From 8a438115fb9e3ed8327de25b23d341dccde229d9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 14 Apr 2025 18:00:33 -0400 Subject: [PATCH 0001/1073] add RMSNorm to comfy.ops --- comfy/ldm/common_dit.py | 20 ++----------- comfy/ops.py | 20 +++++++++++++ comfy/rmsnorm.py | 65 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 17 deletions(-) create mode 100644 comfy/rmsnorm.py diff --git a/comfy/ldm/common_dit.py b/comfy/ldm/common_dit.py index e0f3057f7..f7f56b72c 100644 --- a/comfy/ldm/common_dit.py +++ b/comfy/ldm/common_dit.py @@ -1,5 +1,6 @@ import torch -import comfy.ops +import comfy.rmsnorm + def pad_to_patch_size(img, patch_size=(2, 2), padding_mode="circular"): if padding_mode == "circular" and (torch.jit.is_tracing() or torch.jit.is_scripting()): @@ -11,20 +12,5 @@ def pad_to_patch_size(img, patch_size=(2, 2), padding_mode="circular"): return torch.nn.functional.pad(img, pad, mode=padding_mode) -try: - rms_norm_torch = torch.nn.functional.rms_norm -except: - rms_norm_torch = None -def rms_norm(x, weight=None, eps=1e-6): - if rms_norm_torch is not None and not (torch.jit.is_tracing() or torch.jit.is_scripting()): - if weight is None: - return rms_norm_torch(x, (x.shape[-1],), eps=eps) - else: - return rms_norm_torch(x, weight.shape, weight=comfy.ops.cast_to(weight, dtype=x.dtype, device=x.device), eps=eps) - else: - r = x * torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps) - if weight is None: - return r - else: - return r * comfy.ops.cast_to(weight, dtype=x.dtype, device=x.device) +rms_norm = comfy.rmsnorm.rms_norm diff --git a/comfy/ops.py b/comfy/ops.py index 9a5c1ee99..6b0e29307 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -21,6 +21,7 @@ import logging import comfy.model_management from comfy.cli_args import args, PerformanceFeature import comfy.float +import comfy.rmsnorm cast_to = comfy.model_management.cast_to #TODO: remove once no more references @@ -146,6 +147,25 @@ class disable_weight_init: else: return super().forward(*args, **kwargs) + class RMSNorm(comfy.rmsnorm.RMSNorm, CastWeightBiasOp): + def reset_parameters(self): + self.bias = None + return None + + def forward_comfy_cast_weights(self, input): + if self.weight is not None: + weight, bias = cast_bias_weight(self, input) + else: + weight = None + return comfy.rmsnorm.rms_norm(input, weight, self.eps) # TODO: switch to commented out line when old torch is deprecated + # return torch.nn.functional.rms_norm(input, self.normalized_shape, weight, self.eps) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + class ConvTranspose2d(torch.nn.ConvTranspose2d, CastWeightBiasOp): def reset_parameters(self): return None diff --git a/comfy/rmsnorm.py b/comfy/rmsnorm.py new file mode 100644 index 000000000..81b3e9062 --- /dev/null +++ b/comfy/rmsnorm.py @@ -0,0 +1,65 @@ +import torch +import comfy.model_management +import numbers + +RMSNorm = None + +try: + rms_norm_torch = torch.nn.functional.rms_norm + RMSNorm = torch.nn.RMSNorm +except: + rms_norm_torch = None + + +def rms_norm(x, weight=None, eps=1e-6): + if rms_norm_torch is not None and not (torch.jit.is_tracing() or torch.jit.is_scripting()): + if weight is None: + return rms_norm_torch(x, (x.shape[-1],), eps=eps) + else: + return rms_norm_torch(x, weight.shape, weight=comfy.model_management.cast_to(weight, dtype=x.dtype, device=x.device), 
eps=eps) + else: + r = x * torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps) + if weight is None: + return r + else: + return r * comfy.model_management.cast_to(weight, dtype=x.dtype, device=x.device) + + +if RMSNorm is None: + class RMSNorm(torch.nn.Module): + def __init__( + self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None, **kwargs + ): + super().__init__() + self.eps = eps + self.learnable_scale = elementwise_affine + if self.learnable_scale: + self.weight = torch.nn.Parameter(torch.empty(dim, device=device, dtype=dtype)) + else: + self.register_parameter("weight", None) + + def __init__( + self, + normalized_shape, + eps=None, + elementwise_affine=True, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + if isinstance(normalized_shape, numbers.Integral): + # mypy error: incompatible types in assignment + normalized_shape = (normalized_shape,) # type: ignore[assignment] + self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] + self.eps = eps + self.elementwise_affine = elementwise_affine + if self.elementwise_affine: + self.weight = torch.nn.Parameter( + torch.empty(self.normalized_shape, **factory_kwargs) + ) + else: + self.register_parameter("weight", None) + + def forward(self, x): + return rms_norm(x, self.weight, self.eps) From 3e8155f7a3d7601838bbc82a8ccf550343bbb132 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 15 Apr 2025 10:32:21 -0400 Subject: [PATCH 0002/1073] More flexible long clip support. Add clip g long clip support. Text encoder refactor. Support llama models with different vocab sizes. --- comfy/sd1_clip.py | 23 ++++++++++++++--- comfy/sdxl_clip.py | 14 +++++------ comfy/text_encoders/aura_t5.py | 2 +- comfy/text_encoders/cosmos.py | 2 +- comfy/text_encoders/flux.py | 10 +++----- comfy/text_encoders/genmo.py | 2 +- comfy/text_encoders/hunyuan_video.py | 22 ++++++++++------- comfy/text_encoders/hydit.py | 8 +++--- comfy/text_encoders/llama.py | 14 ++++++++++- comfy/text_encoders/long_clipl.py | 37 ++++++++++++++-------------- comfy/text_encoders/lt.py | 2 +- comfy/text_encoders/lumina2.py | 2 +- comfy/text_encoders/pixart_t5.py | 2 +- comfy/text_encoders/sa_t5.py | 2 +- comfy/text_encoders/sd2_clip.py | 2 +- comfy/text_encoders/sd3_clip.py | 15 ++++++----- comfy/text_encoders/wan.py | 2 +- 17 files changed, 95 insertions(+), 66 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index be21ec18d..2ca5ed9ba 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -82,7 +82,8 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): LAYERS = [ "last", "pooled", - "hidden" + "hidden", + "all" ] def __init__(self, device="cpu", max_length=77, freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=comfy.clip_model.CLIPTextModel, @@ -93,6 +94,8 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): if textmodel_json_config is None: textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json") + if "model_name" not in model_options: + model_options = {**model_options, "model_name": "clip_l"} if isinstance(textmodel_json_config, dict): config = textmodel_json_config @@ -100,6 +103,10 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): with open(textmodel_json_config) as f: config = json.load(f) + te_model_options = model_options.get("{}_model_config".format(model_options.get("model_name", "")), {}) + for k, v in 
te_model_options.items(): + config[k] = v + operations = model_options.get("custom_operations", None) scaled_fp8 = None @@ -147,7 +154,9 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): def set_clip_options(self, options): layer_idx = options.get("layer", self.layer_idx) self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled) - if layer_idx is None or abs(layer_idx) > self.num_layers: + if self.layer == "all": + pass + elif layer_idx is None or abs(layer_idx) > self.num_layers: self.layer = "last" else: self.layer = "hidden" @@ -244,7 +253,12 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): if self.enable_attention_masks: attention_mask_model = attention_mask - outputs = self.transformer(None, attention_mask_model, embeds=embeds, num_tokens=num_tokens, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state, dtype=torch.float32) + if self.layer == "all": + intermediate_output = "all" + else: + intermediate_output = self.layer_idx + + outputs = self.transformer(None, attention_mask_model, embeds=embeds, num_tokens=num_tokens, intermediate_output=intermediate_output, final_layer_norm_intermediate=self.layer_norm_hidden_state, dtype=torch.float32) if self.layer == "last": z = outputs[0].float() @@ -447,7 +461,7 @@ class SDTokenizer: if tokenizer_path is None: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args) - self.max_length = max_length + self.max_length = tokenizer_data.get("{}_max_length".format(embedding_key), max_length) self.min_length = min_length self.end_token = None @@ -645,6 +659,7 @@ class SD1ClipModel(torch.nn.Module): self.clip = "clip_{}".format(self.clip_name) clip_model = model_options.get("{}_class".format(self.clip), clip_model) + model_options = {**model_options, "model_name": self.clip} setattr(self, self.clip, clip_model(device=device, dtype=dtype, model_options=model_options, **kwargs)) self.dtypes = set() diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index 5b7c8a412..ea7f5d10f 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -9,6 +9,7 @@ class SDXLClipG(sd1_clip.SDClipModel): layer_idx=-2 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json") + model_options = {**model_options, "model_name": "clip_g"} super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False, return_projected_pooled=True, model_options=model_options) @@ -17,14 +18,13 @@ class SDXLClipG(sd1_clip.SDClipModel): class SDXLClipGTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None, tokenizer_data={}): - super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g') + super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g', tokenizer_data=tokenizer_data) class SDXLTokenizer: def __init__(self, embedding_directory=None, tokenizer_data={}): - clip_l_tokenizer_class = tokenizer_data.get("clip_l_tokenizer_class", sd1_clip.SDTokenizer) - self.clip_l = clip_l_tokenizer_class(embedding_directory=embedding_directory) - self.clip_g = 
SDXLClipGTokenizer(embedding_directory=embedding_directory) + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.clip_g = SDXLClipGTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} @@ -41,8 +41,7 @@ class SDXLTokenizer: class SDXLClipModel(torch.nn.Module): def __init__(self, device="cpu", dtype=None, model_options={}): super().__init__() - clip_l_class = model_options.get("clip_l_class", sd1_clip.SDClipModel) - self.clip_l = clip_l_class(layer="hidden", layer_idx=-2, device=device, dtype=dtype, layer_norm_hidden_state=False, model_options=model_options) + self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=-2, device=device, dtype=dtype, layer_norm_hidden_state=False, model_options=model_options) self.clip_g = SDXLClipG(device=device, dtype=dtype, model_options=model_options) self.dtypes = set([dtype]) @@ -75,7 +74,7 @@ class SDXLRefinerClipModel(sd1_clip.SD1ClipModel): class StableCascadeClipGTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None, tokenizer_data={}): - super().__init__(tokenizer_path, pad_with_end=True, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g') + super().__init__(tokenizer_path, pad_with_end=True, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g', tokenizer_data=tokenizer_data) class StableCascadeTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): @@ -84,6 +83,7 @@ class StableCascadeTokenizer(sd1_clip.SD1Tokenizer): class StableCascadeClipG(sd1_clip.SDClipModel): def __init__(self, device="cpu", max_length=77, freeze=True, layer="hidden", layer_idx=-1, dtype=None, model_options={}): textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json") + model_options = {**model_options, "model_name": "clip_g"} super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=False, enable_attention_masks=True, return_projected_pooled=True, model_options=model_options) diff --git a/comfy/text_encoders/aura_t5.py b/comfy/text_encoders/aura_t5.py index e9ad45a7f..cf4252eea 100644 --- a/comfy/text_encoders/aura_t5.py +++ b/comfy/text_encoders/aura_t5.py @@ -11,7 +11,7 @@ class PT5XlModel(sd1_clip.SDClipModel): class PT5XlTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_pile_tokenizer"), "tokenizer.model") - super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='pile_t5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, pad_token=1) + super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='pile_t5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, pad_token=1, tokenizer_data=tokenizer_data) class AuraT5Tokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): diff --git a/comfy/text_encoders/cosmos.py b/comfy/text_encoders/cosmos.py index 
5441c8952..a1adb5242 100644 --- a/comfy/text_encoders/cosmos.py +++ b/comfy/text_encoders/cosmos.py @@ -22,7 +22,7 @@ class CosmosT5XXL(sd1_clip.SD1ClipModel): class T5XXLTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=1024, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=512) + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=1024, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, tokenizer_data=tokenizer_data) class CosmosT5Tokenizer(sd1_clip.SD1Tokenizer): diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py index a12995ec0..0666dde7f 100644 --- a/comfy/text_encoders/flux.py +++ b/comfy/text_encoders/flux.py @@ -9,14 +9,13 @@ import os class T5XXLTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256) + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, tokenizer_data=tokenizer_data) class FluxTokenizer: def __init__(self, embedding_directory=None, tokenizer_data={}): - clip_l_tokenizer_class = tokenizer_data.get("clip_l_tokenizer_class", sd1_clip.SDTokenizer) - self.clip_l = clip_l_tokenizer_class(embedding_directory=embedding_directory) - self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory) + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} @@ -35,8 +34,7 @@ class FluxClipModel(torch.nn.Module): def __init__(self, dtype_t5=None, device="cpu", dtype=None, model_options={}): super().__init__() dtype_t5 = comfy.model_management.pick_weight_dtype(dtype_t5, dtype, device) - clip_l_class = model_options.get("clip_l_class", sd1_clip.SDClipModel) - self.clip_l = clip_l_class(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options) + self.clip_l = sd1_clip.SDClipModel(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options) self.t5xxl = comfy.text_encoders.sd3_clip.T5XXLModel(device=device, dtype=dtype_t5, model_options=model_options) self.dtypes = set([dtype, dtype_t5]) diff --git a/comfy/text_encoders/genmo.py b/comfy/text_encoders/genmo.py index 45987a480..9dcf190a2 100644 --- a/comfy/text_encoders/genmo.py +++ b/comfy/text_encoders/genmo.py @@ -18,7 +18,7 @@ class MochiT5XXL(sd1_clip.SD1ClipModel): class T5XXLTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, 
tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256) + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, tokenizer_data=tokenizer_data) class MochiT5Tokenizer(sd1_clip.SD1Tokenizer): diff --git a/comfy/text_encoders/hunyuan_video.py b/comfy/text_encoders/hunyuan_video.py index dbb259e54..33ac22497 100644 --- a/comfy/text_encoders/hunyuan_video.py +++ b/comfy/text_encoders/hunyuan_video.py @@ -21,26 +21,31 @@ def llama_detect(state_dict, prefix=""): class LLAMA3Tokenizer(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}, min_length=256): + def __init__(self, embedding_directory=None, tokenizer_data={}, min_length=256, pad_token=128258): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "llama_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='llama', tokenizer_class=LlamaTokenizerFast, has_start_token=True, has_end_token=False, pad_to_max_length=False, max_length=99999999, pad_token=128258, min_length=min_length) + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='llama', tokenizer_class=LlamaTokenizerFast, has_start_token=True, has_end_token=False, pad_to_max_length=False, max_length=99999999, pad_token=pad_token, min_length=min_length, tokenizer_data=tokenizer_data) class LLAMAModel(sd1_clip.SDClipModel): - def __init__(self, device="cpu", layer="hidden", layer_idx=-3, dtype=None, attention_mask=True, model_options={}): + def __init__(self, device="cpu", layer="hidden", layer_idx=-3, dtype=None, attention_mask=True, model_options={}, special_tokens={"start": 128000, "pad": 128258}): llama_scaled_fp8 = model_options.get("llama_scaled_fp8", None) if llama_scaled_fp8 is not None: model_options = model_options.copy() model_options["scaled_fp8"] = llama_scaled_fp8 - super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 128000, "pad": 128258}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Llama2, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) + textmodel_json_config = {} + vocab_size = model_options.get("vocab_size", None) + if vocab_size is not None: + textmodel_json_config["vocab_size"] = vocab_size + + model_options = {**model_options, "model_name": "llama"} + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens=special_tokens, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Llama2, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) class HunyuanVideoTokenizer: def __init__(self, embedding_directory=None, tokenizer_data={}): - clip_l_tokenizer_class = tokenizer_data.get("clip_l_tokenizer_class", sd1_clip.SDTokenizer) - self.clip_l = 
clip_l_tokenizer_class(embedding_directory=embedding_directory) + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) self.llama_template = """<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: 1. The main content and theme of the video.2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects.3. Actions, events, behaviors temporal relationships, physical movement changes of the objects.4. background environment, light, style and atmosphere.5. camera angles, movements, and transitions used in the video:<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>""" # 95 tokens - self.llama = LLAMA3Tokenizer(embedding_directory=embedding_directory, min_length=1) + self.llama = LLAMA3Tokenizer(embedding_directory=embedding_directory, min_length=1, tokenizer_data=tokenizer_data) def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, image_embeds=None, image_interleave=1, **kwargs): out = {} @@ -72,8 +77,7 @@ class HunyuanVideoClipModel(torch.nn.Module): def __init__(self, dtype_llama=None, device="cpu", dtype=None, model_options={}): super().__init__() dtype_llama = comfy.model_management.pick_weight_dtype(dtype_llama, dtype, device) - clip_l_class = model_options.get("clip_l_class", sd1_clip.SDClipModel) - self.clip_l = clip_l_class(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options) + self.clip_l = sd1_clip.SDClipModel(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options) self.llama = LLAMAModel(device=device, dtype=dtype_llama, model_options=model_options) self.dtypes = set([dtype, dtype_llama]) diff --git a/comfy/text_encoders/hydit.py b/comfy/text_encoders/hydit.py index 7da3e9fc5..e7273f425 100644 --- a/comfy/text_encoders/hydit.py +++ b/comfy/text_encoders/hydit.py @@ -9,24 +9,26 @@ import torch class HyditBertModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, model_options={}): textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "hydit_clip.json") + model_options = {**model_options, "model_name": "hydit_clip"} super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 101, "end": 102, "pad": 0}, model_class=BertModel, enable_attention_masks=True, return_attention_masks=True, model_options=model_options) class HyditBertTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "hydit_clip_tokenizer") - super().__init__(tokenizer_path, pad_with_end=False, embedding_size=1024, embedding_key='chinese_roberta', tokenizer_class=BertTokenizer, pad_to_max_length=False, max_length=512, min_length=77) + super().__init__(tokenizer_path, pad_with_end=False, embedding_size=1024, embedding_key='chinese_roberta', tokenizer_class=BertTokenizer, pad_to_max_length=False, max_length=512, min_length=77, tokenizer_data=tokenizer_data) class MT5XLModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, model_options={}): textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mt5_config_xl.json") + model_options = {**model_options, "model_name": "mt5xl"} super().__init__(device=device, layer=layer, 
layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=True, return_attention_masks=True, model_options=model_options) class MT5XLTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): #tokenizer_path = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "mt5_tokenizer"), "spiece.model") tokenizer = tokenizer_data.get("spiece_model", None) - super().__init__(tokenizer, pad_with_end=False, embedding_size=2048, embedding_key='mt5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256) + super().__init__(tokenizer, pad_with_end=False, embedding_size=2048, embedding_key='mt5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, tokenizer_data=tokenizer_data) def state_dict(self): return {"spiece_model": self.tokenizer.serialize_model()} @@ -35,7 +37,7 @@ class HyditTokenizer: def __init__(self, embedding_directory=None, tokenizer_data={}): mt5_tokenizer_data = tokenizer_data.get("mt5xl.spiece_model", None) self.hydit_clip = HyditBertTokenizer(embedding_directory=embedding_directory) - self.mt5xl = MT5XLTokenizer(tokenizer_data={"spiece_model": mt5_tokenizer_data}, embedding_directory=embedding_directory) + self.mt5xl = MT5XLTokenizer(tokenizer_data={**tokenizer_data, "spiece_model": mt5_tokenizer_data}, embedding_directory=embedding_directory) def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 58710b2bf..34eb870e3 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -268,11 +268,17 @@ class Llama2_(nn.Module): optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True) intermediate = None + all_intermediate = None if intermediate_output is not None: - if intermediate_output < 0: + if intermediate_output == "all": + all_intermediate = [] + intermediate_output = None + elif intermediate_output < 0: intermediate_output = len(self.layers) + intermediate_output for i, layer in enumerate(self.layers): + if all_intermediate is not None: + all_intermediate.append(x.unsqueeze(1).clone()) x = layer( x=x, attention_mask=mask, @@ -283,6 +289,12 @@ class Llama2_(nn.Module): intermediate = x.clone() x = self.norm(x) + if all_intermediate is not None: + all_intermediate.append(x.unsqueeze(1).clone()) + + if all_intermediate is not None: + intermediate = torch.cat(all_intermediate, dim=1) + if intermediate is not None and final_layer_norm_intermediate: intermediate = self.norm(intermediate) diff --git a/comfy/text_encoders/long_clipl.py b/comfy/text_encoders/long_clipl.py index b81912cb3..f9483b427 100644 --- a/comfy/text_encoders/long_clipl.py +++ b/comfy/text_encoders/long_clipl.py @@ -1,30 +1,29 @@ from comfy import sd1_clip import os -class LongClipTokenizer_(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - super().__init__(max_length=248, embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) - -class LongClipModel_(sd1_clip.SDClipModel): - def __init__(self, *args, **kwargs): - textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "long_clipl.json") - super().__init__(*args, textmodel_json_config=textmodel_json_config, 
**kwargs) - -class LongClipTokenizer(sd1_clip.SD1Tokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, tokenizer=LongClipTokenizer_) - -class LongClipModel(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", dtype=None, model_options={}, **kwargs): - super().__init__(device=device, dtype=dtype, model_options=model_options, clip_model=LongClipModel_, **kwargs) def model_options_long_clip(sd, tokenizer_data, model_options): w = sd.get("clip_l.text_model.embeddings.position_embedding.weight", None) + if w is None: + w = sd.get("clip_g.text_model.embeddings.position_embedding.weight", None) + else: + model_name = "clip_g" + if w is None: w = sd.get("text_model.embeddings.position_embedding.weight", None) - if w is not None and w.shape[0] == 248: + if w is not None: + if "text_model.encoder.layers.30.mlp.fc1.weight" in sd: + model_name = "clip_g" + elif "text_model.encoder.layers.1.mlp.fc1.weight" in sd: + model_name = "clip_l" + else: + model_name = "clip_l" + + if w is not None: tokenizer_data = tokenizer_data.copy() model_options = model_options.copy() - tokenizer_data["clip_l_tokenizer_class"] = LongClipTokenizer_ - model_options["clip_l_class"] = LongClipModel_ + model_config = model_options.get("model_config", {}) + model_config["max_position_embeddings"] = w.shape[0] + model_options["{}_model_config".format(model_name)] = model_config + tokenizer_data["{}_max_length".format(model_name)] = w.shape[0] return tokenizer_data, model_options diff --git a/comfy/text_encoders/lt.py b/comfy/text_encoders/lt.py index 5c2ce583f..48ea67e67 100644 --- a/comfy/text_encoders/lt.py +++ b/comfy/text_encoders/lt.py @@ -6,7 +6,7 @@ import comfy.text_encoders.genmo class T5XXLTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=128) #pad to 128? + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=128, tokenizer_data=tokenizer_data) #pad to 128? 
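+        # tokenizer_data is now threaded through to sd1_clip.SDTokenizer, which
+        # reads "{}_max_length".format(embedding_key) from it, so loaders can
+        # override the context window per encoder. A hedged sketch (the key is
+        # derived from embedding_key='t5xxl' above, the value is illustrative):
+        #   T5XXLTokenizer(tokenizer_data={"t5xxl_max_length": 512})
+        #   # -> self.max_length == 512 instead of the 99999999 default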
class LTXVT5Tokenizer(sd1_clip.SD1Tokenizer): diff --git a/comfy/text_encoders/lumina2.py b/comfy/text_encoders/lumina2.py index a7b1d702b..674461b75 100644 --- a/comfy/text_encoders/lumina2.py +++ b/comfy/text_encoders/lumina2.py @@ -6,7 +6,7 @@ import comfy.text_encoders.llama class Gemma2BTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer = tokenizer_data.get("spiece_model", None) - super().__init__(tokenizer, pad_with_end=False, embedding_size=2304, embedding_key='gemma2_2b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False}) + super().__init__(tokenizer, pad_with_end=False, embedding_size=2304, embedding_key='gemma2_2b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False}, tokenizer_data=tokenizer_data) def state_dict(self): return {"spiece_model": self.tokenizer.serialize_model()} diff --git a/comfy/text_encoders/pixart_t5.py b/comfy/text_encoders/pixart_t5.py index d56d57f1b..b8de6bc4e 100644 --- a/comfy/text_encoders/pixart_t5.py +++ b/comfy/text_encoders/pixart_t5.py @@ -24,7 +24,7 @@ class PixArtT5XXL(sd1_clip.SD1ClipModel): class T5XXLTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1) # no padding + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_data=tokenizer_data) # no padding class PixArtTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): diff --git a/comfy/text_encoders/sa_t5.py b/comfy/text_encoders/sa_t5.py index 7778ce47a..2803926ac 100644 --- a/comfy/text_encoders/sa_t5.py +++ b/comfy/text_encoders/sa_t5.py @@ -11,7 +11,7 @@ class T5BaseModel(sd1_clip.SDClipModel): class T5BaseTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, pad_with_end=False, embedding_size=768, embedding_key='t5base', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=128) + super().__init__(tokenizer_path, pad_with_end=False, embedding_size=768, embedding_key='t5base', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=128, tokenizer_data=tokenizer_data) class SAT5Tokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): diff --git a/comfy/text_encoders/sd2_clip.py b/comfy/text_encoders/sd2_clip.py index 31fc89869..700a23bf0 100644 --- a/comfy/text_encoders/sd2_clip.py +++ b/comfy/text_encoders/sd2_clip.py @@ -12,7 +12,7 @@ class SD2ClipHModel(sd1_clip.SDClipModel): class SD2ClipHTokenizer(sd1_clip.SDTokenizer): def __init__(self, 
tokenizer_path=None, embedding_directory=None, tokenizer_data={}): - super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024) + super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024, embedding_key='clip_h', tokenizer_data=tokenizer_data) class SD2Tokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): diff --git a/comfy/text_encoders/sd3_clip.py b/comfy/text_encoders/sd3_clip.py index 3ad2ed93a..1727998a8 100644 --- a/comfy/text_encoders/sd3_clip.py +++ b/comfy/text_encoders/sd3_clip.py @@ -15,6 +15,7 @@ class T5XXLModel(sd1_clip.SDClipModel): model_options = model_options.copy() model_options["scaled_fp8"] = t5xxl_scaled_fp8 + model_options = {**model_options, "model_name": "t5xxl"} super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) @@ -31,17 +32,16 @@ def t5_xxl_detect(state_dict, prefix=""): return out class T5XXLTokenizer(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): + def __init__(self, embedding_directory=None, tokenizer_data={}, min_length=77): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=77) + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=min_length, tokenizer_data=tokenizer_data) class SD3Tokenizer: def __init__(self, embedding_directory=None, tokenizer_data={}): - clip_l_tokenizer_class = tokenizer_data.get("clip_l_tokenizer_class", sd1_clip.SDTokenizer) - self.clip_l = clip_l_tokenizer_class(embedding_directory=embedding_directory) - self.clip_g = sdxl_clip.SDXLClipGTokenizer(embedding_directory=embedding_directory) - self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory) + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.clip_g = sdxl_clip.SDXLClipGTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} @@ -61,8 +61,7 @@ class SD3ClipModel(torch.nn.Module): super().__init__() self.dtypes = set() if clip_l: - clip_l_class = model_options.get("clip_l_class", sd1_clip.SDClipModel) - self.clip_l = clip_l_class(layer="hidden", layer_idx=-2, device=device, dtype=dtype, layer_norm_hidden_state=False, return_projected_pooled=False, model_options=model_options) + self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=-2, device=device, dtype=dtype, layer_norm_hidden_state=False, return_projected_pooled=False, model_options=model_options) self.dtypes.add(dtype) else: self.clip_l = None diff --git a/comfy/text_encoders/wan.py 
b/comfy/text_encoders/wan.py index 971ac8fa8..d50fa4b28 100644 --- a/comfy/text_encoders/wan.py +++ b/comfy/text_encoders/wan.py @@ -11,7 +11,7 @@ class UMT5XXlModel(sd1_clip.SDClipModel): class UMT5XXlTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): tokenizer = tokenizer_data.get("spiece_model", None) - super().__init__(tokenizer, pad_with_end=False, embedding_size=4096, embedding_key='umt5xxl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=0) + super().__init__(tokenizer, pad_with_end=False, embedding_size=4096, embedding_key='umt5xxl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=0, tokenizer_data=tokenizer_data) def state_dict(self): return {"spiece_model": self.tokenizer.serialize_model()} From 6fc5dbd52ab70952020e6bc486c4d851a7ba6625 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 15 Apr 2025 12:13:28 -0400 Subject: [PATCH 0003/1073] Cleanup. --- comfy/rmsnorm.py | 11 ----------- comfy/text_encoders/long_clipl.py | 2 -- 2 files changed, 13 deletions(-) diff --git a/comfy/rmsnorm.py b/comfy/rmsnorm.py index 81b3e9062..77df44464 100644 --- a/comfy/rmsnorm.py +++ b/comfy/rmsnorm.py @@ -27,17 +27,6 @@ def rms_norm(x, weight=None, eps=1e-6): if RMSNorm is None: class RMSNorm(torch.nn.Module): - def __init__( - self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None, **kwargs - ): - super().__init__() - self.eps = eps - self.learnable_scale = elementwise_affine - if self.learnable_scale: - self.weight = torch.nn.Parameter(torch.empty(dim, device=device, dtype=dtype)) - else: - self.register_parameter("weight", None) - def __init__( self, normalized_shape, diff --git a/comfy/text_encoders/long_clipl.py b/comfy/text_encoders/long_clipl.py index f9483b427..8d4c7619d 100644 --- a/comfy/text_encoders/long_clipl.py +++ b/comfy/text_encoders/long_clipl.py @@ -1,5 +1,3 @@ -from comfy import sd1_clip -import os def model_options_long_clip(sd, tokenizer_data, model_options): From 9ad792f92706e2179c58b2e5348164acafa69288 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 15 Apr 2025 17:35:05 -0400 Subject: [PATCH 0004/1073] Basic support for hidream i1 model. 
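
The model added below is a diffusion transformer with flux-style rotary
position embeddings, dual-stream (image + text) blocks followed by
single-stream blocks, and a DeepSeek-style mixture-of-experts feed-forward.
A minimal standalone sketch of the top-k routing that the MoEGate class in
this patch implements (shapes and names assumed for illustration, not the
exact class):

    import torch
    import torch.nn.functional as F

    def route(hidden_states, gate_weight, top_k=2):
        # hidden_states: (batch, seq, h); gate_weight: (n_experts, h)
        bsz, seq_len, h = hidden_states.shape
        logits = F.linear(hidden_states.view(-1, h), gate_weight)
        scores = logits.softmax(dim=-1)  # per-token scores over experts
        # keep the top_k experts per token; the weights are later used to
        # blend the selected experts' outputs back together
        return torch.topk(scores, k=top_k, dim=-1, sorted=False)

    topk_weight, topk_idx = route(torch.randn(1, 4, 8), torch.randn(4, 8))
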
--- comfy/ldm/hidream/model.py | 828 +++++++++++++++++++++++++++++++++ comfy/model_base.py | 18 + comfy/model_detection.py | 19 + comfy/ops.py | 3 + comfy/sd.py | 4 + comfy/supported_models.py | 32 +- comfy/text_encoders/hidream.py | 150 ++++++ comfy_extras/nodes_hidream.py | 32 ++ nodes.py | 3 +- 9 files changed, 1087 insertions(+), 2 deletions(-) create mode 100644 comfy/ldm/hidream/model.py create mode 100644 comfy/text_encoders/hidream.py create mode 100644 comfy_extras/nodes_hidream.py diff --git a/comfy/ldm/hidream/model.py b/comfy/ldm/hidream/model.py new file mode 100644 index 000000000..de749a373 --- /dev/null +++ b/comfy/ldm/hidream/model.py @@ -0,0 +1,828 @@ +from typing import Optional, Tuple, List + +import torch +import torch.nn as nn +import einops +from einops import repeat + +from comfy.ldm.lightricks.model import TimestepEmbedding, Timesteps +import torch.nn.functional as F + +from comfy.ldm.flux.math import apply_rope +from comfy.ldm.modules.attention import optimized_attention +import comfy.model_management + +# Copied from https://github.com/black-forest-labs/flux/blob/main/src/flux/math.py +def rope(pos: torch.Tensor, dim: int, theta: int) -> torch.Tensor: + assert dim % 2 == 0, "The dimension must be even." + + scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim + omega = 1.0 / (theta**scale) + + batch_size, seq_length = pos.shape + out = torch.einsum("...n,d->...nd", pos, omega) + cos_out = torch.cos(out) + sin_out = torch.sin(out) + + stacked_out = torch.stack([cos_out, -sin_out, sin_out, cos_out], dim=-1) + out = stacked_out.view(batch_size, -1, dim // 2, 2, 2) + return out.float() + + +# Copied from https://github.com/black-forest-labs/flux/blob/main/src/flux/modules/layers.py +class EmbedND(nn.Module): + def __init__(self, theta: int, axes_dim: List[int]): + super().__init__() + self.theta = theta + self.axes_dim = axes_dim + + def forward(self, ids: torch.Tensor) -> torch.Tensor: + n_axes = ids.shape[-1] + emb = torch.cat( + [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)], + dim=-3, + ) + return emb.unsqueeze(2) + + +class PatchEmbed(nn.Module): + def __init__( + self, + patch_size=2, + in_channels=4, + out_channels=1024, + dtype=None, device=None, operations=None + ): + super().__init__() + self.patch_size = patch_size + self.out_channels = out_channels + self.proj = operations.Linear(in_channels * patch_size * patch_size, out_channels, bias=True, dtype=dtype, device=device) + + def forward(self, latent): + latent = self.proj(latent) + return latent + + +class PooledEmbed(nn.Module): + def __init__(self, text_emb_dim, hidden_size, dtype=None, device=None, operations=None): + super().__init__() + self.pooled_embedder = TimestepEmbedding(in_channels=text_emb_dim, time_embed_dim=hidden_size, dtype=dtype, device=device, operations=operations) + + def forward(self, pooled_embed): + return self.pooled_embedder(pooled_embed) + + +class TimestepEmbed(nn.Module): + def __init__(self, hidden_size, frequency_embedding_size=256, dtype=None, device=None, operations=None): + super().__init__() + self.time_proj = Timesteps(num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0) + self.timestep_embedder = TimestepEmbedding(in_channels=frequency_embedding_size, time_embed_dim=hidden_size, dtype=dtype, device=device, operations=operations) + + def forward(self, timesteps, wdtype): + t_emb = self.time_proj(timesteps).to(dtype=wdtype) + t_emb = self.timestep_embedder(t_emb) + return t_emb + + +class 
OutEmbed(nn.Module): + def __init__(self, hidden_size, patch_size, out_channels, dtype=None, device=None, operations=None): + super().__init__() + self.norm_final = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.linear = operations.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True, dtype=dtype, device=device) + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + operations.Linear(hidden_size, 2 * hidden_size, bias=True, dtype=dtype, device=device) + ) + + def forward(self, x, adaln_input): + shift, scale = self.adaLN_modulation(adaln_input).chunk(2, dim=1) + x = self.norm_final(x) * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) + x = self.linear(x) + return x + + +def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor): + return optimized_attention(query.view(query.shape[0], -1, query.shape[-1] * query.shape[-2]), key.view(key.shape[0], -1, key.shape[-1] * key.shape[-2]), value.view(value.shape[0], -1, value.shape[-1] * value.shape[-2]), query.shape[2]) + + +class HiDreamAttnProcessor_flashattn: + """Attention processor used typically in processing the SD3-like self-attention projections.""" + + def __call__( + self, + attn, + image_tokens: torch.FloatTensor, + image_tokens_masks: Optional[torch.FloatTensor] = None, + text_tokens: Optional[torch.FloatTensor] = None, + rope: torch.FloatTensor = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + dtype = image_tokens.dtype + batch_size = image_tokens.shape[0] + + query_i = attn.q_rms_norm(attn.to_q(image_tokens)).to(dtype=dtype) + key_i = attn.k_rms_norm(attn.to_k(image_tokens)).to(dtype=dtype) + value_i = attn.to_v(image_tokens) + + inner_dim = key_i.shape[-1] + head_dim = inner_dim // attn.heads + + query_i = query_i.view(batch_size, -1, attn.heads, head_dim) + key_i = key_i.view(batch_size, -1, attn.heads, head_dim) + value_i = value_i.view(batch_size, -1, attn.heads, head_dim) + if image_tokens_masks is not None: + key_i = key_i * image_tokens_masks.view(batch_size, -1, 1, 1) + + if not attn.single: + query_t = attn.q_rms_norm_t(attn.to_q_t(text_tokens)).to(dtype=dtype) + key_t = attn.k_rms_norm_t(attn.to_k_t(text_tokens)).to(dtype=dtype) + value_t = attn.to_v_t(text_tokens) + + query_t = query_t.view(batch_size, -1, attn.heads, head_dim) + key_t = key_t.view(batch_size, -1, attn.heads, head_dim) + value_t = value_t.view(batch_size, -1, attn.heads, head_dim) + + num_image_tokens = query_i.shape[1] + num_text_tokens = query_t.shape[1] + query = torch.cat([query_i, query_t], dim=1) + key = torch.cat([key_i, key_t], dim=1) + value = torch.cat([value_i, value_t], dim=1) + else: + query = query_i + key = key_i + value = value_i + + if query.shape[-1] == rope.shape[-3] * 2: + query, key = apply_rope(query, key, rope) + else: + query_1, query_2 = query.chunk(2, dim=-1) + key_1, key_2 = key.chunk(2, dim=-1) + query_1, key_1 = apply_rope(query_1, key_1, rope) + query = torch.cat([query_1, query_2], dim=-1) + key = torch.cat([key_1, key_2], dim=-1) + + hidden_states = attention(query, key, value) + + if not attn.single: + hidden_states_i, hidden_states_t = torch.split(hidden_states, [num_image_tokens, num_text_tokens], dim=1) + hidden_states_i = attn.to_out(hidden_states_i) + hidden_states_t = attn.to_out_t(hidden_states_t) + return hidden_states_i, hidden_states_t + else: + hidden_states = attn.to_out(hidden_states) + return hidden_states + +class HiDreamAttention(nn.Module): + def __init__( + self, + query_dim: int, + heads: int = 8, + dim_head: 
int = 64, + upcast_attention: bool = False, + upcast_softmax: bool = False, + scale_qk: bool = True, + eps: float = 1e-5, + processor = None, + out_dim: int = None, + single: bool = False, + dtype=None, device=None, operations=None + ): + # super(Attention, self).__init__() + super().__init__() + self.inner_dim = out_dim if out_dim is not None else dim_head * heads + self.query_dim = query_dim + self.upcast_attention = upcast_attention + self.upcast_softmax = upcast_softmax + self.out_dim = out_dim if out_dim is not None else query_dim + + self.scale_qk = scale_qk + self.scale = dim_head**-0.5 if self.scale_qk else 1.0 + + self.heads = out_dim // dim_head if out_dim is not None else heads + self.sliceable_head_dim = heads + self.single = single + + linear_cls = operations.Linear + self.linear_cls = linear_cls + self.to_q = linear_cls(query_dim, self.inner_dim, dtype=dtype, device=device) + self.to_k = linear_cls(self.inner_dim, self.inner_dim, dtype=dtype, device=device) + self.to_v = linear_cls(self.inner_dim, self.inner_dim, dtype=dtype, device=device) + self.to_out = linear_cls(self.inner_dim, self.out_dim, dtype=dtype, device=device) + self.q_rms_norm = operations.RMSNorm(self.inner_dim, eps, dtype=dtype, device=device) + self.k_rms_norm = operations.RMSNorm(self.inner_dim, eps, dtype=dtype, device=device) + + if not single: + self.to_q_t = linear_cls(query_dim, self.inner_dim, dtype=dtype, device=device) + self.to_k_t = linear_cls(self.inner_dim, self.inner_dim, dtype=dtype, device=device) + self.to_v_t = linear_cls(self.inner_dim, self.inner_dim, dtype=dtype, device=device) + self.to_out_t = linear_cls(self.inner_dim, self.out_dim, dtype=dtype, device=device) + self.q_rms_norm_t = operations.RMSNorm(self.inner_dim, eps, dtype=dtype, device=device) + self.k_rms_norm_t = operations.RMSNorm(self.inner_dim, eps, dtype=dtype, device=device) + + self.processor = processor + + def forward( + self, + norm_image_tokens: torch.FloatTensor, + image_tokens_masks: torch.FloatTensor = None, + norm_text_tokens: torch.FloatTensor = None, + rope: torch.FloatTensor = None, + ) -> torch.Tensor: + return self.processor( + self, + image_tokens = norm_image_tokens, + image_tokens_masks = image_tokens_masks, + text_tokens = norm_text_tokens, + rope = rope, + ) + + +class FeedForwardSwiGLU(nn.Module): + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int = 256, + ffn_dim_multiplier: Optional[float] = None, + dtype=None, device=None, operations=None + ): + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + # custom dim factor multiplier + if ffn_dim_multiplier is not None: + hidden_dim = int(ffn_dim_multiplier * hidden_dim) + hidden_dim = multiple_of * ( + (hidden_dim + multiple_of - 1) // multiple_of + ) + + self.w1 = operations.Linear(dim, hidden_dim, bias=False, dtype=dtype, device=device) + self.w2 = operations.Linear(hidden_dim, dim, bias=False, dtype=dtype, device=device) + self.w3 = operations.Linear(dim, hidden_dim, bias=False, dtype=dtype, device=device) + + def forward(self, x): + return self.w2(torch.nn.functional.silu(self.w1(x)) * self.w3(x)) + + +# Modified from https://github.com/deepseek-ai/DeepSeek-V3/blob/main/inference/model.py +class MoEGate(nn.Module): + def __init__(self, embed_dim, num_routed_experts=4, num_activated_experts=2, aux_loss_alpha=0.01, dtype=None, device=None, operations=None): + super().__init__() + self.top_k = num_activated_experts + self.n_routed_experts = num_routed_experts + + self.scoring_func = 'softmax' + self.alpha = aux_loss_alpha + 
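+        # alpha and seq_aux are carried over from the DeepSeek-V3 reference gate;
+        # the auxiliary load-balancing loss is never computed here (forward()
+        # returns aux_loss = None), so both fields are inert at inference time.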
self.seq_aux = False + + # topk selection algorithm + self.norm_topk_prob = False + self.gating_dim = embed_dim + self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim), dtype=dtype, device=device)) + self.reset_parameters() + + def reset_parameters(self) -> None: + pass + # import torch.nn.init as init + # init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + + def forward(self, hidden_states): + bsz, seq_len, h = hidden_states.shape + + ### compute gating score + hidden_states = hidden_states.view(-1, h) + logits = F.linear(hidden_states, comfy.model_management.cast_to(self.weight, dtype=hidden_states.dtype, device=hidden_states.device), None) + if self.scoring_func == 'softmax': + scores = logits.softmax(dim=-1) + else: + raise NotImplementedError(f'insupportable scoring function for MoE gating: {self.scoring_func}') + + ### select top-k experts + topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False) + + ### norm gate to sum 1 + if self.top_k > 1 and self.norm_topk_prob: + denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20 + topk_weight = topk_weight / denominator + + aux_loss = None + return topk_idx, topk_weight, aux_loss + + +# Modified from https://github.com/deepseek-ai/DeepSeek-V3/blob/main/inference/model.py +class MOEFeedForwardSwiGLU(nn.Module): + def __init__( + self, + dim: int, + hidden_dim: int, + num_routed_experts: int, + num_activated_experts: int, + dtype=None, device=None, operations=None + ): + super().__init__() + self.shared_experts = FeedForwardSwiGLU(dim, hidden_dim // 2, dtype=dtype, device=device, operations=operations) + self.experts = nn.ModuleList([FeedForwardSwiGLU(dim, hidden_dim, dtype=dtype, device=device, operations=operations) for i in range(num_routed_experts)]) + self.gate = MoEGate( + embed_dim = dim, + num_routed_experts = num_routed_experts, + num_activated_experts = num_activated_experts, + dtype=dtype, device=device, operations=operations + ) + self.num_activated_experts = num_activated_experts + + def forward(self, x): + wtype = x.dtype + identity = x + orig_shape = x.shape + topk_idx, topk_weight, aux_loss = self.gate(x) + x = x.view(-1, x.shape[-1]) + flat_topk_idx = topk_idx.view(-1) + if True: # self.training: # TODO: check which branch performs faster + x = x.repeat_interleave(self.num_activated_experts, dim=0) + y = torch.empty_like(x, dtype=wtype) + for i, expert in enumerate(self.experts): + y[flat_topk_idx == i] = expert(x[flat_topk_idx == i]).to(dtype=wtype) + y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1) + y = y.view(*orig_shape).to(dtype=wtype) + #y = AddAuxiliaryLoss.apply(y, aux_loss) + else: + y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape) + y = y + self.shared_experts(identity) + return y + + @torch.no_grad() + def moe_infer(self, x, flat_expert_indices, flat_expert_weights): + expert_cache = torch.zeros_like(x) + idxs = flat_expert_indices.argsort() + tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0) + token_idxs = idxs // self.num_activated_experts + for i, end_idx in enumerate(tokens_per_expert): + start_idx = 0 if i == 0 else tokens_per_expert[i-1] + if start_idx == end_idx: + continue + expert = self.experts[i] + exp_token_idx = token_idxs[start_idx:end_idx] + expert_tokens = x[exp_token_idx] + expert_out = expert(expert_tokens) + expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]]) + + # for fp16 and other dtype + expert_cache = expert_cache.to(expert_out.dtype) + 
expert_cache.scatter_reduce_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out, reduce='sum') + return expert_cache + + +class TextProjection(nn.Module): + def __init__(self, in_features, hidden_size, dtype=None, device=None, operations=None): + super().__init__() + self.linear = operations.Linear(in_features=in_features, out_features=hidden_size, bias=False, dtype=dtype, device=device) + + def forward(self, caption): + hidden_states = self.linear(caption) + return hidden_states + + +class BlockType: + TransformerBlock = 1 + SingleTransformerBlock = 2 + + +class HiDreamImageSingleTransformerBlock(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + num_routed_experts: int = 4, + num_activated_experts: int = 2, + dtype=None, device=None, operations=None + ): + super().__init__() + self.num_attention_heads = num_attention_heads + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + operations.Linear(dim, 6 * dim, bias=True, dtype=dtype, device=device) + ) + + # 1. Attention + self.norm1_i = operations.LayerNorm(dim, eps = 1e-06, elementwise_affine = False, dtype=dtype, device=device) + self.attn1 = HiDreamAttention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + processor = HiDreamAttnProcessor_flashattn(), + single = True, + dtype=dtype, device=device, operations=operations + ) + + # 3. Feed-forward + self.norm3_i = operations.LayerNorm(dim, eps = 1e-06, elementwise_affine = False, dtype=dtype, device=device) + if num_routed_experts > 0: + self.ff_i = MOEFeedForwardSwiGLU( + dim = dim, + hidden_dim = 4 * dim, + num_routed_experts = num_routed_experts, + num_activated_experts = num_activated_experts, + dtype=dtype, device=device, operations=operations + ) + else: + self.ff_i = FeedForwardSwiGLU(dim = dim, hidden_dim = 4 * dim, dtype=dtype, device=device, operations=operations) + + def forward( + self, + image_tokens: torch.FloatTensor, + image_tokens_masks: Optional[torch.FloatTensor] = None, + text_tokens: Optional[torch.FloatTensor] = None, + adaln_input: Optional[torch.FloatTensor] = None, + rope: torch.FloatTensor = None, + + ) -> torch.FloatTensor: + wtype = image_tokens.dtype + shift_msa_i, scale_msa_i, gate_msa_i, shift_mlp_i, scale_mlp_i, gate_mlp_i = \ + self.adaLN_modulation(adaln_input)[:,None].chunk(6, dim=-1) + + # 1. MM-Attention + norm_image_tokens = self.norm1_i(image_tokens).to(dtype=wtype) + norm_image_tokens = norm_image_tokens * (1 + scale_msa_i) + shift_msa_i + attn_output_i = self.attn1( + norm_image_tokens, + image_tokens_masks, + rope = rope, + ) + image_tokens = gate_msa_i * attn_output_i + image_tokens + + # 2. Feed-forward + norm_image_tokens = self.norm3_i(image_tokens).to(dtype=wtype) + norm_image_tokens = norm_image_tokens * (1 + scale_mlp_i) + shift_mlp_i + ff_output_i = gate_mlp_i * self.ff_i(norm_image_tokens.to(dtype=wtype)) + image_tokens = ff_output_i + image_tokens + return image_tokens + + +class HiDreamImageTransformerBlock(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + num_routed_experts: int = 4, + num_activated_experts: int = 2, + dtype=None, device=None, operations=None + ): + super().__init__() + self.num_attention_heads = num_attention_heads + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + operations.Linear(dim, 12 * dim, bias=True, dtype=dtype, device=device) + ) + # nn.init.zeros_(self.adaLN_modulation[1].weight) + # nn.init.zeros_(self.adaLN_modulation[1].bias) + + # 1. 
Attention + self.norm1_i = operations.LayerNorm(dim, eps = 1e-06, elementwise_affine = False, dtype=dtype, device=device) + self.norm1_t = operations.LayerNorm(dim, eps = 1e-06, elementwise_affine = False, dtype=dtype, device=device) + self.attn1 = HiDreamAttention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + processor = HiDreamAttnProcessor_flashattn(), + single = False, + dtype=dtype, device=device, operations=operations + ) + + # 3. Feed-forward + self.norm3_i = operations.LayerNorm(dim, eps = 1e-06, elementwise_affine = False, dtype=dtype, device=device) + if num_routed_experts > 0: + self.ff_i = MOEFeedForwardSwiGLU( + dim = dim, + hidden_dim = 4 * dim, + num_routed_experts = num_routed_experts, + num_activated_experts = num_activated_experts, + dtype=dtype, device=device, operations=operations + ) + else: + self.ff_i = FeedForwardSwiGLU(dim = dim, hidden_dim = 4 * dim, dtype=dtype, device=device, operations=operations) + self.norm3_t = operations.LayerNorm(dim, eps = 1e-06, elementwise_affine = False) + self.ff_t = FeedForwardSwiGLU(dim = dim, hidden_dim = 4 * dim, dtype=dtype, device=device, operations=operations) + + def forward( + self, + image_tokens: torch.FloatTensor, + image_tokens_masks: Optional[torch.FloatTensor] = None, + text_tokens: Optional[torch.FloatTensor] = None, + adaln_input: Optional[torch.FloatTensor] = None, + rope: torch.FloatTensor = None, + ) -> torch.FloatTensor: + wtype = image_tokens.dtype + shift_msa_i, scale_msa_i, gate_msa_i, shift_mlp_i, scale_mlp_i, gate_mlp_i, \ + shift_msa_t, scale_msa_t, gate_msa_t, shift_mlp_t, scale_mlp_t, gate_mlp_t = \ + self.adaLN_modulation(adaln_input)[:,None].chunk(12, dim=-1) + + # 1. MM-Attention + norm_image_tokens = self.norm1_i(image_tokens).to(dtype=wtype) + norm_image_tokens = norm_image_tokens * (1 + scale_msa_i) + shift_msa_i + norm_text_tokens = self.norm1_t(text_tokens).to(dtype=wtype) + norm_text_tokens = norm_text_tokens * (1 + scale_msa_t) + shift_msa_t + + attn_output_i, attn_output_t = self.attn1( + norm_image_tokens, + image_tokens_masks, + norm_text_tokens, + rope = rope, + ) + + image_tokens = gate_msa_i * attn_output_i + image_tokens + text_tokens = gate_msa_t * attn_output_t + text_tokens + + # 2. 
Feed-forward + norm_image_tokens = self.norm3_i(image_tokens).to(dtype=wtype) + norm_image_tokens = norm_image_tokens * (1 + scale_mlp_i) + shift_mlp_i + norm_text_tokens = self.norm3_t(text_tokens).to(dtype=wtype) + norm_text_tokens = norm_text_tokens * (1 + scale_mlp_t) + shift_mlp_t + + ff_output_i = gate_mlp_i * self.ff_i(norm_image_tokens) + ff_output_t = gate_mlp_t * self.ff_t(norm_text_tokens) + image_tokens = ff_output_i + image_tokens + text_tokens = ff_output_t + text_tokens + return image_tokens, text_tokens + + +class HiDreamImageBlock(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + num_routed_experts: int = 4, + num_activated_experts: int = 2, + block_type: BlockType = BlockType.TransformerBlock, + dtype=None, device=None, operations=None + ): + super().__init__() + block_classes = { + BlockType.TransformerBlock: HiDreamImageTransformerBlock, + BlockType.SingleTransformerBlock: HiDreamImageSingleTransformerBlock, + } + self.block = block_classes[block_type]( + dim, + num_attention_heads, + attention_head_dim, + num_routed_experts, + num_activated_experts, + dtype=dtype, device=device, operations=operations + ) + + def forward( + self, + image_tokens: torch.FloatTensor, + image_tokens_masks: Optional[torch.FloatTensor] = None, + text_tokens: Optional[torch.FloatTensor] = None, + adaln_input: torch.FloatTensor = None, + rope: torch.FloatTensor = None, + ) -> torch.FloatTensor: + return self.block( + image_tokens, + image_tokens_masks, + text_tokens, + adaln_input, + rope, + ) + + +class HiDreamImageTransformer2DModel(nn.Module): + def __init__( + self, + patch_size: Optional[int] = None, + in_channels: int = 64, + out_channels: Optional[int] = None, + num_layers: int = 16, + num_single_layers: int = 32, + attention_head_dim: int = 128, + num_attention_heads: int = 20, + caption_channels: List[int] = None, + text_emb_dim: int = 2048, + num_routed_experts: int = 4, + num_activated_experts: int = 2, + axes_dims_rope: Tuple[int, int] = (32, 32), + max_resolution: Tuple[int, int] = (128, 128), + llama_layers: List[int] = None, + image_model=None, + dtype=None, device=None, operations=None + ): + self.patch_size = patch_size + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + self.num_layers = num_layers + self.num_single_layers = num_single_layers + + self.gradient_checkpointing = False + + super().__init__() + self.dtype = dtype + self.out_channels = out_channels or in_channels + self.inner_dim = self.num_attention_heads * self.attention_head_dim + self.llama_layers = llama_layers + + self.t_embedder = TimestepEmbed(self.inner_dim, dtype=dtype, device=device, operations=operations) + self.p_embedder = PooledEmbed(text_emb_dim, self.inner_dim, dtype=dtype, device=device, operations=operations) + self.x_embedder = PatchEmbed( + patch_size = patch_size, + in_channels = in_channels, + out_channels = self.inner_dim, + dtype=dtype, device=device, operations=operations + ) + self.pe_embedder = EmbedND(theta=10000, axes_dim=axes_dims_rope) + + self.double_stream_blocks = nn.ModuleList( + [ + HiDreamImageBlock( + dim = self.inner_dim, + num_attention_heads = self.num_attention_heads, + attention_head_dim = self.attention_head_dim, + num_routed_experts = num_routed_experts, + num_activated_experts = num_activated_experts, + block_type = BlockType.TransformerBlock, + dtype=dtype, device=device, operations=operations + ) + for i in range(self.num_layers) + ] + ) + + self.single_stream_blocks = 
nn.ModuleList( + [ + HiDreamImageBlock( + dim = self.inner_dim, + num_attention_heads = self.num_attention_heads, + attention_head_dim = self.attention_head_dim, + num_routed_experts = num_routed_experts, + num_activated_experts = num_activated_experts, + block_type = BlockType.SingleTransformerBlock, + dtype=dtype, device=device, operations=operations + ) + for i in range(self.num_single_layers) + ] + ) + + self.final_layer = OutEmbed(self.inner_dim, patch_size, self.out_channels, dtype=dtype, device=device, operations=operations) + + caption_channels = [caption_channels[1], ] * (num_layers + num_single_layers) + [caption_channels[0], ] + caption_projection = [] + for caption_channel in caption_channels: + caption_projection.append(TextProjection(in_features=caption_channel, hidden_size=self.inner_dim, dtype=dtype, device=device, operations=operations)) + self.caption_projection = nn.ModuleList(caption_projection) + self.max_seq = max_resolution[0] * max_resolution[1] // (patch_size * patch_size) + + def expand_timesteps(self, timesteps, batch_size, device): + if not torch.is_tensor(timesteps): + is_mps = device.type == "mps" + if isinstance(timesteps, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(batch_size) + return timesteps + + def unpatchify(self, x: torch.Tensor, img_sizes: List[Tuple[int, int]]) -> List[torch.Tensor]: + x_arr = [] + for i, img_size in enumerate(img_sizes): + pH, pW = img_size + x_arr.append( + einops.rearrange(x[i, :pH*pW].reshape(1, pH, pW, -1), 'B H W (p1 p2 C) -> B C (H p1) (W p2)', + p1=self.patch_size, p2=self.patch_size) + ) + x = torch.cat(x_arr, dim=0) + return x + + def patchify(self, x, max_seq, img_sizes=None): + pz2 = self.patch_size * self.patch_size + if isinstance(x, torch.Tensor): + B = x.shape[0] + device = x.device + dtype = x.dtype + else: + B = len(x) + device = x[0].device + dtype = x[0].dtype + x_masks = torch.zeros((B, max_seq), dtype=dtype, device=device) + + if img_sizes is not None: + for i, img_size in enumerate(img_sizes): + x_masks[i, 0:img_size[0] * img_size[1]] = 1 + x = einops.rearrange(x, 'B C S p -> B S (p C)', p=pz2) + elif isinstance(x, torch.Tensor): + pH, pW = x.shape[-2] // self.patch_size, x.shape[-1] // self.patch_size + x = einops.rearrange(x, 'B C (H p1) (W p2) -> B (H W) (p1 p2 C)', p1=self.patch_size, p2=self.patch_size) + img_sizes = [[pH, pW]] * B + x_masks = None + else: + raise NotImplementedError + return x, x_masks, img_sizes + + def forward( + self, + x: torch.Tensor, + t: torch.Tensor, + y: Optional[torch.Tensor] = None, + context: Optional[torch.Tensor] = None, + encoder_hidden_states_llama3=None, + control = None, + transformer_options = {}, + ) -> torch.Tensor: + hidden_states = x + timesteps = t + pooled_embeds = y + T5_encoder_hidden_states = context + + img_sizes = None + + # spatial forward + batch_size = hidden_states.shape[0] + hidden_states_type = hidden_states.dtype + + # 0. 
time + timesteps = self.expand_timesteps(timesteps, batch_size, hidden_states.device) + timesteps = self.t_embedder(timesteps, hidden_states_type) + p_embedder = self.p_embedder(pooled_embeds) + adaln_input = timesteps + p_embedder + + hidden_states, image_tokens_masks, img_sizes = self.patchify(hidden_states, self.max_seq, img_sizes) + if image_tokens_masks is None: + pH, pW = img_sizes[0] + img_ids = torch.zeros(pH, pW, 3, device=hidden_states.device) + img_ids[..., 1] = img_ids[..., 1] + torch.arange(pH, device=hidden_states.device)[:, None] + img_ids[..., 2] = img_ids[..., 2] + torch.arange(pW, device=hidden_states.device)[None, :] + img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size) + hidden_states = self.x_embedder(hidden_states) + + # T5_encoder_hidden_states = encoder_hidden_states[0] + encoder_hidden_states = encoder_hidden_states_llama3.movedim(1, 0) + encoder_hidden_states = [encoder_hidden_states[k] for k in self.llama_layers] + + if self.caption_projection is not None: + new_encoder_hidden_states = [] + for i, enc_hidden_state in enumerate(encoder_hidden_states): + enc_hidden_state = self.caption_projection[i](enc_hidden_state) + enc_hidden_state = enc_hidden_state.view(batch_size, -1, hidden_states.shape[-1]) + new_encoder_hidden_states.append(enc_hidden_state) + encoder_hidden_states = new_encoder_hidden_states + T5_encoder_hidden_states = self.caption_projection[-1](T5_encoder_hidden_states) + T5_encoder_hidden_states = T5_encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) + encoder_hidden_states.append(T5_encoder_hidden_states) + + txt_ids = torch.zeros( + batch_size, + encoder_hidden_states[-1].shape[1] + encoder_hidden_states[-2].shape[1] + encoder_hidden_states[0].shape[1], + 3, + device=img_ids.device, dtype=img_ids.dtype + ) + ids = torch.cat((img_ids, txt_ids), dim=1) + rope = self.pe_embedder(ids) + + # 2. 
Blocks + block_id = 0 + initial_encoder_hidden_states = torch.cat([encoder_hidden_states[-1], encoder_hidden_states[-2]], dim=1) + initial_encoder_hidden_states_seq_len = initial_encoder_hidden_states.shape[1] + for bid, block in enumerate(self.double_stream_blocks): + cur_llama31_encoder_hidden_states = encoder_hidden_states[block_id] + cur_encoder_hidden_states = torch.cat([initial_encoder_hidden_states, cur_llama31_encoder_hidden_states], dim=1) + hidden_states, initial_encoder_hidden_states = block( + image_tokens = hidden_states, + image_tokens_masks = image_tokens_masks, + text_tokens = cur_encoder_hidden_states, + adaln_input = adaln_input, + rope = rope, + ) + initial_encoder_hidden_states = initial_encoder_hidden_states[:, :initial_encoder_hidden_states_seq_len] + block_id += 1 + + image_tokens_seq_len = hidden_states.shape[1] + hidden_states = torch.cat([hidden_states, initial_encoder_hidden_states], dim=1) + hidden_states_seq_len = hidden_states.shape[1] + if image_tokens_masks is not None: + encoder_attention_mask_ones = torch.ones( + (batch_size, initial_encoder_hidden_states.shape[1] + cur_llama31_encoder_hidden_states.shape[1]), + device=image_tokens_masks.device, dtype=image_tokens_masks.dtype + ) + image_tokens_masks = torch.cat([image_tokens_masks, encoder_attention_mask_ones], dim=1) + + for bid, block in enumerate(self.single_stream_blocks): + cur_llama31_encoder_hidden_states = encoder_hidden_states[block_id] + hidden_states = torch.cat([hidden_states, cur_llama31_encoder_hidden_states], dim=1) + hidden_states = block( + image_tokens=hidden_states, + image_tokens_masks=image_tokens_masks, + text_tokens=None, + adaln_input=adaln_input, + rope=rope, + ) + hidden_states = hidden_states[:, :hidden_states_seq_len] + block_id += 1 + + hidden_states = hidden_states[:, :image_tokens_seq_len, ...] 
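+        # the text tokens were only appended for attention; keep just the image tokens for the final projection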
+ output = self.final_layer(hidden_states, adaln_input) + output = self.unpatchify(output, img_sizes) + return -output diff --git a/comfy/model_base.py b/comfy/model_base.py index 6bc627ae3..8dab1740b 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -37,6 +37,7 @@ import comfy.ldm.cosmos.model import comfy.ldm.lumina.model import comfy.ldm.wan.model import comfy.ldm.hunyuan3d.model +import comfy.ldm.hidream.model import comfy.model_management import comfy.patcher_extension @@ -1056,3 +1057,20 @@ class Hunyuan3Dv2(BaseModel): if guidance is not None: out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) return out + +class HiDream(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hidream.model.HiDreamImageTransformer2DModel) + + def encode_adm(self, **kwargs): + return kwargs["pooled_output"] + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + conditioning_llama3 = kwargs.get("conditioning_llama3", None) + if conditioning_llama3 is not None: + out['encoder_hidden_states_llama3'] = comfy.conds.CONDRegular(conditioning_llama3) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 4217f5831..a4da1afcd 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -338,6 +338,25 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys return dit_config + if '{}caption_projection.0.linear.weight'.format(key_prefix) in state_dict_keys: # HiDream + dit_config = {} + dit_config["image_model"] = "hidream" + dit_config["attention_head_dim"] = 128 + dit_config["axes_dims_rope"] = [64, 32, 32] + dit_config["caption_channels"] = [4096, 4096] + dit_config["max_resolution"] = [128, 128] + dit_config["in_channels"] = 16 + dit_config["llama_layers"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31] + dit_config["num_attention_heads"] = 20 + dit_config["num_routed_experts"] = 4 + dit_config["num_activated_experts"] = 2 + dit_config["num_layers"] = 16 + dit_config["num_single_layers"] = 32 + dit_config["out_channels"] = 16 + dit_config["patch_size"] = 2 + dit_config["text_emb_dim"] = 2048 + return dit_config + if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys: return None diff --git a/comfy/ops.py b/comfy/ops.py index 6b0e29307..aae6cafac 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -263,6 +263,9 @@ class manual_cast(disable_weight_init): class ConvTranspose1d(disable_weight_init.ConvTranspose1d): comfy_cast_weights = True + class RMSNorm(disable_weight_init.RMSNorm): + comfy_cast_weights = True + class Embedding(disable_weight_init.Embedding): comfy_cast_weights = True diff --git a/comfy/sd.py b/comfy/sd.py index 4d3aef3e1..d97873ba2 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -41,6 +41,7 @@ import comfy.text_encoders.hunyuan_video import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan +import comfy.text_encoders.hidream import comfy.model_patcher import comfy.lora @@ -853,6 +854,9 @@ def load_text_encoder_state_dicts(state_dicts=[], 
embedding_directory=None, clip
     elif len(clip_data) == 3:
         clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(**t5xxl_detect(clip_data))
         clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer
+    elif len(clip_data) == 4:
+        clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**t5xxl_detect(clip_data), **llama_detect(clip_data))
+        clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer
 
     parameters = 0
     for c in clip_data:
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index 2a6a61560..81c47ac68 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -1025,6 +1025,36 @@ class Hunyuan3Dv2mini(Hunyuan3Dv2):
     latent_format = latent_formats.Hunyuan3Dv2mini
 
-models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, Hunyuan3Dv2mini, Hunyuan3Dv2]
+class HiDream(supported_models_base.BASE):
+    unet_config = {
+        "image_model": "hidream",
+    }
+
+    sampling_settings = {
+        "shift": 3.0,
+    }
+
+    # memory_usage_factor = 1.2 # TODO
+
+    unet_extra_config = {}
+    latent_format = latent_formats.Flux
+
+    supported_inference_dtypes = [torch.bfloat16, torch.float32]
+
+    vae_key_prefix = ["vae."]
+    text_encoder_key_prefix = ["text_encoders."]
+
+    def get_model(self, state_dict, prefix="", device=None):
+        out = model_base.HiDream(self, device=device)
+        return out
+
+    def clip_target(self, state_dict={}):
+        return None # TODO
+
+
+models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream]
 
 models += [SVD_img2vid]
diff --git a/comfy/text_encoders/hidream.py b/comfy/text_encoders/hidream.py
new file mode 100644
index 000000000..af105f9bb
--- /dev/null
+++ b/comfy/text_encoders/hidream.py
@@ -0,0 +1,150 @@
+from . import hunyuan_video
+from .
import sd3_clip +from comfy import sd1_clip +from comfy import sdxl_clip +import comfy.model_management +import torch +import logging + + +class HiDreamTokenizer: + def __init__(self, embedding_directory=None, tokenizer_data={}): + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.clip_g = sdxl_clip.SDXLClipGTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.t5xxl = sd3_clip.T5XXLTokenizer(embedding_directory=embedding_directory, min_length=128, tokenizer_data=tokenizer_data) + self.llama = hunyuan_video.LLAMA3Tokenizer(embedding_directory=embedding_directory, min_length=128, pad_token=128009, tokenizer_data=tokenizer_data) + + def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): + out = {} + out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids) + out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids) + out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids) + out["llama"] = self.llama.tokenize_with_weights(text, return_word_ids) + return out + + def untokenize(self, token_weight_pair): + return self.clip_g.untokenize(token_weight_pair) + + def state_dict(self): + return {} + + +class HiDreamTEModel(torch.nn.Module): + def __init__(self, clip_l=True, clip_g=True, t5=True, llama=True, dtype_t5=None, dtype_llama=None, device="cpu", dtype=None, model_options={}): + super().__init__() + self.dtypes = set() + if clip_l: + self.clip_l = sd1_clip.SDClipModel(device=device, dtype=dtype, return_projected_pooled=True, model_options=model_options) + self.dtypes.add(dtype) + else: + self.clip_l = None + + if clip_g: + self.clip_g = sdxl_clip.SDXLClipG(device=device, dtype=dtype, model_options=model_options) + self.dtypes.add(dtype) + else: + self.clip_g = None + + if t5: + dtype_t5 = comfy.model_management.pick_weight_dtype(dtype_t5, dtype, device) + self.t5xxl = sd3_clip.T5XXLModel(device=device, dtype=dtype_t5, model_options=model_options, attention_mask=True) + self.dtypes.add(dtype_t5) + else: + self.t5xxl = None + + if llama: + dtype_llama = comfy.model_management.pick_weight_dtype(dtype_llama, dtype, device) + if "vocab_size" not in model_options: + model_options["vocab_size"] = 128256 + self.llama = hunyuan_video.LLAMAModel(device=device, dtype=dtype_llama, model_options=model_options, layer="all", layer_idx=None, special_tokens={"start": 128000, "pad": 128009}) + self.dtypes.add(dtype_llama) + else: + self.llama = None + + logging.debug("Created HiDream text encoder with: clip_l {}, clip_g {}, t5xxl {}:{}, llama {}:{}".format(clip_l, clip_g, t5, dtype_t5, llama, dtype_llama)) + + def set_clip_options(self, options): + if self.clip_l is not None: + self.clip_l.set_clip_options(options) + if self.clip_g is not None: + self.clip_g.set_clip_options(options) + if self.t5xxl is not None: + self.t5xxl.set_clip_options(options) + if self.llama is not None: + self.llama.set_clip_options(options) + + def reset_clip_options(self): + if self.clip_l is not None: + self.clip_l.reset_clip_options() + if self.clip_g is not None: + self.clip_g.reset_clip_options() + if self.t5xxl is not None: + self.t5xxl.reset_clip_options() + if self.llama is not None: + self.llama.reset_clip_options() + + def encode_token_weights(self, token_weight_pairs): + token_weight_pairs_l = token_weight_pairs["l"] + token_weight_pairs_g = token_weight_pairs["g"] + token_weight_pairs_t5 = token_weight_pairs["t5xxl"] + token_weight_pairs_llama = 
token_weight_pairs["llama"]
+        lg_out = None
+        t5_out = None
+        ll_out = None
+        pooled = None
+        extra = {}
+
+        if len(token_weight_pairs_g) > 0 or len(token_weight_pairs_l) > 0:
+            if self.clip_l is not None:
+                lg_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs_l)
+            else:
+                l_pooled = torch.zeros((1, 768), device=comfy.model_management.intermediate_device())
+
+            if self.clip_g is not None:
+                g_out, g_pooled = self.clip_g.encode_token_weights(token_weight_pairs_g)
+            else:
+                g_pooled = torch.zeros((1, 1280), device=comfy.model_management.intermediate_device())
+
+            pooled = torch.cat((l_pooled, g_pooled), dim=-1)
+
+        if self.t5xxl is not None:
+            t5_output = self.t5xxl.encode_token_weights(token_weight_pairs_t5)
+            t5_out, t5_pooled = t5_output[:2]
+
+        if self.llama is not None:
+            ll_output = self.llama.encode_token_weights(token_weight_pairs_llama)
+            ll_out, ll_pooled = ll_output[:2]
+            ll_out = ll_out[:, 1:]
+
+        if t5_out is None:
+            t5_out = torch.zeros((1, 1, 4096), device=comfy.model_management.intermediate_device())
+
+        if ll_out is None:
+            ll_out = torch.zeros((1, 32, 1, 4096), device=comfy.model_management.intermediate_device())
+
+        if pooled is None:
+            pooled = torch.zeros((1, 768 + 1280), device=comfy.model_management.intermediate_device())
+
+        extra["conditioning_llama3"] = ll_out
+        return t5_out, pooled, extra
+
+    def load_sd(self, sd):
+        if "text_model.encoder.layers.30.mlp.fc1.weight" in sd:
+            return self.clip_g.load_sd(sd)
+        elif "text_model.encoder.layers.1.mlp.fc1.weight" in sd:
+            return self.clip_l.load_sd(sd)
+        elif "encoder.block.23.layer.1.DenseReluDense.wi_1.weight" in sd:
+            return self.t5xxl.load_sd(sd)
+        else:
+            return self.llama.load_sd(sd)
+
+
+def hidream_clip(clip_l=True, clip_g=True, t5=True, llama=True, dtype_t5=None, dtype_llama=None, t5xxl_scaled_fp8=None, llama_scaled_fp8=None):
+    class HiDreamTEModel_(HiDreamTEModel):
+        def __init__(self, device="cpu", dtype=None, model_options={}):
+            if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options:
+                model_options = model_options.copy()
+                model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8
+            if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options:
+                model_options = model_options.copy()
+                model_options["llama_scaled_fp8"] = llama_scaled_fp8
+            super().__init__(clip_l=clip_l, clip_g=clip_g, t5=t5, llama=llama, dtype_t5=dtype_t5, dtype_llama=dtype_llama, device=device, dtype=dtype, model_options=model_options)
+    return HiDreamTEModel_
diff --git a/comfy_extras/nodes_hidream.py b/comfy_extras/nodes_hidream.py
new file mode 100644
index 000000000..5a160c2ba
--- /dev/null
+++ b/comfy_extras/nodes_hidream.py
@@ -0,0 +1,32 @@
+import folder_paths
+import comfy.sd
+import comfy.model_management
+
+
+class QuadrupleCLIPLoader:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
+                              "clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
+                              "clip_name3": (folder_paths.get_filename_list("text_encoders"), ),
+                              "clip_name4": (folder_paths.get_filename_list("text_encoders"), )
+                             }}
+    RETURN_TYPES = ("CLIP",)
+    FUNCTION = "load_clip"
+
+    CATEGORY = "advanced/loaders"
+
+    DESCRIPTION = "[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct"
+
+    def load_clip(self, clip_name1, clip_name2, clip_name3, clip_name4):
+        clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
+        clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
+        clip_path3 =
folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
+        clip_path4 = folder_paths.get_full_path_or_raise("text_encoders", clip_name4)
+        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2, clip_path3, clip_path4], embedding_directory=folder_paths.get_folder_paths("embeddings"))
+        return (clip,)
+
+
+NODE_CLASS_MAPPINGS = {
+    "QuadrupleCLIPLoader": QuadrupleCLIPLoader,
+}
diff --git a/nodes.py b/nodes.py
index e66b5c714..ae0a2e183 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2280,7 +2280,8 @@ def init_builtin_extra_nodes():
         "nodes_hunyuan3d.py",
         "nodes_primitive.py",
         "nodes_cfg.py",
-        "nodes_optimalsteps.py"
+        "nodes_optimalsteps.py",
+        "nodes_hidream.py"
     ]
 
     import_failed = []

From b4dc03ad7669b155d3c7714e9e5a474365d50c8c Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 16 Apr 2025 04:53:56 -0400
Subject: [PATCH 0005/1073] Fix issue on old torch.

---
 comfy/rmsnorm.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy/rmsnorm.py b/comfy/rmsnorm.py
index 77df44464..9d82bee1a 100644
--- a/comfy/rmsnorm.py
+++ b/comfy/rmsnorm.py
@@ -49,6 +49,7 @@ if RMSNorm is None:
                 )
             else:
                 self.register_parameter("weight", None)
+            self.bias = None
 
         def forward(self, x):
             return rms_norm(x, self.weight, self.eps)

From cce1d9145e06c0f86336a2a7f5558610fdc76718 Mon Sep 17 00:00:00 2001
From: Chenlei Hu
Date: Wed, 16 Apr 2025 15:41:00 -0400
Subject: [PATCH 0006/1073] [Type] Mark input options NotRequired (#7614)

---
 comfy/comfy_types/node_typing.py | 54 ++++++++++++++++----------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py
index 3535966fb..42ed5174e 100644
--- a/comfy/comfy_types/node_typing.py
+++ b/comfy/comfy_types/node_typing.py
@@ -99,59 +99,59 @@ class InputTypeOptions(TypedDict):
 
     Comfy Docs: https://docs.comfy.org/custom-nodes/backend/datatypes
     """
 
-    default: bool | str | float | int | list | tuple
+    default: NotRequired[bool | str | float | int | list | tuple]
     """The default value of the widget"""
-    defaultInput: bool
+    defaultInput: NotRequired[bool]
     """@deprecated in v1.16 frontend. v1.16 frontend allows input socket and widget to co-exist.
    - defaultInput on required inputs should be dropped.
    - defaultInput on optional inputs should be replaced with forceInput.
    Ref: https://github.com/Comfy-Org/ComfyUI_frontend/pull/3364
    """
-    forceInput: bool
+    forceInput: NotRequired[bool]
    """Forces the input to be an input slot rather than a widget even a widget is available for the input type."""
-    lazy: bool
+    lazy: NotRequired[bool]
    """Declares that this input uses lazy evaluation"""
-    rawLink: bool
+    rawLink: NotRequired[bool]
    """When a link exists, rather than receiving the evaluated value, you will receive the link (i.e. `["nodeId", <outputIndex>]`).
Designed for node expansion.""" - tooltip: str + tooltip: NotRequired[str] """Tooltip for the input (or widget), shown on pointer hover""" # class InputTypeNumber(InputTypeOptions): # default: float | int - min: float + min: NotRequired[float] """The minimum value of a number (``FLOAT`` | ``INT``)""" - max: float + max: NotRequired[float] """The maximum value of a number (``FLOAT`` | ``INT``)""" - step: float + step: NotRequired[float] """The amount to increment or decrement a widget by when stepping up/down (``FLOAT`` | ``INT``)""" - round: float + round: NotRequired[float] """Floats are rounded by this value (``FLOAT``)""" # class InputTypeBoolean(InputTypeOptions): # default: bool - label_on: str + label_on: NotRequired[str] """The label to use in the UI when the bool is True (``BOOLEAN``)""" - label_off: str + label_off: NotRequired[str] """The label to use in the UI when the bool is False (``BOOLEAN``)""" # class InputTypeString(InputTypeOptions): # default: str - multiline: bool + multiline: NotRequired[bool] """Use a multiline text box (``STRING``)""" - placeholder: str + placeholder: NotRequired[str] """Placeholder text to display in the UI when empty (``STRING``)""" # Deprecated: # defaultVal: str - dynamicPrompts: bool + dynamicPrompts: NotRequired[bool] """Causes the front-end to evaluate dynamic prompts (``STRING``)""" # class InputTypeCombo(InputTypeOptions): - image_upload: bool + image_upload: NotRequired[bool] """Specifies whether the input should have an image upload button and image preview attached to it. Requires that the input's name is `image`.""" - image_folder: Literal["input", "output", "temp"] + image_folder: NotRequired[Literal["input", "output", "temp"]] """Specifies which folder to get preview images from if the input has the ``image_upload`` flag. """ - remote: RemoteInputOptions + remote: NotRequired[RemoteInputOptions] """Specifies the configuration for a remote input. Available after ComfyUI frontend v1.9.7 https://github.com/Comfy-Org/ComfyUI_frontend/pull/2422""" - control_after_generate: bool + control_after_generate: NotRequired[bool] """Specifies whether a control widget should be added to the input, adding options to automatically change the value after each prompt is queued. Currently only used for INT and COMBO types.""" options: NotRequired[list[str | int | float]] """COMBO type only. Specifies the selectable options for the combo widget. @@ -169,15 +169,15 @@ class InputTypeOptions(TypedDict): class HiddenInputTypeDict(TypedDict): """Provides type hinting for the hidden entry of node INPUT_TYPES.""" - node_id: Literal["UNIQUE_ID"] + node_id: NotRequired[Literal["UNIQUE_ID"]] """UNIQUE_ID is the unique identifier of the node, and matches the id property of the node on the client side. It is commonly used in client-server communications (see messages).""" - unique_id: Literal["UNIQUE_ID"] + unique_id: NotRequired[Literal["UNIQUE_ID"]] """UNIQUE_ID is the unique identifier of the node, and matches the id property of the node on the client side. It is commonly used in client-server communications (see messages).""" - prompt: Literal["PROMPT"] + prompt: NotRequired[Literal["PROMPT"]] """PROMPT is the complete prompt sent by the client to the server. See the prompt object for a full description.""" - extra_pnginfo: Literal["EXTRA_PNGINFO"] + extra_pnginfo: NotRequired[Literal["EXTRA_PNGINFO"]] """EXTRA_PNGINFO is a dictionary that will be copied into the metadata of any .png files saved. 
Custom nodes can store additional information in this dictionary for saving (or as a way to communicate with a downstream node).""" - dynprompt: Literal["DYNPROMPT"] + dynprompt: NotRequired[Literal["DYNPROMPT"]] """DYNPROMPT is an instance of comfy_execution.graph.DynamicPrompt. It differs from PROMPT in that it may mutate during the course of execution in response to Node Expansion.""" @@ -187,11 +187,11 @@ class InputTypeDict(TypedDict): Comfy Docs: https://docs.comfy.org/custom-nodes/backend/more_on_inputs """ - required: dict[str, tuple[IO, InputTypeOptions]] + required: NotRequired[dict[str, tuple[IO, InputTypeOptions]]] """Describes all inputs that must be connected for the node to execute.""" - optional: dict[str, tuple[IO, InputTypeOptions]] + optional: NotRequired[dict[str, tuple[IO, InputTypeOptions]]] """Describes inputs which do not need to be connected.""" - hidden: HiddenInputTypeDict + hidden: NotRequired[HiddenInputTypeDict] """Offers advanced functionality and server-client communication. Comfy Docs: https://docs.comfy.org/custom-nodes/backend/more_on_inputs#hidden-inputs From f00f340a56013001a6148eee6d3d00d02078e43e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 16 Apr 2025 17:43:55 -0400 Subject: [PATCH 0007/1073] Reuse code from flux model. --- comfy/ldm/hidream/model.py | 39 ++++---------------------------------- 1 file changed, 4 insertions(+), 35 deletions(-) diff --git a/comfy/ldm/hidream/model.py b/comfy/ldm/hidream/model.py index de749a373..39c67a193 100644 --- a/comfy/ldm/hidream/model.py +++ b/comfy/ldm/hidream/model.py @@ -8,26 +8,12 @@ from einops import repeat from comfy.ldm.lightricks.model import TimestepEmbedding, Timesteps import torch.nn.functional as F -from comfy.ldm.flux.math import apply_rope +from comfy.ldm.flux.math import apply_rope, rope +from comfy.ldm.flux.layers import LastLayer + from comfy.ldm.modules.attention import optimized_attention import comfy.model_management -# Copied from https://github.com/black-forest-labs/flux/blob/main/src/flux/math.py -def rope(pos: torch.Tensor, dim: int, theta: int) -> torch.Tensor: - assert dim % 2 == 0, "The dimension must be even." 
- - scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim - omega = 1.0 / (theta**scale) - - batch_size, seq_length = pos.shape - out = torch.einsum("...n,d->...nd", pos, omega) - cos_out = torch.cos(out) - sin_out = torch.sin(out) - - stacked_out = torch.stack([cos_out, -sin_out, sin_out, cos_out], dim=-1) - out = stacked_out.view(batch_size, -1, dim // 2, 2, 2) - return out.float() - # Copied from https://github.com/black-forest-labs/flux/blob/main/src/flux/modules/layers.py class EmbedND(nn.Module): @@ -84,23 +70,6 @@ class TimestepEmbed(nn.Module): return t_emb -class OutEmbed(nn.Module): - def __init__(self, hidden_size, patch_size, out_channels, dtype=None, device=None, operations=None): - super().__init__() - self.norm_final = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.linear = operations.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True, dtype=dtype, device=device) - self.adaLN_modulation = nn.Sequential( - nn.SiLU(), - operations.Linear(hidden_size, 2 * hidden_size, bias=True, dtype=dtype, device=device) - ) - - def forward(self, x, adaln_input): - shift, scale = self.adaLN_modulation(adaln_input).chunk(2, dim=1) - x = self.norm_final(x) * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) - x = self.linear(x) - return x - - def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor): return optimized_attention(query.view(query.shape[0], -1, query.shape[-1] * query.shape[-2]), key.view(key.shape[0], -1, key.shape[-1] * key.shape[-2]), value.view(value.shape[0], -1, value.shape[-1] * value.shape[-2]), query.shape[2]) @@ -663,7 +632,7 @@ class HiDreamImageTransformer2DModel(nn.Module): ] ) - self.final_layer = OutEmbed(self.inner_dim, patch_size, self.out_channels, dtype=dtype, device=device, operations=operations) + self.final_layer = LastLayer(self.inner_dim, patch_size, self.out_channels, dtype=dtype, device=device, operations=operations) caption_channels = [caption_channels[1], ] * (num_layers + num_single_layers) + [caption_channels[0], ] caption_projection = [] From 9899d187b16a9a823a98fc1df9bf1fbb58674087 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 16 Apr 2025 18:07:55 -0400 Subject: [PATCH 0008/1073] Limit T5 to 128 tokens for HiDream: #7620 --- comfy/text_encoders/hidream.py | 5 +++-- comfy/text_encoders/sd3_clip.py | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/comfy/text_encoders/hidream.py b/comfy/text_encoders/hidream.py index af105f9bb..6c34c5572 100644 --- a/comfy/text_encoders/hidream.py +++ b/comfy/text_encoders/hidream.py @@ -11,14 +11,15 @@ class HiDreamTokenizer: def __init__(self, embedding_directory=None, tokenizer_data={}): self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) self.clip_g = sdxl_clip.SDXLClipGTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) - self.t5xxl = sd3_clip.T5XXLTokenizer(embedding_directory=embedding_directory, min_length=128, tokenizer_data=tokenizer_data) + self.t5xxl = sd3_clip.T5XXLTokenizer(embedding_directory=embedding_directory, min_length=128, max_length=128, tokenizer_data=tokenizer_data) self.llama = hunyuan_video.LLAMA3Tokenizer(embedding_directory=embedding_directory, min_length=128, pad_token=128009, tokenizer_data=tokenizer_data) def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids) 
out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids) - out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids) + t5xxl = self.t5xxl.tokenize_with_weights(text, return_word_ids) + out["t5xxl"] = [t5xxl[0]] # Use only first 128 tokens out["llama"] = self.llama.tokenize_with_weights(text, return_word_ids) return out diff --git a/comfy/text_encoders/sd3_clip.py b/comfy/text_encoders/sd3_clip.py index 1727998a8..6c2fbeca4 100644 --- a/comfy/text_encoders/sd3_clip.py +++ b/comfy/text_encoders/sd3_clip.py @@ -32,9 +32,9 @@ def t5_xxl_detect(state_dict, prefix=""): return out class T5XXLTokenizer(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}, min_length=77): + def __init__(self, embedding_directory=None, tokenizer_data={}, min_length=77, max_length=99999999): tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=min_length, tokenizer_data=tokenizer_data) + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=max_length, min_length=min_length, tokenizer_data=tokenizer_data) class SD3Tokenizer: From 1fc00ba4b6576ed5910a88caa47866774ee6d0ca Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 16 Apr 2025 18:34:14 -0400 Subject: [PATCH 0009/1073] Make hidream work with any latent resolution. --- comfy/ldm/hidream/model.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/hidream/model.py b/comfy/ldm/hidream/model.py index 39c67a193..fcb5a9c51 100644 --- a/comfy/ldm/hidream/model.py +++ b/comfy/ldm/hidream/model.py @@ -13,6 +13,7 @@ from comfy.ldm.flux.layers import LastLayer from comfy.ldm.modules.attention import optimized_attention import comfy.model_management +import comfy.ldm.common_dit # Copied from https://github.com/black-forest-labs/flux/blob/main/src/flux/modules/layers.py @@ -701,7 +702,8 @@ class HiDreamImageTransformer2DModel(nn.Module): control = None, transformer_options = {}, ) -> torch.Tensor: - hidden_states = x + bs, c, h, w = x.shape + hidden_states = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size)) timesteps = t pooled_embeds = y T5_encoder_hidden_states = context @@ -794,4 +796,4 @@ class HiDreamImageTransformer2DModel(nn.Module): hidden_states = hidden_states[:, :image_tokens_seq_len, ...] output = self.final_layer(hidden_states, adaln_input) output = self.unpatchify(output, img_sizes) - return -output + return -output[:, :, :h, :w] From 0d720e4367c1c149dbfa0a98ebd81c7776914545 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 17 Apr 2025 06:25:39 -0400 Subject: [PATCH 0010/1073] Don't hardcode length of context_img in wan code. 
--- comfy/ldm/wan/model.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 9b5e5332c..d64e73a8e 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -83,7 +83,7 @@ class WanSelfAttention(nn.Module): class WanT2VCrossAttention(WanSelfAttention): - def forward(self, x, context): + def forward(self, x, context, **kwargs): r""" Args: x(Tensor): Shape [B, L1, C] @@ -116,14 +116,14 @@ class WanI2VCrossAttention(WanSelfAttention): # self.alpha = nn.Parameter(torch.zeros((1, ))) self.norm_k_img = RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() - def forward(self, x, context): + def forward(self, x, context, context_img_len): r""" Args: x(Tensor): Shape [B, L1, C] context(Tensor): Shape [B, L2, C] """ - context_img = context[:, :257] - context = context[:, 257:] + context_img = context[:, :context_img_len] + context = context[:, context_img_len:] # compute query, key, value q = self.norm_q(self.q(x)) @@ -193,6 +193,7 @@ class WanAttentionBlock(nn.Module): e, freqs, context, + context_img_len=None, ): r""" Args: @@ -213,7 +214,7 @@ class WanAttentionBlock(nn.Module): x = x + y * e[2] # cross-attention & ffn - x = x + self.cross_attn(self.norm3(x), context) + x = x + self.cross_attn(self.norm3(x), context, context_img_len=context_img_len) y = self.ffn(self.norm2(x) * (1 + e[4]) + e[3]) x = x + y * e[5] return x @@ -420,9 +421,12 @@ class WanModel(torch.nn.Module): # context context = self.text_embedding(context) - if clip_fea is not None and self.img_emb is not None: - context_clip = self.img_emb(clip_fea) # bs x 257 x dim - context = torch.concat([context_clip, context], dim=1) + context_img_len = None + if clip_fea is not None: + if self.img_emb is not None: + context_clip = self.img_emb(clip_fea) # bs x 257 x dim + context = torch.concat([context_clip, context], dim=1) + context_img_len = clip_fea.shape[-2] patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) @@ -430,12 +434,12 @@ class WanModel(torch.nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"]) + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) return out out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) x = out["img"] else: - x = block(x, e=e0, freqs=freqs, context=context) + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) # head x = self.head(x, e) From c14429940f6f9491c77250eb15cad3746e350753 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 17 Apr 2025 12:04:48 -0400 Subject: [PATCH 0011/1073] Support loading WAN FLF model. 
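
First/last-frame (FLF) checkpoints ship a learned positional embedding for
the CLIP vision tokens. When the img_emb.emb_pos weight is present in the
state dict, model detection reads its token count and passes it in as
flf_pos_embed_token_number, and MLPProj adds it to the incoming image
embeddings before projecting them, roughly (condensed from the diff below):

    image_embeds = image_embeds[:, :emb_pos.shape[1]] + emb_pos[:, :image_embeds.shape[1]]

Checkpoints without that weight are unaffected: emb_pos stays None and the
projection behaves as before.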
--- comfy/ldm/wan/model.py | 13 +++++++++++-- comfy/model_detection.py | 3 +++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index d64e73a8e..8907f70ad 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -251,7 +251,7 @@ class Head(nn.Module): class MLPProj(torch.nn.Module): - def __init__(self, in_dim, out_dim, operation_settings={}): + def __init__(self, in_dim, out_dim, flf_pos_embed_token_number=None, operation_settings={}): super().__init__() self.proj = torch.nn.Sequential( @@ -259,7 +259,15 @@ class MLPProj(torch.nn.Module): torch.nn.GELU(), operation_settings.get("operations").Linear(in_dim, out_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")), operation_settings.get("operations").LayerNorm(out_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype"))) + if flf_pos_embed_token_number is not None: + self.emb_pos = nn.Parameter(torch.empty((1, flf_pos_embed_token_number, in_dim), device=operation_settings.get("device"), dtype=operation_settings.get("dtype"))) + else: + self.emb_pos = None + def forward(self, image_embeds): + if self.emb_pos is not None: + image_embeds = image_embeds[:, :self.emb_pos.shape[1]] + comfy.model_management.cast_to(self.emb_pos[:, :image_embeds.shape[1]], dtype=image_embeds.dtype, device=image_embeds.device) + clip_extra_context_tokens = self.proj(image_embeds) return clip_extra_context_tokens @@ -285,6 +293,7 @@ class WanModel(torch.nn.Module): qk_norm=True, cross_attn_norm=True, eps=1e-6, + flf_pos_embed_token_number=None, image_model=None, device=None, dtype=None, @@ -374,7 +383,7 @@ class WanModel(torch.nn.Module): self.rope_embedder = EmbedND(dim=d, theta=10000.0, axes_dim=[d - 4 * (d // 6), 2 * (d // 6), 2 * (d // 6)]) if model_type == 'i2v': - self.img_emb = MLPProj(1280, dim, operation_settings=operation_settings) + self.img_emb = MLPProj(1280, dim, flf_pos_embed_token_number=flf_pos_embed_token_number, operation_settings=operation_settings) else: self.img_emb = None diff --git a/comfy/model_detection.py b/comfy/model_detection.py index a4da1afcd..6499bf238 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -321,6 +321,9 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["model_type"] = "i2v" else: dit_config["model_type"] = "t2v" + flf_weight = state_dict.get('{}img_emb.emb_pos'.format(key_prefix)) + if flf_weight is not None: + dit_config["flf_pos_embed_token_number"] = flf_weight.shape[1] return dit_config if '{}latent_in.weight'.format(key_prefix) in state_dict_keys: # Hunyuan 3D From dbcfd092a29c272696bae856d943005fc0cc3036 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 17 Apr 2025 12:42:34 -0400 Subject: [PATCH 0012/1073] Set default context_img_len to 257 --- comfy/ldm/wan/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 8907f70ad..2a30497c5 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -193,7 +193,7 @@ class WanAttentionBlock(nn.Module): e, freqs, context, - context_img_len=None, + context_img_len=257, ): r""" Args: From eba7a25e7abf9ec47ab2a42a5c1e6a5cf52351e1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 17 Apr 2025 13:18:43 -0400 Subject: [PATCH 0013/1073] Add WanFirstLastFrameToVideo node to use the new model. 
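
The node writes the optional start and end frames into a mid-grey frame
sequence, encodes that with the VAE as the concat latent, builds a temporal
mask that pins only the provided frames, and concatenates the CLIP vision
tokens of both frames along the token axis when both are supplied. A minimal
wiring sketch (tensor names are placeholders):

    positive, negative, latent = WanFirstLastFrameToVideo().encode(
        positive, negative, vae,
        width=832, height=480, length=81, batch_size=1,
        start_image=first_frame, end_image=last_frame,
        clip_vision_start_image=clip_vision_first,
        clip_vision_end_image=clip_vision_last,
    )

WanFunInpaintToVideo now delegates to this same encode path instead of
duplicating it.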
---
 comfy_extras/nodes_wan.py | 98 ++++++++++++++++++++++++++++-----------
 1 file changed, 70 insertions(+), 28 deletions(-)

diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py
index 2d0f31ac8..8ad358ce8 100644
--- a/comfy_extras/nodes_wan.py
+++ b/comfy_extras/nodes_wan.py
@@ -4,6 +4,7 @@ import torch
 import comfy.model_management
 import comfy.utils
 import comfy.latent_formats
+import comfy.clip_vision
 
 
 class WanImageToVideo:
@@ -99,6 +100,72 @@ class WanFunControlToVideo:
         out_latent["samples"] = latent
         return (positive, negative, out_latent)
 
+class WanFirstLastFrameToVideo:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"positive": ("CONDITIONING", ),
+                             "negative": ("CONDITIONING", ),
+                             "vae": ("VAE", ),
+                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
+                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
+                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
+                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+                },
+                "optional": {"clip_vision_start_image": ("CLIP_VISION_OUTPUT", ),
+                             "clip_vision_end_image": ("CLIP_VISION_OUTPUT", ),
+                             "start_image": ("IMAGE", ),
+                             "end_image": ("IMAGE", ),
+                }}
+
+    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+    RETURN_NAMES = ("positive", "negative", "latent")
+    FUNCTION = "encode"
+
+    CATEGORY = "conditioning/video_models"
+
+    def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_start_image=None, clip_vision_end_image=None):
+        latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
+        if start_image is not None:
+            start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
+        if end_image is not None:
+            end_image = comfy.utils.common_upscale(end_image[-length:].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
+
+        image = torch.ones((length, height, width, 3)) * 0.5
+        mask = torch.ones((1, 1, latent.shape[2] * 4, latent.shape[-2], latent.shape[-1]))
+
+        if start_image is not None:
+            image[:start_image.shape[0]] = start_image
+            mask[:, :, :start_image.shape[0] + 3] = 0.0
+
+        if end_image is not None:
+            image[-end_image.shape[0]:] = end_image
+            mask[:, :, -end_image.shape[0]:] = 0.0
+
+        concat_latent_image = vae.encode(image[:, :, :, :3])
+        mask = mask.view(1, mask.shape[2] // 4, 4, mask.shape[3], mask.shape[4]).transpose(1, 2)
+        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask})
+        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask})
+
+        clip_vision_output = None
+        if clip_vision_start_image is not None:
+            clip_vision_output = clip_vision_start_image
+
+        if clip_vision_end_image is not None:
+            if clip_vision_output is not None:
+                states = torch.cat([clip_vision_output.penultimate_hidden_states, clip_vision_end_image.penultimate_hidden_states], dim=-2)
+                clip_vision_output = comfy.clip_vision.Output()
+                clip_vision_output.penultimate_hidden_states = states
+            else:
+                clip_vision_output = clip_vision_end_image
+
+        if clip_vision_output is not None:
+            positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output})
+            negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output":
clip_vision_output}) + + out_latent = {} + out_latent["samples"] = latent + return (positive, negative, out_latent) + + class WanFunInpaintToVideo: @classmethod def INPUT_TYPES(s): @@ -122,38 +189,13 @@ class WanFunInpaintToVideo: CATEGORY = "conditioning/video_models" def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_output=None): - latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) - if start_image is not None: - start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) - if end_image is not None: - end_image = comfy.utils.common_upscale(end_image[-length:].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + flfv = WanFirstLastFrameToVideo() + return flfv.encode(positive, negative, vae, width, height, length, batch_size, start_image=start_image, end_image=end_image, clip_vision_start_image=clip_vision_output) - image = torch.ones((length, height, width, 3)) * 0.5 - mask = torch.ones((1, 1, latent.shape[2] * 4, latent.shape[-2], latent.shape[-1])) - - if start_image is not None: - image[:start_image.shape[0]] = start_image - mask[:, :, :start_image.shape[0] + 3] = 0.0 - - if end_image is not None: - image[-end_image.shape[0]:] = end_image - mask[:, :, -end_image.shape[0]:] = 0.0 - - concat_latent_image = vae.encode(image[:, :, :, :3]) - mask = mask.view(1, mask.shape[2] // 4, 4, mask.shape[3], mask.shape[4]).transpose(1, 2) - positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) - negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) - - if clip_vision_output is not None: - positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) - negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) - - out_latent = {} - out_latent["samples"] = latent - return (positive, negative, out_latent) NODE_CLASS_MAPPINGS = { "WanImageToVideo": WanImageToVideo, "WanFunControlToVideo": WanFunControlToVideo, "WanFunInpaintToVideo": WanFunInpaintToVideo, + "WanFirstLastFrameToVideo": WanFirstLastFrameToVideo, } From 05d5a75cdcb749286c9ce9e034bb37a2f6195c37 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Fri, 18 Apr 2025 02:25:33 +0800 Subject: [PATCH 0014/1073] Update frontend to 1.16 (Install templates as pip package) (#7623) * install templates as pip package * Update requirements.txt * bump templates version to include hidream --------- Co-authored-by: Chenlei Hu --- app/frontend_management.py | 21 +++++++++++++++++++++ requirements.txt | 3 ++- server.py | 6 ++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/app/frontend_management.py b/app/frontend_management.py index c56ea86e0..7b7923b79 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -184,6 +184,27 @@ comfyui-frontend-package is not installed. ) sys.exit(-1) + @classmethod + def templates_path(cls) -> str: + try: + import comfyui_workflow_templates + + return str( + importlib.resources.files(comfyui_workflow_templates) / "templates" + ) + except ImportError: + logging.error( + f""" +********** ERROR *********** + +comfyui-workflow-templates is not installed. 
+ +{frontend_install_warning_message()} + +********** ERROR *********** +""".strip() + ) + @classmethod def parse_version_string(cls, value: str) -> tuple[str, str, str]: """ diff --git a/requirements.txt b/requirements.txt index 851db23bd..278e3eaa8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ -comfyui-frontend-package==1.15.13 +comfyui-frontend-package==1.16.8 +comfyui-workflow-templates==0.1.1 torch torchsde torchvision diff --git a/server.py b/server.py index 62667ce18..0cc97b248 100644 --- a/server.py +++ b/server.py @@ -736,6 +736,12 @@ class PromptServer(): for name, dir in nodes.EXTENSION_WEB_DIRS.items(): self.app.add_routes([web.static('/extensions/' + name, dir)]) + workflow_templates_path = FrontendManager.templates_path() + if workflow_templates_path: + self.app.add_routes([ + web.static('/templates', workflow_templates_path) + ]) + self.app.add_routes([ web.static('/', self.web_root), ]) From 93292bc450dd291925c45adea00ebedb8a3209ef Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 17 Apr 2025 14:45:01 -0400 Subject: [PATCH 0015/1073] ComfyUI version 0.3.29 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index a44538d1a..f9161b37e 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.28" +__version__ = "0.3.29" diff --git a/pyproject.toml b/pyproject.toml index 6eb1704db..e8fc9555d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.28" +version = "0.3.29" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 19373aee759be2f0868a69603c5d967e5e63e1c5 Mon Sep 17 00:00:00 2001 From: BVH <82035780+bvhari@users.noreply.github.com> Date: Fri, 18 Apr 2025 00:54:33 +0530 Subject: [PATCH 0016/1073] Add FreSca node (#7631) --- comfy_extras/nodes_fresca.py | 102 +++++++++++++++++++++++++++++++++++ nodes.py | 3 +- 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_fresca.py diff --git a/comfy_extras/nodes_fresca.py b/comfy_extras/nodes_fresca.py new file mode 100644 index 000000000..b0b86f235 --- /dev/null +++ b/comfy_extras/nodes_fresca.py @@ -0,0 +1,102 @@ +# Code based on https://github.com/WikiChao/FreSca (MIT License) +import torch +import torch.fft as fft + + +def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20): + """ + Apply frequency-dependent scaling to an image tensor using Fourier transforms. + + Parameters: + x: Input tensor of shape (B, C, H, W) + scale_low: Scaling factor for low-frequency components (default: 1.0) + scale_high: Scaling factor for high-frequency components (default: 1.5) + freq_cutoff: Number of frequency indices around center to consider as low-frequency (default: 20) + + Returns: + x_filtered: Filtered version of x in spatial domain with frequency-specific scaling applied. 
+ """ + # Preserve input dtype and device + dtype, device = x.dtype, x.device + + # Convert to float32 for FFT computations + x = x.to(torch.float32) + + # 1) Apply FFT and shift low frequencies to center + x_freq = fft.fftn(x, dim=(-2, -1)) + x_freq = fft.fftshift(x_freq, dim=(-2, -1)) + + # 2) Create a mask to scale frequencies differently + B, C, H, W = x_freq.shape + crow, ccol = H // 2, W // 2 + + # Initialize mask with high-frequency scaling factor + mask = torch.ones((B, C, H, W), device=device) * scale_high + + # Apply low-frequency scaling factor to center region + mask[ + ..., + crow - freq_cutoff : crow + freq_cutoff, + ccol - freq_cutoff : ccol + freq_cutoff, + ] = scale_low + + # 3) Apply frequency-specific scaling + x_freq = x_freq * mask + + # 4) Convert back to spatial domain + x_freq = fft.ifftshift(x_freq, dim=(-2, -1)) + x_filtered = fft.ifftn(x_freq, dim=(-2, -1)).real + + # 5) Restore original dtype + x_filtered = x_filtered.to(dtype) + + return x_filtered + + +class FreSca: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "scale_low": ("FLOAT", {"default": 1.0, "min": 0, "max": 10, "step": 0.01, + "tooltip": "Scaling factor for low-frequency components"}), + "scale_high": ("FLOAT", {"default": 1.25, "min": 0, "max": 10, "step": 0.01, + "tooltip": "Scaling factor for high-frequency components"}), + "freq_cutoff": ("INT", {"default": 20, "min": 1, "max": 100, "step": 1, + "tooltip": "Number of frequency indices around center to consider as low-frequency"}), + } + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "_for_testing" + DESCRIPTION = "Applies frequency-dependent scaling to the guidance" + def patch(self, model, scale_low, scale_high, freq_cutoff): + def custom_cfg_function(args): + cond = args["conds_out"][0] + uncond = args["conds_out"][1] + + guidance = cond - uncond + filtered_guidance = Fourier_filter( + guidance, + scale_low=scale_low, + scale_high=scale_high, + freq_cutoff=freq_cutoff, + ) + filtered_cond = filtered_guidance + uncond + + return [filtered_cond, uncond] + + m = model.clone() + m.set_model_sampler_pre_cfg_function(custom_cfg_function) + + return (m,) + + +NODE_CLASS_MAPPINGS = { + "FreSca": FreSca, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "FreSca": "FreSca", +} diff --git a/nodes.py b/nodes.py index ae0a2e183..fce3dcb3b 100644 --- a/nodes.py +++ b/nodes.py @@ -2281,7 +2281,8 @@ def init_builtin_extra_nodes(): "nodes_primitive.py", "nodes_cfg.py", "nodes_optimalsteps.py", - "nodes_hidream.py" + "nodes_hidream.py", + "nodes_fresca.py", ] import_failed = [] From 3dc240d08939bef67ed6e7308d6c68d6410bbfa5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 17 Apr 2025 15:46:41 -0400 Subject: [PATCH 0017/1073] Make fresca work on multi dim. 
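A note on the rewrite below: `Tensor.narrow` returns a view into the original
tensor, so chaining one narrow per trailing dimension selects the centered
low-frequency block, and filling the final view writes `scale_low` straight
into the full mask regardless of input rank. A standalone sketch of that step
(toy shapes and cutoff chosen for illustration, not taken from the node):

```python
import torch

scale_low, scale_high, freq_cutoff = 1.0, 1.5, 2
x_freq = torch.randn(1, 4, 8, 8, dtype=torch.complex64)  # (B, C, H, W)

mask = torch.ones(x_freq.shape) * scale_high  # high-frequency scale everywhere
m = mask
for d in range(len(x_freq.shape) - 2):
    dim = d + 2                            # skip batch/channel dims
    cc = x_freq.shape[dim] // 2            # center index of this dim
    f_c = min(freq_cutoff, cc)             # clamp cutoff to the dim size
    m = m.narrow(dim, cc - f_c, f_c * 2)   # narrow() is a view, not a copy
m[:] = scale_low                           # writes through into `mask`

assert (mask[0, 0, 2:6, 2:6] == scale_low).all()
assert mask[0, 0, 0, 0] == scale_high
```

Because views share storage, the same loop covers 5D video latents
(B, C, T, H, W) without the hard-coded H/W indexing this commit removes,
which is the point of the change.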
--- comfy_extras/nodes_fresca.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/comfy_extras/nodes_fresca.py b/comfy_extras/nodes_fresca.py index b0b86f235..fa573299a 100644 --- a/comfy_extras/nodes_fresca.py +++ b/comfy_extras/nodes_fresca.py @@ -26,19 +26,17 @@ def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20): x_freq = fft.fftn(x, dim=(-2, -1)) x_freq = fft.fftshift(x_freq, dim=(-2, -1)) - # 2) Create a mask to scale frequencies differently - B, C, H, W = x_freq.shape - crow, ccol = H // 2, W // 2 - # Initialize mask with high-frequency scaling factor - mask = torch.ones((B, C, H, W), device=device) * scale_high + mask = torch.ones(x_freq.shape, device=device) * scale_high + m = mask + for d in range(len(x_freq.shape) - 2): + dim = d + 2 + cc = x_freq.shape[dim] // 2 + f_c = min(freq_cutoff, cc) + m = m.narrow(dim, cc - f_c, f_c * 2) # Apply low-frequency scaling factor to center region - mask[ - ..., - crow - freq_cutoff : crow + freq_cutoff, - ccol - freq_cutoff : ccol + freq_cutoff, - ] = scale_low + m[:] = scale_low # 3) Apply frequency-specific scaling x_freq = x_freq * mask From 880c205df1fca4491c78523eb52d1a388f89ef92 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 17 Apr 2025 16:58:27 -0400 Subject: [PATCH 0018/1073] Add hidream to readme. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a99aca0e7..cf6df7e55 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [HunyuanDiT](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_dit/) - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/) - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) + - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) - Video Models - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) From 55822faa05293dce6039d504695a12124a3eb35f Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Thu, 17 Apr 2025 21:02:24 -0400 Subject: [PATCH 0019/1073] [Type] Annotate graph.get_input_info (#7386) * [Type] Annotate graph.get_input_info * nit * nit --- comfy_execution/graph.py | 24 +++++++++++++++++++++--- execution.py | 31 ++++++++++++++++--------------- 2 files changed, 37 insertions(+), 18 deletions(-) diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py index 59b42b746..a2799b52e 100644 --- a/comfy_execution/graph.py +++ b/comfy_execution/graph.py @@ -1,6 +1,9 @@ -import nodes +from __future__ import annotations +from typing import Type, Literal +import nodes from comfy_execution.graph_utils import is_link +from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, InputTypeOptions class DependencyCycleError(Exception): pass @@ -54,7 +57,22 @@ class DynamicPrompt: def get_original_prompt(self): return self.original_prompt -def get_input_info(class_def, input_name, valid_inputs=None): +def get_input_info( + class_def: Type[ComfyNodeABC], + input_name: str, + valid_inputs: InputTypeDict | None = None +) -> tuple[str, Literal["required", "optional", "hidden"], InputTypeOptions] | tuple[None, None, None]: + """Get the input type, category, and extra info for a given input name. + + Arguments: + class_def: The class definition of the node. + input_name: The name of the input to get info for. 
+ valid_inputs: The valid inputs for the node, or None to use the class_def.INPUT_TYPES(). + + Returns: + tuple[str, str, dict] | tuple[None, None, None]: The input type, category, and extra info for the input name. + """ + valid_inputs = valid_inputs or class_def.INPUT_TYPES() input_info = None input_category = None @@ -126,7 +144,7 @@ class TopologicalSort: from_node_id, from_socket = value if subgraph_nodes is not None and from_node_id not in subgraph_nodes: continue - input_type, input_category, input_info = self.get_input_info(unique_id, input_name) + _, _, input_info = self.get_input_info(unique_id, input_name) is_lazy = input_info is not None and "lazy" in input_info and input_info["lazy"] if (include_lazy or not is_lazy) and not self.is_cached(from_node_id): node_ids.append(from_node_id) diff --git a/execution.py b/execution.py index 9a5e27771..d09102f55 100644 --- a/execution.py +++ b/execution.py @@ -111,7 +111,7 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, e missing_keys = {} for x in inputs: input_data = inputs[x] - input_type, input_category, input_info = get_input_info(class_def, x, valid_inputs) + _, input_category, input_info = get_input_info(class_def, x, valid_inputs) def mark_missing(): missing_keys[x] = True input_data_all[x] = (None,) @@ -574,7 +574,7 @@ def validate_inputs(prompt, item, validated): received_types = {} for x in valid_inputs: - type_input, input_category, extra_info = get_input_info(obj_class, x, class_inputs) + input_type, input_category, extra_info = get_input_info(obj_class, x, class_inputs) assert extra_info is not None if x not in inputs: if input_category == "required": @@ -590,7 +590,7 @@ def validate_inputs(prompt, item, validated): continue val = inputs[x] - info = (type_input, extra_info) + info = (input_type, extra_info) if isinstance(val, list): if len(val) != 2: error = { @@ -611,8 +611,8 @@ def validate_inputs(prompt, item, validated): r = nodes.NODE_CLASS_MAPPINGS[o_class_type].RETURN_TYPES received_type = r[val[1]] received_types[x] = received_type - if 'input_types' not in validate_function_inputs and not validate_node_input(received_type, type_input): - details = f"{x}, received_type({received_type}) mismatch input_type({type_input})" + if 'input_types' not in validate_function_inputs and not validate_node_input(received_type, input_type): + details = f"{x}, received_type({received_type}) mismatch input_type({input_type})" error = { "type": "return_type_mismatch", "message": "Return type mismatch between linked nodes", @@ -660,22 +660,22 @@ def validate_inputs(prompt, item, validated): val = val["__value__"] inputs[x] = val - if type_input == "INT": + if input_type == "INT": val = int(val) inputs[x] = val - if type_input == "FLOAT": + if input_type == "FLOAT": val = float(val) inputs[x] = val - if type_input == "STRING": + if input_type == "STRING": val = str(val) inputs[x] = val - if type_input == "BOOLEAN": + if input_type == "BOOLEAN": val = bool(val) inputs[x] = val except Exception as ex: error = { "type": "invalid_input_type", - "message": f"Failed to convert an input value to a {type_input} value", + "message": f"Failed to convert an input value to a {input_type} value", "details": f"{x}, {val}, {ex}", "extra_info": { "input_name": x, @@ -715,18 +715,19 @@ def validate_inputs(prompt, item, validated): errors.append(error) continue - if isinstance(type_input, list): - if val not in type_input: + if isinstance(input_type, list): + combo_options = input_type + if val not in combo_options: 
input_config = info list_info = "" # Don't send back gigantic lists like if they're lots of # scanned model filepaths - if len(type_input) > 20: - list_info = f"(list of length {len(type_input)})" + if len(combo_options) > 20: + list_info = f"(list of length {len(combo_options)})" input_config = None else: - list_info = str(type_input) + list_info = str(combo_options) error = { "type": "value_not_in_list", From 34e06bf7ecb6bca4631b746da5af433098db92c7 Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Fri, 18 Apr 2025 02:52:18 -0400 Subject: [PATCH 0020/1073] add support to output camera state (#7582) --- comfy_extras/nodes_load_3d.py | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/comfy_extras/nodes_load_3d.py b/comfy_extras/nodes_load_3d.py index db30030fb..53d892bc4 100644 --- a/comfy_extras/nodes_load_3d.py +++ b/comfy_extras/nodes_load_3d.py @@ -21,8 +21,8 @@ class Load3D(): "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), }} - RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE") - RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart") + RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE", "LOAD3D_CAMERA") + RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart", "camera_info") FUNCTION = "process" EXPERIMENTAL = True @@ -41,7 +41,7 @@ class Load3D(): normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path) lineart_image, ignore_mask3 = load_image_node.load_image(image=lineart_path) - return output_image, output_mask, model_file, normal_image, lineart_image + return output_image, output_mask, model_file, normal_image, lineart_image, image['camera_info'] class Load3DAnimation(): @classmethod @@ -59,8 +59,8 @@ class Load3DAnimation(): "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), }} - RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE") - RETURN_NAMES = ("image", "mask", "mesh_path", "normal") + RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA") + RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info") FUNCTION = "process" EXPERIMENTAL = True @@ -77,13 +77,16 @@ class Load3DAnimation(): ignore_image, output_mask = load_image_node.load_image(image=mask_path) normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path) - return output_image, output_mask, model_file, normal_image + return output_image, output_mask, model_file, normal_image, image['camera_info'] class Preview3D(): @classmethod def INPUT_TYPES(s): return {"required": { "model_file": ("STRING", {"default": "", "multiline": False}), + }, + "optional": { + "camera_info": ("LOAD3D_CAMERA", {}) }} OUTPUT_NODE = True @@ -95,13 +98,22 @@ class Preview3D(): EXPERIMENTAL = True def process(self, model_file, **kwargs): - return {"ui": {"model_file": [model_file]}, "result": ()} + camera_info = kwargs.get("camera_info", None) + + return { + "ui": { + "result": [model_file, camera_info] + } + } class Preview3DAnimation(): @classmethod def INPUT_TYPES(s): return {"required": { "model_file": ("STRING", {"default": "", "multiline": False}), + }, + "optional": { + "camera_info": ("LOAD3D_CAMERA", {}) }} OUTPUT_NODE = True @@ -113,7 +125,13 @@ class Preview3DAnimation(): EXPERIMENTAL = True def process(self, model_file, **kwargs): - return {"ui": {"model_file": [model_file]}, "result": ()} + camera_info = kwargs.get("camera_info", None) + + return { + "ui": { + "result": [model_file, camera_info] + } + } NODE_CLASS_MAPPINGS = { "Load3D": 
Load3D, From 2383a39e3baa11344b0a23b51e3c2f5deff0fc27 Mon Sep 17 00:00:00 2001 From: City <125218114+city96@users.noreply.github.com> Date: Fri, 18 Apr 2025 08:53:36 +0200 Subject: [PATCH 0021/1073] Replace CLIPType if with getattr (#7589) * Replace CLIPType if with getattr * Forgot to remove breakpoint from testing --- nodes.py | 31 +++---------------------------- 1 file changed, 3 insertions(+), 28 deletions(-) diff --git a/nodes.py b/nodes.py index fce3dcb3b..d4082d19d 100644 --- a/nodes.py +++ b/nodes.py @@ -930,26 +930,7 @@ class CLIPLoader: DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl" def load_clip(self, clip_name, type="stable_diffusion", device="default"): - if type == "stable_cascade": - clip_type = comfy.sd.CLIPType.STABLE_CASCADE - elif type == "sd3": - clip_type = comfy.sd.CLIPType.SD3 - elif type == "stable_audio": - clip_type = comfy.sd.CLIPType.STABLE_AUDIO - elif type == "mochi": - clip_type = comfy.sd.CLIPType.MOCHI - elif type == "ltxv": - clip_type = comfy.sd.CLIPType.LTXV - elif type == "pixart": - clip_type = comfy.sd.CLIPType.PIXART - elif type == "cosmos": - clip_type = comfy.sd.CLIPType.COSMOS - elif type == "lumina2": - clip_type = comfy.sd.CLIPType.LUMINA2 - elif type == "wan": - clip_type = comfy.sd.CLIPType.WAN - else: - clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION + clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) model_options = {} if device == "cpu": @@ -977,16 +958,10 @@ class DualCLIPLoader: DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5" def load_clip(self, clip_name1, clip_name2, type, device="default"): + clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) + clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1) clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2) - if type == "sdxl": - clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION - elif type == "sd3": - clip_type = comfy.sd.CLIPType.SD3 - elif type == "flux": - clip_type = comfy.sd.CLIPType.FLUX - elif type == "hunyuan_video": - clip_type = comfy.sd.CLIPType.HUNYUAN_VIDEO model_options = {} if device == "cpu": From 7ecd5e961465d9bb20fb12b7068e1930da875b0e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 18 Apr 2025 03:16:16 -0400 Subject: [PATCH 0022/1073] Increase freq_cutoff in FreSca node. 
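While here, a note on the lookup that replaced the CLIPType if/elif chain in
the previous commit: enum members are class attributes, so `getattr` resolves
the widget string by upper-cased name, with a third-argument fallback for
anything unknown. A reduced sketch (the enum is abbreviated, and the
STABLE_DIFFUSION value is illustrative; WAN and HIDREAM match comfy/sd.py):

```python
from enum import Enum

class CLIPType(Enum):
    STABLE_DIFFUSION = 1  # value illustrative; see comfy/sd.py for the full enum
    WAN = 13
    HIDREAM = 14

def resolve(type_name: str) -> CLIPType:
    # Enum members are attributes of the class, so getattr finds them by
    # name; unknown strings fall back instead of raising AttributeError.
    return getattr(CLIPType, type_name.upper(), CLIPType.STABLE_DIFFUSION)

assert resolve("wan") is CLIPType.WAN
assert resolve("not_a_type") is CLIPType.STABLE_DIFFUSION
```

The trade-off is that a typo degrades silently to STABLE_DIFFUSION rather
than erroring; the node's fixed combo list is what keeps that path
unreachable from the UI.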
--- comfy_extras/nodes_fresca.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_fresca.py b/comfy_extras/nodes_fresca.py index fa573299a..ee310c874 100644 --- a/comfy_extras/nodes_fresca.py +++ b/comfy_extras/nodes_fresca.py @@ -61,7 +61,7 @@ class FreSca: "tooltip": "Scaling factor for low-frequency components"}), "scale_high": ("FLOAT", {"default": 1.25, "min": 0, "max": 10, "step": 0.01, "tooltip": "Scaling factor for high-frequency components"}), - "freq_cutoff": ("INT", {"default": 20, "min": 1, "max": 100, "step": 1, + "freq_cutoff": ("INT", {"default": 20, "min": 1, "max": 10000, "step": 1, "tooltip": "Number of frequency indices around center to consider as low-frequency"}), } } From f3b09b9f2d374518449d4e0211dbb21b95858eb5 Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Fri, 18 Apr 2025 15:12:42 -0400 Subject: [PATCH 0023/1073] [BugFix] Update frontend to 1.16.9 (#7655) Backport https://github.com/Comfy-Org/ComfyUI_frontend/pull/3505 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 278e3eaa8..ff9f66a77 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.16.8 +comfyui-frontend-package==1.16.9 comfyui-workflow-templates==0.1.1 torch torchsde From dc300a45698e5cb85f155b8fcb899b1df3c0f855 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Sat, 19 Apr 2025 12:21:46 -0700 Subject: [PATCH 0024/1073] Add wanfun template workflows. (#7678) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ff9f66a77..5c3a854ce 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.16.9 -comfyui-workflow-templates==0.1.1 +comfyui-workflow-templates==0.1.3 torch torchsde torchvision From 636d4bfb8994c7f123f15971af5d38a9754377ab Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 19 Apr 2025 15:55:43 -0400 Subject: [PATCH 0025/1073] Fix hard crash when the spiece tokenizer path is bad. --- comfy/text_encoders/spiece_tokenizer.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/text_encoders/spiece_tokenizer.py b/comfy/text_encoders/spiece_tokenizer.py index 21df4f863..caccb3ca2 100644 --- a/comfy/text_encoders/spiece_tokenizer.py +++ b/comfy/text_encoders/spiece_tokenizer.py @@ -1,4 +1,5 @@ import torch +import os class SPieceTokenizer: @staticmethod @@ -15,6 +16,8 @@ class SPieceTokenizer: if isinstance(tokenizer_path, bytes): self.tokenizer = sentencepiece.SentencePieceProcessor(model_proto=tokenizer_path, add_bos=self.add_bos, add_eos=self.add_eos) else: + if not os.path.isfile(tokenizer_path): + raise ValueError("invalid tokenizer") self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path, add_bos=self.add_bos, add_eos=self.add_eos) def get_vocab(self): From 4486b0d0ff536b32100863f68e870ba18bf3d051 Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Sat, 19 Apr 2025 14:23:31 -0700 Subject: [PATCH 0026/1073] Update CODEOWNERS and add christian-byrne (#7663) --- CODEOWNERS | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 72a59effe..013ea8622 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -5,20 +5,20 @@ # Inlined the team members for now. 
# Maintainers -*.md @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink -/tests/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink -/tests-unit/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink -/notebooks/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink -/script_examples/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink -/.github/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink -/requirements.txt @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink -/pyproject.toml @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink +*.md @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/tests/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/tests-unit/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/notebooks/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/script_examples/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/.github/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/requirements.txt @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/pyproject.toml @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne # Python web server -/api_server/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata -/app/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata -/utils/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata +/api_server/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @christian-byrne +/app/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @christian-byrne +/utils/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @christian-byrne # Node developers -/comfy_extras/ @yoland68 @robinjhuang @huchenlei @pythongosssss @ltdrdata @Kosinkadink @webfiltered -/comfy/comfy_types/ @yoland68 @robinjhuang @huchenlei @pythongosssss @ltdrdata @Kosinkadink @webfiltered +/comfy_extras/ @yoland68 @robinjhuang @huchenlei @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne +/comfy/comfy_types/ @yoland68 @robinjhuang @huchenlei @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne From f43e1d7f415374cea5bf7561d8e1278e1e52c95a Mon Sep 17 00:00:00 2001 From: power88 <741815398@qq.com> Date: Sun, 20 Apr 2025 07:47:30 +0800 Subject: [PATCH 0027/1073] Hidream: Allow loading hidream text encoders in CLIPLoader and DualCLIPLoader (#7676) * Hidream: Allow partial loading text encoders * reformat code for ruff check. 
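The loader path added below classifies each supplied state dict once and
turns the set of detected encoder kinds into boolean constructor flags for
the HiDream text-encoder stack. A reduced sketch of that flow (the TEModel
stand-in and the toy detector are illustrative; the real detect_te_model in
comfy/sd.py inspects state-dict keys):

```python
from enum import Enum, auto

class TEModel(Enum):
    # Stand-in for the detection enum in comfy/sd.py.
    CLIP_L = auto()
    CLIP_G = auto()
    T5_XXL = auto()
    LLAMA3_8 = auto()

def detect_te_model(state_dict) -> TEModel:
    # Hypothetical detector; the real one inspects key names and shapes.
    return state_dict["kind"]

def hidream_flags(clip_data):
    detected = [detect_te_model(sd) for sd in clip_data]
    return {
        "clip_l": TEModel.CLIP_L in detected,
        "clip_g": TEModel.CLIP_G in detected,
        "t5": TEModel.T5_XXL in detected,
        "llama": TEModel.LLAMA3_8 in detected,
    }

flags = hidream_flags([{"kind": TEModel.T5_XXL}, {"kind": TEModel.LLAMA3_8}])
assert flags == {"clip_l": False, "clip_g": False, "t5": True, "llama": True}
```

The same booleans also gate which detection helpers run, so t5xxl_detect and
llama_detect are only invoked for encoders that are actually present.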
--- comfy/sd.py | 34 ++++++++++++++++++++++++++++++++++ comfy/text_encoders/hidream.py | 4 ++++ nodes.py | 8 ++++---- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index d97873ba2..8aba5d655 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -703,6 +703,7 @@ class CLIPType(Enum): COSMOS = 11 LUMINA2 = 12 WAN = 13 + HIDREAM = 14 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -791,6 +792,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip elif clip_type == CLIPType.SD3: clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=False, clip_g=True, t5=False) clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer + elif clip_type == CLIPType.HIDREAM: + clip_target.clip = comfy.text_encoders.hidream.hidream_clip(clip_l=False, clip_g=True, t5=False, llama=False, dtype_t5=None, dtype_llama=None, t5xxl_scaled_fp8=None, llama_scaled_fp8=None) + clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: clip_target.clip = sdxl_clip.SDXLRefinerClipModel clip_target.tokenizer = sdxl_clip.SDXLTokenizer @@ -811,6 +815,10 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.wan.te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.wan.WanT5Tokenizer tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None) + elif clip_type == CLIPType.HIDREAM: + clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**t5xxl_detect(clip_data), + clip_l=False, clip_g=False, t5=True, llama=False, dtype_llama=None, llama_scaled_fp8=None) + clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: #CLIPType.MOCHI clip_target.clip = comfy.text_encoders.genmo.mochi_te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.genmo.MochiT5Tokenizer @@ -827,10 +835,18 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.lumina2.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.lumina2.LuminaTokenizer tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None) + elif te_model == TEModel.LLAMA3_8: + clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**llama_detect(clip_data), + clip_l=False, clip_g=False, t5=False, llama=True, dtype_t5=None, t5xxl_scaled_fp8=None) + clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: + # clip_l if clip_type == CLIPType.SD3: clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=True, clip_g=False, t5=False) clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer + elif clip_type == CLIPType.HIDREAM: + clip_target.clip = comfy.text_encoders.hidream.hidream_clip(clip_l=True, clip_g=False, t5=False, llama=False, dtype_t5=None, dtype_llama=None, t5xxl_scaled_fp8=None, llama_scaled_fp8=None) + clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: clip_target.clip = sd1_clip.SD1ClipModel clip_target.tokenizer = sd1_clip.SD1Tokenizer @@ -848,6 +864,24 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip elif clip_type == CLIPType.HUNYUAN_VIDEO: clip_target.clip = comfy.text_encoders.hunyuan_video.hunyuan_video_clip(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.hunyuan_video.HunyuanVideoTokenizer + elif clip_type == CLIPType.HIDREAM: + # Detect + hidream_dualclip_classes = [] 
+ for hidream_te in clip_data: + te_model = detect_te_model(hidream_te) + hidream_dualclip_classes.append(te_model) + + clip_l = TEModel.CLIP_L in hidream_dualclip_classes + clip_g = TEModel.CLIP_G in hidream_dualclip_classes + t5 = TEModel.T5_XXL in hidream_dualclip_classes + llama = TEModel.LLAMA3_8 in hidream_dualclip_classes + + # Initialize t5xxl_detect and llama_detect kwargs if needed + t5_kwargs = t5xxl_detect(clip_data) if t5 else {} + llama_kwargs = llama_detect(clip_data) if llama else {} + + clip_target.clip = comfy.text_encoders.hidream.hidream_clip(clip_l=clip_l, clip_g=clip_g, t5=t5, llama=llama, **t5_kwargs, **llama_kwargs) + clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: clip_target.clip = sdxl_clip.SDXLClipModel clip_target.tokenizer = sdxl_clip.SDXLTokenizer diff --git a/comfy/text_encoders/hidream.py b/comfy/text_encoders/hidream.py index 6c34c5572..ca54eaa78 100644 --- a/comfy/text_encoders/hidream.py +++ b/comfy/text_encoders/hidream.py @@ -109,11 +109,15 @@ class HiDreamTEModel(torch.nn.Module): if self.t5xxl is not None: t5_output = self.t5xxl.encode_token_weights(token_weight_pairs_t5) t5_out, t5_pooled = t5_output[:2] + else: + t5_out = None if self.llama is not None: ll_output = self.llama.encode_token_weights(token_weight_pairs_llama) ll_out, ll_pooled = ll_output[:2] ll_out = ll_out[:, 1:] + else: + ll_out = None if t5_out is None: t5_out = torch.zeros((1, 1, 4096), device=comfy.model_management.intermediate_device()) diff --git a/nodes.py b/nodes.py index d4082d19d..b1ab62aad 100644 --- a/nodes.py +++ b/nodes.py @@ -917,7 +917,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -927,7 +927,7 @@ class CLIPLoader: CATEGORY = "advanced/loaders" - DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl" + DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5" def load_clip(self, clip_name, type="stable_diffusion", device="default"): clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) @@ -945,7 +945,7 @@ class DualCLIPLoader: def INPUT_TYPES(s): return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["sdxl", "sd3", "flux", "hunyuan_video"], ), + "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -955,7 +955,7 @@ class DualCLIPLoader: CATEGORY = "advanced/loaders" - DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5" + DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and 
llama" def load_clip(self, clip_name1, clip_name2, type, device="default"): clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) From fd274944418f1148b762a6e2d37efa820a569071 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 19 Apr 2025 19:49:40 -0400 Subject: [PATCH 0028/1073] Use empty t5 of size 128 for hidream, seems to give closer results. --- comfy/text_encoders/hidream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/text_encoders/hidream.py b/comfy/text_encoders/hidream.py index ca54eaa78..8e1abcfc1 100644 --- a/comfy/text_encoders/hidream.py +++ b/comfy/text_encoders/hidream.py @@ -120,7 +120,7 @@ class HiDreamTEModel(torch.nn.Module): ll_out = None if t5_out is None: - t5_out = torch.zeros((1, 1, 4096), device=comfy.model_management.intermediate_device()) + t5_out = torch.zeros((1, 128, 4096), device=comfy.model_management.intermediate_device()) if ll_out is None: ll_out = torch.zeros((1, 32, 1, 4096), device=comfy.model_management.intermediate_device()) From 2c735c13b4fbdb9ffa654b0afadb4e05d729dd65 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 20 Apr 2025 08:33:27 -0700 Subject: [PATCH 0029/1073] Slightly better fix for #7687 --- comfy/controlnet.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index ceb24c852..11483e21d 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -736,6 +736,7 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}): return control def load_controlnet(ckpt_path, model=None, model_options={}): + model_options = model_options.copy() if "global_average_pooling" not in model_options: filename = os.path.splitext(ckpt_path)[0] if filename.endswith("_shuffle") or filename.endswith("_shuffle_fp16"): #TODO: smarter way of enabling global_average_pooling From 11b72c9c55d469c6f256eb0a8598e251ce504120 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 20 Apr 2025 23:41:51 -0700 Subject: [PATCH 0030/1073] CLIPTextEncodeHiDream. 
(#7703) --- comfy_extras/nodes_hidream.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/comfy_extras/nodes_hidream.py b/comfy_extras/nodes_hidream.py index 5a160c2ba..dfb98597b 100644 --- a/comfy_extras/nodes_hidream.py +++ b/comfy_extras/nodes_hidream.py @@ -26,7 +26,30 @@ class QuadrupleCLIPLoader: clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2, clip_path3, clip_path4], embedding_directory=folder_paths.get_folder_paths("embeddings")) return (clip,) +class CLIPTextEncodeHiDream: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip": ("CLIP", ), + "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}), + "clip_g": ("STRING", {"multiline": True, "dynamicPrompts": True}), + "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}), + "llama": ("STRING", {"multiline": True, "dynamicPrompts": True}) + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "encode" + + CATEGORY = "advanced/conditioning" + + def encode(self, clip, clip_l, clip_g, t5xxl, llama): + + tokens = clip.tokenize(clip_g) + tokens["l"] = clip.tokenize(clip_l)["l"] + tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"] + tokens["llama"] = clip.tokenize(llama)["llama"] + return (clip.encode_from_tokens_scheduled(tokens), ) NODE_CLASS_MAPPINGS = { "QuadrupleCLIPLoader": QuadrupleCLIPLoader, + "CLIPTextEncodeHiDream": CLIPTextEncodeHiDream, } From b6fd3ffd10cd367f80c44a1920151d65219b0f9d Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Mon, 21 Apr 2025 14:39:45 -0400 Subject: [PATCH 0031/1073] Populate AUTH_TOKEN_COMFY_ORG hidden input (#7709) --- execution.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/execution.py b/execution.py index d09102f55..feb61ae82 100644 --- a/execution.py +++ b/execution.py @@ -144,6 +144,8 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, e input_data_all[x] = [extra_data.get('extra_pnginfo', None)] if h[x] == "UNIQUE_ID": input_data_all[x] = [unique_id] + if h[x] == "AUTH_TOKEN_COMFY_ORG": + input_data_all[x] = [extra_data.get("auth_token_comfy_org", None)] return input_data_all, missing_keys map_node_over_list = None #Don't hook this please From ce22f687cc35b4414d792dd75812446ef56aa627 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 21 Apr 2025 11:40:29 -0700 Subject: [PATCH 0032/1073] Support for WAN VACE preview model. (#7711) * Support for WAN VACE preview model. * Remove print. 
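One non-obvious step in the WanVaceToVideo node below is how the pixel-space
control mask reaches the latent grid: the 8x8 VAE-stride block is folded into
a 64-channel axis so that only the temporal axis needs resampling. A shape
walk-through with toy sizes (sizes are illustrative; the transform is the one
used in the node):

```python
import torch

# Toy sizes: 9 pixel frames -> 3 latent frames, 32x32 pixels -> 4x4 latents.
length, height, width, vae_stride = 9, 32, 32, 8
latent_length = ((length - 1) // 4) + 1
mask = torch.ones((length, height, width, 1))

height_mask = height // vae_stride
width_mask = width // vae_stride
# Split each spatial dim into (latent cell, position within the 8x8 block)...
mask = mask.view(length, height_mask, vae_stride, width_mask, vae_stride)
# ...then move the within-block positions up front as 8*8 = 64 channels.
mask = mask.permute(2, 4, 0, 1, 3)
mask = mask.reshape(vae_stride * vae_stride, length, height_mask, width_mask)
# Finally resample only the temporal axis down to the latent frame count.
mask = torch.nn.functional.interpolate(
    mask.unsqueeze(0), size=(latent_length, height_mask, width_mask),
    mode="nearest-exact",
).squeeze(0)

assert mask.shape == (64, latent_length, height_mask, width_mask)
```

Using nearest-exact for the temporal resample keeps the mask values binary
instead of blending adjacent frames.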
--- comfy/ldm/wan/model.py | 144 +++++++++++++++++++++++++++++++++++++- comfy/model_base.py | 28 ++++++++ comfy/model_detection.py | 11 ++- comfy/supported_models.py | 12 +++- comfy_extras/nodes_wan.py | 106 ++++++++++++++++++++++++++++ 5 files changed, 295 insertions(+), 6 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 2a30497c5..5e7848bd5 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -220,6 +220,34 @@ class WanAttentionBlock(nn.Module): return x +class VaceWanAttentionBlock(WanAttentionBlock): + def __init__( + self, + cross_attn_type, + dim, + ffn_dim, + num_heads, + window_size=(-1, -1), + qk_norm=True, + cross_attn_norm=False, + eps=1e-6, + block_id=0, + operation_settings={} + ): + super().__init__(cross_attn_type, dim, ffn_dim, num_heads, window_size, qk_norm, cross_attn_norm, eps, operation_settings=operation_settings) + self.block_id = block_id + if block_id == 0: + self.before_proj = operation_settings.get("operations").Linear(self.dim, self.dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.after_proj = operation_settings.get("operations").Linear(self.dim, self.dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, c, x, **kwargs): + if self.block_id == 0: + c = self.before_proj(c) + x + c = super().forward(c, **kwargs) + c_skip = self.after_proj(c) + return c_skip, c + + class Head(nn.Module): def __init__(self, dim, out_dim, patch_size, eps=1e-6, operation_settings={}): @@ -395,6 +423,7 @@ class WanModel(torch.nn.Module): clip_fea=None, freqs=None, transformer_options={}, + **kwargs, ): r""" Forward pass through the diffusion model @@ -457,7 +486,7 @@ class WanModel(torch.nn.Module): x = self.unpatchify(x, grid_sizes) return x - def forward(self, x, timestep, context, clip_fea=None, transformer_options={},**kwargs): + def forward(self, x, timestep, context, clip_fea=None, transformer_options={}, **kwargs): bs, c, t, h, w = x.shape x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size) patch_size = self.patch_size @@ -471,7 +500,7 @@ class WanModel(torch.nn.Module): img_ids = repeat(img_ids, "t h w c -> b (t h w) c", b=bs) freqs = self.rope_embedder(img_ids).movedim(1, 2) - return self.forward_orig(x, timestep, context, clip_fea=clip_fea, freqs=freqs, transformer_options=transformer_options)[:, :, :t, :h, :w] + return self.forward_orig(x, timestep, context, clip_fea=clip_fea, freqs=freqs, transformer_options=transformer_options, **kwargs)[:, :, :t, :h, :w] def unpatchify(self, x, grid_sizes): r""" @@ -496,3 +525,114 @@ class WanModel(torch.nn.Module): u = torch.einsum('bfhwpqrc->bcfphqwr', u) u = u.reshape(b, c, *[i * j for i, j in zip(grid_sizes, self.patch_size)]) return u + + +class VaceWanModel(WanModel): + r""" + Wan diffusion backbone supporting both text-to-video and image-to-video. 
+ """ + + def __init__(self, + model_type='vace', + patch_size=(1, 2, 2), + text_len=512, + in_dim=16, + dim=2048, + ffn_dim=8192, + freq_dim=256, + text_dim=4096, + out_dim=16, + num_heads=16, + num_layers=32, + window_size=(-1, -1), + qk_norm=True, + cross_attn_norm=True, + eps=1e-6, + flf_pos_embed_token_number=None, + image_model=None, + vace_layers=None, + vace_in_dim=None, + device=None, + dtype=None, + operations=None, + ): + + super().__init__(model_type='t2v', patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim, num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, flf_pos_embed_token_number=flf_pos_embed_token_number, image_model=image_model, device=device, dtype=dtype, operations=operations) + operation_settings = {"operations": operations, "device": device, "dtype": dtype} + + # Vace + if vace_layers is not None: + self.vace_layers = vace_layers + self.vace_in_dim = vace_in_dim + # vace blocks + self.vace_blocks = nn.ModuleList([ + VaceWanAttentionBlock('t2v_cross_attn', self.dim, self.ffn_dim, self.num_heads, self.window_size, self.qk_norm, self.cross_attn_norm, self.eps, block_id=i, operation_settings=operation_settings) + for i in range(self.vace_layers) + ]) + + self.vace_layers_mapping = {i: n for n, i in enumerate(range(0, self.num_layers, self.num_layers // self.vace_layers))} + # vace patch embeddings + self.vace_patch_embedding = operations.Conv3d( + self.vace_in_dim, self.dim, kernel_size=self.patch_size, stride=self.patch_size, device=device, dtype=torch.float32 + ) + + def forward_orig( + self, + x, + t, + context, + vace_context, + clip_fea=None, + freqs=None, + transformer_options={}, + **kwargs, + ): + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t).to(dtype=x[0].dtype)) + e0 = self.time_projection(e).unflatten(1, (6, self.dim)) + + # context + context = self.text_embedding(context) + + context_img_len = None + if clip_fea is not None: + if self.img_emb is not None: + context_clip = self.img_emb(clip_fea) # bs x 257 x dim + context = torch.concat([context_clip, context], dim=1) + context_img_len = clip_fea.shape[-2] + + c = self.vace_patch_embedding(vace_context.float()).to(vace_context.dtype) + c = c.flatten(2).transpose(1, 2) + + # arguments + x_orig = x + + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) + return out + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + + ii = self.vace_layers_mapping.get(i, None) + if ii is not None: + c_skip, c = self.vace_blocks[ii](c, x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + x += c_skip + # head + x = self.head(x, e) + + # unpatchify + x = self.unpatchify(x, grid_sizes) + return x diff --git a/comfy/model_base.py b/comfy/model_base.py index 8dab1740b..04a101526 100644 --- 
a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1043,6 +1043,34 @@ class WAN21(BaseModel): out['clip_fea'] = comfy.conds.CONDRegular(clip_vision_output.penultimate_hidden_states) return out + +class WAN21_Vace(WAN21): + def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): + super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.VaceWanModel) + self.image_to_video = image_to_video + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + noise = kwargs.get("noise", None) + noise_shape = list(noise.shape) + vace_frames = kwargs.get("vace_frames", None) + if vace_frames is None: + noise_shape[1] = 32 + vace_frames = torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype) + + for i in range(0, vace_frames.shape[1], 16): + vace_frames = vace_frames.clone() + vace_frames[:, i:i + 16] = self.process_latent_in(vace_frames[:, i:i + 16]) + + mask = kwargs.get("vace_mask", None) + if mask is None: + noise_shape[1] = 64 + mask = torch.ones(noise_shape, device=noise.device, dtype=noise.dtype) + + out['vace_context'] = comfy.conds.CONDRegular(torch.cat([vace_frames.to(noise), mask.to(noise)], dim=1)) + return out + + class Hunyuan3Dv2(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hunyuan3d.model.Hunyuan3Dv2) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 6499bf238..76de78a8a 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -317,10 +317,15 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["cross_attn_norm"] = True dit_config["eps"] = 1e-6 dit_config["in_dim"] = state_dict['{}patch_embedding.weight'.format(key_prefix)].shape[1] - if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys: - dit_config["model_type"] = "i2v" + if '{}vace_patch_embedding.weight'.format(key_prefix) in state_dict_keys: + dit_config["model_type"] = "vace" + dit_config["vace_in_dim"] = state_dict['{}vace_patch_embedding.weight'.format(key_prefix)].shape[1] + dit_config["vace_layers"] = count_blocks(state_dict_keys, '{}vace_blocks.'.format(key_prefix) + '{}.') else: - dit_config["model_type"] = "t2v" + if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys: + dit_config["model_type"] = "i2v" + else: + dit_config["model_type"] = "t2v" flf_weight = state_dict.get('{}img_emb.emb_pos'.format(key_prefix)) if flf_weight is not None: dit_config["flf_pos_embed_token_number"] = flf_weight.shape[1] diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 81c47ac68..5e55035cf 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -987,6 +987,16 @@ class WAN21_FunControl2V(WAN21_T2V): out = model_base.WAN21(self, image_to_video=False, device=device) return out +class WAN21_Vace(WAN21_T2V): + unet_config = { + "image_model": "wan2.1", + "model_type": "vace", + } + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.WAN21_Vace(self, image_to_video=False, device=device) + return out + class Hunyuan3Dv2(supported_models_base.BASE): unet_config = { "image_model": "hunyuan3d2", @@ -1055,6 +1065,6 @@ class HiDream(supported_models_base.BASE): return None # TODO -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, 
Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 8ad358ce8..19a6cdfa4 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -193,9 +193,115 @@ class WanFunInpaintToVideo: return flfv.encode(positive, negative, vae, width, height, length, batch_size, start_image=start_image, end_image=end_image, clip_vision_start_image=clip_vision_output) +class WanVaceToVideo: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "vae": ("VAE", ), + "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + "optional": {"control_video": ("IMAGE", ), + "control_masks": ("MASK", ), + "reference_image": ("IMAGE", ), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT", "INT") + RETURN_NAMES = ("positive", "negative", "latent", "trim_latent") + FUNCTION = "encode" + + CATEGORY = "conditioning/video_models" + + EXPERIMENTAL = True + + def encode(self, positive, negative, vae, width, height, length, batch_size, control_video=None, control_masks=None, reference_image=None): + latent_length = ((length - 1) // 4) + 1 + if control_video is not None: + control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + if control_video.shape[0] < length: + control_video = torch.nn.functional.pad(control_video, (0, 0, 0, 0, 0, 0, 0, length - control_video.shape[0]), value=0.5) + else: + control_video = torch.ones((length, height, width, 3)) * 0.5 + + if reference_image is not None: + reference_image = comfy.utils.common_upscale(reference_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + reference_image = vae.encode(reference_image[:, :, :, :3]) + reference_image = torch.cat([reference_image, comfy.latent_formats.Wan21().process_out(torch.zeros_like(reference_image))], dim=1) + + if control_masks is None: + mask = torch.ones((length, height, width, 1)) + else: + mask = control_masks + if mask.ndim == 3: + mask = mask.unsqueeze(1) + mask = comfy.utils.common_upscale(mask[:length], width, height, "bilinear", "center").movedim(1, -1) + if mask.shape[0] < length: + mask = torch.nn.functional.pad(mask, (0, 0, 0, 0, 0, 0, 0, length - mask.shape[0]), value=1.0) + + control_video = control_video - 0.5 + inactive = 
(control_video * (1 - mask)) + 0.5 + reactive = (control_video * mask) + 0.5 + + inactive = vae.encode(inactive[:, :, :, :3]) + reactive = vae.encode(reactive[:, :, :, :3]) + control_video_latent = torch.cat((inactive, reactive), dim=1) + if reference_image is not None: + control_video_latent = torch.cat((reference_image, control_video_latent), dim=2) + + vae_stride = 8 + height_mask = height // vae_stride + width_mask = width // vae_stride + mask = mask.view(length, height_mask, vae_stride, width_mask, vae_stride) + mask = mask.permute(2, 4, 0, 1, 3) + mask = mask.reshape(vae_stride * vae_stride, length, height_mask, width_mask) + mask = torch.nn.functional.interpolate(mask.unsqueeze(0), size=(latent_length, height_mask, width_mask), mode='nearest-exact').squeeze(0) + + trim_latent = 0 + if reference_image is not None: + mask_pad = torch.zeros_like(mask[:, :reference_image.shape[2], :, :]) + mask = torch.cat((mask_pad, mask), dim=1) + latent_length += reference_image.shape[2] + trim_latent = reference_image.shape[2] + + mask = mask.unsqueeze(0) + positive = node_helpers.conditioning_set_values(positive, {"vace_frames": control_video_latent, "vace_mask": mask}) + negative = node_helpers.conditioning_set_values(negative, {"vace_frames": control_video_latent, "vace_mask": mask}) + + latent = torch.zeros([batch_size, 16, latent_length, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + out_latent = {} + out_latent["samples"] = latent + return (positive, negative, out_latent, trim_latent) + +class TrimVideoLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples": ("LATENT",), + "trim_amount": ("INT", {"default": 0, "min": 0, "max": 99999}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/video" + + EXPERIMENTAL = True + + def op(self, samples, trim_amount): + samples_out = samples.copy() + + s1 = samples["samples"] + samples_out["samples"] = s1[:, :, trim_amount:] + return (samples_out,) + + NODE_CLASS_MAPPINGS = { "WanImageToVideo": WanImageToVideo, "WanFunControlToVideo": WanFunControlToVideo, "WanFunInpaintToVideo": WanFunInpaintToVideo, "WanFirstLastFrameToVideo": WanFirstLastFrameToVideo, + "WanVaceToVideo": WanVaceToVideo, + "TrimVideoLatent": TrimVideoLatent, } From 5d51794607d71e1bbffd7d9d5a1eed417de771ae Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Tue, 22 Apr 2025 06:13:00 +1000 Subject: [PATCH 0033/1073] Add node type hint for socketless option (#7714) * Add node type hint for socketless option * nit - Doc --- comfy/comfy_types/node_typing.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index 42ed5174e..a348791a9 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -115,6 +115,11 @@ class InputTypeOptions(TypedDict): """When a link exists, rather than receiving the evaluated value, you will receive the link (i.e. `["nodeId", ]`). Designed for node expansion.""" tooltip: NotRequired[str] """Tooltip for the input (or widget), shown on pointer hover""" + socketless: NotRequired[bool] + """All inputs (including widgets) have an input socket to connect links. When ``true``, if there is a widget for this input, no socket will be created. 
+ Available from frontend v1.17.5 + Ref: https://github.com/Comfy-Org/ComfyUI_frontend/pull/3548 + """ # class InputTypeNumber(InputTypeOptions): # default: float | int min: NotRequired[float] From 9d57b8afd8c9f14776b1464919472ae17de2b03e Mon Sep 17 00:00:00 2001 From: "Alexander G. Morano" Date: Mon, 21 Apr 2025 18:51:31 -0400 Subject: [PATCH 0034/1073] Update nodes_primitive.py (#7716) Allow FLOAT and INT types to support negative numbers. Caps the numbers at the user's own system min and max. --- comfy_extras/nodes_primitive.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_primitive.py b/comfy_extras/nodes_primitive.py index b770104fb..184b990c3 100644 --- a/comfy_extras/nodes_primitive.py +++ b/comfy_extras/nodes_primitive.py @@ -1,6 +1,8 @@ # Primitive nodes that are evaluated at backend. from __future__ import annotations +import sys + from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO @@ -23,7 +25,7 @@ class Int(ComfyNodeABC): @classmethod def INPUT_TYPES(cls) -> InputTypeDict: return { - "required": {"value": (IO.INT, {"control_after_generate": True})}, + "required": {"value": (IO.INT, {"min": -sys.maxsize, "max": sys.maxsize, "control_after_generate": True})}, } RETURN_TYPES = (IO.INT,) @@ -38,7 +40,7 @@ class Float(ComfyNodeABC): @classmethod def INPUT_TYPES(cls) -> InputTypeDict: return { - "required": {"value": (IO.FLOAT, {})}, + "required": {"value": (IO.FLOAT, {"min": -sys.maxsize, "max": sys.maxsize})}, } RETURN_TYPES = (IO.FLOAT,) From 5d0d4ee98a24b6c72c94635fc5a6e93af2b005bc Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 21 Apr 2025 16:36:20 -0700 Subject: [PATCH 0035/1073] Add strength control for vace. 
(#7717) --- comfy/ldm/wan/model.py | 3 ++- comfy/model_base.py | 3 +++ comfy_extras/nodes_wan.py | 7 ++++--- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 5e7848bd5..4ef86d5f2 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -582,6 +582,7 @@ class VaceWanModel(WanModel): t, context, vace_context, + vace_strength=1.0, clip_fea=None, freqs=None, transformer_options={}, @@ -629,7 +630,7 @@ class VaceWanModel(WanModel): ii = self.vace_layers_mapping.get(i, None) if ii is not None: c_skip, c = self.vace_blocks[ii](c, x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) - x += c_skip + x += c_skip * vace_strength # head x = self.head(x, e) diff --git a/comfy/model_base.py b/comfy/model_base.py index 04a101526..b0c6a465b 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1068,6 +1068,9 @@ class WAN21_Vace(WAN21): mask = torch.ones(noise_shape, device=noise.device, dtype=noise.dtype) out['vace_context'] = comfy.conds.CONDRegular(torch.cat([vace_frames.to(noise), mask.to(noise)], dim=1)) + + vace_strength = kwargs.get("vace_strength", 1.0) + out['vace_strength'] = comfy.conds.CONDConstant(vace_strength) return out diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 19a6cdfa4..9dda64597 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -203,6 +203,7 @@ class WanVaceToVideo: "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1000.0, "step": 0.01}), }, "optional": {"control_video": ("IMAGE", ), "control_masks": ("MASK", ), @@ -217,7 +218,7 @@ class WanVaceToVideo: EXPERIMENTAL = True - def encode(self, positive, negative, vae, width, height, length, batch_size, control_video=None, control_masks=None, reference_image=None): + def encode(self, positive, negative, vae, width, height, length, batch_size, strength, control_video=None, control_masks=None, reference_image=None): latent_length = ((length - 1) // 4) + 1 if control_video is not None: control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) @@ -267,8 +268,8 @@ class WanVaceToVideo: trim_latent = reference_image.shape[2] mask = mask.unsqueeze(0) - positive = node_helpers.conditioning_set_values(positive, {"vace_frames": control_video_latent, "vace_mask": mask}) - negative = node_helpers.conditioning_set_values(negative, {"vace_frames": control_video_latent, "vace_mask": mask}) + positive = node_helpers.conditioning_set_values(positive, {"vace_frames": control_video_latent, "vace_mask": mask, "vace_strength": strength}) + negative = node_helpers.conditioning_set_values(negative, {"vace_frames": control_video_latent, "vace_mask": mask, "vace_strength": strength}) latent = torch.zeros([batch_size, 16, latent_length, height // 8, width // 8], device=comfy.model_management.intermediate_device()) out_latent = {} From 1f3fba2af518073551a73582c8dce7bae4ad7716 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 22 Apr 2025 08:15:32 +0800 Subject: [PATCH 0036/1073] Unified Weight Adapter system for better maintainability and future feature of Lora system (#7540) --- comfy/lora.py | 321 
++----------------------------- comfy/weight_adapter/__init__.py | 13 ++ comfy/weight_adapter/base.py | 94 +++++++++ comfy/weight_adapter/glora.py | 93 +++++++++ comfy/weight_adapter/loha.py | 100 ++++++++++ comfy/weight_adapter/lokr.py | 133 +++++++++++++ comfy/weight_adapter/lora.py | 142 ++++++++++++++ 7 files changed, 592 insertions(+), 304 deletions(-) create mode 100644 comfy/weight_adapter/__init__.py create mode 100644 comfy/weight_adapter/base.py create mode 100644 comfy/weight_adapter/glora.py create mode 100644 comfy/weight_adapter/loha.py create mode 100644 comfy/weight_adapter/lokr.py create mode 100644 comfy/weight_adapter/lora.py diff --git a/comfy/lora.py b/comfy/lora.py index bc9f3022a..8760a21fb 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -20,6 +20,7 @@ from __future__ import annotations import comfy.utils import comfy.model_management import comfy.model_base +import comfy.weight_adapter as weight_adapter import logging import torch @@ -49,139 +50,12 @@ def load_lora(lora, to_load, log_missing=True): dora_scale = lora[dora_scale_name] loaded_keys.add(dora_scale_name) - reshape_name = "{}.reshape_weight".format(x) - reshape = None - if reshape_name in lora.keys(): - try: - reshape = lora[reshape_name].tolist() - loaded_keys.add(reshape_name) - except: - pass - - regular_lora = "{}.lora_up.weight".format(x) - diffusers_lora = "{}_lora.up.weight".format(x) - diffusers2_lora = "{}.lora_B.weight".format(x) - diffusers3_lora = "{}.lora.up.weight".format(x) - mochi_lora = "{}.lora_B".format(x) - transformers_lora = "{}.lora_linear_layer.up.weight".format(x) - A_name = None - - if regular_lora in lora.keys(): - A_name = regular_lora - B_name = "{}.lora_down.weight".format(x) - mid_name = "{}.lora_mid.weight".format(x) - elif diffusers_lora in lora.keys(): - A_name = diffusers_lora - B_name = "{}_lora.down.weight".format(x) - mid_name = None - elif diffusers2_lora in lora.keys(): - A_name = diffusers2_lora - B_name = "{}.lora_A.weight".format(x) - mid_name = None - elif diffusers3_lora in lora.keys(): - A_name = diffusers3_lora - B_name = "{}.lora.down.weight".format(x) - mid_name = None - elif mochi_lora in lora.keys(): - A_name = mochi_lora - B_name = "{}.lora_A".format(x) - mid_name = None - elif transformers_lora in lora.keys(): - A_name = transformers_lora - B_name ="{}.lora_linear_layer.down.weight".format(x) - mid_name = None - - if A_name is not None: - mid = None - if mid_name is not None and mid_name in lora.keys(): - mid = lora[mid_name] - loaded_keys.add(mid_name) - patch_dict[to_load[x]] = ("lora", (lora[A_name], lora[B_name], alpha, mid, dora_scale, reshape)) - loaded_keys.add(A_name) - loaded_keys.add(B_name) - - - ######## loha - hada_w1_a_name = "{}.hada_w1_a".format(x) - hada_w1_b_name = "{}.hada_w1_b".format(x) - hada_w2_a_name = "{}.hada_w2_a".format(x) - hada_w2_b_name = "{}.hada_w2_b".format(x) - hada_t1_name = "{}.hada_t1".format(x) - hada_t2_name = "{}.hada_t2".format(x) - if hada_w1_a_name in lora.keys(): - hada_t1 = None - hada_t2 = None - if hada_t1_name in lora.keys(): - hada_t1 = lora[hada_t1_name] - hada_t2 = lora[hada_t2_name] - loaded_keys.add(hada_t1_name) - loaded_keys.add(hada_t2_name) - - patch_dict[to_load[x]] = ("loha", (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2, dora_scale)) - loaded_keys.add(hada_w1_a_name) - loaded_keys.add(hada_w1_b_name) - loaded_keys.add(hada_w2_a_name) - loaded_keys.add(hada_w2_b_name) - - - ######## lokr - lokr_w1_name = "{}.lokr_w1".format(x) 
- lokr_w2_name = "{}.lokr_w2".format(x) - lokr_w1_a_name = "{}.lokr_w1_a".format(x) - lokr_w1_b_name = "{}.lokr_w1_b".format(x) - lokr_t2_name = "{}.lokr_t2".format(x) - lokr_w2_a_name = "{}.lokr_w2_a".format(x) - lokr_w2_b_name = "{}.lokr_w2_b".format(x) - - lokr_w1 = None - if lokr_w1_name in lora.keys(): - lokr_w1 = lora[lokr_w1_name] - loaded_keys.add(lokr_w1_name) - - lokr_w2 = None - if lokr_w2_name in lora.keys(): - lokr_w2 = lora[lokr_w2_name] - loaded_keys.add(lokr_w2_name) - - lokr_w1_a = None - if lokr_w1_a_name in lora.keys(): - lokr_w1_a = lora[lokr_w1_a_name] - loaded_keys.add(lokr_w1_a_name) - - lokr_w1_b = None - if lokr_w1_b_name in lora.keys(): - lokr_w1_b = lora[lokr_w1_b_name] - loaded_keys.add(lokr_w1_b_name) - - lokr_w2_a = None - if lokr_w2_a_name in lora.keys(): - lokr_w2_a = lora[lokr_w2_a_name] - loaded_keys.add(lokr_w2_a_name) - - lokr_w2_b = None - if lokr_w2_b_name in lora.keys(): - lokr_w2_b = lora[lokr_w2_b_name] - loaded_keys.add(lokr_w2_b_name) - - lokr_t2 = None - if lokr_t2_name in lora.keys(): - lokr_t2 = lora[lokr_t2_name] - loaded_keys.add(lokr_t2_name) - - if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None): - patch_dict[to_load[x]] = ("lokr", (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2, dora_scale)) - - #glora - a1_name = "{}.a1.weight".format(x) - a2_name = "{}.a2.weight".format(x) - b1_name = "{}.b1.weight".format(x) - b2_name = "{}.b2.weight".format(x) - if a1_name in lora: - patch_dict[to_load[x]] = ("glora", (lora[a1_name], lora[a2_name], lora[b1_name], lora[b2_name], alpha, dora_scale)) - loaded_keys.add(a1_name) - loaded_keys.add(a2_name) - loaded_keys.add(b1_name) - loaded_keys.add(b2_name) + for adapter_cls in weight_adapter.adapters: + adapter = adapter_cls.load(x, lora, alpha, dora_scale, loaded_keys) + if adapter is not None: + patch_dict[to_load[x]] = adapter + loaded_keys.update(adapter.loaded_keys) + continue w_norm_name = "{}.w_norm".format(x) b_norm_name = "{}.b_norm".format(x) @@ -408,26 +282,6 @@ def model_lora_keys_unet(model, key_map={}): return key_map -def weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function): - dora_scale = comfy.model_management.cast_to_device(dora_scale, weight.device, intermediate_dtype) - lora_diff *= alpha - weight_calc = weight + function(lora_diff).type(weight.dtype) - weight_norm = ( - weight_calc.transpose(0, 1) - .reshape(weight_calc.shape[1], -1) - .norm(dim=1, keepdim=True) - .reshape(weight_calc.shape[1], *[1] * (weight_calc.dim() - 1)) - .transpose(0, 1) - ) - - weight_calc *= (dora_scale / weight_norm).type(weight.dtype) - if strength != 1.0: - weight_calc -= weight - weight += strength * (weight_calc) - else: - weight[:] = weight_calc - return weight - def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Tensor: """ Pad a tensor to a new shape with zeros. 
@@ -482,6 +336,16 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori if isinstance(v, list): v = (calculate_weight(v[1:], v[0][1](comfy.model_management.cast_to_device(v[0][0], weight.device, intermediate_dtype, copy=True), inplace=True), key, intermediate_dtype=intermediate_dtype), ) + if isinstance(v, weight_adapter.WeightAdapterBase): + output = v.calculate_weight(weight, key, strength, strength_model, offset, function, intermediate_dtype, original_weights) + if output is None: + logging.warning("Calculate Weight Failed: {} {}".format(v.name, key)) + else: + weight = output + if old_weight is not None: + weight = old_weight + continue + if len(v) == 1: patch_type = "diff" elif len(v) == 2: @@ -508,157 +372,6 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori diff_weight = comfy.model_management.cast_to_device(target_weight, weight.device, intermediate_dtype) - \ comfy.model_management.cast_to_device(original_weights[key][0][0], weight.device, intermediate_dtype) weight += function(strength * comfy.model_management.cast_to_device(diff_weight, weight.device, weight.dtype)) - elif patch_type == "lora": #lora/locon - mat1 = comfy.model_management.cast_to_device(v[0], weight.device, intermediate_dtype) - mat2 = comfy.model_management.cast_to_device(v[1], weight.device, intermediate_dtype) - dora_scale = v[4] - reshape = v[5] - - if reshape is not None: - weight = pad_tensor_to_shape(weight, reshape) - - if v[2] is not None: - alpha = v[2] / mat2.shape[0] - else: - alpha = 1.0 - - if v[3] is not None: - #locon mid weights, hopefully the math is fine because I didn't properly test it - mat3 = comfy.model_management.cast_to_device(v[3], weight.device, intermediate_dtype) - final_shape = [mat2.shape[1], mat2.shape[0], mat3.shape[2], mat3.shape[3]] - mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1), mat3.transpose(0, 1).flatten(start_dim=1)).reshape(final_shape).transpose(0, 1) - try: - lora_diff = torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1)).reshape(weight.shape) - if dora_scale is not None: - weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) - else: - weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) - except Exception as e: - logging.error("ERROR {} {} {}".format(patch_type, key, e)) - elif patch_type == "lokr": - w1 = v[0] - w2 = v[1] - w1_a = v[3] - w1_b = v[4] - w2_a = v[5] - w2_b = v[6] - t2 = v[7] - dora_scale = v[8] - dim = None - - if w1 is None: - dim = w1_b.shape[0] - w1 = torch.mm(comfy.model_management.cast_to_device(w1_a, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w1_b, weight.device, intermediate_dtype)) - else: - w1 = comfy.model_management.cast_to_device(w1, weight.device, intermediate_dtype) - - if w2 is None: - dim = w2_b.shape[0] - if t2 is None: - w2 = torch.mm(comfy.model_management.cast_to_device(w2_a, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w2_b, weight.device, intermediate_dtype)) - else: - w2 = torch.einsum('i j k l, j r, i p -> p r k l', - comfy.model_management.cast_to_device(t2, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w2_b, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w2_a, weight.device, intermediate_dtype)) - else: - w2 = comfy.model_management.cast_to_device(w2, weight.device, intermediate_dtype) - - if len(w2.shape) == 4: - w1 = w1.unsqueeze(2).unsqueeze(2) - if 
v[2] is not None and dim is not None: - alpha = v[2] / dim - else: - alpha = 1.0 - - try: - lora_diff = torch.kron(w1, w2).reshape(weight.shape) - if dora_scale is not None: - weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) - else: - weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) - except Exception as e: - logging.error("ERROR {} {} {}".format(patch_type, key, e)) - elif patch_type == "loha": - w1a = v[0] - w1b = v[1] - if v[2] is not None: - alpha = v[2] / w1b.shape[0] - else: - alpha = 1.0 - - w2a = v[3] - w2b = v[4] - dora_scale = v[7] - if v[5] is not None: #cp decomposition - t1 = v[5] - t2 = v[6] - m1 = torch.einsum('i j k l, j r, i p -> p r k l', - comfy.model_management.cast_to_device(t1, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w1b, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w1a, weight.device, intermediate_dtype)) - - m2 = torch.einsum('i j k l, j r, i p -> p r k l', - comfy.model_management.cast_to_device(t2, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w2b, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w2a, weight.device, intermediate_dtype)) - else: - m1 = torch.mm(comfy.model_management.cast_to_device(w1a, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w1b, weight.device, intermediate_dtype)) - m2 = torch.mm(comfy.model_management.cast_to_device(w2a, weight.device, intermediate_dtype), - comfy.model_management.cast_to_device(w2b, weight.device, intermediate_dtype)) - - try: - lora_diff = (m1 * m2).reshape(weight.shape) - if dora_scale is not None: - weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) - else: - weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) - except Exception as e: - logging.error("ERROR {} {} {}".format(patch_type, key, e)) - elif patch_type == "glora": - dora_scale = v[5] - - old_glora = False - if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]: - rank = v[0].shape[0] - old_glora = True - - if v[3].shape[0] == v[2].shape[1] == v[0].shape[1] == v[1].shape[0]: - if old_glora and v[1].shape[0] == weight.shape[0] and weight.shape[0] == weight.shape[1]: - pass - else: - old_glora = False - rank = v[1].shape[0] - - a1 = comfy.model_management.cast_to_device(v[0].flatten(start_dim=1), weight.device, intermediate_dtype) - a2 = comfy.model_management.cast_to_device(v[1].flatten(start_dim=1), weight.device, intermediate_dtype) - b1 = comfy.model_management.cast_to_device(v[2].flatten(start_dim=1), weight.device, intermediate_dtype) - b2 = comfy.model_management.cast_to_device(v[3].flatten(start_dim=1), weight.device, intermediate_dtype) - - if v[4] is not None: - alpha = v[4] / rank - else: - alpha = 1.0 - - try: - if old_glora: - lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=intermediate_dtype), a2), a1)).reshape(weight.shape) #old lycoris glora - else: - if weight.dim() > 2: - lora_diff = torch.einsum("o i ..., i j -> o j ...", torch.einsum("o i ..., i j -> o j ...", weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape) - else: - lora_diff = torch.mm(torch.mm(weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape) - lora_diff += torch.mm(b1, b2).reshape(weight.shape) - - if dora_scale is not None: - weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, 
intermediate_dtype, function) - else: - weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) - except Exception as e: - logging.error("ERROR {} {} {}".format(patch_type, key, e)) else: logging.warning("patch type not recognized {} {}".format(patch_type, key)) diff --git a/comfy/weight_adapter/__init__.py b/comfy/weight_adapter/__init__.py new file mode 100644 index 000000000..e6cd805b6 --- /dev/null +++ b/comfy/weight_adapter/__init__.py @@ -0,0 +1,13 @@ +from .base import WeightAdapterBase +from .lora import LoRAAdapter +from .loha import LoHaAdapter +from .lokr import LoKrAdapter +from .glora import GLoRAAdapter + + +adapters: list[type[WeightAdapterBase]] = [ + LoRAAdapter, + LoHaAdapter, + LoKrAdapter, + GLoRAAdapter, +] diff --git a/comfy/weight_adapter/base.py b/comfy/weight_adapter/base.py new file mode 100644 index 000000000..54af3babe --- /dev/null +++ b/comfy/weight_adapter/base.py @@ -0,0 +1,94 @@ +from typing import Optional + +import torch +import torch.nn as nn + +import comfy.model_management + + +class WeightAdapterBase: + name: str + loaded_keys: set[str] + weights: list[torch.Tensor] + + @classmethod + def load(cls, x: str, lora: dict[str, torch.Tensor]) -> Optional["WeightAdapterBase"]: + raise NotImplementedError + + def to_train(self) -> "WeightAdapterTrainBase": + raise NotImplementedError + + def calculate_weight( + self, + weight, + key, + strength, + strength_model, + offset, + function, + intermediate_dtype=torch.float32, + original_weight=None, + ): + raise NotImplementedError + + +class WeightAdapterTrainBase(nn.Module): + def __init__(self): + super().__init__() + + # [TODO] Collaborate with LoRA training PR #7032 + + +def weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function): + dora_scale = comfy.model_management.cast_to_device(dora_scale, weight.device, intermediate_dtype) + lora_diff *= alpha + weight_calc = weight + function(lora_diff).type(weight.dtype) + weight_norm = ( + weight_calc.transpose(0, 1) + .reshape(weight_calc.shape[1], -1) + .norm(dim=1, keepdim=True) + .reshape(weight_calc.shape[1], *[1] * (weight_calc.dim() - 1)) + .transpose(0, 1) + ) + + weight_calc *= (dora_scale / weight_norm).type(weight.dtype) + if strength != 1.0: + weight_calc -= weight + weight += strength * (weight_calc) + else: + weight[:] = weight_calc + return weight + + +def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Tensor: + """ + Pad a tensor to a new shape with zeros. + + Args: + tensor (torch.Tensor): The original tensor to be padded. + new_shape (List[int]): The desired shape of the padded tensor. + + Returns: + torch.Tensor: A new tensor padded with zeros to the specified shape. + + Note: + If the new shape is smaller than the original tensor in any dimension, + the original tensor will be truncated in that dimension. 
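+ Example (illustrative addition, not in the original patch): pad_tensor_to_shape(torch.ones(2, 2), [3, 4]) returns a (3, 4) tensor whose top-left 2x2 block is ones and which is zero elsewhere; per the checks in the body below, a new shape smaller than the input raises ValueError rather than truncating.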
+ """ + if any([new_shape[i] < tensor.shape[i] for i in range(len(new_shape))]): + raise ValueError("The new shape must be larger than the original tensor in all dimensions") + + if len(new_shape) != len(tensor.shape): + raise ValueError("The new shape must have the same number of dimensions as the original tensor") + + # Create a new tensor filled with zeros + padded_tensor = torch.zeros(new_shape, dtype=tensor.dtype, device=tensor.device) + + # Create slicing tuples for both tensors + orig_slices = tuple(slice(0, dim) for dim in tensor.shape) + new_slices = tuple(slice(0, dim) for dim in tensor.shape) + + # Copy the original tensor into the new tensor + padded_tensor[new_slices] = tensor[orig_slices] + + return padded_tensor diff --git a/comfy/weight_adapter/glora.py b/comfy/weight_adapter/glora.py new file mode 100644 index 000000000..939abbba5 --- /dev/null +++ b/comfy/weight_adapter/glora.py @@ -0,0 +1,93 @@ +import logging +from typing import Optional + +import torch +import comfy.model_management +from .base import WeightAdapterBase, weight_decompose + + +class GLoRAAdapter(WeightAdapterBase): + name = "glora" + + def __init__(self, loaded_keys, weights): + self.loaded_keys = loaded_keys + self.weights = weights + + @classmethod + def load( + cls, + x: str, + lora: dict[str, torch.Tensor], + alpha: float, + dora_scale: torch.Tensor, + loaded_keys: set[str] = None, + ) -> Optional["GLoRAAdapter"]: + if loaded_keys is None: + loaded_keys = set() + a1_name = "{}.a1.weight".format(x) + a2_name = "{}.a2.weight".format(x) + b1_name = "{}.b1.weight".format(x) + b2_name = "{}.b2.weight".format(x) + if a1_name in lora: + weights = (lora[a1_name], lora[a2_name], lora[b1_name], lora[b2_name], alpha, dora_scale) + loaded_keys.add(a1_name) + loaded_keys.add(a2_name) + loaded_keys.add(b1_name) + loaded_keys.add(b2_name) + return cls(loaded_keys, weights) + else: + return None + + def calculate_weight( + self, + weight, + key, + strength, + strength_model, + offset, + function, + intermediate_dtype=torch.float32, + original_weight=None, + ): + v = self.weights + dora_scale = v[5] + + old_glora = False + if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]: + rank = v[0].shape[0] + old_glora = True + + if v[3].shape[0] == v[2].shape[1] == v[0].shape[1] == v[1].shape[0]: + if old_glora and v[1].shape[0] == weight.shape[0] and weight.shape[0] == weight.shape[1]: + pass + else: + old_glora = False + rank = v[1].shape[0] + + a1 = comfy.model_management.cast_to_device(v[0].flatten(start_dim=1), weight.device, intermediate_dtype) + a2 = comfy.model_management.cast_to_device(v[1].flatten(start_dim=1), weight.device, intermediate_dtype) + b1 = comfy.model_management.cast_to_device(v[2].flatten(start_dim=1), weight.device, intermediate_dtype) + b2 = comfy.model_management.cast_to_device(v[3].flatten(start_dim=1), weight.device, intermediate_dtype) + + if v[4] is not None: + alpha = v[4] / rank + else: + alpha = 1.0 + + try: + if old_glora: + lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=intermediate_dtype), a2), a1)).reshape(weight.shape) #old lycoris glora + else: + if weight.dim() > 2: + lora_diff = torch.einsum("o i ..., i j -> o j ...", torch.einsum("o i ..., i j -> o j ...", weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape) + else: + lora_diff = torch.mm(torch.mm(weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape) + lora_diff += torch.mm(b1, b2).reshape(weight.shape) + + if dora_scale is not None: + weight = 
weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) + else: + weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + except Exception as e: + logging.error("ERROR {} {} {}".format(self.name, key, e)) + return weight diff --git a/comfy/weight_adapter/loha.py b/comfy/weight_adapter/loha.py new file mode 100644 index 000000000..ce79abad5 --- /dev/null +++ b/comfy/weight_adapter/loha.py @@ -0,0 +1,100 @@ +import logging +from typing import Optional + +import torch +import comfy.model_management +from .base import WeightAdapterBase, weight_decompose + + +class LoHaAdapter(WeightAdapterBase): + name = "loha" + + def __init__(self, loaded_keys, weights): + self.loaded_keys = loaded_keys + self.weights = weights + + @classmethod + def load( + cls, + x: str, + lora: dict[str, torch.Tensor], + alpha: float, + dora_scale: torch.Tensor, + loaded_keys: set[str] = None, + ) -> Optional["LoHaAdapter"]: + if loaded_keys is None: + loaded_keys = set() + + hada_w1_a_name = "{}.hada_w1_a".format(x) + hada_w1_b_name = "{}.hada_w1_b".format(x) + hada_w2_a_name = "{}.hada_w2_a".format(x) + hada_w2_b_name = "{}.hada_w2_b".format(x) + hada_t1_name = "{}.hada_t1".format(x) + hada_t2_name = "{}.hada_t2".format(x) + if hada_w1_a_name in lora.keys(): + hada_t1 = None + hada_t2 = None + if hada_t1_name in lora.keys(): + hada_t1 = lora[hada_t1_name] + hada_t2 = lora[hada_t2_name] + loaded_keys.add(hada_t1_name) + loaded_keys.add(hada_t2_name) + + weights = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2, dora_scale) + loaded_keys.add(hada_w1_a_name) + loaded_keys.add(hada_w1_b_name) + loaded_keys.add(hada_w2_a_name) + loaded_keys.add(hada_w2_b_name) + return cls(loaded_keys, weights) + else: + return None + + def calculate_weight( + self, + weight, + key, + strength, + strength_model, + offset, + function, + intermediate_dtype=torch.float32, + original_weight=None, + ): + v = self.weights + w1a = v[0] + w1b = v[1] + if v[2] is not None: + alpha = v[2] / w1b.shape[0] + else: + alpha = 1.0 + + w2a = v[3] + w2b = v[4] + dora_scale = v[7] + if v[5] is not None: #cp decomposition + t1 = v[5] + t2 = v[6] + m1 = torch.einsum('i j k l, j r, i p -> p r k l', + comfy.model_management.cast_to_device(t1, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w1b, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w1a, weight.device, intermediate_dtype)) + + m2 = torch.einsum('i j k l, j r, i p -> p r k l', + comfy.model_management.cast_to_device(t2, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w2b, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w2a, weight.device, intermediate_dtype)) + else: + m1 = torch.mm(comfy.model_management.cast_to_device(w1a, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w1b, weight.device, intermediate_dtype)) + m2 = torch.mm(comfy.model_management.cast_to_device(w2a, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w2b, weight.device, intermediate_dtype)) + + try: + lora_diff = (m1 * m2).reshape(weight.shape) + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) + else: + weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + except Exception as e: + logging.error("ERROR {} {} {}".format(self.name, key, e)) + return 
weight diff --git a/comfy/weight_adapter/lokr.py b/comfy/weight_adapter/lokr.py new file mode 100644 index 000000000..51233db2d --- /dev/null +++ b/comfy/weight_adapter/lokr.py @@ -0,0 +1,133 @@ +import logging +from typing import Optional + +import torch +import comfy.model_management +from .base import WeightAdapterBase, weight_decompose + + +class LoKrAdapter(WeightAdapterBase): + name = "lokr" + + def __init__(self, loaded_keys, weights): + self.loaded_keys = loaded_keys + self.weights = weights + + @classmethod + def load( + cls, + x: str, + lora: dict[str, torch.Tensor], + alpha: float, + dora_scale: torch.Tensor, + loaded_keys: set[str] = None, + ) -> Optional["LoKrAdapter"]: + if loaded_keys is None: + loaded_keys = set() + lokr_w1_name = "{}.lokr_w1".format(x) + lokr_w2_name = "{}.lokr_w2".format(x) + lokr_w1_a_name = "{}.lokr_w1_a".format(x) + lokr_w1_b_name = "{}.lokr_w1_b".format(x) + lokr_t2_name = "{}.lokr_t2".format(x) + lokr_w2_a_name = "{}.lokr_w2_a".format(x) + lokr_w2_b_name = "{}.lokr_w2_b".format(x) + + lokr_w1 = None + if lokr_w1_name in lora.keys(): + lokr_w1 = lora[lokr_w1_name] + loaded_keys.add(lokr_w1_name) + + lokr_w2 = None + if lokr_w2_name in lora.keys(): + lokr_w2 = lora[lokr_w2_name] + loaded_keys.add(lokr_w2_name) + + lokr_w1_a = None + if lokr_w1_a_name in lora.keys(): + lokr_w1_a = lora[lokr_w1_a_name] + loaded_keys.add(lokr_w1_a_name) + + lokr_w1_b = None + if lokr_w1_b_name in lora.keys(): + lokr_w1_b = lora[lokr_w1_b_name] + loaded_keys.add(lokr_w1_b_name) + + lokr_w2_a = None + if lokr_w2_a_name in lora.keys(): + lokr_w2_a = lora[lokr_w2_a_name] + loaded_keys.add(lokr_w2_a_name) + + lokr_w2_b = None + if lokr_w2_b_name in lora.keys(): + lokr_w2_b = lora[lokr_w2_b_name] + loaded_keys.add(lokr_w2_b_name) + + lokr_t2 = None + if lokr_t2_name in lora.keys(): + lokr_t2 = lora[lokr_t2_name] + loaded_keys.add(lokr_t2_name) + + if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None): + weights = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2, dora_scale) + return cls(loaded_keys, weights) + else: + return None + + def calculate_weight( + self, + weight, + key, + strength, + strength_model, + offset, + function, + intermediate_dtype=torch.float32, + original_weight=None, + ): + v = self.weights + w1 = v[0] + w2 = v[1] + w1_a = v[3] + w1_b = v[4] + w2_a = v[5] + w2_b = v[6] + t2 = v[7] + dora_scale = v[8] + dim = None + + if w1 is None: + dim = w1_b.shape[0] + w1 = torch.mm(comfy.model_management.cast_to_device(w1_a, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w1_b, weight.device, intermediate_dtype)) + else: + w1 = comfy.model_management.cast_to_device(w1, weight.device, intermediate_dtype) + + if w2 is None: + dim = w2_b.shape[0] + if t2 is None: + w2 = torch.mm(comfy.model_management.cast_to_device(w2_a, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w2_b, weight.device, intermediate_dtype)) + else: + w2 = torch.einsum('i j k l, j r, i p -> p r k l', + comfy.model_management.cast_to_device(t2, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w2_b, weight.device, intermediate_dtype), + comfy.model_management.cast_to_device(w2_a, weight.device, intermediate_dtype)) + else: + w2 = comfy.model_management.cast_to_device(w2, weight.device, intermediate_dtype) + + if len(w2.shape) == 4: + w1 = w1.unsqueeze(2).unsqueeze(2) + if v[2] is not None and dim is not None: + alpha = v[2] / dim + else: + alpha 
= 1.0 + + try: + lora_diff = torch.kron(w1, w2).reshape(weight.shape) + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) + else: + weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + except Exception as e: + logging.error("ERROR {} {} {}".format(self.name, key, e)) + return weight diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py new file mode 100644 index 000000000..b2e623924 --- /dev/null +++ b/comfy/weight_adapter/lora.py @@ -0,0 +1,142 @@ +import logging +from typing import Optional + +import torch +import comfy.model_management +from .base import WeightAdapterBase, weight_decompose, pad_tensor_to_shape + + +class LoRAAdapter(WeightAdapterBase): + name = "lora" + + def __init__(self, loaded_keys, weights): + self.loaded_keys = loaded_keys + self.weights = weights + + @classmethod + def load( + cls, + x: str, + lora: dict[str, torch.Tensor], + alpha: float, + dora_scale: torch.Tensor, + loaded_keys: set[str] = None, + ) -> Optional["LoRAAdapter"]: + if loaded_keys is None: + loaded_keys = set() + + reshape_name = "{}.reshape_weight".format(x) + regular_lora = "{}.lora_up.weight".format(x) + diffusers_lora = "{}_lora.up.weight".format(x) + diffusers2_lora = "{}.lora_B.weight".format(x) + diffusers3_lora = "{}.lora.up.weight".format(x) + mochi_lora = "{}.lora_B".format(x) + transformers_lora = "{}.lora_linear_layer.up.weight".format(x) + A_name = None + + if regular_lora in lora.keys(): + A_name = regular_lora + B_name = "{}.lora_down.weight".format(x) + mid_name = "{}.lora_mid.weight".format(x) + elif diffusers_lora in lora.keys(): + A_name = diffusers_lora + B_name = "{}_lora.down.weight".format(x) + mid_name = None + elif diffusers2_lora in lora.keys(): + A_name = diffusers2_lora + B_name = "{}.lora_A.weight".format(x) + mid_name = None + elif diffusers3_lora in lora.keys(): + A_name = diffusers3_lora + B_name = "{}.lora.down.weight".format(x) + mid_name = None + elif mochi_lora in lora.keys(): + A_name = mochi_lora + B_name = "{}.lora_A".format(x) + mid_name = None + elif transformers_lora in lora.keys(): + A_name = transformers_lora + B_name = "{}.lora_linear_layer.down.weight".format(x) + mid_name = None + + if A_name is not None: + mid = None + if mid_name is not None and mid_name in lora.keys(): + mid = lora[mid_name] + loaded_keys.add(mid_name) + reshape = None + if reshape_name in lora.keys(): + try: + reshape = lora[reshape_name].tolist() + loaded_keys.add(reshape_name) + except: + pass + weights = (lora[A_name], lora[B_name], alpha, mid, dora_scale, reshape) + loaded_keys.add(A_name) + loaded_keys.add(B_name) + return cls(loaded_keys, weights) + else: + return None + + def calculate_weight( + self, + weight, + key, + strength, + strength_model, + offset, + function, + intermediate_dtype=torch.float32, + original_weight=None, + ): + v = self.weights + mat1 = comfy.model_management.cast_to_device( + v[0], weight.device, intermediate_dtype + ) + mat2 = comfy.model_management.cast_to_device( + v[1], weight.device, intermediate_dtype + ) + dora_scale = v[4] + reshape = v[5] + + if reshape is not None: + weight = pad_tensor_to_shape(weight, reshape) + + if v[2] is not None: + alpha = v[2] / mat2.shape[0] + else: + alpha = 1.0 + + if v[3] is not None: + # locon mid weights, hopefully the math is fine because I didn't properly test it + mat3 = comfy.model_management.cast_to_device( + v[3], weight.device, intermediate_dtype + ) + final_shape = [mat2.shape[1], 
mat2.shape[0], mat3.shape[2], mat3.shape[3]] + mat2 = ( + torch.mm( + mat2.transpose(0, 1).flatten(start_dim=1), + mat3.transpose(0, 1).flatten(start_dim=1), + ) + .reshape(final_shape) + .transpose(0, 1) + ) + try: + lora_diff = torch.mm( + mat1.flatten(start_dim=1), mat2.flatten(start_dim=1) + ).reshape(weight.shape) + if dora_scale is not None: + weight = weight_decompose( + dora_scale, + weight, + lora_diff, + alpha, + strength, + intermediate_dtype, + function, + ) + else: + weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + except Exception as e: + logging.error("ERROR {} {} {}".format(self.name, key, e)) + return weight From 3ab231f01f26f9cec03bd94382ae5b6289789d9e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 21 Apr 2025 20:36:12 -0700 Subject: [PATCH 0037/1073] Fix issue with WAN VACE implementation. (#7724) --- comfy/ldm/wan/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 4ef86d5f2..b8eec3afb 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -630,7 +630,7 @@ class VaceWanModel(WanModel): ii = self.vace_layers_mapping.get(i, None) if ii is not None: c_skip, c = self.vace_blocks[ii](c, x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) - x += c_skip * vace_strength + x += c_skip * vace_strength # head x = self.head(x, e) From 966c43ce268341de6e60762ef18e7628f7d311bf Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 22 Apr 2025 16:59:47 +0800 Subject: [PATCH 0038/1073] Add OFT/BOFT algorithm in weight adapter (#7725) --- comfy/weight_adapter/__init__.py | 4 ++ comfy/weight_adapter/boft.py | 115 +++++++++++++++++++++++++++++++ comfy/weight_adapter/oft.py | 94 +++++++++++++++++++++++++ 3 files changed, 213 insertions(+) create mode 100644 comfy/weight_adapter/boft.py create mode 100644 comfy/weight_adapter/oft.py diff --git a/comfy/weight_adapter/__init__.py b/comfy/weight_adapter/__init__.py index e6cd805b6..d2a1d0151 100644 --- a/comfy/weight_adapter/__init__.py +++ b/comfy/weight_adapter/__init__.py @@ -3,6 +3,8 @@ from .lora import LoRAAdapter from .loha import LoHaAdapter from .lokr import LoKrAdapter from .glora import GLoRAAdapter +from .oft import OFTAdapter +from .boft import BOFTAdapter adapters: list[type[WeightAdapterBase]] = [ @@ -10,4 +12,6 @@ adapters: list[type[WeightAdapterBase]] = [ LoHaAdapter, LoKrAdapter, GLoRAAdapter, + OFTAdapter, + BOFTAdapter, ] diff --git a/comfy/weight_adapter/boft.py b/comfy/weight_adapter/boft.py new file mode 100644 index 000000000..c85adc7ab --- /dev/null +++ b/comfy/weight_adapter/boft.py @@ -0,0 +1,115 @@ +import logging +from typing import Optional + +import torch +import comfy.model_management +from .base import WeightAdapterBase, weight_decompose + + +class BOFTAdapter(WeightAdapterBase): + name = "boft" + + def __init__(self, loaded_keys, weights): + self.loaded_keys = loaded_keys + self.weights = weights + + @classmethod + def load( + cls, + x: str, + lora: dict[str, torch.Tensor], + alpha: float, + dora_scale: torch.Tensor, + loaded_keys: set[str] = None, + ) -> Optional["BOFTAdapter"]: + if loaded_keys is None: + loaded_keys = set() + blocks_name = "{}.boft_blocks".format(x) + rescale_name = "{}.rescale".format(x) + + blocks = None + if blocks_name in lora.keys(): + blocks = lora[blocks_name] + if blocks.ndim == 4: + loaded_keys.add(blocks_name) + + rescale = 
None + if rescale_name in lora.keys(): + rescale = lora[rescale_name] + loaded_keys.add(rescale_name) + + if blocks is not None: + weights = (blocks, rescale, alpha, dora_scale) + return cls(loaded_keys, weights) + else: + return None + + def calculate_weight( + self, + weight, + key, + strength, + strength_model, + offset, + function, + intermediate_dtype=torch.float32, + original_weight=None, + ): + v = self.weights + blocks = v[0] + rescale = v[1] + alpha = v[2] + dora_scale = v[3] + + blocks = comfy.model_management.cast_to_device(blocks, weight.device, intermediate_dtype) + if rescale is not None: + rescale = comfy.model_management.cast_to_device(rescale, weight.device, intermediate_dtype) + + boft_m, block_num, boft_b, *_ = blocks.shape + + try: + # Get r + I = torch.eye(boft_b, device=blocks.device, dtype=blocks.dtype) + # for Q = -Q^T + q = blocks - blocks.transpose(1, 2) + normed_q = q + if alpha > 0: # alpha in boft/bboft is for constraint + q_norm = torch.norm(q) + 1e-8 + if q_norm > alpha: + normed_q = q * alpha / q_norm + # use float() to prevent unsupported type in .inverse() + r = (I + normed_q) @ (I - normed_q).float().inverse() + r = r.to(original_weight) + + inp = org = original_weight + + r_b = boft_b//2 + for i in range(boft_m): + bi = r[i] + g = 2 + k = 2**i * r_b + if strength != 1: + bi = bi * strength + (1-strength) * I + inp = ( + inp.unflatten(-1, (-1, g, k)) + .transpose(-2, -1) + .flatten(-3) + .unflatten(-1, (-1, boft_b)) + ) + inp = torch.einsum("b n m, b n ... -> b m ...", inp, bi) + inp = ( + inp.flatten(-2).unflatten(-1, (-1, k, g)).transpose(-2, -1).flatten(-3) + ) + + if rescale is not None: + inp = inp * rescale + + lora_diff = inp - org + lora_diff = comfy.model_management.cast_to_device(lora_diff, weight.device, intermediate_dtype) + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) + else: + weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + except Exception as e: + logging.error("ERROR {} {} {}".format(self.name, key, e)) + return weight diff --git a/comfy/weight_adapter/oft.py b/comfy/weight_adapter/oft.py new file mode 100644 index 000000000..0ea229b79 --- /dev/null +++ b/comfy/weight_adapter/oft.py @@ -0,0 +1,94 @@ +import logging +from typing import Optional + +import torch +import comfy.model_management +from .base import WeightAdapterBase, weight_decompose + + +class OFTAdapter(WeightAdapterBase): + name = "oft" + + def __init__(self, loaded_keys, weights): + self.loaded_keys = loaded_keys + self.weights = weights + + @classmethod + def load( + cls, + x: str, + lora: dict[str, torch.Tensor], + alpha: float, + dora_scale: torch.Tensor, + loaded_keys: set[str] = None, + ) -> Optional["OFTAdapter"]: + if loaded_keys is None: + loaded_keys = set() + blocks_name = "{}.oft_blocks".format(x) + rescale_name = "{}.rescale".format(x) + + blocks = None + if blocks_name in lora.keys(): + blocks = lora[blocks_name] + if blocks.ndim == 3: + loaded_keys.add(blocks_name) + + rescale = None + if rescale_name in lora.keys(): + rescale = lora[rescale_name] + loaded_keys.add(rescale_name) + + if blocks is not None: + weights = (blocks, rescale, alpha, dora_scale) + return cls(loaded_keys, weights) + else: + return None + + def calculate_weight( + self, + weight, + key, + strength, + strength_model, + offset, + function, + intermediate_dtype=torch.float32, + original_weight=None, + ): + v = self.weights + blocks = v[0] + rescale = v[1] + alpha = v[2] + 
dora_scale = v[3] + + blocks = comfy.model_management.cast_to_device(blocks, weight.device, intermediate_dtype) + if rescale is not None: + rescale = comfy.model_management.cast_to_device(rescale, weight.device, intermediate_dtype) + + block_num, block_size, *_ = blocks.shape + + try: + # Get r + I = torch.eye(block_size, device=blocks.device, dtype=blocks.dtype) + # for Q = -Q^T + q = blocks - blocks.transpose(1, 2) + normed_q = q + if alpha > 0: # alpha in oft/boft is for constraint + q_norm = torch.norm(q) + 1e-8 + if q_norm > alpha: + normed_q = q * alpha / q_norm + # use float() to prevent unsupported type in .inverse() + r = (I + normed_q) @ (I - normed_q).float().inverse() + r = r.to(original_weight) + lora_diff = torch.einsum( + "k n m, k n ... -> k m ...", + (r * strength) - strength * I, + original_weight, + ) + if dora_scale is not None: + weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) + else: + weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + except Exception as e: + logging.error("ERROR {} {} {}".format(self.name, key, e)) + return weight From 454a635c1b8aae9f635e7fb4f696bf7ac2e1fd1f Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Tue, 22 Apr 2025 05:00:28 -0400 Subject: [PATCH 0039/1073] upstream MaskPreview from ComfyUI_essentials (#7719) --- comfy_extras/nodes_mask.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 13d2b4bab..99b264a32 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -3,7 +3,10 @@ import scipy.ndimage import torch import comfy.utils import node_helpers +import folder_paths +import random +import nodes from nodes import MAX_RESOLUTION def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False): @@ -362,6 +365,30 @@ class ThresholdMask: mask = (mask > value).float() return (mask,) +# Mask Preview - original implement from +# https://github.com/cubiq/ComfyUI_essentials/blob/9d9f4bedfc9f0321c19faf71855e228c93bd0dc9/mask.py#L81 +# upstream requested in https://github.com/Kosinkadink/rfcs/blob/main/rfcs/0000-corenodes.md#preview-nodes +class MaskPreview(nodes.SaveImage): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": {"mask": ("MASK",), }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + FUNCTION = "execute" + CATEGORY = "mask" + + def execute(self, mask, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): + preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + return self.save_images(preview, filename_prefix, prompt, extra_pnginfo) + NODE_CLASS_MAPPINGS = { "LatentCompositeMasked": LatentCompositeMasked, @@ -376,6 +403,7 @@ NODE_CLASS_MAPPINGS = { "FeatherMask": FeatherMask, "GrowMask": GrowMask, "ThresholdMask": ThresholdMask, + "MaskPreview": MaskPreview } NODE_DISPLAY_NAME_MAPPINGS = { From a8f63c0d5b40b4ed12faa1376e973b0e790b1c0d Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 22 Apr 2025 17:01:27 +0800 Subject: [PATCH 0040/1073] Support dora_scale on both axis (#7727) --- comfy/weight_adapter/base.py | 24 +++++++++++++++++------- 1 file 
changed, 17 insertions(+), 7 deletions(-) diff --git a/comfy/weight_adapter/base.py b/comfy/weight_adapter/base.py index 54af3babe..29873519d 100644 --- a/comfy/weight_adapter/base.py +++ b/comfy/weight_adapter/base.py @@ -43,13 +43,23 @@ def weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediat dora_scale = comfy.model_management.cast_to_device(dora_scale, weight.device, intermediate_dtype) lora_diff *= alpha weight_calc = weight + function(lora_diff).type(weight.dtype) - weight_norm = ( - weight_calc.transpose(0, 1) - .reshape(weight_calc.shape[1], -1) - .norm(dim=1, keepdim=True) - .reshape(weight_calc.shape[1], *[1] * (weight_calc.dim() - 1)) - .transpose(0, 1) - ) + + wd_on_output_axis = dora_scale.shape[0] == weight_calc.shape[0] + if wd_on_output_axis: + weight_norm = ( + weight.reshape(weight.shape[0], -1) + .norm(dim=1, keepdim=True) + .reshape(weight.shape[0], *[1] * (weight.dim() - 1)) + ) + else: + weight_norm = ( + weight_calc.transpose(0, 1) + .reshape(weight_calc.shape[1], -1) + .norm(dim=1, keepdim=True) + .reshape(weight_calc.shape[1], *[1] * (weight_calc.dim() - 1)) + .transpose(0, 1) + ) + weight_norm = weight_norm + torch.finfo(weight.dtype).eps weight_calc *= (dora_scale / weight_norm).type(weight.dtype) if strength != 1.0: From 2d6805ce57cede78acb6515112439c5092c7b257 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 22 Apr 2025 03:17:38 -0700 Subject: [PATCH 0041/1073] Add option for using fp8_e8m0fnu for model weights. (#7733) Seems to break every model I have tried but worth testing? --- comfy/cli_args.py | 1 + comfy/model_management.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 81f29f098..1b971be3c 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -66,6 +66,7 @@ fpunet_group.add_argument("--bf16-unet", action="store_true", help="Run the diff fpunet_group.add_argument("--fp16-unet", action="store_true", help="Run the diffusion model in fp16") fpunet_group.add_argument("--fp8_e4m3fn-unet", action="store_true", help="Store unet weights in fp8_e4m3fn.") fpunet_group.add_argument("--fp8_e5m2-unet", action="store_true", help="Store unet weights in fp8_e5m2.") +fpunet_group.add_argument("--fp8_e8m0fnu-unet", action="store_true", help="Store unet weights in fp8_e8m0fnu.") fpvae_group = parser.add_mutually_exclusive_group() fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 19e6c8dff..43e402243 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -725,6 +725,8 @@ def unet_dtype(device=None, model_params=0, supported_dtypes=[torch.float16, tor return torch.float8_e4m3fn if args.fp8_e5m2_unet: return torch.float8_e5m2 + if args.fp8_e8m0fnu_unet: + return torch.float8_e8m0fnu fp8_dtype = None if weight_dtype in FLOAT8_TYPES: From 92cdc692f47188e6e4c48c5666ac802281240a37 Mon Sep 17 00:00:00 2001 From: Alex Butler Date: Tue, 22 Apr 2025 22:57:17 +0100 Subject: [PATCH 0042/1073] Replace aom-av1 with svt-av1 for saving webm videos, use preset 6 + yuv420p10le pixel format (#7736) * Add support for saving svt-av1 webm videos & yuv420p10le pixel format * Replace aom-av1 with svt-av1 Use yuv420p10le for av1 --- comfy_extras/nodes_video.py | 6 ++++-- requirements.txt | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_video.py 
b/comfy_extras/nodes_video.py index 97ca513d8..a9e244ebe 100644 --- a/comfy_extras/nodes_video.py +++ b/comfy_extras/nodes_video.py @@ -50,13 +50,15 @@ class SaveWEBM: for x in extra_pnginfo: container.metadata[x] = json.dumps(extra_pnginfo[x]) - codec_map = {"vp9": "libvpx-vp9", "av1": "libaom-av1"} + codec_map = {"vp9": "libvpx-vp9", "av1": "libsvtav1"} stream = container.add_stream(codec_map[codec], rate=Fraction(round(fps * 1000), 1000)) stream.width = images.shape[-2] stream.height = images.shape[-3] - stream.pix_fmt = "yuv420p" + stream.pix_fmt = "yuv420p10le" if codec == "av1" else "yuv420p" stream.bit_rate = 0 stream.options = {'crf': str(crf)} + if codec == "av1": + stream.options["preset"] = "6" for frame in images: frame = av.VideoFrame.from_ndarray(torch.clamp(frame[..., :3] * 255, min=0, max=255).to(device=torch.device("cpu"), dtype=torch.uint8).numpy(), format="rgb24") diff --git a/requirements.txt b/requirements.txt index 5c3a854ce..90eb04612 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,4 +22,4 @@ psutil kornia>=0.7.1 spandrel soundfile -av +av>=14.1.0 From 0738e4ea5dd5ecac34d8cf61bb381ea6d159394b Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 22 Apr 2025 23:18:08 -0700 Subject: [PATCH 0043/1073] [API nodes] Add backbone for supporting api nodes in ComfyUI (#7745) * Add Ideogram generate node. * Add staging api. * COMFY_API_NODE_NAME node property * switch to boolean flag and use original node name for id * add optional to type * Add API_NODE and common error for missing auth token (#5) * Add Minimax Video Generation + Async Task queue polling example (#6) * [Minimax] Show video preview and embed workflow in output (#7) * [API Nodes] Send empty request body instead of empty dictionary. (#8) * Fixed: removed function from rebase. * Add pydantic. * Remove uv.lock * Remove polling operations. * Update stubs workflow. * Remove polling comments. * Update stubs. * Use pydantic v2. * Use pydantic v2. * Add basic OpenAITextToImage node * Add. * convert image to tensor. * Improve types. * Ruff. * Push tests. * Handle multi-form data. - Don't set content-type for multi-part/form - Use data field instead of JSON * Change to api.comfy.org * Handle error code 409. * Remove nodes. 
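By way of illustration (not part of this commit), a minimal sketch of how a node could drive this framework; the endpoint path and the two Pydantic models are hypothetical placeholders, while ApiEndpoint, HttpMethod, and SynchronousOperation are the classes added in comfy_api_nodes/apis/client.py below:

    from pydantic import BaseModel
    from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation

    class ExampleRequest(BaseModel):   # hypothetical request schema
        prompt: str

    class ExampleResponse(BaseModel):  # hypothetical response schema
        image_url: str

    # Describe the endpoint once; validation on both sides comes from the models.
    endpoint = ApiEndpoint(
        path="/v1/example/generate",   # hypothetical path
        method=HttpMethod.POST,
        request_model=ExampleRequest,
        response_model=ExampleResponse,
    )

    # One blocking call against api.comfy.org (the default api_base).
    op = SynchronousOperation(
        endpoint=endpoint,
        request=ExampleRequest(prompt="a photo of a cat"),
        auth_token="user-token",  # without a token the client raises "Please login first"
    )
    result = op.execute()  # returns a validated ExampleResponse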
--------- Co-authored-by: bymyself Co-authored-by: Yoland Y <4950057+yoland68@users.noreply.github.com> --- comfy/comfy_types/node_typing.py | 4 +- comfy_api_nodes/__init__.py | 0 comfy_api_nodes/apis/client.py | 337 +++++++++++++++++++++++++++++++ requirements.txt | 1 + server.py | 3 + 5 files changed, 344 insertions(+), 1 deletion(-) create mode 100644 comfy_api_nodes/__init__.py create mode 100644 comfy_api_nodes/apis/client.py diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index a348791a9..0bdda032e 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -1,7 +1,7 @@ """Comfy-specific type hinting""" from __future__ import annotations -from typing import Literal, TypedDict +from typing import Literal, TypedDict, Optional from typing_extensions import NotRequired from abc import ABC, abstractmethod from enum import Enum @@ -229,6 +229,8 @@ class ComfyNodeABC(ABC): """Flags a node as experimental, informing users that it may change or not work as expected.""" DEPRECATED: bool """Flags a node as deprecated, indicating to users that they should find alternatives to this node.""" + API_NODE: Optional[bool] + """Flags a node as an API node.""" @classmethod @abstractmethod diff --git a/comfy_api_nodes/__init__.py b/comfy_api_nodes/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py new file mode 100644 index 000000000..cd81d5a1d --- /dev/null +++ b/comfy_api_nodes/apis/client.py @@ -0,0 +1,337 @@ +import logging + +""" +API Client Framework for api.comfy.org. + +This module provides a flexible framework for making API requests from ComfyUI nodes. +It supports both synchronous and asynchronous API operations with proper type validation. + +Key Components: +-------------- +1. ApiClient - Handles HTTP requests with authentication and error handling +2. ApiEndpoint - Defines a single HTTP endpoint with its request/response models +3. ApiOperation - Executes a single synchronous API operation + +Usage Examples: +-------------- + +# Example 1: Synchronous API Operation +# ------------------------------------ +# For a simple API call that returns the result immediately: + +# 1. Create the API client +api_client = ApiClient( + base_url="https://api.example.com", + api_key="your_api_key_here", + timeout=30.0, + verify_ssl=True +) + +# 2. Define the endpoint +user_info_endpoint = ApiEndpoint( + path="/v1/users/me", + method=HttpMethod.GET, + request_model=EmptyRequest, # No request body needed + response_model=UserProfile, # Pydantic model for the response + query_params=None +) + +# 3. Create the request object +request = EmptyRequest() + +# 4. Create and execute the operation +operation = ApiOperation( + endpoint=user_info_endpoint, + request=request +) +user_profile = operation.execute(client=api_client) # Returns immediately with the result + +""" + +from typing import ( + Dict, + Type, + Optional, + Any, + TypeVar, + Generic, +) +from pydantic import BaseModel +from enum import Enum +import json +import requests +from urllib.parse import urljoin + +T = TypeVar("T", bound=BaseModel) +R = TypeVar("R", bound=BaseModel) + +class EmptyRequest(BaseModel): + """Base class for empty request bodies. 
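+ An instance serializes to no request body at all (the operation passes None rather than an empty dict).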
+ For GET requests, fields will be sent as query parameters.""" + + pass + + +class HttpMethod(str, Enum): + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + PATCH = "PATCH" + + +class ApiClient: + """ + Client for making HTTP requests to an API with authentication and error handling. + """ + + def __init__( + self, + base_url: str, + api_key: Optional[str] = None, + timeout: float = 30.0, + verify_ssl: bool = True, + ): + self.base_url = base_url + self.api_key = api_key + self.timeout = timeout + self.verify_ssl = verify_ssl + + def get_headers(self) -> Dict[str, str]: + """Get headers for API requests, including authentication if available""" + headers = {"Content-Type": "application/json", "Accept": "application/json"} + + if self.api_key: + headers["Authorization"] = f"Bearer {self.api_key}" + + return headers + + def request( + self, + method: str, + path: str, + params: Optional[Dict[str, Any]] = None, + json: Optional[Dict[str, Any]] = None, + files: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + ) -> Dict[str, Any]: + """ + Make an HTTP request to the API + + Args: + method: HTTP method (GET, POST, etc.) + path: API endpoint path (will be joined with base_url) + params: Query parameters + json: JSON body data + files: Files to upload + headers: Additional headers + + Returns: + Parsed JSON response + + Raises: + requests.RequestException: If the request fails + """ + url = urljoin(self.base_url, path) + self.check_auth_token(self.api_key) + # Combine default headers with any provided headers + request_headers = self.get_headers() + if headers: + request_headers.update(headers) + + # Let requests handle the content type when files are present. + if files: + del request_headers["Content-Type"] + + logging.debug(f"[DEBUG] Request Headers: {request_headers}") + logging.debug(f"[DEBUG] Files: {files}") + logging.debug(f"[DEBUG] Params: {params}") + logging.debug(f"[DEBUG] Json: {json}") + + try: + # If files are present, use data parameter instead of json + if files: + form_data = {} + if json: + form_data.update(json) + response = requests.request( + method=method, + url=url, + params=params, + data=form_data, # Use data instead of json + files=files, + headers=request_headers, + timeout=self.timeout, + verify=self.verify_ssl, + ) + else: + response = requests.request( + method=method, + url=url, + params=params, + json=json, + headers=request_headers, + timeout=self.timeout, + verify=self.verify_ssl, + ) + + # Raise exception for error status codes + response.raise_for_status() + except requests.ConnectionError: + raise Exception( + f"Unable to connect to the API server at {self.base_url}. Please check your internet connection or verify the service is available." + ) + + except requests.Timeout: + raise Exception( + f"Request timed out after {self.timeout} seconds. The server might be experiencing high load or the operation is taking longer than expected." 
+ ) + + except requests.HTTPError as e: + status_code = e.response.status_code if hasattr(e, "response") else None + error_message = f"HTTP Error: {str(e)}" + + # Try to extract detailed error message from JSON response + try: + if hasattr(e, "response") and e.response.content: + error_json = e.response.json() + if "error" in error_json and "message" in error_json["error"]: + error_message = f"API Error: {error_json['error']['message']}" + if "type" in error_json["error"]: + error_message += f" (Type: {error_json['error']['type']})" + else: + error_message = f"API Error: {error_json}" + except Exception as json_error: + # If we can't parse the JSON, fall back to the original error message + logging.debug(f"[DEBUG] Failed to parse error response: {str(json_error)}") + + logging.debug(f"[DEBUG] API Error: {error_message} (Status: {status_code})") + if hasattr(e, "response") and e.response.content: + logging.debug(f"[DEBUG] Response content: {e.response.content}") + if status_code == 401: + error_message = "Unauthorized: Please login first to use this node." + if status_code == 402: + error_message = "Payment Required: Please add credits to your account to use this node." + if status_code == 409: + error_message = "There is a problem with your account. Please contact support@comfy.org. " + if status_code == 429: + error_message = "Rate Limit Exceeded: Please try again later." + raise Exception(error_message) + + # Parse and return JSON response + if response.content: + return response.json() + return {} + + def check_auth_token(self, auth_token): + """Verify that an auth token is present.""" + if auth_token is None: + raise Exception("Please login first to use this node.") + return auth_token + + +class ApiEndpoint(Generic[T, R]): + """Defines an API endpoint with its request and response types""" + + def __init__( + self, + path: str, + method: HttpMethod, + request_model: Type[T], + response_model: Type[R], + query_params: Optional[Dict[str, Any]] = None, + ): + """Initialize an API endpoint definition. + + Args: + path: The URL path for this endpoint, can include placeholders like {id} + method: The HTTP method to use (GET, POST, etc.) + request_model: Pydantic model class that defines the structure and validation rules for API requests to this endpoint + response_model: Pydantic model class that defines the structure and validation rules for API responses from this endpoint + query_params: Optional dictionary of query parameters to include in the request + """ + self.path = path + self.method = method + self.request_model = request_model + self.response_model = response_model + self.query_params = query_params or {} + + +class SynchronousOperation(Generic[T, R]): + """ + Represents a single synchronous API operation. 
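+ Typical flow: construct it with an ApiEndpoint and a populated request model, then call execute(), which performs the HTTP request and validates the JSON response against the endpoint's response_model.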
+ """ + + def __init__( + self, + endpoint: ApiEndpoint[T, R], + request: T, + files: Optional[Dict[str, Any]] = None, + api_base: str = "https://api.comfy.org", + auth_token: Optional[str] = None, + timeout: float = 60.0, + verify_ssl: bool = True, + ): + self.endpoint = endpoint + self.request = request + self.response = None + self.error = None + self.api_base = api_base + self.auth_token = auth_token + self.timeout = timeout + self.verify_ssl = verify_ssl + self.files = files + def execute(self, client: Optional[ApiClient] = None) -> R: + """Execute the API operation using the provided client or create one""" + try: + # Create client if not provided + if client is None: + if self.api_base is None: + raise ValueError("Either client or api_base must be provided") + client = ApiClient( + base_url=self.api_base, + api_key=self.auth_token, + timeout=self.timeout, + verify_ssl=self.verify_ssl, + ) + + # Convert request model to dict, but use None for EmptyRequest + request_dict = None if isinstance(self.request, EmptyRequest) else self.request.model_dump(exclude_none=True) + + # Debug log for request + logging.debug(f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}") + logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}") + logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}") + + # Make the request + resp = client.request( + method=self.endpoint.method.value, + path=self.endpoint.path, + json=request_dict, + params=self.endpoint.query_params, + files=self.files, + ) + + # Debug log for response + logging.debug("=" * 50) + logging.debug("[DEBUG] RESPONSE DETAILS:") + logging.debug("[DEBUG] Status Code: 200 (Success)") + logging.debug(f"[DEBUG] Response Body: {json.dumps(resp, indent=2)}") + logging.debug("=" * 50) + + # Parse and return the response + return self._parse_response(resp) + + except Exception as e: + logging.debug(f"[DEBUG] API Exception: {str(e)}") + raise Exception(str(e)) + + def _parse_response(self, resp): + """Parse response data - can be overridden by subclasses""" + # The response is already the complete object, don't extract just the "data" field + # as that would lose the outer structure (created timestamp, etc.) + + # Parse response using the provided model + self.response = self.endpoint.response_model.model_validate(resp) + logging.debug(f"[DEBUG] Parsed Response: {self.response}") + return self.response diff --git a/requirements.txt b/requirements.txt index 90eb04612..f8ad908ca 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,3 +23,4 @@ kornia>=0.7.1 spandrel soundfile av>=14.1.0 +pydantic~=2.0 diff --git a/server.py b/server.py index 0cc97b248..f64ec27d4 100644 --- a/server.py +++ b/server.py @@ -580,6 +580,9 @@ class PromptServer(): info['deprecated'] = True if getattr(obj_class, "EXPERIMENTAL", False): info['experimental'] = True + + if hasattr(obj_class, 'API_NODE'): + info['api_node'] = obj_class.API_NODE return info @routes.get("/object_info") From 552615235dc043f0b07d11e1ff2e6571e6f90d4d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Apr 2025 01:12:52 -0700 Subject: [PATCH 0044/1073] Fix for dino lowvram. 
(#7748) --- comfy/image_encoders/dino2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/image_encoders/dino2.py b/comfy/image_encoders/dino2.py index 130ed6fd7..976f98c65 100644 --- a/comfy/image_encoders/dino2.py +++ b/comfy/image_encoders/dino2.py @@ -116,7 +116,7 @@ class Dino2Embeddings(torch.nn.Module): def forward(self, pixel_values): x = self.patch_embeddings(pixel_values) # TODO: mask_token? - x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + x = torch.cat((self.cls_token.to(device=x.device, dtype=x.dtype).expand(x.shape[0], -1, -1), x), dim=1) x = x + comfy.model_management.cast_to_device(self.position_embeddings, x.device, x.dtype) return x From 21a11ef817e3749047c6b548231210ff84fe331d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Apr 2025 02:12:59 -0700 Subject: [PATCH 0045/1073] Pytorch stable 2.7 is out and support cu128 (#7749) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cf6df7e55..62800bb4f 100644 --- a/README.md +++ b/README.md @@ -216,9 +216,9 @@ Additional discussion and help can be found [here](https://github.com/comfyanony Nvidia users should install stable pytorch using this command: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu126``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu128``` -This is the command to install pytorch nightly instead which supports the new blackwell 50xx series GPUs and might have performance improvements. +This is the command to install pytorch nightly instead which might have performance improvements. ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128``` From 7eaff81be106fa5e1479cfa69f5fd06265611f2e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Apr 2025 02:28:24 -0700 Subject: [PATCH 0046/1073] fp16 accumulation can now be enabled on the stable package. (#7750) --- .../run_nvidia_gpu_fast_fp16_accumulation.bat | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .ci/{windows_nightly_base_files => windows_base_files}/run_nvidia_gpu_fast_fp16_accumulation.bat (100%) diff --git a/.ci/windows_nightly_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat b/.ci/windows_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat similarity index 100% rename from .ci/windows_nightly_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat rename to .ci/windows_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat From 3eaad0590e51bc186b1d533fef906e3f296cdd42 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Apr 2025 02:47:09 -0700 Subject: [PATCH 0047/1073] Lower size of release package. (#7751) --- .github/workflows/windows_release_package.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index 416544f71..8300c2faf 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -50,7 +50,7 @@ jobs: - uses: actions/checkout@v4 with: - fetch-depth: 0 + fetch-depth: 150 persist-credentials: false - shell: bash run: | @@ -82,7 +82,7 @@ jobs: cd .. 
- "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=256m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z cd ComfyUI_windows_portable From 154f2911aaf0333db576a237c6098ed0a8160a7d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Apr 2025 03:33:09 -0700 Subject: [PATCH 0048/1073] Lower size of release package more. (#7754) --- .github/workflows/stable-release.yml | 6 +++--- .github/workflows/windows_release_nightly_pytorch.yml | 2 +- .github/workflows/windows_release_package.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index f7d30a9a4..40df7ab88 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ inputs.git_tag }} - fetch-depth: 0 + fetch-depth: 150 persist-credentials: false - uses: actions/cache/restore@v4 id: cache @@ -70,7 +70,7 @@ jobs: cd .. git clone --depth 1 https://github.com/comfyanonymous/taesd - cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/ + cp taesd/*.safetensors ./ComfyUI_copy/models/vae_approx/ mkdir ComfyUI_windows_portable mv python_embeded ComfyUI_windows_portable @@ -85,7 +85,7 @@ jobs: cd .. - "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=512m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia.7z cd ComfyUI_windows_portable diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 24599249a..eb5ed9c91 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -56,7 +56,7 @@ jobs: cd .. git clone --depth 1 https://github.com/comfyanonymous/taesd - cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/ + cp taesd/*.safetensors ./ComfyUI_copy/models/vae_approx/ mkdir ComfyUI_windows_portable_nightly_pytorch mv python_embeded ComfyUI_windows_portable_nightly_pytorch diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index 8300c2faf..dc79b1f4a 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -67,7 +67,7 @@ jobs: cd .. git clone --depth 1 https://github.com/comfyanonymous/taesd - cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/ + cp taesd/*.safetensors ./ComfyUI_copy/models/vae_approx/ mkdir ComfyUI_windows_portable mv python_embeded ComfyUI_windows_portable @@ -82,7 +82,7 @@ jobs: cd .. 
- "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=256m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable
+ "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=512m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable
 mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z

 cd ComfyUI_windows_portable

From dea1c7474a8e663732a755204970e09006df68c7 Mon Sep 17 00:00:00 2001
From: Robin Huang
Date: Wed, 23 Apr 2025 12:38:34 -0700
Subject: [PATCH 0049/1073] Add support for API Nodes in ComfyUI. (#7726)

* Add Ideogram generate node.
* Add staging api.
* COMFY_API_NODE_NAME node property
* switch to boolean flag and use original node name for id
* add optional to type
* Add API_NODE and common error for missing auth token (#5)
* Add Minimax Video Generation + Async Task queue polling example (#6)
* [Minimax] Show video preview and embed workflow in output (#7)
* [API Nodes] Send empty request body instead of empty dictionary. (#8)
* Fixed: removed function from rebase.
* Add pydantic.
* Remove uv.lock
* Remove polling operations.
* Update stubs workflow.
* Remove polling comments.
* Update stubs.
* Use pydantic v2.
* Use pydantic v2.
* Add basic OpenAITextToImage node
* Add.
* convert image to tensor.
* Improve types.
* Ruff.
* Push tests.
* Handle multi-form data.
- Don't set content-type for multi-part/form
- Use data field instead of JSON
* Change to api.comfy.org
* Handle error code 409.
* separate out nodes per openai model
* Update error message.
* fix wrong output type
* re-categorize nodes, remove ideogram (for now)
* oops, fix mappings
* fix ruff
* Update frontend to 1.17.9
* embargo lift rename nodes
* remove unused autogenerated model code
* fix API type error and add b64 support for 4o
* fix ruff
* oops forgot mask scaling code
* Remove unused types.
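For orientation, the pieces listed above compose as follows. This is a minimal sketch condensed from the OpenAIDalle3 node added in this patch; the class names, the proxy path, and the request fields come from the diff itself, while the prompt and auth token values are placeholders.

```python
from comfy_api_nodes.apis import (
    OpenAIImageGenerationRequest,
    OpenAIImageGenerationResponse,
)
from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation

# Describe the endpoint once: proxy path, HTTP verb, and the pydantic
# models used to serialize the request and validate the response.
operation = SynchronousOperation(
    endpoint=ApiEndpoint(
        path="/proxy/openai/images/generations",
        method=HttpMethod.POST,
        request_model=OpenAIImageGenerationRequest,
        response_model=OpenAIImageGenerationResponse,
    ),
    request=OpenAIImageGenerationRequest(
        model="dall-e-3",
        prompt="a rocket in front of a black hole",  # placeholder prompt
        n=1,
        size="1024x1024",
    ),
    auth_token="<comfy-org-token>",  # placeholder; nodes receive this via the hidden AUTH_TOKEN_COMFY_ORG input
)

# execute() builds an ApiClient when none is passed in, POSTs the
# serialized request, and validates the JSON reply against the response model.
response = operation.execute()
```

Because execute() constructs the client on demand, a node only has to describe the endpoint and the request payload.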
---------
Co-authored-by: bymyself
Co-authored-by: Yoland Y <4950057+yoland68@users.noreply.github.com>
Co-authored-by: thot-experiment
---
.github/workflows/update-api-stubs.yml | 47 +++
comfy_api_nodes/apis/PixverseController.py | 17 +
comfy_api_nodes/apis/PixverseDto.py | 57 +++
comfy_api_nodes/apis/__init__.py | 422 ++++++++++++++++++++
comfy_api_nodes/apis/client.py | 2 +-
comfy_api_nodes/nodes_api.py | 425 +++++++++++++++++++++
nodes.py | 9 +
requirements.txt | 2 +-
8 files changed, 979 insertions(+), 2 deletions(-)
create mode 100644 .github/workflows/update-api-stubs.yml
create mode 100644 comfy_api_nodes/apis/PixverseController.py
create mode 100644 comfy_api_nodes/apis/PixverseDto.py
create mode 100644 comfy_api_nodes/apis/__init__.py
create mode 100644 comfy_api_nodes/nodes_api.py

diff --git a/.github/workflows/update-api-stubs.yml b/.github/workflows/update-api-stubs.yml
new file mode 100644
index 000000000..2ae99b673
--- /dev/null
+++ b/.github/workflows/update-api-stubs.yml
@@ -0,0 +1,47 @@
+name: Generate Pydantic Stubs from api.comfy.org
+
+on:
+ schedule:
+ - cron: '0 0 * * 1'
+ workflow_dispatch:
+
+jobs:
+ generate-models:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install 'datamodel-code-generator[http]'
+
+ - name: Generate API models
+ run: |
+ datamodel-codegen --use-subclass-enum --url https://api.comfy.org/openapi --output comfy_api_nodes/apis --output-model-type pydantic_v2.BaseModel
+
+ - name: Check for changes
+ id: git-check
+ run: |
+ git diff --exit-code comfy_api_nodes/apis || echo "changes=true" >> $GITHUB_OUTPUT
+
+ - name: Create Pull Request
+ if: steps.git-check.outputs.changes == 'true'
+ uses: peter-evans/create-pull-request@v5
+ with:
+ commit-message: 'chore: update API models from OpenAPI spec'
+ title: 'Update API models from api.comfy.org'
+ body: |
+ This PR updates the API models based on the latest api.comfy.org OpenAPI specification.
+
+ Generated automatically by a GitHub workflow.
+ branch: update-api-stubs
+ delete-branch: true
+ base: main
diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py
new file mode 100644
index 000000000..29a3ab33b
--- /dev/null
+++ b/comfy_api_nodes/apis/PixverseController.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: https://api.comfy.org/openapi
+# timestamp: 2025-04-23T15:56:33+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+from .
import PixverseDto + + +class ResponseData(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp: Optional[PixverseDto.V2OpenAPII2VResp] = None diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py new file mode 100644 index 000000000..399512214 --- /dev/null +++ b/comfy_api_nodes/apis/PixverseDto.py @@ -0,0 +1,57 @@ +# generated by datamodel-codegen: +# filename: https://api.comfy.org/openapi +# timestamp: 2025-04-23T15:56:33+00:00 + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, Field, constr + + +class V2OpenAPII2VResp(BaseModel): + video_id: Optional[int] = Field(None, description='Video_id') + + +class V2OpenAPIT2VReq(BaseModel): + aspect_ratio: str = Field( + ..., description='Aspect ratio (16:9, 4:3, 1:1, 3:4, 9:16)', examples=['16:9'] + ) + duration: int = Field( + ..., + description='Video duration (5, 8 seconds, --model=v3.5 only allows 5,8; --quality=1080p does not support 8s)', + examples=[5], + ) + model: str = Field( + ..., description='Model version (only supports v3.5)', examples=['v3.5'] + ) + motion_mode: Optional[str] = Field( + 'normal', + description='Motion mode (normal, fast, --fast only available when duration=5; --quality=1080p does not support fast)', + examples=['normal'], + ) + negative_prompt: Optional[constr(max_length=2048)] = Field( + None, description='Negative prompt\n' + ) + prompt: constr(max_length=2048) = Field(..., description='Prompt') + quality: str = Field( + ..., + description='Video quality ("360p"(Turbo model), "540p", "720p", "1080p")', + examples=['540p'], + ) + seed: Optional[int] = Field(None, description='Random seed, range: 0 - 2147483647') + style: Optional[str] = Field( + None, + description='Style (effective when model=v3.5, "anime", "3d_animation", "clay", "comic", "cyberpunk") Do not include style parameter unless needed', + examples=['anime'], + ) + template_id: Optional[int] = Field( + None, + description='Template ID (template_id must be activated before use)', + examples=[302325299692608], + ) + water_mark: Optional[bool] = Field( + False, + description='Watermark (true: add watermark, false: no watermark)', + examples=[False], + ) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py new file mode 100644 index 000000000..e7ea9b332 --- /dev/null +++ b/comfy_api_nodes/apis/__init__.py @@ -0,0 +1,422 @@ +# generated by datamodel-codegen: +# filename: https://api.comfy.org/openapi +# timestamp: 2025-04-23T15:56:33+00:00 + +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional + +from pydantic import AnyUrl, BaseModel, Field, confloat, conint + +class Customer(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='The date and time the user was created' + ) + email: Optional[str] = Field(None, description='The email address for this user') + id: str = Field(..., description='The firebase UID of the user') + name: Optional[str] = Field(None, description='The name for this user') + updatedAt: Optional[datetime] = Field( + None, description='The date and time the user was last updated' + ) + + +class Error(BaseModel): + details: Optional[List[str]] = Field( + None, + description='Optional detailed information about the error or hints for resolving it.', + ) + message: Optional[str] = Field( + None, description='A clear and concise description of the error.' 
+ ) + + +class ErrorResponse(BaseModel): + error: str + message: str + +class ImageRequest(BaseModel): + aspect_ratio: Optional[str] = Field( + None, + description="Optional. The aspect ratio (e.g., 'ASPECT_16_9', 'ASPECT_1_1'). Cannot be used with resolution. Defaults to 'ASPECT_1_1' if unspecified.", + ) + color_palette: Optional[Dict[str, Any]] = Field( + None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' + ) + magic_prompt_option: Optional[str] = Field( + None, description="Optional. MagicPrompt usage ('AUTO', 'ON', 'OFF')." + ) + model: str = Field(..., description="The model used (e.g., 'V_2', 'V_2A_TURBO')") + negative_prompt: Optional[str] = Field( + None, + description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', + ) + num_images: Optional[conint(ge=1, le=8)] = Field( + 1, description='Optional. Number of images to generate (1-8). Defaults to 1.' + ) + prompt: str = Field( + ..., description='Required. The prompt to use to generate the image.' + ) + resolution: Optional[str] = Field( + None, + description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.", + ) + seed: Optional[conint(ge=0, le=2147483647)] = Field( + None, description='Optional. A number between 0 and 2147483647.' + ) + style_type: Optional[str] = Field( + None, + description="Optional. Style type ('AUTO', 'GENERAL', 'REALISTIC', 'DESIGN', 'RENDER_3D', 'ANIME'). Only for models V_2 and above.", + ) + + +class Datum(BaseModel): + is_image_safe: Optional[bool] = Field( + None, description='Indicates whether the image is considered safe.' + ) + prompt: Optional[str] = Field( + None, description='The prompt used to generate this image.' + ) + resolution: Optional[str] = Field( + None, description="The resolution of the generated image (e.g., '1024x1024')." + ) + seed: Optional[int] = Field( + None, description='The seed value used for this generation.' 
+ ) + style_type: Optional[str] = Field( + None, + description="The style type used for generation (e.g., 'REALISTIC', 'ANIME').", + ) + url: Optional[str] = Field(None, description='URL to the generated image.') + + +class Code(Enum): + int_1100 = 1100 + int_1101 = 1101 + int_1102 = 1102 + int_1103 = 1103 + + +class Code1(Enum): + int_1000 = 1000 + int_1001 = 1001 + int_1002 = 1002 + int_1003 = 1003 + int_1004 = 1004 + + +class AspectRatio(str, Enum): + field_16_9 = '16:9' + field_9_16 = '9:16' + field_1_1 = '1:1' + + +class Config(BaseModel): + horizontal: Optional[confloat(ge=-10.0, le=10.0)] = None + pan: Optional[confloat(ge=-10.0, le=10.0)] = None + roll: Optional[confloat(ge=-10.0, le=10.0)] = None + tilt: Optional[confloat(ge=-10.0, le=10.0)] = None + vertical: Optional[confloat(ge=-10.0, le=10.0)] = None + zoom: Optional[confloat(ge=-10.0, le=10.0)] = None + + +class Type(str, Enum): + simple = 'simple' + down_back = 'down_back' + forward_up = 'forward_up' + right_turn_forward = 'right_turn_forward' + left_turn_forward = 'left_turn_forward' + + +class CameraControl(BaseModel): + config: Optional[Config] = None + type: Optional[Type] = Field(None, description='Predefined camera movements type') + + +class Duration(str, Enum): + field_5 = 5 + field_10 = 10 + + +class Mode(str, Enum): + std = 'std' + pro = 'pro' + + +class TaskInfo(BaseModel): + external_task_id: Optional[str] = None + + +class Video(BaseModel): + duration: Optional[str] = Field(None, description='Total video duration') + id: Optional[str] = Field(None, description='Generated video ID') + url: Optional[AnyUrl] = Field(None, description='URL for generated video') + + +class TaskResult(BaseModel): + videos: Optional[List[Video]] = None + + +class TaskStatus(str, Enum): + submitted = 'submitted' + processing = 'processing' + succeed = 'succeed' + failed = 'failed' + + +class Data(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult] = None + task_status: Optional[TaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class AspectRatio1(str, Enum): + field_16_9 = '16:9' + field_9_16 = '9:16' + field_1_1 = '1:1' + field_4_3 = '4:3' + field_3_4 = '3:4' + field_3_2 = '3:2' + field_2_3 = '2:3' + field_21_9 = '21:9' + + +class ImageReference(str, Enum): + subject = 'subject' + face = 'face' + + +class Image(BaseModel): + index: Optional[int] = Field(None, description='Image Number (0-9)') + url: Optional[AnyUrl] = Field(None, description='URL for generated image') + + +class TaskResult1(BaseModel): + images: Optional[List[Image]] = None + + +class Data1(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_result: Optional[TaskResult1] = None + task_status: Optional[TaskStatus] = None + task_status_msg: Optional[str] = Field(None, description='Task status information') + updated_at: Optional[int] = Field(None, description='Task update time') + + +class AspectRatio2(str, Enum): + field_16_9 = '16:9' + field_9_16 = '9:16' + field_1_1 = '1:1' + + +class CameraControl1(BaseModel): + config: Optional[Config] = None + type: Optional[Type] = Field(None, description='Predefined camera movements type') + + +class ModelName2(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_6 = 'kling-v1-6' + + +class TaskResult2(BaseModel): + 
videos: Optional[List[Video]] = None + + +class Data2(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult2] = None + task_status: Optional[TaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class Code2(Enum): + int_1200 = 1200 + int_1201 = 1201 + int_1202 = 1202 + int_1203 = 1203 + + +class ResourcePackType(str, Enum): + decreasing_total = 'decreasing_total' + constant_period = 'constant_period' + + +class Status(str, Enum): + toBeOnline = 'toBeOnline' + online = 'online' + expired = 'expired' + runOut = 'runOut' + + +class ResourcePackSubscribeInfo(BaseModel): + effective_time: Optional[int] = Field( + None, description='Effective time, Unix timestamp in ms' + ) + invalid_time: Optional[int] = Field( + None, description='Expiration time, Unix timestamp in ms' + ) + purchase_time: Optional[int] = Field( + None, description='Purchase time, Unix timestamp in ms' + ) + remaining_quantity: Optional[float] = Field( + None, description='Remaining quantity (updated with a 12-hour delay)' + ) + resource_pack_id: Optional[str] = Field(None, description='Resource package ID') + resource_pack_name: Optional[str] = Field(None, description='Resource package name') + resource_pack_type: Optional[ResourcePackType] = Field( + None, + description='Resource package type (decreasing_total=decreasing total, constant_period=constant periodicity)', + ) + status: Optional[Status] = Field(None, description='Resource Package Status') + total_quantity: Optional[float] = Field(None, description='Total quantity') + +class Background(str, Enum): + transparent = 'transparent' + opaque = 'opaque' + + +class Moderation(str, Enum): + low = 'low' + auto = 'auto' + + +class OutputFormat(str, Enum): + png = 'png' + webp = 'webp' + jpeg = 'jpeg' + + +class Quality(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + + +class OpenAIImageEditRequest(BaseModel): + background: Optional[str] = Field( + None, description='Background transparency', examples=['opaque'] + ) + model: str = Field( + ..., description='The model to use for image editing', examples=['gpt-image-1'] + ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + n: Optional[int] = Field( + None, description='The number of images to generate', examples=[1] + ) + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] + ) + output_format: Optional[OutputFormat] = Field( + None, description='Format of the output image', examples=['png'] + ) + prompt: str = Field( + ..., + description='A text description of the desired edit', + examples=['Give the rocketship rainbow coloring'], + ) + quality: Optional[str] = Field( + None, description='The quality of the edited image', examples=['low'] + ) + size: Optional[str] = Field( + None, description='Size of the output image', examples=['1024x1024'] + ) + user: Optional[str] = Field( + None, + description='A unique identifier for end-user monitoring', + examples=['user-1234'], + ) + + +class Quality1(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + standard = 'standard' + hd = 'hd' + + +class ResponseFormat(str, Enum): + url = 'url' + b64_json = 'b64_json' + + +class Style(str, Enum): + vivid = 'vivid' + natural = 'natural' + + +class 
OpenAIImageGenerationRequest(BaseModel): + background: Optional[Background] = Field( + None, description='Background transparency', examples=['opaque'] + ) + model: Optional[str] = Field( + None, description='The model to use for image generation', examples=['dall-e-3'] + ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + n: Optional[int] = Field( + None, + description='The number of images to generate (1-10). Only 1 supported for dall-e-3.', + examples=[1], + ) + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] + ) + output_format: Optional[OutputFormat] = Field( + None, description='Format of the output image', examples=['png'] + ) + prompt: str = Field( + ..., + description='A text description of the desired image', + examples=['Draw a rocket in front of a blackhole in deep space'], + ) + quality: Optional[Quality1] = Field( + None, description='The quality of the generated image', examples=['high'] + ) + response_format: Optional[ResponseFormat] = Field( + None, description='Response format of image data', examples=['b64_json'] + ) + size: Optional[str] = Field( + None, + description='Size of the image (e.g., 1024x1024, 1536x1024, auto)', + examples=['1024x1536'], + ) + style: Optional[Style] = Field( + None, description='Style of the image (only for dall-e-3)', examples=['vivid'] + ) + user: Optional[str] = Field( + None, + description='A unique identifier for end-user monitoring', + examples=['user-1234'], + ) + + +class Datum1(BaseModel): + b64_json: Optional[str] = Field(None, description='Base64 encoded image data') + revised_prompt: Optional[str] = Field(None, description='Revised prompt') + url: Optional[str] = Field(None, description='URL of the image') + + +class OpenAIImageGenerationResponse(BaseModel): + data: Optional[List[Datum1]] = None +class User(BaseModel): + email: Optional[str] = Field(None, description='The email address for this user.') + id: Optional[str] = Field(None, description='The unique id for this user.') + isAdmin: Optional[bool] = Field( + None, description='Indicates if the user has admin privileges.' + ) + isApproved: Optional[bool] = Field( + None, description='Indicates if the user is approved.' 
+ ) + name: Optional[str] = Field(None, description='The name for this user.') diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index cd81d5a1d..9bc3d76d5 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -226,7 +226,7 @@ class ApiClient: def check_auth_token(self, auth_token): """Verify that an auth token is present.""" if auth_token is None: - raise Exception("Please login first to use this node.") + raise Exception("Unauthorized: Please login first to use this node.") return auth_token diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py new file mode 100644 index 000000000..92f4a0c87 --- /dev/null +++ b/comfy_api_nodes/nodes_api.py @@ -0,0 +1,425 @@ +import io +from inspect import cleandoc + +from comfy.utils import common_upscale +from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict +from comfy_api_nodes.apis import ( + OpenAIImageGenerationRequest, + OpenAIImageEditRequest, + OpenAIImageGenerationResponse +) +from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation + +import numpy as np +from PIL import Image +import requests +import torch +import math +import base64 + +def downscale_input(image): + samples = image.movedim(-1,1) + #downscaling input images to roughly the same size as the outputs + total = int(1536 * 1024) + scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) + if scale_by >= 1: + return image + width = round(samples.shape[3] * scale_by) + height = round(samples.shape[2] * scale_by) + + s = common_upscale(samples, width, height, "lanczos", "disabled") + s = s.movedim(1,-1) + return s + +def validate_and_cast_response (response): + # validate raw JSON response + data = response.data + if not data or len(data) == 0: + raise Exception("No images returned from API endpoint") + + # Get base64 image data + image_url = data[0].url + b64_data = data[0].b64_json + if not image_url and not b64_data: + raise Exception("No image was generated in the response") + + if b64_data: + img_data = base64.b64decode(b64_data) + img = Image.open(io.BytesIO(img_data)) + + elif image_url: + img_response = requests.get(image_url) + if img_response.status_code != 200: + raise Exception("Failed to download the image") + img = Image.open(io.BytesIO(img_response.content)) + + img = img.convert("RGB") # Ensure RGB format + + # Convert to numpy array, normalize to float32 between 0 and 1 + img_array = np.array(img).astype(np.float32) / 255.0 + + # Convert to torch tensor and add batch dimension + return torch.from_numpy(img_array)[None,] + +class OpenAIDalle2(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's DALL·E 2 endpoint. + + Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, + so download or cache results if you need to keep them. 
+ """ + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": (IO.STRING, { + "multiline": True, + "default": "", + "tooltip": "Text prompt for DALL·E", + }), + }, + "optional": { + "seed": (IO.INT, { + "default": 0, + "min": 0, + "max": 2**31-1, + "step": 1, + "display": "number", + "tooltip": "not implemented yet in backend", + }), + "size": (IO.COMBO, { + "options": ["256x256", "512x512", "1024x1024"], + "default": "1024x1024", + "tooltip": "Image size", + }), + "n": (IO.INT, { + "default": 1, + "min": 1, + "max": 8, + "step": 1, + "display": "number", + "tooltip": "How many images to generate", + }), + "image": (IO.IMAGE, { + "default": None, + "tooltip": "Optional reference image for image editing.", + }), + "mask": (IO.MASK, { + "default": None, + "tooltip": "Optional mask for inpainting (white areas will be replaced)", + }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" + } + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call(self, prompt, seed=0, image=None, mask=None, n=1, size="1024x1024", auth_token=None): + model = "dall-e-2" + path = "/proxy/openai/images/generations" + request_class = OpenAIImageGenerationRequest + img_binary = None + + if image is not None and mask is not None: + path = "/proxy/openai/images/edits" + request_class = OpenAIImageEditRequest + + input_tensor = image.squeeze().cpu() + height, width, channels = input_tensor.shape + rgba_tensor = torch.ones(height, width, 4, device="cpu") + rgba_tensor[:, :, :channels] = input_tensor + + if mask.shape[1:] != image.shape[1:-1]: + raise Exception("Mask and Image must be the same size") + rgba_tensor[:,:,3] = (1-mask.squeeze().cpu()) + + rgba_tensor = downscale_input(rgba_tensor.unsqueeze(0)).squeeze() + + image_np = (rgba_tensor.numpy() * 255).astype(np.uint8) + img = Image.fromarray(image_np) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format='PNG') + img_byte_arr.seek(0) + img_binary = img_byte_arr#.getvalue() + img_binary.name = "image.png" + elif image is not None or mask is not None: + raise Exception("Dall-E 2 image editing requires an image AND a mask") + + # Build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=request_class, + response_model=OpenAIImageGenerationResponse + ), + request=request_class( + model=model, + prompt=prompt, + n=n, + size=size, + seed=seed, + ), + files={ + "image": img_binary, + } if img_binary else None, + auth_token=auth_token + ) + + response = operation.execute() + + img_tensor = validate_and_cast_response(response) + return (img_tensor,) + +class OpenAIDalle3(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's DALL·E 3 endpoint. + + Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, + so download or cache results if you need to keep them. 
+ """ + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": (IO.STRING, { + "multiline": True, + "default": "", + "tooltip": "Text prompt for DALL·E", + }), + }, + "optional": { + "seed": (IO.INT, { + "default": 0, + "min": 0, + "max": 2**31-1, + "step": 1, + "display": "number", + "tooltip": "not implemented yet in backend", + }), + "quality" : (IO.COMBO, { + "options": ["standard","hd"], + "default": "standard", + "tooltip": "Image quality", + }), + "style": (IO.COMBO, { + "options": ["natural","vivid"], + "default": "natural", + "tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.", + }), + "size": (IO.COMBO, { + "options": ["1024x1024", "1024x1792", "1792x1024"], + "default": "1024x1024", + "tooltip": "Image size", + }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" + } + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call(self, prompt, seed=0, style="natural", quality="standard", size="1024x1024", auth_token=None): + model = "dall-e-3" + + # build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/openai/images/generations", + method=HttpMethod.POST, + request_model=OpenAIImageGenerationRequest, + response_model=OpenAIImageGenerationResponse + ), + request=OpenAIImageGenerationRequest( + model=model, + prompt=prompt, + quality=quality, + size=size, + style=style, + seed=seed, + ), + auth_token=auth_token + ) + + response = operation.execute() + + img_tensor = validate_and_cast_response(response) + return (img_tensor,) + +class OpenAIGPTImage1(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's GPT Image 1 endpoint. + + Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, + so download or cache results if you need to keep them. 
+ """ + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": (IO.STRING, { + "multiline": True, + "default": "", + "tooltip": "Text prompt for GPT Image 1", + }), + }, + "optional": { + "seed": (IO.INT, { + "default": 0, + "min": 0, + "max": 2**31-1, + "step": 1, + "display": "number", + "tooltip": "not implemented yet in backend", + }), + "quality": (IO.COMBO, { + "options": ["low","medium","high"], + "default": "low", + "tooltip": "Image quality, affects cost and generation time.", + }), + "background": (IO.COMBO, { + "options": ["opaque","transparent"], + "default": "opaque", + "tooltip": "Return image with or without background", + }), + "size": (IO.COMBO, { + "options": ["auto", "1024x1024", "1024x1536", "1536x1024"], + "default": "auto", + "tooltip": "Image size", + }), + "n": (IO.INT, { + "default": 1, + "min": 1, + "max": 8, + "step": 1, + "display": "number", + "tooltip": "How many images to generate", + }), + "image": (IO.IMAGE, { + "default": None, + "tooltip": "Optional reference image for image editing.", + }), + "mask": (IO.MASK, { + "default": None, + "tooltip": "Optional mask for inpainting (white areas will be replaced)", + }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG" + } + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call(self, prompt, seed=0, quality="low", background="opaque", image=None, mask=None, n=1, size="1024x1024", auth_token=None): + model = "gpt-image-1" + path = "/proxy/openai/images/generations" + request_class = OpenAIImageGenerationRequest + img_binary = None + mask_binary = None + + + if image is not None: + path = "/proxy/openai/images/edits" + request_class = OpenAIImageEditRequest + + scaled_image = downscale_input(image).squeeze() + + image_np = (scaled_image.numpy() * 255).astype(np.uint8) + img = Image.fromarray(image_np) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format='PNG') + img_byte_arr.seek(0) + img_binary = img_byte_arr#.getvalue() + img_binary.name = "image.png" + + if mask is not None: + if image is None: + raise Exception("Cannot use a mask without an input image") + if mask.shape[1:] != image.shape[1:-1]: + raise Exception("Mask and Image must be the same size") + batch, height, width = mask.shape + rgba_mask = torch.zeros(height, width, 4, device="cpu") + rgba_mask[:,:,3] = (1-mask.squeeze().cpu()) + + scaled_mask = downscale_input(rgba_mask.unsqueeze(0)).squeeze() + + mask_np = (scaled_mask.numpy() * 255).astype(np.uint8) + mask_img = Image.fromarray(mask_np) + mask_img_byte_arr = io.BytesIO() + mask_img.save(mask_img_byte_arr, format='PNG') + mask_img_byte_arr.seek(0) + mask_binary = mask_img_byte_arr#.getvalue() + mask_binary.name = "mask.png" + + files = {} + if img_binary: + files["image"] = img_binary + if mask_binary: + files["mask"] = mask_binary + + # Build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=request_class, + response_model=OpenAIImageGenerationResponse + ), + request=request_class( + model=model, + prompt=prompt, + quality=quality, + background=background, + n=n, + seed=seed, + size=size, + ), + files=files if files else None, + auth_token=auth_token + ) + + response = operation.execute() + + img_tensor = validate_and_cast_response(response) + return (img_tensor,) + + +# A dictionary that contains all nodes you want to export with 
their names
+# NOTE: names should be globally unique
+NODE_CLASS_MAPPINGS = {
+ "OpenAIDalle2": OpenAIDalle2,
+ "OpenAIDalle3": OpenAIDalle3,
+ "OpenAIGPTImage1": OpenAIGPTImage1,
+}
+
+# A dictionary that contains the friendly/humanly readable titles for the nodes
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "OpenAIDalle2": "OpenAI DALL·E 2",
+ "OpenAIDalle3": "OpenAI DALL·E 3",
+ "OpenAIGPTImage1": "OpenAI GPT Image 1",
+}
diff --git a/nodes.py b/nodes.py
index b1ab62aad..73a62d930 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2260,11 +2260,20 @@ def init_builtin_extra_nodes():
 "nodes_fresca.py",
 ]

+ api_nodes_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_api_nodes")
+ api_nodes_files = [
+ "nodes_api.py",
+ ]
+
 import_failed = []
 for node_file in extras_files:
 if not load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras"):
 import_failed.append(node_file)

+ for node_file in api_nodes_files:
+ if not load_custom_node(os.path.join(api_nodes_dir, node_file), module_parent="comfy_api_nodes"):
+ import_failed.append(node_file)
+
 return import_failed

diff --git a/requirements.txt b/requirements.txt
index f8ad908ca..2ac241261 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.16.9
+comfyui-frontend-package==1.17.9
 comfyui-workflow-templates==0.1.3
 torch
 torchsde

From e8ddc2be95e3c70363414dfca94f57d6dad25c8f Mon Sep 17 00:00:00 2001
From: filtered <176114999+webfiltered@users.noreply.github.com>
Date: Thu, 24 Apr 2025 06:02:41 +1000
Subject: [PATCH 0050/1073] [BugFix] Update frontend to 1.17.10 (#7762)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 2ac241261..291f81838 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.17.9
+comfyui-frontend-package==1.17.10
 comfyui-workflow-templates==0.1.3
 torch
 torchsde

From 2c1d686ec61f26f3a64bb4c1afdcdb78bb943a4f Mon Sep 17 00:00:00 2001
From: thot experiment <94414189+thot-experiment@users.noreply.github.com>
Date: Wed, 23 Apr 2025 13:10:10 -0700
Subject: [PATCH 0051/1073] implement multi image prompting for gpt-image-1 and fix transparency in outputs (#7763)

* implement multi image prompting for GPT Image 1

* fix transparency not working

* fix ruff

---
 comfy_api_nodes/nodes_api.py | 43 ++++++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 17 deletions(-)

diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py
index 92f4a0c87..7bca0b503 100644
--- a/comfy_api_nodes/nodes_api.py
+++ b/comfy_api_nodes/nodes_api.py
@@ -53,7 +53,7 @@ def validate_and_cast_response (response):
 raise Exception("Failed to download the image")
 img = Image.open(io.BytesIO(img_response.content))

- img = img.convert("RGB") # Ensure RGB format
+ img = img.convert("RGBA")

 # Convert to numpy array, normalize to float32 between 0 and 1
 img_array = np.array(img).astype(np.float32) / 255.0
@@ -339,25 +339,38 @@ class OpenAIGPTImage1(ComfyNodeABC):
 model = "gpt-image-1"
 path = "/proxy/openai/images/generations"
 request_class = OpenAIImageGenerationRequest
- img_binary = None
+ img_binaries = []
 mask_binary = None
-
+ files = []

 if image is not None:
 path = "/proxy/openai/images/edits"
 request_class = OpenAIImageEditRequest

- scaled_image = downscale_input(image).squeeze()
+ batch_size = image.shape[0]
-
- image_np = (scaled_image.numpy() * 255).astype(np.uint8)
- img = Image.fromarray(image_np)
- img_byte_arr = io.BytesIO()
- img.save(img_byte_arr, 
format='PNG')
- img_byte_arr.seek(0)
- img_binary = img_byte_arr#.getvalue()
- img_binary.name = "image.png"
+
+ for i in range(batch_size):
+ single_image = image[i:i+1]
+ scaled_image = downscale_input(single_image).squeeze()
+
+ image_np = (scaled_image.numpy() * 255).astype(np.uint8)
+ img = Image.fromarray(image_np)
+ img_byte_arr = io.BytesIO()
+ img.save(img_byte_arr, format='PNG')
+ img_byte_arr.seek(0)
+ img_binary = img_byte_arr
+ img_binary.name = f"image_{i}.png"
+
+ img_binaries.append(img_binary)
+ if batch_size == 1:
+ files.append(("image", img_binary))
+ else:
+ files.append(("image[]", img_binary))

 if mask is not None:
+ if image.shape[0] != 1:
+ raise Exception("Cannot use a mask with multiple images")
 if image is None:
 raise Exception("Cannot use a mask without an input image")
 if mask.shape[1:] != image.shape[1:-1]:
@@ -373,14 +386,10 @@ class OpenAIGPTImage1(ComfyNodeABC):
 mask_img_byte_arr = io.BytesIO()
 mask_img.save(mask_img_byte_arr, format='PNG')
 mask_img_byte_arr.seek(0)
- mask_binary = mask_img_byte_arr#.getvalue()
+ mask_binary = mask_img_byte_arr
 mask_binary.name = "mask.png"
+ files.append(("mask", mask_binary))

- files = {}
- if img_binary:
- files["image"] = img_binary
- if mask_binary:
- files["mask"] = mask_binary

From 188b383c35f0a790e407cb337dd554fccb188f6f Mon Sep 17 00:00:00 2001
From: thot experiment <94414189+thot-experiment@users.noreply.github.com>
Date: Wed, 23 Apr 2025 14:53:34 -0700
Subject: [PATCH 0052/1073] change timeout to 7 days (#7765)

---
 comfy_api_nodes/apis/client.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py
index 9bc3d76d5..384e559dc 100644
--- a/comfy_api_nodes/apis/client.py
+++ b/comfy_api_nodes/apis/client.py
@@ -269,7 +269,7 @@ class SynchronousOperation(Generic[T, R]):
 files: Optional[Dict[str, Any]] = None,
 api_base: str = "https://api.comfy.org",
 auth_token: Optional[str] = None,
- timeout: float = 60.0,
+ timeout: float = 604800.0,
 verify_ssl: bool = True,
 ):
 self.endpoint = endpoint

From 11b68ebd22c2137661ec6a70f39943a337edf897 Mon Sep 17 00:00:00 2001
From: filtered <176114999+webfiltered@users.noreply.github.com>
Date: Thu, 24 Apr 2025 08:16:12 +1000
Subject: [PATCH 0053/1073] [BugFix] Update frontend to 1.17.11 (#7766)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 291f81838..10cc177af 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.17.10
+comfyui-frontend-package==1.17.11
 comfyui-workflow-templates==0.1.3
 torch
 torchsde

From e2eed9eb9b70f1b2290d5384fd8cfb739c092b44 Mon Sep 17 00:00:00 2001
From: thot experiment <94414189+thot-experiment@users.noreply.github.com>
Date: Wed, 23 Apr 2025 18:28:36 -0700
Subject: [PATCH 0054/1073] throw away alpha channel in clip vision preprocessor (#7769)

saves users having to explicitly discard the channel
---
 comfy/clip_vision.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py
index 11bc57789..00aab9164 100644
--- a/comfy/clip_vision.py
+++ b/comfy/clip_vision.py
@@ -18,6 +18,7 @@ class Output:
 setattr(self, key, item)

 def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
+ image = image[:, :, :, :3] if image.shape[3] > 3 else image
 mean = torch.tensor(mean, device=image.device,
dtype=image.dtype) std = torch.tensor(std, device=image.device, dtype=image.dtype) image = image.movedim(-1, 1) From 5c80da31dbfe6382da5b489098b57a411e7f58ed Mon Sep 17 00:00:00 2001 From: thot experiment <94414189+thot-experiment@users.noreply.github.com> Date: Thu, 24 Apr 2025 00:29:05 -0700 Subject: [PATCH 0055/1073] fix multiple image return from api nodes (#7772) --- comfy_api_nodes/nodes_api.py | 46 +++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 7bca0b503..4105ba7e1 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -31,35 +31,43 @@ def downscale_input(image): s = s.movedim(1,-1) return s -def validate_and_cast_response (response): +def validate_and_cast_response(response): # validate raw JSON response data = response.data if not data or len(data) == 0: raise Exception("No images returned from API endpoint") - # Get base64 image data - image_url = data[0].url - b64_data = data[0].b64_json - if not image_url and not b64_data: - raise Exception("No image was generated in the response") + # Initialize list to store image tensors + image_tensors = [] - if b64_data: - img_data = base64.b64decode(b64_data) - img = Image.open(io.BytesIO(img_data)) + # Process each image in the data array + for image_data in data: + image_url = image_data.url + b64_data = image_data.b64_json - elif image_url: - img_response = requests.get(image_url) - if img_response.status_code != 200: - raise Exception("Failed to download the image") - img = Image.open(io.BytesIO(img_response.content)) + if not image_url and not b64_data: + raise Exception("No image was generated in the response") - img = img.convert("RGBA") + if b64_data: + img_data = base64.b64decode(b64_data) + img = Image.open(io.BytesIO(img_data)) - # Convert to numpy array, normalize to float32 between 0 and 1 - img_array = np.array(img).astype(np.float32) / 255.0 + elif image_url: + img_response = requests.get(image_url) + if img_response.status_code != 200: + raise Exception("Failed to download the image") + img = Image.open(io.BytesIO(img_response.content)) - # Convert to torch tensor and add batch dimension - return torch.from_numpy(img_array)[None,] + img = img.convert("RGBA") + + # Convert to numpy array, normalize to float32 between 0 and 1 + img_array = np.array(img).astype(np.float32) / 255.0 + img_tensor = torch.from_numpy(img_array) + + # Add to list of tensors + image_tensors.append(img_tensor) + + return torch.stack(image_tensors, dim=0) class OpenAIDalle2(ComfyNodeABC): """ From 5acb7058577ca81d26107ace01dd5c5c7a4a5f27 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 24 Apr 2025 10:58:31 -0700 Subject: [PATCH 0056/1073] Switch LTXVPreprocess to libx264 (#7776) --- comfy_extras/nodes_lt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index 525889200..ff3fe5cdc 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -385,7 +385,7 @@ def encode_single_frame(output_file, image_array: np.ndarray, crf): container = av.open(output_file, "w", format="mp4") try: stream = container.add_stream( - "h264", rate=1, options={"crf": str(crf), "preset": "veryfast"} + "libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"} ) stream.height = image_array.shape[0] stream.width = image_array.shape[1] From a97f2f850abd7dd330e6363c8d8074bb243eb413 Mon Sep 17 
00:00:00 2001 From: comfyanonymous Date: Thu, 24 Apr 2025 16:03:01 -0400 Subject: [PATCH 0057/1073] ComfyUI version 0.3.30 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index f9161b37e..67d27f942 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.29" +__version__ = "0.3.30" diff --git a/pyproject.toml b/pyproject.toml index e8fc9555d..eadca662e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.29" +version = "0.3.30" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From f935d42d8ee399e57028d33e0142730d0c163a91 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 25 Apr 2025 03:11:14 -0400 Subject: [PATCH 0058/1073] Support SimpleTuner lycoris lora format for HiDream. --- comfy/lora.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index 8760a21fb..fff524be2 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -279,6 +279,13 @@ def model_lora_keys_unet(model, key_map={}): key_map["transformer.{}".format(key_lora)] = k key_map["diffusion_model.{}".format(key_lora)] = k # Old loras + if isinstance(model, comfy.model_base.HiDream): + for k in sdk: + if k.startswith("diffusion_model."): + if k.endswith(".weight"): + key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_") + key_map["lycoris_{}".format(key_lora)] = k #SimpleTuner lycoris format + return key_map From 78992c4b25ce7ef1305113872aa1f1e6aa6a070b Mon Sep 17 00:00:00 2001 From: AustinMroz Date: Fri, 25 Apr 2025 12:35:07 -0500 Subject: [PATCH 0059/1073] [NodeDef] Add documentation on widgetType (#7768) * [NodeDef] Add documentation on widgetType * Document required version for widgetType --- comfy/comfy_types/node_typing.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index 0bdda032e..4ceeb3468 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -120,6 +120,10 @@ class InputTypeOptions(TypedDict): Available from frontend v1.17.5 Ref: https://github.com/Comfy-Org/ComfyUI_frontend/pull/3548 """ + widgetType: NotRequired[str] + """Specifies a type to be used for widget initialization if different from the input type. + Available from frontend v1.18.0 + https://github.com/Comfy-Org/ComfyUI_frontend/pull/3550""" # class InputTypeNumber(InputTypeOptions): # default: float | int min: NotRequired[float] From 23e39f2ba7c38d5fc21206da31ce7d357b232e15 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 25 Apr 2025 16:36:00 -0700 Subject: [PATCH 0060/1073] Add a T5TokenizerOptions node to set options for the T5 tokenizer. 
(#7803) --- comfy/sd.py | 10 ++++++++++ comfy/sd1_clip.py | 17 +++++++++++------ comfy/sdxl_clip.py | 4 ++-- comfy/text_encoders/flux.py | 4 ++-- comfy/text_encoders/hidream.py | 8 ++++---- comfy/text_encoders/hunyuan_video.py | 4 ++-- comfy/text_encoders/hydit.py | 4 ++-- comfy/text_encoders/sd3_clip.py | 6 +++--- comfy_extras/nodes_cond.py | 25 ++++++++++++++++++++++++- 9 files changed, 60 insertions(+), 22 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 8aba5d655..748f6c1ec 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -120,6 +120,7 @@ class CLIP: self.layer_idx = None self.use_clip_schedule = False logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype)) + self.tokenizer_options = {} def clone(self): n = CLIP(no_init=True) @@ -127,6 +128,7 @@ class CLIP: n.cond_stage_model = self.cond_stage_model n.tokenizer = self.tokenizer n.layer_idx = self.layer_idx + n.tokenizer_options = self.tokenizer_options.copy() n.use_clip_schedule = self.use_clip_schedule n.apply_hooks_to_conds = self.apply_hooks_to_conds return n @@ -134,10 +136,18 @@ class CLIP: def add_patches(self, patches, strength_patch=1.0, strength_model=1.0): return self.patcher.add_patches(patches, strength_patch, strength_model) + def set_tokenizer_option(self, option_name, value): + self.tokenizer_options[option_name] = value + def clip_layer(self, layer_idx): self.layer_idx = layer_idx def tokenize(self, text, return_word_ids=False, **kwargs): + tokenizer_options = kwargs.get("tokenizer_options", {}) + if len(self.tokenizer_options) > 0: + tokenizer_options = {**self.tokenizer_options, **tokenizer_options} + if len(tokenizer_options) > 0: + kwargs["tokenizer_options"] = tokenizer_options return self.tokenizer.tokenize_with_weights(text, return_word_ids, **kwargs) def add_hooks_to_dict(self, pooled_dict: dict[str]): diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 2ca5ed9ba..ac61babe9 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -457,13 +457,14 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No return embed_out class SDTokenizer: - def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data={}, tokenizer_args={}): + def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, tokenizer_data={}, tokenizer_args={}): if tokenizer_path is None: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args) self.max_length = tokenizer_data.get("{}_max_length".format(embedding_key), max_length) self.min_length = min_length self.end_token = None + self.min_padding = min_padding empty = self.tokenizer('')["input_ids"] self.tokenizer_adds_end_token = has_end_token @@ -518,13 +519,15 @@ class SDTokenizer: return (embed, leftover) - def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): + def tokenize_with_weights(self, text:str, return_word_ids=False, 
tokenizer_options={}, **kwargs): ''' Takes a prompt and converts it to a list of (token, weight, word id) elements. Tokens can both be integer tokens and pre computed CLIP tensors. Word id values are unique per word and embedding, where the id 0 is reserved for non word tokens. Returned list has the dimensions NxM where M is the input size of CLIP ''' + min_length = tokenizer_options.get("{}_min_length".format(self.embedding_key), self.min_length) + min_padding = tokenizer_options.get("{}_min_padding".format(self.embedding_key), self.min_padding) text = escape_important(text) parsed_weights = token_weights(text, 1.0) @@ -603,10 +606,12 @@ class SDTokenizer: #fill last batch if self.end_token is not None: batch.append((self.end_token, 1.0, 0)) - if self.pad_to_max_length: + if min_padding is not None: + batch.extend([(self.pad_token, 1.0, 0)] * min_padding) + if self.pad_to_max_length and len(batch) < self.max_length: batch.extend([(self.pad_token, 1.0, 0)] * (self.max_length - len(batch))) - if self.min_length is not None and len(batch) < self.min_length: - batch.extend([(self.pad_token, 1.0, 0)] * (self.min_length - len(batch))) + if min_length is not None and len(batch) < min_length: + batch.extend([(self.pad_token, 1.0, 0)] * (min_length - len(batch))) if not return_word_ids: batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens] @@ -634,7 +639,7 @@ class SD1Tokenizer: def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} - out[self.clip_name] = getattr(self, self.clip).tokenize_with_weights(text, return_word_ids) + out[self.clip_name] = getattr(self, self.clip).tokenize_with_weights(text, return_word_ids, **kwargs) return out def untokenize(self, token_weight_pair): diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index ea7f5d10f..c8cef14e4 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -28,8 +28,8 @@ class SDXLTokenizer: def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} - out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids) - out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids) + out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids, **kwargs) + out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids, **kwargs) return out def untokenize(self, token_weight_pair): diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py index 0666dde7f..d61ef6668 100644 --- a/comfy/text_encoders/flux.py +++ b/comfy/text_encoders/flux.py @@ -19,8 +19,8 @@ class FluxTokenizer: def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} - out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids) - out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids) + out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids, **kwargs) + out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids, **kwargs) return out def untokenize(self, token_weight_pair): diff --git a/comfy/text_encoders/hidream.py b/comfy/text_encoders/hidream.py index 8e1abcfc1..dbcf52784 100644 --- a/comfy/text_encoders/hidream.py +++ b/comfy/text_encoders/hidream.py @@ -16,11 +16,11 @@ class HiDreamTokenizer: def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} - out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids) - out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids) - t5xxl = self.t5xxl.tokenize_with_weights(text, return_word_ids) + out["g"] = 
self.clip_g.tokenize_with_weights(text, return_word_ids, **kwargs) + out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids, **kwargs) + t5xxl = self.t5xxl.tokenize_with_weights(text, return_word_ids, **kwargs) out["t5xxl"] = [t5xxl[0]] # Use only first 128 tokens - out["llama"] = self.llama.tokenize_with_weights(text, return_word_ids) + out["llama"] = self.llama.tokenize_with_weights(text, return_word_ids, **kwargs) return out def untokenize(self, token_weight_pair): diff --git a/comfy/text_encoders/hunyuan_video.py b/comfy/text_encoders/hunyuan_video.py index 33ac22497..b02148b33 100644 --- a/comfy/text_encoders/hunyuan_video.py +++ b/comfy/text_encoders/hunyuan_video.py @@ -49,13 +49,13 @@ class HunyuanVideoTokenizer: def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, image_embeds=None, image_interleave=1, **kwargs): out = {} - out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids) + out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids, **kwargs) if llama_template is None: llama_text = self.llama_template.format(text) else: llama_text = llama_template.format(text) - llama_text_tokens = self.llama.tokenize_with_weights(llama_text, return_word_ids) + llama_text_tokens = self.llama.tokenize_with_weights(llama_text, return_word_ids, **kwargs) embed_count = 0 for r in llama_text_tokens: for i in range(len(r)): diff --git a/comfy/text_encoders/hydit.py b/comfy/text_encoders/hydit.py index e7273f425..ac6994529 100644 --- a/comfy/text_encoders/hydit.py +++ b/comfy/text_encoders/hydit.py @@ -41,8 +41,8 @@ class HyditTokenizer: def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} - out["hydit_clip"] = self.hydit_clip.tokenize_with_weights(text, return_word_ids) - out["mt5xl"] = self.mt5xl.tokenize_with_weights(text, return_word_ids) + out["hydit_clip"] = self.hydit_clip.tokenize_with_weights(text, return_word_ids, **kwargs) + out["mt5xl"] = self.mt5xl.tokenize_with_weights(text, return_word_ids, **kwargs) return out def untokenize(self, token_weight_pair): diff --git a/comfy/text_encoders/sd3_clip.py b/comfy/text_encoders/sd3_clip.py index 6c2fbeca4..ff5d412db 100644 --- a/comfy/text_encoders/sd3_clip.py +++ b/comfy/text_encoders/sd3_clip.py @@ -45,9 +45,9 @@ class SD3Tokenizer: def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): out = {} - out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids) - out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids) - out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids) + out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids, **kwargs) + out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids, **kwargs) + out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids, **kwargs) return out def untokenize(self, token_weight_pair): diff --git a/comfy_extras/nodes_cond.py b/comfy_extras/nodes_cond.py index 4c3a1d5bf..574262178 100644 --- a/comfy_extras/nodes_cond.py +++ b/comfy_extras/nodes_cond.py @@ -20,6 +20,29 @@ class CLIPTextEncodeControlnet: c.append(n) return (c, ) +class T5TokenizerOptions: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "clip": ("CLIP", ), + "min_padding": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}), + "min_length": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}), + } + } + + RETURN_TYPES = ("CLIP",) + FUNCTION = "set_options" + + def set_options(self, clip, min_padding, min_length): + clip = 
clip.clone() + for t5_type in ["t5xxl", "pile_t5xl", "t5base", "mt5xl", "umt5xxl"]: + clip.set_tokenizer_option("{}_min_padding".format(t5_type), min_padding) + clip.set_tokenizer_option("{}_min_length".format(t5_type), min_length) + + return (clip, ) + NODE_CLASS_MAPPINGS = { - "CLIPTextEncodeControlnet": CLIPTextEncodeControlnet + "CLIPTextEncodeControlnet": CLIPTextEncodeControlnet, + "T5TokenizerOptions": T5TokenizerOptions, } From b685b8a4e098237919adae580eb29e8d861b738f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 26 Apr 2025 01:43:12 -0700 Subject: [PATCH 0061/1073] Update portable package workflow to cu128 (#7812) --- .github/workflows/stable-release.yml | 4 ++-- .github/workflows/windows_release_dependencies.yml | 4 ++-- .github/workflows/windows_release_package.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 40df7ab88..c4302cdd6 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -12,7 +12,7 @@ on: description: 'CUDA version' required: true type: string - default: "126" + default: "128" python_minor: description: 'Python minor version' required: true @@ -22,7 +22,7 @@ on: description: 'Python patch version' required: true type: string - default: "9" + default: "10" jobs: diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml index 7a8ec5782..dfdb96d50 100644 --- a/.github/workflows/windows_release_dependencies.yml +++ b/.github/workflows/windows_release_dependencies.yml @@ -17,7 +17,7 @@ on: description: 'cuda version' required: true type: string - default: "126" + default: "128" python_minor: description: 'python minor version' @@ -29,7 +29,7 @@ on: description: 'python patch version' required: true type: string - default: "9" + default: "10" # push: # branches: # - master diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index dc79b1f4a..80a45b321 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -7,7 +7,7 @@ on: description: 'cuda version' required: true type: string - default: "126" + default: "128" python_minor: description: 'python minor version' @@ -19,7 +19,7 @@ on: description: 'python patch version' required: true type: string - default: "9" + default: "10" # push: # branches: # - master From 0dcc75ca547b533a129699208aefa95c6742f1b6 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 26 Apr 2025 13:11:21 -0700 Subject: [PATCH 0062/1073] Add experimental --async-offload lowvram weight offloading. (#7820) This should speed up the lowvram mode a bit. It currently is only enabled when --async-offload is used but it will be enabled by default in the future if there are no problems. 
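In outline, the offload path pairs each weight upload with a side CUDA stream: the copy is queued on the side stream so it can overlap with compute on the default stream, and the default stream waits on the side stream before touching the result. A minimal sketch of that pattern, assuming a CUDA device is present; the helper name and shapes are illustrative, not part of ComfyUI's API:

import torch

def upload_on_side_stream(weight: torch.Tensor, device: torch.device,
                          stream: torch.cuda.Stream) -> torch.Tensor:
    # Order the side stream after whatever is already queued on the default stream.
    stream.wait_stream(torch.cuda.current_stream(device))
    with torch.cuda.stream(stream):
        r = torch.empty_like(weight, device=device)
        r.copy_(weight, non_blocking=True)  # overlaps with default-stream compute
    # Consumers on the default stream must wait for the copy to finish.
    torch.cuda.current_stream(device).wait_stream(stream)
    return r

if torch.cuda.is_available():
    dev = torch.device("cuda:0")
    side = torch.cuda.Stream(device=dev)
    w = torch.randn(1024, 1024).pin_memory()  # pinned host memory keeps the copy truly async
    w_gpu = upload_on_side_stream(w, dev, side)

The diff below round-robins a small pool of such streams per device and defers the final wait until both the weight and the bias have been queued.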
--- comfy/cli_args.py | 1 + comfy/model_management.py | 47 ++++++++++++++++++++++++++++++++++++--- comfy/ops.py | 7 ++++-- 3 files changed, 50 insertions(+), 5 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 1b971be3c..f89a7aab4 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -128,6 +128,7 @@ vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for e parser.add_argument("--reserve-vram", type=float, default=None, help="Set the amount of vram in GB you want to reserve for use by your OS/other software. By default some amount is reserved depending on your OS.") +parser.add_argument("--async-offload", action="store_true", help="Use async weight offloading.") parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 43e402243..d118f6b91 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -939,15 +939,56 @@ def force_channels_last(): #TODO return False -def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False): + +STREAMS = {} +NUM_STREAMS = 1 +if args.async_offload: + NUM_STREAMS = 2 + logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS)) + +stream_counter = 0 +def get_offload_stream(device): + global stream_counter + if NUM_STREAMS <= 1: + return None + + if device in STREAMS: + ss = STREAMS[device] + s = ss[stream_counter] + stream_counter = (stream_counter + 1) % len(ss) + if is_device_cuda(device): + ss[stream_counter].wait_stream(torch.cuda.current_stream()) + return s + elif is_device_cuda(device): + ss = [] + for k in range(NUM_STREAMS): + ss.append(torch.cuda.Stream(device=device, priority=10)) + STREAMS[device] = ss + s = ss[stream_counter] + stream_counter = (stream_counter + 1) % len(ss) + return s + return None + +def sync_stream(device, stream): + if stream is None: + return + if is_device_cuda(device): + torch.cuda.current_stream().wait_stream(stream) + +def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, stream=None): if device is None or weight.device == device: if not copy: if dtype is None or weight.dtype == dtype: return weight return weight.to(dtype=dtype, copy=copy) - r = torch.empty_like(weight, dtype=dtype, device=device) - r.copy_(weight, non_blocking=non_blocking) + if stream is not None: + with stream: + r = torch.empty_like(weight, dtype=dtype, device=device) + r.copy_(weight, non_blocking=non_blocking) + else: + r = torch.empty_like(weight, dtype=dtype, device=device) + r.copy_(weight, non_blocking=non_blocking) return r def cast_to_device(tensor, device, dtype, copy=False): diff --git a/comfy/ops.py b/comfy/ops.py index aae6cafac..62daf447b 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -37,20 +37,23 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None): if device is None: device = input.device + offload_stream = comfy.model_management.get_offload_stream(device) bias = None non_blocking = comfy.model_management.device_supports_non_blocking(device) if s.bias is not None: has_function = len(s.bias_function) > 0 - bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function) + bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function, 
stream=offload_stream) if has_function: for f in s.bias_function: bias = f(bias) has_function = len(s.weight_function) > 0 - weight = comfy.model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function) + weight = comfy.model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function, stream=offload_stream) if has_function: for f in s.weight_function: weight = f(weight) + + comfy.model_management.sync_stream(device, offload_stream) return weight, bias class CastWeightBiasOp: From ac10a0d69e9905662296c5280bcea61945c39762 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 26 Apr 2025 16:56:22 -0700 Subject: [PATCH 0063/1073] Make loras work with --async-offload (#7824) --- comfy/ops.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 62daf447b..032787915 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -22,6 +22,7 @@ import comfy.model_management from comfy.cli_args import args, PerformanceFeature import comfy.float import comfy.rmsnorm +import contextlib cast_to = comfy.model_management.cast_to #TODO: remove once no more references @@ -38,20 +39,28 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None): device = input.device offload_stream = comfy.model_management.get_offload_stream(device) + if offload_stream is not None: + wf_context = offload_stream + else: + wf_context = contextlib.nullcontext() + bias = None non_blocking = comfy.model_management.device_supports_non_blocking(device) if s.bias is not None: has_function = len(s.bias_function) > 0 bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function, stream=offload_stream) + if has_function: - for f in s.bias_function: - bias = f(bias) + with wf_context: + for f in s.bias_function: + bias = f(bias) has_function = len(s.weight_function) > 0 weight = comfy.model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function, stream=offload_stream) if has_function: - for f in s.weight_function: - weight = f(weight) + with wf_context: + for f in s.weight_function: + weight = f(weight) comfy.model_management.sync_stream(device, offload_stream) return weight, bias From 542b4b36b694148504656ad54433b8ddf0c38c4d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 26 Apr 2025 17:52:56 -0700 Subject: [PATCH 0064/1073] Prevent custom nodes from hooking certain functions. 
(#7825) --- hook_breaker_ac10a0.py | 17 +++++++++++++++++ main.py | 5 ++++- 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 hook_breaker_ac10a0.py diff --git a/hook_breaker_ac10a0.py b/hook_breaker_ac10a0.py new file mode 100644 index 000000000..c3e1c0633 --- /dev/null +++ b/hook_breaker_ac10a0.py @@ -0,0 +1,17 @@ +# Prevent custom nodes from hooking anything important +import comfy.model_management + +HOOK_BREAK = [(comfy.model_management, "cast_to")] + + +SAVED_FUNCTIONS = [] + + +def save_functions(): + for f in HOOK_BREAK: + SAVED_FUNCTIONS.append((f[0], f[1], getattr(f[0], f[1]))) + + +def restore_functions(): + for f in SAVED_FUNCTIONS: + setattr(f[0], f[1], f[2]) diff --git a/main.py b/main.py index ac9d24b7b..f3f56597a 100644 --- a/main.py +++ b/main.py @@ -141,7 +141,7 @@ import nodes import comfy.model_management import comfyui_version import app.logger - +import hook_breaker_ac10a0 def cuda_malloc_warning(): device = comfy.model_management.get_torch_device() @@ -215,6 +215,7 @@ def prompt_worker(q, server_instance): comfy.model_management.soft_empty_cache() last_gc_collect = current_time need_gc = False + hook_breaker_ac10a0.restore_functions() async def run(server_instance, address='', port=8188, verbose=True, call_on_start=None): @@ -268,7 +269,9 @@ def start_comfyui(asyncio_loop=None): prompt_server = server.PromptServer(asyncio_loop) q = execution.PromptQueue(prompt_server) + hook_breaker_ac10a0.save_functions() nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes) + hook_breaker_ac10a0.restore_functions() cuda_malloc_warning() From c8cd7ad795ec4ecc5256bdfe2c12c352eef26e3b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 27 Apr 2025 02:38:11 -0700 Subject: [PATCH 0065/1073] Use stream for casting if enabled. (#7833) --- comfy/model_management.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index d118f6b91..516b6e2f1 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -980,6 +980,9 @@ def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, str if not copy: if dtype is None or weight.dtype == dtype: return weight + if stream is not None: + with stream: + return weight.to(dtype=dtype, copy=copy) return weight.to(dtype=dtype, copy=copy) if stream is not None: From 8115a7895bf07a75ccf0c4b65122cbf4cd8b0e2b Mon Sep 17 00:00:00 2001 From: Benjamin Lu Date: Sun, 27 Apr 2025 20:06:55 -0400 Subject: [PATCH 0066/1073] Add `/api/v2/userdata` endpoint (#7817) * Add list_userdata_v2 * nit * nit * nit * nit * please set me free * \\\\ * \\\\ --- app/user_manager.py | 106 ++++++++++++++++++ .../prompt_server_test/user_manager_test.py | 58 ++++++++++ 2 files changed, 164 insertions(+) diff --git a/app/user_manager.py b/app/user_manager.py index e7381e621..d31da5b9b 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -197,6 +197,112 @@ class UserManager(): return web.json_response(results) + @routes.get("/v2/userdata") + async def list_userdata_v2(request): + """ + List files and directories in a user's data directory. + + This endpoint provides a structured listing of contents within a specified + subdirectory of the user's data storage. + + Query Parameters: + - path (optional): The relative path within the user's data directory + to list. Defaults to the root (''). + + Returns: + - 400: If the requested path is invalid, outside the user's data directory, or is not a directory. 
+ - 404: If the requested path does not exist. + - 403: If the user is invalid. + - 500: If there is an error reading the directory contents. + - 200: JSON response containing a list of file and directory objects. + Each object includes: + - name: The name of the file or directory. + - type: 'file' or 'directory'. + - path: The relative path from the user's data root. + - size (for files): The size in bytes. + - modified (for files): The last modified timestamp (Unix epoch). + """ + requested_rel_path = request.rel_url.query.get('path', '') + + # URL-decode the path parameter + try: + requested_rel_path = parse.unquote(requested_rel_path) + except Exception as e: + logging.warning(f"Failed to decode path parameter: {requested_rel_path}, Error: {e}") + return web.Response(status=400, text="Invalid characters in path parameter") + + + # Check user validity and get the absolute path for the requested directory + try: + base_user_path = self.get_request_user_filepath(request, None, create_dir=False) + + if requested_rel_path: + target_abs_path = self.get_request_user_filepath(request, requested_rel_path, create_dir=False) + else: + target_abs_path = base_user_path + + except KeyError as e: + # Invalid user detected by get_request_user_id inside get_request_user_filepath + logging.warning(f"Access denied for user: {e}") + return web.Response(status=403, text="Invalid user specified in request") + + + if not target_abs_path: + # Path traversal or other issue detected by get_request_user_filepath + return web.Response(status=400, text="Invalid path requested") + + # Handle cases where the user directory or target path doesn't exist + if not os.path.exists(target_abs_path): + # Check if it's the base user directory that's missing (new user case) + if target_abs_path == base_user_path: + # It's okay if the base user directory doesn't exist yet, return empty list + return web.json_response([]) + else: + # A specific subdirectory was requested but doesn't exist + return web.Response(status=404, text="Requested path not found") + + if not os.path.isdir(target_abs_path): + return web.Response(status=400, text="Requested path is not a directory") + + results = [] + try: + for root, dirs, files in os.walk(target_abs_path, topdown=True): + # Process directories + for dir_name in dirs: + dir_path = os.path.join(root, dir_name) + rel_path = os.path.relpath(dir_path, base_user_path).replace(os.sep, '/') + results.append({ + "name": dir_name, + "path": rel_path, + "type": "directory" + }) + + # Process files + for file_name in files: + file_path = os.path.join(root, file_name) + rel_path = os.path.relpath(file_path, base_user_path).replace(os.sep, '/') + entry_info = { + "name": file_name, + "path": rel_path, + "type": "file" + } + try: + stats = os.stat(file_path) # Use os.stat for potentially better performance with os.walk + entry_info["size"] = stats.st_size + entry_info["modified"] = stats.st_mtime + except OSError as stat_error: + logging.warning(f"Could not stat file {file_path}: {stat_error}") + pass # Include file with available info + results.append(entry_info) + except OSError as e: + logging.error(f"Error listing directory {target_abs_path}: {e}") + return web.Response(status=500, text="Error reading directory contents") + + # Sort results alphabetically, directories first then files + results.sort(key=lambda x: (x['type'] != 'directory', x['name'].lower())) + + return web.json_response(results) + def get_user_data_path(request, check_exists = False, param = "file"): file = 
request.match_info.get(param, None) if not file: diff --git a/tests-unit/prompt_server_test/user_manager_test.py b/tests-unit/prompt_server_test/user_manager_test.py index 7e523cbf4..b939d8e68 100644 --- a/tests-unit/prompt_server_test/user_manager_test.py +++ b/tests-unit/prompt_server_test/user_manager_test.py @@ -229,3 +229,61 @@ async def test_move_userdata_full_info(aiohttp_client, app, tmp_path): assert not os.path.exists(tmp_path / "source.txt") with open(tmp_path / "dest.txt", "r") as f: assert f.read() == "test content" + + +async def test_listuserdata_v2_empty_root(aiohttp_client, app): + client = await aiohttp_client(app) + resp = await client.get("/v2/userdata") + assert resp.status == 200 + assert await resp.json() == [] + + +async def test_listuserdata_v2_nonexistent_subdirectory(aiohttp_client, app): + client = await aiohttp_client(app) + resp = await client.get("/v2/userdata?path=does_not_exist") + assert resp.status == 404 + + +async def test_listuserdata_v2_default(aiohttp_client, app, tmp_path): + os.makedirs(tmp_path / "test_dir" / "subdir") + (tmp_path / "test_dir" / "file1.txt").write_text("content") + (tmp_path / "test_dir" / "subdir" / "file2.txt").write_text("content") + + client = await aiohttp_client(app) + resp = await client.get("/v2/userdata?path=test_dir") + assert resp.status == 200 + data = await resp.json() + file_paths = {item["path"] for item in data if item["type"] == "file"} + assert file_paths == {"test_dir/file1.txt", "test_dir/subdir/file2.txt"} + + +async def test_listuserdata_v2_normalized_separators(aiohttp_client, app, tmp_path, monkeypatch): + # Force backslash as os separator + monkeypatch.setattr(os, 'sep', '\\') + monkeypatch.setattr(os.path, 'sep', '\\') + os.makedirs(tmp_path / "test_dir" / "subdir") + (tmp_path / "test_dir" / "subdir" / "file1.txt").write_text("x") + + client = await aiohttp_client(app) + resp = await client.get("/v2/userdata?path=test_dir") + assert resp.status == 200 + data = await resp.json() + for item in data: + assert "/" in item["path"] + assert "\\" not in item["path"]\ + +async def test_listuserdata_v2_url_encoded_path(aiohttp_client, app, tmp_path): + # Create a directory with a space in its name and a file inside + os.makedirs(tmp_path / "my dir") + (tmp_path / "my dir" / "file.txt").write_text("content") + + client = await aiohttp_client(app) + # Use URL-encoded space in path parameter + resp = await client.get("/v2/userdata?path=my%20dir&recurse=false") + assert resp.status == 200 + data = await resp.json() + assert len(data) == 1 + entry = data[0] + assert entry["name"] == "file.txt" + # Ensure the path is correctly decoded and uses forward slash + assert entry["path"] == "my dir/file.txt" From cb9ac3db586b675a96ebd2604bf099bc189c1b28 Mon Sep 17 00:00:00 2001 From: Andrew Kvochko Date: Mon, 28 Apr 2025 19:59:17 +0300 Subject: [PATCH 0067/1073] ltxv: add strength parameter to conditioning. (#7849) This commit adds strength parameter to the LTXVImgToVideo node. 
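Concretely, the node writes 1.0 - strength into the noise mask over the conditioned latent frames: a mask value of 0.0 pins a frame to its conditioning while 1.0 lets the sampler re-noise it freely, so strength=1.0 reproduces the previous hard conditioning and strength=0.0 disables it. A small illustration of that mapping (the helper is hypothetical):

import torch

def ltxv_conditioning_mask(latent_frames: int, cond_frames: int,
                           strength: float) -> torch.Tensor:
    mask = torch.ones(1, 1, latent_frames)     # default: every frame fully re-noised
    mask[:, :, :cond_frames] = 1.0 - strength  # strength=1.0 -> 0.0, the old behavior
    return mask

print(ltxv_conditioning_mask(latent_frames=8, cond_frames=2, strength=0.75))
# tensor([[[0.2500, 0.2500, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]])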
--- comfy_extras/nodes_lt.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index ff3fe5cdc..1a667e01a 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -38,6 +38,7 @@ class LTXVImgToVideo: "height": ("INT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}), "length": ("INT", {"default": 97, "min": 9, "max": nodes.MAX_RESOLUTION, "step": 8}), "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}), }} RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") @@ -46,7 +47,7 @@ class LTXVImgToVideo: CATEGORY = "conditioning/video_models" FUNCTION = "generate" - def generate(self, positive, negative, image, vae, width, height, length, batch_size): + def generate(self, positive, negative, image, vae, width, height, length, batch_size, strength): pixels = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) encode_pixels = pixels[:, :, :, :3] t = vae.encode(encode_pixels) @@ -59,7 +60,7 @@ class LTXVImgToVideo: dtype=torch.float32, device=latent.device, ) - conditioning_latent_frames_mask[:, :, :t.shape[2]] = 0 + conditioning_latent_frames_mask[:, :, :t.shape[2]] = 1.0 - strength return (positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask}, ) From 30159a7fe6bfa54fc1b9ba4c80b9041837038819 Mon Sep 17 00:00:00 2001 From: Pam <42671363+pamparamm@users.noreply.github.com> Date: Mon, 28 Apr 2025 22:03:21 +0500 Subject: [PATCH 0068/1073] Save v pred zsnr metadata (#7840) --- comfy/model_sampling.py | 3 ++- comfy_extras/nodes_model_merging.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index b79af1e92..7e7291476 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -111,13 +111,14 @@ class ModelSamplingDiscrete(torch.nn.Module): self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end + self.zsnr = zsnr # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 - if zsnr: + if self.zsnr: sigmas = rescale_zero_terminal_snr_sigmas(sigmas) self.set_sigmas(sigmas) diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py index ccf601158..78d284889 100644 --- a/comfy_extras/nodes_model_merging.py +++ b/comfy_extras/nodes_model_merging.py @@ -209,6 +209,9 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi metadata["modelspec.predict_key"] = "epsilon" elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION: metadata["modelspec.predict_key"] = "v" + extra_keys["v_pred"] = torch.tensor([]) + if getattr(model_sampling, "zsnr", False): + extra_keys["ztsnr"] = torch.tensor([]) if not args.disable_metadata: metadata["prompt"] = prompt_info From 5a50c3c7e59a867ddf0b3b7cc207c877a7b422fb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 28 Apr 2025 10:07:21 -0700 Subject: [PATCH 0069/1073] Fix stream priority to support older pytorch. 
(#7856) --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 516b6e2f1..78317af3c 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -962,7 +962,7 @@ def get_offload_stream(device): elif is_device_cuda(device): ss = [] for k in range(NUM_STREAMS): - ss.append(torch.cuda.Stream(device=device, priority=10)) + ss.append(torch.cuda.Stream(device=device, priority=0)) STREAMS[device] = ss s = ss[stream_counter] stream_counter = (stream_counter + 1) % len(ss) From 772b4c594549fb42f70833053be8613d79932965 Mon Sep 17 00:00:00 2001 From: Andrew Kvochko Date: Mon, 28 Apr 2025 20:42:04 +0300 Subject: [PATCH 0070/1073] ltxv: overwrite existing mask on conditioned frame. (#7845) This commit overwrites the noise mask on the latent frame that is being conditioned with keyframe conditioning, setting it to one. --- comfy_extras/nodes_lt.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index 1a667e01a..e6dc122ca 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -153,6 +153,15 @@ class LTXVAddGuide: return node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs}) def append_keyframe(self, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors): + _, latent_idx = self.get_latent_index( + cond=positive, + latent_length=latent_image.shape[2], + guide_length=guiding_latent.shape[2], + frame_idx=frame_idx, + scale_factors=scale_factors, + ) + noise_mask[:, :, latent_idx:latent_idx + guiding_latent.shape[2]] = 1.0 + positive = self.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors) negative = self.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors) From c15909bb621b31b0ede693f30cc927021e473c72 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Tue, 29 Apr 2025 01:51:35 +0800 Subject: [PATCH 0071/1073] CFG++ for gradient estimation sampler (#7809) --- comfy/k_diffusion/sampling.py | 34 +++++++++++++++++++++++++++++----- comfy/samplers.py | 2 +- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 6388d3faf..77ef748e8 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1345,28 +1345,52 @@ def sample_res_multistep_ancestral_cfg_pp(model, x, sigmas, extra_args=None, cal return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=True) @torch.no_grad() -def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.): +def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2., cfg_pp=False): """Gradient-estimation sampler. 
Paper: https://openreview.net/pdf?id=o2ND9v0CeK""" extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) old_d = None + uncond_denoised = None + def post_cfg_function(args): + nonlocal uncond_denoised + uncond_denoised = args["uncond_denoised"] + return args["denoised"] + + if cfg_pp: + model_options = extra_args.get("model_options", {}).copy() + extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True) + for i in trange(len(sigmas) - 1, disable=disable): denoised = model(x, sigmas[i] * s_in, **extra_args) - d = to_d(x, sigmas[i], denoised) + if cfg_pp: + d = to_d(x, sigmas[i], uncond_denoised) + else: + d = to_d(x, sigmas[i], denoised) if callback is not None: callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) dt = sigmas[i + 1] - sigmas[i] if i == 0: # Euler method - x = x + d * dt + if cfg_pp: + x = denoised + d * sigmas[i + 1] + else: + x = x + d * dt else: # Gradient estimation - d_bar = ge_gamma * d + (1 - ge_gamma) * old_d - x = x + d_bar * dt + if cfg_pp: + d_bar = (ge_gamma - 1) * (d - old_d) + x = denoised + d * sigmas[i + 1] + d_bar * dt + else: + d_bar = ge_gamma * d + (1 - ge_gamma) * old_d + x = x + d_bar * dt old_d = d return x +@torch.no_grad() +def sample_gradient_estimation_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.): + return sample_gradient_estimation(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, ge_gamma=ge_gamma, cfg_pp=True) + @torch.no_grad() def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None, noise_scaler=None, max_stage=3): """ diff --git a/comfy/samplers.py b/comfy/samplers.py index 27dfce45a..67ae09a25 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -710,7 +710,7 @@ KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_c "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", - "gradient_estimation", "er_sde", "seeds_2", "seeds_3"] + "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3"] class KSAMPLER(Sampler): def __init__(self, sampler_function, extra_options={}, inpaint_options={}): From 7d329771f9f966fec20aa72079fc39202d877cbd Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Mon, 28 Apr 2025 10:59:22 -0700 Subject: [PATCH 0072/1073] Add moderation level option to OpenAIGPTImage1 node and update api_call method signature (#7804) --- comfy_api_nodes/nodes_api.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py index 4105ba7e1..a977bb9b7 100644 --- a/comfy_api_nodes/nodes_api.py +++ b/comfy_api_nodes/nodes_api.py @@ -1,21 +1,22 @@ +import base64 import io +import math from inspect import cleandoc -from comfy.utils import common_upscale +import numpy as np +import requests +import torch +from PIL import Image + from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict +from comfy.utils import common_upscale from comfy_api_nodes.apis 
import ( - OpenAIImageGenerationRequest, OpenAIImageEditRequest, - OpenAIImageGenerationResponse + OpenAIImageGenerationRequest, + OpenAIImageGenerationResponse, ) from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation -import numpy as np -from PIL import Image -import requests -import torch -import math -import base64 def downscale_input(image): samples = image.movedim(-1,1) @@ -331,6 +332,11 @@ class OpenAIGPTImage1(ComfyNodeABC): "default": None, "tooltip": "Optional mask for inpainting (white areas will be replaced)", }), + "moderation": (IO.COMBO, { + "options": ["low","auto"], + "default": "low", + "tooltip": "Moderation level", + }), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG" @@ -343,7 +349,7 @@ class OpenAIGPTImage1(ComfyNodeABC): DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call(self, prompt, seed=0, quality="low", background="opaque", image=None, mask=None, n=1, size="1024x1024", auth_token=None): + def api_call(self, prompt, seed=0, quality="low", background="opaque", image=None, mask=None, n=1, size="1024x1024", auth_token=None, moderation="low"): model = "gpt-image-1" path = "/proxy/openai/images/generations" request_class = OpenAIImageGenerationRequest @@ -415,6 +421,7 @@ class OpenAIGPTImage1(ComfyNodeABC): n=n, seed=seed, size=size, + moderation=moderation, ), files=files if files else None, auth_token=auth_token From 83d04717b6cd84fca7af31ee655f7e5a3585a371 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 28 Apr 2025 12:01:15 -0700 Subject: [PATCH 0073/1073] Support HiDream E1 model. (#7857) --- comfy/ldm/hidream/model.py | 3 +++ comfy/model_base.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/comfy/ldm/hidream/model.py b/comfy/ldm/hidream/model.py index fcb5a9c51..0305747bf 100644 --- a/comfy/ldm/hidream/model.py +++ b/comfy/ldm/hidream/model.py @@ -699,10 +699,13 @@ class HiDreamImageTransformer2DModel(nn.Module): y: Optional[torch.Tensor] = None, context: Optional[torch.Tensor] = None, encoder_hidden_states_llama3=None, + image_cond=None, control = None, transformer_options = {}, ) -> torch.Tensor: bs, c, h, w = x.shape + if image_cond is not None: + x = torch.cat([x, image_cond], dim=-1) hidden_states = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size)) timesteps = t pooled_embeds = y diff --git a/comfy/model_base.py b/comfy/model_base.py index b0c6a465b..d2aa4ce7a 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1104,4 +1104,7 @@ class HiDream(BaseModel): conditioning_llama3 = kwargs.get("conditioning_llama3", None) if conditioning_llama3 is not None: out['encoder_hidden_states_llama3'] = comfy.conds.CONDRegular(conditioning_llama3) + image_cond = kwargs.get("concat_latent_image", None) + if image_cond is not None: + out['image_cond'] = comfy.conds.CONDNoiseShape(self.process_latent_in(image_cond)) return out From 68f0d3529667a2b34b27cc0ac5051bc0e8c45b49 Mon Sep 17 00:00:00 2001 From: guill Date: Tue, 29 Apr 2025 02:58:00 -0700 Subject: [PATCH 0074/1073] Add support for VIDEO as a built-in type (#7844) * Add basic support for videos as types This PR adds support for VIDEO as first-class types. In order to avoid unnecessary costs, VIDEO outputs must implement the `VideoInput` ABC, but their implementation details can vary. 
Included are two implementations of this type which can be returned by other nodes: * `VideoFromFile` - Created with either a path on disk (as a string) or a `io.BytesIO` containing the contents of a file in a supported format (like .mp4). This implementation won't actually load the video unless necessary. It will also avoid re-encoding when saving if possible. * `VideoFromComponents` - Created from an image tensor and an optional audio tensor. Currently, only h264 encoded videos in .mp4 containers are supported for saving, but the plan is to add additional encodings/containers in the near future (particularly .webm). * Add optimization to avoid parsing entire video * Improve type declarations to reduce warnings * Make sure bytesIO objects can be read many times * Fix a potential issue when saving long videos * Fix incorrect type annotation * Add a `LoadVideo` node to make testing easier * Refactor new types out of the base comfy folder I've created a new `comfy_api` top-level module. The intention is that anything within this folder would be covered by semver-style versioning that would allow custom nodes to rely on them not introducing breaking changes. * Fix linting issue --- comfy/comfy_types/node_typing.py | 9 +- comfy_api/input/__init__.py | 8 + comfy_api/input/basic_types.py | 20 +++ comfy_api/input/video_types.py | 45 ++++++ comfy_api/input_impl/__init__.py | 7 + comfy_api/input_impl/video_types.py | 224 ++++++++++++++++++++++++++++ comfy_api/util/__init__.py | 8 + comfy_api/util/video_types.py | 51 +++++++ comfy_extras/nodes_video.py | 164 +++++++++++++++++++- folder_paths.py | 4 +- 10 files changed, 532 insertions(+), 8 deletions(-) create mode 100644 comfy_api/input/__init__.py create mode 100644 comfy_api/input/basic_types.py create mode 100644 comfy_api/input/video_types.py create mode 100644 comfy_api/input_impl/__init__.py create mode 100644 comfy_api/input_impl/video_types.py create mode 100644 comfy_api/util/__init__.py create mode 100644 comfy_api/util/video_types.py diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index 4ceeb3468..2ffc9c021 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -48,6 +48,7 @@ class IO(StrEnum): FACE_ANALYSIS = "FACE_ANALYSIS" BBOX = "BBOX" SEGS = "SEGS" + VIDEO = "VIDEO" ANY = "*" """Always matches any type, but at a price. @@ -273,7 +274,7 @@ class ComfyNodeABC(ABC): Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lists#list-processing """ - OUTPUT_IS_LIST: tuple[bool] + OUTPUT_IS_LIST: tuple[bool, ...] """A tuple indicating which node outputs are lists, but will be connected to nodes that expect individual items. Connected nodes that do not implement `INPUT_IS_LIST` will be executed once for every item in the list. @@ -292,7 +293,7 @@ class ComfyNodeABC(ABC): Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lists#list-processing """ - RETURN_TYPES: tuple[IO] + RETURN_TYPES: tuple[IO, ...] """A tuple representing the outputs of this node. Usage:: @@ -301,12 +302,12 @@ class ComfyNodeABC(ABC): Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#return-types """ - RETURN_NAMES: tuple[str] + RETURN_NAMES: tuple[str, ...] """The output slot names for each item in `RETURN_TYPES`, e.g. ``RETURN_NAMES = ("count", "filter_string")`` Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#return-names """ - OUTPUT_TOOLTIPS: tuple[str] + OUTPUT_TOOLTIPS: tuple[str, ...] 
"""A tuple of strings to use as tooltips for node outputs, one for each item in `RETURN_TYPES`.""" FUNCTION: str """The name of the function to execute as a literal string, e.g. `FUNCTION = "execute"` diff --git a/comfy_api/input/__init__.py b/comfy_api/input/__init__.py new file mode 100644 index 000000000..66667946f --- /dev/null +++ b/comfy_api/input/__init__.py @@ -0,0 +1,8 @@ +from .basic_types import ImageInput, AudioInput +from .video_types import VideoInput + +__all__ = [ + "ImageInput", + "AudioInput", + "VideoInput", +] diff --git a/comfy_api/input/basic_types.py b/comfy_api/input/basic_types.py new file mode 100644 index 000000000..033fb7e27 --- /dev/null +++ b/comfy_api/input/basic_types.py @@ -0,0 +1,20 @@ +import torch +from typing import TypedDict + +ImageInput = torch.Tensor +""" +An image in format [B, H, W, C] where B is the batch size, C is the number of channels, +""" + +class AudioInput(TypedDict): + """ + TypedDict representing audio input. + """ + + waveform: torch.Tensor + """ + Tensor in the format [B, C, T] where B is the batch size, C is the number of channels, + """ + + sample_rate: int + diff --git a/comfy_api/input/video_types.py b/comfy_api/input/video_types.py new file mode 100644 index 000000000..0676e0e66 --- /dev/null +++ b/comfy_api/input/video_types.py @@ -0,0 +1,45 @@ +from __future__ import annotations +from abc import ABC, abstractmethod +from typing import Optional +from comfy_api.util import VideoContainer, VideoCodec, VideoComponents + +class VideoInput(ABC): + """ + Abstract base class for video input types. + """ + + @abstractmethod + def get_components(self) -> VideoComponents: + """ + Abstract method to get the video components (images, audio, and frame rate). + + Returns: + VideoComponents containing images, audio, and frame rate + """ + pass + + @abstractmethod + def save_to( + self, + path: str, + format: VideoContainer = VideoContainer.AUTO, + codec: VideoCodec = VideoCodec.AUTO, + metadata: Optional[dict] = None + ): + """ + Abstract method to save the video input to a file. + """ + pass + + # Provide a default implementation, but subclasses can provide optimized versions + # if possible. + def get_dimensions(self) -> tuple[int, int]: + """ + Returns the dimensions of the video input. + + Returns: + Tuple of (width, height) + """ + components = self.get_components() + return components.images.shape[2], components.images.shape[1] + diff --git a/comfy_api/input_impl/__init__.py b/comfy_api/input_impl/__init__.py new file mode 100644 index 000000000..02901b8b9 --- /dev/null +++ b/comfy_api/input_impl/__init__.py @@ -0,0 +1,7 @@ +from .video_types import VideoFromFile, VideoFromComponents + +__all__ = [ + # Implementations + "VideoFromFile", + "VideoFromComponents", +] diff --git a/comfy_api/input_impl/video_types.py b/comfy_api/input_impl/video_types.py new file mode 100644 index 000000000..12e5783db --- /dev/null +++ b/comfy_api/input_impl/video_types.py @@ -0,0 +1,224 @@ +from __future__ import annotations +from av.container import InputContainer +from av.subtitles.stream import SubtitleStream +from fractions import Fraction +from typing import Optional +from comfy_api.input import AudioInput +import av +import io +import json +import numpy as np +import torch +from comfy_api.input import VideoInput +from comfy_api.util import VideoContainer, VideoCodec, VideoComponents + +class VideoFromFile(VideoInput): + """ + Class representing video input from a file. 
+ """ + + def __init__(self, file: str | io.BytesIO): + """ + Initialize the VideoFromFile object based off of either a path on disk or a BytesIO object + containing the file contents. + """ + self.__file = file + + def get_dimensions(self) -> tuple[int, int]: + """ + Returns the dimensions of the video input. + + Returns: + Tuple of (width, height) + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) # Reset the BytesIO object to the beginning + with av.open(self.__file, mode='r') as container: + for stream in container.streams: + if stream.type == 'video': + assert isinstance(stream, av.VideoStream) + return stream.width, stream.height + raise ValueError(f"No video stream found in file '{self.__file}'") + + def get_components_internal(self, container: InputContainer) -> VideoComponents: + # Get video frames + frames = [] + for frame in container.decode(video=0): + img = frame.to_ndarray(format='rgb24') # shape: (H, W, 3) + img = torch.from_numpy(img) / 255.0 # shape: (H, W, 3) + frames.append(img) + + images = torch.stack(frames) if len(frames) > 0 else torch.zeros(0, 3, 0, 0) + + # Get frame rate + video_stream = next(s for s in container.streams if s.type == 'video') + frame_rate = Fraction(video_stream.average_rate) if video_stream and video_stream.average_rate else Fraction(1) + + # Get audio if available + audio = None + try: + container.seek(0) # Reset the container to the beginning + for stream in container.streams: + if stream.type != 'audio': + continue + assert isinstance(stream, av.AudioStream) + audio_frames = [] + for packet in container.demux(stream): + for frame in packet.decode(): + assert isinstance(frame, av.AudioFrame) + audio_frames.append(frame.to_ndarray()) # shape: (channels, samples) + if len(audio_frames) > 0: + audio_data = np.concatenate(audio_frames, axis=1) # shape: (channels, total_samples) + audio_tensor = torch.from_numpy(audio_data).unsqueeze(0) # shape: (1, channels, total_samples) + audio = AudioInput({ + "waveform": audio_tensor, + "sample_rate": int(stream.sample_rate) if stream.sample_rate else 1, + }) + except StopIteration: + pass # No audio stream + + metadata = container.metadata + return VideoComponents(images=images, audio=audio, frame_rate=frame_rate, metadata=metadata) + + def get_components(self) -> VideoComponents: + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) # Reset the BytesIO object to the beginning + with av.open(self.__file, mode='r') as container: + return self.get_components_internal(container) + raise ValueError(f"No video stream found in file '{self.__file}'") + + def save_to( + self, + path: str, + format: VideoContainer = VideoContainer.AUTO, + codec: VideoCodec = VideoCodec.AUTO, + metadata: Optional[dict] = None + ): + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) # Reset the BytesIO object to the beginning + with av.open(self.__file, mode='r') as container: + container_format = container.format.name + video_encoding = container.streams.video[0].codec.name if len(container.streams.video) > 0 else None + reuse_streams = True + if format != VideoContainer.AUTO and format not in container_format.split(","): + reuse_streams = False + if codec != VideoCodec.AUTO and codec != video_encoding and video_encoding is not None: + reuse_streams = False + + if not reuse_streams: + components = self.get_components_internal(container) + video = VideoFromComponents(components) + return video.save_to( + path, + format=format, + codec=codec, + metadata=metadata + ) + + streams = 
container.streams + with av.open(path, mode='w', options={"movflags": "use_metadata_tags"}) as output_container: + # Copy over the original metadata + for key, value in container.metadata.items(): + if metadata is None or key not in metadata: + output_container.metadata[key] = value + + # Add our new metadata + if metadata is not None: + for key, value in metadata.items(): + if isinstance(value, str): + output_container.metadata[key] = value + else: + output_container.metadata[key] = json.dumps(value) + + # Add streams to the new container + stream_map = {} + for stream in streams: + if isinstance(stream, (av.VideoStream, av.AudioStream, SubtitleStream)): + out_stream = output_container.add_stream_from_template(template=stream, opaque=True) + stream_map[stream] = out_stream + + # Write packets to the new container + for packet in container.demux(): + if packet.stream in stream_map and packet.dts is not None: + packet.stream = stream_map[packet.stream] + output_container.mux(packet) + +class VideoFromComponents(VideoInput): + """ + Class representing video input from tensors. + """ + + def __init__(self, components: VideoComponents): + self.__components = components + + def get_components(self) -> VideoComponents: + return VideoComponents( + images=self.__components.images, + audio=self.__components.audio, + frame_rate=self.__components.frame_rate + ) + + def save_to( + self, + path: str, + format: VideoContainer = VideoContainer.AUTO, + codec: VideoCodec = VideoCodec.AUTO, + metadata: Optional[dict] = None + ): + if format != VideoContainer.AUTO and format != VideoContainer.MP4: + raise ValueError("Only MP4 format is supported for now") + if codec != VideoCodec.AUTO and codec != VideoCodec.H264: + raise ValueError("Only H264 codec is supported for now") + with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}) as output: + # Add metadata before writing any streams + if metadata is not None: + for key, value in metadata.items(): + output.metadata[key] = json.dumps(value) + + frame_rate = Fraction(round(self.__components.frame_rate * 1000), 1000) + # Create a video stream + video_stream = output.add_stream('h264', rate=frame_rate) + video_stream.width = self.__components.images.shape[2] + video_stream.height = self.__components.images.shape[1] + video_stream.pix_fmt = 'yuv420p' + + # Create an audio stream + audio_sample_rate = 1 + audio_stream: Optional[av.AudioStream] = None + if self.__components.audio: + audio_sample_rate = int(self.__components.audio['sample_rate']) + audio_stream = output.add_stream('aac', rate=audio_sample_rate) + audio_stream.sample_rate = audio_sample_rate + audio_stream.format = 'fltp' + + # Encode video + for i, frame in enumerate(self.__components.images): + img = (frame * 255).clamp(0, 255).byte().cpu().numpy() # shape: (H, W, 3) + frame = av.VideoFrame.from_ndarray(img, format='rgb24') + frame = frame.reformat(format='yuv420p') # Convert to YUV420P as required by h264 + packet = video_stream.encode(frame) + output.mux(packet) + + # Flush video + packet = video_stream.encode(None) + output.mux(packet) + + if audio_stream and self.__components.audio: + # Encode audio + samples_per_frame = int(audio_sample_rate / frame_rate) + num_frames = self.__components.audio['waveform'].shape[2] // samples_per_frame + for i in range(num_frames): + start = i * samples_per_frame + end = start + samples_per_frame + # TODO(Feature) - Add support for stereo audio + chunk = self.__components.audio['waveform'][0, 0, start:end].unsqueeze(0).numpy() + audio_frame = 
av.AudioFrame.from_ndarray(chunk, format='fltp', layout='mono') + audio_frame.sample_rate = audio_sample_rate + audio_frame.pts = i * samples_per_frame + for packet in audio_stream.encode(audio_frame): + output.mux(packet) + + # Flush audio + for packet in audio_stream.encode(None): + output.mux(packet) + diff --git a/comfy_api/util/__init__.py b/comfy_api/util/__init__.py new file mode 100644 index 000000000..9019c46db --- /dev/null +++ b/comfy_api/util/__init__.py @@ -0,0 +1,8 @@ +from .video_types import VideoContainer, VideoCodec, VideoComponents + +__all__ = [ + # Utility Types + "VideoContainer", + "VideoCodec", + "VideoComponents", +] diff --git a/comfy_api/util/video_types.py b/comfy_api/util/video_types.py new file mode 100644 index 000000000..d09663db9 --- /dev/null +++ b/comfy_api/util/video_types.py @@ -0,0 +1,51 @@ +from __future__ import annotations +from dataclasses import dataclass +from enum import Enum +from fractions import Fraction +from typing import Optional +from comfy_api.input import ImageInput, AudioInput + +class VideoCodec(str, Enum): + AUTO = "auto" + H264 = "h264" + + @classmethod + def as_input(cls) -> list[str]: + """ + Returns a list of codec names that can be used as node input. + """ + return [member.value for member in cls] + +class VideoContainer(str, Enum): + AUTO = "auto" + MP4 = "mp4" + + @classmethod + def as_input(cls) -> list[str]: + """ + Returns a list of container names that can be used as node input. + """ + return [member.value for member in cls] + + @classmethod + def get_extension(cls, value) -> str: + """ + Returns the file extension for the container. + """ + if isinstance(value, str): + value = cls(value) + if value == VideoContainer.MP4 or value == VideoContainer.AUTO: + return "mp4" + return "" + +@dataclass +class VideoComponents: + """ + Dataclass representing the components of a video. + """ + + images: ImageInput + frame_rate: Fraction + audio: Optional[AudioInput] = None + metadata: Optional[dict] = None + diff --git a/comfy_extras/nodes_video.py b/comfy_extras/nodes_video.py index a9e244ebe..61f7171b2 100644 --- a/comfy_extras/nodes_video.py +++ b/comfy_extras/nodes_video.py @@ -5,9 +5,13 @@ import av import torch import folder_paths import json +from typing import Optional, Literal from fractions import Fraction -from comfy.comfy_types import FileLocator - +from comfy.comfy_types import IO, FileLocator, ComfyNodeABC +from comfy_api.input import ImageInput, AudioInput, VideoInput +from comfy_api.util import VideoContainer, VideoCodec, VideoComponents +from comfy_api.input_impl import VideoFromFile, VideoFromComponents +from comfy.cli_args import args class SaveWEBM: def __init__(self): @@ -75,7 +79,163 @@ class SaveWEBM: return {"ui": {"images": results, "animated": (True,)}} # TODO: frontend side +class SaveVideo(ComfyNodeABC): + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type: Literal["output"] = "output" + self.prefix_append = "" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "video": (IO.VIDEO, {"tooltip": "The video to save."}), + "filename_prefix": ("STRING", {"default": "video/ComfyUI", "tooltip": "The prefix for the file to save. 
This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}), + "format": (VideoContainer.as_input(), {"default": "auto", "tooltip": "The format to save the video as."}), + "codec": (VideoCodec.as_input(), {"default": "auto", "tooltip": "The codec to use for the video."}), + }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO" + }, + } + + RETURN_TYPES = () + FUNCTION = "save_video" + + OUTPUT_NODE = True + + CATEGORY = "image/video" + DESCRIPTION = "Saves the input images to your ComfyUI output directory." + + def save_video(self, video: VideoInput, filename_prefix, format, codec, prompt=None, extra_pnginfo=None): + filename_prefix += self.prefix_append + width, height = video.get_dimensions() + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( + filename_prefix, + self.output_dir, + width, + height + ) + results: list[FileLocator] = list() + saved_metadata = None + if not args.disable_metadata: + metadata = {} + if extra_pnginfo is not None: + metadata.update(extra_pnginfo) + if prompt is not None: + metadata["prompt"] = prompt + if len(metadata) > 0: + saved_metadata = metadata + file = f"{filename}_{counter:05}_.{VideoContainer.get_extension(format)}" + video.save_to( + os.path.join(full_output_folder, file), + format=format, + codec=codec, + metadata=saved_metadata + ) + + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + counter += 1 + + return { "ui": { "images": results, "animated": (True,) } } + +class CreateVideo(ComfyNodeABC): + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": (IO.IMAGE, {"tooltip": "The images to create a video from."}), + "fps": ("FLOAT", {"default": 30.0, "min": 1.0, "max": 120.0, "step": 1.0}), + }, + "optional": { + "audio": (IO.AUDIO, {"tooltip": "The audio to add to the video."}), + } + } + + RETURN_TYPES = (IO.VIDEO,) + FUNCTION = "create_video" + + CATEGORY = "image/video" + DESCRIPTION = "Create a video from images." + + def create_video(self, images: ImageInput, fps: float, audio: Optional[AudioInput] = None): + return (VideoFromComponents( + VideoComponents( + images=images, + audio=audio, + frame_rate=Fraction(fps), + ) + ),) + +class GetVideoComponents(ComfyNodeABC): + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "video": (IO.VIDEO, {"tooltip": "The video to extract components from."}), + } + } + RETURN_TYPES = (IO.IMAGE, IO.AUDIO, IO.FLOAT) + RETURN_NAMES = ("images", "audio", "fps") + FUNCTION = "get_components" + + CATEGORY = "image/video" + DESCRIPTION = "Extracts all components from a video: frames, audio, and framerate." 
+ + def get_components(self, video: VideoInput): + components = video.get_components() + + return (components.images, components.audio, float(components.frame_rate)) + +class LoadVideo(ComfyNodeABC): + @classmethod + def INPUT_TYPES(cls): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + files = folder_paths.filter_files_content_types(files, ["video"]) + return {"required": + {"file": (sorted(files), {"video_upload": True})}, + } + + CATEGORY = "image/video" + + RETURN_TYPES = (IO.VIDEO,) + FUNCTION = "load_video" + def load_video(self, file): + video_path = folder_paths.get_annotated_filepath(file) + return (VideoFromFile(video_path),) + + @classmethod + def IS_CHANGED(cls, file): + video_path = folder_paths.get_annotated_filepath(file) + mod_time = os.path.getmtime(video_path) + # Instead of hashing the file, we can just use the modification time to avoid + # rehashing large files. + return mod_time + + @classmethod + def VALIDATE_INPUTS(cls, file): + if not folder_paths.exists_annotated_filepath(file): + return "Invalid video file: {}".format(file) + + return True NODE_CLASS_MAPPINGS = { "SaveWEBM": SaveWEBM, + "SaveVideo": SaveVideo, + "CreateVideo": CreateVideo, + "GetVideoComponents": GetVideoComponents, + "LoadVideo": LoadVideo, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "SaveVideo": "Save Video", + "CreateVideo": "Create Video", + "GetVideoComponents": "Get Video Components", + "LoadVideo": "Load Video", } diff --git a/folder_paths.py b/folder_paths.py index 9a525e5a1..f0b3fd103 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -4,7 +4,7 @@ import os import time import mimetypes import logging -from typing import Literal +from typing import Literal, List from collections.abc import Collection from comfy.cli_args import args @@ -141,7 +141,7 @@ def get_directory_by_type(type_name: str) -> str | None: return get_input_directory() return None -def filter_files_content_types(files: list[str], content_types: Literal["image", "video", "audio", "model"]) -> list[str]: +def filter_files_content_types(files: list[str], content_types: List[Literal["image", "video", "audio", "model"]]) -> list[str]: """ Example: files = os.listdir(folder_paths.get_input_directory()) From 005a91ce2b6455d7e76f7a5cf2e77dbd020afa1f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 29 Apr 2025 03:29:38 -0700 Subject: [PATCH 0075/1073] Latest desktop and portable should work on blackwell. (#7861) Removed the mention about the cards from the readme. --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 62800bb4f..24a65b942 100644 --- a/README.md +++ b/README.md @@ -149,8 +149,6 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you If you have trouble extracting it, right click the file -> properties -> unblock -If you have a 50 series Blackwell card like a 5090 or 5080 see [this discussion thread](https://github.com/comfyanonymous/ComfyUI/discussions/6643) - #### How do I share models between another UI and ComfyUI? See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor. 
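For orientation, a hypothetical usage sketch of the VIDEO types introduced in #7844 above; the file paths and tensor contents are invented for illustration:

from fractions import Fraction
import torch

from comfy_api.input_impl import VideoFromFile, VideoFromComponents
from comfy_api.util import VideoComponents, VideoContainer, VideoCodec

# Wrap an existing file: nothing is decoded until a consumer asks for it.
video = VideoFromFile("input/example.mp4")   # hypothetical path
width, height = video.get_dimensions()       # parses stream headers only
components = video.get_components()          # decodes frames (and audio, if present)

# Build a video from tensors and encode it as H.264 in an MP4 container.
images = torch.rand(24, 240, 320, 3)         # [frames, H, W, C], values in 0..1
clip = VideoFromComponents(VideoComponents(images=images, frame_rate=Fraction(24)))
clip.save_to("output/example.mp4", format=VideoContainer.MP4, codec=VideoCodec.H264)

When the requested container and codec already match the source, VideoFromFile.save_to copies the existing streams through without re-encoding.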
From 45503f649925fdf8e6ddce85fabee551ee3c446b Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Tue, 29 Apr 2025 06:32:34 -0400 Subject: [PATCH 0076/1073] Add release process section to README (#7855) * Add release process section to README * move * Update README.md --- README.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 24a65b942..0f39cfce2 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,6 @@ Supports all operating systems and GPU types (NVIDIA, AMD, Intel, Apple Silicon, ## [Examples](https://comfyanonymous.github.io/ComfyUI_examples/) See what ComfyUI can do with the [example workflows](https://comfyanonymous.github.io/ComfyUI_examples/). - ## Features - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything. - Image Models @@ -99,6 +98,23 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/) +## Release Process + +ComfyUI follows a weekly release cycle every Friday, with three interconnected repositories: + +1. **[ComfyUI Core](https://github.com/comfyanonymous/ComfyUI)** + - Releases a new stable version (e.g., v0.7.0) + - Serves as the foundation for the desktop release + +2. **[ComfyUI Desktop](https://github.com/Comfy-Org/desktop)** + - Builds a new release using the latest stable core version + - Version numbers match the core release (e.g., Desktop v1.7.0 uses Core v1.7.0) + +3. **[ComfyUI Frontend](https://github.com/Comfy-Org/ComfyUI_frontend)** + - Weekly frontend updates are merged into the core repository + - Features are frozen for the upcoming core release + - Development continues for the next release cycle + ## Shortcuts | Keybind | Explanation | From 5c5457a4ef151f9f855020ff544b4362cdcf1201 Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Tue, 29 Apr 2025 11:28:04 -0400 Subject: [PATCH 0077/1073] support more example folders (#7836) * support more example folders * add warning message --- app/custom_node_manager.py | 43 ++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/app/custom_node_manager.py b/app/custom_node_manager.py index 42b0d75ba..27d85d9ce 100644 --- a/app/custom_node_manager.py +++ b/app/custom_node_manager.py @@ -93,16 +93,20 @@ class CustomNodeManager: def add_routes(self, routes, webapp, loadedModules): + example_workflow_folder_names = ["example_workflows", "example", "examples", "workflow", "workflows"] + @routes.get("/workflow_templates") async def get_workflow_templates(request): """Returns a web response that contains the map of custom_nodes names and their associated workflow templates. The ones without templates are omitted.""" - files = [ - file - for folder in folder_paths.get_folder_paths("custom_nodes") - for file in glob.glob( - os.path.join(folder, "*/example_workflows/*.json") - ) - ] + + files = [] + + for folder in folder_paths.get_folder_paths("custom_nodes"): + for folder_name in example_workflow_folder_names: + pattern = os.path.join(folder, f"*/{folder_name}/*.json") + matched_files = glob.glob(pattern) + files.extend(matched_files) + workflow_templates_dict = ( {} ) # custom_nodes folder name -> example workflow names @@ -118,15 +122,22 @@ class CustomNodeManager: # Serve workflow templates from custom nodes. 
for module_name, module_dir in loadedModules: - workflows_dir = os.path.join(module_dir, "example_workflows") - if os.path.exists(workflows_dir): - webapp.add_routes( - [ - web.static( - "/api/workflow_templates/" + module_name, workflows_dir - ) - ] - ) + for folder_name in example_workflow_folder_names: + workflows_dir = os.path.join(module_dir, folder_name) + + if os.path.exists(workflows_dir): + if folder_name != "example_workflows": + logging.warning( + "WARNING: Found example workflow folder '%s' for custom node '%s', consider renaming it to 'example_workflows'", + folder_name, module_name) + + webapp.add_routes( + [ + web.static( + "/api/workflow_templates/" + module_name, workflows_dir + ) + ] + ) @routes.get("/i18n") async def get_i18n(request): From 0a66d4b0afe4a78a200809b7d1d3beec6c6a2a8f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 29 Apr 2025 17:28:52 -0700 Subject: [PATCH 0078/1073] Per device stream counters for async offload. (#7873) --- comfy/model_management.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 78317af3c..44aff3762 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -946,9 +946,9 @@ if args.async_offload: NUM_STREAMS = 2 logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS)) -stream_counter = 0 +stream_counters = {} def get_offload_stream(device): - global stream_counter + stream_counter = stream_counters.get(device, 0) if NUM_STREAMS <= 1: return None @@ -958,6 +958,7 @@ def get_offload_stream(device): stream_counter = (stream_counter + 1) % len(ss) if is_device_cuda(device): ss[stream_counter].wait_stream(torch.cuda.current_stream()) + stream_counters[device] = stream_counter return s elif is_device_cuda(device): ss = [] @@ -966,6 +967,7 @@ def get_offload_stream(device): STREAMS[device] = ss s = ss[stream_counter] stream_counter = (stream_counter + 1) % len(ss) + stream_counters[device] = stream_counter return s return None From 7ee96455e2ed29293aa6076db9b4866862d41142 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 29 Apr 2025 17:38:45 -0700 Subject: [PATCH 0079/1073] Bump minimum pyav version to 14.2.0 (#7874) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 10cc177af..f64a05947 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,5 +22,5 @@ psutil kornia>=0.7.1 spandrel soundfile -av>=14.1.0 +av>=14.2.0 pydantic~=2.0 From dbc726f80c9ac0512d2611fad63d984b8c03886f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 29 Apr 2025 17:42:00 -0700 Subject: [PATCH 0080/1073] Better vace memory estimation. 
(#7875) --- comfy/ldm/wan/model.py | 1 + comfy/supported_models.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index b8eec3afb..66bee7480 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -631,6 +631,7 @@ class VaceWanModel(WanModel): if ii is not None: c_skip, c = self.vace_blocks[ii](c, x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) x += c_skip * vace_strength + del c_skip # head x = self.head(x, e) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 5e55035cf..69bcee1f7 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -993,6 +993,10 @@ class WAN21_Vace(WAN21_T2V): "model_type": "vace", } + def __init__(self, unet_config): + super().__init__(unet_config) + self.memory_usage_factor = 1.2 * self.memory_usage_factor + def get_model(self, state_dict, prefix="", device=None): out = model_base.WAN21_Vace(self, image_to_video=False, device=device) return out From b1c7291569daaeec34ccf6df25d57886eb73d98e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 30 Apr 2025 11:18:20 -0700 Subject: [PATCH 0081/1073] Test updater in the windows release workflow. (#7886) --- .github/workflows/windows_release_package.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index 80a45b321..3926a65f3 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -88,6 +88,8 @@ jobs: cd ComfyUI_windows_portable python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + python_embeded/python.exe -s ./update/update.py ComfyUI/ + ls - name: Upload binaries to release From 39c27a37052b5e18850a6c63cd0a4b92131db3ba Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 30 Apr 2025 11:42:18 -0700 Subject: [PATCH 0082/1073] Add updater test to stable release workflow. 
(#7887) --- .github/workflows/stable-release.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index c4302cdd6..a046ff9ea 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -91,6 +91,8 @@ jobs: cd ComfyUI_windows_portable python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + python_embeded/python.exe -s ./update/update.py ComfyUI/ + ls - name: Upload binaries to release From 4ca3d842774421968cc19c319ee96daa58a98fbb Mon Sep 17 00:00:00 2001 From: Silver <65376327+silveroxides@users.noreply.github.com> Date: Thu, 1 May 2025 02:57:00 +0200 Subject: [PATCH 0083/1073] Support for Chroma - Flux1 Schnell distilled with CFG (#7355) * Upload files for Chroma Implementation * Remove trailing whitespace * trim more trailing whitespace..oops * remove unused imports * Add supported_inference_dtypes * Set min_length to 0 and remove attention_mask=True * Set min_length to 1 * get_mdulations added from blepping and minor changes * Add lora conversion if statement in lora.py * Update supported_models.py * update model_base.py * add uptream commits * set modelType.FLOW, will cause beta scheduler to work properly * Adjust memory usage factor and remove unnecessary code * fix mistake * reduce code duplication * remove unused imports * refactor for upstream sync * sync chroma-support with upstream via syncbranch patch * Update sd.py * Add Chroma as option for the OptimalStepsScheduler node --- comfy/ldm/chroma/layers.py | 183 +++++++++++++++++++ comfy/ldm/chroma/math.py | 44 +++++ comfy/ldm/chroma/model.py | 271 +++++++++++++++++++++++++++++ comfy/lora.py | 2 +- comfy/model_base.py | 62 +++++++ comfy/model_detection.py | 26 +++ comfy/sd.py | 5 + comfy/supported_models.py | 30 +++- comfy/text_encoders/chroma.py | 43 +++++ comfy_extras/nodes_optimalsteps.py | 3 +- nodes.py | 2 +- 11 files changed, 667 insertions(+), 4 deletions(-) create mode 100644 comfy/ldm/chroma/layers.py create mode 100644 comfy/ldm/chroma/math.py create mode 100644 comfy/ldm/chroma/model.py create mode 100644 comfy/text_encoders/chroma.py diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py new file mode 100644 index 000000000..dd0b72f70 --- /dev/null +++ b/comfy/ldm/chroma/layers.py @@ -0,0 +1,183 @@ +import torch +from torch import Tensor, nn + +from .math import attention +from comfy.ldm.flux.layers import ( + MLPEmbedder, + RMSNorm, + QKNorm, + SelfAttention, + ModulationOut, +) + + + +class ChromaModulationOut(ModulationOut): + @classmethod + def from_offset(cls, tensor: torch.Tensor, offset: int = 0) -> ModulationOut: + return cls( + shift=tensor[:, offset : offset + 1, :], + scale=tensor[:, offset + 1 : offset + 2, :], + gate=tensor[:, offset + 2 : offset + 3, :], + ) + + + + +class Approximator(nn.Module): + def __init__(self, in_dim: int, out_dim: int, hidden_dim: int, n_layers = 5, dtype=None, device=None, operations=None): + super().__init__() + self.in_proj = operations.Linear(in_dim, hidden_dim, bias=True, dtype=dtype, device=device) + self.layers = nn.ModuleList([MLPEmbedder(hidden_dim, hidden_dim, dtype=dtype, device=device, operations=operations) for x in range( n_layers)]) + self.norms = nn.ModuleList([RMSNorm(hidden_dim, dtype=dtype, device=device, operations=operations) for x in range( n_layers)]) + self.out_proj = operations.Linear(hidden_dim, out_dim, dtype=dtype, device=device) + + @property + def device(self): + # Get the device of the module (assumes all 
parameters are on the same device) + return next(self.parameters()).device + + def forward(self, x: Tensor) -> Tensor: + x = self.in_proj(x) + + for layer, norms in zip(self.layers, self.norms): + x = x + layer(norms(x)) + + x = self.out_proj(x) + + return x + + +class DoubleStreamBlock(nn.Module): + def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, dtype=None, device=None, operations=None): + super().__init__() + + mlp_hidden_dim = int(hidden_size * mlp_ratio) + self.num_heads = num_heads + self.hidden_size = hidden_size + self.img_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, dtype=dtype, device=device, operations=operations) + + self.img_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.img_mlp = nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), + nn.GELU(approximate="tanh"), + operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), + ) + + self.txt_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, dtype=dtype, device=device, operations=operations) + + self.txt_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.txt_mlp = nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), + nn.GELU(approximate="tanh"), + operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), + ) + self.flipped_img_txt = flipped_img_txt + + def forward(self, img: Tensor, txt: Tensor, pe: Tensor, vec: Tensor, attn_mask=None): + (img_mod1, img_mod2), (txt_mod1, txt_mod2) = vec + + # prepare image for attention + img_modulated = self.img_norm1(img) + img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift + img_qkv = self.img_attn.qkv(img_modulated) + img_q, img_k, img_v = img_qkv.view(img_qkv.shape[0], img_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + img_q, img_k = self.img_attn.norm(img_q, img_k, img_v) + + # prepare txt for attention + txt_modulated = self.txt_norm1(txt) + txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift + txt_qkv = self.txt_attn.qkv(txt_modulated) + txt_q, txt_k, txt_v = txt_qkv.view(txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v) + + # run actual attention + attn = attention(torch.cat((txt_q, img_q), dim=2), + torch.cat((txt_k, img_k), dim=2), + torch.cat((txt_v, img_v), dim=2), + pe=pe, mask=attn_mask) + + txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :] + + # calculate the img bloks + img = img + img_mod1.gate * self.img_attn.proj(img_attn) + img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift) + + # calculate the txt bloks + txt += txt_mod1.gate * self.txt_attn.proj(txt_attn) + txt += txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift) + + if txt.dtype == torch.float16: + txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504) + + return img, txt + + +class SingleStreamBlock(nn.Module): + """ + A DiT 
block with parallel linear layers as described in + https://arxiv.org/abs/2302.05442 and adapted modulation interface. + """ + + def __init__( + self, + hidden_size: int, + num_heads: int, + mlp_ratio: float = 4.0, + qk_scale: float = None, + dtype=None, + device=None, + operations=None + ): + super().__init__() + self.hidden_dim = hidden_size + self.num_heads = num_heads + head_dim = hidden_size // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.mlp_hidden_dim = int(hidden_size * mlp_ratio) + # qkv and mlp_in + self.linear1 = operations.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim, dtype=dtype, device=device) + # proj and mlp_out + self.linear2 = operations.Linear(hidden_size + self.mlp_hidden_dim, hidden_size, dtype=dtype, device=device) + + self.norm = QKNorm(head_dim, dtype=dtype, device=device, operations=operations) + + self.hidden_size = hidden_size + self.pre_norm = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + + self.mlp_act = nn.GELU(approximate="tanh") + + def forward(self, x: Tensor, pe: Tensor, vec: Tensor, attn_mask=None) -> Tensor: + mod = vec + x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift + qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) + + q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k = self.norm(q, k, v) + + # compute attention + attn = attention(q, k, v, pe=pe, mask=attn_mask) + # compute activation in mlp stream, cat again and run second linear layer + output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2)) + x += mod.gate * output + if x.dtype == torch.float16: + x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504) + return x + + +class LastLayer(nn.Module): + def __init__(self, hidden_size: int, patch_size: int, out_channels: int, dtype=None, device=None, operations=None): + super().__init__() + self.norm_final = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.linear = operations.Linear(hidden_size, out_channels, bias=True, dtype=dtype, device=device) + + def forward(self, x: Tensor, vec: Tensor) -> Tensor: + shift, scale = vec + shift = shift.squeeze(1) + scale = scale.squeeze(1) + x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :] + x = self.linear(x) + return x diff --git a/comfy/ldm/chroma/math.py b/comfy/ldm/chroma/math.py new file mode 100644 index 000000000..36b67931c --- /dev/null +++ b/comfy/ldm/chroma/math.py @@ -0,0 +1,44 @@ +import torch +from einops import rearrange +from torch import Tensor + +from comfy.ldm.modules.attention import optimized_attention +import comfy.model_management + + +def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor: + q_shape = q.shape + k_shape = k.shape + + q = q.float().reshape(*q.shape[:-1], -1, 1, 2) + k = k.float().reshape(*k.shape[:-1], -1, 1, 2) + q = (pe[..., 0] * q[..., 0] + pe[..., 1] * q[..., 1]).reshape(*q_shape).type_as(v) + k = (pe[..., 0] * k[..., 0] + pe[..., 1] * k[..., 1]).reshape(*k_shape).type_as(v) + + heads = q.shape[1] + x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask) + return x + + +def rope(pos: Tensor, dim: int, theta: int) -> Tensor: + assert dim % 2 == 0 + if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu() or comfy.model_management.is_directml_enabled(): + device = torch.device("cpu") + else: + device = pos.device + + scale = 
torch.linspace(0, (dim - 2) / dim, steps=dim//2, dtype=torch.float64, device=device) + omega = 1.0 / (theta**scale) + out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega) + out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1) + out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2) + return out.to(dtype=torch.float32, device=pos.device) + + +def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor): + xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2) + xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2) + xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1] + xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1] + return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk) + diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py new file mode 100644 index 000000000..636748fc5 --- /dev/null +++ b/comfy/ldm/chroma/model.py @@ -0,0 +1,271 @@ +#Original code can be found on: https://github.com/black-forest-labs/flux + +from dataclasses import dataclass + +import torch +from torch import Tensor, nn +from einops import rearrange, repeat +import comfy.ldm.common_dit + +from comfy.ldm.flux.layers import ( + EmbedND, + timestep_embedding, +) + +from .layers import ( + DoubleStreamBlock, + LastLayer, + SingleStreamBlock, + Approximator, + ChromaModulationOut, +) + + +@dataclass +class ChromaParams: + in_channels: int + out_channels: int + context_in_dim: int + hidden_size: int + mlp_ratio: float + num_heads: int + depth: int + depth_single_blocks: int + axes_dim: list + theta: int + patch_size: int + qkv_bias: bool + in_dim: int + out_dim: int + hidden_dim: int + n_layers: int + + + + +class Chroma(nn.Module): + """ + Transformer model for flow matching on sequences. + """ + + def __init__(self, image_model=None, final_layer=True, dtype=None, device=None, operations=None, **kwargs): + super().__init__() + self.dtype = dtype + params = ChromaParams(**kwargs) + self.params = params + self.patch_size = params.patch_size + self.in_channels = params.in_channels + self.out_channels = params.out_channels + if params.hidden_size % params.num_heads != 0: + raise ValueError( + f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}" + ) + pe_dim = params.hidden_size // params.num_heads + if sum(params.axes_dim) != pe_dim: + raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}") + self.hidden_size = params.hidden_size + self.num_heads = params.num_heads + self.in_dim = params.in_dim + self.out_dim = params.out_dim + self.hidden_dim = params.hidden_dim + self.n_layers = params.n_layers + self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim) + self.img_in = operations.Linear(self.in_channels, self.hidden_size, bias=True, dtype=dtype, device=device) + self.txt_in = operations.Linear(params.context_in_dim, self.hidden_size, dtype=dtype, device=device) + # set as nn identity for now, will overwrite it later. 
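Note on the `rope` helper above (this `chroma/math.py` copy is deleted again in the cleanup commit further down, in favor of the existing `comfy.ldm.flux.math`): it emits one 2x2 rotation matrix per (position, frequency) pair. A small self-check of that construction, assuming nothing beyond torch and dropping the device-specific shuffling:

```python
import torch

def rope(pos: torch.Tensor, dim: int, theta: int) -> torch.Tensor:
    # Same construction as above: per-frequency rotation angles pos * omega.
    scale = torch.linspace(0, (dim - 2) / dim, steps=dim // 2, dtype=torch.float64)
    omega = 1.0 / (theta ** scale)
    out = torch.einsum("...n,d->...nd", pos.to(torch.float64), omega)
    out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
    return out.reshape(*out.shape[:-1], 2, 2)

pe = rope(torch.arange(4), dim=8, theta=10000)
print(pe.shape)               # torch.Size([4, 4, 2, 2])
r = pe[3, 0]                  # rotation for position 3, lowest frequency
v = torch.randn(2, dtype=torch.float64)
assert torch.allclose(r.T @ (r @ v), v)  # each 2x2 block is an orthogonal rotation
```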
+ self.distilled_guidance_layer = Approximator( + in_dim=self.in_dim, + hidden_dim=self.hidden_dim, + out_dim=self.out_dim, + n_layers=self.n_layers, + dtype=dtype, device=device, operations=operations + ) + + + self.double_blocks = nn.ModuleList( + [ + DoubleStreamBlock( + self.hidden_size, + self.num_heads, + mlp_ratio=params.mlp_ratio, + qkv_bias=params.qkv_bias, + dtype=dtype, device=device, operations=operations + ) + for _ in range(params.depth) + ] + ) + + self.single_blocks = nn.ModuleList( + [ + SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, dtype=dtype, device=device, operations=operations) + for _ in range(params.depth_single_blocks) + ] + ) + + if final_layer: + self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels, dtype=dtype, device=device, operations=operations) + + self.skip_mmdit = [] + self.skip_dit = [] + self.lite = False + + def get_modulations(self, tensor: torch.Tensor, block_type: str, *, idx: int = 0): + # This function slices up the modulations tensor which has the following layout: + # single : num_single_blocks * 3 elements + # double_img : num_double_blocks * 6 elements + # double_txt : num_double_blocks * 6 elements + # final : 2 elements + if block_type == "final": + return (tensor[:, -2:-1, :], tensor[:, -1:, :]) + single_block_count = self.params.depth_single_blocks + double_block_count = self.params.depth + offset = 3 * idx + if block_type == "single": + return ChromaModulationOut.from_offset(tensor, offset) + # Double block modulations are 6 elements so we double 3 * idx. + offset *= 2 + if block_type in {"double_img", "double_txt"}: + # Advance past the single block modulations. + offset += 3 * single_block_count + if block_type == "double_txt": + # Advance past the double block img modulations. 
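Note: `get_modulations` above addresses one flat tensor that packs 3 entries per single block, 6 per double block per stream (img and txt), plus a final pair. With Flux-Schnell-sized depths (19 double blocks, 38 single blocks) that is 3·38 + 6·19 + 6·19 + 2 = 344, exactly the `mod_index_length = 344` used in `forward_orig` below. A quick check of the offset arithmetic, with the depths hard-coded purely for illustration:

```python
depth_single_blocks, depth = 38, 19       # Flux-Schnell sized, for illustration

def offset_for(block_type: str, idx: int) -> int:
    offset = 3 * idx
    if block_type == "single":
        return offset
    offset *= 2                            # double blocks consume 6 entries each
    offset += 3 * depth_single_blocks      # skip past the single-block region
    if block_type == "double_txt":
        offset += 6 * depth                # skip past the double-img region
    return offset

total = 3 * depth_single_blocks + 6 * depth + 6 * depth + 2
assert total == 344                        # matches mod_index_length in forward_orig
assert offset_for("double_img", 0) == 3 * depth_single_blocks
assert offset_for("double_txt", depth - 1) + 6 == total - 2  # last txt slice ends at the final pair
```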
+ offset += 6 * double_block_count + return ( + ChromaModulationOut.from_offset(tensor, offset), + ChromaModulationOut.from_offset(tensor, offset + 3), + ) + raise ValueError("Bad block_type") + + + def forward_orig( + self, + img: Tensor, + img_ids: Tensor, + txt: Tensor, + txt_ids: Tensor, + timesteps: Tensor, + guidance: Tensor = None, + control = None, + transformer_options={}, + attn_mask: Tensor = None, + ) -> Tensor: + patches_replace = transformer_options.get("patches_replace", {}) + if img.ndim != 3 or txt.ndim != 3: + raise ValueError("Input img and txt tensors must have 3 dimensions.") + + # running on sequences img + img = self.img_in(img) + + # distilled vector guidance + mod_index_length = 344 + distill_timestep = timestep_embedding(timesteps.detach().clone(), 16).to(img.device, img.dtype) + # guidance = guidance * + distil_guidance = timestep_embedding(guidance.detach().clone(), 16).to(img.device, img.dtype) + + # get all modulation index + modulation_index = timestep_embedding(torch.arange(mod_index_length), 32).to(img.device, img.dtype) + # we need to broadcast the modulation index here so each batch has all of the index + modulation_index = modulation_index.unsqueeze(0).repeat(img.shape[0], 1, 1).to(img.device, img.dtype) + # and we need to broadcast timestep and guidance along too + timestep_guidance = torch.cat([distill_timestep, distil_guidance], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1).to(img.dtype).to(img.device, img.dtype) + # then and only then we could concatenate it together + input_vec = torch.cat([timestep_guidance, modulation_index], dim=-1).to(img.device, img.dtype) + + mod_vectors = self.distilled_guidance_layer(input_vec) + + txt = self.txt_in(txt) + + ids = torch.cat((txt_ids, img_ids), dim=1) + pe = self.pe_embedder(ids) + + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.double_blocks): + if i not in self.skip_mmdit: + double_mod = ( + self.get_modulations(mod_vectors, "double_img", idx=i), + self.get_modulations(mod_vectors, "double_txt", idx=i), + ) + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"], out["txt"] = block(img=args["img"], + txt=args["txt"], + vec=args["vec"], + pe=args["pe"], + attn_mask=args.get("attn_mask")) + return out + + out = blocks_replace[("double_block", i)]({"img": img, + "txt": txt, + "vec": double_mod, + "pe": pe, + "attn_mask": attn_mask}, + {"original_block": block_wrap}) + txt = out["txt"] + img = out["img"] + else: + img, txt = block(img=img, + txt=txt, + vec=double_mod, + pe=pe, + attn_mask=attn_mask) + + if control is not None: # Controlnet + control_i = control.get("input") + if i < len(control_i): + add = control_i[i] + if add is not None: + img += add + + img = torch.cat((txt, img), 1) + + for i, block in enumerate(self.single_blocks): + if i not in self.skip_dit: + single_mod = self.get_modulations(mod_vectors, "single", idx=i) + if ("single_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], + vec=args["vec"], + pe=args["pe"], + attn_mask=args.get("attn_mask")) + return out + + out = blocks_replace[("single_block", i)]({"img": img, + "vec": single_mod, + "pe": pe, + "attn_mask": attn_mask}, + {"original_block": block_wrap}) + img = out["img"] + else: + img = block(img, vec=single_mod, pe=pe, attn_mask=attn_mask) + + if control is not None: # Controlnet + control_o = control.get("output") + if i < len(control_o): + add = control_o[i] + if add is not None: + img[:, txt.shape[1] :, ...] 
+= add + + img = img[:, txt.shape[1] :, ...] + final_mod = self.get_modulations(mod_vectors, "final") + img = self.final_layer(img, vec=final_mod) # (N, T, patch_size ** 2 * out_channels) + return img + + def forward(self, x, timestep, context, guidance, control=None, transformer_options={}, **kwargs): + bs, c, h, w = x.shape + patch_size = 2 + x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size)) + + img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size) + + h_len = ((h + (patch_size // 2)) // patch_size) + w_len = ((w + (patch_size // 2)) // patch_size) + img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) + img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) + img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) + img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs) + + txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype) + out = self.forward_orig(img, img_ids, context, txt_ids, timestep, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None)) + return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h,:w] diff --git a/comfy/lora.py b/comfy/lora.py index fff524be2..dbabd3335 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -252,7 +252,7 @@ def model_lora_keys_unet(model, key_map={}): key_lora = k[len("diffusion_model."):-len(".weight")] key_map["base_model.model.{}".format(key_lora)] = k #official hunyuan lora format - if isinstance(model, comfy.model_base.Flux): #Diffusers lora Flux + if isinstance(model, comfy.model_base.Flux) or isinstance(model, comfy.model_base.Chroma): #Diffusers lora Flux or a diffusers lora Chroma diffusers_keys = comfy.utils.flux_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.") for k in diffusers_keys: if k.endswith(".weight"): diff --git a/comfy/model_base.py b/comfy/model_base.py index d2aa4ce7a..1a06bb503 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -38,6 +38,7 @@ import comfy.ldm.lumina.model import comfy.ldm.wan.model import comfy.ldm.hunyuan3d.model import comfy.ldm.hidream.model +import comfy.ldm.chroma.model import comfy.model_management import comfy.patcher_extension @@ -1108,3 +1109,64 @@ class HiDream(BaseModel): if image_cond is not None: out['image_cond'] = comfy.conds.CONDNoiseShape(self.process_latent_in(image_cond)) return out + +class Chroma(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.chroma.model.Chroma) + + def concat_cond(self, **kwargs): + try: + #Handle Flux control loras dynamically changing the img_in weight. 
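Note: `Chroma.forward` above pads the latent to a multiple of the patch size, folds each 2x2 patch into the channel axis before the transformer, and unfolds it afterwards. The two `rearrange` patterns are exact inverses of each other; a minimal round-trip check using only torch and einops:

```python
import torch
from einops import rearrange

x = torch.randn(1, 16, 8, 8)   # (b, c, h, w), already patch-aligned
seq = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
print(seq.shape)               # torch.Size([1, 16, 64]): 4x4 patches, 16*2*2 features each
back = rearrange(seq, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=4, w=4, ph=2, pw=2)
assert torch.equal(back, x)    # pure reshape/permute, so the round trip is exact
```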
+ num_channels = self.diffusion_model.img_in.weight.shape[1] + except: + #Some cases like tensorrt might not have the weights accessible + num_channels = self.model_config.unet_config["in_channels"] + + out_channels = self.model_config.unet_config["out_channels"] + + if num_channels <= out_channels: + return None + + image = kwargs.get("concat_latent_image", None) + noise = kwargs.get("noise", None) + device = kwargs["device"] + + if image is None: + image = torch.zeros_like(noise) + + image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + image = utils.resize_to_batch_size(image, noise.shape[0]) + image = self.process_latent_in(image) + if num_channels <= out_channels * 2: + return image + + #inpaint model + mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) + if mask is None: + mask = torch.ones_like(noise)[:, :1] + + mask = torch.mean(mask, dim=1, keepdim=True) + mask = utils.common_upscale(mask.to(device), noise.shape[-1] * 8, noise.shape[-2] * 8, "bilinear", "center") + mask = mask.view(mask.shape[0], mask.shape[2] // 8, 8, mask.shape[3] // 8, 8).permute(0, 2, 4, 1, 3).reshape(mask.shape[0], -1, mask.shape[2] // 8, mask.shape[3] // 8) + mask = utils.resize_to_batch_size(mask, noise.shape[0]) + return torch.cat((image, mask), dim=1) + + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + # upscale the attention mask, since now we + attention_mask = kwargs.get("attention_mask", None) + if attention_mask is not None: + shape = kwargs["noise"].shape + mask_ref_size = kwargs["attention_mask_img_shape"] + # the model will pad to the patch size, and then divide + # essentially dividing and rounding up + (h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size)) + attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok)) + out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) + guidance = 0.0 + out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor((guidance,))) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 76de78a8a..daf6d04e7 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -154,6 +154,32 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["guidance_embed"] = len(guidance_keys) > 0 return dit_config + if '{}distilled_guidance_layer.0.norms.0.scale'.format(key_prefix) in state_dict_keys or '{}distilled_guidance_layer.norms.0.scale'.format(key_prefix) in state_dict_keys: #Chroma + dit_config = {} + dit_config["image_model"] = "chroma" + dit_config["depth"] = 48 + dit_config["in_channels"] = 64 + patch_size = 2 + dit_config["patch_size"] = patch_size + in_key = "{}img_in.weight".format(key_prefix) + if in_key in state_dict_keys: + dit_config["in_channels"] = state_dict[in_key].shape[1] + dit_config["out_channels"] = 64 + dit_config["context_in_dim"] = 4096 + dit_config["hidden_size"] = 3072 + dit_config["mlp_ratio"] = 4.0 + dit_config["num_heads"] = 24 + dit_config["depth"] = count_blocks(state_dict_keys, '{}double_blocks.'.format(key_prefix) + '{}.') + dit_config["depth_single_blocks"] = count_blocks(state_dict_keys, '{}single_blocks.'.format(key_prefix) + '{}.') + dit_config["axes_dim"] = [16, 56, 56] + dit_config["theta"] = 10000 + dit_config["qkv_bias"] = True + 
dit_config["in_dim"] = 64 + dit_config["out_dim"] = 3072 + dit_config["hidden_dim"] = 5120 + dit_config["n_layers"] = 5 + return dit_config + if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in state_dict_keys and '{}img_in.weight'.format(key_prefix) in state_dict_keys: #Flux dit_config = {} dit_config["image_model"] = "flux" diff --git a/comfy/sd.py b/comfy/sd.py index 748f6c1ec..454b5929a 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -42,6 +42,7 @@ import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan import comfy.text_encoders.hidream +import comfy.text_encoders.chroma import comfy.model_patcher import comfy.lora @@ -714,6 +715,7 @@ class CLIPType(Enum): LUMINA2 = 12 WAN = 13 HIDREAM = 14 + CHROMA = 15 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -829,6 +831,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**t5xxl_detect(clip_data), clip_l=False, clip_g=False, t5=True, llama=False, dtype_llama=None, llama_scaled_fp8=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer + elif clip_type == CLIPType.CHROMA: + clip_target.clip = comfy.text_encoders.chroma.chroma_te(**t5xxl_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.chroma.ChromaT5Tokenizer else: #CLIPType.MOCHI clip_target.clip = comfy.text_encoders.genmo.mochi_te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.genmo.MochiT5Tokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 69bcee1f7..f03f2790e 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -17,6 +17,7 @@ import comfy.text_encoders.hunyuan_video import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan +import comfy.text_encoders.chroma from . import supported_models_base from . 
import latent_formats @@ -1068,7 +1069,34 @@ class HiDream(supported_models_base.BASE): def clip_target(self, state_dict={}): return None # TODO +class Chroma(supported_models_base.BASE): + unet_config = { + "image_model": "chroma", + } -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream] + unet_extra_config = { + } + + sampling_settings = { + "multiplier": 1.0, + } + + latent_format = comfy.latent_formats.Flux + + memory_usage_factor = 3.2 + + supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] + + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.Chroma(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.chroma.ChromaTokenizer, comfy.text_encoders.chroma.chroma_te(**t5_detect)) + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma] models += [SVD_img2vid] diff --git a/comfy/text_encoders/chroma.py b/comfy/text_encoders/chroma.py new file mode 100644 index 000000000..aa8dffb25 --- /dev/null +++ b/comfy/text_encoders/chroma.py @@ -0,0 +1,43 @@ +from comfy import sd1_clip +import comfy.text_encoders.t5 +import os +from transformers import T5TokenizerFast + + +class T5XXLModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=False, model_options={}): + textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_xxl.json") + t5xxl_scaled_fp8 = model_options.get("t5xxl_scaled_fp8", None) + if t5xxl_scaled_fp8 is not None: + model_options = model_options.copy() + model_options["scaled_fp8"] = t5xxl_scaled_fp8 + + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) + + +class ChromaT5XXL(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, name="t5xxl", clip_model=T5XXLModel, model_options=model_options) + + +class T5XXLTokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 
"t5_tokenizer") + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_data=tokenizer_data) + + +class ChromaT5Tokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer) + + +def chroma_te(dtype_t5=None, t5xxl_scaled_fp8=None): + class ChromaTEModel_(ChromaT5XXL): + def __init__(self, device="cpu", dtype=None, model_options={}): + if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + model_options = model_options.copy() + model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 + if dtype is None: + dtype = dtype_t5 + super().__init__(device=device, dtype=dtype, model_options=model_options) + return ChromaTEModel_ diff --git a/comfy_extras/nodes_optimalsteps.py b/comfy_extras/nodes_optimalsteps.py index f6928199b..102958734 100644 --- a/comfy_extras/nodes_optimalsteps.py +++ b/comfy_extras/nodes_optimalsteps.py @@ -20,13 +20,14 @@ def loglinear_interp(t_steps, num_steps): NOISE_LEVELS = {"FLUX": [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0.8287, 0.5512, 0.2808, 0.001], "Wan":[1.0, 0.997, 0.995, 0.993, 0.991, 0.989, 0.987, 0.985, 0.98, 0.975, 0.973, 0.968, 0.96, 0.946, 0.927, 0.902, 0.864, 0.776, 0.539, 0.208, 0.001], +"Chroma": [0.9919999837875366, 0.9900000095367432, 0.9879999756813049, 0.9850000143051147, 0.9819999933242798, 0.9779999852180481, 0.9729999899864197, 0.9679999947547913, 0.9610000252723694, 0.953000009059906, 0.9430000185966492, 0.9309999942779541, 0.9169999957084656, 0.8999999761581421, 0.8809999823570251, 0.8579999804496765, 0.8320000171661377, 0.8019999861717224, 0.7689999938011169, 0.7310000061988831, 0.6899999976158142, 0.6460000276565552, 0.5989999771118164, 0.550000011920929, 0.5009999871253967, 0.45100000500679016, 0.4020000100135803, 0.35499998927116394, 0.3109999895095825, 0.27000001072883606, 0.23199999332427979, 0.19900000095367432, 0.16899999976158142, 0.14300000667572021, 0.11999999731779099, 0.10100000351667404, 0.08399999886751175, 0.07000000029802322, 0.057999998331069946, 0.04800000041723251, 0.0], } class OptimalStepsScheduler: @classmethod def INPUT_TYPES(s): return {"required": - {"model_type": (["FLUX", "Wan"], ), + {"model_type": (["FLUX", "Wan", "Chroma"], ), "steps": ("INT", {"default": 20, "min": 3, "max": 1000}), "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), } diff --git a/nodes.py b/nodes.py index 73a62d930..f2ced2c35 100644 --- a/nodes.py +++ b/nodes.py @@ -917,7 +917,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), From 08ff5fa08a92e0b3f23b9abec979a830a6cffb03 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 30 Apr 2025 20:57:30 -0400 Subject: [PATCH 0084/1073] Cleanup chroma PR. 
--- comfy/ldm/chroma/layers.py | 2 +- comfy/ldm/chroma/math.py | 44 --------------------- comfy/lora.py | 2 +- comfy/model_base.py | 63 ++++-------------------------- comfy/model_detection.py | 41 ++++++------------- comfy/sd.py | 6 +-- comfy/supported_models.py | 3 +- comfy/text_encoders/chroma.py | 43 -------------------- comfy_extras/nodes_optimalsteps.py | 2 +- 9 files changed, 25 insertions(+), 181 deletions(-) delete mode 100644 comfy/ldm/chroma/math.py delete mode 100644 comfy/text_encoders/chroma.py diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py index dd0b72f70..35da91ee2 100644 --- a/comfy/ldm/chroma/layers.py +++ b/comfy/ldm/chroma/layers.py @@ -1,7 +1,7 @@ import torch from torch import Tensor, nn -from .math import attention +from comfy.ldm.flux.math import attention from comfy.ldm.flux.layers import ( MLPEmbedder, RMSNorm, diff --git a/comfy/ldm/chroma/math.py b/comfy/ldm/chroma/math.py deleted file mode 100644 index 36b67931c..000000000 --- a/comfy/ldm/chroma/math.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -from einops import rearrange -from torch import Tensor - -from comfy.ldm.modules.attention import optimized_attention -import comfy.model_management - - -def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor: - q_shape = q.shape - k_shape = k.shape - - q = q.float().reshape(*q.shape[:-1], -1, 1, 2) - k = k.float().reshape(*k.shape[:-1], -1, 1, 2) - q = (pe[..., 0] * q[..., 0] + pe[..., 1] * q[..., 1]).reshape(*q_shape).type_as(v) - k = (pe[..., 0] * k[..., 0] + pe[..., 1] * k[..., 1]).reshape(*k_shape).type_as(v) - - heads = q.shape[1] - x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask) - return x - - -def rope(pos: Tensor, dim: int, theta: int) -> Tensor: - assert dim % 2 == 0 - if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu() or comfy.model_management.is_directml_enabled(): - device = torch.device("cpu") - else: - device = pos.device - - scale = torch.linspace(0, (dim - 2) / dim, steps=dim//2, dtype=torch.float64, device=device) - omega = 1.0 / (theta**scale) - out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega) - out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1) - out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2) - return out.to(dtype=torch.float32, device=pos.device) - - -def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor): - xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2) - xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2) - xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1] - xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1] - return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk) - diff --git a/comfy/lora.py b/comfy/lora.py index dbabd3335..fff524be2 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -252,7 +252,7 @@ def model_lora_keys_unet(model, key_map={}): key_lora = k[len("diffusion_model."):-len(".weight")] key_map["base_model.model.{}".format(key_lora)] = k #official hunyuan lora format - if isinstance(model, comfy.model_base.Flux) or isinstance(model, comfy.model_base.Chroma): #Diffusers lora Flux or a diffusers lora Chroma + if isinstance(model, comfy.model_base.Flux): #Diffusers lora Flux diffusers_keys = comfy.utils.flux_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.") for k in diffusers_keys: if k.endswith(".weight"): diff --git 
a/comfy/model_base.py b/comfy/model_base.py index 1a06bb503..3d33086d8 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -787,8 +787,8 @@ class PixArt(BaseModel): return out class Flux(BaseModel): - def __init__(self, model_config, model_type=ModelType.FLUX, device=None): - super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.flux.model.Flux) + def __init__(self, model_config, model_type=ModelType.FLUX, device=None, unet_model=comfy.ldm.flux.model.Flux): + super().__init__(model_config, model_type, device=device, unet_model=unet_model) def concat_cond(self, **kwargs): try: @@ -1110,63 +1110,14 @@ class HiDream(BaseModel): out['image_cond'] = comfy.conds.CONDNoiseShape(self.process_latent_in(image_cond)) return out -class Chroma(BaseModel): +class Chroma(Flux): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.chroma.model.Chroma) - def concat_cond(self, **kwargs): - try: - #Handle Flux control loras dynamically changing the img_in weight. - num_channels = self.diffusion_model.img_in.weight.shape[1] - except: - #Some cases like tensorrt might not have the weights accessible - num_channels = self.model_config.unet_config["in_channels"] - - out_channels = self.model_config.unet_config["out_channels"] - - if num_channels <= out_channels: - return None - - image = kwargs.get("concat_latent_image", None) - noise = kwargs.get("noise", None) - device = kwargs["device"] - - if image is None: - image = torch.zeros_like(noise) - - image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") - image = utils.resize_to_batch_size(image, noise.shape[0]) - image = self.process_latent_in(image) - if num_channels <= out_channels * 2: - return image - - #inpaint model - mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) - if mask is None: - mask = torch.ones_like(noise)[:, :1] - - mask = torch.mean(mask, dim=1, keepdim=True) - mask = utils.common_upscale(mask.to(device), noise.shape[-1] * 8, noise.shape[-2] * 8, "bilinear", "center") - mask = mask.view(mask.shape[0], mask.shape[2] // 8, 8, mask.shape[3] // 8, 8).permute(0, 2, 4, 1, 3).reshape(mask.shape[0], -1, mask.shape[2] // 8, mask.shape[3] // 8) - mask = utils.resize_to_batch_size(mask, noise.shape[0]) - return torch.cat((image, mask), dim=1) - - def extra_conds(self, **kwargs): out = super().extra_conds(**kwargs) - cross_attn = kwargs.get("cross_attn", None) - if cross_attn is not None: - out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) - # upscale the attention mask, since now we - attention_mask = kwargs.get("attention_mask", None) - if attention_mask is not None: - shape = kwargs["noise"].shape - mask_ref_size = kwargs["attention_mask_img_shape"] - # the model will pad to the patch size, and then divide - # essentially dividing and rounding up - (h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size)) - attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok)) - out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) - guidance = 0.0 - out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor((guidance,))) + + guidance = kwargs.get("guidance", 0) + if guidance is not None: + out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 
daf6d04e7..9254843ea 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -154,32 +154,6 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["guidance_embed"] = len(guidance_keys) > 0 return dit_config - if '{}distilled_guidance_layer.0.norms.0.scale'.format(key_prefix) in state_dict_keys or '{}distilled_guidance_layer.norms.0.scale'.format(key_prefix) in state_dict_keys: #Chroma - dit_config = {} - dit_config["image_model"] = "chroma" - dit_config["depth"] = 48 - dit_config["in_channels"] = 64 - patch_size = 2 - dit_config["patch_size"] = patch_size - in_key = "{}img_in.weight".format(key_prefix) - if in_key in state_dict_keys: - dit_config["in_channels"] = state_dict[in_key].shape[1] - dit_config["out_channels"] = 64 - dit_config["context_in_dim"] = 4096 - dit_config["hidden_size"] = 3072 - dit_config["mlp_ratio"] = 4.0 - dit_config["num_heads"] = 24 - dit_config["depth"] = count_blocks(state_dict_keys, '{}double_blocks.'.format(key_prefix) + '{}.') - dit_config["depth_single_blocks"] = count_blocks(state_dict_keys, '{}single_blocks.'.format(key_prefix) + '{}.') - dit_config["axes_dim"] = [16, 56, 56] - dit_config["theta"] = 10000 - dit_config["qkv_bias"] = True - dit_config["in_dim"] = 64 - dit_config["out_dim"] = 3072 - dit_config["hidden_dim"] = 5120 - dit_config["n_layers"] = 5 - return dit_config - if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in state_dict_keys and '{}img_in.weight'.format(key_prefix) in state_dict_keys: #Flux dit_config = {} dit_config["image_model"] = "flux" @@ -190,7 +164,9 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): if in_key in state_dict_keys: dit_config["in_channels"] = state_dict[in_key].shape[1] // (patch_size * patch_size) dit_config["out_channels"] = 16 - dit_config["vec_in_dim"] = 768 + vec_in_key = '{}vector_in.in_layer.weight'.format(key_prefix) + if vec_in_key in state_dict_keys: + dit_config["vec_in_dim"] = state_dict[vec_in_key].shape[1] dit_config["context_in_dim"] = 4096 dit_config["hidden_size"] = 3072 dit_config["mlp_ratio"] = 4.0 @@ -200,7 +176,16 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["axes_dim"] = [16, 56, 56] dit_config["theta"] = 10000 dit_config["qkv_bias"] = True - dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys + if '{}distilled_guidance_layer.0.norms.0.scale'.format(key_prefix) in state_dict_keys or '{}distilled_guidance_layer.norms.0.scale'.format(key_prefix) in state_dict_keys: #Chroma + dit_config["image_model"] = "chroma" + dit_config["in_channels"] = 64 + dit_config["out_channels"] = 64 + dit_config["in_dim"] = 64 + dit_config["out_dim"] = 3072 + dit_config["hidden_dim"] = 5120 + dit_config["n_layers"] = 5 + else: + dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys return dit_config if '{}t5_yproj.weight'.format(key_prefix) in state_dict_keys: #Genmo mochi preview diff --git a/comfy/sd.py b/comfy/sd.py index 454b5929a..da9b36d0e 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -42,7 +42,6 @@ import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan import comfy.text_encoders.hidream -import comfy.text_encoders.chroma import comfy.model_patcher import comfy.lora @@ -820,7 +819,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip elif clip_type == CLIPType.LTXV: clip_target.clip = 
comfy.text_encoders.lt.ltxv_te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.lt.LTXVT5Tokenizer - elif clip_type == CLIPType.PIXART: + elif clip_type == CLIPType.PIXART or clip_type == CLIPType.CHROMA: clip_target.clip = comfy.text_encoders.pixart_t5.pixart_te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.pixart_t5.PixArtTokenizer elif clip_type == CLIPType.WAN: @@ -831,9 +830,6 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**t5xxl_detect(clip_data), clip_l=False, clip_g=False, t5=True, llama=False, dtype_llama=None, llama_scaled_fp8=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer - elif clip_type == CLIPType.CHROMA: - clip_target.clip = comfy.text_encoders.chroma.chroma_te(**t5xxl_detect(clip_data)) - clip_target.tokenizer = comfy.text_encoders.chroma.ChromaT5Tokenizer else: #CLIPType.MOCHI clip_target.clip = comfy.text_encoders.genmo.mochi_te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.genmo.MochiT5Tokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index f03f2790e..d5210cfac 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -17,7 +17,6 @@ import comfy.text_encoders.hunyuan_video import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan -import comfy.text_encoders.chroma from . import supported_models_base from . import latent_formats @@ -1095,7 +1094,7 @@ class Chroma(supported_models_base.BASE): def clip_target(self, state_dict={}): pref = self.text_encoder_key_prefix[0] t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref)) - return supported_models_base.ClipTarget(comfy.text_encoders.chroma.ChromaTokenizer, comfy.text_encoders.chroma.chroma_te(**t5_detect)) + return supported_models_base.ClipTarget(comfy.text_encoders.pixart_t5.PixArtTokenizer, comfy.text_encoders.pixart_t5.pixart_te(**t5_detect)) models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma] diff --git a/comfy/text_encoders/chroma.py b/comfy/text_encoders/chroma.py deleted file mode 100644 index aa8dffb25..000000000 --- a/comfy/text_encoders/chroma.py +++ /dev/null @@ -1,43 +0,0 @@ -from comfy import sd1_clip -import comfy.text_encoders.t5 -import os -from transformers import T5TokenizerFast - - -class T5XXLModel(sd1_clip.SDClipModel): - def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=False, model_options={}): - textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_xxl.json") - t5xxl_scaled_fp8 = model_options.get("t5xxl_scaled_fp8", None) - if t5xxl_scaled_fp8 is not None: - model_options = model_options.copy() - model_options["scaled_fp8"] = t5xxl_scaled_fp8 - - super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, 
"pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) - - -class ChromaT5XXL(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", dtype=None, model_options={}): - super().__init__(device=device, dtype=dtype, name="t5xxl", clip_model=T5XXLModel, model_options=model_options) - - -class T5XXLTokenizer(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_data=tokenizer_data) - - -class ChromaT5Tokenizer(sd1_clip.SD1Tokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer) - - -def chroma_te(dtype_t5=None, t5xxl_scaled_fp8=None): - class ChromaTEModel_(ChromaT5XXL): - def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: - model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 - if dtype is None: - dtype = dtype_t5 - super().__init__(device=device, dtype=dtype, model_options=model_options) - return ChromaTEModel_ diff --git a/comfy_extras/nodes_optimalsteps.py b/comfy_extras/nodes_optimalsteps.py index 102958734..e7c851ca2 100644 --- a/comfy_extras/nodes_optimalsteps.py +++ b/comfy_extras/nodes_optimalsteps.py @@ -20,7 +20,7 @@ def loglinear_interp(t_steps, num_steps): NOISE_LEVELS = {"FLUX": [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0.8287, 0.5512, 0.2808, 0.001], "Wan":[1.0, 0.997, 0.995, 0.993, 0.991, 0.989, 0.987, 0.985, 0.98, 0.975, 0.973, 0.968, 0.96, 0.946, 0.927, 0.902, 0.864, 0.776, 0.539, 0.208, 0.001], -"Chroma": [0.9919999837875366, 0.9900000095367432, 0.9879999756813049, 0.9850000143051147, 0.9819999933242798, 0.9779999852180481, 0.9729999899864197, 0.9679999947547913, 0.9610000252723694, 0.953000009059906, 0.9430000185966492, 0.9309999942779541, 0.9169999957084656, 0.8999999761581421, 0.8809999823570251, 0.8579999804496765, 0.8320000171661377, 0.8019999861717224, 0.7689999938011169, 0.7310000061988831, 0.6899999976158142, 0.6460000276565552, 0.5989999771118164, 0.550000011920929, 0.5009999871253967, 0.45100000500679016, 0.4020000100135803, 0.35499998927116394, 0.3109999895095825, 0.27000001072883606, 0.23199999332427979, 0.19900000095367432, 0.16899999976158142, 0.14300000667572021, 0.11999999731779099, 0.10100000351667404, 0.08399999886751175, 0.07000000029802322, 0.057999998331069946, 0.04800000041723251, 0.0], +"Chroma": [0.992, 0.99, 0.988, 0.985, 0.982, 0.978, 0.973, 0.968, 0.961, 0.953, 0.943, 0.931, 0.917, 0.9, 0.881, 0.858, 0.832, 0.802, 0.769, 0.731, 0.69, 0.646, 0.599, 0.55, 0.501, 0.451, 0.402, 0.355, 0.311, 0.27, 0.232, 0.199, 0.169, 0.143, 0.12, 0.101, 0.084, 0.07, 0.058, 0.048, 0.001], } class OptimalStepsScheduler: From c6c19e99803a2538cee17e2f94314f77b7d48e98 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Thu, 1 May 2025 00:24:32 -0700 Subject: [PATCH 0085/1073] fix bug (#7894) --- comfy_api_nodes/apis/client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 384e559dc..d3cd9ad2f 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -297,6 +297,10 @@ class SynchronousOperation(Generic[T, R]): # Convert request model to dict, but use None for EmptyRequest request_dict = None if isinstance(self.request, EmptyRequest) else self.request.model_dump(exclude_none=True) + if request_dict: + for key, value in request_dict.items(): + if isinstance(value, Enum): + request_dict[key] = value.value # Debug log for request logging.debug(f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}") From aa9d759df36faa2e34cc5722463749a09a7f529b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 1 May 2025 03:33:42 -0700 Subject: [PATCH 0086/1073] Switch ltxv to use the pytorch RMSNorm. (#7897) --- comfy/ldm/lightricks/model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/lightricks/model.py b/comfy/ldm/lightricks/model.py index 6e8e06181..056e101a4 100644 --- a/comfy/ldm/lightricks/model.py +++ b/comfy/ldm/lightricks/model.py @@ -1,7 +1,6 @@ import torch from torch import nn import comfy.ldm.modules.attention -from comfy.ldm.genmo.joint_model.layers import RMSNorm import comfy.ldm.common_dit from einops import rearrange import math @@ -262,8 +261,8 @@ class CrossAttention(nn.Module): self.heads = heads self.dim_head = dim_head - self.q_norm = RMSNorm(inner_dim, dtype=dtype, device=device) - self.k_norm = RMSNorm(inner_dim, dtype=dtype, device=device) + self.q_norm = operations.RMSNorm(inner_dim, dtype=dtype, device=device) + self.k_norm = operations.RMSNorm(inner_dim, dtype=dtype, device=device) self.to_q = operations.Linear(query_dim, inner_dim, bias=True, dtype=dtype, device=device) self.to_k = operations.Linear(context_dim, inner_dim, bias=True, dtype=dtype, device=device) From 6d32dc049e676a1373dddc4fd912f2ba1311d4cb Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Thu, 1 May 2025 10:57:54 -0400 Subject: [PATCH 0087/1073] Update frontend to v1.18 (#7898) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f64a05947..74a4ceb02 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.17.11 +comfyui-frontend-package==1.18.5 comfyui-workflow-templates==0.1.3 torch torchsde From 8d0661d0ba35fd54a72e4b49f68d4625febafc10 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 1 May 2025 19:32:04 -0400 Subject: [PATCH 0088/1073] Lint instance methods (#7903) --- comfy_extras/nodes_post_processing.py | 1 + comfy_extras/nodes_webcam.py | 2 +- pyproject.toml | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 5b9542015..cb1a0d883 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -141,6 +141,7 @@ class Quantize: CATEGORY = "image/postprocessing" + @staticmethod def bayer(im, pal_im, order): def normalized_bayer_matrix(n): if n == 0: diff --git a/comfy_extras/nodes_webcam.py b/comfy_extras/nodes_webcam.py index 31eddb2d6..062b15cf8 100644 --- a/comfy_extras/nodes_webcam.py +++ b/comfy_extras/nodes_webcam.py @@ -20,7 +20,7 @@ class WebcamCapture(nodes.LoadImage): CATEGORY = "image" - def load_capture(s, image, **kwargs): + def load_capture(self, image, **kwargs): 
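+        # N805 (invalid-first-argument-name-for-method): the first parameter of an instance method must be named `self`, not `s`.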
return super().load_image(folder_paths.get_annotated_filepath(image)) diff --git a/pyproject.toml b/pyproject.toml index eadca662e..a9c028c7e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,6 +12,7 @@ documentation = "https://docs.comfy.org/" [tool.ruff] lint.select = [ + "N805", # invalid-first-argument-name-for-method "S307", # suspicious-eval-usage "S102", # exec "T", # print-usage From ff99861650a195234f2858b864663920ca811c55 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 2 May 2025 02:15:32 -0700 Subject: [PATCH 0089/1073] Make clipsave work with more TE models. (#7908) --- comfy_extras/nodes_model_merging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py index 78d284889..f20beab7d 100644 --- a/comfy_extras/nodes_model_merging.py +++ b/comfy_extras/nodes_model_merging.py @@ -276,7 +276,7 @@ class CLIPSave: comfy.model_management.load_models_gpu([clip.load_model()], force_patch_weights=True) clip_sd = clip.get_sd() - for prefix in ["clip_l.", "clip_g.", ""]: + for prefix in ["clip_l.", "clip_g.", "clip_h.", "t5xxl.", "pile_t5xl.", "mt5xl.", "umt5xxl.", "t5base.", "gemma2_2b.", "llama.", "hydit_clip.", ""]: k = list(filter(lambda a: a.startswith(prefix), clip_sd.keys())) current_clip_sd = {} for x in k: From 551fe8dceebb07abed486580d8326a6202d0cf7a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 2 May 2025 05:28:05 -0400 Subject: [PATCH 0090/1073] Add node to extend sigmas (#7901) * Add ExpandSigmas node * Rename, add interpolation functions Co-authored-by: liesen * Move computed interpolation outside loop * Add type hints --------- Co-authored-by: liesen --- comfy_extras/nodes_custom_sampler.py | 51 ++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index c9689b745..3e5be3d3c 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -1,3 +1,4 @@ +import math import comfy.samplers import comfy.sample from comfy.k_diffusion import sampling as k_diffusion_sampling @@ -249,6 +250,55 @@ class SetFirstSigma: sigmas[0] = sigma return (sigmas, ) +class ExtendIntermediateSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sigmas": ("SIGMAS", ), + "steps": ("INT", {"default": 2, "min": 1, "max": 100}), + "start_at_sigma": ("FLOAT", {"default": -1.0, "min": -1.0, "max": 20000.0, "step": 0.01, "round": False}), + "end_at_sigma": ("FLOAT", {"default": 12.0, "min": 0.0, "max": 20000.0, "step": 0.01, "round": False}), + "spacing": (['linear', 'cosine', 'sine'],), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "sampling/custom_sampling/sigmas" + + FUNCTION = "extend" + + def extend(self, sigmas: torch.Tensor, steps: int, start_at_sigma: float, end_at_sigma: float, spacing: str): + if start_at_sigma < 0: + start_at_sigma = float("inf") + + interpolator = { + 'linear': lambda x: x, + 'cosine': lambda x: torch.sin(x*math.pi/2), + 'sine': lambda x: 1 - torch.cos(x*math.pi/2) + }[spacing] + + # linear space for our interpolation function + x = torch.linspace(0, 1, steps + 1, device=sigmas.device)[1:-1] + computed_spacing = interpolator(x) + + extended_sigmas = [] + for i in range(len(sigmas) - 1): + sigma_current = sigmas[i] + sigma_next = sigmas[i+1] + + extended_sigmas.append(sigma_current) + + if end_at_sigma <= sigma_current <= 
start_at_sigma: + interpolated_steps = computed_spacing * (sigma_next - sigma_current) + sigma_current + extended_sigmas.extend(interpolated_steps.tolist()) + + # Add the last sigma value + if len(sigmas) > 0: + extended_sigmas.append(sigmas[-1]) + + extended_sigmas = torch.FloatTensor(extended_sigmas) + + return (extended_sigmas,) + class KSamplerSelect: @classmethod def INPUT_TYPES(s): @@ -735,6 +785,7 @@ NODE_CLASS_MAPPINGS = { "SplitSigmasDenoise": SplitSigmasDenoise, "FlipSigmas": FlipSigmas, "SetFirstSigma": SetFirstSigma, + "ExtendIntermediateSigmas": ExtendIntermediateSigmas, "CFGGuider": CFGGuider, "DualCFGGuider": DualCFGGuider, From d9a87c1e6a390eb0e915b544e35baf4d807919db Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 2 May 2025 05:28:27 -0400 Subject: [PATCH 0091/1073] Fix outdated comment about Internet connectivity (#7827) --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index f3f56597a..5c21542b3 100644 --- a/main.py +++ b/main.py @@ -13,7 +13,7 @@ import logging import sys if __name__ == "__main__": - #NOTE: These do not do anything on core ComfyUI which should already have no communication with the internet, they are for custom nodes. + #NOTE: These do not do anything on core ComfyUI, they are for custom nodes. os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1' os.environ['DO_NOT_TRACK'] = '1' From 2ab9618732b738b52c96ae128597371b9b9fff96 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sat, 3 May 2025 01:12:37 +0800 Subject: [PATCH 0092/1073] Fix the bugs in OFT/BOFT module (#7909) * Correct calculate_weight and load for OFT * Correct calculate_weight and loading for BOFT --- comfy/weight_adapter/boft.py | 34 +++++++++++++++++----------------- comfy/weight_adapter/oft.py | 20 +++++++++++--------- 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/comfy/weight_adapter/boft.py b/comfy/weight_adapter/boft.py index c85adc7ab..b2a2f1bd4 100644 --- a/comfy/weight_adapter/boft.py +++ b/comfy/weight_adapter/boft.py @@ -24,7 +24,7 @@ class BOFTAdapter(WeightAdapterBase): ) -> Optional["BOFTAdapter"]: if loaded_keys is None: loaded_keys = set() - blocks_name = "{}.boft_blocks".format(x) + blocks_name = "{}.oft_blocks".format(x) rescale_name = "{}.rescale".format(x) blocks = None @@ -32,17 +32,18 @@ class BOFTAdapter(WeightAdapterBase): blocks = lora[blocks_name] if blocks.ndim == 4: loaded_keys.add(blocks_name) + else: + blocks = None + if blocks is None: + return None rescale = None if rescale_name in lora.keys(): rescale = lora[rescale_name] loaded_keys.add(rescale_name) - if blocks is not None: - weights = (blocks, rescale, alpha, dora_scale) - return cls(loaded_keys, weights) - else: - return None + weights = (blocks, rescale, alpha, dora_scale) + return cls(loaded_keys, weights) def calculate_weight( self, @@ -71,7 +72,7 @@ class BOFTAdapter(WeightAdapterBase): # Get r I = torch.eye(boft_b, device=blocks.device, dtype=blocks.dtype) # for Q = -Q^T - q = blocks - blocks.transpose(1, 2) + q = blocks - blocks.transpose(-1, -2) normed_q = q if alpha > 0: # alpha in boft/bboft is for constraint q_norm = torch.norm(q) + 1e-8 @@ -79,9 +80,8 @@ class BOFTAdapter(WeightAdapterBase): normed_q = q * alpha / q_norm # use float() to prevent unsupported type in .inverse() r = (I + normed_q) @ (I - normed_q).float().inverse() - r = r.to(original_weight) - - inp = org = original_weight + r = r.to(weight) + inp = org = weight r_b = 
boft_b//2 for i in range(boft_m): @@ -91,14 +91,14 @@ class BOFTAdapter(WeightAdapterBase): if strength != 1: bi = bi * strength + (1-strength) * I inp = ( - inp.unflatten(-1, (-1, g, k)) - .transpose(-2, -1) - .flatten(-3) - .unflatten(-1, (-1, boft_b)) + inp.unflatten(0, (-1, g, k)) + .transpose(1, 2) + .flatten(0, 2) + .unflatten(0, (-1, boft_b)) ) - inp = torch.einsum("b n m, b n ... -> b m ...", inp, bi) + inp = torch.einsum("b i j, b j ...-> b i ...", bi, inp) inp = ( - inp.flatten(-2).unflatten(-1, (-1, k, g)).transpose(-2, -1).flatten(-3) + inp.flatten(0, 1).unflatten(0, (-1, k, g)).transpose(1, 2).flatten(0, 2) ) if rescale is not None: @@ -109,7 +109,7 @@ class BOFTAdapter(WeightAdapterBase): if dora_scale is not None: weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) else: - weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + weight += function((strength * lora_diff).type(weight.dtype)) except Exception as e: logging.error("ERROR {} {} {}".format(self.name, key, e)) return weight diff --git a/comfy/weight_adapter/oft.py b/comfy/weight_adapter/oft.py index 0ea229b79..25009eca3 100644 --- a/comfy/weight_adapter/oft.py +++ b/comfy/weight_adapter/oft.py @@ -32,17 +32,18 @@ class OFTAdapter(WeightAdapterBase): blocks = lora[blocks_name] if blocks.ndim == 3: loaded_keys.add(blocks_name) + else: + blocks = None + if blocks is None: + return None rescale = None if rescale_name in lora.keys(): rescale = lora[rescale_name] loaded_keys.add(rescale_name) - if blocks is not None: - weights = (blocks, rescale, alpha, dora_scale) - return cls(loaded_keys, weights) - else: - return None + weights = (blocks, rescale, alpha, dora_scale) + return cls(loaded_keys, weights) def calculate_weight( self, @@ -79,16 +80,17 @@ class OFTAdapter(WeightAdapterBase): normed_q = q * alpha / q_norm # use float() to prevent unsupported type in .inverse() r = (I + normed_q) @ (I - normed_q).float().inverse() - r = r.to(original_weight) + r = r.to(weight) + _, *shape = weight.shape lora_diff = torch.einsum( "k n m, k n ... 
-> k m ...", (r * strength) - strength * I, - original_weight, - ) + weight.view(block_num, block_size, *shape), + ).view(-1, *shape) if dora_scale is not None: weight = weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function) else: - weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) + weight += function((strength * lora_diff).type(weight.dtype)) except Exception as e: logging.error("ERROR {} {} {}".format(self.name, key, e)) return weight From 530494588d9e58a8bcaf55b471f4c3ce2b97ce65 Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Fri, 2 May 2025 13:14:52 -0400 Subject: [PATCH 0093/1073] [BugFix] Update frontend 1.18.6 (#7910) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 74a4ceb02..05ceba00a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.18.5 +comfyui-frontend-package==1.18.6 comfyui-workflow-templates==0.1.3 torch torchsde From 065d855f14968406051a1340e3f2f26461a00e5d Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Fri, 2 May 2025 13:15:54 -0400 Subject: [PATCH 0094/1073] upstream Preview Any from rgthree-comfy (#7815) * upstream Preview Any from rgthree-comfy * use IO.ANY --- comfy_extras/nodes_preview_any.py | 43 +++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 44 insertions(+) create mode 100644 comfy_extras/nodes_preview_any.py diff --git a/comfy_extras/nodes_preview_any.py b/comfy_extras/nodes_preview_any.py new file mode 100644 index 000000000..e6805696f --- /dev/null +++ b/comfy_extras/nodes_preview_any.py @@ -0,0 +1,43 @@ +import json +from comfy.comfy_types.node_typing import IO + +# Preview Any - original implement from +# https://github.com/rgthree/rgthree-comfy/blob/main/py/display_any.py +# upstream requested in https://github.com/Kosinkadink/rfcs/blob/main/rfcs/0000-corenodes.md#preview-nodes +class PreviewAny(): + @classmethod + def INPUT_TYPES(cls): + return { + "required": {"source": (IO.ANY, {})}, + } + + RETURN_TYPES = () + FUNCTION = "main" + OUTPUT_NODE = True + + CATEGORY = "utils" + + def main(self, source=None): + value = 'None' + if isinstance(source, str): + value = source + elif isinstance(source, (int, float, bool)): + value = str(source) + elif source is not None: + try: + value = json.dumps(source) + except Exception: + try: + value = str(source) + except Exception: + value = 'source exists, but could not be serialized.' + + return {"ui": {"text": (value,)}} + +NODE_CLASS_MAPPINGS = { + "PreviewAny": PreviewAny, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "PreviewAny": "Preview Any", +} diff --git a/nodes.py b/nodes.py index f2ced2c35..92b8ca6ae 100644 --- a/nodes.py +++ b/nodes.py @@ -2258,6 +2258,7 @@ def init_builtin_extra_nodes(): "nodes_optimalsteps.py", "nodes_hidream.py", "nodes_fresca.py", + "nodes_preview_any.py", ] api_nodes_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_api_nodes") From 486ad8fdc55495a705a211c22cc50bec9ec95643 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 2 May 2025 21:28:10 -0700 Subject: [PATCH 0095/1073] Fix updater issue with newer portable. 
(#7917) --- .ci/update_windows/update.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.ci/update_windows/update.py b/.ci/update_windows/update.py index 731b6bc53..51a263203 100755 --- a/.ci/update_windows/update.py +++ b/.ci/update_windows/update.py @@ -63,7 +63,12 @@ except: print("checking out master branch") # noqa: T201 branch = repo.lookup_branch('master') if branch is None: - ref = repo.lookup_reference('refs/remotes/origin/master') + try: + ref = repo.lookup_reference('refs/remotes/origin/master') + except: + print("pulling.") # noqa: T201 + pull(repo) + ref = repo.lookup_reference('refs/remotes/origin/master') repo.checkout(ref) branch = repo.lookup_branch('master') if branch is None: From 7689917113fe521adfaba2a4fff952ef1805ad2b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 3 May 2025 00:34:01 -0400 Subject: [PATCH 0096/1073] ComfyUI version 0.3.31 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 67d27f942..8d2068de7 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.30" +__version__ = "0.3.31" diff --git a/pyproject.toml b/pyproject.toml index a9c028c7e..8b549a0b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.30" +version = "0.3.31" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 3041e5c354511ad8e4a6fe73ed397e41845caae2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 3 May 2025 16:07:55 -0700 Subject: [PATCH 0097/1073] Switch mochi and wan modes to use pytorch RMSNorm. (#7925) * Switch genmo model to native RMSNorm. * Switch WAN to native RMSNorm. --- comfy/ldm/genmo/joint_model/asymm_models_joint.py | 9 ++++----- comfy/ldm/genmo/joint_model/layers.py | 11 ----------- comfy/ldm/wan/model.py | 7 +++---- 3 files changed, 7 insertions(+), 20 deletions(-) diff --git a/comfy/ldm/genmo/joint_model/asymm_models_joint.py b/comfy/ldm/genmo/joint_model/asymm_models_joint.py index 2c46c24bf..366a8b713 100644 --- a/comfy/ldm/genmo/joint_model/asymm_models_joint.py +++ b/comfy/ldm/genmo/joint_model/asymm_models_joint.py @@ -13,7 +13,6 @@ from comfy.ldm.modules.attention import optimized_attention from .layers import ( FeedForward, PatchEmbed, - RMSNorm, TimestepEmbedder, ) @@ -90,10 +89,10 @@ class AsymmetricAttention(nn.Module): # Query and key normalization for stability. assert qk_norm - self.q_norm_x = RMSNorm(self.head_dim, device=device, dtype=dtype) - self.k_norm_x = RMSNorm(self.head_dim, device=device, dtype=dtype) - self.q_norm_y = RMSNorm(self.head_dim, device=device, dtype=dtype) - self.k_norm_y = RMSNorm(self.head_dim, device=device, dtype=dtype) + self.q_norm_x = operations.RMSNorm(self.head_dim, eps=1e-5, device=device, dtype=dtype) + self.k_norm_x = operations.RMSNorm(self.head_dim, eps=1e-5, device=device, dtype=dtype) + self.q_norm_y = operations.RMSNorm(self.head_dim, eps=1e-5, device=device, dtype=dtype) + self.k_norm_y = operations.RMSNorm(self.head_dim, eps=1e-5, device=device, dtype=dtype) # Output layers. y features go back down from dim_x -> dim_y. 
self.proj_x = operations.Linear(dim_x, dim_x, bias=out_bias, device=device, dtype=dtype) diff --git a/comfy/ldm/genmo/joint_model/layers.py b/comfy/ldm/genmo/joint_model/layers.py index 51d979559..e310bd717 100644 --- a/comfy/ldm/genmo/joint_model/layers.py +++ b/comfy/ldm/genmo/joint_model/layers.py @@ -151,14 +151,3 @@ class PatchEmbed(nn.Module): x = self.norm(x) return x - - -class RMSNorm(torch.nn.Module): - def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None): - super().__init__() - self.eps = eps - self.weight = torch.nn.Parameter(torch.empty(hidden_size, device=device, dtype=dtype)) - self.register_parameter("bias", None) - - def forward(self, x): - return comfy.ldm.common_dit.rms_norm(x, self.weight, self.eps) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 66bee7480..fc5ff40c5 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -9,7 +9,6 @@ from einops import repeat from comfy.ldm.modules.attention import optimized_attention from comfy.ldm.flux.layers import EmbedND from comfy.ldm.flux.math import apply_rope -from comfy.ldm.modules.diffusionmodules.mmdit import RMSNorm import comfy.ldm.common_dit import comfy.model_management @@ -49,8 +48,8 @@ class WanSelfAttention(nn.Module): self.k = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.v = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.o = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) - self.norm_q = RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() - self.norm_k = RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() + self.norm_q = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() + self.norm_k = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() def forward(self, x, freqs): r""" @@ -114,7 +113,7 @@ class WanI2VCrossAttention(WanSelfAttention): self.k_img = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.v_img = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) # self.alpha = nn.Parameter(torch.zeros((1, ))) - self.norm_k_img = RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() + self.norm_k_img = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() def forward(self, x, context, context_img_len): r""" From 9187a09483514c9d363bf064481c4614563951b0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 4 May 2025 03:26:20 -0700 Subject: [PATCH 0098/1073] Change 
cosmos and hydit models to use the native RMSNorm. (#7934) --- comfy/ldm/cosmos/blocks.py | 11 +++++------ comfy/ldm/cosmos/model.py | 4 +--- comfy/ldm/hydit/models.py | 4 ++-- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/comfy/ldm/cosmos/blocks.py b/comfy/ldm/cosmos/blocks.py index 84fd6d839..a12f892d2 100644 --- a/comfy/ldm/cosmos/blocks.py +++ b/comfy/ldm/cosmos/blocks.py @@ -23,7 +23,6 @@ from einops import rearrange, repeat from einops.layers.torch import Rearrange from torch import nn -from comfy.ldm.modules.diffusionmodules.mmdit import RMSNorm from comfy.ldm.modules.attention import optimized_attention @@ -37,11 +36,11 @@ def apply_rotary_pos_emb( return t_out -def get_normalization(name: str, channels: int, weight_args={}): +def get_normalization(name: str, channels: int, weight_args={}, operations=None): if name == "I": return nn.Identity() elif name == "R": - return RMSNorm(channels, elementwise_affine=True, eps=1e-6, **weight_args) + return operations.RMSNorm(channels, elementwise_affine=True, eps=1e-6, **weight_args) else: raise ValueError(f"Normalization {name} not found") @@ -120,15 +119,15 @@ class Attention(nn.Module): self.to_q = nn.Sequential( operations.Linear(query_dim, inner_dim, bias=qkv_bias, **weight_args), - get_normalization(qkv_norm[0], norm_dim), + get_normalization(qkv_norm[0], norm_dim, weight_args=weight_args, operations=operations), ) self.to_k = nn.Sequential( operations.Linear(context_dim, inner_dim, bias=qkv_bias, **weight_args), - get_normalization(qkv_norm[1], norm_dim), + get_normalization(qkv_norm[1], norm_dim, weight_args=weight_args, operations=operations), ) self.to_v = nn.Sequential( operations.Linear(context_dim, inner_dim, bias=qkv_bias, **weight_args), - get_normalization(qkv_norm[2], norm_dim), + get_normalization(qkv_norm[2], norm_dim, weight_args=weight_args, operations=operations), ) self.to_out = nn.Sequential( diff --git a/comfy/ldm/cosmos/model.py b/comfy/ldm/cosmos/model.py index 06d0baef3..4836e0b69 100644 --- a/comfy/ldm/cosmos/model.py +++ b/comfy/ldm/cosmos/model.py @@ -27,8 +27,6 @@ from torchvision import transforms from enum import Enum import logging -from comfy.ldm.modules.diffusionmodules.mmdit import RMSNorm - from .blocks import ( FinalLayer, GeneralDITTransformerBlock, @@ -195,7 +193,7 @@ class GeneralDIT(nn.Module): if self.affline_emb_norm: logging.debug("Building affine embedding normalization layer") - self.affline_norm = RMSNorm(model_channels, elementwise_affine=True, eps=1e-6) + self.affline_norm = operations.RMSNorm(model_channels, elementwise_affine=True, eps=1e-6, device=device, dtype=dtype) else: self.affline_norm = nn.Identity() diff --git a/comfy/ldm/hydit/models.py b/comfy/ldm/hydit/models.py index 359f6a965..5ba2b76e0 100644 --- a/comfy/ldm/hydit/models.py +++ b/comfy/ldm/hydit/models.py @@ -3,7 +3,7 @@ import torch import torch.nn as nn import comfy.ops -from comfy.ldm.modules.diffusionmodules.mmdit import Mlp, TimestepEmbedder, PatchEmbed, RMSNorm +from comfy.ldm.modules.diffusionmodules.mmdit import Mlp, TimestepEmbedder, PatchEmbed from comfy.ldm.modules.diffusionmodules.util import timestep_embedding from torch.utils import checkpoint @@ -51,7 +51,7 @@ class HunYuanDiTBlock(nn.Module): if norm_type == "layer": norm_layer = operations.LayerNorm elif norm_type == "rms": - norm_layer = RMSNorm + norm_layer = operations.RMSNorm else: raise ValueError(f"Unknown norm_type: {norm_type}") From 80a44b97f5cbcb890896e2b9e65d177f1ac6a588 Mon Sep 17 00:00:00 2001 From: comfyanonymous 
<121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 4 May 2025 03:39:23 -0700 Subject: [PATCH 0099/1073] Change lumina to native RMSNorm. (#7935) --- comfy/ldm/lumina/model.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index ccd5d2c0e..f8dc4d7db 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -8,7 +8,7 @@ import torch.nn as nn import torch.nn.functional as F import comfy.ldm.common_dit -from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, RMSNorm +from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder from comfy.ldm.modules.attention import optimized_attention_masked from comfy.ldm.flux.layers import EmbedND @@ -64,8 +64,8 @@ class JointAttention(nn.Module): ) if qk_norm: - self.q_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings) - self.k_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings) + self.q_norm = operation_settings.get("operations").RMSNorm(self.head_dim, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.k_norm = operation_settings.get("operations").RMSNorm(self.head_dim, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) else: self.q_norm = self.k_norm = nn.Identity() @@ -242,11 +242,11 @@ class JointTransformerBlock(nn.Module): operation_settings=operation_settings, ) self.layer_id = layer_id - self.attention_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings) - self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings) + self.attention_norm1 = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.ffn_norm1 = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) - self.attention_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings) - self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings) + self.attention_norm2 = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.ffn_norm2 = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.modulation = modulation if modulation: @@ -431,7 +431,7 @@ class NextDiT(nn.Module): self.t_embedder = TimestepEmbedder(min(dim, 1024), **operation_settings) self.cap_embedder = nn.Sequential( - RMSNorm(cap_feat_dim, eps=norm_eps, elementwise_affine=True, **operation_settings), + operation_settings.get("operations").RMSNorm(cap_feat_dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")), operation_settings.get("operations").Linear( cap_feat_dim, dim, @@ -457,7 +457,7 @@ class NextDiT(nn.Module): for layer_id in range(n_layers) ] ) - self.norm_final = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings) + self.norm_final = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, 
device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.final_layer = FinalLayer(dim, patch_size, self.out_channels, operation_settings=operation_settings) assert (dim // n_heads) == sum(axes_dims) From cd18582578d38081af5614d018781bcdfbe95e0b Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 4 May 2025 20:26:57 -0700 Subject: [PATCH 0100/1073] Support saving Comfy `VIDEO` type to buffer (#7939) * get output format when saving to buffer * add unit tests for writing to file or stream with correct fmt * handle `to_format=None` * fix formatting --- comfy_api/input_impl/video_types.py | 46 +++++++++- tests-unit/comfy_api_test/input_impl_test.py | 91 ++++++++++++++++++++ 2 files changed, 135 insertions(+), 2 deletions(-) create mode 100644 tests-unit/comfy_api_test/input_impl_test.py diff --git a/comfy_api/input_impl/video_types.py b/comfy_api/input_impl/video_types.py index 12e5783db..d0b0b36d2 100644 --- a/comfy_api/input_impl/video_types.py +++ b/comfy_api/input_impl/video_types.py @@ -12,6 +12,46 @@ import torch from comfy_api.input import VideoInput from comfy_api.util import VideoContainer, VideoCodec, VideoComponents + +def container_to_output_format(container_format: str | None) -> str | None: + """ + A container's `format` may be a comma-separated list of formats. + E.g., iso container's `format` may be `mov,mp4,m4a,3gp,3g2,mj2`. + However, writing to a file/stream with `av.open` requires a single format, + or `None` to auto-detect. + """ + if not container_format: + return None # Auto-detect + + if "," not in container_format: + return container_format + + formats = container_format.split(",") + return formats[0] + + +def get_open_write_kwargs( + dest: str | io.BytesIO, container_format: str, to_format: str | None +) -> dict: + """Get kwargs for writing a `VideoFromFile` to a file/stream with `av.open`""" + open_kwargs = { + "mode": "w", + # If isobmff, preserve custom metadata tags (workflow, prompt, extra_pnginfo) + "options": {"movflags": "use_metadata_tags"}, + } + + is_write_to_buffer = isinstance(dest, io.BytesIO) + if is_write_to_buffer: + # Set output format explicitly, since it cannot be inferred from file extension + if to_format == VideoContainer.AUTO: + to_format = container_format.lower() + elif isinstance(to_format, str): + to_format = to_format.lower() + open_kwargs["format"] = container_to_output_format(to_format) + + return open_kwargs + + class VideoFromFile(VideoInput): """ Class representing video input from a file. 
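Taken together, these two helpers decide whether `av.open` receives an explicit `format` kwarg: writing to a file path lets PyAV infer the container from the extension, while writing to an in-memory buffer requires resolving one concrete format first. A minimal sketch of the expected behavior (illustrative values only; the unit tests added later in this patch exercise the same cases):

```python
import io

# A comma-separated container format collapses to its first entry.
assert container_to_output_format("mov,mp4,m4a") == "mov"
assert container_to_output_format(None) is None  # None means auto-detect

# File-path destination: the format is inferred from the extension,
# so no "format" kwarg is set.
kwargs = get_open_write_kwargs("output.mp4", "mp4", VideoContainer.AUTO)
assert "format" not in kwargs and kwargs["mode"] == "w"

# BytesIO destination: there is no extension to infer from,
# so a single concrete format is set explicitly.
kwargs = get_open_write_kwargs(io.BytesIO(), "mov,mp4,m4a", VideoContainer.AUTO)
assert kwargs["format"] == "mov"
```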
@@ -89,7 +129,7 @@ class VideoFromFile(VideoInput): def save_to( self, - path: str, + path: str | io.BytesIO, format: VideoContainer = VideoContainer.AUTO, codec: VideoCodec = VideoCodec.AUTO, metadata: Optional[dict] = None @@ -116,7 +156,9 @@ class VideoFromFile(VideoInput): ) streams = container.streams - with av.open(path, mode='w', options={"movflags": "use_metadata_tags"}) as output_container: + + open_kwargs = get_open_write_kwargs(path, container_format, format) + with av.open(path, **open_kwargs) as output_container: # Copy over the original metadata for key, value in container.metadata.items(): if metadata is None or key not in metadata: diff --git a/tests-unit/comfy_api_test/input_impl_test.py b/tests-unit/comfy_api_test/input_impl_test.py new file mode 100644 index 000000000..5fc21a9a7 --- /dev/null +++ b/tests-unit/comfy_api_test/input_impl_test.py @@ -0,0 +1,91 @@ +import io +from comfy_api.input_impl.video_types import ( + container_to_output_format, + get_open_write_kwargs, +) +from comfy_api.util import VideoContainer + + +def test_container_to_output_format_empty_string(): + """Test that an empty string input returns None. `None` arg allows default auto-detection.""" + assert container_to_output_format("") is None + + +def test_container_to_output_format_none(): + """Test that None input returns None.""" + assert container_to_output_format(None) is None + + +def test_container_to_output_format_comma_separated(): + """Test that a comma-separated list returns a valid singular format from the list.""" + comma_separated_format = "mp4,mov,m4a" + output_format = container_to_output_format(comma_separated_format) + assert output_format in comma_separated_format + + +def test_container_to_output_format_single(): + """Test that a single format string (not comma-separated list) is returned as is.""" + assert container_to_output_format("mp4") == "mp4" + + +def test_get_open_write_kwargs_filepath_no_format(): + """Test that 'format' kwarg is NOT set when dest is a file path.""" + kwargs_auto = get_open_write_kwargs("output.mp4", "mp4", VideoContainer.AUTO) + assert "format" not in kwargs_auto, "Format should not be set for file paths (AUTO)" + + kwargs_specific = get_open_write_kwargs("output.avi", "mp4", "avi") + fail_msg = "Format should not be set for file paths (Specific)" + assert "format" not in kwargs_specific, fail_msg + + +def test_get_open_write_kwargs_base_options_mode(): + """Test basic kwargs for file path: mode and movflags.""" + kwargs = get_open_write_kwargs("output.mp4", "mp4", VideoContainer.AUTO) + assert kwargs["mode"] == "w", "mode should be set to write" + + fail_msg = "movflags should be set to preserve custom metadata tags" + assert "movflags" in kwargs["options"], fail_msg + assert kwargs["options"]["movflags"] == "use_metadata_tags", fail_msg + + +def test_get_open_write_kwargs_bytesio_auto_format(): + """Test kwargs for BytesIO dest with AUTO format.""" + dest = io.BytesIO() + container_fmt = "mov,mp4,m4a" + kwargs = get_open_write_kwargs(dest, container_fmt, VideoContainer.AUTO) + + assert kwargs["mode"] == "w" + assert kwargs["options"]["movflags"] == "use_metadata_tags" + + fail_msg = ( + "Format should be a valid format from the container's format list when AUTO" + ) + assert kwargs["format"] in container_fmt, fail_msg + + +def test_get_open_write_kwargs_bytesio_specific_format(): + """Test kwargs for BytesIO dest with a specific single format.""" + dest = io.BytesIO() + container_fmt = "avi" + to_fmt = VideoContainer.MP4 + kwargs = 
get_open_write_kwargs(dest, container_fmt, to_fmt) + + assert kwargs["mode"] == "w" + assert kwargs["options"]["movflags"] == "use_metadata_tags" + + fail_msg = "Format should be the specified format (lowercased) when output format is not AUTO" + assert kwargs["format"] == "mp4", fail_msg + + +def test_get_open_write_kwargs_bytesio_specific_format_list(): + """Test kwargs for BytesIO dest with a specific comma-separated format.""" + dest = io.BytesIO() + container_fmt = "avi" + to_fmt = "mov,mp4,m4a" # A format string that is a list + kwargs = get_open_write_kwargs(dest, container_fmt, to_fmt) + + assert kwargs["mode"] == "w" + assert kwargs["options"]["movflags"] == "use_metadata_tags" + + fail_msg = "Format should be a valid format from the specified format list when output format is not AUTO" + assert kwargs["format"] in to_fmt, fail_msg From 3e62c5513a5ab5311a1fb4c87ad787fdaad06ea2 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 4 May 2025 20:27:23 -0700 Subject: [PATCH 0101/1073] make audio chunks contiguous before encoding (#7942) --- comfy_api/input_impl/video_types.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy_api/input_impl/video_types.py b/comfy_api/input_impl/video_types.py index d0b0b36d2..ae48dbaa4 100644 --- a/comfy_api/input_impl/video_types.py +++ b/comfy_api/input_impl/video_types.py @@ -253,7 +253,12 @@ class VideoFromComponents(VideoInput): start = i * samples_per_frame end = start + samples_per_frame # TODO(Feature) - Add support for stereo audio - chunk = self.__components.audio['waveform'][0, 0, start:end].unsqueeze(0).numpy() + chunk = ( + self.__components.audio["waveform"][0, 0, start:end] + .unsqueeze(0) + .contiguous() + .numpy() + ) audio_frame = av.AudioFrame.from_ndarray(chunk, format='fltp', layout='mono') audio_frame.sample_rate = audio_sample_rate audio_frame.pts = i * samples_per_frame From d9c80a85e54ebdd5aaba3374d76a6e32e1988c97 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 5 May 2025 04:49:07 -0700 Subject: [PATCH 0102/1073] This should not be a warning. (#7946) --- app/custom_node_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/custom_node_manager.py b/app/custom_node_manager.py index 27d85d9ce..281febca9 100644 --- a/app/custom_node_manager.py +++ b/app/custom_node_manager.py @@ -127,8 +127,8 @@ class CustomNodeManager: if os.path.exists(workflows_dir): if folder_name != "example_workflows": - logging.warning( - "WARNING: Found example workflow folder '%s' for custom node '%s', consider renaming it to 'example_workflows'", + logging.debug( + "Found example workflow folder '%s' for custom node '%s', consider renaming it to 'example_workflows'", folder_name, module_name) webapp.add_routes( From 1271c4ef9df2b4eb037688da514f63e1bd8bd727 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 6 May 2025 03:23:00 -0500 Subject: [PATCH 0103/1073] More API Nodes (#7956) * Add Ideogram generate node. * Add staging api. * Add API_NODE and common error for missing auth token (#5) * Add Minimax Video Generation + Async Task queue polling example (#6) * [Minimax] Show video preview and embed workflow in output (#7) * Remove uv.lock * Remove polling operations. * Revert "Remove polling operations." * Update stubs. * Added Ideogram and Minimax back in. * Added initial BFL Flux 1.1 [pro] Ultra node (#11) * Add --comfy-api-base launch arg (#13) * Add instructions for staging development. 
(#14) * remove validation to make it easier to run against LAN copies of the API * Manually add BFL polling status response schema (#15) * Add function for uploading files. (#18) * Add Luma nodes (#16) * Refactor util functions (#20) * Add VIDEO type (#21) * Add rest of Luma node functionality (#19) * Fix image_luma_ref not working (#28) * [Bug] Remove duplicated option T2V-01 in MinimaxTextToVideoNode (#31) * Add utils to map from pydantic model fields to comfy node inputs (#30) * add veo2, bump av req (#32) * Add Recraft nodes (#29) * Add Kling Nodes (#12) * Add Camera Concepts (luma_concepts) to Luma Video nodes (#33) * Add Runway nodes (#17) * Convert Minimax node to use VIDEO output type (#34) * Standard `CATEGORY` system for api nodes (#35) * Set `Content-Type` header when uploading files (#36) * add better error propagation to veo2 (#37) * Add Realistic Image and Logo Raster styles for Recraft v3 (#38) * Fix runway image upload and progress polling (#39) * Fix image upload for Luma: only include `Content-Type` header field if it's set explicitly (#40) * Moved Luma nodes to nodes_luma.py (#47) * Moved Recraft nodes to nodes_recraft.py (#48) * Add Pixverse nodes (#46) * Move and fix BFL nodes to node_bfl.py (#49) * Move and edit Minimax node to nodes_minimax.py (#50) * Add Minimax Image to Video node + Cleanup (#51) * Add Recraft Text to Vector node, add Save SVG node to handle its output (#53) * Added pixverse_template support to Pixverse Text to Video node (#54) * Added Recraft Controls + Recraft Color RGB nodes (#57) * split remaining nodes out of nodes_api, make utility lib, refactor ideogram (#61) * Add types and doctstrings to utils file (#64) * Fix: `PollingOperation` progress bar update progress by absolute value (#65) * Use common download function in kling nodes module (#67) * Fix: Luma video nodes in `api nodes/image` category (#68) * Set request type explicitly (#66) * Add `control_after_generate` to all seed inputs (#69) * Fix bug: deleting `Content-Type` when property does not exist (#73) * Add preview to Save SVG node (#74) * change default poll interval (#76), rework veo2 * Add Pixverse and updated Kling types (#75) * Added Pixverse Image to VIdeo node (#77) * Add Pixverse Transition Video node (#79) * Proper ray-1-6 support as fix has been applied in backend (#80) * Added Recraft Style - Infinite Style Library node (#82) * add ideogram v3 (#83) * [Kling] Split Camera Control config to its own node (#81) * Add Pika i2v and t2v nodes (#52) * Temporary Fix for Runway (#87) * Added Stability Stable Image Ultra node (#86) * Remove Runway nodes (#88) * Fix: Prompt text can't be validated in Kling nodes when using primitive nodes (#90) * Fix: typo in node name "Stabiliy" => "Stability" (#91) * Add String (Multiline) node (#93) * Update Pika Duration and Resolution options (#94) * Change base branch to master. Not main. 
(#95) * Fix UploadRequest file_name param (#98) * Removed Infinite Style Library until later (#99) * fix ideogram style types (#100) * fix multi image return (#101) * add metadata saving to SVG (#102) * Bump templates version to include API node template workflows (#104) * Fix: `download_url_to_video_output` return type (#103) * fix 4o generation bug (#106) * Serve SVG files directly (#107) * Add a bunch of nodes, 3 ready to use, the rest waiting for endpoint support (#108) * Revert "Serve SVG files directly" (#111) * Expose 4 remaining Recraft nodes (#112) * [Kling] Add `Duration` and `Video ID` outputs (#105) * Fix: datamodel-codegen sets string#binary type to non-existent `bytes_aliased` variable (#114) * Fix: Dall-e 2 not setting request content-type dynamically (#113) * Default request timeout: one hour. (#116) * Add Kling nodes: camera control, start-end frame, lip-sync, video extend (#115) * Add 8 nodes - 4 BFL, 4 Stability (#117) * Fix error for Recraft ImageToImage error for nonexistent random_seed param (#118) * Add remaining Pika nodes (#119) * Make controls input work for Recraft Image to Image node (#120) * Use upstream PR: Support saving Comfy VIDEO type to buffer (#123) * Use Upstream PR: "Fix: Error creating video when sliced audio tensor chunks are non-c-contiguous" (#127) * Improve audio upload utils (#128) * Fix: Nested `AnyUrl` in request model cannot be serialized (Kling, Runway) (#129) * Show errors and API output URLs to the user (change log levels) (#131) * Fix: Luma I2I fails when weight is <=0.01 (#132) * Change category of `LumaConcepts` node from image to video (#133) * Fix: `image.shape` accessed before `image` is null-checked (#134) * Apply small fixes and most prompt validation (if needed to avoid API error) (#135) * Node name/category modifications (#140) * Add back Recraft Style - Infinite Style Library node (#141) * Fixed Kling: Check attributes of pydantic types. 
(#144) * Bump `comfyui-workflow-templates` version (#142) * [Kling] Print response data when error validating response (#146) * Fix: error validating Kling image response, trying to use `"key" in` on Pydantic class instance (#147) * [Kling] Fix: Correct/verify supported subset of input combos in Kling nodes (#149) * [Kling] Fix typo in node description (#150) * [Kling] Fix: CFG min/max not being enforced (#151) * Rebase launch-rebase (private) on prep-branch (public copy of master) (#153) * Bump templates version (#154) * Fix: Kling image gen nodes don't return entire batch when `n` > 1 (#152) * Remove pixverse_template from PixVerse Transition Video node (#155) * Invert image_weight value on Luma Image to Image node (#156) * Invert and resize mask for Ideogram V3 node to match masking conventions (#158) * [Kling] Fix: image generation nodes not returning Tuple (#159) * [Bug] [Kling] Fix Kling camera control (#161) * Kling Image Gen v2 + improve node descriptions for Flux/OpenAI (#160) * [Kling] Don't return video_id from dual effect video (#162) * Bump frontend to 1.18.8 (#163) * Use 3.9 compat syntax (#164) * Use Python 3.10 * add example env var * Update templates to 0.1.11 * Bump frontend to 1.18.9 --------- Co-authored-by: Robin Huang Co-authored-by: Christian Byrne Co-authored-by: thot experiment <94414189+thot-experiment@users.noreply.github.com> --- .github/workflows/test-launch.yml | 2 +- .github/workflows/update-api-stubs.yml | 13 +- .gitignore | 3 + comfy/cli_args.py | 7 + comfy_api_nodes/README.md | 41 + comfy_api_nodes/apinode_utils.py | 575 +++ comfy_api_nodes/apis/PixverseController.py | 4 +- comfy_api_nodes/apis/PixverseDto.py | 12 +- comfy_api_nodes/apis/__init__.py | 3857 ++++++++++++++++- comfy_api_nodes/apis/bfl_api.py | 156 + comfy_api_nodes/apis/client.py | 373 +- comfy_api_nodes/apis/luma_api.py | 253 ++ comfy_api_nodes/apis/pixverse_api.py | 146 + comfy_api_nodes/apis/recraft_api.py | 263 ++ comfy_api_nodes/apis/stability_api.py | 127 + comfy_api_nodes/mapper_utils.py | 116 + comfy_api_nodes/nodes_api.py | 449 -- comfy_api_nodes/nodes_bfl.py | 906 ++++ comfy_api_nodes/nodes_ideogram.py | 777 ++++ comfy_api_nodes/nodes_kling.py | 1563 +++++++ comfy_api_nodes/nodes_luma.py | 702 +++ comfy_api_nodes/nodes_minimax.py | 306 ++ comfy_api_nodes/nodes_openai.py | 487 +++ comfy_api_nodes/nodes_pika.py | 749 ++++ comfy_api_nodes/nodes_pixverse.py | 492 +++ comfy_api_nodes/nodes_recraft.py | 1217 ++++++ comfy_api_nodes/nodes_stability.py | 609 +++ comfy_api_nodes/nodes_veo2.py | 283 ++ comfy_api_nodes/redocly-dev.yaml | 10 + comfy_api_nodes/redocly.yaml | 10 + comfy_extras/nodes_primitive.py | 17 + nodes.py | 12 +- requirements.txt | 4 +- .../comfy_api_nodes_test/mapper_utils_test.py | 297 ++ 34 files changed, 14101 insertions(+), 737 deletions(-) create mode 100644 comfy_api_nodes/README.md create mode 100644 comfy_api_nodes/apinode_utils.py create mode 100644 comfy_api_nodes/apis/bfl_api.py create mode 100644 comfy_api_nodes/apis/luma_api.py create mode 100644 comfy_api_nodes/apis/pixverse_api.py create mode 100644 comfy_api_nodes/apis/recraft_api.py create mode 100644 comfy_api_nodes/apis/stability_api.py create mode 100644 comfy_api_nodes/mapper_utils.py delete mode 100644 comfy_api_nodes/nodes_api.py create mode 100644 comfy_api_nodes/nodes_bfl.py create mode 100644 comfy_api_nodes/nodes_ideogram.py create mode 100644 comfy_api_nodes/nodes_kling.py create mode 100644 comfy_api_nodes/nodes_luma.py create mode 100644 comfy_api_nodes/nodes_minimax.py create mode 100644 
comfy_api_nodes/nodes_openai.py create mode 100644 comfy_api_nodes/nodes_pika.py create mode 100644 comfy_api_nodes/nodes_pixverse.py create mode 100644 comfy_api_nodes/nodes_recraft.py create mode 100644 comfy_api_nodes/nodes_stability.py create mode 100644 comfy_api_nodes/nodes_veo2.py create mode 100644 comfy_api_nodes/redocly-dev.yaml create mode 100644 comfy_api_nodes/redocly.yaml create mode 100644 tests-unit/comfy_api_nodes_test/mapper_utils_test.py diff --git a/.github/workflows/test-launch.yml b/.github/workflows/test-launch.yml index c56283c2d..1735fd83b 100644 --- a/.github/workflows/test-launch.yml +++ b/.github/workflows/test-launch.yml @@ -17,7 +17,7 @@ jobs: path: "ComfyUI" - uses: actions/setup-python@v4 with: - python-version: '3.9' + python-version: '3.10' - name: Install requirements run: | python -m pip install --upgrade pip diff --git a/.github/workflows/update-api-stubs.yml b/.github/workflows/update-api-stubs.yml index 2ae99b673..c99ec9fc1 100644 --- a/.github/workflows/update-api-stubs.yml +++ b/.github/workflows/update-api-stubs.yml @@ -22,10 +22,19 @@ jobs: run: | python -m pip install --upgrade pip pip install 'datamodel-code-generator[http]' + npm install @redocly/cli + + - name: Download OpenAPI spec + run: | + curl -o openapi.yaml https://api.comfy.org/openapi + + - name: Filter OpenAPI spec with Redocly + run: | + npx @redocly/cli bundle openapi.yaml --output filtered-openapi.yaml --config comfy_api_nodes/redocly.yaml --remove-unused-components - name: Generate API models run: | - datamodel-codegen --use-subclass-enum --url https://api.comfy.org/openapi --output comfy_api_nodes/apis --output-model-type pydantic_v2.BaseModel + datamodel-codegen --use-subclass-enum --input filtered-openapi.yaml --output comfy_api_nodes/apis --output-model-type pydantic_v2.BaseModel - name: Check for changes id: git-check @@ -44,4 +53,4 @@ jobs: Generated automatically by the a Github workflow. branch: update-api-stubs delete-branch: true - base: main + base: master diff --git a/.gitignore b/.gitignore index 61881b8a4..4e8cea71e 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,6 @@ venv/ *.log web_custom_versions/ .DS_Store +openapi.yaml +filtered-openapi.yaml +uv.lock diff --git a/comfy/cli_args.py b/comfy/cli_args.py index f89a7aab4..ef5ab6277 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -192,6 +192,13 @@ parser.add_argument("--user-directory", type=is_valid_directory, default=None, h parser.add_argument("--enable-compress-response-body", action="store_true", help="Enable compressing response body.") +parser.add_argument( + "--comfy-api-base", + type=str, + default="https://api.comfy.org", + help="Set the base URL for the ComfyUI API. (default: https://api.comfy.org)", +) + if comfy.options.args_parsing: args = parser.parse_args() else: diff --git a/comfy_api_nodes/README.md b/comfy_api_nodes/README.md new file mode 100644 index 000000000..e2633a769 --- /dev/null +++ b/comfy_api_nodes/README.md @@ -0,0 +1,41 @@ +# ComfyUI API Nodes + +## Introduction + +Below are a collection of nodes that work by calling external APIs. More information available in our [docs](https://docs.comfy.org/tutorials/api-nodes/overview#api-nodes). + +## Development + +While developing, you should be testing against the Staging environment. To test against staging: + +**Install ComfyUI_frontend** + +Follow the instructions [here](https://github.com/Comfy-Org/ComfyUI_frontend) to start the frontend server. By default, it will connect to Staging authentication. 
+ +> **Hint:** If you use --front-end-version argument for ComfyUI, it will use production authentication. + +```bash +python main.py --comfy-api-base https://stagingapi.comfy.org +``` + +API stubs are generated through automatic codegen tools from OpenAPI definitions. Since the Comfy Org OpenAPI definition contains many things from the Comfy Registry as well, we use redocly/cli to filter out only the paths relevant for API nodes. + +### Redocly Instructions + +**Tip** +When developing locally, use the `redocly-dev.yaml` file to generate pydantic models. This lets you use stubs for APIs that are not marked `Released` yet. + +Before your API node PR merges, make sure to add the `Released` tag to the `openapi.yaml` file and test in staging. + +```bash +# Download the OpenAPI file from the staging server. +curl -o openapi.yaml https://stagingapi.comfy.org/openapi + +# Filter out unneeded API definitions. +npm install -g @redocly/cli +redocly bundle openapi.yaml --output filtered-openapi.yaml --config comfy_api_nodes/redocly-dev.yaml --remove-unused-components + +# Generate the pydantic datamodels for validation. +datamodel-codegen --use-subclass-enum --field-constraints --strict-types bytes --input filtered-openapi.yaml --output comfy_api_nodes/apis/__init__.py --output-model-type pydantic_v2.BaseModel + +``` diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py new file mode 100644 index 000000000..bd3b8908b --- /dev/null +++ b/comfy_api_nodes/apinode_utils.py @@ -0,0 +1,575 @@ +import io +import logging +from typing import Optional +from comfy.utils import common_upscale +from comfy_api.input_impl import VideoFromFile +from comfy_api.util import VideoContainer, VideoCodec +from comfy_api.input.video_types import VideoInput +from comfy_api.input.basic_types import AudioInput +from comfy_api_nodes.apis.client import ( + ApiClient, + ApiEndpoint, + HttpMethod, + SynchronousOperation, + UploadRequest, + UploadResponse, +) + + +import numpy as np +from PIL import Image +import requests +import torch +import math +import base64 +import uuid +from io import BytesIO +import av + + +def download_url_to_video_output(video_url: str, timeout: int = None) -> VideoFromFile: + """Downloads a video from a URL and returns a `VIDEO` output. + + Args: + video_url: The URL of the video to download. + + Returns: + A Comfy node `VIDEO` output. + """ + video_io = download_url_to_bytesio(video_url, timeout) + if video_io is None: + error_msg = f"Failed to download video from {video_url}" + logging.error(error_msg) + raise ValueError(error_msg) + return VideoFromFile(video_io) + + +def downscale_image_tensor(image, total_pixels=1536 * 1024) -> torch.Tensor: + """Downscale input image tensor to roughly the specified total pixels.""" + samples = image.movedim(-1, 1) + total = int(total_pixels) + scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) + if scale_by >= 1: + return image + width = round(samples.shape[3] * scale_by) + height = round(samples.shape[2] * scale_by) + + s = common_upscale(samples, width, height, "lanczos", "disabled") + s = s.movedim(1, -1) + return s + + +def validate_and_cast_response(response, timeout: int = None) -> torch.Tensor: + """Validates and casts a response to a torch.Tensor. + + Args: + response: The response to validate and cast. + timeout: Request timeout in seconds. Defaults to None (no timeout). + + Returns: + A torch.Tensor representing the image (1, H, W, C). + + Raises: + ValueError: If the response is not valid. 
+ """ + # validate raw JSON response + data = response.data + if not data or len(data) == 0: + raise ValueError("No images returned from API endpoint") + + # Initialize list to store image tensors + image_tensors: list[torch.Tensor] = [] + + # Process each image in the data array + for image_data in data: + image_url = image_data.url + b64_data = image_data.b64_json + + if not image_url and not b64_data: + raise ValueError("No image was generated in the response") + + if b64_data: + img_data = base64.b64decode(b64_data) + img = Image.open(io.BytesIO(img_data)) + + elif image_url: + img_response = requests.get(image_url, timeout=timeout) + if img_response.status_code != 200: + raise ValueError("Failed to download the image") + img = Image.open(io.BytesIO(img_response.content)) + + img = img.convert("RGBA") + + # Convert to numpy array, normalize to float32 between 0 and 1 + img_array = np.array(img).astype(np.float32) / 255.0 + img_tensor = torch.from_numpy(img_array) + + # Add to list of tensors + image_tensors.append(img_tensor) + + return torch.stack(image_tensors, dim=0) + + +def validate_aspect_ratio( + aspect_ratio: str, + minimum_ratio: float, + maximum_ratio: float, + minimum_ratio_str: str, + maximum_ratio_str: str, +) -> float: + """Validates and casts an aspect ratio string to a float. + + Args: + aspect_ratio: The aspect ratio string to validate. + minimum_ratio: The minimum aspect ratio. + maximum_ratio: The maximum aspect ratio. + minimum_ratio_str: The minimum aspect ratio string. + maximum_ratio_str: The maximum aspect ratio string. + + Returns: + The validated and cast aspect ratio. + + Raises: + Exception: If the aspect ratio is not valid. + """ + # get ratio values + numbers = aspect_ratio.split(":") + if len(numbers) != 2: + raise TypeError( + f"Aspect ratio must be in the format X:Y, such as 16:9, but was {aspect_ratio}." + ) + try: + numerator = int(numbers[0]) + denominator = int(numbers[1]) + except ValueError as exc: + raise TypeError( + f"Aspect ratio must contain numbers separated by ':', such as 16:9, but was {aspect_ratio}." + ) from exc + calculated_ratio = numerator / denominator + # if not close to minimum and maximum, check bounds + if not math.isclose(calculated_ratio, minimum_ratio) or not math.isclose( + calculated_ratio, maximum_ratio + ): + if calculated_ratio < minimum_ratio: + raise TypeError( + f"Aspect ratio cannot reduce to any less than {minimum_ratio_str} ({minimum_ratio}), but was {aspect_ratio} ({calculated_ratio})." + ) + elif calculated_ratio > maximum_ratio: + raise TypeError( + f"Aspect ratio cannot reduce to any greater than {maximum_ratio_str} ({maximum_ratio}), but was {aspect_ratio} ({calculated_ratio})." + ) + return aspect_ratio + + +def mimetype_to_extension(mime_type: str) -> str: + """Converts a MIME type to a file extension.""" + return mime_type.split("/")[-1].lower() + + +def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO: + """Downloads content from a URL using requests and returns it as BytesIO. + + Args: + url: The URL to download. + timeout: Request timeout in seconds. Defaults to None (no timeout). + + Returns: + BytesIO object containing the downloaded content. + """ + response = requests.get(url, stream=True, timeout=timeout) + response.raise_for_status() # Raises HTTPError for bad responses (4XX or 5XX) + return BytesIO(response.content) + + +def bytesio_to_image_tensor(image_bytesio: BytesIO, mode: str = "RGBA") -> torch.Tensor: + """Converts image data from BytesIO to a torch.Tensor. 
+
+    Args:
+        image_bytesio: BytesIO object containing the image data.
+        mode: The PIL mode to convert the image to (e.g., "RGB", "RGBA").
+
+    Returns:
+        A torch.Tensor representing the image (1, H, W, C).
+
+    Raises:
+        PIL.UnidentifiedImageError: If the image data cannot be identified.
+        ValueError: If the specified mode is invalid.
+    """
+    image = Image.open(image_bytesio)
+    image = image.convert(mode)
+    image_array = np.array(image).astype(np.float32) / 255.0
+    return torch.from_numpy(image_array).unsqueeze(0)
+
+
+def download_url_to_image_tensor(url: str, timeout: int = None) -> torch.Tensor:
+    """Downloads an image from a URL and returns a [B, H, W, C] tensor."""
+    image_bytesio = download_url_to_bytesio(url, timeout)
+    return bytesio_to_image_tensor(image_bytesio)
+
+
+def process_image_response(response: requests.Response) -> torch.Tensor:
+    """Uses content from a Response object and converts it to a torch.Tensor."""
+    return bytesio_to_image_tensor(BytesIO(response.content))
+
+
+def _tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image:
+    """Converts a single torch.Tensor image [H, W, C] to a PIL Image, optionally downscaling."""
+    if len(image.shape) > 3:
+        image = image[0]
+    # TODO: remove alpha if not allowed and present
+    input_tensor = image.cpu()
+    input_tensor = downscale_image_tensor(
+        input_tensor.unsqueeze(0), total_pixels=total_pixels
+    ).squeeze()
+    image_np = (input_tensor.numpy() * 255).astype(np.uint8)
+    img = Image.fromarray(image_np)
+    return img
+
+
+def _pil_to_bytesio(img: Image.Image, mime_type: str = "image/png") -> BytesIO:
+    """Converts a PIL Image to a BytesIO object."""
+    if not mime_type:
+        mime_type = "image/png"
+
+    img_byte_arr = io.BytesIO()
+    # Derive PIL format from MIME type (e.g., 'image/png' -> 'PNG')
+    pil_format = mime_type.split("/")[-1].upper()
+    if pil_format == "JPG":
+        pil_format = "JPEG"
+    img.save(img_byte_arr, format=pil_format)
+    img_byte_arr.seek(0)
+    return img_byte_arr
+
+
+def tensor_to_bytesio(
+    image: torch.Tensor,
+    name: Optional[str] = None,
+    total_pixels: int = 2048 * 2048,
+    mime_type: str = "image/png",
+) -> BytesIO:
+    """Converts a torch.Tensor image to a named BytesIO object.
+
+    Args:
+        image: Input torch.Tensor image.
+        name: Optional filename for the BytesIO object.
+        total_pixels: Maximum total pixels for potential downscaling.
+        mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp').
+
+    Returns:
+        Named BytesIO object containing the image data.
+    """
+    if not mime_type:
+        mime_type = "image/png"
+
+    pil_image = _tensor_to_pil(image, total_pixels=total_pixels)
+    img_binary = _pil_to_bytesio(pil_image, mime_type=mime_type)
+    img_binary.name = (
+        f"{name if name else uuid.uuid4()}.{mimetype_to_extension(mime_type)}"
+    )
+    return img_binary
+
+
+def tensor_to_base64_string(
+    image_tensor: torch.Tensor,
+    total_pixels: int = 2048 * 2048,
+    mime_type: str = "image/png",
+) -> str:
+    """Convert [B, H, W, C] or [H, W, C] tensor to a base64 string.
+
+    Args:
+        image_tensor: Input torch.Tensor image.
+        total_pixels: Maximum total pixels for potential downscaling.
+        mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp').
+
+    Returns:
+        Base64 encoded string of the image.
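+
+    Example (illustrative; the JSON field name is arbitrary):
+        >>> b64 = tensor_to_base64_string(image_tensor, mime_type="image/jpeg")
+        >>> payload = {"image": b64}  # embed directly in a JSON request body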
+ """ + pil_image = _tensor_to_pil(image_tensor, total_pixels=total_pixels) + img_byte_arr = _pil_to_bytesio(pil_image, mime_type=mime_type) + img_bytes = img_byte_arr.getvalue() + # Encode bytes to base64 string + base64_encoded_string = base64.b64encode(img_bytes).decode("utf-8") + return base64_encoded_string + + +def tensor_to_data_uri( + image_tensor: torch.Tensor, + total_pixels: int = 2048 * 2048, + mime_type: str = "image/png", +) -> str: + """Converts a tensor image to a Data URI string. + + Args: + image_tensor: Input torch.Tensor image. + total_pixels: Maximum total pixels for potential downscaling. + mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp'). + + Returns: + Data URI string (e.g., 'data:image/png;base64,...'). + """ + base64_string = tensor_to_base64_string(image_tensor, total_pixels, mime_type) + return f"data:{mime_type};base64,{base64_string}" + + +def upload_file_to_comfyapi( + file_bytes_io: BytesIO, + filename: str, + upload_mime_type: str, + auth_token: Optional[str] = None, +) -> str: + """ + Uploads a single file to ComfyUI API and returns its download URL. + + Args: + file_bytes_io: BytesIO object containing the file data. + filename: The filename of the file. + upload_mime_type: MIME type of the file. + auth_token: Optional authentication token. + + Returns: + The download URL for the uploaded file. + """ + request_object = UploadRequest(file_name=filename, content_type=upload_mime_type) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/customers/storage", + method=HttpMethod.POST, + request_model=UploadRequest, + response_model=UploadResponse, + ), + request=request_object, + auth_token=auth_token, + ) + + response: UploadResponse = operation.execute() + upload_response = ApiClient.upload_file( + response.upload_url, file_bytes_io, content_type=upload_mime_type + ) + upload_response.raise_for_status() + + return response.download_url + + +def upload_video_to_comfyapi( + video: VideoInput, + auth_token: Optional[str] = None, + container: VideoContainer = VideoContainer.MP4, + codec: VideoCodec = VideoCodec.H264, + max_duration: Optional[int] = None, +) -> str: + """ + Uploads a single video to ComfyUI API and returns its download URL. + Uses the specified container and codec for saving the video before upload. + + Args: + video: VideoInput object (Comfy VIDEO type). + auth_token: Optional authentication token. + container: The video container format to use (default: MP4). + codec: The video codec to use (default: H264). + max_duration: Optional maximum duration of the video in seconds. If the video is longer than this, an error will be raised. + + Returns: + The download URL for the uploaded video file. + """ + if max_duration is not None: + try: + actual_duration = video.duration_seconds + if actual_duration is not None and actual_duration > max_duration: + raise ValueError( + f"Video duration ({actual_duration:.2f}s) exceeds the maximum allowed ({max_duration}s)." 
+                )
+        except ValueError:
+            # Propagate the duration-limit error as-is instead of re-wrapping it below.
+            raise
+        except Exception as e:
+            logging.error(f"Error getting video duration: {e}")
+            raise ValueError(f"Could not verify video duration from source: {e}") from e
+
+    upload_mime_type = f"video/{container.value.lower()}"
+    filename = f"uploaded_video.{container.value.lower()}"
+
+    # Convert VideoInput to BytesIO using specified container/codec
+    video_bytes_io = io.BytesIO()
+    video.save_to(video_bytes_io, format=container, codec=codec)
+    video_bytes_io.seek(0)
+
+    return upload_file_to_comfyapi(
+        video_bytes_io, filename, upload_mime_type, auth_token
+    )
+
+
+def audio_tensor_to_contiguous_ndarray(waveform: torch.Tensor) -> np.ndarray:
+    """
+    Prepares audio waveform for av library by converting to a contiguous numpy array.
+
+    Args:
+        waveform: a tensor of shape (batch, channels, samples) derived from a Comfy `AUDIO` type.
+
+    Returns:
+        Contiguous numpy array of the audio waveform. If the audio was batched,
+        the first item is taken.
+    """
+    if waveform.ndim != 3:
+        raise ValueError("Expected waveform tensor shape (batch, channels, samples)")
+
+    # If batch is > 1, take first item (keep the batch dim so the squeeze below is safe)
+    if waveform.shape[0] > 1:
+        waveform = waveform[:1]
+
+    # Prepare for av: remove batch dim, move to CPU, make contiguous, convert to numpy array
+    audio_data_np = waveform.squeeze(0).cpu().contiguous().numpy()
+    if audio_data_np.dtype != np.float32:
+        audio_data_np = audio_data_np.astype(np.float32)
+
+    return audio_data_np
+
+
+def audio_ndarray_to_bytesio(
+    audio_data_np: np.ndarray,
+    sample_rate: int,
+    container_format: str = "mp4",
+    codec_name: str = "aac",
+) -> BytesIO:
+    """
+    Encodes a numpy array of audio data into a BytesIO object.
+    """
+    audio_bytes_io = io.BytesIO()
+    with av.open(audio_bytes_io, mode="w", format=container_format) as output_container:
+        audio_stream = output_container.add_stream(codec_name, rate=sample_rate)
+        frame = av.AudioFrame.from_ndarray(
+            audio_data_np,
+            format="fltp",
+            layout="stereo" if audio_data_np.shape[0] > 1 else "mono",
+        )
+        frame.sample_rate = sample_rate
+        frame.pts = 0
+
+        for packet in audio_stream.encode(frame):
+            output_container.mux(packet)
+
+        # Flush stream
+        for packet in audio_stream.encode(None):
+            output_container.mux(packet)
+
+    audio_bytes_io.seek(0)
+    return audio_bytes_io
+
+
+def upload_audio_to_comfyapi(
+    audio: AudioInput,
+    auth_token: Optional[str] = None,
+    container_format: str = "mp4",
+    codec_name: str = "aac",
+    mime_type: str = "audio/mp4",
+    filename: str = "uploaded_audio.mp4",
+) -> str:
+    """
+    Uploads a single audio input to ComfyUI API and returns its download URL.
+    Encodes the raw waveform into the specified format before uploading.
+
+    Args:
+        audio: a Comfy `AUDIO` type (contains waveform tensor and sample_rate)
+        auth_token: Optional authentication token.
+        container_format: Target container format (default: "mp4").
+        codec_name: Target audio codec (default: "aac").
+        mime_type: MIME type of the encoded file.
+        filename: Filename to use for the upload.
+
+    Returns:
+        The download URL for the uploaded audio file.
+    """
+    sample_rate: int = audio["sample_rate"]
+    waveform: torch.Tensor = audio["waveform"]
+    audio_data_np = audio_tensor_to_contiguous_ndarray(waveform)
+    audio_bytes_io = audio_ndarray_to_bytesio(
+        audio_data_np, sample_rate, container_format, codec_name
+    )
+
+    return upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_token)
+
+
+def upload_images_to_comfyapi(
+    image: torch.Tensor, max_images=8, auth_token=None, mime_type: Optional[str] = None
+) -> list[str]:
+    """
+    Uploads images to ComfyUI API and returns download URLs.
+    To upload multiple images, stack them in the batch dimension first.
+
+    Args:
+        image: Input torch.Tensor image.
+ max_images: Maximum number of images to upload. + auth_token: Optional authentication token. + mime_type: Optional MIME type for the image. + """ + # if batch, try to upload each file if max_images is greater than 0 + idx_image = 0 + download_urls: list[str] = [] + is_batch = len(image.shape) > 3 + batch_length = 1 + if is_batch: + batch_length = image.shape[0] + while True: + curr_image = image + if len(image.shape) > 3: + curr_image = image[idx_image] + # get BytesIO version of image + img_binary = tensor_to_bytesio(curr_image, mime_type=mime_type) + # first, request upload/download urls from comfy API + if not mime_type: + request_object = UploadRequest(file_name=img_binary.name) + else: + request_object = UploadRequest( + file_name=img_binary.name, content_type=mime_type + ) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/customers/storage", + method=HttpMethod.POST, + request_model=UploadRequest, + response_model=UploadResponse, + ), + request=request_object, + auth_token=auth_token, + ) + response = operation.execute() + + upload_response = ApiClient.upload_file( + response.upload_url, img_binary, content_type=mime_type + ) + # verify success + try: + upload_response.raise_for_status() + except requests.exceptions.HTTPError as e: + raise ValueError(f"Could not upload one or more images: {e}") from e + # add download_url to list + download_urls.append(response.download_url) + + idx_image += 1 + # stop uploading additional files if done + if is_batch and max_images > 0: + if idx_image >= max_images: + break + if idx_image >= batch_length: + break + return download_urls + + +def resize_mask_to_image(mask: torch.Tensor, image: torch.Tensor, + upscale_method="nearest-exact", crop="disabled", + allow_gradient=True, add_channel_dim=False): + """ + Resize mask to be the same dimensions as an image, while maintaining proper format for API calls. 
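+
+    Example (illustrative; ``mask`` is [B, H, W] and ``image`` is [B, H, W, C]):
+        >>> mask = resize_mask_to_image(mask, image, allow_gradient=False)
+        >>> mask.shape[1:3] == image.shape[1:3]
+        True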
+    """
+    _, H, W, _ = image.shape
+    mask = mask.unsqueeze(-1)
+    mask = mask.movedim(-1, 1)
+    mask = common_upscale(mask, width=W, height=H, upscale_method=upscale_method, crop=crop)
+    mask = mask.movedim(1, -1)
+    if not add_channel_dim:
+        mask = mask.squeeze(-1)
+    if not allow_gradient:
+        mask = (mask > 0.5).float()
+    return mask
+
+
+def validate_string(string: str, strip_whitespace=True, field_name="prompt", min_length=None, max_length=None):
+    if strip_whitespace:
+        string = string.strip()
+    if min_length and len(string) < min_length:
+        raise Exception(f"Field '{field_name}' cannot be shorter than {min_length} characters; was {len(string)} characters long.")
+    if max_length and len(string) > max_length:
+        raise Exception(f"Field '{field_name}' cannot be longer than {max_length} characters; was {len(string)} characters long.")
+    if not string:
+        raise Exception(f"Field '{field_name}' cannot be empty.")
diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py
index 29a3ab33b..310c0f546 100644
--- a/comfy_api_nodes/apis/PixverseController.py
+++ b/comfy_api_nodes/apis/PixverseController.py
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
-#   filename:  https://api.comfy.org/openapi
-#   timestamp: 2025-04-23T15:56:33+00:00
+#   filename:  filtered-openapi.yaml
+#   timestamp: 2025-04-29T23:44:54+00:00
 
 from __future__ import annotations
diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py
index 399512214..323c38e96 100644
--- a/comfy_api_nodes/apis/PixverseDto.py
+++ b/comfy_api_nodes/apis/PixverseDto.py
@@ -1,12 +1,12 @@
 # generated by datamodel-codegen:
-#   filename:  https://api.comfy.org/openapi
-#   timestamp: 2025-04-23T15:56:33+00:00
+#   filename:  filtered-openapi.yaml
+#   timestamp: 2025-04-29T23:44:54+00:00
 
 from __future__ import annotations
 
 from typing import Optional
 
-from pydantic import BaseModel, Field, constr
+from pydantic import BaseModel, Field
 
 
 class V2OpenAPII2VResp(BaseModel):
@@ -30,10 +30,10 @@ class V2OpenAPIT2VReq(BaseModel):
         description='Motion mode (normal, fast, --fast only available when duration=5; --quality=1080p does not support fast)',
         examples=['normal'],
     )
-    negative_prompt: Optional[constr(max_length=2048)] = Field(
-        None, description='Negative prompt\n'
+    negative_prompt: Optional[str] = Field(
+        None, description='Negative prompt\n', max_length=2048
     )
-    prompt: constr(max_length=2048) = Field(..., description='Prompt')
+    prompt: str = Field(..., description='Prompt', max_length=2048)
     quality: str = Field(
         ...,
         description='Video quality ("360p"(Turbo model), "540p", "720p", "1080p")',
diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py
index e7ea9b332..aa1c4ce0b 100644
--- a/comfy_api_nodes/apis/__init__.py
+++ b/comfy_api_nodes/apis/__init__.py
@@ -1,127 +1,455 @@
 # generated by datamodel-codegen:
-#   filename:  https://api.comfy.org/openapi
-#   timestamp: 2025-04-23T15:56:33+00:00
+#   filename:  filtered-openapi.yaml
+#   timestamp: 2025-05-04T04:12:39+00:00
 
 from __future__ import annotations
 
 from datetime import datetime
 from enum import Enum
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Literal, Optional, Union
+from uuid import UUID
 
-from pydantic import AnyUrl, BaseModel, Field, confloat, conint
-
-class Customer(BaseModel):
-    createdAt: Optional[datetime] = Field(
-        None, description='The date and time the user was created'
-    )
-    email: Optional[str] = Field(None, description='The email address for this user')
-    id: str = 
Field(..., description='The firebase UID of the user') - name: Optional[str] = Field(None, description='The name for this user') - updatedAt: Optional[datetime] = Field( - None, description='The date and time the user was last updated' - ) +from pydantic import AnyUrl, BaseModel, Field, RootModel, StrictBytes -class Error(BaseModel): - details: Optional[List[str]] = Field( +class PersonalAccessToken(BaseModel): + id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') + name: Optional[str] = Field( None, - description='Optional detailed information about the error or hints for resolving it.', + description='Required. The name of the token. Can be a simple description.', ) - message: Optional[str] = Field( - None, description='A clear and concise description of the error.' + description: Optional[str] = Field( + None, + description="Optional. A more detailed description of the token's intended use.", ) + createdAt: Optional[datetime] = Field( + None, description='[Output Only]The date and time the token was created.' + ) + token: Optional[str] = Field( + None, + description='[Output Only]. The personal access token. Only returned during creation.', + ) + + +class GitCommitSummary(BaseModel): + commit_hash: Optional[str] = Field(None, description='The hash of the commit') + commit_name: Optional[str] = Field(None, description='The name of the commit') + branch_name: Optional[str] = Field( + None, description='The branch where the commit was made' + ) + author: Optional[str] = Field(None, description='The author of the commit') + timestamp: Optional[datetime] = Field( + None, description='The timestamp when the commit was made' + ) + status_summary: Optional[Dict[str, str]] = Field( + None, description='A map of operating system to status pairs' + ) + + +class User(BaseModel): + id: Optional[str] = Field(None, description='The unique id for this user.') + email: Optional[str] = Field(None, description='The email address for this user.') + name: Optional[str] = Field(None, description='The name for this user.') + isApproved: Optional[bool] = Field( + None, description='Indicates if the user is approved.' + ) + isAdmin: Optional[bool] = Field( + None, description='Indicates if the user has admin privileges.' + ) + + +class PublisherUser(BaseModel): + id: Optional[str] = Field(None, description='The unique id for this user.') + email: Optional[str] = Field(None, description='The email address for this user.') + name: Optional[str] = Field(None, description='The name for this user.') class ErrorResponse(BaseModel): error: str message: str + +class StorageFile(BaseModel): + id: Optional[UUID] = Field( + None, description='Unique identifier for the storage file' + ) + file_path: Optional[str] = Field(None, description='Path to the file in storage') + public_url: Optional[str] = Field(None, description='Public URL') + + +class PublisherMember(BaseModel): + id: Optional[str] = Field( + None, description='The unique identifier for the publisher member.' + ) + user: Optional[PublisherUser] = Field( + None, description='The user associated with this publisher member.' + ) + role: Optional[str] = Field( + None, description='The role of the user in the publisher.' 
+ ) + + +class ComfyNode(BaseModel): + comfy_node_name: Optional[str] = Field( + None, description='Unique identifier for the node' + ) + category: Optional[str] = Field( + None, + description='UI category where the node is listed, used for grouping nodes.', + ) + description: Optional[str] = Field( + None, description="Brief description of the node's functionality or purpose." + ) + input_types: Optional[str] = Field(None, description='Defines input parameters') + deprecated: Optional[bool] = Field( + None, + description='Indicates if the node is deprecated. Deprecated nodes are hidden in the UI.', + ) + experimental: Optional[bool] = Field( + None, + description='Indicates if the node is experimental, subject to changes or removal.', + ) + output_is_list: Optional[List[bool]] = Field( + None, description='Boolean values indicating if each output is a list.' + ) + return_names: Optional[str] = Field( + None, description='Names of the outputs for clarity in workflows.' + ) + return_types: Optional[str] = Field( + None, description='Specifies the types of outputs produced by the node.' + ) + function: Optional[str] = Field( + None, description='Name of the entry-point function to execute the node.' + ) + + +class ComfyNodeCloudBuildInfo(BaseModel): + project_id: Optional[str] = None + project_number: Optional[str] = None + location: Optional[str] = None + build_id: Optional[str] = None + + +class Error(BaseModel): + message: Optional[str] = Field( + None, description='A clear and concise description of the error.' + ) + details: Optional[List[str]] = Field( + None, + description='Optional detailed information about the error or hints for resolving it.', + ) + + +class NodeVersionUpdateRequest(BaseModel): + changelog: Optional[str] = Field( + None, description='The changelog describing the version changes.' + ) + deprecated: Optional[bool] = Field( + None, description='Whether the version is deprecated.' + ) + + +class NodeStatus(str, Enum): + NodeStatusActive = 'NodeStatusActive' + NodeStatusDeleted = 'NodeStatusDeleted' + NodeStatusBanned = 'NodeStatusBanned' + + +class NodeVersionStatus(str, Enum): + NodeVersionStatusActive = 'NodeVersionStatusActive' + NodeVersionStatusDeleted = 'NodeVersionStatusDeleted' + NodeVersionStatusBanned = 'NodeVersionStatusBanned' + NodeVersionStatusPending = 'NodeVersionStatusPending' + NodeVersionStatusFlagged = 'NodeVersionStatusFlagged' + + +class PublisherStatus(str, Enum): + PublisherStatusActive = 'PublisherStatusActive' + PublisherStatusBanned = 'PublisherStatusBanned' + + +class WorkflowRunStatus(str, Enum): + WorkflowRunStatusStarted = 'WorkflowRunStatusStarted' + WorkflowRunStatusFailed = 'WorkflowRunStatusFailed' + WorkflowRunStatusCompleted = 'WorkflowRunStatusCompleted' + + +class MachineStats(BaseModel): + machine_name: Optional[str] = Field(None, description='Name of the machine.') + os_version: Optional[str] = Field( + None, description='The operating system version. eg. Ubuntu Linux 20.04' + ) + gpu_type: Optional[str] = Field( + None, description='The GPU type. eg. NVIDIA Tesla K80' + ) + cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') + initial_cpu: Optional[str] = Field( + None, description='Initial CPU available before the job starts.' + ) + memory_capacity: Optional[str] = Field( + None, description='Total memory on the machine.' + ) + initial_ram: Optional[str] = Field( + None, description='Initial RAM available before the job starts.' 
+ ) + vram_time_series: Optional[Dict[str, Any]] = Field( + None, description='Time series of VRAM usage.' + ) + disk_capacity: Optional[str] = Field( + None, description='Total disk capacity on the machine.' + ) + initial_disk: Optional[str] = Field( + None, description='Initial disk available before the job starts.' + ) + pip_freeze: Optional[str] = Field(None, description='The pip freeze output') + + +class Customer(BaseModel): + id: str = Field(..., description='The firebase UID of the user') + email: Optional[str] = Field(None, description='The email address for this user') + name: Optional[str] = Field(None, description='The name for this user') + createdAt: Optional[datetime] = Field( + None, description='The date and time the user was created' + ) + updatedAt: Optional[datetime] = Field( + None, description='The date and time the user was last updated' + ) + + +class MagicPrompt(str, Enum): + ON = 'ON' + OFF = 'OFF' + + +class ColorPalette(BaseModel): + name: str = Field(..., description='Name of the color palette', examples=['PASTEL']) + + +class StyleCode(RootModel[str]): + root: str = Field(..., pattern='^[0-9A-Fa-f]{8}$') + + +class StyleType(str, Enum): + GENERAL = 'GENERAL' + + +class IdeogramColorPalette1(BaseModel): + name: str = Field(..., description='Name of the preset color palette') + + +class Member(BaseModel): + color: Optional[str] = Field( + None, description='Hexadecimal color code', pattern='^#[0-9A-Fa-f]{6}$' + ) + weight: Optional[float] = Field( + None, description='Optional weight for the color (0-1)', ge=0.0, le=1.0 + ) + + +class IdeogramColorPalette2(BaseModel): + members: List[Member] = Field( + ..., description='Array of color definitions with optional weights' + ) + + +class IdeogramColorPalette( + RootModel[Union[IdeogramColorPalette1, IdeogramColorPalette2]] +): + root: Union[IdeogramColorPalette1, IdeogramColorPalette2] = Field( + ..., + description='A color palette specification that can either use a preset name or explicit color definitions with weights', + ) + + class ImageRequest(BaseModel): + prompt: str = Field( + ..., description='Required. The prompt to use to generate the image.' + ) aspect_ratio: Optional[str] = Field( None, description="Optional. The aspect ratio (e.g., 'ASPECT_16_9', 'ASPECT_1_1'). Cannot be used with resolution. Defaults to 'ASPECT_1_1' if unspecified.", ) - color_palette: Optional[Dict[str, Any]] = Field( - None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' - ) + model: str = Field(..., description="The model used (e.g., 'V_2', 'V_2A_TURBO')") magic_prompt_option: Optional[str] = Field( None, description="Optional. MagicPrompt usage ('AUTO', 'ON', 'OFF')." ) - model: str = Field(..., description="The model used (e.g., 'V_2', 'V_2A_TURBO')") - negative_prompt: Optional[str] = Field( + seed: Optional[int] = Field( None, - description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', - ) - num_images: Optional[conint(ge=1, le=8)] = Field( - 1, description='Optional. Number of images to generate (1-8). Defaults to 1.' - ) - prompt: str = Field( - ..., description='Required. The prompt to use to generate the image.' - ) - resolution: Optional[str] = Field( - None, - description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.", - ) - seed: Optional[conint(ge=0, le=2147483647)] = Field( - None, description='Optional. A number between 0 and 2147483647.' + description='Optional. 
A number between 0 and 2147483647.', + ge=0, + le=2147483647, ) style_type: Optional[str] = Field( None, description="Optional. Style type ('AUTO', 'GENERAL', 'REALISTIC', 'DESIGN', 'RENDER_3D', 'ANIME'). Only for models V_2 and above.", ) + negative_prompt: Optional[str] = Field( + None, + description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', + ) + num_images: Optional[int] = Field( + 1, + description='Optional. Number of images to generate (1-8). Defaults to 1.', + ge=1, + le=8, + ) + resolution: Optional[str] = Field( + None, + description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.", + ) + color_palette: Optional[Dict[str, Any]] = Field( + None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' + ) + + +class IdeogramGenerateRequest(BaseModel): + image_request: ImageRequest = Field( + ..., description='The image generation request parameters.' + ) class Datum(BaseModel): - is_image_safe: Optional[bool] = Field( - None, description='Indicates whether the image is considered safe.' - ) prompt: Optional[str] = Field( None, description='The prompt used to generate this image.' ) resolution: Optional[str] = Field( None, description="The resolution of the generated image (e.g., '1024x1024')." ) + is_image_safe: Optional[bool] = Field( + None, description='Indicates whether the image is considered safe.' + ) seed: Optional[int] = Field( None, description='The seed value used for this generation.' ) + url: Optional[str] = Field(None, description='URL to the generated image.') style_type: Optional[str] = Field( None, description="The style type used for generation (e.g., 'REALISTIC', 'ANIME').", ) - url: Optional[str] = Field(None, description='URL to the generated image.') -class Code(Enum): - int_1100 = 1100 - int_1101 = 1101 - int_1102 = 1102 - int_1103 = 1103 +class IdeogramGenerateResponse(BaseModel): + created: Optional[datetime] = Field( + None, description='Timestamp when the generation was created.' + ) + data: Optional[List[Datum]] = Field( + None, description='Array of generated image information.' 
+ ) -class Code1(Enum): - int_1000 = 1000 - int_1001 = 1001 - int_1002 = 1002 - int_1003 = 1003 - int_1004 = 1004 +class RenderingSpeed1(str, Enum): + TURBO = 'TURBO' + DEFAULT = 'DEFAULT' + QUALITY = 'QUALITY' -class AspectRatio(str, Enum): +class MagicPrompt1(str, Enum): + AUTO = 'AUTO' + ON = 'ON' + OFF = 'OFF' + + +class StyleType1(str, Enum): + AUTO = 'AUTO' + GENERAL = 'GENERAL' + REALISTIC = 'REALISTIC' + DESIGN = 'DESIGN' + + +class IdeogramV3RemixRequest(BaseModel): + image: Optional[StrictBytes] = None + prompt: str + image_weight: Optional[int] = Field(50, ge=1, le=100) + seed: Optional[int] = Field(None, ge=0, le=2147483647) + resolution: Optional[str] = None + aspect_ratio: Optional[str] = None + rendering_speed: Optional[RenderingSpeed1] = None + magic_prompt: Optional[MagicPrompt1] = None + negative_prompt: Optional[str] = None + num_images: Optional[int] = Field(None, ge=1, le=8) + color_palette: Optional[Dict[str, Any]] = None + style_codes: Optional[List[str]] = None + style_type: Optional[StyleType1] = None + style_reference_images: Optional[List[StrictBytes]] = None + + +class Datum1(BaseModel): + prompt: Optional[str] = None + resolution: Optional[str] = None + is_image_safe: Optional[bool] = None + seed: Optional[int] = None + url: Optional[str] = None + style_type: Optional[str] = None + + +class IdeogramV3IdeogramResponse(BaseModel): + created: Optional[datetime] = None + data: Optional[List[Datum1]] = None + + +class IdeogramV3ReframeRequest(BaseModel): + image: Optional[StrictBytes] = None + resolution: str + num_images: Optional[int] = Field(None, ge=1, le=8) + seed: Optional[int] = Field(None, ge=0, le=2147483647) + rendering_speed: Optional[RenderingSpeed1] = None + color_palette: Optional[Dict[str, Any]] = None + style_codes: Optional[List[str]] = None + style_reference_images: Optional[List[StrictBytes]] = None + + +class IdeogramV3ReplaceBackgroundRequest(BaseModel): + image: Optional[StrictBytes] = None + prompt: str + magic_prompt: Optional[MagicPrompt1] = None + num_images: Optional[int] = Field(None, ge=1, le=8) + seed: Optional[int] = Field(None, ge=0, le=2147483647) + rendering_speed: Optional[RenderingSpeed1] = None + color_palette: Optional[Dict[str, Any]] = None + style_codes: Optional[List[str]] = None + style_reference_images: Optional[List[StrictBytes]] = None + + +class KlingTaskStatus(str, Enum): + submitted = 'submitted' + processing = 'processing' + succeed = 'succeed' + failed = 'failed' + + +class KlingVideoGenModelName(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_5 = 'kling-v1-5' + kling_v1_6 = 'kling-v1-6' + kling_v2_master = 'kling-v2-master' + + +class KlingVideoGenMode(str, Enum): + std = 'std' + pro = 'pro' + + +class KlingVideoGenAspectRatio(str, Enum): field_16_9 = '16:9' field_9_16 = '9:16' field_1_1 = '1:1' -class Config(BaseModel): - horizontal: Optional[confloat(ge=-10.0, le=10.0)] = None - pan: Optional[confloat(ge=-10.0, le=10.0)] = None - roll: Optional[confloat(ge=-10.0, le=10.0)] = None - tilt: Optional[confloat(ge=-10.0, le=10.0)] = None - vertical: Optional[confloat(ge=-10.0, le=10.0)] = None - zoom: Optional[confloat(ge=-10.0, le=10.0)] = None +class KlingVideoGenDuration(str, Enum): + field_5 = '5' + field_10 = '10' -class Type(str, Enum): +class KlingVideoGenCfgScale(RootModel[float]): + root: float = Field( + ..., + description="Flexibility in video generation. 
The higher the value, the lower the model's degree of flexibility, and the stronger the relevance to the user's prompt.", + ge=0.0, + le=1.0, + ) + + +class KlingCameraControlType(str, Enum): simple = 'simple' down_back = 'down_back' forward_up = 'forward_up' @@ -129,52 +457,99 @@ class Type(str, Enum): left_turn_forward = 'left_turn_forward' -class CameraControl(BaseModel): - config: Optional[Config] = None - type: Optional[Type] = Field(None, description='Predefined camera movements type') +class KlingCameraConfig(BaseModel): + horizontal: Optional[float] = Field( + None, + description="Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right.", + ge=-10.0, + le=10.0, + ) + vertical: Optional[float] = Field( + None, + description="Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward.", + ge=-10.0, + le=10.0, + ) + pan: Optional[float] = Field( + None, + description="Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.", + ge=-10.0, + le=10.0, + ) + tilt: Optional[float] = Field( + None, + description="Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.", + ge=-10.0, + le=10.0, + ) + roll: Optional[float] = Field( + None, + description="Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", + ge=-10.0, + le=10.0, + ) + zoom: Optional[float] = Field( + None, + description="Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.", + ge=-10.0, + le=10.0, + ) -class Duration(str, Enum): - field_5 = 5 - field_10 = 10 - - -class Mode(str, Enum): - std = 'std' - pro = 'pro' - - -class TaskInfo(BaseModel): - external_task_id: Optional[str] = None - - -class Video(BaseModel): - duration: Optional[str] = Field(None, description='Total video duration') +class KlingVideoResult(BaseModel): id: Optional[str] = Field(None, description='Generated video ID') url: Optional[AnyUrl] = Field(None, description='URL for generated video') + duration: Optional[str] = Field(None, description='Total video duration') -class TaskResult(BaseModel): - videos: Optional[List[Video]] = None +class KlingAudioUploadType(str, Enum): + file = 'file' + url = 'url' -class TaskStatus(str, Enum): - submitted = 'submitted' - processing = 'processing' - succeed = 'succeed' - failed = 'failed' +class KlingLipSyncMode(str, Enum): + text2video = 'text2video' + audio2video = 'audio2video' -class Data(BaseModel): - created_at: Optional[int] = Field(None, description='Task creation time') - task_id: Optional[str] = Field(None, description='Task ID') - task_info: Optional[TaskInfo] = None - task_result: Optional[TaskResult] = None - task_status: Optional[TaskStatus] = None - updated_at: Optional[int] = Field(None, description='Task update time') +class KlingLipSyncVoiceLanguage(str, Enum): + zh = 'zh' + en = 'en' -class AspectRatio1(str, Enum): +class KlingDualCharacterEffectsScene(str, Enum): + hug = 'hug' + kiss = 'kiss' + heart_gesture = 'heart_gesture' + + +class KlingSingleImageEffectsScene(str, Enum): + bloombloom = 'bloombloom' + dizzydizzy = 'dizzydizzy' + fuzzyfuzzy = 'fuzzyfuzzy' + squish = 'squish' + expansion = 'expansion' + + +class KlingCharacterEffectModelName(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_5 = 'kling-v1-5' + kling_v1_6 = 
'kling-v1-6' + + +class KlingSingleImageEffectModelName(str, Enum): + kling_v1_6 = 'kling-v1-6' + + +class KlingSingleImageEffectDuration(str, Enum): + field_5 = '5' + + +class KlingDualCharacterImages(RootModel[List[str]]): + root: List[str] = Field(..., max_length=2, min_length=2) + + +class KlingImageGenAspectRatio(str, Enum): field_16_9 = '16:9' field_9_16 = '9:16' field_1_1 = '1:1' @@ -185,63 +560,289 @@ class AspectRatio1(str, Enum): field_21_9 = '21:9' -class ImageReference(str, Enum): +class KlingImageGenImageReferenceType(str, Enum): subject = 'subject' face = 'face' -class Image(BaseModel): +class KlingImageGenModelName(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_5 = 'kling-v1-5' + kling_v2 = 'kling-v2' + + +class KlingImageResult(BaseModel): index: Optional[int] = Field(None, description='Image Number (0-9)') url: Optional[AnyUrl] = Field(None, description='URL for generated image') -class TaskResult1(BaseModel): - images: Optional[List[Image]] = None +class KlingVirtualTryOnModelName(str, Enum): + kolors_virtual_try_on_v1 = 'kolors-virtual-try-on-v1' + kolors_virtual_try_on_v1_5 = 'kolors-virtual-try-on-v1-5' + + +class TaskInfo(BaseModel): + external_task_id: Optional[str] = None + + +class TaskResult(BaseModel): + videos: Optional[List[KlingVideoResult]] = None + + +class Data(BaseModel): + task_id: Optional[str] = Field(None, description='Task ID') + task_status: Optional[KlingTaskStatus] = None + task_info: Optional[TaskInfo] = None + created_at: Optional[int] = Field(None, description='Task creation time') + updated_at: Optional[int] = Field(None, description='Task update time') + task_result: Optional[TaskResult] = None + + +class KlingText2VideoResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + data: Optional[Data] = None + + +class Trajectory(BaseModel): + x: Optional[int] = Field( + None, + description='The horizontal coordinate of trajectory point. Based on bottom-left corner of image as origin (0,0).', + ) + y: Optional[int] = Field( + None, + description='The vertical coordinate of trajectory point. Based on bottom-left corner of image as origin (0,0).', + ) + + +class DynamicMask(BaseModel): + mask: Optional[AnyUrl] = Field( + None, + description='Dynamic Brush Application Area (Mask image created by users using the motion brush). 
The aspect ratio must match the input image.', + ) + trajectories: Optional[List[Trajectory]] = None class Data1(BaseModel): - created_at: Optional[int] = Field(None, description='Task creation time') task_id: Optional[str] = Field(None, description='Task ID') - task_result: Optional[TaskResult1] = None - task_status: Optional[TaskStatus] = None - task_status_msg: Optional[str] = Field(None, description='Task status information') + task_status: Optional[KlingTaskStatus] = None + task_info: Optional[TaskInfo] = None + created_at: Optional[int] = Field(None, description='Task creation time') updated_at: Optional[int] = Field(None, description='Task update time') + task_result: Optional[TaskResult] = None -class AspectRatio2(str, Enum): - field_16_9 = '16:9' - field_9_16 = '9:16' - field_1_1 = '1:1' +class KlingImage2VideoResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + data: Optional[Data1] = None -class CameraControl1(BaseModel): - config: Optional[Config] = None - type: Optional[Type] = Field(None, description='Predefined camera movements type') - - -class ModelName2(str, Enum): - kling_v1 = 'kling-v1' - kling_v1_6 = 'kling-v1-6' - - -class TaskResult2(BaseModel): - videos: Optional[List[Video]] = None +class KlingVideoExtendRequest(BaseModel): + video_id: Optional[str] = Field( + None, + description='The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension.', + ) + prompt: Optional[str] = Field( + None, + description='Positive text prompt for guiding the video extension', + max_length=2500, + ) + negative_prompt: Optional[str] = Field( + None, + description='Negative text prompt for elements to avoid in the extended video', + max_length=2500, + ) + cfg_scale: Optional[KlingVideoGenCfgScale] = Field( + default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + ) + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback notification address. Server will notify when the task status changes.', + ) class Data2(BaseModel): - created_at: Optional[int] = Field(None, description='Task creation time') task_id: Optional[str] = Field(None, description='Task ID') + task_status: Optional[KlingTaskStatus] = None task_info: Optional[TaskInfo] = None - task_result: Optional[TaskResult2] = None - task_status: Optional[TaskStatus] = None + created_at: Optional[int] = Field(None, description='Task creation time') updated_at: Optional[int] = Field(None, description='Task update time') + task_result: Optional[TaskResult] = None -class Code2(Enum): - int_1200 = 1200 - int_1201 = 1201 - int_1202 = 1202 - int_1203 = 1203 +class KlingVideoExtendResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + data: Optional[Data2] = None + + +class KlingLipSyncInputObject(BaseModel): + video_id: Optional[str] = Field( + None, + description='The ID of the video generated by Kling AI. Only supports 5-second and 10-second videos generated within the last 30 days.', + ) + video_url: Optional[str] = Field( + None, + description='Get link for uploaded video. 
Video files support .mp4/.mov, file size does not exceed 100MB, video length between 2-10s.', + ) + mode: KlingLipSyncMode + text: Optional[str] = Field( + None, + description='Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.', + ) + voice_id: Optional[str] = Field( + None, + description='Voice ID. Required when mode is text2video. The system offers a variety of voice options to choose from.', + ) + voice_language: Optional[KlingLipSyncVoiceLanguage] = 'en' + voice_speed: Optional[float] = Field( + 1, + description='Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place.', + ge=0.8, + le=2.0, + ) + audio_type: Optional[KlingAudioUploadType] = None + audio_file: Optional[str] = Field( + None, + description='Local Path of Audio File. Supported formats: .mp3/.wav/.m4a/.aac, maximum file size of 5MB. Base64 code.', + ) + audio_url: Optional[str] = Field( + None, + description='Audio File Download URL. Supported formats: .mp3/.wav/.m4a/.aac, maximum file size of 5MB.', + ) + + +class KlingLipSyncRequest(BaseModel): + input: KlingLipSyncInputObject + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback notification address. Server will notify when the task status changes.', + ) + + +class Data3(BaseModel): + task_id: Optional[str] = Field(None, description='Task ID') + task_status: Optional[KlingTaskStatus] = None + task_info: Optional[TaskInfo] = None + created_at: Optional[int] = Field(None, description='Task creation time') + updated_at: Optional[int] = Field(None, description='Task update time') + task_result: Optional[TaskResult] = None + + +class KlingLipSyncResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + data: Optional[Data3] = None + + +class KlingSingleImageEffectInput(BaseModel): + model_name: KlingSingleImageEffectModelName + image: str = Field( + ..., + description='Reference Image. URL or Base64 encoded string (without data:image prefix). 
File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1.', + ) + duration: KlingSingleImageEffectDuration + + +class KlingDualCharacterEffectInput(BaseModel): + model_name: Optional[KlingCharacterEffectModelName] = 'kling-v1' + mode: Optional[KlingVideoGenMode] = 'std' + images: KlingDualCharacterImages + duration: KlingVideoGenDuration + + +class Data4(BaseModel): + task_id: Optional[str] = Field(None, description='Task ID') + task_status: Optional[KlingTaskStatus] = None + task_info: Optional[TaskInfo] = None + created_at: Optional[int] = Field(None, description='Task creation time') + updated_at: Optional[int] = Field(None, description='Task update time') + task_result: Optional[TaskResult] = None + + +class KlingVideoEffectsResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + data: Optional[Data4] = None + + +class KlingImageGenerationsRequest(BaseModel): + model_name: Optional[KlingImageGenModelName] = 'kling-v1' + prompt: str = Field(..., description='Positive text prompt', max_length=500) + negative_prompt: Optional[str] = Field( + None, description='Negative text prompt', max_length=200 + ) + image: Optional[str] = Field( + None, description='Reference Image - Base64 encoded string or image URL' + ) + image_reference: Optional[KlingImageGenImageReferenceType] = None + image_fidelity: Optional[float] = Field( + 0.5, description='Reference intensity for user-uploaded images', ge=0.0, le=1.0 + ) + human_fidelity: Optional[float] = Field( + 0.45, description='Subject reference similarity', ge=0.0, le=1.0 + ) + n: Optional[int] = Field(1, description='Number of generated images', ge=1, le=9) + aspect_ratio: Optional[KlingImageGenAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + + +class TaskResult5(BaseModel): + images: Optional[List[KlingImageResult]] = None + + +class Data5(BaseModel): + task_id: Optional[str] = Field(None, description='Task ID') + task_status: Optional[KlingTaskStatus] = None + task_status_msg: Optional[str] = Field(None, description='Task status information') + created_at: Optional[int] = Field(None, description='Task creation time') + updated_at: Optional[int] = Field(None, description='Task update time') + task_result: Optional[TaskResult5] = None + + +class KlingImageGenerationsResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + data: Optional[Data5] = None + + +class KlingVirtualTryOnRequest(BaseModel): + model_name: Optional[KlingVirtualTryOnModelName] = 'kolors-virtual-try-on-v1' + human_image: str = Field( + ..., description='Reference human image - Base64 encoded string or image URL' + ) + cloth_image: Optional[str] = Field( + None, + description='Reference clothing image - Base64 encoded string or image URL', + ) + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + + +class Data6(BaseModel): + task_id: Optional[str] = Field(None, description='Task ID') + task_status: Optional[KlingTaskStatus] = None + task_status_msg: Optional[str] = Field(None, description='Task status information') + created_at: Optional[int] = Field(None, description='Task creation time') 
+ updated_at: Optional[int] = Field(None, description='Task update time') + task_result: Optional[TaskResult5] = None + + +class KlingVirtualTryOnResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + data: Optional[Data6] = None class ResourcePackType(str, Enum): @@ -257,87 +858,1140 @@ class Status(str, Enum): class ResourcePackSubscribeInfo(BaseModel): + resource_pack_name: Optional[str] = Field(None, description='Resource package name') + resource_pack_id: Optional[str] = Field(None, description='Resource package ID') + resource_pack_type: Optional[ResourcePackType] = Field( + None, + description='Resource package type (decreasing_total=decreasing total, constant_period=constant periodicity)', + ) + total_quantity: Optional[float] = Field(None, description='Total quantity') + remaining_quantity: Optional[float] = Field( + None, description='Remaining quantity (updated with a 12-hour delay)' + ) + purchase_time: Optional[int] = Field( + None, description='Purchase time, Unix timestamp in ms' + ) effective_time: Optional[int] = Field( None, description='Effective time, Unix timestamp in ms' ) invalid_time: Optional[int] = Field( None, description='Expiration time, Unix timestamp in ms' ) - purchase_time: Optional[int] = Field( - None, description='Purchase time, Unix timestamp in ms' - ) - remaining_quantity: Optional[float] = Field( - None, description='Remaining quantity (updated with a 12-hour delay)' - ) - resource_pack_id: Optional[str] = Field(None, description='Resource package ID') - resource_pack_name: Optional[str] = Field(None, description='Resource package name') - resource_pack_type: Optional[ResourcePackType] = Field( - None, - description='Resource package type (decreasing_total=decreasing total, constant_period=constant periodicity)', - ) status: Optional[Status] = Field(None, description='Resource Package Status') - total_quantity: Optional[float] = Field(None, description='Total quantity') - -class Background(str, Enum): - transparent = 'transparent' - opaque = 'opaque' -class Moderation(str, Enum): - low = 'low' - auto = 'auto' +class Data7(BaseModel): + code: Optional[int] = Field(None, description='Error code; 0 indicates success') + msg: Optional[str] = Field(None, description='Error information') + resource_pack_subscribe_infos: Optional[List[ResourcePackSubscribeInfo]] = Field( + None, description='Resource package list' + ) + + +class KlingResourcePackageResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code; 0 indicates success') + message: Optional[str] = Field(None, description='Error information') + request_id: Optional[str] = Field( + None, + description='Request ID, generated by the system, used to track requests and troubleshoot problems', + ) + data: Optional[Data7] = None + + +class Object(str, Enum): + event = 'event' + + +class Type(str, Enum): + payment_intent_succeeded = 'payment_intent.succeeded' + + +class StripeRequestInfo(BaseModel): + id: Optional[str] = None + idempotency_key: Optional[str] = None + + +class Object1(str, Enum): + payment_intent = 'payment_intent' + + +class StripeAmountDetails(BaseModel): + tip: Optional[Dict[str, Any]] = None + + +class Object2(str, Enum): + charge = 'charge' + + +class StripeAddress(BaseModel): + city: Optional[str] = None + country: Optional[str] = None + line1: Optional[str] = None + line2: Optional[str] = None + 
postal_code: Optional[str] = None + state: Optional[str] = None + + +class StripeOutcome(BaseModel): + advice_code: Optional[Any] = None + network_advice_code: Optional[Any] = None + network_decline_code: Optional[Any] = None + network_status: Optional[str] = None + reason: Optional[Any] = None + risk_level: Optional[str] = None + risk_score: Optional[int] = None + seller_message: Optional[str] = None + type: Optional[str] = None + + +class Checks(BaseModel): + address_line1_check: Optional[Any] = None + address_postal_code_check: Optional[Any] = None + cvc_check: Optional[str] = None + + +class ExtendedAuthorization(BaseModel): + status: Optional[str] = None + + +class IncrementalAuthorization(BaseModel): + status: Optional[str] = None + + +class Multicapture(BaseModel): + status: Optional[str] = None + + +class NetworkToken(BaseModel): + used: Optional[bool] = None + + +class Overcapture(BaseModel): + maximum_amount_capturable: Optional[int] = None + status: Optional[str] = None + + +class StripeCardDetails(BaseModel): + amount_authorized: Optional[int] = None + authorization_code: Optional[Any] = None + brand: Optional[str] = None + checks: Optional[Checks] = None + country: Optional[str] = None + exp_month: Optional[int] = None + exp_year: Optional[int] = None + extended_authorization: Optional[ExtendedAuthorization] = None + fingerprint: Optional[str] = None + funding: Optional[str] = None + incremental_authorization: Optional[IncrementalAuthorization] = None + installments: Optional[Any] = None + last4: Optional[str] = None + mandate: Optional[Any] = None + multicapture: Optional[Multicapture] = None + network: Optional[str] = None + network_token: Optional[NetworkToken] = None + network_transaction_id: Optional[str] = None + overcapture: Optional[Overcapture] = None + regulated_status: Optional[str] = None + three_d_secure: Optional[Any] = None + wallet: Optional[Any] = None + + +class StripeRefundList(BaseModel): + object: Optional[str] = None + data: Optional[List[Dict[str, Any]]] = None + has_more: Optional[bool] = None + total_count: Optional[int] = None + url: Optional[str] = None + + +class Card(BaseModel): + installments: Optional[Any] = None + mandate_options: Optional[Any] = None + network: Optional[Any] = None + request_three_d_secure: Optional[str] = None + + +class StripePaymentMethodOptions(BaseModel): + card: Optional[Card] = None + + +class StripeShipping(BaseModel): + address: Optional[StripeAddress] = None + carrier: Optional[str] = None + name: Optional[str] = None + phone: Optional[str] = None + tracking_number: Optional[str] = None + + +class Model(str, Enum): + T2V_01_Director = 'T2V-01-Director' + I2V_01_Director = 'I2V-01-Director' + S2V_01 = 'S2V-01' + I2V_01 = 'I2V-01' + I2V_01_live = 'I2V-01-live' + T2V_01 = 'T2V-01' + + +class SubjectReferenceItem(BaseModel): + image: Optional[str] = Field( + None, description='URL or base64 encoding of the subject reference image.' + ) + mask: Optional[str] = Field( + None, + description='URL or base64 encoding of the mask for the subject reference image.', + ) + + +class MinimaxVideoGenerationRequest(BaseModel): + model: Model = Field( + ..., + description='Required. ID of model. Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', + ) + prompt: Optional[str] = Field( + None, + description='Description of the video. Should be less than 2000 characters. 
Supports camera movement instructions in [brackets].', + max_length=2000, + ) + prompt_optimizer: Optional[bool] = Field( + True, + description='If true (default), the model will automatically optimize the prompt. Set to false for more precise control.', + ) + first_frame_image: Optional[str] = Field( + None, + description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', + ) + subject_reference: Optional[List[SubjectReferenceItem]] = Field( + None, + description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', + ) + callback_url: Optional[str] = Field( + None, + description='Optional. URL to receive real-time status updates about the video generation task.', + ) + + +class MinimaxBaseResponse(BaseModel): + status_code: int = Field( + ..., + description='Status code. 0 indicates success, other values indicate errors.', + ) + status_msg: str = Field( + ..., description='Specific error details or success message.' + ) + + +class MinimaxVideoGenerationResponse(BaseModel): + task_id: str = Field( + ..., description='The task ID for the asynchronous video generation task.' + ) + base_resp: MinimaxBaseResponse + + +class File(BaseModel): + file_id: Optional[int] = Field(None, description='Unique identifier for the file') + bytes: Optional[int] = Field(None, description='File size in bytes') + created_at: Optional[int] = Field( + None, description='Unix timestamp when the file was created, in seconds' + ) + filename: Optional[str] = Field(None, description='The name of the file') + purpose: Optional[str] = Field(None, description='The purpose of using the file') + download_url: Optional[str] = Field( + None, description='The URL to download the video' + ) + + +class MinimaxFileRetrieveResponse(BaseModel): + file: File + base_resp: MinimaxBaseResponse + + +class Status1(str, Enum): + Queueing = 'Queueing' + Preparing = 'Preparing' + Processing = 'Processing' + Success = 'Success' + Fail = 'Fail' + + +class MinimaxTaskResultResponse(BaseModel): + task_id: str = Field(..., description='The task ID being queried.') + status: Status1 = Field( + ..., + description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", + ) + file_id: Optional[str] = Field( + None, + description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', + ) + base_resp: MinimaxBaseResponse class OutputFormat(str, Enum): - png = 'png' - webp = 'webp' jpeg = 'jpeg' + png = 'png' + + +class BFLFluxPro11GenerateRequest(BaseModel): + prompt: str = Field(..., description='The main text prompt for image generation') + image_prompt: Optional[str] = Field(None, description='Optional image prompt') + width: int = Field(..., description='Width of the generated image') + height: int = Field(..., description='Height of the generated image') + prompt_upsampling: Optional[bool] = Field( + None, description='Whether to use prompt upsampling' + ) + seed: Optional[int] = Field(None, description='Random seed for reproducibility') + safety_tolerance: Optional[int] = Field(None, description='Safety tolerance level') + output_format: Optional[OutputFormat] = Field( + None, description='Output image format' + ) + webhook_url: Optional[str] = Field( + None, description='Optional webhook URL for async processing' + ) + webhook_secret: 
Optional[str] = Field( + None, description='Optional webhook secret for async processing' + ) + + +class BFLFluxPro11GenerateResponse(BaseModel): + id: str = Field(..., description='Job ID for tracking') + polling_url: str = Field(..., description='URL to poll for results') + + +class BFLFluxProGenerateRequest(BaseModel): + prompt: str = Field(..., description='The text prompt for image generation.') + negative_prompt: Optional[str] = Field( + None, description='The negative prompt for image generation.' + ) + width: int = Field( + ..., description='The width of the image to generate.', ge=64, le=2048 + ) + height: int = Field( + ..., description='The height of the image to generate.', ge=64, le=2048 + ) + num_inference_steps: Optional[int] = Field( + None, description='The number of inference steps.', ge=1, le=100 + ) + guidance_scale: Optional[float] = Field( + None, description='The guidance scale for generation.', ge=1.0, le=20.0 + ) + seed: Optional[int] = Field(None, description='The seed value for reproducibility.') + num_images: Optional[int] = Field( + None, description='The number of images to generate.', ge=1, le=4 + ) + + +class BFLFluxProGenerateResponse(BaseModel): + id: str = Field(..., description='The unique identifier for the generation task.') + polling_url: str = Field(..., description='URL to poll for the generation result.') + + +class Steps(RootModel[int]): + root: int = Field( + ..., + description='Number of steps for the image generation process', + examples=[50], + ge=15, + le=50, + title='Steps', + ) + + +class Guidance(RootModel[float]): + root: float = Field( + ..., + description='Guidance strength for the image generation process', + ge=1.5, + le=100.0, + title='Guidance', + ) + + +class WebhookUrl(RootModel[AnyUrl]): + root: AnyUrl = Field( + ..., description='URL to receive webhook notifications', title='Webhook Url' + ) + + +class BFLAsyncResponse(BaseModel): + id: str = Field(..., title='Id') + polling_url: str = Field(..., title='Polling Url') + + +class BFLAsyncWebhookResponse(BaseModel): + id: str = Field(..., title='Id') + status: str = Field(..., title='Status') + webhook_url: str = Field(..., title='Webhook Url') + + +class Top(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand at the top of the image', + ge=0, + le=2048, + title='Top', + ) + + +class Bottom(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand at the bottom of the image', + ge=0, + le=2048, + title='Bottom', + ) + + +class Left(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand on the left side of the image', + ge=0, + le=2048, + title='Left', + ) + + +class Right(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand on the right side of the image', + ge=0, + le=2048, + title='Right', + ) + + +class CannyLowThreshold(RootModel[int]): + root: int = Field( + ..., + description='Low threshold for Canny edge detection', + ge=0, + le=500, + title='Canny Low Threshold', + ) + + +class CannyHighThreshold(RootModel[int]): + root: int = Field( + ..., + description='High threshold for Canny edge detection', + ge=0, + le=500, + title='Canny High Threshold', + ) + + +class Steps2(RootModel[int]): + root: int = Field( + ..., + description='Number of steps for the image generation process', + ge=15, + le=50, + title='Steps', + ) + + +class Guidance2(RootModel[float]): + root: float = Field( + ..., + description='Guidance strength for the image generation 
process', + ge=1.0, + le=100.0, + title='Guidance', + ) + + +class BFLOutputFormat(str, Enum): + jpeg = 'jpeg' + png = 'png' + + +class BFLValidationError(BaseModel): + loc: List[Union[str, int]] = Field(..., title='Location') + msg: str = Field(..., title='Message') + type: str = Field(..., title='Error Type') + + +class Datum2(BaseModel): + image_id: Optional[str] = Field( + None, description='Unique identifier for the generated image' + ) + url: Optional[str] = Field(None, description='URL to access the generated image') + + +class RecraftImageGenerationResponse(BaseModel): + created: int = Field( + ..., description='Unix timestamp when the generation was created' + ) + credits: int = Field(..., description='Number of credits used for the generation') + data: List[Datum2] = Field(..., description='Array of generated image information') + + +class RecraftImageFeatures(BaseModel): + nsfw_score: Optional[float] = None + + +class RecraftTextLayoutItem(BaseModel): + bbox: List[List[float]] + text: str + + +class RecraftImageColor(BaseModel): + rgb: Optional[List[int]] = None + std: Optional[List[float]] = None + weight: Optional[float] = None + + +class RecraftImageStyle(str, Enum): + digital_illustration = 'digital_illustration' + icon = 'icon' + realistic_image = 'realistic_image' + vector_illustration = 'vector_illustration' + + +class RecraftImageSubStyle(str, Enum): + field_2d_art_poster = '2d_art_poster' + field_3d = '3d' + field_80s = '80s' + glow = 'glow' + grain = 'grain' + hand_drawn = 'hand_drawn' + infantile_sketch = 'infantile_sketch' + kawaii = 'kawaii' + pixel_art = 'pixel_art' + psychedelic = 'psychedelic' + seamless = 'seamless' + voxel = 'voxel' + watercolor = 'watercolor' + broken_line = 'broken_line' + colored_outline = 'colored_outline' + colored_shapes = 'colored_shapes' + colored_shapes_gradient = 'colored_shapes_gradient' + doodle_fill = 'doodle_fill' + doodle_offset_fill = 'doodle_offset_fill' + offset_fill = 'offset_fill' + outline = 'outline' + outline_gradient = 'outline_gradient' + uneven_fill = 'uneven_fill' + field_70s = '70s' + cartoon = 'cartoon' + doodle_line_art = 'doodle_line_art' + engraving = 'engraving' + flat_2 = 'flat_2' + kawaii_1 = 'kawaii' + line_art = 'line_art' + linocut = 'linocut' + seamless_1 = 'seamless' + b_and_w = 'b_and_w' + enterprise = 'enterprise' + hard_flash = 'hard_flash' + hdr = 'hdr' + motion_blur = 'motion_blur' + natural_light = 'natural_light' + studio_portrait = 'studio_portrait' + line_circuit = 'line_circuit' + field_2d_art_poster_2 = '2d_art_poster_2' + engraving_color = 'engraving_color' + flat_air_art = 'flat_air_art' + hand_drawn_outline = 'hand_drawn_outline' + handmade_3d = 'handmade_3d' + stickers_drawings = 'stickers_drawings' + plastic = 'plastic' + pictogram = 'pictogram' + + +class RecraftTransformModel(str, Enum): + refm1 = 'refm1' + recraft20b = 'recraft20b' + recraftv2 = 'recraftv2' + recraftv3 = 'recraftv3' + flux1_1pro = 'flux1_1pro' + flux1dev = 'flux1dev' + imagen3 = 'imagen3' + hidream_i1_dev = 'hidream_i1_dev' + + +class RecraftImageFormat(str, Enum): + webp = 'webp' + png = 'png' + + +class RecraftResponseFormat(str, Enum): + url = 'url' + b64_json = 'b64_json' + + +class RecraftImage(BaseModel): + b64_json: Optional[str] = None + features: Optional[RecraftImageFeatures] = None + image_id: UUID + revised_prompt: Optional[str] = None + url: Optional[str] = None + + +class RecraftUserControls(BaseModel): + artistic_level: Optional[int] = None + background_color: Optional[RecraftImageColor] = None + colors: 
Optional[List[RecraftImageColor]] = None + no_text: Optional[bool] = None + + +class RecraftTextLayout(RootModel[List[RecraftTextLayoutItem]]): + root: List[RecraftTextLayoutItem] + + +class RecraftProcessImageRequest(BaseModel): + image: StrictBytes + image_format: Optional[RecraftImageFormat] = None + response_format: Optional[RecraftResponseFormat] = None + + +class RecraftProcessImageResponse(BaseModel): + created: int + credits: int + image: RecraftImage + + +class RecraftImageToImageRequest(BaseModel): + block_nsfw: Optional[bool] = None + calculate_features: Optional[bool] = None + controls: Optional[RecraftUserControls] = None + image: StrictBytes + image_format: Optional[RecraftImageFormat] = None + model: Optional[RecraftTransformModel] = None + n: Optional[int] = None + negative_prompt: Optional[str] = None + prompt: str + random_seed: Optional[int] = None + response_format: Optional[RecraftResponseFormat] = None + strength: float + style: Optional[RecraftImageStyle] = None + style_id: Optional[UUID] = None + substyle: Optional[RecraftImageSubStyle] = None + text_layout: Optional[RecraftTextLayout] = None + + +class RecraftGenerateImageResponse(BaseModel): + created: int + credits: int + data: List[RecraftImage] + + +class RecraftTransformImageWithMaskRequest(BaseModel): + block_nsfw: Optional[bool] = None + calculate_features: Optional[bool] = None + image: StrictBytes + image_format: Optional[RecraftImageFormat] = None + mask: StrictBytes + model: Optional[RecraftTransformModel] = None + n: Optional[int] = None + negative_prompt: Optional[str] = None + prompt: str + random_seed: Optional[int] = None + response_format: Optional[RecraftResponseFormat] = None + style: Optional[RecraftImageStyle] = None + style_id: Optional[UUID] = None + substyle: Optional[RecraftImageSubStyle] = None + text_layout: Optional[RecraftTextLayout] = None + + +class KlingErrorResponse(BaseModel): + code: int = Field( + ..., + description='- 1000: Authentication failed\n- 1001: Authorization is empty\n- 1002: Authorization is invalid\n- 1003: Authorization is not yet valid\n- 1004: Authorization has expired\n- 1100: Account exception\n- 1101: Account in arrears (postpaid scenario)\n- 1102: Resource pack depleted or expired (prepaid scenario)\n- 1103: Unauthorized access to requested resource\n- 1200: Invalid request parameters\n- 1201: Invalid parameters\n- 1202: Invalid request method\n- 1203: Requested resource does not exist\n- 1300: Trigger platform strategy\n- 1301: Trigger content security policy\n- 1302: API request too frequent\n- 1303: Concurrency/QPS exceeds limit\n- 1304: Trigger IP whitelist policy\n- 5000: Internal server error\n- 5001: Service temporarily unavailable\n- 5002: Server internal timeout\n', + ) + message: str = Field(..., description='Human-readable error message') + request_id: str = Field( + ..., description='Request ID for tracking and troubleshooting' + ) + + +class LumaAspectRatio(str, Enum): + field_1_1 = '1:1' + field_16_9 = '16:9' + field_9_16 = '9:16' + field_4_3 = '4:3' + field_3_4 = '3:4' + field_21_9 = '21:9' + field_9_21 = '9:21' + + +class LumaVideoModel(str, Enum): + ray_2 = 'ray-2' + ray_flash_2 = 'ray-flash-2' + ray_1_6 = 'ray-1-6' + + +class LumaVideoModelOutputResolution1(str, Enum): + field_540p = '540p' + field_720p = '720p' + field_1080p = '1080p' + field_4k = '4k' + + +class LumaVideoModelOutputResolution( + RootModel[Union[LumaVideoModelOutputResolution1, str]] +): + root: Union[LumaVideoModelOutputResolution1, str] + + +class 
LumaVideoModelOutputDuration1(str, Enum): + field_5s = '5s' + field_9s = '9s' + + +class LumaVideoModelOutputDuration( + RootModel[Union[LumaVideoModelOutputDuration1, str]] +): + root: Union[LumaVideoModelOutputDuration1, str] + + +class LumaImageModel(str, Enum): + photon_1 = 'photon-1' + photon_flash_1 = 'photon-flash-1' + + +class LumaImageRef(BaseModel): + url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') + weight: Optional[float] = Field( + None, description='The weight of the image reference' + ) + + +class LumaImageIdentity(BaseModel): + images: Optional[List[AnyUrl]] = Field( + None, description='The URLs of the image identity' + ) + + +class LumaModifyImageRef(BaseModel): + url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') + weight: Optional[float] = Field( + None, description='The weight of the modify image reference' + ) + + +class Type1(str, Enum): + generation = 'generation' + + +class LumaGenerationReference(BaseModel): + type: Literal['generation'] + id: UUID = Field(..., description='The ID of the generation') + + +class Type2(str, Enum): + image = 'image' + + +class LumaImageReference(BaseModel): + type: Literal['image'] + url: AnyUrl = Field(..., description='The URL of the image') + + +class LumaKeyframe(RootModel[Union[LumaGenerationReference, LumaImageReference]]): + root: Union[LumaGenerationReference, LumaImageReference] = Field( + ..., + description='A keyframe can be either a Generation reference, an Image, or a Video', + discriminator='type', + ) + + +class LumaGenerationType(str, Enum): + video = 'video' + image = 'image' + + +class LumaState(str, Enum): + queued = 'queued' + dreaming = 'dreaming' + completed = 'completed' + failed = 'failed' + + +class LumaAssets(BaseModel): + video: Optional[AnyUrl] = Field(None, description='The URL of the video') + image: Optional[AnyUrl] = Field(None, description='The URL of the image') + progress_video: Optional[AnyUrl] = Field( + None, description='The URL of the progress video' + ) + + +class GenerationType(str, Enum): + video = 'video' + + +class GenerationType1(str, Enum): + image = 'image' + + +class CharacterRef(BaseModel): + identity0: Optional[LumaImageIdentity] = None + + +class LumaImageGenerationRequest(BaseModel): + generation_type: Optional[GenerationType1] = 'image' + model: Optional[LumaImageModel] = 'photon-1' + prompt: Optional[str] = Field(None, description='The prompt of the generation') + aspect_ratio: Optional[LumaAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the generation' + ) + image_ref: Optional[List[LumaImageRef]] = None + style_ref: Optional[List[LumaImageRef]] = None + character_ref: Optional[CharacterRef] = None + modify_image_ref: Optional[LumaModifyImageRef] = None + + +class GenerationType2(str, Enum): + upscale_video = 'upscale_video' + + +class LumaUpscaleVideoGenerationRequest(BaseModel): + generation_type: Optional[GenerationType2] = 'upscale_video' + resolution: Optional[LumaVideoModelOutputResolution] = None + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the upscale' + ) + + +class GenerationType3(str, Enum): + add_audio = 'add_audio' + + +class LumaAudioGenerationRequest(BaseModel): + generation_type: Optional[GenerationType3] = 'add_audio' + prompt: Optional[str] = Field(None, description='The prompt of the audio') + negative_prompt: Optional[str] = Field( + None, description='The negative prompt of the audio' + ) + 
callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the audio' + ) + + +class LumaError(BaseModel): + detail: Optional[str] = Field(None, description='The error message') + + +class AspectRatio(str, Enum): + field_16_9 = '16:9' + field_4_3 = '4:3' + field_1_1 = '1:1' + field_3_4 = '3:4' + field_9_16 = '9:16' + + +class Duration(int, Enum): + integer_5 = 5 + integer_8 = 8 + + +class Model1(str, Enum): + v3_5 = 'v3.5' + + +class MotionMode(str, Enum): + normal = 'normal' + fast = 'fast' class Quality(str, Enum): - low = 'low' - medium = 'medium' - high = 'high' + field_360p = '360p' + field_540p = '540p' + field_720p = '720p' + field_1080p = '1080p' -class OpenAIImageEditRequest(BaseModel): - background: Optional[str] = Field( - None, description='Background transparency', examples=['opaque'] - ) - model: str = Field( - ..., description='The model to use for image editing', examples=['gpt-image-1'] - ) - moderation: Optional[Moderation] = Field( - None, description='Content moderation setting', examples=['auto'] - ) - n: Optional[int] = Field( - None, description='The number of images to generate', examples=[1] - ) - output_compression: Optional[int] = Field( - None, description='Compression level for JPEG or WebP (0-100)', examples=[100] - ) - output_format: Optional[OutputFormat] = Field( - None, description='Format of the output image', examples=['png'] - ) - prompt: str = Field( - ..., - description='A text description of the desired edit', - examples=['Give the rocketship rainbow coloring'], - ) - quality: Optional[str] = Field( - None, description='The quality of the edited image', examples=['low'] - ) - size: Optional[str] = Field( - None, description='Size of the output image', examples=['1024x1024'] - ) - user: Optional[str] = Field( +class Style(str, Enum): + anime = 'anime' + field_3d_animation = '3d_animation' + clay = 'clay' + comic = 'comic' + cyberpunk = 'cyberpunk' + + +class PixverseTextVideoRequest(BaseModel): + aspect_ratio: AspectRatio + duration: Duration + model: Model1 + motion_mode: Optional[MotionMode] = None + negative_prompt: Optional[str] = None + prompt: str + quality: Quality + seed: Optional[int] = None + style: Optional[Style] = None + template_id: Optional[int] = None + water_mark: Optional[bool] = None + + +class Resp(BaseModel): + video_id: Optional[int] = None + + +class PixverseVideoResponse(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp_1: Optional[Resp] = Field(None, alias='Resp') + + +class Resp1(BaseModel): + img_id: Optional[int] = None + + +class PixverseImageUploadResponse(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp: Optional[Resp1] = None + + +class PixverseImageVideoRequest(BaseModel): + img_id: int + model: Model1 + prompt: str + duration: Duration + quality: Quality + motion_mode: Optional[MotionMode] = None + seed: Optional[int] = None + style: Optional[Style] = None + template_id: Optional[int] = None + water_mark: Optional[bool] = None + + +class PixverseTransitionVideoRequest(BaseModel): + first_frame_img: int + last_frame_img: int + model: Model1 + duration: Duration + quality: Quality + motion_mode: MotionMode + seed: int + prompt: str + style: Optional[Style] = None + template_id: Optional[int] = None + water_mark: Optional[bool] = None + + +class Status2(int, Enum): + integer_1 = 1 + integer_5 = 5 + integer_6 = 6 + integer_7 = 7 + integer_8 = 8 + + +class Resp2(BaseModel): + create_time: Optional[str] = None + id: Optional[int] = None 
+    modify_time: Optional[str] = None
+    negative_prompt: Optional[str] = None
+    outputHeight: Optional[int] = None
+    outputWidth: Optional[int] = None
+    prompt: Optional[str] = None
+    resolution_ratio: Optional[int] = None
+    seed: Optional[int] = None
+    size: Optional[int] = None
+    status: Optional[Status2] = Field(
         None,
-        description='A unique identifier for end-user monitoring',
-        examples=['user-1234'],
+        description='Video generation status codes:\n* 1 - Generation successful\n* 5 - Generating\n* 6 - Deleted\n* 7 - Content moderation failed\n* 8 - Generation failed\n',
+    )
+    style: Optional[str] = None
+    url: Optional[str] = None
+
+
+class PixverseVideoResultResponse(BaseModel):
+    ErrCode: Optional[int] = None
+    ErrMsg: Optional[str] = None
+    Resp: Optional[Resp2] = None
+
+
+class Image(BaseModel):
+    bytesBase64Encoded: str
+    gcsUri: Optional[str] = None
+    mimeType: Optional[str] = None
+
+
+class Image1(BaseModel):
+    bytesBase64Encoded: Optional[str] = None
+    gcsUri: str
+    mimeType: Optional[str] = None
+
+
+class Instance(BaseModel):
+    prompt: str = Field(..., description='Text description of the video')
+    image: Optional[Union[Image, Image1]] = Field(
+        None, description='Optional image to guide video generation'
     )
 
 
-class Quality1(str, Enum):
+class PersonGeneration(str, Enum):
+    ALLOW = 'ALLOW'
+    BLOCK = 'BLOCK'
+
+
+class Parameters(BaseModel):
+    aspectRatio: Optional[str] = Field(None, examples=['16:9'])
+    negativePrompt: Optional[str] = None
+    personGeneration: Optional[PersonGeneration] = None
+    sampleCount: Optional[int] = None
+    seed: Optional[int] = None
+    storageUri: Optional[str] = Field(
+        None, description='Optional Cloud Storage URI to upload the video'
+    )
+    durationSeconds: Optional[int] = None
+    enhancePrompt: Optional[bool] = None
+
+
+class Veo2GenVidRequest(BaseModel):
+    instances: Optional[List[Instance]] = None
+    parameters: Optional[Parameters] = None
+
+
+class Veo2GenVidResponse(BaseModel):
+    name: str = Field(
+        ...,
+        description='Operation resource name',
+        examples=[
+            'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/a1b07c8e-7b5a-4aba-bb34-3e1ccb8afcc8'
+        ],
+    )
+
+
+class Veo2GenVidPollRequest(BaseModel):
+    operationName: str = Field(
+        ...,
+        description='Full operation name (from predict response)',
+        examples=[
+            'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/OPERATION_ID'
+        ],
+    )
+
+
+class Video(BaseModel):
+    gcsUri: Optional[str] = Field(None, description='Cloud Storage URI of the video')
+    bytesBase64Encoded: Optional[str] = Field(
+        None, description='Base64-encoded video content'
+    )
+    mimeType: Optional[str] = Field(None, description='Video MIME type')
+
+
+class Response(BaseModel):
+    field_type: Optional[str] = Field(
+        None,
+        alias='@type',
+        examples=[
+            'type.googleapis.com/cloud.ai.large_models.vision.GenerateVideoResponse'
+        ],
+    )
+    raiMediaFilteredCount: Optional[int] = Field(
+        None, description='Count of media filtered by responsible AI policies'
+    )
+    raiMediaFilteredReasons: Optional[List[str]] = Field(
+        None, description='Reasons why media was filtered by responsible AI policies'
+    )
+    videos: Optional[List[Video]] = None
+
+
+class Error1(BaseModel):
+    code: Optional[int] = Field(None, description='Error code')
+    message: Optional[str] = Field(None, description='Error message')
+
+
+class Veo2GenVidPollResponse(BaseModel):
+    name: Optional[str] = None
+    done: Optional[bool] = None
+    response: Optional[Response] = Field(
+        None, description='The actual prediction response if done is true'
+    )
+    error: Optional[Error1] = Field(
+        None, description='Error details if operation failed'
+    )
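
Taken together, `Veo2GenVidResponse`, `Veo2GenVidPollRequest`, and `Veo2GenVidPollResponse` model a Google-style long-running operation: the predict call returns an operation `name`, which is polled until `done` is true, at which point one of `response` or `error` is populated. A sketch of the polling side, where `poll_fn` is a stand-in for whatever transport posts the poll request to the fetch-operation endpoint (an assumption; the actual endpoint is wired up elsewhere):

import time
from typing import Optional


def wait_for_veo2(poll_fn, operation_name: str, interval: float = 5.0) -> Optional[Response]:
    # `poll_fn` is hypothetical: it should POST the serialized poll request
    # and return the decoded JSON of the operation resource.
    request = Veo2GenVidPollRequest(operationName=operation_name)
    while True:
        result = Veo2GenVidPollResponse.model_validate(poll_fn(request.model_dump()))
        if result.done:
            if result.error is not None:
                raise RuntimeError(f'Veo2 generation failed: {result.error.message}')
            return result.response
        time.sleep(interval)
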
+
+
+class RunwayImageToVideoResponse(BaseModel):
+    id: Optional[str] = Field(None, description='Task ID')
+
+
+class RunwayTaskStatusEnum(str, Enum):
+    SUCCEEDED = 'SUCCEEDED'
+    RUNNING = 'RUNNING'
+    FAILED = 'FAILED'
+    PENDING = 'PENDING'
+    CANCELLED = 'CANCELLED'
+    THROTTLED = 'THROTTLED'
+
+
+class RunwayModelEnum(str, Enum):
+    gen4_turbo = 'gen4_turbo'
+    gen3a_turbo = 'gen3a_turbo'
+
+
+class Position(str, Enum):
+    first = 'first'
+    last = 'last'
+
+
+class RunwayPromptImageDetailedObject(BaseModel):
+    uri: str = Field(
+        ..., description='An HTTPS URL or data URI containing an encoded image.'
+    )
+    position: Position = Field(
+        ...,
+        description="The position of the image in the output video. 'last' is currently supported for gen3a_turbo only.",
+    )
+
+
+class RunwayDurationEnum(int, Enum):
+    integer_5 = 5
+    integer_10 = 10
+
+
+class RunwayAspectRatioEnum(str, Enum):
+    field_1280_720 = '1280:720'
+    field_720_1280 = '720:1280'
+    field_1104_832 = '1104:832'
+    field_832_1104 = '832:1104'
+    field_960_960 = '960:960'
+    field_1584_672 = '1584:672'
+    field_1280_768 = '1280:768'
+    field_768_1280 = '768:1280'
+
+
+class RunwayPromptImageObject(
+    RootModel[Union[str, List[RunwayPromptImageDetailedObject]]]
+):
+    root: Union[str, List[RunwayPromptImageDetailedObject]] = Field(
+        ...,
+        description='Image(s) to use for the video generation. Can be a single URI or an array of image objects with positions.',
+    )
+
+
+class Datum3(BaseModel):
+    b64_json: Optional[str] = Field(None, description='Base64 encoded image data')
+    url: Optional[str] = Field(None, description='URL of the image')
+    revised_prompt: Optional[str] = Field(None, description='Revised prompt')
+
+
+class InputTokensDetails(BaseModel):
+    text_tokens: Optional[int] = None
+    image_tokens: Optional[int] = None
+
+
+class Usage(BaseModel):
+    input_tokens: Optional[int] = None
+    input_tokens_details: Optional[InputTokensDetails] = None
+    output_tokens: Optional[int] = None
+    total_tokens: Optional[int] = None
+
+
+class OpenAIImageGenerationResponse(BaseModel):
+    data: Optional[List[Datum3]] = None
+    usage: Optional[Usage] = None
+
+
+class Quality3(str, Enum):
     low = 'low'
     medium = 'medium'
     high = 'high'
@@ -345,54 +1999,70 @@ class Quality1(str, Enum):
     hd = 'hd'
 
 
+class OutputFormat1(str, Enum):
+    png = 'png'
+    webp = 'webp'
+    jpeg = 'jpeg'
+
+
+class Moderation(str, Enum):
+    low = 'low'
+    auto = 'auto'
+
+
+class Background(str, Enum):
+    transparent = 'transparent'
+    opaque = 'opaque'
+
+
 class ResponseFormat(str, Enum):
     url = 'url'
     b64_json = 'b64_json'
 
 
-class Style(str, Enum):
+class Style3(str, Enum):
     vivid = 'vivid'
     natural = 'natural'
 
 
 class OpenAIImageGenerationRequest(BaseModel):
-    background: Optional[Background] = Field(
-        None, description='Background transparency', examples=['opaque']
-    )
     model: Optional[str] = Field(
         None, description='The model to use for image generation', examples=['dall-e-3']
     )
-    moderation: Optional[Moderation] = Field(
-        None, description='Content moderation setting', examples=['auto']
-    )
-    n: Optional[int] = Field(
-        None,
-        description='The number of images to generate (1-10). 
Only 1 supported for dall-e-3.', - examples=[1], - ) - output_compression: Optional[int] = Field( - None, description='Compression level for JPEG or WebP (0-100)', examples=[100] - ) - output_format: Optional[OutputFormat] = Field( - None, description='Format of the output image', examples=['png'] - ) prompt: str = Field( ..., description='A text description of the desired image', examples=['Draw a rocket in front of a blackhole in deep space'], ) - quality: Optional[Quality1] = Field( - None, description='The quality of the generated image', examples=['high'] + n: Optional[int] = Field( + None, + description='The number of images to generate (1-10). Only 1 supported for dall-e-3.', + examples=[1], ) - response_format: Optional[ResponseFormat] = Field( - None, description='Response format of image data', examples=['b64_json'] + quality: Optional[Quality3] = Field( + None, description='The quality of the generated image', examples=['high'] ) size: Optional[str] = Field( None, description='Size of the image (e.g., 1024x1024, 1536x1024, auto)', examples=['1024x1536'], ) - style: Optional[Style] = Field( + output_format: Optional[OutputFormat1] = Field( + None, description='Format of the output image', examples=['png'] + ) + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] + ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + background: Optional[Background] = Field( + None, description='Background transparency', examples=['opaque'] + ) + response_format: Optional[ResponseFormat] = Field( + None, description='Response format of image data', examples=['b64_json'] + ) + style: Optional[Style3] = Field( None, description='Style of the image (only for dall-e-3)', examples=['vivid'] ) user: Optional[str] = Field( @@ -402,21 +2072,1758 @@ class OpenAIImageGenerationRequest(BaseModel): ) -class Datum1(BaseModel): - b64_json: Optional[str] = Field(None, description='Base64 encoded image data') - revised_prompt: Optional[str] = Field(None, description='Revised prompt') - url: Optional[str] = Field(None, description='URL of the image') +class OpenAIImageEditRequest(BaseModel): + model: str = Field( + ..., description='The model to use for image editing', examples=['gpt-image-1'] + ) + prompt: str = Field( + ..., + description='A text description of the desired edit', + examples=['Give the rocketship rainbow coloring'], + ) + n: Optional[int] = Field( + None, description='The number of images to generate', examples=[1] + ) + quality: Optional[str] = Field( + None, description='The quality of the edited image', examples=['low'] + ) + size: Optional[str] = Field( + None, description='Size of the output image', examples=['1024x1024'] + ) + output_format: Optional[OutputFormat1] = Field( + None, description='Format of the output image', examples=['png'] + ) + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] + ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + background: Optional[str] = Field( + None, description='Background transparency', examples=['opaque'] + ) + user: Optional[str] = Field( + None, + description='A unique identifier for end-user monitoring', + examples=['user-1234'], + ) -class OpenAIImageGenerationResponse(BaseModel): - data: Optional[List[Datum1]] = None -class User(BaseModel): - email: Optional[str] = 
Field(None, description='The email address for this user.') - id: Optional[str] = Field(None, description='The unique id for this user.') - isAdmin: Optional[bool] = Field( - None, description='Indicates if the user has admin privileges.' +class CustomerStorageResourceResponse(BaseModel): + download_url: Optional[str] = Field( + None, + description='The signed URL to use for downloading the file from the specified path', ) - isApproved: Optional[bool] = Field( - None, description='Indicates if the user is approved.' + upload_url: Optional[str] = Field( + None, + description='The signed URL to use for uploading the file to the specified path', ) - name: Optional[str] = Field(None, description='The name for this user.') + expires_at: Optional[datetime] = Field( + None, description='When the signed URL will expire' + ) + existing_file: Optional[bool] = Field( + None, description='Whether an existing file with the same hash was found' + ) + + +class Pikaffect(str, Enum): + Cake_ify = 'Cake-ify' + Crumble = 'Crumble' + Crush = 'Crush' + Decapitate = 'Decapitate' + Deflate = 'Deflate' + Dissolve = 'Dissolve' + Explode = 'Explode' + Eye_pop = 'Eye-pop' + Inflate = 'Inflate' + Levitate = 'Levitate' + Melt = 'Melt' + Peel = 'Peel' + Poke = 'Poke' + Squish = 'Squish' + Ta_da = 'Ta-da' + Tear = 'Tear' + + +class PikaBodyGeneratePikaffectsGeneratePikaffectsPost(BaseModel): + image: Optional[StrictBytes] = Field(None, title='Image') + pikaffect: Optional[Pikaffect] = Field(None, title='Pikaffect') + promptText: Optional[str] = Field(None, title='Prompttext') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + seed: Optional[int] = Field(None, title='Seed') + + +class PikaGenerateResponse(BaseModel): + video_id: str = Field(..., title='Video Id') + + +class PikaBodyGeneratePikadditionsGeneratePikadditionsPost(BaseModel): + video: Optional[StrictBytes] = Field(None, title='Video') + image: Optional[StrictBytes] = Field(None, title='Image') + promptText: Optional[str] = Field(None, title='Prompttext') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + seed: Optional[int] = Field(None, title='Seed') + + +class PikaBodyGeneratePikaswapsGeneratePikaswapsPost(BaseModel): + video: Optional[StrictBytes] = Field(None, title='Video') + image: Optional[StrictBytes] = Field(None, title='Image') + promptText: Optional[str] = Field(None, title='Prompttext') + modifyRegionMask: Optional[StrictBytes] = Field( + None, + description='A mask image that specifies the region to modify, where the mask is white and the background is black', + title='Modifyregionmask', + ) + modifyRegionRoi: Optional[str] = Field( + None, + description='Plaintext description of the object / region to modify', + title='Modifyregionroi', + ) + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + seed: Optional[int] = Field(None, title='Seed') + + +class IngredientsMode(str, Enum): + creative = 'creative' + precise = 'precise' + + +class AspectRatio1(RootModel[float]): + root: float = Field( + ..., + description='Aspect ratio (width / height)', + ge=0.4, + le=2.5, + title='Aspectratio', + ) + + +class PikaBodyGenerate22C2vGenerate22PikascenesPost(BaseModel): + images: Optional[List[StrictBytes]] = Field(None, title='Images') + ingredientsMode: IngredientsMode = Field(..., title='Ingredientsmode') + promptText: Optional[str] = Field(None, title='Prompttext') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + seed: Optional[int] = Field(None, title='Seed') + resolution: 
Optional[str] = Field('1080p', title='Resolution') + duration: Optional[int] = Field(5, title='Duration') + aspectRatio: Optional[AspectRatio1] = Field( + None, description='Aspect ratio (width / height)', title='Aspectratio' + ) + + +class PikaStatusEnum(str, Enum): + queued = 'queued' + started = 'started' + finished = 'finished' + + +class PikaValidationError(BaseModel): + loc: List[Union[str, int]] = Field(..., title='Location') + msg: str = Field(..., title='Message') + type: str = Field(..., title='Error Type') + + +class PikaResolutionEnum(str, Enum): + field_1080p = '1080p' + field_720p = '720p' + + +class PikaDurationEnum(int, Enum): + integer_5 = 5 + integer_10 = 10 + + +class RgbItem(RootModel[int]): + root: int = Field(..., ge=0, le=255) + + +class RGBColor(BaseModel): + rgb: List[RgbItem] = Field(..., max_length=3, min_length=3) + + +class StabilityStabilityClientID(RootModel[str]): + root: str = Field( + ..., + description='The name of your application, used to help us communicate app-specific debugging or moderation issues to you.', + examples=['my-awesome-app'], + max_length=256, + ) + + +class StabilityStabilityClientUserID(RootModel[str]): + root: str = Field( + ..., + description='A unique identifier for your end user. Used to help us communicate user-specific debugging or moderation issues to you. Feel free to obfuscate this value to protect user privacy.', + examples=['DiscordUser#9999'], + max_length=256, + ) + + +class StabilityStabilityClientVersion(RootModel[str]): + root: str = Field( + ..., + description='The version of your application, used to help us communicate version-specific debugging or moderation issues to you.', + examples=['1.2.1'], + max_length=256, + ) + + +class Name(str, Enum): + content_moderation = 'content_moderation' + + +class StabilityContentModerationResponse(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new) you file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: Name = Field( + ..., + description='Our content moderation system has flagged some part of your request and subsequently denied it. You were not charged for this request. While this may at times be frustrating, it is necessary to maintain the integrity of our platform and ensure a safe experience for all users. 
If you would like to provide feedback, please use the [Support Form](https://kb.stability.ai/knowledge-base/kb-tickets/new).', + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class RenderingSpeed(str, Enum): + BALANCED = 'BALANCED' + TURBO = 'TURBO' + QUALITY = 'QUALITY' + + +class StabilityCreativity(RootModel[float]): + root: float = Field( + ..., + description='Controls the likelihood of creating additional details not heavily conditioned by the init image.', + ge=0.2, + le=0.5, + ) + + +class StabilityGenerationID(RootModel[str]): + root: str = Field( + ..., + description='The `id` of a generation, typically used for async generations, that can be used to check the status of the generation or retrieve the result.', + examples=['a6dc6c6e20acda010fe14d71f180658f2896ed9b4ec25aa99a6ff06c796987c4'], + max_length=64, + min_length=64, + ) + + +class Mode(str, Enum): + text_to_image = 'text-to-image' + image_to_image = 'image-to-image' + + +class AspectRatio2(str, Enum): + field_21_9 = '21:9' + field_16_9 = '16:9' + field_3_2 = '3:2' + field_5_4 = '5:4' + field_1_1 = '1:1' + field_4_5 = '4:5' + field_2_3 = '2:3' + field_9_16 = '9:16' + field_9_21 = '9:21' + + +class Model4(str, Enum): + sd3_5_large = 'sd3.5-large' + sd3_5_large_turbo = 'sd3.5-large-turbo' + sd3_5_medium = 'sd3.5-medium' + + +class OutputFormat3(str, Enum): + png = 'png' + jpeg = 'jpeg' + + +class StylePreset(str, Enum): + enhance = 'enhance' + anime = 'anime' + photographic = 'photographic' + digital_art = 'digital-art' + comic_book = 'comic-book' + fantasy_art = 'fantasy-art' + line_art = 'line-art' + analog_film = 'analog-film' + neon_punk = 'neon-punk' + isometric = 'isometric' + low_poly = 'low-poly' + origami = 'origami' + modeling_compound = 'modeling-compound' + cinematic = 'cinematic' + field_3d_model = '3d-model' + pixel_art = 'pixel-art' + tile_texture = 'tile-texture' + + +class StabilityImageGenrationSD3Request(BaseModel): + prompt: str = Field( + ..., + description='What you wish to see in the output image. A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.', + max_length=10000, + min_length=1, + ) + mode: Optional[Mode] = Field( + 'text-to-image', + description='Controls whether this is a text-to-image or image-to-image generation, which affects which parameters are required:\n- **text-to-image** requires only the `prompt` parameter\n- **image-to-image** requires the `prompt`, `image`, and `strength` parameters', + title='GenerationMode', + ) + image: Optional[StrictBytes] = Field( + None, + description='The image to use as the starting point for the generation.\n\nSupported formats:\n\n\n\n - jpeg\n - png\n - webp\n\nSupported dimensions:\n\n\n\n - Every side must be at least 64 pixels\n\n> **Important:** This parameter is only valid for **image-to-image** requests.', + ) + strength: Optional[float] = Field( + None, + description='Sometimes referred to as _denoising_, this parameter controls how much influence the\n`image` parameter has on the generated image. A value of 0 would yield an image that\nis identical to the input. A value of 1 would be as if you passed in no image at all.\n\n> **Important:** This parameter is only valid for **image-to-image** requests.', + ge=0.0, + le=1.0, + ) + aspect_ratio: Optional[AspectRatio2] = Field( + '1:1', + description='Controls the aspect ratio of the generated image. 
Defaults to 1:1.\n\n> **Important:** This parameter is only valid for **text-to-image** requests.',
+    )
+    model: Optional[Model4] = Field(
+        'sd3.5-large',
+        description='The model to use for generation.\n\n- `sd3.5-large` requires 6.5 credits per generation\n- `sd3.5-large-turbo` requires 4 credits per generation\n- `sd3.5-medium` requires 3.5 credits per generation\n- As of April 17, 2025, `sd3-large`, `sd3-large-turbo` and `sd3-medium`\n\n\n\n  are re-routed to their `sd3.5-[model version]` equivalent, at the same price.',
+    )
+    seed: Optional[float] = Field(
+        0,
+        description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)",
+        ge=0.0,
+        le=4294967294.0,
+    )
+    output_format: Optional[OutputFormat3] = Field(
+        'png', description='Dictates the `content-type` of the generated image.'
+    )
+    style_preset: Optional[StylePreset] = Field(
+        None, description='Guides the image model towards a particular style.'
+    )
+    negative_prompt: Optional[str] = Field(
+        None,
+        description='Keywords of what you **do not** wish to see in the output image.\nThis is an advanced feature.',
+        max_length=10000,
+    )
+    cfg_scale: Optional[float] = Field(
+        None,
+        description='How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt). The _Large_ and _Medium_ models use a default of `4`. The _Turbo_ model uses a default of `1`.',
+        ge=1.0,
+        le=10.0,
+    )
+
+
+class FinishReason(str, Enum):
+    SUCCESS = 'SUCCESS'
+    CONTENT_FILTERED = 'CONTENT_FILTERED'
+
+
+class StabilityImageGenrationSD3Response200(BaseModel):
+    image: str = Field(
+        ...,
+        description='The generated image, encoded to base64.',
+        examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'],
+    )
+    seed: Optional[float] = Field(
+        0,
+        description='The seed used as random noise for this generation.',
+        examples=[343940597],
+        ge=0.0,
+        le=4294967294.0,
+    )
+    finish_reason: FinishReason = Field(
+        ...,
+        description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.',
+        examples=['SUCCESS'],
+    )
+
+
+class StabilityImageGenrationSD3Response400(BaseModel):
+    id: str = Field(
+        ...,
+        description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.',
+        examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'],
+        min_length=1,
+    )
+    name: str = Field(
+        ...,
+        description='Short-hand name for an error, useful for discriminating between errors with the same status code.',
+        examples=['bad_request'],
+        min_length=1,
+    )
+    errors: List[str] = Field(
+        ...,
+        description='One or more error messages indicating what went wrong.',
+        examples=[['some-field: is required']],
+        min_length=1,
+    )
+
+
+class StabilityImageGenrationSD3Response413(BaseModel):
+    id: str = Field(
+        ...,
+        description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationSD3Response422(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationSD3Response429(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationSD3Response500(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class OutputFormat4(str, Enum): + jpeg = 'jpeg' + png = 'png' + webp = 'webp' + + +class StabilityImageGenrationUpscaleConservativeRequest(BaseModel): + image: StrictBytes = Field( + ..., + description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Every side must be at least 64 pixels\n- Total pixel count must be between 4,096 and 9,437,184 pixels\n- The aspect ratio must be between 1:2.5 and 2.5:1', + examples=['./some/image.png'], + ) + prompt: str = Field( + ..., + description="What you wish to see in the output image. 
A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.\n\nTo control the weight of a given word use the format `(word:weight)`,\nwhere `word` is the word you'd like to control the weight of and `weight`\nis a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`\nwould convey a sky that was blue and green, but more green than blue.", + max_length=10000, + min_length=1, + ) + negative_prompt: Optional[str] = Field( + None, + description='A blurb of text describing what you **do not** wish to see in the output image.\nThis is an advanced feature.', + max_length=10000, + ) + seed: Optional[float] = Field( + 0, + description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)", + ge=0.0, + le=4294967294.0, + ) + output_format: Optional[OutputFormat4] = Field( + 'png', description='Dictates the `content-type` of the generated image.' + ) + creativity: Optional[StabilityCreativity] = Field( + default_factory=lambda: StabilityCreativity.model_validate(0.35) + ) + + +class StabilityImageGenrationUpscaleConservativeResponse200(BaseModel): + image: str = Field( + ..., + description='The generated image, encoded to base64.', + examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], + ) + seed: Optional[float] = Field( + 0, + description='The seed used as random noise for this generation.', + examples=[343940597], + ge=0.0, + le=4294967294.0, + ) + finish_reason: FinishReason = Field( + ..., + description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', + examples=['SUCCESS'], + ) + + +class StabilityImageGenrationUpscaleConservativeResponse400(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse413(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse422(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse429(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse500(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeRequest(BaseModel): + image: StrictBytes = Field( + ..., + description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Every side must be at least 64 pixels\n- Total pixel count must be between 4,096 and 1,048,576 pixels', + examples=['./some/image.png'], + ) + prompt: str = Field( + ..., + description="What you wish to see in the output image. 
A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.\n\nTo control the weight of a given word use the format `(word:weight)`,\nwhere `word` is the word you'd like to control the weight of and `weight`\nis a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`\nwould convey a sky that was blue and green, but more green than blue.", + max_length=10000, + min_length=1, + ) + negative_prompt: Optional[str] = Field( + None, + description='A blurb of text describing what you **do not** wish to see in the output image.\nThis is an advanced feature.', + max_length=10000, + ) + output_format: Optional[OutputFormat4] = Field( + 'png', description='Dictates the `content-type` of the generated image.' + ) + seed: Optional[float] = Field( + 0, + description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)", + ge=0.0, + le=4294967294.0, + ) + creativity: Optional[float] = Field( + 0.3, + description='Indicates how creative the model should be when upscaling an image.\nHigher values will result in more details being added to the image during upscaling.', + ge=0.1, + le=0.5, + ) + style_preset: Optional[StylePreset] = Field( + None, description='Guides the image model towards a particular style.' + ) + + +class StabilityImageGenrationUpscaleCreativeResponse200(BaseModel): + id: StabilityGenerationID + + +class StabilityImageGenrationUpscaleCreativeResponse400(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse413(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse422(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse429(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse500(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastRequest(BaseModel): + image: StrictBytes = Field( + ..., + description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Width must be between 32 and 1,536 pixels\n- Height must be between 32 and 1,536 pixels\n- Total pixel count must be between 1,024 and 1,048,576 pixels', + examples=['./some/image.png'], + ) + output_format: Optional[OutputFormat4] = Field( + 'png', description='Dictates the `content-type` of the generated image.' 
+ ) + + +class StabilityImageGenrationUpscaleFastResponse200(BaseModel): + image: str = Field( + ..., + description='The generated image, encoded to base64.', + examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], + ) + seed: Optional[float] = Field( + 0, + description='The seed used as random noise for this generation.', + examples=[343940597], + ge=0.0, + le=4294967294.0, + ) + finish_reason: FinishReason = Field( + ..., + description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', + examples=['SUCCESS'], + ) + + +class StabilityImageGenrationUpscaleFastResponse400(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastResponse413(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastResponse422(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastResponse429(BaseModel): + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.',
+        examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'],
+        min_length=1,
+    )
+    name: str = Field(
+        ...,
+        description='Short-hand name for an error, useful for discriminating between errors with the same status code.',
+        examples=['bad_request'],
+        min_length=1,
+    )
+    errors: List[str] = Field(
+        ...,
+        description='One or more error messages indicating what went wrong.',
+        examples=[['some-field: is required']],
+        min_length=1,
+    )
+
+
+class StabilityImageGenrationUpscaleFastResponse500(BaseModel):
+    id: str = Field(
+        ...,
+        description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.',
+        examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'],
+        min_length=1,
+    )
+    name: str = Field(
+        ...,
+        description='Short-hand name for an error, useful for discriminating between errors with the same status code.',
+        examples=['bad_request'],
+        min_length=1,
+    )
+    errors: List[str] = Field(
+        ...,
+        description='One or more error messages indicating what went wrong.',
+        examples=[['some-field: is required']],
+        min_length=1,
+    )
+
+
+class ActionJobResult(BaseModel):
+    id: Optional[UUID] = Field(None, description='Unique identifier for the job result')
+    workflow_name: Optional[str] = Field(None, description='Name of the workflow')
+    operating_system: Optional[str] = Field(None, description='Operating system used')
+    python_version: Optional[str] = Field(None, description='Python version used')
+    pytorch_version: Optional[str] = Field(None, description='PyTorch version used')
+    action_run_id: Optional[str] = Field(
+        None, description='Identifier of the run this result belongs to'
+    )
+    action_job_id: Optional[str] = Field(
+        None, description='Identifier of the job this result belongs to'
+    )
+    cuda_version: Optional[str] = Field(None, description='CUDA version used')
+    branch_name: Optional[str] = Field(
+        None, description='Name of the relevant git branch'
+    )
+    commit_hash: Optional[str] = Field(None, description='The hash of the commit')
+    commit_id: Optional[str] = Field(None, description='The ID of the commit')
+    commit_time: Optional[int] = Field(
+        None, description='The Unix timestamp when the commit was made'
+    )
+    commit_message: Optional[str] = Field(None, description='The message of the commit')
+    comfy_run_flags: Optional[str] = Field(
+        None, description='The comfy run flags. E.g. `--low-vram`'
+    )
+    git_repo: Optional[str] = Field(None, description='The repository name')
+    pr_number: Optional[str] = Field(None, description='The pull request number')
+    start_time: Optional[int] = Field(
+        None, description='The start time of the job as a Unix timestamp.'
+    )
+    end_time: Optional[int] = Field(
+        None, description='The end time of the job as a Unix timestamp.'
+    )
+    avg_vram: Optional[int] = Field(
+        None, description='The average VRAM used by the job'
+    )
+    peak_vram: Optional[int] = Field(None, description='The peak VRAM used by the job')
+    job_trigger_user: Optional[str] = Field(
+        None, description='The user who triggered the job.'
+    )
+    author: Optional[str] = Field(None, description='The author of the commit')
+    machine_stats: Optional[MachineStats] = None
+    status: Optional[WorkflowRunStatus] = None
+    storage_file: Optional[StorageFile] = None
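
Because every field on `ActionJobResult` is Optional, consumers need to guard against missing values before computing anything; a small sketch that summarizes a parsed result under that constraint:

def summarize_job(result: ActionJobResult) -> str:
    # All fields may be None, so derive values defensively.
    duration = None
    if result.start_time is not None and result.end_time is not None:
        duration = result.end_time - result.start_time  # Unix timestamps, seconds
    commit = (result.commit_hash or '')[:8] or 'n/a'
    return (
        f'workflow={result.workflow_name or "unknown"} '
        f'commit={commit} '
        f'duration={duration if duration is not None else "n/a"}s '
        f'peak_vram={result.peak_vram if result.peak_vram is not None else "n/a"}'
    )
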
+ ) + author: Optional[str] = Field(None, description='The author of the commit') + machine_stats: Optional[MachineStats] = None + status: Optional[WorkflowRunStatus] = None + storage_file: Optional[StorageFile] = None + + +class Publisher(BaseModel): + name: Optional[str] = None + id: Optional[str] = Field( + None, + description="The unique identifier for the publisher. It's akin to a username. Should be lowercase.", + ) + description: Optional[str] = None + website: Optional[str] = None + support: Optional[str] = None + source_code_repo: Optional[str] = None + logo: Optional[str] = Field(None, description="URL to the publisher's logo.") + createdAt: Optional[datetime] = Field( + None, description='The date and time the publisher was created.' + ) + members: Optional[List[PublisherMember]] = Field( + None, description='A list of members in the publisher.' + ) + status: Optional[PublisherStatus] = Field( + None, description='The status of the publisher.' + ) + + +class NodeVersion(BaseModel): + id: Optional[str] = None + version: Optional[str] = Field( + None, + description='The version identifier, following semantic versioning. Must be unique for the node.', + ) + createdAt: Optional[datetime] = Field( + None, description='The date and time the version was created.' + ) + changelog: Optional[str] = Field( + None, description='Summary of changes made in this version' + ) + dependencies: Optional[List[str]] = Field( + None, description='A list of pip dependencies required by the node.' + ) + downloadUrl: Optional[str] = Field( + None, description='[Output Only] URL to download this version of the node' + ) + deprecated: Optional[bool] = Field( + None, description='Indicates if this version is deprecated.' + ) + status: Optional[NodeVersionStatus] = Field( + None, description='The status of the node version.' + ) + status_reason: Optional[str] = Field( + None, description='The reason for the status change.' + ) + node_id: Optional[str] = Field( + None, description='The unique identifier of the node.' + ) + comfy_node_extract_status: Optional[str] = Field( + None, description='The status of comfy node extraction process.' 
+ ) + + +class IdeogramV3Request(BaseModel): + prompt: str = Field(..., description='The text prompt for image generation') + seed: Optional[int] = Field( + None, description='Seed value for reproducible generation' + ) + resolution: Optional[str] = Field( + None, description='Image resolution in format WxH', examples=['1280x800'] + ) + aspect_ratio: Optional[str] = Field( + None, description='Aspect ratio in format WxH', examples=['1x3'] + ) + rendering_speed: RenderingSpeed + magic_prompt: Optional[MagicPrompt] = Field( + None, description='Whether to enable magic prompt enhancement' + ) + negative_prompt: Optional[str] = Field( + None, description='Text prompt specifying what to avoid in the generation' + ) + num_images: Optional[int] = Field( + None, description='Number of images to generate', ge=1 + ) + color_palette: Optional[ColorPalette] = None + style_codes: Optional[List[StyleCode]] = Field( + None, description='Array of style codes in hexadecimal format' + ) + style_type: Optional[StyleType] = Field( + None, description='The type of style to apply' + ) + style_reference_images: Optional[List[str]] = Field( + None, description='Array of reference image URLs or identifiers' + ) + + +class IdeogramV3EditRequest(BaseModel): + image: Optional[StrictBytes] = Field( + None, + description='The image being edited (max size 10MB); only JPEG, WebP and PNG formats are supported at this time.', + ) + mask: Optional[StrictBytes] = Field( + None, + description='A black and white image of the same size as the image being edited (max size 10MB). Black regions in the mask should match up with the regions of the image that you would like to edit; only JPEG, WebP and PNG formats are supported at this time.', + ) + prompt: str = Field( + ..., description='The prompt used to describe the edited result.' + ) + magic_prompt: Optional[str] = Field( + None, + description='Determine if MagicPrompt should be used in generating the request or not.', + ) + num_images: Optional[int] = Field( + None, description='The number of images to generate.' + ) + seed: Optional[int] = Field( + None, description='Random seed. Set for reproducible generation.' + ) + rendering_speed: RenderingSpeed + color_palette: Optional[IdeogramColorPalette] = Field( + None, + description='A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members). Not supported by V_1, V_1_TURBO, V_2A and V_2A_TURBO models.', + ) + style_codes: Optional[List[StyleCode]] = Field( + None, + description='A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style_type.', + ) + style_reference_images: Optional[List[StrictBytes]] = Field( + None, + description='A set of images to use as style references (maximum total size 10MB across all style references). 
The images should be in JPEG, PNG or WebP format.', + ) + + +class KlingCameraControl(BaseModel): + type: Optional[KlingCameraControlType] = None + config: Optional[KlingCameraConfig] = None + + +class KlingText2VideoRequest(BaseModel): + model_name: Optional[KlingVideoGenModelName] = 'kling-v2-master' + prompt: Optional[str] = Field( + None, description='Positive text prompt', max_length=2500 + ) + negative_prompt: Optional[str] = Field( + None, description='Negative text prompt', max_length=2500 + ) + cfg_scale: Optional[KlingVideoGenCfgScale] = Field( + default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + ) + mode: Optional[KlingVideoGenMode] = 'std' + camera_control: Optional[KlingCameraControl] = None + aspect_ratio: Optional[KlingVideoGenAspectRatio] = '16:9' + duration: Optional[KlingVideoGenDuration] = '5' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + external_task_id: Optional[str] = Field(None, description='Customized Task ID') + + +class KlingImage2VideoRequest(BaseModel): + model_name: Optional[KlingVideoGenModelName] = 'kling-v2-master' + image: Optional[str] = Field( + None, + description='Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.', + ) + image_tail: Optional[str] = Field( + None, + description='Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. Base64 should not include data:image prefix.', + ) + prompt: Optional[str] = Field( + None, description='Positive text prompt', max_length=2500 + ) + negative_prompt: Optional[str] = Field( + None, description='Negative text prompt', max_length=2500 + ) + cfg_scale: Optional[KlingVideoGenCfgScale] = Field( + default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + ) + mode: Optional[KlingVideoGenMode] = 'std' + static_mask: Optional[str] = Field( + None, + description='Static Brush Application Area (Mask image created by users using the motion brush). The aspect ratio must match the input image.', + ) + dynamic_masks: Optional[List[DynamicMask]] = Field( + None, + description='Dynamic Brush Configuration List (up to 6 groups). For 5-second videos, trajectory length must not exceed 77 coordinates.', + ) + camera_control: Optional[KlingCameraControl] = None + aspect_ratio: Optional[KlingVideoGenAspectRatio] = '16:9' + duration: Optional[KlingVideoGenDuration] = '5' + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback notification address. Server will notify when the task status changes.', + ) + external_task_id: Optional[str] = Field( + None, + description='Customized Task ID. Must be unique within a single user account.', + ) + + +class KlingVideoEffectsInput( + RootModel[Union[KlingSingleImageEffectInput, KlingDualCharacterEffectInput]] +): + root: Union[KlingSingleImageEffectInput, KlingDualCharacterEffectInput] + + +class StripeBillingDetails(BaseModel): + address: Optional[StripeAddress] = None + email: Optional[str] = None + name: Optional[str] = None + phone: Optional[str] = None + tax_id: Optional[Any] = None + + +class StripePaymentMethodDetails(BaseModel): + card: Optional[StripeCardDetails] = None + type: Optional[str] = None + + +class BFLFluxProFillInputs(BaseModel): + image: str = Field( + ..., + description='A Base64-encoded string representing the image you wish to modify. 
Can contain alpha mask if desired.', + title='Image', + ) + mask: Optional[str] = Field( + None, + description='A Base64-encoded string representing a mask for the areas you want to modify in the image. The mask should be the same dimensions as the image and in black and white. Black areas (0%) indicate no modification, while white areas (100%) specify areas for inpainting. Optional if you provide an alpha mask in the original image. Validation: The endpoint verifies that the dimensions of the mask match the original image.', + title='Mask', + ) + prompt: Optional[str] = Field( + '', + description='The description of the changes you want to make. This text guides the inpainting process, allowing you to specify features, styles, or modifications for the masked area.', + examples=['ein fantastisches bild'], + title='Prompt', + ) + steps: Optional[Steps] = Field( + default_factory=lambda: Steps.model_validate(50), + description='Number of steps for the image generation process', + examples=[50], + title='Steps', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation', + title='Prompt Upsampling', + ) + seed: Optional[int] = Field( + None, description='Optional seed for reproducibility', title='Seed' + ) + guidance: Optional[Guidance] = Field( + default_factory=lambda: Guidance.model_validate(60), + description='Guidance strength for the image generation process', + title='Guidance', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image. Can be 'jpeg' or 'png'.", + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.', + examples=[2], + ge=0, + le=6, + title='Safety Tolerance', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + + +class BFLHTTPValidationError(BaseModel): + detail: Optional[List[BFLValidationError]] = Field(None, title='Detail') + + +class BFLFluxProExpandInputs(BaseModel): + image: str = Field( + ..., + description='A Base64-encoded string representing the image you wish to expand.', + title='Image', + ) + top: Optional[Top] = Field( + 0, description='Number of pixels to expand at the top of the image', title='Top' + ) + bottom: Optional[Bottom] = Field( + 0, + description='Number of pixels to expand at the bottom of the image', + title='Bottom', + ) + left: Optional[Left] = Field( + 0, + description='Number of pixels to expand on the left side of the image', + title='Left', + ) + right: Optional[Right] = Field( + 0, + description='Number of pixels to expand on the right side of the image', + title='Right', + ) + prompt: Optional[str] = Field( + '', + description='The description of the changes you want to make. 
This text guides the expansion process, allowing you to specify features, styles, or modifications for the expanded areas.', + examples=['ein fantastisches bild'], + title='Prompt', + ) + steps: Optional[Steps] = Field( + default_factory=lambda: Steps.model_validate(50), + description='Number of steps for the image generation process', + examples=[50], + title='Steps', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation', + title='Prompt Upsampling', + ) + seed: Optional[int] = Field( + None, description='Optional seed for reproducibility', title='Seed' + ) + guidance: Optional[Guidance] = Field( + default_factory=lambda: Guidance.model_validate(60), + description='Guidance strength for the image generation process', + title='Guidance', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image. Can be 'jpeg' or 'png'.", + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.', + examples=[2], + ge=0, + le=6, + title='Safety Tolerance', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + + +class BFLCannyInputs(BaseModel): + prompt: str = Field( + ..., + description='Text prompt for image generation', + examples=['ein fantastisches bild'], + title='Prompt', + ) + control_image: Optional[str] = Field( + None, + description='Base64 encoded image to use as control input if no preprocessed image is provided', + title='Control Image', + ) + preprocessed_image: Optional[str] = Field( + None, + description='Optional pre-processed image that will bypass the control preprocessing step', + title='Preprocessed Image', + ) + canny_low_threshold: Optional[CannyLowThreshold] = Field( + default_factory=lambda: CannyLowThreshold.model_validate(50), + description='Low threshold for Canny edge detection', + title='Canny Low Threshold', + ) + canny_high_threshold: Optional[CannyHighThreshold] = Field( + default_factory=lambda: CannyHighThreshold.model_validate(200), + description='High threshold for Canny edge detection', + title='Canny High Threshold', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt', + title='Prompt Upsampling', + ) + seed: Optional[int] = Field( + None, + description='Optional seed for reproducibility', + examples=[42], + title='Seed', + ) + steps: Optional[Steps2] = Field( + default_factory=lambda: Steps2.model_validate(50), + description='Number of steps for the image generation process', + title='Steps', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image. Can be 'jpeg' or 'png'.", + ) + guidance: Optional[Guidance2] = Field( + default_factory=lambda: Guidance2.model_validate(30), + description='Guidance strength for the image generation process', + title='Guidance', + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. 
Between 0 and 6, 0 being most strict, 6 being least strict.', + ge=0, + le=6, + title='Safety Tolerance', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + + +class BFLDepthInputs(BaseModel): + prompt: str = Field( + ..., + description='Text prompt for image generation', + examples=['ein fantastisches bild'], + title='Prompt', + ) + control_image: Optional[str] = Field( + None, + description='Base64 encoded image to use as control input', + title='Control Image', + ) + preprocessed_image: Optional[str] = Field( + None, + description='Optional pre-processed image that will bypass the control preprocessing step', + title='Preprocessed Image', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt', + title='Prompt Upsampling', + ) + seed: Optional[int] = Field( + None, + description='Optional seed for reproducibility', + examples=[42], + title='Seed', + ) + steps: Optional[Steps2] = Field( + default_factory=lambda: Steps2.model_validate(50), + description='Number of steps for the image generation process', + title='Steps', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image. Can be 'jpeg' or 'png'.", + ) + guidance: Optional[Guidance2] = Field( + default_factory=lambda: Guidance2.model_validate(15), + description='Guidance strength for the image generation process', + title='Guidance', + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.', + ge=0, + le=6, + title='Safety Tolerance', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + + +class Controls(BaseModel): + artistic_level: Optional[int] = Field( + None, + description='Defines artistic tone of your image. At a simple level, the person looks straight at the camera in a static and clean style. Dynamic and eccentric levels introduce movement and creativity.', + ge=0, + le=5, + ) + colors: Optional[List[RGBColor]] = Field( + None, description='An array of preferable colors' + ) + background_color: Optional[RGBColor] = Field( + None, description='Use given color as a desired background color' + ) + no_text: Optional[bool] = Field(None, description='Do not embed text layouts') + + +class RecraftImageGenerationRequest(BaseModel): + prompt: str = Field( + ..., description='The text prompt describing the image to generate' + ) + model: str = Field( + ..., description='The model to use for generation (e.g., "recraftv3")' + ) + style: Optional[str] = Field( + None, + description='The style to apply to the generated image (e.g., "digital_illustration")', + ) + style_id: Optional[str] = Field( + None, + description='The style ID to apply to the generated image (e.g., "123e4567-e89b-12d3-a456-426614174000"). 
If style_id is provided, style should not be provided.', + ) + size: str = Field( + ..., description='The size of the generated image (e.g., "1024x1024")' + ) + controls: Optional[Controls] = Field( + None, description='The controls for the generated image' + ) + n: int = Field(..., description='The number of images to generate', ge=1, le=4) + + +class LumaKeyframes(BaseModel): + frame0: Optional[LumaKeyframe] = None + frame1: Optional[LumaKeyframe] = None + + +class LumaGenerationRequest(BaseModel): + generation_type: Optional[GenerationType] = 'video' + prompt: str = Field(..., description='The prompt of the generation') + aspect_ratio: LumaAspectRatio + loop: Optional[bool] = Field(None, description='Whether to loop the video') + keyframes: Optional[LumaKeyframes] = None + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback URL of the generation, a POST request with Generation object will be sent to the callback URL when the generation is dreaming, completed, or failed', + ) + model: LumaVideoModel + resolution: LumaVideoModelOutputResolution + duration: LumaVideoModelOutputDuration + + +class LumaGeneration(BaseModel): + id: Optional[UUID] = Field(None, description='The ID of the generation') + generation_type: Optional[LumaGenerationType] = None + state: Optional[LumaState] = None + failure_reason: Optional[str] = Field( + None, description='The reason for the state of the generation' + ) + created_at: Optional[datetime] = Field( + None, description='The date and time when the generation was created' + ) + assets: Optional[LumaAssets] = None + model: Optional[str] = Field(None, description='The model used for the generation') + request: Optional[ + Union[ + LumaGenerationRequest, + LumaImageGenerationRequest, + LumaUpscaleVideoGenerationRequest, + LumaAudioGenerationRequest, + ] + ] = Field(None, description='The request of the generation') + + +class RunwayImageToVideoRequest(BaseModel): + promptImage: RunwayPromptImageObject + seed: int = Field( + ..., description='Random seed for generation', ge=0, le=4294967295 + ) + model: RunwayModelEnum = Field(..., description='Model to use for generation') + promptText: Optional[str] = Field( + None, description='Text prompt for the generation', max_length=1000 + ) + duration: RunwayDurationEnum = Field( + ..., description='The number of seconds of duration for the output video.' + ) + ratio: RunwayAspectRatioEnum = Field( + ..., + description='The resolution (aspect ratio) of the output video. Allowable values depend on the selected model. 
1280:768 and 768:1280 are only supported for gen3a_turbo.', + ) + + +class RunwayTaskStatusResponse(BaseModel): + id: Optional[str] = Field(None, description='Task ID') + status: Optional[RunwayTaskStatusEnum] = Field(None, description='Task status') + createdAt: Optional[datetime] = Field(None, description='Task creation timestamp') + output: Optional[List[str]] = Field(None, description='Array of output video URLs') + + +class PikaHTTPValidationError(BaseModel): + detail: Optional[List[PikaValidationError]] = Field(None, title='Detail') + + +class PikaBodyGenerate22T2vGenerate22T2vPost(BaseModel): + promptText: str = Field(..., title='Prompttext') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + seed: Optional[int] = Field(None, title='Seed') + resolution: Optional[PikaResolutionEnum] = Field('1080p', title='Resolution') + duration: Optional[PikaDurationEnum] = Field(5, title='Duration') + aspectRatio: Optional[float] = Field( + 1.7777777777777777, + description='Aspect ratio (width / height)', + ge=0.4, + le=2.5, + title='Aspectratio', + ) + + +class PikaBodyGenerate22I2vGenerate22I2vPost(BaseModel): + image: Optional[StrictBytes] = Field(None, title='Image') + promptText: Optional[str] = Field(None, title='Prompttext') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + seed: Optional[int] = Field(None, title='Seed') + resolution: Optional[PikaResolutionEnum] = Field('1080p', title='Resolution') + duration: Optional[PikaDurationEnum] = Field(5, title='Duration') + + +class PikaBodyGenerate22KeyframeGenerate22PikaframesPost(BaseModel): + keyFrames: Optional[List[StrictBytes]] = Field( + None, description='Array of keyframe images', title='Keyframes' + ) + promptText: str = Field(..., title='Prompttext') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + seed: Optional[int] = Field(None, title='Seed') + resolution: Optional[PikaResolutionEnum] = Field('1080p', title='Resolution') + duration: Optional[int] = Field(None, ge=5, le=10, title='Duration') + + +class PikaVideoResponse(BaseModel): + id: str = Field(..., title='Id') + status: PikaStatusEnum = Field( + ..., description='The status of the video', title='Status' + ) + url: Optional[str] = Field(None, title='Url') + progress: Optional[int] = Field(None, title='Progress') + + +class Node(BaseModel): + id: Optional[str] = Field(None, description='The unique identifier of the node.') + name: Optional[str] = Field(None, description='The display name of the node.') + category: Optional[str] = Field(None, description='The category of the node.') + description: Optional[str] = None + author: Optional[str] = None + license: Optional[str] = Field( + None, description="The path to the LICENSE file in the node's repository." + ) + icon: Optional[str] = Field(None, description="URL to the node's icon.") + repository: Optional[str] = Field(None, description="URL to the node's repository.") + tags: Optional[List[str]] = None + latest_version: Optional[NodeVersion] = Field( + None, description='The latest version of the node.' + ) + rating: Optional[float] = Field(None, description='The average rating of the node.') + downloads: Optional[int] = Field( + None, description='The number of downloads of the node.' + ) + publisher: Optional[Publisher] = Field( + None, description='The publisher of the node.' + ) + status: Optional[NodeStatus] = Field(None, description='The status of the node.') + status_detail: Optional[str] = Field( + None, description='The status detail of the node.' 
+ ) + translations: Optional[Dict[str, Dict[str, Any]]] = None + + +class KlingVideoEffectsRequest(BaseModel): + effect_scene: Union[KlingDualCharacterEffectsScene, KlingSingleImageEffectsScene] + input: KlingVideoEffectsInput + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback notification address for the result of this task.', + ) + external_task_id: Optional[str] = Field( + None, + description='Customized Task ID. Must be unique within a single user account.', + ) + + +class StripeCharge(BaseModel): + id: Optional[str] = None + object: Optional[Object2] = None + amount: Optional[int] = None + amount_captured: Optional[int] = None + amount_refunded: Optional[int] = None + application: Optional[str] = None + application_fee: Optional[str] = None + application_fee_amount: Optional[int] = None + balance_transaction: Optional[str] = None + billing_details: Optional[StripeBillingDetails] = None + calculated_statement_descriptor: Optional[str] = None + captured: Optional[bool] = None + created: Optional[int] = None + currency: Optional[str] = None + customer: Optional[str] = None + description: Optional[str] = None + destination: Optional[Any] = None + dispute: Optional[Any] = None + disputed: Optional[bool] = None + failure_balance_transaction: Optional[Any] = None + failure_code: Optional[Any] = None + failure_message: Optional[Any] = None + fraud_details: Optional[Dict[str, Any]] = None + invoice: Optional[Any] = None + livemode: Optional[bool] = None + metadata: Optional[Dict[str, Any]] = None + on_behalf_of: Optional[Any] = None + order: Optional[Any] = None + outcome: Optional[StripeOutcome] = None + paid: Optional[bool] = None + payment_intent: Optional[str] = None + payment_method: Optional[str] = None + payment_method_details: Optional[StripePaymentMethodDetails] = None + radar_options: Optional[Dict[str, Any]] = None + receipt_email: Optional[str] = None + receipt_number: Optional[str] = None + receipt_url: Optional[str] = None + refunded: Optional[bool] = None + refunds: Optional[StripeRefundList] = None + review: Optional[Any] = None + shipping: Optional[StripeShipping] = None + source: Optional[Any] = None + source_transfer: Optional[Any] = None + statement_descriptor: Optional[Any] = None + statement_descriptor_suffix: Optional[Any] = None + status: Optional[str] = None + transfer_data: Optional[Any] = None + transfer_group: Optional[Any] = None + + +class StripeChargeList(BaseModel): + object: Optional[str] = None + data: Optional[List[StripeCharge]] = None + has_more: Optional[bool] = None + total_count: Optional[int] = None + url: Optional[str] = None + + +class StripePaymentIntent(BaseModel): + id: Optional[str] = None + object: Optional[Object1] = None + amount: Optional[int] = None + amount_capturable: Optional[int] = None + amount_details: Optional[StripeAmountDetails] = None + amount_received: Optional[int] = None + application: Optional[str] = None + application_fee_amount: Optional[int] = None + automatic_payment_methods: Optional[Any] = None + canceled_at: Optional[int] = None + cancellation_reason: Optional[str] = None + capture_method: Optional[str] = None + charges: Optional[StripeChargeList] = None + client_secret: Optional[str] = None + confirmation_method: Optional[str] = None + created: Optional[int] = None + currency: Optional[str] = None + customer: Optional[str] = None + description: Optional[str] = None + invoice: Optional[str] = None + last_payment_error: Optional[Any] = None + latest_charge: Optional[str] = None + livemode: 
Optional[bool] = None
+    metadata: Optional[Dict[str, Any]] = None
+    next_action: Optional[Any] = None
+    on_behalf_of: Optional[Any] = None
+    payment_method: Optional[str] = None
+    payment_method_configuration_details: Optional[Any] = None
+    payment_method_options: Optional[StripePaymentMethodOptions] = None
+    payment_method_types: Optional[List[str]] = None
+    processing: Optional[Any] = None
+    receipt_email: Optional[str] = None
+    review: Optional[Any] = None
+    setup_future_usage: Optional[Any] = None
+    shipping: Optional[StripeShipping] = None
+    source: Optional[Any] = None
+    statement_descriptor: Optional[Any] = None
+    statement_descriptor_suffix: Optional[Any] = None
+    status: Optional[str] = None
+    transfer_data: Optional[Any] = None
+    transfer_group: Optional[Any] = None
+
+
+class Data8(BaseModel):
+    object: Optional[StripePaymentIntent] = None
+
+
+class StripeEvent(BaseModel):
+    id: str
+    object: Object
+    api_version: Optional[str] = None
+    created: Optional[int] = None
+    data: Data8
+    livemode: Optional[bool] = None
+    pending_webhooks: Optional[int] = None
+    request: Optional[StripeRequestInfo] = None
+    type: Type
diff --git a/comfy_api_nodes/apis/bfl_api.py b/comfy_api_nodes/apis/bfl_api.py
new file mode 100644
index 000000000..c189038fb
--- /dev/null
+++ b/comfy_api_nodes/apis/bfl_api.py
@@ -0,0 +1,156 @@
+from __future__ import annotations
+
+from enum import Enum
+from typing import Any, Dict, Optional
+
+from pydantic import BaseModel, Field, confloat, conint
+
+
+class BFLOutputFormat(str, Enum):
+    png = 'png'
+    jpeg = 'jpeg'
+
+
+class BFLFluxExpandImageRequest(BaseModel):
+    prompt: str = Field(..., description='The description of the changes you want to make. This text guides the expansion process, allowing you to specify features, styles, or modifications for the expanded areas.')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+    )
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    top: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the top of the image')
+    bottom: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the bottom of the image')
+    left: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the left side of the image')
+    right: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the right side of the image')
+    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
+    guidance: confloat(ge=1.5, le=100) = Field(..., description='Guidance strength for the image generation process')
+    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
+        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 6.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    image: Optional[str] = Field(None, description='A Base64-encoded string representing the image you wish to expand')
+
+
+class BFLFluxFillImageRequest(BaseModel):
+    prompt: str = Field(..., description='The description of the changes you want to make. 
This text guides the inpainting process, allowing you to specify features, styles, or modifications for the masked area.')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+    )
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
+    guidance: confloat(ge=1.5, le=100) = Field(..., description='Guidance strength for the image generation process')
+    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
+        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 6.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    image: Optional[str] = Field(None, description='A Base64-encoded string representing the image you wish to modify. Can contain alpha mask if desired.')
+    mask: Optional[str] = Field(None, description='A Base64-encoded string representing the mask of the areas you wish to modify.')
+
+
+class BFLFluxCannyImageRequest(BaseModel):
+    prompt: str = Field(..., description='Text prompt for image generation')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+    )
+    canny_low_threshold: Optional[int] = Field(None, description='Low threshold for Canny edge detection')
+    canny_high_threshold: Optional[int] = Field(None, description='High threshold for Canny edge detection')
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
+    guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process')
+    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
+        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 6.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided')
+    preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step')
+
+
+class BFLFluxDepthImageRequest(BaseModel):
+    prompt: str = Field(..., description='Text prompt for image generation')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+    )
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
+    guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process')
+    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
+        6, description='Tolerance level for input and output moderation. 
Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 6.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided')
+    preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step')
+
+
+class BFLFluxProGenerateRequest(BaseModel):
+    prompt: str = Field(..., description='The text prompt for image generation.')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+    )
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    width: conint(ge=256, le=1440) = Field(1024, description='Width of the generated image in pixels. Must be a multiple of 32.')
+    height: conint(ge=256, le=1440) = Field(768, description='Height of the generated image in pixels. Must be a multiple of 32.')
+    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
+        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 6.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    image_prompt: Optional[str] = Field(None, description='Optional image to remix in base64 format')
+    # image_prompt_strength: Optional[confloat(ge=0.0, le=1.0)] = Field(
+    #     None, description='Blend between the prompt and the image prompt.'
+    # )
+
+
+class BFLFluxProUltraGenerateRequest(BaseModel):
+    prompt: str = Field(..., description='The text prompt for image generation.')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+    )
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    aspect_ratio: Optional[str] = Field(None, description='Aspect ratio of the image between 21:9 and 9:21.')
+    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
+        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 6.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    raw: Optional[bool] = Field(None, description='Generate less processed, more natural-looking images.')
+    image_prompt: Optional[str] = Field(None, description='Optional image to remix in base64 format')
+    image_prompt_strength: Optional[confloat(ge=0.0, le=1.0)] = Field(
+        None, description='Blend between the prompt and the image prompt.'
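+        # image_prompt_strength has no effect unless image_prompt is also provided.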
+    )
+
+
+class BFLFluxProGenerateResponse(BaseModel):
+    id: str = Field(..., description='The unique identifier for the generation task.')
+    polling_url: str = Field(..., description='URL to poll for the generation result.')
+
+
+class BFLStatus(str, Enum):
+    task_not_found = "Task not found"
+    pending = "Pending"
+    request_moderated = "Request Moderated"
+    content_moderated = "Content Moderated"
+    ready = "Ready"
+    error = "Error"
+
+
+class BFLFluxProStatusResponse(BaseModel):
+    id: str = Field(..., description="The unique identifier for the generation task.")
+    status: BFLStatus = Field(..., description="The status of the task.")
+    result: Optional[Dict[str, Any]] = Field(
+        None, description="The result of the task (null if not completed)."
+    )
+    progress: confloat(ge=0.0, le=1.0) = Field(
+        ..., description="The progress of the task (0.0 to 1.0)."
+    )
+    details: Optional[Dict[str, Any]] = Field(
+        None, description="Additional details about the task (null if not available)."
+    )
diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py
index d3cd9ad2f..929e386d4 100644
--- a/comfy_api_nodes/apis/client.py
+++ b/comfy_api_nodes/apis/client.py
@@ -1,5 +1,3 @@
-import logging
-
 """
 API Client Framework for api.comfy.org.
 
@@ -46,24 +44,71 @@ operation = ApiOperation(
 )
 user_profile = operation.execute(client=api_client)  # Returns immediately with the result
+
+# Example 2: Asynchronous API Operation with Polling
+# -------------------------------------------------
+# For an API that starts a task and requires polling for completion:
+
+# 1. Define the endpoint that starts the task
+generate_image_endpoint = ApiEndpoint(
+    path="/v1/images/generate",
+    method=HttpMethod.POST,
+    request_model=ImageGenerationRequest,
+    response_model=TaskCreatedResponse,
+    query_params=None
+)
+
+# 2. Create the request object and start the task
+request = ImageGenerationRequest(
+    prompt="a beautiful sunset over mountains",
+    width=1024,
+    height=1024,
+    num_images=1
+)
+task = SynchronousOperation(
+    endpoint=generate_image_endpoint,
+    request=request
+).execute(client=api_client)  # e.g. a TaskCreatedResponse carrying task_id
+
+# 3. Poll the task until it reaches a terminal status
+check_task_endpoint = ApiEndpoint(
+    path=f"/v1/tasks/{task.task_id}",
+    method=HttpMethod.GET,
+    request_model=EmptyRequest,
+    response_model=ImageGenerationResult,
+    query_params=None
+)
+operation = PollingOperation(
+    poll_endpoint=check_task_endpoint,
+    completed_statuses=["completed"],
+    failed_statuses=["failed", "error"],
+    status_extractor=lambda response: response.status
+)
+
+# This will poll until the task completes or fails
+result = operation.execute(client=api_client)  # Returns the final ImageGenerationResult when done
 """
 
-from typing import (
-    Dict,
-    Type,
-    Optional,
-    Any,
-    TypeVar,
-    Generic,
-)
-from pydantic import BaseModel
+from __future__ import annotations
+import logging
+import time
+import io
+from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable
 from enum import Enum
 import json
 import requests
 from urllib.parse import urljoin
+from pydantic import BaseModel, Field
+
+from comfy.cli_args import args
+from comfy import utils
 
 T = TypeVar("T", bound=BaseModel)
 R = TypeVar("R", bound=BaseModel)
+P = TypeVar("P", bound=BaseModel)  # For poll response
+
+PROGRESS_BAR_MAX = 100
+
 
 class EmptyRequest(BaseModel):
     """Base class for empty request bodies.
@@ -72,6 +117,19 @@ class EmptyRequest(BaseModel): pass +class UploadRequest(BaseModel): + file_name: str = Field(..., description="Filename to upload") + content_type: str | None = Field( + None, + description="Mime type of the file. For example: image/png, image/jpeg, video/mp4, etc.", + ) + + +class UploadResponse(BaseModel): + download_url: str = Field(..., description="URL to GET uploaded file") + upload_url: str = Field(..., description="URL to PUT file to upload") + + class HttpMethod(str, Enum): GET = "GET" POST = "POST" @@ -89,7 +147,7 @@ class ApiClient: self, base_url: str, api_key: Optional[str] = None, - timeout: float = 30.0, + timeout: float = 3600.0, verify_ssl: bool = True, ): self.base_url = base_url @@ -97,6 +155,48 @@ class ApiClient: self.timeout = timeout self.verify_ssl = verify_ssl + def _create_json_payload_args( + self, + data: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + ) -> Dict[str, Any]: + return { + "json": data, + "headers": headers, + } + + def _create_form_data_args( + self, + data: Dict[str, Any], + files: Dict[str, Any], + headers: Optional[Dict[str, str]] = None, + multipart_parser = None, + ) -> Dict[str, Any]: + if headers and "Content-Type" in headers: + del headers["Content-Type"] + + if multipart_parser: + data = multipart_parser(data) + + return { + "data": data, + "files": files, + "headers": headers, + } + + def _create_urlencoded_form_data_args( + self, + data: Dict[str, Any], + headers: Optional[Dict[str, str]] = None, + ) -> Dict[str, Any]: + headers = headers or {} + headers["Content-Type"] = "application/x-www-form-urlencoded" + + return { + "data": data, + "headers": headers, + } + def get_headers(self) -> Dict[str, str]: """Get headers for API requests, including authentication if available""" headers = {"Content-Type": "application/json", "Accept": "application/json"} @@ -111,9 +211,11 @@ class ApiClient: method: str, path: str, params: Optional[Dict[str, Any]] = None, - json: Optional[Dict[str, Any]] = None, + data: Optional[Dict[str, Any]] = None, files: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + multipart_parser: Callable = None, ) -> Dict[str, Any]: """ Make an HTTP request to the API @@ -122,9 +224,10 @@ class ApiClient: method: HTTP method (GET, POST, etc.) path: API endpoint path (will be joined with base_url) params: Query parameters - json: JSON body data + data: body data files: Files to upload headers: Additional headers + content_type: Content type of the request. Defaults to application/json. 
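+            multipart_parser: Optional callable applied to the body data before
+                multipart encoding (only used with multipart/form-data)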
Returns: Parsed JSON response @@ -146,34 +249,26 @@ class ApiClient: logging.debug(f"[DEBUG] Request Headers: {request_headers}") logging.debug(f"[DEBUG] Files: {files}") logging.debug(f"[DEBUG] Params: {params}") - logging.debug(f"[DEBUG] Json: {json}") + logging.debug(f"[DEBUG] Data: {data}") + + if content_type == "application/x-www-form-urlencoded": + payload_args = self._create_urlencoded_form_data_args(data, request_headers) + elif content_type == "multipart/form-data": + payload_args = self._create_form_data_args( + data, files, request_headers, multipart_parser + ) + else: + payload_args = self._create_json_payload_args(data, request_headers) try: - # If files are present, use data parameter instead of json - if files: - form_data = {} - if json: - form_data.update(json) - response = requests.request( - method=method, - url=url, - params=params, - data=form_data, # Use data instead of json - files=files, - headers=request_headers, - timeout=self.timeout, - verify=self.verify_ssl, - ) - else: - response = requests.request( - method=method, - url=url, - params=params, - json=json, - headers=request_headers, - timeout=self.timeout, - verify=self.verify_ssl, - ) + response = requests.request( + method=method, + url=url, + params=params, + timeout=self.timeout, + verify=self.verify_ssl, + **payload_args, + ) # Raise exception for error status codes response.raise_for_status() @@ -203,7 +298,9 @@ class ApiClient: error_message = f"API Error: {error_json}" except Exception as json_error: # If we can't parse the JSON, fall back to the original error message - logging.debug(f"[DEBUG] Failed to parse error response: {str(json_error)}") + logging.debug( + f"[DEBUG] Failed to parse error response: {str(json_error)}" + ) logging.debug(f"[DEBUG] API Error: {error_message} (Status: {status_code})") if hasattr(e, "response") and e.response.content: @@ -229,6 +326,32 @@ class ApiClient: raise Exception("Unauthorized: Please login first to use this node.") return auth_token + @staticmethod + def upload_file( + upload_url: str, + file: io.BytesIO | str, + content_type: str | None = None, + ): + """Upload a file to the API. Make sure the file has a filename equal to what the url expects. 
+
+        Args:
+            upload_url: The URL to upload to
+            file: Either a file path string or a BytesIO object
+            content_type: Optional mime type to set for the upload
+        """
+        headers = {}
+        if content_type:
+            headers["Content-Type"] = content_type
+
+        if isinstance(file, io.BytesIO):
+            file.seek(0)  # Ensure we're at the start of the file
+            data = file.read()
+            return requests.put(upload_url, data=data, headers=headers)
+        elif isinstance(file, str):
+            with open(file, "rb") as f:
+                data = f.read()
+            return requests.put(upload_url, data=data, headers=headers)
+
 
 class ApiEndpoint(Generic[T, R]):
     """Defines an API endpoint with its request and response types"""
@@ -267,27 +390,29 @@ class SynchronousOperation(Generic[T, R]):
         endpoint: ApiEndpoint[T, R],
         request: T,
         files: Optional[Dict[str, Any]] = None,
-        api_base: str = "https://api.comfy.org",
+        api_base: str | None = None,
         auth_token: Optional[str] = None,
         timeout: float = 604800.0,
         verify_ssl: bool = True,
+        content_type: str = "application/json",
+        multipart_parser: Callable = None,
     ):
         self.endpoint = endpoint
         self.request = request
         self.response = None
         self.error = None
-        self.api_base = api_base
+        self.api_base: str = api_base or args.comfy_api_base
         self.auth_token = auth_token
         self.timeout = timeout
         self.verify_ssl = verify_ssl
         self.files = files
+        self.content_type = content_type
+        self.multipart_parser = multipart_parser
 
     def execute(self, client: Optional[ApiClient] = None) -> R:
         """Execute the API operation using the provided client or create one"""
         try:
             # Create client if not provided
             if client is None:
-                if self.api_base is None:
-                    raise ValueError("Either client or api_base must be provided")
                 client = ApiClient(
                     base_url=self.api_base,
                     api_key=self.auth_token,
@@ -296,14 +421,25 @@
                 )
 
             # Convert request model to dict, but use None for EmptyRequest
-            request_dict = None if isinstance(self.request, EmptyRequest) else self.request.model_dump(exclude_none=True)
+            request_dict = (
+                None
+                if isinstance(self.request, EmptyRequest)
+                else self.request.model_dump(exclude_none=True)
+            )
             if request_dict:
                 for key, value in request_dict.items():
                     if isinstance(value, Enum):
                         request_dict[key] = value.value
 
             # Debug log for request
-            logging.debug(f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}")
+            logging.debug(
+                f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}"
+            )
             logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}")
             logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}")
 
@@ -311,9 +447,11 @@
             resp = client.request(
                 method=self.endpoint.method.value,
                 path=self.endpoint.path,
-                json=request_dict,
+                data=request_dict,
                 params=self.endpoint.query_params,
                 files=self.files,
+                content_type=self.content_type,
+                multipart_parser=self.multipart_parser
             )
 
             # Debug log for response
@@ -327,7 +465,7 @@
             return self._parse_response(resp)
 
         except Exception as e:
-            logging.debug(f"[DEBUG] API Exception: {str(e)}")
+            logging.error(f"[DEBUG] API Exception: {str(e)}")
             raise Exception(str(e))
 
     def _parse_response(self, resp):
@@ -339,3 +477,140 @@
         self.response = self.endpoint.response_model.model_validate(resp)
         logging.debug(f"[DEBUG] Parsed Response: {self.response}")
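+        # model_validate raises pydantic.ValidationError when the payload does not
+        # match response_model, so a returned object is always fully typed.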
return self.response + + +class TaskStatus(str, Enum): + """Enum for task status values""" + + COMPLETED = "completed" + FAILED = "failed" + PENDING = "pending" + + +class PollingOperation(Generic[T, R]): + """ + Represents an asynchronous API operation that requires polling for completion. + """ + + def __init__( + self, + poll_endpoint: ApiEndpoint[EmptyRequest, R], + completed_statuses: list, + failed_statuses: list, + status_extractor: Callable[[R], str], + progress_extractor: Callable[[R], float] = None, + request: Optional[T] = None, + api_base: str | None = None, + auth_token: Optional[str] = None, + poll_interval: float = 5.0, + ): + self.poll_endpoint = poll_endpoint + self.request = request + self.api_base: str = api_base or args.comfy_api_base + self.auth_token = auth_token + self.poll_interval = poll_interval + + # Polling configuration + self.status_extractor = status_extractor or ( + lambda x: getattr(x, "status", None) + ) + self.progress_extractor = progress_extractor + self.completed_statuses = completed_statuses + self.failed_statuses = failed_statuses + + # For storing response data + self.final_response = None + self.error = None + + def execute(self, client: Optional[ApiClient] = None) -> R: + """Execute the polling operation using the provided client. If failed, raise an exception.""" + try: + if client is None: + client = ApiClient( + base_url=self.api_base, + api_key=self.auth_token, + ) + return self._poll_until_complete(client) + except Exception as e: + raise Exception(f"Error during polling: {str(e)}") + + def _check_task_status(self, response: R) -> TaskStatus: + """Check task status using the status extractor function""" + try: + status = self.status_extractor(response) + if status in self.completed_statuses: + return TaskStatus.COMPLETED + elif status in self.failed_statuses: + return TaskStatus.FAILED + return TaskStatus.PENDING + except Exception as e: + logging.error(f"Error extracting status: {e}") + return TaskStatus.PENDING + + def _poll_until_complete(self, client: ApiClient) -> R: + """Poll until the task is complete""" + poll_count = 0 + if self.progress_extractor: + progress = utils.ProgressBar(PROGRESS_BAR_MAX) + + while True: + try: + poll_count += 1 + logging.debug(f"[DEBUG] Polling attempt #{poll_count}") + + request_dict = ( + self.request.model_dump(exclude_none=True) + if self.request is not None + else None + ) + + if poll_count == 1: + logging.debug( + f"[DEBUG] Poll Request: {self.poll_endpoint.method.value} {self.poll_endpoint.path}" + ) + logging.debug( + f"[DEBUG] Poll Request Data: {json.dumps(request_dict, indent=2) if request_dict else 'None'}" + ) + + # Query task status + resp = client.request( + method=self.poll_endpoint.method.value, + path=self.poll_endpoint.path, + params=self.poll_endpoint.query_params, + data=request_dict, + ) + + # Parse response + response_obj = self.poll_endpoint.response_model.model_validate(resp) + # Check if task is complete + status = self._check_task_status(response_obj) + logging.debug(f"[DEBUG] Task Status: {status}") + + # If progress extractor is provided, extract progress + if self.progress_extractor: + new_progress = self.progress_extractor(response_obj) + if new_progress is not None: + progress.update_absolute(new_progress, total=PROGRESS_BAR_MAX) + + if status == TaskStatus.COMPLETED: + logging.debug("[DEBUG] Task completed successfully") + self.final_response = response_obj + if self.progress_extractor: + progress.update(100) + return self.final_response + elif status == TaskStatus.FAILED: + 
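+                    # Serialize the raw poll response into the exception so the
+                    # upstream error details are not lost.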
message = f"Task failed: {json.dumps(resp)}" + logging.error(f"[DEBUG] {message}") + raise Exception(message) + else: + logging.debug("[DEBUG] Task still pending, continuing to poll...") + + # Wait before polling again + logging.debug( + f"[DEBUG] Waiting {self.poll_interval} seconds before next poll" + ) + time.sleep(self.poll_interval) + + except Exception as e: + logging.error(f"[DEBUG] Polling error: {str(e)}") + raise Exception(f"Error while polling: {str(e)}") diff --git a/comfy_api_nodes/apis/luma_api.py b/comfy_api_nodes/apis/luma_api.py new file mode 100644 index 000000000..632c4ab96 --- /dev/null +++ b/comfy_api_nodes/apis/luma_api.py @@ -0,0 +1,253 @@ +from __future__ import annotations + + +import torch + +from enum import Enum +from typing import Optional, Union + +from pydantic import BaseModel, Field, confloat + + + +class LumaIO: + LUMA_REF = "LUMA_REF" + LUMA_CONCEPTS = "LUMA_CONCEPTS" + + +class LumaReference: + def __init__(self, image: torch.Tensor, weight: float): + self.image = image + self.weight = weight + + def create_api_model(self, download_url: str): + return LumaImageRef(url=download_url, weight=self.weight) + +class LumaReferenceChain: + def __init__(self, first_ref: LumaReference=None): + self.refs: list[LumaReference] = [] + if first_ref: + self.refs.append(first_ref) + + def add(self, luma_ref: LumaReference=None): + self.refs.append(luma_ref) + + def create_api_model(self, download_urls: list[str], max_refs=4): + if len(self.refs) == 0: + return None + api_refs: list[LumaImageRef] = [] + for ref, url in zip(self.refs, download_urls): + api_ref = LumaImageRef(url=url, weight=ref.weight) + api_refs.append(api_ref) + return api_refs + + def clone(self): + c = LumaReferenceChain() + for ref in self.refs: + c.add(ref) + return c + + +class LumaConcept: + def __init__(self, key: str): + self.key = key + + +class LumaConceptChain: + def __init__(self, str_list: list[str] = None): + self.concepts: list[LumaConcept] = [] + if str_list is not None: + for c in str_list: + if c != "None": + self.add(LumaConcept(key=c)) + + def add(self, concept: LumaConcept): + self.concepts.append(concept) + + def create_api_model(self): + if len(self.concepts) == 0: + return None + api_concepts: list[LumaConceptObject] = [] + for concept in self.concepts: + if concept.key == "None": + continue + api_concepts.append(LumaConceptObject(key=concept.key)) + if len(api_concepts) == 0: + return None + return api_concepts + + def clone(self): + c = LumaConceptChain() + for concept in self.concepts: + c.add(concept) + return c + + def clone_and_merge(self, other: LumaConceptChain): + c = self.clone() + for concept in other.concepts: + c.add(concept) + return c + + +def get_luma_concepts(include_none=False): + concepts = [] + if include_none: + concepts.append("None") + return concepts + [ + "truck_left", + "pan_right", + "pedestal_down", + "low_angle", + "pedestal_up", + "selfie", + "pan_left", + "roll_right", + "zoom_in", + "over_the_shoulder", + "orbit_right", + "orbit_left", + "static", + "tiny_planet", + "high_angle", + "bolt_cam", + "dolly_zoom", + "overhead", + "zoom_out", + "handheld", + "roll_left", + "pov", + "aerial_drone", + "push_in", + "crane_down", + "truck_right", + "tilt_down", + "elevator_doors", + "tilt_up", + "ground_level", + "pull_out", + "aerial", + "crane_up", + "eye_level" + ] + + +class LumaImageModel(str, Enum): + photon_1 = "photon-1" + photon_flash_1 = "photon-flash-1" + + +class LumaVideoModel(str, Enum): + ray_2 = "ray-2" + ray_flash_2 = "ray-flash-2" + ray_1_6 
= "ray-1-6" + + +class LumaAspectRatio(str, Enum): + ratio_1_1 = "1:1" + ratio_16_9 = "16:9" + ratio_9_16 = "9:16" + ratio_4_3 = "4:3" + ratio_3_4 = "3:4" + ratio_21_9 = "21:9" + ratio_9_21 = "9:21" + + +class LumaVideoOutputResolution(str, Enum): + res_540p = "540p" + res_720p = "720p" + res_1080p = "1080p" + res_4k = "4k" + + +class LumaVideoModelOutputDuration(str, Enum): + dur_5s = "5s" + dur_9s = "9s" + + +class LumaGenerationType(str, Enum): + video = 'video' + image = 'image' + + +class LumaState(str, Enum): + queued = "queued" + dreaming = "dreaming" + completed = "completed" + failed = "failed" + + +class LumaAssets(BaseModel): + video: Optional[str] = Field(None, description='The URL of the video') + image: Optional[str] = Field(None, description='The URL of the image') + progress_video: Optional[str] = Field(None, description='The URL of the progress video') + + +class LumaImageRef(BaseModel): + '''Used for image gen''' + url: str = Field(..., description='The URL of the image reference') + weight: confloat(ge=0.0, le=1.0) = Field(..., description='The weight of the image reference') + + +class LumaImageReference(BaseModel): + '''Used for video gen''' + type: Optional[str] = Field('image', description='Input type, defaults to image') + url: str = Field(..., description='The URL of the image') + + +class LumaModifyImageRef(BaseModel): + url: str = Field(..., description='The URL of the image reference') + weight: confloat(ge=0.0, le=1.0) = Field(..., description='The weight of the image reference') + + +class LumaCharacterRef(BaseModel): + identity0: LumaImageIdentity = Field(..., description='The image identity object') + + +class LumaImageIdentity(BaseModel): + images: list[str] = Field(..., description='The URLs of the image identity') + + +class LumaGenerationReference(BaseModel): + type: str = Field('generation', description='Input type, defaults to generation') + id: str = Field(..., description='The ID of the generation') + + +class LumaKeyframes(BaseModel): + frame0: Optional[Union[LumaImageReference, LumaGenerationReference]] = Field(None, description='') + frame1: Optional[Union[LumaImageReference, LumaGenerationReference]] = Field(None, description='') + + +class LumaConceptObject(BaseModel): + key: str = Field(..., description='Camera Concept name') + + +class LumaImageGenerationRequest(BaseModel): + prompt: str = Field(..., description='The prompt of the generation') + model: LumaImageModel = Field(LumaImageModel.photon_1, description='The image model used for the generation') + aspect_ratio: Optional[LumaAspectRatio] = Field(LumaAspectRatio.ratio_16_9, description='The aspect ratio of the generation') + image_ref: Optional[list[LumaImageRef]] = Field(None, description='List of image reference objects') + style_ref: Optional[list[LumaImageRef]] = Field(None, description='List of style reference objects') + character_ref: Optional[LumaCharacterRef] = Field(None, description='The image identity object') + modify_image_ref: Optional[LumaModifyImageRef] = Field(None, description='The modify image reference object') + + +class LumaGenerationRequest(BaseModel): + prompt: str = Field(..., description='The prompt of the generation') + model: LumaVideoModel = Field(LumaVideoModel.ray_2, description='The video model used for the generation') + duration: Optional[LumaVideoModelOutputDuration] = Field(None, description='The duration of the generation') + aspect_ratio: Optional[LumaAspectRatio] = Field(None, description='The aspect ratio of the generation') + resolution: 
Optional[LumaVideoOutputResolution] = Field(None, description='The resolution of the generation') + loop: Optional[bool] = Field(None, description='Whether to loop the video') + keyframes: Optional[LumaKeyframes] = Field(None, description='The keyframes of the generation') + concepts: Optional[list[LumaConceptObject]] = Field(None, description='Camera Concepts to apply to generation') + + +class LumaGeneration(BaseModel): + id: str = Field(..., description='The ID of the generation') + generation_type: LumaGenerationType = Field(..., description='Generation type, image or video') + state: LumaState = Field(..., description='The state of the generation') + failure_reason: Optional[str] = Field(None, description='The reason for the state of the generation') + created_at: str = Field(..., description='The date and time when the generation was created') + assets: Optional[LumaAssets] = Field(None, description='The assets of the generation') + model: str = Field(..., description='The model used for the generation') + request: Union[LumaGenerationRequest, LumaImageGenerationRequest] = Field(..., description="The request used for the generation") diff --git a/comfy_api_nodes/apis/pixverse_api.py b/comfy_api_nodes/apis/pixverse_api.py new file mode 100644 index 000000000..9bb29c383 --- /dev/null +++ b/comfy_api_nodes/apis/pixverse_api.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +from enum import Enum +from typing import Optional + +from pydantic import BaseModel, Field + + +pixverse_templates = { + "Microwave": 324641385496960, + "Suit Swagger": 328545151283968, + "Anything, Robot": 313358700761536, + "Subject 3 Fever": 327828816843648, + "kiss kiss": 315446315336768, +} + + +class PixverseIO: + TEMPLATE = "PIXVERSE_TEMPLATE" + + +class PixverseStatus(int, Enum): + successful = 1 + generating = 5 + deleted = 6 + contents_moderation = 7 + failed = 8 + + +class PixverseAspectRatio(str, Enum): + ratio_16_9 = "16:9" + ratio_4_3 = "4:3" + ratio_1_1 = "1:1" + ratio_3_4 = "3:4" + ratio_9_16 = "9:16" + + +class PixverseQuality(str, Enum): + res_360p = "360p" + res_540p = "540p" + res_720p = "720p" + res_1080p = "1080p" + + +class PixverseDuration(int, Enum): + dur_5 = 5 + dur_8 = 8 + + +class PixverseMotionMode(str, Enum): + normal = "normal" + fast = "fast" + + +class PixverseStyle(str, Enum): + anime = "anime" + animation_3d = "3d_animation" + clay = "clay" + comic = "comic" + cyberpunk = "cyberpunk" + + +# NOTE: forgoing descriptions for now in return for dev speed +class PixverseTextVideoRequest(BaseModel): + aspect_ratio: PixverseAspectRatio = Field(...) + quality: PixverseQuality = Field(...) + duration: PixverseDuration = Field(...) + model: Optional[str] = Field("v3.5") + motion_mode: Optional[PixverseMotionMode] = Field(PixverseMotionMode.normal) + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + style: Optional[str] = Field(None) + template_id: Optional[int] = Field(None) + water_mark: Optional[bool] = Field(None) + + +class PixverseImageVideoRequest(BaseModel): + quality: PixverseQuality = Field(...) + duration: PixverseDuration = Field(...) + img_id: int = Field(...) + model: Optional[str] = Field("v3.5") + motion_mode: Optional[PixverseMotionMode] = Field(PixverseMotionMode.normal) + prompt: str = Field(...) 
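+    # Usage sketch (assumption, not part of the original file): img_id is obtained
+    # from a prior image-upload call that returns PixverseImageUploadResponse
+    # (defined below); the hypothetical values here are for illustration only:
+    #   PixverseImageVideoRequest(quality=PixverseQuality.res_540p,
+    #                             duration=PixverseDuration.dur_5,
+    #                             img_id=12345, prompt="slow camera pan")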
+ negative_prompt: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + style: Optional[str] = Field(None) + template_id: Optional[int] = Field(None) + water_mark: Optional[bool] = Field(None) + + +class PixverseTransitionVideoRequest(BaseModel): + quality: PixverseQuality = Field(...) + duration: PixverseDuration = Field(...) + first_frame_img: int = Field(...) + last_frame_img: int = Field(...) + model: Optional[str] = Field("v3.5") + motion_mode: Optional[PixverseMotionMode] = Field(PixverseMotionMode.normal) + prompt: str = Field(...) + # negative_prompt: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + # style: Optional[str] = Field(None) + # template_id: Optional[int] = Field(None) + # water_mark: Optional[bool] = Field(None) + + +class PixverseImageUploadResponse(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp: Optional[PixverseImgIdResponseObject] = Field(None, alias='Resp') + + +class PixverseImgIdResponseObject(BaseModel): + img_id: Optional[int] = None + + +class PixverseVideoResponse(BaseModel): + ErrCode: Optional[int] = Field(None) + ErrMsg: Optional[str] = Field(None) + Resp: Optional[PixverseVideoIdResponseObject] = Field(None) + + +class PixverseVideoIdResponseObject(BaseModel): + video_id: int = Field(..., description='Video_id') + + +class PixverseGenerationStatusResponse(BaseModel): + ErrCode: Optional[int] = Field(None) + ErrMsg: Optional[str] = Field(None) + Resp: Optional[PixverseGenerationStatusResponseObject] = Field(None) + + +class PixverseGenerationStatusResponseObject(BaseModel): + create_time: Optional[str] = Field(None) + id: Optional[int] = Field(None) + modify_time: Optional[str] = Field(None) + negative_prompt: Optional[str] = Field(None) + outputHeight: Optional[int] = Field(None) + outputWidth: Optional[int] = Field(None) + prompt: Optional[str] = Field(None) + resolution_ratio: Optional[int] = Field(None) + seed: Optional[int] = Field(None) + size: Optional[int] = Field(None) + status: Optional[int] = Field(None) + style: Optional[str] = Field(None) + url: Optional[str] = Field(None) diff --git a/comfy_api_nodes/apis/recraft_api.py b/comfy_api_nodes/apis/recraft_api.py new file mode 100644 index 000000000..c0ec9d0c8 --- /dev/null +++ b/comfy_api_nodes/apis/recraft_api.py @@ -0,0 +1,263 @@ +from __future__ import annotations + + + +from enum import Enum +from typing import Optional + +from pydantic import BaseModel, Field, conint, confloat + + +class RecraftColor: + def __init__(self, r: int, g: int, b: int): + self.color = [r, g, b] + + def create_api_model(self): + return RecraftColorObject(rgb=self.color) + + +class RecraftColorChain: + def __init__(self): + self.colors: list[RecraftColor] = [] + + def get_first(self): + if len(self.colors) > 0: + return self.colors[0] + return None + + def add(self, color: RecraftColor): + self.colors.append(color) + + def create_api_model(self): + if not self.colors: + return None + colors_api = [x.create_api_model() for x in self.colors] + return colors_api + + def clone(self): + c = RecraftColorChain() + for color in self.colors: + c.add(color) + return c + + def clone_and_merge(self, other: RecraftColorChain): + c = self.clone() + for color in other.colors: + c.add(color) + return c + + +class RecraftControls: + def __init__(self, colors: RecraftColorChain=None, background_color: RecraftColorChain=None, + artistic_level: int=None, no_text: bool=None): + self.colors = colors + self.background_color = background_color + self.artistic_level = 
artistic_level + self.no_text = no_text + + def create_api_model(self): + if self.colors is None and self.background_color is None and self.artistic_level is None and self.no_text is None: + return None + colors_api = None + background_color_api = None + if self.colors: + colors_api = self.colors.create_api_model() + if self.background_color: + first_background = self.background_color.get_first() + background_color_api = first_background.create_api_model() if first_background else None + + return RecraftControlsObject(colors=colors_api, background_color=background_color_api, + artistic_level=self.artistic_level, no_text=self.no_text) + + +class RecraftStyle: + def __init__(self, style: str=None, substyle: str=None, style_id: str=None): + self.style = style + if substyle == "None": + substyle = None + self.substyle = substyle + self.style_id = style_id + + +class RecraftIO: + STYLEV3 = "RECRAFT_V3_STYLE" + SVG = "SVG" # TODO: if acceptable, move into ComfyUI's typing class + COLOR = "RECRAFT_COLOR" + CONTROLS = "RECRAFT_CONTROLS" + + +class RecraftStyleV3(str, Enum): + #any = 'any' NOTE: this does not work for some reason... why? + realistic_image = 'realistic_image' + digital_illustration = 'digital_illustration' + vector_illustration = 'vector_illustration' + logo_raster = 'logo_raster' + + +def get_v3_substyles(style_v3: str, include_none=True) -> list[str]: + substyles: list[str] = [] + if include_none: + substyles.append("None") + return substyles + dict_recraft_substyles_v3.get(style_v3, []) + + +dict_recraft_substyles_v3 = { + RecraftStyleV3.realistic_image: [ + "b_and_w", + "enterprise", + "evening_light", + "faded_nostalgia", + "forest_life", + "hard_flash", + "hdr", + "motion_blur", + "mystic_naturalism", + "natural_light", + "natural_tones", + "organic_calm", + "real_life_glow", + "retro_realism", + "retro_snapshot", + "studio_portrait", + "urban_drama", + "village_realism", + "warm_folk" + ], + RecraftStyleV3.digital_illustration: [ + "2d_art_poster", + "2d_art_poster_2", + "antiquarian", + "bold_fantasy", + "child_book", + "child_books", + "cover", + "crosshatch", + "digital_engraving", + "engraving_color", + "expressionism", + "freehand_details", + "grain", + "grain_20", + "graphic_intensity", + "hand_drawn", + "hand_drawn_outline", + "handmade_3d", + "hard_comics", + "infantile_sketch", + "long_shadow", + "modern_folk", + "multicolor", + "neon_calm", + "noir", + "nostalgic_pastel", + "outline_details", + "pastel_gradient", + "pastel_sketch", + "pixel_art", + "plastic", + "pop_art", + "pop_renaissance", + "seamless", + "street_art", + "tablet_sketch", + "urban_glow", + "urban_sketching", + "vanilla_dreams", + "young_adult_book", + "young_adult_book_2" + ], + RecraftStyleV3.vector_illustration: [ + "bold_stroke", + "chemistry", + "colored_stencil", + "contour_pop_art", + "cosmics", + "cutout", + "depressive", + "editorial", + "emotional_flat", + "engraving", + "infographical", + "line_art", + "line_circuit", + "linocut", + "marker_outline", + "mosaic", + "naivector", + "roundish_flat", + "seamless", + "segmented_colors", + "sharp_contrast", + "thin", + "vector_photo", + "vivid_shapes" + ], + RecraftStyleV3.logo_raster: [ + "emblem_graffiti", + "emblem_pop_art", + "emblem_punk", + "emblem_stamp", + "emblem_vintage" + ], +} + + +class RecraftModel(str, Enum): + recraftv3 = 'recraftv3' + recraftv2 = 'recraftv2' + + +class RecraftImageSize(str, Enum): + res_1024x1024 = '1024x1024' + res_1365x1024 = '1365x1024' + res_1024x1365 = '1024x1365' + res_1536x1024 = '1536x1024' + 
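+    # These 'WxH' strings are what RecraftImageGenerationRequest.size (defined
+    # below) serializes to when the request is sent to the API.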
res_1024x1536 = '1024x1536'
+    res_1820x1024 = '1820x1024'
+    res_1024x1820 = '1024x1820'
+    res_1024x2048 = '1024x2048'
+    res_2048x1024 = '2048x1024'
+    res_1434x1024 = '1434x1024'
+    res_1024x1434 = '1024x1434'
+    res_1024x1280 = '1024x1280'
+    res_1280x1024 = '1280x1024'
+    res_1024x1707 = '1024x1707'
+    res_1707x1024 = '1707x1024'
+
+
+class RecraftColorObject(BaseModel):
+    rgb: list[int] = Field(..., description='An array of 3 integer values in range of 0...255 defining RGB Color Model')
+
+
+class RecraftControlsObject(BaseModel):
+    colors: Optional[list[RecraftColorObject]] = Field(None, description='An array of preferable colors')
+    background_color: Optional[RecraftColorObject] = Field(None, description='Use given color as a desired background color')
+    no_text: Optional[bool] = Field(None, description='Do not embed text layouts')
+    artistic_level: Optional[conint(ge=0, le=5)] = Field(None, description='Defines artistic tone of your image. At a simple level, the person looks straight at the camera in a static and clean style. Dynamic and eccentric levels introduce movement and creativity. The value should be in range [0..5].')
+
+
+class RecraftImageGenerationRequest(BaseModel):
+    prompt: str = Field(..., description='The text prompt describing the image to generate')
+    size: Optional[RecraftImageSize] = Field(None, description='The size of the generated image (e.g., "1024x1024")')
+    n: conint(ge=1, le=6) = Field(..., description='The number of images to generate')
+    negative_prompt: Optional[str] = Field(None, description='A text description of undesired elements on an image')
+    model: Optional[RecraftModel] = Field(RecraftModel.recraftv3, description='The model to use for generation (e.g., "recraftv3")')
+    style: Optional[str] = Field(None, description='The style to apply to the generated image (e.g., "digital_illustration")')
+    substyle: Optional[str] = Field(None, description='The substyle to apply to the generated image, depending on the style input')
+    controls: Optional[RecraftControlsObject] = Field(None, description='A set of custom parameters to tweak generation process')
+    style_id: Optional[str] = Field(None, description='Use a previously uploaded style as a reference; UUID')
+    strength: Optional[confloat(ge=0.0, le=1.0)] = Field(None, description='Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity')
+    random_seed: Optional[int] = Field(None, description="Seed for image generation")
+    # text_layout
+
+
+class RecraftReturnedObject(BaseModel):
+    image_id: str = Field(..., description='Unique identifier for the generated image')
+    url: str = Field(..., description='URL to access the generated image')
+
+
+class RecraftImageGenerationResponse(BaseModel):
+    created: int = Field(..., description='Unix timestamp when the generation was created')
+    credits: int = Field(..., description='Number of credits used for the generation')
+    data: Optional[list[RecraftReturnedObject]] = Field(None, description='Array of generated image information')
+    image: Optional[RecraftReturnedObject] = Field(None, description='Single generated image')
diff --git a/comfy_api_nodes/apis/stability_api.py b/comfy_api_nodes/apis/stability_api.py
new file mode 100644
index 000000000..47c87daec
--- /dev/null
+++ b/comfy_api_nodes/apis/stability_api.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+from enum import Enum
+from typing import Optional
+
+from pydantic import BaseModel, Field, confloat
+
+
+class
StabilityFormat(str, Enum): + png = 'png' + jpeg = 'jpeg' + webp = 'webp' + + +class StabilityAspectRatio(str, Enum): + ratio_1_1 = "1:1" + ratio_16_9 = "16:9" + ratio_9_16 = "9:16" + ratio_3_2 = "3:2" + ratio_2_3 = "2:3" + ratio_5_4 = "5:4" + ratio_4_5 = "4:5" + ratio_21_9 = "21:9" + ratio_9_21 = "9:21" + + +def get_stability_style_presets(include_none=True): + presets = [] + if include_none: + presets.append("None") + return presets + [x.value for x in StabilityStylePreset] + + +class StabilityStylePreset(str, Enum): + _3d_model = "3d-model" + analog_film = "analog-film" + anime = "anime" + cinematic = "cinematic" + comic_book = "comic-book" + digital_art = "digital-art" + enhance = "enhance" + fantasy_art = "fantasy-art" + isometric = "isometric" + line_art = "line-art" + low_poly = "low-poly" + modeling_compound = "modeling-compound" + neon_punk = "neon-punk" + origami = "origami" + photographic = "photographic" + pixel_art = "pixel-art" + tile_texture = "tile-texture" + + +class Stability_SD3_5_Model(str, Enum): + sd3_5_large = "sd3.5-large" + # sd3_5_large_turbo = "sd3.5-large-turbo" + sd3_5_medium = "sd3.5-medium" + + +class Stability_SD3_5_GenerationMode(str, Enum): + text_to_image = "text-to-image" + image_to_image = "image-to-image" + + +class StabilityStable3_5Request(BaseModel): + model: str = Field(...) + mode: str = Field(...) + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + aspect_ratio: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + output_format: Optional[str] = Field(StabilityFormat.png.value) + image: Optional[str] = Field(None) + style_preset: Optional[str] = Field(None) + cfg_scale: float = Field(...) + strength: Optional[confloat(ge=0.0, le=1.0)] = Field(None) + + +class StabilityUpscaleConservativeRequest(BaseModel): + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + output_format: Optional[str] = Field(StabilityFormat.png.value) + image: Optional[str] = Field(None) + creativity: Optional[confloat(ge=0.2, le=0.5)] = Field(None) + + +class StabilityUpscaleCreativeRequest(BaseModel): + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + output_format: Optional[str] = Field(StabilityFormat.png.value) + image: Optional[str] = Field(None) + creativity: Optional[confloat(ge=0.1, le=0.5)] = Field(None) + style_preset: Optional[str] = Field(None) + + +class StabilityStableUltraRequest(BaseModel): + prompt: str = Field(...) 
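+    # Usage sketch (assumption, not part of the original file): only `prompt` is
+    # needed for plain text-to-image; supplying `image` together with `strength`
+    # (both optional fields below) turns this into an image-to-image request, e.g.:
+    #   StabilityStableUltraRequest(prompt="a lighthouse at dawn",
+    #                               aspect_ratio="16:9", output_format="png")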
+ negative_prompt: Optional[str] = Field(None) + aspect_ratio: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + output_format: Optional[str] = Field(StabilityFormat.png.value) + image: Optional[str] = Field(None) + style_preset: Optional[str] = Field(None) + strength: Optional[confloat(ge=0.0, le=1.0)] = Field(None) + + +class StabilityStableUltraResponse(BaseModel): + image: Optional[str] = Field(None) + finish_reason: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + + +class StabilityResultsGetResponse(BaseModel): + image: Optional[str] = Field(None) + finish_reason: Optional[str] = Field(None) + seed: Optional[int] = Field(None) + id: Optional[str] = Field(None) + name: Optional[str] = Field(None) + errors: Optional[list[str]] = Field(None) + status: Optional[str] = Field(None) + result: Optional[str] = Field(None) + + +class StabilityAsyncResponse(BaseModel): + id: Optional[str] = Field(None) diff --git a/comfy_api_nodes/mapper_utils.py b/comfy_api_nodes/mapper_utils.py new file mode 100644 index 000000000..6fab8f4bb --- /dev/null +++ b/comfy_api_nodes/mapper_utils.py @@ -0,0 +1,116 @@ +from enum import Enum + +from pydantic.fields import FieldInfo +from pydantic import BaseModel +from pydantic_core import PydanticUndefined + +from comfy.comfy_types.node_typing import IO, InputTypeOptions + +NodeInput = tuple[IO, InputTypeOptions] + + +def _create_base_config(field_info: FieldInfo) -> InputTypeOptions: + config = {} + if hasattr(field_info, "default") and field_info.default is not PydanticUndefined: + config["default"] = field_info.default + if hasattr(field_info, "description") and field_info.description is not None: + config["tooltip"] = field_info.description + return config + + +def _get_number_constraints_config(field_info: FieldInfo) -> dict: + config = {} + if hasattr(field_info, "metadata"): + metadata = field_info.metadata + for constraint in metadata: + if hasattr(constraint, "ge"): + config["min"] = constraint.ge + if hasattr(constraint, "le"): + config["max"] = constraint.le + if hasattr(constraint, "multiple_of"): + config["step"] = constraint.multiple_of + return config + + +def _model_field_to_image_input(field_info: FieldInfo, **kwargs) -> NodeInput: + return IO.IMAGE, { + **_create_base_config(field_info), + **kwargs, + } + + +def _model_field_to_string_input(field_info: FieldInfo, **kwargs) -> NodeInput: + return IO.STRING, { + **_create_base_config(field_info), + **kwargs, + } + + +def _model_field_to_float_input(field_info: FieldInfo, **kwargs) -> NodeInput: + return IO.FLOAT, { + **_create_base_config(field_info), + **_get_number_constraints_config(field_info), + **kwargs, + } + + +def _model_field_to_int_input(field_info: FieldInfo, **kwargs) -> NodeInput: + return IO.INT, { + **_create_base_config(field_info), + **_get_number_constraints_config(field_info), + **kwargs, + } + + +def _model_field_to_combo_input( + field_info: FieldInfo, enum_type: type[Enum] = None, **kwargs +) -> NodeInput: + combo_config = {} + if enum_type is not None: + combo_config["options"] = [option.value for option in enum_type] + combo_config = { + **combo_config, + **_create_base_config(field_info), + **kwargs, + } + return IO.COMBO, combo_config + + +def model_field_to_node_input( + input_type: IO, base_model: type[BaseModel], field_name: str, **kwargs +) -> NodeInput: + """ + Maps a field from a Pydantic model to a Comfy node input. + + Args: + input_type: The type of the input. + base_model: The Pydantic model to map the field from. 
+ field_name: The name of the field to map. + **kwargs: Additional key/values to include in the input options. + + Note: + For combo inputs, pass an `Enum` to the `enum_type` keyword argument to populate the options automatically. + + Example: + >>> model_field_to_node_input(IO.STRING, MyModel, "my_field", multiline=True) + >>> model_field_to_node_input(IO.COMBO, MyModel, "my_field", enum_type=MyEnum) + >>> model_field_to_node_input(IO.FLOAT, MyModel, "my_field", slider=True) + """ + field_info: FieldInfo = base_model.model_fields[field_name] + result: NodeInput + + if input_type == IO.IMAGE: + result = _model_field_to_image_input(field_info, **kwargs) + elif input_type == IO.STRING: + result = _model_field_to_string_input(field_info, **kwargs) + elif input_type == IO.FLOAT: + result = _model_field_to_float_input(field_info, **kwargs) + elif input_type == IO.INT: + result = _model_field_to_int_input(field_info, **kwargs) + elif input_type == IO.COMBO: + result = _model_field_to_combo_input(field_info, **kwargs) + else: + message = f"Invalid input type: {input_type}" + raise ValueError(message) + + return result diff --git a/comfy_api_nodes/nodes_api.py b/comfy_api_nodes/nodes_api.py deleted file mode 100644 index a977bb9b7..000000000 --- a/comfy_api_nodes/nodes_api.py +++ /dev/null @@ -1,449 +0,0 @@ -import base64 -import io -import math -from inspect import cleandoc - -import numpy as np -import requests -import torch -from PIL import Image - -from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict -from comfy.utils import common_upscale -from comfy_api_nodes.apis import ( - OpenAIImageEditRequest, - OpenAIImageGenerationRequest, - OpenAIImageGenerationResponse, -) -from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation - - -def downscale_input(image): - samples = image.movedim(-1,1) - #downscaling input images to roughly the same size as the outputs - total = int(1536 * 1024) - scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) - if scale_by >= 1: - return image - width = round(samples.shape[3] * scale_by) - height = round(samples.shape[2] * scale_by) - - s = common_upscale(samples, width, height, "lanczos", "disabled") - s = s.movedim(1,-1) - return s - -def validate_and_cast_response(response): - # validate raw JSON response - data = response.data - if not data or len(data) == 0: - raise Exception("No images returned from API endpoint") - - # Initialize list to store image tensors - image_tensors = [] - - # Process each image in the data array - for image_data in data: - image_url = image_data.url - b64_data = image_data.b64_json - - if not image_url and not b64_data: - raise Exception("No image was generated in the response") - - if b64_data: - img_data = base64.b64decode(b64_data) - img = Image.open(io.BytesIO(img_data)) - - elif image_url: - img_response = requests.get(image_url) - if img_response.status_code != 200: - raise Exception("Failed to download the image") - img = Image.open(io.BytesIO(img_response.content)) - - img = img.convert("RGBA") - - # Convert to numpy array, normalize to float32 between 0 and 1 - img_array = np.array(img).astype(np.float32) / 255.0 - img_tensor = torch.from_numpy(img_array) - - # Add to list of tensors - image_tensors.append(img_tensor) - - return torch.stack(image_tensors, dim=0) - -class OpenAIDalle2(ComfyNodeABC): - """ - Generates images synchronously via OpenAI's DALL·E 2 endpoint. - - Uses the proxy at /proxy/openai/images/generations. 
Returned URLs are short‑lived, - so download or cache results if you need to keep them. - """ - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": (IO.STRING, { - "multiline": True, - "default": "", - "tooltip": "Text prompt for DALL·E", - }), - }, - "optional": { - "seed": (IO.INT, { - "default": 0, - "min": 0, - "max": 2**31-1, - "step": 1, - "display": "number", - "tooltip": "not implemented yet in backend", - }), - "size": (IO.COMBO, { - "options": ["256x256", "512x512", "1024x1024"], - "default": "1024x1024", - "tooltip": "Image size", - }), - "n": (IO.INT, { - "default": 1, - "min": 1, - "max": 8, - "step": 1, - "display": "number", - "tooltip": "How many images to generate", - }), - "image": (IO.IMAGE, { - "default": None, - "tooltip": "Optional reference image for image editing.", - }), - "mask": (IO.MASK, { - "default": None, - "tooltip": "Optional mask for inpainting (white areas will be replaced)", - }), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG" - } - } - - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - def api_call(self, prompt, seed=0, image=None, mask=None, n=1, size="1024x1024", auth_token=None): - model = "dall-e-2" - path = "/proxy/openai/images/generations" - request_class = OpenAIImageGenerationRequest - img_binary = None - - if image is not None and mask is not None: - path = "/proxy/openai/images/edits" - request_class = OpenAIImageEditRequest - - input_tensor = image.squeeze().cpu() - height, width, channels = input_tensor.shape - rgba_tensor = torch.ones(height, width, 4, device="cpu") - rgba_tensor[:, :, :channels] = input_tensor - - if mask.shape[1:] != image.shape[1:-1]: - raise Exception("Mask and Image must be the same size") - rgba_tensor[:,:,3] = (1-mask.squeeze().cpu()) - - rgba_tensor = downscale_input(rgba_tensor.unsqueeze(0)).squeeze() - - image_np = (rgba_tensor.numpy() * 255).astype(np.uint8) - img = Image.fromarray(image_np) - img_byte_arr = io.BytesIO() - img.save(img_byte_arr, format='PNG') - img_byte_arr.seek(0) - img_binary = img_byte_arr#.getvalue() - img_binary.name = "image.png" - elif image is not None or mask is not None: - raise Exception("Dall-E 2 image editing requires an image AND a mask") - - # Build the operation - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=request_class, - response_model=OpenAIImageGenerationResponse - ), - request=request_class( - model=model, - prompt=prompt, - n=n, - size=size, - seed=seed, - ), - files={ - "image": img_binary, - } if img_binary else None, - auth_token=auth_token - ) - - response = operation.execute() - - img_tensor = validate_and_cast_response(response) - return (img_tensor,) - -class OpenAIDalle3(ComfyNodeABC): - """ - Generates images synchronously via OpenAI's DALL·E 3 endpoint. - - Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, - so download or cache results if you need to keep them. 
- """ - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": (IO.STRING, { - "multiline": True, - "default": "", - "tooltip": "Text prompt for DALL·E", - }), - }, - "optional": { - "seed": (IO.INT, { - "default": 0, - "min": 0, - "max": 2**31-1, - "step": 1, - "display": "number", - "tooltip": "not implemented yet in backend", - }), - "quality" : (IO.COMBO, { - "options": ["standard","hd"], - "default": "standard", - "tooltip": "Image quality", - }), - "style": (IO.COMBO, { - "options": ["natural","vivid"], - "default": "natural", - "tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.", - }), - "size": (IO.COMBO, { - "options": ["1024x1024", "1024x1792", "1792x1024"], - "default": "1024x1024", - "tooltip": "Image size", - }), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG" - } - } - - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - def api_call(self, prompt, seed=0, style="natural", quality="standard", size="1024x1024", auth_token=None): - model = "dall-e-3" - - # build the operation - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/openai/images/generations", - method=HttpMethod.POST, - request_model=OpenAIImageGenerationRequest, - response_model=OpenAIImageGenerationResponse - ), - request=OpenAIImageGenerationRequest( - model=model, - prompt=prompt, - quality=quality, - size=size, - style=style, - seed=seed, - ), - auth_token=auth_token - ) - - response = operation.execute() - - img_tensor = validate_and_cast_response(response) - return (img_tensor,) - -class OpenAIGPTImage1(ComfyNodeABC): - """ - Generates images synchronously via OpenAI's GPT Image 1 endpoint. - - Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived, - so download or cache results if you need to keep them. 
- """ - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": (IO.STRING, { - "multiline": True, - "default": "", - "tooltip": "Text prompt for GPT Image 1", - }), - }, - "optional": { - "seed": (IO.INT, { - "default": 0, - "min": 0, - "max": 2**31-1, - "step": 1, - "display": "number", - "tooltip": "not implemented yet in backend", - }), - "quality": (IO.COMBO, { - "options": ["low","medium","high"], - "default": "low", - "tooltip": "Image quality, affects cost and generation time.", - }), - "background": (IO.COMBO, { - "options": ["opaque","transparent"], - "default": "opaque", - "tooltip": "Return image with or without background", - }), - "size": (IO.COMBO, { - "options": ["auto", "1024x1024", "1024x1536", "1536x1024"], - "default": "auto", - "tooltip": "Image size", - }), - "n": (IO.INT, { - "default": 1, - "min": 1, - "max": 8, - "step": 1, - "display": "number", - "tooltip": "How many images to generate", - }), - "image": (IO.IMAGE, { - "default": None, - "tooltip": "Optional reference image for image editing.", - }), - "mask": (IO.MASK, { - "default": None, - "tooltip": "Optional mask for inpainting (white areas will be replaced)", - }), - "moderation": (IO.COMBO, { - "options": ["low","auto"], - "default": "low", - "tooltip": "Moderation level", - }), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG" - } - } - - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - def api_call(self, prompt, seed=0, quality="low", background="opaque", image=None, mask=None, n=1, size="1024x1024", auth_token=None, moderation="low"): - model = "gpt-image-1" - path = "/proxy/openai/images/generations" - request_class = OpenAIImageGenerationRequest - img_binaries = [] - mask_binary = None - files = [] - - if image is not None: - path = "/proxy/openai/images/edits" - request_class = OpenAIImageEditRequest - - batch_size = image.shape[0] - - - for i in range(batch_size): - single_image = image[i:i+1] - scaled_image = downscale_input(single_image).squeeze() - - image_np = (scaled_image.numpy() * 255).astype(np.uint8) - img = Image.fromarray(image_np) - img_byte_arr = io.BytesIO() - img.save(img_byte_arr, format='PNG') - img_byte_arr.seek(0) - img_binary = img_byte_arr - img_binary.name = f"image_{i}.png" - - img_binaries.append(img_binary) - if batch_size == 1: - files.append(("image", img_binary)) - else: - files.append(("image[]", img_binary)) - - if mask is not None: - if image.shape[0] != 1: - raise Exception("Cannot use a mask with multiple image") - if image is None: - raise Exception("Cannot use a mask without an input image") - if mask.shape[1:] != image.shape[1:-1]: - raise Exception("Mask and Image must be the same size") - batch, height, width = mask.shape - rgba_mask = torch.zeros(height, width, 4, device="cpu") - rgba_mask[:,:,3] = (1-mask.squeeze().cpu()) - - scaled_mask = downscale_input(rgba_mask.unsqueeze(0)).squeeze() - - mask_np = (scaled_mask.numpy() * 255).astype(np.uint8) - mask_img = Image.fromarray(mask_np) - mask_img_byte_arr = io.BytesIO() - mask_img.save(mask_img_byte_arr, format='PNG') - mask_img_byte_arr.seek(0) - mask_binary = mask_img_byte_arr - mask_binary.name = "mask.png" - files.append(("mask", mask_binary)) - - - # Build the operation - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=request_class, - response_model=OpenAIImageGenerationResponse - 
), - request=request_class( - model=model, - prompt=prompt, - quality=quality, - background=background, - n=n, - seed=seed, - size=size, - moderation=moderation, - ), - files=files if files else None, - auth_token=auth_token - ) - - response = operation.execute() - - img_tensor = validate_and_cast_response(response) - return (img_tensor,) - - -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "OpenAIDalle2": OpenAIDalle2, - "OpenAIDalle3": OpenAIDalle3, - "OpenAIGPTImage1": OpenAIGPTImage1, -} - -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "OpenAIDalle2": "OpenAI DALL·E 2", - "OpenAIDalle3": "OpenAI DALL·E 3", - "OpenAIGPTImage1": "OpenAI GPT Image 1", -} diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py new file mode 100644 index 000000000..122a6ddf8 --- /dev/null +++ b/comfy_api_nodes/nodes_bfl.py @@ -0,0 +1,906 @@ +import io +from inspect import cleandoc +from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from comfy_api_nodes.apis.bfl_api import ( + BFLStatus, + BFLFluxExpandImageRequest, + BFLFluxFillImageRequest, + BFLFluxCannyImageRequest, + BFLFluxDepthImageRequest, + BFLFluxProGenerateRequest, + BFLFluxProUltraGenerateRequest, + BFLFluxProGenerateResponse, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, +) +from comfy_api_nodes.apinode_utils import ( + downscale_image_tensor, + validate_aspect_ratio, + process_image_response, + resize_mask_to_image, + validate_string, +) + +import numpy as np +from PIL import Image +import requests +import torch +import base64 +import time + + +def convert_mask_to_image(mask: torch.Tensor): + """ + Make mask have the expected amount of dims (4) and channels (3) to be recognized as an image. + """ + mask = mask.unsqueeze(-1) + mask = torch.cat([mask]*3, dim=-1) + return mask + + +def handle_bfl_synchronous_operation( + operation: SynchronousOperation, timeout_bfl_calls=360 +): + response_api: BFLFluxProGenerateResponse = operation.execute() + return _poll_until_generated( + response_api.polling_url, timeout=timeout_bfl_calls + ) + +def _poll_until_generated(polling_url: str, timeout=360): + # used bfl-comfy-nodes to verify code implementation: + # https://github.com/black-forest-labs/bfl-comfy-nodes/tree/main + start_time = time.time() + retries_404 = 0 + max_retries_404 = 5 + retry_404_seconds = 2 + retry_202_seconds = 2 + retry_pending_seconds = 1 + request = requests.Request(method=HttpMethod.GET, url=polling_url) + # NOTE: should True loop be replaced with checking if workflow has been interrupted? + while True: + response = requests.Session().send(request.prepare()) + if response.status_code == 200: + result = response.json() + if result["status"] == BFLStatus.ready: + img_url = result["result"]["sample"] + img_response = requests.get(img_url) + return process_image_response(img_response) + elif result["status"] in [ + BFLStatus.request_moderated, + BFLStatus.content_moderated, + ]: + status = result["status"] + raise Exception( + f"BFL API did not return an image due to: {status}." 
+ ) + elif result["status"] == BFLStatus.error: + raise Exception(f"BFL API encountered an error: {result}.") + elif result["status"] == BFLStatus.pending: + time.sleep(retry_pending_seconds) + continue + elif response.status_code == 404: + if retries_404 < max_retries_404: + retries_404 += 1 + time.sleep(retry_404_seconds) + continue + raise Exception( + f"BFL API could not find task after {max_retries_404} tries." + ) + elif response.status_code == 202: + time.sleep(retry_202_seconds) + elif time.time() - start_time > timeout: + raise Exception( + f"BFL API experienced a timeout; could not return request under {timeout} seconds." + ) + else: + raise Exception(f"BFL API encountered an error: {response.json()}") + +def convert_image_to_base64(image: torch.Tensor): + scaled_image = downscale_image_tensor(image, total_pixels=2048 * 2048) + # remove batch dimension if present + if len(scaled_image.shape) > 3: + scaled_image = scaled_image[0] + image_np = (scaled_image.numpy() * 255).astype(np.uint8) + img = Image.fromarray(image_np) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format="PNG") + return base64.b64encode(img_byte_arr.getvalue()).decode() + + +class FluxProUltraImageNode(ComfyNodeABC): + """ + Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution. + """ + + MINIMUM_RATIO = 1 / 4 + MAXIMUM_RATIO = 4 / 1 + MINIMUM_RATIO_STR = "1:4" + MAXIMUM_RATIO_STR = "4:1" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + "aspect_ratio": ( + IO.STRING, + { + "default": "16:9", + "tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", + }, + ), + "raw": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "When True, generate less processed, more natural-looking images.", + }, + ), + }, + "optional": { + "image_prompt": (IO.IMAGE,), + "image_prompt_strength": ( + IO.FLOAT, + { + "default": 0.1, + "min": 0.0, + "max": 1.0, + "step": 0.01, + "tooltip": "Blend between the prompt and the image prompt.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + @classmethod + def VALIDATE_INPUTS(cls, aspect_ratio: str): + try: + validate_aspect_ratio( + aspect_ratio, + minimum_ratio=cls.MINIMUM_RATIO, + maximum_ratio=cls.MAXIMUM_RATIO, + minimum_ratio_str=cls.MINIMUM_RATIO_STR, + maximum_ratio_str=cls.MAXIMUM_RATIO_STR, + ) + except Exception as e: + return str(e) + return True + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + prompt: str, + aspect_ratio: str, + prompt_upsampling=False, + raw=False, + seed=0, + image_prompt=None, + image_prompt_strength=0.1, + auth_token=None, + **kwargs, + ): + if image_prompt is None: + validate_string(prompt, strip_whitespace=False) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + 
path="/proxy/bfl/flux-pro-1.1-ultra/generate", + method=HttpMethod.POST, + request_model=BFLFluxProUltraGenerateRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxProUltraGenerateRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + seed=seed, + aspect_ratio=validate_aspect_ratio( + aspect_ratio, + minimum_ratio=self.MINIMUM_RATIO, + maximum_ratio=self.MAXIMUM_RATIO, + minimum_ratio_str=self.MINIMUM_RATIO_STR, + maximum_ratio_str=self.MAXIMUM_RATIO_STR, + ), + raw=raw, + image_prompt=( + image_prompt + if image_prompt is None + else convert_image_to_base64(image_prompt) + ), + image_prompt_strength=( + None if image_prompt is None else round(image_prompt_strength, 2) + ), + ), + auth_token=auth_token, + ) + output_image = handle_bfl_synchronous_operation(operation) + return (output_image,) + + + +class FluxProImageNode(ComfyNodeABC): + """ + Generates images synchronously based on prompt and resolution. + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + "width": ( + IO.INT, + { + "default": 1024, + "min": 256, + "max": 1440, + "step": 32, + }, + ), + "height": ( + IO.INT, + { + "default": 768, + "min": 256, + "max": 1440, + "step": 32, + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "optional": { + "image_prompt": (IO.IMAGE,), + # "image_prompt_strength": ( + # IO.FLOAT, + # { + # "default": 0.1, + # "min": 0.0, + # "max": 1.0, + # "step": 0.01, + # "tooltip": "Blend between the prompt and the image prompt.", + # }, + # ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + prompt: str, + prompt_upsampling, + width: int, + height: int, + seed=0, + image_prompt=None, + # image_prompt_strength=0.1, + auth_token=None, + **kwargs, + ): + image_prompt = ( + image_prompt + if image_prompt is None + else convert_image_to_base64(image_prompt) + ) + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/bfl/flux-pro-1.1/generate", + method=HttpMethod.POST, + request_model=BFLFluxProGenerateRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxProGenerateRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + width=width, + height=height, + seed=seed, + image_prompt=image_prompt, + ), + auth_token=auth_token, + ) + output_image = handle_bfl_synchronous_operation(operation) + return (output_image,) + + +class FluxProExpandNode(ComfyNodeABC): + """ + Outpaints image based on prompt. 
+ """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE,), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + "top": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2048, + "tooltip": "Number of pixels to expand at the top of the image" + }, + ), + "bottom": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2048, + "tooltip": "Number of pixels to expand at the bottom of the image" + }, + ), + "left": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2048, + "tooltip": "Number of pixels to expand at the left side of the image" + }, + ), + "right": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2048, + "tooltip": "Number of pixels to expand at the right side of the image" + }, + ), + "guidance": ( + IO.FLOAT, + { + "default": 60, + "min": 1.5, + "max": 100, + "tooltip": "Guidance strength for the image generation process" + }, + ), + "steps": ( + IO.INT, + { + "default": 50, + "min": 15, + "max": 50, + "tooltip": "Number of steps for the image generation process" + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + image: torch.Tensor, + prompt: str, + prompt_upsampling: bool, + top: int, + bottom: int, + left: int, + right: int, + steps: int, + guidance: float, + seed=0, + auth_token=None, + **kwargs, + ): + image = convert_image_to_base64(image) + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/bfl/flux-pro-1.0-expand/generate", + method=HttpMethod.POST, + request_model=BFLFluxExpandImageRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxExpandImageRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + top=top, + bottom=bottom, + left=left, + right=right, + steps=steps, + guidance=guidance, + seed=seed, + image=image, + ), + auth_token=auth_token, + ) + output_image = handle_bfl_synchronous_operation(operation) + return (output_image,) + + + +class FluxProFillNode(ComfyNodeABC): + """ + Inpaints image based on mask and prompt. + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE,), + "mask": (IO.MASK,), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + "guidance": ( + IO.FLOAT, + { + "default": 60, + "min": 1.5, + "max": 100, + "tooltip": "Guidance strength for the image generation process" + }, + ), + "steps": ( + IO.INT, + { + "default": 50, + "min": 15, + "max": 50, + "tooltip": "Number of steps for the image generation process" + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + image: torch.Tensor, + mask: torch.Tensor, + prompt: str, + prompt_upsampling: bool, + steps: int, + guidance: float, + seed=0, + auth_token=None, + **kwargs, + ): + # prepare mask + mask = resize_mask_to_image(mask, image) + mask = convert_image_to_base64(convert_mask_to_image(mask)) + # make sure image will have alpha channel removed + image = convert_image_to_base64(image[:,:,:,:3]) + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/bfl/flux-pro-1.0-fill/generate", + method=HttpMethod.POST, + request_model=BFLFluxFillImageRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxFillImageRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + steps=steps, + guidance=guidance, + seed=seed, + image=image, + mask=mask, + ), + auth_token=auth_token, + ) + output_image = handle_bfl_synchronous_operation(operation) + return (output_image,) + + +class FluxProCannyNode(ComfyNodeABC): + """ + Generate image using a control image (canny). + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "control_image": (IO.IMAGE,), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + "canny_low_threshold": ( + IO.FLOAT, + { + "default": 0.1, + "min": 0.01, + "max": 0.99, + "step": 0.01, + "tooltip": "Low threshold for Canny edge detection; ignored if skip_processing is True" + }, + ), + "canny_high_threshold": ( + IO.FLOAT, + { + "default": 0.4, + "min": 0.01, + "max": 0.99, + "step": 0.01, + "tooltip": "High threshold for Canny edge detection; ignored if skip_processing is True" + }, + ), + "skip_preprocessing": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.", + }, + ), + "guidance": ( + IO.FLOAT, + { + "default": 30, + "min": 1, + "max": 100, + "tooltip": "Guidance strength for the image generation process" + }, + ), + "steps": ( + IO.INT, + { + "default": 50, + "min": 15, + "max": 50, + "tooltip": "Number of steps for the image generation process" + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + control_image: torch.Tensor, + prompt: str, + prompt_upsampling: bool, + canny_low_threshold: float, + canny_high_threshold: float, + skip_preprocessing: bool, + steps: int, + guidance: float, + seed=0, + auth_token=None, + **kwargs, + ): + control_image = convert_image_to_base64(control_image[:,:,:,:3]) + preprocessed_image = None + + # scale canny threshold between 0-500, to match BFL's API + def scale_value(value: float, min_val=0, max_val=500): + return min_val + value * (max_val - min_val) + canny_low_threshold = int(round(scale_value(canny_low_threshold))) + canny_high_threshold = int(round(scale_value(canny_high_threshold))) + + + if skip_preprocessing: + preprocessed_image = control_image + control_image = None + canny_low_threshold = None + canny_high_threshold = None + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/bfl/flux-pro-1.0-canny/generate", + method=HttpMethod.POST, + request_model=BFLFluxCannyImageRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxCannyImageRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + steps=steps, + guidance=guidance, + seed=seed, + control_image=control_image, + canny_low_threshold=canny_low_threshold, + canny_high_threshold=canny_high_threshold, + preprocessed_image=preprocessed_image, + ), + auth_token=auth_token, + ) + output_image = handle_bfl_synchronous_operation(operation) + return (output_image,) + + +class FluxProDepthNode(ComfyNodeABC): + """ + Generate image using a control image (depth). + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "control_image": (IO.IMAGE,), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + "skip_preprocessing": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.", + }, + ), + "guidance": ( + IO.FLOAT, + { + "default": 15, + "min": 1, + "max": 100, + "tooltip": "Guidance strength for the image generation process" + }, + ), + "steps": ( + IO.INT, + { + "default": 50, + "min": 15, + "max": 50, + "tooltip": "Number of steps for the image generation process" + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + control_image: torch.Tensor, + prompt: str, + prompt_upsampling: bool, + skip_preprocessing: bool, + steps: int, + guidance: float, + seed=0, + auth_token=None, + **kwargs, + ): + control_image = convert_image_to_base64(control_image[:,:,:,:3]) + preprocessed_image = None + + if skip_preprocessing: + preprocessed_image = control_image + control_image = None + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/bfl/flux-pro-1.0-depth/generate", + method=HttpMethod.POST, + request_model=BFLFluxDepthImageRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxDepthImageRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + steps=steps, + guidance=guidance, + seed=seed, + control_image=control_image, + preprocessed_image=preprocessed_image, + ), + auth_token=auth_token, + ) + output_image = handle_bfl_synchronous_operation(operation) + return (output_image,) + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "FluxProUltraImageNode": FluxProUltraImageNode, + # "FluxProImageNode": FluxProImageNode, + "FluxProExpandNode": FluxProExpandNode, + "FluxProFillNode": FluxProFillNode, + "FluxProCannyNode": FluxProCannyNode, + "FluxProDepthNode": FluxProDepthNode, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "FluxProUltraImageNode": "Flux 1.1 [pro] Ultra Image", + # "FluxProImageNode": "Flux 1.1 [pro] Image", + "FluxProExpandNode": "Flux.1 Expand Image", + "FluxProFillNode": "Flux.1 Fill Image", + "FluxProCannyNode": "Flux.1 Canny Control Image", + "FluxProDepthNode": "Flux.1 Depth Control Image", +} diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py new file mode 100644 index 000000000..45c021f4a --- /dev/null +++ b/comfy_api_nodes/nodes_ideogram.py @@ -0,0 +1,777 @@ +from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict +from inspect import cleandoc +from PIL import Image +import numpy as np +import io +import torch +from comfy_api_nodes.apis import ( + IdeogramGenerateRequest, + IdeogramGenerateResponse, + ImageRequest, + IdeogramV3Request, + IdeogramV3EditRequest, +) + +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, +) + +from 
comfy_api_nodes.apinode_utils import ( + download_url_to_bytesio, + bytesio_to_image_tensor, + resize_mask_to_image, +) + +V1_V1_RES_MAP = { + "Auto":"AUTO", + "512 x 1536":"RESOLUTION_512_1536", + "576 x 1408":"RESOLUTION_576_1408", + "576 x 1472":"RESOLUTION_576_1472", + "576 x 1536":"RESOLUTION_576_1536", + "640 x 1024":"RESOLUTION_640_1024", + "640 x 1344":"RESOLUTION_640_1344", + "640 x 1408":"RESOLUTION_640_1408", + "640 x 1472":"RESOLUTION_640_1472", + "640 x 1536":"RESOLUTION_640_1536", + "704 x 1152":"RESOLUTION_704_1152", + "704 x 1216":"RESOLUTION_704_1216", + "704 x 1280":"RESOLUTION_704_1280", + "704 x 1344":"RESOLUTION_704_1344", + "704 x 1408":"RESOLUTION_704_1408", + "704 x 1472":"RESOLUTION_704_1472", + "720 x 1280":"RESOLUTION_720_1280", + "736 x 1312":"RESOLUTION_736_1312", + "768 x 1024":"RESOLUTION_768_1024", + "768 x 1088":"RESOLUTION_768_1088", + "768 x 1152":"RESOLUTION_768_1152", + "768 x 1216":"RESOLUTION_768_1216", + "768 x 1232":"RESOLUTION_768_1232", + "768 x 1280":"RESOLUTION_768_1280", + "768 x 1344":"RESOLUTION_768_1344", + "832 x 960":"RESOLUTION_832_960", + "832 x 1024":"RESOLUTION_832_1024", + "832 x 1088":"RESOLUTION_832_1088", + "832 x 1152":"RESOLUTION_832_1152", + "832 x 1216":"RESOLUTION_832_1216", + "832 x 1248":"RESOLUTION_832_1248", + "864 x 1152":"RESOLUTION_864_1152", + "896 x 960":"RESOLUTION_896_960", + "896 x 1024":"RESOLUTION_896_1024", + "896 x 1088":"RESOLUTION_896_1088", + "896 x 1120":"RESOLUTION_896_1120", + "896 x 1152":"RESOLUTION_896_1152", + "960 x 832":"RESOLUTION_960_832", + "960 x 896":"RESOLUTION_960_896", + "960 x 1024":"RESOLUTION_960_1024", + "960 x 1088":"RESOLUTION_960_1088", + "1024 x 640":"RESOLUTION_1024_640", + "1024 x 768":"RESOLUTION_1024_768", + "1024 x 832":"RESOLUTION_1024_832", + "1024 x 896":"RESOLUTION_1024_896", + "1024 x 960":"RESOLUTION_1024_960", + "1024 x 1024":"RESOLUTION_1024_1024", + "1088 x 768":"RESOLUTION_1088_768", + "1088 x 832":"RESOLUTION_1088_832", + "1088 x 896":"RESOLUTION_1088_896", + "1088 x 960":"RESOLUTION_1088_960", + "1120 x 896":"RESOLUTION_1120_896", + "1152 x 704":"RESOLUTION_1152_704", + "1152 x 768":"RESOLUTION_1152_768", + "1152 x 832":"RESOLUTION_1152_832", + "1152 x 864":"RESOLUTION_1152_864", + "1152 x 896":"RESOLUTION_1152_896", + "1216 x 704":"RESOLUTION_1216_704", + "1216 x 768":"RESOLUTION_1216_768", + "1216 x 832":"RESOLUTION_1216_832", + "1232 x 768":"RESOLUTION_1232_768", + "1248 x 832":"RESOLUTION_1248_832", + "1280 x 704":"RESOLUTION_1280_704", + "1280 x 720":"RESOLUTION_1280_720", + "1280 x 768":"RESOLUTION_1280_768", + "1280 x 800":"RESOLUTION_1280_800", + "1312 x 736":"RESOLUTION_1312_736", + "1344 x 640":"RESOLUTION_1344_640", + "1344 x 704":"RESOLUTION_1344_704", + "1344 x 768":"RESOLUTION_1344_768", + "1408 x 576":"RESOLUTION_1408_576", + "1408 x 640":"RESOLUTION_1408_640", + "1408 x 704":"RESOLUTION_1408_704", + "1472 x 576":"RESOLUTION_1472_576", + "1472 x 640":"RESOLUTION_1472_640", + "1472 x 704":"RESOLUTION_1472_704", + "1536 x 512":"RESOLUTION_1536_512", + "1536 x 576":"RESOLUTION_1536_576", + "1536 x 640":"RESOLUTION_1536_640", +} + +V1_V2_RATIO_MAP = { + "1:1":"ASPECT_1_1", + "4:3":"ASPECT_4_3", + "3:4":"ASPECT_3_4", + "16:9":"ASPECT_16_9", + "9:16":"ASPECT_9_16", + "2:1":"ASPECT_2_1", + "1:2":"ASPECT_1_2", + "3:2":"ASPECT_3_2", + "2:3":"ASPECT_2_3", + "4:5":"ASPECT_4_5", + "5:4":"ASPECT_5_4", +} + +V3_RATIO_MAP = { + "1:3":"1x3", + "3:1":"3x1", + "1:2":"1x2", + "2:1":"2x1", + "9:16":"9x16", + "16:9":"16x9", + "10:16":"10x16", + "16:10":"16x10", + 
"2:3":"2x3", + "3:2":"3x2", + "3:4":"3x4", + "4:3":"4x3", + "4:5":"4x5", + "5:4":"5x4", + "1:1":"1x1", +} + +V3_RESOLUTIONS= [ + "Auto", + "512x1536", + "576x1408", + "576x1472", + "576x1536", + "640x1344", + "640x1408", + "640x1472", + "640x1536", + "704x1152", + "704x1216", + "704x1280", + "704x1344", + "704x1408", + "704x1472", + "736x1312", + "768x1088", + "768x1216", + "768x1280", + "768x1344", + "800x1280", + "832x960", + "832x1024", + "832x1088", + "832x1152", + "832x1216", + "832x1248", + "864x1152", + "896x960", + "896x1024", + "896x1088", + "896x1120", + "896x1152", + "960x832", + "960x896", + "960x1024", + "960x1088", + "1024x832", + "1024x896", + "1024x960", + "1024x1024", + "1088x768", + "1088x832", + "1088x896", + "1088x960", + "1120x896", + "1152x704", + "1152x832", + "1152x864", + "1152x896", + "1216x704", + "1216x768", + "1216x832", + "1248x832", + "1280x704", + "1280x768", + "1280x800", + "1312x736", + "1344x640", + "1344x704", + "1344x768", + "1408x576", + "1408x640", + "1408x704", + "1472x576", + "1472x640", + "1472x704", + "1536x512", + "1536x576", + "1536x640" +] + +def download_and_process_images(image_urls): + """Helper function to download and process multiple images from URLs""" + + # Initialize list to store image tensors + image_tensors = [] + + for image_url in image_urls: + # Using functions from apinode_utils.py to handle downloading and processing + image_bytesio = download_url_to_bytesio(image_url) # Download image content to BytesIO + img_tensor = bytesio_to_image_tensor(image_bytesio, mode="RGB") # Convert to torch.Tensor with RGB mode + image_tensors.append(img_tensor) + + # Stack tensors to match (N, width, height, channels) + if image_tensors: + stacked_tensors = torch.cat(image_tensors, dim=0) + else: + raise Exception("No valid images were processed") + + return stacked_tensors + + +class IdeogramV1(ComfyNodeABC): + """ + Generates images synchronously using the Ideogram V1 model. + + Images links are available for a limited period of time; if you would like to keep the image, you must download it. 
+ """ + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "turbo": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)", + } + ), + }, + "optional": { + "aspect_ratio": ( + IO.COMBO, + { + "options": list(V1_V2_RATIO_MAP.keys()), + "default": "1:1", + "tooltip": "The aspect ratio for image generation.", + }, + ), + "magic_prompt_option": ( + IO.COMBO, + { + "options": ["AUTO", "ON", "OFF"], + "default": "AUTO", + "tooltip": "Determine if MagicPrompt should be used in generation", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2147483647, + "step": 1, + "control_after_generate": True, + "display": "number", + }, + ), + "negative_prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Description of what to exclude from the image", + }, + ), + "num_images": ( + IO.INT, + {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node/image/Ideogram/v1" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call( + self, + prompt, + turbo=False, + aspect_ratio="1:1", + magic_prompt_option="AUTO", + seed=0, + negative_prompt="", + num_images=1, + auth_token=None, + ): + # Determine the model based on turbo setting + aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) + model = "V_1_TURBO" if turbo else "V_1" + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/ideogram/generate", + method=HttpMethod.POST, + request_model=IdeogramGenerateRequest, + response_model=IdeogramGenerateResponse, + ), + request=IdeogramGenerateRequest( + image_request=ImageRequest( + prompt=prompt, + model=model, + num_images=num_images, + seed=seed, + aspect_ratio=aspect_ratio if aspect_ratio != "ASPECT_1_1" else None, + magic_prompt_option=( + magic_prompt_option if magic_prompt_option != "AUTO" else None + ), + negative_prompt=negative_prompt if negative_prompt else None, + ) + ), + auth_token=auth_token, + ) + + response = operation.execute() + + if not response.data or len(response.data) == 0: + raise Exception("No images were generated in the response") + + image_urls = [image_data.url for image_data in response.data if image_data.url] + + if not image_urls: + raise Exception("No image URLs were generated in the response") + + return (download_and_process_images(image_urls),) + + +class IdeogramV2(ComfyNodeABC): + """ + Generates images synchronously using the Ideogram V2 model. + + Images links are available for a limited period of time; if you would like to keep the image, you must download it. + """ + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "turbo": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)", + } + ), + }, + "optional": { + "aspect_ratio": ( + IO.COMBO, + { + "options": list(V1_V2_RATIO_MAP.keys()), + "default": "1:1", + "tooltip": "The aspect ratio for image generation. 
Ignored if resolution is not set to AUTO.", + }, + ), + "resolution": ( + IO.COMBO, + { + "options": list(V1_V1_RES_MAP.keys()), + "default": "Auto", + "tooltip": "The resolution for image generation. If not set to AUTO, this overrides the aspect_ratio setting.", + }, + ), + "magic_prompt_option": ( + IO.COMBO, + { + "options": ["AUTO", "ON", "OFF"], + "default": "AUTO", + "tooltip": "Determine if MagicPrompt should be used in generation", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2147483647, + "step": 1, + "control_after_generate": True, + "display": "number", + }, + ), + "style_type": ( + IO.COMBO, + { + "options": ["AUTO", "GENERAL", "REALISTIC", "DESIGN", "RENDER_3D", "ANIME"], + "default": "NONE", + "tooltip": "Style type for generation (V2 only)", + }, + ), + "negative_prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Description of what to exclude from the image", + }, + ), + "num_images": ( + IO.INT, + {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}, + ), + #"color_palette": ( + # IO.STRING, + # { + # "multiline": False, + # "default": "", + # "tooltip": "Color palette preset name or hex colors with weights", + # }, + #), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node/image/Ideogram/v2" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call( + self, + prompt, + turbo=False, + aspect_ratio="1:1", + resolution="Auto", + magic_prompt_option="AUTO", + seed=0, + style_type="NONE", + negative_prompt="", + num_images=1, + color_palette="", + auth_token=None, + ): + aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) + resolution = V1_V1_RES_MAP.get(resolution, None) + # Determine the model based on turbo setting + model = "V_2_TURBO" if turbo else "V_2" + + # Handle resolution vs aspect_ratio logic + # If resolution is not AUTO, it overrides aspect_ratio + final_resolution = None + final_aspect_ratio = None + + if resolution != "AUTO": + final_resolution = resolution + else: + final_aspect_ratio = aspect_ratio if aspect_ratio != "ASPECT_1_1" else None + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/ideogram/generate", + method=HttpMethod.POST, + request_model=IdeogramGenerateRequest, + response_model=IdeogramGenerateResponse, + ), + request=IdeogramGenerateRequest( + image_request=ImageRequest( + prompt=prompt, + model=model, + num_images=num_images, + seed=seed, + aspect_ratio=final_aspect_ratio, + resolution=final_resolution, + magic_prompt_option=( + magic_prompt_option if magic_prompt_option != "AUTO" else None + ), + style_type=style_type if style_type != "NONE" else None, + negative_prompt=negative_prompt if negative_prompt else None, + color_palette=color_palette if color_palette else None, + ) + ), + auth_token=auth_token, + ) + + response = operation.execute() + + if not response.data or len(response.data) == 0: + raise Exception("No images were generated in the response") + + image_urls = [image_data.url for image_data in response.data if image_data.url] + + if not image_urls: + raise Exception("No image URLs were generated in the response") + + return (download_and_process_images(image_urls),) + +class IdeogramV3(ComfyNodeABC): + """ + Generates images synchronously using the Ideogram V3 model. + + Supports both regular image generation from text prompts and image editing with mask. 
+ Images links are available for a limited period of time; if you would like to keep the image, you must download it. + """ + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation or editing", + }, + ), + }, + "optional": { + "image": ( + IO.IMAGE, + { + "default": None, + "tooltip": "Optional reference image for image editing.", + }, + ), + "mask": ( + IO.MASK, + { + "default": None, + "tooltip": "Optional mask for inpainting (white areas will be replaced)", + }, + ), + "aspect_ratio": ( + IO.COMBO, + { + "options": list(V3_RATIO_MAP.keys()), + "default": "1:1", + "tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to Auto.", + }, + ), + "resolution": ( + IO.COMBO, + { + "options": V3_RESOLUTIONS, + "default": "Auto", + "tooltip": "The resolution for image generation. If not set to Auto, this overrides the aspect_ratio setting.", + }, + ), + "magic_prompt_option": ( + IO.COMBO, + { + "options": ["AUTO", "ON", "OFF"], + "default": "AUTO", + "tooltip": "Determine if MagicPrompt should be used in generation", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2147483647, + "step": 1, + "control_after_generate": True, + "display": "number", + }, + ), + "num_images": ( + IO.INT, + {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}, + ), + "rendering_speed": ( + IO.COMBO, + { + "options": ["BALANCED", "TURBO", "QUALITY"], + "default": "BALANCED", + "tooltip": "Controls the trade-off between generation speed and quality", + }, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node/image/Ideogram/v3" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call( + self, + prompt, + image=None, + mask=None, + resolution="Auto", + aspect_ratio="1:1", + magic_prompt_option="AUTO", + seed=0, + num_images=1, + rendering_speed="BALANCED", + auth_token=None, + ): + # Check if both image and mask are provided for editing mode + if image is not None and mask is not None: + # Edit mode + path = "/proxy/ideogram/ideogram-v3/edit" + + # Process image and mask + input_tensor = image.squeeze().cpu() + # Resize mask to match image dimension + mask = resize_mask_to_image(mask, image, allow_gradient=False) + # Invert mask, as Ideogram API will edit black areas instead of white areas (opposite of convention). 
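+            # Example of the inversion below: a ComfyUI mask value of 1.0
+            # (white, "edit this region") becomes 0.0 (black), which is the
+            # region the Ideogram edit endpoint repaints.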
+ mask = 1.0 - mask + + # Validate mask dimensions match image + if mask.shape[1:] != image.shape[1:-1]: + raise Exception("Mask and Image must be the same size") + + # Process image + img_np = (input_tensor.numpy() * 255).astype(np.uint8) + img = Image.fromarray(img_np) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format="PNG") + img_byte_arr.seek(0) + img_binary = img_byte_arr + img_binary.name = "image.png" + + # Process mask - white areas will be replaced + mask_np = (mask.squeeze().cpu().numpy() * 255).astype(np.uint8) + mask_img = Image.fromarray(mask_np) + mask_byte_arr = io.BytesIO() + mask_img.save(mask_byte_arr, format="PNG") + mask_byte_arr.seek(0) + mask_binary = mask_byte_arr + mask_binary.name = "mask.png" + + # Create edit request + edit_request = IdeogramV3EditRequest( + prompt=prompt, + rendering_speed=rendering_speed, + ) + + # Add optional parameters + if magic_prompt_option != "AUTO": + edit_request.magic_prompt = magic_prompt_option + if seed != 0: + edit_request.seed = seed + if num_images > 1: + edit_request.num_images = num_images + + # Execute the operation for edit mode + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=IdeogramV3EditRequest, + response_model=IdeogramGenerateResponse, + ), + request=edit_request, + files={ + "image": img_binary, + "mask": mask_binary, + }, + content_type="multipart/form-data", + auth_token=auth_token, + ) + + elif image is not None or mask is not None: + # If only one of image or mask is provided, raise an error + raise Exception("Ideogram V3 image editing requires both an image AND a mask") + else: + # Generation mode + path = "/proxy/ideogram/ideogram-v3/generate" + + # Create generation request + gen_request = IdeogramV3Request( + prompt=prompt, + rendering_speed=rendering_speed, + ) + + # Handle resolution vs aspect ratio + if resolution != "Auto": + gen_request.resolution = resolution + elif aspect_ratio != "1:1": + v3_aspect = V3_RATIO_MAP.get(aspect_ratio) + if v3_aspect: + gen_request.aspect_ratio = v3_aspect + + # Add optional parameters + if magic_prompt_option != "AUTO": + gen_request.magic_prompt = magic_prompt_option + if seed != 0: + gen_request.seed = seed + if num_images > 1: + gen_request.num_images = num_images + + # Execute the operation for generation mode + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=IdeogramV3Request, + response_model=IdeogramGenerateResponse, + ), + request=gen_request, + auth_token=auth_token, + ) + + # Execute the operation and process response + response = operation.execute() + + if not response.data or len(response.data) == 0: + raise Exception("No images were generated in the response") + + image_urls = [image_data.url for image_data in response.data if image_data.url] + + if not image_urls: + raise Exception("No image URLs were generated in the response") + + return (download_and_process_images(image_urls),) + + +NODE_CLASS_MAPPINGS = { + "IdeogramV1": IdeogramV1, + "IdeogramV2": IdeogramV2, + "IdeogramV3": IdeogramV3, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "IdeogramV1": "Ideogram V1", + "IdeogramV2": "Ideogram V2", + "IdeogramV3": "Ideogram V3", +} + diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py new file mode 100644 index 000000000..9aa8df58b --- /dev/null +++ b/comfy_api_nodes/nodes_kling.py @@ -0,0 +1,1563 @@ +"""Kling API Nodes + +For source of truth on the allowed permutations of request fields, please 
reference: +- [Compatibility Table](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) +""" + +from __future__ import annotations +from typing import Optional, TypeVar, Any +import math +import logging + +import torch + +from comfy_api_nodes.apis import ( + KlingTaskStatus, + KlingCameraControl, + KlingCameraConfig, + KlingCameraControlType, + KlingVideoGenDuration, + KlingVideoGenMode, + KlingVideoGenAspectRatio, + KlingVideoGenModelName, + KlingText2VideoRequest, + KlingText2VideoResponse, + KlingImage2VideoRequest, + KlingImage2VideoResponse, + KlingVideoExtendRequest, + KlingVideoExtendResponse, + KlingLipSyncVoiceLanguage, + KlingLipSyncInputObject, + KlingLipSyncRequest, + KlingLipSyncResponse, + KlingVirtualTryOnModelName, + KlingVirtualTryOnRequest, + KlingVirtualTryOnResponse, + KlingVideoResult, + KlingImageResult, + KlingImageGenerationsRequest, + KlingImageGenerationsResponse, + KlingImageGenImageReferenceType, + KlingImageGenModelName, + KlingImageGenAspectRatio, + KlingVideoEffectsRequest, + KlingVideoEffectsResponse, + KlingDualCharacterEffectsScene, + KlingSingleImageEffectsScene, + KlingDualCharacterEffectInput, + KlingSingleImageEffectInput, + KlingCharacterEffectModelName, + KlingSingleImageEffectModelName, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + tensor_to_base64_string, + download_url_to_video_output, + upload_video_to_comfyapi, + upload_audio_to_comfyapi, + download_url_to_image_tensor, +) +from comfy_api_nodes.mapper_utils import model_field_to_node_input +from comfy_api.input.basic_types import AudioInput +from comfy_api.input.video_types import VideoInput +from comfy_api.input_impl import VideoFromFile +from comfy.comfy_types.node_typing import IO, InputTypeOptions, ComfyNodeABC + +KLING_API_VERSION = "v1" +PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video" +PATH_IMAGE_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/image2video" +PATH_VIDEO_EXTEND = f"/proxy/kling/{KLING_API_VERSION}/videos/video-extend" +PATH_LIP_SYNC = f"/proxy/kling/{KLING_API_VERSION}/videos/lip-sync" +PATH_VIDEO_EFFECTS = f"/proxy/kling/{KLING_API_VERSION}/videos/effects" +PATH_CHARACTER_IMAGE = f"/proxy/kling/{KLING_API_VERSION}/images/generations" +PATH_VIRTUAL_TRY_ON = f"/proxy/kling/{KLING_API_VERSION}/images/kolors-virtual-try-on" +PATH_IMAGE_GENERATIONS = f"/proxy/kling/{KLING_API_VERSION}/images/generations" + + +MAX_PROMPT_LENGTH_T2V = 2500 +MAX_PROMPT_LENGTH_I2V = 500 +MAX_PROMPT_LENGTH_IMAGE_GEN = 500 +MAX_NEGATIVE_PROMPT_LENGTH_IMAGE_GEN = 200 +MAX_PROMPT_LENGTH_LIP_SYNC = 120 + +R = TypeVar("R") + + +class KlingApiError(Exception): + """Base exception for Kling API errors.""" + + pass + + +def poll_until_finished(auth_token: str, api_endpoint: ApiEndpoint[Any, R]) -> R: + """Polls the Kling API endpoint until the task reaches a terminal state, then returns the response.""" + return PollingOperation( + poll_endpoint=api_endpoint, + completed_statuses=[ + KlingTaskStatus.succeed.value, + ], + failed_statuses=[KlingTaskStatus.failed.value], + status_extractor=lambda response: ( + response.data.task_status.value + if response.data and response.data.task_status + else None + ), + auth_token=auth_token, + ).execute() + + +def is_valid_camera_control_configs(configs: list[float]) -> bool: + """Verifies that at least one camera control configuration is non-zero.""" + return any(not 
math.isclose(value, 0.0) for value in configs)
+
+
+def is_valid_prompt(prompt: str) -> bool:
+    """Verifies that the prompt is not empty."""
+    return bool(prompt)
+
+
+def is_valid_task_creation_response(response: KlingText2VideoResponse) -> bool:
+    """Verifies that the initial response contains a task ID."""
+    return bool(response.data.task_id)
+
+
+def is_valid_video_response(response: KlingText2VideoResponse) -> bool:
+    """Verifies that the response contains a task result with at least one video."""
+    return (
+        response.data is not None
+        and response.data.task_result is not None
+        and response.data.task_result.videos is not None
+        and len(response.data.task_result.videos) > 0
+    )
+
+
+def is_valid_image_response(response: KlingVirtualTryOnResponse) -> bool:
+    """Verifies that the response contains a task result with at least one image."""
+    return (
+        response.data is not None
+        and response.data.task_result is not None
+        and response.data.task_result.images is not None
+        and len(response.data.task_result.images) > 0
+    )
+
+
+def validate_prompts(prompt: str, negative_prompt: str, max_length: int) -> bool:
+    """Verifies that the positive prompt is not empty and that neither prompt is too long."""
+    if not prompt:
+        raise ValueError("Positive prompt is empty")
+    if len(prompt) > max_length:
+        raise ValueError(f"Positive prompt is too long: {len(prompt)} characters")
+    if negative_prompt and len(negative_prompt) > max_length:
+        raise ValueError(
+            f"Negative prompt is too long: {len(negative_prompt)} characters"
+        )
+    return True
+
+
+def validate_task_creation_response(response) -> None:
+    """Validates that the Kling task creation request was successful."""
+    if not is_valid_task_creation_response(response):
+        error_msg = f"Kling initial request failed. Code: {response.code}, Message: {response.message}, Data: {response.data}"
+        logging.error(error_msg)
+        raise KlingApiError(error_msg)
+
+
+def validate_video_result_response(response) -> None:
+    """Validates that the Kling task result contains a video."""
+    if not is_valid_video_response(response):
+        error_msg = f"Kling task {response.data.task_id} succeeded but no video data found in response."
+        logging.error(f"Error: {error_msg}.\nResponse: {response}")
+        raise KlingApiError(error_msg)
+
+
+def validate_image_result_response(response) -> None:
+    """Validates that the Kling task result contains an image."""
+    if not is_valid_image_response(response):
+        error_msg = f"Kling task {response.data.task_id} succeeded but no image data found in response."
+        logging.error(f"Error: {error_msg}.\nResponse: {response}")
+        raise KlingApiError(error_msg)
+
+
+def get_camera_control_input_config(
+    tooltip: str, default: float = 0.0
+) -> tuple[IO, InputTypeOptions]:
+    """Returns common InputTypeOptions for Kling camera control configurations."""
+    input_config = {
+        "default": default,
+        "min": -10.0,
+        "max": 10.0,
+        "step": 0.25,
+        "display": "slider",
+        "tooltip": tooltip,
+    }
+    return IO.FLOAT, input_config
+
+
+def get_video_from_response(response) -> KlingVideoResult:
+    """Returns the first video object from the Kling video generation task result."""
+    video = response.data.task_result.videos[0]
+    logging.info(
+        "Kling task %s succeeded. Video URL: %s", response.data.task_id, video.url
+    )
+    return video
+
+
+def get_images_from_response(response) -> list[KlingImageResult]:
+    """Returns the list of image objects from the Kling image generation task result."""
+    images = response.data.task_result.images
+    logging.info("Kling task %s succeeded. 
Images: %s", response.data.task_id, images) + return images + + +def video_result_to_node_output( + video: KlingVideoResult, +) -> tuple[VideoFromFile, str, str]: + """Converts a KlingVideoResult to a tuple of (VideoFromFile, str, str) to be used as a ComfyUI node output.""" + return ( + download_url_to_video_output(video.url), + str(video.id), + str(video.duration), + ) + + +def image_result_to_node_output( + images: list[KlingImageResult], +) -> torch.Tensor: + """ + Converts a KlingImageResult to a tuple containing a [B, H, W, C] tensor. + If multiple images are returned, they will be stacked along the batch dimension. + """ + if len(images) == 1: + return download_url_to_image_tensor(images[0].url) + else: + return torch.cat([download_url_to_image_tensor(image.url) for image in images]) + + +class KlingNodeBase(ComfyNodeABC): + """Base class for Kling nodes.""" + + FUNCTION = "api_call" + CATEGORY = "api node/video/Kling" + API_NODE = True + + +class KlingCameraControls(KlingNodeBase): + """Kling Camera Controls Node""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "camera_control_type": model_field_to_node_input( + IO.COMBO, + KlingCameraControl, + "type", + enum_type=KlingCameraControlType, + ), + "horizontal_movement": get_camera_control_input_config( + "Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right" + ), + "vertical_movement": get_camera_control_input_config( + "Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward." + ), + "pan": get_camera_control_input_config( + "Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.", + default=0.5, + ), + "tilt": get_camera_control_input_config( + "Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.", + ), + "roll": get_camera_control_input_config( + "Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", + ), + "zoom": get_camera_control_input_config( + "Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.", + ), + } + } + + DESCRIPTION = "Allows specifying configuration options for Kling Camera Controls and motion control effects." 
+ RETURN_TYPES = ("CAMERA_CONTROL",) + RETURN_NAMES = ("camera_control",) + FUNCTION = "main" + + @classmethod + def VALIDATE_INPUTS( + cls, + horizontal_movement: float, + vertical_movement: float, + pan: float, + tilt: float, + roll: float, + zoom: float, + ) -> bool | str: + if not is_valid_camera_control_configs( + [ + horizontal_movement, + vertical_movement, + pan, + tilt, + roll, + zoom, + ] + ): + return "Invalid camera control configs: at least one of the values must be non-zero" + return True + + def main( + self, + camera_control_type: str, + horizontal_movement: float, + vertical_movement: float, + pan: float, + tilt: float, + roll: float, + zoom: float, + ) -> tuple[KlingCameraControl]: + return ( + KlingCameraControl( + type=KlingCameraControlType(camera_control_type), + config=KlingCameraConfig( + horizontal=horizontal_movement, + vertical=vertical_movement, + pan=pan, + roll=roll, + tilt=tilt, + zoom=zoom, + ), + ), + ) + + +class KlingTextToVideoNode(KlingNodeBase): + """Kling Text to Video Node""" + + @staticmethod + def get_mode_string_mapping() -> dict[str, tuple[str, str, str]]: + """ + Returns a mapping of mode strings to their corresponding (mode, duration, model_name) tuples. + Only includes config combos that support the `image_tail` request field. + + See: [Kling API Docs Capability Map](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) + """ + return { + "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), + "standard mode / 10s duration / kling-v1": ("std", "10", "kling-v1"), + "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"), + "pro mode / 10s duration / kling-v1": ("pro", "10", "kling-v1"), + "standard mode / 5s duration / kling-v1-6": ("std", "5", "kling-v1-6"), + "standard mode / 10s duration / kling-v1-6": ("std", "10", "kling-v1-6"), + "pro mode / 5s duration / kling-v2-master": ("pro", "5", "kling-v2-master"), + "pro mode / 10s duration / kling-v2-master": ("pro", "10", "kling-v2-master"), + "standard mode / 5s duration / kling-v2-master": ("std", "5", "kling-v2-master"), + "standard mode / 10s duration / kling-v2-master": ("std", "10", "kling-v2-master"), + } + + @classmethod + def INPUT_TYPES(s): + modes = list(KlingTextToVideoNode.get_mode_string_mapping().keys()) + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, KlingText2VideoRequest, "prompt", multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, KlingText2VideoRequest, "negative_prompt", multiline=True + ), + "cfg_scale": model_field_to_node_input( + IO.FLOAT, + KlingText2VideoRequest, + "cfg_scale", + default=1.0, + min=0.0, + max=1.0, + ), + "aspect_ratio": model_field_to_node_input( + IO.COMBO, + KlingText2VideoRequest, + "aspect_ratio", + enum_type=KlingVideoGenAspectRatio, + ), + "mode": ( + modes, + { + "default": modes[4], + "tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name.", + }, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = ("VIDEO", "STRING", "STRING") + RETURN_NAMES = ("VIDEO", "video_id", "duration") + DESCRIPTION = "Kling Text to Video Node" + + def get_response(self, task_id: str, auth_token: str) -> KlingText2VideoResponse: + return poll_until_finished( + auth_token, + ApiEndpoint( + path=f"{PATH_TEXT_TO_VIDEO}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingText2VideoResponse, + ), + ) + + def api_call( + self, + prompt: str, + 
negative_prompt: str, + cfg_scale: float, + mode: str, + aspect_ratio: str, + camera_control: Optional[KlingCameraControl] = None, + model_name: Optional[str] = None, + duration: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile, str, str]: + validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) + if model_name is None: + mode, duration, model_name = self.get_mode_string_mapping()[mode] + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_TEXT_TO_VIDEO, + method=HttpMethod.POST, + request_model=KlingText2VideoRequest, + response_model=KlingText2VideoResponse, + ), + request=KlingText2VideoRequest( + prompt=prompt if prompt else None, + negative_prompt=negative_prompt if negative_prompt else None, + duration=KlingVideoGenDuration(duration), + mode=KlingVideoGenMode(mode), + model_name=KlingVideoGenModelName(model_name), + cfg_scale=cfg_scale, + aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), + camera_control=camera_control, + ), + auth_token=auth_token, + ) + + task_creation_response = initial_operation.execute() + validate_task_creation_response(task_creation_response) + + task_id = task_creation_response.data.task_id + final_response = self.get_response(task_id, auth_token) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return video_result_to_node_output(video) + + +class KlingCameraControlT2VNode(KlingTextToVideoNode): + """ + Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera. + Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02. + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, KlingText2VideoRequest, "prompt", multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + KlingText2VideoRequest, + "negative_prompt", + multiline=True, + ), + "cfg_scale": model_field_to_node_input( + IO.FLOAT, + KlingText2VideoRequest, + "cfg_scale", + default=0.75, + min=0.0, + max=1.0, + ), + "aspect_ratio": model_field_to_node_input( + IO.COMBO, + KlingText2VideoRequest, + "aspect_ratio", + enum_type=KlingVideoGenAspectRatio, + ), + "camera_control": ( + "CAMERA_CONTROL", + { + "tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", + }, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text." 
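+    # api_call below pins mode/model_name/duration to the hard-coded supported
+    # combination (std / kling-v1 / 5s) and forwards the remaining inputs to
+    # KlingTextToVideoNode.api_call.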
+ + def api_call( + self, + prompt: str, + negative_prompt: str, + cfg_scale: float, + aspect_ratio: str, + camera_control: Optional[KlingCameraControl] = None, + auth_token: Optional[str] = None, + ): + return super().api_call( + model_name=KlingVideoGenModelName.kling_v1, + cfg_scale=cfg_scale, + mode=KlingVideoGenMode.std, + aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), + duration=KlingVideoGenDuration.field_5, + prompt=prompt, + negative_prompt=negative_prompt, + camera_control=camera_control, + auth_token=auth_token, + ) + + +class KlingImage2VideoNode(KlingNodeBase): + """Kling Image to Video Node""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "start_frame": model_field_to_node_input( + IO.IMAGE, KlingImage2VideoRequest, "image" + ), + "prompt": model_field_to_node_input( + IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + KlingImage2VideoRequest, + "negative_prompt", + multiline=True, + ), + "model_name": model_field_to_node_input( + IO.COMBO, + KlingImage2VideoRequest, + "model_name", + enum_type=KlingVideoGenModelName, + ), + "cfg_scale": model_field_to_node_input( + IO.FLOAT, + KlingImage2VideoRequest, + "cfg_scale", + default=0.8, + min=0.0, + max=1.0, + ), + "mode": model_field_to_node_input( + IO.COMBO, + KlingImage2VideoRequest, + "mode", + enum_type=KlingVideoGenMode, + ), + "aspect_ratio": model_field_to_node_input( + IO.COMBO, + KlingImage2VideoRequest, + "aspect_ratio", + enum_type=KlingVideoGenAspectRatio, + ), + "duration": model_field_to_node_input( + IO.COMBO, + KlingImage2VideoRequest, + "duration", + enum_type=KlingVideoGenDuration, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = ("VIDEO", "STRING", "STRING") + RETURN_NAMES = ("VIDEO", "video_id", "duration") + DESCRIPTION = "Kling Image to Video Node" + + def get_response(self, task_id: str, auth_token: str) -> KlingImage2VideoResponse: + return poll_until_finished( + auth_token, + ApiEndpoint( + path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}", + method=HttpMethod.GET, + request_model=KlingImage2VideoRequest, + response_model=KlingImage2VideoResponse, + ), + ) + + def api_call( + self, + start_frame: torch.Tensor, + prompt: str, + negative_prompt: str, + model_name: str, + cfg_scale: float, + mode: str, + aspect_ratio: str, + duration: str, + camera_control: Optional[KlingCameraControl] = None, + end_frame: Optional[torch.Tensor] = None, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) + + if camera_control is not None: + # Camera control type for image 2 video is always simple + camera_control.type = KlingCameraControlType.simple + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_IMAGE_TO_VIDEO, + method=HttpMethod.POST, + request_model=KlingImage2VideoRequest, + response_model=KlingImage2VideoResponse, + ), + request=KlingImage2VideoRequest( + model_name=KlingVideoGenModelName(model_name), + image=tensor_to_base64_string(start_frame), + image_tail=( + tensor_to_base64_string(end_frame) + if end_frame is not None + else None + ), + prompt=prompt, + negative_prompt=negative_prompt if negative_prompt else None, + cfg_scale=cfg_scale, + mode=KlingVideoGenMode(mode), + aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), + duration=KlingVideoGenDuration(duration), + camera_control=camera_control, + ), + auth_token=auth_token, + ) + + task_creation_response = 
initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = self.get_response(task_id, auth_token) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return video_result_to_node_output(video) + + +class KlingCameraControlI2VNode(KlingImage2VideoNode): + """ + Kling Image to Video Camera Control Node. This node is a image to video node, but it supports controlling the camera. + Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02. + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "start_frame": model_field_to_node_input( + IO.IMAGE, KlingImage2VideoRequest, "image" + ), + "prompt": model_field_to_node_input( + IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + KlingImage2VideoRequest, + "negative_prompt", + multiline=True, + ), + "cfg_scale": model_field_to_node_input( + IO.FLOAT, + KlingImage2VideoRequest, + "cfg_scale", + default=0.75, + min=0.0, + max=1.0, + ), + "aspect_ratio": model_field_to_node_input( + IO.COMBO, + KlingImage2VideoRequest, + "aspect_ratio", + enum_type=KlingVideoGenAspectRatio, + ), + "camera_control": ( + "CAMERA_CONTROL", + { + "tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", + }, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image." + + def api_call( + self, + start_frame: torch.Tensor, + prompt: str, + negative_prompt: str, + cfg_scale: float, + aspect_ratio: str, + camera_control: KlingCameraControl, + auth_token: Optional[str] = None, + ): + return super().api_call( + model_name=KlingVideoGenModelName.kling_v1_5, + start_frame=start_frame, + cfg_scale=cfg_scale, + mode=KlingVideoGenMode.pro, + aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), + duration=KlingVideoGenDuration.field_5, + prompt=prompt, + negative_prompt=negative_prompt, + camera_control=camera_control, + auth_token=auth_token, + ) + + +class KlingStartEndFrameNode(KlingImage2VideoNode): + """ + Kling First Last Frame Node. This node allows creation of a video from a first and last frame. It calls the normal image to video endpoint, but only allows the subset of input options that support the `image_tail` request field. + """ + + @staticmethod + def get_mode_string_mapping() -> dict[str, tuple[str, str, str]]: + """ + Returns a mapping of mode strings to their corresponding (mode, duration, model_name) tuples. + Only includes config combos that support the `image_tail` request field. 
+ + See: [Kling API Docs Capability Map](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) + """ + return { + "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), + "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"), + "pro mode / 5s duration / kling-v1-5": ("pro", "5", "kling-v1-5"), + "pro mode / 10s duration / kling-v1-5": ("pro", "10", "kling-v1-5"), + "pro mode / 5s duration / kling-v1-6": ("pro", "5", "kling-v1-6"), + "pro mode / 10s duration / kling-v1-6": ("pro", "10", "kling-v1-6"), + } + + @classmethod + def INPUT_TYPES(s): + modes = list(KlingStartEndFrameNode.get_mode_string_mapping().keys()) + return { + "required": { + "start_frame": model_field_to_node_input( + IO.IMAGE, KlingImage2VideoRequest, "image" + ), + "end_frame": model_field_to_node_input( + IO.IMAGE, KlingImage2VideoRequest, "image_tail" + ), + "prompt": model_field_to_node_input( + IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + KlingImage2VideoRequest, + "negative_prompt", + multiline=True, + ), + "cfg_scale": model_field_to_node_input( + IO.FLOAT, + KlingImage2VideoRequest, + "cfg_scale", + default=0.5, + min=0.0, + max=1.0, + ), + "aspect_ratio": model_field_to_node_input( + IO.COMBO, + KlingImage2VideoRequest, + "aspect_ratio", + enum_type=KlingVideoGenAspectRatio, + ), + "mode": ( + modes, + { + "default": modes[2], + "tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name.", + }, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last." + + def api_call( + self, + start_frame: torch.Tensor, + end_frame: torch.Tensor, + prompt: str, + negative_prompt: str, + cfg_scale: float, + aspect_ratio: str, + mode: str, + auth_token: Optional[str] = None, + ): + mode, duration, model_name = KlingStartEndFrameNode.get_mode_string_mapping()[ + mode + ] + return super().api_call( + prompt=prompt, + negative_prompt=negative_prompt, + model_name=model_name, + start_frame=start_frame, + cfg_scale=cfg_scale, + mode=mode, + aspect_ratio=aspect_ratio, + duration=duration, + end_frame=end_frame, + auth_token=auth_token, + ) + + +class KlingVideoExtendNode(KlingNodeBase): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, KlingVideoExtendRequest, "prompt", multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + KlingVideoExtendRequest, + "negative_prompt", + multiline=True, + ), + "cfg_scale": model_field_to_node_input( + IO.FLOAT, + KlingVideoExtendRequest, + "cfg_scale", + default=0.5, + min=0.0, + max=1.0, + ), + "video_id": model_field_to_node_input( + IO.STRING, KlingVideoExtendRequest, "video_id", forceInput=True + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = ("VIDEO", "STRING", "STRING") + RETURN_NAMES = ("VIDEO", "video_id", "duration") + DESCRIPTION = "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes." 
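+    # Like the other Kling nodes, this is a create-then-poll flow: the POST
+    # returns a task_id, and get_response polls PATH_VIDEO_EXTEND/{task_id}
+    # until poll_until_finished sees a terminal status.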
+ + def get_response(self, task_id: str, auth_token: str) -> KlingVideoExtendResponse: + return poll_until_finished( + auth_token, + ApiEndpoint( + path=f"{PATH_VIDEO_EXTEND}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingVideoExtendResponse, + ), + ) + + def api_call( + self, + prompt: str, + negative_prompt: str, + cfg_scale: float, + video_id: str, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile, str, str]: + validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_VIDEO_EXTEND, + method=HttpMethod.POST, + request_model=KlingVideoExtendRequest, + response_model=KlingVideoExtendResponse, + ), + request=KlingVideoExtendRequest( + prompt=prompt if prompt else None, + negative_prompt=negative_prompt if negative_prompt else None, + cfg_scale=cfg_scale, + video_id=video_id, + ), + auth_token=auth_token, + ) + + task_creation_response = initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = self.get_response(task_id, auth_token) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return video_result_to_node_output(video) + + +class KlingVideoEffectsBase(KlingNodeBase): + """Kling Video Effects Base""" + + RETURN_TYPES = ("VIDEO", "STRING", "STRING") + RETURN_NAMES = ("VIDEO", "video_id", "duration") + + def get_response(self, task_id: str, auth_token: str) -> KlingVideoEffectsResponse: + return poll_until_finished( + auth_token, + ApiEndpoint( + path=f"{PATH_VIDEO_EFFECTS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingVideoEffectsResponse, + ), + ) + + def api_call( + self, + dual_character: bool, + effect_scene: KlingDualCharacterEffectsScene | KlingSingleImageEffectsScene, + model_name: str, + duration: KlingVideoGenDuration, + image_1: torch.Tensor, + image_2: Optional[torch.Tensor] = None, + mode: Optional[KlingVideoGenMode] = None, + auth_token: Optional[str] = None, + ): + if dual_character: + request_input_field = KlingDualCharacterEffectInput( + model_name=model_name, + mode=mode, + images=[ + tensor_to_base64_string(image_1), + tensor_to_base64_string(image_2), + ], + duration=duration, + ) + else: + request_input_field = KlingSingleImageEffectInput( + model_name=model_name, + image=tensor_to_base64_string(image_1), + duration=duration, + ) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_VIDEO_EFFECTS, + method=HttpMethod.POST, + request_model=KlingVideoEffectsRequest, + response_model=KlingVideoEffectsResponse, + ), + request=KlingVideoEffectsRequest( + effect_scene=effect_scene, + input=request_input_field, + ), + auth_token=auth_token, + ) + + task_creation_response = initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = self.get_response(task_id, auth_token) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return video_result_to_node_output(video) + + +class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): + """Kling Dual Character Video Effect Node""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image_left": (IO.IMAGE, {"tooltip": "Left side image"}), + "image_right": (IO.IMAGE, {"tooltip": "Right side image"}), + "effect_scene": 
model_field_to_node_input( + IO.COMBO, + KlingVideoEffectsRequest, + "effect_scene", + enum_type=KlingDualCharacterEffectsScene, + ), + "model_name": model_field_to_node_input( + IO.COMBO, + KlingDualCharacterEffectInput, + "model_name", + enum_type=KlingCharacterEffectModelName, + ), + "mode": model_field_to_node_input( + IO.COMBO, + KlingDualCharacterEffectInput, + "mode", + enum_type=KlingVideoGenMode, + ), + "duration": model_field_to_node_input( + IO.COMBO, + KlingDualCharacterEffectInput, + "duration", + enum_type=KlingVideoGenDuration, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite." + RETURN_TYPES = ("VIDEO", "STRING") + RETURN_NAMES = ("VIDEO", "duration") + + def api_call( + self, + image_left: torch.Tensor, + image_right: torch.Tensor, + effect_scene: KlingDualCharacterEffectsScene, + model_name: KlingCharacterEffectModelName, + mode: KlingVideoGenMode, + duration: KlingVideoGenDuration, + auth_token: Optional[str] = None, + ): + video, _, duration = super().api_call( + dual_character=True, + effect_scene=effect_scene, + model_name=model_name, + mode=mode, + duration=duration, + image_1=image_left, + image_2=image_right, + auth_token=auth_token, + ) + return video, duration + +class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): + """Kling Single Image Video Effect Node""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ( + IO.IMAGE, + { + "tooltip": " Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1" + }, + ), + "effect_scene": model_field_to_node_input( + IO.COMBO, + KlingVideoEffectsRequest, + "effect_scene", + enum_type=KlingSingleImageEffectsScene, + ), + "model_name": model_field_to_node_input( + IO.COMBO, + KlingSingleImageEffectInput, + "model_name", + enum_type=KlingSingleImageEffectModelName, + ), + "duration": model_field_to_node_input( + IO.COMBO, + KlingSingleImageEffectInput, + "duration", + enum_type=KlingVideoGenDuration, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Achieve different special effects when generating a video based on the effect_scene." + + def api_call( + self, + image: torch.Tensor, + effect_scene: KlingSingleImageEffectsScene, + model_name: KlingSingleImageEffectModelName, + duration: KlingVideoGenDuration, + auth_token: Optional[str] = None, + ): + return super().api_call( + dual_character=False, + effect_scene=effect_scene, + model_name=model_name, + duration=duration, + image_1=image, + auth_token=auth_token, + ) + + +class KlingLipSyncBase(KlingNodeBase): + """Kling Lip Sync Base""" + + RETURN_TYPES = ("VIDEO", "STRING", "STRING") + RETURN_NAMES = ("VIDEO", "video_id", "duration") + + def validate_text(self, text: str): + if not text: + raise ValueError("Text is required") + if len(text) > MAX_PROMPT_LENGTH_LIP_SYNC: + raise ValueError( + f"Text is too long. Maximum length is {MAX_PROMPT_LENGTH_LIP_SYNC} characters." 
+ ) + + def get_response(self, task_id: str, auth_token: str) -> KlingLipSyncResponse: + """Polls the Kling API endpoint until the task reaches a terminal state.""" + return poll_until_finished( + auth_token, + ApiEndpoint( + path=f"{PATH_LIP_SYNC}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingLipSyncResponse, + ), + ) + + def api_call( + self, + video: VideoInput, + audio: Optional[AudioInput] = None, + voice_language: Optional[str] = None, + mode: Optional[str] = None, + text: Optional[str] = None, + voice_speed: Optional[float] = None, + voice_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile, str, str]: + if text: + self.validate_text(text) + + # Upload video to Comfy API and get download URL + video_url = upload_video_to_comfyapi(video, auth_token) + logging.info("Uploaded video to Comfy API. URL: %s", video_url) + + # Upload the audio file to Comfy API and get download URL + if audio: + audio_url = upload_audio_to_comfyapi(audio, auth_token) + logging.info("Uploaded audio to Comfy API. URL: %s", audio_url) + else: + audio_url = None + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_LIP_SYNC, + method=HttpMethod.POST, + request_model=KlingLipSyncRequest, + response_model=KlingLipSyncResponse, + ), + request=KlingLipSyncRequest( + input=KlingLipSyncInputObject( + video_url=video_url, + mode=mode, + text=text, + voice_language=voice_language, + voice_speed=voice_speed, + audio_type="url", + audio_url=audio_url, + voice_id=voice_id, + ), + ), + auth_token=auth_token, + ) + + task_creation_response = initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = self.get_response(task_id, auth_token) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return video_result_to_node_output(video) + + +class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): + """Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file.""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "video": (IO.VIDEO, {}), + "audio": (IO.AUDIO, {}), + "voice_language": model_field_to_node_input( + IO.COMBO, + KlingLipSyncInputObject, + "voice_language", + enum_type=KlingLipSyncVoiceLanguage, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file." + + def api_call( + self, + video: VideoInput, + audio: AudioInput, + voice_language: str, + auth_token: Optional[str] = None, + ): + return super().api_call( + video=video, + audio=audio, + voice_language=voice_language, + mode="audio2video", + auth_token=auth_token, + ) + + +class KlingLipSyncTextToVideoNode(KlingLipSyncBase): + """Kling Lip Sync Text to Video Node. 
Syncs mouth movements in a video file to a text prompt.""" + + @staticmethod + def get_voice_config() -> dict[str, tuple[str, str]]: + return { + # English voices + "Melody": ("girlfriend_4_speech02", "en"), + "Sunny": ("genshin_vindi2", "en"), + "Sage": ("zhinen_xuesheng", "en"), + "Ace": ("AOT", "en"), + "Blossom": ("ai_shatang", "en"), + "Peppy": ("genshin_klee2", "en"), + "Dove": ("genshin_kirara", "en"), + "Shine": ("ai_kaiya", "en"), + "Anchor": ("oversea_male1", "en"), + "Lyric": ("ai_chenjiahao_712", "en"), + "Tender": ("chat1_female_new-3", "en"), + "Siren": ("chat_0407_5-1", "en"), + "Zippy": ("cartoon-boy-07", "en"), + "Bud": ("uk_boy1", "en"), + "Sprite": ("cartoon-girl-01", "en"), + "Candy": ("PeppaPig_platform", "en"), + "Beacon": ("ai_huangzhong_712", "en"), + "Rock": ("ai_huangyaoshi_712", "en"), + "Titan": ("ai_laoguowang_712", "en"), + "Grace": ("chengshu_jiejie", "en"), + "Helen": ("you_pingjing", "en"), + "Lore": ("calm_story1", "en"), + "Crag": ("uk_man2", "en"), + "Prattle": ("laopopo_speech02", "en"), + "Hearth": ("heainainai_speech02", "en"), + "The Reader": ("reader_en_m-v1", "en"), + "Commercial Lady": ("commercial_lady_en_f-v1", "en"), + # Chinese voices + "阳光少年": ("genshin_vindi2", "zh"), + "懂事小弟": ("zhinen_xuesheng", "zh"), + "运动少年": ("tiyuxi_xuedi", "zh"), + "青春少女": ("ai_shatang", "zh"), + "温柔小妹": ("genshin_klee2", "zh"), + "元气少女": ("genshin_kirara", "zh"), + "阳光男生": ("ai_kaiya", "zh"), + "幽默小哥": ("tiexin_nanyou", "zh"), + "文艺小哥": ("ai_chenjiahao_712", "zh"), + "甜美邻家": ("girlfriend_1_speech02", "zh"), + "温柔姐姐": ("chat1_female_new-3", "zh"), + "职场女青": ("girlfriend_2_speech02", "zh"), + "活泼男童": ("cartoon-boy-07", "zh"), + "俏皮女童": ("cartoon-girl-01", "zh"), + "稳重老爸": ("ai_huangyaoshi_712", "zh"), + "温柔妈妈": ("you_pingjing", "zh"), + "严肃上司": ("ai_laoguowang_712", "zh"), + "优雅贵妇": ("chengshu_jiejie", "zh"), + "慈祥爷爷": ("zhuxi_speech02", "zh"), + "唠叨爷爷": ("uk_oldman3", "zh"), + "唠叨奶奶": ("laopopo_speech02", "zh"), + "和蔼奶奶": ("heainainai_speech02", "zh"), + "东北老铁": ("dongbeilaotie_speech02", "zh"), + "重庆小伙": ("chongqingxiaohuo_speech02", "zh"), + "四川妹子": ("chuanmeizi_speech02", "zh"), + "潮汕大叔": ("chaoshandashu_speech02", "zh"), + "台湾男生": ("ai_taiwan_man2_speech02", "zh"), + "西安掌柜": ("xianzhanggui_speech02", "zh"), + "天津姐姐": ("tianjinjiejie_speech02", "zh"), + "新闻播报男": ("diyinnansang_DB_CN_M_04-v2", "zh"), + "译制片男": ("yizhipiannan-v1", "zh"), + "撒娇女友": ("tianmeixuemei-v1", "zh"), + "刀片烟嗓": ("daopianyansang-v1", "zh"), + "乖巧正太": ("mengwa-v1", "zh"), + } + + @classmethod + def INPUT_TYPES(s): + voice_options = list(s.get_voice_config().keys()) + return { + "required": { + "video": (IO.VIDEO, {}), + "text": model_field_to_node_input( + IO.STRING, KlingLipSyncInputObject, "text", multiline=True + ), + "voice": (voice_options, {"default": voice_options[0]}), + "voice_speed": model_field_to_node_input( + IO.FLOAT, KlingLipSyncInputObject, "voice_speed", slider=True + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt." 
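+    # The human-readable voice name selected in the UI is mapped to the
+    # (voice_id, voice_language) pair expected by the Kling API via
+    # get_voice_config(), e.g. "Melody" -> ("girlfriend_4_speech02", "en").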
+
+    def api_call(
+        self,
+        video: VideoInput,
+        text: str,
+        voice: str,
+        voice_speed: float,
+        auth_token: Optional[str] = None,
+    ):
+        voice_id, voice_language = KlingLipSyncTextToVideoNode.get_voice_config()[voice]
+        return super().api_call(
+            video=video,
+            text=text,
+            voice_language=voice_language,
+            voice_id=voice_id,
+            voice_speed=voice_speed,
+            mode="text2video",
+            auth_token=auth_token,
+        )
+
+
+class KlingImageGenerationBase(KlingNodeBase):
+    """Kling Image Generation Base Node."""
+
+    RETURN_TYPES = ("IMAGE",)
+    CATEGORY = "api node/image/Kling"
+
+    def validate_prompt(self, prompt: str, negative_prompt: Optional[str] = None):
+        if not prompt or len(prompt) > MAX_PROMPT_LENGTH_IMAGE_GEN:
+            raise ValueError(
+                f"Prompt is required and must be at most {MAX_PROMPT_LENGTH_IMAGE_GEN} characters"
+            )
+        if negative_prompt and len(negative_prompt) > MAX_PROMPT_LENGTH_IMAGE_GEN:
+            raise ValueError(
+                f"Negative prompt must be at most {MAX_PROMPT_LENGTH_IMAGE_GEN} characters"
+            )
+
+
+class KlingVirtualTryOnNode(KlingImageGenerationBase):
+    """Kling Virtual Try On Node."""
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "human_image": (IO.IMAGE, {}),
+                "cloth_image": (IO.IMAGE, {}),
+                "model_name": model_field_to_node_input(
+                    IO.COMBO,
+                    KlingVirtualTryOnRequest,
+                    "model_name",
+                    enum_type=KlingVirtualTryOnModelName,
+                ),
+            },
+            "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"},
+        }
+
+    DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try the cloth on the human."
+
+    def get_response(
+        self, task_id: str, auth_token: Optional[str] = None
+    ) -> KlingVirtualTryOnResponse:
+        return poll_until_finished(
+            auth_token,
+            ApiEndpoint(
+                path=f"{PATH_VIRTUAL_TRY_ON}/{task_id}",
+                method=HttpMethod.GET,
+                request_model=EmptyRequest,
+                response_model=KlingVirtualTryOnResponse,
+            ),
+        )
+
+    def api_call(
+        self,
+        human_image: torch.Tensor,
+        cloth_image: torch.Tensor,
+        model_name: KlingVirtualTryOnModelName,
+        auth_token: Optional[str] = None,
+    ):
+        initial_operation = SynchronousOperation(
+            endpoint=ApiEndpoint(
+                path=PATH_VIRTUAL_TRY_ON,
+                method=HttpMethod.POST,
+                request_model=KlingVirtualTryOnRequest,
+                response_model=KlingVirtualTryOnResponse,
+            ),
+            request=KlingVirtualTryOnRequest(
+                human_image=tensor_to_base64_string(human_image),
+                cloth_image=tensor_to_base64_string(cloth_image),
+                model_name=model_name,
+            ),
+            auth_token=auth_token,
+        )
+
+        task_creation_response = initial_operation.execute()
+        validate_task_creation_response(task_creation_response)
+        task_id = task_creation_response.data.task_id
+
+        final_response = self.get_response(task_id, auth_token)
+        validate_image_result_response(final_response)
+
+        images = get_images_from_response(final_response)
+        return (image_result_to_node_output(images),)
+
+
+class KlingImageGenerationNode(KlingImageGenerationBase):
+    """Kling Image Generation Node.
Generate an image from a text prompt with an optional reference image.""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, + KlingImageGenerationsRequest, + "prompt", + multiline=True, + max_length=MAX_PROMPT_LENGTH_IMAGE_GEN, + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + KlingImageGenerationsRequest, + "negative_prompt", + multiline=True, + ), + "image_type": model_field_to_node_input( + IO.COMBO, + KlingImageGenerationsRequest, + "image_reference", + enum_type=KlingImageGenImageReferenceType, + ), + "image_fidelity": model_field_to_node_input( + IO.FLOAT, + KlingImageGenerationsRequest, + "image_fidelity", + slider=True, + step=0.01, + ), + "human_fidelity": model_field_to_node_input( + IO.FLOAT, + KlingImageGenerationsRequest, + "human_fidelity", + slider=True, + step=0.01, + ), + "model_name": model_field_to_node_input( + IO.COMBO, + KlingImageGenerationsRequest, + "model_name", + enum_type=KlingImageGenModelName, + ), + "aspect_ratio": model_field_to_node_input( + IO.COMBO, + KlingImageGenerationsRequest, + "aspect_ratio", + enum_type=KlingImageGenAspectRatio, + ), + "n": model_field_to_node_input( + IO.INT, + KlingImageGenerationsRequest, + "n", + ), + }, + "optional": { + "image": (IO.IMAGE, {}), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + DESCRIPTION = "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image." + + def get_response( + self, task_id: str, auth_token: Optional[str] = None + ) -> KlingImageGenerationsResponse: + return poll_until_finished( + auth_token, + ApiEndpoint( + path=f"{PATH_IMAGE_GENERATIONS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingImageGenerationsResponse, + ), + ) + + def api_call( + self, + model_name: KlingImageGenModelName, + prompt: str, + negative_prompt: str, + image_type: KlingImageGenImageReferenceType, + image_fidelity: float, + human_fidelity: float, + n: int, + aspect_ratio: KlingImageGenAspectRatio, + image: Optional[torch.Tensor] = None, + auth_token: Optional[str] = None, + ): + self.validate_prompt(prompt, negative_prompt) + + if image is not None: + image = tensor_to_base64_string(image) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_IMAGE_GENERATIONS, + method=HttpMethod.POST, + request_model=KlingImageGenerationsRequest, + response_model=KlingImageGenerationsResponse, + ), + request=KlingImageGenerationsRequest( + model_name=model_name, + prompt=prompt, + negative_prompt=negative_prompt, + image=image, + image_reference=image_type, + image_fidelity=image_fidelity, + human_fidelity=human_fidelity, + n=n, + aspect_ratio=aspect_ratio, + ), + auth_token=auth_token, + ) + + task_creation_response = initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = self.get_response(task_id, auth_token) + validate_image_result_response(final_response) + + images = get_images_from_response(final_response) + return (image_result_to_node_output(images),) + + +NODE_CLASS_MAPPINGS = { + "KlingCameraControls": KlingCameraControls, + "KlingTextToVideoNode": KlingTextToVideoNode, + "KlingImage2VideoNode": KlingImage2VideoNode, + "KlingCameraControlI2VNode": KlingCameraControlI2VNode, + "KlingCameraControlT2VNode": KlingCameraControlT2VNode, + "KlingStartEndFrameNode": KlingStartEndFrameNode, + "KlingVideoExtendNode": 
KlingVideoExtendNode, + "KlingLipSyncAudioToVideoNode": KlingLipSyncAudioToVideoNode, + "KlingLipSyncTextToVideoNode": KlingLipSyncTextToVideoNode, + "KlingVirtualTryOnNode": KlingVirtualTryOnNode, + "KlingImageGenerationNode": KlingImageGenerationNode, + "KlingSingleImageVideoEffectNode": KlingSingleImageVideoEffectNode, + "KlingDualCharacterVideoEffectNode": KlingDualCharacterVideoEffectNode, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "KlingCameraControls": "Kling Camera Controls", + "KlingTextToVideoNode": "Kling Text to Video", + "KlingImage2VideoNode": "Kling Image to Video", + "KlingCameraControlI2VNode": "Kling Image to Video (Camera Control)", + "KlingCameraControlT2VNode": "Kling Text to Video (Camera Control)", + "KlingStartEndFrameNode": "Kling Start-End Frame to Video", + "KlingVideoExtendNode": "Kling Video Extend", + "KlingLipSyncAudioToVideoNode": "Kling Lip Sync Video with Audio", + "KlingLipSyncTextToVideoNode": "Kling Lip Sync Video with Text", + "KlingVirtualTryOnNode": "Kling Virtual Try On", + "KlingImageGenerationNode": "Kling Image Generation", + "KlingSingleImageVideoEffectNode": "Kling Video Effects", + "KlingDualCharacterVideoEffectNode": "Kling Dual Character Video Effects", +} diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py new file mode 100644 index 000000000..0f0d9aa80 --- /dev/null +++ b/comfy_api_nodes/nodes_luma.py @@ -0,0 +1,702 @@ +from inspect import cleandoc +from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from comfy_api.input_impl.video_types import VideoFromFile +from comfy_api_nodes.apis.luma_api import ( + LumaImageModel, + LumaVideoModel, + LumaVideoOutputResolution, + LumaVideoModelOutputDuration, + LumaAspectRatio, + LumaState, + LumaImageGenerationRequest, + LumaGenerationRequest, + LumaGeneration, + LumaCharacterRef, + LumaModifyImageRef, + LumaImageIdentity, + LumaReference, + LumaReferenceChain, + LumaImageReference, + LumaKeyframes, + LumaConceptChain, + LumaIO, + get_luma_concepts, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + upload_images_to_comfyapi, + process_image_response, + validate_string, +) + +import requests +import torch +from io import BytesIO + + +class LumaReferenceNode(ComfyNodeABC): + """ + Holds an image and weight for use with Luma Generate Image node. + """ + + RETURN_TYPES = (LumaIO.LUMA_REF,) + RETURN_NAMES = ("luma_ref",) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "create_luma_reference" + CATEGORY = "api node/image/Luma" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ( + IO.IMAGE, + { + "tooltip": "Image to use as reference.", + }, + ), + "weight": ( + IO.FLOAT, + { + "default": 1.0, + "min": 0.0, + "max": 1.0, + "step": 0.01, + "tooltip": "Weight of image reference.", + }, + ), + }, + "optional": {"luma_ref": (LumaIO.LUMA_REF,)}, + } + + def create_luma_reference( + self, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None + ): + if luma_ref is not None: + luma_ref = luma_ref.clone() + else: + luma_ref = LumaReferenceChain() + luma_ref.add(LumaReference(image=image, weight=round(weight, 2))) + return (luma_ref,) + + +class LumaConceptsNode(ComfyNodeABC): + """ + Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes. 
+ """ + + RETURN_TYPES = (LumaIO.LUMA_CONCEPTS,) + RETURN_NAMES = ("luma_concepts",) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "create_concepts" + CATEGORY = "api node/video/Luma" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "concept1": (get_luma_concepts(include_none=True),), + "concept2": (get_luma_concepts(include_none=True),), + "concept3": (get_luma_concepts(include_none=True),), + "concept4": (get_luma_concepts(include_none=True),), + }, + "optional": { + "luma_concepts": ( + LumaIO.LUMA_CONCEPTS, + { + "tooltip": "Optional Camera Concepts to add to the ones chosen here." + }, + ), + }, + } + + def create_concepts( + self, + concept1: str, + concept2: str, + concept3: str, + concept4: str, + luma_concepts: LumaConceptChain = None, + ): + chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4]) + if luma_concepts is not None: + chain = luma_concepts.clone_and_merge(chain) + return (chain,) + + +class LumaImageGenerationNode(ComfyNodeABC): + """ + Generates images synchronously based on prompt and aspect ratio. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Luma" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "model": ([model.value for model in LumaImageModel],), + "aspect_ratio": ( + [ratio.value for ratio in LumaAspectRatio], + { + "default": LumaAspectRatio.ratio_16_9, + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + "style_image_weight": ( + IO.FLOAT, + { + "default": 1.0, + "min": 0.0, + "max": 1.0, + "step": 0.01, + "tooltip": "Weight of style image. Ignored if no style_image provided.", + }, + ), + }, + "optional": { + "image_luma_ref": ( + LumaIO.LUMA_REF, + { + "tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered." + }, + ), + "style_image": ( + IO.IMAGE, + {"tooltip": "Style reference image; only 1 image will be used."}, + ), + "character_image": ( + IO.IMAGE, + { + "tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered." 
+ }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + prompt: str, + model: str, + aspect_ratio: str, + seed, + style_image_weight: float, + image_luma_ref: LumaReferenceChain = None, + style_image: torch.Tensor = None, + character_image: torch.Tensor = None, + auth_token=None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=True, min_length=3) + # handle image_luma_ref + api_image_ref = None + if image_luma_ref is not None: + api_image_ref = self._convert_luma_refs( + image_luma_ref, max_refs=4, auth_token=auth_token + ) + # handle style_luma_ref + api_style_ref = None + if style_image is not None: + api_style_ref = self._convert_style_image( + style_image, weight=style_image_weight, auth_token=auth_token + ) + # handle character_ref images + character_ref = None + if character_image is not None: + download_urls = upload_images_to_comfyapi( + character_image, max_images=4, auth_token=auth_token + ) + character_ref = LumaCharacterRef( + identity0=LumaImageIdentity(images=download_urls) + ) + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/luma/generations/image", + method=HttpMethod.POST, + request_model=LumaImageGenerationRequest, + response_model=LumaGeneration, + ), + request=LumaImageGenerationRequest( + prompt=prompt, + model=model, + aspect_ratio=aspect_ratio, + image_ref=api_image_ref, + style_ref=api_style_ref, + character_ref=character_ref, + ), + auth_token=auth_token, + ) + response_api: LumaGeneration = operation.execute() + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/luma/generations/{response_api.id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=LumaGeneration, + ), + completed_statuses=[LumaState.completed], + failed_statuses=[LumaState.failed], + status_extractor=lambda x: x.state, + auth_token=auth_token, + ) + response_poll = operation.execute() + + img_response = requests.get(response_poll.assets.image) + img = process_image_response(img_response) + return (img,) + + def _convert_luma_refs( + self, luma_ref: LumaReferenceChain, max_refs: int, auth_token=None + ): + luma_urls = [] + ref_count = 0 + for ref in luma_ref.refs: + download_urls = upload_images_to_comfyapi( + ref.image, max_images=1, auth_token=auth_token + ) + luma_urls.append(download_urls[0]) + ref_count += 1 + if ref_count >= max_refs: + break + return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs) + + def _convert_style_image( + self, style_image: torch.Tensor, weight: float, auth_token=None + ): + chain = LumaReferenceChain( + first_ref=LumaReference(image=style_image, weight=weight) + ) + return self._convert_luma_refs(chain, max_refs=1, auth_token=auth_token) + + +class LumaImageModifyNode(ComfyNodeABC): + """ + Modifies images synchronously based on prompt and aspect ratio. 
+ """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Luma" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE,), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation", + }, + ), + "image_weight": ( + IO.FLOAT, + { + "default": 0.1, + "min": 0.0, + "max": 0.98, + "step": 0.01, + "tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified.", + }, + ), + "model": ([model.value for model in LumaImageModel],), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + }, + "optional": {}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + prompt: str, + model: str, + image: torch.Tensor, + image_weight: float, + seed, + auth_token=None, + **kwargs, + ): + # first, upload image + download_urls = upload_images_to_comfyapi( + image, max_images=1, auth_token=auth_token + ) + image_url = download_urls[0] + # next, make Luma call with download url provided + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/luma/generations/image", + method=HttpMethod.POST, + request_model=LumaImageGenerationRequest, + response_model=LumaGeneration, + ), + request=LumaImageGenerationRequest( + prompt=prompt, + model=model, + modify_image_ref=LumaModifyImageRef( + url=image_url, weight=round(max(min(1.0-image_weight, 0.98), 0.0), 2) + ), + ), + auth_token=auth_token, + ) + response_api: LumaGeneration = operation.execute() + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/luma/generations/{response_api.id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=LumaGeneration, + ), + completed_statuses=[LumaState.completed], + failed_statuses=[LumaState.failed], + status_extractor=lambda x: x.state, + auth_token=auth_token, + ) + response_poll = operation.execute() + + img_response = requests.get(response_poll.assets.image) + img = process_image_response(img_response) + return (img,) + + +class LumaTextToVideoGenerationNode(ComfyNodeABC): + """ + Generates videos synchronously based on prompt and output_size. 
+ """ + + RETURN_TYPES = (IO.VIDEO,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/video/Luma" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the video generation", + }, + ), + "model": ([model.value for model in LumaVideoModel],), + "aspect_ratio": ( + [ratio.value for ratio in LumaAspectRatio], + { + "default": LumaAspectRatio.ratio_16_9, + }, + ), + "resolution": ( + [resolution.value for resolution in LumaVideoOutputResolution], + { + "default": LumaVideoOutputResolution.res_540p, + }, + ), + "duration": ([dur.value for dur in LumaVideoModelOutputDuration],), + "loop": ( + IO.BOOLEAN, + { + "default": False, + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + }, + "optional": { + "luma_concepts": ( + LumaIO.LUMA_CONCEPTS, + { + "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node." + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + prompt: str, + model: str, + aspect_ratio: str, + resolution: str, + duration: str, + loop: bool, + seed, + luma_concepts: LumaConceptChain = None, + auth_token=None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False, min_length=3) + duration = duration if model != LumaVideoModel.ray_1_6 else None + resolution = resolution if model != LumaVideoModel.ray_1_6 else None + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/luma/generations", + method=HttpMethod.POST, + request_model=LumaGenerationRequest, + response_model=LumaGeneration, + ), + request=LumaGenerationRequest( + prompt=prompt, + model=model, + resolution=resolution, + aspect_ratio=aspect_ratio, + duration=duration, + loop=loop, + concepts=luma_concepts.create_api_model() if luma_concepts else None, + ), + auth_token=auth_token, + ) + response_api: LumaGeneration = operation.execute() + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/luma/generations/{response_api.id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=LumaGeneration, + ), + completed_statuses=[LumaState.completed], + failed_statuses=[LumaState.failed], + status_extractor=lambda x: x.state, + auth_token=auth_token, + ) + response_poll = operation.execute() + + vid_response = requests.get(response_poll.assets.video) + return (VideoFromFile(BytesIO(vid_response.content)),) + + +class LumaImageToVideoGenerationNode(ComfyNodeABC): + """ + Generates videos synchronously based on prompt, input images, and output_size. 
+    """
+
+    RETURN_TYPES = (IO.VIDEO,)
+    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
+    FUNCTION = "api_call"
+    API_NODE = True
+    CATEGORY = "api node/video/Luma"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "prompt": (
+                    IO.STRING,
+                    {
+                        "multiline": True,
+                        "default": "",
+                        "tooltip": "Prompt for the video generation",
+                    },
+                ),
+                "model": ([model.value for model in LumaVideoModel],),
+                # "aspect_ratio": ([ratio.value for ratio in LumaAspectRatio], {
+                #     "default": LumaAspectRatio.ratio_16_9,
+                # }),
+                "resolution": (
+                    [resolution.value for resolution in LumaVideoOutputResolution],
+                    {
+                        "default": LumaVideoOutputResolution.res_540p,
+                    },
+                ),
+                "duration": ([dur.value for dur in LumaVideoModelOutputDuration],),
+                "loop": (
+                    IO.BOOLEAN,
+                    {
+                        "default": False,
+                    },
+                ),
+                "seed": (
+                    IO.INT,
+                    {
+                        "default": 0,
+                        "min": 0,
+                        "max": 0xFFFFFFFFFFFFFFFF,
+                        "control_after_generate": True,
+                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                    },
+                ),
+            },
+            "optional": {
+                "first_image": (
+                    IO.IMAGE,
+                    {"tooltip": "First frame of generated video."},
+                ),
+                "last_image": (IO.IMAGE, {"tooltip": "Last frame of generated video."}),
+                "luma_concepts": (
+                    LumaIO.LUMA_CONCEPTS,
+                    {
+                        "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
+                    },
+                ),
+            },
+            "hidden": {
+                "auth_token": "AUTH_TOKEN_COMFY_ORG",
+            },
+        }
+
+    def api_call(
+        self,
+        prompt: str,
+        model: str,
+        resolution: str,
+        duration: str,
+        loop: bool,
+        seed,
+        first_image: torch.Tensor = None,
+        last_image: torch.Tensor = None,
+        luma_concepts: LumaConceptChain = None,
+        auth_token=None,
+        **kwargs,
+    ):
+        if first_image is None and last_image is None:
+            raise Exception(
+                "At least one of first_image and last_image must be provided."
+ ) + keyframes = self._convert_to_keyframes(first_image, last_image, auth_token) + duration = duration if model != LumaVideoModel.ray_1_6 else None + resolution = resolution if model != LumaVideoModel.ray_1_6 else None + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/luma/generations", + method=HttpMethod.POST, + request_model=LumaGenerationRequest, + response_model=LumaGeneration, + ), + request=LumaGenerationRequest( + prompt=prompt, + model=model, + aspect_ratio=LumaAspectRatio.ratio_16_9, # ignored, but still needed by the API for some reason + resolution=resolution, + duration=duration, + loop=loop, + keyframes=keyframes, + concepts=luma_concepts.create_api_model() if luma_concepts else None, + ), + auth_token=auth_token, + ) + response_api: LumaGeneration = operation.execute() + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/luma/generations/{response_api.id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=LumaGeneration, + ), + completed_statuses=[LumaState.completed], + failed_statuses=[LumaState.failed], + status_extractor=lambda x: x.state, + auth_token=auth_token, + ) + response_poll = operation.execute() + + vid_response = requests.get(response_poll.assets.video) + return (VideoFromFile(BytesIO(vid_response.content)),) + + def _convert_to_keyframes( + self, + first_image: torch.Tensor = None, + last_image: torch.Tensor = None, + auth_token=None, + ): + if first_image is None and last_image is None: + return None + frame0 = None + frame1 = None + if first_image is not None: + download_urls = upload_images_to_comfyapi( + first_image, max_images=1, auth_token=auth_token + ) + frame0 = LumaImageReference(type="image", url=download_urls[0]) + if last_image is not None: + download_urls = upload_images_to_comfyapi( + last_image, max_images=1, auth_token=auth_token + ) + frame1 = LumaImageReference(type="image", url=download_urls[0]) + return LumaKeyframes(frame0=frame0, frame1=frame1) + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "LumaImageNode": LumaImageGenerationNode, + "LumaImageModifyNode": LumaImageModifyNode, + "LumaVideoNode": LumaTextToVideoGenerationNode, + "LumaImageToVideoNode": LumaImageToVideoGenerationNode, + "LumaReferenceNode": LumaReferenceNode, + "LumaConceptsNode": LumaConceptsNode, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "LumaImageNode": "Luma Text to Image", + "LumaImageModifyNode": "Luma Image to Image", + "LumaVideoNode": "Luma Text to Video", + "LumaImageToVideoNode": "Luma Image to Video", + "LumaReferenceNode": "Luma Reference", + "LumaConceptsNode": "Luma Concepts", +} diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py new file mode 100644 index 000000000..cacda22c6 --- /dev/null +++ b/comfy_api_nodes/nodes_minimax.py @@ -0,0 +1,306 @@ +from comfy.comfy_types.node_typing import IO +from comfy_api.input_impl.video_types import VideoFromFile +from comfy_api_nodes.apis import ( + MinimaxVideoGenerationRequest, + MinimaxVideoGenerationResponse, + MinimaxFileRetrieveResponse, + MinimaxTaskResultResponse, + SubjectReferenceItem, + Model +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + download_url_to_bytesio, + upload_images_to_comfyapi, 
+ validate_string, +) + +import torch +import logging + + +class MinimaxTextToVideoNode: + """ + Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API. + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt_text": ( + "STRING", + { + "multiline": True, + "default": "", + "tooltip": "Text prompt to guide the video generation", + }, + ), + "model": ( + [ + "T2V-01", + "T2V-01-Director", + ], + { + "default": "T2V-01", + "tooltip": "Model to use for video generation", + }, + ), + }, + "optional": { + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = ("VIDEO",) + DESCRIPTION = "Generates videos from prompts using MiniMax's API" + FUNCTION = "generate_video" + CATEGORY = "api node/video/MiniMax" + API_NODE = True + OUTPUT_NODE = True + + def generate_video( + self, + prompt_text, + seed=0, + model="T2V-01", + image: torch.Tensor=None, # used for ImageToVideo + subject: torch.Tensor=None, # used for SubjectToVideo + auth_token=None, + ): + ''' + Function used between MiniMax nodes - supports T2V, I2V, and S2V, based on provided arguments. + ''' + if image is None: + validate_string(prompt_text, field_name="prompt_text") + # upload image, if passed in + image_url = None + if image is not None: + image_url = upload_images_to_comfyapi(image, max_images=1, auth_token=auth_token)[0] + + # TODO: figure out how to deal with subject properly, API returns invalid params when using S2V-01 model + subject_reference = None + if subject is not None: + subject_url = upload_images_to_comfyapi(subject, max_images=1, auth_token=auth_token)[0] + subject_reference = [SubjectReferenceItem(image=subject_url)] + + + video_generate_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/video_generation", + method=HttpMethod.POST, + request_model=MinimaxVideoGenerationRequest, + response_model=MinimaxVideoGenerationResponse, + ), + request=MinimaxVideoGenerationRequest( + model=Model(model), + prompt=prompt_text, + callback_url=None, + first_frame_image=image_url, + subject_reference=subject_reference, + prompt_optimizer=None, + ), + auth_token=auth_token, + ) + response = video_generate_operation.execute() + + task_id = response.task_id + if not task_id: + raise Exception(f"MiniMax generation failed: {response.base_resp}") + + video_generate_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path="/proxy/minimax/query/video_generation", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxTaskResultResponse, + query_params={"task_id": task_id}, + ), + completed_statuses=["Success"], + failed_statuses=["Fail"], + status_extractor=lambda x: x.status.value, + auth_token=auth_token, + ) + task_result = video_generate_operation.execute() + + file_id = task_result.file_id + if file_id is None: + raise Exception("Request was not successful. 
Missing file ID.") + file_retrieve_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/files/retrieve", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxFileRetrieveResponse, + query_params={"file_id": int(file_id)}, + ), + request=EmptyRequest(), + auth_token=auth_token, + ) + file_result = file_retrieve_operation.execute() + + file_url = file_result.file.download_url + if file_url is None: + raise Exception( + f"No video was found in the response. Full response: {file_result.model_dump()}" + ) + logging.info(f"Generated video URL: {file_url}") + + video_io = download_url_to_bytesio(file_url) + if video_io is None: + error_msg = f"Failed to download video from {file_url}" + logging.error(error_msg) + raise Exception(error_msg) + return (VideoFromFile(video_io),) + + +class MinimaxImageToVideoNode(MinimaxTextToVideoNode): + """ + Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ( + IO.IMAGE, + { + "tooltip": "Image to use as first frame of video generation" + }, + ), + "prompt_text": ( + "STRING", + { + "multiline": True, + "default": "", + "tooltip": "Text prompt to guide the video generation", + }, + ), + "model": ( + [ + "I2V-01-Director", + "I2V-01", + "I2V-01-live", + ], + { + "default": "I2V-01", + "tooltip": "Model to use for video generation", + }, + ), + }, + "optional": { + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = ("VIDEO",) + DESCRIPTION = "Generates videos from an image and prompts using MiniMax's API" + FUNCTION = "generate_video" + CATEGORY = "api node/video/MiniMax" + API_NODE = True + OUTPUT_NODE = True + + +class MinimaxSubjectToVideoNode(MinimaxTextToVideoNode): + """ + Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. 
+ """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "subject": ( + IO.IMAGE, + { + "tooltip": "Image of subject to reference video generation" + }, + ), + "prompt_text": ( + "STRING", + { + "multiline": True, + "default": "", + "tooltip": "Text prompt to guide the video generation", + }, + ), + "model": ( + [ + "S2V-01", + ], + { + "default": "S2V-01", + "tooltip": "Model to use for video generation", + }, + ), + }, + "optional": { + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = ("VIDEO",) + DESCRIPTION = "Generates videos from an image and prompts using MiniMax's API" + FUNCTION = "generate_video" + CATEGORY = "api node/video/MiniMax" + API_NODE = True + OUTPUT_NODE = True + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "MinimaxTextToVideoNode": MinimaxTextToVideoNode, + "MinimaxImageToVideoNode": MinimaxImageToVideoNode, + # "MinimaxSubjectToVideoNode": MinimaxSubjectToVideoNode, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "MinimaxTextToVideoNode": "MiniMax Text to Video", + "MinimaxImageToVideoNode": "MiniMax Image to Video", + "MinimaxSubjectToVideoNode": "MiniMax Subject to Video", +} diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py new file mode 100644 index 000000000..c18c65d7a --- /dev/null +++ b/comfy_api_nodes/nodes_openai.py @@ -0,0 +1,487 @@ +import io +from inspect import cleandoc +import numpy as np +import torch +from PIL import Image + +from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict + + +from comfy_api_nodes.apis import ( + OpenAIImageGenerationRequest, + OpenAIImageEditRequest, + OpenAIImageGenerationResponse, +) + +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, +) + +from comfy_api_nodes.apinode_utils import ( + downscale_image_tensor, + validate_and_cast_response, + validate_string, +) + +class OpenAIDalle2(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's DALL·E 2 endpoint. 
+ """ + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Text prompt for DALL·E", + }, + ), + }, + "optional": { + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2**31 - 1, + "step": 1, + "display": "number", + "control_after_generate": True, + "tooltip": "not implemented yet in backend", + }, + ), + "size": ( + IO.COMBO, + { + "options": ["256x256", "512x512", "1024x1024"], + "default": "1024x1024", + "tooltip": "Image size", + }, + ), + "n": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 8, + "step": 1, + "display": "number", + "tooltip": "How many images to generate", + }, + ), + "image": ( + IO.IMAGE, + { + "default": None, + "tooltip": "Optional reference image for image editing.", + }, + ), + "mask": ( + IO.MASK, + { + "default": None, + "tooltip": "Optional mask for inpainting (white areas will be replaced)", + }, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node/image/OpenAI" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call( + self, + prompt, + seed=0, + image=None, + mask=None, + n=1, + size="1024x1024", + auth_token=None, + ): + validate_string(prompt, strip_whitespace=False) + model = "dall-e-2" + path = "/proxy/openai/images/generations" + content_type = "application/json" + request_class = OpenAIImageGenerationRequest + img_binary = None + + if image is not None and mask is not None: + path = "/proxy/openai/images/edits" + content_type = "multipart/form-data" + request_class = OpenAIImageEditRequest + + input_tensor = image.squeeze().cpu() + height, width, channels = input_tensor.shape + rgba_tensor = torch.ones(height, width, 4, device="cpu") + rgba_tensor[:, :, :channels] = input_tensor + + if mask.shape[1:] != image.shape[1:-1]: + raise Exception("Mask and Image must be the same size") + rgba_tensor[:, :, 3] = 1 - mask.squeeze().cpu() + + rgba_tensor = downscale_image_tensor(rgba_tensor.unsqueeze(0)).squeeze() + + image_np = (rgba_tensor.numpy() * 255).astype(np.uint8) + img = Image.fromarray(image_np) + img_byte_arr = io.BytesIO() + img.save(img_byte_arr, format="PNG") + img_byte_arr.seek(0) + img_binary = img_byte_arr # .getvalue() + img_binary.name = "image.png" + elif image is not None or mask is not None: + raise Exception("Dall-E 2 image editing requires an image AND a mask") + + # Build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=request_class, + response_model=OpenAIImageGenerationResponse, + ), + request=request_class( + model=model, + prompt=prompt, + n=n, + size=size, + seed=seed, + ), + files=( + { + "image": img_binary, + } + if img_binary + else None + ), + content_type=content_type, + auth_token=auth_token, + ) + + response = operation.execute() + + img_tensor = validate_and_cast_response(response) + return (img_tensor,) + + +class OpenAIDalle3(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's DALL·E 3 endpoint. 
+ """ + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Text prompt for DALL·E", + }, + ), + }, + "optional": { + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2**31 - 1, + "step": 1, + "display": "number", + "control_after_generate": True, + "tooltip": "not implemented yet in backend", + }, + ), + "quality": ( + IO.COMBO, + { + "options": ["standard", "hd"], + "default": "standard", + "tooltip": "Image quality", + }, + ), + "style": ( + IO.COMBO, + { + "options": ["natural", "vivid"], + "default": "natural", + "tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.", + }, + ), + "size": ( + IO.COMBO, + { + "options": ["1024x1024", "1024x1792", "1792x1024"], + "default": "1024x1024", + "tooltip": "Image size", + }, + ), + }, + "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + } + + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "api_call" + CATEGORY = "api node/image/OpenAI" + DESCRIPTION = cleandoc(__doc__ or "") + API_NODE = True + + def api_call( + self, + prompt, + seed=0, + style="natural", + quality="standard", + size="1024x1024", + auth_token=None, + ): + validate_string(prompt, strip_whitespace=False) + model = "dall-e-3" + + # build the operation + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/openai/images/generations", + method=HttpMethod.POST, + request_model=OpenAIImageGenerationRequest, + response_model=OpenAIImageGenerationResponse, + ), + request=OpenAIImageGenerationRequest( + model=model, + prompt=prompt, + quality=quality, + size=size, + style=style, + seed=seed, + ), + auth_token=auth_token, + ) + + response = operation.execute() + + img_tensor = validate_and_cast_response(response) + return (img_tensor,) + + +class OpenAIGPTImage1(ComfyNodeABC): + """ + Generates images synchronously via OpenAI's GPT Image 1 endpoint. 
+    """
+
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls) -> InputTypeDict:
+        return {
+            "required": {
+                "prompt": (
+                    IO.STRING,
+                    {
+                        "multiline": True,
+                        "default": "",
+                        "tooltip": "Text prompt for GPT Image 1",
+                    },
+                ),
+            },
+            "optional": {
+                "seed": (
+                    IO.INT,
+                    {
+                        "default": 0,
+                        "min": 0,
+                        "max": 2**31 - 1,
+                        "step": 1,
+                        "display": "number",
+                        "control_after_generate": True,
+                        "tooltip": "not implemented yet in backend",
+                    },
+                ),
+                "quality": (
+                    IO.COMBO,
+                    {
+                        "options": ["low", "medium", "high"],
+                        "default": "low",
+                        "tooltip": "Image quality, affects cost and generation time.",
+                    },
+                ),
+                "background": (
+                    IO.COMBO,
+                    {
+                        "options": ["opaque", "transparent"],
+                        "default": "opaque",
+                        "tooltip": "Return image with or without background",
+                    },
+                ),
+                "size": (
+                    IO.COMBO,
+                    {
+                        "options": ["auto", "1024x1024", "1024x1536", "1536x1024"],
+                        "default": "auto",
+                        "tooltip": "Image size",
+                    },
+                ),
+                "n": (
+                    IO.INT,
+                    {
+                        "default": 1,
+                        "min": 1,
+                        "max": 8,
+                        "step": 1,
+                        "display": "number",
+                        "tooltip": "How many images to generate",
+                    },
+                ),
+                "image": (
+                    IO.IMAGE,
+                    {
+                        "default": None,
+                        "tooltip": "Optional reference image for image editing.",
+                    },
+                ),
+                "mask": (
+                    IO.MASK,
+                    {
+                        "default": None,
+                        "tooltip": "Optional mask for inpainting (white areas will be replaced)",
+                    },
+                ),
+            },
+            "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"},
+        }
+
+    RETURN_TYPES = (IO.IMAGE,)
+    FUNCTION = "api_call"
+    CATEGORY = "api node/image/OpenAI"
+    DESCRIPTION = cleandoc(__doc__ or "")
+    API_NODE = True
+
+    def api_call(
+        self,
+        prompt,
+        seed=0,
+        quality="low",
+        background="opaque",
+        image=None,
+        mask=None,
+        n=1,
+        size="1024x1024",
+        auth_token=None,
+    ):
+        validate_string(prompt, strip_whitespace=False)
+        model = "gpt-image-1"
+        path = "/proxy/openai/images/generations"
+        content_type = "application/json"
+        request_class = OpenAIImageGenerationRequest
+        img_binaries = []
+        mask_binary = None
+        files = []
+
+        if image is not None:
+            path = "/proxy/openai/images/edits"
+            request_class = OpenAIImageEditRequest
+            content_type = "multipart/form-data"
+
+            batch_size = image.shape[0]
+
+            for i in range(batch_size):
+                single_image = image[i : i + 1]
+                scaled_image = downscale_image_tensor(single_image).squeeze()
+
+                image_np = (scaled_image.numpy() * 255).astype(np.uint8)
+                img = Image.fromarray(image_np)
+                img_byte_arr = io.BytesIO()
+                img.save(img_byte_arr, format="PNG")
+                img_byte_arr.seek(0)
+                img_binary = img_byte_arr
+                img_binary.name = f"image_{i}.png"
+
+                img_binaries.append(img_binary)
+                if batch_size == 1:
+                    files.append(("image", img_binary))
+                else:
+                    files.append(("image[]", img_binary))
+
+        if mask is not None:
+            if image is None:
+                raise Exception("Cannot use a mask without an input image")
+            if image.shape[0] != 1:
+                raise Exception("Cannot use a mask with multiple images")
+            if mask.shape[1:] != image.shape[1:-1]:
+                raise Exception("Mask and Image must be the same size")
+            batch, height, width = mask.shape
+            rgba_mask = torch.zeros(height, width, 4, device="cpu")
+            rgba_mask[:, :, 3] = 1 - mask.squeeze().cpu()
+
+            scaled_mask = downscale_image_tensor(rgba_mask.unsqueeze(0)).squeeze()
+
+            mask_np = (scaled_mask.numpy() * 255).astype(np.uint8)
+            mask_img = Image.fromarray(mask_np)
+            mask_img_byte_arr = io.BytesIO()
+            mask_img.save(mask_img_byte_arr, format="PNG")
+            mask_img_byte_arr.seek(0)
+            mask_binary = mask_img_byte_arr
+            mask_binary.name = "mask.png"
+            files.append(("mask", mask_binary))
+
+        # Build the operation
+        operation =
SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=request_class, + response_model=OpenAIImageGenerationResponse, + ), + request=request_class( + model=model, + prompt=prompt, + quality=quality, + background=background, + n=n, + seed=seed, + size=size, + ), + files=files if files else None, + content_type=content_type, + auth_token=auth_token, + ) + + response = operation.execute() + + img_tensor = validate_and_cast_response(response) + return (img_tensor,) + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "OpenAIDalle2": OpenAIDalle2, + "OpenAIDalle3": OpenAIDalle3, + "OpenAIGPTImage1": OpenAIGPTImage1, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "OpenAIDalle2": "OpenAI DALL·E 2", + "OpenAIDalle3": "OpenAI DALL·E 3", + "OpenAIGPTImage1": "OpenAI GPT Image 1", +} diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py new file mode 100644 index 000000000..ba4e8457d --- /dev/null +++ b/comfy_api_nodes/nodes_pika.py @@ -0,0 +1,749 @@ +""" +Pika x ComfyUI API Nodes + +Pika API docs: https://pika-827374fb.mintlify.app/api-reference +""" + +import io +from typing import Optional, TypeVar +import logging +import torch +import numpy as np +from comfy_api_nodes.apis import ( + PikaBodyGenerate22T2vGenerate22T2vPost, + PikaGenerateResponse, + PikaBodyGenerate22I2vGenerate22I2vPost, + PikaVideoResponse, + PikaBodyGenerate22C2vGenerate22PikascenesPost, + IngredientsMode, + PikaDurationEnum, + PikaResolutionEnum, + PikaBodyGeneratePikaffectsGeneratePikaffectsPost, + PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + PikaBodyGeneratePikaswapsGeneratePikaswapsPost, + PikaBodyGenerate22KeyframeGenerate22PikaframesPost, + Pikaffect, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + tensor_to_bytesio, + download_url_to_video_output, +) +from comfy_api_nodes.mapper_utils import model_field_to_node_input +from comfy_api.input_impl.video_types import VideoInput, VideoContainer, VideoCodec +from comfy_api.input_impl import VideoFromFile +from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeOptions + +R = TypeVar("R") + +PATH_PIKADDITIONS = "/proxy/pika/generate/pikadditions" +PATH_PIKASWAPS = "/proxy/pika/generate/pikaswaps" +PATH_PIKAFFECTS = "/proxy/pika/generate/pikaffects" + +PIKA_API_VERSION = "2.2" +PATH_TEXT_TO_VIDEO = f"/proxy/pika/generate/{PIKA_API_VERSION}/t2v" +PATH_IMAGE_TO_VIDEO = f"/proxy/pika/generate/{PIKA_API_VERSION}/i2v" +PATH_PIKAFRAMES = f"/proxy/pika/generate/{PIKA_API_VERSION}/pikaframes" +PATH_PIKASCENES = f"/proxy/pika/generate/{PIKA_API_VERSION}/pikascenes" + +PATH_VIDEO_GET = "/proxy/pika/videos" + + +class PikaApiError(Exception): + """Exception for Pika API errors.""" + + pass + + +def is_valid_video_response(response: PikaVideoResponse) -> bool: + """Check if the video response is valid.""" + return hasattr(response, "url") and response.url is not None + + +def is_valid_initial_response(response: PikaGenerateResponse) -> bool: + """Check if the initial response is valid.""" + return hasattr(response, "video_id") and response.video_id is not None + + +class PikaNodeBase(ComfyNodeABC): + """Base class for Pika nodes.""" + + @classmethod + def get_base_inputs_types( + cls, request_model + ) 
-> dict[str, tuple[IO, InputTypeOptions]]: + """Get the base required inputs types common to all Pika nodes.""" + return { + "prompt_text": model_field_to_node_input( + IO.STRING, + request_model, + "promptText", + multiline=True, + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + request_model, + "negativePrompt", + multiline=True, + ), + "seed": model_field_to_node_input( + IO.INT, + request_model, + "seed", + min=0, + max=0xFFFFFFFF, + control_after_generate=True, + ), + "resolution": model_field_to_node_input( + IO.COMBO, + request_model, + "resolution", + enum_type=PikaResolutionEnum, + ), + "duration": model_field_to_node_input( + IO.COMBO, + request_model, + "duration", + enum_type=PikaDurationEnum, + ), + } + + CATEGORY = "api node/video/Pika" + API_NODE = True + FUNCTION = "api_call" + RETURN_TYPES = ("VIDEO",) + + def poll_for_task_status( + self, task_id: str, auth_token: str + ) -> PikaGenerateResponse: + polling_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"{PATH_VIDEO_GET}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=PikaVideoResponse, + ), + completed_statuses=[ + "finished", + ], + failed_statuses=["failed", "cancelled"], + status_extractor=lambda response: ( + response.status.value if response.status else None + ), + progress_extractor=lambda response: ( + response.progress if hasattr(response, "progress") else None + ), + auth_token=auth_token, + ) + return polling_operation.execute() + + def execute_task( + self, + initial_operation: SynchronousOperation[R, PikaGenerateResponse], + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + """Executes the initial operation then polls for the task status until it is completed. + + Args: + initial_operation: The initial operation to execute. + auth_token: The authentication token to use for the API call. + + Returns: + A tuple containing the video file as a VIDEO output. + """ + initial_response = initial_operation.execute() + if not is_valid_initial_response(initial_response): + error_msg = f"Pika initial request failed. Code: {initial_response.code}, Message: {initial_response.message}, Data: {initial_response.data}" + logging.error(error_msg) + raise PikaApiError(error_msg) + + task_id = initial_response.video_id + final_response = self.poll_for_task_status(task_id, auth_token) + if not is_valid_video_response(final_response): + error_msg = ( + f"Pika task {task_id} succeeded but no video data found in response." + ) + logging.error(error_msg) + raise PikaApiError(error_msg) + + video_url = str(final_response.url) + logging.info("Pika task %s succeeded. Video URL: %s", task_id, video_url) + + return (download_url_to_video_output(video_url),) + + +class PikaImageToVideoV2_2(PikaNodeBase): + """Pika 2.2 Image to Video Node.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ( + IO.IMAGE, + {"tooltip": "The image to convert to video"}, + ), + **cls.get_base_inputs_types(PikaBodyGenerate22I2vGenerate22I2vPost), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + DESCRIPTION = "Sends an image and prompt to the Pika API v2.2 to generate a video." 
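+
+    # Sketch of the flow below, under assumed placeholder values (`img` as an
+    # IMAGE tensor, `token` as a Comfy auth token; the resolution and duration
+    # values are illustrative enum members): the image is PNG-encoded in
+    # memory, sent as multipart form data, and the returned video_id is
+    # polled until the task reaches a terminal state:
+    #
+    #   (video,) = PikaImageToVideoV2_2().api_call(
+    #       image=img, prompt_text="slow dolly zoom", negative_prompt="",
+    #       seed=0, resolution="1080p", duration=5, auth_token=token,
+    #   )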
+ + def api_call( + self, + image: torch.Tensor, + prompt_text: str, + negative_prompt: str, + seed: int, + resolution: str, + duration: int, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + # Convert image to BytesIO + image_bytes_io = tensor_to_bytesio(image) + image_bytes_io.seek(0) + + pika_files = {"image": ("image.png", image_bytes_io, "image/png")} + + # Prepare non-file data + pika_request_data = PikaBodyGenerate22I2vGenerate22I2vPost( + promptText=prompt_text, + negativePrompt=negative_prompt, + seed=seed, + resolution=resolution, + duration=duration, + ) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_IMAGE_TO_VIDEO, + method=HttpMethod.POST, + request_model=PikaBodyGenerate22I2vGenerate22I2vPost, + response_model=PikaGenerateResponse, + ), + request=pika_request_data, + files=pika_files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + + return self.execute_task(initial_operation, auth_token) + + +class PikaTextToVideoNodeV2_2(PikaNodeBase): + """Pika Text2Video v2.2 Node.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + **cls.get_base_inputs_types(PikaBodyGenerate22T2vGenerate22T2vPost), + "aspect_ratio": model_field_to_node_input( + IO.FLOAT, + PikaBodyGenerate22T2vGenerate22T2vPost, + "aspectRatio", + step=0.001, + min=0.4, + max=2.5, + default=1.7777777777777777, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + DESCRIPTION = "Sends a text prompt to the Pika API v2.2 to generate a video." + + def api_call( + self, + prompt_text: str, + negative_prompt: str, + seed: int, + resolution: str, + duration: int, + aspect_ratio: float, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_TEXT_TO_VIDEO, + method=HttpMethod.POST, + request_model=PikaBodyGenerate22T2vGenerate22T2vPost, + response_model=PikaGenerateResponse, + ), + request=PikaBodyGenerate22T2vGenerate22T2vPost( + promptText=prompt_text, + negativePrompt=negative_prompt, + seed=seed, + resolution=resolution, + duration=duration, + aspectRatio=aspect_ratio, + ), + auth_token=auth_token, + content_type="application/x-www-form-urlencoded", + ) + + return self.execute_task(initial_operation, auth_token) + + +class PikaScenesV2_2(PikaNodeBase): + """PikaScenes v2.2 Node.""" + + @classmethod + def INPUT_TYPES(cls): + image_ingredient_input = ( + IO.IMAGE, + {"tooltip": "Image that will be used as ingredient to create a video."}, + ) + return { + "required": { + **cls.get_base_inputs_types( + PikaBodyGenerate22C2vGenerate22PikascenesPost, + ), + "ingredients_mode": model_field_to_node_input( + IO.COMBO, + PikaBodyGenerate22C2vGenerate22PikascenesPost, + "ingredientsMode", + enum_type=IngredientsMode, + default="creative", + ), + "aspect_ratio": model_field_to_node_input( + IO.FLOAT, + PikaBodyGenerate22C2vGenerate22PikascenesPost, + "aspectRatio", + step=0.001, + min=0.4, + max=2.5, + default=1.7777777777777777, + ), + }, + "optional": { + "image_ingredient_1": image_ingredient_input, + "image_ingredient_2": image_ingredient_input, + "image_ingredient_3": image_ingredient_input, + "image_ingredient_4": image_ingredient_input, + "image_ingredient_5": image_ingredient_input, + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + DESCRIPTION = "Combine your images to create a video with the objects in them. 
Upload multiple images as ingredients and generate a high-quality video that incorporates all of them." + + def api_call( + self, + prompt_text: str, + negative_prompt: str, + seed: int, + resolution: str, + duration: int, + ingredients_mode: str, + aspect_ratio: float, + image_ingredient_1: Optional[torch.Tensor] = None, + image_ingredient_2: Optional[torch.Tensor] = None, + image_ingredient_3: Optional[torch.Tensor] = None, + image_ingredient_4: Optional[torch.Tensor] = None, + image_ingredient_5: Optional[torch.Tensor] = None, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + # Convert all passed images to BytesIO + all_image_bytes_io = [] + for image in [ + image_ingredient_1, + image_ingredient_2, + image_ingredient_3, + image_ingredient_4, + image_ingredient_5, + ]: + if image is not None: + image_bytes_io = tensor_to_bytesio(image) + image_bytes_io.seek(0) + all_image_bytes_io.append(image_bytes_io) + + pika_files = [ + ("images", (f"image_{i}.png", image_bytes_io, "image/png")) + for i, image_bytes_io in enumerate(all_image_bytes_io) + ] + + pika_request_data = PikaBodyGenerate22C2vGenerate22PikascenesPost( + ingredientsMode=ingredients_mode, + promptText=prompt_text, + negativePrompt=negative_prompt, + seed=seed, + resolution=resolution, + duration=duration, + aspectRatio=aspect_ratio, + ) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_PIKASCENES, + method=HttpMethod.POST, + request_model=PikaBodyGenerate22C2vGenerate22PikascenesPost, + response_model=PikaGenerateResponse, + ), + request=pika_request_data, + files=pika_files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + + return self.execute_task(initial_operation, auth_token) + + +class PikAdditionsNode(PikaNodeBase): + """Pika Pikadditions Node. Add an image into a video.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "video": (IO.VIDEO, {"tooltip": "The video to add an image to."}), + "image": (IO.IMAGE, {"tooltip": "The image to add to the video."}), + "prompt_text": model_field_to_node_input( + IO.STRING, + PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + "promptText", + multiline=True, + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + "negativePrompt", + multiline=True, + ), + "seed": model_field_to_node_input( + IO.INT, + PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + "seed", + min=0, + max=0xFFFFFFFF, + control_after_generate=True, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + DESCRIPTION = "Add any object or image into your video. Upload a video and specify what you’d like to add to create a seamlessly integrated result." 
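+
+    # Hypothetical direct call; `my_video`, `my_image`, and `token` are
+    # placeholders. The video is re-encoded to MP4/H.264 in memory and sent
+    # together with the PNG image in a single multipart request:
+    #
+    #   (video,) = PikAdditionsNode().api_call(
+    #       video=my_video, image=my_image,
+    #       prompt_text="add a hot air balloon in the sky",
+    #       negative_prompt="", seed=0, auth_token=token,
+    #   )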
+ + def api_call( + self, + video: VideoInput, + image: torch.Tensor, + prompt_text: str, + negative_prompt: str, + seed: int, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + # Convert video to BytesIO + video_bytes_io = io.BytesIO() + video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) + video_bytes_io.seek(0) + + # Convert image to BytesIO + image_bytes_io = tensor_to_bytesio(image) + image_bytes_io.seek(0) + + pika_files = [ + ("video", ("video.mp4", video_bytes_io, "video/mp4")), + ("image", ("image.png", image_bytes_io, "image/png")), + ] + + # Prepare non-file data + pika_request_data = PikaBodyGeneratePikadditionsGeneratePikadditionsPost( + promptText=prompt_text, + negativePrompt=negative_prompt, + seed=seed, + ) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_PIKADDITIONS, + method=HttpMethod.POST, + request_model=PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + response_model=PikaGenerateResponse, + ), + request=pika_request_data, + files=pika_files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + + return self.execute_task(initial_operation, auth_token) + + +class PikaSwapsNode(PikaNodeBase): + """Pika Pikaswaps Node.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "video": (IO.VIDEO, {"tooltip": "The video to swap an object in."}), + "image": ( + IO.IMAGE, + { + "tooltip": "The image used to replace the masked object in the video." + }, + ), + "mask": ( + IO.MASK, + {"tooltip": "Use the mask to define areas in the video to replace"}, + ), + "prompt_text": model_field_to_node_input( + IO.STRING, + PikaBodyGeneratePikaswapsGeneratePikaswapsPost, + "promptText", + multiline=True, + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + PikaBodyGeneratePikaswapsGeneratePikaswapsPost, + "negativePrompt", + multiline=True, + ), + "seed": model_field_to_node_input( + IO.INT, + PikaBodyGeneratePikaswapsGeneratePikaswapsPost, + "seed", + min=0, + max=0xFFFFFFFF, + control_after_generate=True, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + DESCRIPTION = "Swap out any object or region of your video with a new image or object. Define areas to replace either with a mask or coordinates." 
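+
+    # A small sketch of preparing the mask input (assumption: ComfyUI MASK
+    # tensors are [B, H, W] floats in [0, 1]); api_call() rounds the mask to
+    # hard 0/1 values before upload, so soft edges are not preserved:
+    #
+    #   import torch
+    #   mask = torch.zeros(1, 512, 512)
+    #   mask[:, 128:384, 128:384] = 1.0  # mark the centre region for replacement
+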
+    RETURN_TYPES = ("VIDEO",)
+
+    def api_call(
+        self,
+        video: VideoInput,
+        image: torch.Tensor,
+        mask: torch.Tensor,
+        prompt_text: str,
+        negative_prompt: str,
+        seed: int,
+        auth_token: Optional[str] = None,
+    ) -> tuple[VideoFromFile]:
+        # Convert video to BytesIO
+        video_bytes_io = io.BytesIO()
+        video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264)
+        video_bytes_io.seek(0)
+
+        # Binarize the (B, H, W) mask and encode it as an actual PNG file
+        from PIL import Image  # Pillow is a ComfyUI dependency
+        mask = torch.round(mask[0] if mask.ndim == 3 else mask)
+        mask_np = (mask * 255.0).to(torch.uint8).cpu().numpy()
+        mask_bytes_io = io.BytesIO()
+        Image.fromarray(mask_np, mode="L").convert("RGB").save(mask_bytes_io, format="PNG")
+        mask_bytes_io.seek(0)
+
+        # Convert image to BytesIO
+        image_bytes_io = tensor_to_bytesio(image)
+        image_bytes_io.seek(0)
+
+        pika_files = [
+            ("video", ("video.mp4", video_bytes_io, "video/mp4")),
+            ("image", ("image.png", image_bytes_io, "image/png")),
+            ("modifyRegionMask", ("mask.png", mask_bytes_io, "image/png")),
+        ]
+
+        # Prepare non-file data
+        pika_request_data = PikaBodyGeneratePikaswapsGeneratePikaswapsPost(
+            promptText=prompt_text,
+            negativePrompt=negative_prompt,
+            seed=seed,
+        )
+
+        initial_operation = SynchronousOperation(
+            endpoint=ApiEndpoint(
+                path=PATH_PIKASWAPS,
+                method=HttpMethod.POST,
+                request_model=PikaBodyGeneratePikaswapsGeneratePikaswapsPost,
+                response_model=PikaGenerateResponse,
+            ),
+            request=pika_request_data,
+            files=pika_files,
+            content_type="multipart/form-data",
+            auth_token=auth_token,
+        )
+
+        return self.execute_task(initial_operation, auth_token)
+
+
+class PikaffectsNode(PikaNodeBase):
+    """Pika Pikaffects Node."""
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": (
+                    IO.IMAGE,
+                    {"tooltip": "The reference image to apply the Pikaffect to."},
+                ),
+                "pikaffect": model_field_to_node_input(
+                    IO.COMBO,
+                    PikaBodyGeneratePikaffectsGeneratePikaffectsPost,
+                    "pikaffect",
+                    enum_type=Pikaffect,
+                    default="Cake-ify",
+                ),
+                "prompt_text": model_field_to_node_input(
+                    IO.STRING,
+                    PikaBodyGeneratePikaffectsGeneratePikaffectsPost,
+                    "promptText",
+                    multiline=True,
+                ),
+                "negative_prompt": model_field_to_node_input(
+                    IO.STRING,
+                    PikaBodyGeneratePikaffectsGeneratePikaffectsPost,
+                    "negativePrompt",
+                    multiline=True,
+                ),
+                "seed": model_field_to_node_input(
+                    IO.INT,
+                    PikaBodyGeneratePikaffectsGeneratePikaffectsPost,
+                    "seed",
+                    min=0,
+                    max=0xFFFFFFFF,
+                    control_after_generate=True,
+                ),
+            },
+            "hidden": {
+                "auth_token": "AUTH_TOKEN_COMFY_ORG",
+            },
+        }
+
+    DESCRIPTION = "Generate a video with a specific Pikaffect.
Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear" + + def api_call( + self, + image: torch.Tensor, + pikaffect: str, + prompt_text: str, + negative_prompt: str, + seed: int, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_PIKAFFECTS, + method=HttpMethod.POST, + request_model=PikaBodyGeneratePikaffectsGeneratePikaffectsPost, + response_model=PikaGenerateResponse, + ), + request=PikaBodyGeneratePikaffectsGeneratePikaffectsPost( + pikaffect=pikaffect, + promptText=prompt_text, + negativePrompt=negative_prompt, + seed=seed, + ), + files={"image": ("image.png", tensor_to_bytesio(image), "image/png")}, + content_type="multipart/form-data", + auth_token=auth_token, + ) + + return self.execute_task(initial_operation, auth_token) + + +class PikaStartEndFrameNode2_2(PikaNodeBase): + """PikaFrames v2.2 Node.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_start": (IO.IMAGE, {"tooltip": "The first image to combine."}), + "image_end": (IO.IMAGE, {"tooltip": "The last image to combine."}), + **cls.get_base_inputs_types( + PikaBodyGenerate22KeyframeGenerate22PikaframesPost + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + DESCRIPTION = "Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them." + + def api_call( + self, + image_start: torch.Tensor, + image_end: torch.Tensor, + prompt_text: str, + negative_prompt: str, + seed: int, + resolution: str, + duration: int, + auth_token: Optional[str] = None, + ) -> tuple[VideoFromFile]: + + pika_files = [ + ( + "keyFrames", + ("image_start.png", tensor_to_bytesio(image_start), "image/png"), + ), + ("keyFrames", ("image_end.png", tensor_to_bytesio(image_end), "image/png")), + ] + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_PIKAFRAMES, + method=HttpMethod.POST, + request_model=PikaBodyGenerate22KeyframeGenerate22PikaframesPost, + response_model=PikaGenerateResponse, + ), + request=PikaBodyGenerate22KeyframeGenerate22PikaframesPost( + promptText=prompt_text, + negativePrompt=negative_prompt, + seed=seed, + resolution=resolution, + duration=duration, + ), + files=pika_files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + + return self.execute_task(initial_operation, auth_token) + + +NODE_CLASS_MAPPINGS = { + "PikaImageToVideoNode2_2": PikaImageToVideoV2_2, + "PikaTextToVideoNode2_2": PikaTextToVideoNodeV2_2, + "PikaScenesV2_2": PikaScenesV2_2, + "Pikadditions": PikAdditionsNode, + "Pikaswaps": PikaSwapsNode, + "Pikaffects": PikaffectsNode, + "PikaStartEndFrameNode2_2": PikaStartEndFrameNode2_2, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "PikaImageToVideoNode2_2": "Pika Image to Video", + "PikaTextToVideoNode2_2": "Pika Text to Video", + "PikaScenesV2_2": "Pika Scenes (Video Image Composition)", + "Pikadditions": "Pikadditions (Video Object Insertion)", + "Pikaswaps": "Pika Swaps (Video Object Replacement)", + "Pikaffects": "Pikaffects (Video Effects)", + "PikaStartEndFrameNode2_2": "Pika Start and End Frame to Video", +} diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py new file mode 100644 index 000000000..dbb90c1dd --- /dev/null +++ b/comfy_api_nodes/nodes_pixverse.py @@ -0,0 +1,492 @@ +from inspect import 
cleandoc + +from comfy_api_nodes.apis.pixverse_api import ( + PixverseTextVideoRequest, + PixverseImageVideoRequest, + PixverseTransitionVideoRequest, + PixverseImageUploadResponse, + PixverseVideoResponse, + PixverseGenerationStatusResponse, + PixverseAspectRatio, + PixverseQuality, + PixverseDuration, + PixverseMotionMode, + PixverseStatus, + PixverseIO, + pixverse_templates, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + tensor_to_bytesio, + validate_string, +) +from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from comfy_api.input_impl import VideoFromFile + +import torch +import requests +from io import BytesIO + + +def upload_image_to_pixverse(image: torch.Tensor, auth_token=None): + # first, upload image to Pixverse and get image id to use in actual generation call + files = { + "image": tensor_to_bytesio(image) + } + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/pixverse/image/upload", + method=HttpMethod.POST, + request_model=EmptyRequest, + response_model=PixverseImageUploadResponse, + ), + request=EmptyRequest(), + files=files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + response_upload: PixverseImageUploadResponse = operation.execute() + + if response_upload.Resp is None: + raise Exception(f"PixVerse image upload request failed: '{response_upload.ErrMsg}'") + + return response_upload.Resp.img_id + + +class PixverseTemplateNode: + """ + Select template for PixVerse Video generation. + """ + + RETURN_TYPES = (PixverseIO.TEMPLATE,) + RETURN_NAMES = ("pixverse_template",) + FUNCTION = "create_template" + CATEGORY = "api node/video/PixVerse" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "template": (list(pixverse_templates.keys()), ), + } + } + + def create_template(self, template: str): + template_id = pixverse_templates.get(template, None) + if template_id is None: + raise Exception(f"Template '{template}' is not recognized.") + # just return the integer + return (template_id,) + + +class PixverseTextToVideoNode(ComfyNodeABC): + """ + Generates videos synchronously based on prompt and output_size. + """ + + RETURN_TYPES = (IO.VIDEO,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/video/PixVerse" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the video generation", + }, + ), + "aspect_ratio": ( + [ratio.value for ratio in PixverseAspectRatio], + ), + "quality": ( + [resolution.value for resolution in PixverseQuality], + { + "default": PixverseQuality.res_540p, + }, + ), + "duration_seconds": ([dur.value for dur in PixverseDuration],), + "motion_mode": ([mode.value for mode in PixverseMotionMode],), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2147483647, + "control_after_generate": True, + "tooltip": "Seed for video generation.", + }, + ), + }, + "optional": { + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + "pixverse_template": ( + PixverseIO.TEMPLATE, + { + "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node." 
+ } + ) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + prompt: str, + aspect_ratio: str, + quality: str, + duration_seconds: int, + motion_mode: str, + seed, + negative_prompt: str=None, + pixverse_template: int=None, + auth_token=None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False) + # 1080p is limited to 5 seconds duration + # only normal motion_mode supported for 1080p or for non-5 second duration + if quality == PixverseQuality.res_1080p: + motion_mode = PixverseMotionMode.normal + duration_seconds = PixverseDuration.dur_5 + elif duration_seconds != PixverseDuration.dur_5: + motion_mode = PixverseMotionMode.normal + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/pixverse/video/text/generate", + method=HttpMethod.POST, + request_model=PixverseTextVideoRequest, + response_model=PixverseVideoResponse, + ), + request=PixverseTextVideoRequest( + prompt=prompt, + aspect_ratio=aspect_ratio, + quality=quality, + duration=duration_seconds, + motion_mode=motion_mode, + negative_prompt=negative_prompt if negative_prompt else None, + template_id=pixverse_template, + seed=seed, + ), + auth_token=auth_token, + ) + response_api = operation.execute() + + if response_api.Resp is None: + raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=PixverseGenerationStatusResponse, + ), + completed_statuses=[PixverseStatus.successful], + failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], + status_extractor=lambda x: x.Resp.status, + auth_token=auth_token, + ) + response_poll = operation.execute() + + vid_response = requests.get(response_poll.Resp.url) + return (VideoFromFile(BytesIO(vid_response.content)),) + + +class PixverseImageToVideoNode(ComfyNodeABC): + """ + Generates videos synchronously based on prompt and output_size. + """ + + RETURN_TYPES = (IO.VIDEO,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/video/PixVerse" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ( + IO.IMAGE, + ), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the video generation", + }, + ), + "quality": ( + [resolution.value for resolution in PixverseQuality], + { + "default": PixverseQuality.res_540p, + }, + ), + "duration_seconds": ([dur.value for dur in PixverseDuration],), + "motion_mode": ([mode.value for mode in PixverseMotionMode],), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2147483647, + "control_after_generate": True, + "tooltip": "Seed for video generation.", + }, + ), + }, + "optional": { + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + "pixverse_template": ( + PixverseIO.TEMPLATE, + { + "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node." 
+ } + ) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + image: torch.Tensor, + prompt: str, + quality: str, + duration_seconds: int, + motion_mode: str, + seed, + negative_prompt: str=None, + pixverse_template: int=None, + auth_token=None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False) + img_id = upload_image_to_pixverse(image, auth_token=auth_token) + + # 1080p is limited to 5 seconds duration + # only normal motion_mode supported for 1080p or for non-5 second duration + if quality == PixverseQuality.res_1080p: + motion_mode = PixverseMotionMode.normal + duration_seconds = PixverseDuration.dur_5 + elif duration_seconds != PixverseDuration.dur_5: + motion_mode = PixverseMotionMode.normal + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/pixverse/video/img/generate", + method=HttpMethod.POST, + request_model=PixverseImageVideoRequest, + response_model=PixverseVideoResponse, + ), + request=PixverseImageVideoRequest( + img_id=img_id, + prompt=prompt, + quality=quality, + duration=duration_seconds, + motion_mode=motion_mode, + negative_prompt=negative_prompt if negative_prompt else None, + template_id=pixverse_template, + seed=seed, + ), + auth_token=auth_token, + ) + response_api = operation.execute() + + if response_api.Resp is None: + raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=PixverseGenerationStatusResponse, + ), + completed_statuses=[PixverseStatus.successful], + failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], + status_extractor=lambda x: x.Resp.status, + auth_token=auth_token, + ) + response_poll = operation.execute() + + vid_response = requests.get(response_poll.Resp.url) + return (VideoFromFile(BytesIO(vid_response.content)),) + + +class PixverseTransitionVideoNode(ComfyNodeABC): + """ + Generates videos synchronously based on prompt and output_size. 
+ """ + + RETURN_TYPES = (IO.VIDEO,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/video/PixVerse" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "first_frame": ( + IO.IMAGE, + ), + "last_frame": ( + IO.IMAGE, + ), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the video generation", + }, + ), + "quality": ( + [resolution.value for resolution in PixverseQuality], + { + "default": PixverseQuality.res_540p, + }, + ), + "duration_seconds": ([dur.value for dur in PixverseDuration],), + "motion_mode": ([mode.value for mode in PixverseMotionMode],), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 2147483647, + "control_after_generate": True, + "tooltip": "Seed for video generation.", + }, + ), + }, + "optional": { + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + first_frame: torch.Tensor, + last_frame: torch.Tensor, + prompt: str, + quality: str, + duration_seconds: int, + motion_mode: str, + seed, + negative_prompt: str=None, + auth_token=None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False) + first_frame_id = upload_image_to_pixverse(first_frame, auth_token=auth_token) + last_frame_id = upload_image_to_pixverse(last_frame, auth_token=auth_token) + + # 1080p is limited to 5 seconds duration + # only normal motion_mode supported for 1080p or for non-5 second duration + if quality == PixverseQuality.res_1080p: + motion_mode = PixverseMotionMode.normal + duration_seconds = PixverseDuration.dur_5 + elif duration_seconds != PixverseDuration.dur_5: + motion_mode = PixverseMotionMode.normal + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/pixverse/video/transition/generate", + method=HttpMethod.POST, + request_model=PixverseTransitionVideoRequest, + response_model=PixverseVideoResponse, + ), + request=PixverseTransitionVideoRequest( + first_frame_img=first_frame_id, + last_frame_img=last_frame_id, + prompt=prompt, + quality=quality, + duration=duration_seconds, + motion_mode=motion_mode, + negative_prompt=negative_prompt if negative_prompt else None, + seed=seed, + ), + auth_token=auth_token, + ) + response_api = operation.execute() + + if response_api.Resp is None: + raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=PixverseGenerationStatusResponse, + ), + completed_statuses=[PixverseStatus.successful], + failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], + status_extractor=lambda x: x.Resp.status, + auth_token=auth_token, + ) + response_poll = operation.execute() + + vid_response = requests.get(response_poll.Resp.url) + return (VideoFromFile(BytesIO(vid_response.content)),) + + +NODE_CLASS_MAPPINGS = { + "PixverseTextToVideoNode": PixverseTextToVideoNode, + "PixverseImageToVideoNode": PixverseImageToVideoNode, + "PixverseTransitionVideoNode": PixverseTransitionVideoNode, + "PixverseTemplateNode": PixverseTemplateNode, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "PixverseTextToVideoNode": "PixVerse Text to 
Video", + "PixverseImageToVideoNode": "PixVerse Image to Video", + "PixverseTransitionVideoNode": "PixVerse Transition Video", + "PixverseTemplateNode": "PixVerse Template", +} diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py new file mode 100644 index 000000000..994f377d1 --- /dev/null +++ b/comfy_api_nodes/nodes_recraft.py @@ -0,0 +1,1217 @@ +from __future__ import annotations +from inspect import cleandoc +from comfy.utils import ProgressBar +from comfy.comfy_types.node_typing import IO +from comfy_api_nodes.apis.recraft_api import ( + RecraftImageGenerationRequest, + RecraftImageGenerationResponse, + RecraftImageSize, + RecraftModel, + RecraftStyle, + RecraftStyleV3, + RecraftColor, + RecraftColorChain, + RecraftControls, + RecraftIO, + get_v3_substyles, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + bytesio_to_image_tensor, + download_url_to_bytesio, + tensor_to_bytesio, + resize_mask_to_image, + validate_string, +) +import folder_paths +import json +import os +import torch +from io import BytesIO +from PIL import UnidentifiedImageError + + +def handle_recraft_file_request( + image: torch.Tensor, + path: str, + mask: torch.Tensor=None, + total_pixels=4096*4096, + timeout=1024, + request=None, + auth_token=None + ) -> list[BytesIO]: + """ + Handle sending common Recraft file-only request to get back file bytes. + """ + if request is None: + request = EmptyRequest() + + files = { + 'image': tensor_to_bytesio(image, total_pixels=total_pixels).read() + } + if mask is not None: + files['mask'] = tensor_to_bytesio(mask, total_pixels=total_pixels).read() + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=type(request), + response_model=RecraftImageGenerationResponse, + ), + request=request, + files=files, + content_type="multipart/form-data", + auth_token=auth_token, + multipart_parser=recraft_multipart_parser, + ) + response: RecraftImageGenerationResponse = operation.execute() + all_bytesio = [] + if response.image is not None: + all_bytesio.append(download_url_to_bytesio(response.image.url, timeout=timeout)) + else: + for data in response.data: + all_bytesio.append(download_url_to_bytesio(data.url, timeout=timeout)) + + return all_bytesio + + +def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, converted_to_check: list[list]=None, is_list=False) -> dict: + """ + Formats data such that multipart/form-data will work with requests library + when both files and data are present. + + The OpenAI client that Recraft uses has a bizarre way of serializing lists: + + It does NOT keep track of indeces of each list, so for background_color, that must be serialized as: + 'background_color[rgb][]' = [0, 0, 255] + where the array is assigned to a key that has '[]' at the end, to signal it's an array. + + This has the consequence of nested lists having the exact same key, forcing arrays to merge; all colors inputs fall under the same key: + if 1 color -> 'controls[colors][][rgb][]' = [0, 0, 255] + if 2 colors -> 'controls[colors][][rgb][]' = [0, 0, 255, 255, 0, 0] + if 3 colors -> 'controls[colors][][rgb][]' = [0, 0, 255, 255, 0, 0, 0, 255, 0] + etc. + Whoever made this serialization up at OpenAI added the constraint that lists must be of uniform length on objects of same 'type'. 
+ """ + # Modification of a function that handled a different type of multipart parsing, big ups: + # https://gist.github.com/kazqvaizer/4cebebe5db654a414132809f9f88067b + + def handle_converted_lists(data, parent_key, lists_to_check=tuple[list]): + # if list already exists exists, just extend list with data + for check_list in lists_to_check: + for conv_tuple in check_list: + if conv_tuple[0] == parent_key and type(conv_tuple[1]) is list: + conv_tuple[1].append(formatter(data)) + return True + return False + + if converted_to_check is None: + converted_to_check = [] + + + if formatter is None: + formatter = lambda v: v # Multipart representation of value + + if type(data) is not dict: + # if list already exists exists, just extend list with data + added = handle_converted_lists(data, parent_key, converted_to_check) + if added: + return {} + # otherwise if is_list, create new list with data + if is_list: + return {parent_key: [formatter(data)]} + # return new key with data + return {parent_key: formatter(data)} + + converted = [] + next_check = [converted] + next_check.extend(converted_to_check) + + for key, value in data.items(): + current_key = key if parent_key is None else f"{parent_key}[{key}]" + if type(value) is dict: + converted.extend(recraft_multipart_parser(value, current_key, formatter, next_check).items()) + elif type(value) is list: + for ind, list_value in enumerate(value): + iter_key = f"{current_key}[]" + converted.extend(recraft_multipart_parser(list_value, iter_key, formatter, next_check, is_list=True).items()) + else: + converted.append((current_key, formatter(value))) + + return dict(converted) + + +class handle_recraft_image_output: + """ + Catch an exception related to receiving SVG data instead of image, when Infinite Style Library style_id is in use. + """ + def __init__(self): + pass + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None and exc_type is UnidentifiedImageError: + raise Exception("Received output data was not an image; likely an SVG. If you used style_id, make sure it is not a Vector art style.") + + +class SVG: + """ + Stores SVG representations via a list of BytesIO objects. + """ + def __init__(self, data: list[BytesIO]): + self.data = data + + def combine(self, other: SVG): + return SVG(self.data + other.data) + + @staticmethod + def combine_all(svgs: list[SVG]): + all_svgs = [] + for svg in svgs: + all_svgs.extend(svg.data) + return SVG(all_svgs) + + +class SaveSVGNode: + """ + Save SVG files on disk. + """ + + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + RETURN_TYPES = () + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "save_svg" + CATEGORY = "api node/image/Recraft" + OUTPUT_NODE = True + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "svg": (RecraftIO.SVG,), + "filename_prefix": ("STRING", {"default": "svg/ComfyUI", "tooltip": "The prefix for the file to save. 
+
+
+class SaveSVGNode:
+    """
+    Save SVG files on disk.
+    """
+
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""
+
+    RETURN_TYPES = ()
+    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
+    FUNCTION = "save_svg"
+    CATEGORY = "api node/image/Recraft"
+    OUTPUT_NODE = True
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "svg": (RecraftIO.SVG,),
+                "filename_prefix": ("STRING", {"default": "svg/ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."})
+            },
+            "hidden": {
+                "prompt": "PROMPT",
+                "extra_pnginfo": "EXTRA_PNGINFO"
+            }
+        }
+
+    def save_svg(self, svg: SVG, filename_prefix="svg/ComfyUI", prompt=None, extra_pnginfo=None):
+        filename_prefix += self.prefix_append
+        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+        results = list()
+
+        # Prepare metadata JSON
+        metadata_dict = {}
+        if prompt is not None:
+            metadata_dict["prompt"] = prompt
+        if extra_pnginfo is not None:
+            metadata_dict.update(extra_pnginfo)
+
+        # Convert metadata to JSON string
+        metadata_json = json.dumps(metadata_dict, indent=2) if metadata_dict else None
+
+        for batch_number, svg_bytes in enumerate(svg.data):
+            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
+            file = f"{filename_with_batch_num}_{counter:05}_.svg"
+
+            # Read SVG content
+            svg_bytes.seek(0)
+            svg_content = svg_bytes.read().decode('utf-8')
+
+            # Inject metadata if available
+            if metadata_json:
+                # Create metadata element with CDATA section
+                metadata_element = f"""  <metadata>
+    <![CDATA[
+{metadata_json}
+    ]]>
+  </metadata>
+"""
+                # Insert metadata after opening svg tag using regex
+                import re
+                svg_content = re.sub(r'(<svg[^>]*>)', r'\1\n' + metadata_element, svg_content)
+
+            # Write the modified SVG to file
+            with open(os.path.join(full_output_folder, file), 'wb') as svg_file:
+                svg_file.write(svg_content.encode('utf-8'))
+
+            results.append({
+                "filename": file,
+                "subfolder": subfolder,
+                "type": self.type
+            })
+            counter += 1
+        return { "ui": { "images": results } }
+
+
+class RecraftColorRGBNode:
+    """
+    Create Recraft Color by choosing specific RGB values.
+    """
+
+    RETURN_TYPES = (RecraftIO.COLOR,)
+    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
+    RETURN_NAMES = ("recraft_color",)
+    FUNCTION = "create_color"
+    CATEGORY = "api node/image/Recraft"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "r": (IO.INT, {
+                    "default": 0,
+                    "min": 0,
+                    "max": 255,
+                    "tooltip": "Red value of color."
+                }),
+                "g": (IO.INT, {
+                    "default": 0,
+                    "min": 0,
+                    "max": 255,
+                    "tooltip": "Green value of color."
+                }),
+                "b": (IO.INT, {
+                    "default": 0,
+                    "min": 0,
+                    "max": 255,
+                    "tooltip": "Blue value of color."
+                }),
+            },
+            "optional": {
+                "recraft_color": (RecraftIO.COLOR,),
+            }
+        }
+
+    def create_color(self, r: int, g: int, b: int, recraft_color: RecraftColorChain=None):
+        recraft_color = recraft_color.clone() if recraft_color else RecraftColorChain()
+        recraft_color.add(RecraftColor(r, g, b))
+        return (recraft_color, )
+
+
+class RecraftControlsNode:
+    """
+    Create Recraft Controls for customizing Recraft generation.
+    """
+
+    RETURN_TYPES = (RecraftIO.CONTROLS,)
+    RETURN_NAMES = ("recraft_controls",)
+    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
+    FUNCTION = "create_controls"
+    CATEGORY = "api node/image/Recraft"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+            },
+            "optional": {
+                "colors": (RecraftIO.COLOR,),
+                "background_color": (RecraftIO.COLOR,),
+            }
+        }
+
+    def create_controls(self, colors: RecraftColorChain=None, background_color: RecraftColorChain=None):
+        return (RecraftControls(colors=colors, background_color=background_color), )
+
+
+class RecraftStyleV3RealisticImageNode:
+    """
+    Select realistic_image style and optional substyle.
+ """ + + RETURN_TYPES = (RecraftIO.STYLEV3,) + RETURN_NAMES = ("recraft_style",) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "create_style" + CATEGORY = "api node/image/Recraft" + + RECRAFT_STYLE = RecraftStyleV3.realistic_image + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "substyle": (get_v3_substyles(s.RECRAFT_STYLE),), + } + } + + def create_style(self, substyle: str): + if substyle == "None": + substyle = None + return (RecraftStyle(self.RECRAFT_STYLE, substyle),) + + +class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode): + """ + Select digital_illustration style and optional substyle. + """ + + RECRAFT_STYLE = RecraftStyleV3.digital_illustration + + +class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode): + """ + Select vector_illustration style and optional substyle. + """ + + RECRAFT_STYLE = RecraftStyleV3.vector_illustration + + +class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode): + """ + Select vector_illustration style and optional substyle. + """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "substyle": (get_v3_substyles(s.RECRAFT_STYLE, include_none=False),), + } + } + + RECRAFT_STYLE = RecraftStyleV3.logo_raster + + +class RecraftStyleInfiniteStyleLibrary: + """ + Select style based on preexisting UUID from Recraft's Infinite Style Library. + """ + + RETURN_TYPES = (RecraftIO.STYLEV3,) + RETURN_NAMES = ("recraft_style",) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "create_style" + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "style_id": (IO.STRING, { + "default": "", + "tooltip": "UUID of style from Infinite Style Library.", + }) + } + } + + def create_style(self, style_id: str): + if not style_id: + raise Exception("The style_id input cannot be empty.") + return (RecraftStyle(style_id=style_id),) + + +class RecraftTextToImageNode: + """ + Generates images synchronously based on prompt and resolution. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation.", + }, + ), + "size": ( + [res.value for res in RecraftImageSize], + { + "default": RecraftImageSize.res_1024x1024, + "tooltip": "The size of the generated image.", + }, + ), + "n": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 6, + "tooltip": "The number of images to generate.", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + }, + "optional": { + "recraft_style": (RecraftIO.STYLEV3,), + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + "recraft_controls": ( + RecraftIO.CONTROLS, + { + "tooltip": "Optional additional controls over the generation via the Recraft Controls node." 
+ }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + prompt: str, + size: str, + n: int, + seed, + recraft_style: RecraftStyle = None, + negative_prompt: str = None, + recraft_controls: RecraftControls = None, + auth_token=None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False, max_length=1000) + default_style = RecraftStyle(RecraftStyleV3.realistic_image) + if recraft_style is None: + recraft_style = default_style + + controls_api = None + if recraft_controls: + controls_api = recraft_controls.create_api_model() + + if not negative_prompt: + negative_prompt = None + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/recraft/image_generation", + method=HttpMethod.POST, + request_model=RecraftImageGenerationRequest, + response_model=RecraftImageGenerationResponse, + ), + request=RecraftImageGenerationRequest( + prompt=prompt, + negative_prompt=negative_prompt, + model=RecraftModel.recraftv3, + size=size, + n=n, + style=recraft_style.style, + substyle=recraft_style.substyle, + style_id=recraft_style.style_id, + controls=controls_api, + ), + auth_token=auth_token, + ) + response: RecraftImageGenerationResponse = operation.execute() + images = [] + for data in response.data: + with handle_recraft_image_output(): + image = bytesio_to_image_tensor( + download_url_to_bytesio(data.url, timeout=1024) + ) + if len(image.shape) < 4: + image = image.unsqueeze(0) + images.append(image) + output_image = torch.cat(images, dim=0) + + return (output_image,) + + +class RecraftImageToImageNode: + """ + Modify image based on prompt and strength. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE, ), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation.", + }, + ), + "n": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 6, + "tooltip": "The number of images to generate.", + }, + ), + "strength": ( + IO.FLOAT, + { + "default": 0.5, + "min": 0.0, + "max": 1.0, + "step": 0.01, + "tooltip": "Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity." + } + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + }, + "optional": { + "recraft_style": (RecraftIO.STYLEV3,), + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + "recraft_controls": ( + RecraftIO.CONTROLS, + { + "tooltip": "Optional additional controls over the generation via the Recraft Controls node." 
+ }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + image: torch.Tensor, + prompt: str, + n: int, + strength: float, + seed, + auth_token=None, + recraft_style: RecraftStyle = None, + negative_prompt: str = None, + recraft_controls: RecraftControls = None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False, max_length=1000) + default_style = RecraftStyle(RecraftStyleV3.realistic_image) + if recraft_style is None: + recraft_style = default_style + + controls_api = None + if recraft_controls: + controls_api = recraft_controls.create_api_model() + + if not negative_prompt: + negative_prompt = None + + request = RecraftImageGenerationRequest( + prompt=prompt, + negative_prompt=negative_prompt, + model=RecraftModel.recraftv3, + n=n, + strength=round(strength, 2), + style=recraft_style.style, + substyle=recraft_style.substyle, + style_id=recraft_style.style_id, + controls=controls_api, + ) + + images = [] + total = image.shape[0] + pbar = ProgressBar(total) + for i in range(total): + sub_bytes = handle_recraft_file_request( + image=image[i], + path="/proxy/recraft/images/imageToImage", + request=request, + auth_token=auth_token, + ) + with handle_recraft_image_output(): + images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) + pbar.update(1) + + images_tensor = torch.cat(images, dim=0) + return (images_tensor, ) + + +class RecraftImageInpaintingNode: + """ + Modify image based on prompt and mask. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE, ), + "mask": (IO.MASK, ), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation.", + }, + ), + "n": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 6, + "tooltip": "The number of images to generate.", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + }, + "optional": { + "recraft_style": (RecraftIO.STYLEV3,), + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + image: torch.Tensor, + mask: torch.Tensor, + prompt: str, + n: int, + seed, + auth_token=None, + recraft_style: RecraftStyle = None, + negative_prompt: str = None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False, max_length=1000) + default_style = RecraftStyle(RecraftStyleV3.realistic_image) + if recraft_style is None: + recraft_style = default_style + + if not negative_prompt: + negative_prompt = None + + request = RecraftImageGenerationRequest( + prompt=prompt, + negative_prompt=negative_prompt, + model=RecraftModel.recraftv3, + n=n, + style=recraft_style.style, + substyle=recraft_style.substyle, + style_id=recraft_style.style_id, + ) + + # prepare mask tensor + mask = resize_mask_to_image(mask, image, allow_gradient=False, add_channel_dim=True) + + images = [] + total = image.shape[0] + pbar = ProgressBar(total) + for i in range(total): + sub_bytes = handle_recraft_file_request( + 
image=image[i], + mask=mask[i:i+1], + path="/proxy/recraft/images/inpaint", + request=request, + auth_token=auth_token, + ) + with handle_recraft_image_output(): + images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) + pbar.update(1) + + images_tensor = torch.cat(images, dim=0) + return (images_tensor, ) + + +class RecraftTextToVectorNode: + """ + Generates SVG synchronously based on prompt and resolution. + """ + + RETURN_TYPES = (RecraftIO.SVG,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation.", + }, + ), + "substyle": (get_v3_substyles(RecraftStyleV3.vector_illustration),), + "size": ( + [res.value for res in RecraftImageSize], + { + "default": RecraftImageSize.res_1024x1024, + "tooltip": "The size of the generated image.", + }, + ), + "n": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 6, + "tooltip": "The number of images to generate.", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + }, + "optional": { + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + "recraft_controls": ( + RecraftIO.CONTROLS, + { + "tooltip": "Optional additional controls over the generation via the Recraft Controls node." + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + prompt: str, + substyle: str, + size: str, + n: int, + seed, + negative_prompt: str = None, + recraft_controls: RecraftControls = None, + auth_token=None, + **kwargs, + ): + validate_string(prompt, strip_whitespace=False, max_length=1000) + # create RecraftStyle so strings will be formatted properly (i.e. "None" will become None) + recraft_style = RecraftStyle(RecraftStyleV3.vector_illustration, substyle=substyle) + + controls_api = None + if recraft_controls: + controls_api = recraft_controls.create_api_model() + + if not negative_prompt: + negative_prompt = None + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/recraft/image_generation", + method=HttpMethod.POST, + request_model=RecraftImageGenerationRequest, + response_model=RecraftImageGenerationResponse, + ), + request=RecraftImageGenerationRequest( + prompt=prompt, + negative_prompt=negative_prompt, + model=RecraftModel.recraftv3, + size=size, + n=n, + style=recraft_style.style, + substyle=recraft_style.substyle, + controls=controls_api, + ), + auth_token=auth_token, + ) + response: RecraftImageGenerationResponse = operation.execute() + svg_data = [] + for data in response.data: + svg_data.append(download_url_to_bytesio(data.url, timeout=1024)) + + return (SVG(svg_data),) + + +class RecraftVectorizeImageNode: + """ + Generates SVG synchronously from an input image. 
+ """ + + RETURN_TYPES = (RecraftIO.SVG,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE, ), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + image: torch.Tensor, + auth_token=None, + **kwargs, + ): + svgs = [] + total = image.shape[0] + pbar = ProgressBar(total) + for i in range(total): + sub_bytes = handle_recraft_file_request( + image=image[i], + path="/proxy/recraft/images/vectorize", + auth_token=auth_token, + ) + svgs.append(SVG(sub_bytes)) + pbar.update(1) + + return (SVG.combine_all(svgs), ) + + +class RecraftReplaceBackgroundNode: + """ + Replace background on image, based on provided prompt. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE, ), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation.", + }, + ), + "n": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 6, + "tooltip": "The number of images to generate.", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + }, + ), + }, + "optional": { + "recraft_style": (RecraftIO.STYLEV3,), + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "An optional text description of undesired elements on an image.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + image: torch.Tensor, + prompt: str, + n: int, + seed, + auth_token=None, + recraft_style: RecraftStyle = None, + negative_prompt: str = None, + **kwargs, + ): + default_style = RecraftStyle(RecraftStyleV3.realistic_image) + if recraft_style is None: + recraft_style = default_style + + if not negative_prompt: + negative_prompt = None + + request = RecraftImageGenerationRequest( + prompt=prompt, + negative_prompt=negative_prompt, + model=RecraftModel.recraftv3, + n=n, + style=recraft_style.style, + substyle=recraft_style.substyle, + style_id=recraft_style.style_id, + ) + + images = [] + total = image.shape[0] + pbar = ProgressBar(total) + for i in range(total): + sub_bytes = handle_recraft_file_request( + image=image[i], + path="/proxy/recraft/images/replaceBackground", + request=request, + auth_token=auth_token, + ) + images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) + pbar.update(1) + + images_tensor = torch.cat(images, dim=0) + return (images_tensor, ) + + +class RecraftRemoveBackgroundNode: + """ + Remove background from image, and return processed image and mask. 
+ """ + + RETURN_TYPES = (IO.IMAGE, IO.MASK) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE, ), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + image: torch.Tensor, + auth_token=None, + **kwargs, + ): + images = [] + total = image.shape[0] + pbar = ProgressBar(total) + for i in range(total): + sub_bytes = handle_recraft_file_request( + image=image[i], + path="/proxy/recraft/images/removeBackground", + auth_token=auth_token, + ) + images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) + pbar.update(1) + + images_tensor = torch.cat(images, dim=0) + # use alpha channel as masks, in B,H,W format + masks_tensor = images_tensor[:,:,:,-1:].squeeze(-1) + return (images_tensor, masks_tensor) + + +class RecraftCrispUpscaleNode: + """ + Upscale image synchronously. + Enhances a given raster image using ‘crisp upscale’ tool, increasing image resolution, making the image sharper and cleaner. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + RECRAFT_PATH = "/proxy/recraft/images/crispUpscale" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE, ), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call( + self, + image: torch.Tensor, + auth_token=None, + **kwargs, + ): + images = [] + total = image.shape[0] + pbar = ProgressBar(total) + for i in range(total): + sub_bytes = handle_recraft_file_request( + image=image[i], + path=self.RECRAFT_PATH, + auth_token=auth_token, + ) + images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) + pbar.update(1) + + images_tensor = torch.cat(images, dim=0) + return (images_tensor,) + + +class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode): + """ + Upscale image synchronously. + Enhances a given raster image using ‘creative upscale’ tool, boosting resolution with a focus on refining small details and faces. 
+ """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Recraft" + + RECRAFT_PATH = "/proxy/recraft/images/creativeUpscale" + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "RecraftTextToImageNode": RecraftTextToImageNode, + "RecraftImageToImageNode": RecraftImageToImageNode, + "RecraftImageInpaintingNode": RecraftImageInpaintingNode, + "RecraftTextToVectorNode": RecraftTextToVectorNode, + "RecraftVectorizeImageNode": RecraftVectorizeImageNode, + "RecraftRemoveBackgroundNode": RecraftRemoveBackgroundNode, + "RecraftReplaceBackgroundNode": RecraftReplaceBackgroundNode, + "RecraftCrispUpscaleNode": RecraftCrispUpscaleNode, + "RecraftCreativeUpscaleNode": RecraftCreativeUpscaleNode, + "RecraftStyleV3RealisticImage": RecraftStyleV3RealisticImageNode, + "RecraftStyleV3DigitalIllustration": RecraftStyleV3DigitalIllustrationNode, + "RecraftStyleV3LogoRaster": RecraftStyleV3LogoRasterNode, + "RecraftStyleV3InfiniteStyleLibrary": RecraftStyleInfiniteStyleLibrary, + "RecraftColorRGB": RecraftColorRGBNode, + "RecraftControls": RecraftControlsNode, + "SaveSVG": SaveSVGNode, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "RecraftTextToImageNode": "Recraft Text to Image", + "RecraftImageToImageNode": "Recraft Image to Image", + "RecraftImageInpaintingNode": "Recraft Image Inpainting", + "RecraftTextToVectorNode": "Recraft Text to Vector", + "RecraftVectorizeImageNode": "Recraft Vectorize Image", + "RecraftRemoveBackgroundNode": "Recraft Remove Background", + "RecraftReplaceBackgroundNode": "Recraft Replace Background", + "RecraftCrispUpscaleNode": "Recraft Crisp Upscale Image", + "RecraftCreativeUpscaleNode": "Recraft Creative Upscale Image", + "RecraftStyleV3RealisticImage": "Recraft Style - Realistic Image", + "RecraftStyleV3DigitalIllustration": "Recraft Style - Digital Illustration", + "RecraftStyleV3LogoRaster": "Recraft Style - Logo Raster", + "RecraftStyleV3InfiniteStyleLibrary": "Recraft Style - Infinite Style Library", + "RecraftColorRGB": "Recraft Color RGB", + "RecraftControls": "Recraft Controls", + "SaveSVG": "Save SVG", +} diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py new file mode 100644 index 000000000..52fe2417c --- /dev/null +++ b/comfy_api_nodes/nodes_stability.py @@ -0,0 +1,609 @@ +from inspect import cleandoc +from comfy.comfy_types.node_typing import IO +from comfy_api_nodes.apis.stability_api import ( + StabilityUpscaleConservativeRequest, + StabilityUpscaleCreativeRequest, + StabilityAsyncResponse, + StabilityResultsGetResponse, + StabilityStable3_5Request, + StabilityStableUltraRequest, + StabilityStableUltraResponse, + StabilityAspectRatio, + Stability_SD3_5_Model, + Stability_SD3_5_GenerationMode, + get_stability_style_presets, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + bytesio_to_image_tensor, + tensor_to_bytesio, + validate_string, +) + +import torch +import base64 +from io import BytesIO +from enum import Enum + + +class StabilityPollStatus(str, Enum): + finished = "finished" + in_progress = "in_progress" + failed = "failed" + + +def get_async_dummy_status(x: StabilityResultsGetResponse): + if x.name is not 
None or x.errors is not None:
+        return StabilityPollStatus.failed
+    elif x.finish_reason is not None:
+        return StabilityPollStatus.finished
+    return StabilityPollStatus.in_progress
+
+
+class StabilityStableImageUltraNode:
+    """
+    Generates images synchronously based on prompt and resolution.
+    """
+
+    RETURN_TYPES = (IO.IMAGE,)
+    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
+    FUNCTION = "api_call"
+    API_NODE = True
+    CATEGORY = "api node/image/Stability AI"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "prompt": (
+                    IO.STRING,
+                    {
+                        "multiline": True,
+                        "default": "",
+                        "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines " +
+                                   "elements, colors, and subjects will lead to better results. " +
+                                   "To control the weight of a given word use the format `(word:weight)`, " +
+                                   "where `word` is the word you'd like to control the weight of and `weight` " +
+                                   "is a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)` " +
+                                   "would convey a sky that was blue and green, but more green than blue."
+                    },
+                ),
+                "aspect_ratio": ([x.value for x in StabilityAspectRatio],
+                    {
+                        "default": StabilityAspectRatio.ratio_1_1,
+                        "tooltip": "Aspect ratio of generated image.",
+                    },
+                ),
+                "style_preset": (get_stability_style_presets(),
+                    {
+                        "tooltip": "Optional desired style of generated image.",
+                    },
+                ),
+                "seed": (
+                    IO.INT,
+                    {
+                        "default": 0,
+                        "min": 0,
+                        "max": 4294967294,
+                        "control_after_generate": True,
+                        "tooltip": "The random seed used for creating the noise.",
+                    },
+                ),
+            },
+            "optional": {
+                "image": (IO.IMAGE,),
+                "negative_prompt": (
+                    IO.STRING,
+                    {
+                        "default": "",
+                        "forceInput": True,
+                        "tooltip": "A blurb of text describing what you do not wish to see in the output image. This is an advanced feature."
+ }, + ), + "image_denoise": ( + IO.FLOAT, + { + "default": 0.5, + "min": 0.0, + "max": 1.0, + "step": 0.01, + "tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call(self, prompt: str, aspect_ratio: str, style_preset: str, seed: int, + negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None, + auth_token=None): + validate_string(prompt, strip_whitespace=False) + # prepare image binary if image present + image_binary = None + if image is not None: + image_binary = tensor_to_bytesio(image, total_pixels=1504*1504).read() + else: + image_denoise = None + + if not negative_prompt: + negative_prompt = None + if style_preset == "None": + style_preset = None + + files = { + "image": image_binary + } + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/stability/v2beta/stable-image/generate/ultra", + method=HttpMethod.POST, + request_model=StabilityStableUltraRequest, + response_model=StabilityStableUltraResponse, + ), + request=StabilityStableUltraRequest( + prompt=prompt, + negative_prompt=negative_prompt, + aspect_ratio=aspect_ratio, + seed=seed, + strength=image_denoise, + style_preset=style_preset, + ), + files=files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + response_api = operation.execute() + + if response_api.finish_reason != "SUCCESS": + raise Exception(f"Stable Image Ultra generation failed: {response_api.finish_reason}.") + + image_data = base64.b64decode(response_api.image) + returned_image = bytesio_to_image_tensor(BytesIO(image_data)) + + return (returned_image,) + + +class StabilityStableImageSD_3_5Node: + """ + Generates images synchronously based on prompt and resolution. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Stability AI" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results." + }, + ), + "model": ([x.value for x in Stability_SD3_5_Model],), + "aspect_ratio": ([x.value for x in StabilityAspectRatio], + { + "default": StabilityAspectRatio.ratio_1_1, + "tooltip": "Aspect ratio of generated image.", + }, + ), + "style_preset": (get_stability_style_presets(), + { + "tooltip": "Optional desired style of generated image.", + }, + ), + "cfg_scale": ( + IO.FLOAT, + { + "default": 4.0, + "min": 1.0, + "max": 10.0, + "step": 0.1, + "tooltip": "How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 4294967294, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "optional": { + "image": (IO.IMAGE,), + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature." 
+ }, + ), + "image_denoise": ( + IO.FLOAT, + { + "default": 0.5, + "min": 0.0, + "max": 1.0, + "step": 0.01, + "tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call(self, model: str, prompt: str, aspect_ratio: str, style_preset: str, seed: int, cfg_scale: float, + negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None, + auth_token=None): + validate_string(prompt, strip_whitespace=False) + # prepare image binary if image present + image_binary = None + mode = Stability_SD3_5_GenerationMode.text_to_image + if image is not None: + image_binary = tensor_to_bytesio(image, total_pixels=1504*1504).read() + mode = Stability_SD3_5_GenerationMode.image_to_image + aspect_ratio = None + else: + image_denoise = None + + if not negative_prompt: + negative_prompt = None + if style_preset == "None": + style_preset = None + + files = { + "image": image_binary + } + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/stability/v2beta/stable-image/generate/sd3", + method=HttpMethod.POST, + request_model=StabilityStable3_5Request, + response_model=StabilityStableUltraResponse, + ), + request=StabilityStable3_5Request( + prompt=prompt, + negative_prompt=negative_prompt, + aspect_ratio=aspect_ratio, + seed=seed, + strength=image_denoise, + style_preset=style_preset, + cfg_scale=cfg_scale, + model=model, + mode=mode, + ), + files=files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + response_api = operation.execute() + + if response_api.finish_reason != "SUCCESS": + raise Exception(f"Stable Diffusion 3.5 Image generation failed: {response_api.finish_reason}.") + + image_data = base64.b64decode(response_api.image) + returned_image = bytesio_to_image_tensor(BytesIO(image_data)) + + return (returned_image,) + + +class StabilityUpscaleConservativeNode: + """ + Upscale image with minimal alterations to 4K resolution. + """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Stability AI" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE,), + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results." + }, + ), + "creativity": ( + IO.FLOAT, + { + "default": 0.35, + "min": 0.2, + "max": 0.5, + "step": 0.01, + "tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image.", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 4294967294, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + }, + "optional": { + "negative_prompt": ( + IO.STRING, + { + "default": "", + "forceInput": True, + "tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature." 
+                    },
+                ),
+            },
+            "hidden": {
+                "auth_token": "AUTH_TOKEN_COMFY_ORG",
+            },
+        }
+
+    def api_call(self, image: torch.Tensor, prompt: str, creativity: float, seed: int, negative_prompt: str=None,
+                 auth_token=None):
+        validate_string(prompt, strip_whitespace=False)
+        image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read()
+
+        if not negative_prompt:
+            negative_prompt = None
+
+        files = {
+            "image": image_binary
+        }
+
+        operation = SynchronousOperation(
+            endpoint=ApiEndpoint(
+                path="/proxy/stability/v2beta/stable-image/upscale/conservative",
+                method=HttpMethod.POST,
+                request_model=StabilityUpscaleConservativeRequest,
+                response_model=StabilityStableUltraResponse,
+            ),
+            request=StabilityUpscaleConservativeRequest(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                creativity=round(creativity,2),
+                seed=seed,
+            ),
+            files=files,
+            content_type="multipart/form-data",
+            auth_token=auth_token,
+        )
+        response_api = operation.execute()
+
+        if response_api.finish_reason != "SUCCESS":
+            raise Exception(f"Stability Upscale Conservative generation failed: {response_api.finish_reason}.")
+
+        image_data = base64.b64decode(response_api.image)
+        returned_image = bytesio_to_image_tensor(BytesIO(image_data))
+
+        return (returned_image,)
+
+
+class StabilityUpscaleCreativeNode:
+    """
+    Creatively upscale image to 4K resolution; details not heavily conditioned by
+    the input may be reimagined, and the result is fetched asynchronously via polling.
+    """
+
+    RETURN_TYPES = (IO.IMAGE,)
+    DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value
+    FUNCTION = "api_call"
+    API_NODE = True
+    CATEGORY = "api node/image/Stability AI"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "image": (IO.IMAGE,),
+                "prompt": (
+                    IO.STRING,
+                    {
+                        "multiline": True,
+                        "default": "",
+                        "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results."
+                    },
+                ),
+                "creativity": (
+                    IO.FLOAT,
+                    {
+                        "default": 0.3,
+                        "min": 0.1,
+                        "max": 0.5,
+                        "step": 0.01,
+                        "tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image.",
+                    },
+                ),
+                "style_preset": (get_stability_style_presets(),
+                    {
+                        "tooltip": "Optional desired style of generated image.",
+                    },
+                ),
+                "seed": (
+                    IO.INT,
+                    {
+                        "default": 0,
+                        "min": 0,
+                        "max": 4294967294,
+                        "control_after_generate": True,
+                        "tooltip": "The random seed used for creating the noise.",
+                    },
+                ),
+            },
+            "optional": {
+                "negative_prompt": (
+                    IO.STRING,
+                    {
+                        "default": "",
+                        "forceInput": True,
+                        "tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature."
+ }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call(self, image: torch.Tensor, prompt: str, creativity: float, style_preset: str, seed: int, negative_prompt: str=None, + auth_token=None): + validate_string(prompt, strip_whitespace=False) + image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read() + + if not negative_prompt: + negative_prompt = None + if style_preset == "None": + style_preset = None + + files = { + "image": image_binary + } + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/stability/v2beta/stable-image/upscale/creative", + method=HttpMethod.POST, + request_model=StabilityUpscaleCreativeRequest, + response_model=StabilityAsyncResponse, + ), + request=StabilityUpscaleCreativeRequest( + prompt=prompt, + negative_prompt=negative_prompt, + creativity=round(creativity,2), + style_preset=style_preset, + seed=seed, + ), + files=files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + response_api = operation.execute() + + operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/stability/v2beta/results/{response_api.id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=StabilityResultsGetResponse, + ), + poll_interval=3, + completed_statuses=[StabilityPollStatus.finished], + failed_statuses=[StabilityPollStatus.failed], + status_extractor=lambda x: get_async_dummy_status(x), + auth_token=auth_token, + ) + response_poll: StabilityResultsGetResponse = operation.execute() + + if response_poll.finish_reason != "SUCCESS": + raise Exception(f"Stability Upscale Creative generation failed: {response_poll.finish_reason}.") + + image_data = base64.b64decode(response_poll.result) + returned_image = bytesio_to_image_tensor(BytesIO(image_data)) + + return (returned_image,) + + +class StabilityUpscaleFastNode: + """ + Quickly upscales an image via Stability API call to 4x its original size; intended for upscaling low-quality/compressed images. 
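+    Takes only an image input; no prompt, seed, or other parameters are exposed.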
+ """ + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/Stability AI" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE,), + }, + "optional": { + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + def api_call(self, image: torch.Tensor, + auth_token=None): + image_binary = tensor_to_bytesio(image, total_pixels=4096*4096).read() + + files = { + "image": image_binary + } + + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/stability/v2beta/stable-image/upscale/fast", + method=HttpMethod.POST, + request_model=EmptyRequest, + response_model=StabilityStableUltraResponse, + ), + request=EmptyRequest(), + files=files, + content_type="multipart/form-data", + auth_token=auth_token, + ) + response_api = operation.execute() + + if response_api.finish_reason != "SUCCESS": + raise Exception(f"Stability Upscale Fast failed: {response_api.finish_reason}.") + + image_data = base64.b64decode(response_api.image) + returned_image = bytesio_to_image_tensor(BytesIO(image_data)) + + return (returned_image,) + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "StabilityStableImageUltraNode": StabilityStableImageUltraNode, + "StabilityStableImageSD_3_5Node": StabilityStableImageSD_3_5Node, + "StabilityUpscaleConservativeNode": StabilityUpscaleConservativeNode, + "StabilityUpscaleCreativeNode": StabilityUpscaleCreativeNode, + "StabilityUpscaleFastNode": StabilityUpscaleFastNode, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "StabilityStableImageUltraNode": "Stability AI Stable Image Ultra", + "StabilityStableImageSD_3_5Node": "Stability AI Stable Diffusion 3.5 Image", + "StabilityUpscaleConservativeNode": "Stability AI Upscale Conservative", + "StabilityUpscaleCreativeNode": "Stability AI Upscale Creative", + "StabilityUpscaleFastNode": "Stability AI Upscale Fast", +} diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py new file mode 100644 index 000000000..9233944b5 --- /dev/null +++ b/comfy_api_nodes/nodes_veo2.py @@ -0,0 +1,283 @@ +import io +import logging +import base64 +import requests +import torch + +from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from comfy_api.input_impl.video_types import VideoFromFile +from comfy_api_nodes.apis import ( + Veo2GenVidRequest, + Veo2GenVidResponse, + Veo2GenVidPollRequest, + Veo2GenVidPollResponse +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, +) + +from comfy_api_nodes.apinode_utils import ( + downscale_image_tensor, + tensor_to_base64_string +) + +def convert_image_to_base64(image: torch.Tensor): + if image is None: + return None + + scaled_image = downscale_image_tensor(image, total_pixels=2048*2048) + return tensor_to_base64_string(scaled_image) + +class VeoVideoGenerationNode(ComfyNodeABC): + """ + Generates videos from text prompts using Google's Veo API. + + This node can create videos from text descriptions and optional image inputs, + with control over parameters like aspect ratio, duration, and more. 
+ """ + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Text description of the video", + }, + ), + "aspect_ratio": ( + IO.COMBO, + { + "options": ["16:9", "9:16"], + "default": "16:9", + "tooltip": "Aspect ratio of the output video", + }, + ), + }, + "optional": { + "negative_prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Negative text prompt to guide what to avoid in the video", + }, + ), + "duration_seconds": ( + IO.INT, + { + "default": 5, + "min": 5, + "max": 8, + "step": 1, + "display": "number", + "tooltip": "Duration of the output video in seconds", + }, + ), + "enhance_prompt": ( + IO.BOOLEAN, + { + "default": True, + "tooltip": "Whether to enhance the prompt with AI assistance", + } + ), + "person_generation": ( + IO.COMBO, + { + "options": ["ALLOW", "BLOCK"], + "default": "ALLOW", + "tooltip": "Whether to allow generating people in the video", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFF, + "step": 1, + "display": "number", + "control_after_generate": True, + "tooltip": "Seed for video generation (0 for random)", + }, + ), + "image": (IO.IMAGE, { + "default": None, + "tooltip": "Optional reference image to guide video generation", + }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + }, + } + + RETURN_TYPES = (IO.VIDEO,) + FUNCTION = "generate_video" + CATEGORY = "api node/video/Veo" + DESCRIPTION = "Generates videos from text prompts using Google's Veo API" + API_NODE = True + + def generate_video( + self, + prompt, + aspect_ratio="16:9", + negative_prompt="", + duration_seconds=5, + enhance_prompt=True, + person_generation="ALLOW", + seed=0, + image=None, + auth_token=None, + ): + # Prepare the instances for the request + instances = [] + + instance = { + "prompt": prompt + } + + # Add image if provided + if image is not None: + image_base64 = convert_image_to_base64(image) + if image_base64: + instance["image"] = { + "bytesBase64Encoded": image_base64, + "mimeType": "image/png" + } + + instances.append(instance) + + # Create parameters dictionary + parameters = { + "aspectRatio": aspect_ratio, + "personGeneration": person_generation, + "durationSeconds": duration_seconds, + "enhancePrompt": enhance_prompt, + } + + # Add optional parameters if provided + if negative_prompt: + parameters["negativePrompt"] = negative_prompt + if seed > 0: + parameters["seed"] = seed + + # Initial request to start video generation + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/veo/generate", + method=HttpMethod.POST, + request_model=Veo2GenVidRequest, + response_model=Veo2GenVidResponse + ), + request=Veo2GenVidRequest( + instances=instances, + parameters=parameters + ), + auth_token=auth_token + ) + + initial_response = initial_operation.execute() + operation_name = initial_response.name + + logging.info(f"Veo generation started with operation name: {operation_name}") + + # Define status extractor function + def status_extractor(response): + # Only return "completed" if the operation is done, regardless of success or failure + # We'll check for errors after polling completes + return "completed" if response.done else "pending" + + # Define progress extractor function + def progress_extractor(response): + # Could be enhanced if the API provides progress information + return None + + # Define the polling operation + poll_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + 
path="/proxy/veo/poll", + method=HttpMethod.POST, + request_model=Veo2GenVidPollRequest, + response_model=Veo2GenVidPollResponse + ), + completed_statuses=["completed"], + failed_statuses=[], # No failed statuses, we'll handle errors after polling + status_extractor=status_extractor, + progress_extractor=progress_extractor, + request=Veo2GenVidPollRequest( + operationName=operation_name + ), + auth_token=auth_token, + poll_interval=5.0 + ) + + # Execute the polling operation + poll_response = poll_operation.execute() + + # Now check for errors in the final response + # Check for error in poll response + if hasattr(poll_response, 'error') and poll_response.error: + error_message = f"Veo API error: {poll_response.error.message} (code: {poll_response.error.code})" + logging.error(error_message) + raise Exception(error_message) + + # Check for RAI filtered content + if (hasattr(poll_response.response, 'raiMediaFilteredCount') and + poll_response.response.raiMediaFilteredCount > 0): + + # Extract reason message if available + if (hasattr(poll_response.response, 'raiMediaFilteredReasons') and + poll_response.response.raiMediaFilteredReasons): + reason = poll_response.response.raiMediaFilteredReasons[0] + error_message = f"Content filtered by Google's Responsible AI practices: {reason} ({poll_response.response.raiMediaFilteredCount} videos filtered.)" + else: + error_message = f"Content filtered by Google's Responsible AI practices ({poll_response.response.raiMediaFilteredCount} videos filtered.)" + + logging.error(error_message) + raise Exception(error_message) + + # Extract video data + video_data = None + if poll_response.response and hasattr(poll_response.response, 'videos') and poll_response.response.videos and len(poll_response.response.videos) > 0: + video = poll_response.response.videos[0] + + # Check if video is provided as base64 or URL + if hasattr(video, 'bytesBase64Encoded') and video.bytesBase64Encoded: + # Decode base64 string to bytes + video_data = base64.b64decode(video.bytesBase64Encoded) + elif hasattr(video, 'gcsUri') and video.gcsUri: + # Download from URL + video_url = video.gcsUri + video_response = requests.get(video_url) + video_data = video_response.content + else: + raise Exception("Video returned but no data or URL was provided") + else: + raise Exception("Video generation completed but no video was returned") + + if not video_data: + raise Exception("No video data was returned") + + logging.info("Video generation completed successfully") + + # Convert video data to BytesIO object + video_io = io.BytesIO(video_data) + + # Return VideoFromFile object + return (VideoFromFile(video_io),) + + +# Register the node +NODE_CLASS_MAPPINGS = { + "VeoVideoGenerationNode": VeoVideoGenerationNode, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "VeoVideoGenerationNode": "Google Veo2 Video Generation", +} diff --git a/comfy_api_nodes/redocly-dev.yaml b/comfy_api_nodes/redocly-dev.yaml new file mode 100644 index 000000000..d9e3cab70 --- /dev/null +++ b/comfy_api_nodes/redocly-dev.yaml @@ -0,0 +1,10 @@ +# This file is used to filter the Comfy Org OpenAPI spec for schemas related to API Nodes. +# This is used for development purposes to generate stubs for unreleased API endpoints. 
+apis: + filter: + root: openapi.yaml + decorators: + filter-in: + property: tags + value: ['API Nodes'] + matchStrategy: all diff --git a/comfy_api_nodes/redocly.yaml b/comfy_api_nodes/redocly.yaml new file mode 100644 index 000000000..d102345b1 --- /dev/null +++ b/comfy_api_nodes/redocly.yaml @@ -0,0 +1,10 @@ +# This file is used to filter the Comfy Org OpenAPI spec for schemas related to API Nodes. + +apis: + filter: + root: openapi.yaml + decorators: + filter-in: + property: tags + value: ['API Nodes', 'Released'] + matchStrategy: all diff --git a/comfy_extras/nodes_primitive.py b/comfy_extras/nodes_primitive.py index 184b990c3..1f93f87a7 100644 --- a/comfy_extras/nodes_primitive.py +++ b/comfy_extras/nodes_primitive.py @@ -21,6 +21,21 @@ class String(ComfyNodeABC): return (value,) +class StringMultiline(ComfyNodeABC): + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": {"value": (IO.STRING, {"multiline": True,},)}, + } + + RETURN_TYPES = (IO.STRING,) + FUNCTION = "execute" + CATEGORY = "utils/primitive" + + def execute(self, value: str) -> tuple[str]: + return (value,) + + class Int(ComfyNodeABC): @classmethod def INPUT_TYPES(cls) -> InputTypeDict: @@ -68,6 +83,7 @@ class Boolean(ComfyNodeABC): NODE_CLASS_MAPPINGS = { "PrimitiveString": String, + "PrimitiveStringMultiline": StringMultiline, "PrimitiveInt": Int, "PrimitiveFloat": Float, "PrimitiveBoolean": Boolean, @@ -75,6 +91,7 @@ NODE_CLASS_MAPPINGS = { NODE_DISPLAY_NAME_MAPPINGS = { "PrimitiveString": "String", + "PrimitiveStringMultiline": "String (Multiline)", "PrimitiveInt": "Int", "PrimitiveFloat": "Float", "PrimitiveBoolean": "Boolean", diff --git a/nodes.py b/nodes.py index 92b8ca6ae..d31e0774d 100644 --- a/nodes.py +++ b/nodes.py @@ -2263,7 +2263,17 @@ def init_builtin_extra_nodes(): api_nodes_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_api_nodes") api_nodes_files = [ - "nodes_api.py", + "nodes_ideogram.py", + "nodes_openai.py", + "nodes_minimax.py", + "nodes_veo2.py", + "nodes_kling.py", + "nodes_bfl.py", + "nodes_luma.py", + "nodes_recraft.py", + "nodes_pixverse.py", + "nodes_stability.py", + "nodes_pika.py", ] import_failed = [] diff --git a/requirements.txt b/requirements.txt index 05ceba00a..29cf0e2ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ -comfyui-frontend-package==1.18.6 -comfyui-workflow-templates==0.1.3 +comfyui-frontend-package==1.18.9 +comfyui-workflow-templates==0.1.11 torch torchsde torchvision diff --git a/tests-unit/comfy_api_nodes_test/mapper_utils_test.py b/tests-unit/comfy_api_nodes_test/mapper_utils_test.py new file mode 100644 index 000000000..69488f691 --- /dev/null +++ b/tests-unit/comfy_api_nodes_test/mapper_utils_test.py @@ -0,0 +1,297 @@ +from typing import Optional +from enum import Enum + +from pydantic import BaseModel, Field + +from comfy.comfy_types.node_typing import IO +from comfy_api_nodes.mapper_utils import model_field_to_node_input + + +def test_model_field_to_float_input(): + """Tests mapping a float field with constraints.""" + + class ModelWithFloatField(BaseModel): + cfg_scale: Optional[float] = Field( + default=0.5, + description="Flexibility in video generation", + ge=0.0, + le=1.0, + multiple_of=0.001, + ) + + expected_output = ( + IO.FLOAT, + { + "default": 0.5, + "tooltip": "Flexibility in video generation", + "min": 0.0, + "max": 1.0, + "step": 0.001, + }, + ) + + actual_output = model_field_to_node_input( + IO.FLOAT, ModelWithFloatField, "cfg_scale" + ) + + assert actual_output[0] == 
expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_float_input_no_constraints(): + """Tests mapping a float field with no constraints.""" + + class ModelWithFloatField(BaseModel): + cfg_scale: Optional[float] = Field(default=0.5) + + expected_output = ( + IO.FLOAT, + { + "default": 0.5, + }, + ) + + actual_output = model_field_to_node_input( + IO.FLOAT, ModelWithFloatField, "cfg_scale" + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_int_input(): + """Tests mapping an int field with constraints.""" + + class ModelWithIntField(BaseModel): + num_frames: Optional[int] = Field( + default=10, + description="Number of frames to generate", + ge=1, + le=100, + multiple_of=1, + ) + + expected_output = ( + IO.INT, + { + "default": 10, + "tooltip": "Number of frames to generate", + "min": 1, + "max": 100, + "step": 1, + }, + ) + + actual_output = model_field_to_node_input(IO.INT, ModelWithIntField, "num_frames") + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_string_input(): + """Tests mapping a string field.""" + + class ModelWithStringField(BaseModel): + prompt: Optional[str] = Field( + default="A beautiful sunset over a calm ocean", + description="A prompt for the video generation", + ) + + expected_output = ( + IO.STRING, + { + "default": "A beautiful sunset over a calm ocean", + "tooltip": "A prompt for the video generation", + }, + ) + + actual_output = model_field_to_node_input(IO.STRING, ModelWithStringField, "prompt") + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_string_input_multiline(): + """Tests mapping a string field.""" + + class ModelWithStringField(BaseModel): + prompt: Optional[str] = Field( + default="A beautiful sunset over a calm ocean", + description="A prompt for the video generation", + ) + + expected_output = ( + IO.STRING, + { + "default": "A beautiful sunset over a calm ocean", + "tooltip": "A prompt for the video generation", + "multiline": True, + }, + ) + + actual_output = model_field_to_node_input( + IO.STRING, ModelWithStringField, "prompt", multiline=True + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_combo_input(): + """Tests mapping a combo field.""" + + class MockEnum(str, Enum): + option_1 = "option 1" + option_2 = "option 2" + option_3 = "option 3" + + class ModelWithComboField(BaseModel): + model_name: Optional[MockEnum] = Field("option 1", description="Model Name") + + expected_output = ( + IO.COMBO, + { + "options": ["option 1", "option 2", "option 3"], + "default": "option 1", + "tooltip": "Model Name", + }, + ) + + actual_output = model_field_to_node_input( + IO.COMBO, ModelWithComboField, "model_name", enum_type=MockEnum + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_combo_input_no_options(): + """Tests mapping a combo field with no options.""" + + class ModelWithComboField(BaseModel): + model_name: Optional[str] = Field(description="Model Name") + + expected_output = ( + IO.COMBO, + { + "tooltip": "Model Name", + }, + ) + + actual_output = model_field_to_node_input( + IO.COMBO, ModelWithComboField, "model_name" + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == 
expected_output[1] + + +def test_model_field_to_image_input(): + """Tests mapping an image field.""" + + class ModelWithImageField(BaseModel): + image: Optional[str] = Field( + default=None, + description="An image for the video generation", + ) + + expected_output = ( + IO.IMAGE, + { + "default": None, + "tooltip": "An image for the video generation", + }, + ) + + actual_output = model_field_to_node_input(IO.IMAGE, ModelWithImageField, "image") + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_node_input_no_description(): + """Tests mapping a field with no description.""" + + class ModelWithNoDescriptionField(BaseModel): + field: Optional[str] = Field(default="default value") + + expected_output = ( + IO.STRING, + { + "default": "default value", + }, + ) + + actual_output = model_field_to_node_input( + IO.STRING, ModelWithNoDescriptionField, "field" + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_node_input_no_default(): + """Tests mapping a field with no default.""" + + class ModelWithNoDefaultField(BaseModel): + field: Optional[str] = Field(description="A field with no default") + + expected_output = ( + IO.STRING, + { + "tooltip": "A field with no default", + }, + ) + + actual_output = model_field_to_node_input( + IO.STRING, ModelWithNoDefaultField, "field" + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_node_input_no_metadata(): + """Tests mapping a field with no metadata or properties defined on the schema.""" + + class ModelWithNoMetadataField(BaseModel): + field: Optional[str] = Field() + + expected_output = ( + IO.STRING, + {}, + ) + + actual_output = model_field_to_node_input( + IO.STRING, ModelWithNoMetadataField, "field" + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] + + +def test_model_field_to_node_input_default_is_none(): + """ + Tests mapping a field with a default of `None`. + I.e., the default field should be included as the schema explicitly sets it to `None`. 
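+    Contrast with the no-default test above, where the `default` key is omitted entirely.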
+ """ + + class ModelWithNoneDefaultField(BaseModel): + field: Optional[str] = Field( + default=None, description="A field with a default of None" + ) + + expected_output = ( + IO.STRING, + { + "default": None, + "tooltip": "A field with a default of None", + }, + ) + + actual_output = model_field_to_node_input( + IO.STRING, ModelWithNoneDefaultField, "field" + ) + + assert actual_output[0] == expected_output[0] + assert actual_output[1] == expected_output[1] From 094e9ef126fb60a6f5eeb7e1e30669b9328e7349 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 6 May 2025 01:53:53 -0700 Subject: [PATCH 0104/1073] Add a way to disable api nodes: --disable-api-nodes (#7960) --- comfy/cli_args.py | 1 + main.py | 2 +- nodes.py | 30 +++++++++++++++++++++++++----- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index ef5ab6277..97b348f0d 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -148,6 +148,7 @@ parser.add_argument("--windows-standalone-build", action="store_true", help="Win parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.") parser.add_argument("--disable-all-custom-nodes", action="store_true", help="Disable loading all custom nodes.") +parser.add_argument("--disable-api-nodes", action="store_true", help="Disable loading all api nodes.") parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage.") diff --git a/main.py b/main.py index 5c21542b3..221e48e41 100644 --- a/main.py +++ b/main.py @@ -270,7 +270,7 @@ def start_comfyui(asyncio_loop=None): q = execution.PromptQueue(prompt_server) hook_breaker_ac10a0.save_functions() - nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes) + nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes, init_api_nodes=not args.disable_api_nodes) hook_breaker_ac10a0.restore_functions() cuda_malloc_warning() diff --git a/nodes.py b/nodes.py index d31e0774d..3c3617562 100644 --- a/nodes.py +++ b/nodes.py @@ -2261,6 +2261,15 @@ def init_builtin_extra_nodes(): "nodes_preview_any.py", ] + import_failed = [] + for node_file in extras_files: + if not load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras"): + import_failed.append(node_file) + + return import_failed + + +def init_builtin_api_nodes(): api_nodes_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_api_nodes") api_nodes_files = [ "nodes_ideogram.py", @@ -2277,10 +2286,6 @@ def init_builtin_extra_nodes(): ] import_failed = [] - for node_file in extras_files: - if not load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras"): - import_failed.append(node_file) - for node_file in api_nodes_files: if not load_custom_node(os.path.join(api_nodes_dir, node_file), module_parent="comfy_api_nodes"): import_failed.append(node_file) @@ -2288,14 +2293,29 @@ def init_builtin_extra_nodes(): return import_failed -def init_extra_nodes(init_custom_nodes=True): +def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True): import_failed = init_builtin_extra_nodes() + import_failed_api = [] + if init_api_nodes: + import_failed_api = init_builtin_api_nodes() + if init_custom_nodes: init_external_custom_nodes() else: logging.info("Skipping loading of custom nodes") + if len(import_failed_api) > 0: + logging.warning("WARNING: some comfy_api_nodes/ nodes did not import correctly. 
This may be because they are missing some dependencies.\n") + for node in import_failed_api: + logging.warning("IMPORT FAILED: {}".format(node)) + logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.") + if args.windows_standalone_build: + logging.warning("Please run the update script: update/update_comfyui.bat") + else: + logging.warning("Please do a: pip install -r requirements.txt") + logging.warning("") + if len(import_failed) > 0: logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n") for node in import_failed: From 0cf2e46b1725a5d0d6cb7b177a524026ca00f5a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 6 May 2025 07:39:54 -0400 Subject: [PATCH 0105/1073] ComfyUI version 0.3.32 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 8d2068de7..61573aead 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.31" +__version__ = "0.3.32" diff --git a/pyproject.toml b/pyproject.toml index 8b549a0b6..878e7c66a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.31" +version = "0.3.32" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From a4e679765ec6a7adc6dda326c7bab915e2914bd6 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 6 May 2025 06:00:01 -0700 Subject: [PATCH 0106/1073] Change chroma to use Flux shift. (#7961) --- comfy/model_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 3d33086d8..045df1317 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1111,7 +1111,7 @@ class HiDream(BaseModel): return out class Chroma(Flux): - def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + def __init__(self, model_config, model_type=ModelType.FLUX, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.chroma.model.Chroma) def extra_conds(self, **kwargs): From 271c9c5b9eb027c682161ed7848bbf640253f973 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 6 May 2025 06:52:37 -0700 Subject: [PATCH 0107/1073] Better mem estimation for the LTXV 13B model. (#7963) --- comfy/supported_models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index d5210cfac..a1dea2343 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -785,6 +785,10 @@ class LTXV(supported_models_base.BASE): vae_key_prefix = ["vae."] text_encoder_key_prefix = ["text_encoders."] + def __init__(self, unet_config): + super().__init__(unet_config) + self.memory_usage_factor = (unet_config.get("cross_attention_dim", 2048) / 2048) * 5.5 + def get_model(self, state_dict, prefix="", device=None): out = model_base.LTXV(self, device=device) return out From 16417b40d9411c6e3a63949aa0f3582be25b28db Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 7 May 2025 05:33:34 -0700 Subject: [PATCH 0108/1073] Initial ACE-Step model implementation. 
(#7972) --- comfy/latent_formats.py | 4 + comfy/ldm/ace/attention.py | 768 + comfy/ldm/ace/lyric_encoder.py | 1067 ++ comfy/ldm/ace/model.py | 381 + comfy/ldm/ace/vae/autoencoder_dc.py | 644 + comfy/ldm/ace/vae/music_dcae_pipeline.py | 104 + comfy/ldm/ace/vae/music_log_mel.py | 108 + comfy/ldm/ace/vae/music_vocoder.py | 542 + comfy/model_base.py | 19 + comfy/model_detection.py | 25 + comfy/sd.py | 25 +- comfy/supported_models.py | 31 +- comfy/text_encoders/ace.py | 145 + .../ace_lyrics_tokenizer/vocab.json | 15535 ++++++++++++++++ comfy/text_encoders/ace_text_cleaners.py | 270 + comfy/text_encoders/umt5_config_base.json | 22 + comfy_extras/nodes_ace.py | 46 + nodes.py | 6 +- 18 files changed, 19738 insertions(+), 4 deletions(-) create mode 100644 comfy/ldm/ace/attention.py create mode 100644 comfy/ldm/ace/lyric_encoder.py create mode 100644 comfy/ldm/ace/model.py create mode 100644 comfy/ldm/ace/vae/autoencoder_dc.py create mode 100644 comfy/ldm/ace/vae/music_dcae_pipeline.py create mode 100755 comfy/ldm/ace/vae/music_log_mel.py create mode 100755 comfy/ldm/ace/vae/music_vocoder.py create mode 100644 comfy/text_encoders/ace.py create mode 100644 comfy/text_encoders/ace_lyrics_tokenizer/vocab.json create mode 100644 comfy/text_encoders/ace_text_cleaners.py create mode 100644 comfy/text_encoders/umt5_config_base.json create mode 100644 comfy_extras/nodes_ace.py diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 556c39512..82d9f9bb8 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -466,3 +466,7 @@ class Hunyuan3Dv2mini(LatentFormat): latent_channels = 64 latent_dimensions = 1 scale_factor = 1.0188137142395404 + +class ACEAudio(LatentFormat): + latent_channels = 8 + latent_dimensions = 2 diff --git a/comfy/ldm/ace/attention.py b/comfy/ldm/ace/attention.py new file mode 100644 index 000000000..631d13647 --- /dev/null +++ b/comfy/ldm/ace/attention.py @@ -0,0 +1,768 @@ +# Original from: https://github.com/ace-step/ACE-Step/blob/main/models/attention.py +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
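+
+# This module provides the attention stack for the ACE-Step model: a linear
+# (LiteLA-style) attention processor and a standard scaled-dot-product
+# processor, both supporting rotary position embeddings. The linear variant
+# computes out = (V @ K^T) @ Q with ReLU feature maps, normalizing by an extra
+# all-ones row appended to V, so cost grows linearly with sequence length.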
+from typing import Tuple, Union, Optional + +import torch +import torch.nn.functional as F +from torch import nn + +import comfy.model_management + +class Attention(nn.Module): + def __init__( + self, + query_dim: int, + cross_attention_dim: Optional[int] = None, + heads: int = 8, + kv_heads: Optional[int] = None, + dim_head: int = 64, + dropout: float = 0.0, + bias: bool = False, + qk_norm: Optional[str] = None, + added_kv_proj_dim: Optional[int] = None, + added_proj_bias: Optional[bool] = True, + out_bias: bool = True, + scale_qk: bool = True, + only_cross_attention: bool = False, + eps: float = 1e-5, + rescale_output_factor: float = 1.0, + residual_connection: bool = False, + processor=None, + out_dim: int = None, + out_context_dim: int = None, + context_pre_only=None, + pre_only=False, + elementwise_affine: bool = True, + is_causal: bool = False, + dtype=None, device=None, operations=None + ): + super().__init__() + + self.inner_dim = out_dim if out_dim is not None else dim_head * heads + self.inner_kv_dim = self.inner_dim if kv_heads is None else dim_head * kv_heads + self.query_dim = query_dim + self.use_bias = bias + self.is_cross_attention = cross_attention_dim is not None + self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim + self.rescale_output_factor = rescale_output_factor + self.residual_connection = residual_connection + self.dropout = dropout + self.fused_projections = False + self.out_dim = out_dim if out_dim is not None else query_dim + self.out_context_dim = out_context_dim if out_context_dim is not None else query_dim + self.context_pre_only = context_pre_only + self.pre_only = pre_only + self.is_causal = is_causal + + self.scale_qk = scale_qk + self.scale = dim_head**-0.5 if self.scale_qk else 1.0 + + self.heads = out_dim // dim_head if out_dim is not None else heads + # for slice_size > 0 the attention score computation + # is split across the batch axis to save memory + # You can set slice_size with `set_attention_slice` + self.sliceable_head_dim = heads + + self.added_kv_proj_dim = added_kv_proj_dim + self.only_cross_attention = only_cross_attention + + if self.added_kv_proj_dim is None and self.only_cross_attention: + raise ValueError( + "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`." 
+ ) + + self.group_norm = None + self.spatial_norm = None + + self.norm_q = None + self.norm_k = None + + self.norm_cross = None + self.to_q = operations.Linear(query_dim, self.inner_dim, bias=bias, dtype=dtype, device=device) + + if not self.only_cross_attention: + # only relevant for the `AddedKVProcessor` classes + self.to_k = operations.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias, dtype=dtype, device=device) + self.to_v = operations.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias, dtype=dtype, device=device) + else: + self.to_k = None + self.to_v = None + + self.added_proj_bias = added_proj_bias + if self.added_kv_proj_dim is not None: + self.add_k_proj = operations.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=added_proj_bias, dtype=dtype, device=device) + self.add_v_proj = operations.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=added_proj_bias, dtype=dtype, device=device) + if self.context_pre_only is not None: + self.add_q_proj = operations.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias, dtype=dtype, device=device) + else: + self.add_q_proj = None + self.add_k_proj = None + self.add_v_proj = None + + if not self.pre_only: + self.to_out = nn.ModuleList([]) + self.to_out.append(operations.Linear(self.inner_dim, self.out_dim, bias=out_bias, dtype=dtype, device=device)) + self.to_out.append(nn.Dropout(dropout)) + else: + self.to_out = None + + if self.context_pre_only is not None and not self.context_pre_only: + self.to_add_out = operations.Linear(self.inner_dim, self.out_context_dim, bias=out_bias, dtype=dtype, device=device) + else: + self.to_add_out = None + + self.norm_added_q = None + self.norm_added_k = None + self.processor = processor + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + **cross_attention_kwargs, + ) -> torch.Tensor: + return self.processor( + self, + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + + +class CustomLiteLAProcessor2_0: + """Attention processor used typically in processing the SD3-like self-attention projections. add rms norm for query and key and apply RoPE""" + + def __init__(self): + self.kernel_func = nn.ReLU(inplace=False) + self.eps = 1e-15 + self.pad_val = 1.0 + + def apply_rotary_emb( + self, + x: torch.Tensor, + freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Apply rotary embeddings to input tensors using the given frequency tensor. This function applies rotary embeddings + to the given query or key 'x' tensors using the provided frequency tensor 'freqs_cis'. The input tensors are + reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting + tensors contain rotary embeddings and are returned as real tensors. + + Args: + x (`torch.Tensor`): + Query or key tensor to apply rotary embeddings. [B, H, S, D] xk (torch.Tensor): Key tensor to apply + freqs_cis (`Tuple[torch.Tensor]`): Precomputed frequency tensor for complex exponentials. ([S, D], [S, D],) + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. 
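+
+        Note:
+            Adjacent channel pairs of `x` are treated as the real and imaginary
+            parts of a complex number and rotated by the per-position angles
+            encoded in the (cos, sin) tensors.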
+ """ + cos, sin = freqs_cis # [S, D] + cos = cos[None, None] + sin = sin[None, None] + cos, sin = cos.to(x.device), sin.to(x.device) + + x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1) # [B, S, H, D//2] + x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) + out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) + + return out + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: torch.FloatTensor = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + rotary_freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]] = None, + rotary_freqs_cis_cross: Union[torch.Tensor, Tuple[torch.Tensor]] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + hidden_states_len = hidden_states.shape[1] + + input_ndim = hidden_states.ndim + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + if encoder_hidden_states is not None: + context_input_ndim = encoder_hidden_states.ndim + if context_input_ndim == 4: + batch_size, channel, height, width = encoder_hidden_states.shape + encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size = hidden_states.shape[0] + + # `sample` projections. + dtype = hidden_states.dtype + query = attn.to_q(hidden_states) + key = attn.to_k(hidden_states) + value = attn.to_v(hidden_states) + + # `context` projections. + has_encoder_hidden_state_proj = hasattr(attn, "add_q_proj") and hasattr(attn, "add_k_proj") and hasattr(attn, "add_v_proj") + if encoder_hidden_states is not None and has_encoder_hidden_state_proj: + encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) + encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) + encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) + + # attention + if not attn.is_cross_attention: + query = torch.cat([query, encoder_hidden_states_query_proj], dim=1) + key = torch.cat([key, encoder_hidden_states_key_proj], dim=1) + value = torch.cat([value, encoder_hidden_states_value_proj], dim=1) + else: + query = hidden_states + key = encoder_hidden_states + value = encoder_hidden_states + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.transpose(-1, -2).reshape(batch_size, attn.heads, head_dim, -1) + key = key.transpose(-1, -2).reshape(batch_size, attn.heads, head_dim, -1).transpose(-1, -2) + value = value.transpose(-1, -2).reshape(batch_size, attn.heads, head_dim, -1) + + # RoPE需要 [B, H, S, D] 输入 + # 此时 query是 [B, H, D, S], 需要转成 [B, H, S, D] 才能应用RoPE + query = query.permute(0, 1, 3, 2) # [B, H, S, D] (从 [B, H, D, S]) + + # Apply query and key normalization if needed + if attn.norm_q is not None: + query = attn.norm_q(query) + if attn.norm_k is not None: + key = attn.norm_k(key) + + # Apply RoPE if needed + if rotary_freqs_cis is not None: + query = self.apply_rotary_emb(query, rotary_freqs_cis) + if not attn.is_cross_attention: + key = self.apply_rotary_emb(key, rotary_freqs_cis) + elif rotary_freqs_cis_cross is not None and has_encoder_hidden_state_proj: + key = self.apply_rotary_emb(key, rotary_freqs_cis_cross) + + # 此时 query是 [B, H, S, D],需要还原成 [B, H, D, S] + query = query.permute(0, 1, 3, 2) # [B, H, D, S] + + if attention_mask is not None: + # attention_mask: [B, S] -> [B, 1, S, 1] + attention_mask = attention_mask[:, None, :, 
None].to(key.dtype) # [B, 1, S, 1] + query = query * attention_mask.permute(0, 1, 3, 2) # [B, H, S, D] * [B, 1, S, 1] + if not attn.is_cross_attention: + key = key * attention_mask # key: [B, h, S, D] 与 mask [B, 1, S, 1] 相乘 + value = value * attention_mask.permute(0, 1, 3, 2) # 如果 value 是 [B, h, D, S],那么需调整mask以匹配S维度 + + if attn.is_cross_attention and encoder_attention_mask is not None and has_encoder_hidden_state_proj: + encoder_attention_mask = encoder_attention_mask[:, None, :, None].to(key.dtype) # [B, 1, S_enc, 1] + # 此时 key: [B, h, S_enc, D], value: [B, h, D, S_enc] + key = key * encoder_attention_mask # [B, h, S_enc, D] * [B, 1, S_enc, 1] + value = value * encoder_attention_mask.permute(0, 1, 3, 2) # [B, h, D, S_enc] * [B, 1, 1, S_enc] + + query = self.kernel_func(query) + key = self.kernel_func(key) + + query, key, value = query.float(), key.float(), value.float() + + value = F.pad(value, (0, 0, 0, 1), mode="constant", value=self.pad_val) + + vk = torch.matmul(value, key) + + hidden_states = torch.matmul(vk, query) + + if hidden_states.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.float() + + hidden_states = hidden_states[:, :, :-1] / (hidden_states[:, :, -1:] + self.eps) + + hidden_states = hidden_states.view(batch_size, attn.heads * head_dim, -1).permute(0, 2, 1) + + hidden_states = hidden_states.to(dtype) + if encoder_hidden_states is not None: + encoder_hidden_states = encoder_hidden_states.to(dtype) + + # Split the attention outputs. + if encoder_hidden_states is not None and not attn.is_cross_attention and has_encoder_hidden_state_proj: + hidden_states, encoder_hidden_states = ( + hidden_states[:, : hidden_states_len], + hidden_states[:, hidden_states_len:], + ) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + if encoder_hidden_states is not None and not attn.context_pre_only and not attn.is_cross_attention and hasattr(attn, "to_add_out"): + encoder_hidden_states = attn.to_add_out(encoder_hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + if encoder_hidden_states is not None and context_input_ndim == 4: + encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if torch.get_autocast_gpu_dtype() == torch.float16: + hidden_states = hidden_states.clip(-65504, 65504) + if encoder_hidden_states is not None: + encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) + + return hidden_states, encoder_hidden_states + + +class CustomerAttnProcessor2_0: + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). + """ + + def __init__(self): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + + def apply_rotary_emb( + self, + x: torch.Tensor, + freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Apply rotary embeddings to input tensors using the given frequency tensor. This function applies rotary embeddings + to the given query or key 'x' tensors using the provided frequency tensor 'freqs_cis'. The input tensors are + reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting + tensors contain rotary embeddings and are returned as real tensors. 
+ + Args: + x (`torch.Tensor`): + Query or key tensor to apply rotary embeddings. [B, H, S, D] xk (torch.Tensor): Key tensor to apply + freqs_cis (`Tuple[torch.Tensor]`): Precomputed frequency tensor for complex exponentials. ([S, D], [S, D],) + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. + """ + cos, sin = freqs_cis # [S, D] + cos = cos[None, None] + sin = sin[None, None] + cos, sin = cos.to(x.device), sin.to(x.device) + + x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1) # [B, S, H, D//2] + x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) + out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) + + return out + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: torch.FloatTensor = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + rotary_freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]] = None, + rotary_freqs_cis_cross: Union[torch.Tensor, Tuple[torch.Tensor]] = None, + *args, + **kwargs, + ) -> torch.Tensor: + + residual = hidden_states + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + has_encoder_hidden_state_proj = hasattr(attn, "add_q_proj") and hasattr(attn, "add_k_proj") and hasattr(attn, "add_v_proj") + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + if attn.norm_q is not None: + query = attn.norm_q(query) + if attn.norm_k is not None: + key = attn.norm_k(key) + + # Apply RoPE if needed + if rotary_freqs_cis is not None: + query = self.apply_rotary_emb(query, rotary_freqs_cis) + if not attn.is_cross_attention: + key = self.apply_rotary_emb(key, rotary_freqs_cis) + elif rotary_freqs_cis_cross is not None and has_encoder_hidden_state_proj: + key = self.apply_rotary_emb(key, rotary_freqs_cis_cross) + + if attn.is_cross_attention and encoder_attention_mask is not None and has_encoder_hidden_state_proj: + # attention_mask: N x S1 + # encoder_attention_mask: N x S2 + # cross attention 整合attention_mask和encoder_attention_mask + combined_mask = attention_mask[:, :, None] * encoder_attention_mask[:, None, :] + attention_mask = torch.where(combined_mask == 1, 0.0, -torch.inf) + attention_mask = attention_mask[:, None, :, :].expand(-1, attn.heads, -1, -1).to(query.dtype) + + elif not attn.is_cross_attention and attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, 
source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + +def val2list(x: list or tuple or any, repeat_time=1) -> list: # type: ignore + """Repeat `val` for `repeat_time` times and return the list or val if list/tuple.""" + if isinstance(x, (list, tuple)): + return list(x) + return [x for _ in range(repeat_time)] + + +def val2tuple(x: list or tuple or any, min_len: int = 1, idx_repeat: int = -1) -> tuple: # type: ignore + """Return tuple with min_len by repeating element at idx_repeat.""" + # convert to list first + x = val2list(x) + + # repeat elements if necessary + if len(x) > 0: + x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))] + + return tuple(x) + + +def t2i_modulate(x, shift, scale): + return x * (1 + scale) + shift + + +def get_same_padding(kernel_size: Union[int, Tuple[int, ...]]) -> Union[int, Tuple[int, ...]]: + if isinstance(kernel_size, tuple): + return tuple([get_same_padding(ks) for ks in kernel_size]) + else: + assert kernel_size % 2 > 0, f"kernel size {kernel_size} should be odd number" + return kernel_size // 2 + +class ConvLayer(nn.Module): + def __init__( + self, + in_dim: int, + out_dim: int, + kernel_size=3, + stride=1, + dilation=1, + groups=1, + padding: Union[int, None] = None, + use_bias=False, + norm=None, + act=None, + dtype=None, device=None, operations=None + ): + super().__init__() + if padding is None: + padding = get_same_padding(kernel_size) + padding *= dilation + + self.in_dim = in_dim + self.out_dim = out_dim + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + self.groups = groups + self.padding = padding + self.use_bias = use_bias + + self.conv = operations.Conv1d( + in_dim, + out_dim, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=use_bias, + device=device, + dtype=dtype + ) + if norm is not None: + self.norm = operations.RMSNorm(out_dim, elementwise_affine=False, dtype=dtype, device=device) + else: + self.norm = None + if act is not None: + self.act = nn.SiLU(inplace=True) + else: + self.act = None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv(x) + if self.norm: + x = self.norm(x) + if self.act: + x = self.act(x) + return x + + +class GLUMBConv(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: int, + out_feature=None, + kernel_size=3, + stride=1, + padding: Union[int, None] = None, + use_bias=False, + norm=(None, None, None), + act=("silu", "silu", None), + dilation=1, + dtype=None, device=None, operations=None + ): + out_feature = out_feature or in_features + super().__init__() + use_bias = val2tuple(use_bias, 3) + 
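+        # use_bias/norm/act are broadcast to length 3 so each of the three conv
+        # stages (inverted, depthwise, pointwise) gets its own setting.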
norm = val2tuple(norm, 3) + act = val2tuple(act, 3) + + self.glu_act = nn.SiLU(inplace=False) + self.inverted_conv = ConvLayer( + in_features, + hidden_features * 2, + 1, + use_bias=use_bias[0], + norm=norm[0], + act=act[0], + dtype=dtype, + device=device, + operations=operations, + ) + self.depth_conv = ConvLayer( + hidden_features * 2, + hidden_features * 2, + kernel_size, + stride=stride, + groups=hidden_features * 2, + padding=padding, + use_bias=use_bias[1], + norm=norm[1], + act=None, + dilation=dilation, + dtype=dtype, + device=device, + operations=operations, + ) + self.point_conv = ConvLayer( + hidden_features, + out_feature, + 1, + use_bias=use_bias[2], + norm=norm[2], + act=act[2], + dtype=dtype, + device=device, + operations=operations, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x.transpose(1, 2) + x = self.inverted_conv(x) + x = self.depth_conv(x) + + x, gate = torch.chunk(x, 2, dim=1) + gate = self.glu_act(gate) + x = x * gate + + x = self.point_conv(x) + x = x.transpose(1, 2) + + return x + + +class LinearTransformerBlock(nn.Module): + """ + A Sana block with global shared adaptive layer norm (adaLN-single) conditioning. + """ + def __init__( + self, + dim, + num_attention_heads, + attention_head_dim, + use_adaln_single=True, + cross_attention_dim=None, + added_kv_proj_dim=None, + context_pre_only=False, + mlp_ratio=4.0, + add_cross_attention=False, + add_cross_attention_dim=None, + qk_norm=None, + dtype=None, device=None, operations=None + ): + super().__init__() + + self.norm1 = operations.RMSNorm(dim, elementwise_affine=False, eps=1e-6) + self.attn = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim, + added_kv_proj_dim=added_kv_proj_dim, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=dim, + bias=True, + qk_norm=qk_norm, + processor=CustomLiteLAProcessor2_0(), + dtype=dtype, + device=device, + operations=operations, + ) + + self.add_cross_attention = add_cross_attention + self.context_pre_only = context_pre_only + + if add_cross_attention and add_cross_attention_dim is not None: + self.cross_attn = Attention( + query_dim=dim, + cross_attention_dim=add_cross_attention_dim, + added_kv_proj_dim=add_cross_attention_dim, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=dim, + context_pre_only=context_pre_only, + bias=True, + qk_norm=qk_norm, + processor=CustomerAttnProcessor2_0(), + dtype=dtype, + device=device, + operations=operations, + ) + + self.norm2 = operations.RMSNorm(dim, 1e-06, elementwise_affine=False) + + self.ff = GLUMBConv( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + use_bias=(True, True, False), + norm=(None, None, None), + act=("silu", "silu", None), + dtype=dtype, + device=device, + operations=operations, + ) + self.use_adaln_single = use_adaln_single + if use_adaln_single: + self.scale_shift_table = nn.Parameter(torch.empty(6, dim, dtype=dtype, device=device)) + + def forward( + self, + hidden_states: torch.FloatTensor, + encoder_hidden_states: torch.FloatTensor = None, + attention_mask: torch.FloatTensor = None, + encoder_attention_mask: torch.FloatTensor = None, + rotary_freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]] = None, + rotary_freqs_cis_cross: Union[torch.Tensor, Tuple[torch.Tensor]] = None, + temb: torch.FloatTensor = None, + ): + + N = hidden_states.shape[0] + + # step 1: AdaLN single + if self.use_adaln_single: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( + comfy.model_management.cast_to(self.scale_shift_table[None], 
dtype=temb.dtype, device=temb.device) + temb.reshape(N, 6, -1)
+            ).chunk(6, dim=1)
+
+        norm_hidden_states = self.norm1(hidden_states)
+        if self.use_adaln_single:
+            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
+
+        # step 2: attention
+        if not self.add_cross_attention:
+            attn_output, encoder_hidden_states = self.attn(
+                hidden_states=norm_hidden_states,
+                attention_mask=attention_mask,
+                encoder_hidden_states=encoder_hidden_states,
+                encoder_attention_mask=encoder_attention_mask,
+                rotary_freqs_cis=rotary_freqs_cis,
+                rotary_freqs_cis_cross=rotary_freqs_cis_cross,
+            )
+        else:
+            attn_output, _ = self.attn(
+                hidden_states=norm_hidden_states,
+                attention_mask=attention_mask,
+                encoder_hidden_states=None,
+                encoder_attention_mask=None,
+                rotary_freqs_cis=rotary_freqs_cis,
+                rotary_freqs_cis_cross=None,
+            )
+
+        if self.use_adaln_single:
+            attn_output = gate_msa * attn_output
+        hidden_states = attn_output + hidden_states
+
+        if self.add_cross_attention:
+            attn_output = self.cross_attn(
+                hidden_states=hidden_states,
+                attention_mask=attention_mask,
+                encoder_hidden_states=encoder_hidden_states,
+                encoder_attention_mask=encoder_attention_mask,
+                rotary_freqs_cis=rotary_freqs_cis,
+                rotary_freqs_cis_cross=rotary_freqs_cis_cross,
+            )
+            hidden_states = attn_output + hidden_states
+
+        # step 3: add norm
+        norm_hidden_states = self.norm2(hidden_states)
+        if self.use_adaln_single:
+            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
+
+        # step 4: feed forward
+        ff_output = self.ff(norm_hidden_states)
+        if self.use_adaln_single:
+            ff_output = gate_mlp * ff_output
+
+        hidden_states = hidden_states + ff_output
+
+        return hidden_states
diff --git a/comfy/ldm/ace/lyric_encoder.py b/comfy/ldm/ace/lyric_encoder.py
new file mode 100644
index 000000000..ff4359b26
--- /dev/null
+++ b/comfy/ldm/ace/lyric_encoder.py
@@ -0,0 +1,1067 @@
+# Original from: https://github.com/ace-step/ACE-Step/blob/main/models/lyrics_utils/lyric_encoder.py
+from typing import Optional, Tuple, Union
+import math
+import torch
+from torch import nn
+
+import comfy.model_management
+
+class ConvolutionModule(nn.Module):
+    """ConvolutionModule in Conformer model."""
+
+    def __init__(self,
+                 channels: int,
+                 kernel_size: int = 15,
+                 activation: nn.Module = nn.ReLU(),
+                 norm: str = "batch_norm",
+                 causal: bool = False,
+                 bias: bool = True,
+                 dtype=None, device=None, operations=None):
+        """Construct a ConvolutionModule object.
+        Args:
+            channels (int): The number of channels of conv layers.
+            kernel_size (int): Kernel size of conv layers.
+            causal (bool): Whether to use causal convolution or not
+        """
+        super().__init__()
+
+        self.pointwise_conv1 = operations.Conv1d(
+            channels,
+            2 * channels,
+            kernel_size=1,
+            stride=1,
+            padding=0,
+            bias=bias,
+            dtype=dtype, device=device
+        )
+        # self.lorder is used to distinguish if it's a causal convolution,
+        # if self.lorder > 0: it's a causal convolution, the input will be
+        # padded with self.lorder frames on the left in forward.
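+        # e.g. kernel_size=15 with causal=True gives self.lorder=14: forward()
+        # left-pads (or left-concats the cache) 14 frames, so no future frame
+        # leaks into the convolution.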
+        # else: it's a symmetrical convolution
+        if causal:
+            padding = 0
+            self.lorder = kernel_size - 1
+        else:
+            # kernel_size should be an odd number for non-causal convolution
+            assert (kernel_size - 1) % 2 == 0
+            padding = (kernel_size - 1) // 2
+            self.lorder = 0
+        self.depthwise_conv = operations.Conv1d(
+            channels,
+            channels,
+            kernel_size,
+            stride=1,
+            padding=padding,
+            groups=channels,
+            bias=bias,
+            dtype=dtype, device=device
+        )
+
+        assert norm in ['batch_norm', 'layer_norm']
+        if norm == "batch_norm":
+            self.use_layer_norm = False
+            self.norm = nn.BatchNorm1d(channels)
+        else:
+            self.use_layer_norm = True
+            self.norm = operations.LayerNorm(channels, dtype=dtype, device=device)
+
+        self.pointwise_conv2 = operations.Conv1d(
+            channels,
+            channels,
+            kernel_size=1,
+            stride=1,
+            padding=0,
+            bias=bias,
+            dtype=dtype, device=device
+        )
+        self.activation = activation
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
+        cache: torch.Tensor = torch.zeros((0, 0, 0)),
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Compute convolution module.
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, channels).
+            mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),
+                (0, 0, 0) means fake mask.
+            cache (torch.Tensor): left context cache, it is only
+                used in causal convolution (#batch, channels, cache_t),
+                (0, 0, 0) means fake cache.
+        Returns:
+            torch.Tensor: Output tensor (#batch, time, channels).
+        """
+        # exchange the temporal dimension and the feature dimension
+        x = x.transpose(1, 2)  # (#batch, channels, time)
+
+        # mask batch padding
+        if mask_pad.size(2) > 0:  # time > 0
+            x.masked_fill_(~mask_pad, 0.0)
+
+        if self.lorder > 0:
+            if cache.size(2) == 0:  # cache_t == 0
+                x = nn.functional.pad(x, (self.lorder, 0), 'constant', 0.0)
+            else:
+                assert cache.size(0) == x.size(0)  # equal batch
+                assert cache.size(1) == x.size(1)  # equal channel
+                x = torch.cat((cache, x), dim=2)
+            assert (x.size(2) > self.lorder)
+            new_cache = x[:, :, -self.lorder:]
+        else:
+            # It's better we just return None if no cache is required,
+            # However, for JIT export, here we just fake one tensor instead of
+            # None.
+            new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
+
+        # GLU mechanism
+        x = self.pointwise_conv1(x)  # (batch, 2*channel, dim)
+        x = nn.functional.glu(x, dim=1)  # (batch, channel, dim)
+
+        # 1D Depthwise Conv
+        x = self.depthwise_conv(x)
+        if self.use_layer_norm:
+            x = x.transpose(1, 2)
+        x = self.activation(self.norm(x))
+        if self.use_layer_norm:
+            x = x.transpose(1, 2)
+        x = self.pointwise_conv2(x)
+        # mask batch padding
+        if mask_pad.size(2) > 0:  # time > 0
+            x.masked_fill_(~mask_pad, 0.0)
+
+        return x.transpose(1, 2), new_cache
+
+class PositionwiseFeedForward(torch.nn.Module):
+    """Positionwise feed forward layer.
+
+    The feed-forward layer is applied at each position of the sequence.
+    The output dim is the same as the input dim.
+
+    Args:
+        idim (int): Input dimension.
+        hidden_units (int): The number of hidden units.
+        dropout_rate (float): Dropout rate.
+ activation (torch.nn.Module): Activation function + """ + + def __init__( + self, + idim: int, + hidden_units: int, + dropout_rate: float, + activation: torch.nn.Module = torch.nn.ReLU(), + dtype=None, device=None, operations=None + ): + """Construct a PositionwiseFeedForward object.""" + super(PositionwiseFeedForward, self).__init__() + self.w_1 = operations.Linear(idim, hidden_units, dtype=dtype, device=device) + self.activation = activation + self.dropout = torch.nn.Dropout(dropout_rate) + self.w_2 = operations.Linear(hidden_units, idim, dtype=dtype, device=device) + + def forward(self, xs: torch.Tensor) -> torch.Tensor: + """Forward function. + + Args: + xs: input tensor (B, L, D) + Returns: + output tensor, (B, L, D) + """ + return self.w_2(self.dropout(self.activation(self.w_1(xs)))) + +class Swish(torch.nn.Module): + """Construct an Swish object.""" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Return Swish activation function.""" + return x * torch.sigmoid(x) + +class MultiHeadedAttention(nn.Module): + """Multi-Head Attention layer. + + Args: + n_head (int): The number of heads. + n_feat (int): The number of features. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, + n_head: int, + n_feat: int, + dropout_rate: float, + key_bias: bool = True, + dtype=None, device=None, operations=None): + """Construct an MultiHeadedAttention object.""" + super().__init__() + assert n_feat % n_head == 0 + # We assume d_v always equals d_k + self.d_k = n_feat // n_head + self.h = n_head + self.linear_q = operations.Linear(n_feat, n_feat, dtype=dtype, device=device) + self.linear_k = operations.Linear(n_feat, n_feat, bias=key_bias, dtype=dtype, device=device) + self.linear_v = operations.Linear(n_feat, n_feat, dtype=dtype, device=device) + self.linear_out = operations.Linear(n_feat, n_feat, dtype=dtype, device=device) + self.dropout = nn.Dropout(p=dropout_rate) + + def forward_qkv( + self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Transform query, key and value. + + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + + Returns: + torch.Tensor: Transformed query tensor, size + (#batch, n_head, time1, d_k). + torch.Tensor: Transformed key tensor, size + (#batch, n_head, time2, d_k). + torch.Tensor: Transformed value tensor, size + (#batch, n_head, time2, d_k). + + """ + n_batch = query.size(0) + q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) + k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) + v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) + q = q.transpose(1, 2) # (batch, head, time1, d_k) + k = k.transpose(1, 2) # (batch, head, time2, d_k) + v = v.transpose(1, 2) # (batch, head, time2, d_k) + return q, k, v + + def forward_attention( + self, + value: torch.Tensor, + scores: torch.Tensor, + mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool) + ) -> torch.Tensor: + """Compute attention context vector. + + Args: + value (torch.Tensor): Transformed value, size + (#batch, n_head, time2, d_k). + scores (torch.Tensor): Attention score, size + (#batch, n_head, time1, time2). + mask (torch.Tensor): Mask, size (#batch, 1, time2) or + (#batch, time1, time2), (0, 0, 0) means fake mask. + + Returns: + torch.Tensor: Transformed value (#batch, time1, d_model) + weighted by the attention score (#batch, time1, time2). 
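+
+        Note: positions where ``mask`` is 0 are filled with ``-inf`` before the
+        softmax and zeroed afterwards, so padded keys get no attention weight.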
+ + """ + n_batch = value.size(0) + + if mask is not None and mask.size(2) > 0: # time2 > 0 + mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2) + # For last chunk, time2 might be larger than scores.size(-1) + mask = mask[:, :, :, :scores.size(-1)] # (batch, 1, *, time2) + scores = scores.masked_fill(mask, -float('inf')) + attn = torch.softmax(scores, dim=-1).masked_fill( + mask, 0.0) # (batch, head, time1, time2) + + else: + attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) + + p_attn = self.dropout(attn) + x = torch.matmul(p_attn, value) # (batch, head, time1, d_k) + x = (x.transpose(1, 2).contiguous().view(n_batch, -1, + self.h * self.d_k) + ) # (batch, time1, d_model) + + return self.linear_out(x) # (batch, time1, d_model) + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + pos_emb: torch.Tensor = torch.empty(0), + cache: torch.Tensor = torch.zeros((0, 0, 0, 0)) + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Compute scaled dot product attention. + + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + mask (torch.Tensor): Mask tensor (#batch, 1, time2) or + (#batch, time1, time2). + 1.When applying cross attention between decoder and encoder, + the batch padding mask for input is in (#batch, 1, T) shape. + 2.When applying self attention of encoder, + the mask is in (#batch, T, T) shape. + 3.When applying self attention of decoder, + the mask is in (#batch, L, L) shape. + 4.If the different position in decoder see different block + of the encoder, such as Mocha, the passed in mask could be + in (#batch, L, T) shape. But there is no such case in current + CosyVoice. + cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2), + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + + + Returns: + torch.Tensor: Output tensor (#batch, time1, d_model). + torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2) + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + + """ + q, k, v = self.forward_qkv(query, key, value) + if cache.size(0) > 0: + key_cache, value_cache = torch.split(cache, + cache.size(-1) // 2, + dim=-1) + k = torch.cat([key_cache, k], dim=2) + v = torch.cat([value_cache, v], dim=2) + new_cache = torch.cat((k, v), dim=-1) + + scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) + return self.forward_attention(v, scores, mask), new_cache + + +class RelPositionMultiHeadedAttention(MultiHeadedAttention): + """Multi-Head Attention layer with relative position encoding. + Paper: https://arxiv.org/abs/1901.02860 + Args: + n_head (int): The number of heads. + n_feat (int): The number of features. + dropout_rate (float): Dropout rate. 
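+
+    pos_bias_u and pos_bias_v below are the learnable biases u and v from
+    Section 3.3 of https://arxiv.org/abs/1901.02860, added to the query when
+    forming the content-based and position-based attention scores.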
+ """ + + def __init__(self, + n_head: int, + n_feat: int, + dropout_rate: float, + key_bias: bool = True, + dtype=None, device=None, operations=None): + """Construct an RelPositionMultiHeadedAttention object.""" + super().__init__(n_head, n_feat, dropout_rate, key_bias, dtype=dtype, device=device, operations=operations) + # linear transformation for positional encoding + self.linear_pos = operations.Linear(n_feat, n_feat, bias=False, dtype=dtype, device=device) + # these two learnable bias are used in matrix c and matrix d + # as described in https://arxiv.org/abs/1901.02860 Section 3.3 + self.pos_bias_u = nn.Parameter(torch.empty(self.h, self.d_k, dtype=dtype, device=device)) + self.pos_bias_v = nn.Parameter(torch.empty(self.h, self.d_k, dtype=dtype, device=device)) + # torch.nn.init.xavier_uniform_(self.pos_bias_u) + # torch.nn.init.xavier_uniform_(self.pos_bias_v) + + def rel_shift(self, x: torch.Tensor) -> torch.Tensor: + """Compute relative positional encoding. + + Args: + x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1). + time1 means the length of query vector. + + Returns: + torch.Tensor: Output tensor. + + """ + zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1), + device=x.device, + dtype=x.dtype) + x_padded = torch.cat([zero_pad, x], dim=-1) + + x_padded = x_padded.view(x.size()[0], + x.size()[1], + x.size(3) + 1, x.size(2)) + x = x_padded[:, :, 1:].view_as(x)[ + :, :, :, : x.size(-1) // 2 + 1 + ] # only keep the positions from 0 to time2 + return x + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + pos_emb: torch.Tensor = torch.empty(0), + cache: torch.Tensor = torch.zeros((0, 0, 0, 0)) + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Compute 'Scaled Dot Product Attention' with rel. positional encoding. + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + mask (torch.Tensor): Mask tensor (#batch, 1, time2) or + (#batch, time1, time2), (0, 0, 0) means fake mask. + pos_emb (torch.Tensor): Positional embedding tensor + (#batch, time2, size). + cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2), + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + Returns: + torch.Tensor: Output tensor (#batch, time1, d_model). + torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2) + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + """ + q, k, v = self.forward_qkv(query, key, value) + q = q.transpose(1, 2) # (batch, time1, head, d_k) + + if cache.size(0) > 0: + key_cache, value_cache = torch.split(cache, + cache.size(-1) // 2, + dim=-1) + k = torch.cat([key_cache, k], dim=2) + v = torch.cat([value_cache, v], dim=2) + # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's + # non-trivial to calculate `next_cache_start` here. 
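+        # K and V are concatenated along the last dim, so callers can recover
+        # them with torch.split(cache, cache.size(-1) // 2, dim=-1), exactly as
+        # done above when consuming the cache.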
+        new_cache = torch.cat((k, v), dim=-1)
+
+        n_batch_pos = pos_emb.size(0)
+        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
+        p = p.transpose(1, 2)  # (batch, head, time1, d_k)
+
+        # (batch, head, time1, d_k)
+        q_with_bias_u = (q + comfy.model_management.cast_to(self.pos_bias_u, dtype=q.dtype, device=q.device)).transpose(1, 2)
+        # (batch, head, time1, d_k)
+        q_with_bias_v = (q + comfy.model_management.cast_to(self.pos_bias_v, dtype=q.dtype, device=q.device)).transpose(1, 2)
+
+        # compute attention score
+        # first compute matrix a and matrix c
+        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
+        # (batch, head, time1, time2)
+        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
+
+        # compute matrix b and matrix d
+        # (batch, head, time1, time2)
+        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
+        # NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used
+        if matrix_ac.shape != matrix_bd.shape:
+            matrix_bd = self.rel_shift(matrix_bd)
+
+        scores = (matrix_ac + matrix_bd) / math.sqrt(
+            self.d_k)  # (batch, head, time1, time2)
+
+        return self.forward_attention(v, scores, mask), new_cache
+
+
+def subsequent_mask(
+        size: int,
+        device: torch.device = torch.device("cpu"),
+) -> torch.Tensor:
+    """Create mask for subsequent steps (size, size).
+
+    This mask is used only in the decoder, which works in an auto-regressive
+    mode. This means the current step may only attend to its left steps.
+
+    In the encoder, full attention is used when streaming is not necessary
+    and the sequence is not long. In this case, no attention mask is needed.
+
+    When streaming is needed, chunk-based attention is used in the encoder.
+    See subsequent_chunk_mask for the chunk-based attention mask.
+
+    Args:
+        size (int): size of mask
+        device (torch.device): "cpu" or "cuda" or torch.Tensor.device
+
+    Returns:
+        torch.Tensor: mask
+
+    Examples:
+        >>> subsequent_mask(3)
+        [[1, 0, 0],
+         [1, 1, 0],
+         [1, 1, 1]]
+    """
+    arange = torch.arange(size, device=device)
+    mask = arange.expand(size, size)
+    arange = arange.unsqueeze(-1)
+    mask = mask <= arange
+    return mask
+
+
+def subsequent_chunk_mask(
+        size: int,
+        chunk_size: int,
+        num_left_chunks: int = -1,
+        device: torch.device = torch.device("cpu"),
+) -> torch.Tensor:
+    """Create mask for subsequent steps (size, size) with chunk size,
+       this is for streaming encoder
+
+    Args:
+        size (int): size of mask
+        chunk_size (int): size of chunk
+        num_left_chunks (int): number of left chunks
+            <0: use full chunk
+            >=0: use num_left_chunks
+        device (torch.device): "cpu" or "cuda" or torch.Tensor.device
+
+    Returns:
+        torch.Tensor: mask
+
+    Examples:
+        >>> subsequent_chunk_mask(4, 2)
+        [[1, 1, 0, 0],
+         [1, 1, 0, 0],
+         [1, 1, 1, 1],
+         [1, 1, 1, 1]]
+    """
+    ret = torch.zeros(size, size, device=device, dtype=torch.bool)
+    for i in range(size):
+        if num_left_chunks < 0:
+            start = 0
+        else:
+            start = max((i // chunk_size - num_left_chunks) * chunk_size, 0)
+        ending = min((i // chunk_size + 1) * chunk_size, size)
+        ret[i, start:ending] = True
+    return ret
+
+def add_optional_chunk_mask(xs: torch.Tensor,
+                            masks: torch.Tensor,
+                            use_dynamic_chunk: bool,
+                            use_dynamic_left_chunk: bool,
+                            decoding_chunk_size: int,
+                            static_chunk_size: int,
+                            num_decoding_left_chunks: int,
+                            enable_full_context: bool = True):
+    """ Apply optional mask for encoder.
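+    In dynamic chunk training a chunk size is sampled anew for every batch,
+    so a single trained model can decode with different chunk sizes later.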
+ + Args: + xs (torch.Tensor): padded input, (B, L, D), L for max length + mask (torch.Tensor): mask for xs, (B, 1, L) + use_dynamic_chunk (bool): whether to use dynamic chunk or not + use_dynamic_left_chunk (bool): whether to use dynamic left chunk for + training. + decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's + 0: default for training, use random dynamic chunk. + <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. + static_chunk_size (int): chunk size for static chunk training/decoding + if it's greater than 0, if use_dynamic_chunk is true, + this parameter will be ignored + num_decoding_left_chunks: number of left chunks, this is for decoding, + the chunk size is decoding_chunk_size. + >=0: use num_decoding_left_chunks + <0: use all left chunks + enable_full_context (bool): + True: chunk size is either [1, 25] or full context(max_len) + False: chunk size ~ U[1, 25] + + Returns: + torch.Tensor: chunk mask of the input xs. + """ + # Whether to use chunk mask or not + if use_dynamic_chunk: + max_len = xs.size(1) + if decoding_chunk_size < 0: + chunk_size = max_len + num_left_chunks = -1 + elif decoding_chunk_size > 0: + chunk_size = decoding_chunk_size + num_left_chunks = num_decoding_left_chunks + else: + # chunk size is either [1, 25] or full context(max_len). + # Since we use 4 times subsampling and allow up to 1s(100 frames) + # delay, the maximum frame is 100 / 4 = 25. + chunk_size = torch.randint(1, max_len, (1, )).item() + num_left_chunks = -1 + if chunk_size > max_len // 2 and enable_full_context: + chunk_size = max_len + else: + chunk_size = chunk_size % 25 + 1 + if use_dynamic_left_chunk: + max_left_chunks = (max_len - 1) // chunk_size + num_left_chunks = torch.randint(0, max_left_chunks, + (1, )).item() + chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size, + num_left_chunks, + xs.device) # (L, L) + chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L) + chunk_masks = masks & chunk_masks # (B, L, L) + elif static_chunk_size > 0: + num_left_chunks = num_decoding_left_chunks + chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size, + num_left_chunks, + xs.device) # (L, L) + chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L) + chunk_masks = masks & chunk_masks # (B, L, L) + else: + chunk_masks = masks + return chunk_masks + + +class ConformerEncoderLayer(nn.Module): + """Encoder layer module. + Args: + size (int): Input dimension. + self_attn (torch.nn.Module): Self-attention module instance. + `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` + instance can be used as the argument. + feed_forward (torch.nn.Module): Feed-forward module instance. + `PositionwiseFeedForward` instance can be used as the argument. + feed_forward_macaron (torch.nn.Module): Additional feed-forward module + instance. + `PositionwiseFeedForward` instance can be used as the argument. + conv_module (torch.nn.Module): Convolution module instance. + `ConvlutionModule` instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): + True: use layer_norm before each sub-block. + False: use layer_norm after each sub-block. 
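+
+    When feed_forward_macaron is given, the layer follows the macaron order
+    FFN -> MHA -> (conv) -> FFN, and each feed-forward output is scaled by
+    ff_scale = 0.5.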
+    """
+
+    def __init__(
+        self,
+        size: int,
+        self_attn: torch.nn.Module,
+        feed_forward: Optional[nn.Module] = None,
+        feed_forward_macaron: Optional[nn.Module] = None,
+        conv_module: Optional[nn.Module] = None,
+        dropout_rate: float = 0.1,
+        normalize_before: bool = True,
+        dtype=None, device=None, operations=None
+    ):
+        """Construct an EncoderLayer object."""
+        super().__init__()
+        self.self_attn = self_attn
+        self.feed_forward = feed_forward
+        self.feed_forward_macaron = feed_forward_macaron
+        self.conv_module = conv_module
+        self.norm_ff = operations.LayerNorm(size, eps=1e-5, dtype=dtype, device=device)  # for the FNN module
+        self.norm_mha = operations.LayerNorm(size, eps=1e-5, dtype=dtype, device=device)  # for the MHA module
+        if feed_forward_macaron is not None:
+            self.norm_ff_macaron = operations.LayerNorm(size, eps=1e-5, dtype=dtype, device=device)
+            self.ff_scale = 0.5
+        else:
+            self.ff_scale = 1.0
+        if self.conv_module is not None:
+            self.norm_conv = operations.LayerNorm(size, eps=1e-5, dtype=dtype, device=device)  # for the CNN module
+            self.norm_final = operations.LayerNorm(
+                size, eps=1e-5, dtype=dtype, device=device)  # for the final output of the block
+        self.dropout = nn.Dropout(dropout_rate)
+        self.size = size
+        self.normalize_before = normalize_before
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        mask: torch.Tensor,
+        pos_emb: torch.Tensor,
+        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
+        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
+        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Compute encoded features.
+
+        Args:
+            x (torch.Tensor): (#batch, time, size)
+            mask (torch.Tensor): Mask tensor for the input (#batch, time, time),
+                (0, 0, 0) means fake mask.
+            pos_emb (torch.Tensor): positional encoding, must not be None
+                for ConformerEncoderLayer.
+            mask_pad (torch.Tensor): batch padding mask used for conv module.
+                (#batch, 1, time), (0, 0, 0) means fake mask.
+            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
+                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
+            cnn_cache (torch.Tensor): Convolution cache in conformer layer
+                (#batch=1, size, cache_t2)
+        Returns:
+            torch.Tensor: Output tensor (#batch, time, size).
+            torch.Tensor: Mask tensor (#batch, time, time).
+            torch.Tensor: att_cache tensor,
+                (#batch=1, head, cache_t1 + time, d_k * 2).
+            torch.Tensor: cnn_cache tensor (#batch, size, cache_t2).
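+
+        A (0, 0, 0)-shaped tensor stands in for "no mask" / "no cache"; a real
+        tensor is used instead of None so the layer stays JIT-exportable.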
+        """
+
+        # whether to use macaron style
+        if self.feed_forward_macaron is not None:
+            residual = x
+            if self.normalize_before:
+                x = self.norm_ff_macaron(x)
+            x = residual + self.ff_scale * self.dropout(
+                self.feed_forward_macaron(x))
+            if not self.normalize_before:
+                x = self.norm_ff_macaron(x)
+
+        # multi-headed self-attention module
+        residual = x
+        if self.normalize_before:
+            x = self.norm_mha(x)
+        x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb,
+                                              att_cache)
+        x = residual + self.dropout(x_att)
+        if not self.normalize_before:
+            x = self.norm_mha(x)
+
+        # convolution module
+        # Fake new cnn cache here, and then change it in conv_module
+        new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
+        if self.conv_module is not None:
+            residual = x
+            if self.normalize_before:
+                x = self.norm_conv(x)
+            x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)
+            x = residual + self.dropout(x)
+
+            if not self.normalize_before:
+                x = self.norm_conv(x)
+
+        # feed forward module
+        residual = x
+        if self.normalize_before:
+            x = self.norm_ff(x)
+
+        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
+        if not self.normalize_before:
+            x = self.norm_ff(x)
+
+        if self.conv_module is not None:
+            x = self.norm_final(x)
+
+        return x, mask, new_att_cache, new_cnn_cache
+
+
+class EspnetRelPositionalEncoding(torch.nn.Module):
+    """Relative positional encoding module (new implementation).
+
+    Details can be found in https://github.com/espnet/espnet/pull/2816.
+
+    See: Appendix B in https://arxiv.org/abs/1901.02860
+
+    Args:
+        d_model (int): Embedding dimension.
+        dropout_rate (float): Dropout rate.
+        max_len (int): Maximum input length.
+
+    """
+
+    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
+        """Construct a PositionalEncoding object."""
+        super(EspnetRelPositionalEncoding, self).__init__()
+        self.d_model = d_model
+        self.xscale = math.sqrt(self.d_model)
+        self.dropout = torch.nn.Dropout(p=dropout_rate)
+        self.pe = None
+        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
+
+    def extend_pe(self, x: torch.Tensor):
+        """Reset the positional encodings."""
+        if self.pe is not None:
+            # self.pe contains both positive and negative parts
+            # the length of self.pe is 2 * input_len - 1
+            if self.pe.size(1) >= x.size(1) * 2 - 1:
+                if self.pe.dtype != x.dtype or self.pe.device != x.device:
+                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+                return
+        # Suppose `i` denotes the position of the query vector and `j` the
+        # position of the key vector. We use positive relative positions when
+        # keys are to the left (i>j) and negative relative positions otherwise
+        # (i<j).
+        pe_positive = torch.zeros(x.size(1), self.d_model)
+        pe_negative = torch.zeros(x.size(1), self.d_model)
+        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+        div_term = torch.exp(
+            torch.arange(0, self.d_model, 2, dtype=torch.float32)
+            * -(math.log(10000.0) / self.d_model)
+        )
+        pe_positive[:, 0::2] = torch.sin(position * div_term)
+        pe_positive[:, 1::2] = torch.cos(position * div_term)
+        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+        # Reverse the order of positive indices and concat both positive and
+        # negative indices. This is used to support the shifting trick
+        # as in https://arxiv.org/abs/1901.02860
+        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+        pe_negative = pe_negative[1:].unsqueeze(0)
+        pe = torch.cat([pe_positive, pe_negative], dim=1)
+        self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+    def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Add positional encoding.
+
+        Args:
+            x (torch.Tensor): Input tensor (batch, time, `*`).
+
+        Returns:
+            torch.Tensor: Encoded tensor (batch, time, `*`).
+
+        """
+        self.extend_pe(x)
+        x = x * self.xscale
+        pos_emb = self.position_encoding(size=x.size(1), offset=offset)
+        return self.dropout(x), self.dropout(pos_emb)
+
+    def position_encoding(self,
+                          offset: Union[int, torch.Tensor],
+                          size: int) -> torch.Tensor:
+        """ For getting encoding in a streaming fashion
+
+        Attention!!!!!
+        we apply dropout only once at the whole utterance level in a
+        non-streaming way, but will call this function several times with
+        increasing input size in a streaming scenario, so the dropout will
+        be applied several times.
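+
+        The returned slice is centered on the zero-relative-position entry of
+        the cached table, covering relative positions -(size - 1) .. size - 1.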
+ + Args: + offset (int or torch.tensor): start offset + size (int): required size of position encoding + + Returns: + torch.Tensor: Corresponding encoding + """ + pos_emb = self.pe[ + :, + self.pe.size(1) // 2 - size + 1: self.pe.size(1) // 2 + size, + ] + return pos_emb + + + +class LinearEmbed(torch.nn.Module): + """Linear transform the input without subsampling + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module, dtype=None, device=None, operations=None): + """Construct an linear object.""" + super().__init__() + self.out = torch.nn.Sequential( + operations.Linear(idim, odim, dtype=dtype, device=device), + operations.LayerNorm(odim, eps=1e-5, dtype=dtype, device=device), + torch.nn.Dropout(dropout_rate), + ) + self.pos_enc = pos_enc_class #rel_pos_espnet + + def position_encoding(self, offset: Union[int, torch.Tensor], + size: int) -> torch.Tensor: + return self.pos_enc.position_encoding(offset, size) + + def forward( + self, + x: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Input x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: linear input tensor (#batch, time', odim), + where time' = time . + torch.Tensor: linear input mask (#batch, 1, time'), + where time' = time . + + """ + x = self.out(x) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb + + +ATTENTION_CLASSES = { + "selfattn": MultiHeadedAttention, + "rel_selfattn": RelPositionMultiHeadedAttention, +} + +ACTIVATION_CLASSES = { + "hardtanh": torch.nn.Hardtanh, + "tanh": torch.nn.Tanh, + "relu": torch.nn.ReLU, + "selu": torch.nn.SELU, + "swish": getattr(torch.nn, "SiLU", Swish), + "gelu": torch.nn.GELU, +} + + +def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor: + """Make mask tensor containing indices of padded part. + + See description of make_non_pad_mask. + + Args: + lengths (torch.Tensor): Batch of lengths (B,). + Returns: + torch.Tensor: Mask tensor containing indices of padded part. 
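+            True marks a padded position.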
+ + Examples: + >>> lengths = [5, 3, 2] + >>> make_pad_mask(lengths) + masks = [[0, 0, 0, 0 ,0], + [0, 0, 0, 1, 1], + [0, 0, 1, 1, 1]] + """ + batch_size = lengths.size(0) + max_len = max_len if max_len > 0 else lengths.max().item() + seq_range = torch.arange(0, + max_len, + dtype=torch.int64, + device=lengths.device) + seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) + seq_length_expand = lengths.unsqueeze(-1) + mask = seq_range_expand >= seq_length_expand + return mask + +#https://github.com/FunAudioLLM/CosyVoice/blob/main/examples/magicdata-read/cosyvoice/conf/cosyvoice.yaml +class ConformerEncoder(torch.nn.Module): + """Conformer encoder module.""" + + def __init__( + self, + input_size: int, + output_size: int = 1024, + attention_heads: int = 16, + linear_units: int = 4096, + num_blocks: int = 6, + dropout_rate: float = 0.1, + positional_dropout_rate: float = 0.1, + attention_dropout_rate: float = 0.0, + input_layer: str = 'linear', + pos_enc_layer_type: str = 'rel_pos_espnet', + normalize_before: bool = True, + static_chunk_size: int = 1, # 1: causal_mask; 0: full_mask + use_dynamic_chunk: bool = False, + use_dynamic_left_chunk: bool = False, + positionwise_conv_kernel_size: int = 1, + macaron_style: bool =False, + selfattention_layer_type: str = "rel_selfattn", + activation_type: str = "swish", + use_cnn_module: bool = False, + cnn_module_kernel: int = 15, + causal: bool = False, + cnn_module_norm: str = "batch_norm", + key_bias: bool = True, + dtype=None, device=None, operations=None + ): + """Construct ConformerEncoder + + Args: + input_size to use_dynamic_chunk, see in BaseEncoder + positionwise_conv_kernel_size (int): Kernel size of positionwise + conv1d layer. + macaron_style (bool): Whether to use macaron style for + positionwise layer. + selfattention_layer_type (str): Encoder attention layer type, + the parameter has no effect now, it's just for configure + compatibility. #'rel_selfattn' + activation_type (str): Encoder activation function type. + use_cnn_module (bool): Whether to use convolution module. + cnn_module_kernel (int): Kernel size of convolution module. + causal (bool): whether to use causal convolution or not. + key_bias: whether use bias in attention.linear_k, False for whisper models. 
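+
+        Note: with the default static_chunk_size=1 the chunk mask is causal
+        (each frame sees only itself and the past); static_chunk_size=0 keeps
+        the full padding mask only.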
+ """ + super().__init__() + self.output_size = output_size + self.embed = LinearEmbed(input_size, output_size, dropout_rate, + EspnetRelPositionalEncoding(output_size, positional_dropout_rate), dtype=dtype, device=device, operations=operations) + self.normalize_before = normalize_before + self.after_norm = operations.LayerNorm(output_size, eps=1e-5, dtype=dtype, device=device) + self.use_dynamic_chunk = use_dynamic_chunk + + self.static_chunk_size = static_chunk_size + self.use_dynamic_chunk = use_dynamic_chunk + self.use_dynamic_left_chunk = use_dynamic_left_chunk + activation = ACTIVATION_CLASSES[activation_type]() + + # self-attention module definition + encoder_selfattn_layer_args = ( + attention_heads, + output_size, + attention_dropout_rate, + key_bias, + ) + # feed-forward module definition + positionwise_layer_args = ( + output_size, + linear_units, + dropout_rate, + activation, + ) + # convolution module definition + convolution_layer_args = (output_size, cnn_module_kernel, activation, + cnn_module_norm, causal) + + self.encoders = torch.nn.ModuleList([ + ConformerEncoderLayer( + output_size, + RelPositionMultiHeadedAttention( + *encoder_selfattn_layer_args, dtype=dtype, device=device, operations=operations), + PositionwiseFeedForward(*positionwise_layer_args, dtype=dtype, device=device, operations=operations), + PositionwiseFeedForward( + *positionwise_layer_args, dtype=dtype, device=device, operations=operations) if macaron_style else None, + ConvolutionModule( + *convolution_layer_args, dtype=dtype, device=device, operations=operations) if use_cnn_module else None, + dropout_rate, + normalize_before, dtype=dtype, device=device, operations=operations + ) for _ in range(num_blocks) + ]) + + def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor) -> torch.Tensor: + for layer in self.encoders: + xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) + return xs + + def forward( + self, + xs: torch.Tensor, + pad_mask: torch.Tensor, + decoding_chunk_size: int = 0, + num_decoding_left_chunks: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Embed positions in tensor. + + Args: + xs: padded input tensor (B, T, D) + xs_lens: input length (B) + decoding_chunk_size: decoding chunk size for dynamic chunk + 0: default for training, use random dynamic chunk. + <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. + num_decoding_left_chunks: number of left chunks, this is for decoding, + the chunk size is decoding_chunk_size. + >=0: use num_decoding_left_chunks + <0: use all left chunks + Returns: + encoder output tensor xs, and subsampled masks + xs: padded output tensor (B, T' ~= T/subsample_rate, D) + masks: torch.Tensor batch padding mask after subsample + (B, 1, T' ~= T/subsample_rate) + NOTE(xcsong): + We pass the `__call__` method of the modules instead of `forward` to the + checkpointing API because `__call__` attaches all the hooks of the module. 
+ https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 + """ + masks = None + if pad_mask is not None: + masks = pad_mask.to(torch.bool).unsqueeze(1) # (B, 1, T) + xs, pos_emb = self.embed(xs) + mask_pad = masks # (B, 1, T/subsample_rate) + chunk_masks = add_optional_chunk_mask(xs, masks, + self.use_dynamic_chunk, + self.use_dynamic_left_chunk, + decoding_chunk_size, + self.static_chunk_size, + num_decoding_left_chunks) + + xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad) + if self.normalize_before: + xs = self.after_norm(xs) + # Here we assume the mask is not changed in encoder layers, so just + # return the masks before encoder layers, and the masks will be used + # for cross attention with decoder later + return xs, masks + diff --git a/comfy/ldm/ace/model.py b/comfy/ldm/ace/model.py new file mode 100644 index 000000000..e5883df90 --- /dev/null +++ b/comfy/ldm/ace/model.py @@ -0,0 +1,381 @@ +# Original from: https://github.com/ace-step/ACE-Step/blob/main/models/ace_step_transformer.py + +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, List, Union + +import torch +from torch import nn + +import comfy.model_management + +from comfy.ldm.lightricks.model import TimestepEmbedding, Timesteps +from .attention import LinearTransformerBlock, t2i_modulate +from .lyric_encoder import ConformerEncoder as LyricEncoder + + +def cross_norm(hidden_states, controlnet_input): + # input N x T x c + mean_hidden_states, std_hidden_states = hidden_states.mean(dim=(1,2), keepdim=True), hidden_states.std(dim=(1,2), keepdim=True) + mean_controlnet_input, std_controlnet_input = controlnet_input.mean(dim=(1,2), keepdim=True), controlnet_input.std(dim=(1,2), keepdim=True) + controlnet_input = (controlnet_input - mean_controlnet_input) * (std_hidden_states / (std_controlnet_input + 1e-12)) + mean_hidden_states + return controlnet_input + + +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2 +class Qwen2RotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, dtype=None, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=device).float() / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. 
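+        # cos/sin tables are precomputed for max_position_embeddings positions;
+        # forward() rebuilds them lazily when a longer seq_len is requested.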
+ self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.float32 + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +class T2IFinalLayer(nn.Module): + """ + The final layer of Sana. + """ + + def __init__(self, hidden_size, patch_size=[16, 1], out_channels=256, dtype=None, device=None, operations=None): + super().__init__() + self.norm_final = operations.RMSNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.linear = operations.Linear(hidden_size, patch_size[0] * patch_size[1] * out_channels, bias=True, dtype=dtype, device=device) + self.scale_shift_table = nn.Parameter(torch.empty(2, hidden_size, dtype=dtype, device=device)) + self.out_channels = out_channels + self.patch_size = patch_size + + def unpatchfy( + self, + hidden_states: torch.Tensor, + width: int, + ): + # 4 unpatchify + new_height, new_width = 1, hidden_states.size(1) + hidden_states = hidden_states.reshape( + shape=(hidden_states.shape[0], new_height, new_width, self.patch_size[0], self.patch_size[1], self.out_channels) + ).contiguous() + hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) + output = hidden_states.reshape( + shape=(hidden_states.shape[0], self.out_channels, new_height * self.patch_size[0], new_width * self.patch_size[1]) + ).contiguous() + if width > new_width: + output = torch.nn.functional.pad(output, (0, width - new_width, 0, 0), 'constant', 0) + elif width < new_width: + output = output[:, :, :, :width] + return output + + def forward(self, x, t, output_length): + shift, scale = (comfy.model_management.cast_to(self.scale_shift_table[None], device=t.device, dtype=t.dtype) + t[:, None]).chunk(2, dim=1) + x = t2i_modulate(self.norm_final(x), shift, scale) + x = self.linear(x) + # unpatchify + output = self.unpatchfy(x, output_length) + return output + + +class PatchEmbed(nn.Module): + """2D Image to Patch Embedding""" + + def __init__( + self, + height=16, + width=4096, + patch_size=(16, 1), + in_channels=8, + embed_dim=1152, + bias=True, + dtype=None, device=None, operations=None + ): + super().__init__() + patch_size_h, patch_size_w = patch_size + self.early_conv_layers = nn.Sequential( + operations.Conv2d(in_channels, in_channels*256, kernel_size=patch_size, stride=patch_size, padding=0, bias=bias, dtype=dtype, device=device), + operations.GroupNorm(num_groups=32, num_channels=in_channels*256, eps=1e-6, affine=True, dtype=dtype, device=device), + operations.Conv2d(in_channels*256, embed_dim, kernel_size=1, stride=1, padding=0, bias=bias, dtype=dtype, device=device) + ) + self.patch_size = patch_size + self.height, self.width = height // patch_size_h, width // patch_size_w + self.base_size = self.width + + def 
forward(self, latent): + # early convolutions, N x C x H x W -> N x 256 * sqrt(patch_size) x H/patch_size x W/patch_size + latent = self.early_conv_layers(latent) + latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC + return latent + + +class ACEStepTransformer2DModel(nn.Module): + # _supports_gradient_checkpointing = True + + def __init__( + self, + in_channels: Optional[int] = 8, + num_layers: int = 28, + inner_dim: int = 1536, + attention_head_dim: int = 64, + num_attention_heads: int = 24, + mlp_ratio: float = 4.0, + out_channels: int = 8, + max_position: int = 32768, + rope_theta: float = 1000000.0, + speaker_embedding_dim: int = 512, + text_embedding_dim: int = 768, + ssl_encoder_depths: List[int] = [9, 9], + ssl_names: List[str] = ["mert", "m-hubert"], + ssl_latent_dims: List[int] = [1024, 768], + lyric_encoder_vocab_size: int = 6681, + lyric_hidden_size: int = 1024, + patch_size: List[int] = [16, 1], + max_height: int = 16, + max_width: int = 4096, + audio_model=None, + dtype=None, device=None, operations=None + + ): + super().__init__() + + self.dtype = dtype + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + inner_dim = num_attention_heads * attention_head_dim + self.inner_dim = inner_dim + self.out_channels = out_channels + self.max_position = max_position + self.patch_size = patch_size + + self.rope_theta = rope_theta + + self.rotary_emb = Qwen2RotaryEmbedding( + dim=self.attention_head_dim, + max_position_embeddings=self.max_position, + base=self.rope_theta, + dtype=dtype, + device=device, + ) + + # 2. Define input layers + self.in_channels = in_channels + + self.num_layers = num_layers + # 3. Define transformers blocks + self.transformer_blocks = nn.ModuleList( + [ + LinearTransformerBlock( + dim=self.inner_dim, + num_attention_heads=self.num_attention_heads, + attention_head_dim=attention_head_dim, + mlp_ratio=mlp_ratio, + add_cross_attention=True, + add_cross_attention_dim=self.inner_dim, + dtype=dtype, + device=device, + operations=operations, + ) + for i in range(self.num_layers) + ] + ) + + self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) + self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=self.inner_dim, dtype=dtype, device=device, operations=operations) + self.t_block = nn.Sequential(nn.SiLU(), operations.Linear(self.inner_dim, 6 * self.inner_dim, bias=True, dtype=dtype, device=device)) + + # speaker + self.speaker_embedder = operations.Linear(speaker_embedding_dim, self.inner_dim, dtype=dtype, device=device) + + # genre + self.genre_embedder = operations.Linear(text_embedding_dim, self.inner_dim, dtype=dtype, device=device) + + # lyric + self.lyric_embs = operations.Embedding(lyric_encoder_vocab_size, lyric_hidden_size, dtype=dtype, device=device) + self.lyric_encoder = LyricEncoder(input_size=lyric_hidden_size, static_chunk_size=0, dtype=dtype, device=device, operations=operations) + self.lyric_proj = operations.Linear(lyric_hidden_size, self.inner_dim, dtype=dtype, device=device) + + projector_dim = 2 * self.inner_dim + + self.projectors = nn.ModuleList([ + nn.Sequential( + operations.Linear(self.inner_dim, projector_dim, dtype=dtype, device=device), + nn.SiLU(), + operations.Linear(projector_dim, projector_dim, dtype=dtype, device=device), + nn.SiLU(), + operations.Linear(projector_dim, ssl_dim, dtype=dtype, device=device), + ) for ssl_dim in ssl_latent_dims + ]) + + self.proj_in = PatchEmbed( + height=max_height, + width=max_width, + 
patch_size=patch_size, + embed_dim=self.inner_dim, + bias=True, + dtype=dtype, + device=device, + operations=operations, + ) + + self.final_layer = T2IFinalLayer(self.inner_dim, patch_size=patch_size, out_channels=out_channels, dtype=dtype, device=device, operations=operations) + + def forward_lyric_encoder( + self, + lyric_token_idx: Optional[torch.LongTensor] = None, + lyric_mask: Optional[torch.LongTensor] = None, + out_dtype=None, + ): + # N x T x D + lyric_embs = self.lyric_embs(lyric_token_idx, out_dtype=out_dtype) + prompt_prenet_out, _mask = self.lyric_encoder(lyric_embs, lyric_mask, decoding_chunk_size=1, num_decoding_left_chunks=-1) + prompt_prenet_out = self.lyric_proj(prompt_prenet_out) + return prompt_prenet_out + + def encode( + self, + encoder_text_hidden_states: Optional[torch.Tensor] = None, + text_attention_mask: Optional[torch.LongTensor] = None, + speaker_embeds: Optional[torch.FloatTensor] = None, + lyric_token_idx: Optional[torch.LongTensor] = None, + lyric_mask: Optional[torch.LongTensor] = None, + ): + + bs = encoder_text_hidden_states.shape[0] + device = encoder_text_hidden_states.device + + # speaker embedding + encoder_spk_hidden_states = self.speaker_embedder(speaker_embeds).unsqueeze(1) + + # genre embedding + encoder_text_hidden_states = self.genre_embedder(encoder_text_hidden_states) + + # lyric + encoder_lyric_hidden_states = self.forward_lyric_encoder( + lyric_token_idx=lyric_token_idx, + lyric_mask=lyric_mask, + out_dtype=encoder_text_hidden_states.dtype, + ) + + encoder_hidden_states = torch.cat([encoder_spk_hidden_states, encoder_text_hidden_states, encoder_lyric_hidden_states], dim=1) + + encoder_hidden_mask = None + if text_attention_mask is not None: + speaker_mask = torch.ones(bs, 1, device=device) + encoder_hidden_mask = torch.cat([speaker_mask, text_attention_mask, lyric_mask], dim=1) + + return encoder_hidden_states, encoder_hidden_mask + + def decode( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + encoder_hidden_states: torch.Tensor, + encoder_hidden_mask: torch.Tensor, + timestep: Optional[torch.Tensor], + output_length: int = 0, + block_controlnet_hidden_states: Optional[Union[List[torch.Tensor], torch.Tensor]] = None, + controlnet_scale: Union[float, torch.Tensor] = 1.0, + return_dict: bool = True, + ): + embedded_timestep = self.timestep_embedder(self.time_proj(timestep).to(dtype=hidden_states.dtype)) + temb = self.t_block(embedded_timestep) + + hidden_states = self.proj_in(hidden_states) + + # controlnet logic + if block_controlnet_hidden_states is not None: + control_condi = cross_norm(hidden_states, block_controlnet_hidden_states) + hidden_states = hidden_states + control_condi * controlnet_scale + + # inner_hidden_states = [] + + rotary_freqs_cis = self.rotary_emb(hidden_states, seq_len=hidden_states.shape[1]) + encoder_rotary_freqs_cis = self.rotary_emb(encoder_hidden_states, seq_len=encoder_hidden_states.shape[1]) + + for index_block, block in enumerate(self.transformer_blocks): + hidden_states = block( + hidden_states=hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_hidden_mask, + rotary_freqs_cis=rotary_freqs_cis, + rotary_freqs_cis_cross=encoder_rotary_freqs_cis, + temb=temb, + ) + + output = self.final_layer(hidden_states, embedded_timestep, output_length) + return output + + def forward( + self, + x, + timestep, + attention_mask=None, + context: Optional[torch.Tensor] = None, + text_attention_mask: Optional[torch.LongTensor] = 
None, + speaker_embeds: Optional[torch.FloatTensor] = None, + lyric_token_idx: Optional[torch.LongTensor] = None, + lyric_mask: Optional[torch.LongTensor] = None, + block_controlnet_hidden_states: Optional[Union[List[torch.Tensor], torch.Tensor]] = None, + controlnet_scale: Union[float, torch.Tensor] = 1.0, + **kwargs + ): + hidden_states = x + encoder_text_hidden_states = context + encoder_hidden_states, encoder_hidden_mask = self.encode( + encoder_text_hidden_states=encoder_text_hidden_states, + text_attention_mask=text_attention_mask, + speaker_embeds=speaker_embeds, + lyric_token_idx=lyric_token_idx, + lyric_mask=lyric_mask, + ) + + output_length = hidden_states.shape[-1] + + output = self.decode( + hidden_states=hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_mask=encoder_hidden_mask, + timestep=timestep, + output_length=output_length, + block_controlnet_hidden_states=block_controlnet_hidden_states, + controlnet_scale=controlnet_scale, + ) + + return output diff --git a/comfy/ldm/ace/vae/autoencoder_dc.py b/comfy/ldm/ace/vae/autoencoder_dc.py new file mode 100644 index 000000000..e7b1d4801 --- /dev/null +++ b/comfy/ldm/ace/vae/autoencoder_dc.py @@ -0,0 +1,644 @@ +# Rewritten from diffusers +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Union + +import comfy.model_management +import comfy.ops +ops = comfy.ops.disable_weight_init + + +class RMSNorm(ops.RMSNorm): + def __init__(self, dim, eps=1e-5, elementwise_affine=True, bias=False): + super().__init__(dim, eps=eps, elementwise_affine=elementwise_affine) + if elementwise_affine: + self.bias = nn.Parameter(torch.empty(dim)) if bias else None + + def forward(self, x): + x = super().forward(x) + if self.elementwise_affine: + if self.bias is not None: + x = x + comfy.model_management.cast_to(self.bias, dtype=x.dtype, device=x.device) + return x + + +def get_normalization(norm_type, num_features, num_groups=32, eps=1e-5): + if norm_type == "batch_norm": + return nn.BatchNorm2d(num_features) + elif norm_type == "group_norm": + return ops.GroupNorm(num_groups, num_features) + elif norm_type == "layer_norm": + return ops.LayerNorm(num_features) + elif norm_type == "rms_norm": + return RMSNorm(num_features, eps=eps, elementwise_affine=True, bias=True) + else: + raise ValueError(f"Unknown normalization type: {norm_type}") + + +def get_activation(activation_type): + if activation_type == "relu": + return nn.ReLU() + elif activation_type == "relu6": + return nn.ReLU6() + elif activation_type == "silu": + return nn.SiLU() + elif activation_type == "leaky_relu": + return nn.LeakyReLU(0.2) + else: + raise ValueError(f"Unknown activation type: {activation_type}") + + +class ResBlock(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + norm_type: str = "batch_norm", + act_fn: str = "relu6", + ) -> None: + super().__init__() + + self.norm_type = norm_type + self.nonlinearity = get_activation(act_fn) if act_fn is not None else nn.Identity() + self.conv1 = ops.Conv2d(in_channels, in_channels, 3, 1, 1) + self.conv2 = ops.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False) + self.norm = get_normalization(norm_type, out_channels) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + residual = hidden_states + hidden_states = self.conv1(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.norm_type == "rms_norm": + # move channel to 
the last dimension so we apply RMSnorm across channel dimension + hidden_states = self.norm(hidden_states.movedim(1, -1)).movedim(-1, 1) + else: + hidden_states = self.norm(hidden_states) + + return hidden_states + residual + +class SanaMultiscaleAttentionProjection(nn.Module): + def __init__( + self, + in_channels: int, + num_attention_heads: int, + kernel_size: int, + ) -> None: + super().__init__() + + channels = 3 * in_channels + self.proj_in = ops.Conv2d( + channels, + channels, + kernel_size, + padding=kernel_size // 2, + groups=channels, + bias=False, + ) + self.proj_out = ops.Conv2d(channels, channels, 1, 1, 0, groups=3 * num_attention_heads, bias=False) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.proj_in(hidden_states) + hidden_states = self.proj_out(hidden_states) + return hidden_states + +class SanaMultiscaleLinearAttention(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + num_attention_heads: int = None, + attention_head_dim: int = 8, + mult: float = 1.0, + norm_type: str = "batch_norm", + kernel_sizes: tuple = (5,), + eps: float = 1e-15, + residual_connection: bool = False, + ): + super().__init__() + + self.eps = eps + self.attention_head_dim = attention_head_dim + self.norm_type = norm_type + self.residual_connection = residual_connection + + num_attention_heads = ( + int(in_channels // attention_head_dim * mult) + if num_attention_heads is None + else num_attention_heads + ) + inner_dim = num_attention_heads * attention_head_dim + + self.to_q = ops.Linear(in_channels, inner_dim, bias=False) + self.to_k = ops.Linear(in_channels, inner_dim, bias=False) + self.to_v = ops.Linear(in_channels, inner_dim, bias=False) + + self.to_qkv_multiscale = nn.ModuleList() + for kernel_size in kernel_sizes: + self.to_qkv_multiscale.append( + SanaMultiscaleAttentionProjection(inner_dim, num_attention_heads, kernel_size) + ) + + self.nonlinearity = nn.ReLU() + self.to_out = ops.Linear(inner_dim * (1 + len(kernel_sizes)), out_channels, bias=False) + self.norm_out = get_normalization(norm_type, out_channels) + + def apply_linear_attention(self, query, key, value): + value = F.pad(value, (0, 0, 0, 1), mode="constant", value=1) + scores = torch.matmul(value, key.transpose(-1, -2)) + hidden_states = torch.matmul(scores, query) + + hidden_states = hidden_states.to(dtype=torch.float32) + hidden_states = hidden_states[:, :, :-1] / (hidden_states[:, :, -1:] + self.eps) + return hidden_states + + def apply_quadratic_attention(self, query, key, value): + scores = torch.matmul(key.transpose(-1, -2), query) + scores = scores.to(dtype=torch.float32) + scores = scores / (torch.sum(scores, dim=2, keepdim=True) + self.eps) + hidden_states = torch.matmul(value, scores.to(value.dtype)) + return hidden_states + + def forward(self, hidden_states): + height, width = hidden_states.shape[-2:] + if height * width > self.attention_head_dim: + use_linear_attention = True + else: + use_linear_attention = False + + residual = hidden_states + + batch_size, _, height, width = list(hidden_states.size()) + original_dtype = hidden_states.dtype + + hidden_states = hidden_states.movedim(1, -1) + query = self.to_q(hidden_states) + key = self.to_k(hidden_states) + value = self.to_v(hidden_states) + hidden_states = torch.cat([query, key, value], dim=3) + hidden_states = hidden_states.movedim(-1, 1) + + multi_scale_qkv = [hidden_states] + for block in self.to_qkv_multiscale: + multi_scale_qkv.append(block(hidden_states)) + + hidden_states = 
torch.cat(multi_scale_qkv, dim=1) + + if use_linear_attention: + # for linear attention upcast hidden_states to float32 + hidden_states = hidden_states.to(dtype=torch.float32) + + hidden_states = hidden_states.reshape(batch_size, -1, 3 * self.attention_head_dim, height * width) + + query, key, value = hidden_states.chunk(3, dim=2) + query = self.nonlinearity(query) + key = self.nonlinearity(key) + + if use_linear_attention: + hidden_states = self.apply_linear_attention(query, key, value) + hidden_states = hidden_states.to(dtype=original_dtype) + else: + hidden_states = self.apply_quadratic_attention(query, key, value) + + hidden_states = torch.reshape(hidden_states, (batch_size, -1, height, width)) + hidden_states = self.to_out(hidden_states.movedim(1, -1)).movedim(-1, 1) + + if self.norm_type == "rms_norm": + hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1) + else: + hidden_states = self.norm_out(hidden_states) + + if self.residual_connection: + hidden_states = hidden_states + residual + + return hidden_states + + +class EfficientViTBlock(nn.Module): + def __init__( + self, + in_channels: int, + mult: float = 1.0, + attention_head_dim: int = 32, + qkv_multiscales: tuple = (5,), + norm_type: str = "batch_norm", + ) -> None: + super().__init__() + + self.attn = SanaMultiscaleLinearAttention( + in_channels=in_channels, + out_channels=in_channels, + mult=mult, + attention_head_dim=attention_head_dim, + norm_type=norm_type, + kernel_sizes=qkv_multiscales, + residual_connection=True, + ) + + self.conv_out = GLUMBConv( + in_channels=in_channels, + out_channels=in_channels, + norm_type="rms_norm", + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.attn(x) + x = self.conv_out(x) + return x + + +class GLUMBConv(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + expand_ratio: float = 4, + norm_type: str = None, + residual_connection: bool = True, + ) -> None: + super().__init__() + + hidden_channels = int(expand_ratio * in_channels) + self.norm_type = norm_type + self.residual_connection = residual_connection + + self.nonlinearity = nn.SiLU() + self.conv_inverted = ops.Conv2d(in_channels, hidden_channels * 2, 1, 1, 0) + self.conv_depth = ops.Conv2d(hidden_channels * 2, hidden_channels * 2, 3, 1, 1, groups=hidden_channels * 2) + self.conv_point = ops.Conv2d(hidden_channels, out_channels, 1, 1, 0, bias=False) + + self.norm = None + if norm_type == "rms_norm": + self.norm = RMSNorm(out_channels, eps=1e-5, elementwise_affine=True, bias=True) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + if self.residual_connection: + residual = hidden_states + + hidden_states = self.conv_inverted(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + + hidden_states = self.conv_depth(hidden_states) + hidden_states, gate = torch.chunk(hidden_states, 2, dim=1) + hidden_states = hidden_states * self.nonlinearity(gate) + + hidden_states = self.conv_point(hidden_states) + + if self.norm_type == "rms_norm": + # move channel to the last dimension so we apply RMSnorm across channel dimension + hidden_states = self.norm(hidden_states.movedim(1, -1)).movedim(-1, 1) + + if self.residual_connection: + hidden_states = hidden_states + residual + + return hidden_states + + +def get_block( + block_type: str, + in_channels: int, + out_channels: int, + attention_head_dim: int, + norm_type: str, + act_fn: str, + qkv_mutliscales: tuple = (), +): + if block_type == "ResBlock": + block = ResBlock(in_channels, out_channels, 
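+                         # plain residual conv block (no attention)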
norm_type, act_fn) + elif block_type == "EfficientViTBlock": + block = EfficientViTBlock( + in_channels, + attention_head_dim=attention_head_dim, + norm_type=norm_type, + qkv_multiscales=qkv_mutliscales + ) + else: + raise ValueError(f"Block with {block_type=} is not supported.") + + return block + + +class DCDownBlock2d(nn.Module): + def __init__(self, in_channels: int, out_channels: int, downsample: bool = False, shortcut: bool = True) -> None: + super().__init__() + + self.downsample = downsample + self.factor = 2 + self.stride = 1 if downsample else 2 + self.group_size = in_channels * self.factor**2 // out_channels + self.shortcut = shortcut + + out_ratio = self.factor**2 + if downsample: + assert out_channels % out_ratio == 0 + out_channels = out_channels // out_ratio + + self.conv = ops.Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=self.stride, + padding=1, + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + x = self.conv(hidden_states) + if self.downsample: + x = F.pixel_unshuffle(x, self.factor) + + if self.shortcut: + y = F.pixel_unshuffle(hidden_states, self.factor) + y = y.unflatten(1, (-1, self.group_size)) + y = y.mean(dim=2) + hidden_states = x + y + else: + hidden_states = x + + return hidden_states + + +class DCUpBlock2d(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + interpolate: bool = False, + shortcut: bool = True, + interpolation_mode: str = "nearest", + ) -> None: + super().__init__() + + self.interpolate = interpolate + self.interpolation_mode = interpolation_mode + self.shortcut = shortcut + self.factor = 2 + self.repeats = out_channels * self.factor**2 // in_channels + + out_ratio = self.factor**2 + if not interpolate: + out_channels = out_channels * out_ratio + + self.conv = ops.Conv2d(in_channels, out_channels, 3, 1, 1) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + if self.interpolate: + x = F.interpolate(hidden_states, scale_factor=self.factor, mode=self.interpolation_mode) + x = self.conv(x) + else: + x = self.conv(hidden_states) + x = F.pixel_shuffle(x, self.factor) + + if self.shortcut: + y = hidden_states.repeat_interleave(self.repeats, dim=1, output_size=hidden_states.shape[1] * self.repeats) + y = F.pixel_shuffle(y, self.factor) + hidden_states = x + y + else: + hidden_states = x + + return hidden_states + + +class Encoder(nn.Module): + def __init__( + self, + in_channels: int, + latent_channels: int, + attention_head_dim: int = 32, + block_type: str or tuple = "ResBlock", + block_out_channels: tuple = (128, 256, 512, 512, 1024, 1024), + layers_per_block: tuple = (2, 2, 2, 2, 2, 2), + qkv_multiscales: tuple = ((), (), (), (5,), (5,), (5,)), + downsample_block_type: str = "pixel_unshuffle", + out_shortcut: bool = True, + ): + super().__init__() + + num_blocks = len(block_out_channels) + + if isinstance(block_type, str): + block_type = (block_type,) * num_blocks + + if layers_per_block[0] > 0: + self.conv_in = ops.Conv2d( + in_channels, + block_out_channels[0] if layers_per_block[0] > 0 else block_out_channels[1], + kernel_size=3, + stride=1, + padding=1, + ) + else: + self.conv_in = DCDownBlock2d( + in_channels=in_channels, + out_channels=block_out_channels[0] if layers_per_block[0] > 0 else block_out_channels[1], + downsample=downsample_block_type == "pixel_unshuffle", + shortcut=False, + ) + + down_blocks = [] + for i, (out_channel, num_layers) in enumerate(zip(block_out_channels, layers_per_block)): + down_block_list = [] + + for _ in range(num_layers): + 
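+                # Each stage stacks num_layers identical blocks at a fixed
+                # channel width; a DCDownBlock2d is appended after every stage
+                # except the last to halve the spatial resolution.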
block = get_block( + block_type[i], + out_channel, + out_channel, + attention_head_dim=attention_head_dim, + norm_type="rms_norm", + act_fn="silu", + qkv_mutliscales=qkv_multiscales[i], + ) + down_block_list.append(block) + + if i < num_blocks - 1 and num_layers > 0: + downsample_block = DCDownBlock2d( + in_channels=out_channel, + out_channels=block_out_channels[i + 1], + downsample=downsample_block_type == "pixel_unshuffle", + shortcut=True, + ) + down_block_list.append(downsample_block) + + down_blocks.append(nn.Sequential(*down_block_list)) + + self.down_blocks = nn.ModuleList(down_blocks) + + self.conv_out = ops.Conv2d(block_out_channels[-1], latent_channels, 3, 1, 1) + + self.out_shortcut = out_shortcut + if out_shortcut: + self.out_shortcut_average_group_size = block_out_channels[-1] // latent_channels + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.conv_in(hidden_states) + for down_block in self.down_blocks: + hidden_states = down_block(hidden_states) + + if self.out_shortcut: + x = hidden_states.unflatten(1, (-1, self.out_shortcut_average_group_size)) + x = x.mean(dim=2) + hidden_states = self.conv_out(hidden_states) + x + else: + hidden_states = self.conv_out(hidden_states) + + return hidden_states + + +class Decoder(nn.Module): + def __init__( + self, + in_channels: int, + latent_channels: int, + attention_head_dim: int = 32, + block_type: str or tuple = "ResBlock", + block_out_channels: tuple = (128, 256, 512, 512, 1024, 1024), + layers_per_block: tuple = (2, 2, 2, 2, 2, 2), + qkv_multiscales: tuple = ((), (), (), (5,), (5,), (5,)), + norm_type: str or tuple = "rms_norm", + act_fn: str or tuple = "silu", + upsample_block_type: str = "pixel_shuffle", + in_shortcut: bool = True, + ): + super().__init__() + + num_blocks = len(block_out_channels) + + if isinstance(block_type, str): + block_type = (block_type,) * num_blocks + if isinstance(norm_type, str): + norm_type = (norm_type,) * num_blocks + if isinstance(act_fn, str): + act_fn = (act_fn,) * num_blocks + + self.conv_in = ops.Conv2d(latent_channels, block_out_channels[-1], 3, 1, 1) + + self.in_shortcut = in_shortcut + if in_shortcut: + self.in_shortcut_repeats = block_out_channels[-1] // latent_channels + + up_blocks = [] + for i, (out_channel, num_layers) in reversed(list(enumerate(zip(block_out_channels, layers_per_block)))): + up_block_list = [] + + if i < num_blocks - 1 and num_layers > 0: + upsample_block = DCUpBlock2d( + block_out_channels[i + 1], + out_channel, + interpolate=upsample_block_type == "interpolate", + shortcut=True, + ) + up_block_list.append(upsample_block) + + for _ in range(num_layers): + block = get_block( + block_type[i], + out_channel, + out_channel, + attention_head_dim=attention_head_dim, + norm_type=norm_type[i], + act_fn=act_fn[i], + qkv_mutliscales=qkv_multiscales[i], + ) + up_block_list.append(block) + + up_blocks.insert(0, nn.Sequential(*up_block_list)) + + self.up_blocks = nn.ModuleList(up_blocks) + + channels = block_out_channels[0] if layers_per_block[0] > 0 else block_out_channels[1] + + self.norm_out = RMSNorm(channels, 1e-5, elementwise_affine=True, bias=True) + self.conv_act = nn.ReLU() + self.conv_out = None + + if layers_per_block[0] > 0: + self.conv_out = ops.Conv2d(channels, in_channels, 3, 1, 1) + else: + self.conv_out = DCUpBlock2d( + channels, in_channels, interpolate=upsample_block_type == "interpolate", shortcut=False + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + if self.in_shortcut: + x = 
hidden_states.repeat_interleave( + self.in_shortcut_repeats, dim=1, output_size=hidden_states.shape[1] * self.in_shortcut_repeats + ) + hidden_states = self.conv_in(hidden_states) + x + else: + hidden_states = self.conv_in(hidden_states) + + for up_block in reversed(self.up_blocks): + hidden_states = up_block(hidden_states) + + hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1) + hidden_states = self.conv_act(hidden_states) + hidden_states = self.conv_out(hidden_states) + return hidden_states + + +class AutoencoderDC(nn.Module): + def __init__( + self, + in_channels: int = 2, + latent_channels: int = 8, + attention_head_dim: int = 32, + encoder_block_types: Union[str, Tuple[str]] = ["ResBlock", "ResBlock", "ResBlock", "EfficientViTBlock"], + decoder_block_types: Union[str, Tuple[str]] = ["ResBlock", "ResBlock", "ResBlock", "EfficientViTBlock"], + encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 1024), + decoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 1024), + encoder_layers_per_block: Tuple[int] = (2, 2, 3, 3), + decoder_layers_per_block: Tuple[int] = (3, 3, 3, 3), + encoder_qkv_multiscales: Tuple[Tuple[int, ...], ...] = ((), (), (5,), (5,)), + decoder_qkv_multiscales: Tuple[Tuple[int, ...], ...] = ((), (), (5,), (5,)), + upsample_block_type: str = "interpolate", + downsample_block_type: str = "Conv", + decoder_norm_types: Union[str, Tuple[str]] = "rms_norm", + decoder_act_fns: Union[str, Tuple[str]] = "silu", + scaling_factor: float = 0.41407, + ) -> None: + super().__init__() + + self.encoder = Encoder( + in_channels=in_channels, + latent_channels=latent_channels, + attention_head_dim=attention_head_dim, + block_type=encoder_block_types, + block_out_channels=encoder_block_out_channels, + layers_per_block=encoder_layers_per_block, + qkv_multiscales=encoder_qkv_multiscales, + downsample_block_type=downsample_block_type, + ) + + self.decoder = Decoder( + in_channels=in_channels, + latent_channels=latent_channels, + attention_head_dim=attention_head_dim, + block_type=decoder_block_types, + block_out_channels=decoder_block_out_channels, + layers_per_block=decoder_layers_per_block, + qkv_multiscales=decoder_qkv_multiscales, + norm_type=decoder_norm_types, + act_fn=decoder_act_fns, + upsample_block_type=upsample_block_type, + ) + + self.scaling_factor = scaling_factor + self.spatial_compression_ratio = 2 ** (len(encoder_block_out_channels) - 1) + + def encode(self, x: torch.Tensor) -> torch.Tensor: + """Internal encoding function.""" + encoded = self.encoder(x) + return encoded * self.scaling_factor + + def decode(self, z: torch.Tensor) -> torch.Tensor: + # Scale the latents back + z = z / self.scaling_factor + decoded = self.decoder(z) + return decoded + + def forward(self, x: torch.Tensor) -> torch.Tensor: + z = self.encode(x) + return self.decode(z) + diff --git a/comfy/ldm/ace/vae/music_dcae_pipeline.py b/comfy/ldm/ace/vae/music_dcae_pipeline.py new file mode 100644 index 000000000..3188bc770 --- /dev/null +++ b/comfy/ldm/ace/vae/music_dcae_pipeline.py @@ -0,0 +1,104 @@ +# Original from: https://github.com/ace-step/ACE-Step/blob/main/music_dcae/music_dcae_pipeline.py +import torch +from .autoencoder_dc import AutoencoderDC +import torchaudio +import torchvision.transforms as transforms +from .music_vocoder import ADaMoSHiFiGANV1 + + +class MusicDCAE(torch.nn.Module): + def __init__(self, source_sample_rate=None, dcae_config={}, vocoder_config={}): + super(MusicDCAE, self).__init__() + + self.dcae = AutoencoderDC(**dcae_config) + 
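+        # Pipeline overview: the DCAE above autoencodes normalized log-mel
+        # spectrograms into compact latents, while the vocoder below
+        # reconstructs 44.1 kHz waveforms from decoded mel spectrograms.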
self.vocoder = ADaMoSHiFiGANV1(**vocoder_config)
+
+        if source_sample_rate is None:
+            self.source_sample_rate = 48000
+        else:
+            self.source_sample_rate = source_sample_rate
+
+        # self.resampler = torchaudio.transforms.Resample(source_sample_rate, 44100)
+
+        self.transform = transforms.Compose([
+            transforms.Normalize(0.5, 0.5),
+        ])
+        self.min_mel_value = -11.0
+        self.max_mel_value = 3.0
+        self.audio_chunk_size = int(round((1024 * 512 / 44100 * 48000)))
+        self.mel_chunk_size = 1024
+        self.time_dimention_multiple = 8
+        self.latent_chunk_size = self.mel_chunk_size // self.time_dimention_multiple
+        self.scale_factor = 0.1786
+        self.shift_factor = -1.9091
+
+    def load_audio(self, audio_path):
+        audio, sr = torchaudio.load(audio_path)
+        return audio, sr
+
+    def forward_mel(self, audios):
+        mels = []
+        for i in range(len(audios)):
+            image = self.vocoder.mel_transform(audios[i])
+            mels.append(image)
+        mels = torch.stack(mels)
+        return mels
+
+    @torch.no_grad()
+    def encode(self, audios, audio_lengths=None, sr=None):
+        if audio_lengths is None:
+            audio_lengths = torch.tensor([audios.shape[2]] * audios.shape[0])
+            audio_lengths = audio_lengths.to(audios.device)
+
+        if sr is None:
+            sr = self.source_sample_rate
+
+        if sr != 44100:
+            audios = torchaudio.functional.resample(audios, sr, 44100)
+
+        max_audio_len = audios.shape[-1]
+        if max_audio_len % (8 * 512) != 0:
+            audios = torch.nn.functional.pad(audios, (0, 8 * 512 - max_audio_len % (8 * 512)))
+
+        mels = self.forward_mel(audios)
+        mels = (mels - self.min_mel_value) / (self.max_mel_value - self.min_mel_value)
+        mels = self.transform(mels)
+        latents = []
+        for mel in mels:
+            latent = self.dcae.encoder(mel.unsqueeze(0))
+            latents.append(latent)
+        latents = torch.cat(latents, dim=0)
+        # latent_lengths = (audio_lengths / sr * 44100 / 512 / self.time_dimention_multiple).long()
+        latents = (latents - self.shift_factor) * self.scale_factor
+        return latents
+        # return latents, latent_lengths
+
+    @torch.no_grad()
+    def decode(self, latents, audio_lengths=None, sr=None):
+        latents = latents / self.scale_factor + self.shift_factor
+
+        pred_wavs = []
+
+        for latent in latents:
+            mels = self.dcae.decoder(latent.unsqueeze(0))
+            mels = mels * 0.5 + 0.5
+            mels = mels * (self.max_mel_value - self.min_mel_value) + self.min_mel_value
+            wav = self.vocoder.decode(mels[0]).squeeze(1)
+
+            if sr is not None:
+                # resampler = torchaudio.transforms.Resample(44100, sr).to(latents.device).to(latents.dtype)
+                wav = torchaudio.functional.resample(wav, 44100, sr)
+                # wav = resampler(wav)
+            else:
+                sr = 44100
+            pred_wavs.append(wav)
+
+        if audio_lengths is not None:
+            pred_wavs = [wav[:, :length].cpu() for wav, length in zip(pred_wavs, audio_lengths)]
+        return torch.stack(pred_wavs)
+        # return sr, pred_wavs
+
+    def forward(self, audios, audio_lengths=None, sr=None):
+        # encode() returns just the latents and decode() returns the stacked
+        # waveforms (their multi-value returns are commented out above), so
+        # forward mirrors those signatures.
+        latents = self.encode(audios=audios, audio_lengths=audio_lengths, sr=sr)
+        pred_wavs = self.decode(latents=latents, audio_lengths=audio_lengths, sr=sr)
+        return pred_wavs, latents
diff --git a/comfy/ldm/ace/vae/music_log_mel.py b/comfy/ldm/ace/vae/music_log_mel.py
new file mode 100755
index 000000000..d73d3f8e8
--- /dev/null
+++ b/comfy/ldm/ace/vae/music_log_mel.py
@@ -0,0 +1,108 @@
+# Original from: https://github.com/ace-step/ACE-Step/blob/main/music_dcae/music_log_mel.py
+import torch
+import torch.nn as nn
+from torch import Tensor
+from torchaudio.transforms import MelScale
+import comfy.model_management
+
+class LinearSpectrogram(nn.Module):
+    def __init__(
+        self,
+        
n_fft=2048, + win_length=2048, + hop_length=512, + center=False, + mode="pow2_sqrt", + ): + super().__init__() + + self.n_fft = n_fft + self.win_length = win_length + self.hop_length = hop_length + self.center = center + self.mode = mode + + self.register_buffer("window", torch.hann_window(win_length)) + + def forward(self, y: Tensor) -> Tensor: + if y.ndim == 3: + y = y.squeeze(1) + + y = torch.nn.functional.pad( + y.unsqueeze(1), + ( + (self.win_length - self.hop_length) // 2, + (self.win_length - self.hop_length + 1) // 2, + ), + mode="reflect", + ).squeeze(1) + dtype = y.dtype + spec = torch.stft( + y.float(), + self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + window=comfy.model_management.cast_to(self.window, dtype=torch.float32, device=y.device), + center=self.center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + spec = torch.view_as_real(spec) + + if self.mode == "pow2_sqrt": + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + spec = spec.to(dtype) + return spec + + +class LogMelSpectrogram(nn.Module): + def __init__( + self, + sample_rate=44100, + n_fft=2048, + win_length=2048, + hop_length=512, + n_mels=128, + center=False, + f_min=0.0, + f_max=None, + ): + super().__init__() + + self.sample_rate = sample_rate + self.n_fft = n_fft + self.win_length = win_length + self.hop_length = hop_length + self.center = center + self.n_mels = n_mels + self.f_min = f_min + self.f_max = f_max or sample_rate // 2 + + self.spectrogram = LinearSpectrogram(n_fft, win_length, hop_length, center) + self.mel_scale = MelScale( + self.n_mels, + self.sample_rate, + self.f_min, + self.f_max, + self.n_fft // 2 + 1, + "slaney", + "slaney", + ) + + def compress(self, x: Tensor) -> Tensor: + return torch.log(torch.clamp(x, min=1e-5)) + + def decompress(self, x: Tensor) -> Tensor: + return torch.exp(x) + + def forward(self, x: Tensor, return_linear: bool = False) -> Tensor: + linear = self.spectrogram(x) + x = self.mel_scale(linear) + x = self.compress(x) + # print(x.shape) + if return_linear: + return x, self.compress(linear) + + return x diff --git a/comfy/ldm/ace/vae/music_vocoder.py b/comfy/ldm/ace/vae/music_vocoder.py new file mode 100755 index 000000000..dc7c867da --- /dev/null +++ b/comfy/ldm/ace/vae/music_vocoder.py @@ -0,0 +1,542 @@ +# Original from: https://github.com/ace-step/ACE-Step/blob/main/music_dcae/music_vocoder.py +import torch +from torch import nn + +from functools import partial +from math import prod +from typing import Callable, Tuple, List + +import numpy as np +import torch.nn.functional as F +from torch.nn.utils import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm +# from diffusers.models.modeling_utils import ModelMixin +# from diffusers.loaders import FromOriginalModelMixin +# from diffusers.configuration_utils import ConfigMixin, register_to_config + +from .music_log_mel import LogMelSpectrogram + +import comfy.model_management +import comfy.ops +ops = comfy.ops.disable_weight_init + + +def drop_path( + x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True +): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ # noqa: E501 + + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * ( + x.ndim - 1 + ) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" # noqa: E501 + + def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f"drop_prob={round(self.drop_prob,3):0.3f}" + + +class LayerNorm(nn.Module): + r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + with shape (batch_size, channels, height, width). + """ # noqa: E501 + + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape,) + + def forward(self, x): + if self.data_format == "channels_last": + return F.layer_norm( + x, self.normalized_shape, comfy.model_management.cast_to(self.weight, dtype=x.dtype, device=x.device), comfy.model_management.cast_to(self.bias, dtype=x.dtype, device=x.device), self.eps + ) + elif self.data_format == "channels_first": + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = comfy.model_management.cast_to(self.weight[:, None], dtype=x.dtype, device=x.device) * x + comfy.model_management.cast_to(self.bias[:, None], dtype=x.dtype, device=x.device) + return x + + +class ConvNeXtBlock(nn.Module): + r"""ConvNeXt Block. There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0. + kernel_size (int): Kernel size for depthwise conv. Default: 7. + dilation (int): Dilation for depthwise conv. Default: 1. 
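+
+    A minimal usage sketch (shape check only; comfy.ops leaves module weights
+    uninitialized until a state dict is loaded):
+
+        >>> block = ConvNeXtBlock(dim=128)
+        >>> block(torch.randn(1, 128, 250)).shape
+        torch.Size([1, 128, 250])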
+ """ # noqa: E501 + + def __init__( + self, + dim: int, + drop_path: float = 0.0, + layer_scale_init_value: float = 1e-6, + mlp_ratio: float = 4.0, + kernel_size: int = 7, + dilation: int = 1, + ): + super().__init__() + + self.dwconv = ops.Conv1d( + dim, + dim, + kernel_size=kernel_size, + padding=int(dilation * (kernel_size - 1) / 2), + groups=dim, + ) # depthwise conv + self.norm = LayerNorm(dim, eps=1e-6) + self.pwconv1 = ops.Linear( + dim, int(mlp_ratio * dim) + ) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.pwconv2 = ops.Linear(int(mlp_ratio * dim), dim) + self.gamma = ( + nn.Parameter(torch.empty((dim)), requires_grad=False) + if layer_scale_init_value > 0 + else None + ) + self.drop_path = DropPath( + drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x, apply_residual: bool = True): + input = x + + x = self.dwconv(x) + x = x.permute(0, 2, 1) # (N, C, L) -> (N, L, C) + x = self.norm(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + + if self.gamma is not None: + x = comfy.model_management.cast_to(self.gamma, dtype=x.dtype, device=x.device) * x + + x = x.permute(0, 2, 1) # (N, L, C) -> (N, C, L) + x = self.drop_path(x) + + if apply_residual: + x = input + x + + return x + + +class ParallelConvNeXtBlock(nn.Module): + def __init__(self, kernel_sizes: List[int], *args, **kwargs): + super().__init__() + self.blocks = nn.ModuleList( + [ + ConvNeXtBlock(kernel_size=kernel_size, *args, **kwargs) + for kernel_size in kernel_sizes + ] + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.stack( + [block(x, apply_residual=False) for block in self.blocks] + [x], + dim=1, + ).sum(dim=1) + + +class ConvNeXtEncoder(nn.Module): + def __init__( + self, + input_channels=3, + depths=[3, 3, 9, 3], + dims=[96, 192, 384, 768], + drop_path_rate=0.0, + layer_scale_init_value=1e-6, + kernel_sizes: Tuple[int] = (7,), + ): + super().__init__() + assert len(depths) == len(dims) + + self.channel_layers = nn.ModuleList() + stem = nn.Sequential( + ops.Conv1d( + input_channels, + dims[0], + kernel_size=7, + padding=3, + padding_mode="replicate", + ), + LayerNorm(dims[0], eps=1e-6, data_format="channels_first"), + ) + self.channel_layers.append(stem) + + for i in range(len(depths) - 1): + mid_layer = nn.Sequential( + LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), + ops.Conv1d(dims[i], dims[i + 1], kernel_size=1), + ) + self.channel_layers.append(mid_layer) + + block_fn = ( + partial(ConvNeXtBlock, kernel_size=kernel_sizes[0]) + if len(kernel_sizes) == 1 + else partial(ParallelConvNeXtBlock, kernel_sizes=kernel_sizes) + ) + + self.stages = nn.ModuleList() + drop_path_rates = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] + + cur = 0 + for i in range(len(depths)): + stage = nn.Sequential( + *[ + block_fn( + dim=dims[i], + drop_path=drop_path_rates[cur + j], + layer_scale_init_value=layer_scale_init_value, + ) + for j in range(depths[i]) + ] + ) + self.stages.append(stage) + cur += depths[i] + + self.norm = LayerNorm(dims[-1], eps=1e-6, data_format="channels_first") + + def forward( + self, + x: torch.Tensor, + ) -> torch.Tensor: + for channel_layer, stage in zip(self.channel_layers, self.stages): + x = channel_layer(x) + x = stage(x) + + return self.norm(x) + + +def get_padding(kernel_size, dilation=1): + return (kernel_size * dilation - dilation) // 2 + + +class ResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super().__init__() + + 
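+        # HiFiGAN-style residual block: convs1 holds three weight-normalized
+        # convs with growing dilations (1, 3, 5) to widen the receptive field;
+        # forward() pairs each with a dilation-1 conv from convs2 before the
+        # residual add.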
self.convs1 = nn.ModuleList( + [ + weight_norm( + ops.Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + ops.Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + weight_norm( + ops.Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]), + ) + ), + ] + ) + + self.convs2 = nn.ModuleList( + [ + weight_norm( + ops.Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + ops.Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + ops.Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + ] + ) + + def forward(self, x): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.silu(x) + xt = c1(xt) + xt = F.silu(xt) + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for conv in self.convs1: + remove_weight_norm(conv) + for conv in self.convs2: + remove_weight_norm(conv) + + +class HiFiGANGenerator(nn.Module): + def __init__( + self, + *, + hop_length: int = 512, + upsample_rates: Tuple[int] = (8, 8, 2, 2, 2), + upsample_kernel_sizes: Tuple[int] = (16, 16, 8, 2, 2), + resblock_kernel_sizes: Tuple[int] = (3, 7, 11), + resblock_dilation_sizes: Tuple[Tuple[int]] = ( + (1, 3, 5), (1, 3, 5), (1, 3, 5)), + num_mels: int = 128, + upsample_initial_channel: int = 512, + use_template: bool = True, + pre_conv_kernel_size: int = 7, + post_conv_kernel_size: int = 7, + post_activation: Callable = partial(nn.SiLU, inplace=True), + ): + super().__init__() + + assert ( + prod(upsample_rates) == hop_length + ), f"hop_length must be {prod(upsample_rates)}" + + self.conv_pre = weight_norm( + ops.Conv1d( + num_mels, + upsample_initial_channel, + pre_conv_kernel_size, + 1, + padding=get_padding(pre_conv_kernel_size), + ) + ) + + self.num_upsamples = len(upsample_rates) + self.num_kernels = len(resblock_kernel_sizes) + + self.noise_convs = nn.ModuleList() + self.use_template = use_template + self.ups = nn.ModuleList() + + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + self.ups.append( + weight_norm( + ops.ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + if not use_template: + continue + + if i + 1 < len(upsample_rates): + stride_f0 = np.prod(upsample_rates[i + 1:]) + self.noise_convs.append( + ops.Conv1d( + 1, + c_cur, + kernel_size=stride_f0 * 2, + stride=stride_f0, + padding=stride_f0 // 2, + ) + ) + else: + self.noise_convs.append(ops.Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): + self.resblocks.append(ResBlock1(ch, k, d)) + + self.activation_post = post_activation() + self.conv_post = weight_norm( + ops.Conv1d( + ch, + 1, + post_conv_kernel_size, + 1, + padding=get_padding(post_conv_kernel_size), + ) + ) + + def forward(self, x, template=None): + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + x = F.silu(x, inplace=True) + x = self.ups[i](x) + + if self.use_template: + x = x + 
self.noise_convs[i](template) + + xs = None + + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + + x = xs / self.num_kernels + + x = self.activation_post(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + for up in self.ups: + remove_weight_norm(up) + for block in self.resblocks: + block.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + + +class ADaMoSHiFiGANV1(nn.Module): + def __init__( + self, + input_channels: int = 128, + depths: List[int] = [3, 3, 9, 3], + dims: List[int] = [128, 256, 384, 512], + drop_path_rate: float = 0.0, + kernel_sizes: Tuple[int] = (7,), + upsample_rates: Tuple[int] = (4, 4, 2, 2, 2, 2, 2), + upsample_kernel_sizes: Tuple[int] = (8, 8, 4, 4, 4, 4, 4), + resblock_kernel_sizes: Tuple[int] = (3, 7, 11, 13), + resblock_dilation_sizes: Tuple[Tuple[int]] = ( + (1, 3, 5), (1, 3, 5), (1, 3, 5), (1, 3, 5)), + num_mels: int = 512, + upsample_initial_channel: int = 1024, + use_template: bool = False, + pre_conv_kernel_size: int = 13, + post_conv_kernel_size: int = 13, + sampling_rate: int = 44100, + n_fft: int = 2048, + win_length: int = 2048, + hop_length: int = 512, + f_min: int = 40, + f_max: int = 16000, + n_mels: int = 128, + ): + super().__init__() + + self.backbone = ConvNeXtEncoder( + input_channels=input_channels, + depths=depths, + dims=dims, + drop_path_rate=drop_path_rate, + kernel_sizes=kernel_sizes, + ) + + self.head = HiFiGANGenerator( + hop_length=hop_length, + upsample_rates=upsample_rates, + upsample_kernel_sizes=upsample_kernel_sizes, + resblock_kernel_sizes=resblock_kernel_sizes, + resblock_dilation_sizes=resblock_dilation_sizes, + num_mels=num_mels, + upsample_initial_channel=upsample_initial_channel, + use_template=use_template, + pre_conv_kernel_size=pre_conv_kernel_size, + post_conv_kernel_size=post_conv_kernel_size, + ) + self.sampling_rate = sampling_rate + self.mel_transform = LogMelSpectrogram( + sample_rate=sampling_rate, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + f_min=f_min, + f_max=f_max, + n_mels=n_mels, + ) + self.eval() + + @torch.no_grad() + def decode(self, mel): + y = self.backbone(mel) + y = self.head(y) + return y + + @torch.no_grad() + def encode(self, x): + return self.mel_transform(x) + + def forward(self, mel): + y = self.backbone(mel) + y = self.head(y) + return y diff --git a/comfy/model_base.py b/comfy/model_base.py index 045df1317..6408005b6 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -39,6 +39,7 @@ import comfy.ldm.wan.model import comfy.ldm.hunyuan3d.model import comfy.ldm.hidream.model import comfy.ldm.chroma.model +import comfy.ldm.ace.model import comfy.model_management import comfy.patcher_extension @@ -1121,3 +1122,21 @@ class Chroma(Flux): if guidance is not None: out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) return out + +class ACEStep(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.ace.model.ACEStepTransformer2DModel) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + noise = kwargs.get("noise", None) + + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + + conditioning_lyrics = kwargs.get("conditioning_lyrics", None) + 
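+        # Lyric token ids ride along as an extra cond below; note the guard
+        # reuses the cross_attn check from above, so lyrics are only attached
+        # when a text conditioning tensor was provided as well.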
if cross_attn is not None: + out['lyric_token_idx'] = comfy.conds.CONDRegular(conditioning_lyrics) + out['speaker_embeds'] = comfy.conds.CONDRegular(torch.zeros(noise.shape[0], 512, device=noise.device, dtype=noise.dtype)) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 9254843ea..ff4c29d7e 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -226,6 +226,31 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config.update(json.loads(metadata["config"]).get("transformer", {})) return dit_config + if '{}genre_embedder.weight'.format(key_prefix) in state_dict_keys: #ACE-Step model + dit_config = {} + dit_config["audio_model"] = "ace" + dit_config["attention_head_dim"] = 128 + dit_config["in_channels"] = 8 + dit_config["inner_dim"] = 2560 + dit_config["max_height"] = 16 + dit_config["max_position"] = 32768 + dit_config["max_width"] = 32768 + dit_config["mlp_ratio"] = 2.5 + dit_config["num_attention_heads"] = 20 + dit_config["num_layers"] = 24 + dit_config["out_channels"] = 8 + dit_config["patch_size"] = [16, 1] + dit_config["rope_theta"] = 1000000.0 + dit_config["speaker_embedding_dim"] = 512 + dit_config["text_embedding_dim"] = 768 + + dit_config["ssl_encoder_depths"] = [8, 8] + dit_config["ssl_latent_dims"] = [1024, 768] + dit_config["ssl_names"] = ["mert", "m-hubert"] + dit_config["lyric_encoder_vocab_size"] = 6693 + dit_config["lyric_hidden_size"] = 1024 + return dit_config + if '{}t_block.1.weight'.format(key_prefix) in state_dict_keys: # PixArt patch_size = 2 dit_config = {} diff --git a/comfy/sd.py b/comfy/sd.py index da9b36d0e..50af243ba 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -15,6 +15,7 @@ import comfy.ldm.lightricks.vae.causal_video_autoencoder import comfy.ldm.cosmos.vae import comfy.ldm.wan.vae import comfy.ldm.hunyuan3d.vae +import comfy.ldm.ace.vae.music_dcae_pipeline import yaml import math @@ -42,6 +43,7 @@ import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan import comfy.text_encoders.hidream +import comfy.text_encoders.ace import comfy.model_patcher import comfy.lora @@ -437,6 +439,19 @@ class VAE: ddconfig = {"embed_dim": 64, "num_freqs": 8, "include_pi": False, "heads": 16, "width": 1024, "num_decoder_layers": 16, "qkv_bias": False, "qk_norm": True, "geo_decoder_mlp_expand_ratio": mlp_expand, "geo_decoder_downsample_ratio": downsample_ratio, "geo_decoder_ln_post": ln_post} self.first_stage_model = comfy.ldm.hunyuan3d.vae.ShapeVAE(**ddconfig) self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] + elif "vocoder.backbone.channel_layers.0.0.bias" in sd: #Ace Step Audio + self.first_stage_model = comfy.ldm.ace.vae.music_dcae_pipeline.MusicDCAE(source_sample_rate=44100) + self.memory_used_encode = lambda shape, dtype: (shape[2] * 300) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (shape[2] * shape[3] * 72000) * model_management.dtype_size(dtype) + self.latent_channels = 8 + self.output_channels = 2 + # self.upscale_ratio = 2048 + # self.downscale_ratio = 2048 + self.latent_dim = 2 + self.process_output = lambda audio: audio + self.process_input = lambda audio: audio + self.working_dtypes = [torch.bfloat16, torch.float32] + self.disable_offload = True else: logging.warning("WARNING: No VAE weights detected, VAE not initalized.") self.first_stage_model = None @@ -715,6 +730,7 @@ class CLIPType(Enum): WAN = 13 HIDREAM = 14 CHROMA = 15 + ACE = 16 def load_clip(ckpt_paths, embedding_directory=None, 
clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -840,8 +856,13 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.aura_t5.AuraT5Model clip_target.tokenizer = comfy.text_encoders.aura_t5.AuraT5Tokenizer elif te_model == TEModel.T5_BASE: - clip_target.clip = comfy.text_encoders.sa_t5.SAT5Model - clip_target.tokenizer = comfy.text_encoders.sa_t5.SAT5Tokenizer + if clip_type == CLIPType.ACE or "spiece_model" in clip_data[0]: + clip_target.clip = comfy.text_encoders.ace.AceT5Model + clip_target.tokenizer = comfy.text_encoders.ace.AceT5Tokenizer + tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None) + else: + clip_target.clip = comfy.text_encoders.sa_t5.SAT5Model + clip_target.tokenizer = comfy.text_encoders.sa_t5.SAT5Tokenizer elif te_model == TEModel.GEMMA_2_2B: clip_target.clip = comfy.text_encoders.lumina2.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.lumina2.LuminaTokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index a1dea2343..fef25eb24 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -17,6 +17,7 @@ import comfy.text_encoders.hunyuan_video import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan +import comfy.text_encoders.ace from . import supported_models_base from . import latent_formats @@ -1100,6 +1101,34 @@ class Chroma(supported_models_base.BASE): t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.pixart_t5.PixArtTokenizer, comfy.text_encoders.pixart_t5.pixart_te(**t5_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma] +class ACEStep(supported_models_base.BASE): + unet_config = { + "audio_model": "ace", + } + + unet_extra_config = { + } + + sampling_settings = { + "shift": 3.0, + } + + latent_format = comfy.latent_formats.ACEAudio + + memory_usage_factor = 0.5 + + supported_inference_dtypes = [torch.bfloat16, torch.float32] + + vae_key_prefix = ["vae."] + text_encoder_key_prefix = ["text_encoders."] + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.ACEStep(self, device=device) + return out + + def clip_target(self, state_dict={}): + return supported_models_base.ClipTarget(comfy.text_encoders.ace.AceT5Tokenizer, comfy.text_encoders.ace.AceT5Model) + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep] 
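+# Note: configs in `models` are tried in order during model detection, so
+# ACEStep is simply appended after the existing image/video entries.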
models += [SVD_img2vid] diff --git a/comfy/text_encoders/ace.py b/comfy/text_encoders/ace.py new file mode 100644 index 000000000..b6fe451bd --- /dev/null +++ b/comfy/text_encoders/ace.py @@ -0,0 +1,145 @@ +from comfy import sd1_clip +from .spiece_tokenizer import SPieceTokenizer +import comfy.text_encoders.t5 +import os +import re +import torch +import logging + +from tokenizers import Tokenizer +from .ace_text_cleaners import multilingual_cleaners + +SUPPORT_LANGUAGES = { + "en": 259, "de": 260, "fr": 262, "es": 284, "it": 285, + "pt": 286, "pl": 294, "tr": 295, "ru": 267, "cs": 293, + "nl": 297, "ar": 5022, "zh": 5023, "ja": 5412, "hu": 5753, + "ko": 6152, "hi": 6680 +} + +structure_pattern = re.compile(r"\[.*?\]") + +DEFAULT_VOCAB_FILE = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "ace_lyrics_tokenizer"), "vocab.json") + + +class VoiceBpeTokenizer: + def __init__(self, vocab_file=DEFAULT_VOCAB_FILE): + self.tokenizer = None + if vocab_file is not None: + self.tokenizer = Tokenizer.from_file(vocab_file) + + def preprocess_text(self, txt, lang): + txt = multilingual_cleaners(txt, lang) + return txt + + def encode(self, txt, lang='en'): + # lang = lang.split("-")[0] # remove the region + # self.check_input_length(txt, lang) + txt = self.preprocess_text(txt, lang) + lang = "zh-cn" if lang == "zh" else lang + txt = f"[{lang}]{txt}" + txt = txt.replace(" ", "[SPACE]") + return self.tokenizer.encode(txt).ids + + def get_lang(self, line): + if line.startswith("[") and line[3:4] == ']': + lang = line[1:3].lower() + if lang in SUPPORT_LANGUAGES: + return lang, line[4:] + return "en", line + + def __call__(self, string): + lines = string.split("\n") + lyric_token_idx = [261] + for line in lines: + line = line.strip() + if not line: + lyric_token_idx += [2] + continue + + lang, line = self.get_lang(line) + + if lang not in SUPPORT_LANGUAGES: + lang = "en" + if "zh" in lang: + lang = "zh" + if "spa" in lang: + lang = "es" + + try: + if structure_pattern.match(line): + token_idx = self.encode(line, "en") + else: + token_idx = self.encode(line, lang) + lyric_token_idx = lyric_token_idx + token_idx + [2] + except Exception as e: + logging.warning("tokenize error {} for line {} major_language {}".format(e, line, lang)) + return {"input_ids": lyric_token_idx} + + @staticmethod + def from_pretrained(path, **kwargs): + return VoiceBpeTokenizer(path, **kwargs) + + def get_vocab(self): + return {} + + +class UMT5BaseModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, model_options={}): + textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "umt5_config_base.json") + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=True, zero_out_masked=False, model_options=model_options) + +class UMT5BaseTokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer = tokenizer_data.get("spiece_model", None) + super().__init__(tokenizer, pad_with_end=False, embedding_size=768, embedding_key='umt5base', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=0, tokenizer_data=tokenizer_data) + + def state_dict(self): + return {"spiece_model": self.tokenizer.serialize_model()} + +class LyricsTokenizer(sd1_clip.SDTokenizer): + def 
__init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "ace_lyrics_tokenizer"), "vocab.json") + super().__init__(tokenizer, pad_with_end=False, embedding_size=1024, embedding_key='lyrics', tokenizer_class=VoiceBpeTokenizer, has_start_token=True, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=2, has_end_token=False, tokenizer_data=tokenizer_data) + +class AceT5Tokenizer: + def __init__(self, embedding_directory=None, tokenizer_data={}): + self.voicebpe = LyricsTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.umt5base = UMT5BaseTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + + def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): + out = {} + out["lyrics"] = self.voicebpe.tokenize_with_weights(kwargs.get("lyrics", ""), return_word_ids, **kwargs) + out["umt5base"] = self.umt5base.tokenize_with_weights(text, return_word_ids, **kwargs) + return out + + def untokenize(self, token_weight_pair): + return self.umt5base.untokenize(token_weight_pair) + + def state_dict(self): + return self.umt5base.state_dict() + +class AceT5Model(torch.nn.Module): + def __init__(self, device="cpu", dtype=None, model_options={}, **kwargs): + super().__init__() + self.umt5base = UMT5BaseModel(device=device, dtype=dtype, model_options=model_options) + self.dtypes = set() + if dtype is not None: + self.dtypes.add(dtype) + + def set_clip_options(self, options): + self.umt5base.set_clip_options(options) + + def reset_clip_options(self): + self.umt5base.reset_clip_options() + + def encode_token_weights(self, token_weight_pairs): + token_weight_pairs_umt5base = token_weight_pairs["umt5base"] + token_weight_pairs_lyrics = token_weight_pairs["lyrics"] + + t5_out, t5_pooled = self.umt5base.encode_token_weights(token_weight_pairs_umt5base) + + lyrics_embeds = torch.tensor(list(map(lambda a: a[0], token_weight_pairs_lyrics[0]))).unsqueeze(0) + return t5_out, None, {"conditioning_lyrics": lyrics_embeds} + + def load_sd(self, sd): + return self.umt5base.load_sd(sd) diff --git a/comfy/text_encoders/ace_lyrics_tokenizer/vocab.json b/comfy/text_encoders/ace_lyrics_tokenizer/vocab.json new file mode 100644 index 000000000..519ed340c --- /dev/null +++ b/comfy/text_encoders/ace_lyrics_tokenizer/vocab.json @@ -0,0 +1,15535 @@ +{ + "version": "1.0", + "truncation": null, + "padding": null, + "added_tokens": [ + { + "id": 0, + "special": true, + "content": "[STOP]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 1, + "special": true, + "content": "[UNK]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 2, + "special": true, + "content": "[SPACE]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 259, + "special": true, + "content": "[en]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 260, + "special": true, + "content": "[de]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 261, + "special": true, + "content": "[START]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 262, + "special": true, + "content": "[fr]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 284, 
+ "special": true, + "content": "[es]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 285, + "special": true, + "content": "[it]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 286, + "special": true, + "content": "[pt]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 294, + "special": true, + "content": "[pl]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 295, + "special": true, + "content": "[tr]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 267, + "special": true, + "content": "[ru]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 293, + "special": true, + "content": "[cs]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 297, + "special": true, + "content": "[nl]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 5022, + "special": true, + "content": "[ar]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 5023, + "special": true, + "content": "[zh-cn]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 5412, + "special": true, + "content": "[ja]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 5753, + "special": true, + "content": "[hu]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6152, + "special": true, + "content": "[ko]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6680, + "special": true, + "content": "[hi]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6681, + "special": true, + "content": "[start]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6682, + "special": true, + "content": "[intro]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6683, + "special": true, + "content": "[verse]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6684, + "special": true, + "content": "[chorus]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6685, + "special": true, + "content": "[bridge]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6686, + "special": true, + "content": "[outro]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6687, + "special": true, + "content": "[end]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6688, + "special": true, + "content": "[inst]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6689, + "special": true, + "content": "[solo]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6690, + "special": true, + "content": "[hook]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6691, + 
"special": true, + "content": "[pre-chorus]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + }, + { + "id": 6692, + "special": true, + "content": "[break]", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false + } + ], + "normalizer": null, + "pre_tokenizer": { + "type": "Whitespace" + }, + "post_processor": null, + "decoder": null, + "model": { + "type": "BPE", + "dropout": null, + "unk_token": "[UNK]", + "continuing_subword_prefix": null, + "end_of_word_suffix": null, + "fuse_unk": false, + "vocab": { + "[STOP]": 0, + "[UNK]": 1, + "[SPACE]": 2, + "!": 3, + "'": 4, + "(": 5, + ")": 6, + ",": 7, + "-": 8, + ".": 9, + "/": 10, + ":": 11, + ";": 12, + "?": 13, + "a": 14, + "b": 15, + "c": 16, + "d": 17, + "e": 18, + "f": 19, + "g": 20, + "h": 21, + "i": 22, + "j": 23, + "k": 24, + "l": 25, + "m": 26, + "n": 27, + "o": 28, + "p": 29, + "q": 30, + "r": 31, + "s": 32, + "t": 33, + "u": 34, + "v": 35, + "w": 36, + "x": 37, + "y": 38, + "z": 39, + "th": 40, + "in": 41, + "the": 42, + "an": 43, + "er": 44, + "ou": 45, + "re": 46, + "on": 47, + "at": 48, + "ed": 49, + "en": 50, + "to": 51, + "ing": 52, + "and": 53, + "is": 54, + "as": 55, + "al": 56, + "or": 57, + "of": 58, + "ar": 59, + "it": 60, + "es": 61, + "he": 62, + "st": 63, + "le": 64, + "om": 65, + "se": 66, + "be": 67, + "ad": 68, + "ow": 69, + "ly": 70, + "ch": 71, + "wh": 72, + "that": 73, + "you": 74, + "li": 75, + "ve": 76, + "ac": 77, + "ti": 78, + "ld": 79, + "me": 80, + "was": 81, + "gh": 82, + "id": 83, + "ll": 84, + "wi": 85, + "ent": 86, + "for": 87, + "ay": 88, + "ro": 89, + "ver": 90, + "ic": 91, + "her": 92, + "ke": 93, + "his": 94, + "no": 95, + "ut": 96, + "un": 97, + "ir": 98, + "lo": 99, + "we": 100, + "ri": 101, + "ha": 102, + "with": 103, + "ght": 104, + "out": 105, + "im": 106, + "ion": 107, + "all": 108, + "ab": 109, + "one": 110, + "ne": 111, + "ge": 112, + "ould": 113, + "ter": 114, + "mo": 115, + "had": 116, + "ce": 117, + "she": 118, + "go": 119, + "sh": 120, + "ur": 121, + "am": 122, + "so": 123, + "pe": 124, + "my": 125, + "de": 126, + "are": 127, + "but": 128, + "ome": 129, + "fr": 130, + "ther": 131, + "fe": 132, + "su": 133, + "do": 134, + "con": 135, + "te": 136, + "ain": 137, + "ere": 138, + "po": 139, + "if": 140, + "they": 141, + "us": 142, + "ag": 143, + "tr": 144, + "now": 145, + "oun": 146, + "this": 147, + "have": 148, + "not": 149, + "sa": 150, + "il": 151, + "up": 152, + "thing": 153, + "from": 154, + "ap": 155, + "him": 156, + "ack": 157, + "ation": 158, + "ant": 159, + "our": 160, + "op": 161, + "like": 162, + "ust": 163, + "ess": 164, + "bo": 165, + "ok": 166, + "ul": 167, + "ind": 168, + "ex": 169, + "com": 170, + "some": 171, + "there": 172, + "ers": 173, + "co": 174, + "res": 175, + "man": 176, + "ard": 177, + "pl": 178, + "wor": 179, + "way": 180, + "tion": 181, + "fo": 182, + "ca": 183, + "were": 184, + "by": 185, + "ate": 186, + "pro": 187, + "ted": 188, + "ound": 189, + "own": 190, + "would": 191, + "ts": 192, + "what": 193, + "qu": 194, + "ally": 195, + "ight": 196, + "ck": 197, + "gr": 198, + "when": 199, + "ven": 200, + "can": 201, + "ough": 202, + "ine": 203, + "end": 204, + "per": 205, + "ous": 206, + "od": 207, + "ide": 208, + "know": 209, + "ty": 210, + "very": 211, + "si": 212, + "ak": 213, + "who": 214, + "about": 215, + "ill": 216, + "them": 217, + "est": 218, + "red": 219, + "ye": 220, + "could": 221, + "ong": 222, + "your": 223, + "their": 224, + "em": 225, + "just": 226, + "other": 227, + "into": 228, + 
"any": 229, + "whi": 230, + "um": 231, + "tw": 232, + "ast": 233, + "der": 234, + "did": 235, + "ie": 236, + "been": 237, + "ace": 238, + "ink": 239, + "ity": 240, + "back": 241, + "ting": 242, + "br": 243, + "more": 244, + "ake": 245, + "pp": 246, + "then": 247, + "sp": 248, + "el": 249, + "use": 250, + "bl": 251, + "said": 252, + "over": 253, + "get": 254, + "ß": 255, + "ä": 256, + "ö": 257, + "ü": 258, + "[en]": 259, + "[de]": 260, + "[START]": 261, + "[fr]": 262, + "œ": 263, + "ï": 264, + "ê": 265, + "â": 266, + "[ru]": 267, + "ÿ": 268, + "è": 269, + "à": 270, + "ë": 271, + "ù": 272, + "î": 273, + "ç": 274, + "æ": 275, + "ô": 276, + "û": 277, + "á": 278, + "é": 279, + "í": 280, + "ó": 281, + "ú": 282, + "ñ": 283, + "[es]": 284, + "[it]": 285, + "[pt]": 286, + "ń": 287, + "ś": 288, + "ę": 289, + "ą": 290, + "ż": 291, + "ć": 292, + "[cs]": 293, + "[pl]": 294, + "[tr]": 295, + "ã": 296, + "[nl]": 297, + "ş": 298, + "ğ": 299, + "ı": 300, + "ò": 301, + "ì": 302, + "¿": 303, + "…": 304, + "i̇": 305, + "õ": 306, + "\"": 307, + "´": 308, + "ø": 309, + "č": 310, + "ō": 311, + "š": 312, + "ž": 313, + "̇": 314, + "ei": 315, + "ich": 316, + "ein": 317, + "au": 318, + "sch": 319, + "und": 320, + "die": 321, + "da": 322, + "den": 323, + "gen": 324, + "zu": 325, + "hr": 326, + "ten": 327, + "mi": 328, + "sie": 329, + "das": 330, + "eine": 331, + "icht": 332, + "ber": 333, + "ach": 334, + "auf": 335, + "lich": 336, + "nicht": 337, + "mm": 338, + "ben": 339, + "war": 340, + "mit": 341, + "sich": 342, + "ig": 343, + "aus": 344, + "ist": 345, + "wie": 346, + "och": 347, + "ung": 348, + "ann": 349, + "ür": 350, + "hn": 351, + "ihr": 352, + "sen": 353, + "tz": 354, + "dem": 355, + "eit": 356, + "hat": 357, + "wir": 358, + "von": 359, + "wei": 360, + "ier": 361, + "ra": 362, + "einen": 363, + "vor": 364, + "als": 365, + "wo": 366, + "rei": 367, + "ste": 368, + "lie": 369, + "auch": 370, + "du": 371, + "des": 372, + "ko": 373, + "über": 374, + "bei": 375, + "hen": 376, + "hm": 377, + "lei": 378, + "aber": 379, + "wen": 380, + "hl": 381, + "ger": 382, + "nach": 383, + "ft": 384, + "imm": 385, + "je": 386, + "schen": 387, + "wer": 388, + "ser": 389, + "än": 390, + "sein": 391, + "ol": 392, + "cht": 393, + "für": 394, + "kl": 395, + "ff": 396, + "einem": 397, + "nen": 398, + "ja": 399, + "noch": 400, + "hatte": 401, + "pf": 402, + "hin": 403, + "di": 404, + "chen": 405, + "rü": 406, + "iel": 407, + "sel": 408, + "dass": 409, + "ihn": 410, + "mir": 411, + "schl": 412, + "ön": 413, + "gan": 414, + "gt": 415, + "einer": 416, + "sten": 417, + "mich": 418, + "wenn": 419, + "ell": 420, + "gte": 421, + "mal": 422, + "gel": 423, + "ken": 424, + "nur": 425, + "mmen": 426, + "fü": 427, + "ern": 428, + "ör": 429, + "unter": 430, + "ander": 431, + "dur": 432, + "uch": 433, + "ta": 434, + "men": 435, + "mach": 436, + "doch": 437, + "durch": 438, + "os": 439, + "gl": 440, + "hal": 441, + "ihre": 442, + "wä": 443, + "immer": 444, + "ihm": 445, + "kann": 446, + "ort": 447, + "dann": 448, + "lan": 449, + "tzt": 450, + "oder": 451, + "hren": 452, + "et": 453, + "kön": 454, + "ick": 455, + "fa": 456, + "wieder": 457, + "daß": 458, + "mein": 459, + "fen": 460, + "ganz": 461, + "diese": 462, + "ster": 463, + "dar": 464, + "wa": 465, + "ges": 466, + "na": 467, + "fl": 468, + "igen": 469, + "sche": 470, + "ungen": 471, + "mehr": 472, + "ßen": 473, + "ot": 474, + "kon": 475, + "gew": 476, + "haben": 477, + "geh": 478, + "ät": 479, + "sind": 480, + "dr": 481, + "wel": 482, + "uns": 483, + "vo": 484, + "ma": 485, + "ute": 486, + 
"schon": 487, + "bes": 488, + "gesch": 489, + "bt": 490, + "che": 491, + "son": 492, + "ob": 493, + "la": 494, + "rück": 495, + "seine": 496, + "kr": 497, + "fre": 498, + "eil": 499, + "zum": 500, + "hier": 501, + "kt": 502, + "ige": 503, + "spr": 504, + "leben": 505, + "bst": 506, + "zeit": 507, + "gro": 508, + "denn": 509, + "ho": 510, + "scha": 511, + "bar": 512, + "alle": 513, + "gegen": 514, + "wür": 515, + "mü": 516, + "ze": 517, + "werden": 518, + "jetzt": 519, + "kommen": 520, + "nie": 521, + "sei": 522, + "heit": 523, + "soll": 524, + "glei": 525, + "meine": 526, + "woll": 527, + "ner": 528, + "habe": 529, + "wur": 530, + "lichen": 531, + "assen": 532, + "nte": 533, + "sehen": 534, + "wird": 535, + "bis": 536, + "gar": 537, + "ien": 538, + "mus": 539, + "uß": 540, + "är": 541, + "stell": 542, + "keit": 543, + "zwei": 544, + "selbst": 545, + "sta": 546, + "pa": 547, + "sagte": 548, + "tet": 549, + "kam": 550, + "ssen": 551, + "viel": 552, + "ug": 553, + "zen": 554, + "hei": 555, + "mann": 556, + "will": 557, + "geb": 558, + "waren": 559, + "ück": 560, + "äch": 561, + "mer": 562, + "ru": 563, + "hau": 564, + "eigen": 565, + "ang": 566, + "weg": 567, + "blick": 568, + "fra": 569, + "alles": 570, + "ka": 571, + "augen": 572, + "fin": 573, + "liche": 574, + "unser": 575, + "dern": 576, + "herr": 577, + "nun": 578, + "vie": 579, + "chte": 580, + "wohl": 581, + "fall": 582, + "ht": 583, + "ün": 584, + "etwas": 585, + "stand": 586, + "äu": 587, + "mö": 588, + "tel": 589, + "rie": 590, + "dich": 591, + "dies": 592, + "hand": 593, + "bin": 594, + "ffen": 595, + "nichts": 596, + "dan": 597, + "hne": 598, + "ihnen": 599, + "esen": 600, + "dieser": 601, + "frau": 602, + "art": 603, + "dir": 604, + "isch": 605, + "erst": 606, + "gleich": 607, + "komm": 608, + "hör": 609, + "ße": 610, + "dig": 611, + "sehr": 612, + "zei": 613, + "sam": 614, + "aum": 615, + "hät": 616, + "ingen": 617, + "gut": 618, + "mut": 619, + "cken": 620, + "konnte": 621, + "stimm": 622, + "zur": 623, + "itz": 624, + "weil": 625, + "würde": 626, + "fä": 627, + "können": 628, + "keine": 629, + "fer": 630, + "ischen": 631, + "voll": 632, + "eines": 633, + "setz": 634, + "zie": 635, + "del": 636, + "tete": 637, + "seiner": 638, + "ieren": 639, + "gest": 640, + "zurück": 641, + "wurde": 642, + "schn": 643, + "pr": 644, + "ließ": 645, + "tra": 646, + "mä": 647, + "gend": 648, + "fol": 649, + "ik": 650, + "schla": 651, + "schaft": 652, + "ater": 653, + "weiß": 654, + "seinen": 655, + "lassen": 656, + "lu": 657, + "unden": 658, + "teil": 659, + "neu": 660, + "iert": 661, + "menschen": 662, + "hmen": 663, + "str": 664, + "gi": 665, + "sah": 666, + "ihren": 667, + "eln": 668, + "weiter": 669, + "gehen": 670, + "iger": 671, + "macht": 672, + "tag": 673, + "also": 674, + "halten": 675, + "nis": 676, + "acht": 677, + "geben": 678, + "og": 679, + "nat": 680, + "mar": 681, + "det": 682, + "ohne": 683, + "haus": 684, + "tro": 685, + "ange": 686, + "lau": 687, + "spiel": 688, + "tre": 689, + "schr": 690, + "inn": 691, + "los": 692, + "machen": 693, + "hätte": 694, + "beg": 695, + "wirk": 696, + "alt": 697, + "glich": 698, + "tes": 699, + "richt": 700, + "freund": 701, + "ihrer": 702, + "fel": 703, + "bel": 704, + "sol": 705, + "einmal": 706, + "eben": 707, + "hol": 708, + "hän": 709, + "tern": 710, + "hö": 711, + "schw": 712, + "recht": 713, + "wahr": 714, + "seinem": 715, + "stehen": 716, + "hlen": 717, + "ins": 718, + "ging": 719, + "wollte": 720, + "wissen": 721, + "ungs": 722, + "ald": 723, + "ass": 724, + "jahr": 725, + "mor": 726, + 
"welt": 727, + "under": 728, + "zusa": 729, + "kopf": 730, + "lang": 731, + "hinter": 732, + "atz": 733, + "stra": 734, + "angen": 735, + "ank": 736, + "ade": 737, + "glau": 738, + "fach": 739, + "hatten": 740, + "fort": 741, + "eicht": 742, + "iff": 743, + "ler": 744, + "mei": 745, + "diesem": 746, + "kein": 747, + "frei": 748, + "führ": 749, + "vom": 750, + "β": 751, + "ai": 752, + "ait": 753, + "que": 754, + "les": 755, + "av": 756, + "ais": 757, + "oi": 758, + "eu": 759, + "lle": 760, + "par": 761, + "ans": 762, + "ment": 763, + "ét": 764, + "une": 765, + "pas": 766, + "qui": 767, + "elle": 768, + "dé": 769, + "pour": 770, + "dans": 771, + "ré": 772, + "tou": 773, + "vous": 774, + "vi": 775, + "ouv": 776, + "mon": 777, + "sur": 778, + "ci": 779, + "plu": 780, + "ère": 781, + "mais": 782, + "ois": 783, + "plus": 784, + "ée": 785, + "aient": 786, + "mp": 787, + "lui": 788, + "ave": 789, + "était": 790, + "ses": 791, + "tout": 792, + "oir": 793, + "avait": 794, + "és": 795, + "mes": 796, + "nous": 797, + "eux": 798, + "bi": 799, + "ons": 800, + "pu": 801, + "ces": 802, + "tu": 803, + "leur": 804, + "don": 805, + "eur": 806, + "ette": 807, + "aire": 808, + "avec": 809, + "dit": 810, + "té": 811, + "ille": 812, + "comme": 813, + "cr": 814, + "ux": 815, + "ès": 816, + "aux": 817, + "jour": 818, + "ils": 819, + "bien": 820, + "cou": 821, + "quel": 822, + "peu": 823, + "cette": 824, + "cu": 825, + "mê": 826, + "fait": 827, + "gu": 828, + "être": 829, + "ité": 830, + "ens": 831, + "ni": 832, + "lé": 833, + "dis": 834, + "ble": 835, + "né": 836, + "puis": 837, + "même": 838, + "ques": 839, + "fi": 840, + "age": 841, + "moi": 842, + "ence": 843, + "ont": 844, + "main": 845, + "ors": 846, + "aut": 847, + "ance": 848, + "mé": 849, + "sans": 850, + "sé": 851, + "lon": 852, + "hom": 853, + "car": 854, + "able": 855, + "cher": 856, + "deux": 857, + "enf": 858, + "où": 859, + "ph": 860, + "ure": 861, + "temp": 862, + "pos": 863, + "rent": 864, + "pé": 865, + "faire": 866, + "pi": 867, + "tres": 868, + "ça": 869, + "endre": 870, + "bon": 871, + "sou": 872, + "int": 873, + "pré": 874, + "sent": 875, + "tant": 876, + "cer": 877, + "là": 878, + "lais": 879, + "près": 880, + "bre": 881, + "cour": 882, + "pet": 883, + "comp": 884, + "lait": 885, + "trouv": 886, + "entre": 887, + "sont": 888, + "dev": 889, + "nu": 890, + "temps": 891, + "dou": 892, + "rait": 893, + "bou": 894, + "quand": 895, + "jours": 896, + "avoir": 897, + "été": 898, + "ale": 899, + "pre": 900, + "fois": 901, + "orte": 902, + "vé": 903, + "non": 904, + "tous": 905, + "jus": 906, + "coup": 907, + "homme": 908, + "ête": 909, + "aussi": 910, + "urs": 911, + "seu": 912, + "ord": 913, + "min": 914, + "gé": 915, + "core": 916, + "va": 917, + "vre": 918, + "encore": 919, + "sem": 920, + "ite": 921, + "autre": 922, + "pris": 923, + "peut": 924, + "ue": 925, + "ante": 926, + "gn": 927, + "rép": 928, + "hu": 929, + "sion": 930, + "votre": 931, + "dire": 932, + "ez": 933, + "fem": 934, + "leurs": 935, + "met": 936, + "cri": 937, + "mis": 938, + "tour": 939, + "rai": 940, + "jam": 941, + "regar": 942, + "rien": 943, + "vers": 944, + "suis": 945, + "pouv": 946, + "vis": 947, + "grand": 948, + "ants": 949, + "cor": 950, + "rer": 951, + "cé": 952, + "tent": 953, + "pres": 954, + "vou": 955, + "alors": 956, + "sieur": 957, + "aine": 958, + "quoi": 959, + "fon": 960, + "endant": 961, + "arri": 962, + "eure": 963, + "après": 964, + "donc": 965, + "itu": 966, + "lè": 967, + "sait": 968, + "toi": 969, + "cha": 970, + "ail": 971, + "asse": 972, + "imp": 973, 
+ "voy": 974, + "conn": 975, + "pla": 976, + "petit": 977, + "avant": 978, + "nom": 979, + "tin": 980, + "dont": 981, + "sous": 982, + "emp": 983, + "person": 984, + "elles": 985, + "beau": 986, + "parti": 987, + "cho": 988, + "prit": 989, + "toujours": 990, + "rais": 991, + "jamais": 992, + "trav": 993, + "tions": 994, + "très": 995, + "voi": 996, + "ren": 997, + "yeux": 998, + "voir": 999, + "premi": 1000, + "gne": 1001, + "heure": 1002, + "rou": 1003, + "eff": 1004, + "notre": 1005, + "ments": 1006, + "ton": 1007, + "fais": 1008, + "cela": 1009, + "répon": 1010, + "cons": 1011, + "air": 1012, + "ôt": 1013, + "pendant": 1014, + "ici": 1015, + "toute": 1016, + "jet": 1017, + "port": 1018, + "étaient": 1019, + "pen": 1020, + "hé": 1021, + "autres": 1022, + "père": 1023, + "oc": 1024, + "quelques": 1025, + "ique": 1026, + "lis": 1027, + "femme": 1028, + "jou": 1029, + "teur": 1030, + "monde": 1031, + "nes": 1032, + "dre": 1033, + "aff": 1034, + "rap": 1035, + "part": 1036, + "lement": 1037, + "cla": 1038, + "fut": 1039, + "quelque": 1040, + "prendre": 1041, + "rê": 1042, + "aille": 1043, + "sais": 1044, + "ches": 1045, + "let": 1046, + "char": 1047, + "ères": 1048, + "ents": 1049, + "moins": 1050, + "eau": 1051, + "aî": 1052, + "jeu": 1053, + "heur": 1054, + "ées": 1055, + "tri": 1056, + "point": 1057, + "mom": 1058, + "vent": 1059, + "nouv": 1060, + "gran": 1061, + "trois": 1062, + "sant": 1063, + "toutes": 1064, + "contre": 1065, + "èrent": 1066, + "chez": 1067, + "avez": 1068, + "ût": 1069, + "att": 1070, + "pau": 1071, + "porte": 1072, + "ouver": 1073, + "lit": 1074, + "prés": 1075, + "chose": 1076, + "vit": 1077, + "monsieur": 1078, + "hab": 1079, + "tête": 1080, + "ju": 1081, + "tement": 1082, + "ction": 1083, + "vrai": 1084, + "lar": 1085, + "cet": 1086, + "regard": 1087, + "lant": 1088, + "som": 1089, + "moment": 1090, + "illes": 1091, + "ple": 1092, + "ps": 1093, + "mère": 1094, + "cl": 1095, + "sour": 1096, + "ys": 1097, + "trop": 1098, + "enne": 1099, + "jusqu": 1100, + "avaient": 1101, + "avais": 1102, + "jeune": 1103, + "depuis": 1104, + "personne": 1105, + "fit": 1106, + "cert": 1107, + "jo": 1108, + "oui": 1109, + "rest": 1110, + "semb": 1111, + "cap": 1112, + "mat": 1113, + "mu": 1114, + "long": 1115, + "fran": 1116, + "faut": 1117, + "iti": 1118, + "bli": 1119, + "chev": 1120, + "pri": 1121, + "ente": 1122, + "ainsi": 1123, + "cham": 1124, + "lors": 1125, + "cas": 1126, + "ili": 1127, + "bé": 1128, + "nos": 1129, + "sui": 1130, + "rit": 1131, + "cro": 1132, + "gue": 1133, + "ía": 1134, + "por": 1135, + "las": 1136, + "ón": 1137, + "una": 1138, + "aba": 1139, + "dos": 1140, + "era": 1141, + "mb": 1142, + "para": 1143, + "ás": 1144, + "mos": 1145, + "ando": 1146, + "como": 1147, + "más": 1148, + "ción": 1149, + "tan": 1150, + "dad": 1151, + "ado": 1152, + "fu": 1153, + "cia": 1154, + "mente": 1155, + "sus": 1156, + "tar": 1157, + "za": 1158, + "ba": 1159, + "pero": 1160, + "sin": 1161, + "lla": 1162, + "án": 1163, + "ia": 1164, + "ran": 1165, + "ga": 1166, + "yo": 1167, + "tos": 1168, + "cos": 1169, + "ya": 1170, + "ones": 1171, + "había": 1172, + "hi": 1173, + "esta": 1174, + "mas": 1175, + "tor": 1176, + "aban": 1177, + "dor": 1178, + "ían": 1179, + "tas": 1180, + "én": 1181, + "endo": 1182, + "aque": 1183, + "ero": 1184, + "io": 1185, + "qué": 1186, + "cab": 1187, + "tal": 1188, + "señ": 1189, + "ora": 1190, + "todo": 1191, + "sal": 1192, + "cuando": 1193, + "gun": 1194, + "bu": 1195, + "ras": 1196, + "esto": 1197, + "pare": 1198, + "él": 1199, + "tras": 1200, + "jos": 
1201, + "mien": 1202, + "pue": 1203, + "cre": 1204, + "pon": 1205, + "día": 1206, + "tros": 1207, + "sab": 1208, + "sobre": 1209, + "ese": 1210, + "mbre": 1211, + "eron": 1212, + "añ": 1213, + "ido": 1214, + "porque": 1215, + "ella": 1216, + "cen": 1217, + "muy": 1218, + "cal": 1219, + "este": 1220, + "has": 1221, + "có": 1222, + "gra": 1223, + "ros": 1224, + "aquel": 1225, + "dijo": 1226, + "cía": 1227, + "zo": 1228, + "ciones": 1229, + "mbi": 1230, + "elo": 1231, + "tó": 1232, + "ina": 1233, + "todos": 1234, + "tien": 1235, + "estaba": 1236, + "deci": 1237, + "cio": 1238, + "ño": 1239, + "lor": 1240, + "nues": 1241, + "medi": 1242, + "len": 1243, + "vida": 1244, + "ali": 1245, + "pues": 1246, + "ales": 1247, + "vol": 1248, + "mí": 1249, + "rar": 1250, + "cion": 1251, + "hasta": 1252, + "señor": 1253, + "cono": 1254, + "ah": 1255, + "dios": 1256, + "esa": 1257, + "ún": 1258, + "var": 1259, + "san": 1260, + "gui": 1261, + "otros": 1262, + "tado": 1263, + "buen": 1264, + "ña": 1265, + "tiemp": 1266, + "hacer": 1267, + "jer": 1268, + "vu": 1269, + "ana": 1270, + "así": 1271, + "antes": 1272, + "vez": 1273, + "miento": 1274, + "jar": 1275, + "lab": 1276, + "casa": 1277, + "eso": 1278, + "ego": 1279, + "dió": 1280, + "está": 1281, + "encia": 1282, + "eli": 1283, + "ías": 1284, + "tiempo": 1285, + "zar": 1286, + "van": 1287, + "mun": 1288, + "erta": 1289, + "tambi": 1290, + "sí": 1291, + "aun": 1292, + "mismo": 1293, + "entes": 1294, + "mano": 1295, + "ele": 1296, + "nada": 1297, + "segu": 1298, + "mej": 1299, + "erra": 1300, + "tir": 1301, + "uno": 1302, + "donde": 1303, + "toda": 1304, + "desde": 1305, + "también": 1306, + "cuer": 1307, + "hombre": 1308, + "otro": 1309, + "lib": 1310, + "trar": 1311, + "cual": 1312, + "hay": 1313, + "cada": 1314, + "taba": 1315, + "mento": 1316, + "tenía": 1317, + "quer": 1318, + "eran": 1319, + "siemp": 1320, + "siempre": 1321, + "erto": 1322, + "quí": 1323, + "gos": 1324, + "pués": 1325, + "ellos": 1326, + "después": 1327, + "nue": 1328, + "llo": 1329, + "inter": 1330, + "cómo": 1331, + "ahora": 1332, + "uste": 1333, + "traba": 1334, + "lado": 1335, + "ino": 1336, + "poco": 1337, + "erte": 1338, + "mujer": 1339, + "quier": 1340, + "algun": 1341, + "fue": 1342, + "ojos": 1343, + "enton": 1344, + "vos": 1345, + "esper": 1346, + "much": 1347, + "otra": 1348, + "az": 1349, + "eza": 1350, + "aquí": 1351, + "cias": 1352, + "gua": 1353, + "mucho": 1354, + "decir": 1355, + "esti": 1356, + "idad": 1357, + "algo": 1358, + "ocu": 1359, + "entonces": 1360, + "dido": 1361, + "entos": 1362, + "gri": 1363, + "dado": 1364, + "ios": 1365, + "dose": 1366, + "usted": 1367, + "quien": 1368, + "ami": 1369, + "unto": 1370, + "mejor": 1371, + "bas": 1372, + "solo": 1373, + "pregun": 1374, + "tur": 1375, + "alg": 1376, + "todas": 1377, + "parte": 1378, + "emb": 1379, + "cto": 1380, + "mundo": 1381, + "tiene": 1382, + "tante": 1383, + "palab": 1384, + "tran": 1385, + "aquella": 1386, + "cios": 1387, + "aunque": 1388, + "cuen": 1389, + "tener": 1390, + "fun": 1391, + "respon": 1392, + "allí": 1393, + "xi": 1394, + "han": 1395, + "pens": 1396, + "contra": 1397, + "tura": 1398, + "val": 1399, + "dio": 1400, + "tanto": 1401, + "camin": 1402, + "mó": 1403, + "esp": 1404, + "ada": 1405, + "ío": 1406, + "hacia": 1407, + "dej": 1408, + "estar": 1409, + "ión": 1410, + "gas": 1411, + "vas": 1412, + "noche": 1413, + "ér": 1414, + "años": 1415, + "padre": 1416, + "gus": 1417, + "ár": 1418, + "sino": 1419, + "manos": 1420, + "cido": 1421, + "estu": 1422, + "hubi": 1423, + "vir": 1424, + "bri": 
1425, + "raz": 1426, + "chi": 1427, + "puede": 1428, + "menos": 1429, + "habi": 1430, + "homb": 1431, + "neces": 1432, + "may": 1433, + "eros": 1434, + "ría": 1435, + "hecho": 1436, + "escu": 1437, + "lti": 1438, + "ándo": 1439, + "bus": 1440, + "cosas": 1441, + "tú": 1442, + "espa": 1443, + "reci": 1444, + "ctor": 1445, + "prim": 1446, + "dia": 1447, + "dese": 1448, + "mientras": 1449, + "hor": 1450, + "fuer": 1451, + "ida": 1452, + "posi": 1453, + "lante": 1454, + "ano": 1455, + "estas": 1456, + "pli": 1457, + "luego": 1458, + "sión": 1459, + "cin": 1460, + "tierra": 1461, + "guar": 1462, + "cado": 1463, + "encon": 1464, + "pren": 1465, + "mayor": 1466, + "fal": 1467, + "ð": 1468, + "ħ": 1469, + "ň": 1470, + "ə": 1471, + "θ": 1472, + "’": 1473, + "“": 1474, + "”": 1475, + "zi": 1476, + "gli": 1477, + "tto": 1478, + "ono": 1479, + "nel": 1480, + "tti": 1481, + "della": 1482, + "zione": 1483, + "tta": 1484, + "tà": 1485, + "uo": 1486, + "come": 1487, + "alla": 1488, + "oni": 1489, + "ggi": 1490, + "ssi": 1491, + "più": 1492, + "ini": 1493, + "bb": 1494, + "sto": 1495, + "sono": 1496, + "eri": 1497, + "sse": 1498, + "sc": 1499, + "sul": 1500, + "vano": 1501, + "sti": 1502, + "suo": 1503, + "cchi": 1504, + "zza": 1505, + "anche": 1506, + "tte": 1507, + "sci": 1508, + "col": 1509, + "sso": 1510, + "ssa": 1511, + "dei": 1512, + "aveva": 1513, + "zz": 1514, + "amo": 1515, + "gno": 1516, + "sua": 1517, + "ria": 1518, + "sì": 1519, + "ché": 1520, + "dal": 1521, + "ona": 1522, + "spe": 1523, + "gni": 1524, + "tt": 1525, + "delle": 1526, + "questo": 1527, + "nella": 1528, + "dere": 1529, + "anno": 1530, + "dell": 1531, + "uni": 1532, + "bbe": 1533, + "anti": 1534, + "ene": 1535, + "gio": 1536, + "uto": 1537, + "qual": 1538, + "glia": 1539, + "quando": 1540, + "tutto": 1541, + "glio": 1542, + "zioni": 1543, + "cam": 1544, + "esso": 1545, + "ss": 1546, + "mol": 1547, + "loro": 1548, + "perché": 1549, + "cosa": 1550, + "due": 1551, + "poi": 1552, + "sco": 1553, + "cco": 1554, + "gna": 1555, + "tem": 1556, + "prima": 1557, + "così": 1558, + "essere": 1559, + "ani": 1560, + "bra": 1561, + "rio": 1562, + "anco": 1563, + "cui": 1564, + "spi": 1565, + "via": 1566, + "gior": 1567, + "bile": 1568, + "ggio": 1569, + "mai": 1570, + "tare": 1571, + "indi": 1572, + "rebbe": 1573, + "senza": 1574, + "zio": 1575, + "tutti": 1576, + "stato": 1577, + "zia": 1578, + "dalla": 1579, + "mia": 1580, + "vita": 1581, + "quella": 1582, + "qua": 1583, + "dove": 1584, + "allo": 1585, + "sempre": 1586, + "zzo": 1587, + "sia": 1588, + "dopo": 1589, + "porta": 1590, + "ccia": 1591, + "erano": 1592, + "anni": 1593, + "chia": 1594, + "enza": 1595, + "propri": 1596, + "anda": 1597, + "cca": 1598, + "occhi": 1599, + "questa": 1600, + "ffi": 1601, + "ron": 1602, + "mio": 1603, + "ris": 1604, + "ogni": 1605, + "rin": 1606, + "far": 1607, + "menti": 1608, + "ancora": 1609, + "fatto": 1610, + "mani": 1611, + "senti": 1612, + "pra": 1613, + "tempo": 1614, + "essi": 1615, + "bbi": 1616, + "lare": 1617, + "pers": 1618, + "sor": 1619, + "anza": 1620, + "pie": 1621, + "verso": 1622, + "altro": 1623, + "tato": 1624, + "cato": 1625, + "ato": 1626, + "volta": 1627, + "cc": 1628, + "fare": 1629, + "ciò": 1630, + "bili": 1631, + "nuo": 1632, + "quello": 1633, + "colo": 1634, + "ppo": 1635, + "trova": 1636, + "ore": 1637, + "rono": 1638, + "molto": 1639, + "almente": 1640, + "sca": 1641, + "vole": 1642, + "tali": 1643, + "sulla": 1644, + "sce": 1645, + "meno": 1646, + "anto": 1647, + "pun": 1648, + "stu": 1649, + "capi": 1650, + "giu": 1651, + 
"mini": 1652, + "pia": 1653, + "lavo": 1654, + "vero": 1655, + "rsi": 1656, + "altri": 1657, + "scia": 1658, + "suoi": 1659, + "glie": 1660, + "sotto": 1661, + "bene": 1662, + "scri": 1663, + "tale": 1664, + "degli": 1665, + "alc": 1666, + "uomo": 1667, + "pel": 1668, + "pote": 1669, + "essa": 1670, + "scu": 1671, + "signo": 1672, + "stro": 1673, + "uti": 1674, + "sione": 1675, + "gre": 1676, + "fini": 1677, + "lun": 1678, + "esi": 1679, + "passa": 1680, + "rà": 1681, + "mentre": 1682, + "hanno": 1683, + "usci": 1684, + "gia": 1685, + "già": 1686, + "mina": 1687, + "tica": 1688, + "giorno": 1689, + "esse": 1690, + "modo": 1691, + "spa": 1692, + "proprio": 1693, + "ori": 1694, + "contro": 1695, + "stru": 1696, + "diven": 1697, + "disse": 1698, + "rato": 1699, + "noi": 1700, + "vere": 1701, + "può": 1702, + "dice": 1703, + "cci": 1704, + "secon": 1705, + "ccio": 1706, + "qualche": 1707, + "tutta": 1708, + "gg": 1709, + "mondo": 1710, + "forma": 1711, + "mma": 1712, + "pensa": 1713, + "deva": 1714, + "fosse": 1715, + "sopra": 1716, + "tamente": 1717, + "ness": 1718, + "quanto": 1719, + "raga": 1720, + "unque": 1721, + "care": 1722, + "stre": 1723, + "grande": 1724, + "picco": 1725, + "guarda": 1726, + "nell": 1727, + "possi": 1728, + "presen": 1729, + "rò": 1730, + "paro": 1731, + "tua": 1732, + "vin": 1733, + "ane": 1734, + "stesso": 1735, + "dav": 1736, + "nei": 1737, + "nelle": 1738, + "ghi": 1739, + "pio": 1740, + "lato": 1741, + "sid": 1742, + "fine": 1743, + "fuo": 1744, + "quasi": 1745, + "ulti": 1746, + "ito": 1747, + "sue": 1748, + "fil": 1749, + "allora": 1750, + "veni": 1751, + "tano": 1752, + "ello": 1753, + "ão": 1754, + "não": 1755, + "uma": 1756, + "ela": 1757, + "lh": 1758, + "ção": 1759, + "cê": 1760, + "inha": 1761, + "você": 1762, + "ec": 1763, + "dade": 1764, + "ao": 1765, + "ram": 1766, + "vel": 1767, + "ém": 1768, + "pode": 1769, + "estava": 1770, + "isso": 1771, + "mui": 1772, + "faz": 1773, + "ões": 1774, + "pes": 1775, + "ix": 1776, + "sim": 1777, + "olh": 1778, + "isa": 1779, + "ên": 1780, + "tinha": 1781, + "meu": 1782, + "são": 1783, + "minha": 1784, + "muito": 1785, + "foi": 1786, + "bem": 1787, + "diz": 1788, + "parec": 1789, + "ço": 1790, + "pesso": 1791, + "pois": 1792, + "mesmo": 1793, + "ções": 1794, + "seus": 1795, + "até": 1796, + "ência": 1797, + "lhe": 1798, + "tiv": 1799, + "mã": 1800, + "só": 1801, + "tão": 1802, + "tudo": 1803, + "então": 1804, + "inda": 1805, + "bal": 1806, + "indo": 1807, + "ndo": 1808, + "já": 1809, + "vam": 1810, + "eito": 1811, + "depois": 1812, + "mel": 1813, + "lha": 1814, + "ainda": 1815, + "fazer": 1816, + "pou": 1817, + "pergun": 1818, + "deix": 1819, + "tamb": 1820, + "ala": 1821, + "pelo": 1822, + "também": 1823, + "fica": 1824, + "prec": 1825, + "eles": 1826, + "havia": 1827, + "lá": 1828, + "nas": 1829, + "gem": 1830, + "mem": 1831, + "ós": 1832, + "deu": 1833, + "eiro": 1834, + "..": 1835, + "assim": 1836, + "ior": 1837, + "har": 1838, + "aqui": 1839, + "cul": 1840, + "sar": 1841, + "outra": 1842, + "olhos": 1843, + "ima": 1844, + "mim": 1845, + "ago": 1846, + "pessoas": 1847, + "eram": 1848, + "eira": 1849, + "pela": 1850, + "coisa": 1851, + "mão": 1852, + "conh": 1853, + "agora": 1854, + "iam": 1855, + "há": 1856, + "suas": 1857, + "guém": 1858, + "cabe": 1859, + "nem": 1860, + "ível": 1861, + "consegu": 1862, + "trabal": 1863, + "lev": 1864, + "lem": 1865, + "vai": 1866, + "tei": 1867, + "pró": 1868, + "quem": 1869, + "onde": 1870, + "cabeça": 1871, + "nunca": 1872, + "mentos": 1873, + "hum": 1874, + "dele": 1875, + 
"verdade": 1876, + "tá": 1877, + "hos": 1878, + "algum": 1879, + "dizer": 1880, + "penas": 1881, + "nós": 1882, + "enquanto": 1883, + "outro": 1884, + "lho": 1885, + "melhor": 1886, + "primei": 1887, + "iu": 1888, + "apenas": 1889, + "estou": 1890, + "conte": 1891, + "homem": 1892, + "dois": 1893, + "ças": 1894, + "pouco": 1895, + "senhor": 1896, + "tando": 1897, + "espera": 1898, + "pai": 1899, + "rios": 1900, + "baix": 1901, + "ase": 1902, + "isas": 1903, + "hora": 1904, + "ficar": 1905, + "seja": 1906, + "ân": 1907, + "clar": 1908, + "inc": 1909, + "fos": 1910, + "ouvi": 1911, + "vem": 1912, + "tava": 1913, + "ário": 1914, + "sos": 1915, + "inho": 1916, + "rando": 1917, + "ês": 1918, + "coisas": 1919, + "aconte": 1920, + "lher": 1921, + "anos": 1922, + "talvez": 1923, + "estão": 1924, + "liv": 1925, + "outros": 1926, + "qualquer": 1927, + "gou": 1928, + "lí": 1929, + "tivesse": 1930, + "rado": 1931, + "precisa": 1932, + "mãe": 1933, + "dela": 1934, + "entra": 1935, + "maior": 1936, + "noite": 1937, + "tiva": 1938, + "pala": 1939, + "ração": 1940, + "deus": 1941, + "sas": 1942, + "inte": 1943, + "fei": 1944, + "palav": 1945, + "trás": 1946, + "cidade": 1947, + "lugar": 1948, + "vezes": 1949, + "encontra": 1950, + "tru": 1951, + "eci": 1952, + "ın": 1953, + "bir": 1954, + "yor": 1955, + "ek": 1956, + "dı": 1957, + "ey": 1958, + "tı": 1959, + "mı": 1960, + "iz": 1961, + "ır": 1962, + "gö": 1963, + "sı": 1964, + "bil": 1965, + "lı": 1966, + "üz": 1967, + "iç": 1968, + "iy": 1969, + "ım": 1970, + "uz": 1971, + "cak": 1972, + "iş": 1973, + "ını": 1974, + "iyor": 1975, + "baş": 1976, + "dü": 1977, + "değ": 1978, + "kar": 1979, + "ev": 1980, + "öy": 1981, + "bun": 1982, + "yap": 1983, + "sun": 1984, + "gör": 1985, + "yı": 1986, + "ki": 1987, + "ara": 1988, + "alı": 1989, + "onu": 1990, + "çı": 1991, + "şey": 1992, + "sın": 1993, + "kı": 1994, + "kad": 1995, + "ağ": 1996, + "değil": 1997, + "ük": 1998, + "çok": 1999, + "şı": 2000, + "ül": 2001, + "için": 2002, + "eye": 2003, + "oldu": 2004, + "mış": 2005, + "kal": 2006, + "mek": 2007, + "öyle": 2008, + "yordu": 2009, + "yüz": 2010, + "miş": 2011, + "mak": 2012, + "ola": 2013, + "yan": 2014, + "cek": 2015, + "yorum": 2016, + "bak": 2017, + "üm": 2018, + "ları": 2019, + "oğ": 2020, + "kadar": 2021, + "arı": 2022, + "ında": 2023, + "gün": 2024, + "yok": 2025, + "yer": 2026, + "dım": 2027, + "daha": 2028, + "ına": 2029, + "dim": 2030, + "bilir": 2031, + "iki": 2032, + "siz": 2033, + "diğ": 2034, + "bü": 2035, + "düş": 2036, + "üç": 2037, + "unu": 2038, + "aman": 2039, + "fak": 2040, + "ede": 2041, + "sonra": 2042, + "hiç": 2043, + "aki": 2044, + "ğı": 2045, + "bul": 2046, + "maz": 2047, + "anla": 2048, + "bura": 2049, + "geç": 2050, + "maya": 2051, + "konu": 2052, + "din": 2053, + "tek": 2054, + "zaman": 2055, + "eler": 2056, + "öz": 2057, + "dır": 2058, + "gibi": 2059, + "şa": 2060, + "leri": 2061, + "kim": 2062, + "ku": 2063, + "fakat": 2064, + "yar": 2065, + "göz": 2066, + "cı": 2067, + "yorsun": 2068, + "bek": 2069, + "inde": 2070, + "pek": 2071, + "bunu": 2072, + "lik": 2073, + "iler": 2074, + "edi": 2075, + "öl": 2076, + "sür": 2077, + "sır": 2078, + "çık": 2079, + "sıl": 2080, + "alar": 2081, + "kes": 2082, + "yak": 2083, + "çek": 2084, + "yıl": 2085, + "ecek": 2086, + "ız": 2087, + "git": 2088, + "kap": 2089, + "ama": 2090, + "ıl": 2091, + "ların": 2092, + "biz": 2093, + "tır": 2094, + "oy": 2095, + "ancak": 2096, + "doğ": 2097, + "bana": 2098, + "şim": 2099, + "başla": 2100, + "lü": 2101, + "madı": 2102, + "beni": 2103, + "yük": 2104, + 
"lık": 2105, + "beş": 2106, + "nasıl": 2107, + "tık": 2108, + "tür": 2109, + "daki": 2110, + "ceğ": 2111, + "zı": 2112, + "iyi": 2113, + "dok": 2114, + "benim": 2115, + "cağ": 2116, + "yen": 2117, + "şu": 2118, + "mez": 2119, + "düşün": 2120, + "kendi": 2121, + "şimdi": 2122, + "yol": 2123, + "yu": 2124, + "iste": 2125, + "sek": 2126, + "mam": 2127, + "söyle": 2128, + "dik": 2129, + "kur": 2130, + "olduğ": 2131, + "sını": 2132, + "biliyor": 2133, + "kan": 2134, + "yal": 2135, + "meye": 2136, + "muş": 2137, + "kaç": 2138, + "iye": 2139, + "tü": 2140, + "ef": 2141, + "tım": 2142, + "evet": 2143, + "yet": 2144, + "burada": 2145, + "tim": 2146, + "biraz": 2147, + "kor": 2148, + "doğru": 2149, + "inin": 2150, + "kız": 2151, + "diye": 2152, + "dör": 2153, + "etti": 2154, + "onun": 2155, + "isti": 2156, + "ği": 2157, + "sana": 2158, + "üş": 2159, + "arka": 2160, + "hayır": 2161, + "karşı": 2162, + "ile": 2163, + "hak": 2164, + "ıyor": 2165, + "neden": 2166, + "sev": 2167, + "sız": 2168, + "çocu": 2169, + "çalı": 2170, + "olur": 2171, + "bır": 2172, + "gir": 2173, + "ise": 2174, + "ih": 2175, + "kır": 2176, + "dön": 2177, + "böyle": 2178, + "seni": 2179, + "!\"": 2180, + "dört": 2181, + "söy": 2182, + "oş": 2183, + "musun": 2184, + "laş": 2185, + "ip": 2186, + "kay": 2187, + "hem": 2188, + "büyük": 2189, + "aç": 2190, + "bırak": 2191, + "misin": 2192, + "söz": 2193, + "değiş": 2194, + "ünü": 2195, + "gül": 2196, + "kö": 2197, + "karı": 2198, + "tamam": 2199, + "olu": 2200, + "yeni": 2201, + "lam": 2202, + "mıştı": 2203, + "yaş": 2204, + "iniz": 2205, + "kadın": 2206, + "bunun": 2207, + "mey": 2208, + "altı": 2209, + "yi": 2210, + "inden": 2211, + "senin": 2212, + "yat": 2213, + "top": 2214, + "isi": 2215, + "dün": 2216, + "hiçbir": 2217, + "yon": 2218, + "dın": 2219, + "tün": 2220, + "başka": 2221, + "hep": 2222, + "irmi": 2223, + "devam": 2224, + "olacak": 2225, + "artık": 2226, + "durum": 2227, + "imiz": 2228, + "üzel": 2229, + "lerini": 2230, + "sağ": 2231, + "gerek": 2232, + "yirmi": 2233, + "şek": 2234, + "bağ": 2235, + "lara": 2236, + "yür": 2237, + "ması": 2238, + "katı": 2239, + "dedi": 2240, + "gü": 2241, + "sorun": 2242, + "üne": 2243, + "mız": 2244, + "yapı": 2245, + "mil": 2246, + "ğını": 2247, + "tara": 2248, + "vardı": 2249, + "konuş": 2250, + "arak": 2251, + "larak": 2252, + "çocuk": 2253, + "bütün": 2254, + "ley": 2255, + "dür": 2256, + "güzel": 2257, + "ayı": 2258, + "yapa": 2259, + "nı": 2260, + "ayr": 2261, + "öne": 2262, + "yordum": 2263, + "ban": 2264, + "i̇ş": 2265, + "dum": 2266, + "yorlar": 2267, + "larını": 2268, + "çıkar": 2269, + "zan": 2270, + "seç": 2271, + "liyor": 2272, + "tak": 2273, + "şık": 2274, + "tekrar": 2275, + "aş": 2276, + "eş": 2277, + "mişti": 2278, + "kin": 2279, + "imi": 2280, + "eğ": 2281, + "gidi": 2282, + "leş": 2283, + "başladı": 2284, + "gide": 2285, + "otur": 2286, + "dde": 2287, + "ından": 2288, + "üzer": 2289, + "ının": 2290, + "nız": 2291, + "uy": 2292, + "yedi": 2293, + "kat": 2294, + "olarak": 2295, + "ladı": 2296, + "yalnız": 2297, + "bah": 2298, + "iyet": 2299, + "sak": 2300, + "açık": 2301, + "sında": 2302, + "...": 2303, + "insan": 2304, + "aynı": 2305, + "eder": 2306, + "istan": 2307, + "uzun": 2308, + "geri": 2309, + "erek": 2310, + "olan": 2311, + "gerçek": 2312, + "alan": 2313, + "dış": 2314, + "alık": 2315, + "fark": 2316, + "üst": 2317, + "sade": 2318, + "kiş": 2319, + "ldı": 2320, + "zor": 2321, + "etir": 2322, + "herkes": 2323, + "ömer": 2324, + "unda": 2325, + "haf": 2326, + "buna": 2327, + "ydı": 2328, + "peki": 2329, + "adam": 
2330, + "haz": 2331, + "sına": 2332, + "kapı": 2333, + "görüş": 2334, + "sadece": 2335, + "aldı": 2336, + "geldi": 2337, + "rz": 2338, + "sz": 2339, + "cz": 2340, + "ię": 2341, + "dz": 2342, + "ał": 2343, + "się": 2344, + "rze": 2345, + "że": 2346, + "wy": 2347, + "rzy": 2348, + "ła": 2349, + "ło": 2350, + "ny": 2351, + "dzie": 2352, + "dzi": 2353, + "czy": 2354, + "cie": 2355, + "prze": 2356, + "dy": 2357, + "kie": 2358, + "ry": 2359, + "ją": 2360, + "ów": 2361, + "przy": 2362, + "mie": 2363, + "szy": 2364, + "cze": 2365, + "bie": 2366, + "cy": 2367, + "nia": 2368, + "ści": 2369, + "sze": 2370, + "jest": 2371, + "ży": 2372, + "ną": 2373, + "któ": 2374, + "ała": 2375, + "mnie": 2376, + "ły": 2377, + "cza": 2378, + "jak": 2379, + "roz": 2380, + "ró": 2381, + "zna": 2382, + "łu": 2383, + "ść": 2384, + "wia": 2385, + "wszy": 2386, + "spo": 2387, + "gdy": 2388, + "wał": 2389, + "wię": 2390, + "łem": 2391, + "ję": 2392, + "sk": 2393, + "rę": 2394, + "dob": 2395, + "już": 2396, + "bę": 2397, + "ałem": 2398, + "sza": 2399, + "pod": 2400, + "dla": 2401, + "pan": 2402, + "nę": 2403, + "może": 2404, + "śli": 2405, + "ało": 2406, + "lko": 2407, + "nych": 2408, + "powie": 2409, + "cię": 2410, + "tylko": 2411, + "naj": 2412, + "tego": 2413, + "ski": 2414, + "nego": 2415, + "wszyst": 2416, + "szcze": 2417, + "jed": 2418, + "jej": 2419, + "two": 2420, + "ąd": 2421, + "śmy": 2422, + "czę": 2423, + "wać": 2424, + "jego": 2425, + "ża": 2426, + "sy": 2427, + "praw": 2428, + "tym": 2429, + "który": 2430, + "ały": 2431, + "trze": 2432, + "niej": 2433, + "nym": 2434, + "gło": 2435, + "jąc": 2436, + "mówi": 2437, + "ska": 2438, + "nej": 2439, + "słu": 2440, + "wła": 2441, + "będzie": 2442, + "dę": 2443, + "pó": 2444, + "bez": 2445, + "nic": 2446, + "pła": 2447, + "ście": 2448, + "są": 2449, + "trzy": 2450, + "kiem": 2451, + "był": 2452, + "mog": 2453, + "robi": 2454, + "tam": 2455, + "mię": 2456, + "zy": 2457, + "pew": 2458, + "myś": 2459, + "przed": 2460, + "sko": 2461, + "które": 2462, + "lę": 2463, + "wsze": 2464, + "ąc": 2465, + "było": 2466, + "sobie": 2467, + "py": 2468, + "cią": 2469, + "jeszcze": 2470, + "tę": 2471, + "czas": 2472, + "szę": 2473, + "gł": 2474, + "kę": 2475, + "czu": 2476, + "przez": 2477, + "sło": 2478, + "wz": 2479, + "kto": 2480, + "ków": 2481, + "czo": 2482, + "liśmy": 2483, + "więc": 2484, + "rą": 2485, + "wó": 2486, + "rza": 2487, + "ności": 2488, + "wet": 2489, + "nął": 2490, + "śmie": 2491, + "nawet": 2492, + "musi": 2493, + "swo": 2494, + "tej": 2495, + "wą": 2496, + "wu": 2497, + "wią": 2498, + "niu": 2499, + "czą": 2500, + "dzo": 2501, + "skie": 2502, + "jeśli": 2503, + "czego": 2504, + "chy": 2505, + "dł": 2506, + "tych": 2507, + "bym": 2508, + "żo": 2509, + "eś": 2510, + "sią": 2511, + "kiedy": 2512, + "wró": 2513, + "dze": 2514, + "dro": 2515, + "rów": 2516, + "pani": 2517, + "kul": 2518, + "nad": 2519, + "chwi": 2520, + "nim": 2521, + "być": 2522, + "chodzi": 2523, + "nio": 2524, + "dobrze": 2525, + "teraz": 2526, + "wokul": 2527, + "coś": 2528, + "kł": 2529, + "pier": 2530, + "gdzie": 2531, + "dzy": 2532, + "pię": 2533, + "dź": 2534, + "ką": 2535, + "gó": 2536, + "zda": 2537, + "chce": 2538, + "stę": 2539, + "świa": 2540, + "wszystko": 2541, + "peł": 2542, + "wiem": 2543, + "wiel": 2544, + "każ": 2545, + "rzu": 2546, + "sły": 2547, + "jedna": 2548, + "myśl": 2549, + "mój": 2550, + "jestem": 2551, + "óż": 2552, + "miej": 2553, + "moż": 2554, + "kła": 2555, + "resz": 2556, + "dłu": 2557, + "stwo": 2558, + "nię": 2559, + "masz": 2560, + "żeby": 2561, + "niem": 2562, + 
"jakie": 2563, + "sty": 2564, + "nią": 2565, + "wej": 2566, + "oj": 2567, + "sła": 2568, + "ność": 2569, + "zło": 2570, + "szczę": 2571, + "lej": 2572, + "wego": 2573, + "cał": 2574, + "dział": 2575, + "kich": 2576, + "dza": 2577, + "dzię": 2578, + "oczy": 2579, + "zosta": 2580, + "czło": 2581, + "nam": 2582, + "kil": 2583, + "szu": 2584, + "wę": 2585, + "miał": 2586, + "strze": 2587, + "cej": 2588, + "ej": 2589, + "znaj": 2590, + "dać": 2591, + "miejs": 2592, + "kró": 2593, + "kry": 2594, + "bardzo": 2595, + "śnie": 2596, + "lą": 2597, + "gie": 2598, + "ciebie": 2599, + "dni": 2600, + "potrze": 2601, + "wokulski": 2602, + "uwa": 2603, + "umie": 2604, + "jednak": 2605, + "kra": 2606, + "wróci": 2607, + "człowie": 2608, + "czyć": 2609, + "była": 2610, + "żeli": 2611, + "mę": 2612, + "cę": 2613, + "zrobi": 2614, + "mogę": 2615, + "prowa": 2616, + "rem": 2617, + "niech": 2618, + "cznie": 2619, + "kro": 2620, + "tą": 2621, + "chci": 2622, + "bro": 2623, + "dzieć": 2624, + "szą": 2625, + "pad": 2626, + "trz": 2627, + "jem": 2628, + "tów": 2629, + "dru": 2630, + "taj": 2631, + "rzekł": 2632, + "niego": 2633, + "takie": 2634, + "wała": 2635, + "towa": 2636, + "kapła": 2637, + "widzi": 2638, + "podob": 2639, + "dzę": 2640, + "tał": 2641, + "stęp": 2642, + "bą": 2643, + "poko": 2644, + "wem": 2645, + "gę": 2646, + "aby": 2647, + "albo": 2648, + "spra": 2649, + "zno": 2650, + "smo": 2651, + "jesz": 2652, + "księ": 2653, + "jesteś": 2654, + "poz": 2655, + "nigdy": 2656, + "ksią": 2657, + "cóż": 2658, + "ws": 2659, + "pow": 2660, + "tka": 2661, + "świe": 2662, + "szka": 2663, + "samo": 2664, + "sł": 2665, + "rzę": 2666, + "nale": 2667, + "chcesz": 2668, + "nik": 2669, + "pę": 2670, + "chyba": 2671, + "ciąg": 2672, + "jący": 2673, + "woj": 2674, + "nasze": 2675, + "mniej": 2676, + "więcej": 2677, + "zwy": 2678, + "osta": 2679, + "waż": 2680, + "śmier": 2681, + "wier": 2682, + "dzą": 2683, + "zaś": 2684, + "gdyby": 2685, + "jaki": 2686, + "wol": 2687, + "win": 2688, + "dą": 2689, + "ścia": 2690, + "rozma": 2691, + "wal": 2692, + "panie": 2693, + "star": 2694, + "kaz": 2695, + "jeżeli": 2696, + "wra": 2697, + "koń": 2698, + "siebie": 2699, + "znowu": 2700, + "czem": 2701, + "stwa": 2702, + "isto": 2703, + "pół": 2704, + "dał": 2705, + "kobie": 2706, + "ałam": 2707, + "wych": 2708, + "cesa": 2709, + "nich": 2710, + "zawsze": 2711, + "dzić": 2712, + "też": 2713, + "lepie": 2714, + "proszę": 2715, + "kre": 2716, + "twa": 2717, + "łą": 2718, + "chu": 2719, + "cą": 2720, + "prz": 2721, + "łe": 2722, + "szedł": 2723, + "odpowie": 2724, + "myśli": 2725, + "świą": 2726, + "ź": 2727, + "ł": 2728, + "&": 2729, + "=": 2730, + "ă": 2731, + "đ": 2732, + "ţ": 2733, + "–": 2734, + "‘": 2735, + "ij": 2736, + "aa": 2737, + "een": 2738, + "het": 2739, + "aar": 2740, + "oor": 2741, + "ijn": 2742, + "dat": 2743, + "oe": 2744, + "ijk": 2745, + "aan": 2746, + "voor": 2747, + "iet": 2748, + "zijn": 2749, + "niet": 2750, + "oo": 2751, + "moet": 2752, + "heb": 2753, + "uit": 2754, + "wij": 2755, + "aat": 2756, + "lijk": 2757, + "sl": 2758, + "daar": 2759, + "deze": 2760, + "worden": 2761, + "moeten": 2762, + "onder": 2763, + "hebben": 2764, + "ook": 2765, + "ct": 2766, + "nog": 2767, + "aal": 2768, + "eer": 2769, + "bij": 2770, + "mijn": 2771, + "kom": 2772, + "atie": 2773, + "eft": 2774, + "kel": 2775, + "rij": 2776, + "heid": 2777, + "af": 2778, + "stel": 2779, + "maar": 2780, + "wee": 2781, + "heeft": 2782, + "waar": 2783, + "eren": 2784, + "wat": 2785, + "wil": 2786, + "aag": 2787, + "bet": 2788, + "hij": 2789, + "kun": 
2790, + "uw": 2791, + "dt": 2792, + "door": 2793, + "tij": 2794, + "ond": 2795, + "geen": 2796, + "gev": 2797, + "veel": 2798, + "naar": 2799, + "aten": 2800, + "kunnen": 2801, + "echt": 2802, + "goe": 2803, + "twee": 2804, + "delijk": 2805, + "uur": 2806, + "toe": 2807, + "meer": 2808, + "onze": 2809, + "tijd": 2810, + "hoe": 2811, + "tot": 2812, + "zou": 2813, + "aak": 2814, + "amen": 2815, + "woor": 2816, + "wordt": 2817, + "gelijk": 2818, + "gaan": 2819, + "ker": 2820, + "eld": 2821, + "hou": 2822, + "zel": 2823, + "tegen": 2824, + "komen": 2825, + "werk": 2826, + "goed": 2827, + "zal": 2828, + "zij": 2829, + "slag": 2830, + "zien": 2831, + "echter": 2832, + "itie": 2833, + "tie": 2834, + "elijk": 2835, + "ische": 2836, + "belan": 2837, + "haar": 2838, + "vr": 2839, + "grijk": 2840, + "doen": 2841, + "land": 2842, + "belangrijk": 2843, + "open": 2844, + "ctie": 2845, + "zelf": 2846, + "mij": 2847, + "iteit": 2848, + "stem": 2849, + "mee": 2850, + "aren": 2851, + "dien": 2852, + "gaat": 2853, + "prob": 2854, + "moe": 2855, + "ullen": 2856, + "zich": 2857, + "daarom": 2858, + "orm": 2859, + "staat": 2860, + "zit": 2861, + "dui": 2862, + "dus": 2863, + "ds": 2864, + "verslag": 2865, + "kelijk": 2866, + "proble": 2867, + "schap": 2868, + "gd": 2869, + "hun": 2870, + "erd": 2871, + "zet": 2872, + "staan": 2873, + "maal": 2874, + "inder": 2875, + "eid": 2876, + "kken": 2877, + "ged": 2878, + "zullen": 2879, + "mensen": 2880, + "jaar": 2881, + "regel": 2882, + "ieder": 2883, + "volgen": 2884, + "geven": 2885, + "even": 2886, + "blij": 2887, + "ië": 2888, + "uwe": 2889, + "maken": 2890, + "oek": 2891, + "nieuwe": 2892, + "baar": 2893, + "andere": 2894, + "ruik": 2895, + "agen": 2896, + "ouw": 2897, + "willen": 2898, + "aakt": 2899, + "hoo": 2900, + "anden": 2901, + "lig": 2902, + "samen": 2903, + "zeer": 2904, + "duidelijk": 2905, + "antwoor": 2906, + "heel": 2907, + "punt": 2908, + "houden": 2909, + "vraag": 2910, + "gele": 2911, + "eens": 2912, + "besch": 2913, + "omen": 2914, + "erg": 2915, + "doel": 2916, + "dag": 2917, + "uren": 2918, + "ings": 2919, + "oren": 2920, + "delen": 2921, + "steun": 2922, + "innen": 2923, + "pol": 2924, + "oon": 2925, + "sn": 2926, + "zonder": 2927, + "nodig": 2928, + "alleen": 2929, + "mid": 2930, + "ragen": 2931, + "iets": 2932, + "versch": 2933, + "gebruik": 2934, + "rouw": 2935, + "stellen": 2936, + "menten": 2937, + "eerste": 2938, + "laat": 2939, + "groot": 2940, + "ood": 2941, + "toch": 2942, + "laten": 2943, + "aard": 2944, + "sle": 2945, + "deel": 2946, + "plaat": 2947, + "ree": 2948, + "betre": 2949, + "lid": 2950, + "uiten": 2951, + "racht": 2952, + "beleid": 2953, + "stie": 2954, + "staten": 2955, + "ggen": 2956, + "reken": 2957, + "alen": 2958, + "ming": 2959, + "mogelijk": 2960, + "grote": 2961, + "altijd": 2962, + "enkel": 2963, + "wik": 2964, + "politie": 2965, + "elk": 2966, + "handel": 2967, + "kwe": 2968, + "maat": 2969, + "elen": 2970, + "vrij": 2971, + "jes": 2972, + "aam": 2973, + "huis": 2974, + "weer": 2975, + "lidstaten": 2976, + "king": 2977, + "kle": 2978, + "bed": 2979, + "geval": 2980, + "wikkel": 2981, + "kwestie": 2982, + "stee": 2983, + "hel": 2984, + "komst": 2985, + "iden": 2986, + "eerd": 2987, + "tweede": 2988, + "probleem": 2989, + "ussen": 2990, + "snel": 2991, + "tig": 2992, + "ult": 2993, + "nemen": 2994, + "commis": 2995, + "verschil": 2996, + "zoek": 2997, + "krij": 2998, + "graag": 2999, + "denk": 3000, + "landen": 3001, + "reden": 3002, + "besl": 3003, + "oeg": 3004, + "beter": 3005, + "heden": 3006, + "mag": 3007, + 
"boven": 3008, + "cont": 3009, + "fd": 3010, + "hele": 3011, + "vier": 3012, + "gez": 3013, + "kw": 3014, + "aas": 3015, + "ontwikkel": 3016, + "drie": 3017, + "vaak": 3018, + "plaats": 3019, + "gang": 3020, + "ijf": 3021, + "natuur": 3022, + "tussen": 3023, + "bat": 3024, + "komt": 3025, + "wacht": 3026, + "aad": 3027, + "achter": 3028, + "gebie": 3029, + "verk": 3030, + "ligt": 3031, + "nieuw": 3032, + "vand": 3033, + "ý": 3034, + "ď": 3035, + "ě": 3036, + "ř": 3037, + "ť": 3038, + "ů": 3039, + "„": 3040, + "ní": 3041, + "ně": 3042, + "ře": 3043, + "ná": 3044, + "vě": 3045, + "vá": 3046, + "rá": 3047, + "vy": 3048, + "mě": 3049, + "ři": 3050, + "ří": 3051, + "že": 3052, + "jí": 3053, + "vý": 3054, + "ji": 3055, + "dě": 3056, + "če": 3057, + "tě": 3058, + "ky": 3059, + "še": 3060, + "ké": 3061, + "ší": 3062, + "pře": 3063, + "ví": 3064, + "ný": 3065, + "ži": 3066, + "má": 3067, + "cí": 3068, + "zá": 3069, + "ské": 3070, + "dá": 3071, + "byl": 3072, + "tí": 3073, + "pří": 3074, + "při": 3075, + "či": 3076, + "vní": 3077, + "ča": 3078, + "dí": 3079, + "dní": 3080, + "ká": 3081, + "nou": 3082, + "vět": 3083, + "pě": 3084, + "kou": 3085, + "ých": 3086, + "bě": 3087, + "prá": 3088, + "jako": 3089, + "ží": 3090, + "zí": 3091, + "jsou": 3092, + "jsem": 3093, + "lní": 3094, + "cké": 3095, + "vat": 3096, + "před": 3097, + "hla": 3098, + "stá": 3099, + "čí": 3100, + "ši": 3101, + "kla": 3102, + "ště": 3103, + "lou": 3104, + "mů": 3105, + "chá": 3106, + "pů": 3107, + "také": 3108, + "dů": 3109, + "nost": 3110, + "tře": 3111, + "sku": 3112, + "vše": 3113, + "tní": 3114, + "byla": 3115, + "ční": 3116, + "jeho": 3117, + "bý": 3118, + "vání": 3119, + "ných": 3120, + "tři": 3121, + "vz": 3122, + "stře": 3123, + "dva": 3124, + "hle": 3125, + "čá": 3126, + "nosti": 3127, + "vš": 3128, + "hra": 3129, + "jen": 3130, + "slo": 3131, + "však": 3132, + "kdy": 3133, + "bylo": 3134, + "bude": 3135, + "jší": 3136, + "vých": 3137, + "ním": 3138, + "sm": 3139, + "koli": 3140, + "rů": 3141, + "může": 3142, + "není": 3143, + "hod": 3144, + "bí": 3145, + "tý": 3146, + "stě": 3147, + "uje": 3148, + "sá": 3149, + "pět": 3150, + "krá": 3151, + "tom": 3152, + "ství": 3153, + "vně": 3154, + "sed": 3155, + "své": 3156, + "pí": 3157, + "musí": 3158, + "už": 3159, + "tím": 3160, + "jící": 3161, + "jedno": 3162, + "čas": 3163, + "čty": 3164, + "ský": 3165, + "evro": 3166, + "toho": 3167, + "hy": 3168, + "kter": 3169, + "rní": 3170, + "stí": 3171, + "svě": 3172, + "pak": 3173, + "všech": 3174, + "ků": 3175, + "ng": 3176, + "ád": 3177, + "chází": 3178, + "být": 3179, + "první": 3180, + "mno": 3181, + "ského": 3182, + "pá": 3183, + "nebo": 3184, + "kem": 3185, + "sla": 3186, + "ného": 3187, + "zde": 3188, + "další": 3189, + "řa": 3190, + "čtyři": 3191, + "hrá": 3192, + "druh": 3193, + "lně": 3194, + "vla": 3195, + "ských": 3196, + "ško": 3197, + "půso": 3198, + "proto": 3199, + "vů": 3200, + "ská": 3201, + "šest": 3202, + "dně": 3203, + "ještě": 3204, + "mezi": 3205, + "několi": 3206, + "již": 3207, + "čně": 3208, + "slu": 3209, + "zná": 3210, + "sedm": 3211, + "vlá": 3212, + "osm": 3213, + "byly": 3214, + "vám": 3215, + "cký": 3216, + "tech": 3217, + "ději": 3218, + "velmi": 3219, + "leži": 3220, + "vala": 3221, + "lý": 3222, + "tvo": 3223, + "spole": 3224, + "stup": 3225, + "mož": 3226, + "evrop": 3227, + "stal": 3228, + "jde": 3229, + "rodi": 3230, + "její": 3231, + "poli": 3232, + "devět": 3233, + "sme": 3234, + "až": 3235, + "této": 3236, + "tento": 3237, + "kaž": 3238, + "nula": 3239, + "bych": 3240, + "moc": 3241, + "stou": 
3242, + "kdo": 3243, + "zd": 3244, + "praco": 3245, + "tomu": 3246, + "ným": 3247, + "živo": 3248, + "zem": 3249, + "násle": 3250, + "sky": 3251, + "jich": 3252, + "měl": 3253, + "děla": 3254, + "jsme": 3255, + "nice": 3256, + "stej": 3257, + "stní": 3258, + "náro": 3259, + "nit": 3260, + "později": 3261, + "tako": 3262, + "nce": 3263, + "čer": 3264, + "ším": 3265, + "něco": 3266, + "vál": 3267, + "řej": 3268, + "krát": 3269, + "ální": 3270, + "asi": 3271, + "které": 3272, + "stav": 3273, + "mají": 3274, + "mys": 3275, + "době": 3276, + "sně": 3277, + "zku": 3278, + "tů": 3279, + "chod": 3280, + "spě": 3281, + "jejich": 3282, + "součas": 3283, + "vali": 3284, + "kte": 3285, + "prů": 3286, + "zení": 3287, + "pat": 3288, + "potře": 3289, + "dnes": 3290, + "zemí": 3291, + "znam": 3292, + "mám": 3293, + "tedy": 3294, + "hlavní": 3295, + "použí": 3296, + "bní": 3297, + "vede": 3298, + "lep": 3299, + "jek": 3300, + "prav": 3301, + "politi": 3302, + "dne": 3303, + "čení": 3304, + "než": 3305, + "děl": 3306, + "čo": 3307, + "cích": 3308, + "sté": 3309, + "dlou": 3310, + "několik": 3311, + "vyu": 3312, + "ckých": 3313, + "nové": 3314, + "čin": 3315, + "dělá": 3316, + "ký": 3317, + "obla": 3318, + "podle": 3319, + "důleži": 3320, + "poku": 3321, + "kone": 3322, + "dý": 3323, + "dvě": 3324, + "žád": 3325, + "nout": 3326, + "tku": 3327, + "tvr": 3328, + "ckého": 3329, + "rov": 3330, + "tele": 3331, + "psa": 3332, + "svět": 3333, + "tivní": 3334, + "dosta": 3335, + "šel": 3336, + "druhé": 3337, + "skou": 3338, + "žo": 3339, + "jedná": 3340, + "význam": 3341, + "problé": 3342, + "publi": 3343, + "ván": 3344, + "odpo": 3345, + "podpo": 3346, + "dle": 3347, + "jaké": 3348, + "šení": 3349, + "vím": 3350, + "během": 3351, + "nachází": 3352, + "slou": 3353, + "pouze": 3354, + "otá": 3355, + "plo": 3356, + "tové": 3357, + "větši": 3358, + "komi": 3359, + "vají": 3360, + "tyto": 3361, + "zápa": 3362, + "změ": 3363, + "moh": 3364, + "více": 3365, + "společ": 3366, + "auto": 3367, + "proti": 3368, + "dět": 3369, + "cháze": 3370, + "žel": 3371, + "«": 3372, + "»": 3373, + "а": 3374, + "б": 3375, + "в": 3376, + "г": 3377, + "д": 3378, + "е": 3379, + "ж": 3380, + "з": 3381, + "и": 3382, + "й": 3383, + "к": 3384, + "л": 3385, + "м": 3386, + "н": 3387, + "о": 3388, + "п": 3389, + "р": 3390, + "с": 3391, + "т": 3392, + "у": 3393, + "ф": 3394, + "х": 3395, + "ц": 3396, + "ч": 3397, + "ш": 3398, + "щ": 3399, + "ъ": 3400, + "ы": 3401, + "ь": 3402, + "э": 3403, + "ю": 3404, + "я": 3405, + "ё": 3406, + "‑": 3407, + "−": 3408, + "ст": 3409, + "ен": 3410, + "но": 3411, + "на": 3412, + "пр": 3413, + "то": 3414, + "по": 3415, + "ра": 3416, + "го": 3417, + "ко": 3418, + "не": 3419, + "во": 3420, + "ва": 3421, + "ет": 3422, + "ер": 3423, + "ни": 3424, + "ел": 3425, + "ит": 3426, + "ны": 3427, + "за": 3428, + "ро": 3429, + "ени": 3430, + "ка": 3431, + "ли": 3432, + "ем": 3433, + "да": 3434, + "об": 3435, + "ла": 3436, + "до": 3437, + "ся": 3438, + "ть": 3439, + "от": 3440, + "ло": 3441, + "ль": 3442, + "ед": 3443, + "со": 3444, + "ми": 3445, + "ре": 3446, + "мо": 3447, + "ци": 3448, + "про": 3449, + "та": 3450, + "это": 3451, + "ки": 3452, + "ру": 3453, + "при": 3454, + "ти": 3455, + "се": 3456, + "ста": 3457, + "вы": 3458, + "мы": 3459, + "ви": 3460, + "бы": 3461, + "ма": 3462, + "ес": 3463, + "ля": 3464, + "сти": 3465, + "ле": 3466, + "что": 3467, + "ме": 3468, + "ри": 3469, + "ча": 3470, + "од": 3471, + "ей": 3472, + "ель": 3473, + "ения": 3474, + "га": 3475, + "ну": 3476, + "си": 3477, + "па": 3478, + "раз": 3479, + "бо": 
3480, + "сто": 3481, + "су": 3482, + "са": 3483, + "ду": 3484, + "его": 3485, + "ест": 3486, + "ин": 3487, + "ить": 3488, + "из": 3489, + "же": 3490, + "му": 3491, + "пер": 3492, + "под": 3493, + "ение": 3494, + "сь": 3495, + "ку": 3496, + "пред": 3497, + "ного": 3498, + "ных": 3499, + "вер": 3500, + "те": 3501, + "ной": 3502, + "ции": 3503, + "де": 3504, + "ры": 3505, + "дел": 3506, + "лю": 3507, + "ве": 3508, + "он": 3509, + "мен": 3510, + "ги": 3511, + "ня": 3512, + "бу": 3513, + "пра": 3514, + "все": 3515, + "ется": 3516, + "сть": 3517, + "жа": 3518, + "дол": 3519, + "жи": 3520, + "бе": 3521, + "кон": 3522, + "сл": 3523, + "ши": 3524, + "ди": 3525, + "ств": 3526, + "ско": 3527, + "ные": 3528, + "чи": 3529, + "ют": 3530, + "дер": 3531, + "стра": 3532, + "ты": 3533, + "ход": 3534, + "щи": 3535, + "зо": 3536, + "зна": 3537, + "ности": 3538, + "чес": 3539, + "вля": 3540, + "вать": 3541, + "ор": 3542, + "пол": 3543, + "вет": 3544, + "так": 3545, + "ша": 3546, + "ту": 3547, + "сво": 3548, + "пре": 3549, + "она": 3550, + "итель": 3551, + "ный": 3552, + "сло": 3553, + "как": 3554, + "вл": 3555, + "ность": 3556, + "хо": 3557, + "мож": 3558, + "пе": 3559, + "для": 3560, + "ния": 3561, + "ное": 3562, + "рас": 3563, + "долж": 3564, + "дар": 3565, + "тель": 3566, + "ска": 3567, + "пу": 3568, + "ство": 3569, + "кото": 3570, + "раб": 3571, + "ее": 3572, + "род": 3573, + "эти": 3574, + "соб": 3575, + "ору": 3576, + "жен": 3577, + "ным": 3578, + "ити": 3579, + "ние": 3580, + "ком": 3581, + "дет": 3582, + "сту": 3583, + "гу": 3584, + "пи": 3585, + "меж": 3586, + "ению": 3587, + "тер": 3588, + "работ": 3589, + "воз": 3590, + "ция": 3591, + "кой": 3592, + "щест": 3593, + "гра": 3594, + "зи": 3595, + "ря": 3596, + "между": 3597, + "ства": 3598, + "вс": 3599, + "ело": 3600, + "ше": 3601, + "мер": 3602, + "ба": 3603, + "зы": 3604, + "лу": 3605, + "аль": 3606, + "дей": 3607, + "гла": 3608, + "народ": 3609, + "кти": 3610, + "предста": 3611, + "лся": 3612, + "явля": 3613, + "ски": 3614, + "нов": 3615, + "един": 3616, + "ров": 3617, + "ис": 3618, + "нима": 3619, + "рем": 3620, + "ходи": 3621, + "также": 3622, + "дру": 3623, + "ать": 3624, + "след": 3625, + "гово": 3626, + "ная": 3627, + "ющи": 3628, + "ень": 3629, + "которы": 3630, + "хот": 3631, + "ву": 3632, + "их": 3633, + "ему": 3634, + "чит": 3635, + "важ": 3636, + "орга": 3637, + "чески": 3638, + "ще": 3639, + "ке": 3640, + "ха": 3641, + "пос": 3642, + "том": 3643, + "боль": 3644, + "мне": 3645, + "пас": 3646, + "объ": 3647, + "прав": 3648, + "конф": 3649, + "слу": 3650, + "поддер": 3651, + "стви": 3652, + "наш": 3653, + "лько": 3654, + "стоя": 3655, + "ную": 3656, + "лем": 3657, + "енных": 3658, + "кра": 3659, + "ды": 3660, + "международ": 3661, + "гда": 3662, + "необ": 3663, + "госу": 3664, + "ству": 3665, + "ении": 3666, + "государ": 3667, + "кто": 3668, + "им": 3669, + "чест": 3670, + "рет": 3671, + "вопро": 3672, + "лен": 3673, + "ели": 3674, + "рова": 3675, + "ций": 3676, + "нам": 3677, + "этой": 3678, + "жения": 3679, + "необходи": 3680, + "меня": 3681, + "было": 3682, + "сили": 3683, + "фи": 3684, + "вя": 3685, + "шь": 3686, + "этого": 3687, + "они": 3688, + "органи": 3689, + "безо": 3690, + "проб": 3691, + "име": 3692, + "реш": 3693, + "би": 3694, + "безопас": 3695, + "ются": 3696, + "оста": 3697, + "енно": 3698, + "год": 3699, + "ела": 3700, + "представ": 3701, + "ться": 3702, + "слово": 3703, + "организа": 3704, + "должны": 3705, + "этом": 3706, + "бла": 3707, + "че": 3708, + "чу": 3709, + "благо": 3710, + "этому": 3711, + "врем": 3712, + "спе": 
3713, + "ном": 3714, + "ений": 3715, + "спо": 3716, + "нас": 3717, + "нет": 3718, + "зу": 3719, + "вед": 3720, + "еще": 3721, + "сказа": 3722, + "сей": 3723, + "ерен": 3724, + "дан": 3725, + "сам": 3726, + "еля": 3727, + "ран": 3728, + "зыва": 3729, + "является": 3730, + "будет": 3731, + "ктив": 3732, + "тре": 3733, + "деле": 3734, + "мот": 3735, + "конферен": 3736, + "лась": 3737, + "час": 3738, + "сторо": 3739, + "кого": 3740, + "ез": 3741, + "ней": 3742, + "ос": 3743, + "лись": 3744, + "разору": 3745, + "пере": 3746, + "сси": 3747, + "ными": 3748, + "проц": 3749, + "голо": 3750, + "чело": 3751, + "боле": 3752, + "челове": 3753, + "сер": 3754, + "пл": 3755, + "чет": 3756, + "стран": 3757, + "пя": 3758, + "был": 3759, + "кла": 3760, + "тов": 3761, + "жд": 3762, + "дела": 3763, + "ера": 3764, + "уже": 3765, + "совет": 3766, + "ген": 3767, + "безопасности": 3768, + "ца": 3769, + "седа": 3770, + "поз": 3771, + "ответ": 3772, + "проблем": 3773, + "нако": 3774, + "тем": 3775, + "доста": 3776, + "пы": 3777, + "ща": 3778, + "вой": 3779, + "сущест": 3780, + "необходимо": 3781, + "быть": 3782, + "может": 3783, + "дем": 3784, + "чтобы": 3785, + "ек": 3786, + "чер": 3787, + "усили": 3788, + "рес": 3789, + "руд": 3790, + "единенных": 3791, + "доб": 3792, + "дости": 3793, + "ствен": 3794, + "ядер": 3795, + "годня": 3796, + "каза": 3797, + "сегодня": 3798, + "сейчас": 3799, + "только": 3800, + "вод": 3801, + "есь": 3802, + "много": 3803, + "буду": 3804, + "ев": 3805, + "есть": 3806, + "три": 3807, + "общест": 3808, + "явл": 3809, + "высту": 3810, + "ред": 3811, + "счит": 3812, + "сит": 3813, + "делега": 3814, + "лож": 3815, + "этот": 3816, + "фор": 3817, + "клю": 3818, + "возмож": 3819, + "вания": 3820, + "бли": 3821, + "или": 3822, + "вз": 3823, + "наций": 3824, + "ского": 3825, + "приня": 3826, + "пла": 3827, + "оч": 3828, + "иться": 3829, + "сте": 3830, + "наши": 3831, + "которые": 3832, + "ар": 3833, + "имеет": 3834, + "сот": 3835, + "знач": 3836, + "перь": 3837, + "следу": 3838, + "ены": 3839, + "таки": 3840, + "объединенных": 3841, + "стро": 3842, + "теперь": 3843, + "бле": 3844, + "благодар": 3845, + "разв": 3846, + "ан": 3847, + "жива": 3848, + "очень": 3849, + "ят": 3850, + "без": 3851, + "обес": 3852, + "гро": 3853, + "лось": 3854, + "сы": 3855, + "организации": 3856, + "член": 3857, + "того": 3858, + "ональ": 3859, + "жда": 3860, + "всех": 3861, + "свя": 3862, + "более": 3863, + "сов": 3864, + "когда": 3865, + "вот": 3866, + "кре": 3867, + "кры": 3868, + "поэтому": 3869, + "воль": 3870, + "ой": 3871, + "генера": 3872, + "чем": 3873, + "лы": 3874, + "полити": 3875, + "вен": 3876, + "конференции": 3877, + "процес": 3878, + "бя": 3879, + "ите": 3880, + "отно": 3881, + "развити": 3882, + "аф": 3883, + "ющ": 3884, + "вно": 3885, + "мир": 3886, + "нии": 3887, + "кая": 3888, + "ас": 3889, + "ительно": 3890, + "вто": 3891, + "ением": 3892, + "генераль": 3893, + "прот": 3894, + "всем": 3895, + "самбле": 3896, + "ассамбле": 3897, + "ом": 3898, + "зд": 3899, + "смот": 3900, + "реги": 3901, + "чего": 3902, + "однако": 3903, + "усилия": 3904, + "действи": 3905, + "чно": 3906, + "уча": 3907, + "образ": 3908, + "вос": 3909, + "эта": 3910, + "перего": 3911, + "говор": 3912, + "вам": 3913, + "моло": 3914, + "время": 3915, + "дь": 3916, + "хотел": 3917, + "гру": 3918, + "заявл": 3919, + "предоста": 3920, + "поль": 3921, + "нее": 3922, + "резо": 3923, + "перегово": 3924, + "резолю": 3925, + "крет": 3926, + "поддерж": 3927, + "обеспе": 3928, + "него": 3929, + "представит": 3930, + "наде": 3931, + "кри": 3932, + 
"чь": 3933, + "проек": 3934, + "лет": 3935, + "други": 3936, + "_": 3937, + "،": 3938, + "؛": 3939, + "؟": 3940, + "ء": 3941, + "آ": 3942, + "أ": 3943, + "ؤ": 3944, + "إ": 3945, + "ئ": 3946, + "ا": 3947, + "ب": 3948, + "ة": 3949, + "ت": 3950, + "ث": 3951, + "ج": 3952, + "ح": 3953, + "خ": 3954, + "د": 3955, + "ذ": 3956, + "ر": 3957, + "ز": 3958, + "س": 3959, + "ش": 3960, + "ص": 3961, + "ض": 3962, + "ط": 3963, + "ظ": 3964, + "ع": 3965, + "غ": 3966, + "ـ": 3967, + "ف": 3968, + "ق": 3969, + "ك": 3970, + "ل": 3971, + "م": 3972, + "ن": 3973, + "ه": 3974, + "و": 3975, + "ى": 3976, + "ي": 3977, + "ً": 3978, + "ٌ": 3979, + "ٍ": 3980, + "َ": 3981, + "ُ": 3982, + "ِ": 3983, + "ّ": 3984, + "ْ": 3985, + "ٰ": 3986, + "چ": 3987, + "ڨ": 3988, + "ک": 3989, + "ھ": 3990, + "ی": 3991, + "ۖ": 3992, + "ۗ": 3993, + "ۘ": 3994, + "ۚ": 3995, + "ۛ": 3996, + "—": 3997, + "☭": 3998, + "ﺃ": 3999, + "ﻻ": 4000, + "ال": 4001, + "َا": 4002, + "وَ": 4003, + "َّ": 4004, + "ِي": 4005, + "أَ": 4006, + "لَ": 4007, + "نَ": 4008, + "الْ": 4009, + "هُ": 4010, + "ُو": 4011, + "ما": 4012, + "نْ": 4013, + "من": 4014, + "عَ": 4015, + "نا": 4016, + "لا": 4017, + "مَ": 4018, + "تَ": 4019, + "فَ": 4020, + "أن": 4021, + "لي": 4022, + "مِ": 4023, + "ان": 4024, + "في": 4025, + "رَ": 4026, + "يَ": 4027, + "هِ": 4028, + "مْ": 4029, + "قَ": 4030, + "بِ": 4031, + "لى": 4032, + "ين": 4033, + "إِ": 4034, + "لِ": 4035, + "وا": 4036, + "كَ": 4037, + "ها": 4038, + "ًا": 4039, + "مُ": 4040, + "ون": 4041, + "الم": 4042, + "بَ": 4043, + "يا": 4044, + "ذا": 4045, + "سا": 4046, + "الل": 4047, + "مي": 4048, + "يْ": 4049, + "را": 4050, + "ري": 4051, + "لك": 4052, + "مَا": 4053, + "نَّ": 4054, + "لم": 4055, + "إن": 4056, + "ست": 4057, + "وم": 4058, + "َّا": 4059, + "لَا": 4060, + "هم": 4061, + "ِّ": 4062, + "كُ": 4063, + "كان": 4064, + "سَ": 4065, + "با": 4066, + "دي": 4067, + "حَ": 4068, + "عْ": 4069, + "بي": 4070, + "الأ": 4071, + "ول": 4072, + "فِي": 4073, + "رِ": 4074, + "دا": 4075, + "مِنْ": 4076, + "ُونَ": 4077, + "وْ": 4078, + "هَا": 4079, + "ُّ": 4080, + "الس": 4081, + "الَ": 4082, + "ني": 4083, + "لْ": 4084, + "تُ": 4085, + "هل": 4086, + "رة": 4087, + "دَ": 4088, + "سْ": 4089, + "تِ": 4090, + "نَا": 4091, + "رْ": 4092, + "اللَّ": 4093, + "سامي": 4094, + "كن": 4095, + "كل": 4096, + "هَ": 4097, + "عَلَ": 4098, + "على": 4099, + "مع": 4100, + "إلى": 4101, + "قد": 4102, + "الر": 4103, + "ُوا": 4104, + "ير": 4105, + "عن": 4106, + "يُ": 4107, + "نِ": 4108, + "بْ": 4109, + "الح": 4110, + "هُمْ": 4111, + "قا": 4112, + "ذه": 4113, + "الت": 4114, + "ِينَ": 4115, + "جَ": 4116, + "هذا": 4117, + "عد": 4118, + "الع": 4119, + "دْ": 4120, + "قَالَ": 4121, + "رُ": 4122, + "يم": 4123, + "ية": 4124, + "نُ": 4125, + "خَ": 4126, + "رب": 4127, + "الك": 4128, + "وَا": 4129, + "أنا": 4130, + "ةِ": 4131, + "الن": 4132, + "حد": 4133, + "عِ": 4134, + "تا": 4135, + "هو": 4136, + "فا": 4137, + "عا": 4138, + "الش": 4139, + "لُ": 4140, + "يت": 4141, + "ذَا": 4142, + "يع": 4143, + "الذ": 4144, + "حْ": 4145, + "الص": 4146, + "إِنَّ": 4147, + "جا": 4148, + "علي": 4149, + "كَا": 4150, + "بُ": 4151, + "تع": 4152, + "وق": 4153, + "مل": 4154, + "لَّ": 4155, + "يد": 4156, + "أخ": 4157, + "رف": 4158, + "تي": 4159, + "الِ": 4160, + "ّا": 4161, + "ذلك": 4162, + "أَنْ": 4163, + "سِ": 4164, + "توم": 4165, + "مر": 4166, + "مَنْ": 4167, + "بل": 4168, + "الق": 4169, + "الله": 4170, + "ِيَ": 4171, + "كم": 4172, + "ذَ": 4173, + "عل": 4174, + "حب": 4175, + "سي": 4176, + "عُ": 4177, + "الج": 4178, + "الد": 4179, + "شَ": 4180, + "تك": 4181, + "فْ": 4182, + "صَ": 4183, + "لل": 4184, + "دِ": 4185, + 
"بر": 4186, + "فِ": 4187, + "ته": 4188, + "أع": 4189, + "تْ": 4190, + "قْ": 4191, + "الْأَ": 4192, + "ئِ": 4193, + "عَنْ": 4194, + "ور": 4195, + "حا": 4196, + "الَّ": 4197, + "مت": 4198, + "فر": 4199, + "دُ": 4200, + "هنا": 4201, + "وَأَ": 4202, + "تب": 4203, + "ةُ": 4204, + "أي": 4205, + "سب": 4206, + "ريد": 4207, + "وج": 4208, + "كُمْ": 4209, + "حِ": 4210, + "كْ": 4211, + "در": 4212, + "َاء": 4213, + "هذه": 4214, + "الط": 4215, + "الْمُ": 4216, + "دة": 4217, + "قل": 4218, + "غَ": 4219, + "يوم": 4220, + "الَّذ": 4221, + "كر": 4222, + "تر": 4223, + "كِ": 4224, + "كي": 4225, + "عَلَى": 4226, + "رَب": 4227, + "عة": 4228, + "قُ": 4229, + "جْ": 4230, + "فض": 4231, + "لة": 4232, + "هْ": 4233, + "رَا": 4234, + "وَلَ": 4235, + "الْمَ": 4236, + "أَنَّ": 4237, + "يَا": 4238, + "أُ": 4239, + "شي": 4240, + "اللَّهُ": 4241, + "لَى": 4242, + "قِ": 4243, + "أت": 4244, + "عَلَيْ": 4245, + "اللَّهِ": 4246, + "الب": 4247, + "ضَ": 4248, + "ةً": 4249, + "قي": 4250, + "ار": 4251, + "بد": 4252, + "خْ": 4253, + "سْتَ": 4254, + "طَ": 4255, + "قَدْ": 4256, + "ذهب": 4257, + "أم": 4258, + "ماذا": 4259, + "وَإِ": 4260, + "ةٌ": 4261, + "ونَ": 4262, + "ليلى": 4263, + "ولا": 4264, + "حُ": 4265, + "هي": 4266, + "صل": 4267, + "الخ": 4268, + "ود": 4269, + "ليس": 4270, + "لدي": 4271, + "قال": 4272, + "كَانَ": 4273, + "مَّ": 4274, + "حي": 4275, + "تم": 4276, + "لن": 4277, + "وَلَا": 4278, + "بع": 4279, + "يمكن": 4280, + "سُ": 4281, + "ةَ": 4282, + "حت": 4283, + "رًا": 4284, + "كا": 4285, + "شا": 4286, + "هِمْ": 4287, + "لَهُ": 4288, + "زَ": 4289, + "داً": 4290, + "مس": 4291, + "كث": 4292, + "الْعَ": 4293, + "جِ": 4294, + "صْ": 4295, + "فَا": 4296, + "له": 4297, + "وي": 4298, + "عَا": 4299, + "هُوَ": 4300, + "بِي": 4301, + "بَا": 4302, + "أس": 4303, + "ثَ": 4304, + "لِي": 4305, + "رض": 4306, + "الرَّ": 4307, + "لِكَ": 4308, + "تَّ": 4309, + "فُ": 4310, + "قة": 4311, + "فعل": 4312, + "مِن": 4313, + "الآ": 4314, + "ثُ": 4315, + "سم": 4316, + "مَّا": 4317, + "بِهِ": 4318, + "تق": 4319, + "خر": 4320, + "لقد": 4321, + "خل": 4322, + "شر": 4323, + "أنت": 4324, + "لَّا": 4325, + "سن": 4326, + "السَّ": 4327, + "الذي": 4328, + "سَا": 4329, + "وما": 4330, + "زل": 4331, + "وب": 4332, + "أْ": 4333, + "إذا": 4334, + "رِي": 4335, + "حة": 4336, + "نِي": 4337, + "الْحَ": 4338, + "وَقَالَ": 4339, + "به": 4340, + "ةٍ": 4341, + "سأ": 4342, + "رٌ": 4343, + "بال": 4344, + "مة": 4345, + "شْ": 4346, + "وت": 4347, + "عند": 4348, + "فس": 4349, + "بَعْ": 4350, + "هر": 4351, + "قط": 4352, + "أح": 4353, + "إنه": 4354, + "وع": 4355, + "فت": 4356, + "غا": 4357, + "هناك": 4358, + "بت": 4359, + "مِنَ": 4360, + "سر": 4361, + "ذَلِكَ": 4362, + "رس": 4363, + "حدث": 4364, + "غْ": 4365, + "ِّي": 4366, + "الإ": 4367, + "وَيَ": 4368, + "جل": 4369, + "است": 4370, + "قِي": 4371, + "عب": 4372, + "وس": 4373, + "يش": 4374, + "الَّذِينَ": 4375, + "تاب": 4376, + "دِي": 4377, + "جب": 4378, + "كون": 4379, + "بن": 4380, + "الث": 4381, + "لَيْ": 4382, + "بعد": 4383, + "وَالْ": 4384, + "فَأَ": 4385, + "عم": 4386, + "هُم": 4387, + "تن": 4388, + "ذْ": 4389, + "أص": 4390, + "أين": 4391, + "رَبِّ": 4392, + "الذين": 4393, + "إِن": 4394, + "بين": 4395, + "جُ": 4396, + "عَلَيْهِ": 4397, + "حَا": 4398, + "لو": 4399, + "ستط": 4400, + "ظر": 4401, + "لَمْ": 4402, + "ءِ": 4403, + "كُل": 4404, + "طل": 4405, + "تَا": 4406, + "ضُ": 4407, + "كنت": 4408, + "لًا": 4409, + "مٌ": 4410, + "قبل": 4411, + "ــ": 4412, + "ذِ": 4413, + "قَوْ": 4414, + "صِ": 4415, + "مًا": 4416, + "كانت": 4417, + "صا": 4418, + "يق": 4419, + "الف": 4420, + "النا": 4421, + "مٍ": 4422, + "إِنْ": 4423, + "النَّ": 4424, + 
"جد": 4425, + "وَمَا": 4426, + "تت": 4427, + "بح": 4428, + "مكان": 4429, + "كيف": 4430, + "ّة": 4431, + "الا": 4432, + "جَا": 4433, + "أو": 4434, + "ساعد": 4435, + "ضِ": 4436, + "إلا": 4437, + "راً": 4438, + "قَا": 4439, + "رأ": 4440, + "عت": 4441, + "أحد": 4442, + "هد": 4443, + "ضا": 4444, + "طر": 4445, + "أق": 4446, + "ماء": 4447, + "دَّ": 4448, + "البا": 4449, + "مُو": 4450, + "أَوْ": 4451, + "طا": 4452, + "قُو": 4453, + "خِ": 4454, + "تل": 4455, + "ستطيع": 4456, + "دَا": 4457, + "النَّا": 4458, + "إلَى": 4459, + "وَتَ": 4460, + "هَذَا": 4461, + "بة": 4462, + "عليك": 4463, + "جر": 4464, + "المن": 4465, + "زا": 4466, + "رٍ": 4467, + "دع": 4468, + "ًّا": 4469, + "سة": 4470, + "ثُمَّ": 4471, + "شيء": 4472, + "الغ": 4473, + "تح": 4474, + "رُونَ": 4475, + "اليوم": 4476, + "مِي": 4477, + "نُوا": 4478, + "أر": 4479, + "تُمْ": 4480, + "عر": 4481, + "يف": 4482, + "أب": 4483, + "دًا": 4484, + "صَا": 4485, + "التَّ": 4486, + "أريد": 4487, + "الز": 4488, + "يَوْ": 4489, + "إلي": 4490, + "جي": 4491, + "يَعْ": 4492, + "فضل": 4493, + "الإن": 4494, + "أنه": 4495, + "1": 4496, + "2": 4497, + "3": 4498, + "4": 4499, + "5": 4500, + "·": 4501, + "×": 4502, + "̃": 4503, + "̌": 4504, + "ε": 4505, + "λ": 4506, + "μ": 4507, + "•": 4508, + "‧": 4509, + "─": 4510, + "□": 4511, + "、": 4512, + "。": 4513, + "〈": 4514, + "〉": 4515, + "《": 4516, + "》": 4517, + "「": 4518, + "」": 4519, + "『": 4520, + "』": 4521, + "ア": 4522, + "オ": 4523, + "カ": 4524, + "チ": 4525, + "ド": 4526, + "ベ": 4527, + "ャ": 4528, + "ヤ": 4529, + "ン": 4530, + "・": 4531, + "ー": 4532, + "ㄟ": 4533, + "!": 4534, + "(": 4535, + ")": 4536, + ",": 4537, + "-": 4538, + "/": 4539, + ":": 4540, + ";": 4541, + "?": 4542, + "p": 4543, + "i4": 4544, + "zh": 4545, + "i2": 4546, + "ng1": 4547, + "u4": 4548, + "i1": 4549, + "ng2": 4550, + "u3": 4551, + "de5": 4552, + "e4": 4553, + "i3": 4554, + "ng4": 4555, + "an4": 4556, + "shi4": 4557, + "an2": 4558, + "u2": 4559, + "u1": 4560, + "ng3": 4561, + "a1": 4562, + "an1": 4563, + "e2": 4564, + "a4": 4565, + "ei4": 4566, + "ong1": 4567, + "ai4": 4568, + "ao4": 4569, + "ang1": 4570, + "an3": 4571, + "wei4": 4572, + "uo2": 4573, + "n1": 4574, + "en2": 4575, + "ao3": 4576, + "e1": 4577, + "qi": 4578, + "eng2": 4579, + "zho": 4580, + "ang3": 4581, + "ang4": 4582, + "ang2": 4583, + "uo4": 4584, + "ge4": 4585, + "yi1": 4586, + "guo2": 4587, + "a3": 4588, + "he2": 4589, + "e3": 4590, + "yi2": 4591, + "di4": 4592, + "zhong1": 4593, + "bu4": 4594, + "ai2": 4595, + "n2": 4596, + "zai4": 4597, + "shi2": 4598, + "eng1": 4599, + "ren2": 4600, + "ong2": 4601, + "xian4": 4602, + "xu": 4603, + "n4": 4604, + "li4": 4605, + "en4": 4606, + "yu2": 4607, + "ei2": 4608, + "yi2ge4": 4609, + "ou4": 4610, + "ei3": 4611, + "ui4": 4612, + "a2": 4613, + "you3": 4614, + "ao1": 4615, + "da4": 4616, + "cheng2": 4617, + "en1": 4618, + "eng4": 4619, + "yi4": 4620, + "si1": 4621, + "zhi4": 4622, + "jia1": 4623, + "yuan2": 4624, + "ta1": 4625, + "de5yi2ge4": 4626, + "ke1": 4627, + "shu3": 4628, + "xi1": 4629, + "ji2": 4630, + "ao2": 4631, + "ou3": 4632, + "ong4": 4633, + "xia4": 4634, + "ai1": 4635, + "gong1": 4636, + "zhi1": 4637, + "en3": 4638, + "wei2": 4639, + "xue2": 4640, + "qu1": 4641, + "zhou1": 4642, + "er3": 4643, + "ming2": 4644, + "zhong3": 4645, + "li3": 4646, + "wu4": 4647, + "yi3": 4648, + "uo1": 4649, + "e5": 4650, + "ji4": 4651, + "xing2": 4652, + "jian4": 4653, + "hua4": 4654, + "yu3": 4655, + "uo3": 4656, + "ji1": 4657, + "ai3": 4658, + "zuo4": 4659, + "hou4": 4660, + "hui4": 4661, + "ei1": 4662, + "nian2": 4663, + "qi2": 4664, + "dao4": 
4665, + "sheng1": 4666, + "de2": 4667, + "dai4": 4668, + "uan2": 4669, + "zhe4": 4670, + "zheng4": 4671, + "ben3": 4672, + "shang4": 4673, + "zhu3": 4674, + "bei4": 4675, + "ye4": 4676, + "chu1": 4677, + "zhan4": 4678, + "le5": 4679, + "lai2": 4680, + "shi3": 4681, + "nan2": 4682, + "ren4": 4683, + "you2": 4684, + "ke4": 4685, + "ba1": 4686, + "fu4": 4687, + "dui4": 4688, + "ya4": 4689, + "mei3": 4690, + "zi4": 4691, + "xin1": 4692, + "jing1": 4693, + "zhu": 4694, + "n3": 4695, + "yong4": 4696, + "mu4": 4697, + "jiao4": 4698, + "ye3": 4699, + "jin4": 4700, + "bian4": 4701, + "lu4": 4702, + "qi1": 4703, + "she4": 4704, + "xiang1": 4705, + "ong3": 4706, + "shu4": 4707, + "dong4": 4708, + "suo3": 4709, + "guan1": 4710, + "san1": 4711, + "te4": 4712, + "duo1": 4713, + "fu2": 4714, + "min2": 4715, + "la1": 4716, + "zhi2": 4717, + "zhen4": 4718, + "ou1": 4719, + "wu3": 4720, + "ma3": 4721, + "i5": 4722, + "zi5": 4723, + "ju4": 4724, + "er4": 4725, + "yao4": 4726, + "xia4de5yi2ge4": 4727, + "si4": 4728, + "tu2": 4729, + "shan1": 4730, + "zui4": 4731, + "yin1": 4732, + "er2": 4733, + "tong2": 4734, + "dong1": 4735, + "yu4": 4736, + "yan2": 4737, + "qian2": 4738, + "shu3xia4de5yi2ge4": 4739, + "jun1": 4740, + "ke3": 4741, + "wen2": 4742, + "fa3": 4743, + "luo2": 4744, + "zhu4": 4745, + "xi4": 4746, + "kou3": 4747, + "bei3": 4748, + "jian1": 4749, + "fa1": 4750, + "dian4": 4751, + "jiang1": 4752, + "wei4yu2": 4753, + "xiang4": 4754, + "zhi3": 4755, + "eng3": 4756, + "fang1": 4757, + "lan2": 4758, + "shu": 4759, + "ri4": 4760, + "lian2": 4761, + "shou3": 4762, + "qiu2": 4763, + "jin1": 4764, + "huo4": 4765, + "shu3xia4de5yi2ge4zhong3": 4766, + "fen1": 4767, + "nei4": 4768, + "gai1": 4769, + "mei3guo2": 4770, + "un2": 4771, + "ge2": 4772, + "bao3": 4773, + "qing1": 4774, + "gao1": 4775, + "tai2": 4776, + "xiao3": 4777, + "jie2": 4778, + "tian1": 4779, + "chang2": 4780, + "quan2": 4781, + "lie4": 4782, + "hai3": 4783, + "fei1": 4784, + "ti3": 4785, + "jue2": 4786, + "ou2": 4787, + "ci3": 4788, + "zu2": 4789, + "ni2": 4790, + "biao3": 4791, + "zhong1guo2": 4792, + "du4": 4793, + "yue4": 4794, + "xing4": 4795, + "sheng4": 4796, + "che1": 4797, + "dan1": 4798, + "jie1": 4799, + "lin2": 4800, + "ping2": 4801, + "fu3": 4802, + "gu3": 4803, + "jie4": 4804, + "v3": 4805, + "sheng3": 4806, + "na4": 4807, + "yuan4": 4808, + "zhang3": 4809, + "guan3": 4810, + "dao3": 4811, + "zu3": 4812, + "ding4": 4813, + "dian3": 4814, + "ceng2": 4815, + "ren2kou3": 4816, + "tai4": 4817, + "tong1": 4818, + "guo4": 4819, + "neng2": 4820, + "chang3": 4821, + "hua2": 4822, + "liu2": 4823, + "ying1": 4824, + "xiao4": 4825, + "ci4": 4826, + "bian4hua4": 4827, + "liang3": 4828, + "gong4": 4829, + "zhong4": 4830, + "de5yi1": 4831, + "se4": 4832, + "kai1": 4833, + "wang2": 4834, + "jiu4": 4835, + "shi1": 4836, + "shou4": 4837, + "mei2": 4838, + "feng1": 4839, + "ze2": 4840, + "tu2shi4": 4841, + "ti2": 4842, + "qi4": 4843, + "jiu3": 4844, + "shen1": 4845, + "zhe3": 4846, + "ren2kou3bian4hua4": 4847, + "ren2kou3bian4hua4tu2shi4": 4848, + "di4qu1": 4849, + "yang2": 4850, + "men5": 4851, + "long2": 4852, + "bing4": 4853, + "chan3": 4854, + "zhu1": 4855, + "wei3": 4856, + "wai4": 4857, + "xing1": 4858, + "bo1": 4859, + "bi3": 4860, + "tang2": 4861, + "hua1": 4862, + "bo2": 4863, + "shui3": 4864, + "shu1": 4865, + "dou1": 4866, + "sai4": 4867, + "chao2": 4868, + "bi4": 4869, + "ling2": 4870, + "lei4": 4871, + "da4xue2": 4872, + "fen4": 4873, + "shu3de5": 4874, + "mu3": 4875, + "jiao1": 4876, + "dang1": 4877, + "cheng1": 4878, + "tong3": 
4879, + "nv3": 4880, + "qi3": 4881, + "yan3": 4882, + "mian4": 4883, + "luo4": 4884, + "jing4": 4885, + "ge1": 4886, + "ru4": 4887, + "dan4": 4888, + "ri4ben3": 4889, + "pu3": 4890, + "yun4": 4891, + "huang2": 4892, + "wo3": 4893, + "lv": 4894, + "hai2": 4895, + "shi4yi1": 4896, + "xie1": 4897, + "ying3": 4898, + "wu2": 4899, + "shen2": 4900, + "wang3": 4901, + "guang3": 4902, + "liu4": 4903, + "su4": 4904, + "shi4zhen4": 4905, + "can1": 4906, + "cao3": 4907, + "xia2": 4908, + "ka3": 4909, + "da2": 4910, + "hu4": 4911, + "ban4": 4912, + "dang3": 4913, + "hu2": 4914, + "zong3": 4915, + "deng3": 4916, + "de5yi2ge4shi4zhen4": 4917, + "chuan2": 4918, + "mo4": 4919, + "zhang1": 4920, + "ban1": 4921, + "mo2": 4922, + "cha2": 4923, + "ce4": 4924, + "zhu3yao4": 4925, + "tou2": 4926, + "ju2": 4927, + "shi4wei4yu2": 4928, + "sa4": 4929, + "un1": 4930, + "ke3yi3": 4931, + "du1": 4932, + "han4": 4933, + "liang4": 4934, + "sha1": 4935, + "jia3": 4936, + "zi1": 4937, + "lv4": 4938, + "fu1": 4939, + "xian1": 4940, + "xu4": 4941, + "guang1": 4942, + "meng2": 4943, + "bao4": 4944, + "you4": 4945, + "rong2": 4946, + "zhi1yi1": 4947, + "wei1": 4948, + "mao2": 4949, + "guo2jia1": 4950, + "cong2": 4951, + "gou4": 4952, + "tie3": 4953, + "zhen1": 4954, + "du2": 4955, + "bian1": 4956, + "ci2": 4957, + "qu3": 4958, + "fan4": 4959, + "xiang3": 4960, + "men2": 4961, + "ju1": 4962, + "hong2": 4963, + "zi3": 4964, + "ta1men5": 4965, + "ji3": 4966, + "zong1": 4967, + "zhou1de5yi2ge4shi4zhen4": 4968, + "tuan2": 4969, + "jing3": 4970, + "gong1si1": 4971, + "xie4": 4972, + "li2": 4973, + "li4shi3": 4974, + "bao1": 4975, + "gang3": 4976, + "gui1": 4977, + "zheng1": 4978, + "zhi2wu4": 4979, + "ta1de5": 4980, + "pin3": 4981, + "zhuan1": 4982, + "chong2": 4983, + "shi3yong4": 4984, + "wa3": 4985, + "shuo1": 4986, + "chuan1": 4987, + "lei2": 4988, + "wan1": 4989, + "huo2": 4990, + "su1": 4991, + "zao3": 4992, + "gai3": 4993, + "qu4": 4994, + "gu4": 4995, + "xi2": 4996, + "hang2": 4997, + "ying4": 4998, + "cun1": 4999, + "gen1": 5000, + "ying2": 5001, + "ting2": 5002, + "cheng2shi4": 5003, + "jiang3": 5004, + "ling3": 5005, + "lun2": 5006, + "bu4fen4": 5007, + "deng1": 5008, + "xuan3": 5009, + "dong4wu4": 5010, + "de2guo2": 5011, + "xian3": 5012, + "fan3": 5013, + "zhe5": 5014, + "han2": 5015, + "hao4": 5016, + "mi4": 5017, + "ran2": 5018, + "qin1": 5019, + "tiao2": 5020, + "zhan3": 5021, + "[ar]": 5022, + "[zh-cn]": 5023, + "¡": 5024, + "é": 5025, + "shi": 5026, + "tsu": 5027, + "teki": 5028, + "nai": 5029, + "aru": 5030, + "uu": 5031, + "kai": 5032, + "shite": 5033, + "mono": 5034, + "koto": 5035, + "kara": 5036, + "shita": 5037, + "suru": 5038, + "masu": 5039, + "tai": 5040, + "ware": 5041, + "shin": 5042, + "oku": 5043, + "yuu": 5044, + "iru": 5045, + "jiko": 5046, + "desu": 5047, + "rare": 5048, + "shou": 5049, + "sha": 5050, + "sekai": 5051, + "kyou": 5052, + "mashita": 5053, + "nara": 5054, + "kei": 5055, + "ita": 5056, + "ari": 5057, + "itsu": 5058, + "kono": 5059, + "naka": 5060, + "chou": 5061, + "sore": 5062, + "naru": 5063, + "gaku": 5064, + "reba": 5065, + "hito": 5066, + "sai": 5067, + "nan": 5068, + "dai": 5069, + "tsuku": 5070, + "shiki": 5071, + "sare": 5072, + "naku": 5073, + "jun": 5074, + "kaku": 5075, + "zai": 5076, + "wata": 5077, + "shuu": 5078, + "ii": 5079, + "kare": 5080, + "shii": 5081, + "made": 5082, + "sho": 5083, + "kereba": 5084, + "shika": 5085, + "ichi": 5086, + "deki": 5087, + "nin": 5088, + "wareware": 5089, + "nakereba": 5090, + "oite": 5091, + "yaku": 5092, + "mujun": 5093, + "yoku": 5094, 
+ "butsu": 5095, + "omo": 5096, + "gae": 5097, + "naranai": 5098, + "tachi": 5099, + "chuu": 5100, + "kangae": 5101, + "toki": 5102, + "koro": 5103, + "mujunteki": 5104, + "naga": 5105, + "jin": 5106, + "shima": 5107, + "iku": 5108, + "imasu": 5109, + "hon": 5110, + "kae": 5111, + "kore": 5112, + "kita": 5113, + "datta": 5114, + "jitsu": 5115, + "mae": 5116, + "toku": 5117, + "douitsu": 5118, + "ritsu": 5119, + "kyuu": 5120, + "hyou": 5121, + "rareta": 5122, + "keisei": 5123, + "kkan": 5124, + "rareru": 5125, + "mou": 5126, + "doko": 5127, + "ryou": 5128, + "dake": 5129, + "nakatta": 5130, + "soko": 5131, + "tabe": 5132, + "hana": 5133, + "fuku": 5134, + "yasu": 5135, + "wataku": 5136, + "yama": 5137, + "kyo": 5138, + "genzai": 5139, + "boku": 5140, + "ata": 5141, + "kawa": 5142, + "masen": 5143, + "juu": 5144, + "natte": 5145, + "watakushi": 5146, + "yotte": 5147, + "hai": 5148, + "jishin": 5149, + "rete": 5150, + "oka": 5151, + "kagaku": 5152, + "natta": 5153, + "karu": 5154, + "nari": 5155, + "mata": 5156, + "kuru": 5157, + "gai": 5158, + "kari": 5159, + "shakai": 5160, + "koui": 5161, + "yori": 5162, + "setsu": 5163, + "reru": 5164, + "tokoro": 5165, + "jutsu": 5166, + "saku": 5167, + "ttai": 5168, + "ningen": 5169, + "tame": 5170, + "kankyou": 5171, + "ooku": 5172, + "watashi": 5173, + "tsukuru": 5174, + "sugi": 5175, + "jibun": 5176, + "shitsu": 5177, + "keru": 5178, + "kishi": 5179, + "shikashi": 5180, + "moto": 5181, + "mari": 5182, + "itte": 5183, + "deshita": 5184, + "nde": 5185, + "arimasu": 5186, + "koe": 5187, + "zettai": 5188, + "kkanteki": 5189, + "rekishi": 5190, + "dekiru": 5191, + "tsuka": 5192, + "itta": 5193, + "kobutsu": 5194, + "miru": 5195, + "shoku": 5196, + "shimasu": 5197, + "gijutsu": 5198, + "gyou": 5199, + "joushiki": 5200, + "atta": 5201, + "hodo": 5202, + "koko": 5203, + "tsukurareta": 5204, + "zoku": 5205, + "hitei": 5206, + "koku": 5207, + "rekishiteki": 5208, + "kete": 5209, + "kako": 5210, + "nagara": 5211, + "kakaru": 5212, + "shutai": 5213, + "haji": 5214, + "taku": 5215, + "douitsuteki": 5216, + "mete": 5217, + "tsuu": 5218, + "sarete": 5219, + "genjitsu": 5220, + "bai": 5221, + "nawa": 5222, + "jikan": 5223, + "waru": 5224, + "rt": 5225, + "atsu": 5226, + "soku": 5227, + "kouiteki": 5228, + "kata": 5229, + "tetsu": 5230, + "gawa": 5231, + "kedo": 5232, + "reta": 5233, + "sayou": 5234, + "tteru": 5235, + "tori": 5236, + "kimi": 5237, + "mura": 5238, + "sareru": 5239, + "machi": 5240, + "kya": 5241, + "osa": 5242, + "konna": 5243, + "aku": 5244, + "sareta": 5245, + "ipp": 5246, + "shiku": 5247, + "uchi": 5248, + "hitotsu": 5249, + "hatara": 5250, + "tachiba": 5251, + "shiro": 5252, + "katachi": 5253, + "tomo": 5254, + "ete": 5255, + "meru": 5256, + "nichi": 5257, + "dare": 5258, + "katta": 5259, + "eru": 5260, + "suki": 5261, + "ooki": 5262, + "maru": 5263, + "moku": 5264, + "oko": 5265, + "kangaerareru": 5266, + "oto": 5267, + "tanni": 5268, + "tada": 5269, + "taiteki": 5270, + "motte": 5271, + "kinou": 5272, + "shinai": 5273, + "kki": 5274, + "tari": 5275, + "ranai": 5276, + "kkou": 5277, + "mirai": 5278, + "ppon": 5279, + "goto": 5280, + "hitsu": 5281, + "teru": 5282, + "mochi": 5283, + "katsu": 5284, + "nyuu": 5285, + "zuka": 5286, + "tsuite": 5287, + "nomi": 5288, + "sugu": 5289, + "kuda": 5290, + "tetsugaku": 5291, + "ika": 5292, + "ronri": 5293, + "oki": 5294, + "nippon": 5295, + "shimashita": 5296, + "chishiki": 5297, + "chokkanteki": 5298, + "suko": 5299, + "kuu": 5300, + "arou": 5301, + "katte": 5302, + "kuri": 5303, + "inai": 5304, + 
"hyougen": 5305, + "ishiki": 5306, + "doku": 5307, + "atte": 5308, + "atara": 5309, + "wari": 5310, + "kao": 5311, + "seisan": 5312, + "hanashi": 5313, + "kake": 5314, + "naji": 5315, + "sunawa": 5316, + "sunawachi": 5317, + "ugo": 5318, + "suu": 5319, + "bara": 5320, + "hiro": 5321, + "iwa": 5322, + "betsu": 5323, + "yoi": 5324, + "seru": 5325, + "shiteru": 5326, + "rarete": 5327, + "toshi": 5328, + "seki": 5329, + "tairitsu": 5330, + "wakara": 5331, + "tokyo": 5332, + "kka": 5333, + "kyoku": 5334, + "iro": 5335, + "mite": 5336, + "saki": 5337, + "kanji": 5338, + "mita": 5339, + "sube": 5340, + "ryoku": 5341, + "matta": 5342, + "kudasai": 5343, + "omoi": 5344, + "wareru": 5345, + "hitsuyou": 5346, + "kashi": 5347, + "renai": 5348, + "kankei": 5349, + "gatte": 5350, + "ochi": 5351, + "motsu": 5352, + "sonzai": 5353, + "taishite": 5354, + "ame": 5355, + "seimei": 5356, + "kano": 5357, + "giri": 5358, + "kangaeru": 5359, + "yue": 5360, + "asa": 5361, + "onaji": 5362, + "yoru": 5363, + "niku": 5364, + "osaka": 5365, + "sukoshi": 5366, + "tama": 5367, + "kanojo": 5368, + "kite": 5369, + "mondai": 5370, + "amari": 5371, + "eki": 5372, + "kojin": 5373, + "haya": 5374, + "dete": 5375, + "atarashii": 5376, + "awa": 5377, + "gakkou": 5378, + "tsuzu": 5379, + "shukan": 5380, + "imashita": 5381, + "atae": 5382, + "darou": 5383, + "hataraku": 5384, + "gata": 5385, + "dachi": 5386, + "matsu": 5387, + "arimasen": 5388, + "seibutsu": 5389, + "mitsu": 5390, + "heya": 5391, + "yasui": 5392, + "deni": 5393, + "noko": 5394, + "haha": 5395, + "domo": 5396, + "kami": 5397, + "sudeni": 5398, + "nao": 5399, + "raku": 5400, + "ike": 5401, + "meta": 5402, + "kodomo": 5403, + "soshite": 5404, + "game": 5405, + "bakari": 5406, + "tote": 5407, + "hatsu": 5408, + "mise": 5409, + "mokuteki": 5410, + "dakara": 5411, + "[ja]": 5412, + "ő": 5413, + "ű": 5414, + "そ": 5415, + "な": 5416, + "ん": 5417, + "포": 5418, + "�": 5419, + "gy": 5420, + "eg": 5421, + "cs": 5422, + "ál": 5423, + "egy": 5424, + "át": 5425, + "ott": 5426, + "ett": 5427, + "meg": 5428, + "hogy": 5429, + "ég": 5430, + "ól": 5431, + "nek": 5432, + "volt": 5433, + "ág": 5434, + "nk": 5435, + "ék": 5436, + "ít": 5437, + "ák": 5438, + "ud": 5439, + "szer": 5440, + "mind": 5441, + "oz": 5442, + "ép": 5443, + "ért": 5444, + "mond": 5445, + "szt": 5446, + "nak": 5447, + "ől": 5448, + "csak": 5449, + "oly": 5450, + "áll": 5451, + "ány": 5452, + "mint": 5453, + "már": 5454, + "ött": 5455, + "nagy": 5456, + "ész": 5457, + "azt": 5458, + "elő": 5459, + "tud": 5460, + "ény": 5461, + "áz": 5462, + "még": 5463, + "köz": 5464, + "ely": 5465, + "ség": 5466, + "hoz": 5467, + "uk": 5468, + "kez": 5469, + "ám": 5470, + "aj": 5471, + "unk": 5472, + "vagy": 5473, + "szem": 5474, + "ember": 5475, + "fog": 5476, + "mert": 5477, + "ös": 5478, + "ság": 5479, + "leg": 5480, + "ünk": 5481, + "hát": 5482, + "ony": 5483, + "ezt": 5484, + "minden": 5485, + "ült": 5486, + "jó": 5487, + "kis": 5488, + "áj": 5489, + "úgy": 5490, + "most": 5491, + "ír": 5492, + "itt": 5493, + "elt": 5494, + "mondta": 5495, + "kell": 5496, + "ált": 5497, + "érd": 5498, + "tö": 5499, + "vár": 5500, + "lát": 5501, + "ők": 5502, + "vet": 5503, + "után": 5504, + "két": 5505, + "nap": 5506, + "ív": 5507, + "ály": 5508, + "vég": 5509, + "ök": 5510, + "dul": 5511, + "néz": 5512, + "ában": 5513, + "kül": 5514, + "akkor": 5515, + "szél": 5516, + "új": 5517, + "olyan": 5518, + "ked": 5519, + "hely": 5520, + "tör": 5521, + "ból": 5522, + "elm": 5523, + "ára": 5524, + "ló": 5525, + "volna": 5526, + "lehet": 5527, + 
"ebb": 5528, + "sok": 5529, + "olt": 5530, + "eket": 5531, + "bor": 5532, + "fej": 5533, + "gond": 5534, + "akar": 5535, + "fél": 5536, + "úl": 5537, + "otta": 5538, + "valami": 5539, + "jel": 5540, + "éd": 5541, + "arc": 5542, + "hall": 5543, + "föl": 5544, + "ába": 5545, + "olg": 5546, + "kir": 5547, + "old": 5548, + "kérd": 5549, + "jár": 5550, + "úr": 5551, + "zs": 5552, + "élet": 5553, + "ját": 5554, + "ov": 5555, + "éz": 5556, + "vil": 5557, + "őr": 5558, + "ög": 5559, + "lesz": 5560, + "koz": 5561, + "ább": 5562, + "király": 5563, + "eng": 5564, + "igaz": 5565, + "haj": 5566, + "kod": 5567, + "ról": 5568, + "több": 5569, + "szó": 5570, + "ében": 5571, + "öt": 5572, + "nyi": 5573, + "szól": 5574, + "gondol": 5575, + "egész": 5576, + "így": 5577, + "ős": 5578, + "obb": 5579, + "osan": 5580, + "ből": 5581, + "abb": 5582, + "őt": 5583, + "nál": 5584, + "kép": 5585, + "aztán": 5586, + "tart": 5587, + "beszél": 5588, + "előtt": 5589, + "aszt": 5590, + "maj": 5591, + "kör": 5592, + "hang": 5593, + "íz": 5594, + "incs": 5595, + "év": 5596, + "ód": 5597, + "ók": 5598, + "hozz": 5599, + "okat": 5600, + "nagyon": 5601, + "ház": 5602, + "ped": 5603, + "ezte": 5604, + "etlen": 5605, + "neki": 5606, + "majd": 5607, + "szony": 5608, + "ának": 5609, + "felé": 5610, + "egyszer": 5611, + "adt": 5612, + "gyer": 5613, + "amikor": 5614, + "foly": 5615, + "szak": 5616, + "őd": 5617, + "hú": 5618, + "ász": 5619, + "amely": 5620, + "ére": 5621, + "ilyen": 5622, + "oda": 5623, + "ják": 5624, + "tár": 5625, + "ával": 5626, + "lak": 5627, + "gyan": 5628, + "ély": 5629, + "út": 5630, + "kezd": 5631, + "mell": 5632, + "mikor": 5633, + "hez": 5634, + "való": 5635, + "szeret": 5636, + "rend": 5637, + "vissza": 5638, + "fő": 5639, + "asszony": 5640, + "ről": 5641, + "pedig": 5642, + "szép": 5643, + "ták": 5644, + "öv": 5645, + "világ": 5646, + "maga": 5647, + "szik": 5648, + "éj": 5649, + "ént": 5650, + "jött": 5651, + "szí": 5652, + "gat": 5653, + "ettem": 5654, + "hány": 5655, + "ást": 5656, + "ahol": 5657, + "őket": 5658, + "hár": 5659, + "nő": 5660, + "csi": 5661, + "talál": 5662, + "elte": 5663, + "látt": 5664, + "tört": 5665, + "hagy": 5666, + "esz": 5667, + "nél": 5668, + "kut": 5669, + "lány": 5670, + "amit": 5671, + "ső": 5672, + "ellen": 5673, + "magát": 5674, + "ugyan": 5675, + "külön": 5676, + "asz": 5677, + "mindig": 5678, + "lép": 5679, + "talán": 5680, + "szor": 5681, + "illan": 5682, + "nincs": 5683, + "vagyok": 5684, + "telen": 5685, + "ismer": 5686, + "isten": 5687, + "ított": 5688, + "jobb": 5689, + "ves": 5690, + "dult": 5691, + "juk": 5692, + "szen": 5693, + "öm": 5694, + "lett": 5695, + "egyik": 5696, + "bár": 5697, + "szi": 5698, + "szív": 5699, + "azon": 5700, + "eszt": 5701, + "föld": 5702, + "kuty": 5703, + "pillan": 5704, + "fér": 5705, + "től": 5706, + "tű": 5707, + "ébe": 5708, + "tött": 5709, + "barát": 5710, + "íg": 5711, + "ahogy": 5712, + "eh": 5713, + "ep": 5714, + "jelent": 5715, + "tat": 5716, + "szeg": 5717, + "mintha": 5718, + "egyen": 5719, + "szab": 5720, + "bizony": 5721, + "jon": 5722, + "öreg": 5723, + "dolg": 5724, + "csap": 5725, + "tiszt": 5726, + "állt": 5727, + "ancs": 5728, + "idő": 5729, + "ügy": 5730, + "miért": 5731, + "ót": 5732, + "csin": 5733, + "ének": 5734, + "vér": 5735, + "jól": 5736, + "alatt": 5737, + "mely": 5738, + "semmi": 5739, + "nyug": 5740, + "vág": 5741, + "követ": 5742, + "össze": 5743, + "mad": 5744, + "acs": 5745, + "fiú": 5746, + "másik": 5747, + "jön": 5748, + "szám": 5749, + "rész": 5750, + "kér": 5751, + "ével": 5752, + "[hu]": 5753, + 
"%": 5754, + "0": 5755, + "6": 5756, + "7": 5757, + "8": 5758, + "9": 5759, + "A": 5760, + "B": 5761, + "C": 5762, + "D": 5763, + "E": 5764, + "F": 5765, + "G": 5766, + "H": 5767, + "I": 5768, + "J": 5769, + "K": 5770, + "L": 5771, + "M": 5772, + "N": 5773, + "O": 5774, + "P": 5775, + "Q": 5776, + "R": 5777, + "S": 5778, + "T": 5779, + "U": 5780, + "V": 5781, + "W": 5782, + "X": 5783, + "Y": 5784, + "Z": 5785, + "Ł": 5786, + "α": 5787, + "ς": 5788, + "♥": 5789, + "か": 5790, + "ズ": 5791, + "因": 5792, + "国": 5793, + "怎": 5794, + "抱": 5795, + "推": 5796, + "有": 5797, + "樣": 5798, + "為": 5799, + "群": 5800, + "麼": 5801, + "eo": 5802, + "eul": 5803, + "eun": 5804, + "eon": 5805, + "ae": 5806, + "yeon": 5807, + "yeo": 5808, + "ui": 5809, + "hae": 5810, + "geo": 5811, + "neun": 5812, + "ssda": 5813, + "seo": 5814, + "eong": 5815, + "kk": 5816, + "jeo": 5817, + "deul": 5818, + "eum": 5819, + "yeong": 5820, + "geos": 5821, + "hag": 5822, + "aneun": 5823, + "iss": 5824, + "dae": 5825, + "eob": 5826, + "eol": 5827, + "geu": 5828, + "jeong": 5829, + "sae": 5830, + "doe": 5831, + "geul": 5832, + "eulo": 5833, + "bn": 5834, + "sang": 5835, + "bnida": 5836, + "haneun": 5837, + "jeog": 5838, + "saeng": 5839, + "ineun": 5840, + "anh": 5841, + "salam": 5842, + "eom": 5843, + "nae": 5844, + "gwa": 5845, + "yeol": 5846, + "eseo": 5847, + "myeon": 5848, + "ttae": 5849, + "hw": 5850, + "eobs": 5851, + "jang": 5852, + "gw": 5853, + "ileul": 5854, + "yeog": 5855, + "jeon": 5856, + "sig": 5857, + "jag": 5858, + "hago": 5859, + "deun": 5860, + "seong": 5861, + "gag": 5862, + "ham": 5863, + "dang": 5864, + "leul": 5865, + "sil": 5866, + "dong": 5867, + "handa": 5868, + "eossda": 5869, + "aeg": 5870, + "seon": 5871, + "haessda": 5872, + "issda": 5873, + "ege": 5874, + "mul": 5875, + "jung": 5876, + "jig": 5877, + "issneun": 5878, + "geun": 5879, + "seubnida": 5880, + "won": 5881, + "daneun": 5882, + "eoh": 5883, + "deo": 5884, + "gam": 5885, + "jal": 5886, + "haeng": 5887, + "yang": 5888, + "bang": 5889, + "jae": 5890, + "saenggag": 5891, + "hage": 5892, + "sog": 5893, + "eoss": 5894, + "jasin": 5895, + "jil": 5896, + "eog": 5897, + "gyeong": 5898, + "gong": 5899, + "deon": 5900, + "haess": 5901, + "eung": 5902, + "joh": 5903, + "nal": 5904, + "myeong": 5905, + "eona": 5906, + "igo": 5907, + "gyeol": 5908, + "yag": 5909, + "gwan": 5910, + "uli": 5911, + "yong": 5912, + "lyeo": 5913, + "jog": 5914, + "eohge": 5915, + "bog": 5916, + "tong": 5917, + "manh": 5918, + "jeol": 5919, + "geol": 5920, + "aga": 5921, + "naneun": 5922, + "uneun": 5923, + "cheol": 5924, + "dol": 5925, + "bad": 5926, + "hamyeon": 5927, + "yeossda": 5928, + "ibnida": 5929, + "gye": 5930, + "eos": 5931, + "hwal": 5932, + "salamdeul": 5933, + "jiman": 5934, + "dangsin": 5935, + "jib": 5936, + "ttaemun": 5937, + "ib": 5938, + "eneun": 5939, + "eug": 5940, + "jeom": 5941, + "geuleon": 5942, + "hwa": 5943, + "assda": 5944, + "beob": 5945, + "bae": 5946, + "yeoss": 5947, + "chin": 5948, + "chaeg": 5949, + "geon": 5950, + "naega": 5951, + "iga": 5952, + "sigan": 5953, + "gil": 5954, + "hyeon": 5955, + "lyeog": 5956, + "gug": 5957, + "pyeon": 5958, + "wae": 5959, + "jul": 5960, + "seul": 5961, + "deung": 5962, + "hajiman": 5963, + "eumyeon": 5964, + "pil": 5965, + "nyeon": 5966, + "tae": 5967, + "pyo": 5968, + "jineun": 5969, + "beon": 5970, + "hada": 5971, + "seol": 5972, + "sip": 5973, + "daleun": 5974, + "salm": 5975, + "gyo": 5976, + "cheon": 5977, + "hagi": 5978, + "cheoleom": 5979, + "gal": 5980, + "ila": 5981, + "kkaji": 5982, + "anhneun": 5983, + 
"habnida": 5984, + "tteon": 5985, + "haeseo": 5986, + "doenda": 5987, + "ttal": 5988, + "ilo": 5989, + "seub": 5990, + "byeon": 5991, + "myeo": 5992, + "beol": 5993, + "jeung": 5994, + "chim": 5995, + "hwang": 5996, + "euneun": 5997, + "jong": 5998, + "boda": 5999, + "nol": 6000, + "neom": 6001, + "buteo": 6002, + "jigeum": 6003, + "eobsda": 6004, + "daelo": 6005, + "yul": 6006, + "pyeong": 6007, + "seoneun": 6008, + "salang": 6009, + "seut": 6010, + "heom": 6011, + "hyang": 6012, + "gwang": 6013, + "eobsneun": 6014, + "hwag": 6015, + "gess": 6016, + "jagi": 6017, + "ileon": 6018, + "wihae": 6019, + "daehan": 6020, + "gaji": 6021, + "meog": 6022, + "jyeo": 6023, + "chaj": 6024, + "byeong": 6025, + "eod": 6026, + "gyeo": 6027, + "eoji": 6028, + "gul": 6029, + "modeun": 6030, + "insaeng": 6031, + "geulae": 6032, + "sasil": 6033, + "sib": 6034, + "chal": 6035, + "ilago": 6036, + "geum": 6037, + "doeneun": 6038, + "bol": 6039, + "gajang": 6040, + "geuligo": 6041, + "hyeong": 6042, + "haengbog": 6043, + "chul": 6044, + "chae": 6045, + "mang": 6046, + "dam": 6047, + "choe": 6048, + "sijag": 6049, + "cheong": 6050, + "ilaneun": 6051, + "ulineun": 6052, + "aen": 6053, + "kke": 6054, + "munje": 6055, + "teu": 6056, + "geuneun": 6057, + "bge": 6058, + "cheo": 6059, + "baeg": 6060, + "jug": 6061, + "sangdae": 6062, + "geugeos": 6063, + "dog": 6064, + "eus": 6065, + "jab": 6066, + "hyeo": 6067, + "tteohge": 6068, + "chil": 6069, + "swi": 6070, + "jileul": 6071, + "chang": 6072, + "ganeun": 6073, + "iji": 6074, + "dago": 6075, + "yohan": 6076, + "teug": 6077, + "ppun": 6078, + "aleul": 6079, + "haengdong": 6080, + "sesang": 6081, + "edo": 6082, + "mandeul": 6083, + "amyeon": 6084, + "kkae": 6085, + "bag": 6086, + "ideul": 6087, + "pum": 6088, + "meol": 6089, + "neul": 6090, + "hamkke": 6091, + "chung": 6092, + "dab": 6093, + "yug": 6094, + "sag": 6095, + "gwangye": 6096, + "ileohge": 6097, + "balo": 6098, + "neunde": 6099, + "hamyeo": 6100, + "geuleoh": 6101, + "anila": 6102, + "bangbeob": 6103, + "dasi": 6104, + "byeol": 6105, + "gyeon": 6106, + "gamjeong": 6107, + "oneul": 6108, + "janeun": 6109, + "yeom": 6110, + "lago": 6111, + "igi": 6112, + "hwan": 6113, + "teul": 6114, + "eoseo": 6115, + "sik": 6116, + "jaga": 6117, + "geuleom": 6118, + "geuleona": 6119, + "jeongdo": 6120, + "gyeog": 6121, + "geuleohge": 6122, + "geudeul": 6123, + "eut": 6124, + "imyeon": 6125, + "jjae": 6126, + "keun": 6127, + "isang": 6128, + "malhaessda": 6129, + "euge": 6130, + "nop": 6131, + "ingan": 6132, + "bomyeon": 6133, + "taeg": 6134, + "dwi": 6135, + "saneun": 6136, + "wan": 6137, + "anhgo": 6138, + "nugu": 6139, + "sung": 6140, + "damyeon": 6141, + "adeul": 6142, + "peul": 6143, + "ttala": 6144, + "geosdo": 6145, + "aji": 6146, + "meon": 6147, + "eumyeo": 6148, + "dolog": 6149, + "neung": 6150, + "modu": 6151, + "[ko]": 6152, + "\u0014": 6153, + "\u0016": 6154, + "$": 6155, + "*": 6156, + "|": 6157, + "°": 6158, + "º": 6159, + "ँ": 6160, + "ं": 6161, + "ः": 6162, + "अ": 6163, + "आ": 6164, + "इ": 6165, + "ई": 6166, + "उ": 6167, + "ऊ": 6168, + "ऋ": 6169, + "ऎ": 6170, + "ए": 6171, + "ऐ": 6172, + "ऑ": 6173, + "ऒ": 6174, + "ओ": 6175, + "औ": 6176, + "क": 6177, + "ख": 6178, + "ग": 6179, + "घ": 6180, + "ङ": 6181, + "च": 6182, + "छ": 6183, + "ज": 6184, + "झ": 6185, + "ञ": 6186, + "ट": 6187, + "ठ": 6188, + "ड": 6189, + "ढ": 6190, + "ण": 6191, + "त": 6192, + "थ": 6193, + "द": 6194, + "ध": 6195, + "न": 6196, + "ऩ": 6197, + "प": 6198, + "फ": 6199, + "ब": 6200, + "भ": 6201, + "म": 6202, + "य": 6203, + "र": 6204, + "ऱ": 6205, + 
"ल": 6206, + "ळ": 6207, + "व": 6208, + "श": 6209, + "ष": 6210, + "स": 6211, + "ह": 6212, + "़": 6213, + "ा": 6214, + "ि": 6215, + "ी": 6216, + "ु": 6217, + "ू": 6218, + "ृ": 6219, + "ॄ": 6220, + "ॅ": 6221, + "ॆ": 6222, + "े": 6223, + "ै": 6224, + "ॉ": 6225, + "ॊ": 6226, + "ो": 6227, + "ौ": 6228, + "्": 6229, + "ॐ": 6230, + "ॖ": 6231, + "क़": 6232, + "ख़": 6233, + "ग़": 6234, + "ज़": 6235, + "ड़": 6236, + "ढ़": 6237, + "फ़": 6238, + "य़": 6239, + "ॠ": 6240, + "।": 6241, + "॥": 6242, + "०": 6243, + "१": 6244, + "२": 6245, + "३": 6246, + "४": 6247, + "५": 6248, + "६": 6249, + "७": 6250, + "८": 6251, + "९": 6252, + "॰": 6253, + "ॲ": 6254, + "​": 6255, + "‌": 6256, + "‍": 6257, + "‎": 6258, + "₹": 6259, + "के": 6260, + "है": 6261, + "ें": 6262, + "्र": 6263, + "ार": 6264, + "ने": 6265, + "या": 6266, + "में": 6267, + "से": 6268, + "की": 6269, + "का": 6270, + "ों": 6271, + "ता": 6272, + "कर": 6273, + "स्": 6274, + "कि": 6275, + "को": 6276, + "र्": 6277, + "ना": 6278, + "क्": 6279, + "ही": 6280, + "और": 6281, + "पर": 6282, + "ते": 6283, + "हो": 6284, + "प्र": 6285, + "ान": 6286, + "्य": 6287, + "ला": 6288, + "वा": 6289, + "ले": 6290, + "सा": 6291, + "हैं": 6292, + "लि": 6293, + "जा": 6294, + "हा": 6295, + "भी": 6296, + "वि": 6297, + "इस": 6298, + "ती": 6299, + "न्": 6300, + "रा": 6301, + "मा": 6302, + "दे": 6303, + "दि": 6304, + "बा": 6305, + "ति": 6306, + "था": 6307, + "नि": 6308, + "कार": 6309, + "एक": 6310, + "हीं": 6311, + "हु": 6312, + "ंग": 6313, + "ैं": 6314, + "नी": 6315, + "सी": 6316, + "अप": 6317, + "त्": 6318, + "नहीं": 6319, + "री": 6320, + "मे": 6321, + "मु": 6322, + "ित": 6323, + "तो": 6324, + "पा": 6325, + "ली": 6326, + "लिए": 6327, + "गा": 6328, + "ल्": 6329, + "रह": 6330, + "रे": 6331, + "क्ष": 6332, + "मैं": 6333, + "सम": 6334, + "उस": 6335, + "जि": 6336, + "त्र": 6337, + "मि": 6338, + "चा": 6339, + "ोग": 6340, + "सं": 6341, + "द्": 6342, + "सि": 6343, + "आप": 6344, + "तु": 6345, + "दा": 6346, + "कु": 6347, + "यों": 6348, + "वे": 6349, + "जी": 6350, + "्या": 6351, + "उन": 6352, + "िक": 6353, + "ये": 6354, + "भा": 6355, + "्ट": 6356, + "हम": 6357, + "स्ट": 6358, + "शा": 6359, + "ड़": 6360, + "ंद": 6361, + "खा": 6362, + "म्": 6363, + "श्": 6364, + "यह": 6365, + "सक": 6366, + "पू": 6367, + "किया": 6368, + "अपने": 6369, + "रू": 6370, + "सु": 6371, + "मी": 6372, + "हि": 6373, + "जो": 6374, + "थे": 6375, + "रि": 6376, + "दी": 6377, + "थी": 6378, + "गी": 6379, + "लोग": 6380, + "गया": 6381, + "तर": 6382, + "न्ह": 6383, + "च्": 6384, + "वार": 6385, + "बी": 6386, + "प्": 6387, + "दो": 6388, + "टी": 6389, + "शि": 6390, + "करने": 6391, + "गे": 6392, + "ैसे": 6393, + "इन": 6394, + "ंड": 6395, + "साथ": 6396, + "पु": 6397, + "बे": 6398, + "बार": 6399, + "वी": 6400, + "अन": 6401, + "हर": 6402, + "उन्ह": 6403, + "होता": 6404, + "जब": 6405, + "कुछ": 6406, + "मान": 6407, + "क्र": 6408, + "बि": 6409, + "पह": 6410, + "फि": 6411, + "सर": 6412, + "ारी": 6413, + "रो": 6414, + "दू": 6415, + "कहा": 6416, + "तक": 6417, + "शन": 6418, + "ब्": 6419, + "स्थ": 6420, + "वह": 6421, + "बाद": 6422, + "ओं": 6423, + "गु": 6424, + "ज्": 6425, + "्रे": 6426, + "गर": 6427, + "रहे": 6428, + "वर्": 6429, + "हू": 6430, + "ार्": 6431, + "पी": 6432, + "बहु": 6433, + "मुझ": 6434, + "्रा": 6435, + "दिया": 6436, + "सब": 6437, + "करते": 6438, + "अपनी": 6439, + "बहुत": 6440, + "कह": 6441, + "टे": 6442, + "हुए": 6443, + "किसी": 6444, + "रहा": 6445, + "ष्ट": 6446, + "ज़": 6447, + "बना": 6448, + "सो": 6449, + "डि": 6450, + "कोई": 6451, + "व्य": 6452, + "बात": 6453, + "रु": 6454, + "वो": 6455, + "मुझे": 6456, + "द्ध": 6457, + "चार": 
6458, + "मेरे": 6459, + "वर": 6460, + "्री": 6461, + "जाता": 6462, + "नों": 6463, + "प्रा": 6464, + "देख": 6465, + "टा": 6466, + "क्या": 6467, + "अध": 6468, + "लग": 6469, + "लो": 6470, + "पि": 6471, + "यु": 6472, + "चे": 6473, + "जिस": 6474, + "ंत": 6475, + "ानी": 6476, + "पै": 6477, + "जन": 6478, + "ारे": 6479, + "ची": 6480, + "मिल": 6481, + "दु": 6482, + "देश": 6483, + "च्छ": 6484, + "ष्": 6485, + "सू": 6486, + "खे": 6487, + "चु": 6488, + "िया": 6489, + "लगा": 6490, + "बु": 6491, + "उनके": 6492, + "ज्ञ": 6493, + "क्षा": 6494, + "तरह": 6495, + "्यादा": 6496, + "वाले": 6497, + "पूर्": 6498, + "मैंने": 6499, + "काम": 6500, + "रूप": 6501, + "होती": 6502, + "उप": 6503, + "जान": 6504, + "प्रकार": 6505, + "भार": 6506, + "मन": 6507, + "हुआ": 6508, + "टर": 6509, + "हूँ": 6510, + "परि": 6511, + "पास": 6512, + "अनु": 6513, + "राज": 6514, + "लोगों": 6515, + "अब": 6516, + "समझ": 6517, + "डी": 6518, + "मौ": 6519, + "शु": 6520, + "चि": 6521, + "पे": 6522, + "कृ": 6523, + "सकते": 6524, + "मह": 6525, + "योग": 6526, + "दर्": 6527, + "उसे": 6528, + "ंध": 6529, + "डा": 6530, + "जाए": 6531, + "बो": 6532, + "ूल": 6533, + "मो": 6534, + "ोंने": 6535, + "ंस": 6536, + "तुम": 6537, + "पहले": 6538, + "बता": 6539, + "तथा": 6540, + "यो": 6541, + "गई": 6542, + "उत्": 6543, + "सकता": 6544, + "कम": 6545, + "ज्यादा": 6546, + "रख": 6547, + "समय": 6548, + "ारा": 6549, + "अगर": 6550, + "स्त": 6551, + "चल": 6552, + "फिर": 6553, + "वारा": 6554, + "करना": 6555, + "शी": 6556, + "गए": 6557, + "बन": 6558, + "ौर": 6559, + "होने": 6560, + "चाह": 6561, + "खु": 6562, + "हाँ": 6563, + "उन्हें": 6564, + "उन्होंने": 6565, + "छो": 6566, + "म्ह": 6567, + "प्रति": 6568, + "निक": 6569, + "वन": 6570, + "्यू": 6571, + "रही": 6572, + "तुम्ह": 6573, + "जैसे": 6574, + "ियों": 6575, + "क्यों": 6576, + "लों": 6577, + "फ़": 6578, + "ंत्र": 6579, + "होते": 6580, + "क्ति": 6581, + "त्य": 6582, + "कर्": 6583, + "कई": 6584, + "वं": 6585, + "किन": 6586, + "पो": 6587, + "कारण": 6588, + "ड़ी": 6589, + "भि": 6590, + "इसके": 6591, + "बर": 6592, + "उसके": 6593, + "द्वारा": 6594, + "शे": 6595, + "कॉ": 6596, + "दिन": 6597, + "न्न": 6598, + "ड़ा": 6599, + "स्व": 6600, + "निर्": 6601, + "मुख": 6602, + "लिया": 6603, + "टि": 6604, + "ज्ञान": 6605, + "क्त": 6606, + "द्र": 6607, + "ग्": 6608, + "क्स": 6609, + "मै": 6610, + "गो": 6611, + "जे": 6612, + "ट्र": 6613, + "मार": 6614, + "त्व": 6615, + "धार": 6616, + "भाव": 6617, + "करता": 6618, + "खि": 6619, + "कं": 6620, + "चाहि": 6621, + "यर": 6622, + "प्त": 6623, + "कों": 6624, + "ंच": 6625, + "जु": 6626, + "मत": 6627, + "अच्छ": 6628, + "हुई": 6629, + "कभी": 6630, + "लेकिन": 6631, + "भू": 6632, + "अपना": 6633, + "दूस": 6634, + "चाहिए": 6635, + "यू": 6636, + "घर": 6637, + "सबसे": 6638, + "मेरी": 6639, + "नाम": 6640, + "ढ़": 6641, + "ंट": 6642, + "ेंगे": 6643, + "बै": 6644, + "फा": 6645, + "एवं": 6646, + "यी": 6647, + "ग्र": 6648, + "क्षे": 6649, + "आज": 6650, + "आपको": 6651, + "भाग": 6652, + "ठा": 6653, + "कै": 6654, + "भारत": 6655, + "उनकी": 6656, + "पहु": 6657, + "सभी": 6658, + "धा": 6659, + "णा": 6660, + "सान": 6661, + "होगा": 6662, + "तब": 6663, + "संग": 6664, + "पर्": 6665, + "अव": 6666, + "तना": 6667, + "गि": 6668, + "यन": 6669, + "स्था": 6670, + "चित": 6671, + "ट्": 6672, + "छा": 6673, + "जाने": 6674, + "क्षेत्र": 6675, + "वाली": 6676, + "पूर्ण": 6677, + "समा": 6678, + "कारी": 6679, + "[hi]": 6680 + }, + "merges": [ + "t h", + "i n", + "th e", + "a n", + "e r", + "o u", + "r e", + "o n", + "a t", + "e d", + "e n", + "t o", + "in g", + "an d", + "i s", + "a s", + "a l", + "o r", + "o f", + "a r", + "i t", + "e s", 
+ "h e", + "s t", + "l e", + "o m", + "s e", + "b e", + "a d", + "o w", + "l y", + "c h", + "w h", + "th at", + "y ou", + "l i", + "v e", + "a c", + "t i", + "l d", + "m e", + "w as", + "g h", + "i d", + "l l", + "w i", + "en t", + "f or", + "a y", + "r o", + "v er", + "i c", + "h er", + "k e", + "h is", + "n o", + "u t", + "u n", + "i r", + "l o", + "w e", + "r i", + "h a", + "wi th", + "gh t", + "ou t", + "i m", + "i on", + "al l", + "a b", + "on e", + "n e", + "g e", + "ou ld", + "t er", + "m o", + "h ad", + "c e", + "s he", + "g o", + "s h", + "u r", + "a m", + "s o", + "p e", + "m y", + "d e", + "a re", + "b ut", + "om e", + "f r", + "the r", + "f e", + "s u", + "d o", + "c on", + "t e", + "a in", + "er e", + "p o", + "i f", + "the y", + "u s", + "a g", + "t r", + "n ow", + "ou n", + "th is", + "ha ve", + "no t", + "s a", + "i l", + "u p", + "th ing", + "fr om", + "a p", + "h im", + "ac k", + "at ion", + "an t", + "ou r", + "o p", + "li ke", + "u st", + "es s", + "b o", + "o k", + "u l", + "in d", + "e x", + "c om", + "s ome", + "the re", + "er s", + "c o", + "re s", + "m an", + "ar d", + "p l", + "w or", + "w ay", + "ti on", + "f o", + "c a", + "w ere", + "b y", + "at e", + "p ro", + "t ed", + "oun d", + "ow n", + "w ould", + "t s", + "wh at", + "q u", + "al ly", + "i ght", + "c k", + "g r", + "wh en", + "v en", + "c an", + "ou gh", + "in e", + "en d", + "p er", + "ou s", + "o d", + "id e", + "k now", + "t y", + "ver y", + "s i", + "a k", + "wh o", + "ab out", + "i ll", + "the m", + "es t", + "re d", + "y e", + "c ould", + "on g", + "you r", + "the ir", + "e m", + "j ust", + "o ther", + "in to", + "an y", + "wh i", + "u m", + "t w", + "as t", + "d er", + "d id", + "i e", + "be en", + "ac e", + "in k", + "it y", + "b ack", + "t ing", + "b r", + "mo re", + "a ke", + "p p", + "the n", + "s p", + "e l", + "u se", + "b l", + "sa id", + "o ver", + "ge t", + "e n", + "e r", + "c h", + "e i", + "i e", + "u n", + "i ch", + "ei n", + "s t", + "a n", + "t e", + "g e", + "a u", + "i n", + "s ch", + "d er", + "un d", + "d ie", + "d a", + "e s", + "a l", + "d en", + "a r", + "g en", + "z u", + "d e", + "h r", + "o n", + "t en", + "e l", + "o r", + "m i", + "s ie", + "da s", + "a t", + "b e", + "ein e", + "ich t", + "b er", + "l e", + "a ch", + "v er", + "s e", + "au f", + "w i", + "s o", + "t er", + "l ich", + "c k", + "u r", + "n icht", + "m m", + "b en", + "a s", + "w ar", + "r e", + "mi t", + "s ich", + "i g", + "l l", + "au s", + "i st", + "w ie", + "o ch", + "un g", + "an n", + "ü r", + "h n", + "i hr", + "s a", + "s en", + "t z", + "de m", + "ei t", + "u m", + "h at", + "wi r", + "v on", + "h a", + "s p", + "w ei", + "i er", + "r o", + "h er", + "r a", + "ein en", + "n e", + "v or", + "al s", + "an d", + "al l", + "w as", + "w o", + "r ei", + "st e", + "l ie", + "au ch", + "d u", + "d es", + "k o", + "ü ber", + "a m", + "b ei", + "h en", + "h m", + "l ei", + "a ber", + "w en", + "h l", + "g er", + "i m", + "u t", + "n ach", + "h e", + "i s", + "b r", + "f t", + "en t", + "i mm", + "j e", + "sch en", + "w er", + "s er", + "a b", + "ä n", + "m e", + "s ein", + "i t", + "o l", + "ch t", + "f ür", + "k l", + "f f", + "eine m", + "n en", + "w e", + "j a", + "u s", + "n och", + "hat te", + "t r", + "p f", + "h in", + "d i", + "ch en", + "b l", + "m an", + "r ü", + "ie l", + "s el", + "das s", + "i hn", + "mi r", + "sch l", + "ö n", + "g an", + "g t", + "ein er", + "st en", + "m ich", + "wen n", + "el l", + "g te", + "in d", + "m al", + "ge l", + "k en", + "n ur", + "mm en", + "f ü", + "er n", + "ö 
r", + "un ter", + "f r", + "an der", + "g r", + "i l", + "d ur", + "u ch", + "f e", + "t a", + "m en", + "m ach", + "d och", + "t i", + "dur ch", + "o s", + "g l", + "h al", + "ihr e", + "w ä", + "imm er", + "i hm", + "k ann", + "or t", + "d ann", + "l an", + "tz t", + "o der", + "hr en", + "e t", + "k ön", + "i ck", + "f a", + "in g", + "i r", + "wie der", + "da ß", + "m ein", + "f en", + "gan z", + "die se", + "st er", + "da r", + "w a", + "ge s", + "n a", + "f l", + "i gen", + "sch e", + "un gen", + "me hr", + "ß en", + "o t", + "k on", + "ge w", + "ha ben", + "ge h", + "ä t", + "s ind", + "d r", + "w el", + "un s", + "v o", + "m a", + "u te", + "sch on", + "b es", + "ge sch", + "b t", + "ch e", + "s on", + "o b", + "l a", + "p p", + "rü ck", + "s eine", + "k r", + "f re", + "ei l", + "zu m", + "u l", + "h ier", + "k t", + "i ge", + "sp r", + "k e", + "le ben", + "b st", + "z eit", + "i on", + "g ro", + "den n", + "h o", + "sch a", + "b ar", + "al le", + "ge gen", + "w ür", + "m ü", + "z e", + "wer den", + "je tzt", + "ko mmen", + "n ie", + "s ei", + "h eit", + "so ll", + "g lei", + "m eine", + "wo ll", + "n er", + "ha be", + "w ur", + "lich en", + "p er", + "as sen", + "n te", + "se hen", + "wir d", + "b is", + "g ar", + "i en", + "m us", + "u ß", + "ä r", + "st ell", + "k eit", + "z wei", + "sel bst", + "st a", + "p a", + "sa gte", + "te t", + "k am", + "s sen", + "v iel", + "u g", + "z en", + "h ei", + "m ann", + "wi ll", + "ge b", + "war en", + "ü ck", + "ä ch", + "m er", + "r u", + "w or", + "h au", + "ei gen", + "an g", + "we g", + "bl ick", + "f ra", + "all es", + "k a", + "au gen", + "f in", + "lich e", + "t o", + "un ser", + "der n", + "her r", + "n un", + "v ie", + "ch te", + "wo hl", + "f all", + "h t", + "ü n", + "et was", + "st and", + "en d", + "ä u", + "e m", + "m ö", + "te l", + "r ie", + "d ich", + "die s", + "h and", + "b in", + "ff en", + "nicht s", + "d an", + "p l", + "hn e", + "ihn en", + "es en", + "die ser", + "fr au", + "an t", + "ar t", + "di r", + "i sch", + "er st", + "glei ch", + "ko mm", + "h ör", + "ß e", + "d ig", + "se hr", + "z ei", + "sa m", + "au m", + "h ät", + "in gen", + "g ut", + "b o", + "m ut", + "ck en", + "kon nte", + "st imm", + "p ro", + "zu r", + "i tz", + "wei l", + "wür de", + "f ä", + "kön nen", + "k eine", + "f er", + "i schen", + "vo ll", + "ein es", + "se tz", + "z ie", + "de l", + "te te", + "sein er", + "ier en", + "ge st", + "zu rück", + "wur de", + "sch n", + "p r", + "lie ß", + "t ra", + "m ä", + "gen d", + "f ol", + "i k", + "schl a", + "scha ft", + "at er", + "wei ß", + "s einen", + "l assen", + "l u", + "und en", + "t eil", + "ne u", + "ier t", + "men schen", + "hm en", + "st r", + "g i", + "sa h", + "ihr en", + "el n", + "wei ter", + "ge hen", + "ig er", + "mach t", + "ta g", + "al so", + "hal ten", + "n is", + "ach t", + "ge ben", + "f or", + "o g", + "n at", + "m ar", + "de t", + "o hne", + "h aus", + "t ro", + "an ge", + "l au", + "sp iel", + "t re", + "sch r", + "in n", + "s u", + "l os", + "mach en", + "hät te", + "be g", + "wir k", + "al t", + "g lich", + "te s", + "r icht", + "fre und", + "m o", + "ihr er", + "f el", + "b el", + "so l", + "ein mal", + "e ben", + "h ol", + "h än", + "q u", + "ter n", + "h ö", + "sch w", + "re cht", + "wa hr", + "s einem", + "ste hen", + "hl en", + "in s", + "g ing", + "woll te", + "wi ssen", + "ung s", + "al d", + "as s", + "ja hr", + "m or", + "wel t", + "un der", + "zu sa", + "at ion", + "ko pf", + "lan g", + "hin ter", + "at z", + "st ra", + "an gen", + "an k", + "a de", + "gl au", + 
"f ach", + "hat ten", + "l o", + "f ort", + "ei cht", + "i ff", + "l er", + "m ei", + "diese m", + "k ein", + "f rei", + "fü hr", + "vo m", + "e s", + "e n", + "a i", + "o u", + "o n", + "l e", + "d e", + "r e", + "q u", + "a n", + "e r", + "en t", + "e t", + "l a", + "n e", + "i l", + "a r", + "i s", + "ai t", + "t e", + "a u", + "i n", + "qu e", + "i t", + "u r", + "s e", + "l es", + "c h", + "c e", + "m e", + "o r", + "ou r", + "a s", + "p r", + "a v", + "o m", + "ai s", + "u n", + "an t", + "ou s", + "t r", + "t i", + "l u", + "o i", + "e u", + "l le", + "s i", + "p ar", + "d es", + "an s", + "m ent", + "é t", + "es t", + "j e", + "u ne", + "a l", + "p as", + "t re", + "qu i", + "d u", + "r i", + "c on", + "s on", + "c om", + "e lle", + "d é", + "p our", + "d ans", + "l i", + "s a", + "r é", + "t ou", + "v ous", + "d i", + "v i", + "a g", + "a m", + "a t", + "ou v", + "a p", + "ti on", + "m on", + "s ur", + "c i", + "o s", + "p lu", + "s u", + "en d", + "a b", + "è re", + "ai n", + "m ais", + "o is", + "r es", + "plu s", + "é e", + "ai ent", + "m p", + "ch e", + "lu i", + "av e", + "ét ait", + "m a", + "s es", + "tou t", + "i r", + "v o", + "a c", + "s er", + "an d", + "f f", + "oi r", + "g r", + "av ait", + "é s", + "m es", + "n ous", + "eu x", + "b i", + "t er", + "c o", + "on s", + "p u", + "c es", + "g e", + "t u", + "le ur", + "pr o", + "d on", + "e ur", + "et te", + "ai re", + "ave c", + "d it", + "t é", + "i e", + "u s", + "il le", + "p er", + "com me", + "c r", + "or t", + "m i", + "e x", + "u x", + "v er", + "m o", + "è s", + "v e", + "au x", + "r a", + "j our", + "il s", + "bi en", + "c ou", + "p e", + "que l", + "p eu", + "c ette", + "t es", + "p o", + "in s", + "c u", + "m ê", + "s o", + "f ait", + "g u", + "m ar", + "ê tre", + "l o", + "it é", + "f r", + "a tion", + "en s", + "b r", + "n i", + "l é", + "d is", + "b le", + "m an", + "n é", + "pu is", + "mê me", + "qu es", + "f i", + "e l", + "ag e", + "g ar", + "m oi", + "en ce", + "on t", + "m ain", + "or s", + "au t", + "an ce", + "v en", + "m é", + "s ans", + "e m", + "s é", + "l on", + "h om", + "r o", + "u t", + "c ar", + "ab le", + "i m", + "de r", + "ch er", + "n o", + "vi e", + "au s", + "b e", + "de ux", + "en f", + "o ù", + "t en", + "p h", + "u re", + "te mp", + "p os", + "r ent", + "p é", + "f aire", + "p i", + "tr es", + "ç a", + "an g", + "end re", + "f or", + "p a", + "b on", + "s ou", + "in t", + "pr é", + "s ent", + "t ant", + "n er", + "c er", + "l à", + "l ais", + "pr ès", + "b re", + "c our", + "p et", + "i on", + "i ne", + "com p", + "l ait", + "tr ouv", + "t a", + "ent re", + "son t", + "de v", + "n u", + "temp s", + "d ou", + "r ait", + "b ou", + "qu and", + "jour s", + "l an", + "er s", + "av oir", + "ét é", + "a le", + "p re", + "f ois", + "or te", + "v é", + "m er", + "n on", + "t ous", + "j us", + "cou p", + "t s", + "hom me", + "ê te", + "a d", + "aus si", + "ur s", + "se u", + "or d", + "o b", + "m in", + "g é", + "co re", + "v a", + "v re", + "en core", + "se m", + "i te", + "au tre", + "pr is", + "peu t", + "u e", + "an te", + "m al", + "g n", + "ré p", + "h u", + "si on", + "vo tre", + "di re", + "e z", + "f em", + "leur s", + "m et", + "f in", + "c ri", + "m is", + "t our", + "r ai", + "j am", + "re gar", + "ri en", + "ver s", + "su is", + "p ouv", + "o p", + "v is", + "gr and", + "ant s", + "c or", + "re r", + "ar d", + "c é", + "t ent", + "pr es", + "v ou", + "f a", + "al ors", + "si eur", + "ai ne", + "le r", + "qu oi", + "f on", + "end ant", + "ar ri", + "eu re", + "a près", + "don c", + 
"it u", + "l è", + "s ait", + "t oi", + "ch a", + "ai l", + "as se", + "i mp", + "vo y", + "con n", + "p la", + "pet it", + "av ant", + "n om", + "t in", + "don t", + "d a", + "s ous", + "e mp", + "per son", + "el les", + "be au", + "par ti", + "ch o", + "pr it", + "tou jours", + "m en", + "r ais", + "jam ais", + "tr av", + "tion s", + "tr ès", + "v oi", + "r en", + "y eux", + "f er", + "v oir", + "pre mi", + "c a", + "g ne", + "h eure", + "r ou", + "e ff", + "no tre", + "ment s", + "t on", + "f ais", + "ce la", + "i er", + "rép on", + "con s", + "ai r", + "ô t", + "p endant", + "i ci", + "tou te", + "j et", + "p ort", + "ét aient", + "p en", + "h é", + "au tres", + "p ère", + "o c", + "quel ques", + "i que", + "l is", + "fem me", + "j ou", + "te ur", + "mon de", + "u se", + "n es", + "d re", + "a ff", + "r ap", + "par t", + "le ment", + "c la", + "f ut", + "quel que", + "pr endre", + "r ê", + "ai lle", + "s ais", + "ch es", + "le t", + "ch ar", + "è res", + "ent s", + "b er", + "g er", + "mo ins", + "e au", + "a î", + "j eu", + "h eur", + "é es", + "tr i", + "po int", + "m om", + "v ent", + "n ouv", + "gr an", + "tr ois", + "s ant", + "tout es", + "con tre", + "è rent", + "che z", + "ave z", + "û t", + "a lle", + "at t", + "p au", + "p orte", + "ouv er", + "b ar", + "l it", + "f ort", + "o t", + "as s", + "pr és", + "cho se", + "v it", + "mon sieur", + "h ab", + "t ête", + "j u", + "te ment", + "c tion", + "v rai", + "la r", + "c et", + "regar d", + "l ant", + "de m", + "s om", + "mom ent", + "il les", + "p le", + "p s", + "b es", + "m ère", + "c l", + "s our", + "y s", + "tr op", + "en ne", + "jus qu", + "av aient", + "av ais", + "jeu ne", + "de puis", + "person ne", + "f it", + "cer t", + "j o", + "g es", + "ou i", + "r est", + "sem b", + "c ap", + "m at", + "m u", + "lon g", + "fr an", + "f aut", + "it i", + "b li", + "che v", + "pr i", + "ent e", + "ain si", + "ch am", + "l ors", + "c as", + "d o", + "il i", + "b é", + "n os", + "an ge", + "su i", + "r it", + "cr o", + "gu e", + "d e", + "e n", + "e s", + "o s", + "l a", + "e r", + "q u", + "a r", + "a n", + "o n", + "qu e", + "a s", + "o r", + "e l", + "d o", + "a l", + "c i", + "u n", + "r e", + "a b", + "i n", + "t e", + "t o", + "s e", + "d i", + "t r", + "d a", + "c on", + "t a", + "s u", + "m i", + "c o", + "t i", + "l e", + "l os", + "n o", + "l o", + "í a", + "c u", + "c a", + "s i", + "v i", + "m e", + "p or", + "m o", + "p ar", + "r a", + "r i", + "la s", + "c h", + "r o", + "m a", + "p er", + "ó n", + "m en", + "de s", + "un a", + "m p", + "s o", + "ab a", + "p u", + "d os", + "t u", + "g u", + "er a", + "de l", + "h a", + "m u", + "l i", + "en t", + "m b", + "h ab", + "es t", + "g o", + "p a", + "r es", + "par a", + "p o", + "á s", + "m os", + "tr a", + "t en", + "an do", + "p i", + "qu i", + "b i", + "m an", + "co mo", + "v e", + "m ás", + "j o", + "ci ón", + "i s", + "t an", + "v o", + "da d", + "c e", + "a do", + "v er", + "f u", + "ci a", + "c er", + "p e", + "c as", + "c ar", + "men te", + "n i", + "su s", + "t ar", + "n a", + "f i", + "t er", + "z a", + "p ro", + "tr o", + "s a", + "l u", + "b a", + "per o", + "s er", + "c es", + "d as", + "d u", + "s in", + "e mp", + "m ar", + "l la", + "e x", + "á n", + "c or", + "i a", + "v a", + "r an", + "ch o", + "g a", + "y o", + "t os", + "c os", + "mi s", + "l es", + "t es", + "v en", + "h o", + "y a", + "en te", + "on es", + "hab ía", + "n u", + "u s", + "p as", + "h i", + "n os", + "es ta", + "la n", + "m as", + "t or", + "l le", + "h e", + "s on", + "b re", + "p re", + 
"ab an", + "d or", + "í an", + "i r", + "t as", + "é n", + "r u", + "en do", + "a que", + "er o", + "i o", + "qu é", + "m in", + "c ab", + "j a", + "de r", + "t al", + "é s", + "se ñ", + "or a", + "to do", + "la r", + "d on", + "g ar", + "s al", + "p r", + "cu ando", + "j e", + "h u", + "g un", + "b u", + "g i", + "d ar", + "n e", + "r as", + "de n", + "es to", + "par e", + "p en", + "é l", + "tr as", + "c an", + "b o", + "j os", + "mi en", + "pu e", + "c re", + "co mp", + "p on", + "d ía", + "tr os", + "s ab", + "so bre", + "es e", + "mb re", + "er on", + "a ñ", + "m or", + "f or", + "i do", + "por que", + "el la", + "p ri", + "g ran", + "f a", + "c en", + "di s", + "c ri", + "mu y", + "ch a", + "c al", + "es te", + "h as", + "c ó", + "g ra", + "r os", + "p os", + "o b", + "al l", + "aque l", + "j u", + "p res", + "m er", + "di jo", + "c ía", + "ent re", + "z o", + "ci ones", + "bi en", + "mb i", + "el o", + "t ó", + "in a", + "to dos", + "g en", + "ti en", + "est aba", + "de ci", + "ci o", + "h er", + "ñ o", + "l or", + "nu es", + "me di", + "l en", + "vi da", + "f e", + "al i", + "m on", + "c la", + "d re", + "pu es", + "al es", + "vo l", + "m í", + "r ar", + "b le", + "ci on", + "has ta", + "señ or", + "con o", + "a h", + "di os", + "s en", + "es a", + "ú n", + "v ar", + "s an", + "gu i", + "a c", + "o tros", + "ta do", + "bu en", + "ñ a", + "ti emp", + "ha cer", + "j er", + "f er", + "v u", + "f in", + "an a", + "as í", + "an tes", + "t in", + "ve z", + "mien to", + "j ar", + "la b", + "ch e", + "cas a", + "d r", + "es o", + "e go", + "di ó", + "an te", + "est á", + "m al", + "en cia", + "el i", + "í as", + "tiemp o", + "z ar", + "v an", + "m un", + "er ta", + "ta mbi", + "s í", + "b ar", + "a un", + "al e", + "mis mo", + "ent es", + "vi s", + "man o", + "el e", + "na da", + "se gu", + "me j", + "er ra", + "ab le", + "b e", + "ti r", + "un o", + "don de", + "to da", + "des de", + "r en", + "tambi én", + "cu er", + "per son", + "ho mbre", + "o tro", + "li b", + "tr ar", + "cu al", + "ha y", + "a u", + "ca da", + "t aba", + "i mp", + "men to", + "ten ía", + "qu er", + "er an", + "si emp", + "siemp re", + "er to", + "qu í", + "g os", + "pu és", + "el los", + "des pués", + "nu e", + "g an", + "l lo", + "in ter", + "có mo", + "tr i", + "ah ora", + "us te", + "tr aba", + "la do", + "in o", + "po co", + "er te", + "mu jer", + "i m", + "qui er", + "al gun", + "fu e", + "o jos", + "ent on", + "v os", + "es per", + "mu ch", + "o tra", + "a z", + "a d", + "in g", + "e za", + "a quí", + "ci as", + "gu a", + "mu cho", + "deci r", + "es ti", + "i dad", + "al go", + "e z", + "o cu", + "enton ces", + "di do", + "ent os", + "g ri", + "da do", + "i os", + "so l", + "dos e", + "uste d", + "qui en", + "a mi", + "un to", + "f r", + "mi r", + "mej or", + "b as", + "so lo", + "pre gun", + "tu r", + "al g", + "p la", + "to das", + "par te", + "e mb", + "c to", + "mun do", + "tien e", + "tan te", + "pa lab", + "tr an", + "aque lla", + "ci os", + "aun que", + "a y", + "cu en", + "ten er", + "f un", + "res pon", + "all í", + "x i", + "h an", + "pen s", + "con tra", + "tu ra", + "v al", + "di o", + "tr es", + "t re", + "tan to", + "ca min", + "m ó", + "es p", + "a da", + "í o", + "in s", + "ha cia", + "de j", + "est ar", + "i ón", + "g as", + "b er", + "v as", + "no che", + "é r", + "añ os", + "pa dre", + "gu s", + "á r", + "sin o", + "man os", + "ci do", + "es tu", + "a de", + "hu bi", + "vi r", + "b ri", + "ra z", + "ch i", + "pue de", + "men os", + "hab i", + "ho mb", + "ne ces", + "ma y", + "er os", + "r 
ía", + "he cho", + "es cu", + "l ti", + "án do", + "b us", + "cos as", + "t ú", + "es pa", + "re ci", + "c tor", + "pri m", + "di a", + "de se", + "mien tras", + "h or", + "fu er", + "i da", + "pos i", + "lan te", + "t on", + "an o", + "est as", + "p li", + "ch ar", + "lu ego", + "si ón", + "ci n", + "ti erra", + "m es", + "gu ar", + "ca do", + "en con", + "pr en", + "may or", + "f al", + "e r", + "o n", + "a n", + "t o", + "d i", + "r e", + "l a", + "i n", + "e n", + "a l", + "t a", + "c h", + "e l", + "r i", + "c o", + "t i", + "t e", + "s i", + "r a", + "u n", + "l e", + "l i", + "ch e", + "r o", + "c i", + "c a", + "s e", + "q u", + "m a", + "p o", + "s o", + "i l", + "d o", + "e s", + "v a", + "p er", + "l o", + "c on", + "d el", + "p a", + "m o", + "s a", + "p i", + "d a", + "m i", + "g i", + "s u", + "d e", + "v i", + "z i", + "m e", + "g li", + "n o", + "m en", + "v o", + "t u", + "n on", + "v e", + "t to", + "s t", + "on e", + "an o", + "ch i", + "er a", + "er e", + "f a", + "c e", + "z a", + "un a", + "b i", + "p re", + "s ta", + "o r", + "a r", + "f i", + "on o", + "t ra", + "n a", + "n el", + "n e", + "p ro", + "t ro", + "al e", + "v er", + "n i", + "c u", + "t ti", + "men te", + "del la", + "t er", + "zi one", + "g u", + "p e", + "t ta", + "an do", + "t à", + "al i", + "u o", + "qu el", + "co m", + "s en", + "co me", + "b a", + "al la", + "p ri", + "d u", + "qu es", + "l u", + "on i", + "g gi", + "pa r", + "s si", + "v en", + "in a", + "g a", + "pi ù", + "ci a", + "i m", + "co r", + "m an", + "in o", + "in i", + "t en", + "r an", + "b b", + "g o", + "s to", + "t re", + "a ve", + "a v", + "s ono", + "er i", + "a c", + "s se", + "er o", + "h a", + "s c", + "su l", + "f or", + "v ano", + "po r", + "s ti", + "su o", + "c chi", + "t an", + "z za", + "an che", + "p u", + "i o", + "t te", + "vo l", + "es s", + "s ci", + "co l", + "r u", + "p en", + "f u", + "al l", + "s so", + "s te", + "se m", + "s sa", + "d en", + "a d", + "t ri", + "de i", + "in e", + "ave va", + "men to", + "z z", + "a mo", + "g no", + "f o", + "un o", + "su a", + "g en", + "ri a", + "g e", + "st ra", + "s ì", + "c er", + "ch é", + "b u", + "a p", + "c en", + "d al", + "on a", + "s pe", + "g ni", + "b o", + "t t", + "del le", + "ques to", + "nel la", + "f f", + "d ere", + "an no", + "del l", + "un i", + "bb e", + "an ti", + "g ra", + "s p", + "en e", + "gi o", + "u to", + "qu al", + "gli a", + "qu ando", + "tu tto", + "c an", + "gli o", + "zi oni", + "ca m", + "h o", + "es so", + "s s", + "mo l", + "a t", + "lo ro", + "per ché", + "co sa", + "du e", + "po i", + "ca r", + "s co", + "ci o", + "to r", + "c co", + "c re", + "a m", + "g na", + "te m", + "pri ma", + "lu i", + "co sì", + "qu e", + "gu ar", + "ess ere", + "an i", + "con o", + "b ra", + "al le", + "m on", + "ri o", + "an co", + "cu i", + "s pi", + "vi a", + "g ran", + "gi or", + "a i", + "bi le", + "u l", + "ggi o", + "f e", + "an te", + "ma i", + "ta re", + "in ter", + "in di", + "re bbe", + "sen za", + "so lo", + "zi o", + "e d", + "en te", + "tu tti", + "sta to", + "zi a", + "d alla", + "tu ra", + "mi a", + "vi ta", + "quel la", + "qu a", + "ma r", + "do ve", + "g h", + "al lo", + "sem pre", + "zz o", + "si a", + "mo r", + "do po", + "por ta", + "d re", + "c cia", + "er ano", + "an ni", + "di o", + "chi a", + "en za", + "pro pri", + "qu i", + "m u", + "m b", + "an da", + "c ca", + "o cchi", + "ques ta", + "f fi", + "le i", + "par te", + "d on", + "r on", + "mi o", + "tan to", + "ri s", + "o gni", + "di s", + "r in", + "fa r", + "men ti", + "t el", + 
"anco ra", + "f ra", + "fa tto", + "man i", + "sen ti", + "p ra", + "tem po", + "es si", + "b bi", + "f in", + "a re", + "la re", + "per s", + "f on", + "b el", + "so r", + "d er", + "pre n", + "an za", + "di re", + "pi e", + "o ra", + "ver so", + "se gu", + "al tro", + "ta to", + "ca to", + "a to", + "vol ta", + "c c", + "fa re", + "pa re", + "ci ò", + "li b", + "bi li", + "n uo", + "s er", + "quel lo", + "co lo", + "p po", + "ca sa", + "tro va", + "o re", + "f er", + "r ono", + "d es", + "mol to", + "al mente", + "s ca", + "vo le", + "t ali", + "sul la", + "s ce", + "men o", + "an to", + "p un", + "s tu", + "ca pi", + "so l", + "gi u", + "m ini", + "m ano", + "z e", + "pi a", + "par ti", + "s al", + "la vo", + "ver o", + "r si", + "al tri", + "es ti", + "s cia", + "suo i", + "gli e", + "so tto", + "b ene", + "sc ri", + "t ale", + "de gli", + "n u", + "al c", + "uo mo", + "p el", + "f re", + "po te", + "es sa", + "s cu", + "si gno", + "el e", + "st ro", + "u ti", + "di a", + "si one", + "g re", + "f ini", + "ar ri", + "l un", + "c ri", + "e si", + "pa ssa", + "r à", + "men tre", + "an d", + "h anno", + "el o", + "u sci", + "gi a", + "gi à", + "di e", + "m ina", + "b e", + "ti ca", + "gior no", + "t in", + "es se", + "mo do", + "c al", + "s pa", + "propri o", + "l en", + "o ri", + "con tro", + "st ru", + "di ven", + "di sse", + "ra to", + "no i", + "v ere", + "pu ò", + "di ce", + "s an", + "es a", + "c ci", + "se con", + "re n", + "c cio", + "qual che", + "tu tta", + "g g", + "mon do", + "for ma", + "p li", + "m ma", + "pen sa", + "de va", + "tu r", + "fo sse", + "so pra", + "ta mente", + "n ess", + "qu anto", + "ra ga", + "un que", + "ca re", + "st re", + "gran de", + "pi cco", + "guar da", + "b en", + "nel l", + "a ff", + "po ssi", + "pre sen", + "r ò", + "pa ro", + "tu a", + "v in", + "an e", + "a s", + "ste sso", + "da v", + "ne i", + "nel le", + "gh i", + "pi o", + "ta r", + "an a", + "la to", + "si d", + "f ine", + "f uo", + "m er", + "z o", + "qua si", + "ul ti", + "i to", + "su e", + "si e", + "f il", + "allo ra", + "m in", + "ven i", + "t ano", + "el lo", + "d e", + "r a", + "e s", + "d o", + "e n", + "q u", + "c o", + "a s", + "o s", + "e r", + "a r", + "s e", + "qu e", + "a n", + "i n", + "i s", + "t o", + "ã o", + "t e", + "d a", + "m a", + "e l", + "t a", + "o r", + "i a", + "r e", + "e m", + "a l", + "co m", + "p a", + "o u", + "c a", + "u m", + "r o", + "v a", + "t i", + "s o", + "m en", + "n ão", + "h a", + "co n", + "m e", + "r i", + "pa ra", + "p o", + "d i", + "s a", + "v o", + "u ma", + "c i", + "n a", + "p or", + "n o", + "g u", + "s u", + "h o", + "an do", + "t ra", + "e i", + "v i", + "e u", + "i m", + "do s", + "el e", + "r es", + "m o", + "en t", + "f i", + "l a", + "e ra", + "l e", + "de s", + "el a", + "men te", + "l h", + "p er", + "l i", + "ç ão", + "m as", + "t er", + "m u", + "es t", + "v e", + "g o", + "l o", + "u s", + "ma is", + "v er", + "c ê", + "in ha", + "vo cê", + "f a", + "t u", + "c u", + "p ar", + "com o", + "p ro", + "s i", + "m os", + "e c", + "p re", + "d as", + "ç a", + "es ta", + "s er", + "u n", + "da de", + "d is", + "f o", + "e x", + "c h", + "i r", + "ra n", + "t ar", + "en te", + "g a", + "t r", + "p e", + "t os", + "b o", + "c ia", + "p en", + "c ar", + "s en", + "su a", + "se m", + "c as", + "f or", + "to u", + "n os", + "te m", + "r ia", + "m es", + "se u", + "co r", + "o n", + "a o", + "p os", + "ra m", + "v el", + "é m", + "t en", + "po de", + "t es", + "esta va", + "c e", + "b a", + "qu ando", + "m i", + "qu er", + "men to", + "se 
gu", + "t as", + "is so", + "mu i", + "g ar", + "t ro", + "d u", + "fa z", + "õ es", + "p es", + "an to", + "l u", + "p i", + "i x", + "ve z", + "s im", + "j a", + "p r", + "m in", + "b e", + "ra s", + "m an", + "p res", + "est á", + "c er", + "b re", + "p as", + "d ia", + "m b", + "dis se", + "n i", + "r os", + "es se", + "v ia", + "o lh", + "is a", + "an te", + "ê n", + "z a", + "qu i", + "b i", + "t inha", + "me u", + "s ão", + "m inha", + "a c", + "ri o", + "m ar", + "a t", + "p el", + "mui to", + "ta l", + "to r", + "fo i", + "h or", + "j o", + "b em", + "g i", + "f al", + "vo l", + "po n", + "di z", + "l ar", + "gu n", + "m or", + "r u", + "par ec", + "ç o", + "do r", + "pes so", + "n e", + "f er", + "b er", + "p u", + "po is", + "in a", + "es p", + "d ar", + "en do", + "de n", + "so bre", + "co s", + "p ri", + "al i", + "mes mo", + "ç ões", + "g ra", + "se us", + "me i", + "b ra", + "vi da", + "an tes", + "b ri", + "at é", + "ên cia", + "lh e", + "ti v", + "m ã", + "al g", + "qu anto", + "s ó", + "g os", + "de r", + "t ão", + "tu do", + "ent ão", + "r ou", + "es s", + "in da", + "b al", + "in do", + "ci o", + "n do", + "j á", + "va m", + "re i", + "l es", + "ei to", + "v is", + "tem po", + "de pois", + "c ha", + "m el", + "ch e", + "l ha", + "a inda", + "faz er", + "con tra", + "p ou", + "per gun", + "de ix", + "ta mb", + "ra r", + "al a", + "v en", + "t in", + "pel o", + "tamb ém", + "fi ca", + "pre c", + "el es", + "tra n", + "ha via", + "l á", + "to dos", + "j u", + "qu al", + "c an", + "ta do", + "cas a", + "es sa", + "n as", + "g em", + "m em", + "se i", + "na da", + "sen ti", + "c ri", + "ó s", + "de u", + "ei ro", + ". .", + "f un", + "as sim", + "s ou", + "ent re", + "com e", + "i or", + "h ar", + "f e", + "por que", + "s or", + "f in", + "ta mente", + "a qui", + "cu l", + "t ó", + "for ma", + "s ar", + "ou tra", + "olh os", + "i ma", + "m im", + "a go", + "in s", + "co u", + "g ran", + "v al", + "pesso as", + "era m", + "ei ra", + "a que", + "com p", + "de i", + "p ela", + "co isa", + "m ão", + "con h", + "ca da", + "ago ra", + "ia m", + "h á", + "con s", + "su as", + "gu ém", + "o b", + "l an", + "es ti", + "á s", + "la do", + "in ter", + "ca be", + "por ta", + "n em", + "í vel", + "r is", + "j e", + "n un", + "sem pre", + "con segu", + "h as", + "tra bal", + "f u", + "le v", + "l em", + "l as", + "va i", + "tr os", + "t ante", + "te i", + "pr ó", + "que m", + "tu ra", + "on de", + "cabe ça", + "nun ca", + "men tos", + "h um", + "de le", + "ver dade", + "t á", + "h os", + "el i", + "ent es", + "m er", + "alg um", + "diz er", + "s in", + "pen as", + "n ós", + "en quanto", + "ou tro", + "l ho", + "es te", + "mel hor", + "est ar", + "g an", + "b ar", + "pri mei", + "a u", + "i u", + "pen sa", + "a penas", + "p ra", + "es tou", + "con te", + "res pon", + "ho mem", + "do is", + "a do", + "c al", + "a b", + "l os", + "ç as", + "pou co", + "sen hor", + "t ando", + "esp era", + "pa i", + "ri os", + "no i", + "i da", + "ba ix", + "as e", + "is as", + "f r", + "ho ra", + "mu ndo", + "pas sa", + "fi car", + "to do", + "se ja", + "al mente", + "â n", + "c lar", + "a d", + "in c", + "f os", + "lo n", + "g ri", + "ou vi", + "v em", + "g e", + "ta va", + "á rio", + "mo n", + "s os", + "in ho", + "ma l", + "t an", + "t re", + "gran de", + "ran do", + "b u", + "v ou", + "ê s", + "co isas", + "a conte", + "lh er", + "g en", + "ci on", + "an os", + "i do", + "tal vez", + "est ão", + "li v", + "sa b", + "su r", + "ou tros", + "c re", + "qual quer", + "g ou", + "t ri", + "l í", + "tiv esse", + 
"ra do", + "prec isa", + "mã e", + "su s", + "t anto", + "de la", + "men os", + "s al", + "en tra", + "p é", + "ma ior", + "noi te", + "ti va", + "p ala", + "so n", + "ra ção", + "de us", + "s as", + "un i", + "l or", + "u l", + "in te", + "f ei", + "an o", + "par ti", + "pala v", + "tr ás", + "par te", + "b el", + "ci dade", + "lu gar", + "v os", + "vez es", + "do u", + "en contra", + "tr u", + "e ci", + "a r", + "e r", + "a n", + "e n", + "i n", + "i r", + "o r", + "d e", + "a k", + "ı n", + "a l", + "d i", + "d a", + "b u", + "b ir", + "y or", + "i l", + "e k", + "y a", + "m a", + "l a", + "e l", + "u n", + "k a", + "l ar", + "i m", + "d ı", + "e t", + "o n", + "d u", + "o l", + "e y", + "t ı", + "m i", + "h a", + "b a", + "l er", + "ü n", + "m ı", + "i z", + "l e", + "ı r", + "m e", + "i s", + "n e", + "o k", + "t a", + "s a", + "u m", + "r a", + "g ö", + "i k", + "s ı", + "d en", + "e s", + "b il", + "t i", + "l ı", + "ü z", + "i ç", + "ü r", + "g i", + "u r", + "t e", + "b en", + "d an", + "i y", + "ı m", + "u z", + "v e", + "c ak", + "a y", + "c e", + "i ş", + "ın ı", + "i yor", + "ba ş", + "d ü", + "a t", + "a m", + "g el", + "de ğ", + "k ar", + "i ̇", + "m u", + "e v", + "ö y", + "bu n", + "v ar", + "ya p", + "s en", + "an a", + "s un", + "in i", + "gö r", + "y ı", + "k i", + "l i", + "ar a", + "al ı", + "on u", + "ç ı", + "ş ey", + "s ın", + "k ı", + "ka d", + "s e", + "t an", + "a ğ", + "değ il", + "s in", + "ü k", + "a z", + "ç ok", + "s on", + "ş ı", + "b i", + "ü l", + "t u", + "v er", + "iç in", + "g e", + "k en", + "ey e", + "ol du", + "mı ş", + "y e", + "k al", + "m ek", + "l an", + "öy le", + "yor du", + "er i", + "y üz", + "mi ş", + "b e", + "m ak", + "o la", + "in e", + "y an", + "h er", + "c ek", + "yor um", + "b ak", + "ü m", + "ö n", + "lar ı", + "o ğ", + "d er", + "kad ar", + "h al", + "ar ı", + "s t", + "s an", + "ın da", + "du r", + "g ün", + "v a", + "y ok", + "y er", + "dı m", + "k o", + "da ha", + "l u", + "ın a", + "di m", + "e m", + "bil ir", + "ik i", + "s iz", + "s i", + "n a", + "di ğ", + "s u", + "b ü", + "ha y", + "s or", + "dü ş", + "ü ç", + "un u", + "ö r", + "d ir", + "m ü", + "c a", + "am an", + "f ak", + "a da", + "e de", + "son ra", + "h iç", + "ak i", + "ğ ı", + "bu l", + "r u", + "ma z", + "an la", + "bu ra", + "ge ç", + "ma ya", + "l en", + "k onu", + "c i", + "c u", + "d in", + "t ek", + "z aman", + "el er", + "ö z", + "dı r", + "gi bi", + "o t", + "ş a", + "g er", + "ler i", + "k im", + "k u", + "fak at", + "y ar", + "gö z", + "c ı", + "yor sun", + "b ek", + "in de", + "r o", + "p ek", + "bun u", + "l ik", + "m an", + "il er", + "e di", + "ö l", + "s ür", + "b in", + "s ır", + "çı k", + "sı l", + "al ar", + "k es", + "y ak", + "ç ek", + "yı l", + "e cek", + "ı z", + "gi t", + "ka p", + "a ma", + "ı l", + "lar ın", + "b iz", + "tı r", + "o y", + "an cak", + "d oğ", + "ç a", + "b ana", + "ş im", + "baş la", + "l ü", + "ma dı", + "ben i", + "t ir", + "y ük", + "lı k", + "be ş", + "b el", + "b er", + "m er", + "na sıl", + "tı k", + "k e", + "t ür", + "a v", + ". 
.", + "d aki", + "p ar", + "t er", + "ce ğ", + "t en", + "z ı", + "iy i", + "d ok", + "ben im", + "c ağ", + "n er", + "y en", + "ş u", + "me z", + "düş ün", + "ken di", + "şim di", + "y ol", + "y u", + "de v", + "is te", + "s ek", + "ma m", + "s öyle", + "di k", + "t o", + "k ur", + "oldu ğ", + "s ını", + "t ar", + "bil iyor", + "k an", + "y al", + "m eye", + "mu ş", + "f a", + "ka ç", + "bil e", + "iy e", + "t ü", + "e f", + "tı m", + "ev et", + "ç o", + "y et", + "g en", + "bura da", + "t im", + "bir az", + "es i", + "k or", + "doğ ru", + "in in", + "kı z", + "di ye", + "d ör", + "et ti", + "on un", + "is ti", + "ğ i", + "h e", + "s ana", + "ü ş", + "ar ka", + "hay ır", + "kar şı", + "h ar", + "il e", + "h ak", + "ı yor", + "ne den", + "s ev", + "sı z", + "ço cu", + "me m", + "ç alı", + "ol ur", + "b ır", + "g ir", + "is e", + "i h", + "c an", + "k ır", + "d ön", + "b öyle", + "sen i", + "! \"", + "al t", + "dör t", + "s öy", + "o ş", + "mu sun", + "la ş", + "h an", + "i p", + "ka y", + "h em", + "bü yük", + "a ç", + "bır ak", + "mi sin", + "s öz", + "u l", + "değ iş", + "ün ü", + "g ül", + "k ö", + "kar ı", + "ta mam", + "ol u", + "r ar", + "yen i", + "la m", + "mış tı", + "ya ş", + "al a", + "in iz", + "kad ın", + "bun un", + "m ey", + "al tı", + "y i", + "s o", + "in den", + "sen in", + "ya t", + "to p", + "s er", + "is i", + "d ün", + "s es", + "hiç bir", + "y on", + "d ın", + "t ün", + "baş ka", + "a s", + "he p", + "i t", + "ir mi", + "dev am", + "ola cak", + "ar tık", + "r e", + "dur um", + "im iz", + "üz el", + "ler ini", + "sa ğ", + "p ro", + "ger ek", + "y irmi", + "ş ek", + "ba ğ", + "me di", + "lar a", + "a h", + "t ur", + "y ür", + "ma sı", + "ka tı", + "de di", + "g ü", + "sor un", + "el i", + "ün e", + "mı z", + "yap ı", + "m il", + "ğ ını", + "t ara", + "m en", + "ha t", + "var dı", + "m et", + "konu ş", + "ar ak", + "lar ak", + "çocu k", + "bü tün", + "l ey", + "d ür", + "g üzel", + "ay ı", + "yap a", + "n ı", + "ay r", + "ö ne", + "yordu m", + "b an", + "i̇ ş", + "du m", + "un a", + "on a", + "yor lar", + "lar ını", + "çı kar", + "z an", + "se ç", + "l iyor", + "t ak", + "şı k", + "tek rar", + "a ş", + "e ş", + "miş ti", + "f ar", + "k in", + "im i", + "i f", + "e ğ", + "gi di", + "le ş", + "başla dı", + "gi de", + "ot ur", + "d de", + "ın dan", + "üz er", + "ın ın", + "n ız", + "u y", + "ye di", + "ka t", + "o larak", + "la dı", + "yal nız", + "ba h", + "iy et", + "m al", + "s ak", + "a çık", + "sın da", + ".. 
.", + "in san", + "ay nı", + "e der", + "is tan", + "uz un", + "sa h", + "d o", + "g eri", + "er ek", + "ol an", + "ger çek", + "f en", + "al an", + "dı ş", + "alı k", + "far k", + "ü st", + "sa de", + "r i", + "k iş", + "l dı", + "z or", + "et ir", + "her kes", + "s al", + "ö mer", + "s el", + "un da", + "ha f", + "bun a", + "y dı", + "pek i", + "ada m", + "ha z", + "sın a", + "kap ı", + "gör üş", + "sade ce", + "al dı", + "gel di", + "i e", + "n ie", + "n a", + "r z", + "s z", + "c z", + "p o", + "s t", + "c h", + "i ę", + "d z", + "n i", + "a ł", + "r a", + "j e", + "r o", + "d o", + "s ię", + "z a", + "g o", + "e m", + "w i", + "c i", + "rz e", + "k o", + "l e", + "l i", + "w a", + "t o", + "k a", + "m i", + "ż e", + "t a", + "w ie", + "b y", + "m o", + "w y", + "rz y", + "ł a", + "j a", + "n o", + "ł o", + "w o", + "p a", + "m a", + "t e", + "t y", + "n y", + "k i", + "d a", + "n e", + "dz ie", + "dz i", + "cz y", + "c ie", + "m y", + "p rze", + "d y", + "o d", + "l a", + "k ie", + "r y", + "st a", + "j ą", + "ó w", + "c e", + "p rzy", + "c o", + "k u", + "m ie", + "sz y", + "cz e", + "r e", + "b a", + "s i", + "b ie", + "m u", + "w e", + "c y", + "ni a", + "ś ci", + "sz e", + "je st", + "k t", + "s a", + "b o", + "t u", + "ż y", + "n ą", + "b i", + "r u", + "a le", + "kt ó", + "p ra", + "ał a", + "m nie", + "p ie", + "ł y", + "cz a", + "ja k", + "ro z", + "r ó", + "l u", + "z na", + "g a", + "ra z", + "ł u", + "ta k", + "j u", + "p i", + "ś ć", + "s o", + "wi a", + "m ó", + "ch o", + "w szy", + "p e", + "s po", + "c a", + "g dy", + "w ał", + "w ię", + "d e", + "b e", + "p ro", + "ł em", + "j ę", + "s k", + "z e", + "l o", + "g i", + "r ę", + "do b", + "d u", + "ju ż", + "st o", + "b ę", + "ał em", + "sz a", + "m e", + "po d", + "d la", + "pa n", + "n ę", + "z o", + "mo że", + "ś li", + "s ie", + "ał o", + "t em", + "l ko", + "ny ch", + "po wie", + "c ię", + "s u", + "ty lko", + "i n", + "b u", + "na j", + "ch a", + "te go", + "p u", + "s ki", + "ne go", + "wszy st", + "sz cze", + "je d", + "je j", + "t wo", + "ą d", + "ś my", + "cz ę", + "wa ć", + "je go", + "ż a", + "i m", + "s y", + "pra w", + "ty m", + "któ ry", + "ał y", + "t rze", + "nie j", + "s e", + "ny m", + "i ch", + "o b", + ". 
.", + "g ło", + "ją c", + "mó wi", + "s ka", + "o n", + "ne j", + "s łu", + "w ła", + "bę dzie", + "d ę", + "p ó", + "be z", + "ni c", + "p ła", + "ś cie", + "mi a", + "s ą", + "t rzy", + "kie m", + "by ł", + "mo g", + "ro bi", + "ta m", + "c u", + "te n", + "m ię", + "z y", + "pe w", + "ci a", + "my ś", + "prze d", + "s ko", + "n u", + "któ re", + "a l", + "l ę", + "w sze", + "ą c", + "by ło", + "so bie", + "p y", + "ci ą", + "ba r", + "je szcze", + "h a", + "t ę", + "b ra", + "cza s", + "sz ę", + "g ł", + "k ę", + "ma r", + "cz u", + "prze z", + "f i", + "s ło", + "w z", + "k to", + "k ów", + "cz o", + "li śmy", + "st ra", + "wię c", + "r ą", + "ma m", + "w ó", + "rz a", + "g ro", + "no ści", + "f a", + "we t", + "ną ł", + "ś mie", + "na wet", + "mu si", + "s wo", + "te j", + "w ą", + "w u", + "wi ą", + "ni u", + "cz ą", + "b li", + "dz o", + "s kie", + "n em", + "je śli", + "cze go", + "ch y", + "d ł", + "ty ch", + "by m", + "ż o", + "e ś", + "si ą", + "kie dy", + "na s", + "w ró", + "dz e", + "d ro", + "t ra", + "r ów", + "pa ni", + "z ie", + "ku l", + "na d", + "ch wi", + "ni m", + "t ro", + "by ć", + "cho dzi", + "ni o", + "dob rze", + "te raz", + "wo kul", + "co ś", + "k ł", + "pie r", + "h e", + "g dzie", + "dz y", + "p ię", + "d ź", + "k ą", + "g ó", + "z da", + "ch ce", + "st ę", + "o r", + "ś wia", + "wszyst ko", + "st ro", + "pe ł", + "wie m", + "wie l", + "ka ż", + "ki m", + "rz u", + "s ły", + "jed na", + "z u", + "myś l", + "mó j", + "g u", + "wa r", + "jest em", + "ó ż", + "mie j", + "mo ż", + "k ła", + "re sz", + "d łu", + "st wo", + "n ię", + "ma sz", + "że by", + "nie m", + "ja kie", + "st y", + "ni ą", + "we j", + "o j", + "g ra", + "s ła", + "no ść", + "z ło", + "sz czę", + ".. .", + "r i", + "le j", + "we go", + "c ał", + "dzi ał", + "ki ch", + "dz a", + "dz ię", + "o czy", + "zo sta", + "cz ło", + "na m", + "ki l", + "o na", + "sz u", + "w ę", + "pa r", + "mi ał", + "st rze", + "ce j", + "e j", + "zna j", + "da ć", + "miej s", + "k ró", + "k ry", + "bar dzo", + "si a", + "z i", + "ś nie", + "l ą", + "g ie", + "cie bie", + "d ni", + "st u", + "po trze", + "wokul ski", + "u wa", + "u mie", + "jedna k", + "k ra", + "wró ci", + "czło wie", + "czy ć", + "by ła", + "że li", + "m ę", + "c ę", + "z robi", + "mog ę", + "pro wa", + "r em", + "nie ch", + "cz nie", + "k ro", + "t ą", + "ch ci", + "b ro", + "dzie ć", + "sz ą", + "pa d", + "t rz", + "t ru", + "je m", + "a ni", + "t ów", + "a r", + "d ru", + "ta j", + "rze kł", + "sa m", + "st e", + "nie go", + "ta kie", + "w ała", + "to wa", + "ka pła", + "wi dzi", + "po dob", + "dz ę", + "t ał", + "stę p", + "b ą", + "po ko", + "w em", + "g ę", + "a by", + "g e", + "al bo", + "s pra", + "z no", + "de n", + "s mo", + "je sz", + "k się", + "jest eś", + "po z", + "ni gdy", + "k sią", + "c óż", + "w s", + "po w", + "t ka", + "ś wie", + "sz ka", + "sa mo", + "s ł", + "rz ę", + "na le", + "chce sz", + "ni k", + "p ę", + "chy ba", + "cią g", + "ją cy", + "wo j", + "na sze", + "mnie j", + "wię cej", + "z wy", + "o sta", + "f e", + "wa ż", + "h o", + "se r", + "śmie r", + "wie r", + "dz ą", + "za ś", + "gdy by", + "ja ki", + "wo l", + "wi n", + "d ą", + "ści a", + "roz ma", + "wa l", + "pa nie", + "sta r", + "ka z", + "je żeli", + "d em", + "w ra", + "ko ń", + "sie bie", + "zno wu", + "p ró", + "cz em", + "st wa", + "i sto", + "pó ł", + "d ał", + "ko bie", + "ała m", + "wy ch", + "ce sa", + "ni ch", + "za wsze", + "dzi ć", + "te ż", + "le pie", + "pro szę", + "k re", + "t wa", + "o t", + "ł ą", + "ch u", + "c ą", + "p rz", + "ł e", + "sze 
dł", + "od powie", + "my śli", + "ś wią", + "e n", + "e r", + "d e", + "a n", + "e t", + "i j", + "i n", + "e l", + "a a", + "s t", + "o r", + "g e", + "i s", + "a t", + "i e", + "c h", + "o n", + "e en", + "h et", + "i t", + "v er", + "aa r", + "a l", + "o or", + "g en", + "v an", + "o p", + "d en", + "h e", + "o m", + "t e", + "w e", + "i k", + "r e", + "z e", + "ij n", + "d at", + "b e", + "d er", + "in g", + "o e", + "ij k", + "a an", + "ch t", + "v oor", + "l e", + "i et", + "r o", + "m o", + "k en", + "z ijn", + "m en", + "i g", + "j e", + "n iet", + "a r", + "o o", + "i d", + "u n", + "i l", + "s ch", + "mo et", + "st e", + "u r", + "o l", + "he b", + "u it", + "g el", + "w ij", + "a s", + "m e", + "t en", + "w or", + "o u", + "v en", + "l en", + "aa t", + "d it", + "m et", + "r a", + "b en", + "s p", + "o ver", + "d ie", + "n o", + "w er", + "l ijk", + "f t", + "s l", + "an d", + "v e", + "t er", + "i er", + "i en", + "t o", + "d aar", + "g r", + "b el", + "de ze", + "d u", + "a g", + "k an", + "wor den", + "in gen", + "moet en", + "n en", + "on der", + "heb ben", + "r u", + "oo k", + "s en", + "c t", + "k t", + "no g", + "aa l", + "w as", + "u l", + "e er", + "b ij", + "m ijn", + "p ro", + "v ol", + "d o", + "k om", + "at ie", + "e ft", + "k el", + "al s", + "r ij", + "he id", + "a f", + "st el", + "m aar", + "a p", + "we e", + "a d", + "he eft", + "w aar", + "i cht", + "d an", + "er en", + "n e", + "w el", + "w at", + "w il", + "a cht", + "aa g", + "ge b", + "c on", + "z o", + "k e", + "b et", + "h ij", + "d ig", + "k un", + "u w", + "d t", + "d oor", + "t ij", + "a m", + "an g", + "on d", + "er s", + "is ch", + "ge en", + "i ge", + "ge v", + "ve el", + "n u", + "m a", + "on s", + "o f", + "b l", + "n aar", + "g ro", + "p l", + "an der", + "at en", + "kun nen", + "e cht", + "h ier", + "g oe", + "an t", + "u s", + "t wee", + "on t", + "de lijk", + "el e", + "u ur", + "al le", + "t oe", + "me er", + "i st", + "n a", + "n ie", + "on ze", + "l o", + "i m", + "p en", + "h ad", + "tij d", + "h oe", + "to t", + "z ou", + "a k", + "aa k", + "a men", + "d r", + "w oor", + "s e", + "wor dt", + "o t", + "gel ijk", + "g aan", + "i c", + "g er", + "k er", + "el d", + "e m", + "h ou", + "de l", + "z en", + "z el", + "te gen", + "b o", + "kom en", + "c om", + "i gen", + "e it", + "wer k", + "goe d", + "z al", + "z ij", + "sl ag", + "e s", + "z ien", + "a st", + "echt er", + "it ie", + "t ie", + "el ijk", + "m is", + "isch e", + "bel an", + "h aar", + "i ch", + "b er", + "h an", + "v r", + "al e", + "c i", + "gr ijk", + "in d", + "do en", + "l and", + "belan grijk", + "p un", + "op en", + "ct ie", + "zel f", + "m ij", + "it eit", + "ste m", + "me e", + "ar en", + "al l", + "b r", + "re cht", + "d ien", + "h u", + "g aat", + "pro b", + "m oe", + "p er", + "a u", + "ul len", + "z ich", + "daar om", + "or m", + "k l", + "v o", + "en t", + "st aat", + "z it", + "du i", + "n at", + "du s", + "d s", + "ver slag", + "kel ijk", + "prob le", + "w et", + "ge m", + "c r", + "i on", + "p r", + "sch ap", + "g d", + "h un", + "z a", + "er d", + "z et", + "st aan", + "st r", + "m aal", + "in der", + "e id", + "st en", + "p ar", + "k ken", + "ge d", + "z ullen", + "re s", + "men sen", + "j aar", + "re gel", + "ie der", + "vol gen", + "ge ven", + "e ven", + "l u", + "bl ij", + "i ë", + "k o", + "u we", + "m an", + "ma ken", + "l ie", + "g a", + "oe k", + "nie uwe", + "b aar", + "h o", + "h er", + "in ter", + "ander e", + "ru ik", + "s u", + "a gen", + "or t", + "m er", + "ou w", + "st er", + "wil len", + "aa 
kt", + "h oo", + "an den", + "f f", + "l ig", + "t re", + "s amen", + "ze er", + "dui delijk", + "ant woor", + "he el", + "men t", + "pun t", + "hou den", + "we g", + "vr aag", + "gel e", + "een s", + "be sch", + "om en", + "er g", + "do el", + "d ag", + "sp e", + "ur en", + "ing s", + "or en", + "l ang", + "de len", + "m ar", + "ste un", + "in nen", + "p ol", + "o on", + "i de", + "s n", + "s ie", + "r icht", + "z onder", + "no dig", + "all een", + "m id", + "ra gen", + "iet s", + "ver sch", + "geb ruik", + "st u", + "ro uw", + "stel len", + "be g", + "men ten", + "v in", + "eer ste", + "l aat", + "gro ot", + "oo d", + "to ch", + "l aten", + "aar d", + "s le", + "de el", + "st and", + "pl aat", + "re e", + "bet re", + "d i", + "l id", + "uit en", + "ra cht", + "bel eid", + "g et", + "ar t", + "st ie", + "st aten", + "g gen", + "re ken", + "e in", + "al en", + "m ing", + "mo gelijk", + "gro te", + "al tijd", + "z or", + "en kel", + "w ik", + "pol itie", + "e igen", + "el k", + "han del", + "g t", + "k we", + "m aat", + "el en", + "i p", + "v rij", + "s om", + "je s", + "aa m", + "hu is", + "v al", + "we er", + "lid staten", + "k ing", + "k le", + "be d", + "gev al", + "stel l", + "a i", + "wik kel", + "kwe stie", + "t al", + "ste e", + "a b", + "h el", + "kom st", + "p as", + "s s", + "it u", + "i den", + "eer d", + "m in", + "c e", + "p o", + "twee de", + "proble em", + "w aren", + "us sen", + "sn el", + "t ig", + "ge w", + "j u", + "ul t", + "ne men", + "com mis", + "versch il", + "k on", + "z oek", + "k rij", + "gr aag", + "den k", + "l anden", + "re den", + "be sl", + "oe g", + "bet er", + "he den", + "m ag", + "p e", + "bo ven", + "a c", + "con t", + "f d", + "h ele", + "k r", + "v ier", + "w in", + "ge z", + "k w", + "m il", + "v or", + "he m", + "ra m", + "aa s", + "ont wikkel", + "dr ie", + "v aak", + "plaat s", + "l a", + "g ang", + "ij f", + "f in", + "nat uur", + "t ussen", + "u g", + "in e", + "d a", + "b at", + "kom t", + "w acht", + "aa d", + "u t", + "é n", + "acht er", + "geb ie", + "ver k", + "lig t", + "c es", + "nie uw", + "van d", + "s t", + "n í", + "j e", + "p o", + "c h", + "r o", + "n a", + "s e", + "t o", + "n e", + "l e", + "k o", + "l a", + "d o", + "r a", + "n o", + "t e", + "h o", + "n ě", + "v a", + "l i", + "l o", + "ř e", + "c e", + "d e", + "v e", + "b y", + "n i", + "s k", + "t a", + "n á", + "z a", + "p ro", + "v o", + "v ě", + "m e", + "v á", + "s o", + "k a", + "r á", + "v y", + "z e", + "m i", + "p a", + "t i", + "st a", + "m ě", + "n é", + "ř i", + "ř í", + "m o", + "ž e", + "m a", + "j í", + "v ý", + "j i", + "d ě", + "r e", + "d a", + "k u", + "j a", + "c i", + "r u", + "č e", + "o b", + "t ě", + "m u", + "k y", + "d i", + "š e", + "k é", + "š í", + "t u", + "v i", + "p ře", + "v í", + "s i", + "n ý", + "o d", + "so u", + "v é", + "n y", + "r i", + "d y", + "b u", + "b o", + "t y", + "l á", + "l u", + "n u", + "ž i", + "m á", + "st i", + "c í", + "z á", + "p ra", + "sk é", + "m í", + "c o", + "d u", + "d á", + "by l", + "st o", + "s a", + "t í", + "je d", + "p ří", + "p ři", + "t é", + "s í", + "č i", + "v ní", + "č a", + "d í", + "z i", + "st u", + "p e", + "b a", + "d ní", + "ro z", + "va l", + "l í", + "s po", + "k á", + "b e", + "p i", + "no u", + "ta k", + "st e", + "r y", + "l é", + "vě t", + "se m", + "p ě", + "ko n", + "ne j", + "l y", + "ko u", + "ý ch", + "b ě", + "p r", + "f i", + "p rá", + "a le", + "ja ko", + "po d", + "ž í", + "z í", + "j sou", + "j sem", + "ch o", + "l ní", + "c ké", + "t á", + "m y", + "a k", + "h u", + "va t", + 
"pře d", + "h la", + "k e", + "st á", + "č í", + "š i", + "s le", + "k la", + "š tě", + "lo u", + "m ů", + "z na", + "ch á", + "o r", + "p ů", + "h a", + "b i", + "ta ké", + "d ů", + "no st", + "t ře", + "te r", + "p u", + "i n", + "v r", + "ve l", + "sk u", + "v še", + "t ní", + "do b", + "by la", + "č ní", + "ja k", + "v u", + "je ho", + "b ý", + "vá ní", + "ný ch", + "po u", + "te n", + "t ři", + "v z", + "st ře", + "d va", + "h le", + "č á", + "no sti", + "c k", + "v š", + "vo u", + "s u", + "h e", + "h ra", + "je n", + "s y", + "da l", + "po z", + "s lo", + "te l", + "d ru", + "de n", + "vš ak", + "g i", + "k dy", + "by lo", + "bu de", + "st ra", + "j ší", + "m é", + "me n", + "vý ch", + "ní m", + "s m", + "ko li", + "r ů", + "t ra", + "mů že", + "ne ní", + "ho d", + "b í", + "do u", + "sk a", + "t ý", + "st ě", + "u je", + "s á", + "pě t", + "ne s", + "k rá", + "to m", + "st ví", + "v ně", + "se d", + "s vé", + "p í", + "z o", + "mu sí", + "u ž", + "tí m", + "jí cí", + "jed no", + "t r", + "ča s", + "e v", + "č ty", + "sk ý", + "ni c", + "ev ro", + "to ho", + "h y", + "k ter", + "r ní", + "st í", + "s vě", + "pa k", + "vše ch", + "k ů", + "n g", + "á d", + "chá zí", + "a ni", + "a r", + "jed na", + "bý t", + "t ro", + "k ra", + "pr vní", + "m no", + "ské ho", + "p á", + "p la", + "le m", + "ne bo", + "ke m", + "st ro", + "s la", + "né ho", + "z de", + "dal ší", + "ř a", + "čty ři", + "h rá", + "dru h", + "l ně", + "v la", + "sk ých", + "š ko", + "pů so", + "pro to", + "v ů", + "sk á", + "ve n", + "še st", + "d ně", + "je ště", + "me zi", + "te k", + "s ko", + "ch a", + "ně koli", + "be z", + "g ra", + "ji ž", + "č ně", + "j á", + "s lu", + "z ná", + "ve r", + "sed m", + "k ro", + "ta m", + "a no", + "v lá", + "o sm", + "byl y", + "vá m", + "ck ý", + "te ch", + "dě ji", + "vel mi", + "le ži", + "va la", + "l ý", + "t vo", + "spo le", + "ch u", + "stu p", + "mo ž", + "evro p", + "g e", + "sta l", + "j de", + "ch y", + "ro di", + "je jí", + "po li", + "de vět", + "s me", + "a ž", + "té to", + "re m", + "d é", + "f or", + "u ni", + "f o", + "ten to", + "a u", + "ka ž", + "nu la", + "na d", + "by ch", + "mo c", + "sto u", + "e x", + "le n", + "k do", + "z d", + "pra co", + "to mu", + "ný m", + "ži vo", + "ze m", + "f e", + "f u", + "ná sle", + "j o", + "sk y", + "ji ch", + "h á", + "mě l", + "dě la", + "j sme", + "p re", + "ni ce", + "ste j", + "ne m", + "st ní", + "he m", + "ná ro", + "z u", + "b li", + "ni t", + "pa r", + "a l", + "poz ději", + "ta ko", + "n ce", + "če r", + "ší m", + "ně co", + "vá l", + "ře j", + "krá t", + "á lní", + "u r", + ". 
.", + "a si", + "kter é", + "sta v", + "ma jí", + "my s", + "do bě", + "s ně", + "ce n", + "z y", + "z ku", + "t ů", + "ch od", + "s pě", + "je jich", + "sou čas", + "d r", + "va li", + "ri e", + "k te", + "pr ů", + "ze ní", + "pa t", + "a n", + "po tře", + "de m", + "d nes", + "ze mí", + "sa mo", + "zna m", + "b ra", + "má m", + "te dy", + "g o", + "hla vní", + "pou ží", + "b ní", + "ve de", + "le p", + "je k", + "pra v", + "poli ti", + "d ne", + "je m", + "le t", + "če ní", + "pro b", + "ne ž", + "dě l", + "fi l", + "č o", + "cí ch", + "st é", + "d lou", + "h i", + "a by", + "to u", + "několi k", + "d la", + "vy u", + "vi t", + "ho u", + "ck ých", + "no vé", + "či n", + "st y", + "dě lá", + "k ý", + "ob la", + "pod le", + "ra n", + "dů leži", + "ta to", + "po ku", + "ko ne", + "d ý", + "d vě", + "ž ád", + "nou t", + "t ku", + "t vr", + "cké ho", + "ro v", + "r é", + "te le", + "p sa", + "s vět", + "ti vní", + "do sta", + "te m", + "še l", + "druh é", + "s kou", + "ž o", + "jed ná", + "vý znam", + "prob lé", + "pu bli", + "vá n", + "od po", + "pod po", + "d le", + "ja ké", + "še ní", + "ví m", + "bě hem", + "na chází", + "s lou", + "pou ze", + "o tá", + "p lo", + "to vé", + "vět ši", + "ko mi", + "va jí", + "ty to", + "zá pa", + "z mě", + "mo h", + "ví ce", + "spole č", + "au to", + "pro ti", + "st ru", + "dě t", + "chá ze", + "že l", + "с т", + "е н", + "н о", + "н а", + "п р", + "т о", + "п о", + "р а", + "г о", + "к о", + "н е", + "в о", + "в а", + "е т", + "е р", + "н и", + "е л", + "и т", + "н ы", + "з а", + "р о", + "ен и", + "к а", + "л и", + "е м", + "д а", + "о б", + "л а", + "д о", + "с я", + "т ь", + "о т", + "л о", + "л ь", + "е д", + "с о", + "м и", + "р е", + "м о", + "ц и", + "пр о", + "т а", + "э то", + "к и", + "р у", + "пр и", + "т и", + "с е", + "ст а", + "в ы", + "м ы", + "в и", + "б ы", + "м а", + "е с", + "л я", + "ст и", + "л е", + "ч то", + "м е", + "р и", + "ч а", + "о д", + "е й", + "ел ь", + "ени я", + "г а", + "н у", + "с и", + "п а", + "ра з", + "б о", + "ст о", + "с у", + "с а", + "д у", + "е го", + "е ст", + "и н", + "ит ь", + "и з", + "ж е", + "м у", + "п ер", + "по д", + "ени е", + "с ь", + "к у", + "пр ед", + "но го", + "ны х", + "в ер", + "т е", + "но й", + "ци и", + "д е", + "р ы", + "д ел", + "л ю", + "в е", + "о н", + "м ен", + "г и", + "н я", + "б у", + "пр а", + "в се", + "ет ся", + "ст ь", + "ж а", + "до л", + "ж и", + "б е", + "ко н", + "с л", + "ш и", + "д и", + "ст в", + "с ко", + "ны е", + "ч и", + "ю т", + "д ер", + "ст ра", + "т ы", + "х од", + "щ и", + "з о", + "з на", + "но сти", + "ч ес", + "в ля", + "ва ть", + "о р", + "по л", + "в ет", + "та к", + "ш а", + "т у", + "с во", + "пр е", + "о на", + "ит ель", + "ны й", + "с ло", + "ка к", + "в л", + "но сть", + "х о", + "мо ж", + "п е", + "д ля", + "ни я", + "но е", + "ра с", + "дол ж", + "да р", + "т ель", + "с ка", + "п у", + "ст во", + "ко то", + "ра б", + "е е", + "ро д", + "э ти", + "с об", + "о ру", + "ж ен", + "ны м", + "ит и", + "ни е", + "ко м", + "д ет", + "ст у", + "г у", + "п и", + "ме ж", + "ени ю", + "т ер", + "раб от", + "во з", + "ци я", + "ко й", + "щ ест", + "г ра", + "з и", + "р я", + "меж ду", + "ст ва", + "в с", + "ел о", + "ш е", + "м ер", + "б а", + "з ы", + "л у", + "а ль", + "д ей", + "г ла", + "на род", + "к ти", + "пред ста", + "л ся", + "я вля", + "с ки", + "но в", + "ед ин", + "ро в", + "и с", + "ни ма", + "р ем", + "ход и", + "так же", + "д ру", + "а ть", + "сл ед", + "го во", + "на я", + "ю щи", + "ен ь", + "кото ры", + "х от", + "в у", + "и х", + "ем у", + "ч 
ит", + "ва ж", + "ор га", + "чес ки", + "щ е", + "к е", + "х а", + "по с", + "то м", + "бо ль", + "м не", + "па с", + "об ъ", + "пра в", + "кон ф", + "сл у", + "под дер", + "ст ви", + "на ш", + "ль ко", + "сто я", + "ну ю", + "л ем", + "ен ных", + "к ра", + "д ы", + "между народ", + "г да", + "не об", + "го су", + "ств у", + "ени и", + "госу дар", + "к то", + "и м", + "ч ест", + "р ет", + "во про", + "л ен", + "ел и", + "ро ва", + "ци й", + "на м", + "это й", + "ж ения", + "необ ходи", + "мен я", + "бы ло", + "си ли", + "ф и", + "в я", + "ш ь", + "это го", + "о ни", + "орга ни", + "бе зо", + "пр об", + "и ме", + "ре ш", + "б и", + "безо пас", + "ют ся", + "о ста", + "ен но", + "го д", + "ел а", + "предста в", + "ть ся", + "сло во", + "органи за", + "долж ны", + "это м", + "б ла", + "ч е", + "ч у", + "бла го", + "это му", + "в рем", + "с пе", + "но м", + "ени й", + "с по", + "на с", + "не т", + "з у", + "в ед", + "е ще", + "ска за", + "се й", + "ер ен", + "да н", + "са м", + "ел я", + "ра н", + "зы ва", + "явля ется", + "бу дет", + "кти в", + "т ре", + "дел е", + "м от", + "конф ерен", + "ла сь", + "ча с", + "сто ро", + "ко го", + "е з", + "не й", + "о с", + "ли сь", + "раз ору", + "пер е", + "с си", + "ны ми", + "про ц", + "го ло", + "ч ело", + "бо ле", + "чело ве", + "с ер", + "п л", + "ч ет", + "стра н", + "п я", + "бы л", + "к ла", + "то в", + "ж д", + "дел а", + "е ра", + "у же", + "со вет", + "г ен", + "безопас ности", + "ц а", + "се да", + "по з", + "от вет", + "проб лем", + "на ко", + "т ем", + "до ста", + "п ы", + "щ а", + "во й", + "су щест", + "необходи мо", + "бы ть", + "мож ет", + "д ем", + "что бы", + "е к", + "ч ер", + "у сили", + "ре с", + "ру д", + "един енных", + "д об", + "до сти", + "ств ен", + "я дер", + "год ня", + "ка за", + "се годня", + "сей час", + "то лько", + "во д", + "ес ь", + "м ного", + "бу ду", + "е в", + "ест ь", + "т ри", + "об щест", + ". 
.", + "я вл", + "вы сту", + "р ед", + "с чит", + "с ит", + "деле га", + "ло ж", + "это т", + "ф ор", + "к лю", + "воз мож", + "ва ния", + "б ли", + "и ли", + "в з", + "на ций", + "ско го", + "при ня", + "п ла", + "о ч", + "ить ся", + "ст е", + "на ши", + "которы е", + "а р", + "име ет", + "с от", + "зна ч", + "пер ь", + "след у", + "ен ы", + "та ки", + "объ единенных", + "ст ро", + "те перь", + "б ле", + "благо дар", + "раз в", + "а н", + "жи ва", + "оч ень", + "я т", + "бе з", + "об ес", + "г ро", + "ло сь", + "с ы", + "организа ции", + "ч лен", + "то го", + "она ль", + "ж да", + "все х", + "с вя", + "боле е", + "со в", + "ко гда", + "во т", + "к ре", + "к ры", + "по этому", + "во ль", + "о й", + "ген ера", + "ч ем", + "л ы", + "пол ити", + "в ен", + "конферен ции", + "проц ес", + "б я", + "ит е", + "от но", + "разв ити", + "а ф", + "ю щ", + "в но", + "ми р", + "ни и", + "ка я", + "а с", + "итель но", + "в то", + "ени ем", + "генера ль", + "пр от", + "вс ем", + "сам бле", + "ас самбле", + "о м", + "з д", + "с мот", + "ре ги", + "ч его", + "од нако", + "усили я", + "дей стви", + "ч но", + "у ча", + "об раз", + "во с", + "э та", + "пер его", + "гово р", + "ва м", + "мо ло", + "врем я", + "д ь", + "хот ел", + "г ру", + "за явл", + "пре доста", + "по ль", + "не е", + "ре зо", + "перего во", + "резо лю", + "к рет", + "поддер ж", + "обес пе", + "не го", + "представ ит", + "на де", + "к ри", + "ч ь", + "про ек", + "л ет", + "дру ги", + "ا ل", + "َ ا", + "و َ", + "ّ َ", + "ِ ي", + "أ َ", + "ل َ", + "ن َ", + "ال ْ", + "ه ُ", + "ُ و", + "م ا", + "ن ْ", + "م ن", + "ع َ", + "ن ا", + "ل ا", + "م َ", + "ت َ", + "ف َ", + "أ ن", + "ل ي", + "م ِ", + "ا ن", + "ف ي", + "ر َ", + "ي َ", + "ه ِ", + "م ْ", + "ق َ", + "ب ِ", + "ل ى", + "ي ن", + "إ ِ", + "ل ِ", + "و ا", + "ك َ", + "ه ا", + "ً ا", + "م ُ", + "و ن", + "ال م", + "ب َ", + "ي ا", + "ذ ا", + "س ا", + "ال ل", + "م ي", + "ي ْ", + "ر ا", + "ر ي", + "ل ك", + "م َا", + "ن َّ", + "ل م", + "إ ن", + "س ت", + "و م", + "ّ َا", + "ل َا", + "ه م", + "ّ ِ", + "ك ُ", + "ك ان", + "س َ", + "ب ا", + "د ي", + "ح َ", + "ع ْ", + "ب ي", + "ال أ", + "و ل", + "ف ِي", + "ر ِ", + "د ا", + "مِ نْ", + "ُو نَ", + "و ْ", + "ه َا", + "ّ ُ", + "ال س", + "ال َ", + "ن ي", + "ل ْ", + "ت ُ", + "ه ل", + "ر ة", + "د َ", + "س ْ", + "ت ِ", + "ن َا", + "ر ْ", + "الل َّ", + "سا مي", + "ك ن", + "ك ل", + "ه َ", + "عَ لَ", + "ع لى", + "م ع", + "إ لى", + "ق د", + "ال ر", + "ُو ا", + "ي ر", + "ع ن", + "ي ُ", + "ن ِ", + "ب ْ", + "ال ح", + "هُ مْ", + "ق ا", + "ذ ه", + "ال ت", + "ِي نَ", + "ج َ", + "ه ذا", + "ع د", + "ال ع", + "د ْ", + "قَ الَ", + "ر ُ", + "ي م", + "ي ة", + "ن ُ", + "خ َ", + "ر ب", + "ال ك", + "و َا", + "أ نا", + "ة ِ", + "ال ن", + "ح د", + "ع ِ", + "ت ا", + "ه و", + "ف ا", + "ع ا", + "ال ش", + "ل ُ", + "ي ت", + "ذ َا", + "ي ع", + "ال ذ", + "ح ْ", + "ال ص", + "إِ نَّ", + "ج ا", + "ع لي", + "ك َا", + "ب ُ", + "ت ع", + "و ق", + "م ل", + "ل َّ", + "ي د", + "أ خ", + "ر ف", + "ت ي", + "ال ِ", + "ّ ا", + "ذ لك", + "أَ نْ", + "س ِ", + "ت وم", + "م ر", + "مَ نْ", + "ب ل", + "ال ق", + "الل ه", + "ِي َ", + "ك م", + "ذ َ", + "ع ل", + "ح ب", + "س ي", + "ع ُ", + "ال ج", + "ال د", + "ش َ", + "ت ك", + "ف ْ", + "ص َ", + "ل ل", + "د ِ", + "ب ر", + "ف ِ", + "ت ه", + "أ ع", + "ت ْ", + "ق ْ", + "الْ أَ", + "ئ ِ", + "عَ نْ", + "و ر", + "ح ا", + "ال َّ", + "م ت", + "ف ر", + "د ُ", + "ه نا", + "وَ أَ", + "ت ب", + "ة ُ", + "أ ي", + "س ب", + "ري د", + "و ج", + "كُ مْ", + "ح ِ", + "ك ْ", + "د ر", + "َا ء", + "ه ذه", + "ال ط", + "الْ مُ", + "د ة", + "ق ل", + "غ َ", + "ي وم", + "الَّ ذ", + "ك ر", + "ت ر", + "ك 
ِ", + "ك ي", + "عَلَ ى", + "رَ ب", + "ع ة", + "ق ُ", + "ج ْ", + "ف ض", + "ل ة", + "ه ْ", + "ر َا", + "وَ لَ", + "الْ مَ", + "أَ نَّ", + "ي َا", + "أ ُ", + "ش ي", + "اللَّ هُ", + "لَ ى", + "ق ِ", + "أ ت", + "عَلَ يْ", + "اللَّ هِ", + "ال ب", + "ض َ", + "ة ً", + "ق ي", + "ا ر", + "ب د", + "خ ْ", + "سْ تَ", + "ط َ", + "قَ دْ", + "ذه ب", + "أ م", + "ما ذا", + "وَ إِ", + "ة ٌ", + "و نَ", + "لي لى", + "و لا", + "ح ُ", + "ه ي", + "ص ل", + "ال خ", + "و د", + "لي س", + "ل دي", + "ق ال", + "كَا نَ", + "م َّ", + "ح ي", + "ت م", + "ل ن", + "وَ لَا", + "ب ع", + "يم كن", + "س ُ", + "ة َ", + "ح ت", + "ر ًا", + "ك ا", + "ش ا", + "هِ مْ", + "لَ هُ", + "ز َ", + "دا ً", + "م س", + "ك ث", + "الْ عَ", + "ج ِ", + "ص ْ", + "ف َا", + "ل ه", + "و ي", + "ع َا", + "هُ وَ", + "ب ِي", + "ب َا", + "أ س", + "ث َ", + "ل ِي", + "ر ض", + "الر َّ", + "لِ كَ", + "ت َّ", + "ف ُ", + "ق ة", + "ف عل", + "مِ ن", + "ال آ", + "ث ُ", + "س م", + "م َّا", + "بِ هِ", + "ت ق", + "خ ر", + "ل قد", + "خ ل", + "ش ر", + "أن ت", + "ل َّا", + "س ن", + "الس َّ", + "الذ ي", + "س َا", + "و ما", + "ز ل", + "و ب", + "أ ْ", + "إ ذا", + "ر ِي", + "ح ة", + "ن ِي", + "الْ حَ", + "وَ قَالَ", + "ب ه", + "ة ٍ", + "س أ", + "ر ٌ", + "ب ال", + "م ة", + "ش ْ", + "و ت", + "عن د", + "ف س", + "بَ عْ", + "ه ر", + "ق ط", + "أ ح", + "إن ه", + "و ع", + "ف ت", + "غ ا", + "هنا ك", + "ب ت", + "مِ نَ", + "س ر", + "ذَ لِكَ", + "ر س", + "حد ث", + "غ ْ", + "ّ ِي", + "ال إ", + "وَ يَ", + "ج ل", + "ا ست", + "ق ِي", + "ع ب", + "و س", + "ي ش", + "الَّذ ِينَ", + "تا ب", + "د ِي", + "ج ب", + "ك ون", + "ب ن", + "ال ث", + "لَ يْ", + "ب عد", + "وَ الْ", + "فَ أَ", + "ع م", + "هُ م", + "ت ن", + "ذ ْ", + "أ ص", + "أ ين", + "رَب ِّ", + "الذ ين", + "إِ ن", + "ب ين", + "ج ُ", + "عَلَيْ هِ", + "ح َا", + "ل و", + "ست ط", + "ظ ر", + "لَ مْ", + "ء ِ", + "كُ ل", + "ط ل", + "ت َا", + "ض ُ", + "كن ت", + "ل ًا", + "م ٌ", + "ق بل", + "ـ ـ", + "ذ ِ", + "قَ وْ", + "ص ِ", + "م ًا", + "كان ت", + "ص ا", + "ي ق", + "ال ف", + "ال نا", + "م ٍ", + "إِ نْ", + "ال نَّ", + "ج د", + "وَ مَا", + "ت ت", + "ب ح", + "م كان", + "كي ف", + "ّ ة", + "ال ا", + "ج َا", + "أ و", + "سا عد", + "ض ِ", + "إ لا", + "را ً", + "ق َا", + "ر أ", + "ع ت", + "أ حد", + "ه د", + "ض ا", + "ط ر", + "أ ق", + "ما ء", + "د َّ", + "ال با", + "م ُو", + "أَ وْ", + "ط ا", + "ق ُو", + "خ ِ", + "ت ل", + "ستط يع", + "د َا", + "الن َّا", + "إ لَى", + "وَ تَ", + "هَ ذَا", + "ب ة", + "علي ك", + "ج ر", + "ال من", + "ز ا", + "ر ٍ", + "د ع", + "ّ ًا", + "س ة", + "ثُ مَّ", + "شي ء", + "ال غ", + "ت ح", + "ر ُونَ", + "ال يوم", + "م ِي", + "ن ُوا", + "أ ر", + "تُ مْ", + "ع ر", + "ي ف", + "أ ب", + "د ًا", + "ص َا", + "الت َّ", + "أ ريد", + "ال ز", + "يَ وْ", + "إ لي", + "ج ي", + "يَ عْ", + "فض ل", + "ال إن", + "أن ه", + "n g", + "i 4", + "a n", + "s h", + "z h", + "i 2", + "ng 1", + "u 4", + "i 1", + "ng 2", + "d e", + "j i", + "a o", + "x i", + "u 3", + "de 5", + "e 4", + "i 3", + "ng 4", + "an 4", + "e n", + "u o", + "sh i4", + "an 2", + "u 2", + "c h", + "u 1", + "ng 3", + "a 1", + "an 1", + "e 2", + "a 4", + "e i4", + "o ng1", + "a i4", + "ao 4", + "h u", + "a ng1", + "l i", + "y o", + "an 3", + "w ei4", + "uo 2", + "n 1", + "en 2", + "ao 3", + "e 1", + "y u", + "q i", + "e ng2", + "zh o", + "a ng3", + "a ng4", + "a ng2", + "uo 4", + "m i", + "g e4", + "y i1", + "g uo2", + "e r", + "b i", + "a 3", + "h e2", + "e 3", + "y i2", + "d i4", + "zh ong1", + "b u4", + "g u", + "a i2", + "n 2", + "z ai4", + "sh i2", + "e ng1", + "r en2", + "o ng2", + "xi an4", + "y i", + "x u", + "n 4", + "l i4", + "en 4", + "y u2", + "e i2", + "yi2 ge4", + "o u4", + "e i3", 
+ "d i", + "u i4", + "a 2", + "yo u3", + "ao 1", + "d a4", + "ch eng2", + "en 1", + "e ng4", + "y i4", + "s i1", + "zh i4", + "ji a1", + "yu an2", + "n i", + "t a1", + "de5 yi2ge4", + "k e1", + "sh u3", + "x i1", + "j i2", + "ao 2", + "t i", + "o u3", + "o ng4", + "xi a4", + "a i1", + "g ong1", + "zh i1", + "en 3", + "w ei2", + "j u", + "xu e2", + "q u1", + "zho u1", + "er 3", + "mi ng2", + "zho ng3", + "l i3", + "w u4", + "y i3", + "uo 1", + "e 5", + "j i4", + "xi ng2", + "ji an4", + "hu a4", + "y u3", + "uo 3", + "j i1", + "a i3", + "z uo4", + "h ou4", + "hu i4", + "e i1", + "ni an2", + "q i2", + "p i", + "d ao4", + "sh eng1", + "de 2", + "d ai4", + "u an2", + "zh e4", + "zh eng4", + "b en3", + "sh ang4", + "zh u3", + "b ei4", + "y e4", + "ch u1", + "zh an4", + "l e5", + "l ai2", + "sh i3", + "n an2", + "r en4", + "yo u2", + "k e4", + "b a1", + "f u4", + "d ui4", + "y a4", + "m ei3", + "z i4", + "xi n1", + "ji ng1", + "zh u", + "n 3", + "yo ng4", + "m u4", + "ji ao4", + "y e3", + "ji n4", + "bi an4", + "l u4", + "q i1", + "sh e4", + "xi ang1", + "o ng3", + "sh u4", + "d ong4", + "s uo3", + "gu an1", + "s an1", + "b o", + "t e4", + "d uo1", + "f u2", + "mi n2", + "l a1", + "zh i2", + "zh en4", + "o u1", + "w u3", + "m a3", + "i 5", + "z i5", + "j u4", + "er 4", + "y ao4", + "xia4 de5yi2ge4", + "s i4", + "t u2", + "sh an1", + "z ui4", + "ch u", + "yi n1", + "er 2", + "t ong2", + "d ong1", + "y u4", + "y an2", + "qi an2", + "shu3 xia4de5yi2ge4", + "ju n1", + "k e3", + "w en2", + "f a3", + "l uo2", + "zh u4", + "x i4", + "k ou3", + "b ei3", + "ji an1", + "f a1", + "di an4", + "ji ang1", + "wei4 yu2", + "xi ang4", + "zh i3", + "e ng3", + "f ang1", + "l an2", + "sh u", + "r i4", + "li an2", + "sh ou3", + "m o", + "qi u2", + "ji n1", + "h uo4", + "shu3xia4de5yi2ge4 zhong3", + "f en1", + "n ei4", + "g ai1", + "mei3 guo2", + "u n2", + "g e2", + "b ao3", + "qi ng1", + "g ao1", + "t ai2", + "d u", + "xi ao3", + "ji e2", + "ti an1", + "ch ang2", + "q uan2", + "li e4", + "h ai3", + "f ei1", + "t i3", + "ju e2", + "o u2", + "c i3", + "z u2", + "n i2", + "bi ao3", + "zhong1 guo2", + "d u4", + "yu e4", + "xi ng4", + "sh eng4", + "ch e1", + "d an1", + "ji e1", + "li n2", + "pi ng2", + "f u3", + "g u3", + "ji e4", + "w o", + "v 3", + "sh eng3", + "n a4", + "yu an4", + "zh ang3", + "gu an3", + "d ao3", + "z u3", + "di ng4", + "di an3", + "c eng2", + "ren2 kou3", + "t ai4", + "t ong1", + "g uo4", + "n eng2", + "ch ang3", + "hu a2", + "li u2", + "yi ng1", + "xi ao4", + "c i4", + "bian4 hua4", + "li ang3", + "g ong4", + "zho ng4", + "de5 yi1", + "s e4", + "k ai1", + "w ang2", + "ji u4", + "sh i1", + "sh ou4", + "m ei2", + "k u", + "s u", + "f eng1", + "z e2", + "tu2 shi4", + "t i2", + "q i4", + "ji u3", + "sh en1", + "zh e3", + "ren2kou3 bian4hua4", + "ren2kou3bian4hua4 tu2shi4", + "di4 qu1", + "y ang2", + "m en", + "men 5", + "l ong2", + "bi ng4", + "ch an3", + "zh u1", + "w ei3", + "w ai4", + "xi ng1", + "bo 1", + "b i3", + "t ang2", + "hu a1", + "bo 2", + "shu i3", + "sh u1", + "d ou1", + "s ai4", + "ch ao2", + "b i4", + "li ng2", + "l ei4", + "da4 xue2", + "f en4", + "shu3 de5", + "m u3", + "ji ao1", + "d ang1", + "ch eng1", + "t ong3", + "n v3", + "q i3", + "y an3", + "mi an4", + "l uo4", + "ji ng4", + "g e1", + "r u4", + "d an4", + "ri4 ben3", + "p u3", + "yu n4", + "hu ang2", + "wo 3", + "l v", + "h ai2", + "shi4 yi1", + "xi e1", + "yi ng3", + "w u2", + "sh en2", + "w ang3", + "gu ang3", + "li u4", + "s u4", + "shi4 zhen4", + "c an1", + "c ao3", + "xi a2", + "k a3", + "d a2", + "h u4", + "b an4", + "d 
ang3", + "h u2", + "z ong3", + "de ng3", + "de5yi2ge4 shi4zhen4", + "ch uan2", + "mo 4", + "zh ang1", + "b an1", + "mo 2", + "ch a2", + "c e4", + "zhu3 yao4", + "t ou2", + "j u2", + "shi4 wei4yu2", + "s a4", + "u n1", + "ke3 yi3", + "d u1", + "h an4", + "li ang4", + "sh a1", + "ji a3", + "z i1", + "lv 4", + "f u1", + "xi an1", + "x u4", + "gu ang1", + "m eng2", + "b ao4", + "yo u4", + "r ong2", + "zhi1 yi1", + "w ei1", + "m ao2", + "guo2 jia1", + "c ong2", + "g ou4", + "ti e3", + "zh en1", + "d u2", + "bi an1", + "c i2", + "q u3", + "f an4", + "xi ang3", + "m en2", + "j u1", + "h ong2", + "z i3", + "ta1 men5", + "ji 3", + "z ong1", + "zhou1 de5yi2ge4shi4zhen4", + "t uan2", + "ji ng3", + "gong1 si1", + "xi e4", + "l i2", + "li4 shi3", + "b ao1", + "g ang3", + "gu i1", + "zh eng1", + "zhi2 wu4", + "ta1 de5", + "pi n3", + "zhu an1", + "ch ong2", + "shi3 yong4", + "w a3", + "sh uo1", + "chu an1", + "l ei2", + "w an1", + "h uo2", + "q u", + "s u1", + "z ao3", + "g ai3", + "q u4", + "g u4", + "l u", + "x i2", + "h ang2", + "yi ng4", + "c un1", + "g en1", + "yi ng2", + "ti ng2", + "cheng2 shi4", + "ji ang3", + "li ng3", + "l un2", + "bu4 fen4", + "de ng1", + "xu an3", + "dong4 wu4", + "de2 guo2", + "xi an3", + "f an3", + "zh e5", + "h an2", + "h ao4", + "m i4", + "r an2", + "qi n1", + "ti ao2", + "zh an3", + "h i", + "k a", + "n o", + "t e", + "s u", + "s hi", + "t a", + "t o", + "n a", + "w a", + "o u", + "r u", + "n i", + "k u", + "k i", + "g a", + "d e", + "k o", + "m a", + "r e", + "r a", + "m o", + "t su", + "w o", + "e n", + "r i", + "s a", + "d a", + "s e", + "j i", + "h a", + "c hi", + "k e", + "te ki", + "m i", + "y ou", + "s h", + "s o", + "y o", + "y a", + "na i", + "t te", + "a ru", + "b a", + "u u", + "t ta", + "ka i", + "ka n", + "shi te", + "m e", + "d o", + "mo no", + "se i", + "r o", + "ko to", + "ka ra", + "shi ta", + "b u", + "m u", + "c h", + "su ru", + "k ou", + "g o", + "ma su", + "ta i", + "f u", + "k en", + "i u", + "g en", + "wa re", + "shi n", + "z u", + "a i", + "o n", + "o ku", + "g i", + "d ou", + "n e", + "y uu", + "i ru", + "i te", + "ji ko", + "de su", + "j u", + "ra re", + "sh u", + "b e", + "sh ou", + "s ha", + "se kai", + "s ou", + "k you", + "ma shita", + "s en", + "na ra", + "sa n", + "ke i", + "i ta", + "a ri", + "i tsu", + "ko no", + "j ou", + "na ka", + "ch ou", + "so re", + "g u", + "na ru", + "ga ku", + "re ba", + "g e", + "h o", + "i n", + "hi to", + "sa i", + "na n", + "da i", + "tsu ku", + "shi ki", + "sa re", + "na ku", + "p p", + "bu n", + "ju n", + "so no", + "ka ku", + "z ai", + "b i", + "to u", + "wa ta", + "sh uu", + "i i", + "te i", + "ka re", + "y u", + "shi i", + "ma de", + "sh o", + "a n", + "ke reba", + "shi ka", + "i chi", + "ha n", + "de ki", + "ni n", + "ware ware", + "na kereba", + "o ite", + "h ou", + "ya ku", + "ra i", + "mu jun", + "l e", + "yo ku", + "bu tsu", + "o o", + "ko n", + "o mo", + "ga e", + "nara nai", + "ta chi", + "z en", + "ch uu", + "kan gae", + "ta ra", + "to ki", + "ko ro", + "mujun teki", + "z e", + "na ga", + "ji n", + "shi ma", + "te n", + "i ki", + "i ku", + "no u", + "i masu", + "r ou", + "h on", + "ka e", + "t to", + "ko re", + "ta n", + "ki ta", + "i s", + "da tta", + "ji tsu", + "ma e", + "i e", + "me i", + "da n", + "h e", + "to ku", + "dou itsu", + "ri tsu", + "k yuu", + "h you", + "rare ta", + "kei sei", + "k kan", + "rare ru", + "m ou", + "do ko", + "r you", + "da ke", + "naka tta", + "so ko", + "ta be", + "e r", + "ha na", + "c o", + "fu ku", + "p a", + "so n", + "ya su", + "ch o", + "wata ku", + "ya ma", 
+ "z a", + "k yo", + "gen zai", + "b oku", + "a ta", + "j a", + "ka wa", + "ma sen", + "j uu", + "ro n", + "b o", + "na tte", + "wataku shi", + "yo tte", + "ma i", + "g ou", + "ha i", + "mo n", + "ba n", + "ji shin", + "c a", + "re te", + "n en", + "o ka", + "ka gaku", + "na tta", + "p o", + "ka ru", + "na ri", + "m en", + "ma ta", + "e i", + "ku ru", + "ga i", + "ka ri", + "sha kai", + "kou i", + "yo ri", + "se tsu", + "j o", + "re ru", + "to koro", + "ju tsu", + "i on", + "sa ku", + "tta i", + "c ha", + "nin gen", + "n u", + "c e", + "ta me", + "kan kyou", + "de n", + "o oku", + "i ma", + "wata shi", + "tsuku ru", + "su gi", + "b en", + "ji bun", + "shi tsu", + "ke ru", + "ki n", + "ki shi", + "shika shi", + "mo to", + "ma ri", + "i tte", + "de shita", + "n de", + "ari masu", + "te r", + "z ou", + "ko e", + "ze ttai", + "kkan teki", + "h en", + "re kishi", + "deki ru", + "tsu ka", + "l a", + "i tta", + "o i", + "ko butsu", + "mi ru", + "sh oku", + "shi masu", + "gi jutsu", + "g you", + "jou shiki", + "a tta", + "ho do", + "ko ko", + "tsuku rareta", + "z oku", + "hi tei", + "ko ku", + "rekishi teki", + "ke te", + "o ri", + "i mi", + "ka ko", + "naga ra", + "ka karu", + "shu tai", + "ha ji", + "ma n", + "ta ku", + "ra n", + "douitsu teki", + "z o", + "me te", + "re i", + "tsu u", + "sare te", + "gen jitsu", + "p e", + "s t", + "ba i", + "na wa", + "ji kan", + "wa ru", + "r t", + "a tsu", + "so ku", + "koui teki", + "a ra", + "u ma", + "a no", + "i de", + "ka ta", + "te tsu", + "ga wa", + "ke do", + "re ta", + "mi n", + "sa you", + "tte ru", + "to ri", + "p u", + "ki mi", + "b ou", + "mu ra", + "sare ru", + "ma chi", + "k ya", + "o sa", + "kon na", + "a ku", + "a l", + "sare ta", + "i pp", + "shi ku", + "u chi", + "hito tsu", + "ha tara", + "tachi ba", + "shi ro", + "ka tachi", + "to mo", + "e te", + "me ru", + "ni chi", + "da re", + "ka tta", + "e ru", + "su ki", + "a ge", + "oo ki", + "ma ru", + "mo ku", + "o ko", + "kangae rareru", + "o to", + "tan ni", + "ta da", + "tai teki", + "mo tte", + "ki nou", + "shi nai", + "k ki", + "u e", + "ta ri", + "l i", + "ra nai", + "k kou", + "mi rai", + "pp on", + "go to", + "hi n", + "hi tsu", + "te ru", + "mo chi", + "ka tsu", + "re n", + "n yuu", + "su i", + "zu ka", + "tsu ite", + "no mi", + "su gu", + "ku da", + "tetsu gaku", + "i ka", + "ron ri", + "o ki", + "ni ppon", + "p er", + "shi mashita", + "chi shiki", + "cho kkanteki", + "su ko", + "t ion", + "ku u", + "a na", + "a rou", + "ka tte", + "ku ri", + "i nai", + "hyou gen", + "i shiki", + "do ku", + "a tte", + "a tara", + "to n", + "wa ri", + "ka o", + "sei san", + "hana shi", + "s i", + "ka ke", + "na ji", + "su nawa", + "sunawa chi", + "u go", + "su u", + "ba ra", + "le v", + "hi ro", + "i wa", + "be tsu", + "yo i", + "se ru", + "shite ru", + "rare te", + "to shi", + "se ki", + "tai ritsu", + "wa kara", + "to kyo", + "k ka", + "k yoku", + "u n", + "i ro", + "mi te", + "sa ki", + "kan ji", + "mi ta", + "su be", + "r yoku", + "ma tta", + "kuda sai", + "omo i", + "ta no", + "ware ru", + "co m", + "hitsu you", + "ka shi", + "re nai", + "kan kei", + "a to", + "ga tte", + "o chi", + "mo tsu", + "in g", + "son zai", + "l l", + "o re", + "tai shite", + "a me", + "sei mei", + "ka no", + "gi ri", + "kangae ru", + "yu e", + "a sa", + "o naji", + "yo ru", + "ni ku", + "osa ka", + "suko shi", + "c k", + "ta ma", + "kano jo", + "ki te", + "mon dai", + "a mari", + "e ki", + "ko jin", + "ha ya", + "i t", + "de te", + "atara shii", + "a wa", + "ga kkou", + "tsu zu", + "shu kan", + "i mashita", + "mi na", + 
"ata e", + "da rou", + "hatara ku", + "ga ta", + "da chi", + "ma tsu", + "ari masen", + "sei butsu", + "mi tsu", + "he ya", + "yasu i", + "d i", + "de ni", + "no ko", + "ha ha", + "do mo", + "ka mi", + "su deni", + "na o", + "ra ku", + "i ke", + "a ki", + "me ta", + "l o", + "ko domo", + "so shite", + "ga me", + "ba kari", + "to te", + "ha tsu", + "mi se", + "moku teki", + "da kara", + "s z", + "e l", + "g y", + "e n", + "t t", + "e m", + "a n", + "a k", + "e r", + "a z", + "a l", + "e t", + "o l", + "e g", + "e k", + "m i", + "o n", + "é s", + "c s", + "a t", + "á r", + "h o", + "e z", + "á l", + "i s", + "á n", + "o r", + "a r", + "e gy", + "e s", + "é r", + "á t", + "o tt", + "e tt", + "m eg", + "t a", + "o k", + "o s", + "ho gy", + "n em", + "é g", + "n y", + "k i", + "é l", + "h a", + "á s", + "ü l", + "i n", + "mi n", + "n a", + "e d", + "o m", + "i k", + "k ö", + "m a", + "n i", + "v a", + "v ol", + "é t", + "b b", + "f el", + "i g", + "l e", + "r a", + "é n", + "t e", + "d e", + "a d", + "ó l", + "b e", + "on d", + "j a", + "r e", + "u l", + "b en", + "n ek", + "u t", + "vol t", + "b an", + "ö r", + "o g", + "a p", + "o d", + "á g", + "n k", + "é k", + "v al", + "k or", + "a m", + "i l", + "í t", + "á k", + "b a", + "u d", + "sz er", + "min d", + "o z", + "é p", + "el l", + "ér t", + "m ond", + "i t", + "sz t", + "n ak", + "a mi", + "n e", + "ő l", + "cs ak", + "n é", + "ma g", + "ol y", + "m er", + "ál l", + "án y", + "ö n", + "ö l", + "min t", + "m ár", + "ö tt", + "na gy", + "é sz", + "az t", + "el ő", + "t ud", + "o t", + "é ny", + "á z", + "m ég", + "kö z", + "el y", + "s ég", + "en t", + "s em", + "ta m", + "h et", + "h al", + "f i", + "a s", + "v an", + "ho z", + "v e", + "u k", + "k ez", + "á m", + "v el", + "b er", + "a j", + "u nk", + "i z", + "va gy", + "m os", + "sz em", + "em ber", + "f og", + "mer t", + "ü k", + "l en", + "ö s", + "e j", + "t al", + "h at", + "t ak", + "h i", + "m ás", + "s ág", + "ett e", + "l eg", + "ü nk", + "h át", + "sz a", + "on y", + "ez t", + "mind en", + "en d", + "ül t", + "h an", + "j ó", + "k is", + "á j", + "in t", + "ú gy", + "i d", + "mos t", + "ar t", + "í r", + "k er", + "i tt", + "a tt", + "el t", + "mond ta", + "k ell", + "l á", + "ak i", + "ál t", + "ér d", + "t ö", + "l an", + "v ár", + "h ol", + "t el", + "l át", + "ő k", + "v et", + "s e", + "ut án", + "k ét", + "na p", + "í v", + "ál y", + "v ég", + "ö k", + "i r", + "d ul", + "v is", + "né z", + "t er", + "á ban", + "k ül", + "ak kor", + "k ap", + "sz él", + "y en", + "ú j", + "i m", + "oly an", + "es en", + "k ed", + "h ely", + "t ör", + "b ól", + "el m", + "r á", + "ár a", + "r ó", + "l ó", + "vol na", + "t an", + "le het", + "e bb", + "t en", + "t ek", + "s ok", + "k al", + "f or", + "u g", + "ol t", + "k a", + "ek et", + "b or", + "f ej", + "g ond", + "a g", + "ak ar", + "f él", + "ú l", + "b el", + "ott a", + "mi t", + "val ami", + "j el", + "é d", + "ar c", + "u r", + "hal l", + "t i", + "f öl", + "á ba", + "ol g", + "ki r", + "ol d", + "m ar", + "k érd", + "j ár", + "ú r", + "sz e", + "z s", + "él et", + "j át", + "o v", + "u s", + "é z", + "v il", + "v er", + "ő r", + "á d", + "ö g", + "le sz", + "on t", + "b iz", + "k oz", + "á bb", + "kir ály", + "es t", + "a b", + "en g", + "ig az", + "b ar", + "ha j", + "d i", + "o b", + "k od", + "r ól", + "v ez", + "tö bb", + "sz ó", + "é ben", + "ö t", + "ny i", + "t á", + "sz ól", + "gond ol", + "eg ész", + "í gy", + "ő s", + "o bb", + "os an", + "b ől", + "a bb", + "c i", + "ő t", + "n ál", + "k ép", + "azt án", + "v i", + "t 
art", + "be szél", + "m en", + "elő tt", + "a szt", + "ma j", + "kö r", + "han g", + "í z", + "in cs", + "a i", + "é v", + "ó d", + "ó k", + "hoz z", + "t em", + "ok at", + "an y", + "nagy on", + "h áz", + "p er", + "p ed", + "ez te", + "et len", + "nek i", + "maj d", + "sz ony", + "án ak", + "fel é", + "egy szer", + "j e", + "ad t", + "gy er", + "ami kor", + "f oly", + "sz ak", + "ő d", + "h ú", + "á sz", + "am ely", + "h ar", + "ér e", + "il yen", + "od a", + "j ák", + "t ár", + "á val", + "l ak", + "t ó", + "m ent", + "gy an", + "él y", + "ú t", + "v ar", + "kez d", + "m ell", + "mi kor", + "h ez", + "val ó", + "k o", + "m es", + "szer et", + "r end", + "l et", + "vis sza", + "ig en", + "f ő", + "va s", + "as szony", + "r ől", + "ped ig", + "p i", + "sz ép", + "t ák", + "ö v", + "an i", + "vil ág", + "p en", + "mag a", + "t et", + "sz ik", + "é j", + "én t", + "j ött", + "s an", + "sz í", + "i de", + "g at", + "ett em", + "ul t", + "h ány", + "ás t", + "a hol", + "ők et", + "h ár", + "k el", + "n ő", + "cs i", + "tal ál", + "el te", + "lá tt", + "tör t", + "ha gy", + "e sz", + "s en", + "n él", + "p ar", + "v ál", + "k ut", + "l ány", + "ami t", + "s ő", + "ell en", + "mag át", + "in k", + "u gyan", + "kül ön", + "a sz", + "mind ig", + "l ép", + "tal án", + "u n", + "sz or", + "k e", + "il lan", + "n incs", + "z et", + "vagy ok", + "tel en", + "is mer", + "s or", + "is ten", + "ít ott", + "j obb", + "v es", + "dul t", + "j uk", + "sz en", + "r o", + "ö m", + "l ett", + "k ar", + "egy ik", + "b ár", + "sz i", + "sz ív", + "az on", + "e szt", + "föl d", + "kut y", + "p illan", + "f ér", + "k om", + "t ől", + "t ű", + "é be", + "t ött", + "bar át", + "í g", + "a hogy", + "e h", + "e p", + "s o", + "v en", + "jel ent", + "t at", + "sz eg", + "mint ha", + "f al", + "egy en", + "mi l", + "sza b", + "r i", + "é m", + "biz ony", + "j on", + "ör eg", + "d olg", + "cs ap", + "ti szt", + "áll t", + "an cs", + "id ő", + "k at", + "ü gy", + "mi ért", + "ó t", + "ü r", + "cs in", + "h az", + "b et", + "én ek", + "v ér", + "j ól", + "al att", + "m ely", + "l o", + "sem mi", + "ny ug", + "v ág", + "kö vet", + "ös sze", + "ma d", + "l i", + "a cs", + "fi ú", + "kö n", + "más ik", + "j ön", + "sz ám", + "g er", + "s ó", + "r ész", + "k ér", + "z el", + "é vel", + "e o", + "e u", + "a n", + "eu l", + "eu n", + "eo n", + "a e", + "d a", + "a l", + "s s", + "i n", + "i l", + "a g", + "an g", + "y eon", + "y eo", + "d o", + "c h", + "n g", + "j i", + "h an", + "g a", + "g o", + "u i", + "h ae", + "a m", + "u l", + "u n", + "g eo", + "s i", + "n eun", + "ss da", + "s eo", + "eon g", + "y o", + "i da", + "t t", + "k k", + "j eo", + "d eul", + "w a", + "eu m", + "g e", + "o n", + "o g", + "s al", + "m an", + "yeon g", + "geo s", + "h ag", + "an eun", + "j a", + "g i", + "s u", + "i ss", + "o l", + "d ae", + "eo b", + "h a", + "j u", + "eo l", + "g eu", + "j eong", + "s ae", + "do e", + "g eul", + "s eu", + "s in", + "eul o", + "b n", + "s ang", + "bn ida", + "h al", + "b o", + "han eun", + "m al", + "i m", + "m o", + "b u", + "jeo g", + "sae ng", + "in eun", + "an h", + "m a", + "sal am", + "j o", + "s a", + "eo m", + "n ae", + "w i", + "l o", + "g wa", + "yeo l", + "n a", + "e seo", + "y e", + "m yeon", + "tt ae", + "h w", + "j e", + "eob s", + "j ang", + "g u", + "g w", + "il eul", + "yeo g", + "j eon", + "si g", + "j ag", + "j in", + "y u", + "o e", + "s e", + "hag o", + "d eun", + "y a", + "m un", + "s eong", + "g ag", + "h am", + "d ang", + "b a", + "l eul", + "s il", + "do ng", + "kk a", + "b al", + "da 
l", + "han da", + "eo ssda", + "ae g", + "l i", + "ha ji", + "s eon", + "o ng", + "hae ssda", + "d e", + "i ssda", + "e ge", + "b un", + "m ul", + "ju ng", + "ji g", + "m u", + "iss neun", + "b i", + "g eun", + "seu bnida", + "w on", + "p p", + "d aneun", + "eo h", + "d eo", + "ga m", + "j al", + "hae ng", + "ag o", + "y ang", + "b ul", + "b ang", + "u m", + "s o", + "h i", + "j ae", + "si m", + "saeng gag", + "hag e", + "s og", + "eo ss", + "d an", + "ja sin", + "j il", + "eo g", + "g yeong", + "doe n", + "go ng", + "m i", + "ch i", + "d eu", + "d eon", + "hae ss", + "d u", + "n am", + "eun g", + "jo h", + "n al", + "m yeong", + "w o", + "eon a", + "i go", + "g yeol", + "y ag", + "gw an", + "ul i", + "yo ng", + "n o", + "l yeo", + "j og", + "eoh ge", + "ga t", + "b og", + "mo s", + "t ong", + "ch a", + "man h", + "jeo l", + "geo l", + "h oe", + "ag a", + "n aneun", + "g an", + "un eun", + "ch eol", + "ch e", + "do l", + "b on", + "b an", + "ba d", + "ch u", + "ham yeon", + "yeo ssda", + "i bnida", + "g ye", + "eo s", + "hw al", + "salam deul", + "ji man", + "dang sin", + "ji b", + "ttae mun", + "m ae", + "i b", + "e neun", + "eu g", + "jeo m", + "geul eon", + "h wa", + "a ssda", + "b eob", + "bu t", + "b ae", + "yeo ss", + "ch in", + "ch aeg", + "g eon", + "g ae", + "nae ga", + "i ga", + "m og", + "sig an", + "g il", + "h yeon", + "l yeog", + "gu g", + "p yeon", + "s an", + "w ae", + "j ul", + "s eul", + "deun g", + "haji man", + "eum yeon", + "p il", + "m ol", + "n eu", + "a ss", + "n yeon", + "t ae", + "h u", + "p yo", + "s ul", + "g ang", + "j ineun", + "b eon", + "ha da", + "seo l", + "si p", + "dal eun", + "a p", + "sal m", + "g yo", + "ch eon", + "hag i", + "in a", + "cheol eom", + "g al", + "il a", + "kka ji", + "anh neun", + "ha bnida", + "tt eon", + "n u", + "hae seo", + "doen da", + "s ol", + "tt al", + "l a", + "il o", + "seu b", + "b yeon", + "m yeo", + "b eol", + "s on", + "n un", + "j un", + "j am", + "j eung", + "tt o", + "e n", + "mo m", + "h o", + "ch im", + "hw ang", + "eun eun", + "jo ng", + "bo da", + "n ol", + "n eom", + "but eo", + "jig eum", + "eobs da", + "dae lo", + "i g", + "y ul", + "p yeong", + "seon eun", + "sal ang", + "seu t", + "h im", + "n an", + "h eom", + "h yang", + "p i", + "gw ang", + "eobs neun", + "hw ag", + "ge ss", + "jag i", + "il eon", + "wi hae", + "dae han", + "ga ji", + "m eog", + "j yeo", + "cha j", + "b yeong", + "eo d", + "g yeo", + "do n", + "eo ji", + "g ul", + "mo deun", + "j on", + "in saeng", + "geul ae", + "h ang", + "sa sil", + "si b", + "ch al", + "il ago", + "doe l", + "g eum", + "doe neun", + "b ol", + "ga jang", + "geul igo", + "e l", + "h yeong", + "haeng bog", + "ch ul", + "h on", + "ch ae", + "s am", + "m ang", + "in da", + "da m", + "w ol", + "ch oe", + "d ul", + "si jag", + "ch eong", + "il aneun", + "ul ineun", + "ae n", + "kk e", + "mun je", + "a do", + "t eu", + "g un", + "geun eun", + "b ge", + "ch eo", + "b aeg", + "ju g", + "t a", + "sang dae", + "geu geos", + "do g", + "eu s", + "deu s", + "ja b", + "h yeo", + "tt eohge", + "u g", + "ma j", + "ch il", + "s wi", + "j ileul", + "ch ang", + "g aneun", + "m ag", + "i ji", + "da go", + "m in", + "yo han", + "t eug", + "pp un", + "al eul", + "haeng dong", + "p o", + "m il", + "ch am", + "se sang", + "e do", + "p an", + "man deul", + "am yeon", + "a b", + "kk ae", + "b ag", + "i deul", + "p um", + "m eol", + "s un", + "n eul", + "ham kke", + "chu ng", + "da b", + "yu g", + "s ag", + "gwang ye", + "il eohge", + "bal o", + "neun de", + "ham yeo", + "go s", + "geul eoh", + "an 
ila", + "bang beob", + "da si", + "b yeol", + "g yeon", + "gam jeong", + "on eul", + "j aneun", + "yeo m", + "l ago", + "i gi", + "hw an", + "t eul", + "eo seo", + "si k", + "ch o", + "jag a", + "geul eom", + "geul eona", + "jeong do", + "g yeog", + "geul eohge", + "geu deul", + "eu t", + "im yeon", + "j jae", + "k eun", + "i sang", + "mal haessda", + "eu ge", + "no p", + "in gan", + "bo myeon", + "t aeg", + "seu s", + "d wi", + "s aneun", + "w an", + "anh go", + "t an", + "nu gu", + "su ng", + "da myeon", + "a deul", + "p eul", + "ttal a", + "d i", + "geos do", + "a ji", + "m eon", + "eum yeo", + "dol og", + "neun g", + "mo du", + "क े", + "ह ै", + "े ं", + "् र", + "ा र", + "न े", + "य ा", + "म ें", + "स े", + "क ी", + "क ा", + "ो ं", + "त ा", + "क र", + "स ्", + "क ि", + "क ो", + "र ्", + "न ा", + "क ्", + "ह ी", + "औ र", + "प र", + "त े", + "ह ो", + "प ्र", + "ा न", + "् य", + "ल ा", + "व ा", + "ल े", + "स ा", + "है ं", + "ल ि", + "ज ा", + "ह ा", + "भ ी", + "व ि", + "इ स", + "त ी", + "न ्", + "र ा", + "म ा", + "द े", + "द ि", + "ब ा", + "त ि", + "थ ा", + "न ि", + "क ार", + "ए क", + "ही ं", + "ह ु", + "ं ग", + "ै ं", + "न ी", + "स ी", + "अ प", + "त ्", + "न हीं", + "र ी", + "म े", + "म ु", + "ि त", + "त ो", + "प ा", + "ल ी", + "लि ए", + "ग ा", + "ल ्", + "र ह", + "र े", + "क् ष", + "म ैं", + "स म", + "उ स", + "ज ि", + "त ्र", + "म ि", + "च ा", + "ो ग", + "स ं", + "द ्", + "स ि", + "आ प", + "त ु", + "द ा", + "क ु", + "य ों", + "व े", + "ज ी", + "् या", + "उ न", + "ि क", + "य े", + "भ ा", + "् ट", + "ह म", + "स् ट", + "श ा", + "ड ़", + "ं द", + "ख ा", + "म ्", + "श ्", + "य ह", + "स क", + "प ू", + "कि या", + "अप ने", + "र ू", + "स ु", + "म ी", + "ह ि", + "ज ो", + "थ े", + "र ि", + "द ी", + "थ ी", + "ग ी", + "ल ोग", + "ग या", + "त र", + "न् ह", + "च ्", + "व ार", + "ब ी", + "प ्", + "द ो", + "ट ी", + "श ि", + "कर ने", + "ग े", + "ै से", + "इ न", + "ं ड", + "सा थ", + "प ु", + "ब े", + "ब ार", + "व ी", + "अ न", + "ह र", + "उ न्ह", + "हो ता", + "ज ब", + "कु छ", + "म ान", + "क ्र", + "ब ि", + "प ह", + "फ ि", + "स र", + "ार ी", + "र ो", + "द ू", + "क हा", + "त क", + "श न", + "ब ्", + "स् थ", + "व ह", + "बा द", + "ओ ं", + "ग ु", + "ज ्", + "्र े", + "ग र", + "रह े", + "व र्", + "ह ू", + "ार ्", + "प ी", + "ब हु", + "मु झ", + "्र ा", + "दि या", + "स ब", + "कर ते", + "अप नी", + "बहु त", + "क ह", + "ट े", + "हु ए", + "कि सी", + "र हा", + "ष ्ट", + "ज ़", + "ब ना", + "स ो", + "ड ि", + "को ई", + "व ्य", + "बा त", + "र ु", + "व ो", + "मुझ े", + "द् ध", + "च ार", + "मे रे", + "व र", + "्र ी", + "जा ता", + "न ों", + "प्र ा", + "दे ख", + "ट ा", + "क् या", + "अ ध", + "ल ग", + "ल ो", + "प ि", + "य ु", + "च े", + "जि स", + "ं त", + "ान ी", + "प ै", + "ज न", + "ार े", + "च ी", + "मि ल", + "द ु", + "दे श", + "च् छ", + "ष ्", + "स ू", + "ख े", + "च ु", + "ि या", + "ल गा", + "ब ु", + "उन के", + "ज् ञ", + "क्ष ा", + "त रह", + "्या दा", + "वा ले", + "पू र्", + "मैं ने", + "का म", + "रू प", + "हो ती", + "उ प", + "ज ान", + "प्र कार", + "भ ार", + "म न", + "हु आ", + "ट र", + "हू ँ", + "पर ि", + "पा स", + "अन ु", + "रा ज", + "लोग ों", + "अ ब", + "सम झ", + "ड ी", + "म ौ", + "श ु", + "च ि", + "प े", + "क ृ", + "सक ते", + "म ह", + "य ोग", + "द र्", + "उ से", + "ं ध", + "ड ा", + "जा ए", + "ब ो", + "ू ल", + "म ो", + "ों ने", + "ं स", + "तु म", + "पह ले", + "ब ता", + "त था", + "य ो", + "ग ई", + "उ त्", + "सक ता", + "क म", + "ज ्यादा", + "र ख", + "सम य", + "ार ा", + "अ गर", + "स् त", + "च ल", + "फि र", + "वार ा", + "कर ना", + "श ी", + "ग ए", + "ब न", + "ौ र", + "हो ने", + "चा ह", + "ख ु", + "हा ँ", + "उन्ह ें", + "उन्ह 
ोंने", + "छ ो", + "म् ह", + "प्र ति", + "नि क", + "व न", + "्य ू", + "र ही", + "तु म्ह", + "ज ैसे", + "ि यों", + "क् यों", + "ल ों", + "फ ़", + "ं त्र", + "हो ते", + "क् ति", + "त ्य", + "कर ्", + "क ई", + "व ं", + "कि न", + "प ो", + "कार ण", + "ड़ ी", + "भ ि", + "इस के", + "ब र", + "उस के", + "द् वारा", + "श े", + "क ॉ", + "दि न", + "न् न", + "ड़ ा", + "स् व", + "नि र्", + "मु ख", + "लि या", + "ट ि", + "ज्ञ ान", + "क् त", + "द ्र", + "ग ्", + "क् स", + "म ै", + "ग ो", + "ज े", + "ट ्र", + "म ार", + "त् व", + "ध ार", + "भा व", + "कर ता", + "ख ि", + "क ं", + "चा हि", + "य र", + "प् त", + "क ों", + "ं च", + "ज ु", + "म त", + "अ च्छ", + "हु ई", + "क भी", + "ले किन", + "भ ू", + "अप ना", + "दू स", + "चाहि ए", + "य ू", + "घ र", + "सब से", + "मे री", + "ना म", + "ढ ़", + "ं ट", + "ें गे", + "ब ै", + "फ ा", + "ए वं", + "य ी", + "ग ्र", + "क्ष े", + "आ ज", + "आप को", + "भा ग", + "ठ ा", + "क ै", + "भार त", + "उन की", + "प हु", + "स भी", + "ध ा", + "ण ा", + "स ान", + "हो गा", + "त ब", + "स ंग", + "प र्", + "अ व", + "त ना", + "ग ि", + "य न", + "स् था", + "च ित", + "ट ्", + "छ ा", + "जा ने", + "क्षे त्र", + "वा ली", + "पूर् ण", + "स मा", + "कार ी" + ] + } +} \ No newline at end of file diff --git a/comfy/text_encoders/ace_text_cleaners.py b/comfy/text_encoders/ace_text_cleaners.py new file mode 100644 index 000000000..ad3612e5f --- /dev/null +++ b/comfy/text_encoders/ace_text_cleaners.py @@ -0,0 +1,270 @@ +# basic text cleaners for the ACE step model +# I didn't copy the ones from the reference code because I didn't want to deal with the dependencies +# TODO: more languages than english? + +import re + +def number_to_text(num, ordinal=False): + """ + Convert a number (int or float) to its text representation. + + Args: + num: The number to convert + + Returns: + str: Text representation of the number + """ + + if not isinstance(num, (int, float)): + return "Input must be a number" + + # Handle special case of zero + if num == 0: + return "zero" + + # Handle negative numbers + negative = num < 0 + num = abs(num) + + # Handle floats + if isinstance(num, float): + # Split into integer and decimal parts + int_part = int(num) + + # Convert both parts + int_text = _int_to_text(int_part) + + # Handle decimal part (convert to string and remove '0.') + decimal_str = str(num).split('.')[1] + decimal_text = " point " + " ".join(_digit_to_text(int(digit)) for digit in decimal_str) + + result = int_text + decimal_text + else: + # Handle integers + result = _int_to_text(num) + + # Add 'negative' prefix for negative numbers + if negative: + result = "negative " + result + + return result + + +def _int_to_text(num): + """Helper function to convert an integer to text""" + + ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", + "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", + "seventeen", "eighteen", "nineteen"] + + tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"] + + if num < 20: + return ones[num] + + if num < 100: + return tens[num // 10] + (" " + ones[num % 10] if num % 10 != 0 else "") + + if num < 1000: + return ones[num // 100] + " hundred" + (" " + _int_to_text(num % 100) if num % 100 != 0 else "") + + if num < 1000000: + return _int_to_text(num // 1000) + " thousand" + (" " + _int_to_text(num % 1000) if num % 1000 != 0 else "") + + if num < 1000000000: + return _int_to_text(num // 1000000) + " million" + (" " + _int_to_text(num % 1000000) if num % 1000000 != 0 else "") + + return _int_to_text(num // 
1000000000) + " billion" + (" " + _int_to_text(num % 1000000000) if num % 1000000000 != 0 else "") + + +def _digit_to_text(digit): + """Convert a single digit to text""" + digits = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] + return digits[digit] + + +_whitespace_re = re.compile(r"\s+") + + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = { + "en": [ + (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) + for x in [ + ("mrs", "misess"), + ("mr", "mister"), + ("dr", "doctor"), + ("st", "saint"), + ("co", "company"), + ("jr", "junior"), + ("maj", "major"), + ("gen", "general"), + ("drs", "doctors"), + ("rev", "reverend"), + ("lt", "lieutenant"), + ("hon", "honorable"), + ("sgt", "sergeant"), + ("capt", "captain"), + ("esq", "esquire"), + ("ltd", "limited"), + ("col", "colonel"), + ("ft", "fort"), + ] + ], +} + + +def expand_abbreviations_multilingual(text, lang="en"): + for regex, replacement in _abbreviations[lang]: + text = re.sub(regex, replacement, text) + return text + + +_symbols_multilingual = { + "en": [ + (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) + for x in [ + ("&", " and "), + ("@", " at "), + ("%", " percent "), + ("#", " hash "), + ("$", " dollar "), + ("£", " pound "), + ("°", " degree "), + ] + ], +} + + +def expand_symbols_multilingual(text, lang="en"): + for regex, replacement in _symbols_multilingual[lang]: + text = re.sub(regex, replacement, text) + text = text.replace(" ", " ") # Ensure there are no double spaces + return text.strip() + + +_ordinal_re = { + "en": re.compile(r"([0-9]+)(st|nd|rd|th)"), +} +_number_re = re.compile(r"[0-9]+") +_currency_re = { + "USD": re.compile(r"((\$[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+\$))"), + "GBP": re.compile(r"((£[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+£))"), + "EUR": re.compile(r"(([0-9\.\,]*[0-9]+€)|((€[0-9\.\,]*[0-9]+)))"), +} + +_comma_number_re = re.compile(r"\b\d{1,3}(,\d{3})*(\.\d+)?\b") +_dot_number_re = re.compile(r"\b\d{1,3}(.\d{3})*(\,\d+)?\b") +_decimal_number_re = re.compile(r"([0-9]+[.,][0-9]+)") + + +def _remove_commas(m): + text = m.group(0) + if "," in text: + text = text.replace(",", "") + return text + + +def _remove_dots(m): + text = m.group(0) + if "." 
in text: + text = text.replace(".", "") + return text + + +def _expand_decimal_point(m, lang="en"): + amount = m.group(1).replace(",", ".") + return number_to_text(float(amount)) + + +def _expand_currency(m, lang="en", currency="USD"): + amount = float((re.sub(r"[^\d.]", "", m.group(0).replace(",", ".")))) + full_amount = number_to_text(amount) + + and_equivalents = { + "en": ", ", + "es": " con ", + "fr": " et ", + "de": " und ", + "pt": " e ", + "it": " e ", + "pl": ", ", + "cs": ", ", + "ru": ", ", + "nl": ", ", + "ar": ", ", + "tr": ", ", + "hu": ", ", + "ko": ", ", + } + + if amount.is_integer(): + last_and = full_amount.rfind(and_equivalents[lang]) + if last_and != -1: + full_amount = full_amount[:last_and] + + return full_amount + + +def _expand_ordinal(m, lang="en"): + return number_to_text(int(m.group(1)), ordinal=True) + + +def _expand_number(m, lang="en"): + return number_to_text(int(m.group(0))) + + +def expand_numbers_multilingual(text, lang="en"): + if lang in ["en", "ru"]: + text = re.sub(_comma_number_re, _remove_commas, text) + else: + text = re.sub(_dot_number_re, _remove_dots, text) + try: + text = re.sub(_currency_re["GBP"], lambda m: _expand_currency(m, lang, "GBP"), text) + text = re.sub(_currency_re["USD"], lambda m: _expand_currency(m, lang, "USD"), text) + text = re.sub(_currency_re["EUR"], lambda m: _expand_currency(m, lang, "EUR"), text) + except: + pass + + text = re.sub(_decimal_number_re, lambda m: _expand_decimal_point(m, lang), text) + text = re.sub(_ordinal_re[lang], lambda m: _expand_ordinal(m, lang), text) + text = re.sub(_number_re, lambda m: _expand_number(m, lang), text) + return text + + +def lowercase(text): + return text.lower() + + +def collapse_whitespace(text): + return re.sub(_whitespace_re, " ", text) + + +def multilingual_cleaners(text, lang): + text = text.replace('"', "") + if lang == "tr": + text = text.replace("İ", "i") + text = text.replace("Ö", "ö") + text = text.replace("Ü", "ü") + text = lowercase(text) + try: + text = expand_numbers_multilingual(text, lang) + except: + pass + try: + text = expand_abbreviations_multilingual(text, lang) + except: + pass + try: + text = expand_symbols_multilingual(text, lang=lang) + except: + pass + text = collapse_whitespace(text) + return text + + +def basic_cleaners(text): + """Basic pipeline that lowercases and collapses whitespace without transliteration.""" + text = lowercase(text) + text = collapse_whitespace(text) + return text diff --git a/comfy/text_encoders/umt5_config_base.json b/comfy/text_encoders/umt5_config_base.json new file mode 100644 index 000000000..6b3618f07 --- /dev/null +++ b/comfy/text_encoders/umt5_config_base.json @@ -0,0 +1,22 @@ +{ + "d_ff": 2048, + "d_kv": 64, + "d_model": 768, + "decoder_start_token_id": 0, + "dropout_rate": 0.1, + "eos_token_id": 1, + "dense_act_fn": "gelu_pytorch_tanh", + "initializer_factor": 1.0, + "is_encoder_decoder": true, + "is_gated_act": true, + "layer_norm_epsilon": 1e-06, + "model_type": "umt5", + "num_decoder_layers": 12, + "num_heads": 12, + "num_layers": 12, + "output_past": true, + "pad_token_id": 0, + "relative_attention_num_buckets": 32, + "tie_word_embeddings": false, + "vocab_size": 256384 +} diff --git a/comfy_extras/nodes_ace.py b/comfy_extras/nodes_ace.py new file mode 100644 index 000000000..36eb999d1 --- /dev/null +++ b/comfy_extras/nodes_ace.py @@ -0,0 +1,46 @@ +import torch +import comfy.model_management + + +class TextEncodeAceStepAudio: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip": ("CLIP", ), + "tags": 
("STRING", {"multiline": True, "dynamicPrompts": True}), + "lyrics": ("STRING", {"multiline": True, "dynamicPrompts": True}), + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "encode" + + CATEGORY = "conditioning" + + def encode(self, clip, tags, lyrics): + tokens = clip.tokenize(tags, lyrics=lyrics) + return (clip.encode_from_tokens_scheduled(tokens), ) + + +class EmptyAceStepLatentAudio: + def __init__(self): + self.device = comfy.model_management.intermediate_device() + + @classmethod + def INPUT_TYPES(s): + return {"required": {"seconds": ("FLOAT", {"default": 120.0, "min": 1.0, "max": 1000.0, "step": 0.1}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}), + }} + RETURN_TYPES = ("LATENT",) + FUNCTION = "generate" + + CATEGORY = "latent/audio" + + def generate(self, seconds, batch_size): + length = int(seconds * 44100 / 512 / 8) + latent = torch.zeros([batch_size, 8, 16, length], device=self.device) + return ({"samples": latent, "type": "audio"}, ) + + +NODE_CLASS_MAPPINGS = { + "TextEncodeAceStepAudio": TextEncodeAceStepAudio, + "EmptyAceStepLatentAudio": EmptyAceStepLatentAudio, +} diff --git a/nodes.py b/nodes.py index 3c3617562..d2ffd5259 100644 --- a/nodes.py +++ b/nodes.py @@ -246,6 +246,9 @@ class ConditioningZeroOut: pooled_output = d.get("pooled_output", None) if pooled_output is not None: d["pooled_output"] = torch.zeros_like(pooled_output) + conditioning_lyrics = d.get("conditioning_lyrics", None) + if conditioning_lyrics is not None: + d["conditioning_lyrics"] = torch.zeros_like(conditioning_lyrics) n = [torch.zeros_like(t[0]), d] c.append(n) return (c, ) @@ -917,7 +920,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -2259,6 +2262,7 @@ def init_builtin_extra_nodes(): "nodes_hidream.py", "nodes_fresca.py", "nodes_preview_any.py", + "nodes_ace.py", ] import_failed = [] From b9980592c4c629be4d46b707c65c14dc2a3da842 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 7 May 2025 14:27:16 -0700 Subject: [PATCH 0109/1073] Refuse to load api nodes on old pyav version. 
(#7981) --- comfy_api_nodes/canary.py | 10 ++++++++++ nodes.py | 3 +++ 2 files changed, 13 insertions(+) create mode 100644 comfy_api_nodes/canary.py diff --git a/comfy_api_nodes/canary.py b/comfy_api_nodes/canary.py new file mode 100644 index 000000000..4df7590b6 --- /dev/null +++ b/comfy_api_nodes/canary.py @@ -0,0 +1,10 @@ +import av + +ver = av.__version__.split(".") +if int(ver[0]) < 14: + raise Exception("INSTALL NEW VERSION OF PYAV TO USE API NODES.") + +if int(ver[0]) == 14 and int(ver[1]) < 2: + raise Exception("INSTALL NEW VERSION OF PYAV TO USE API NODES.") + +NODE_CLASS_MAPPINGS = {} diff --git a/nodes.py b/nodes.py index d2ffd5259..a1ddf2dd6 100644 --- a/nodes.py +++ b/nodes.py @@ -2289,6 +2289,9 @@ def init_builtin_api_nodes(): "nodes_pika.py", ] + if not load_custom_node(os.path.join(api_nodes_dir, "canary.py"), module_parent="comfy_api_nodes"): + return api_nodes_files + import_failed = [] for node_file in api_nodes_files: if not load_custom_node(os.path.join(api_nodes_dir, node_file), module_parent="comfy_api_nodes"): From cc33cd3422642445c994b104f0380821043024ec Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 7 May 2025 16:22:07 -0700 Subject: [PATCH 0110/1073] Experimental lyrics strength for ACE. (#7984) --- comfy/ldm/ace/model.py | 6 +++++- comfy/model_base.py | 1 + comfy_extras/nodes_ace.py | 9 ++++++--- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/ace/model.py b/comfy/ldm/ace/model.py index e5883df90..12c524701 100644 --- a/comfy/ldm/ace/model.py +++ b/comfy/ldm/ace/model.py @@ -273,6 +273,7 @@ class ACEStepTransformer2DModel(nn.Module): speaker_embeds: Optional[torch.FloatTensor] = None, lyric_token_idx: Optional[torch.LongTensor] = None, lyric_mask: Optional[torch.LongTensor] = None, + lyrics_strength=1.0, ): bs = encoder_text_hidden_states.shape[0] @@ -291,6 +292,8 @@ class ACEStepTransformer2DModel(nn.Module): out_dtype=encoder_text_hidden_states.dtype, ) + encoder_lyric_hidden_states *= lyrics_strength + encoder_hidden_states = torch.cat([encoder_spk_hidden_states, encoder_text_hidden_states, encoder_lyric_hidden_states], dim=1) encoder_hidden_mask = None @@ -310,7 +313,6 @@ class ACEStepTransformer2DModel(nn.Module): output_length: int = 0, block_controlnet_hidden_states: Optional[Union[List[torch.Tensor], torch.Tensor]] = None, controlnet_scale: Union[float, torch.Tensor] = 1.0, - return_dict: bool = True, ): embedded_timestep = self.timestep_embedder(self.time_proj(timestep).to(dtype=hidden_states.dtype)) temb = self.t_block(embedded_timestep) @@ -353,6 +355,7 @@ class ACEStepTransformer2DModel(nn.Module): lyric_mask: Optional[torch.LongTensor] = None, block_controlnet_hidden_states: Optional[Union[List[torch.Tensor], torch.Tensor]] = None, controlnet_scale: Union[float, torch.Tensor] = 1.0, + lyrics_strength=1.0, **kwargs ): hidden_states = x @@ -363,6 +366,7 @@ class ACEStepTransformer2DModel(nn.Module): speaker_embeds=speaker_embeds, lyric_token_idx=lyric_token_idx, lyric_mask=lyric_mask, + lyrics_strength=lyrics_strength, ) output_length = hidden_states.shape[-1] diff --git a/comfy/model_base.py b/comfy/model_base.py index 6408005b6..6d27930dc 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1139,4 +1139,5 @@ class ACEStep(BaseModel): if cross_attn is not None: out['lyric_token_idx'] = comfy.conds.CONDRegular(conditioning_lyrics) out['speaker_embeds'] = comfy.conds.CONDRegular(torch.zeros(noise.shape[0], 512, device=noise.device, dtype=noise.dtype)) + 
out['lyrics_strength'] = comfy.conds.CONDConstant(kwargs.get("lyrics_strength", 1.0)) return out diff --git a/comfy_extras/nodes_ace.py b/comfy_extras/nodes_ace.py index 36eb999d1..cbfec15a2 100644 --- a/comfy_extras/nodes_ace.py +++ b/comfy_extras/nodes_ace.py @@ -1,6 +1,6 @@ import torch import comfy.model_management - +import node_helpers class TextEncodeAceStepAudio: @classmethod @@ -9,15 +9,18 @@ class TextEncodeAceStepAudio: "clip": ("CLIP", ), "tags": ("STRING", {"multiline": True, "dynamicPrompts": True}), "lyrics": ("STRING", {"multiline": True, "dynamicPrompts": True}), + "lyrics_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), }} RETURN_TYPES = ("CONDITIONING",) FUNCTION = "encode" CATEGORY = "conditioning" - def encode(self, clip, tags, lyrics): + def encode(self, clip, tags, lyrics, lyrics_strength): tokens = clip.tokenize(tags, lyrics=lyrics) - return (clip.encode_from_tokens_scheduled(tokens), ) + conditioning = clip.encode_from_tokens_scheduled(tokens) + conditioning = node_helpers.conditioning_set_values(conditioning, {"lyrics_strength": lyrics_strength}) + return (conditioning, ) class EmptyAceStepLatentAudio: From 56b6ee6754f9ca7cf336394eacd17e24364090f1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 7 May 2025 18:28:24 -0700 Subject: [PATCH 0111/1073] Detection code to make ltxv models without config work. (#7986) --- comfy/model_detection.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index ff4c29d7e..28c586389 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -222,6 +222,10 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): if '{}adaln_single.emb.timestep_embedder.linear_1.bias'.format(key_prefix) in state_dict_keys: #Lightricks ltxv dit_config = {} dit_config["image_model"] = "ltxv" + dit_config["num_layers"] = count_blocks(state_dict_keys, '{}transformer_blocks.'.format(key_prefix) + '{}.') + shape = state_dict['{}transformer_blocks.0.attn2.to_k.weight'.format(key_prefix)].shape + dit_config["attention_head_dim"] = shape[0] // 32 + dit_config["cross_attention_dim"] = shape[1] if metadata is not None and "config" in metadata: dit_config.update(json.loads(metadata["config"]).get("transformer", {})) return dit_config From fd08e39588b777552f88cb3800a73eb55e844ac5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 7 May 2025 18:37:12 -0700 Subject: [PATCH 0112/1073] Make torchaudio not a hard requirement. (#7987) Some platforms can't install it apparently so if it's not there it should only break models that actually use it. 
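The guard added below follows the standard optional-dependency pattern: attempt the import inside try/except, log a warning instead of failing, and let the error surface only on code paths that actually need the module. A minimal sketch of that pattern, assuming a hypothetical resample_audio helper (the real patch guards the ACE VAE imports shown in the diff and uses a bare except):

    import logging

    try:
        import torchaudio
    except ImportError:
        torchaudio = None
        logging.warning("torchaudio missing, ACE model will be broken")

    def resample_audio(waveform, orig_freq, new_freq):
        # The import-time failure is deferred: only callers that actually
        # need torchaudio (here, resampling for the ACE audio VAE) raise.
        if torchaudio is None:
            raise RuntimeError("torchaudio is required for the ACE audio VAE")
        return torchaudio.functional.resample(waveform, orig_freq, new_freq)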
--- comfy/ldm/ace/vae/music_dcae_pipeline.py | 7 ++++++- comfy/ldm/ace/vae/music_log_mel.py | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/ace/vae/music_dcae_pipeline.py b/comfy/ldm/ace/vae/music_dcae_pipeline.py index 3188bc770..af81280eb 100644 --- a/comfy/ldm/ace/vae/music_dcae_pipeline.py +++ b/comfy/ldm/ace/vae/music_dcae_pipeline.py @@ -1,7 +1,12 @@ # Original from: https://github.com/ace-step/ACE-Step/blob/main/music_dcae/music_dcae_pipeline.py import torch from .autoencoder_dc import AutoencoderDC -import torchaudio +import logging +try: + import torchaudio +except: + logging.warning("torchaudio missing, ACE model will be broken") + import torchvision.transforms as transforms from .music_vocoder import ADaMoSHiFiGANV1 diff --git a/comfy/ldm/ace/vae/music_log_mel.py b/comfy/ldm/ace/vae/music_log_mel.py index d73d3f8e8..9c584eb7f 100755 --- a/comfy/ldm/ace/vae/music_log_mel.py +++ b/comfy/ldm/ace/vae/music_log_mel.py @@ -2,7 +2,12 @@ import torch import torch.nn as nn from torch import Tensor -from torchaudio.transforms import MelScale +import logging +try: + from torchaudio.transforms import MelScale +except: + logging.warning("torchaudio missing, ACE model will be broken") + import comfy.model_management class LinearSpectrogram(nn.Module): From c7c025b8d16f7f34b01409ead4dba4476cc64dae Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 7 May 2025 22:22:23 -0700 Subject: [PATCH 0113/1073] Adjust memory estimation code for ACE VAE. (#7990) --- comfy/sd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd.py b/comfy/sd.py index 50af243ba..c6b6a3b19 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -442,7 +442,7 @@ class VAE: elif "vocoder.backbone.channel_layers.0.0.bias" in sd: #Ace Step Audio self.first_stage_model = comfy.ldm.ace.vae.music_dcae_pipeline.MusicDCAE(source_sample_rate=44100) self.memory_used_encode = lambda shape, dtype: (shape[2] * 300) * model_management.dtype_size(dtype) - self.memory_used_decode = lambda shape, dtype: (shape[2] * shape[3] * 72000) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (shape[2] * shape[3] * 87000) * model_management.dtype_size(dtype) self.latent_channels = 8 self.output_channels = 2 # self.upscale_ratio = 2048 From 5d3cc85e13833aeb6ef9242cdae243083e30c6fc Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 8 May 2025 00:32:36 -0700 Subject: [PATCH 0114/1073] Make japanese hiragana and katakana characters work with ACE. 
(#7997) --- comfy/sd.py | 2 +- comfy/text_encoders/ace.py | 10 +- comfy/text_encoders/ace_text_cleaners.py | 125 +++++++++++++++++++++++ 3 files changed, 135 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index c6b6a3b19..161d96f1e 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -441,7 +441,7 @@ class VAE: self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] elif "vocoder.backbone.channel_layers.0.0.bias" in sd: #Ace Step Audio self.first_stage_model = comfy.ldm.ace.vae.music_dcae_pipeline.MusicDCAE(source_sample_rate=44100) - self.memory_used_encode = lambda shape, dtype: (shape[2] * 300) * model_management.dtype_size(dtype) + self.memory_used_encode = lambda shape, dtype: (shape[2] * 330) * model_management.dtype_size(dtype) self.memory_used_decode = lambda shape, dtype: (shape[2] * shape[3] * 87000) * model_management.dtype_size(dtype) self.latent_channels = 8 self.output_channels = 2 diff --git a/comfy/text_encoders/ace.py b/comfy/text_encoders/ace.py index b6fe451bd..d650bb10d 100644 --- a/comfy/text_encoders/ace.py +++ b/comfy/text_encoders/ace.py @@ -7,7 +7,7 @@ import torch import logging from tokenizers import Tokenizer -from .ace_text_cleaners import multilingual_cleaners +from .ace_text_cleaners import multilingual_cleaners, japanese_to_romaji SUPPORT_LANGUAGES = { "en": 259, "de": 260, "fr": 262, "es": 284, "it": 285, @@ -65,6 +65,14 @@ class VoiceBpeTokenizer: if "spa" in lang: lang = "es" + try: + line_out = japanese_to_romaji(line) + if line_out != line: + lang = "ja" + line = line_out + except: + pass + try: if structure_pattern.match(line): token_idx = self.encode(line, "en") diff --git a/comfy/text_encoders/ace_text_cleaners.py b/comfy/text_encoders/ace_text_cleaners.py index ad3612e5f..cd31d8d8c 100644 --- a/comfy/text_encoders/ace_text_cleaners.py +++ b/comfy/text_encoders/ace_text_cleaners.py @@ -4,6 +4,131 @@ import re +def japanese_to_romaji(japanese_text): + """ + Convert Japanese hiragana and katakana to romaji (Latin alphabet representation). 
+ + Args: + japanese_text (str): Text containing hiragana and/or katakana characters + + Returns: + str: The romaji (Latin alphabet) equivalent + """ + # Dictionary mapping kana characters to their romaji equivalents + kana_map = { + # Katakana characters + 'ア': 'a', 'イ': 'i', 'ウ': 'u', 'エ': 'e', 'オ': 'o', + 'カ': 'ka', 'キ': 'ki', 'ク': 'ku', 'ケ': 'ke', 'コ': 'ko', + 'サ': 'sa', 'シ': 'shi', 'ス': 'su', 'セ': 'se', 'ソ': 'so', + 'タ': 'ta', 'チ': 'chi', 'ツ': 'tsu', 'テ': 'te', 'ト': 'to', + 'ナ': 'na', 'ニ': 'ni', 'ヌ': 'nu', 'ネ': 'ne', 'ノ': 'no', + 'ハ': 'ha', 'ヒ': 'hi', 'フ': 'fu', 'ヘ': 'he', 'ホ': 'ho', + 'マ': 'ma', 'ミ': 'mi', 'ム': 'mu', 'メ': 'me', 'モ': 'mo', + 'ヤ': 'ya', 'ユ': 'yu', 'ヨ': 'yo', + 'ラ': 'ra', 'リ': 'ri', 'ル': 'ru', 'レ': 're', 'ロ': 'ro', + 'ワ': 'wa', 'ヲ': 'wo', 'ン': 'n', + + # Katakana voiced consonants + 'ガ': 'ga', 'ギ': 'gi', 'グ': 'gu', 'ゲ': 'ge', 'ゴ': 'go', + 'ザ': 'za', 'ジ': 'ji', 'ズ': 'zu', 'ゼ': 'ze', 'ゾ': 'zo', + 'ダ': 'da', 'ヂ': 'ji', 'ヅ': 'zu', 'デ': 'de', 'ド': 'do', + 'バ': 'ba', 'ビ': 'bi', 'ブ': 'bu', 'ベ': 'be', 'ボ': 'bo', + 'パ': 'pa', 'ピ': 'pi', 'プ': 'pu', 'ペ': 'pe', 'ポ': 'po', + + # Katakana combinations + 'キャ': 'kya', 'キュ': 'kyu', 'キョ': 'kyo', + 'シャ': 'sha', 'シュ': 'shu', 'ショ': 'sho', + 'チャ': 'cha', 'チュ': 'chu', 'チョ': 'cho', + 'ニャ': 'nya', 'ニュ': 'nyu', 'ニョ': 'nyo', + 'ヒャ': 'hya', 'ヒュ': 'hyu', 'ヒョ': 'hyo', + 'ミャ': 'mya', 'ミュ': 'myu', 'ミョ': 'myo', + 'リャ': 'rya', 'リュ': 'ryu', 'リョ': 'ryo', + 'ギャ': 'gya', 'ギュ': 'gyu', 'ギョ': 'gyo', + 'ジャ': 'ja', 'ジュ': 'ju', 'ジョ': 'jo', + 'ビャ': 'bya', 'ビュ': 'byu', 'ビョ': 'byo', + 'ピャ': 'pya', 'ピュ': 'pyu', 'ピョ': 'pyo', + + # Katakana small characters and special cases + 'ッ': '', # Small tsu (doubles the following consonant) + 'ャ': 'ya', 'ュ': 'yu', 'ョ': 'yo', + + # Katakana extras + 'ヴ': 'vu', 'ファ': 'fa', 'フィ': 'fi', 'フェ': 'fe', 'フォ': 'fo', + 'ウィ': 'wi', 'ウェ': 'we', 'ウォ': 'wo', + + # Hiragana characters + 'あ': 'a', 'い': 'i', 'う': 'u', 'え': 'e', 'お': 'o', + 'か': 'ka', 'き': 'ki', 'く': 'ku', 'け': 'ke', 'こ': 'ko', + 'さ': 'sa', 'し': 'shi', 'す': 'su', 'せ': 'se', 'そ': 'so', + 'た': 'ta', 'ち': 'chi', 'つ': 'tsu', 'て': 'te', 'と': 'to', + 'な': 'na', 'に': 'ni', 'ぬ': 'nu', 'ね': 'ne', 'の': 'no', + 'は': 'ha', 'ひ': 'hi', 'ふ': 'fu', 'へ': 'he', 'ほ': 'ho', + 'ま': 'ma', 'み': 'mi', 'む': 'mu', 'め': 'me', 'も': 'mo', + 'や': 'ya', 'ゆ': 'yu', 'よ': 'yo', + 'ら': 'ra', 'り': 'ri', 'る': 'ru', 'れ': 're', 'ろ': 'ro', + 'わ': 'wa', 'を': 'wo', 'ん': 'n', + + # Hiragana voiced consonants + 'が': 'ga', 'ぎ': 'gi', 'ぐ': 'gu', 'げ': 'ge', 'ご': 'go', + 'ざ': 'za', 'じ': 'ji', 'ず': 'zu', 'ぜ': 'ze', 'ぞ': 'zo', + 'だ': 'da', 'ぢ': 'ji', 'づ': 'zu', 'で': 'de', 'ど': 'do', + 'ば': 'ba', 'び': 'bi', 'ぶ': 'bu', 'べ': 'be', 'ぼ': 'bo', + 'ぱ': 'pa', 'ぴ': 'pi', 'ぷ': 'pu', 'ぺ': 'pe', 'ぽ': 'po', + + # Hiragana combinations + 'きゃ': 'kya', 'きゅ': 'kyu', 'きょ': 'kyo', + 'しゃ': 'sha', 'しゅ': 'shu', 'しょ': 'sho', + 'ちゃ': 'cha', 'ちゅ': 'chu', 'ちょ': 'cho', + 'にゃ': 'nya', 'にゅ': 'nyu', 'にょ': 'nyo', + 'ひゃ': 'hya', 'ひゅ': 'hyu', 'ひょ': 'hyo', + 'みゃ': 'mya', 'みゅ': 'myu', 'みょ': 'myo', + 'りゃ': 'rya', 'りゅ': 'ryu', 'りょ': 'ryo', + 'ぎゃ': 'gya', 'ぎゅ': 'gyu', 'ぎょ': 'gyo', + 'じゃ': 'ja', 'じゅ': 'ju', 'じょ': 'jo', + 'びゃ': 'bya', 'びゅ': 'byu', 'びょ': 'byo', + 'ぴゃ': 'pya', 'ぴゅ': 'pyu', 'ぴょ': 'pyo', + + # Hiragana small characters and special cases + 'っ': '', # Small tsu (doubles the following consonant) + 'ゃ': 'ya', 'ゅ': 'yu', 'ょ': 'yo', + + # Common punctuation and spaces + ' ': ' ', # Japanese space + '、': ', ', '。': '. 
', + } + + result = [] + i = 0 + + while i < len(japanese_text): + # Check for small tsu (doubling the following consonant) + if i < len(japanese_text) - 1 and (japanese_text[i] == 'っ' or japanese_text[i] == 'ッ'): + if i < len(japanese_text) - 1 and japanese_text[i+1] in kana_map: + next_romaji = kana_map[japanese_text[i+1]] + if next_romaji and next_romaji[0] not in 'aiueon': + result.append(next_romaji[0]) # Double the consonant + i += 1 + continue + + # Check for combinations with small ya, yu, yo + if i < len(japanese_text) - 1 and japanese_text[i+1] in ('ゃ', 'ゅ', 'ょ', 'ャ', 'ュ', 'ョ'): + combo = japanese_text[i:i+2] + if combo in kana_map: + result.append(kana_map[combo]) + i += 2 + continue + + # Regular character + if japanese_text[i] in kana_map: + result.append(kana_map[japanese_text[i]]) + else: + # If it's not in our map, keep it as is (might be kanji, romaji, etc.) + result.append(japanese_text[i]) + + i += 1 + + return ''.join(result) + def number_to_text(num, ordinal=False): """ Convert a number (int or float) to its text representation. From a692c3cca40f67ddceefaacf29dbf1bd38bdc293 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 8 May 2025 04:25:45 -0700 Subject: [PATCH 0115/1073] Make ACE VAE tiling work. (#8004) --- comfy/sd.py | 39 +++++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 161d96f1e..ee350d5b5 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -282,6 +282,7 @@ class VAE: self.downscale_index_formula = None self.upscale_index_formula = None + self.extra_1d_channel = None if config is None: if "decoder.mid.block_1.mix_factor" in sd: @@ -445,13 +446,14 @@ class VAE: self.memory_used_decode = lambda shape, dtype: (shape[2] * shape[3] * 87000) * model_management.dtype_size(dtype) self.latent_channels = 8 self.output_channels = 2 - # self.upscale_ratio = 2048 - # self.downscale_ratio = 2048 + self.upscale_ratio = 4096 + self.downscale_ratio = 4096 self.latent_dim = 2 self.process_output = lambda audio: audio self.process_input = lambda audio: audio self.working_dtypes = [torch.bfloat16, torch.float32] self.disable_offload = True + self.extra_1d_channel = 16 else: logging.warning("WARNING: No VAE weights detected, VAE not initalized.") self.first_stage_model = None @@ -510,7 +512,13 @@ class VAE: return output def decode_tiled_1d(self, samples, tile_x=128, overlap=32): - decode_fn = lambda a: self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)).float() + if samples.ndim == 3: + decode_fn = lambda a: self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)).float() + else: + og_shape = samples.shape + samples = samples.reshape((og_shape[0], og_shape[1] * og_shape[2], -1)) + decode_fn = lambda a: self.first_stage_model.decode(a.reshape((-1, og_shape[1], og_shape[2], a.shape[-1])).to(self.vae_dtype).to(self.device)).float() + return self.process_output(comfy.utils.tiled_scale_multidim(samples, decode_fn, tile=(tile_x,), overlap=overlap, upscale_amount=self.upscale_ratio, out_channels=self.output_channels, output_device=self.output_device)) def decode_tiled_3d(self, samples, tile_t=999, tile_x=32, tile_y=32, overlap=(1, 8, 8)): @@ -530,9 +538,24 @@ class VAE: samples /= 3.0 return samples - def encode_tiled_1d(self, samples, tile_x=128 * 2048, overlap=32 * 2048): - encode_fn = lambda a: self.first_stage_model.encode((self.process_input(a)).to(self.vae_dtype).to(self.device)).float() - return 
comfy.utils.tiled_scale_multidim(samples, encode_fn, tile=(tile_x,), overlap=overlap, upscale_amount=(1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device) + def encode_tiled_1d(self, samples, tile_x=256 * 2048, overlap=64 * 2048): + if self.latent_dim == 1: + encode_fn = lambda a: self.first_stage_model.encode((self.process_input(a)).to(self.vae_dtype).to(self.device)).float() + out_channels = self.latent_channels + upscale_amount = 1 / self.downscale_ratio + else: + extra_channel_size = self.extra_1d_channel + out_channels = self.latent_channels * extra_channel_size + tile_x = tile_x // extra_channel_size + overlap = overlap // extra_channel_size + upscale_amount = 1 / self.downscale_ratio + encode_fn = lambda a: self.first_stage_model.encode((self.process_input(a)).to(self.vae_dtype).to(self.device)).reshape(1, out_channels, -1).float() + + out = comfy.utils.tiled_scale_multidim(samples, encode_fn, tile=(tile_x,), overlap=overlap, upscale_amount=upscale_amount, out_channels=out_channels, output_device=self.output_device) + if self.latent_dim == 1: + return out + else: + return out.reshape(samples.shape[0], self.latent_channels, extra_channel_size, -1) def encode_tiled_3d(self, samples, tile_t=9999, tile_x=512, tile_y=512, overlap=(1, 64, 64)): encode_fn = lambda a: self.first_stage_model.encode((self.process_input(a)).to(self.vae_dtype).to(self.device)).float() @@ -557,7 +580,7 @@ class VAE: except model_management.OOM_EXCEPTION: logging.warning("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.") dims = samples_in.ndim - 2 - if dims == 1: + if dims == 1 or self.extra_1d_channel is not None: pixel_samples = self.decode_tiled_1d(samples_in) elif dims == 2: pixel_samples = self.decode_tiled_(samples_in) @@ -624,7 +647,7 @@ class VAE: tile = 256 overlap = tile // 4 samples = self.encode_tiled_3d(pixel_samples, tile_x=tile, tile_y=tile, overlap=(1, overlap, overlap)) - elif self.latent_dim == 1: + elif self.latent_dim == 1 or self.extra_1d_channel is not None: samples = self.encode_tiled_1d(pixel_samples) else: samples = self.encode_tiled_(pixel_samples) From 02a1b01aad28470f06c8b4f95b90914413d3e4c8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 8 May 2025 07:36:48 -0400 Subject: [PATCH 0116/1073] ComfyUI version 0.3.33 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 61573aead..5a73f76e4 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.32" +__version__ = "0.3.33" diff --git a/pyproject.toml b/pyproject.toml index 878e7c66a..e0be329de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.32" +version = "0.3.33" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 924d771e18000f4cb223575189daa6d2c6c5a9c1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 8 May 2025 05:40:57 -0700 Subject: [PATCH 0117/1073] Add ACE Step to README. 
(#8005) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0f39cfce2..deee70c6b 100644 --- a/README.md +++ b/README.md @@ -69,9 +69,11 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/) - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/) +- Audio Models + - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/) + - [ACE Step](https://comfyanonymous.github.io/ComfyUI_examples/audio/) - 3D Models - [Hunyuan3D 2.0](https://docs.comfy.org/tutorials/3d/hunyuan3D-2) -- [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/) - Asynchronous Queue system - Many optimizations: Only re-executes the parts of the workflow that changes between executions. - Smart memory management: can automatically run models on GPUs with as low as 1GB vram. From 8ab15c863c91bce1f9c3a32f947cb4ec659fd7fb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 9 May 2025 01:52:47 -0700 Subject: [PATCH 0118/1073] Add --mmap-torch-files to enable use of mmap when loading ckpt/pt (#8021) --- comfy/cli_args.py | 2 ++ comfy/utils.py | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 97b348f0d..de292d9b3 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -142,6 +142,8 @@ class PerformanceFeature(enum.Enum): parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops") +parser.add_argument("--mmap-torch-files", action="store_true", help="Use mmap when loading ckpt/pt files.") + parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).") diff --git a/comfy/utils.py b/comfy/utils.py index a826e41bf..561e1b858 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -28,6 +28,9 @@ import logging import itertools from torch.nn.functional import interpolate from einops import rearrange +from comfy.cli_args import args + +MMAP_TORCH_FILES = args.mmap_torch_files ALWAYS_SAFE_LOAD = False if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in pytorch 2.4, the unsafe path should be removed once earlier versions are deprecated @@ -67,8 +70,12 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False): raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is corrupt/incomplete. 
Check the file size and make sure you have copied/downloaded it correctly.".format(message, ckpt)) raise e else: + torch_args = {} + if MMAP_TORCH_FILES: + torch_args["mmap"] = True + if safe_load or ALWAYS_SAFE_LOAD: - pl_sd = torch.load(ckpt, map_location=device, weights_only=True) + pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args) else: pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle) if "global_step" in pl_sd: From 28f178a840aaa59971ecc6e0ce287bb40d275a89 Mon Sep 17 00:00:00 2001 From: thot experiment <94414189+thot-experiment@users.noreply.github.com> Date: Fri, 9 May 2025 10:46:34 -0700 Subject: [PATCH 0119/1073] move SVG to core (#7982) * move SVG to core * fix workflow embedding w/ unicode characters --- comfy_api_nodes/apis/recraft_api.py | 1 - comfy_api_nodes/nodes_recraft.py | 110 ++-------------------------- comfy_extras/nodes_images.py | 102 ++++++++++++++++++++++++++ 3 files changed, 107 insertions(+), 106 deletions(-) diff --git a/comfy_api_nodes/apis/recraft_api.py b/comfy_api_nodes/apis/recraft_api.py index c0ec9d0c8..c36d95f24 100644 --- a/comfy_api_nodes/apis/recraft_api.py +++ b/comfy_api_nodes/apis/recraft_api.py @@ -81,7 +81,6 @@ class RecraftStyle: class RecraftIO: STYLEV3 = "RECRAFT_V3_STYLE" - SVG = "SVG" # TODO: if acceptable, move into ComfyUI's typing class COLOR = "RECRAFT_COLOR" CONTROLS = "RECRAFT_CONTROLS" diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index 994f377d1..5c89d21e9 100644 --- a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -1,6 +1,7 @@ from __future__ import annotations from inspect import cleandoc from comfy.utils import ProgressBar +from comfy_extras.nodes_images import SVG # Added from comfy.comfy_types.node_typing import IO from comfy_api_nodes.apis.recraft_api import ( RecraftImageGenerationRequest, @@ -28,9 +29,6 @@ from comfy_api_nodes.apinode_utils import ( resize_mask_to_image, validate_string, ) -import folder_paths -import json -import os import torch from io import BytesIO from PIL import UnidentifiedImageError @@ -162,102 +160,6 @@ class handle_recraft_image_output: raise Exception("Received output data was not an image; likely an SVG. If you used style_id, make sure it is not a Vector art style.") -class SVG: - """ - Stores SVG representations via a list of BytesIO objects. - """ - def __init__(self, data: list[BytesIO]): - self.data = data - - def combine(self, other: SVG): - return SVG(self.data + other.data) - - @staticmethod - def combine_all(svgs: list[SVG]): - all_svgs = [] - for svg in svgs: - all_svgs.extend(svg.data) - return SVG(all_svgs) - - -class SaveSVGNode: - """ - Save SVG files on disk. - """ - - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - self.prefix_append = "" - - RETURN_TYPES = () - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "save_svg" - CATEGORY = "api node/image/Recraft" - OUTPUT_NODE = True - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "svg": (RecraftIO.SVG,), - "filename_prefix": ("STRING", {"default": "svg/ComfyUI", "tooltip": "The prefix for the file to save. 
This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}) - }, - "hidden": { - "prompt": "PROMPT", - "extra_pnginfo": "EXTRA_PNGINFO" - } - } - - def save_svg(self, svg: SVG, filename_prefix="svg/ComfyUI", prompt=None, extra_pnginfo=None): - filename_prefix += self.prefix_append - full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) - results = list() - - # Prepare metadata JSON - metadata_dict = {} - if prompt is not None: - metadata_dict["prompt"] = prompt - if extra_pnginfo is not None: - metadata_dict.update(extra_pnginfo) - - # Convert metadata to JSON string - metadata_json = json.dumps(metadata_dict, indent=2) if metadata_dict else None - - for batch_number, svg_bytes in enumerate(svg.data): - filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) - file = f"{filename_with_batch_num}_{counter:05}_.svg" - - # Read SVG content - svg_bytes.seek(0) - svg_content = svg_bytes.read().decode('utf-8') - - # Inject metadata if available - if metadata_json: - # Create metadata element with CDATA section - metadata_element = f""" - - -""" - # Insert metadata after opening svg tag using regex - import re - svg_content = re.sub(r'(]*>)', r'\1\n' + metadata_element, svg_content) - - # Write the modified SVG to file - with open(os.path.join(full_output_folder, file), 'wb') as svg_file: - svg_file.write(svg_content.encode('utf-8')) - - results.append({ - "filename": file, - "subfolder": subfolder, - "type": self.type - }) - counter += 1 - return { "ui": { "images": results } } - - class RecraftColorRGBNode: """ Create Recraft Color by choosing specific RGB values. @@ -796,8 +698,8 @@ class RecraftTextToVectorNode: Generates SVG synchronously based on prompt and resolution. """ - RETURN_TYPES = (RecraftIO.SVG,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + RETURN_TYPES = ("SVG",) # Changed + DESCRIPTION = cleandoc(__doc__ or "") if 'cleandoc' in globals() else __doc__ # Keep cleandoc if other nodes use it FUNCTION = "api_call" API_NODE = True CATEGORY = "api node/image/Recraft" @@ -918,8 +820,8 @@ class RecraftVectorizeImageNode: Generates SVG synchronously from an input image. 
""" - RETURN_TYPES = (RecraftIO.SVG,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + RETURN_TYPES = ("SVG",) # Changed + DESCRIPTION = cleandoc(__doc__ or "") if 'cleandoc' in globals() else __doc__ # Keep cleandoc if other nodes use it FUNCTION = "api_call" API_NODE = True CATEGORY = "api node/image/Recraft" @@ -1193,7 +1095,6 @@ NODE_CLASS_MAPPINGS = { "RecraftStyleV3InfiniteStyleLibrary": RecraftStyleInfiniteStyleLibrary, "RecraftColorRGB": RecraftColorRGBNode, "RecraftControls": RecraftControlsNode, - "SaveSVG": SaveSVGNode, } # A dictionary that contains the friendly/humanly readable titles for the nodes @@ -1213,5 +1114,4 @@ NODE_DISPLAY_NAME_MAPPINGS = { "RecraftStyleV3InfiniteStyleLibrary": "Recraft Style - Infinite Style Library", "RecraftColorRGB": "Recraft Color RGB", "RecraftControls": "Recraft Controls", - "SaveSVG": "Save SVG", } diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index e11a4583a..77c305619 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -10,6 +10,9 @@ from PIL.PngImagePlugin import PngInfo import numpy as np import json import os +import re +from io import BytesIO +from inspect import cleandoc from comfy.comfy_types import FileLocator @@ -190,10 +193,109 @@ class SaveAnimatedPNG: return { "ui": { "images": results, "animated": (True,)} } +class SVG: + """ + Stores SVG representations via a list of BytesIO objects. + """ + def __init__(self, data: list[BytesIO]): + self.data = data + + def combine(self, other: 'SVG') -> 'SVG': + return SVG(self.data + other.data) + + @staticmethod + def combine_all(svgs: list['SVG']) -> 'SVG': + all_svgs_list: list[BytesIO] = [] + for svg_item in svgs: + all_svgs_list.extend(svg_item.data) + return SVG(all_svgs_list) + +class SaveSVGNode: + """ + Save SVG files on disk. + """ + + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + RETURN_TYPES = () + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "save_svg" + CATEGORY = "image/save" # Changed + OUTPUT_NODE = True + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "svg": ("SVG",), # Changed + "filename_prefix": ("STRING", {"default": "svg/ComfyUI", "tooltip": "The prefix for the file to save. 
This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}) + }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO" + } + } + + def save_svg(self, svg: SVG, filename_prefix="svg/ComfyUI", prompt=None, extra_pnginfo=None): + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + results = list() + + # Prepare metadata JSON + metadata_dict = {} + if prompt is not None: + metadata_dict["prompt"] = prompt + if extra_pnginfo is not None: + metadata_dict.update(extra_pnginfo) + + # Convert metadata to JSON string + metadata_json = json.dumps(metadata_dict, indent=2) if metadata_dict else None + + for batch_number, svg_bytes in enumerate(svg.data): + filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) + file = f"{filename_with_batch_num}_{counter:05}_.svg" + + # Read SVG content + svg_bytes.seek(0) + svg_content = svg_bytes.read().decode('utf-8') + + # Inject metadata if available + if metadata_json: + # Create metadata element with CDATA section + metadata_element = f""" + + + """ + # Insert metadata after opening svg tag using regex with a replacement function + def replacement(match): + # match.group(1) contains the captured tag + return match.group(1) + '\n' + metadata_element + + # Apply the substitution + svg_content = re.sub(r'(]*>)', replacement, svg_content, flags=re.UNICODE) + + # Write the modified SVG to file + with open(os.path.join(full_output_folder, file), 'wb') as svg_file: + svg_file.write(svg_content.encode('utf-8')) + + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + counter += 1 + return { "ui": { "images": results } } + NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, "ImageFromBatch": ImageFromBatch, "SaveAnimatedWEBP": SaveAnimatedWEBP, "SaveAnimatedPNG": SaveAnimatedPNG, + "SaveSVGNode": SaveSVGNode, } From 42da274717ff75640e1fb50f88d5c117a9c50630 Mon Sep 17 00:00:00 2001 From: blepping <157360029+blepping@users.noreply.github.com> Date: Fri, 9 May 2025 11:51:02 -0600 Subject: [PATCH 0120/1073] Use normal ComfyUI attention in ACE-Steps model (#8023) * Use normal ComfyUI attention in ACE-Steps model * Let optimized_attention handle output reshape for ACE --- comfy/ldm/ace/attention.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/comfy/ldm/ace/attention.py b/comfy/ldm/ace/attention.py index 631d13647..f20a01669 100644 --- a/comfy/ldm/ace/attention.py +++ b/comfy/ldm/ace/attention.py @@ -19,6 +19,7 @@ import torch.nn.functional as F from torch import nn import comfy.model_management +from comfy.ldm.modules.attention import optimized_attention class Attention(nn.Module): def __init__( @@ -326,10 +327,6 @@ class CustomerAttnProcessor2_0: Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). 
""" - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - def apply_rotary_emb( self, x: torch.Tensor, @@ -435,13 +432,9 @@ class CustomerAttnProcessor2_0: attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) # the output of sdp = (batch, num_heads, seq_len, head_dim) - # TODO: add support for attn.scale when we move to Torch 2.1 - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) - hidden_states = hidden_states.to(query.dtype) + hidden_states = optimized_attention( + query, key, value, heads=query.shape[1], mask=attention_mask, skip_reshape=True, + ).to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) From ae60b150e577de470032840ed7194889686fa424 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Fri, 9 May 2025 17:02:45 -0700 Subject: [PATCH 0121/1073] update node tooltips and validation (#8036) --- comfy_api_nodes/nodes_ideogram.py | 13 +++--------- comfy_api_nodes/nodes_kling.py | 35 +++++++++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index 45c021f4a..0a16d74bf 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -234,9 +234,7 @@ def download_and_process_images(image_urls): class IdeogramV1(ComfyNodeABC): """ - Generates images synchronously using the Ideogram V1 model. - - Images links are available for a limited period of time; if you would like to keep the image, you must download it. + Generates images using the Ideogram V1 model. """ def __init__(self): @@ -365,9 +363,7 @@ class IdeogramV1(ComfyNodeABC): class IdeogramV2(ComfyNodeABC): """ - Generates images synchronously using the Ideogram V2 model. - - Images links are available for a limited period of time; if you would like to keep the image, you must download it. + Generates images using the Ideogram V2 model. """ def __init__(self): @@ -536,10 +532,7 @@ class IdeogramV2(ComfyNodeABC): class IdeogramV3(ComfyNodeABC): """ - Generates images synchronously using the Ideogram V3 model. - - Supports both regular image generation from text prompts and image editing with mask. - Images links are available for a limited period of time; if you would like to keep the image, you must download it. + Generates images using the Ideogram V3 model. Supports both regular image generation from text prompts and image editing with mask. 
""" def __init__(self): diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 9aa8df58b..c8d1704c1 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -184,6 +184,33 @@ def validate_image_result_response(response) -> None: raise KlingApiError(error_msg) +def validate_input_image(image: torch.Tensor) -> None: + """ + Validates the input image adheres to the expectations of the Kling API: + - The image resolution should not be less than 300*300px + - The aspect ratio of the image should be between 1:2.5 ~ 2.5:1 + + See: https://app.klingai.com/global/dev/document-api/apiReference/model/imageToVideo + """ + if len(image.shape) == 4: + height, width = image.shape[1], image.shape[2] + elif len(image.shape) == 3: + height, width = image.shape[0], image.shape[1] + else: + raise ValueError("Invalid image tensor shape.") + + # Ensure minimum resolution is met + if height < 300: + raise ValueError("Image height must be at least 300px") + if width < 300: + raise ValueError("Image width must be at least 300px") + + # Ensure aspect ratio is within acceptable range + aspect_ratio = width / height + if aspect_ratio < 1 / 2.5 or aspect_ratio > 2.5: + raise ValueError("Image aspect ratio must be between 1:2.5 and 2.5:1") + + def get_camera_control_input_config( tooltip: str, default: float = 0.0 ) -> tuple[IO, InputTypeOptions]: @@ -530,7 +557,10 @@ class KlingImage2VideoNode(KlingNodeBase): return { "required": { "start_frame": model_field_to_node_input( - IO.IMAGE, KlingImage2VideoRequest, "image" + IO.IMAGE, + KlingImage2VideoRequest, + "image", + tooltip="The reference image used to generate the video.", ), "prompt": model_field_to_node_input( IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True @@ -607,9 +637,10 @@ class KlingImage2VideoNode(KlingNodeBase): auth_token: Optional[str] = None, ) -> tuple[VideoFromFile]: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) + validate_input_image(start_frame) if camera_control is not None: - # Camera control type for image 2 video is always simple + # Camera control type for image 2 video is always `simple` camera_control.type = KlingCameraControlType.simple initial_operation = SynchronousOperation( From 1b3bf0a5dac887ec651df8e326bd260e17e56909 Mon Sep 17 00:00:00 2001 From: Pam <42671363+pamparamm@users.noreply.github.com> Date: Sat, 10 May 2025 05:14:13 +0500 Subject: [PATCH 0122/1073] Fix res_multistep_ancestral sampler (#8030) --- comfy/k_diffusion/sampling.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 77ef748e8..fbdf6f554 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1277,6 +1277,7 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None phi1_fn = lambda t: torch.expm1(t) / t phi2_fn = lambda t: (phi1_fn(t) - 1.0) / t + old_sigma_down = None old_denoised = None uncond_denoised = None def post_cfg_function(args): @@ -1304,9 +1305,9 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None x = x + d * dt else: # Second order multistep method in https://arxiv.org/pdf/2308.02157 - t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigma_down), t_fn(sigmas[i - 1]) + t, t_old, t_next, t_prev = t_fn(sigmas[i]), t_fn(old_sigma_down), t_fn(sigma_down), t_fn(sigmas[i - 1]) h = t_next - t - c2 = (t_prev - t) / h + c2 = (t_prev - t_old) / h phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h) b1 = 
torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0) @@ -1326,6 +1327,7 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None old_denoised = uncond_denoised else: old_denoised = denoised + old_sigma_down = sigma_down return x @torch.no_grad() From d42613686f3db1bf55e6dec75434ed2649baa6bc Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 10 May 2025 04:52:56 -0700 Subject: [PATCH 0123/1073] Fix issue with fp8 ops on some models. (#8045) _scaled_mm errors when an input is non contiguous. --- comfy/ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 032787915..431c8f89d 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -308,10 +308,10 @@ def fp8_linear(self, input): if scale_input is None: scale_input = torch.ones((), device=input.device, dtype=torch.float32) input = torch.clamp(input, min=-448, max=448, out=input) - input = input.reshape(-1, input_shape[2]).to(dtype) + input = input.reshape(-1, input_shape[2]).to(dtype).contiguous() else: scale_input = scale_input.to(input.device) - input = (input * (1.0 / scale_input).to(input_dtype)).reshape(-1, input_shape[2]).to(dtype) + input = (input * (1.0 / scale_input).to(input_dtype)).reshape(-1, input_shape[2]).to(dtype).contiguous() if bias is not None: o = torch._scaled_mm(input, w, out_dtype=input_dtype, bias=bias, scale_a=scale_input, scale_b=scale_weight) From 235d3901fc3f97698e97ff52f61a7caa5f1f11ed Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sat, 10 May 2025 17:40:02 -0700 Subject: [PATCH 0124/1073] Add method to stream text to node UI (#8018) * show text progress preview * include node id in message --- server.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/server.py b/server.py index f64ec27d4..cb1c6a8fd 100644 --- a/server.py +++ b/server.py @@ -32,12 +32,13 @@ from app.frontend_management import FrontendManager from app.user_manager import UserManager from app.model_manager import ModelFileManager from app.custom_node_manager import CustomNodeManager -from typing import Optional +from typing import Optional, Union from api_server.routes.internal.internal_routes import InternalRoutes class BinaryEventTypes: PREVIEW_IMAGE = 1 UNENCODED_PREVIEW_IMAGE = 2 + TEXT = 3 async def send_socket_catch_exception(function, message): try: @@ -878,3 +879,15 @@ class PromptServer(): logging.warning(traceback.format_exc()) return json_data + + def send_progress_text( + self, text: Union[bytes, bytearray, str], node_id: str, sid=None + ): + if isinstance(text, str): + text = text.encode("utf-8") + node_id_bytes = str(node_id).encode("utf-8") + + # Pack the node_id length as a 4-byte unsigned integer, followed by the node_id bytes + message = struct.pack(">I", len(node_id_bytes)) + node_id_bytes + text + + self.send_sync(BinaryEventTypes.TEXT, message, sid) From 3535909eb8581ba3e4461f3c9723d244c446f65e Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sat, 10 May 2025 19:10:58 -0700 Subject: [PATCH 0125/1073] Add support for Comfy API keys (#8041) * Handle Comfy API key based authorizaton (#167) Co-authored-by: Jedrzej Kosinski * Bump frontend version to include API key features (#170) * bump templates version --------- Co-authored-by: Jedrzej Kosinski --- comfy_api_nodes/apinode_utils.py | 25 ++--- comfy_api_nodes/apis/client.py | 43 +++++--- comfy_api_nodes/nodes_bfl.py | 24 ++--- comfy_api_nodes/nodes_ideogram.py | 29 +++-- comfy_api_nodes/nodes_kling.py | 166 
++++++++++++++++++----------- comfy_api_nodes/nodes_luma.py | 50 ++++----- comfy_api_nodes/nodes_minimax.py | 15 +-- comfy_api_nodes/nodes_openai.py | 27 +++-- comfy_api_nodes/nodes_pika.py | 60 ++++++----- comfy_api_nodes/nodes_pixverse.py | 28 ++--- comfy_api_nodes/nodes_recraft.py | 36 +++---- comfy_api_nodes/nodes_stability.py | 27 +++-- comfy_api_nodes/nodes_veo2.py | 7 +- execution.py | 2 + requirements.txt | 4 +- 15 files changed, 319 insertions(+), 224 deletions(-) diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index bd3b8908b..e28d7d607 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -1,3 +1,4 @@ +from __future__ import annotations import io import logging from typing import Optional @@ -314,7 +315,7 @@ def upload_file_to_comfyapi( file_bytes_io: BytesIO, filename: str, upload_mime_type: str, - auth_token: Optional[str] = None, + auth_kwargs: Optional[dict[str,str]] = None, ) -> str: """ Uploads a single file to ComfyUI API and returns its download URL. @@ -323,7 +324,7 @@ def upload_file_to_comfyapi( file_bytes_io: BytesIO object containing the file data. filename: The filename of the file. upload_mime_type: MIME type of the file. - auth_token: Optional authentication token. + auth_kwargs: Optional authentication token(s). Returns: The download URL for the uploaded file. @@ -337,7 +338,7 @@ def upload_file_to_comfyapi( response_model=UploadResponse, ), request=request_object, - auth_token=auth_token, + auth_kwargs=auth_kwargs, ) response: UploadResponse = operation.execute() @@ -351,7 +352,7 @@ def upload_file_to_comfyapi( def upload_video_to_comfyapi( video: VideoInput, - auth_token: Optional[str] = None, + auth_kwargs: Optional[dict[str,str]] = None, container: VideoContainer = VideoContainer.MP4, codec: VideoCodec = VideoCodec.H264, max_duration: Optional[int] = None, @@ -362,7 +363,7 @@ def upload_video_to_comfyapi( Args: video: VideoInput object (Comfy VIDEO type). - auth_token: Optional authentication token. + auth_kwargs: Optional authentication token(s). container: The video container format to use (default: MP4). codec: The video codec to use (default: H264). max_duration: Optional maximum duration of the video in seconds. If the video is longer than this, an error will be raised. @@ -390,7 +391,7 @@ def upload_video_to_comfyapi( video_bytes_io.seek(0) return upload_file_to_comfyapi( - video_bytes_io, filename, upload_mime_type, auth_token + video_bytes_io, filename, upload_mime_type, auth_kwargs ) @@ -453,7 +454,7 @@ def audio_ndarray_to_bytesio( def upload_audio_to_comfyapi( audio: AudioInput, - auth_token: Optional[str] = None, + auth_kwargs: Optional[dict[str,str]] = None, container_format: str = "mp4", codec_name: str = "aac", mime_type: str = "audio/mp4", @@ -465,7 +466,7 @@ def upload_audio_to_comfyapi( Args: audio: a Comfy `AUDIO` type (contains waveform tensor and sample_rate) - auth_token: Optional authentication token. + auth_kwargs: Optional authentication token(s). Returns: The download URL for the uploaded audio file. 
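For illustration, the practical effect of the auth_token -> auth_kwargs change in these upload helpers is that callers now forward whatever credential material they received, as a dict, instead of unpacking one token. A minimal standalone sketch of the new calling convention follows; upload_file_to_comfyapi below is a stub standing in for the real helper above, and the node method is condensed:

from io import BytesIO
from typing import Optional

def upload_file_to_comfyapi(file_bytes_io: BytesIO, filename: str,
                            upload_mime_type: str,
                            auth_kwargs: Optional[dict] = None) -> str:
    # Stub with the post-patch signature: credentials arrive as a dict
    # that may hold "auth_token", "comfy_api_key", or neither.
    auth_kwargs = auth_kwargs or {}
    if not (auth_kwargs.get("auth_token") or auth_kwargs.get("comfy_api_key")):
        raise Exception("Unauthorized: Please login first to use this node.")
    return "https://example.invalid/files/image.png"  # placeholder URL

def api_call(image_bytes: bytes, **kwargs) -> str:
    # Nodes now forward their hidden inputs wholesale instead of
    # unpacking a single token: auth_kwargs=kwargs.
    return upload_file_to_comfyapi(BytesIO(image_bytes), "image.png",
                                   "image/png", auth_kwargs=kwargs)

print(api_call(b"...", comfy_api_key="key-123"))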
@@ -477,11 +478,11 @@ def upload_audio_to_comfyapi( audio_data_np, sample_rate, container_format, codec_name ) - return upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_token) + return upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_kwargs) def upload_images_to_comfyapi( - image: torch.Tensor, max_images=8, auth_token=None, mime_type: Optional[str] = None + image: torch.Tensor, max_images=8, auth_kwargs: Optional[dict[str,str]] = None, mime_type: Optional[str] = None ) -> list[str]: """ Uploads images to ComfyUI API and returns download URLs. @@ -490,7 +491,7 @@ def upload_images_to_comfyapi( Args: image: Input torch.Tensor image. max_images: Maximum number of images to upload. - auth_token: Optional authentication token. + auth_kwargs: Optional authentication token(s). mime_type: Optional MIME type for the image. """ # if batch, try to upload each file if max_images is greater than 0 @@ -521,7 +522,7 @@ def upload_images_to_comfyapi( response_model=UploadResponse, ), request=request_object, - auth_token=auth_token, + auth_kwargs=auth_kwargs, ) response = operation.execute() diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 929e386d4..cff52714f 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -20,7 +20,8 @@ Usage Examples: # 1. Create the API client api_client = ApiClient( base_url="https://api.example.com", - api_key="your_api_key_here", + auth_token="your_auth_token_here", + comfy_api_key="your_comfy_api_key_here", timeout=30.0, verify_ssl=True ) @@ -146,12 +147,14 @@ class ApiClient: def __init__( self, base_url: str, - api_key: Optional[str] = None, + auth_token: Optional[str] = None, + comfy_api_key: Optional[str] = None, timeout: float = 3600.0, verify_ssl: bool = True, ): self.base_url = base_url - self.api_key = api_key + self.auth_token = auth_token + self.comfy_api_key = comfy_api_key self.timeout = timeout self.verify_ssl = verify_ssl @@ -201,8 +204,10 @@ class ApiClient: """Get headers for API requests, including authentication if available""" headers = {"Content-Type": "application/json", "Accept": "application/json"} - if self.api_key: - headers["Authorization"] = f"Bearer {self.api_key}" + if self.auth_token: + headers["Authorization"] = f"Bearer {self.auth_token}" + elif self.comfy_api_key: + headers["X-API-KEY"] = self.comfy_api_key return headers @@ -236,7 +241,7 @@ class ApiClient: requests.RequestException: If the request fails """ url = urljoin(self.base_url, path) - self.check_auth_token(self.api_key) + self.check_auth(self.auth_token, self.comfy_api_key) # Combine default headers with any provided headers request_headers = self.get_headers() if headers: @@ -320,11 +325,11 @@ class ApiClient: return response.json() return {} - def check_auth_token(self, auth_token): - """Verify that an auth token is present.""" - if auth_token is None: + def check_auth(self, auth_token, comfy_api_key): + """Verify that an auth token is present or comfy_api_key is present""" + if auth_token is None and comfy_api_key is None: raise Exception("Unauthorized: Please login first to use this node.") - return auth_token + return auth_token or comfy_api_key @staticmethod def upload_file( @@ -392,6 +397,8 @@ class SynchronousOperation(Generic[T, R]): files: Optional[Dict[str, Any]] = None, api_base: str | None = None, auth_token: Optional[str] = None, + comfy_api_key: Optional[str] = None, + auth_kwargs: Optional[Dict[str,str]] = None, timeout: float = 604800.0, verify_ssl: bool = True, 
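The header selection above is the heart of this patch. Restated as a self-contained sketch (the branch logic is copied from the diff; the free-standing function and the asserts are illustrative only):

from typing import Optional

def get_headers(auth_token: Optional[str], comfy_api_key: Optional[str]) -> dict:
    # Mirrors ApiClient.get_headers(): a login token wins and is sent as a
    # Bearer Authorization header; otherwise a Comfy API key is sent via
    # X-API-KEY; with neither, the request carries no credentials.
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    if auth_token:
        headers["Authorization"] = f"Bearer {auth_token}"
    elif comfy_api_key:
        headers["X-API-KEY"] = comfy_api_key
    return headers

assert "Authorization" in get_headers("tok", "key")    # token takes precedence
assert get_headers(None, "key")["X-API-KEY"] == "key"  # API key is the fallback

Note the precedence: if both credentials are supplied, only the Bearer token is sent, which matches check_auth returning `auth_token or comfy_api_key`.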
content_type: str = "application/json", @@ -403,6 +410,10 @@ class SynchronousOperation(Generic[T, R]): self.error = None self.api_base: str = api_base or args.comfy_api_base self.auth_token = auth_token + self.comfy_api_key = comfy_api_key + if auth_kwargs is not None: + self.auth_token = auth_kwargs.get("auth_token", self.auth_token) + self.comfy_api_key = auth_kwargs.get("comfy_api_key", self.comfy_api_key) self.timeout = timeout self.verify_ssl = verify_ssl self.files = files @@ -415,7 +426,8 @@ class SynchronousOperation(Generic[T, R]): if client is None: client = ApiClient( base_url=self.api_base, - api_key=self.auth_token, + auth_token=self.auth_token, + comfy_api_key=self.comfy_api_key, timeout=self.timeout, verify_ssl=self.verify_ssl, ) @@ -502,12 +514,18 @@ class PollingOperation(Generic[T, R]): request: Optional[T] = None, api_base: str | None = None, auth_token: Optional[str] = None, + comfy_api_key: Optional[str] = None, + auth_kwargs: Optional[Dict[str,str]] = None, poll_interval: float = 5.0, ): self.poll_endpoint = poll_endpoint self.request = request self.api_base: str = api_base or args.comfy_api_base self.auth_token = auth_token + self.comfy_api_key = comfy_api_key + if auth_kwargs is not None: + self.auth_token = auth_kwargs.get("auth_token", self.auth_token) + self.comfy_api_key = auth_kwargs.get("comfy_api_key", self.comfy_api_key) self.poll_interval = poll_interval # Polling configuration @@ -528,7 +546,8 @@ class PollingOperation(Generic[T, R]): if client is None: client = ApiClient( base_url=self.api_base, - api_key=self.auth_token, + auth_token=self.auth_token, + comfy_api_key=self.comfy_api_key, ) return self._poll_until_complete(client) except Exception as e: diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index 122a6ddf8..66ef1b391 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -179,6 +179,7 @@ class FluxProUltraImageNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -211,7 +212,6 @@ class FluxProUltraImageNode(ComfyNodeABC): seed=0, image_prompt=None, image_prompt_strength=0.1, - auth_token=None, **kwargs, ): if image_prompt is None: @@ -244,7 +244,7 @@ class FluxProUltraImageNode(ComfyNodeABC): None if image_prompt is None else round(image_prompt_strength, 2) ), ), - auth_token=auth_token, + auth_kwargs=kwargs, ) output_image = handle_bfl_synchronous_operation(operation) return (output_image,) @@ -319,6 +319,7 @@ class FluxProImageNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -337,7 +338,6 @@ class FluxProImageNode(ComfyNodeABC): seed=0, image_prompt=None, # image_prompt_strength=0.1, - auth_token=None, **kwargs, ): image_prompt = ( @@ -361,7 +361,7 @@ class FluxProImageNode(ComfyNodeABC): seed=seed, image_prompt=image_prompt, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) output_image = handle_bfl_synchronous_operation(operation) return (output_image,) @@ -461,6 +461,7 @@ class FluxProExpandNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -482,7 +483,6 @@ class FluxProExpandNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - auth_token=None, **kwargs, ): image = convert_image_to_base64(image) @@ -506,7 +506,7 @@ class FluxProExpandNode(ComfyNodeABC): seed=seed, image=image, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) output_image = 
handle_bfl_synchronous_operation(operation) return (output_image,) @@ -572,6 +572,7 @@ class FluxProFillNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -590,7 +591,6 @@ class FluxProFillNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - auth_token=None, **kwargs, ): # prepare mask @@ -615,7 +615,7 @@ class FluxProFillNode(ComfyNodeABC): image=image, mask=mask, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) output_image = handle_bfl_synchronous_operation(operation) return (output_image,) @@ -706,6 +706,7 @@ class FluxProCannyNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -726,7 +727,6 @@ class FluxProCannyNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - auth_token=None, **kwargs, ): control_image = convert_image_to_base64(control_image[:,:,:,:3]) @@ -763,7 +763,7 @@ class FluxProCannyNode(ComfyNodeABC): canny_high_threshold=canny_high_threshold, preprocessed_image=preprocessed_image, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) output_image = handle_bfl_synchronous_operation(operation) return (output_image,) @@ -834,6 +834,7 @@ class FluxProDepthNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -852,7 +853,6 @@ class FluxProDepthNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - auth_token=None, **kwargs, ): control_image = convert_image_to_base64(control_image[:,:,:,:3]) @@ -878,7 +878,7 @@ class FluxProDepthNode(ComfyNodeABC): control_image=control_image, preprocessed_image=preprocessed_image, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) output_image = handle_bfl_synchronous_operation(operation) return (output_image,) diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index 0a16d74bf..d25468b17 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -301,7 +301,10 @@ class IdeogramV1(ComfyNodeABC): {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = (IO.IMAGE,) @@ -319,7 +322,7 @@ class IdeogramV1(ComfyNodeABC): seed=0, negative_prompt="", num_images=1, - auth_token=None, + **kwargs, ): # Determine the model based on turbo setting aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) @@ -345,7 +348,7 @@ class IdeogramV1(ComfyNodeABC): negative_prompt=negative_prompt if negative_prompt else None, ) ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response = operation.execute() @@ -454,7 +457,10 @@ class IdeogramV2(ComfyNodeABC): # }, #), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = (IO.IMAGE,) @@ -475,7 +481,7 @@ class IdeogramV2(ComfyNodeABC): negative_prompt="", num_images=1, color_palette="", - auth_token=None, + **kwargs, ): aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) resolution = V1_V1_RES_MAP.get(resolution, None) @@ -515,7 +521,7 @@ class IdeogramV2(ComfyNodeABC): color_palette=color_palette if color_palette else None, ) ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response = operation.execute() @@ -614,7 +620,10 @@ class IdeogramV3(ComfyNodeABC): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + 
"auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = (IO.IMAGE,) @@ -634,7 +643,7 @@ class IdeogramV3(ComfyNodeABC): seed=0, num_images=1, rendering_speed="BALANCED", - auth_token=None, + **kwargs, ): # Check if both image and mask are provided for editing mode if image is not None and mask is not None: @@ -698,7 +707,7 @@ class IdeogramV3(ComfyNodeABC): "mask": mask_binary, }, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=kwargs, ) elif image is not None or mask is not None: @@ -739,7 +748,7 @@ class IdeogramV3(ComfyNodeABC): response_model=IdeogramGenerateResponse, ), request=gen_request, - auth_token=auth_token, + auth_kwargs=kwargs, ) # Execute the operation and process response diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index c8d1704c1..b2be83656 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -95,7 +95,7 @@ class KlingApiError(Exception): pass -def poll_until_finished(auth_token: str, api_endpoint: ApiEndpoint[Any, R]) -> R: +def poll_until_finished(auth_kwargs: dict[str,str], api_endpoint: ApiEndpoint[Any, R]) -> R: """Polls the Kling API endpoint until the task reaches a terminal state, then returns the response.""" return PollingOperation( poll_endpoint=api_endpoint, @@ -108,7 +108,7 @@ def poll_until_finished(auth_token: str, api_endpoint: ApiEndpoint[Any, R]) -> R if response.data and response.data.task_status else None ), - auth_token=auth_token, + auth_kwargs=auth_kwargs, ).execute() @@ -418,16 +418,19 @@ class KlingTextToVideoNode(KlingNodeBase): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = ("VIDEO", "STRING", "STRING") RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Text to Video Node" - def get_response(self, task_id: str, auth_token: str) -> KlingText2VideoResponse: + def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingText2VideoResponse: return poll_until_finished( - auth_token, + auth_kwargs, ApiEndpoint( path=f"{PATH_TEXT_TO_VIDEO}/{task_id}", method=HttpMethod.GET, @@ -446,7 +449,7 @@ class KlingTextToVideoNode(KlingNodeBase): camera_control: Optional[KlingCameraControl] = None, model_name: Optional[str] = None, duration: Optional[str] = None, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile, str, str]: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) if model_name is None: @@ -468,14 +471,14 @@ class KlingTextToVideoNode(KlingNodeBase): aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), camera_control=camera_control, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) task_creation_response = initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_token) + final_response = self.get_response(task_id, auth_kwargs=kwargs) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -522,7 +525,10 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. 
Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text." @@ -534,7 +540,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode): cfg_scale: float, aspect_ratio: str, camera_control: Optional[KlingCameraControl] = None, - auth_token: Optional[str] = None, + **kwargs, ): return super().api_call( model_name=KlingVideoGenModelName.kling_v1, @@ -545,7 +551,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode): prompt=prompt, negative_prompt=negative_prompt, camera_control=camera_control, - auth_token=auth_token, + **kwargs, ) @@ -604,16 +610,19 @@ class KlingImage2VideoNode(KlingNodeBase): enum_type=KlingVideoGenDuration, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = ("VIDEO", "STRING", "STRING") RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Image to Video Node" - def get_response(self, task_id: str, auth_token: str) -> KlingImage2VideoResponse: + def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingImage2VideoResponse: return poll_until_finished( - auth_token, + auth_kwargs, ApiEndpoint( path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}", method=HttpMethod.GET, @@ -634,7 +643,7 @@ class KlingImage2VideoNode(KlingNodeBase): duration: str, camera_control: Optional[KlingCameraControl] = None, end_frame: Optional[torch.Tensor] = None, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile]: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) validate_input_image(start_frame) @@ -666,14 +675,14 @@ class KlingImage2VideoNode(KlingNodeBase): duration=KlingVideoGenDuration(duration), camera_control=camera_control, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) task_creation_response = initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_token) + final_response = self.get_response(task_id, auth_kwargs=kwargs) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -723,7 +732,10 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image." @@ -736,7 +748,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): cfg_scale: float, aspect_ratio: str, camera_control: KlingCameraControl, - auth_token: Optional[str] = None, + **kwargs, ): return super().api_call( model_name=KlingVideoGenModelName.kling_v1_5, @@ -748,7 +760,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): prompt=prompt, negative_prompt=negative_prompt, camera_control=camera_control, - auth_token=auth_token, + **kwargs, ) @@ -816,7 +828,10 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Generate a video sequence that transitions between your provided start and end images. 
The node creates all frames in between, producing a smooth transformation from the first frame to the last." @@ -830,7 +845,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): cfg_scale: float, aspect_ratio: str, mode: str, - auth_token: Optional[str] = None, + **kwargs, ): mode, duration, model_name = KlingStartEndFrameNode.get_mode_string_mapping()[ mode @@ -845,7 +860,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): aspect_ratio=aspect_ratio, duration=duration, end_frame=end_frame, - auth_token=auth_token, + **kwargs, ) @@ -875,16 +890,19 @@ class KlingVideoExtendNode(KlingNodeBase): IO.STRING, KlingVideoExtendRequest, "video_id", forceInput=True ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = ("VIDEO", "STRING", "STRING") RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes." - def get_response(self, task_id: str, auth_token: str) -> KlingVideoExtendResponse: + def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingVideoExtendResponse: return poll_until_finished( - auth_token, + auth_kwargs, ApiEndpoint( path=f"{PATH_VIDEO_EXTEND}/{task_id}", method=HttpMethod.GET, @@ -899,7 +917,7 @@ class KlingVideoExtendNode(KlingNodeBase): negative_prompt: str, cfg_scale: float, video_id: str, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile, str, str]: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) initial_operation = SynchronousOperation( @@ -915,14 +933,14 @@ class KlingVideoExtendNode(KlingNodeBase): cfg_scale=cfg_scale, video_id=video_id, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) task_creation_response = initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_token) + final_response = self.get_response(task_id, auth_kwargs=kwargs) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -935,9 +953,9 @@ class KlingVideoEffectsBase(KlingNodeBase): RETURN_TYPES = ("VIDEO", "STRING", "STRING") RETURN_NAMES = ("VIDEO", "video_id", "duration") - def get_response(self, task_id: str, auth_token: str) -> KlingVideoEffectsResponse: + def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingVideoEffectsResponse: return poll_until_finished( - auth_token, + auth_kwargs, ApiEndpoint( path=f"{PATH_VIDEO_EFFECTS}/{task_id}", method=HttpMethod.GET, @@ -955,7 +973,7 @@ class KlingVideoEffectsBase(KlingNodeBase): image_1: torch.Tensor, image_2: Optional[torch.Tensor] = None, mode: Optional[KlingVideoGenMode] = None, - auth_token: Optional[str] = None, + **kwargs, ): if dual_character: request_input_field = KlingDualCharacterEffectInput( @@ -985,14 +1003,14 @@ class KlingVideoEffectsBase(KlingNodeBase): effect_scene=effect_scene, input=request_input_field, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) task_creation_response = initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_token) + final_response = self.get_response(task_id, auth_kwargs=kwargs) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -1033,7 +1051,10 @@ class 
KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): enum_type=KlingVideoGenDuration, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite." @@ -1048,7 +1069,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): model_name: KlingCharacterEffectModelName, mode: KlingVideoGenMode, duration: KlingVideoGenDuration, - auth_token: Optional[str] = None, + **kwargs, ): video, _, duration = super().api_call( dual_character=True, @@ -1058,7 +1079,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): duration=duration, image_1=image_left, image_2=image_right, - auth_token=auth_token, + **kwargs, ) return video, duration @@ -1094,7 +1115,10 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): enum_type=KlingVideoGenDuration, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Achieve different special effects when generating a video based on the effect_scene." @@ -1105,7 +1129,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): effect_scene: KlingSingleImageEffectsScene, model_name: KlingSingleImageEffectModelName, duration: KlingVideoGenDuration, - auth_token: Optional[str] = None, + **kwargs, ): return super().api_call( dual_character=False, @@ -1113,7 +1137,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): model_name=model_name, duration=duration, image_1=image, - auth_token=auth_token, + **kwargs, ) @@ -1131,10 +1155,10 @@ class KlingLipSyncBase(KlingNodeBase): f"Text is too long. Maximum length is {MAX_PROMPT_LENGTH_LIP_SYNC} characters." ) - def get_response(self, task_id: str, auth_token: str) -> KlingLipSyncResponse: + def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingLipSyncResponse: """Polls the Kling API endpoint until the task reaches a terminal state.""" return poll_until_finished( - auth_token, + auth_kwargs, ApiEndpoint( path=f"{PATH_LIP_SYNC}/{task_id}", method=HttpMethod.GET, @@ -1152,18 +1176,18 @@ class KlingLipSyncBase(KlingNodeBase): text: Optional[str] = None, voice_speed: Optional[float] = None, voice_id: Optional[str] = None, - auth_token: Optional[str] = None, + **kwargs ) -> tuple[VideoFromFile, str, str]: if text: self.validate_text(text) # Upload video to Comfy API and get download URL - video_url = upload_video_to_comfyapi(video, auth_token) + video_url = upload_video_to_comfyapi(video, auth_kwargs=kwargs) logging.info("Uploaded video to Comfy API. URL: %s", video_url) # Upload the audio file to Comfy API and get download URL if audio: - audio_url = upload_audio_to_comfyapi(audio, auth_token) + audio_url = upload_audio_to_comfyapi(audio, auth_kwargs=kwargs) logging.info("Uploaded audio to Comfy API. 
URL: %s", audio_url) else: audio_url = None @@ -1187,14 +1211,14 @@ class KlingLipSyncBase(KlingNodeBase): voice_id=voice_id, ), ), - auth_token=auth_token, + auth_kwargs=kwargs, ) task_creation_response = initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_token) + final_response = self.get_response(task_id, auth_kwargs=kwargs) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -1217,7 +1241,10 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): enum_type=KlingLipSyncVoiceLanguage, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file." @@ -1227,14 +1254,14 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): video: VideoInput, audio: AudioInput, voice_language: str, - auth_token: Optional[str] = None, + **kwargs, ): return super().api_call( video=video, audio=audio, voice_language=voice_language, mode="audio2video", - auth_token=auth_token, + **kwargs, ) @@ -1323,7 +1350,10 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): IO.FLOAT, KlingLipSyncInputObject, "voice_speed", slider=True ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt." @@ -1334,7 +1364,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): text: str, voice: str, voice_speed: float, - auth_token: Optional[str] = None, + **kwargs, ): voice_id, voice_language = KlingLipSyncTextToVideoNode.get_voice_config()[voice] return super().api_call( @@ -1344,7 +1374,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): voice_id=voice_id, voice_speed=voice_speed, mode="text2video", - auth_token=auth_token, + **kwargs, ) @@ -1381,16 +1411,19 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): enum_type=KlingVirtualTryOnModelName, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human." 
def get_response( - self, task_id: str, auth_token: Optional[str] = None + self, task_id: str, auth_kwargs: dict[str,str] = None ) -> KlingVirtualTryOnResponse: return poll_until_finished( - auth_token, + auth_kwargs, ApiEndpoint( path=f"{PATH_VIRTUAL_TRY_ON}/{task_id}", method=HttpMethod.GET, @@ -1404,7 +1437,7 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): human_image: torch.Tensor, cloth_image: torch.Tensor, model_name: KlingVirtualTryOnModelName, - auth_token: Optional[str] = None, + **kwargs, ): initial_operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -1418,14 +1451,14 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): cloth_image=tensor_to_base64_string(cloth_image), model_name=model_name, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) task_creation_response = initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_token) + final_response = self.get_response(task_id, auth_kwargs=kwargs) validate_image_result_response(final_response) images = get_images_from_response(final_response) @@ -1493,16 +1526,19 @@ class KlingImageGenerationNode(KlingImageGenerationBase): "optional": { "image": (IO.IMAGE, {}), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } DESCRIPTION = "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image." def get_response( - self, task_id: str, auth_token: Optional[str] = None + self, task_id: str, auth_kwargs: Optional[dict[str,str]] = None ) -> KlingImageGenerationsResponse: return poll_until_finished( - auth_token, + auth_kwargs, ApiEndpoint( path=f"{PATH_IMAGE_GENERATIONS}/{task_id}", method=HttpMethod.GET, @@ -1522,7 +1558,7 @@ class KlingImageGenerationNode(KlingImageGenerationBase): n: int, aspect_ratio: KlingImageGenAspectRatio, image: Optional[torch.Tensor] = None, - auth_token: Optional[str] = None, + **kwargs, ): self.validate_prompt(prompt, negative_prompt) @@ -1547,14 +1583,14 @@ class KlingImageGenerationNode(KlingImageGenerationBase): n=n, aspect_ratio=aspect_ratio, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) task_creation_response = initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_token) + final_response = self.get_response(task_id, auth_kwargs=kwargs) validate_image_result_response(final_response) images = get_images_from_response(final_response) diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index 0f0d9aa80..bd33a53e0 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -1,4 +1,6 @@ +from __future__ import annotations from inspect import cleandoc +from typing import Optional from comfy.comfy_types.node_typing import IO, ComfyNodeABC from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis.luma_api import ( @@ -201,6 +203,7 @@ class LumaImageGenerationNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -214,7 +217,6 @@ class LumaImageGenerationNode(ComfyNodeABC): image_luma_ref: LumaReferenceChain = None, style_image: torch.Tensor = None, character_image: torch.Tensor = None, - auth_token=None, **kwargs, ): validate_string(prompt, strip_whitespace=True, 
min_length=3) @@ -222,19 +224,19 @@ class LumaImageGenerationNode(ComfyNodeABC): api_image_ref = None if image_luma_ref is not None: api_image_ref = self._convert_luma_refs( - image_luma_ref, max_refs=4, auth_token=auth_token + image_luma_ref, max_refs=4, auth_kwargs=kwargs, ) # handle style_luma_ref api_style_ref = None if style_image is not None: api_style_ref = self._convert_style_image( - style_image, weight=style_image_weight, auth_token=auth_token + style_image, weight=style_image_weight, auth_kwargs=kwargs, ) # handle character_ref images character_ref = None if character_image is not None: download_urls = upload_images_to_comfyapi( - character_image, max_images=4, auth_token=auth_token + character_image, max_images=4, auth_kwargs=kwargs, ) character_ref = LumaCharacterRef( identity0=LumaImageIdentity(images=download_urls) @@ -255,7 +257,7 @@ class LumaImageGenerationNode(ComfyNodeABC): style_ref=api_style_ref, character_ref=character_ref, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response_api: LumaGeneration = operation.execute() @@ -269,7 +271,7 @@ class LumaImageGenerationNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, - auth_token=auth_token, + auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -278,13 +280,13 @@ class LumaImageGenerationNode(ComfyNodeABC): return (img,) def _convert_luma_refs( - self, luma_ref: LumaReferenceChain, max_refs: int, auth_token=None + self, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None ): luma_urls = [] ref_count = 0 for ref in luma_ref.refs: download_urls = upload_images_to_comfyapi( - ref.image, max_images=1, auth_token=auth_token + ref.image, max_images=1, auth_kwargs=auth_kwargs ) luma_urls.append(download_urls[0]) ref_count += 1 @@ -293,12 +295,12 @@ class LumaImageGenerationNode(ComfyNodeABC): return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs) def _convert_style_image( - self, style_image: torch.Tensor, weight: float, auth_token=None + self, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None ): chain = LumaReferenceChain( first_ref=LumaReference(image=style_image, weight=weight) ) - return self._convert_luma_refs(chain, max_refs=1, auth_token=auth_token) + return self._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) class LumaImageModifyNode(ComfyNodeABC): @@ -350,6 +352,7 @@ class LumaImageModifyNode(ComfyNodeABC): "optional": {}, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -360,12 +363,11 @@ class LumaImageModifyNode(ComfyNodeABC): image: torch.Tensor, image_weight: float, seed, - auth_token=None, **kwargs, ): # first, upload image download_urls = upload_images_to_comfyapi( - image, max_images=1, auth_token=auth_token + image, max_images=1, auth_kwargs=kwargs, ) image_url = download_urls[0] # next, make Luma call with download url provided @@ -383,7 +385,7 @@ class LumaImageModifyNode(ComfyNodeABC): url=image_url, weight=round(max(min(1.0-image_weight, 0.98), 0.0), 2) ), ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response_api: LumaGeneration = operation.execute() @@ -397,7 +399,7 @@ class LumaImageModifyNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, - auth_token=auth_token, + auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -470,6 +472,7 @@ class 
LumaTextToVideoGenerationNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -483,7 +486,6 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): loop: bool, seed, luma_concepts: LumaConceptChain = None, - auth_token=None, **kwargs, ): validate_string(prompt, strip_whitespace=False, min_length=3) @@ -506,7 +508,7 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): loop=loop, concepts=luma_concepts.create_api_model() if luma_concepts else None, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response_api: LumaGeneration = operation.execute() @@ -520,7 +522,7 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, - auth_token=auth_token, + auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -594,6 +596,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -608,14 +611,13 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): first_image: torch.Tensor = None, last_image: torch.Tensor = None, luma_concepts: LumaConceptChain = None, - auth_token=None, **kwargs, ): if first_image is None and last_image is None: raise Exception( "At least one of first_image and last_image requires an input." ) - keyframes = self._convert_to_keyframes(first_image, last_image, auth_token) + keyframes = self._convert_to_keyframes(first_image, last_image, auth_kwargs=kwargs) duration = duration if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None @@ -636,7 +638,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): keyframes=keyframes, concepts=luma_concepts.create_api_model() if luma_concepts else None, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response_api: LumaGeneration = operation.execute() @@ -650,7 +652,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, - auth_token=auth_token, + auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -661,7 +663,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): self, first_image: torch.Tensor = None, last_image: torch.Tensor = None, - auth_token=None, + auth_kwargs: Optional[dict[str,str]] = None, ): if first_image is None and last_image is None: return None @@ -669,12 +671,12 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): frame1 = None if first_image is not None: download_urls = upload_images_to_comfyapi( - first_image, max_images=1, auth_token=auth_token + first_image, max_images=1, auth_kwargs=auth_kwargs, ) frame0 = LumaImageReference(type="image", url=download_urls[0]) if last_image is not None: download_urls = upload_images_to_comfyapi( - last_image, max_images=1, auth_token=auth_token + last_image, max_images=1, auth_kwargs=auth_kwargs, ) frame1 = LumaImageReference(type="image", url=download_urls[0]) return LumaKeyframes(frame0=frame0, frame1=frame1) diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index cacda22c6..fd64aeb0b 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -67,6 +67,7 @@ class MinimaxTextToVideoNode: }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -84,7 +85,7 @@ class MinimaxTextToVideoNode: model="T2V-01", image: 
torch.Tensor=None, # used for ImageToVideo subject: torch.Tensor=None, # used for SubjectToVideo - auth_token=None, + **kwargs, ): ''' Function used between MiniMax nodes - supports T2V, I2V, and S2V, based on provided arguments. @@ -94,12 +95,12 @@ class MinimaxTextToVideoNode: # upload image, if passed in image_url = None if image is not None: - image_url = upload_images_to_comfyapi(image, max_images=1, auth_token=auth_token)[0] + image_url = upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs)[0] # TODO: figure out how to deal with subject properly, API returns invalid params when using S2V-01 model subject_reference = None if subject is not None: - subject_url = upload_images_to_comfyapi(subject, max_images=1, auth_token=auth_token)[0] + subject_url = upload_images_to_comfyapi(subject, max_images=1, auth_kwargs=kwargs)[0] subject_reference = [SubjectReferenceItem(image=subject_url)] @@ -118,7 +119,7 @@ class MinimaxTextToVideoNode: subject_reference=subject_reference, prompt_optimizer=None, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response = video_generate_operation.execute() @@ -137,7 +138,7 @@ class MinimaxTextToVideoNode: completed_statuses=["Success"], failed_statuses=["Fail"], status_extractor=lambda x: x.status.value, - auth_token=auth_token, + auth_kwargs=kwargs, ) task_result = video_generate_operation.execute() @@ -153,7 +154,7 @@ class MinimaxTextToVideoNode: query_params={"file_id": int(file_id)}, ), request=EmptyRequest(), - auth_token=auth_token, + auth_kwargs=kwargs, ) file_result = file_retrieve_operation.execute() @@ -221,6 +222,7 @@ class MinimaxImageToVideoNode(MinimaxTextToVideoNode): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -279,6 +281,7 @@ class MinimaxSubjectToVideoNode(MinimaxTextToVideoNode): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index c18c65d7a..c63908be2 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -93,7 +93,10 @@ class OpenAIDalle2(ComfyNodeABC): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = (IO.IMAGE,) @@ -110,7 +113,7 @@ class OpenAIDalle2(ComfyNodeABC): mask=None, n=1, size="1024x1024", - auth_token=None, + **kwargs ): validate_string(prompt, strip_whitespace=False) model = "dall-e-2" @@ -168,7 +171,7 @@ class OpenAIDalle2(ComfyNodeABC): else None ), content_type=content_type, - auth_token=auth_token, + auth_kwargs=kwargs, ) response = operation.execute() @@ -236,7 +239,10 @@ class OpenAIDalle3(ComfyNodeABC): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, } RETURN_TYPES = (IO.IMAGE,) @@ -252,7 +258,7 @@ class OpenAIDalle3(ComfyNodeABC): style="natural", quality="standard", size="1024x1024", - auth_token=None, + **kwargs ): validate_string(prompt, strip_whitespace=False) model = "dall-e-3" @@ -273,7 +279,7 @@ class OpenAIDalle3(ComfyNodeABC): style=style, seed=seed, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response = operation.execute() @@ -366,7 +372,10 @@ class OpenAIGPTImage1(ComfyNodeABC): }, ), }, - "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"}, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + 
}, } RETURN_TYPES = (IO.IMAGE,) @@ -385,7 +394,7 @@ class OpenAIGPTImage1(ComfyNodeABC): mask=None, n=1, size="1024x1024", - auth_token=None, + **kwargs ): validate_string(prompt, strip_whitespace=False) model = "gpt-image-1" @@ -462,7 +471,7 @@ class OpenAIGPTImage1(ComfyNodeABC): ), files=files if files else None, content_type=content_type, - auth_token=auth_token, + auth_kwargs=kwargs, ) response = operation.execute() diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index ba4e8457d..08ec9cf07 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -3,6 +3,7 @@ Pika x ComfyUI API Nodes Pika API docs: https://pika-827374fb.mintlify.app/api-reference """ +from __future__ import annotations import io from typing import Optional, TypeVar @@ -120,7 +121,7 @@ class PikaNodeBase(ComfyNodeABC): RETURN_TYPES = ("VIDEO",) def poll_for_task_status( - self, task_id: str, auth_token: str + self, task_id: str, auth_kwargs: Optional[dict[str,str]] = None ) -> PikaGenerateResponse: polling_operation = PollingOperation( poll_endpoint=ApiEndpoint( @@ -139,20 +140,20 @@ class PikaNodeBase(ComfyNodeABC): progress_extractor=lambda response: ( response.progress if hasattr(response, "progress") else None ), - auth_token=auth_token, + auth_kwargs=auth_kwargs, ) return polling_operation.execute() def execute_task( self, initial_operation: SynchronousOperation[R, PikaGenerateResponse], - auth_token: Optional[str] = None, + auth_kwargs: Optional[dict[str,str]] = None, ) -> tuple[VideoFromFile]: """Executes the initial operation then polls for the task status until it is completed. Args: initial_operation: The initial operation to execute. - auth_token: The authentication token to use for the API call. + auth_kwargs: The authentication token(s) to use for the API call. Returns: A tuple containing the video file as a VIDEO output. @@ -164,7 +165,7 @@ class PikaNodeBase(ComfyNodeABC): raise PikaApiError(error_msg) task_id = initial_response.video_id - final_response = self.poll_for_task_status(task_id, auth_token) + final_response = self.poll_for_task_status(task_id, auth_kwargs) if not is_valid_video_response(final_response): error_msg = ( f"Pika task {task_id} succeeded but no video data found in response." 
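execute_task above is one instance of the create-then-poll flow shared by all of these API nodes: a synchronous create call returns a task id, then the node polls until a terminal status. A toy, self-contained version of that control flow (the two client calls are faked stand-ins for SynchronousOperation and PollingOperation; statuses are simplified):

import time

def create_task(payload: dict) -> str:
    return "task-123"  # stand-in for the initial SynchronousOperation

def fetch_status(task_id: str) -> str:
    return "finished"  # stand-in for the polling GET endpoint

def run_task(payload: dict, poll_interval: float = 5.0) -> str:
    task_id = create_task(payload)
    if not task_id:
        raise RuntimeError("Task creation failed: no task id in response.")
    while True:
        status = fetch_status(task_id)
        if status == "finished":
            return task_id  # the real nodes then download the video/image
        if status in ("failed", "deleted"):
            raise RuntimeError(f"Task {task_id} ended in state: {status}")
        time.sleep(poll_interval)

print(run_task({"promptText": "a cat"}))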
@@ -193,6 +194,7 @@ class PikaImageToVideoV2_2(PikaNodeBase): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -206,7 +208,7 @@ class PikaImageToVideoV2_2(PikaNodeBase): seed: int, resolution: str, duration: int, - auth_token: Optional[str] = None, + **kwargs ) -> tuple[VideoFromFile]: # Convert image to BytesIO image_bytes_io = tensor_to_bytesio(image) @@ -233,10 +235,10 @@ class PikaImageToVideoV2_2(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_token) + return self.execute_task(initial_operation, auth_kwargs=kwargs) class PikaTextToVideoNodeV2_2(PikaNodeBase): @@ -259,6 +261,7 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -272,7 +275,7 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): resolution: str, duration: int, aspect_ratio: float, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile]: initial_operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -289,11 +292,11 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): duration=duration, aspectRatio=aspect_ratio, ), - auth_token=auth_token, + auth_kwargs=kwargs, content_type="application/x-www-form-urlencoded", ) - return self.execute_task(initial_operation, auth_token) + return self.execute_task(initial_operation, auth_kwargs=kwargs) class PikaScenesV2_2(PikaNodeBase): @@ -336,6 +339,7 @@ class PikaScenesV2_2(PikaNodeBase): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -355,7 +359,7 @@ class PikaScenesV2_2(PikaNodeBase): image_ingredient_3: Optional[torch.Tensor] = None, image_ingredient_4: Optional[torch.Tensor] = None, image_ingredient_5: Optional[torch.Tensor] = None, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile]: # Convert all passed images to BytesIO all_image_bytes_io = [] @@ -396,10 +400,10 @@ class PikaScenesV2_2(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_token) + return self.execute_task(initial_operation, auth_kwargs=kwargs) class PikAdditionsNode(PikaNodeBase): @@ -434,6 +438,7 @@ class PikAdditionsNode(PikaNodeBase): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -446,7 +451,7 @@ class PikAdditionsNode(PikaNodeBase): prompt_text: str, negative_prompt: str, seed: int, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile]: # Convert video to BytesIO video_bytes_io = io.BytesIO() @@ -479,10 +484,10 @@ class PikAdditionsNode(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_token) + return self.execute_task(initial_operation, auth_kwargs=kwargs) class PikaSwapsNode(PikaNodeBase): @@ -526,6 +531,7 @@ class PikaSwapsNode(PikaNodeBase): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -540,7 +546,7 @@ class PikaSwapsNode(PikaNodeBase): prompt_text: str, negative_prompt: str, seed: int, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile]: # Convert video to BytesIO video_bytes_io = io.BytesIO() 
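The Pika nodes ship their binary inputs as multipart form data: each tensor or video is converted to an in-memory stream and attached as a (filename, stream, mime) tuple, with content_type="multipart/form-data" set on the operation. The sketch below uses requests directly to show what that amounts to on the wire; the URL and field names are placeholders, and the network call is left commented out so the snippet runs without network access:

import io
import requests

files = {
    "image": ("image.png", io.BytesIO(b"<png bytes>"), "image/png"),
    "video": ("video.mp4", io.BytesIO(b"<mp4 bytes>"), "video/mp4"),
}
headers = {"X-API-KEY": "key-123"}  # the comfy_api_key path from this patch
# resp = requests.post("https://example.invalid/generate", files=files,
#                      headers=headers, data={"promptText": "a cat"})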
@@ -583,10 +589,10 @@ class PikaSwapsNode(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_token) + return self.execute_task(initial_operation, auth_kwargs=kwargs) class PikaffectsNode(PikaNodeBase): @@ -630,6 +636,7 @@ class PikaffectsNode(PikaNodeBase): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -642,7 +649,7 @@ class PikaffectsNode(PikaNodeBase): prompt_text: str, negative_prompt: str, seed: int, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile]: initial_operation = SynchronousOperation( @@ -660,10 +667,10 @@ class PikaffectsNode(PikaNodeBase): ), files={"image": ("image.png", tensor_to_bytesio(image), "image/png")}, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_token) + return self.execute_task(initial_operation, auth_kwargs=kwargs) class PikaStartEndFrameNode2_2(PikaNodeBase): @@ -681,6 +688,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -695,7 +703,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): seed: int, resolution: str, duration: int, - auth_token: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile]: pika_files = [ @@ -722,10 +730,10 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): ), files=pika_files, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_token) + return self.execute_task(initial_operation, auth_kwargs=kwargs) NODE_CLASS_MAPPINGS = { diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index dbb90c1dd..0c29e77c2 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -34,7 +34,7 @@ import requests from io import BytesIO -def upload_image_to_pixverse(image: torch.Tensor, auth_token=None): +def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): # first, upload image to Pixverse and get image id to use in actual generation call files = { "image": tensor_to_bytesio(image) @@ -49,7 +49,7 @@ def upload_image_to_pixverse(image: torch.Tensor, auth_token=None): request=EmptyRequest(), files=files, content_type="multipart/form-data", - auth_token=auth_token, + auth_kwargs=auth_kwargs, ) response_upload: PixverseImageUploadResponse = operation.execute() @@ -148,6 +148,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -161,7 +162,6 @@ class PixverseTextToVideoNode(ComfyNodeABC): seed, negative_prompt: str=None, pixverse_template: int=None, - auth_token=None, **kwargs, ): validate_string(prompt, strip_whitespace=False) @@ -190,7 +190,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): template_id=pixverse_template, seed=seed, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response_api = operation.execute() @@ -207,7 +207,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): completed_statuses=[PixverseStatus.successful], failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], status_extractor=lambda x: x.Resp.status, - auth_token=auth_token, + auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -278,6 +278,7 @@ class 
PixverseImageToVideoNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -291,11 +292,10 @@ class PixverseImageToVideoNode(ComfyNodeABC): seed, negative_prompt: str=None, pixverse_template: int=None, - auth_token=None, **kwargs, ): validate_string(prompt, strip_whitespace=False) - img_id = upload_image_to_pixverse(image, auth_token=auth_token) + img_id = upload_image_to_pixverse(image, auth_kwargs=kwargs) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -322,7 +322,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): template_id=pixverse_template, seed=seed, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response_api = operation.execute() @@ -339,7 +339,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): completed_statuses=[PixverseStatus.successful], failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], status_extractor=lambda x: x.Resp.status, - auth_token=auth_token, + auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -407,6 +407,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC): }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", }, } @@ -420,12 +421,11 @@ class PixverseTransitionVideoNode(ComfyNodeABC): motion_mode: str, seed, negative_prompt: str=None, - auth_token=None, **kwargs, ): validate_string(prompt, strip_whitespace=False) - first_frame_id = upload_image_to_pixverse(first_frame, auth_token=auth_token) - last_frame_id = upload_image_to_pixverse(last_frame, auth_token=auth_token) + first_frame_id = upload_image_to_pixverse(first_frame, auth_kwargs=kwargs) + last_frame_id = upload_image_to_pixverse(last_frame, auth_kwargs=kwargs) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -452,7 +452,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC): negative_prompt=negative_prompt if negative_prompt else None, seed=seed, ), - auth_token=auth_token, + auth_kwargs=kwargs, ) response_api = operation.execute() @@ -469,7 +469,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC): completed_statuses=[PixverseStatus.successful], failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], status_extractor=lambda x: x.Resp.status, - auth_token=auth_token, + auth_kwargs=kwargs, ) response_poll = operation.execute() diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index 5c89d21e9..767d93e3c 100644 --- a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -41,7 +41,7 @@ def handle_recraft_file_request( total_pixels=4096*4096, timeout=1024, request=None, - auth_token=None + auth_kwargs: dict[str,str] = None, ) -> list[BytesIO]: """ Handle sending common Recraft file-only request to get back file bytes. 
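Every node touched by this patch grows the same pair of hidden inputs, and api_call picks them up through **kwargs. A condensed toy of that wiring follows; the "executor" here is a stand-in for ComfyUI's real execution loop, which resolves hidden inputs by name before calling the node:

class ToyApiNode:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {"prompt": ("STRING", {})},
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
                "comfy_api_key": "API_KEY_COMFY_ORG",
            },
        }

    def api_call(self, prompt: str, **kwargs):
        # kwargs now carries whichever hidden credentials were resolved;
        # downstream helpers receive the whole dict as auth_kwargs=kwargs.
        return {"prompt": prompt, "auth_kwargs": dict(kwargs)}

# Toy executor: resolve hidden inputs and pass them as keyword arguments.
resolved = {"AUTH_TOKEN_COMFY_ORG": None, "API_KEY_COMFY_ORG": "key-123"}
hidden = {name: resolved[source]
          for name, source in ToyApiNode.INPUT_TYPES()["hidden"].items()}
print(ToyApiNode().api_call("a cat", **hidden))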
@@ -65,7 +65,7 @@ def handle_recraft_file_request(
         request=request,
         files=files,
         content_type="multipart/form-data",
-        auth_token=auth_token,
+        auth_kwargs=auth_kwargs,
         multipart_parser=recraft_multipart_parser,
     )
     response: RecraftImageGenerationResponse = operation.execute()
@@ -387,6 +387,7 @@ class RecraftTextToImageNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }
@@ -399,7 +400,6 @@ class RecraftTextToImageNode:
         recraft_style: RecraftStyle = None,
         negative_prompt: str = None,
         recraft_controls: RecraftControls = None,
-        auth_token=None,
         **kwargs,
     ):
         validate_string(prompt, strip_whitespace=False, max_length=1000)
@@ -432,7 +432,7 @@ class RecraftTextToImageNode:
                 style_id=recraft_style.style_id,
                 controls=controls_api,
             ),
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response: RecraftImageGenerationResponse = operation.execute()
         images = []
@@ -522,6 +522,7 @@ class RecraftImageToImageNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }
@@ -532,7 +533,6 @@ class RecraftImageToImageNode:
         prompt: str,
         n: int,
         strength: float,
         seed,
-        auth_token=None,
         recraft_style: RecraftStyle = None,
         negative_prompt: str = None,
         recraft_controls: RecraftControls = None,
@@ -570,7 +570,7 @@ class RecraftImageToImageNode:
                 image=image[i],
                 path="/proxy/recraft/images/imageToImage",
                 request=request,
-                auth_token=auth_token,
+                auth_kwargs=kwargs,
             )
             with handle_recraft_image_output():
                 images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0))
@@ -638,6 +638,7 @@ class RecraftImageInpaintingNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }
@@ -648,7 +649,6 @@ class RecraftImageInpaintingNode:
         prompt: str,
         n: int,
         seed,
-        auth_token=None,
         recraft_style: RecraftStyle = None,
         negative_prompt: str = None,
         **kwargs,
@@ -683,7 +683,7 @@ class RecraftImageInpaintingNode:
                 mask=mask[i:i+1],
                 path="/proxy/recraft/images/inpaint",
                 request=request,
-                auth_token=auth_token,
+                auth_kwargs=kwargs,
             )
             with handle_recraft_image_output():
                 images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0))
@@ -762,6 +762,7 @@ class RecraftTextToVectorNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }
@@ -774,7 +775,6 @@ class RecraftTextToVectorNode:
         seed,
         negative_prompt: str = None,
         recraft_controls: RecraftControls = None,
-        auth_token=None,
         **kwargs,
     ):
         validate_string(prompt, strip_whitespace=False, max_length=1000)
@@ -805,7 +805,7 @@ class RecraftTextToVectorNode:
                 substyle=recraft_style.substyle,
                 controls=controls_api,
             ),
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response: RecraftImageGenerationResponse = operation.execute()
         svg_data = []
@@ -836,13 +836,13 @@ class RecraftVectorizeImageNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(
         self,
         image: torch.Tensor,
-        auth_token=None,
         **kwargs,
     ):
         svgs = []
@@ -852,7 +852,7 @@ class RecraftVectorizeImageNode:
             sub_bytes = handle_recraft_file_request(
                 image=image[i],
                 path="/proxy/recraft/images/vectorize",
-                auth_token=auth_token,
+                auth_kwargs=kwargs,
             )
             svgs.append(SVG(sub_bytes))
             pbar.update(1)
@@ -917,6 +917,7 @@ class RecraftReplaceBackgroundNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }
@@ -926,7 +927,6 @@
     def api_call(
         self,
         image: torch.Tensor,
         prompt: str,
         n: int,
         seed,
-        auth_token=None,
         recraft_style: RecraftStyle = None,
         negative_prompt: str = None,
         **kwargs,
@@ -956,7 +956,7 @@ class RecraftReplaceBackgroundNode:
                 image=image[i],
                 path="/proxy/recraft/images/replaceBackground",
                 request=request,
-                auth_token=auth_token,
+                auth_kwargs=kwargs,
             )
             images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0))
             pbar.update(1)
@@ -986,13 +986,13 @@ class RecraftRemoveBackgroundNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(
         self,
         image: torch.Tensor,
-        auth_token=None,
         **kwargs,
     ):
         images = []
@@ -1002,7 +1002,7 @@ class RecraftRemoveBackgroundNode:
             sub_bytes = handle_recraft_file_request(
                 image=image[i],
                 path="/proxy/recraft/images/removeBackground",
-                auth_token=auth_token,
+                auth_kwargs=kwargs,
             )
             images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0))
             pbar.update(1)
@@ -1037,13 +1037,13 @@ class RecraftCrispUpscaleNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(
         self,
         image: torch.Tensor,
-        auth_token=None,
         **kwargs,
     ):
         images = []
@@ -1053,7 +1053,7 @@ class RecraftCrispUpscaleNode:
             sub_bytes = handle_recraft_file_request(
                 image=image[i],
                 path=self.RECRAFT_PATH,
-                auth_token=auth_token,
+                auth_kwargs=kwargs,
             )
             images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0))
             pbar.update(1)
diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py
index 52fe2417c..02e421678 100644
--- a/comfy_api_nodes/nodes_stability.py
+++ b/comfy_api_nodes/nodes_stability.py
@@ -120,12 +120,13 @@ class StabilityStableImageUltraNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(self, prompt: str, aspect_ratio: str, style_preset: str, seed: int,
                  negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None,
-                 auth_token=None):
+                 **kwargs):
         validate_string(prompt, strip_whitespace=False)
         # prepare image binary if image present
         image_binary = None
@@ -160,7 +161,7 @@ class StabilityStableImageUltraNode:
             ),
             files=files,
             content_type="multipart/form-data",
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response_api = operation.execute()
@@ -252,12 +253,13 @@ class StabilityStableImageSD_3_5Node:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(self, model: str, prompt: str, aspect_ratio: str, style_preset: str, seed: int, cfg_scale: float,
                  negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None,
-                 auth_token=None):
+                 **kwargs):
         validate_string(prompt, strip_whitespace=False)
         # prepare image binary if image present
         image_binary = None
@@ -298,7 +300,7 @@ class StabilityStableImageSD_3_5Node:
             ),
             files=files,
             content_type="multipart/form-data",
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response_api = operation.execute()
@@ -368,11 +370,12 @@ class StabilityUpscaleConservativeNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(self, image: torch.Tensor, prompt: str, creativity: float, seed: int, negative_prompt: str=None,
-                 auth_token=None):
+                 **kwargs):
         validate_string(prompt, strip_whitespace=False)
         image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read()
@@ -398,7 +401,7 @@ class StabilityUpscaleConservativeNode:
             ),
             files=files,
             content_type="multipart/form-data",
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response_api = operation.execute()
@@ -473,11 +476,12 @@ class StabilityUpscaleCreativeNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(self, image: torch.Tensor, prompt: str, creativity: float, style_preset: str, seed: int, negative_prompt: str=None,
-                 auth_token=None):
+                 **kwargs):
         validate_string(prompt, strip_whitespace=False)
         image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read()
@@ -506,7 +510,7 @@ class StabilityUpscaleCreativeNode:
             ),
             files=files,
             content_type="multipart/form-data",
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response_api = operation.execute()
@@ -521,7 +525,7 @@ class StabilityUpscaleCreativeNode:
             completed_statuses=[StabilityPollStatus.finished],
             failed_statuses=[StabilityPollStatus.failed],
             status_extractor=lambda x: get_async_dummy_status(x),
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response_poll: StabilityResultsGetResponse = operation.execute()
@@ -555,11 +559,12 @@ class StabilityUpscaleFastNode:
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }

     def api_call(self, image: torch.Tensor,
-                 auth_token=None):
+                 **kwargs):
         image_binary = tensor_to_bytesio(image, total_pixels=4096*4096).read()

         files = {
@@ -576,7 +581,7 @@ class StabilityUpscaleFastNode:
             request=EmptyRequest(),
             files=files,
             content_type="multipart/form-data",
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
         )
         response_api = operation.execute()
diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py
index 9233944b5..2740179c8 100644
--- a/comfy_api_nodes/nodes_veo2.py
+++ b/comfy_api_nodes/nodes_veo2.py
@@ -114,6 +114,7 @@ class VeoVideoGenerationNode(ComfyNodeABC):
             },
             "hidden": {
                 "auth_token": "AUTH_TOKEN_COMFY_ORG",
+                "comfy_api_key": "API_KEY_COMFY_ORG",
             },
         }
@@ -133,7 +134,7 @@ class VeoVideoGenerationNode(ComfyNodeABC):
         person_generation="ALLOW",
         seed=0,
         image=None,
-        auth_token=None,
+        **kwargs,
     ):
         # Prepare the instances for the request
         instances = []
@@ -179,7 +180,7 @@ class VeoVideoGenerationNode(ComfyNodeABC):
                 instances=instances,
                 parameters=parameters
             ),
-            auth_token=auth_token
+            auth_kwargs=kwargs,
         )

         initial_response = initial_operation.execute()
@@ -213,7 +214,7 @@ class VeoVideoGenerationNode(ComfyNodeABC):
             request=Veo2GenVidPollRequest(
                 operationName=operation_name
             ),
-            auth_token=auth_token,
+            auth_kwargs=kwargs,
             poll_interval=5.0
         )
diff --git a/execution.py b/execution.py
index feb61ae82..e5d1c69d9 100644
--- a/execution.py
+++ b/execution.py
@@ -146,6 +146,8 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, e
                 input_data_all[x] = [unique_id]
             if h[x] == "AUTH_TOKEN_COMFY_ORG":
                 input_data_all[x] = [extra_data.get("auth_token_comfy_org", None)]
+            if h[x] == "API_KEY_COMFY_ORG":
+                input_data_all[x] = [extra_data.get("api_key_comfy_org", None)]
     return input_data_all, missing_keys

 map_node_over_list = None #Don't hook this please
diff --git a/requirements.txt b/requirements.txt
index 29cf0e2ac..01aab4ca2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
-comfyui-frontend-package==1.18.9
-comfyui-workflow-templates==0.1.11
+comfyui-frontend-package==1.18.10
+comfyui-workflow-templates==0.1.14
 torch
 torchsde
 torchvision
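A note on the pattern in this patch: every API node now declares both credentials as hidden inputs, and get_input_data() in execution.py fills them from the prompt's extra_data, so node code simply forwards **kwargs as auth_kwargs. A minimal sketch of a node following this convention (the node body and call_operation are illustrative stand-ins, not code from the patch):

    class ExampleApiNode:
        @classmethod
        def INPUT_TYPES(s):
            return {
                "required": {"prompt": ("STRING", {})},
                "hidden": {
                    "auth_token": "AUTH_TOKEN_COMFY_ORG",
                    "comfy_api_key": "API_KEY_COMFY_ORG",
                },
            }

        def api_call(self, prompt, **kwargs):
            # get_input_data() injects auth_token / comfy_api_key into kwargs
            # from extra_data; the whole dict is passed through as auth_kwargs.
            return call_operation(prompt=prompt, auth_kwargs=kwargs)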
From 577de83ca9c99e997825439e017113456c4c78f7 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sun, 11 May 2025 01:58:00 -0700
Subject: [PATCH 0126/1073] ACE VAE works in fp16. (#8055)

---
 comfy/sd.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/sd.py b/comfy/sd.py
index ee350d5b5..e98a3aa87 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -451,7 +451,7 @@ class VAE:
                 self.latent_dim = 2
                 self.process_output = lambda audio: audio
                 self.process_input = lambda audio: audio
-                self.working_dtypes = [torch.bfloat16, torch.float32]
+                self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32]
                 self.disable_offload = True
                 self.extra_1d_channel = 16
             else:
From 31e9e36c941bbedd29bb388a052911a65bed1bc4 Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Mon, 12 May 2025 10:32:24 -0700
Subject: [PATCH 0127/1073] remove aspect ratio from kling request (#8062)

---
 comfy_api_nodes/nodes_kling.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py
index b2be83656..2d0fd8883 100644
--- a/comfy_api_nodes/nodes_kling.py
+++ b/comfy_api_nodes/nodes_kling.py
@@ -671,7 +671,6 @@ class KlingImage2VideoNode(KlingNodeBase):
                 negative_prompt=negative_prompt if negative_prompt else None,
                 cfg_scale=cfg_scale,
                 mode=KlingVideoGenMode(mode),
-                aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio),
                 duration=KlingVideoGenDuration(duration),
                 camera_control=camera_control,
             ),
From 640c47e7deeb52978e84716613015cd8bfd93c5c Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 12 May 2025 11:32:01 -0700
Subject: [PATCH 0128/1073] Fix torch warning about deprecated function. (#8075)

Drop support for torch versions below 2.2 on the audio VAEs.
---
 comfy/ldm/ace/vae/music_vocoder.py | 22 +++++++++-------------
 comfy/ldm/audio/autoencoder.py     | 10 ++--------
 2 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/comfy/ldm/ace/vae/music_vocoder.py b/comfy/ldm/ace/vae/music_vocoder.py
index dc7c867da..2f989fa86 100755
--- a/comfy/ldm/ace/vae/music_vocoder.py
+++ b/comfy/ldm/ace/vae/music_vocoder.py
@@ -8,11 +8,7 @@ from typing import Callable, Tuple, List
 import numpy as np
 import torch.nn.functional as F
-from torch.nn.utils import weight_norm
 from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
-# from diffusers.models.modeling_utils import ModelMixin
-# from diffusers.loaders import FromOriginalModelMixin
-# from diffusers.configuration_utils import ConfigMixin, register_to_config

 from .music_log_mel import LogMelSpectrogram
@@ -259,7 +255,7 @@ class ResBlock1(torch.nn.Module):
         self.convs1 = nn.ModuleList(
             [
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
                         kernel_size,
                         1,
                         dilation=dilation[0],
                         padding=get_padding(kernel_size, dilation[0]),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
                         kernel_size,
                         1,
                         dilation=dilation[1],
                         padding=get_padding(kernel_size, dilation[1]),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
                         kernel_size,
                         1,
                         dilation=dilation[2],
                         padding=get_padding(kernel_size, dilation[2]),
                     )
                 ),
             ]
         )
@@ -294,7 +290,7 @@
         self.convs2 = nn.ModuleList(
             [
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
                         kernel_size,
                         1,
                         dilation=1,
                         padding=get_padding(kernel_size, 1),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
                         kernel_size,
                         1,
                         dilation=1,
                         padding=get_padding(kernel_size, 1),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -366,7 +362,7 @@ class HiFiGANGenerator(nn.Module):
             prod(upsample_rates) == hop_length
         ), f"hop_length must be {prod(upsample_rates)}"

-        self.conv_pre = weight_norm(
+        self.conv_pre = torch.nn.utils.parametrizations.weight_norm(
             ops.Conv1d(
                 num_mels,
                 upsample_initial_channel,
@@ -386,7 +382,7 @@ class HiFiGANGenerator(nn.Module):
         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
             c_cur = upsample_initial_channel // (2 ** (i + 1))
             self.ups.append(
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.ConvTranspose1d(
                         upsample_initial_channel // (2**i),
                         upsample_initial_channel // (2 ** (i + 1)),
@@ -421,7 +417,7 @@ class HiFiGANGenerator(nn.Module):
                 self.resblocks.append(ResBlock1(ch, k, d))

         self.activation_post = post_activation()
-        self.conv_post = weight_norm(
+        self.conv_post = torch.nn.utils.parametrizations.weight_norm(
             ops.Conv1d(
                 ch,
                 1,
diff --git a/comfy/ldm/audio/autoencoder.py b/comfy/ldm/audio/autoencoder.py
index 9e7e7c876..78ed6ffa6 100644
--- a/comfy/ldm/audio/autoencoder.py
+++ b/comfy/ldm/audio/autoencoder.py
@@ -75,16 +75,10 @@ class SnakeBeta(nn.Module):
         return x

 def WNConv1d(*args, **kwargs):
-    try:
-        return torch.nn.utils.parametrizations.weight_norm(ops.Conv1d(*args, **kwargs))
-    except:
-        return torch.nn.utils.weight_norm(ops.Conv1d(*args, **kwargs)) #support pytorch 2.1 and older
+    return torch.nn.utils.parametrizations.weight_norm(ops.Conv1d(*args, **kwargs))

 def WNConvTranspose1d(*args, **kwargs):
-    try:
-        return torch.nn.utils.parametrizations.weight_norm(ops.ConvTranspose1d(*args, **kwargs))
-    except:
-        return torch.nn.utils.weight_norm(ops.ConvTranspose1d(*args, **kwargs)) #support pytorch 2.1 and older
+    return torch.nn.utils.parametrizations.weight_norm(ops.ConvTranspose1d(*args, **kwargs))

 def get_activation(activation: Literal["elu", "snake", "none"], antialias=False, channels=None) -> nn.Module:
     if activation == "elu":
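For context on the change above: torch deprecated torch.nn.utils.weight_norm in favor of the parametrization-based API, which is why the try/except fallback could be dropped once torch below 2.2 stopped being supported. A standalone sketch of the migration (illustrative, not part of the patch):

    import torch.nn as nn
    from torch.nn.utils.parametrizations import weight_norm
    from torch.nn.utils.parametrize import remove_parametrizations

    conv = weight_norm(nn.Conv1d(16, 16, 3))  # new-style, warning-free
    # The old helper was torch.nn.utils.weight_norm(conv); removal now goes
    # through remove_parametrizations rather than remove_weight_norm:
    conv = remove_parametrizations(conv, "weight")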
From 158419f3a0017c2ce123484b14b6c527716d6ec8 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 12 May 2025 15:58:28 -0400
Subject: [PATCH 0129/1073] ComfyUI version 0.3.34

---
 comfyui_version.py | 2 +-
 pyproject.toml     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index 5a73f76e4..b740b378d 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.33"
+__version__ = "0.3.34"
diff --git a/pyproject.toml b/pyproject.toml
index e0be329de..80061b39a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.33"
+version = "0.3.34"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
From b4abca828e3a76ef565a2f5f519028e689837fc6 Mon Sep 17 00:00:00 2001
From: thot experiment <94414189+thot-experiment@users.noreply.github.com>
Date: Mon, 12 May 2025 13:00:01 -0700
Subject: [PATCH 0130/1073] add opus and mp3 to audio output node (#8019)

* first pass at opus and mp3 as well as migrating flac to pyav
* minor mp3 encoding fix
* fix ruff
* delete dead code
* split out save audio to separate nodes per filetype
* fix ruff
---
 comfy_extras/nodes_audio.py | 229 +++++++++++++++++++++++++-----------
 1 file changed, 160 insertions(+), 69 deletions(-)

diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py
index 136ad6159..49af1eae4 100644
--- a/comfy_extras/nodes_audio.py
+++ b/comfy_extras/nodes_audio.py
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import av
 import torchaudio
 import torch
 import comfy.model_management
@@ -7,7 +8,6 @@ import folder_paths
 import os
 import io
 import json
-import struct
 import random
 import hashlib
 import node_helpers
@@ -90,60 +90,118 @@ class VAEDecodeAudio:
         return ({"waveform": audio, "sample_rate": 44100}, )

-def create_vorbis_comment_block(comment_dict, last_block):
-    vendor_string = b'ComfyUI'
-    vendor_length = len(vendor_string)
+def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None, quality="128k"):

-    comments = []
-    for key, value in comment_dict.items():
-        comment = f"{key}={value}".encode('utf-8')
-        comments.append(struct.pack('I', len(comment_data))[1:] + comment_data
+    # Opus supported sample rates
+    OPUS_RATES = [8000, 12000, 16000, 24000, 48000]

-    return comment_block
+    for (batch_number, waveform) in enumerate(audio["waveform"].cpu()):
+        filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
+        file = f"{filename_with_batch_num}_{counter:05}_.{format}"
+        output_path = os.path.join(full_output_folder, file)

-def insert_or_replace_vorbis_comment(flac_io, comment_dict):
-    if len(comment_dict) == 0:
-        return flac_io
+        # Use original sample rate initially
+        sample_rate = audio["sample_rate"]

-    flac_io.seek(4)
+        # Handle Opus sample rate requirements
+        if format == "opus":
+            if sample_rate > 48000:
+                sample_rate = 48000
+            elif sample_rate not in OPUS_RATES:
+                # Find the next highest supported rate
+                for rate in sorted(OPUS_RATES):
+                    if rate > sample_rate:
+                        sample_rate = rate
+                        break
+                if sample_rate not in OPUS_RATES:  # Fallback if still not supported
+                    sample_rate = 48000

-    blocks = []
-    last_block = False
+        # Resample if necessary
+        if sample_rate != audio["sample_rate"]:
+            waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate)

-    while not last_block:
-        header = flac_io.read(4)
-        last_block = (header[0] & 0x80) != 0
-        block_type = header[0] & 0x7F
-        block_length = struct.unpack('>I', b'\x00' + header[1:])[0]
-        block_data = flac_io.read(block_length)
+        # Create in-memory WAV buffer
+        wav_buffer = io.BytesIO()
+        torchaudio.save(wav_buffer, waveform, sample_rate, format="WAV")
+        wav_buffer.seek(0)  # Rewind for reading

-        if block_type == 4 or block_type == 1:
-            pass
-        else:
-            header = bytes([(header[0] & (~0x80))]) + header[1:]
-            blocks.append(header + block_data)
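+        # The tensor was serialized to an in-memory WAV above so PyAV can
+        # decode uniform audio frames before re-encoding them below.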
+        # Use PyAV to convert and add metadata
+        input_container = av.open(wav_buffer)

-    blocks.append(create_vorbis_comment_block(comment_dict, last_block=True))
+        # Create output with specified format
+        output_buffer = io.BytesIO()
+        output_container = av.open(output_buffer, mode='w', format=format)

-    new_flac_io = io.BytesIO()
-    new_flac_io.write(b'fLaC')
-    for block in blocks:
-        new_flac_io.write(block)
+        # Set metadata on the container
+        for key, value in metadata.items():
+            output_container.metadata[key] = value

-    new_flac_io.write(flac_io.read())
-    return new_flac_io
+        # Set up the output stream with appropriate properties
+        input_container.streams.audio[0]
+        if format == "opus":
+            out_stream = output_container.add_stream("libopus", rate=sample_rate)
+            if quality == "64k":
+                out_stream.bit_rate = 64000
+            elif quality == "96k":
+                out_stream.bit_rate = 96000
+            elif quality == "128k":
+                out_stream.bit_rate = 128000
+            elif quality == "192k":
+                out_stream.bit_rate = 192000
+            elif quality == "320k":
+                out_stream.bit_rate = 320000
+        elif format == "mp3":
+            out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
+            if quality == "V0":
+                #TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
+                out_stream.codec_context.qscale = 1
+            elif quality == "128k":
+                out_stream.bit_rate = 128000
+            elif quality == "320k":
+                out_stream.bit_rate = 320000
+        else: #format == "flac":
+            out_stream = output_container.add_stream("flac", rate=sample_rate)

+        # Copy frames from input to output
+        for frame in input_container.decode(audio=0):
+            frame.pts = None  # Let PyAV handle timestamps
+            output_container.mux(out_stream.encode(frame))
+
+        # Flush encoder
+        output_container.mux(out_stream.encode(None))
+
+        # Close containers
+        output_container.close()
+        input_container.close()
+
+        # Write the output to file
+        output_buffer.seek(0)
+        with open(output_path, 'wb') as f:
+            f.write(output_buffer.getbuffer())
+
+        results.append({
+            "filename": file,
+            "subfolder": subfolder,
+            "type": self.type
+        })
+        counter += 1
+
+    return { "ui": { "audio": results } }
+
 class SaveAudio:
     def __init__(self):
         self.output_dir = folder_paths.get_output_directory()
@@ -153,50 +211,70 @@ class SaveAudio:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "audio": ("AUDIO", ),
-                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"})},
+                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
+                            },
                 "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                 }

     RETURN_TYPES = ()
-    FUNCTION = "save_audio"
+    FUNCTION = "save_flac"

     OUTPUT_NODE = True

     CATEGORY = "audio"

-    def save_audio(self, audio, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
-        filename_prefix += self.prefix_append
-        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
-        results: list[FileLocator] = []
-
-        metadata = {}
-        if not args.disable_metadata:
-            if prompt is not None:
-                metadata["prompt"] = json.dumps(prompt)
-            if extra_pnginfo is not None:
-                for x in extra_pnginfo:
-                    metadata[x] = json.dumps(extra_pnginfo[x])
+    def save_flac(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None):
+        return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo)

+class SaveAudioMP3:
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""

-        for (batch_number, waveform) in enumerate(audio["waveform"].cpu()):
-            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
-            file = f"{filename_with_batch_num}_{counter:05}_.flac"
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "audio": ("AUDIO", ),
+                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
+                              "quality": (["V0", "128k", "320k"], {"default": "V0"}),
+                            },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }

-            buff = io.BytesIO()
-            torchaudio.save(buff, waveform, audio["sample_rate"], format="FLAC")
+    RETURN_TYPES = ()
+    FUNCTION = "save_mp3"

-            buff = insert_or_replace_vorbis_comment(buff, metadata)
+    OUTPUT_NODE = True

-            with open(os.path.join(full_output_folder, file), 'wb') as f:
-                f.write(buff.getbuffer())
+    CATEGORY = "audio"

-            results.append({
-                "filename": file,
-                "subfolder": subfolder,
-                "type": self.type
-            })
-            counter += 1
+    def save_mp3(self, audio, filename_prefix="ComfyUI", format="mp3", prompt=None, extra_pnginfo=None, quality="128k"):
+        return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality)

-        return { "ui": { "audio": results } }
+class SaveAudioOpus:
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "audio": ("AUDIO", ),
+                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
+                              "quality": (["64k", "96k", "128k", "192k", "320k"], {"default": "128k"}),
+                            },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    RETURN_TYPES = ()
+    FUNCTION = "save_opus"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "audio"
+
+    def save_opus(self, audio, filename_prefix="ComfyUI", format="opus", prompt=None, extra_pnginfo=None, quality="V3"):
+        return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality)

 class PreviewAudio(SaveAudio):
     def __init__(self):
@@ -248,7 +326,20 @@ NODE_CLASS_MAPPINGS = {
     "VAEEncodeAudio": VAEEncodeAudio,
     "VAEDecodeAudio": VAEDecodeAudio,
     "SaveAudio": SaveAudio,
+    "SaveAudioMP3": SaveAudioMP3,
+    "SaveAudioOpus": SaveAudioOpus,
     "LoadAudio": LoadAudio,
     "PreviewAudio": PreviewAudio,
     "ConditioningStableAudio": ConditioningStableAudio,
 }
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "EmptyLatentAudio": "Empty Latent Audio",
+    "VAEEncodeAudio": "VAE Encode Audio",
+    "VAEDecodeAudio": "VAE Decode Audio",
+    "PreviewAudio": "Preview Audio",
+    "LoadAudio": "Load Audio",
+    "SaveAudio": "Save Audio (FLAC)",
+    "SaveAudioMP3": "Save Audio (MP3)",
+    "SaveAudioOpus": "Save Audio (Opus)",
+}
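One subtlety worth pulling out of save_audio above: Opus only accepts a fixed set of sample rates, so inputs are bumped to the next supported rate (or clamped to 48 kHz) before resampling. The selection logic in isolation, condensed from the patch's two-step check (standalone sketch for illustration):

    OPUS_RATES = [8000, 12000, 16000, 24000, 48000]

    def pick_opus_rate(sample_rate: int) -> int:
        # Clamp anything above 48 kHz, otherwise take the next supported
        # rate at or above the input; fall back to 48 kHz.
        if sample_rate > 48000:
            return 48000
        for rate in sorted(OPUS_RATES):
            if rate >= sample_rate:
                return rate
        return 48000

    assert pick_opus_rate(44100) == 48000
    assert pick_opus_rate(16000) == 16000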
From b7ed5f57bda02d7cad3f77abe6de68da6dedc4e4 Mon Sep 17 00:00:00 2001
From: Terry Jia
Date: Mon, 12 May 2025 16:29:32 -0400
Subject: [PATCH 0131/1073] string node (#7952)

---
 comfy_extras/nodes_string.py | 322 +++++++++++++++++++++++++++++++++++
 nodes.py                     |   1 +
 2 files changed, 323 insertions(+)
 create mode 100644 comfy_extras/nodes_string.py

diff --git a/comfy_extras/nodes_string.py b/comfy_extras/nodes_string.py
new file mode 100644
index 000000000..a852326e5
--- /dev/null
+++ b/comfy_extras/nodes_string.py
@@ -0,0 +1,322 @@
+import re
+
+from comfy.comfy_types.node_typing import IO
+
+class StringConcatenate():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string_a": (IO.STRING, {"multiline": True}),
+                "string_b": (IO.STRING, {"multiline": True})
+            }
+        }
+
+    RETURN_TYPES = (IO.STRING,)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string_a, string_b, **kwargs):
+        return string_a + string_b,
+
+class StringSubstring():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True}),
+                "start": (IO.INT, {}),
+                "end": (IO.INT, {}),
+            }
+        }
+
+    RETURN_TYPES = (IO.STRING,)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, start, end, **kwargs):
+        return string[start:end],
+
+class StringLength():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True})
+            }
+        }
+
+    RETURN_TYPES = (IO.INT,)
+    RETURN_NAMES = ("length",)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, **kwargs):
+        length = len(string)
+
+        return length,
+
+class CaseConverter():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True}),
+                "mode": (IO.COMBO, {"options": ["UPPERCASE", "lowercase", "Capitalize", "Title Case"]})
+            }
+        }
+
+    RETURN_TYPES = (IO.STRING,)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, mode, **kwargs):
+        if mode == "UPPERCASE":
+            result = string.upper()
+        elif mode == "lowercase":
+            result = string.lower()
+        elif mode == "Capitalize":
+            result = string.capitalize()
+        elif mode == "Title Case":
+            result = string.title()
+        else:
+            result = string
+
+        return result,
+
+
+class StringTrim():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True}),
+                "mode": (IO.COMBO, {"options": ["Both", "Left", "Right"]})
+            }
+        }
+
+    RETURN_TYPES = (IO.STRING,)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, mode, **kwargs):
+        if mode == "Both":
+            result = string.strip()
+        elif mode == "Left":
+            result = string.lstrip()
+        elif mode == "Right":
+            result = string.rstrip()
+        else:
+            result = string
+
+        return result,
+
+class StringReplace():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True}),
+                "find": (IO.STRING, {"multiline": True}),
+                "replace": (IO.STRING, {"multiline": True})
+            }
+        }
+
+    RETURN_TYPES = (IO.STRING,)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, find, replace, **kwargs):
+        result = string.replace(find, replace)
+        return result,
+
+
+class StringContains():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True}),
+                "substring": (IO.STRING, {"multiline": True}),
+                "case_sensitive": (IO.BOOLEAN, {"default": True})
+            }
+        }
+
+    RETURN_TYPES = (IO.BOOLEAN,)
+    RETURN_NAMES = ("contains",)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, substring, case_sensitive, **kwargs):
+        if case_sensitive:
+            contains = substring in string
+        else:
+            contains = substring.lower() in string.lower()
+
+        return contains,
+
+
+class StringCompare():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string_a": (IO.STRING, {"multiline": True}),
+                "string_b": (IO.STRING, {"multiline": True}),
+                "mode": (IO.COMBO, {"options": ["Starts With", "Ends With", "Equal"]}),
+                "case_sensitive": (IO.BOOLEAN, {"default": True})
+            }
+        }
+
+    RETURN_TYPES = (IO.BOOLEAN,)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string_a, string_b, mode, case_sensitive, **kwargs):
+        if case_sensitive:
+            a = string_a
+            b = string_b
+        else:
+            a = string_a.lower()
+            b = string_b.lower()
+
+        if mode == "Equal":
+            return a == b,
+        elif mode == "Starts With":
+            return a.startswith(b),
+        elif mode == "Ends With":
+            return a.endswith(b),
+
+class RegexMatch():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True}),
+                "regex_pattern": (IO.STRING, {"multiline": True}),
+                "case_insensitive": (IO.BOOLEAN, {"default": True}),
+                "multiline": (IO.BOOLEAN, {"default": False}),
+                "dotall": (IO.BOOLEAN, {"default": False})
+            }
+        }
+
+    RETURN_TYPES = (IO.BOOLEAN,)
+    RETURN_NAMES = ("matches",)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, regex_pattern, case_insensitive, multiline, dotall, **kwargs):
+        flags = 0
+
+        if case_insensitive:
+            flags |= re.IGNORECASE
+        if multiline:
+            flags |= re.MULTILINE
+        if dotall:
+            flags |= re.DOTALL
+
+        try:
+            match = re.search(regex_pattern, string, flags)
+            result = match is not None
+
+        except re.error:
+            result = False
+
+        return result,
+
+
+class RegexExtract():
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "string": (IO.STRING, {"multiline": True}),
+                "regex_pattern": (IO.STRING, {"multiline": True}),
+                "mode": (IO.COMBO, {"options": ["First Match", "All Matches", "First Group", "All Groups"]}),
+                "case_insensitive": (IO.BOOLEAN, {"default": True}),
+                "multiline": (IO.BOOLEAN, {"default": False}),
+                "dotall": (IO.BOOLEAN, {"default": False}),
+                "group_index": (IO.INT, {"default": 1, "min": 0, "max": 100})
+            }
+        }
+
+    RETURN_TYPES = (IO.STRING,)
+    FUNCTION = "execute"
+    CATEGORY = "utils/string"
+
+    def execute(self, string, regex_pattern, mode, case_insensitive, multiline, dotall, group_index, **kwargs):
+        join_delimiter = "\n"
+
+        flags = 0
+        if case_insensitive:
+            flags |= re.IGNORECASE
+        if multiline:
+            flags |= re.MULTILINE
+        if dotall:
+            flags |= re.DOTALL
+
+        try:
+            if mode == "First Match":
+                match = re.search(regex_pattern, string, flags)
+                if match:
+                    result = match.group(0)
+                else:
+                    result = ""
+
+            elif mode == "All Matches":
+                matches = re.findall(regex_pattern, string, flags)
+                if matches:
+                    if isinstance(matches[0], tuple):
+                        result = join_delimiter.join([m[0] for m in matches])
+                    else:
+                        result = join_delimiter.join(matches)
+                else:
+                    result = ""
+
+            elif mode == "First Group":
+                match = re.search(regex_pattern, string, flags)
+                if match and len(match.groups()) >= group_index:
+                    result = match.group(group_index)
+                else:
+                    result = ""
+
+            elif mode == "All Groups":
+                matches = re.finditer(regex_pattern, string, flags)
+                results = []
+                for match in matches:
+                    if match.groups() and len(match.groups()) >= group_index:
+                        results.append(match.group(group_index))
+                result = join_delimiter.join(results)
+            else:
+                result = ""
+
+        except re.error:
+            result = ""
+
+        return result,
+
+NODE_CLASS_MAPPINGS = {
+    "StringConcatenate": StringConcatenate,
+    "StringSubstring": StringSubstring,
+    "StringLength": StringLength,
+    "CaseConverter": CaseConverter,
+    "StringTrim": StringTrim,
+    "StringReplace": StringReplace,
+    "StringContains": StringContains,
+    "StringCompare": StringCompare,
+    "RegexMatch": RegexMatch,
+    "RegexExtract": RegexExtract
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "StringConcatenate": "Concatenate",
+    "StringSubstring": "Substring",
+    "StringLength": "Length",
+    "CaseConverter": "Case Converter",
+    "StringTrim": "Trim",
+    "StringReplace": "Replace",
+    "StringContains": "Contains",
+    "StringCompare": "Compare",
+    "RegexMatch": "Regex Match",
+    "RegexExtract": "Regex Extract"
+}
diff --git a/nodes.py b/nodes.py
index a1ddf2dd6..a26a138fa 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2263,6 +2263,7 @@ def init_builtin_extra_nodes():
         "nodes_fresca.py",
         "nodes_preview_any.py",
         "nodes_ace.py",
+        "nodes_string.py",
     ]

     import_failed = []
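These nodes are thin wrappers over Python's built-in string and re operations; outside a graph, RegexExtract's "First Group" mode reduces to roughly this (hypothetical input values, for illustration only):

    import re

    match = re.search(r"(\d+)x(\d+)", "render at 1920x1080", re.IGNORECASE)
    # group_index=1 with the same bounds check the node performs:
    width = match.group(1) if match and len(match.groups()) >= 1 else ""
    assert width == "1920"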
"nodes_preview_any.py", "nodes_ace.py", + "nodes_string.py", ] import_failed = [] From f5cacaeb14acb07c9dde561297ec5e06f5c9c4c8 Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Mon, 12 May 2025 16:47:02 -0400 Subject: [PATCH 0132/1073] Update frontend to v1.19 (#8076) * Update frontend to v1.19 * Update requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 01aab4ca2..8f7a78984 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.18.10 +comfyui-frontend-package==1.19.9 comfyui-workflow-templates==0.1.14 torch torchsde From 9ad287ff20daca21e5ab30e82ceb3095fc32c154 Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Mon, 12 May 2025 16:47:14 -0400 Subject: [PATCH 0133/1073] add support to record video as output for 3d node (#7927) * add support to record video as output for 3d node * source format * add support to record video for load3d animation node --- comfy_extras/nodes_load_3d.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/comfy_extras/nodes_load_3d.py b/comfy_extras/nodes_load_3d.py index 53d892bc4..d5b4d9111 100644 --- a/comfy_extras/nodes_load_3d.py +++ b/comfy_extras/nodes_load_3d.py @@ -2,6 +2,10 @@ import nodes import folder_paths import os +from comfy.comfy_types import IO +from comfy_api.input_impl import VideoFromFile + + def normalize_path(path): return path.replace('\\', '/') @@ -21,8 +25,8 @@ class Load3D(): "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), }} - RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE", "LOAD3D_CAMERA") - RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart", "camera_info") + RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO) + RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart", "camera_info", "recording_video") FUNCTION = "process" EXPERIMENTAL = True @@ -41,7 +45,14 @@ class Load3D(): normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path) lineart_image, ignore_mask3 = load_image_node.load_image(image=lineart_path) - return output_image, output_mask, model_file, normal_image, lineart_image, image['camera_info'] + video = None + + if image['recording'] != "": + recording_video_path = folder_paths.get_annotated_filepath(image['recording']) + + video = VideoFromFile(recording_video_path) + + return output_image, output_mask, model_file, normal_image, lineart_image, image['camera_info'], video class Load3DAnimation(): @classmethod @@ -59,8 +70,8 @@ class Load3DAnimation(): "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), }} - RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA") - RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info") + RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO) + RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info", "recording_video") FUNCTION = "process" EXPERIMENTAL = True @@ -77,7 +88,14 @@ class Load3DAnimation(): ignore_image, output_mask = load_image_node.load_image(image=mask_path) normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path) - return output_image, output_mask, model_file, normal_image, image['camera_info'] + video = None + + if image['recording'] != "": + recording_video_path = folder_paths.get_annotated_filepath(image['recording']) + + video = VideoFromFile(recording_video_path) + + return 
output_image, output_mask, model_file, normal_image, image['camera_info'], video class Preview3D(): @classmethod From 4136502b7a530df28e489f81c0c58b30612f9ece Mon Sep 17 00:00:00 2001 From: thot experiment <94414189+thot-experiment@users.noreply.github.com> Date: Mon, 12 May 2025 18:10:24 -0700 Subject: [PATCH 0134/1073] implement APG guidance (#8081) * first pass at impementing AGP * rename, cleanup code * fix ruff * fix modified cond to match ref impl better, support different cond arity --- comfy_extras/nodes_apg.py | 76 +++++++++++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 77 insertions(+) create mode 100644 comfy_extras/nodes_apg.py diff --git a/comfy_extras/nodes_apg.py b/comfy_extras/nodes_apg.py new file mode 100644 index 000000000..1325985b2 --- /dev/null +++ b/comfy_extras/nodes_apg.py @@ -0,0 +1,76 @@ +import torch + +def project(v0, v1): + v1 = torch.nn.functional.normalize(v1, dim=[-1, -2, -3]) + v0_parallel = (v0 * v1).sum(dim=[-1, -2, -3], keepdim=True) * v1 + v0_orthogonal = v0 - v0_parallel + return v0_parallel, v0_orthogonal + +class APG: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "eta": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01, "tooltip": "Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1."}), + "norm_threshold": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 50.0, "step": 0.1, "tooltip": "Normalize guidance vector to this value, normalization disable at a setting of 0."}), + "momentum": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip":"Controls a running average of guidance during diffusion, disabled at a setting of 0."}), + } + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "sampling/custom_sampling" + + def patch(self, model, eta, norm_threshold, momentum): + running_avg = 0 + prev_sigma = None + + def pre_cfg_function(args): + nonlocal running_avg, prev_sigma + + if len(args["conds_out"]) == 1: return args["conds_out"] + + cond = args["conds_out"][0] + uncond = args["conds_out"][1] + sigma = args["sigma"][0] + cond_scale = args["cond_scale"] + + if prev_sigma is not None and sigma > prev_sigma: + running_avg = 0 + prev_sigma = sigma + + guidance = cond - uncond + + if momentum > 0: + if not torch.is_tensor(running_avg): + running_avg = guidance + else: + running_avg = momentum * running_avg + guidance + guidance = running_avg + + if norm_threshold > 0: + guidance_norm = guidance.norm(p=2, dim=[-1, -2, -3], keepdim=True) + scale = torch.minimum( + torch.ones_like(guidance_norm), + norm_threshold / guidance_norm + ) + guidance = guidance * scale + + guidance_parallel, guidance_orthogonal = project(guidance, cond) + modified_guidance = guidance_orthogonal + eta * guidance_parallel + + modified_cond = (uncond + modified_guidance) + (cond - uncond) / cond_scale + + return [modified_cond, uncond] + args["conds_out"][2:] + + m = model.clone() + m.set_model_sampler_pre_cfg_function(pre_cfg_function) + return (m,) + +NODE_CLASS_MAPPINGS = { + "APG": APG, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "APG": "Adaptive Projected Guidance", +} diff --git a/nodes.py b/nodes.py index a26a138fa..54e3886a3 100644 --- a/nodes.py +++ b/nodes.py @@ -2261,6 +2261,7 @@ def init_builtin_extra_nodes(): "nodes_optimalsteps.py", "nodes_hidream.py", "nodes_fresca.py", + "nodes_apg.py", "nodes_preview_any.py", "nodes_ace.py", "nodes_string.py", From 2156ce9453a8508efec4c36c26e810a14a3ce473 Mon Sep 17 00:00:00 2001 From: Christian 
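The core of the node is the decomposition above: the guidance vector is split into components parallel and orthogonal to the conditional prediction, and eta rescales only the parallel part, so eta=1 reproduces plain CFG. A worked check on dummy tensors, reusing the patch's own project() (shapes chosen arbitrarily for illustration):

    import torch

    def project(v0, v1):
        v1 = torch.nn.functional.normalize(v1, dim=[-1, -2, -3])
        v0_parallel = (v0 * v1).sum(dim=[-1, -2, -3], keepdim=True) * v1
        return v0_parallel, v0 - v0_parallel

    cond, uncond = torch.randn(1, 4, 64, 64), torch.randn(1, 4, 64, 64)
    guidance = cond - uncond
    par, orth = project(guidance, cond)
    # eta = 1.0 recombines to the original guidance; smaller eta damps
    # the parallel component while leaving the orthogonal part intact.
    assert torch.allclose(orth + 1.0 * par, guidance, atol=1e-5)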
From 2156ce9453a8508efec4c36c26e810a14a3ce473 Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Mon, 12 May 2025 20:06:44 -0700
Subject: [PATCH 0135/1073] add comment about using api key in headless (#8082)

---
 script_examples/basic_api_example.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/script_examples/basic_api_example.py b/script_examples/basic_api_example.py
index c916e6cb9..9128420c4 100644
--- a/script_examples/basic_api_example.py
+++ b/script_examples/basic_api_example.py
@@ -101,6 +101,14 @@ prompt_text = """

 def queue_prompt(prompt):
     p = {"prompt": prompt}
+
+    # If the workflow contains API nodes, you can add a Comfy API key to the `extra_data` field of the payload.
+    # p["extra_data"] = {
+    #     "api_key_comfy_org": "comfyui-87d01e28d*******************************************************"  # replace with real key
+    # }
+    # See: https://docs.comfy.org/tutorials/api-nodes/overview
+    # Generate a key here: https://platform.comfy.org/login
+
     data = json.dumps(p).encode('utf-8')
     req = request.Request("http://127.0.0.1:8188/prompt", data=data)
     request.urlopen(req)
From 481732a0ed609ccd86cb3d2df00e3d14c9133280 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 13 May 2025 04:32:16 -0700
Subject: [PATCH 0136/1073] Support official ACE Step loras. (#8094)

---
 comfy/lora.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/comfy/lora.py b/comfy/lora.py
index fff524be2..ef110c164 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -286,6 +286,12 @@ def model_lora_keys_unet(model, key_map={}):
                 key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
                 key_map["lycoris_{}".format(key_lora)] = k #SimpleTuner lycoris format

+    if isinstance(model, comfy.model_base.ACEStep):
+        for k in sdk:
+            if k.startswith("diffusion_model.") and k.endswith(".weight"): #Official ACE step lora format
+                key_lora = k[len("diffusion_model."):-len(".weight")]
+                key_map["{}".format(key_lora)] = k
+
     return key_map
From a814f2e8ccf171c967ee2dc095dd02f2a93dccde Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 13 May 2025 04:54:28 -0700
Subject: [PATCH 0137/1073] Fix issue with old pytorch RMSNorm. (#8095)

---
 comfy/rmsnorm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/rmsnorm.py b/comfy/rmsnorm.py
index 9d82bee1a..66ae8321d 100644
--- a/comfy/rmsnorm.py
+++ b/comfy/rmsnorm.py
@@ -30,7 +30,7 @@ if RMSNorm is None:
         def __init__(
             self,
             normalized_shape,
-            eps=None,
+            eps=1e-6,
             elementwise_affine=True,
             device=None,
             dtype=None,
From 8a7c894d5415c499817c1fcdcba26940755c03a4 Mon Sep 17 00:00:00 2001
From: thot experiment <94414189+thot-experiment@users.noreply.github.com>
Date: Tue, 13 May 2025 10:50:32 -0700
Subject: [PATCH 0138/1073] fix negative momentum (#8100)

---
 comfy_extras/nodes_apg.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy_extras/nodes_apg.py b/comfy_extras/nodes_apg.py
index 1325985b2..25b21b1b8 100644
--- a/comfy_extras/nodes_apg.py
+++ b/comfy_extras/nodes_apg.py
@@ -14,7 +14,7 @@ class APG:
                 "model": ("MODEL",),
                 "eta": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01, "tooltip": "Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1."}),
                 "norm_threshold": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 50.0, "step": 0.1, "tooltip": "Normalize guidance vector to this value, normalization disable at a setting of 0."}),
-                "momentum": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip":"Controls a running average of guidance during diffusion, disabled at a setting of 0."}),
+                "momentum": ("FLOAT", {"default": 0.0, "min": -5.0, "max": 1.0, "step": 0.01, "tooltip":"Controls a running average of guidance during diffusion, disabled at a setting of 0."}),
             }
         }
     RETURN_TYPES = ("MODEL",)
     FUNCTION = "patch"
@@ -41,7 +41,7 @@ class APG:

             guidance = cond - uncond

-            if momentum > 0:
+            if momentum != 0:
                 if not torch.is_tensor(running_avg):
                     running_avg = guidance
                 else:
From 4a9014e201b71dfcf97d59d1086cb451cd40873f Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 13 May 2025 12:53:47 -0700
Subject: [PATCH 0139/1073] Hunyuan Custom initial untested implementation. (#8101)

---
 comfy/ldm/hunyuan_video/model.py | 21 ++++++++++++++++++---
 comfy/model_base.py              |  4 ++++
 comfy_extras/nodes_hunyuan.py    |  6 ++++--
 3 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py
index 72af3d5bb..fbd8d4196 100644
--- a/comfy/ldm/hunyuan_video/model.py
+++ b/comfy/ldm/hunyuan_video/model.py
@@ -228,6 +228,7 @@ class HunyuanVideo(nn.Module):
         y: Tensor,
         guidance: Tensor = None,
         guiding_frame_index=None,
+        ref_latent=None,
         control=None,
         transformer_options={},
     ) -> Tensor:
@@ -238,6 +239,14 @@ class HunyuanVideo(nn.Module):
         img = self.img_in(img)
         vec = self.time_in(timestep_embedding(timesteps, 256, time_factor=1.0).to(img.dtype))

+        if ref_latent is not None:
+            ref_latent_ids = self.img_ids(ref_latent)
+            ref_latent = self.img_in(ref_latent)
+            img = torch.cat([ref_latent, img], dim=-2)
+            ref_latent_ids[..., 0] = -1
+            ref_latent_ids[..., 2] += (initial_shape[-1] // self.patch_size[-1])
+            img_ids = torch.cat([ref_latent_ids, img_ids], dim=-2)
+
         if guiding_frame_index is not None:
             token_replace_vec = self.time_in(timestep_embedding(guiding_frame_index, 256, time_factor=1.0))
             vec_ = self.vector_in(y[:, :self.params.vec_in_dim])
@@ -313,6 +322,8 @@ class HunyuanVideo(nn.Module):
             img[:, : img_len] += add

         img = img[:, : img_len]
+        if ref_latent is not None:
+            img = img[:, ref_latent.shape[1]:]

         img = self.final_layer(img, vec, modulation_dims=modulation_dims)  # (N, T, patch_size ** 2 * out_channels)
@@ -324,7 +335,7 @@ class HunyuanVideo(nn.Module):
         img = img.reshape(initial_shape[0], self.out_channels, initial_shape[2], initial_shape[3], initial_shape[4])
         return img

-    def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, guiding_frame_index=None, control=None, transformer_options={}, **kwargs):
+    def img_ids(self, x):
         bs, c, t, h, w = x.shape
         patch_size = self.patch_size
         t_len = ((t + (patch_size[0] // 2)) // patch_size[0])
@@ -334,7 +345,11 @@ class HunyuanVideo(nn.Module):
         img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(0, t_len - 1, steps=t_len, device=x.device, dtype=x.dtype).reshape(-1, 1, 1)
         img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).reshape(1, -1, 1)
         img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).reshape(1, 1, -1)
-        img_ids = repeat(img_ids, "t h w c -> b (t h w) c", b=bs)
+        return repeat(img_ids, "t h w c -> b (t h w) c", b=bs)
+
+    def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs):
+        bs, c, t, h, w = x.shape
+        img_ids = self.img_ids(x)
         txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
-        out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, guidance, guiding_frame_index, control, transformer_options)
+        out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, guidance, guiding_frame_index, ref_latent, control=control, transformer_options=transformer_options)
         return out
diff --git a/comfy/model_base.py b/comfy/model_base.py
index 6d27930dc..047861593 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -924,6 +924,10 @@ class HunyuanVideo(BaseModel):
         if guiding_frame_index is not None:
             out['guiding_frame_index'] = comfy.conds.CONDRegular(torch.FloatTensor([guiding_frame_index]))

+        ref_latent = kwargs.get("ref_latent", None)
+        if ref_latent is not None:
+            out['ref_latent'] = comfy.conds.CONDRegular(self.process_latent_in(ref_latent))
+
         return out

     def scale_latent_inpaint(self, latent_image, **kwargs):
diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py
index 504010ad0..d7278e7a7 100644
--- a/comfy_extras/nodes_hunyuan.py
+++ b/comfy_extras/nodes_hunyuan.py
@@ -77,7 +77,7 @@ class HunyuanImageToVideo:
                 "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                 "length": ("INT", {"default": 53, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
                 "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                "guidance_type": (["v1 (concat)", "v2 (replace)"], )
+                "guidance_type": (["v1 (concat)", "v2 (replace)", "custom"], )
             },
             "optional": {"start_image": ("IMAGE", ),
             }}
@@ -101,10 +101,12 @@ class HunyuanImageToVideo:

         if guidance_type == "v1 (concat)":
             cond = {"concat_latent_image": concat_latent_image, "concat_mask": mask}
-        else:
+        elif guidance_type == "v2 (replace)":
             cond = {'guiding_frame_index': 0}
             latent[:, :, :concat_latent_image.shape[2]] = concat_latent_image
             out_latent["noise_mask"] = mask
+        elif guidance_type == "custom":
+            cond = {"ref_latent": concat_latent_image}

         positive = node_helpers.conditioning_set_values(positive, cond)
From bab836d88d62ebbe3b3040f3a9fff05f79d5049d Mon Sep 17 00:00:00 2001
From: thot experiment <94414189+thot-experiment@users.noreply.github.com>
Date: Tue, 13 May 2025 17:42:29 -0700
Subject: [PATCH 0140/1073] rework client.py to be more robust, add logging of api requests (#7988)

* rework how errors are handled on the client side
* add logging to /temp
* fix ruff
* fix rebase, stupid vscode gui
---
 comfy_api_nodes/apis/client.py         | 543 ++++++++++++++++++++++---
 comfy_api_nodes/apis/request_logger.py | 125 ++++++
 2 files changed, 622 insertions(+), 46 deletions(-)
 create mode 100644 comfy_api_nodes/apis/request_logger.py

diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py
index cff52714f..158d20deb 100644
--- a/comfy_api_nodes/apis/client.py
+++ b/comfy_api_nodes/apis/client.py
@@ -94,15 +94,18 @@ from __future__ import annotations
 import logging
 import time
 import io
-from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable
+import socket
+from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable, Tuple
 from enum import Enum
 import json
 import requests
-from urllib.parse import urljoin
+from urllib.parse import urljoin, urlparse
 from pydantic import BaseModel, Field
+import uuid  # For generating unique operation IDs

 from comfy.cli_args import args
 from comfy import utils
+from . import request_logger

 T = TypeVar("T", bound=BaseModel)
 R = TypeVar("R", bound=BaseModel)
@@ -111,6 +114,21 @@ P = TypeVar("P", bound=BaseModel)  # For poll response

 PROGRESS_BAR_MAX = 100

+class NetworkError(Exception):
+    """Base exception for network-related errors with diagnostic information."""
+    pass
+
+
+class LocalNetworkError(NetworkError):
+    """Exception raised when local network connectivity issues are detected."""
+    pass
+
+
+class ApiServerError(NetworkError):
+    """Exception raised when the API server is unreachable but internet is working."""
+    pass
+
+
 class EmptyRequest(BaseModel):
     """Base class for empty request bodies.
     For GET requests, fields will be sent as query parameters."""
@@ -141,7 +159,7 @@ class HttpMethod(str, Enum):

 class ApiClient:
     """
-    Client for making HTTP requests to an API with authentication and error handling.
+    Client for making HTTP requests to an API with authentication, error handling, and retry logic.
     """

     def __init__(
@@ -151,12 +169,26 @@ class ApiClient:
         comfy_api_key: Optional[str] = None,
         timeout: float = 3600.0,
         verify_ssl: bool = True,
+        max_retries: int = 3,
+        retry_delay: float = 1.0,
+        retry_backoff_factor: float = 2.0,
+        retry_status_codes: Optional[Tuple[int, ...]] = None,
     ):
         self.base_url = base_url
         self.auth_token = auth_token
         self.comfy_api_key = comfy_api_key
         self.timeout = timeout
         self.verify_ssl = verify_ssl
+        self.max_retries = max_retries
+        self.retry_delay = retry_delay
+        self.retry_backoff_factor = retry_backoff_factor
+        # Default retry status codes: 408 (Request Timeout), 429 (Too Many Requests),
+        # 500, 502, 503, 504 (Server Errors)
+        self.retry_status_codes = retry_status_codes or (408, 429, 500, 502, 503, 504)
+
+    def _generate_operation_id(self, path: str) -> str:
+        """Generates a unique operation ID for logging."""
+        return f"{path.strip('/').replace('/', '_')}_{uuid.uuid4().hex[:8]}"

     def _create_json_payload_args(
         self,
@@ -211,6 +243,56 @@ class ApiClient:

         return headers

+    def _check_connectivity(self, target_url: str) -> Dict[str, bool]:
+        """
+        Check connectivity to determine if network issues are local or server-related.
+
+        Args:
+            target_url: URL to check connectivity to
+
+        Returns:
+            Dictionary with connectivity status details
+        """
+        results = {
+            "internet_accessible": False,
+            "api_accessible": False,
+            "is_local_issue": False,
+            "is_api_issue": False
+        }
+
+        # First check basic internet connectivity using a reliable external site
+        try:
+            # Use a reliable external domain for checking basic connectivity
+            check_response = requests.get("https://www.google.com",
+                                          timeout=5.0,
+                                          verify=self.verify_ssl)
+            if check_response.status_code < 500:
+                results["internet_accessible"] = True
+        except (requests.RequestException, socket.error):
+            results["internet_accessible"] = False
+            results["is_local_issue"] = True
+            return results
+
+        # Now check API server connectivity
+        try:
+            # Extract domain from the target URL to do a simpler health check
+            parsed_url = urlparse(target_url)
+            api_base = f"{parsed_url.scheme}://{parsed_url.netloc}"
+
+            # Try to reach the API domain
+            api_response = requests.get(f"{api_base}/health", timeout=5.0, verify=self.verify_ssl)
+            if api_response.status_code < 500:
+                results["api_accessible"] = True
+            else:
+                results["api_accessible"] = False
+                results["is_api_issue"] = True
+        except requests.RequestException:
+            results["api_accessible"] = False
+            # If we can reach the internet but not the API, it's an API issue
+            results["is_api_issue"] = True
+
+        return results
+
     def request(
         self,
         method: str,
@@ -221,9 +303,10 @@ class ApiClient:
         headers: Optional[Dict[str, str]] = None,
         content_type: str = "application/json",
         multipart_parser: Callable = None,
+        retry_count: int = 0,  # Used internally for tracking retries
     ) -> Dict[str, Any]:
         """
-        Make an HTTP request to the API
+        Make an HTTP request to the API with automatic retries for transient errors.

         Args:
             method: HTTP method (GET, POST, etc.)
@@ -233,12 +316,15 @@ class ApiClient:
             files: Files to upload
             headers: Additional headers
             content_type: Content type of the request. Defaults to application/json.
+            retry_count: Internal parameter for tracking retries, do not set manually

         Returns:
             Parsed JSON response

         Raises:
-            requests.RequestException: If the request fails
+            LocalNetworkError: If local network connectivity issues are detected
+            ApiServerError: If the API server is unreachable but internet is working
+            Exception: For other request failures
         """
         url = urljoin(self.base_url, path)
         self.check_auth(self.auth_token, self.comfy_api_key)
@@ -265,6 +351,16 @@ class ApiClient:
         else:
             payload_args = self._create_json_payload_args(data, request_headers)

+        operation_id = self._generate_operation_id(path)
+        request_logger.log_request_response(
+            operation_id=operation_id,
+            request_method=method,
+            request_url=url,
+            request_headers=request_headers,
+            request_params=params,
+            request_data=data if content_type == "application/json" else "[form-data or other]"
+        )
+
         try:
             response = requests.request(
                 method=method,
@@ -275,50 +371,228 @@ class ApiClient:
                 **payload_args,
             )

+            # Check if we should retry based on status code
+            if (response.status_code in self.retry_status_codes and
+                retry_count < self.max_retries):
+
+                # Calculate delay with exponential backoff
+                delay = self.retry_delay * (self.retry_backoff_factor ** retry_count)
+
+                logging.warning(
+                    f"Request failed with status {response.status_code}. "
+                    f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})"
+                )
+
+                time.sleep(delay)
+                return self.request(
+                    method=method,
+                    path=path,
+                    params=params,
+                    data=data,
+                    files=files,
+                    headers=headers,
+                    content_type=content_type,
+                    multipart_parser=multipart_parser,
+                    retry_count=retry_count + 1,
+                )
+
             # Raise exception for error status codes
             response.raise_for_status()
-        except requests.ConnectionError:
-            raise Exception(
-                f"Unable to connect to the API server at {self.base_url}. Please check your internet connection or verify the service is available."
+
+            # Log successful response
+            response_content_to_log = response.content
+            try:
+                # Attempt to parse JSON for prettier logging, fallback to raw content
+                response_content_to_log = response.json()
+            except json.JSONDecodeError:
+                pass  # Keep as bytes/str if not JSON
+
+            request_logger.log_request_response(
+                operation_id=operation_id,
+                request_method=method,  # Pass request details again for context in log
+                request_url=url,
+                response_status_code=response.status_code,
+                response_headers=dict(response.headers),
+                response_content=response_content_to_log
             )
-        except requests.Timeout:
-            raise Exception(
-                f"Request timed out after {self.timeout} seconds. The server might be experiencing high load or the operation is taking longer than expected."
+        except requests.ConnectionError as e:
+            error_message = f"ConnectionError: {str(e)}"
+            request_logger.log_request_response(
+                operation_id=operation_id,
+                request_method=method,
+                request_url=url,
+                error_message=error_message
             )
+            # Only perform connectivity check if we've exhausted all retries
+            if retry_count >= self.max_retries:
+                # Check connectivity to determine if it's a local or API issue
+                connectivity = self._check_connectivity(self.base_url)
+
+                if connectivity["is_local_issue"]:
+                    raise LocalNetworkError(
+                        "Unable to connect to the API server due to local network issues. "
+                        "Please check your internet connection and try again."
+                    ) from e
+                elif connectivity["is_api_issue"]:
+                    raise ApiServerError(
+                        f"The API server at {self.base_url} is currently unreachable. "
+                        f"The service may be experiencing issues. Please try again later."
+                    ) from e
+
+            # If we haven't exhausted retries yet, retry the request
+            if retry_count < self.max_retries:
+                delay = self.retry_delay * (self.retry_backoff_factor ** retry_count)
+                logging.warning(
+                    f"Connection error: {str(e)}. "
+                    f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})"
+                )
+                time.sleep(delay)
+                return self.request(
+                    method=method,
+                    path=path,
+                    params=params,
+                    data=data,
+                    files=files,
+                    headers=headers,
+                    content_type=content_type,
+                    multipart_parser=multipart_parser,
+                    retry_count=retry_count + 1,
+                )
+
+            # If we've exhausted retries and didn't identify the specific issue,
+            # raise a generic exception
+            final_error_message = (
+                f"Unable to connect to the API server after {self.max_retries} attempts. "
+                f"Please check your internet connection or try again later."
+            )
+            request_logger.log_request_response(  # Log final failure
+                operation_id=operation_id,
+                request_method=method, request_url=url,
+                error_message=final_error_message
+            )
+            raise Exception(final_error_message) from e
+
+        except requests.Timeout as e:
+            error_message = f"Timeout: {str(e)}"
+            request_logger.log_request_response(
+                operation_id=operation_id,
+                request_method=method, request_url=url,
+                error_message=error_message
+            )
+            # Retry timeouts if we haven't exhausted retries
+            if retry_count < self.max_retries:
+                delay = self.retry_delay * (self.retry_backoff_factor ** retry_count)
+                logging.warning(
+                    f"Request timed out. "
+                    f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})"
+                )
+                time.sleep(delay)
+                return self.request(
+                    method=method,
+                    path=path,
+                    params=params,
+                    data=data,
+                    files=files,
+                    headers=headers,
+                    content_type=content_type,
+                    multipart_parser=multipart_parser,
+                    retry_count=retry_count + 1,
+                )
+            final_error_message = (
+                f"Request timed out after {self.timeout} seconds and {self.max_retries} retry attempts. "
+                f"The server might be experiencing high load or the operation is taking longer than expected."
+            )
+            request_logger.log_request_response(  # Log final failure
+                operation_id=operation_id,
+                request_method=method, request_url=url,
+                error_message=final_error_message
+            )
+            raise Exception(final_error_message) from e

         except requests.HTTPError as e:
             status_code = e.response.status_code if hasattr(e, "response") else None
-            error_message = f"HTTP Error: {str(e)}"
+            original_error_message = f"HTTP Error: {str(e)}"
+            error_content_for_log = None
+            if hasattr(e, "response") and e.response is not None:
+                error_content_for_log = e.response.content
+                try:
+                    error_content_for_log = e.response.json()
+                except json.JSONDecodeError:
+                    pass
+
+
+            # Try to extract detailed error message from JSON response for user display
+            # but log the full error content.
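+            # A typical error payload is assumed to look like
+            # {"error": {"message": "...", "type": "..."}}; anything that does
+            # not match falls through to the raw-content handling below.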
+ user_display_error_message = original_error_message - # Try to extract detailed error message from JSON response try: - if hasattr(e, "response") and e.response.content: + if hasattr(e, "response") and e.response is not None and e.response.content: error_json = e.response.json() if "error" in error_json and "message" in error_json["error"]: - error_message = f"API Error: {error_json['error']['message']}" + user_display_error_message = f"API Error: {error_json['error']['message']}" if "type" in error_json["error"]: - error_message += f" (Type: {error_json['error']['type']})" + user_display_error_message += f" (Type: {error_json['error']['type']})" + elif isinstance(error_json, dict): # Handle cases where error is just a JSON dict + user_display_error_message = f"API Error: {json.dumps(error_json)}" + else: # Non-dict JSON error + user_display_error_message = f"API Error: {str(error_json)}" + except json.JSONDecodeError: + # If not JSON, use the raw content if it's not too long, or a summary + if hasattr(e, "response") and e.response is not None and e.response.content: + raw_content = e.response.content.decode(errors='ignore') + if len(raw_content) < 200: # Arbitrary limit for display + user_display_error_message = f"API Error (raw): {raw_content}" else: - error_message = f"API Error: {error_json}" - except Exception as json_error: - # If we can't parse the JSON, fall back to the original error message - logging.debug( - f"[DEBUG] Failed to parse error response: {str(json_error)}" + user_display_error_message = f"API Error (raw, status {status_code})" + + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, request_url=url, + response_status_code=status_code, + response_headers=dict(e.response.headers) if hasattr(e, "response") and e.response is not None else None, + response_content=error_content_for_log, + error_message=original_error_message # Log the original exception string as error + ) + + logging.debug(f"[DEBUG] API Error: {user_display_error_message} (Status: {status_code})") + if hasattr(e, "response") and e.response is not None and e.response.content: + logging.debug(f"[DEBUG] Response content: {e.response.content}") + + # Retry if the status code is in our retry list and we haven't exhausted retries + if (status_code in self.retry_status_codes and + retry_count < self.max_retries): + + delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) + logging.warning( + f"HTTP error {status_code}. " + f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})" + ) + time.sleep(delay) + return self.request( + method=method, + path=path, + params=params, + data=data, + files=files, + headers=headers, + content_type=content_type, + multipart_parser=multipart_parser, + retry_count=retry_count + 1, ) - logging.debug(f"[DEBUG] API Error: {error_message} (Status: {status_code})") - if hasattr(e, "response") and e.response.content: - logging.debug(f"[DEBUG] Response content: {e.response.content}") + # Specific error messages for common status codes for user display if status_code == 401: - error_message = "Unauthorized: Please login first to use this node." - if status_code == 402: - error_message = "Payment Required: Please add credits to your account to use this node." - if status_code == 409: - error_message = "There is a problem with your account. Please contact support@comfy.org. " - if status_code == 429: - error_message = "Rate Limit Exceeded: Please try again later." 
- raise Exception(error_message) + user_display_error_message = "Unauthorized: Please login first to use this node." + elif status_code == 402: + user_display_error_message = "Payment Required: Please add credits to your account to use this node." + elif status_code == 409: + user_display_error_message = "There is a problem with your account. Please contact support@comfy.org." + elif status_code == 429: + user_display_error_message = "Rate Limit Exceeded: Please try again later." + # else, user_display_error_message remains as parsed from response or original HTTPError string + + raise Exception(user_display_error_message) # Raise with the user-friendly message # Parse and return JSON response if response.content: @@ -336,26 +610,126 @@ class ApiClient: upload_url: str, file: io.BytesIO | str, content_type: str | None = None, + max_retries: int = 3, + retry_delay: float = 1.0, + retry_backoff_factor: float = 2.0, ): - """Upload a file to the API. Make sure the file has a filename equal to what the url expects. + """Upload a file to the API with retry logic. Args: upload_url: The URL to upload to file: Either a file path string, BytesIO object, or tuple of (file_path, filename) - mime_type: Optional mime type to set for the upload + content_type: Optional mime type to set for the upload + max_retries: Maximum number of retry attempts + retry_delay: Initial delay between retries in seconds + retry_backoff_factor: Multiplier for the delay after each retry """ headers = {} if content_type: headers["Content-Type"] = content_type + # Prepare the file data if isinstance(file, io.BytesIO): file.seek(0) # Ensure we're at the start of the file data = file.read() - return requests.put(upload_url, data=data, headers=headers) elif isinstance(file, str): with open(file, "rb") as f: data = f.read() - return requests.put(upload_url, data=data, headers=headers) + else: + raise ValueError("File must be either a BytesIO object or a file path string") + + # Try the upload with retries + last_exception = None + operation_id = f"upload_{upload_url.split('/')[-1]}_{uuid.uuid4().hex[:8]}" # Simplified ID for uploads + + # Log initial attempt (without full file data for brevity) + request_logger.log_request_response( + operation_id=operation_id, + request_method="PUT", + request_url=upload_url, + request_headers=headers, + request_data=f"[File data of type {content_type or 'unknown'}, size {len(data)} bytes]" + ) + + for retry_attempt in range(max_retries + 1): + try: + response = requests.put(upload_url, data=data, headers=headers) + response.raise_for_status() + request_logger.log_request_response( + operation_id=operation_id, + request_method="PUT", request_url=upload_url, # For context + response_status_code=response.status_code, + response_headers=dict(response.headers), + response_content="File uploaded successfully." 
# Or response.text if available + ) + return response + + except (requests.ConnectionError, requests.Timeout, requests.HTTPError) as e: + last_exception = e + error_message_for_log = f"{type(e).__name__}: {str(e)}" + response_content_for_log = None + status_code_for_log = None + headers_for_log = None + + if hasattr(e, 'response') and e.response is not None: + status_code_for_log = e.response.status_code + headers_for_log = dict(e.response.headers) + try: + response_content_for_log = e.response.json() + except json.JSONDecodeError: + response_content_for_log = e.response.content + + + request_logger.log_request_response( + operation_id=operation_id, + request_method="PUT", request_url=upload_url, + response_status_code=status_code_for_log, + response_headers=headers_for_log, + response_content=response_content_for_log, + error_message=error_message_for_log + ) + + if retry_attempt < max_retries: + delay = retry_delay * (retry_backoff_factor ** retry_attempt) + logging.warning( + f"File upload failed: {str(e)}. " + f"Retrying in {delay:.2f}s ({retry_attempt + 1}/{max_retries})" + ) + time.sleep(delay) + else: + break # Max retries reached + + # If we've exhausted all retries, determine the final error type and raise + final_error_message = f"Failed to upload file after {max_retries + 1} attempts. Error: {str(last_exception)}" + try: + # Check basic internet connectivity + check_response = requests.get("https://www.google.com", timeout=5.0, verify=True) # Assuming verify=True is desired + if check_response.status_code >= 500: # Google itself has an issue (rare) + final_error_message = (f"Failed to upload file. Internet connectivity check to Google failed " + f"(status {check_response.status_code}). Original error: {str(last_exception)}") + # Not raising LocalNetworkError here as Google itself might be down. + # If Google is reachable, the issue is likely with the upload server or a more specific local problem + # not caught by a simple Google ping (e.g., DNS for the specific upload URL, firewall). + # The original last_exception is probably most relevant. + + except (requests.RequestException, socket.error) as conn_check_exc: + # Could not reach Google, likely a local network issue + final_error_message = (f"Failed to upload file due to network connectivity issues " + f"(cannot reach Google: {str(conn_check_exc)}). 
" + f"Original upload error: {str(last_exception)}") + request_logger.log_request_response( # Log final failure reason + operation_id=operation_id, + request_method="PUT", request_url=upload_url, + error_message=final_error_message + ) + raise LocalNetworkError(final_error_message) from last_exception + + request_logger.log_request_response( # Log final failure reason if not LocalNetworkError + operation_id=operation_id, + request_method="PUT", request_url=upload_url, + error_message=final_error_message + ) + raise Exception(final_error_message) from last_exception class ApiEndpoint(Generic[T, R]): @@ -403,6 +777,9 @@ class SynchronousOperation(Generic[T, R]): verify_ssl: bool = True, content_type: str = "application/json", multipart_parser: Callable = None, + max_retries: int = 3, + retry_delay: float = 1.0, + retry_backoff_factor: float = 2.0, ): self.endpoint = endpoint self.request = request @@ -419,8 +796,12 @@ class SynchronousOperation(Generic[T, R]): self.files = files self.content_type = content_type self.multipart_parser = multipart_parser + self.max_retries = max_retries + self.retry_delay = retry_delay + self.retry_backoff_factor = retry_backoff_factor + def execute(self, client: Optional[ApiClient] = None) -> R: - """Execute the API operation using the provided client or create one""" + """Execute the API operation using the provided client or create one with retry support""" try: # Create client if not provided if client is None: @@ -430,6 +811,9 @@ class SynchronousOperation(Generic[T, R]): comfy_api_key=self.comfy_api_key, timeout=self.timeout, verify_ssl=self.verify_ssl, + max_retries=self.max_retries, + retry_delay=self.retry_delay, + retry_backoff_factor=self.retry_backoff_factor, ) # Convert request model to dict, but use None for EmptyRequest @@ -443,11 +827,6 @@ class SynchronousOperation(Generic[T, R]): if isinstance(value, Enum): request_dict[key] = value.value - if request_dict: - for key, value in request_dict.items(): - if isinstance(value, Enum): - request_dict[key] = value.value - # Debug log for request logging.debug( f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}" @@ -455,7 +834,7 @@ class SynchronousOperation(Generic[T, R]): logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}") logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}") - # Make the request + # Make the request with built-in retry resp = client.request( method=self.endpoint.method.value, path=self.endpoint.path, @@ -476,8 +855,18 @@ class SynchronousOperation(Generic[T, R]): # Parse and return the response return self._parse_response(resp) + except LocalNetworkError as e: + # Propagate specific network error types + logging.error(f"[ERROR] Local network error: {str(e)}") + raise + + except ApiServerError as e: + # Propagate API server errors + logging.error(f"[ERROR] API server error: {str(e)}") + raise + except Exception as e: - logging.error(f"[DEBUG] API Exception: {str(e)}") + logging.error(f"[ERROR] API Exception: {str(e)}") raise Exception(str(e)) def _parse_response(self, resp): @@ -517,6 +906,10 @@ class PollingOperation(Generic[T, R]): comfy_api_key: Optional[str] = None, auth_kwargs: Optional[Dict[str,str]] = None, poll_interval: float = 5.0, + max_poll_attempts: int = 120, # Default max polling attempts (10 minutes with 5s interval) + max_retries: int = 3, # Max retries per individual API call + retry_delay: float = 1.0, + retry_backoff_factor: float = 2.0, ): self.poll_endpoint = poll_endpoint self.request = request @@ 
-527,6 +920,10 @@ class PollingOperation(Generic[T, R]): self.auth_token = auth_kwargs.get("auth_token", self.auth_token) self.comfy_api_key = auth_kwargs.get("comfy_api_key", self.comfy_api_key) self.poll_interval = poll_interval + self.max_poll_attempts = max_poll_attempts + self.max_retries = max_retries + self.retry_delay = retry_delay + self.retry_backoff_factor = retry_backoff_factor # Polling configuration self.status_extractor = status_extractor or ( @@ -548,8 +945,23 @@ class PollingOperation(Generic[T, R]): base_url=self.api_base, auth_token=self.auth_token, comfy_api_key=self.comfy_api_key, + max_retries=self.max_retries, + retry_delay=self.retry_delay, + retry_backoff_factor=self.retry_backoff_factor, ) return self._poll_until_complete(client) + except LocalNetworkError as e: + # Provide clear message for local network issues + raise Exception( + f"Polling failed due to local network issues. Please check your internet connection. " + f"Details: {str(e)}" + ) from e + except ApiServerError as e: + # Provide clear message for API server issues + raise Exception( + f"Polling failed due to API server issues. The service may be experiencing problems. " + f"Please try again later. Details: {str(e)}" + ) from e except Exception as e: raise Exception(f"Error during polling: {str(e)}") @@ -569,10 +981,13 @@ class PollingOperation(Generic[T, R]): def _poll_until_complete(self, client: ApiClient) -> R: """Poll until the task is complete""" poll_count = 0 + consecutive_errors = 0 + max_consecutive_errors = min(5, self.max_retries * 2) # Limit consecutive errors + if self.progress_extractor: progress = utils.ProgressBar(PROGRESS_BAR_MAX) - while True: + while poll_count < self.max_poll_attempts: try: poll_count += 1 logging.debug(f"[DEBUG] Polling attempt #{poll_count}") @@ -599,8 +1014,12 @@ class PollingOperation(Generic[T, R]): data=request_dict, ) + # Successfully got a response, reset consecutive error count + consecutive_errors = 0 + # Parse response response_obj = self.poll_endpoint.response_model.model_validate(resp) + # Check if task is complete status = self._check_task_status(response_obj) logging.debug(f"[DEBUG] Task Status: {status}") @@ -630,6 +1049,38 @@ class PollingOperation(Generic[T, R]): ) time.sleep(self.poll_interval) + except (LocalNetworkError, ApiServerError) as e: + # For network-related errors, increment error count and potentially abort + consecutive_errors += 1 + if consecutive_errors >= max_consecutive_errors: + raise Exception( + f"Polling aborted after {consecutive_errors} consecutive network errors: {str(e)}" + ) from e + + # Log the error but continue polling + logging.warning( + f"Network error during polling (attempt {poll_count}/{self.max_poll_attempts}): {str(e)}. " + f"Will retry in {self.poll_interval} seconds." + ) + time.sleep(self.poll_interval) + except Exception as e: + # For other errors, increment count and potentially abort + consecutive_errors += 1 + if consecutive_errors >= max_consecutive_errors: + raise Exception( + f"Polling aborted after {consecutive_errors} consecutive errors: {str(e)}" + ) from e + logging.error(f"[DEBUG] Polling error: {str(e)}") - raise Exception(f"Error while polling: {str(e)}") + logging.warning( + f"Error during polling (attempt {poll_count}/{self.max_poll_attempts}): {str(e)}. " + f"Will retry in {self.poll_interval} seconds." 
+ ) + time.sleep(self.poll_interval) + + # If we've exhausted all polling attempts + raise Exception( + f"Polling timed out after {poll_count} attempts ({poll_count * self.poll_interval} seconds). " + f"The operation may still be running on the server but is taking longer than expected." + ) diff --git a/comfy_api_nodes/apis/request_logger.py b/comfy_api_nodes/apis/request_logger.py new file mode 100644 index 000000000..93517ede9 --- /dev/null +++ b/comfy_api_nodes/apis/request_logger.py @@ -0,0 +1,125 @@ +import os +import datetime +import json +import logging +import folder_paths + +# Get the logger instance +logger = logging.getLogger(__name__) + +def get_log_directory(): + """ + Ensures the API log directory exists within ComfyUI's temp directory + and returns its path. + """ + base_temp_dir = folder_paths.get_temp_directory() + log_dir = os.path.join(base_temp_dir, "api_logs") + try: + os.makedirs(log_dir, exist_ok=True) + except Exception as e: + logger.error(f"Error creating API log directory {log_dir}: {e}") + # Fallback to base temp directory if sub-directory creation fails + return base_temp_dir + return log_dir + +def _format_data_for_logging(data): + """Helper to format data (dict, str, bytes) for logging.""" + if isinstance(data, bytes): + try: + return data.decode('utf-8') # Try to decode as text + except UnicodeDecodeError: + return f"[Binary data of length {len(data)} bytes]" + elif isinstance(data, (dict, list)): + try: + return json.dumps(data, indent=2, ensure_ascii=False) + except TypeError: + return str(data) # Fallback for non-serializable objects + return str(data) + +def log_request_response( + operation_id: str, + request_method: str, + request_url: str, + request_headers: dict | None = None, + request_params: dict | None = None, + request_data: any = None, + response_status_code: int | None = None, + response_headers: dict | None = None, + response_content: any = None, + error_message: str | None = None +): + """ + Logs API request and response details to a file in the temp/api_logs directory. 
+ """ + log_dir = get_log_directory() + timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f") + filename = f"{timestamp}_{operation_id.replace('/', '_').replace(':', '_')}.log" + filepath = os.path.join(log_dir, filename) + + log_content = [] + + log_content.append(f"Timestamp: {datetime.datetime.now().isoformat()}") + log_content.append(f"Operation ID: {operation_id}") + log_content.append("-" * 30 + " REQUEST " + "-" * 30) + log_content.append(f"Method: {request_method}") + log_content.append(f"URL: {request_url}") + if request_headers: + log_content.append(f"Headers:\n{_format_data_for_logging(request_headers)}") + if request_params: + log_content.append(f"Params:\n{_format_data_for_logging(request_params)}") + if request_data: + log_content.append(f"Data/Body:\n{_format_data_for_logging(request_data)}") + + log_content.append("\n" + "-" * 30 + " RESPONSE " + "-" * 30) + if response_status_code is not None: + log_content.append(f"Status Code: {response_status_code}") + if response_headers: + log_content.append(f"Headers:\n{_format_data_for_logging(response_headers)}") + if response_content: + log_content.append(f"Content:\n{_format_data_for_logging(response_content)}") + if error_message: + log_content.append(f"Error:\n{error_message}") + + try: + with open(filepath, "w", encoding="utf-8") as f: + f.write("\n".join(log_content)) + logger.debug(f"API log saved to: {filepath}") + except Exception as e: + logger.error(f"Error writing API log to {filepath}: {e}") + +if __name__ == '__main__': + # Example usage (for testing the logger directly) + logger.setLevel(logging.DEBUG) + # Mock folder_paths for direct execution if not running within ComfyUI full context + if not hasattr(folder_paths, 'get_temp_directory'): + class MockFolderPaths: + def get_temp_directory(self): + # Create a local temp dir for testing if needed + p = os.path.join(os.path.dirname(__file__), 'temp_test_logs') + os.makedirs(p, exist_ok=True) + return p + folder_paths = MockFolderPaths() + + log_request_response( + operation_id="test_operation_get", + request_method="GET", + request_url="https://api.example.com/test", + request_headers={"Authorization": "Bearer testtoken"}, + request_params={"param1": "value1"}, + response_status_code=200, + response_content={"message": "Success!"} + ) + log_request_response( + operation_id="test_operation_post_error", + request_method="POST", + request_url="https://api.example.com/submit", + request_data={"key": "value", "nested": {"num": 123}}, + error_message="Connection timed out" + ) + log_request_response( + operation_id="test_binary_response", + request_method="GET", + request_url="https://api.example.com/image.png", + response_status_code=200, + response_content=b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR...' 
# Sample binary data + ) From 98ff01e1486353a3b0ddd8fa82fcbb25401f8364 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Tue, 13 May 2025 21:33:18 -0700 Subject: [PATCH 0141/1073] Display progress and result URL directly on API nodes (#8102) * [Luma] Print download URL of successful task result directly on nodes (#177) [Veo] Print download URL of successful task result directly on nodes (#184) [Recraft] Print download URL of successful task result directly on nodes (#183) [Pixverse] Print download URL of successful task result directly on nodes (#182) [Kling] Print download URL of successful task result directly on nodes (#181) [MiniMax] Print progress text and download URL of successful task result directly on nodes (#179) [Docs] Link to docs in `API_NODE` class property type annotation comment (#178) [Ideogram] Print download URL of successful task result directly on nodes (#176) Show output URL and progress text on Pika nodes (#168) [BFL] Print download URL of successful task result directly on nodes (#175) [OpenAI] Print download URL of successful task result directly on nodes (#174) * fix ruff errors * fix 3.10 syntax error --- comfy/comfy_types/node_typing.py | 2 +- comfy_api_nodes/apinode_utils.py | 11 +- comfy_api_nodes/apis/client.py | 42 +++++++- comfy_api_nodes/nodes_bfl.py | 63 ++++++++---- comfy_api_nodes/nodes_ideogram.py | 24 ++++- comfy_api_nodes/nodes_kling.py | 164 ++++++++++++++++++++++++++---- comfy_api_nodes/nodes_luma.py | 33 ++++++ comfy_api_nodes/nodes_minimax.py | 27 ++++- comfy_api_nodes/nodes_openai.py | 12 ++- comfy_api_nodes/nodes_pika.py | 42 ++++++-- comfy_api_nodes/nodes_pixverse.py | 99 ++++++++++++------ comfy_api_nodes/nodes_recraft.py | 21 ++++ comfy_api_nodes/nodes_veo2.py | 26 ++++- 13 files changed, 474 insertions(+), 92 deletions(-) diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index 2ffc9c021..470eb9fdb 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -235,7 +235,7 @@ class ComfyNodeABC(ABC): DEPRECATED: bool """Flags a node as deprecated, indicating to users that they should find alternatives to this node.""" API_NODE: Optional[bool] - """Flags a node as an API node.""" + """Flags a node as an API node.
See: https://docs.comfy.org/tutorials/api-nodes/overview.""" @classmethod @abstractmethod diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index e28d7d607..87d8c3e1d 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -1,7 +1,7 @@ from __future__ import annotations import io import logging -from typing import Optional +from typing import Optional, Union from comfy.utils import common_upscale from comfy_api.input_impl import VideoFromFile from comfy_api.util import VideoContainer, VideoCodec @@ -15,6 +15,7 @@ from comfy_api_nodes.apis.client import ( UploadRequest, UploadResponse, ) +from server import PromptServer import numpy as np @@ -60,7 +61,9 @@ def downscale_image_tensor(image, total_pixels=1536 * 1024) -> torch.Tensor: return s -def validate_and_cast_response(response, timeout: int = None) -> torch.Tensor: +def validate_and_cast_response( + response, timeout: int = None, node_id: Union[str, None] = None +) -> torch.Tensor: """Validates and casts a response to a torch.Tensor. Args: @@ -94,6 +97,10 @@ def validate_and_cast_response(response, timeout: int = None) -> torch.Tensor: img = Image.open(io.BytesIO(img_data)) elif image_url: + if node_id: + PromptServer.instance.send_progress_text( + f"Result URL: {image_url}", node_id + ) img_response = requests.get(image_url, timeout=timeout) if img_response.status_code != 200: raise ValueError("Failed to download the image") diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 158d20deb..838ff1e8d 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -103,6 +103,7 @@ from urllib.parse import urljoin, urlparse from pydantic import BaseModel, Field import uuid # For generating unique operation IDs +from server import PromptServer from comfy.cli_args import args from comfy import utils from . 
import request_logger @@ -900,6 +901,7 @@ class PollingOperation(Generic[T, R]): failed_statuses: list, status_extractor: Callable[[R], str], progress_extractor: Callable[[R], float] = None, + result_url_extractor: Callable[[R], str] = None, request: Optional[T] = None, api_base: str | None = None, auth_token: Optional[str] = None, @@ -910,6 +912,8 @@ class PollingOperation(Generic[T, R]): max_retries: int = 3, # Max retries per individual API call retry_delay: float = 1.0, retry_backoff_factor: float = 2.0, + estimated_duration: Optional[float] = None, + node_id: Optional[str] = None, ): self.poll_endpoint = poll_endpoint self.request = request @@ -924,12 +928,15 @@ class PollingOperation(Generic[T, R]): self.max_retries = max_retries self.retry_delay = retry_delay self.retry_backoff_factor = retry_backoff_factor + self.estimated_duration = estimated_duration # Polling configuration self.status_extractor = status_extractor or ( lambda x: getattr(x, "status", None) ) self.progress_extractor = progress_extractor + self.result_url_extractor = result_url_extractor + self.node_id = node_id self.completed_statuses = completed_statuses self.failed_statuses = failed_statuses @@ -965,6 +972,26 @@ class PollingOperation(Generic[T, R]): except Exception as e: raise Exception(f"Error during polling: {str(e)}") + def _display_text_on_node(self, text: str): + """Sends text to the client which will be displayed on the node in the UI""" + if not self.node_id: + return + + PromptServer.instance.send_progress_text(text, self.node_id) + + def _display_time_progress_on_node(self, time_completed: int): + if not self.node_id: + return + + if self.estimated_duration is not None: + estimated_time_remaining = max( + 0, int(self.estimated_duration) - int(time_completed) + ) + message = f"Task in progress: {time_completed:.0f}s (~{estimated_time_remaining:.0f}s remaining)" + else: + message = f"Task in progress: {time_completed:.0f}s" + self._display_text_on_node(message) + def _check_task_status(self, response: R) -> TaskStatus: """Check task status using the status extractor function""" try: @@ -1031,7 +1058,15 @@ class PollingOperation(Generic[T, R]): progress.update_absolute(new_progress, total=PROGRESS_BAR_MAX) if status == TaskStatus.COMPLETED: - logging.debug("[DEBUG] Task completed successfully") + message = "Task completed successfully" + if self.result_url_extractor: + result_url = self.result_url_extractor(response_obj) + if result_url: + message = f"Result URL: {result_url}" + else: + message = "Task completed successfully!" 
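# A minimal sketch (hypothetical field names, not part of this patch) of the
# two extractor callables PollingOperation accepts; the real extractors are
# defined per API in the node modules later in this patch. Assumes `Optional`
# is imported from typing at the top of client.py, as it is elsewhere here.

def example_result_url_extractor(response) -> Optional[str]:
    # `result` and `download_url` are assumed response fields for illustration;
    # an extractor must tolerate missing fields and return None rather than raise.
    result = getattr(response, "result", None)
    return getattr(result, "download_url", None) if result else None

def example_progress_extractor(response) -> float:
    # The polling loop feeds this value to utils.ProgressBar via
    # update_absolute(..., total=PROGRESS_BAR_MAX).
    return float(getattr(response, "progress", 0.0))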
+ logging.debug(f"[DEBUG] {message}") + self._display_text_on_node(message) self.final_response = response_obj if self.progress_extractor: progress.update(100) @@ -1047,7 +1082,10 @@ class PollingOperation(Generic[T, R]): logging.debug( f"[DEBUG] Waiting {self.poll_interval} seconds before next poll" ) - time.sleep(self.poll_interval) + for i in range(int(self.poll_interval)): + time_completed = (poll_count * self.poll_interval) + i + self._display_time_progress_on_node(time_completed) + time.sleep(1) except (LocalNetworkError, ApiServerError) as e: # For network-related errors, increment error count and potentially abort diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index 66ef1b391..509170b34 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -1,5 +1,6 @@ import io from inspect import cleandoc +from typing import Union from comfy.comfy_types.node_typing import IO, ComfyNodeABC from comfy_api_nodes.apis.bfl_api import ( BFLStatus, @@ -30,6 +31,7 @@ import requests import torch import base64 import time +from server import PromptServer def convert_mask_to_image(mask: torch.Tensor): @@ -42,14 +44,19 @@ def convert_mask_to_image(mask: torch.Tensor): def handle_bfl_synchronous_operation( - operation: SynchronousOperation, timeout_bfl_calls=360 + operation: SynchronousOperation, + timeout_bfl_calls=360, + node_id: Union[str, None] = None, ): response_api: BFLFluxProGenerateResponse = operation.execute() return _poll_until_generated( - response_api.polling_url, timeout=timeout_bfl_calls + response_api.polling_url, timeout=timeout_bfl_calls, node_id=node_id ) -def _poll_until_generated(polling_url: str, timeout=360): + +def _poll_until_generated( + polling_url: str, timeout=360, node_id: Union[str, None] = None +): # used bfl-comfy-nodes to verify code implementation: # https://github.com/black-forest-labs/bfl-comfy-nodes/tree/main start_time = time.time() @@ -61,11 +68,21 @@ def _poll_until_generated(polling_url: str, timeout=360): request = requests.Request(method=HttpMethod.GET, url=polling_url) # NOTE: should True loop be replaced with checking if workflow has been interrupted? 
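# A sketch of how the NOTE above could be resolved, assuming
# comfy.model_management.throw_exception_if_processing_interrupted() is
# available to this module (that change is not part of this patch):
#
#     import comfy.model_management
#     while True:
#         # Abort polling as soon as the user interrupts the workflow.
#         comfy.model_management.throw_exception_if_processing_interrupted()
#         ...  # existing polling body as below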
while True: + if node_id: + time_elapsed = time.time() - start_time + PromptServer.instance.send_progress_text( + f"Generating ({time_elapsed:.0f}s)", node_id + ) + response = requests.Session().send(request.prepare()) if response.status_code == 200: result = response.json() if result["status"] == BFLStatus.ready: img_url = result["result"]["sample"] + if node_id: + PromptServer.instance.send_progress_text( + f"Result URL: {img_url}", node_id + ) img_response = requests.get(img_url) return process_image_response(img_response) elif result["status"] in [ @@ -180,6 +197,7 @@ class FluxProUltraImageNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -212,6 +230,7 @@ class FluxProUltraImageNode(ComfyNodeABC): seed=0, image_prompt=None, image_prompt_strength=0.1, + unique_id: Union[str, None] = None, **kwargs, ): if image_prompt is None: @@ -246,7 +265,7 @@ class FluxProUltraImageNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) @@ -320,6 +339,7 @@ class FluxProImageNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -338,6 +358,7 @@ class FluxProImageNode(ComfyNodeABC): seed=0, image_prompt=None, # image_prompt_strength=0.1, + unique_id: Union[str, None] = None, **kwargs, ): image_prompt = ( @@ -363,7 +384,7 @@ class FluxProImageNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) @@ -457,11 +478,11 @@ class FluxProExpandNode(ComfyNodeABC): }, ), }, - "optional": { - }, + "optional": {}, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -483,6 +504,7 @@ class FluxProExpandNode(ComfyNodeABC): steps: int, guidance: float, seed=0, + unique_id: Union[str, None] = None, **kwargs, ): image = convert_image_to_base64(image) @@ -508,7 +530,7 @@ class FluxProExpandNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) @@ -568,11 +590,11 @@ class FluxProFillNode(ComfyNodeABC): }, ), }, - "optional": { - }, + "optional": {}, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -591,13 +613,14 @@ class FluxProFillNode(ComfyNodeABC): steps: int, guidance: float, seed=0, + unique_id: Union[str, None] = None, **kwargs, ): # prepare mask mask = resize_mask_to_image(mask, image) mask = convert_image_to_base64(convert_mask_to_image(mask)) # make sure image will have alpha channel removed - image = convert_image_to_base64(image[:,:,:,:3]) + image = convert_image_to_base64(image[:, :, :, :3]) operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -617,7 +640,7 @@ class FluxProFillNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) @@ -702,11 +725,11 @@ class FluxProCannyNode(ComfyNodeABC): }, ), }, - "optional": { - }, + "optional": {}, "hidden": { "auth_token": 
"AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -727,9 +750,10 @@ class FluxProCannyNode(ComfyNodeABC): steps: int, guidance: float, seed=0, + unique_id: Union[str, None] = None, **kwargs, ): - control_image = convert_image_to_base64(control_image[:,:,:,:3]) + control_image = convert_image_to_base64(control_image[:, :, :, :3]) preprocessed_image = None # scale canny threshold between 0-500, to match BFL's API @@ -765,7 +789,7 @@ class FluxProCannyNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) @@ -830,11 +854,11 @@ class FluxProDepthNode(ComfyNodeABC): }, ), }, - "optional": { - }, + "optional": {}, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -853,6 +877,7 @@ class FluxProDepthNode(ComfyNodeABC): steps: int, guidance: float, seed=0, + unique_id: Union[str, None] = None, **kwargs, ): control_image = convert_image_to_base64(control_image[:,:,:,:3]) @@ -880,7 +905,7 @@ class FluxProDepthNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index d25468b17..b1cbf511d 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -23,6 +23,7 @@ from comfy_api_nodes.apinode_utils import ( bytesio_to_image_tensor, resize_mask_to_image, ) +from server import PromptServer V1_V1_RES_MAP = { "Auto":"AUTO", @@ -232,6 +233,19 @@ def download_and_process_images(image_urls): return stacked_tensors +def display_image_urls_on_node(image_urls, node_id): + if node_id and image_urls: + if len(image_urls) == 1: + PromptServer.instance.send_progress_text( + f"Generated Image URL:\n{image_urls[0]}", node_id + ) + else: + urls_text = "Generated Image URLs:\n" + "\n".join( + f"{i+1}. {url}" for i, url in enumerate(image_urls) + ) + PromptServer.instance.send_progress_text(urls_text, node_id) + + class IdeogramV1(ComfyNodeABC): """ Generates images using the Ideogram V1 model. 
@@ -304,6 +318,7 @@ class IdeogramV1(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -322,6 +337,7 @@ class IdeogramV1(ComfyNodeABC): seed=0, negative_prompt="", num_images=1, + unique_id=None, **kwargs, ): # Determine the model based on turbo setting @@ -361,6 +377,7 @@ class IdeogramV1(ComfyNodeABC): if not image_urls: raise Exception("No image URLs were generated in the response") + display_image_urls_on_node(image_urls, unique_id) return (download_and_process_images(image_urls),) @@ -460,6 +477,7 @@ class IdeogramV2(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -481,6 +499,7 @@ class IdeogramV2(ComfyNodeABC): negative_prompt="", num_images=1, color_palette="", + unique_id=None, **kwargs, ): aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) @@ -534,6 +553,7 @@ class IdeogramV2(ComfyNodeABC): if not image_urls: raise Exception("No image URLs were generated in the response") + display_image_urls_on_node(image_urls, unique_id) return (download_and_process_images(image_urls),) class IdeogramV3(ComfyNodeABC): @@ -623,6 +643,7 @@ class IdeogramV3(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -643,6 +664,7 @@ class IdeogramV3(ComfyNodeABC): seed=0, num_images=1, rendering_speed="BALANCED", + unique_id=None, **kwargs, ): # Check if both image and mask are provided for editing mode @@ -762,6 +784,7 @@ class IdeogramV3(ComfyNodeABC): if not image_urls: raise Exception("No image URLs were generated in the response") + display_image_urls_on_node(image_urls, unique_id) return (download_and_process_images(image_urls),) @@ -776,4 +799,3 @@ NODE_DISPLAY_NAME_MAPPINGS = { "IdeogramV2": "Ideogram V2", "IdeogramV3": "Ideogram V3", } - diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 2d0fd8883..456a86905 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -6,6 +6,7 @@ For source of truth on the allowed permutations of request fields, please refere from __future__ import annotations from typing import Optional, TypeVar, Any +from collections.abc import Callable import math import logging @@ -86,6 +87,15 @@ MAX_PROMPT_LENGTH_IMAGE_GEN = 500 MAX_NEGATIVE_PROMPT_LENGTH_IMAGE_GEN = 200 MAX_PROMPT_LENGTH_LIP_SYNC = 120 +# TODO: adjust based on tests +AVERAGE_DURATION_T2V = 319 # 319, +AVERAGE_DURATION_I2V = 164 # 164, +AVERAGE_DURATION_LIP_SYNC = 120 +AVERAGE_DURATION_VIRTUAL_TRY_ON = 19 # 19, +AVERAGE_DURATION_IMAGE_GEN = 32 +AVERAGE_DURATION_VIDEO_EFFECTS = 320 +AVERAGE_DURATION_VIDEO_EXTEND = 320 + R = TypeVar("R") @@ -95,7 +105,13 @@ class KlingApiError(Exception): pass -def poll_until_finished(auth_kwargs: dict[str,str], api_endpoint: ApiEndpoint[Any, R]) -> R: +def poll_until_finished( + auth_kwargs: dict[str, str], + api_endpoint: ApiEndpoint[Any, R], + result_url_extractor: Optional[Callable[[R], str]] = None, + estimated_duration: Optional[int] = None, + node_id: Optional[str] = None, +) -> R: """Polls the Kling API endpoint until the task reaches a terminal state, then returns the response.""" return PollingOperation( poll_endpoint=api_endpoint, @@ -109,6 +125,9 @@ def poll_until_finished(auth_kwargs: dict[str,str], api_endpoint: ApiEndpoint[An else None ), auth_kwargs=auth_kwargs, + result_url_extractor=result_url_extractor, + 
estimated_duration=estimated_duration, + node_id=node_id, ).execute() @@ -227,7 +246,9 @@ def get_camera_control_input_config( def get_video_from_response(response) -> KlingVideoResult: - """Returns the first video object from the Kling video generation task result.""" + """Returns the first video object from the Kling video generation task result. + Will raise an error if the response is not valid. + """ video = response.data.task_result.videos[0] logging.info( "Kling task %s succeeded. Video URL: %s", response.data.task_id, video.url @@ -235,12 +256,37 @@ def get_video_from_response(response) -> KlingVideoResult: return video + +def get_video_url_from_response(response) -> Optional[str]: + """Returns the first video url from the Kling video generation task result. + Will not raise an error if the response is not valid. + """ + if response and is_valid_video_response(response): + return str(get_video_from_response(response).url) + else: + return None + + def get_images_from_response(response) -> list[KlingImageResult]: + """Returns the list of image objects from the Kling image generation task result. + Will raise an error if the response is not valid. + """ images = response.data.task_result.images logging.info("Kling task %s succeeded. Images: %s", response.data.task_id, images) return images +def get_images_urls_from_response(response) -> Optional[str]: + """Returns the image urls from the Kling image generation task result, joined into a single newline-separated string (whether there is one image or several). + Will not raise an error if the response is not valid. + """ + if response and is_valid_image_response(response): + images = get_images_from_response(response) + image_urls = [str(image.url) for image in images] + return "\n".join(image_urls) + else: + return None + + def video_result_to_node_output( video: KlingVideoResult, ) -> tuple[VideoFromFile, str, str]: @@ -312,6 +358,7 @@ class KlingCameraControls(KlingNodeBase): RETURN_TYPES = ("CAMERA_CONTROL",) RETURN_NAMES = ("camera_control",) FUNCTION = "main" + API_NODE = False # This is just a helper node, it doesn't make an API call @classmethod def VALIDATE_INPUTS( @@ -421,6 +468,7 @@ class KlingTextToVideoNode(KlingNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -428,7 +476,9 @@ class KlingTextToVideoNode(KlingNodeBase): RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Text to Video Node" - def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingText2VideoResponse: + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> KlingText2VideoResponse: return poll_until_finished( auth_kwargs, ApiEndpoint( @@ -437,6 +487,9 @@ class KlingTextToVideoNode(KlingNodeBase): request_model=EmptyRequest, response_model=KlingText2VideoResponse, ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_T2V, + node_id=node_id, ) def api_call( self, prompt: str, negative_prompt: str, cfg_scale: float, mode: str, aspect_ratio: str, camera_control: Optional[KlingCameraControl] = None, model_name: Optional[str] = None, duration: Optional[str] = None, + unique_id: Optional[str] = None, **kwargs, ) -> tuple[VideoFromFile, str, str]: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) @@ -478,7 +532,9 @@ validate_task_creation_response(task_creation_response) task_id = 
task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_kwargs=kwargs) + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -528,6 +584,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -540,6 +597,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode): cfg_scale: float, aspect_ratio: str, camera_control: Optional[KlingCameraControl] = None, + unique_id: Optional[str] = None, **kwargs, ): return super().api_call( @@ -613,6 +671,7 @@ class KlingImage2VideoNode(KlingNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -620,7 +679,9 @@ class KlingImage2VideoNode(KlingNodeBase): RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Image to Video Node" - def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingImage2VideoResponse: + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> KlingImage2VideoResponse: return poll_until_finished( auth_kwargs, ApiEndpoint( @@ -629,6 +690,9 @@ class KlingImage2VideoNode(KlingNodeBase): request_model=KlingImage2VideoRequest, response_model=KlingImage2VideoResponse, ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_I2V, + node_id=node_id, ) def api_call( @@ -643,6 +707,7 @@ class KlingImage2VideoNode(KlingNodeBase): duration: str, camera_control: Optional[KlingCameraControl] = None, end_frame: Optional[torch.Tensor] = None, + unique_id: Optional[str] = None, **kwargs, ) -> tuple[VideoFromFile]: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) @@ -681,7 +746,9 @@ class KlingImage2VideoNode(KlingNodeBase): validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_kwargs=kwargs) + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -734,6 +801,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -747,6 +815,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): cfg_scale: float, aspect_ratio: str, camera_control: KlingCameraControl, + unique_id: Optional[str] = None, **kwargs, ): return super().api_call( @@ -759,6 +828,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): prompt=prompt, negative_prompt=negative_prompt, camera_control=camera_control, + unique_id=unique_id, **kwargs, ) @@ -830,6 +900,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -844,6 +915,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): cfg_scale: float, aspect_ratio: str, mode: str, + unique_id: Optional[str] = None, **kwargs, ): mode, duration, model_name = KlingStartEndFrameNode.get_mode_string_mapping()[ @@ -859,6 +931,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): aspect_ratio=aspect_ratio, duration=duration, end_frame=end_frame, + unique_id=unique_id, 
**kwargs, ) @@ -892,6 +965,7 @@ class KlingVideoExtendNode(KlingNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -899,7 +973,9 @@ class KlingVideoExtendNode(KlingNodeBase): RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes." - def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingVideoExtendResponse: + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> KlingVideoExtendResponse: return poll_until_finished( auth_kwargs, ApiEndpoint( @@ -908,6 +984,9 @@ class KlingVideoExtendNode(KlingNodeBase): request_model=EmptyRequest, response_model=KlingVideoExtendResponse, ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_VIDEO_EXTEND, + node_id=node_id, ) def api_call( @@ -916,6 +995,7 @@ class KlingVideoExtendNode(KlingNodeBase): negative_prompt: str, cfg_scale: float, video_id: str, + unique_id: Optional[str] = None, **kwargs, ) -> tuple[VideoFromFile, str, str]: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) @@ -939,7 +1019,9 @@ class KlingVideoExtendNode(KlingNodeBase): validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_kwargs=kwargs) + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -952,7 +1034,9 @@ class KlingVideoEffectsBase(KlingNodeBase): RETURN_TYPES = ("VIDEO", "STRING", "STRING") RETURN_NAMES = ("VIDEO", "video_id", "duration") - def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingVideoEffectsResponse: + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> KlingVideoEffectsResponse: return poll_until_finished( auth_kwargs, ApiEndpoint( @@ -961,6 +1045,9 @@ class KlingVideoEffectsBase(KlingNodeBase): request_model=EmptyRequest, response_model=KlingVideoEffectsResponse, ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_VIDEO_EFFECTS, + node_id=node_id, ) def api_call( @@ -972,6 +1059,7 @@ class KlingVideoEffectsBase(KlingNodeBase): image_1: torch.Tensor, image_2: Optional[torch.Tensor] = None, mode: Optional[KlingVideoGenMode] = None, + unique_id: Optional[str] = None, **kwargs, ): if dual_character: @@ -1009,7 +1097,9 @@ class KlingVideoEffectsBase(KlingNodeBase): validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_kwargs=kwargs) + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -1053,6 +1143,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -1068,6 +1159,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): model_name: KlingCharacterEffectModelName, mode: KlingVideoGenMode, duration: KlingVideoGenDuration, + unique_id: Optional[str] = None, **kwargs, ): video, _, duration = super().api_call( @@ 
-1078,10 +1170,12 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): duration=duration, image_1=image_left, image_2=image_right, + unique_id=unique_id, **kwargs, ) return video, duration + class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): """Kling Single Image Video Effect Node""" @@ -1117,6 +1211,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -1128,6 +1223,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): effect_scene: KlingSingleImageEffectsScene, model_name: KlingSingleImageEffectModelName, duration: KlingVideoGenDuration, + unique_id: Optional[str] = None, **kwargs, ): return super().api_call( @@ -1136,6 +1232,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): model_name=model_name, duration=duration, image_1=image, + unique_id=unique_id, **kwargs, ) @@ -1154,7 +1251,9 @@ class KlingLipSyncBase(KlingNodeBase): f"Text is too long. Maximum length is {MAX_PROMPT_LENGTH_LIP_SYNC} characters." ) - def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingLipSyncResponse: + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> KlingLipSyncResponse: """Polls the Kling API endpoint until the task reaches a terminal state.""" return poll_until_finished( auth_kwargs, @@ -1164,6 +1263,9 @@ class KlingLipSyncBase(KlingNodeBase): request_model=EmptyRequest, response_model=KlingLipSyncResponse, ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_LIP_SYNC, + node_id=node_id, ) def api_call( @@ -1175,7 +1277,8 @@ class KlingLipSyncBase(KlingNodeBase): text: Optional[str] = None, voice_speed: Optional[float] = None, voice_id: Optional[str] = None, - **kwargs + unique_id: Optional[str] = None, + **kwargs, ) -> tuple[VideoFromFile, str, str]: if text: self.validate_text(text) @@ -1217,7 +1320,9 @@ class KlingLipSyncBase(KlingNodeBase): validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_kwargs=kwargs) + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -1243,6 +1348,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -1253,6 +1359,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): video: VideoInput, audio: AudioInput, voice_language: str, + unique_id: Optional[str] = None, **kwargs, ): return super().api_call( @@ -1260,6 +1367,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): audio=audio, voice_language=voice_language, mode="audio2video", + unique_id=unique_id, **kwargs, ) @@ -1352,6 +1460,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -1363,6 +1472,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): text: str, voice: str, voice_speed: float, + unique_id: Optional[str] = None, **kwargs, ): voice_id, voice_language = KlingLipSyncTextToVideoNode.get_voice_config()[voice] @@ -1373,6 +1483,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): voice_id=voice_id, 
voice_speed=voice_speed, mode="text2video", + unique_id=unique_id, **kwargs, ) @@ -1413,13 +1524,14 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } - DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human." + DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background." def get_response( - self, task_id: str, auth_kwargs: dict[str,str] = None + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> KlingVirtualTryOnResponse: return poll_until_finished( auth_kwargs, @@ -1429,6 +1541,9 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): request_model=EmptyRequest, response_model=KlingVirtualTryOnResponse, ), + result_url_extractor=get_images_urls_from_response, + estimated_duration=AVERAGE_DURATION_VIRTUAL_TRY_ON, + node_id=node_id, ) def api_call( @@ -1436,6 +1551,7 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): human_image: torch.Tensor, cloth_image: torch.Tensor, model_name: KlingVirtualTryOnModelName, + unique_id: Optional[str] = None, **kwargs, ): initial_operation = SynchronousOperation( @@ -1457,7 +1573,9 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_kwargs=kwargs) + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) validate_image_result_response(final_response) images = get_images_from_response(final_response) @@ -1528,13 +1646,17 @@ class KlingImageGenerationNode(KlingImageGenerationBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } DESCRIPTION = "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image." 
def get_response( - self, task_id: str, auth_kwargs: Optional[dict[str,str]] = None + self, + task_id: str, + auth_kwargs: Optional[dict[str, str]], + node_id: Optional[str] = None, ) -> KlingImageGenerationsResponse: return poll_until_finished( auth_kwargs, @@ -1544,6 +1666,9 @@ class KlingImageGenerationNode(KlingImageGenerationBase): request_model=EmptyRequest, response_model=KlingImageGenerationsResponse, ), + result_url_extractor=get_images_urls_from_response, + estimated_duration=AVERAGE_DURATION_IMAGE_GEN, + node_id=node_id, ) def api_call( @@ -1557,6 +1682,7 @@ class KlingImageGenerationNode(KlingImageGenerationBase): n: int, aspect_ratio: KlingImageGenAspectRatio, image: Optional[torch.Tensor] = None, + unique_id: Optional[str] = None, **kwargs, ): self.validate_prompt(prompt, negative_prompt) @@ -1589,7 +1715,9 @@ class KlingImageGenerationNode(KlingImageGenerationBase): validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response(task_id, auth_kwargs=kwargs) + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) validate_image_result_response(final_response) images = get_images_from_response(final_response) diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index bd33a53e0..525dc38e6 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -36,11 +36,20 @@ from comfy_api_nodes.apinode_utils import ( process_image_response, validate_string, ) +from server import PromptServer import requests import torch from io import BytesIO +LUMA_T2V_AVERAGE_DURATION = 105 +LUMA_I2V_AVERAGE_DURATION = 100 + +def image_result_url_extractor(response: LumaGeneration): + return response.assets.image if hasattr(response, "assets") and hasattr(response.assets, "image") else None + +def video_result_url_extractor(response: LumaGeneration): + return response.assets.video if hasattr(response, "assets") and hasattr(response.assets, "video") else None class LumaReferenceNode(ComfyNodeABC): """ @@ -204,6 +213,7 @@ class LumaImageGenerationNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -217,6 +227,7 @@ class LumaImageGenerationNode(ComfyNodeABC): image_luma_ref: LumaReferenceChain = None, style_image: torch.Tensor = None, character_image: torch.Tensor = None, + unique_id: str = None, **kwargs, ): validate_string(prompt, strip_whitespace=True, min_length=3) @@ -271,6 +282,8 @@ class LumaImageGenerationNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, + result_url_extractor=image_result_url_extractor, + node_id=unique_id, auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -353,6 +366,7 @@ class LumaImageModifyNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -363,6 +377,7 @@ class LumaImageModifyNode(ComfyNodeABC): image: torch.Tensor, image_weight: float, seed, + unique_id: str = None, **kwargs, ): # first, upload image @@ -399,6 +414,8 @@ class LumaImageModifyNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, + result_url_extractor=image_result_url_extractor, + node_id=unique_id, auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -473,6 +490,7 @@ class 
LumaTextToVideoGenerationNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -486,6 +504,7 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): loop: bool, seed, luma_concepts: LumaConceptChain = None, + unique_id: str = None, **kwargs, ): validate_string(prompt, strip_whitespace=False, min_length=3) @@ -512,6 +531,9 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): ) response_api: LumaGeneration = operation.execute() + if unique_id: + PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id) + operation = PollingOperation( poll_endpoint=ApiEndpoint( path=f"/proxy/luma/generations/{response_api.id}", @@ -522,6 +544,9 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, + result_url_extractor=video_result_url_extractor, + node_id=unique_id, + estimated_duration=LUMA_T2V_AVERAGE_DURATION, auth_kwargs=kwargs, ) response_poll = operation.execute() @@ -597,6 +622,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -611,6 +637,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): first_image: torch.Tensor = None, last_image: torch.Tensor = None, luma_concepts: LumaConceptChain = None, + unique_id: str = None, **kwargs, ): if first_image is None and last_image is None: @@ -642,6 +669,9 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): ) response_api: LumaGeneration = operation.execute() + if unique_id: + PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id) + operation = PollingOperation( poll_endpoint=ApiEndpoint( path=f"/proxy/luma/generations/{response_api.id}", @@ -652,6 +682,9 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): completed_statuses=[LumaState.completed], failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, + result_url_extractor=video_result_url_extractor, + node_id=unique_id, + estimated_duration=LUMA_I2V_AVERAGE_DURATION, auth_kwargs=kwargs, ) response_poll = operation.execute() diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index fd64aeb0b..9b46636db 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -1,3 +1,7 @@ +from typing import Union +import logging +import torch + from comfy.comfy_types.node_typing import IO from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis import ( @@ -20,16 +24,19 @@ from comfy_api_nodes.apinode_utils import ( upload_images_to_comfyapi, validate_string, ) +from server import PromptServer -import torch -import logging +I2V_AVERAGE_DURATION = 114 +T2V_AVERAGE_DURATION = 234 class MinimaxTextToVideoNode: """ Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API. 
""" + AVERAGE_DURATION = T2V_AVERAGE_DURATION + @classmethod def INPUT_TYPES(s): return { @@ -68,6 +75,7 @@ class MinimaxTextToVideoNode: "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -85,6 +93,7 @@ class MinimaxTextToVideoNode: model="T2V-01", image: torch.Tensor=None, # used for ImageToVideo subject: torch.Tensor=None, # used for SubjectToVideo + unique_id: Union[str, None]=None, **kwargs, ): ''' @@ -138,6 +147,8 @@ class MinimaxTextToVideoNode: completed_statuses=["Success"], failed_statuses=["Fail"], status_extractor=lambda x: x.status.value, + estimated_duration=self.AVERAGE_DURATION, + node_id=unique_id, auth_kwargs=kwargs, ) task_result = video_generate_operation.execute() @@ -164,6 +175,12 @@ class MinimaxTextToVideoNode: f"No video was found in the response. Full response: {file_result.model_dump()}" ) logging.info(f"Generated video URL: {file_url}") + if unique_id: + if hasattr(file_result.file, "backup_download_url"): + message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}" + else: + message = f"Result URL: {file_url}" + PromptServer.instance.send_progress_text(message, unique_id) video_io = download_url_to_bytesio(file_url) if video_io is None: @@ -178,6 +195,8 @@ class MinimaxImageToVideoNode(MinimaxTextToVideoNode): Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. """ + AVERAGE_DURATION = I2V_AVERAGE_DURATION + @classmethod def INPUT_TYPES(s): return { @@ -223,6 +242,7 @@ class MinimaxImageToVideoNode(MinimaxTextToVideoNode): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -239,6 +259,8 @@ class MinimaxSubjectToVideoNode(MinimaxTextToVideoNode): Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. 
""" + AVERAGE_DURATION = T2V_AVERAGE_DURATION + @classmethod def INPUT_TYPES(s): return { @@ -282,6 +304,7 @@ class MinimaxSubjectToVideoNode(MinimaxTextToVideoNode): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index c63908be2..ce8054afc 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -96,6 +96,7 @@ class OpenAIDalle2(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -113,6 +114,7 @@ class OpenAIDalle2(ComfyNodeABC): mask=None, n=1, size="1024x1024", + unique_id=None, **kwargs ): validate_string(prompt, strip_whitespace=False) @@ -176,7 +178,7 @@ class OpenAIDalle2(ComfyNodeABC): response = operation.execute() - img_tensor = validate_and_cast_response(response) + img_tensor = validate_and_cast_response(response, node_id=unique_id) return (img_tensor,) @@ -242,6 +244,7 @@ class OpenAIDalle3(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -258,6 +261,7 @@ class OpenAIDalle3(ComfyNodeABC): style="natural", quality="standard", size="1024x1024", + unique_id=None, **kwargs ): validate_string(prompt, strip_whitespace=False) @@ -284,7 +288,7 @@ class OpenAIDalle3(ComfyNodeABC): response = operation.execute() - img_tensor = validate_and_cast_response(response) + img_tensor = validate_and_cast_response(response, node_id=unique_id) return (img_tensor,) @@ -375,6 +379,7 @@ class OpenAIGPTImage1(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -394,6 +399,7 @@ class OpenAIGPTImage1(ComfyNodeABC): mask=None, n=1, size="1024x1024", + unique_id=None, **kwargs ): validate_string(prompt, strip_whitespace=False) @@ -476,7 +482,7 @@ class OpenAIGPTImage1(ComfyNodeABC): response = operation.execute() - img_tensor = validate_and_cast_response(response) + img_tensor = validate_and_cast_response(response, node_id=unique_id) return (img_tensor,) diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index 08ec9cf07..30562790a 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -121,7 +121,10 @@ class PikaNodeBase(ComfyNodeABC): RETURN_TYPES = ("VIDEO",) def poll_for_task_status( - self, task_id: str, auth_kwargs: Optional[dict[str,str]] = None + self, + task_id: str, + auth_kwargs: Optional[dict[str, str]] = None, + node_id: Optional[str] = None, ) -> PikaGenerateResponse: polling_operation = PollingOperation( poll_endpoint=ApiEndpoint( @@ -141,13 +144,19 @@ class PikaNodeBase(ComfyNodeABC): response.progress if hasattr(response, "progress") else None ), auth_kwargs=auth_kwargs, + result_url_extractor=lambda response: ( + response.url if hasattr(response, "url") else None + ), + node_id=node_id, + estimated_duration=60 ) return polling_operation.execute() def execute_task( self, initial_operation: SynchronousOperation[R, PikaGenerateResponse], - auth_kwargs: Optional[dict[str,str]] = None, + auth_kwargs: Optional[dict[str, str]] = None, + node_id: Optional[str] = None, ) -> tuple[VideoFromFile]: """Executes the initial operation then polls for the task status until it is completed. 
@@ -208,7 +217,8 @@ class PikaImageToVideoV2_2(PikaNodeBase): seed: int, resolution: str, duration: int, - **kwargs + unique_id: str, + **kwargs, ) -> tuple[VideoFromFile]: # Convert image to BytesIO image_bytes_io = tensor_to_bytesio(image) @@ -238,7 +248,7 @@ class PikaImageToVideoV2_2(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs) + return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaTextToVideoNodeV2_2(PikaNodeBase): @@ -262,6 +272,7 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -275,6 +286,7 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): resolution: str, duration: int, aspect_ratio: float, + unique_id: str, **kwargs, ) -> tuple[VideoFromFile]: initial_operation = SynchronousOperation( @@ -296,7 +308,7 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): content_type="application/x-www-form-urlencoded", ) - return self.execute_task(initial_operation, auth_kwargs=kwargs) + return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaScenesV2_2(PikaNodeBase): @@ -340,6 +352,7 @@ class PikaScenesV2_2(PikaNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -354,6 +367,7 @@ class PikaScenesV2_2(PikaNodeBase): duration: int, ingredients_mode: str, aspect_ratio: float, + unique_id: str, image_ingredient_1: Optional[torch.Tensor] = None, image_ingredient_2: Optional[torch.Tensor] = None, image_ingredient_3: Optional[torch.Tensor] = None, @@ -403,7 +417,7 @@ class PikaScenesV2_2(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs) + return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikAdditionsNode(PikaNodeBase): @@ -439,6 +453,7 @@ class PikAdditionsNode(PikaNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -451,6 +466,7 @@ class PikAdditionsNode(PikaNodeBase): prompt_text: str, negative_prompt: str, seed: int, + unique_id: str, **kwargs, ) -> tuple[VideoFromFile]: # Convert video to BytesIO @@ -487,7 +503,7 @@ class PikAdditionsNode(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs) + return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaSwapsNode(PikaNodeBase): @@ -532,6 +548,7 @@ class PikaSwapsNode(PikaNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -546,6 +563,7 @@ class PikaSwapsNode(PikaNodeBase): prompt_text: str, negative_prompt: str, seed: int, + unique_id: str, **kwargs, ) -> tuple[VideoFromFile]: # Convert video to BytesIO @@ -592,7 +610,7 @@ class PikaSwapsNode(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs) + return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaffectsNode(PikaNodeBase): @@ -637,6 +655,7 @@ class PikaffectsNode(PikaNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -649,6 +668,7 @@ class PikaffectsNode(PikaNodeBase): prompt_text: str, negative_prompt: str, seed: int, + unique_id: str, **kwargs, ) -> 
tuple[VideoFromFile]: @@ -670,7 +690,7 @@ class PikaffectsNode(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs) + return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaStartEndFrameNode2_2(PikaNodeBase): @@ -689,6 +709,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -703,6 +724,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): seed: int, resolution: str, duration: int, + unique_id: str, **kwargs, ) -> tuple[VideoFromFile]: @@ -733,7 +755,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs) + return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) NODE_CLASS_MAPPINGS = { diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index 0c29e77c2..ef4a9a802 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -1,5 +1,5 @@ from inspect import cleandoc - +from typing import Optional from comfy_api_nodes.apis.pixverse_api import ( PixverseTextVideoRequest, PixverseImageVideoRequest, @@ -34,11 +34,22 @@ import requests from io import BytesIO +AVERAGE_DURATION_T2V = 32 +AVERAGE_DURATION_I2V = 30 +AVERAGE_DURATION_T2T = 52 + + +def get_video_url_from_response( + response: PixverseGenerationStatusResponse, +) -> Optional[str]: + if response.Resp is None or response.Resp.url is None: + return None + return str(response.Resp.url) + + def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): # first, upload image to Pixverse and get image id to use in actual generation call - files = { - "image": tensor_to_bytesio(image) - } + files = {"image": tensor_to_bytesio(image)} operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/pixverse/image/upload", @@ -54,7 +65,9 @@ def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): response_upload: PixverseImageUploadResponse = operation.execute() if response_upload.Resp is None: - raise Exception(f"PixVerse image upload request failed: '{response_upload.ErrMsg}'") + raise Exception( + f"PixVerse image upload request failed: '{response_upload.ErrMsg}'" + ) return response_upload.Resp.img_id @@ -73,7 +86,7 @@ class PixverseTemplateNode: def INPUT_TYPES(s): return { "required": { - "template": (list(pixverse_templates.keys()), ), + "template": (list(pixverse_templates.keys()),), } } @@ -87,7 +100,7 @@ class PixverseTemplateNode: class PixverseTextToVideoNode(ComfyNodeABC): """ - Generates videos synchronously based on prompt and output_size. + Generates videos based on prompt and output_size. """ RETURN_TYPES = (IO.VIDEO,) @@ -108,9 +121,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): "tooltip": "Prompt for the video generation", }, ), - "aspect_ratio": ( - [ratio.value for ratio in PixverseAspectRatio], - ), + "aspect_ratio": ([ratio.value for ratio in PixverseAspectRatio],), "quality": ( [resolution.value for resolution in PixverseQuality], { @@ -143,12 +154,13 @@ class PixverseTextToVideoNode(ComfyNodeABC): PixverseIO.TEMPLATE, { "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node." 
- } - ) + }, + ), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -160,8 +172,9 @@ class PixverseTextToVideoNode(ComfyNodeABC): duration_seconds: int, motion_mode: str, seed, - negative_prompt: str=None, - pixverse_template: int=None, + negative_prompt: str = None, + pixverse_template: int = None, + unique_id: Optional[str] = None, **kwargs, ): validate_string(prompt, strip_whitespace=False) @@ -205,19 +218,27 @@ class PixverseTextToVideoNode(ComfyNodeABC): response_model=PixverseGenerationStatusResponse, ), completed_statuses=[PixverseStatus.successful], - failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], + failed_statuses=[ + PixverseStatus.contents_moderation, + PixverseStatus.failed, + PixverseStatus.deleted, + ], status_extractor=lambda x: x.Resp.status, auth_kwargs=kwargs, + node_id=unique_id, + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_T2V, ) response_poll = operation.execute() vid_response = requests.get(response_poll.Resp.url) + return (VideoFromFile(BytesIO(vid_response.content)),) class PixverseImageToVideoNode(ComfyNodeABC): """ - Generates videos synchronously based on prompt and output_size. + Generates videos based on prompt and output_size. """ RETURN_TYPES = (IO.VIDEO,) @@ -230,9 +251,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): def INPUT_TYPES(s): return { "required": { - "image": ( - IO.IMAGE, - ), + "image": (IO.IMAGE,), "prompt": ( IO.STRING, { @@ -273,12 +292,13 @@ class PixverseImageToVideoNode(ComfyNodeABC): PixverseIO.TEMPLATE, { "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node." - } - ) + }, + ), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -290,8 +310,9 @@ class PixverseImageToVideoNode(ComfyNodeABC): duration_seconds: int, motion_mode: str, seed, - negative_prompt: str=None, - pixverse_template: int=None, + negative_prompt: str = None, + pixverse_template: int = None, + unique_id: Optional[str] = None, **kwargs, ): validate_string(prompt, strip_whitespace=False) @@ -337,9 +358,16 @@ class PixverseImageToVideoNode(ComfyNodeABC): response_model=PixverseGenerationStatusResponse, ), completed_statuses=[PixverseStatus.successful], - failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], + failed_statuses=[ + PixverseStatus.contents_moderation, + PixverseStatus.failed, + PixverseStatus.deleted, + ], status_extractor=lambda x: x.Resp.status, auth_kwargs=kwargs, + node_id=unique_id, + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_I2V, ) response_poll = operation.execute() @@ -349,7 +377,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): class PixverseTransitionVideoNode(ComfyNodeABC): """ - Generates videos synchronously based on prompt and output_size. + Generates videos based on prompt and output_size. 
""" RETURN_TYPES = (IO.VIDEO,) @@ -362,12 +390,8 @@ class PixverseTransitionVideoNode(ComfyNodeABC): def INPUT_TYPES(s): return { "required": { - "first_frame": ( - IO.IMAGE, - ), - "last_frame": ( - IO.IMAGE, - ), + "first_frame": (IO.IMAGE,), + "last_frame": (IO.IMAGE,), "prompt": ( IO.STRING, { @@ -408,6 +432,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -420,7 +445,8 @@ class PixverseTransitionVideoNode(ComfyNodeABC): duration_seconds: int, motion_mode: str, seed, - negative_prompt: str=None, + negative_prompt: str = None, + unique_id: Optional[str] = None, **kwargs, ): validate_string(prompt, strip_whitespace=False) @@ -467,9 +493,16 @@ class PixverseTransitionVideoNode(ComfyNodeABC): response_model=PixverseGenerationStatusResponse, ), completed_statuses=[PixverseStatus.successful], - failed_statuses=[PixverseStatus.contents_moderation, PixverseStatus.failed, PixverseStatus.deleted], + failed_statuses=[ + PixverseStatus.contents_moderation, + PixverseStatus.failed, + PixverseStatus.deleted, + ], status_extractor=lambda x: x.Resp.status, auth_kwargs=kwargs, + node_id=unique_id, + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_T2V, ) response_poll = operation.execute() diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index 767d93e3c..e369c4b7e 100644 --- a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -1,5 +1,6 @@ from __future__ import annotations from inspect import cleandoc +from typing import Optional from comfy.utils import ProgressBar from comfy_extras.nodes_images import SVG # Added from comfy.comfy_types.node_typing import IO @@ -29,6 +30,8 @@ from comfy_api_nodes.apinode_utils import ( resize_mask_to_image, validate_string, ) +from server import PromptServer + import torch from io import BytesIO from PIL import UnidentifiedImageError @@ -388,6 +391,7 @@ class RecraftTextToImageNode: "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -400,6 +404,7 @@ class RecraftTextToImageNode: recraft_style: RecraftStyle = None, negative_prompt: str = None, recraft_controls: RecraftControls = None, + unique_id: Optional[str] = None, **kwargs, ): validate_string(prompt, strip_whitespace=False, max_length=1000) @@ -436,8 +441,15 @@ class RecraftTextToImageNode: ) response: RecraftImageGenerationResponse = operation.execute() images = [] + urls = [] for data in response.data: with handle_recraft_image_output(): + if unique_id and data.url: + urls.append(data.url) + urls_string = '\n'.join(urls) + PromptServer.instance.send_progress_text( + f"Result URL: {urls_string}", unique_id + ) image = bytesio_to_image_tensor( download_url_to_bytesio(data.url, timeout=1024) ) @@ -763,6 +775,7 @@ class RecraftTextToVectorNode: "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -775,6 +788,7 @@ class RecraftTextToVectorNode: seed, negative_prompt: str = None, recraft_controls: RecraftControls = None, + unique_id: Optional[str] = None, **kwargs, ): validate_string(prompt, strip_whitespace=False, max_length=1000) @@ -809,7 +823,14 @@ class RecraftTextToVectorNode: ) response: RecraftImageGenerationResponse = operation.execute() svg_data = [] + urls = [] for data in response.data: + if unique_id and data.url: + urls.append(data.url) + # 
Print result on each iteration in case of error + PromptServer.instance.send_progress_text( + f"Result URL: {' '.join(urls)}", unique_id + ) svg_data.append(download_url_to_bytesio(data.url, timeout=1024)) return (SVG(svg_data),) diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index 2740179c8..df846d5dd 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -3,6 +3,7 @@ import logging import base64 import requests import torch +from typing import Optional from comfy.comfy_types.node_typing import IO, ComfyNodeABC from comfy_api.input_impl.video_types import VideoFromFile @@ -24,6 +25,8 @@ from comfy_api_nodes.apinode_utils import ( tensor_to_base64_string ) +AVERAGE_DURATION_VIDEO_GEN = 32 + def convert_image_to_base64(image: torch.Tensor): if image is None: return None @@ -31,6 +34,22 @@ def convert_image_to_base64(image: torch.Tensor): scaled_image = downscale_image_tensor(image, total_pixels=2048*2048) return tensor_to_base64_string(scaled_image) + +def get_video_url_from_response(poll_response: Veo2GenVidPollResponse) -> Optional[str]: + if ( + poll_response.response + and hasattr(poll_response.response, "videos") + and poll_response.response.videos + and len(poll_response.response.videos) > 0 + ): + video = poll_response.response.videos[0] + else: + return None + if hasattr(video, "gcsUri") and video.gcsUri: + return str(video.gcsUri) + return None + + class VeoVideoGenerationNode(ComfyNodeABC): """ Generates videos from text prompts using Google's Veo API. @@ -115,6 +134,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -134,6 +154,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): person_generation="ALLOW", seed=0, image=None, + unique_id: Optional[str] = None, **kwargs, ): # Prepare the instances for the request @@ -215,7 +236,10 @@ class VeoVideoGenerationNode(ComfyNodeABC): operationName=operation_name ), auth_kwargs=kwargs, - poll_interval=5.0 + poll_interval=5.0, + result_url_extractor=get_video_url_from_response, + node_id=unique_id, + estimated_duration=AVERAGE_DURATION_VIDEO_GEN, ) # Execute the polling operation From f3ff5c40db3b68449b47112591eef31094cffe70 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Tue, 13 May 2025 22:28:30 -0700 Subject: [PATCH 0142/1073] don't retry if API returns task failure (#8111) --- comfy_api_nodes/apis/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 838ff1e8d..62866216f 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -1105,7 +1105,7 @@ class PollingOperation(Generic[T, R]): except Exception as e: # For other errors, increment count and potentially abort consecutive_errors += 1 - if consecutive_errors >= max_consecutive_errors: + if consecutive_errors >= max_consecutive_errors or status == TaskStatus.FAILED: raise Exception( f"Polling aborted after {consecutive_errors} consecutive errors: {str(e)}" ) from e From 08368f8e00c07d6888907557c5ef1da7a64b5e42 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 14 May 2025 14:54:50 -0700 Subject: [PATCH 0143/1073] Update comment on ROCm pytorch attention in README. 
(#8123) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index deee70c6b..dcdaa353c 100644 --- a/README.md +++ b/README.md @@ -302,7 +302,7 @@ For AMD 7600 and maybe other RDNA3 cards: ```HSA_OVERRIDE_GFX_VERSION=11.0.0 pyt ### AMD ROCm Tips -You can enable experimental memory efficient attention on pytorch 2.5 in ComfyUI on RDNA3 and potentially other AMD GPUs using this command: +You can enable experimental memory efficient attention on recent pytorch in ComfyUI on some AMD GPUs using this command, it should already be enabled by default on RDNA3. If this improves speed for you on latest pytorch on your GPU please report it so that I can enable it by default. ```TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL=1 python main.py --use-pytorch-cross-attention``` From f1f9763b4c0e7c361ec2808b205ad32297c4777b Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Wed, 14 May 2025 21:11:41 -0700 Subject: [PATCH 0144/1073] Add `get_duration` method to Comfy VIDEO type (#8122) * get duration from VIDEO type * video get_duration unit test * fix Windows unit test: can't delete opened temp file --- comfy_api/input/video_types.py | 10 + comfy_api/input_impl/video_types.py | 32 +++ tests-unit/comfy_api_test/video_types_test.py | 239 ++++++++++++++++++ 3 files changed, 281 insertions(+) create mode 100644 tests-unit/comfy_api_test/video_types_test.py diff --git a/comfy_api/input/video_types.py b/comfy_api/input/video_types.py index 0676e0e66..dc22d34ff 100644 --- a/comfy_api/input/video_types.py +++ b/comfy_api/input/video_types.py @@ -43,3 +43,13 @@ class VideoInput(ABC): components = self.get_components() return components.images.shape[2], components.images.shape[1] + def get_duration(self) -> float: + """ + Returns the duration of the video in seconds. + + Returns: + Duration in seconds + """ + components = self.get_components() + frame_count = components.images.shape[0] + return float(frame_count / components.frame_rate) diff --git a/comfy_api/input_impl/video_types.py b/comfy_api/input_impl/video_types.py index ae48dbaa4..197f6558c 100644 --- a/comfy_api/input_impl/video_types.py +++ b/comfy_api/input_impl/video_types.py @@ -80,6 +80,38 @@ class VideoFromFile(VideoInput): return stream.width, stream.height raise ValueError(f"No video stream found in file '{self.__file}'") + def get_duration(self) -> float: + """ + Returns the duration of the video in seconds. 
+ + Returns: + Duration in seconds + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + with av.open(self.__file, mode="r") as container: + if container.duration is not None: + return float(container.duration / av.time_base) + + # Fallback: calculate from frame count and frame rate + video_stream = next( + (s for s in container.streams if s.type == "video"), None + ) + if video_stream and video_stream.frames and video_stream.average_rate: + return float(video_stream.frames / video_stream.average_rate) + + # Last resort: decode frames to count them + if video_stream and video_stream.average_rate: + frame_count = 0 + container.seek(0) + for packet in container.demux(video_stream): + for _ in packet.decode(): + frame_count += 1 + if frame_count > 0: + return float(frame_count / video_stream.average_rate) + + raise ValueError(f"Could not determine duration for file '{self.__file}'") + def get_components_internal(self, container: InputContainer) -> VideoComponents: # Get video frames frames = [] diff --git a/tests-unit/comfy_api_test/video_types_test.py b/tests-unit/comfy_api_test/video_types_test.py new file mode 100644 index 000000000..b25fcb1ca --- /dev/null +++ b/tests-unit/comfy_api_test/video_types_test.py @@ -0,0 +1,239 @@ +import pytest +import torch +import tempfile +import os +import av +import io +from fractions import Fraction +from comfy_api.input_impl.video_types import VideoFromFile, VideoFromComponents +from comfy_api.util.video_types import VideoComponents +from comfy_api.input.basic_types import AudioInput +from av.error import InvalidDataError + +EPSILON = 0.0001 + + +@pytest.fixture +def sample_images(): + """3-frame 2x2 RGB video tensor""" + return torch.rand(3, 2, 2, 3) + + +@pytest.fixture +def sample_audio(): + """Stereo audio with 44.1kHz sample rate""" + return AudioInput( + { + "waveform": torch.rand(1, 2, 1000), + "sample_rate": 44100, + } + ) + + +@pytest.fixture +def video_components(sample_images, sample_audio): + """VideoComponents with images, audio, and metadata""" + return VideoComponents( + images=sample_images, + audio=sample_audio, + frame_rate=Fraction(30), + metadata={"test": "metadata"}, + ) + + +def create_test_video(width=4, height=4, frames=3, fps=30): + """Helper to create a temporary video file""" + tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) + with av.open(tmp.name, mode="w") as container: + stream = container.add_stream("h264", rate=fps) + stream.width = width + stream.height = height + stream.pix_fmt = "yuv420p" + + for i in range(frames): + frame = av.VideoFrame.from_ndarray( + torch.ones(height, width, 3, dtype=torch.uint8).numpy() * (i * 85), + format="rgb24", + ) + frame = frame.reformat(format="yuv420p") + packet = stream.encode(frame) + container.mux(packet) + + # Flush + packet = stream.encode(None) + container.mux(packet) + + return tmp.name + + +@pytest.fixture +def simple_video_file(): + """4x4 video with 3 frames at 30fps""" + file_path = create_test_video() + yield file_path + os.unlink(file_path) + + +def test_video_from_components_get_duration(video_components): + """Duration calculated correctly from frame count and frame rate""" + video = VideoFromComponents(video_components) + duration = video.get_duration() + + expected_duration = 3.0 / 30.0 + assert duration == pytest.approx(expected_duration) + + +def test_video_from_components_get_duration_different_frame_rates(sample_images): + """Duration correct for different frame rates including fractional""" + # Test with 60 fps + components_60fps = 
VideoComponents(images=sample_images, frame_rate=Fraction(60)) + video_60fps = VideoFromComponents(components_60fps) + assert video_60fps.get_duration() == pytest.approx(3.0 / 60.0) + + # Test with fractional frame rate (23.976fps) + components_frac = VideoComponents( + images=sample_images, frame_rate=Fraction(24000, 1001) + ) + video_frac = VideoFromComponents(components_frac) + expected_frac = 3.0 / (24000.0 / 1001.0) + assert video_frac.get_duration() == pytest.approx(expected_frac) + + +def test_video_from_components_get_duration_empty_video(): + """Duration is zero for empty video""" + empty_components = VideoComponents( + images=torch.zeros(0, 2, 2, 3), frame_rate=Fraction(30) + ) + video = VideoFromComponents(empty_components) + assert video.get_duration() == 0.0 + + +def test_video_from_components_get_dimensions(video_components): + """Dimensions returned correctly from image tensor shape""" + video = VideoFromComponents(video_components) + width, height = video.get_dimensions() + assert width == 2 + assert height == 2 + + +def test_video_from_file_get_duration(simple_video_file): + """Duration extracted from file metadata""" + video = VideoFromFile(simple_video_file) + duration = video.get_duration() + assert duration == pytest.approx(0.1, abs=0.01) + + +def test_video_from_file_get_dimensions(simple_video_file): + """Dimensions read from stream without decoding frames""" + video = VideoFromFile(simple_video_file) + width, height = video.get_dimensions() + assert width == 4 + assert height == 4 + + +def test_video_from_file_bytesio_input(): + """VideoFromFile works with BytesIO input""" + buffer = io.BytesIO() + with av.open(buffer, mode="w", format="mp4") as container: + stream = container.add_stream("h264", rate=30) + stream.width = 2 + stream.height = 2 + stream.pix_fmt = "yuv420p" + + frame = av.VideoFrame.from_ndarray( + torch.zeros(2, 2, 3, dtype=torch.uint8).numpy(), format="rgb24" + ) + frame = frame.reformat(format="yuv420p") + packet = stream.encode(frame) + container.mux(packet) + packet = stream.encode(None) + container.mux(packet) + + buffer.seek(0) + video = VideoFromFile(buffer) + + assert video.get_dimensions() == (2, 2) + assert video.get_duration() == pytest.approx(1 / 30, abs=0.01) + + +def test_video_from_file_invalid_file_error(): + """InvalidDataError raised for non-video files""" + with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp: + tmp.write(b"not a video file") + tmp.flush() + tmp_name = tmp.name + + try: + with pytest.raises(InvalidDataError): + video = VideoFromFile(tmp_name) + video.get_dimensions() + finally: + os.unlink(tmp_name) + + +def test_video_from_file_audio_only_error(): + """ValueError raised for audio-only files""" + with tempfile.NamedTemporaryFile(suffix=".m4a", delete=False) as tmp: + tmp_name = tmp.name + + try: + with av.open(tmp_name, mode="w") as container: + stream = container.add_stream("aac", rate=44100) + stream.sample_rate = 44100 + stream.format = "fltp" + + audio_data = torch.zeros(1, 1024).numpy() + audio_frame = av.AudioFrame.from_ndarray( + audio_data, format="fltp", layout="mono" + ) + audio_frame.sample_rate = 44100 + audio_frame.pts = 0 + packet = stream.encode(audio_frame) + container.mux(packet) + + for packet in stream.encode(None): + container.mux(packet) + + with pytest.raises(ValueError, match="No video stream found"): + video = VideoFromFile(tmp_name) + video.get_dimensions() + finally: + os.unlink(tmp_name) + + +def test_single_frame_video(): + """Single frame video has correct duration""" + 
components = VideoComponents( + images=torch.rand(1, 10, 10, 3), frame_rate=Fraction(1) + ) + video = VideoFromComponents(components) + assert video.get_duration() == 1.0 + + +@pytest.mark.parametrize( + "frame_rate,expected_fps", + [ + (Fraction(24000, 1001), 24000 / 1001), + (Fraction(30000, 1001), 30000 / 1001), + (Fraction(25, 1), 25.0), + (Fraction(50, 2), 25.0), + ], +) +def test_fractional_frame_rates(frame_rate, expected_fps): + """Duration calculated correctly for various fractional frame rates""" + components = VideoComponents(images=torch.rand(100, 4, 4, 3), frame_rate=frame_rate) + video = VideoFromComponents(components) + duration = video.get_duration() + expected_duration = 100.0 / expected_fps + assert duration == pytest.approx(expected_duration) + + +def test_duration_consistency(video_components): + """get_duration() consistent with manual calculation from components""" + video = VideoFromComponents(video_components) + + duration = video.get_duration() + components = video.get_components() + manual_duration = float(components.images.shape[0] / components.frame_rate) + + assert duration == pytest.approx(manual_duration) From 6a2e4bb9e00c1ef467000d880abbbaf284c26d4a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 15 May 2025 05:21:47 -0700 Subject: [PATCH 0145/1073] Remove old hack used to fix windows pytorch 2.4 on the portable. (#8139) Not necessary anymore. --- fix_torch.py | 28 ---------------------------- main.py | 7 ------- 2 files changed, 35 deletions(-) delete mode 100644 fix_torch.py diff --git a/fix_torch.py b/fix_torch.py deleted file mode 100644 index ce117b639..000000000 --- a/fix_torch.py +++ /dev/null @@ -1,28 +0,0 @@ -import importlib.util -import shutil -import os -import ctypes -import logging - - -def fix_pytorch_libomp(): - """ - Fix PyTorch libomp DLL issue on Windows by copying the correct DLL file if needed. 
- """ - torch_spec = importlib.util.find_spec("torch") - for folder in torch_spec.submodule_search_locations: - lib_folder = os.path.join(folder, "lib") - test_file = os.path.join(lib_folder, "fbgemm.dll") - dest = os.path.join(lib_folder, "libomp140.x86_64.dll") - if os.path.exists(dest): - break - - with open(test_file, "rb") as f: - contents = f.read() - if b"libomp140.x86_64.dll" not in contents: - break - try: - ctypes.cdll.LoadLibrary(test_file) - except FileNotFoundError: - logging.warning("Detected pytorch version with libomp issue, patching.") - shutil.copyfile(os.path.join(lib_folder, "libiomp5md.dll"), dest) diff --git a/main.py b/main.py index 221e48e41..0fde6d221 100644 --- a/main.py +++ b/main.py @@ -125,13 +125,6 @@ if __name__ == "__main__": import cuda_malloc -if args.windows_standalone_build: - try: - from fix_torch import fix_pytorch_libomp - fix_pytorch_libomp() - except: - pass - import comfy.utils import execution From c820ef950d10a6b4e4fa8ab28bc09274d563b13c Mon Sep 17 00:00:00 2001 From: George0726 <38740075+George0726@users.noreply.github.com> Date: Thu, 15 May 2025 19:00:43 -0400 Subject: [PATCH 0146/1073] Add Wan-FUN Camera Control models and Add WanCameraImageToVideo node (#8013) * support wan camera models * fix by ruff check * change camera_condition type; make camera_condition optional * support camera trajectory nodes * fix camera direction --------- Co-authored-by: Qirui Sun --- comfy/ldm/wan/model.py | 143 ++++++++++++++++ comfy/model_base.py | 11 ++ comfy/supported_models.py | 12 +- comfy_extras/nodes_camera_trajectory.py | 218 ++++++++++++++++++++++++ comfy_extras/nodes_wan.py | 47 +++++ nodes.py | 1 + 6 files changed, 431 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_camera_trajectory.py diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index fc5ff40c5..a996dedf4 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -247,6 +247,60 @@ class VaceWanAttentionBlock(WanAttentionBlock): return c_skip, c +class WanCamAdapter(nn.Module): + def __init__(self, in_dim, out_dim, kernel_size, stride, num_residual_blocks=1, operation_settings={}): + super(WanCamAdapter, self).__init__() + + # Pixel Unshuffle: reduce spatial dimensions by a factor of 8 + self.pixel_unshuffle = nn.PixelUnshuffle(downscale_factor=8) + + # Convolution: reduce spatial dimensions by a factor + # of 2 (without overlap) + self.conv = operation_settings.get("operations").Conv2d(in_dim * 64, out_dim, kernel_size=kernel_size, stride=stride, padding=0, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + # Residual blocks for feature extraction + self.residual_blocks = nn.Sequential( + *[WanCamResidualBlock(out_dim, operation_settings = operation_settings) for _ in range(num_residual_blocks)] + ) + + def forward(self, x): + # Reshape to merge the frame dimension into batch + bs, c, f, h, w = x.size() + x = x.permute(0, 2, 1, 3, 4).contiguous().view(bs * f, c, h, w) + + # Pixel Unshuffle operation + x_unshuffled = self.pixel_unshuffle(x) + + # Convolution operation + x_conv = self.conv(x_unshuffled) + + # Feature extraction with residual blocks + out = self.residual_blocks(x_conv) + + # Reshape to restore original bf dimension + out = out.view(bs, f, out.size(1), out.size(2), out.size(3)) + + # Permute dimensions to reorder (if needed), e.g., swap channels and feature frames + out = out.permute(0, 2, 1, 3, 4) + + return out + + +class WanCamResidualBlock(nn.Module): + def __init__(self, dim, operation_settings={}): + 
super(WanCamResidualBlock, self).__init__() + self.conv1 = operation_settings.get("operations").Conv2d(dim, dim, kernel_size=3, padding=1, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.relu = nn.ReLU(inplace=True) + self.conv2 = operation_settings.get("operations").Conv2d(dim, dim, kernel_size=3, padding=1, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, x): + residual = x + out = self.relu(self.conv1(x)) + out = self.conv2(out) + out += residual + return out + + class Head(nn.Module): def __init__(self, dim, out_dim, patch_size, eps=1e-6, operation_settings={}): @@ -637,3 +691,92 @@ class VaceWanModel(WanModel): # unpatchify x = self.unpatchify(x, grid_sizes) return x + +class CameraWanModel(WanModel): + r""" + Wan diffusion backbone supporting both text-to-video and image-to-video. + """ + + def __init__(self, + model_type='camera', + patch_size=(1, 2, 2), + text_len=512, + in_dim=16, + dim=2048, + ffn_dim=8192, + freq_dim=256, + text_dim=4096, + out_dim=16, + num_heads=16, + num_layers=32, + window_size=(-1, -1), + qk_norm=True, + cross_attn_norm=True, + eps=1e-6, + flf_pos_embed_token_number=None, + image_model=None, + in_dim_control_adapter=24, + device=None, + dtype=None, + operations=None, + ): + + super().__init__(model_type='i2v', patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim, num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, flf_pos_embed_token_number=flf_pos_embed_token_number, image_model=image_model, device=device, dtype=dtype, operations=operations) + operation_settings = {"operations": operations, "device": device, "dtype": dtype} + + self.control_adapter = WanCamAdapter(in_dim_control_adapter, dim, kernel_size=patch_size[1:], stride=patch_size[1:], operation_settings=operation_settings) + + + def forward_orig( + self, + x, + t, + context, + clip_fea=None, + freqs=None, + camera_conditions = None, + transformer_options={}, + **kwargs, + ): + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + if self.control_adapter is not None and camera_conditions is not None: + x_camera = self.control_adapter(camera_conditions).to(x.dtype) + x = x + x_camera + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t).to(dtype=x[0].dtype)) + e0 = self.time_projection(e).unflatten(1, (6, self.dim)) + + # context + context = self.text_embedding(context) + + context_img_len = None + if clip_fea is not None: + if self.img_emb is not None: + context_clip = self.img_emb(clip_fea) # bs x 257 x dim + context = torch.concat([context_clip, context], dim=1) + context_img_len = clip_fea.shape[-2] + + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) + return out + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + + # head + x = self.head(x, e) + + # 
unpatchify + x = self.unpatchify(x, grid_sizes) + return x diff --git a/comfy/model_base.py b/comfy/model_base.py index 047861593..f475e837e 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1079,6 +1079,17 @@ class WAN21_Vace(WAN21): out['vace_strength'] = comfy.conds.CONDConstant(vace_strength) return out +class WAN21_Camera(WAN21): + def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): + super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.CameraWanModel) + self.image_to_video = image_to_video + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + camera_conditions = kwargs.get("camera_conditions", None) + if camera_conditions is not None: + out['camera_conditions'] = comfy.conds.CONDRegular(camera_conditions) + return out class Hunyuan3Dv2(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): diff --git a/comfy/supported_models.py b/comfy/supported_models.py index fef25eb24..667393ac0 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -992,6 +992,16 @@ class WAN21_FunControl2V(WAN21_T2V): out = model_base.WAN21(self, image_to_video=False, device=device) return out +class WAN21_Camera(WAN21_T2V): + unet_config = { + "image_model": "wan2.1", + "model_type": "i2v", + "in_dim": 32, + } + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.WAN21_Camera(self, image_to_video=False, device=device) + return out class WAN21_Vace(WAN21_T2V): unet_config = { "image_model": "wan2.1", @@ -1129,6 +1139,6 @@ class ACEStep(supported_models_base.BASE): def clip_target(self, state_dict={}): return supported_models_base.ClipTarget(comfy.text_encoders.ace.AceT5Tokenizer, comfy.text_encoders.ace.AceT5Model) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_camera_trajectory.py b/comfy_extras/nodes_camera_trajectory.py new file mode 100644 index 000000000..b84b4785c --- /dev/null +++ b/comfy_extras/nodes_camera_trajectory.py @@ -0,0 +1,218 @@ +import nodes +import torch +import numpy as np +from einops import rearrange +import comfy.model_management + + + +MAX_RESOLUTION = nodes.MAX_RESOLUTION + +CAMERA_DICT = { + "base_T_norm": 1.5, + "base_angle": np.pi/3, + "Static": { "angle":[0., 0., 0.], "T":[0., 0., 0.]}, + "Pan Up": { "angle":[0., 0., 0.], "T":[0., -1., 
0.]}, + "Pan Down": { "angle":[0., 0., 0.], "T":[0.,1.,0.]}, + "Pan Left": { "angle":[0., 0., 0.], "T":[-1.,0.,0.]}, + "Pan Right": { "angle":[0., 0., 0.], "T": [1.,0.,0.]}, + "Zoom In": { "angle":[0., 0., 0.], "T": [0.,0.,2.]}, + "Zoom Out": { "angle":[0., 0., 0.], "T": [0.,0.,-2.]}, + "Anti Clockwise (ACW)": { "angle": [0., 0., -1.], "T":[0., 0., 0.]}, + "ClockWise (CW)": { "angle": [0., 0., 1.], "T":[0., 0., 0.]}, +} + + +def process_pose_params(cam_params, width=672, height=384, original_pose_width=1280, original_pose_height=720, device='cpu'): + + def get_relative_pose(cam_params): + """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py + """ + abs_w2cs = [cam_param.w2c_mat for cam_param in cam_params] + abs_c2ws = [cam_param.c2w_mat for cam_param in cam_params] + cam_to_origin = 0 + target_cam_c2w = np.array([ + [1, 0, 0, 0], + [0, 1, 0, -cam_to_origin], + [0, 0, 1, 0], + [0, 0, 0, 1] + ]) + abs2rel = target_cam_c2w @ abs_w2cs[0] + ret_poses = [target_cam_c2w, ] + [abs2rel @ abs_c2w for abs_c2w in abs_c2ws[1:]] + ret_poses = np.array(ret_poses, dtype=np.float32) + return ret_poses + + """Modified from https://github.com/hehao13/CameraCtrl/blob/main/inference.py + """ + cam_params = [Camera(cam_param) for cam_param in cam_params] + + sample_wh_ratio = width / height + pose_wh_ratio = original_pose_width / original_pose_height # Assuming placeholder ratios, change as needed + + if pose_wh_ratio > sample_wh_ratio: + resized_ori_w = height * pose_wh_ratio + for cam_param in cam_params: + cam_param.fx = resized_ori_w * cam_param.fx / width + else: + resized_ori_h = width / pose_wh_ratio + for cam_param in cam_params: + cam_param.fy = resized_ori_h * cam_param.fy / height + + intrinsic = np.asarray([[cam_param.fx * width, + cam_param.fy * height, + cam_param.cx * width, + cam_param.cy * height] + for cam_param in cam_params], dtype=np.float32) + + K = torch.as_tensor(intrinsic)[None] # [1, 1, 4] + c2ws = get_relative_pose(cam_params) # Assuming this function is defined elsewhere + c2ws = torch.as_tensor(c2ws)[None] # [1, n_frame, 4, 4] + plucker_embedding = ray_condition(K, c2ws, height, width, device=device)[0].permute(0, 3, 1, 2).contiguous() # V, 6, H, W + plucker_embedding = plucker_embedding[None] + plucker_embedding = rearrange(plucker_embedding, "b f c h w -> b f h w c")[0] + return plucker_embedding + +class Camera(object): + """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py + """ + def __init__(self, entry): + fx, fy, cx, cy = entry[1:5] + self.fx = fx + self.fy = fy + self.cx = cx + self.cy = cy + c2w_mat = np.array(entry[7:]).reshape(4, 4) + self.c2w_mat = c2w_mat + self.w2c_mat = np.linalg.inv(c2w_mat) + +def ray_condition(K, c2w, H, W, device): + """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py + """ + # c2w: B, V, 4, 4 + # K: B, V, 4 + + B = K.shape[0] + + j, i = torch.meshgrid( + torch.linspace(0, H - 1, H, device=device, dtype=c2w.dtype), + torch.linspace(0, W - 1, W, device=device, dtype=c2w.dtype), + indexing='ij' + ) + i = i.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5 # [B, HxW] + j = j.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5 # [B, HxW] + + fx, fy, cx, cy = K.chunk(4, dim=-1) # B,V, 1 + + zs = torch.ones_like(i) # [B, HxW] + xs = (i - cx) / fx * zs + ys = (j - cy) / fy * zs + zs = zs.expand_as(ys) + + directions = torch.stack((xs, ys, zs), dim=-1) # B, V, HW, 3 + directions = directions / directions.norm(dim=-1, keepdim=True) # B, V, HW, 3 + + rays_d = directions @ c2w[..., :3, 
:3].transpose(-1, -2) # B, V, 3, HW + rays_o = c2w[..., :3, 3] # B, V, 3 + rays_o = rays_o[:, :, None].expand_as(rays_d) # B, V, 3, HW + # c2w @ directions + rays_dxo = torch.cross(rays_o, rays_d) + plucker = torch.cat([rays_dxo, rays_d], dim=-1) + plucker = plucker.reshape(B, c2w.shape[1], H, W, 6) # B, V, H, W, 6 + # plucker = plucker.permute(0, 1, 4, 2, 3) + return plucker + +def get_camera_motion(angle, T, speed, n=81): + def compute_R_form_rad_angle(angles): + theta_x, theta_y, theta_z = angles + Rx = np.array([[1, 0, 0], + [0, np.cos(theta_x), -np.sin(theta_x)], + [0, np.sin(theta_x), np.cos(theta_x)]]) + + Ry = np.array([[np.cos(theta_y), 0, np.sin(theta_y)], + [0, 1, 0], + [-np.sin(theta_y), 0, np.cos(theta_y)]]) + + Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0], + [np.sin(theta_z), np.cos(theta_z), 0], + [0, 0, 1]]) + + R = np.dot(Rz, np.dot(Ry, Rx)) + return R + RT = [] + for i in range(n): + _angle = (i/n)*speed*(CAMERA_DICT["base_angle"])*angle + R = compute_R_form_rad_angle(_angle) + _T=(i/n)*speed*(CAMERA_DICT["base_T_norm"])*(T.reshape(3,1)) + _RT = np.concatenate([R,_T], axis=1) + RT.append(_RT) + RT = np.stack(RT) + return RT + +class WanCameraEmbeding: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "camera_pose":(["Static","Pan Up","Pan Down","Pan Left","Pan Right","Zoom In","Zoom Out","Anti Clockwise (ACW)", "ClockWise (CW)"],{"default":"Static"}), + "width": ("INT", {"default": 832, "min": 16, "max": MAX_RESOLUTION, "step": 16}), + "height": ("INT", {"default": 480, "min": 16, "max": MAX_RESOLUTION, "step": 16}), + "length": ("INT", {"default": 81, "min": 1, "max": MAX_RESOLUTION, "step": 4}), + }, + "optional":{ + "speed":("FLOAT",{"default":1.0, "min": 0, "max": 10.0, "step": 0.1}), + "fx":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.000000001}), + "fy":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.000000001}), + "cx":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.01}), + "cy":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.01}), + } + + } + + RETURN_TYPES = ("WAN_CAMERA_EMBEDDING","INT","INT","INT") + RETURN_NAMES = ("camera_embedding","width","height","length") + FUNCTION = "run" + CATEGORY = "camera" + + def run(self, camera_pose, width, height, length, speed=1.0, fx=0.5, fy=0.5, cx=0.5, cy=0.5): + """ + Use Camera trajectory as extrinsic parameters to calculate Plücker embeddings (Sitzmann et al., 2021) + Adapted from https://github.com/aigc-apps/VideoX-Fun/blob/main/comfyui/comfyui_nodes.py + """ + motion_list = [camera_pose] + speed = speed + angle = np.array(CAMERA_DICT[motion_list[0]]["angle"]) + T = np.array(CAMERA_DICT[motion_list[0]]["T"]) + RT = get_camera_motion(angle, T, speed, length) + + trajs=[] + for cp in RT.tolist(): + traj=[fx,fy,cx,cy,0,0] + traj.extend(cp[0]) + traj.extend(cp[1]) + traj.extend(cp[2]) + traj.extend([0,0,0,1]) + trajs.append(traj) + + cam_params = np.array([[float(x) for x in pose] for pose in trajs]) + cam_params = np.concatenate([np.zeros_like(cam_params[:, :1]), cam_params], 1) + control_camera_video = process_pose_params(cam_params, width=width, height=height) + control_camera_video = control_camera_video.permute([3, 0, 1, 2]).unsqueeze(0).to(device=comfy.model_management.intermediate_device()) + + control_camera_video = torch.concat( + [ + torch.repeat_interleave(control_camera_video[:, :, 0:1], repeats=4, dim=2), + control_camera_video[:, :, 1:] + ], dim=2 + ).transpose(1, 2) + + # Reshape, transpose, and view into desired shape + b, f, c, h, w = 
control_camera_video.shape + control_camera_video = control_camera_video.contiguous().view(b, f // 4, 4, c, h, w).transpose(2, 3) + control_camera_video = control_camera_video.contiguous().view(b, f // 4, c * 4, h, w).transpose(1, 2) + + return (control_camera_video, width, height, length) + + +NODE_CLASS_MAPPINGS = { + "WanCameraEmbeding": WanCameraEmbeding, +} diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 9dda64597..a91b4aba9 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -297,6 +297,52 @@ class TrimVideoLatent: samples_out["samples"] = s1[:, :, trim_amount:] return (samples_out,) +class WanCameraImageToVideo: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "vae": ("VAE", ), + "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + "optional": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ), + "start_image": ("IMAGE", ), + "camera_conditions": ("WAN_CAMERA_EMBEDDING", ), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + + CATEGORY = "conditioning/video_models" + + def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None, camera_conditions=None): + latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent) + + if start_image is not None: + start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + concat_latent_image = vae.encode(start_image[:, :, :, :3]) + concat_latent[:,:,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]] + + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent}) + + if camera_conditions is not None: + positive = node_helpers.conditioning_set_values(positive, {'camera_conditions': camera_conditions}) + negative = node_helpers.conditioning_set_values(negative, {'camera_conditions': camera_conditions}) + + if clip_vision_output is not None: + positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) + negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) + + out_latent = {} + out_latent["samples"] = latent + return (positive, negative, out_latent) NODE_CLASS_MAPPINGS = { "WanImageToVideo": WanImageToVideo, @@ -305,4 +351,5 @@ NODE_CLASS_MAPPINGS = { "WanFirstLastFrameToVideo": WanFirstLastFrameToVideo, "WanVaceToVideo": WanVaceToVideo, "TrimVideoLatent": TrimVideoLatent, + "WanCameraImageToVideo": WanCameraImageToVideo, } diff --git a/nodes.py b/nodes.py index 54e3886a3..0a9db1393 100644 --- a/nodes.py +++ b/nodes.py @@ -2265,6 +2265,7 @@ def init_builtin_extra_nodes(): 
"nodes_preview_any.py", "nodes_ace.py", "nodes_string.py", + "nodes_camera_trajectory.py", ] import_failed = [] From 1c2d45d2b575c40efcb303a61318d4a415242e52 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 15 May 2025 16:02:19 -0700 Subject: [PATCH 0147/1073] Fix typo in last PR. (#8144) More robust model detection for future proofing. --- comfy/model_detection.py | 2 ++ comfy/supported_models.py | 2 +- comfy_extras/nodes_camera_trajectory.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 28c586389..20f287df9 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -361,6 +361,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["model_type"] = "vace" dit_config["vace_in_dim"] = state_dict['{}vace_patch_embedding.weight'.format(key_prefix)].shape[1] dit_config["vace_layers"] = count_blocks(state_dict_keys, '{}vace_blocks.'.format(key_prefix) + '{}.') + elif '{}control_adapter.conv.weight'.format(key_prefix) in state_dict_keys: + dit_config["model_type"] = "camera" else: if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys: dit_config["model_type"] = "i2v" diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 667393ac0..efe2e6b8f 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -995,7 +995,7 @@ class WAN21_FunControl2V(WAN21_T2V): class WAN21_Camera(WAN21_T2V): unet_config = { "image_model": "wan2.1", - "model_type": "i2v", + "model_type": "camera", "in_dim": 32, } diff --git a/comfy_extras/nodes_camera_trajectory.py b/comfy_extras/nodes_camera_trajectory.py index b84b4785c..5e0e39f91 100644 --- a/comfy_extras/nodes_camera_trajectory.py +++ b/comfy_extras/nodes_camera_trajectory.py @@ -148,7 +148,7 @@ def get_camera_motion(angle, T, speed, n=81): RT = np.stack(RT) return RT -class WanCameraEmbeding: +class WanCameraEmbedding: @classmethod def INPUT_TYPES(cls): return { @@ -214,5 +214,5 @@ class WanCameraEmbeding: NODE_CLASS_MAPPINGS = { - "WanCameraEmbeding": WanCameraEmbeding, + "WanCameraEmbedding": WanCameraEmbedding, } From 7046983d9538d49d1dd286a513aa6db42b9a74fd Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Sat, 17 May 2025 03:45:36 +1000 Subject: [PATCH 0148/1073] Remove Desktop versioning claim from README (#8155) --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index dcdaa353c..9b5f301c9 100644 --- a/README.md +++ b/README.md @@ -110,7 +110,6 @@ ComfyUI follows a weekly release cycle every Friday, with three interconnected r 2. **[ComfyUI Desktop](https://github.com/Comfy-Org/desktop)** - Builds a new release using the latest stable core version - - Version numbers match the core release (e.g., Desktop v1.7.0 uses Core v1.7.0) 3. **[ComfyUI Frontend](https://github.com/Comfy-Org/ComfyUI_frontend)** - Weekly frontend updates are merged into the core repository From dc46db7aa48acd035dfc10730a064a9c994fb76b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 16 May 2025 12:15:55 -0700 Subject: [PATCH 0149/1073] Make ImagePadForOutpaint return a 3 channel mask. 
(#8157) --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 0a9db1393..95e831b8b 100644 --- a/nodes.py +++ b/nodes.py @@ -1940,7 +1940,7 @@ class ImagePadForOutpaint: mask[top:top + d2, left:left + d3] = t - return (new_image, mask) + return (new_image, mask.unsqueeze(0)) NODE_CLASS_MAPPINGS = { From aee2908d0395577a6e2e13d1307aaf271424108b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 17 May 2025 03:27:34 -0700 Subject: [PATCH 0150/1073] Remove useless log. (#8166) --- comfy/utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/comfy/utils.py b/comfy/utils.py index 561e1b858..1f8d71292 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -78,8 +78,6 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False): pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args) else: pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle) - if "global_step" in pl_sd: - logging.debug(f"Global Step: {pl_sd['global_step']}") if "state_dict" in pl_sd: sd = pl_sd["state_dict"] else: From f5e4e976f43c9ed79e75b27f73719b2708f9ded9 Mon Sep 17 00:00:00 2001 From: Silver <65376327+silveroxides@users.noreply.github.com> Date: Sun, 18 May 2025 08:59:06 +0200 Subject: [PATCH 0151/1073] Add missing category for T5TokenizerOption (#8177) Change it if you need to but it should at least have a category. --- comfy_extras/nodes_cond.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy_extras/nodes_cond.py b/comfy_extras/nodes_cond.py index 574262178..58c16f621 100644 --- a/comfy_extras/nodes_cond.py +++ b/comfy_extras/nodes_cond.py @@ -31,6 +31,7 @@ class T5TokenizerOptions: } } + CATEGORY = "_for_testing/conditioning" RETURN_TYPES = ("CLIP",) FUNCTION = "set_options" From 05eb10b43a42929033f449def7cd5a8feeb84673 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 18 May 2025 01:08:47 -0700 Subject: [PATCH 0152/1073] Validate video inputs (#8133) * validate kling lip sync input video * add tooltips * update duration estimates * decrease epsilon * fix rebase error --- comfy_api_nodes/nodes_kling.py | 51 ++++++------ comfy_api_nodes/util/__init__.py | 0 comfy_api_nodes/util/validation_utils.py | 100 +++++++++++++++++++++++ 3 files changed, 126 insertions(+), 25 deletions(-) create mode 100644 comfy_api_nodes/util/__init__.py create mode 100644 comfy_api_nodes/util/validation_utils.py diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 456a86905..641cd6353 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -65,6 +65,12 @@ from comfy_api_nodes.apinode_utils import ( download_url_to_image_tensor, ) from comfy_api_nodes.mapper_utils import model_field_to_node_input +from comfy_api_nodes.util.validation_utils import ( + validate_image_dimensions, + validate_image_aspect_ratio, + validate_video_dimensions, + validate_video_duration, +) from comfy_api.input.basic_types import AudioInput from comfy_api.input.video_types import VideoInput from comfy_api.input_impl import VideoFromFile @@ -80,18 +86,16 @@ PATH_CHARACTER_IMAGE = f"/proxy/kling/{KLING_API_VERSION}/images/generations" PATH_VIRTUAL_TRY_ON = f"/proxy/kling/{KLING_API_VERSION}/images/kolors-virtual-try-on" PATH_IMAGE_GENERATIONS = f"/proxy/kling/{KLING_API_VERSION}/images/generations" - MAX_PROMPT_LENGTH_T2V = 2500 MAX_PROMPT_LENGTH_I2V = 500 MAX_PROMPT_LENGTH_IMAGE_GEN = 500 
MAX_NEGATIVE_PROMPT_LENGTH_IMAGE_GEN = 200 MAX_PROMPT_LENGTH_LIP_SYNC = 120 -# TODO: adjust based on tests -AVERAGE_DURATION_T2V = 319 # 319, -AVERAGE_DURATION_I2V = 164 # 164, -AVERAGE_DURATION_LIP_SYNC = 120 -AVERAGE_DURATION_VIRTUAL_TRY_ON = 19 # 19, +AVERAGE_DURATION_T2V = 319 +AVERAGE_DURATION_I2V = 164 +AVERAGE_DURATION_LIP_SYNC = 455 +AVERAGE_DURATION_VIRTUAL_TRY_ON = 19 AVERAGE_DURATION_IMAGE_GEN = 32 AVERAGE_DURATION_VIDEO_EFFECTS = 320 AVERAGE_DURATION_VIDEO_EXTEND = 320 @@ -211,23 +215,8 @@ def validate_input_image(image: torch.Tensor) -> None: See: https://app.klingai.com/global/dev/document-api/apiReference/model/imageToVideo """ - if len(image.shape) == 4: - height, width = image.shape[1], image.shape[2] - elif len(image.shape) == 3: - height, width = image.shape[0], image.shape[1] - else: - raise ValueError("Invalid image tensor shape.") - - # Ensure minimum resolution is met - if height < 300: - raise ValueError("Image height must be at least 300px") - if width < 300: - raise ValueError("Image width must be at least 300px") - - # Ensure aspect ratio is within acceptable range - aspect_ratio = width / height - if aspect_ratio < 1 / 2.5 or aspect_ratio > 2.5: - raise ValueError("Image aspect ratio must be between 1:2.5 and 2.5:1") + validate_image_dimensions(image, min_width=300, min_height=300) + validate_image_aspect_ratio(image, min_aspect_ratio=1 / 2.5, max_aspect_ratio=2.5) def get_camera_control_input_config( @@ -1243,6 +1232,17 @@ class KlingLipSyncBase(KlingNodeBase): RETURN_TYPES = ("VIDEO", "STRING", "STRING") RETURN_NAMES = ("VIDEO", "video_id", "duration") + def validate_lip_sync_video(self, video: VideoInput): + """ + Validates the input video adheres to the expectations of the Kling Lip Sync API: + - Video length does not exceed 10s and is not shorter than 2s + - Length and width dimensions should both be between 720px and 1920px + + See: https://app.klingai.com/global/dev/document-api/apiReference/model/videoTolip + """ + validate_video_dimensions(video, 720, 1920) + validate_video_duration(video, 2, 10) + def validate_text(self, text: str): if not text: raise ValueError("Text is required") @@ -1282,6 +1282,7 @@ class KlingLipSyncBase(KlingNodeBase): ) -> tuple[VideoFromFile, str, str]: if text: self.validate_text(text) + self.validate_lip_sync_video(video) # Upload video to Comfy API and get download URL video_url = upload_video_to_comfyapi(video, auth_kwargs=kwargs) @@ -1352,7 +1353,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): }, } - DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file." + DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length." def api_call( self, @@ -1464,7 +1465,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): }, } - DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt." + DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length." 
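# A minimal usage sketch of the validation flow described above (illustrative
# only; it assumes a VideoInput exposing get_dimensions() and get_duration(),
# which the helpers in comfy_api_nodes/util/validation_utils.py below rely on):
#
#     validate_video_dimensions(video, 720, 1920)  # positional min/max width, in px
#     validate_video_duration(video, 2, 10)        # min/max duration, in seconds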
def api_call( self, diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/comfy_api_nodes/util/validation_utils.py b/comfy_api_nodes/util/validation_utils.py new file mode 100644 index 000000000..031b9fbd3 --- /dev/null +++ b/comfy_api_nodes/util/validation_utils.py @@ -0,0 +1,100 @@ +import logging +from typing import Optional + +import torch +from comfy_api.input.video_types import VideoInput + + +def get_image_dimensions(image: torch.Tensor) -> tuple[int, int]: + if len(image.shape) == 4: + return image.shape[1], image.shape[2] + elif len(image.shape) == 3: + return image.shape[0], image.shape[1] + else: + raise ValueError("Invalid image tensor shape.") + + +def validate_image_dimensions( + image: torch.Tensor, + min_width: Optional[int] = None, + max_width: Optional[int] = None, + min_height: Optional[int] = None, + max_height: Optional[int] = None, +): + height, width = get_image_dimensions(image) + + if min_width is not None and width < min_width: + raise ValueError(f"Image width must be at least {min_width}px, got {width}px") + if max_width is not None and width > max_width: + raise ValueError(f"Image width must be at most {max_width}px, got {width}px") + if min_height is not None and height < min_height: + raise ValueError( + f"Image height must be at least {min_height}px, got {height}px" + ) + if max_height is not None and height > max_height: + raise ValueError(f"Image height must be at most {max_height}px, got {height}px") + + +def validate_image_aspect_ratio( + image: torch.Tensor, + min_aspect_ratio: Optional[float] = None, + max_aspect_ratio: Optional[float] = None, +): + width, height = get_image_dimensions(image) + aspect_ratio = width / height + + if min_aspect_ratio is not None and aspect_ratio < min_aspect_ratio: + raise ValueError( + f"Image aspect ratio must be at least {min_aspect_ratio}, got {aspect_ratio}" + ) + if max_aspect_ratio is not None and aspect_ratio > max_aspect_ratio: + raise ValueError( + f"Image aspect ratio must be at most {max_aspect_ratio}, got {aspect_ratio}" + ) + + +def validate_video_dimensions( + video: VideoInput, + min_width: Optional[int] = None, + max_width: Optional[int] = None, + min_height: Optional[int] = None, + max_height: Optional[int] = None, +): + try: + width, height = video.get_dimensions() + except Exception as e: + logging.error("Error getting dimensions of video: %s", e) + return + + if min_width is not None and width < min_width: + raise ValueError(f"Video width must be at least {min_width}px, got {width}px") + if max_width is not None and width > max_width: + raise ValueError(f"Video width must be at most {max_width}px, got {width}px") + if min_height is not None and height < min_height: + raise ValueError( + f"Video height must be at least {min_height}px, got {height}px" + ) + if max_height is not None and height > max_height: + raise ValueError(f"Video height must be at most {max_height}px, got {height}px") + + +def validate_video_duration( + video: VideoInput, + min_duration: Optional[float] = None, + max_duration: Optional[float] = None, +): + try: + duration = video.get_duration() + except Exception as e: + logging.error("Error getting duration of video: %s", e) + return + + epsilon = 0.0001 + if min_duration is not None and min_duration - epsilon > duration: + raise ValueError( + f"Video duration must be at least {min_duration}s, got {duration}s" + ) + if max_duration is not None and duration > max_duration + epsilon: + raise 
ValueError( + f"Video duration must be at most {max_duration}s, got {duration}s" + ) From 62690eddec9b7d715b4c37246f71abf5ca1c5844 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 18 May 2025 01:09:56 -0700 Subject: [PATCH 0153/1073] Node to add pixel space noise to an image. (#8182) --- comfy_extras/nodes_images.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 77c305619..29a5d5b61 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -13,6 +13,7 @@ import os import re from io import BytesIO from inspect import cleandoc +import torch from comfy.comfy_types import FileLocator @@ -74,6 +75,24 @@ class ImageFromBatch: s = s_in[batch_index:batch_index + length].clone() return (s,) + +class ImageAddNoise: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": ("IMAGE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "control_after_generate": True, "tooltip": "The random seed used for creating the noise."}), + "strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "repeat" + + CATEGORY = "image" + + def repeat(self, image, seed, strength): + generator = torch.manual_seed(seed) + s = torch.clip((image + strength * torch.randn(image.size(), generator=generator, device="cpu").to(image)), min=0.0, max=1.0) + return (s,) + class SaveAnimatedWEBP: def __init__(self): self.output_dir = folder_paths.get_output_directory() @@ -295,6 +314,7 @@ NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, "ImageFromBatch": ImageFromBatch, + "ImageAddNoise": ImageAddNoise, "SaveAnimatedWEBP": SaveAnimatedWEBP, "SaveAnimatedPNG": SaveAnimatedPNG, "SaveSVGNode": SaveSVGNode, From 3d44a09812c4f0880c30fcd1876125b7319300b4 Mon Sep 17 00:00:00 2001 From: LaVie024 <62406970+LaVie024@users.noreply.github.com> Date: Sun, 18 May 2025 08:11:11 +0000 Subject: [PATCH 0154/1073] Update nodes_string.py (#8173) --- comfy_extras/nodes_string.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_string.py b/comfy_extras/nodes_string.py index a852326e5..9eaa71236 100644 --- a/comfy_extras/nodes_string.py +++ b/comfy_extras/nodes_string.py @@ -8,7 +8,8 @@ class StringConcatenate(): return { "required": { "string_a": (IO.STRING, {"multiline": True}), - "string_b": (IO.STRING, {"multiline": True}) + "string_b": (IO.STRING, {"multiline": True}), + "delimiter": (IO.STRING, {"multiline": False, "default": ", "}) } } @@ -16,8 +17,8 @@ class StringConcatenate(): FUNCTION = "execute" CATEGORY = "utils/string" - def execute(self, string_a, string_b, **kwargs): - return string_a + string_b, + def execute(self, string_a, string_b, delimiter, **kwargs): + return delimiter.join((string_a, string_b)), class StringSubstring(): @classmethod From d8e5662822168101afb5e08a8ba75b6eefff6e02 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 18 May 2025 01:12:12 -0700 Subject: [PATCH 0155/1073] Remove default delimiter. 
(#8183) --- comfy_extras/nodes_string.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_string.py b/comfy_extras/nodes_string.py index 9eaa71236..b24222cee 100644 --- a/comfy_extras/nodes_string.py +++ b/comfy_extras/nodes_string.py @@ -9,7 +9,7 @@ class StringConcatenate(): "required": { "string_a": (IO.STRING, {"multiline": True}), "string_b": (IO.STRING, {"multiline": True}), - "delimiter": (IO.STRING, {"multiline": False, "default": ", "}) + "delimiter": (IO.STRING, {"multiline": False, "default": ""}) } } From e930a387d62cc819117502993b0b821b1e3f2687 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 19 May 2025 01:58:41 -0700 Subject: [PATCH 0156/1073] Update AMD instructions in README. (#8198) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9b5f301c9..15157f527 100644 --- a/README.md +++ b/README.md @@ -197,11 +197,11 @@ Put your VAE in: models/vae ### AMD GPUs (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: -```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4``` +```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.3``` This is the command to install the nightly with ROCm 6.3 which might have some performance improvements: -```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.3``` +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.4``` ### Intel GPUs (Windows and Linux) From 4f3b50ba510e02fa3fdd8c755ef9ad319b36bd61 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Tue, 20 May 2025 06:40:55 +1000 Subject: [PATCH 0157/1073] Update README ROCm text to match link (#8199) - Follow-up on #8198 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 15157f527..47514d1b4 100644 --- a/README.md +++ b/README.md @@ -199,7 +199,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins ```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.3``` -This is the command to install the nightly with ROCm 6.3 which might have some performance improvements: +This is the command to install the nightly with ROCm 6.4 which might have some performance improvements: ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.4``` From 7e84bf53737879ace37a68dc93e0df7704a53514 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 20 May 2025 02:29:23 -0700 Subject: [PATCH 0158/1073] This doesn't seem to be needed on chroma. 
(#8209)
---
comfy/ldm/chroma/layers.py | 5 -----
1 file changed, 5 deletions(-)
diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py
index 35da91ee2..18a4a9cfc 100644
--- a/comfy/ldm/chroma/layers.py
+++ b/comfy/ldm/chroma/layers.py
@@ -109,9 +109,6 @@ class DoubleStreamBlock(nn.Module):
txt += txt_mod1.gate * self.txt_attn.proj(txt_attn)
txt += txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
- if txt.dtype == torch.float16:
- txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504)
-
return img, txt
@@ -163,8 +160,6 @@ class SingleStreamBlock(nn.Module):
# compute activation in mlp stream, cat again and run second linear layer
output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
x += mod.gate * output
- if x.dtype == torch.float16:
- x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504)
return x
From 87f91307782ce0b401786d8edddd8f618b955141 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 20 May 2025 02:39:55 -0700
Subject: [PATCH 0159/1073] Revert "This doesn't seem to be needed on chroma. (#8209)" (#8210)
This reverts commit 7e84bf53737879ace37a68dc93e0df7704a53514.
---
comfy/ldm/chroma/layers.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py
index 18a4a9cfc..35da91ee2 100644
--- a/comfy/ldm/chroma/layers.py
+++ b/comfy/ldm/chroma/layers.py
@@ -109,6 +109,9 @@ class DoubleStreamBlock(nn.Module):
txt += txt_mod1.gate * self.txt_attn.proj(txt_attn)
txt += txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
+ if txt.dtype == torch.float16:
+ txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504)
+
return img, txt
@@ -163,6 +163,8 @@ class SingleStreamBlock(nn.Module):
# compute activation in mlp stream, cat again and run second linear layer
output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
x += mod.gate * output
+ if x.dtype == torch.float16:
+ x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504)
return x
From 10024a38ea8d7e8950b26500a540cd0323d0e611 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 21 May 2025 04:50:37 -0400
Subject: [PATCH 0160/1073] ComfyUI version v0.3.35
---
comfyui_version.py | 2 +-
pyproject.toml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/comfyui_version.py b/comfyui_version.py
index b740b378d..8db3bc803 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
-__version__ = "0.3.34"
+__version__ = "0.3.35"
diff --git a/pyproject.toml b/pyproject.toml
index 80061b39a..a33fc4370 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
-version = "0.3.34"
+version = "0.3.35"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.9"
From 65da29aaa965afcb0811a9c8dac1cc0facb006d4 Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Wed, 21 May 2025 01:56:56 -0700
Subject: [PATCH 0161/1073] Make torch.compile LoRA/key-compatible (#8213)
* Make torch compile node use wrapper instead of object_patch for the entire diffusion_models object, allowing key associations on diffusion_models to not break (loras, getting attributes, etc.)
* Moved torch compile code into comfy_api so it can be used by custom nodes with a degree of confidence * Refactor set_torch_compile_wrapper to support a list of keys instead of just diffusion_model, as well as additional torch.compile args * remove unused import * Moved torch compile kwargs to be stored in model_options instead of attachments; attachments are more intended for things to be 'persisted', AKA not deepcopied * Add some comments * Remove random line of code, not sure how it got there --- comfy_api/torch_helpers/__init__.py | 5 ++ comfy_api/torch_helpers/torch_compile.py | 69 ++++++++++++++++++++++++ comfy_extras/nodes_torch_compile.py | 5 +- 3 files changed, 77 insertions(+), 2 deletions(-) create mode 100644 comfy_api/torch_helpers/__init__.py create mode 100644 comfy_api/torch_helpers/torch_compile.py diff --git a/comfy_api/torch_helpers/__init__.py b/comfy_api/torch_helpers/__init__.py new file mode 100644 index 000000000..be7ae7a61 --- /dev/null +++ b/comfy_api/torch_helpers/__init__.py @@ -0,0 +1,5 @@ +from .torch_compile import set_torch_compile_wrapper + +__all__ = [ + "set_torch_compile_wrapper", +] diff --git a/comfy_api/torch_helpers/torch_compile.py b/comfy_api/torch_helpers/torch_compile.py new file mode 100644 index 000000000..9223f58db --- /dev/null +++ b/comfy_api/torch_helpers/torch_compile.py @@ -0,0 +1,69 @@ +from __future__ import annotations +import torch + +import comfy.utils +from comfy.patcher_extension import WrappersMP +from typing import TYPE_CHECKING, Callable, Optional +if TYPE_CHECKING: + from comfy.model_patcher import ModelPatcher + from comfy.patcher_extension import WrapperExecutor + + +COMPILE_KEY = "torch.compile" +TORCH_COMPILE_KWARGS = "torch_compile_kwargs" + + +def apply_torch_compile_factory(compiled_module_dict: dict[str, Callable]) -> Callable: + ''' + Create a wrapper that will refer to the compiled_diffusion_model. + ''' + def apply_torch_compile_wrapper(executor: WrapperExecutor, *args, **kwargs): + try: + orig_modules = {} + for key, value in compiled_module_dict.items(): + orig_modules[key] = comfy.utils.get_attr(executor.class_obj, key) + comfy.utils.set_attr(executor.class_obj, key, value) + return executor(*args, **kwargs) + finally: + for key, value in orig_modules.items(): + comfy.utils.set_attr(executor.class_obj, key, value) + return apply_torch_compile_wrapper + + +def set_torch_compile_wrapper(model: ModelPatcher, backend: str, options: Optional[dict[str,str]]=None, + mode: Optional[str]=None, fullgraph=False, dynamic: Optional[bool]=None, + keys: list[str]=["diffusion_model"], *args, **kwargs): + ''' + Perform torch.compile that will be applied at sample time for either the whole model or specific params of the BaseModel instance. + + When keys is None, it will default to using ["diffusion_model"], compiling the whole diffusion_model. + When a list of keys is provided, it will perform torch.compile on only the selected modules. 
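    A minimal illustrative call (mirroring the TorchCompileModel node in
    comfy_extras/nodes_torch_compile.py further down; "inductor" is only an
    example backend string):

        m = model.clone()
        set_torch_compile_wrapper(model=m, backend="inductor")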
+ ''' + # clear out any other torch.compile wrappers + model.remove_wrappers_with_key(WrappersMP.APPLY_MODEL, COMPILE_KEY) + # if no keys, default to 'diffusion_model' + if not keys: + keys = ["diffusion_model"] + # create kwargs dict that can be referenced later + compile_kwargs = { + "backend": backend, + "options": options, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + # get a dict of compiled keys + compiled_modules = {} + for key in keys: + compiled_modules[key] = torch.compile( + model=model.get_model_object(key), + **compile_kwargs, + ) + # add torch.compile wrapper + wrapper_func = apply_torch_compile_factory( + compiled_module_dict=compiled_modules, + ) + # store wrapper to run on BaseModel's apply_model function + model.add_wrapper_with_key(WrappersMP.APPLY_MODEL, COMPILE_KEY, wrapper_func) + # keep compile kwargs for reference + model.model_options[TORCH_COMPILE_KWARGS] = compile_kwargs diff --git a/comfy_extras/nodes_torch_compile.py b/comfy_extras/nodes_torch_compile.py index 1fe6f42c7..605536678 100644 --- a/comfy_extras/nodes_torch_compile.py +++ b/comfy_extras/nodes_torch_compile.py @@ -1,4 +1,5 @@ -import torch +from comfy_api.torch_helpers import set_torch_compile_wrapper + class TorchCompileModel: @classmethod @@ -14,7 +15,7 @@ class TorchCompileModel: def patch(self, model, backend): m = model.clone() - m.add_object_patch("diffusion_model", torch.compile(model=m.get_model_object("diffusion_model"), backend=backend)) + set_torch_compile_wrapper(model=m, backend=backend) return (m, ) NODE_CLASS_MAPPINGS = { From 57893c843f44ea9e8a0be79292d19e5a5e16e9e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=BC=96=E7=A8=8B=E7=95=8C=E7=9A=84=E5=B0=8F=E5=AD=A6?= =?UTF-8?q?=E7=94=9F?= <15620646321@163.com> Date: Wed, 21 May 2025 16:59:42 +0800 Subject: [PATCH 0162/1073] Code Optimization and Issues Fixes in ComfyUI server (#8196) * Update server.py * Update server.py --- server.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server.py b/server.py index cb1c6a8fd..16cd88d91 100644 --- a/server.py +++ b/server.py @@ -226,7 +226,7 @@ class PromptServer(): return response @routes.get("/embeddings") - def get_embeddings(self): + def get_embeddings(request): embeddings = folder_paths.get_filename_list("embeddings") return web.json_response(list(map(lambda a: os.path.splitext(a)[0], embeddings))) @@ -282,7 +282,6 @@ class PromptServer(): a.update(f.read()) b.update(image.file.read()) image.file.seek(0) - f.close() return a.hexdigest() == b.hexdigest() return False From 8bb858e4d39f7f6a6969c584aeeaa1d606a812d6 Mon Sep 17 00:00:00 2001 From: Michael Abrahams Date: Wed, 21 May 2025 05:14:17 -0400 Subject: [PATCH 0163/1073] Improve performance with large number of queued prompts (#8176) * get_current_queue_volatile * restore get_current_queue method * remove extra import --- execution.py | 9 ++++++++- main.py | 3 +-- server.py | 5 +++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/execution.py b/execution.py index e5d1c69d9..15ff7567c 100644 --- a/execution.py +++ b/execution.py @@ -909,7 +909,6 @@ class PromptQueue: self.currently_running = {} self.history = {} self.flags = {} - server.prompt_queue = self def put(self, item): with self.mutex: @@ -954,6 +953,7 @@ class PromptQueue: self.history[prompt[1]].update(history_result) self.server.queue_updated() + # Note: slow def get_current_queue(self): with self.mutex: out = [] @@ -961,6 +961,13 @@ class PromptQueue: out += [x] return (out, copy.deepcopy(self.queue)) + # read-safe as 
long as queue items are immutable + def get_current_queue_volatile(self): + with self.mutex: + running = [x for x in self.currently_running.values()] + queued = copy.copy(self.queue) + return (running, queued) + def get_tasks_remaining(self): with self.mutex: return len(self.queue) + len(self.currently_running) diff --git a/main.py b/main.py index 0fde6d221..fb1f8d20b 100644 --- a/main.py +++ b/main.py @@ -260,7 +260,6 @@ def start_comfyui(asyncio_loop=None): asyncio_loop = asyncio.new_event_loop() asyncio.set_event_loop(asyncio_loop) prompt_server = server.PromptServer(asyncio_loop) - q = execution.PromptQueue(prompt_server) hook_breaker_ac10a0.save_functions() nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes, init_api_nodes=not args.disable_api_nodes) @@ -271,7 +270,7 @@ def start_comfyui(asyncio_loop=None): prompt_server.add_routes() hijack_progress(prompt_server) - threading.Thread(target=prompt_worker, daemon=True, args=(q, prompt_server,)).start() + threading.Thread(target=prompt_worker, daemon=True, args=(prompt_server.prompt_queue, prompt_server,)).start() if args.quick_test_for_ci: exit(0) diff --git a/server.py b/server.py index 16cd88d91..1b0a73601 100644 --- a/server.py +++ b/server.py @@ -29,6 +29,7 @@ import comfy.model_management import node_helpers from comfyui_version import __version__ from app.frontend_management import FrontendManager + from app.user_manager import UserManager from app.model_manager import ModelFileManager from app.custom_node_manager import CustomNodeManager @@ -159,7 +160,7 @@ class PromptServer(): self.custom_node_manager = CustomNodeManager() self.internal_routes = InternalRoutes(self) self.supports = ["custom_nodes_from_web"] - self.prompt_queue = None + self.prompt_queue = execution.PromptQueue(self) self.loop = loop self.messages = asyncio.Queue() self.client_session:Optional[aiohttp.ClientSession] = None @@ -620,7 +621,7 @@ class PromptServer(): @routes.get("/queue") async def get_queue(request): queue_info = {} - current_queue = self.prompt_queue.get_current_queue() + current_queue = self.prompt_queue.get_current_queue_volatile() queue_info['queue_running'] = current_queue[0] queue_info['queue_pending'] = current_queue[1] return web.json_response(queue_info) From ded60c33a0c0231c7109f30072c25e64e360e636 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Thu, 22 May 2025 02:40:08 +0800 Subject: [PATCH 0164/1073] Update templates to 0.1.18 (#8224) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8f7a78984..858e6343c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.19.9 -comfyui-workflow-templates==0.1.14 +comfyui-workflow-templates==0.1.18 torch torchsde torchvision From fc39184ea9a442b6e9a346fa23d1b3cad3a6f493 Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Thu, 22 May 2025 02:24:36 -0400 Subject: [PATCH 0165/1073] Update frontend to 1.20 (#8232) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 858e6343c..5a988ecd9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.19.9 +comfyui-frontend-package==1.20.4 comfyui-workflow-templates==0.1.18 torch torchsde From b838c367209a8530dc1c56c4150988a1d8af7ed6 Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Thu, 22 May 2025 08:08:36 -0400 Subject: [PATCH 0166/1073] remove mtl from 3d model file list (#8192) --- 
comfy_extras/nodes_load_3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_load_3d.py b/comfy_extras/nodes_load_3d.py index d5b4d9111..40d03e18a 100644 --- a/comfy_extras/nodes_load_3d.py +++ b/comfy_extras/nodes_load_3d.py @@ -16,7 +16,7 @@ class Load3D(): os.makedirs(input_dir, exist_ok=True) - files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.obj', '.mtl', '.fbx', '.stl'))] + files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.obj', '.fbx', '.stl'))] return {"required": { "model_file": (sorted(files), {"file_upload": True}), From 4202e956a0172178f5d4ce1971da8c07c93420a9 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 22 May 2025 05:11:13 -0700 Subject: [PATCH 0167/1073] Add append feature to conditioning_set_values (#8239) Refactor unclipconditioning node. --- node_helpers.py | 10 ++++++++-- nodes.py | 11 +---------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/node_helpers.py b/node_helpers.py index c3e1a14ca..4ff960ef8 100644 --- a/node_helpers.py +++ b/node_helpers.py @@ -5,12 +5,18 @@ from comfy.cli_args import args from PIL import ImageFile, UnidentifiedImageError -def conditioning_set_values(conditioning, values={}): +def conditioning_set_values(conditioning, values={}, append=False): c = [] for t in conditioning: n = [t[0], t[1].copy()] for k in values: - n[1][k] = values[k] + val = values[k] + if append: + old_val = n[1].get(k, None) + if old_val is not None: + val = old_val + val + + n[1][k] = val c.append(n) return c diff --git a/nodes.py b/nodes.py index 95e831b8b..1e328651b 100644 --- a/nodes.py +++ b/nodes.py @@ -1103,16 +1103,7 @@ class unCLIPConditioning: if strength == 0: return (conditioning, ) - c = [] - for t in conditioning: - o = t[1].copy() - x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation} - if "unclip_conditioning" in o: - o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x] - else: - o["unclip_conditioning"] = [x] - n = [t[0], o] - c.append(n) + c = node_helpers.conditioning_set_values(conditioning, {"unclip_conditioning": [{"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}]}, append=True) return (c, ) class GLIGENLoader: From f85c08df0615a587e0974678b01199b88a1caae0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 22 May 2025 16:22:26 -0700 Subject: [PATCH 0168/1073] Make VACE conditionings stackable. 
(#8240) --- comfy/ldm/wan/model.py | 10 +++++++--- comfy/model_base.py | 21 +++++++++++++-------- comfy_extras/nodes_wan.py | 5 +++-- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index a996dedf4..1b51a4e4a 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -635,7 +635,7 @@ class VaceWanModel(WanModel): t, context, vace_context, - vace_strength=1.0, + vace_strength, clip_fea=None, freqs=None, transformer_options={}, @@ -661,8 +661,11 @@ class VaceWanModel(WanModel): context = torch.concat([context_clip, context], dim=1) context_img_len = clip_fea.shape[-2] + orig_shape = list(vace_context.shape) + vace_context = vace_context.movedim(0, 1).reshape([-1] + orig_shape[2:]) c = self.vace_patch_embedding(vace_context.float()).to(vace_context.dtype) c = c.flatten(2).transpose(1, 2) + c = list(c.split(orig_shape[0], dim=0)) # arguments x_orig = x @@ -682,8 +685,9 @@ class VaceWanModel(WanModel): ii = self.vace_layers_mapping.get(i, None) if ii is not None: - c_skip, c = self.vace_blocks[ii](c, x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) - x += c_skip * vace_strength + for iii in range(len(c)): + c_skip, c[iii] = self.vace_blocks[ii](c[iii], x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + x += c_skip * vace_strength[iii] del c_skip # head x = self.head(x, e) diff --git a/comfy/model_base.py b/comfy/model_base.py index f475e837e..fb4724690 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1062,20 +1062,25 @@ class WAN21_Vace(WAN21): vace_frames = kwargs.get("vace_frames", None) if vace_frames is None: noise_shape[1] = 32 - vace_frames = torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype) - - for i in range(0, vace_frames.shape[1], 16): - vace_frames = vace_frames.clone() - vace_frames[:, i:i + 16] = self.process_latent_in(vace_frames[:, i:i + 16]) + vace_frames = [torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype)] mask = kwargs.get("vace_mask", None) if mask is None: noise_shape[1] = 64 - mask = torch.ones(noise_shape, device=noise.device, dtype=noise.dtype) + mask = [torch.ones(noise_shape, device=noise.device, dtype=noise.dtype)] * len(vace_frames) - out['vace_context'] = comfy.conds.CONDRegular(torch.cat([vace_frames.to(noise), mask.to(noise)], dim=1)) + vace_frames_out = [] + for j in range(len(vace_frames)): + vf = vace_frames[j].clone() + for i in range(0, vf.shape[1], 16): + vf[:, i:i + 16] = self.process_latent_in(vf[:, i:i + 16]) + vf = torch.cat([vf, mask[j]], dim=1) + vace_frames_out.append(vf) - vace_strength = kwargs.get("vace_strength", 1.0) + vace_frames = torch.stack(vace_frames_out, dim=1) + out['vace_context'] = comfy.conds.CONDRegular(vace_frames) + + vace_strength = kwargs.get("vace_strength", [1.0] * len(vace_frames_out)) out['vace_strength'] = comfy.conds.CONDConstant(vace_strength) return out diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index a91b4aba9..c35c4871c 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -268,8 +268,9 @@ class WanVaceToVideo: trim_latent = reference_image.shape[2] mask = mask.unsqueeze(0) - positive = node_helpers.conditioning_set_values(positive, {"vace_frames": control_video_latent, "vace_mask": mask, "vace_strength": strength}) - negative = node_helpers.conditioning_set_values(negative, {"vace_frames": control_video_latent, "vace_mask": mask, "vace_strength": strength}) + + positive = 
node_helpers.conditioning_set_values(positive, {"vace_frames": [control_video_latent], "vace_mask": [mask], "vace_strength": [strength]}, append=True) + negative = node_helpers.conditioning_set_values(negative, {"vace_frames": [control_video_latent], "vace_mask": [mask], "vace_strength": [strength]}, append=True) latent = torch.zeros([batch_size, 16, latent_length, height // 8, width // 8], device=comfy.model_management.intermediate_device()) out_latent = {} From 30b2eb8a93ce931f7b8e15f9f7dbc7bf751b1c17 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Fri, 23 May 2025 16:15:06 -0400 Subject: [PATCH 0169/1073] create arange on-device (#8255) --- comfy/ldm/chroma/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index 636748fc5..c75023a31 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -163,7 +163,7 @@ class Chroma(nn.Module): distil_guidance = timestep_embedding(guidance.detach().clone(), 16).to(img.device, img.dtype) # get all modulation index - modulation_index = timestep_embedding(torch.arange(mod_index_length), 32).to(img.device, img.dtype) + modulation_index = timestep_embedding(torch.arange(mod_index_length, device=img.device), 32).to(img.device, img.dtype) # we need to broadcast the modulation index here so each batch has all of the index modulation_index = modulation_index.unsqueeze(0).repeat(img.shape[0], 1, 1).to(img.device, img.dtype) # and we need to broadcast timestep and guidance along too From 0b50d4c0db025f4c1ede7d1094567bb22a8901bf Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 23 May 2025 14:43:50 -0700 Subject: [PATCH 0170/1073] Add argument to explicitly enable fp8 compute support. (#8257) This can be used to test if your current GPU/pytorch version supports fp8 matrix mult in combination with --fast or the fp8_e4m3fn_fast dtype. 
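As a usage sketch, the flag can be combined with the performance options mentioned above to check whether a given GPU/pytorch combination really handles fp8 matrix multiplication:

    python main.py --supports-fp8-compute --fast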
--- comfy/cli_args.py | 1 + comfy/model_management.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index de292d9b3..4fb675f99 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -88,6 +88,7 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE" parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.") parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.") +parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.") class LatentPreviewMethod(enum.Enum): NoPreviews = "none" diff --git a/comfy/model_management.py b/comfy/model_management.py index 44aff3762..a49ed83e6 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1257,6 +1257,9 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma return False def supports_fp8_compute(device=None): + if args.supports_fp8_compute: + return True + if not is_nvidia(): return False From 464aece92b86c93694390a1b385b3c505190d0cd Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Fri, 23 May 2025 21:53:49 -0700 Subject: [PATCH 0171/1073] update frontend package to v1.20.5 (#8260) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5a988ecd9..48631633d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.20.4 +comfyui-frontend-package==1.20.5 comfyui-workflow-templates==0.1.18 torch torchsde From 5a87757ef96f807cf1cf5b41c55a0a84c9551f20 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 24 May 2025 03:43:12 -0700 Subject: [PATCH 0172/1073] Better error if sageattention is installed but a dependency is missing. 
(#8264) --- comfy/ldm/modules/attention.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 45f9e311e..2cb77d85d 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -20,8 +20,11 @@ if model_management.xformers_enabled(): if model_management.sage_attention_enabled(): try: from sageattention import sageattn - except ModuleNotFoundError: - logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention") + except ModuleNotFoundError as e: + if e.name == "sageattention": + logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention") + else: + raise e exit(-1) if model_management.flash_attention_enabled(): From ad3bd8aa4904de8c3798e148fcf02b00ac14277c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 24 May 2025 17:30:37 -0400 Subject: [PATCH 0173/1073] ComfyUI version 0.3.36 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 8db3bc803..817b7d83b 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.35" +__version__ = "0.3.36" diff --git a/pyproject.toml b/pyproject.toml index a33fc4370..accf6f864 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.35" +version = "0.3.36" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From a0651359d7a1ee968d5cf01c1b7302e41435e779 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 25 May 2025 02:28:11 -0700 Subject: [PATCH 0174/1073] Return proper error if diffusion model not detected properly. 
(#8272) --- comfy/model_detection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 20f287df9..74f539598 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -620,6 +620,9 @@ def convert_config(unet_config): def unet_config_from_diffusers_unet(state_dict, dtype=None): + if "conv_in.weight" not in state_dict: + return None + match = {} transformer_depth = [] From e5799c4899f968d03a1a656c9c5df2926a0768b1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 26 May 2025 01:29:25 -0700 Subject: [PATCH 0175/1073] Enable pytorch attention by default on AMD gfx1151 (#8282) --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index a49ed83e6..1e6e8553f 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -301,7 +301,7 @@ try: logging.info("AMD arch: {}".format(arch)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much - if any((a in arch) for a in ["gfx1100", "gfx1101"]): # TODO: more arches + if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches ENABLE_PYTORCH_ATTENTION = True except: pass From 89a84e32d2a771743ba10d6a6a0634f7e321fef5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 26 May 2025 13:39:27 -0700 Subject: [PATCH 0176/1073] Disable initial GPU load when novram is used. (#8294) --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 1e6e8553f..f5b37e68e 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -695,7 +695,7 @@ def unet_inital_load_device(parameters, dtype): return torch_dev cpu_dev = torch.device("cpu") - if DISABLE_SMART_MEMORY: + if DISABLE_SMART_MEMORY or vram_state == VRAMState.NO_VRAM: return cpu_dev model_size = dtype_size(dtype) * parameters From 3a10b9641ce05d7e80b7051fda4195978c8ba656 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Tue, 27 May 2025 16:47:06 +1000 Subject: [PATCH 0177/1073] [BugFix] Update frontend to 1.20.6 (#8296) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 48631633d..f56b3e096 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.20.5 +comfyui-frontend-package==1.20.6 comfyui-workflow-templates==0.1.18 torch torchsde From f58f0f56969ccfe8d57a18976cd296719d795730 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Tue, 27 May 2025 00:00:58 -0700 Subject: [PATCH 0178/1073] More API nodes: Gemini/Open AI Chat, Tripo, Rodin, Runway Image (#8295) * Add Ideogram generate node. * Add staging api. * Add API_NODE and common error for missing auth token (#5) * Add Minimax Video Generation + Async Task queue polling example (#6) * [Minimax] Show video preview and embed workflow in ouput (#7) * Remove uv.lock * Remove polling operations. * Revert "Remove polling operations." This reverts commit 8415404ce8fbc0262b7de54fc700c5c8854a34fc. * Update stubs. * Added Ideogram and Minimax back in. 
* Added initial BFL Flux 1.1 [pro] Ultra node (#11) * Manually add BFL polling status response schema (#15) * Add function for uploading files. (#18) * Add Luma nodes (#16) Co-authored-by: Robin Huang * Refactor util functions (#20) * Add rest of Luma node functionality (#19) Co-authored-by: Robin Huang * Fix image_luma_ref not working (#28) Co-authored-by: Robin Huang * [Bug] Remove duplicated option T2V-01 in MinimaxTextToVideoNode (#31) * add veo2, bump av req (#32) * Add Recraft nodes (#29) * Add Kling Nodes (#12) * Add Camera Concepts (luma_concepts) to Luma Video nodes (#33) Co-authored-by: Robin Huang * Add Runway nodes (#17) * Convert Minimax node to use VIDEO output type (#34) * Standard `CATEGORY` system for api nodes (#35) * Set `Content-Type` header when uploading files (#36) * add better error propagation to veo2 (#37) * Add Realistic Image and Logo Raster styles for Recraft v3 (#38) * Fix runway image upload and progress polling (#39) * Fix image upload for Luma: only include `Content-Type` header field if it's set explicitly (#40) * Moved Luma nodes to nodes_luma.py (#47) * Moved Recraft nodes to nodes_recraft.py (#48) * Move and fix BFL nodes to node_bfl.py (#49) * Move and edit Minimax node to nodes_minimax.py (#50) * Add Recraft Text to Vector node, add Save SVG node to handle its output (#53) * Added pixverse_template support to Pixverse Text to Video node (#54) * Added Recraft Controls + Recraft Color RGB nodes (#57) * split remaining nodes out of nodes_api, make utility lib, refactor ideogram (#61) * Set request type explicitly (#66) * Add `control_after_generate` to all seed inputs (#69) * Fix bug: deleting `Content-Type` when property does not exist (#73) * Add Pixverse and updated Kling types (#75) * Added Recraft Style - Infinite Style Library node (#82) * add ideogram v3 (#83) * [Kling] Split Camera Control config to its own node (#81) * Add Pika i2v and t2v nodes (#52) * Remove Runway nodes (#88) * Fix: Prompt text can't be validated in Kling nodes when using primitive nodes (#90) * Update Pika Duration and Resolution options (#94) * Removed Infinite Style Library until later (#99) * fix multi image return (#101) close #96 * Serve SVG files directly (#107) * Add a bunch of nodes, 3 ready to use, the rest waiting for endpoint support (#108) * Revert "Serve SVG files directly" (#111) * Expose 4 remaining Recraft nodes (#112) * [Kling] Add `Duration` and `Video ID` outputs (#105) * Add Kling nodes: camera control, start-end frame, lip-sync, video extend (#115) * Fix error for Recraft ImageToImage error for nonexistent random_seed param (#118) * Add remaining Pika nodes (#119) * Make controls input work for Recraft Image to Image node (#120) * Fix: Nested `AnyUrl` in request model cannot be serialized (Kling, Runway) (#129) * Show errors and API output URLs to the user (change log levels) (#131) * Apply small fixes and most prompt validation (if needed to avoid API error) (#135) * Node name/category modifications (#140) * Add back Recraft Style - Infinite Style Library node (#141) * [Kling] Fix: Correct/verify supported subset of input combos in Kling nodes (#149) * Remove pixverse_template from PixVerse Transition Video node (#155) * Use 3.9 compat syntax (#164) * Handle Comfy API key based authorizaton (#167) Co-authored-by: Jedrzej Kosinski * [BFL] Print download URL of successful task result directly on nodes (#175) * Show output URL and progress text on Pika nodes (#168) * [Ideogram] Print download URL of successful task result directly on nodes (#176) * [Kling] 
Print download URL of successful task result directly on nodes (#181) * Merge upstream may 14 25 (#186) Co-authored-by: comfyanonymous Co-authored-by: AustinMroz Co-authored-by: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Co-authored-by: Benjamin Lu Co-authored-by: Andrew Kvochko Co-authored-by: Pam <42671363+pamparamm@users.noreply.github.com> Co-authored-by: chaObserv <154517000+chaObserv@users.noreply.github.com> Co-authored-by: Yoland Yan <4950057+yoland68@users.noreply.github.com> Co-authored-by: guill Co-authored-by: Chenlei Hu Co-authored-by: Terry Jia Co-authored-by: Silver <65376327+silveroxides@users.noreply.github.com> Co-authored-by: catboxanon <122327233+catboxanon@users.noreply.github.com> Co-authored-by: liesen Co-authored-by: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Co-authored-by: Jedrzej Kosinski Co-authored-by: Robin Huang Co-authored-by: thot experiment <94414189+thot-experiment@users.noreply.github.com> Co-authored-by: blepping <157360029+blepping@users.noreply.github.com> * Update instructions on how to develop API Nodes. (#171) * Add Runway FLF and I2V nodes (#187) * Add OpenAI chat node (#188) * Update README. * Add Google Gemini API node (#191) * Add Runway Gen 4 Text to Image Node (#193) * [Runway, Gemini] Update node display names and attributes (#194) * Update path from "image-to-video" to "image_to_video" (#197) * [Runway] Split I2V nodes into separate gen3 and gen4 nodes (#198) * Update runway i2v ratio enum (#201) * Rodin3D: implement Rodin3D API Nodes (#190) Co-authored-by: WhiteGiven Co-authored-by: Robin Huang * Add Tripo Nodes. (#189) Co-authored-by: Robin Huang * Change casing of categories "3D" => "3d" (#208) * [tripo] fix negtive_prompt and mv2model (#212) * [tripo] set default param to None (#215) * Add description and tooltip to Tripo Refine model. (#218) * Update. * Fix rebase errors. * Fix rebase errors. * Update templates. * Bump frontend. * Add file type info for file inputs. 
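As a small illustration of the helpers this patch adds to comfy_api_nodes/apinode_utils.py (see the diff below), the new data-URI conversion composes MIME guessing with base64 encoding; the filename here is only an example:

    uri = text_filepath_to_data_uri("notes.txt")
    # yields "data:text/plain;base64,..."; falls back to
    # application/octet-stream when the MIME type cannot be guessed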
--------- Co-authored-by: Christian Byrne Co-authored-by: Jedrzej Kosinski Co-authored-by: Chenlei Hu Co-authored-by: thot experiment <94414189+thot-experiment@users.noreply.github.com> Co-authored-by: comfyanonymous Co-authored-by: AustinMroz Co-authored-by: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Co-authored-by: Benjamin Lu Co-authored-by: Andrew Kvochko Co-authored-by: Pam <42671363+pamparamm@users.noreply.github.com> Co-authored-by: chaObserv <154517000+chaObserv@users.noreply.github.com> Co-authored-by: Yoland Yan <4950057+yoland68@users.noreply.github.com> Co-authored-by: guill Co-authored-by: Terry Jia Co-authored-by: Silver <65376327+silveroxides@users.noreply.github.com> Co-authored-by: catboxanon <122327233+catboxanon@users.noreply.github.com> Co-authored-by: liesen Co-authored-by: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Co-authored-by: blepping <157360029+blepping@users.noreply.github.com> Co-authored-by: Changrz <51637999+WhiteGiven@users.noreply.github.com> Co-authored-by: WhiteGiven Co-authored-by: seed93 --- comfy_api_nodes/README.md | 26 +- comfy_api_nodes/apinode_utils.py | 125 +- comfy_api_nodes/apis/__init__.py | 5906 ++++++++++++++--------------- comfy_api_nodes/apis/client.py | 2 +- comfy_api_nodes/apis/rodin_api.py | 57 + comfy_api_nodes/apis/tripo_api.py | 275 ++ comfy_api_nodes/nodes_gemini.py | 446 +++ comfy_api_nodes/nodes_openai.py | 524 ++- comfy_api_nodes/nodes_rodin.py | 462 +++ comfy_api_nodes/nodes_runway.py | 635 ++++ comfy_api_nodes/nodes_tripo.py | 574 +++ nodes.py | 4 + requirements.txt | 2 +- 13 files changed, 5870 insertions(+), 3168 deletions(-) create mode 100644 comfy_api_nodes/apis/rodin_api.py create mode 100644 comfy_api_nodes/apis/tripo_api.py create mode 100644 comfy_api_nodes/nodes_gemini.py create mode 100644 comfy_api_nodes/nodes_rodin.py create mode 100644 comfy_api_nodes/nodes_runway.py create mode 100644 comfy_api_nodes/nodes_tripo.py diff --git a/comfy_api_nodes/README.md b/comfy_api_nodes/README.md index e2633a769..64a389cc1 100644 --- a/comfy_api_nodes/README.md +++ b/comfy_api_nodes/README.md @@ -18,6 +18,8 @@ Follow the instructions [here](https://github.com/Comfy-Org/ComfyUI_frontend) to python run main.py --comfy-api-base https://stagingapi.comfy.org ``` +To authenticate to staging, please login and then ask one of Comfy Org team to whitelist you for access to staging. + API stubs are generated through automatic codegen tools from OpenAPI definitions. Since the Comfy Org OpenAPI definition contains many things from the Comfy Registry as well, we use redocly/cli to filter out only the paths relevant for API nodes. ### Redocly Instructions @@ -28,7 +30,7 @@ When developing locally, use the `redocly-dev.yaml` file to generate pydantic mo Before your API node PR merges, make sure to add the `Released` tag to the `openapi.yaml` file and test in staging. ```bash -# Download the OpenAPI file from prod server. +# Download the OpenAPI file from staging server. curl -o openapi.yaml https://stagingapi.comfy.org/openapi # Filter out unneeded API definitions. @@ -39,3 +41,25 @@ redocly bundle openapi.yaml --output filtered-openapi.yaml --config comfy_api_no datamodel-codegen --use-subclass-enum --field-constraints --strict-types bytes --input filtered-openapi.yaml --output comfy_api_nodes/apis/__init__.py --output-model-type pydantic_v2.BaseModel ``` + + +# Merging to Master + +Before merging to comfyanonymous/ComfyUI master, follow these steps: + +1. 
Add the "Released" tag to the ComfyUI OpenAPI yaml file for each endpoint you are using in the nodes. +1. Make sure the ComfyUI API is deployed to prod with your changes. +1. Run the code generation again with `redocly.yaml` and the production OpenAPI yaml file. + +```bash +# Download the OpenAPI file from prod server. +curl -o openapi.yaml https://api.comfy.org/openapi + +# Filter out unneeded API definitions. +npm install -g @redocly/cli +redocly bundle openapi.yaml --output filtered-openapi.yaml --config comfy_api_nodes/redocly.yaml --remove-unused-components + +# Generate the pydantic datamodels for validation. +datamodel-codegen --use-subclass-enum --field-constraints --strict-types bytes --input filtered-openapi.yaml --output comfy_api_nodes/apis/__init__.py --output-model-type pydantic_v2.BaseModel + +``` diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index 87d8c3e1d..788e2803f 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -1,6 +1,7 @@ from __future__ import annotations import io import logging +import mimetypes from typing import Optional, Union from comfy.utils import common_upscale from comfy_api.input_impl import VideoFromFile @@ -214,6 +215,7 @@ def download_url_to_image_tensor(url: str, timeout: int = None) -> torch.Tensor: image_bytesio = download_url_to_bytesio(url, timeout) return bytesio_to_image_tensor(image_bytesio) + def process_image_response(response: requests.Response) -> torch.Tensor: """Uses content from a Response object and converts it to a torch.Tensor""" return bytesio_to_image_tensor(BytesIO(response.content)) @@ -318,11 +320,27 @@ def tensor_to_data_uri( return f"data:{mime_type};base64,{base64_string}" +def text_filepath_to_base64_string(filepath: str) -> str: + """Converts a text file to a base64 string.""" + with open(filepath, "rb") as f: + file_content = f.read() + return base64.b64encode(file_content).decode("utf-8") + + +def text_filepath_to_data_uri(filepath: str) -> str: + """Converts a text file to a data URI.""" + base64_string = text_filepath_to_base64_string(filepath) + mime_type, _ = mimetypes.guess_type(filepath) + if mime_type is None: + mime_type = "application/octet-stream" + return f"data:{mime_type};base64,{base64_string}" + + def upload_file_to_comfyapi( file_bytes_io: BytesIO, filename: str, upload_mime_type: str, - auth_kwargs: Optional[dict[str,str]] = None, + auth_kwargs: Optional[dict[str, str]] = None, ) -> str: """ Uploads a single file to ComfyUI API and returns its download URL. @@ -357,9 +375,33 @@ def upload_file_to_comfyapi( return response.download_url +def video_to_base64_string( + video: VideoInput, + container_format: VideoContainer = None, + codec: VideoCodec = None +) -> str: + """ + Converts a video input to a base64 string. 
+ + Args: + video: The video input to convert + container_format: Optional container format to use (defaults to video.container if available) + codec: Optional codec to use (defaults to video.codec if available) + """ + video_bytes_io = io.BytesIO() + + # Use provided format/codec if specified, otherwise use video's own if available + format_to_use = container_format if container_format is not None else getattr(video, 'container', VideoContainer.MP4) + codec_to_use = codec if codec is not None else getattr(video, 'codec', VideoCodec.H264) + + video.save_to(video_bytes_io, format=format_to_use, codec=codec_to_use) + video_bytes_io.seek(0) + return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8") + + def upload_video_to_comfyapi( video: VideoInput, - auth_kwargs: Optional[dict[str,str]] = None, + auth_kwargs: Optional[dict[str, str]] = None, container: VideoContainer = VideoContainer.MP4, codec: VideoCodec = VideoCodec.H264, max_duration: Optional[int] = None, @@ -461,7 +503,7 @@ def audio_ndarray_to_bytesio( def upload_audio_to_comfyapi( audio: AudioInput, - auth_kwargs: Optional[dict[str,str]] = None, + auth_kwargs: Optional[dict[str, str]] = None, container_format: str = "mp4", codec_name: str = "aac", mime_type: str = "audio/mp4", @@ -488,8 +530,25 @@ def upload_audio_to_comfyapi( return upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_kwargs) +def audio_to_base64_string( + audio: AudioInput, container_format: str = "mp4", codec_name: str = "aac" +) -> str: + """Converts an audio input to a base64 string.""" + sample_rate: int = audio["sample_rate"] + waveform: torch.Tensor = audio["waveform"] + audio_data_np = audio_tensor_to_contiguous_ndarray(waveform) + audio_bytes_io = audio_ndarray_to_bytesio( + audio_data_np, sample_rate, container_format, codec_name + ) + audio_bytes = audio_bytes_io.getvalue() + return base64.b64encode(audio_bytes).decode("utf-8") + + def upload_images_to_comfyapi( - image: torch.Tensor, max_images=8, auth_kwargs: Optional[dict[str,str]] = None, mime_type: Optional[str] = None + image: torch.Tensor, + max_images=8, + auth_kwargs: Optional[dict[str, str]] = None, + mime_type: Optional[str] = None, ) -> list[str]: """ Uploads images to ComfyUI API and returns download URLs. @@ -554,17 +613,24 @@ def upload_images_to_comfyapi( return download_urls -def resize_mask_to_image(mask: torch.Tensor, image: torch.Tensor, - upscale_method="nearest-exact", crop="disabled", - allow_gradient=True, add_channel_dim=False): +def resize_mask_to_image( + mask: torch.Tensor, + image: torch.Tensor, + upscale_method="nearest-exact", + crop="disabled", + allow_gradient=True, + add_channel_dim=False, +): """ Resize mask to be the same dimensions as an image, while maintaining proper format for API calls. 
""" _, H, W, _ = image.shape mask = mask.unsqueeze(-1) - mask = mask.movedim(-1,1) - mask = common_upscale(mask, width=W, height=H, upscale_method=upscale_method, crop=crop) - mask = mask.movedim(1,-1) + mask = mask.movedim(-1, 1) + mask = common_upscale( + mask, width=W, height=H, upscale_method=upscale_method, crop=crop + ) + mask = mask.movedim(1, -1) if not add_channel_dim: mask = mask.squeeze(-1) if not allow_gradient: @@ -572,12 +638,41 @@ def resize_mask_to_image(mask: torch.Tensor, image: torch.Tensor, return mask -def validate_string(string: str, strip_whitespace=True, field_name="prompt", min_length=None, max_length=None): +def validate_string( + string: str, + strip_whitespace=True, + field_name="prompt", + min_length=None, + max_length=None, +): + if string is None: + raise Exception(f"Field '{field_name}' cannot be empty.") if strip_whitespace: string = string.strip() if min_length and len(string) < min_length: - raise Exception(f"Field '{field_name}' cannot be shorter than {min_length} characters; was {len(string)} characters long.") + raise Exception( + f"Field '{field_name}' cannot be shorter than {min_length} characters; was {len(string)} characters long." + ) if max_length and len(string) > max_length: - raise Exception(f" Field '{field_name} cannot be longer than {max_length} characters; was {len(string)} characters long.") - if not string: - raise Exception(f"Field '{field_name}' cannot be empty.") + raise Exception( + f" Field '{field_name} cannot be longer than {max_length} characters; was {len(string)} characters long." + ) + + +def image_tensor_pair_to_batch( + image1: torch.Tensor, image2: torch.Tensor +) -> torch.Tensor: + """ + Converts a pair of image tensors to a batch tensor. + If the images are not the same size, the smaller image is resized to + match the larger image. + """ + if image1.shape[1:] != image2.shape[1:]: + image2 = common_upscale( + image2.movedim(-1, 1), + image1.shape[2], + image1.shape[1], + "bilinear", + "center", + ).movedim(1, -1) + return torch.cat((image1, image2), dim=0) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index aa1c4ce0b..e38d38cc9 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,67 +1,197 @@ # generated by datamodel-codegen: # filename: filtered-openapi.yaml -# timestamp: 2025-05-04T04:12:39+00:00 +# timestamp: 2025-05-19T21:38:55+00:00 from __future__ import annotations -from datetime import datetime +from datetime import date, datetime from enum import Enum from typing import Any, Dict, List, Literal, Optional, Union from uuid import UUID -from pydantic import AnyUrl, BaseModel, Field, RootModel, StrictBytes +from pydantic import AnyUrl, BaseModel, ConfigDict, Field, RootModel, StrictBytes -class PersonalAccessToken(BaseModel): - id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') - name: Optional[str] = Field( - None, - description='Required. The name of the token. Can be a simple description.', - ) - description: Optional[str] = Field( - None, - description="Optional. 
diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index aa1c4ce0b..e38d38cc9 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,67 +1,197 @@ # generated by datamodel-codegen: # filename: filtered-openapi.yaml -# timestamp: 2025-05-04T04:12:39+00:00 +# timestamp: 2025-05-19T21:38:55+00:00 from __future__ import annotations -from datetime import datetime +from datetime import date, datetime from enum import Enum from typing import Any, Dict, List, Literal, Optional, Union from uuid import UUID -from pydantic import AnyUrl, BaseModel, Field, RootModel, StrictBytes +from pydantic import AnyUrl, BaseModel, ConfigDict, Field, RootModel, StrictBytes -class PersonalAccessToken(BaseModel): - id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') - name: Optional[str] = Field( - None, - description='Required. The name of the token. Can be a simple description.', - ) - description: Optional[str] = Field( - None, - description="Optional. A more detailed description of the token's intended use.", +class APIKey(BaseModel): + created_at: Optional[datetime] = None + description: Optional[str] = None + id: Optional[str] = None + key_prefix: Optional[str] = None + name: Optional[str] = None + + +class APIKeyWithPlaintext(APIKey): + plaintext_key: Optional[str] = Field( + None, description='The full API key (only returned at creation)' ) + + +class AuditLog(BaseModel): createdAt: Optional[datetime] = Field( - None, description='[Output Only]The date and time the token was created.' + None, description='The date and time the event was created' ) - token: Optional[str] = Field( + event_id: Optional[str] = Field(None, description='the id of the event') + event_type: Optional[str] = Field(None, description='the type of the event') + params: Optional[Dict[str, Any]] = Field( + None, description='data related to the event' + ) + + +class OutputFormat(str, Enum): + jpeg = 'jpeg' + png = 'png' + + +class BFLFluxPro11GenerateRequest(BaseModel): + height: int = Field(..., description='Height of the generated image') + image_prompt: Optional[str] = Field(None, description='Optional image prompt') + output_format: Optional[OutputFormat] = Field( + None, description='Output image format' + ) + prompt: str = Field(..., description='The main text prompt for image generation') + prompt_upsampling: Optional[bool] = Field( + None, description='Whether to use prompt upsampling' + ) + safety_tolerance: Optional[int] = Field(None, description='Safety tolerance level') + seed: Optional[int] = Field(None, description='Random seed for reproducibility') + webhook_secret: Optional[str] = Field( + None, description='Optional webhook secret for async processing' + ) + webhook_url: Optional[str] = Field( + None, description='Optional webhook URL for async processing' + ) + width: int = Field(..., description='Width of the generated image') + + +class BFLFluxPro11GenerateResponse(BaseModel): + id: str = Field(..., description='Job ID for tracking') + polling_url: str = Field(..., description='URL to poll for results') + + +class BFLFluxProGenerateRequest(BaseModel): + guidance_scale: Optional[float] = Field( + None, description='The guidance scale for generation.', ge=1.0, le=20.0 + ) + height: int = Field( + ..., description='The height of the image to generate.', ge=64, le=2048 + ) + negative_prompt: Optional[str] = Field( + None, description='The negative prompt for image generation.'
+ ) + num_images: Optional[int] = Field( + None, description='The number of images to generate.', ge=1, le=4 + ) + num_inference_steps: Optional[int] = Field( + None, description='The number of inference steps.', ge=1, le=100 + ) + prompt: str = Field(..., description='The text prompt for image generation.') + seed: Optional[int] = Field(None, description='The seed value for reproducibility.') + width: int = Field( + ..., description='The width of the image to generate.', ge=64, le=2048 + ) + + +class BFLFluxProGenerateResponse(BaseModel): + id: str = Field(..., description='The unique identifier for the generation task.') + polling_url: str = Field(..., description='URL to poll for the generation result.') + + +class Status(str, Enum): + in_progress = 'in_progress' + completed = 'completed' + incomplete = 'incomplete' + + +class Type(str, Enum): + computer_call = 'computer_call' + + +class ComputerToolCall(BaseModel): + action: Dict[str, Any] + call_id: str = Field( + ..., + description='An identifier used when responding to the tool call with output.\n', + ) + id: str = Field(..., description='The unique ID of the computer call.') + status: Status = Field( + ..., + description='The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API.\n', + ) + type: Type = Field( + ..., description='The type of the computer call. Always `computer_call`.' + ) + + +class Environment(str, Enum): + windows = 'windows' + mac = 'mac' + linux = 'linux' + ubuntu = 'ubuntu' + browser = 'browser' + + +class Type1(str, Enum): + computer_use_preview = 'computer_use_preview' + + +class ComputerUsePreviewTool(BaseModel): + display_height: int = Field(..., description='The height of the computer display.') + display_width: int = Field(..., description='The width of the computer display.') + environment: Environment = Field( + ..., description='The type of computer environment to control.' + ) + type: Literal['ComputerUsePreviewTool'] = Field( + ..., + description='The type of the computer use tool. Always `computer_use_preview`.', + ) + + +class CreateAPIKeyRequest(BaseModel): + description: Optional[str] = None + name: str + + +class Customer(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='The date and time the user was created' + ) + email: Optional[str] = Field(None, description='The email address for this user') + id: str = Field(..., description='The firebase UID of the user') + is_admin: Optional[bool] = Field(None, description='Whether the user is an admin') + metronome_id: Optional[str] = Field(None, description='The Metronome customer ID') + name: Optional[str] = Field(None, description='The name for this user') + stripe_id: Optional[str] = Field(None, description='The Stripe customer ID') + updatedAt: Optional[datetime] = Field( + None, description='The date and time the user was last updated' + ) + + +class CustomerStorageResourceResponse(BaseModel): + download_url: Optional[str] = Field( None, - description='[Output Only]. The personal access token. 
Only returned during creation.', + description='The signed URL to use for downloading the file from the specified path', + ) + existing_file: Optional[bool] = Field( + None, description='Whether an existing file with the same hash was found' + ) + expires_at: Optional[datetime] = Field( + None, description='When the signed URL will expire' + ) + upload_url: Optional[str] = Field( + None, + description='The signed URL to use for uploading the file to the specified path', ) -class GitCommitSummary(BaseModel): - commit_hash: Optional[str] = Field(None, description='The hash of the commit') - commit_name: Optional[str] = Field(None, description='The name of the commit') - branch_name: Optional[str] = Field( - None, description='The branch where the commit was made' - ) - author: Optional[str] = Field(None, description='The author of the commit') - timestamp: Optional[datetime] = Field( - None, description='The timestamp when the commit was made' - ) - status_summary: Optional[Dict[str, str]] = Field( - None, description='A map of operating system to status pairs' - ) +class Role(str, Enum): + user = 'user' + assistant = 'assistant' + system = 'system' + developer = 'developer' -class User(BaseModel): - id: Optional[str] = Field(None, description='The unique id for this user.') - email: Optional[str] = Field(None, description='The email address for this user.') - name: Optional[str] = Field(None, description='The name for this user.') - isApproved: Optional[bool] = Field( - None, description='Indicates if the user is approved.' - ) - isAdmin: Optional[bool] = Field( - None, description='Indicates if the user has admin privileges.' - ) - - -class PublisherUser(BaseModel): - id: Optional[str] = Field(None, description='The unique id for this user.') - email: Optional[str] = Field(None, description='The email address for this user.') - name: Optional[str] = Field(None, description='The name for this user.') +class Type2(str, Enum): + message = 'message' class ErrorResponse(BaseModel): @@ -69,168 +199,247 @@ class ErrorResponse(BaseModel): message: str -class StorageFile(BaseModel): - id: Optional[UUID] = Field( - None, description='Unique identifier for the storage file' - ) - file_path: Optional[str] = Field(None, description='Path to the file in storage') - public_url: Optional[str] = Field(None, description='Public URL') +class Type3(str, Enum): + file_search = 'file_search' -class PublisherMember(BaseModel): - id: Optional[str] = Field( - None, description='The unique identifier for the publisher member.' - ) - user: Optional[PublisherUser] = Field( - None, description='The user associated with this publisher member.' - ) - role: Optional[str] = Field( - None, description='The role of the user in the publisher.' 
+class FileSearchTool(BaseModel): + type: Literal['FileSearchTool'] = Field(..., description='The type of tool') + vector_store_ids: List[str] = Field( + ..., description='IDs of vector stores to search in' ) -class ComfyNode(BaseModel): - comfy_node_name: Optional[str] = Field( - None, description='Unique identifier for the node' +class Result(BaseModel): + file_id: Optional[str] = Field(None, description='The unique ID of the file.\n') + filename: Optional[str] = Field(None, description='The name of the file.\n') + score: Optional[float] = Field( + None, description='The relevance score of the file - a value between 0 and 1.\n' ) - category: Optional[str] = Field( - None, - description='UI category where the node is listed, used for grouping nodes.', + text: Optional[str] = Field( + None, description='The text that was retrieved from the file.\n' ) + + +class Status1(str, Enum): + in_progress = 'in_progress' + searching = 'searching' + completed = 'completed' + incomplete = 'incomplete' + failed = 'failed' + + +class Type4(str, Enum): + file_search_call = 'file_search_call' + + +class FileSearchToolCall(BaseModel): + id: str = Field(..., description='The unique ID of the file search tool call.\n') + queries: List[str] = Field( + ..., description='The queries used to search for files.\n' + ) + results: Optional[List[Result]] = Field( + None, description='The results of the file search tool call.\n' + ) + status: Status1 = Field( + ..., + description='The status of the file search tool call. One of `in_progress`, \n`searching`, `incomplete` or `failed`,\n', + ) + type: Type4 = Field( + ..., + description='The type of the file search tool call. Always `file_search_call`.\n', + ) + + +class Type5(str, Enum): + function = 'function' + + +class FunctionTool(BaseModel): description: Optional[str] = Field( - None, description="Brief description of the node's functionality or purpose." + None, description='Description of what the function does' ) - input_types: Optional[str] = Field(None, description='Defines input parameters') - deprecated: Optional[bool] = Field( + name: str = Field(..., description='Name of the function') + parameters: Dict[str, Any] = Field( + ..., description='JSON Schema object describing the function parameters' + ) + type: Literal['FunctionTool'] = Field(..., description='The type of tool') + + +class Status2(str, Enum): + in_progress = 'in_progress' + completed = 'completed' + incomplete = 'incomplete' + + +class Type6(str, Enum): + function_call = 'function_call' + + +class FunctionToolCall(BaseModel): + arguments: str = Field( + ..., description='A JSON string of the arguments to pass to the function.\n' + ) + call_id: str = Field( + ..., + description='The unique ID of the function tool call generated by the model.\n', + ) + id: Optional[str] = Field( + None, description='The unique ID of the function tool call.\n' + ) + name: str = Field(..., description='The name of the function to run.\n') + status: Optional[Status2] = Field( None, - description='Indicates if the node is deprecated. Deprecated nodes are hidden in the UI.', + description='The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API.\n', ) - experimental: Optional[bool] = Field( + type: Type6 = Field( + ..., description='The type of the function tool call. 
Always `function_call`.\n' + ) + + +class GeminiCitation(BaseModel): + authors: Optional[List[str]] = None + endIndex: Optional[int] = None + license: Optional[str] = None + publicationDate: Optional[date] = None + startIndex: Optional[int] = None + title: Optional[str] = None + uri: Optional[str] = None + + +class GeminiCitationMetadata(BaseModel): + citations: Optional[List[GeminiCitation]] = None + + +class Role1(str, Enum): + user = 'user' + model = 'model' + + +class GeminiFunctionDeclaration(BaseModel): + description: Optional[str] = None + name: str + parameters: Dict[str, Any] = Field( + ..., description='JSON schema for the function parameters' + ) + + +class GeminiGenerationConfig(BaseModel): + maxOutputTokens: Optional[int] = Field( None, - description='Indicates if the node is experimental, subject to changes or removal.', + description='Maximum number of tokens that can be generated in the response. A token is approximately 4 characters. 100 tokens correspond to roughly 60-80 words.\n', + examples=[2048], + ge=16, + le=8192, ) - output_is_list: Optional[List[bool]] = Field( - None, description='Boolean values indicating if each output is a list.' - ) - return_names: Optional[str] = Field( - None, description='Names of the outputs for clarity in workflows.' - ) - return_types: Optional[str] = Field( - None, description='Specifies the types of outputs produced by the node.' - ) - function: Optional[str] = Field( - None, description='Name of the entry-point function to execute the node.' - ) - - -class ComfyNodeCloudBuildInfo(BaseModel): - project_id: Optional[str] = None - project_number: Optional[str] = None - location: Optional[str] = None - build_id: Optional[str] = None - - -class Error(BaseModel): - message: Optional[str] = Field( - None, description='A clear and concise description of the error.' - ) - details: Optional[List[str]] = Field( + seed: Optional[int] = Field( None, - description='Optional detailed information about the error or hints for resolving it.', + description="When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used. Available for the following models:, gemini-2.5-flash-preview-04-1, gemini-2.5-pro-preview-05-0, gemini-2.0-flash-lite-00, gemini-2.0-flash-001\n", + examples=[343940597], + ) + stopSequences: Optional[List[str]] = None + temperature: Optional[float] = Field( + 1, + description="The temperature is used for sampling during response generation, which occurs when topP and topK are applied. Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that require a less open-ended or creative response, while higher temperatures can lead to more diverse or creative results. A temperature of 0 means that the highest probability tokens are always selected. In this case, responses for a given prompt are mostly deterministic, but a small amount of variation is still possible. If the model returns a response that's too generic, too short, or the model gives a fallback response, try increasing the temperature\n", + ge=0.0, + le=2.0, + ) + topK: Optional[int] = Field( + 40, + description="Top-K changes how the model selects tokens for output. 
A top-K of 1 means the next selected token is the most probable among all tokens in the model's vocabulary. A top-K of 3 means that the next token is selected from among the 3 most probable tokens by using temperature.\n", + examples=[40], + ge=1, + ) + topP: Optional[float] = Field( + 0.95, + description='If specified, nucleus sampling is used.\nTop-P changes how the model selects tokens for output. Tokens are selected from the most (see top-K) to least probable until the sum of their probabilities equals the top-P value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5, then the model will select either A or B as the next token by using temperature and excludes C as a candidate.\nSpecify a lower value for less random responses and a higher value for more random responses.\n', + ge=0.0, + le=1.0, ) -class NodeVersionUpdateRequest(BaseModel): - changelog: Optional[str] = Field( - None, description='The changelog describing the version changes.' +class GeminiMimeType(str, Enum): + application_pdf = 'application/pdf' + audio_mpeg = 'audio/mpeg' + audio_mp3 = 'audio/mp3' + audio_wav = 'audio/wav' + image_png = 'image/png' + image_jpeg = 'image/jpeg' + image_webp = 'image/webp' + text_plain = 'text/plain' + video_mov = 'video/mov' + video_mpeg = 'video/mpeg' + video_mp4 = 'video/mp4' + video_mpg = 'video/mpg' + video_avi = 'video/avi' + video_wmv = 'video/wmv' + video_mpegps = 'video/mpegps' + video_flv = 'video/flv' + + +class GeminiOffset(BaseModel): + nanos: Optional[int] = Field( + None, + description='Signed fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values.\n', + examples=[0], + ge=0, + le=999999999, ) - deprecated: Optional[bool] = Field( - None, description='Whether the version is deprecated.' + seconds: Optional[int] = Field( + None, + description='Signed seconds of the span of time. Must be from -315,576,000,000 to +315,576,000,000 inclusive.\n', + examples=[60], + ge=-315576000000, + le=315576000000, ) -class NodeStatus(str, Enum): - NodeStatusActive = 'NodeStatusActive' - NodeStatusDeleted = 'NodeStatusDeleted' - NodeStatusBanned = 'NodeStatusBanned' +class GeminiSafetyCategory(str, Enum): + HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT' + HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH' + HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT' + HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT' -class NodeVersionStatus(str, Enum): - NodeVersionStatusActive = 'NodeVersionStatusActive' - NodeVersionStatusDeleted = 'NodeVersionStatusDeleted' - NodeVersionStatusBanned = 'NodeVersionStatusBanned' - NodeVersionStatusPending = 'NodeVersionStatusPending' - NodeVersionStatusFlagged = 'NodeVersionStatusFlagged' +class Probability(str, Enum): + NEGLIGIBLE = 'NEGLIGIBLE' + LOW = 'LOW' + MEDIUM = 'MEDIUM' + HIGH = 'HIGH' + UNKNOWN = 'UNKNOWN' -class PublisherStatus(str, Enum): - PublisherStatusActive = 'PublisherStatusActive' - PublisherStatusBanned = 'PublisherStatusBanned' - - -class WorkflowRunStatus(str, Enum): - WorkflowRunStatusStarted = 'WorkflowRunStatusStarted' - WorkflowRunStatusFailed = 'WorkflowRunStatusFailed' - WorkflowRunStatusCompleted = 'WorkflowRunStatusCompleted' - - -class MachineStats(BaseModel): - machine_name: Optional[str] = Field(None, description='Name of the machine.') - os_version: Optional[str] = Field( - None, description='The operating system version. eg. 
Ubuntu Linux 20.04' - ) - gpu_type: Optional[str] = Field( - None, description='The GPU type. eg. NVIDIA Tesla K80' - ) - cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') - initial_cpu: Optional[str] = Field( - None, description='Initial CPU available before the job starts.' - ) - memory_capacity: Optional[str] = Field( - None, description='Total memory on the machine.' - ) - initial_ram: Optional[str] = Field( - None, description='Initial RAM available before the job starts.' - ) - vram_time_series: Optional[Dict[str, Any]] = Field( - None, description='Time series of VRAM usage.' - ) - disk_capacity: Optional[str] = Field( - None, description='Total disk capacity on the machine.' - ) - initial_disk: Optional[str] = Field( - None, description='Initial disk available before the job starts.' - ) - pip_freeze: Optional[str] = Field(None, description='The pip freeze output') - - -class Customer(BaseModel): - id: str = Field(..., description='The firebase UID of the user') - email: Optional[str] = Field(None, description='The email address for this user') - name: Optional[str] = Field(None, description='The name for this user') - createdAt: Optional[datetime] = Field( - None, description='The date and time the user was created' - ) - updatedAt: Optional[datetime] = Field( - None, description='The date and time the user was last updated' +class GeminiSafetyRating(BaseModel): + category: Optional[GeminiSafetyCategory] = None + probability: Optional[Probability] = Field( + None, + description='The probability that the content violates the specified safety category', ) -class MagicPrompt(str, Enum): - ON = 'ON' +class GeminiSafetyThreshold(str, Enum): OFF = 'OFF' + BLOCK_NONE = 'BLOCK_NONE' + BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE' + BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE' + BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH' -class ColorPalette(BaseModel): - name: str = Field(..., description='Name of the color palette', examples=['PASTEL']) +class GeminiTextPart(BaseModel): + text: Optional[str] = Field( + None, + description='A text prompt or code snippet.', + examples=['Answer as concisely as possible'], + ) -class StyleCode(RootModel[str]): - root: str = Field(..., pattern='^[0-9A-Fa-f]{8}$') +class GeminiTool(BaseModel): + functionDeclarations: Optional[List[GeminiFunctionDeclaration]] = None -class StyleType(str, Enum): - GENERAL = 'GENERAL' +class GeminiVideoMetadata(BaseModel): + endOffset: Optional[GeminiOffset] = None + startOffset: Optional[GeminiOffset] = None class IdeogramColorPalette1(BaseModel): @@ -262,17 +471,34 @@ class IdeogramColorPalette( class ImageRequest(BaseModel): - prompt: str = Field( - ..., description='Required. The prompt to use to generate the image.' - ) aspect_ratio: Optional[str] = Field( None, description="Optional. The aspect ratio (e.g., 'ASPECT_16_9', 'ASPECT_1_1'). Cannot be used with resolution. Defaults to 'ASPECT_1_1' if unspecified.", ) - model: str = Field(..., description="The model used (e.g., 'V_2', 'V_2A_TURBO')") + color_palette: Optional[Dict[str, Any]] = Field( + None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' + ) magic_prompt_option: Optional[str] = Field( None, description="Optional. MagicPrompt usage ('AUTO', 'ON', 'OFF')." ) + model: str = Field(..., description="The model used (e.g., 'V_2', 'V_2A_TURBO')") + negative_prompt: Optional[str] = Field( + None, + description='Optional. Description of what to exclude. 
Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', + ) + num_images: Optional[int] = Field( + 1, + description='Optional. Number of images to generate (1-8). Defaults to 1.', + ge=1, + le=8, + ) + prompt: str = Field( + ..., description='Required. The prompt to use to generate the image.' + ) + resolution: Optional[str] = Field( + None, + description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.", + ) seed: Optional[int] = Field( None, description='Optional. A number between 0 and 2147483647.', @@ -283,23 +509,6 @@ class ImageRequest(BaseModel): None, description="Optional. Style type ('AUTO', 'GENERAL', 'REALISTIC', 'DESIGN', 'RENDER_3D', 'ANIME'). Only for models V_2 and above.", ) - negative_prompt: Optional[str] = Field( - None, - description='Optional. Description of what to exclude. Only for V_1, V_1_TURBO, V_2, V_2_TURBO.', - ) - num_images: Optional[int] = Field( - 1, - description='Optional. Number of images to generate (1-8). Defaults to 1.', - ge=1, - le=8, - ) - resolution: Optional[str] = Field( - None, - description="Optional. Resolution (e.g., 'RESOLUTION_1024_1024'). Only for model V_2. Cannot be used with aspect_ratio.", - ) - color_palette: Optional[Dict[str, Any]] = Field( - None, description='Optional. Color palette object. Only for V_2, V_2_TURBO.' - ) class IdeogramGenerateRequest(BaseModel): @@ -309,23 +518,23 @@ class IdeogramGenerateRequest(BaseModel): class Datum(BaseModel): + is_image_safe: Optional[bool] = Field( + None, description='Indicates whether the image is considered safe.' + ) prompt: Optional[str] = Field( None, description='The prompt used to generate this image.' ) resolution: Optional[str] = Field( None, description="The resolution of the generated image (e.g., '1024x1024')." ) - is_image_safe: Optional[bool] = Field( - None, description='Indicates whether the image is considered safe.' - ) seed: Optional[int] = Field( None, description='The seed value used for this generation.' 
) - url: Optional[str] = Field(None, description='URL to the generated image.') style_type: Optional[str] = Field( None, description="The style type used for generation (e.g., 'REALISTIC', 'ANIME').", ) + url: Optional[str] = Field(None, description='URL to the generated image.') class IdeogramGenerateResponse(BaseModel): @@ -337,49 +546,17 @@ class IdeogramGenerateResponse(BaseModel): ) -class RenderingSpeed1(str, Enum): - TURBO = 'TURBO' - DEFAULT = 'DEFAULT' - QUALITY = 'QUALITY' - - -class MagicPrompt1(str, Enum): - AUTO = 'AUTO' - ON = 'ON' - OFF = 'OFF' - - -class StyleType1(str, Enum): - AUTO = 'AUTO' - GENERAL = 'GENERAL' - REALISTIC = 'REALISTIC' - DESIGN = 'DESIGN' - - -class IdeogramV3RemixRequest(BaseModel): - image: Optional[StrictBytes] = None - prompt: str - image_weight: Optional[int] = Field(50, ge=1, le=100) - seed: Optional[int] = Field(None, ge=0, le=2147483647) - resolution: Optional[str] = None - aspect_ratio: Optional[str] = None - rendering_speed: Optional[RenderingSpeed1] = None - magic_prompt: Optional[MagicPrompt1] = None - negative_prompt: Optional[str] = None - num_images: Optional[int] = Field(None, ge=1, le=8) - color_palette: Optional[Dict[str, Any]] = None - style_codes: Optional[List[str]] = None - style_type: Optional[StyleType1] = None - style_reference_images: Optional[List[StrictBytes]] = None +class StyleCode(RootModel[str]): + root: str = Field(..., pattern='^[0-9A-Fa-f]{8}$') class Datum1(BaseModel): + is_image_safe: Optional[bool] = None prompt: Optional[str] = None resolution: Optional[str] = None - is_image_safe: Optional[bool] = None seed: Optional[int] = None - url: Optional[str] = None style_type: Optional[str] = None + url: Optional[str] = None class IdeogramV3IdeogramResponse(BaseModel): @@ -387,74 +564,201 @@ class IdeogramV3IdeogramResponse(BaseModel): data: Optional[List[Datum1]] = None +class RenderingSpeed1(str, Enum): + TURBO = 'TURBO' + DEFAULT = 'DEFAULT' + QUALITY = 'QUALITY' + + class IdeogramV3ReframeRequest(BaseModel): - image: Optional[StrictBytes] = None - resolution: str - num_images: Optional[int] = Field(None, ge=1, le=8) - seed: Optional[int] = Field(None, ge=0, le=2147483647) - rendering_speed: Optional[RenderingSpeed1] = None color_palette: Optional[Dict[str, Any]] = None + image: Optional[StrictBytes] = None + num_images: Optional[int] = Field(None, ge=1, le=8) + rendering_speed: Optional[RenderingSpeed1] = None + resolution: str + seed: Optional[int] = Field(None, ge=0, le=2147483647) style_codes: Optional[List[str]] = None style_reference_images: Optional[List[StrictBytes]] = None +class MagicPrompt(str, Enum): + AUTO = 'AUTO' + ON = 'ON' + OFF = 'OFF' + + +class StyleType(str, Enum): + AUTO = 'AUTO' + GENERAL = 'GENERAL' + REALISTIC = 'REALISTIC' + DESIGN = 'DESIGN' + + +class IdeogramV3RemixRequest(BaseModel): + aspect_ratio: Optional[str] = None + color_palette: Optional[Dict[str, Any]] = None + image: Optional[StrictBytes] = None + image_weight: Optional[int] = Field(50, ge=1, le=100) + magic_prompt: Optional[MagicPrompt] = None + negative_prompt: Optional[str] = None + num_images: Optional[int] = Field(None, ge=1, le=8) + prompt: str + rendering_speed: Optional[RenderingSpeed1] = None + resolution: Optional[str] = None + seed: Optional[int] = Field(None, ge=0, le=2147483647) + style_codes: Optional[List[str]] = None + style_reference_images: Optional[List[StrictBytes]] = None + style_type: Optional[StyleType] = None + + class IdeogramV3ReplaceBackgroundRequest(BaseModel): - image: Optional[StrictBytes] = None - 
prompt: str - magic_prompt: Optional[MagicPrompt1] = None - num_images: Optional[int] = Field(None, ge=1, le=8) - seed: Optional[int] = Field(None, ge=0, le=2147483647) - rendering_speed: Optional[RenderingSpeed1] = None color_palette: Optional[Dict[str, Any]] = None + image: Optional[StrictBytes] = None + magic_prompt: Optional[MagicPrompt] = None + num_images: Optional[int] = Field(None, ge=1, le=8) + prompt: str + rendering_speed: Optional[RenderingSpeed1] = None + seed: Optional[int] = Field(None, ge=0, le=2147483647) style_codes: Optional[List[str]] = None style_reference_images: Optional[List[StrictBytes]] = None -class KlingTaskStatus(str, Enum): - submitted = 'submitted' - processing = 'processing' - succeed = 'succeed' - failed = 'failed' +class ColorPalette(BaseModel): + name: str = Field(..., description='Name of the color palette', examples=['PASTEL']) -class KlingVideoGenModelName(str, Enum): - kling_v1 = 'kling-v1' - kling_v1_5 = 'kling-v1-5' - kling_v1_6 = 'kling-v1-6' - kling_v2_master = 'kling-v2-master' +class MagicPrompt2(str, Enum): + ON = 'ON' + OFF = 'OFF' -class KlingVideoGenMode(str, Enum): - std = 'std' - pro = 'pro' +class StyleType1(str, Enum): + GENERAL = 'GENERAL' -class KlingVideoGenAspectRatio(str, Enum): - field_16_9 = '16:9' - field_9_16 = '9:16' +class ImagenImageGenerationInstance(BaseModel): + prompt: str = Field(..., description='Text prompt for image generation') + + +class AspectRatio(str, Enum): field_1_1 = '1:1' + field_9_16 = '9:16' + field_16_9 = '16:9' + field_3_4 = '3:4' + field_4_3 = '4:3' -class KlingVideoGenDuration(str, Enum): - field_5 = '5' - field_10 = '10' +class PersonGeneration(str, Enum): + dont_allow = 'dont_allow' + allow_adult = 'allow_adult' + allow_all = 'allow_all' -class KlingVideoGenCfgScale(RootModel[float]): - root: float = Field( - ..., - description="Flexibility in video generation. The higher the value, the lower the model's degree of flexibility, and the stronger the relevance to the user's prompt.", - ge=0.0, - le=1.0, +class SafetySetting(str, Enum): + block_most = 'block_most' + block_some = 'block_some' + block_few = 'block_few' + block_fewest = 'block_fewest' + + +class ImagenImagePrediction(BaseModel): + bytesBase64Encoded: Optional[str] = Field( + None, description='Base64-encoded image content' + ) + mimeType: Optional[str] = Field( + None, description='MIME type of the generated image' + ) + prompt: Optional[str] = Field( + None, description='Enhanced or rewritten prompt used to generate this image' ) -class KlingCameraControlType(str, Enum): - simple = 'simple' - down_back = 'down_back' - forward_up = 'forward_up' - right_turn_forward = 'right_turn_forward' - left_turn_forward = 'left_turn_forward' +class MimeType(str, Enum): + image_png = 'image/png' + image_jpeg = 'image/jpeg' + + +class ImagenOutputOptions(BaseModel): + compressionQuality: Optional[int] = Field(None, ge=0, le=100) + mimeType: Optional[MimeType] = None + + +class Includable(str, Enum): + file_search_call_results = 'file_search_call.results' + message_input_image_image_url = 'message.input_image.image_url' + computer_call_output_output_image_url = 'computer_call_output.output.image_url' + + +class Type7(str, Enum): + input_file = 'input_file' + + +class InputFileContent(BaseModel): + file_data: Optional[str] = Field( + None, description='The content of the file to be sent to the model.\n' + ) + file_id: Optional[str] = Field( + None, description='The ID of the file to be sent to the model.' 
+ ) + filename: Optional[str] = Field( + None, description='The name of the file to be sent to the model.' + ) + type: Type7 = Field( + ..., description='The type of the input item. Always `input_file`.' + ) + + +class Detail(str, Enum): + low = 'low' + high = 'high' + auto = 'auto' + + +class Type8(str, Enum): + input_image = 'input_image' + + +class InputImageContent(BaseModel): + detail: Detail = Field( + ..., + description='The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`.', + ) + file_id: Optional[str] = Field( + None, description='The ID of the file to be sent to the model.' + ) + image_url: Optional[str] = Field( + None, + description='The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL.', + ) + type: Type8 = Field( + ..., description='The type of the input item. Always `input_image`.' + ) + + +class Role3(str, Enum): + user = 'user' + system = 'system' + developer = 'developer' + + +class Type9(str, Enum): + message = 'message' + + +class Type10(str, Enum): + input_text = 'input_text' + + +class InputTextContent(BaseModel): + text: str = Field(..., description='The text input to the model.') + type: Type10 = Field( + ..., description='The type of the input item. Always `input_text`.' + ) + + +class KlingAudioUploadType(str, Enum): + file = 'file' + url = 'url' class KlingCameraConfig(BaseModel): @@ -464,27 +768,27 @@ class KlingCameraConfig(BaseModel): ge=-10.0, le=10.0, ) - vertical: Optional[float] = Field( - None, - description="Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward.", - ge=-10.0, - le=10.0, - ) pan: Optional[float] = Field( None, description="Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.", ge=-10.0, le=10.0, ) + roll: Optional[float] = Field( + None, + description="Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", + ge=-10.0, + le=10.0, + ) tilt: Optional[float] = Field( None, description="Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.", ge=-10.0, le=10.0, ) - roll: Optional[float] = Field( + vertical: Optional[float] = Field( None, - description="Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", + description="Controls camera's movement along vertical axis (y-axis). 
Negative indicates downward, positive indicates upward.", ge=-10.0, le=10.0, ) @@ -496,39 +800,12 @@ class KlingCameraConfig(BaseModel): ) -class KlingVideoResult(BaseModel): - id: Optional[str] = Field(None, description='Generated video ID') - url: Optional[AnyUrl] = Field(None, description='URL for generated video') - duration: Optional[str] = Field(None, description='Total video duration') - - -class KlingAudioUploadType(str, Enum): - file = 'file' - url = 'url' - - -class KlingLipSyncMode(str, Enum): - text2video = 'text2video' - audio2video = 'audio2video' - - -class KlingLipSyncVoiceLanguage(str, Enum): - zh = 'zh' - en = 'en' - - -class KlingDualCharacterEffectsScene(str, Enum): - hug = 'hug' - kiss = 'kiss' - heart_gesture = 'heart_gesture' - - -class KlingSingleImageEffectsScene(str, Enum): - bloombloom = 'bloombloom' - dizzydizzy = 'dizzydizzy' - fuzzyfuzzy = 'fuzzyfuzzy' - squish = 'squish' - expansion = 'expansion' +class KlingCameraControlType(str, Enum): + simple = 'simple' + down_back = 'down_back' + forward_up = 'forward_up' + right_turn_forward = 'right_turn_forward' + left_turn_forward = 'left_turn_forward' class KlingCharacterEffectModelName(str, Enum): @@ -537,18 +814,50 @@ class KlingCharacterEffectModelName(str, Enum): kling_v1_6 = 'kling-v1-6' -class KlingSingleImageEffectModelName(str, Enum): - kling_v1_6 = 'kling-v1-6' - - -class KlingSingleImageEffectDuration(str, Enum): - field_5 = '5' +class KlingDualCharacterEffectsScene(str, Enum): + hug = 'hug' + kiss = 'kiss' + heart_gesture = 'heart_gesture' class KlingDualCharacterImages(RootModel[List[str]]): root: List[str] = Field(..., max_length=2, min_length=2) +class KlingErrorResponse(BaseModel): + code: int = Field( + ..., + description='- 1000: Authentication failed\n- 1001: Authorization is empty\n- 1002: Authorization is invalid\n- 1003: Authorization is not yet valid\n- 1004: Authorization has expired\n- 1100: Account exception\n- 1101: Account in arrears (postpaid scenario)\n- 1102: Resource pack depleted or expired (prepaid scenario)\n- 1103: Unauthorized access to requested resource\n- 1200: Invalid request parameters\n- 1201: Invalid parameters\n- 1202: Invalid request method\n- 1203: Requested resource does not exist\n- 1300: Trigger platform strategy\n- 1301: Trigger content security policy\n- 1302: API request too frequent\n- 1303: Concurrency/QPS exceeds limit\n- 1304: Trigger IP whitelist policy\n- 5000: Internal server error\n- 5001: Service temporarily unavailable\n- 5002: Server internal timeout\n', + ) + message: str = Field(..., description='Human-readable error message') + request_id: str = Field( + ..., description='Request ID for tracking and troubleshooting' + ) + + +class Trajectory(BaseModel): + x: Optional[int] = Field( + None, + description='The horizontal coordinate of trajectory point. Based on bottom-left corner of image as origin (0,0).', + ) + y: Optional[int] = Field( + None, + description='The vertical coordinate of trajectory point. Based on bottom-left corner of image as origin (0,0).', + ) + + +class DynamicMask(BaseModel): + mask: Optional[AnyUrl] = Field( + None, + description='Dynamic Brush Application Area (Mask image created by users using the motion brush). 
The aspect ratio must match the input image.', + ) + trajectories: Optional[List[Trajectory]] = None + + +class TaskInfo(BaseModel): + external_task_id: Optional[str] = None + + class KlingImageGenAspectRatio(str, Enum): field_16_9 = '16:9' field_9_16 = '9:16' @@ -571,278 +880,42 @@ class KlingImageGenModelName(str, Enum): kling_v2 = 'kling-v2' +class KlingImageGenerationsRequest(BaseModel): + aspect_ratio: Optional[KlingImageGenAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + human_fidelity: Optional[float] = Field( + 0.45, description='Subject reference similarity', ge=0.0, le=1.0 + ) + image: Optional[str] = Field( + None, description='Reference Image - Base64 encoded string or image URL' + ) + image_fidelity: Optional[float] = Field( + 0.5, description='Reference intensity for user-uploaded images', ge=0.0, le=1.0 + ) + image_reference: Optional[KlingImageGenImageReferenceType] = None + model_name: Optional[KlingImageGenModelName] = 'kling-v1' + n: Optional[int] = Field(1, description='Number of generated images', ge=1, le=9) + negative_prompt: Optional[str] = Field( + None, description='Negative text prompt', max_length=200 + ) + prompt: str = Field(..., description='Positive text prompt', max_length=500) + + class KlingImageResult(BaseModel): index: Optional[int] = Field(None, description='Image Number (0-9)') url: Optional[AnyUrl] = Field(None, description='URL for generated image') -class KlingVirtualTryOnModelName(str, Enum): - kolors_virtual_try_on_v1 = 'kolors-virtual-try-on-v1' - kolors_virtual_try_on_v1_5 = 'kolors-virtual-try-on-v1-5' +class KlingLipSyncMode(str, Enum): + text2video = 'text2video' + audio2video = 'audio2video' -class TaskInfo(BaseModel): - external_task_id: Optional[str] = None - - -class TaskResult(BaseModel): - videos: Optional[List[KlingVideoResult]] = None - - -class Data(BaseModel): - task_id: Optional[str] = Field(None, description='Task ID') - task_status: Optional[KlingTaskStatus] = None - task_info: Optional[TaskInfo] = None - created_at: Optional[int] = Field(None, description='Task creation time') - updated_at: Optional[int] = Field(None, description='Task update time') - task_result: Optional[TaskResult] = None - - -class KlingText2VideoResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - data: Optional[Data] = None - - -class Trajectory(BaseModel): - x: Optional[int] = Field( - None, - description='The horizontal coordinate of trajectory point. Based on bottom-left corner of image as origin (0,0).', - ) - y: Optional[int] = Field( - None, - description='The vertical coordinate of trajectory point. Based on bottom-left corner of image as origin (0,0).', - ) - - -class DynamicMask(BaseModel): - mask: Optional[AnyUrl] = Field( - None, - description='Dynamic Brush Application Area (Mask image created by users using the motion brush). 
The aspect ratio must match the input image.', - ) - trajectories: Optional[List[Trajectory]] = None - - -class Data1(BaseModel): - task_id: Optional[str] = Field(None, description='Task ID') - task_status: Optional[KlingTaskStatus] = None - task_info: Optional[TaskInfo] = None - created_at: Optional[int] = Field(None, description='Task creation time') - updated_at: Optional[int] = Field(None, description='Task update time') - task_result: Optional[TaskResult] = None - - -class KlingImage2VideoResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - data: Optional[Data1] = None - - -class KlingVideoExtendRequest(BaseModel): - video_id: Optional[str] = Field( - None, - description='The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension.', - ) - prompt: Optional[str] = Field( - None, - description='Positive text prompt for guiding the video extension', - max_length=2500, - ) - negative_prompt: Optional[str] = Field( - None, - description='Negative text prompt for elements to avoid in the extended video', - max_length=2500, - ) - cfg_scale: Optional[KlingVideoGenCfgScale] = Field( - default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) - ) - callback_url: Optional[AnyUrl] = Field( - None, - description='The callback notification address. Server will notify when the task status changes.', - ) - - -class Data2(BaseModel): - task_id: Optional[str] = Field(None, description='Task ID') - task_status: Optional[KlingTaskStatus] = None - task_info: Optional[TaskInfo] = None - created_at: Optional[int] = Field(None, description='Task creation time') - updated_at: Optional[int] = Field(None, description='Task update time') - task_result: Optional[TaskResult] = None - - -class KlingVideoExtendResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - data: Optional[Data2] = None - - -class KlingLipSyncInputObject(BaseModel): - video_id: Optional[str] = Field( - None, - description='The ID of the video generated by Kling AI. Only supports 5-second and 10-second videos generated within the last 30 days.', - ) - video_url: Optional[str] = Field( - None, - description='Get link for uploaded video. Video files support .mp4/.mov, file size does not exceed 100MB, video length between 2-10s.', - ) - mode: KlingLipSyncMode - text: Optional[str] = Field( - None, - description='Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.', - ) - voice_id: Optional[str] = Field( - None, - description='Voice ID. Required when mode is text2video. The system offers a variety of voice options to choose from.', - ) - voice_language: Optional[KlingLipSyncVoiceLanguage] = 'en' - voice_speed: Optional[float] = Field( - 1, - description='Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place.', - ge=0.8, - le=2.0, - ) - audio_type: Optional[KlingAudioUploadType] = None - audio_file: Optional[str] = Field( - None, - description='Local Path of Audio File. Supported formats: .mp3/.wav/.m4a/.aac, maximum file size of 5MB. 
Base64 code.', - ) - audio_url: Optional[str] = Field( - None, - description='Audio File Download URL. Supported formats: .mp3/.wav/.m4a/.aac, maximum file size of 5MB.', - ) - - -class KlingLipSyncRequest(BaseModel): - input: KlingLipSyncInputObject - callback_url: Optional[AnyUrl] = Field( - None, - description='The callback notification address. Server will notify when the task status changes.', - ) - - -class Data3(BaseModel): - task_id: Optional[str] = Field(None, description='Task ID') - task_status: Optional[KlingTaskStatus] = None - task_info: Optional[TaskInfo] = None - created_at: Optional[int] = Field(None, description='Task creation time') - updated_at: Optional[int] = Field(None, description='Task update time') - task_result: Optional[TaskResult] = None - - -class KlingLipSyncResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - data: Optional[Data3] = None - - -class KlingSingleImageEffectInput(BaseModel): - model_name: KlingSingleImageEffectModelName - image: str = Field( - ..., - description='Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1.', - ) - duration: KlingSingleImageEffectDuration - - -class KlingDualCharacterEffectInput(BaseModel): - model_name: Optional[KlingCharacterEffectModelName] = 'kling-v1' - mode: Optional[KlingVideoGenMode] = 'std' - images: KlingDualCharacterImages - duration: KlingVideoGenDuration - - -class Data4(BaseModel): - task_id: Optional[str] = Field(None, description='Task ID') - task_status: Optional[KlingTaskStatus] = None - task_info: Optional[TaskInfo] = None - created_at: Optional[int] = Field(None, description='Task creation time') - updated_at: Optional[int] = Field(None, description='Task update time') - task_result: Optional[TaskResult] = None - - -class KlingVideoEffectsResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - data: Optional[Data4] = None - - -class KlingImageGenerationsRequest(BaseModel): - model_name: Optional[KlingImageGenModelName] = 'kling-v1' - prompt: str = Field(..., description='Positive text prompt', max_length=500) - negative_prompt: Optional[str] = Field( - None, description='Negative text prompt', max_length=200 - ) - image: Optional[str] = Field( - None, description='Reference Image - Base64 encoded string or image URL' - ) - image_reference: Optional[KlingImageGenImageReferenceType] = None - image_fidelity: Optional[float] = Field( - 0.5, description='Reference intensity for user-uploaded images', ge=0.0, le=1.0 - ) - human_fidelity: Optional[float] = Field( - 0.45, description='Subject reference similarity', ge=0.0, le=1.0 - ) - n: Optional[int] = Field(1, description='Number of generated images', ge=1, le=9) - aspect_ratio: Optional[KlingImageGenAspectRatio] = '16:9' - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - - -class TaskResult5(BaseModel): - images: Optional[List[KlingImageResult]] = None - - -class Data5(BaseModel): - task_id: Optional[str] = Field(None, description='Task ID') - task_status: Optional[KlingTaskStatus] = None - task_status_msg: Optional[str] = Field(None, description='Task 
status information') - created_at: Optional[int] = Field(None, description='Task creation time') - updated_at: Optional[int] = Field(None, description='Task update time') - task_result: Optional[TaskResult5] = None - - -class KlingImageGenerationsResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - data: Optional[Data5] = None - - -class KlingVirtualTryOnRequest(BaseModel): - model_name: Optional[KlingVirtualTryOnModelName] = 'kolors-virtual-try-on-v1' - human_image: str = Field( - ..., description='Reference human image - Base64 encoded string or image URL' - ) - cloth_image: Optional[str] = Field( - None, - description='Reference clothing image - Base64 encoded string or image URL', - ) - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - - -class Data6(BaseModel): - task_id: Optional[str] = Field(None, description='Task ID') - task_status: Optional[KlingTaskStatus] = None - task_status_msg: Optional[str] = Field(None, description='Task status information') - created_at: Optional[int] = Field(None, description='Task creation time') - updated_at: Optional[int] = Field(None, description='Task update time') - task_result: Optional[TaskResult5] = None - - -class KlingVirtualTryOnResponse(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - request_id: Optional[str] = Field(None, description='Request ID') - data: Optional[Data6] = None +class KlingLipSyncVoiceLanguage(str, Enum): + zh = 'zh' + en = 'en' class ResourcePackType(str, Enum): @@ -850,7 +923,7 @@ class ResourcePackType(str, Enum): constant_period = 'constant_period' -class Status(str, Enum): +class Status4(str, Enum): toBeOnline = 'toBeOnline' online = 'online' expired = 'expired' @@ -858,29 +931,29 @@ class Status(str, Enum): class ResourcePackSubscribeInfo(BaseModel): - resource_pack_name: Optional[str] = Field(None, description='Resource package name') - resource_pack_id: Optional[str] = Field(None, description='Resource package ID') - resource_pack_type: Optional[ResourcePackType] = Field( - None, - description='Resource package type (decreasing_total=decreasing total, constant_period=constant periodicity)', - ) - total_quantity: Optional[float] = Field(None, description='Total quantity') - remaining_quantity: Optional[float] = Field( - None, description='Remaining quantity (updated with a 12-hour delay)' - ) - purchase_time: Optional[int] = Field( - None, description='Purchase time, Unix timestamp in ms' - ) effective_time: Optional[int] = Field( None, description='Effective time, Unix timestamp in ms' ) invalid_time: Optional[int] = Field( None, description='Expiration time, Unix timestamp in ms' ) - status: Optional[Status] = Field(None, description='Resource Package Status') + purchase_time: Optional[int] = Field( + None, description='Purchase time, Unix timestamp in ms' + ) + remaining_quantity: Optional[float] = Field( + None, description='Remaining quantity (updated with a 12-hour delay)' + ) + resource_pack_id: Optional[str] = Field(None, description='Resource package ID') + resource_pack_name: Optional[str] = Field(None, description='Resource package name') + resource_pack_type: Optional[ResourcePackType] = Field( + None, + description='Resource package type (decreasing_total=decreasing total, 
constant_period=constant periodicity)', + ) + status: Optional[Status4] = Field(None, description='Resource Package Status') + total_quantity: Optional[float] = Field(None, description='Total quantity') -class Data7(BaseModel): +class Data3(BaseModel): code: Optional[int] = Field(None, description='Error code; 0 indicates success') msg: Optional[str] = Field(None, description='Error information') resource_pack_subscribe_infos: Optional[List[ResourcePackSubscribeInfo]] = Field( @@ -890,137 +963,313 @@ class Data7(BaseModel): class KlingResourcePackageResponse(BaseModel): code: Optional[int] = Field(None, description='Error code; 0 indicates success') + data: Optional[Data3] = None message: Optional[str] = Field(None, description='Error information') request_id: Optional[str] = Field( None, description='Request ID, generated by the system, used to track requests and troubleshoot problems', ) + + +class KlingSingleImageEffectDuration(str, Enum): + field_5 = '5' + + +class KlingSingleImageEffectModelName(str, Enum): + kling_v1_6 = 'kling-v1-6' + + +class KlingSingleImageEffectsScene(str, Enum): + bloombloom = 'bloombloom' + dizzydizzy = 'dizzydizzy' + fuzzyfuzzy = 'fuzzyfuzzy' + squish = 'squish' + expansion = 'expansion' + + +class KlingTaskStatus(str, Enum): + submitted = 'submitted' + processing = 'processing' + succeed = 'succeed' + failed = 'failed' + + +class KlingTextToVideoModelName(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_6 = 'kling-v1-6' + + +class KlingVideoGenAspectRatio(str, Enum): + field_16_9 = '16:9' + field_9_16 = '9:16' + field_1_1 = '1:1' + + +class KlingVideoGenCfgScale(RootModel[float]): + root: float = Field( + ..., + description="Flexibility in video generation. The higher the value, the lower the model's degree of flexibility, and the stronger the relevance to the user's prompt.", + ge=0.0, + le=1.0, + ) + + +class KlingVideoGenDuration(str, Enum): + field_5 = '5' + field_10 = '10' + + +class KlingVideoGenMode(str, Enum): + std = 'std' + pro = 'pro' + + +class KlingVideoGenModelName(str, Enum): + kling_v1 = 'kling-v1' + kling_v1_5 = 'kling-v1-5' + kling_v1_6 = 'kling-v1-6' + kling_v2_master = 'kling-v2-master' + + +class KlingVideoResult(BaseModel): + duration: Optional[str] = Field(None, description='Total video duration') + id: Optional[str] = Field(None, description='Generated video ID') + url: Optional[AnyUrl] = Field(None, description='URL for generated video') + + +class KlingVirtualTryOnModelName(str, Enum): + kolors_virtual_try_on_v1 = 'kolors-virtual-try-on-v1' + kolors_virtual_try_on_v1_5 = 'kolors-virtual-try-on-v1-5' + + +class KlingVirtualTryOnRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + cloth_image: Optional[str] = Field( + None, + description='Reference clothing image - Base64 encoded string or image URL', + ) + human_image: str = Field( + ..., description='Reference human image - Base64 encoded string or image URL' + ) + model_name: Optional[KlingVirtualTryOnModelName] = 'kolors-virtual-try-on-v1' + + +class TaskResult6(BaseModel): + images: Optional[List[KlingImageResult]] = None + + +class Data7(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_result: Optional[TaskResult6] = None + task_status: Optional[KlingTaskStatus] = None + task_status_msg: Optional[str] = Field(None, description='Task status information') + updated_at: Optional[int] = Field(None, 
description='Task update time') + + +class KlingVirtualTryOnResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') data: Optional[Data7] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') -class Object(str, Enum): - event = 'event' +class LumaAspectRatio(str, Enum): + field_1_1 = '1:1' + field_16_9 = '16:9' + field_9_16 = '9:16' + field_4_3 = '4:3' + field_3_4 = '3:4' + field_21_9 = '21:9' + field_9_21 = '9:21' -class Type(str, Enum): - payment_intent_succeeded = 'payment_intent.succeeded' +class LumaAssets(BaseModel): + image: Optional[AnyUrl] = Field(None, description='The URL of the image') + progress_video: Optional[AnyUrl] = Field( + None, description='The URL of the progress video' + ) + video: Optional[AnyUrl] = Field(None, description='The URL of the video') -class StripeRequestInfo(BaseModel): - id: Optional[str] = None - idempotency_key: Optional[str] = None +class GenerationType(str, Enum): + add_audio = 'add_audio' -class Object1(str, Enum): - payment_intent = 'payment_intent' +class LumaAudioGenerationRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the audio' + ) + generation_type: Optional[GenerationType] = 'add_audio' + negative_prompt: Optional[str] = Field( + None, description='The negative prompt of the audio' + ) + prompt: Optional[str] = Field(None, description='The prompt of the audio') -class StripeAmountDetails(BaseModel): - tip: Optional[Dict[str, Any]] = None +class LumaError(BaseModel): + detail: Optional[str] = Field(None, description='The error message') -class Object2(str, Enum): - charge = 'charge' +class Type11(str, Enum): + generation = 'generation' -class StripeAddress(BaseModel): - city: Optional[str] = None - country: Optional[str] = None - line1: Optional[str] = None - line2: Optional[str] = None - postal_code: Optional[str] = None - state: Optional[str] = None +class LumaGenerationReference(BaseModel): + id: UUID = Field(..., description='The ID of the generation') + type: Literal['generation'] -class StripeOutcome(BaseModel): - advice_code: Optional[Any] = None - network_advice_code: Optional[Any] = None - network_decline_code: Optional[Any] = None - network_status: Optional[str] = None - reason: Optional[Any] = None - risk_level: Optional[str] = None - risk_score: Optional[int] = None - seller_message: Optional[str] = None - type: Optional[str] = None +class GenerationType1(str, Enum): + video = 'video' -class Checks(BaseModel): - address_line1_check: Optional[Any] = None - address_postal_code_check: Optional[Any] = None - cvc_check: Optional[str] = None +class LumaGenerationType(str, Enum): + video = 'video' + image = 'image' -class ExtendedAuthorization(BaseModel): - status: Optional[str] = None +class GenerationType2(str, Enum): + image = 'image' -class IncrementalAuthorization(BaseModel): - status: Optional[str] = None +class LumaImageIdentity(BaseModel): + images: Optional[List[AnyUrl]] = Field( + None, description='The URLs of the image identity' + ) -class Multicapture(BaseModel): - status: Optional[str] = None +class LumaImageModel(str, Enum): + photon_1 = 'photon-1' + photon_flash_1 = 'photon-flash-1' -class NetworkToken(BaseModel): - used: Optional[bool] = None +class LumaImageRef(BaseModel): + url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') + weight: Optional[float] = Field( + None, description='The weight of the image reference' + ) 
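+
+
+# --- Editor's sketch (not produced by the code generator; assumptions noted) ---
+# The request/response classes in this module are ordinary pydantic v2 models,
+# so a caller builds a request body by instantiating the class and serializing
+# it. A minimal, hypothetical example for the LumaAudioGenerationRequest model
+# defined above; the prompt text and callback URL are placeholders, not values
+# taken from the upstream API:
+#
+#     req = LumaAudioGenerationRequest(
+#         prompt='gentle rain on a tin roof',
+#         negative_prompt='speech',
+#         callback_url='https://example.com/luma/callback',
+#     )
+#     body = req.model_dump(mode='json', exclude_none=True)
+#     # {'generation_type': 'add_audio', 'prompt': 'gentle rain on a tin roof', ...}
+#
+# model_dump(mode='json') renders AnyUrl/UUID/enum values as JSON-safe
+# primitives, and exclude_none=True drops Optional fields that were never set,
+# which is typically how omitted parameters are expressed for these APIs.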
-class Overcapture(BaseModel): - maximum_amount_capturable: Optional[int] = None - status: Optional[str] = None +class Type12(str, Enum): + image = 'image' -class StripeCardDetails(BaseModel): - amount_authorized: Optional[int] = None - authorization_code: Optional[Any] = None - brand: Optional[str] = None - checks: Optional[Checks] = None - country: Optional[str] = None - exp_month: Optional[int] = None - exp_year: Optional[int] = None - extended_authorization: Optional[ExtendedAuthorization] = None - fingerprint: Optional[str] = None - funding: Optional[str] = None - incremental_authorization: Optional[IncrementalAuthorization] = None - installments: Optional[Any] = None - last4: Optional[str] = None - mandate: Optional[Any] = None - multicapture: Optional[Multicapture] = None - network: Optional[str] = None - network_token: Optional[NetworkToken] = None - network_transaction_id: Optional[str] = None - overcapture: Optional[Overcapture] = None - regulated_status: Optional[str] = None - three_d_secure: Optional[Any] = None - wallet: Optional[Any] = None +class LumaImageReference(BaseModel): + type: Literal['image'] + url: AnyUrl = Field(..., description='The URL of the image') -class StripeRefundList(BaseModel): - object: Optional[str] = None - data: Optional[List[Dict[str, Any]]] = None - has_more: Optional[bool] = None - total_count: Optional[int] = None - url: Optional[str] = None +class LumaKeyframe(RootModel[Union[LumaGenerationReference, LumaImageReference]]): + root: Union[LumaGenerationReference, LumaImageReference] = Field( + ..., + description='A keyframe can be either a Generation reference, an Image, or a Video', + discriminator='type', + ) -class Card(BaseModel): - installments: Optional[Any] = None - mandate_options: Optional[Any] = None - network: Optional[Any] = None - request_three_d_secure: Optional[str] = None +class LumaKeyframes(BaseModel): + frame0: Optional[LumaKeyframe] = None + frame1: Optional[LumaKeyframe] = None -class StripePaymentMethodOptions(BaseModel): - card: Optional[Card] = None +class LumaModifyImageRef(BaseModel): + url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') + weight: Optional[float] = Field( + None, description='The weight of the modify image reference' + ) -class StripeShipping(BaseModel): - address: Optional[StripeAddress] = None - carrier: Optional[str] = None - name: Optional[str] = None - phone: Optional[str] = None - tracking_number: Optional[str] = None +class LumaState(str, Enum): + queued = 'queued' + dreaming = 'dreaming' + completed = 'completed' + failed = 'failed' + + +class GenerationType3(str, Enum): + upscale_video = 'upscale_video' + + +class LumaVideoModel(str, Enum): + ray_2 = 'ray-2' + ray_flash_2 = 'ray-flash-2' + ray_1_6 = 'ray-1-6' + + +class LumaVideoModelOutputDuration1(str, Enum): + field_5s = '5s' + field_9s = '9s' + + +class LumaVideoModelOutputDuration( + RootModel[Union[LumaVideoModelOutputDuration1, str]] +): + root: Union[LumaVideoModelOutputDuration1, str] + + +class LumaVideoModelOutputResolution1(str, Enum): + field_540p = '540p' + field_720p = '720p' + field_1080p = '1080p' + field_4k = '4k' + + +class LumaVideoModelOutputResolution( + RootModel[Union[LumaVideoModelOutputResolution1, str]] +): + root: Union[LumaVideoModelOutputResolution1, str] + + +class MinimaxBaseResponse(BaseModel): + status_code: int = Field( + ..., + description='Status code. 
0 indicates success, other values indicate errors.', + ) + status_msg: str = Field( + ..., description='Specific error details or success message.' + ) + + +class File(BaseModel): + bytes: Optional[int] = Field(None, description='File size in bytes') + created_at: Optional[int] = Field( + None, description='Unix timestamp when the file was created, in seconds' + ) + download_url: Optional[str] = Field( + None, description='The URL to download the video' + ) + file_id: Optional[int] = Field(None, description='Unique identifier for the file') + filename: Optional[str] = Field(None, description='The name of the file') + purpose: Optional[str] = Field(None, description='The purpose of using the file') + + +class MinimaxFileRetrieveResponse(BaseModel): + base_resp: MinimaxBaseResponse + file: File + + +class Status5(str, Enum): + Queueing = 'Queueing' + Preparing = 'Preparing' + Processing = 'Processing' + Success = 'Success' + Fail = 'Fail' + + +class MinimaxTaskResultResponse(BaseModel): + base_resp: MinimaxBaseResponse + file_id: Optional[str] = Field( + None, + description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', + ) + status: Status5 = Field( + ..., + description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", + ) + task_id: str = Field(..., description='The task ID being queried.') class Model(str, Enum): @@ -1043,6 +1292,14 @@ class SubjectReferenceItem(BaseModel): class MinimaxVideoGenerationRequest(BaseModel): + callback_url: Optional[str] = Field( + None, + description='Optional. URL to receive real-time status updates about the video generation task.', + ) + first_frame_image: Optional[str] = Field( + None, + description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', + ) model: Model = Field( ..., description='Required. ID of model. Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', @@ -1056,927 +1313,175 @@ class MinimaxVideoGenerationRequest(BaseModel): True, description='If true (default), the model will automatically optimize the prompt. Set to false for more precise control.', ) - first_frame_image: Optional[str] = Field( - None, - description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', - ) subject_reference: Optional[List[SubjectReferenceItem]] = Field( None, description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', ) - callback_url: Optional[str] = Field( - None, - description='Optional. URL to receive real-time status updates about the video generation task.', - ) - - -class MinimaxBaseResponse(BaseModel): - status_code: int = Field( - ..., - description='Status code. 0 indicates success, other values indicate errors.', - ) - status_msg: str = Field( - ..., description='Specific error details or success message.' - ) class MinimaxVideoGenerationResponse(BaseModel): + base_resp: MinimaxBaseResponse task_id: str = Field( ..., description='The task ID for the asynchronous video generation task.' 
) - base_resp: MinimaxBaseResponse -class File(BaseModel): - file_id: Optional[int] = Field(None, description='Unique identifier for the file') - bytes: Optional[int] = Field(None, description='File size in bytes') - created_at: Optional[int] = Field( - None, description='Unix timestamp when the file was created, in seconds' +class Truncation(str, Enum): + disabled = 'disabled' + auto = 'auto' + + +class ModelResponseProperties(BaseModel): + instructions: Optional[str] = Field( + None, description='Instructions for the model on how to generate the response' ) - filename: Optional[str] = Field(None, description='The name of the file') - purpose: Optional[str] = Field(None, description='The purpose of using the file') - download_url: Optional[str] = Field( - None, description='The URL to download the video' + max_output_tokens: Optional[int] = Field( + None, description='Maximum number of tokens to generate' + ) + model: Optional[str] = Field( + None, description='The model used to generate the response' + ) + temperature: Optional[float] = Field( + 1, description='Controls randomness in the response', ge=0.0, le=2.0 + ) + top_p: Optional[float] = Field( + 1, + description='Controls diversity of the response via nucleus sampling', + ge=0.0, + le=1.0, + ) + truncation: Optional[Truncation] = Field( + 'disabled', description='How to handle truncation of the response' ) -class MinimaxFileRetrieveResponse(BaseModel): - file: File - base_resp: MinimaxBaseResponse +class Moderation(str, Enum): + low = 'low' + auto = 'auto' -class Status1(str, Enum): - Queueing = 'Queueing' - Preparing = 'Preparing' - Processing = 'Processing' - Success = 'Success' - Fail = 'Fail' - - -class MinimaxTaskResultResponse(BaseModel): - task_id: str = Field(..., description='The task ID being queried.') - status: Status1 = Field( - ..., - description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", - ) - file_id: Optional[str] = Field( - None, - description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', - ) - base_resp: MinimaxBaseResponse - - -class OutputFormat(str, Enum): - jpeg = 'jpeg' +class OutputFormat1(str, Enum): png = 'png' - - -class BFLFluxPro11GenerateRequest(BaseModel): - prompt: str = Field(..., description='The main text prompt for image generation') - image_prompt: Optional[str] = Field(None, description='Optional image prompt') - width: int = Field(..., description='Width of the generated image') - height: int = Field(..., description='Height of the generated image') - prompt_upsampling: Optional[bool] = Field( - None, description='Whether to use prompt upsampling' - ) - seed: Optional[int] = Field(None, description='Random seed for reproducibility') - safety_tolerance: Optional[int] = Field(None, description='Safety tolerance level') - output_format: Optional[OutputFormat] = Field( - None, description='Output image format' - ) - webhook_url: Optional[str] = Field( - None, description='Optional webhook URL for async processing' - ) - webhook_secret: Optional[str] = Field( - None, description='Optional webhook secret for async processing' - ) - - -class BFLFluxPro11GenerateResponse(BaseModel): - id: str = Field(..., description='Job ID for tracking') - polling_url: str = Field(..., description='URL to poll for results') - - -class BFLFluxProGenerateRequest(BaseModel): - prompt: str = Field(..., description='The text prompt 
for image generation.') - negative_prompt: Optional[str] = Field( - None, description='The negative prompt for image generation.' - ) - width: int = Field( - ..., description='The width of the image to generate.', ge=64, le=2048 - ) - height: int = Field( - ..., description='The height of the image to generate.', ge=64, le=2048 - ) - num_inference_steps: Optional[int] = Field( - None, description='The number of inference steps.', ge=1, le=100 - ) - guidance_scale: Optional[float] = Field( - None, description='The guidance scale for generation.', ge=1.0, le=20.0 - ) - seed: Optional[int] = Field(None, description='The seed value for reproducibility.') - num_images: Optional[int] = Field( - None, description='The number of images to generate.', ge=1, le=4 - ) - - -class BFLFluxProGenerateResponse(BaseModel): - id: str = Field(..., description='The unique identifier for the generation task.') - polling_url: str = Field(..., description='URL to poll for the generation result.') - - -class Steps(RootModel[int]): - root: int = Field( - ..., - description='Number of steps for the image generation process', - examples=[50], - ge=15, - le=50, - title='Steps', - ) - - -class Guidance(RootModel[float]): - root: float = Field( - ..., - description='Guidance strength for the image generation process', - ge=1.5, - le=100.0, - title='Guidance', - ) - - -class WebhookUrl(RootModel[AnyUrl]): - root: AnyUrl = Field( - ..., description='URL to receive webhook notifications', title='Webhook Url' - ) - - -class BFLAsyncResponse(BaseModel): - id: str = Field(..., title='Id') - polling_url: str = Field(..., title='Polling Url') - - -class BFLAsyncWebhookResponse(BaseModel): - id: str = Field(..., title='Id') - status: str = Field(..., title='Status') - webhook_url: str = Field(..., title='Webhook Url') - - -class Top(RootModel[int]): - root: int = Field( - ..., - description='Number of pixels to expand at the top of the image', - ge=0, - le=2048, - title='Top', - ) - - -class Bottom(RootModel[int]): - root: int = Field( - ..., - description='Number of pixels to expand at the bottom of the image', - ge=0, - le=2048, - title='Bottom', - ) - - -class Left(RootModel[int]): - root: int = Field( - ..., - description='Number of pixels to expand on the left side of the image', - ge=0, - le=2048, - title='Left', - ) - - -class Right(RootModel[int]): - root: int = Field( - ..., - description='Number of pixels to expand on the right side of the image', - ge=0, - le=2048, - title='Right', - ) - - -class CannyLowThreshold(RootModel[int]): - root: int = Field( - ..., - description='Low threshold for Canny edge detection', - ge=0, - le=500, - title='Canny Low Threshold', - ) - - -class CannyHighThreshold(RootModel[int]): - root: int = Field( - ..., - description='High threshold for Canny edge detection', - ge=0, - le=500, - title='Canny High Threshold', - ) - - -class Steps2(RootModel[int]): - root: int = Field( - ..., - description='Number of steps for the image generation process', - ge=15, - le=50, - title='Steps', - ) - - -class Guidance2(RootModel[float]): - root: float = Field( - ..., - description='Guidance strength for the image generation process', - ge=1.0, - le=100.0, - title='Guidance', - ) - - -class BFLOutputFormat(str, Enum): - jpeg = 'jpeg' - png = 'png' - - -class BFLValidationError(BaseModel): - loc: List[Union[str, int]] = Field(..., title='Location') - msg: str = Field(..., title='Message') - type: str = Field(..., title='Error Type') - - -class Datum2(BaseModel): - image_id: Optional[str] = Field( - None, 
description='Unique identifier for the generated image' - ) - url: Optional[str] = Field(None, description='URL to access the generated image') - - -class RecraftImageGenerationResponse(BaseModel): - created: int = Field( - ..., description='Unix timestamp when the generation was created' - ) - credits: int = Field(..., description='Number of credits used for the generation') - data: List[Datum2] = Field(..., description='Array of generated image information') - - -class RecraftImageFeatures(BaseModel): - nsfw_score: Optional[float] = None - - -class RecraftTextLayoutItem(BaseModel): - bbox: List[List[float]] - text: str - - -class RecraftImageColor(BaseModel): - rgb: Optional[List[int]] = None - std: Optional[List[float]] = None - weight: Optional[float] = None - - -class RecraftImageStyle(str, Enum): - digital_illustration = 'digital_illustration' - icon = 'icon' - realistic_image = 'realistic_image' - vector_illustration = 'vector_illustration' - - -class RecraftImageSubStyle(str, Enum): - field_2d_art_poster = '2d_art_poster' - field_3d = '3d' - field_80s = '80s' - glow = 'glow' - grain = 'grain' - hand_drawn = 'hand_drawn' - infantile_sketch = 'infantile_sketch' - kawaii = 'kawaii' - pixel_art = 'pixel_art' - psychedelic = 'psychedelic' - seamless = 'seamless' - voxel = 'voxel' - watercolor = 'watercolor' - broken_line = 'broken_line' - colored_outline = 'colored_outline' - colored_shapes = 'colored_shapes' - colored_shapes_gradient = 'colored_shapes_gradient' - doodle_fill = 'doodle_fill' - doodle_offset_fill = 'doodle_offset_fill' - offset_fill = 'offset_fill' - outline = 'outline' - outline_gradient = 'outline_gradient' - uneven_fill = 'uneven_fill' - field_70s = '70s' - cartoon = 'cartoon' - doodle_line_art = 'doodle_line_art' - engraving = 'engraving' - flat_2 = 'flat_2' - kawaii_1 = 'kawaii' - line_art = 'line_art' - linocut = 'linocut' - seamless_1 = 'seamless' - b_and_w = 'b_and_w' - enterprise = 'enterprise' - hard_flash = 'hard_flash' - hdr = 'hdr' - motion_blur = 'motion_blur' - natural_light = 'natural_light' - studio_portrait = 'studio_portrait' - line_circuit = 'line_circuit' - field_2d_art_poster_2 = '2d_art_poster_2' - engraving_color = 'engraving_color' - flat_air_art = 'flat_air_art' - hand_drawn_outline = 'hand_drawn_outline' - handmade_3d = 'handmade_3d' - stickers_drawings = 'stickers_drawings' - plastic = 'plastic' - pictogram = 'pictogram' - - -class RecraftTransformModel(str, Enum): - refm1 = 'refm1' - recraft20b = 'recraft20b' - recraftv2 = 'recraftv2' - recraftv3 = 'recraftv3' - flux1_1pro = 'flux1_1pro' - flux1dev = 'flux1dev' - imagen3 = 'imagen3' - hidream_i1_dev = 'hidream_i1_dev' - - -class RecraftImageFormat(str, Enum): webp = 'webp' - png = 'png' + jpeg = 'jpeg' -class RecraftResponseFormat(str, Enum): +class OpenAIImageEditRequest(BaseModel): + background: Optional[str] = Field( + None, description='Background transparency', examples=['opaque'] + ) + model: str = Field( + ..., description='The model to use for image editing', examples=['gpt-image-1'] + ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + n: Optional[int] = Field( + None, description='The number of images to generate', examples=[1] + ) + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] + ) + output_format: Optional[OutputFormat1] = Field( + None, description='Format of the output image', examples=['png'] + ) + prompt: str = Field( + ..., + 
description='A text description of the desired edit', + examples=['Give the rocketship rainbow coloring'], + ) + quality: Optional[str] = Field( + None, description='The quality of the edited image', examples=['low'] + ) + size: Optional[str] = Field( + None, description='Size of the output image', examples=['1024x1024'] + ) + user: Optional[str] = Field( + None, + description='A unique identifier for end-user monitoring', + examples=['user-1234'], + ) + + +class Background(str, Enum): + transparent = 'transparent' + opaque = 'opaque' + + +class Quality(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + standard = 'standard' + hd = 'hd' + + +class ResponseFormat(str, Enum): url = 'url' b64_json = 'b64_json' -class RecraftImage(BaseModel): - b64_json: Optional[str] = None - features: Optional[RecraftImageFeatures] = None - image_id: UUID - revised_prompt: Optional[str] = None - url: Optional[str] = None - - -class RecraftUserControls(BaseModel): - artistic_level: Optional[int] = None - background_color: Optional[RecraftImageColor] = None - colors: Optional[List[RecraftImageColor]] = None - no_text: Optional[bool] = None - - -class RecraftTextLayout(RootModel[List[RecraftTextLayoutItem]]): - root: List[RecraftTextLayoutItem] - - -class RecraftProcessImageRequest(BaseModel): - image: StrictBytes - image_format: Optional[RecraftImageFormat] = None - response_format: Optional[RecraftResponseFormat] = None - - -class RecraftProcessImageResponse(BaseModel): - created: int - credits: int - image: RecraftImage - - -class RecraftImageToImageRequest(BaseModel): - block_nsfw: Optional[bool] = None - calculate_features: Optional[bool] = None - controls: Optional[RecraftUserControls] = None - image: StrictBytes - image_format: Optional[RecraftImageFormat] = None - model: Optional[RecraftTransformModel] = None - n: Optional[int] = None - negative_prompt: Optional[str] = None - prompt: str - random_seed: Optional[int] = None - response_format: Optional[RecraftResponseFormat] = None - strength: float - style: Optional[RecraftImageStyle] = None - style_id: Optional[UUID] = None - substyle: Optional[RecraftImageSubStyle] = None - text_layout: Optional[RecraftTextLayout] = None - - -class RecraftGenerateImageResponse(BaseModel): - created: int - credits: int - data: List[RecraftImage] - - -class RecraftTransformImageWithMaskRequest(BaseModel): - block_nsfw: Optional[bool] = None - calculate_features: Optional[bool] = None - image: StrictBytes - image_format: Optional[RecraftImageFormat] = None - mask: StrictBytes - model: Optional[RecraftTransformModel] = None - n: Optional[int] = None - negative_prompt: Optional[str] = None - prompt: str - random_seed: Optional[int] = None - response_format: Optional[RecraftResponseFormat] = None - style: Optional[RecraftImageStyle] = None - style_id: Optional[UUID] = None - substyle: Optional[RecraftImageSubStyle] = None - text_layout: Optional[RecraftTextLayout] = None - - -class KlingErrorResponse(BaseModel): - code: int = Field( - ..., - description='- 1000: Authentication failed\n- 1001: Authorization is empty\n- 1002: Authorization is invalid\n- 1003: Authorization is not yet valid\n- 1004: Authorization has expired\n- 1100: Account exception\n- 1101: Account in arrears (postpaid scenario)\n- 1102: Resource pack depleted or expired (prepaid scenario)\n- 1103: Unauthorized access to requested resource\n- 1200: Invalid request parameters\n- 1201: Invalid parameters\n- 1202: Invalid request method\n- 1203: Requested resource does not exist\n- 1300: Trigger 
platform strategy\n- 1301: Trigger content security policy\n- 1302: API request too frequent\n- 1303: Concurrency/QPS exceeds limit\n- 1304: Trigger IP whitelist policy\n- 5000: Internal server error\n- 5001: Service temporarily unavailable\n- 5002: Server internal timeout\n', - ) - message: str = Field(..., description='Human-readable error message') - request_id: str = Field( - ..., description='Request ID for tracking and troubleshooting' - ) - - -class LumaAspectRatio(str, Enum): - field_1_1 = '1:1' - field_16_9 = '16:9' - field_9_16 = '9:16' - field_4_3 = '4:3' - field_3_4 = '3:4' - field_21_9 = '21:9' - field_9_21 = '9:21' - - -class LumaVideoModel(str, Enum): - ray_2 = 'ray-2' - ray_flash_2 = 'ray-flash-2' - ray_1_6 = 'ray-1-6' - - -class LumaVideoModelOutputResolution1(str, Enum): - field_540p = '540p' - field_720p = '720p' - field_1080p = '1080p' - field_4k = '4k' - - -class LumaVideoModelOutputResolution( - RootModel[Union[LumaVideoModelOutputResolution1, str]] -): - root: Union[LumaVideoModelOutputResolution1, str] - - -class LumaVideoModelOutputDuration1(str, Enum): - field_5s = '5s' - field_9s = '9s' - - -class LumaVideoModelOutputDuration( - RootModel[Union[LumaVideoModelOutputDuration1, str]] -): - root: Union[LumaVideoModelOutputDuration1, str] - - -class LumaImageModel(str, Enum): - photon_1 = 'photon-1' - photon_flash_1 = 'photon-flash-1' - - -class LumaImageRef(BaseModel): - url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') - weight: Optional[float] = Field( - None, description='The weight of the image reference' - ) - - -class LumaImageIdentity(BaseModel): - images: Optional[List[AnyUrl]] = Field( - None, description='The URLs of the image identity' - ) - - -class LumaModifyImageRef(BaseModel): - url: Optional[AnyUrl] = Field(None, description='The URL of the image reference') - weight: Optional[float] = Field( - None, description='The weight of the modify image reference' - ) - - -class Type1(str, Enum): - generation = 'generation' - - -class LumaGenerationReference(BaseModel): - type: Literal['generation'] - id: UUID = Field(..., description='The ID of the generation') - - -class Type2(str, Enum): - image = 'image' - - -class LumaImageReference(BaseModel): - type: Literal['image'] - url: AnyUrl = Field(..., description='The URL of the image') - - -class LumaKeyframe(RootModel[Union[LumaGenerationReference, LumaImageReference]]): - root: Union[LumaGenerationReference, LumaImageReference] = Field( - ..., - description='A keyframe can be either a Generation reference, an Image, or a Video', - discriminator='type', - ) - - -class LumaGenerationType(str, Enum): - video = 'video' - image = 'image' - - -class LumaState(str, Enum): - queued = 'queued' - dreaming = 'dreaming' - completed = 'completed' - failed = 'failed' - - -class LumaAssets(BaseModel): - video: Optional[AnyUrl] = Field(None, description='The URL of the video') - image: Optional[AnyUrl] = Field(None, description='The URL of the image') - progress_video: Optional[AnyUrl] = Field( - None, description='The URL of the progress video' - ) - - -class GenerationType(str, Enum): - video = 'video' - - -class GenerationType1(str, Enum): - image = 'image' - - -class CharacterRef(BaseModel): - identity0: Optional[LumaImageIdentity] = None - - -class LumaImageGenerationRequest(BaseModel): - generation_type: Optional[GenerationType1] = 'image' - model: Optional[LumaImageModel] = 'photon-1' - prompt: Optional[str] = Field(None, description='The prompt of the generation') - aspect_ratio: 
Optional[LumaAspectRatio] = '16:9' - callback_url: Optional[AnyUrl] = Field( - None, description='The callback URL for the generation' - ) - image_ref: Optional[List[LumaImageRef]] = None - style_ref: Optional[List[LumaImageRef]] = None - character_ref: Optional[CharacterRef] = None - modify_image_ref: Optional[LumaModifyImageRef] = None - - -class GenerationType2(str, Enum): - upscale_video = 'upscale_video' - - -class LumaUpscaleVideoGenerationRequest(BaseModel): - generation_type: Optional[GenerationType2] = 'upscale_video' - resolution: Optional[LumaVideoModelOutputResolution] = None - callback_url: Optional[AnyUrl] = Field( - None, description='The callback URL for the upscale' - ) - - -class GenerationType3(str, Enum): - add_audio = 'add_audio' - - -class LumaAudioGenerationRequest(BaseModel): - generation_type: Optional[GenerationType3] = 'add_audio' - prompt: Optional[str] = Field(None, description='The prompt of the audio') - negative_prompt: Optional[str] = Field( - None, description='The negative prompt of the audio' - ) - callback_url: Optional[AnyUrl] = Field( - None, description='The callback URL for the audio' - ) - - -class LumaError(BaseModel): - detail: Optional[str] = Field(None, description='The error message') - - -class AspectRatio(str, Enum): - field_16_9 = '16:9' - field_4_3 = '4:3' - field_1_1 = '1:1' - field_3_4 = '3:4' - field_9_16 = '9:16' - - -class Duration(int, Enum): - integer_5 = 5 - integer_8 = 8 - - -class Model1(str, Enum): - v3_5 = 'v3.5' - - -class MotionMode(str, Enum): - normal = 'normal' - fast = 'fast' - - -class Quality(str, Enum): - field_360p = '360p' - field_540p = '540p' - field_720p = '720p' - field_1080p = '1080p' - - class Style(str, Enum): - anime = 'anime' - field_3d_animation = '3d_animation' - clay = 'clay' - comic = 'comic' - cyberpunk = 'cyberpunk' + vivid = 'vivid' + natural = 'natural' -class PixverseTextVideoRequest(BaseModel): - aspect_ratio: AspectRatio - duration: Duration - model: Model1 - motion_mode: Optional[MotionMode] = None - negative_prompt: Optional[str] = None - prompt: str - quality: Quality - seed: Optional[int] = None - style: Optional[Style] = None - template_id: Optional[int] = None - water_mark: Optional[bool] = None - - -class Resp(BaseModel): - video_id: Optional[int] = None - - -class PixverseVideoResponse(BaseModel): - ErrCode: Optional[int] = None - ErrMsg: Optional[str] = None - Resp_1: Optional[Resp] = Field(None, alias='Resp') - - -class Resp1(BaseModel): - img_id: Optional[int] = None - - -class PixverseImageUploadResponse(BaseModel): - ErrCode: Optional[int] = None - ErrMsg: Optional[str] = None - Resp: Optional[Resp1] = None - - -class PixverseImageVideoRequest(BaseModel): - img_id: int - model: Model1 - prompt: str - duration: Duration - quality: Quality - motion_mode: Optional[MotionMode] = None - seed: Optional[int] = None - style: Optional[Style] = None - template_id: Optional[int] = None - water_mark: Optional[bool] = None - - -class PixverseTransitionVideoRequest(BaseModel): - first_frame_img: int - last_frame_img: int - model: Model1 - duration: Duration - quality: Quality - motion_mode: MotionMode - seed: int - prompt: str - style: Optional[Style] = None - template_id: Optional[int] = None - water_mark: Optional[bool] = None - - -class Status2(int, Enum): - integer_1 = 1 - integer_5 = 5 - integer_6 = 6 - integer_7 = 7 - integer_8 = 8 - - -class Resp2(BaseModel): - create_time: Optional[str] = None - id: Optional[int] = None - modify_time: Optional[str] = None - negative_prompt: Optional[str] = 
None - outputHeight: Optional[int] = None - outputWidth: Optional[int] = None - prompt: Optional[str] = None - resolution_ratio: Optional[int] = None - seed: Optional[int] = None - size: Optional[int] = None - status: Optional[Status2] = Field( +class OpenAIImageGenerationRequest(BaseModel): + background: Optional[Background] = Field( + None, description='Background transparency', examples=['opaque'] + ) + model: Optional[str] = Field( + None, description='The model to use for image generation', examples=['dall-e-3'] + ) + moderation: Optional[Moderation] = Field( + None, description='Content moderation setting', examples=['auto'] + ) + n: Optional[int] = Field( None, - description='Video generation status codes:\n* 1 - Generation successful\n* 5 - Generating\n* 6 - Deleted\n* 7 - Contents moderation failed\n* 8 - Generation failed\n', + description='The number of images to generate (1-10). Only 1 supported for dall-e-3.', + examples=[1], ) - style: Optional[str] = None - url: Optional[str] = None - - -class PixverseVideoResultResponse(BaseModel): - ErrCode: Optional[int] = None - ErrMsg: Optional[str] = None - Resp: Optional[Resp2] = None - - -class Image(BaseModel): - bytesBase64Encoded: str - gcsUri: Optional[str] = None - mimeType: Optional[str] = None - - -class Image1(BaseModel): - bytesBase64Encoded: Optional[str] = None - gcsUri: str - mimeType: Optional[str] = None - - -class Instance(BaseModel): - prompt: str = Field(..., description='Text description of the video') - image: Optional[Union[Image, Image1]] = Field( - None, description='Optional image to guide video generation' + output_compression: Optional[int] = Field( + None, description='Compression level for JPEG or WebP (0-100)', examples=[100] ) - - -class PersonGeneration(str, Enum): - ALLOW = 'ALLOW' - BLOCK = 'BLOCK' - - -class Parameters(BaseModel): - aspectRatio: Optional[str] = Field(None, examples=['16:9']) - negativePrompt: Optional[str] = None - personGeneration: Optional[PersonGeneration] = None - sampleCount: Optional[int] = None - seed: Optional[int] = None - storageUri: Optional[str] = Field( - None, description='Optional Cloud Storage URI to upload the video' + output_format: Optional[OutputFormat1] = Field( + None, description='Format of the output image', examples=['png'] ) - durationSeconds: Optional[int] = None - enhancePrompt: Optional[bool] = None - - -class Veo2GenVidRequest(BaseModel): - instances: Optional[List[Instance]] = None - parameters: Optional[Parameters] = None - - -class Veo2GenVidResponse(BaseModel): - name: str = Field( + prompt: str = Field( ..., - description='Operation resource name', - examples=[ - 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/a1b07c8e-7b5a-4aba-bb34-3e1ccb8afcc8' - ], + description='A text description of the desired image', + examples=['Draw a rocket in front of a blackhole in deep space'], ) - - -class Veo2GenVidPollRequest(BaseModel): - operationName: str = Field( - ..., - description='Full operation name (from predict response)', - examples=[ - 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/OPERATION_ID' - ], + quality: Optional[Quality] = Field( + None, description='The quality of the generated image', examples=['high'] ) - - -class Video(BaseModel): - gcsUri: Optional[str] = Field(None, description='Cloud Storage URI of the video') - bytesBase64Encoded: Optional[str] = Field( - None, description='Base64-encoded video content' + response_format: Optional[ResponseFormat] = Field( + 
None, description='Response format of image data', examples=['b64_json'] ) - mimeType: Optional[str] = Field(None, description='Video MIME type') - - -class Response(BaseModel): - field_type: Optional[str] = Field( + size: Optional[str] = Field( None, - alias='@type', - examples=[ - 'type.googleapis.com/cloud.ai.large_models.vision.GenerateVideoResponse' - ], + description='Size of the image (e.g., 1024x1024, 1536x1024, auto)', + examples=['1024x1536'], ) - raiMediaFilteredCount: Optional[int] = Field( - None, description='Count of media filtered by responsible AI policies' + style: Optional[Style] = Field( + None, description='Style of the image (only for dall-e-3)', examples=['vivid'] ) - raiMediaFilteredReasons: Optional[List[str]] = Field( - None, description='Reasons why media was filtered by responsible AI policies' - ) - videos: Optional[List[Video]] = None - - -class Error1(BaseModel): - code: Optional[int] = Field(None, description='Error code') - message: Optional[str] = Field(None, description='Error message') - - -class Veo2GenVidPollResponse(BaseModel): - name: Optional[str] = None - done: Optional[bool] = None - response: Optional[Response] = Field( - None, description='The actual prediction response if done is true' - ) - error: Optional[Error1] = Field( - None, description='Error details if operation failed' + user: Optional[str] = Field( + None, + description='A unique identifier for end-user monitoring', + examples=['user-1234'], ) -class RunwayImageToVideoResponse(BaseModel): - id: Optional[str] = Field(None, description='Task ID') - - -class RunwayTaskStatusEnum(str, Enum): - SUCCEEDED = 'SUCCEEDED' - RUNNING = 'RUNNING' - FAILED = 'FAILED' - PENDING = 'PENDING' - CANCELLED = 'CANCELLED' - THROTTLED = 'THROTTLED' - - -class RunwayModelEnum(str, Enum): - gen4_turbo = 'gen4_turbo' - gen3a_turbo = 'gen3a_turbo' - - -class Position(str, Enum): - first = 'first' - last = 'last' - - -class RunwayPromptImageDetailedObject(BaseModel): - uri: str = Field( - ..., description='A HTTPS URL or data URI containing an encoded image.' - ) - position: Position = Field( - ..., - description="The position of the image in the output video. 'last' is currently supported for gen3a_turbo only.", - ) - - -class RunwayDurationEnum(int, Enum): - integer_5 = 5 - integer_10 = 10 - - -class RunwayAspectRatioEnum(str, Enum): - field_1280_720 = '1280:720' - field_720_1280 = '720:1280' - field_1104_832 = '1104:832' - field_832_1104 = '832:1104' - field_960_960 = '960:960' - field_1584_672 = '1584:672' - field_1280_768 = '1280:768' - field_768_1280 = '768:1280' - - -class RunwayPromptImageObject( - RootModel[Union[str, List[RunwayPromptImageDetailedObject]]] -): - root: Union[str, List[RunwayPromptImageDetailedObject]] = Field( - ..., - description='Image(s) to use for the video generation. 
Can be a single URI or an array of image objects with positions.', - ) - - -class Datum3(BaseModel): +class Datum2(BaseModel): b64_json: Optional[str] = Field(None, description='Base64 encoded image data') - url: Optional[str] = Field(None, description='URL of the image') revised_prompt: Optional[str] = Field(None, description='Revised prompt') + url: Optional[str] = Field(None, description='URL of the image') class InputTokensDetails(BaseModel): - text_tokens: Optional[int] = None image_tokens: Optional[int] = None + text_tokens: Optional[int] = None class Usage(BaseModel): @@ -1987,143 +1492,204 @@ class Usage(BaseModel): class OpenAIImageGenerationResponse(BaseModel): - data: Optional[List[Datum3]] = None + data: Optional[List[Datum2]] = None usage: Optional[Usage] = None -class Quality3(str, Enum): - low = 'low' - medium = 'medium' - high = 'high' - standard = 'standard' - hd = 'hd' +class OpenAIModels(str, Enum): + gpt_4 = 'gpt-4' + gpt_4_0314 = 'gpt-4-0314' + gpt_4_0613 = 'gpt-4-0613' + gpt_4_32k = 'gpt-4-32k' + gpt_4_32k_0314 = 'gpt-4-32k-0314' + gpt_4_32k_0613 = 'gpt-4-32k-0613' + gpt_4_0125_preview = 'gpt-4-0125-preview' + gpt_4_turbo = 'gpt-4-turbo' + gpt_4_turbo_2024_04_09 = 'gpt-4-turbo-2024-04-09' + gpt_4_turbo_preview = 'gpt-4-turbo-preview' + gpt_4_1106_preview = 'gpt-4-1106-preview' + gpt_4_vision_preview = 'gpt-4-vision-preview' + gpt_3_5_turbo = 'gpt-3.5-turbo' + gpt_3_5_turbo_16k = 'gpt-3.5-turbo-16k' + gpt_3_5_turbo_0301 = 'gpt-3.5-turbo-0301' + gpt_3_5_turbo_0613 = 'gpt-3.5-turbo-0613' + gpt_3_5_turbo_1106 = 'gpt-3.5-turbo-1106' + gpt_3_5_turbo_0125 = 'gpt-3.5-turbo-0125' + gpt_3_5_turbo_16k_0613 = 'gpt-3.5-turbo-16k-0613' + gpt_4_1 = 'gpt-4.1' + gpt_4_1_mini = 'gpt-4.1-mini' + gpt_4_1_nano = 'gpt-4.1-nano' + gpt_4_1_2025_04_14 = 'gpt-4.1-2025-04-14' + gpt_4_1_mini_2025_04_14 = 'gpt-4.1-mini-2025-04-14' + gpt_4_1_nano_2025_04_14 = 'gpt-4.1-nano-2025-04-14' + o1 = 'o1' + o1_mini = 'o1-mini' + o1_preview = 'o1-preview' + o1_pro = 'o1-pro' + o1_2024_12_17 = 'o1-2024-12-17' + o1_preview_2024_09_12 = 'o1-preview-2024-09-12' + o1_mini_2024_09_12 = 'o1-mini-2024-09-12' + o1_pro_2025_03_19 = 'o1-pro-2025-03-19' + o3 = 'o3' + o3_mini = 'o3-mini' + o3_2025_04_16 = 'o3-2025-04-16' + o3_mini_2025_01_31 = 'o3-mini-2025-01-31' + o4_mini = 'o4-mini' + o4_mini_2025_04_16 = 'o4-mini-2025-04-16' + gpt_4o = 'gpt-4o' + gpt_4o_mini = 'gpt-4o-mini' + gpt_4o_2024_11_20 = 'gpt-4o-2024-11-20' + gpt_4o_2024_08_06 = 'gpt-4o-2024-08-06' + gpt_4o_2024_05_13 = 'gpt-4o-2024-05-13' + gpt_4o_mini_2024_07_18 = 'gpt-4o-mini-2024-07-18' + gpt_4o_audio_preview = 'gpt-4o-audio-preview' + gpt_4o_audio_preview_2024_10_01 = 'gpt-4o-audio-preview-2024-10-01' + gpt_4o_audio_preview_2024_12_17 = 'gpt-4o-audio-preview-2024-12-17' + gpt_4o_mini_audio_preview = 'gpt-4o-mini-audio-preview' + gpt_4o_mini_audio_preview_2024_12_17 = 'gpt-4o-mini-audio-preview-2024-12-17' + gpt_4o_search_preview = 'gpt-4o-search-preview' + gpt_4o_mini_search_preview = 'gpt-4o-mini-search-preview' + gpt_4o_search_preview_2025_03_11 = 'gpt-4o-search-preview-2025-03-11' + gpt_4o_mini_search_preview_2025_03_11 = 'gpt-4o-mini-search-preview-2025-03-11' + computer_use_preview = 'computer-use-preview' + computer_use_preview_2025_03_11 = 'computer-use-preview-2025-03-11' + chatgpt_4o_latest = 'chatgpt-4o-latest' -class OutputFormat1(str, Enum): - png = 'png' - webp = 'webp' - jpeg = 'jpeg' +class Reason(str, Enum): + max_output_tokens = 'max_output_tokens' + content_filter = 'content_filter' -class Moderation(str, Enum): - low = 'low' - auto = 
'auto' - - -class Background(str, Enum): - transparent = 'transparent' - opaque = 'opaque' - - -class ResponseFormat(str, Enum): - url = 'url' - b64_json = 'b64_json' - - -class Style3(str, Enum): - vivid = 'vivid' - natural = 'natural' - - -class OpenAIImageGenerationRequest(BaseModel): - model: Optional[str] = Field( - None, description='The model to use for image generation', examples=['dall-e-3'] +class IncompleteDetails(BaseModel): + reason: Optional[Reason] = Field( + None, description='The reason why the response is incomplete.' ) - prompt: str = Field( + + +class Object(str, Enum): + response = 'response' + + +class Status6(str, Enum): + completed = 'completed' + failed = 'failed' + in_progress = 'in_progress' + incomplete = 'incomplete' + + +class Type13(str, Enum): + output_audio = 'output_audio' + + +class OutputAudioContent(BaseModel): + data: str = Field(..., description='Base64-encoded audio data') + transcript: str = Field(..., description='Transcript of the audio') + type: Type13 = Field(..., description='The type of output content') + + +class Role4(str, Enum): + assistant = 'assistant' + + +class Type14(str, Enum): + message = 'message' + + +class Type15(str, Enum): + output_text = 'output_text' + + +class OutputTextContent(BaseModel): + text: str = Field(..., description='The text content') + type: Type15 = Field(..., description='The type of output content') + + +class AspectRatio1(RootModel[float]): + root: float = Field( ..., - description='A text description of the desired image', - examples=['Draw a rocket in front of a blackhole in deep space'], - ) - n: Optional[int] = Field( - None, - description='The number of images to generate (1-10). Only 1 supported for dall-e-3.', - examples=[1], - ) - quality: Optional[Quality3] = Field( - None, description='The quality of the generated image', examples=['high'] - ) - size: Optional[str] = Field( - None, - description='Size of the image (e.g., 1024x1024, 1536x1024, auto)', - examples=['1024x1536'], - ) - output_format: Optional[OutputFormat1] = Field( - None, description='Format of the output image', examples=['png'] - ) - output_compression: Optional[int] = Field( - None, description='Compression level for JPEG or WebP (0-100)', examples=[100] - ) - moderation: Optional[Moderation] = Field( - None, description='Content moderation setting', examples=['auto'] - ) - background: Optional[Background] = Field( - None, description='Background transparency', examples=['opaque'] - ) - response_format: Optional[ResponseFormat] = Field( - None, description='Response format of image data', examples=['b64_json'] - ) - style: Optional[Style3] = Field( - None, description='Style of the image (only for dall-e-3)', examples=['vivid'] - ) - user: Optional[str] = Field( - None, - description='A unique identifier for end-user monitoring', - examples=['user-1234'], + description='Aspect ratio (width / height)', + ge=0.4, + le=2.5, + title='Aspectratio', ) -class OpenAIImageEditRequest(BaseModel): - model: str = Field( - ..., description='The model to use for image editing', examples=['gpt-image-1'] - ) - prompt: str = Field( - ..., - description='A text description of the desired edit', - examples=['Give the rocketship rainbow coloring'], - ) - n: Optional[int] = Field( - None, description='The number of images to generate', examples=[1] - ) - quality: Optional[str] = Field( - None, description='The quality of the edited image', examples=['low'] - ) - size: Optional[str] = Field( - None, description='Size of the output image', 
examples=['1024x1024'] - ) - output_format: Optional[OutputFormat1] = Field( - None, description='Format of the output image', examples=['png'] - ) - output_compression: Optional[int] = Field( - None, description='Compression level for JPEG or WebP (0-100)', examples=[100] - ) - moderation: Optional[Moderation] = Field( - None, description='Content moderation setting', examples=['auto'] - ) - background: Optional[str] = Field( - None, description='Background transparency', examples=['opaque'] - ) - user: Optional[str] = Field( - None, - description='A unique identifier for end-user monitoring', - examples=['user-1234'], - ) +class IngredientsMode(str, Enum): + creative = 'creative' + precise = 'precise' -class CustomerStorageResourceResponse(BaseModel): - download_url: Optional[str] = Field( +class PikaBodyGenerate22C2vGenerate22PikascenesPost(BaseModel): + aspectRatio: Optional[AspectRatio1] = Field( + None, description='Aspect ratio (width / height)', title='Aspectratio' + ) + duration: Optional[int] = Field(5, title='Duration') + images: Optional[List[StrictBytes]] = Field(None, title='Images') + ingredientsMode: IngredientsMode = Field(..., title='Ingredientsmode') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + promptText: Optional[str] = Field(None, title='Prompttext') + resolution: Optional[str] = Field('1080p', title='Resolution') + seed: Optional[int] = Field(None, title='Seed') + + +class PikaBodyGeneratePikadditionsGeneratePikadditionsPost(BaseModel): + image: Optional[StrictBytes] = Field(None, title='Image') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + promptText: Optional[str] = Field(None, title='Prompttext') + seed: Optional[int] = Field(None, title='Seed') + video: Optional[StrictBytes] = Field(None, title='Video') + + +class PikaBodyGeneratePikaswapsGeneratePikaswapsPost(BaseModel): + image: Optional[StrictBytes] = Field(None, title='Image') + modifyRegionMask: Optional[StrictBytes] = Field( None, - description='The signed URL to use for downloading the file from the specified path', + description='A mask image that specifies the region to modify, where the mask is white and the background is black', + title='Modifyregionmask', ) - upload_url: Optional[str] = Field( + modifyRegionRoi: Optional[str] = Field( None, - description='The signed URL to use for uploading the file to the specified path', - ) - expires_at: Optional[datetime] = Field( - None, description='When the signed URL will expire' - ) - existing_file: Optional[bool] = Field( - None, description='Whether an existing file with the same hash was found' + description='Plaintext description of the object / region to modify', + title='Modifyregionroi', ) + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + promptText: Optional[str] = Field(None, title='Prompttext') + seed: Optional[int] = Field(None, title='Seed') + video: Optional[StrictBytes] = Field(None, title='Video') + + +class PikaDurationEnum(int, Enum): + integer_5 = 5 + integer_10 = 10 + + +class PikaGenerateResponse(BaseModel): + video_id: str = Field(..., title='Video Id') + + +class PikaResolutionEnum(str, Enum): + field_1080p = '1080p' + field_720p = '720p' + + +class PikaStatusEnum(str, Enum): + queued = 'queued' + started = 'started' + finished = 'finished' + + +class PikaValidationError(BaseModel): + loc: List[Union[str, int]] = Field(..., title='Location') + msg: str = Field(..., title='Message') + type: str = Field(..., title='Error Type') + + +class 
PikaVideoResponse(BaseModel): + id: str = Field(..., title='Id') + progress: Optional[int] = Field(None, title='Progress') + status: PikaStatusEnum + url: Optional[str] = Field(None, title='Url') class Pikaffect(str, Enum): @@ -2145,92 +1711,135 @@ class Pikaffect(str, Enum): Tear = 'Tear' -class PikaBodyGeneratePikaffectsGeneratePikaffectsPost(BaseModel): - image: Optional[StrictBytes] = Field(None, title='Image') - pikaffect: Optional[Pikaffect] = Field(None, title='Pikaffect') - promptText: Optional[str] = Field(None, title='Prompttext') - negativePrompt: Optional[str] = Field(None, title='Negativeprompt') - seed: Optional[int] = Field(None, title='Seed') +class Resp(BaseModel): + img_id: Optional[int] = None -class PikaGenerateResponse(BaseModel): - video_id: str = Field(..., title='Video Id') +class PixverseImageUploadResponse(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp_1: Optional[Resp] = Field(None, alias='Resp') -class PikaBodyGeneratePikadditionsGeneratePikadditionsPost(BaseModel): - video: Optional[StrictBytes] = Field(None, title='Video') - image: Optional[StrictBytes] = Field(None, title='Image') - promptText: Optional[str] = Field(None, title='Prompttext') - negativePrompt: Optional[str] = Field(None, title='Negativeprompt') - seed: Optional[int] = Field(None, title='Seed') - - -class PikaBodyGeneratePikaswapsGeneratePikaswapsPost(BaseModel): - video: Optional[StrictBytes] = Field(None, title='Video') - image: Optional[StrictBytes] = Field(None, title='Image') - promptText: Optional[str] = Field(None, title='Prompttext') - modifyRegionMask: Optional[StrictBytes] = Field( - None, - description='A mask image that specifies the region to modify, where the mask is white and the background is black', - title='Modifyregionmask', - ) - modifyRegionRoi: Optional[str] = Field( - None, - description='Plaintext description of the object / region to modify', - title='Modifyregionroi', - ) - negativePrompt: Optional[str] = Field(None, title='Negativeprompt') - seed: Optional[int] = Field(None, title='Seed') - - -class IngredientsMode(str, Enum): - creative = 'creative' - precise = 'precise' - - -class AspectRatio1(RootModel[float]): - root: float = Field( - ..., - description='Aspect ratio (width / height)', - ge=0.4, - le=2.5, - title='Aspectratio', - ) - - -class PikaBodyGenerate22C2vGenerate22PikascenesPost(BaseModel): - images: Optional[List[StrictBytes]] = Field(None, title='Images') - ingredientsMode: IngredientsMode = Field(..., title='Ingredientsmode') - promptText: Optional[str] = Field(None, title='Prompttext') - negativePrompt: Optional[str] = Field(None, title='Negativeprompt') - seed: Optional[int] = Field(None, title='Seed') - resolution: Optional[str] = Field('1080p', title='Resolution') - duration: Optional[int] = Field(5, title='Duration') - aspectRatio: Optional[AspectRatio1] = Field( - None, description='Aspect ratio (width / height)', title='Aspectratio' - ) - - -class PikaStatusEnum(str, Enum): - queued = 'queued' - started = 'started' - finished = 'finished' - - -class PikaValidationError(BaseModel): - loc: List[Union[str, int]] = Field(..., title='Location') - msg: str = Field(..., title='Message') - type: str = Field(..., title='Error Type') - - -class PikaResolutionEnum(str, Enum): - field_1080p = '1080p' - field_720p = '720p' - - -class PikaDurationEnum(int, Enum): +class Duration(int, Enum): integer_5 = 5 - integer_10 = 10 + integer_8 = 8 + + +class Model1(str, Enum): + v3_5 = 'v3.5' + + +class MotionMode(str, Enum): + normal 
= 'normal' + fast = 'fast' + + +class Quality1(str, Enum): + field_360p = '360p' + field_540p = '540p' + field_720p = '720p' + field_1080p = '1080p' + + +class Style1(str, Enum): + anime = 'anime' + field_3d_animation = '3d_animation' + clay = 'clay' + comic = 'comic' + cyberpunk = 'cyberpunk' + + +class PixverseImageVideoRequest(BaseModel): + duration: Duration + img_id: int + model: Model1 + motion_mode: Optional[MotionMode] = None + prompt: str + quality: Quality1 + seed: Optional[int] = None + style: Optional[Style1] = None + template_id: Optional[int] = None + water_mark: Optional[bool] = None + + +class AspectRatio2(str, Enum): + field_16_9 = '16:9' + field_4_3 = '4:3' + field_1_1 = '1:1' + field_3_4 = '3:4' + field_9_16 = '9:16' + + +class PixverseTextVideoRequest(BaseModel): + aspect_ratio: AspectRatio2 + duration: Duration + model: Model1 + motion_mode: Optional[MotionMode] = None + negative_prompt: Optional[str] = None + prompt: str + quality: Quality1 + seed: Optional[int] = None + style: Optional[Style1] = None + template_id: Optional[int] = None + water_mark: Optional[bool] = None + + +class PixverseTransitionVideoRequest(BaseModel): + duration: Duration + first_frame_img: int + last_frame_img: int + model: Model1 + motion_mode: MotionMode + prompt: str + quality: Quality1 + seed: int + style: Optional[Style1] = None + template_id: Optional[int] = None + water_mark: Optional[bool] = None + + +class Resp1(BaseModel): + video_id: Optional[int] = None + + +class PixverseVideoResponse(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp: Optional[Resp1] = None + + +class Status7(int, Enum): + integer_1 = 1 + integer_5 = 5 + integer_6 = 6 + integer_7 = 7 + integer_8 = 8 + + +class Resp2(BaseModel): + create_time: Optional[str] = None + id: Optional[int] = None + modify_time: Optional[str] = None + negative_prompt: Optional[str] = None + outputHeight: Optional[int] = None + outputWidth: Optional[int] = None + prompt: Optional[str] = None + resolution_ratio: Optional[int] = None + seed: Optional[int] = None + size: Optional[int] = None + status: Optional[Status7] = Field( + None, + description='Video generation status codes:\n* 1 - Generation successful\n* 5 - Generating\n* 6 - Deleted\n* 7 - Contents moderation failed\n* 8 - Generation failed\n', + ) + style: Optional[str] = None + url: Optional[str] = None + + +class PixverseVideoResultResponse(BaseModel): + ErrCode: Optional[int] = None + ErrMsg: Optional[str] = None + Resp: Optional[Resp2] = None class RgbItem(RootModel[int]): @@ -2241,213 +1850,364 @@ class RGBColor(BaseModel): rgb: List[RgbItem] = Field(..., max_length=3, min_length=3) -class StabilityStabilityClientID(RootModel[str]): - root: str = Field( +class GenerateSummary(str, Enum): + auto = 'auto' + concise = 'concise' + detailed = 'detailed' + + +class Summary(str, Enum): + auto = 'auto' + concise = 'concise' + detailed = 'detailed' + + +class ReasoningEffort(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + + +class Status8(str, Enum): + in_progress = 'in_progress' + completed = 'completed' + incomplete = 'incomplete' + + +class Type16(str, Enum): + summary_text = 'summary_text' + + +class SummaryItem(BaseModel): + text: str = Field( ..., - description='The name of your application, used to help us communicate app-specific debugging or moderation issues to you.', - examples=['my-awesome-app'], - max_length=256, + description='A short summary of the reasoning used by the model when generating\nthe response.\n', + ) + type: 
Type16 = Field( + ..., description='The type of the object. Always `summary_text`.\n' ) -class StabilityStabilityClientUserID(RootModel[str]): - root: str = Field( - ..., - description='A unique identifier for your end user. Used to help us communicate user-specific debugging or moderation issues to you. Feel free to obfuscate this value to protect user privacy.', - examples=['DiscordUser#9999'], - max_length=256, - ) +class Type17(str, Enum): + reasoning = 'reasoning' -class StabilityStabilityClientVersion(RootModel[str]): - root: str = Field( - ..., - description='The version of your application, used to help us communicate version-specific debugging or moderation issues to you.', - examples=['1.2.1'], - max_length=256, - ) - - -class Name(str, Enum): - content_moderation = 'content_moderation' - - -class StabilityContentModerationResponse(BaseModel): +class ReasoningItem(BaseModel): id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new) you file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, + ..., description='The unique identifier of the reasoning content.\n' ) - name: Name = Field( - ..., - description='Our content moderation system has flagged some part of your request and subsequently denied it. You were not charged for this request. While this may at times be frustrating, it is necessary to maintain the integrity of our platform and ensure a safe experience for all users. If you would like to provide feedback, please use the [Support Form](https://kb.stability.ai/knowledge-base/kb-tickets/new).', + status: Optional[Status8] = Field( + None, + description='The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API.\n', ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, + summary: List[SummaryItem] = Field(..., description='Reasoning text contents.\n') + type: Type17 = Field( + ..., description='The type of the object. Always `reasoning`.\n' ) +class Controls(BaseModel): + artistic_level: Optional[int] = Field( + None, + description='Defines artistic tone of your image. At a simple level, the person looks straight at the camera in a static and clean style. 
Dynamic and eccentric levels introduce movement and creativity.', + ge=0, + le=5, + ) + background_color: Optional[RGBColor] = None + colors: Optional[List[RGBColor]] = Field( + None, description='An array of preferable colors' + ) + no_text: Optional[bool] = Field(None, description='Do not embed text layouts') + + +class RecraftImageGenerationRequest(BaseModel): + controls: Optional[Controls] = Field( + None, description='The controls for the generated image' + ) + model: str = Field( + ..., description='The model to use for generation (e.g., "recraftv3")' + ) + n: int = Field(..., description='The number of images to generate', ge=1, le=4) + prompt: str = Field( + ..., description='The text prompt describing the image to generate' + ) + size: str = Field( + ..., description='The size of the generated image (e.g., "1024x1024")' + ) + style: Optional[str] = Field( + None, + description='The style to apply to the generated image (e.g., "digital_illustration")', + ) + style_id: Optional[str] = Field( + None, + description='The style ID to apply to the generated image (e.g., "123e4567-e89b-12d3-a456-426614174000"). If style_id is provided, style should not be provided.', + ) + + +class Datum3(BaseModel): + image_id: Optional[str] = Field( + None, description='Unique identifier for the generated image' + ) + url: Optional[str] = Field(None, description='URL to access the generated image') + + +class RecraftImageGenerationResponse(BaseModel): + created: int = Field( + ..., description='Unix timestamp when the generation was created' + ) + credits: int = Field(..., description='Number of credits used for the generation') + data: List[Datum3] = Field(..., description='Array of generated image information') + + class RenderingSpeed(str, Enum): BALANCED = 'BALANCED' TURBO = 'TURBO' QUALITY = 'QUALITY' -class StabilityCreativity(RootModel[float]): - root: float = Field( +class ResponseErrorCode(str, Enum): + server_error = 'server_error' + rate_limit_exceeded = 'rate_limit_exceeded' + invalid_prompt = 'invalid_prompt' + vector_store_timeout = 'vector_store_timeout' + invalid_image = 'invalid_image' + invalid_image_format = 'invalid_image_format' + invalid_base64_image = 'invalid_base64_image' + invalid_image_url = 'invalid_image_url' + image_too_large = 'image_too_large' + image_too_small = 'image_too_small' + image_parse_error = 'image_parse_error' + image_content_policy_violation = 'image_content_policy_violation' + invalid_image_mode = 'invalid_image_mode' + image_file_too_large = 'image_file_too_large' + unsupported_image_media_type = 'unsupported_image_media_type' + empty_image_file = 'empty_image_file' + failed_to_download_image = 'failed_to_download_image' + image_file_not_found = 'image_file_not_found' + + +class Type18(str, Enum): + json_object = 'json_object' + + +class ResponseFormatJsonObject(BaseModel): + type: Type18 = Field( ..., - description='Controls the likelihood of creating additional details not heavily conditioned by the init image.', - ge=0.2, - le=0.5, + description='The type of response format being defined. Always `json_object`.', ) -class StabilityGenerationID(RootModel[str]): - root: str = Field( +class ResponseFormatJsonSchemaSchema(BaseModel): + pass + model_config = ConfigDict( + extra='allow', + ) + + +class Type19(str, Enum): + text = 'text' + + +class ResponseFormatText(BaseModel): + type: Type19 = Field( + ..., description='The type of response format being defined. Always `text`.' 
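# A minimal usage sketch for the Recraft models defined above: build the
# request, serialize it for an HTTP POST body, and validate the JSON reply.
# The trimmed model copies and all sample values here are illustrative
# assumptions, not actual API output.
from typing import List, Optional
from pydantic import BaseModel, Field

class RecraftImageGenerationRequest(BaseModel):  # trimmed copy of the model above
    model: str
    n: int = Field(ge=1, le=4)
    prompt: str
    size: str
    style: Optional[str] = None

class Datum3(BaseModel):  # trimmed copy
    image_id: Optional[str] = None
    url: Optional[str] = None

class RecraftImageGenerationResponse(BaseModel):  # trimmed copy
    created: int
    credits: int
    data: List[Datum3]

req = RecraftImageGenerationRequest(
    model='recraftv3', n=1, prompt='a lighthouse at dusk', size='1024x1024'
)
body = req.model_dump(exclude_none=True)  # dict ready for an HTTP POST
reply = RecraftImageGenerationResponse.model_validate(
    {'created': 1713300000, 'credits': 1,
     'data': [{'url': 'https://example.com/img.png'}]}
)
print(body['size'], reply.data[0].url)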
+ ) + + +class Truncation1(str, Enum): + auto = 'auto' + disabled = 'disabled' + + +class InputTokensDetails1(BaseModel): + cached_tokens: int = Field( ..., - description='The `id` of a generation, typically used for async generations, that can be used to check the status of the generation or retrieve the result.', - examples=['a6dc6c6e20acda010fe14d71f180658f2896ed9b4ec25aa99a6ff06c796987c4'], - max_length=64, - min_length=64, + description='The number of tokens that were retrieved from the cache. \n[More on prompt caching](/docs/guides/prompt-caching).\n', ) -class Mode(str, Enum): - text_to_image = 'text-to-image' - image_to_image = 'image-to-image' +class OutputTokensDetails(BaseModel): + reasoning_tokens: int = Field(..., description='The number of reasoning tokens.') -class AspectRatio2(str, Enum): - field_21_9 = '21:9' - field_16_9 = '16:9' - field_3_2 = '3:2' - field_5_4 = '5:4' - field_1_1 = '1:1' - field_4_5 = '4:5' - field_2_3 = '2:3' - field_9_16 = '9:16' - field_9_21 = '9:21' +class ResponseUsage(BaseModel): + input_tokens: int = Field(..., description='The number of input tokens.') + input_tokens_details: InputTokensDetails1 = Field( + ..., description='A detailed breakdown of the input tokens.' + ) + output_tokens: int = Field(..., description='The number of output tokens.') + output_tokens_details: OutputTokensDetails = Field( + ..., description='A detailed breakdown of the output tokens.' + ) + total_tokens: int = Field(..., description='The total number of tokens used.') -class Model4(str, Enum): - sd3_5_large = 'sd3.5-large' - sd3_5_large_turbo = 'sd3.5-large-turbo' - sd3_5_medium = 'sd3.5-medium' +class Rodin3DCheckStatusRequest(BaseModel): + subscription_key: str = Field( + ..., description='subscription from generate endpoint' + ) -class OutputFormat3(str, Enum): - png = 'png' - jpeg = 'jpeg' +class Rodin3DCheckStatusResponse(BaseModel): + pass -class StylePreset(str, Enum): - enhance = 'enhance' - anime = 'anime' - photographic = 'photographic' - digital_art = 'digital-art' - comic_book = 'comic-book' - fantasy_art = 'fantasy-art' - line_art = 'line-art' - analog_film = 'analog-film' - neon_punk = 'neon-punk' - isometric = 'isometric' - low_poly = 'low-poly' - origami = 'origami' - modeling_compound = 'modeling-compound' - cinematic = 'cinematic' - field_3d_model = '3d-model' - pixel_art = 'pixel-art' - tile_texture = 'tile-texture' +class Rodin3DDownloadRequest(BaseModel): + task_uuid: str = Field(..., description='Task UUID') -class StabilityImageGenrationSD3Request(BaseModel): - prompt: str = Field( +class RodinGenerateJobsData(BaseModel): + subscription_key: Optional[str] = Field(None, description='Subscription Key.') + uuids: Optional[List[str]] = Field(None, description='subjobs uuid.') + + +class RodinMaterialType(str, Enum): + PBR = 'PBR' + Shaded = 'Shaded' + + +class RodinMeshModeType(str, Enum): + Quad = 'Quad' + Raw = 'Raw' + + +class RodinQualityType(str, Enum): + extra_low = 'extra-low' + low = 'low' + medium = 'medium' + high = 'high' + + +class RodinResourceItem(BaseModel): + name: Optional[str] = Field(None, description='File name') + url: Optional[str] = Field(None, description='Download url') + + +class RodinTierType(str, Enum): + Regular = 'Regular' + Sketch = 'Sketch' + Detail = 'Detail' + Smooth = 'Smooth' + + +class RunwayAspectRatioEnum(str, Enum): + field_1280_720 = '1280:720' + field_720_1280 = '720:1280' + field_1104_832 = '1104:832' + field_832_1104 = '832:1104' + field_960_960 = '960:960' + field_1584_672 = '1584:672' + field_1280_768 = 
'1280:768' + field_768_1280 = '768:1280' + + +class RunwayDurationEnum(int, Enum): + integer_5 = 5 + integer_10 = 10 + + +class RunwayImageToVideoResponse(BaseModel): + id: Optional[str] = Field(None, description='Task ID') + + +class RunwayModelEnum(str, Enum): + gen4_turbo = 'gen4_turbo' + gen3a_turbo = 'gen3a_turbo' + + +class Position(str, Enum): + first = 'first' + last = 'last' + + +class RunwayPromptImageDetailedObject(BaseModel): + position: Position = Field( ..., - description='What you wish to see in the output image. A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.', - max_length=10000, - min_length=1, + description="The position of the image in the output video. 'last' is currently supported for gen3a_turbo only.", ) - mode: Optional[Mode] = Field( - 'text-to-image', - description='Controls whether this is a text-to-image or image-to-image generation, which affects which parameters are required:\n- **text-to-image** requires only the `prompt` parameter\n- **image-to-image** requires the `prompt`, `image`, and `strength` parameters', - title='GenerationMode', + uri: str = Field( + ..., description='A HTTPS URL or data URI containing an encoded image.' ) - image: Optional[StrictBytes] = Field( + + +class RunwayPromptImageObject( + RootModel[Union[str, List[RunwayPromptImageDetailedObject]]] +): + root: Union[str, List[RunwayPromptImageDetailedObject]] = Field( + ..., + description='Image(s) to use for the video generation. Can be a single URI or an array of image objects with positions.', + ) + + +class RunwayTaskStatusEnum(str, Enum): + SUCCEEDED = 'SUCCEEDED' + RUNNING = 'RUNNING' + FAILED = 'FAILED' + PENDING = 'PENDING' + CANCELLED = 'CANCELLED' + THROTTLED = 'THROTTLED' + + +class RunwayTaskStatusResponse(BaseModel): + createdAt: datetime = Field(..., description='Task creation timestamp') + id: str = Field(..., description='Task ID') + output: Optional[List[str]] = Field(None, description='Array of output video URLs') + progress: Optional[float] = Field( None, - description='The image to use as the starting point for the generation.\n\nSupported formats:\n\n\n\n - jpeg\n - png\n - webp\n\nSupported dimensions:\n\n\n\n - Every side must be at least 64 pixels\n\n> **Important:** This parameter is only valid for **image-to-image** requests.', - ) - strength: Optional[float] = Field( - None, - description='Sometimes referred to as _denoising_, this parameter controls how much influence the\n`image` parameter has on the generated image. A value of 0 would yield an image that\nis identical to the input. A value of 1 would be as if you passed in no image at all.\n\n> **Important:** This parameter is only valid for **image-to-image** requests.', + description='Float value between 0 and 1 representing the progress of the task. Only available if status is RUNNING.', ge=0.0, le=1.0, ) - aspect_ratio: Optional[AspectRatio2] = Field( - '1:1', - description='Controls the aspect ratio of the generated image. 
Defaults to 1:1.\n\n> **Important:** This parameter is only valid for **text-to-image** requests.', - ) - model: Optional[Model4] = Field( - 'sd3.5-large', - description='The model to use for generation.\n\n- `sd3.5-large` requires 6.5 credits per generation\n- `sd3.5-large-turbo` requires 4 credits per generation\n- `sd3.5-medium` requires 3.5 credits per generation\n- As of the April 17, 2025, `sd3-large`, `sd3-large-turbo` and `sd3-medium`\n\n\n\n are re-routed to their `sd3.5-[model version]` equivalent, at the same price.', - ) - seed: Optional[float] = Field( - 0, - description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)", - ge=0.0, - le=4294967294.0, - ) - output_format: Optional[OutputFormat3] = Field( - 'png', description='Dictates the `content-type` of the generated image.' - ) - style_preset: Optional[StylePreset] = Field( - None, description='Guides the image model towards a particular style.' - ) - negative_prompt: Optional[str] = Field( - None, - description='Keywords of what you **do not** wish to see in the output image.\nThis is an advanced feature.', - max_length=10000, - ) - cfg_scale: Optional[float] = Field( - None, - description='How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt). The _Large_ and _Medium_ models use a default of `4`. The _Turbo_ model uses a default of `1`.', - ge=1.0, - le=10.0, + status: RunwayTaskStatusEnum + + +class RunwayTextToImageAspectRatioEnum(str, Enum): + field_1920_1080 = '1920:1080' + field_1080_1920 = '1080:1920' + field_1024_1024 = '1024:1024' + field_1360_768 = '1360:768' + field_1080_1080 = '1080:1080' + field_1168_880 = '1168:880' + field_1440_1080 = '1440:1080' + field_1080_1440 = '1080:1440' + field_1808_768 = '1808:768' + field_2112_912 = '2112:912' + +class Model4(str, Enum): + gen4_image = 'gen4_image' + + +class ReferenceImage(BaseModel): + uri: Optional[str] = Field( + None, description='A HTTPS URL or data URI containing an encoded image' ) -class FinishReason(str, Enum): - SUCCESS = 'SUCCESS' - CONTENT_FILTERED = 'CONTENT_FILTERED' +class RunwayTextToImageRequest(BaseModel): + model: Model4 = Field(..., description='Model to use for generation') + promptText: str = Field( + ..., description='Text prompt for the image generation', max_length=1000 + ) + ratio: RunwayTextToImageAspectRatioEnum + referenceImages: Optional[List[ReferenceImage]] = Field( + None, description='Array of reference images to guide the generation' + ) -class StabilityImageGenrationSD3Response200(BaseModel): - image: str = Field( +class RunwayTextToImageResponse(BaseModel): + id: Optional[str] = Field(None, description='Task ID') + + +class StabilityError(BaseModel): + errors: List[str] = Field( ..., - description='The generated image, encoded to base64.', - examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], + description='One or more error messages indicating what went wrong.', + examples=[[{'some-field': 'is required'}]], + min_length=1, ) - seed: Optional[float] = Field( - 0, - description='The seed used as random noise for this generation.', - examples=[343940597], - ge=0.0, - le=4294967294.0, - ) - finish_reason: FinishReason = Field( - ..., - description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', - 
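# A minimal sketch of the consolidated StabilityError model introduced in
# this hunk: the many per-status Stability response classes removed below all
# repeat the same {id, name, errors} envelope, so one model can parse any of
# those error payloads. Sample values are illustrative assumptions.
from typing import List
from pydantic import BaseModel, Field

class StabilityError(BaseModel):  # trimmed copy of the model above
    errors: List[str] = Field(min_length=1)
    id: str = Field(min_length=1)
    name: str = Field(min_length=1)

err = StabilityError.model_validate({
    'id': 'a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4',
    'name': 'bad_request',
    'errors': ['some-field: is required'],
})
print(f'{err.name}: {err.errors[0]}')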
examples=['SUCCESS'], - ) - - -class StabilityImageGenrationSD3Response400(BaseModel): id: str = Field( ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new) you file, as it will greatly assist us in diagnosing the root cause of the problem.\n', examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], min_length=1, ) @@ -2457,704 +2217,501 @@ class StabilityImageGenrationSD3Response400(BaseModel): examples=['bad_request'], min_length=1, ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) -class StabilityImageGenrationSD3Response413(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) +class Status9(str, Enum): + in_progress = 'in-progress' -class StabilityImageGenrationSD3Response422(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationSD3Response429(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationSD3Response500(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class OutputFormat4(str, Enum): - jpeg = 'jpeg' - png = 'png' - webp = 'webp' - - -class StabilityImageGenrationUpscaleConservativeRequest(BaseModel): - image: StrictBytes = Field( - ..., - description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Every side must be at least 64 pixels\n- Total pixel count must be between 4,096 and 9,437,184 pixels\n- The aspect ratio must be between 1:2.5 and 2.5:1', - examples=['./some/image.png'], - ) - prompt: str = Field( - ..., - description="What you wish to see in the output image. A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.\n\nTo control the weight of a given word use the format `(word:weight)`,\nwhere `word` is the word you'd like to control the weight of and `weight`\nis a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`\nwould convey a sky that was blue and green, but more green than blue.", - max_length=10000, - min_length=1, - ) - negative_prompt: Optional[str] = Field( - None, - description='A blurb of text describing what you **do not** wish to see in the output image.\nThis is an advanced feature.', - max_length=10000, - ) - seed: Optional[float] = Field( - 0, - description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)", - ge=0.0, - le=4294967294.0, - ) - output_format: Optional[OutputFormat4] = Field( - 'png', description='Dictates the `content-type` of the generated image.' - ) - creativity: Optional[StabilityCreativity] = Field( - default_factory=lambda: StabilityCreativity.model_validate(0.35) - ) - - -class StabilityImageGenrationUpscaleConservativeResponse200(BaseModel): - image: str = Field( - ..., - description='The generated image, encoded to base64.', - examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], - ) - seed: Optional[float] = Field( - 0, - description='The seed used as random noise for this generation.', - examples=[343940597], - ge=0.0, - le=4294967294.0, - ) - finish_reason: FinishReason = Field( - ..., - description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', - examples=['SUCCESS'], - ) - - -class StabilityImageGenrationUpscaleConservativeResponse400(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleConservativeResponse413(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleConservativeResponse422(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleConservativeResponse429(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleConservativeResponse500(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleCreativeRequest(BaseModel): - image: StrictBytes = Field( - ..., - description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Every side must be at least 64 pixels\n- Total pixel count must be between 4,096 and 1,048,576 pixels', - examples=['./some/image.png'], - ) - prompt: str = Field( - ..., - description="What you wish to see in the output image. A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.\n\nTo control the weight of a given word use the format `(word:weight)`,\nwhere `word` is the word you'd like to control the weight of and `weight`\nis a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`\nwould convey a sky that was blue and green, but more green than blue.", - max_length=10000, - min_length=1, - ) - negative_prompt: Optional[str] = Field( - None, - description='A blurb of text describing what you **do not** wish to see in the output image.\nThis is an advanced feature.', - max_length=10000, - ) - output_format: Optional[OutputFormat4] = Field( - 'png', description='Dictates the `content-type` of the generated image.' - ) - seed: Optional[float] = Field( - 0, - description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)", - ge=0.0, - le=4294967294.0, - ) - creativity: Optional[float] = Field( - 0.3, - description='Indicates how creative the model should be when upscaling an image.\nHigher values will result in more details being added to the image during upscaling.', - ge=0.1, - le=0.5, - ) - style_preset: Optional[StylePreset] = Field( - None, description='Guides the image model towards a particular style.' - ) - - -class StabilityImageGenrationUpscaleCreativeResponse200(BaseModel): - id: StabilityGenerationID - - -class StabilityImageGenrationUpscaleCreativeResponse400(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleCreativeResponse413(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleCreativeResponse422(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleCreativeResponse429(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleCreativeResponse500(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleFastRequest(BaseModel): - image: StrictBytes = Field( - ..., - description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Width must be between 32 and 1,536 pixels\n- Height must be between 32 and 1,536 pixels\n- Total pixel count must be between 1,024 and 1,048,576 pixels', - examples=['./some/image.png'], - ) - output_format: Optional[OutputFormat4] = Field( - 'png', description='Dictates the `content-type` of the generated image.' 
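# A polling sketch for the asynchronous creative upscale: its 200 response
# carries only a 64-character generation id (StabilityGenerationID), which is
# polled until the result endpoint stops answering 202 / 'in-progress'
# (StabilityGetResultResponse202 later in this hunk). The route string and
# the requests/httpx-style `client` parameter are assumptions.
import time

def poll_stability_result(client, generation_id: str, interval: float = 5.0) -> dict:
    """Poll an async Stability generation until it finishes (sketch)."""
    while True:
        r = client.get(f'/v2beta/results/{generation_id}')  # assumed route
        if r.status_code != 202:   # 202 means the task is still 'in-progress'
            r.raise_for_status()
            return r.json()        # finished payload: image, seed, finish_reason
        time.sleep(interval)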
- ) - - -class StabilityImageGenrationUpscaleFastResponse200(BaseModel): - image: str = Field( - ..., - description='The generated image, encoded to base64.', - examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], - ) - seed: Optional[float] = Field( - 0, - description='The seed used as random noise for this generation.', - examples=[343940597], - ge=0.0, - le=4294967294.0, - ) - finish_reason: FinishReason = Field( - ..., - description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', - examples=['SUCCESS'], - ) - - -class StabilityImageGenrationUpscaleFastResponse400(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleFastResponse413(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleFastResponse422(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleFastResponse429(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class StabilityImageGenrationUpscaleFastResponse500(BaseModel): - id: str = Field( - ..., - description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', - examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], - min_length=1, - ) - name: str = Field( - ..., - description='Short-hand name for an error, useful for discriminating between errors with the same status code.', - examples=['bad_request'], - min_length=1, - ) - errors: List[str] = Field( - ..., - description='One or more error messages indicating what went wrong.', - examples=[['some-field: is required']], - min_length=1, - ) - - -class ActionJobResult(BaseModel): - id: Optional[UUID] = Field(None, description='Unique identifier for the job result') - workflow_name: Optional[str] = Field(None, description='Name of the workflow') - operating_system: Optional[str] = Field(None, description='Operating system used') - python_version: Optional[str] = Field(None, description='PyTorch version used') - pytorch_version: Optional[str] = Field(None, description='PyTorch version used') - action_run_id: Optional[str] = Field( - None, description='Identifier of the run this result belongs to' - ) - action_job_id: Optional[str] = Field( - None, description='Identifier of the job this result belongs to' - ) - cuda_version: Optional[str] = Field(None, description='CUDA version used') - branch_name: Optional[str] = Field( - None, description='Name of the relevant git branch' - ) - commit_hash: Optional[str] = Field(None, description='The hash of the commit') - commit_id: Optional[str] = Field(None, description='The ID of the commit') - commit_time: Optional[int] = Field( - None, description='The Unix timestamp when the commit was made' - ) - commit_message: Optional[str] = Field(None, description='The message of the commit') - comfy_run_flags: Optional[str] = Field( - None, description='The comfy run flags. E.g. `--low-vram`' - ) - git_repo: Optional[str] = Field(None, description='The repository name') - pr_number: Optional[str] = Field(None, description='The pull request number') - start_time: Optional[int] = Field( - None, description='The start time of the job as a Unix timestamp.' - ) - end_time: Optional[int] = Field( - None, description='The end time of the job as a Unix timestamp.' - ) - avg_vram: Optional[int] = Field( - None, description='The average VRAM used by the job' - ) - peak_vram: Optional[int] = Field(None, description='The peak VRAM used by the job') - job_trigger_user: Optional[str] = Field( - None, description='The user who triggered the job.' 
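# The Runway task models added earlier in this hunk follow the same
# submit-then-poll pattern: a task is re-fetched as RunwayTaskStatusResponse
# until its status is terminal. A minimal sketch; the trimmed copies and the
# sample payload are illustrative assumptions.
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel

class RunwayTaskStatusEnum(str, Enum):  # trimmed copy of the enum above
    SUCCEEDED = 'SUCCEEDED'
    RUNNING = 'RUNNING'
    FAILED = 'FAILED'
    PENDING = 'PENDING'
    CANCELLED = 'CANCELLED'
    THROTTLED = 'THROTTLED'

class RunwayTaskStatusResponse(BaseModel):  # trimmed copy (createdAt omitted)
    id: str
    status: RunwayTaskStatusEnum
    progress: Optional[float] = None
    output: Optional[List[str]] = None

TERMINAL = {RunwayTaskStatusEnum.SUCCEEDED, RunwayTaskStatusEnum.FAILED,
            RunwayTaskStatusEnum.CANCELLED}

task = RunwayTaskStatusResponse.model_validate(
    {'id': 'task-123', 'status': 'RUNNING', 'progress': 0.4}
)
if task.status not in TERMINAL:
    print(f'still running: {task.progress:.0%}')  # -> still running: 40%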
-    )
-    author: Optional[str] = Field(None, description='The author of the commit')
-    machine_stats: Optional[MachineStats] = None
-    status: Optional[WorkflowRunStatus] = None
-    storage_file: Optional[StorageFile] = None
-
-
-class Publisher(BaseModel):
-    name: Optional[str] = None
+class StabilityGetResultResponse202(BaseModel):
     id: Optional[str] = Field(
+        None, description='The ID of the generation result.', examples=[1234567890]
+    )
+    status: Optional[Status9] = None
+
+
+class Type20(str, Enum):
+    json_schema = 'json_schema'
+
+
+class TextResponseFormatJsonSchema(BaseModel):
+    description: Optional[str] = Field(
         None,
-        description="The unique identifier for the publisher. It's akin to a username. Should be lowercase.",
+        description='A description of what the response format is for, used by the model to\ndetermine how to respond in the format.\n',
     )
-    description: Optional[str] = None
-    website: Optional[str] = None
-    support: Optional[str] = None
-    source_code_repo: Optional[str] = None
-    logo: Optional[str] = Field(None, description="URL to the publisher's logo.")
-    createdAt: Optional[datetime] = Field(
-        None, description='The date and time the publisher was created.'
+    name: str = Field(
+        ...,
+        description='The name of the response format. Must be a-z, A-Z, 0-9, or contain\nunderscores and dashes, with a maximum length of 64.\n',
     )
-    members: Optional[List[PublisherMember]] = Field(
-        None, description='A list of members in the publisher.'
+    schema_: ResponseFormatJsonSchemaSchema = Field(..., alias='schema')
+    strict: Optional[bool] = Field(
+        False,
+        description='Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n',
    )
-    status: Optional[PublisherStatus] = Field(
-        None, description='The status of the publisher.'
+    type: Type20 = Field(
+        ...,
+        description='The type of response format being defined. Always `json_schema`.',
    )


-class NodeVersion(BaseModel):
-    id: Optional[str] = None
-    version: Optional[str] = Field(
+class Type21(str, Enum):
+    function = 'function'
+
+
+class ToolChoiceFunction(BaseModel):
+    name: str = Field(..., description='The name of the function to call.')
+    type: Type21 = Field(
+        ..., description='For function calling, the type is always `function`.'
+    )
+
+
+class ToolChoiceOptions(str, Enum):
+    none = 'none'
+    auto = 'auto'
+    required = 'required'
+
+
+class Type22(str, Enum):
+    file_search = 'file_search'
+    web_search_preview = 'web_search_preview'
+    computer_use_preview = 'computer_use_preview'
+    web_search_preview_2025_03_11 = 'web_search_preview_2025_03_11'
+
+
+class ToolChoiceTypes(BaseModel):
+    type: Type22 = Field(
+        ...,
+        description='The type of hosted tool the model should use. 
Learn more about\n[built-in tools](/docs/guides/tools).\n\nAllowed values are:\n- `file_search`\n- `web_search_preview`\n- `computer_use_preview`\n', + ) + + +class TripoAnimation(str, Enum): + preset_idle = 'preset:idle' + preset_walk = 'preset:walk' + preset_climb = 'preset:climb' + preset_jump = 'preset:jump' + preset_run = 'preset:run' + preset_slash = 'preset:slash' + preset_shoot = 'preset:shoot' + preset_hurt = 'preset:hurt' + preset_fall = 'preset:fall' + preset_turn = 'preset:turn' + + +class TripoBalance(BaseModel): + balance: float + frozen: float + + +class TripoConvertFormat(str, Enum): + GLTF = 'GLTF' + USDZ = 'USDZ' + FBX = 'FBX' + OBJ = 'OBJ' + STL = 'STL' + field_3MF = '3MF' + + +class Code(int, Enum): + integer_1001 = 1001 + integer_2000 = 2000 + integer_2001 = 2001 + integer_2002 = 2002 + integer_2003 = 2003 + integer_2004 = 2004 + integer_2006 = 2006 + integer_2007 = 2007 + integer_2008 = 2008 + integer_2010 = 2010 + + +class TripoErrorResponse(BaseModel): + code: Code + message: str + suggestion: str + + +class TripoImageToModel(str, Enum): + image_to_model = 'image_to_model' + + +class TripoModelStyle(str, Enum): + person_person2cartoon = 'person:person2cartoon' + animal_venom = 'animal:venom' + object_clay = 'object:clay' + object_steampunk = 'object:steampunk' + object_christmas = 'object:christmas' + object_barbie = 'object:barbie' + gold = 'gold' + ancient_bronze = 'ancient_bronze' + + +class TripoModelVersion(str, Enum): + V2_5 = 'v2.5-20250123' + V2_0 = 'v2.0-20240919' + V1_4 = 'v1.4-20240625' + + +class TripoMultiviewMode(str, Enum): + LEFT = 'LEFT' + RIGHT = 'RIGHT' + + +class TripoMultiviewToModel(str, Enum): + multiview_to_model = 'multiview_to_model' + + +class TripoOrientation(str, Enum): + align_image = 'align_image' + default = 'default' + + +class TripoResponseSuccessCode(RootModel[int]): + root: int = Field( + ..., + description='Standard success code for Tripo API responses. 
Typically 0 for success.', + examples=[0], + ) + + +class TripoSpec(str, Enum): + mixamo = 'mixamo' + tripo = 'tripo' + + +class TripoStandardFormat(str, Enum): + glb = 'glb' + fbx = 'fbx' + + +class TripoStylizeOptions(str, Enum): + lego = 'lego' + voxel = 'voxel' + voronoi = 'voronoi' + minecraft = 'minecraft' + + +class Code1(int, Enum): + integer_0 = 0 + + +class Data8(BaseModel): + task_id: str = Field(..., description='used for getTask') + + +class TripoSuccessTask(BaseModel): + code: Code1 + data: Data8 + + +class Topology(str, Enum): + bip = 'bip' + quad = 'quad' + + +class Output(BaseModel): + base_model: Optional[str] = None + model: Optional[str] = None + pbr_model: Optional[str] = None + rendered_image: Optional[str] = None + riggable: Optional[bool] = None + topology: Optional[Topology] = None + + +class Status10(str, Enum): + queued = 'queued' + running = 'running' + success = 'success' + failed = 'failed' + cancelled = 'cancelled' + unknown = 'unknown' + banned = 'banned' + expired = 'expired' + + +class TripoTask(BaseModel): + create_time: int + input: Dict[str, Any] + output: Output + progress: int = Field(..., ge=0, le=100) + status: Status10 + task_id: str + type: str + + +class TripoTextToModel(str, Enum): + text_to_model = 'text_to_model' + + +class TripoTextureAlignment(str, Enum): + original_image = 'original_image' + geometry = 'geometry' + + +class TripoTextureFormat(str, Enum): + BMP = 'BMP' + DPX = 'DPX' + HDR = 'HDR' + JPEG = 'JPEG' + OPEN_EXR = 'OPEN_EXR' + PNG = 'PNG' + TARGA = 'TARGA' + TIFF = 'TIFF' + WEBP = 'WEBP' + + +class TripoTextureQuality(str, Enum): + standard = 'standard' + detailed = 'detailed' + + +class TripoTopology(str, Enum): + bip = 'bip' + quad = 'quad' + + +class TripoTypeAnimatePrerigcheck(str, Enum): + animate_prerigcheck = 'animate_prerigcheck' + + +class TripoTypeAnimateRetarget(str, Enum): + animate_retarget = 'animate_retarget' + + +class TripoTypeAnimateRig(str, Enum): + animate_rig = 'animate_rig' + + +class TripoTypeConvertModel(str, Enum): + convert_model = 'convert_model' + + +class TripoTypeRefineModel(str, Enum): + refine_model = 'refine_model' + + +class TripoTypeStylizeModel(str, Enum): + stylize_model = 'stylize_model' + + +class TripoTypeTextureModel(str, Enum): + texture_model = 'texture_model' + + +class Veo2GenVidPollRequest(BaseModel): + operationName: str = Field( + ..., + description='Full operation name (from predict response)', + examples=[ + 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/OPERATION_ID' + ], + ) + + +class Error(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + + +class Video(BaseModel): + bytesBase64Encoded: Optional[str] = Field( + None, description='Base64-encoded video content' + ) + gcsUri: Optional[str] = Field(None, description='Cloud Storage URI of the video') + mimeType: Optional[str] = Field(None, description='Video MIME type') + + +class Response(BaseModel): + field_type: Optional[str] = Field( None, - description='The version identifier, following semantic versioning. Must be unique for the node.', + alias='@type', + examples=[ + 'type.googleapis.com/cloud.ai.large_models.vision.GenerateVideoResponse' + ], ) - createdAt: Optional[datetime] = Field( - None, description='The date and time the version was created.' 
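# A minimal sketch of the Veo2 polling flow defined around here: the
# operation named in Veo2GenVidPollRequest is polled until the poll response
# reports done=True, at which point response.videos holds base64 bytes or a
# gcsUri. Trimmed model copies; the sample reply is an illustrative assumption.
from typing import List, Optional
from pydantic import BaseModel

class Video(BaseModel):  # trimmed copy of the model above
    bytesBase64Encoded: Optional[str] = None
    gcsUri: Optional[str] = None
    mimeType: Optional[str] = None

class PollPayload(BaseModel):  # trimmed stand-in for the Response model here
    videos: Optional[List[Video]] = None

class Veo2GenVidPollResponse(BaseModel):  # trimmed copy
    done: Optional[bool] = None
    response: Optional[PollPayload] = None

reply = Veo2GenVidPollResponse.model_validate(
    {'done': True, 'response': {'videos': [{'gcsUri': 'gs://bucket/clip.mp4'}]}}
)
if reply.done and reply.response and reply.response.videos:
    print(reply.response.videos[0].gcsUri)  # -> gs://bucket/clip.mp4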
+ raiMediaFilteredCount: Optional[int] = Field( + None, description='Count of media filtered by responsible AI policies' ) - changelog: Optional[str] = Field( - None, description='Summary of changes made in this version' + raiMediaFilteredReasons: Optional[List[str]] = Field( + None, description='Reasons why media was filtered by responsible AI policies' ) - dependencies: Optional[List[str]] = Field( - None, description='A list of pip dependencies required by the node.' + videos: Optional[List[Video]] = None + + +class Veo2GenVidPollResponse(BaseModel): + done: Optional[bool] = None + error: Optional[Error] = Field( + None, description='Error details if operation failed' ) - downloadUrl: Optional[str] = Field( - None, description='[Output Only] URL to download this version of the node' - ) - deprecated: Optional[bool] = Field( - None, description='Indicates if this version is deprecated.' - ) - status: Optional[NodeVersionStatus] = Field( - None, description='The status of the node version.' - ) - status_reason: Optional[str] = Field( - None, description='The reason for the status change.' - ) - node_id: Optional[str] = Field( - None, description='The unique identifier of the node.' - ) - comfy_node_extract_status: Optional[str] = Field( - None, description='The status of comfy node extraction process.' + name: Optional[str] = None + response: Optional[Response] = Field( + None, description='The actual prediction response if done is true' ) -class IdeogramV3Request(BaseModel): - prompt: str = Field(..., description='The text prompt for image generation') - seed: Optional[int] = Field( - None, description='Seed value for reproducible generation' +class Image(BaseModel): + bytesBase64Encoded: str + gcsUri: Optional[str] = None + mimeType: Optional[str] = None + + +class Image1(BaseModel): + bytesBase64Encoded: Optional[str] = None + gcsUri: str + mimeType: Optional[str] = None + + +class Instance(BaseModel): + image: Optional[Union[Image, Image1]] = Field( + None, description='Optional image to guide video generation' ) - resolution: Optional[str] = Field( - None, description='Image resolution in format WxH', examples=['1280x800'] + prompt: str = Field(..., description='Text description of the video') + + +class PersonGeneration1(str, Enum): + ALLOW = 'ALLOW' + BLOCK = 'BLOCK' + + +class Parameters(BaseModel): + aspectRatio: Optional[str] = Field(None, examples=['16:9']) + durationSeconds: Optional[int] = None + enhancePrompt: Optional[bool] = None + negativePrompt: Optional[str] = None + personGeneration: Optional[PersonGeneration1] = None + sampleCount: Optional[int] = None + seed: Optional[int] = None + storageUri: Optional[str] = Field( + None, description='Optional Cloud Storage URI to upload the video' ) - aspect_ratio: Optional[str] = Field( - None, description='Aspect ratio in format WxH', examples=['1x3'] + + +class Veo2GenVidRequest(BaseModel): + instances: Optional[List[Instance]] = None + parameters: Optional[Parameters] = None + + +class Veo2GenVidResponse(BaseModel): + name: str = Field( + ..., + description='Operation resource name', + examples=[ + 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/a1b07c8e-7b5a-4aba-bb34-3e1ccb8afcc8' + ], ) - rendering_speed: RenderingSpeed - magic_prompt: Optional[MagicPrompt] = Field( - None, description='Whether to enable magic prompt enhancement' + + +class SearchContextSize(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + + +class Type23(str, Enum): + web_search_preview = 
'web_search_preview' + web_search_preview_2025_03_11 = 'web_search_preview_2025_03_11' + + +class WebSearchPreviewTool(BaseModel): + search_context_size: Optional[SearchContextSize] = Field( + None, + description='High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default.', ) - negative_prompt: Optional[str] = Field( - None, description='Text prompt specifying what to avoid in the generation' + type: Literal['WebSearchPreviewTool'] = Field( + ..., + description='The type of the web search tool. One of `web_search_preview` or `web_search_preview_2025_03_11`.', ) - num_images: Optional[int] = Field( - None, description='Number of images to generate', ge=1 + + +class Status11(str, Enum): + in_progress = 'in_progress' + searching = 'searching' + completed = 'completed' + failed = 'failed' + + +class Type24(str, Enum): + web_search_call = 'web_search_call' + + +class WebSearchToolCall(BaseModel): + id: str = Field(..., description='The unique ID of the web search tool call.\n') + status: Status11 = Field( + ..., description='The status of the web search tool call.\n' ) - color_palette: Optional[ColorPalette] = None - style_codes: Optional[List[StyleCode]] = Field( - None, description='Array of style codes in hexadecimal format' + type: Type24 = Field( + ..., + description='The type of the web search tool call. Always `web_search_call`.\n', ) - style_type: Optional[StyleType] = Field( - None, description='The type of style to apply' + + +class CreateModelResponseProperties(ModelResponseProperties): + pass + + +class GeminiInlineData(BaseModel): + data: Optional[str] = Field( + None, + description='The base64 encoding of the image, PDF, or video to include inline in the prompt. When including media inline, you must also specify the media type (mimeType) of the data. Size limit: 20MB\n', ) - style_reference_images: Optional[List[str]] = Field( - None, description='Array of reference image URLs or identifiers' + mimeType: Optional[GeminiMimeType] = None + + +class GeminiPart(BaseModel): + inlineData: Optional[GeminiInlineData] = None + text: Optional[str] = Field( + None, + description='A text prompt or code snippet.', + examples=['Write a story about a robot learning to paint'], + ) + + +class GeminiPromptFeedback(BaseModel): + blockReason: Optional[str] = None + blockReasonMessage: Optional[str] = None + safetyRatings: Optional[List[GeminiSafetyRating]] = None + + +class GeminiSafetySetting(BaseModel): + category: GeminiSafetyCategory + threshold: GeminiSafetyThreshold + + +class GeminiSystemInstructionContent(BaseModel): + parts: List[GeminiTextPart] = Field( + ..., + description='A list of ordered parts that make up a single message. Different parts may have different IANA MIME types. For limits on the inputs, such as the maximum number of tokens or the number of images, see the model specifications on the Google models page.\n', + ) + role: Role1 = Field( + ..., + description='The identity of the entity that creates the message. The following values are supported: user: This indicates that the message is sent by a real person, typically a user-generated message. model: This indicates that the message is generated by the model. The model value is used to insert messages from the model into the conversation during multi-turn conversations. 
For non-multi-turn conversations, this field can be left blank or unset.\n', + examples=['user'], ) class IdeogramV3EditRequest(BaseModel): + color_palette: Optional[IdeogramColorPalette] = None image: Optional[StrictBytes] = Field( None, description='The image being edited (max size 10MB); only JPEG, WebP and PNG formats are supported at this time.', ) - mask: Optional[StrictBytes] = Field( - None, - description='A black and white image of the same size as the image being edited (max size 10MB). Black regions in the mask should match up with the regions of the image that you would like to edit; only JPEG, WebP and PNG formats are supported at this time.', - ) - prompt: str = Field( - ..., description='The prompt used to describe the edited result.' - ) magic_prompt: Optional[str] = Field( None, description='Determine if MagicPrompt should be used in generating the request or not.', ) + mask: Optional[StrictBytes] = Field( + None, + description='A black and white image of the same size as the image being edited (max size 10MB). Black regions in the mask should match up with the regions of the image that you would like to edit; only JPEG, WebP and PNG formats are supported at this time.', + ) num_images: Optional[int] = Field( None, description='The number of images to generate.' ) - seed: Optional[int] = Field( - None, description='Random seed. Set for reproducible generation.' + prompt: str = Field( + ..., description='The prompt used to describe the edited result.' ) rendering_speed: RenderingSpeed - color_palette: Optional[IdeogramColorPalette] = Field( - None, - description='A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members). Not supported by V_1, V_1_TURBO, V_2A and V_2A_TURBO models.', + seed: Optional[int] = Field( + None, description='Random seed. Set for reproducible generation.' 
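# A construction sketch for IdeogramV3EditRequest (defined just below): the
# image is paired with a same-size black-and-white mask, where the black
# regions mark what gets repainted (per the mask description). The byte
# strings are placeholders for real file contents.
from enum import Enum
from typing import Optional
from pydantic import BaseModel, StrictBytes

class RenderingSpeed(str, Enum):  # trimmed copy of the enum above
    BALANCED = 'BALANCED'
    TURBO = 'TURBO'
    QUALITY = 'QUALITY'

class IdeogramV3EditRequest(BaseModel):  # trimmed copy
    image: Optional[StrictBytes] = None
    mask: Optional[StrictBytes] = None
    prompt: str
    rendering_speed: RenderingSpeed
    seed: Optional[int] = None

req = IdeogramV3EditRequest(
    image=b'<jpeg bytes>',  # e.g. open('photo.jpg', 'rb').read()
    mask=b'<png bytes>',    # black = region to edit, white = keep
    prompt='replace the sky with a thunderstorm',
    rendering_speed=RenderingSpeed.QUALITY,
)
print(sorted(req.model_dump(exclude_none=True)))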
) style_codes: Optional[List[StyleCode]] = Field( None, @@ -3166,34 +2723,102 @@ class IdeogramV3EditRequest(BaseModel): ) -class KlingCameraControl(BaseModel): - type: Optional[KlingCameraControlType] = None - config: Optional[KlingCameraConfig] = None - - -class KlingText2VideoRequest(BaseModel): - model_name: Optional[KlingVideoGenModelName] = 'kling-v2-master' - prompt: Optional[str] = Field( - None, description='Positive text prompt', max_length=2500 +class IdeogramV3Request(BaseModel): + aspect_ratio: Optional[str] = Field( + None, description='Aspect ratio in format WxH', examples=['1x3'] + ) + color_palette: Optional[ColorPalette] = None + magic_prompt: Optional[MagicPrompt2] = Field( + None, description='Whether to enable magic prompt enhancement' ) negative_prompt: Optional[str] = Field( - None, description='Negative text prompt', max_length=2500 + None, description='Text prompt specifying what to avoid in the generation' ) - cfg_scale: Optional[KlingVideoGenCfgScale] = Field( - default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + num_images: Optional[int] = Field( + None, description='Number of images to generate', ge=1 ) + prompt: str = Field(..., description='The text prompt for image generation') + rendering_speed: RenderingSpeed + resolution: Optional[str] = Field( + None, description='Image resolution in format WxH', examples=['1280x800'] + ) + seed: Optional[int] = Field( + None, description='Seed value for reproducible generation' + ) + style_codes: Optional[List[StyleCode]] = Field( + None, description='Array of style codes in hexadecimal format' + ) + style_reference_images: Optional[List[str]] = Field( + None, description='Array of reference image URLs or identifiers' + ) + style_type: Optional[StyleType1] = Field( + None, description='The type of style to apply' + ) + + +class ImagenGenerateImageResponse(BaseModel): + predictions: Optional[List[ImagenImagePrediction]] = None + + +class ImagenImageGenerationParameters(BaseModel): + addWatermark: Optional[bool] = None + aspectRatio: Optional[AspectRatio] = None + enhancePrompt: Optional[bool] = None + includeRaiReason: Optional[bool] = None + includeSafetyAttributes: Optional[bool] = None + outputOptions: Optional[ImagenOutputOptions] = None + personGeneration: Optional[PersonGeneration] = None + safetySetting: Optional[SafetySetting] = None + sampleCount: Optional[int] = Field(None, ge=1, le=4) + seed: Optional[int] = None + storageUri: Optional[AnyUrl] = None + + +class InputContent( + RootModel[Union[InputTextContent, InputImageContent, InputFileContent]] +): + root: Union[InputTextContent, InputImageContent, InputFileContent] + + +class InputMessageContentList(RootModel[List[InputContent]]): + root: List[InputContent] = Field( + ..., + description='A list of one or many input items to the model, containing different content \ntypes.\n', + title='Input item content list', + ) + + +class KlingCameraControl(BaseModel): + config: Optional[KlingCameraConfig] = None + type: Optional[KlingCameraControlType] = None + + +class KlingDualCharacterEffectInput(BaseModel): + duration: KlingVideoGenDuration + images: KlingDualCharacterImages mode: Optional[KlingVideoGenMode] = 'std' - camera_control: Optional[KlingCameraControl] = None - aspect_ratio: Optional[KlingVideoGenAspectRatio] = '16:9' - duration: Optional[KlingVideoGenDuration] = '5' - callback_url: Optional[AnyUrl] = Field( - None, description='The callback notification address' - ) - external_task_id: Optional[str] = Field(None, description='Customized 
Task ID') + model_name: Optional[KlingCharacterEffectModelName] = 'kling-v1' class KlingImage2VideoRequest(BaseModel): - model_name: Optional[KlingVideoGenModelName] = 'kling-v2-master' + aspect_ratio: Optional[KlingVideoGenAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback notification address. Server will notify when the task status changes.', + ) + camera_control: Optional[KlingCameraControl] = None + cfg_scale: Optional[KlingVideoGenCfgScale] = Field( + default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + ) + duration: Optional[KlingVideoGenDuration] = '5' + dynamic_masks: Optional[List[DynamicMask]] = Field( + None, + description='Dynamic Brush Configuration List (up to 6 groups). For 5-second videos, trajectory length must not exceed 77 coordinates.', + ) + external_task_id: Optional[str] = Field( + None, + description='Customized Task ID. Must be unique within a single user account.', + ) image: Optional[str] = Field( None, description='Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.', @@ -3202,35 +2827,168 @@ class KlingImage2VideoRequest(BaseModel): None, description='Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. Base64 should not include data:image prefix.', ) - prompt: Optional[str] = Field( - None, description='Positive text prompt', max_length=2500 - ) + mode: Optional[KlingVideoGenMode] = 'std' + model_name: Optional[KlingVideoGenModelName] = 'kling-v2-master' negative_prompt: Optional[str] = Field( None, description='Negative text prompt', max_length=2500 ) - cfg_scale: Optional[KlingVideoGenCfgScale] = Field( - default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + prompt: Optional[str] = Field( + None, description='Positive text prompt', max_length=2500 ) - mode: Optional[KlingVideoGenMode] = 'std' static_mask: Optional[str] = Field( None, description='Static Brush Application Area (Mask image created by users using the motion brush). 
The aspect ratio must match the input image.', ) - dynamic_masks: Optional[List[DynamicMask]] = Field( + + +class TaskResult(BaseModel): + videos: Optional[List[KlingVideoResult]] = None + + +class Data(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult] = None + task_status: Optional[KlingTaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingImage2VideoResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class TaskResult1(BaseModel): + images: Optional[List[KlingImageResult]] = None + + +class Data1(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_result: Optional[TaskResult1] = None + task_status: Optional[KlingTaskStatus] = None + task_status_msg: Optional[str] = Field(None, description='Task status information') + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingImageGenerationsResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data1] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class KlingLipSyncInputObject(BaseModel): + audio_file: Optional[str] = Field( None, - description='Dynamic Brush Configuration List (up to 6 groups). For 5-second videos, trajectory length must not exceed 77 coordinates.', + description='Local Path of Audio File. Supported formats: .mp3/.wav/.m4a/.aac, maximum file size of 5MB. Base64 code.', ) - camera_control: Optional[KlingCameraControl] = None - aspect_ratio: Optional[KlingVideoGenAspectRatio] = '16:9' - duration: Optional[KlingVideoGenDuration] = '5' + audio_type: Optional[KlingAudioUploadType] = None + audio_url: Optional[str] = Field( + None, + description='Audio File Download URL. Supported formats: .mp3/.wav/.m4a/.aac, maximum file size of 5MB.', + ) + mode: KlingLipSyncMode + text: Optional[str] = Field( + None, + description='Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.', + ) + video_id: Optional[str] = Field( + None, + description='The ID of the video generated by Kling AI. Only supports 5-second and 10-second videos generated within the last 30 days.', + ) + video_url: Optional[str] = Field( + None, + description='Get link for uploaded video. Video files support .mp4/.mov, file size does not exceed 100MB, video length between 2-10s.', + ) + voice_id: Optional[str] = Field( + None, + description='Voice ID. Required when mode is text2video. The system offers a variety of voice options to choose from.', + ) + voice_language: Optional[KlingLipSyncVoiceLanguage] = 'en' + voice_speed: Optional[float] = Field( + 1, + description='Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place.', + ge=0.8, + le=2.0, + ) + + +class KlingLipSyncRequest(BaseModel): callback_url: Optional[AnyUrl] = Field( None, description='The callback notification address. 
Server will notify when the task status changes.', ) - external_task_id: Optional[str] = Field( - None, - description='Customized Task ID. Must be unique within a single user account.', + input: KlingLipSyncInputObject + + +class TaskResult2(BaseModel): + videos: Optional[List[KlingVideoResult]] = None + + +class Data2(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult2] = None + task_status: Optional[KlingTaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingLipSyncResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data2] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class KlingSingleImageEffectInput(BaseModel): + duration: KlingSingleImageEffectDuration + image: str = Field( + ..., + description='Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1.', ) + model_name: KlingSingleImageEffectModelName + + +class KlingText2VideoRequest(BaseModel): + aspect_ratio: Optional[KlingVideoGenAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback notification address' + ) + camera_control: Optional[KlingCameraControl] = None + cfg_scale: Optional[KlingVideoGenCfgScale] = Field( + default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + ) + duration: Optional[KlingVideoGenDuration] = '5' + external_task_id: Optional[str] = Field(None, description='Customized Task ID') + mode: Optional[KlingVideoGenMode] = 'std' + model_name: Optional[KlingTextToVideoModelName] = 'kling-v1' + negative_prompt: Optional[str] = Field( + None, description='Negative text prompt', max_length=2500 + ) + prompt: Optional[str] = Field( + None, description='Positive text prompt', max_length=2500 + ) + + +class Data4(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult2] = None + task_status: Optional[KlingTaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingText2VideoResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data4] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') class KlingVideoEffectsInput( @@ -3239,351 +2997,325 @@ class KlingVideoEffectsInput( root: Union[KlingSingleImageEffectInput, KlingDualCharacterEffectInput] -class StripeBillingDetails(BaseModel): - address: Optional[StripeAddress] = None - email: Optional[str] = None - name: Optional[str] = None - phone: Optional[str] = None - tax_id: Optional[Any] = None - - -class StripePaymentMethodDetails(BaseModel): - card: Optional[StripeCardDetails] = None - type: Optional[str] = None - - -class BFLFluxProFillInputs(BaseModel): - image: str = Field( - ..., - description='A Base64-encoded string representing the image you wish to modify. 
Can contain alpha mask if desired.', - title='Image', - ) - mask: Optional[str] = Field( +class KlingVideoEffectsRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( None, - description='A Base64-encoded string representing a mask for the areas you want to modify in the image. The mask should be the same dimensions as the image and in black and white. Black areas (0%) indicate no modification, while white areas (100%) specify areas for inpainting. Optional if you provide an alpha mask in the original image. Validation: The endpoint verifies that the dimensions of the mask match the original image.', - title='Mask', + description='The callback notification address for the result of this task.', + ) + effect_scene: Union[KlingDualCharacterEffectsScene, KlingSingleImageEffectsScene] + external_task_id: Optional[str] = Field( + None, + description='Customized Task ID. Must be unique within a single user account.', + ) + input: KlingVideoEffectsInput + + +class Data5(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult2] = None + task_status: Optional[KlingTaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') + + +class KlingVideoEffectsResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data5] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') + + +class KlingVideoExtendRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( + None, + description='The callback notification address. Server will notify when the task status changes.', + ) + cfg_scale: Optional[KlingVideoGenCfgScale] = Field( + default_factory=lambda: KlingVideoGenCfgScale.model_validate(0.5) + ) + negative_prompt: Optional[str] = Field( + None, + description='Negative text prompt for elements to avoid in the extended video', + max_length=2500, ) prompt: Optional[str] = Field( - '', - description='The description of the changes you want to make. This text guides the inpainting process, allowing you to specify features, styles, or modifications for the masked area.', - examples=['ein fantastisches bild'], - title='Prompt', - ) - steps: Optional[Steps] = Field( - default_factory=lambda: Steps.model_validate(50), - description='Number of steps for the image generation process', - examples=[50], - title='Steps', - ) - prompt_upsampling: Optional[bool] = Field( - False, - description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation', - title='Prompt Upsampling', - ) - seed: Optional[int] = Field( - None, description='Optional seed for reproducibility', title='Seed' - ) - guidance: Optional[Guidance] = Field( - default_factory=lambda: Guidance.model_validate(60), - description='Guidance strength for the image generation process', - title='Guidance', - ) - output_format: Optional[BFLOutputFormat] = Field( - 'jpeg', - description="Output format for the generated image. Can be 'jpeg' or 'png'.", - ) - safety_tolerance: Optional[int] = Field( - 2, - description='Tolerance level for input and output moderation. 
Between 0 and 6, 0 being most strict, 6 being least strict.', - examples=[2], - ge=0, - le=6, - title='Safety Tolerance', - ) - webhook_url: Optional[WebhookUrl] = Field( - None, description='URL to receive webhook notifications', title='Webhook Url' - ) - webhook_secret: Optional[str] = Field( None, - description='Optional secret for webhook signature verification', - title='Webhook Secret', + description='Positive text prompt for guiding the video extension', + max_length=2500, ) - - -class BFLHTTPValidationError(BaseModel): - detail: Optional[List[BFLValidationError]] = Field(None, title='Detail') - - -class BFLFluxProExpandInputs(BaseModel): - image: str = Field( - ..., - description='A Base64-encoded string representing the image you wish to expand.', - title='Image', - ) - top: Optional[Top] = Field( - 0, description='Number of pixels to expand at the top of the image', title='Top' - ) - bottom: Optional[Bottom] = Field( - 0, - description='Number of pixels to expand at the bottom of the image', - title='Bottom', - ) - left: Optional[Left] = Field( - 0, - description='Number of pixels to expand on the left side of the image', - title='Left', - ) - right: Optional[Right] = Field( - 0, - description='Number of pixels to expand on the right side of the image', - title='Right', - ) - prompt: Optional[str] = Field( - '', - description='The description of the changes you want to make. This text guides the expansion process, allowing you to specify features, styles, or modifications for the expanded areas.', - examples=['ein fantastisches bild'], - title='Prompt', - ) - steps: Optional[Steps] = Field( - default_factory=lambda: Steps.model_validate(50), - description='Number of steps for the image generation process', - examples=[50], - title='Steps', - ) - prompt_upsampling: Optional[bool] = Field( - False, - description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation', - title='Prompt Upsampling', - ) - seed: Optional[int] = Field( - None, description='Optional seed for reproducibility', title='Seed' - ) - guidance: Optional[Guidance] = Field( - default_factory=lambda: Guidance.model_validate(60), - description='Guidance strength for the image generation process', - title='Guidance', - ) - output_format: Optional[BFLOutputFormat] = Field( - 'jpeg', - description="Output format for the generated image. Can be 'jpeg' or 'png'.", - ) - safety_tolerance: Optional[int] = Field( - 2, - description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.', - examples=[2], - ge=0, - le=6, - title='Safety Tolerance', - ) - webhook_url: Optional[WebhookUrl] = Field( - None, description='URL to receive webhook notifications', title='Webhook Url' - ) - webhook_secret: Optional[str] = Field( + video_id: Optional[str] = Field( None, - description='Optional secret for webhook signature verification', - title='Webhook Secret', + description='The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. 
Cannot exceed 3 minutes total duration after extension.', ) -class BFLCannyInputs(BaseModel): - prompt: str = Field( - ..., - description='Text prompt for image generation', - examples=['ein fantastisches bild'], - title='Prompt', - ) - control_image: Optional[str] = Field( - None, - description='Base64 encoded image to use as control input if no preprocessed image is provided', - title='Control Image', - ) - preprocessed_image: Optional[str] = Field( - None, - description='Optional pre-processed image that will bypass the control preprocessing step', - title='Preprocessed Image', - ) - canny_low_threshold: Optional[CannyLowThreshold] = Field( - default_factory=lambda: CannyLowThreshold.model_validate(50), - description='Low threshold for Canny edge detection', - title='Canny Low Threshold', - ) - canny_high_threshold: Optional[CannyHighThreshold] = Field( - default_factory=lambda: CannyHighThreshold.model_validate(200), - description='High threshold for Canny edge detection', - title='Canny High Threshold', - ) - prompt_upsampling: Optional[bool] = Field( - False, - description='Whether to perform upsampling on the prompt', - title='Prompt Upsampling', - ) - seed: Optional[int] = Field( - None, - description='Optional seed for reproducibility', - examples=[42], - title='Seed', - ) - steps: Optional[Steps2] = Field( - default_factory=lambda: Steps2.model_validate(50), - description='Number of steps for the image generation process', - title='Steps', - ) - output_format: Optional[BFLOutputFormat] = Field( - 'jpeg', - description="Output format for the generated image. Can be 'jpeg' or 'png'.", - ) - guidance: Optional[Guidance2] = Field( - default_factory=lambda: Guidance2.model_validate(30), - description='Guidance strength for the image generation process', - title='Guidance', - ) - safety_tolerance: Optional[int] = Field( - 2, - description='Tolerance level for input and output moderation. 
Between 0 and 6, 0 being most strict, 6 being least strict.', - ge=0, - le=6, - title='Safety Tolerance', - ) - webhook_url: Optional[WebhookUrl] = Field( - None, description='URL to receive webhook notifications', title='Webhook Url' - ) - webhook_secret: Optional[str] = Field( - None, - description='Optional secret for webhook signature verification', - title='Webhook Secret', - ) +class Data6(BaseModel): + created_at: Optional[int] = Field(None, description='Task creation time') + task_id: Optional[str] = Field(None, description='Task ID') + task_info: Optional[TaskInfo] = None + task_result: Optional[TaskResult2] = None + task_status: Optional[KlingTaskStatus] = None + updated_at: Optional[int] = Field(None, description='Task update time') -class BFLDepthInputs(BaseModel): - prompt: str = Field( - ..., - description='Text prompt for image generation', - examples=['ein fantastisches bild'], - title='Prompt', - ) - control_image: Optional[str] = Field( - None, - description='Base64 encoded image to use as control input', - title='Control Image', - ) - preprocessed_image: Optional[str] = Field( - None, - description='Optional pre-processed image that will bypass the control preprocessing step', - title='Preprocessed Image', - ) - prompt_upsampling: Optional[bool] = Field( - False, - description='Whether to perform upsampling on the prompt', - title='Prompt Upsampling', - ) - seed: Optional[int] = Field( - None, - description='Optional seed for reproducibility', - examples=[42], - title='Seed', - ) - steps: Optional[Steps2] = Field( - default_factory=lambda: Steps2.model_validate(50), - description='Number of steps for the image generation process', - title='Steps', - ) - output_format: Optional[BFLOutputFormat] = Field( - 'jpeg', - description="Output format for the generated image. Can be 'jpeg' or 'png'.", - ) - guidance: Optional[Guidance2] = Field( - default_factory=lambda: Guidance2.model_validate(15), - description='Guidance strength for the image generation process', - title='Guidance', - ) - safety_tolerance: Optional[int] = Field( - 2, - description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.', - ge=0, - le=6, - title='Safety Tolerance', - ) - webhook_url: Optional[WebhookUrl] = Field( - None, description='URL to receive webhook notifications', title='Webhook Url' - ) - webhook_secret: Optional[str] = Field( - None, - description='Optional secret for webhook signature verification', - title='Webhook Secret', - ) - - -class Controls(BaseModel): - artistic_level: Optional[int] = Field( - None, - description='Defines artistic tone of your image. At a simple level, the person looks straight at the camera in a static and clean style. 
Dynamic and eccentric levels introduce movement and creativity.', - ge=0, - le=5, - ) - colors: Optional[List[RGBColor]] = Field( - None, description='An array of preferable colors' - ) - background_color: Optional[RGBColor] = Field( - None, description='Use given color as a desired background color' - ) - no_text: Optional[bool] = Field(None, description='Do not embed text layouts') - - -class RecraftImageGenerationRequest(BaseModel): - prompt: str = Field( - ..., description='The text prompt describing the image to generate' - ) - model: str = Field( - ..., description='The model to use for generation (e.g., "recraftv3")' - ) - style: Optional[str] = Field( - None, - description='The style to apply to the generated image (e.g., "digital_illustration")', - ) - style_id: Optional[str] = Field( - None, - description='The style ID to apply to the generated image (e.g., "123e4567-e89b-12d3-a456-426614174000"). If style_id is provided, style should not be provided.', - ) - size: str = Field( - ..., description='The size of the generated image (e.g., "1024x1024")' - ) - controls: Optional[Controls] = Field( - None, description='The controls for the generated image' - ) - n: int = Field(..., description='The number of images to generate', ge=1, le=4) - - -class LumaKeyframes(BaseModel): - frame0: Optional[LumaKeyframe] = None - frame1: Optional[LumaKeyframe] = None +class KlingVideoExtendResponse(BaseModel): + code: Optional[int] = Field(None, description='Error code') + data: Optional[Data6] = None + message: Optional[str] = Field(None, description='Error message') + request_id: Optional[str] = Field(None, description='Request ID') class LumaGenerationRequest(BaseModel): - generation_type: Optional[GenerationType] = 'video' - prompt: str = Field(..., description='The prompt of the generation') aspect_ratio: LumaAspectRatio - loop: Optional[bool] = Field(None, description='Whether to loop the video') - keyframes: Optional[LumaKeyframes] = None callback_url: Optional[AnyUrl] = Field( None, description='The callback URL of the generation, a POST request with Generation object will be sent to the callback URL when the generation is dreaming, completed, or failed', ) - model: LumaVideoModel - resolution: LumaVideoModelOutputResolution duration: LumaVideoModelOutputDuration + generation_type: Optional[GenerationType1] = 'video' + keyframes: Optional[LumaKeyframes] = None + loop: Optional[bool] = Field(None, description='Whether to loop the video') + model: LumaVideoModel + prompt: str = Field(..., description='The prompt of the generation') + resolution: LumaVideoModelOutputResolution + + +class CharacterRef(BaseModel): + identity0: Optional[LumaImageIdentity] = None + + +class LumaImageGenerationRequest(BaseModel): + aspect_ratio: Optional[LumaAspectRatio] = '16:9' + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the generation' + ) + character_ref: Optional[CharacterRef] = None + generation_type: Optional[GenerationType2] = 'image' + image_ref: Optional[List[LumaImageRef]] = None + model: Optional[LumaImageModel] = 'photon-1' + modify_image_ref: Optional[LumaModifyImageRef] = None + prompt: Optional[str] = Field(None, description='The prompt of the generation') + style_ref: Optional[List[LumaImageRef]] = None + + +class LumaUpscaleVideoGenerationRequest(BaseModel): + callback_url: Optional[AnyUrl] = Field( + None, description='The callback URL for the upscale' + ) + generation_type: Optional[GenerationType3] = 'upscale_video' + resolution: 
Optional[LumaVideoModelOutputResolution] = None + + +class OutputContent(RootModel[Union[OutputTextContent, OutputAudioContent]]): + root: Union[OutputTextContent, OutputAudioContent] + + +class OutputMessage(BaseModel): + content: List[OutputContent] = Field(..., description='The content of the message') + role: Role4 = Field(..., description='The role of the message') + type: Type14 = Field(..., description='The type of output item') + + +class PikaBodyGenerate22I2vGenerate22I2vPost(BaseModel): + duration: Optional[PikaDurationEnum] = 5 + image: Optional[StrictBytes] = Field(None, title='Image') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + promptText: Optional[str] = Field(None, title='Prompttext') + resolution: Optional[PikaResolutionEnum] = '1080p' + seed: Optional[int] = Field(None, title='Seed') + + +class PikaBodyGenerate22KeyframeGenerate22PikaframesPost(BaseModel): + duration: Optional[int] = Field(None, ge=5, le=10, title='Duration') + keyFrames: Optional[List[StrictBytes]] = Field( + None, description='Array of keyframe images', title='Keyframes' + ) + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + promptText: str = Field(..., title='Prompttext') + resolution: Optional[PikaResolutionEnum] = '1080p' + seed: Optional[int] = Field(None, title='Seed') + + +class PikaBodyGenerate22T2vGenerate22T2vPost(BaseModel): + aspectRatio: Optional[float] = Field( + 1.7777777777777777, + description='Aspect ratio (width / height)', + ge=0.4, + le=2.5, + title='Aspectratio', + ) + duration: Optional[PikaDurationEnum] = 5 + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + promptText: str = Field(..., title='Prompttext') + resolution: Optional[PikaResolutionEnum] = '1080p' + seed: Optional[int] = Field(None, title='Seed') + + +class PikaBodyGeneratePikaffectsGeneratePikaffectsPost(BaseModel): + image: Optional[StrictBytes] = Field(None, title='Image') + negativePrompt: Optional[str] = Field(None, title='Negativeprompt') + pikaffect: Optional[Pikaffect] = None + promptText: Optional[str] = Field(None, title='Prompttext') + seed: Optional[int] = Field(None, title='Seed') + + +class PikaHTTPValidationError(BaseModel): + detail: Optional[List[PikaValidationError]] = Field(None, title='Detail') + + +class Reasoning(BaseModel): + effort: Optional[ReasoningEffort] = 'medium' + generate_summary: Optional[GenerateSummary] = Field( + None, + description="**Deprecated:** use `summary` instead.\n\nA summary of the reasoning performed by the model. This can be\nuseful for debugging and understanding the model's reasoning process.\nOne of `auto`, `concise`, or `detailed`.\n", + ) + summary: Optional[Summary] = Field( + None, + description="A summary of the reasoning performed by the model. 
This can be\nuseful for debugging and understanding the model's reasoning process.\nOne of `auto`, `concise`, or `detailed`.\n", + ) + + +class ResponseError(BaseModel): + code: ResponseErrorCode + message: str = Field(..., description='A human-readable description of the error.') + + +class Rodin3DDownloadResponse(BaseModel): + list: Optional[RodinResourceItem] = None + + +class Rodin3DGenerateRequest(BaseModel): + images: str = Field(..., description='The reference images to generate 3D Assets.') + material: Optional[RodinMaterialType] = None + mesh_mode: Optional[RodinMeshModeType] = None + quality: Optional[RodinQualityType] = None + seed: Optional[int] = Field(None, description='Seed.') + tier: Optional[RodinTierType] = None + + +class Rodin3DGenerateResponse(BaseModel): + jobs: Optional[RodinGenerateJobsData] = None + message: Optional[str] = Field(None, description='message') + prompt: Optional[str] = Field(None, description='prompt') + submit_time: Optional[str] = Field(None, description='Time') + uuid: Optional[str] = Field(None, description='Task UUID') + + +class RunwayImageToVideoRequest(BaseModel): + duration: RunwayDurationEnum + model: RunwayModelEnum + promptImage: RunwayPromptImageObject + promptText: Optional[str] = Field( + None, description='Text prompt for the generation', max_length=1000 + ) + ratio: RunwayAspectRatioEnum + seed: int = Field( + ..., description='Random seed for generation', ge=0, le=4294967295 + ) + + +class TextResponseFormatConfiguration( + RootModel[ + Union[ + ResponseFormatText, TextResponseFormatJsonSchema, ResponseFormatJsonObject + ] + ] +): + root: Union[ + ResponseFormatText, TextResponseFormatJsonSchema, ResponseFormatJsonObject + ] = Field( + ..., + description='An object specifying the format that the model must output.\n\nConfiguring `{ "type": "json_schema" }` enables Structured Outputs, \nwhich ensures the model will match your supplied JSON schema. Learn more in the \n[Structured Outputs guide](/docs/guides/structured-outputs).\n\nThe default format is `{ "type": "text" }` with no additional options.\n\n**Not recommended for gpt-4o and newer models:**\n\nSetting to `{ "type": "json_object" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. Using `json_schema`\nis preferred for models that support it.\n', + ) + + +class Tool( + RootModel[ + Union[ + FileSearchTool, FunctionTool, WebSearchPreviewTool, ComputerUsePreviewTool + ] + ] +): + root: Union[ + FileSearchTool, FunctionTool, WebSearchPreviewTool, ComputerUsePreviewTool + ] = Field(..., discriminator='type') + + +class EasyInputMessage(BaseModel): + content: Union[str, InputMessageContentList] = Field( + ..., + description='Text, image, or audio input to the model, used to generate a response.\nCan also contain previous assistant responses.\n', + ) + role: Role = Field( + ..., + description='The role of the message input. One of `user`, `assistant`, `system`, or\n`developer`.\n', + ) + type: Optional[Type2] = Field( + None, description='The type of the message input. 
Always `message`.\n' + ) + + +class GeminiContent(BaseModel): + parts: List[GeminiPart] + role: Role1 = Field(..., examples=['user']) + + +class GeminiGenerateContentRequest(BaseModel): + contents: List[GeminiContent] + generationConfig: Optional[GeminiGenerationConfig] = None + safetySettings: Optional[List[GeminiSafetySetting]] = None + systemInstruction: Optional[GeminiSystemInstructionContent] = None + tools: Optional[List[GeminiTool]] = None + videoMetadata: Optional[GeminiVideoMetadata] = None + + +class ImagenGenerateImageRequest(BaseModel): + instances: List[ImagenImageGenerationInstance] + parameters: ImagenImageGenerationParameters + + +class InputMessage(BaseModel): + content: Optional[InputMessageContentList] = None + role: Optional[Role3] = None + status: Optional[Status2] = None + type: Optional[Type9] = None + + +class Item( + RootModel[ + Union[ + InputMessage, + OutputMessage, + FileSearchToolCall, + ComputerToolCall, + WebSearchToolCall, + FunctionToolCall, + ReasoningItem, + ] + ] +): + root: Union[ + InputMessage, + OutputMessage, + FileSearchToolCall, + ComputerToolCall, + WebSearchToolCall, + FunctionToolCall, + ReasoningItem, + ] = Field(..., description='Content item used to generate a response.\n') class LumaGeneration(BaseModel): - id: Optional[UUID] = Field(None, description='The ID of the generation') - generation_type: Optional[LumaGenerationType] = None - state: Optional[LumaState] = None - failure_reason: Optional[str] = Field( - None, description='The reason for the state of the generation' - ) + assets: Optional[LumaAssets] = None created_at: Optional[datetime] = Field( None, description='The date and time when the generation was created' ) - assets: Optional[LumaAssets] = None + failure_reason: Optional[str] = Field( + None, description='The reason for the state of the generation' + ) + generation_type: Optional[LumaGenerationType] = None + id: Optional[UUID] = Field(None, description='The ID of the generation') model: Optional[str] = Field(None, description='The model used for the generation') request: Optional[ Union[ @@ -3593,237 +3325,129 @@ class LumaGeneration(BaseModel): LumaAudioGenerationRequest, ] ] = Field(None, description='The request of the generation') + state: Optional[LumaState] = None -class RunwayImageToVideoRequest(BaseModel): - promptImage: RunwayPromptImageObject - seed: int = Field( - ..., description='Random seed for generation', ge=0, le=4294967295 +class OutputItem( + RootModel[ + Union[ + OutputMessage, + FileSearchToolCall, + FunctionToolCall, + WebSearchToolCall, + ComputerToolCall, + ReasoningItem, + ] + ] +): + root: Union[ + OutputMessage, + FileSearchToolCall, + FunctionToolCall, + WebSearchToolCall, + ComputerToolCall, + ReasoningItem, + ] + + +class Text(BaseModel): + format: Optional[TextResponseFormatConfiguration] = None + + +class ResponseProperties(BaseModel): + instructions: Optional[str] = Field( + None, + description="Inserts a system (or developer) message as the first item in the model's context.\n\nWhen using along with `previous_response_id`, the instructions from a previous\nresponse will not be carried over to the next response. 
This makes it simple\nto swap out system (or developer) messages in new responses.\n", ) - model: RunwayModelEnum = Field(..., description='Model to use for generation') - promptText: Optional[str] = Field( - None, description='Text prompt for the generation', max_length=1000 + max_output_tokens: Optional[int] = Field( + None, + description='An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n', ) - duration: RunwayDurationEnum = Field( - ..., description='The number of seconds of duration for the output video.' + model: Optional[OpenAIModels] = None + previous_response_id: Optional[str] = Field( + None, + description='The unique ID of the previous response to the model. Use this to\ncreate multi-turn conversations. Learn more about \n[conversation state](/docs/guides/conversation-state).\n', ) - ratio: RunwayAspectRatioEnum = Field( + reasoning: Optional[Reasoning] = None + text: Optional[Text] = None + tool_choice: Optional[ + Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction] + ] = Field( + None, + description='How the model should select which tool (or tools) to use when generating\na response. See the `tools` parameter to see how to specify which tools\nthe model can call.\n', + ) + tools: Optional[List[Tool]] = None + truncation: Optional[Truncation1] = Field( + 'disabled', + description="The truncation strategy to use for the model response.\n- `auto`: If the context of this response and previous ones exceeds\n the model's context window size, the model will truncate the \n response to fit the context window by dropping input items in the\n middle of the conversation. \n- `disabled` (default): If a model response will exceed the context window \n size for a model, the request will fail with a 400 error.\n", + ) + + +class GeminiCandidate(BaseModel): + citationMetadata: Optional[GeminiCitationMetadata] = None + content: Optional[GeminiContent] = None + finishReason: Optional[str] = None + safetyRatings: Optional[List[GeminiSafetyRating]] = None + + +class GeminiGenerateContentResponse(BaseModel): + candidates: Optional[List[GeminiCandidate]] = None + promptFeedback: Optional[GeminiPromptFeedback] = None + + +class InputItem(RootModel[Union[EasyInputMessage, Item]]): + root: Union[EasyInputMessage, Item] + + +class OpenAICreateResponse(CreateModelResponseProperties, ResponseProperties): + include: Optional[List[Includable]] = Field( + None, + description='Specify additional output data to include in the model response. Currently\nsupported values are:\n- `file_search_call.results`: Include the search results of\n the file search tool call.\n- `message.input_image.image_url`: Include image urls from the input message.\n- `computer_call_output.output.image_url`: Include image urls from the computer call output.\n', + ) + input: Union[str, List[InputItem]] = Field( ..., - description='The resolution (aspect ratio) of the output video. Allowable values depend on the selected model. 
1280:768 and 768:1280 are only supported for gen3a_turbo.', + description='Text, image, or file inputs to the model, used to generate a response.\n\nLearn more:\n- [Text inputs and outputs](/docs/guides/text)\n- [Image inputs](/docs/guides/images)\n- [File inputs](/docs/guides/pdf-files)\n- [Conversation state](/docs/guides/conversation-state)\n- [Function calling](/docs/guides/function-calling)\n', ) - - -class RunwayTaskStatusResponse(BaseModel): - id: Optional[str] = Field(None, description='Task ID') - status: Optional[RunwayTaskStatusEnum] = Field(None, description='Task status') - createdAt: Optional[datetime] = Field(None, description='Task creation timestamp') - output: Optional[List[str]] = Field(None, description='Array of output video URLs') - - -class PikaHTTPValidationError(BaseModel): - detail: Optional[List[PikaValidationError]] = Field(None, title='Detail') - - -class PikaBodyGenerate22T2vGenerate22T2vPost(BaseModel): - promptText: str = Field(..., title='Prompttext') - negativePrompt: Optional[str] = Field(None, title='Negativeprompt') - seed: Optional[int] = Field(None, title='Seed') - resolution: Optional[PikaResolutionEnum] = Field('1080p', title='Resolution') - duration: Optional[PikaDurationEnum] = Field(5, title='Duration') - aspectRatio: Optional[float] = Field( - 1.7777777777777777, - description='Aspect ratio (width / height)', - ge=0.4, - le=2.5, - title='Aspectratio', + parallel_tool_calls: Optional[bool] = Field( + True, description='Whether to allow the model to run tool calls in parallel.\n' ) - - -class PikaBodyGenerate22I2vGenerate22I2vPost(BaseModel): - image: Optional[StrictBytes] = Field(None, title='Image') - promptText: Optional[str] = Field(None, title='Prompttext') - negativePrompt: Optional[str] = Field(None, title='Negativeprompt') - seed: Optional[int] = Field(None, title='Seed') - resolution: Optional[PikaResolutionEnum] = Field('1080p', title='Resolution') - duration: Optional[PikaDurationEnum] = Field(5, title='Duration') - - -class PikaBodyGenerate22KeyframeGenerate22PikaframesPost(BaseModel): - keyFrames: Optional[List[StrictBytes]] = Field( - None, description='Array of keyframe images', title='Keyframes' + store: Optional[bool] = Field( + True, + description='Whether to store the generated model response for later retrieval via\nAPI.\n', ) - promptText: str = Field(..., title='Prompttext') - negativePrompt: Optional[str] = Field(None, title='Negativeprompt') - seed: Optional[int] = Field(None, title='Seed') - resolution: Optional[PikaResolutionEnum] = Field('1080p', title='Resolution') - duration: Optional[int] = Field(None, ge=5, le=10, title='Duration') + stream: Optional[bool] = Field( + False, + description='If set to true, the model response data will be streamed to the client\nas it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\nSee the [Streaming section below](/docs/api-reference/responses-streaming)\nfor more information.\n', + ) + usage: Optional[ResponseUsage] = None -class PikaVideoResponse(BaseModel): - id: str = Field(..., title='Id') - status: PikaStatusEnum = Field( - ..., description='The status of the video', title='Status' - ) - url: Optional[str] = Field(None, title='Url') - progress: Optional[int] = Field(None, title='Progress') - - -class Node(BaseModel): - id: Optional[str] = Field(None, description='The unique identifier of the node.') - name: Optional[str] = Field(None, description='The display name of the 
node.') - category: Optional[str] = Field(None, description='The category of the node.') - description: Optional[str] = None - author: Optional[str] = None - license: Optional[str] = Field( - None, description="The path to the LICENSE file in the node's repository." - ) - icon: Optional[str] = Field(None, description="URL to the node's icon.") - repository: Optional[str] = Field(None, description="URL to the node's repository.") - tags: Optional[List[str]] = None - latest_version: Optional[NodeVersion] = Field( - None, description='The latest version of the node.' - ) - rating: Optional[float] = Field(None, description='The average rating of the node.') - downloads: Optional[int] = Field( - None, description='The number of downloads of the node.' - ) - publisher: Optional[Publisher] = Field( - None, description='The publisher of the node.' - ) - status: Optional[NodeStatus] = Field(None, description='The status of the node.') - status_detail: Optional[str] = Field( - None, description='The status detail of the node.' - ) - translations: Optional[Dict[str, Dict[str, Any]]] = None - - -class KlingVideoEffectsRequest(BaseModel): - effect_scene: Union[KlingDualCharacterEffectsScene, KlingSingleImageEffectsScene] - input: KlingVideoEffectsInput - callback_url: Optional[AnyUrl] = Field( +class OpenAIResponse(ModelResponseProperties, ResponseProperties): + created_at: Optional[float] = Field( None, - description='The callback notification address for the result of this task.', + description='Unix timestamp (in seconds) of when this Response was created.', ) - external_task_id: Optional[str] = Field( + error: Optional[ResponseError] = None + id: Optional[str] = Field(None, description='Unique identifier for this Response.') + incomplete_details: Optional[IncompleteDetails] = Field( + None, description='Details about why the response is incomplete.\n' + ) + object: Optional[Object] = Field( + None, description='The object type of this resource - always set to `response`.' + ) + output: Optional[List[OutputItem]] = Field( None, - description='Customized Task ID. 
Must be unique within a single user account.', + description="An array of content items generated by the model.\n\n- The length and order of items in the `output` array is dependent\n on the model's response.\n- Rather than accessing the first item in the `output` array and \n assuming it's an `assistant` message with the content generated by\n the model, you might consider using the `output_text` property where\n supported in SDKs.\n", ) - - -class StripeCharge(BaseModel): - id: Optional[str] = None - object: Optional[Object2] = None - amount: Optional[int] = None - amount_captured: Optional[int] = None - amount_refunded: Optional[int] = None - application: Optional[str] = None - application_fee: Optional[str] = None - application_fee_amount: Optional[int] = None - balance_transaction: Optional[str] = None - billing_details: Optional[StripeBillingDetails] = None - calculated_statement_descriptor: Optional[str] = None - captured: Optional[bool] = None - created: Optional[int] = None - currency: Optional[str] = None - customer: Optional[str] = None - description: Optional[str] = None - destination: Optional[Any] = None - dispute: Optional[Any] = None - disputed: Optional[bool] = None - failure_balance_transaction: Optional[Any] = None - failure_code: Optional[Any] = None - failure_message: Optional[Any] = None - fraud_details: Optional[Dict[str, Any]] = None - invoice: Optional[Any] = None - livemode: Optional[bool] = None - metadata: Optional[Dict[str, Any]] = None - on_behalf_of: Optional[Any] = None - order: Optional[Any] = None - outcome: Optional[StripeOutcome] = None - paid: Optional[bool] = None - payment_intent: Optional[str] = None - payment_method: Optional[str] = None - payment_method_details: Optional[StripePaymentMethodDetails] = None - radar_options: Optional[Dict[str, Any]] = None - receipt_email: Optional[str] = None - receipt_number: Optional[str] = None - receipt_url: Optional[str] = None - refunded: Optional[bool] = None - refunds: Optional[StripeRefundList] = None - review: Optional[Any] = None - shipping: Optional[StripeShipping] = None - source: Optional[Any] = None - source_transfer: Optional[Any] = None - statement_descriptor: Optional[Any] = None - statement_descriptor_suffix: Optional[Any] = None - status: Optional[str] = None - transfer_data: Optional[Any] = None - transfer_group: Optional[Any] = None - - -class StripeChargeList(BaseModel): - object: Optional[str] = None - data: Optional[List[StripeCharge]] = None - has_more: Optional[bool] = None - total_count: Optional[int] = None - url: Optional[str] = None - - -class StripePaymentIntent(BaseModel): - id: Optional[str] = None - object: Optional[Object1] = None - amount: Optional[int] = None - amount_capturable: Optional[int] = None - amount_details: Optional[StripeAmountDetails] = None - amount_received: Optional[int] = None - application: Optional[str] = None - application_fee_amount: Optional[int] = None - automatic_payment_methods: Optional[Any] = None - canceled_at: Optional[int] = None - cancellation_reason: Optional[str] = None - capture_method: Optional[str] = None - charges: Optional[StripeChargeList] = None - client_secret: Optional[str] = None - confirmation_method: Optional[str] = None - created: Optional[int] = None - currency: Optional[str] = None - customer: Optional[str] = None - description: Optional[str] = None - invoice: Optional[str] = None - last_payment_error: Optional[Any] = None - latest_charge: Optional[str] = None - livemode: Optional[bool] = None - metadata: Optional[Dict[str, Any]] = 
None - next_action: Optional[Any] = None - on_behalf_of: Optional[Any] = None - payment_method: Optional[str] = None - payment_method_configuration_details: Optional[Any] = None - payment_method_options: Optional[StripePaymentMethodOptions] = None - payment_method_types: Optional[List[str]] = None - processing: Optional[Any] = None - receipt_email: Optional[str] = None - review: Optional[Any] = None - setup_future_usage: Optional[Any] = None - shipping: Optional[StripeShipping] = None - source: Optional[Any] = None - statement_descriptor: Optional[Any] = None - statement_descriptor_suffix: Optional[Any] = None - status: Optional[str] = None - transfer_data: Optional[Any] = None - transfer_group: Optional[Any] = None - - -class Data8(BaseModel): - object: Optional[StripePaymentIntent] = None - - -class StripeEvent(BaseModel): - id: str - object: Object - api_version: Optional[str] = None - created: Optional[int] = None - data: Data8 - livemode: Optional[bool] = None - pending_webhooks: Optional[int] = None - request: Optional[StripeRequestInfo] = None - type: Type + output_text: Optional[str] = Field( + None, + description='SDK-only convenience property that contains the aggregated text output \nfrom all `output_text` items in the `output` array, if any are present. \nSupported in the Python and JavaScript SDKs.\n', + ) + parallel_tool_calls: Optional[bool] = Field( + True, description='Whether to allow the model to run tool calls in parallel.\n' + ) + status: Optional[Status6] = Field( + None, + description='The status of the response generation. One of `completed`, `failed`, `in_progress`, or `incomplete`.', + ) + usage: Optional[ResponseUsage] = None diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 62866216f..0897d5d78 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -139,7 +139,7 @@ class EmptyRequest(BaseModel): class UploadRequest(BaseModel): file_name: str = Field(..., description="Filename to upload") - content_type: str | None = Field( + content_type: Optional[str] = Field( None, description="Mime type of the file. 
For example: image/png, image/jpeg, video/mp4, etc.",
     )
diff --git a/comfy_api_nodes/apis/rodin_api.py b/comfy_api_nodes/apis/rodin_api.py
new file mode 100644
index 000000000..b0cf171fa
--- /dev/null
+++ b/comfy_api_nodes/apis/rodin_api.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from enum import Enum
+from typing import Optional, List
+from pydantic import BaseModel, Field
+
+
+class Rodin3DGenerateRequest(BaseModel):
+    seed: int = Field(..., description="Random seed for generation.")
+    tier: str = Field(..., description="Tier of generation.")
+    material: str = Field(..., description="The material type.")
+    quality: str = Field(..., description="The generation quality of the mesh.")
+    mesh_mode: str = Field(..., description="Controls the type of faces in the generated mesh.")
+
+class GenerateJobsData(BaseModel):
+    uuids: List[str] = Field(..., description="List of job UUIDs.")
+    subscription_key: str = Field(..., description="Subscription key used to poll job status.")
+
+class Rodin3DGenerateResponse(BaseModel):
+    message: Optional[str] = Field(None, description="Return message.")
+    prompt: Optional[str] = Field(None, description="Prompt generated from the input image.")
+    submit_time: Optional[str] = Field(None, description="Submission time.")
+    uuid: Optional[str] = Field(None, description="Task UUID.")
+    jobs: Optional[GenerateJobsData] = Field(None, description="Details of the generated jobs.")
+
+class JobStatus(str, Enum):
+    """
+    Status values for Rodin jobs.
+    """
+    Done = "Done"
+    Failed = "Failed"
+    Generating = "Generating"
+    Waiting = "Waiting"
+
+class Rodin3DCheckStatusRequest(BaseModel):
+    subscription_key: str = Field(..., description="Subscription key returned by the generate endpoint.")
+
+class JobItem(BaseModel):
+    uuid: str = Field(..., description="Job UUID.")
+    status: JobStatus = Field(..., description="Current job status.")
+
+class Rodin3DCheckStatusResponse(BaseModel):
+    jobs: List[JobItem] = Field(..., description="List of job statuses.")
+
+class Rodin3DDownloadRequest(BaseModel):
+    task_uuid: str = Field(..., description="Task UUID.")
+
+class RodinResourceItem(BaseModel):
+    url: str = Field(..., description="Download URL.")
+    name: str = Field(..., description="File name with extension.")
+
+class Rodin3DDownloadResponse(BaseModel):
+    list: List[RodinResourceItem] = Field(..., description="List of downloadable resources.")
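Editor's note: the Rodin models above imply, though nowhere document, a
three-step round trip: submit a generation request, poll the returned
subscription_key until every job reports a terminal status, then download
results by task UUID. A minimal sketch follows; post_json() and the endpoint
paths are assumptions, and the tier/material/quality/mesh_mode strings are
placeholder values (the schema types them as plain str):

    req = Rodin3DGenerateRequest(seed=0, tier="Regular", material="PBR",
                                 quality="medium", mesh_mode="Quad")
    gen = Rodin3DGenerateResponse(**post_json("/rodin/generate", req.model_dump()))
    while True:
        status = Rodin3DCheckStatusResponse(**post_json(
            "/rodin/status", {"subscription_key": gen.jobs.subscription_key}))
        if all(j.status in (JobStatus.Done, JobStatus.Failed) for j in status.jobs):
            break
    files = Rodin3DDownloadResponse(**post_json(
        "/rodin/download", Rodin3DDownloadRequest(task_uuid=gen.uuid).model_dump()))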
diff --git a/comfy_api_nodes/apis/tripo_api.py b/comfy_api_nodes/apis/tripo_api.py
new file mode 100644
index 000000000..626e8d277
--- /dev/null
+++ b/comfy_api_nodes/apis/tripo_api.py
@@ -0,0 +1,275 @@
+from __future__ import annotations
+from comfy_api_nodes.apis import (
+    TripoModelVersion,
+    TripoTextureQuality,
+)
+from enum import Enum
+from typing import Optional, List, Dict, Any, Union
+
+from pydantic import BaseModel, Field, RootModel
+
+class TripoStyle(str, Enum):
+    PERSON_TO_CARTOON = "person:person2cartoon"
+    ANIMAL_VENOM = "animal:venom"
+    OBJECT_CLAY = "object:clay"
+    OBJECT_STEAMPUNK = "object:steampunk"
+    OBJECT_CHRISTMAS = "object:christmas"
+    OBJECT_BARBIE = "object:barbie"
+    GOLD = "gold"
+    ANCIENT_BRONZE = "ancient_bronze"
+    NONE = "None"
+
+class TripoTaskType(str, Enum):
+    TEXT_TO_MODEL = "text_to_model"
+    IMAGE_TO_MODEL = "image_to_model"
+    MULTIVIEW_TO_MODEL = "multiview_to_model"
+    TEXTURE_MODEL = "texture_model"
+    REFINE_MODEL = "refine_model"
+    ANIMATE_PRERIGCHECK = "animate_prerigcheck"
+    ANIMATE_RIG = "animate_rig"
+    ANIMATE_RETARGET = "animate_retarget"
+    STYLIZE_MODEL = "stylize_model"
+    CONVERT_MODEL = "convert_model"
+
+class TripoTextureAlignment(str, Enum):
+    ORIGINAL_IMAGE = "original_image"
+    GEOMETRY = "geometry"
+
+class TripoOrientation(str, Enum):
+    ALIGN_IMAGE = "align_image"
+    DEFAULT = "default"
+
+class TripoOutFormat(str, Enum):
+    GLB = "glb"
+    FBX = "fbx"
+
+class TripoTopology(str, Enum):
+    BIP = "bip"
+    QUAD = "quad"
+
+class TripoSpec(str, Enum):
+    MIXAMO = "mixamo"
+    TRIPO = "tripo"
+
+class TripoAnimation(str, Enum):
+    IDLE = "preset:idle"
+    WALK = "preset:walk"
+    CLIMB = "preset:climb"
+    JUMP = "preset:jump"
+    RUN = "preset:run"
+    SLASH = "preset:slash"
+    SHOOT = "preset:shoot"
+    HURT = "preset:hurt"
+    FALL = "preset:fall"
+    TURN = "preset:turn"
+
+class TripoStylizeStyle(str, Enum):
+    LEGO = "lego"
+    VOXEL = "voxel"
+    VORONOI = "voronoi"
+    MINECRAFT = "minecraft"
+
+class TripoConvertFormat(str, Enum):
+    GLTF = "GLTF"
+    USDZ = "USDZ"
+    FBX = "FBX"
+    OBJ = "OBJ"
+    STL = "STL"
+    _3MF = "3MF"
+
+class TripoTextureFormat(str, Enum):
+    BMP = "BMP"
+    DPX = "DPX"
+    HDR = "HDR"
+    JPEG = "JPEG"
+    OPEN_EXR = "OPEN_EXR"
+    PNG = "PNG"
+    TARGA = "TARGA"
+    TIFF = "TIFF"
+    WEBP = "WEBP"
+
+class TripoTaskStatus(str, Enum):
+    QUEUED = "queued"
+    RUNNING = "running"
+    SUCCESS = "success"
+    FAILED = "failed"
+    CANCELLED = "cancelled"
+    UNKNOWN = "unknown"
+    BANNED = "banned"
+    EXPIRED = "expired"
+
+class TripoFileTokenReference(BaseModel):
+    type: Optional[str] = Field(None, description='The type of the reference')
+    file_token: str
+
+class TripoUrlReference(BaseModel):
+    type: Optional[str] = Field(None, description='The type of the reference')
+    url: str
+
+class TripoObjectStorage(BaseModel):
+    bucket: str
+    key: str
+
+class TripoObjectReference(BaseModel):
+    type: str
+    object: TripoObjectStorage
+
+class TripoFileEmptyReference(BaseModel):
+    pass
+
+class TripoFileReference(RootModel):
+    root: Union[TripoFileTokenReference, TripoUrlReference, TripoObjectReference, TripoFileEmptyReference]
+
+class TripoGetStsTokenRequest(BaseModel):
+    format: str = Field(..., description='The format of the image')
+
+class TripoTextToModelRequest(BaseModel):
+    type: TripoTaskType = Field(TripoTaskType.TEXT_TO_MODEL, description='Type of task')
+    prompt: str = Field(..., description='The text prompt describing the model to generate', max_length=1024)
+    negative_prompt: Optional[str] = Field(None, description='The negative text prompt', max_length=1024)
+    model_version: Optional[TripoModelVersion] = TripoModelVersion.V2_5
+    face_limit: Optional[int] = Field(None, description='The number of faces to limit the generation to')
+    texture: Optional[bool] = Field(True, description='Whether to apply texture to the generated model')
+    pbr: Optional[bool] = Field(True, description='Whether to apply PBR to the generated model')
+    image_seed: Optional[int] = Field(None, description='The seed for the text')
+    model_seed: Optional[int] = Field(None, description='The seed for the model')
+    texture_seed: Optional[int] = Field(None, description='The seed for the texture')
+    texture_quality: Optional[TripoTextureQuality] = TripoTextureQuality.standard
+    style: Optional[TripoStyle] = None
+    auto_size: Optional[bool] = Field(False, description='Whether to auto-size the model')
+    quad: Optional[bool] = Field(False, description='Whether to apply quad to the generated model')
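+# Editorial sketch (not part of the upstream file): minimal construction of the
+# request above; the prompt is a made-up example value.
+#
+#   req = TripoTextToModelRequest(prompt="a weathered bronze dragon statue")
+#   payload = req.model_dump(exclude_none=True)  # pydantic v2 serialization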
+class TripoImageToModelRequest(BaseModel):
+    type: TripoTaskType = Field(TripoTaskType.IMAGE_TO_MODEL, description='Type of task')
+    file: TripoFileReference = Field(..., description='The file reference to convert to a model')
+    model_version: Optional[TripoModelVersion] = Field(None, description='The model version to use for generation')
+    face_limit: Optional[int] = Field(None, description='The number of faces to limit the generation to')
+    texture: Optional[bool] = Field(True, description='Whether to apply texture to the generated model')
+    pbr: Optional[bool] = Field(True, description='Whether to apply PBR to the generated model')
+    model_seed: Optional[int] = Field(None, description='The seed for the model')
+    texture_seed: Optional[int] = Field(None, description='The seed for the texture')
+    texture_quality: Optional[TripoTextureQuality] = TripoTextureQuality.standard
+    texture_alignment: Optional[TripoTextureAlignment] = Field(TripoTextureAlignment.ORIGINAL_IMAGE, description='The texture alignment method')
+    style: Optional[TripoStyle] = Field(None, description='The style to apply to the generated model')
+    auto_size: Optional[bool] = Field(False, description='Whether to auto-size the model')
+    orientation: Optional[TripoOrientation] = TripoOrientation.DEFAULT
+    quad: Optional[bool] = Field(False, description='Whether to apply quad to the generated model')
+
+class TripoMultiviewToModelRequest(BaseModel):
+    type: TripoTaskType = TripoTaskType.MULTIVIEW_TO_MODEL
+    files: List[TripoFileReference] = Field(..., description='The file references to convert to a model')
+    model_version: Optional[TripoModelVersion] = Field(None, description='The model version to use for generation')
+    orthographic_projection: Optional[bool] = Field(False, description='Whether to use orthographic projection')
+    face_limit: Optional[int] = Field(None, description='The number of faces to limit the generation to')
+    texture: Optional[bool] = Field(True, description='Whether to apply texture to the generated model')
+    pbr: Optional[bool] = Field(True, description='Whether to apply PBR to the generated model')
+    model_seed: Optional[int] = Field(None, description='The seed for the model')
+    texture_seed: Optional[int] = Field(None, description='The seed for the texture')
+    texture_quality: Optional[TripoTextureQuality] = TripoTextureQuality.standard
+    texture_alignment: Optional[TripoTextureAlignment] = TripoTextureAlignment.ORIGINAL_IMAGE
+    auto_size: Optional[bool] = Field(False, description='Whether to auto-size the model')
+    orientation: Optional[TripoOrientation] = Field(TripoOrientation.DEFAULT, description='The orientation for the model')
+    quad: Optional[bool] = Field(False, description='Whether to apply quad to the generated model')
+
+class TripoTextureModelRequest(BaseModel):
+    type: TripoTaskType = Field(TripoTaskType.TEXTURE_MODEL, description='Type of task')
+    original_model_task_id: str = Field(..., description='The task ID of the original model')
+    texture: Optional[bool] = Field(True, description='Whether to apply texture to the model')
+    pbr: Optional[bool] = Field(True, description='Whether to apply PBR to the model')
+    model_seed: Optional[int] = Field(None, description='The seed for the model')
+    texture_seed: Optional[int] = Field(None, description='The seed for the texture')
+    texture_quality: Optional[TripoTextureQuality] = Field(None, description='The quality of the texture')
+    texture_alignment: Optional[TripoTextureAlignment] = Field(TripoTextureAlignment.ORIGINAL_IMAGE, description='The texture alignment method')
+
+class TripoRefineModelRequest(BaseModel):
+    type: TripoTaskType = Field(TripoTaskType.REFINE_MODEL, description='Type of task')
+    draft_model_task_id: str = Field(..., description='The task ID of the draft model')
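+# Editorial note (an inference from the task types below, not documented
+# behavior): animation appears to be a three-step flow in which the pre-rig
+# check gates rigging, and retargeting consumes the rig task's ID, e.g.
+#
+#   TripoAnimatePrerigcheckRequest(original_model_task_id=tid)
+#   TripoAnimateRigRequest(original_model_task_id=tid)
+#   TripoAnimateRetargetRequest(original_model_task_id=rig_tid,
+#                               animation=TripoAnimation.WALK)
+#
+# where tid and rig_tid are hypothetical task IDs returned by earlier calls.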
description='Type of task') + original_model_task_id: str = Field(..., description='The task ID of the original model') + +class TripoAnimateRigRequest(BaseModel): + type: TripoTaskType = Field(TripoTaskType.ANIMATE_RIG, description='Type of task') + original_model_task_id: str = Field(..., description='The task ID of the original model') + out_format: Optional[TripoOutFormat] = Field(TripoOutFormat.GLB, description='The output format') + spec: Optional[TripoSpec] = Field(TripoSpec.TRIPO, description='The specification for rigging') + +class TripoAnimateRetargetRequest(BaseModel): + type: TripoTaskType = Field(TripoTaskType.ANIMATE_RETARGET, description='Type of task') + original_model_task_id: str = Field(..., description='The task ID of the original model') + animation: TripoAnimation = Field(..., description='The animation to apply') + out_format: Optional[TripoOutFormat] = Field(TripoOutFormat.GLB, description='The output format') + bake_animation: Optional[bool] = Field(True, description='Whether to bake the animation') + +class TripoStylizeModelRequest(BaseModel): + type: TripoTaskType = Field(TripoTaskType.STYLIZE_MODEL, description='Type of task') + style: TripoStylizeStyle = Field(..., description='The style to apply to the model') + original_model_task_id: str = Field(..., description='The task ID of the original model') + block_size: Optional[int] = Field(80, description='The block size for stylization') + +class TripoConvertModelRequest(BaseModel): + type: TripoTaskType = Field(TripoTaskType.CONVERT_MODEL, description='Type of task') + format: TripoConvertFormat = Field(..., description='The format to convert to') + original_model_task_id: str = Field(..., description='The task ID of the original model') + quad: Optional[bool] = Field(False, description='Whether to apply quad to the model') + force_symmetry: Optional[bool] = Field(False, description='Whether to force symmetry') + face_limit: Optional[int] = Field(10000, description='The number of faces to limit the conversion to') + flatten_bottom: Optional[bool] = Field(False, description='Whether to flatten the bottom of the model') + flatten_bottom_threshold: Optional[float] = Field(0.01, description='The threshold for flattening the bottom') + texture_size: Optional[int] = Field(4096, description='The size of the texture') + texture_format: Optional[TripoTextureFormat] = Field(TripoTextureFormat.JPEG, description='The format of the texture') + pivot_to_center_bottom: Optional[bool] = Field(False, description='Whether to pivot to the center bottom') + +class TripoTaskRequest(RootModel): + root: Union[ + TripoTextToModelRequest, + TripoImageToModelRequest, + TripoMultiviewToModelRequest, + TripoTextureModelRequest, + TripoRefineModelRequest, + TripoAnimatePrerigcheckRequest, + TripoAnimateRigRequest, + TripoAnimateRetargetRequest, + TripoStylizeModelRequest, + TripoConvertModelRequest + ] + +class TripoTaskOutput(BaseModel): + model: Optional[str] = Field(None, description='URL to the model') + base_model: Optional[str] = Field(None, description='URL to the base model') + pbr_model: Optional[str] = Field(None, description='URL to the PBR model') + rendered_image: Optional[str] = Field(None, description='URL to the rendered image') + riggable: Optional[bool] = Field(None, description='Whether the model is riggable') + +class TripoTask(BaseModel): + task_id: str = Field(..., description='The task ID') + type: Optional[str] = Field(None, description='The type of task') + status: Optional[TripoTaskStatus] = Field(None, 
description='The status of the task') + input: Optional[Dict[str, Any]] = Field(None, description='The input parameters for the task') + output: Optional[TripoTaskOutput] = Field(None, description='The output of the task') + progress: Optional[int] = Field(None, description='The progress of the task', ge=0, le=100) + create_time: Optional[int] = Field(None, description='The creation time of the task') + running_left_time: Optional[int] = Field(None, description='The estimated time left for the task') + queue_position: Optional[int] = Field(None, description='The position in the queue') + +class TripoTaskResponse(BaseModel): + code: int = Field(0, description='The response code') + data: TripoTask = Field(..., description='The task data') + +class TripoGeneralResponse(BaseModel): + code: int = Field(0, description='The response code') + data: Dict[str, str] = Field(..., description='The task ID data') + +class TripoBalanceData(BaseModel): + balance: float = Field(..., description='The account balance') + frozen: float = Field(..., description='The frozen balance') + +class TripoBalanceResponse(BaseModel): + code: int = Field(0, description='The response code') + data: TripoBalanceData = Field(..., description='The balance data') + +class TripoErrorResponse(BaseModel): + code: int = Field(..., description='The error code') + message: str = Field(..., description='The error message') + suggestion: str = Field(..., description='The suggestion for fixing the error') diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py new file mode 100644 index 000000000..ae7b04846 --- /dev/null +++ b/comfy_api_nodes/nodes_gemini.py @@ -0,0 +1,446 @@ +""" +API Nodes for Gemini Multimodal LLM Usage via Remote API +See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference +""" + +import os +from enum import Enum +from typing import Optional, Literal + +import torch + +import folder_paths +from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict +from server import PromptServer +from comfy_api_nodes.apis import ( + GeminiContent, + GeminiGenerateContentRequest, + GeminiGenerateContentResponse, + GeminiInlineData, + GeminiPart, + GeminiMimeType, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, +) +from comfy_api_nodes.apinode_utils import ( + validate_string, + audio_to_base64_string, + video_to_base64_string, + tensor_to_base64_string, +) + + +GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini" +GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024 # 20 MB + + +class GeminiModel(str, Enum): + """ + Gemini Model Names allowed by comfy-api + """ + + gemini_2_5_pro_preview_05_06 = "gemini-2.5-pro-preview-05-06" + gemini_2_5_flash_preview_04_17 = "gemini-2.5-flash-preview-04-17" + + +def get_gemini_endpoint( + model: GeminiModel, +) -> ApiEndpoint[GeminiGenerateContentRequest, GeminiGenerateContentResponse]: + """ + Get the API endpoint for a given Gemini model. + + Args: + model: The Gemini model to use, either as enum or string value. + + Returns: + ApiEndpoint configured for the specific Gemini model. + """ + if isinstance(model, str): + model = GeminiModel(model) + return ApiEndpoint( + path=f"{GEMINI_BASE_ENDPOINT}/{model.value}", + method=HttpMethod.POST, + request_model=GeminiGenerateContentRequest, + response_model=GeminiGenerateContentResponse, + ) + + +class GeminiNode(ComfyNodeABC): + """ + Node to generate text responses from a Gemini model. 
+ + This node allows users to interact with Google's Gemini AI models, providing + multimodal inputs (text, images, audio, video, files) to generate coherent + text responses. The node works with the latest Gemini models, handling the + API communication and response parsing. + """ + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Text inputs to the model, used to generate a response. You can include detailed instructions, questions, or context for the model.", + }, + ), + "model": ( + IO.COMBO, + { + "tooltip": "The Gemini model to use for generating responses.", + "options": [model.value for model in GeminiModel], + "default": GeminiModel.gemini_2_5_pro_preview_05_06.value, + }, + ), + "seed": ( + IO.INT, + { + "default": 42, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.", + }, + ), + }, + "optional": { + "images": ( + IO.IMAGE, + { + "default": None, + "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.", + }, + ), + "audio": ( + IO.AUDIO, + { + "tooltip": "Optional audio to use as context for the model.", + "default": None, + }, + ), + "video": ( + IO.VIDEO, + { + "tooltip": "Optional video to use as context for the model.", + "default": None, + }, + ), + "files": ( + "GEMINI_INPUT_FILES", + { + "default": None, + "tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + DESCRIPTION = "Generate text responses with Google's Gemini AI model. You can provide multiple types of inputs (text, images, audio, video) as context for generating more relevant and meaningful responses." + RETURN_TYPES = ("STRING",) + FUNCTION = "api_call" + CATEGORY = "api node/text/Gemini" + API_NODE = True + + def get_parts_from_response( + self, response: GeminiGenerateContentResponse + ) -> list[GeminiPart]: + """ + Extract all parts from the Gemini API response. + + Args: + response: The API response from Gemini. + + Returns: + List of response parts from the first candidate. + """ + return response.candidates[0].content.parts + + def get_parts_by_type( + self, response: GeminiGenerateContentResponse, part_type: Literal["text"] | str + ) -> list[GeminiPart]: + """ + Filter response parts by their type. + + Args: + response: The API response from Gemini. + part_type: Type of parts to extract ("text" or a MIME type). + + Returns: + List of response parts matching the requested type. 
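+
+        Example (illustrative; ``response`` is a parsed
+        GeminiGenerateContentResponse):
+            text_parts = self.get_parts_by_type(response, "text")
+            png_parts = self.get_parts_by_type(response, "image/png")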
+ """ + parts = [] + for part in self.get_parts_from_response(response): + if part_type == "text" and hasattr(part, "text") and part.text: + parts.append(part) + elif ( + hasattr(part, "inlineData") + and part.inlineData + and part.inlineData.mimeType == part_type + ): + parts.append(part) + # Skip parts that don't match the requested type + return parts + + def get_text_from_response(self, response: GeminiGenerateContentResponse) -> str: + """ + Extract and concatenate all text parts from the response. + + Args: + response: The API response from Gemini. + + Returns: + Combined text from all text parts in the response. + """ + parts = self.get_parts_by_type(response, "text") + return "\n".join([part.text for part in parts]) + + def create_video_parts(self, video_input: IO.VIDEO, **kwargs) -> list[GeminiPart]: + """ + Convert video input to Gemini API compatible parts. + + Args: + video_input: Video tensor from ComfyUI. + **kwargs: Additional arguments to pass to the conversion function. + + Returns: + List of GeminiPart objects containing the encoded video. + """ + from comfy_api.util import VideoContainer, VideoCodec + base_64_string = video_to_base64_string( + video_input, + container_format=VideoContainer.MP4, + codec=VideoCodec.H264 + ) + return [ + GeminiPart( + inlineData=GeminiInlineData( + mimeType=GeminiMimeType.video_mp4, + data=base_64_string, + ) + ) + ] + + def create_audio_parts(self, audio_input: IO.AUDIO) -> list[GeminiPart]: + """ + Convert audio input to Gemini API compatible parts. + + Args: + audio_input: Audio input from ComfyUI, containing waveform tensor and sample rate. + + Returns: + List of GeminiPart objects containing the encoded audio. + """ + audio_parts: list[GeminiPart] = [] + for batch_index in range(audio_input["waveform"].shape[0]): + # Recreate an IO.AUDIO object for the given batch dimension index + audio_at_index = { + "waveform": audio_input["waveform"][batch_index].unsqueeze(0), + "sample_rate": audio_input["sample_rate"], + } + # Convert to MP3 format for compatibility with Gemini API + audio_bytes = audio_to_base64_string( + audio_at_index, + container_format="mp3", + codec_name="libmp3lame", + ) + audio_parts.append( + GeminiPart( + inlineData=GeminiInlineData( + mimeType=GeminiMimeType.audio_mp3, + data=audio_bytes, + ) + ) + ) + return audio_parts + + def create_image_parts(self, image_input: torch.Tensor) -> list[GeminiPart]: + """ + Convert image tensor input to Gemini API compatible parts. + + Args: + image_input: Batch of image tensors from ComfyUI. + + Returns: + List of GeminiPart objects containing the encoded images. + """ + image_parts: list[GeminiPart] = [] + for image_index in range(image_input.shape[0]): + image_as_b64 = tensor_to_base64_string( + image_input[image_index].unsqueeze(0) + ) + image_parts.append( + GeminiPart( + inlineData=GeminiInlineData( + mimeType=GeminiMimeType.image_png, + data=image_as_b64, + ) + ) + ) + return image_parts + + def create_text_part(self, text: str) -> GeminiPart: + """ + Create a text part for the Gemini API request. + + Args: + text: The text content to include in the request. + + Returns: + A GeminiPart object with the text content. 
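+
+        Example (illustrative):
+            part = self.create_text_part("Describe the attached image.")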
+ """ + return GeminiPart(text=text) + + def api_call( + self, + prompt: str, + model: GeminiModel, + images: Optional[IO.IMAGE] = None, + audio: Optional[IO.AUDIO] = None, + video: Optional[IO.VIDEO] = None, + files: Optional[list[GeminiPart]] = None, + unique_id: Optional[str] = None, + **kwargs, + ) -> tuple[str]: + # Validate inputs + validate_string(prompt, strip_whitespace=False) + + # Create parts list with text prompt as the first part + parts: list[GeminiPart] = [self.create_text_part(prompt)] + + # Add other modal parts + if images is not None: + image_parts = self.create_image_parts(images) + parts.extend(image_parts) + if audio is not None: + parts.extend(self.create_audio_parts(audio)) + if video is not None: + parts.extend(self.create_video_parts(video)) + if files is not None: + parts.extend(files) + + # Create response + response = SynchronousOperation( + endpoint=get_gemini_endpoint(model), + request=GeminiGenerateContentRequest( + contents=[ + GeminiContent( + role="user", + parts=parts, + ) + ] + ), + auth_kwargs=kwargs, + ).execute() + + # Get result output + output_text = self.get_text_from_response(response) + if unique_id and output_text: + PromptServer.instance.send_progress_text(output_text, node_id=unique_id) + + return (output_text or "Empty response from Gemini model...",) + + +class GeminiInputFiles(ComfyNodeABC): + """ + Loads and formats input files for use with the Gemini API. + + This node allows users to include text (.txt) and PDF (.pdf) files as input + context for the Gemini model. Files are converted to the appropriate format + required by the API and can be chained together to include multiple files + in a single request. + """ + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + """ + For details about the supported file input types, see: + https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference + """ + input_dir = folder_paths.get_input_directory() + input_files = [ + f + for f in os.scandir(input_dir) + if f.is_file() + and (f.name.endswith(".txt") or f.name.endswith(".pdf")) + and f.stat().st_size < GEMINI_MAX_INPUT_FILE_SIZE + ] + input_files = sorted(input_files, key=lambda x: x.name) + input_files = [f.name for f in input_files] + return { + "required": { + "file": ( + IO.COMBO, + { + "tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.", + "options": input_files, + "default": input_files[0] if input_files else None, + }, + ), + }, + "optional": { + "GEMINI_INPUT_FILES": ( + "GEMINI_INPUT_FILES", + { + "tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files.", + "default": None, + }, + ), + }, + } + + DESCRIPTION = "Loads and prepares input files to include as inputs for Gemini LLM nodes. The files will be read by the Gemini model when generating a response. The contents of the text file count toward the token limit. 🛈 TIP: Can be chained together with other Gemini Input File nodes." 
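+    # Note: create_file_part below inlines each file as a base64 string, so
+    # the GEMINI_MAX_INPUT_FILE_SIZE filter in INPUT_TYPES also bounds the
+    # size of the request payload sent to the API.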
+    RETURN_TYPES = ("GEMINI_INPUT_FILES",)
+    FUNCTION = "prepare_files"
+    CATEGORY = "api node/text/Gemini"
+
+    def create_file_part(self, file_path: str) -> GeminiPart:
+        mime_type = (
+            GeminiMimeType.pdf
+            if file_path.endswith(".pdf")
+            else GeminiMimeType.text_plain
+        )
+        # Use base64 string directly, not the data URI
+        with open(file_path, "rb") as f:
+            file_content = f.read()
+        import base64
+        base64_str = base64.b64encode(file_content).decode("utf-8")
+
+        return GeminiPart(
+            inlineData=GeminiInlineData(
+                mimeType=mime_type,
+                data=base64_str,
+            )
+        )
+
+    def prepare_files(
+        self, file: str, GEMINI_INPUT_FILES: list[GeminiPart] = []
+    ) -> tuple[list[GeminiPart]]:
+        """
+        Loads and formats input files for Gemini API.
+        """
+        file_path = folder_paths.get_annotated_filepath(file)
+        input_file_content = self.create_file_part(file_path)
+        files = [input_file_content] + GEMINI_INPUT_FILES
+        return (files,)
+
+
+NODE_CLASS_MAPPINGS = {
+    "GeminiNode": GeminiNode,
+    "GeminiInputFiles": GeminiInputFiles,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "GeminiNode": "Google Gemini",
+    "GeminiInputFiles": "Gemini Input Files",
+}
diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py
index ce8054afc..be1d2de4a 100644
--- a/comfy_api_nodes/nodes_openai.py
+++ b/comfy_api_nodes/nodes_openai.py
@@ -1,29 +1,86 @@
 import io
+from typing import TypedDict, Optional
+import json
+import os
+import time
+import re
+import uuid
+from enum import Enum
 from inspect import cleandoc
 
 import numpy as np
 import torch
 from PIL import Image
-
 from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict
+from server import PromptServer
+import folder_paths
 
 from comfy_api_nodes.apis import (
     OpenAIImageGenerationRequest,
     OpenAIImageEditRequest,
     OpenAIImageGenerationResponse,
+    OpenAICreateResponse,
+    OpenAIResponse,
+    CreateModelResponseProperties,
+    Item,
+    Includable,
+    OutputContent,
+    InputImageContent,
+    Detail,
+    InputTextContent,
+    InputMessage,
+    InputMessageContentList,
+    InputContent,
+    InputFileContent,
 )
 from comfy_api_nodes.apis.client import (
     ApiEndpoint,
     HttpMethod,
     SynchronousOperation,
+    PollingOperation,
+    EmptyRequest,
 )
 from comfy_api_nodes.apinode_utils import (
     downscale_image_tensor,
     validate_and_cast_response,
     validate_string,
+    tensor_to_base64_string,
+    text_filepath_to_data_uri,
 )
+from comfy_api_nodes.mapper_utils import model_field_to_node_input
+
+
+RESPONSES_ENDPOINT = "/proxy/openai/v1/responses"
+STARTING_POINT_ID_PATTERN = r"<starting_point_id:(.*)>"
+
+
+class HistoryEntry(TypedDict):
+    """Type definition for a single history entry in the chat."""
+
+    prompt: str
+    response: str
+    response_id: str
+    timestamp: float
+
+
+class ChatHistory(TypedDict):
+    """Type definition for the chat history dictionary."""
+
+    __annotations__: dict[str, list[HistoryEntry]]
+
+
+class SupportedOpenAIModel(str, Enum):
+    o4_mini = "o4-mini"
+    o1 = "o1"
+    o3 = "o3"
+    o1_pro = "o1-pro"
+    gpt_4o = "gpt-4o"
+    gpt_4_1 = "gpt-4.1"
+    gpt_4_1_mini = "gpt-4.1-mini"
+    gpt_4_1_nano = "gpt-4.1-nano"
+
 
 class OpenAIDalle2(ComfyNodeABC):
     """
@@ -115,7 +172,7 @@ class OpenAIDalle2(ComfyNodeABC):
         n=1,
         size="1024x1024",
         unique_id=None,
-        **kwargs
+        **kwargs,
     ):
         validate_string(prompt, strip_whitespace=False)
         model = "dall-e-2"
@@ -262,7 +319,7 @@ class OpenAIDalle3(ComfyNodeABC):
         quality="standard",
         size="1024x1024",
         unique_id=None,
-        **kwargs
+        **kwargs,
     ):
         validate_string(prompt, strip_whitespace=False)
         model = "dall-e-3"
@@ -400,12 +457,12 @@ class OpenAIGPTImage1(ComfyNodeABC):
         n=1,
         size="1024x1024",
unique_id=None, - **kwargs + **kwargs, ): validate_string(prompt, strip_whitespace=False) model = "gpt-image-1" path = "/proxy/openai/images/generations" - content_type="application/json" + content_type = "application/json" request_class = OpenAIImageGenerationRequest img_binaries = [] mask_binary = None @@ -414,7 +471,7 @@ class OpenAIGPTImage1(ComfyNodeABC): if image is not None: path = "/proxy/openai/images/edits" request_class = OpenAIImageEditRequest - content_type ="multipart/form-data" + content_type = "multipart/form-data" batch_size = image.shape[0] @@ -486,17 +543,466 @@ class OpenAIGPTImage1(ComfyNodeABC): return (img_tensor,) -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique +class OpenAITextNode(ComfyNodeABC): + """ + Base class for OpenAI text generation nodes. + """ + + RETURN_TYPES = (IO.STRING,) + FUNCTION = "api_call" + CATEGORY = "api node/text/OpenAI" + API_NODE = True + + +class OpenAIChatNode(OpenAITextNode): + """ + Node to generate text responses from an OpenAI model. + """ + + def __init__(self) -> None: + """Initialize the chat node with a new session ID and empty history.""" + self.current_session_id: str = str(uuid.uuid4()) + self.history: dict[str, list[HistoryEntry]] = {} + self.previous_response_id: Optional[str] = None + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Text inputs to the model, used to generate a response.", + }, + ), + "persist_context": ( + IO.BOOLEAN, + { + "default": True, + "tooltip": "Persist chat context between calls (multi-turn conversation)", + }, + ), + "model": model_field_to_node_input( + IO.COMBO, + OpenAICreateResponse, + "model", + enum_type=SupportedOpenAIModel, + ), + }, + "optional": { + "images": ( + IO.IMAGE, + { + "default": None, + "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.", + }, + ), + "files": ( + "OPENAI_INPUT_FILES", + { + "default": None, + "tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the OpenAI Chat Input Files node.", + }, + ), + "advanced_options": ( + "OPENAI_CHAT_CONFIG", + { + "default": None, + "tooltip": "Optional configuration for the model. Accepts inputs from the OpenAI Chat Advanced Options node.", + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + DESCRIPTION = "Generate text responses from an OpenAI model." + + def get_result_response( + self, + response_id: str, + include: Optional[list[Includable]] = None, + auth_kwargs: Optional[dict[str, str]] = None, + ) -> OpenAIResponse: + """ + Retrieve a model response with the given ID from the OpenAI API. + + Args: + response_id (str): The ID of the response to retrieve. + include (Optional[List[Includable]]): Additional fields to include + in the response. See the `include` parameter for Response + creation above for more information. 
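+
+        Example (illustrative):
+            result = self.get_result_response(response_id, auth_kwargs=kwargs)
+            text = self.parse_output_text_from_response(result)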
+ + """ + return PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"{RESPONSES_ENDPOINT}/{response_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=OpenAIResponse, + query_params={"include": include}, + ), + completed_statuses=["completed"], + failed_statuses=["failed"], + status_extractor=lambda response: response.status, + auth_kwargs=auth_kwargs, + ).execute() + + def get_message_content_from_response( + self, response: OpenAIResponse + ) -> list[OutputContent]: + """Extract message content from the API response.""" + for output in response.output: + if output.root.type == "message": + return output.root.content + raise TypeError("No output message found in response") + + def get_text_from_message_content( + self, message_content: list[OutputContent] + ) -> str: + """Extract text content from message content.""" + for content_item in message_content: + if content_item.root.type == "output_text": + return str(content_item.root.text) + return "No text output found in response" + + def get_history_text(self, session_id: str) -> str: + """Convert the entire history for a given session to JSON string.""" + return json.dumps(self.history[session_id]) + + def display_history_on_node(self, session_id: str, node_id: str) -> None: + """Display formatted chat history on the node UI.""" + render_spec = { + "node_id": node_id, + "component": "ChatHistoryWidget", + "props": { + "history": self.get_history_text(session_id), + }, + } + PromptServer.instance.send_sync( + "display_component", + render_spec, + ) + + def add_to_history( + self, session_id: str, prompt: str, output_text: str, response_id: str + ) -> None: + """Add a new entry to the chat history.""" + if session_id not in self.history: + self.history[session_id] = [] + self.history[session_id].append( + { + "prompt": prompt, + "response": output_text, + "response_id": response_id, + "timestamp": time.time(), + } + ) + + def parse_output_text_from_response(self, response: OpenAIResponse) -> str: + """Extract text output from the API response.""" + message_contents = self.get_message_content_from_response(response) + return self.get_text_from_message_content(message_contents) + + def generate_new_session_id(self) -> str: + """Generate a new unique session ID.""" + return str(uuid.uuid4()) + + def get_session_id(self, persist_context: bool) -> str: + """Get the current or generate a new session ID based on context persistence.""" + return ( + self.current_session_id + if persist_context + else self.generate_new_session_id() + ) + + def tensor_to_input_image_content( + self, image: torch.Tensor, detail_level: Detail = "auto" + ) -> InputImageContent: + """Convert a tensor to an input image content object.""" + return InputImageContent( + detail=detail_level, + image_url=f"data:image/png;base64,{tensor_to_base64_string(image)}", + type="input_image", + ) + + def create_input_message_contents( + self, + prompt: str, + image: Optional[torch.Tensor] = None, + files: Optional[list[InputFileContent]] = None, + ) -> InputMessageContentList: + """Create a list of input message contents from prompt and optional image.""" + content_list: list[InputContent] = [ + InputTextContent(text=prompt, type="input_text"), + ] + if image is not None: + for i in range(image.shape[0]): + content_list.append( + self.tensor_to_input_image_content(image[i].unsqueeze(0)) + ) + if files is not None: + content_list.extend(files) + + return InputMessageContentList( + root=content_list, + ) + + def parse_response_id_from_prompt(self, prompt: 
str) -> Optional[str]: + """Extract response ID from prompt if it exists.""" + parsed_id = re.search(STARTING_POINT_ID_PATTERN, prompt) + return parsed_id.group(1) if parsed_id else None + + def strip_response_tag_from_prompt(self, prompt: str) -> str: + """Remove the response ID tag from the prompt.""" + return re.sub(STARTING_POINT_ID_PATTERN, "", prompt.strip()) + + def delete_history_after_response_id( + self, new_start_id: str, session_id: str + ) -> None: + """Delete history entries after a specific response ID.""" + if session_id not in self.history: + return + + new_history = [] + i = 0 + while ( + i < len(self.history[session_id]) + and self.history[session_id][i]["response_id"] != new_start_id + ): + new_history.append(self.history[session_id][i]) + i += 1 + + # Since it's the new starting point (not the response being edited), we include it as well + if i < len(self.history[session_id]): + new_history.append(self.history[session_id][i]) + + self.history[session_id] = new_history + + def api_call( + self, + prompt: str, + persist_context: bool, + model: SupportedOpenAIModel, + unique_id: Optional[str] = None, + images: Optional[torch.Tensor] = None, + files: Optional[list[InputFileContent]] = None, + advanced_options: Optional[CreateModelResponseProperties] = None, + **kwargs, + ) -> tuple[str]: + # Validate inputs + validate_string(prompt, strip_whitespace=False) + + session_id = self.get_session_id(persist_context) + response_id_override = self.parse_response_id_from_prompt(prompt) + if response_id_override: + is_starting_from_beginning = response_id_override == "start" + if is_starting_from_beginning: + self.history[session_id] = [] + previous_response_id = None + else: + previous_response_id = response_id_override + self.delete_history_after_response_id(response_id_override, session_id) + prompt = self.strip_response_tag_from_prompt(prompt) + elif persist_context: + previous_response_id = self.previous_response_id + else: + previous_response_id = None + + # Create response + create_response = SynchronousOperation( + endpoint=ApiEndpoint( + path=RESPONSES_ENDPOINT, + method=HttpMethod.POST, + request_model=OpenAICreateResponse, + response_model=OpenAIResponse, + ), + request=OpenAICreateResponse( + input=[ + Item( + root=InputMessage( + content=self.create_input_message_contents( + prompt, images, files + ), + role="user", + ) + ), + ], + store=True, + stream=False, + model=model, + previous_response_id=previous_response_id, + **( + advanced_options.model_dump(exclude_none=True) + if advanced_options + else {} + ), + ), + auth_kwargs=kwargs, + ).execute() + response_id = create_response.id + + # Get result output + result_response = self.get_result_response(response_id, auth_kwargs=kwargs) + output_text = self.parse_output_text_from_response(result_response) + + # Update history + self.add_to_history(session_id, prompt, output_text, response_id) + self.display_history_on_node(session_id, unique_id) + self.previous_response_id = response_id + + return (output_text,) + + +class OpenAIInputFiles(ComfyNodeABC): + """ + Loads and formats input files for OpenAI API. 
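+
+    Example (illustrative): chain two of these nodes together to attach both
+    a .txt and a .pdf file to a single OpenAI Chat Node message.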
+ """ + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + """ + For details about the supported file input types, see: + https://platform.openai.com/docs/guides/pdf-files?api-mode=responses + """ + input_dir = folder_paths.get_input_directory() + input_files = [ + f + for f in os.scandir(input_dir) + if f.is_file() + and (f.name.endswith(".txt") or f.name.endswith(".pdf")) + and f.stat().st_size < 32 * 1024 * 1024 + ] + input_files = sorted(input_files, key=lambda x: x.name) + input_files = [f.name for f in input_files] + return { + "required": { + "file": ( + IO.COMBO, + { + "tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.", + "options": input_files, + "default": input_files[0] if input_files else None, + }, + ), + }, + "optional": { + "OPENAI_INPUT_FILES": ( + "OPENAI_INPUT_FILES", + { + "tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files.", + "default": None, + }, + ), + }, + } + + DESCRIPTION = "Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes." + RETURN_TYPES = ("OPENAI_INPUT_FILES",) + FUNCTION = "prepare_files" + CATEGORY = "api node/text/OpenAI" + + def create_input_file_content(self, file_path: str) -> InputFileContent: + return InputFileContent( + file_data=text_filepath_to_data_uri(file_path), + filename=os.path.basename(file_path), + type="input_file", + ) + + def prepare_files( + self, file: str, OPENAI_INPUT_FILES: list[InputFileContent] = [] + ) -> tuple[list[InputFileContent]]: + """ + Loads and formats input files for OpenAI API. + """ + file_path = folder_paths.get_annotated_filepath(file) + input_file_content = self.create_input_file_content(file_path) + files = [input_file_content] + OPENAI_INPUT_FILES + return (files,) + + +class OpenAIChatConfig(ComfyNodeABC): + """Allows setting additional configuration for the OpenAI Chat Node.""" + + RETURN_TYPES = ("OPENAI_CHAT_CONFIG",) + FUNCTION = "configure" + DESCRIPTION = ( + "Allows specifying advanced configuration options for the OpenAI Chat Nodes." + ) + CATEGORY = "api node/text/OpenAI" + + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "truncation": ( + IO.COMBO, + { + "options": ["auto", "disabled"], + "default": "auto", + "tooltip": "The truncation strategy to use for the model response. 
auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation. disabled: If a model response will exceed the context window size for a model, the request will fail with a 400 error",
+                },
+            ),
+        },
+        "optional": {
+            "max_output_tokens": model_field_to_node_input(
+                IO.INT,
+                OpenAICreateResponse,
+                "max_output_tokens",
+                min=16,
+                default=4096,
+                max=16384,
+                tooltip="An upper bound for the number of tokens that can be generated for a response, including visible output tokens",
+            ),
+            "instructions": model_field_to_node_input(
+                IO.STRING, OpenAICreateResponse, "instructions", multiline=True
+            ),
+        },
+    }
+
+    def configure(
+        self,
+        truncation: str,
+        instructions: Optional[str] = None,
+        max_output_tokens: Optional[int] = None,
+    ) -> tuple[CreateModelResponseProperties]:
+        """
+        Configure advanced options for the OpenAI Chat Node.
+
+        Note:
+            While `top_p` and `temperature` are listed as properties in the
+            spec, they are not supported for all models (e.g., o4-mini).
+            They are not exposed as inputs at all to avoid having to manually
+            remove depending on model choice.
+        """
+        return (
+            CreateModelResponseProperties(
+                instructions=instructions,
+                truncation=truncation,
+                max_output_tokens=max_output_tokens,
+            ),
+        )
+
+
 NODE_CLASS_MAPPINGS = {
     "OpenAIDalle2": OpenAIDalle2,
     "OpenAIDalle3": OpenAIDalle3,
     "OpenAIGPTImage1": OpenAIGPTImage1,
+    "OpenAIChatNode": OpenAIChatNode,
+    "OpenAIInputFiles": OpenAIInputFiles,
+    "OpenAIChatConfig": OpenAIChatConfig,
 }
 
-# A dictionary that contains the friendly/humanly readable titles for the nodes
 NODE_DISPLAY_NAME_MAPPINGS = {
     "OpenAIDalle2": "OpenAI DALL·E 2",
     "OpenAIDalle3": "OpenAI DALL·E 3",
     "OpenAIGPTImage1": "OpenAI GPT Image 1",
+    "OpenAIChatNode": "OpenAI Chat",
+    "OpenAIInputFiles": "OpenAI Chat Input Files",
+    "OpenAIChatConfig": "OpenAI Chat Advanced Options",
 }
diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py
new file mode 100644
index 000000000..67f90478c
--- /dev/null
+++ b/comfy_api_nodes/nodes_rodin.py
@@ -0,0 +1,462 @@
+"""
+ComfyUI X Rodin3D(Deemos) API Nodes
+
+Rodin API docs: https://developer.hyper3d.ai/
+
+"""
+
+from __future__ import annotations
+from inspect import cleandoc
+from comfy.comfy_types.node_typing import IO
+import folder_paths as comfy_paths
+import requests
+import os
+import datetime
+import shutil
+import time
+import io
+import logging
+import math
+from PIL import Image
+from comfy_api_nodes.apis.rodin_api import (
+    Rodin3DGenerateRequest,
+    Rodin3DGenerateResponse,
+    Rodin3DCheckStatusRequest,
+    Rodin3DCheckStatusResponse,
+    Rodin3DDownloadRequest,
+    Rodin3DDownloadResponse,
+    JobStatus,
+)
+from comfy_api_nodes.apis.client import (
+    ApiEndpoint,
+    HttpMethod,
+    SynchronousOperation,
+    PollingOperation,
+)
+
+
+COMMON_PARAMETERS = {
+    "Seed": (
+        IO.INT,
+        {
+            "default": 0,
+            "min": 0,
+            "max": 65535,
+            "display": "number"
+        }
+    ),
+    "Material_Type": (
+        IO.COMBO,
+        {
+            "options": ["PBR", "Shaded"],
+            "default": "PBR"
+        }
+    ),
+    "Polygon_count": (
+        IO.COMBO,
+        {
+            "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"],
+            "default": "18K-Quad"
+        }
+    )
+}
+
+def create_task_error(response: Rodin3DGenerateResponse):
+    """Check if the response has an error."""
+    return hasattr(response, "error")
+
+
+
+class Rodin3DAPI:
+    """
+    Generate 3D Assets using Rodin API
+    """
+    RETURN_TYPES = (IO.STRING,)
+    RETURN_NAMES =
("3D Model Path",) + CATEGORY = "api node/3d/Rodin" + DESCRIPTION = cleandoc(__doc__ or "") + FUNCTION = "api_call" + API_NODE = True + + def tensor_to_filelike(self, tensor, max_pixels: int = 2048*2048): + """ + Converts a PyTorch tensor to a file-like object. + + Args: + - tensor (torch.Tensor): A tensor representing an image of shape (H, W, C) + where C is the number of channels (3 for RGB), H is height, and W is width. + + Returns: + - io.BytesIO: A file-like object containing the image data. + """ + array = tensor.cpu().numpy() + array = (array * 255).astype('uint8') + image = Image.fromarray(array, 'RGB') + + original_width, original_height = image.size + original_pixels = original_width * original_height + if original_pixels > max_pixels: + scale = math.sqrt(max_pixels / original_pixels) + new_width = int(original_width * scale) + new_height = int(original_height * scale) + else: + new_width, new_height = original_width, original_height + + if new_width != original_width or new_height != original_height: + image = image.resize((new_width, new_height), Image.Resampling.LANCZOS) + + img_byte_arr = io.BytesIO() + image.save(img_byte_arr, format='PNG') # PNG is used for lossless compression + img_byte_arr.seek(0) + return img_byte_arr + + def check_rodin_status(self, response: Rodin3DCheckStatusResponse) -> str: + has_failed = any(job.status == JobStatus.Failed for job in response.jobs) + all_done = all(job.status == JobStatus.Done for job in response.jobs) + status_list = [str(job.status) for job in response.jobs] + logging.info(f"[ Rodin3D API - CheckStatus ] Generate Status: {status_list}") + if has_failed: + logging.error(f"[ Rodin3D API - CheckStatus ] Generate Failed: {status_list}, Please try again.") + raise Exception("[ Rodin3D API ] Generate Failed, Please Try again.") + elif all_done: + return "DONE" + else: + return "Generating" + + def CreateGenerateTask(self, images=None, seed=1, material="PBR", quality="medium", tier="Regular", mesh_mode="Quad", **kwargs): + if images == None: + raise Exception("Rodin 3D generate requires at least 1 image.") + if len(images) >= 5: + raise Exception("Rodin 3D generate requires up to 5 image.") + + path = "/proxy/rodin/api/v2/rodin" + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=Rodin3DGenerateRequest, + response_model=Rodin3DGenerateResponse, + ), + request=Rodin3DGenerateRequest( + seed=seed, + tier=tier, + material=material, + quality=quality, + mesh_mode=mesh_mode + ), + files=[ + ( + "images", + open(image, "rb") if isinstance(image, str) else self.tensor_to_filelike(image) + ) + for image in images if image is not None + ], + content_type = "multipart/form-data", + auth_kwargs=kwargs, + ) + + response = operation.execute() + + if create_task_error(response): + error_message = f"Rodin3D Create 3D generate Task Failed. 
Message: {response.message}, error: {response.error}" + logging.error(error_message) + raise Exception(error_message) + + logging.info("[ Rodin3D API - Submit Jobs ] Submit Generate Task Success!") + subscription_key = response.jobs.subscription_key + task_uuid = response.uuid + logging.info(f"[ Rodin3D API - Submit Jobs ] UUID: {task_uuid}") + return task_uuid, subscription_key + + def poll_for_task_status(self, subscription_key, **kwargs) -> Rodin3DCheckStatusResponse: + + path = "/proxy/rodin/api/v2/status" + + poll_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path = path, + method=HttpMethod.POST, + request_model=Rodin3DCheckStatusRequest, + response_model=Rodin3DCheckStatusResponse, + ), + request=Rodin3DCheckStatusRequest( + subscription_key = subscription_key + ), + completed_statuses=["DONE"], + failed_statuses=["FAILED"], + status_extractor=self.check_rodin_status, + poll_interval=3.0, + auth_kwargs=kwargs, + ) + + logging.info("[ Rodin3D API - CheckStatus ] Generate Start!") + + return poll_operation.execute() + + + + def GetRodinDownloadList(self, uuid, **kwargs) -> Rodin3DDownloadResponse: + logging.info("[ Rodin3D API - Downloading ] Generate Successfully!") + + path = "/proxy/rodin/api/v2/download" + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=Rodin3DDownloadRequest, + response_model=Rodin3DDownloadResponse, + ), + request=Rodin3DDownloadRequest( + task_uuid=uuid + ), + auth_kwargs=kwargs + ) + + return operation.execute() + + def GetQualityAndMode(self, PolyCount): + if PolyCount == "200K-Triangle": + mesh_mode = "Raw" + quality = "medium" + else: + mesh_mode = "Quad" + if PolyCount == "4K-Quad": + quality = "extra-low" + elif PolyCount == "8K-Quad": + quality = "low" + elif PolyCount == "18K-Quad": + quality = "medium" + elif PolyCount == "50K-Quad": + quality = "high" + else: + quality = "medium" + + return mesh_mode, quality + + def DownLoadFiles(self, Url_List): + Save_path = os.path.join(comfy_paths.get_output_directory(), "Rodin3D", datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) + os.makedirs(Save_path, exist_ok=True) + model_file_path = None + for Item in Url_List.list: + url = Item.url + file_name = Item.name + file_path = os.path.join(Save_path, file_name) + if file_path.endswith(".glb"): + model_file_path = file_path + logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}") + max_retries = 5 + for attempt in range(max_retries): + try: + with requests.get(url, stream=True) as r: + r.raise_for_status() + with open(file_path, "wb") as f: + shutil.copyfileobj(r.raw, f) + break + except Exception as e: + logging.info(f"[ Rodin3D API - download_files ] Error downloading {file_path}:{e}") + if attempt < max_retries - 1: + logging.info("Retrying...") + time.sleep(2) + else: + logging.info(f"[ Rodin3D API - download_files ] Failed to download {file_path} after {max_retries} attempts.") + + return model_file_path + + +class Rodin3D_Regular(Rodin3DAPI): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "Images": + ( + IO.IMAGE, + { + "forceInput":True, + } + ) + }, + "optional": { + **COMMON_PARAMETERS + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, + } + + def api_call( + self, + Images, + Seed, + Material_Type, + Polygon_count, + **kwargs + ): + tier = "Regular" + num_images = Images.shape[0] + m_images = [] + for i in range(num_images): + m_images.append(Images[i]) + mesh_mode, quality = 
self.GetQualityAndMode(Polygon_count) + task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=Material_Type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) + self.poll_for_task_status(subscription_key, **kwargs) + Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) + model = self.DownLoadFiles(Download_List) + + return (model,) + +class Rodin3D_Detail(Rodin3DAPI): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "Images": + ( + IO.IMAGE, + { + "forceInput":True, + } + ) + }, + "optional": { + **COMMON_PARAMETERS + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, + } + + def api_call( + self, + Images, + Seed, + Material_Type, + Polygon_count, + **kwargs + ): + tier = "Detail" + num_images = Images.shape[0] + m_images = [] + for i in range(num_images): + m_images.append(Images[i]) + mesh_mode, quality = self.GetQualityAndMode(Polygon_count) + task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=Material_Type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) + self.poll_for_task_status(subscription_key, **kwargs) + Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) + model = self.DownLoadFiles(Download_List) + + return (model,) + +class Rodin3D_Smooth(Rodin3DAPI): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "Images": + ( + IO.IMAGE, + { + "forceInput":True, + } + ) + }, + "optional": { + **COMMON_PARAMETERS + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, + } + + def api_call( + self, + Images, + Seed, + Material_Type, + Polygon_count, + **kwargs + ): + tier = "Smooth" + num_images = Images.shape[0] + m_images = [] + for i in range(num_images): + m_images.append(Images[i]) + mesh_mode, quality = self.GetQualityAndMode(Polygon_count) + task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=Material_Type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) + self.poll_for_task_status(subscription_key, **kwargs) + Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) + model = self.DownLoadFiles(Download_List) + + return (model,) + +class Rodin3D_Sketch(Rodin3DAPI): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "Images": + ( + IO.IMAGE, + { + "forceInput":True, + } + ) + }, + "optional": { + "Seed": + ( + IO.INT, + { + "default":0, + "min":0, + "max":65535, + "display":"number" + } + ) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, + } + + def api_call( + self, + Images, + Seed, + **kwargs + ): + tier = "Sketch" + num_images = Images.shape[0] + m_images = [] + for i in range(num_images): + m_images.append(Images[i]) + material_type = "PBR" + quality = "medium" + mesh_mode = "Quad" + task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=material_type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) + self.poll_for_task_status(subscription_key, **kwargs) + Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) + model = self.DownLoadFiles(Download_List) + + return (model,) + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "Rodin3D_Regular": Rodin3D_Regular, + "Rodin3D_Detail": Rodin3D_Detail, + "Rodin3D_Smooth": Rodin3D_Smooth, + "Rodin3D_Sketch": 
Rodin3D_Sketch, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "Rodin3D_Regular": "Rodin 3D Generate - Regular Generate", + "Rodin3D_Detail": "Rodin 3D Generate - Detail Generate", + "Rodin3D_Smooth": "Rodin 3D Generate - Smooth Generate", + "Rodin3D_Sketch": "Rodin 3D Generate - Sketch Generate", +} diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py new file mode 100644 index 000000000..af4b321f9 --- /dev/null +++ b/comfy_api_nodes/nodes_runway.py @@ -0,0 +1,635 @@ +"""Runway API Nodes + +API Docs: + - https://docs.dev.runwayml.com/api/#tag/Task-management/paths/~1v1~1tasks~1%7Bid%7D/delete + +User Guides: + - https://help.runwayml.com/hc/en-us/sections/30265301423635-Gen-3-Alpha + - https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video + - https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo + - https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3 + +""" + +from typing import Union, Optional, Any +from enum import Enum + +import torch + +from comfy_api_nodes.apis import ( + RunwayImageToVideoRequest, + RunwayImageToVideoResponse, + RunwayTaskStatusResponse as TaskStatusResponse, + RunwayTaskStatusEnum as TaskStatus, + RunwayModelEnum as Model, + RunwayDurationEnum as Duration, + RunwayAspectRatioEnum as AspectRatio, + RunwayPromptImageObject, + RunwayPromptImageDetailedObject, + RunwayTextToImageRequest, + RunwayTextToImageResponse, + Model4, + ReferenceImage, + RunwayTextToImageAspectRatioEnum, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + upload_images_to_comfyapi, + download_url_to_video_output, + image_tensor_pair_to_batch, + validate_string, + download_url_to_image_tensor, +) +from comfy_api_nodes.mapper_utils import model_field_to_node_input +from comfy_api.input_impl import VideoFromFile +from comfy.comfy_types.node_typing import IO, ComfyNodeABC + +PATH_IMAGE_TO_VIDEO = "/proxy/runway/image_to_video" +PATH_TEXT_TO_IMAGE = "/proxy/runway/text_to_image" +PATH_GET_TASK_STATUS = "/proxy/runway/tasks" + +AVERAGE_DURATION_I2V_SECONDS = 64 +AVERAGE_DURATION_FLF_SECONDS = 256 +AVERAGE_DURATION_T2I_SECONDS = 41 + + +class RunwayApiError(Exception): + """Base exception for Runway API errors.""" + + pass + + +class RunwayGen4TurboAspectRatio(str, Enum): + """Aspect ratios supported for Image to Video API when using gen4_turbo model.""" + + field_1280_720 = "1280:720" + field_720_1280 = "720:1280" + field_1104_832 = "1104:832" + field_832_1104 = "832:1104" + field_960_960 = "960:960" + field_1584_672 = "1584:672" + + +class RunwayGen3aAspectRatio(str, Enum): + """Aspect ratios supported for Image to Video API when using gen3a_turbo model.""" + + field_768_1280 = "768:1280" + field_1280_768 = "1280:768" + + +def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: + """Returns the video URL from the task status response if it exists.""" + if response.output and len(response.output) > 0: + return response.output[0] + return None + + +# TODO: replace with updated image validation utils (upstream) +def validate_input_image(image: torch.Tensor) -> bool: + """ + Validate the input image is within the size limits for the Runway API. 
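+    Both width and height must be under 8000 pixels.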
+ See: https://docs.dev.runwayml.com/assets/inputs/#common-error-reasons + """ + return image.shape[2] < 8000 and image.shape[1] < 8000 + + +def poll_until_finished( + auth_kwargs: dict[str, str], + api_endpoint: ApiEndpoint[Any, TaskStatusResponse], + estimated_duration: Optional[int] = None, + node_id: Optional[str] = None, +) -> TaskStatusResponse: + """Polls the Runway API endpoint until the task reaches a terminal state, then returns the response.""" + return PollingOperation( + poll_endpoint=api_endpoint, + completed_statuses=[ + TaskStatus.SUCCEEDED.value, + ], + failed_statuses=[ + TaskStatus.FAILED.value, + TaskStatus.CANCELLED.value, + ], + status_extractor=lambda response: (response.status.value), + auth_kwargs=auth_kwargs, + result_url_extractor=get_video_url_from_task_status, + estimated_duration=estimated_duration, + node_id=node_id, + progress_extractor=extract_progress_from_task_status, + ).execute() + + +def extract_progress_from_task_status( + response: TaskStatusResponse, +) -> Union[float, None]: + if hasattr(response, "progress") and response.progress is not None: + return response.progress * 100 + return None + + +def get_image_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: + """Returns the image URL from the task status response if it exists.""" + if response.output and len(response.output) > 0: + return response.output[0] + return None + + +class RunwayVideoGenNode(ComfyNodeABC): + """Runway Video Node Base.""" + + RETURN_TYPES = ("VIDEO",) + FUNCTION = "api_call" + CATEGORY = "api node/video/Runway" + API_NODE = True + + def validate_task_created(self, response: RunwayImageToVideoResponse) -> bool: + """ + Validate the task creation response from the Runway API matches + expected format. + """ + if not bool(response.id): + raise RunwayApiError("Invalid initial response from Runway API.") + return True + + def validate_response(self, response: RunwayImageToVideoResponse) -> bool: + """ + Validate the successful task status response from the Runway API + matches expected format. + """ + if not response.output or len(response.output) == 0: + raise RunwayApiError( + "Runway task succeeded but no video data found in response." 
+ ) + return True + + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> RunwayImageToVideoResponse: + """Poll the task status until it is finished then get the response.""" + return poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_GET_TASK_STATUS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=TaskStatusResponse, + ), + estimated_duration=AVERAGE_DURATION_FLF_SECONDS, + node_id=node_id, + ) + + def generate_video( + self, + request: RunwayImageToVideoRequest, + auth_kwargs: dict[str, str], + node_id: Optional[str] = None, + ) -> tuple[VideoFromFile]: + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_IMAGE_TO_VIDEO, + method=HttpMethod.POST, + request_model=RunwayImageToVideoRequest, + response_model=RunwayImageToVideoResponse, + ), + request=request, + auth_kwargs=auth_kwargs, + ) + + initial_response = initial_operation.execute() + self.validate_task_created(initial_response) + task_id = initial_response.id + + final_response = self.get_response(task_id, auth_kwargs, node_id) + self.validate_response(final_response) + + video_url = get_video_url_from_task_status(final_response) + return (download_url_to_video_output(video_url),) + + +class RunwayImageToVideoNodeGen3a(RunwayVideoGenNode): + """Runway Image to Video Node using Gen3a Turbo model.""" + + DESCRIPTION = "Generate a video from a single starting frame using Gen3a Turbo model. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo." + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, RunwayImageToVideoRequest, "promptText", multiline=True + ), + "start_frame": ( + IO.IMAGE, + {"tooltip": "Start frame to be used for the video"}, + ), + "duration": model_field_to_node_input( + IO.COMBO, RunwayImageToVideoRequest, "duration", enum_type=Duration + ), + "ratio": model_field_to_node_input( + IO.COMBO, + RunwayImageToVideoRequest, + "ratio", + enum_type=RunwayGen3aAspectRatio, + ), + "seed": model_field_to_node_input( + IO.INT, + RunwayImageToVideoRequest, + "seed", + control_after_generate=True, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + def api_call( + self, + prompt: str, + start_frame: torch.Tensor, + duration: str, + ratio: str, + seed: int, + unique_id: Optional[str] = None, + **kwargs, + ) -> tuple[VideoFromFile]: + # Validate inputs + validate_string(prompt, min_length=1) + validate_input_image(start_frame) + + # Upload image + download_urls = upload_images_to_comfyapi( + start_frame, + max_images=1, + mime_type="image/png", + auth_kwargs=kwargs, + ) + if len(download_urls) != 1: + raise RunwayApiError("Failed to upload one or more images to comfy api.") + + return self.generate_video( + RunwayImageToVideoRequest( + promptText=prompt, + seed=seed, + model=Model("gen3a_turbo"), + duration=Duration(duration), + ratio=AspectRatio(ratio), + promptImage=RunwayPromptImageObject( + root=[ + RunwayPromptImageDetailedObject( + uri=str(download_urls[0]), position="first" + ) + ] + ), + ), + auth_kwargs=kwargs, + node_id=unique_id, + ) + + +class RunwayImageToVideoNodeGen4(RunwayVideoGenNode): + """Runway Image to Video Node using Gen4 Turbo model.""" + + DESCRIPTION 
= "Generate a video from a single starting frame using Gen4 Turbo model. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video." + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, RunwayImageToVideoRequest, "promptText", multiline=True + ), + "start_frame": ( + IO.IMAGE, + {"tooltip": "Start frame to be used for the video"}, + ), + "duration": model_field_to_node_input( + IO.COMBO, RunwayImageToVideoRequest, "duration", enum_type=Duration + ), + "ratio": model_field_to_node_input( + IO.COMBO, + RunwayImageToVideoRequest, + "ratio", + enum_type=RunwayGen4TurboAspectRatio, + ), + "seed": model_field_to_node_input( + IO.INT, + RunwayImageToVideoRequest, + "seed", + control_after_generate=True, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + def api_call( + self, + prompt: str, + start_frame: torch.Tensor, + duration: str, + ratio: str, + seed: int, + unique_id: Optional[str] = None, + **kwargs, + ) -> tuple[VideoFromFile]: + # Validate inputs + validate_string(prompt, min_length=1) + validate_input_image(start_frame) + + # Upload image + download_urls = upload_images_to_comfyapi( + start_frame, + max_images=1, + mime_type="image/png", + auth_kwargs=kwargs, + ) + if len(download_urls) != 1: + raise RunwayApiError("Failed to upload one or more images to comfy api.") + + return self.generate_video( + RunwayImageToVideoRequest( + promptText=prompt, + seed=seed, + model=Model("gen4_turbo"), + duration=Duration(duration), + ratio=AspectRatio(ratio), + promptImage=RunwayPromptImageObject( + root=[ + RunwayPromptImageDetailedObject( + uri=str(download_urls[0]), position="first" + ) + ] + ), + ), + auth_kwargs=kwargs, + node_id=unique_id, + ) + + +class RunwayFirstLastFrameNode(RunwayVideoGenNode): + """Runway First-Last Frame Node.""" + + DESCRIPTION = "Upload first and last keyframes, draft a prompt, and generate a video. More complex transitions, such as cases where the Last frame is completely different from the First frame, may benefit from the longer 10s duration. This would give the generation more time to smoothly transition between the two inputs. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3." + + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> RunwayImageToVideoResponse: + return poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_GET_TASK_STATUS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=TaskStatusResponse, + ), + estimated_duration=AVERAGE_DURATION_FLF_SECONDS, + node_id=node_id, + ) + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, RunwayImageToVideoRequest, "promptText", multiline=True + ), + "start_frame": ( + IO.IMAGE, + {"tooltip": "Start frame to be used for the video"}, + ), + "end_frame": ( + IO.IMAGE, + { + "tooltip": "End frame to be used for the video. Supported for gen3a_turbo only." 
+ }, + ), + "duration": model_field_to_node_input( + IO.COMBO, RunwayImageToVideoRequest, "duration", enum_type=Duration + ), + "ratio": model_field_to_node_input( + IO.COMBO, + RunwayImageToVideoRequest, + "ratio", + enum_type=RunwayGen3aAspectRatio, + ), + "seed": model_field_to_node_input( + IO.INT, + RunwayImageToVideoRequest, + "seed", + control_after_generate=True, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "unique_id": "UNIQUE_ID", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, + } + + def api_call( + self, + prompt: str, + start_frame: torch.Tensor, + end_frame: torch.Tensor, + duration: str, + ratio: str, + seed: int, + unique_id: Optional[str] = None, + **kwargs, + ) -> tuple[VideoFromFile]: + # Validate inputs + validate_string(prompt, min_length=1) + validate_input_image(start_frame) + validate_input_image(end_frame) + + # Upload images + stacked_input_images = image_tensor_pair_to_batch(start_frame, end_frame) + download_urls = upload_images_to_comfyapi( + stacked_input_images, + max_images=2, + mime_type="image/png", + auth_kwargs=kwargs, + ) + if len(download_urls) != 2: + raise RunwayApiError("Failed to upload one or more images to comfy api.") + + return self.generate_video( + RunwayImageToVideoRequest( + promptText=prompt, + seed=seed, + model=Model("gen3a_turbo"), + duration=Duration(duration), + ratio=AspectRatio(ratio), + promptImage=RunwayPromptImageObject( + root=[ + RunwayPromptImageDetailedObject( + uri=str(download_urls[0]), position="first" + ), + RunwayPromptImageDetailedObject( + uri=str(download_urls[1]), position="last" + ), + ] + ), + ), + auth_kwargs=kwargs, + node_id=unique_id, + ) + + +class RunwayTextToImageNode(ComfyNodeABC): + """Runway Text to Image Node.""" + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "api_call" + CATEGORY = "api node/image/Runway" + API_NODE = True + DESCRIPTION = "Generate an image from a text prompt using Runway's Gen 4 model. You can also include reference images to guide the generation." + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, RunwayTextToImageRequest, "promptText", multiline=True + ), + "ratio": model_field_to_node_input( + IO.COMBO, + RunwayTextToImageRequest, + "ratio", + enum_type=RunwayTextToImageAspectRatioEnum, + ), + }, + "optional": { + "reference_image": ( + IO.IMAGE, + {"tooltip": "Optional reference image to guide the generation"}, + ) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + def validate_task_created(self, response: RunwayTextToImageResponse) -> bool: + """ + Validate the task creation response from the Runway API matches + expected format. + """ + if not bool(response.id): + raise RunwayApiError("Invalid initial response from Runway API.") + return True + + def validate_response(self, response: TaskStatusResponse) -> bool: + """ + Validate the successful task status response from the Runway API + matches expected format. + """ + if not response.output or len(response.output) == 0: + raise RunwayApiError( + "Runway task succeeded but no image data found in response." 
+ ) + return True + + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> TaskStatusResponse: + """Poll the task status until it is finished then get the response.""" + return poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_GET_TASK_STATUS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=TaskStatusResponse, + ), + estimated_duration=AVERAGE_DURATION_T2I_SECONDS, + node_id=node_id, + ) + + def api_call( + self, + prompt: str, + ratio: str, + reference_image: Optional[torch.Tensor] = None, + unique_id: Optional[str] = None, + **kwargs, + ) -> tuple[torch.Tensor]: + # Validate inputs + validate_string(prompt, min_length=1) + + # Prepare reference images if provided + reference_images = None + if reference_image is not None: + validate_input_image(reference_image) + download_urls = upload_images_to_comfyapi( + reference_image, + max_images=1, + mime_type="image/png", + auth_kwargs=kwargs, + ) + if len(download_urls) != 1: + raise RunwayApiError("Failed to upload reference image to comfy api.") + + reference_images = [ReferenceImage(uri=str(download_urls[0]))] + + # Create request + request = RunwayTextToImageRequest( + promptText=prompt, + model=Model4.gen4_image, + ratio=ratio, + referenceImages=reference_images, + ) + + # Execute initial request + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_TEXT_TO_IMAGE, + method=HttpMethod.POST, + request_model=RunwayTextToImageRequest, + response_model=RunwayTextToImageResponse, + ), + request=request, + auth_kwargs=kwargs, + ) + + initial_response = initial_operation.execute() + self.validate_task_created(initial_response) + task_id = initial_response.id + + # Poll for completion + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) + self.validate_response(final_response) + + # Download and return image + image_url = get_image_url_from_task_status(final_response) + return (download_url_to_image_tensor(image_url),) + + +NODE_CLASS_MAPPINGS = { + "RunwayFirstLastFrameNode": RunwayFirstLastFrameNode, + "RunwayImageToVideoNodeGen3a": RunwayImageToVideoNodeGen3a, + "RunwayImageToVideoNodeGen4": RunwayImageToVideoNodeGen4, + "RunwayTextToImageNode": RunwayTextToImageNode, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "RunwayFirstLastFrameNode": "Runway First-Last-Frame to Video", + "RunwayImageToVideoNodeGen3a": "Runway Image to Video (Gen3a Turbo)", + "RunwayImageToVideoNodeGen4": "Runway Image to Video (Gen4 Turbo)", + "RunwayTextToImageNode": "Runway Text to Image", +} diff --git a/comfy_api_nodes/nodes_tripo.py b/comfy_api_nodes/nodes_tripo.py new file mode 100644 index 000000000..65f3b21f5 --- /dev/null +++ b/comfy_api_nodes/nodes_tripo.py @@ -0,0 +1,574 @@ +import os +from folder_paths import get_output_directory +from comfy_api_nodes.mapper_utils import model_field_to_node_input +from comfy.comfy_types.node_typing import IO +from comfy_api_nodes.apis import ( + TripoOrientation, + TripoModelVersion, +) +from comfy_api_nodes.apis.tripo_api import ( + TripoTaskType, + TripoStyle, + TripoFileReference, + TripoFileEmptyReference, + TripoUrlReference, + TripoTaskResponse, + TripoTaskStatus, + TripoTextToModelRequest, + TripoImageToModelRequest, + TripoMultiviewToModelRequest, + TripoTextureModelRequest, + TripoRefineModelRequest, + TripoAnimateRigRequest, + TripoAnimateRetargetRequest, + TripoConvertModelRequest, +) + +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, 
+    SynchronousOperation,
+    PollingOperation,
+    EmptyRequest,
+)
+from comfy_api_nodes.apinode_utils import (
+    upload_images_to_comfyapi,
+    download_url_to_bytesio,
+)
+
+
+def upload_image_to_tripo(image, **kwargs):
+    urls = upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs)
+    return TripoFileReference(TripoUrlReference(url=urls[0], type="jpeg"))
+
+def get_model_url_from_response(response: TripoTaskResponse) -> str:
+    if response.data is not None:
+        for key in ["pbr_model", "model", "base_model"]:
+            if getattr(response.data.output, key, None) is not None:
+                return getattr(response.data.output, key)
+    raise RuntimeError(f"Failed to get model url from response: {response}")
+
+
+def poll_until_finished(
+    kwargs: dict[str, str],
+    response: TripoTaskResponse,
+) -> tuple[str, str]:
+    """Poll the Tripo task until it reaches a terminal state, then download the
+    resulting model to the output directory and return (model_file, task_id)."""
+    if response.code != 0:
+        raise RuntimeError(f"Failed to generate mesh: {response.error}")
+    task_id = response.data.task_id
+    response_poll = PollingOperation(
+        poll_endpoint=ApiEndpoint(
+            path=f"/proxy/tripo/v2/openapi/task/{task_id}",
+            method=HttpMethod.GET,
+            request_model=EmptyRequest,
+            response_model=TripoTaskResponse,
+        ),
+        completed_statuses=[TripoTaskStatus.SUCCESS],
+        failed_statuses=[
+            TripoTaskStatus.FAILED,
+            TripoTaskStatus.CANCELLED,
+            TripoTaskStatus.UNKNOWN,
+            TripoTaskStatus.BANNED,
+            TripoTaskStatus.EXPIRED,
+        ],
+        status_extractor=lambda x: x.data.status,
+        auth_kwargs=kwargs,
+        node_id=kwargs["unique_id"],
+        result_url_extractor=get_model_url_from_response,
+        progress_extractor=lambda x: x.data.progress,
+    ).execute()
+    if response_poll.data.status == TripoTaskStatus.SUCCESS:
+        url = get_model_url_from_response(response_poll)
+        bytesio = download_url_to_bytesio(url)
+        # Save the downloaded model file
+        model_file = f"tripo_model_{task_id}.glb"
+        with open(os.path.join(get_output_directory(), model_file), "wb") as f:
+            f.write(bytesio.getvalue())
+        return model_file, task_id
+    raise RuntimeError(f"Failed to generate mesh: {response_poll}")
+
+class TripoTextToModelNode:
+    """
+    Generates 3D models synchronously based on a text prompt using Tripo's API.
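+
+    The task is submitted once, then polled until it reaches a terminal state;
+    the finished model is downloaded and saved as a .glb file in the ComfyUI
+    output directory (see poll_until_finished above).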
+ """ + AVERAGE_DURATION = 80 + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ("STRING", {"multiline": True}), + }, + "optional": { + "negative_prompt": ("STRING", {"multiline": True}), + "model_version": model_field_to_node_input(IO.COMBO, TripoTextToModelRequest, "model_version", enum_type=TripoModelVersion), + "style": model_field_to_node_input(IO.COMBO, TripoTextToModelRequest, "style", enum_type=TripoStyle, default="None"), + "texture": ("BOOLEAN", {"default": True}), + "pbr": ("BOOLEAN", {"default": True}), + "image_seed": ("INT", {"default": 42}), + "model_seed": ("INT", {"default": 42}), + "texture_seed": ("INT", {"default": 42}), + "texture_quality": (["standard", "detailed"], {"default": "standard"}), + "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), + "quad": ("BOOLEAN", {"default": False}) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) + RETURN_NAMES = ("model_file", "model task_id") + FUNCTION = "generate_mesh" + CATEGORY = "api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + + def generate_mesh(self, prompt, negative_prompt=None, model_version=None, style=None, texture=None, pbr=None, image_seed=None, model_seed=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): + style_enum = None if style == "None" else style + if not prompt: + raise RuntimeError("Prompt is required") + response = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoTextToModelRequest, + response_model=TripoTaskResponse, + ), + request=TripoTextToModelRequest( + type=TripoTaskType.TEXT_TO_MODEL, + prompt=prompt, + negative_prompt=negative_prompt if negative_prompt else None, + model_version=model_version, + style=style_enum, + texture=texture, + pbr=pbr, + image_seed=image_seed, + model_seed=model_seed, + texture_seed=texture_seed, + texture_quality=texture_quality, + face_limit=face_limit, + auto_size=True, + quad=quad + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + +class TripoImageToModelNode: + """ + Generates 3D models synchronously based on a single image using Tripo's API. 
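+
+    The input image is first uploaded through the Comfy API and passed to
+    Tripo as a URL reference (see upload_image_to_tripo above).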
+ """ + AVERAGE_DURATION = 80 + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + }, + "optional": { + "model_version": model_field_to_node_input(IO.COMBO, TripoImageToModelRequest, "model_version", enum_type=TripoModelVersion), + "style": model_field_to_node_input(IO.COMBO, TripoTextToModelRequest, "style", enum_type=TripoStyle, default="None"), + "texture": ("BOOLEAN", {"default": True}), + "pbr": ("BOOLEAN", {"default": True}), + "model_seed": ("INT", {"default": 42}), + "orientation": model_field_to_node_input(IO.COMBO, TripoImageToModelRequest, "orientation", enum_type=TripoOrientation), + "texture_seed": ("INT", {"default": 42}), + "texture_quality": (["standard", "detailed"], {"default": "standard"}), + "texture_alignment": (["original_image", "geometry"], {"default": "original_image"}), + "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), + "quad": ("BOOLEAN", {"default": False}) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) + RETURN_NAMES = ("model_file", "model task_id") + FUNCTION = "generate_mesh" + CATEGORY = "api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + + def generate_mesh(self, image, model_version=None, style=None, texture=None, pbr=None, model_seed=None, orientation=None, texture_alignment=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): + style_enum = None if style == "None" else style + if image is None: + raise RuntimeError("Image is required") + tripo_file = upload_image_to_tripo(image, **kwargs) + response = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoImageToModelRequest, + response_model=TripoTaskResponse, + ), + request=TripoImageToModelRequest( + type=TripoTaskType.IMAGE_TO_MODEL, + file=tripo_file, + model_version=model_version, + style=style_enum, + texture=texture, + pbr=pbr, + model_seed=model_seed, + orientation=orientation, + texture_alignment=texture_alignment, + texture_seed=texture_seed, + texture_quality=texture_quality, + face_limit=face_limit, + auto_size=True, + quad=quad + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + +class TripoMultiviewToModelNode: + """ + Generates 3D models synchronously based on up to four images (front, left, back, right) using Tripo's API. 
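+
+    At least one of the left, back, or right views must be provided alongside
+    the front image; missing views are sent as TripoFileEmptyReference
+    placeholders so that the view order is preserved.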
+ """ + AVERAGE_DURATION = 80 + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + }, + "optional": { + "image_left": ("IMAGE",), + "image_back": ("IMAGE",), + "image_right": ("IMAGE",), + "model_version": model_field_to_node_input(IO.COMBO, TripoMultiviewToModelRequest, "model_version", enum_type=TripoModelVersion), + "orientation": model_field_to_node_input(IO.COMBO, TripoImageToModelRequest, "orientation", enum_type=TripoOrientation), + "texture": ("BOOLEAN", {"default": True}), + "pbr": ("BOOLEAN", {"default": True}), + "model_seed": ("INT", {"default": 42}), + "texture_seed": ("INT", {"default": 42}), + "texture_quality": (["standard", "detailed"], {"default": "standard"}), + "texture_alignment": (["original_image", "geometry"], {"default": "original_image"}), + "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), + "quad": ("BOOLEAN", {"default": False}) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) + RETURN_NAMES = ("model_file", "model task_id") + FUNCTION = "generate_mesh" + CATEGORY = "api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + + def generate_mesh(self, image, image_left=None, image_back=None, image_right=None, model_version=None, orientation=None, texture=None, pbr=None, model_seed=None, texture_seed=None, texture_quality=None, texture_alignment=None, face_limit=None, quad=None, **kwargs): + if image is None: + raise RuntimeError("front image for multiview is required") + images = [] + image_dict = { + "image": image, + "image_left": image_left, + "image_back": image_back, + "image_right": image_right + } + if image_left is None and image_back is None and image_right is None: + raise RuntimeError("At least one of left, back, or right image must be provided for multiview") + for image_name in ["image", "image_left", "image_back", "image_right"]: + image_ = image_dict[image_name] + if image_ is not None: + tripo_file = upload_image_to_tripo(image_, **kwargs) + images.append(tripo_file) + else: + images.append(TripoFileEmptyReference()) + response = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoMultiviewToModelRequest, + response_model=TripoTaskResponse, + ), + request=TripoMultiviewToModelRequest( + type=TripoTaskType.MULTIVIEW_TO_MODEL, + files=images, + model_version=model_version, + orientation=orientation, + texture=texture, + pbr=pbr, + model_seed=model_seed, + texture_seed=texture_seed, + texture_quality=texture_quality, + texture_alignment=texture_alignment, + face_limit=face_limit, + quad=quad, + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + +class TripoTextureNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model_task_id": ("MODEL_TASK_ID",), + }, + "optional": { + "texture": ("BOOLEAN", {"default": True}), + "pbr": ("BOOLEAN", {"default": True}), + "texture_seed": ("INT", {"default": 42}), + "texture_quality": (["standard", "detailed"], {"default": "standard"}), + "texture_alignment": (["original_image", "geometry"], {"default": "original_image"}), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) + RETURN_NAMES = ("model_file", "model task_id") + FUNCTION = "generate_mesh" + CATEGORY = 
"api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + AVERAGE_DURATION = 80 + + def generate_mesh(self, model_task_id, texture=None, pbr=None, texture_seed=None, texture_quality=None, texture_alignment=None, **kwargs): + response = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoTextureModelRequest, + response_model=TripoTaskResponse, + ), + request=TripoTextureModelRequest( + original_model_task_id=model_task_id, + texture=texture, + pbr=pbr, + texture_seed=texture_seed, + texture_quality=texture_quality, + texture_alignment=texture_alignment + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + + +class TripoRefineNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model_task_id": ("MODEL_TASK_ID", { + "tooltip": "Must be a v1.4 Tripo model" + }), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + DESCRIPTION = "Refine a draft model created by v1.4 Tripo models only." + + RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) + RETURN_NAMES = ("model_file", "model task_id") + FUNCTION = "generate_mesh" + CATEGORY = "api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + AVERAGE_DURATION = 240 + + def generate_mesh(self, model_task_id, **kwargs): + response = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoRefineModelRequest, + response_model=TripoTaskResponse, + ), + request=TripoRefineModelRequest( + draft_model_task_id=model_task_id + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + + +class TripoRigNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_model_task_id": ("MODEL_TASK_ID",), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("STRING", "RIG_TASK_ID") + RETURN_NAMES = ("model_file", "rig task_id") + FUNCTION = "generate_mesh" + CATEGORY = "api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + AVERAGE_DURATION = 180 + + def generate_mesh(self, original_model_task_id, **kwargs): + response = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoAnimateRigRequest, + response_model=TripoTaskResponse, + ), + request=TripoAnimateRigRequest( + original_model_task_id=original_model_task_id, + out_format="glb", + spec="tripo" + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + +class TripoRetargetNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_model_task_id": ("RIG_TASK_ID",), + "animation": ([ + "preset:idle", + "preset:walk", + "preset:climb", + "preset:jump", + "preset:slash", + "preset:shoot", + "preset:hurt", + "preset:fall", + "preset:turn", + ],), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("STRING", "RETARGET_TASK_ID") + RETURN_NAMES = ("model_file", "retarget task_id") + FUNCTION = "generate_mesh" + CATEGORY = "api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + AVERAGE_DURATION = 30 + + def generate_mesh(self, animation, original_model_task_id, **kwargs): + response = SynchronousOperation( + endpoint=ApiEndpoint( + 
path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoAnimateRetargetRequest, + response_model=TripoTaskResponse, + ), + request=TripoAnimateRetargetRequest( + original_model_task_id=original_model_task_id, + animation=animation, + out_format="glb", + bake_animation=True + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + +class TripoConversionNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_model_task_id": ("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID",), + "format": (["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"],), + }, + "optional": { + "quad": ("BOOLEAN", {"default": False}), + "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), + "texture_size": ("INT", {"min": 128, "max": 4096, "default": 4096}), + "texture_format": (["BMP", "DPX", "HDR", "JPEG", "OPEN_EXR", "PNG", "TARGA", "TIFF", "WEBP"], {"default": "JPEG"}) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + @classmethod + def VALIDATE_INPUTS(cls, input_types): + # The min and max of input1 and input2 are still validated because + # we didn't take `input1` or `input2` as arguments + if input_types["original_model_task_id"] not in ("MODEL_TASK_ID", "RIG_TASK_ID", "RETARGET_TASK_ID"): + return "original_model_task_id must be MODEL_TASK_ID, RIG_TASK_ID or RETARGET_TASK_ID type" + return True + + RETURN_TYPES = () + FUNCTION = "generate_mesh" + CATEGORY = "api node/3d/Tripo" + API_NODE = True + OUTPUT_NODE = True + AVERAGE_DURATION = 30 + + def generate_mesh(self, original_model_task_id, format, quad, face_limit, texture_size, texture_format, **kwargs): + if not original_model_task_id: + raise RuntimeError("original_model_task_id is required") + response = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/tripo/v2/openapi/task", + method=HttpMethod.POST, + request_model=TripoConvertModelRequest, + response_model=TripoTaskResponse, + ), + request=TripoConvertModelRequest( + original_model_task_id=original_model_task_id, + format=format, + quad=quad if quad else None, + face_limit=face_limit if face_limit != -1 else None, + texture_size=texture_size if texture_size != 4096 else None, + texture_format=texture_format if texture_format != "JPEG" else None + ), + auth_kwargs=kwargs, + ).execute() + return poll_until_finished(kwargs, response) + +NODE_CLASS_MAPPINGS = { + "TripoTextToModelNode": TripoTextToModelNode, + "TripoImageToModelNode": TripoImageToModelNode, + "TripoMultiviewToModelNode": TripoMultiviewToModelNode, + "TripoTextureNode": TripoTextureNode, + "TripoRefineNode": TripoRefineNode, + "TripoRigNode": TripoRigNode, + "TripoRetargetNode": TripoRetargetNode, + "TripoConversionNode": TripoConversionNode, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "TripoTextToModelNode": "Tripo: Text to Model", + "TripoImageToModelNode": "Tripo: Image to Model", + "TripoMultiviewToModelNode": "Tripo: Multiview to Model", + "TripoTextureNode": "Tripo: Texture model", + "TripoRefineNode": "Tripo: Refine Draft model", + "TripoRigNode": "Tripo: Rig model", + "TripoRetargetNode": "Tripo: Retarget rigged model", + "TripoConversionNode": "Tripo: Convert model", +} diff --git a/nodes.py b/nodes.py index 1e328651b..2d499051e 100644 --- a/nodes.py +++ b/nodes.py @@ -2281,6 +2281,10 @@ def init_builtin_api_nodes(): "nodes_pixverse.py", "nodes_stability.py", "nodes_pika.py", + "nodes_runway.py", + "nodes_tripo.py", + "nodes_rodin.py", + 
"nodes_gemini.py", ] if not load_custom_node(os.path.join(api_nodes_dir, "canary.py"), module_parent="comfy_api_nodes"): diff --git a/requirements.txt b/requirements.txt index f56b3e096..38991dbf9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.20.6 -comfyui-workflow-templates==0.1.18 +comfyui-workflow-templates==0.1.20 torch torchsde torchvision From c9e1821a7b49bb58f18f114336bae911160ac69d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 27 May 2025 07:07:44 -0400 Subject: [PATCH 0179/1073] ComfyUI version 0.3.37 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 817b7d83b..c13b6501f 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.36" +__version__ = "0.3.37" diff --git a/pyproject.toml b/pyproject.toml index accf6f864..c21b9b5c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.36" +version = "0.3.37" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 06c661004ed67ef33a59df8de7136ad6f4542945 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 27 May 2025 12:09:05 -0700 Subject: [PATCH 0180/1073] Memory estimation code can now take into account conds. (#8307) --- comfy/conds.py | 8 ++++++++ comfy/model_base.py | 16 +++++++++++++--- comfy/sampler_helpers.py | 22 +++++++++++++++++++--- comfy/samplers.py | 8 +++++++- 4 files changed, 47 insertions(+), 7 deletions(-) diff --git a/comfy/conds.py b/comfy/conds.py index 211fb8d57..920e25488 100644 --- a/comfy/conds.py +++ b/comfy/conds.py @@ -24,6 +24,10 @@ class CONDRegular: conds.append(x.cond) return torch.cat(conds) + def size(self): + return list(self.cond.size()) + + class CONDNoiseShape(CONDRegular): def process_cond(self, batch_size, device, area, **kwargs): data = self.cond @@ -64,6 +68,7 @@ class CONDCrossAttn(CONDRegular): out.append(c) return torch.cat(out) + class CONDConstant(CONDRegular): def __init__(self, cond): self.cond = cond @@ -78,3 +83,6 @@ class CONDConstant(CONDRegular): def concat(self, others): return self.cond + + def size(self): + return [1] diff --git a/comfy/model_base.py b/comfy/model_base.py index fb4724690..cfd10d726 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -135,6 +135,7 @@ class BaseModel(torch.nn.Module): logging.info("model_type {}".format(model_type.name)) logging.debug("adm {}".format(self.adm_channels)) self.memory_usage_factor = model_config.memory_usage_factor + self.memory_usage_factor_conds = () def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( @@ -325,19 +326,28 @@ class BaseModel(torch.nn.Module): def scale_latent_inpaint(self, sigma, noise, latent_image, **kwargs): return self.model_sampling.noise_scaling(sigma.reshape([sigma.shape[0]] + [1] * (len(noise.shape) - 1)), noise, latent_image) - def memory_required(self, input_shape): + def memory_required(self, input_shape, cond_shapes={}): + input_shapes = [input_shape] + for c in self.memory_usage_factor_conds: + shape = cond_shapes.get(c, None) + if shape is not None and len(shape) > 0: + input_shapes += shape + if 
comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): dtype = self.get_dtype() if self.manual_cast_dtype is not None: dtype = self.manual_cast_dtype #TODO: this needs to be tweaked - area = input_shape[0] * math.prod(input_shape[2:]) + area = sum(map(lambda input_shape: input_shape[0] * math.prod(input_shape[2:]), input_shapes)) return (area * comfy.model_management.dtype_size(dtype) * 0.01 * self.memory_usage_factor) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. - area = input_shape[0] * math.prod(input_shape[2:]) + area = sum(map(lambda input_shape: input_shape[0] * math.prod(input_shape[2:]), input_shapes)) return (area * 0.15 * self.memory_usage_factor) * (1024 * 1024) + def extra_conds_shapes(self, **kwargs): + return {} + def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0, seed=None): adm_inputs = [] diff --git a/comfy/sampler_helpers.py b/comfy/sampler_helpers.py index 96a3040a1..8dbc41455 100644 --- a/comfy/sampler_helpers.py +++ b/comfy/sampler_helpers.py @@ -1,5 +1,7 @@ from __future__ import annotations import uuid +import math +import collections import comfy.model_management import comfy.conds import comfy.utils @@ -104,6 +106,21 @@ def cleanup_additional_models(models): if hasattr(m, 'cleanup'): m.cleanup() +def estimate_memory(model, noise_shape, conds): + cond_shapes = collections.defaultdict(list) + cond_shapes_min = {} + for _, cs in conds.items(): + for cond in cs: + for k, v in model.model.extra_conds_shapes(**cond).items(): + cond_shapes[k].append(v) + if cond_shapes_min.get(k, None) is None: + cond_shapes_min[k] = [v] + elif math.prod(v) > math.prod(cond_shapes_min[k][0]): + cond_shapes_min[k] = [v] + + memory_required = model.model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:]), cond_shapes=cond_shapes) + minimum_memory_required = model.model.memory_required([noise_shape[0]] + list(noise_shape[1:]), cond_shapes=cond_shapes_min) + return memory_required, minimum_memory_required def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None): executor = comfy.patcher_extension.WrapperExecutor.new_executor( @@ -117,9 +134,8 @@ def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=Non models, inference_memory = get_additional_models(conds, model.model_dtype()) models += get_additional_models_from_model_options(model_options) models += model.get_nested_additional_models() # TODO: does this require inference_memory update? 
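+    # estimate_memory (added above) folds per-cond tensor shapes into the
+    # model's memory estimate; inference_memory for the additional models is
+    # added on top in the load_models_gpu call below.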
-    memory_required = model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:])) + inference_memory
-    minimum_memory_required = model.memory_required([noise_shape[0]] + list(noise_shape[1:])) + inference_memory
-    comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required, minimum_memory_required=minimum_memory_required)
+    memory_required, minimum_memory_required = estimate_memory(model, noise_shape, conds)
+    comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required + inference_memory, minimum_memory_required=minimum_memory_required + inference_memory)
     real_model = model.model

     return real_model, conds, models
diff --git a/comfy/samplers.py b/comfy/samplers.py
index 67ae09a25..efe9bf867 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -256,7 +256,12 @@ def _calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Te
         for i in range(1, len(to_batch_temp) + 1):
             batch_amount = to_batch_temp[:len(to_batch_temp)//i]
             input_shape = [len(batch_amount) * first_shape[0]] + list(first_shape)[1:]
-            if model.memory_required(input_shape) * 1.5 < free_memory:
+            cond_shapes = collections.defaultdict(list)
+            for tt in batch_amount:
+                for k, v in to_run[tt][0].conditioning.items():
+                    cond_shapes[k].append(v.size())
+
+            if model.memory_required(input_shape, cond_shapes=cond_shapes) * 1.5 < free_memory:
                 to_batch = batch_amount
                 break

From ba37e67964b4a25be788042e01e72c6806038be0 Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Tue, 27 May 2025 22:42:18 -0700
Subject: [PATCH 0181/1073] update frontend patch 1.20.7 (#8312)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 38991dbf9..5e3c60659 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.20.6
+comfyui-frontend-package==1.20.7
 comfyui-workflow-templates==0.1.20
 torch
 torchsde
From e6609dacdeeafa371fe4e9f303016a605a333a76 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 28 May 2025 02:15:11 -0400
Subject: [PATCH 0182/1073] ComfyUI version 0.3.38

---
 comfyui_version.py | 2 +-
 pyproject.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index c13b6501f..64b326db8 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.37"
+__version__ = "0.3.38"
diff --git a/pyproject.toml b/pyproject.toml
index c21b9b5c6..0a841c8ce 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.37"
+version = "0.3.38"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
From 1c1687ab1c3ba5b7d952d92359cfb0acd636da5f Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 28 May 2025 15:47:15 -0700
Subject: [PATCH 0183/1073] Support HiDream SimpleTuner loras.
(#8318) --- comfy/lora.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/lora.py b/comfy/lora.py index ef110c164..387d5c52a 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -283,8 +283,9 @@ def model_lora_keys_unet(model, key_map={}): for k in sdk: if k.startswith("diffusion_model."): if k.endswith(".weight"): - key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_") - key_map["lycoris_{}".format(key_lora)] = k #SimpleTuner lycoris format + key_lora = k[len("diffusion_model."):-len(".weight")] + key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = k #SimpleTuner lycoris format + key_map["transformer.{}".format(key_lora)] = k #SimpleTuner regular format if isinstance(model, comfy.model_base.ACEStep): for k in sdk: From 592d05610072777d170cf44604366bc489ada81b Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Wed, 28 May 2025 20:42:02 -0700 Subject: [PATCH 0184/1073] Add support for Veo3 API node. (#8320) --- comfy_api_nodes/nodes_veo2.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index df846d5dd..e93f82a9a 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -54,6 +54,10 @@ class VeoVideoGenerationNode(ComfyNodeABC): """ Generates videos from text prompts using Google's Veo API. + Supported models: + - veo-2.0-generate-001 + - veo-3.0-generate-preview + This node can create videos from text descriptions and optional image inputs, with control over parameters like aspect ratio, duration, and more. """ @@ -130,6 +134,14 @@ class VeoVideoGenerationNode(ComfyNodeABC): "default": None, "tooltip": "Optional reference image to guide video generation", }), + "model": ( + IO.COMBO, + { + "options": ["veo-2.0-generate-001", "veo-3.0-generate-preview"], + "default": "veo-2.0-generate-001", + "tooltip": "Model to use for video generation. Defaults to veo 2.0", + }, + ), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", @@ -154,6 +166,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): person_generation="ALLOW", seed=0, image=None, + model="veo-2.0-generate-001", unique_id: Optional[str] = None, **kwargs, ): @@ -192,7 +205,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): # Initial request to start video generation initial_operation = SynchronousOperation( endpoint=ApiEndpoint( - path="/proxy/veo/generate", + path=f"/proxy/veo/{model}/generate", method=HttpMethod.POST, request_model=Veo2GenVidRequest, response_model=Veo2GenVidResponse @@ -223,7 +236,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): # Define the polling operation poll_operation = PollingOperation( poll_endpoint=ApiEndpoint( - path="/proxy/veo/poll", + path=f"/proxy/veo/{model}/poll", method=HttpMethod.POST, request_model=Veo2GenVidPollRequest, response_model=Veo2GenVidPollResponse @@ -304,5 +317,5 @@ NODE_CLASS_MAPPINGS = { } NODE_DISPLAY_NAME_MAPPINGS = { - "VeoVideoGenerationNode": "Google Veo2 Video Generation", + "VeoVideoGenerationNode": "Google Veo Video Generation", } From 4eba3161cf5481b2b275ab3d0efad581ef028f7e Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Wed, 28 May 2025 20:42:25 -0700 Subject: [PATCH 0185/1073] Refactor Pika API node imports and fix unique_id issue. (#8319) Added unique_id to hidden parameters and corrected description formatting in PikAdditionsNode. 
--- comfy_api_nodes/nodes_pika.py | 49 +++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index 30562790a..1cc708564 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -6,40 +6,42 @@ Pika API docs: https://pika-827374fb.mintlify.app/api-reference from __future__ import annotations import io -from typing import Optional, TypeVar import logging -import torch +from typing import Optional, TypeVar + import numpy as np +import torch + +from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeOptions +from comfy_api.input_impl import VideoFromFile +from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput +from comfy_api_nodes.apinode_utils import ( + download_url_to_video_output, + tensor_to_bytesio, +) from comfy_api_nodes.apis import ( - PikaBodyGenerate22T2vGenerate22T2vPost, - PikaGenerateResponse, - PikaBodyGenerate22I2vGenerate22I2vPost, - PikaVideoResponse, - PikaBodyGenerate22C2vGenerate22PikascenesPost, IngredientsMode, - PikaDurationEnum, - PikaResolutionEnum, - PikaBodyGeneratePikaffectsGeneratePikaffectsPost, - PikaBodyGeneratePikadditionsGeneratePikadditionsPost, - PikaBodyGeneratePikaswapsGeneratePikaswapsPost, + PikaBodyGenerate22C2vGenerate22PikascenesPost, + PikaBodyGenerate22I2vGenerate22I2vPost, PikaBodyGenerate22KeyframeGenerate22PikaframesPost, + PikaBodyGenerate22T2vGenerate22T2vPost, + PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + PikaBodyGeneratePikaffectsGeneratePikaffectsPost, + PikaBodyGeneratePikaswapsGeneratePikaswapsPost, + PikaDurationEnum, Pikaffect, + PikaGenerateResponse, + PikaResolutionEnum, + PikaVideoResponse, ) from comfy_api_nodes.apis.client import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, EmptyRequest, -) -from comfy_api_nodes.apinode_utils import ( - tensor_to_bytesio, - download_url_to_video_output, + HttpMethod, + PollingOperation, + SynchronousOperation, ) from comfy_api_nodes.mapper_utils import model_field_to_node_input -from comfy_api.input_impl.video_types import VideoInput, VideoContainer, VideoCodec -from comfy_api.input_impl import VideoFromFile -from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeOptions R = TypeVar("R") @@ -204,6 +206,7 @@ class PikaImageToVideoV2_2(PikaNodeBase): "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", }, } @@ -457,7 +460,7 @@ class PikAdditionsNode(PikaNodeBase): }, } - DESCRIPTION = "Add any object or image into your video. Upload a video and specify what you’d like to add to create a seamlessly integrated result." + DESCRIPTION = "Add any object or image into your video. Upload a video and specify what you'd like to add to create a seamlessly integrated result." def api_call( self, From 5e5e46d40c94a4efb7e0921d88493c798c021d82 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 28 May 2025 20:46:15 -0700 Subject: [PATCH 0186/1073] Not really tested WAN Phantom Support. 
(#8321) --- comfy/ldm/wan/model.py | 9 ++++++++- comfy/model_base.py | 5 +++++ comfy_extras/nodes_wan.py | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 1b51a4e4a..1d6edb354 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -539,13 +539,20 @@ class WanModel(torch.nn.Module): x = self.unpatchify(x, grid_sizes) return x - def forward(self, x, timestep, context, clip_fea=None, transformer_options={}, **kwargs): + def forward(self, x, timestep, context, clip_fea=None, time_dim_concat=None, transformer_options={}, **kwargs): bs, c, t, h, w = x.shape x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size) + patch_size = self.patch_size t_len = ((t + (patch_size[0] // 2)) // patch_size[0]) h_len = ((h + (patch_size[1] // 2)) // patch_size[1]) w_len = ((w + (patch_size[2] // 2)) // patch_size[2]) + + if time_dim_concat is not None: + time_dim_concat = comfy.ldm.common_dit.pad_to_patch_size(time_dim_concat, self.patch_size) + x = torch.cat([x, time_dim_concat], dim=2) + t_len = ((x.shape[2] + (patch_size[0] // 2)) // patch_size[0]) + img_ids = torch.zeros((t_len, h_len, w_len, 3), device=x.device, dtype=x.dtype) img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(0, t_len - 1, steps=t_len, device=x.device, dtype=x.dtype).reshape(-1, 1, 1) img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).reshape(1, -1, 1) diff --git a/comfy/model_base.py b/comfy/model_base.py index cfd10d726..8ed124277 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1057,6 +1057,11 @@ class WAN21(BaseModel): clip_vision_output = kwargs.get("clip_vision_output", None) if clip_vision_output is not None: out['clip_fea'] = comfy.conds.CONDRegular(clip_vision_output.penultimate_hidden_states) + + time_dim_concat = kwargs.get("time_dim_concat", None) + if time_dim_concat is not None: + out['time_dim_concat'] = comfy.conds.CONDRegular(self.process_latent_in(time_dim_concat)) + return out diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index c35c4871c..d6097a104 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -345,6 +345,44 @@ class WanCameraImageToVideo: out_latent["samples"] = latent return (positive, negative, out_latent) +class WanPhantomSubjectToVideo: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "vae": ("VAE", ), + "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + "optional": {"images": ("IMAGE", ), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative_text", "negative_img_text", "latent") + FUNCTION = "encode" + + CATEGORY = "conditioning/video_models" + + def encode(self, positive, negative, vae, width, height, length, batch_size, images): + latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + cond2 = negative + if images is not None: + images = comfy.utils.common_upscale(images[:length].movedim(-1, 1), width, height, "bilinear", 
"center").movedim(1, -1) + latent_images = [] + for i in images: + latent_images += [vae.encode(i.unsqueeze(0)[:, :, :, :3])] + concat_latent_image = torch.cat(latent_images, dim=2) + + positive = node_helpers.conditioning_set_values(positive, {"time_dim_concat": concat_latent_image}) + cond2 = node_helpers.conditioning_set_values(negative, {"time_dim_concat": concat_latent_image}) + negative = node_helpers.conditioning_set_values(negative, {"time_dim_concat": comfy.latent_formats.Wan21().process_out(torch.zeros_like(concat_latent_image))}) + + out_latent = {} + out_latent["samples"] = latent + return (positive, cond2, negative, out_latent) + NODE_CLASS_MAPPINGS = { "WanImageToVideo": WanImageToVideo, "WanFunControlToVideo": WanFunControlToVideo, @@ -353,4 +391,5 @@ NODE_CLASS_MAPPINGS = { "WanVaceToVideo": WanVaceToVideo, "TrimVideoLatent": TrimVideoLatent, "WanCameraImageToVideo": WanCameraImageToVideo, + "WanPhantomSubjectToVideo": WanPhantomSubjectToVideo, } From fb83eda2879f0125323c4b91a2b235501d8f061e Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Thu, 29 May 2025 00:03:11 -0700 Subject: [PATCH 0187/1073] Revert "Add support for Veo3 API node." (#8322) This reverts commit 592d05610072777d170cf44604366bc489ada81b. --- comfy_api_nodes/nodes_veo2.py | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index e93f82a9a..df846d5dd 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -54,10 +54,6 @@ class VeoVideoGenerationNode(ComfyNodeABC): """ Generates videos from text prompts using Google's Veo API. - Supported models: - - veo-2.0-generate-001 - - veo-3.0-generate-preview - This node can create videos from text descriptions and optional image inputs, with control over parameters like aspect ratio, duration, and more. """ @@ -134,14 +130,6 @@ class VeoVideoGenerationNode(ComfyNodeABC): "default": None, "tooltip": "Optional reference image to guide video generation", }), - "model": ( - IO.COMBO, - { - "options": ["veo-2.0-generate-001", "veo-3.0-generate-preview"], - "default": "veo-2.0-generate-001", - "tooltip": "Model to use for video generation. 
Defaults to veo 2.0", - }, - ), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", @@ -166,7 +154,6 @@ class VeoVideoGenerationNode(ComfyNodeABC): person_generation="ALLOW", seed=0, image=None, - model="veo-2.0-generate-001", unique_id: Optional[str] = None, **kwargs, ): @@ -205,7 +192,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): # Initial request to start video generation initial_operation = SynchronousOperation( endpoint=ApiEndpoint( - path=f"/proxy/veo/{model}/generate", + path="/proxy/veo/generate", method=HttpMethod.POST, request_model=Veo2GenVidRequest, response_model=Veo2GenVidResponse @@ -236,7 +223,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): # Define the polling operation poll_operation = PollingOperation( poll_endpoint=ApiEndpoint( - path=f"/proxy/veo/{model}/poll", + path="/proxy/veo/poll", method=HttpMethod.POST, request_model=Veo2GenVidPollRequest, response_model=Veo2GenVidPollResponse @@ -317,5 +304,5 @@ NODE_CLASS_MAPPINGS = { } NODE_DISPLAY_NAME_MAPPINGS = { - "VeoVideoGenerationNode": "Google Veo Video Generation", + "VeoVideoGenerationNode": "Google Veo2 Video Generation", } From f2289a1f597082ef8dc5895d7dd4b57edfa96ac8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 29 May 2025 05:29:37 -0700 Subject: [PATCH 0188/1073] Delete useless file. (#8327) --- comfy/text_encoders/long_clipl.json | 25 ------------------------- 1 file changed, 25 deletions(-) delete mode 100644 comfy/text_encoders/long_clipl.json diff --git a/comfy/text_encoders/long_clipl.json b/comfy/text_encoders/long_clipl.json deleted file mode 100644 index 5e2056ff3..000000000 --- a/comfy/text_encoders/long_clipl.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "_name_or_path": "openai/clip-vit-large-patch14", - "architectures": [ - "CLIPTextModel" - ], - "attention_dropout": 0.0, - "bos_token_id": 0, - "dropout": 0.0, - "eos_token_id": 49407, - "hidden_act": "quick_gelu", - "hidden_size": 768, - "initializer_factor": 1.0, - "initializer_range": 0.02, - "intermediate_size": 3072, - "layer_norm_eps": 1e-05, - "max_position_embeddings": 248, - "model_type": "clip_text_model", - "num_attention_heads": 12, - "num_hidden_layers": 12, - "pad_token_id": 1, - "projection_dim": 768, - "torch_dtype": "float32", - "transformers_version": "4.24.0", - "vocab_size": 49408 -} From f1c9ca816ac1806982fe97b33eb8d7c76154bec1 Mon Sep 17 00:00:00 2001 From: Robin Huang Date: Thu, 29 May 2025 10:27:40 -0700 Subject: [PATCH 0189/1073] Add BFL Kontext API Nodes. (#8333) * Added initial Flux.1 Kontext Pro Image node - recreated branch to save myself sanity from rebase crap after master got rebased * Add safety filter to Kontext. * Make safety = 2 and input image is optional. * Add BFL kontext API nodes. 
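
A minimal sketch of constructing the new request model (class and field names
per the diff below; the prompt and values are illustrative only):

    from comfy_api_nodes.apis.bfl_api import BFLFluxKontextProGenerateRequest

    # input_image is optional and, when given, must be a base64-encoded image
    req = BFLFluxKontextProGenerateRequest(
        prompt="make the jacket red",
        guidance=3.0,
        steps=50,
        seed=1234,
        aspect_ratio="16:9",
    )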
---------

Co-authored-by: Jedrzej Kosinski
---
 comfy_api_nodes/apis/bfl_api.py |  34 ++++
 comfy_api_nodes/nodes_bfl.py    | 288 +++++++++++++++++++++++++++++++-
 2 files changed, 321 insertions(+), 1 deletion(-)

diff --git a/comfy_api_nodes/apis/bfl_api.py b/comfy_api_nodes/apis/bfl_api.py
index c189038fb..504e507e1 100644
--- a/comfy_api_nodes/apis/bfl_api.py
+++ b/comfy_api_nodes/apis/bfl_api.py
@@ -108,6 +108,40 @@ class BFLFluxProGenerateRequest(BaseModel):
     # )


+class BFLFluxKontextProGenerateRequest(BaseModel):
+    prompt: str = Field(..., description='The text prompt for what you want to edit.')
+    input_image: Optional[str] = Field(None, description='Image to edit in base64 format')
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    guidance: confloat(ge=0.1, le=99.0) = Field(..., description='Guidance strength for the image generation process')
+    steps: conint(ge=1, le=150) = Field(..., description='Number of steps for the image generation process')
+    safety_tolerance: Optional[conint(ge=0, le=2)] = Field(
+        2, description='Tolerance level for input and output moderation. Between 0 and 2, 0 being most strict, 2 being least strict. Defaults to 2.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    aspect_ratio: Optional[str] = Field(None, description='Aspect ratio of the image between 21:9 and 9:21.')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+    )
+
+class BFLFluxKontextMaxGenerateRequest(BaseModel):
+    prompt: str = Field(..., description='The text prompt for what you want to edit.')
+    input_image: Optional[str] = Field(None, description='Image to edit in base64 format')
+    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
+    guidance: confloat(ge=0.1, le=99.0) = Field(..., description='Guidance strength for the image generation process')
+    steps: conint(ge=1, le=150) = Field(..., description='Number of steps for the image generation process')
+    safety_tolerance: Optional[conint(ge=0, le=2)] = Field(
+        2, description='Tolerance level for input and output moderation. Between 0 and 2, 0 being most strict, 2 being least strict. Defaults to 2.'
+    )
+    output_format: Optional[BFLOutputFormat] = Field(
+        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
+    )
+    aspect_ratio: Optional[str] = Field(None, description='Aspect ratio of the image between 21:9 and 9:21.')
+    prompt_upsampling: Optional[bool] = Field(
+        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
+ ) + class BFLFluxProUltraGenerateRequest(BaseModel): prompt: str = Field(..., description='The text prompt for image generation.') prompt_upsampling: Optional[bool] = Field( diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index 509170b34..a762472e6 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -1,6 +1,6 @@ import io from inspect import cleandoc -from typing import Union +from typing import Union, Optional from comfy.comfy_types.node_typing import IO, ComfyNodeABC from comfy_api_nodes.apis.bfl_api import ( BFLStatus, @@ -9,6 +9,7 @@ from comfy_api_nodes.apis.bfl_api import ( BFLFluxCannyImageRequest, BFLFluxDepthImageRequest, BFLFluxProGenerateRequest, + BFLFluxKontextProGenerateRequest, BFLFluxProUltraGenerateRequest, BFLFluxProGenerateResponse, ) @@ -269,6 +270,287 @@ class FluxProUltraImageNode(ComfyNodeABC): return (output_image,) +class FluxKontextProImageNode(ComfyNodeABC): + """ + Edits images using Flux.1 Kontext Pro via api based on prompt and resolution. + """ + + MINIMUM_RATIO = 1 / 4 + MAXIMUM_RATIO = 4 / 1 + MINIMUM_RATIO_STR = "1:4" + MAXIMUM_RATIO_STR = "4:1" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation - specify what and how to edit.", + }, + ), + "aspect_ratio": ( + IO.STRING, + { + "default": "16:9", + "tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", + }, + ), + "guidance": ( + IO.FLOAT, + { + "default": 3.0, + "min": 0.1, + "max": 99.0, + "step": 0.1, + "tooltip": "Guidance strength for the image generation process" + }, + ), + "steps": ( + IO.INT, + { + "default": 50, + "min": 1, + "max": 150, + "tooltip": "Number of steps for the image generation process" + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + }, + "optional": { + "input_image": (IO.IMAGE,), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + @classmethod + def VALIDATE_INPUTS(cls, aspect_ratio: str): + try: + validate_aspect_ratio( + aspect_ratio, + minimum_ratio=cls.MINIMUM_RATIO, + maximum_ratio=cls.MAXIMUM_RATIO, + minimum_ratio_str=cls.MINIMUM_RATIO_STR, + maximum_ratio_str=cls.MAXIMUM_RATIO_STR, + ) + except Exception as e: + return str(e) + return True + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + prompt: str, + aspect_ratio: str, + guidance: float, + steps: int, + input_image: Optional[torch.Tensor]=None, + seed=0, + prompt_upsampling=False, + unique_id: Union[str, None] = None, + **kwargs, + ): + if input_image is None: + validate_string(prompt, strip_whitespace=False) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/bfl/flux-kontext-pro/generate", + method=HttpMethod.POST, + request_model=BFLFluxKontextProGenerateRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxKontextProGenerateRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + guidance=round(guidance, 1), + steps=steps, + seed=seed, + aspect_ratio=validate_aspect_ratio( + aspect_ratio, + minimum_ratio=self.MINIMUM_RATIO, + maximum_ratio=self.MAXIMUM_RATIO, + minimum_ratio_str=self.MINIMUM_RATIO_STR, + maximum_ratio_str=self.MAXIMUM_RATIO_STR, + ), + input_image=( + input_image + if input_image is None + else convert_image_to_base64(input_image) + ) + ), + auth_kwargs=kwargs, + ) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) + return (output_image,) + +class FluxKontextMaxImageNode(ComfyNodeABC): + """ + Edits images using Flux.1 Kontext Max via api based on prompt and resolution. + """ + + MINIMUM_RATIO = 1 / 4 + MAXIMUM_RATIO = 4 / 1 + MINIMUM_RATIO_STR = "1:4" + MAXIMUM_RATIO_STR = "4:1" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Prompt for the image generation - specify what and how to edit.", + }, + ), + "aspect_ratio": ( + IO.STRING, + { + "default": "16:9", + "tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", + }, + ), + "guidance": ( + IO.FLOAT, + { + "default": 3.0, + "min": 0.1, + "max": 99.0, + "step": 0.1, + "tooltip": "Guidance strength for the image generation process" + }, + ), + "steps": ( + IO.INT, + { + "default": 50, + "min": 1, + "max": 150, + "tooltip": "Number of steps for the image generation process" + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + "prompt_upsampling": ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + }, + ), + }, + "optional": { + "input_image": (IO.IMAGE,), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + @classmethod + def VALIDATE_INPUTS(cls, aspect_ratio: str): + try: + validate_aspect_ratio( + aspect_ratio, + minimum_ratio=cls.MINIMUM_RATIO, + maximum_ratio=cls.MAXIMUM_RATIO, + minimum_ratio_str=cls.MINIMUM_RATIO_STR, + maximum_ratio_str=cls.MAXIMUM_RATIO_STR, + ) + except Exception as e: + return str(e) + return True + + RETURN_TYPES = (IO.IMAGE,) + DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value + FUNCTION = "api_call" + API_NODE = True + CATEGORY = "api node/image/BFL" + + def api_call( + self, + prompt: str, + aspect_ratio: str, + guidance: float, + steps: int, + input_image: Optional[torch.Tensor]=None, + seed=0, + prompt_upsampling=False, + unique_id: Union[str, None] = None, + **kwargs, + ): + if input_image is None: + validate_string(prompt, strip_whitespace=False) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/bfl/flux-kontext-max/generate", + method=HttpMethod.POST, + request_model=BFLFluxKontextProGenerateRequest, + response_model=BFLFluxProGenerateResponse, + ), + request=BFLFluxKontextProGenerateRequest( + prompt=prompt, + prompt_upsampling=prompt_upsampling, + guidance=round(guidance, 1), + steps=steps, + seed=seed, + aspect_ratio=validate_aspect_ratio( + aspect_ratio, + minimum_ratio=self.MINIMUM_RATIO, + maximum_ratio=self.MAXIMUM_RATIO, + minimum_ratio_str=self.MINIMUM_RATIO_STR, + maximum_ratio_str=self.MAXIMUM_RATIO_STR, + ), + input_image=( + input_image + if input_image is None + else convert_image_to_base64(input_image) + ) + ), + auth_kwargs=kwargs, + ) + output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) + return (output_image,) class FluxProImageNode(ComfyNodeABC): """ @@ -914,6 +1196,8 @@ class FluxProDepthNode(ComfyNodeABC): NODE_CLASS_MAPPINGS = { "FluxProUltraImageNode": FluxProUltraImageNode, # "FluxProImageNode": FluxProImageNode, + "FluxKontextProImageNode": FluxKontextProImageNode, + "FluxKontextMaxImageNode": FluxKontextMaxImageNode, "FluxProExpandNode": FluxProExpandNode, "FluxProFillNode": FluxProFillNode, "FluxProCannyNode": FluxProCannyNode, @@ -924,6 +1208,8 @@ NODE_CLASS_MAPPINGS = { NODE_DISPLAY_NAME_MAPPINGS = { "FluxProUltraImageNode": "Flux 1.1 [pro] Ultra Image", # "FluxProImageNode": "Flux 1.1 [pro] Image", + "FluxKontextProImageNode": "Flux.1 Kontext Pro Image", + "FluxKontextMaxImageNode": "Flux.1 Kontext Max Image", "FluxProExpandNode": "Flux.1 Expand Image", "FluxProFillNode": "Flux.1 Fill Image", "FluxProCannyNode": "Flux.1 Canny Control Image", From 31260f02753d0809204225cb5f61bace900120fa Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Fri, 30 May 2025 03:52:27 +1000 Subject: [PATCH 0190/1073] Update templates 0.1.22 (#8334) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5e3c60659..d2baedf0d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.20.7 -comfyui-workflow-templates==0.1.20 +comfyui-workflow-templates==0.1.22 torch torchsde torchvision From 094306b626e9cf505690c5d8b445032b3b8a36fa Mon Sep 17 00:00:00 2001 From: 
comfyanonymous Date: Thu, 29 May 2025 14:26:39 -0400 Subject: [PATCH 0191/1073] ComfyUI version 0.3.39 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 64b326db8..f742410b1 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.38" +__version__ = "0.3.39" diff --git a/pyproject.toml b/pyproject.toml index 0a841c8ce..28a6158e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.38" +version = "0.3.39" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From aeba0b3a268eeb66c3a12cae6fd97f7c2d28f36f Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Thu, 29 May 2025 14:14:27 -0700 Subject: [PATCH 0192/1073] Reduce code duplication for [pro] and [max], rename Pro and Max to [pro] and [max] to be consistent with other BFL nodes, make default seed for Kontext nodes be 1234, since 0 is interpreted by the API as 'choose random seed' (#8337) --- comfy_api_nodes/apis/bfl_api.py | 16 ---- comfy_api_nodes/nodes_bfl.py | 153 +++----------------------------- 2 files changed, 12 insertions(+), 157 deletions(-) diff --git a/comfy_api_nodes/apis/bfl_api.py b/comfy_api_nodes/apis/bfl_api.py index 504e507e1..0e90aef7c 100644 --- a/comfy_api_nodes/apis/bfl_api.py +++ b/comfy_api_nodes/apis/bfl_api.py @@ -125,22 +125,6 @@ class BFLFluxKontextProGenerateRequest(BaseModel): None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.' ) -class BFLFluxKontextMaxGenerateRequest(BaseModel): - prompt: str = Field(..., description='The text prompt for what you wannt to edit.') - input_image: Optional[str] = Field(None, description='Image to edit in base64 format') - seed: Optional[int] = Field(None, description='The seed value for reproducibility.') - guidance: confloat(ge=0.1, le=99.0) = Field(..., description='Guidance strength for the image generation process') - steps: conint(ge=1, le=150) = Field(..., description='Number of steps for the image generation process') - safety_tolerance: Optional[conint(ge=0, le=2)] = Field( - 2, description='Tolerance level for input and output moderation. Between 0 and 2, 0 being most strict, 6 being least strict. Defaults to 2.' - ) - output_format: Optional[BFLOutputFormat] = Field( - BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png'] - ) - aspect_ratio: Optional[str] = Field(None, description='Aspect ratio of the image between 21:9 and 9:21.') - prompt_upsampling: Optional[bool] = Field( - None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.' - ) class BFLFluxProUltraGenerateRequest(BaseModel): prompt: str = Field(..., description='The text prompt for image generation.') diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index a762472e6..010564704 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -272,7 +272,7 @@ class FluxProUltraImageNode(ComfyNodeABC): class FluxKontextProImageNode(ComfyNodeABC): """ - Edits images using Flux.1 Kontext Pro via api based on prompt and resolution. + Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.
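+    (The [max] node further below subclasses this node, overriding only BFL_PATH and DESCRIPTION.)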
""" MINIMUM_RATIO = 1 / 4 @@ -321,7 +321,7 @@ class FluxKontextProImageNode(ComfyNodeABC): "seed": ( IO.INT, { - "default": 0, + "default": 1234, "min": 0, "max": 0xFFFFFFFFFFFFFFFF, "control_after_generate": True, @@ -366,6 +366,8 @@ class FluxKontextProImageNode(ComfyNodeABC): API_NODE = True CATEGORY = "api node/image/BFL" + BFL_PATH = "/proxy/bfl/flux-kontext-pro/generate" + def api_call( self, prompt: str, @@ -382,7 +384,7 @@ class FluxKontextProImageNode(ComfyNodeABC): validate_string(prompt, strip_whitespace=False) operation = SynchronousOperation( endpoint=ApiEndpoint( - path="/proxy/bfl/flux-kontext-pro/generate", + path=self.BFL_PATH, method=HttpMethod.POST, request_model=BFLFluxKontextProGenerateRequest, response_model=BFLFluxProGenerateResponse, @@ -411,146 +413,15 @@ class FluxKontextProImageNode(ComfyNodeABC): output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) -class FluxKontextMaxImageNode(ComfyNodeABC): + +class FluxKontextMaxImageNode(FluxKontextProImageNode): """ - Edits images using Flux.1 Kontext Max via api based on prompt and resolution. + Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio. """ - MINIMUM_RATIO = 1 / 4 - MAXIMUM_RATIO = 4 / 1 - MINIMUM_RATIO_STR = "1:4" - MAXIMUM_RATIO_STR = "4:1" + DESCRIPTION = cleandoc(__doc__ or "") + BFL_PATH = "/proxy/bfl/flux-kontext-max/generate" - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation - specify what and how to edit.", - }, - ), - "aspect_ratio": ( - IO.STRING, - { - "default": "16:9", - "tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", - }, - ), - "guidance": ( - IO.FLOAT, - { - "default": 3.0, - "min": 0.1, - "max": 99.0, - "step": 0.1, - "tooltip": "Guidance strength for the image generation process" - }, - ), - "steps": ( - IO.INT, - { - "default": 50, - "min": 1, - "max": 150, - "tooltip": "Number of steps for the image generation process" - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, - ), - }, - "optional": { - "input_image": (IO.IMAGE,), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - @classmethod - def VALIDATE_INPUTS(cls, aspect_ratio: str): - try: - validate_aspect_ratio( - aspect_ratio, - minimum_ratio=cls.MINIMUM_RATIO, - maximum_ratio=cls.MAXIMUM_RATIO, - minimum_ratio_str=cls.MINIMUM_RATIO_STR, - maximum_ratio_str=cls.MAXIMUM_RATIO_STR, - ) - except Exception as e: - return str(e) - return True - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" - - def api_call( - self, - prompt: str, - aspect_ratio: str, - guidance: float, - steps: int, - input_image: Optional[torch.Tensor]=None, - seed=0, - prompt_upsampling=False, - unique_id: Union[str, None] = None, - **kwargs, - ): - if input_image is None: - validate_string(prompt, strip_whitespace=False) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/bfl/flux-kontext-max/generate", - method=HttpMethod.POST, - request_model=BFLFluxKontextProGenerateRequest, - response_model=BFLFluxProGenerateResponse, - ), - request=BFLFluxKontextProGenerateRequest( - prompt=prompt, - prompt_upsampling=prompt_upsampling, - guidance=round(guidance, 1), - steps=steps, - seed=seed, - aspect_ratio=validate_aspect_ratio( - aspect_ratio, - minimum_ratio=self.MINIMUM_RATIO, - maximum_ratio=self.MAXIMUM_RATIO, - minimum_ratio_str=self.MINIMUM_RATIO_STR, - maximum_ratio_str=self.MAXIMUM_RATIO_STR, - ), - input_image=( - input_image - if input_image is None - else convert_image_to_base64(input_image) - ) - ), - auth_kwargs=kwargs, - ) - output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) - return (output_image,) class FluxProImageNode(ComfyNodeABC): """ @@ -1208,8 +1079,8 @@ NODE_CLASS_MAPPINGS = { NODE_DISPLAY_NAME_MAPPINGS = { "FluxProUltraImageNode": "Flux 1.1 [pro] Ultra Image", # "FluxProImageNode": "Flux 1.1 [pro] Image", - "FluxKontextProImageNode": "Flux.1 Kontext Pro Image", - "FluxKontextMaxImageNode": "Flux.1 Kontext Max Image", + "FluxKontextProImageNode": "Flux.1 Kontext [pro] Image", + "FluxKontextMaxImageNode": "Flux.1 Kontext [max] Image", "FluxProExpandNode": "Flux.1 Expand Image", "FluxProFillNode": "Flux.1 Fill Image", "FluxProCannyNode": "Flux.1 Canny Control Image", From 1d9fee79fd93505ee577f9881c8d9a0977affcd3 Mon Sep 17 00:00:00 2001 From: JettHu <35261585+JettHu@users.noreply.github.com> Date: Sat, 31 May 2025 03:08:59 +0800 Subject: [PATCH 0193/1073] Add node for regex replace(sub) operation (#8340) * Add node for regex replace(sub) operation * Apply suggestions from code review add tooltips Co-authored-by: Christian Byrne * Fix indentation --------- Co-authored-by: Christian Byrne --- comfy_extras/nodes_string.py | 41 ++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_string.py b/comfy_extras/nodes_string.py index b24222cee..b1a8ceef0 100644 --- a/comfy_extras/nodes_string.py +++ b/comfy_extras/nodes_string.py @@ -296,6 +296,41 @@ class RegexExtract(): return result, + +class RegexReplace(): + DESCRIPTION = "Find and replace text using regex patterns." 
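+    # Behavior sketch (for illustration): the boolean options below are OR'ed
+    # into a flags bitmask and passed to
+    # re.sub(regex_pattern, replace, string, count=count, flags=flags),
+    # so e.g. regex_pattern=r"\d+" with replace="#" maps "a1b22c" -> "a#b#c".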
+ @classmethod + def INPUT_TYPES(s): + return { + "required": { + "string": (IO.STRING, {"multiline": True}), + "regex_pattern": (IO.STRING, {"multiline": True}), + "replace": (IO.STRING, {"multiline": True}), + }, + "optional": { + "case_insensitive": (IO.BOOLEAN, {"default": True}), + "multiline": (IO.BOOLEAN, {"default": False}), + "dotall": (IO.BOOLEAN, {"default": False, "tooltip": "When enabled, the dot (.) character will match any character including newline characters. When disabled, dots won't match newlines."}), + "count": (IO.INT, {"default": 0, "min": 0, "max": 100, "tooltip": "Maximum number of replacements to make. Set to 0 to replace all occurrences (default). Set to 1 to replace only the first match, 2 for the first two matches, etc."}), + } + } + + RETURN_TYPES = (IO.STRING,) + FUNCTION = "execute" + CATEGORY = "utils/string" + + def execute(self, string, regex_pattern, replace, case_insensitive=True, multiline=False, dotall=False, count=0, **kwargs): + flags = 0 + + if case_insensitive: + flags |= re.IGNORECASE + if multiline: + flags |= re.MULTILINE + if dotall: + flags |= re.DOTALL + result = re.sub(regex_pattern, replace, string, count=count, flags=flags) + return result, + NODE_CLASS_MAPPINGS = { "StringConcatenate": StringConcatenate, "StringSubstring": StringSubstring, @@ -306,7 +341,8 @@ NODE_CLASS_MAPPINGS = { "StringContains": StringContains, "StringCompare": StringCompare, "RegexMatch": RegexMatch, - "RegexExtract": RegexExtract + "RegexExtract": RegexExtract, + "RegexReplace": RegexReplace, } NODE_DISPLAY_NAME_MAPPINGS = { @@ -319,5 +355,6 @@ NODE_DISPLAY_NAME_MAPPINGS = { "StringContains": "Contains", "StringCompare": "Compare", "RegexMatch": "Regex Match", - "RegexExtract": "Regex Extract" + "RegexExtract": "Regex Extract", + "RegexReplace": "Regex Replace", } From 704fc788549112877a0739eebd32f37d6c85982e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 30 May 2025 12:41:02 -0700 Subject: [PATCH 0194/1073] Put ROCm version in tuple to make it easier to enable stuff based on it. (#8348) --- comfy/model_management.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index f5b37e68e..8ae5a5abb 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -297,8 +297,13 @@ except: try: if is_amd(): + try: + rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2])) + except: + rocm_version = (6, -1) arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName logging.info("AMD arch: {}".format(arch)) + logging.info("ROCm version: {}".format(rocm_version)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches From df1aebe52eb888f8087131456775a517934cb245 Mon Sep 17 00:00:00 2001 From: Chenlei Hu Date: Fri, 30 May 2025 17:27:52 -0400 Subject: [PATCH 0195/1073] Remove huchenlei from CODEOWNERS (#8350) --- CODEOWNERS | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 013ea8622..c4acbf06e 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -5,20 +5,20 @@ # Inlined the team members for now. 
# Maintainers -*.md @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/tests/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/tests-unit/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/notebooks/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/script_examples/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/.github/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/requirements.txt @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/pyproject.toml @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +*.md @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/tests/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/tests-unit/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/notebooks/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/script_examples/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/.github/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/requirements.txt @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +/pyproject.toml @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne # Python web server -/api_server/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @christian-byrne -/app/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @christian-byrne -/utils/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata @christian-byrne +/api_server/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne +/app/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne +/utils/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne # Node developers -/comfy_extras/ @yoland68 @robinjhuang @huchenlei @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne -/comfy/comfy_types/ @yoland68 @robinjhuang @huchenlei @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne +/comfy_extras/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne +/comfy/comfy_types/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne From 6c319cbb4e25f38d03d1045077638cc11819e636 Mon Sep 17 00:00:00 2001 From: BennyKok Date: Sat, 31 May 2025 05:51:28 +0800 Subject: [PATCH 0196/1073] fix: custom comfy-api-base works with subpath (#8332) --- comfy_api_nodes/apis/client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 0897d5d78..2a4bac88b 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -327,7 +327,9 @@ class ApiClient: ApiServerError: If the API server is unreachable but internet is working Exception: For other request failures """ - url = urljoin(self.base_url, path) + # Use urljoin but ensure path is relative to 
avoid absolute path behavior + relative_path = path.lstrip('/') + url = urljoin(self.base_url, relative_path) self.check_auth(self.auth_token, self.comfy_api_key) # Combine default headers with any provided headers request_headers = self.get_headers() From 08b7cc750681be5abaf31cfa0f7003eb6fc8cf56 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Fri, 30 May 2025 18:09:54 -0400 Subject: [PATCH 0197/1073] use fused multiply-add pointwise ops in chroma (#8279) --- comfy/ldm/chroma/layers.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py index 35da91ee2..2a0dec606 100644 --- a/comfy/ldm/chroma/layers.py +++ b/comfy/ldm/chroma/layers.py @@ -80,15 +80,13 @@ class DoubleStreamBlock(nn.Module): (img_mod1, img_mod2), (txt_mod1, txt_mod2) = vec # prepare image for attention - img_modulated = self.img_norm1(img) - img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift + img_modulated = torch.addcmul(img_mod1.shift, 1 + img_mod1.scale, self.img_norm1(img)) img_qkv = self.img_attn.qkv(img_modulated) img_q, img_k, img_v = img_qkv.view(img_qkv.shape[0], img_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) img_q, img_k = self.img_attn.norm(img_q, img_k, img_v) # prepare txt for attention - txt_modulated = self.txt_norm1(txt) - txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift + txt_modulated = torch.addcmul(txt_mod1.shift, 1 + txt_mod1.scale, self.txt_norm1(txt)) txt_qkv = self.txt_attn.qkv(txt_modulated) txt_q, txt_k, txt_v = txt_qkv.view(txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v) @@ -102,12 +100,12 @@ class DoubleStreamBlock(nn.Module): txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :] # calculate the img bloks - img = img + img_mod1.gate * self.img_attn.proj(img_attn) - img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift) + img.addcmul_(img_mod1.gate, self.img_attn.proj(img_attn)) + img.addcmul_(img_mod2.gate, self.img_mlp(torch.addcmul(img_mod2.shift, 1 + img_mod2.scale, self.img_norm2(img)))) # calculate the txt bloks - txt += txt_mod1.gate * self.txt_attn.proj(txt_attn) - txt += txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift) + txt.addcmul_(txt_mod1.gate, self.txt_attn.proj(txt_attn)) + txt.addcmul_(txt_mod2.gate, self.txt_mlp(torch.addcmul(txt_mod2.shift, 1 + txt_mod2.scale, self.txt_norm2(txt)))) if txt.dtype == torch.float16: txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504) @@ -152,7 +150,7 @@ class SingleStreamBlock(nn.Module): def forward(self, x: Tensor, pe: Tensor, vec: Tensor, attn_mask=None) -> Tensor: mod = vec - x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift + x_mod = torch.addcmul(mod.shift, 1 + mod.scale, self.pre_norm(x)) qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) @@ -162,7 +160,7 @@ class SingleStreamBlock(nn.Module): attn = attention(q, k, v, pe=pe, mask=attn_mask) # compute activation in mlp stream, cat again and run second linear layer output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2)) - x += mod.gate * output + x.addcmul_(mod.gate, output) if x.dtype == torch.float16: x = torch.nan_to_num(x, nan=0.0, posinf=65504, 
neginf=-65504) return x @@ -178,6 +176,6 @@ class LastLayer(nn.Module): shift, scale = vec shift = shift.squeeze(1) scale = scale.squeeze(1) - x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :] + x = torch.addcmul(shift[:, None, :], 1 + scale[:, None, :], self.norm_final(x)) x = self.linear(x) return x From 97f23b81f3421255ec4b425d2d8f4841207e0cd8 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sat, 31 May 2025 14:05:42 +0800 Subject: [PATCH 0198/1073] Bump template to 0.1.23 (#8353) Correct some error settings in VACE --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d2baedf0d..3e2991563 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.20.7 -comfyui-workflow-templates==0.1.22 +comfyui-workflow-templates==0.1.23 torch torchsde torchvision From 19e45e9b0e235acafc120a7532ce3825b8a325b9 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 31 May 2025 17:00:20 -0700 Subject: [PATCH 0199/1073] Make it easier to pass lists of tensors to models. (#8358) --- comfy/conds.py | 42 ++++++++++++++++++++++++++++++++++++++++++ comfy/model_base.py | 5 +++++ 2 files changed, 47 insertions(+) diff --git a/comfy/conds.py b/comfy/conds.py index 920e25488..2af2a43a3 100644 --- a/comfy/conds.py +++ b/comfy/conds.py @@ -86,3 +86,45 @@ class CONDConstant(CONDRegular): def size(self): return [1] + + +class CONDList(CONDRegular): + def __init__(self, cond): + self.cond = cond + + def process_cond(self, batch_size, device, **kwargs): + out = [] + for c in self.cond: + out.append(comfy.utils.repeat_to_batch_size(c, batch_size).to(device)) + + return self._copy_with(out) + + def can_concat(self, other): + if len(self.cond) != len(other.cond): + return False + for i in range(len(self.cond)): + if self.cond[i].shape != other.cond[i].shape: + return False + + return True + + def concat(self, others): + out = [] + for i in range(len(self.cond)): + o = [self.cond[i]] + for x in others: + o.append(x.cond[i]) + out.append(torch.cat(o)) + + return out + + def size(self): # hackish implementation to make the mem estimation work + o = 0 + c = 1 + for c in self.cond: + size = c.size() + o += math.prod(size) + if len(size) > 1: + c = size[1] + + return [1, c, o // c] diff --git a/comfy/model_base.py b/comfy/model_base.py index 8ed124277..638b04092 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -168,6 +168,11 @@ class BaseModel(torch.nn.Module): if hasattr(extra, "dtype"): if extra.dtype != torch.int and extra.dtype != torch.long: extra = extra.to(dtype) + if isinstance(extra, list): + ex = [] + for ext in extra: + ex.append(ext.to(dtype)) + extra = ex extra_conds[o] = extra t = self.process_timestep(t, x=x, **extra_conds) From 456abad83486416baff00f25f97b7c5e32ab8b74 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Sun, 1 Jun 2025 15:10:04 +1000 Subject: [PATCH 0200/1073] Update frontend to 1.21 (#8366) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3e2991563..c5219bd9f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.20.7 +comfyui-frontend-package==1.21.3 comfyui-workflow-templates==0.1.23 torch torchsde From d062fcc5c0bf32cf0dc55ff469470789e775ae27 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 1 Jun 2025 01:28:52 -0700 Subject: 
[PATCH 0201/1073] [feat] Add ImageStitch node for concatenating images (#8369) * [feat] Add ImageStitch node for concatenating images with borders Add ImageStitch node that concatenates images in four directions with optional borders and intelligent size handling. Features include optional second image input, configurable borders with color selection, automatic batch size matching, and dimension alignment via padding or resizing. Upstreamed from https://github.com/kijai/ComfyUI-KJNodes with enhancements for better error handling and comprehensive test coverage. * [fix] Fix CI issues with CUDA dependencies and linting - Mock CUDA-dependent modules in tests to avoid CI failures on CPU-only runners - Fix ruff linting issues for code style compliance * [fix] Improve CI compatibility by mocking nodes module import Prevent CUDA initialization chain by mocking the nodes module at import time, which is cleaner than deep mocking of CUDA-specific functions. * [refactor] Clean up ImageStitch tests - Remove unnecessary sys.path manipulation (pythonpath set in pytest.ini) - Remove metadata tests that test framework internals rather than functionality - Rename complex scenario test to be more descriptive of what it tests * [refactor] Rename 'border' to 'spacing' for semantic accuracy - Change border_width/border_color to spacing_width/spacing_color in API - Update all tests to use spacing terminology - Update comments and variable names throughout - More accurately describes the gap/separator between images --- comfy_extras/nodes_images.py | 182 +++++++++++++ nodes.py | 1 + tests-unit/comfy_extras_test/__init__.py | 0 .../comfy_extras_test/image_stitch_test.py | 240 ++++++++++++++++++ 4 files changed, 423 insertions(+) create mode 100644 tests-unit/comfy_extras_test/__init__.py create mode 100644 tests-unit/comfy_extras_test/image_stitch_test.py diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 29a5d5b61..6ebf1dbd8 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -14,6 +14,7 @@ import re from io import BytesIO from inspect import cleandoc import torch +import comfy.utils from comfy.comfy_types import FileLocator @@ -229,6 +230,186 @@ class SVG: all_svgs_list.extend(svg_item.data) return SVG(all_svgs_list) + +class ImageStitch: + """Upstreamed from https://github.com/kijai/ComfyUI-KJNodes""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image1": ("IMAGE",), + "direction": (["right", "down", "left", "up"], {"default": "right"}), + "match_image_size": ("BOOLEAN", {"default": True}), + "spacing_width": ( + "INT", + {"default": 0, "min": 0, "max": 1024, "step": 2}, + ), + "spacing_color": ( + ["white", "black", "red", "green", "blue"], + {"default": "white"}, + ), + }, + "optional": { + "image2": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "stitch" + CATEGORY = "image/transform" + DESCRIPTION = """ +Stitches image2 to image1 in the specified direction. +If image2 is not provided, returns image1 unchanged. +Optional spacing can be added between images. 
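+For example, with match_image_size disabled, stitching a 32x24 (HxW) image to
+the right of a 32x32 image yields a 32x56 result; mismatched heights or widths
+are zero-padded (centered) before concatenation.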
+""" + + def stitch( + self, + image1, + direction, + match_image_size, + spacing_width, + spacing_color, + image2=None, + ): + if image2 is None: + return (image1,) + + # Handle batch size differences + if image1.shape[0] != image2.shape[0]: + max_batch = max(image1.shape[0], image2.shape[0]) + if image1.shape[0] < max_batch: + image1 = torch.cat( + [image1, image1[-1:].repeat(max_batch - image1.shape[0], 1, 1, 1)] + ) + if image2.shape[0] < max_batch: + image2 = torch.cat( + [image2, image2[-1:].repeat(max_batch - image2.shape[0], 1, 1, 1)] + ) + + # Match image sizes if requested + if match_image_size: + h1, w1 = image1.shape[1:3] + h2, w2 = image2.shape[1:3] + aspect_ratio = w2 / h2 + + if direction in ["left", "right"]: + target_h, target_w = h1, int(h1 * aspect_ratio) + else: # up, down + target_w, target_h = w1, int(w1 / aspect_ratio) + + image2 = comfy.utils.common_upscale( + image2.movedim(-1, 1), target_w, target_h, "lanczos", "disabled" + ).movedim(1, -1) + + # When not matching sizes, pad to align non-concat dimensions + if not match_image_size: + h1, w1 = image1.shape[1:3] + h2, w2 = image2.shape[1:3] + + if direction in ["left", "right"]: + # For horizontal concat, pad heights to match + if h1 != h2: + target_h = max(h1, h2) + if h1 < target_h: + pad_h = target_h - h1 + pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2 + image1 = torch.nn.functional.pad(image1, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=0.0) + if h2 < target_h: + pad_h = target_h - h2 + pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2 + image2 = torch.nn.functional.pad(image2, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=0.0) + else: # up, down + # For vertical concat, pad widths to match + if w1 != w2: + target_w = max(w1, w2) + if w1 < target_w: + pad_w = target_w - w1 + pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2 + image1 = torch.nn.functional.pad(image1, (0, 0, pad_left, pad_right), mode='constant', value=0.0) + if w2 < target_w: + pad_w = target_w - w2 + pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2 + image2 = torch.nn.functional.pad(image2, (0, 0, pad_left, pad_right), mode='constant', value=0.0) + + # Ensure same number of channels + if image1.shape[-1] != image2.shape[-1]: + max_channels = max(image1.shape[-1], image2.shape[-1]) + if image1.shape[-1] < max_channels: + image1 = torch.cat( + [ + image1, + torch.ones( + *image1.shape[:-1], + max_channels - image1.shape[-1], + device=image1.device, + ), + ], + dim=-1, + ) + if image2.shape[-1] < max_channels: + image2 = torch.cat( + [ + image2, + torch.ones( + *image2.shape[:-1], + max_channels - image2.shape[-1], + device=image2.device, + ), + ], + dim=-1, + ) + + # Add spacing if specified + if spacing_width > 0: + spacing_width = spacing_width + (spacing_width % 2) # Ensure even + + color_map = { + "white": 1.0, + "black": 0.0, + "red": (1.0, 0.0, 0.0), + "green": (0.0, 1.0, 0.0), + "blue": (0.0, 0.0, 1.0), + } + color_val = color_map[spacing_color] + + if direction in ["left", "right"]: + spacing_shape = ( + image1.shape[0], + max(image1.shape[1], image2.shape[1]), + spacing_width, + image1.shape[-1], + ) + else: + spacing_shape = ( + image1.shape[0], + spacing_width, + max(image1.shape[2], image2.shape[2]), + image1.shape[-1], + ) + + spacing = torch.full(spacing_shape, 0.0, device=image1.device) + if isinstance(color_val, tuple): + for i, c in enumerate(color_val): + if i < spacing.shape[-1]: + spacing[..., i] = c + if spacing.shape[-1] == 4: # Add alpha + spacing[..., 3] = 1.0 + else: + 
spacing[..., : min(3, spacing.shape[-1])] = color_val + if spacing.shape[-1] == 4: + spacing[..., 3] = 1.0 + + # Concatenate images + images = [image2, image1] if direction in ["left", "up"] else [image1, image2] + if spacing_width > 0: + images.insert(1, spacing) + + concat_dim = 2 if direction in ["left", "right"] else 1 + return (torch.cat(images, dim=concat_dim),) + + class SaveSVGNode: """ Save SVG files on disk. @@ -318,4 +499,5 @@ NODE_CLASS_MAPPINGS = { "SaveAnimatedWEBP": SaveAnimatedWEBP, "SaveAnimatedPNG": SaveAnimatedPNG, "SaveSVGNode": SaveSVGNode, + "ImageStitch": ImageStitch, } diff --git a/nodes.py b/nodes.py index 2d499051e..67360e7da 100644 --- a/nodes.py +++ b/nodes.py @@ -2061,6 +2061,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "ImagePadForOutpaint": "Pad Image for Outpainting", "ImageBatch": "Batch Images", "ImageCrop": "Image Crop", + "ImageStitch": "Image Stitch", "ImageBlend": "Image Blend", "ImageBlur": "Image Blur", "ImageQuantize": "Image Quantize", diff --git a/tests-unit/comfy_extras_test/__init__.py b/tests-unit/comfy_extras_test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests-unit/comfy_extras_test/image_stitch_test.py b/tests-unit/comfy_extras_test/image_stitch_test.py new file mode 100644 index 000000000..fbaef756c --- /dev/null +++ b/tests-unit/comfy_extras_test/image_stitch_test.py @@ -0,0 +1,240 @@ +import torch +from unittest.mock import patch, MagicMock + +# Mock nodes module to prevent CUDA initialization during import +mock_nodes = MagicMock() +mock_nodes.MAX_RESOLUTION = 16384 + +with patch.dict('sys.modules', {'nodes': mock_nodes}): + from comfy_extras.nodes_images import ImageStitch + + +class TestImageStitch: + + def create_test_image(self, batch_size=1, height=64, width=64, channels=3): + """Helper to create test images with specific dimensions""" + return torch.rand(batch_size, height, width, channels) + + def test_no_image2_passthrough(self): + """Test that when image2 is None, image1 is returned unchanged""" + node = ImageStitch() + image1 = self.create_test_image() + + result = node.stitch(image1, "right", True, 0, "white", image2=None) + + assert len(result) == 1 + assert torch.equal(result[0], image1) + + def test_basic_horizontal_stitch_right(self): + """Test basic horizontal stitching to the right""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=32, width=24) + + result = node.stitch(image1, "right", False, 0, "white", image2) + + assert result[0].shape == (1, 32, 56, 3) # 32 + 24 width + + def test_basic_horizontal_stitch_left(self): + """Test basic horizontal stitching to the left""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=32, width=24) + + result = node.stitch(image1, "left", False, 0, "white", image2) + + assert result[0].shape == (1, 32, 56, 3) # 24 + 32 width + + def test_basic_vertical_stitch_down(self): + """Test basic vertical stitching downward""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=24, width=32) + + result = node.stitch(image1, "down", False, 0, "white", image2) + + assert result[0].shape == (1, 56, 32, 3) # 32 + 24 height + + def test_basic_vertical_stitch_up(self): + """Test basic vertical stitching upward""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=24, width=32) + + result = node.stitch(image1, 
"up", False, 0, "white", image2) + + assert result[0].shape == (1, 56, 32, 3) # 24 + 32 height + + def test_size_matching_horizontal(self): + """Test size matching for horizontal concatenation""" + node = ImageStitch() + image1 = self.create_test_image(height=64, width=64) + image2 = self.create_test_image(height=32, width=32) # Different aspect ratio + + result = node.stitch(image1, "right", True, 0, "white", image2) + + # image2 should be resized to match image1's height (64) with preserved aspect ratio + expected_width = 64 + 64 # original + resized (32*64/32 = 64) + assert result[0].shape == (1, 64, expected_width, 3) + + def test_size_matching_vertical(self): + """Test size matching for vertical concatenation""" + node = ImageStitch() + image1 = self.create_test_image(height=64, width=64) + image2 = self.create_test_image(height=32, width=32) + + result = node.stitch(image1, "down", True, 0, "white", image2) + + # image2 should be resized to match image1's width (64) with preserved aspect ratio + expected_height = 64 + 64 # original + resized (32*64/32 = 64) + assert result[0].shape == (1, expected_height, 64, 3) + + def test_padding_for_mismatched_heights_horizontal(self): + """Test padding when heights don't match in horizontal concatenation""" + node = ImageStitch() + image1 = self.create_test_image(height=64, width=32) + image2 = self.create_test_image(height=48, width=24) # Shorter height + + result = node.stitch(image1, "right", False, 0, "white", image2) + + # Both images should be padded to height 64 + assert result[0].shape == (1, 64, 56, 3) # 32 + 24 width, max(64,48) height + + def test_padding_for_mismatched_widths_vertical(self): + """Test padding when widths don't match in vertical concatenation""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=64) + image2 = self.create_test_image(height=24, width=48) # Narrower width + + result = node.stitch(image1, "down", False, 0, "white", image2) + + # Both images should be padded to width 64 + assert result[0].shape == (1, 56, 64, 3) # 32 + 24 height, max(64,48) width + + def test_spacing_horizontal(self): + """Test spacing addition in horizontal concatenation""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=32, width=24) + spacing_width = 16 + + result = node.stitch(image1, "right", False, spacing_width, "white", image2) + + # Expected width: 32 + 16 (spacing) + 24 = 72 + assert result[0].shape == (1, 32, 72, 3) + + def test_spacing_vertical(self): + """Test spacing addition in vertical concatenation""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=24, width=32) + spacing_width = 16 + + result = node.stitch(image1, "down", False, spacing_width, "white", image2) + + # Expected height: 32 + 16 (spacing) + 24 = 72 + assert result[0].shape == (1, 72, 32, 3) + + def test_spacing_color_values(self): + """Test that spacing colors are applied correctly""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=32, width=32) + + # Test white spacing + result_white = node.stitch(image1, "right", False, 16, "white", image2) + # Check that spacing region contains white values (close to 1.0) + spacing_region = result_white[0][:, :, 32:48, :] # Middle 16 pixels + assert torch.all(spacing_region >= 0.9) # Should be close to white + + # Test black spacing + result_black = node.stitch(image1, "right", False, 16, 
"black", image2) + spacing_region = result_black[0][:, :, 32:48, :] + assert torch.all(spacing_region <= 0.1) # Should be close to black + + def test_odd_spacing_width_made_even(self): + """Test that odd spacing widths are made even""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=32, width=32) + + # Use odd spacing width + result = node.stitch(image1, "right", False, 15, "white", image2) + + # Should be made even (16), so total width = 32 + 16 + 32 = 80 + assert result[0].shape == (1, 32, 80, 3) + + def test_batch_size_matching(self): + """Test that different batch sizes are handled correctly""" + node = ImageStitch() + image1 = self.create_test_image(batch_size=2, height=32, width=32) + image2 = self.create_test_image(batch_size=1, height=32, width=32) + + result = node.stitch(image1, "right", False, 0, "white", image2) + + # Should match larger batch size + assert result[0].shape == (2, 32, 64, 3) + + def test_channel_matching_rgb_to_rgba(self): + """Test that channel differences are handled (RGB + alpha)""" + node = ImageStitch() + image1 = self.create_test_image(channels=3) # RGB + image2 = self.create_test_image(channels=4) # RGBA + + result = node.stitch(image1, "right", False, 0, "white", image2) + + # Should have 4 channels (RGBA) + assert result[0].shape[-1] == 4 + + def test_channel_matching_rgba_to_rgb(self): + """Test that channel differences are handled (RGBA + RGB)""" + node = ImageStitch() + image1 = self.create_test_image(channels=4) # RGBA + image2 = self.create_test_image(channels=3) # RGB + + result = node.stitch(image1, "right", False, 0, "white", image2) + + # Should have 4 channels (RGBA) + assert result[0].shape[-1] == 4 + + def test_all_color_options(self): + """Test all available color options""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=32, width=32) + + colors = ["white", "black", "red", "green", "blue"] + + for color in colors: + result = node.stitch(image1, "right", False, 16, color, image2) + assert result[0].shape == (1, 32, 80, 3) # Basic shape check + + def test_all_directions(self): + """Test all direction options""" + node = ImageStitch() + image1 = self.create_test_image(height=32, width=32) + image2 = self.create_test_image(height=32, width=32) + + directions = ["right", "left", "up", "down"] + + for direction in directions: + result = node.stitch(image1, direction, False, 0, "white", image2) + assert result[0].shape == (1, 32, 64, 3) if direction in ["right", "left"] else (1, 64, 32, 3) + + def test_batch_size_channel_spacing_integration(self): + """Test integration of batch matching, channel matching, size matching, and spacings""" + node = ImageStitch() + image1 = self.create_test_image(batch_size=2, height=64, width=48, channels=3) + image2 = self.create_test_image(batch_size=1, height=32, width=32, channels=4) + + result = node.stitch(image1, "right", True, 8, "red", image2) + + # Should handle: batch matching, size matching, channel matching, spacing + assert result[0].shape[0] == 2 # Batch size matched + assert result[0].shape[-1] == 4 # Channels matched to max + assert result[0].shape[1] == 64 # Height from image1 (size matching) + # Width should be: 48 + 8 (spacing) + resized_image2_width + expected_image2_width = int(64 * (32/32)) # Resized to height 64 + expected_total_width = 48 + 8 + expected_image2_width + assert result[0].shape[2] == expected_total_width + From 
180db6753f019a1936b3774de24392821b53cd8c Mon Sep 17 00:00:00 2001 From: Benjamin Lu Date: Sun, 1 Jun 2025 04:32:32 -0400 Subject: [PATCH 0202/1073] Add Help Menu in NodeLibrarySidebarTab (#8179) --- app/frontend_management.py | 13 +++++++++++++ requirements.txt | 1 + server.py | 7 +++++++ 3 files changed, 21 insertions(+) diff --git a/app/frontend_management.py b/app/frontend_management.py index 7b7923b79..d9ef8c921 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -205,6 +205,19 @@ comfyui-workflow-templates is not installed. """.strip() ) + @classmethod + def embedded_docs_path(cls) -> str: + """Get the path to embedded documentation""" + try: + import comfyui_embedded_docs + + return str( + importlib.resources.files(comfyui_embedded_docs) / "docs" + ) + except ImportError: + logging.info("comfyui-embedded-docs package not found") + return None + @classmethod def parse_version_string(cls, value: str) -> tuple[str, str, str]: """ diff --git a/requirements.txt b/requirements.txt index c5219bd9f..60174ff57 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ comfyui-frontend-package==1.21.3 comfyui-workflow-templates==0.1.23 +comfyui-embedded-docs==0.2.0 torch torchsde torchvision diff --git a/server.py b/server.py index 1b0a73601..6e283fe31 100644 --- a/server.py +++ b/server.py @@ -746,6 +746,13 @@ class PromptServer(): web.static('/templates', workflow_templates_path) ]) + # Serve embedded documentation from the package + embedded_docs_path = FrontendManager.embedded_docs_path() + if embedded_docs_path: + self.app.add_routes([ + web.static('/docs', embedded_docs_path) + ]) + self.app.add_routes([ web.static('/', self.web_root), ]) From fb4754624d0fd4d2b6f46ef15f2c9f3942a1bad5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 1 Jun 2025 02:39:54 -0700 Subject: [PATCH 0203/1073] Make the casting in lists the same as regular inputs. 
(#8373) --- comfy/model_base.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 638b04092..e0c2bcaa8 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -102,6 +102,13 @@ def model_sampling(model_config, model_type): return ModelSampling(model_config) +def convert_tensor(extra, dtype): + if hasattr(extra, "dtype"): + if extra.dtype != torch.int and extra.dtype != torch.long: + extra = extra.to(dtype) + return extra + + class BaseModel(torch.nn.Module): def __init__(self, model_config, model_type=ModelType.EPS, device=None, unet_model=UNetModel): super().__init__() @@ -165,13 +172,13 @@ class BaseModel(torch.nn.Module): extra_conds = {} for o in kwargs: extra = kwargs[o] + if hasattr(extra, "dtype"): - if extra.dtype != torch.int and extra.dtype != torch.long: - extra = extra.to(dtype) - if isinstance(extra, list): + extra = convert_tensor(extra, dtype) + elif isinstance(extra, list): ex = [] for ext in extra: - ex.append(ext.to(dtype)) + ex.append(convert_tensor(ext, dtype)) extra = ex extra_conds[o] = extra From d3bd983b91d981b60da8cacd5489b18307303d2e Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sun, 1 Jun 2025 17:41:17 +0800 Subject: [PATCH 0204/1073] Bump template to 0.1.25 (#8372) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 60174ff57..b98dc1268 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.21.3 -comfyui-workflow-templates==0.1.23 +comfyui-workflow-templates==0.1.25 comfyui-embedded-docs==0.2.0 torch torchsde From fd943c928f549e96507e6e31ba34a7b541684560 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Mon, 2 Jun 2025 03:57:53 +1000 Subject: [PATCH 0205/1073] [BugFix] Update frontend to 1.21.4 (#8377) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b98dc1268..181c23918 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.21.3 +comfyui-frontend-package==1.21.4 comfyui-workflow-templates==0.1.25 comfyui-embedded-docs==0.2.0 torch From 67f57c5bccb22124368cc462ba075ebaf54399ce Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 1 Jun 2025 12:47:07 -0700 Subject: [PATCH 0206/1073] [feat] add custom node testing requirement to issue templates (#8374) Adds mandatory checkbox to bug report and user support templates requiring users to confirm they've tested with custom nodes disabled before submitting issues. --- .github/ISSUE_TEMPLATE/bug-report.yml | 8 ++++++++ .github/ISSUE_TEMPLATE/user-support.yml | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 39d1992d7..69ce998eb 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -15,6 +15,14 @@ body: steps to replicate what went wrong and others will be able to repeat your steps and see the same issue happen. If unsure, ask on the [ComfyUI Matrix Space](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) or the [Comfy Org Discord](https://discord.gg/comfyorg) first. + - type: checkboxes + id: custom-nodes-test + attributes: + label: Custom Node Testing + description: Please confirm you have tried to reproduce the issue with all custom nodes disabled. 
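+      # Tip: launching ComfyUI with the --disable-all-custom-nodes flag is a quick way to test this.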
+ options: + - label: I have tried disabling custom nodes and the issue persists (see [how to disable custom nodes](https://docs.comfy.org/troubleshooting/custom-node-issues#step-1%3A-test-with-all-custom-nodes-disabled) if you need help) + required: true - type: textarea attributes: label: Expected Behavior diff --git a/.github/ISSUE_TEMPLATE/user-support.yml b/.github/ISSUE_TEMPLATE/user-support.yml index df28804c6..50657d493 100644 --- a/.github/ISSUE_TEMPLATE/user-support.yml +++ b/.github/ISSUE_TEMPLATE/user-support.yml @@ -11,6 +11,14 @@ body: **2:** You have made an effort to find public answers to your question before asking here. In other words, you googled it first, and scrolled through recent help topics. If unsure, ask on the [ComfyUI Matrix Space](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) or the [Comfy Org Discord](https://discord.gg/comfyorg) first. + - type: checkboxes + id: custom-nodes-test + attributes: + label: Custom Node Testing + description: Please confirm you have tried to reproduce the issue with all custom nodes disabled. + options: + - label: I have tried disabling custom nodes and the issue persists (see [how to disable custom nodes](https://docs.comfy.org/troubleshooting/custom-node-issues#step-1%3A-test-with-all-custom-nodes-disabled) if you need help) + required: true - type: textarea attributes: label: Your question From 6d46bb4b4c9db3bce46b2838c50252551330eba7 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Mon, 2 Jun 2025 06:47:14 +1000 Subject: [PATCH 0207/1073] [BugFix] Update frontend to 1.21.5 (#8382) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 181c23918..6be14767f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.21.4 +comfyui-frontend-package==1.21.5 comfyui-workflow-templates==0.1.25 comfyui-embedded-docs==0.2.0 torch From 010954d277d460b22635fee67513b5f610c8409d Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Mon, 2 Jun 2025 14:57:44 +1000 Subject: [PATCH 0208/1073] [BugFix] Update frontend to 1.21.6 (#8383) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6be14767f..1c1ff54ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.21.5 +comfyui-frontend-package==1.21.6 comfyui-workflow-templates==0.1.25 comfyui-embedded-docs==0.2.0 torch From 4f4f1c642ad77886a9b5716ad886cde556858a60 Mon Sep 17 00:00:00 2001 From: Jesse Gonyou <168374102+jessegonyou@users.noreply.github.com> Date: Mon, 2 Jun 2025 06:52:44 -0400 Subject: [PATCH 0209/1073] Update fix for potential XSS on /view (#8384) * Update fix for potential XSS on /view This commit uses mimetypes to add more restricted filetypes to prevent them from being served, since mimetypes are what browsers use to determine how to serve files.
* Fix typo Fixed a typo that prevented the program from running --- server.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/server.py b/server.py index 6e283fe31..aca67a2e7 100644 --- a/server.py +++ b/server.py @@ -476,9 +476,8 @@ class PromptServer(): # Get content type from mimetype, defaulting to 'application/octet-stream' content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream' - # For security, force certain extensions to download instead of display - file_extension = os.path.splitext(filename)[1].lower() - if file_extension in {'.html', '.htm', '.js', '.css'}: + # For security, force certain mimetypes to download instead of display + if content_type in {'text/html', 'text/html-sandboxed', 'application/xhtml+xml', 'text/javascript', 'text/css'}: content_type = 'application/octet-stream' # Forces download return web.FileResponse( From 312d511630db4907c3bed04dee297b28f61941a8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 2 Jun 2025 04:22:02 -0700 Subject: [PATCH 0210/1073] Style fix. (#8390) --- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index aca67a2e7..f772545dc 100644 --- a/server.py +++ b/server.py @@ -390,7 +390,7 @@ class PromptServer(): async def view_image(request): if "filename" in request.rel_url.query: filename = request.rel_url.query["filename"] - filename,output_dir = folder_paths.annotated_filepath(filename) + filename, output_dir = folder_paths.annotated_filepath(filename) if not filename: return web.Response(status=400) From 856448060ce42674eea66c835bd754644c322723 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 2 Jun 2025 18:57:50 -0700 Subject: [PATCH 0211/1073] [feat] Add GetImageSize node (#8386) * [feat] Add GetImageSize node to return image dimensions Added a simple GetImageSize node in comfy_extras/nodes_images.py that returns width and height of input images. The node displays dimensions on the UI via PromptServer and provides width/height as outputs for further processing. * add display name mapping * [fix] Add server module mock to unit tests for PromptServer import Updated test to mock server module preventing import errors from the new PromptServer usage in GetImageSize node. Uses direct import pattern consistent with rest of codebase. 
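As an illustrative aside (a sketch, not part of the commit): ComfyUI IMAGE
tensors are laid out as [batch, height, width, channels], which is why the node
reads height from shape[1] and width from shape[2]:

    import torch

    image = torch.rand(1, 512, 768, 3)  # [B, H, W, C], float values in 0..1
    height = image.shape[1]             # 512
    width = image.shape[2]              # 768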
--- comfy_extras/nodes_images.py | 34 ++++++++++++++++++- nodes.py | 1 + .../comfy_extras_test/image_stitch_test.py | 5 ++- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 6ebf1dbd8..58b29f9a9 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -16,7 +16,8 @@ from inspect import cleandoc import torch import comfy.utils -from comfy.comfy_types import FileLocator +from comfy.comfy_types import FileLocator, IO +from server import PromptServer MAX_RESOLUTION = nodes.MAX_RESOLUTION @@ -491,6 +492,36 @@ class SaveSVGNode: counter += 1 return { "ui": { "images": results } } +class GetImageSize: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": (IO.IMAGE,), + }, + "hidden": { + "unique_id": "UNIQUE_ID", + } + } + + RETURN_TYPES = (IO.INT, IO.INT) + RETURN_NAMES = ("width", "height") + FUNCTION = "get_size" + + CATEGORY = "image" + DESCRIPTION = """Returns width and height of the image, and passes it through unchanged.""" + + def get_size(self, image, unique_id=None) -> tuple[int, int]: + height = image.shape[1] + width = image.shape[2] + + # Send progress text to display size on the node + if unique_id: + PromptServer.instance.send_progress_text(f"width: {width}, height: {height}", unique_id) + + return width, height + NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, @@ -500,4 +531,5 @@ NODE_CLASS_MAPPINGS = { "SaveAnimatedPNG": SaveAnimatedPNG, "SaveSVGNode": SaveSVGNode, "ImageStitch": ImageStitch, + "GetImageSize": GetImageSize, } diff --git a/nodes.py b/nodes.py index 67360e7da..637279ffb 100644 --- a/nodes.py +++ b/nodes.py @@ -2067,6 +2067,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "ImageQuantize": "Image Quantize", "ImageSharpen": "Image Sharpen", "ImageScaleToTotalPixels": "Scale Image to Total Pixels", + "GetImageSize": "Get Image Size", # _for_testing "VAEDecodeTiled": "VAE Decode (Tiled)", "VAEEncodeTiled": "VAE Encode (Tiled)", diff --git a/tests-unit/comfy_extras_test/image_stitch_test.py b/tests-unit/comfy_extras_test/image_stitch_test.py index fbaef756c..b5a0f022c 100644 --- a/tests-unit/comfy_extras_test/image_stitch_test.py +++ b/tests-unit/comfy_extras_test/image_stitch_test.py @@ -5,7 +5,10 @@ from unittest.mock import patch, MagicMock mock_nodes = MagicMock() mock_nodes.MAX_RESOLUTION = 16384 -with patch.dict('sys.modules', {'nodes': mock_nodes}): +# Mock server module for PromptServer +mock_server = MagicMock() + +with patch.dict('sys.modules', {'nodes': mock_nodes, 'server': mock_server}): from comfy_extras.nodes_images import ImageStitch From 310f4b6ef842eb513eff1b68ce689eb0b990b6ec Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 3 Jun 2025 01:26:44 -0700 Subject: [PATCH 0212/1073] Add api nodes to readme. (#8402) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 47514d1b4..1ceaccb3c 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,8 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) - Latent previews with [TAESD](#how-to-show-high-quality-previews) - Starts up very fast. -- Works fully offline: will never download anything. +- Works fully offline: core will never download anything unless you want to. 
+- Optional API nodes to use paid models from external providers through the online [Comfy API](https://docs.comfy.org/tutorials/api-nodes/overview). - [Config file](extra_model_paths.yaml.example) to set the search paths for models. Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/) From 47d55b8b452f409bea39d2cd1c2229f0bb460c75 Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Tue, 3 Jun 2025 19:59:13 -0400 Subject: [PATCH 0213/1073] add support to read pyproject.toml from custom node (#8357) * add support to read pyproject.toml from custom node * sf * use pydantic instead * sf * use pydantic_settings * remove unnecessary try/catch and handle single-file python node * sf --- comfy_config/config_parser.py | 97 +++++++++++++++++++++++++++++++++++ comfy_config/types.py | 80 +++++++++++++++++++++++++++++ 2 files changed, 177 insertions(+) create mode 100644 comfy_config/config_parser.py create mode 100644 comfy_config/types.py diff --git a/comfy_config/config_parser.py b/comfy_config/config_parser.py new file mode 100644 index 000000000..a9cbd94dd --- /dev/null +++ b/comfy_config/config_parser.py @@ -0,0 +1,97 @@ +import os +from pathlib import Path +from typing import Optional + +from pydantic_settings import PydanticBaseSettingsSource, TomlConfigSettingsSource + +from comfy_config.types import ( + ComfyConfig, + ProjectConfig, + PyProjectConfig, + PyProjectSettings +) + +""" +Extract configuration from a custom node directory's pyproject.toml file or a Python file. + +This function reads and parses the pyproject.toml file in the specified directory +to extract project and ComfyUI-specific configuration information. If no +pyproject.toml file is found, it creates a minimal configuration using the +folder name as the project name. If a Python file is provided, it uses the +file name (without extension) as the project name. + +Args: + path (str): Path to the directory containing the pyproject.toml file, or + path to a .py file. If pyproject.toml doesn't exist in a directory, + the folder name will be used as the default project name. If a .py + file is provided, the filename (without .py extension) will be used + as the project name. + +Returns: + Optional[PyProjectConfig]: A PyProjectConfig object containing: + - project: Basic project information (name, version, dependencies, etc.) + - tool_comfy: ComfyUI-specific configuration (publisher_id, models, etc.) + Returns None if configuration extraction fails or if the provided file + is not a Python file. 
+ +Notes: + - If pyproject.toml is missing in a directory, creates a default config with folder name + - If a .py file is provided, creates a default config with filename (without extension) + - Returns None for non-Python files + +Example: + >>> from comfy_config import config_parser + >>> # For directory + >>> custom_node_dir = os.path.dirname(os.path.realpath(__file__)) + >>> project_config = config_parser.extract_node_configuration(custom_node_dir) + >>> print(project_config.project.name) # "my_custom_node" or name from pyproject.toml + >>> + >>> # For single-file Python node file + >>> py_file_path = os.path.realpath(__file__) # "/path/to/my_node.py" + >>> project_config = config_parser.extract_node_configuration(py_file_path) + >>> print(project_config.project.name) # "my_node" +""" +def extract_node_configuration(path) -> Optional[PyProjectConfig]: + if os.path.isfile(path): + file_path = Path(path) + + if file_path.suffix.lower() != '.py': + return None + + project_name = file_path.stem + project = ProjectConfig(name=project_name) + comfy = ComfyConfig() + return PyProjectConfig(project=project, tool_comfy=comfy) + + folder_name = os.path.basename(path) + toml_path = Path(path) / "pyproject.toml" + + if not toml_path.exists(): + project = ProjectConfig(name=folder_name) + comfy = ComfyConfig() + return PyProjectConfig(project=project, tool_comfy=comfy) + + raw_settings = load_pyproject_settings(toml_path) + + project_data = raw_settings.project + + tool_data = raw_settings.tool + comfy_data = tool_data.get("comfy", {}) if tool_data else {} + + return PyProjectConfig(project=project_data, tool_comfy=comfy_data) + + +def load_pyproject_settings(toml_path: Path) -> PyProjectSettings: + class PyProjectLoader(PyProjectSettings): + @classmethod + def settings_customise_sources( + cls, + settings_cls, + init_settings: PydanticBaseSettingsSource, + env_settings: PydanticBaseSettingsSource, + dotenv_settings: PydanticBaseSettingsSource, + file_secret_settings: PydanticBaseSettingsSource, + ): + return (TomlConfigSettingsSource(settings_cls, toml_path),) + + return PyProjectLoader() diff --git a/comfy_config/types.py b/comfy_config/types.py new file mode 100644 index 000000000..611982083 --- /dev/null +++ b/comfy_config/types.py @@ -0,0 +1,80 @@ +from pydantic import BaseModel, Field +from pydantic_settings import BaseSettings, SettingsConfigDict +from typing import List, Optional + +# IMPORTANT: The type definitions specified in pyproject.toml for custom nodes +# must remain synchronized with the corresponding files in the https://github.com/Comfy-Org/comfy-cli/blob/main/comfy_cli/registry/types.py. +# Any changes to one must be reflected in the other to maintain consistency. 
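+#
+# Mapping note: the [project] table of a custom node's pyproject.toml populates
+# ProjectConfig below, and the [tool.comfy] table populates ComfyConfig;
+# PyProjectConfig bundles the two (see comfy_config/config_parser.py).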
+ +class NodeVersion(BaseModel): + changelog: str + dependencies: List[str] + deprecated: bool + id: str + version: str + download_url: str + + +class Node(BaseModel): + id: str + name: str + description: str + author: Optional[str] = None + license: Optional[str] = None + icon: Optional[str] = None + repository: Optional[str] = None + tags: List[str] = Field(default_factory=list) + latest_version: Optional[NodeVersion] = None + + +class PublishNodeVersionResponse(BaseModel): + node_version: NodeVersion + signedUrl: str + + +class URLs(BaseModel): + homepage: str = Field(default="", alias="Homepage") + documentation: str = Field(default="", alias="Documentation") + repository: str = Field(default="", alias="Repository") + issues: str = Field(default="", alias="Issues") + + +class Model(BaseModel): + location: str + model_url: str + + +class ComfyConfig(BaseModel): + publisher_id: str = Field(default="", alias="PublisherId") + display_name: str = Field(default="", alias="DisplayName") + icon: str = Field(default="", alias="Icon") + models: List[Model] = Field(default_factory=list, alias="Models") + includes: List[str] = Field(default_factory=list) + + +class License(BaseModel): + file: str = "" + text: str = "" + + +class ProjectConfig(BaseModel): + name: str = "" + description: str = "" + version: str = "1.0.0" + requires_python: str = Field(default=">= 3.9", alias="requires-python") + dependencies: List[str] = Field(default_factory=list) + license: License = Field(default_factory=License) + urls: URLs = Field(default_factory=URLs) + + +class PyProjectConfig(BaseModel): + project: ProjectConfig = Field(default_factory=ProjectConfig) + tool_comfy: ComfyConfig = Field(default_factory=ComfyConfig) + + +class PyProjectSettings(BaseSettings): + project: dict = Field(default_factory=dict) + + tool: dict = Field(default_factory=dict) + + model_config = SettingsConfigDict() From 20687293fe48b5e47610294286cea54d6c5343a4 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Wed, 4 Jun 2025 05:57:13 -0700 Subject: [PATCH 0214/1073] Update frontend to 1.21.7 (#8410) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1c1ff54ac..c470b9ead 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.21.6 +comfyui-frontend-package==1.21.7 comfyui-workflow-templates==0.1.25 comfyui-embedded-docs==0.2.0 torch From fcc1643c527bfe5d2f5472e66cb3ed3dcd95d08c Mon Sep 17 00:00:00 2001 From: SD Date: Wed, 4 Jun 2025 18:33:42 +0530 Subject: [PATCH 0215/1073] Sub call to deprecated pillow API `Image.ANTIALIAS` (#8415) ANTIALIAS was removed in Pillow 10.0.0 --- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index f772545dc..878b5eeb1 100644 --- a/server.py +++ b/server.py @@ -788,7 +788,7 @@ class PromptServer(): if hasattr(Image, 'Resampling'): resampling = Image.Resampling.BILINEAR else: - resampling = Image.ANTIALIAS + resampling = Image.Resampling.LANCZOS image = ImageOps.contain(image, (max_size, max_size), resampling) type_num = 1 From 871749c20842dbdd7696ba77c78ee7d4c246ef6a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 4 Jun 2025 06:40:21 -0700 Subject: [PATCH 0216/1073] Add batch to GetImageSize node. 
(#8419) --- comfy_extras/nodes_images.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 58b29f9a9..b1e0d4666 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -505,8 +505,8 @@ class GetImageSize: } } - RETURN_TYPES = (IO.INT, IO.INT) - RETURN_NAMES = ("width", "height") + RETURN_TYPES = (IO.INT, IO.INT, IO.INT) + RETURN_NAMES = ("width", "height", "batch_size") FUNCTION = "get_size" CATEGORY = "image" @@ -515,12 +515,13 @@ class GetImageSize: def get_size(self, image, unique_id=None) -> tuple[int, int]: height = image.shape[1] width = image.shape[2] + batch_size = image.shape[0] # Send progress text to display size on the node if unique_id: - PromptServer.instance.send_progress_text(f"width: {width}, height: {height}", unique_id) + PromptServer.instance.send_progress_text(f"width: {width}, height: {height}\n batch size: {batch_size}", unique_id) - return width, height + return width, height, batch_size NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, From 3aa83feeec3771d98eb780550d4b6cb9d98e88ae Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Wed, 4 Jun 2025 18:56:38 -0700 Subject: [PATCH 0217/1073] [refactor] remove version prefixes from Ideogram node categories (#8418) Simplifies node organization by consolidating all Ideogram nodes under a single category instead of version-specific subcategories. --- comfy_api_nodes/nodes_ideogram.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index b1cbf511d..b8487355f 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -324,7 +324,7 @@ class IdeogramV1(ComfyNodeABC): RETURN_TYPES = (IO.IMAGE,) FUNCTION = "api_call" - CATEGORY = "api node/image/Ideogram/v1" + CATEGORY = "api node/image/Ideogram" DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True @@ -483,7 +483,7 @@ class IdeogramV2(ComfyNodeABC): RETURN_TYPES = (IO.IMAGE,) FUNCTION = "api_call" - CATEGORY = "api node/image/Ideogram/v2" + CATEGORY = "api node/image/Ideogram" DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True @@ -649,7 +649,7 @@ class IdeogramV3(ComfyNodeABC): RETURN_TYPES = (IO.IMAGE,) FUNCTION = "api_call" - CATEGORY = "api node/image/Ideogram/v3" + CATEGORY = "api node/image/Ideogram" DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True From 866f6cdab4bd5de95ee6296d1b418c455f67f929 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 4 Jun 2025 22:18:54 -0400 Subject: [PATCH 0218/1073] ComfyUI version 0.3.40 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index f742410b1..6962c3661 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.39" +__version__ = "0.3.40" diff --git a/pyproject.toml b/pyproject.toml index 28a6158e0..03841bc94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.39" +version = "0.3.40" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 4248b1618ffd0878ae2502fd4633e30bcbd7554b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 5 Jun 2025 07:07:17 -0700 Subject: [PATCH 0219/1073] Let chroma TE work on regular flux. (#8429) --- comfy/ldm/flux/controlnet.py | 5 ++++- comfy/ldm/flux/model.py | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/flux/controlnet.py b/comfy/ldm/flux/controlnet.py index 5322c4891..dbd2a47c0 100644 --- a/comfy/ldm/flux/controlnet.py +++ b/comfy/ldm/flux/controlnet.py @@ -121,6 +121,9 @@ class ControlNetFlux(Flux): if img.ndim != 3 or txt.ndim != 3: raise ValueError("Input img and txt tensors must have 3 dimensions.") + if y is None: + y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype) + # running on sequences img img = self.img_in(img) @@ -174,7 +177,7 @@ class ControlNetFlux(Flux): out["output"] = out_output[:self.main_model_single] return out - def forward(self, x, timesteps, context, y, guidance=None, hint=None, **kwargs): + def forward(self, x, timesteps, context, y=None, guidance=None, hint=None, **kwargs): patch_size = 2 if self.latent_input: hint = comfy.ldm.common_dit.pad_to_patch_size(hint, (patch_size, patch_size)) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index ef4ba4106..53f27e3a7 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -101,6 +101,10 @@ class Flux(nn.Module): transformer_options={}, attn_mask: Tensor = None, ) -> Tensor: + + if y is None: + y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype) + patches_replace = transformer_options.get("patches_replace", {}) if img.ndim != 3 or txt.ndim != 3: raise ValueError("Input img and txt tensors must have 3 dimensions.") @@ -188,7 +192,7 @@ class Flux(nn.Module): img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) return img - def forward(self, x, timestep, context, y, guidance=None, control=None, transformer_options={}, **kwargs): + def forward(self, x, timestep, context, y=None, guidance=None, control=None, transformer_options={}, **kwargs): bs, c, h, w = x.shape patch_size = self.patch_size x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size)) From d8759c772bd8ded66e08dac56bb8da636c837c9a Mon Sep 17 00:00:00 2001 From: Olexandr88 Date: Thu, 5 Jun 2025 20:44:29 +0300 Subject: [PATCH 0220/1073] Update README.md (#8427) --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 1ceaccb3c..9a35ab7ea 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ [![Website][website-shield]][website-url] [![Dynamic JSON Badge][discord-shield]][discord-url] +[![Twitter][twitter-shield]][twitter-url] [![Matrix][matrix-shield]][matrix-url]
[![][github-release-shield]][github-release-link] @@ -20,6 +21,8 @@ [discord-shield]: https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fdiscord.com%2Fapi%2Finvites%2Fcomfyorg%3Fwith_counts%3Dtrue&query=%24.approximate_member_count&logo=discord&logoColor=white&label=Discord&color=green&suffix=%20total [discord-url]: https://www.comfy.org/discord +[twitter-shield]: https://img.shields.io/twitter/follow/ComfyUI +[twitter-url]: https://x.com/ComfyUI [github-release-shield]: https://img.shields.io/github/v/release/comfyanonymous/ComfyUI?style=flat&sort=semver [github-release-link]: https://github.com/comfyanonymous/ComfyUI/releases From 3b4b171e18c9026eaf8560b9a766aab87f7639e5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 6 Jun 2025 06:43:27 -0700 Subject: [PATCH 0221/1073] Alternate fix for #8435 (#8442) --- comfy/controlnet.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 11483e21d..9a47b86f2 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -390,8 +390,9 @@ class ControlLora(ControlNet): pass for k in self.control_weights: - if k not in {"lora_controlnet"}: - comfy.utils.set_attr_param(self.control_model, k, self.control_weights[k].to(dtype).to(comfy.model_management.get_torch_device())) + if (k not in {"lora_controlnet"}): + if (k.endswith(".up") or k.endswith(".down") or k.endswith(".weight") or k.endswith(".bias")) and ("__" not in k): + comfy.utils.set_attr_param(self.control_model, k, self.control_weights[k].to(dtype).to(comfy.model_management.get_torch_device())) def copy(self): c = ControlLora(self.control_weights, global_average_pooling=self.global_average_pooling) From daf9d25ee282051055e773185aa161cd2a1ce3a6 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 7 Jun 2025 07:01:15 -0700 Subject: [PATCH 0222/1073] Cleaner torch version comparisons. 
(#8453) --- comfy/model_management.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 8ae5a5abb..a107f0d49 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -305,7 +305,7 @@ try: logging.info("AMD arch: {}".format(arch)) logging.info("ROCm version: {}".format(rocm_version)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: - if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much + if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches ENABLE_PYTORCH_ATTENTION = True except: @@ -328,7 +328,7 @@ except: pass try: - if torch_version_numeric[0] == 2 and torch_version_numeric[1] >= 5: + if torch_version_numeric >= (2, 5): torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True) except: logging.warning("Warning, could not set allow_fp16_bf16_reduction_math_sdp") @@ -1276,11 +1276,11 @@ def supports_fp8_compute(device=None): if props.minor < 9: return False - if torch_version_numeric[0] < 2 or (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 3): + if torch_version_numeric < (2, 3): return False if WINDOWS: - if (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 4): + if torch_version_numeric < (2, 4): return False return True From 97755eed46ccb797cb14a692a4c2931ebf3ad60c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 8 Jun 2025 11:15:34 -0700 Subject: [PATCH 0223/1073] Enable fp8 ops by default on gfx1201 (#8464) --- comfy/model_management.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index a107f0d49..187402748 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -295,6 +295,7 @@ except: pass +SUPPORT_FP8_OPS = args.supports_fp8_compute try: if is_amd(): try: @@ -308,6 +309,10 @@ try: if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches ENABLE_PYTORCH_ATTENTION = True + if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): + if any((a in arch) for a in ["gfx1201"]): # TODO: more arches + SUPPORT_FP8_OPS = True + except: pass @@ -1262,7 +1267,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma return False def supports_fp8_compute(device=None): - if args.supports_fp8_compute: + if SUPPORT_FP8_OPS: return True if not is_nvidia(): From 7f800d04fae7a20f9d110d250d493ba94b5310f3 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 9 Jun 2025 09:50:39 -0700 Subject: [PATCH 0224/1073] Enable AMD fp8 and pytorch attention on some GPUs. (#8474) Information is from the pytorch source code. 
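For reference, the gating introduced in this and the two preceding patches
reduces to plain lexicographic tuple comparisons plus a substring check on the
arch name. A standalone sketch (the version and arch values are illustrative,
not read from a real device):

    torch_version_numeric = (2, 7)  # e.g. parsed from torch.__version__
    rocm_version = (6, 4)
    arch = "gfx942"

    SUPPORT_FP8_OPS = False
    if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
        if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]):
            SUPPORT_FP8_OPS = True  # tuples compare element-wise, left to right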
--- comfy/model_management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 187402748..283815611 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -307,10 +307,10 @@ try: logging.info("ROCm version: {}".format(rocm_version)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much - if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches + if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx1201 and gfx950 ENABLE_PYTORCH_ATTENTION = True if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): - if any((a in arch) for a in ["gfx1201"]): # TODO: more arches + if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches SUPPORT_FP8_OPS = True except: From c7b25784b141a298d8ee5f0d830a29da6e8a3d54 Mon Sep 17 00:00:00 2001 From: Kent Mewhort Date: Mon, 9 Jun 2025 13:05:54 -0400 Subject: [PATCH 0225/1073] Fix WebcamCapture IS_CHANGED signature (#8413) --- comfy_extras/nodes_webcam.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy_extras/nodes_webcam.py b/comfy_extras/nodes_webcam.py index 062b15cf8..5bf80b4c6 100644 --- a/comfy_extras/nodes_webcam.py +++ b/comfy_extras/nodes_webcam.py @@ -23,6 +23,10 @@ class WebcamCapture(nodes.LoadImage): def load_capture(self, image, **kwargs): return super().load_image(folder_paths.get_annotated_filepath(image)) + @classmethod + def IS_CHANGED(cls, image, width, height, capture_on_queue): + return super().IS_CHANGED(image) + NODE_CLASS_MAPPINGS = { "WebcamCapture": WebcamCapture, From 6e28a46454f2188844d60d54e5f3400a8ec0b81e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 10 Jun 2025 10:06:24 -0700 Subject: [PATCH 0226/1073] Apple most likely is never fixing the fp16 attention bug. (#8485) --- comfy/model_management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 283815611..054291432 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1052,7 +1052,7 @@ def pytorch_attention_flash_attention(): global ENABLE_PYTORCH_ATTENTION if ENABLE_PYTORCH_ATTENTION: #TODO: more reliable way of checking for flash attention? 
- if is_nvidia(): #pytorch flash attention only works on Nvidia + if is_nvidia(): return True if is_intel_xpu(): return True @@ -1068,7 +1068,7 @@ def force_upcast_attention_dtype(): upcast = args.force_upcast_attention macos_version = mac_version() - if macos_version is not None and ((14, 5) <= macos_version < (16,)): # black image bug on recent versions of macOS + if macos_version is not None and ((14, 5) <= macos_version): # black image bug on recent versions of macOS, I don't think it's ever getting fixed upcast = True if upcast: From 373a9386a438327d230a99b6a875c6aa6a589fb6 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 11 Jun 2025 17:10:46 +0800 Subject: [PATCH 0227/1073] Update requirements.txt (#8487) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c470b9ead..ee4edb1dc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.21.7 -comfyui-workflow-templates==0.1.25 +comfyui-workflow-templates==0.1.27 comfyui-embedded-docs==0.2.0 torch torchsde From af1eb58be8ce7707f9884b8caac843661e9b4017 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 11 Jun 2025 12:09:11 -0700 Subject: [PATCH 0228/1073] Fix black images on some flux models in fp16. (#8495) --- comfy/ldm/flux/model.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 53f27e3a7..09dd2482c 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -187,6 +187,9 @@ class Flux(nn.Module): if add is not None: img[:, txt.shape[1] :, ...] += add + if img.dtype == torch.float16: + img = torch.nan_to_num(img, nan=0.0, posinf=65504, neginf=-65504) + img = img[:, txt.shape[1] :, ...] img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) From 8a4ff747bd919afffe23b68c2e3480295b91998b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 11 Jun 2025 12:13:29 -0700 Subject: [PATCH 0229/1073] Fix mistake in last commit. (#8496) * Move to right place. --- comfy/ldm/flux/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 09dd2482c..846703d52 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -159,6 +159,9 @@ class Flux(nn.Module): if add is not None: img += add + if img.dtype == torch.float16: + img = torch.nan_to_num(img, nan=0.0, posinf=65504, neginf=-65504) + img = torch.cat((txt, img), 1) for i, block in enumerate(self.single_blocks): @@ -187,9 +190,6 @@ class Flux(nn.Module): if add is not None: img[:, txt.shape[1] :, ...] += add - if img.dtype == torch.float16: - img = torch.nan_to_num(img, nan=0.0, posinf=65504, neginf=-65504) - img = img[:, txt.shape[1] :, ...] 
img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) From 9685d4f3c3e34b50d3f2e9bcb52026db7f7e624c Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Wed, 11 Jun 2025 16:21:28 -0400 Subject: [PATCH 0230/1073] auto register web folder from pyproject (#8478) * auto register web folder from pyproject * need pydantic-settings as dependency --- comfy_config/types.py | 15 ++++++++++++++- nodes.py | 16 ++++++++++++++++ requirements.txt | 1 + 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/comfy_config/types.py b/comfy_config/types.py index 611982083..11261a136 100644 --- a/comfy_config/types.py +++ b/comfy_config/types.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, field_validator from pydantic_settings import BaseSettings, SettingsConfigDict from typing import List, Optional @@ -50,6 +50,7 @@ class ComfyConfig(BaseModel): icon: str = Field(default="", alias="Icon") models: List[Model] = Field(default_factory=list, alias="Models") includes: List[str] = Field(default_factory=list) + web: Optional[str] = None class License(BaseModel): @@ -66,6 +67,18 @@ class ProjectConfig(BaseModel): license: License = Field(default_factory=License) urls: URLs = Field(default_factory=URLs) + @field_validator('license', mode='before') + @classmethod + def validate_license(cls, v): + if isinstance(v, str): + return License(text=v) + elif isinstance(v, dict): + return License(**v) + elif isinstance(v, License): + return v + else: + return License() + class PyProjectConfig(BaseModel): project: ProjectConfig = Field(default_factory=ProjectConfig) diff --git a/nodes.py b/nodes.py index 637279ffb..19612479b 100644 --- a/nodes.py +++ b/nodes.py @@ -38,6 +38,8 @@ import folder_paths import latent_preview import node_helpers +from comfy_config import config_parser + def before_node_execution(): comfy.model_management.throw_exception_if_processing_interrupted() @@ -2125,6 +2127,20 @@ def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes LOADED_MODULE_DIRS[module_name] = os.path.abspath(module_dir) + project_config = config_parser.extract_node_configuration(module_path) + + web_dir_name = project_config.tool_comfy.web + + if web_dir_name: + web_dir_path = os.path.join(module_path, web_dir_name) + + if os.path.isdir(web_dir_path): + project_name = project_config.project.name + + EXTENSION_WEB_DIRS[project_name] = web_dir_path + + logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name)) + if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None: web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY"))) if os.path.isdir(web_dir): diff --git a/requirements.txt b/requirements.txt index ee4edb1dc..ee24b1cb3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,3 +25,4 @@ spandrel soundfile av>=14.2.0 pydantic~=2.0 +pydantic-settings~=2.0 From 50c605e957e681f24b688fa0c7b8f27a578bb3c6 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 11 Jun 2025 21:43:39 +0100 Subject: [PATCH 0231/1073] Add support for sqlite database (#8444) * Add support for sqlite database * fix --- alembic.ini | 84 ++++++++++++++++++++++++++++ alembic_db/README.md | 4 ++ alembic_db/env.py | 64 +++++++++++++++++++++ alembic_db/script.py.mako | 28 ++++++++++ app/database/db.py | 112 +++++++++++++++++++++++++++++++++++++ app/database/models.py | 14 +++++ app/frontend_management.py | 17 ++---- 
comfy/cli_args.py | 5 ++ main.py | 10 ++++ requirements.txt | 2 + utils/install_util.py | 18 ++++++ 11 files changed, 345 insertions(+), 13 deletions(-) create mode 100644 alembic.ini create mode 100644 alembic_db/README.md create mode 100644 alembic_db/env.py create mode 100644 alembic_db/script.py.mako create mode 100644 app/database/db.py create mode 100644 app/database/models.py create mode 100644 utils/install_util.py diff --git a/alembic.ini b/alembic.ini new file mode 100644 index 000000000..12f18712f --- /dev/null +++ b/alembic.ini @@ -0,0 +1,84 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +# Use forward slashes (/) also on windows to provide an os agnostic path +script_location = alembic_db + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. +# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to alembic_db/versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "version_path_separator" below. +# version_locations = %(here)s/bar:%(here)s/bat:alembic_db/versions + +# version path separator; As mentioned above, this is the character used to split +# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. +# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. +# Valid values for version_path_separator are: +# +# version_path_separator = : +# version_path_separator = ; +# version_path_separator = space +# version_path_separator = newline +# +# Use os.pathsep. Default configuration used for new projects. +version_path_separator = os + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +sqlalchemy.url = sqlite:///user/comfyui.db + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. 
See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the exec runner, execute a binary +# hooks = ruff +# ruff.type = exec +# ruff.executable = %(here)s/.venv/bin/ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME diff --git a/alembic_db/README.md b/alembic_db/README.md new file mode 100644 index 000000000..3b808c7ca --- /dev/null +++ b/alembic_db/README.md @@ -0,0 +1,4 @@ +## Generate new revision + +1. Update models in `/app/database/models.py` +2. Run `alembic revision --autogenerate -m "{your message}"` diff --git a/alembic_db/env.py b/alembic_db/env.py new file mode 100644 index 000000000..4d7770679 --- /dev/null +++ b/alembic_db/env.py @@ -0,0 +1,64 @@ +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + + +from app.database.models import Base +target_metadata = Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + Calls to context.execute() here emit the given string to the + script output. + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + In this scenario we need to create an Engine + and associate a connection with the context. + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/alembic_db/script.py.mako b/alembic_db/script.py.mako new file mode 100644 index 000000000..480b130d6 --- /dev/null +++ b/alembic_db/script.py.mako @@ -0,0 +1,28 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
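+# (the placeholders below are mako template variables; Alembic fills them in
+# when a new revision file is generated from this template)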
+revision: str = ${repr(up_revision)} +down_revision: Union[str, None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + """Upgrade schema.""" + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + """Downgrade schema.""" + ${downgrades if downgrades else "pass"} diff --git a/app/database/db.py b/app/database/db.py new file mode 100644 index 000000000..1de8b80ed --- /dev/null +++ b/app/database/db.py @@ -0,0 +1,112 @@ +import logging +import os +import shutil +from app.logger import log_startup_warning +from utils.install_util import get_missing_requirements_message +from comfy.cli_args import args + +_DB_AVAILABLE = False +Session = None + + +try: + from alembic import command + from alembic.config import Config + from alembic.runtime.migration import MigrationContext + from alembic.script import ScriptDirectory + from sqlalchemy import create_engine + from sqlalchemy.orm import sessionmaker + + _DB_AVAILABLE = True +except ImportError as e: + log_startup_warning( + f""" +------------------------------------------------------------------------ +Error importing dependencies: {e} +{get_missing_requirements_message()} +This error is happening because ComfyUI now uses a local sqlite database. +------------------------------------------------------------------------ +""".strip() + ) + + +def dependencies_available(): + """ + Temporary function to check if the dependencies are available + """ + return _DB_AVAILABLE + + +def can_create_session(): + """ + Temporary function to check if the database is available to create a session + During initial release there may be environmental issues (or missing dependencies) that prevent the database from being created + """ + return dependencies_available() and Session is not None + + +def get_alembic_config(): + root_path = os.path.join(os.path.dirname(__file__), "../..") + config_path = os.path.abspath(os.path.join(root_path, "alembic.ini")) + scripts_path = os.path.abspath(os.path.join(root_path, "alembic_db")) + + config = Config(config_path) + config.set_main_option("script_location", scripts_path) + config.set_main_option("sqlalchemy.url", args.database_url) + + return config + + +def get_db_path(): + url = args.database_url + if url.startswith("sqlite:///"): + return url.split("///")[1] + else: + raise ValueError(f"Unsupported database URL '{url}'.") + + +def init_db(): + db_url = args.database_url + logging.debug(f"Database URL: {db_url}") + db_path = get_db_path() + db_exists = os.path.exists(db_path) + + config = get_alembic_config() + + # Check if we need to upgrade + engine = create_engine(db_url) + conn = engine.connect() + + context = MigrationContext.configure(conn) + current_rev = context.get_current_revision() + + script = ScriptDirectory.from_config(config) + target_rev = script.get_current_head() + + if target_rev is None: + logging.warning("No target revision found.") + elif current_rev != target_rev: + # Backup the database pre upgrade + backup_path = db_path + ".bkp" + if db_exists: + shutil.copy(db_path, backup_path) + else: + backup_path = None + + try: + command.upgrade(config, target_rev) + logging.info(f"Database upgraded from {current_rev} to {target_rev}") + except Exception as e: + if backup_path: + # Restore the database from backup if upgrade fails + shutil.copy(backup_path, db_path) + os.remove(backup_path) + logging.exception("Error upgrading database: ") + raise e 
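+        # Note: when the upgrade succeeds, the pre-upgrade backup (.bkp) is kept
+        # on disk as a manual rollback point; it is only removed when a failed
+        # upgrade restores from it above.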
+ + global Session + Session = sessionmaker(bind=engine) + + +def create_session(): + return Session() diff --git a/app/database/models.py b/app/database/models.py new file mode 100644 index 000000000..6facfb8f2 --- /dev/null +++ b/app/database/models.py @@ -0,0 +1,14 @@ +from sqlalchemy.orm import declarative_base + +Base = declarative_base() + + +def to_dict(obj): + fields = obj.__table__.columns.keys() + return { + field: (val.to_dict() if hasattr(val, "to_dict") else val) + for field in fields + if (val := getattr(obj, field)) + } + +# TODO: Define models here diff --git a/app/frontend_management.py b/app/frontend_management.py index d9ef8c921..2b626f24e 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -16,26 +16,17 @@ from importlib.metadata import version import requests from typing_extensions import NotRequired +from utils.install_util import get_missing_requirements_message, requirements_path + from comfy.cli_args import DEFAULT_VERSION_STRING import app.logger -# The path to the requirements.txt file -req_path = Path(__file__).parents[1] / "requirements.txt" - def frontend_install_warning_message(): - """The warning message to display when the frontend version is not up to date.""" - - extra = "" - if sys.flags.no_user_site: - extra = "-s " return f""" -Please install the updated requirements.txt file by running: -{sys.executable} {extra}-m pip install -r {req_path} +{get_missing_requirements_message()} This error is happening because the ComfyUI frontend is no longer shipped as part of the main repo but as a pip package instead. - -If you are on the portable package you can run: update\\update_comfyui.bat to solve this problem """.strip() @@ -48,7 +39,7 @@ def check_frontend_version(): try: frontend_version_str = version("comfyui-frontend-package") frontend_version = parse_version(frontend_version_str) - with open(req_path, "r", encoding="utf-8") as f: + with open(requirements_path, "r", encoding="utf-8") as f: required_frontend = parse_version(f.readline().split("=")[-1]) if frontend_version < required_frontend: app.logger.log_startup_warning( diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 4fb675f99..741ecac3f 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -203,6 +203,11 @@ parser.add_argument( help="Set the base URL for the ComfyUI API. (default: https://api.comfy.org)", ) +database_default_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "user", "comfyui.db") +) +parser.add_argument("--database-url", type=str, default=f"sqlite:///{database_default_path}", help="Specify the database URL, e.g. for an in-memory database you can use 'sqlite:///:memory:'.") + if comfy.options.args_parsing: args = parser.parse_args() else: diff --git a/main.py b/main.py index fb1f8d20b..7dfc7bc23 100644 --- a/main.py +++ b/main.py @@ -238,6 +238,15 @@ def cleanup_temp(): shutil.rmtree(temp_dir, ignore_errors=True) +def setup_database(): + try: + from app.database.db import init_db, dependencies_available + if dependencies_available(): + init_db() + except Exception as e: + logging.error(f"Failed to initialize database. Please ensure you have installed the latest requirements. If the error persists, please report this as in future the database will be required: {e}") + + def start_comfyui(asyncio_loop=None): """ Starts the ComfyUI server using the provided asyncio event loop or creates a new one. 
@@ -266,6 +275,7 @@ def start_comfyui(asyncio_loop=None): hook_breaker_ac10a0.restore_functions() cuda_malloc_warning() + setup_database() prompt_server.add_routes() hijack_progress(prompt_server) diff --git a/requirements.txt b/requirements.txt index ee24b1cb3..6c5bf1d92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,6 +18,8 @@ Pillow scipy tqdm psutil +alembic +SQLAlchemy #non essential dependencies: kornia>=0.7.1 diff --git a/utils/install_util.py b/utils/install_util.py new file mode 100644 index 000000000..0f59bcf91 --- /dev/null +++ b/utils/install_util.py @@ -0,0 +1,18 @@ +from pathlib import Path +import sys + +# The path to the requirements.txt file +requirements_path = Path(__file__).parents[1] / "requirements.txt" + + +def get_missing_requirements_message(): + """The warning message to display when a package is missing.""" + + extra = "" + if sys.flags.no_user_site: + extra = "-s " + return f""" +Please install the updated requirements.txt file by running: +{sys.executable} {extra}-m pip install -r {requirements_path} +If you are on the portable package you can run: update\\update_comfyui.bat to solve this problem. +""".strip() From 365f9ed15720b24f5a14be5320e7cd7325124c1c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 11 Jun 2025 14:28:04 -0700 Subject: [PATCH 0232/1073] Revert "auto register web folder from pyproject (#8478)" (#8497) This reverts commit 9685d4f3c3e34b50d3f2e9bcb52026db7f7e624c. --- comfy_config/types.py | 15 +-------------- nodes.py | 16 ---------------- requirements.txt | 1 - 3 files changed, 1 insertion(+), 31 deletions(-) diff --git a/comfy_config/types.py b/comfy_config/types.py index 11261a136..611982083 100644 --- a/comfy_config/types.py +++ b/comfy_config/types.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, Field from pydantic_settings import BaseSettings, SettingsConfigDict from typing import List, Optional @@ -50,7 +50,6 @@ class ComfyConfig(BaseModel): icon: str = Field(default="", alias="Icon") models: List[Model] = Field(default_factory=list, alias="Models") includes: List[str] = Field(default_factory=list) - web: Optional[str] = None class License(BaseModel): @@ -67,18 +66,6 @@ class ProjectConfig(BaseModel): license: License = Field(default_factory=License) urls: URLs = Field(default_factory=URLs) - @field_validator('license', mode='before') - @classmethod - def validate_license(cls, v): - if isinstance(v, str): - return License(text=v) - elif isinstance(v, dict): - return License(**v) - elif isinstance(v, License): - return v - else: - return License() - class PyProjectConfig(BaseModel): project: ProjectConfig = Field(default_factory=ProjectConfig) diff --git a/nodes.py b/nodes.py index 19612479b..637279ffb 100644 --- a/nodes.py +++ b/nodes.py @@ -38,8 +38,6 @@ import folder_paths import latent_preview import node_helpers -from comfy_config import config_parser - def before_node_execution(): comfy.model_management.throw_exception_if_processing_interrupted() @@ -2127,20 +2125,6 @@ def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes LOADED_MODULE_DIRS[module_name] = os.path.abspath(module_dir) - project_config = config_parser.extract_node_configuration(module_path) - - web_dir_name = project_config.tool_comfy.web - - if web_dir_name: - web_dir_path = os.path.join(module_path, web_dir_name) - - if os.path.isdir(web_dir_path): - project_name = project_config.project.name - - 
EXTENSION_WEB_DIRS[project_name] = web_dir_path - - logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name)) - if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None: web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY"))) if os.path.isdir(web_dir): diff --git a/requirements.txt b/requirements.txt index 6c5bf1d92..4a0a7d7fd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,3 @@ spandrel soundfile av>=14.2.0 pydantic~=2.0 -pydantic-settings~=2.0 From ecb8d15e7a778ed28e03fc131a8621b2f2945fe8 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:41:30 -0700 Subject: [PATCH 0233/1073] Allow specifying any frontend semver suffixes (#8498) --- app/frontend_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/frontend_management.py b/app/frontend_management.py index 2b626f24e..d336d766a 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -221,7 +221,7 @@ comfyui-workflow-templates is not installed. Raises: argparse.ArgumentTypeError: If the version string is invalid. """ - VERSION_PATTERN = r"^([a-zA-Z0-9][a-zA-Z0-9-]{0,38})/([a-zA-Z0-9_.-]+)@(v?\d+\.\d+\.\d+|latest)$" + VERSION_PATTERN = r"^([a-zA-Z0-9][a-zA-Z0-9-]{0,38})/([a-zA-Z0-9_.-]+)@(v?\d+\.\d+\.\d+[-._a-zA-Z0-9]*|latest)$" match_result = re.match(VERSION_PATTERN, value) if match_result is None: raise argparse.ArgumentTypeError(f"Invalid version string: {value}") From ef7e885fe46dab5aaa9706ca338c48dd3dc2d995 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Thu, 12 Jun 2025 11:10:48 -0700 Subject: [PATCH 0234/1073] Revert "Update requirements.txt (#8487)" (#8502) This reverts commit 373a9386a438327d230a99b6a875c6aa6a589fb6. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4a0a7d7fd..3e02c7494 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.21.7 -comfyui-workflow-templates==0.1.27 +comfyui-workflow-templates==0.1.25 comfyui-embedded-docs==0.2.0 torch torchsde From d2566eb4b262709324eac944f5b58177687401bf Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 12 Jun 2025 12:38:33 -0700 Subject: [PATCH 0235/1073] Add a warning for old python versions. (#8504) --- main.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index 7dfc7bc23..c8c4194d4 100644 --- a/main.py +++ b/main.py @@ -17,7 +17,6 @@ if __name__ == "__main__": os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1' os.environ['DO_NOT_TRACK'] = '1' - setup_logger(log_level=args.verbose, use_stdout=args.log_stdout) def apply_custom_paths(): @@ -310,6 +309,9 @@ if __name__ == "__main__": logging.info("Python version: {}".format(sys.version)) logging.info("ComfyUI version: {}".format(comfyui_version.__version__)) + if sys.version_info.major == 3 and sys.version_info.minor < 10: + logging.warning("WARNING: You are using a python version older than 3.10, please upgrade to a newer one. 
3.12 and above is recommended.") + event_loop, _, start_all_func = start_comfyui() try: x = start_all_func() From 4d1c4b9797465dd32d50a7ae60c139965c19310c Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Thu, 12 Jun 2025 16:24:39 -0400 Subject: [PATCH 0236/1073] Auto register web folder (#8505) * auto register web folder from pyproject * need pydantic-settings as dependency * wrapped try/except for config_parser * sf --- comfy_config/types.py | 15 ++++++++++++++- nodes.py | 19 +++++++++++++++++++ requirements.txt | 1 + 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/comfy_config/types.py b/comfy_config/types.py index 611982083..11261a136 100644 --- a/comfy_config/types.py +++ b/comfy_config/types.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, field_validator from pydantic_settings import BaseSettings, SettingsConfigDict from typing import List, Optional @@ -50,6 +50,7 @@ class ComfyConfig(BaseModel): icon: str = Field(default="", alias="Icon") models: List[Model] = Field(default_factory=list, alias="Models") includes: List[str] = Field(default_factory=list) + web: Optional[str] = None class License(BaseModel): @@ -66,6 +67,18 @@ class ProjectConfig(BaseModel): license: License = Field(default_factory=License) urls: URLs = Field(default_factory=URLs) + @field_validator('license', mode='before') + @classmethod + def validate_license(cls, v): + if isinstance(v, str): + return License(text=v) + elif isinstance(v, dict): + return License(**v) + elif isinstance(v, License): + return v + else: + return License() + class PyProjectConfig(BaseModel): project: ProjectConfig = Field(default_factory=ProjectConfig) diff --git a/nodes.py b/nodes.py index 637279ffb..89201cc72 100644 --- a/nodes.py +++ b/nodes.py @@ -2125,6 +2125,25 @@ def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes LOADED_MODULE_DIRS[module_name] = os.path.abspath(module_dir) + try: + from comfy_config import config_parser + + project_config = config_parser.extract_node_configuration(module_path) + + web_dir_name = project_config.tool_comfy.web + + if web_dir_name: + web_dir_path = os.path.join(module_path, web_dir_name) + + if os.path.isdir(web_dir_path): + project_name = project_config.project.name + + EXTENSION_WEB_DIRS[project_name] = web_dir_path + + logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name)) + except Exception as e: + logging.debug(f"Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': {e}") + if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None: web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY"))) if os.path.isdir(web_dir): diff --git a/requirements.txt b/requirements.txt index 3e02c7494..94dafea58 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,3 +27,4 @@ spandrel soundfile av>=14.2.0 pydantic~=2.0 +pydantic-settings~=2.0 From 40fd39c7cb16702d60aa84c89add56cf7d2f52b7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:14:59 -0700 Subject: [PATCH 0237/1073] debug -> warning (#8506) --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 89201cc72..8e5b47b37 100644 --- a/nodes.py +++ b/nodes.py @@ -2142,7 +2142,7 @@ def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes logging.info("Automatically 
register web folder {} for {}".format(web_dir_name, project_name)) except Exception as e: - logging.debug(f"Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': {e}") + logging.warning(f"Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': {e}") if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None: web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY"))) From baa8c8cdd3b84e074d24483f40aa5df9a859a2d0 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Thu, 12 Jun 2025 17:03:27 -0700 Subject: [PATCH 0238/1073] Add '@prerelease' to use latest test frontend (#8501) * Add '@prerelease' to use latest test frontend Allows download of pre-release versions. Will always get the latest pre-release version - even if it's older than the latest stable release. * nit --- app/frontend_management.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/app/frontend_management.py b/app/frontend_management.py index d336d766a..001ebbecb 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -112,9 +112,22 @@ class FrontEndProvider: response.raise_for_status() # Raises an HTTPError if the response was an error return response.json() + @cached_property + def latest_prerelease(self) -> Release: + """Get the latest pre-release version - even if it's older than the latest release""" + release = [release for release in self.all_releases if release["prerelease"]] + + if not release: + raise ValueError("No pre-releases found") + + # GitHub returns releases in reverse chronological order, so first is latest + return release[0] + def get_release(self, version: str) -> Release: if version == "latest": return self.latest_release + elif version == "prerelease": + return self.latest_prerelease else: for release in self.all_releases: if release["tag_name"] in [version, f"v{version}"]: @@ -221,7 +234,7 @@ comfyui-workflow-templates is not installed. Raises: argparse.ArgumentTypeError: If the version string is invalid. 
""" - VERSION_PATTERN = r"^([a-zA-Z0-9][a-zA-Z0-9-]{0,38})/([a-zA-Z0-9_.-]+)@(v?\d+\.\d+\.\d+[-._a-zA-Z0-9]*|latest)$" + VERSION_PATTERN = r"^([a-zA-Z0-9][a-zA-Z0-9-]{0,38})/([a-zA-Z0-9_.-]+)@(v?\d+\.\d+\.\d+[-._a-zA-Z0-9]*|latest|prerelease)$" match_result = re.match(VERSION_PATTERN, value) if match_result is None: raise argparse.ArgumentTypeError(f"Invalid version string: {value}") From c6529c0d77dbc48b3ae8d8ce18b74dd5476deb7f Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Thu, 12 Jun 2025 17:17:10 -0700 Subject: [PATCH 0239/1073] don't validate string inputs with VALIDATE_INPUTS (#8508) --- comfy_api_nodes/nodes_bfl.py | 29 ++++++++--------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index 010564704..d93fbd778 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -346,20 +346,6 @@ class FluxKontextProImageNode(ComfyNodeABC): }, } - @classmethod - def VALIDATE_INPUTS(cls, aspect_ratio: str): - try: - validate_aspect_ratio( - aspect_ratio, - minimum_ratio=cls.MINIMUM_RATIO, - maximum_ratio=cls.MAXIMUM_RATIO, - minimum_ratio_str=cls.MINIMUM_RATIO_STR, - maximum_ratio_str=cls.MAXIMUM_RATIO_STR, - ) - except Exception as e: - return str(e) - return True - RETURN_TYPES = (IO.IMAGE,) DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value FUNCTION = "api_call" @@ -380,6 +366,13 @@ class FluxKontextProImageNode(ComfyNodeABC): unique_id: Union[str, None] = None, **kwargs, ): + aspect_ratio = validate_aspect_ratio( + aspect_ratio, + minimum_ratio=self.MINIMUM_RATIO, + maximum_ratio=self.MAXIMUM_RATIO, + minimum_ratio_str=self.MINIMUM_RATIO_STR, + maximum_ratio_str=self.MAXIMUM_RATIO_STR, + ) if input_image is None: validate_string(prompt, strip_whitespace=False) operation = SynchronousOperation( @@ -395,13 +388,7 @@ class FluxKontextProImageNode(ComfyNodeABC): guidance=round(guidance, 1), steps=steps, seed=seed, - aspect_ratio=validate_aspect_ratio( - aspect_ratio, - minimum_ratio=self.MINIMUM_RATIO, - maximum_ratio=self.MAXIMUM_RATIO, - minimum_ratio_str=self.MINIMUM_RATIO_STR, - maximum_ratio_str=self.MAXIMUM_RATIO_STR, - ), + aspect_ratio=aspect_ratio, input_image=( input_image if input_image is None From 251f54a2ad5a2676481337b930e7eceb735b132c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 13 Jun 2025 04:05:23 -0700 Subject: [PATCH 0240/1073] Basic initial support for cosmos predict2 text to image 2B and 14B models. 
(#8517) --- comfy/ldm/cosmos/blocks.py | 10 - comfy/ldm/cosmos/position_embedding.py | 4 +- comfy/ldm/cosmos/predict2.py | 868 +++++++++++++++++++++++++ comfy/model_base.py | 47 +- comfy/model_detection.py | 47 ++ comfy/model_sampling.py | 31 + comfy/supported_models.py | 40 +- 7 files changed, 1021 insertions(+), 26 deletions(-) create mode 100644 comfy/ldm/cosmos/predict2.py diff --git a/comfy/ldm/cosmos/blocks.py b/comfy/ldm/cosmos/blocks.py index a12f892d2..5c4356a3f 100644 --- a/comfy/ldm/cosmos/blocks.py +++ b/comfy/ldm/cosmos/blocks.py @@ -26,16 +26,6 @@ from torch import nn from comfy.ldm.modules.attention import optimized_attention -def apply_rotary_pos_emb( - t: torch.Tensor, - freqs: torch.Tensor, -) -> torch.Tensor: - t_ = t.reshape(*t.shape[:-1], 2, -1).movedim(-2, -1).unsqueeze(-2).float() - t_out = freqs[..., 0] * t_[..., 0] + freqs[..., 1] * t_[..., 1] - t_out = t_out.movedim(-1, -2).reshape(*t.shape).type_as(t) - return t_out - - def get_normalization(name: str, channels: int, weight_args={}, operations=None): if name == "I": return nn.Identity() diff --git a/comfy/ldm/cosmos/position_embedding.py b/comfy/ldm/cosmos/position_embedding.py index 4d6a58dba..44197a597 100644 --- a/comfy/ldm/cosmos/position_embedding.py +++ b/comfy/ldm/cosmos/position_embedding.py @@ -66,6 +66,7 @@ class VideoRopePosition3DEmb(VideoPositionEmb): h_extrapolation_ratio: float = 1.0, w_extrapolation_ratio: float = 1.0, t_extrapolation_ratio: float = 1.0, + enable_fps_modulation: bool = True, device=None, **kwargs, # used for compatibility with other positional embeddings; unused in this class ): @@ -75,6 +76,7 @@ class VideoRopePosition3DEmb(VideoPositionEmb): self.base_fps = base_fps self.max_h = len_h self.max_w = len_w + self.enable_fps_modulation = enable_fps_modulation dim = head_dim dim_h = dim // 6 * 2 @@ -143,7 +145,7 @@ class VideoRopePosition3DEmb(VideoPositionEmb): half_emb_w = torch.outer(self.seq[:W].to(device=device), w_spatial_freqs) # apply sequence scaling in temporal dimension - if fps is None: # image case + if fps is None or self.enable_fps_modulation is False: # image case half_emb_t = torch.outer(self.seq[:T].to(device=device), temporal_freqs) else: half_emb_t = torch.outer(self.seq[:T].to(device=device) / fps * self.base_fps, temporal_freqs) diff --git a/comfy/ldm/cosmos/predict2.py b/comfy/ldm/cosmos/predict2.py new file mode 100644 index 000000000..3b91b3f6e --- /dev/null +++ b/comfy/ldm/cosmos/predict2.py @@ -0,0 +1,868 @@ +# original code from: https://github.com/nvidia-cosmos/cosmos-predict2 + +import torch +from torch import nn +from einops import rearrange +from einops.layers.torch import Rearrange +import logging +from typing import Callable, Optional, Tuple +import math + +from .position_embedding import VideoRopePosition3DEmb, LearnablePosEmbAxis +from torchvision import transforms + +from comfy.ldm.modules.attention import optimized_attention + +def apply_rotary_pos_emb( + t: torch.Tensor, + freqs: torch.Tensor, +) -> torch.Tensor: + t_ = t.reshape(*t.shape[:-1], 2, -1).movedim(-2, -1).unsqueeze(-2).float() + t_out = freqs[..., 0] * t_[..., 0] + freqs[..., 1] * t_[..., 1] + t_out = t_out.movedim(-1, -2).reshape(*t.shape).type_as(t) + return t_out + + +# ---------------------- Feed Forward Network ----------------------- +class GPT2FeedForward(nn.Module): + def __init__(self, d_model: int, d_ff: int, device=None, dtype=None, operations=None) -> None: + super().__init__() + self.activation = nn.GELU() + self.layer1 = operations.Linear(d_model, d_ff, bias=False, 
device=device, dtype=dtype) + self.layer2 = operations.Linear(d_ff, d_model, bias=False, device=device, dtype=dtype) + + self._layer_id = None + self._dim = d_model + self._hidden_dim = d_ff + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.layer1(x) + + x = self.activation(x) + x = self.layer2(x) + return x + + +def torch_attention_op(q_B_S_H_D: torch.Tensor, k_B_S_H_D: torch.Tensor, v_B_S_H_D: torch.Tensor) -> torch.Tensor: + """Computes multi-head attention using PyTorch's native implementation. + + This function provides a PyTorch backend alternative to Transformer Engine's attention operation. + It rearranges the input tensors to match PyTorch's expected format, computes scaled dot-product + attention, and rearranges the output back to the original format. + + The input tensor names use the following dimension conventions: + + - B: batch size + - S: sequence length + - H: number of attention heads + - D: head dimension + + Args: + q_B_S_H_D: Query tensor with shape (batch, seq_len, n_heads, head_dim) + k_B_S_H_D: Key tensor with shape (batch, seq_len, n_heads, head_dim) + v_B_S_H_D: Value tensor with shape (batch, seq_len, n_heads, head_dim) + + Returns: + Attention output tensor with shape (batch, seq_len, n_heads * head_dim) + """ + in_q_shape = q_B_S_H_D.shape + in_k_shape = k_B_S_H_D.shape + q_B_H_S_D = rearrange(q_B_S_H_D, "b ... h k -> b h ... k").view(in_q_shape[0], in_q_shape[-2], -1, in_q_shape[-1]) + k_B_H_S_D = rearrange(k_B_S_H_D, "b ... h v -> b h ... v").view(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) + v_B_H_S_D = rearrange(v_B_S_H_D, "b ... h v -> b h ... v").view(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) + result_B_S_HD = rearrange( + optimized_attention(q_B_H_S_D, k_B_H_S_D, v_B_H_S_D, in_q_shape[-2], skip_reshape=True, skip_output_reshape=True), "b h ... l -> b ... (h l)" + ) + + return result_B_S_HD + + +class Attention(nn.Module): + """ + A flexible attention module supporting both self-attention and cross-attention mechanisms. + + This module implements a multi-head attention layer that can operate in either self-attention + or cross-attention mode. The mode is determined by whether a context dimension is provided. + The implementation uses scaled dot-product attention and supports optional bias terms and + dropout regularization. + + Args: + query_dim (int): The dimensionality of the query vectors. + context_dim (int, optional): The dimensionality of the context (key/value) vectors. + If None, the module operates in self-attention mode using query_dim. Default: None + n_heads (int, optional): Number of attention heads for multi-head attention. Default: 8 + head_dim (int, optional): The dimension of each attention head. Default: 64 + dropout (float, optional): Dropout probability applied to the output. Default: 0.0 + qkv_format (str, optional): Format specification for QKV tensors. Default: "bshd" + backend (str, optional): Backend to use for the attention operation. 
Default: "transformer_engine" + + Examples: + >>> # Self-attention with 512 dimensions and 8 heads + >>> self_attn = Attention(query_dim=512) + >>> x = torch.randn(32, 16, 512) # (batch_size, seq_len, dim) + >>> out = self_attn(x) # (32, 16, 512) + + >>> # Cross-attention + >>> cross_attn = Attention(query_dim=512, context_dim=256) + >>> query = torch.randn(32, 16, 512) + >>> context = torch.randn(32, 8, 256) + >>> out = cross_attn(query, context) # (32, 16, 512) + """ + + def __init__( + self, + query_dim: int, + context_dim: Optional[int] = None, + n_heads: int = 8, + head_dim: int = 64, + dropout: float = 0.0, + device=None, + dtype=None, + operations=None, + ) -> None: + super().__init__() + logging.debug( + f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using " + f"{n_heads} heads with a dimension of {head_dim}." + ) + self.is_selfattn = context_dim is None # self attention + + context_dim = query_dim if context_dim is None else context_dim + inner_dim = head_dim * n_heads + + self.n_heads = n_heads + self.head_dim = head_dim + self.query_dim = query_dim + self.context_dim = context_dim + + self.q_proj = operations.Linear(query_dim, inner_dim, bias=False, device=device, dtype=dtype) + self.q_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype) + + self.k_proj = operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype) + self.k_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype) + + self.v_proj = operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype) + self.v_norm = nn.Identity() + + self.output_proj = operations.Linear(inner_dim, query_dim, bias=False, device=device, dtype=dtype) + self.output_dropout = nn.Dropout(dropout) if dropout > 1e-4 else nn.Identity() + + self.attn_op = torch_attention_op + + self._query_dim = query_dim + self._context_dim = context_dim + self._inner_dim = inner_dim + + def compute_qkv( + self, + x: torch.Tensor, + context: Optional[torch.Tensor] = None, + rope_emb: Optional[torch.Tensor] = None, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + q = self.q_proj(x) + context = x if context is None else context + k = self.k_proj(context) + v = self.v_proj(context) + q, k, v = map( + lambda t: rearrange(t, "b ... (h d) -> b ... h d", h=self.n_heads, d=self.head_dim), + (q, k, v), + ) + + def apply_norm_and_rotary_pos_emb( + q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, rope_emb: Optional[torch.Tensor] + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + q = self.q_norm(q) + k = self.k_norm(k) + v = self.v_norm(v) + if self.is_selfattn and rope_emb is not None: # only apply to self-attention! 
+ q = apply_rotary_pos_emb(q, rope_emb) + k = apply_rotary_pos_emb(k, rope_emb) + return q, k, v + + q, k, v = apply_norm_and_rotary_pos_emb(q, k, v, rope_emb) + + return q, k, v + + def compute_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor: + result = self.attn_op(q, k, v) # [B, S, H, D] + return self.output_dropout(self.output_proj(result)) + + def forward( + self, + x: torch.Tensor, + context: Optional[torch.Tensor] = None, + rope_emb: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + Args: + x (Tensor): The query tensor of shape [B, Mq, K] + context (Optional[Tensor]): The key tensor of shape [B, Mk, K] or use x as context [self attention] if None + """ + q, k, v = self.compute_qkv(x, context, rope_emb=rope_emb) + return self.compute_attention(q, k, v) + + +class Timesteps(nn.Module): + def __init__(self, num_channels: int): + super().__init__() + self.num_channels = num_channels + + def forward(self, timesteps_B_T: torch.Tensor) -> torch.Tensor: + assert timesteps_B_T.ndim == 2, f"Expected 2D input, got {timesteps_B_T.ndim}" + timesteps = timesteps_B_T.flatten().float() + half_dim = self.num_channels // 2 + exponent = -math.log(10000) * torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) + exponent = exponent / (half_dim - 0.0) + + emb = torch.exp(exponent) + emb = timesteps[:, None].float() * emb[None, :] + + sin_emb = torch.sin(emb) + cos_emb = torch.cos(emb) + emb = torch.cat([cos_emb, sin_emb], dim=-1) + + return rearrange(emb, "(b t) d -> b t d", b=timesteps_B_T.shape[0], t=timesteps_B_T.shape[1]) + + +class TimestepEmbedding(nn.Module): + def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, device=None, dtype=None, operations=None): + super().__init__() + logging.debug( + f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility." + ) + self.in_dim = in_features + self.out_dim = out_features + self.linear_1 = operations.Linear(in_features, out_features, bias=not use_adaln_lora, device=device, dtype=dtype) + self.activation = nn.SiLU() + self.use_adaln_lora = use_adaln_lora + if use_adaln_lora: + self.linear_2 = operations.Linear(out_features, 3 * out_features, bias=False, device=device, dtype=dtype) + else: + self.linear_2 = operations.Linear(out_features, out_features, bias=False, device=device, dtype=dtype) + + def forward(self, sample: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + emb = self.linear_1(sample) + emb = self.activation(emb) + emb = self.linear_2(emb) + + if self.use_adaln_lora: + adaln_lora_B_T_3D = emb + emb_B_T_D = sample + else: + adaln_lora_B_T_3D = None + emb_B_T_D = emb + + return emb_B_T_D, adaln_lora_B_T_3D + + +class PatchEmbed(nn.Module): + """ + PatchEmbed is a module for embedding patches from an input tensor by applying either 3D or 2D convolutional layers, + depending on the . This module can process inputs with temporal (video) and spatial (image) dimensions, + making it suitable for video and image processing tasks. It supports dividing the input into patches + and embedding each patch into a vector of size `out_channels`. + + Parameters: + - spatial_patch_size (int): The size of each spatial patch. + - temporal_patch_size (int): The size of each temporal patch. + - in_channels (int): Number of input channels. Default: 3. + - out_channels (int): The dimension of the embedding vector for each patch. Default: 768. 
+ - bias (bool): If True, adds a learnable bias to the output of the convolutional layers. Default: True. + """ + + def __init__( + self, + spatial_patch_size: int, + temporal_patch_size: int, + in_channels: int = 3, + out_channels: int = 768, + device=None, dtype=None, operations=None + ): + super().__init__() + self.spatial_patch_size = spatial_patch_size + self.temporal_patch_size = temporal_patch_size + + self.proj = nn.Sequential( + Rearrange( + "b c (t r) (h m) (w n) -> b t h w (c r m n)", + r=temporal_patch_size, + m=spatial_patch_size, + n=spatial_patch_size, + ), + operations.Linear( + in_channels * spatial_patch_size * spatial_patch_size * temporal_patch_size, out_channels, bias=False, device=device, dtype=dtype + ), + ) + self.dim = in_channels * spatial_patch_size * spatial_patch_size * temporal_patch_size + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Forward pass of the PatchEmbed module. + + Parameters: + - x (torch.Tensor): The input tensor of shape (B, C, T, H, W) where + B is the batch size, + C is the number of channels, + T is the temporal dimension, + H is the height, and + W is the width of the input. + + Returns: + - torch.Tensor: The embedded patches as a tensor, with shape b t h w c. + """ + assert x.dim() == 5 + _, _, T, H, W = x.shape + assert ( + H % self.spatial_patch_size == 0 and W % self.spatial_patch_size == 0 + ), f"H,W {(H, W)} should be divisible by spatial_patch_size {self.spatial_patch_size}" + assert T % self.temporal_patch_size == 0 + x = self.proj(x) + return x + + +class FinalLayer(nn.Module): + """ + The final layer of video DiT. + """ + + def __init__( + self, + hidden_size: int, + spatial_patch_size: int, + temporal_patch_size: int, + out_channels: int, + use_adaln_lora: bool = False, + adaln_lora_dim: int = 256, + device=None, dtype=None, operations=None + ): + super().__init__() + self.layer_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.linear = operations.Linear( + hidden_size, spatial_patch_size * spatial_patch_size * temporal_patch_size * out_channels, bias=False, device=device, dtype=dtype + ) + self.hidden_size = hidden_size + self.n_adaln_chunks = 2 + self.use_adaln_lora = use_adaln_lora + self.adaln_lora_dim = adaln_lora_dim + if use_adaln_lora: + self.adaln_modulation = nn.Sequential( + nn.SiLU(), + operations.Linear(hidden_size, adaln_lora_dim, bias=False, device=device, dtype=dtype), + operations.Linear(adaln_lora_dim, self.n_adaln_chunks * hidden_size, bias=False, device=device, dtype=dtype), + ) + else: + self.adaln_modulation = nn.Sequential( + nn.SiLU(), operations.Linear(hidden_size, self.n_adaln_chunks * hidden_size, bias=False, device=device, dtype=dtype) + ) + + def forward( + self, + x_B_T_H_W_D: torch.Tensor, + emb_B_T_D: torch.Tensor, + adaln_lora_B_T_3D: Optional[torch.Tensor] = None, + ): + if self.use_adaln_lora: + assert adaln_lora_B_T_3D is not None + shift_B_T_D, scale_B_T_D = ( + self.adaln_modulation(emb_B_T_D) + adaln_lora_B_T_3D[:, :, : 2 * self.hidden_size] + ).chunk(2, dim=-1) + else: + shift_B_T_D, scale_B_T_D = self.adaln_modulation(emb_B_T_D).chunk(2, dim=-1) + + shift_B_T_1_1_D, scale_B_T_1_1_D = rearrange(shift_B_T_D, "b t d -> b t 1 1 d"), rearrange( + scale_B_T_D, "b t d -> b t 1 1 d" + ) + + def _fn( + _x_B_T_H_W_D: torch.Tensor, + _norm_layer: nn.Module, + _scale_B_T_1_1_D: torch.Tensor, + _shift_B_T_1_1_D: torch.Tensor, + ) -> torch.Tensor: + return _norm_layer(_x_B_T_H_W_D) * (1 + _scale_B_T_1_1_D) + _shift_B_T_1_1_D + + x_B_T_H_W_D = _fn(x_B_T_H_W_D, 
self.layer_norm, scale_B_T_1_1_D, shift_B_T_1_1_D) + x_B_T_H_W_O = self.linear(x_B_T_H_W_D) + return x_B_T_H_W_O + + +class Block(nn.Module): + """ + A transformer block that combines self-attention, cross-attention and MLP layers with AdaLN modulation. + Each component (self-attention, cross-attention, MLP) has its own layer normalization and AdaLN modulation. + + Parameters: + x_dim (int): Dimension of input features + context_dim (int): Dimension of context features for cross-attention + num_heads (int): Number of attention heads + mlp_ratio (float): Multiplier for MLP hidden dimension. Default: 4.0 + use_adaln_lora (bool): Whether to use AdaLN-LoRA modulation. Default: False + adaln_lora_dim (int): Hidden dimension for AdaLN-LoRA layers. Default: 256 + + The block applies the following sequence: + 1. Self-attention with AdaLN modulation + 2. Cross-attention with AdaLN modulation + 3. MLP with AdaLN modulation + + Each component uses skip connections and layer normalization. + """ + + def __init__( + self, + x_dim: int, + context_dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + use_adaln_lora: bool = False, + adaln_lora_dim: int = 256, + device=None, + dtype=None, + operations=None, + ): + super().__init__() + self.x_dim = x_dim + self.layer_norm_self_attn = operations.LayerNorm(x_dim, elementwise_affine=False, eps=1e-6, device=device, dtype=dtype) + self.self_attn = Attention(x_dim, None, num_heads, x_dim // num_heads, device=device, dtype=dtype, operations=operations) + + self.layer_norm_cross_attn = operations.LayerNorm(x_dim, elementwise_affine=False, eps=1e-6, device=device, dtype=dtype) + self.cross_attn = Attention( + x_dim, context_dim, num_heads, x_dim // num_heads, device=device, dtype=dtype, operations=operations + ) + + self.layer_norm_mlp = operations.LayerNorm(x_dim, elementwise_affine=False, eps=1e-6, device=device, dtype=dtype) + self.mlp = GPT2FeedForward(x_dim, int(x_dim * mlp_ratio), device=device, dtype=dtype, operations=operations) + + self.use_adaln_lora = use_adaln_lora + if self.use_adaln_lora: + self.adaln_modulation_self_attn = nn.Sequential( + nn.SiLU(), + operations.Linear(x_dim, adaln_lora_dim, bias=False, device=device, dtype=dtype), + operations.Linear(adaln_lora_dim, 3 * x_dim, bias=False, device=device, dtype=dtype), + ) + self.adaln_modulation_cross_attn = nn.Sequential( + nn.SiLU(), + operations.Linear(x_dim, adaln_lora_dim, bias=False, device=device, dtype=dtype), + operations.Linear(adaln_lora_dim, 3 * x_dim, bias=False, device=device, dtype=dtype), + ) + self.adaln_modulation_mlp = nn.Sequential( + nn.SiLU(), + operations.Linear(x_dim, adaln_lora_dim, bias=False, device=device, dtype=dtype), + operations.Linear(adaln_lora_dim, 3 * x_dim, bias=False, device=device, dtype=dtype), + ) + else: + self.adaln_modulation_self_attn = nn.Sequential(nn.SiLU(), operations.Linear(x_dim, 3 * x_dim, bias=False, device=device, dtype=dtype)) + self.adaln_modulation_cross_attn = nn.Sequential(nn.SiLU(), operations.Linear(x_dim, 3 * x_dim, bias=False, device=device, dtype=dtype)) + self.adaln_modulation_mlp = nn.Sequential(nn.SiLU(), operations.Linear(x_dim, 3 * x_dim, bias=False, device=device, dtype=dtype)) + + def forward( + self, + x_B_T_H_W_D: torch.Tensor, + emb_B_T_D: torch.Tensor, + crossattn_emb: torch.Tensor, + rope_emb_L_1_1_D: Optional[torch.Tensor] = None, + adaln_lora_B_T_3D: Optional[torch.Tensor] = None, + extra_per_block_pos_emb: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if extra_per_block_pos_emb is not None: + x_B_T_H_W_D = 
x_B_T_H_W_D + extra_per_block_pos_emb + + if self.use_adaln_lora: + shift_self_attn_B_T_D, scale_self_attn_B_T_D, gate_self_attn_B_T_D = ( + self.adaln_modulation_self_attn(emb_B_T_D) + adaln_lora_B_T_3D + ).chunk(3, dim=-1) + shift_cross_attn_B_T_D, scale_cross_attn_B_T_D, gate_cross_attn_B_T_D = ( + self.adaln_modulation_cross_attn(emb_B_T_D) + adaln_lora_B_T_3D + ).chunk(3, dim=-1) + shift_mlp_B_T_D, scale_mlp_B_T_D, gate_mlp_B_T_D = ( + self.adaln_modulation_mlp(emb_B_T_D) + adaln_lora_B_T_3D + ).chunk(3, dim=-1) + else: + shift_self_attn_B_T_D, scale_self_attn_B_T_D, gate_self_attn_B_T_D = self.adaln_modulation_self_attn( + emb_B_T_D + ).chunk(3, dim=-1) + shift_cross_attn_B_T_D, scale_cross_attn_B_T_D, gate_cross_attn_B_T_D = self.adaln_modulation_cross_attn( + emb_B_T_D + ).chunk(3, dim=-1) + shift_mlp_B_T_D, scale_mlp_B_T_D, gate_mlp_B_T_D = self.adaln_modulation_mlp(emb_B_T_D).chunk(3, dim=-1) + + # Reshape tensors from (B, T, D) to (B, T, 1, 1, D) for broadcasting + shift_self_attn_B_T_1_1_D = rearrange(shift_self_attn_B_T_D, "b t d -> b t 1 1 d") + scale_self_attn_B_T_1_1_D = rearrange(scale_self_attn_B_T_D, "b t d -> b t 1 1 d") + gate_self_attn_B_T_1_1_D = rearrange(gate_self_attn_B_T_D, "b t d -> b t 1 1 d") + + shift_cross_attn_B_T_1_1_D = rearrange(shift_cross_attn_B_T_D, "b t d -> b t 1 1 d") + scale_cross_attn_B_T_1_1_D = rearrange(scale_cross_attn_B_T_D, "b t d -> b t 1 1 d") + gate_cross_attn_B_T_1_1_D = rearrange(gate_cross_attn_B_T_D, "b t d -> b t 1 1 d") + + shift_mlp_B_T_1_1_D = rearrange(shift_mlp_B_T_D, "b t d -> b t 1 1 d") + scale_mlp_B_T_1_1_D = rearrange(scale_mlp_B_T_D, "b t d -> b t 1 1 d") + gate_mlp_B_T_1_1_D = rearrange(gate_mlp_B_T_D, "b t d -> b t 1 1 d") + + B, T, H, W, D = x_B_T_H_W_D.shape + + def _fn(_x_B_T_H_W_D, _norm_layer, _scale_B_T_1_1_D, _shift_B_T_1_1_D): + return _norm_layer(_x_B_T_H_W_D) * (1 + _scale_B_T_1_1_D) + _shift_B_T_1_1_D + + normalized_x_B_T_H_W_D = _fn( + x_B_T_H_W_D, + self.layer_norm_self_attn, + scale_self_attn_B_T_1_1_D, + shift_self_attn_B_T_1_1_D, + ) + result_B_T_H_W_D = rearrange( + self.self_attn( + # normalized_x_B_T_HW_D, + rearrange(normalized_x_B_T_H_W_D, "b t h w d -> b (t h w) d"), + None, + rope_emb=rope_emb_L_1_1_D, + ), + "b (t h w) d -> b t h w d", + t=T, + h=H, + w=W, + ) + x_B_T_H_W_D = x_B_T_H_W_D + gate_self_attn_B_T_1_1_D * result_B_T_H_W_D + + def _x_fn( + _x_B_T_H_W_D: torch.Tensor, + layer_norm_cross_attn: Callable, + _scale_cross_attn_B_T_1_1_D: torch.Tensor, + _shift_cross_attn_B_T_1_1_D: torch.Tensor, + ) -> torch.Tensor: + _normalized_x_B_T_H_W_D = _fn( + _x_B_T_H_W_D, layer_norm_cross_attn, _scale_cross_attn_B_T_1_1_D, _shift_cross_attn_B_T_1_1_D + ) + _result_B_T_H_W_D = rearrange( + self.cross_attn( + rearrange(_normalized_x_B_T_H_W_D, "b t h w d -> b (t h w) d"), + crossattn_emb, + rope_emb=rope_emb_L_1_1_D, + ), + "b (t h w) d -> b t h w d", + t=T, + h=H, + w=W, + ) + return _result_B_T_H_W_D + + result_B_T_H_W_D = _x_fn( + x_B_T_H_W_D, + self.layer_norm_cross_attn, + scale_cross_attn_B_T_1_1_D, + shift_cross_attn_B_T_1_1_D, + ) + x_B_T_H_W_D = result_B_T_H_W_D * gate_cross_attn_B_T_1_1_D + x_B_T_H_W_D + + normalized_x_B_T_H_W_D = _fn( + x_B_T_H_W_D, + self.layer_norm_mlp, + scale_mlp_B_T_1_1_D, + shift_mlp_B_T_1_1_D, + ) + result_B_T_H_W_D = self.mlp(normalized_x_B_T_H_W_D) + x_B_T_H_W_D = x_B_T_H_W_D + gate_mlp_B_T_1_1_D * result_B_T_H_W_D + return x_B_T_H_W_D + + +class MiniTrainDIT(nn.Module): + """ + A clean impl of DIT that can load and reproduce the training results of the original DIT 
model in~(cosmos 1) + A general implementation of adaln-modulated VIT-like~(DiT) transformer for video processing. + + Args: + max_img_h (int): Maximum height of the input images. + max_img_w (int): Maximum width of the input images. + max_frames (int): Maximum number of frames in the video sequence. + in_channels (int): Number of input channels (e.g., RGB channels for color images). + out_channels (int): Number of output channels. + patch_spatial (tuple): Spatial resolution of patches for input processing. + patch_temporal (int): Temporal resolution of patches for input processing. + concat_padding_mask (bool): If True, includes a mask channel in the input to handle padding. + model_channels (int): Base number of channels used throughout the model. + num_blocks (int): Number of transformer blocks. + num_heads (int): Number of heads in the multi-head attention layers. + mlp_ratio (float): Expansion ratio for MLP blocks. + crossattn_emb_channels (int): Number of embedding channels for cross-attention. + pos_emb_cls (str): Type of positional embeddings. + pos_emb_learnable (bool): Whether positional embeddings are learnable. + pos_emb_interpolation (str): Method for interpolating positional embeddings. + min_fps (int): Minimum frames per second. + max_fps (int): Maximum frames per second. + use_adaln_lora (bool): Whether to use AdaLN-LoRA. + adaln_lora_dim (int): Dimension for AdaLN-LoRA. + rope_h_extrapolation_ratio (float): Height extrapolation ratio for RoPE. + rope_w_extrapolation_ratio (float): Width extrapolation ratio for RoPE. + rope_t_extrapolation_ratio (float): Temporal extrapolation ratio for RoPE. + extra_per_block_abs_pos_emb (bool): Whether to use extra per-block absolute positional embeddings. + extra_h_extrapolation_ratio (float): Height extrapolation ratio for extra embeddings. + extra_w_extrapolation_ratio (float): Width extrapolation ratio for extra embeddings. + extra_t_extrapolation_ratio (float): Temporal extrapolation ratio for extra embeddings. 
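+        rope_enable_fps_modulation (bool): Whether temporal RoPE frequencies are scaled by the input fps; when False, video frames are embedded the same way as images.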
+ """ + + def __init__( + self, + max_img_h: int, + max_img_w: int, + max_frames: int, + in_channels: int, + out_channels: int, + patch_spatial: int, # tuple, + patch_temporal: int, + concat_padding_mask: bool = True, + # attention settings + model_channels: int = 768, + num_blocks: int = 10, + num_heads: int = 16, + mlp_ratio: float = 4.0, + # cross attention settings + crossattn_emb_channels: int = 1024, + # positional embedding settings + pos_emb_cls: str = "sincos", + pos_emb_learnable: bool = False, + pos_emb_interpolation: str = "crop", + min_fps: int = 1, + max_fps: int = 30, + use_adaln_lora: bool = False, + adaln_lora_dim: int = 256, + rope_h_extrapolation_ratio: float = 1.0, + rope_w_extrapolation_ratio: float = 1.0, + rope_t_extrapolation_ratio: float = 1.0, + extra_per_block_abs_pos_emb: bool = False, + extra_h_extrapolation_ratio: float = 1.0, + extra_w_extrapolation_ratio: float = 1.0, + extra_t_extrapolation_ratio: float = 1.0, + rope_enable_fps_modulation: bool = True, + image_model=None, + device=None, + dtype=None, + operations=None, + ) -> None: + super().__init__() + self.dtype = dtype + self.max_img_h = max_img_h + self.max_img_w = max_img_w + self.max_frames = max_frames + self.in_channels = in_channels + self.out_channels = out_channels + self.patch_spatial = patch_spatial + self.patch_temporal = patch_temporal + self.num_heads = num_heads + self.num_blocks = num_blocks + self.model_channels = model_channels + self.concat_padding_mask = concat_padding_mask + # positional embedding settings + self.pos_emb_cls = pos_emb_cls + self.pos_emb_learnable = pos_emb_learnable + self.pos_emb_interpolation = pos_emb_interpolation + self.min_fps = min_fps + self.max_fps = max_fps + self.rope_h_extrapolation_ratio = rope_h_extrapolation_ratio + self.rope_w_extrapolation_ratio = rope_w_extrapolation_ratio + self.rope_t_extrapolation_ratio = rope_t_extrapolation_ratio + self.extra_per_block_abs_pos_emb = extra_per_block_abs_pos_emb + self.extra_h_extrapolation_ratio = extra_h_extrapolation_ratio + self.extra_w_extrapolation_ratio = extra_w_extrapolation_ratio + self.extra_t_extrapolation_ratio = extra_t_extrapolation_ratio + self.rope_enable_fps_modulation = rope_enable_fps_modulation + + self.build_pos_embed(device=device, dtype=dtype) + self.use_adaln_lora = use_adaln_lora + self.adaln_lora_dim = adaln_lora_dim + self.t_embedder = nn.Sequential( + Timesteps(model_channels), + TimestepEmbedding(model_channels, model_channels, use_adaln_lora=use_adaln_lora, device=device, dtype=dtype, operations=operations,), + ) + + in_channels = in_channels + 1 if concat_padding_mask else in_channels + self.x_embedder = PatchEmbed( + spatial_patch_size=patch_spatial, + temporal_patch_size=patch_temporal, + in_channels=in_channels, + out_channels=model_channels, + device=device, dtype=dtype, operations=operations, + ) + + self.blocks = nn.ModuleList( + [ + Block( + x_dim=model_channels, + context_dim=crossattn_emb_channels, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + use_adaln_lora=use_adaln_lora, + adaln_lora_dim=adaln_lora_dim, + device=device, dtype=dtype, operations=operations, + ) + for _ in range(num_blocks) + ] + ) + + self.final_layer = FinalLayer( + hidden_size=self.model_channels, + spatial_patch_size=self.patch_spatial, + temporal_patch_size=self.patch_temporal, + out_channels=self.out_channels, + use_adaln_lora=self.use_adaln_lora, + adaln_lora_dim=self.adaln_lora_dim, + device=device, dtype=dtype, operations=operations, + ) + + self.t_embedding_norm = 
operations.RMSNorm(model_channels, eps=1e-6, device=device, dtype=dtype) + + def build_pos_embed(self, device=None, dtype=None) -> None: + if self.pos_emb_cls == "rope3d": + cls_type = VideoRopePosition3DEmb + else: + raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}") + + logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}") + kwargs = dict( + model_channels=self.model_channels, + len_h=self.max_img_h // self.patch_spatial, + len_w=self.max_img_w // self.patch_spatial, + len_t=self.max_frames // self.patch_temporal, + max_fps=self.max_fps, + min_fps=self.min_fps, + is_learnable=self.pos_emb_learnable, + interpolation=self.pos_emb_interpolation, + head_dim=self.model_channels // self.num_heads, + h_extrapolation_ratio=self.rope_h_extrapolation_ratio, + w_extrapolation_ratio=self.rope_w_extrapolation_ratio, + t_extrapolation_ratio=self.rope_t_extrapolation_ratio, + enable_fps_modulation=self.rope_enable_fps_modulation, + device=device, + ) + self.pos_embedder = cls_type( + **kwargs, # type: ignore + ) + + if self.extra_per_block_abs_pos_emb: + kwargs["h_extrapolation_ratio"] = self.extra_h_extrapolation_ratio + kwargs["w_extrapolation_ratio"] = self.extra_w_extrapolation_ratio + kwargs["t_extrapolation_ratio"] = self.extra_t_extrapolation_ratio + kwargs["device"] = device + kwargs["dtype"] = dtype + self.extra_pos_embedder = LearnablePosEmbAxis( + **kwargs, # type: ignore + ) + + def prepare_embedded_sequence( + self, + x_B_C_T_H_W: torch.Tensor, + fps: Optional[torch.Tensor] = None, + padding_mask: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Prepares an embedded sequence tensor by applying positional embeddings and handling padding masks. + + Args: + x_B_C_T_H_W (torch.Tensor): video + fps (Optional[torch.Tensor]): Frames per second tensor to be used for positional embedding when required. + If None, a default value (`self.base_fps`) will be used. + padding_mask (Optional[torch.Tensor]): current it is not used + + Returns: + Tuple[torch.Tensor, Optional[torch.Tensor]]: + - A tensor of shape (B, T, H, W, D) with the embedded sequence. + - An optional positional embedding tensor, returned only if the positional embedding class + (`self.pos_emb_cls`) includes 'rope'. Otherwise, None. + + Notes: + - If `self.concat_padding_mask` is True, a padding mask channel is concatenated to the input tensor. + - The method of applying positional embeddings depends on the value of `self.pos_emb_cls`. + - If 'rope' is in `self.pos_emb_cls` (case insensitive), the positional embeddings are generated using + the `self.pos_embedder` with the shape [T, H, W]. + - If "fps_aware" is in `self.pos_emb_cls`, the positional embeddings are generated using the + `self.pos_embedder` with the fps tensor. + - Otherwise, the positional embeddings are generated without considering fps. 
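+            - When `self.concat_padding_mask` is True, an all-zero mask is synthesized if none is provided; otherwise the given mask is nearest-neighbor resized to the input's H/W. It is then concatenated as one extra input channel.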
+ """ + if self.concat_padding_mask: + if padding_mask is None: + padding_mask = torch.zeros(x_B_C_T_H_W.shape[0], 1, x_B_C_T_H_W.shape[3], x_B_C_T_H_W.shape[4], dtype=x_B_C_T_H_W.dtype, device=x_B_C_T_H_W.device) + else: + padding_mask = transforms.functional.resize( + padding_mask, list(x_B_C_T_H_W.shape[-2:]), interpolation=transforms.InterpolationMode.NEAREST + ) + x_B_C_T_H_W = torch.cat( + [x_B_C_T_H_W, padding_mask.unsqueeze(1).repeat(1, 1, x_B_C_T_H_W.shape[2], 1, 1)], dim=1 + ) + x_B_T_H_W_D = self.x_embedder(x_B_C_T_H_W) + + if self.extra_per_block_abs_pos_emb: + extra_pos_emb = self.extra_pos_embedder(x_B_T_H_W_D, fps=fps, device=x_B_C_T_H_W.device, dtype=x_B_C_T_H_W.dtype) + else: + extra_pos_emb = None + + if "rope" in self.pos_emb_cls.lower(): + return x_B_T_H_W_D, self.pos_embedder(x_B_T_H_W_D, fps=fps, device=x_B_C_T_H_W.device), extra_pos_emb + x_B_T_H_W_D = x_B_T_H_W_D + self.pos_embedder(x_B_T_H_W_D, device=x_B_C_T_H_W.device) # [B, T, H, W, D] + + return x_B_T_H_W_D, None, extra_pos_emb + + def unpatchify(self, x_B_T_H_W_M: torch.Tensor) -> torch.Tensor: + x_B_C_Tt_Hp_Wp = rearrange( + x_B_T_H_W_M, + "B T H W (p1 p2 t C) -> B C (T t) (H p1) (W p2)", + p1=self.patch_spatial, + p2=self.patch_spatial, + t=self.patch_temporal, + ) + return x_B_C_Tt_Hp_Wp + + def forward( + self, + x: torch.Tensor, + timesteps: torch.Tensor, + context: torch.Tensor, + fps: Optional[torch.Tensor] = None, + padding_mask: Optional[torch.Tensor] = None, + **kwargs, + ): + x_B_C_T_H_W = x + timesteps_B_T = timesteps + crossattn_emb = context + """ + Args: + x: (B, C, T, H, W) tensor of spatial-temp inputs + timesteps: (B, ) tensor of timesteps + crossattn_emb: (B, N, D) tensor of cross-attention embeddings + """ + x_B_T_H_W_D, rope_emb_L_1_1_D, extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D = self.prepare_embedded_sequence( + x_B_C_T_H_W, + fps=fps, + padding_mask=padding_mask, + ) + + if timesteps_B_T.ndim == 1: + timesteps_B_T = timesteps_B_T.unsqueeze(1) + t_embedding_B_T_D, adaln_lora_B_T_3D = self.t_embedder[1](self.t_embedder[0](timesteps_B_T).to(x_B_T_H_W_D.dtype)) + t_embedding_B_T_D = self.t_embedding_norm(t_embedding_B_T_D) + + # for logging purpose + affline_scale_log_info = {} + affline_scale_log_info["t_embedding_B_T_D"] = t_embedding_B_T_D.detach() + self.affline_scale_log_info = affline_scale_log_info + self.affline_emb = t_embedding_B_T_D + self.crossattn_emb = crossattn_emb + + if extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D is not None: + assert ( + x_B_T_H_W_D.shape == extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D.shape + ), f"{x_B_T_H_W_D.shape} != {extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D.shape}" + + block_kwargs = { + "rope_emb_L_1_1_D": rope_emb_L_1_1_D.unsqueeze(1).unsqueeze(0), + "adaln_lora_B_T_3D": adaln_lora_B_T_3D, + "extra_per_block_pos_emb": extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D, + } + for block in self.blocks: + x_B_T_H_W_D = block( + x_B_T_H_W_D, + t_embedding_B_T_D, + crossattn_emb, + **block_kwargs, + ) + + x_B_T_H_W_O = self.final_layer(x_B_T_H_W_D, t_embedding_B_T_D, adaln_lora_B_T_3D=adaln_lora_B_T_3D) + x_B_C_Tt_Hp_Wp = self.unpatchify(x_B_T_H_W_O) + return x_B_C_Tt_Hp_Wp diff --git a/comfy/model_base.py b/comfy/model_base.py index e0c2bcaa8..be72ddd17 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -34,6 +34,7 @@ import comfy.ldm.flux.model import comfy.ldm.lightricks.model import comfy.ldm.hunyuan_video.model import comfy.ldm.cosmos.model +import comfy.ldm.cosmos.predict2 import comfy.ldm.lumina.model import comfy.ldm.wan.model import comfy.ldm.hunyuan3d.model @@ -48,6 
+49,7 @@ import comfy.ops from enum import Enum from . import utils import comfy.latent_formats +import comfy.model_sampling import math from typing import TYPE_CHECKING if TYPE_CHECKING: @@ -63,38 +65,39 @@ class ModelType(Enum): V_PREDICTION_CONTINUOUS = 7 FLUX = 8 IMG_TO_IMG = 9 - - -from comfy.model_sampling import EPS, V_PREDICTION, EDM, ModelSamplingDiscrete, ModelSamplingContinuousEDM, StableCascadeSampling, ModelSamplingContinuousV + FLOW_COSMOS = 10 def model_sampling(model_config, model_type): - s = ModelSamplingDiscrete + s = comfy.model_sampling.ModelSamplingDiscrete if model_type == ModelType.EPS: - c = EPS + c = comfy.model_sampling.EPS elif model_type == ModelType.V_PREDICTION: - c = V_PREDICTION + c = comfy.model_sampling.V_PREDICTION elif model_type == ModelType.V_PREDICTION_EDM: - c = V_PREDICTION - s = ModelSamplingContinuousEDM + c = comfy.model_sampling.V_PREDICTION + s = comfy.model_sampling.ModelSamplingContinuousEDM elif model_type == ModelType.FLOW: c = comfy.model_sampling.CONST s = comfy.model_sampling.ModelSamplingDiscreteFlow elif model_type == ModelType.STABLE_CASCADE: - c = EPS - s = StableCascadeSampling + c = comfy.model_sampling.EPS + s = comfy.model_sampling.StableCascadeSampling elif model_type == ModelType.EDM: - c = EDM - s = ModelSamplingContinuousEDM + c = comfy.model_sampling.EDM + s = comfy.model_sampling.ModelSamplingContinuousEDM elif model_type == ModelType.V_PREDICTION_CONTINUOUS: - c = V_PREDICTION - s = ModelSamplingContinuousV + c = comfy.model_sampling.V_PREDICTION + s = comfy.model_sampling.ModelSamplingContinuousV elif model_type == ModelType.FLUX: c = comfy.model_sampling.CONST s = comfy.model_sampling.ModelSamplingFlux elif model_type == ModelType.IMG_TO_IMG: c = comfy.model_sampling.IMG_TO_IMG + elif model_type == ModelType.FLOW_COSMOS: + c = comfy.model_sampling.COSMOS_RFLOW + s = comfy.model_sampling.ModelSamplingCosmosRFlow class ModelSampling(s, c): pass @@ -998,6 +1001,22 @@ class CosmosVideo(BaseModel): latent_image = self.model_sampling.calculate_input(torch.tensor([sigma_noise_augmentation], device=latent_image.device, dtype=latent_image.dtype), latent_image) return latent_image * ((sigma ** 2 + self.model_sampling.sigma_data ** 2) ** 0.5) +class CosmosPredict2(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW_COSMOS, image_to_video=False, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.cosmos.predict2.MiniTrainDIT) + self.image_to_video = image_to_video + if self.image_to_video: + self.concat_keys = ("mask_inverted",) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + + out['fps'] = comfy.conds.CONDConstant(kwargs.get("frame_rate", None)) + return out + class Lumina2(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.lumina.model.NextDiT) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 74f539598..b8fef17ad 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -407,6 +407,53 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["text_emb_dim"] = 2048 return dit_config + if '{}blocks.0.mlp.layer1.weight'.format(key_prefix) in state_dict_keys: # Cosmos predict2 + dit_config = {} + dit_config["image_model"] = "cosmos_predict2" + 
dit_config["max_img_h"] = 240 + dit_config["max_img_w"] = 240 + dit_config["max_frames"] = 128 + concat_padding_mask = True + dit_config["in_channels"] = (state_dict['{}x_embedder.proj.1.weight'.format(key_prefix)].shape[1] // 4) - int(concat_padding_mask) + dit_config["out_channels"] = 16 + dit_config["patch_spatial"] = 2 + dit_config["patch_temporal"] = 1 + dit_config["model_channels"] = state_dict['{}x_embedder.proj.1.weight'.format(key_prefix)].shape[0] + dit_config["concat_padding_mask"] = concat_padding_mask + dit_config["crossattn_emb_channels"] = 1024 + dit_config["pos_emb_cls"] = "rope3d" + dit_config["pos_emb_learnable"] = True + dit_config["pos_emb_interpolation"] = "crop" + dit_config["min_fps"] = 1 + dit_config["max_fps"] = 30 + + dit_config["use_adaln_lora"] = True + dit_config["adaln_lora_dim"] = 256 + if dit_config["model_channels"] == 2048: + dit_config["num_blocks"] = 28 + dit_config["num_heads"] = 16 + elif dit_config["model_channels"] == 5120: + dit_config["num_blocks"] = 36 + dit_config["num_heads"] = 40 + + if dit_config["in_channels"] == 16: + dit_config["extra_per_block_abs_pos_emb"] = False + dit_config["rope_h_extrapolation_ratio"] = 4.0 + dit_config["rope_w_extrapolation_ratio"] = 4.0 + dit_config["rope_t_extrapolation_ratio"] = 1.0 + elif dit_config["in_channels"] == 17: + dit_config["extra_per_block_abs_pos_emb"] = False + dit_config["rope_h_extrapolation_ratio"] = 3.0 + dit_config["rope_w_extrapolation_ratio"] = 3.0 + dit_config["rope_t_extrapolation_ratio"] = 1.0 + + dit_config["extra_h_extrapolation_ratio"] = 1.0 + dit_config["extra_w_extrapolation_ratio"] = 1.0 + dit_config["extra_t_extrapolation_ratio"] = 1.0 + dit_config["rope_enable_fps_modulation"] = False + + return dit_config + if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys: return None diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index 7e7291476..b240b7f29 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -77,6 +77,25 @@ class IMG_TO_IMG(X0): def calculate_input(self, sigma, noise): return noise +class COSMOS_RFLOW: + def calculate_input(self, sigma, noise): + sigma = (sigma / (sigma + 1)) + sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + return noise * (1.0 - sigma) + + def calculate_denoised(self, sigma, model_output, model_input): + sigma = (sigma / (sigma + 1)) + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input * (1.0 - sigma) - model_output * sigma + + def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): + sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + noise = noise * sigma + noise += latent_image + return noise + + def inverse_noise_scaling(self, sigma, latent): + return latent class ModelSamplingDiscrete(torch.nn.Module): def __init__(self, model_config=None, zsnr=None): @@ -350,3 +369,15 @@ class ModelSamplingFlux(torch.nn.Module): if percent >= 1.0: return 0.0 return flux_time_shift(self.shift, 1.0, 1.0 - percent) + + +class ModelSamplingCosmosRFlow(ModelSamplingContinuousEDM): + def timestep(self, sigma): + return sigma / (sigma + 1) + + def sigma(self, timestep): + sigma_max = self.sigma_max + if timestep >= (sigma_max / (sigma_max + 1)): + return sigma_max + + return timestep / (1 - timestep) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index efe2e6b8f..2a213f24a 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -908,6 +908,44 @@ class CosmosI2V(CosmosT2V): out = 
model_base.CosmosVideo(self, image_to_video=True, device=device) return out +class CosmosT2IPredict2(supported_models_base.BASE): + unet_config = { + "image_model": "cosmos_predict2", + "in_channels": 16, + } + + sampling_settings = { + "sigma_data": 1.0, + "sigma_max": 80.0, + "sigma_min": 0.002, + } + + unet_extra_config = {} + latent_format = latent_formats.Wan21 + + memory_usage_factor = 1.6 #TODO + + supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] #TODO + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.CosmosPredict2(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.cosmos.CosmosT5Tokenizer, comfy.text_encoders.cosmos.te(**t5_detect)) + +class CosmosI2VPredict2(CosmosT2IPredict2): + unet_config = { + "image_model": "cosmos_predict2", + "in_channels": 17, + } + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.CosmosPredict2(self, image_to_video=True, device=device) + return out + class Lumina2(supported_models_base.BASE): unet_config = { "image_model": "lumina2", @@ -1139,6 +1177,6 @@ class ACEStep(supported_models_base.BASE): def clip_target(self, state_dict={}): return supported_models_base.ClipTarget(comfy.text_encoders.ace.AceT5Tokenizer, comfy.text_encoders.ace.AceT5Model) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep] models += [SVD_img2vid] From c69af655aa80b3bdeb812321334909cd7f9054f1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 13 Jun 2025 04:30:18 -0700 Subject: [PATCH 0241/1073] Uncap cosmos predict2 res and fix mem estimation. 
(#8518) --- comfy/ldm/cosmos/position_embedding.py | 13 +++++-------- comfy/supported_models.py | 8 ++++++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/comfy/ldm/cosmos/position_embedding.py b/comfy/ldm/cosmos/position_embedding.py index 44197a597..c925811d4 100644 --- a/comfy/ldm/cosmos/position_embedding.py +++ b/comfy/ldm/cosmos/position_embedding.py @@ -72,7 +72,6 @@ class VideoRopePosition3DEmb(VideoPositionEmb): ): del kwargs super().__init__() - self.register_buffer("seq", torch.arange(max(len_h, len_w, len_t), dtype=torch.float, device=device)) self.base_fps = base_fps self.max_h = len_h self.max_w = len_w @@ -134,21 +133,19 @@ class VideoRopePosition3DEmb(VideoPositionEmb): temporal_freqs = 1.0 / (t_theta**self.dim_temporal_range.to(device=device)) B, T, H, W, _ = B_T_H_W_C + seq = torch.arange(max(H, W, T), dtype=torch.float, device=device) uniform_fps = (fps is None) or isinstance(fps, (int, float)) or (fps.min() == fps.max()) assert ( uniform_fps or B == 1 or T == 1 ), "For video batch, batch size should be 1 for non-uniform fps. For image batch, T should be 1" - assert ( - H <= self.max_h and W <= self.max_w - ), f"Input dimensions (H={H}, W={W}) exceed the maximum dimensions (max_h={self.max_h}, max_w={self.max_w})" - half_emb_h = torch.outer(self.seq[:H].to(device=device), h_spatial_freqs) - half_emb_w = torch.outer(self.seq[:W].to(device=device), w_spatial_freqs) + half_emb_h = torch.outer(seq[:H].to(device=device), h_spatial_freqs) + half_emb_w = torch.outer(seq[:W].to(device=device), w_spatial_freqs) # apply sequence scaling in temporal dimension if fps is None or self.enable_fps_modulation is False: # image case - half_emb_t = torch.outer(self.seq[:T].to(device=device), temporal_freqs) + half_emb_t = torch.outer(seq[:T].to(device=device), temporal_freqs) else: - half_emb_t = torch.outer(self.seq[:T].to(device=device) / fps * self.base_fps, temporal_freqs) + half_emb_t = torch.outer(seq[:T].to(device=device) / fps * self.base_fps, temporal_freqs) half_emb_h = torch.stack([torch.cos(half_emb_h), -torch.sin(half_emb_h), torch.sin(half_emb_h), torch.cos(half_emb_h)], dim=-1) half_emb_w = torch.stack([torch.cos(half_emb_w), -torch.sin(half_emb_w), torch.sin(half_emb_w), torch.cos(half_emb_w)], dim=-1) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 2a213f24a..19f25e337 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -923,9 +923,13 @@ class CosmosT2IPredict2(supported_models_base.BASE): unet_extra_config = {} latent_format = latent_formats.Wan21 - memory_usage_factor = 1.6 #TODO + memory_usage_factor = 1.0 - supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] #TODO + supported_inference_dtypes = [torch.bfloat16, torch.float32] + + def __init__(self, unet_config): + super().__init__(unet_config) + self.memory_usage_factor = (unet_config.get("model_channels", 2048) / 2048) * 0.9 def get_model(self, state_dict, prefix="", device=None): out = model_base.CosmosPredict2(self, device=device) From 5bf69bde3537800886b9574d51c655a232a05873 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 13 Jun 2025 14:47:52 -0700 Subject: [PATCH 0242/1073] Add cosmos_rflow option to ModelSamplingContinuousEDM node. (#8523) This is for the cosmos predict2 model. 
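This wires up the COSMOS_RFLOW / ModelSamplingCosmosRFlow pair added in the predict2 patch above: an EDM-style sigma is mapped to a flow time t = sigma / (sigma + 1), with inverse sigma = t / (1 - t), clamped at sigma_max. A standalone sketch of that round trip (just the arithmetic, not the actual classes; sigma_max = 80.0 matches the CosmosT2IPredict2 sampling settings):

import torch

def timestep(sigma: torch.Tensor) -> torch.Tensor:
    return sigma / (sigma + 1)

def sigma_from_timestep(t: torch.Tensor, sigma_max: float = 80.0) -> torch.Tensor:
    # beyond the flow time corresponding to sigma_max, clamp to sigma_max
    if t >= sigma_max / (sigma_max + 1):
        return torch.tensor(sigma_max)
    return t / (1 - t)

s = torch.tensor(2.0)
t = timestep(s)  # 2/3
assert torch.isclose(sigma_from_timestep(t), s)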
--- comfy_extras/nodes_model_advanced.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 71a652ffa..ae5d2c563 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -189,7 +189,7 @@ class ModelSamplingContinuousEDM: @classmethod def INPUT_TYPES(s): return {"required": { "model": ("MODEL",), - "sampling": (["v_prediction", "edm", "edm_playground_v2.5", "eps"],), + "sampling": (["v_prediction", "edm", "edm_playground_v2.5", "eps", "cosmos_rflow"],), "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}), "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}), }} @@ -202,6 +202,7 @@ class ModelSamplingContinuousEDM: def patch(self, model, sampling, sigma_max, sigma_min): m = model.clone() + sampling_base = comfy.model_sampling.ModelSamplingContinuousEDM latent_format = None sigma_data = 1.0 if sampling == "eps": @@ -215,8 +216,11 @@ class ModelSamplingContinuousEDM: sampling_type = comfy.model_sampling.EDM sigma_data = 0.5 latent_format = comfy.latent_formats.SDXL_Playground_2_5() + elif sampling == "cosmos_rflow": + sampling_type = comfy.model_sampling.COSMOS_RFLOW + sampling_base = comfy.model_sampling.ModelSamplingCosmosRFlow - class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousEDM, sampling_type): + class ModelSamplingAdvanced(sampling_base, sampling_type): pass model_sampling = ModelSamplingAdvanced(model.model.model_config) From 520eb77b721c4d56cb0e8a29cf452b0f3a4d857a Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sat, 14 Jun 2025 07:25:59 +0800 Subject: [PATCH 0243/1073] LoRA Trainer: LoRA training node in weight adapter scheme (#8446) --- comfy/comfy_types/node_typing.py | 2 + comfy/ldm/modules/attention.py | 6 +- comfy/model_patcher.py | 19 +- comfy/sd.py | 23 +- comfy/weight_adapter/__init__.py | 8 +- comfy/weight_adapter/base.py | 35 +- comfy/weight_adapter/lora.py | 66 +- comfy_extras/nodes_train.py | 705 ++++++++++++++++++++++ execution.py | 28 +- folder_paths.py | 29 + nodes.py | 1 + tests-unit/folder_paths_test/misc_test.py | 51 ++ 12 files changed, 949 insertions(+), 24 deletions(-) create mode 100644 comfy_extras/nodes_train.py create mode 100644 tests-unit/folder_paths_test/misc_test.py diff --git a/comfy/comfy_types/node_typing.py b/comfy/comfy_types/node_typing.py index 470eb9fdb..071b98332 100644 --- a/comfy/comfy_types/node_typing.py +++ b/comfy/comfy_types/node_typing.py @@ -37,6 +37,8 @@ class IO(StrEnum): CONTROL_NET = "CONTROL_NET" VAE = "VAE" MODEL = "MODEL" + LORA_MODEL = "LORA_MODEL" + LOSS_MAP = "LOSS_MAP" CLIP_VISION = "CLIP_VISION" CLIP_VISION_OUTPUT = "CLIP_VISION_OUTPUT" STYLE_MODEL = "STYLE_MODEL" diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 2cb77d85d..35d2270ee 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -753,7 +753,7 @@ class BasicTransformerBlock(nn.Module): for p in patch: n = p(n, extra_options) - x += n + x = n + x if "middle_patch" in transformer_patches: patch = transformer_patches["middle_patch"] for p in patch: @@ -793,12 +793,12 @@ class BasicTransformerBlock(nn.Module): for p in patch: n = p(n, extra_options) - x += n + x = n + x if self.is_res: x_skip = x x = self.ff(self.norm3(x)) if self.is_res: - x += x_skip + x = x_skip + x return x diff --git 
a/comfy/model_patcher.py b/comfy/model_patcher.py index b7cb12dfc..b1d6d4395 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -17,23 +17,26 @@ """ from __future__ import annotations -from typing import Optional, Callable -import torch + +import collections import copy import inspect import logging -import uuid -import collections import math +import uuid +from typing import Callable, Optional + +import torch -import comfy.utils import comfy.float -import comfy.model_management -import comfy.lora import comfy.hooks +import comfy.lora +import comfy.model_management import comfy.patcher_extension -from comfy.patcher_extension import CallbacksMP, WrappersMP, PatcherInjection +import comfy.utils from comfy.comfy_types import UnetWrapperFunction +from comfy.patcher_extension import CallbacksMP, PatcherInjection, WrappersMP + def string_to_seed(data): crc = 0xFFFFFFFF diff --git a/comfy/sd.py b/comfy/sd.py index e98a3aa87..cd13ab5f0 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -1081,7 +1081,28 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c return (model_patcher, clip, vae, clipvision) -def load_diffusion_model_state_dict(sd, model_options={}): #load unet in diffusers or regular format +def load_diffusion_model_state_dict(sd, model_options={}): + """ + Loads a UNet diffusion model from a state dictionary, supporting both diffusers and regular formats. + + Args: + sd (dict): State dictionary containing model weights and configuration + model_options (dict, optional): Additional options for model loading. Supports: + - dtype: Override model data type + - custom_operations: Custom model operations + - fp8_optimizations: Enable FP8 optimizations + + Returns: + ModelPatcher: A wrapped model instance that handles device management and weight loading. + Returns None if the model configuration cannot be detected. + + The function: + 1. Detects and handles different model formats (regular, diffusers, mmdit) + 2. Configures model dtype based on parameters and device capabilities + 3. Handles weight conversion and device placement + 4. Manages model optimization settings + 5. 
Loads weights and returns a device-managed model instance + """ dtype = model_options.get("dtype", None) #Allow loading unets from checkpoint files diff --git a/comfy/weight_adapter/__init__.py b/comfy/weight_adapter/__init__.py index d2a1d0151..560b82be3 100644 --- a/comfy/weight_adapter/__init__.py +++ b/comfy/weight_adapter/__init__.py @@ -1,4 +1,4 @@ -from .base import WeightAdapterBase +from .base import WeightAdapterBase, WeightAdapterTrainBase from .lora import LoRAAdapter from .loha import LoHaAdapter from .lokr import LoKrAdapter @@ -15,3 +15,9 @@ adapters: list[type[WeightAdapterBase]] = [ OFTAdapter, BOFTAdapter, ] + +__all__ = [ + "WeightAdapterBase", + "WeightAdapterTrainBase", + "adapters" +] + [a.__name__ for a in adapters] diff --git a/comfy/weight_adapter/base.py b/comfy/weight_adapter/base.py index 29873519d..b5c7db423 100644 --- a/comfy/weight_adapter/base.py +++ b/comfy/weight_adapter/base.py @@ -12,12 +12,20 @@ class WeightAdapterBase: weights: list[torch.Tensor] @classmethod - def load(cls, x: str, lora: dict[str, torch.Tensor]) -> Optional["WeightAdapterBase"]: + def load(cls, x: str, lora: dict[str, torch.Tensor], alpha: float, dora_scale: torch.Tensor) -> Optional["WeightAdapterBase"]: raise NotImplementedError def to_train(self) -> "WeightAdapterTrainBase": raise NotImplementedError + @classmethod + def create_train(cls, weight, *args) -> "WeightAdapterTrainBase": + """ + weight: The original weight tensor to be modified. + *args: Additional arguments for configuration, such as rank, alpha etc. + """ + raise NotImplementedError + def calculate_weight( self, weight, @@ -33,10 +41,22 @@ class WeightAdapterBase: class WeightAdapterTrainBase(nn.Module): + # We follow the scheme of PR #7032 def __init__(self): super().__init__() - # [TODO] Collaborate with LoRA training PR #7032 + def __call__(self, w): + """ + w: The original weight tensor to be modified. 
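+        Returns the adapted weight tensor, with the same shape and dtype as w.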
+ """ + raise NotImplementedError + + def passive_memory_usage(self): + raise NotImplementedError("passive_memory_usage is not implemented") + + def move_to(self, device): + self.to(device) + return self.passive_memory_usage() def weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype, function): @@ -102,3 +122,14 @@ def pad_tensor_to_shape(tensor: torch.Tensor, new_shape: list[int]) -> torch.Ten padded_tensor[new_slices] = tensor[orig_slices] return padded_tensor + + +def tucker_weight_from_conv(up, down, mid): + up = up.reshape(up.size(0), up.size(1)) + down = down.reshape(down.size(0), down.size(1)) + return torch.einsum("m n ..., i m, n j -> i j ...", mid, up, down) + + +def tucker_weight(wa, wb, t): + temp = torch.einsum("i j ..., j r -> i r ...", t, wb) + return torch.einsum("i j ..., i r -> r j ...", temp, wa) diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py index b2e623924..729dbd9e6 100644 --- a/comfy/weight_adapter/lora.py +++ b/comfy/weight_adapter/lora.py @@ -3,7 +3,56 @@ from typing import Optional import torch import comfy.model_management -from .base import WeightAdapterBase, weight_decompose, pad_tensor_to_shape +from .base import ( + WeightAdapterBase, + WeightAdapterTrainBase, + weight_decompose, + pad_tensor_to_shape, + tucker_weight_from_conv, +) + + +class LoraDiff(WeightAdapterTrainBase): + def __init__(self, weights): + super().__init__() + mat1, mat2, alpha, mid, dora_scale, reshape = weights + out_dim, rank = mat1.shape[0], mat1.shape[1] + rank, in_dim = mat2.shape[0], mat2.shape[1] + if mid is not None: + convdim = mid.ndim - 2 + layer = ( + torch.nn.Conv1d, + torch.nn.Conv2d, + torch.nn.Conv3d + )[convdim] + else: + layer = torch.nn.Linear + self.lora_up = layer(rank, out_dim, bias=False) + self.lora_down = layer(in_dim, rank, bias=False) + self.lora_up.weight.data.copy_(mat1) + self.lora_down.weight.data.copy_(mat2) + if mid is not None: + self.lora_mid = layer(mid, rank, bias=False) + self.lora_mid.weight.data.copy_(mid) + else: + self.lora_mid = None + self.rank = rank + self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False) + + def __call__(self, w): + org_dtype = w.dtype + if self.lora_mid is None: + diff = self.lora_up.weight @ self.lora_down.weight + else: + diff = tucker_weight_from_conv( + self.lora_up.weight, self.lora_down.weight, self.lora_mid.weight + ) + scale = self.alpha / self.rank + weight = w + scale * diff.reshape(w.shape) + return weight.to(org_dtype) + + def passive_memory_usage(self): + return sum(param.numel() * param.element_size() for param in self.parameters()) class LoRAAdapter(WeightAdapterBase): @@ -13,6 +62,21 @@ class LoRAAdapter(WeightAdapterBase): self.loaded_keys = loaded_keys self.weights = weights + @classmethod + def create_train(cls, weight, rank=1, alpha=1.0): + out_dim = weight.shape[0] + in_dim = weight.shape[1:].numel() + mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=weight.dtype) + mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=weight.dtype) + torch.nn.init.kaiming_uniform_(mat1, a=5**0.5) + torch.nn.init.constant_(mat2, 0.0) + return LoraDiff( + (mat1, mat2, alpha, None, None, None) + ) + + def to_train(self): + return LoraDiff(self.weights) + @classmethod def load( cls, diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py new file mode 100644 index 000000000..17a03a1e0 --- /dev/null +++ b/comfy_extras/nodes_train.py @@ -0,0 +1,705 @@ +import datetime +import json +import logging +import os + +import 
numpy as np
+import safetensors
+import torch
+from PIL import Image, ImageDraw, ImageFont
+from PIL.PngImagePlugin import PngInfo
+import torch.utils.checkpoint
+import tqdm
+
+import comfy.samplers
+import comfy.sd
+import comfy.utils
+import comfy.model_management
+import comfy_extras.nodes_custom_sampler
+import folder_paths
+import node_helpers
+from comfy.cli_args import args
+from comfy.comfy_types.node_typing import IO
+from comfy.weight_adapter import adapters
+
+
+class TrainSampler(comfy.samplers.Sampler):
+
+    def __init__(self, loss_fn, optimizer, loss_callback=None):
+        self.loss_fn = loss_fn
+        self.optimizer = optimizer
+        self.loss_callback = loss_callback
+
+    def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
+        self.optimizer.zero_grad()
+        noise = model_wrap.inner_model.model_sampling.noise_scaling(sigmas, noise, latent_image, False)
+        latent = model_wrap.inner_model.model_sampling.noise_scaling(
+            torch.zeros_like(sigmas),
+            torch.zeros_like(noise, requires_grad=True),
+            latent_image,
+            False
+        )
+
+        # Ensure model is in training mode and computing gradients
+        # x0 pred
+        denoised = model_wrap(noise, sigmas, **extra_args)
+        try:
+            loss = self.loss_fn(denoised, latent.clone())
+        except RuntimeError as e:
+            if "does not require grad and does not have a grad_fn" in str(e):
+                logging.warning("This is likely because the model was loaded in inference mode.")
+            # Re-raise so loss.backward() is never reached with `loss` undefined.
+            raise
+        loss.backward()
+        if self.loss_callback:
+            self.loss_callback(loss.item())
+
+        self.optimizer.step()
+        # torch.cuda.memory._dump_snapshot("trainn.pickle")
+        # torch.cuda.memory._record_memory_history(enabled=None)
+        return torch.zeros_like(latent_image)
+
+
+class BiasDiff(torch.nn.Module):
+    def __init__(self, bias):
+        super().__init__()
+        self.bias = bias
+
+    def __call__(self, b):
+        org_dtype = b.dtype
+        return (b.to(self.bias) + self.bias).to(org_dtype)
+
+    def passive_memory_usage(self):
+        return self.bias.nelement() * self.bias.element_size()
+
+    def move_to(self, device):
+        self.to(device=device)
+        return self.passive_memory_usage()
+
+
+def load_and_process_images(image_files, input_dir, resize_method="None"):
+    """Utility function to load and process a list of images.
+
+    Args:
+        image_files: List of image filenames
+        input_dir: Base directory containing the images
+        resize_method: How to handle images of different sizes ("None", "Stretch", "Crop", "Pad")
+
+    Returns:
+        torch.Tensor: Batch of processed images
+    """
+    if not image_files:
+        raise ValueError("No valid images found in input")
+
+    output_images = []
+    w, h = None, None
+
+    for file in image_files:
+        image_path = os.path.join(input_dir, file)
+        img = node_helpers.pillow(Image.open, image_path)
+
+        if img.mode == "I":
+            img = img.point(lambda i: i * (1 / 255))
+        img = img.convert("RGB")
+
+        if w is None and h is None:
+            w, h = img.size[0], img.size[1]
+
+        # Resize image to first image
+        if img.size[0] != w or img.size[1] != h:
+            if resize_method == "Stretch":
+                img = img.resize((w, h), Image.Resampling.LANCZOS)
+            elif resize_method == "Crop":
+                img = img.crop((0, 0, w, h))
+            elif resize_method == "Pad":
+                img = img.resize((w, h), Image.Resampling.LANCZOS)
+            elif resize_method == "None":
+                raise ValueError(
+                    "Your input image size does not match the first image in the dataset. Either select a valid resize method or use the same size for all images."
+                )
+
+        img_array = np.array(img).astype(np.float32) / 255.0
+        img_tensor = torch.from_numpy(img_array)[None,]
+        output_images.append(img_tensor)
+
+    return torch.cat(output_images, dim=0)
+
+
+class LoadImageSetNode:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "images": (
+                    [
+                        f
+                        for f in os.listdir(folder_paths.get_input_directory())
+                        if f.endswith((".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif", ".jpe", ".apng", ".tif", ".tiff"))
+                    ],
+                    {"image_upload": True, "allow_batch": True},
+                )
+            },
+            "optional": {
+                "resize_method": (
+                    ["None", "Stretch", "Crop", "Pad"],
+                    {"default": "None"},
+                ),
+            },
+        }
+
+    INPUT_IS_LIST = True
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "load_images"
+    CATEGORY = "loaders"
+    EXPERIMENTAL = True
+    DESCRIPTION = "Loads a batch of images from a directory for training."
+
+    @classmethod
+    def VALIDATE_INPUTS(s, images, resize_method):
+        filenames = images[0] if isinstance(images[0], list) else images
+
+        for image in filenames:
+            if not folder_paths.exists_annotated_filepath(image):
+                return "Invalid image file: {}".format(image)
+        return True
+
+    def load_images(self, images, resize_method):
+        # The parameter name must match the "images" input declared above; with
+        # INPUT_IS_LIST every input arrives wrapped in a list, so unwrap both.
+        filenames = images[0] if isinstance(images[0], list) else images
+        resize_method = resize_method[0] if isinstance(resize_method, list) else resize_method
+        input_dir = folder_paths.get_input_directory()
+        valid_extensions = [".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif", ".jpe", ".apng", ".tif", ".tiff"]
+        image_files = [
+            f
+            for f in filenames
+            if any(f.lower().endswith(ext) for ext in valid_extensions)
+        ]
+        output_tensor = load_and_process_images(image_files, input_dir, resize_method)
+        return (output_tensor,)
+
+
+class LoadImageSetFromFolderNode:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "folder": (folder_paths.get_input_subfolders(), {"tooltip": "The folder to load images from."})
+            },
+            "optional": {
+                "resize_method": (
+                    ["None", "Stretch", "Crop", "Pad"],
+                    {"default": "None"},
+                ),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "load_images"
+    CATEGORY = "loaders"
+    EXPERIMENTAL = True
+    DESCRIPTION = "Loads a batch of images from a folder for training."
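+    # Usage sketch ("portraits" is a hypothetical subfolder of the input
+    # directory): `batch, = LoadImageSetFromFolderNode().load_images("portraits", "Stretch")`
+    # yields a float32 tensor shaped [N, H, W, C] with values in [0, 1].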
+
+    def load_images(self, folder, resize_method):
+        sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder)
+        valid_extensions = [".png", ".jpg", ".jpeg", ".webp"]
+        image_files = [
+            f
+            for f in os.listdir(sub_input_dir)
+            if any(f.lower().endswith(ext) for ext in valid_extensions)
+        ]
+        output_tensor = load_and_process_images(image_files, sub_input_dir, resize_method)
+        return (output_tensor,)
+
+
+def draw_loss_graph(loss_map, steps):
+    width, height = 500, 300
+    img = Image.new("RGB", (width, height), "white")
+    draw = ImageDraw.Draw(img)
+
+    min_loss, max_loss = min(loss_map.values()), max(loss_map.values())
+    # Guard against a zero range (constant loss) to avoid division by zero.
+    loss_range = max(max_loss - min_loss, 1e-8)
+    scaled_loss = [(l - min_loss) / loss_range for l in loss_map.values()]
+
+    prev_point = (0, height - int(scaled_loss[0] * height))
+    for i, l in enumerate(scaled_loss[1:], start=1):
+        x = int(i / (steps - 1) * width)
+        y = height - int(l * height)
+        draw.line([prev_point, (x, y)], fill="blue", width=2)
+        prev_point = (x, y)
+
+    return img
+
+
+def find_all_highest_child_module_with_forward(model: torch.nn.Module, result=None, name=None):
+    if result is None:
+        result = []
+    elif hasattr(model, "forward") and not isinstance(model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)):
+        result.append(model)
+        logging.debug(f"Found module with forward: {name} ({model.__class__.__name__})")
+        return result
+    name = name or "root"
+    for next_name, child in model.named_children():
+        find_all_highest_child_module_with_forward(child, result, f"{name}.{next_name}")
+    return result
+
+
+def patch(m):
+    if not hasattr(m, "forward"):
+        return
+    org_forward = m.forward
+    def fwd(args, kwargs):
+        return org_forward(*args, **kwargs)
+    def checkpointing_fwd(*args, **kwargs):
+        return torch.utils.checkpoint.checkpoint(
+            fwd, args, kwargs, use_reentrant=False
+        )
+    m.org_forward = org_forward
+    m.forward = checkpointing_fwd
+
+
+def unpatch(m):
+    if hasattr(m, "org_forward"):
+        m.forward = m.org_forward
+        del m.org_forward
+
+
+class TrainLoraNode:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": (IO.MODEL, {"tooltip": "The model to train the LoRA on."}),
+                "latents": (
+                    "LATENT",
+                    {
+                        "tooltip": "The latents to use for training, serving as the dataset/input of the model."
+ }, + ), + "positive": ( + IO.CONDITIONING, + {"tooltip": "The positive conditioning to use for training."}, + ), + "batch_size": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 10000, + "step": 1, + "tooltip": "The batch size to use for training.", + }, + ), + "steps": ( + IO.INT, + { + "default": 16, + "min": 1, + "max": 100000, + "tooltip": "The number of steps to train the LoRA for.", + }, + ), + "learning_rate": ( + IO.FLOAT, + { + "default": 0.0005, + "min": 0.0000001, + "max": 1.0, + "step": 0.000001, + "tooltip": "The learning rate to use for training.", + }, + ), + "rank": ( + IO.INT, + { + "default": 8, + "min": 1, + "max": 128, + "tooltip": "The rank of the LoRA layers.", + }, + ), + "optimizer": ( + ["AdamW", "Adam", "SGD", "RMSprop"], + { + "default": "AdamW", + "tooltip": "The optimizer to use for training.", + }, + ), + "loss_function": ( + ["MSE", "L1", "Huber", "SmoothL1"], + { + "default": "MSE", + "tooltip": "The loss function to use for training.", + }, + ), + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "tooltip": "The seed to use for training (used in generator for LoRA weight initialization and noise sampling)", + }, + ), + "training_dtype": ( + ["bf16", "fp32"], + {"default": "bf16", "tooltip": "The dtype to use for training."}, + ), + "lora_dtype": ( + ["bf16", "fp32"], + {"default": "bf16", "tooltip": "The dtype to use for lora."}, + ), + "existing_lora": ( + folder_paths.get_filename_list("loras") + ["[None]"], + { + "default": "[None]", + "tooltip": "The existing LoRA to append to. Set to None for new LoRA.", + }, + ), + }, + } + + RETURN_TYPES = (IO.MODEL, IO.LORA_MODEL, IO.LOSS_MAP, IO.INT) + RETURN_NAMES = ("model_with_lora", "lora", "loss", "steps") + FUNCTION = "train" + CATEGORY = "training" + EXPERIMENTAL = True + + def train( + self, + model, + latents, + positive, + batch_size, + steps, + learning_rate, + rank, + optimizer, + loss_function, + seed, + training_dtype, + lora_dtype, + existing_lora, + ): + mp = model.clone() + dtype = node_helpers.string_to_torch_dtype(training_dtype) + lora_dtype = node_helpers.string_to_torch_dtype(lora_dtype) + mp.set_model_compute_dtype(dtype) + + latents = latents["samples"].to(dtype) + num_images = latents.shape[0] + + with torch.inference_mode(False): + lora_sd = {} + generator = torch.Generator() + generator.manual_seed(seed) + + # Load existing LoRA weights if provided + existing_weights = {} + existing_steps = 0 + if existing_lora != "[None]": + lora_path = folder_paths.get_full_path_or_raise("loras", existing_lora) + # Extract steps from filename like "trained_lora_10_steps_20250225_203716" + existing_steps = int(existing_lora.split("_steps_")[0].split("_")[-1]) + if lora_path: + existing_weights = comfy.utils.load_torch_file(lora_path) + + all_weight_adapters = [] + for n, m in mp.model.named_modules(): + if hasattr(m, "weight_function"): + if m.weight is not None: + key = "{}.weight".format(n) + shape = m.weight.shape + if len(shape) >= 2: + alpha = float(existing_weights.get(f"{key}.alpha", 1.0)) + dora_scale = existing_weights.get( + f"{key}.dora_scale", None + ) + for adapter_cls in adapters: + existing_adapter = adapter_cls.load( + n, existing_weights, alpha, dora_scale + ) + if existing_adapter is not None: + break + else: + # If no existing adapter found, use LoRA + # We will add algo option in the future + existing_adapter = None + adapter_cls = adapters[0] + + if existing_adapter is not None: + train_adapter = existing_adapter.to_train().to(lora_dtype) + 
else:
+                            # Use LoRA with alpha=1.0 by default
+                            train_adapter = adapter_cls.create_train(
+                                m.weight, rank=rank, alpha=1.0
+                            ).to(lora_dtype)
+                        for name, parameter in train_adapter.named_parameters():
+                            lora_sd[f"{n}.{name}"] = parameter
+
+                        mp.add_weight_wrapper(key, train_adapter)
+                        all_weight_adapters.append(train_adapter)
+                    else:
+                        diff = torch.nn.Parameter(
+                            torch.zeros(
+                                m.weight.shape, dtype=lora_dtype, requires_grad=True
+                            )
+                        )
+                        diff_module = BiasDiff(diff)
+                        mp.add_weight_wrapper(key, diff_module)
+                        all_weight_adapters.append(diff_module)
+                        lora_sd["{}.diff".format(n)] = diff
+                    if hasattr(m, "bias") and m.bias is not None:
+                        key = "{}.bias".format(n)
+                        bias = torch.nn.Parameter(
+                            torch.zeros(m.bias.shape, dtype=lora_dtype, requires_grad=True)
+                        )
+                        bias_module = BiasDiff(bias)
+                        lora_sd["{}.diff_b".format(n)] = bias
+                        mp.add_weight_wrapper(key, bias_module)
+                        all_weight_adapters.append(bias_module)
+
+            if optimizer == "Adam":
+                optimizer = torch.optim.Adam(lora_sd.values(), lr=learning_rate)
+            elif optimizer == "AdamW":
+                optimizer = torch.optim.AdamW(lora_sd.values(), lr=learning_rate)
+            elif optimizer == "SGD":
+                optimizer = torch.optim.SGD(lora_sd.values(), lr=learning_rate)
+            elif optimizer == "RMSprop":
+                optimizer = torch.optim.RMSprop(lora_sd.values(), lr=learning_rate)
+
+            # Setup loss function based on selection
+            if loss_function == "MSE":
+                criterion = torch.nn.MSELoss()
+            elif loss_function == "L1":
+                criterion = torch.nn.L1Loss()
+            elif loss_function == "Huber":
+                criterion = torch.nn.HuberLoss()
+            elif loss_function == "SmoothL1":
+                criterion = torch.nn.SmoothL1Loss()
+
+            # setup models
+            for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model):
+                patch(m)
+            comfy.model_management.load_models_gpu([mp], memory_required=1e20, force_full_load=True)
+
+            # Setup sampler and guider like in test script
+            loss_map = {"loss": []}
+            def loss_callback(loss):
+                loss_map["loss"].append(loss)
+                pbar.set_postfix({"loss": f"{loss:.4f}"})
+            train_sampler = TrainSampler(
+                criterion, optimizer, loss_callback=loss_callback
+            )
+            guider = comfy_extras.nodes_custom_sampler.Guider_Basic(mp)
+            guider.set_conds(positive)  # Set conditioning from input
+            ss = comfy_extras.nodes_custom_sampler.SamplerCustomAdvanced()
+
+            # NOTE: image loading currently resizes everything to match the first image in the dataset.
+
+            # Training loop
+            torch.cuda.empty_cache()
+            try:
+                for step in (pbar:=tqdm.trange(steps, desc="Training LoRA", smoothing=0.01, disable=not comfy.utils.PROGRESS_BAR_ENABLED)):
+                    # Generate random sigma
+                    sigma = mp.model.model_sampling.percent_to_sigma(
+                        torch.rand((1,)).item()
+                    )
+                    sigma = torch.tensor([sigma])
+
+                    noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(step * 1000 + seed)
+
+                    indices = torch.randperm(num_images)[:batch_size]
+                    ss.sample(
+                        noise, guider, train_sampler, sigma, {"samples": latents[indices].clone()}
+                    )
+            finally:
+                for m in mp.model.modules():
+                    unpatch(m)
+                del ss, train_sampler, optimizer
+                torch.cuda.empty_cache()
+
+            for adapter in all_weight_adapters:
+                adapter.requires_grad_(False)
+
+            for param in lora_sd:
+                lora_sd[param] = lora_sd[param].to(lora_dtype)
+
+        return (mp, lora_sd, loss_map, steps + existing_steps)
+
+
+class LoraModelLoader:
+    def __init__(self):
+        self.loaded_lora = None
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
+                "lora": (IO.LORA_MODEL, {"tooltip": "The LoRA model to apply to the diffusion model."}),
+
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}), + } + } + + RETURN_TYPES = ("MODEL",) + OUTPUT_TOOLTIPS = ("The modified diffusion model.",) + FUNCTION = "load_lora_model" + + CATEGORY = "loaders" + DESCRIPTION = "Load Trained LoRA weights from Train LoRA node." + EXPERIMENTAL = True + + def load_lora_model(self, model, lora, strength_model): + if strength_model == 0: + return (model, ) + + model_lora, _ = comfy.sd.load_lora_for_models(model, None, lora, strength_model, 0) + return (model_lora, ) + + +class SaveLoRA: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "lora": ( + IO.LORA_MODEL, + { + "tooltip": "The LoRA model to save. Do not use the model with LoRA layers." + }, + ), + "prefix": ( + "STRING", + { + "default": "trained_lora", + "tooltip": "The prefix to use for the saved LoRA file.", + }, + ), + }, + "optional": { + "steps": ( + IO.INT, + { + "forceInput": True, + "tooltip": "Optional: The number of steps to LoRA has been trained for, used to name the saved file.", + }, + ), + }, + } + + RETURN_TYPES = () + FUNCTION = "save" + CATEGORY = "loaders" + EXPERIMENTAL = True + OUTPUT_NODE = True + + def save(self, lora, prefix, steps=None): + date = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + if steps is None: + output_file = f"models/loras/{prefix}_{date}_lora.safetensors" + else: + output_file = f"models/loras/{prefix}_{steps}_steps_{date}_lora.safetensors" + safetensors.torch.save_file(lora, output_file) + return {} + + +class LossGraphNode: + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "loss": (IO.LOSS_MAP, {"default": {}}), + "filename_prefix": (IO.STRING, {"default": "loss_graph"}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "plot_loss" + OUTPUT_NODE = True + CATEGORY = "training" + EXPERIMENTAL = True + DESCRIPTION = "Plots the loss graph and saves it to the output directory." 
+
+    def plot_loss(self, loss, filename_prefix, prompt=None, extra_pnginfo=None):
+        loss_values = loss["loss"]
+        width, height = 800, 480
+        margin = 40
+
+        img = Image.new(
+            "RGB", (width + margin, height + margin), "white"
+        )  # Extend canvas
+        draw = ImageDraw.Draw(img)
+
+        min_loss, max_loss = min(loss_values), max(loss_values)
+        # Guard against a zero range (e.g. a single training step) to avoid division by zero.
+        loss_range = max(max_loss - min_loss, 1e-8)
+        scaled_loss = [(l - min_loss) / loss_range for l in loss_values]
+
+        steps = len(loss_values)
+
+        prev_point = (margin, height - int(scaled_loss[0] * height))
+        for i, l in enumerate(scaled_loss[1:], start=1):
+            x = margin + int(i / steps * width)  # Scale X properly
+            y = height - int(l * height)
+            draw.line([prev_point, (x, y)], fill="blue", width=2)
+            prev_point = (x, y)
+
+        draw.line([(margin, 0), (margin, height)], fill="black", width=2)  # Y-axis
+        draw.line(
+            [(margin, height), (width + margin, height)], fill="black", width=2
+        )  # X-axis
+
+        try:
+            font = ImageFont.truetype("arial.ttf", 12)
+        except IOError:
+            font = ImageFont.load_default()
+
+        # Add axis labels
+        draw.text((5, height // 2), "Loss", font=font, fill="black")
+        draw.text((width // 2, height + 10), "Steps", font=font, fill="black")
+
+        # Add min/max loss values
+        draw.text((margin - 30, 0), f"{max_loss:.2f}", font=font, fill="black")
+        draw.text(
+            (margin - 30, height - 10), f"{min_loss:.2f}", font=font, fill="black"
+        )
+
+        metadata = None
+        if not args.disable_metadata:
+            metadata = PngInfo()
+            if prompt is not None:
+                metadata.add_text("prompt", json.dumps(prompt))
+            if extra_pnginfo is not None:
+                for x in extra_pnginfo:
+                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+
+        date = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        img.save(
+            os.path.join(self.output_dir, f"{filename_prefix}_{date}.png"),
+            pnginfo=metadata,
+        )
+        return {
+            "ui": {
+                "images": [
+                    {
+                        "filename": f"{filename_prefix}_{date}.png",
+                        "subfolder": "",
+                        "type": "temp",
+                    }
+                ]
+            }
+        }
+
+
+NODE_CLASS_MAPPINGS = {
+    "TrainLoraNode": TrainLoraNode,
+    "SaveLoRANode": SaveLoRA,
+    "LoraModelLoader": LoraModelLoader,
+    "LoadImageSetFromFolderNode": LoadImageSetFromFolderNode,
+    "LossGraphNode": LossGraphNode,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "TrainLoraNode": "Train LoRA",
+    "SaveLoRANode": "Save LoRA Weights",
+    "LoraModelLoader": "Load LoRA Model",
+    "LoadImageSetFromFolderNode": "Load Image Dataset from Folder",
+    "LossGraphNode": "Plot Loss Graph",
+}
diff --git a/execution.py b/execution.py
index 15ff7567c..d0012afda 100644
--- a/execution.py
+++ b/execution.py
@@ -1,23 +1,35 @@
-import sys
 import copy
-import logging
-import threading
 import heapq
+import inspect
+import logging
+import sys
+import threading
 import time
 import traceback
 from enum import Enum
-import inspect
 from typing import List, Literal, NamedTuple, Optional

 import torch

-import nodes
 import comfy.model_management
-from comfy_execution.graph import get_input_info, ExecutionList, DynamicPrompt, ExecutionBlocker
-from comfy_execution.graph_utils import is_link, GraphBuilder
-from comfy_execution.caching import HierarchicalCache, LRUCache, DependencyAwareCache, CacheKeySetInputSignature, CacheKeySetID
+import nodes
+from comfy_execution.caching import (
+    CacheKeySetID,
+    CacheKeySetInputSignature,
+    DependencyAwareCache,
+    HierarchicalCache,
+    LRUCache,
+)
+from comfy_execution.graph import (
+    DynamicPrompt,
+    ExecutionBlocker,
+    ExecutionList,
+    get_input_info,
+)
+from comfy_execution.graph_utils import GraphBuilder, is_link
 from comfy_execution.validation import validate_node_input
+

 class 
ExecutionResult(Enum):
     SUCCESS = 0
     FAILURE = 1
diff --git a/folder_paths.py b/folder_paths.py
index f0b3fd103..9ec952940 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -276,6 +279,9 @@ def filter_files_extensions(files: Collection[str], extensions: Collection[str])
 def get_full_path(folder_name: str, filename: str) -> str | None:
+    """
+    Get the full path of a file in a folder. The path must point to an existing file.
+    """
     global folder_names_and_paths
     folder_name = map_legacy(folder_name)
     if folder_name not in folder_names_and_paths:
@@ -293,6 +296,9 @@ def get_full_path(folder_name: str, filename: str) -> str | None:
 def get_full_path_or_raise(folder_name: str, filename: str) -> str:
+    """
+    Get the full path of a file in a folder. Raises FileNotFoundError if the file does not exist.
+    """
     full_path = get_full_path(folder_name, filename)
     if full_path is None:
         raise FileNotFoundError(f"Model in folder '{folder_name}' with filename '{filename}' not found.")
@@ -394,3 +400,26 @@ def get_save_image_path(filename_prefix: str, output_dir: str, image_width=0, im
         os.makedirs(full_output_folder, exist_ok=True)
         counter = 1
     return full_output_folder, filename, counter, subfolder, filename_prefix
+
+def get_input_subfolders() -> list[str]:
+    """Returns a list of all subfolder paths in the input directory, recursively.
+
+    Returns:
+        List of folder paths relative to the input directory, excluding the root directory
+    """
+    input_dir = get_input_directory()
+    folders = []
+
+    try:
+        if not os.path.exists(input_dir):
+            return []
+
+        for root, dirs, _ in os.walk(input_dir):
+            rel_path = os.path.relpath(root, input_dir)
+            if rel_path != ".":  # Only include non-root directories
+                # Normalize path separators to forward slashes
+                folders.append(rel_path.replace(os.sep, '/'))
+
+        return sorted(folders)
+    except FileNotFoundError:
+        return []
diff --git a/nodes.py b/nodes.py
index 8e5b47b37..bfc342275 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2231,6 +2231,7 @@ def init_builtin_extra_nodes():
         "nodes_model_downscale.py",
         "nodes_images.py",
         "nodes_video_model.py",
+        "nodes_train.py",
         "nodes_sag.py",
         "nodes_perpneg.py",
         "nodes_stable3d.py",
diff --git a/tests-unit/folder_paths_test/misc_test.py b/tests-unit/folder_paths_test/misc_test.py
new file mode 100644
index 000000000..fcf667453
--- /dev/null
+++ b/tests-unit/folder_paths_test/misc_test.py
@@ -0,0 +1,51 @@
+import pytest
+import os
+import tempfile
+from folder_paths import get_input_subfolders, set_input_directory
+
+@pytest.fixture(scope="module")
+def mock_folder_structure():
+    with tempfile.TemporaryDirectory() as temp_dir:
+        # Create a nested folder structure
+        folders = [
+            "folder1",
+            "folder1/subfolder1",
+            "folder1/subfolder2",
+            "folder2",
+            "folder2/deep",
+            "folder2/deep/nested",
+            "empty_folder"
+        ]
+
+        # Create the folders
+        for folder in folders:
+            os.makedirs(os.path.join(temp_dir, folder))
+
+        # Add some files to test they're not included
+        with open(os.path.join(temp_dir, "root_file.txt"), "w") as f:
+            f.write("test")
+        with open(os.path.join(temp_dir, "folder1", "test.txt"), "w") as f:
+            f.write("test")
+
+        set_input_directory(temp_dir)
+        yield temp_dir
+
+
+def test_gets_all_folders(mock_folder_structure):
+    folders = get_input_subfolders()
+    expected = ["folder1", "folder1/subfolder1", "folder1/subfolder2",
+                "folder2", "folder2/deep", "folder2/deep/nested", "empty_folder"]
+    assert sorted(folders) == sorted(expected)
+
+
+def test_handles_nonexistent_input_directory():
+    with tempfile.TemporaryDirectory() as temp_dir:
+        nonexistent = os.path.join(temp_dir, "nonexistent")
+        
set_input_directory(nonexistent) + assert get_input_subfolders() == [] + + +def test_empty_input_directory(): + with tempfile.TemporaryDirectory() as temp_dir: + set_input_directory(temp_dir) + assert get_input_subfolders() == [] # Empty since we don't include root From f74778e75d4fb5731adf175bbf01ac19acef5140 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sat, 14 Jun 2025 11:06:28 +0800 Subject: [PATCH 0244/1073] Bump embedded docs to 0.2.2 (#8512) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 94dafea58..41b08134b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ comfyui-frontend-package==1.21.7 comfyui-workflow-templates==0.1.25 -comfyui-embedded-docs==0.2.0 +comfyui-embedded-docs==0.2.2 torch torchsde torchvision From 6673939e767ebb7512e4fc50ff8b9cd2042a8d66 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sat, 14 Jun 2025 11:11:00 +0800 Subject: [PATCH 0245/1073] Bump template to 0.1.28 (#8510) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 41b08134b..336ec9d57 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.21.7 -comfyui-workflow-templates==0.1.25 +comfyui-workflow-templates==0.1.28 comfyui-embedded-docs==0.2.2 torch torchsde From 803af1e0c39f4ba09bb399406c82e430a8f3ab22 Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Fri, 13 Jun 2025 23:11:55 -0400 Subject: [PATCH 0246/1073] allow extra settings from pyproject.toml (#8526) --- comfy_config/types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_config/types.py b/comfy_config/types.py index 11261a136..5222cc59b 100644 --- a/comfy_config/types.py +++ b/comfy_config/types.py @@ -90,4 +90,4 @@ class PyProjectSettings(BaseSettings): tool: dict = Field(default_factory=dict) - model_config = SettingsConfigDict() + model_config = SettingsConfigDict(extra='allow') From 29596bd53fd1dde0f2a53e462318fb1348fc7f1d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 14 Jun 2025 02:02:05 -0700 Subject: [PATCH 0247/1073] Small cosmos attention code refactor. (#8530) --- comfy/ldm/cosmos/predict2.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/comfy/ldm/cosmos/predict2.py b/comfy/ldm/cosmos/predict2.py index 3b91b3f6e..316117f77 100644 --- a/comfy/ldm/cosmos/predict2.py +++ b/comfy/ldm/cosmos/predict2.py @@ -70,11 +70,7 @@ def torch_attention_op(q_B_S_H_D: torch.Tensor, k_B_S_H_D: torch.Tensor, v_B_S_H q_B_H_S_D = rearrange(q_B_S_H_D, "b ... h k -> b h ... k").view(in_q_shape[0], in_q_shape[-2], -1, in_q_shape[-1]) k_B_H_S_D = rearrange(k_B_S_H_D, "b ... h v -> b h ... v").view(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) v_B_H_S_D = rearrange(v_B_S_H_D, "b ... h v -> b h ... v").view(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) - result_B_S_HD = rearrange( - optimized_attention(q_B_H_S_D, k_B_H_S_D, v_B_H_S_D, in_q_shape[-2], skip_reshape=True, skip_output_reshape=True), "b h ... l -> b ... 
(h l)" - ) - - return result_B_S_HD + return optimized_attention(q_B_H_S_D, k_B_H_S_D, v_B_H_S_D, in_q_shape[-2], skip_reshape=True) class Attention(nn.Module): From 53e8d8193c6b669eefc0a9bfa6e525b97837d876 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Sun, 15 Jun 2025 04:58:16 +0800 Subject: [PATCH 0248/1073] Generalize SEEDS samplers (#8529) Restore VP algorithm for RF and refactor noise_coeffs and half-logSNR calculations --- comfy/k_diffusion/sampling.py | 169 ++++++++++++++++++++++------------ 1 file changed, 109 insertions(+), 60 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index fbdf6f554..a8fd98493 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1,4 +1,5 @@ import math +from functools import partial from scipy import integrate import torch @@ -142,6 +143,33 @@ class BrownianTreeNoiseSampler: return self.tree(t0, t1) / (t1 - t0).abs().sqrt() +def sigma_to_half_log_snr(sigma, model_sampling): + """Convert sigma to half-logSNR log(alpha_t / sigma_t).""" + if isinstance(model_sampling, comfy.model_sampling.CONST): + # log((1 - t) / t) = log((1 - sigma) / sigma) + return sigma.logit().neg() + return sigma.log().neg() + + +def half_log_snr_to_sigma(half_log_snr, model_sampling): + """Convert half-logSNR log(alpha_t / sigma_t) to sigma.""" + if isinstance(model_sampling, comfy.model_sampling.CONST): + # 1 / (1 + exp(half_log_snr)) + return half_log_snr.neg().sigmoid() + return half_log_snr.neg().exp() + + +def offset_first_sigma_for_snr(sigmas, model_sampling, percent_offset=1e-4): + """Adjust the first sigma to avoid invalid logSNR.""" + if len(sigmas) <= 1: + return sigmas + if isinstance(model_sampling, comfy.model_sampling.CONST): + if sigmas[0] >= 1: + sigmas = sigmas.clone() + sigmas[0] = model_sampling.percent_to_sigma(percent_offset) + return sigmas + + @torch.no_grad() def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.): """Implements Algorithm 2 (Euler steps) from Karras et al. (2022).""" @@ -1449,12 +1477,12 @@ def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None old_denoised = denoised return x + @torch.no_grad() def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=0.5): - ''' - SEEDS-2 - Stochastic Explicit Exponential Derivative-free Solvers (VE Data Prediction) stage 2 - Arxiv: https://arxiv.org/abs/2305.14267 - ''' + """SEEDS-2 - Stochastic Explicit Exponential Derivative-free Solvers (VP Data Prediction) stage 2. 
+ arXiv: https://arxiv.org/abs/2305.14267 + """ extra_args = {} if extra_args is None else extra_args seed = extra_args.get("seed", None) noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler @@ -1462,6 +1490,11 @@ def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=Non inject_noise = eta > 0 and s_noise > 0 + model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling') + sigma_fn = partial(half_log_snr_to_sigma, model_sampling=model_sampling) + lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling) + sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) + for i in trange(len(sigmas) - 1, disable=disable): denoised = model(x, sigmas[i] * s_in, **extra_args) if callback is not None: @@ -1469,80 +1502,96 @@ def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=Non if sigmas[i + 1] == 0: x = denoised else: - t, t_next = -sigmas[i].log(), -sigmas[i + 1].log() - h = t_next - t + lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) + h = lambda_t - lambda_s h_eta = h * (eta + 1) - s = t + r * h + lambda_s_1 = lambda_s + r * h fac = 1 / (2 * r) - sigma_s = s.neg().exp() + sigma_s_1 = sigma_fn(lambda_s_1) + + # alpha_t = sigma_t * exp(log(alpha_t / sigma_t)) = sigma_t * exp(lambda_t) + alpha_s_1 = sigma_s_1 * lambda_s_1.exp() + alpha_t = sigmas[i + 1] * lambda_t.exp() coeff_1, coeff_2 = (-r * h_eta).expm1(), (-h_eta).expm1() if inject_noise: + # 0 < r < 1 noise_coeff_1 = (-2 * r * h * eta).expm1().neg().sqrt() - noise_coeff_2 = ((-2 * r * h * eta).expm1() - (-2 * h * eta).expm1()).sqrt() - noise_1, noise_2 = noise_sampler(sigmas[i], sigma_s), noise_sampler(sigma_s, sigmas[i + 1]) + noise_coeff_2 = (-r * h * eta).exp() * (-2 * (1 - r) * h * eta).expm1().neg().sqrt() + noise_1, noise_2 = noise_sampler(sigmas[i], sigma_s_1), noise_sampler(sigma_s_1, sigmas[i + 1]) # Step 1 - x_2 = (coeff_1 + 1) * x - coeff_1 * denoised - if inject_noise: - x_2 = x_2 + sigma_s * (noise_coeff_1 * noise_1) * s_noise - denoised_2 = model(x_2, sigma_s * s_in, **extra_args) - - # Step 2 - denoised_d = (1 - fac) * denoised + fac * denoised_2 - x = (coeff_2 + 1) * x - coeff_2 * denoised_d - if inject_noise: - x = x + sigmas[i + 1] * (noise_coeff_2 * noise_1 + noise_coeff_1 * noise_2) * s_noise - return x - -@torch.no_grad() -def sample_seeds_3(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r_1=1./3, r_2=2./3): - ''' - SEEDS-3 - Stochastic Explicit Exponential Derivative-free Solvers (VE Data Prediction) stage 3 - Arxiv: https://arxiv.org/abs/2305.14267 - ''' - extra_args = {} if extra_args is None else extra_args - seed = extra_args.get("seed", None) - noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler - s_in = x.new_ones([x.shape[0]]) - - inject_noise = eta > 0 and s_noise > 0 - - for i in trange(len(sigmas) - 1, disable=disable): - denoised = model(x, sigmas[i] * s_in, **extra_args) - if callback is not None: - callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) - if sigmas[i + 1] == 0: - x = denoised - else: - t, t_next = -sigmas[i].log(), -sigmas[i + 1].log() - h = t_next - t - h_eta = h * (eta + 1) - s_1 = t + r_1 * h - s_2 = t + r_2 * h - sigma_s_1, sigma_s_2 = s_1.neg().exp(), s_2.neg().exp() - - coeff_1, coeff_2, coeff_3 = (-r_1 * h_eta).expm1(), (-r_2 * h_eta).expm1(), (-h_eta).expm1() - if inject_noise: - noise_coeff_1 = (-2 
* r_1 * h * eta).expm1().neg().sqrt() - noise_coeff_2 = ((-2 * r_1 * h * eta).expm1() - (-2 * r_2 * h * eta).expm1()).sqrt() - noise_coeff_3 = ((-2 * r_2 * h * eta).expm1() - (-2 * h * eta).expm1()).sqrt() - noise_1, noise_2, noise_3 = noise_sampler(sigmas[i], sigma_s_1), noise_sampler(sigma_s_1, sigma_s_2), noise_sampler(sigma_s_2, sigmas[i + 1]) - - # Step 1 - x_2 = (coeff_1 + 1) * x - coeff_1 * denoised + x_2 = sigma_s_1 / sigmas[i] * (-r * h * eta).exp() * x - alpha_s_1 * coeff_1 * denoised if inject_noise: x_2 = x_2 + sigma_s_1 * (noise_coeff_1 * noise_1) * s_noise denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) # Step 2 - x_3 = (coeff_2 + 1) * x - coeff_2 * denoised + (r_2 / r_1) * (coeff_2 / (r_2 * h_eta) + 1) * (denoised_2 - denoised) + denoised_d = (1 - fac) * denoised + fac * denoised_2 + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * coeff_2 * denoised_d + if inject_noise: + x = x + sigmas[i + 1] * (noise_coeff_2 * noise_1 + noise_coeff_1 * noise_2) * s_noise + return x + + +@torch.no_grad() +def sample_seeds_3(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r_1=1./3, r_2=2./3): + """SEEDS-3 - Stochastic Explicit Exponential Derivative-free Solvers (VP Data Prediction) stage 3. + arXiv: https://arxiv.org/abs/2305.14267 + """ + extra_args = {} if extra_args is None else extra_args + seed = extra_args.get("seed", None) + noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler + s_in = x.new_ones([x.shape[0]]) + + inject_noise = eta > 0 and s_noise > 0 + + model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling') + sigma_fn = partial(half_log_snr_to_sigma, model_sampling=model_sampling) + lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling) + sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) + + for i in trange(len(sigmas) - 1, disable=disable): + denoised = model(x, sigmas[i] * s_in, **extra_args) + if callback is not None: + callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) + if sigmas[i + 1] == 0: + x = denoised + else: + lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) + h = lambda_t - lambda_s + h_eta = h * (eta + 1) + lambda_s_1 = lambda_s + r_1 * h + lambda_s_2 = lambda_s + r_2 * h + sigma_s_1, sigma_s_2 = sigma_fn(lambda_s_1), sigma_fn(lambda_s_2) + + # alpha_t = sigma_t * exp(log(alpha_t / sigma_t)) = sigma_t * exp(lambda_t) + alpha_s_1 = sigma_s_1 * lambda_s_1.exp() + alpha_s_2 = sigma_s_2 * lambda_s_2.exp() + alpha_t = sigmas[i + 1] * lambda_t.exp() + + coeff_1, coeff_2, coeff_3 = (-r_1 * h_eta).expm1(), (-r_2 * h_eta).expm1(), (-h_eta).expm1() + if inject_noise: + # 0 < r_1 < r_2 < 1 + noise_coeff_1 = (-2 * r_1 * h * eta).expm1().neg().sqrt() + noise_coeff_2 = (-r_1 * h * eta).exp() * (-2 * (r_2 - r_1) * h * eta).expm1().neg().sqrt() + noise_coeff_3 = (-r_2 * h * eta).exp() * (-2 * (1 - r_2) * h * eta).expm1().neg().sqrt() + noise_1, noise_2, noise_3 = noise_sampler(sigmas[i], sigma_s_1), noise_sampler(sigma_s_1, sigma_s_2), noise_sampler(sigma_s_2, sigmas[i + 1]) + + # Step 1 + x_2 = sigma_s_1 / sigmas[i] * (-r_1 * h * eta).exp() * x - alpha_s_1 * coeff_1 * denoised + if inject_noise: + x_2 = x_2 + sigma_s_1 * (noise_coeff_1 * noise_1) * s_noise + denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) + + # Step 2 + x_3 = sigma_s_2 / sigmas[i] * (-r_2 * h * eta).exp() * x - alpha_s_2 * coeff_2 * denoised + (r_2 / r_1) * alpha_s_2 * 
(coeff_2 / (r_2 * h_eta) + 1) * (denoised_2 - denoised) if inject_noise: x_3 = x_3 + sigma_s_2 * (noise_coeff_2 * noise_1 + noise_coeff_1 * noise_2) * s_noise denoised_3 = model(x_3, sigma_s_2 * s_in, **extra_args) # Step 3 - x = (coeff_3 + 1) * x - coeff_3 * denoised + (1. / r_2) * (coeff_3 / h_eta + 1) * (denoised_3 - denoised) + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * coeff_3 * denoised + (1. / r_2) * alpha_t * (coeff_3 / h_eta + 1) * (denoised_3 - denoised) if inject_noise: x = x + sigmas[i + 1] * (noise_coeff_3 * noise_1 + noise_coeff_2 * noise_2 + noise_coeff_1 * noise_3) * s_noise return x From d6a2137fc30d1a52c8b404b23ece0c5b6ef64867 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 14 Jun 2025 18:37:07 -0700 Subject: [PATCH 0249/1073] Support Cosmos predict2 image to video models. (#8535) Use the CosmosPredict2ImageToVideoLatent node. --- comfy/model_base.py | 21 ++++++++++++++++ comfy/model_detection.py | 15 ++++++++---- comfy_extras/nodes_cosmos.py | 46 ++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 5 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index be72ddd17..cb7689e84 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1014,9 +1014,30 @@ class CosmosPredict2(BaseModel): if cross_attn is not None: out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) + if denoise_mask is not None: + out["denoise_mask"] = comfy.conds.CONDRegular(denoise_mask) + out['fps'] = comfy.conds.CONDConstant(kwargs.get("frame_rate", None)) return out + def process_timestep(self, timestep, x, denoise_mask=None, **kwargs): + if denoise_mask is None: + return timestep + condition_video_mask_B_1_T_1_1 = denoise_mask.mean(dim=[1, 3, 4], keepdim=True) + c_noise_B_1_T_1_1 = 0.0 * (1.0 - condition_video_mask_B_1_T_1_1) + timestep.reshape(timestep.shape[0], 1, 1, 1, 1) * condition_video_mask_B_1_T_1_1 + out = c_noise_B_1_T_1_1.squeeze(dim=[1, 3, 4]) + return out + + def scale_latent_inpaint(self, sigma, noise, latent_image, **kwargs): + sigma = sigma.reshape([sigma.shape[0]] + [1] * (len(noise.shape) - 1)) + sigma_noise_augmentation = 0 #TODO + if sigma_noise_augmentation != 0: + latent_image = latent_image + noise + latent_image = self.model_sampling.calculate_input(torch.tensor([sigma_noise_augmentation], device=latent_image.device, dtype=latent_image.dtype), latent_image) + sigma = (sigma / (sigma + 1)) + return latent_image / (1.0 - sigma) + class Lumina2(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.lumina.model.NextDiT) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index b8fef17ad..4aa90d3b6 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -441,11 +441,16 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["rope_h_extrapolation_ratio"] = 4.0 dit_config["rope_w_extrapolation_ratio"] = 4.0 dit_config["rope_t_extrapolation_ratio"] = 1.0 - elif dit_config["in_channels"] == 17: - dit_config["extra_per_block_abs_pos_emb"] = False - dit_config["rope_h_extrapolation_ratio"] = 3.0 - dit_config["rope_w_extrapolation_ratio"] = 3.0 - dit_config["rope_t_extrapolation_ratio"] = 1.0 + elif dit_config["in_channels"] == 17: # img to video + if dit_config["model_channels"] == 2048: + 
dit_config["extra_per_block_abs_pos_emb"] = False + dit_config["rope_h_extrapolation_ratio"] = 3.0 + dit_config["rope_w_extrapolation_ratio"] = 3.0 + dit_config["rope_t_extrapolation_ratio"] = 1.0 + elif dit_config["model_channels"] == 5120: + dit_config["rope_h_extrapolation_ratio"] = 2.0 + dit_config["rope_w_extrapolation_ratio"] = 2.0 + dit_config["rope_t_extrapolation_ratio"] = 0.8333333333333334 dit_config["extra_h_extrapolation_ratio"] = 1.0 dit_config["extra_w_extrapolation_ratio"] = 1.0 diff --git a/comfy_extras/nodes_cosmos.py b/comfy_extras/nodes_cosmos.py index bd35ddb06..4f4960551 100644 --- a/comfy_extras/nodes_cosmos.py +++ b/comfy_extras/nodes_cosmos.py @@ -2,6 +2,7 @@ import nodes import torch import comfy.model_management import comfy.utils +import comfy.latent_formats class EmptyCosmosLatentVideo: @@ -75,8 +76,53 @@ class CosmosImageToVideoLatent: out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1)) return (out_latent,) +class CosmosPredict2ImageToVideoLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": {"vae": ("VAE", ), + "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "length": ("INT", {"default": 93, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + "optional": {"start_image": ("IMAGE", ), + "end_image": ("IMAGE", ), + }} + + + RETURN_TYPES = ("LATENT",) + FUNCTION = "encode" + + CATEGORY = "conditioning/inpaint" + + def encode(self, vae, width, height, length, batch_size, start_image=None, end_image=None): + latent = torch.zeros([1, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + if start_image is None and end_image is None: + out_latent = {} + out_latent["samples"] = latent + return (out_latent,) + + mask = torch.ones([latent.shape[0], 1, ((length - 1) // 4) + 1, latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device()) + + if start_image is not None: + latent_temp = vae_encode_with_padding(vae, start_image, width, height, length, padding=1) + latent[:, :, :latent_temp.shape[-3]] = latent_temp + mask[:, :, :latent_temp.shape[-3]] *= 0.0 + + if end_image is not None: + latent_temp = vae_encode_with_padding(vae, end_image, width, height, length, padding=0) + latent[:, :, -latent_temp.shape[-3]:] = latent_temp + mask[:, :, -latent_temp.shape[-3]:] *= 0.0 + + out_latent = {} + latent_format = comfy.latent_formats.Wan21() + latent = latent_format.process_out(latent) * mask + latent * (1.0 - mask) + out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1)) + out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1)) + return (out_latent,) NODE_CLASS_MAPPINGS = { "EmptyCosmosLatentVideo": EmptyCosmosLatentVideo, "CosmosImageToVideoLatent": CosmosImageToVideoLatent, + "CosmosPredict2ImageToVideoLatent": CosmosPredict2ImageToVideoLatent, } From ae75a084dfbd18495313d1cd87a724d507d0861b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 15 Jun 2025 00:44:59 -0700 Subject: [PATCH 0250/1073] SaveLora now saves in the same filename format as all the other nodes. 
(#8538) --- comfy_extras/nodes_train.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index 17a03a1e0..fbff01010 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -552,6 +552,9 @@ class LoraModelLoader: class SaveLoRA: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + @classmethod def INPUT_TYPES(s): return { @@ -565,7 +568,7 @@ class SaveLoRA: "prefix": ( "STRING", { - "default": "trained_lora", + "default": "loras/ComfyUI_trained_lora", "tooltip": "The prefix to use for the saved LoRA file.", }, ), @@ -588,12 +591,13 @@ class SaveLoRA: OUTPUT_NODE = True def save(self, lora, prefix, steps=None): - date = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(prefix, self.output_dir) if steps is None: - output_file = f"models/loras/{prefix}_{date}_lora.safetensors" + output_checkpoint = f"{filename}_{counter:05}_.safetensors" else: - output_file = f"models/loras/{prefix}_{steps}_steps_{date}_lora.safetensors" - safetensors.torch.save_file(lora, output_file) + output_checkpoint = f"{filename}_{steps}_steps_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + safetensors.torch.save_file(lora, output_checkpoint) return {} From 7ea79ebb9d42730bace355bf8dc6adfa904d8c74 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 15 Jun 2025 09:21:25 -0700 Subject: [PATCH 0251/1073] Add correct eps to ltxv rmsnorm. (#8542) --- comfy/ldm/lightricks/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/lightricks/model.py b/comfy/ldm/lightricks/model.py index 056e101a4..ad9a7daea 100644 --- a/comfy/ldm/lightricks/model.py +++ b/comfy/ldm/lightricks/model.py @@ -261,8 +261,8 @@ class CrossAttention(nn.Module): self.heads = heads self.dim_head = dim_head - self.q_norm = operations.RMSNorm(inner_dim, dtype=dtype, device=device) - self.k_norm = operations.RMSNorm(inner_dim, dtype=dtype, device=device) + self.q_norm = operations.RMSNorm(inner_dim, eps=1e-5, dtype=dtype, device=device) + self.k_norm = operations.RMSNorm(inner_dim, eps=1e-5, dtype=dtype, device=device) self.to_q = operations.Linear(query_dim, inner_dim, bias=True, dtype=dtype, device=device) self.to_k = operations.Linear(context_dim, inner_dim, bias=True, dtype=dtype, device=device) From e1c6dc720e01cc9f33adef388361a001297dca43 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 16 Jun 2025 10:43:52 -0700 Subject: [PATCH 0252/1073] Allow setting min_length with tokenizer_data. 
(#8547) --- comfy/sd1_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index ac61babe9..1b69a4103 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -462,7 +462,7 @@ class SDTokenizer: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args) self.max_length = tokenizer_data.get("{}_max_length".format(embedding_key), max_length) - self.min_length = min_length + self.min_length = tokenizer_data.get("{}_min_length".format(embedding_key), min_length) self.end_token = None self.min_padding = min_padding From 8e81c507d2124b6c04993e3dbe3df9f40573f814 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Tue, 17 Jun 2025 02:47:10 +0800 Subject: [PATCH 0253/1073] Multistep DPM++ SDE samplers for RF (#8541) Include alpha in sampling and minor refactoring --- comfy/k_diffusion/sampling.py | 50 ++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index a8fd98493..8030048fc 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -781,6 +781,7 @@ def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=No old_denoised = denoised return x + @torch.no_grad() def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'): """DPM-Solver++(2M) SDE.""" @@ -796,9 +797,12 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disabl noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler s_in = x.new_ones([x.shape[0]]) + model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling') + lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling) + sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) + old_denoised = None - h_last = None - h = None + h, h_last = None, None for i in trange(len(sigmas) - 1, disable=disable): denoised = model(x, sigmas[i] * s_in, **extra_args) @@ -809,26 +813,29 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disabl x = denoised else: # DPM-Solver++(2M) SDE - t, s = -sigmas[i].log(), -sigmas[i + 1].log() - h = s - t - eta_h = eta * h + lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) + h = lambda_t - lambda_s + h_eta = h * (eta + 1) - x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised + alpha_t = sigmas[i + 1] * lambda_t.exp() + + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x + alpha_t * (-h_eta).expm1().neg() * denoised if old_denoised is not None: r = h_last / h if solver_type == 'heun': - x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised) + x = x + alpha_t * ((-h_eta).expm1().neg() / (-h_eta) + 1) * (1 / r) * (denoised - old_denoised) elif solver_type == 'midpoint': - x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised) + x = x + 0.5 * alpha_t * (-h_eta).expm1().neg() * (1 / r) * (denoised - old_denoised) - if eta: - x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise + if eta > 0 and s_noise > 0: + x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * 
(-2 * h * eta).expm1().neg().sqrt() * s_noise old_denoised = denoised h_last = h return x + @torch.no_grad() def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None): """DPM-Solver++(3M) SDE.""" @@ -842,6 +849,10 @@ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disabl noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler s_in = x.new_ones([x.shape[0]]) + model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling') + lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling) + sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) + denoised_1, denoised_2 = None, None h, h_1, h_2 = None, None, None @@ -853,13 +864,16 @@ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disabl # Denoising step x = denoised else: - t, s = -sigmas[i].log(), -sigmas[i + 1].log() - h = s - t + lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) + h = lambda_t - lambda_s h_eta = h * (eta + 1) - x = torch.exp(-h_eta) * x + (-h_eta).expm1().neg() * denoised + alpha_t = sigmas[i + 1] * lambda_t.exp() + + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x + alpha_t * (-h_eta).expm1().neg() * denoised if h_2 is not None: + # DPM-Solver++(3M) SDE r0 = h_1 / h r1 = h_2 / h d1_0 = (denoised - denoised_1) / r0 @@ -868,20 +882,22 @@ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disabl d2 = (d1_0 - d1_1) / (r0 + r1) phi_2 = h_eta.neg().expm1() / h_eta + 1 phi_3 = phi_2 / h_eta - 0.5 - x = x + phi_2 * d1 - phi_3 * d2 + x = x + (alpha_t * phi_2) * d1 - (alpha_t * phi_3) * d2 elif h_1 is not None: + # DPM-Solver++(2M) SDE r = h_1 / h d = (denoised - denoised_1) / r phi_2 = h_eta.neg().expm1() / h_eta + 1 - x = x + phi_2 * d + x = x + (alpha_t * phi_2) * d - if eta: + if eta > 0 and s_noise > 0: x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise denoised_1, denoised_2 = denoised, denoised_1 h_1, h_2 = h, h_1 return x + @torch.no_grad() def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None): if len(sigmas) <= 1: @@ -891,6 +907,7 @@ def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, di noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler return sample_dpmpp_3m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler) + @torch.no_grad() def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'): if len(sigmas) <= 1: @@ -900,6 +917,7 @@ def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, di noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type) + @torch.no_grad() def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2): if 
len(sigmas) <= 1: From 483b3e62e00624fc52da8ad67e88f863abe975d2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 16 Jun 2025 23:34:46 -0400 Subject: [PATCH 0254/1073] ComfyUI version v0.3.41 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 6962c3661..fedd3466f 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.40" +__version__ = "0.3.41" diff --git a/pyproject.toml b/pyproject.toml index 03841bc94..c572ad4c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.40" +version = "0.3.41" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 4459a17e828eb8f96ffe07e3a08d68aff77b4da0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 17 Jun 2025 02:18:01 -0700 Subject: [PATCH 0255/1073] Add Cosmos Predict2 to README. (#8562) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9a35ab7ea..0de4a6bb5 100644 --- a/README.md +++ b/README.md @@ -65,12 +65,13 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/) - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) + - [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/) - Video Models - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) - [LTX-Video](https://comfyanonymous.github.io/ComfyUI_examples/ltxv/) - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/) - - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) + - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) and [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/) - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/) - Audio Models - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/) From cd88f709ab5fbe8fec0d2b242691fef826ba038a Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 17 Jun 2025 19:11:59 +0800 Subject: [PATCH 0256/1073] Update template version (#8563) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 336ec9d57..910634d87 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.21.7 -comfyui-workflow-templates==0.1.28 +comfyui-workflow-templates==0.1.29 comfyui-embedded-docs==0.2.2 torch torchsde From d7430c529a586ba4005bc46ba10ce02f71dba0d8 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Tue, 17 Jun 2025 15:58:28 -0700 Subject: [PATCH 0257/1073] Update frontend to 1.22.2 (#8567) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 910634d87..15fde2849 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.21.7 +comfyui-frontend-package==1.22.2 
comfyui-workflow-templates==0.1.29 comfyui-embedded-docs==0.2.2 torch From e9e9a031a88f9cc4845b3322d4bae771d2854472 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 18 Jun 2025 03:55:21 -0700 Subject: [PATCH 0258/1073] Show a better error when the workflow OOMs. (#8574) --- execution.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/execution.py b/execution.py index d0012afda..f6006fa12 100644 --- a/execution.py +++ b/execution.py @@ -429,17 +429,20 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, promp logging.error(f"!!! Exception during processing !!! {ex}") logging.error(traceback.format_exc()) + tips = "" + + if isinstance(ex, comfy.model_management.OOM_EXCEPTION): + tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number." + logging.error("Got an OOM, unloading all loaded models.") + comfy.model_management.unload_all_models() error_details = { "node_id": real_node_id, - "exception_message": str(ex), + "exception_message": "{}\n{}".format(ex, tips), "exception_type": exception_type, "traceback": traceback.format_tb(tb), "current_inputs": input_data_formatted } - if isinstance(ex, comfy.model_management.OOM_EXCEPTION): - logging.error("Got an OOM, unloading all loaded models.") - comfy.model_management.unload_all_models() return (ExecutionResult.FAILURE, error_details, ex) From 5b12b55e32aa2aa8fd47d545265a974d3b01ac7c Mon Sep 17 00:00:00 2001 From: coderfromthenorth93 Date: Wed, 18 Jun 2025 15:12:29 -0400 Subject: [PATCH 0259/1073] Add new fields to the config types (#8507) --- comfy_config/config_parser.py | 55 +++++++++++++++++++++++++++++++++++ comfy_config/types.py | 6 +++- 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/comfy_config/config_parser.py b/comfy_config/config_parser.py index a9cbd94dd..8da7bd901 100644 --- a/comfy_config/config_parser.py +++ b/comfy_config/config_parser.py @@ -11,6 +11,43 @@ from comfy_config.types import ( PyProjectSettings ) +def validate_and_extract_os_classifiers(classifiers: list) -> list: + os_classifiers = [c for c in classifiers if c.startswith("Operating System :: ")] + if not os_classifiers: + return [] + + os_values = [c[len("Operating System :: ") :] for c in os_classifiers] + valid_os_prefixes = {"Microsoft", "POSIX", "MacOS", "OS Independent"} + + for os_value in os_values: + if not any(os_value.startswith(prefix) for prefix in valid_os_prefixes): + return [] + + return os_values + + +def validate_and_extract_accelerator_classifiers(classifiers: list) -> list: + accelerator_classifiers = [c for c in classifiers if c.startswith("Environment ::")] + if not accelerator_classifiers: + return [] + + accelerator_values = [c[len("Environment :: ") :] for c in accelerator_classifiers] + + valid_accelerators = { + "GPU :: NVIDIA CUDA", + "GPU :: AMD ROCm", + "GPU :: Intel Arc", + "NPU :: Huawei Ascend", + "GPU :: Apple Metal", + } + + for accelerator_value in accelerator_values: + if accelerator_value not in valid_accelerators: + return [] + + return accelerator_values + + """ Extract configuration from a custom node directory's pyproject.toml file or a Python file. 
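To make the intent of the two validators above concrete: both are all-or-nothing, so a single unrecognized value empties the whole result rather than being silently dropped. A usage sketch with a hypothetical classifier list, assuming the functions are imported from comfy_config.config_parser:

from comfy_config.config_parser import (
    validate_and_extract_os_classifiers,
    validate_and_extract_accelerator_classifiers,
)

classifiers = [
    "Operating System :: POSIX :: Linux",
    "Environment :: GPU :: NVIDIA CUDA",
    "License :: OSI Approved :: MIT License",  # unrelated classifiers are ignored
]
print(validate_and_extract_os_classifiers(classifiers))           # ['POSIX :: Linux']
print(validate_and_extract_accelerator_classifiers(classifiers))  # ['GPU :: NVIDIA CUDA']
print(validate_and_extract_os_classifiers(["Operating System :: Amiga"]))  # [] - one bad value voids the list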
@@ -78,6 +115,24 @@ def extract_node_configuration(path) -> Optional[PyProjectConfig]: tool_data = raw_settings.tool comfy_data = tool_data.get("comfy", {}) if tool_data else {} + dependencies = project_data.get("dependencies", []) + supported_comfyui_frontend_version = "" + for dep in dependencies: + if isinstance(dep, str) and dep.startswith("comfyui-frontend-package"): + supported_comfyui_frontend_version = dep.removeprefix("comfyui-frontend-package") + break + + supported_comfyui_version = comfy_data.get("requires-comfyui", "") + + classifiers = project_data.get('classifiers', []) + supported_os = validate_and_extract_os_classifiers(classifiers) + supported_accelerators = validate_and_extract_accelerator_classifiers(classifiers) + + project_data['supported_os'] = supported_os + project_data['supported_accelerators'] = supported_accelerators + project_data['supported_comfyui_frontend_version'] = supported_comfyui_frontend_version + project_data['supported_comfyui_version'] = supported_comfyui_version + return PyProjectConfig(project=project_data, tool_comfy=comfy_data) diff --git a/comfy_config/types.py b/comfy_config/types.py index 5222cc59b..59448466b 100644 --- a/comfy_config/types.py +++ b/comfy_config/types.py @@ -51,7 +51,7 @@ class ComfyConfig(BaseModel): models: List[Model] = Field(default_factory=list, alias="Models") includes: List[str] = Field(default_factory=list) web: Optional[str] = None - + banner_url: str = "" class License(BaseModel): file: str = "" @@ -66,6 +66,10 @@ class ProjectConfig(BaseModel): dependencies: List[str] = Field(default_factory=list) license: License = Field(default_factory=License) urls: URLs = Field(default_factory=URLs) + supported_os: List[str] = Field(default_factory=list) + supported_accelerators: List[str] = Field(default_factory=list) + supported_comfyui_version: str = "" + supported_comfyui_frontend_version: str = "" @field_validator('license', mode='before') @classmethod From 91d40086db7956aabedef5cfcae0f6821529a3d1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 19 Jun 2025 08:04:52 -0700 Subject: [PATCH 0260/1073] Fix pytorch warning. (#8593) --- comfy/ldm/modules/sub_quadratic_attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py index 21c72373f..fab145f1c 100644 --- a/comfy/ldm/modules/sub_quadratic_attention.py +++ b/comfy/ldm/modules/sub_quadratic_attention.py @@ -31,7 +31,7 @@ def dynamic_slice( starts: List[int], sizes: List[int], ) -> Tensor: - slicing = [slice(start, start + size) for start, size in zip(starts, sizes)] + slicing = tuple(slice(start, start + size) for start, size in zip(starts, sizes)) return x[slicing] class AttnChunk(NamedTuple): From 7e9267fa77c93355dd3bdf05b6cb8f02d41af5ae Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 19 Jun 2025 15:50:05 -0700 Subject: [PATCH 0261/1073] Make flux controlnet work with sd3 text enc. 
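The one-line sub_quadratic_attention change above is worth spelling out: indexing a tensor with a Python list of slice objects goes through the deprecated advanced-indexing path (hence the PyTorch warning), while a tuple of slices is plain basic indexing. A standalone sketch of the fixed helper:

import torch
from typing import List

def dynamic_slice(x: torch.Tensor, starts: List[int], sizes: List[int]) -> torch.Tensor:
    # a tuple of slices, not a list: lists of slices trigger the deprecation warning
    slicing = tuple(slice(start, start + size) for start, size in zip(starts, sizes))
    return x[slicing]

x = torch.arange(24).reshape(4, 6)
print(dynamic_slice(x, starts=[1, 2], sizes=[2, 3]).shape)  # torch.Size([2, 3])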
(#8599) --- comfy/ldm/flux/controlnet.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/ldm/flux/controlnet.py b/comfy/ldm/flux/controlnet.py index dbd2a47c0..7dcf82bbf 100644 --- a/comfy/ldm/flux/controlnet.py +++ b/comfy/ldm/flux/controlnet.py @@ -123,6 +123,8 @@ class ControlNetFlux(Flux): if y is None: y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype) + else: + y = y[:, :self.params.vec_in_dim] # running on sequences img img = self.img_in(img) From f7fb1937127a8ed011b99424598c9ab1e8565112 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 20 Jun 2025 02:37:32 -0700 Subject: [PATCH 0262/1073] Small flux optimization. (#8611) --- comfy/ldm/flux/layers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py index 76af967e6..113eb2096 100644 --- a/comfy/ldm/flux/layers.py +++ b/comfy/ldm/flux/layers.py @@ -118,7 +118,7 @@ class Modulation(nn.Module): def apply_mod(tensor, m_mult, m_add=None, modulation_dims=None): if modulation_dims is None: if m_add is not None: - return tensor * m_mult + m_add + return torch.addcmul(m_add, tensor, m_mult) else: return tensor * m_mult else: From 31ca603ccbc45ab4db1279aa485c715b96b2aae8 Mon Sep 17 00:00:00 2001 From: Lucas - BLOCK33 <95554128+tonynoce@users.noreply.github.com> Date: Sat, 21 Jun 2025 00:04:55 -0300 Subject: [PATCH 0263/1073] Improve the log time function for 10 minute + renders (#6207) * modified: main.py * Update main.py --- main.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index c8c4194d4..79a652578 100644 --- a/main.py +++ b/main.py @@ -185,7 +185,13 @@ def prompt_worker(q, server_instance): current_time = time.perf_counter() execution_time = current_time - execution_start_time - logging.info("Prompt executed in {:.2f} seconds".format(execution_time)) + + # Log Time in a more readable way after 10 minutes + if execution_time > 600: + execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time)) + logging.info(f"Prompt executed in {execution_time}") + else: + logging.info("Prompt executed in {:.2f} seconds".format(execution_time)) flags = q.get_flags() free_memory = flags.get("free_memory", False) From 1883e70b4374a3317e0463a0bef292fc21182bad Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 21 Jun 2025 00:30:39 -0700 Subject: [PATCH 0264/1073] Fix exception when using a noise mask with cosmos predict2. (#8621) * Fix exception when using a noise mask with cosmos predict2. * Fix ruff. 
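For the "small flux optimization" above: torch.addcmul(m_add, tensor, m_mult) computes m_add + tensor * m_mult as a single fused op, so it is a drop-in replacement for the modulation expression. A quick equivalence check:

import torch

tensor, m_mult, m_add = torch.randn(2, 4), torch.randn(2, 4), torch.randn(2, 4)
fused = torch.addcmul(m_add, tensor, m_mult)  # m_add + tensor * m_mult
naive = tensor * m_mult + m_add
assert torch.allclose(fused, naive)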
--- comfy/model_base.py | 2 ++ main.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index cb7689e84..75ec42699 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1024,6 +1024,8 @@ class CosmosPredict2(BaseModel): def process_timestep(self, timestep, x, denoise_mask=None, **kwargs): if denoise_mask is None: return timestep + if denoise_mask.ndim <= 4: + return timestep condition_video_mask_B_1_T_1_1 = denoise_mask.mean(dim=[1, 3, 4], keepdim=True) c_noise_B_1_T_1_1 = 0.0 * (1.0 - condition_video_mask_B_1_T_1_1) + timestep.reshape(timestep.shape[0], 1, 1, 1, 1) * condition_video_mask_B_1_T_1_1 out = c_noise_B_1_T_1_1.squeeze(dim=[1, 3, 4]) diff --git a/main.py b/main.py index 79a652578..0d7c97dcb 100644 --- a/main.py +++ b/main.py @@ -185,7 +185,7 @@ def prompt_worker(q, server_instance): current_time = time.perf_counter() execution_time = current_time - execution_start_time - + # Log Time in a more readable way after 10 minutes if execution_time > 600: execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time)) From 78f79266a9be9d9316e7feb6d6a0ed6ac616547d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 21 Jun 2025 21:19:41 -0700 Subject: [PATCH 0265/1073] Allow padding in ImageStitch node to be white. (#8631) --- comfy_extras/nodes_images.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index b1e0d4666..8d5fcdb85 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -304,10 +304,23 @@ Optional spacing can be added between images. image2.movedim(-1, 1), target_w, target_h, "lanczos", "disabled" ).movedim(1, -1) + color_map = { + "white": 1.0, + "black": 0.0, + "red": (1.0, 0.0, 0.0), + "green": (0.0, 1.0, 0.0), + "blue": (0.0, 0.0, 1.0), + } + + color_val = color_map[spacing_color] + # When not matching sizes, pad to align non-concat dimensions if not match_image_size: h1, w1 = image1.shape[1:3] h2, w2 = image2.shape[1:3] + pad_value = 0.0 + if not isinstance(color_val, tuple): + pad_value = color_val if direction in ["left", "right"]: # For horizontal concat, pad heights to match @@ -316,11 +329,11 @@ Optional spacing can be added between images. if h1 < target_h: pad_h = target_h - h1 pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2 - image1 = torch.nn.functional.pad(image1, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=0.0) + image1 = torch.nn.functional.pad(image1, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=pad_value) if h2 < target_h: pad_h = target_h - h2 pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2 - image2 = torch.nn.functional.pad(image2, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=0.0) + image2 = torch.nn.functional.pad(image2, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=pad_value) else: # up, down # For vertical concat, pad widths to match if w1 != w2: @@ -328,11 +341,11 @@ Optional spacing can be added between images. 
if w1 < target_w: pad_w = target_w - w1 pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2 - image1 = torch.nn.functional.pad(image1, (0, 0, pad_left, pad_right), mode='constant', value=0.0) + image1 = torch.nn.functional.pad(image1, (0, 0, pad_left, pad_right), mode='constant', value=pad_value) if w2 < target_w: pad_w = target_w - w2 pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2 - image2 = torch.nn.functional.pad(image2, (0, 0, pad_left, pad_right), mode='constant', value=0.0) + image2 = torch.nn.functional.pad(image2, (0, 0, pad_left, pad_right), mode='constant', value=pad_value) # Ensure same number of channels if image1.shape[-1] != image2.shape[-1]: @@ -366,15 +379,6 @@ Optional spacing can be added between images. if spacing_width > 0: spacing_width = spacing_width + (spacing_width % 2) # Ensure even - color_map = { - "white": 1.0, - "black": 0.0, - "red": (1.0, 0.0, 0.0), - "green": (0.0, 1.0, 0.0), - "blue": (0.0, 0.0, 1.0), - } - color_val = color_map[spacing_color] - if direction in ["left", "right"]: spacing_shape = ( image1.shape[0], From ae0e7c4dff827dae57d5b4c1d21b135036f42d64 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 22 Jun 2025 14:59:31 -0700 Subject: [PATCH 0266/1073] Resize and pad image node. (#8636) --- comfy_extras/nodes_images.py | 57 ++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 8d5fcdb85..ed54ccc57 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -414,6 +414,62 @@ Optional spacing can be added between images. concat_dim = 2 if direction in ["left", "right"] else 1 return (torch.cat(images, dim=concat_dim),) +class ResizeAndPadImage: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "target_width": ("INT", { + "default": 512, + "min": 1, + "max": MAX_RESOLUTION, + "step": 1 + }), + "target_height": ("INT", { + "default": 512, + "min": 1, + "max": MAX_RESOLUTION, + "step": 1 + }), + "padding_color": (["white", "black"],), + "interpolation": (["area", "bicubic", "nearest-exact", "bilinear", "lanczos"],), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "resize_and_pad" + CATEGORY = "image/transform" + + def resize_and_pad(self, image, target_width, target_height, padding_color, interpolation): + batch_size, orig_height, orig_width, channels = image.shape + + scale_w = target_width / orig_width + scale_h = target_height / orig_height + scale = min(scale_w, scale_h) + + new_width = int(orig_width * scale) + new_height = int(orig_height * scale) + + image_permuted = image.permute(0, 3, 1, 2) + + resized = comfy.utils.common_upscale(image_permuted, new_width, new_height, interpolation, "disabled") + + pad_value = 0.0 if padding_color == "black" else 1.0 + padded = torch.full( + (batch_size, channels, target_height, target_width), + pad_value, + dtype=image.dtype, + device=image.device + ) + + y_offset = (target_height - new_height) // 2 + x_offset = (target_width - new_width) // 2 + + padded[:, :, y_offset:y_offset + new_height, x_offset:x_offset + new_width] = resized + + output = padded.permute(0, 2, 3, 1) + return (output,) class SaveSVGNode: """ @@ -536,5 +592,6 @@ NODE_CLASS_MAPPINGS = { "SaveAnimatedPNG": SaveAnimatedPNG, "SaveSVGNode": SaveSVGNode, "ImageStitch": ImageStitch, + "ResizeAndPadImage": ResizeAndPadImage, "GetImageSize": GetImageSize, } From dd94416db24b93f81e9f4bd2aa91d1588041b489 Mon Sep 17 00:00:00 2001 
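The geometry behind the ResizeAndPadImage node above is a plain letterbox: scale by whichever dimension is the limiting one, then center the result on a constant-color canvas. The arithmetic in isolation (a sketch, separate from the node wrapper):

def letterbox_geometry(orig_w, orig_h, target_w, target_h):
    # scale so the image fits entirely inside the target while preserving aspect ratio
    scale = min(target_w / orig_w, target_h / orig_h)
    new_w, new_h = int(orig_w * scale), int(orig_h * scale)
    # center the resized image; the remainder becomes padding
    x_offset = (target_w - new_w) // 2
    y_offset = (target_h - new_h) // 2
    return new_w, new_h, x_offset, y_offset

print(letterbox_geometry(1920, 1080, 512, 512))
# (512, 288, 0, 112): width limits the scale, so the 288-px-high image is centered vertically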
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 23 Jun 2025 11:04:49 -0700 Subject: [PATCH 0267/1073] Indicate that directml is not recommended in the README. (#8644) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 0de4a6bb5..6366280e7 100644 --- a/README.md +++ b/README.md @@ -273,6 +273,8 @@ You can install ComfyUI in Apple Mac silicon (M1 or M2) with any recent macOS ve #### DirectML (AMD Cards on Windows) +This is very badly supported and is not recommended. There are some unofficial builds of pytorch ROCm on windows that exist that will give you a much better experience than this. This readme will be updated once official pytorch ROCm builds for windows come out. + ```pip install torch-directml``` Then you can launch ComfyUI with: ```python main.py --directml``` #### Ascend NPUs From bd9f166c1281e9da622a322d046897bbeee82d1d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 24 Jun 2025 02:17:16 -0700 Subject: [PATCH 0268/1073] Cosmos predict2 model merging nodes. (#8647) --- .../nodes_model_merging_model_specific.py | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/comfy_extras/nodes_model_merging_model_specific.py b/comfy_extras/nodes_model_merging_model_specific.py index dc3411947..2c93cd84f 100644 --- a/comfy_extras/nodes_model_merging_model_specific.py +++ b/comfy_extras/nodes_model_merging_model_specific.py @@ -268,6 +268,52 @@ class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks): return {"required": arg_dict} +class ModelMergeCosmosPredict2_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks): + CATEGORY = "advanced/model_merging/model_specific" + + @classmethod + def INPUT_TYPES(s): + arg_dict = { "model1": ("MODEL",), + "model2": ("MODEL",)} + + argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) + + arg_dict["pos_embedder."] = argument + arg_dict["x_embedder."] = argument + arg_dict["t_embedder."] = argument + arg_dict["t_embedding_norm."] = argument + + + for i in range(28): + arg_dict["blocks.{}.".format(i)] = argument + + arg_dict["final_layer."] = argument + + return {"required": arg_dict} + +class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBlocks): + CATEGORY = "advanced/model_merging/model_specific" + + @classmethod + def INPUT_TYPES(s): + arg_dict = { "model1": ("MODEL",), + "model2": ("MODEL",)} + + argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) + + arg_dict["pos_embedder."] = argument + arg_dict["x_embedder."] = argument + arg_dict["t_embedder."] = argument + arg_dict["t_embedding_norm."] = argument + + + for i in range(36): + arg_dict["blocks.{}.".format(i)] = argument + + arg_dict["final_layer."] = argument + + return {"required": arg_dict} + NODE_CLASS_MAPPINGS = { "ModelMergeSD1": ModelMergeSD1, "ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks @@ -281,4 +327,6 @@ NODE_CLASS_MAPPINGS = { "ModelMergeCosmos7B": ModelMergeCosmos7B, "ModelMergeCosmos14B": ModelMergeCosmos14B, "ModelMergeWAN2_1": ModelMergeWAN2_1, + "ModelMergeCosmosPredict2_2B": ModelMergeCosmosPredict2_2B, + "ModelMergeCosmosPredict2_14B": ModelMergeCosmosPredict2_14B, } From 8042eb20c6fbcff54921ede9e983e1e848a65794 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Wed, 25 Jun 2025 02:59:09 +0800 Subject: [PATCH 0269/1073] Singlestep DPM++ SDE for RF (#8627) Refactor the 
algorithm, and apply alpha scaling. --- comfy/k_diffusion/sampling.py | 48 ++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 8030048fc..739468872 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -710,6 +710,7 @@ def sample_dpmpp_2s_ancestral_RF(model, x, sigmas, extra_args=None, callback=Non # logged_x = torch.cat((logged_x, x.unsqueeze(0)), dim=0) return x + @torch.no_grad() def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2): """DPM-Solver++ (stochastic).""" @@ -721,38 +722,49 @@ def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=N seed = extra_args.get("seed", None) noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler s_in = x.new_ones([x.shape[0]]) - sigma_fn = lambda t: t.neg().exp() - t_fn = lambda sigma: sigma.log().neg() + + model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling') + sigma_fn = partial(half_log_snr_to_sigma, model_sampling=model_sampling) + lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling) + sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) for i in trange(len(sigmas) - 1, disable=disable): denoised = model(x, sigmas[i] * s_in, **extra_args) if callback is not None: callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) if sigmas[i + 1] == 0: - # Euler method - d = to_d(x, sigmas[i], denoised) - dt = sigmas[i + 1] - sigmas[i] - x = x + d * dt + # Denoising step + x = denoised else: # DPM-Solver++ - t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1]) - h = t_next - t - s = t + h * r + lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) + h = lambda_t - lambda_s + lambda_s_1 = lambda_s + r * h fac = 1 / (2 * r) + sigma_s_1 = sigma_fn(lambda_s_1) + + alpha_s = sigmas[i] * lambda_s.exp() + alpha_s_1 = sigma_s_1 * lambda_s_1.exp() + alpha_t = sigmas[i + 1] * lambda_t.exp() + # Step 1 - sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta) - s_ = t_fn(sd) - x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised - x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su - denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args) + sd, su = get_ancestral_step(lambda_s.neg().exp(), lambda_s_1.neg().exp(), eta) + lambda_s_1_ = sd.log().neg() + h_ = lambda_s_1_ - lambda_s + x_2 = (alpha_s_1 / alpha_s) * (-h_).exp() * x - alpha_s_1 * (-h_).expm1() * denoised + if eta > 0 and s_noise > 0: + x_2 = x_2 + alpha_s_1 * noise_sampler(sigmas[i], sigma_s_1) * s_noise * su + denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) # Step 2 - sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta) - t_next_ = t_fn(sd) + sd, su = get_ancestral_step(lambda_s.neg().exp(), lambda_t.neg().exp(), eta) + lambda_t_ = sd.log().neg() + h_ = lambda_t_ - lambda_s denoised_d = (1 - fac) * denoised + fac * denoised_2 - x = (sigma_fn(t_next_) / sigma_fn(t)) * x - (t - t_next_).expm1() * denoised_d - x = x + noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * su + x = (alpha_t / alpha_s) * (-h_).exp() * x - alpha_t * (-h_).expm1() * denoised_d + if eta > 0 and s_noise > 0: + x = x + alpha_t * noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * su return x From 7a13f74220dcfcf6d8a6e7c1984f2c718cd9eda8 Mon Sep 17 00:00:00 
2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:52:34 -0700 Subject: [PATCH 0270/1073] unet -> diffusion model (#8659) --- comfy/sd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index cd13ab5f0..c32cf6b6b 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -1160,7 +1160,7 @@ def load_diffusion_model_state_dict(sd, model_options={}): model.load_model_weights(new_sd, "") left_over = sd.keys() if len(left_over) > 0: - logging.info("left over keys in unet: {}".format(left_over)) + logging.info("left over keys in diffusion model: {}".format(left_over)) return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device) @@ -1168,7 +1168,7 @@ def load_diffusion_model(unet_path, model_options={}): sd = comfy.utils.load_torch_file(unet_path) model = load_diffusion_model_state_dict(sd, model_options=model_options) if model is None: - logging.error("ERROR UNSUPPORTED UNET {}".format(unet_path)) + logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path)) raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) return model From ec70ed6aea05c3a380ad0e15d8221c7bf61be7bb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 25 Jun 2025 16:35:57 -0700 Subject: [PATCH 0271/1073] Omnigen2 model implementation. (#8669) --- comfy/ldm/omnigen/omnigen2.py | 469 + comfy/model_base.py | 31 + comfy/model_detection.py | 20 + comfy/sd.py | 8 + comfy/sd1_clip.py | 3 +- comfy/supported_models.py | 33 +- comfy/text_encoders/llama.py | 33 +- comfy/text_encoders/omnigen2.py | 44 + .../text_encoders/qwen25_tokenizer/merges.txt | 151388 +++++++++++++++ .../qwen25_tokenizer/tokenizer_config.json | 241 + .../text_encoders/qwen25_tokenizer/vocab.json | 1 + comfy_extras/nodes_edit_model.py | 26 + nodes.py | 5 +- 13 files changed, 152295 insertions(+), 7 deletions(-) create mode 100644 comfy/ldm/omnigen/omnigen2.py create mode 100644 comfy/text_encoders/omnigen2.py create mode 100644 comfy/text_encoders/qwen25_tokenizer/merges.txt create mode 100644 comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json create mode 100644 comfy/text_encoders/qwen25_tokenizer/vocab.json create mode 100644 comfy_extras/nodes_edit_model.py diff --git a/comfy/ldm/omnigen/omnigen2.py b/comfy/ldm/omnigen/omnigen2.py new file mode 100644 index 000000000..4884449f8 --- /dev/null +++ b/comfy/ldm/omnigen/omnigen2.py @@ -0,0 +1,469 @@ +# Original code: https://github.com/VectorSpaceLab/OmniGen2 + +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange, repeat +from comfy.ldm.lightricks.model import Timesteps +from comfy.ldm.flux.layers import EmbedND +from comfy.ldm.modules.attention import optimized_attention_masked +import comfy.model_management +import comfy.ldm.common_dit + + +def apply_rotary_emb(x, freqs_cis): + if x.shape[1] == 0: + return x + + t_ = x.reshape(*x.shape[:-1], -1, 1, 2) + t_out = freqs_cis[..., 0] * t_[..., 0] + freqs_cis[..., 1] * t_[..., 1] + return t_out.reshape(*x.shape).to(dtype=x.dtype) + + +def swiglu(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + return F.silu(x) * y + + +class TimestepEmbedding(nn.Module): + def __init__(self, in_channels: int, time_embed_dim: int, dtype=None, device=None, operations=None): + super().__init__() + self.linear_1 = operations.Linear(in_channels, time_embed_dim, dtype=dtype, 
device=device) + self.act = nn.SiLU() + self.linear_2 = operations.Linear(time_embed_dim, time_embed_dim, dtype=dtype, device=device) + + def forward(self, sample: torch.Tensor) -> torch.Tensor: + sample = self.linear_1(sample) + sample = self.act(sample) + sample = self.linear_2(sample) + return sample + + +class LuminaRMSNormZero(nn.Module): + def __init__(self, embedding_dim: int, norm_eps: float = 1e-5, dtype=None, device=None, operations=None): + super().__init__() + self.silu = nn.SiLU() + self.linear = operations.Linear(min(embedding_dim, 1024), 4 * embedding_dim, dtype=dtype, device=device) + self.norm = operations.RMSNorm(embedding_dim, eps=norm_eps, dtype=dtype, device=device) + + def forward(self, x: torch.Tensor, emb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + emb = self.linear(self.silu(emb)) + scale_msa, gate_msa, scale_mlp, gate_mlp = emb.chunk(4, dim=1) + x = self.norm(x) * (1 + scale_msa[:, None]) + return x, gate_msa, scale_mlp, gate_mlp + + +class LuminaLayerNormContinuous(nn.Module): + def __init__(self, embedding_dim: int, conditioning_embedding_dim: int, elementwise_affine: bool = False, eps: float = 1e-6, out_dim: Optional[int] = None, dtype=None, device=None, operations=None): + super().__init__() + self.silu = nn.SiLU() + self.linear_1 = operations.Linear(conditioning_embedding_dim, embedding_dim, dtype=dtype, device=device) + self.norm = operations.LayerNorm(embedding_dim, eps, elementwise_affine, dtype=dtype, device=device) + self.linear_2 = operations.Linear(embedding_dim, out_dim, bias=True, dtype=dtype, device=device) if out_dim is not None else None + + def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: + emb = self.linear_1(self.silu(conditioning_embedding).to(x.dtype)) + x = self.norm(x) * (1 + emb)[:, None, :] + if self.linear_2 is not None: + x = self.linear_2(x) + return x + + +class LuminaFeedForward(nn.Module): + def __init__(self, dim: int, inner_dim: int, multiple_of: int = 256, dtype=None, device=None, operations=None): + super().__init__() + inner_dim = multiple_of * ((inner_dim + multiple_of - 1) // multiple_of) + self.linear_1 = operations.Linear(dim, inner_dim, bias=False, dtype=dtype, device=device) + self.linear_2 = operations.Linear(inner_dim, dim, bias=False, dtype=dtype, device=device) + self.linear_3 = operations.Linear(dim, inner_dim, bias=False, dtype=dtype, device=device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h1, h2 = self.linear_1(x), self.linear_3(x) + return self.linear_2(swiglu(h1, h2)) + + +class Lumina2CombinedTimestepCaptionEmbedding(nn.Module): + def __init__(self, hidden_size: int = 4096, text_feat_dim: int = 2048, frequency_embedding_size: int = 256, norm_eps: float = 1e-5, timestep_scale: float = 1.0, dtype=None, device=None, operations=None): + super().__init__() + self.time_proj = Timesteps(num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0.0, scale=timestep_scale) + self.timestep_embedder = TimestepEmbedding(in_channels=frequency_embedding_size, time_embed_dim=min(hidden_size, 1024), dtype=dtype, device=device, operations=operations) + self.caption_embedder = nn.Sequential( + operations.RMSNorm(text_feat_dim, eps=norm_eps, dtype=dtype, device=device), + operations.Linear(text_feat_dim, hidden_size, bias=True, dtype=dtype, device=device), + ) + + def forward(self, timestep: torch.Tensor, text_hidden_states: torch.Tensor, dtype: torch.dtype) -> Tuple[torch.Tensor, torch.Tensor]: + timestep_proj = 
self.time_proj(timestep).to(dtype=dtype) + time_embed = self.timestep_embedder(timestep_proj) + caption_embed = self.caption_embedder(text_hidden_states) + return time_embed, caption_embed + + +class Attention(nn.Module): + def __init__(self, query_dim: int, dim_head: int, heads: int, kv_heads: int, eps: float = 1e-5, bias: bool = False, dtype=None, device=None, operations=None): + super().__init__() + self.heads = heads + self.kv_heads = kv_heads + self.dim_head = dim_head + self.scale = dim_head ** -0.5 + + self.to_q = operations.Linear(query_dim, heads * dim_head, bias=bias, dtype=dtype, device=device) + self.to_k = operations.Linear(query_dim, kv_heads * dim_head, bias=bias, dtype=dtype, device=device) + self.to_v = operations.Linear(query_dim, kv_heads * dim_head, bias=bias, dtype=dtype, device=device) + + self.norm_q = operations.RMSNorm(dim_head, eps=eps, dtype=dtype, device=device) + self.norm_k = operations.RMSNorm(dim_head, eps=eps, dtype=dtype, device=device) + + self.to_out = nn.Sequential( + operations.Linear(heads * dim_head, query_dim, bias=bias, dtype=dtype, device=device), + nn.Dropout(0.0) + ) + + def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None) -> torch.Tensor: + batch_size, sequence_length, _ = hidden_states.shape + + query = self.to_q(hidden_states) + key = self.to_k(encoder_hidden_states) + value = self.to_v(encoder_hidden_states) + + query = query.view(batch_size, -1, self.heads, self.dim_head) + key = key.view(batch_size, -1, self.kv_heads, self.dim_head) + value = value.view(batch_size, -1, self.kv_heads, self.dim_head) + + query = self.norm_q(query) + key = self.norm_k(key) + + if image_rotary_emb is not None: + query = apply_rotary_emb(query, image_rotary_emb) + key = apply_rotary_emb(key, image_rotary_emb) + + query = query.transpose(1, 2) + key = key.transpose(1, 2) + value = value.transpose(1, 2) + + if self.kv_heads < self.heads: + key = key.repeat_interleave(self.heads // self.kv_heads, dim=1) + value = value.repeat_interleave(self.heads // self.kv_heads, dim=1) + + hidden_states = optimized_attention_masked(query, key, value, self.heads, attention_mask, skip_reshape=True) + hidden_states = self.to_out[0](hidden_states) + return hidden_states + + +class OmniGen2TransformerBlock(nn.Module): + def __init__(self, dim: int, num_attention_heads: int, num_kv_heads: int, multiple_of: int, ffn_dim_multiplier: float, norm_eps: float, modulation: bool = True, dtype=None, device=None, operations=None): + super().__init__() + self.modulation = modulation + + self.attn = Attention( + query_dim=dim, + dim_head=dim // num_attention_heads, + heads=num_attention_heads, + kv_heads=num_kv_heads, + eps=1e-5, + bias=False, + dtype=dtype, device=device, operations=operations, + ) + + self.feed_forward = LuminaFeedForward( + dim=dim, + inner_dim=4 * dim, + multiple_of=multiple_of, + dtype=dtype, device=device, operations=operations + ) + + if modulation: + self.norm1 = LuminaRMSNormZero(embedding_dim=dim, norm_eps=norm_eps, dtype=dtype, device=device, operations=operations) + else: + self.norm1 = operations.RMSNorm(dim, eps=norm_eps, dtype=dtype, device=device) + + self.ffn_norm1 = operations.RMSNorm(dim, eps=norm_eps, dtype=dtype, device=device) + self.norm2 = operations.RMSNorm(dim, eps=norm_eps, dtype=dtype, device=device) + self.ffn_norm2 = operations.RMSNorm(dim, eps=norm_eps, dtype=dtype, device=device) + + def forward(self, hidden_states: 
torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: + if self.modulation: + norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb) + attn_output = self.attn(norm_hidden_states, norm_hidden_states, attention_mask, image_rotary_emb) + hidden_states = hidden_states + gate_msa.unsqueeze(1).tanh() * self.norm2(attn_output) + mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1))) + hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output) + else: + norm_hidden_states = self.norm1(hidden_states) + attn_output = self.attn(norm_hidden_states, norm_hidden_states, attention_mask, image_rotary_emb) + hidden_states = hidden_states + self.norm2(attn_output) + mlp_output = self.feed_forward(self.ffn_norm1(hidden_states)) + hidden_states = hidden_states + self.ffn_norm2(mlp_output) + return hidden_states + + +class OmniGen2RotaryPosEmbed(nn.Module): + def __init__(self, theta: int, axes_dim: Tuple[int, int, int], axes_lens: Tuple[int, int, int] = (300, 512, 512), patch_size: int = 2): + super().__init__() + self.theta = theta + self.axes_dim = axes_dim + self.axes_lens = axes_lens + self.patch_size = patch_size + self.rope_embedder = EmbedND(dim=sum(axes_dim), theta=self.theta, axes_dim=axes_dim) + + def forward(self, batch_size, encoder_seq_len, l_effective_cap_len, l_effective_ref_img_len, l_effective_img_len, ref_img_sizes, img_sizes, device): + p = self.patch_size + + seq_lengths = [cap_len + sum(ref_img_len) + img_len for cap_len, ref_img_len, img_len in zip(l_effective_cap_len, l_effective_ref_img_len, l_effective_img_len)] + + max_seq_len = max(seq_lengths) + max_ref_img_len = max([sum(ref_img_len) for ref_img_len in l_effective_ref_img_len]) + max_img_len = max(l_effective_img_len) + + position_ids = torch.zeros(batch_size, max_seq_len, 3, dtype=torch.int32, device=device) + + for i, (cap_seq_len, seq_len) in enumerate(zip(l_effective_cap_len, seq_lengths)): + position_ids[i, :cap_seq_len] = repeat(torch.arange(cap_seq_len, dtype=torch.int32, device=device), "l -> l 3") + + pe_shift = cap_seq_len + pe_shift_len = cap_seq_len + + if ref_img_sizes[i] is not None: + for ref_img_size, ref_img_len in zip(ref_img_sizes[i], l_effective_ref_img_len[i]): + H, W = ref_img_size + ref_H_tokens, ref_W_tokens = H // p, W // p + + row_ids = repeat(torch.arange(ref_H_tokens, dtype=torch.int32, device=device), "h -> h w", w=ref_W_tokens).flatten() + col_ids = repeat(torch.arange(ref_W_tokens, dtype=torch.int32, device=device), "w -> h w", h=ref_H_tokens).flatten() + position_ids[i, pe_shift_len:pe_shift_len + ref_img_len, 0] = pe_shift + position_ids[i, pe_shift_len:pe_shift_len + ref_img_len, 1] = row_ids + position_ids[i, pe_shift_len:pe_shift_len + ref_img_len, 2] = col_ids + + pe_shift += max(ref_H_tokens, ref_W_tokens) + pe_shift_len += ref_img_len + + H, W = img_sizes[i] + H_tokens, W_tokens = H // p, W // p + + row_ids = repeat(torch.arange(H_tokens, dtype=torch.int32, device=device), "h -> h w", w=W_tokens).flatten() + col_ids = repeat(torch.arange(W_tokens, dtype=torch.int32, device=device), "w -> h w", h=H_tokens).flatten() + + position_ids[i, pe_shift_len: seq_len, 0] = pe_shift + position_ids[i, pe_shift_len: seq_len, 1] = row_ids + position_ids[i, pe_shift_len: seq_len, 2] = col_ids + + freqs_cis = self.rope_embedder(position_ids).movedim(1, 2) + + cap_freqs_cis_shape = list(freqs_cis.shape) + cap_freqs_cis_shape[1] = 
encoder_seq_len + cap_freqs_cis = torch.zeros(*cap_freqs_cis_shape, device=device, dtype=freqs_cis.dtype) + + ref_img_freqs_cis_shape = list(freqs_cis.shape) + ref_img_freqs_cis_shape[1] = max_ref_img_len + ref_img_freqs_cis = torch.zeros(*ref_img_freqs_cis_shape, device=device, dtype=freqs_cis.dtype) + + img_freqs_cis_shape = list(freqs_cis.shape) + img_freqs_cis_shape[1] = max_img_len + img_freqs_cis = torch.zeros(*img_freqs_cis_shape, device=device, dtype=freqs_cis.dtype) + + for i, (cap_seq_len, ref_img_len, img_len, seq_len) in enumerate(zip(l_effective_cap_len, l_effective_ref_img_len, l_effective_img_len, seq_lengths)): + cap_freqs_cis[i, :cap_seq_len] = freqs_cis[i, :cap_seq_len] + ref_img_freqs_cis[i, :sum(ref_img_len)] = freqs_cis[i, cap_seq_len:cap_seq_len + sum(ref_img_len)] + img_freqs_cis[i, :img_len] = freqs_cis[i, cap_seq_len + sum(ref_img_len):cap_seq_len + sum(ref_img_len) + img_len] + + return cap_freqs_cis, ref_img_freqs_cis, img_freqs_cis, freqs_cis, l_effective_cap_len, seq_lengths + + +class OmniGen2Transformer2DModel(nn.Module): + def __init__( + self, + patch_size: int = 2, + in_channels: int = 16, + out_channels: Optional[int] = None, + hidden_size: int = 2304, + num_layers: int = 26, + num_refiner_layers: int = 2, + num_attention_heads: int = 24, + num_kv_heads: int = 8, + multiple_of: int = 256, + ffn_dim_multiplier: Optional[float] = None, + norm_eps: float = 1e-5, + axes_dim_rope: Tuple[int, int, int] = (32, 32, 32), + axes_lens: Tuple[int, int, int] = (300, 512, 512), + text_feat_dim: int = 1024, + timestep_scale: float = 1.0, + image_model=None, + device=None, + dtype=None, + operations=None, + ): + super().__init__() + + self.patch_size = patch_size + self.out_channels = out_channels or in_channels + self.hidden_size = hidden_size + self.dtype = dtype + + self.rope_embedder = OmniGen2RotaryPosEmbed( + theta=10000, + axes_dim=axes_dim_rope, + axes_lens=axes_lens, + patch_size=patch_size, + ) + + self.x_embedder = operations.Linear(patch_size * patch_size * in_channels, hidden_size, dtype=dtype, device=device) + self.ref_image_patch_embedder = operations.Linear(patch_size * patch_size * in_channels, hidden_size, dtype=dtype, device=device) + + self.time_caption_embed = Lumina2CombinedTimestepCaptionEmbedding( + hidden_size=hidden_size, + text_feat_dim=text_feat_dim, + norm_eps=norm_eps, + timestep_scale=timestep_scale, dtype=dtype, device=device, operations=operations + ) + + self.noise_refiner = nn.ModuleList([ + OmniGen2TransformerBlock( + hidden_size, num_attention_heads, num_kv_heads, + multiple_of, ffn_dim_multiplier, norm_eps, modulation=True, dtype=dtype, device=device, operations=operations + ) for _ in range(num_refiner_layers) + ]) + + self.ref_image_refiner = nn.ModuleList([ + OmniGen2TransformerBlock( + hidden_size, num_attention_heads, num_kv_heads, + multiple_of, ffn_dim_multiplier, norm_eps, modulation=True, dtype=dtype, device=device, operations=operations + ) for _ in range(num_refiner_layers) + ]) + + self.context_refiner = nn.ModuleList([ + OmniGen2TransformerBlock( + hidden_size, num_attention_heads, num_kv_heads, + multiple_of, ffn_dim_multiplier, norm_eps, modulation=False, dtype=dtype, device=device, operations=operations + ) for _ in range(num_refiner_layers) + ]) + + self.layers = nn.ModuleList([ + OmniGen2TransformerBlock( + hidden_size, num_attention_heads, num_kv_heads, + multiple_of, ffn_dim_multiplier, norm_eps, modulation=True, dtype=dtype, device=device, operations=operations + ) for _ in range(num_layers) + ]) + + 
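+        # Note on the block layout (a sketch-level summary inferred from the
+        # forward pass below): context_refiner processes the text tokens
+        # without timestep modulation, noise_refiner / ref_image_refiner
+        # process the noise and reference-image tokens with AdaLN-style
+        # modulation, and self.layers is the joint stack that attends over
+        # the concatenated [text, reference image, noise] sequence.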
self.norm_out = LuminaLayerNormContinuous( + embedding_dim=hidden_size, + conditioning_embedding_dim=min(hidden_size, 1024), + elementwise_affine=False, + eps=1e-6, + out_dim=patch_size * patch_size * self.out_channels, dtype=dtype, device=device, operations=operations + ) + + self.image_index_embedding = nn.Parameter(torch.empty(5, hidden_size, device=device, dtype=dtype)) + + def flat_and_pad_to_seq(self, hidden_states, ref_image_hidden_states): + batch_size = len(hidden_states) + p = self.patch_size + + img_sizes = [(img.size(1), img.size(2)) for img in hidden_states] + l_effective_img_len = [(H // p) * (W // p) for (H, W) in img_sizes] + + if ref_image_hidden_states is not None: + ref_image_hidden_states = list(map(lambda ref: comfy.ldm.common_dit.pad_to_patch_size(ref, (p, p)), ref_image_hidden_states)) + ref_img_sizes = [[(imgs.size(2), imgs.size(3)) if imgs is not None else None for imgs in ref_image_hidden_states]] * batch_size + l_effective_ref_img_len = [[(ref_img_size[0] // p) * (ref_img_size[1] // p) for ref_img_size in _ref_img_sizes] if _ref_img_sizes is not None else [0] for _ref_img_sizes in ref_img_sizes] + else: + ref_img_sizes = [None for _ in range(batch_size)] + l_effective_ref_img_len = [[0] for _ in range(batch_size)] + + flat_ref_img_hidden_states = None + if ref_image_hidden_states is not None: + imgs = [] + for ref_img in ref_image_hidden_states: + B, C, H, W = ref_img.size() + ref_img = rearrange(ref_img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p) + imgs.append(ref_img) + flat_ref_img_hidden_states = torch.cat(imgs, dim=1) + + img = hidden_states + B, C, H, W = img.size() + flat_hidden_states = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p) + + return ( + flat_hidden_states, flat_ref_img_hidden_states, + None, None, + l_effective_ref_img_len, l_effective_img_len, + ref_img_sizes, img_sizes, + ) + + def img_patch_embed_and_refine(self, hidden_states, ref_image_hidden_states, padded_img_mask, padded_ref_img_mask, noise_rotary_emb, ref_img_rotary_emb, l_effective_ref_img_len, l_effective_img_len, temb): + batch_size = len(hidden_states) + + hidden_states = self.x_embedder(hidden_states) + if ref_image_hidden_states is not None: + ref_image_hidden_states = self.ref_image_patch_embedder(ref_image_hidden_states) + image_index_embedding = comfy.model_management.cast_to(self.image_index_embedding, dtype=hidden_states.dtype, device=hidden_states.device) + + for i in range(batch_size): + shift = 0 + for j, ref_img_len in enumerate(l_effective_ref_img_len[i]): + ref_image_hidden_states[i, shift:shift + ref_img_len, :] = ref_image_hidden_states[i, shift:shift + ref_img_len, :] + image_index_embedding[j] + shift += ref_img_len + + for layer in self.noise_refiner: + hidden_states = layer(hidden_states, padded_img_mask, noise_rotary_emb, temb) + + if ref_image_hidden_states is not None: + for layer in self.ref_image_refiner: + ref_image_hidden_states = layer(ref_image_hidden_states, padded_ref_img_mask, ref_img_rotary_emb, temb) + + hidden_states = torch.cat([ref_image_hidden_states, hidden_states], dim=1) + + return hidden_states + + def forward(self, x, timesteps, context, num_tokens, ref_latents=None, attention_mask=None, **kwargs): + B, C, H, W = x.shape + hidden_states = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size)) + _, _, H_padded, W_padded = hidden_states.shape + timestep = 1.0 - timesteps + text_hidden_states = context + text_attention_mask = attention_mask + ref_image_hidden_states = ref_latents + device 
= hidden_states.device + + temb, text_hidden_states = self.time_caption_embed(timestep, text_hidden_states, hidden_states[0].dtype) + + ( + hidden_states, ref_image_hidden_states, + img_mask, ref_img_mask, + l_effective_ref_img_len, l_effective_img_len, + ref_img_sizes, img_sizes, + ) = self.flat_and_pad_to_seq(hidden_states, ref_image_hidden_states) + + ( + context_rotary_emb, ref_img_rotary_emb, noise_rotary_emb, + rotary_emb, encoder_seq_lengths, seq_lengths, + ) = self.rope_embedder( + hidden_states.shape[0], text_hidden_states.shape[1], [num_tokens] * text_hidden_states.shape[0], + l_effective_ref_img_len, l_effective_img_len, + ref_img_sizes, img_sizes, device, + ) + + for layer in self.context_refiner: + text_hidden_states = layer(text_hidden_states, text_attention_mask, context_rotary_emb) + + img_len = hidden_states.shape[1] + combined_img_hidden_states = self.img_patch_embed_and_refine( + hidden_states, ref_image_hidden_states, + img_mask, ref_img_mask, + noise_rotary_emb, ref_img_rotary_emb, + l_effective_ref_img_len, l_effective_img_len, + temb, + ) + + hidden_states = torch.cat([text_hidden_states, combined_img_hidden_states], dim=1) + attention_mask = None + + for layer in self.layers: + hidden_states = layer(hidden_states, attention_mask, rotary_emb, temb) + + hidden_states = self.norm_out(hidden_states, temb) + + p = self.patch_size + output = rearrange(hidden_states[:, -img_len:], 'b (h w) (p1 p2 c) -> b c (h p1) (w p2)', h=H_padded // p, w=W_padded// p, p1=p, p2=p)[:, :, :H, :W] + + return -output diff --git a/comfy/model_base.py b/comfy/model_base.py index 75ec42699..12b0f3dc9 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -41,6 +41,7 @@ import comfy.ldm.hunyuan3d.model import comfy.ldm.hidream.model import comfy.ldm.chroma.model import comfy.ldm.ace.model +import comfy.ldm.omnigen.omnigen2 import comfy.model_management import comfy.patcher_extension @@ -1230,3 +1231,33 @@ class ACEStep(BaseModel): out['speaker_embeds'] = comfy.conds.CONDRegular(torch.zeros(noise.shape[0], 512, device=noise.device, dtype=noise.dtype)) out['lyrics_strength'] = comfy.conds.CONDConstant(kwargs.get("lyrics_strength", 1.0)) return out + +class Omnigen2(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.omnigen.omnigen2.OmniGen2Transformer2DModel) + self.memory_usage_factor_conds = ("ref_latents",) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + attention_mask = kwargs.get("attention_mask", None) + if attention_mask is not None: + if torch.numel(attention_mask) != attention_mask.sum(): + out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) + out['num_tokens'] = comfy.conds.CONDConstant(max(1, torch.sum(attention_mask).item())) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + ref_latents = kwargs.get("reference_latents", None) + if ref_latents is not None: + latents = [] + for lat in ref_latents: + latents.append(self.process_latent_in(lat)) + out['ref_latents'] = comfy.conds.CONDList(latents) + return out + + def extra_conds_shapes(self, **kwargs): + out = {} + ref_latents = kwargs.get("reference_latents", None) + if ref_latents is not None: + out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16]) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 
4aa90d3b6..18232ade3 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -459,6 +459,26 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): return dit_config + if '{}time_caption_embed.timestep_embedder.linear_1.bias'.format(key_prefix) in state_dict_keys: # Omnigen2 + dit_config = {} + dit_config["image_model"] = "omnigen2" + dit_config["axes_dim_rope"] = [40, 40, 40] + dit_config["axes_lens"] = [1024, 1664, 1664] + dit_config["ffn_dim_multiplier"] = None + dit_config["hidden_size"] = 2520 + dit_config["in_channels"] = 16 + dit_config["multiple_of"] = 256 + dit_config["norm_eps"] = 1e-05 + dit_config["num_attention_heads"] = 21 + dit_config["num_kv_heads"] = 7 + dit_config["num_layers"] = 32 + dit_config["num_refiner_layers"] = 2 + dit_config["out_channels"] = None + dit_config["patch_size"] = 2 + dit_config["text_feat_dim"] = 2048 + dit_config["timestep_scale"] = 1000.0 + return dit_config + if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys: return None diff --git a/comfy/sd.py b/comfy/sd.py index c32cf6b6b..5b95cf75a 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -44,6 +44,7 @@ import comfy.text_encoders.lumina2 import comfy.text_encoders.wan import comfy.text_encoders.hidream import comfy.text_encoders.ace +import comfy.text_encoders.omnigen2 import comfy.model_patcher import comfy.lora @@ -754,6 +755,7 @@ class CLIPType(Enum): HIDREAM = 14 CHROMA = 15 ACE = 16 + OMNIGEN2 = 17 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -773,6 +775,7 @@ class TEModel(Enum): LLAMA3_8 = 7 T5_XXL_OLD = 8 GEMMA_2_2B = 9 + QWEN25_3B = 10 def detect_te_model(sd): if "text_model.encoder.layers.30.mlp.fc1.weight" in sd: @@ -793,6 +796,8 @@ def detect_te_model(sd): return TEModel.T5_BASE if 'model.layers.0.post_feedforward_layernorm.weight' in sd: return TEModel.GEMMA_2_2B + if 'model.layers.0.self_attn.k_proj.bias' in sd: + return TEModel.QWEN25_3B if "model.layers.0.post_attention_layernorm.weight" in sd: return TEModel.LLAMA3_8 return None @@ -894,6 +899,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**llama_detect(clip_data), clip_l=False, clip_g=False, t5=False, llama=True, dtype_t5=None, t5xxl_scaled_fp8=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer + elif te_model == TEModel.QWEN25_3B: + clip_target.clip = comfy.text_encoders.omnigen2.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.omnigen2.Omnigen2Tokenizer else: # clip_l if clip_type == CLIPType.SD3: diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 1b69a4103..ade340fd1 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -482,7 +482,8 @@ class SDTokenizer: if end_token is not None: self.end_token = end_token else: - self.end_token = empty[0] + if has_end_token: + self.end_token = empty[0] if pad_token is not None: self.pad_token = pad_token diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 19f25e337..f4413d647 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -18,6 +18,7 @@ import comfy.text_encoders.cosmos import comfy.text_encoders.lumina2 import comfy.text_encoders.wan import comfy.text_encoders.ace +import comfy.text_encoders.omnigen2 from . import supported_models_base from . 
import latent_formats @@ -1181,6 +1182,36 @@ class ACEStep(supported_models_base.BASE): def clip_target(self, state_dict={}): return supported_models_base.ClipTarget(comfy.text_encoders.ace.AceT5Tokenizer, comfy.text_encoders.ace.AceT5Model) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep] +class Omnigen2(supported_models_base.BASE): + unet_config = { + "image_model": "omnigen2", + } + + sampling_settings = { + "multiplier": 1.0, + "shift": 2.6, + } + + memory_usage_factor = 1.65 #TODO + + unet_extra_config = {} + latent_format = latent_formats.Flux + + supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + vae_key_prefix = ["vae."] + text_encoder_key_prefix = ["text_encoders."] + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.Omnigen2(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_3b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.omnigen2.LuminaTokenizer, comfy.text_encoders.omnigen2.te(**hunyuan_detect)) + + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2] models += [SVD_img2vid] diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 34eb870e3..7fbd0f604 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -24,6 +24,24 @@ class Llama2Config: head_dim = 128 rms_norm_add = False mlp_activation = "silu" + qkv_bias = False + +@dataclass +class Qwen25_3BConfig: + vocab_size: int = 151936 + hidden_size: int = 2048 + intermediate_size: int = 11008 + num_hidden_layers: int = 36 + num_attention_heads: int = 16 + num_key_value_heads: int = 2 + max_position_embeddings: int = 128000 + rms_norm_eps: float = 1e-6 + rope_theta: float = 1000000.0 + transformer_type: str = "llama" + head_dim = 128 + rms_norm_add = False + mlp_activation = "silu" + qkv_bias = True @dataclass class Gemma2_2B_Config: @@ -40,6 +58,7 @@ class Gemma2_2B_Config: head_dim = 256 rms_norm_add = True mlp_activation = "gelu_pytorch_tanh" + qkv_bias = False class RMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-5, add=False, device=None, dtype=None): @@ -98,9 +117,9 @@ class Attention(nn.Module): self.inner_size = self.num_heads * self.head_dim ops = ops or nn - self.q_proj = 
ops.Linear(config.hidden_size, self.inner_size, bias=False, device=device, dtype=dtype) - self.k_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype) - self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype) + self.q_proj = ops.Linear(config.hidden_size, self.inner_size, bias=config.qkv_bias, device=device, dtype=dtype) + self.k_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=config.qkv_bias, device=device, dtype=dtype) + self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=config.qkv_bias, device=device, dtype=dtype) self.o_proj = ops.Linear(self.inner_size, config.hidden_size, bias=False, device=device, dtype=dtype) def forward( @@ -320,6 +339,14 @@ class Llama2(BaseLlama, torch.nn.Module): self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) self.dtype = dtype +class Qwen25_3B(BaseLlama, torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + super().__init__() + config = Qwen25_3BConfig(**config_dict) + self.num_layers = config.num_hidden_layers + + self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) + self.dtype = dtype class Gemma2_2B(BaseLlama, torch.nn.Module): def __init__(self, config_dict, dtype, device, operations): diff --git a/comfy/text_encoders/omnigen2.py b/comfy/text_encoders/omnigen2.py new file mode 100644 index 000000000..1a01b2dd4 --- /dev/null +++ b/comfy/text_encoders/omnigen2.py @@ -0,0 +1,44 @@ +from transformers import Qwen2Tokenizer +from comfy import sd1_clip +import comfy.text_encoders.llama +import os + + +class Qwen25_3BTokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") + super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='qwen25_3b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=151643, tokenizer_data=tokenizer_data) + + +class Omnigen2Tokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen25_3b", tokenizer=Qwen25_3BTokenizer) + self.llama_template = '<|im_start|>system\nYou are a helpful assistant that generates high-quality images based on user instructions.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n' + + def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None,**kwargs): + if llama_template is None: + llama_text = self.llama_template.format(text) + else: + llama_text = llama_template.format(text) + return super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, **kwargs) + +class Qwen25_3BModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen25_3B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) + + +class Omnigen2Model(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", 
diff --git a/comfy/text_encoders/omnigen2.py b/comfy/text_encoders/omnigen2.py
new file mode 100644
index 000000000..1a01b2dd4
--- /dev/null
+++ b/comfy/text_encoders/omnigen2.py
@@ -0,0 +1,44 @@
+from transformers import Qwen2Tokenizer
+from comfy import sd1_clip
+import comfy.text_encoders.llama
+import os
+
+
+class Qwen25_3BTokenizer(sd1_clip.SDTokenizer):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
+        super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='qwen25_3b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=151643, tokenizer_data=tokenizer_data)
+
+
+class Omnigen2Tokenizer(sd1_clip.SD1Tokenizer):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen25_3b", tokenizer=Qwen25_3BTokenizer)
+        self.llama_template = '<|im_start|>system\nYou are a helpful assistant that generates high-quality images based on user instructions.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n'
+
+    def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs):
+        if llama_template is None:
+            llama_text = self.llama_template.format(text)
+        else:
+            llama_text = llama_template.format(text)
+        return super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, **kwargs)
+
+
+class Qwen25_3BModel(sd1_clip.SDClipModel):
+    def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}):
+        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen25_3B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+
+class Omnigen2Model(sd1_clip.SD1ClipModel):
+    def __init__(self, device="cpu", dtype=None, model_options={}):
+        super().__init__(device=device, dtype=dtype, name="qwen25_3b", clip_model=Qwen25_3BModel, model_options=model_options)
+
+
+def te(dtype_llama=None, llama_scaled_fp8=None):
+    class Omnigen2TEModel_(Omnigen2Model):
+        def __init__(self, device="cpu", dtype=None, model_options={}):
+            if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options:
+                model_options = model_options.copy()
+                model_options["scaled_fp8"] = llama_scaled_fp8
+            if dtype_llama is not None:
+                dtype = dtype_llama
+            super().__init__(device=device, dtype=dtype, model_options=model_options)
+    return Omnigen2TEModel_
diff --git a/comfy/text_encoders/qwen25_tokenizer/merges.txt b/comfy/text_encoders/qwen25_tokenizer/merges.txt
new file mode 100644
index 000000000..31349551d
--- /dev/null
+++ b/comfy/text_encoders/qwen25_tokenizer/merges.txt
@@ -0,0 +1,151388 @@
+#version: 0.2
[The remaining 151,387 lines of this file, the raw byte-level BPE merge pairs that make up the Qwen2.5 tokenizer vocabulary, are elided here.]
", +ĠD ictionary +B JECT +Ġloc ations +Ġs amples +Input Stream +ĠB rown +Ġst ats +qual ity +Ñ ħ +-d is +Ġhelp ing +Ġp ed +( se +ĠWh o +al ian +int ernal +Ġf t +> (). +-> { +Ġm ine +Ġs ector +Ġg ro +Ġopport unities +Ġà ¼ +Ġm p +Ġalleg ed +Ġdoub t +M ouse +Ab out +_p art +Ġch air +Ġstop ped +lo op +ent ities +Ġapp s +ans ion +Ġm ental +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠ +F R +Ġdef end +c are +Ġide al +/ api +ur face +Ġe le +ul ator +ĠR ights +angu ages +Ġfund s +Ġad apt +At tributes +Ġdep loy +opt s +Ġvalid ation +Ġconcern s +u ce +.n um +ult ure +il a +Ġc up +Ġp ure +.F ore +ĠHash Map +.value Of +as m +M O +Ġc s +Ġst ores +Ġ ************************************************************************ +Ġcommunic ation +m em +.Event Handler +. Status +_ right +.set On +S heet +Ġident ify +ener ated +order ed +Ġ" [ +Ġs we +Con dition +ĠA ccording +Ġpre pare +Ġro b +P ool +Ġs port +r v +ĠR outer +Ġaltern ative +( [] +ĠCh icago +ip her +is che +ĠDirect or +k l +ĠW il +key s +Ġmy sql +Ġw elcome +k ing +ĠMan ager +Ġca ught +) }Ċ +S core +_P R +Ġsur vey +h ab +He aders +AD ER +Ġdec or +Ġturn s +Ġr adius +err upt +C or +Ġm el +Ġin tr +( q +ĠA C +am os +M AX +ĠG rid +ĠJes us +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠ +.D E +Ġt s +Ġlink ed +f ree +ĠQ t +Ġ/** čĊ +Ġf aster +ct r +_ J +D T +.C heck +Ġcomb ination +Ġint ended +- the +- type +ect ors +am i +ut ing +Ġum a +X ML +U CT +A p +ĠR andom +Ġr an +.s ort +Ġsort ed +. Un +_P ER +it ory +Ġprior ity +ĠG al +ĠO ld +h ot +ĠD isplay +(s ub +_T H +_ Y +ĠC are +load ing +K ind +_h andle +, , +r ase +_re place +.add EventListener +ĠR T +Ġenter ed +g ers +Ġ ich +( start +/ app +Ġbro ther +M emory +Out let +Ġ utf +pre c +Ġn avigation +OR K +Ġd st +D etail +Ġaud ience +Ġd ur +Ġcl uster +un ched +Ġ ], +Ġcomfort able +. values +ĠT otal +Ġsn ap +Ġstand ards +Ġperform ed +h and +(" @ +å Ń +Ġph il +ib r +tr im +Ġfor get +Ġdo ctor +.Text Box +icon s +, s +ĠO p +S m +St op +ĉ List +ĉ u +Com ment +_V ERSION +.X tra +P erson +r b +LO B +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĊ +ĠCent ral +IC K +ra q +Ġput ting +Ġm d +ĠL ove +Pro gram +B order +o or +Ġallow ing +a fter +Ġent ries +ĠMay be +] ). +ĠSh ort +) \ +.n ow +f riend +Ġpre fer +ĠG PIO +os is +ĠGame Object +Ġsk ip +Ġcompet ition +_m atch +lic ations +_CON T +.group Box +Ġal s +" We +_e q +l an +_ search +ĠMus ic +as is +Ġb ind +ĠIs land +r um +( E +Ġse at +V ideo +Ġa ck +ree k +={ () +Ġr ating +Ġrestaur ant +DE X +(b uf +pp ing +ual ity +Ġle ague +Ġfoc used +ap on +$ data +CL UD +CLUD ING +Ġabs olute +( query +Ġtell s +A ng +Ġcomm unities +Ġhon est +ok ing +Ġap art +ar ity +/ $ +_m odule +ĠE nc +. an +.Con fig +C re +Ġsh ock +ĠAr ab +I ENT +/ re +Ġre trie +ycl er +is a +ĠO rgan +. graph +Ġ í +ĠB AS +En um +Ġposs ibly +ÑĢ Ð°Ð +ĠJapan ese +Ġc raft +ĠPl ace +Ġtal ent +Ġfund ing +Ġconf irmed +Ġc ycle +/ x +G E +Ġhe aring +Ġpl ants +Ġm outh +p ages +or ia +ĠRem ove +_t otal +Ġo d +oll apse +do or +Ġb ought +Ġadd r +AR CH +_d im +dd en +Ġdec ades +RE QUEST +Ġvers ions +f ire +Ġmov es +f b +Ġcoff ee +.con nect +ĠR ow +Ġs chema +S cope +- Type +Ġfight ing +Ġret ail +Ġmod ified +T F +File s +n ie +_com mand +st one +Ġ ÑĤ +_ thread +Ġb ond +ĠDevelop ment +Ġp t +F ORM +ple t +Ġident ified +c pp +Ġc oding +ok ed +ĠM aster +ID TH +Ġres idents +red it +ĠPh oto += - +un te +ate ur +_ST ATE +ĠS ing +Ġshe et +. 
val +or se +Ġh ers +Ġdetermin ed +Com mon +Ġw ed +_ queue +P H +ĠAt l +cre d +/L ICENSE +Ġm es +Ġadv anced +.j ava +.S h +G o +k ill +f p +_set tings +Ġp al +Ġtr uck +Ġcomb ined +Ġ" ${ +ĠCor por +Ġjo ined +ĠJ ose +ĠC up +un s +est ival +lev ision +Ġbro ken +Ġmar riage +ĠWest ern +Ġrep resents +ĠT itle +Ġs s +.A ss +ongo ose +ient o +< >();Ċ +Ġabs olutely +Ġsm ooth +TER N +ĠUn less +W ord +Ġmer ge +ig an +ĠV ol +Ġn n +.get Id +ĠÐ · +Ġsex y +Ġseek ing +S ingle +. this +Ġk om +b ound +; " +Ġfont Size +_d f +Ġinj ury +( H +Ġiss ued +_ END +: self +Ġp atch +Ġle aves +Ġad opt +File Name +ãĢ IJ +Ġexec utive +ĠBy te +] ))Ċ +Ġn u +out ing +clud ing +- R +. options +Ġsub stant +av ax +ĠB UT +Ġtechn ical +Ġtw ice +Ġm ás +Ġun ivers +y r +Ġdr ag +ĠD C +Ġs ed +Ġb ot +ĠP al +ĠH all +forc ement +Ġa uch +.m od +not ation +_file s +.l ine +_fl ag +[ name +Ġres olution +Ġb ott +(" [ +end e +( arr +F ree +( @" +ĠD istrict +PE C +: - +P icker +ĠJ o +ĠĠĠĠĠ Ċ +ĠR iver +_ rows +Ġhelp ful +Ġmass ive +--- Ċ +Ġmeas ures +ĠR untime +Ġwor ry +ĠS pec +ĉ D +ãĢ ij +Ġ) {Ċ +Ġwor se +(f ilename +Ġl ay +Ġmag ic +ĠThe ir +ou l +st roy +ĠWh ere +Ġsu dden +Ġdef e +Ġb inding +Ġfl ight +ĠOn Init +ĠW omen +ĠPol icy +Ġdrug s +ish ing +(' ../ +ĠM el +pe at +t or +Ġpro posed +Ġst ated +_RE S +Ġe ast +ĠCON DITION +_d esc +Ġwin ning +fol io +M apper +ĠP an +ĠAn ge +.s ervlet +Ġcop ies +L M +Ġv m +å į +Ġd ictionary +S eg +el ines +ĠS end +Ġ iron +ĠF ort +.d omain +Ġdeb ate +Not Null +e q +ach er +l f +ĉf mt +Ġlaw y +Ä Ł +ĠM en +Ġtr im +( NULL +Ġ! ! +Ġp ad +Ġfollow s +"] [" +re qu +ĠE p +.g ithub +( img +et o +(' \ +S ervices +umbn ail +_m ain +ple ted +fort unately +Ġw indows +Ġpl ane +ĠCon nection +. local +u ard +} \ +== " +and on +ĠR oy +w est +ig inal +em ies +it z +') :Ċ +ĠP eter +Ġt ough +Ġredu ced +Ġcalcul ate +Ġrap id +c ustomer +Ġeff icient +Ġmed ium +Ġf ell +. ref +ĠC as +Ġfeed back +S peed +( output +aj e +Ġc ategories +Ġfe e +} ; +Ġde leted +re h +Ġpro of +D esc +B uild +Ġs ides +.Array List +- % +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠ +Ø ± +.m atch +л и +Ġfe els +Ġachie ve +Ġcl im +_ ON +ĠC D +Ġteach er +_c urrent +b n +_P L +ist ing +En able +G EN +Ġt v +Ġso ck +Ġpl ays +Ġdis count +ĠK E +ĠDe bug +F ore +ĠI raq +Ġappear ance +M on +Ġst yled +ĠH uman +i ot +ĠH istory +Ġs ac +ĠC ollection +Ġrecomm ended +.Se lected +Ġorgan izations +Ġdiscover ed +co hol +ad as +ĠThom as +M ay +Ġcons erv +Ġdom in +ĠF ollow +ĠSe ction +ĠTh anks +User name +Ġrec ipe +Ġwonder ful +.s leep +_ if +ĉĊ ĉĊ +orn o +Ġr u +_t arget +." " +à ¦ +Event Args +Ġinput s +Ġf if +Ġv ision +c y +ĠS eries +) ((( +Ġtr ading +Ġmark er +B egin +Ġtyp ically +Ġca uses +drop down +_DE BUG +Ġdet ect +c ountry +! ");Ċ +ĉ R +app y +Ġc ref +(' < +" => +ĠL E +read er +Ġadmin istr +à µ +uck et +Ġf ashion +. char +iz ar +Ġdis able +Ġsu c +ĠL ive +iss ue +Ġmet adata +fl ags +Ġ ðŁ +Ġcomm itted +Ġv a +Ġr ough +Ġ'' 'Ċ +Ġhigh light +_var s +V O +Ġenc oding +- Z +_s ign +$ ("# +Ġr ain +reate st +ĠEN D +Se lection +Ġcandid ates +Ġs av +. Empty +Ġdec isions +Ġcoll abor +rid ge +fe ed +ress ion +Ġperson s +V M +eg a +_B IT +A ccording +ack ed +Ġdoll ars +_lo ss +ĠC ost +} "Ċ +Not ification +Ġpro stit +Ġauthor ity +.re c +Ġsp okes +ĠT oday +ist ant +ĠHe ad +âĢĿ . +ertain ment +ce an +cul ate +Ġv en +How ever +_ arr +Ġtok ens +G raph +ĠJ ud +ĠVir gin +ĠS erial +un ning +M utable +ag ers +.c sv +Ġdevelop ing +Ġinstruction s +Ġprom ise +Ġrequest ed +_ encode +/ " +ĠI con +u ilt +- day +Ġint elligence +. 
IS +ĠO bservable +ĠH ard +Bo ol +ident ial +.An chor +Ġsell ing +C I +AG ES +t le +b ur +UFF ER +R Y +Ġbig ger +Ġr at +Ġfam ous +Ġtyp ename +Ġexpl ained +} }Ċ +Ġn uclear +- N +Ġcr isis +ĠEnt er +Ġan swers +/ ${ +/ pl +Ġse qu +_n ext +m ask +Ġstand ing +Ġpl enty +ĠC ross +ĉ ret +d ro +ĠC ast += true +ĠCh ris +ic io +ĠM ike +Dec imal +add Component +L en +Ġco ck +Ġ# { +UR N +< tr +Ġauthor ities +Res ources +- H +B ottom +_ qu +put er +ester day +Dis patch +s ince +Ġfam iliar +, i +V C +Ġm ent +, C +Ġfre edom +Ġr outes +ĠB uy +Ġcomm ands +Ġm esh +/ C +ĠSet tings +- style +Ġw itness +Ġc le +Ġun ion +ef ault +are t +Ġthought s +Ġ ---- +_pro cess +_ us +ing ly +U ES +T ouch +ĠÐ ¼ +_ open +ĠV ec +Ġre ward +.C lick +/ : +Ġn ie +Ch anges +M onth +ï¼ Ł +Ġexec ution +Ġbe ach +( Integer +ĉ a +/ ' +.Font Style +Ġab ort +ĠS ingle +( isset +Ġd p +Ġ}} +Ġ* = +ĠP S +Ġdanger ous +[ p +OM E +O ther +ĠString Builder +Point s +head ing +Ġc urrency +Ġpercent age +_A PI +Ġclass ic +the ad +ĠM O +F E +Id x +aw ait +Ġà ¨ +Ġacc ident +Ġvari ant +Ġm yst +ĠL and +ĠB re +Ġh arm +ĠA cc +Ġcharg ed +ion es +Vis ibility +ar ry +ĠL anguage +Ġwalk ing +" .ĊĊ +if er +Ġleaders hip +.F rom +yn am +Ġt imestamp +i pt +ĠH as +REF ER +ĠIt s +Ġlist ener +UT E +_d escription +Ġexperi ences +Ġcre ates +R S +c art +bl ack +Ġcho ices +w ar +Ġ'' ' +Ġorder ed +Ġeven ing +Ġp il +Ġt un +ĠB ad +( app +r andom +Ġexp licit +Ġarr ived +Ġf ly +Ġecon om +-m ail +Ġlist s +Ġarch itect +ĠP ay +Ġd s +ĠS ol +Ġveh icles +H z +- com +Ġk ing +_e qual +ĠH elp +Ġab use +-- ;Ċ +Ġex tr +Ġchem ical +ä ¿ +Ġor ient +Ġbre ath +ĠS pace +(e lement +w ait +DE D +ig ma +Ġent r +Ġs ob +- name +Ġaff ected +ik a +Ġco al +_w ork +Ġhundred s +Ġpolit ics +sub ject +Ġconsum er +ANG E +Ġrepe ated +S end +Ġ# [ +Ġprot ocol +Ġlead s +use um +E very +Im port +(c ount +Ġchalleng es +Ġnov el +Ġdep art +b its +.C urrent +Ġ` ${ +ot ing +( \ +Ġcreat ive +Ġbu ff +Ġintrodu ced +us ic +mod ules +A re +-d oc +l anguage +_c ache +Ġto d +? > {{ +ĠRes ource +ĠSt andard +ĠP rem +up dated +ival ent +Ġas sets +_t emp +Ġinterest s +Ġhard ware +ĠR om +ĠSh are +Ġ' 'Ċ +Ġ* , +ĠT ake +ĠIm ages +_C HECK +(type of +ĠJ un +\< ^ +Ġli qu +Ġwor st +ymb ols +ĉĉĉ ĠĠĠ +Ġdr ivers +ĠD ocument +en o +ĠTechn ology +Ġappro ved +ump s +Ġs now +form ance +_A SSERT +u its +Ù Ĩ +Ġdiffer ences +. Visible +ĉĉĉ čĊ +ĠP s +_f etch +Ġto do +. ',Ċ +Ġs el +ur ers +in valid +Ġt weet +V EL +Ġresearch ers +Ġs printf +ĠR O +Ġp el +.Tr ans +Ġil legal +d ialog +sm arty +l g +_M IN +Ġher o +f inal +Ġp p +.L e +Ġc i +ĉ RT +Ġsuggest ed +p df +ach ing +ĠR o +ĠProp erties +ĠS i +Ġbuy ing +Ġm u +Ġl ands +if iers +ĠF ILE +RO UP +Ġh older +ĠS on +Ġsym pt +.r oute +) ? +Ġarg c +Ġfor t +Ġcas ino +_c ategory +Ġfor um +p refix +apt ure +T ube +em s +im ize +Ġn ue +a us +c ourse +AT OR +() ), +Ad vertis +ING S +Ġack now +ĠKore a +pl ing +Ġwork er +PL IED +h al +ĠRich ard +Element s +ĉĉĉ Ġ +st ar +Ġrelationship s +Ġche ap +AC H +ĠX ML +, & +ĠLou is +Ġr ide +_F AIL +Ġch unk +[ s +_O UT +Ġch osen +_ [ +/ ( +ĠJ eff +_s l +pr iv +ĠCan adian +Ġun able +_F LAG +Ġn os +h igh +Ġl ift +f un +() { +el ly +ycler View +_ as +_L IST +Ġr adi +.get Value +ĠAnge les +ĠS pan +_in stance +it ors +Ġm igration +A K +O h + ® +. selected +ĠG T +Ġadv ance +ĠSt yle +.Data GridView +e ction +Ñ İ +p io +ro g +Ġsh opping +ĠR ect +I lluminate +O U +ĉ array +Ġsubstant ial +Ġpre gn +Ġprom ote +IE W +.L ayout +Ġsign s +/ . 
+Ġlet ters +Bo ard +ct rl +" \ +ĠJ ones +Ġvert ex +Ġj a +Ġaff ili +Ġwe alth +ĉ default +Ġsignificant ly +Ġe c +Ġx s +act ual +.p er +_st ep +an vas +m ac +Ġtrans l +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +Iter ator +Ġo ch +agnost ic +ĠD uring +ĠDE FAULT +Ġt ill +Ġsign ature +Ġb ird +ĠO l +ĠI r +H S +av atar +ESS AGE +Ġe lev +Ġm t +ĠN av +Ġrel ax +Ġpl ate +IT EM +( date +.n ot +Ġgr ade +Ġ} ),Ċ +? "ĊĊ +i ences +H igh +ĠD IS +dis abled +Q UI +Ġno ise +a ux +ĠU P +os a +Ġv oc +Ġ )) +oc om +_O FF +ĠD b +L ock +.e clipse +, d +ĠD raw +Ġ" ( +Ġvis ited +Ġâ Ī +Ġsuc ceed +Ġim possible +a ire +ĠT urn +Ġd ish +F G +Ġs ensor +AN N +ab a +Ġsur g +] );čĊ +Ġf p +_ an +- J +- G +ĠJ ob +Con vert +ĠKE Y +Ġauth ors +_s erver +\ r +Ġ-* - +f lex +Ġs oc +R et +Ġs alt +Ġâ̦ ĊĊ +ĠC lear +(p age +-d anger +Ġroom s +con v +# { +. op +ĠA rea +_S C +h en +Ġbeg ins +- y +Ġexc ited +Ġign ored +Ġbon us +st udent +ĠM ember +Ġrel atively +ĠL ow +ĠPro du +ate way +pos ure +Ġth ick +ani el +( view +ĠCr ush +Ext ension +I l +e ed +LO C +. im +. Items +Ġconflic t +.pre vent +Ġon Create +u v +is er +Ġw ave +M ar +ĠComm unity +ic he +ĠNo thing +[ m +ĠLe e +ri ends +è re +!! ! +an z +. result +ĠS K +_P ARAM +Ġdem ocr +Back Color +.ex ists +" It +( options +ra zy +as er +\ Database +al endar +_ ass +; }Ċ +vert ex +ine craft +W arning +arg o +Ġact or +ĠInst ead +ĠUs ing +S elf +@ interface +Ġspe aking +ĠPar is +ĠL ICENSE +.n ode +ĠF ood +E IF +ĠB i +. Start +ĠI B +Ġun iversity +ĠHe ader +.pro duct +C opy +et c +r ical +Ġ> >> +book s +Ġal gorithm +Ġ' __ +(j avax +Ġnumer ous +Sh are +H ave +Ġrec ru +Ġpro ve +.sub string +he alth +е л +Ġdec imal +Ġcomm ission +s cription +x C +Ġsum mary +att ed +Ġclo ser +fin ished +() ){Ċ +ĠW ood +_field s +k u +_ items +Fl ag +Ġconf idence +ĠF ederal +du x +Ġcomp at +Ġvert ical +Ð ¹ +è s +; ">Ċ +_m anager +() ))Ċ +ID E +: ", +__ Ċ +ĠW ay +Ñ Ī +T emp +ĠS TR +rit ten +S ync +ĠA V +ĠC EO +ĠG uid +Ġenvironment al +Ġcorrespond ing +ĉ console +Ġjust ice +ĠJ S +Ġl ived +g ar +ĠG raph +ĠSt at +Ġi Phone +. al +ĠH D +Ġocc ur +Ġth reshold +Ġon click +RE G +.Graphics Unit +M eta +Å ¾ +Ġc um +.g nu +à « +Ġobt ained +Ġcompl aint +Ġe ating +Ġt ar +_t ask +Ġopt s +( to +P ass +Ġpl astic +t ility +ĠW in +.prevent Default +p ile +ĠG ar +Ġqu antity +_l ast +Ġg reatest +D ao +_D IS +ĠUs ed +ĠH P +rit ing +S ION +bl ue +d omain +Ġs cores +N ormal +_ admin +ĠA SSERT +Th en +** * +d ist +l on +Ġh ate +sh al +Image View +d atabase +Ġp and +Ġlog ic += false +b g +ĠConfig uration +Ġn ur +O G +Ġmar ried +: + +Ġdro pped +Ġreg istration +оР¼ +ult iple +iz ers +sh ape +.c opy +Ġwe aring +ĠC ath +Ġded icated +Ġ.. .Ċ +Ġadv oc +ĠF amily +Ġstat ements +em atic +ampions hip +Ġmot iv +ĠH ave +Ġbl ow +J ob +c ert +_v ector +inst all +ĠC OPY +em bed +D IR +ĠS pring +Ġex hib +cd n +ĠCom ment +ĠOption al +. player +ĠD ark +( pos +ĠSh ould +Ġcent re +ĠGu ard +ó w +Ġtr ouble +EN ER +( unsigned +_s ervice +Ġn s +ul ing +ĠMex ico +ĠN Y +mys ql +Ġl ic +å ľ +M r +- fl +ĠC ustomer +id i +Ġ? 
>ĊĊ +ri ble +Ġп ÑĢ +Ġs izes +_STR ING +valid ation +ĠJ on +( Http +add Class +N odes +Ġfrag ment +Ġsp oke +Ġw aste +J oin +Ġill ustr +el i +c ient +Ġa id +Ġpro sec +') {Ċ +Ġpass ing +Ġf aces +Sh ape +_ Z +it i +Ġal le +Ġro bot +ĠĠĠĠĠĠĠ Ċ +ĠS pe +Ġrece iving +ĠD etails +Ġ" ) +m g +_RE F +Ġcompar ison +* , +ĠF ound +_s ession +( U +/ F +Ġx xx +N etwork +d ers +Ġcap ture +Ġcor re +ĠL td +ĠAd v +[ @ +Ġcl ip +M ill +ĠPro file +Ġend if +Ġob lig +des cribe +.e lement +riter ion +L D +er ed +Ġfav our +s core +ĠF ilter +at tributes +Ġcheck s +In flater +ĠPl us +Ġscient ific +Ġpriv acy +He ad +Ġfe at +Ġdeg rees +ĠP ale +; "> +Ġfil ms +ĠA udio +ĠT ag +ĠE nergy +it ar +par ator +Ġf ellow +Ġev t +ĠT ri +ĠD AM +cl oud +ĠP assword +ĠDemocr ats +ĠAc ad +$ lang +Ġre b +() )ĊĊ +н Ñĭ +ĠB ur +read cr +Ġh ex +Con sole +ct l +ous el +ĠWill iam +Ġa z +_P ORT +Ġpract ices +Ġany where +ĠP osition +Ġ- >Ċ +i ams +.user name +place holder +Ġo der +ĠSecret ary +Ġi T +mon d +event s +? âĢĿ +.S ub +Ġatt ached +Ġn ão +Ġest ate +. action +Ġfig ures +Ġ} );čĊ +Ġsubs cri +.t ag +n am +. plot +no on +li ament +Char acter +.t ab +Ġw inter +ĠVar iable +Ġtre es +Ġpr oud +( V +_ load +Ġh ier +ĠE con +Ġf d +Ġvict ims +R est +ian a +Ġf ake +.Print ln +Ġstr len +Ġs ad +Ġb le +Pro t +Ġbutton s +Ġte levision +Ġlog o +ext ension +ĉ j +ste in +acion es +Ġ"" "ĊĊ +Ġsim p +Ġrecord ed +Ġbr ings +Ġprincip al +Ġfe es +(s ource +k dir +Ġutil s +Ġcorrect ly +f il +Ġw el +P air +-b utton +s cale +ver ify +[ c +Ġ-- - +Ġes cape +ik es +Lower Case +ic ian +Ġch apter +ĠT YPE +Ġsh adow +Ġaw esome +W E +el if +Ġl ambda +Ġdist inct +Ġb are +- off +Ġcol our +.append Child +ole c +ag a +.f ill +ĉs uper +Ġad j +( position +.get Item +Sh ort +Ġtot ally +V D +ĠT re +_ ep +v ements +ĠS olution +Ġfund ament +F ollow +Ġfac ility +Ġhappen ing +O F +.text Box +S pan +Ġ « +id en +Ġex ceed +(p arent +Ġc p +ç » +Ġhas n +Ġp ri +Ġcon sequ +n en +ĠIN TO +I gnore +ĠF uture +Ġcar bon +ĠSte el +f mt +ok ie +Ġs pl +(t itle +- info +Ġde als +Ġfix ture +e a +D iv +Ġtest ed +_ return +)ĊĊ ĊĊ +upport ed +ĠC ook +Ġpay ing +ĠI ll +Ġarrest ed +ĠPr ime +_c allback +> ,Ċ +dr iver +On ce +ab b +_by tes +ĠS ets +( Object +Ġc c +Ġsh ell +al o +); // +( log +ct ors +) +Ġ$ (". +.p os +Ġbo ys +Ġwed ding +Ġag ents +=" _ +ĠAr my +Ġh int +v ision +Ġte ch +ĠCon nect +Ġleg end +ĠB et +.B ase +Sub ject +Ġl it +Rem ove +Ġ" : +ĠF inal +pear ance +ĠiT unes +Ġparticip ants +ĠPy thon +Ġbus y +i el +vert ices +Ġtemplate Url +ĠC lose +Im g +ĠCorpor ation +t imestamp +Ġext end +Ġwe bsites +Ġposs ibility +о ÑĤ +Ġk ö +Ġme at +Ġrepresent ation +Ġ ĉĉ +_ST ART +.app ly +ĠVal ley +ĠS uccess +H i +Ġn ob +ĠI Enumerable +_ select +ge o +. 
")Ċ +Ġturn ing +Ġfab ric +(" ");Ċ +Ġpers pective +é Ĺ +ĠS n +Th ank +; j +.Param eters +ĉ ĠĠĠĠĠĠĠĠĠĠĠ +Ġfact s +Ġun t +.in stance +################################ ################################ +- end +ĠJO IN +ĠH en +Ġur i +åIJ į +Ġн а +ĠIn fo +Ġconduct ed +Ġà ¥ +OUR CE +Ġw ine +J ohn +.Error f +ĠA ge +ound ed +Ġreal ize +Ġ] ; +Ġsub sequ +, m +( User +ian o +Ġaccom pl +is p +.st d +é ĩ +ĠB ed +.set Attribute +B R +ke ep +ĠA LL +Ġis ol +am ma +P ackage +Ġoccas ion +-s uccess +еР´ +ĠLIMIT ED +st rip +() ĊĊĊ +istrib ution +Color s +Ġ+ :+ +Did Load +al er +Ġt id +ĠL ED +ĠLink ed +ĠC art +() )čĊ +_RE AD +Ġkill ing +ĠP HP +fe ction +Ġinst ances +c v +"/ > +Ġs f +Ġtax es +_ location +ĠBit coin +u able +r ank +ign ore +tr ack +к а +Ġshould n +ĠO P +=> {Ċ +Ġk m +Ġh elper +_ head +ĠWh ether +oc o +_b l +Ġstat istics +Ġbeaut y +Ġto g +t ip +ëĭ ¤ +Ġc sv +(s ql +std lib +we ak +Ġlik es +Ä į +Ġrepe at +Ġap artment +Ġem ph +_ edit +Ġv it +ĉ type +E ven +ut en +Ġcircum stances +b ian +Ġs ugar +W indows +ì ŀ +Ġobs erved +/ data +Ġcal endar +Ġstri ke +ĠR ES +_s c +f ony +ore m +( z +p ower +et ect +ĠS at +.d escription +Ġg ang +ĠS ports +ong s +ĠB undle +.s um +on ce +Ġacc used +Ġexplo re +Ġapprox imately +Ġlos ing +thes is +ĠF und +Ġdi agn +A utowired +prop erties +Ġ_ . +Ġc nt +ced ure +Ġy y +Ġgr ant +so ck +.inner HTML +Ġ] );Ċ +ĠCON FIG +=' $ +] ];Ċ +UN D +Ġg lob +Ġd ire +uff le +_M EM +Ġauth entic +> (" +Ġdec ade +ĠIm port +Ġorigin ally +Ġj Query +Ġindic ate +Ġours elves +S w +.l bl +ener ate +Ġbas ically +ĠH om +Ġ+ #+ +ĠBrit ain +ĠK ar +to Equal +.st op +Ġmod al +is i +Ġsuggest s +Ġd type +Ġt ur +b f +Ġconnection s +ĠB efore +ist ed +m ouse +Ġpul led +.b uild +Ġlegis lation +Ġfor th +p ad +eg o +.N ow +Ġexc iting +}ĊĊ ĊĊ +Ġcom pr +Ġsh ares +Ġr ig +g reen +_ vec +Ġenumer ate +A uto +ic ator +ĠR ay +as se +Ġh oliday +Ġnull able +g un +_d etails +Ġwr apper +se q +ĠYou ng +ju ana +Ġ" __ +lic ense +ser ve +^ ( +id ers +.Rem ove +rop down +' S +p in +(t oken +.D efault +Ġreason able +amp ion +ĠS ociety +Ġbe i +erv es +r ad +ĠF ox +_ images +Ġw heel +') [ +Ġc fg +( By +Con structor +Ġv ary +.sw ift +Ġpro xy +ĉ H +ĠAn other +ĠP en +Ġcheck ing +Ġj est +man ager +Or igin +ug s +o ir +>< !-- +Ġexpress ed +Ġmod er +Ġag encies +Ġi h +-h idden +ious ly +ĠR od +Ġso le +M ed +.A ny +Ġp c +b al +Ex ample +ĠS ale +Ġst rip +ĠCom p +Ġpresident ial +M ost +put ation +( ref +ĠF our +_f ilename +Ġen forcement +Ø ¯ +ĠGe org +we ights +/ l +Ġag gress +Ġd rawing +and y +< I +- j +ak a +h ref +Ġteach ers +_ Q +( it +ĠM B +Ġtemp orary +ire base +str a +æĹ ¶ +è ´ +( label +ou p +Ġtop ics +Ġport ion +id os +ĠJew ish +Ġre covery +Ġstand s +# [ +Ġafter noon +ĠArt icle +_ att +Ġexpl an +ĠP ak +.setOn ClickListener +. 
children +Ġi k ++ ( +l ag +Ġdis k +Ġcont rovers +"> & +as p +Ġw ie +ĠAustral ian +ĠYou Tube +At tr +cont ains +du ce +ĠM att +at ern +Ġvol unte +Ġnew sp +V P +olt ip +Ġde legate +_m eta +Ġaccur ate +ĠEx ample +% , +ĠD aily +Ġc abin +ĠS W +Ġlim its +k ip +Ġar my +Ġend ing +Ġb oss +ĠD ialog +Al so +="# " +ord an +row se +- min +Ġ" & +_ loc +U X +Ġdevelop ers +Ġaccur acy +Ġmaint enance +Ġhe av +Ġfil ters +.T oolStrip +Ġn arr +ĠE mp +ORD ER +ĠM obile +.S erial +.out put +.c ol +M aterial +um a +Ġconsum ers +sh ift +Ġp ued +Ġmin i +c ollection +Ġk an +.c enter +H istory +Ġben ch +() ); +itor ies +Ġcrow d +_c all +Ġpow ers +- E +Ġdis miss +Ġtalk s +ĠCh annel +for ward +_ control +/s rc +i est +**************** ******** +Ġbet a +(c olor +_O BJECT +ĠA pi +Ġeffect ively +C amera +s d +uss y +D ict +ĠE ffect +ib ilities +Ġreturn ing +ĠF ar +Ġ' ') +Ġmod ules +il ation +Ġ( % +TR GL +Ġst orm +on na +ĠEX P +Ġs pons +Ġdis pl +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠ +f all +å Į +ign Key +_ US +et rics +Ġhand les +T L +_ amount +ow a +br and +ĠT ool +Ġus ual +. Z +cre ment +ad ium +st ock +Ġserv ing +ĠB on +Ġline ar +ĠT arget +ĠR adio +H L +Sh ader +om atic +ag ues +in ity +d iff +_ iterator +qu ot +Ġ ,Ċ +c allback +Ġsympt oms +[ _ +ĠB ul +ĠF eb +und o +_ account +Ġtyp edef +и Ñģ +tr as +User Id +ĠP enn +ĠSup reme +} > +user Id +ĠK im +Ġg a +Ġart ists +å ¸ +ĠAb stract +ok emon +Ġh am +o val +Ġch a +at en +å Ĩ +F ixed +Ġvul ner +ĠParam eters +qu antity +.C lear +Servlet Request +Ġy a +Ġsou l +trans action +Ġsol o +Ġp airs +æ Ķ +ĠG re +_ word +ĠC C +Ġg i +z ie +Ġsched uled +rot ation +gy pt +ul ous +:: _ +ĠE ll +< ! +ĉĉ ĠĠ +l p +ah a +C opyright +Ġdr am +Ġdi agram +ĠM em +Ġg arden +Com p +Ġattempt s +uff ix +> () +Ġphil osoph +_re l +å ¼ +Ġs v +.se cond +ant o +.J son +ĠTe le +_ local +_s end +Ġas pects +ì Ĺ +IB LE +Ġr ail +Ġwid ely +ash ed +i ar +in f +up per +d jango +_result s +iss ing +Ġequ ivalent +OUN D +Ġt y +Ġpotential ly +Advertis ement +ĠRec ord +resent ation +_w idget +ound ing +Ġrelig ion +Ġcons c +ĠL im +. am +H tml +Ġ' : +P ATH +_s pec +ort ed +id ades +_sh ape +Ġkeep s +.S ave +ĠL oc +or i +ĠT EST +unic ip +Ġreg ions +Ġbelie ves +/ en +pos ite +{ ' +pre pare +_ const +s ample +ĠWill iams +Ġstr t +_ Get +ĠAnd rew +. active +Ġl ayers +Visual Style +az y +ĠK n +Ġac id +ĠAs ia +Ġex cess +ĉm y +Ġkey board +ens us +Ġcre w +Ġmiss ed +m aster +ĠW ild +Ġnew ly +Ġwin ner +Ġst ub +ic ode +.m ove +D omain +ĠS ar +Ġfore st +LE D +claim er +.ex it +ĠW indow +Ġres istance +ĠC HECK +(" - +ĠR yan +Ġp ipe +Ġco ast +DE F +// ! +_ off +ex it +Ġult imately +imit ive +ĠKe ep +Ġhistor ical +Ġany way +ĠJack son +ock er +ER N +ĠU INT +y ntax +ER Y +is ms +Ġc n +Ġocc urs +Ġ; ; +Text View +A E +/ img +Ġy esterday +- default +Ġt iny +Ġpro c +Ġal ive +ĠRE G +. th +ear ing +.get Logger +< link +_ login +F older +ab c +lyph icon +н о +Ġnot iced +od igo +Ġed ition +im ator +. 
Enabled +.parse Int +Ġy ards +ĉĉĉĉĉĉĉĉ ĉĉĉĉ +Ġver bose +л Ñı +_B Y +.log in +.* ;Ċ +ĠM id +é es +Ġg lo +Ġbuild ings +Ġz e +ĠI ter +Ġt ube +ĠP ot +\ M +< th +br idge +ĠS cript +ĠM odule +Ġv acc +Ġinstall ation +v y +VisualStyle BackColor +ĠS M +.t otal +b at +Ġfind s +Ġat mos +Sub view +iz ard +Ġrepl acement +lic ated +ap is +Ġlog ged +ĠLe ft +G ui +_ Type +t m +P ad +Ġhouse hold +Ġre le +Ġpropos al +_CL ASS +:: :: +Ġinf rastructure +In ject +/ html +Ġad s +iz za +Ġm g +ctr ine +% Ċ +< html +- image +Ġatt orney +< m +(' , +Ġcan n +Ġprint ln +o ose +Ġy ellow +.ex p +p ayment +Ġtable View +aw ay +Ġopp osition +ĠAg ain +ĠH andle +Ġex clusive +in ar +é r +оР± +ĠC ODE +emp orary +Ġre act +pi pe +c z +. activity +Ġlarg ely +Ġdis s +ax y +es is +ĠR en +Ġc orn +.Use VisualStyleBackColor +d ays +Ġfr uit +In sert +_ enc +E st +_de c +ĠL uc +Ġü ber +param eters +P ERT +ex press +_pro file +Un known +Ġrev olution +.add ress +_re quire +Ġun iform +ĠP ack +l ar +ĠU ITableView +Ġdep ends +Valid ation +conf irm +O wner +Ġt rib +h et +ĠI de +ans as +L anguage +u et +ĠP o +ĠSte ve +Ġcont est +_DE FAULT +Ġapparent ly +RE EN +Ġfrequ ently +Ġtrad ition +ocol ate +S I +ĠArg ument +F ocus +ert e +ĠL ayout +Ġd x +Ġgener ator +ĠW ait +P olicy +l ights +.Ex ecute +P y +Ġbed room +ed a +ra id +ĉs ize +Ġan cient +Ġp ump +Ġd w +Ġ(! ( +Ġspec ify +( status +ĠF BI +.ex ception +Ġrem ark +ly mp +ant ee +Up load +ern et +é ¡ +in ent +ĠR ender +d m +ĠM emory +r ich +ĠT ools +Ġk ne +Ġper m +b ad +Ġd inner +.res et +Ġj Label +Fe ature +.S ervice +Ġ( {Ċ +Ġre ferred +.class List +Ġinit With +ĠText View +Ġne ither +Ġcount y +Ġ" { +ç § +Ġt ack +class Name +ĠUS ER +Ġre new +` ` +get Name +Ġb rown +Err ors +ert o +Ġsust ain +S O +let es +ĠIn valid +Ġen emies +un ge +Ġexist ence +err a +Ċ ĠĠĊ +utor ial +# a +p ay +char ge +ĠI re +ate st +Ġexp los +Ġf ired +N ER +ĠT y +ic ion +U ri +Ġobvious ly +ĠC olum +Ġ' + +ĠDe vice +- related +_ ARG +Ġv or +ĠLess er +_O P +Serial izer +Ġup grade +L ight +Ġc odes +++ ;čĊ +Ġwrit es +fo od +Ġé t +@ section +Ġtrack s +Ġserious ly +ch t +(size of +Ġimmedi ate +Ġscient ists +Ġ{ $ +_ ne +.Anchor Styles +Ġaccom mod +ĠHar ry +Ġs ight +ĠPale st +ersist ent +Ġ Ñĥ +- input +Ġco ordinates + · +W elcome +.con f +Ġgre w +Ġb old +ĠC PU +(m y +Ġperfect ly +Ġmom ents +ĠM ovie +- data +yst al +_W IDTH +ĠS creen +æ Ŀ +Ġdis ap +Ġredu ction +.Get Component +_M ODULE +Ġgener ic +Ġd y +all er +Ġc url +ĠB ody +Ġb anks +, t +av g +Ġev il +Ġmanufact urer +Ġrece iver +Column s +Ġing redients +ĉ out +qu es +.L oad +Ġslow ly +ĠT own +ĠC ell +_n ormal +_p refix +ĠAl ert +(" { +ä r +âĢľ The +ĠM D +Ġcour ses +ath an +é Ļ +oc c +ĠS ER +es ign +Add r += [' +(" ./ +] } +.f ont +ĠInst agram +ĠB order +od a +Ġh all +Ġr um +_b it +Ġs aving +_d own +R andom +_reg ister +( Context +Ġoppos ite +R oom +Y ES +ан и +Ġenjoy ed +_r un +C lear +âĢ ĺ +ĠF ord +on ic +ost en +"] ) +_ auth +// čĊ +Ġsuff icient +LE S +Ġph en +Ġo h +_c sv +Ġrout ine +.Are Equal +ay lor +Ġb asket +_COM M +rypt ed +S im +ĠSh op +Ġstud io +at os +( W +[ string +ä t +og a +Ġsh r +Ġs ick +An other +Ġdo ors +_N E +ĠTH REE +. 
order +raz il +Ġmap s +_TR UE +trans late +Ġnear by +Ġn ach +LO AT +b atch +Ġl ux +ash es +ang ers +â̦ â̦ +_E VENT +_ UP +Ġact s +in v +_M ETHOD +cc ion +Ġret ain +ut ch +ĠÐ ± +Ġknow ing +Ġrepresent ing +N OT +p ng +Con tract +Ġtr ick +ĠE dition +uplic ate +Ġcontrol led +c fg +j avascript +Ġmil k +Wh ite +Se quence +aw a +Ġdiscuss ed +ĠB ush +ĠY ES +.f actory +t ags +Ġt act +Ġs id +$ $ +ĠE num +Ġfr ames +} ); +Ġreg ul +'] ;čĊ +Reg ion +ff f +Ġc ro +( com +=" + +St udent +Ġdis appoint +RES ULT +Count er +Ġbut ter +ĠH a +ĠD igital +Ġb id +"> {{ +ing ers +ĠC ountry +_t pl +"] )Ċ +/ k +d ating +: # +ĠD ATA +yn chron +_b ody +olly wood +Ġval or +ip ient +o ft +UB L +doc s +Ġsyn chron +Ġform ed +ru ption +Ġlist a +Request Mapping +Ġvill age +Ġkn ock +oc s +" { +_fl ags +Ġtrans actions +Ġhab it +ĠJ e +ed en +Ġa ircraft +ir k +ĠA B +Ġfair ly +. inter +.A ct +Ġinstr ument +remove Class +.com mand +Ñ ī +ĉm em +( min +Ġo t +Ġcol le += s +time out +Ġid s +ĠM atch +ij n +z ero +Ġnetwork s +.g ov +Ġint el +Ġsection s +out ine +(c md +(d ir +ĠLI ABILITY +ĠB log +Ġbr idge +ĠC V +con vert +Ġ" )Ċ +ĠB ern +_P O +e val +( set +to ol +Ġpay ments +Beh aviour +Ġcon crete +Ġel ig +Ġacc eler +Ġh ole +_ o +TE GER +Ġgraph ics +O wn +Form atter +on der +Ġpack ages +/ a +ĠK now +Or Default +Ġdut y +W ait +н а +_rec ord +[ t +M esh +Ġon going +.be ans +Ġt an +Ġinter pret +ast ers +QU AL +Ġleg s +\ Request +- file +_m utex +ĠS aint +// # +Ġpro hib +( info +: = +lin ux +Ġb lo +ot ic +ĉf inal +_ex p +ĠSt op +ap ing +(s aved +_p ush +Ġe ase +_F R +pons ive +str cmp +: ĊĊĊĊ +ä» ¶ +ol i +Ġextrem e +Ġprof essor +Im ages +.IO Exception +Ġaddress es +plement ed +Ġincor por +Ġuse Effect +_O F +ĠD a +n ombre +IR ST +Ġdisc rim +Ġcomp ens +greg ate +anc ell +ach es +ĠC riteria +$ result +D estroy +Ġsecond ary +W atch +ĠS em +ĠMc C +Ġacad emic +U pper +:: ~ +ut ral +ĠD og +ad ed +Valid ator +Ġder ived +Ġset Timeout +ĠK en +Ġtyp ical +ĠB ob +Ġb ounds +ĠSe ason +Ġc razy +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠ +-r outer +itt est +ĠM ir +Ġemot ional +, v +c n +/ st +å ½ +on om +Ġdecl ared +> . +ail ing +Ġ/* <<< +Ġnorm ally +(M e +ev in +lik ely +Ġpoint ed +ĠSt ack +Ġw alls +. Vector +me an +] ]Ċ +Ġlist ening +ad v +Ġsw ap +IF T +Ø ª +. argv +ul s +< option +not ations +Ġemail s +ĠU kr +ast a +ĠTh us +ĠSt one +Ġappe al +. âĢĻ +Ġreg ulations +Pre ferences +ĠPh one +ul f +ĠD R +Ġtechn ologies +Ġpar agraph +Ġnecess arily +.e ach +< float +res a +Ġunder st +Ġf inger +press ed +-b y +if fer +w atch +ĠB a +A IM +Ġwe ights +ĠR on +') }} +[ self +-------- --Ċ +per iment +Ġto String +x ic +ĠC amera +! ĊĊĊĊ +aur ant +P refix +Ġinstit utions +: int +Ġex posure +p attern +ĠLin ux +.n umber +red ient +Argument Exception +ĠCh ief +" }, +Ġelect ronic +r ong +er d +sp Net +ra it +/ ', +ĠOh io +Cont rollers +Ġcontin uing +ĠT emplate +ĠE th +s z +/ env +En v +% . +art ers +) (( +ĠT ABLE +Ġà ® +per ature +pro gress +P res +ê ° +im plementation +Ġb ien +Ġstre ets +_M SG +New s +## # +: / +Ġcut ting +x B +ress ed +_EN ABLE +l ab +Ġca using +] ));Ċ +b ra +x FFFF +il ly +plet ion +w ill +_b ar +Ġstruct ures +ĠI mp +Û Į +Ġ< > +Ġ ---------------- +_B UFFER +.d ir +Ġpl ain +Ġpe er +g g +oint s +Ġsomew hat +Ġw et +Ġemploy ment +Ġtick ets +ir ms +Ġt uple +s is +$ sql +r ig +Ġcon version +Ġg es +Ġconfig ure +eg r +ĠC a +Ġ__ (' +ou ston +.t oken +Bl ack +Ġmag azine +A W +. IN +os ing +Ġbro ke +ĠC ru +DE LETE +Ġdestroy ed +(M ath +Ġappro val +-d om +ĠI II +table View +Ġdesign s +Ġcrush ing +Ġcons ent +dir name +om p +Ġc rypt +? ( +or ough +. 
o +ĉ list +ams ung +."" "Ċ +err ing +G oogle +_p air +_IN IT +rem arks +Ġg ear +F ill +l ife +} ")Ċ +Ġsuit able +Ġsurpr ised +_RE QUEST +Ġman ifest +att en +Ġfr ustr +ov ement +.c lick +Ġi i +Ġexp ansion +ig s +P arse +.Reg ular +R ob +_l ayout +ì ł +Ġtrans lation +ĠBe aut +B est +_C OLOR +< label +Ġliqu id +IT S +Ġpro d +Ġoper ate +UI Kit +Ġn atur +arg ument +_d etail +ĠCent re +Ġ" -- +Ġ}} " +lo cale +.t v +_se q +Ġup coming +Ch art +ĠDiv ision +Ġclin ical +Com pany +S epar +l as +ĠH un +: s +Ġhead ing +оР³ +Ġ" ");Ċ +[ id +b ia +Ġst retch +ic ide +Ġre produ +.pro ject +leg end +end ers +Ġrespons es +Ġon t +rit ical +Ġref uge +ĠL i +Ġ: ĊĊ +ĠTh ree +.cont roller +_IN DEX +_F OR +\Model s +j ax +ĉex it +Ġâ ĸ +Ġc overs +ĉ y +- . +IND OW +Ġfail s +in cludes +Ġf ault +Ġl y +ñ o +.s lice +ILE D +ĠP ur +ĠAs ian +_b atch +.M ax +v l +ĠCOPY RIGHT +Ġg iant +ĠMan ual +ĠC opy +Class Name +He alth +C ursor +IB Outlet +Ġt we +æ ³ +_label s +Ġcol lected +Ġfurn iture +Ġdeal ing +Control s +ĠHot el +ck s +Ġch ose +âĶ Ģ +od d +S R +Ù Ĭ +ì Ħ +Ġacc ord +ĠM ove +ĠM ode +ĠM ock +Ġthread s +++ ++ +ĠO ptions +Ref resh +ĠD id +'] -> +u cc +_ch annel +. abs +Ġ{ },Ċ +ĠW al +er ior +Ġmain ly +ĠDr iver +NotFound Exception +Ġcount s +e am +Ġ& = +Q uestion +ĠA li +Ġany more +d etail +t ail +Ġm ile +ĠF air +Ġs orry +Ġsurround ing +Ġad m +De v +Ġmari juana +ĠS ound +ĠA sh +F D +Te am +. port +Ġ[ ]ĊĊ +ub ble +Ġas c +Ġint ention +A cc +ch i +ust ers +Ġins pired +se g +CL U +Ġman ip +M etadata +Con nect +ĠB eh +Ġfind ings +Ġas sembly +w orld +Ġrem ained +Ġu id +( . +Ġm x +Lo op +ĊĊĊĊ Ċ +Ġfant astic +wh o +ak i +ĠB asic +ĠY et +ĠUs ers +ik ip +Ġhead s +ĠMich igan +_ it +ĠTor onto +Ġrec ording +Ġsub mitted +_var iable +medi ate +.graph ics +Ġst ood +Ġre ar +vel ocity +_M ESSAGE +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +ro les +ĠT our +_ year +end ment +amp s +ĠIre land +m al +Ġyoung er +Ġstrugg le +Ġc able +ĠSD L +(' - +an es +ĠNe ed +.R ow +P ol +ĠP H +_s cript +ag em +ĠB as +_s pace +. loc +: i +ad r +Ġengine ering +it en +) & +Ġu k +ĠL ittle +_C OUNT +x A +Array List +æ į +Ġ" ")Ċ +An chor +Ġh ang +t witter +Ġcompet itive +.s rc +ãģ Ĺ +Ġtrans late +ĠCre ates +ook s +ĠR oll +'' 'Ċ +/ sh +s ome +Enc oding +.res olve +Ġdesign er +ĠSt orage +Ġz a +ĠN ever +Ġsomew here +Ġbox es +.s ource +Ġpy game +Ġgrow n +.t w +() ),Ċ +', [' +Ġoppon ent +(s rc +.l ayer +AP P +ĠAct iv +Ġguest s +ĠVAL UES +};ĊĊ Ċ +.n ative +Ġamount s +. RE +Ġcl one +Ġwer en +Ġ" << +_ ac +Ġbreak ing +Ġreli able +.P OST +ĠSk y +Ġ' & +Ġsaved InstanceState +ast ing +ill ion +com ments +ult y +.m enu +/ config +Ġ ĊĊĊ +T ODO +Ġpurch ased +_c or +ĉ auto +Compat Activity +com plete +_ graph +is odes +Ġsitu ations +ĠH or +Re ceive +âĢľ We +Ġent ities +.assert Equals +оРº +ĠS ans +v ince +rom pt += Ċ +Ġ/ . +.Se lect +yl v +Ġb att +A udio +Ġincreasing ly +.B undle +Ġexpl ains +the ast +. offset +Ġh al +Ġtechn ique +_l imit +Ġdraw n +AY ER +Ġfeature d +yy yy +at in +ph en +ach el +! 
\ +l ower +ĠG R +Ġp ag +ĠP arse +Ġt ou +ä¸ Ģ +D istance +Index Path +Ġh ell +s im +UT TON +Us age +elen ium +ĠF all +Ġ" .$ +ĠM u +Ġcr uc +Ġs ont +REF IX +Ġinter ior +ĠO lymp +.Auto Scale +par a +Axis Alignment +Ġr iver +D to +Ġwith draw +Re act +- class +b efore +_ alloc +Cont ents +ĠW as +I CT +Ġform ula +Ġindic ates +ĠĠĠĠ ĊĊ +_st ore +it ting +ĠIt alian +_S et +_re port +Ġp id +_V ER +Ġw ins +ĠCl oud +") {Ċ +ch ester +Ġden ied +Ġw ird +ĠSte p +Ġinvest ors +b old +_d isplay +ou ver +or er +Res et +Ġsurg ery +Ġstrateg ies +/m aterial +_ unit +Ġc ouncil +.P er +ĠâĢ ŀ +Ġre form +F ramework +Ġlist ing +_b tn +Ġb is +% d +eg as +Ġsudden ly +_S ER +Ġa o +_d irectory +f as +Ġprem ium +Ġtrack ing +ĠB L +Ġm ature +Ġbath room +Ġ'/ ' +ĠÄ ij +Per formed +Ġsold iers +arn ings +Ġwalk ed +- con +b ottom +Ġsurpr ising +Ġg ene +Us uario +.DE FAULT +ĠM IT +C ODE +ĠE gypt +p icker +ys ql +AT URE +d etails +ĠCon ference +In formation +ĠM ail +-d own +r aries +b ro +Ġsubject s +Ġ' * +è¯ · +or ient +: @ +ver bose +E F +Ġto ler +eng ers +Ġend point +Ġstr ange +Ġcol on +Ġpre ferred +de p +ĠE V +ARR AY +Ġw he +Ġp up +_n odes +Ġtalk ed +Ġinstit ution +db c +Ġex posed +te en +ĠFr ont +T T +_N ONE +\/ \/ +pro gram +Ġencour age +. ` +sh ire +ĠIsl am +e en +N I +' " +.W idth +Ġlik ed +Ġ{ ... +ĠSystem s +Ġvot re +Ġmanufact uring +Con verter +ĠIn f +ì ļ +D TO +Ġin ches +Ġ ठ+à ¹ +ĠChar les +B U +")) ;ĊĊ +ĠL abor +un n +Ġest im +m obile +ĠL earn +_C ALL +â Ħ +Ġind ices +Ġt ub +ikip edia +C ost +row able +ë ¡ +g age +Ġfunction ality +uzz le +em os +.l ib +Ġd ass +еРº +enn a +Ġsh ots +Ġrest ore +/ D +For Key +], [ +al ias +l int +.st ream +æ ł +_FORM AT +Ġsil ver +.re pository +Ġlegis l +.B order +_fe atures +Per mission +Ġhous es +ĠW ars +_COM P +Ġinj uries +Ġconstant ly +fl utter +EN U +ĠCon f +Ġrecogn ized +Ġpract ical +Ġde cent +B J +] ); +ast y +ĠAct ivity +-m ode +Ġsl ide +.IsNullOr Empty +ĠY OU +P ower +ind ices +Ġqual ified +Ġthrow n +h ello +ĠN ick +l ah +as sembly +ĠSm all +old ing +Sh ould +ĠSil ver +(saved InstanceState +Ġtog gle +.N ot +C trl +: nil +ĠCont inue +ĠB oot +æ ī +ĠM ur +d on +ĠF A +S napshot +Ġassoci ation +fo x +, a +az ione +] )čĊ +CT YPE +Ġf ade +ĠD ar +.n avigation +Ġl uck +SC RI +ĠDe ad +Ġterm inal +_LE NGTH +Ġeff iciency +Ġun w +Ġn arrow +iment o +( Color +ĠSe a +_ area +, A +_ opt +ĠHill ary +.t ask +ĠJ ac +ast ed +ĠAd am +ĠIl legal +Ġsearch ing +Instance Of +J ava +ĠForm at +Ġreal ized +ĠChild ren +Ġk il +(f rame +âĢĿ .ĊĊ +Ġscen ario +"] );Ċ +Ġincred ible +li x +IO Exception +ĠQ uest +il ty +Ġun lock +â Ĥ¬ +Ġre ferences +ĠV ert +B inding +eg ative +Ġwr ap +.d atabase +( content +B uf +ĠTr ad +ĠA ud +tr ace +.m ock +Ġther apy +ĉ L +.To Int +ĠKing dom +B us +ha ust +"" "ĊĊ +( end +.draw able +[ ];Ċ +ĠH ospital +Ġph arm +---- - +ĠA G +é d +> ");Ċ +Ġw allet +at able +) $ +Ġmonth ly +Ġdi agnostic +S ymbol +Ġiter ator +un finished +Ġimm igration +s r +RO W +(g ame +Ġclo thes +ĠU nt +Ġactiv ation +_C on +.h ash +Ġinitial ly +.H ash +Ġcut s +f ound +ĠSt ory +ÑĨ и +ac ao +_T YP +pro to +est r +-p age +ah r +Ġincor rect +ĠJose ph +TextBox Column +_st yle +ĠD aniel +s heet +Ġl iv +l ined +Ġr a +R untime +_ empty +sl ug +_ struct +ë Ĭ +m u +Ġper mitted +Ġreg ional +Ġsob re +ĠS uch +Ġ[ _ +Ġro of +.Al ignment +t imes +.m sg +Ġche st +ĠT ab +Ġest a +ä n +Ġsubs cription +( command +s pecial +Ġme al +") :Ċ +_ ctx +Ġclos ely +et ry +- be +ad el +ĠR am +ig est +ĠSpan ish +Ġcommit ment +Ġw ake +* >( +P HP +_ { +ck er +< List +_n ull +ĠRes erved +Ġin her +.Column s +.A spNet +_IN VALID +ĠParam eter +Ġex pr +} { 
+Cell Style +Ġval uable +Ġfun ny +In v +Ġst able +* t +Ġp ill +pl iers +ĠC SS +ĠCon dition +ĠS peed +ublish er +Ġoff ensive +ce st +ic as +Ġsp ark +ĠPro te +set up +IF Y +ĠT ax +Wh o +F amily +- for +. uk +Ġf asc +sv g +") ). +Ġbirth day +âĸ Ī +ve h +el led +Ġimport s +ĠIsl amic +T A +ĠSt an +we ather +Ġsus pect +e ature +enn es +W M +.m inecraft +av id +è ½ +.se curity +in os +G ood +Ġm arch +Ġposs ess +us uario +Con s +am ber +ched uler +Ġhor se +ç ½ +(b ody +ĠTrans form +_de code +.s vg +Ġf oo +Ġd ella +ext ends +am er +Ġprocess ed +ĠH arr +ĠA I +Ġk o +CH AR +( % +Ġt ap +({ ' +c roll +D OM +Ġte a +Ġre in +Ġworld wide +_f n +sh a +Ġb ir +ç ões +="# "> +Ġrepresent ed +ill er +(ex pected +Ġd ance +Ġvisit ors +.con cat +-b it +UR RE +ĠR og +v p +ip h +ĠL LC +it led +iam i +C oll +_re al +_sh ow +_f older +Ġd ar +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +Ġl atter +arch y +Ġb ow +Ġout come +ĠPost ed +Ġris ks +ĠThere fore +Ġowners hip +Ġpar allel +Ġp ending +ge ometry +Ġrecogn ize +ST EM +ĠC P +Ġimm igr +IT LE +ĠĠĠĠ ĉĉ +conn ected +Ġsm ile +(d ocument +\ Component +vert ical +Ġconsum ption +Ġsh oes +. impl +un ks +. ";Ċ +Ġfood s +_ );Ċ +.assert True +Ġp ipeline +Ġcollection s +Ġearn ed +ĠC ert +Ġpartners hip +( action +Ġc d +ĠV ery +Option al +Ġscre ens +Ġtit les +ener ator +Ġab andon +k ind +IL TER +Ġclos ing +lic a +_ inter +Ġcamp us +set ting +S prite +ãģ ¯ +_re ply +To List +: \/\/ +ed e +Ġfol ks +Ġbo at +( argv +Ġperman ent +Ġcarry ing +Ġconserv ative +import ant +. img +ĠIm m +Ġdim ensions +al and +s ingle +Ex it +-------- -- +ari ant +tern al +Se conds +ĠIt aly +ot lin +.Res ume +=' " +) == +cept or +Ġs ca +/m ain +Sec urity +_d at +Ġlet s +Ġa qu +Ġwhen ever +b erry +Ġact ing +ant i +p d +& gt +æ Ń +Z one +T oday +! . +To Props +ab is +it able +Ġg al +] { +iz ona +Ġin contri +N ET +/// Ċ +[ in +_s ave +Ġex em +ĠK enn +Ġev olution +var s +_st ats +- only +ĠColor ado +Ġwatch ed +b our +Ġsever e +Ġprofession als +port ion +Ġguar ante +Ð ³ +Ġpush ed +ĠG i +ï ½ +Ġt um +ĠA z +ĠEdge Insets +")) ;čĊ +is se +. ac +Set ting +Ġapprec iate +ĠValue Error +Ġsur ve +ĠR ole +. Inter +plot lib +j et +d am +Ġplatform s +te le +UT O +ĠInt ernal ++ : +} ;čĊ +Gener al +\ Entity +Ġlawy er +qu iv +ĠPost s +is o +Ġacc um +ob e +Ġmark s +Ġ] ;ĊĊ +ĉ text +.s uccess +cur r +as a +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠ +Ġth in +_ over +are st +ĠO s +( address +Ġvel ocity +Ġ[] ;ĊĊ +=" ../../ +ĠPr iv +b ow +Ġguar antee +% ĊĊ +Ġeval uate +.LE NGTH +Ġin ventory +q a +_de bug +.On ClickListener +Ġl ies +Ġassess ment +dat etime +.background Color +Ġ*/ čĊčĊ +ra f +un wrap +ĠF oot +Ġnot ify +Ġlow est +DO CTYPE +Ġl anguages +ex tra +- back +Ġein en +tem plates +_p ass +ĠM ust +Ġest á +_c ore +ĠSc ot +A I +Ġb ias +ations hip +Con stant +Ġprogram ming +In s +uspend Layout +ĠPRO VID +ant es +Ġsh irt +in ated +. OK +[ a +Ġthink s +? ĊĊĊĊ +Ġregard less +ĠMag ic +ul ating +ĉ class +add Group +RE ATE +ĠS U +Ġsim pl +c opyright +Ġb unch +Ġun iverse +ĠE rr +Ġpresent ation +c ategories +Ġatt ach +.s ign +_A C +Ġdisc ipl +Ġregular ly +Ġprim arily +ink s +[ [ +.r and +.sh ould +ownt own +=" ' +Ġs ans +Ġsupport ers +se quence +G O +. 
.ĊĊ +ĠS pr +Ġcare fully +U IColor +dest roy +Ġtod os +ĠOR DER +ott ed +Ġd ont +aud i +_ player +g re +ĠO il +< body +_st ack +.P adding +ĠProduct s +Ġpriv ile +Ġinj ured +ĠF urther +Ġal ias +.Resume Layout +_LE N +Ġs es +'] ;ĊĊ +cre ens +Ġdirect ed +.S uspendLayout +od ge +.A t +mark s +ĠUn ivers +ert s +ĠE sc +Ġnav bar +Ġutil ity +agnost ics +Ġin ject +ĠD NA +Ġ" ," +am ar +Ġe u +Ġrestaur ants +_p ut +ut ers +Tool Strip +t w +ist ro +Ġz oom +Ġleg it +pec ific +ĠC ome +Ġlocal Storage +Ġabs or +.P anel +ĠDesign er +Ġo w +IC AL +_ uri +(f ield +Ġsup erv +Ex ists +Ġrespect ively +ĠSt and +Con f +uss ian +Ġar c +Ġ nd +uck s +Ġre str +Ġseason s +ĠCh apter +ĠSw itch +p ic +Ġh i +load ed +Ġfl uid +-b tn +Ġrun time +. it +B N +Op acity +as ant +ry ption +-n ative +Ġta ught +å ¯ +ag ment +Ġm ul +Reg istry +_ grid +ĠBro ok +: Set +Ġm ongoose +AM ES +inner HTML +Ġs oci +ĠInt el +get Id +C md +Ġaccess ible +r ames +le ton +Ġ__ ( +ĉ delete +ĠS quare +" ĊĊĊ +Ġbu cket +avor ite +ĠB reak +++ ] +Ġbr ush +Ġt ensor +/ http +T ile +Ġfunction al +Ġ" * +wh el +Ġt ent +ĠChar acter +Ġse es +. ST +B ig +Ġext ern +Url s +)) )), +ĠJ r +.B uilder +. ; +n l +_ Init +ĠH ER +ż e +mys qli +_ icon +v an +Ġfeel ings +Ġle an +Ġhop ing +T V +="čĊ +b est +all as +ent ed +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĊ +_con nection +Ġrep o +en abled +аРº +Ġsh a +Ġmembers hip +Status Code +in ating +_s m +_c ustom +_ weight +Ġc ss +St at +_ env +link s +TR L +ĠH it +, r +up id +Ġop ens +Ġg ent +_v is +Ġj oy +< w +_c ost +ĠPy Object +ren ce +ĠGeorg ia +ĠBro ad +m ma +â Ĥ +p f +Ġ" \" +Ġ( & +om o +Ġliter ally +Ī ĺ +met ric +Ġb ars +z ed +(w indow +ĠIsrael i +Ġform al +ident ifier +.d ao +ĠDe ath +% ;Ċ +Ġdecl are +ar ms +RE AM +PERT Y +Ġconsequ ences +to ols +Pe ople +ĠWh ich +> ();čĊ +.de code +_A CT +Button s +.f loat +.F irst +ë ¥ +ĠPol it +ĠX CT +T ags +ĠCG Float += str +Ġle af +- check +ĠI ss +.s ystem +log out +ach t +Ang le +s in +ch art +INT ER +ĠN UM +B asic +.P roperties +ä¸ Ń +_ change +ĠB razil +Ab stract +Ġ: +: +_ use +а л +ĠL y +IB UT +Ġout er +Ġ-- >čĊ +Ġrel ief +l ap +qu er +_p arent +he ap +LO SE +Ġcomb ine +ĠR ose +ow ers +Ġproced ures +ĠS ort +an im +var iant +eh icle +Ġsign ing +Pr imary +c urrency +Ġsex e +o en +th eta +em an +Ġimpress ive +(' _ +ĉ U +ĠText Style +_c nt +Ġs lice +(' : +Ġunderst ood +H is +Ġinform ed +Ġn ick +(T AG +h d +Ġelection s +est ure +ĠS anta +ĠCo ast +.p df +inc iple +.cl one +b orn +ut a +Ġl icensed +C r +Ġb read +ĠH ouston +Ġn od +Ġhop es +ĠCG Rect +Ġgu ilty +.g if +Ġro se +.Com mon +T ip +AN K +ĠF C +D uring +ĠSym fony +Ġdef ensive +k m +) > +arch ive +ĠU RI +ycl ing +- o +ĠWe bsite +AM P +ish ment +Ġdo ctors +D irect +AR I +ĠRed irect +ier en +_d ist +y o +ĠPro gress +Ġz um +Ġmem or +ĠE D +Ġj ur +æį ® +_T ABLE +Ġu uid +Ex pr +. head +(' % +point er +Ġest imate +ĠG reg +Ġlo ader +Ġi OS +Ġm ens +[ y +Ġref used +Ġprec ision +is ch +ĠA CTION +Cl oud +s With +( ret +_ADD R +_con f +(d f +Ġlock ed +Ġr ising +ãĥ» ãĥ» +ĠM s +Ġscen es +_EX T +_ raw +_ the +pe ople +Ġre con +ĠF un +Ġb less +ĠUp dated +ü n +ĠĠĠĠĠĠĠĠĠĠĠĠ čĊ +pe ction +Re lease +.log ger +ĠS Y +Ġcoun sel +ur d +_ true +Ġevery body +iv ot +Ġh ence +ĠN AS +Ġoppos ed +unk nown +ĠDES C +ĠCh air +fa iled +ĠIN CLUDING +Ġwrit ers +{ }Ċ +ÃŃ t +_c opy +} : +ĠB at +Ġconvert ed +ed ing +pl acement +ĠH ost +S ound +и м +Ġs ought +m id +Ġsal ary +og g +âĦ ¢ +b ul +Ġw ir +valid ator +_ST AT +.st ore +ĠB attle +ı n +Ġ-- >ĊĊ +Tr ump +d ot +ĠCON T +.f etch +Ġcontin u +w as +Ġfra ud +_t mp +mit ter +.p ictureBox +G A +Ġt ournament +. 
Input +[ r +ex ion +cent age +ĠKore an +und ef +ĠAv ailable +resh ape +Ġk it +ĠStr uct +ĠS UB +An swer +_l ib +.t witter +Ġo re +ĠDr agon +.Ex t +, k +Ġexplan ation +ref s +ĠDr ive +ĠTr aining +.H as +int age +b ig +olog ist +enn is +Ù ĩ +Ġch icken +ĠĠĠĠĠĠĠĠĠĠ Ċ +ç Ľ +ãģ § +Ġpe ak +Ġdrink ing +Ġen code +ĠNE W +m alloc +ĉf printf +Ġ= ================================================================ +in cluding +Ġprincip les +ĠM ah +st orage +- key +Ġkey word +% ; +Ġtr ained +.con trib +Ġk v +__ ':Ċ +ĠB oy +param eter +Ġsu ite +Ġthous and +Ġco ordinate +-g enerated +íķ ĺ +gener ated +Ġad mitted +Ġp ussy +# w +Ġsw im +un ion +N a +ĠRoy al +.ch annel +Up dated +_RO OT +Ġv ital +ra ction +ĠCrush er +Ġpre ced +Ġhor izontal +Blue print +Ġattr s +Ġsm oke +Ð Ĵ +. Equals +F B +ĠRes ources +roll ing +Ġpass es +ĠN um +rot ate +et ype +\ ", +Ġsens itive +Ġt all +? âĢĿĊĊ +Pro xy +i y +_ section +âĢĶâĢĶ âĢĶâĢĶ +br id +Ġcirc uit +at an +EN C +Ġdr iven +Ġvot ed +Ġeduc ational +Ġinter action +abet es +Ġt one +ĠInitialize Component +Ġmer ely +Ġì ŀ +co okie +_ div +ĠUIL abel +vel y +} );čĊ +_ ENT +#+ #+ +art icles +ĠSou thern +Ġstrong er +ĠG iven +ĠE ric +ĠI R +ab stract +U nder +n able +Ġincre ment +ov en +Ġco in +_t imer +Ġsuffer ed +ĠF REE +'] ." +ĠQue en +st ats +Ġmeet ings +Ġenter ing +Ġalong side +(s ession +it als +Ġfound ation +ĠC redit +. div +_ ALL +pc ion +_st at +ick ing +Default s +_s rc +Ġoutput s +/ B +Ġent hus +-b l +.Fore Color +ĉ temp +F ace +Ġinter act +Ġwe ird +M ount +re ll +ud ents +Ġrequire ment +ĠS us +I ER +Ġe lected +re ference +ĠM E +Ġserv ers +.w ait +Ġsnap shot +il ton +Ġtri es +Ġt ipo +.T ime +> w +Ġmount ain +Ġp ounds +Ġ[ ... +ex ists +Ġng On +_M AP +Ġf lying +xi ety +ĉ value +_D B +un o +Ġse ats +T URN +. author +! ) +or ce +Ġindic ated +.s in +Ġass ignment +im iento +ĠF rame +_g en +in ery +_ ) +m essages +.set tings +ĠMe an +ĠM useum +ir q +att ach +ĠPalest in +_ QU +_t ags +Ġcas ual +em en +ASS WORD +$ s +ĠC irc +оР¹ +et ric +/ P +Ġep och +< head +_C MD +Ġg it +Ġpen alty +or ph +_ users +ours es +.Date Time +atern ion +_pro ject +Ġsuper ior +ĠD am +ĠSe attle +X Y +> The +ĠA k +Ġgr ass +/* čĊ +(d is +Ġgun s +Ġt b +ĠK evin +. args +ĠA h +op ed +( J +column s +arg uments +ĠWith Events +_f ull +ĠDef ense +S imple +Ġdeath s +Ġext ensive +ĠSt ill +ĠEx pression +ĠAg ency +Ġperform ing +F X +Ġus uario +U AL +S ide +od os +apt op +Ġcred entials +_c ap +at ient +ĠDis ney +Ġa i +Ġch ip +Ġvol t +.make Text +%%%%%%%% %%%%%%%% +Ġbelie f +_LO C +ĠC ivil +N avigation +Ġreve al +Ġviol ent +ĠF il +Ġc atalog +em ed +sc an +. 
control +Ġconstit ution +C ountry +Separ ator +_A PP +top ic +uet ooth +M IN +Ġdes criptor +y t +ET HER +Ġdistrib ute +' }Ċ +.tr im +.L ine +Ġl bl +assert Equals +ĠD et +omb ok +( width +Ġt ort +ĠEXP RESS +ac o +Us ing +ĠBr and +w all +EM ENT +ĠComm unic +< uint +ĠG UI +EG IN +ĠR ange +/ i +ĠT aylor +c ost +Ġrespond ed +ĠTh eme +n ce +IS H +Ġfeat uring +Return s +ĠK r +Ġ .Ċ +Ġn am +_c b +Test ing +Ġ{ }, +y al +.f ield +Ġ/ = +_SH ORT +m ates +Test Case +ain less +Ġeval uation +_ ITEM +ĠPac ific +ĉ k +Ġc ant +ĠR os +) s +Ġf et +STR ING +ĠDis pose +g al +ĠJ oin +ĠP orn +ĠCath olic +AR GET +cp u +ç łģ +.sc roll +IS ING +ifest yle +anc ement +Ġm erc +ĠB rowser +eter min +Ġover flow +Av ailable +Ġbott le +: UI +ific ial +Ġco ord +clar ation +Ġcon j +G LOBAL +ok u +Ġk wargs +cond itions +ul um +Ġg enu +ĠH ero +å İ +Ġun expected +ĠDAM AGES +Ġk a +ĠC ould +UP PORT +ĠPh otos +Ġconf ident +Ġdet ected +de g +rg b +Ġstrong ly +Ġ} ;čĊ +Ġ) : +Ġle ct +urs ive +RO L +ĠWe ight +Ġent ertainment +Ġ) );Ċ +Ġg onna +Ġb b +.d o +G S +Ġmist ake +D L +ĠPROVID ED +ear ning +L imit +iss ions +[ v +ä¸ į +ir ty +D el +Ġunder lying +pre ne +Ġj aw +ĠD I +pe er +Ġobject ive +Ġde posit +Ġk on +Ġes p +.set Visibility +/ login +< typename +Ġfr anch +/ e +Par allel +Ġsc ored +ĠH on +ĠV ill +ig a +Ġant icip +_ assert +ĠO pt +Ġdescri bes +w an +m ount +Ġmonitor ing +Ġt out +ëĬ Ķ +}, { +................ ................ += int +Ġc ust +---- -- +Ġatmos phere +P AR +ort e +IS IBLE +ĠI ron +ĠNot ification +.log ging +ĠBO OL +-p oint +Ġaf raid +ent a +Ġtom orrow +@ implementation +Ġeng age +ĠAn th +ĠF loor +ĠU l +To ols +Ġb ab +Ġcare ful +ãģ Ħ +Ġcruc ial +Ġcalcul ated +ĠS A +Ġw y +D X +_T AG +ind ed +Ġj et +ĠEngine ering +.M AX +en z +v d +Ġpublic ation +Ġ## # +Ġfac ed +ra ham +ĠC apt +As set +ĠCon stants +Ġlo ans +_ IP +ĠF ish +Red uc +_m at +Date Format +_m e +[] [] +Ġintegr ity +ĠC ourse +lob als +Ġfac ilit +Ġem br +ĠN g +.S ystem +Ġmanufact urers +Ġpro ven +.on Create +Ġal arm +Ġ § +Ġcomm only +ic os +æĸ ° +ĠSt ation +} ). +ĠF ilm +w i +ç ī +Ġeng aged +St ats +Ġgovern ments +Ġafford able +_p roperty +Ġag es +(' -- +Ġf ör +ĠProf essor +Ġhy dro +P ush +Ġorgan ized +Ac cept +é m +_c ell +Ġn b +p b +Art icle +Ġrem oval +Ġauth entication +ĠF R +l ide +Ġple asure +ap ol +Ġpart ition +ĠS ide +Ġcr imes +Ġdem o +hold ers +ĠPak istan +In struction +Ġexpect ations +.sc ene +Ġ' ) +h es +ino is +_P ro +Ġm olec +and al +_sh ort +Ġdefault s +Ġn ations +in en +Ġr t +O CK +P acket +S B +ĠSH ALL +_cont ents +ise conds +vert y +á t +G uid +n om +Ġcon clusion +. Update +Ġlo vely +Ġem it +b ec +ĉĉĉĉ Ġ +Ġintel lect +Ġb rew +ec ycle +F ire +Ġad mit +Ġar bit +Ġarr ang +ĠM IN +M ail +ĠN ative +C ur +Ġcon vent +.R untime +" }Ċ +.R un +Ġprint ed +Ġconven ient +. ar +m ock +ĠAdmin istration +ãģ ¾ +Ġelect ron +fl ate +Ġl ombok +Ġjava fx +n h +Ġsup plies +Ġvisit ing +ah l +Ġpow der +Ġult imate +Ġorient ation +ut as +_s cale +Con firm +ph ones +ĠOper ation +/ T +_IN TER +Ġair port +Ġmet rics +Ġphen omen +a udio +Ġm ai +( K +h u +all ing +rodu ction +ĠTrans port +ĠNOT E +æĸ ĩ +Ġfew er +_T IM +ì § +к и +A ge +F IN +Ġì Ŀ +ĠAt tribute +group s +er k +at to +. define +.AspNet Core +ategor ia +ĠS ir +( form +< User +. 
+[... several thousand byte-level BPE merge-pair entries (tokenizer merges data) omitted; the original hunk adds one merge pair per line, e.g. "Ġvir us", "ĠDav is" ...]
{{ $ +Ġun icode +pl aces +ĠC offee +.S E +ĠP AR +(t xt +ge bra +Ġf ires +Main Window +med ium +Ġ( âĢľ +Ġl g +Ġc mp +/ base +_l ayers +_ entries +Ġadmin ister +ĠSU CH +B P +ĠScott ish +ĉčĊ ĉčĊ +gu ard +ĠStr ong +In sn +ĠC AP +as ury +ĠSE E +C lock +er ie +\ models +Ġ$ $ +ĠC ab +Ġwur de +Ġsold ier +Ġcl ips +Ġarrang ement +ĠW onder +ĠH orn +Ġsc ared +Ġc ure +m kdir +Ġal igned +ĠP ink +Ġland ed +Dim ension +Scroll Pane +.ch at +.W ith +ĠTr ain +] .Ċ +Ġth irty +Ġdur able +Ġl d +Ġlate init +Ġch arts +Ġins ult +.F atal +_ ct +Ġm asks +CLU DED +Pres ident +Ġcol ours +g ments +.at tributes +ĠF lex +ĠC lock +ÃŃ cul +im en +J O +ĠReg ex +_L INK +Ġc ouch +ĠIN PUT +Ġbe ating +b usiness +pre ced +. unit +ĠF el +N ever +osp el +.start swith +ĠE PA +. only +Ġprevent ing +y er +Column Name +Ġelev ation +fl u +icy cle +Ġoff line +Tool bar +Ġcompet ing +) ]. +Ġm og +Ġis Valid +As k +_ av +_l at +AN C +ĠJ oh +k ers +Ġgu ards +Ġch ains +ĠSimple DateFormat +.st atic +Ġvess el +Ġm ud +Ġst abil +Ġst ret +g m +am ation +ç ľ +-w ith +Ġro s +_P A +Ġresult ado +Ġconf idential +ĠTok yo +ĉ using +ĠMath f +omb ine +ĠESP N +Ġdeal ers +Ġdismiss ed +TR Y +Ġte ens +rec ords +Ġw ings +g allery +account s +_L IB +Ġj acket +ĠNS Object +Ġst ones +ĠDel ivery +ĠD iet +/w atch +Ġto ilet +ĠG uest +.d ay +Ġint val +Vis it +Ġinvestig ated +Ġpent ru +ĠThe atre +andid ates +L ang +ĠS erv +Ġcont rollers +Ġset Title +N P +am y +fl at +( ui +_d ocument +è ĥ½ +ĠC oin +ĠAd ams +pt ic +Ġproduct ive +Ġaccompl ished +čĊčĊ čĊčĊ +Ġdefer red +ient es +Ġs inc +ol ars +Right arrow +Ġvari ations +( offset +.Layout Inflater +Ġsus pend +Ġprevent ion +_pr ivate +_ js +âĺ ħ +Ġw ieder +at um +Ĵ Į +Ġappear ances +.D ocument +Ġvalid ates +cal endar +} ";Ċ +.d emo +con ut +Ġcorre ction +ĠDe al +Ġbatter ies +.d uration +, \ +_m arker +m ulti +Ġh alt +Ġc ms +Ġsh aped +B ro +re duce +Ġ #### +CT OR +ĠBen ef +Ġicon ic +Ġp iano +Ġeffect iveness +| .Ċ +Ġa jax +Ġv olumes +ภ¡ +Ġcl js +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠ Ċ +ath s +ra its +å¤ § +Ñ ĸ +_m ult +Ġfasc inating +A verage +Ġpr é +ĠChair man +.find Element +_p in +Ġcomp aring +Ġdark ness +-F i +- server +Ġselect ing +ster dam +ĠPart s +FORM ATION +Ġnot ing +Ġp ile +og s +Ġpa lette +_d o +it ize +() ( +Ġdef ining +Ġremain der +Un its +_T ASK +Http Client +S ocial +Ġfund ra +N R +ch est +C urrency +.ad apter +Ġd op +un ting +ANG UAGE +" He +ĉ index +_p ackage +.I con +Ġrep et +m ass +=" .$ +ĠS ud +Ġl id +pro vince +ì ľ +G PIO +Ð ļ +ĠMy SQL +Ġdoc s +ĠG A +Ġip sum +K ernel +Ġaccept s +Ġfit ting +Ġcu ando +Ġd uplic +ĠBro ther +ĠK le +num s +Ġmor ph +Ġ ######## +ĠCG Point +< unsigned +ä¾ ĭ +ĠD uke +.set Bounds +q s +or ic +j er +Ġregard ed +Http Request +Ġbond s +Ġthorough ly +enc ent +Ġhighlight ed +Ġac res +Ġwork place +ĠL ux +Ġqu ot +.in flate +Ġdocument ed +Ġadd iction +Ġmut ation +.c ity +Ġbott les +ĠRepos itory +on n +err no +ARI ABLE +åº ¦ +_B EGIN +gl as +' })Ċ +ĠMass age +ĠWh it +reg ex +W A +Ġout let +- head +Ġexp ired +ĠTh ai +/ include +grad ient +scan f +Ġse am +w al +ĉb uf +B earer +Ġprec ious +if acts +co ord +Ġexpl oration +.get Y +(h andle +Top ic +ĠV ent +r hs +---- --Ċ +ĠB right +Ġg uild +m other +st orm +Ġmunicip al +Ġin k +.T YPE +w l +... 
manual +ĠTechn ical +Ġcorpor ation +ĠH W +ank a +T AIL +ist as +Ġperform s +ĠBeh avior +.F or +_ ORDER +ĠK ick +Ġcallback s +_d r +ue go +h ub +uff icient +sk y +Ġb p +ht able +ĠON LY +ĠAUTH ORS +.Arg ument +" };Ċ +ĠTh under +ĠK om +.Sh ould +A UTH +ah u +_p ayment +Ġst arter +ìĦ ľ +ìļ © +B log +.p atch +Ġgovern ed +ass y +-f ound +Ġthe ater +ĠFont Weight +ĠBat man +" If +.R andom +_d elta +ĠC E +Auth enticated +Ġdr one +Ġc ous +r adius +M er +( None +ĠN J +_ headers +Ġam er +py test +ĠA ctions +ĉĉĉ ĠĠĠĠ +Ġet t +Ġh oly +Ġun comfort +ĠN in +ĠDec imal +ĠM essages +.s ender +] ])Ċ +Ġembr ace +Th ough +/ sp +Ġcult ures +Ġhigh way +t ar +.f ail +_h idden +ĠcomponentDid Mount +ĠW right +Ġj ag +_ il +../../ ../ +ig u +F ood +Ġa ce +Ġa ños +US D +Ġmut ual +Log ic +Ġtem ple +Ġbrief ly +ĠT rip +class method +default s +Ġch unks +,, ,, +ĠRe ason +$ id +-up s +Ġdam n +Ġtruck s +Ġun limited +Ġsc ulpt +ĠC ards +Ġaut or +ĠTest ing +Ġdies e +sh ops +ç ´ +(p ayload +ĠP ATH +ĠMem orial +Ġridic ulous +eg ree +-w inning +Ġre hab +Ġsophistic ated +wp db +ĉ path +! ";Ċ +_S YS +.s peed +Ġso ap +s uffix +W rap +Ġenh ancement +à ī +ú b +Ġplay list +Ġmix ing +ant idad +=" ";Ċ +ĠRev ision +ĠBe at +.in c +-w ay +enc ias +ul ers +C at +id el +ĠSh ip +.set Color +Ġthreat ening +.mod ules +Ġafter wards +ĠD ashboard +Ċ ĠĊ +Sign al +Ġpr imer +orne ys +ici ary +Ġl igne +_p redict +Ġa est +_ https +> : +ĠL ex +Ġrencont res +eg ral +sc ala +_f amily +ÃŁ en +_s ym +Ġuncert ainty +ĠVAL UE +Ġ} ;čĊčĊ +Ġbro ader +Ġh orses +ãģ Ŀ +ĠK al +ob a +_IN ET +ĠK ill +j query +am ination +[ @" +Ġm uj +## #Ċ +First OrDefault +then Return +C he +/ footer +Ġpark s +as je +ĠG ulf +Ġmod est +. Init +ï¼Ł ĊĊ +Ġpros pects +Ġs vg +Ġå ı +.D ialog +_N ET +Ġ( ($ +Ġe k +ĠW arning +ĠM K +< LM +Ġ' čĊ +i em +h etic +Ġi x +th ink +-sh adow +ĠE ld +ĠNev ada +ĠLe af +ĠG ROUP +Ġprom o +ent ine +ĉ Map +ĠModel s +ĠK rist +_k ernel +-m ade +Ġc err +As sets +ell ar +Ġinv oked +.v ue +Ġcult iv +C losed +Ġgener ates +ffff ff +thes ize +s qrt +ĠCast le +.c ar +Ġke en +und a +ĠC row +ĠSing h +y thon +Ġbe ans +l arg +æĸĩ ä»¶ +Aw esome +unc ate +Path s +o ji +(c urr +CON DS +Ġm im +Ġshould ers +H ard +ast es +а еÑĤ +Ġconv ince +de cess +m ade +ĠC MD +. Im +Ġcha os +ens ively +Ġcool ing +Ġbur ied +(' @ +_S e +ĉĉĉĉĉĉĉĉ ĉĉĉĉĉĉĉĉ +.com pany +.sub mit +ph ant +Ġboot strap +_h elp +à § +.d ump +Ġdif er +_m apping +Ġcirc ular +Ġescort s +Ġb ere +Ġgrad u +ĠLeg end +im edia +ĠBar celona +Ġbed s +åĪ ° +ãĢ Ĭ +_v olume +Ġtremend ous +Ġsc aling +Ġp ins +en as +type param +D ashboard +render er +Ġsp i +Ġ& $ +ĠSk in +alm art +Ġh ockey +Ġ'" .$ +Ġerr no +Ġb ew +Follow ing +.M odule +er able +ĠM ilitary +ĠR io +_ available +ĠSur face +Ġst ab +IF IER +ĠL IST +Ġd ashboard +Ġcl usters +.pl ugin +Ġj ou +ĠDec or +F our +Ġdel le +****** /Ċ +ia z +in de +ch ing +Ġget Item +.Add ress +ment ed +A meric +Pl ain +Ġus b +ĠPract ice +_ ment +.bl ue +H int +ÑĢаР² +Ġconn ector +Ġinher ited +и в +Ġinterval s +Ġc ere +Ġu d +Ġin con +.Ex ists +ĠM ic +F K +(c ard +.Set tings +Ġexhib ition +Ġon Pressed +Ġrest ored +eng u +. def +Ġrec v +." 
);čĊ +enc oder +ather ine +( dest +az ed +# endregion +sem bl +, M +ob y +Ġп еÑĢ +.C all +Ġattend ance +-b order +Ġaddress ing +ê n +ĠLe v +Ġb ash +ben ch +C redentials +Sp acing +( of +_RE SET +ig uous +Ġcr uel +Ġcross ed +Ġle ur +ĠG olf +or rect +Ġpack ets +ĠData Set +Ġpart ly +SEQU ENTIAL +Ġindic ation +ĠS alt +ac ia +Ġ* );Ċ +ĉ info +ĠView Bag +on z +Ġeditor ial +ĠA rena +Ġs ir +_ Static +( socket +s u +cho ose +.m onth +.M y +é ri +; font +do es +Ġcon verter +Ġsal v +Ġl r +Ġinflu enced +(f eature +ĠQue ens +let t +_M ON +& amp +Touch ableOpacity +O FF +Ġmetab ol +( iter +Ġvit amin +ĠIND IRECT +aut om +_p ublic +Ġadjust ment +Ġspecial ized +w indows +.add All +Ġaccording ly +ĠJ OptionPane +Ġcell spacing +Ġqu ad +Ġcre ep +Ġout lets +}` )Ċ +Ġpri est +_TH READ +ĠMar x +ĠBy Val +Ġc ual +éĿ ¢ +Ġtempor arily +An n +ke leton +å ¥ +ĠLO C +au er +der ive +Ġbeh aviors +as ename +ĠCent ury +Ġhor rible +ME SS +_ List +we i +P at +ĠCh oice +_F ROM +ĉ line +.in voke +.B ottom +Ġnow here +." ĊĊĊĊ +_ export +Ġstrugg led +.Ap pearance +ĠJ Button +ĠJer emy +([ [ +Ġkick ed +mar shal +st aff +es ity +Ġqu iz +_e ffect +Ġ} ));ĊĊ +m el +b anner +ĠP IN +Ġin vention +Ġcons olid +Ġop s +ĠB etween +j ack +ern ational +Ġsacr ifice +ag ation +ĠJ oy +Ġam endment +ĠS old +Ġprison ers +ан нÑĭ +Doc uments +) ])Ċ +ust ed +ĠLine arLayout +os o +_E M +.s elf +.M iddle +) // +Ġ\ ' +Ġfuck ed +ĠM urray +Ġprof ound +_E LEMENT +ult a +il ers +port folio +J une +t cp +mod ified +ĠTr ace +ĠK el +aly zer +) => +ĠRep air +_B E +Br and +u art +pre view +Ġiniti atives +run ning +b ang +ĉ update +ĠCo ach +R ich +Ġy outube +Ġrit ual +app a +ĠRobin son +prec ision +//////////////////////////////////////////////////////////////// //////////// +=[ ]Ċ +Ġcelebr ated +OT O +Ġin clusion +J P +' ;čĊčĊ +Ġnot able +(_ . +Man aged +Ġgu ides +& nbsp +ated Route +ĠAd just +Ġcol ored +_s cores +ĠTes la +_pro gress +.in st +[' _ +.fl ags +Ġf close +_O PER +ż y +_n ote +Ġtrans gender +å ķ +RI PT +Ġabs ent +Ġam et +Ġoper and +ë © +Ġh ood +to LowerCase +av o +ĠCirc uit +ĠL ind +-- }}Ċ += m +Ġsup press +ĠM AP +i ang +- admin +Ġside bar +ĠB u +ĠH ex +, F +ĠSign al +Ġtrans parency +ĠFeder ation +/ V +Re q +Ġpul se +Ġt ends +Num bers +% ' +Ġde port +dat as +_U INT +_ tra +ok o +Ġ" ? +comp et +sole te +und ry +Ġover lap +}` ,Ċ +. ly +_sum mary +ĠL ost +.C enter +Ġdis ability +.Serial ization +Ġge om +Ġ? : +ĠW o +Ġsh ipped +Ĥ æķ° +Ġu gly +Ġexcit ement +Ġext erior +Ġcheck out +Ġk ur +, D +ĠAl aska +Ġsyn thetic +ĠB udget +ĠSub scribe +Ġ& Ċ +ÈĻ i +ĠY u +ĉ query +} .Ċ +Ġtr aged +ass en +Ġaccommod ation +Ġphys ician +Ġren amed +Ġtid ak +z Äħ +Ġmin us +ny ch +_EX CEPTION +thread s +Ġt ire +_c reated +ens ure +Ġworth y +Ġexc use +Ġclo th +.parent Node +/pl atform +ĠU FC +ĠG tk +un ny +Ġg ibt +ke ley +h um +(t x +ĉ dev +Ġout fit +do ors +Ġf on +ic ut +vol atile +Ġhom osex +Max imum +Ġexp end +Ġ});ĊĊ Ċ +E q +ond ers +dep artment +ĠPhys ics +" });Ċ +Ġpar ad +.S tr +Ġse le +IF IED +Ġdel ivers +iv an +Ġrespons ibilities +Ġadvoc ates +è µ +ĠR ID +.param eters +M etrics +ron ics +ĠUITableView Cell +A bsolute +ip se +yl um +MLE lement +_VAL ID +< title +D lg +p aces +Ġsynd rome +be ans +_d atabase +oz illa +ĠM eg +DB G +Ġl ub +Bag Constraints +ab ad +Ġproject ed +_BY TE +.Size F +st reet +ĊĊĊĊ ĊĊĊĊĊĊ +ĠLO SS +Ġdirect ors +/ news +Ġnurs ing +ĠD one +. 
HTTP +dis count +ĠR ot +To Many +Ġen abling +Ġauss i +ost a +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ čĊ +è½ ½ +Ġhel icopt +ĠIn side +ä¿¡ æģ¯ +is per +ĠAll ah +ARCH AR +Ġroll s +Com pare +X P +Index Of +S UM +Ġass ured +ĠPhys ical +End point +.G lobal +.d etail +Ġthe ft +.j upiter +Ġhum or +.R ender +A lex +.c ap +Ġbuff ers +Ġdis pose +t ion +.p resent +z el +, P +Ġdesper ate +.get Column +Ġtw in +ì ĸ +.c an +Ġf lee +ĠIran ian +Ġstick y +ĠU TC +L T +//////////////////////////////// //////////////// +Ġl icensing +_PO INT +ĠM aps +Ġl ol += models +-t ab +ĠN ash +_log ger +tor ch +ĠCON SEQUENTIAL +Not Empty +/ react +Ġp f +Ġassert ion +Ġsubsequ ently +_c an +Ġpand emic +og ue +"+ Ċ +_ ent +_P aram +.ĊĊ ĊĊĊĊĊĊ +Res earch +C apture +Ġbel oved +d em +Ġextract ed +Ġf ights +ER C +(a uth +position s +Ġrevers ed +(st ack +Ġ_ ) +uto ff +_fl ow +ç Ĥ¹ +( Game +Ġex cluded +ĠCS V +c g +ĠT itan +p ause +Ġcer ca +Ġdump ster +L ess +Ġkotlin x +aster xml +Ġpoint ers +Ġfl ows +ĠT un +ĠMain Activity +Ġdis cret +Ġcomb inations +vis it +_b ind +oot ing +d ater +_look up +.n io +Ġswe at +ĠR d +Ġscient ist +ĠP ixel +@ NgModule +Play ing +Ġunf old +Trans late +ĠLaw rence +ĠFIX ME +B ill +ĠR IGHT +Ġwhere ver +Ġo ok +vid ence +Ġ] ]; +ĠSk ill +unist d +ĠðŁ ĻĤ +Ġfem ales +-- )Ċ +İ· åıĸ +ĠF red +Over all +Ù Ĥ +Ġess ence +Ġthere by +Ġw ounded +ĠD OWN +les son +text ure +R ound +Ġautom ated +ĠÐ ¡ +ĠUp dates +Ġsh ade +p ublish +ĠG ear += lambda +Ġle ver +) +" +h ill +Ġrad ar +ry ing +Ġ" ). +f illed +Ġline up +Ġd l +Ġworks pace +V o +_d t +ë ² +_ Item +NS URL +. verify +ĠHawai i +G od +M arch +Ġ[â̦ ] +Ġpel o +ur ious +ĠPitt sburgh +. It +C lean +> \<^ +Ġi os +s ound +"] ; +Ġfre ed +rot tle +ĠL ower +[ count +å Ŀ +Ġp ale +ĠWay ne +ear th +_c ategories +U CK +.m etadata +Ġsum mon +H OME +олÑĮ з +Ġmanufact ured +Ġdo ck +Ġcompet itors +_MODE L +ok ia +ĠH ey +Î ¿ +Ġback ward +ĠPO SS +rop a +Ġc ri +_O BJ +Trans port +-h igh +Ġerot ik +_s lot +Ġart ic +_f ramework +-ser if +ĠSql DbType +') ( ++ "/ +Ġw ore +S il +Ġst oring +ĠPh ase +u ant +Ġb ump +in ho +Ġd ign +Ġback s +q q +(h ash +Ġge o +Ġt ender +Log o +! 
)Ċ +ĠM X +ĠAr thur +esso a +_C h +Ġbed rooms +="# ">< +Ġth roat +ins ic +.int eger +Ġpr imitive +Truth y +Ġfacilit ate +Ġcreat ivity +ĠD NS +Ġg ra +ue z +Ġcount less +ĠPol and +' M +ĠD ist +Ġv est +Ġcert ification +á» ij +h eld +ext ensions +( static +Ġgr ades +ĠU ber +ãģ Ł +Ġ[ ])Ċ +dat os +Ġget Data +ĠCh arg +ĠB S +.m icrosoft +.v ideo +.d irection +->{ ' +l ua +ape st +Ġbo iler +ere k +Ġdec ides +.j ar +IS C +ĠW ords +(C ON +EMPL ATE +ree ze +sh ots +app s +unt ed +.set Name +:: < +-b old +ê ² +å¯ Ĩ +Long rightarrow +Ġunf air +Ġear ning +Ġsh elf +URE MENT +Ġid le +_M ENU +.C ustom +AG ER +- " +_s witch +b ecause +) view +m are +_ condition +ĠStart ing +M vc +(p re +d ump +_LO CK +at etime +.c allback +ĠC er +op ol +ib rary +Ġres ervation +ĉĉĉĉĉĉĉ Ċ +lect or +grad uate +Ġgener ous +Ġ ion +ric ao +m q +_com plete +(c ursor +ĠForm Control +: center +Ġsub stitute +ĠPl anning +Ġp ension +Ġrecommend ation +ĠT ags +Ġg ef +Ġalbum s +Ġwash ing +ro c +Ġtr ains +at ings +Ġex ponent +ack bar +- ln +á g +.Data Annotations +ĠE IF +ĠMalays ia +ĉ PORT +on us +Ġcle ver +Ġpe u +> ĊĊĊĊ +ĠArg uments +Ġdebug ging +( right +' D +com pute +Ġfin est +OR AGE +Ġspect acular +ph rase +Ġind ia +Ġlegend ary +b irth +Ġcom posite +Ġg rows +ĠT D +Ġep id +Ġlaunch ing +] ][ +Min utes +ĠCh a +Ġclean ed +Ġwitness es +uk an +ĉ Type +Ġhab e +par agraph +ĠJ Panel +ĠH ann +Ġvar ied +ĠP okemon +ĠM UST +åĬ ¨ +.vis ibility +op up +^ [ +.exp and +Ġ" ', +.f asterxml +_ auto +ĠShe et +mark er +Par cel +ew s +ĠStr ategy +-m aking +Ġun ve +Ġtrail ing +Ġclick s +ĠGet Component +ĉ content +IG ENCE +ERN EL +NSMutable Array +Ġb reat +Ġharm ful +¶ Ī +Ġbes ides +Ġb oring +Ġbrut al +v ang +(p arse +qu ick +Ġpy test +Ġswitch ing +() ]Ċ +Ġì Ħ +L ER +ĉf ont +Ġnet t +) ]ĊĊ +(/ \ +æŀ ľ +to Array +Ġbre ed +ĠC AR +ĠWe apon +A bs +t ot +Ġset Name +apt ive +Ġ: , +Ġesc aped +ord en +ĠP ri +th umbnail +Ġdescri ptions +/ styles +ĠPC I +Ġal phabet +astic search +NOT E +Ġc ialis +ĠGr iff +Ġpor que +Ġprote ins +pl ays +Ġst ating +Ġimag ination +Ġfac ial +ĠMe chan +Ġarr anged +_ used +Ġarrang ements +ĠP ipe +host name +Ġprov inc +T it +.Flat Style +ĠS plit +ĠLo ader +.c c +Ġclin ic +---------------- ------------ +Ġb aking +ĠEN T +ne ath +ãĢģ ĊĊ +AN E +.EntityFramework Core +app ers +. ic +ĠNg Module +ĠF ORM +Ġ' ; +-pro fit +h w +en emy +ĠE ye +Ġca ution +t own +Ġur ged +ĠJim my +ynchron ous +-s ized +m aking +, { +] ', +_ Object +ah oma +Ġactiv ist +IN VAL +ĠCom mercial +ĠOr lando +(t ab +ĠØ ¨ +Al gorithm +Ġher itage +Get Mapping +Ġfail ures +ri os +at iva +Ġt et +Ġcar pet +( Z +th ree +Ġdisc losure +. ERROR +_c alled +Ġd ial +Ġoccas ional +.E rr +Ġfunc ion +caff old +Ġrele asing +ï¼ī ĊĊ +_ Value +ĠV ari +y ellow +Ġstrugg les +.c al +ĠDak ota +ĉc lose +Ġsand wich +Ġanaly tics +Ġ** ) +& # +ĠJ os +Ġpass ive +AT TR +Th rowable +ĠM un +ĠU int +(dis posing +ar ak +ĠLe aders +Ġaffect ing +Ġitem View +Ġeconom ics +f v +à¹ Ģ +.r b +ĠOver all +Ġwealth y +Ġev olved +nd a +ĠH us +re strict +um en +ĠA gricult +! ĊĊĊ +Ġexp ires +Ġspokes person +int erval +Ġà ¢ +Ġque en +(n il +ing o +He ap +Ù İ +Ġcompl ain +S ym +ĠCl one +ĠR u +ĠW ILL +ĠCr ystal +/ content +ing en +oint ment +Last Name +av icon +ĠIB M +ĠDim ension +an h +icip ants +ĠAn ne +.pro gress +Ġal go +ob il +ĠV oice +ĠF E +Ġg li +Ġv ed +Ġprevent s +\ Column +Ġfol k +ett i +Ġm n +ĠCL ASS +Ġdisplay ing +ĠK l +ĠF err +d uto +. ib +Ġd ados +' name +-s pace +Ġit alian +Ġin verse +Ġd ense +ut er +ĠI Enumerator +-s ign +Ġnation wide +Ġperson a +Ġsol ved +Ġdram atically +Log out +Ġgr av +Ġanalys es +ol lo +Ġl amp +. 
team +ĠE rot += [" +Ġd ancing +Ġ?> / +Ġc ater +ff e +ĠSh a +ĠB os +ĠRE QUIRE +ĠMon ster +ĠR B +ĠI DE +Ġsu its +Ġform Data +( theta +Ġsp atial += NULL +ĠSql Connection +Ġ à +ĠV enez +ĠMor ning +Ġpublic ations +ĠNON INFRINGEMENT +first Name +ud s +W ould +_HE AD +Ġinvest ed +st able +f red +Ġcommand er +SE S +âĢĶ a +an che +ĠM ovement +ë ³ +S uite +Ġjur isdiction +ë¦ ¬ +ĠB eth +j Query +ĠIs a +Ġd ental +, * +ĠL imit +ili ation +=" { +b ast +Ġt urb +is y +O OK +Ġadvoc ate +im ag +LE CTION +л ÑĮ +(c ategory +.de c +Ġun iqu +_s n +Ġattract ed +Ġà ī +ĠRun ning +_ edges +ĠDis able +_A S +åĽ ¾ +Ġnetwork ing +_br anch +H aving +toBe Truthy +G I +Ġcamp s +se p +-p art +Ġ)ĊĊ ĊĊĊĊĊĊ +ustral ia +ĠRe ports +rit o +Ġwa ist +_pl us +ĠW W +-p erson +Apr il +Ġs ar +.t ar +Ġagricult ural +t ic +Ġt cp +Ġset Value +agent o +ĠAp pe +p iler +CA DE +Ġan che +atch er +Ġcom ics +Ġl bs +_se gment +'] =$ +itt ers +ich er +G INE +Ġutil ize +ĠC ursor +_ex pression +Ġd ag +< long +Ġr hyth +æı IJ +Ġconsult ation +Y et +")) ĊĊ +_M AC +c ould +Ġ' \\ +ĠV o +ĉ http +Ġg s +ph er +- grid +J ames +J ul +Ġsch on +Ġtensor flow +ĠLOG GER +am as +Ġsc ipy +Ġconv iction +. ag +Ġadministr ator +)) {čĊ +Ġn un +" group +P or +Ġnur se +ex pression +ak y +ĠHe avy +. opt +.get All +Ġover l +/ ", +_c ountry +ç İ +ĠG ENER +_r oute +ĠD al + ´ +ol oad +Ġuncomfort able +(m enu +Ġhost name +' ");Ċ +Ġcalcul ations +-c lick +Ġprotect ive +ãĤ ¯ +_F orm +ung s +Act ual +m f +ĠProcess ing +ĠIn ventory +(m atrix +app ropriate +w eg +ij a +Ġch r +Ġr ifle +-w sj +k ar +Ġindepend ently +I OS +Ġconsist ency +v n +/s ystem +ĠCh anges +Ġexp ose +ici ents +Ġrel ate +ĉ next +è ¨ +ud es +Ġglass es +F XML +.... .. +ĠP df +Ġappro ve +Ġ{ \ +Ġexist e +)) ( +ARE NT +оР¿ +ĠL atest +ĠNiger ia +.Inter faces +Ġrem oves +En emy +Ġen force +vert s +ĉ pos +_text ure +W ARD +ĠINC IDENT +( container +Ġdef ending +ĠR X +ĠH ook +br is +ĠFl ask +Gr ay +. 
)Ċ +vis ibility +ĠRedirectTo Action +err al +_e lem +Ġres on +front end +_variable s +ater ia +Ġ+ " +ave led +RI X +Ġdef icit +_C heck +YY YY +To One +sp y +Ġun ited +end ent +Ġp ode +ãģ Į +C AT +(f mt +ĠBon us +Ġre ck + º +Mod ules +Ġvac uum +R adio +ĠDAM AGE +P en +ĠPark er +; ;Ċ +ĠRe ally +_n eg +p ending +Ġnomine e +ĠC ategories +ĠUl tra +We apon +Ġdef ender +I ss +ĠG ender +ĠD ress +Ġimpr ison +Ġbank rupt +imension al +PH A +ĠStr ateg +ĠPROF ITS +Ġp atri +//////////////////////////////////////////////////////////////// //////////////// +de legate +Ġfor State +Ġdev oted +_m ake +Ġterror ists +ĠS nap +_n av +ĠA A +ĠI an +ĉ app +Pl acement +_h dr +< K +Ġs ang +st roke +- Q +> x +.T ask +m oney +ib aba +' });Ċ +ĠSpec ific +ĠLine ar +_O PT +Hash Code +( Player +.Contains Key +Ġcoll apsed +trans parent +_R ANGE +View er +(c fg +Ġsort ing +Ġinf ected +ĠN ach +Ġaccommod ate +.element s +_P ART +ĠSex y += get +( year +Ġx hr +: ] +ows ki +Ġsum mar +Ġ ¿ +Ġint e +Ġwork flow +ĠTai wan +vers ions +åı ij +Ġsurprising ly +Ġopt ical +Ġpro ces +Ġdisag ree +Ġnue vo +ĠC AM +sort ed +le ases +ist le +Id ent +ĉ event +ject ed +Ch unk +V ars +.pro vider +Ġproceed ings +Ġin clusive +Ġart work +end ants +ï¼ļ Ċ +se en +Ġl ig +Ġm akers +_f un +Ġlength s +Path Variable +[ item +ภµ +De ad +FFFF FF +ĠUr ban +up les +ich en +(null ptr +.s pec +, System +UR ATION +(j ob +å¼ ı +Ġtrack er +Å Ļ +ĠM R +ĠSQL ite +Ġd to +Ġ; ;Ċ +Ġm int +ĠInt roduction +ca o +Ġquestion ed +Ġf itted +rev ision +s q +Ġm ig +_un its +_ async +Ġf lick +});ĊĊ Ċ +Ġnot re +}` , +F ilters +Ġm undo +_d ays +Ġfr m +ut c +Ġval s +ew idth +ĠGener ator +ĠArt ist +ĠID s +ĠArt icles +re ater +ĠComponent Fixture +. = +Ġr ou +- no +.b ukkit +eg g +ĠD iff +atic s +Ñĥ Ñĩ +âĢĶ ĊĊ +ĠChar lotte +by e +Ġ} );čĊčĊ +ĠV ik +ĠB row +Ġl v +ĠG ib +-w ing +GL IGENCE +(I l +ĠEngine er +.W ait +ĠP ictures +Ġr het +Ġth ermal +Ġpr aise +< >();ĊĊ +ĠSp ider +P ause +ĠB aker +Ġsl ower +Ġ} ]Ċ +_en queue +Ġdisappe ared +ĠT icket +IN UX +_LOC AL +аÑģ Ñģ +@Inject able +comm unity +Gesture Recognizer +åĽ ½ +Ġsca les +Ġ- ( +/ '+ +ĠS it +Ġexecut ives +ard ing +Ġad vers +Ġback wards +ĉ context +ĠH amp +ĠP F +ĠDe ck +ĠCra ig +A merican +Ġb ell +Ġpro l +uf en +Ġr ng +ar shal +ĠSim ply +first name +sh ore +J uly +Ġmort ality +ĠâĨĴ ĊĊ +Help ers +Ġbench mark +em ade +Ġorganis ations +.g son +ĠText Field +Ġciv ilians +.Array s +ĠMiss issippi +Ġinter mediate +get User +_cl uster +Rel ative +fore ign +.querySelector All +Fore ignKey +Ġreason ably +-------- -Ċ +C ards +ĠK am +ĠTh or +Ġroll er +-e lement +ĠC urrency +dd ie +ALL Y +ĠR A +Ġper met +aa aa +Ġhom ework +ĠV it +Ġm old +ĠF er +[ start +Ġstatist ical +Ġsc ary +_H OME +.B egin +Con struct +ogen ic +ĠDEAL INGS +Ġtamb ién +ix on +. ind +ac re +Ġtransform s +ĠN ap +.B lock +uss ia +pir ation +ul ent +Ġce il +Cl ause +na ire +T ES +Ġne at +ST D +ĠReg Exp +per form +: ) +Ġun ions +Ġs ublic +Ġw inds +lo ating +g lich +Ġp agination +S kill +App ly +ĠOper ator +ist ogram +Ġqual ities +C ross +Ġde com +], " +ĠJ uan +.mod al +.Ch ild +ĠRog er +STIT UTE +:CGRect Make +a lette +Ġst a +as ide +Ġbl ur +ĠW a +if etime +re ed +control s +Ġb ins +Ġп ол +*/ ,Ċ +U IS +ĠR ou +ĠDem o +- awesome +ĠCh ain +Ġh asta +ĠB art +. KEY +Ġvend ors +nof ollow +ĠD est +_b uilder +Ġarg ues +_ answer +g oto +ĠRES ULT +ĠM ON +Ġp oder +o ons +_C ASE +Ġrep lic +Ġfin ancing +ĠD ATE +c ern +_tr ack +t ies +/ logo +ĠNE GLIGENCE +get Type +> T +b et +g irl +ĠINCIDENT AL +-s ite +.tr igger +ĠL isa +_input s +Ġrel atives +Logged In +Config ure +I K +. 
accept +Res ume +ĠD raft +Ġ* >( +ĠW A +ed ian +ern ess +ĠLayout Inflater +*/ čĊčĊ +oth y +Ġoblig ation +Sub scribe +Ġth umbnail +ex ist +Ġins isted +ĠU ICollectionView +ĠAng ular +Ġtable ts +ĠImp act +ãĢį ĊĊ +ah o +Ġcharacter istic +g d +Ġ= ================================================ +our t +` . +App ro +Co ordinate +Rem ember +Ġmar ine +] ==' +ĠAdmin istrator +.get Default +Ġforg ot +ĠStruct ure +V ue +ars ing +m oment +k w +_c ursor +Att ack +Ġath letic +Ġdiagn osed +Ġend e +åĪ łéϤ +H ouse +ĠP ARAM +Ġw iki +ĠO pp +Ġcons ervation +Ġs nd +_t em +sub str +ĠC ape +.s im +UT ION +an an +âĢĻ un +Ġg y +- work +Ġcomp elling +=' # +ĉs ub +Ġdirect ories +íĬ ¸ +Ġtouch es +out ines +.C ollection +s chedule +.l at +ĠDo ctrine +CA A +ĠRe fer +Ġshift s +Ġlik elihood +pre ter +ĠF emale +Ġinter cept +Ġl ou +çĻ » +Ġr ug +ĠC rown +Ġ************************************************************************ **** +- product +Ġprompt ed +ung le +d ocker +ĠT u +ĠUn ique +_ Error +ul os +Ġâ Ħ +Ġ( ` +Get ting +_s cal +ĠEn h +ü t +Ġsust ained +Ġp atches +Ġpros per +ĠG aza +_l ight +Ġin cons +-------- Ċ +ĉĉ ĠĠĠĠĠĠ +S F +C N +: ";Ċ +ĠColl ins +( *) +Ġcomp ilation +'] čĊ +Ġcon sequence +, ... +Ġd m +ĠB LOCK +Cl uster +Ġsk i +(arg c +T uple +Ġjo ins +ĠSher iff +W ar +ind i +Ġcomment ed +H OST +Ġinv itation +apan ese +Ġperm its +preced ented +_z one +ĠA my +_R D +Min imum +Ġinv ocation +.en able +icht en +- owned +" id +_PO INTER +F ac +Ġspecific ations +Ġnom ination +Ġg p +< ( +Ġrob ots +ĠJ erry +Ġhold ers +Ġw and +c ms +Ġ} ))Ċ +.To ast +ĠI List +B ased +z oom +/ style +ĠBe ck +M en +Ġcontrib uting +Ġund o +ĠO H +Ġadd Object +Ġe igen +sign up +éĶ Ļ +Ġdist ant +PAR ATOR +ĠM ari +Ġm á +E mp +ó s +Ġì Īĺ +ev t ++ j +p ark +ĠSt ay +ĠD un +Ġso y +> % +az ines +Ġti empo +(m e +p resent +.Th is +Ġedit ors +F IELD +.W ork +ĠUn iverse +Ġdr unk +.t imer +Ġalter ed +ĠN ar +ëł ¥ +.Act ive +id or +ç Ń +.delta Time +Ġawk ward +& quot +ĠSaf ari +Ġtr icks +MENT S +div ision +Ġvary ing +ĠHigh way +Ġphotograph er +ĠSt ewart +Ġlast ing +.P re +.amazon aws +ĠL uck +.D escription +ĠN az +n eg +Ġc ó +<<" \ +ĠSur v +ĠU nc +Rec ipe +.Border Style +Ġmod ifications +- at +AT FORM +h dr +ak o +Ġsublic ense +ĠJ ump +Ġbe im +ĠMan hattan +. 
bool +_h w +ÑĤ ÑĮ +B in +Ġg ateway +" ": +ĠU IS +:" + +- def +ĠReg ular +/ testing +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +string stream +Ġdis par +Ġmob il +- read +ĠAd apter +ĠCh ampions +Ġsched uler +Ġk ills +ĠM ultiple +ir ror +Ġgod s +AD O +ak te +ĠUs uario +.c ircular +Ġre cept +ĠEx pr +Ġelder ly +Ġnic ely +Ġbest e +W ant +Ġclass ical +.s prite +obj c +ĠM ason +Ġsist ema +.Bl ack +es o +ĠZe it +Ġdiv id +Ġent ers +_sub ject +ĠPlan et +.w arning +ĠG ram +_t okens +Ġhousehold s +_c ustomer +user Name +c ross +Ġp ione +Ġass ists +_S M +ib o +Ġlo yal +Ġuse less +# elif +ĠUlt imate +C ome +g el +Ġd ich +xy z +ik el +ob ra +_s can +ĠInter ior +ĠN ice +Ġpl ac +ĉt arget +Ġvir al +ass o +() / +und e +ĠAd obe +O s +vis ited +ĠO W +ĠFe ed +ĠSe quence +Ġman ages +in son +ĠLouis iana +{ }) +ĠH ab +ĠL D +Ġb ip +pr ites +(e lem +.h ibernate +él é +Ġoh ne +_trans action +Ġann unci +P ublished +ĠH onda +ĠT am +ĠP acket +_ selector +Ġchalleng ed +Process ing +-h over +Ġtr ainer +_c ancel +ĠNS Dictionary +ab ric +ĠM LS +_s ensor +Ġshr ink +ĠF X +th reshold +ĉH X +-m ark +` .` +S cheme +(f ull +_w riter +ĠS ys +Ġf led +ĠC in +-w idget +ĠPre vious +G ender +_ question +Fe ed +Ġscr ut +(p refix +ãĢĤ ãĢĤ +Ġin fections +Part s +Ġhier archy +_DE LETE +ĠPat ient +_p ay +Ġprom oted +Ġì ĭ +Ġcivil ian +Ġagricult ure +ĠP iece +Ġst ance +uts che +Ass ign +.A CTION +F ig +_r adius +ĠS ync +du cer +f ailure +ens ed +pt ime +B M +_dat etime +qu ivo +QUE UE +èĢ ħ +Ap pear +Ġsum mit +: void +Ġv ine +è® ¤ +on ne +_TR ANS +.g reen +_ cc +Ġhung ry +Ġ" > +() );čĊčĊ +Ex tract +iz ens +Ġsol ver +Not ify +Ġeng lish +ĠSh opping +inter faces +RE Q +Ġil leg +ĠUI ImageView +Ġdis connect +ĠUnt il +ĠConserv ative +@ Column +Ġshift ed +Ġ: čĊ +Ġf ich +Ġd la +Ġsh oe +"), čĊ +ular ity +_RE SP +We ather +UI Application +. iterator +Ġag ing +.P arent +ow ie +(e qual +ĠCon v +/ default +Ġmeas uring +.pre v +.Is Valid +.F at +Ġs Äĥ +key words +with out +Ġso vere +Ġex changes +Ġm elt +Ġis lands +ĠInt egr +Ġjump ing +Ġg le +Ġjournal ism +Ġd ated +Local ized +ĠRef resh +Part icle +Ġa a +ĠSTR ICT +Ġb od +.Pro cess +_A UTO +ĠP ublished +e very +Ġtechn ological +ls x +Ġir rit +Add itional +Ġdel imiter +_l anguage +- area +bo ys +ĠT ube +Ġw at +Ġmechan ics +_ owner +Sp ell +ĠSt ories +.Append Line +Table View +h em +st ick +oll ower +I FF +ĠU V +oll ision +S UB +Ġcompar able +Ġdon de +s ales +ll vm +Ġ} ],Ċ +OTT OM +ĠPur pose +L ab +Ġinterview ed +o is +as il +.set Id +ĠIn struction +-- > +ĠMod ified +ation ally +ĠMe eting +è¯ ¯ +# region +Ġrout ing +.f ocus +ĠYou th +< D +ĠN ag +contact s +Ġform ing +Ġm ie +',[' ../ +ĠB P +Ġapp et +ĠTe acher +ĠT P +Ġann ually +outed EventArgs +ĠSpe aker +Ġre name +CF G +(" // +æİ ¥ +/p ages +Ġpr és +ĠSp ell +.All ow +ĠINT ERRU +Ġ( # +âĢĻ ĊĊ +_G eneric +.im show +_t im +- face +(& ( +atin um +Ġrevolution ary +ĠH ours +r ain +Ġany time +Ġab b +.j sp +Scroll View +ĠTr uth +Ġanticip ated +Ġacc ent +. checked +Ġspec ifies +Ġca f +Ġcell padding +Ġcook ed +ĠH ugh +pe ek +_R ATE +Ġd orm +/ čĊ +IV ITY +.Cont roller +(p art +.con straint +Ġinv asion +MO VE +Ġgl uc +l ename +Ġam en +eng lish +ĠSw itzerland +";ĊĊ Ċ +pe st +.col lect +N ib +ĠD ict +ĠE mb +(sub ject +Ġoutr age +Ġdec iding +Ġsent enced +F echa +" A +Ġqu er +Ġfont Family +Ġqu adr +- Y +_C ACHE +Ġanaly zed +Ġg aining +ĠAgain st +ĠSou l +ta u +Ġlight weight +ĠT F +ĠEffect s +.T ypes +.add Class +Ġv egan +é ģ +.' " +ĠExpl orer +.d etect +.sh ift +Ġoblig ations +last Name +Ġassoci ations +ĠTime Span +un ter +ĠF resh +Compat ible +P ub +id ges +. 
option +var i +.hash Code +Ġg eb +. section +- not +ĠSub mit +T N +reg istry +_m edia +Ġn aj +ff t +Ġm ate +-th ird +Ġp ockets +est a +Ġb ent +ĠN ord +Ġretail ers +ĠMor ris +."" "ĊĊ +W rong +Ġ ÅĽ +R ay +. ec +ĠB ind +_H AND +(n on +is Valid +Ġsimilar ly +_L IMIT +Ġdynam ics +Ġdist inction +ãģ Ĩ +< N +Ġor th +ĠToy ota +ĠK ate +ĠL S +or ie +ĠSpr ings +Ġf reak +last name +_M ULT +-st ep +" ( +AD DR +Ġentert aining +_CON F +Ġdec oded +Ġst reak +Ġwait ed +Ġnot ified +rodu ced +vis ual +.Layout Params +æ ° +es ian +f its +s pring +ĠBern ie +User Defaults +Ġped est +Ap pearance +ĠW iki +ĠNOT ICE +Ġs sh +Ġdur ante +ĠZ ip +ı r +ĠNAT O +Ġtw elve +Ġro yal +ï ¸ +Ġmer chant +ĠF urniture +'] ),Ċ +, X +Ġfold ers +ĠG ate +ĉf unc +p ick +_us uario +ĠV erm +ment ion +ur pose +Ġalert s +x ious +_s ig +ĠF u +Ġ( : +Ġd umb +åħ ³ +Ġaccur ately +éĩ į +R B +-s creen +ĠV ER +j our +Ġrom ance +uc ceed +. choice +Ġad ip +_d ims +Serial izable +ãĤ ĭ +.j ob +Ġpro g +uch ar +Ġg ently +ĠR SS +ict ured +_ENABLE D +ĉ label +aw ks +ĠEn sure +rem ember +ìł ķ +Ġtrans mit +{{ $ +.Trans action +ur se +_rel ative +Ġs ized +ĠX X +ĠPr incess +ĠL arry +Ġpr ó +ĠÑģÑĤ ÑĢ +Ġs isters +estr uct +Ġcheck point +: length +ĠCar los +/ icon +_T ARGET +T okens +Ġpat ience +ĠSe lected +q ty +.show Message +Ġwild life +ĠP rops +b m +- arrow +Ġpar cel +fire base +ĠBen jamin +cess o +.t im +ĠG arc +. any +ĠHOW EVER +ĠK o +Ġgrab bed +_f rames +Ġobject AtIndex +ĠADV ISED +Ġsub ur +ĉ GL +Ġ}) }Ċ +-l ength +ìĭ ľ +ĠPot ter +_b uff +.g ui +ĠEnc oding +E lect +-m essage +Ġ � +Ġ ÈĻi +ĠArgument NullException +а ÑĨи +Ġmin imize +Ġrespond ing +$_ [' +ĠInd ividual +á c +ĠIN TER +Ġmast urb +ĠB in +(' $ +ëĵ ľ +Ġopen ly +Ġ> < +Ġun to +olog ically +ĠM ul +VID IA +Ġsl im +ĠCommission er +( on +Ġunder neath +/ db +v ote +( Message +ĠP ope +Def ined +Ġsw ift +ur f +Ġadapt ed +SE L +Ġreven ues +Ġdiv ine += y +Grad ient +_ act +Ġ/*! < +Ġpoly gon +ĠF DA +ĠC arr +at ables +(std out +Ġrefr iger +Ġco ordin +avor ites +ÑĪ Ð¸ +Ġcompass ion +ĠPOSS IBILITY +- secondary +ur acy +Ġcomp romise +_A V +_ os +Ġbes ide +ĥ Ŀ +Ġl n +.pl ugins +Cap acity +al ah +.b in +ĠC RC +_b alance +Ġflex Direction +Ġam bit +Ġnick name +ĠFor ces +C LE +ĠSh ell +Ġs ail +ĠW riter +ĠA lice +d w +ĠInd ians +ĠMar shall +_S RC +Ġnormal ized +ĠJ ag +ãĤ Ĵ +ze it +r pc +ÃŃ c +.in line +Ġtrav ers +_n umeric +Ġutil ities +Ġev ac +IN PUT +ĉ register +M X +ĠCamp bell +Ġdatas ets +Ġdem anded +Ġinitial State +g an +Ġe i +Un expected +- web +tr ait +, Y +ĠT odd +Ġske leton +Ġoptim ize +ç¬ ¬ +ĠU pon +ĠSt Object +Ġap lic +.' P +v ron +. UN +Ġpaint er +izar re +Ġl av +Ġp om +p reg += function +( serial +ific a +um ing +åľ ° +ãģ Ĥ +- op +U CH +ĠH end +.prop Types +Ġy o +Ġrout ines +Ġcar ing +S em +Ġres erves +Ġprior ities +red its +IST R +Content Type +ĠSch w +/ media +Ġe str +Ġclim bing +- week +cher che +s ensor +To Array +ĠMont real +Ġcloud s +ĠInject able +ĠR ice +Ġpropag anda +_pro vider +Ġind oor +Ġin aug +Ġdipl om +Ġmess aging +_m ut +å ¦Ĥ +Ġk w +ON S +ari ans +R PC +) ]čĊ +-r ay +ĠS or +m all +Ġmarket place +Ġv tk +M a +og an +ig i +Ġspons ored +ĠD ani +.S EVER +>' .$ +m ultipart +ĠW ol +Ġtable Name +ĠUser name +Background Color +Ġf right +_E MAIL +Sept ember +_val s +op ia +Ġsp otted +- Ch +Ġdata Source +/ "Ċ +ек ÑĤ +ĠRequest Method +ĠRe place +-d o +ah n +ĠPh D +] .ĊĊ +N ON +g ement +ĠTh r +Ġquiet ly +Ġtort ure +Ġte as +ĠC Y +Ġa tr +develop ment +-d etail +Ġlight er +Ġarg uing +Ġdes erves +Ġcur riculum +_CON TEXT +ÅĤ y +H ITE +ĉ ID +/ uploads +Ġt its +re o +_d rop +. 
UTF +Ġpick up +Ġgro cery +ĠP ure +Ġeas iest +Ph il +.f eature +(" * +Ġinvest or +t ok +Ġj ar +L os +âĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶ +. queue +-s peed +M al +um blr +ĠCON ST +ĠH RESULT +ĠD ance +(file Path +Ġattrib uted +ॠį +ĠB und +co ins +Ġs ão +Ġp ir +person al +Ġpre lim +Ġprop ose +ĠT L +] ]) +ĠSub scription +ĠK re +, len +.First OrDefault +) -- +_product s +.Get Bytes +Sh ip +Ġenc rypt +ĠS G +ĠM yst +h ir +Ġiter ate +Ġint end +.mock ito +Ġch apters +( angle +ĠV lad +è® ¾ +' .ĊĊ +Response Body +ĠAb d +de al +Ġbar riers +-out line +b ill +ĠF alls +_se cond +. include +. ceil +Ġoccup ation +ph ony +.move To +ĠJenn ifer +AST ER +; ">< +ĠEn abled +Ġtermin ate +ĠI o +l ations +ĠTHE ORY +Ġear liest +Ġr ack +ĠSc ar +sh ake +ch ip +Ġu v +Ġall iance +п иÑģ +ĠGOOD S +z ione +ĠV I +Ġ{ - +Ġfilter ing +Ġmis con +.Dock Style +Ġb ush +Ġj unk +æ Į +ĠQ UE +Ġhook s +Ġfirm ware +Ġmiddle ware +d ic +ĠOak land +Ġarr ives +P ayload +p ixel +] | +Ġstart Date +.P RO +_a udio +Ġmid field +igid body +ĠSw iss +ĠCl ip +ĠD ump +ĠText Box +Ġg eh +y ield +od s +Ġrefer endum +Back end +ĠC ream +Ġdomin ated +ĠArch ive +Ġrid ers +.prepare Statement +Ġqu ando +Ġche f +w iki +in el +am pling +(" \\ +Ġs ag +_pro xy +ãģ ķ +p do +.getElementsBy TagName +Ġdemonstr ation +ĠN PC +Ġarch ivo +end ance +Ġefficient ly +( actual +.t ableView +Ġm ush +Ġbe ars +_thread s +j as +ah un +Ġne ural +Ġdesign ing +ĠG DP +Ġlift ed +çĽ ® +ĠJ oint +ĠIn clude +ĠGi ants +Ġwithdraw al +ĠR ent +n ative +ĠSe ek +gress ion +_C PU +\ S +ĠSh ield +Ġsol ic +Ġbo om +yect o +Ġmanufact ure +ĠâĢ ĭ +Ġb box +Ġearth qu +ollect ors +:@" % +Ġlo ops +J e +alk ing +ĠWh ats +ĠBo ys +. book +ARG E +_p ixel +Ġsus pects +Î ¹ +us p +ĠBM W +ie ces +(p erson +å¼ Ģ +é » +ĠPod cast +Ġb ou +( Item +à » +( Input +Http Get +Ġb urg +) ^ +BO ARD +*/ , +Ġg ulp +ĠB enn +Ġdeck s +.status Code +Ġac ute +Ġh ug +ug u +Ġp led +," % +h ape +Ġз ап +ĠMain e +.re al +Ġd alam +ĠMin or +.F loat +dis p +Ġt l +Ġen count +=> $ +Ġf g +te es +ĠRec omm +ä l +Ġchem istry +Block s +O ID +Ġfore x +ĠApp end +Ġ{ * +ĠSup ply +CG Float +(b l +Ġat e +ador a +Ġg ust +Ass oci +> .Ċ +F ETCH +.s erial +widget s +ard less +ie fs +_F ULL +ernet es +ĠP red +Ø Ń +äº ĭ +ub ernetes +ĠL aura +Ġl abeled +High light +Ġanno ying +/ update +(d escription +Ġintim id +$ c +")) )Ċ +.A P +Ġ[] * +ĠEX IT +.H ost +ĠOP EN +.send Message +_c amera +_t ile +Ġth erm +onom ous +Ġdis adv +Ġna ar +index Of +ĠP P +.prot ocol +AF E +Ġtext ures +################################ ################ +umb ai +.st ats +ĠG E +Ġi e +ĠST D +ĠM ann +.ref lect +K B +Ġd ive +.w av +/* ---------------------------------------------------------------- +/ settings +.l ifecycle +Ġda ughters +or us +ub er +N ING +st ri +ĠT ip +Ġz n +Ġswitch ed +in et +uff y +ĠTransport ation +( conf +fr ica +ĠX L +ĠLe ad +_per cent +< Map +Ġthr ust +or b +ik k +Ġtra uma +Access or +ĠF it +ĠString Buffer +ex pl +(s creen +Ġaud iences +ĠO PTION +_ round +[ node +be h +-> __ +per missions +ĠD etermine +.M an +Ġadv ances +. 
InputStream +Ġstrong est +Ġe Bay +Ġ# - +Ġdir name +ĠS MS +Ġmedic ations +Ġam ended +Ġchurch es +ĠImper ial +$ row +ĠMad ison +ĠIn sp +Ġaff air +Ġpsych ology +v h +Ġsever ity +âĢ IJ +Ġstri ps +A H +vert ising +Ġcon se +IM AGE +ĠSt ats +ĉs c +.C ursor +Ġfree ze +ss on +(x ml +ĠSus an +.t ile +ed ed +ĠĠĠĠ ĉĉĉ +uel le +ĠMitch ell +b ased +Oper and +½ æķ° +ĠF F +ĉstr cpy +ounc es +ild o +.execute Query +Ġapproach ing +ĠSe ven +Ġn uts +Ġr ic +ass ignment +Ġcalcul ator +ĠMur phy +ĠB ou +í Ħ +Ġbut t +Ġt icks +Project s +il ib +.text Color +m ov +_log o +( template +ĠIN IT +Ġimage View +scri ptions +OR ITY +Con sumer +Ġun precedented +Ġtour ist +Ġbr on +Ġcontract or +Ġlic ence +ĠN am +æ ¯ +( transform +_AT T +P ref +ĠG am +Ġvess els +Ġh av +L ater +.To Lower +Ġurl s +Ġbreak down +Ġpen alties +Ġf oster +ĠU E +Ġcl ue +com ed +åIJį ç§° +-m ain +Ġp ts +Ġcount ed +ict s +/ post +Ġget attr +Ġp ing +ANCE L +Ġp ec +Ñħ од +ant om +ĠBlue print +ĠEvent Emitter +Ġl ä +æ ² +Ġstr aw +( comp +' une +> N +- client +es Module +-b ase +Ġret reat +_s imple +ĉĉĉĉĉĉ Ġ +fe e +') čĊčĊ +Control Item +Ġsubscri bers +ple ase +ĠE ff +Ġp ound +ĠBy tes +ĠTe a +_ activity +Ġmax im +Ġop code +B SD +. constant +; } +omb res +Ġcare ers +) .ĊĊĊĊ +Ġsp reading +-exp anded +ĠOr d +amar in +Ġmob ility +Un fortunately +ak k +N L +_ redirect +ĠP G +ĠS ensor +b ol +t ap +_MEM ORY +ĠUI Alert +plit ude +We bsite +ĠLog o +lo ve +[ ind +Ġalto gether +Ġwonder ed +Ġes per +ĠLib eral +Ġo ss +Ġel it +Ġst iff +od ox +_ment ions +ĠDou glas +_p id +ĠC K +ĠinitWith Frame +.b log +p kg +ang hai +QUI RED +u u +Ġm kdir +AT AL +Ġun h +in ces +st h +Ġhypo thesis +Ġc ata +ĠT B +ĠCl ar +Ġpre decess +Ġsitu ated +-w orld +)) / +Ġhead lines +.st at +Ġout break +sp ath +_FLAG S +ĠServlet Exception +S un +F ROM +ĠD ir +ãĥ»ãĥ» ãĥ» +_co ord +ĠOpt im +Mon itor +.b it +XX X +Ġtod as +f eld +ÑĢ Ð¸ +im ir +Ġpolit ically +Ġmolec ular +Ġtrad ed +Ġ{{ $ +ĠSw edish +Ġ'@ / +_RE AL +Ġw arehouse +t oday +, L +or p +< section +- br +ym e +ĠUser Service +Ġlib erty +Ġmoment o +( Image +< size +S ch +Ġj og +i ology +arent ly +Ġquant um +ĠAb u +Ġr im +Ġman a +Font Size +Build ing +st airs +AIL ABLE +Ġ& ' +Ġs ect +Ġs igh +(b atch +.I Container +p oll +ĠCor ps +Î µ +ar u +ĠK ay +.r ange +_click ed +ĠRobert s +.N etwork +fin ish +- Man +Ġcolleg es +ĠF ine +")) ,Ċ +f ilm +Ġrem inded +Ġgest ure +out il +Ġthread ing +Ġobj et +Ġt ours +activ ated +.m kdir += user +Ġre de +f ü +_SY STEM +p v +Ġcon gr +Ġmass asje +Ġpract ition +Un iversity +Ġtab index +Ð ĺ +S ets +Ġcount ies +g uest +f an +Ġword en +.d i +на Ñĩ + ¿ +ig Decimal +Ġsh ore +Ġg ö +Ġrep airs +Ġhelp ers +Ġcenter ed +OL LOW +Ġmap StateToProps +Ġc ents +< A +Ġexpect ation +Oct ober +Ġbg color +ca les +.C ON +ĠV el +Ġcry ing +-se ason +Ġfunction ing +_LOC ATION +ü ss +ber y +Par a +omin ator +- le +Ġeth ical +has htags +emp lo +Ġn úmero +( activity +.St op +.str ftime +IL D +Ġto e +ĉ Node +") čĊčĊ +ĠPu erto +Ġexec uting +ĠG UID +Ġoppos ing +al ph +Ġexhib it +_fl ash +Ġme ille +Ġjson Object +H ero +aint ed +_D OM +Ġw il +Ġslo pe +Ġm Ã¥ +ĠIraq i +Ġorgan ize +ĉj Query +H UD +sh ine +. 
we +ĠSk ills +pons or +Ġcon clusions +Ġre forms +Ġrel uct +n amed +ĠOl iver +Ġ// }Ċ +- looking +Ġf og +ĠH O +ĠF ried +Ġinev itable +ĠData GridView +H our +il les +log ical +Ġconnect ivity +.tw ig +ĠK yle +(d st +- Sh +ĠStud ios +( Level +.j et +_PRO TO +-de coration +OT HER +Ġread ily +.Param eter +Ġmultip ly +ĠL IB +ar med +Ġsoon er +æ Ħ +_ ES +Ġfoss il +ĠA nc +âĢľ This +l odash +Py thon +Ġhist ogram +west ern +Ġinf ant +Ġco ordinator +Ġn ib +: m +Ġres pected +Ġdef init +& T +_p ad +ĠTr igger +th al +Ġimage Named +Ġbeat en +ĉ rc +ĠPal ace +Ġhaz ard +Ġisol ation +_ rc +cont re +OUT PUT +Ġre ign +ĠPl ate +AT ES +Ġfl ux +Ġpack s +.get Selected +Ġparticip ated +Ġneed le +-de pth +:::: :: +-l aw +ins pace +on itor += no +ĠAt omic +ĠBr ain +Edit able +-s c +red ential +ĠP erry +k ie +Ġ ----------Ċ +.st roke +( Intent +Ġun ity +um lah +F urther +Ġpr ze +Ġs ø +ãĤ Ĭ +ĠPROC UREMENT +ĠH ousing +Ġatt orneys +Ġcomp ose +atter ing +" What +dra ul +Ġstraight forward +In stant +.J TextField +Ġtr ades +л а +Ġ{ ! +Ġl ately +IM G +ĠA ld +ĠIN NER +Ġcart oon +.S ource +F ALSE +Ġd ough +f en +( rect +Data Table +N ick +ĠBut ter +read s +_com ments +EN V +ĠConnect icut +-F IRST +ĉĉĉ ĠĠĠĠĠ +ach i +.M sg +re ction +Ġrelax ed +Ġsha ft +Ġe f +ĠAdd ing +Ġbre ach +Ġ ï¼ļ +ram a +Ġconduct ing +Ġ( ; +(g l +ĠCA USED +ash i +ĠF LAG +ĠCom merce +ĠIN TEGER +h ours +ĠSchool s +Ġn ucle +Ag ain +pro j +Ġsevent h +EMPL ARY +(m ock +'] ,čĊ +_S PEED +> false +Ġsp a +ĠN ear +ì ķ +Ġintr ig +_m embers +w ave +Ġanalyst s +_O S +ed in +ĠF ri +Ġretrie ved +Reg ular +_ obs +EX PORT +')}} " +" class +__ (( +b ucket +Ġst ro +ĠP atch +yst ick +ful ness +ap os +D a +ĉĉĉĉĉ ĠĠĠ +Ġen rich +un ordered +h ole +C ong +< Product +ĠC urt +( the +_l ower +Ġavoid ing +Ġbu zz +Ġv iable +ub a +- is +are l +Ġact ed +-d etails +ภĩ +ĠThe ory +ĠP un +ĠAn onymous +... "Ċ +è res +åı ¯ +ĠV ision +_se m +ash a +Ġcelebr ity +Ġend Date +Ġpop ulate +Ġcu is +qu ant +f loor +Ġglob ally +Ġcru ise +ĠStan ley +Ġb ikes +.get Connection +Ġpoor ly +_ other +amp ing +." );ĊĊ +od i +_A DMIN +.color s +ĠG aming +> ';ĊĊ +STR UCT +Q R +ID s +(arg uments +_a ux +( Event +_PR IVATE +ĠTre k +Ġdownload s +m utable +_STR UCT +(w x +Ġdom ains +js px +ĠVi agra +Command s +J s +.c fg +Content Pane +ĠEdit Text +à¥į ठ+Att ach +ĠAR M +posit ive +ĠGener ated +Ġse ized += : +Ġelectron ics +ĠApp Component +/ ',Ċ +.equals IgnoreCase +Do ctrine +d isk +ĠPolit ical +CH O +< F +ĉ height +ĠB ug +. le +ik h +Ġmill iseconds +Ġconstit u +m ag +.n l +-r ange +ang gal +', [ +ropol itan +Ġà ľ +ĠU C +.d esc +-L AST +f stream +ib il +Ġf ier +VER Y +Ġë ³ +IR T +_ UI +( abs +Ġkne es +Ġro okie +ĠV ac +are na +comm end +- \ +ĠSUB STITUTE +So ft +Ġpart ir +we alth +è¦ ģ +(d ataset +ĠCl imate +- show +Ġreli ability +_ch unk +ä» £ +_st ock +ĠEX EMPLARY +ï¸ ı +Ġv ÃŃ +Ġsm iled +Ġdr ill +.F unction +ĠS I +Ġreg ression +- X +ĠJ ar +p ref +ĉs uccess +ĠHit ler +Ġinst inct +Ġfem mes +Ġlo ver +< Ċ +Ġmulti plier +r il +Res ize +ĠAuthor ization +ĠK an +Dispatch ToProps +Ġc rops +t okens +ec n +ential ly +ĠINTERRU PTION +f ake +Und efined +ĠA K +ĠTest Case +Ġr ab +Ġtor rent +ĠO t +B ars +Ġlect ure +Ġen jo +Ġrespond s +Ġindex ed +Of Work +_ch ain +)) -> +ĠBeaut y +Ġ` < +Ġtouch ing +Ġ| -- +ĉf lag +normal ize +Ġtr apped +Ġestablish ing +/b uild +A J +f y +- react +av n +RI PTION +Ġk ut +ĠF ashion +ĠIn form +cur ities +< byte +ĠUkr ain +Ġs ug +Ġconsist ing +ood le +. 
ctx +.To List +Ġcomment ary +Ġtransf ers +Ġn ost +ih ad +ĠU pper +Ġconf using +miss ing +- cl +Ġbound ing +Ġcongress ional +Ġreve aling +d h +r up +Ġt res +re peat +, ĊĊĊĊ +_t ac +Ġexp ed +G irl +h orizontal +Ġ"../../ ../ +( option +Ġwe iter +ĉs ql +Ġ=> {Ċ +Ġgar lic +Ġre pr +Ġrepl ies +( prop +Ġspir its +Ġins pire +Ġbas ement +.re ject +Ġhint s +Ġpoll ing +ĉ ĠĊ +_r ating +Ġc ath +av ier +Ġcomp ressed +ĠV S +] ' +Ġjud icial +ĠT rend +tr aining +EST AMP +ogn ition +Ä ģ +SE NT +vent ions +Ġconsult ant +um ph +Ġuser Service +, NULL +k h +D ear +_B AD +it ations +Ġmet aph +' é +and ise +-f ont +.ch art +Ġs g +_ Controller +.j peg +ĠUL ONG +ĉg ame +( ss +ĠM aj +ĉg o +ĠS ad +ĠB erg +ĠM ine +P ack +Ġres istant +ĠR OM +Ġp eg +ĠStan ford +ĠY ahoo +Ġsca led +Ġl an += [] +"/ > ččĊ +Ġs ud +ĉ background +Ġsch olars +-m uted +ar á +Ġ= ==== +Ġ__ __ +C reat +ene ver +/w p +ĠV PN +Error Code +) ],Ċ +(b uilder +ĠEn emy +S ensor +us a +Ġtr iggers +Ġplayoff s +_RE Q +Ġ( ~ +ĠBar ry +Ġperman ently +ĠR UN +Ġb ure +.Fat alf +Ġch ick +ĉ panic +ps i +ok a +éĢ ī +> [ +Ġunderstand s +ĠJun ior +ĠIN FO += mysqli +ust ain +-s ource +s erv +ĠC REATE +. au +Ġsell s +ĠĠĊ ĠĠĊ +E urope +z w +pre h +ĠNS A +Ġx y +ภ´ +ĠB eyond +Inst ead +Non Query +Ġar ise +Ġavoid ed +.em place +_model s +} ),Ċ +Ġh id +Ġ& _ +.p oints +.get Width +.Ex ec +Ġ// // +ĠS essions +... \ +ĠCol omb +Ġacceler ation +rest ore +Ġ ile +ob ic +< Node +ĠD X +ĠBes ides +. age +ĠCont ains +N ational +ĠIm plementation +Ġeff ic +ĠR M +H y +ĠWed ding +ok ies +Ġrec ursive +Ġprosec utors +.Se lection +ĠForm ula +Been Called +[i i +ĠFr an +Ġtraged y +_F EATURE +Ļ ¨ +comp ass +ĠB h +? ĊĊĊ +.w riter +ĠH our +Db Context +io v +am on +re pr +é ĥ +ĉf i +'] ] +ĠD ry +. ro +ĠO bserv +æł ĩ +Form er +ĠB alance +ĉ json +Ġpr zy +I SS +( sock +ĠL INE +Ġde ce +Ġal ly +Ġtend ency +F un +Ġschem es +Ġinter ven +æĺ İ +Ġad verse +quote lev +Ġsacr ific +_s ide +Ġmut ex +AG IC +Ġocc urring +ĠCommunic ation +um ar +ç¼ ĸ +ĠTreat ment +.p erson +ĠL C +Ġe ch +( (" +ĠDise ase +ä d +ĠA Z +.A ccount +Ġcontinu ously +END ING +ĠRET URN +- string +.f ilename +syn thesize +Res ponder +( opts +reg s +Ġn uest +Pe er +// ------------------------------------------------ +Ġg auge +ĠK in +.s chema +Ġarr ange +ĠBl ake +_Type Info +C over +ĠHamp shire +P aper +-in ner +util ity +Ġcross origin +F OR +Ġign oring +ĠD D +av an +Ġtrad itions +Ġget String +Ġeth ics +ĠMaterial s +DE SC +Ġen zym +io let +ĠCh ip +ĠMc Donald +Ġn erve +ç Ħ +") ] +æ± Ĥ +ĠS ugar +_S IM +j peg +Ġdiscret ion +ĠT N +bo ve +ĠMin imum +ĠForm Group +Ġwork force +ĠExec ution +err er +ĉ ĠĠĠĠĉ +Ġpres cribed +.Text Align +OP EN +ĠP B +im ity +ĠEx ternal +° C +ĠApplication Controller +Ġb arr +imp licit +_d ot +ĠCol on +C OLOR +.Pro ject +* }Ċ +pl aint +get Text +Ġindivid ually +Ġcheck box +U Y +ĠL amb +Ġdys function +ĠL ar +à ° +ĠCre ating +');ĊĊ Ċ +" They +loc ations +_C ORE +Inter action +umbn ails +ĠPart ner +b rit +Ġless er +ĠSl ot +set Attribute +ĠW ave +.p o +/ store +Ġbrows ing +_p d +sum e +s ed +Cur ve +Ġpl asma +Ġsusp icious +ìĿ ¸ +ĠB ah +ĠExp licit +_C C +.Client Size +\ View +Ġsub stit +lo on +ĠG AME +ĠB rid +Ľ 建 +_ User +Ġsqu ares +f one +Ġsac red +ug hs +] interface +ĠTh row +ĠK irk +Ġemp ire +Ġassess ed +T ax +ĠHe aven +-b uffer +_STAT IC +én é +-b ordered +Ġpun ct +(m ode +Ġke ine +S ent +ĠCal cul +ĠE ve +Ġsty lish +Ġoil s +.Test Case +Ġtrad emark +Ġliter ary +Ġconcentr ations +ĠRel ations +( Class +Ġstd in +Ġv æ +back up +. 
VERSION +.AutoScale Dimensions +st arter +Transaction al +- panel +St udio +k c +ĠCh amber +ĠSpi el +Ġr ho +ا ÙĦ +! ' +.At tributes +Ġmurder ed +apeut ic +Ġint imate +Ġtext Field +ĠBuff alo +d ummy +" % +ĠLib erty +ob ar +ĠT ank +ĠPop ular +erv isor +ĠIn iti +ĠM all +ĠP rior +C AP +ĠCl ay +ĠCert ificate +.L ock +-st rip +-dr iven +/ all +ĠMessageBox Buttons +_SE CRET +_p b +Ġr ats +ा ठ+Ġn t +.R outer +_top ic +Ġt ennis +ĠP UBLIC +ĠActiv atedRoute +Ġ' ,Ċ +Ġcost ume +Ġj okes +. Handle +ĉ byte +Ġflav ors +( cc +Ġperson as +ĉ image +ĠN azi +Ġgram mar +Ġú lt +Ġval ve +Ġv ic +ĠR achel +_in valid +P refs +std int +(r oute +Ġhtml specialchars +Ġpe oples +pl ine +Ġn v +ĠQu ant +opp ers +Ġcurrent User +ĠC atal +Ġrecon c +Ġconj unction +l x +amb urg +Ġinflu ential +d anger +ind ers +Ġ% @", +.config uration +os ome +. identity +Ġpick er +n ost +ĠDI Y +Aug ust +ab lo +Le af +ĠRec o +ck o +DO C +ĠH erm +: any +ĠInt erview +ĠT ex +x fe +( work +Ġle ap +He ading +Ġqu arters +\ Bundle +re b +Per haps +ĠG mbH +B irth +ĉ sum +ĠWat son +.n il +ç ¡ +{ }ĊĊ +ica id +Get ter +" name +Ġ" čĊ +_n one +z m +ac ute +uest o +Ġs ous +Ġre build +Ġnewsp apers +ĠH az +Ġk its +if o +Bl ur +Ġsu ited +- In +à ¯ +ĠKe ith +ĠNor way +IN IT +ire ccion +iet ies +_us age +ĠDou g +r ise +Ġtr illion +im ited +ĠR EL +al ic +Ġcritic ized +the orem +Ġce ase +Ġsid ew +ĠT erry +Ġsubs idi +Ġfirm ly +Ġaw s +Ġh ott +Ġdress ing +bad ge +ĠApp lications +è¿ ĶåĽŀ +Ġlaugh ed +Ġh obby +Ġmus icians +Ġ* . +. placeholder +Ġcount ers +ĠCap itol +SD K +Ġhel met +and box +qu it +Ġcriminal s +Ġteen ager +( update +G l +.se lection +Ġdis charge +Ġpresent ing +ufact urer +_UN KNOWN +Ġstress ed +å ύ +Pro to +_cor rect +ha us +Ġren ov +Ġfire arms +Ġtechn ically +-b rowser +Ġc andy +St roke +Ġexec utor +Ġocc urrence +ĠIP v +_INTER FACE +ĠRetrie ve +.b ad +Ex change +Nav bar +ĠK id +(get ApplicationContext +_ST OP +ĠB oss +List eners +Ġshoot er +ĠAl b +ä ch +Ġp ix +.key Code +al one +Ġabs urd +ĠC um +ĠNewton soft +ik t +Ġlaugh ing +Ġcapital ism +ree Node +T x +_QU ERY +.S leep +( login +Web Element +Ġcelebr ating +Ġde precated +Ġma ar +Ġart istic +_ASS OC +ĠBorder Radius +ĉw p +Ġsurviv ors +In ner +- red +Ġprosec ution +_ pp +(" $ +Ġcomm a +un checked +graph ics +r ors +G ROUND +( public +Ġcustom ized +ĠArk ansas +ĠR ew +Ġexp iration +× ķ +ĠC ul +Ġn ons +.F ilter +Ġsen ator +_def inition +ash ington +ym ph +/ J +Ġf use +ram id +ĠSup plier +Ġaut ocomplete +Ġ} ), +." ĊĊĊ +_function s +ĉ to +.e val +ĠT Object +Re ferences +Ġhe ated +H AL +Ġ)) }Ċ +} $ +ĠB arr +_UN IT ++ $ +Ġget Value +ip ed +ch ied +(v m +c ue +_int eger +_c ourse +th ird +Ġrevis ed +** /Ċ +_D IRECT +Out Of +(" ( +ĠFe el +Ġre ass +Ġsub title +per i +n f +Ġenjo ys +Ġtreat s +) this +-t abs +anc ers +Ġcontin ent +Ġcard io +S er +. question +Ġph rases +Valid ators +Ġpop ul +Ġl ÃŃ +s ong +_IN TERNAL +Ġadvis er +Ġp uzz +Ġambit ious +ĠT ob +ĠD P +Ġpres idency +Ġsurre nder +Ġwatch es +_b inary +ĠSo on +Ġcan ada +(" ")Ċ +] =' +ĠBr andon +eps ilon +r w +.add Child +.C opy +Pr incipal +Ph otos +Ġmarg inal +Ġbas ics +e ing +M ust +_ String +Ġo le +M agento +.c ustomer +(p rev +ภ¥ +Ġlo yalty +C og +Ġprot ocols +ĠCom panies +Ġtheoret ical +Ġaccess ing +ĠZ en +. 
+[... several thousand byte-level BPE merge rules: one space-separated token pair per line (e.g. "att ice", "_w orld", "Ġtatto o"), where "Ġ" marks a leading space; extraction collapsed the original one-merge-per-line diff hunk of this tokenizer merges file ...]
the +Ġh ollow +W AY +ĠFac ulty +Attrib utedString +ĠLook s +ĠR ex +j k +ĠM IL +Ġb ard +.L ong +Ġliv est +Ġsk al +ic ism +MA IN +Ġmu cho +B ODY +Ġes e +ĉ use +F oot +.SQL Exception +Ġinherit ance +re ceived +Ġput as +ed is +als a +ĠError Message +Book ing +Ġtr act +ac z +ĠC ant +_reg ex +Ġide ological +Ġj ihad +h os +/s ys +col m +(p ool +Ġest án +ĠP ending +em ás +Ġktó ry +));ĊĊ Ċ +trans actions +Ġw ield +it ere +ert ure +_s s +Ġstretch ing +Ġprison er +.Read All +Ġbes ch +-- ;čĊ +Ġcr isp +_SC AN +Ġa e +Str ict +ĠMin neapolis +ĠBo eing +ar is +re k +_p ipe +Ġpri ests +(E IF +eh icles +ĠInter active +b etween +ĉNull Check +ĠBl air +ĠL t +_in line +eth yl + ¼ +_p ackages +Ġbarrel s +_ he +Ġreg exp +_ pts +_H andler +ing ular +ĠN issan +ĠR anch +Ġper ch +Un supported +Sm ith +ĠLeg ends +M i +Ġg f +st eder +Ġacqu iring +Ġsim ulator +() ," +re ceive +Ġin place +A CTION +ĠWeb Driver +files ystem +< Order +lo pen +ĠHE IGHT +.set Border +į ° +__ [" +Ġcl amp +Seg oe +b ands +to List +amb a +>' +Ċ +Ġcred ible +am at +play ing +.setImage Resource +qu el +Ġpod r +ge om +E k +ĠQ atar +Ġg eld +? ',Ċ +Ġc yl +( ax +ĠW I +ur ally +ĠBr asil +Ġsen za +ale y +on en +Ġb ah +Ġmolec ule +R ad +è¿ ° +AN CH +- background +- agent +Ġprol ifer +: boolean +Ġt ide +erial izer +_ ;čĊ +F ee +** ) +erg y +ĠHon or +.Log ging +ir is +Ġunder mine +ĠD y +Ġt yr +Ġde que +Ġdam er +([] )Ċ +.layout ControlItem +pe ated +C AN +rag ments +L and +) ]);Ċ +ĠS ah +ĠDE CL +With in +ĠN amespace +an other +sem bling +.des cribe +Con sum +ĠF ear +g iven +Or ange +< boolean +Ġstead ily +pa Repository +Ġresult Set +_ ENTER +_re peat +Ġt ones +ĠPRO P +n al +part icle +Ġsign aling +Ġaccess ory +ĉĉĉĉĉĉ ĠĠ +Ġvie le +ĠNo ah +- ag +Ġmur ders +Ġa ired +ĠPL AY +ĠS ullivan +_C ore +Ġul ong +Ġblog ging +> This +Ġdata Index +Ġprint able +ĠE yes +_target s +(P y +. over +Ġbr u +am pton +Ġplaint iff +< Key +b ull +Ġ⣠¨ +Iss ue +.cor nerRadius +C ritical +_p hi +. angle +Ġdynam ically +! ");čĊ +> );Ċ +in vest +.* ĊĊ +Ġt élé +Ġsuper f +Ġcas cade +DT D +Ġviv id +Ġsubsid ies +ĠH ass +Ġcoll aps +Ġcer amic +{} ". 
+ĠLeak age +-tr ash +coll apsed +-s ocial +ĠCh ad +Ġincl ined +Ġst o +Ġstory board +.p ayment +stack overflow +ĠRaid ers +Ġ# ' +olic ies +ìľ¼ ë¡ľ +em ap +Ġk j +Ġqu ota +ĠGard ens +ë² Ī +ĠAng els +Ġof t +Ġlower case +Ġi Param +Ġche apest +un ta +_p kt +ic ators +Ġle urs +Ġdecre ases +ĉ define +PRE C +amm ers +ĠPre paredStatement +(d irection +Ġcre ws +ark ed +ĠMem phis +ĠS ell +G TK +Ġm aid +: disable +éĽ Ĩ +ĠP f +Ġal beit +open h +?> ">Ċ +.get Source +(s cale +D u +ĠP IL +_ref resh +Ġbet s +(c ar +ĠV on +| --------------------------------------------------------------------------Ċ +ĠGr at +M uch +( Dialog +.stop Propagation +Ġte k +Ġex its +'], $ +Ġphone Number +uc s +ec imal +------------ -- +in p +.po jo +Ġcor pus +Ġpractition ers +.p ic +" testing +Ġstring By +.Not Null +Ġr ang +.D ynamic +_R ender +аÑĤ а +Wait ing +ĠW ik +Ġoverwhel med +% "> +ĠA E +}} >Ċ +u w +_t yp +Ġbuck ets +Ġgre eting +Ġla ughter +Ġant agon +uggest ion +- email +ĉt op +Ġer os +_tr i +Ġiss uing +Ġh á +Ġisol ate +Over flow +, E +Ġnut ritional +ĠAbb ott +Ġn f +.t ouch +.fetch all +_z ip +") }Ċ +Ġam at +ĠC isco +Ġn Ã¥ +PLE X +Ġse i +f oto +.to Json +å¤ ļ +ĠKle in +Ġlib c +Ġmin ers +å ¢ +- print +ĠP ride +T odos +Ġmask ed +Ġset Data +Ġtele fon +Ġunh appy +ĠT ables +ge b +( debug +_all owed +- access +Ġlog istics +Ġg ems +ĠM ature +Ġr sp +ĠAl le +.get Bytes +\ web +ynchron ized +Par agraph +Ġth rottle +.sql ite +cons ulta +ĠSe ah +C e +Ġsub mar +ER E +V ous +Ġre ddit +Ġsql alchemy +-m ile +oc ide +P our +}} ">Ċ +st ead +Ġ@ ( +Ġ[ ]) +ĠAd s +Ġover load +r idden +ĠDes ert +ĠW rap +ĠPortug uese +et z +ĉf irst +Ġmile stone +æĹ ł +Ñĥ Ñī +(s uccess +< Vector +co ol +Ġ[ ]);Ċ +erv als +Ġin vert +" io +cur so +fr agment +Ġfeas ible +.set Position +Ġel m +Ġimag in +@ Spring +Ġb ats +pu és +ga lement +ns ic +gi ene +ell ation +ĠBa iley +Sh ar +ĠT ul +ĠH K +Ġfree zing +gl m +ce ans +-c ut +_c ircle +åij ĺ +n egative +Ġind ian +s alt +Ġt ing +ĉm od +Ġs int +ak in +um l +ĠText Input +Ġpop ped +T MP +Ġpark ed +×Ļ × +ĠF usion +Ġhe ater +ET F +ro zen +h all +ĠM ik +lev ard +- heart +ĉ order +M aking +Ġpled ged +Ġdir s +$ post +ĠH err +stant iate +, "Ċ +.get Color +ĠS AT +Ġtimed elta +ĠM ai +ĉm ethod +Ġid iot +ĠTr av +ident ified +ĠDiv ine +.get Path +D ash +Ġinf iltr +Ġhandle Submit +bro ok +.g eneric +.short cuts +................................ ................................ +Ġdat ings +ĠM V + # +} "ĊĊ +Ġimprison ment +ason ic +rou d +uc ion +æĬ ¥ +Ġdia lect +Ġon Mouse +const expr +.label Control +Ġwe aker +Ġman kind +ĠRE CE +Ġd iz +Ġapp Bar +Ġqu é +f ra +_default s +Ġal iqu +_at om +: indexPath +Ġmiss es +Ġvis ually +ĠH ands +STR U +i ates +_ asset +F inder +mid t +Ġsn acks +(__ (' +. uri +ĠIn strument +ven ir +($ __ +.Dot NetBar +Ġconfig s +Ġguess ed +ि ठ+Ġinitial izer +Ġ? ", +ĠVer izon +man ifest +ge ben +.d etails +G ate +pons ible +ĠEl im +, str +Ġwrit ings +ĠD erek +ĠCo ordinator +Ġpill ow +Ġnotice able +R s +Ġduplic ates +ern els +k J +.z z +oll and +ĠSE CTION +_f name +uff led +'].' ")Ċ +ĠD ollar +Ġem oji +Car ousel +- player +Ġadjust ing +Ġjug a +alleng es +g ene +(body Parser +lop edia +ĠBeh ind +Ġslee ves +Ġdrag ging +ĠChe vrolet +Ġb iz +iv ities +ĠFrequ ency +, char +.W HITE +_pre view +) ';Ċ +_ ax +ION S +.c pu +.input s +UB E +_fe ed +ĠSup plement +! ). 
+es us +ĠU DP +Ġmicro phone +Ġconf irms +.is NotEmpty +":" ",Ċ +_S CREEN +ĉ expected ++-+- +-+- +ĠH ait +fast call +Ġdep ict +v b +_p icture +ĉd escription +ĠW ife +uc i +Ġv icious +ä» ĸ +ue ba +Ġset User +ãģ ¡ +Ġd iving +Ġoper a +user content +ar ah +) }, +y un +vel t +Ġun covered +Ġh ips +Ġosc ill +Ġassert ing +ĠX i +.re store +ke a +Ġsp elling +Ġder ive +ab we +ĠD ow +.set Type +_v s +Ġco zy +.c ategories +O rg +_m gr +Ġd ungeon +collection View +ĠBl ank +ac ias +ä ä +_clean up +_ACT IVITY +Ġtri angles +.Menu Item +Ġip hone +ĠW on +] ]ĊĊ +ĠCompar ison +.D oc +Ġcan onical +ĠSud an +') { +Up Inside +b uiltin +ENC Y +x be +Ġch uck +Ġcontrad ict +Ġnuest ro +Ġarchitect ural +ĠF ib +Ġcomp ares +* k +C fg +çĦ ¡ +nt en +Match es +ĠDOWN LOAD +_HAND LER +man agement +[ S +EN G +ÂĢ Â +f ang +Ġsl ipped +ĠL anka +esc aping +Ġtack les +ĠPed ro +.P rop +.' ' +.G enerated +.New Guid +at rigesimal +ill on +Ġstat istic +spec ies +hold ing +Dr upal +Ġfundament ally +Ġbond age +Ġres olutions +Inline Data +\ Type +est ion +.w rap +Ġwar riors +ĠLOC AL +Arch ive +Ġembr aced +á» § +.V er +ĠAff ordable +oles ale +ĠAp plied +ĠCon version +m ega +_c am +Ġcer emon +aur us +ĠVol k +.op ens +/ about +ĠSt d +j ournal +()) {čĊ +," \ +( Arrays +ĠD ense +ase ña +än ner +/ stat +user Data +Ġg erman +Ġt z +worth y +Format Exception +ph erd +Ġsm iles +ĠWh enever +( adapter +.bad logic +Ġbrief ing +.Grid Column +- char +dim ension +ĠC opper +Ġnin th +Ġ' {{ +Ġr av +_T able +Ġderiv atives +ĠR aise +ĠF ut +arm or +-p adding +Ġre min +ĉ style +ĠMembers hip +Ġspread s +Ġgall eries +ĠClar ke +Ġcon ception +min ute +Ġab usive +_ad j +Ġterr ific +Ġover t +our cing +Ġentr ada +level s +Ġcrit ique +Ġrespect s +ĠM MA +i ene +Ġenc aps +ĠRay mond +Div ider +iv able +b az +Ġ@ _;Ċ +ĠCl aire +Ġur ging +CE E +Ġtransform er +disc ord +ĠJ ourney +t os +Ġcompet itions +ĠO BJ +ĠB is +Ġrelax ation +id y +_IN STANCE +ĠP ref +d ados +ici encies +ĠMedia Query +ĠC ube +ĠStr ange +g pu +(d ays +_Init Struct +Ġfinger print +em at +ĠGe cko +Ġr ails +ĠL um +str action +ig ung +(m ovie +_d ictionary +_int errupt +ĠQ C +ik ed +append Child +rec ipient +r é +V e +Ġtow el +.last IndexOf +Ġplace bo +ĠW ie +.es p +( Debug +oper ative +Ġdece ased +& id +ĉm utex +el ic +Ġb apt +ĉ čĊčĊ +Ġfar ther +H alf +.dis able +.menu Strip +le ccion +Ġresult Code +Ġc ans +-e lection +f emale +_F IX +aus ible +ĠP OWER +Ġrecon struction +Ġsc ans +.Xtra Bars +âĢĺ s +Rem oved +Ġparagraph s +_m argin +Ġl ymph +Ġb os +ling ton +ĠBapt ist +Ġadvertis ements +ĠMan age +/ yyyy +IO US +ENC ES +ĠF iction +ĉm enu +ĠFile OutputStream +ov an +ĠF eng +Ġsk ipping +get Class +ann i +Ġreb ounds +Ġpublic ity +Ġing res +use ment +Ġthought ful +.Ch art +Ġhat te +pass port +Ġhook ed +ĠL ens +Ġflag ship +Ġst ip +ĠG EN +Ġcl ues +ip v +ĠR ise +ĠG ew +tab lename +Ġfore most +_ validate +_an alysis +oll a +Ġqual ifications +Ġdistrib utions +ĠFl ower +Ġt ense +Ġthank ful +Ġcl utch +Ġun ified +ro ads +Ġsit i +Ġst all +_P RIORITY +c stdlib +_USER NAME +.by tes +? page +ermal ink +ĠVe get +/v nd +- author +.N ONE +ĠCon current +ĠC ry +Ġstart ers +ĠInter action +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠ +ĠLE VEL +E ll +Ġcom boBox +ĠTh eresa +te k +_H andle +Ġab y +.g dx +, end +(L ocal +O l +kn ife +ar ial +ĠH off +Ġprostituer ade +Do ctor +Inst ances +.Set Value +ĉf rom +Ġlux urious +Ind ent +Alloc ator +_D RAW +(", ", +ĠFr ances +Ġgroup Box +(s chema +Print f +OR IES +- gradient +Ġre put +ar in +_D ONE +in cre +ig nty +Ġex ert +Ġ- . 
+/ App +-th rough +Ġdecl ining +Ġdess ert +Ġinc umb +Ġdesign ation +.P ORT +, strong +Ġsand box +Ġw ines +ĠP av +$ str +ask ell +Ġh ö +ĠP Y +Get Instance +Text Input +game Object +/ events +created At +Ġlocal Var +ĠWH ITE +per ed +ile ge +eff icient +, color +c ate +ĠC afe +Ġsimilar ities +Ġp umps +ĠHung ary +.User name +Ġsk ate +Ġtouchdown s +Ġacceler ate +ĠH elen +OM EM +ĠK un +_v ol +Ġfind All +ĠMens chen +a head +); " +kom men +Ġpossess ed +.arg max +.trans ition +AR P +OLUM E +(s cript +ĠÐ ĺ +ĠF inding +on ces +I o +B old +Ġrenew al +_D IALOG +Ġdis reg +INT ERN +Ġt oute +Ġelect r +ĠG ross +ĉ true +.F ields +ĠW IDTH +ĠD ent +Ġà ģ +NS Notification +Ġa os +Ġme lee +. Validation +ĠDE C +-depend ent +Ġsu ic +T raits +$ message +ĠD ear +ĉ FILE +l anguages +.P rot +.add r +-g eneration +IC ON +Ġtrans plant +-d escription +Ġch asing +Ġche es +Ġ} */Ċ +Tr ad +qu eries +/widget s +sub package +Ġes pec +Ġcr acked +Ġcompet itor +P urchase +- team +olec ular +or Thunk +& P +Ġrel ent +/ #{ +Ġproduct Id +Ġè ¾ +ĠL av +ĠAl ter +.M ode +AD IO +gr p +æ ·»åĬł +Qu it +Ġdepth s +-c ategory +ĠD ATABASE +S PELL +ĠFal con +ĠQString List +Ġ'' . +ĠIn stitution +d amage +az or +bel ongsTo +ver ages +ĠN ONE +ipp ets +, \Ċ +Ġfoot print +_ archive +n ak +.get Field +ĠRef lection +Ġ' ] +ĠH BO +_dis count +Ġin cest +ĠD odge +ĠW ade +.N O +" encoding +ĠBlock chain +Ġlaws uits +ĠM aint +ch ten +Ġét ait +Ġktó re +_ ctl +(t imer +B attle +iz o +ay ed +I OR +ĠGlas gow +Ġsyn th +_log s +.p ose +_Adjust orThunk +(( & +Ġuns ure +yst ate +íķĺ ëĬĶ +O ULD +. ng +Ġdefault dict +work space +Ġselect ive +Picker Controller +YNAM IC +.method s +Ġpath ways +ĠF ew +K G +CRY PT +follow ing +ĠD LC +ĠS ara +Ġpres et +estruct or +ĠK urt +Ġair plane +Ġo mp +ĠParent s +ĠMart inez +.com plete +Ġbroad ly +Ġsc are +ĠM é +Ġelim ination +Ġpou red +/ sw +Ġcom un +Ġm asc +ĠOrgan ic +ĠString Utils +il ateral +Ġreluct ant +- age +Ġn z +." \ +Ġpast or +ale z +Ġe fect +pro v +/ init +Ġp enn +und s +Ġs size +ĠPro j +bas ename +Ġsh ells +ĠNe ck +ĠEn forcement +vid ed +st own +S phere +$ r +uss en +af il +ĠTele gram +Ġanaly tical +нÑĭ е +us ually +x n +Ġhistor ian +ĠGreg ory +ol ph +ĠUn a +Ġcon tributes +% - +anti ago +ÑĢ ÐµÐ´ +.reg ion +Ġab rupt +ĠUnsupported OperationException +ĠT ASK +_f inish +Ġnot orious +ĠV s +ĠM Q +Ġsun set +Ġun acceptable +ar cer +Ġill umin +ĠOr b +Ġb h +E ste +_dis patch +Ġr ipped +Ġtou jours +ĠPar cel +_ ll +.user Name +.class es +S OURCE +( Number +ел Ñı +Ġhead phones +(s ide +const itution +ann ah +čĊ ĠĠĠĠĠĠĠĠčĊ +Ġcl iff +- ref +Ġmo strar +ĠPow ell ++ y +ĠB G +_f ragment +.P ort +Ġreal izing +param ref +Ġh ometown +@ Table ++" --}}Ċ +F rench +Entity Manager +ĠPl ain +//////////////////////////////////////////////////////////////// //// + ³ +( RE +c apt +Ġorgan isms +Ġj ets +ol ocation +ĠApp RoutingModule +Ġgl orious +æľ į +Ġdisc arded +ĉĉĉĉ ĠĠĠĠĠ +ĠArn old +l ug +Ġpar l +Ġhorm ones +Ġm ah +ĠSon ic +Ġorgan izers +_PL ATFORM +.in v +Ġch ord +vent ional +ĉ of +Ep isode +. Enum +unk t +ĠD h +ĠJ ared +ĠN ak +Ġint ends +End ian +Ġa ustralia +_c v +(res olve +Ġclin ics +lik ed +ASH INGTON +in ha +' * +ĠN P +_b eh +Ġh f +Ġw ür +c ategoria +$ form +Ġsub way +Ġis Active +pop ular +C our +Ġco oldown +Ġa insi +ĠGL uint +ere al +Ġarray Of +Ġh atch +======== == +ress es +_P P +. 
^ +_dec ay +ĠB less +met rics +ĠCOPY ING +ĠDump ster +ĠJos é +ĠDesign s +< +Ġ" }Ċ +time zone +Ġe er +max cdn +ĠE SC +ig aret +_conn ected +_re verse +Ġquestion able +ĠUS C +Ġtut ti +Ġdrop out +ĠActiv ities +ĠW inds +')) );Ċ +Ġcon gest +ÄŁ ı +Ġprolong ed +è¿ Ļ +ĠCross AxisAlignment +LE EP +ĠVAL ID +ĠG az +Ġdepend ence +ĠP rix +.Compiler Services +j ump +Ġstr at +c irc +ĠC USTOM +x aa +Ġb mp +Ġb ureau +Ġw aren +N X +( Window +ĠChrist ie +_F E +Ġt n +ĠOm ega +communic ations +Home Page +com pletion +Ġsupply ing +YP ES +á vel +åĪ ¶ +(c lick +\ Contracts +/ questions +Ġe z +AM S +.m esh +Ġ' \Ċ +Rob ot +Json Object +ĠD F +ĠProcess or +_sh ould +.prot obuf +- users +Ġemb ry +F ONT +Ġstart ups +ĠData Source +) # +uro s +_C olor +Ġstand alone +} [ +j d +Ġforg ive +Ġng x +ĠGener ally +Ġconfig urable +/ order +Ġv as +') ";Ċ +ĠR R +ĠT roy +Ġcomprom ised +ĠSw an +int endent +Cent ral +_ keeper +Ġar quivo +ĠRead Only +_cur ve +k v +ent in +è ± +ĠE y +.im read +ĠP am +if fe +at ivity +xb c +Ġgr im +-f illed +names e +'] : +Ġa ur +ĠGib son +.Mouse Event +Ġl ado +avad oc +Ġfam il +ĠM oder +f ps +ãĢĢ ãĢĢ +- example +ĠAl zheimer +ĠU tf +_arg uments +Con clusion +text Content +rem aining +Ġinterrupt s +ĠBack up +ĠM ong +Ġrecept ors +h istor +.cor outines +Ġsh outed +Al arm +Ġcomb ust +Ġg rote +ult ural +( ids +---------------------------------------------------------------- ---------------- +ipl inary +O pts +ĠY ale +local Storage +Ġequ ival +ĠF leet +\ b +* pi +ĠQ Label +æ ¡ +Ġv x +ĠA CL +Ġsu cesso +Ġper c +ĠNot re +Ġan arch +R ing +sp b +Ġstr pos +st ores +ĠMap le +(Main Activity +(" ")) +Ġview Holder +Qu ad +Ġig ual +ors che +.m argin +Ġind ie +Ġfr anc +ĠForm Builder +ĠPart icip +.fl ash +Ġstorm s +U lt +Ġf en +[ new +E ver +=" Ċ +Ġlocal ized +_f ollow +Ġn ave +Ġdomin ance +(t ile +J ournal +ĠV C +Ġpenet ration +ï¼ ķ +Ġcomp artment +Ġb ids +Form atted +****** /ĊĊ +(c ity +âĢĶ it +[ C +Ġuse Callback +a ub +) ?. +ĠV AR +ĠSe bastian +ĠM oss +Ġabund ant +G reg +ÑĤ а +_c i +Ġbib li +CR M +ĠAt tempt +ism e +d ash +ãĢ İ +_m u +.Formatting Enabled +Ind eed +-d irect +Ġsuck ing +Ġp ne +ocab ulary +ĠPack ers +.N avigation +Ġp ied +cri bing +ĠSt uart +.To Double +ĠSecond ary +S aving +ĠD ut +ĠM add +M agic +, H +.document Element +ĠB ST +Ġdiff ers +Ġmore over +_ nd +SE ARCH +п ÑĢав +æ ´ +to Match +Ġdecre asing +-m ember +amp us +( boost +D aily +Data GridView +ĠHttp Context +Ġh ipp +_work ers +-l anguage +é ĵ +Ġconsist ed +ath ing +ĠMer cury +$ content +Ġpract iced +ĠMod ules +_D AY +Ġweakness es +ĠL odge +Ġn ar +ĠM ate +Ġj p +ĠHttp Headers +Ġsm o +ĠT OKEN +] )( +Ġaqu i +sw agen +Ġs rv +ĉ ans +A round +ĠMan uel +Ġfiction al +ĠIM G +Ġ. 
' +ĠB erry +Ġwall paper +sex ual +ier o +Ġ çļĦ +ìĨ Į +Backing Field +ĠAd rian +BASE PATH +Ġrepe ats +Ġbl ues +Ġunp redict +_c oll +st acle +ĠT umblr +ĠEl f +Ġass urance +Ġc ensus +ĠIM PORT +END ER +an os +Ġ= ( +ĠEll is +" ĊĊĊĊ +.w in +ĠA bove +al on +_t ick +Ġrepresent ations +Ġæ ķ +w id +ĠAr ms +List a +_f ailure +_c m +.Flat Appearance +Ġthr one +P atch +ĠV oy +eng l +Ġnegot iating +> ` +Ġshoot s +ĠF PS +.Y ear +ĠK iss +enc ión +reet ing +From File +Ġresign ation +Ø · +Ġtw ins +ưỠ£ +Ġge bru +.get Content +.T ree +ĠEmploy ees +ĠF IFA +Ġcert ainty +(C l +Ġtot als +edit able +à¥ Ģ +.Report ing +M as +qu iet +.r ules +ĠV O +con exion +, K +Ġalloc ator +ĠPow der +\ Repository +Be at +_t ipo +Ġ[' ', +_IN TR +Ġ<< < +< hr +") == +ugg age +ĠC raw +Ġé galement +Ġg inger +Ġprim era +Ġprod uto +lt k +.User Name +Ġstr error +m ith +_n b +Ġdis comfort +']; ?> ");čĊ +drop IfExists +ĠB eg +_H AL +Ġcross AxisAlignment +ĠE vidence +Ġpec uliar +Ġinstit ute +ve is +Ġf ft +à ģ +Ġzo ekt +an aly +ĠHom eland +Ġpen etr +udden ly +ĉ element +ĠB ren +ĠTr udeau +ĠCub an +j am +us lim +_e v +Ġst ems +} % +Ŀ å§ĭ +Ġbrand ing +Ġcorrespond ence +.j query +¢ åįķ +ĠRead s +(Http StatusCode +ass in +(s lot +ĠGrad uate +/// < +Ġinform ations +EN ABLE +Ġp uis +Ġfind er +ĠBr is +Ġnett steder +_m id +Ġo gs +ĠSter ling +Ġar rog +str ftime +| ĊĊ +Ġvo x +ĠReg ardless +Ġes o +ĠCom fort +.Boolean Field +Ġu h +AC Y +Ġsque ez +ĠV ic +cont ro +. lo +Ġ ire +ĠCom edy +ë ¶ +Ġorigin ated +Ġsh ipment +| max +_g uid +lev ation +на Ñı +( undefined +ĠD DR +Ġshoot ings +ĠLat ino +END OR +Ġaver aging +Ġgre eted +Ġthe aters +о е +Ġd B +Ġg st +Ġdef inite +. Storage +.h er +Ġa fore +ĠRe ality +ĠGod s +vers ed +Ġhands ome +Ġex cluding +( ad +Qu otes +ĠS cheme +? q +ĠT amil +T icks +Ġp est +' n +Ġporn ography +_mod al +Ġ ---------- +Ġdis posable +F REE +Ġsh ark +C HE +Ġdep icted +Ġdemonstr ations +ĠK illed +ĠR ULE +Ġobs essed +Ġsimpl ified +Post al +Ġconcept ual +Ġp st +L as +_PRO JECT +ucceed ed +ol u +ÄŁ i +Ġpersonal ities +Ġres hape +Ġenc losed +ĉp tr +Ġtutor ials +Ġexpl oded +_DIRECT ORY +åĨħ 容 +Ġcan on +Ġrecogn ise +P AD +ĠAppro x +ĠRest ore +ĠImport ant +Ġheav ier +.Se quential +Ear th +ĠMil k +.set Request +.t em +Ġre construct +Ġskept ical +_Pr ivate +BU F +qu a +: a +Ġse k +Ġd well +oss a +Ġreward ed +и й +(top ic +_part ition +Ġ__ ________________ +Key words +ĠFr anco +L ite +Ġn aken +Ġз а +O BJECT +Ġcraft s +ĠSw ap +.X na +.Con nect +Ġbalcon y +(re al +ĠBarn es +b ir +ĠTw enty +ay an +at ars +ĠProp el +ĠIh nen +Up grade +Ġcur b +- second +Ġn eph +.p res +ìŀ ħ +.se q +Ġp added +" ? +j l +ãĥ ¬ +') a +Co ordinates +Ġen acted +ENT S +Ġl ac +.f inal +ĠPhp Storm +c alled +Ġin quiries +.m iddleware +ĠD owntown +/ ';Ċ +Ġkil omet +ac cel +Ġqu ien +w string +set Data +Ġman era +Ġmod ular +rim p +Ġtar iffs +âĢĻ il +_TH ROW +/c olor +ĠHT MLElement +Ġcar ro +Ġpr ere +Ġplot ting +ĠPos itive +ĠMach ines +OT ES +á» Ľ +ple asant +Ġal te +Ġa inda +th ese +Ġc ors +ip ay +ĠAdvis ory +ĠRub io +j q +Ġl imestone +Ġdet ached +设 ç½® +ten ant +ĠDep th +al ore +ĠÑģÑĤÑĢ Ð¾Ðº +ĠF ORE +ĠL ay +p resentation +) ');Ċ +.sub plots +Ï ĥ +N OW +G ar +hand les +ab ra +put ies +ĠElect rical +M iddle +rop ic +ĠJ D +ĠD yn +ĠB ristol +ĠMc Carthy +Ġstri ker +Ġenumer able +ĠEv an +.default s +qu ences +) || +ĉt oken +â Ĺı +-d ropdown +ST ORE +ĠGraph ic +( pp +Ex pl +Ġup wards +ĠD istributed +ĠW EB +J er +is NaN +çĶŁ æĪIJ +> R +üss en +ef s +Ġun cover +Ġl ud +.cal culate +Ġint ptr +Ġmidfield er +. 
Headers +Ġm f +ere f +.M etro +ĠSpe aking +: b +Ġcryptoc urrencies +Ġdem ons +ĉ EXPECT +Ġw icked +y outube +: Int +ĠHind i +ĠC AT +ĠØ ¹ +r ar +om ore +/ per +/lic ense +Ġre im +Ġawait ing +Ġle thal +ĠE F +round ed +ĠPl atinum +ĠвÑģ е +.co ords +.De vice +/ item +ĠW enn +compile Components +ĠK inder +.remove Item +Ġand a +bn b +Ġpr a +( transaction +Ġembarrass ing +ĉ BOOL +.content View +Ġevent data +at ore +Ġprovided In +ir ma +Ġz ona +_H W +æ Ļ +Ġst ove +Ġcounter part +_Pro duct +_MAN AGER +Ġinfr ing +ĠE RA +_p arty +Ñ ij +Ġin ici +_ Request +Ġmir acle +Ġcancel Button +S py +at ó +Ġpol ish +ĠNic ole +.display Name +\Request s +Ġuse History +Router Module +Ġst ared +ID ER +Ñĥнк ÑĨи +Ġnot a +$ arr +pec ified +Ġto pp +_DR IVER +/ ng +å ł +_t m +% timeout +< s +Ġ( *) +ĠHttp Request +_TR ACK +(n ote +ĠExp lore +_s erv +Ġç » +B inder ++ ", +. att +ĠEth i +Ġc ódigo +=' \ +.l ines +( Of +å° Ĩ +miss ible +Ġv é +Ġac oustic +Ġcraft ing +n it +.b a +ĠLuc y +Ġi Pod +Ġpup ils +-m ax +_w r +(c p +ĠRE PORT +Ġd ns +ĠRe ferences +Ġundert aken +Ġkø benhavn +Ġch ai +ĠC roat +_ Log +rown ed +_m ed +ĉ date +# __ +Ġcost umes +ĠRe quires +aff le +ç Ĭ¶æĢģ +-S emit +ela ide +еÑĤ од +Ġp estic +Ġd ra +DOC UMENT +Ġ... čĊ +}` }Ċ +ĠA uction +ĠD ock +xxxx xxxx +(get String +ħ į +Ġborder Width +ĠMach inery +Ġpredict able +.S H +Ġam plitude +.for Root +IN avigation +Table Model +at trib +Ġmaneu ver +Ġexc av +B ERS +Ġd apat +Ġinstall ations +.A sync +Ġr ays += âĢĿ +; ččĊ +.c rypto +_db g +ĠEnum erable +Of Size +_epoch s +m w +M ENU +out line +ĠP apers +============ Ċ +Ġuniform s +ĠG ig +- package +ĠJen kins +ĠHome Page +.is Selected +Ġmechan ic +M K +ĠS ounds +//---------------------------------------------------------------------------- -Ċ +Ġresearch ing +Ġinf os +ograph ics +ers et +([' / +ĠTim ber +. agent +.to JSON +_command s +par ing +_ad just +.n ome +(g lm +Status Bar +file path +? 
âĢĻ +Ġdetect ive +Ġunser er +ĠTib et +EN DED +(se ed +Ġsne ak +Ġam or +=" // +ĠPan thers +all ax +ĠL IVE +ĉD WORD +]= - +Ġtorn ado +/ min +Ġlung s +-c urrent +ĠBook ing +åĪĹ è¡¨ +Ġenjoy ment +ठ° +J A +typ ed +.B tn +f at +ug al +ĠSh ares +Ġdis gr +ĠB AR +ĠFO X +Op code +ĠS z +key down +iction aries +Ġdetail ing +} ))Ċ +Ġp ok +Ġdemonstr ating +Ġnot ation +l ayers +@ if +ĠN PR +.strict Equal +ĠRec ipes +.T ensor +Ġliqu or +Ġdeb ts +.ends With +W heel +.P os +CS V +$ arity +Ġun stable +( loss +ENS OR +Ġele ven +ĠL opez +ĠHop kins +con om +ĠS eth +Ġpo ems +Qu ant +Ġg sl +Ġsy rup +Ġs ibling +Ġc ass +-v ous +ö t +_P ATTERN +_SE CTION +est imated +up grade +.m ongodb +ĠBo at +_C TX +Ġfetch ing +ust in +pi el +M arg +Ref lection +Ġd uct +ĠMunicip al +Ġb x +.Get Current +ml ink +ĠAccount ing +ĠGene va +_P os +Ġpass er +Ġhear ings +com pan +Ġfrag ile +Initial izer +walk er +.M aterial +ĠHun ting +trys ide +Ġk at +Ġcl erk +á Ł +do ing +ĉg roup +Ġsan ction +.l b +ĠL azy +ĠCon straint +P agination +Ġpou vez +ĠInd icates +M ER +Ġcour s +Ġyear ly +Ġgros se +abb rev +ĠD ON +Ġproceed ed +ent lich +Ġproperty Name +ĠTe aching +st adt +Ġc utoff +orn ers +Ġa frica +Ġrend ers +ĠYan kees +ĠTool bar +sp aces +.fill Style +Ġseg undo +_str len +.F irebase +å¤ Ħ +Ġmention ing +\ ( +ĠVal ve +Set ter +Ġsp ans +ĠAl cohol +ĠLet ters +\x e +ĠT K +_B LE +.get Result +< Player +ĠP att +Ġeas ing +Ġtur key +ĠF en +') " +Ġconf ined +Ġin clus +Sup erview +(with Identifier +enc ial +Ġstuff ed +Th eta +Ġeconom ists +} ));ĊĊ +co okies +ĠRo ose +ĠChe ese +Ġfich ier +Ġen forced +AB B +no ÅĽci +_AL LOW +Ġrecru ited +Ġexpend iture +-n ight +Ġassert NotNull +_ex ecute +ĠØ ¯ +IN DEX +_F MT +Ġresc ued +ĠMonth ly +ĠCons ervation +ĠG eb +Ob ama +Ep och +ic ies +ĠOr t +Ġso it +( icon +F riends +m ol +Ġground ed +ĠC ause +ad ena +WE EN +ĠL un +IT IVE +. loop +_un til +Ġcor r +.ed ges +Ġhyp oth +ched uling +trans lator +ĠÐ ľ +R om +ãĢij ĊĊ +ĠX amarin +Ġviol ating +. 
anchor +--- ĊĊ +Ġtr ader +AD VERTISEMENT +Ġuns ere +ĠD AO +Ġbl ond +ĠP AT +.g lob +Ġè¾ ĵ +Ġsplit ting +Ġun subscribe +Ġatmos pheric +ĠTr im +Ġcit ation +Ġin ference +ĠF t +ĠDar win +find One +ĠG el +( Convert +Ġaccess or +; text +(s orted +Ġjud ged +); \ +: p +Ġme ine +ĠS lim +.Command s +Ġper ceive +coh olic +< Data +.entry Set +Ġassert False +ĠPat rol +ense m +ÅĤ Äħ +¨ ¡ +W IDTH +ĠRes cue +ĠU IF +_THRESH OLD +ĠMich el +ATER IAL +opens ource +ĠD iana +Ġinv ites +_B ODY +Ġreserv oir +Ġro i +c ust +(t c +ï¼ģ ");Ċ +Ġfest ivals +Ġperform ers +Ġclim bed +Ġj ungle +String Length +Ġunlaw ful +ier re +vertis ement +Ġst akes +Ġh ats +Mod ify +ĠLET TER +.H ide +Ġstat utory +_ white +ĠPer l +uten berg +em ple +.W orld +Ġoverlook ed +Ġcon cludes +/* ================================================================ +-w ise +ĉ stream +pop ulation +Ġevent o +Ġillustr ations +ft s +Ġaut of +ĠPro cedure +Ġdes erved +-t imes +Ġg ol +N SError +cre st +ĠPak istani +any ch +get Current +Ġl ar +nt l +ĠRe becca +Ġm ateria +Ġfind By +/ ad +Callback s +ĠAl s +ĠKat ie +ĠObservable Collection +ĠDocument ation +Typ ed +ĠCulture Info +ĠTim othy +Ġlater al +" type +Ġun authorized +Ġteach ings +Ġdebug ger +[ value +Ġal ors +Ġu z +Ġsc atter +Ġdown ward +Ġmig li +status Code +Ġ( )) +ĠM W +Ġм ож +RO SS +.b uf +Ġfair y +ĠInf rastructure +=> " +t lement +$ (" +From String +ĠB ild +Ġconvent ions +_n ative +ĠIns pector +ĠP ist +ub ar +Ġreg s +ĠP ilot +Th us +>' + +Ġc ela +.new s +( Product +L iving +R ussia +Ġfac et +et ical +Ġ[' $ +/ [ +ĠD ire +Ġg ases +ĠIN FORMATION +ĠE at +ĠFor ums +ĠChar acters +_m et +Ġìĭ ľ +Ġk ings +ach ie +ĠL ambda +Ġtim ers +ĠLight ing +ĠCase y +add ir +and ex +. answer +ĠH ip +ĠPr incip +Start Date +Ġ ãĢĮ +t res +Ġ& # +.Max Value +ĠPro blems +Ġlat ex +Of Class +ĠLyn n +// ' +Ġvoy age +Ġshut tle +ĠRoll er +ĠRuntime Error +uy a +D ic +ĉb uilder +Ġbul lying +Ġsimple st +.c alled +ĠL R +Ġmor ality +Ġst urdy +tr acking +.sw agger +_B IND +IT OR +-url encoded +ĠÑ ħ +ĠTr inity +Ġtr aps +Ġ| - +Ġset Text +Ġbarg ain +Ġbr akes +.get Code +Ġmigr ate +Ġrib bon +) return +Ġcharg er +ac om +ADI US +ĠAmb assador +-a fter +Ġann i +ĉs pin +Con cept +ĠHend erson +ĠH OST +.r ank +ĠNor theast +Ġber lin +Ġrequ is +.f eed +Ġsource Mapping +ĠRen contre +. ajax +nest js +Ġtre k +ĠN acional +Ġ& [ +Ġpay able +ort ex +Ġde pt +field Name +Ġcomple tes +ĠR VA +Ġon ions +al ignment +Form ats +Ġ' {$ +Hash Set +ĠB od +.Invariant Culture +Ġsettlement s +Ġhy dr +. updated +vent h +( seconds +="/ " +Ġweb page +( ĊĊ +Ġt ir +Ġto es +ĠBr ick +Ġamb ition +P ot += max +ET IME +Ġdep ot +c alls +ĠNor wegian +` : +Ġbur ger +Ġprofess ors +ĠAl locate +-third s +-ch art +Ġfor d +* N +.k otlin +Ġpaper work +ĠDE VICE +% @", +res pect +(m p +é «ĺ +- if +Ġcush ion +ob ot +Ġpar c +SP ACE +ĠNet anyahu +Ġself ish +fe at +Ġclient es +-to ols +Ġpor ch +Ġj q +. verbose +Ġlib erals +] )ĊĊĊ +p ies +Not Blank +( term +ÈĽ i +_Param s +.normal ize +B ullet +AS IC +(h ex +_client e ++ , +_D I +Ġforth coming +} ")]Ċ +se o +U m +> Name +Ġcomfort ably +irection al +W ITH +/ pr +ĠP oor +ĠVit amin +v ic +G H +Ġprior it +ĠN N +ĠC losed +¤ í +Ġis Open +\ Console +And Feel +.S UCCESS +_OPER ATION +pol ation +ĠT as +ps z +> '. +C URRENT +V endor +host s +ĠE rd +>tag ger +ĠsourceMapping URL +Ġmar athon +_c losed +Ġexem ption +Ġrecogn izes +ides how +' $ +('/ ');Ċ +m its +war z +ĠCh erry +µ ¬ +n or +port e +Ġw l +_back up +.get Boolean +.get Resource +Ġdefinit ive +. 
EditText +Ġs ÃŃ +.C ONT +ĠPL AYER +.c ards +ĠSh ore +('/ ')Ċ +cl uir +Web Driver +(m onth +-re lease +Ġins pector +å £ +ĠN F +_cl ip +åŃ IJ +Ġinteract ing +.t mp +Ġ'' 'ĊĊ +Ġde e +Ġfro st +"] ))Ċ +ĠPl aces +Th rows +f ork +/ day +i Phone +ĠM IC +Ġfold ing +Ġcro re +ĠCh iefs +pher ical +( price +.Write String +Ġexit ing +] ',Ċ +ight ing +Ing redient +( vertex +Ġscroll View +h f +: new +SE N +se ctor +Ġsp ins +ĠS cheduler +ote chn +sem icolon +Font OfSize +ĠSpecific ally +fl amm +.Object Id +Ġcont a +_per missions +ĉF ROM +IC ODE +/ kg +ĠHot els +-m ed +ĠD in +Ġn avy +get Param +Ġm end +Ġportray ed +ĠMet ropolitan +Paint er +Ġref erral +_g ood +Ġmar vel +osa ic +> (& +. ur +Ġest os +Will iam +Ġtim ber +Ġquel ques +ĠDoc uments +.X aml +Ġbatch es +éģ ĵ +ĠRe leased +T ail +CO OKIE +he id +_st ation +ĠV ia +S ale +ĠRe peat +Ġprom in +ĠZ o +- forward +ĠI on +it ary +Ġj us +- request +Ġproud ly +ĠStream ing +(Mouse Event +ĠS print +_ rotation +Re positories +Ġt art +ĠÑģ в +Ġm appings +è ª +C u +C ycle +Ġb un +ĉl ua +ãĥ ī +Ġ(( ! +Ġcollect ively +ĠCon d +Ġwsz yst +(l ib +openh agen +_s kip +.Column Header +é Ĥ +peri enced +ı è¿° +_p rops +Ġcontr ace +Ġmatch up +ab etic +.m embers +RE CT +(d at +Ġs og +ren om +_M ethod +Custom ers +full name +Z N +re try +Ġk ap +ĠNe u +è Ĭ +add Child +will Return +_p ermalink +Ġener getic +ĠW et +ĠMor r +Ġg cd +count s +, type +d ig +( Login +Ġcr acks +Ġbacter ial +ĠMe at +ĠArm strong +ĠBron ze +Ġapprox imate +_dir s +lig a +ÅĤ ad +Ġkind ness +Ġcont re +ĠE VERY +M ET +Ġannounc ements +g pio +ĠWaitFor Seconds +ĠPhotos hop +Ġdis contin +/ dd +Ġtop ology +an ical +. interface +auc oup +.Hash Set +ARI ANT +(r outes +ĠT eh +Ġh ype +] "). +Ġsl am +Ġbro th +- inter +ĠR id +-m anager +Cancel ar +ĠP agination +Ġsound track +Ġpost erior +Ġscr ub +cre ating +- * +ir teen +.d y +.s ymmetric +Ġ"" . +============ === +Ġch assis +ĠnumberOf Rows +Develop er +_b ins +ĠO UR +ri eb +Pro s +Ġwi ÄĻ +" d +Ġasync io +ze igen +_s pi +.A LL +Ġscre ws +Ch inese +Ġapi Key +Ġun successful +ĠSeah awks +OR G +ç« ł +Ġprofession ally +ĠCou pon +åŃĹ æ®µ +Con vention +Ġpol ym +æī ĭ +Ġsalv ation +Ġengine ered +ĠW rest +ĠG CC +Ġwar mer +Layout Constraint +Ġag grav +Script s +vent ure +Ġrefriger ator +Ġinnov ations +ĠRun ner +N IC +ĠRoll ing +Control Events +Ġlo os +p ac +ĉ panel +ef e +ĠBudd ha +------------ --Ċ +åº ĵ +(for Key +Ġl umin +Ġ( ? +ĠA IDS +, user +im ientos +content Type +ant lr +é ¦ +ĠW elt +Produ ction +m ight +ĠV II +", ( +Ġobserv ing +Ġdeliber ate +( control +Ġwith d +Ġsem ana +ST ACK +uch en +N ice +ĠDeutsch land +ĠSpec ifies +d ma +iz io +ĠF acts +_pop up +ĠDirect ors +{ : +[ R +ĠÑį леменÑĤ +Ġpl at +Ġdirect ing +ä¸ ī +ĠGil bert +â̦ .ĊĊ +.q ml +Ġthere after +Ġdis position +d raft +Ġsurge on +ĠIns ider +Bl end +ĠT rev +tr insic +Top ics +rie ve +_FILE NAME +Ġaut res +J ose +Produ cer +er us +Ġpet it +ĠN EXT +ĠF ilters +Ġreplic ate +"] ). +Ġl enders +] ",Ċ +; charset +Cpp Object +Ġfl oral +ĠT ipo +Ġcirc uits +e asy +(& $ +itt a +ery l +_COMM ON +'}} >Ċ +-back ed +(var iable +( Index +Ġvo ir +_loc ations +++) { +ĠLouis ville +Ġgrat itude +.Mock ito +ĠP owers +ie urs +Ġge ographic +ra le +Ġc ra +ĠSp urs +iph ertext +AC ION +- common +Ġvict ories +ĠFinal s +.sh uffle +-m illion +_PRO C +ass ume +Ġil s +DB C +Boot Test +Ġl avor +.test ing +. ast +"] / +m oid +Ġqual ification +ges ch +ĉ put +Ġair ports +J I +Te acher +_un iform +Ġn ama +ĠB ast +ert ype +c apture +get All +ĠReyn olds +oo led +.com ments +Ġch in +). 
* +Ġи ли +t gl +ud os +Ġd ÃŃas +ch ai +.pro gram +Ġps z +ĉ icon +ph il +ent ral +_WR AP +ov i +Ġnost alg +In finity +ĉy ield +Ġvit amins +Qu aternion +S ink +_g oods +Ġ ........ +ĠW ings +ur idad +-st ory +"] )ĊĊ +idel ity +Type Def +G tk +Ġí Į +_M ain +Ġche z +ĠR aven +Ġpay roll +Ġfreel ance +LL U +ĠM end +ed ay +Api ModelProperty +.Form BorderStyle +Ġeconom ist +stan bul +Ġfre ight +-A gent +(m eta +Ġsym metry +Ġ' .. +.C alendar +- aut +g f +p ent +yc lopedia +Ġwish ing +ĊĊĊĊĊĊĊĊ ĊĊĊĊ +Ġgentle man +Ġê ³ += # +Ġlect ures +âĢľ In +Ġ! _ +Ġh b +ĠV endor +Recent ly +_n otes +æıIJ 示 +" My +Headers Height +_S O +Ġunw illing +Ġsuper hero +g io +ps y +ĠPe er +j avax +& apos +ĠCr isis +ord inal +Mem cpy +++++++++ ++++++++ +- val +Ġwork book +- ap += k +Ġmetal lic +_ peer +By PrimaryKey +_S D +u ator +_SH ADER +) Math +.Trans form +Ġc ows +Ph i +ĠC lem +(_ (" +ĠL ud +-d elay +ĠSec urities +ĠOrth odox +Sym fony +(re port +Ġent ertain +E PS +iz oph +ex ual +IR D +ä» İ +Ġl ith +Ġsanit ize +Ġfemin ine +IS BN +.auth entication +_p ipeline +/ constants +ĠCON F +Ġluc r +ric ia +.t tf +.set Content +Ġst an +ore an +ĠL loyd +.raw Value +Ġg or +ĠBrow ns +Re gression +Ġlower ing +na issance +Ġbl ows +Ġam azed +Ġun related +Re views +Ġrub y +ĠMod ifier +Ġgi ants +. thread +Ġcontain ment +ĠStart Coroutine +um at +ore lease +ĠR andy +@ endif +D igest +Ġsubur ban +=" );Ċ +Ġann once +. variable +\F oundation +Ġa cre +V an +Ġt uples +d ns +ĠStand ing +_l arge +Ġbox ing +Support ActionBar +ĠFort une +ĠR um +_m ultiple +arch ical +Ġf write +_ quote +Ġfool ish +Ġcompr ising +Ġо п +- selected +v f +ma id +N ama +(d atetime +Ġindirect ly +g art +fix tures +ch os +ĠH alo +Ġrec urring +- news +v il +ĠNurs ing +- produ +ĠH Q +\Http Foundation +enc i +au en +Ġv y +ocr acy +Ġdeleg ation +Ġas phalt +Ġset Selected +k ok +/ rest +met ics +ĠNS Date +Ġtravel led +Ġrec ib +Ġm ime +CL IENT +ĠG U +ĠH ANDLE +/ Q +[ z +Ġbother ed +ĠBB Q +ç as +_ex amples +_F IN +Ġwhite Color +Ġastr onom +-d ir +Ġsovere ign +Ġb reeze +Ġin ning +ĠEd monton +g li +.blog spot +js x +Ġvers a +ĠMoh ammed +.J ob +-t oggler +Ġп олÑĮзоваÑĤ +ard on +Ġnew born +Ġnav al +note q +Ġtum blr +Ġh entai +ĠTyp ically +Ġlo ot +.S prite +Fl ight +Ġw avelength +-s k +ĠEl le +_ exports +Ġ Ñı +ĠI H +izoph ren +Ġí ģ +_pr imary +Ġmo is +ĠB N +Ġsystem ic +Ġdifer entes +IN CT +Ġ'' ĊĊ +$ q +Widget Item +cl ide +$ file +L emma +/ table +ag rid +ĠMongo DB +int e +Ġapp rent +ÂŃ ing +.D b +Ġà Ĥ +ham mer +=' ';Ċ +Ġbro kers +it lement +sembl ies +E le +{ x +Ġlast name +< - +Ġfl atten +_b and +.R oot +.read FileSync +==== == +.r x +? čĊ +Ġmetaph or +T i +con te +Ġdeb it +Ġcont empt +Cpp Type +æĶ ¯ +Form Field +r atio +os opher +Ġimpl ant +P URE +Ġal ta +_man agement +Ġref ine +ĠCheck Box +ĠChar l +- version +cond itional +ven ues +Ġrif les +Ġoff spring +Ġmill ing +Ġshar ply +Ġunder water +( origin +_ Control +Ġ. $ +Pl ugins +Ġdry ing +Ġillustr ates +- u +Ġveget arian +n pc +He art +; ',Ċ +com ma +te enth +as an +/s pec +_m oves +-m argin +Ġing en +³³ Âł +Ġpro jet +Ġo tra +Ġbr as +. utc +Ġsle pt += sub +ab ilit +post er +Ġs dk +ounc ill +Ġw d +Pre paredStatement +ĠDr um +( attribute +ĠEther net +ĉ DB +Cal ifornia +c ube +[ I +.C reated +ĠH M +Ġtr acing +Forms Module +- you +.c urrency +feed ing +Ġt body +L i +acc ion +n as +Ġtr ouver +N ONE +"} ,čĊ +Ġf tp +With Identifier +pol ate +File Info +Ġpurs ued +ĠĠĠĠčĊ ĠĠĠĠčĊ +DE SCRIPTION +} */Ċ +From Nib +Ġdecor ative +_S SL +(ch at +T LS +Ġsurpr ises +al culate +ĠS plash +( Configuration +ĠS EM +im son +/lib rary +< Double +. 
robot +³³³³ ³³³³ +ĠCP F +ĠUnder standing +Ġcos metic +ĠX t +t ips ++ k +(" ' +ĠP DT +W AR +.get Object +ĠTrad itional +.sl ug +ĠDi pl +=" ", +ĠFil ms +ĠAn im +.h elp +Ġemb assy +ĠBoot s +Ġb unk +-r isk +Ġp ci +Ġ/ \. +ĠI PT +Ġcrash ing +Ġip v +_ ke +ĠRES P +.Log Error +Ġinade quate +I on +ĠF ür +ric ula +Ġshould Be +al ready +']." +G ED +fa q +Ġoption ally +_D is +ĠSuccess ful +ĠC ensus +Ġinc arcer +_C ARD +Ġav iation +ĠG ym +Author ity +.B ean +sh ader +Not Exist +_Text Changed +ĠST OP +( team +" H +w g +Ġgr inder +Ġstri pe +Ġpres ervation +Cl aim +avers al +ware house +target s +Tr ust +Ġal lev +, www +ous se +_ch an +_S ize +system s +Ġobj ection +ĠK ane +Ġcor ros +ĠD SL +Ġu a +ĠM H +ĠStrateg ic +_t cp +Ġê° Ĵ +Ġborrow ed +ĠA ch +ĉ command +Ġg ps +le ston +iche ver +ĠU A +Ġassault ed +Ġspecial izes +ĉ search +Hot el +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ čĊ +ĠP itch +Ġ Ùģ +READ Y +Ġparent al +Ġg éné +Ġdonn ées +Ġdet ain +T ARGET +Ġprotagon ist +Ġclear Interval +ĠIcon Button +ĠGet All +Type Info +E H +âĢľ They +Ġ{ [ +Ġg ag +Ġ Ú© +ĠD ropdown +.f ree +g one +im ens +Ġinst al +ĉc url +_C AN +ĠB one +ï¼ Ķ +ony ms +-g overnment +.binding Navigator +ĠD ans +ĠMc L +( en +>( _ +ÐĴ Ñĭ +.* ;čĊ += j +-c or +S on +.ToolStrip Item +- around +_X ML +end Date +Ġsl ack +Ġrot ated +Ġno qa +Ġc ottage +Ġencontr ar +_s kill +hou ette +! čĊ +. weather +Ġemphas ized +å® ¶ +ĠÑģ пиÑģ +ĠComp iler +( android +ĠâĢ º +. turn +Ġsup pression +_c alls +Ġ* @ +(str len +.h ex +ĠB ills +ĠR SA +Ï Ĥ +ĠEs cape +ement ia +Ġfront end +Ġp int +_ex c +zz o +[ ],Ċ +Ġ"',' " +. Environment +Ġafore mentioned +Ġend ure +prot otype +ther apy +ss i +D eg +_pl ugins +.user Info +Print er +ĠPRO GRAM +Ġru ins +Ġempir ical +Ġcraw l +ĠBo iler +- comment +.sub plot +_ et +Ġ'. ', +min or +ĠCustom s +Ġy aw +under line +ĠCom o +( (' +(m ean +Ġcha que +ĠBlock s +.r ad +ilib rium +Ġweb driver +Ġmel hor +d ana +ĠAb use +ĠSouth west +ĠP aren +PERT IES +ĉ IL +Ġscre am +v u +Ġin comes +Ġn im +Ġl ace +Ġcompens ate +Re verse +D at +_att ack +Ġn our +ach en +ce k +< Func +w ie +com pressed +-m atch +(" ")]Ċ +im ized +. 
orientation +.compare To +Ġmass aggi +Ġìľ Ħ +Ġel bow +Ġant ioxid +undred s +/ tools +ĠR OW +an mar +ĠW ow +_t icket +Program ming +Ġthe or +-re view +() )));Ċ +ĠRichard son +ĠP ocket +] [] +am pp +_ health +ĠP OP +ĠNav al +Gu ess +Ġancest or +.Get All +.local Scale +ĠM apper +Ġaccum ulation +Ġsim ulated +ĠDr ivers +Ġd és +cur ring +Ġele phant +Ġadvert ised +Ġmail box +SH IFT +ĠMon ica +Ġan c +Ġward robe +Ing redients +Ġ|| čĊ +ipp y +Ġantibiot ics +av ings +(c x +ĠFerr ari +ĠAn imator +.d type +rem oved +order by +Ġc res +oc ê +Ġp ym +ĠCirc ular +@ index +ĠW arm +S ay +ĠAss istance +Ġcur tain +ĠMont e +IL ER +ĠC VE +ĠD uck +ĠAll ows +_f ire +ĠDer by +Ġre pos +Ġhttp Client +Ġpsych iat +Ġnow adays +Ġcaut ious +ĠComput ing +Ġcompletion Handler +ĠWel sh +ĠB EST +Ġstress ful +_P E +æĹ¥ æľŁ +ĠData Frame +ĉ Integer +_P rint +M oves +Ġtransform ing +.B atch +y ahoo +Position s +ze j +Ġno od +io res +_ * +Ġcl k +ĠF loyd +Ġh ap +font size +Ġn az +.not ification +ĠDep ression +Ġac ne +*** ĊĊ +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĊ +.cont ents +yn th +ĠStra ight +')}} "> "+ +Ġtoken izer +Ġsovere ignty +ĠP ence +() ");Ċ +Ġpesso as +.G e +ĠIn cluded +Ġpag ina +Ġex posing +е ÑĪ +_SC RIPT +/$ ', +Th umbnail +× Ķ +webElement X +webElementX paths +press ure +ĠCur ry +_C P +OL UTION +ILE S +prot ect +ool a +Work space +{ };Ċ +ĠU NS +Ġsymp athy +ro ker +Ġrem odel +ĉc ell +Ġat op +.Full Name +Ġfa ut +ĠE asily +_d ynamic +Ġfr amed +Ġmot ive +è· ¯ +s am +Ġmar ca +ĠText EditingController +Ġde structor +cre am +Ġr ude +ĠB old +ĠInd igenous +Ġg ens +Ġrel acion +(s ystem +ĠUIF ont +_char ge +UST ER +E V +.N amespace +Ġmer ger +Ġcal loc +g ang +Bad Request +Ġs per +-d esign +Ġâ ĩ +Ch an +Ġorgan ism +, ) += id +_pl ane +ĠC ases +elf ast +ĠLegisl ature +ĠF aker +Ġinv oking +- utils +(). ' +.f ace +Ġguard ian +my Modal +Ġclip board +ĠAT M +Ġpe as +ĠS ylv +.c alc +ĠContact s +int Value +Ġmodify ing +ĠBar b +. loss +_per centage +Ask ed +(l st +ategor ical +- files +ĠRoman ia +.A c +Ġh ai +ĠF lying +Ġ ż +j p +ĠTr ainer +. arc +_de g +Ġtrace back +Or Fail +F LOW +. old +oy a +g mt +is empty +Ġvacc ination +Ġob solete +recogn ized +Ġru ined +ĠRe in +ĠTr acking +xf b +ا ÛĮ +Ġvæ re +Ġbr yster +ĠIT S +Ġdest iny +Ġsw ear +Ġred es +Ġcl f +Ġfl ipped +ĉ head +Bl uetooth +ĠOver rides +: Boolean +_ = +_l r +sp awn +: index +VAL UES +is key +? 
");Ċ +.syn thetic +ĠCheck ing +struct ures +ip ing +Ġvoc als +- Up +ĠManufact urers +ĠMar riage +代 çłģ +Ġgar ner +_C lient +par allel +RI END +Ġvine gar +seg ue +J B +Ġcontact ing +ĠCar roll +Ġout reach +t ensor +_var iant +Ġthe at +lic able +{ | +t iny +_ letter +Ġp encil +HeadersHeight SizeMode +ilt ro +.auto configure +.d rag +.use State +ĠB MI +h int +Com pile +* \ +en ary +Ġl vl +.C ache ++ =" +_t v +ruit ment +Ġf read +Art icles +f ila +Ġpack aged +âĺ Ĩ +AT HER +ĠPl anned +s cheme +Ġdi ary +Ġoff enses +/ F +ĠSt ick +Ġc erc +ĠS lee +ĉĉ ĠĠĠĠĠĠĠĠ +< Image +Ġè® ¾ +- editor +pie ces +ĠD rama +Ġ// //////////////// +ĠT asks +AR C +g ateway +.get cwd +.M etadata +Ġguess ing +åľ° åĿĢ +Ġsm arter +ĠGet Enumerator +Ġe fter +/ operators +ĠGL float +Ġf ør +Ġop aque +ä¿Ŀ åŃĺ +Sp read +SY STEM +Ġinv ersion +ĠBasket ball +Ġsim ulations +Ġden ies +Ġa vez +_list ener +Ġenh ancing +ĠMy th +ĠL akers +_M D +Nd Ex +D ATABASE +Ġt á» +ar th +[ left +Ġcontest s +st ile +(K ERN +_f c +_p m +Ġpres idents +Ġhospital ity +Ġfade In +RO PERTY +_m aps +ĠDefinition s +Ġassess ing +Ġus ar +Ġquant itative +mo z +Be autiful +[ (( +b ons +f requency +Cont ain +Ġpuzz les +ĠCast ro +Ġv illa +Ġkind ly +Font Awesome +ern a +epoch s +_dat as +ĉ ip +.p adding +ĠCont est +Ġed itions +Ġdispro portion +ĠI CO +Ġcome back += value +ri ad +-s ort +Sub mitted +(n etwork +ĠC el +Ġinstall ment +l ashes +.List View +ĠV atican +(Media Type +IV ED +reach able +: Is +ĠC ITY +äº ¬ +ĠHelp ful +Ġba ÅŁ +% čĊ +Ġpsych iatric +Ġrec ycled +FORM AT +ĠG row +b ine +G it +.s s +ĠWe apons +ĠSt y +_ arrow +* self +ire ment +Ġdeg li +App Delegate +_b anner +Ġcoordin ated +ĠWeb cam +Ġcelebr ations +. act +******************************** **************** +( show +Ġweek day +Ġconc erts +ол н +cl in +Ġcr on +ĠN im +.set Vertical +ĠEll en +س ت +ĠS AM +E ff +g z +ste am +Ġant ique +ph ysical +ĠForm Data +.set ter +ĠPO INT +B on +Ġflav our +erv ention +_ENT ITY +ĉ ĠĠĠĠĠĠĠĠĠĠĠĠ +Ġintr insic +Ġæ İ +append To +aram el +) ]) +ĠRecomm end +) m +OutOf Range +Ġkn ight +Ġsat ellites +ĠTit ans +Ġweigh ed +ĠD ana +e ase +Ġs ip +S IM +ĠDevelop ers +mal ink +/ check +_P LL +n ung +Ġdry er += A +.d w +_S QL +Ġsub plot +D ROP +Ġprot otypes +Ġhour ly +display Name +Ġas i +ĠViol ence +Ġastr onaut +Ġdat atype +Ġinformation al +Ġinvestig ative +etermin ed +ren al +; '> +ĉc ol +V G +_ boolean +re cent +Ġ* )ĊĊ +ĠRain bow +om men +Ġl ur +Ġopp ression +(", ");Ċ +ĠFac ility +DEF INED +Ġne on +Ġoff ender +AF P +ĠClean ing +[] ): +Ġund ocumented +.Re positories +ĠG uitar +аÑģÑģ ив +Sk ills +Ġtestim on +rypt ography +ĠAm ber +ĠSt alin +Ġl one +Ġap enas +Ġdies es +ĠAr duino +è½ ¬ +== - +_A ct +Ġc oded +âĸ ł +amb urger +-link s +Ġarm our +.H igh +get Content +st ag +Ġhe ck +ĠìĹ Ĩ +ĠMc Connell +ĠCon cert +ĠAl loc +ä re +.replace All +Ġpart itions +rot t +ĠF le +_T REE +reason able +ĠReport ing +Ġbillion aire +s cores +min s +- eye +M ORE +ab ort +ĠSW T +Ġin verted +ĠTe achers +; n +Ġast ro +н ов +ани ÑĨ +product o +c ountries +ĠO wen +Ġcont amination +Ġv ibe +ĠEll i +.s cript +ĠOl ive +D MA +v ier +: semicolon +-m odule +gress ive +ag u +_ players +Ġresult ados +start ed +scroll Top +==== = +Ġweigh ing +Ġ[[ [ +z ahl +( NS +ĠAssert ion +le ague +.setText Color +ĉ Message +Ġmom s +_A F +. 
wh +AL S +Ġaut re +] ĊĊĊĊ +.op acity +ĠBudd hist +Ġde af +ĠOrgan isation +(G lobal +ens ch +Ġhead ache +ĠAli en +_in ode +ĠSt ark +Ġæ ī +-l nd +ore f +_fe at +Ġpedest rian +Ġnom inal +Ġbal loon +Ġspr ites +Prototype Of +ĠA post +ĠF EATURE +O H +Ġre cess +ĠDon na +con sumer +$ GLOBALS +ĠG IF +- frame +In icio +Ġpass ages +Date String +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠ +.by te +B ug +initial izer +p kt +od ium +ĠD ER +. ops +ler i +Ġgift ed +Ġdet ach +ter rain +elt ers +ãģ ı +. loader +ĠN GO +str ncmp +K h +(font Size +ro cket +Ġpreced ent +ĠAur ora +ĠEx periment +is phere +Enc oded +ĠâĢĵ ĊĊ +Ġpy ramid +ĠAnn iversary +of il +ë Ł +( plugin +C oeff +Ġcooper ate +Ġpredomin antly +IS M +Ph rase +_DEF INE +Fl ip +AMIL Y +ĠMark ets +ĠStream Reader +ĠComb ine +Ġmanus cript +z za +, tp +Wh atever +IT ICAL +ighb our +Data Provider +.Text ure +priv acy +.S DK +Ġre charge +Ġc pp +ĠC FG +(h older +(p y +m ot +Ġsav oir +ĠR osa +ĠPC s +Ġí Ļ +.her oku +Ġf ren +ĠR iley +ag ate +Ġs ond +.x lsx +Ġh acked +st ad +G i +Ġsan ity +ĠSql DataAdapter +... ", +ĠP ussy +Ġ **************** +Ġhass le +_P ARENT +ĠU AE +Ġbegin ners +( Client +Ġstatist ically +.h our +ed elta +Ġtr action +uel ve +ar at +Ġsa una +IN VALID +Ġindict ment +AL LE +Ġdiss ent +ĠTyp ography +Ġintention al +s it +ĠAn imals +Ġcoun tryside +Ġu art +} \" +Ġseam less +¾ 示 +Ġaut os +Ġ"' ";Ċ +Fl ush +ANN OT +Ġal gebra +ass oc +ĠW aters +Ġprepar ations +ron ym +[, ] +S ans +Ġarm ies +ipe g +Ġcream y +. art +et re +ĠAn imated +Ġun pleasant +eme an +g reat +i Äħ +ĠEar lier +Ġch ic +Ġpres erving +(ex ec +ĠInvest igation +ĉG PIO +Ġrig orous +ij o += num +Ġtool Strip +) set ++" & +ĠAcc eler +Ġdevelopment al +is posable +Ġflaw ed +re ne +Up dating +Ġwatch dog +Ġden ominator +Ġsubur bs +Ġ... 
) +Ġconv ictions +c losure +.I P +Ġtransl ates +.sw t +.Tr ace +Ġmet tre +.is Enabled +ĠEffect ive +.to Int +Ġen chant +Ġst unned +Ġpo i +/ code +ad m +.datab inding +ĠL orem +________________________________ ________________________________ +Ġled ger +Ġcar a +ĠG ir +Ġwa its +Un o +Ġc wd +è¾ ij +ĠT Result +Ġre jo +Ġem itted +ĠWest minster +ä¸Ģ 个 +ne k +_T is +Ġen act +ĉ with +org ia +Ġj ue +Per form +SP ATH +.top ic +ĠD aten +Ạ§ +Ġsit io +_M M +" So +b ial +Ġsc oped +Re quires +ĠT OTAL +ĠCh ancellor +( contents +Ġste alth +dev ices +-p ass +ili h +ĠMal colm +ĠDep ot +Ġconfig ur +a ussian +_con straint +в еÑĤ +G RA +ĠR ates +.dataGridView TextBoxColumn +ĠNob el +it ics +Ġignor ant +ĠReport er +ĠEb ola +ĠSh ock +_re lation +ĠNin ja +) c +Ġt icker +.is Checked +ĠSup pliers +ĠRap id +Level s +âĤ¬ âĦ¢ +ĉ queue +Ġch op +ĠUn ix +re ject +-c alendar +(s ort +è ne +erc icio +Ġh ect +CALL TYPE +rou pon +Ġrent als +auth ors +{ name +ĠF IFO +Ġl assen +ĠN ous +Ġsn apped +Ġfert ility +" log +click ed +Ġplant ing +Ġg b +/ output +PE AT +Ġc ategoria +Ġb ach +Prof essor +in th +"] čĊ +Rec order +ser de +ĠTrans mission +tr ad +Ġtur bo +_VER TEX +\ Event +il ver +Ġbod ily +ĠS ources +Ġkill ings +.xr TableCell +Ġfold ed +/ legal +un er +ĠR ifle +ĠM IDI +_Selected IndexChanged +.Size Type +ĠWeb Socket +Ġsele ccion +S and +ot ros +Ġenv ision +/ etc +ĠMel issa +Sp ot +но е +_ ARM +At tempt +ĠB I +ãģ Ķ +ĠD U +Ġback lash +str ide +/ classes +Ġtext Color +_st aff +ob lin +agent a +.c ollections +ill age +' čĊčĊ +fl atten +_s ales +_M ASTER +T W +_d a +P itch +ph ies +Ġz ombies +ĠV ERY +ĠPharm acy +Ġprogress Bar +Ġhas htag +S idebar +@ stop +(p c +ол ж +MA KE +ĠCor on +Ġkv inner +ĠM aid +b ob +.title Label +Ġsuccess es +ĠDemocr acy +ĠSurg ery +Ġcou gar +Ġcur so +Ġl oro +ist ency +Sen ior +æ k +ĠA AA +ĠBO OK +к о +W STR +Ġ*/ ,Ċ +oy al +.v ector +ĠS PEC +SS F +Ġcomp uls +ĠAppe als +ĠW inston +ĠMock ito +con trib +. available +entity Manager +ari as +_s ale +_r s +Ġdec oding +Ġloc ator +ol ith +Ġk ol +Ġasc ii +ĠR ut +/ interface +ĉĉĉĉĉĉ ĠĠĠ +ĠN umer +.fl ip +-d el +Ġbol ster +on omic +Ġz m +L G +Find By +Ġadapt ive +lo o +Ġv ue +(re verse +_c anvas +. roles +ific ado +ven ient +" As +ĠEn tr +al igned +Ġbere its +/// ĊĊ +.g wt +. employee +_cl i +Ġanticip ate +éĻ IJ +Ġp ik +Ġmush rooms +(t t +Ġo ma +ĠSan chez +_g oogle +. Valid +ĠFile Name +iv ative +k ed +-w ar +Ġm aturity +и д +Ġmin er +Reduc ers +ĠLat Lng +_ST D +D igits +Cal c +-up load +Ġhand ic +ี à¹Ī +egr ated +ĠST M +C lients +ĠTur bo +SY NC +Ġphotograph ers +. Out +.char acter +B UILD +.un lock +Ġar ises +ĠCommand s +(" ");čĊ +_F ORE +; ', ++" ' +. Images +") { +ĠM eyer +Ġneg atively +ĠD LL +Ġex e +Ġdef iciency +Ġwild ly +-s witch +con struction +Ġexception ally +ĠL iz +/j ava +Ġtheir s +ĠCont emporary +l is +.fill Rect +ĠN FC +Ġre he +(num bers +Ġr aster +Ġfig uring +Ġshow c +ĠJ ill +Ġarc ade +ĠConstruct s +md l +(' | +Ġident ifiers +Ġst ellar +( Connection +Ġ" {{ +y or +(m ysqli +Ġdo ve +Of Birth +.dis connect +_h i +Ġzw ischen +ĠGr und +i ros +_A rray +.on click +ans om +An swers +ĉ remove +F a +Ġhur ry +-in f +Ġget Class +ĠReg ulation +ĠFLAG S +m isc +K en +_ heading +G Hz +- entry +Ġbi ography +S ig +-m f +Watch er +âĢľ A +} px +Ġsp icy +_s q +L ost +(tr ack +а ли +Desc ending +< bits +qu ine +ĠAdv oc +_S N +ĠHann ah +PO P +Ġem itter +Ġc yn +ĠC AD +? ). +/ set +ĠS ister +ĠEnd point +Ġmen or +Ġinter p +r k +id le +Ġout fits +. 
+[byte-level BPE tokenizer merge rules: several thousand token-pair entries, one merge per line]
+Ġbow ls +/b lock +_log ging +Ġmeth ane +Ġhorn s +Ġwonder fully +Ġalter ations +Ġex ile +ls en +_p ause +_L ANGUAGE +ĠUS DA +_m ysql +_AM OUNT +ĠL IFE +Ġyoung sters +Ġri ots +[ E +Ġun forgettable +, },Ċ +Dis posed +ĠAss assin +UN G +ĠNew sp +User Service +: aload ++ ', +Ġsett lers +Ġscre ams +Ġincon venience +.R otate +Ġj ars +ĠP uzzle +Ġm est +ars i +ĠSh arma +| ( +.d s +ĠSac red +_e vt +Ġexpress es +Ġh och +ĠD uch +.c alls +th r +ĠShe ffield +.Alert Dialog +Ġrad ically +Ġtr ous +Ġprev ailing +ĠWW II +âĢĻ n +ens ely +ĠY esterday +ĠSir ius +Ġkill ers +ĠF FT +Ġo val +') :čĊ +Ġìłķ ë³´ +our age +ĠCheck box +Work book +.def er +_f loor +Ġc ouncill +Ġnors ke +mo il +ore a +Ġmarket ed +_S UR +x AA +Ġst ained +e ut +ĠM eng +Ġi eee +. extern +eg ie +Ġr app +ĠPy ongyang +' class +M ob +Ġinitial Value +_w ave +Ġj ab +Ġmascul ine +Ġampl ifier +Ġt ty +Path Component +_ xt +ĠG FP +/ sec +ĉdis patch +mark down +ĠS chn +bo le +· · +mouse move +Ġerr Msg +Ġas ign +_m ono +To Selector +ĠZ u +(R ect +ĠError Code +lat in +ang ible +v tk +CG Size +P okemon +Ġclass mates +Ġattract s +ĠT atto +ult an +ol óg +Ġhalt ed +ठ¨ +ĠK art +Ġ ue +_Init Structure +Test Class +ĠAir bnb +_ ", +Ġchar coal +Ġip c +ĠSt retch +.g lide +lates AutoresizingMaskIntoConstraints +Ġpot ion +ITT LE +Ġcount ert +_h d +pre pared +Ad s +ĠV ampire +rob ots +.Create Index +Status Label +Ġt ucked +af ür +U t +Ġswe ater +_F N +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĉ +ata ka +Ġeyeb rows +ac oes +ud en +.LinearLayout Manager +Ġsw ay +Ġmult in +() )))Ċ +ĠNS UInteger +ĠMy Base +Part ner +uts chen +ĠC ater +.setBackground Color +Ġaccompl ishment +_pro blem +.d td +Ġpage Number +Ġj ackets +Ġcro pped +u els +ĠH ep +Ġc apped +* Math +_callback s +Ġpub b +ĠBrun swick +.res pond +[" _ +Ġbed ding +hyth m +O X +(s peed +Ġpestic ides +Ġ---- --- +.Bl ue +Ġnood les +ĠGo es +Ġs aver +o xy +_com pletion +ĠSw inger +Ġget Date +Ġmind ed +int egration +ĠLot us +(st op +(', ');Ċ +Ġflood s +ĠWork flow +Ġerupt ed +Mac ro +ĠSau ce +Ġevent Name +\ Input +Break ing +ĉ when +_p w +IND ER +ĠWell ness +Ġvox el +ĠM ell +ĠM EDIA +SE NS +ĠFund s +ĠM ild +< Array +- this +ump ed +/f w +ĠDb Context +W I +girl s +H OW +'); ?>Ċ +Ġtempt ing +Ġtest ament +Ġb ible +Ġconsult ed +ĠIndex Error +è¨ ĺ +Ġkey pad +izz o +( ok +Ġwhats app +ĠRemote Exception +Ġteam ed +âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ +» , +Ġget Time +di ag +iss y +Ġh ed +Ġkn ots +j om +Ġfun nel +-m ails +Ġexport ing +ĠV L +ĠK arn +ĠBuddh ism +ĠAll an +_R ADIUS +Ġw ording +ĠFor get +ĠCor ona +ip hy +Ġlim burg +ugg y +ĠUser Repository +im in +(e le +Ġlabel led +ç¤ ¾ +ĠH erman +.q q +Ġ" ));Ċ +ie ber +.Trans late +ry n +Ġdes env +um d +Sim ply +ĉm ode +R pc +ĠVal encia +Ġstaff ers +Ġsel v +ĠSpi ke +Ġdel ic +Ġer u +_D T +J udge +á» ķ +ĠBas in +.m utable +" url +Ġtar iff +ĠSlee ve +Ġfl are +.drop out +Ġbr ides +)) ,čĊ +_con straints +de struct +Out line +Ġdisappe ars +_lock ed +ĠNS LocalizedString +ck e +ĉ null +ad resse +Ġto pping +ĠJ oker +b ishop +но ÑģÑĤÑĮ +and ering +_ amp += time +_S pace +_P ULL +' = +Ġant iqu +Ġc ach +___ ĊĊ +ON ES +о Ñı +Ġun read +.p olicy +oooo oooo +ëŁ ¬ +Ġu sted +ĠRe ce +Ġal lem +ãĥ¼ ãĤ¹ +ĠThought s +ve illance +istr ate +_l ane +Ġfam ed +.Get Name +Ġsmo other +ĠQual ified +az ers +_ geo +F ax +ĠM inds +ĠR aises +Ġtrans cripts +Con versation +Ġremark ed +ëĤ ĺ +d ling +Ġdeploy ing +Ġshared Application +Ġk p +FontAwesome Icon +_d ummy +reib en +ĠJane iro +Direction s +.get Bean +s ass +Ġcommand ers +v ation +error Code +ĠAl loy +.local ized +Ð ij +Ġdish washer +ĠSou p +N u +_D efault +Ġune ven +Ġ/> ";Ċ +-B 
ased +Ġseam lessly +- null +ĠX C +Ġst ew +(d elay +AT ORS +ĠWhe eler +" H +e ast +. air +âĢľ But +Object Context +success fully +_l and +Ġfold s +_CO ORD +Ġsub po +.get Address +in str +Material s +Ñĥ ÑģÑĤ +de posit +-l ast +_GR AY += find +Ġmut ant +Ġlesb ienne +let cher +RO UGH +ure ka +.c apture +Ġen n +Ġ([ [ +ĠFl u +Ġtask Id +ĠHus sein +.f older +Ġa usterity +ISTR ATION +_ Impl +注 æĦı +Ġdec ree +- chat +Ġimp lication +Ġguess es +ul kan +An alytics +. plus +COM MAND +е ли +» ĊĊ +_S ITE +Ġequal To +Support FragmentManager +ĠRec ording +å®Į æĪIJ +Ġbag gage +Ġpitch ers +ĠE h +o que +ĉc nt +Ġ=> $ +/ foo +IR A +ĠSat ellite +bor ah +Ġ}} "Ċ +ĠEnd s +ĠSpr ay +, param +.Ch rome +* q +th ought +ibr ated +Ġth ieves +Ġbenefici aries +Enter ed +ottes ville +Ġveter in +By ID +qu ipe +um ption +- unit +Execution Context +@ s +ĠG iov +.Tool Tip +_f riend +( attributes +Ġdump ing +ĠJ C +_D OCUMENT +ĠArm our +( insert +.Horizontal Alignment +ĠQ ed +ãģĦ ãģ¾ãģĻ +/g it +ĠY YYY +ĠCard iff +Ġap a +organ ic +ĠWhere as +Ġæ Ŀ +ĠM ia +Ġdemol ition +Ġsc ars +Ġp ai +Ġre tries +Ġr q +ĠDen is +( Utils +Ġallev iate +ĠP IC +id ue +Ġacknowled ging +Ġ// //////////////////////////////// +ç¡® å®ļ +Ä « +\ Json +.b inary +Ġx type +sign als +ĠAp pearance +& r +} s +C i +ĠI llum +por ate +h og +Ġindex Of +\ Command +_par allel +ĠSher lock +í ĥ +Ġ" ")čĊ +//////////////////////////////////////////////////////////////// //////////////////////////////// +Ġcritic ize +ĠSo ap +ĠMatch er +Ġgr illed +* T +Ġad ore +ull ing +Ġjed och +_ref s +lean up +ĠJ AXB +Ġro ses +ĠL iam +size i +Ġget char +Ġtar de +-to oltip +Ġqual ifier +ĠInter mediate +_W indow +ĠMal ta +Dis connect +ew here +Camp o +Ġirr ational +led o +ĠD N +ARG V +Ġout ro +Ġth irteen +Jose ph +M AR +/g l +J ess +ĠPsych iat +Ġpadding Bottom +- loop +/ fonts +_se en +Te ams +React DOM +(m an +(x path +.get SimpleName +>( * +ĠP vt +Ġel ders +Ġp ies +.user Agent +- region +ĠGree ks +(f ragment +st u +Ġcouncil s +Ġst amina +ĠGod dess +è ¥¿ +Ġphilosoph ers +Ġpers one +ĠL ose +ĠCL R +ĠD ocs +Ġso ak +ĠHOLD ER +Ġb ells +hash Code +R ATE +_WE IGHT +in ous +end ra +oph obic +Ġpro se +Ġfin ely +/o auth +(s pace +ad ge +ĠM ama +Ġstring Buffer +Ġst int +Ġmis ma +Ġvill ains +ĠCrime a +Ġdipl oma +Ġпо Ñģл +ĠBe a +(j oin +Ġíķ ´ +CH AT +per ing +ĠC ros +Ġmon keys +Ġpred s +yl a +,, , +Ġvibr ator +ĠN U +åħ Ī +f ant +z et +Ġb ietet +un ft +sw orth +.F low +Ġpsy ched +ĠContin ental +> t +Ġqu ilt +. UP +Ġexpans ive +Dis pose +(l anguage +C aps +_Z ONE +Ġrec ycle +ĠMan aged +current Color +.b roadcast +sign In +.p rom +ll u +ue blo +Ġpunch es +Ġautom at +Ġassign ing +Ġcreate User +ĠAll ied +Ġconduct or +Ĥ ¨ +Ġs addle +Ġd ni +omed ical +-W est +Positive Button +Ġit alic +? [ +(tr igger +Ġele phants +":" "," +Ġcal iber +raft ed +d igits +Ġmar shal +mill iseconds +mark ers +m om +/ place +Ġhol istic +: t +# , +Ġb oto +Ġnause a +ĠSh ooting +ite ch +Ġtext Status +< Class +ĠDes cribe +Ġbuff et +g il +Ġlog its +std call +mod s +ĠSk ull +ĠB are +h ope +ĠIn tr +F air +ĉ pt +Ġacompan h +Ġf kk +_r pc +Inst alled +_ ans +.get Minutes +â̦ "ĊĊ +- thread +Ġpres chool +AIL S +Ġdiff ic +( convert +ĠN ath +ĠDO J +Ġreg imes +Ġenthusi ast +Ġwarrant ies +Ġfasc inated +_b inding +_N ot +oft en +_R W +/m ail +Ġtitle Label +Ġvill agers +ĠJ iang +Ġsw agger +.Row Index +_img s +rap y +VER AGE +. 
Up +Ġno op +c io +ĉ ST +Ġdecre ment +Ġmagn esium +_ rotate +S it +Ġnieu we +Ġter med +íķ ©ëĭĪëĭ¤ +Ġur g +_t ouch +Ġsw arm +Ġcl ave +th est +ĠL af +H X +ĠH ulk +Ġplaint ext +ĠSof a +get Session +L ed +Ġecosystem s +he i +ĠK ills +Ġhus bands +Ñħ ÑĢан +(d om +_t iles +Nib Name +Ġdon ating +. acc +Ġlifes pan +.b n +_RG CTX +æ ¥ +ans en +Ġmod elling +Layout Params +ĠonChange Text +rs a +- location +.P e +(b us +(s ong +Ġprodu k +ĠSH OULD +ĠC J +Ġs os +ĠHome Controller +.load ed +(D ocument +.s ocial +t iles +Ġl ame += df +.parse Long +Ġpr ac +Ġdet ox +ĠV E +Ġpunt os +Ġdo ctr +Ġan cor +CA PE +Ġc mb +çĦ ¶ +*) " +:// / +Value Type +Ġmort gages +; q +ĠRock ets +s port +UG C +ct s +ãĤ ģ +ie ur +ĠAppe al +(n b +//////////////////////////////////////////////// //////// +IM ATION +ĠC res +ĠMan ip +C ause +at ypes +man ufacturer +# ---------------------------------------------------------------------------- +Ġsp or +es on +Ġpun ched +Ġbook marks +ĠBul k +Complete Listener +ĠTalk ing +ĠEr nest +Ġrub bish +k ills +ĠDE FIN +Ġneighbour ing +ar lo +ĠP CA +ĉm atrix +lo k +Ġat las +ĠG ur +Ġw yn +-n egative +Ġt ul +Ġre lic +ĠV oltage +ĠPre is +ĠJ NICALL +ĠPM ID +ak et +ĉ attr +Ġet iqu +ĠM J +ĠG mail +cl r +_exec ution +éĶ ® +pos itor +. af +N r +Ge orgia +Top ology +Ġperch é +Ġmus lim +Ġepid emi +Ġsab ot +act us +Ġë ĮĢ +ĠIO Error +. est +p refs +ĠKr ish +.Read Key +NAS A +u ção +_D b +umer ator +W ide +(st atement +.end point +.... ..... +Ġ[ * +stream s +m time +P x +at r +Ġt pl +R oman +Ġscen ic +.n z +ĠSe conds +sub menu +Ġìĭ ¤í +_b undle +Ġde ÄŁ +ĠS isters +pre ferences +Ġport a +Ad visor +max Length +ĠG REAT +__ (Ċ +ole st +ĠLabel s +Ġen fer +ĠĠĠĠĠĠ ĊĊ +ĠThe ft +_F ILL +ĠW ise +) application +un ami +> ())Ċ +ADD RESS +B ST +et zt +ĠQ gs +S ense +Exception Handler +ĠCh u +.get OwnProperty +Ġexerc ised +iot ic +ĠRe leases +Ġp interest +ol ie +is oft +Ġsequ encing +Ġpad re +] ));čĊ +(r adius +.m ed +aint ies +.Object Model +Ġem ple +Ġseg uro +St ars +Ġqual itative +lem n +á» ± +> "). +Ġg x +-c ert +ĠAST M +Ġfull name +Ġte lemetry +ĠCamb odia +_ ul +ĠCl are +C USTOM +Q C +ĠUn s +ĠHTTP S +ĠPark inson +ancy box +',' . +T ue +.get Last +Ġab i +Äħ d +A st +ĠEd iting +.Un ity +j mp +Ġm ats +Ġshared Preferences +Capt ain +.page Size +Ġr tl +Ġan meld +Runtime Object +Ġdemand e +(" ; +se ite +-head ed +ĠK ra +ĠF ONT +` \ +Class NotFoundException +. avg +atic al +A j +Ġpermit ting +Pro j +ERR Q +Ġcre ampie +ĠBuy er +-mod ules +ĠSund ays +| `Ċ +Ġday time +Ġ+ ( +Ġgl itch +ĠOper and +Ġtox ins +iny a +D NS +ĠS as +C ake +ĠNation als +.add To +Ġs inking +Ġcompreh ension +Ġsc or +ag ements +Ġt ard +Ġmarch ing +ĠM TV +Ġs ane +Create Info +Ạ¯ +Ġend Index +ĉ layout +ĠåIJ į +S ITE +ĠT HERE +Ġ[ {' +opath ic +Ġtrans mitter +/ body +Ġp und +ĠC losing +Ġset attr +Ġbound ed +At las +sum ing +(t imes +par er +yn om +fe it +Ġf rem +- leg +ĠBr as +> # +Ġì¶ ľëł¥ +ĠIN STANCE +ĠC ouch +_host s +lik elihood +.M arker +ĠM asks +Ġcere al +util ities +Ġelement al +Ġdist orted +in active +c ry +W L +UPPORT ED +.Th rows +/s chema +ser ie +." 
', +ĠBened ict +-p icker +ig gs +ĠPir ate +åij¨ æľŁ +ĠTh ema +ĠSouth ampton +Ġarray With +ĠPaul a +Ġpredict or +- Ass +.user id +Ġper i +Ġexagger ated +ur ate +arse ille +ĠCon cent +ĠP ik +Ġ@ _;ĊĊ +Ġform ations +Ġden omin +"/> .Ċ +ended or +Ġpan cre +Ġam t +Ġon Resume +on Delete +ĠB CH +) (" +m ovement +Ġpot assium + čĊčĊ +ĠMah m +} ";ĊĊ +Ġd q +ĠPublish ers +ĠAm pl +ĠDani elle +Ġt ern +èµ · +no ÅĽÄĩ +e in +ĠAsync Storage +un ger +rou w +Ġsc issors +/ assert +.b ucket +/ archive +_M an +Ġint oler +Ġ() => +ĠÐĴ Ñĭ +Ġsa i +.x y +." čĊ +Ġur inary +es ub +IST ICS +ĠÎ º +Ġcompl iments +Ġtypings Japgolly +ih ar +Exp ansion +ĠS erving +_st udents +ĠX BOOLE +( il +Ġì² ĺ +Ġj ó +(t ol +( JS +ĉC G +ĠD RAW +tw ig +Ġo at +_sm ooth +ĠC SL +Ġos ob +Ġens uing +Ġbank er +ĠBack pack +_p ing +Ġwish list += ax +ĉĠĠĠ Ċ +Dis ney +stead y +"> % +Ġproph ets +ĠZ X +Ġminimal ist +.PL AIN +Se attle +. ordinal +ĠPI PE +Ġret orna +Ġjug ador +ĠB ret +ĠâĶ ľ +Ġpl ush +UL ATOR +Sort ing +.grid y +ect omy +_ activ +r ack +Inter active +ĠAntar ctica +Ġv engeance +en so +_k nown +up plier +.Mod ules +ĠConnection State +éļ IJèĹı +@ FindBy +Ġpl acer +\ model +< ()> +.is Successful +-g ood +b z +ĠDr aco +Ass istant +-ex tra +аб лиÑĨ +Ġhyp ocrisy +Ġt st +ĠA gr +$ txt +Ġlog istic +lic ensed +ĠH of +Ġt at +( iv +Ġinto xic +post Id +_st rike +Ġhum iliation +pc odes +" sync +(rec ipe ++ N +rent e +ĉ Client +ycop g +ĠZur ich +ĠPro files +C ountries +Ġp ict +Ġroll out +requ encies +Ġpatch ed +Ġcar tridges +Ġsh ading +J ar +Ġsalv age +ĠTax es +Ġstand by +apor an +E igen +. angular +ĠN ested +äº « +Ġis Visible +ĠDw ight +_BR ANCH +.D elay +Ġk end +Ġfacilit ated +.flat Map +Ġs anta +ĉS end +/m essages +Ġof Type +ĉs wap +# plt +ĠTur ks +N ES +Ġprogress ively +ĠRes idence +ĠT REE +Ġno en +d io +Ġn elle +Ġsog ar +itt i +week ly +Ġambigu ity +_Set tings +W are +.ne o +_D ST +Ġæĸ ¹ +pre p +lob by +@ email +/m ovie +Ġfun kc +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ Ċ +ÂŃ s +Ġguard ians +- pos +Ġconfig uring +ĠC PS +ĠDe us +Ġvidé os +_ empresa +Ġsl apped +< Model +Ġunders cores +U h +.access Token +SET S +ĠS parse +ĠCal d +: path +ĠS ervers += batch +Ġkn itting +Ġx a +Ġsearch Bar +Ġsn ag +Ġinf used +.b am +le ver +Ġtax onomy +Ã İ +Ġatt aching +Ġh ern +_N OP +Click able +(P arse +ĠDynam o +-b uilder +Ġdere g +Ġsc attering +è¿Ľ è¡Į +an zi +ĠShe pard +"> ',Ċ +_X DECREF +ĠBuzz Feed +_M ARGIN +P LOY +.sm all +Ġm imeType +Ġh olog +ĉc amera +li as +Ġsusp ense +ody nam +b au +Ġgrave yard +_n amed +":" ' +Ġ******************************** **************** +Ġgame Over +ĠLENG TH +ĉs creen +Ġdo InBackground +_depend encies +Ġr tc +/ up +_ ROM +H all +Ġdef iciencies +( te +' # +_e quiv +Ġpre order +ĠA xe +ом Ñĥ +.send File +Ġfil t +ĠLim its +ĠCaval iers +.dis count +âĨ IJ +ĠW it +QRST UV +Ġi j +Ġt egen +Ġ: ", +diff iculty +p unkt +ĠEmail s +ch lor +(f un +.U int +ĠSt all +_ verified +u D +File Type +Ġple asures +Ġjud iciary +Ġsh am +ip ur +_PL US +off ers +( foo +_G T +ĉc ore +ENT ION +ĠLib eration +Command Line +_de partment +.A r +_ne ighbor +ĠSub mitted +ĠĊ +Ġdro its +Ġhomosexual s +Ġab duction +ĉw idget +$ headers +ĠD AR +Ġfl a +th reat +Ġlou is +.Get Property +" Just +(f rames +ry o +prof ession +| i +íķ´ ìĦľ +(s v +Ġun recognized +I onic +F ashion +Screen State +ĠIn coming +Not Nil +Ġsync ing +em ie +Ġtherm o +_pro cs +Ġincons istency +rel igious +.m j +Ġperson n +Ġmoment os +or arily +Ġæ Ĭ +_ne urons +Ill ustr +im oto +il ik +ĠW oj +Tr ading +Ġapp are +Ġentre prises +ach at +Ġ ¬ +Ġne igh +BUTTON DOWN +ĠMah er +ag han +-h ash +" f +Ġclient ele +.add Button +ĉ SP +Q 
i +Ġgr ated +POS ITE +: > +ĠHow ell +ĠCompar ative +ĠIS C +ÂŃ i +O cean +D avis +ĠFil me +W ins +ĠJ IT +oc cer +ĠC orm +ENCH MARK +rch ive +ica ção +Ġm ata +Ġchild birth +ĠOption ally +En s +Ġx http +Ġel ucid +_Osc InitStruct +)) ):Ċ +Ġint uit +ĠDon ate +Ġcorrel ates +> Delete +Ġequ ipe +Ġb oca +Ġinfl atable +er ah +ĠDateTime Kind +Ġcal ves +\ Lib +Ġem lrt +ĠTr ilogy +ĠP anc +ĠD uis +ĠpelÃŃcul a +WAR DS +_DE TECT +-section al +dh cp +For Row +-de struct +ĠPres enter +/s lick +, on +ĠCit adel +logged in +_sub type +Ġsig ue +Ġc uring +ĠFire wall +Ġfluores cence +ĠItal ians +иÑĤ ÑģÑı +.get Style +In Seconds +j ie +-S mith +Ġx link +Ġsub missive +он ÑĤ +arbon ate +ĠF aul +_go als +ĠCommission ers +chart Instance +_POST FIELDS +Ġmed ial +Ġman os +Ġdel t +sv m +.Ap is +ep hy +Ġasym pt +Ġapp Delegate +Ġimpro bable +ck a +sim d +/ Error +. âĢĵ +ĠP TS +de er +Ġs ina +m agnitude +ID ADE +'] }' +Ġmay ores +ĉ comment +/ console +" @ +v olt +.s ell +ĠM acy +Ġmel od +Ġim ágenes +_ch g +Ġin out +ident e +) '),Ċ +d ni +.b lob +Ġtyp ography +Ġe erie +_O ID +pes an +aj an +Ġch opping +Ġbl uff +ad f +_b ases +.Form atter +Ġ\ % +ĠPage Info +Car rier +ĠCal ibration +com o +-b odied +Ġfinanc ier +ĠIN A +. ERR +Ġhood ie +ĠSan ity +gu arded +.opend aylight +ISM ATCH +High lights +ün k +ani em +anger ed +assign ments +Ġregistr ado +ĠU PPER +ampil kan +ash ire +ĠNik ola +ĠC FL +ĠH DC +Ġp oids +ĠIP s +Ġprevent ative +ips oid +if ix +.c amel +.g a +V olumes +- ste +Y ahoo +_s ibling +H ighest +opt group +Ġkvin na +âĢĿ ãĢĤĊĊ +ĠAppl iances +Ġ" >< +') ")Ċ +ht t +ĠIdent ified +Ġpenc ils +Ġmember Id +Ġappend String +.load Data +Ġmock Mvc +Ġj ub +ĠSl ut +ĠTai pei +st att +Pol it +Ġpart ager +Did Change +Incre ases +) }. +ĠB aba +_CL IP +[ unit +Ġк лÑİÑĩ +Ġalc uni +ĠL ola +Ġcl inging +@ PostMapping +(con cat +Ġss id +ĠFa uc +ok it +ĠRecord ed +á lez +($ ('< +.assertIs Not +Ġk ali +V olt +Ġwarm ly +Ġsca res +get ti +füh rt +_d oes +. EMAIL +im ations +Ġspring fox +ĠDec om +arc y +Ġgl itches +ĠM off +ĠV oll +.b etween +Ġcoord en +ĠPart icularly +GB P +Ġsem ble +East ern +_M SB +]) {čĊ +m organ +ĠE VAL +d ere +HO USE +mo ire +ist ique +_l stm +-com mit +yster ious +Ġtw ink +-th umbnails +en ÃŃ +:' ', +Ġblack out +ĠFlo ors +Ġso fas +Ġou i +lesh oot +ĠRa q +- abs +Ġk ra +M ining +sha ft +.set Columns +Cl azz +PRE TTY +.play list +éĸ ¢ +-Sah aran +M ING +ĉ bl +è® ® +j f +DO CKER +hope fully +( ignore +ĠUsers Controller +ĠMitar beiter +ĠL ES +Ham ilton +-m etadata +ĠK K +ikt ig +Ġwoll te +egr ator +] bool +, current +Ġvalue Type +Ġexcav ation +ol and +Ġv erv +/file path +Auth Provider +Ġpro crast +ĉ ULONG +_MEM BERS +Ġup lift +ĠAut onomous +Ġart works +ĠOut reach +Ġp ore +Home page +Dialog Title +ĠGener ating +PAR SE +Ġsem anas +Ġhuman o +JSGlobal Scope +Ġvol te +Ġb ella +(is instance +Ġpl c +\C atalog +Ġeste emed +éĽ · +(s uffix +Ġswe eps +ĉ ORDER +Ġdo ivent +ĠSw arm +ĠComp iled +get Page +AD R +.R ichTextBox +ĠN aming +ag ged +ĠG ANG +r asing +ode led +Ġg ala +ĠJS Name +dd f +Ġill ust +ĠLans ing +[ port +-de ath +Ġdin heiro +ĠE ighth +Ġb ian +st Ã¥ +Ġvers ión +ĠLinear Gradient +ĠHard ing +. 
*) +ec zy +$ header +Ġv Ã¥r +Un checked +Ġko je +ĠPal adin +() )), +G iving +() })Ċ +Ġd ips +F riendly +Ġport rays +Ġhel ium +Ġinsurg ency +_ex piry +ĠstringByAppending String +Ġa antal +s lope +m ast +.get Integer +Ġ################ ######## +_PIPE LINE +Ġdens ely +Ġmut ating +m idi +ĠSe it +ay ne +NOW LED +ĠDes mond +ĠF Name +ĠN airobi +\ Context +Ġcalc ular +-d en +Ġc ott +] ):čĊ +ĠRecommend ation +ĠRole x +Ġvalidation Result +.p at +Ġn Ãły +ĠRest Client +ĠG PI +ĠAshe ville +ĠO SP +ĠPER MISSION +ÐĶ Ð°ÑĤа +/ notification +K night +_W ord +ĠB ender +rank ing +Ġpart ida +_res ervation +Ì Ģ +Ġm Name +Ġget ch +Ġb orr +Ġdilig ent +Disc uss +æŃ£ åľ¨ +ape ake +ion ed +-N azi +.c um +ĠK ron +=$ ('# +/s ingle +Ġerot isch +ĠV ib +Ġrat ified +Ġconcert ed +ĠREG ARD +Ġdo br +.Driver Manager +' r +Port able +ĉs uite +Ġrel aciones +ĠD op +emplo i +DO B +Ġcr umbs +Ġx ls +_App lication +(': ', +Ġ---------------------------------------------------------------- --------Ċ +m se +Ġber k +ĠReturn Value +ĠBel ly +Ġcam ar +ĠPe ek +els ing +Ġnot ifies +ĠTr istan +ĠG AR +em me +ĠElev ated +_C SV +(ch alk +Ġtw enties +ĠSearch Result += search +ĠMix ing +ý t +Ġrecru iter +ĠIDE OGRAPH +ĠA go +( Operation +$ values +Ġworld ly +ĠRosen berg +ĠConfigure Services +>* Ċ +Ġsn ork +_op acity +ĠinitWith NibName +i ado +A AC +Ġ] ). +; z +_par agraph +Ġnos es +stand s +if r +_m E +I raq +.P redicate +ena ire +]] ];Ċ +Ġun idad +Ġretire es +_h ello +Ġmode le +ĠUIT ableViewController +f write +_num ero +_vis ited +Ġrece be +( Notification +Fant astic +_sub menu +ĠP EM +ĠCup ertino +approx imately +class ed +.Read String +Ġdomic ile +_P W +Ġball park +ĠK ale +con tra +_f avorite +/ of +Qu ite +ĠOT A +Ġacceler ometer +did n +| ^ +ĠRohing ya +ivic rm +ann abin +обÑĭ ÑĤи +or ado +') + +Ha unted +, ID +( UIAlertAction +ur v +_b el +ĠMex icans +/ terms +ĠPaint er +Input Label +ĠV inci +ĠRos ie +\ uc +< Menu +Ġcool ant +(current User +_d ual +) "},Ċ +& p +Ġconver ged +Ġrestr ain +ĠYugosl avia += target +Ġimp uls +ds a +Search Tree +Ġh box +ĠImp ress +§ Ãĥ +get FullYear +(d a +ĠY YS +.al ignment +.Get Text +.token ize +ĠOlymp us +Ġmur ky +ore station +Ġdiss atisfaction +ĉT Array +_ kses +.Add Singleton +ĠStart Time +Ġfan atic +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĉ +Ġentity Type +. override +Ġ ------------- +ĠDat agram +f out +(with Id +Ġ# __ +Ł èĥ½ +ek yll +.f riends +ame leon +Ġz ach +.simple Button +ret orno +Ġkon k +/s mall +ĠQuick ly +un read +Don ate +Detail View +Ġdu a +Ġpenetr ated +OM UX +Ġn ir +_p data +"], [" +Ġlow es +Ġdop ing +Ġas ymmetric +Ġneed less +our cem +Ġup ro +ĠGu zzle +af b +Ġsext reffen +-c ollar +Ġcol ossal +Mon key +n ish +Ġhandle Message +Incre ased +* dx +ĠChatt anooga +f org +ĠOr den +Ġsh ri +ĠV and +Ġ" @" +Image Sharp +ĠWild cats +pon ible +.sc enes +Ġpaint ers +ĠPf izer +ĠZ ah +To Local +ĠFl am +Ġé taient +)) ^ +ĠSand box +ĠTR ADE +Ġchrom ium +Ġac claim +Ġpac man +´ t +) reader +M ari +.Dispatch er +.A DMIN +ĠRem ed +Sw eden +Ġoverl ays +. 
er +Ġp ang +Ġclean ly +aven port +Toy ota +patch es +Ġv tx +ĠE is +cl ado +ĠR itch +RO LS +Ġh ade +Ġconspic uous +Ġdo cks +(j q +ĠPrem iership +ĠBe z +ĠâĦ ĸ +ĠÑĥ Ñģл +_tot als +Ġprov a +ĠC ue +Ġsa úde +ĠGame Controller +IM IZE +, port +ãĢĤ ( +.C decl +Instant iationException +Ġcoll age +ĠIO C +Ġb ais +Ġon Finish +-st ars +set Size +Ġmog ul +Ġdis illusion +Ġche vy +(S chedulers +( IR +_loc s +Ġcann ons +Ġcancell ing +/b us +Ġbuf io +ĠY ours +ĠPik achu +Ġter me +r Ã¥ +f ahren +Ġowner Id +Ġoblig atory +Ġcul p +Ġacid ity +-m ult +ĠBam boo +Ġ' "> +_g s +Ġcomp il +n ard +-ex c +Ġrh yme +Ġbut to +s ays +ant asy +ë ¸ +Ġcitt Ãł +Ġche g +Time String +Ġpos itivity +ĠD abei +Ġw ang +Ġes cre +" c +ĉv ideo +ĠRank ed +.str ings +>> >( +Ġин ÑĤеÑĢ +Ġrest a +[: ,: +Ġrend re +Ġdes er +J os +Ġdis ruptions +Ġоп еÑĢ +s ampling +sup press +Ġcontainer View +ĠSeam less +Ġair y +Ġon load +.Window Manager +ĠPL A +br aco +.set PositiveButton +Ġp du +Ġg si +ĠC li +_gr adients +Ñı д +ĠWh isper +c stdint +Ġl äng +Ġform ulations +én om +ourn emouth +[$ _ +Ġordin arily +.set Username +Ġfacult ies +MIT TED +/ values +Ġwe ir +ĠA pt +M Z +ĉc f +uck en +ĉĉĉĉĉĉĉĉ ĉĉĉĉĉĉĉĉĉĉĉĉ +def ense +[i Var +ĠBusiness Exception +Select ors +(co ordinates +ĠRes ets +ĠDr inks +ole ans +(st ypy +_IO C +.x xx +ĠSl ater +ĠBel ize +Ġ/ ************************************************************************ +add in +_ep isodes +Ġis chem +legal ArgumentException +D anny +Ġp ared +.code haus +ĠAss y +ĉ Rect +â ŀ +.list a +Ġв аÑĪ +Ġv ets +HW ND +ison er +Ġx o +Ġor ally +ĠSt mt +.r nn +ĠD PI +ĠStr ikes +.setViewport View +Ġèĩª åĬ¨çĶŁæĪIJ +Y ELLOW +GL enum +part ners +ĠImp licit +Ġtak o +âĢĻ elle +Ġerm ög +total Count +G il +ĉ work +Ġpr atic +in ati +ab ies +ĠSk inner +Ġspir ited +Ġpancre atic +Ġh df +' em +Ġpsych osis +olic it +Ġ" {" +_at ual +Ġé lect +TE AM +Ġd ak +ĠSW AT +.Fragment Manager +Ġprovision ing +l ifetime +_EXTENSION S +ĠC ASCADE +Ġ! [ +(K P +Ġv em +ĠInterr acial +'] },Ċ +sp acer +_k v +W arehouse +R DD +_f sm +.Stretch Image +, Yes +ĠRefuge e +ĠBr inging +Ġv álido +.inter section +Ġsp ooky +_port al +Ġmo th +ĠZ odiac +ĠSOC IAL +M imeType +'] }} +_Bl ue +Ġbot anical +Ġfr ags +Ġfamil ial +- du +Ġse izing +(block s +.r d +.check NotNull +Ġmis er +Ġmax x +ĠK nee +View Item +Inner HTML +D anger +(( __ +Ġprz ypad +create Url +** , +ĠDecor ating +ATEG Y +?> / +.Design er +hex digest +ĠEvery where +all eries +.TEXT URE +.Block s +z ell +Ġpre ço +S uddenly +input Email +(s ync +.b d +gold en +> '); +ĠDick inson +>> (Ċ +ĠQUE UE +Ġget Column +ĠS AND +.p iece +lic er +Fl utter +Ġget Version +Ġresource Id +og l +ÅĤ aw +.Br anch +ĉ web +Ġfr amerate +PP P +Ġfr ay +C NT +Ġinformat ie +'] čĊčĊ +ne as +Header Code +Ġæ ¸ +Ġtr g +raw types +H onda +Ġmark eter +Ġrequest Data +ĠP g +ĉ not +Ġpage Info +Ġakt uellen +ãģķ ãĤĵ +ĠA MS +push ViewController +ĉ AL +Ġv ests +produ ce +-m ême +ĠRah man +F unny +E Z +_ Valid +Ġsquad ron +Ġl ash +Ġ irm +ias co +ĠPar an +Ġpet ites +ĠDec ay +Ġun initialized +priv ileged +Ġm bedtls +å¤ĩ 注 +Ġ^ . 
+Ġec static +D etroit +Ġpart en +Ġsou venir +.get Login +моÑĤ ÑĢ +en ção +ĠmÃŃn imo +ĠAccess ed +ri ó +M ic +ĠV ocal +.Set String +Ġmens ajes +åĢ į +Ġattr avers +ĠA ph +Ġ' );čĊ +ünd e +Ġench anted +ĠRoot State +ĠCLOSE D +ĉĉĉĉĉĉĉĉ čĊ +Ġcal iente +or ris +Ġphysic ists +h wnd +_v i +Ġráp ido +Ġcapital ized +ed By +Ġmach ining +Ġhub by +ĠSt acy +.B us +dr ink +H ur +Ġprop ia +Unit Test +Ġmiscon ception +__ ));Ċ +/d c +ĠMay weather +_m C +.create From +ĠQ Painter +rops ych +inn itus +ay as +Ġg eg +(d w +Ġus ado +Ġtrick le +Ġann ihil +ĠP asta +Ġ++ Ċ +(Expected Conditions +.post Value +ic ap +ĠDon etsk +_s oup +-p ublish +ĠP b +ment ions +AC CEPT +.P ull +,âĢĻ âĢĻ +Ġret arded +_AT OM +ĠTermin ator +-c ourt +ĠCLLocation Coordinate +Ġrever ence +ĠS SC +ut ely +ĠW ON +ĠG SL +fre i +.get Longitude +Ġopen FileDialog +.B utter +- important +_M ANY +ĠG ong +âĢľ How +Ġg orge += msg +ĠEz ek +create Command +: checked +Ġinf ographic +.W EST +Dir s +Ġguard a +Ġbeet le +< small +- android +Ġcred itor +ĠM éd +Ġfinal ist +Ġab l +ne v +_inter action +ĠMonter ey +j ah +Ġcand ies +ĠQu incy +èª Ń +Ġbatch Size +ak it +Ġo be +(p ara +Ġexperiment ed +Ġcouncill ors +Ġcl ashed +s qu +-st rokes +ĠG K +ĠEx pires +Ġprosec utions +ĠCreat ures +Ġy ö +x lim +_IM P +Entry Point +ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ +.Default CellStyle +Ġbre ve +ĠBrit ann +Ġsweat y +Ġle th +Ġflash back +per manent +ĠJ DK +_D etails +E uro +p pt +Ġrich TextBox +/ board +Ġtr ance +.c ycle +'); ");Ċ +Ġtox in +_de init +Ġover arching +Ġconfig parser +ĠKaw asaki +.th umb +Ġplay a +ĠJose f ++ _ +Ġzero es +Ġa up +ĠH ari +comm itted +N it +.file Path +ĠDis abilities +man ufact +-al igned +.RE SET +Ġrust y +E y +Ġou sted +cos a +Struct ured +.get D +Ġs ábado +> Loading +_m A +.get Random +bl ings +Ġchees es +tt i +. âĢ¢ +ĠBurg ess +ender it +. ',čĊ +(" "+ +ac b +% p +index ed +_pred icate +nes ia +Ġb ied +ĠC IT +( Pos +_r adi +ä»· æł¼ +B iz +ĠAdoles cent +Ġvi ên +c ycl +_C ancel +Ġcon clusive +Ġappell ate +inform atics +S J +Ġelect ive +role Id +Fetch er +ĉ Command +(" (% +Ġf art +IL A +get Block +A USE +Ġд ан +ĠAr te +Ġnot ifying +Ġge le +.s ame +ĠReg el +ĠBa ÅŁ +.c reation +ĠV N +_comm unity +Ġuns ustainable +SE X +Ġgrid Size +res cia +avers able +(', ')[ +ĠPh elps +á»ķ i +ANCE LED +- IS +.run ners +ĠSt okes +.P rodu +Ġwh ipping +_ac quire +Ġinvestig ación +f ried +.copy With +ĠHard cover +- Se +áŀ¶ áŀ +inv itation +les ai +ĠD orm +ĠÑģпиÑģ ка +Ġconcaten ated +oph il +Ġthink er +/font awesome +ĠLe opard +Ġ"/ ");Ċ +Ġresidual s +ĠMic rowave +Ġconform e +th rop +Ġdis emb +ĠO MG +ĠDisc ipline +ĠAc robat +/re pository +df a +_M ED +buf io +Ġméth ode +_H OLD +ias i +_ legacy +) ččĊ +æ£ Ģ +Get ProcAddress +Ġy ay +ot ence +order id +-t w +Ġdear ly +In coming +/ il +Ġneu rop +uc z +); čččĊ +ĠInnov ative +Ġprof und +ig mat +Selection Mode +re levant +.G O +Ġbru ises +Ġs ach +ode f +Ġre imb +/d esktop +-s pot +und ance +Ent ropy +\ core +Ġsug er +ĠM vc +ĠGN OME +_ind x +ĠYY STYPE +ĠMat lab +ĠC IF +Ġ* )) +Ġproduct List +ĠAl right +ac emark +ÑĤи в +mod ification +int ernational +Ġhom ers +Ġdict s +ĠQ Font +.SQL ite +Ġtransplant ation +ĠMessageBox Button +ĠEl ves +'] ])Ċ +(Q Icon +Ġcin emas +CO ORD +- China +Ġkh ẩu +æĪij çļĦ +Ġskull s +Ġpain staking +f ce +.XR Label +Ġspec ifier +Ġpref erring +/ activity +( Photo +á lt +.l ot +' '. 
+ann once +.google code +-p df +ĠP oke +_A CL +Ġend owed +dis cover +.om g +Ġwood land +.M agic +Ġvol ont +Not Allowed +Ġch ave +BM W +',' =', +ĠS IX +æĪij 们 +Ġkos her +Ġaspir ation +int l +_ref ptr +'+ Ċ +ment or +.cl ub +Window State +.A RR +Ġz za +Ġmessage Type +.e qu +Th or +Ġin just +Ġg ums +Ġborder Side +//// / +ĠTrans mit +Ġbuf size +Ġh ak +Ġell as +R ANDOM +ĉm c +Ġpe a +ek o +document o +Ġhyster ia +Ġaren as +Ġgun men +Ġm ike +Ġimp unity +atis ation +_Z ero +_COMP ANY +ĠG ors +Ġuse Class +( redis +ĠRUN NING +ĠB air +vel te +Ġ',' . +аÑĤÑĮ ÑģÑı +ö st +encode URIComponent +_re strict +Ġdec als +ĠPed ido +Ġalter cation +Dis plays +ĠApp licants +C US +Text area +ĠAng ola +.f uture +ĠUS HORT +Ġsuppress ing +Ġset zen +AP olynomial +Ġto ch +Ġhall mark +Ġ$ $$ +ĠCHAR SET +.r pm +ĠD ich +---------------- ---- +_p arm +è¿ ĺ +acc iones +h ait +WAR DED +_r outing +ĠN OM +Ġen clave +ĠLot to +ĉf r +complex Content +ĠBall ard +k ube +/w in +.getColumn Model +_RE PLACE +Header Value +Ġest udiantes +Ġap is +Ġb pm +ĠType Name +And Get +rit a +Pl ans +> Note +Ġfet isch +Ġton ed +_g oto +ons ense +Ġm olds +Ġinfiltr ation +ĠGuerr ero +ub bo +ck i +($ (". +_ activities +(ch anges +Ġof App +ĠKe pler +ĠD emp +ĠCont inent +.T icks +ĠUn signed +ĠJah res +Ġfresh men +ĠArch ived +ĠкоÑĤоÑĢ Ñĭй +Ġ' :: +T utorial +C c +Ġtable LayoutPanel +from Json +.level s +_trans ient +Ġendors ing +ĠD IC +la uf +Ġsh red +_E MIT +ific antly +AL A +/ proto +Ġnarrow ing +U tc +Fact ors +Ġsent ient +æŀ IJ +lix ir +ĠC ROSS +met eor +Ġgro in +Ġm db +ĠRot terdam +Ġcom ida +ĠOp Code +ĠDefault Value +Permissions Result +Ġheter ogeneous +Ġm oot +Ġde ceived +-in dependent +ĠObject OutputStream +Ġover power +.d up +Ġl db +Ġdomest ically +Ġbest ellen +Ġlo v +ĠContract ors +Tri angles +Ġfod der +Ġfilm es +ä¼ ģ +Ġrev olver +Startup Script +/ validation +ĠResource Type +i ÅŁ +ĠL az +f ef +Ġlst m +{ * +. 
attachment +.h its +ew ith +DO G +Al abama +Ġmedium s +.m Context +-c ols +åı ĭ +.not ice +Ġat tn +ĠP acking +ĠL n +_COM PLEX +/ Users +.sav etxt +ĠR ounds +?,?, ?,?, +Ġing l +ĠR OC +_f emale +ĠSt ard +]] ; +Ġwrest lers +Ġtorrent s +Ġsin h + ĊĊ +ë³ µ +s ense +how ever +.Ph ysics +Inf rastructure +ĠSac r +F el +ĠD ISTRIBUT +é ments +ĠValid ates +################################################ ############ +Ġ| / +Ġes l +Ġré seau +ĠB ip +BY TES +_W ATER +Turn ing +EL S +Ġj uxtap +Ġlesb ische +ý ch +( Unknown +Ne o +@ JsonProperty +Ġal umnos +ĠRaq qa +ime i +.get Bounds +.Mouse EventHandler +#### ### +Generic Type +/c ms +Ġturn o +Ġм ин +Ġfolk lore +ĠE vo +Ġconduct ivity +Ġle ben +Ġgear box +-v s +ĠÏ Ĩ +Ġdrink ers +Ġcon exao +ĠTe eth +Ġget Arguments +ĠR AT +ent ious +E duc ++ W +ĠInstitution al +ĠB ord +is Equal +(p wd +Ġign ited +ĠR ousse +Ġimpact ful +ĠM alk +Ġg eral +ĠP ivot +Ġa zt +Ġcsv file +ĠR ope +ĠSOL UTION +ĠArbit rary +Ġlet to +.Mouse Adapter +Ġ} }} +ĠSail or +der a +Put ting +Ġconcentr ates +Ġauth Domain +âĢĿ çļĦ +-f inals +, strlen +Mu on +ĠOrd inary +fire fox +ĠLa TeX +ĠH und +engine ering +/ blue +ed TextBox +(" "); +ĠC DDL +ke pt +ĠGet String +K ir +() =' +ĠO CD +ant ium +$ menu +ĠAppalach ian +Secret ary +ë¥ ĺ +ี ย +Sem antic +Ġ* [ +est one +ung kin +Max Y +-t one +"} ;čĊ +_P art +< Member +tr am +Ġtrans istor +Ġ---------------------------------------------------------------- ----------Ċ +ĠDes de +Ġright ful +ĠCorn el +æ ij +.H OUR +Ġsidel ined +ref errer +m aze +Ġhol ster +Ġcripp led +ĠDate Formatter +oph age +_m D +Ġdes elect +ra ud +ĠPK K +row Data +Ġlock smith +.res ponses +(product Id +_ST MT +Key Type +.Th en +z ee +Ġcr t +ĠGrand ma +@ Resource +Ġbit wise +-c mpr +ãĢĤ www +zeit ig +& display +Cart Item +- No +Ġnum éro +Ġm aur +Ġinst ancia +ĉd t +_n pc +Ġskate board +âĢľ All +ĠCrow d +Ġä n +Ġb raz +ca e +yn et +/p m +/s creen +OPT ARG +ĠV Box +Ġle opard +_g reater +c pt +< dd +Ġmechan ically +osp els +) f +.l wjgl +.get Port +ĠP REF +.Add Transient +pp ard +Ġí ļĮ +Ether net +Ġsal ine +(level s +Ġservice Provider +.A ngle +alt itude +illa ume +Ġs cape +_CAL C +_ quest +ĠDiss ertation +ĠE DM +-C ds +Ġhon orary +st ops +Ġsub dir +ĠV H +ĠChe at +Ġright fully +Q E +.Write Byte +fig ures +enn ie +( DBG +Ġvoks ne +Ġexp ended +UN ICATION +il inx +ĠRec ap +_ verts +Ġtra umat +Ġget Player +Ġverb ess +Ġcultiv ating +Ġiniti ator +Th ông +find First +_per ms +Ġbu c +Ġ""" čĊčĊ +T YPES +object Manager +(Configuration Manager +Ġtim id +Ġsnap chat +Ġcon seg +ĉd istance +_right s +_D es +ĠF lesh +- ver +Ġa fl +fra uen +Ġblas ph +ĠQual ität +ma f +Monitor ing +.D iff +Ġshore line +Ġresponse Body +mem set +< decimal +Smarty HeaderCode +Ġin sets +ĠBinary Tree +amed a +Ġn ihil +ĠN ay +ym ology +ĠW G +Ġt api +ĠInst alled +m aintenance +)} "Ċ +ĠX O +-per iod +s ar +Ġning una +ORM AT +.set PrototypeOf +ĠK b +ĠHen rik +ét ique +ĠLah ore +ĉ Address +Ġmel ts +N y +_adv ance +Ġveloc idad +Ġalum no +Ġsanit izer +Ġph ishing +ĠCom et +Ġch iar +ĉs pec +trim med +(state arr +on nen +Re venue +L ens +Ġcha ired +ĠAss umes +Tr ash +_un set +\ Bridge +Point Size +ĠPol ic +Ġsex uales +ĉd fs +ĠWide String +Ġaccru ed +Y W +_S CHEDULE +Ġk ite +Ġparach ute +[ table +Ġactive ClassName +.Qu ad +Israel i +ĠÅ ĵ +Ġho og +Ġch á»ī +ew ear +Ġtire lessly +set Error +.get Amount +.set Items +ĠM anson +ĠBay esian +_F lag +AC HER +/ original +Ġimm ac +ĠLos ing +' >ĊĊ +L ic +ĠMir age +ĠAssembly FileVersion +Te V +ĠValue EventListener +-s olving +Th o +rou lette +_W P +Ġunint errupted +Ġfield Type +.T yped +Ġam our +Ġmock ery 
+(v ol +ĠSub committee +ĠR uf +ero x +:UIButtonType Custom +ĠBl ur +Ġwy kon +nc es +ASH BOARD +!! ");Ċ +Ġmurder ers +.d aily +ĠDI AG +j ing +Ġdol phin +Ġl òng +Ġb ö +ĠV ocabulary +.St Object +') "> +Ġz un +Ġscrim mage +tr éal +ĠL ig +[ vi +C ole +Ġfrost ing +.Pl ayers +- translate +Fe els +=\" / +.Butter Knife +Ġ?> ;Ċ +Ġav i +inn ie +.F ailure +Ġsp indle +Configuration Exception +_h op +Ġpos ição +ĠA wait +UIImage PickerController +ĉ day +Ġgen om +C ab +ĠÑĢ ÐµÐ·ÑĥлÑĮÑĤаÑĤ +OR IGINAL +Ġejac ulation +(t cp +SE COND +Ġton ic +ĠList Box +Ġ ĉĉĊ +() >Ċ +Ġqu atre +ượ ng +with Errors +.M aybe +, â̦ +token Id +_UN DEF +Ġfresh ness +ĠAmend ments +.map box +.C V +(b log +_get time +. quest +s parse +Ġres ale +Ġenthusi astically +ĠProstit utas +W a +C argo +.Parcel able +SENS OR +ĠRy u +La ughs +_N ative +/ pg +yst s +Ġphot oc +ç® Ģ +ado pt +.spec ies +conc iliation +Adjust ed +.Firebase Auth +ut tle +ord ination +Ġm unch +ĠSt ake +.p ing +ank er +(QString Literal +Ġsub script +ĠĠ ĉĊ +ĠM CC +_C md +se xy +i ou +ĠM ANY +Ġn anny +TR AIN +Ġflour ishing +ĠW atches +ĠQ Map +ĠF erm +Ġwas m +ĠA bed +_ UD +ĠGlass es ++ v +Att end +.Ch ain +Ġdec ency +ĠSupplement ary +h unter +-t xt +Ġ" }";Ċ +.set WindowTitle +(" +Ġmasc ara +( Profile +åĬŁ èĥ½ +imit é +Ġwild fires +- ROM +.is On +(group Id +Re pair +accum ulate +Ġ< ", +Ġhand written +Ġach eter +ĠM GM +ĠIr ma +->{ _ +ge e +cr iminal +Ġèĭ¥ è¦ģ +Ġmoment arily +") != +_l it +Ġexpires In +." ). +éķ¿ åº¦ +Ġfr ække +vl c +Ġor bs +), $ +Ġvent ured +/ >\ +char m +N uitka +eld ig +aton in +W itness +-l at +Ġset Hidden +Ġrelic s +Ġcons ulate +. IGNORE +" After +Ġset Address +Ġbeste ht +Ġ'' )ĊĊ +.x axis +Ġser ão +Ġmis led +_UN IFORM +ĠV IA +inc r +Ġzen ith +Ġvis cosity +Ġthin ly +.get SharedPreferences +.Error Code +"), " +ĠMillion en +Ġ/> )Ċ +Scroll Indicator +-se eking +ĠPOLIT ICO +as ca +_r l +N avig +(full file +Ġsol itude +Ġju ven +Ġhaul ing +ĠMac ros +ĠG ry +Ġexerc itation +ĠATT ACK +Tick Count +Ġr ites +Ġdo e +Particle System +Ġsl u +Window Text +ĠClass Name +Ġsl ander +ĉ Port +j ong +? a +.D ial +âĢĶ at +$obj PHPExcel +Ġso ar +EN N +appe ared +Ġquot id +em achine +Ġn ip +Ġmicro time +ĠAl ma +; ! +---------------------------------------------------------------- -------------------------------- +ĠPass age +Ġdump sters +ĠEx clude +Ġsuggest ive +ĠCircularProgress Indicator +_cl r +Array Type +ILL A +Elapsed Time +Dr iven +Ġresource Name +ĠG arrison +ser ir +-a head +Ġp innacle +ĠEs presso +S parse +Ġass ays +ĠGirl friend +im id +]=' \ +ONGL ONG +Ġportray ing +L ane +Ġb úsqueda +Ġrein forcements +ĠSpread sheet +ĠArray Collection +, arr +light box +ic ana +< " +build ers +K id +ĠMat SnackBar +EX PR +od cast +ĠFound ations +Ġind s +=' ${ +F izz +-function al +(work space +Ġstem med +_p atches +ĠJar vis +READ ING +Ġdisrespect ful +ĠQ Dom +Ġ$ {Ċ +est atus +Re ached +! 
.ĊĊ +IL T +ĠN DEBUG +ĠCour age +birth date +ĠT ing +Ġutil izado +án chez +Out door +Ġhand guns +Ref Count +É Ļ +rom o +Ġt ts +.S he +ĠP ane +ãĢij, ãĢIJ +ĠIO CTL +/ black +ins cription +Ġbi opsy +ĠTime Interval +.Test Check +ĠGUI Style +ĠCap ability +ĠBeit rag +don nees +T reatment +.back up +Ġsign ings +ĠB oca +dr m +.M AIN +Ġgo ede +ĠMark up +G REE +ĠBase Service +.C reator +Ġj ails +ĠK ahn +Ip Address +ACH I +Ġinhib ited +Ġ@ $_ +ĠAss ass +Ġenvi ado +Hero es +ÐŁ еÑĢ +ĠM aven +.l s +Ġ ive +| RF +Ġresize Mode +Ġrum pe +_attach ments +T U +Ġtact ile +Attempt ing +Ġro bin +y aw +Ġmerc enaries +ĠHab itat +end date +Ġo xy +ĉR andom +oh on +Is Null +ĠValidation Result +ãĥ ļ +um bed +pp v +Ġar p +ich ick +_r nn +ĠT FT +Tex Image +" On +ĠSam pler +top l +Ġj ane +y ling +ĠUN ICODE +Tab Index +< {Ċ +s uspend +uv ian +, application +ол иÑĩеÑģÑĤво +y at +ez ier +ĠCH UNK +ĠAd ler +/ Add +ĠKey Value +Ġspos ób +Sam pling +ch ers +_AM D +R u +.Must Compile +N ation +Ass oc +Man aging +ĠEng l +_G B +Ġsucc inct +Ġdis liked +ĠI ke +Bullet in +_ARCH IVE +Prop osal +Ġjog ging +.C REATED +Ġch ol +è£ ħ +Į ¨ +-p ush +Ġreserv a +core v +è tre +TH R +Ġincompet ence +Ġchar isma +æĦ Ł +Ġ" == +BT N +ĠLoc ator +iv et +('. ')Ċ +Ġfor IndexPath +ô me +Ġcapac it +w aters +ĠWR ONG +ho a +ĠM IPS +Ġem iss +ĠJacqu eline +(c mp +Ġe ens +Le o +.tim ing +CLUS ION +Ġ(" - +åĵ Ī +.k ode +ĠUnd ert +Ġbew ild +ĠEss en +.h d +Ġren egot +Ġm ower +Ġl sp +Ġpen chant +Ġman oe +Ġag li +Ġrec al +ĠOPER ATION +(^ )( +ĠÎ ½ +ĠSc oped +Ġ@ "Ċ += label +[ loc +Int l +ĠN z +table t +.Column Name +Ġscreen Size +DB us +co oked +- registration +âĢľ One +-n on +ĠwiÄĻ c +Ġcost a +.add Tab +. conditions +ĠH ess +MEM ORY +ĠAval anche +() }}Ċ +Ġtri plet +Ġl abyrinth +ĠNode List +ĠNY T +Ġy eni +d ff +.Html Controls +AV IS +/ Math +Ġmem cmp +Ø§Ø ¡ +оÑģ ÑĮ +c rap +(p ages +Ġl xml +ĠQ DateTime +_t cb +Ġopen id +Ġsyn aptic +ĠMD MA +(s lug +igm atic +en or +Ġcr amped +G OP +Ń IJ +.is File +ĠD ifferential +Ġ=" ";Ċ +ĉĉĉ ĠĠĠĠĉ +ĠC ooke +ĉU FUNCTION +Ġpersever ance +Relative Layout +IMPORT ANT +Ġex on +Ġо н +ib ase +(C ONT +n ovation +ä½ ķ +[ sub +Admin Controller +HTTP Header +cre ar +ĠN IR +ĠDrop DownList +Ġval ide +Ġde hydration +. '] +(W IN +Ġ... 
\ +Ġphotos hop +ĉ Init +_c ou +Ġtime Zone +dar win +rom atic +Navigation ItemSelectedListener +br ates +] --;Ċ +Ġtraged ies +ĠPed iatrics +SM ART +-A PI +ĠMessage Lookup +ĉ vo +Ġprejud ices +Ġm A +U ps +ĠMISS ING +ĉ ad +C ream +ĠT b +ĠMon a +_ ghost +ĉt ypes +Em b +ĠDocument ary +');ĊĊ ĊĊ +Ġl up +_ Reference +ĠB ATCH +Ġintertw ined +< Cell +ĠCab r +n ation +Ġis Connected +.remove Listener +Ġcon g +_t i +ĠSil icone +Ġê²° ê³¼ +ĠW AN +ĠG ibraltar +/ response +ĉp erson +ch ants +V IP +em ergency +Pixel Format +- Am +Ġsouth western +_pl l +if ers +_ON CE +ĠF ayette +.nc bi +_P anel +.Q ual +Ġpol ys +Ġcreate StackNavigator +� t +Ġlay offs +ĠBl anco +Fe at +ĠV imeo +_ch i +_l ifetime +POINT S +, private +Ġunb earable +print ing +Ġc gi +.B ACK +Ġintern s +ĠNew ly +inf eld +( IB +ĠK ata +ĠDef endants +Th r +é¢ Ħ +_V F +FFFF FFFF +Ġdavid jl +Ġbitter ly +S uggestions +.set Cancelable +FIN AL +ason s +_rw lock +_WRAP PER +Ġhapp iest +(row Index +ós ito +TOT YPE +Autom ation +Log File +Ġcons olation +ãĥ Ģ +Ġt êm +Ġpr er +rg yz +ĠG eg +ĉd to +.default Value +ĠK ami +ĠA SE +optim ized +Ġíı ¬ +Ġorigin ates +err Msg +Ġespa ço +(S YS +ĠMc B +d ance +_det ected +Ġfr ü +ĉĉ ĠĠĠĠĉĉ +< Date +(com b +ĠDec ide +\ Field +ĠProp osed +R ib +Ġdis likes +ĠW ien +ĉ Document +Ġtr af +Ġst oria +ĠT ells +') == +C ri +( VALUE +ĠBurn ett +, void +Ġdan h +Ġc cp +Block chain +:"- "`Ċ +IC lient +IS ODE +Iss uer +) }čĊ +, but +ĠU ph +( Sub +Ġtélé phone +ĠonData Change +Ġmarsh aller +-an alytics +, content +Ġdeb acle +_Value Changed +Ġfa una +Ġ# => +Ġf oyer +'util isation +ĠMü ller +ĠFet ish +Ġdefault Manager +Ġback track +B ah +Exp licit +_A SCII +Ġm Activity +(M sg +Ġê² Į +ĠTER MS +ĠAng ie +HS V +ĠMos que +.N ames +íĬ ¼ +rest e +_p arms +Ġgap ing +Ġcro pping +Data Frame +Ġrespons iveness +_ undo +_tr an +. terminate +Ġitalian e +Ġwalk through +Ġattract iveness +д е +_ST S +_ learn +Ġchocol ates +ier archical +-th inking +Ġ ))) +ish ments +.Log f +ĠTM Z +ĠCan ary +fo il +ĠVacc ine +.v x +ĠSur round +Inter mediate +Ġi ov +v ais +'; ";Ċ +ï½ŀ ĊĊ +éĢģ æĸĻ +â̦ it +Se ats +Cl ar +W ars +ĠHutch inson +ĠHas an +! ')ĊĊ +ĠRich ie +che iden +($ (' +Y ork +Ġl ids +Ġal phanumeric +ĠG lock +.sh apes +Ġspark ing +_ epsilon +uplic ated +.dir ty +]) == +ĠìľĦ ì¹ĺ +Ġsc n +Ġ/ **************************************************************** +_PRE VIEW +_H C +ield ing +f gets +ĠAdd ison +Ġproduct Service +- figure +(ret val +z ano +Ġaut ob +ĉs d +_n umer +ĠSet LastError +ĠF ior +ific ance +Unt itled +Ġin field +Ġ{} ));Ċ +Ġsp ac +Ġro okies +(des cribing +ng en +ி à® +.r df +.M utex +Ġkne eling +ĠQ E +set Max +Read Stream +Ġvent as +s ut +cm peq +.WriteAll Text +ĠEx perienced +$ __ +Ġka um +ĠL IS +Ġdocument os +_HE ALTH +icont ains +Ġart isans +OWN ER +Ġblink ed +get Display +Ġto en +Ġrow Num +Ġav ril +Ġinv is +ĠK ear +toBe InTheDocument +ap ur +Ġr acked +ĠMc Master +_ATTR IB +H az +Ġfact ura +/ ts +ĠÑĢаз меÑĢ +Ġz f +Ġshort fall +.f asta +ĠCONST ANT +.man aged +g ems +Shared Pointer +Ġblur ry +b rightness +( components +Ġ... "ĊĊ +SE LL +ĠIllustr ator +.get Channel +Ġtrou vé +yst ers +Ġvo is +ĠLind en +Ġem ojis +Ġb rawl +ĠMS R +ĠE lo +ĠCroat ian +Popup Menu +L ewis +.J WT +Ġaston ished +B ush +(item Id +Ġdet achment +ĠEnc ore +å° Ķ +Ġre kl +Ġcr am +)$ / +.get Host +_re commend +- HT +_cal ibration +Auth enticate +.firebase app +UN IX +ĉC amera +ĠHE AP +I deal +. 
office +Ġgoof y +(S ymbol +Ġjou er +_part itions +Ġrapid ement +ĠGN UNET +id User +Ġsuperv ise +( Contact +AW N +ãģ ĺ +Ġna am +Ġa ust +åľ¨ 线 +_soft max +Allow Anonymous +amm able +RO UTE +* D +Ġad en +ĠCrist ina +ĠCrist iano +Ġblood stream +sub class +_person a +CH ILD +-k now +Ġnavigation Options +ĠZuk unft +ĠPix ar +Ty ler +Ġunder world +Ġsincer ity +Ġdispens er +Ġk ter +idd ers +.add Node +- checked +Ġke yst +ĠW TO +.sign als +Ġadvent urer +ĠP ang +\ R += pos +Ġdispens aries +ĠClo set +("{ \" +ide on +Ġnécess aire +() "Ċ +_RECE IVED +Ġrésult ats +Ġmod en +ĠIceland ic +; d +. allowed +(new User +Ġmerc iless +.Wait For +Ġday care +ĠCon veyor +ç ĸ +ð ¬ +ç ĥ +ç Ĺ +ç ł +è Ħ +é ² +å ¦ +çĿ Ģ +å¾ Ī +é ħ +ç ĭ +é ª +æ Ĥ +é ¥ +è ħ +æĥ ³ +å ¨ +é ¹ +ç Ĥ +å Ĵ +ç Į +è´ ¨ +æ ¢ +æ° Ķ +ð « +æķ Ļ +ç Ł +å Ħ +åıij å±ķ +åĪ Ľ +è ij +æ ħ +å ŀ +åģ ļ +æĪ ĺ +æ IJ +å¼ º +æ· ± +åĩ ł +ç ¿ +å © +è ŀ +å§ Ķ +åIJ Ħ +è İ +é ¸ +é º +åı Ĺ +èģ Į +å ĺ +æ ½ +é£ İ +èIJ ¥ +åħ ļ +è ľ +éĤ £ +é¢ Ĩ +ç ij +é ³ +æľ ¯ +ä» Ģ +æĪ ¿ +ç² ¾ +å ª +é Ĩ +å¤ ª +èĤ ¡ +è Ľ +åħ ī +æŀ ģ +åĬ ŀ +è ĵ +ç ĺ +å ´ +å Ĺ +èĬ ± +çł Ķ +å¿ « +å¸ Ī +è¶ Ĭ +è§ Ĥ +æ ¤ +æ ¦ +ç ŀ +èĤ ² +çĪ ± +çĻ ½ +ä¸ ĸ +ä»Ģ ä¹Ī +çľ ¼ +å ³ +è Ĵ +æ ĵ +è¢ « +å¹ ² +çĹ ħ +å£ « +ç Ĵ +è ¸ +æ ¾ +å·¥ ä½ľ +è® © +çĥ Ń +è¾ ĥ +åĦ ¿ +åĬ © +ç§ ¯ +ç ³ +ç ĵ +ç £ +å Ĥ +è ¹ +è ļ +å· ± +çĻ ¾ +åĬ ¿ +èµ Ľ +æ ¨ +æ ¿ +è ĸ +æĿ ij +å¸ ¦ +å¢ ĥ +æĬ ¤ +é Ń +å « +èĩª å·± +æµ İ +ä½ İ +åĮ » +éĺ ² +åĨ ľ +è Ĩ +ç Ĩ +é « +åĨ Ľ +æĪ ı +åį ĩ +æĸ ¯ +ä½ ı +èIJ ½ +åħ » +èĩ ´ +ç Ĭ +ç ĩ +ç ħ +è Ķ +ä¼ģ ä¸ļ +åĽ ¢ +æī į +æł ¡ +åĩ Ĩ +å¥ ĩ +åī ¯ +é ¼ +æ¼ Ķ +é© ¬ +èµ ° +ç¥ ŀ +åħ ĭ +æľ Ľ +æ² ¹ +è¾ ¹ +åį ĥ +å¾ Ģ +åĪ ĩ +æ © +ç ¶ +å Ļ +éĻ ħ +çī Į +社 ä¼ļ +游 æĪı +æĸ ½ +ç ħ§ +æİ § +æ» ¡ +è¯ Ĩ +éĩį è¦ģ +è¶ ³ +çķ Ļ +ç» Ĩ +åį ı +éĢ Ĥ +æ ĩ +æ § +é Ħ +è Ŀ +å¸Ĥ åľº +ç»ı æµİ +ä¹ ł +æĸĩ åĮĸ +éļ ¾ +ä¹ IJ +åĨ ³ +æ¬ ¢ +è§ ī +åĽ Ń +åħ ´ +åħ ħ +ä¸ ¾ +æī ¹ +è ķ +æĬ Ĭ +æĬĢ æľ¯ +ç© ¶ +第 ä¸Ģ +ä¾ ¿ +åĵ į +çİ © +åĿ ļ +èŀ į +åį Ĭ +åĸ ľ +å± Ĥ +ç¦ » +ä» ħ +é Ł +åij ³ +å¿ µ +åŃ £ +ç´ § +ä¹ ħ +é ¤ +é ŀ +è ¤ +åĢ Ļ +åĨ µ +ç Ł³ +åģ ¥ +æĢ İ +å® Ŀ +è¡ Ģ +åŁ Ł +æĹ © +çŁ¥ éģĵ +è´ Ł +åį ļ +å· ´ +äº ² +å± ŀ +ä¸ ¥ +äº ī +å¯ Ł +è º +ç ° +建 设 +产 ä¸ļ +åIJ ĥ +åŃ © +æĹ ħ +æł ¹ +æĿ IJ +ä¼ Ĺ +éļ ı +å® ĺ +åº ķ +å½ © +å¯ Į +æ¸ © +åį « +åī § +çĽ Ĭ +æĬ Ĺ +è´ ¢ +çº ª +æ Ĩ +çĶŁ æ´» +çº ¢ +çĶŁ 产 +è¿ ľ +éĴ ± +åĶ ® +ç¾ ¤ +çı Ń +æ¥ ¼ +éĩ ĩ +èī º +å± ħ +åģ ĩ +è° Ī +æĻ ļ +é ¬ +èĪ ª +å® ³ +è Ĺ +ç į +å µ +çİ ĭ +åº · +è İ· +ç» Ń +äº ļ +é£ Ł +åİ ĭ +æĭ Ľ +èĮ ĥ +è® ¸ +åĽ ´ +é ½ +éĻ į +çº ³ +åĵ ª +æķĻ èĤ² +å·² ç»ı +å¾ · +æŀ Ĺ +å®ī åħ¨ +é¾ Ļ +大 å®¶ +éĿ Ĵ +åº ľ +æ² ³ +åı ¤ +èį ¯ +åĿ ĩ +æĻ º +ä¹ ¡ +çķ ¥ +åĨ · +ç¦ ı +å® ¤ +ç» ´ +æī ¿ +å± Ĭ +è¯ ī +åĪ » +è Ł +æ ª +å°± æĺ¯ +è¿Ļ 个 +ä¸Ń å¿ĥ +ä¸ĸ çķĮ +åŁİ å¸Ĥ +éĿŀ 常 +åĪ Ĵ +åı Į +æĢİ ä¹Ī +åΰ äºĨ +æľ ĥ +åı ² +ä¾ Ĩ +å¾ ĭ +å¥ ĸ +ç» Ī +åª Ĵ +å® ģ +è¯ ¾ +èģĮ ä¸ļ +åħ į +æµ ĭ +æĢ ¥ +æķ ij +çĭ ¬ +èŃ ¦ +é¤ IJ +æĦ ¿ +è´ « +çĸ ij +å ļ +å¥ ¹ +åı Ī +åĽł 为 +ä¸į æĺ¯ +å¤ Ł +æĸ¹ éĿ¢ +éķ ĩ +äº Ĵ +éħ Ĵ +è® ² +çĸ Ĺ +æĺ ¥ +æ¹ ĸ +å¤ ľ +è´£ ä»» +人 æ°ij +åħ ° +çŁ Ń +æķ ħ +åĩ ı +æĻ ® +äº ® +ä¾ Ŀ +åį ° +éĿ Ļ +åĢ ĭ +å¾ ģ +åIJ ¸ +ç¼ º +æĶ » +åĩ Ģ +åħ ¸ +åĽ º +è® ¿ +ç ¹ +ç Ģ +æıIJ ä¾Ľ +ç» ĩ +å¾Ī å¤ļ +çłĶ ç©¶ +è· Ł +主 è¦ģ +æĥħ åĨµ +çŃ ĸ +æŃ » +大 åѦ +æĶ¿ åºľ +å½± åĵį +ä¹ ° +åħ Ń +éĻ © +åħ « +æŁ IJ +è´¨ éĩı +åį ł +å· ® +æĽ´ å¤ļ +æľ ĭ +éĿ © +å® £ +çł ´ +è½ » +åº § +æĺ ¾ +ç¨ ³ +è´ µ +èĥ Į +èī ¯ +çĸ « +æ¯ Ĵ +ä¹ İ +åĢ Ł +è¿ · +çŃ Ķ +æ¿ Ģ +åij ¼ +äºĨ ä¸Ģ +è¶ £ +ä¼ ´ +ä¼ Ļ +è ¼ +ð¬ Ń +åĽ½ å®¶ +æ´» åĬ¨ +çݰ åľ¨ +ç§ij æĬĢ +åį ¡ +ä¸į åIJĮ +个 人 +è®° èĢħ +ä¸į æĸŃ +éĹ » +ä¹ Ŀ +èij Ĺ +ç» ¼ +ä¸ ĥ +æł ij +æľĭ åıĭ +åį ĸ +ä¼ ¤ +æ² Ļ +åĸ Ħ +å¥ Ĺ +è½ ® +ç© ¿ +è¡ ¥ +ä¸Ģ å®ļ +çª ģ +çĿ £ +è¿ ½ +å¨ ģ +åı ¦ +åĽ 
[Omitted: a large block of added lines from a tokenizer merges/vocabulary file. Each line is a byte-level BPE merge pair for Chinese text (e.g. the pair rendering as "æĺ¯ ä¸Ģ" decodes to "是 一"); the UTF-8 bytes display as mojibake and the list carries no readable prose, so the tens of thousands of merge pairs are not reproduced here.]
大 +æĪIJçĨŁ çļĦ +å¹² ä»Ģä¹Ī +ä¸ĵ项 æĸĹäºī +çŃī å¤ļ个 +èĦ± 离 +ä¸ī 个æľĪ +çłĶç©¶ åijĺ +æĹĭ 转 +æŀģ èĩ´ +åħį è´£ +åħįè´£ 声æĺİ +å¾Īå¤ļ çݩ家 +车 ä¸Ĭ +交 äºĴ +å·² æĺ¯ +ä¸Ģ å°ı +çļĦ éĩįçĤ¹ +èĬ± äºĨ +ä¸į æĺİ +æľīåħ³ è§Ħå®ļ +çĬ¹ å¦Ĥ +çľ ¸ +å¯ ¡ +çļĦ è¡£æľį +åĮħ 裹 +身 åŃIJ +å¸ĪèĮĥ 大åѦ +äºĭ åħĪ +线 æĿ¡ +æ³ķ åζ +åħ» æĬ¤ +稳å®ļ æĢ§ +éĤ µ +åŀĦ æĸŃ +é¡ į +èĢĥ åı¤ +æĿł æĿĨ +èĭı èģĶ +æ°´ ç͵ +åħ·ä½ĵ çļĦ +æ¿Ģ æ´» +æĪij æł¡ +åĪļ å¼Ģå§ĭ +åĩ¸ æĺ¾ +ç¦ ¾ +åħ¼ èģĮ +éĢı éģİ +åľ¨ 游æĪıä¸Ń +社ä¼ļ åıijå±ķ +好 çİ© +å¹» æĥ³ +ä¸į 代表 +注æĦı åĬĽ +æ£ į +ç͍ æīĭ +ç¾İ 人 +许å¤ļ 人 +å¾Ī æĺ¯ +çļĦ çłĶåıij +æīĵ åĩº +åIJĪä¼Ļ 人 +ä¸Ģ å¤ľ +ç¼ĵ ç¼ĵ +ä¿® æŃ£ +æĦŁ çŁ¥ +ç»Ī 身 +æ¿Ģ ç´ł +çݯå¢ĥ ä¸ĭ +次 ä¼ļè®® +ç»ıæµİ å¢ŀéķ¿ +æī Ľ +åıij éħµ +åĪĨæŀIJ å¸Ī +åľ¨ æľªæĿ¥ +主è¦ģ æľī +ä¸Ģ åŃ£åº¦ +çļĦ 说æ³ķ +ä»İæĿ¥ 没æľī +è´§ 车 +缩 å°ı +太 è¿ĩ +æķĪ åĬĽ +ä¸į ä¸ĭ +æĬķ 稿 +èᝠä¸ļ +ç»Ħ éķ¿ +ç«Ļ çĤ¹ +å¾Ī åĸľæ¬¢ +éIJ µ +åĬ¿ 头 +æ¼ı æ´ŀ +æĦ¤ æĢĴ +åħħ å®ŀ +åĪĽä¸ļ æĿ¿ +çĪ ª +æľª å¿ħ +åºķ éĥ¨ +å¾Ĺ åĪĨ +人æ°ij åĮ»éĻ¢ +äºĮæīĭ æĪ¿ +å·²ç»ı 被 +大 楼 +æĸ° æĪ¿ +辦 æ³ķ +ç͍ åĬĽ +æĭĵ 宽 +åĨħ åľ¨ +æĴŃ åĩº +饰 æ¼Ķ +ä¹Ł 让 +ä½ľ çĤº +çī©ä¸ļ 管çIJĨ +åį´ ä¸į +为 ä¸ŃåĽ½ +å±Ģ åĬ¿ +ä¸į èĤ¯ +æľĢ æĸ°çļĦ +åı¯ä»¥ éĢīæĭ© +æĺ¾ çݰ +å°± ç®Ĺæĺ¯ +åľ¨ æł¡ +é¾ Ł +两 æĿ¡ +çļĦ å®ŀåĬĽ +è¶Ĭ 好 +她 åľ¨ +å¿ł è¯ļ +ä¹Ł éľĢè¦ģ +游æĪı æĵįä½ľ +è¶ħ åĩº +å¦Ĥæŀľ ä¸į +æīĢåľ¨ çļĦ +ä½ł è¿ĺ +以 åĨħ +æľī ä¸Ģå®ļ +åı¯ è¾¾ +è·ij åΰ +åī Ľ +建ç«ĭ åģ¥åħ¨ +æķ´ 车 +åīį æĸ¹ +éĹ´ æİ¥ +çѹ å¤ĩ +çĸ² åĬ³ +离 å¼ĢäºĨ +æ± Ŀ +éĿ¢ éĥ¨ +ä¹ĭåīį çļĦ +åıĺ 为 +å¦Ĥæŀľ 说 +对 ä»ĺ +åĿĩ åı¯ +被åijĬ 人 +ç²¾ ç¾İ +èģļ ä¼ļ +çĿĢ æĢ¥ +è°· æŃĮ +ä¸Ģ åı· +红 åĪ© +ä¼łå¥ĩ 游æĪı +å» ĸ +è´ ŀ +ä¹° åΰ +éŃ ļ +ä½ĵ è´¨ +å°ij äºĨ +æ³ī å·ŀ +åIJ Ł +ç»Ŀ ä¸į +é»ij æģ¶ +é»ijæģ¶ åĬ¿åĬĽ +ä¸Ĭ æĺł +çļĦè¯Ŀ é¢ĺ +ä¸ĩ人 次 +ä¸ĸ éĹ´ +ç͍ å·¥ +è´¯ ç©¿ +å®Ŀ çŁ³ +ä½ł 好 +åĪĩ åī² +强 åĽ½ +åĽŀ èIJ½ +æ°´ æĻ¶ +模 仿 +æ´ª æ°´ +éĢĻ éº¼ +åįģä¸ī äºĶ +ä½ ij +éĻ Ħä»¶ +çļĦ å¢ŀéķ¿ +éĻĦ å±ŀ +çݰ å·² +帮 ä½ł +éĩij çīĮ +é«ĺ åİŁ +åľ¨ å®¶éĩĮ +éĺ² èħIJ +ç¡®å®ŀ æĺ¯ +宣 讲 +天 æīį +ç»ıèIJ¥ 管çIJĨ +éĶħ çĤī +åIJĪ ä¸Ģ +è§Ĥ èµı +éķ¿ è¾¾ +主ä¹ī æĢĿæĥ³ +éĤ£ 麼 +é£İ äºij +为主 çļĦ +æļij åģĩ +æĮģ ä¹ħ +å¼Ĥ åľ° +å¼Ģ éŨ +模 æĿ¿ +æī¹ 次 +ä¸į 便 +天 çĶŁ +åĩł 个æľĪ +ä¸ĵ ç§ij +åı¦ æľī +åħ¬å¸ĥ çļĦ +æĩ · +åľº åIJĪ +çļĦå¿ĥ æĢģ +è¿ĺ 好 +å®ŀ æĪĺ +èĢģå¸Ī çļĦ +åħ© åĢĭ +åı¯ åľ¨ +éĤ£ ä½į +å¥ł å®ļäºĨ +ä¿ĥ éĶĢ +æı´ åĬ© +ä¸ĩ çī© +æĥħ æĬ¥ +é¦ĸåħĪ è¦ģ +æĸĩåĮĸ åĴĮ +éĥ½ å·²ç»ı +ä¸Ĭ ä¸ĸ纪 +åĨľ åľº +大 æī¹ +æĺİçϽ äºĨ +çļĦ æĪIJéķ¿ +çļĦ æ¯ĶèµĽ +失 误 +åģļ æĪIJ +ä»Ĭ天 å°ıç¼ĸ +é¢Ĩ è¢ĸ +æıIJåįĩ äºĨ +å¾IJ å·ŀ +ä»į æľī +è¿ĩ 滤 +å¹½ é»ĺ +çĥŃ éĩı +ä¸Ģ é¦ĸ +æ¼Ĥ亮 çļĦ +åĩł ç§į +åĢ¡ è®® +å°±åı¯ä»¥ äºĨ +æİĴ åĪĹ +éĩį éĩį +ä¼ģä¸ļ åĴĮ +ä¸ĵ å±ŀ +çħ İ +亲 æĪļ +çϾåĪĨ ä¹ĭ +稿 ä»¶ +è¿ĺ å¾Ĺ +人 åĵ¡ +äºī 夺 +æĽ´ 容æĺĵ +大 èĩªçĦ¶ +鼻 èħ¦ +太 空 +åľ° å¤Ħ +å¤ ¢ +ä»ĸ 对 +å¿ħ å°Ĩ +ä¸į å½ĵ +严 è°¨ +åĩº åľº +å·²ç»ı æľī +é¢Ĩ åĨĽ +é«ĺ æ¡£ +ä¸Ģ æīĢ +æł Ĺ +让 åѦçĶŁ +æĽ¹ æĵį +æŁIJ ä¸Ģ +伸 åĩº +èĬ± åįī +æ¸ħ éĨĴ +èģĶç³» æĸ¹å¼ı +åĪĨ å±Ģ +èħ ³ +æ©¡ èĥ¶ +éķ¿ å¾Ĺ +绿 åľ° +è¢ į +çļĦ èīºæľ¯ +女 æľĭåıĭ +ä¸Ń è¶ħ +离 åŃIJ +å¤ļæł· åĮĸ +éĺ³ åı° +ä½İ 碳 +ä¸Ģ ç±» +çŃīæĸ¹éĿ¢ çļĦ +å¾Ĺ 好 +模 åħ· +ä¸ĩ 亿 +çķĻ æĦı +临 æ²Ĥ +å°ij éĩı +çľĭ åIJij +ç»ıèIJ¥ èĢħ +çķĻä¸ĭ äºĨ +åĿı äºĨ +åijĬ åĪ« +羣 çIJĨ +ç¼´ è´¹ +æĬĬ ä½ł +çļĦ ä»»åĬ¡ +æĪij 对 +ä¹° åħ¥ +çĻ» ä¸Ĭ +æľī 两个 +ä¸Ģ 头 +æĵį æİ§ +åħ¨ è¦ĨçĽĸ +çĿĢ æīĭ +å¢Ļ éĿ¢ +å¤ļ æĸ¹ +åı¯çα çļĦ +ä¹Ł åı¯èĥ½ +æľĢ æľī +è¿ĻäºĽ éĥ½æĺ¯ +æĥ ¡ +å® ® +å¾Ī å°ı +éĹ®é¢ĺ æĺ¯ +åĿĩ æľī +å¾ģ éĽĨ +说 åĩº +æľī æĦı +é¢ Ĥ +æī¬ å·ŀ +åķĨä¸ļ 模å¼ı +çĶŁ èĤĸ +æįIJ 款 +å² Ĥ +ç¾İ æĻ¯ +è¿ĺ 羣 +æĭ¥ æĬ± +身ä½ĵ åģ¥åº· +æ·± å¤Ħ +çľ¼ ç¥ŀ +çļĦ 形象 +ä¼ĺ è¶Ĭ +å½ĵ æĪIJ +åĮº åĪĨ +åİ» éϤ +注 å®ļ +å§IJ 妹 +åĮº åĨħ +é© ļ +æļĹ ç¤º +æĺİ äº® +æħ° éĹ® +å¸Ĥåľº 份é¢Ŀ +çĮª èĤī +çļĦ èµĦéĩij +åİĨ ç»ı +å§ĭç»Ī åĿļæĮģ +çĶŁ æľº +ä¸į 顾 +éĩij åĪļ +大 声 +éĻķ 西çľģ +é² į +åĨľä¸ļ åĨľæĿij +æľī 害 +éŨ è¯Ĭ +æ¯ı ä¸Ģ次 +çļĦ 
åĽłç´ł +é¢Ŀ å¤ĸ +åİ¿ 级 +çļĩ åIJİ +åĽ½ ä¼ģ +é¦ĸ éĢī +ç¼ĸ åĨĻ +æĭ¿ èµ· +åģ· åģ· +ä¸İ ä¸ŃåĽ½ +åįĸ å®¶ +ç»Ļ ä»ĸ们 +ç¥ŀ è¯Ŀ +åѸ æł¡ +æĪij ä¸Ģ缴 +çŁ¥éģĵ äºĨ +åį Ĵ +åĴĮ åľ°åĮº +ä»Ģä¹Ī éĥ½ +çĶ» å®¶ +æľ¬ çĿĢ +ä½Ļ åIJį +审 çIJĨ +ä¸Ģ åIJij +åıijå±ķ è¶ĭåĬ¿ +åĮº éĹ´ +注åĨĮ èµĦæľ¬ +çIJ ¦ +ä¸į åı¯ä»¥ +çļĦ åĦ¿åŃIJ +å̼ çıŃ +ä¸¥æł¼ çļĦ +å®ŀä½ĵ ç»ıæµİ +æľī æĿĥ +æĪij åıĪ +éĵ¶ æ²³ +ç«ĭ 马 +æĿĢ äºĨ +åĮħ 容 +管 å®¶ +身 é«Ķ +éĵ ħ +å°ı åŃIJ +管çIJĨ ç³»ç»Ł +æľīçļĦ 人 +é£İ ç͵ +æĻºèĥ½ åζéĢł +ç²¾ ç¡® +æĭĽåķĨ å¼ķ +æĭĽåķĨå¼ķ èµĦ +äºĮæīĭ 车 +åİ¿ å§Ķ +èīº äºº +å¥ ķ +è¿İ æĿ¥äºĨ +ç»ĵæĿŁ äºĨ +çļĦ ä¼łç»Ł +æĭ¼ æIJı +奥 迪 +çĸij æĥij +ä¹ĭ æĹ¥èµ· +æłĩå¿Ĺ çĿĢ +åľ° åįĢ +è¯ł éĩĬ +åΰ æľŁ +åħ¨ éĥ½ +çŁŃ æļĤ +æĺ¯ æĪijåĽ½ +æĪij å·²ç»ı +æ»´ æ»´ +天 èµĭ +对 她 +åį«çĶŁ éĹ´ +çĶŁäº§ åŁºåľ° +æĹ¥ è®° +çļĦ æķĻåѦ +åĵ ĩ +æ°ij äºĭ +è¿ĺ åİŁ +æīĭ ä¸ŃçļĦ +çļĦ èī¯å¥½ +æ· « +ä¸Ńåħ± ä¸Ń央 +åĪ ĥ +åĵ Ħ +åľ¨ ä»ĸçļĦ +å°Ī æ¥Ń +åľº éĿ¢ +éĤ» å±ħ +çĹ Ĵ +å¦ Ħ +å¤ĸ ç§ij +ä¸į éĢĤ +举åĬŀ çļĦ +é Ĥ¹ +åħļçļĦ 建设 +çϼ 表 +è·¨ çķĮ +æ²ī æ·Ģ +大 çīĩ +è¶Ĭ é«ĺ +å°Ĩ æĺ¯ +è§ī éĨĴ +åĤ¨ åŃĺ +å¢ŀ 大 +ä¸į 让 +æķ´ å½¢ +å¹³åı° ä¸Ĭ +åĩł ä½į +è¯ī æ±Ĥ +好 ä¸į好 +åľ į +æĸĩ æľ¬ +é̲ åħ¥ +ç´ į +æł¹ æĵļ +èįī æ¡Ī +åħŃ ä¸ª +åĭ ¿ +åζ æĪIJ +饮 æ°´ +æ°¸ æģĴ +èĩª æĿĢ +åı¸ 马 +éļ¾ çĤ¹ +为 æĪij们 +å¼ § +åī© ä¸ĭçļĦ +åĩĨå¤ĩ 好 +çļĦ æľĢä½³ +èģĶåIJĪ ä¼ļ +æĤ£èĢħ çļĦ +æĪijä¸į çŁ¥éģĵ +ä¸ĭ ä¸Ģ个 +åıijå±ķ æĸ¹åIJij +ç¬ ¨ +æīĢ以 æĪij们 +åĨĻ äºĨ +éĢł æĪIJäºĨ +æ²Ļ æ¼ł +çŃĽ éĢī +çģ¾ åĮº +ä¸Ĭ çľĭ +éħ ¶ +æ»ļ åĬ¨ +éļ¾ åħį +åIJī åĪ© +ä¸Ģ ä¸Ģ +ç²¾ å¯Ĩ +伸 æīĭ +礼 仪 +åħ¨ æĺ¯ +è¶Ĭ 大 +ä¸Ń æłĩ +åıĸ åĨ³ +åıĸåĨ³ äºİ +éĢĶ ä¸Ń +讨 åİĮ +æīĭ åĨĮ +第 ä¹Ŀ +åŃĶ åŃIJ +çĦ¶ å¾Į +ä¸Ģ åħ± +æµ· æĬ¥ +款 å¼ı +æķ´ 天 +è¾¹ çķĮ +è·¯ è¾¹ +æĻĭ 级 +åIJIJ æ§½ +çļĦ åħ³æ³¨ +æĪij 没æľī +å°±æĺ¯ åľ¨ +缮 çļĦæĺ¯ +åį³ä½¿ æĺ¯ +é¡¶ å°ĸ +å·²ç»ı åľ¨ +å®īåħ¨ éļIJæĤ£ +æłĩ æĿĨ +åįĹ éĢļ +ä¼ļ 对 +座 ä½į +èµ¢å¾Ĺ äºĨ +åİŁæĿ¥ çļĦ +身 为 +书 åºĹ +è¢Ń åĩ» +ä»Ĭ æĻļ +以 èī² +以èī² åĪĹ +æĬĸ éŁ³ +åį´ æ²¡æľī +丧 失 +çļĦ å±ĢéĿ¢ +åįģåĽĽ äºĶ +çŃī 缸åħ³ +æ±ĩ æĢ» +å¤ĸ 表 +为 æ°ij +éľĩ æĥĬ +å¥Ĺ è·¯ +çĬ¯ç½ª å«Įçĸij +å°Ĩ 以 +çİĩ é¢Ĩ +éħĴ åIJ§ +è¡Įä¸ļ åıijå±ķ +å¹´ èĩ³ +åύ æĿIJ +åĴĮ æĬĢæľ¯ +æľĢ å°ı +è¿Ļä¸Ģ åĪĩ +èģĮ ç§° +å½ĵ ä½ľ +æİĢ èµ· +åĴ ĭ +ä¸Ń éĥ¨ +æīĭ èĩĤ +ç½¢ äºĨ +媳 å¦ĩ +æ´½ è°Ī +æĹ¶ä»£ ä¸ŃåĽ½ +人çĶŁ çļĦ +æŀģ éĻIJ +ç¦ Ħ +åĮº æĶ¿åºľ +æľ¬ éĴ± +礼 åĵģ +çļĦ éĤ£ä¸ª +侦 æŁ¥ +太å¤ļ çļĦ +å®ŀæĸ½ æĸ¹æ¡Ī +é«ĺ æłĩåĩĨ +æĮĩæĮ¥ éĥ¨ +å̾ æĸľ +çī¹èī² ç¤¾ä¼ļ +çµIJ æŀľ +éĴ» çŁ³ +ç§» æ¤į +çī¹ ç§į +èĩª æĦ¿ +æĭľ çĻ» +åįķ 身 +åį´ åıĪ +åĪ¥ 人 +åIJĪ è§Ħ +æľº ç͵ +çī¹ æĦı +å½ĵåīį ä½įç½® +ä¹° å®¶ +åIJĪ çº¦ +èĤ© èĨĢ +为 åĩĨ +å®¶ è£ħ +çļĦ çĥŃæĥħ +éĿŀ éģĹ +çļĦ éŃħåĬĽ +åİŁ åijĬ +社ä¼ļ åIJĦçķĮ +ä¹° çļĦ +å¤ļ åIJĥ +éĽķ å¡ij +èµ· ä¹ī +åĬł åī§ +éĤ£ä¸Ģ åĪ» +å°Ĩ è¿Ľä¸ĢæŃ¥ +æ¡Ĥ æŀĹ +æĽ´ 强 +对 ä¼ģä¸ļ +æĹł æĦı +ä¹łè¿ijå¹³ æĸ° +æµģ 失 +å¾® 软 +缸 对äºİ +座è°Ī ä¼ļ +主 èIJ¥ä¸ļ +主èIJ¥ä¸ļ åĬ¡ +ç§ģ åĭŁ +å±ķ示 äºĨ +常æĢģ åĮĸ +è² ´ +符 åı· +å¹´è½» çļĦ +å°± éľĢè¦ģ +ä¹Ł æĽ¾ +çļĦæĥħ 绪 +è¾¾ æłĩ +èĩ ¨ +ä½į å±ħ +ä»ħ 为 +é¦ĸ å®¶ +éĺ´ éĺ³ +ä¸įåĨį æĺ¯ +åĽłä¸º å®ĥ +ä¼ģä¸ļ åľ¨ +çĺ ¾ +åIJ¬ è§ģ +åİŁ æľī +åζ è£ģ +å¯Ĥ å¯ŀ +éĢļè¿ĩ 对 +æ»ij éĽª +è¿Ļ å¼ł +çļĦ çIJĨè§£ +æĸ° ä¸ŃåĽ½ +è¿Ļ åĦ¿ +ä½İ ä»· +æĥ³ è¿ĩ +çļĦ ä¿¡å¿ĥ +建çŃij çī© +çļĦ é¢ľèī² +ä¸į åºĶ该 +æĹłçĸij æĺ¯ +å¼ķèµ· äºĨ +åħ¨ åijĺ +æĿ° åĩº +è¿Ļæĺ¯ æĪij +èª ° +èĺ ĩ +éĺµ åľ° +åħħ å̼ +çŁ¿ ä¸ļ +çĿĢ ä»ĸ +ä¿¡ 访 +ä¸ĩ è¾¾ +æij© æĵ¦ +å¼Ģ 端 +èı² å¾ĭ +èı²å¾ĭ 宾 +车 åŃIJ +æľ¬èº« çļĦ +çģ«è½¦ ç«Ļ +常 å·ŀ +为 代表 +为代表 çļĦ +广 ç͵ +亲 人 +åı³ æīĭ +éĽĨ è£ħ +éĽĨè£ħ ç®± +çļĦ åį°è±¡ +æ©Ł æľĥ +åĮĨ åĮĨ +åħī ç͵ +大 æĸ¹ +è¿ĺ æľª +åĪ© 好 +ç»Ŀ 大å¤ļæķ° +åľ¨ è¿Ļç§į +ä¸Ģ ç»Ħ +æĸ° èĤ¡ +转 åıij +æ³ķ åºŃ +æĹł æīĢ +éģĵ è·¯ä¸Ĭ +çŁ¿ å±± +èij ī +æĶ¶ åĽŀ +ç§° ä¹ĭ +ç§°ä¹ĭ 为 +æıŃ éľ² +åı£ 岸 +åIJ ¼ +å¿ĥ æĥ³ +çļĦ 梦æĥ³ +éĽ ¯ +ä¹ĭ åĪĿ +å¥ĸ 项 +订 éĺħ +èĵĿ 天 +åĿ¦ åħĭ +ç«ĭ æ¡Ī +èģĶ æīĭ +ä½Ĩæĺ¯ æĪij +帮 æĪij +ä»ħ 代表 +说 
æĪij +çļĦ è¶ĭåĬ¿ +æ¯Ķè¾ĥ 大 +èµ° å»Ĭ +éĩįçĤ¹ é¡¹çĽ® +èµĮ åľº +åIJį çīĩ +æĦŁ åı¹ +åľ¨ åľ°ä¸Ĭ +åıij çĥŃ +èĮĥ çķ´ +çļĦ éģĵè·¯ +éĩij èī² +ä»ĸ åıĪ +ä¼ļ 产çĶŁ +æ°ij åĽ½ +å®ĺæĸ¹ ç½ijç«Ļ +æĶ¶çĽĬ çİĩ +çļĦ åΰæĿ¥ +çļĦ åĬŀæ³ķ +æĶ¹ åζ +ä¸ĩ ç§ij +ä¸į äºĪ +è¿ĻäºĽ éĹ®é¢ĺ +çα ä¸Ĭ +çIJĥ åľº +è´£ 令 +æİĪ è¯¾ +åľ¨ é¦Ļ港 +ç»Ĩ èħ» +å¤ļ ä¸ĩ +åIJĮ å¹´ +大 使 +æĸ ĭ +ä¹Ł 为 +æĥł å·ŀ +åIJī 祥 +çͰ åĽŃ +åĽ½å®¶ éĺŁ +éĩį çĶŁ +åľ¨ åħ¶ +é¦Ļ åij³ +è´Ł èį· +亲 åĪĩ +èĩª 豪 +没 éĶĻ +åĽłä¸º åľ¨ +æĺŁ æĺŁ +éĤ ij +è¿ĺæľī å¾Īå¤ļ +æij© æīĺ +æij©æīĺ 车 +æŃ¥ è¡Į +管çIJĨ ä½ĵç³» +èĦļ ä¸ĭ +éģİ åİ» +æ±ī è¯Ń +对 ä¸įèµ· +çļĦ ç»ıåİĨ +åıĬ 缸åħ³ +ä¸įå°ij 人 +éĩį ç£ħ +åĬ³åĬ¨ èĢħ +大åĬĽ åıijå±ķ +æĢİä¹Ī åģļ +çĭĹ çĭĹ +举åįĹ äºļ +åĭĩ äºİ +åħ¬ éĸĭ +çĵ· çłĸ +åıĤ çħ§ +广æĴŃ ç͵è§Ĩ +举 åĬ¨ +æ±Ł 西çľģ +æķĪ èĥ½ +å͝ æľī +éĿ¢ è²Į +èĩªåĬ¨ 驾驶 +æ¦ľ åįķ +å½ĵ æĪij们 +仲 è£ģ +æľ¨ æĿIJ +ç±³ åħ° +çϽ éĵ¶ +çļĦ 人éĥ½ +å°± åĥıæĺ¯ +æŃ¥ åħ¥ +åįł ç͍ +åĩ» è´¥ +让 大家 +ä¼ļ è®©ä½ł +åİ¿ æĶ¿åºľ +è¦ģ ç͍ +çŃī å½¢å¼ı +åįĩ é«ĺ +责任 æĦŁ +å¤ĩ ç͍ +ä»ĸ 认为 +æ¸ħåįİ å¤§åѦ +ä»ĸ èĩªå·± +éĸ± è®Ģ +太平 æ´ĭ +éĶģ å®ļ +çŃ Ĩ +è¿Ļ çīĩ +æī§ æĶ¿ +è¿ĶåĽŀ æIJľçĭIJ +å°± æŃ¤ +éģĩ åΰäºĨ +å¼Ģå¹ķ å¼ı +管çIJĨ éĥ¨éŨ +å§¿ åĬ¿ +设 æĥ³ +åĽĽ åŃ£ +æĬĢæľ¯ 人åijĺ +å·® çĤ¹ +è¾ŀ èģĮ +èĢģ 師 +çļĦ æĦŁåıĹ +ä¹Ł éĿŀ常 +å¹´ ä¸ĬåįĬå¹´ +æĢª çī© +èĮĥ æĸĩ +æĪĺ å½¹ +åIJ« ä¹ī +åħ¨ è¿ĩç¨ĭ +èĢĮ éĿŀ +éĢļ讯 åijĺ +è¿Ļæł· æīįèĥ½ +æľº ç»Ħ +è£ ı +çķ¶ çĦ¶ +èµĮ åįļ +åIJĦ æľī +å·¥ä½ľ æľºåζ +äºĭ åIJİ +åī§ éĻ¢ +å±Ĭ æĹ¶ +åĺ´ éĩĮ +主 线 +ä¸Ģ åľĪ +主è¦ģ åİŁåĽł +å°¸ ä½ĵ +åĮ»çĸĹ åĻ¨æ¢° +ä½ł æĢİä¹Ī +ä½Ĩ çͱäºİ +æĹ¶ 空 +çĶ· æľĭåıĭ +çĶľ èľľ +é«ĺ åľ° +æĻ ĸ +èĴIJ éĽĨ +åĩĿèģļ åĬĽ +å¤ĩ åıĹ +æĸĩ åĪĽ +马 æĿ¥ +马æĿ¥ 西äºļ +æŁ´ æ²¹ +使 人 +æķĻ ä¼ļ +ç§ĭ 天 +æĺİ çıł +åħŃ åįģ +çݯå¢ĥ ä¸Ń +æ¸ħ æĻ¨ +积æŀģ åıĤä¸İ +å·ħ å³° +为 æľŁ +çѾ åŃĹ +æĦŁ æ¿Ģ +ç§ĭ åŃ£ +æĿij åŃIJ +æ¢ħ 西 +æļ´ 鼨 +çĶŁæ´» åľ¨ +çªĹ æĪ· +æģ¶ åĬ£ +纯 ç²¹ +åľ¨ æİ¥åıĹ +没 èĥ½ +è¡Į 人 +åĭ º +æĭ¨ æīĵ +ä½ľ åĩºäºĨ +çļĦ 主é¢ĺ +æľª ä¾Ĩ +ä¸Ń æľĢ +æ¾ ľ +é«ĺ è¡Ģåİĭ +åħ´ èµ· +æŃ£ èĥ½éĩı +åŁ¹è®Ń çıŃ +æİ¥ åħ¥ +çĦ¶åIJİ åĨį +åѦçĶŁ 们 +é¢ĨåħĪ çļĦ +çģ« çĥŃ +ä¸ĵ èģĮ +æĪĸèĢħ 说 +建 è¨Ń +é» ı +对 åħ¬åı¸ +çī¹ æľīçļĦ +åħī èᣠ+å½ĵ åľº +éĿ¢ åŃIJ +èµĦ产 管çIJĨ +æĹ¶æľŁ çļĦ +çŀ İ +åįİ ä¸ľ +åıĪ ä¸Ģ次 +èĥİ åĦ¿ +å®ļ çĤ¹ +头 çĹĽ +æ¶² ä½ĵ +æĺ¯ä¸Ģ ä½į +帽 åŃIJ +å¹´ èµ· +ä¸į ä½İäºİ +è¾ĥ å°ij +éĿ¢ä¸´ çĿĢ +å±Ĥ å±Ĥ +èĿ´ èĿ¶ +èī° èĭ¦ +éĺ¿ æł¹ +éĺ¿æł¹ å»· +æ¦Ĥ æĭ¬ +请 éĹ® +èµ· åºĬ +å±Ģ å±Ģéķ¿ +稳 åģ¥ +å¦Ĥæŀľ æĪij们 +éħĴ ç²¾ +æĪ· åı£ +æĦŁ æĤŁ +æĪij们 éľĢè¦ģ +æĬĢ èīº +èĩª åªĴä½ĵ +è¿Ľ åĮĸ +æ¿ĢçĥĪ çļĦ +ä½ĵ 温 +èļ ķ +èĩ´ è¾ŀ +宪 æ³ķ +ä¸Ģ çŃīå¥ĸ +çĵ¶ é¢Ī +æĥł æ°ij +èµ° è·¯ +çݰ ä»» +åķĨ éĩı +ä¸ĭ 车 +åĪ ł +責 ä»» +èŀįåIJĪ åıijå±ķ +ç´ł æĿIJ +æ²¹ ä»· +åģļ 人 +çŀ ª +æĶ¹éĿ© åĪĽæĸ° +çļĦ åĮºåĪ« +è·¨å¢ĥ ç͵åķĨ +æ¶īåıĬ åΰ +æīĺ 管 +æĪij è¿ĺæĺ¯ +åĿIJ æłĩ +ç½ij 讯 +å½ĵåľ° çļĦ +追 溯 +åľŁ è̳ +åľŁè̳ åħ¶ +åºķ ä¸ĭ +åĩł åįģå¹´ +ç©¿ è¿ĩ +çĶŁæĢģ æĸĩæĺİ +æİ¨ èĸ +æİ¨èĸ ¦ +éł Ĩ +åĴ³ åĹ½ +åĪĨ æĪIJ +çĹķ 迹 +æĪ· ç±į +éĥ½ ä¸įèĥ½ +æĻļ ä¼ļ +åĢ © +ä½ĵ åĬĽ +è¿Ļ个 èģĮä¸ļ +æĹł å½¢ +åıª æĥ³ +è¿Ľ åıĸ +æĿĢ æŃ» +èĦ Ĭ +äºij åįĹçľģ +æľª çŁ¥ +ç¾İ èģĶ +ç¾İèģĶ åĤ¨ +å¤ĸ å½¢ +诱 æĥij +çĽ £ +è¡Į 使 +åłĨ 积 +çĨŁ ç»ĥ +éĺIJ è¿° +æľĢ大 éĻIJ度 +å·¡ æŁ¥ +夺 åĨł +ä¼ģä¸ļ æĸĩåĮĸ +çĭ® åŃIJ +ä¿Ŀ å®Ī +ä¸ºæł¸å¿ĥ çļĦ +æī© æķ£ +åζéĢł åķĨ +æŁĶ 软 +为ä¸Ģä½ĵ çļĦ +游 çİ© +çĶŁ çĹħ +幫 åĬ© +åͱ æŃĮ +æīį åı¯ä»¥ +宽 æĿ¾ +è¦ģ æ¯Ķ +æĺ¯ æĢİæł· +çģ° èī² +çİĭ åĽ½ +æIJħ æĭĮ +计 éĩı +åij¨åĽ´ çļĦ +æĻºèĥ½ æīĭæľº +常 åĬ¡ +常åĬ¡ åī¯ +é© ´ +å°Ĩ è¿ij +寻 常 +ä¸ŃåĽ½ å¸Ĥåľº +容 åύ +å±± ä¸Ĭ +èĥĮåIJİ çļĦ +亲 å¯Ĩ +æīĢ以 说 +éİ ® +çļĦ çIJĨçͱ +大 åŁİå¸Ĥ +常 å¹´ +æĹħ游 ä¸ļ +å°±æĺ¯ è¿Ļæł· +åĨį æĿ¥ +é«ĺ ä½į +åĨħ 饰 +æŀĦ éĢł +ä¸Ģ èµ·æĿ¥ +çͳ è«ĭ +å·²ç»ı å¼Ģå§ĭ +çļĦ åĬ¨ä½ľ +被 è¿« +éģį å¸ĥ +åīĸ æŀIJ +å°ı äºĭ +å¿ĥ ä¸ŃçļĦ +ä½ĵåζ æĶ¹éĿ© +çļĩ å®¶ +æķĻ åłĤ +åIJĥ å®Į +åĽ½æ°ij åħļ +æĺİç¡® äºĨ +åıijå±ķ è§ĦåĪĴ +第ä¸Ģ æŃ¥ +å¾Ĺ èµ· +åľ¨ åĵª +çļĦ 
è·¯ä¸Ĭ +é» Ķ +çķ¶ æĻĤ +大åĬĽ æĶ¯æĮģ +åıĮ éĩį +çŁ¥éģĵ èĩªå·± +åIJĪä½ľ åįıè®® +æ°Ķ åĬ¿ +éķ¿æķĪ æľºåζ +ç½ķ è§ģ +åĽŀ æĿ¥äºĨ +ä»ĸ ä¼ļ +ä¸Ń æĸ° +ä¸Ńæĸ° ç½ij +çļĦ åķĨåĵģ +èµł éĢģ +決 å®ļ +å¸Ĥåľº çĽij管 +çķĻ åѦçĶŁ +ç͵ åİĭ +äºļ 马 +äºļ马 éĢĬ +è¿ĺæĺ¯ æ¯Ķè¾ĥ +ä¿ĥè¿Ľ äºĨ +æµģ åħ¥ +æijĦ åĥı +æijĦåĥı 头 +æıIJ åıĬ +åıij æİĺ +æī¾ åĩº +æ¢Ŀ ä»¶ +ç¹¼ çºĮ +æĪij åĸľæ¬¢ +å¥ İ +æ¦ľ æł· +å¼Ģ èĬ± +æ²ī éĩį +åŁº åĩĨ +ä»ħä»ħ æĺ¯ +轨éģĵ 交éĢļ +åĶIJ å±± +çŃī ä¸Ģç³»åĪĹ +ä¸įè¿ĩ æĺ¯ +åŃĺåľ¨ çĿĢ +èĬ± çĶŁ +å¤ · +ç»Ī ç©¶ +ä¹Łæĺ¯ ä¸Ģ个 +åįģ åŃĹ +èĸª éħ¬ +伤 å¿ĥ +æĺ¥ ç§ĭ +åĨ· åį´ +ç²¾ çģµ +çļĦ åľ°åĽ¾ +æ¯Ķ çī¹ +æ¯Ķçī¹ å¸ģ +æĢ§ åĪ« +ä½Ļ ä¸ĩåħĥ +ä¸įå¿ĺ åĪĿå¿ĥ +å¿ĥ çĸ¼ +æĽ² 线 +é«ĺ ä½İ +è¦ı å®ļ +æĻ¯ èī² +è¦ģ 说 +åħ¬åı¸ å°Ĩ +æ¶² åİĭ +è¿Ŀ 约 +åİļ 度 +åºŀ 大çļĦ +è¿ĺæĺ¯ å¾Ī +é¦ĸåħĪ æĺ¯ +çµ ² +åĬ¡ å®ŀ +並 ä¸Ķ +å¢ŀ è¿Ľ +ç»Ħç»ĩ å¼Ģå±ķ +èµ·æĿ¥ äºĨ +è¾ĥ å°ı +导 游 +两 åľ° +ç¿ ĺ +çģ¿ çĥĤ +é£İ éĩĩ +æĶ¯ 线 +æĶ¯çº¿ ä»»åĬ¡ +娱ä¹IJ åľĪ +天津 å¸Ĥ +åĮħ åĽ´ +æľ¬ èµĽåŃ£ +éĩįè¦ģ 讲è¯Ŀ +åıĮ åIJij +åįİ ä¸½ +éĶ ¤ +åĦ¿ 女 +åįĸ åĩº +ä¾Ĩ 說 +ä»ĭç»į ä¸Ģä¸ĭ +åIJ¦ 认 +åĭ Ŀ +æĻ®éĢļ 人 +çļĦ åĬ¨åĬĽ +涨 åģľ +åŁºéĩij 管çIJĨ +ä¸Ģ个 éĩįè¦ģ +è¿IJ æ²³ +çħ ŀ +è´¢æĶ¿ éĥ¨ +è¡Įä¸ļ åįıä¼ļ +éĥ½ å°Ĩ +è¨Ģ 论 +ä¸ĭ ä¾Ĩ +墨 西 +墨西 åĵ¥ +åĽłä¸º ä»ĸ们 +æĢİä¹Ī åĽŀäºĭ +åĬłå¤§ 对 +èĬ Ń +çīĮ åŃIJ +ä¼ļ 使 +妹 åŃIJ +ç«Ļ éķ¿ +å¿ħ å¤ĩ +æłij æľ¨ +æģ¶ æĦı +æ²³ éģĵ +å¯Į è£ķ +ç¹ģ åįİ +代表 åĽ¢ +æµij 身 +é¦ĸ ä½į +èĪªç©º åħ¬åı¸ +鼻 å½± +ä¸ĵ è¾ij +æ°´ æºIJ +ä¸Ń æ¯Ĵ +並 ä¸į +èĢĮ åİ» +é ĥĿ +äºİ æŃ¤ +æĸĩåĮĸ 建设 +èĤ¯å®ļ ä¼ļ +å¸ĮæľĽ 大家 +æıı åĨĻ +ä½İ è°ĥ +æĸ°åħ´ 产ä¸ļ +æ·Ħ åįļ +æĶ¾ å¼Ģ +çļĦ æĢ§æł¼ +çĸ¾çĹħ çļĦ +æķ´ é¡¿ +线ä¸Ĭ 线ä¸ĭ +éĢī 项 +çļĦ 认åı¯ +æķ´ é½IJ +çĶļ ä¹Ī +çľģ åĨħ +åı¤ 人 +æ°ij ä¿Ĺ +çī¡ ä¸¹ +éŨ çªĹ +éĤ£ æł·çļĦ +çĽijäºĭ ä¼ļ +ç¿¡ ç¿ł +ç¦ ¹ +åįĥä¸ĩ ä¸įè¦ģ +æĶ¶ 缩 +çļĦ æĸĩåŃĹ +åĴĮ å°ļ +æĮĩ 令 +åħ±äº§ åħļåijĺ +çļĦ çĪ¶äº² +å®Į å·¥ +åĬ¡ å·¥ +马 æĭī +马æĭī æĿ¾ +æµĭ è¯Ħ +å² ļ +ä¸į åģļ +ä¸ĥ å¹´ +åĿĩ ä»· +主 è§Ĥ +å¾Ī ä¸įéĶĻ +èĤ¡ä¸ľ 大ä¼ļ +äºĶ ä¸Ģ +é£İ åIJ¹ +å¼Ģ éĩĩ +è¿Ļä¹Ī 大 +èĥ½ çľĭåΰ +èĢĥ è¯Ħ +åį³ ä¾¿æĺ¯ +çݰ代 åĨľä¸ļ +æ¯Ķè¾ĥ é«ĺ +è¦ģ çľĭ +没 äºĨ +è§£ 決 +çݯ æ¯Ķ +åĨ² åĬ¨ +æ·± å¤ľ +åĩł åįĥ +ä¿ ı +ç½ij æ°ij +å°± 没 +ä»ĸ 表示 +éĩı åŃIJ +æĹ©é¤IJ åĬłçĽŁ +åįĬ å²Ľ +æIJŀ ç¬ij +ä¸Ĭ æĬ¥ +å¯ © +é¢Ħ 订 +èľĤ èľľ +æŁ¥ æī¾ +ä¼Ĺ æīĢ +ä¼ĹæīĢ åij¨ +ä¼ĹæīĢåij¨ çŁ¥ +æĹ© æĹ¥ +åıij æī¬ +åĴĮ 个人 +åĬłåħ¥ äºĨ +åĸ® ä½į +åĪĨ æĺİ +第ä¸Ģ æī¹ +ç¾İ åĨĽ +æĿĢ æīĭ +éŨ å¤ĸ +åķĨ åľĪ +ä¸Ģ åĪ» +çļĦçľ¼ ç¥ŀ +éľ Ħ +äºĽ ä»Ģä¹Ī +åĬł æ·± +æ¯ı ä½į +å¸Ĥ éĿ¢ä¸Ĭ +åıĶ åıĶ +çļĦ éĤ£ç§į +粤 港澳 +è´´ å¿ĥ +æĸĩåĮĸ 产ä¸ļ +红 æĹĹ +åĺī åħ´ +æĶ¶ çĽĺ +å®ĮæĪIJ åIJİ +ä¼ģä¸ļ 管çIJĨ +纵 横 +ä¸į ä¿¡ +æĪIJ éĥ½å¸Ĥ +æ´Ĺ 澡 +举è¡Į çļĦ +çĶ¢ çĶŁ +ç©¿ ä¸Ĭ +åĪļ 好 +åħī 线 +æīĵ æŀ¶ +è¿Ļ æľ¬ä¹¦ +åĶ®åIJİ æľįåĬ¡ +åĩł åĪĨ +ä¸Ĭ 次 +ä¸į åĪĨ +产 åIJİ +éģ¿ å¼Ģ +ç»Ī æŀģ +代表 大ä¼ļ +æ¼Ķ æĬĢ +åĽŀ è´Ń +åѦ è´¹ +éĺ» ç¢į +ä¸Ģ大 æī¹ +ç«£ å·¥ +åĨ³ å®ļäºĨ +ä½Ĩ å¦Ĥæŀľ +ç͵ æµģ +ä¸Ŀ 毫 +èĥ½å¤Ł åľ¨ +éĶĢåĶ® æĶ¶åħ¥ +åľ¨ åŃ¦æł¡ +æ°´ åĩĨ +è§Ĩ 线 +èĩª åľ¨ +åķĨä¸ļ éĵ¶è¡Į +为äºĨ 让 +çį² å¾Ĺ +çݩ家 æľĭåıĭ +éĿ¢ èĨľ +åĪĨ åī² +åī§ æľ¬ +ç« Ń +说 å¾Ĺ +æĥ³ çŁ¥éģĵ +çļĦ人 çī© +èĮħ åı° +åIJĮ ä¸Ģ个 +æķ°æį® ä¸Ńå¿ĥ +çĶ Ħ +åĸľ æĤ¦ +ä¸ĭæĿ¥ çļĦ +å®ļ åIJij +æŀģ åħ· +çļĦ åľŁåľ° +éĤ£ åĢĭ +æijĦ åħ¥ +äºĨ æĪijçļĦ +马 è·¯ +åħ¨ 社ä¼ļ +è®® æ¡Ī +å±ĭ åŃIJ +åIJį åı« +åĮ ª +åľ¨ å¤ĸéĿ¢ +åįİ åįĹ +åıij è´§ +å¯Ĵ åĨ· +é«ĺçŃī æķĻèĤ² +详ç»Ĩ çļĦ +个 é¡¹çĽ® +çĶŁäº§ åĬĽ +æĹ¶ 常 +å°± æľĥ +ä¸ĩ èĤ¡ +éĻĮçĶŁ 人 +æıı ç»ĺ +å½ĵ çĦ¶æĺ¯ +æĭī åĬ¨ +éĵ¾ æĿ¡ +æī£ éϤ +ä¸Ģ缴 éĥ½ +å°ı åŃ©åŃIJ +伤 åı£ +第äºĮ å±Ĭ +è´Ń ç½® +çļĩ 马 +æĹł èģĬ +表 åĨ³ +诸 å¦Ĥ +åĵį èµ· +é£İ æļ´ +ä¸Ģæµģ çļĦ +ç ·¨ +è§£æĶ¾ åĨĽ +室 å¤ĸ +å°± è¿Ļä¹Ī +å³ ¶ +æīĢæľī 人éĥ½ +æIJľç´¢ å¼ķæĵİ +çļĦ æĪIJæľ¬ +åħļ æĶ¿ +åıijè¡Į 人 +çļĦ äºĭå®ŀ +对 该 +åıĹ æįŁ +ä¿Ħ ä¹Į +é²ľ èĬ± +åĨľ èᝠ+æŀģ éĢŁ +æĢ¥ æĢ§ +两 ä¼ļ +ä¸Ģèά æĿ¥è¯´ +æµ· é²ľ +åĨ Ī +ç͍ 人 +çĶ¨äºº åįķä½į +åĢ ª +åĦª æĥł +æł¹ æºIJ +åĽ¢ è´Ń +ç¾İ æ´² +ä¸ĭ 
è¡Į +å¹´ æľ« +èľ ¡ +è¯ģ ä»¶ +åľ¨ æĪijåĽ½ +ä¸į åºĶ +æĮī æĹ¶ +åłª ç§° +åľº ä¸Ĭ +å¹²éĥ¨ èģĮå·¥ +æľī å¾Ī大çļĦ +æķ°åŃĹ ç»ıæµİ +æ¼Ķ ç»ĥ +æį® ç»Łè®¡ +å¾Ģ æĿ¥ +广åijĬ æľįåĬ¡ +çļĦ è·Ŀ离 +æŃ ¸ +è¨Ģ è¯Ń +被 èªī +被èªī 为 +åĭī 强 +å°Ĭ æķ¬ +ä¸ĩ 亿åħĥ +ä¸ŃåĽ½ åĽ½éĻħ +å¹² é¢Ħ +å¹´ 产 +èĢķ åľ° +èĮ İ +åį³ æĺ¯ +æĺ¨ æĻļ +æĪIJ为 ä¸Ģ个 +çºł æŃ£ +åij½ åIJį +é¢ģ å¸ĥ +çĮľ æµĭ +ä¿ĿèŃ· æĶ¿çŃĸ +æĭ ¢ +æ´» æ³¼ +çŃī éĥ¨éŨ +åѦ åΰ +å¢ŀå̼ ç¨İ +èĪª 线 +åĨ ¤ +åįģ åĩłå¹´ +æİ§èĤ¡ èĤ¡ä¸ľ +ä¸Ģ éŨ +个 å·¥ä½ľ +ä¸ªå·¥ä½ľ æĹ¥ +æĸ° 西 +æĸ°è¥¿ åħ° +论 è¯ģ +ä» Ĩ +åı¦å¤ĸ ä¸Ģ个 +æĶ¹ ç¼ĸ +严 ç¦ģ +åĸľ 好 +个人 ä¿¡æģ¯ +满æĦı 度 +åĵ ¨ +å¸Ī èµĦ +æĶ¹ 为 +ç«ŀäºī 对æīĭ +åĩº çĤī +åķĨ 人 +大 æ£ļ +æĮĩ导 ä¸ĭ +å¦ĩ ç§ij +è¼ ª +æī ģ +åIJĮæĹ¶ è¿ĺ +å¹¶ éĢļè¿ĩ +æĪĺ éĺŁ +èĶĵ å»¶ +ä¿ ŀ +éĢĤå½ĵ çļĦ +åīį è¾Ī +åĵģ åij³ +湿 åľ° +æĪIJ åŀĭ +ä¸į åıªæĺ¯ +æĥ© ç½ļ +åĩºåı° äºĨ +çİ© 游æĪı +æīį åıijçݰ +åºĶ èģĺ +å¤ĸ æĿ¥ +åįł é¢Ĩ +å±ķ æľĽ +å« Ĥ +港 èĤ¡ +æ¡Į ä¸Ĭ +æĶ¯ æŁ± +çļĦæĥħ å½¢ +广éĺĶ çļĦ +æĶ¯ è¡Į +å´© æºĥ +æľĪ ä¸Ń +æľĪä¸Ń æĹ¬ +ç»į åħ´ +临 è¿ij +æĬ¤ æłı +æļ ® +åįķ èģĮä¸ļ +è¾¹ å¢ĥ +æĹ¥ çħ§ +ä¸Ģ åłĨ +缴 å¾Ħ +åħ±åIJĮ ä½ĵ +æĸ°åįİ ç½ij +æīĵ 好 +ç͵åĬ¨ 汽车 +ä¸į æĺİçϽ +éĢĻ è£¡ +缼 大 +çİĭ æľĿ +åĨį ä¸Ģ次 +åĬŀåħ¬ åİħ +è´¨ æĬ¼ +åIJĪ åĩ» +人们 对 +鼶 é£Ł +éĥ½ä¸į çŁ¥éģĵ +çļĦ è¯Ńè¨Ģ +åĭŁéĽĨ èµĦéĩij +åĬ¨ èĦī +å½ ¤ +è¿Ļ åĩłå¹´ +çŁŃ è§Ĩé¢ij +太 é«ĺ +常 å§Ķä¼ļ +åĬł çıŃ +éĩį å¿ĥ +åªĴä½ĵ æĬ¥éģĵ +没 æ³ķ +éĹ» åIJį +çĥŃ åº¦ +å¹¿æ³Ľ çļĦ +åħŃ å¤§ +çī© ä½ĵ +ä¸į 该 +é¢ĺ 主 +精彩 çļĦ +为 è¿Ľä¸ĢæŃ¥ +èĻ ŀ +åĽº çĦ¶ +è´µå·ŀ çľģ +çºł ç»ĵ +代çIJĨ 人 +æ³ķå®ļ 代表 +åı¦ä¸Ģ ç§į +ä¸į åIJ« +æĭ¯ æķij +ä¼ļ ç»Ļ +è¯Ĺ è¯į +åIJĮ ç±» +å¾Ĺ ä¸įåΰ +æĬĵ ç´§ +以 åħ¶ +åħ¥ åħļ +è¿ĺ åı¯ +æľŁ åĪĬ +å¾Īå¤ļ æĹ¶åĢĻ +æĹ¥ åIJİ +åħ¬ 约 +ä¸Ģ 举 +æ¯Ķè¾ĥ å¤ļ +éĩij æ²Ļ +æį ŀ +æİĴ åĩº +æŃ¦ æľ¯ +ä¸į æĸ· +ä¸Ń èĢĥ +ä¿¡ èµĸ +ä»İä¸ļ 人åijĺ +çģ« çĦ° +éĨĴ æĿ¥ +ä½İ 温 +é̾ æľŁ +åĬ± å¿Ĺ +éħ ¥ +åı¯è°ĵ æĺ¯ +è¿Ļ æĦıåij³çĿĢ +é¢ł è¦Ĩ +åĮĹ京 大åѦ +ä¸ĵ 线 +åıĬ 以ä¸Ĭ +è¨ ª +èĢĮ åIJİ +çŁ¥ ä¹İ +ä¸Ģ对 ä¸Ģ +å¨ĥ å¨ĥ +çģ¾ éļ¾ +åħ¨ å±Ģ +æīĢå¾Ĺ ç¨İ +å®ŀ æĥł +èļĤ èļģ +ä¹Ł çŁ¥éģĵ +温 åĴĮ +èIJ½ ä¸ĭ +åŀĭ ä¼ģä¸ļ +åĨį ä¹Ł +ä¾Ľ çĥŃ +é«ĺ æ½® +çĢı覽 åύ +çļĦ 巨大 +åħΠ天 +å¹´ ä¸ŃåĽ½ +类似 çļĦ +çIJĨäºĭ ä¼ļ +空 éĸĵ +çģµ æĦŁ +åĬĽ æ°Ķ +带 ä¸Ĭ +ä¸į好 æĦıæĢĿ +æľī ä½ķ +å·² åľ¨ +åıĸ åĩº +è¿Ŀæ³ķ çĬ¯ç½ª +åŃ¦ä¹ł 贯彻 +åľ° 带 +楼 梯 +çŃī æĥħåĨµ +ä»İ åīį +çļĦ ä¹łæĥ¯ +ç³Ł ç³ķ +å°± èĥ½å¤Ł +è© ķ +ä¸Ģ å¾ĭ +æĮ« æĬĺ +åİŁæĸĩ åľ°åĿĢ +å½ĵ å±Ģ +ä¸į éĢļ +æķ° åįĥ +éĺŁä¼į 建设 +æĹ¶ èĬĤ +åģļ èµ· +çļĦ è®°å¿Ĩ +ç½ij绾 å®īåħ¨ +åĩ¡ æĺ¯ +æ° ¯ +éĽķ åĪ» +åŁĥ åıĬ +æĪij åı¯ä»¥ +çĽij çIJĨ +æĽ´ åħ· +åŁİ 管 +èĭ ¯ +åı¥ åŃIJ +èĭ¥ æľī +ä»İæĿ¥ ä¸į +缸åħ³ è´Łè´£ +å®īåħ¨ æĦŁ +æĽ´ è¦ģ +çļĦæĥħ æĦŁ +çī¢ çī¢ +è¾ĥ 好çļĦ +æ° ® +ç¬ij è¯Ŀ +车 å±ķ +ä¹ĭ ç¾İ +ç®Ģ 约 +ç±»åŀĭ çļĦ +èĢģ åĮĸ +çľĭ ä½ł +è¿ĩ åĪĨ +éŨ åīį +ä¸Ģ éĹ´ +æĥ³ åİ» +åª Ľ +åľŁ è±Ĩ +åıĪ ç§° +ä¸Ń ä¿¡ +åŃĺ éĩı +马 äºij +èĩ´ 使 +åħĪ åīį +èĢģ åŃIJ +æīĵ æī® +æ¯ķä¸ļ äºİ +æ¯ķä¸ļ åIJİ +ç¾İ好 çĶŁæ´» +å·¥ä¸ļ ä¼ģä¸ļ +就好 äºĨ +èħIJ èļĢ +çıį çıł +åΰ è¿ĻéĩĮ +æīĢéľĢ çļĦ +è¿Ļæĺ¯ åĽłä¸º +çIJĨæĥ³ çļĦ +å·®å¼Ĥ åĮĸ +é ® +é® ® +äºļ 太 +æĹł ç©· +æıIJ çݰ +ä¸ĵä¸ļ æĬĢæľ¯ +çĶ¢ æ¥Ń +åѦ åŃIJ +ç§ij å¹» +åįłåľ° éĿ¢ç§¯ +ä¸į åĩĨ +æľªæĪIJ 年人 +æĶ¶ å½ķ +è¿ĺ 款 +éĴ¢ çŃĭ +æ¼ ¢ +å¾Ĺ æĦı +综åIJĪ ä½ĵ +æŀģ é«ĺ +åįķ è¯į +é«ĺæķĪ çļĦ +骨 头 +æī§ çĿĢ +缼 ä¸ĸ +模 çī¹ +æĽ´ èĥ½ +ç»Ŀ æľĽ +对åºĶ çļĦ +æ¨ Ĭ +æĸ° ä¸ī +æĸ°ä¸ī æĿ¿ +æģ° æģ° +åIJį å®¶ +æł¸å¿ĥ æĬĢæľ¯ +个 å°ı +æĢİä¹Ī ä¼ļ +说 ä¸įå®ļ +西 çĵľ +åĵ İ +ç¢ Ł +å¿ħ ä¸įåı¯ +å¿ħä¸įåı¯ å°ij +ä¹ĭ éĸĵ +åĪĨ 管 +交éĢļ äºĭæķħ +å¼Ģ åĬŀ +å¾ģæ±Ĥ æĦıè§ģ +äº ¨ +鼻åŃIJ éĥµ +鼻åŃIJéĥµ ä»¶ +ä¿¡æģ¯ æľįåĬ¡ +ä½ł è§īå¾Ĺ +缴 è§Ĥ +å·² å®ĮæĪIJ +åĪĨ ä¼ļ +åĽŀ åįĩ +éļ » +好 人 +äºĨè§£ ä¸Ģä¸ĭ +åį« æµ´ +æľĢ çα +åºŀ 大 +客 æĪ¿ +çijŀ åħ¸ +éĥ½ ä¸įæĺ¯ +é¤ ¨ +èĹ ī +çļĦ åIJĦ项 +为 缮æłĩ +çļĦ è®¤çŁ¥ +å½±åĵįåĬĽ çļĦ +夸 å¼ł +佩 æĪ´ +æ±ĩ çİĩ +çļĦ çαæĥħ +æĺ¥ é£İ +æĺ¯ æĪijçļĦ +æ¨ ¹ +åįĬ å°ıæĹ¶ +å±± åİ¿ +å±± 西çľģ +èĢĮ 
è¿Ļ +æĽ´å¤ļ ä¿¡æģ¯ +è¿ĺ æľīä¸ĢäºĽ +ç²¾ ç»ĨåĮĸ +ç¾İ åѦ +çͱ æĸ¼ +ä»ħä¾Ľ åıĤèĢĥ +å¾Ī é«ĺçļĦ +åıł åĬł +è¿Ļä¹Ī 说 +å±ķ åĩº +åĽĽ å¤Ħ +ä¸ĩ å®¶ +æĭĽ åĭŁ +çļĦ 强大 +æĤ£ æľī +å°ı äºİ +ä¹Łè®¸ æĺ¯ +对 èĩªå·±çļĦ +èģĮä¸ļ æķĻèĤ² +æĿ¥ è¿Ľè¡Į +æ¡£ 次 +æīĵ èµ¢ +éĥ½æľī çĿĢ +åº ¸ +è¯Ń æ°Ķ +çͲ éĨĽ +空 åĨĽ +车 åĨħ +åĽłä¸º ä½ł +å®ŀ æķĪ +æĥħ ä¾£ +åıijè¾¾ åĽ½å®¶ +éķľ åŃIJ +æ¯į å©´ +ä½Ĩæĺ¯ ä»ĸ +积æŀģ æİ¨è¿Ľ +大å¹ħ 度 +çļĦ 女åĦ¿ +é¤IJ æ¡Į +åIJ¬ å¾Ĺ +çļĦ 积æŀģæĢ§ +好 åIJ§ +æĹ¥ æ¶Īæģ¯ +æľī ä»»ä½ķ +æ¯Ĵ åĵģ +æĹ©çĤ¹ åĬłçĽŁ +第ä¸Ģ 天 +å°½ åĬĽ +æł ĸ +主 æīĵ +æĺ¯ä¸Ģ åIJį +çĪĨ æĸĻ +äºĭä¸ļ åıijå±ķ +å¾® åķĨ +äºİä¸Ģä½ĵ çļĦ +çĶŁ çĮª +èĩªçĦ¶ èµĦæºIJ +çŀĦ åĩĨ +è§Ħ模 åĮĸ +å¹¶ ä¸İ +èĤ¥ èĥĸ +å®¶ ç͍ +大 çĪ· +é¢Ħ åijĬ +æĿ¥ åģļ +éĺ³ åİ¿ +æŀĦ çŃij +é¢ģ å¥ĸ +åİĨåı² æĸĩåĮĸ +æľįåĭĻ æĪĸ +æĢ» åĨ³èµĽ +åıij åŀĭ +æĪij 羣çļĦ +æĽ ¦ +åıĤ ä¼ļ +èĦĨ å¼± +åĩĨ åħ¥ +èħ¹ éĥ¨ +åı¸ 令 +æĤ² åī§ +天 ä¸Ĭ +åı£ ä¸Ń +ä¸ĩ 个 +åѦ ä¸ļ +æıIJ åĢ¡ +两 è¾¹ +大 èĤ¡ä¸ľ +åı¤ éķĩ +è¡Ģ ç³ĸ +çļĦ ç¨ĭ度 +æ£ī èĬ± +åIJİ åı° +å°± åĮ» +æķ´ æķ´ +èĴ ² +çĽĪåĪ© èĥ½åĬĽ +ç± ½ +èĦ « +çľĭ éĩį +å®¶ éķ· +èģĺ ç͍ +èµĽ éģĵ +åīį èĢħ +建 èѰ +å¾ĭå¸Ī äºĭåĬ¡ +èīºæľ¯ åĵģ +æľī èĩªå·±çļĦ +åIJ¦ å®ļ +社 åĽ¢ +åij¨ äºĶ +带 åΰ +å·¥ä½ľ ä¼ļè®® +èĤ¡ æľ¬ +å¤ĸ åĮħ +å®¶ åħ¬åı¸ +çĽij çĭ± +èĪ Ĭ +åIJį æł¡ +西 æ¹ĸ +è¶ħè¿ĩ äºĨ +åįĹ å±± +ç»Ħ ä»¶ +å̼å¾Ĺ 注æĦı +æĮ£ æīİ +äºĭ 迹 +ç¶ĵ çĩŁ +ç§ij 室 +好 åIJĹ +æ¤ħ åŃIJ +åľĪ åŃIJ +ä½Ĩ 她 +æµģ çķħ +åIJĦèĩª çļĦ +èģĮ åijĺ +è¡į çĶŁ +åħ¨ åľº +æĴ¤ éĶĢ +åį´ è¢« +å®ģ éĿĻ +åīį æīĢ +åīįæīĢ æľª +åīįæīĢæľª æľī +主 ä¸ļ +åĮĹ ç¾İ +è¯Ħ å®ļ +åĵģ å°Ŀ +大家 éĥ½åľ¨ +主 å¸ħ +ç»Ĩ å¿ĥ +ä¿¡æģ¯ æĬ«éľ² +çļĦ ç«ŀäºī +éĢĻæ¨£ çļĦ +ç§ijåĪĽ æĿ¿ +éĩĩ æijĺ +票 æį® +éĢIJ å¹´ +èĭ± è¶ħ +è¡Įä¸ļ åĨħ +人 寿 +åIJİ åĭ¤ +å¦Ĥ æĦı +ç¬Ķ è¯ķ +æ·¡æ·¡ çļĦ +ä¸į èĪĴæľį +ä½ĵ 积 +ä¹Łä¸į è¦ģ +éĿ¢ æĸĻ +æł· æľ¬ +ç¥ ģ +æĮī è§Ħå®ļ +大æ¦Ĥ æĺ¯ +æĥħåĨµ è¿Ľè¡Į +åIJĦ åįķä½į +çļĦ ç¬ij容 +åĩºèī² çļĦ +代表 æĢ§ +çļĦ ç¾İ好 +éĴ ¦ +å¾® çĶŁçī© +è¶Ĭ æĺ¯ +æĸ¹ åı¯ +å¹² èĦĨ +éģĬ æĪ² +çļĦ åħ´è¶£ +éĹ® è´£ +åĽłä¸º æĪij们 +èĢĥ éĩı +çĶŁ çĶŁ +éĺ» åĬĽ +ä¸į åħģ许 +æıIJ è®® +åĩı æĮģ +åıªæĺ¯ ä¸Ģ个 +æĪij æĬĬ +åıijçݰ èĩªå·± +å¢ŀ å¹ħ +å¦ į +èĹĿ è¡ĵ +ä¸Ģå®¶ 人 +åĪĨ 级 +çļĦ æķ°éĩı +è½® èŀįèµĦ +çŃī åĽłç´ł +大 夫 +èģĺ 请 +é£İ æľº +绽 æĶ¾ +ä»»ä½ķ ä¸Ģ个 +éł Ĥ +éĺ¶ çº§ +æĬĬ 她 +è¿Ľ åĨĽ +èĥ½ åģļåΰ +åŁ¹è®Ń æľºæŀĦ +çī© æĸĻ +ç«¥ è¯Ŀ +æĮĩ导 æĦıè§ģ +éĺ ® +æ·±åħ¥ æİ¨è¿Ľ +主 æľº +æ¸Ķ ä¸ļ +ä¸į æľį +æµĵ éĥģ +è¡Ĺ ä¸Ĭ +ä¾Ŀ 次 +æĹ¶ 段 +æ¢ µ +çļĦ åĸľçα +å¾Ī éķ¿ +åĪĿ 级 +æŀľ æĸŃ +æĬ¢ æķij +é¼ĵ èĪŀ +ä¾Ľ éľĢ +æ·±åħ¥ å¼Ģå±ķ +产ä¸ļ éĽĨ群 +åĻª éŁ³ +åIJ¬ çĿĢ +æ·±åĪ» çļĦ +å¿į åıĹ +ç͵ ç£ģ +强 èĢħ +æ»ĭ åij³ +æĽ¼ èģĶ +åı¯ä»¥ 缴æİ¥ +大 ç±³ +æŃ· åı² +æĶ¿åĬ¡ æľįåĬ¡ +åħ¬ å¼ı +社 群 +éģĵ士 èģĮä¸ļ +ä¹ĭ æĥħ +æµ· æ°´ +æ¼Ķ å¥ı +åºĹ éĩĮ +迹 象 +åıijå±ķ çIJĨ念 +é«ĺ 空 +åij¨ åĪĬ +åĽŀ åΰäºĨ +ä¸į éĢĤåIJĪ +åłµ å¡ŀ +åĬ Ī +æ°´ ä¸Ĭ +çĢij å¸ĥ +纳ç¨İ 人 +çĩĥ æ²¹ +å·¥ç¨ĭ é¡¹çĽ® +峡 è°· +æľī éĴĪ对æĢ§ +åľĨ å½¢ +æľ¬ å¸Ĥ +è¿Ļ è¯Ŀ +管çIJĨ èĢħ +ç¡®è¯Ĭ çĹħä¾ĭ +æĬĬ æīĭ +彩 èī² +ä¸Ĭ åīį +夯 å®ŀ +ç¾Ĭ èĤī +å¾Ģ å¹´ +æĵħ èĩª +è¿· 人 +èĪª æ¯į +ç²¾ ç»Ĩ +åľ¨ æĪijçļĦ +åĪĽ æĬķ +麦 åħĭ +æľĪ ç»ı +åĮĹ æµ· +ä¹ĭ æĺŁ +åı¶ åŃIJ +å¸Ĥåľº ç«ŀäºī +è¿Ļ äºĭ +åıĥ èĪĩ +产 åľ° +åĶ ī +åķĨåĵģ æĪ¿ +èĪª è¿IJ +ä¼ĺ å¼Ĥ +ä»ĸ们 æĺ¯ +鼨 æ°´ +è¯į æ±ĩ +åĨľ çͰ +欧 éĺ³ +çŁŃ 线 +管 ç½ij +æł¹ åŁº +åıªæľī ä¸Ģ个 +éŀĭ åŃIJ +å¸Ĥ å§Ķ书记 +åĪ» æĦı +è¡Į 车 +åıĪ è¢« +åı¯éĿł æĢ§ +è´ ± +ä»» åij½ +åºĶ åľ¨ +å°± å¾Ĺ +æľįåĬ¡ ä½ĵç³» +æĶ¿ æĿĥ +åıijè¨Ģ 人 +è¿ĩ å¾Ģ +两 åıª +èϽ 说 +éĢģ ä¸Ĭ +ä»Ģä¹Ī äºĭ +æķ£ æĸĩ +æİĮ æİ§ +èĸĦ å¼± +ä¸ĭéĿ¢ å°± +主è¦ģ åĨħ容 +å¾Ī éĩįè¦ģçļĦ +å°± 说 +çϽèī² çļĦ +éĤ£ä¸ª æĹ¶åĢĻ +ç»ı纪 人 +çļĦ æ¯į亲 +ç¬Ķè®° æľ¬ +åºķ å±Ĥ +è¿ij 代 +è§£ 说 +è²ł 責 +æľĢ大 åĮĸ +åķĨ éĵº +æł¡ åıĭ +æ² ģ +ä¸į åĩºæĿ¥ +éĻ· éĺ± +ç¨ ħ +åħ¬å¸ĥ äºĨ +åĩĢ å̼ +çĽ¸å¯¹ è¾ĥ +ç¬ Ľ +æł¸ ç®Ĺ +åįİ ä¾¨ +æĢ¥ æķij +æĮº 好 +åħĴ ç«¥ +äºĮ èĥİ +åĩº èĩª +åĿ Ł +æīĭ ä¸ĭ +å± ¡ +åĪĽéĢł æĢ§ +ä¸¥æł¼ æĮīçħ§ +åĨį åİ» +举 缣 
+人 æµģ +äºĨä¸Ģ 声 +å°ıæĹ¶ åīį +è´µ æĹı +éľ ĸ +ä¹Łæĺ¯ éĿŀ常 +éĢ ± +çľĭäºĨ çľĭ +ç¹ģ æ®ĸ +èĩ³ æŃ¤ +é¢Ħ å¤ĩ +å¾Ī æĺİæĺ¾ +æ¼Ķ èīº +åĿIJ çĿĢ +ä¿Ħ åĨĽ +åľ¨ è¿ĩåİ» +ä¹ĭ äºĭ +æĬĵ èİ· +åĿIJ ä¸ĭ +çͱ ä¸ŃåĽ½ +ä¹Ł å¼Ģå§ĭ +çŃĶ å¤į +åŀĥåľ¾ åĪĨç±» +éĴĵ é±¼ +åIJĦ 種 +缸 éģĩ +ä¸įåģľ çļĦ +æī¹ éĩı +éĩįè¦ģ ä½ľç͍ +å§Ķ å±Ī +åħŃ å¹´ +ä¸ĥ åįģ +ä¹ĭ æĪĺ +é£İéĻ© 管çIJĨ +éŁ³ æ¨Ĥ +è¡ĮæĶ¿ å¤Ħç½ļ +æľ¬ äºĭ +æĴ° åĨĻ +èģļ åIJĪ +éĢĤ æĹ¶ +æIJ¬ å®¶ +ç¢İ çīĩ +缼 å®´ +ç®Ģ æ´ģ +åı¬ éĽĨ +ç®Ģ åĮĸ +åĮĹ京 æĹ¶éĹ´ +第ä¸ī å±Ĭ +æĿ¥ åĽŀ +常ç͍ çļĦ +京 æ´¥ +京津 åĨĢ +梦 å¹» +è¯ķ è¡Į +æľº åºĬ +åΰ æľĢåIJİ +åĬ© æīĭ +åĪĨ 彩 +åĩº åĵģ +åι 车 +åIJ¯ åıij +ä¾§ éĿ¢ +æ¯ı å½ĵ +缸åħ³ è§Ħå®ļ +ä¸ĸ 人 +è´Ń 车 +å¿ĥ 缮 +å¿ĥ缮 ä¸Ń +äºĶ éĩij +è¿ĺ è®°å¾Ĺ +ä¾Ŀ çĦ¶æĺ¯ +æıIJ æ¡Ī +ç͵åķĨ å¹³åı° +åģļ åΰäºĨ +æĿľ ç»Ŀ +å®ī åįĵ +ä¸ĸçķĮ åIJĦåľ° +åīį éĢĶ +æ´Ĺ åĩĢ +å¥ĭ åĬĽ +åŁİå¸Ĥ 建设 +å¤ļ åĬŁèĥ½ +ä¼ļ éĢłæĪIJ +åıijå¸ĥ ä¼ļä¸Ĭ +ç©¶ 竣æĺ¯ +åĪĨ 红 +çŁ¥ èŃĺ +éĿ¢ æĿ¿ +æĹł 声 +æĢ¥ éľĢ +失 çľł +çΏ å¦Ī +äº Ĥ +åħ¨ æĻ¯ +ç»ıåħ¸ çļĦ +åī§ ä¸Ń +é¢Ĩ导 ä¸ĭ +åħļ åĨħ +åħ¥ ä¾µ +æĭī æĸ¯ +ä¸Ģ å¹ķ +åĬł ä¹ĭ +èĤ Ĩ +èĭ± æł¼ +èĭ±æł¼ åħ° +å·§ åħĭ +å·§åħĭ åĬĽ +ä¸Ģ å¿ĥ +èģ Ĥ +å¾Ģå¾Ģ æĺ¯ +管çIJĨ å±Ĥ +çĻ» åħ¥ +建ç«ĭ èµ· +建 åĽ½ +åŃIJ 宫 +åºĶ ä»ĺ +æİ¢ ç©¶ +第ä¸Ģ ä½į +ä½Ļ å®¶ +çŃī æ´»åĬ¨ +æīĢ èĩ´ +è¾ĥ å¿« +æĺ¯ éĿŀ +æıIJ åIJį +äºĮ èĢħ +åıªåī© ä¸ĭ +åħ¶ä¸Ń åĮħæĭ¬ +ç¼ĸ ç¨ĭ +çł´ ç¢İ +ä¸Ń 举 +å·¥ä½ľ æĬ¥åijĬ +çѾ åIJį +éħĴ ä¸ļ +çŁ¥ æĻĵ +çĥŃ å¿ĥ +éĿŀ åĩ¡ +èIJ¥ä¸ļ æī§ +èIJ¥ä¸ļæī§ çħ§ +人大 代表 +ä¸Ģ个 æĸ°çļĦ +å¨ģ æµ· +éĤ£ 人 +涨 ä»· +æ¶Ī çģŃ +éļ¾ å¿ĺ +ç¶ĵ é©Ĺ +åı£ è¢ĭ +ç³» æķ° +æĸĩ ä¸Ń +好 转 +æĸ° 鼶åĶ® +讲述 äºĨ +å¼Ģ çĽĺ +çķĻ ç»Ļ +æħ¢æħ¢ çļĦ +æĤ² 伤 +æľ¬ æľŁ +äºĨ å¤ļå°ij +è¿Ļ 让 +åIJĮ çŃī +æ¸ħ æĺİ +个 åŁİå¸Ĥ +æºĸ åĤĻ +åĩłä¹İ æĺ¯ +强 åĬĽ +ä¿ ¯ +æ°´ 稻 +åĽºå®ļ çļĦ +æł¸ åĩĨ +说 æľį +顯 示 +è¿Ļ å¥Ĺ +æĻºæħ§ åŁİå¸Ĥ +å±ĭ é¡¶ +ä¸į æĿ¥ +çĶŁ é²ľ +çŁ¥ æĥħ +æĬķ 身 +åijĬè¯ī æĪij们 +ä¸ī åĽĽ +ä¸ĩ ä¸Ģ +è¾Ĩ 车 +为 ä¹ĭ +åΰ æĹ¶åĢĻ +è¿Ļ æīįæĺ¯ +åIJį çīĮ +åºŁ æ°´ +åݻ年 åIJĮæľŁ +å¹´ éĻIJ +éģĭ åĭķ +åıĮ çľ¼ +è¦ģ ç´§ +对 çŃĸ +åľº é¦Ĩ +çϾ ç§ij +è¶Ĭ éĩİ +å¯Į åIJ« +大å¤ļæķ° 人 +æľĢ å°ij +åı¬ åͤ +åħ¸ èĮĥ +åĨľ æľº +æŃ£ æĸĩ +åºĶç͍ äºİ +æ·± èĢķ +ä¿ Ń +ä»Ģä¹Ī ä¸ľè¥¿ +å¥Ĺ é¤IJ +å½ĵ éĢī +å·¦ æīĭ +è°ĥ çIJĨ +æĻļ é¤IJ +éļ¾ åħ³ +åĩŃ è¯ģ +çα 人 +æĮĩ è´£ +è´£ ç¼ĸ +çļĦä¸Ģ 款 +éĵ ² +åįģ 个 +èĢ » +æľįåĬ¡ åķĨ +åľ° çĭ± +è¿ŀ å¿Ļ +åĽ° æĥij +çļ ĵ +ä¸į åIJĥ +çİ°åľ¨ å·²ç»ı +çĽĺ çĤ¹ +ä¸įåģľ åľ° +管çIJĨ 模å¼ı +è¿Ļ 段æĹ¶éĹ´ +æ¤ ° +礼 åĮħ +æµģ 转 +æī« çłģ +éĽĨä¸Ń åľ¨ +æ±Ĥ åĬ© +åįĬ 个 +å¿«éĢŁ å¢ŀéķ¿ +å¾Ģ ä¸ĭ +è¯Ħ åĪĨ +å°± æĥ³ +åķĨåĬ¡ éĥ¨ +æľī éĹ®é¢ĺ +èİ· åĪ© +æ¯Ľ çĹħ +æĦŁ åºĶ +èī¯ æĢ§ +åĪĨ æŃ§ +åĨ ī +æĪij们 çİ°åľ¨ +è¦ģ åĬłå¼º +å·§ å¦Ļ +èŀº æĹĭ +åĪĩ æį¢ +çĭ Ħ +顺 çķħ +å°¤åħ¶ æĺ¯åľ¨ +èĬĿ 麻 +éļ¾ è¿ĩ +æĹĹ å¸ľ +å¤į åį° +å¤įåį° ä»¶ +å¿ħ éľĢ +对å¤ĸ å¼ĢæĶ¾ +éļ¾ åıĹ +åİŁæĿ¥ æĺ¯ +ç®Ĺ äºĨ +é«ĺ å±± +离 èģĮ +çµĦ ç¹ +çµĦç¹ Ķ +å±ģ èĤ¡ +çϾ å®¶ +éģĩ ä¸Ĭ +æĺĶ æĹ¥ +ä¸į 容 +çĽij管 éĥ¨éŨ +主 æĦı +æµģ åŁŁ +è·Į å¹ħ +èĩ³ ä¸Ĭ +åĪ« 说 +æĺ¯ æ¯Ķè¾ĥ +å®ıè§Ĥ ç»ıæµİ +å¸Ĥåľº 主ä½ĵ +污æŁĵ çī© +æķij æ²» +丰 æĶ¶ +åŃĺ æĶ¾ +åĩ Ħ +éĩij å±± +æį¢ äºĨ +ä¸ĵ 人 +éĹľ æĸ¼ +æĹ¢ è¦ģ +åĽ½ è¶³ +éļ ĭ +åıį åĩ» +èµ· 身 +åħĪ æĺ¯ +å¸ĮæľĽ èĥ½å¤Ł +åζ 订 +åºĹ éĿ¢ +åĸ Ģ +æķĻ ä½ł +éĻį æ¸© +åĬĽ æ±Ĥ +ä¸ī çϾ +çī© ä»· +丢 失 +å¢Ļ ä¸Ĭ +éĥ¨ 份 +æł· æĿ¿ +ä¹ĭ æĦı +ç½ij å°ıç¼ĸ +ä¸ĸ ä¸Ĭ +è°ĥ è¯ķ +污æŁĵ éĺ²æ²» +å½± éĻ¢ +å®Įåħ¨ åı¯ä»¥ +éĢļ åħ³ +ä¹īåĬ¡ æķĻèĤ² +没æľī åĬŀæ³ķ +èĢ ¿ +å¦ ³ +æĹł æĥħ +å¾Ĺ çĽĬ +å¾ĹçĽĬ äºİ +æľŁ çĽ¼ +娱ä¹IJ åľº +çͲ æĸ¹ +ä¸Ģ æ±½ +çĹ ° +çĸij ä¼¼ +æĸ°æµª å¾®åįļ +强 è¡Į +å½ĵ ä»ĸ +èĥ º +ç͍æĪ· æıIJä¾Ľ +åĮº å§Ķ +æĦ¿ æĻ¯ +æĬĺ æī£ +失 踪 +è¿« åĪĩ +åŃĹ æ¯į +åĴ ¯ +èªį èŃĺ +ä»Ģä¹Ī æĦıæĢĿ +çĽĴ åŃIJ +å½ķ éŁ³ +建设 å·¥ç¨ĭ +ä¸ļ ä½Ļ +å®ŀè·µ æ´»åĬ¨ +羣 空 +çĤ ĸ +åľ¨ è·¯ä¸Ĭ +主è¦ģ åĮħæĭ¬ +该 æĢİä¹Ī +æĢ» æľī +æĢ§ æĦŁ +æ°ij èĪª +å¼Ģ åºĹ +欺 éªĹ +çªģ åĩ» +缺 失 +æī§ ä¸ļ +åľ° éģĵ +å¹¶ æĹł +æ°ij åĬŀ +ç»Ħç»ĩ çĶŁæ´» +æĪij å¦Ī +è¨ĺ èĢħ +管 åζ 
+æī¾ 个 +èĹ » +çĤİ çĹĩ +äºĴ åĬ© +æµıè§Ī åύ +çݩ家 æĿ¥è¯´ +éĻįä½İ äºĨ +è£ Ķ +æĮ£ éĴ± +åķĨ æľº +æĶ¹ è£ħ +æµģ 浪 +æĶ¿ æ³ķ +èĢģ 头 +çĶŁäº§ åĴĮ +ç© Ĺ +亲 çα +亲çα çļĦ +å±¥ èģĮ +åŁİ éĩĮ +ç»Ĩ åĪĨ +åĬ³åĬ¨ åIJĪåIJĮ +åľ¨ æĹ¥æľ¬ +å¨ģ å°Ķ +åį« è§Ĩ +éĢ£ çµIJ +çĿĢ éĩį +æĬĺ 磨 +åĽ¾ 为 +çľ · +å·¥ åºı +æĵ ģ +æĵģ æľī +ç½ijç«Ļ åľ°åĽ¾ +çļĦä¸Ģ 大 +ç»Ħç»ĩ å®ŀæĸ½ +æĬĽ å¼ĥ +åĴĮ æĶ¯æĮģ +æ³ķ åĪĻ +浪 æ½® +çݰ æľīçļĦ +åĩł çİĩ +为 客æĪ· +åįģ ä¸ĩ +è ¹Ħ +çªģåĩº éĹ®é¢ĺ +åıĥ åĬł +éĥ½ä¼ļ æľī +çĽ ¤ +è°ģ éĥ½ +æīĭ åĬ¨ +缴 è¾¾ +çĤ¹ å¤ļ +éĺ¶ å±Ĥ +ä¸į ä½³ +éĤ£ 段 +滨 æµ· +æĺ¯ åĽ½åĨħ +æĪij å¸ĮæľĽ +åIJĽ åŃIJ +è§Ĥ éŁ³ +åģļ é¥Ń +æ±½ è»Ĭ +åħ³ ç¨İ +çľ¼åīį çļĦ +æ°´ éĿ¢ +è̳ æľº +追 踪 +æİ¨ éĢģ +éĴ± åĮħ +æģ¶ å¿ĥ +æµ· åŁŁ +å· į +å¼Ģ æĿ¥ +表 æĢģ +仪 表 +å¹³ åİŁ +åįģ å¤ļå¹´ +ä¹Ł æĹłæ³ķ +åħ¼ 顾 +è¡£ æŁľ +æł½ åŁ¹ +æĪ¿ æºIJ +设ç«ĭ äºĨ +ä¸ĩ åIJį +æķ° é¢Ŀ +è¦ģ åĿļæĮģ +åIJīæŀĹ çľģ +请 èģĶç³» +ç»ıåİĨ è¿ĩ +çļĦ æľ¬è´¨ +åħ¥ éŨ +æľ¬ æ¡Ī +çİĩ è¾¾åΰ +åı° éĺ¶ +éĴ ŀ +æĪij èĥ½ +èݲ èĬ± +éĴ ł +ä¸Ģ äºĭ +åİŁ æľīçļĦ +æ¯ı åĢĭ +æ¯Ķäºļ 迪 +æ£ĭçīĮ 游æĪı +ä¸įä¼ļ æľī +å½Ĵ æĿ¥ +äºĶ çϾ +è¿ĩ é«ĺ +鼷 è¾¾ +ä¸Ģèµ· åİ» +æķĻ å¯¼ +å°± è¯Ĭ +å°± å¾Ī +ä¸įåIJĮ äºİ +ä¿ º +å¸ĸ åŃIJ +æĶ¿åįı å§Ķåijĺ +çĸ«æĥħ å½±åĵį +åĪĨ è£Ĥ +为ä»Ģä¹Ī ä¼ļ +äºĶ æĺŁ +å°ij åĦ¿ +æĬ¢ éĻ© +梦 è§ģ +è®°èĢħ éĩĩ访 +å±± è·¯ +æĪij 个人 +æ²Ļ 滩 +è¹ Ń +æĶ¹ è®Ĭ +æĸ°åŀĭ åĨł +æĸ°åŀĭåĨł çĬ¶ +åĮ» æĬ¤ +åĮ»æĬ¤ 人åijĺ +æµ· å°Ķ +åħ³äºİ æĪij们 +éϤ å¤ĸ +åº ļ +宣 åijĬ +ä¸ī åįĥ +æ¦ ¨ +ç§ijæĬĢ å¤§åѦ +ä¸ĥ åħ« +顺 åºĶ +çΏçΏ å¦Īå¦Ī +éĢī åıĸ +åī§ çĥĪ +乡æĿij æĹħ游 +积æŀģ æİ¢ç´¢ +表çݰ 为 +å¾Ī æ¸ħæ¥ļ +大 åĨĽ +æĿ¥ ç͵ +å¥Ĺ æĪ¿ +çݰ è¡Į +享 åıĹåΰ +çľĭ çĤ¹ +åĽºå®ļ èµĦ产 +以 人为 +以人为 æľ¬ +ä¸į å®Į +éĻį 鼨 +åģļçļĦ äºĭæĥħ +å¹¶ äºİ +顽 强 +èĢ ¸ +åĺ´ å·´ +缸åħ³ ä¿¡æģ¯ +æĪij 没 +æĪĺçķ¥ æĢ§ +æĢĿ 念 +åĪĺ å¤ĩ +åĬ© æĶ» +é£İ è²Į +éĿ¢å¯¹ éĿ¢ +积æŀģ å¼Ģå±ķ +çĸĹ æķĪ +çľĭ 书 +缺 åı£ +åĽ½æ°ij ç»ıæµİ +使ç͍ æĿĥ +éģ¥ è¿ľ +å¡« è¡¥ +第ä¸ī 人 +åįĬ å¤ľ +æŃ¦æ±ī å¸Ĥ +æĪij åıijçݰ +ä¼ĺæĥł æĶ¿çŃĸ +é£İ åı£ +å°± ä¸įèĥ½ +为 主è¦ģ +æµģ åĩº +å´ĩ æĭľ +å¹¶ ä¸įèĥ½ +é«ĺ ä¸ī +ä¸ĸçķĮä¸Ĭ æľĢ +æĥ³ å¿ħ +åħ¶ æīĢ +åĢĻ éĢī +åĢĻéĢī 人 +ä¸į çα +åī¯ ä½ľç͍ +人æ°ij æĹ¥æĬ¥ +æĪij ä¸įæĺ¯ +å®ŀ çī© +ç͵ åİĤ +ä¹Ł ç®Ĺæĺ¯ +æľī éĹľ +æľī èĥ½åĬĽ +æĮĤ åľ¨ +çľ¼ ä¸ĭ +约 ç¿° +å°ı åѦçĶŁ +èµ· åΰäºĨ +å·¥ 夫 +åIJĮ å¿ĥ +åĿ¦ è¨Ģ +çł Į +åıijæĮ¥ äºĨ +èģĮä¸ļ éģĵå¾· +è¿ĻäºĽ å¹´ +念 头 +èĢģ é¼ł +åħ¨ èµĦ +åħ¨èµĦ åŃIJ +ä¸Ģ åij³ +å¤ļ ä¸ĩåħĥ +æł¼ æľĥ +éķ¿ éĢĶ +带 èµ° +èĭ± 寸 +æĸĩ ä½ĵ +对 ä»ĸ们 +åĵŃ äºĨ +å¡« æĬ¥ +çīĪæĿĥ 声æĺİ +ç͵ 线 +è´Ńçī© ä¸Ńå¿ĥ +饱 满 +ä½İ 头 +强 è¿« +ä¿Ŀ æ´ģ +欧 åĨł +缸 è¿ŀ +认 è´Ń +çģ« æĺŁ +é«ĺ å°Ķ +é«ĺå°Ķ 夫 +èij« èĬ¦ +æłĩ 注 +çļĦ çIJĨæĥ³ +æł¸ éħ¸ +æł¸éħ¸ æ£Ģæµĭ +åĬ ī +ä¸Ģèά æĺ¯ +æĢĿ ç´¢ +轨 迹 +çĥŃ å¸¦ +éĻ £ +åĩĨç¡® æĢ§ +æĪ´ çĿĢ +åľ¨ çĶŁæ´»ä¸Ń +æīĢ èĥ½ +æľ¯ åIJİ +带 ä½ł +ç¥ ł +æ®ĭ éħ· +ä¹Ł åıªæĺ¯ +çͳ è´Ń +举åĬŀ äºĨ +æľī æĦıä¹ī +æĹº 缼 +åľ¨ ç¶² +åľ¨ç¶² è·¯ä¸Ĭ +å¾Ī大 ç¨ĭ度 +管 è¾ĸ +çĸ«æĥħ æľŁéĹ´ +触 æij¸ +éĺ¶æ®µ æĢ§ +ä¼ļ è§īå¾Ĺ +çļĦ çĶ»éĿ¢ +æİ¥åıĹ äºĨ +表达 äºĨ +éĤĵ å°ı +éĤĵå°ı å¹³ +åħļ é£İ +åħļé£İ å»īæĶ¿ +åķĨ åѦéĻ¢ +åħij æį¢ +é£Łåĵģ èį¯åĵģ +éĿŀ常 好çļĦ +çľ ¯ +纳 ç±³ +åĬ¨ æijĩ +åĽŀ éģ¿ +çľĭ èijĹ +款 项 +åħ« å¹´ +åģļ 个 +æĸĩ æ¡£ +éĩijèŀį ç§ijæĬĢ +åħ¶ä¸Ń æľī +äºĨä¸Ģ ç³»åĪĹ +æĹĹèΰ åºĹ +ç§° èµŀ +éĽ¢ éĸĭ +åζ åĨ· +å®¶ éŨåı£ +åįģ å¤ļ +ä¼´ ä¾£ +çľĭ çĹħ +æĭī çĿĢ +æī Ĵ +çĸ² æĥ« +å°ijæķ° æ°ijæĹı +åĽ¾ å½¢ +è½ § +å¢ŀ éĩı +饲 åħ» +çģ« å±± +æ¯ı 个æľĪ +ä½ľä¸º ä¸ĢåIJį +è½´ æī¿ +æĸĩ 书 +ç¼ ķ +åħ·ä½ĵ æĥħåĨµ +çĹĽ çĤ¹ +缴 éĶĢ +å¡ Ĭ +ä¹Ł æľĥ +çĥŃ æ½® +å¹³ æ°ij +æ¼Ķåͱ ä¼ļ +æķĻ çłĶ +éĢĥ éģ¿ +ä¸Ģ è´¯ +å°± è¶Ĭ +å®ŀ å®ŀåľ¨ +å®ŀå®ŀåľ¨ åľ¨ +ä¹łè¿ijå¹³ æĢ» +æº º +å¿ĥ åºķ +éķ¿ å¾ģ +媽 媽 +第ä¸ī 次 +åĩº æ¼Ķ +çĭĢ æ³ģ +å°Ķ æĸ¯ +代çIJĨ åķĨ +çĨ ı +çļĦ 对象 +ç͵ éĩı +è¡Į åĪĹ +åĽ½ 人 +è·ij äºĨ +åįĶ åĬ© +èIJ¥ è¿IJ +å¸Ī åħĦ +æ¦ ® +æĥ³ åĥı +æĢ§ 强 +ç§ijåѦ çłĶç©¶ +å»¶ å®ī +ä¸¥æł¼ èIJ½å®ŀ +é¢Ĩ ä¼ļ +缸 å·® +è·¯ 人 +çĶ « +æľī ä»·å̼ +æľīä»·å̼ çļĦ +ç¾İ åĽ¢ 
+æ°ij主 çĶŁæ´» +æĪij æīį +ç¾İåĽ½ 人 +æ°Ķ åij³ +åıį å°Ħ +çļĦ åĨ³å¿ĥ +大 è±Ĩ +交 代 +è¿Ľ åĩº +åıį æĬĹ +æĮĩ çļĦæĺ¯ +ä»· ä½į +è¿Ľ é©» +ä¸Ĭ çϾ +ä½į åĪĹ +ä¸ŃåĽ½ ä¼ģä¸ļ +çļĦ好 å¤Ħ +主 ç¼ĸ +æ±½ æ²¹ +ä½Ĩ æĪij们 +æĢİä¹Ī çľĭ +é»Ħ å±± +å¤ļ åªĴä½ĵ +åIJİ åį« +èİ·å¾Ĺ æĽ´å¤ļ +åĬ¡ å¿ħ +为 å¥ijæľº +é¦ĸ 饰 +ä¸ĩ åįļ +è¶ĬæĿ¥è¶Ĭ 大 +ä¸ĵ项 è¡ĮåĬ¨ +å¥ĭ è¿Ľ +ä»į çĦ¶æĺ¯ +è´¨ æĦŁ +å¦Ĥæŀľ ä¸įæĺ¯ +ç«Ļ èµ·æĿ¥ +ä¹¾ éļĨ +åı¯æĢķ çļĦ +å¯Į è´µ +æ¸ħ ç®Ĺ +åIJij ä¸ĭ +åĢ ļ +çļĦ çŃĶæ¡Ī +èι ä¸Ĭ +çļĦ羣å®ŀ æĢ§ +çŃī åĬŁèĥ½ +åĸľ åī§ +å¨ģ åĬĽ +æĸ° é¢ĸ +æł¸ ç͵ +æĬ¥ éĶĢ +æķħ 乡 +ä¼´ éļı +éŀ Ń +å¦Ĭ å¨ł +åĪĨ åĮĸ +æľī å¾Ī大 +æĢİä¹Ī 说 +æĻĤ 代 +产 åĩº +ä»ĭç»į 说 +å¤ĦçIJĨ åύ +èĨ¨ èĥĢ +åī¯ å¸Ĥéķ¿ +çļĦ 妻åŃIJ +æł· åĵģ +åIJĮæ¯Ķ ä¸ĭéĻį +åħĥ å·¦åı³ +ç͍ èĩªå·±çļĦ +é«ĺ éĽĦ +æĺ¥ æĻļ +ä¹Ł æľīå¾Īå¤ļ +çľ¼ çIJĥ +æķ£ æŃ¥ +ä»ĸ们 éĥ½ +第ä¸Ģ å®¶ +åĬŀ 好 +å®ī éĺ² +ä¸Ģ ä¸ĩ +åľ¨ éĩĮéĿ¢ +éŁ³ é¢ij +åı£ åı· +ä¸Ģ è¶Ł +ç¦ı çī¹ +é³ ŀ +æĥĬ èī³ +æĸ° å¨ĺ +绿èī² åıijå±ķ +ä¸Ń å¼ı +ä¹Ł åıªæľī +çݰ 身 +åı¯ ä¾Ľ +æ¯ı ä¸Ģ个人 +第ä¸ī èĢħ +åľ° å½¢ +éĴ¢ ç»ĵæŀĦ +çĽijçĿ£ æ£ĢæŁ¥ +åı« æĪij +èĩ´ æķ¬ +æ´Ĺ æīĭ +ä¸ĭ è°ĥ +康 çĨĻ +æĪIJ交 éĩı +ä¹Ł æĪIJ为 +åħī æ»ij +å®Įæķ´ æĢ§ +çģ ¼ +ç¶² éłģ +éķ¿ å¯¿ +éģ© ç͍ +çļĦä¸Ģ 项 +çŀ© 缮 +æĬĬ èĩªå·±çļĦ +éĵ¶è¡Į åį¡ +å°± å¿ħé¡» +ç¾İ çϽ +éŀį å±± +æľ¬ é¢Ĩ +ä¸Ģ ç¢Ĺ +æīĵ æ³ķ +æĤ¨ 好 +对 åŃ©åŃIJ +æĬ¥éģĵ ç§° +ä¼ł åĩº +大 èĩ£ +ç¬ ĭ +çĽ ı +é¾ ļ +缴 线 +æĻº åºĵ +ç§Ł 车 +é£İ åij³ +çľĭ ä¸Ģä¸ĭ +æİ¨ éĶĢ +éĥ¨ éĥ¨éķ¿ +è´¨éĩı åĴĮ +åĪĬ çĻ» +å·¥ä¸ļ åĮĸ +çİĩ 为 +鼶 ä»¶ +硬 åĮĸ +ä¸Ĭ åįĥ +ç»ıéªĮ å̼ +å¹³ è¡Į +声 éģĵ +æľįåĬ¡ è´¨éĩı +çĶŁ çĶ¢ +æľĢ 容æĺĵ +ä¸Ģ æŀļ +å¹´ æĬ¥ +åħ¬ ç½ij +åħ¬ç½ij å®ī +åħ¬ç½ijå®ī å¤ĩ +çļĦ èĥ½éĩı +å®ŀéĻħ è¡ĮåĬ¨ +è¦ģ ä¸įè¦ģ +æĹ¥æľ¬ 人 +è̶ 稣 +ç¼ĸ åī§ +æ¶ © +åį° å°¼ +ä¸Ĭä¸ĭ 游 +åĩł åı¥ +ä¸Ń éĵģ +ç°¡ åĸ® +èĩª 带 +çĶŁ äºİ +ä¸Ģ åı£æ°Ķ +åĭ¤ å¥ĭ +éĻį ä»· +å±ķçݰ äºĨ +å¸ĥ æĭī +ä¼ļ éĢīæĭ© +çļĦ ç»ıåħ¸ +好 æľĭåıĭ +车 éģĵ +æķ´ åĢĭ +åľ ĵ +éķ¿æľŁ 以æĿ¥ +æĬķ å½± +çļĩ åĨł +è¿ĩ 大 +åijĬè¯ī ä»ĸ +ä¼ģä¸ļ æıIJä¾Ľ +æĬ½ 象 +éĢĤ 度 +çļĦ 女åŃ© +èµ· ä¼ı +çļĦ åĬŁæķĪ +ä¸ĵ项 æķ´æ²» +åı¯ éĢļè¿ĩ +ä¸įåIJĮ ç¨ĭ度 +å¼Ĥ è®® +åĩĢ èµĦ产 +åij Ĺ +ä»Ģä¹Ī åij¢ +å·¡ éĢ» +è¸ı ä¸Ĭ +ä½Ĩ å®ĥ +ç²¾ 度 +管 å±Ģ +第ä¸Ģ åIJį +åĨħ åŃĺ +æijĨ åľ¨ +åī© ä¸ĭ +主ä½ĵ 责任 +çĤ¹ åįĬ +以 èĩ³äºİ +åħ»èĢģ ä¿ĿéĻ© +æĦŁåıĹ åΰäºĨ +çŁ¥åIJį çļĦ +å¯Į 豪 +妥 åĸĦ +åŃĻ åŃIJ +éĵ Ĥ +说 èĩªå·± +让 æĤ¨ +æķ° æİ§ +çļĦçľ¼ åħī +注 éĶĢ +çļĦ çģµéŃĤ +è¿ĺ ä¸įéĶĻ +éĹ® ä»ĸ +èĩªä¸» çłĶåıij +èĵ ĭ +ç´« èī² +åĽ½å®¶ å®īåħ¨ +è¾½å®ģ çľģ +ä¹Ł æ¯Ķè¾ĥ +ç¾İ èĤ¡ +ä¸įç¡®å®ļ æĢ§ +å¿ĥ 头 +æĪ ³ +级 åĪ«çļĦ +论 è¿° +çļĦ åĽŀçŃĶ +ä¿Ŀè¯ģ éĩij +çŃī è¡Įä¸ļ +幸ç¦ı æĦŁ +æŃ§ è§Ĩ +æľº 票 +æ´¾ 人 +èĩ´ åij½ +åĺ´ è§Ĵ +æĸ°éĹ» ä¸Ńå¿ĥ +æĶ¾å¼ĥ äºĨ +å®ľ å±ħ +åĨĻ ä¸ĭ +éĹ® çŃĶ +è¿ĻéĩĮ æĺ¯ +å¤ļ åľ° +åĮºåŁŁ åĨħ +åīµ æĸ° +çľĭ ä»ĸ +æī§æ³ķ 人åijĺ +åĬ¨ æľº +éŁ³ åĵį +çļĦ åij½è¿IJ +é¡¶ éĥ¨ +åĵ Ł +éĥ½ æľĥ +æīĵéĢł æĪIJ +æĦı åĽ¾ +çļ ĸ +åĢĴ åħ¥ +å·´ èIJ¨ +åĬ© åѦ +å¤į åı¤ +åIJ¯ ç͍ +åĽ½éĻħ å¸Ĥåľº +åĤ¨ èĥ½ +é»ijé¾Ļæ±Ł çľģ +ä¹ĺ 车 +è¿IJåĬ¨ ä¼ļ +ä¿Ŀ åĪ© +çŁ³ æĿIJ +çµ ® +çĤĴ ä½ľ +çļĦ ä¿¡ä»» +å°± æĪIJäºĨ +åı¯ è§Ĥ +çļĩ ä¸Ĭ +è¿Ļ åĩłå¤© +ä¸Ģ éĶ® +åĨ· åĨ» +ä¿Ŀ åį« +æł¸ æ¡ĥ +åIJĪä½ľ åħ³ç³» +éĢģ åĩº +æĹĹ ä¸ĭçļĦ +åľ¨ ä¹İ +为 广大 +åįĪ é¤IJ +ä¸ĵ 访 +æĪĸ å°Ĩ +éĿĴå²Ľ å¸Ĥ +å¥Ķ è·ij +æĹ¥ æĬ¥éģĵ +å¥ij åIJĪ +æĸ° æĺ¥ +ä¸į å°ıå¿ĥ +两 ä¸ī +æĦıæĢĿ æĺ¯ +åĨ· èĹı +çļĦ çĹĩçĬ¶ +æĢ§ åij½ +è¶ħ æłĩ +å¯Ĩ 碼 +ç§ijæĬĢ èĤ¡ä»½ +äºĨä¸Ģ æī¹ +çĿ£ å¯Ł +åªĴ ä»ĭ +å°Ħ æīĭ +ä¿® åħ» +çīĩ åĪ» +éĢĤåIJĪ èĩªå·± +åıªè¦ģ æĺ¯ +åIJĥ è¿ĩ +éĩij éĵ¶ +缴 å±ŀ +åѦ éĹ® +åİĭ åζ +çªĹ å¤ĸ +æĶ¶ åΰäºĨ +åħ¨åĽ½ 人大 +ä½Ĩæĺ¯ 对äºİ +åľ¨ æķ´ä¸ª +çļĦ èĥĮåIJİ +åĩıå°ij äºĨ +åıį èħIJ +åıįèħIJ åĢ¡ +åıįèħIJåĢ¡ å»ī +æĹ · +åĪĨ æľŁ +åľ¨ æ·±åľ³ +æīĵ çĿĢ +æī« ä¸Ģ +æī«ä¸Ģ æī« +æĶ¿åºľ éĥ¨éŨ +æİ¥ è¿ŀ +å±ŀäºİ èĩªå·± +åŃIJ å¼¹ +åIJĮæł· æĺ¯ +æĢ» åħ± +车 ä¼ģ +æ¢ ĵ +åħ¬ é¡· +åıij 声 +éĴ Ľ +èµ°åĬ¿ åĽ¾ +主 èIJ¥ +åĸ Ķ +æķ°æį® åĪĨæŀIJ +ä¸į è¿ľ +æľī åIJį +æľīåIJį çļĦ +åģ¿ è¿ĺ 
+å¾Ī ä½İ +è®ĵ 人 +èĿ ī +é«ĺ è´µ +å°ij 许 +æ° Ł +å¹ ¢ +亲 æĥħ +è¿Ļä»¶ äºĭæĥħ +ç͍ é¤IJ +缸åħ³ æĸ°éĹ» +å°± åºĶ该 +ç»Ī çĤ¹ +æĺ¯ å¤ļå°ij +çĻ» åľº +è¯ķ 管 +è¯ķ管 å©´åĦ¿ +åģļ 大 +åģļ大 åģļ强 +çļĦ ä¾ĭåŃIJ +åħ« 个 +æĺİ æĹ¥ +çĤ ³ +èµ° åİ» +éģ º +å¢ © +ä½ĵä¼ļ åΰ +åĴ ı +ä¸ĭ è¾¾ +å¤į åıij +追 éĢIJ +æīĵ åĵį +çļĦ éļ±ç§ģæ¬Ĭ +åħ·æľī ä¸Ģå®ļ +è¿Ļä¹Ī å¤ļå¹´ +æłij æŀĹ +æľĢ éķ¿ +åIJĮ èĥŀ +åħī æ³½ +åŁŁ åIJį +æĮĩ åIJij +åıĹ害 èĢħ +æłij èĦĤ +æľīå¤ļ 大 +大 éĿ¢ç§¯ +æĹł ç¼Ŀ +æĶ¹ æŃ£ +æĽ´å¤ļ çļĦæĺ¯ +æľŁ æľ« +æŃ ¼ +ä¹ī ä¹Į +éĤ£ ä½ł +çļĦ 第ä¸Ģ个 +èĮ µ +å° § +èį « +ä¸įä»ħ åı¯ä»¥ +æ¶Į çݰ +æĢ» éĿ¢ç§¯ +æĸ°éĹ» åıijå¸ĥ +æ°ij ç͍ +å°± 读 +æīĵ è´¥ +å¤ĸ è¯Ń +æĪij们 ä¸Ģèµ· +é¢Ħ å®ļ +çĥ¹ 饪 +æľĢ 主è¦ģ +æľĢ主è¦ģ çļĦ +çīĮ çħ§ +åĽł åħ¶ +ä½İ ä¸ĭ +ä¼ļ åIJĮ +è§ģ è§£ +éĹ´ éļĶ +æķĻ ç¨ĭ +å° ī +å¸Ĥ ä¸Ńå¿ĥ +åħ³éĶ® æĺ¯ +æµ· åįĹçľģ +çī¹åĪ« æĺ¯åľ¨ +ä¸ŃåĽ½ 大éĻĨ +åħħè¶³ çļĦ +æĹ¢ èĥ½ +åĤ³ çµ± +çijľ ä¼½ +åħ¥ åĽ´ +æħ¢æħ¢ åľ° +æĬ¥ éħ¬ +æī¹ å¤į +å·¥ä¸ļ åĽŃåĮº +ä¸İ åıijå±ķ +èĥ¸ éĥ¨ +åľ¨ ç½ij绾 +åľ¨ç½ij绾 ä¸Ĭ +交 è°Ī +æĽ´ æĶ¹ +åįłæľī çİĩ +ä¸Ŀ绸 ä¹ĭè·¯ +è¡ Ľ +çłĶ åΤ +åĪ ª +åĪª éϤ +è¿Ļ åıª +çļĦ æ°Ķæģ¯ +åĬł å·ŀ +éĴ § +çIJĨäºĭ éķ¿ +ä¸ĸ å®¶ +æµģè¡Į çļĦ +å¾Ī æľīåı¯èĥ½ +们 éĥ½ +ç»ıèIJ¥ 模å¼ı +è¡Įä¸ļ ä¸Ń +éĢļçŁ¥ 书 +åij½ é¢ĺ +æľ¬ ç¶²ç«Ļ +æ²Ļ çī¹ +åıij åħī +é«ĺ ä»· +å·² çĦ¶ +åıĮ åįģä¸Ģ +ä¸Ĭ è¯ī +ç¿ħ èĨĢ +è¿Ļä¸Ģ å¹´ +大ä¼ļ ä¸Ĭ +éĩ ī +å®Įåħ¨ æĺ¯ +å¾Ĺ 太 +ä¸Ģèά 人 +è¿ĺ ç®Ĺ +æĬĺ åıł +æĬķ æľº +çĤ¹ çĩĥ +çݰéĩij æµģ +åħĶ åŃIJ +ç½ij æł¼ +æİ¥ è¿ĩ +ä¾Ľ è´§ +éĺ´ å½± +åİŁ åħĪ +æį £ +å·¦ ä¾§ +åħĭ æĭī +æīĵ åį¡ +ç§ij æ¯Ķ +æ±ĩ éĽĨ +åľ°çIJĨ ä½įç½® +è¯Ħ å§Ķ +ç»ĵåIJĪ èµ·æĿ¥ +è¿Ľåħ¥ åΰ +åı¯ è¡Į +åı¯è¡Į æĢ§ +让 å®ĥ +åĪ¶åº¦ æĶ¹éĿ© +çĶĺèĤĥ çľģ +åĵ Ĺ +åģı åģı +è¡£ çī© +ç¥Ŀ è´º +æºIJ èĩª +å¹¶ä¸į 代表 +åĽ½ 度 +好 åĿı +æĿ ĸ +æĿŃ å·ŀå¸Ĥ +湿 度 +é² ¸ +åįļ 彩 +æ³° å±± +æĿij èIJ½ +æĸ° èģŀ +èĤ ĭ +åı¤èĢģ çļĦ +çļĦ ç§ĺå¯Ĩ +ä¸Ģ个 éĹ®é¢ĺ +éģı åζ +åįĥ 亿 +è¿ĩ 硬 +å°Ħ åĩ» +èĩªçĦ¶ æĺ¯ +产 åĮº +çĤ¹ çĤ¹å¤´ +åı¯ä»¥ 帮åĬ© +说 å®ŀ +说å®ŀ è¯Ŀ +æĪij åıªæĺ¯ +ä¹ĭ ä½Ļ +åIJĮæĹ¶ ä¹Łæĺ¯ +ä¸ŃåĽ½ éĺŁ +建æĪIJ åIJİ +ä¹IJ è§Ĩ +åij¨ å²ģ +èᝠåºĹ +éĩij åįİ +严éĩį å½±åĵį +è´¨ åľ° +æĹħ éģĬ +åħµ åύ +æķĻèĤ² æķĻåѦ +离 åİ» +åIJĦå¼ı åIJĦæł· +ä»ĭ ç´ +ä»ĭç´ ¹ +å¼Ģ 头 +å°Ĩ èĩªå·±çļĦ +åIJ¬ åĬĽ +ä¿¡æģ¯ ç³»ç»Ł +ä»İ æł¹æľ¬ +ä»İæł¹æľ¬ ä¸Ĭ +æİĮ 声 +欢 åĸľ +å±ķ åĮº +åķ ¸ +太å¤ļ äºĨ +éĹ² ç½® +èĥ¡ èIJĿåįľ +å§Ķ å®£ä¼ł +å§Ķå®£ä¼ł éĥ¨ +åįĹ éĺ³ +å·ŀ åĮº +ä¸İ æĹ¶ +ä¸İæĹ¶ 俱 +ä¸İæĹ¶ä¿± è¿Ľ +å«Įçĸij 人 +èī¯ å¿ĥ +头 é¡¶ +è´¢ æĬ¥ +ä½Ľ æ³ķ +å¾ µ +åİŁ ä»¶ +åĭ ŀ +çĶ· 篮 +å¤ĸåĽ½ 人 +è¿Ŀ 纪 +æī¾ äºĨ +æįķ æįī +缸 è¯Ĩ +æIJľ éĽĨ +çļĦ ä¼Łå¤§ +ä¸ī ç»´ +å°±è¡Į äºĨ +çĭIJ æľĪ +çĭIJæľĪ å±± +å¸ĮæľĽ éĢļè¿ĩ +èĢĮ 对äºİ +éĿ¢ å°į +åĨĽ åĽ¢ +è¡Ĺ åĮº +æĤ¬ æĮĤ +便 ç§ĺ +æľīä¸Ģ çĤ¹ +ä¼ļè®® ä¸Ĭ +ä¸ĭ æīĭ +廣 åijĬ +äºĶ è¡Į +çŃī åĢĻ +ç´§ç´§ åĽ´ç»ķ +æĭ¿ äºĨ +æ¡Į éĿ¢ +ç¥ŀ æĥħ +éĽĦ åİļ +çŀ ³ +楼 ä¸ĭ +å½ ª +äºĭ åıij +åĨį è§ģ +é¤ ĺ +é¢Ħ åĶ® +åİ» çľĭçľĭ +æĪij们 åºĶ该 +ä¸ī å®¶ +æµ Ĭ +ä¹IJ éĺŁ +çľĭ ä¸įè§ģ +èĦij åŃIJ +æĮģ æľīçļĦ +çϽ èıľ +éĹª çĥģ +åĸĿ æ°´ +æİ§åζ ç³»ç»Ł +ä¸ĵ åĮº +æľĿ å»· +æĪij å¿ĥéĩĮ +å±ķ åİħ +èľĺ èĽĽ +åĨ» ç»ĵ +ç² ª +åº IJ +åIJij 社ä¼ļ +åĨ³çŃĸ éĥ¨ç½² +çŁŃ æľŁåĨħ +æĸ° ä¸ļæĢģ +æľ Ķ +æĹ¶ æĬ¥ +使 ä¹ĭ +åĽł åŃIJ +åıĤä¸İ èĢħ +çļĦ 年轻人 +æīĭ 表 +å°ģ éĶģ +为ä»Ģä¹Ī ä¸į +åIJ¸ çĥŁ +æ¯Ĵ ç´ł +åĪij æ³ķ +磫 æŃ£ +身 æĹģ +åİŁ è°ħ +çĽij æĬ¤ +æŃ¤ å¤Ħ +éļ¨ æĻĤ +æŀľ å®ŀ +åĮ»çĸĹ æľįåĬ¡ +ä¸į åIJĪçIJĨ +æIJŀ 好 +çļĦ èĦļæŃ¥ +å¤ĸ å¥Ĺ +ç¶ĵ éģİ +æĶ¾ ç¼ĵ +åģľ çķĻ +æĺŁ çIJĥ +çļĦä¸Ģ éĿ¢ +åĩł ä½ķ +è½® åĽŀ +æ¯Ľ å·¾ +ä¿® çIJĨ +ä¸įçŁ¥ ä¸į +ä¸įçŁ¥ä¸į è§ī +æķ´ 个人 +æ¯ģ çģŃ +åı° å·ŀ +使ç͍ 寿åij½ +é»ij çϽ +æij¸ ç´¢ +é¼ł æłĩ +éĿ© æĸ° +éº µ +ä¸ĵéŨ 为 +å¾Īå¤ļ æľĭåıĭ +å·¥ä½ľ ç»Ħ +åIJĪ å½± +çĤº ä»Ģ麼 +æŀģ 度 +çļĦ è¿ĽæŃ¥ +å½ĵ ä¹ĭ +å½ĵä¹ĭ æĹł +å½ĵä¹ĭæĹł æĦ§ +è´´ è¿ij +å°º 度 +åľ¨ çİ°åľº +éĻį 临 +åħ»èĢģ éĩij +ç£ ķ +åı¯ä»¥ 使 +管çIJĨ æ°´å¹³ +æľ¬æĬ¥ è®°èĢħ +æ³ķ 令 +åį¡ è½¦ +举 æµ· +å¤ļ éĩį +åħ¶ éĹ´ +ç´ Ļ +éĩį大 
é¡¹çĽ® +æ±Ĺ æ°´ +ç»Ħ å§Ķä¼ļ +ä¿¡æģ¯ åħ¬å¼Ģ +ä¸į论 æĺ¯ +ä¸Ģ åIJ¬ +èĴ¸ æ±½ +æıŃ ç§ĺ +è¶ħ éģİ +触 åıij +å© ¦ +åħ³èģĶ äº¤æĺĵ +å°± ç»Ļ大家 +好 ä¹ħ +åĢŁ è´· +游æĪı è§Ĵèī² +å¼ĢåIJ¯ äºĨ +æİ ł +åħļçļĦ åįģä¹Ŀ +ä¸ĭ 鼨 +çŁŃ æĹ¶éĹ´åĨħ +å¯ ħ +导 åħ¥ +å·¥ä½ľ ç»ıéªĮ +ä¹Ł åıªèĥ½ +鼷 éľĨ +è·Ł è¿Ľ +åį¡ éĢļ +é¢ĩ æľī +æľº ä½ĵ +æĪĺ士 èģĮä¸ļ +女 主 +ä½ĵåζ æľºåζ +è¶³ åįı +èĪĴéĢĤ çļĦ +åĢŁ åı£ +æī¹ åΤ +æķ° å̼ +è« ¾ +éĺ¿æĭī 伯 +åĺ İ +æħ ¶ +è¾¾ 人 +å¼Ģ æ°´ +大 鼨 +温 室 +ä½İ è¿· +ä»į æĹ§ +éªĹ åŃIJ +亲 å±ŀ +çIJĨ æĻº +æľ¬ åŁºéĩij +å¨ ħ +åĨĻåŃĹ æ¥¼ +å¢Ļ å£ģ +å® µ +èϽ çĦ¶æĺ¯ +顺 çĿĢ +åħ« åᦠ+åķĨ ç͍ +ä¸į 失 +è¿· èĮ« +顺 便 +æļij æľŁ +欺 è´Ł +é¢ij é¢ij +该 æł¡ +æĸĻ çIJĨ +æ·± æĥħ +åīį éĶĭ +ä¿Ŀ èŃī +èģĮä¸ļ çĶŁæ¶¯ +åħ¬ å¼Ģåıij +åħ¬å¼Ģåıij è¡Į +åħ¥ æĪ· +éł ĵ +å̾ åIJ¬ +éŃ ģ +æĦī æĤ¦ +åĽŀ åIJĪ +åħ¨åĬĽ 以 +åħ¨åĬĽä»¥ èµ´ +åĥ¹ å̼ +èĥ½åĬĽ 强 +ç»ı å¼Ģ +ç»ıå¼Ģ åĮº +è¿ľ æĸ¹ +çļĦ éģĵçIJĨ +缴 åįĩ +缴åįĩ æľº +为主é¢ĺ çļĦ +ç»Ļ æĤ¨ +è¿ĺ æĥ³ +æ¯Ķ æĪij +åĨľ çī§ +æµ· åºķ +çŃ¾è®¢ äºĨ +对äºİ æĪij们 +æĹ¶ 许 +éĶ® çĽĺ +å®ŀéĻħ æİ§åζ +çļĦ æ¨¡æł· +åıįæĺł äºĨ +代 åĬŀ +åĮ» ç͍ +éĽĨ ç»ĵ +åıijå±ķ åīįæĻ¯ +æĮĩ çĿĢ +åįİ åĮĹ +è¿Ļ åĩłä¸ª +åIJį æ°Ķ +åĤį æĻļ +èĩª åıij +æ³¢ åħ° +大åĬĽ æİ¨è¿Ľ +èĩª ç§° +èįĨ å·ŀ +æIJį 害 +äºĨä¸Ģ åı¥ +æľĢåĪĿ çļĦ +éĩijèŀį å᱿ľº +æĢĢ å¿µ +è¡Į åĭķ +女 æİĴ +ä¸į è§£ +ä¼ł éĶĢ +转载 请 +饰 åĵģ +åıª 为 +ä¸İ ä¼Ĺ +ä¸İä¼Ĺ ä¸įåIJĮ +èĥ½ èĢĹ +èı© æıIJ +è¿ij 两年 +è¿Ķ 乡 +马ä¸Ĭ å°± +äºĮ çŃīå¥ĸ +æ°´ 管 +æ³ķ åѦ +çģŃ çģ« +大 å§IJ +åij¨ 转 +æľī æľŁ +æľīæľŁ å¾Ĵ +æľīæľŁå¾Ĵ åĪij +å°į æĸ¹ +ç¥ŀ èī² +æ²¹ èĦĤ +ä¸ī çĤ¹ +ä¸į åĪ©äºİ +äºĭä¸ļ éĥ¨ +å°± è·Ł +å¼Ģ æĶ¯ +å°ı 女åŃ© +åħ±åIJĮ åĬªåĬĽ +çĶļèĩ³ è¿ĺ +è¿Ļ åIJį +è¿Ļ ç¬Ķ +çݯ åį« +æľī ç§į +è§Ĩ åĬĽ +çĨŁ çŁ¥ +åħ¬ç§¯ éĩij +æ¶Īéĺ² å®īåħ¨ +é¢ĩ 为 +大 èħ¿ +éĿ ¶ +çī¹ æķĪ +æľįåĬ¡ åĮº +å¼Ģ åĩº +深度 èŀįåIJĪ +æĹł å¿§ +æŁ¥ éĺħ +ç»Ī ç»ĵ +ä¿Ŀ ç¨İ +è¨İ è«ĸ +å½ĵ åģļ +è·³ èĪŀ +å¯ § +女 çİĭ +è®°èĢħ åľ¨ +åħ¨ 产ä¸ļéĵ¾ +è´¯ éĢļ +åħ´ ä¸ļ +éĻį åΰ +å°ģ éĿ¢ +åħ¨éĿ¢ æİ¨è¿Ľ +奶 èĮ¶ +éĢī åĿĢ +äºĨä¸Ģ åľº +åIJĮ ä¼´ +è®® 论 +æIJ ĵ +诸 èijĽ +诸èijĽ 亮 +å¹² åĺĽ +æµģ æĦŁ +ä¸ĵä¸ļ çŁ¥è¯Ĩ +ç͵ ç«Ļ +åĩı å¼± +åĩº åħ¥ +åIJĦ çľģ +éĿŀ常 é«ĺ +åľ° 毯 +åıij æĸĩ +çĦ ī +çĥ§ çĥ¤ +å£ģ 纸 +æģ¶ åĮĸ +èĬ ¸ +èĥĸ åŃIJ +çĩ Ĵ +çľģ éĴ± +çϾ 强 +çIJĨå·¥ 大åѦ +éĴ¢ æĿIJ +åĽ½æľī èµĦ产 +æĪĺ æľº +æ³Ħ éľ² +åIJİéĿ¢ çļĦ +æ°´ èµĦæºIJ +æ¢ħ èĬ± +åĨĻ çĿĢ +ä¹ĭ 声 +æĹł åı¯ +æĺİ æľĿ +ç«ĭæĸ¹ ç±³ +ç· £ +æĶ¾ è¿ĩ +ç¦ı çͰ +å¾Ĺ ä½ı +åıĹ ä¼Ĺ +ä¸Ń 级 +çĹħ åıĺ +ä¸Ģ çŀ¬éĹ´ +æĿĥ éĩį +人æĢ§ åĮĸ +åĮ»çĸĹ åį«çĶŁ +ä¸įåΰ ä½į +æĻºèĥ½ å®¶å±ħ +饮 ç͍ +æ¼Ķ åıĺ +é«ĺ ç´łè´¨ +ä¹Ļ æĸ¹ +åģľ çķĻåľ¨ +èİ· æī¹ +ç©¿ æ¢Ń +客 åľº +æĮ½ åĽŀ +京 åŁİ +çĶŁåij½ åĬĽ +實 éļĽ +çĩ Ī +åĨį çݰ +çݰå®ŀ ä¸Ń +æľī ä¿¡å¿ĥ +çĸı éĢļ +åĺ´ åĶĩ +鼷 éĶĭ +èıľ åįķ +éħ ¯ +è¶ħ é«ĺ +å¾Ī é«ĺåħ´ +çĶŁ æ®ĸ +éĢł ä»· +误 åĮº +æĨ ĭ +好 æ¶Īæģ¯ +å´ Ń +以 èĩ´ +å¼Ģ çİ©ç¬ij +çĽij è§Ĩ +å·¡ å¯Ł +å¾· å·ŀ +æĹ© æĹ© +éĹª ç͵ +æĪª åĽ¾ +åı¯ä»¥ æł¹æį® +æīĭ èīº +æİ¥ 轨 +ç§į æĹı +æĢĢ éĩĮ +åİ» åĮ»éĻ¢ +ä¸Ģ äºĮ +å¼Ģ éĺĶ +åĩı éĢŁ +ä½Ĩ ä»İ +éĢĻ ä¸Ģ +åĩı åħį +主é¢ĺ æķĻèĤ² +å¼Ģå·¥ 建设 +è¹ ¦ +æľĪ 饼 +ä¸ĭ æ²ī +å°Ĭ 严 +éĻ ĩ +å®ŀ æľ¨ +å»ł åķĨ +声 ç§° +èĢĥ åľº +å¸ĥ é²ģ +èĩª æĿ¥ +èĩªæĿ¥ æ°´ +éĴ ¾ +å¹´ 以ä¸Ĭ +大 åıĶ +ä»ĸ å·²ç»ı +åħ¨ æĿij +èģĶç³» ç͵è¯Ŀ +为 导åIJij +åΤ å¤Ħ +对 éĺµ +缮 æ¨Ļ +åIJį é¢Ŀ +客 æ°Ķ +横 åIJij +çŃī åĨħ容 +åĩł çĤ¹ +è°Ī 论 +ä¸į ä¹ı +å±ķ çݰåĩº +è¾ĥ éķ¿ +éĢĨ 转 +å°ı æĻĤ +æĺ¯ å¤ļä¹Ī +æľ¬ æľĪ +è¿ij è§Ĩ +æĪIJç«ĭ 以æĿ¥ +代表 çĿĢ +æĬ¥ å¤į +æĪı æĽ² +è¨Ń åĤĻ +åħ¥ èĤ¡ +å¾ģ æľį +é«ĺ åĩº +èĪŀåı° ä¸Ĭ +å¿ĥ åĬ¨ +两 çĤ¹ +缸 çķ¶ +èĻ Ľ +主 页 +åĩł å®¶ +æĹł ä¸į +åįı å®ļ +æĸ IJ +å¯ĵ æĦı +åħ¨ 线 +æįķ é±¼ +åı¯ä»¥ ä»İ +æľī è¿Ļæł·çļĦ +æģ¶ éŃĶ +åĮħ åŃIJ +æģ ¤ +å¼Ģå¥ĸ ç»ĵæŀľ +ä¸į æŃ» +èĹ į +弯 æĽ² +æµ· 峡 +éĶĢ æ¯ģ +çļĦ çĭ¬çī¹ +示 æĦı +ä¸įèĥ½ åĨį +èĥ½ æĬĬ +éĺ² çº¿ +ä¸įå°ij äºİ +æ± Ģ +çļĦ éĤ£ä¸Ģ +羣 æĥħ +åŀ ® +被 æīĵ +åĽ½ å®ī +ç¾İ å¦Ļ +è¿Ļ åĩł +åĩº éģĵ +æľįåĬ¡ äºİ +æĪIJæŀľ 转åĮĸ +æīį åįİ +天 é¹ħ +åĩł 个人 +åĢĺ 
èĭ¥ +è̽ 误 +æĬĹ æĪĺ +è¡Į éĬ· +æĿ¥ è¢Ń +åĢŁ éĮ¢ +èįī èİĵ +ä¸¥æł¼ æī§è¡Į +举è¡Į äºĨ +å¤ĸ ç±į +å·² è¾¾ +æĿij åħļæĶ¯éĥ¨ +è¡ Ŀ +éĻį èĩ³ +æµ· éĩı +é¤IJ é¦Ĩ +æĢ¥ å¿Ļ +æ·± è¿ľ +å¾Ģ è¿Ķ +ç¨İåĬ¡ å±Ģ +å¹¿æ³Ľ åºĶç͍ +è®® åijĺ +æĹł æķĮ +çľ¼ åħī +çĥŃè¡Ģ ä¼łå¥ĩ +æŃ IJ +äºĨ äºĽ +è¿Ŀ èĥĮ +è¿Ļ æĺ¯ä¸Ģç§į +ä¸į 稳å®ļ +大家 åĪĨ享 +表 çı¾ +åīį åįģ +è·¯ è¿ĩ +æĴ © +åIJĮ æĥħ +ä¹ł ä¿Ĺ +åıij è´¢ +åºĶ æľīçļĦ +æĿİ æŁIJ +èĤ Ľ +马 åħĭ +éĢļ åijĬ +å·¨ 人 +ä¸Ģ åĽ¢ +éĢĻ æ¬¡ +ä¸į äºĨè§£ +æĸ½ è¡Į +èij¡èIJĦ çīĻ +åıĺå¾Ĺ æĽ´åĬł +æı £ +åĪĽæĸ° èĥ½åĬĽ +çķħ éĶĢ +表 æī¬ +æ¯Ķ åĪ© +æ¯ĶåĪ© æĹ¶ +åĮ»çĸĹ ä¿ĿéĻ© +æĵį 纵 +伤 亡 +æµİ å®ģ +åıĺ äºĨ +æľ¬æ¬¡ æ´»åĬ¨ +åľŁ 豪 +æĥ³ åĬŀæ³ķ +æĺ ķ +å½ĵ æĻļ +åĩº å±Ģ +çĥŃ è®® +è°Ī è°Ī +æĻĭ åįĩ +åĬ¿ å¿ħ +çĻ» å±± +éĤ£ åĦ¿ +åIJĥ åΰ +ä¹ĭ åŁİ +å¿« æĿ¥ +æ¹Ľ æ±Ł +第ä¸ī 个 +åħ¨éĿ¢ æıIJåįĩ +å¥ĸ åѦ +å¥ĸåѦ éĩij +æĬķåħ¥ 使ç͍ +é½IJ é²ģ +åı¯ä»¥ æĬĬ +åĴĮ ä»ĸçļĦ +è´ŃæĪ¿ èĢħ +æŃ£å¼ı åIJ¯åĬ¨ +åįİ æ¶¦ +ä¸įæĸŃ å®ĮåĸĦ +éĴ¢ æĿ¿ +ç´¯ 积 +满 èĦ¸ +åĽĽ æĸ¹ +è´¢ çī© +ä»ĸ们 ä¼ļ +å¤ı æĹ¥ +éĤ£ 个人 +éĿł çĿĢ +çĤ¹ äºĨ +çĤ¹äºĨ çĤ¹å¤´ +æ© ĭ +åıΠ好 +åıĪ好 åıĪ +åıĪ好åıĪ å¿« +éĺµ éĺµ +å°ģ 建 +æľ¬ çͰ +çī©ä¸ļ æľįåĬ¡ +èĩªè´¸ åĮº +åIJ ı +便åĪ© åºĹ +åĽ½å®¶ æłĩåĩĨ +éĿ¢ ç²ī +èī° è¾Ľ +æĶ» åħ³ +æīĵ åĮħ +车 éĺŁ +人 éĢī +åı¯ ä¸įæĺ¯ +äºĮ åįģå¹´ +åIJį å¸Ī +浦 举 +åħ¬ è¯ģ +è¿IJ éĢģ +æĺ¯ æľĢ好çļĦ +æŁĶ åĴĮ +çİĭ æŁIJ +çĹħ æĪ¿ +åĨ¶ éĩij +ä¸Ģä»¶ äºĭæĥħ +åį ¤ +åı¯ æİ§ +çī Ł +æĭ Ĥ +å·² äºİ +人 éĢł +çĶŁçī© åĮ»èᝠ+ä½ĵ çݰåĩº +èĤ² åĦ¿ +èĢģ å®ŀ +åľĸ çīĩ +è« ¸ +ç´¯ äºĨ +æĦŁåħ´è¶£ çļĦ +åĽ¾çīĩ æĿ¥æºIJ +ä¹Ł æĺ¯ä¸Ģç§į +æ¾İæ¹ĥ æĸ°éĹ» +æĹ¶ 表示 +åħī è¾ī +æĬ¥ åºŁ +å²ģ æĹ¶ +éħ ® +æ£Ģ ä¿® +åıĺ éĢŁ +åıĺéĢŁ ç®± +åľ¨ èģĮ +éı ¡ +æį Ĥ +çĿ£ åĬŀ +æ°¸ ä¸į +åģļ ä¸ĢäºĽ +åİĨ æĹ¶ +å·¥ç¨ĭ æľºæ¢° +æģ° å½ĵ +å°± åľ¨äºİ +ç§° åij¼ +éĢļ常 æĺ¯ +æł· å¼ı +åij¨ ä¸Ģ +èĭ± éķij +åĿĩ 线 +ä¼ł éĹ» +ç͍æĪ· ä½ĵéªĮ +èµŀ åIJĮ +骨 æĬĺ +为主 ä½ĵ +æ±Ł å±± +æ¸ħ æľĿ +æĶĢ åįĩ +ä¸į çĽ¸ä¿¡ +éĿ ´ +æŃ¦ åĬŁ +åĭ¤ åĬ³ +æĿ¥ æī¾ +å°Ĩ æĮģç»Ń +丫 头 +æ¨Ļ æºĸ +è£ ´ +深深 çļĦ +åŃķ èĤ² +è§ĦåĪĴ 建设 +æ¸ħ çν +ç²¾åĩĨ æī¶è´« +æīĵçł´ äºĨ +è¿Ļä¸Ģ 天 +å·¥ä½ľ æĢ»ç»ĵ +æĹħ ç¨ĭ +举 èIJ¥ +æĶ¾ å°Ħ +æľī åĩłä¸ª +éĿŀ çī©è´¨ +åIJĥ å¾Ĺ +åĹ ¨ +ä¼ļ åıijçĶŁ +篮 æĿ¿ +å¼Ģ å°ģ +麻 å°Ĩ +èıı æ³½ +ä¸į åIJĪ +ç³»åĪĹ äº§åĵģ +èѬ å¦Ĥ +ç¾İ èªī +èĩªå·± åĸľæ¬¢ +交æĺĵ ä¸Ńå¿ĥ +åIJĪ åͱ +使 æĪij +åĥı ç´ł +带 éĺŁ +ä½Ĩ 对äºİ +æĬĬ è¿Ļ个 +èĤĿ èĦı +åįķ纯 çļĦ +æĶ»åĿļ æĪĺ +缼 ä¼ļ +åijµ æĬ¤ +æª Ģ +èµ¶ ä¸Ĭ +æ¥ Ĭ +ä¹ħ äºĨ +ç¡ Ŀ +çŃĶ é¢ĺ +ä¿ĿæĮģ çĿĢ +è§ģ è¯Ĩ +çĤ¹ åĦ¿ +åįĬ 个æľĪ +æ» ĩ +浸 泡 +ä¼ł éĢģ +åľ¨ å¸Ĥåľºä¸Ĭ +ä¹ĭ 乡 +çī¹ éķ¿ +éĽ ŀ +èª ł +身 å¤Ħ +æŁł 檬 +身 ç©¿ +çľģ åħ¬å®ī +çľģåħ¬å®ī åİħ +åıĻ åĪ©äºļ +åĩł åĪĨéĴŁ +人 åĢij +åľ° 段 +èĩª åѦ +ä¹Ł è¶ĬæĿ¥è¶Ĭ +èģĮ æĿĥ +æĸ § +èĩ » +å½Ĵ 纳 +驾 é©Ń +éĥ¨åĪĨ åľ°åĮº +没æľī æĥ³åΰ +æĴ ĩ +ä¹Į é²ģ +ä¹Įé²ģ æľ¨ +ä¹Įé²ģæľ¨ é½IJ +èĤ² 人 +çļĦ æŃ¥ä¼IJ +å»¶ æľŁ +æ²¹ æ°Ķ +åģļ å®Į +åľ£ åľ° +丰 åİļ +宽 带 +åı¯éĿł çļĦ +åºŃ éĻ¢ +åŃ ľ +å°ı康 社ä¼ļ +å®īåħ¨ 管çIJĨ +å¹´ 第 +æİĴ 污 +èĥĮ åĮħ +å®¶ ä½ı +åħ¶å®ŀ å°±æĺ¯ +ä¼ļ è§ģ +帮åĬ© ä¼ģä¸ļ +ç½ij è´Ń +æĺ¯ ä¸įä¼ļ +飯 åºĹ +æŃ» åİ» +åħįçĸ« åĬĽ +æľ ķ +åĸĿ äºĨ +è½» å¾® +个æľĪ åĨħ +ç»Ħ åĽ¢ +åĴĮ å®ĮåĸĦ +é¸ ½ +æıIJ éĢŁ +西å®ī å¸Ĥ +ä¸Ńå¿ĥ 主任 +æĹ¶éĹ´ 为 +æľŁ æĿĥ +è¶ ķ +ä¸įä»ħ è¦ģ +æľį ä»İ +é¡ĺ æĦı +ä¸į å°ı +ä¸įå°ı çļĦ +ç° ĩ +çª ¦ +åĪĩ æĪIJ +åĵĪ åĪ© +天 羣 +ä¸Ģ次 次 +éĩij å¸ģ +æĢİä¹Ī èĥ½ +ç½ij è´· +ä¼ļ计 å¸Ī +çŁŃ 缺 +对 æłĩ +åıĺå¾Ĺ æĽ´ +åīį åĩłå¤© +éĺ² æ±Ľ +彩 èϹ +åĵģ ä½į +表 æł¼ +严 å¯Ĩ +æ¯Ľ åĪ©çİĩ +çļĦ åį±å®³ +å½ķ åζ +æ°´ åĬ¡ +èĥ½å¤Ł 让 +å¹³ æĿ¿ +ä¹³ æĪ¿ +è¸ı å®ŀ +é¦ĸ åĪĽ +é¦Ļ èķī +æĬ¥ 表 +ä¸Ģ æĬ¹ +åĩºçĶŁ äºİ +è²» ç͍ +åĩº 让 +åIJĪæ³ķ æĢ§ +å°¼ åħĭ +åĨ° åĨ· +é¦Ļ æ°Ķ +åı· ç§° +èµ· çłģ +åŁİ åİ¿ +çİ© èĢį +ä¸Ĭ éĻIJ +ä¼ļè®® ç²¾ç¥ŀ +æĹģè¾¹ çļĦ +便 ä¼ļ +æıŃ æĻĵ +çİ© æĦı +éĽª å±± +åIJij çĿĢ +ä½ĵèĤ² åľ¨çº¿ +说æĺİ ä¹¦ +åĮĸ èĤ¥ +åħļç»Ħ 书记 +åĬ¨ 人 +ä¹ĭ æīĢ +æľĪ èĩ³ +æľĢå¿« çļĦ +èĬĤ åģĩæĹ¥ +ä¸ĵ åľº +èĢĥ ä¸Ĭ +çª Ł +é²ľ è¡Ģ +è¾ĥ强 çļĦ +æĤĦ 
çĦ¶ +å¤ļ个 åĽ½å®¶ +çªĹ å¸ĺ +æŀģ å¤§åľ° +ä¸įç͍ æĭħå¿ĥ +è¿Ļä¹Ī åģļ +åĥ¹ æł¼ +ç¾İ丽 乡æĿij +å°ıæĹ¶ åĨħ +ç´§ è¿« +大 çģ« +èĥ³ èĨĬ +æĵįä½ľ ç³»ç»Ł +æ®ĭ çķĻ +åĨĻ åĩº +ç¦ģ å¿Į +åĬłçĽŁ åºĹ +è¿ij çϾ +便 åı¯ +æķ´æĶ¹ æİªæĸ½ +éĩĩ访 æĹ¶ +åĶIJ 代 +æ·±åĮĸ æĶ¹éĿ© +çŁ ¢ +éĥ½ åĸľæ¬¢ +è¶ĬæĿ¥è¶Ĭ é«ĺ +èĬ± æľµ +头 çĸ¼ +å®ī 康 +å¢ŀéķ¿ çİĩ +çľ¼ çľĭ +å°±æĺ¯ 为äºĨ +èĢĮ 导èĩ´ +åĬłå¿« 建设 +èĬ± æł· +åĨħå¿ĥ çļĦ +æĺĨ å±± +è³ĩ æºIJ +åĽŀåΰ å®¶ +èıĬ èĬ± +æ°´ éĩı +å¾ģ ä¿¡ +è¡ĮæĶ¿ åĮº +ä¹ĥ æĺ¯ +æĬķèµĦ é¡¹çĽ® +å«ģ ç»Ļ +ç¥ŀ åľ£ +ç¨ ł +æľ¬æĿ¥ å°± +éĢIJ ä¸Ģ +èģĮä¸ļ æĬĢæľ¯ +ä¸įèī¯ ä¿¡æģ¯ +æīĺ è¿IJ +åIJ¯ 示 +ä¹ĭ åħ§å®¹ +éŁ ¶ +奢 åįİ +æıŃ ç¤º +æĪIJ为 ä¸ŃåĽ½ +æ¶Īè´¹ åĵģ +åħ¬ ç͍ +æIJŀ å®ļ +请 ä½ł +æŁ ļ +åĨħ è¡£ +ä½Ĩ ä»ĸ们 +ä¿Ŀ 湿 +该 åİ¿ +饱 åĴĮ +æİ¨ åIJij +èµĦæĸĻ æĺ¾ç¤º +ä¸į å½±åĵį +人 人éĥ½ +åıijå±ķ 壮大 +åħ»èĢģ æľįåĬ¡ +çĶŁæ´» æ°´å¹³ +åIJĦ åİ¿ +ä½ł éľĢè¦ģ +说 çļĦæĺ¯ +å¤ĸ åªĴ +æŃ¤ 人 +次 è¦ģ +追 èµ¶ +åºĶ该 å¦Ĥä½ķ +æĹ¥ åĩĮæĻ¨ +çķ¥ æľī +éĥ½ æĥ³ +游 ä¹IJ +è¿Ļ款 游æĪı +å¹³ æ·¡ +æĺ¯ä¸Ģ åĢĭ +å¤ĩ èĢĥ +åζ æŃ¢ +ä¸Ģå®ļ èĥ½ +å¾Ĵ å¼Ł +以 çĤº +åįĥ åħĥ +äºĶ åħŃ +迪 士 +迪士 å°¼ +éĺ³ æĢ§ +åĨ¬å¥¥ ä¼ļ +å°±æĺ¯ åĽłä¸º +æĮĤ éĴ© +æ¦Ĥ åĨµ +åıªè¦ģ æľī +æ²¹ çĶ» +åľ° æłĩ +ä¸Ĭ è°ĥ +产ä¸ļ åĽŃåĮº +åħ« åįģ +æ£ ± +æ¶² æĻ¶ +æĿij å§Ķä¼ļ +çŃ¾çº¦ 仪å¼ı +è¿Ļ åħ¶ä¸Ń +åĨĻ éģĵ +示èĮĥ åŁºåľ° +éĩİçĶŁ åĬ¨çī© +鼻åŃIJ ä¿¡ç®± +åĽ½éĻħ è´¸æĺĵ +人 æĿĥ +ä¿Ŀ 管 +èĭ¥ æĤ¨ +åİĭ æĬij +é» Ľ +åľ° çľĭçĿĢ +éĻ ° +ä¸Ģå¹´ å¤ļ +ä»İ 容 +ä¸Ń æĸŃ +å¯Ł è§ī +ç§» 交 +éĶ ¯ +æĪĸ许 æĺ¯ +ç¶ ł +两 项 +æľĢ åĸľæ¬¢ +æľĢåĸľæ¬¢ çļĦ +å¤ľ éĩĮ +åIJĮ ä»ģ +åĪĽæĸ° 驱åĬ¨ +è°ģ èĥ½ +é£ ¾ +åħī åѦ +åİ Ħ +èĦ± é¢ĸ +èĦ±é¢ĸ èĢĮåĩº +è¿ ¦ +æĺ¯ ä¸įåı¯èĥ½ +çª ¥ +èĥ½ 满足 +宽 度 +伦 çIJĨ +åı¯ä»¥ èİ·å¾Ĺ +转 ä¼ļ +å±± æĿij +éĵº 设 +åĩº åĩ» +æĸĩåĮĸ èīºæľ¯ +ä¼ļè®® 室 +æŃĮ 声 +æ» Ķ +èIJİ ç¼© +æľįåĬ¡ åijĺ +åıij表 äºĨ +æĸ¼ æĺ¯ +æĺİç¡® è§Ħå®ļ +ç»´ å¥ĩ +æ°´ 产 +æĬķ ä¿Ŀ +éĺ´ éģĵ +èµ¶ å¿« +夺 å¾Ĺ +ä¸ĭ åįķ +çµģ åħ¬åı¸ +çݯ ç»ķ +å½ Ī +ä½ľé£İ 建设 +æĹħ游 æĻ¯åĮº +æľī æĽ´å¤ļçļĦ +丰å¯Į å¤ļ彩 +çIJĨè´¢ 产åĵģ +åĩº å·® +ä»İ严 æ²» +ä»İ严治 åħļ +缸 å¹² +æ»ĭ 润 +主åĬŀ æĸ¹ +åī§ åľº +æ»ļ çIJĥ +æ©Ħ æ¦Ħ +èĩªä¸» åĪĽæĸ° +éĢļ å¾Ģ +æł¼ å°Ķ +çļĦ ä¼ĺçĤ¹ +èĥĮ ä¸Ĭ +çª ľ +çĪĨ åĩº +å¹³ æķ´ +ä¸Ģ èĦļ +åħ¨ä½ĵ åijĺå·¥ +éĻIJ å®ļ +åŁİéķĩ åĮĸ +æ· ³ +éĢ® æįķ +è¡ĮåĬ¨ 计åĪĴ +æīĵ å¾Ĺ +åİļ éĩį +纪å½ķ çīĩ +åĿļ ä¿¡ +央 ä¼ģ +åĨį ä¹Łä¸į +天 涯 +åıĤèĢĥ èµĦæĸĻ +æľī æ¯Ĵ +åIJ¸ 纳 +è¶Ĭ åıij +éĩįè¦ģ æĦıä¹ī +åĽ½éĺ² éĥ¨ +è¿Ļ个 è¡Įä¸ļ +æĻ® æŁ¥ +å¼Ĥ æĢ§ +å»¶ è¿Ł +å°ı å¹ħ +èī² æĥħ +综åIJĪ æ²»çIJĨ +æŃ£æĺ¯ åĽłä¸º +产ä¸ļ ç»ĵæŀĦ +çłĶç©¶ æĬ¥åijĬ +åģľ ä¸ĭ +éķ¿ èĢģ +éĩĿ å°į +åįĹ京 å¸Ĥ +çģĮ æºī +转 è¿IJ +欺 è¯Ī +éĢł åģĩ +åĪĨå¸ĥ å¼ı +æĦŁ è§¦ +æĪij å½ĵæĹ¶ +åıij è§ī +åĽ¾ 纸 +æĶ¹ èī¯ +çĭł çĭł +åĨ² åĪº +æĸ° 京 +æĸ°äº¬ æĬ¥ +ç¥ŀ åύ +秸 ç§Ĩ +çĪ º +å°Ĩ è¿İæĿ¥ +å·¥ ä¿¡ +工信 éĥ¨ +éĻIJ éĩı +æŃ¢ æįŁ +åѦä¼ļ äºĨ +åįİ çĽĽ +åįİ缼 é¡¿ +å¾Į ä¾Ĩ +ä¸ĭéĿ¢ æĺ¯ +ä¸ĭéĿ¢æĺ¯ å°ı +æIJ¬ è¿IJ +ç¾İæľ¯ é¦Ĩ +æ¸ħ åĩī +å¤ļå¹´ åīį +è© ŀ +åįĥ ç±³ +表 è¿° +æ±Ł éŨ +åĬłæ²¹ ç«Ļ +æľ¬ èĥ½ +导 读 +åĽ´ è§Ĥ +å¹¶ åIJij +åŁºæľ¬ æĥħåĨµ +æīĵ å¼ĢäºĨ +è¿Ļ ä¸ī个 +æ±ķ 头 +强 æľīåĬĽ +强æľīåĬĽ çļĦ +è¿Ľ åľº +ä¹Ŀ æ±Ł +çIJĥ æĺŁ +好çľĭ çļĦ +大 æĪ· +æ¹ ¯ +å¥ĩ å¦Ļ +ä¹IJ åύ +æĪijçļĦ å¿ĥ +çľī 头 +åĨľä¸ļ çĶŁäº§ +ç¼ĸ çłģ +åŁº ç¤ +åŁºç¤ İ +天 æĸĩ +åĢĭ人 è³ĩè¨Ĭ +åİ» è¿ĩ +èģĨ åIJ¬ +æĶ¾ åģĩ +ä¸į åħ·å¤ĩ +æ·Ģ ç²ī +大 佬 +åħ¨ 天 +åħ¨éĿ¢ 建æĪIJ +éļIJ å½¢ +ç¼ħ ç͏ +åIJ ³ +è¡ĮæĶ¿ æī§æ³ķ +åŁİ åł¡ +èİ« æĸ¯ +èİ«æĸ¯ ç§ij +æīĢæľī æĿĥ +éĽĨ åľĺ +å±Ģ åī¯å±Ģéķ¿ +åĩłä¹İ 没æľī +æ´ģ åĩĢ +ç͵影 èĬĤ +åŃ© ç«¥ +æīĢ åģļçļĦ +æ¸ħ 代 +æĸ° çīĪ +éĵĿ åIJĪéĩij +为 æĬĵ +为æĬĵ æīĭ +åΤ å®ļ +çī¹ äº§ +æīĭ æ©Ł +ä¸įåı¯ æĪĸ +ä¸įåı¯æĪĸ 缺 +å¸Ĥåľº è§Ħ模 +åĿ ¯ +åĮ» åѦéĻ¢ +å¿« è¦ģ +èĮ ľ +æĬĺ èħ¾ +äºĨ è¿ĩæĿ¥ +æĬ¥åijĬ æľŁåĨħ +çī© ç§į +ç»Łè®¡ å±Ģ +æī© 建 +æ¶ ħ +责任 人 +éĺ İ +è¯Ħ è®® +å¾Ģ äºĭ +æīĢ ç¤º +æķ´ æ´ģ +éĹº èľľ +æĹħ éĢĶ +å®ŀ è®Ń +ä¹ĭ ç§° +å·´ 士 +éĢŁåº¦ å¿« +ä¸įä»ħ å¦ĤæŃ¤ +å®Ŀè´µ çļĦ +åºŁ çī© +æ²³ æ°´ +æİ¥ 纳 +ç²¾ æ¹Ľ 
+åħ¶æ¬¡ æĺ¯ +顺 å¾· +åħ¬åħ± åį«çĶŁ +è¤IJ èī² +ä¸į æĥľ +æĬĢæľ¯ æľįåĬ¡ +æİ · +æ±Ĥ èģĮ +ä¸ī 峡 +æĬķåħ¥ åΰ +太 åIJİ +åIJ¯åĬ¨ 仪å¼ı +缴æİ¥ å½±åĵį +æĸ° 款 +个 乡éķĩ +çϾ 亿 +åº « +ä¹Ł æŃ£æĺ¯ +åı¶ çīĩ +æľĢæĹ© çļĦ +æĪĺ 绩 +å·¥ æľŁ +æĻļ æľŁ +è¿Ļæł· 说 +è¯į è¯Ń +ä¾ Ħ +æķ£ çĥŃ +éĽĨæĪIJ çĶµè·¯ +åIJį è¯į +æĻº åķĨ +æĭ¥ åłµ +çĭĤ 欢 +è¿Ļ èά +æµ´ 室 +åijķ åIJIJ +æľªæĿ¥ åıijå±ķ +ä¸īä½į ä¸Ģä½ĵ +åªĴ é«Ķ +ä¸įå¾Ĺ 转载 +åĽłä¸º 她 +æĺ¾ç¤º å±ı +ä¾Ľ æļĸ +éĨ« éĻ¢ +æľī æĦıæĢĿ +æľīæĦıæĢĿ çļĦ +娱ä¹IJ åŁİ +åįµ å·¢ +åĪĽéĢł åĬĽ +竳 èĬĤ +人大 常å§Ķ +èĢĮ çİ°åľ¨ +å¤ĸ å©Ĩ +å¢ŀ æĮģ +äºĶ åįĥ +èĢģå¸Ī 们 +æ´Ľ æĿī +æ´ĽæĿī 磶 +æİĮæı¡ äºĨ +ä¸ŃåĽ½ æĸĩåĮĸ +æĸ° æĶ¿ +主è¦ģ ç͍äºİ +åıij çĥ§ +类似 äºİ +åĮĹ æŀģ +æĪij们 认为 +å¼¥ 漫 +åħ¨çIJĥ ç»ıæµİ +é¢ IJ +ä¸Ģèµ· è£ħä¿® +æĶ Ĵ +æĭī èIJ¨ +帶 ä¾Ĩ +åĨ· æ°´ +ä¸ī åĨľ +æĿ¿ æĿIJ +è¿ŀ è¿ŀ +éĵ ® +ç»ıèIJ¥ çIJĨ念 +å±± é¡¶ +å¾Ī æĥ³ +çĺ « +å§ĭç»Ī ä¿ĿæĮģ +åľ¨ 广å·ŀ +ä¸įåIJĮ æĦı +åıĺ åİĭ +åıĺåİĭ åύ +产 éĶĢ +表 éĿ¢ä¸Ĭ +æīĢ以 ä»ĸ +ç»ıéªĮ 丰å¯Į +éĥ¨ å§Ķ +åħµ åĽ¢ +æīĢ è¿° +æķ¦ çħĮ +ç»ıèIJ¥ èĮĥåĽ´ +åı£ è¯Ń +失 ä¿¡ +æ¯ı个人 çļĦ +æīĭ æĮģ +æģIJ æħĮ +åł¡ åŀĴ +é¦ ħ +éĵ¸ éĢł +æĭ¿ åĩºæĿ¥ +æİ¢ æµĭ +大家 ä¸Ģèµ· +å¥ § +å®ŀè´¨ æĢ§ +å°ı åĦ¿ +èĩº åįĹ +èĩºåįĹ å¸Ĥ +å¼Ģåıij èĢħ +åı¯ æł¹æį® +ç®± åŃIJ +饺 åŃIJ +å¿Ļ çĿĢ +æĿ¥ ä¸įåıĬ +缸 ä¼ł +åĽ½ ç½ij +èħ¹ æ³» +è¿ĻéĩĮ æľī +é£İ æĻ¯åĮº +åıĤ ä¿Ŀ +æŃ» èĢħ +æĪ´ ä¸Ĭ +æ©Ł æ§ĭ +è¯ķéªĮ åĮº +ä¼ł æİĪ +æµ· è¾¹ +泪 æ°´ +缸åħ³ åĨħ容 +éĥij å·ŀå¸Ĥ +åħij çݰ +两 åij¨ +èĬľ æ¹ĸ +ç͵åŃIJ ä¿¡æģ¯ +红 å¤ĸ +æĹħ游 å±Ģ +å¾Ģå¾Ģ ä¼ļ +è¿ħ çĮĽ +ä¼ł 羣 +æ¸ħ æ¾Ī +å°± è¿ij +微信 群 +ç³»åĪĹ æ´»åĬ¨ +ç»ı常 ä¼ļ +è§Ĥ æµĭ +å¿ĥå¾Ĺ ä½ĵä¼ļ +éĻĪ åĪĹ +åĮĹ æĸĹ +è« ® +è«® è©¢ +è¿ĺæĺ¯ ä¼ļ +æµĭ ç®Ĺ +æĺŁ ç©º +宽 容 +çī©ä¸ļ åħ¬åı¸ +æĪĴ æĮĩ +å¸ħ æ°Ķ +ä¸ĢæŃ¥ æŃ¥ +åħ± 鸣 +åĨ³ ä¸į +æİ¥ 管 +å¦ĩ èģĶ +æ¯Ķ åĸ» +é²ģ è¿ħ +æĮģ çºĮ +缸 亲 +å¨ģå°¼æĸ¯ 人 +ç«ĭ 项 +åĪ Ŀå§ĭ +èĩª åζ +è¿Ī è¿Ľ +ä¸Ĭ æ±½ +å®ı ä¼Ł +æł¹æľ¬ 没æľī +æĸ°åĨł çĹħæ¯Ĵ +åĵª ç§į +康 åħ» +è¡° èĢģ +å½ķ åĥı +é«Ķ é©Ĺ +ç»ij å®ļ +é¢Ŀ 头 +äºĶ æľĪ +èĬ± å¼Ģ +ä¸Ģ线 åŁİå¸Ĥ +åΰ åľº +æĬķ éĻį +çĹĺ çĹĺ +åıĹ ä¸įäºĨ +æīİ æł¹ +æĽ´ ä½ķåĨµ +æĬ½ æŁ¥ +åĩº è·¯ +审议 éĢļè¿ĩ +ä¸į åĥħ +èī² è°ĥ +çϾ ä½Ļ +èĤł éģĵ +æ·±åİļ çļĦ +马 åĬĽ +æĹ© æĻļ +æŃĮ èĪŀ +éĺ² æĻĴ +æľĢåIJİ ä¸Ģ个 +樱 èĬ± +å°ıä¼Ļ åŃIJ +åľ¨ å½ĵåľ° +å°ıä¼Ļä¼´ 们 +èµ· æºIJ +åħ¨ åªĴä½ĵ +ç° ½ +éħ± æ²¹ +æĹłè®º å¦Ĥä½ķ +裤 åŃIJ +åģľ äº§ +ä¸įçͱ å¾Ĺ +çīµ å¼ķ +ä¼ł åĬ¨ +ä¹Ŀ é¾Ļ +åĬł åĽº +ä¹Łä¸į æķ¢ +æĬĢæľ¯ æĶ¯æĮģ +ä¸Ĭ å²Ĺ +ç»ıéªĮ åĴĮ +æł¼ æŀĹ +åIJ¸ éĻĦ +æľªæĪIJ å¹´ +奢ä¾Ī åĵģ +追 æį§ +好 ä¸į容æĺĵ +èķ´ åIJ« +ä¿Ŀ å®ļ +æĬ¥ ä¸ļ +æµ· åĨħå¤ĸ +ä½ł çİ°åľ¨ +æ²¹ èĢĹ +è´¨éĩı 管çIJĨ +æ½ľ æ°´ +丽 æ±Ł +转 åħ¥ +è¿Ļä¹Ī ä¹ħ +æĺİ ä»£ +责任 åζ +éĩį å·¥ +大 å·´ +触 åıĬ +èµ· åĪĿ +大 å¦Ī +æĸ¯ å¡Ķ +åĨĽ å·¥ +书 éĻ¢ +å³ ¨ +æİ¨ çIJĨ +è¿Ļç¯ĩ æĸĩ竳 +è¿ģ ç§» +åľ¨ åIJĮä¸Ģ +ç»Ĩ ç»Ĩ +åīĬ å¼± +书 æĪ¿ +ç¶ĵ 常 +è¯ķ é¢ĺ +æĤ£ ä¸Ĭ +çĻ«çĹ« çĹħ +åĨ² æ´Ĺ +å¤ĸ æı´ +åħĭ åζ +åįģ æľĪ +åģļ ä¸įåΰ +ç¾İ åĮĸ +å¦Ĥ æľŁ +è¿ĺ éľĢ +天 åºľ +å°± æĦıåij³çĿĢ +çļĦç¡® æĺ¯ +éªĹ å±Ģ +å°ıç»Ħ èµĽ +è© © +ä¹Ŀ å¹´ +æĻĵ å¾Ĺ +çłĶç©¶ 人åijĺ +大 éħĴåºĹ +ç§ij åѸ +åħŃ åIJĪ +çķĮ å®ļ +车 è½½ +å¼Ģ çĿĢ +毫 æĹłçĸij +毫æĹłçĸij éĹ® +è¿IJ ç»´ +ç¦ģ åĮº +èĦ± èIJ½ +讲 å¸Ī +产ä¸ļ åŁºåľ° +é«ĺ æĢ§èĥ½ +åħī 彩 +çݰ éĺ¶æ®µ +åĩ ¿ +è¾ĥ å·® +饮 çĶ¨æ°´ +éĸĭ çϼ +ç½ij åIJ§ +çĮ´ åŃIJ +æŃ¦ æŀĹ +å®ī åİ¿ +ä¸įåı¯ æĢĿ +ä¸įåı¯æĢĿ è®® +éĬ· åĶ® +è´« ç©· +为 åķ¥ +éº ĵ +å¹¾ åĢĭ +è§Ħ模 以ä¸Ĭ +æı ļ +被 åĽ° +缺 å¸Ń +å¿« é¤IJ +æĬ¢ åįł +æĻ Ł +å¤į æ´» +æľ¬æĬ¥ 讯 +åĪĽ ä¸ĭ +æµ· 滩 +éĩı 产 +å¦Ĥä½ķ åİ» +车 ä½į +å¯ ĩ +äºĮ åįģåĽĽ +ç»ıæµİ æįŁå¤± +éħįå¥Ĺ 设æĸ½ +åŁºæľ¬ éĿ¢ +äºī 论 +就好 åĥı +çłĶç©¶ æĪIJæŀľ +éĻĪ è¿° +æīĵ åĬ¨ +ä¸ĭ å·´ +ç§Ĵ éĴŁ +对 人ä½ĵ +æĬĢæľ¯ çłĶåıij +åİŁ åŃIJ +æĺ¯ä¸Ģ 项 +äºĨä¸Ģ 份 +æĮĩ çͲ +ç͍ éĩı +è¿ĺä¸į å¤Ł +æĶ¿åºľ éĩĩè´Ń +çŁ¥è¯Ĩ çĤ¹ +ä¸ŃåĽ½ 梦 +å¾Ī å¼Ģå¿ĥ +礼 è²Į +éĿŀ常 å¤ļ +éĿŀ常å¤ļ çļĦ +åĽ ļ +æĹħ é¦Ĩ +å°½ æĥħ +æŃĮ åͱ +æ²Ļ é¾Ļ +车 åİ¢ +客 æµģ +åģı å·® +积累 äºĨ +æ¡ Ķ +çĶ» çĶ» +ä¹Ł åºĶ该 
+åºĶç͍ ç¨ĭåºı +èĥĥ èĤł +以 å¾Į +豪 å®ħ +æ·± åĬłå·¥ +缴 è¨Ģ +åĮĸ çŁ³ +åĽ½ éģĵ +ä¸ĥ 个 +ä»İèĢĮ 使 +èĤł èĥĥ +æĹ¥ è¶ĭ +çζ åŃIJ +ç· © +æĭĽ çīĮ +产 å¦ĩ +çķª èĮĦ +æĪij éĻ¢ +建çŃij å·¥ç¨ĭ +å±ķè§Ī ä¼ļ +å®¶éķ¿ ä»¬ +åĨľ ä½ľçī© +æĹ¥ å¤ľ +æĶ» æĵĬ +è§Ħ éģ¿ +èĪŁ å±± +便 æ°ij +åħ« åŃĹ +ä¸į æĽ¾ +æĶ¯ éħį +çĨ¬ å¤ľ +人 é¡ŀ +ç´Ģ éĮĦ +ç»ıèIJ¥ æ´»åĬ¨ +大 涨 +å¸Ĥå§Ķ 常å§Ķ +åĪĨ éIJĺ +ä¸Ģ个 èģĮä¸ļ +çĹħ åĽł +è¿Ļ 对äºİ +ä¸įå¾Ĺä¸į 说 +åıijç͵ æľº +æľīæīĢ å¸®åĬ© +缮æłĩ ä»»åĬ¡ +åĽł åľ° +åĽłåľ° åζ +åĽłåľ°åζ å®ľ +å°Ĩ è¾¾åΰ +ç²Ĺ ç³Ļ +稳 åĽº +å« £ +çİ°åľ¨ å¾Īå¤ļ +ä¸ĸçķĮ 级 +å¼ł æŁIJ +çĤ¹ ç¼Ģ +èij µ +社ä¼ļ ç»Ħç»ĩ +å¾Ģ åIJİ +åĬł æģ¯ +åĻª 声 +æľī åħ´è¶£ +为æĤ¨ æıIJä¾Ľ +æ²¹ æ¼Ĩ +ç¬¬åĽĽ å±Ĭ +çļĩ 宫 +ä¹Ĵ ä¹ĵ +ä¹Ĵä¹ĵ çIJĥ +éļ¨ èijĹ +éģ© åIJĪ +åįĹ éĿŀ +æĵ ´ +西 æ´ĭ +åĬł å¯Ĩ +æĪIJåĬ٠䏾åĬŀ +åı£ æ°´ +æĪIJ 年人 +æīĢ æıIJä¾ĽçļĦ +éļĶ å£ģ +åľ¨ 京 +å½ĵåľ° æĹ¶éĹ´ +çŃī åIJĦç§į +é£İ æ°Ķ +å±ĭ éĩĮ +ä¸Ģ åŃĹ +çļĦæĹ¶éĹ´ éĩĮ +åĺ¿ åĺ¿ +å¿« 讯 +ä¸Ń åľº +ä¸Ģ çĵ¶ +æ» ķ +é¢Ĩ è·ij +好 èݱ +好èݱ åĿŀ +没 åħ³ç³» +åĩº å¢ĥ +ä¸įæĺ¯ ä¸Ģ个 +éĥ½æĺ¯ éĿŀ常 +éľĩ åĬ¨ +èİ· èĥľ +åįļ å¼Ī +æĬļ åħ» +对 ç«ĭ +æľįåĬ¡ æľºæŀĦ +è°£ è¨Ģ +社ä¼ļ ç§ijåѦ +åIJ¬è¯´ è¿ĩ +æī ³ +æīĵ 磨 +åı£ æľį +好 åĥıæĺ¯ +以åıĬ åħ¶ä»ĸ +çī¹ è´¨ +亲 è¿ij +ä¸Ģ ç»ı +æ¶ Ŀ +éŃĶ æľ¯ +éģĵè·¯ 交éĢļ +è§Ħ模 æľĢ大 +å®ŀæĸ½ æĦıè§ģ +ä¹ ŀ +ä¸Ģ ä¸ĸ +åŁ· è¡Į +è±Ĩ çĵ£ +åĪĹ ä¸º +æķħ 宫 +çĶŁ åij½åij¨æľŁ +ä¸īç§į èģĮä¸ļ +详ç»Ĩ ä»ĭç»į +å®Į å¤ĩ +岩 çŁ³ +éļı æīĭ +é£ ² +æķĪæŀľ åĽ¾ +ç§ĭ åĨ¬ +åĬŁ å¾· +è§Ħ竳 åĪ¶åº¦ +æĹ¥ æ¸IJ +æīĢ éľĢè¦ģ +æīĢéľĢè¦ģ çļĦ +å²Ľ ä¸Ĭ +åĩº åľŁ +åĽ¾ æĸĩ +ç§ijæĬĢ è¿ĽæŃ¥ +éĢļ èĥĢ +èĢģ 太太 +èĭĹ æľ¨ +éĵ¶ å·Ŀ +å¸IJ 篷 +éĿŀ è¦ģ +éħį ç͵ +å¤Ħ å¢ĥ +èĤ¡æĿĥ æĬķèµĦ +ä¸Ģ缴 åΰ +åĿĩ çͱ +æĬĹ æĹ¥ +æį® ä»ĭç»į +ä½ł åĸľæ¬¢ +åĪĽæĸ° åŀĭ +åıĺ è¿ģ +è§Ĩ å¯Ł +å®Įåħ¨ 没æľī +åħĥ æĹ¦ +åı¯ ä¿¡ +åı¦ è¡Į +æĿij 级 +åħ¥ åľº +æIJŃ æ¡£ +ä¹Ł åĽłæŃ¤ +æį¢ æĪIJ +ä¸į è´Ł +äºĨ 大éĩıçļĦ +éģĶ åΰ +å¸Ĥ åİ¿ +å¹´ è¼ķ +å¿« æīĭ +å¸Į å°Ķ +èĩª èIJ¥ +éĽª èĬ± +æIJ ģ +çľ¼ ç§ij +æŃ£ 確 +çļĦ å§¿æĢģ +åĿļå®ŀ çļĦ +æĮĩ 纹 +æªĶ æ¡Ī +ç½® äºİ +佩 æľį +豪 éŨ +åĵ Ĵ +æģ° 好 +檢 æŁ¥ +åĪĿ è¡· +大 åĶIJ +约 ä¼ļ +èĴ¸ åıij +çѹ åĪĴ +å¹´ ç»Ī +è¡Į æ¥Ń +åħ± éĿĴ +åħ±éĿĴ åĽ¢ +ä¼ļ å¼ķèµ· +ä¸Ń ç§ij +ä¸Ńç§ij éĻ¢ +æĮ¯ åĬ¨ +åį´ åıijçݰ +ä¸įåĬ¨ 产 +èĮ ¹ +æĪ¿éĹ´ éĩĮ +è´§å¸ģ æĶ¿çŃĸ +æ²» çĻĤ +æħİ éĩį +å¡ŀ å°Ķ +åĽ½ ç±į +åĽł æŀľ +çŃī çī¹çĤ¹ +å±± è°· +ä¸ĭ è¼ī +è®ĵ æĪij +饮 éħĴ +è¿Ļ个 游æĪı +ç»Ŀ 大éĥ¨åĪĨ +åĴ¨è¯¢ æľįåĬ¡ +å¹² æ´» +è®® ä¼ļ +æ¦Ĥ è¿° +åĪĨ åĮº +æŃ» åIJİ +ç«Ļ çĿĢ +主è¦ģ é¢Ĩ导 +åIJĮ åŁİ +大 æłij +对 åѦçĶŁ +社ä¼ļ ä¿ĿéĻ© +å¢ŀ èµĦ +主人 åħ¬ +å®£ä¼ł æķĻèĤ² +æĸĩåĮĸ 交æµģ +客 æĪ¶ +çŁ¥åIJį åĵģçīĮ +æ»ŀ åIJİ +äºĴ è¡¥ +æĦŁ äºº +åī ¿ +åIJİ ä»£ +äºī 龸 +æķĻèĤ² åŁ¹è®Ń +éĿĻ èĦī +ä¹ı åĬĽ +说 åĩºæĿ¥ +çİĭèĢħ èį£èĢĢ +åĢ « +åįĩ èµ· +éķ ģ +åĩº 游 +éĢļè¡Į è¯ģ +å·¥ä½ľ å²Ĺä½į +åĮł å¿ĥ +æĭ¿ æĿ¥ +æ´Ĺè¡£ æľº +æĪijä¸į æĥ³ +é¢Ħ è§ģ +æ¼Ķ 示 +ä¸Ģ缴 没æľī +è·Ł 她 +对çħ§ æ£ĢæŁ¥ +ç° ¿ +ä¸ĵ å¿ĥ +è®® äºĭ +åīį 端 +åį¡ å°Ķ +è¨Ń å®ļ +设置 äºĨ +å©ļ 纱 +åľ¨ åĽ½å¤ĸ +åı³ ä¾§ +è³¼ çī© +å¥ĩ èij© +å¢ŀåĬł å̼ +好 è¿IJ +åĽ½éĻħ æľºåľº +ä¸ĭ ç§° +缮åīį 为æŃ¢ +ç¥ŀ ä»Ļ +å®ĥ åı¯ä»¥ +æ¾Ħ æ¸ħ +èĥ½ 使 +游 åĩ» +游åĩ» éĺŁ +åĩ ¹ +ä¸įè¦ģ åĨį +åĨ³ èĥľ +åĨ³ æĪĺ +æĭ ½ +缼 åħ¸ +å¾Ī好 åľ° +æľĢ ç¾İçļĦ +åĥ ļ +å·´ åŁº +å·´åŁº æĸ¯åĿ¦ +æľĢ éĢĤåIJĪ +é«ĺ èģĮ +ä¿Ŀ å§Ĩ +æİĪ æ¬Ĭ +说åΰ è¿ĻéĩĮ +æİ¨ å¼Ģ +çİĩ è¾¾ +ä¸īåĪĨ ä¹ĭä¸Ģ +管çIJĨ ä¸Ńå¿ĥ +交 æ±ĩ +森æŀĹ åħ¬åĽŃ +å¾Ģ ä¸Ĭ +éªij è¡Į +æį® æŃ¤ +纽 带 +ç» ŀ +ä¸ī æĸ¹ +æĦıä¹ī ä¸ĬçļĦ +æİ¨ è¿Ł +å¤ļæł· æĢ§ +æĥ³ èµ·äºĨ +æİĴåIJį 第 +å·¨ é¢Ŀ +æĿŁ ç¼ļ +å®ī å®ļ +äºĭ 實 +çļĦ æĦ¿æľĽ +è£ħå¤ĩ åζéĢł +人 å±ħ +人å±ħ çݯå¢ĥ +å¿ĺè®° äºĨ +该 游æĪı +楼 ä¸Ĭ +å¼Ģ ä¼ļ +æģ ³ +åıĭæĥħ éĵ¾æİ¥ +ç¡ Ĵ +ç»ĻäºĪ äºĨ +åģı 好 +åĵ ī +交éĢļ å®īåħ¨ +éĽ Į +æ²» çĹħ +è§īå¾Ĺ å¾Ī +衬 è¡« +å¿ĥ æĦ¿ +æ´ŀ å¯Ł +æ°ij æ£Ģå¯ŁéĻ¢ +æıIJ çĤ¼ +è¦ģ è¿Ľä¸ĢæŃ¥ +驾 车 +æĻ® æĥł +æķ ĸ +ç¦ı éŁ³ +éĢģ è¾¾ +è§ĦåĪĴ 设计 +æīĭ å¥Ĺ +å®ī ä¿Ŀ +è¿ĺä¸į å¦Ĥ +åīį è¿° +æłĩ è®° +ç´§ æİ¥çĿĢ +æ§ IJ +深深 åľ° +满满 çļĦ 
+æĺ¥ è¿IJ +æĹ¥ 产 +çα æĬ¤ +åħ¨ æĹ¥ +åħ¨æĹ¥ åζ +转 åĬ¨ +ç¥Ń ç¥Ģ +ä¹° ä¸ľè¥¿ +对 æľªæĿ¥ +æ¶Ī失 äºĨ +åļ´ éĩį +ä¸ī æĿ¡ +éħ¸ 奶 +éĽĨåĽ¢ èĤ¡ä»½ +西 è·¯ +åıª å¾Ĺ +éĢģ åİ» +çĭł æĬĵ +åĪ©ç͍ çİĩ +ä¸ĭ åij¨ +å¥ĭ æĪĺ +æĺ¥èĬĤ æľŁéĹ´ +è´Ł 责任 +æĺĤ è´µ +å°¾ å·´ +ç¯ĩ æĸĩ竳 +åħ ® +è®Ĭ æĪIJ +å¹ ¹ +çĻ» éĮĦ +ä½ Ī +å·¥ åĮł +åĵªæĢķ æĺ¯ +åıį åĵį +ç§ ĥ +åĩº 轨 +æĹ¥ åĨĽ +åIJį èªī +æķı éĶIJ +æľįåĬ¡ æ°´å¹³ +çħ§ å°Ħ +ä¼Ĭ æĭī +ä¼Ĭæĭī åħĭ +åĨħ éĺģ +èĬĴ æŀľ +ä¸ĩ åĪĨ +éĢĢ æ¬¾ +缴æĴŃ éĹ´ +æĭ¿ åΰäºĨ +å°İ èĩ´ +空æ°Ķ ä¸Ń +客æĪ· æľįåĬ¡ +è¿IJ åĬ¿ +ç»ĵ çŁ³ +ä¸į å¿ħè¦ģçļĦ +èĥ¶ åĽĬ +çIJĨ ä¼ļ +æĬ½ åĩº +空æ°Ķ è´¨éĩı +æ¯ķ 竣æĺ¯ +åĨ· æ¼ł +ä¸Ģ å¦Ĥ +ä¸Ģå¦Ĥ æĹ¢ +ä¸Ģå¦ĤæĹ¢ å¾Ģ +æĤ£ çĹħ +åĬł æĮģ +èµŀ åĬ© +é« ® +åij½ ä¸Ń +æĦıä¹ī ä¸Ĭ +ä¸į èĪį +åģļ æ¢¦ +æīĵ æī« +æĺŁ åħī +æĸŃ è£Ĥ +åħ¨ å¥Ĺ +è£ģ å®ļ +马 åħĭæĢĿ +骨 骼 +ä¸Ģ è·¯ä¸Ĭ +å®ļ æĹ¶ +å·¥ç¨ĭ æĬĢæľ¯ +å½¼ å¾Ĺ +æ±² åıĸ +ä¸Ģ è§Ī +åIJµ æŀ¶ +ä¿Ĺ ç§° +æłª æ´² +åºŁ æĹ§ +è¡Į æĺŁ +åıijçĶŁ åıĺåĮĸ +é¦ĸ ä»ĺ +åįģåĪĨ éĩįè¦ģ +æĬĬ è¿ĻäºĽ +ç¥ŀ å·ŀ +æıIJä¾Ľ åķĨ +æ¥ · +å± İ +çĬ¶ åħĥ +åŁİ å¢Ļ +çľĭ ä¸Ģçľĭ +çĶŁäº§ èĥ½åĬĽ +åŁºæľ¬ä¸Ĭ éĥ½ +æīĵ æī° +åĪĿ 次 +åĩº 示 +åħ¶ä¸Ń ä¸Ģ个 +çĶŁæĢģ ç³»ç»Ł +æīĭ æİĮ +æµİåįĹ å¸Ĥ +åľĭ åħ§ +æŃ£ å̼ +å¹¾ ä¹İ +æİ¨èįIJ éĺħ读 +è¿Ń 代 +è°ĥ ä¾ĥ +饮 åĵģ +å¢Ļ ä½ĵ +åıĺ çݰ +äºĨ 好 +äºĨ好 åĩł +ä¸į çķĻ +çĪ ² +å°½ æĹ© +æŃ£åľ¨ è¿Ľè¡Į +åĩº éĻ¢ +æĿĢ å®³ +æıIJ 款 +åıijå±ķ 空éĹ´ +åīį 身 +ä¸įæĸŃ å¢ŀ强 +æ·± å±Ĥ次 +容 纳 +éĤ£ 份 +å·¥ä½ľ æķĪçİĩ +æľ¬ åĽ½ +失 èIJ½ +æŃ£ åĽłä¸º +èĬĤ æ°´ +ä¸ĭ ä¸Ģ代 +çłĶåıij ä¸Ńå¿ĥ +ä¸į çIJĨ +å®Į 好 +ä¿ĿæĬ¤ åĮº +ç»ĵæŀĦ è°ĥæķ´ +å¥ł å®ļ +宣 ç§° +éĺ» æĮ¡ +æĴ¤ 离 +ä¸į æĸ¹ä¾¿ +åĴ ķ +ç¬ijäºĨ ç¬ij +çݯå¢ĥ 污æŁĵ +ä½ı æĪ· +ç»Ŀ ç¼ĺ +éϤ å°ĺ +é«ĺ å°ļ +æĢİä¹Ī åı¯èĥ½ +éĿ¢ èī² +åķĨ æ¥Ń +çĸ ¹ +èµĦæºIJ ä¼ĺåĬ¿ +è¾ĸåĮº åĨħ +èĢĢ çľ¼ +æij§ æ¯ģ +ä¸ĸçķĮ ç»ıæµİ +å¼ķ æĿ¥ +ä¸Ģ åĪĻ +æĭĩ æĮĩ +æĬµ 御 +éĽ į +åĩĨå¤ĩ å·¥ä½ľ +çıł ä¸īè§Ĵ +ç¨Ģ åľŁ +èİ·å¾Ĺ æĦŁ +æĪIJåĬŁ çİĩ +ç½ij 约 +ç½ij约 车 +èĦ IJ +æķ¬ ä¸ļ +éĩij ä»· +ç²¾ é«ĵ +ä¹° 车 +åħ³ åı£ +åĨį å¤ļ +æŀģ åĵģ +åIJĦ å®¶ +举æĬ¥ ç͵è¯Ŀ +èļ Ĭ +æĸ¹ å½¢ +ç§ijæĬĢ æĪIJæŀľ +æľĢ好 æĺ¯ +éĹ® åĢĻ +红 éħĴ +åĽĽ ç§į +ç¿Ĵ æħ +ç¿Ĵæħ £ +åŀ ¦ +éĤ£ åıª +é¢Ĩ æĤŁ +çľ¼ éĥ¨ +æ³° å®ī +ä»» æľŁ +磨 æįŁ +æĽ¿ æį¢ +åħ¸ 礼 +符åIJĪ æĿ¡ä»¶ +è¿ĺæľī ä»Ģä¹Ī +åħ±äº« åįķ车 +åı¯ åĪĨ为 +åŃ£ åIJİ +åŃ£åIJİ èµĽ +举èİŀ å¸Ĥ +å¿ĥ æĦı +æīŃ æĽ² +ä½ľä¸º ä¸Ģç§į +è¿Ļ éĥ¨åĪĨ +åıĤä¸İ åΰ +ç½ij çIJĥ +實 çı¾ +ç»Ħ è£ħ +åIJij å¤ĸ +å·¥ä½ľ æĸ¹æ¡Ī +åįģ æĿ¡ +課 ç¨ĭ +颤 æĬĸ +åĵ © +éĤ® å¯Ħ +äº ¢ +åħį è²» +ç§ ¤ +åºĶæĢ¥ 管çIJĨ +åĽĽ äºĶ +éºĴ éºŁ +å¾Ĵ æŃ¥ +è¨ĺ å¾Ĺ +çĴ IJ +æĺ¯åIJ¦ ä¼ļ +æĦıè§ģ åıįé¦Ī +éļ¾ æĢª +çª į +交 æİ¥ +两 åįĥ +æĩī ç͍ +æľŁ éĸĵ +æIJ¬ åΰ +è®® é¢ĺ +碧 æ¡Ĥ +碧æ¡Ĥ åĽŃ +åģļ çĶŁæĦı +éĻĽ ä¸ĭ +è· ĭ +èĢģ人 å®¶ +带 åĽŀ +æŀ¸ æĿŀ +è¡Į éķ¿ +åĨħ容 ç®Ģä»ĭ +æ¢ ¢ +æĮĩ æİ§ +éĩį çĹĩ +ç½ijåıĭ 们 +çı¾ 代 +ç±» 产åĵģ +å¥Ķ æ³¢ +æ¸ º +ç²ī ç¢İ +è¿Ļ åıªæĺ¯ +æ£Ģå¯Ł æľºåħ³ +é½ Ĭ +æĪ¿ ç§Ł +å¾· æĭī +å²ģ 以ä¸Ĭ +纯 åĩĢ +åĪĨå¸ĥ åľ¨ +èĥ½ å¾Ĺåΰ +ä¸į å°½ +ç«ŀ ä»· +çļĦ 带é¢Ĩ +çļĦ带é¢Ĩ ä¸ĭ +ä¸ŃèᝠæĿIJ +æĿij éķĩ +ä¸įåı¯ éģ¿åħį +éľ² 天 +å°ı å§ijå¨ĺ +çī© ä»¶ +èijĹä½ľ æĿĥ +æĭĺ çķĻ +éĥ½ è§īå¾Ĺ +æĽ² æĬĺ +æ·»åĬł åīĤ +åı¬ åĽŀ +æīİå®ŀ æİ¨è¿Ľ +æĬĦ è¢Ń +åĮĸ 身 +缴 èIJ¥ +ä¹Ł å¸ĮæľĽ +èį£èªī ç§°åı· +åįĸ ç»Ļ +æľī ä¸įåIJĮçļĦ +å¥ĩ çī¹ +éĥ½ 认为 +å¦ ŀ +æĪIJéķ¿ ä¸º +辩 æĬ¤ +主 æķĻç»ĥ +æ³ķå¸Ī èģĮä¸ļ +æ¤į åħ¥ +ç´¢ å°¼ +åIJ¬ è¿ĩ +ä¹łæĥ¯ äºĨ +夺 åıĸ +éŁ ĵ +æľ¬è´¨ ä¸Ĭ +æİ¥ åĬĽ +äºij 端 +è¦ģ åģļ好 +è·¯ çģ¯ +åįıåIJĮ åıijå±ķ +æľī å¾ħ +æ°´ åŁŁ +æIJľçĭIJ é¦ĸ页 +è´¨éĩı å®īåħ¨ +åįģäºĮ äºĶ +åĵ® åĸĺ +èĵ¬åĭĥ åıijå±ķ +åIJį 声 +身 亡 +çİĭ åºľ +åİŁåĪĻ ä¸Ĭ +çĥĺ å¹² +éģĹ æ¼ı +éĿ¢ 缮 +åĽ½ ä¼ļ +ä¸Ģ缴 éĥ½æĺ¯ +æľīä¸Ģ ä½į +éħį æľī +éĻª çĿĢ +ä¼ģ åĽ¾ +æĮī ä¸ĭ +èĵĿ åĽ¾ +æ© ĺ +大å¤ļ æĺ¯ +辩 论 +æĹĭ å¾ĭ +æĬ¥ éĢģ +æĿ¡ è§Ħå®ļ +åĬ¨ éĿĻ +åĮΠ奴 +æĭľ 访 +ä¸Ģ åĪĢ +ä»ĸ çŁ¥éģĵ +主 æĿĥ +ä»ĸ æĽ¾ +æĴŃ ç§į +å£ģ åŀĴ +çī¢è®° 使åij½ +åľ¨è¿Ļ æĸ¹éĿ¢ +æīĭ èħķ +æĶ¯ æŀ¶ +ä¾Ĩ èĩª +éĩį å¡ij +å¤ļ å±Ĥ次 +ä»ĭ è´¨ +éĿ¢ åŃĶ 
+æ½® 湿 +åİ¿ åŁŁ +游æĪı å½ĵä¸Ń +å£ ŀ +åĪĹ åĩº +èµĽ åĮº +å¤ļ åįĬ +éĩįçĤ¹ å·¥ä½ľ +æĪij们 å¿ħé¡» +æŁı æŀĹ +é²ģ èĥ½ +æĸ½ å±ķ +åIJĦ åĮº +åħį ç¨İ +èµĽ åIJİ +æľĢ éĩįè¦ģ +ä¸Ģ个 好çļĦ +è¿Ŀæ³ķ è¿Ŀè§Ħ +äºĨè§£ æĽ´å¤ļ +æķ¬ 请 +ç¬ijçĿĢ è¯´ +ä¸įæĸŃ åıijå±ķ +æijĦå½± å¸Ī +以 éĺ² +çĤ¸ å¼¹ +声 åĵį +ç¤ ģ +æĩ ¿ +èĪĨ æĥħ +èĩªçͱ è´¸æĺĵ +æķı æį· +ä¸ī大 éĺ¶æ®µ +èĭ Ķ +æĹº åŃ£ +ä¸į 满æĦı +微信 åı· +ä¿® 为 +çł´ è£Ĥ +éĢĥ 离 +æ¯ı èĤ¡ +è¾¾ ä¸įåΰ +æ¯ıå¹´ éĥ½ +çģ¯ ç¬¼ +æŃ¤ åŁºç¡Ģä¸Ĭ +åĥı 个 +åĪĨ 娩 +æĻ ¾ +ä¸į èĩ³äºİ +红 线 +误 è§£ +举 è·¯ +æ·® å®ī +产 åѦ +产åѦ çłĶ +èī¾ æ»ĭ +è»ĭ çĹħ +åīįæıIJ æĺ¯ +æ¯ı ä¸Ģ天 +ä¸ĥ 大 +æłij åı¶ +èµ° å¾Ĺ +è¿Ļ 两ç§į +æİı åĩº +æİ IJ +é¢Ĩ导 èĢħ +ä¸Ģ æľµ +个å¤ļ æľĪ +ä¸Ń åħ³ +ä¸Ńåħ³ æĿij +课åłĤ æķĻåѦ +大 åĴĸ +éģĭ ç͍ +è¯ļ æĦı +ç»Ħ åĽ¾ +è¯ķ çĿĢ +ä¹Ķ æ²» +è¿ĺ ä¸įæĺ¯ +æľī æĽ´å¥½çļĦ +åIJİ å¤ĩ +æĸ°çĶŁ åĦ¿ +æ°Ķ è¡Ģ +æ²¥ éĿĴ +å±ı éļľ +æ¥Ń åĭĻ +æĪij 以为 +éķ¿ çĽ¸ +èĢģ çΏ +éķĩ æ±Ł +æľºæ¢° 设å¤ĩ +ä½Ĩæĺ¯ å¦Ĥæŀľ +åĿļå®ļ ä¸į +åĿļå®ļä¸į ç§» +åĨ² éĶĭ +ç®Ģ缴 æĺ¯ +åĤ¨ èĵĦ +纯 ç͵åĬ¨ +漫 æŃ¥ +举 èµ· +æģ¶ æĢ§ +è¨ĺ éĮĦ +èģĮèĥ½ éĥ¨éŨ +åħ¨ éķ¿ +鼻 è¦ĸ +ä¹³ èħº +ä½ķ å¤Ħ +æ¶Ī æŀģ +æŃ£ å¤Ħäºİ +å®ī å®ģ +æĪIJ éķ· +åıĻ è¿° +æºĥ çĸ¡ +ä½Ĩ çİ°åľ¨ +女 æĺŁ +å©´ å¹¼åĦ¿ +æĬķ èŀįèµĦ +éĹ® éĹ® +æıŃ å¼Ģ +è¯ ı +åIJį å½ķ +èĺij èıĩ +åIJĬ é¡¶ +æ¹ĸ åĮº +åįĸ åľº +建 ç¯ +å»ºç¯ ī +èİ ½ +åIJ¬ åIJ¬ +ç«ŀäºī ä¼ĺåĬ¿ +åĩº ä»» +æľī 两ç§į +橱 æŁľ +è¤ ª +è¯ķ åį· +ç»ıæµİ æĬĢæľ¯ +æ·± å±Ĥ +éĩįè¦ģ åĨħ容 +é£İ æİ§ +çĬ¶æĢģ ä¸ĭ +éĥ¨ éĸĢ +广 æ±½ +è§Ĥ æij© +éģĹ çķĻ +转 è´¦ +æĮģ ä»ĵ +æĢ» 计 +åľĺ éļĬ +æĪ¿ 举 +éĺĢ éŨ +åħ¬ åħ³ +åħ³ åĪĩ +èĤ ĺ +æķ¸ æĵļ +ä¸ī åįģå¹´ +è§ģè¯ģ äºĨ +å± Ĩ +çģ° å°ĺ +æ¦ľ é¦ĸ +è¦ĨçĽĸ çİĩ +ä»Ļ 女 +çĶŁäº§ æĢ» +çĶŁäº§æĢ» å̼ +æĪ¿ è´· +æ±Ł åĮº +åħħç͵ æ¡© +çϾ åIJĪ +確 èªį +转 ç§»åΰ +éĥ½ æĹłæ³ķ +纪念 é¦Ĩ +çŃ¾ç½² äºĨ +å¹¶ä¸į å¤ļ +æĮ ł +ä¸į太 好 +ä¸ĸ 代 +误 导 +é«ĺå³° 论åĿĽ +åħ¼ 容 +龸 æ°Ķ +æĿ¥ 访 +æīĢ å¸¦æĿ¥çļĦ +æĺ¯ä¸Ģ éĥ¨ +æĻļ é¥Ń +åİĨ 代 +åIJ¦ åīĩ +ä¹ħ ä¹ħ +æľīæķĪ æľŁ +诱 åıij +æĢ» èµĦ产 +æľ¬èº« å°±æĺ¯ +çĶŁäº§ åİĤå®¶ +æĹ¶ 髦 +èĢIJ ç͍ +ä»İå°ı å°± +æĿ¡ 约 +èĭ± åĭĩ +ä¿Ĺ è¯Ŀ说 +寺 åºĻ +å¿ĥçIJĨ åģ¥åº· +ä»Ģä¹Ī äºĭæĥħ +æ±ī åŃĹ +çķĻ ä½ı +åįĹ è·¯ +ä¸ī 项 +丢 äºĨ +æĥ³ åΰäºĨ +çѹ éĽĨ +éĻĦåĬł å̼ +西 è£ħ +ä¹ĭ ä½ľ +åģļçļĦ äºĭ +çķ¶ æĤ¨ +çķ¶æĤ¨ åľ¨ +é¦ĸ 款 +ä¸įåľ¨ ä¹İ +å·¥ç¨ĭ æĸ½å·¥ +éļIJ éļIJ +åıĺ 身 +沿 éĢĶ +æĤł æĤł +ä¿Ŀ æļĸ +çĶŁæ´» åŀĥåľ¾ +渤 æµ· +æŃ¦ ä¾ł +女 主è§Ĵ +举 ä¾ĭ +æ ·¨ +çϽ é¢Ĩ +è£Ļ åŃIJ +è¿Ķ è¿ĺ +è¿Ī åĩº +é¾Ļ éŨ +ç»ıæµİ ä½ĵ +æĶ¶ å®ĺ +çķĮ éĻIJ +è·³ åĩº +åįĩ å̼ +绵 éĺ³ +çĸ¤ çĹķ +çľĭ æ¸ħ +æĭĴ çµķ +è¥Ħ éĺ³ +课 å¤ĸ +åŃIJ åŃĻ +æŃĮ è¯į +æĪIJ åIJį +溶 æ¶² +åĦĴ å®¶ +åķĨä¸ļ åĮĸ +辨 åĪ« +å¤ļ è¾¾ +ç½ij åºĹ +ä¹Ŀ 大 +ä¹Ŀ大 ç²¾ç¥ŀ +æŃ¤ 举 +è¿ŀ è½½ +ä¸Ģ åĢĭ人 +èī² æ³½ +æ¶µçĽĸ äºĨ +è¦ı åĬĥ +åĽ½ æĥħ +åį«çĶŁ åģ¥åº· +积æŀģ åĵįåºĶ +æĭ Ļ +åζ åĬ¨ +æĥ³è±¡ åĬĽ +çļĦ ä¹IJè¶£ +å¼łå®¶ çķĮ +å´ İ +éĩį åŀĭ +å¤ĸ å¢Ļ +æĶ¾ åѦ +è®¤çľŁ åŃ¦ä¹ł +è´¬ å̼ +æ³ķ æ¡Ī +æĬ¤èĤ¤ åĵģ +éĻ·åħ¥ äºĨ +请 æĤ¨ +åŀ ¢ +æķĻèĤ² èµĦæºIJ +交æĺĵ å¹³åı° +æĹ¶ è£ħ +ä¼łæŁĵ çĹħ +æ¹ĸ æ³Ĭ +èµĦ 管 +åݨ å¸Ī +éĹľ éį +éĹľéį µ +åĵĪåĵĪ åĵĪ +çĽĹ çªĥ +çĶľ ç¾İ +åºĦ åĽŃ +缮åīį å·²ç»ı +è¾¹ ä¸Ĭ +çģ« èĬ± +æĬ¥ è®°èĢħ +æģĭ æĥħ +ç´§ åĩij +æ°´ æµģ +è¿Ļæĺ¯ æĪij们 +æ³¥ åľŁ +æĽ¾ ä»» +æĸ¹ è¨Ģ +åij¨ åħŃ +åı· 楼 +ä¼ij åģĩ +误 ä¼ļ +åĽ½ åĢº +åīį å¤ķ +两 å¼ł +éĹ « +éŃĶ é¬¼ +æĬĬ æĮģ +èĬĤèĥ½ çݯä¿Ŀ +æ¸ħæ´ģ èĥ½æºIJ +èĤ¥ æĸĻ +é«ĺ é¢ij +å°± æľīäºĨ +交 ä¼ļ +没 éĴ± +éĽħ æĢĿ +è¦ģ åıĬæĹ¶ +åŁ¹åħ» åѦçĶŁ +欣 åĸľ +çĥŃæ°´ åύ +é¾Ļ æ¹ĸ +äºĮ 楼 +æĸ°æµª è´¢ç»ı +æĸ° åĬ¨èĥ½ +èµ£ å·ŀ +æĭ³ 头 +æµģ åIJij +ä¹Łæĺ¯ å¾Ī +åıij åĶ® +ä¸Ń åIJ«æľī +åIJĵ å¾Ĺ +å·¨ æĺŁ +æĹł æīĢè°ĵ +æ¯Ľ åŃĶ +åħ¬åħ± 交éĢļ +çĤİ çĥŃ +èµ· èįī +åĬłçĽŁ åķĨ +说 ä¸įåĩº +大åѦ æ¯ķä¸ļ +å·¥ä¸ļ åĽŃ +éłĺ åŁŁ +åºĨ åħ¸ +æµģ 产 +èģ² éŁ³ +ä¼¼ä¹İ æĺ¯ +è´§ æºIJ +æ·± åĪĩ +æ²»çĸĹ æĸ¹æ³ķ +èµĦæºIJ éħįç½® +ç¶² åıĭ +çĶ £ +äº ¥ +躲 åľ¨ +社 ç§ij +è»Ł é«Ķ +女 è£ħ +æŃ¡ è¿İ +综åIJĪ å®ŀåĬĽ +æł¼ å°ĩ +åħļåı² åŃ¦ä¹ł +æľĢ åŁºæľ¬ +æľĢåŁºæľ¬ çļĦ 
+çľĭ æľĽ +åıĹ è´¿ +ä¸įä»ħ èĥ½ +ä½ķ å¿ħ +ä¸Ģ个 å°ıæĹ¶ +ç¾ Į +æĭĽ æĶ¶ +çĤĴ èĤ¡ +æĿij å¹²éĥ¨ +缸 çα +æ½ľ èĥ½ +ä¹ į +æĹ¶ è¾° +欣 æħ° +éĵ¶ è¡Įä¸ļ +çĭŃ çªĦ +éĩįçĤ¹ é¢ĨåŁŁ +çݰå®ŀ çĶŁæ´» +éĮ¯ 誤 +æĸ° è§Ħ +滥 ç͍ +æĹ¶ ä¸į +æĹ¶ä¸į æĹ¶ +帳 èĻŁ +ç¨Ģ 缺 +åIJij 举 +ä¿Ŀåģ¥ åĵģ +çıŃ éķ¿ +äºĴ åĭķ +笼 罩 +æ½ Ľ +æļĸ å¿ĥ +è½° çĤ¸ +åºĨ 幸 +è²Į ä¼¼ +æĵ º +èĢIJ 磨 +ä¸ĵä¸ļ 人士 +ä¸Ģèά éĥ½æĺ¯ +æ¼³ å·ŀ +åħ¨ èĩªåĬ¨ +å½ķ ç͍ +大 è·Į +æľīæķĪ æĢ§ +èĩª åĭķ +ä¸ī个 æĸ¹éĿ¢ +港 åĮº +ä¿¡ 貸 +éĢļ è¯Ŀ +é«ĺ 涨 +æ³Ħ æ¼ı +éħį ä¸Ĭ +åħļ å·¥å§Ķ +被 认为 +被认为 æĺ¯ +ä¸įä¼ļ åĨį +è°ĥ åīĤ +åıĤ èĤ¡ +èĦ± åıij +å¿ł å®ŀ +åĨħ åĪĨæ³Į +ç¹ģ å¿Ļ +åıĮ åĪĽ +é©» æĿij +åĪĴ ç®Ĺ +éģİ ä¾Ĩ +åľ£ ç»ı +èıľ 鸣 +æĭ¼ å¤ļå¤ļ +ä¸ŃåĽ½ 汽车 +çĥŁ èįī +缴 æµģ +äºĨä¸Ģ åı£æ°Ķ +ä½İ æĪIJæľ¬ +æī¾ åĽŀ +èĩª åįij +總 æĺ¯ +æĸĩåĮĸ åĪĽæĦı +天 æ²³ +樱 æ¡ĥ +éªij åħµ +éĩĮéĿ¢ æľī +çİ ® +èĥ½ æī¾åΰ +éĢĥ è·ij +åĪĩ å°Ķ +åĪĩå°Ķ 西 +以ä¸ĭ æĺ¯ +å²³ éĺ³ +çļĦ æ¦Ĥçİĩ +æĬµ åζ +å¸Ī äºĭåĬ¡ +å¸ĪäºĭåĬ¡ æīĢ +åĩĨ æĹ¶ +屬 æĸ¼ +订 è´Ń +åįłæį® äºĨ +ä¸Ń éĢĶ +å° ĭ +é»ij 马 +åİ¿ åħ¬å®īå±Ģ +ä¸ĥ æľĪ +èī² ç´ł +å¿ĥèĦı çĹħ +æĹ¶ éĻIJ +æ¯į åħ¬åı¸ +å¹ķ åIJİ +ä¸Ĭ æ¦ľ +å̾åIJij äºİ +纸 ä¸Ĭ +æ¡ ĵ +éĽĨä½ĵ ç»ıæµİ +æĥħ å¢ĥ +è¦ģ åģļåΰ +ç©į 極 +åıª æĢķ +æ¹ĺ 西 +çļ± çº¹ +åħ¨ åľĭ +çĦ¡ è«ĸ +好 æĦŁ +åįķ ä»· +è¿Ľç¨ĭ ä¸Ń +æĺĨ ä»ij +åĪĽ 客 +åħħ æĸ¥ +åħĪ æĬĬ +该 æĢİä¹ĪåĬŀ +åĵģ å¾· +åħ¨éĿ¢ åıijå±ķ +è¨Ī åĬĥ +æĢ» å·¥ä¼ļ +ä½Ľå±± å¸Ĥ +æĬĹ è¡¡ +å¼Ģ åľº +éĴ± å¸ģ +åıĭ 们 +å«ī å¦Ĵ +ç´¢ èµĶ +è®Ĭ åĮĸ +æĮ¤ åİĭ +æĮij è¡ħ +çŃī ä¸Ģæī¹ +æĿ¨ 欢 +ä¸ĵå®¶ åѦèĢħ +èĥ½ è¾¾åΰ +èµ° è¿ij +è´«åĽ° åľ°åĮº +éĻIJ æľŁ +ä¸į 平衡 +åĽ½åĨħ å¸Ĥåľº +èµĽ åľº +éħį èµĦ +è¦ģ èĢĥèĻij +ä¸ĩ åı° +æľĪ æľ« +éĶ ¥ +åŃ « +æİ¥è§¦ åΰ +åĩº 产 +æķĻ åѸ +ä½ľ å¼Ĭ +çļĦ æľĢåIJİä¸Ģ +ä¿ĥ æĪIJ +åIJ¸ åıĸ +æ½ľ èīĩ +被 éªĹ +è¾ĵ äºĨ +çĭIJ çĭ¸ +åįĩ éĻį +è¿ĻäºĽ ä¸ľè¥¿ +æĬķèµĦ åŁºéĩij +çĶŁçī© åѦ +ç½ij绾 èIJ¥éĶĢ +åIJij è®°èĢħ +èįī åľ° +æĢ ¯ +æľįåĬ¡ èĥ½åĬĽ +éĥģ éĹ· +åįķ åĵģ +å¾Ĺ 罪 +æĺĵ äºİ +个å¤ļ å°ıæĹ¶ +éĩį ä»» +ä¸Ĭ å®ĺ +æľ¬ éĩij +çı¾ åł´ +溢 ä»· +æĺŁ è¾° +æ´»åĬ¨ çİ°åľº +丹 麦 +å¸Ŀ çİĭ +æŁ¥ æĺİ +åŃĺåľ¨ äºİ +é¦Ļ æ°´ +æĬ½ æ£Ģ +å®ŀéĻħä¸Ĭ æĺ¯ +æĸ° å¾ģç¨ĭ +è´¢åĬ¡ 管çIJĨ +æİ Ľ +åĨľ åİĨ +éĥ½ èĥ½å¤Ł +éĤ¯ éĥ¸ +羣 實 +ç» Ĭ +åĨµ ä¸Ķ +ç½® 身 +ç¥Ī 祷 +çĿģ å¼Ģ +æĮĩ çĤ¹ +å¼Ģ æľº +西 å®ģ +åĮĹ çº¦ +积 æ°´ +åĩº åĬ¨ +åıijå±ķ 模å¼ı +转 æĬĺ +èĢĥ çĤ¹ +æľī ç½ijåıĭ +è´«åĽ° æĿij +æĪij们 çŁ¥éģĵ +åĪĨ éĶĢ +å±± èĦī +æ¯Ķ æĭŁ +ä¼° ç®Ĺ +æĶ¹ 建 +壮 è§Ĥ +ç§ī æĮģ +æı ª +ç¦ Ģ +åĮĸåѦ åĵģ +ä¸ŃåĽ½ åζéĢł +ä¸Ģ æŀ¶ +æīį è¡Į +æĭĽ å¾ħ +åıĺ æį¢ +åīį 线 +幸 好 +è¿Ļæł· çļĦè¯Ŀ +å¿ĥ è¡Ģ管 +æĢ§ çĸ¾çĹħ +åħ¨ èĥ½ +åĪij 侦 +ä¿¡æģ¯ åıijå¸ĥ +æĺ¾ çĦ¶æĺ¯ +éĿĴ éĵľ +åIJĥ ä»Ģä¹Ī +ç͵ ä»· +æ³ķå¾ĭ è§Ħå®ļ +çħ ² +çĵ· åύ +èĤī ç±» +æıĴ åħ¥ +åĹ ľ +è¿Ł è¿Ł +ä¸ĢçĤ¹ éĥ½ä¸į +è¿ĺ åĮħæĭ¬ +èĪį ä¸įå¾Ĺ +æłĩå¿Ĺ æĢ§ +æľĪ 以æĿ¥ +ç³ĸ æŀľ +éĥ½ åºĶ该 +çݯå¢ĥ åį«çĶŁ +èĪª è¡Į +éĥij éĩį +ç½ij æĬķ +åįģ ä½³ +ç§ģ ä¸ĭ +æļ´ è·Į +åĬłå¿« åıijå±ķ +产åĵģ çłĶåıij +åĪĽéĢł åĩº +æĢ» è§īå¾Ĺ +åºķ çĽĺ +èķ Ĭ +åĩºå¸Ń ä¼ļè®® +主 æĿ¿ +æĹ¥æĻļ éĹ´ +å®ĺæĸ¹ å¾®åįļ +å¼ķç͍ æĹ¥æľŁ +åī¯ æķĻæİĪ +ç͵åŃIJ 产åĵģ +è¡° éĢĢ +çķĻ åŃĺ +çģ« åĬĽ +çĴ § +çļ Ĥ +åħ¼ åħ· +éĩį è¿Ķ +é¢Ĩ çķ¥ +åĪĩ éϤ +åĨįçĶŁ èĥ½æºIJ +å®ŀåľ¨ 太 +çIJĨ论 ä¸Ĭ +ä¸ī å±Ĥ +ä¸ĸçķĮ åIJĦåĽ½ +å®ľ æĺĮ +è̳ è¾¹ +宽 æķŀ +æ±ī æĹı +çϽ çϽ +è¿ĻéĩĮ éĿ¢ +çĶŁæ´» ä¹łæĥ¯ +èµŀ èµı +çĶ· 士 +ä¸Ń ä¿Ħ +车 祸 +åīĤ éĩı +éϤ åİ» +å·¦ è¾¹ +çŃij çī¢ +çīĽ å¸Ĥ +å®¶ åĬ¡ +åķ ĥ +ç½® æį¢ +ç´« å¤ĸ +ç´«å¤ĸ 线 +å¾Ģ åīį +åĬĽ åѦ +ç´§ è·Ł +缮çļĦ åľ¨äºİ +ç» ® +ç¥ Ĥ +宣 è¨Ģ +äºĮ æ°§åĮĸ +äºĮæ°§åĮĸ 碳 +æĹł ç¼ĺ +ç²¾ éĢļ +è¨ º +å¼ķåıij äºĨ +æľĢ åħĪ +æ´¾ é©» +ä¸į å¿į +æĪij çΏ +å¹´ ä¸ĭåįĬå¹´ +æ·ĭ å·´ +没 éĹ®é¢ĺ +åºĹ åĨħ +è·Ł æĪij说 +çĶŁäº§ çĶŁæ´» +è§Ĥ æľĽ +æ¸ į +被 æī§è¡Į +被æī§è¡Į 人 +èĪ ľ +æİ º +ä¸Ģ ç§Ĵ +èįī åĿª +åij¼ åĴĮ +åij¼åĴĮ 浩 +åij¼åĴĮ浩 çī¹ +人æ°ij éĵ¶è¡Į +çĦķ åıij +è¯ģåΏ 交æĺĵ +çķ Ķ +æľº èĥ½ +å¦ ¾ +æĻļ å¹´ +å·¥åķĨ èģĶ +åİŁ åŀĭ +è§Ĵ度 çľĭ +æĬ¥ 社 +è¯į æĿ¡ +躲 éģ¿ +éĩį åIJ¯ +å¤ķ éĺ³ +èĤ¡æĿĥ 
转让 +åľ¨ ä¸Ģ +åľ¨ä¸Ģ æĹģ +社ä¼ļ åĮĸ +åıijå±ķ åİĨç¨ĭ +æĭĸ æ¬ł +使 èĢħ +ä¸İ åIJ¦ +æĸ° å±ĢéĿ¢ +ä»Ĭ天 æĪij们 +é½IJ èģļ +对 æĪij说 +éĢĴ 交 +æľª æĽ¾ +èİ Ĭ +éĸ ī +亲 æīĭ +è§Ĵ éĢIJ +æľī é»ŀ +ç¨İ çİĩ +ä½İ 声 +é»ĺ å¥ij +æĻ® æ³ķ +大 ä¸ĵ +第äºĮ 大 +ä½ı åĿĢ +æĶ¾ è¿Ľ +äºĮ æĪĺ +亲 身 +åĽº åĮĸ +ä¸ĭ 乡 +åħ³éĶ® æĬĢæľ¯ +åĽŀ æĥ³ +æĬ¥ åĪĬ +æ¶Ĥ æĬ¹ +èĹı çĿĢ +ç¥Ŀ æĦ¿ +åįĩ 温 +çĶļèĩ³ è¿ŀ +åħ¬åħĥ åīį +ç¾İ æĸ¹ +è¯ļ å®ŀ +æĹł åģ¿ +åīµ æ¥Ń +å°ıå¿ĥ 翼 +å°ıå¿ĥ翼 翼 +两 æīĭ +温馨 æıIJ示 +仿 羣 +æĥ ¶ +èĥ¡ åŃIJ +å·¥ä½ľ ç«Ļ +硬 çĽĺ +ç« ¿ +åĤ³ éĢģ +åħ¨ æł¡ +é²ľ æ´» +çĴĢ çĴ¨ +ç»ĵ å°¾ +æį¢ æĿ¥ +æĪ Ģ +ä½İ ä½į +ä¸ĩåħĥ 以ä¸Ĭ +åĬł åĪĨ +æİ¨ä»ĭ ä¼ļ +çIJĨ èµĶ +å¾· å°Ķ +æĬĹ è®® +æ´ ¼ +åĸ § +åŁİ éĻħ +å¾Ī æ£Ĵ +人 æŃ»äº¡ +ä¼ļå±ķ ä¸Ńå¿ĥ +äºĴèģĶ äºĴéĢļ +èĸĦ èĨľ +éĩį é»ŀ +ç¦ģ æ¯Ĵ +åĨ· ç¬ij +大家 åı¯ä»¥ +é¦ĸ 缸 +è¿ij è·Ŀ离 +æµ® çݰ +ç§ĺ è¯Ģ +èµ· é£ŀ +æIJ ¶ +羣 åģĩ +æģ ķ +å°ı åºĹ +æ°ij çľ¾ +åıijå¸ĥ åħ¬åijĬ +ä¾§ éĩį +å¾ĺ å¾Ĭ +æĢ Ķ +æª IJ +æķ° 缮 +åī¯ ç§ĺ书éķ¿ +两 åı¥ +éļIJ çŀĴ +åıĮ åıĮ +æīĭ æĦŁ +èij¡ 京 +éģĹ å¿ĺ +é¬ ¥ +è¿Ļ个 åľ°æĸ¹ +说 çļĦè¯Ŀ +å·¡ åĽŀ +è¿Ŀ 竳 +æī¾ å·¥ä½ľ +æĶ¯ çIJĥéĺŁ +裡 éĿ¢ +æĺ¾ç¤º åĩº +èĩ³ å°Ĭ +两 级 +åīį æ®µæĹ¶éĹ´ +çĺ¦ èº« +èĤ¢ ä½ĵ +æ¯į 親 +æīĭç»Ń è´¹ +汽车 è¡Įä¸ļ +æİ© çĽĸ +æİ§èĤ¡ éĽĨåĽ¢ +åı£ å¾Ħ +æĶ¿çŃĸ æİªæĸ½ +æµ· 绵 +åħ¨ éķĩ +äºĭ åħ³ +å¸Ń æī§è¡Į +å¸Ńæī§è¡Į å®ĺ +éĤ£ 次 +åı¯èĥ½ åĩºçݰ +ä¸Ńå¿ĥ åŁİå¸Ĥ +ç¿» 身 +ä¹Ł ç®Ĺ +ä¾µ çķ¥ +åĸĩ åıŃ +æ¯ı次 éĥ½ +è§ ħ +éĻ¢ éĻ¢éķ¿ +å§ĭ äºİ +èѦ åĬ¡ +èᝠæĿIJ +å±ł æĿĢ +æľ¬èº« å°± +éļıæĹ¶ éļı +éļıæĹ¶éļı åľ° +åĶ® åįĸ +æĹłäºº 驾驶 +é¢ ħ +åĵģ 質 +åĺ² ç¬ij +è·ij åİ» +åħĭ éĩĮæĸ¯ +çķ¸ å½¢ +ä¿® 饰 +磩 éĺµ +éŁ³ä¹IJ ä¼ļ +æŁ³ å·ŀ +é½ ¡ +ä¼ļ è°Ī +æŃ£ çīĪ +ä¹Ł åIJĮæł· +æļ§ æĺ§ +è¡ĮæĶ¿ éĥ¨éŨ +ä¹ĸ ä¹ĸ +èĤ¤ èī² +æĹ¶ ä»» +羣 åĪĩ +æľĪ ä¸ĭ +æľĪä¸ĭ æĹ¬ +举æĸ¹ è´¢å¯Į +è£ħä¿® åħ¬åı¸ +éĢĢ è¿ĺ +åĭĺ å¯Ł +åĵ¥ 伦 +åĵ¥ä¼¦ æ¯Ķäºļ +çĭ¬ ä¸Ģ +çĭ¬ä¸Ģ æĹł +çĭ¬ä¸ĢæĹł äºĮ +è°ĥ åij³ +åİĭ è¿« +åħ¨çIJĥ æľĢ大 +åī¯ æł¡éķ¿ +æĽ´ ä½İ +åĪĨéĴŁ åIJİ +åĽŀ ä¾Ĩ +åζ åīĤ +åijĬè¯ī 大家 +çĤ¹ éĴŁ +åįģä¸ī å±Ĭ +åij¨ åĽĽ +è¿Ļæł· ä¸Ģ +è¿Ļæł·ä¸Ģ æĿ¥ +èĭ Ł +æľĽ åİ» +æĪIJ è¯Ń +å½ĵ åį³ +ç¬ij 声 +ä¹ĭ åĬ¿ +åĪijäºĭ æ¡Īä»¶ +æĮĤ çĿĢ +ä½ķ ç§į +å°ı 游æĪı +åĽ½å®¶ æĪĺçķ¥ +åĨ· åĨ· +å®ľ 宾 +æIJº ç¨ĭ +è¶ĭ äºİ +åıį çľģ +常 说 +ä¸ĩ æĪ· +åĥµ å°¸ +åįĥä¸ĩ åĪ« +åıijçݰ éĹ®é¢ĺ +åı¯ çŁ¥ +éŨæĪ· ç½ijç«Ļ +åģ¥åº· 产ä¸ļ +åı³ è¾¹ +æµ· è¿IJ +è¿ij ä¹İ +åĮ» æ²» +æĢ» ç®Ĺ +ä¸Ģ åĪĨéĴŁ +æĭ § +ä¹Ł æľīä¸ĢäºĽ +ä¾Ľç͵ åħ¬åı¸ +å»ī ä»· +帮 ä»ĸ +æŃ¤æ¬¡ æ´»åĬ¨ +åıªèĥ½ 说 +èĬ ĭ +çīĩ 段 +åŃĺåľ¨ éĹ®é¢ĺ +ä½łä¼ļ åıijçݰ +è½® å»ĵ +ç½ij éĢļ +滨 æ±Ł +æİĪ ä¿¡ +é»İ æĺİ +ä¸į å±ŀäºİ +约 åįł +éķ¿æ²Ļ å¸Ĥ +èĥļ èĥİ +åħĥ ä»¶ +éĻĨ åĨĽ +è³¼ è²· +æĮĩ æľĽ +å®ŀä¹ł çĶŁ +çī¹çĤ¹ æĺ¯ +çıł æ±Ł +çľĭ ä¸įåĩº +ä¸įè§ģ äºĨ +ç¼ ī +éĺµ èIJ¥ +åĶIJ æľĿ +没 å¿ħè¦ģ +åĽ½åľŁ èµĦæºIJ +ç»ıæµİåѦ å®¶ +åIJĪèĤ¥ å¸Ĥ +çIJ¢ 磨 +ç¡® åĪĩ +åŁİå¸Ĥ åıijå±ķ +çŃ· åŃIJ +人æ°ij æľįåĬ¡ +满 åĪĨ +è¿· ä¿¡ +ä½ľèĢħ æľ¬äºº +æĸĩ竳 æĿ¥æºIJ +ç«Ļ ç«ĭ +æŀĦ æĪIJäºĨ +è¾Ľ åĭ¤ +è¶ħ 强 +éĶ ļ +åīįä¸ī åŃ£åº¦ +å°± è§īå¾Ĺ +å´ĩ é«ĺ +è¶Ĭ ä¾Ĩ +è¶Ĭä¾Ĩ è¶Ĭ +å¸Ĥåľº èIJ¥éĶĢ +综åIJĪ ç´łè´¨ +åŃ ļ +ä¾® è¾± +äºĮ åŃĹ +å·¥ä½ľ ä»»åĬ¡ +åı²ä¸Ĭ æľĢ +æľĢ ä¼ĺ +åIJ© åĴIJ +表 çϽ +èİ« åIJį +èİ«åIJį åħ¶ +èİ«åIJįåħ¶ å¦Ļ +å¹ £ +åIJĮå¿Ĺ 们 +建设 çĶ¨åľ° +åĦ Ģ +éħį åģ¶ +å¼ © +åͱ çīĩ +æīĭ èĦļ +åħ¼ ä»» +åģľ æĶ¾ +æŃ£ å®Ĺ +æĸ° åĨľæĿij +åĤ¬ çĶŁ +æīĢ åŃ¦æł¡ +念 ä½Ľ +åͤ éĨĴ +åħ± åĪĽ +æĭī ä¸ģ +èĥĮ çĿĢ +çĶŁæĢģ ä¿ĿæĬ¤ +åı£ 头 +æĸ¹åIJij çĽĺ +調 æķ´ +æĭĽèģĺ ä¿¡æģ¯ +åħ¶ä»ĸ åĽ½å®¶ +ç®Ģ æĺĵ +åĮ¿ åIJį +è¯Ħ æµĭ +æĺ¯ä¸Ģ 座 +çīµ æīĭ +è¶³ 迹 +çIJĨè§£ åĴĮ +æľĢ åıĹ +å¿ĥ è·³ +çζ 親 +éĿŀ常 åĸľæ¬¢ +èĭ¦ éļ¾ +æĬĢ å¸Ī +æ°ij æĦı +æĪĺ åĽ½ +æĽ¿ è¡¥ +æ´¥ è´´ +ä¸ŃåĽ½ ä¼łç»Ł +åIJĦ è¡Į +åIJĦè¡Į åIJĦ +åIJĦè¡ĮåIJĦ ä¸ļ +第äºĶ å±Ĭ +èį· èĬ± +æĦı èŃĺ +票 ä»· +åĪĨ æµģ +æĿİ çϽ +æ±Ł åĮĹ +æİĴ æĸ¥ +ä½ĵ éĩı +åĮħåIJ« äºĨ +åĪĺ æŁIJ +çݰ å¦Ĥä»Ĭ +å·¥èīº åĵģ +è¿Ļç§į æĸ¹æ³ķ +åĬŀåħ¬ 楼 +ç͵ å·¥ +çħ Ļ +åį¡ çīĩ +å¹´ å¹´åºķ +ä¸ĵ项 èµĦéĩij +åĮ» ç§ij +åĮ»ç§ij 
大åѦ +åĽŀ头 çľĭ +ä¸į å±ij +èĩª 驾 +没 æĶ¶ +æīĵ çĮİ +èĦ¸ éĥ¨ +åıĥ èĢĥ +å°Ĩ 士 +è´«åĽ° 人åı£ +çIJĨæĥ³ 信念 +é£İ å°ļ +人æīį éĺŁä¼į +çij ¾ +æĿ¥ è¿ĻéĩĮ +æ´Ĺ 涤 +å¹´ èĸª +èĭį çϽ +ä¸ĩ äºĭ +课 æľ¬ +åºĵ éĩĮ +çī¹ æ´¾ +ç´¾ åijĺ +èµŀ ç¾İ +ç©¿ æĪ´ +製 ä½ľ +èµŀ æĪIJ +ä¸Ģ ä¾§ +å½ĵåľ° 人 +æĭ İ +纸 è´¨ +ä½Ļ 个 +éĶĤ çĶµæ±ł +æľº åŀĭ +éĻ¢ éϢ士 +åģļ å·¥ +å¼ł è´´ +ç¥Ľ æĸij +æ®ĸ æ°ij +å¥ij 约 +æ¹ĺ æ½Ń +æIJ ĸ +åŃĺ è´§ +交éĢļ 大åѦ +è¶ģ çĿĢ +æĸĩçī© ä¿ĿæĬ¤ +å¤ĩ æĪĺ +éĩĩ 纳 +åįĬ æľĪ +æľĢ åħ³éĶ® +æľĢåħ³éĶ® çļĦ +æİ¥ éĢģ +æĶ¶ åī² +åıį åĢĴ +çĥ Ľ +æ ½Ķ +ä¼Łå¤§ å¤įåħ´ +çļĦè¯Ŀ è¯Ń +容 å¿į +å®ļ éĩı +æķ Ĺ +åĵģçīĮ 形象 +æīŃ è½¬ +åĽ½å®¶ éĩįçĤ¹ +èĨĿ çĽĸ +ä¸Ģ 楼 +大 éϏ +éĤª æģ¶ +åĽŀ åij³ +çĮ ¿ +çĿ¡ åīį +æĹł è¾ľ +çĹħæ¯Ĵ æĦŁæŁĵ +æľºæ¢° åĮĸ +çĤ¹ 亮 +溶 è§£ +åĩłä¹İ æīĢæľī +è·ij éģĵ +ç͵è§Ĩ æľº +åı ¨ +æijĩ äºĨ +æijĩäºĨ æijĩ头 +èĩª è´Ł +综åIJĪ åĪ©ç͍ +èĩª å¦Ĥ +åİŁ ä¾Ĩ +ä¹Łä¸į æĥ³ +èĬĤ 课 +è¿ĩ åī© +çͲ çĬ¶ +çͲçĬ¶ èħº +æĸ° ä¸ĸ纪 +èĩªä¸» åĵģçīĮ +é«ĺ å±Ĥ次 +ä¸Ģ è§Ĵ +è¡Į äºĭ +ç¥ĸ åħĪ +å©ļ åIJİ +éĹ´ éļĻ +ç¼Ŀ éļĻ +è¿Ļ æĶ¯ +ä¸įæĸŃ åĪĽæĸ° +å¾® åŀĭ +æĽĻ åħī +享 ç͍ +ä¸ŃåĽ½ ç§»åĬ¨ +éĹŃ çݯ +æī§ æĦı +åıijå±ķ æł¼å±Ģ +æł¸å¿ĥ åĮº +éªļ æī° +åħļåĴĮ åĽ½å®¶ +ä¸ŃåĽ½ æĶ¿åºľ +帶 èijĹ +ä¸ĩåįĥ çĵ¦ +åħ© 人 +äºİæĺ¯ æĪij +åĽº ä½ĵ +çªģ å¦Ĥ +çªģå¦Ĥ åħ¶ +çªģå¦Ĥåħ¶ æĿ¥ +éĩĮç¨ĭ ç¢ij +çα ç¾İ +æŁ¥ éªĮ +åıĮ èµ¢ +éĹª åħī +楼 å®ĩ +æĻ ı +æľī è¶³å¤ŁçļĦ +æŁĶ æĢ§ +ä¿¡æģ¯ å®īåħ¨ +管 线 +å¹¶ ä¸įä¼ļ +åύ ä»¶ +ä½ł åºĶ该 +çĿĢ å®ŀ +æĺİ æ¸ħ +æĬĹ çĶŁç´ł +æīĵ æŃ» +å®Įåħ¨ ä¸įåIJĮ +èĬ± æ¤Ĵ +æĶ¾ 宽 +ä½İ 端 +åĽĽ èĤ¢ +åĮĹ京 èµĽè½¦ +éĽĨ å¸Ĥ +æľª å©ļ +大å¹ħ æıIJåįĩ +建çŃij 设计 +çĭ¬ æľīçļĦ +æİ¢ éĻ© +æ²³æµģ åŁŁ +æħķ 容 +被 çĽĹ +åĵº ä¹³ +èı ģ +æĥ¬ æĦı +è¶ĬæĿ¥è¶Ĭ 好 +广大 群ä¼Ĺ +å¾· èĤ² +å¸Ĥåľº ä»·æł¼ +奥 å·´ +奥巴 马 +èĬĤ缮 ä¸Ń +两 款 +ä¸ĩä½Ļ åħĥ +ç»´ å°Ķ +çĶŁçī© ç§ijæĬĢ +åIJ¬ èµ·æĿ¥ +çł ļ +æĭŁ å®ļ +æ²¹ çͰ +声 èªī +建çŃij ä¸ļ +éĻIJ è´Ń +çīĩ åŃIJ +çķľ ç¦½ +ç½ij é¦ĸ页 +ä¼Ĺ çѹ +æĴŀ åĩ» +åīį ä¸įä¹ħ +åīį ä¸ĸ +åĽĽä¸ª æĦıè¯Ĩ +æµĭ ç»ĺ +éĺ² ç©º +漫éķ¿ çļĦ +æ²IJ æµ´ +æ¯Ķè¾ĥ ç®Ģåįķ +æµĭ å®ļ +åĽŀ è°ĥ +让 人们 +èĴĭ ä»ĭ +èĴĭä»ĭ çŁ³ +ç»ĵ æĻ¶ +å¢ŀæ·» äºĨ +æĿ¡ è¯Ħ论 +åī¯ ä¼ļéķ¿ +ä½ı æīĢ +ç»Ļ åĩºäºĨ +è°ĥ éħį +æ² ĸ +æľī ç͍ +æľīç͍ çļĦ +ä¸ĢæĿ¡ é¾Ļ +éĩİ å¤ĸ +ç¼ĺ åĪĨ +æ°¸è¿ľ ä¸įä¼ļ +æŀľ æłij +大åıij å¿«ä¸ī +麻 éĨī +äºij éĽĨ +åİ» åĵªéĩĮ +åħ¥ å¸Ĥ +ä»» æĢ§ +建 æ¡£ +建档 ç«ĭ +建档ç«ĭ åį¡ +ä¸Ģ 棵 +社 åįĢ +缸 ä¼´ +åļ · +å¡« åħħ +ä¸Ģ æĹı +ç¾ ģ +åıĸ è¯ģ +èΰ éĺŁ +åİĤ åĮº +è¡· å¿ĥ +åıijå±ķ éĺ¶æ®µ +é«ĺ 强度 +åĹĵ åŃIJ +é¢Ĩ è¡Ķ +楼 主 +大 èĴľ +æŀķ 头 +ç²® æ²¹ +é»Ħ çĵľ +æĵ Ĵ +å°ı çĭĹ +æĶ¹éĿ© å§Ķ +åįģ åĪĨéĴŁ +é²ľ èī³ +åħ³ ç¾½ +çĭĢ æħĭ +å®ŀç͍ æĢ§ +å°ij è§ģ +é£ŀ æī¬ +çͰ éĩİ +æIJ Ĥ +è¿Ļ个 è¯į +åºĶæĢ¥ é¢Ħæ¡Ī +è§Ĵ度 æĿ¥çľĭ +æķ¬ çķı +æ³ķ å®Ŀ +åĸĦ æĦı +æīĵ æĸŃ +对 åĨ³ +çµķ å°į +åĢŁ æŃ¤ +å¼Ģ æºIJ +å°ı 說 +ç¥ º +å²ģ 以ä¸ĭ +éĢĢå½¹ åĨĽäºº +ä¸įä¹ħ åīį +åĩº åİĤ +讽 åĪº +æĿ¥çľĭçľĭ åIJ§ +éŃĶ åħ½ +çķĻ ä¸ĭæĿ¥ +å±ħ 室 +åłħ æĮģ +çľĭ äºĨä¸Ģ +çľĭäºĨä¸Ģ çľ¼ +éĽĨåĽ¢ æĹĹä¸ĭ +æĪĺ æĪĺç»ĦåIJĪ +è®¤çľŁ èIJ½å®ŀ +汽车 产ä¸ļ +çī©çIJĨ åѦ +æķ µ +éĴ Ŀ +åĽ¢ éķ¿ +ä¸įæĸŃ æī©å¤§ +èĤ© è´Ł +åıijå±ķ 缮æłĩ +è³ĩ éĩij +åīį ç½® +ä¸ŃåĽ½ åı¤ä»£ +æŃ» åĪij +åħħåĪĨ ä½ĵçݰ +åħ³ éŨ +ç¾İ æĦŁ +æīĵ åħ¥ +æĬijéĥģ çĹĩ +å°ij çĪ· +æłij æŀĿ +æ¶Īæģ¯ ç§° +æ´Ľ åħĭ +åį ¯ +è¿Ī åIJij +æİ¨ åĭķ +ä»İä¸ļ èĢħ +åİ» ä¹° +欢 å¿« +æĭ¥ æĮ¤ +马 æ¡¶ +æĬĬ æİ§ +æĶ¿ åħļ +å¼ł æī¬ +客 æłĪ +红 æĺŁ +éĢģ æĿ¥ +åħ¨åŁŁ æĹħ游 +èĩª ç§ģ +åįģäºĮ æĿ¡ +åı¹ æģ¯ +ä¸Ģ èīĺ +ä¿Ŀ è´¹ +æĸ½å·¥ çİ°åľº +æľī 幸 +ç»Ń èĪª +åı¯èĥ½ æľĥ +èĥĮ åıĽ +ä½£ éĩij +ä¸ī çŃīå¥ĸ +å¾Ī 满æĦı +游æĪı åľ¬ +群 éĩĮ +æŀĦ ä»¶ +åºı å¹ķ +太 æ¹ĸ +æľ¨ è´¨ +æĻĭ æ±Ł +çµĤ æĸ¼ +è·³ è·ĥ +åĢºæĿĥ 人 +çŃī 诸å¤ļ +æĶ¾ åĩº +åħ³éĶ® æĹ¶åĪ» +æĦŁæŁĵ èĢħ +é£ŀè¡Į åijĺ +èĥĨ åĽº +èĥĨåĽº éĨĩ +æĬ± æŃī +åij¨ äºĮ +æĸ° æĹ¶æľŁ +åĨ·éĵ¾ çµģ +è¿Ļç§į æĸ¹å¼ı +该 æĿij +åĽŀ é¦Ī +åŁºçĿ£ æķĻ +人 åıĤ +æŀ¯ çĩ¥ +æī¹åıij å¸Ĥåľº +åħħåĪĨ èĤ¯å®ļ +å¸Ĥ æĶ¿åįı +äºĭ æ¥Ń +龸 çİĭ +çĥŃ æIJľ +åįģä¹Ŀ 大 +ä¼´ æľī +ç¾İåĽ½ æĢ»ç»Ł 
+åŁİå¸Ĥ 管çIJĨ +ä¸ĭ 令 +èĥ¸ åı£ +åıª çŁ¥éģĵ +åij¨ ä¸ī +ç͍ æĪ¶ +éŃ ¯ +å¿ĥ è¡Ģ +带头 人 +åĮ» åĬ¡ +åĮ»åĬ¡ 人åijĺ +æİ§åζ åύ +ä½ľåĵģ åĨħ容 +æĪĺ åıĭ +åİĨ å¹´ +ä¸į åħĭ +ä¸įåħĭ ä¸įåıĬ +æĹ¥ æŃ£å¼ı +è±IJ å¯Į +ç¨İ è´¹ +æĹ¶ æķĪ +å±ķ ä½į +è¡¡ éĺ³ +æĪ¿ 貸 +çĪĨ 款 +ä¹IJ æĦı +çĶ· 主 +å¯ ¬ +æľĥ èѰ +ä¹ĭ å¤ľ +åIJĮ 樣 +ä¸įè¦ģ 太 +ä¼Ĭ æĸ¯ +ä¼Ĭæĸ¯ åħ° +åŁºæľ¬ åİŁåĪĻ +åİ» æİī +ä½İ ä¿Ŀ +个 交æĺĵ +个交æĺĵ æĹ¥ +èģĬ èģĬ +åĽĽ ä½į +åħļç»Ħ æĪIJåijĺ +主è¦ģ ä»İäºĭ +å½± éŁ³ +åĨĴ åĩº +åij¼åIJ¸ éģĵ +è¾¾ å°Ķ +æľ¨ åľ°æĿ¿ +诡 å¼Ĥ +çģ¯ åħ· +çģ« çĥ§ +è§£ èĦ± +æĦĪ åıij +æ¹ĸ å·ŀ +é£İ ä¿Ĺ +æĸ° å½¢åĬ¿ +æĸ°å½¢åĬ¿ ä¸ĭ +è² Ŀ +èĦ ĵ +åĬ¨åĬĽ çĶµæ±ł +é£ŀ èι +飧 æĢ§ +åĪ© çī© +åĪ©çī© æµ¦ +ä¸į 认è¯Ĩ +ç¼ĸ ç»ĩ +ä½ľ åĿĬ +èģĮä¸ļ æĬĢèĥ½ +çľĭ è¦ĭ +åĽ´ æ£ĭ +æĺı è¿· +å½Ĵ å±ŀäºİ +æĤ¬ å´ĸ +éĨ« çĻĤ +å®ĭ 代 +åºĦ æĿij +èĹ ķ +çĮĽ çĦ¶ +çĩĥæĸĻ çĶµæ±ł +å®ŀä½ĵ åºĹ +ä¸įè¶³ 以 +æĥħ ç· +æĥħç· Ĵ +å»Ĭ åĿĬ +ç͵ åı° +åºĶ åĬĽ +ä¸Ńå°ı åѦçĶŁ +èĥ¡ åIJĮ +éī´ åĪ« +åĨħ ç½® +ä¹± 象 +æ¬Ĭ çĽĬ +å¼ĢæĶ¾ å¼ı +åįļ æĸĩ +讲 课 +çŃī åİŁåĽł +ç©· 人 +交 æĽ¿ +æĬ¤ çħ§ +åıijå±ķ æľºéģĩ +客 åķĨ +åıį ä¹ĭ +ç±³ é¥Ń +å¹¶ åıij +å¹¶åıij çĹĩ +æ±ī åŃIJ +æŀľ åĽŃ +对æĪij æĿ¥è¯´ +åģı åIJij +æī¹ 示 +读 åIJİ +读åIJİ æĦŁ +æĺİ æĻº +åĽ´ çĿĢ +åıį 转 +æĿ¨ å¹Ĥ +ä¸ĵ åįĸ +ä¸ĵåįĸ åºĹ +åıĹ éĻIJ +åºŁ è¯Ŀ +æŀģ å°ij +åįĪ åIJİ +è¿Ľ ä¿® +åīĬ åĩı +æľ¬ç§ij çĶŁ +ä¼ĺ éĢī +åħī çħ§ +åıĻ äºĭ +åıĸ æļĸ +åĮĹ è·¯ +æ¦ ķ +èİĨ çͰ +楼 å±Ĥ +天 èĬ± +天èĬ± æĿ¿ +çĤ ľ +å·²ç»ı æľīäºĨ +è¶ ¾ +çͳ åįļ +ç͵ éĺ» +åĬŁ è¯¾ +æŃ¥ æŃ¥ +éĤ£ä¹Ī 容æĺĵ +æŃ¤ æĸĩ +ä½ ° +计 è¾ĥ +çīĩ éĿ¢ +ç͵影 éĻ¢ +ä¸į åħ¬å¹³ +ä¸ī æľŁ +æĹħ游 èµĦæºIJ +å¤ļç§į å½¢å¼ı +è£Ĥ ç¼Ŀ +åIJİ æİĴ +硬 度 +åĽŀ æļĸ +éģĵ æķĻ +è´« è¡Ģ +æ¸ħ é¦Ļ +伤 çĹħ +æĦı 義 +çļĦ ç¼ĺ +çļĦç¼ĺ æķħ +åºĦ 严 +åıªæĺ¯ 为äºĨ +æīĵ æĬĺ +以 ä¾Ĩ +滿 è¶³ +çİĽ 丽 +風 éļª +æĸĩ ç§ij +éħįå¤ĩ äºĨ +è¿Ľ é£Ł +æ¶ ¡ +è·¯ ç¨ĭ +åı« 声 +ä¸Ńå¿ĥ åŁİåĮº +æľīæīĢ ä¸įåIJĮ +å¼µ è²¼ +é¢Ħ æĬ¥ +æľīå¤ļ ä¹Ī +è¿Ľè¡Į åħ¨éĿ¢ +æĽ¾ ç¶ĵ +ä¸ī 代 +å®ı 大 +æ¸ħ æī« +éĢī åĩº +åĵª ä¸Ģ个 +主 義 +ä¾Ŀ æĵļ +çļ® éĿ© +èµ¶ æĿ¥ +çŃĽ æŁ¥ +æ¨ Ł +ä¿Ŀ èįIJ +åIJĥ æĥĬ +æľĭåıĭ们 对 +ä»ĸ æĺ¯ä¸Ģ个 +åºŁ æ°Ķ +æ» ħ +è´¢ ç¨İ +æĿij æĿijæ°ij +èµĦ产 è´ŁåĢº +å®ī å¨ľ +缮åīį åĽ½åĨħ +æĦŁè§ī èĩªå·± +çµIJ åIJĪ +éͦ æłĩ +éͦæłĩ èµĽ +æĽ´ æ·± +åŁº æķ° +éħ¿ éħĴ +çī¹èī² äº§ä¸ļ +åİĭ å®ŀ +ä¾Ŀæ³ķ 追究 +æ·¡ å®ļ +ç®Ģ缴 å°±æĺ¯ +å£ĵ åĬĽ +æ°ij å¿ĥ +ä¸į åIJĪéĢĤ +çͱæŃ¤ åı¯è§ģ +èµŀ èªī +æ¾ ¤ +åĩłå¹´ åīį +åIJī ä»ĸ +çł´ æįŁ +轻轻 åľ° +å²Ľ 屿 +æĦı å¢ĥ +ä»Ģä¹Ī åı« +åģĩ è£ħ +éĢģ è´§ +å¹ķ å¢Ļ +妥 åįı +åĽ½ æĹĹ +äºĨ å¾Īä¹ħ +åĪĨ辨 çİĩ +ç´ Ķ +éĺ³ åĮº +åĩŃ çĿĢ +åģľè½¦ ä½į +京 éĥ½ +éĶ £ +æĵ ¾ +è¿Ľ éŨ +åĪĺ æµ· +åĽĽ 级 +女 è¶³ +è¡ĮæĶ¿ 审æī¹ +éģ¥ æİ§ +ä¸į éĮ¯ +å¾Ĺ å¾Ī好 +为 缮çļĦ +ä»į æľª +ç²¾ è£ħ +éĢį éģ¥ +å°½ 头 +çºł ç¼ł +éłĺ å°İ +æĭħ è´Ł +æĪĸèĢħ åħ¶ä»ĸ +åıªä¸įè¿ĩ æĺ¯ +åı® åĺ± +åģĩ åĨĴ +æļĸ æ°Ķ +çĽIJ åŁİ +被 è§Ĩ为 +诺 è´Ŀå°Ķ +ç»ĻäºĨ æĪij +è¿ij åįĥ +éĩį åĽŀ +éĨĴ äºĨ +ç͵ è§£ +忽çķ¥ äºĨ +èĥĮ éĥ¨ +æĸĩæĺİ åŁİå¸Ĥ +æº ħ +è² ĵ +æĬµ æĮ¡ +åĸľæ¬¢ åIJĥ +éĿĻéĿĻ åľ° +å¾Ī æ·± +åŁºç¡Ģ çŁ¥è¯Ĩ +è¿ĩ éĶĻ +çIJĨ ç§ij +交æµģ åIJĪä½ľ +èĪ Ķ +調 æŁ¥ +æħĪ æĤ² +éĴ ° +èĩ´ ç͵ +å®£ä¼ł æ´»åĬ¨ +åıĺ éĩı +çļĦ人 æĿ¥è¯´ +æĹ¶ éļĶ +ä¸į管 ä½ł +缸 è¿ij +è´µ éĩijå±ŀ +ä¹Łä¸į åı¯èĥ½ +ç²ī æľ« +åįĹ çĵľ +çϽ 马 +åħī æºIJ +éĩij å¥ĸ +çĭ¬ è§Ĵ +çĭ¬è§Ĵ åħ½ +妨 ç¢į +ç»Ļ åĬĽ +ä½Ĩ ä»į +å¼łå®¶ åı£ +èIJ¬ åħĥ +渲 æŁĵ +éķ¿å¤§ äºĨ +è®°èĢħ äºĨè§£ +æĢĢ çĿĢ +è¦ģ åѦä¼ļ +游æĪı 代 +游æĪı代 ç»ĥ +äºĮ çϾ +æĦıè¯Ĩ å½¢æĢģ +çİ º +计åĪĴ çĶŁèĤ² +æī¾ åĩĨ +åħ° èĬ± +è¿Ļ座 åŁİå¸Ĥ +污 æ³¥ +å®ĺæĸ¹ 微信 +å½Ĵ å±ŀ +æ°§ æ°Ķ +éģİç¨ĭ ä¸Ń +åį°è±¡ æ·±åĪ» +稳 妥 +çµIJ æĿŁ +åŃķ æľŁ +çī¹ æĿĥ +åĿļ åĽº +顺 åĬ¿ +æŀľ èͬ +éĨ« 師 +åİ ® +ä¹Łæĺ¯ å¦ĤæŃ¤ +é¦Ĵ 头 +缸 åĬ© +å¹² 线 +ä¸Ģ æľ¬ä¹¦ +ç» ¥ +æĮ¯ å¥ĭ +èĤ¾ èĦı +åĭķ çī© +é£ŀ è·ĥ +èıľ åĵģ +å¤ļ ä½Ļ +å¤ļä½Ļ çļĦ +éĢĿ ä¸ĸ +æģĭ 人 +å¼Ģåıij åĪ©ç͍ +顺 丰 +éĩİ å¿ĥ +æł¡ å¤ĸ +æģIJ é¾Ļ +éĿ¢ åħ· +éķ¿ è¾Ī +éļı å¤Ħ +éļıå¤Ħ åı¯è§ģ +ç´§ 缺 +éĩį ä¸Ń +éĩįä¸Ń ä¹ĭ +éĩįä¸Ńä¹ĭ éĩį +奥 æĸ¯ +奥æĸ¯ åį¡ 
+ä¸Ģ个 å¤ļ +ä¸Ģ个å¤ļ æľĪ +ä¸įåı¯ 缺å°ij +æĸ° æł¼å±Ģ +æıIJ æĮ¯ +è¡Į è´¿ +æ¼Ĥ æµģ +èģĬ åŁİ +åħ´ 建 +è´¨ æ£Ģ +ç§ģæľį 游æĪı +æĽ´ éĩįè¦ģ +è´ ® +çħ ľ +转åıĺ 为 +è¿Ļ 两年 +ä¿Ŀ é²ľ +æī§ æķĻ +çĥ ¨ +å¼Ģåıij 建设 +è¿IJèIJ¥ 管çIJĨ +误 å·® +京 åī§ +å¸IJ åı· +å·¥ä½ľ ä½ľé£İ +ä¸ĸ ä¿Ĺ +çϽ 宫 +天 åĽ½ +å¤©åĽ½ ç»§ç»Ń +å·´ æĸ¯ +èIJ¥ åĪ© +åĵģ æł¼ +æĿijæ°ij 们 +æĪ¿ 车 +çŃī çĹĩçĬ¶ +å¦Ĥ å®ŀ +å® ¸ +å±Ĥ 级 +éĶĻ è¿ĩäºĨ +ç»ĵ å®ŀ +ç¬ij èĦ¸ +羣å®ŀ æĢ§ +éĥ½å¸Ĥ æĬ¥ +é¥Ń èıľ +åºĶ 注æĦı +æĬ½ çĥŁ +伪 éĢł +åīį ä¸Ģ天 +éŃĶ é¾Ļ +éŃĶé¾Ļ 令çīĮ +约 è°Ī +绣çѹ æİ¨è¿Ľ +让 ç͍æĪ· +åħ¨éĿ¢ èIJ½å®ŀ +å¼Ħ å¾Ĺ +è°Ī æģĭçα +鸣 æĪIJéķ¿ +鸣æĪIJéķ¿ è®° +æ´ĭ æ´ĭ +çĸı æķ£ +éĿ¢ç§¯ 约 +æµĵ 缩 +æĸ¯ é¡¿ +çĶŁæĢģ åľĪ +æī§ 导 +ç§» éĢģ +齿 è½® +æł¹æľ¬ å°±ä¸į +缩 åĩı +èµ° ä¸ĭåİ» +çĿ« æ¯Ľ +ä¹Łä¸į éĶĻ +åıįæĺł åĩº +èĭ¦ æģ¼ +缸åħ³ æĶ¿çŃĸ +é«ĺ 楼 +ç²ī èī² +æĬķèµĦ é¢Ŀ +ä¸į ç»ı +ä¸įç»ı æĦı +å®ģ æĦ¿ +èĪĮ 头 +æ»ĭ çĶŁ +å®ģ åİ¿ +åīįåĪĹ èħº +åĩ ³ +é£Ł 欲 +åıĸ èĥľ +éĻ¢ åŃIJ +ç´łè´¨ æķĻèĤ² +滨 å·ŀ +æĬ¢ æĬĵ +å¼Ĥ åij³ +åĴ ļ +åĬ į +宽 éĺĶ +æļ´ 涨 +æĥł åıĬ +è§Ħ ç¨ĭ +ä¾Ľ åħ» +éĢģ å¾Ģ +å±± åºĦ +举 äºļ +å±ķ é¦Ĩ +è§£ éĶģ +æĹł è§Ĩ +éĻį èIJ½ +è¿ŀ äºij +è¿ŀäºij 港 +åıĤ è°ĭ +çİ ĸ +ç¬ ĥ +èĢĹ è´¹ +æī¿ å¾· +社ä¼ļ æķĪçĽĬ +åįĹæµ· ç½ij +åĪĽ 伤 +èIJ ± +åħħ æ²Ľ +ç½ijç«Ļ 建设 +大 åºĨ +åĨį éĢł +åŃĹ æł· +åħ¨æ°ij åģ¥èº« +èĮ« èĮ« +æµ® åĬ¨ +åīį åı° +å¢ŀ 设 +éĢĽ è¡Ĺ +åĢĴ éĹŃ +æ³ķå¾ĭ 顾éĹ® +çĸ ® +çĹħ çĹĩ +空 åīį +请 æķĻ +èĥľ ä»» +æĿĢ èıĮ +æĪĺæĸĹ æľº +ç»ĺ åζ +å¤Ħ æĸ¹ +çªģ åĽ´ +çĮ« åĴª +æĬ¥åijĬ æĺ¾ç¤º +ç¿ Ł +çķ¶ åľ° +æľĢ éļ¾ +纪 å§Ķ书记 +ä½İ åİĭ +èĻļ 空 +è¿Ļéĥ¨ ç͵影 +产ä¸ļ åįĩ级 +è°· çα +è°·çα åĩĮ +æĬ¼ éĩij +女 æĸ¹ +éĴ» çłĶ +æļĹ æļĹ +è¿· ä½ł +æīĢ è¬Ĥ +å¨ģ å»ī +å¼Ģ æľĹ +å² Ķ +çģ« çĤ¬ +åIJĪçIJĨ æĢ§ +åħ¬ åĬŀ +ä¼ļ ä¼ļéķ¿ +éĺ´ è°ĭ +å¼Ģ å±Ģ +æĻ®éĢļ è¯Ŀ +åį¡ æĭī +å°ij åIJĥ +éĹª èĢĢ +æŀľ æ±ģ +æī§è¡Į åĬĽ +è° Ľ +æĬ¢ åĬ« +é«ĺéĢŁ åıijå±ķ +éŁ ¬ +åįĹ æ²Ļ +é«ĺçŃī åŃ¦æł¡ +æį¢ 个 +åı¯èĥ½ åŃĺåľ¨ +æĬ Ĵ +è°± åĨĻ +被 æĬĵ +æĿ¯ åŃIJ +èĬĤèĥ½ åĩıæİĴ +æ°ĶåĢĻ åıĺåĮĸ +åĪĨ åĪ¥ +ä¸Ń æŀ¢ +欢 åij¼ +åħī 纤 +è¿Ļ 群 +çľ¼ çķĮ +åħ±åIJĮ åıijå±ķ +çݰ ä»Ĭ +éĹ» è¨Ģ +çī¹èī² å°ıéķĩ +æķij 人 +éĻį æ°´ +ä¸ĸçķĮ ä¸Ģæµģ +å°± é¤IJ +çŀ ¥ +å¤į ä»ĩ +ç¾½ æ¯Ľ +ç¾½æ¯Ľ çIJĥ +è´© åįĸ +æºIJ æ³ī +æĢ»ä½ĵ è§ĦåĪĴ +åĬ¨ æĦŁ +ä¸Ģ 审 +åĢŁ éĴ± +è§ģ æķĪ +èĬ± èįī +åIJĮ ä¸ļ +æŁ¥ è©¢ +åĽ½éĻħ åIJĪä½ľ +ä¾Ľ åĽ¾ +åģ ´ +æł ĵ +缸 éĢļ +è°Ī åıĬ +è¿ĩç¨ĭ å½ĵä¸Ń +é¦Ļ èıĩ +åįģåĽĽ æĿ¡ +ä¸Ģå¼Ģå§ĭ å°± +ä¸ĵ åijĺ +æĺİ é¡¯ +æīĵéĢł åĩº +ä¸ĭéĿ¢ æĪij们 +æľº æ²¹ +åı° è¯į +åŃIJ å¼Ł +æľĢ 常è§ģçļĦ +æĪij è®°å¾Ĺ +ç» ° +æĤ¬ æµ® +è¿ĺ 羣æĺ¯ +æĮĤ åı· +åıĭ åĸĦ +éĩį 伤 +çħ§ 亮 +æŃ¦ èѦ +åĩºçݰ éĹ®é¢ĺ +è¸Ĭ è·ĥ +åľ°çIJĥ ä¸Ĭ +å¸Ĥ 人大 +åıĹ害 人 +å² IJ +åIJĮ åѸ +éĩijèŀį å¸Ĥåľº +æľīçļĦ çݩ家 +å¸Ĥ æķĻèĤ² +å¸ĤæķĻèĤ² å±Ģ +åIJĦ å¼Ĥ +ç·ļ ä¸Ĭ +æģ º +æľī 大éĩıçļĦ +åķĨ æĬ¥ +åįķ åįķ +åħ¨ é¢Ŀ +ä¾ĿæĹ§ æĺ¯ +好 åĩłä¸ª +åĸ µ +éĩį æķ´ +çĶŁæ´» è´¨éĩı +æİ¢ 访 +åį° èĬ± +缼 è¡Į +å¾® è§Ĥ +èĪį å¾Ĺ +åºŁå¼ĥ çī© +积 èĵĦ +å®ļ å±ħ +æĤ ¼ +èĮ ¸ +çļĦ 帮åĬ© +çļĦ帮åĬ© ä¸ĭ +亿 åIJ¨ +åŃĶ éĽĢ +è¿ĻæĿ¡ è·¯ +é¥ µ +æĦĪ åĬł +éķ į +ä½ľ æ¡Ī +èįĶ æŀĿ +太 å°ij +è·» 身 +åħ¬çĽĬ æ´»åĬ¨ +çϽ æĸij +æĬĢæľ¯ æ°´å¹³ +å¸ § +æĹł çŁ¥ +åºĶ该 æĢİä¹Ī +éĢĢ å¸Ĥ +æ¸ Ń +åħ» çĮª +é© ¼ +群 å²Ľ +大 åį« +ä¹ĺ çĶ¨è½¦ +èı² å°Ķ +è´´ åIJ§ +åģľ ä¸ĭæĿ¥ +æľīæľº ç»ĵåIJĪ +åĪ» èĭ¦ +çļĦ åľ° +çļĦåľ° æŃ¥ +è¯Ĭ æīĢ +å¼Ģ æĪĺ +èĢģ çīĮ +çѹ çłģ +åħ«å¤§ 以æĿ¥ +楼 æĪ¿ +åŃĻ æĤŁ +åŃĻæĤŁ ç©º +åħĴ åŃIJ +第ä¸Ģ æĿ¡ +社交 åªĴä½ĵ +æĥ³ èµ·æĿ¥ +大 æ´ĭ +æĭ¼ éŁ³ +è¿Ľ åįļä¼ļ +è¿ĩ åħ³ +æ² ¼ +ç©¿ æIJŃ +éĤ£ ä¸Ģ天 +çł´ éŨ +æĬķæłĩ 人 +èµ¢ å®¶ +èĻļ å¼± +æ¿ ĥ +å®ī æ£Ģ +客 å®¶ +çĭ¬ç«ĭ èij£äºĭ +æīĭ åĬ¿ +åīµ éĢł +åľĨ满 å®ĮæĪIJ +为主 线 +好å¥ĩ å¿ĥ +é¢Ĩ åľŁ +çª ĸ +åħ¸åŀĭ æ¡Īä¾ĭ +çªģåıij äºĭä»¶ +åºķ æ°Ķ +头 æĻķ +å®Ľ å¦Ĥ +è§ ¸ +æ¸ħ æ·¡ +åļ ¼ +åģľ ç͵ +ç²ī å°ĺ +éĻįä½İ æĪIJæľ¬ +æĶ¾ æīĭ +è®°èĢħ 表示 +æĭĸ å»¶ +éª ĩ +æ®ĭ å¿į +çľģ æķĻèĤ² +çľģæķĻèĤ² åİħ +é«ĺ é¢Ŀ +éĦ Ļ +æ¥ ŀ +åĨħ ç§ij +èIJ¥ä¸ļ é¢Ŀ +åŁº çŁ³ +æµģ æ·Į +主 æĹ¨ +éĺIJ éĩĬ +建 åįİ 
+æĥĬ åı¹ +çī¢åĽº æłijç«ĭ +æĺ¯åIJ¦ åŃĺåľ¨ +建 åĨĽ +éĽ¾ éľ¾ +åħ¬ 认 +åħ¬è®¤ çļĦ +æ°¨ åŁº +æ°¨åŁº éħ¸ +åīį åĩłå¹´ +åι éĤ£ +æ±Ł 举 +å·¥ æ¥Ń +ä¸ĢçĤ¹ ä¹Łä¸į +ä¿® 士 +äºĨä¸Ģ éģį +åĪ ģ +æ»ļ æ»ļ +åĪĨ æł¡ +羣 çα +è¡Ģ èĦī +æĢ¥ åī§ +ä¸Ģ群 人 +ç¾ ¯ +æĪIJ é¾Ļ +ç²¾ç¥ŀ çĹħ +缸åħ³ 人åijĺ +éĿĵ 丽 +ä¸ī åŃ£åº¦ +åĪĴ å®ļ +ä¸ĸçķĮ 第ä¸Ģ +éĢļ ä¿Ĺ +åķĨä¸ļ åľ°äº§ +åĬŁèĥ½ æĢ§ +èµĦæľ¬ 主ä¹ī +详 è§ģ +æĬĵ æįķ +æĸĩ æĺĮ +å®Ŀ å®ī +è£ħéħį å¼ı +æºIJ æºIJ +æºIJæºIJ ä¸įæĸŃ +çĶŁ æĢķ +纵 åIJij +å£ ½ +çľ¼ è¢ĭ +èĤī ä½ĵ +åı¤ ä»Ĭ +èŀį åªĴä½ĵ +åģ ī +æł¼ æľĥåĵ¡ +çĥ · +åĬŁ ç͍ +æīŃ çŁ© +绿èī² éĢļéģĵ +åī§ ç»Ħ +å¼± åĬ¿ +è´¨éĩı éĹ®é¢ĺ +éĻIJ é¢Ŀ +éª Ĩ +éģµ ä¹ī +å¯Ŀ 室 +æĥ³ 念 +åł± åijĬ +ä»ħ 次 +ä»ħ次 äºİ +èŀį åĪĽ +æĭĽèģĺ ä¼ļ +åºĬ åŀ« +转åŀĭ åıijå±ķ +ä¸ŃåĽ½ çĶµä¿¡ +åIJ¬ è¯Ŀ +è«ĭ æ±Ĥ +大éĥ¨åĪĨ 人 +æ´» å¾Ĺ +åĵŃ æ³£ +è¶ Ļ +åıijçĹħ çİĩ +ä¸į 符 +åĨĽ å®ĺ +é¢Ī æ¤İ +æĸ°åĨł çĸ«æĥħ +æŁ¬ åŁĶ +æŁ¬åŁĶ 寨 +ä»»ä½ķ å½¢å¼ı +人 éĻħ +人éĻħ åħ³ç³» +æĢ» æī¿åĮħ +å¹³åĿĩ æ¯ı +æģŃ åĸľ +åĦ ĺ +åħµ 马 +è¿Ł åΰ +å·¥ 伤 +çīĪæĿĥ å½Ĵ +çīĪæĿĥå½Ĵ åİŁ +æĭ¥ æĬ¤ +ç³Ĭ æ¶Ĥ +å¹² æ¶ī +å°ij ä¸įäºĨ +æĥ³ æī¾ +è´¹ çİĩ +该 éĻ¢ +èŀį åĮĸ +è¿İ åIJĪ +è§ĨåIJ¬ èĬĤ缮 +æł¼ ç¶²ç«Ļ +çľī æ¯Ľ +欢è¿İ 大家 +å®¶åºŃ æķĻèĤ² +ä¾µ èļĢ +ç»Ļ ä½łä»¬ +è¡Ģæ¶² 循çݯ +å¯Ħ æīĺ +å°ĸ åı« +以ä¸ĭ åĩłä¸ª +è¿ĺ 以为 +åħ¶ä»ĸ çݩ家 +ç¬ij ç¬ij +æīĵ åIJ¬ +èĩªçĦ¶ ç§ijåѦ +åŁº ç«Ļ +ä¹Ŀ å·ŀ +ä¿Ŀ 驾 +ä¿Ŀ驾 æĬ¤ +ä¿Ŀ驾æĬ¤ èĪª +æĶ¾ çľ¼ +çŁ¥åIJį ä¼ģä¸ļ +ç¸ ® +ç¨ ½ +æļ ĩ +使ç͍ 網路 +é¢Ħ çķĻ +大 象 +åıijæĺİ ä¸ĵåĪ© +æĸĩ 娱 +éĢł ç¦ı +湿 润 +éĿ¢ æĿ¡ +æ¶Īè´¹ åįĩ级 +è®Ĭ å¾Ĺ +åĩł åIJį +ä» Ħ +认 æ¸ħ +è¿ľ æĻ¯ +æıĴ 座 +诸 侯 +åıĺ æĢģ +ç¦ı 彩 +è´§ æŀ¶ +失 æİ§ +ç§»åĬ¨ 端 +ä¸Ĭ åı¸ +éĢł 纸 +å¸ĥ æľĹ +çĴ ĩ +åı° åįĹ +åĮĹ京 åĨ¬å¥¥ +èĵĿ çīĻ +éķ¿ çŁŃ +æĬĺ å°Ħ +ç»ij æŀ¶ +å¯Ĵ åģĩ +转 åŁºåĽł +æĢ¥ äºİ +æŃ£ åĵģ +åħħ 滿 +大 纲 +æĬĹ ä½ĵ +è¨ĵ ç·´ +æĶ¶ ç´§ +æ¯Ķ è³½ +åħµ åĬĽ +æľ¬ æĽ¸ +äºĮ 代 +æĢ¥ è¯Ĭ +æĸĩ æ¡Ī +ç»ı åķĨ +æĻ¨ æĬ¥ +æ£ ĺ +æĢ»ä¹¦è®° åľ¨ +åıĹ éĤĢ +äºĶ åĽĽ +å²Ń åįĹ +çα åIJĥ +åŁĥ å°Ķ +å¿ĥ å¢ĥ +è¦ĨçĽĸ éĿ¢ +å®ŀåľ¨æĺ¯ 太 +æł¹ åºķ +纷纷 表示 +åĹ ħ +éļıçĿĢ æĹ¶éĹ´ +åİĨåı² æĤłä¹ħ +éħ ī +æĢ» éĺŁ +主é¢ĺ æ´»åĬ¨ +éĹ® åį· +é©¿ ç«Ļ +æı¡ ä½ı +åı¯èĥ½ 导èĩ´ +æ°ij éĸĵ +éĸĭ åķŁ +ä½Ĩ ä¸įéĻIJ +ä½Ĩä¸įéĻIJ äºİ +åįģ éĩĮ +å¨ ¥ +æįŁ èĢĹ +çĸı 导 +çݯ æ°§ +ç¥ŀ éĢļ +çα å°Ķ +çαå°Ķ åħ° +æľ´ å®ŀ +å¿« æĬ¥ +æĶ¶ åıĹ +æĪĸ 許 +èĥĮ éĿ¢ +æĸĩåĮĸ ä¼łåªĴ +ä¸ī åĢĭ +æĶ» åĬ¿ +å®ī 举 +å®ī举 å°¼ +åĿĩ å·² +顾 èĻij +éĦ Ń +è¿Ļå®¶ åħ¬åı¸ +åħ¬åijĬ ç§° +æıIJä¾Ľ ä¼ĺè´¨ +稳æŃ¥ æİ¨è¿Ľ +å¤į è¯ķ +å°Ĩ é¢Ĩ +è°Ī èµ· +å¨ Ħ +è¿ŀ 线 +æ©Ł éĹľ +åºĶç͍ åľºæĻ¯ +çĶ» åĥı +è´¢ è¿IJ +ä¿Ŀ éļª +çĹħ çIJĨ +æ¯Ľ 主å¸Ń +ä¸Ŀ 毫ä¸į +çα å¥ĩ +çαå¥ĩ èīº +ä¸ĵå®¶ ç»Ħ +åij¼ åͤ +éĭ ¼ +çģ ¸ +é¢ĨåħĪ åľ°ä½į +æıIJ æĭĶ +龸 éģĵ +å±± åĿ¡ +èĿ İ +沸 èħ¾ +该 项 +ä»Ĭ çĶŁ +ä¸Ģç¯ĩ æĸĩ竳 +æĸ¹å¼ı è¿Ľè¡Į +é»ij 客 +æĶ¹ åĬ¨ +主 é¡Į +æķ£ å¸ĥ +ä»Ģä¹Ī åľ°æĸ¹ +åĮĸ åIJĪ +åĮĸåIJĪ çī© +éĿĻ ç͵ +æĢ» æĶ¶åħ¥ +å§Ķ ç»Ħç»ĩ +å§Ķç»Ħç»ĩ éĥ¨ +éĿĻ æĢģ +èĢģ åŃĹåı· +室 åıĭ +éĥ½ä¸į æķ¢ +æŀ¶ åŃIJ +çģµ æķı +审 è§Ĩ +æĤ£ åĦ¿ +å±± 寨 +èĸª èµĦ +é©° æı´ +éĥ¨åĪĨ åĨħ容 +好 ä¼¼ +æĪIJåijĺ åĽ½ +åľ¨æĪij çľĭæĿ¥ +åħ³æ³¨ 度 +éĻĪ æŁIJ +è¿Ļç§į äºĭæĥħ +éĢī å®ļ +ç²¾ åŃIJ +å£ģ çĶ» +æ±Ł æ·® +é«ĺ æĺĤ +æł¼ åĬĽ +è¼ © +åѦ åłĤ +æĤ¨ åIJĮæĦı +ä¸ĢåĪĩ éĥ½æĺ¯ +æ½ ¤ +éĸ ĥ +å¸ĮæľĽ èĩªå·± +ä¿ ĺ +æ±Ł åİ¿ +æ³ ¾ +ç§ij æķĻ +æīĵ è¿Ľ +ä¸į æħİ +å¯Ĵ åĨ¬ +æ¸Ķ æ°ij +鼷 æĸ¯ +主 å®° +æĹħ游 度åģĩ +ç͵åŃIJ éĤ®ä»¶ +æ±Ĥ å©ļ +éļİ æ®µ +åģ¥èº« æĪ¿ +注æĺİ åĩºå¤Ħ +äºĭæķħ åıijçĶŁ +级 以ä¸Ĭ +åŃĺ æ´» +æĸ½ èĤ¥ +èľľ èľĤ +åµ © +æĮĸæİĺ æľº +æĬĹ æĭĴ +ä¼ł 导 +æĺ¯ä»Ģä¹Ī åij¢ +ä¸Ĭå¹´ åIJĮæľŁ +建 åħļ +çĶŁ æħĭ +ä¿Ŀ ä½ı +款 车åŀĭ +人 èĦī +éļIJ èͽ +失 æķĪ +éģ¿ åŃķ +ç®Ģ 便 +谢谢 ä½ł +å®Ī ä½ı +æĶ¾ æĺł +è¨Ī çķ« +çݰ代 çµģ +é¤IJ 廳 +æķħ å±ħ +大 大å°ı +大大å°ı å°ı +çī¹åĪ« 声æĺİ +éģį åıĬ +å¿ĥçIJĨ åĴ¨è¯¢ +è³ ´ +çĮ® è¡Ģ +å·²ç»ı è¾¾åΰ +æīĵ æĭĽåij¼ +åıĮ è¾¹ +ä¸Ģæĸ¹éĿ¢ æĺ¯ +å´ĩ å°ļ +éĺ¿ å¯Į +éĺ¿å¯Į æ±Ĺ +æĮģ æľī人 +è± ģ +é£İ çŃĿ +åĬ¨ èį¡ +äºĨä¸Ģ ä¼ļ +äºĨä¸Ģä¼ļ åĦ¿ +ä¸ĩ 象 +çľĭ ç͵è§Ĩ 
+åįģä¸ī æĿ¡ +çĮĽ çĥĪ +è¦ģ ä¸įçĦ¶ +太æŀģ æĭ³ +å¼ķ çĪĨ +ç»ıè¿ĩ å¤ļå¹´ +游æĪı éĩĮçļĦ +é¾Ļ æ³ī +æłĩ éħį +è®ĵ ä»ĸåĢij +éĢł æŀĹ +åĮºåŁŁ æĢ§ +亿 ä¸ĩ +æĪĺçķ¥ å¸ĥå±Ģ +éķĩ æĶ¿åºľ +åĶ® 票 +çĶŁäº§ å·¥èīº +éķĩ åħļå§Ķ +ä¸Ńå°ı åŀĭ +æľ¨ è̳ +æ²³ è¾¹ +èĦ¾ èĥĥ +欢è¿İ æĤ¨ +åıĺ å¼Ĥ +缤 纷 +åŀĥåľ¾ æ¡¶ +辩 è¯ģ +车 åºĵ +æ¯Ķ çİĩ +åħ´ æĹº +详ç»Ĩ äºĨè§£ +å®ī å±ħ +çħ§ æĸĻ +æĸ¹ æīį +èµ ¦ +åĨ ķ +å¥Ķ èµ´ +å®Ŀ 鸡 +åľº åĿĩ +缮åīį æŃ£åľ¨ +åIJŀ åϬ +è¿° èģĮ +æĩ µ +å¥ĩ çijŀ +ä»į å°Ĩ +èĪī 辦 +å·¥åķĨ å±Ģ +å¡ij èĥ¶ +åĬŀ å®ŀäºĭ +æĸ¹ æĸ¹éĿ¢ +æĸ¹æĸ¹éĿ¢ éĿ¢ +æĸĩåĮĸ èĬĤ +åħ¥ èģĮ +é¸ ¥ +ç©¿ éĢı +以 ä¹łè¿ijå¹³ +åį± éļª +æľ¦ èĥ§ +åİĨåı² æĢ§ +æķŀ å¼Ģ +ä¼Ļä¼´ åħ³ç³» +çŁ¿ åĮº +åĽ½éĻħ åľ¨çº¿ +ä¼łå¥ĩ éĩĮéĿ¢ +è¿ij äºĽ +è¿ijäºĽ å¹´ +åĬ£ åĬ¿ +æĶ»åĩ» åĬĽ +æĻº éĢł +ç¦ § +çİĭ åħĪçĶŁ +éĨ« çĶŁ +åĽĽ 项 +å®ŀ æĻ¯ +åĪĿ åĪĽ +å¿ĥ 裡 +æĻ¶ ä½ĵ +交 éĻħ +让 æ¶Īè´¹èĢħ +课 æĸĩ +æİĴ æ°Ķ +å¹¶ä¸į æĦıåij³ +缸 声 +第ä¸Ģ å±Ĭ +åİŁ èijĹ +éĽ ľ +没æľī 太大 +è¡¥ æ°´ +çµģ ä¼ģä¸ļ +第äºĮ æī¹ +åħ¶å®ĥ éĹ®é¢ĺ +æİĮ éŨ +责任 å¿ĥ +é¤IJ åħ· +ç¾Ĭ æ¯Ľ +没æľī å¿ħè¦ģ +ä¹IJ åĽ¢ +è¿Ľ åŁİ +ä¸ĢçĤ¹ åĦ¿ +身 å½¢ +çļ®èĤ¤ çĹħ +æĺ ± +å¢ŀ èĩ³ +èģ² æĺİ +æıIJ è´¨ +ä½ĵèĤ² åľº +çѹ 建 +é¬ Ĩ +车 çīĮ +éļĶ éŁ³ +è´Łè´£ åIJĮå¿Ĺ +丰 ç¡ķ +ä½Ľ éĻĢ +äºī åIJµ +åº ¶ +æ·¡ æ°´ +å°ı çĶ·åŃ© +ç§ģ èĩª +åĮĸ è¿Ľç¨ĭ +æĪĺ士 æĿ¥è¯´ +æ²¹ èħ» +èĦ±è´« èĩ´å¯Į +æĹ¥å¸¸ å·¥ä½ľ +交 èŀį +åĨľ è´¸ +åĨľè´¸ å¸Ĥåľº +åĵĪ çĻ» +ç͵ è´¹ +èµ ĺ +åıĮ èħ¿ +æĵĶ å¿ĥ +æĿ¥ 形容 +使åij½ æĦŁ +éĤ£ä¹Ī ç®Ģåįķ +èĬĻ èĵī +åĢŁæ¬¾ 人 +ç§Ģ 丽 +è®ĵ ä»ĸ +严åİī æīĵåĩ» +è³ ŀ +æļ « +çħ¤ æ°Ķ +çά ä¸Ĭ +æ½ĩ æ´Ĵ +太 ä¹ħ +åij½ åIJį为 +è·¯ çͱ +è·¯çͱ åύ +é© ¯ +æıIJ æĹ© +æĬĹåĩ» çĸ«æĥħ +åĩ Ľ +交 åıĭ +éĶĢåĶ® æ¸łéģĵ +毫ä¸į çĬ¹è±« +èIJ¥ åľ° +çłĶç©¶ 表æĺİ +é±¼ ç±» +æį¢ å±Ĭ +æİ¡ åıĸ +çī Ĩ +缼 å¼Ģ +æ²§ æ¡ij +åºŃ 审 +ç»ı æŁ¥ +åĬł å¼· +缸æ¯Ķ äºİ +ä¸ĵ çıŃ +ä½ĵ åŀĭ +被 害 +被害 人 +æĶ¶ 款 +åħ·æľī èī¯å¥½ +é«ĺå³° æľŁ +åģı ä½İ +åĦ Ł +åĨľä¸ļ ç§ijæĬĢ +ç®Ĭ æĥħåĨµ +å¦Ĥæŀľ çݩ家 +éķ¿ çº¦ +第åħŃ å±Ĭ +åħ¬å¼Ģ æĭĽèģĺ +åĪĩ æĸŃ +è¿« 使 +çĸĹ ç¨ĭ +第äºĮ ç§į +ä¸į åħį +å¹² èѦ +çŁ³ 榴 +åĹ £ +两 ç±» +çε 士 +åŁİ乡 å±ħæ°ij +æŃ¤ 项 +缴 è¾ĸ +缴è¾ĸ å¸Ĥ +åij¼ åºĶ +éĴ ¯ +ç¦ı å¾· +æľº 身 +æĵį åľº +æ¿Ĵ 临 +人群 ä¸Ń +èĤ¡ æ°ij +åŃ ½ +æ³ķ åħ° +é¨ İ +糯 ç±³ +æĢ» çļĦ +æĢ»çļĦ æĿ¥è¯´ +åħ¸ éĽħ +æĸ° éĻĪ +æĸ°éĻĪ ä»£è°¢ +缮 çĿ¹ +é¢Ħ è¨Ģ +è·Į çł´ +æĸ° ç¯ĩ竳 +æ¯Ĵ æĢ§ +åĸĿ èĮ¶ +æŁ¥ èİ· +亮 丽 +çĶŁäº§ åķĨ +æĶ¹ æĪIJ +为äºĨ æĽ´å¥½ +æ·± 交 +深交 æīĢ +æİ ĥ +ä¹Ļ èĤĿ +泸 å·ŀ +åħĪè¿Ľ æĬĢæľ¯ +è¾ĵ ç»Ļ +æķ£ æĪ· +æĢĿç»´ æĸ¹å¼ı +åºĹ 主 +è°ĭ æ±Ĥ +游æĪı æĬĢå·§ +ä¸Ģå¹´ 级 +çľ¼ è§Ĵ +ä¸Ńä»ĭ æľºæŀĦ +å·§ åIJĪ +éĺ² çĽĹ +导 è´Ń +æĪ Ĭ +æĽ´ éĢĤåIJĪ +åŁºæľ¬ ä¿¡æģ¯ +马 ä¸ģ +åħ»æ®ĸ åľº +åıį è¿ĩæĿ¥ +æİ¨ å´ĩ +å¯ĨåĪĩ åħ³æ³¨ +åŁºéĩij ç»ıçIJĨ +æĮī éĶ® +åĨħéĥ¨ æİ§åζ +æĪIJåijĺ åįķä½į +æľ¯ è¯Ń +åζ æľį +åĪļ éľĢ +æ£Ģ ç´¢ +大大 æıIJé«ĺ +åģ¥åº· 管çIJĨ +èĩª æŃ¤ +客æĪ· éľĢæ±Ĥ +丰 èĥ¸ +èµ· éĩį +èµ·éĩį æľº +æ¬ł 缺 +æ¡Ī åŃIJ +æĥħ人 èĬĤ +åħļ æł¡ +è¢ ľ +该 åī§ +迷失 ä¼łå¥ĩ +ç»ļ 丽 +åķ ª +æĹł ç§ģ +é̲ ä¸ĢæŃ¥ +第ä¸Ģ 竳 +åύ åħ· +åĨľ èµĦ +確 實 +åºı åĪĹ +娱ä¹IJ å¹³åı° +èŀįèµĦ ç§Łèµģ +èµĦæºIJ åħ±äº« +èģ½ åΰ +æIJŀ å¾Ĺ +ç»§ç»Ń ä¿ĿæĮģ +åIJ¯ èĴĻ +çľ º +ä¸Ŀ è·¯ +设æĸ½ 建设 +æİ¥ åľ° +æİ¥åľ° æ°Ķ +第ä¸ī åŃ£åº¦ +åŁº è°ĥ +åıij éŁ³ +社ä¼ļ èµĦæľ¬ +éĽĩ 主 +è¿ŀ èĥľ +没 åķ¥ +å» ¢ +èµ¶ èµ´ +æ¼Ķ åĮĸ +åı¤ æĢª +çİĭ çĪ· +é¢Ħ åħĪ +å¼Ģ åħ· +åĽŀ é¦ĸ +åľ°ä¸ĭ æ°´ +å°ıç¼ĸ ä¸Ģèµ· +èµİ åĽŀ +åľ° è²Į +åĪĿ ä¸ī +åı¯ ç͍äºİ +éģĹ è¿¹ +è¿Ļ æī¹ +èĸª æ°´ +å¿ħçĦ¶ ä¼ļ +æ² ½ +éį ĭ +第ä¸Ģ éĥ¨ +åĪĬ çī© +å®ŀ ä¾ĭ +æ¸ħ åĩĢ +ä¸Ĭ èµĽåŃ£ +åĽ¾ 表 +éĤ® è½® +åĵª 裡 +缸 è§ģ +æī° ä¹± +æ¯ı æ¯ı +è¿Ļ è¾ĪåŃIJ +ç¡« éħ¸ +äºī 缸 +溯 æºIJ +åĩº ä¼Ĺ +çİī çŁ³ +åħ± çĶŁ +æĹ¶éĹ´ 段 +éĩįè¦ģ æĮĩ示 +æ¶Īè´¹ éľĢæ±Ĥ +éķ¿ éķ¿ +éķ¿éķ¿ çļĦ +å®ī æĬļ +å¢ŀ é«ĺ +æľ¬ è½® +亲 çľ¼ +é£İ æ³¢ +èĢģ å¦Ī +æĶ¶è´¹ æłĩåĩĨ +åĨħ éĻĨ +æĮ¥ åıij +åįĩ åѦ +èĥ¸ åīį +åģı è¿ľ +纯 æ´ģ +æĸ½å·¥ åįķä½į +身 ä»· +è´¢ åĬĽ +çº ¶ +è£ħ çͲ +æĺ¾ç¤º åύ +毫 åįĩ +æ·± çŁ¥ +è̶ ç© +èĢ¶ç© Į +è¾ĥ éĩı +åľ¨ è¿ĩ渡 
+åľ¨è¿ĩ渡 æľŁ +èĮ Ĺ +ä¸Ģ个 æĺŁæľŁ +èĬ · +è´¿ èµĤ +æ¿ ķ +æĩĤ äºĭ +ç§ § +åħħ å½ĵ +åĽ½ ç«ĭ +èĬ± çĵ£ +éĤĦ è¦ģ +åħ¬ åľĴ +触 åĬ¨ +æ³° å·ŀ +ä»Ģä¹Ī æł· +æ»ĭ åħ» +è¯Ħ åΤ +æĮ¥ æīĭ +èĦ Ī +å§¥ å§¥ +è¿IJ è´¹ +æ¯ħ åĬĽ +å¿ĥ æĻº +ä¸į æİĴéϤ +第ä¸ī 代 +éĢĢ è´§ +æĺŁ éĻħ +æ°¸ åĪ© +æĬ¤ åį« +çıŃ è½¦ +è¨Ģ è¡Į +ç¹ ª +主åĬ¨ æĢ§ +å·¥ç¨ĭ è´¨éĩı +éĥĬ åĮº +ä¸Ģ æłĭ +ä½Ĩ å®ŀéĻħä¸Ĭ +ä¸ī大 èģĮä¸ļ +åij¼ åı« +女 åħĴ +è¯ģåΏ æĬķèµĦ +èĢĥ æħ® +çĤ« èĢĢ +æ²» 好 +åĺ ¶ +èĥ ¤ +åħīä¼ı åıijç͵ +åĩł æŃ¥ +æīĢ æīĢ +æīĢæīĢ éķ¿ +çħ§ æł· +åĵ¥ 们 +è¯ Ľ +è¿Ļä¸Ģ åĪ» +çŁ¿ çī©è´¨ +ä¸įå¾Ĺ å·² +åIJĮ 缣 +ç»Ĩ å¾® +è·¯ èĻİ +çϾ èĬ± +æ·· æ²Į +ä¸Ĭæµ· è¯ģåΏ +éĢĢ ç¨İ +èµŀ åı¹ +æī®æ¼Ķ 游æĪı +åIJį åĪĹ +åIJįåĪĹ åīį +åIJįåĪĹåīį èĮħ +ç±³ å°Ķ +ä»Ģä¹Ī åİŁåĽł +å®īåħ¨ ä¿Ŀéļľ +ä¸Ģåıª æīĭ +ä¹³ ä¸ļ +ä¸į çĶĺ +æĥħ åķĨ +æĮ¡ ä½ı +åİŁåĽł ä¹ĭä¸Ģ +è¿Ļ 两天 +çĥĺ çĦĻ +è± ¬ +ä½ł 以为 +没 è§ģè¿ĩ +åĵªå®¶ 好 +åīį ä»» +è¿Ľ è´§ +éĢĢ åĽŀ +串 èģĶ +èĩ³ æĸ¼ +åĨ° æ·ĩ +åĨ°æ·ĩ æ·ĭ +æŁ¥çľĭ 详æĥħ +çı¾ 實 +æİ¨ æµĭ +æİ¥ æīĭ +éļ¶ å±ŀäºİ +åŁİå¸Ĥ 群 +æĿİ åħĪçĶŁ +çŁ¿ æ³īæ°´ +çī¹ ä»· +æĽ´å¤ļ 精彩 +ç¨ĭ å¼ı +读 æĩĤ +å±ı èͽ +奥 æŀĹ +奥æŀĹ åĮ¹ +奥æŀĹåĮ¹ åħĭ +红 èĸ¯ +å¥ ® +å®Ŀ çİī +ç¶² 絡 +è² § +欧 å¼ı +çϽ ç³ĸ +èĩªçĦ¶ çģ¾å®³ +åijĬè¯ī 她 +å» ļ +çĤ¹åĩ» æŁ¥çľĭ +é£İ 湿 +èµĦ产 éĩįç»Ħ +ä¹Łä¸į ä¾ĭå¤ĸ +åįĬ 个å°ıæĹ¶ +åIJ¸å¼ķ æĽ´å¤ļ +æĹ¶éĹ´ èĬĤçĤ¹ +æĶ¶ 纳 +åIJ¸ æ¯Ĵ +èĢģ 乡 +çIJ ħ +æľĢ çµĤ +åıį æĦŁ +ç͍ 微信 +çĶ¨å¾®ä¿¡ æī« +éĢŁ çİĩ +大 çĨĬçĮ« +åı¯ æĥ³ +åı¯æĥ³ èĢĮ +åı¯æĥ³èĢĮ çŁ¥ +åĴ § +èµ° åħ¥ +碳 éħ¸ +èĮĥ åĨ° +èĮĥåĨ° åĨ° +被 åΤ +积æŀģ æİ¨åĬ¨ +è¶³ è¶³ +ç²Ĵ åŃIJ +大 å®Ĺ +大å®Ĺ åķĨåĵģ +ç½ij绾 ç§ijæĬĢ +æĽ¼ åŁİ +å·² ä¹ħ +å·²ä¹ħ çļĦ +秦 çļĩ +秦çļĩ å²Ľ +ä»» æķĻ +å͝ ç¾İ +æ·¡ åĮĸ +æ¡Ĥ èĬ± +çŁ¥è¯Ĩ åĪĨåŃIJ +æĩĴ å¾Ĺ +主 åħ¬ +设计 çIJĨ念 +è³ º +æīĢ æıIJä¾Ľ +æīĢæıIJä¾Ľ ä¹ĭ +æĶ» åħĭ +åĤ ¾ +è¯Ń æ³ķ +åįĥ åı¤ +éĸĭ æĶ¾ +第ä¸Ģ èĬĤ +éĤĦ æ²Ĵ +éĢĥ çĶŁ +æ³ Ĺ +åİ¿ å§Ķ书记 +ä½ľèĢħ æīĢæľī +çħ ½ +ç» ħ +æł ħ +æľ´ ç´ł +çijķ çĸµ +åĮħ åĮħ +æ°ij主 åħļ +ä¸į è¿ľå¤Ħ +å¥ĩ å¼Ĥ +åĺ» åĺ» +æī ¼ +ç¿» å¼Ģ +æĢİ èĥ½ +éģ´ éĢī +è§£ éĩĭ +å¹¼ ç¨ļ +è¦ģ 好好 +è¶´ åľ¨ +ç´¢ åıĸ +ç»Ī çĶŁ +åħ¨ æµģç¨ĭ +éģ© çķ¶ +åįıè°ĥ åıijå±ķ +æĬ¥ ä»ĩ +ç§ijæĬĢ åĽŃ +ä»Ģä¹Ī éĥ½ä¸į +æľĢåIJİ ä¸Ģ次 +ç»Ļ人 ä¸Ģç§į +æł¸ å®ļ +被 åĪĹåħ¥ +æĦı æĥ³ä¸įåΰ +èĢĥ æŁ¥ +åľ¨æŃ¤ ä¹ĭåīį +æīĵ çIJĥ +è¶ĬæĿ¥è¶Ĭ å°ij +å®ļ å¾ĭ +è¡ĮæĶ¿ æľºåħ³ +ä½ıæĪ¿ åħ¬ç§¯ +å°ıå§IJ å§IJ +ä¸ī èı± +ä¿® è¡¥ +èŀĥ èŁ¹ +西 çͲ +æĢ ł +çŃī å¤ļ项 +产ä¸ļ éĽĨèģļ +ä»·æł¼ ä¸Ĭ涨 +åħ¬åħ± åľºæīĢ +è¢ĭ åŃIJ +æĨ§ æĨ¬ +çļĦæĸ¹å¼ı æĿ¥ +åΰ è´¦ +çģ ½ +å·´ èı² +å·´èı² çī¹ +æ¼Ķ ä¹ł +èŃ¦ç¤º æķĻèĤ² +çķı æĥ§ +å¼ķ æµģ +æĶ¶ æĶ¯ +å±Ĥ åĩº +å±Ĥåĩº ä¸į +å±Ĥåĩºä¸į ç©· +æijĩ æ»ļ +辦 çIJĨ +纵 è§Ĥ +æķij æµİ +å®¶ éĥ½çŁ¥éģĵ +åĮ ¯ +å°ı 鸣 +ä»» åĭĻ +计 åħ¥ +ç«ŀ éĢī +å¼ĢèįĴ æĹ¶æľŁ +åij¨ æģ© +åij¨æģ© æĿ¥ +交 ç»ĩ +çķ¢ æ¥Ń +æł¹æį® èĩªå·± +æĸ°äºº çݩ家 +åѵåĮĸ åύ +éĩĩ æļĸ +å¹³åĿĩ æ°´å¹³ +åħ¬å¼Ģ 课 +失 åĪ© +伺 æľį +çĬ ģ +忽 æĤł +主è¦ģ éĽĨä¸Ń +æ¤į æłij +æ¯Ĺ éĤ» +èĩº çģ£ +åĩºåĽ½ çķĻåѦ +æĬĹ éľĩ +æĥ© æĪĴ +å¹´åºķ åīį +åĴ¸ éĺ³ +æ°ij å±ħ +大çIJĨ çŁ³ +éĿ ³ +éķ ĸ +æ¸ħ è¿ľ +è£ħ è½½ +èĩ Ģ +å½± ä¸ļ +å¼Ł åħĦ +æĤ² è§Ĥ +çĿĢçľ¼ äºİ +æįį åį« +åī¥ å¤º +ç¯ Ĩ +å¾Ī éķ¿æĹ¶éĹ´ +è¥ Ł +第ä¸Ģ çϾ +ä¸ĢåĪĨ éĴ± +æĸ°éĹ» è®°èĢħ +éķ· æľŁ +æ³ķ æĪĺç»ĦåIJĪ +è°ģ çŁ¥éģĵ +èħ° éĥ¨ +æ±ī åł¡ +åħ¥ çĿ¡ +åįĸ æİī +æ¶Īè²» èĢħ +æĥ¯ ä¾ĭ +æĥ³ äºĨ +æĥ³äºĨ æĥ³ +èĢģæĹ§ å°ıåĮº +ä¼ł è¨Ģ +åĪĨæķ° 线 +æµģ 泪 +ç»Ħç»ĩ é¢Ĩ导 +äºļ åĨĽ +å¢ŀå̼ æľįåĬ¡ +å¾ ¹ +ä¼ ¶ +äºĽ 许 +å¸ĥ èݱ +强 æĤį +宫 å»· +绿 èĮ¶ +åĮ ¡ +å¾Ī æŃ£å¸¸ +æĺ¥ å¤ı +æ¯ Ļ +è¯Ħ æ¯Ķ +åĩ¡ äºĭ +æĬī æĭ© +åĢĴ éľī +éĩį 度 +åįıä¼ļ ä¼ļéķ¿ +å¿§ èĻij +ä¸ĭ ä¸Ģç¯ĩ +沪 æ·± +æĪ İ +æīĵ ä»Ĺ +åįĪ é¥Ń +å¹´é¾Ħ 段 +ä¸ŃåĽ½ è¶³çIJĥ +设计 æĸ¹æ¡Ī +åºĶç͍ æŁ¥çľĭ +é¢Ħ æĸĻ +åĹ ¡ +ç¥ĸ çζ +çļĦä¸Ģ åijĺ +æ´Ĺ å¹²åĩĢ +åİĨåı² æĸ° +åİĨåı²æĸ° é«ĺ +çĭ¬ åħ· +æħĭ 度 +æīĵ 交 +æīĵ交 éģĵ +é»Ħ çŁ³ +çĽ¼ æľĽ +çī§ åľº +转 弯 +åįĩ åįİ +åĨį ä¹Łæ²¡æľī +èĭ± æīį +æĽ´ åIJį为 +åĢŁ ç͍ +çºł éĶĻ +ç»Ŀ对 ä¸įä¼ļ +çİĭ çīĮ +çĽĨ åľ° +失 è°ĥ 
+好 象 +é³ ¥ +ä¿Ŀ ä¿® +åĽĽä¸ª èĩªä¿¡ +头 çļ® +åİŁ åīĩ +æĬ¥ æ¡Ī +奴 éļ¶ +å³ Ļ +è°ĥ æĸĻ +ä¹Ł 許 +èIJ½ åΰ +èIJ½åΰ å®ŀ +èIJ½åΰå®ŀ å¤Ħ +çĦļ çĥ§ +çĶŁæ´» çݯå¢ĥ +åºĶ åıĬæĹ¶ +è¶Ĭ è¿ĩ +æĦŁ è¬Ŀ +æĻ¯ å¾· +æĻ¯å¾· éķĩ +çĬ Ģ +身 éĤĬ +ç¨İåĬ¡ æĢ»å±Ģ +åĩĢ åľŁ +ä¾µ åįł +åĬ¨ å·¥ +å¹´ ä¹ĭ +å¹´ä¹ĭ ä¹ħ +第äºĮ èĬĤ +åĬ¨çī© åĽŃ +第ä¸Ģ 书记 +éħ ļ +çĶŁäº§ 设å¤ĩ +æŁIJç§į ç¨ĭ度 +åľ Ń +åĩŃåĢŁ çĿĢ +éĺħ è§Ī +çϽ æ²Ļ +æ²¹ çĥŁ +çªģçł´ åı£ +åıĹ å½±åĵį +åı¯ä»¥ æĽ´å¥½ +å³° å̼ +æĿĤ è´¨ +宿 è¿ģ +çĽĺ æ´» +æ¿Ģ èµ· +åĦ¿ ç§ij +åĿIJ èIJ½åľ¨ +æĮª å¨ģ +æµ· å²Ľ +绣 绣 +éĻ ¨ +ä¼ĺ äºİ +å°Ī å®¶ +ä¸Ģ éĤĬ +èIJ Ĭ +äºĨä¸Ģ åı£ +æ²ĥå°Ķ æ²ĥ +æŃ£å¸¸ 使ç͍ +æĻ®éģį åŃĺåľ¨ +丰 满 +çĶ» åį· +åºĶ æĶ¶ +åºĶæĶ¶ è´¦ +åºĶæĶ¶è´¦ 款 +å®Įæķ´ çĥŃ +å®Įæķ´çĥŃ æ¦ľ +注 è§Ĩ +çĨ Ħ +èº ¬ +éĶĢåĶ® 人åijĺ +è¶ĭ åIJij +çĦ¦ æĢ¥ +åįģå¹´ åīį +ä¼łç»Ł 产ä¸ļ +質 éĩı +åĩ¤åĩ° ç½ij +èµĦæºIJ æķ´åIJĪ +æ¶Į åħ¥ +æĸĩåĮĸ ä¼łæĴŃ +çķĮ 第ä¸Ģ +æ°´ æ³µ +宫 殿 +æİ¢ 寻 +ä¿® åīª +æĦı è¦ĭ +ç´Ĭ ä¹± +æĽ ī +çϽ è¡£ +èĻİ åį« +ç´§ æī£ +å¤Ħå¤Ħ éķ¿ +åĪĽå»º å·¥ä½ľ +红 æŀ£ +饼 å¹² +äºĨ åįĬ天 +ä¼ļå½±åĵį åΰ +çĽ¸ä¿¡ 大家 +èħ¾ é£ŀ +å°± å¦ĤåIJĮ +ä¸ĭéĿ¢ å°ıç¼ĸ +æ°ijèIJ¥ ç»ıæµİ +æĻ ¦ +è£ħ æī® +é»ij å¤ľ +常 å¾· +å·¥ä¸ļ 大åѦ +æĺİ çŁ¥ +éĺŁåijĺ 们 +åIJ¬ 课 +æ¯ı éļĶ +羣æĺ¯ 太 +åIJĪä½ľ åħ±èµ¢ +çIJĨ åıij +æīį å¹² +çľĭ èµ·ä¾Ĩ +殿 ä¸ĭ +å®ī éĺ³ +æīĢ äº§çĶŁçļĦ +éĽĩ ä½£ +æĬ¬èµ· 头 +æį® æĬ¥éģĵ +éļĨéĩį 举è¡Į +交 éĶĻ +è¶ħ é¢Ŀ +åĮĸ çĸĹ +é¡ Ĩ +纵 æ·± +çĪ±åĽ½ 主ä¹ī +éĻ¢ åī¯éĻ¢éķ¿ +è® ³ +羣æŃ£ åģļåΰ +åѤ åįķ +èĩªçĦ¶ èĢĮ +èĩªçĦ¶èĢĮ çĦ¶ +ä¿® 身 +èĬ ¹ +æģ¯ æģ¯ +æģ¯æģ¯ 缸åħ³ +驾 æł¡ +æİ© 饰 +æ³½ è¿ŀ +æ³½è¿ŀ æĸ¯åŁº +举 æŃ¢ +管çIJĨ ä½ĵåζ +åħ¶ä¸Ń ä¹ĭä¸Ģ +æĿ¾ å¼Ľ +æĭ¦ æĪª +åį« åģ¥ +åį«åģ¥ å§Ķ +ä»İ åݻ年 +åĤ ¢ +è´Ń 票 +åĽ¾ æłĩ +æ²³ 西 +æ°ijæĶ¿ å±Ģ +ç§ģ èIJ¥ +å¤ĸåĽ½ è¯Ń +å¹² è´§ +æĵ¦ æĭŃ +åľ° ä¸Ń +åľ°ä¸Ń æµ· +æµĵ æµĵ +æµĵæµĵ çļĦ +å§ĭ 建 +å§ĭ建 äºİ +ç¶ĵ æŃ· +è·¯ æ¼Ķ +æļ´ é£İ +åŁº è¾ħ +æī¶è´« å·¥ä½ľ +ä¸Ģ缴 å¤Ħäºİ +æĥħ è¶£ +äºĮ åŃ£åº¦ +åİĮ æģ¶ +顺åĪ© å®ĮæĪIJ +æŁ¥ å°ģ +é¡¶ 端 +ä¸į åŃķ +ä¸Ģ大 åłĨ +被 æ·ĺæ±° +æĺ¯ ç͍æĿ¥ +æľĢ åIJĪéĢĤ +亮 çľ¼ +å¹¶ä¸įæĺ¯ å¾Ī +ç§ijçłĶ éĻ¢ +ç§ijçłĶéĻ¢ æīĢ +ç² Ł +é¢Ī éĥ¨ +é»ĺé»ĺ åľ° +é«ĺä¸Ń çĶŁ +æĹıèĩªæ²» åİ¿ +æķĻåѦ è´¨éĩı +æĪĺ çģ« +åĿİ åĿ· +æIJŃ ä¹ĺ +è¯Ĺ æĦı +åĪij èѦ +åĩº æ±Ĺ +åįģåħŃ æĿ¡ +请 åıĬæĹ¶ +åĨľä¸ļ 大åѦ +èIJ½ åı¶ +æĢ» èĢĮè¨Ģ +æĢ»èĢĮè¨Ģ ä¹ĭ +æĿľ åħ° +æĿľåħ° çī¹ +éĻª ä½ł +åħ¬ æĬ¥ +çķĻè¨Ģ æĿ¿ +éĺħ åİĨ +ç«¶ çĪŃ +ç»Ļ åĪ«äºº +æĹ¥æĬ¥ 社 +åĿIJ èIJ½ +åĿIJèIJ½ äºİ +éĩij åŃĹ +éĩijåŃĹ å¡Ķ +åĽ ¤ +è¯Ŀ åī§ +æĮģç»Ń æİ¨è¿Ľ +æ¼ı æ°´ +詳 ç´° +æĢĢ æĬ± +åıĺ å¹» +饥 饿 +éļIJ 身 +个 èµĽåŃ£ +åĵ¡ å·¥ +æģ¢å¤į æŃ£å¸¸ +äºĨ 好å¤ļ +æĺŁ å·´ +æĺŁå·´ åħĭ +åħī çݯ +å¸ħ åĵ¥ +çϽ éĽª +ç¨į ç¨į +计 æıIJ +æĦĽ æĥħ +éİ ĸ +ä¿¡ éĺ³ +è§Ģ å¯Ł +å¦Ĥæŀľä½ł æĥ³ +缸æ¯Ķ ä¹ĭä¸ĭ +è§£ å¼Ģ +æīĵåį° æľº +身 躯 +ç²¾ç¥ŀ æĸĩæĺİ +èĤ¡ æĮĩ +å¾® åĪĽ +红 èĮ¶ +èĩ´ çĻĮ +æģ© æĸ½ +èħ¿ éĥ¨ +大åŀĭ å¤ļ人 +å®ī åĢį +è¾ħ导 åijĺ +èĪª éģĵ +å¸ĥ å°Ķ +åįĹå®ģ å¸Ĥ +ä¸ĬçıŃ æĹı +ä¾§ ç»ĵæŀĦæĢ§ +追 éļı +å½ĵåľ° æĶ¿åºľ +èµ° åĩºæĿ¥ +éĩijèŀį ä¸ļ +丼 书 +é¡¹çĽ® ç»ıçIJĨ +è¿ĩ æĪ· +骨 æŀ¶ +è¡ Ļ +ä»Ģ 麽 +èħ ĭ +è¦ģ 害 +åľ¨ åºĬä¸Ĭ +代è¨Ģ 人 +並 å°ĩ +åIJĦ个 æĸ¹éĿ¢ +è°´ è´£ +åħ± æĮ¯ +åį³å°Ĩ åΰæĿ¥ +èĤº çĻĮ +ä¾Ľ éĶĢ +丼 æŀĹ +èµ ĥ +åįģä½Ļ å¹´ +åĭĺ æİ¢ +飵 åij³ +èĭ¦ ç¬ij +æľĢ大 ç¨ĭ度 +éĩįçĤ¹ åħ³æ³¨ +ä¹ĭ 举 +满 æĢĢ +åıĹåΰ å½±åĵį +æĭĽ æĬķæłĩ +è¡¥ é½IJ +西 红 +西红 æŁ¿ +é¬ § +è£ħ åᏠ+éĤ» éĩĮ +èĤĩ äºĭ +æİĴ æ¯Ĵ +åѤ åĦ¿ +鼶 è·Ŀ离 +å®ŀ å¹² +çľĭ æŁ¥çľĭ +æĶ¶è´¹ ç«Ļ +ç» · +åħ¬çĽĬ æĢ§ +éĢĴ ç»Ļ +æĶ» æīĵ +æĺŁçº§ éħĴåºĹ +æĺİ åªļ +ç፠ç«ĭ +è¯Ŀè¯Ń æĿĥ +ä¸ĢæŃ¥ ä¸ĢæŃ¥ +书æ³ķ å®¶ +æľªç»ı æİĪæĿĥ +çŁ³ èĨı +åĩŃ ä»Ģä¹Ī +çļĦ æĹ¥ +çļĦæĹ¥ åŃIJéĩĮ +诱 人 +çϾåĪĨ çϾ +èĪĪ è¶£ +å¼ł åħĪçĶŁ +èĢģçĪ· åŃIJ +æ³¢ çī¹ +åŁºéĩij 份é¢Ŀ +æ²Ļåıij ä¸Ĭ +å¥ĭæĸŠ缮æłĩ +æ°¢ èĥ½ +æ²ĥå°Ķ çİĽ +義 åĭĻ +éŁ³ ç®± +æ²ī 浸 +æ²ī浸 åľ¨ +èĭ± åľĭ +çģ¯ çģ« +è¿Ľ 项 +两 端 +ä¹Ķ 丹 +èĦ¸ é¢Ĭ +åıijå±ķ æ½ľåĬĽ +åĭķ ä½ľ +åĵĪ ä½Ľ +å®´ ä¼ļ +æ§ į +ç«ĭ å¿Ĺ +ç¡ķ士 åѦä½į +åĭĭ 竳 +è¿Ļ åľºæ¯ĶèµĽ +æĮģ å¹³ +éķĢ éĶĮ 
+èĭ± çī¹ +èĭ±çī¹ å°Ķ +æķĻ èģĮå·¥ +åĬŁ åĬĽ +该 æ¡Ī +ä¸Ģ æ¢Ŀ +åĺī å¹´ +åĺīå¹´ åįİ +è¿« ä¸įåıĬ +è¿«ä¸įåıĬ å¾ħ +è¿Ļ个 æĹ¶ä»£ +精彩 æĴŃæĬ¥ +人 èĦ¸ +人èĦ¸ è¯ĨåĪ« +æ£Ģå¯Ł å®ĺ +å°ı èħ¿ +éĨĴ 缮 +åħļ æĢ» +åħļæĢ» æĶ¯ +æĪ Ł +èĮ« çĦ¶ +è±Ĩ æµĨ +主 æ²» +éĿĴæµ· çľģ +åĪijäºĭ 责任 +çł ° +ä¹ĭ æ¬ĬåĪ© +äºĶ å®ĺ +è¿· æĥij +åħ¥ åºĵ +å®¶ 纺 +å¼¹ ç°§ +åįģäºĶ æĿ¡ +ç»Ļ å®Ŀå®Ŀ +èĪªç©º èĪªå¤© +å¾Ģ å¤ĸ +å¼ķ åĬĽ +çľ¼ çļ® +æ¶ī è¶³ +æĿ¥ 宾 +åľ¨çº¿ è§Ĵèī² +çĥŃ éĶĢ +æµģ éĢĿ +泡 泡 +éĻį å¹ħ +è´ŁéĿ¢ å½±åĵį +红 楼 +红楼 梦 +éļĶ çĿĢ +ä¾¥ 幸 +许 ä¹ħ +åĴĮ çĿ¦ +èŃ ½ +使ç͍èĢħ æĪĸ +ä¹° åįķ +è¿ ´ +é£İ æīĩ +æķĻ å¸« +æ¡ĮåŃIJ ä¸Ĭ +å¾Ī æ¼Ĥ亮 +åł± å°İ +第ä¸Ģ åŃ£åº¦ +ç©© å®ļ +æĤ² åĵĢ +çĿĢåĬĽ æīĵéĢł +æĮ Ł +è·¯ æ¡¥ +åij IJ +åľ£è¯ŀ èĬĤ +çļĩ åŃIJ +ä»ĩ æģ¨ +éħĿ éħ¿ +ä¸į éĹ´ +ä¸įéĹ´ æĸŃ +æĮĩ å°ĸ +ä¸ŃåĽ½ ç½ij游 +åŀ £ +æĦıè§ģ 建议 +æ¯ħ çĦ¶ +亮 度 +èģĶ è°Ĭ +å½ķ åħ¥ +åĦ ² +å¨ĺ å®¶ +ç§ij å°Ķ +ä¹Łæ²¡ ä»Ģä¹Ī +æł¹æį® ä¸įåIJĮ +åı¶ ä¿® +å̼ å®Ī +æľ« 端 +åĪ ¨ +åĤµ åĭĻ +èģ¯ åIJĪ +å¥ĩ å¹» +èĻļ æŀĦ +é»Ħ æĺı +å¹³ åĿ¦ +æµģ æ°ĵ +æĸ° åŁºå»º +æĮ½ æķij +åįİ å°Ķ +åįİå°Ķ è¡Ĺ +æľĢ åıĹæ¬¢è¿İ +ç»Ń 约 +å¼Ĭ 端 +éŃĶ æ³ķå¸Ī +éŃĶæ³ķå¸Ī åĴĮ +åħ·ä½ĵ åĨħ容 +çIJī çĴĥ +æī© 容 +èĮ¶ åĽŃ +主ä¹ī èĢħ +ç«ĭ éĿ¢ +æİ¥åıĹ éĩĩ访 +åĩº åħ¥å¢ĥ +ç§ij åįı +éĴ ³ +çµIJ æ§ĭ +ç»ĵæŀľ æĺ¾ç¤º +åı° è´¦ +å°± æĿ¥çľĭçľĭ +èĩª æķij +åıį æĩī +åİ» åĵªåĦ¿ +è¿Ļ é¦ĸ +è¿Ļé¦ĸ æŃĮ +åIJ¬ ä¼Ĺ +å¤ĸ 壳 +ä½ĵèĤ² é¦Ĩ +實 æĸ½ +èŀº ä¸Ŀ +æĭī åįĩ +çĮĽ åľ° +åħ¨åĽ½ 人æ°ij +æĤī å°¼ +æĹı 群 +åĽ¢ åijĺ +两个 å°ıæĹ¶ +åľ¨ çݩ家 +åľ¨çݩ家 ä¸Ń +çĶľ çĶľ +æĬķ è¡Į +åįĶ æľĥ +éĻ ¡ +åĬłå·¥ åİĤ +æ¦Ĩ æŀĹ +æŃ» è§Ĵ +åĨħ å¹ķ +æīĢæľī æĥħèĬĤ +åĪ· åį¡ +æ°´ èĤ¿ +èĥĥ åı£ +å«Į å¼ĥ +æ²® 丧 +ä¸īå¹´ 级 +æ¶Ĥ å±Ĥ +å¿ĥ 仪 +å¿ĥ仪 çļĦ +å¤ Ń +é¦ĸ è½® +æĹłè®ºæĺ¯ åħ¶ +éĢı æ°Ķ +äºĮ åįģäºĶ +ç® « +åĬŁ åĬ³ +çѾ ä¸ĭ +æ²ī è¿· +æķij åij½ +éĹª éĹª +åIJĥ äºı +å±ķ åĵģ +åį³æĹ¶ åıijçĶŁ +ç¶ ľ +ç¶ľ åIJĪ +æłĩ æĺİ +çľĭ ç͵影 +åħ¬ 竳 +éĺ¿ æ£® +éĺ¿æ£® 纳 +身 åĪĽéĢł +身åĪĽéĢł çļĦ +æ¸Ľ å°ij +å̼å¾Ĺ åħ³æ³¨ +鼶åĶ® åķĨ +æįĨ ç»ij +è¸ı åħ¥ +èĽ Ł +æŁ´ 纳 +èĢģ åħµ +绿èī² çݯä¿Ŀ +é¹ Ń +麻 æľ¨ +æıŃ çīĮ +è¿Ļ款 车 +ç¾İ å¾· +ç¾İå¾· åħ¬åı¸ +æ¶ § +è°ģ çŁ¥ +æ´ĭ èij± +æ¯į æł¡ +ä¸Ģ éĹª +çĶ· 主è§Ĵ +æĹłçº¿ ç͵ +å±ł å®° +æĺ¯ éŁ©åĽ½ +æĺ¯éŁ©åĽ½ 娱 +容 è²Į +åĿĩ 使åħ¶ +太 å¿« +å¹´ çͱ +å¹´çͱ 缼 +èĭ¦ èĭ¦ +åĬĽ è¿ĺæĺ¯ +åĬĽè¿ĺæĺ¯ èĩª +æĨ © +èģ¯ çµ¡ +åĶ ¾ +åħ·æľī æĪĺ士 +追 éĹ® +åłĨ æĶ¾ +åıį 驳 +å®ŀäºĭ æ±Ĥ +å®ŀäºĭæ±Ĥ æĺ¯ +åѸ éĻ¢ +åįģ åĩłä¸ª +æķij æĬ¤ +æķijæĬ¤ 车 +ç½ij绾 ä¼łæĴŃ +åįģåħ« å±Ĭ +éĥ¨ åī¯ +éĥ¨åī¯ éĥ¨éķ¿ +çĹ´ è¿· +管çIJĨ æĿ¡ä¾ĭ +èŀį 为ä¸Ģä½ĵ +æĢ» 产å̼ +è³ ĵ +ä¸ĥ æĺŁ +çıŃ ç»Ħ +绣 é¢Ĩ +请 大家 +éĩij éϵ +èĪħ èĪħ +æµ· æ¹¾ +æĸ½ çŃĸ +享 èªī +éº ¥ +端 åįĪ +绿 åŁİ +確 ä¿Ŀ +å·´ æĭī +åĨĴ çĿĢ +æħ· æħ¨ +个人 è§ĤçĤ¹ +ä¹Ļ çĥ¯ +ç¡ħ è°· +éĸĭ å±ķ +å°ļ 书 +åĿļ 飧 +åº µ +èĢģ é¾Ħ +èĢģé¾Ħ åĮĸ +羨 çľ¼ +绿 æ°´ +绿水 éĿĴå±± +书 é¦Ļ +主åĬĽ åĨĽ +æīįæĺ¯ 羣æŃ£ +æĬ¢ åħĪ +æĪIJå°± æĦŁ +éĩį æŀĦ +éĴ¢ åİĤ +æĪIJ 份 +èĬ± 纹 +ä¹ĭ äºī +å¹² ç»Ĩèĥŀ +æĹ¢ åı¯ä»¥ +ç¹ģ çIJIJ +æĦļ èł¢ +éĿŀ常 æĺİæĺ¾ +ä½ĵ 彩 +æĬĢ æ³ķ +æĿĨ èıĮ +å¹¿æ³Ľ åħ³æ³¨ +åĮĹ å®ĭ +å§Ĭ 妹 +åįı åĬŀ +æ·® åįĹ +çĥ ı +æ´Ĺ èĦ¸ +åıĹ è®¿ +åıĹ访 èĢħ +éĩįè¦ģ åĽłç´ł +å½±è§Ĩ åī§ +综èīº èĬĤ缮 +èľķ åıĺ +äºĮ 线 +äºĮ线 åŁİå¸Ĥ +ä¼Ĭ å§ĭ +çıĬ çijļ +èĩª æŁ¥ +åħ¥ åĽŃ +åĩ¶ æīĭ +åħ¬ è¯ī +éģĩ éļ¾ +éĩĩçŁ¿ çŃī +èĩª çIJĨ +åĸ· æ¶Ĥ +æī© åħħ +éĢı è§Ĩ +é«ĺéĢŁ å¢ŀéķ¿ +åĽ¾ çĶ» +ç¾ ¹ +èĤĩ åºĨ +è¾ľ è´Ł +èµĶ ä»ĺ +è· ¡ +åģ¥åº· æĪIJéķ¿ +以ä¸Ĭ åѦåİĨ +åıĸå¾Ĺ 以åıĬ +æ²ī 积 +åįģä¹Ŀ å±Ĭ +缸éĹľ æľįåĭĻ +æī§ åĭ¤ +åī¯ åİ¿éķ¿ +å¯ ° +åģľ æ»ŀ +æ·¹ 没 +çŁ³ çģ° +çį ¸ +åĢ ¦ +ç¾İ åªĴ +æķĻ æ¡Ī +åĬł çĽĸ +åħ¬å¼Ģ èµĽ +å¥ł åŁº +æĺĨ èĻ« +çŀ ħ +磷 éħ¸ +äºī åĪĽ +çİĭ æĻĵ +ç¼ĵ åĨ² +åİļ åİļ +åİļåİļ çļĦ +æŀ£ åºĦ +ç²¾ çĽĬ +ç²¾çĽĬ æ±Ĥ +ç²¾çĽĬæ±Ĥ ç²¾ +åĪĨæĶ¯ æľºæŀĦ +å®ŀæĸ½ ç»ĨåĪĻ +æĸ° èµĽåŃ£ +總 çµ± +éĢł è¡Ģ +é¢ĩ åħ· +é»Ħ åŁĶ +è¡Ģ èĦĤ +交éĢļ å·¥åħ· +å³ ¥ +æĹıèĩªæ²» å·ŀ +寺 éĻ¢ +確 å®ļ +æ¦Ĥ念 èĤ¡ +æĦŁ å®ĺ +æŁľ åı° +åĶ Ķ +çŀŃè§£ 並 +æĢ» ä»· +åIJ¸ åħ¥ +æĢ ¼ +æĻļ éĹ´ +å±Ĭ 
æ¯ķä¸ļçĶŁ +çĶŁ å§ľ +éĺħ读 åħ¨æĸĩ +å¾Ĺåΰ æľīæķĪ +æIJľ æķij +åİĨ æĿ¥ +èŃī æĺİ +åĥ » +èĨ³ é£Ł +åĦĦ åħĥ +æīĵ åİĭ +宾 客 +åķ ¼ +ä¸ĢçϾ å¤ļ +æ·±åħ¥ 人å¿ĥ +æ¢ħ å·ŀ +çłĶ åѦ +åħ³ ä¹İ +è¼ Ľ +亲 åıĭ +éħį æĸĻ +æĪij çĪ±ä½ł +è´¸æĺĵ æĪĺ +æľī èī² +æľīèī² éĩijå±ŀ +æįIJ åĬ© +为 é¦ĸ +为é¦ĸ çļĦ +å¯Į åĬĽ +çĶ· ç¥ŀ +é³ ³ +æµĩ æ°´ +åIJ ± +æĺİç¡® æıIJåĩº +åı¹ äºĨ +åı¹äºĨ åı£æ°Ķ +礼 æĭľ +è¿Ļ个 åIJįåŃĹ +ä¿¡ å¾Ĵ +å¿Ĺ 强 +éĻIJ æĹ¶ +æĶ¶ è²» +åĨľå®¶ ä¹IJ +å°ıé¾Ļ èϾ +èIJ½ å¹ķ +æ§ Ł +åѦ 龸 +æĪĸ å¤ļ +æĪĸå¤ļ æĪĸ +æĪĸå¤ļæĪĸ å°ij +座è°Ī ä¼ļä¸Ĭ +æ¶ ¼ +éŃĶ çİĭ +å² ± +é¡¶ å±Ĥ +é¡¶å±Ĥ 设计 +èĦij åŃIJéĩĮ +éĻ¢ åŃIJéĩĮ +轩 è¾ķ +身å¿ĥ åģ¥åº· +èħ ij +éĹľ 注 +åıĤåĬł ä¼ļè®® +ä¸Ńåįİ æĸĩåĮĸ +追 寻 +å®ī çĦ¶ +é£Ļ åįĩ +éŁŃ èıľ +é¸ ¦ +åĤ¨ éĩı +çĶ· æĸ¹ +å¤ĩ 份 +æijĶ åĢĴ +润æ»ij æ²¹ +é̼ è¿ij +çͳ è¯ī +鸣 ç±» +çŁ³æ²¹ åĮĸå·¥ +åĿļ æŀľ +è¿Ļå®¶ ä¼Ļ +æĭĴ ä¸į +羣 çļ® +è·Ŀ éĽ¢ +è¿ĺ æĮº +éĽķ åĥı +åĪĿ æģĭ +æıIJä¾Ľ æĽ´å¤ļ +æŁ¥çľĭ åħ¨æĸĩ +æķ°åŃĹ è´§å¸ģ +åĸī åĴĻ +åı¦ä¸Ģ ä½į +åĤ¬ åĮĸ +åĤ¬åĮĸ åīĤ +ä»İæĿ¥ 没 +å¯ĨåĪĩ 缸åħ³ +éĥ¨ 主任 +产åĵģ ç»ıçIJĨ +並 åIJĮæĦı +èIJ½ åħ¥ +å±ıå¹ķ ä¸Ĭ +åħ¬åı¸ 竳ç¨ĭ +æį¢ åı¥è¯Ŀ +æį¢åı¥è¯Ŀ 说 +ä½į æĸ¼ +ä½ Ķ +åĩ» æĿĢ +缸 è¾ĥ +缸è¾ĥ äºİ +ç²½ åŃIJ +åįĹ æŀģ +宫 é¢Ī +è£ģ åijĺ +æĺİ ç»Ĩ +ä»·å̼ éĵ¾ +åĽĽä¸ª æĸ¹éĿ¢ +æĥħåĨµ æĿ¥çľĭ +æĮij åīĶ +æ® ĺ +æŀģ åĬĽ +çĸij éļ¾ +æĬµæĬĹ åĬĽ +æĢ¥ éĢŁ +æĪ Į +ä½İ ä¼° +éĹª è¿ĩ +æģ ¬ +èµŀ æī¬ +ä»ĸ å¦Ī +æĪIJ为 ä¸ĢåIJį +æ´Ĺ 礼 +é¢Ħ计 å°Ĩ +åħĪè¿Ľ åįķä½į +è¼ Ķ +éĢĥ èĦ± +çݰ åŃĺ +èĢģèĻİ æľº +åįģä¸ĥ æĿ¡ +åı¦ä¸Ģ åįĬ +温 æĥħ +åī¥ ç¦» +ä¸ĸ è´¸ +å®ĺ åı¸ +å¾Ī å·® +éĹ´ è·Ŀ +请 注æĦı +åı² è¯Ĺ +åĪ© åύ +è¿IJ ç®Ĺ +沦 为 +該 使ç͍èĢħ +èĮ ¬ +éͦ 绣 +åı² æĸĻ +çģµ æ´»æĢ§ +èģĶ ç¤¾ +æĹł åĬ© +æĬĹ æ°§åĮĸ +èıľ èĤ´ +éĢł èι +æİī èIJ½ +å¤į æŁ¥ +åĭĥ åĭĥ +åij¼ 声 +給 äºĪ +åIJĮäºĭ 们 +ç½ ° +è¯ķ æİ¢ +åħ³éĶ® åŃĹ +æįIJ çĮ® +ç»Łè®¡ æķ°æį® +åĪĽ ä½ľèĢħ +ä¸ĭ åįĬ +ä¸ĭåįĬ åľº +æī¿æĭħ 责任 +端 æŃ£ +ç©¿ è¡£ +ä¼ł çIJĥ +åĬ© éķ¿ +åĩ ± +éķ¶ åµĮ +é£ŀ ç¿Ķ +è¾ĵ åįµ +è¾ĵåįµ ç®¡ +ä¸ĩ åħ¬éĩĮ +æİ¨å¹¿ åºĶç͍ +å¿« æ¨Ĥ +ç§ ½ +èī° å·¨ +åIJ¬ å®Į +åĿļ 硬 +奥 åľ° +å¥¥åľ° åĪ© +é¢ ĵ +èĻIJ å¾ħ +ä¾Ľ æ±Ĥ +éľī ç´ł +伪 è£ħ +乡 åľŁ +åĩ¡ æľ¬ç½ij +åĩ¡æľ¬ç½ij 注 +ä¼Ĭ åĪ© +è¡¡ æ°´ +æĽ´ åĥıæĺ¯ +åĪĨéĴŁ å·¦åı³ +è¦ı 模 +äºĶ åĪĨéĴŁ +åºĹ åĬłçĽŁ +åĽ° éĽ£ +åħ³ åģľ +æĢĿ 绪 +åĴ½ åĸī +缸 符 +çĥ¦ èºģ +æĻĤ æľŁ +åijĪ çı¾ +è§£ æķ£ +诱 导 +éļĶ çĥŃ +çĮ ¶ +åįĹ å®ĭ +æ·±åħ¥ äºĨè§£ +çŃĶ çĸij +æĺ¼ å¤ľ +åįĥ ä¼ı +åĬ³åĬ¡ æ´¾éģ£ +红 è±Ĩ +åĿı äºĭ +çĤ¹ æ»´ +å°±ä¸ļ å²Ĺä½į +约 åIJĪ +åħį éϤ +éĢĨ åĬ¿ +éĩį éĩijå±ŀ +å®ĺ 宣 +ä½İ å»ī +æģ¨ ä¸įå¾Ĺ +å¾Ĺ 天 +å¾Ĺ天 çĭ¬ +å¾Ĺ天çĭ¬ åİļ +ä¸Ģå°ģ ä¿¡ +æĬ½ å¥ĸ +è¾Ĺ 转 +çķĻ å®Ī +çķĻå®Ī åĦ¿ç«¥ +çŃĶ åį· +å·¨ åŀĭ +æľĢ好 ä¸įè¦ģ +æµĻæ±Ł 大åѦ +æĨ ¨ +æı¡ æīĭ +éĴĪ ç»ĩ +æİĴ 骨 +çĤ ½ +å°ģ è£ħ +åįĢ åŁŁ +空æ°Ķ åĩĢåĮĸ +åħī å½± +åĢĴ å¡Į +å§ļ æĺİ +æ¤į 被 +åѦ åīį +åѦåīį æķĻèĤ² +èĬĿ åĬł +èĬĿåĬł åĵ¥ +缩 æ°´ +ä½ Ł +åľ¨çº¿ åĴ¨è¯¢ +èµı æŀIJ +éĿĴ èĽĻ +æĬ± ä½ı +èĮĤ åIJį +åħ¨åĬĽ æīĵéĢł +åįļ士 åѦä½į +æ²§ å·ŀ +åĻ ¢ +æĿĤ çī© +åĪ» çĶ» +æį ħ +å¾® éĩı +å¾®éĩı åħĥç´ł +ä¸Ģ åĽŀäºĭ +鸡 èĤī +åĪ©æ¶¦ çİĩ +æīį ç®Ĺ +å¾® å¦Ļ +棵 æłij +è´ª 婪 +åĩı å̼ +梦 å¢ĥ +åı¯ è§Ĩ +åı¯è§Ĩ åĮĸ +广大 å¸Ĥæ°ij +ä¸ĵä¸ļ ä»İäºĭ +ç»ı 纬 +ç´§ çĽ¯ +çŁ¥ å·± +è¤ ļ +æĸĩåĮĸ åºķèķ´ +åݦéŨ å¸Ĥ +临 港 +对åħ¶ 羣å®ŀ +岸 è¾¹ +è¦ĸ çĤº +æĬĹ çĻĮ +åĶIJ å®ĩ +ä¸įå¾Ĺ è¶ħè¿ĩ +å¨ģ æħij +æ¡Ĩæŀ¶ åįıè®® +èµ° ç§ģ +åĽ¢ å§Ķ +夸 大 +æ¬ Ħ +ç¥ŀç»ı ç³»ç»Ł +æijĦå½± ä½ľåĵģ +èĬ ¥ +å®ī åºĨ +æµ· 滨 +æŀĦ æĢĿ +çīµ æĮĤ +åı © +éĺIJ æĺİ +éģ ģ +ç²¾ æ²¹ +ç©´ ä½į +æĬ¤ 身 +æĬ¤èº« 符 +æĮĩ å°İ +åŃĺåľ¨ ä¸Ģå®ļ +å¯Ĥ éĿĻ +æµ·å¤ĸ å¸Ĥåľº +éĿ ¡ +综åIJĪ å¾ģ +ä¿ IJ +è¨Ī ç®Ĺ +æĺİ æľĹ +äºļ è¿IJ +äºļè¿IJ ä¼ļ +åīįçŀ» æĢ§ +åĮ® ä¹ı +产ä¸ļ æī¶è´« +èĦij æµ· +èĦijæµ· ä¸Ń +åħļçļĦ é¢Ĩ导 +åĪĺ éĤ¦ +æµģ æĺŁ +æĵ Ĥ +æĶĢ çĻ» +åĴ Ķ +ä¸Ģä¸ĭåŃIJ å°± +è¯Ĭ æ²» +使 åĬ² +åīµ ä½ľ +éĵŃ è®° +éĴ± è´¢ +æĹ¥æĬ¥ è®°èĢħ +çĥŁ çģ« +èĥľ è´Ł +åįļ 主 +ä¸ŃåĽ½ èģĶéĢļ +ç½ijç«Ļ é¦ĸ页 +å°± å¤Ł +å°±å¤Ł äºĨ +æīij åħĭ +å±ħ å§Ķä¼ļ +è° ¬ +å®īåħ¨ 
äºĭæķħ +åķĨ çĶ¨è½¦ +循çݯ ç»ıæµİ +æ· ¤ +èĢĥ è¯ģ +å®Ŀ èĹı +å®Į ç»ĵ +çłĶåıij æĬķåħ¥ +å² ij +æģŃ æķ¬ +离 éĢĢä¼ij +æ°´ 墨 +å© ¶ +è¯Ĺ åı¥ +å®ģæ³¢ å¸Ĥ +å¼± çĤ¹ +åģľ çīĮ +奶 æ²¹ +å¥ĩ纳 æ²³ +æĨ Ĥ +社ä¼ļ å®ŀè·µ +è´Ŀ 壳 +çłĤ æµĨ +èι åıª +宣 æī¬ +综åIJĪ æķ´æ²» +åĤ ij +æ°ijæĹı æĸĩåĮĸ +éĩį çݰ +积 æ·Ģ +åħ¬ çĦ¶ +çħ ī +缸 èģļ +æ± ¾ +纹 çIJĨ +çĩĥ çħ¤ +æŃ¤ ç§į +ç¾İ å¦Ĩ +åįĥ çĵ¦ +çIJ Ľ +驾驶 è¯ģ +éĺ¶ æ¢¯ +ä¸Ŀ ä¸Ŀ +å¾Īå¤ļ äºĭæĥħ +åħī éĺ´ +èijĹä½ľ æ¬Ĭ +åħ§ éĥ¨ +çĽ¸å¯¹ æĿ¥è¯´ +éĸ Ĵ +éľĩ æħij +說 話 +æĨ ij +ç«¥ è£ħ +ä½ıæĪ¿ åĴĮ +ä½ıæĪ¿åĴĮ åŁİ +å·²ç»ı è¶ħè¿ĩ +侦 å¯Ł +çŁ¿ çī© +ä¾Ľ 大家 +çī¹ éĤĢ +ç¨ĭåºı åijĺ +çķľçī§ ä¸ļ +æ° ª +çij ª +åĢĴ åľ¨ +åĢĴåľ¨ åľ° +æ¯ Ģ +梯 éĺŁ +æİ¥ èijĹ +æĬĹ èıĮ +è¤ ĩ +ç¬ Ļ +æ¯Ķ ä¸Ĭå¹´ +鸡 汤 +åŃ¦ä¹ł æĪIJ绩 +æĸij æĸĵ +åħΠ坼 +åĪĹ ä¸¾ +è°ĥæŁ¥ æĺ¾ç¤º +æ© « +ä¹Ŀ åįģ +è°¢ 飵 +è·¨è¶Ĭ å¼ı +女æĢ§ æľĭåıĭ +èIJ¥åħ» ä»·å̼ +å®ŀè·µ ç»ıéªĮ +èĭı å·ŀå¸Ĥ +çĵ¶ åŃIJ +æĸ° çļĦä¸Ģ +æĸ°çļĦä¸Ģ å¹´ +æĺİ æĻ° +å®ł çα +åŃŠ第 +æľĹ 诵 +纳 æĸ¯ +éĢĨ è¡Į +è«ĭ æĤ¨ +è«ĭæĤ¨ æıIJä¾Ľ +èĥ¸ æĢĢ +第ä¸ĥ å±Ĭ +强 壮 +代 åŃķ +æ±¶ å·Ŀ +å®¶ åĸ» +å®¶åĸ» æĪ· +å®¶åĸ»æĪ· æĻĵ +èħ ® +åIJ¯ 迪 +æĹł éļľç¢į +èĻķçIJĨ åıĬ +æĿ¥ åİĨ +å®ŀ åĬ¡ +ä¹Ł éļıä¹ĭ +æĬĢèĥ½ åŁ¹è®Ń +åѤ ç«ĭ +åī ģ +éĥ´ å·ŀ +æĶ¶ æķĽ +éł» éģĵ +èᣠ幏 +èİ« è¿ĩäºİ +æŃ¤ æĻĤ +纪å§Ķ çĽij +纪å§ĶçĽij å§Ķ +缸 éĤ» +åı¦ä¸Ģ è¾¹ +çªĴ æģ¯ +æľīå¾Īå¤ļ ç§į +æ¯ı éĢ¢ +éĹ® ä¸ĸ +ç´¯ ç´¯ +éĿĴæĺ¥ æľŁ +è·¯ åĨµ +åħĭ èݱ +è¿Ħä»Ĭ 为æŃ¢ +æĥĬ å¥ĩ +è·¨ 度 +éħ¿ éĢł +åĩ ĭ +è¿ij ä¸īå¹´ +åĨħ 马 +åĨħ马 å°Ķ +æı į +è¿Ľå±ķ æĥħåĨµ +èĮ § +æľīåºı æİ¨è¿Ľ +æĢ» åĨłåĨĽ +æĪIJ绩 åįķ +éĽ»è©± åıĬ +ç´§å¯Ĩ ç»ĵåIJĪ +åºĬ ä½į +é¹ Ĭ +æķ£åıij çĿĢ +åĭŁ èµĦ +æ°¨ éħ¸ +彩 ç¥ŀ +è®Ģ åıĸ +éĩį æ¸© +ä¸Ń åŃĺåľ¨çļĦ +ç¾İ éºĹ +ä¸įæĸŃ å¢ŀåĬł +è½® æµģ +æİ¥ åIJ¬ +å¹´ 产å̼ +åįĥ åħĭ +æĪĺåľº ä¸Ĭ +çħ§ é¡§ +å¹²éĥ¨ éĺŁä¼į +åį° ç«ł +ä¸Ģèĩ´ æĢ§ +è¿ŀ å¤ľ +åħħ è£ķ +é»ij åIJįåįķ +åĩĢ æ°´ +ä¸Ģ大 æĹ© +åĮħ 袱 +çĬ¯ è§Ħ +çIJĨ è«ĸ +æŀģ æĺĵ +éª ¸ +å¨ĺ å¨ĺ +åĽ¢ åľĨ +亿åħĥ 以ä¸Ĭ +åĪ©ç͍ æĤ¨çļĦ +带æĿ¥ æĽ´å¤ļ +ä¸Ń央 空è°ĥ +æľĪ èĸª +çĮľ æĥ³ +åĪº 客 +ä½ľ æģ¯ +åįķ è°ĥ +äºĴ åĪ© +å¦Ĥæľī ä¾µæĿĥ +å°ı å·§ +åįģ åł° +åĵĪåĵĪ åĵĪåĵĪ +è¾¹ éĻħ +æłĩ è¯Ń +åĪĩåħ¥ çĤ¹ +éĢĨ è¢Ń +è¯ķ åīĤ +绿 è±Ĩ +è® ļ +åŁºçĿ£ å¾Ĵ +å£ ¬ +åħ¨ æĺİæĺŁ +éĢī ç§Ģ +èĪĮ å°ĸ +ä¸įåIJĮ ç±»åŀĭ +çĥŁ åĽ± +çģµ æ°Ķ +åĮº 管å§Ķä¼ļ +åĨľ åī¯ +åĨľåī¯ äº§åĵģ +èĶļ æĿ¥ +沪 æĮĩ +åħ»æ®ĸ æĪ· +æĸĹ å¿Ĺ +é¦ĸ é¢Ĩ +è¡Ģ èħ¥ +åĬł ç´§ +ä¸Ģèĩ´ 好è¯Ħ +第ä¸ī èĬĤ +æī¬ å°ĺ +交éĢļ æŀ¢çº½ +鼶 ç¢İ +é»ij æ´ŀ +çľĭ ä¸įæĩĤ +å±ŀ å®ŀ +主 åŁİåĮº +å¨ Ľ +å¨Ľ æ¨Ĥ +ç¬ij æĦı +èϹ æ¡¥ +åIJĦ个 çݯèĬĤ +çķ¥ å¾® +èĢķ èĢĺ +æľ¬ åľºæ¯ĶèµĽ +æĪIJ è´¥ +éĢī èĤ¡ +èªŀ è¨Ģ +çŃĶ è¾© +èĩª ä¹ł +æ£ º +ä¸ĩ 欧åħĥ +åģľ å·¥ +对åħ¶ è¿Ľè¡Į +积æŀģ éħįåIJĪ +ä¹¾ åĿ¤ +å¦ĸ æĢª +èļĮ åŁł +èµĦ产 è¯Ħä¼° +è°ĥ çļ® +éϤ å¤ķ +åĽ´ å¢Ļ +æľį å½¹ +æ·± æ¸Ĭ +é¢Ħ åζ +ç ĥ½ +å®ī 稳 +建 æŀĦ +çĭĻ åĩ» +主åĭķ 註åĨĬ +éĥ½æľī èĩªå·± +æİĴåIJį 第ä¸Ģ +麻 è¾£ +çĢ ļ +çĥŁèĬ± çĪĨ +çĥŁèĬ±çĪĨ 竹 +èĩªçĦ¶ ä¿ĿæĬ¤ +ä»Ļ å¢ĥ +为äºĨ éģ¿åħį +åĨ· åºĵ +è§£æĶ¾ æĢĿæĥ³ +åĪĿ äºĮ +ä½ĵ è´´ +é¦ĸ å¯Į +迪 æĭľ +æļĤ ç¼ĵ +æĶ¯æĮģ åĬĽåº¦ +侦 æİ¢ +马 åĪº +åĮĹ æ±½ +ç¹ ŀ +è°İ è¨Ģ +éĢ£ çºĮ +å· ³ +ä»»ä½ķ æĹ¶åĢĻ +车 èģĶç½ij +åįķ 项 +å¸Ń åį· +建çŃij æĿIJæĸĻ +ä¸Ńç§ĭ èĬĤ +ç¡ķ士 çłĶç©¶ +ç§ģ ç«ĭ +åħļåĴĮ æĶ¿åºľ +æľ¬æ¬¡ 交æĺĵ +èººåľ¨ åºĬä¸Ĭ +ç½ijåıĭ è¯Ħ论 +å¦ Ŀ +害 ç¾ŀ +åħ¬ç«ĭ åĮ»éĻ¢ +ä¸ ŀ +çĶŁçī© è´¨ +åºĶ éĤĢ +æĬ½ åıĸ +åĩł å¼ł +æijĺ ç¼ĸ +ç»ĺ æľ¬ +详 è§£ +强 硬 +æľĢ åħĪè¿ĽçļĦ +æĭĽ èĤ¡ +æĭĽèĤ¡ 书 +åįĥ æĸ¹ +åįĥæĸ¹ çϾ +åįĥæĸ¹çϾ 计 +éħį éŁ³ +驾 çħ§ +å¾ģ æĪĺ +èªĵ è¨Ģ +æĭľ å¸Ī +æĭľå¸Ī åѦ +æĭľå¸ĪåѦ èīº +æĬ± åĽ¢ +ç±³ ç²ī +éĿŀ常 éĢĤåIJĪ +èĪª æµ· +å±¥ 约 +åįģåħ« æĿ¡ +éĶ» éĢł +éĩįè¦ģ 举æİª +åıijæĮ¥ ä½ľç͍ +æ· ļ +人 社 +人社 å±Ģ +è¯ķçĤ¹ å·¥ä½ľ +éĺľ éĺ³ +æ¡ĥ åľĴ +æ°ij ä¼ģ +æ´ģ çϽ +è´µ 宾 +åħ¬ 社 +è§ī æĤŁ +è®°å¿Ĩ åĬĽ +æľĥåĵ¡ 註åĨĬ +æŃ¤ æ¡Ī +麻 çĹ¹ +çı Ģ +æĸ© èİ· +çĶ· åŃ©åŃIJ +å±ĢéĻIJ äºİ +åĭĺ æŁ¥ +åIJĥ 饱 +èĬ¬ åħ° +æ£ķ èī² +ç¦ı ç¥ī +çͳ èĬ± +æµ· çĽĹ +èĶ ij +æĸĩ åѸ +æ´»æĢ§ çĤŃ +缴 éĢļ车 +è°¢ 
éĤĢ +躺 çĿĢ +åľ ĥ +æ¯ıæĹ¥ ç»ıæµİ +åħ¬åħ± æĸĩåĮĸ +讲 æķħäºĭ +å¯Ł çľĭ +æĤł éĹ² +åľ° åĿª +æ¶Į çݰåĩº +é«ĺçŃī éĻ¢æł¡ +èĮĦ åŃIJ +éĺ² åį« +ä¾ĭ è¡Į +æĺ¾ éľ² +æĸ° 常æĢģ +ç»Ŀ ä½³ +å¯Į æ°ij +以 人æ°ij +以人æ°ij 为 +éĤ¢ åı° +å±ķ æ¼Ķ +çϼ å¸ĥ +è´Ł è½½ +åģı 离 +æ°¸ éģł +éĩįè¦ģ åİŁåĽł +åįıä¼ļ ä¼ļåijĺ +éļ¾ æ°ij +çĶŁäº§ 车éĹ´ +çģµ åĬ¨ +两年 åīį +æĸ¹ åľĨ +æ´» ä¸ĭåİ» +ä¸ĸçķĮ è§Ĥ +éªĹ åıĸ +ç¾İ è²Į +èĥ½ çľĭåĩº +çϼ æı® +è§Ĥ å½± +åī ĥ +åIJĪèµĦ åħ¬åı¸ +å© § +å¹² æĹ± +åħŃ ä¸ªæľĪ +尤为 éĩįè¦ģ +èĤ ½ +秦 åĽ½ +æīĺ ç¦ı +建çŃij å¸Ī +åįĩ级 æĶ¹éĢł +å°ı é¢Ŀ +å°ıé¢Ŀ 贷款 +两个 ç»´æĬ¤ +æĭį æĭį +åı¯ çĸij +æį¢ åıĸ +æŃ¦ 士 +èµĸ 以 +èµĸ以 çĶŁåŃĺ +æĮ ļ +殿 åłĤ +èĩªçĦ¶ çķĮ +ç£ģ åľº +å¦Ĥä½ķ çľĭå¾ħ +ä»ĬæĹ¥ 头æĿ¡ +西 åŁŁ +èİ· è¯Ħ +風 æł¼ +ä¿Ħ åĽ½ +æīĵ æĭ¼ +å®£ä¼ł çīĩ +å¾Ī æĸ¹ä¾¿ +ä¾Ľç»Ļ ä¾§ +纪念 ç¢ij +毫 åħĭ +èĬ³ é¦Ļ +å·¥åķĨ éĵ¶è¡Į +请 çĤ¹åĩ» +ç¼ ª +æĹłæķ° 次 +èᝠå¸Ī +èħ ¸ +游 èīĩ +åĮ ¾ +å·¡ èĪª +æ²»çIJĨ ä½ĵç³» +èIJ¥éĢł èī¯å¥½ +æ·· æ·Ĩ +éĢļ çķħ +åĬ³ ç´¯ +ä»ĵ ä½į +å¢ŀ éķ· +éļIJ 约 +æĿĤå¿Ĺ 社 +åħ» èĤ² +åı¯èĥ½ åıijçĶŁ +èĢĥ 試 +西 ä¾§ +åĬł åĢį +主æĮģ åı¬å¼Ģ +çķ¢ ç«Ł +éĹ® 询 +æµ· æ£ł +èĹ © +注æĺİ æĿ¥æºIJ +æ£Ģ çĸ« +请 åģĩ +æĬļ æij¸ +èĵĦ çĶµæ±ł +è·Ł ä¸įä¸Ĭ +çݰ代 社ä¼ļ +çѹ èµĦ +ä½ĵèĤ² 彩票 +å»¶ 误 +è¾Ľ è¾£ +éĿ¢ 容 +åį° è®° +çģŃ äº¡ +ç´ł é£Ł +åħ´ èĩ´ +éľĢè¦ģ ç͍ +éľĢè¦ģç͍ åΰ +å®Ŀ å¦Ī +ç£ĭ åķĨ +éļ¶ å±ŀ +è´¡çĮ® åĬĽéĩı +åħ¬åħ± èµĦæºIJ +大 éĺª +åĨĽ è®Ń +æĤ¬ 念 +社ä¼ļ 稳å®ļ +å¹²äºĭ åĪĽä¸ļ +æľī æĿ¡ä»¶ +æľīæĿ¡ä»¶ çļĦ +ä¸Ģå¹´ ä¸Ģ度 +åİ ¥ +强 奸 +豪 车 +æİĮ æŁľ +æ°´åĪ© å·¥ç¨ĭ +å³ ª +积æŀģ ä½ľç͍ +æµ· æ·Ģ +æµ·æ·Ģ åĮº +çĥŃ æĴŃ +åĿļæĮģ ä¸įæĩĪ +åıĮ èĦļ +绣 æĪĺ +ä»»ä½ķ 人éĥ½ +åľ°ä¸ĭ 室 +åĨ¶ çĤ¼ +è°ħ è§£ +æ¸Ķ èι +太éĺ³ åŁİ +被 æįķ +计ç®Ĺ åύ +西 åĮ» +èĪĴ å¿ĥ +æ¡ ¦ +éģ ² +åĬ ij +è¨ Ĺ +èİ º +åĸ ¬ +çĵ ¯ +åĺ ĺ +åł ķ +æķ Ŀ +åij ¦ +èĭ ŀ +æŃ ¹ +æĵ ¬ +æ£ Ħ +èĪ µ +å¥ ª +çļ ĭ +æĶ ¸ +åľ © +ç¤ Ļ +ç¢ ĺ +éı Ī +æĦ ķ +ç¹ ³ +èĺ ¸ +è² Ĥ +æ¼ ² +æij ¹ +æĶ Ŀ +åŃ ¢ +èķ Ń +é¨ ° +æ½ ¼ +éħ ° +æĴ ¥ +è¹ ¬ +é¨ Ļ +è¸ ¹ +éģ IJ +çĺ Ģ +èĽ ¤ +æĤ ĸ +çĴ ŀ +ç£ IJ +æİ ° +è¾ Ĭ +å¾ ij +æİ ĸ +éģ ŀ +éĤ ¸ +éĽ ı +æĨ İ +æľ ½ +çį » +ç® Ķ +è¤ ¶ +æļ ¢ +æĺ µ +çı Ĥ +æĤ ¸ +åģ µ +åĻ ľ +å£ ¯ +æĴ ® +æģ į +å© ķ +ç¯ ± +éĺ Ļ +çī ł +è£ ĺ +è³ ¢ +éĩ ľ +éĵ ł +èİ ĺ +æ® Ĩ +çĻ ¸ +è´ ı +ç² ± +å« ¡ +åĨ ¢ +è¤ Ĵ +æĩ Ĭ +éľ ĵ +å¡ µ +æĭ £ +å» Ł +é£ ½ +é¢ Į +åļ İ +æ· º +èĨ ł +åİ Ń +åļ ĩ +åij ĥ +çĴ ĭ +çŃ ± +æĭ · +èį § +éĶ ° +åŃ ° +èĵ ĵ +èĨ ½ +æŀ ī +åĸ ½ +çĽ Ķ +çŃ IJ +ç¾ ļ +è ħĮ +è¾ « +æ³ ĵ +çĶ ¬ +èŁ ² +åĸ ª +å¦ ĵ +è¬ Ģ +çĤ Ĭ +æĽ ľ +æ± IJ +è´ Ī +èį Ģ +æĬ ł +ç¢ ¾ +æ« ĥ +éŀ ł +èij Ĩ +ç¥ ¯ +å½ Ŀ +é¦ į +åĮ £ +æľ Ń +åĿ Ĥ +ä¿ ij +èĵ ® +çij Ľ +æī ī +èĩ Ł +è² « +çİ ¥ +æ· ¼ +åİ ² +é³ Į +å³ Ń +åij Ľ +é § +é§ IJ +éģ · +ä¿ ª +æĢ Ĥ +è¾ į +å± į +åĭ ģ +å¥ ļ +éļ ħ +éĴ ´ +è¼ Ŀ +å® ¦ +èIJ ĥ +çĺ ĭ +æĨ ¶ +æĤ ħ +è¾ Ļ +åij ľ +çł º +éĢ ŀ +æµ ļ +éĸ £ +èĸ © +éĻ ĭ +çĤ Ļ +èª ķ +ä¸ Ł +é¹ ½ +ç± Į +è´ ° +éĭ ª +çľ © +æĴ IJ +èĨ º +éŀ ĺ +ç¾ ² +çª ® +ç´ IJ +æ® ´ +çº ¾ +èº į +ç´ ĭ +çĦ ĸ +çĶ º +çī ½ +çĤ ¯ +ç¼ Ķ +æ¯ ĵ +å¬ ° +æ¢ § +äº Ł +è¢ ħ +çį Ħ +è¿ ¥ +æ¼ ¾ +çĿ ij +ç¸ ¾ +é¦ ĭ +é¤ ħ +æ ¹Ħ +æĺ ĩ +æŀ Ń +èĸ ° +æŁ ij +æ¦ » +åĻ Ĺ +åĻ ´ +æ£ £ +åĶ § +çĨ ¹ +è¼ ¯ +å¢ Ł +é² ² +æĪ Ľ +èī ¦ +èĬ ® +åĺ Ł +å¸ ¥ +å¿ » +çĮ Ŀ +å¯ µ +è³ ¦ +èĽ ¾ +æ» ¾ +çĤ ķ +éĵ ¬ +èĴ ¿ +éĴ ¨ +çĥ Ļ +ç² ķ +æĥ ¦ +æº § +é¢ į +éħ £ +å³ ¦ +ç± ģ +çĥ ĥ +åĨ Ĺ +åı ģ +çĽ § +ç½ µ +éĴ Ĺ +å¬ ī +è° ı +ç³ § +è¾ Ń +æ· ¬ +èŁ Ĵ +è¯ © +è¦ ĥ +çĻ ĸ +é½ Ĵ +çĪ IJ +ç® į +ç¼ İ +ç£ º +è¯ « +è¤ ² +æĵ ł +èIJ ¦ +çĿ ¬ +è° į +éĦ ° +æł ¾ +é¡ ı +ç¸ ± +æ¡ ¨ +éĨ ¬ +è¥ ² +è® ª +å© º +èį Ł +åĮ Ŀ +çĨ ł +èĽ Ĭ +æ¸ ļ +å´ ½ +é² ¤ +åķ ° +åĮ ķ +ä¸ IJ +è® ¥ +åı ½ +åı ¼ +çļ ¿ +è¿ Ĥ +åIJ Ĩ +å± ¹ +èĩ ¼ +è® ¹ +é© ® +çº « +æ± ŀ +æĬ ¡ +èĭ ĩ +åIJ ł +åIJ Ń +åIJ ® +å² ĸ +ä½ ĥ +çĭ Ī +åº ĩ +åIJ Ŀ +éĹ ° +æ± ¹ +å¿ ± +æĭ Ħ +æĭ Ĺ +èĮ ī +èĭ Ľ +èĮ ģ +çŁ ¾ +èĻ ı +åij » +åĴ Ħ +å¿ ¿ +èĤ ® +çĭ ŀ +çĸ Ł +çĸ Ļ +çĸ ļ +æ³ ŀ +å¸ ļ +å± ī +è¿ 
¢ +é© ¹ +ç İ· +çıĬ ó +çıĬó ł +çıĬół Ħ +çıĬółĦ ģ +æĮ İ +æĭ ´ +åŀ Ľ +èį ¤ +æ® ĥ +çĽ ¹ +åĵ Ĩ +è´ » +æ¯ ¡ +çĭ ° +çĭ ¡ +æŁ Ĵ +æģ ĥ +è¯ ¬ +è¢ Ħ +è¯ ² +èļ ¤ +èĢ Ļ +åŁ Ĥ +æį İ +æį Į +æ¢ Ĩ +é ħĮ +çł ¾ +æ® ī +åĶ ł +æĻ Į +èļ £ +èļ ª +èļ ĵ +é¸ ¯ +åĶ ģ +åĶ Ĩ +åĢ Ķ +èĪ Ģ +è± º +èĥ ° +é¸ µ +é¸ ³ +é¦ ģ +ç¾ Ķ +æ¶ £ +æ¶ ķ +æĤ ¯ +è¯ ½ +è° Ĩ +ç¥ Ł +ç» ¢ +æį º +æį ¶ +æį » +æİ Ĥ +èı ł +èIJ ¤ +éħ Ĺ +çľ ¶ +åķ Ħ +èļ ¯ +èĽ Ģ +åĶ ¬ +å¸ · +éĵ IJ +éĵ Ľ +åģ İ +å¾ Ļ +èĦ ¯ +è± ļ +çĮ ĸ +çĹ Ĭ +æ¶ ® +æĥ Ń +æĤ ´ +æĥ ĭ +è° ļ +æı © +æIJ Ģ +æIJ Ķ +æ¦ Ķ +æ¤ Ń +éĽ ³ +åĸ ³ +è· Ľ +èľ ĵ +èľ Ĵ +é¹ ĥ +éĶ Ħ +çĶ ¥ +çŃ ı +çĮ © +çĮ ¬ +çĮ ¾ +çĹ ¢ +çĹ ª +æĥ ° +çª ĺ +è° ¤ +éļ ĺ +å© ¿ +é¹ ī +çij Ļ +æĸ Ł +æ¤ ¿ +éħ ª +éĽ ¹ +åĹ ¦ +è· · +è· º +è· ¤ +èľ Ī +èľ Ĺ +å¹ Į +é¦ ı +èª Ĭ +æ¼ ĵ +è¤ Ĥ +èĶ Ĺ +èĶ ¼ +åħ ¢ +è£ ³ +èľ » +èĿ ĩ +åĺ Ģ +éĶ ¹ +ç® ķ +ç® © +çĺ © +çĺ Ł +æ¼ ± +å¯ ¥ +éª ¡ +æĴ µ +æĴ ¬ +è± Į +åĺ ¹ +èĿ ł +èĿ Į +èĿ Ĺ +èĿ Ļ +éķ IJ +ç¨ ¼ +ç¯ ĵ +èĨ Ľ +é² « +çĺ ª +é² ¨ +æĨ Ķ +ç¿ © +è¤ ¥ +ç¼ Ń +åĻ © +çĵ ¢ +éľ İ +è¸ ± +è¹ Ĥ +èŁ Ĩ +é¹ ¦ +ç¯ ¡ +çĺ ¸ +çª ¿ +ç¼ ° +èĹ IJ +è¹ ĭ +èŁ ĭ +èŁ Ģ +èµ ¡ +èĩ Ĭ +é³ Ħ +ç³ ł +æĩ ¦ +åļ £ +éķ ° +é³ į +ç° ¸ +çĻ £ +é³ ĸ +é¬ ĵ +èł ķ +éľ ¹ +èº ı +é» ¯ +çĵ ¤ +çŁ Ĺ +ä¹ Ĥ +ä¹ ľ +åħ Ģ +å¼ ĭ +åŃ ij +åŃ ĵ +å¹ º +äº ĵ +å »¿ +ä¸ ı +åį ħ +ä» ĥ +ä» ī +ä» Ĥ +åĪ Ī +çĪ » +åį ŀ +éĹ © +è® £ +å¤ ¬ +çĪ ¿ +æ¯ ĭ +éĤ Ĺ +éĤ Ľ +èī ½ +èī ¿ +åı µ +ä¸ ķ +åĮ ľ +åĬ ¢ +åį Ł +åı ± +åı » +ä» ¨ +ä» Ł +ä» ¡ +ä» « +ä» ŀ +åį ® +æ° IJ +çĬ ° +åĪ į +éĤ Ŀ +éĤ Ļ +è® ¦ +è® § +è® « +å° » +éĺ ¡ +å° ķ +å¼ ģ +èĢ Ĵ +çİ İ +çİ ij +åľ ¬ +æī ¦ +åľ ª +åľ ¹ +æī ª +åľ ® +åľ ¯ +èĬ Ĭ +èĬ į +èĬ Ħ +èĬ ¨ +èĬ ij +èĬ İ +èĬ Ĺ +äº ĺ +åİ į +å¤ ¼ +æĪ į +å° ¥ +ä¹ © +æĹ ¯ +æĽ ³ +å² Į +å± º +åĩ ¼ +åĽ ¡ +éĴ ĩ +ç¼ ¶ +æ° ĺ +æ° ĸ +çī Ŀ +ä¼ İ +ä¼ Ľ +ä¼ ¢ +ä½ ¤ +ä» µ +ä¼ ¥ +ä¼ § +ä¼ ī +ä¼ « +åĽ Ł +æ± Ĩ +åĪ ĸ +å¤ Ļ +æĹ ® +åĪ İ +çĬ · +çĬ ¸ +èĪ Ľ +åĩ « +é Ĥ¬ +é¥ § +æ± Ķ +æ± ľ +æ± Ĭ +å¿ ĸ +å¿ ı +è® ´ +è® µ +è® · +èģ ¿ +èī ® +åİ ¾ +å¦ ģ +çº ¡ +çº £ +çº ¥ +çº ¨ +çİ ķ +çİ Ļ +æĬ Ł +æĬ Ķ +åľ » +åĿ į +æĬ ĥ +ã§ IJ +èĬ « +èĬ ¾ +èĭ Ī +èĭ £ +èĭ ĭ +èĬ ¼ +èĭ Į +èĭ ģ +èĬ © +èĬ ª +èĬ ¡ +èĬ Ł +èĭ Ħ +èĭ İ +èĭ ¡ +æĿ Į +æĿ ĵ +æĿ Ī +å¿ ij +åŃ Ľ +éĤ ´ +éĤ ³ +å¥ ģ +è± ķ +å¿ Ĵ +æ¬ ¤ +è½ « +è¿ ĵ +éĤ ¶ +å¿ IJ +åį £ +éĤ º +æĹ ° +åij ĭ +åij Ĵ +åij ĵ +åij Ķ +åij ĸ +æĹ ¸ +åIJ ¡ +èĻ ¬ +åIJ ½ +åIJ £ +åIJ ² +å¸ ı +å² Ī +å² ĺ +åħ ķ +åĽ µ +åĽ « +éĴ Ĭ +éĴ ĭ +é ĴĮ +è¿ ķ +æ° Ļ +æ° ļ +çī ¤ +ä½ ŀ +ä½ ļ +ä½ Ŀ +ä½ Ĺ +å½ · +ä½ ĺ +ä½ ¥ +è± ¸ +åĿ Į +èĤ Ł +å¥ Ĥ +åĬ ¬ +çĭ ģ +é¸ ł +é¥ ¨ +é¥ © +é¥ « +é¥ ¬ +åº ij +åº ĭ +çĸ Ķ +çĸ ĸ +èĤ ĵ +éĹ ± +éĹ ³ +çĤ Ģ +æ² £ +æ² ħ +æ² Ķ +æ² ¤ +æ² ı +æ² ļ +æ± © +æ± ¨ +æ² ¨ +æ± ´ +æ² Ĩ +æ² © +æ³ IJ +æĢ ĥ +æĢ Ħ +å¿ ¡ +å¿ ¤ +å¿ ¾ +æĢ ħ +å¿ ª +æĢ Ĩ +å¿ Ń +å¿ ¸ +è¯ Ĥ +è¯ ĥ +è¯ ħ +è¯ ĭ +è¯ Į +è¯ Ĵ +éĻ Ĥ +éĻ ī +å¦ © +å¦ ª +å¦ £ +å¦ Ĺ +å¦ « +å§ Ĵ +å¦ ¤ +åĬ Ń +åĪ Ń +éĤ ° +çº Ń +çº ° +çº ´ +çİ ¡ +çİ Ń +çİ ł +çİ ¢ +çİ ¦ +çĽ Ĥ +å¿ Ŀ +åĮ ¦ +åĿ © +æĬ ¨ +æĭ ¤ +åĿ « +æĭ Ī +åŀ Ĩ +æĬ » +åĬ ¼ +æĭ ĥ +æĭ Ĭ +åĿ ¼ +åĿ » +ã§ Ł +åĿ ¨ +åĿ Ń +æĬ ¿ +åĿ ³ +èĭ · +èĭ ¤ +èĮ ı +èĭ « +èĭ ľ +èĭ ´ +èĭ Ĵ +èĭ ĺ +èĮ Į +èĭ » +èĭ ĵ +èĮ ļ +èĮ Ĩ +èĮ ij +èĮ ĵ +èĮ Ķ +èĮ ķ +è ĮĢ +èĭ ķ +æŀ ¥ +æŀ ĩ +æĿ ª +æĿ ³ +æŀ § +æĿ µ +æŀ ¨ +æŀ ŀ +æŀ ĭ +æĿ » +æĿ · +æĿ ¼ +çŁ ¸ +ç łĢ +åĪ ³ +å¥ Ħ +æ® ģ +éĥ ı +è½ Ń +éĥ ħ +é¸ ¢ +çĽ ± +æĺ Ļ +æĿ ² +æĺ ĥ +åĴ Ĥ +åij ¸ +æĺ Ģ +æĹ » +æĺ ī +çĤ ħ +çķ Ģ +èĻ ® +åĴ Ģ +åij · +é» ¾ +åij ± +åij ¤ +åĴ Ĩ +åĴ Ľ +åij ¶ +åij £ +åĴ Ŀ +å² ¢ +å² ¿ +å² ¬ +å² « +å¸ Ļ +å² £ +å³ ģ +åĪ ¿ +å² · +åī Ģ +å¸ Ķ +å³ Ħ +æ² ĵ +åĽ ¹ +ç½ Ķ +éĴ į +éĴ İ +éĴ ı +éĴ Ĵ +éĴ ķ +éĤ ¾ +è¿ ® +çī ¦ +ç« º +è¿ ¤ +ä½ ¶ +ä¾ ij +ä¾ ī +èĩ ¾ +ä¾ Ĺ +ä¾ ı +ä¾ © +ä½ » +ä½ ¾ +ä¾ ª +ä½ ¼ +ä½ ¯ +ä¾ ¬ +å¸ Ľ +ä¾ Ķ +å¾ Ĥ +åĪ ½ +éĥ Ħ +ç± ´ +çĵ ® +æĪ Ĺ +èĤ ¼ +äı Ŀ +èĤ ± +èĤ « +è¿ © +éĥ ĩ +çĭ İ 
+çĭ į
+çĭ Ĵ
+åĴ İ
[... several thousand further "+" lines elided. This hunk of the diff adds a
byte-level BPE merges list, one space-separated token pair per added line; in
the source diff each "+" entry sits on its own line, but long runs of them
were flattened together during extraction. The pairs are stored in the
byte-to-unicode rendering that byte-level BPE tokenizers use, so CJK, Hebrew,
Thai, Arabic, Korean and Vietnamese text reads as runs of accented Latin
glyphs; the list also contains pair entries such as "< unk". ...]
a +ÙĦا عب +د ÙĦ +Ġפע ×Ŀ +h ör +à¸Ĭ ุà¸Ķ +à¸ŀ ู +à¸ŀู à¸Ķ +п аÑģ +ĠÅŁ u +Ġt Æ°á»Łng +خار ج +Ġâ m +ĠинÑĤеÑĢ ÐµÑģ +ен нÑĭÑħ +×IJ ׳×Ļ +بد Ø£ +ëĿ¼ ëĬĶ +ì¹ ´ +æĸ¹ ãģĮ +ли в +Ġ à¸Ħà¸Ļ +ער ×ļ +à¸Ĥà¸Ńà¸ĩ à¸Ħุà¸ĵ +п ад +Ġc ạnh +ĠëĤ ¨ +ĠÄij âu +Ġbi á»ĥu +ãĤĤ ãģĤãĤĭ +׾ ×Ĵ +Ġ สำหรัà¸ļ +Ġxu á»ijng +ס ×ķ +Ġذ ات +ĠÐľ е +ع اÙĦÙħ +×IJ ס +ب ÙĬØ© +Ø´ ا +и ем +ĠNg ưá»Ŀi +íĺ ij +Ñģл ов +Ġп а +Ġm ẫu +ĠпÑĢоÑĨ еÑģÑģ +ĠNh Ãł +пÑĢо из +пÑĢоиз вод +à¸łà¸²à¸¢ à¹ĥà¸Ļ +Ġ à¸ļาà¸Ĺ +×ŀ ׳×ķ +ĠоÑĢг ан +רצ ×ķ +×ķ×ŀ ×Ļ×Ŀ +Ġyaz ı +Ġd ù +ãĥ¬ ãĥ³ +ÙĪÙĦ ÙĬ +ย ู +Ġtr ò +à¹Ģà¸ŀ ลà¸ĩ +Ġ×ŀ ׾×IJ +à¸ķ ล +à¸ķล à¸Ńà¸Ķ +ĠÄij ạt +Ġ×Ĺ×ĵ ש +p óÅĤ +Ġ×ŀ ×ĵ×Ļ +ujÄħ c +×ŀ׳×Ķ ×ľ +Ġש×ij ×ķ +Ġ×Ķ×ŀש פ×ĺ +Ġ×IJ ׾×Ķ +ĠÙĪ Ø°ÙĦÙĥ +à¹Ģà¸ŀ ราะ +ĠÄijo Ãłn +Ġíķ¨ ê»ĺ +Ġd ục +Ø´ ت +Ġ ula +Ġula ÅŁ +Ġqu ý +Ġ×Ķ ×Ĵ×ĵ×ķ׾ +à¸ķัà¹īà¸ĩ à¹ģà¸ķà¹Ī +Ġש ר +Ø´ Ùĩد +׳ ש×Ļ×Ŀ +à¸ŀ ล +رÙĪ Ø§ +ãĤĮ ãģ¦ +Ġн иÑħ +Ġдел а +ãģ§ãģį ãģªãģĦ +ÅĤo ż +×IJ ×Ĺר +ì ½Ķ +ãĤ¢ ãĥĥãĥĹ +د Ù쨹 +Ġti á»ĩn +Ġkh á»ı +Ġkhá»ı e +ĠاÙĦع اÙħØ© +ãģ« ãģĤãĤĭ +ĠÄij á»Ļc +ì¡ ± +Ġc ụ +й ÑĤе +Ġзак он +ĠпÑĢо екÑĤ +ìĸ ¸ +ÙĦ ØŃ +ĠçalÄ±ÅŁ ma +ãĤĴ ãģĻãĤĭ +Ñħ и +ع اد +Ġ׳ ×ŀצ×IJ +Ġר ×Ļ +à¸Ńà¸Ńà¸ģ มา +ĠT ôi +Ġth ần +ĠÙĬ ا +ล าย +Ġав ÑĤо +Ġsı ra +ĠÙĥ Ø«ÙĬر +Ùħ ÙĬز +ĠاÙĦع ÙĦÙħ +æĸ¹ ãģ¯ +×ķ×¢ ×ĵ +Ġобла ÑģÑĤи +×Ļ׾ ×Ļ×Ŀ +ãģĮ åĩº +à¸ĺ ุ +à¸ĺุ ร +à¸ĺุร à¸ģิà¸Ī +ÙĤت ÙĦ +ר×IJ ×ķ +Ġng u +Ġngu á»ĵn +Ġ มา +Ġпл ан +t ório +Ġcu á»iji +Ñģк ом +ĠاÙĦÙħ اض +ĠاÙĦÙħاض ÙĬ +Ġ×ij×¢ ׾ +Ġר ×ij×Ļ×Ŀ +Ġlu áºŃn +Ùĥ ÙĪ +à¸Ĺัà¹īà¸ĩ หมà¸Ķ +в ан +Ġtho ại +à¹Ħ à¸Ń +б иÑĢ +ĠاÙĦ ض +ت ا +ĠÑĢ Ð¾Ð´ +ĠV Ãł +×ŀ ×Ļף +ĠбÑĭ ла +к ами +ĠÐĶ Ðµ +t ık +קר ×Ļ +ĠeÄŁ itim +ĠÙĥ بÙĬر +ب Ùĥ +ĠÙĦ ÙĪ +в ой +Ġ ãģĵãģ® +ĠÑĤ ÑĢÑĥд +my ÅĽl +Ġs ư +à¸ŀ ีà¹Ī +Ġ à¹ģลà¹īว +×¢ ×§ +Ġ×Ĺ×ijר ת +ระ หว +ระหว à¹Īาà¸ĩ +×Ļ ×Ļ×Ķ +ĠاÙĦÙĨ اس +ün ü +Ġ׾ ×ŀ×Ķ +Ġch ương +ĠH á»ĵ +ار ت +ãĤĪãģĨ ãģ§ãģĻ +l á +×§×Ļ ×Ļ×Ŀ +æľ¬ å½ĵ +æľ¬å½ĵ ãģ« +ãģĵãĤĵ ãģª +Ñģ ов +Ġ×ķ ×Ĺ +à¹Ģà¸ģ à¹ĩà¸ļ +Ġк ÑĤо +à¹Ĥร à¸Ħ +ĠØ´ رÙĥØ© +ع زÙĬ +عزÙĬ ز +Ø·ÙĦ ÙĤ +п ÑĥÑģÑĤ +Ùģ ØªØŃ +ëŀ Ģ +Ġhã y +ض Ùħ +ë¦ ° +åł´åIJĪ ãģ¯ +ãĤª ãĥ¼ +Ġh ắn +Ġ×IJ ×ij×Ļ×ij +Ġש׾×Ķ ×Ŀ +Ġ×Ķ×Ļ ×Ļת×Ķ +ĠاÙĦد ÙĪÙĦØ© +ĠاÙĦ ÙĪÙĤ +ĠاÙĦÙĪÙĤ ت +ãģĤ ãģ¾ãĤĬ +Ġta ÅŁÄ± +İ N +×¢ סק +ãģ¦ ãģĦãģŁ +Ġtá»ķ ng +ĠاÙĦØ¥ ÙĨس +ĠاÙĦØ¥ÙĨس اÙĨ +ÑĢ ÐµÑĪ +Ġg ái +ĠÑĨ ен +ĠÙģ ÙĤد +Ùħ ات +ãģķãĤĵ ãģ® +Ġph ù +×ĺ ×Ķ +ĠÙĪØ§ÙĦ تÙĬ +Ġب Ùĥ +ìĿ´ ëĤĺ +к Ñģ +Ùħ ÙĬر +Ġv ùng +ĠاÙĦØ´ عب +ĠNh ưng +ãĥĢ ãĥ¼ +Ġ×Ĺ×Ļ ×Ļ×Ŀ +ĠØ´ خص +×§ ×ķ×ĵ +ê² Ģ +×¢ ש +×¢ ×ķ׾×Ŀ +צ ×ķר +ع ÙĤد +ĠiÅŁ lem +Ġ×Ķ×ij ×IJ +Ġd ưỡng +à¸Ł รี +Ġph ÃŃa +ãģ®ä¸Ń ãģ§ +Ġп и +Ġng Ãłnh +ним а +ĠÙĩ ÙĦ +Ġ×ķ ×IJת +ĠÄij áng +é quipe +ĠÑįÑĤ оÑĤ +Ġgö rev +ë§ ¤ +Ġqu ân +å¼ķ ãģį +æĻĤ ãģ« +Ġب Ùħا +×ŀ ×Ļת +Ġü lke +Ġ×ŀ×§ ×ķ×Ŀ +×ij ף +æ°Ĺ æĮģãģ¡ +Ġë§İ ìĿĢ +Ġyük sek +ÑĨ енÑĤÑĢ +ĠÙħ جÙĦس +ç§ģ ãģ® +ÙĤد ر +Ġë¶Ģ ë¶Ħ +Ġì° ¨ +خر ج +ãģĭ ãģªãĤĬ +ë³´ ëĭ¤ +Ġ×ŀ ×Ļ×ĵ×¢ +peÅĤ ni +Ġx á»Ń +ìĹIJìĦľ ëĬĶ +ĠباÙĦ Ùħ +ĠÙĪ Ùħا +ĠÑįÑĤ ой +ب ÙĬÙĨ +n ü +ØŃ ز +ØŃز ب +ĠÑĢабоÑĤ а +ĠNh áºŃt +ÙĦ اء +Ġëĵ ¤ +Ġëĵ¤ ìĸ´ +ãĤĦãģĻ ãģĦ +×Ĺ×ĸ ×§ +Ġ×Ķ×Ĺ ×ijר×Ķ +п иÑĤ +ãģĭãĤī ãģ® +Ġë§IJ ìĶĢ +Ġפ ×ķ +ÙĦ Ùİ +à¹Ģà¸ķà¹ĩ ม +ĠÐļ о +Ġm ówi +Ġt ÃŃn +ר×Ĵ ש +פר ×§ +Ġtr ạng +ĠÐŀ н +×Ĺ ×ķ×¥ +ĠعÙĨد Ùħا +Ġب ر +使 ãģĦ +Ġr á»Ļng +ëĮĢ ë¡ľ +íĪ ¬ +Ġktóry ch +в ид +ลูà¸ģ à¸Ħà¹īา +Ġmog Äħ +Ġש ×Ĺ +×ij ×Ĺר +ãĥĸ ãĥŃãĤ° +ĠTh Ãłnh +Ġ×Ķ ×¨×Ļ +ĠÑģÑĤ аÑĤÑĮ +ĠH á»Ļi +à¸ļ à¹īาà¸ĩ +çī¹ ãģ« +ĠÄIJ ức +èĢħ ãģ® +×¢ ×ŀ×ķ×ĵ +×ĺר ×Ķ +Ð ¥ +ĠÙħ Ùħا +Ġe ÅŁ +ĠнеобÑħодим о +ник ов +Ġüzer inde +a ÅĤa +Ġchá»ĭ u +ĠاÙĦ دÙĬÙĨ +أخ بار +ĠÄij au +ãģĮ å¤ļãģĦ +jÄħ cych +د Ø®ÙĦ +ları nd +larınd an +Ġs ẻ +à¸ŀิ à¹Ģศ +à¸ŀิà¹Ģศ ษ +ת ף +t ıģı +Ġlu áºŃt +ĠÅŀ e +ãĤ« ãĥ¼ +ãģ® ãģĤãĤĭ +Ġ×Ķ×IJ תר +ĠاÙĦØ¢ ÙĨ +ıld ı +Ġá o +ĠнаÑĩ ал +Ġvi á»ĩn +Ġ×ij×¢ ×ķ׾×Ŀ +з наÑĩ +×Ļ×ĺ ×Ķ +к ам +ĠÐĺ з +à¹Ģà¸Ĥ ียà¸Ļ +à¸Ļ à¹īà¸Ńà¸ĩ +ÑĤ ÑĢо +à¹Ģ à¸Ł +Ġжиз ни +Ġ สà¹Īวà¸Ļ +Ġv áºŃn +Ġê´Ģ 볨 +Ġl âu +ס 
×ĺר +×§ ש +س ÙĬر +Ġ×IJ×ķת ×Ļ +Ġm ôi +ائ ب +Ġо ÑģÑĤа +Ġm ón +Ġ×ij ×ŀ×§×ķ×Ŀ +Ġد اخÙĦ +Ġ×IJ ×ķר +Ġв аÑģ +Ùĥ Ø´Ùģ +ìĺ ¨ +à¸ĸ à¹Īาย +Ġkullan ıl +Ġt ô +ãģ« ãĤĪãĤĬ +ĠëĺIJ íķľ +Ġ×¢×ij×ķ×ĵ ×Ķ +Ġri ê +Ġriê ng +Ġyak ın +ز ا +Å » +×IJ ×ķ׼׾ +شار Ùĥ +Ġб еÑģ +× ´ +Ġا بÙĨ +ĠTá»ķ ng +ÙĨ ظ +ÅĽwi ad +ãĤµ ãĥ¼ +ห าย +ĠG ün +Ġhakk ında +à¹Ģà¸Ĥà¹īา มา +ز ÙĨ +ĠÐł о +Ġbi á»ĥn +ãģ© ãģĵ +Ùģ Ø¹ÙĦ +ز ع +פר ×ĺ +Ġ×Ķ ×Ł +Ø£ ÙĩÙĦ +Ġth ất +ØŃ ÙħÙĦ +Ñĩ Ñĥ +ĠìĤ¬ ìĭ¤ +ì° ¸ +ĠìľĦ íķ´ +ÙĪ Ø¸ +ĠÐŁ од +Ġkho ản +ÑĤ ен +ĠÙģ Ø§ÙĦ +Ñģ ад +à¸Ļ à¸Ńà¸Ļ +ĠاÙĦسعÙĪØ¯ ÙĬØ© +" ØĮ +ĠاÙĦ ÙĴ +ãĤī ãģļ +Ġto án +Ġch ắc +׼ ×Ļר +m éd +méd ia +ز ÙĪ +Ġyan ı +פ ׳×Ļ×Ŀ +ØŃ ظ +Ġб еÑģп +ĠбеÑģп лаÑĤ +ĠбеÑģплаÑĤ но +ĠØ£ ÙħاÙħ +à¸Ń าย +à¸Ńาย ุ +ר שת +Ġg á»ĵ +Ġgá»ĵ m +Ġu á»ijng +ص ب +k ır +ãĥij ãĥ¼ +Ġ׾×ĵ עת +Ġк ÑĥпиÑĤÑĮ +׾ ×ķ×Ĺ +ÙĪØ¶ ع +ÙĤÙĬ Ùħ +à¸Ľ า +ж ив +à¸Ķ ิà¸Ļ +×IJ ×ķפ +à¹Ģล à¹ĩà¸ģ +ãĥĥ ãĥī +иÑĩеÑģки Ñħ +ĠCh á»§ +кÑĢ Ð°Ñģ +ÙĪ ØµÙĦ +p ÅĤat +м оÑĢ +Ġ×Ķ×IJ ×ķ +à¸Ń ิà¸Ļ +Ġíķľ êµŃ +гÑĢ Ðµ +Ġìłľ ê³µ +ì° ½ +Ġê°ľìĿ¸ ìłķë³´ +Ġngh á»ĭ +à¸ĭ า +ØŃس اب +Ġby ÅĤa +ÙħÙĦ Ùĥ +иÑĩеÑģки е +Ġb ác +ض ØŃ +ê¸ ¸ +ש ×ŀ×¢ +Ġìĸ´ëĸ » +Ġìĸ´ëĸ» ê²Į +ìĽ Į +ات Ùĩ +à¹Ĥรà¸ĩ à¹ģ +à¹Ĥรà¸ĩà¹ģ รม +خد ÙħØ© +ĠÐł а +׼×ķ׾ ×Ŀ +×ŀש ×Ĺ×§ +ĠÙĪ ÙĥاÙĨ +ס ×ķ×£ +ĠاÙĦØŃÙĥÙĪÙħ Ø© +Ġ×ij ×ĺ +Ġtr áºŃn +Ġ×Ķ×¢ ×ķ׾×Ŀ +ĠÃŃ ch +t Äħ +ש×ŀ ×ķ +Ġ×Ķר×IJש ×ķף +Ġíķĺ ê³ł +ãģķ ãĤī +ãģķãĤī ãģ« +ãģ« ãģĹãģ¦ +Ġ à¸ľà¸¡ +ãģ® ãĤĪãģĨãģª +ĠÙĪ ÙĤت +ãĥį ãĥĥãĥĪ +ÙĦ عب +ÙĪ Ø´ +ìĺ ¬ +Ġ หาà¸ģ +Ġm iaÅĤ +à¸Ĺ à¸Ńà¸ĩ +иÑĤ а +ا صر +ил ÑģÑı +з е +à¸Ľà¸£à¸° มาà¸ĵ +ãģĿãĤĮ ãģ¯ +Ġb ır +Ġbır ak +صÙĨ اع +Ð ® +Ø´ عر +Ġ׳ ×Ĵ×ĵ +Ġب سبب +ãĥĿ ãĤ¤ +ãĥĿãĤ¤ ãĥ³ãĥĪ +ĠاÙĦج ÙĪ +ĠнеÑģк олÑĮко +Ġki ếm +Ùģ Ùİ +Ġض د +×ij×Ļ×ĺ ×ķ×Ĺ +تاب ع +ÙĨ ز +ĠB ản +Ġaç ıkl +Ġaçıkl ama +Ġ à¸Ħุà¸ĵ +à¸Ĺ า +ÅĤ ów +Ø· ب +ÙĨ ØŃÙĨ +Ġ×ŀ×§ ×ķר +Ġİ s +Ġдом а +Ġ วัà¸Ļ +Ġd Ãłnh +Ñı н +ми ÑĢ +Ġm ô +ĠvÃł ng +ص اب +s ının +à¸Ħ ืà¸Ļ +Ø® بر +×ĸ׼ ×ķ +Ġ×ŀ ש×Ķ×ķ +m ü +Ġкомпани и +Ġ×Ķ×¢ ×Ļר +ĠÙĥ ÙĪ +ÙĤÙĦ ب +ĠlỼ p +и ки +׳ ×ij +à¹Ĥ à¸Ħร +à¹Ĥà¸Ħร à¸ĩ +à¹Ĥà¸Ħรà¸ĩ à¸ģาร +×ŀ×ķ×¢ ×ĵ +ÑıÑĤ ÑģÑı +หลัà¸ĩ à¸Īาà¸ģ +ени Ñİ +Ġש ×¢ +Ġb Æ°á»Ľc +ãĥ¡ ãĥ¼ãĥ« +ãĤĦ ãĤĬ +Ġ×Ļ×ķ×ĵ ×¢ +Ġê´Ģ íķľ +ĠاÙĦØ£ Ùħر +Ġböl ge +ĠÑģв ой +ÙĦ س +Ġ×ŀ×Ļ ×ķ×Ĺ×ĵ +ĠëĤ´ ìļ© +ĠØ£ جÙĦ +ĠÄIJ ông +Ġ×ŀ ×ł×ª +Ġìĭľ ê°Ħ +Ùĥ Ùİ +ãģ¨ãģĦãģĨ ãģ®ãģ¯ +Ġnale ży +تÙĨظ ÙĬÙħ +ĠÑģозд а +Ġph é +Ġphé p +ãģ§ãģį ãģ¾ãģĻ +Ġع ÙĦÙħ +大ãģį ãģª +ãĤ² ãĥ¼ãĥł +í ħĮ +Ġ׼×ķ׾ ׾ +ĠинÑĤеÑĢ Ð½ÐµÑĤ +ĠT ừ +ãģ¨ ãģªãĤĭ +ز اÙĦ +Ġktóry m +Ġnh é +ìĪ ľ +н ев +д еÑĢ +ãĤ¢ ãĥĹãĥª +i á»ĩu +×ij ×Ļ׾ +Ġت س +ĠÄIJ ây +ĠاÙĦØ® اصة +Ġà¹Ģ à¸Ĭ +Ġà¹Ģà¸Ĭ à¹Īà¸Ļ +ص اد +Ġd ạng +س عر +Ġש ×Ļ×ŀ×ķש +×Ĵ ×Ļ×Ŀ +ãģĮãģĤ ãģ£ãģŁ +п ÑĢов +пÑĢов од +Ġ×IJ ×Ļ׳×ķ +Ġ׾ ר×IJ +Ġ׾ר×IJ ×ķת +ĠØ£ Ù쨶ÙĦ +ĠØŃ ÙĦ +ĠØ£ بÙĪ +ê° ķ +Ġì§ ij +ãģ® ãĤĪãģĨãģ« +Ġפ ׳×Ļ +ס ×Ļ×Ŀ +ĠÙĪÙĩ ذا +Ġka ç +Ġé én +Ġê± ´ +ë° Ķ +Ñĥ з +à¸Ĥà¸Ńà¸ĩ à¹Ģรา +i ÅĤ +ĠÐľ Ñĭ +Ġch ết +ĠاÙĦØ« اÙĨÙĬ +×IJ ×§ +Ġ×ķ ×¢×ľ +ĠاÙĦØ· ب +×ij×ĺ ×Ĺ +Ġج دÙĬدة +Ġع دÙħ +ع ز +สิà¹Īà¸ĩ à¸Ĺีà¹Ī +ãģĻ ãĤĮãģ° +ĠÄij ô +ì£ ł +د ÙĤ +н омÑĥ +Ġk á»ĥ +ãĤ¢ ãĥ³ +å¤ļãģı ãģ® +à¸Ľà¸£à¸° à¸ģ +à¸Ľà¸£à¸°à¸ģ à¸Ńà¸ļ +פע×Ļ׾ ×ķת +ĠÑģÑĤ ол +may ı +ãģ¤ ãģĦ +Ġyılı nda +Ġ à¸Īึà¸ĩ +koÅĦ cz +ĠTh ông +Ġак ÑĤив +н ÑģÑĤ +нÑģÑĤ ÑĢÑĥ +ĠÃĸ z +Ġת ×ŀ×Ļ×ĵ +ĠÙĥ ÙĨت +Ñģ иÑģÑĤем +pr és +prés ent +Ġn â +Ġnâ ng +gÅĤ os +ĠÙĪØ² ÙĬر +ØŃ صÙĦ +Ġиме еÑĤ +ØŃ رÙĥØ© +à¸ŀ à¹Īà¸Ń +ãĤĴ ãģĬ +Ġاست خداÙħ +×IJ×Ļר ×ķ×¢ +ä»ĸ ãģ® +Ġש×Ķ ×Ŀ +ãģĹãģŁ ãĤī +ש×ŀ ×Ļ +Ñģ ла +m ı +Ġbaz ı +Ġíķĺ ì§Ģë§Į +×ĵ ׾ +Ġyapt ıģı +ãĥĬ ãĥ¼ +׾ ×Ļ׾×Ķ +ãģ¨ãģĦ ãģ£ãģŁ +änd ig +ĠÅŁ a +ĠÙģÙĬ Ùħا +иÑĤ елÑı +×ŀ ×ķש +à¸Ĥ à¸Ńà¸ļ +l ük +Ġh á»ĵi +Ġëª ħ +ĠاÙĦÙĥ Ø«ÙĬر +צ ×IJ +Ġhaz ır +طر Ùģ +ا ÙĬا +ĠÄij ôi +ен д +ÙĦ غ +×Ĺ ×ĸ×ķר +ĠвÑģ ег +ĠвÑģег да +ëIJĺ ê³ł +×ĵ ×ķ×ĵ +ан а +د ÙĪÙĦØ© +Ġho ạch +ع ÙĦا +عÙĦا ج +Ġ×ķ ×¢×ĵ +×Ķ ×Ŀ +ки й +ÙĦ ÙIJ +Ġ×¢ ׾×Ļ×ķ +ÑİÑī ий +Ġng á»§ +صÙĨ ع +ĠاÙĦع راÙĤ 
+à¸ķà¹Īà¸Ń à¹Ħà¸Ľ +ãģŁãģı ãģķãĤĵ +Ġph ạm +ÙĦ اÙĨ +ات Ùĩا +Ġbö yle +تÙĨ ÙģÙĬ +تÙĨÙģÙĬ ذ +Ġש×Ķ ×Ļ×IJ +Ñģ Ñĥ +ย าว +Ġש ×ķ׳×Ļ×Ŀ +Ġ×ŀ ×ķ׾ +ĠÑģ ил +Ġ×IJ×Ĺר ×Ļ×Ŀ +Ġph á»§ +ÙĤØ· ع +ĠTh á»§ +à¸Ľà¸£à¸°à¹Ģà¸Ĺศ à¹Ħà¸Ĺย +ÙĨ ÙĤ +ĠÄijo ạn +Ġب Ø¥ +п ÑĢедел +×ķת ×ķ +Ġy arı +пÑĢ Ðµ +ĠczÄĻ ÅĽci +ØŃ ÙĥÙħ +×ķ׳ ×Ļת +פע ׾ +ãĤĴ ãģĹãģ¦ +Ġktó rzy +׾ ×Ŀ +ĠÄIJi á»ģu +ĠкоÑĤоÑĢ Ð°Ñı +ĠìĿ´ ìĥģ +ãģĤ ãģ£ãģŁ +Ġ×ŀ×ĵ ×ķ×ijר +פ ×ķ×¢×ľ +d ım +éĢļ ãĤĬ +ĠбÑĥд ÑĥÑĤ +à¹Ģวà¹ĩà¸ļ à¹Ħà¸ĭ +à¹Ģวà¹ĩà¸ļà¹Ħà¸ĭ à¸ķà¹Į +ا خر +×Ĺ ×Ļ׾ +Ġ×Ļ ×ľ +Ġ×Ļ׾ ×ĵ×Ļ×Ŀ +×Ĺ ×Ļפ +×Ĺ×Ļפ ×ķש +Ġd òng +Ġש ×ĸ×Ķ +ÑĮ е +ãģĤ ãģ¨ +ìŀIJ ê°Ģ +×IJ ×ĵ +Ġü z +Ġüz ere +ظ ÙĦ +Ġ×IJ ×ķ׾×Ļ +Ġ×ij ×Ļ×ķ×Ŀ +ÙĦ ات +Ġm ê +ì¹ ¨ +تØŃ د +تØŃد Ø« +ĠØ® اصة +Ġب رÙĨ +ĠبرÙĨ اÙħج +ĠH Ãłn +×Ĺ ×¡ +ĠÙĪ ÙĦÙħ +×¢ ×Ŀ +Ġm ı +à¸Ł ัà¸ĩ +ש ×¢×Ķ +ÙĪÙģ ÙĤ +ס ×ij×Ļר +алÑĮ нÑĭй +×Ĺש ×ķ×ij +Ġn Ãłng +ë³ ¼ +ĠкоÑĤоÑĢ ÑĭÑħ +Ġ×Ĺ ×ķ×§ +t ör +ĠлÑĥÑĩ ÑĪе +ãĥij ãĥ³ +ลà¹Īา สุà¸Ķ +Ġج دÙĬد +ÙĬد Ø© +à¸Ĺ รà¸ĩ +ãĤĪãĤĬ ãĤĤ +ÙĦ ÙĦ +ãĤĤ ãģ£ãģ¨ +ש×ĺ ×Ĺ +Ġ×ķ ×IJ×Ļ +Ġgi á»ijng +Ø¥ ضاÙģ +×§ ת +ë§ Ŀ +Ġzosta ÅĤ +ÑĢ Ð¾Ð· +×Ļפ ×Ļ×Ŀ +Ġ׼׾ ׾ +ת×ķ׼ ף +dıģ ını +ÙĤ سÙħ +ĠÑģ ÑĩиÑĤ +ĠÑģÑĩиÑĤ а +×ĺ ×ķת +Ġ ưu +ĠØ¢ ÙĦ +Ġм ом +Ġмом енÑĤ +ĠاÙĦتع ÙĦÙĬÙħ +×¢×ľ ×ķת +Ġch ữa +Ġy ön +Ġtr Ãł +ĠØŃ ÙĬÙĨ +à¸ĭ ั +ĠC á +×¢ ×ĸ +ĠاÙĦØ£ ÙħÙĨ +c ÃŃ +Ġv á»ijn +Ġ à¸Ļาย +об ÑĢа +×§ ×IJ +Ġthi ếu +ãĥŀ ãĥ¼ +ส วà¸Ļ +Ġg á»Ń +Ġgá»Ń i +Ġê ¹ +Ġê¹ Ģ +Ġthi á»ĩn +ÙĤ ع +w ÄĻ +Ġн ам +ÑĤ ол +Ġs ân +ס ×ķ×Ĵ +Ġgeç ir +ÑĤ он +ев а +ĠÙĪ Ø¶Ø¹ +Ġع شر +Ñģ ло +à¸Ī ัà¸ļ +ãĤ· ãĥ¼ +ãĤĤ ãģĤãĤĬãģ¾ãģĻ +Ġv ẻ +ĠÄIJ á»ĥ +ر Ù쨹 +ĠاÙĦØ£ÙĪÙĦ Ùī +ÑĤ аÑĢ +ãģªãģı ãģ¦ +Ùħ Ùİ +qu ÃŃ +×¢×ł×Ļ ×Ļ׳ +г ен +Ġh ôm +à¸Ī า +Ġnh Ỽ +ĠاÙĦع ربÙĬ +×IJ ף +Ġl á»Ļ +Ġje ÅĽli +à¹Ģà¸Ĺà¹Īา à¸Ļัà¹īà¸Ļ +ĠØ£ÙĨ Ùĩا +Ġt uy +Ġtuy á»ĩt +Ġت ص +Ġتص ÙĨÙĬ +ĠتصÙĨÙĬ Ùģ +Ġê·¸ëŁ¬ ëĤĺ +о ÑĨен +à¸ģิà¸Ī à¸ģรรม +ãĤĦ ãģ£ãģ¦ +Ġkh á»ıi +Ġl á»ĩ +ĠاÙĦÙħج تÙħع +à¸Ńาà¸Ī à¸Īะ +à¸Īะ à¹Ģà¸Ľà¹ĩà¸Ļ +ов Ñĭй +ר ×Ŀ +ร à¹īà¸Ńà¸Ļ +ש ×ŀש +人 ãģ« +Ġüzer ine +פר ×Ļ +du ÄŁu +Ñĩ ик +Ġmù a +Ġ×ŀת ×ķ×ļ +Ġc áºŃp +Ġت ارÙĬØ® +×ij׾ ת×Ļ +Ġì¢ Ģ +ÙĦ ع +ب اÙĨ +Ġch út +Ġ×Ķ×ĸ ×ŀף +n ée +ĠLi ên +ĠÙĦÙĦ Ø£ +ØŃد ÙĪØ¯ +Ġ×¢ ׼ש×Ļ×ķ +в оз +Ġyapt ı +Ġоб о +à¹ĥหà¹ī à¸ģัà¸ļ +Ġ×ij×Ķ ×Ŀ +ãģı ãģ¦ +ر أس +ĠÑģÑĢед ÑģÑĤв +ĠB Ãłi +ãģĵãģ¨ ãģ« +ĠìĤ¬ íļĮ +Ġ모 ëijIJ +×ij ×IJ +Ġtr ắng +ĠاÙĦبÙĦ د +ĠHo Ãłng +ли бо +ĠдÑĢÑĥг иÑħ +İ R +Ñĥм а +ĠJe ÅĽli +ãĤĤ ãģĹ +Ġv òng +Ġ×IJתר ×Ļ×Ŀ +ĠÄij á»įc +Ġв оÑĤ +ãģł ãģĮ +ë° ° +à¸Ķู à¹ģล +Ġ×ŀ ׼׾ +ìĹIJ ëıĦ +г аз +Ġ׳×ķס פ×Ļ×Ŀ +ãģĵãģ¨ ãģ§ +Ġت ÙĪ +ãģ§ ãģĤãĤĬ +à¸Ļั à¹Īà¸ĩ +ĠможеÑĤ е +sz ÄĻ +ãģ® ãģł +ĠÙħÙĨ Ùĩ +Ġb á»ķ +Ġb üt +Ġbüt ün +ë³´ ê³ł +Ġch á»ĵng +à¹ģà¸Ī à¹īà¸ĩ +ĠV ì +ĠØŃ ر +Ġgi ản +ĠÙħ دÙĬÙĨØ© +تط بÙĬÙĤ +à¸Ī ิ +æĹ¥ ãģ® +б ил +à¸ģ à¸Ńà¸ĩ +ê³ ³ +ĠØ£ Ùħا +ìĨ IJ +Ġtr ái +ĠвÑģ ем +Ġس ÙĨØ© +ĠÑģай ÑĤ +Ġг оÑĤов +п Ñĭ +ĠëIJ ł +ĠاÙĦØ® Ø· +ĠاÙĦرئÙĬس ÙĬØ© +Ġíķ ©ëĭĪëĭ¤ +ĠìķĦëĭĪ ëĿ¼ +ĠìĿ´ ëłĩ +ĠìĿ´ëłĩ ê²Į +) ØĮ +h ält +ĠØ£ Ùħر +Ġع Ùħر +à¸ģà¹ĩ à¸Īะ +Ġ à¸Ĺำà¹ĥหà¹ī +Ġc ân +Ġ×ij ׾ +Ġ×ij׾ ×ij×ĵ +פ סק +ĠÙĬ ÙĤÙĪÙĦ +н ÑĥÑĤÑĮ +à¹ģ à¸Ħ +Ġ×§ צת +Ġn ằm +Ġh òa +bilit Ãł +ĠìĹĨ ëĭ¤ +Ġ׼ פ×Ļ +ÑĢ Ð¾Ð¶ +лаг а +Ġ×Ķש ×Ļ +ĠNgo Ãłi +ĠÙĪ Ø¬ +ĠÙĪØ¬ ÙĪØ¯ +ĠìľĦ íķľ +Ġus ÅĤug +Ġtu ần +d ź +×ŀ ×ķף +ĠاÙĦع دÙĬد +Ġch ẳng +สุà¸Ĥ à¸łà¸²à¸ŀ +Ġ×ij ×ĵר×ļ +ĠÑģеб е +ĠìŀĪ ìĿĦ +ĠاÙĦØŃ اÙĦ +Ġd á +Ġc ưá»Ŀi +Ġnghi ên +ie ÅĦ +ĠD ương +ï¼ ħ +Ø´ د +ãģĦãģ¤ ãĤĤ +ĠвÑĭб оÑĢ +Ġc á»Ļng +ש ×Ļ׳×ķ×Ļ +Ġch ạy +Ġ×ij×¢ ׾×Ļ +اخ بار +íķĺ ë©° +ż Äħ +ج از +Ġ׳ ר×IJ×Ķ +ศ ู +ศู à¸Ļ +ศูà¸Ļ ยà¹Į +×Ĵ ×¢ +Ġ×¢ ×ĵ×Ļ +Ġ×¢×ĵ×Ļ ×Ļף +بر ا +ÑĨи й +ĠÄIJ á»ĵng +ÙĤ اÙĨÙĪÙĨ +ĠÄij ứng +ãģĹãģŁ ãĤĬ +Ġ×Ĺ×Ļ ×Ļ +Ġë IJľ +ĠëIJľ ëĭ¤ +Ġм еждÑĥ +à¸ŀวà¸ģ à¹Ģà¸Ĥา +ĠB ắc +ล ำ +ë° ± +ĠíĻ ķ +มาà¸ģ ม +มาà¸ģม าย +бан к +à¸Ńา à¸ģาร +Ġh Ãł +Ġ׾ ׳ +à¸Ń à¸Ń +Ġë°Ķ ë¡ľ +л ом +m ática +ĠØŃ د +اب ت +à¸Ĺีà¹Ī à¸Ļีà¹Ī +Ġco ÅĽ +ÙģÙĬ دÙĬ +ÙģÙĬدÙĬ ÙĪ +ĠмеÑģÑĤ о +Ġph út +มาà¸ģ à¸ģวà¹Īา +×IJ פ +ب 
ÙIJ +ĠPh ú +ì± Ħ +ĠÙĪ Ø³ÙĦÙħ +à¸Īี à¸Ļ +поÑĤ ÑĢеб +Ġ×Ĺ×ĵ ש×ķת +Ø´ ÙĪ +Ġעצ ×ŀ×ķ +ĠعÙħÙĦ ÙĬØ© +à¸Ħุà¸ĵ à¸łà¸²à¸ŀ +ãģ¾ãģĻ ãģĮ +دع ÙĪ +طر ÙĤ +à¹Ħมà¹Ī à¸ķà¹īà¸Ńà¸ĩ +ë² Ķ +ìĬ ¹ +Ġk ÃŃch +ĠìĹĨ ëĬĶ +ĠÑĤ ам +ĠÙĨ ØŃÙĪ +ĠاÙĦÙĤ اÙĨÙĪÙĨ +×Ĺ ×ķ×Ŀ +Ġk ız +Ġ×ĵ ×Ļף +ĠвÑĢем ени +ãģ£ãģŁ ãĤĬ +ĠØ´ Ùĩر +ĠìĦľ ë¹ĦìĬ¤ +×¢ ש×Ķ +Ġgi ác +ĠاÙĦسÙĦ اÙħ +Ġ×IJ ש +ĠполÑĥÑĩ а +à¸Īัà¸Ķ à¸ģาร +к оÑĢ +Ġ×Ķ×ĺ ×ķ×ij +ราย à¸ģาร +주 ìĿĺ +à¹ģà¸ķà¹Ī ละ +Ġê·¸ëŁ° ëį° +à¸Ĺีà¹Ī à¹Ģà¸Ľà¹ĩà¸Ļ +Ġת ×ķ×ļ +بÙĬ اÙĨ +Ð Ļ +oÅĽci Äħ +ÑĤ ок +ĠÃ Ķ +ĠÃĶ ng +à¹Ħมà¹Ī à¹ĥà¸Ĭà¹Ī +ãģ¿ ãģ¦ +ÐŁ о +ĠЧ ÑĤо +íĻ © +×ĺ ×ij×¢ +меÑĤ ÑĢ +Ġ×ij ×ŀ×Ķ +Ġ×ij×ŀ×Ķ ×ľ +Ġ×ij×ŀ×Ķ׾ ×ļ +Ñĩ ÑĮ +×§ ש×Ķ +з нак +знак ом +uj ÄĻ +×Ļצ ר +ĠاÙĦÙħ ÙĦÙĥ +ı yla +×IJ×ŀ ת +à¸Ľ ิà¸Ķ +×IJ ×Ĺ×ĵ +ر اد +Ġm áºŃt +ëĭ¤ ëĬĶ +Ġl ạnh +ש׾ ×ķש +ØŃ دÙĬØ« +ت ز +å¹´ ãģ® +Ġк ваÑĢ +ĠкваÑĢ ÑĤиÑĢ +ä½ľ ãĤĬ +رÙĪ Ø¨ +ов ан +ĠТ е +à¸Īำ à¸ģ +à¸Īำà¸ģ ัà¸Ķ +ب اط +×Ĵ ת +Ġм аÑĪ +ĠмаÑĪ Ð¸Ð½ +×Ļצ ×Ķ +ãģ» ãģ¨ +ãģ»ãģ¨ ãĤĵãģ© +ÃŃ do +ĠÑı зÑĭк +à¸ļ ิà¸Ļ +สà¸ĸาà¸Ļ à¸Ĺีà¹Ī +ĠìĹ ´ +ãĤ¦ ãĤ§ +Ġc Ãł +п ан +åı£ ãĤ³ãĥŁ +Ġر د +اÙĤ ت +ĠÙĥ ب +ĠÙĥب ÙĬرة +ÑģÑĤ ал +ש×ŀ ×Ĺ +pos ición +ĠÙħÙĦÙĬ ÙĪÙĨ +ĠìĿ´ ìķ¼ +ĠìĿ´ìķ¼ ê¸° +Ġh út +ĠÅĽw iat +Ġë°© ë²ķ +ĠÑģв еÑĤ +Ġвиде о +ĠاÙĦÙĨ ظاÙħ +Ġtr á»Ŀi +ĠëĮĢ íķ´ìĦľ +ר ×ŀת +ت داÙĪÙĦ +×ķר ×ĵ +ת ×ŀ +ת×ŀ ×ķ׳×ķת +Ġ×ŀ ף +Ġдв а +Ġ×Ķ×§ ×ķ +æĹ¥ ãģ« +Ġ×Ķ×Ĵ ×Ļ×¢ +à¹Ģà¸ŀิà¹Īม à¹Ģà¸ķิม +Ùħار س +Ġê²ĥ ìŀħëĭĪëĭ¤ +ãģªãģĦ ãģ¨ +Ġnhi á»ĩt +ëIJ ©ëĭĪëĭ¤ +Ġ×ij׳ ×ķש×IJ +Ġê°Ģ ìŀ¥ +Ġv ợ +ĠÄij óng +צ×Ļ׾ ×ķ×Ŀ +ê´Ģ ê³Ħ +в аÑı +×IJ ×Ļ×ĸ +×IJ×Ļ×ĸ ×Ķ +ĠÙĨ ظاÙħ +ÙħØŃ اÙ쨏 +Ġt ải +기 ëıĦ +à¸Ľà¸±à¸Ī à¸Īุ +à¸Ľà¸±à¸Īà¸Īุ à¸ļัà¸Ļ +׼ ×ĵ×ķר +ĠìķĦ ìĿ´ +׼׳ ×Ļס +à¹Ģ à¸ķร +à¹Ģà¸ķร ียม +Ġngo ại +ĠدÙĪÙĦ ار +Ġr ẻ +Ġkh Äĥn +عد د +Ø´ عب +czy Äĩ +ĠاÙĦ Ùĥر +ĠÑĩеловек а +ĠÙĪ Ø¥ÙĨ +×IJ ×ĺ +Ġth Æ¡ +ĠاÙĦ رÙĬاض +оп ÑĢедел +опÑĢедел ен +×Ķ ×ŀש×ļ +ĠÐĿ ово +з Ñĭва +ĠاÙĦدÙĪÙĦ ÙĬ +ĠÄij áp +Ġк ÑĢед +ĠкÑĢед иÑĤ +ов ого +Ġm ôn +à¸Ľà¸£à¸° à¹Ĥย +à¸Ľà¸£à¸°à¹Ĥย à¸Ĭà¸Ļ +à¸Ľà¸£à¸°à¹Ĥยà¸Ĭà¸Ļ à¹Į +ÑģÑĤ е +ĠTh á»ĭ +د ÙĬØ© +×ŀצ ×ķ +Ùģ Ø§Øª +×§ ×ĵ×Ŀ +ìĿ´ëĿ¼ ê³ł +ÙĪ Ø® +Ġ×Ĺ ×ĸ +ĠÑĦоÑĤ о +׾ ×Ļת +ت Ùİ +ÙĪ Ø¨Ø± +й ÑĤи +ĠÃ¶ÄŁ ren +Ġ×Ķ×ĸ ×ķ +Ġv á»įng +ÙĤÙĪ Ø© +ĠT ây +ĠÐĿ и +Ġש ×ķ×ij +ãģ¨è¨Ģ ãĤıãĤĮ +ãģ© ãĤĵãģª +׊צ×Ļ +ï½ ľ +Ġ×ķ×Ķ ×ķ×IJ +ä¸Ģ ãģ¤ +ĠÑģÑĤо иÑĤ +ni Äħ +×ĺר ×Ļ +ĠдеÑĤ ей +нÑı ÑĤÑĮ +ĠÑģдел аÑĤÑĮ +Ġë§İ ìĿ´ +ä½ķ ãģĭ +ãģĽ ãĤĭ +à¹Ħ หม +à¸ķิà¸Ķ à¸ķà¹Īà¸Ń +Ġ×ij ת×Ĺ +Ġ×ijת×Ĺ ×ķ×Ŀ +ìĻ Ħ +ì§Ģ ëĬĶ +ÑģÑĤ аÑĤ +ÑıÑģ н +ü b +Ġth ả +Ġ×ij×IJ×ŀ ת +Ġt uyến +×ĵ ×Ļר×Ķ +Ġ×IJ ×Ļש×Ļ +×ĸ׼ ר +ãģ° ãģĭãĤĬ +Ġx ét +׼ ×Ļ×ķ +׼×Ļ×ķ ×ķף +diÄŁ ini +ĠاÙĦÙħ ÙĪØ¶ÙĪØ¹ +Ġh áºŃu +à¸Īาà¸ģ à¸ģาร +×ijס ×Ļס +Ġ×ŀ×Ĵ ×Ļ×¢ +×ij ×Ļ×¢ +ĠÙĪ Ø¬Ùĩ +à¹ģà¸Ķ à¸ĩ +à¸Ļ าà¸ĩ +ĠÅŀ a +ì ¡´ +ë¡ Ģ +à¸ķ ะ +Ġ×Ķ×Ĺ×Ļ ×Ļ×Ŀ +Ùģ ÙĬد +ãģ§ãģĻ ãģĭãĤī +ê· ľ +ź ni +ĠлÑİ Ð´ÐµÐ¹ +Ġyüz de +ıy orum +ĠاÙĦ بØŃر +e ño +п аÑĢ +ÙĬ ÙĤØ© +об ÑĢ +ר ×ķ×ļ +ت ÙĪÙĤع +ĠاÙĦØ´ ÙĬØ® +åĪĿ ãĤģãģ¦ +ĠÑĤ елеÑĦ +ĠÑĤелеÑĦ он +Ġth ôi +Ġ×Ļ׼×ķ׾ ×Ļ×Ŀ +ĠÅŁ irk +ĠÅŁirk et +Ġìļ°ë¦¬ ê°Ģ +ĠÄij ông +Ġת ×ķ×ĵ×Ķ +ÑģмоÑĤÑĢ ÐµÑĤÑĮ +ĠÙĦ ÙĩÙħ +Ġ׾ ׼ +ĠN ó +ĠØŃ اÙĦØ© +ãģĦ ãģij +קר ×ķ +az ı +ãĤ³ ãĥ¼ +ĠÙĦÙĦ ت +s ınız +ĠH ải +기 ìĪł +ยัà¸ĩ à¹Ħมà¹Ī +ëĭ¤ ê³ł +פ ×Ĺ +Ġ׾×Ĵ ×ij×Ļ +Ġع ÙĨÙĩ +Ġк аз +Ġказ ино +ب ÙĪØ± +ÑĦ еÑĢ +Ġê°Ļ ìĿ´ +تس جÙĬÙĦ +ĠاÙĦÙħ رÙĥز +ĠTh ái +д аÑĤÑĮ +×ŀ×Ļ ×Ļ׾ +Ġpay laÅŁ +ãģ¤ ãģ® +à¹Ģร ืà¸Ń +n ça +׳ ×ķ×Ĺ +Ġ×IJ פ×Ļ׾×ķ +ãģ¨ èĢĥãģĪ +ãģ¨ãģĹãģ¦ ãģ¯ +à¹Ģà¸Ī à¸Ń +×ŀ פ +Ġg iriÅŁ +л иÑĤ +ÑĤ елÑı +Ñij н +æ°Ĺ ãģ« +Ġg ó +Ġgó p +åĪĩ ãĤĬ +Ġ×Ķ ×Ĺ×ĵש +ж ал +Ġ×ĵ עת +éģķ ãģĨ +à¹Ģà¸Ĥà¹īา à¹Ħà¸Ľ +Ġס ר×ĺ +e ña +æĸ° ãģĹãģĦ +ر Ùİ +ĠÐIJ ÑĢ +Ġph ản +à¸Īะ à¹Ħà¸Ķà¹ī +Ġ×ijצ ×ķר×Ķ +Ø´ اÙĩ +شاÙĩ د +ÙĪØ± د +à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ à¸Īาà¸ģ +или ÑģÑĮ +à¹ģละ à¸ģาร +Ġ×Ķ ×ĸ׼ +Ġ×Ķ×ĸ׼ ×ķ×Ļ×ķת +ei ÃŁ +ãĥ ¨ +ìĥ Ī +ĠÃĩ a +Æ ¯ +ש ×Ĵ +ÙĬÙĨ Ø© +ร à¹īà¸Ńà¸ĩ +ãĤµ ãĥ³ +ÑĢоÑģÑģ ий +ÑĢоÑģÑģий Ñģк +a ÄŁa +ĠнаÑĩ ина +Ġص ÙĦÙī +à¸Ĺุà¸ģ à¸Ħà¸Ļ +íļĮ 
ìĤ¬ +Ġли ÑĨ +Ø´ ÙĬر +ĠØ´ÙĬ Ø¡ +ÙĬÙĨ ا +Ġפ ×Ĺ×ķת +Ġiçer is +Ġiçeris inde +ĠØ£ ØŃÙħد +Ġże by +ì´ Ŀ +Ġп оказ +Ġи менно +หà¸Ļัà¸ĩ ส +หà¸Ļัà¸ĩส ืà¸Ń +ĠÑĤÑĢ Ðµ +สัà¸ĩ à¸Ħม +Ø¥ ÙIJ +ãģĮ å¿ħè¦ģ +ÙĬÙij Ø© +פ צ +íĭ ° +ĠÙħ جاÙĦ +׳ פש +к ан +×Ĺ ×ķפ +×Ĺ×ķפ ש +ì²ĺ ëŁ¼ +ов аÑı +з ов +Ġh ạ +Ġdzi ÄĻki +×Ļר ×ķ +Ġ׾ ×ŀצ +Ġ׾×ŀצ ×ķ×IJ +×Ļ×ĵ ×ķ +Ġs ợ +Ġ׾×Ķ ×Ĵ×Ļ×¢ +×§ ×ij×¢ +Ġchi á»ģu +ãĥŀ ãĤ¤ +Ġd Ãłng +à¹ģà¸Ł à¸Ļ +Ġü ye +×Ļ׳ ×Ĵ +à¹Ģรีย à¸ģ +ç§ģ ãģĮ +th é +ĠÑĦ илÑĮ +ĠÑĦилÑĮ м +ĠNg Ãły +Ġж ен +Ġжен Ñīин +ج ÙĬد +n ç +à¸Ľ รา +×Ļ×ŀ ×ķ +Ġn á»ģn +×IJ ×ķ׾×Ŀ +Ġвозмож ноÑģÑĤÑĮ +Ġëĭ¤ ìĭľ +è¦ĭ ãģŁ +à¸ĸ à¸Ļ +à¸ĸà¸Ļ à¸Ļ +mız ı +ĠÙħ جÙħÙĪØ¹Ø© +c jÄħ +ĠÐł Ф +à¸ģำ หà¸Ļ +à¸ģำหà¸Ļ à¸Ķ +ĠìŬ 기 +land ı +ни ÑĨ +ÑģÑĤв е +Ġ×ĵ ×ijר×Ļ×Ŀ +Ġsk ÅĤad +ãĤĬ ãģ¾ãģĹãģŁ +ĠоÑĤ кÑĢÑĭÑĤ +нÑı ÑĤ +ĠÑģво ей +à¸Ī ิà¸ķ +ĠкаÑĩеÑģÑĤв е +Ġet tiÄŁi +ìĤ¬ íķŃ +ĠاÙĦÙĬ ÙħÙĨ +иÑĩеÑģки й +ë¸ Į +Ġ×ij×IJר ×¥ +Ġا سÙħ +Ġиз веÑģÑĤ +r ão +Ġatt ivitÃł +à¹Ģà¸Ľà¹ĩà¸Ļ à¸ģาร +ĠاÙĦد Ùĥت +ĠاÙĦدÙĥت ÙĪØ± +ĠÙĪØ§ØŃد Ø© +ĠÑģ ÑĩеÑĤ +ĠпÑĢ Ð¸Ñĩ +ĠпÑĢиÑĩ ин +ĠÙĪØ² ارة +Ġh uyá»ĩn +ĠÙĥ تاب +à¹ģà¸Ļ à¹Īà¸Ļ +à¹ģà¸Ļà¹Īà¸Ļ à¸Ńà¸Ļ +Ġgün ü +г ÑĢÑĥз +ĠاÙĦØ® اص +Ġgör ül +׾ ×ŀ×ĵ +Ġìłķ ëıĦ +×ķ×ij ×Ļ׾ +Ġ×ŀ×§ צ×ķ×¢×Ļ +ĠоÑģоб енно +à¸Ľà¸£à¸° à¸ģา +à¸Ľà¸£à¸°à¸ģา ศ +aca ģını +ë¶ ģ +à¸łà¸¹ มิ +ĠÑį лекÑĤ +ĠÑįлекÑĤ ÑĢо +Ġ×§ ש×Ķ +سÙĦ Ø· +à¸Ĭà¸Ļ ะ +×¢ ×Ļ׾ +ĠЧ е +à¹ģà¸Ļ à¹Ī +lı ÄŁ +lıģ ın +Ġ×ŀ×¢ ×¨×Ľ×ª +好ãģį ãģª +มาà¸ģ à¸Ĥึà¹īà¸Ļ +×ŀ×¢ ×ijר +ĠاÙĦÙħ غرب +ĠпеÑĢ Ð¸ +ĠпеÑĢи од +Ġnh ạc +ا ÙĪÙĬ +ĠÙĪ Ø¹ÙĦÙī +أخ ذ +ĠC ô +תר ×ij×ķת +×Ĵ ×Ķ +Ġktóre j +×IJ ×Ļת +×ij ×ķ×IJ +д елÑĮ +รี วิ +รีวิ ว +ж Ñĥ +Ġ×ij×Ĺ ×ķ +еÑĪ ÑĮ +ĠØ£ ÙĦÙģ +ĠاÙĦÙĪ Ø·ÙĨÙĬ +ĠاÙĦÙħÙĨ Ø·ÙĤØ© +nÄħ Äĩ +Ġthi ên +иÑĩеÑģк ой +ĠاÙĦÙħ ÙĦ +Ġع Ùħ +ס פר +Ġnh óm +ÙĪØµ Ùģ +ĠCh úng +Ġر ÙĤÙħ +ãģ¾ãģĹãģŁ ãģĮ +al ité +ล ม +ĠëĤ´ ê°Ģ +׾ק ×ķ×Ĺ +ĠS Æ¡n +pos ição +mi ÄĻ +Ġtr ánh +ĠÄIJ á»Ļ +׼ ×Ĺ +ãģĤ ãģ£ãģ¦ +à¸Ńย à¹Īา +Ġ×ŀ×Ĺ ×Ļר +Ġ×Ķ ×Ļת×Ķ +à¸Ľ à¹Īา +à¸Ńืà¹Īà¸Ļ à¹Ĩ +Ø´ ÙĤ +×ł×¡ ×Ļ +ë¦ ¼ +ãģ¦ãģĹãģ¾ ãģĨ +Ġ×ŀ צ×ij +ãģ« åĩº +ÙħÙĪØ§ Ø·ÙĨ +ยัà¸ĩ มี +алÑĮ нÑĭе +san ız +Ø¥ سرائÙĬÙĦ +ĠvÃł i +ì¤ Ħ +ã썿ĢĿ ãģ£ãģ¦ +×Ļ ×ķ׳×Ļ +çĶŁ ãģį +Ġs âu +Ñĩ иÑģÑĤ +Ġl á»ħ +ĠGi á +à¸Ńุ à¸Ľ +à¸Ńà¸¸à¸Ľ à¸ģร +à¸Ńà¸¸à¸Ľà¸ģร à¸ĵà¹Į +Ġnh ẹ +r ö +ס ×ĺ×Ļ +ãģķãĤĵ ãģĮ +Ġd ầu +ع Ùİ +ت را +×Ĵ×ĵ ׾ +Ġtécn ica +׼ ׳×Ļ×Ŀ +תק ש +תקש ×ķרת +Ġн его +ét ait +Ġm á»ģm +Ñģ еÑĤ +Ġnh áºŃt +Ġ×ŀ ×¢×ľ +Ġ×Ķ×¢ ×ij×ķ×ĵ +Ġ×Ķ×¢×ij×ķ×ĵ ×Ķ +Ġ×Ĵ ×Ļ׾ +ãģ¯ ãģªãģĦ +ائ ØŃ +Ġз деÑģÑĮ +×IJ ×Ļ׳×ĺר +Ùħ ÙIJ +Ġ×Ļ ×Ĺ×ĵ +ر اÙģ +ì²ĺ 리 +×ĵ ×¢×ķת +ì¹ ľ +ĠТ о +ĠTh ế +ì¶ © +Ġ׳׼ ×ķף +عÙĬ Ø´ +ни з +Ġج اÙĨب +×ŀ×§ צ×ķ×¢ +à¹Ĥ à¸ĭ +Ñģ ÑĥÑĤ +ìĸ´ ìļĶ +ãĤĴè¦ĭ ãģ¦ +ار د +Ġaç ıl +ĠاÙĦØŃ ÙĬاة +à¸ģà¹ĩ à¹Ħà¸Ķà¹ī +ãģĿãĤĮ ãĤĴ +عض ÙĪ +Ġг ÑĢаж +ĠгÑĢаж дан +à¸Īะ à¸ķà¹īà¸Ńà¸ĩ +ĠìĿ´ 룬 +ĠìĿ´ë٬ íķľ +Ġtr ách +ÙĨ Ùİ +Ġkı sa +Ã Ķ +ÑĪ ÐºÐ° +ãģ® äºº +ĠÐŁ оÑģ +ĠÐŁÐ¾Ñģ ле +Ñĥ лÑĮ +ÙĪØ§ جÙĩ +ÙĤ رب +à¸Ľà¸ıิ à¸ļัà¸ķิ +ê° Ļ +Ġ×ŀ ׳ +ĠÑģво и +بر اÙħج +Ġر ÙĪ +пÑĢ Ð¾Ð´ +пÑĢод аж +Ġby ÅĤy +วั ย +Ġgör ün +ĠÃ Ī +ÑİÑī им +ĠÑĤак ой +Ùģ ÙĪØ± +ĠÙģ Ø¹ÙĦ +Ġб ел +ëIJ ł +er ÃŃa +ĠÑģво Ñİ +Ġl ã +Ġlã nh +à¹Ģà¸ŀืà¹Īà¸Ń à¹ĥหà¹ī +ÙĤ ÙĨ +تط ÙĪÙĬر +Ġsay ı +ĠÑģ ейÑĩаÑģ +Ġ×IJ×Ĺר ת +×§ ×ķפ×Ķ +×§×ķר ס +Ġس Ùħ +Ġ×ĺ ×Ļפ×ķ׾ +ìĿ´ëĿ¼ ëĬĶ +دراس Ø© +èµ· ãģĵ +×Ĺ ×Ļ׳ +×Ĺ×Ļ׳ ×ķ×ļ +×ĵ ×§ +Ġë§ ŀ +Ġком анд +ĠÐij о +Ġиг ÑĢÑĭ +à¸ļ ี +ĠØ£ Ùİ +в ен +ĠاÙĦج دÙĬد +ĠÙĦ Ø¥ +Ġ×ķ×IJ ׳×Ļ +Ġ×Ķס ×Ļ +иÑĩеÑģк ого +رÙĪ ØŃ +à¸ģาร ศึà¸ģษา +ĠTr ưá»Ŀng +иг ÑĢа +ıl ması +Ġм аÑģÑģ +ãģ¨ãģį ãģ« +à¸Ĺีà¹Ī à¸ľà¹Īาà¸Ļ +à¸Ĺีà¹Īà¸ľà¹Īาà¸Ļ มา +ĠاÙĦساب ÙĤ +Ġ×ŀ×¢ ×ĺ +в аÑĤÑĮ +m Ã¼ÅŁ +Ġ׾ ׼×ļ +Ġt á»ĭch +Ùģ ÙĩÙħ +تد رÙĬب +Ø´ Ùĥ +Ġ×ij ×ŀ×Ļ +Ġ×ij×ŀ×Ļ ×ķ×Ĺ×ĵ +ÙĤØ· اع +ãģª ãģĹ +×ķצ ×Ļ×IJ +ĠÙĪ Ø³ÙĬ +з Ñĥ +Ġy at +Ġyat ırım +ë§ İ +Ġth ắng +ãģĬ 客 +ãģĬ客 æ§ĺ +ĠThi ên +ãģ«å¯¾ ãģĹãģ¦ +ÑĢ Ð¸Ñģ +ÙĨت ائ +ÙĨتائ ج +Ġ×ŀ שר +Ġ×ŀשר ×ĵ +Ġتع اÙĦ +ĠتعاÙĦ Ùī +ש ׳×Ļ +Ùĩ اÙħ +×IJ׳ ש×Ļ×Ŀ +Ġżyc 
ia +ĠÑĢÑĥб лей +ÙĬ ض +Ġkat ıl +ĠÙħ ÙĪØ¶ÙĪØ¹ +Ġvard ır +ĠÙħÙĨ Ø·ÙĤØ© +ĠTr ần +Ġв еÑģ +ü p +Ùħ ÙĪÙĨ +ÑĪ Ð»Ð¸ +Ġn óng +Ø® ÙĦÙģ +ĠС ÑĤа +Ġд оÑĢ +ĠдоÑĢ Ð¾Ð³ +ĠwÅĤa ÅĽnie +eÄŁ in +Ġhi á»ĥm +ĠС ам +ê»ĺ ìĦľ +ĠÑĦ а +ãģ» ãģĨ +ãģ»ãģĨ ãģĮ +×ķפ ×Ļ×¢ +ê° Ī +د ÙĪÙĦ +Ġthu ê +Ġch á»Ĺ +Ġëĭ¹ ìĭł +ãģij ãĤĮ +ãģijãĤĮ ãģ© +ë³´ íĺ¸ +ãģķãĤĮ ãģ¦ãģĦãģ¾ãģĻ +Ġнад о +ĠìĤ¬ëŀĮ ëĵ¤ +à¹Ģà¸Ĥ à¸ķ +สม ัย +z ÅĤ +ت ÙĪØ± +Ġש ת×Ļ +v ê +Ġ×ijת ×ķ×ļ +à¸Ĭ ัย +ãģĦ ãģ£ãģŁ +ìĿ ij +Ġt ầ +Ġtầ ng +ש ׼ר +Ġê¸ Ģ +Ġ×Ķש ׳×Ķ +Ġا ÙĨÙĩ +ç«ĭ ãģ¡ +r és +füh ren +ر ØŃÙħ +ê· ¹ +ĠâĢ « +Ġsu ất +à¸Ł ิ +ÙĬ Ùĩا +ĠاÙĦ اتØŃاد +Ġt uyá»ĥn +ãģ¾ ãĤĭ +Ġm ại +Ġng ân +ãĤ° ãĥ© +欲 ãģĹãģĦ +س ار +ãĤĤãģ® ãģ§ãģĻ +ки е +Ġseç im +åħ¥ ãĤĬ +ãģªãģ© ãĤĴ +ÑĤ ÑĢи +ĠÑģп еÑĨ +ĠØ£ د +Ġод но +ÑĪ ÐµÐ» +ãĥĩ ãĥ¼ãĤ¿ +ãĤ· ãĤ¹ãĥĨ +ãĤ·ãĤ¹ãĥĨ ãĥł +è¡Į ãģį +ã썿ĢĿ ãģ£ãģŁ +à¹Ģà¸ģิà¸Ķ à¸Ĥึà¹īà¸Ļ +ĠÑĤ ож +ĠÑĤож е +Ġs ạch +ĠÑģ ÑĢок +Ġкли енÑĤ +ĠÙħØ´ رÙĪØ¹ +Ġalt ında +Ġì ·¨ +ä¸Ń ãģ® +ãģķãģĽ ãĤĭ +ãģĻ ãģ¹ +ãģĻãģ¹ ãģ¦ +ê°ľ ë°ľ +ĠÄij êm +ãģªãģĦ ãģ®ãģ§ +ì² ł +×¢ ×ij×ĵ +Ġd ấu +à¸Ħà¸Ļ à¸Ĺีà¹Ī +ĠC ách +تع ÙĦÙĬÙħ +Ġh ại +ãĤ» ãĥķãĥ¬ +ĠÙĨÙ쨳 Ùĩ +ĠíĨµ íķ´ +ÑĪ Ð»Ð¾ +Ġнап ÑĢав +ĠнапÑĢав лен +ÑĢÑĥ Ñĩ +íĶ Į +Ġ×ijר ×Ļ×IJ +ãģ® ãģ¿ +ãģ«ãģĬ ãģĦãģ¦ +×ij ׳ק +ãĤ¨ ãĥ³ +Ø«ÙĦ اث +Ġm ỹ +ĠÑģай ÑĤе +Ġе мÑĥ +ت غÙĬ +تغÙĬ ÙĬر +خص ÙĪØµ +ÑĤе ли +Ġ×ķ׾ ׼ף +פע ×Ŀ +Ġпо ÑįÑĤомÑĥ +ر اÙĨ +иÑĤел ей +пиÑģ ан +×¢ ×¥ +ĠìĤ¬ ìĹħ +Ùħ ز +جÙħ ÙĬع +ë©´ ìĦľ +à¸ľà¸¥à¸´à¸ķ à¸łà¸± +à¸ľà¸¥à¸´à¸ķà¸łà¸± à¸ĵ +à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵ à¸ij +à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵà¸ij à¹Į +ĠпÑĢ Ð¸Ð¼ÐµÑĢ +ãĤŃ ãĥ¼ +l â +Ġch Äĥm +缮 ãģ® +ãģĦ ãģĭ +ãģ¨è¨Ģ ãģĨ +×ĸ ×ķ×Ĵ +Ġ×ij ×ĵ×Ļ +Ġ×ij×ĵ×Ļ ×ķ×§ +ãģĬ åºĹ +à¸ķà¸Ńà¸Ļ à¸Ļีà¹ī +Ġph á»iji +п ÑĤ +สà¸Ļ าม +Ø· ÙĪ +ص اØŃ +صاØŃ ب +ĠD ü +ĠDü nya +Ġп ока +п ал +ĠÄij ảo +ĠاÙĦÙģ ÙĪØ± +ĠاÙĦÙģÙĪØ± Ùĥس +Ġmá u +кÑĢ ÐµÐ¿ +ĠاÙĦس اعة +ĠгоÑĢ Ð¾Ð´Ð° +Ùģ ØµÙĦ +ай ÑĤе +Ġд ог +Ġдог овоÑĢ +ĠØ¥ ذ +Ġ×ij׼׾ ׾ +ÙĬ تÙĩ +×Ĵ ×ijר +Ġbir ç +Ġbirç ok +문 íĻĶ +ãģĿãģĨ ãģª +را ØŃ +ĠÙħ رة +ĠденÑĮ ги +f ä +à¸Ĥà¹īา ว +ĠÑģов ÑĢем +ĠÑģовÑĢем енн +׾×Ĺ ×¥ +èī¯ ãģı +ĠÙģ Ø£ +Ġ×ķ ×ĸ×Ķ +Ġз ани +Ġзани ма +Ġê°Ģì§Ģ ê³ł +Ġh Æ¡i +ãģªãģ® ãģĭ +ãĥĨ ãĥ¬ãĥĵ +Ġר ×ij×ķת +à¸ķ ี +Ġ×ijש ×ł×ª +ĠT ại +Ġthu áºŃn +Ñģ ел +Ñij м +dzi Äĩ +ĠÑģ ка +ĠÑģка Ñĩ +ĠÑģкаÑĩ аÑĤÑĮ +×ķ×ŀ ×ķ +г ла +Ġмин ÑĥÑĤ +åĩº ãģĻ +Ġ×Ĺ×Ļ ×Ļ×ij +Ġת ×Ĵ×ķ×ij×Ķ +à¸£à¸¹à¸Ľ à¹ģà¸ļà¸ļ +ни ÑĨа +Ġİ n +ĠØ£ ع +Ġض ÙħÙĨ +Ùħ ثاÙĦ +ĠyaÅŁ an +ĠìŰ 구 +ĠL ê +ש׾ ×Ĺ +ãģı ãģªãĤĭ +ìĹĨ ìĿ´ +ĠÑĤ ÑĢи +ĠÑĩаÑģÑĤ о +Ġоб ÑĢаÑĤ +п ло +د Ø® +دخ ÙĪÙĦ +س Ùĩ +à¸Ń าà¸ģ +à¸Ńาà¸ģ าศ +Ġ׼ ×ĸ×Ķ +Ġ×Ķ×¢ סק +ĠاÙĦØ£ ÙĨ +å¹´ ãģ« +×¢ ש×ķ +Ġש ×¢×ķת +Ġm Ãłn +×IJר ×Ļ +sı yla +Ù쨱 ÙĤ +ни Ñħ +Ġت ست +è¦ĭ ãģ¦ +ØŃا ÙĪÙĦ +×IJ ×Ļ׼×ķת +ĠbaÅŁ ladı +st Äħ +stÄħ pi +à¸Ĺีà¹Ī à¹Ģรา +ÙĤر ر +ج اب +Ġ×ijר ×ķר +à¹Ģà¸Ĥà¹īา à¹ĥà¸Ī +×ŀ׊קר +al ım +Ġס ×Ļפ×ķר +ãģ§ãģĤ ãĤĮãģ° +Ġש×ŀ ×ķר×ķת +Ġ×ķ ×ŀ×Ķ +ãģĵ ãģĿ +id ée +ä¸ĭ ãģķãģĦ +تÙĨا ÙĪÙĦ +Ġ ลà¹īาà¸Ļ +Ġìļ°ë¦¬ ëĬĶ +اÙĨ ا +ÑģÑĤ ой +б оÑĤ +ĠyaÅŁ am +kö y +Ø¥ ÙĦ +ÑĢ Ñĭв +기 ìĹħ +Ġ×Ķ×ŀ ×ĵ +Ġ×Ķ×ŀ×ĵ ×Ļ׳×Ķ +د ب +×¢ ×Ļ׳×Ļ +×ŀ ת×Ĺ +Ġפ ר×Ļ +ãĥĭ ãĥ¼ +اÙħ ÙĬ +Ġnh ằm +ãĤĮ ãģªãģĦ +ت عرÙģ +Ġë§Ī ìĿĮ +ìĵ ° +Ġh ấp +ר×Ĵ ×Ļ׾ +ب Ùİ +Ġr Äĥng +gl Äħd +ĠÑģиÑģÑĤем Ñĭ +Ġkh óa +ãģ§ãģĻ ãĤĪãģŃ +大ãģį ãģı +기 를 +Ġké o +ÙĪ Ø¡ +ج اÙħ +جاÙħ ع +Ġ×¢ ×Ļצ×ķ×ij +t éri +Ġת ש +Ġ×IJ ×ij×Ļ +ĠCh ương +à¸ļริ à¹Ģว +à¸ļริà¹Ģว à¸ĵ +ãģ¤ ãģı +Ġ×Ĺ ×ķ׾ +עת ×Ļ×ĵ +ש ×Ļ×ŀ×Ķ +ëĤ ¨ +Ġש×IJ ×Ļף +ĠÙĪØ§ÙĦ Ø¥ +ÑĦ а +Ġkh ám +Ġ×ĺ ×ķ×ij×Ķ +ĠвÑĭ Ñģ +ĠвÑĭÑģ око +ĠاÙĦØŃ دÙĬØ« +人 ãĤĤ +d Ã¼ÄŁÃ¼ +×Ļ×Ĺ ×ķ×ĵ +تع ÙĦÙĬ +تعÙĦÙĬ ÙĤ +l ö +تØŃ دÙĬد +н его +ĠÑĥд об +Ġ׾ ×ŀ×Ļ +Ġר ×ķצ×Ļ×Ŀ +Ġج اء +Ġ×ij ×ĸ×ŀף +à¸Ľà¸ģ à¸ķิ +é«ĺ ãģı +à¸Ľà¸¥ า +Ġart ık +Ġbug ün +×§ ׳×Ļ +Ġkho á +ĠÙħ رÙĥز +ĠìŀIJ 기 +در جة +×ŀש ר×ĵ +Ġgi ấy +Ġch óng +×§ פ +ÙĬب Ø© +ĠczÄĻ sto +в али +Ùĥ ب +ìŁ ģ +ส à¸ļาย +à¸Ľà¸£à¸°à¸Ĭา à¸Ĭà¸Ļ +×Ĵ ×ķ×£ +ëŁ ī +ãģ® ãģĵãģ¨ +ล à¸Ń +Ġngh á»ī +åŃIJ ãģ© 
+åŃIJãģ© ãĤĤ +à¹Ħà¸Ķ à¹īà¸Ńย +à¹Ħà¸Ķà¹īà¸Ńย à¹Īาà¸ĩ +×ĵ ×¢ +ĠاÙĦت Ùī +ĠÑģов еÑĤ +Ġqual itÃł +åĩº ãģĹ +ĠÑĢÑĥк ов +ĠÑĢÑĥков од +ราย ละà¹Ģà¸Ńียà¸Ķ +ãģªãģĭ ãģªãģĭ +기 ê´Ģ +Ġ×Ĺ ×ķש +Ġ×Ĺ×ķש ×ij +л оÑĤ +à¸Ļะ à¸Ħรัà¸ļ +×§×ij ×ķצ×Ķ +Ġth ái +Ġש ×ij×Ķ +ĠÑĪ ÐºÐ¾Ð» +ĠÙĦ ÙĥÙĦ +à¹ĥà¸Ļ à¸Ĭà¹Īวà¸ĩ +ĠÙħ ÙĥاÙĨ +ë ķĮ +Ġc ải +ĠCh ÃŃ +ÑĥÑĩ а +ìĿ µ +Ġx ảy +à¸Ĭà¸Ļ ิà¸Ķ +Ġc áºŃu +к ÑĢов +ss é +ĠÙĨ ÙĪØ¹ +ĠТ а +Ø® Ùħس +פ×ķס ×ĺ +Ġm ắc +ĠÄij em +à¸ģาร à¹ĥà¸Ĭà¹ī +ר ×ķס +ĠÐĽ е +Ġth á»Ń +รà¹Īาà¸ĩ à¸ģาย +üz ü +æĹ¥æľ¬ ãģ® +ê³¼ ìłķ +ש ×Ļ×IJ +ĠìŀĪ ê³ł +×ij ×ķ׾ +ìķ ħ +ĠÙĪØ§ÙĦ ا +ĠÐĽ и +ĠвÑģ Ñij +Ġużytk ow +×Ĺ ×ķ׾ +ر Ù쨶 +Ġson uç +ãģĦ ãģ¾ãģĽãĤĵ +ìĤ¬ ìĹħ +ëĪ Ħ +ÑĤ ек +Ġud ziaÅĤ +л ез +Ġ×Ķ×Ļ ×Ļת×Ļ +ãĤīãĤĮ ãģ¦ +Ùħس ؤÙĪÙĦ +ر ار +ÑĤ ан +ĠÄij Ãło +Ġר ×ķ×ij +Ġ×ijש×ij ×Ļ׾ +ä»ĬåĽŀ ãģ¯ +ãĤ¸ ãĥ¥ +Ġ×¢ ×ijר +ãģĽ ãģ¦ +п олÑĮ +ak lı +Ġk ÃŃnh +د ت +лож ение +ĠاÙĦÙħ ص +ĠاÙĦÙħص رÙĬ +à¸Īริà¸ĩ à¹Ĩ +ĠاÙĦشر ÙĥØ© +ĠÄij á»ı +ãĥĽ ãĥĨ +ãĥĽãĥĨ ãĥ« +Ñį кон +Ñįкон ом +ĠÙĪ Ø¹ÙĨ +Ġת ׳ +Ġ×ª×ł ×IJ×Ļ +ĠاÙĦدÙĪÙĦ ÙĬØ© +Ġì§Ģ ìĹŃ +ãģ§ãģĻ ãģĭ +Ġв аÑĢи +ĠваÑĢи анÑĤ +ĠاÙĦع رب +ел а +Ġt Æ°á»Ľng +sk Äħ +Ġm ặc +ส ัà¸ģ +ãĥĵ ãĥ¼ +Ġ×ij ×Ĵ׾ +Ġ×ij×Ĵ׾ ׾ +ãĥķãĤ¡ ãĥ³ +×ij ×Ļצ +×ij×Ļצ ×ķ×¢ +ли ÑģÑĤ +à¸Ł ุ +à¸Łà¸¸ à¸ķ +à¸Łà¸¸à¸ķ à¸ļà¸Ńล +à¸Ŀ à¹Īาย +ìŀIJ ìĿĺ +Ġس ÙĪÙģ +Ġש ×Ķת +Ġê± ¸ +×¢ ×ij×ķ×ĵ +ãģĻãĤĭ ãģĵãģ¨ãģĮ +ĠÑĩа ÑģÑĤÑĮ +ãĤ¢ ãĥ¡ãĥª +ãĤ¢ãĥ¡ãĥª ãĤ« +Ġtak ım +Ġs Ỽ +ĠsỼ m +שר ×Ķ +è¨Ģ ãģĨ +л ан +ì» ¤ +׼ ׳×Ķ +ÙĪÙģ ÙĬ +íĹ Ī +lu ÄŁu +ĠëĮĢ íķ´ +Ġ׾×ij ×Ļת +Ġ×Ķר×IJש ×ķ׳×Ķ +ص Ùħ +Ġsö yled +Ġsöyled i +à¸Ľ าà¸ģ +Ġard ından +ãģĪ ãģŁ +à¸Ĺัà¹Īว à¹Ħà¸Ľ +Ġ׳×ķס ×£ +б олÑĮ +ãĤĵãģ§ãģĻ ãģijãģ© +ĠлиÑĪ ÑĮ +Ġ×ij ×IJ×Ļ +ĠбÑĭ ÑģÑĤÑĢо +ส ัà¸Ļ +Ġ×ij פ׳×Ļ +л еÑĩ +ĠاÙĦØ® بر +Ġsó c +Ġth ú +Ġп ÑıÑĤ +ãģĬ é¡ĺ +ãģĬé¡ĺ ãģĦ +ÑĤ ин +ãģ«ãģ¤ãģĦãģ¦ ãģ¯ +פ ף +Ġдв ÑĥÑħ +à¸į ีà¹Ī +à¸įีà¹Ī à¸Ľ +à¸įีà¹Īà¸Ľ ุ +à¸įีà¹Īà¸Ľà¸¸ à¹Īà¸Ļ +оп еÑĢ +ĠاÙĦب شر +ĠاÙĦÙħ اÙĦ +ıyor uz +تØŃ ÙħÙĬÙĦ +à¸ģ ะ +éĸĵ ãģ« +×Ĺ ×ķש +ĠNg uyên +ãģĦãģ¦ ãģĦãĤĭ +дÑĥ ÑĪ +ש פע +ÑĪ Ñĥ +å®Ł éļĽãģ« +ĠÑĢай он +ĠCh á»ī +ÙĨ صر +Ġìļ ´ +Ġìļ´ ìĺģ +Ġ×Ķ×ĵ ×Ļף +ØŃد د +ر ز +ĠاÙĦد Ùħ +ĠPh áp +ÑĤ ÑģÑı +è¦ĭ ãģĪ +Ġti á»ĥu +Ġs á»Ńa +а ÑİÑĤÑģÑı +ĠB á +Ġ×ķ ׼׾ +Ð ĸ +ÑĪ Ð¸Ð¼ +ìĿ´ ëĬĶ +л ев +d ık +Ġprés ente +Ġara ç +صد ÙĤ +Ġпом ог +ĠاÙĦشر ÙĤ +ĠÙĪØ§ÙĦ ذÙĬ +رÙĬ ا +×ij ׳×ķת +Ġng á»ĵi +ר ×ķפ +ר×ķפ ×IJ +Ġth ấp +ãĤĦ ãģ¯ +ãĤĦãģ¯ ãĤĬ +ĠاÙĦج دÙĬدة +éĿŀ常 ãģ« +ÙĬÙĦ ÙĬ +ìª ½ +تع اÙħÙĦ +ãģł ã썿ĢĿãģĦãģ¾ãģĻ +Ùħ Ùħ +иÑĤе ли +ãĤµãĤ¤ ãĤº +اد ات +ĠاÙĦÙħ اÙĦÙĬØ© +Ùĥات ب +к ли +веÑĢ Ñħ +ни Ñĩ +Ġ×ľ×¢ ×ij×ķ×ĵ +׾ ×Ļ×Ķ +ØŃ Ùİ +ãĤ¤ ãĥĻ +ãĤ¤ãĥĻ ãĥ³ãĥĪ +Ġת ×Ĵ×ķ×ij×ķת +ÑĦ он +ĠдÑĢÑĥг ие +×IJ ×ĸ×ķר +Ġper ò +ìķ ŀ +åĢŁ ãĤĬ +ר צ×Ļ +×IJ ×ĸ +алÑĮ нÑĭÑħ +Ġê²ĥ ìľ¼ë¡ľ +ĠпÑĢав о +ĠاÙĦØ£ رض +à¹Ģà¸Ĺ à¸Ħ +à¹Ģà¸Ĺà¸Ħ à¹Ĥà¸Ļ +à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļ à¹Ĥล +à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥล ย +à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥลย ี +צ ר×Ļ +ĠÐļ Ñĥ +ıl ma +決 ãĤģ +ا ÙĪ +Ġ×ĵ ×§×ķת +à¸Ħร ู +ĠÙħست ÙĪÙī +à¸Ľ à¹īà¸Ńà¸ĩ +à¸Ľà¹īà¸Ńà¸ĩ à¸ģัà¸Ļ +×ĵ ×ķ×ŀ×Ķ +ĠÑģ егоднÑı +س ÙĪÙĤ +ר×Ĺ ×ķ×ij +ĠØ¥ دارة +Ñħ ож +éģİ ãģİ +à¸Ħ à¸Ń +нÑĥ л +×ķ׼ ×Ķ +ÙĪ Ø§ÙģÙĤ +׼׾ ׾ +Ġ×Ķ ×ĵ×ķ +Ġl Ä©nh +Ġkh ảo +×IJ×ŀ צע +ë¨ ¸ +Ġ׼ ×Ļצ +Ġ׼×Ļצ ×ĵ +Ġдолж нÑĭ +หว ัà¸ĩ +ãĥĩ ãĤ¶ +ãĥĩãĤ¶ ãĤ¤ãĥ³ +Ġng á»Ŀ +ä¸Ń ãģ« +à¸ģลัà¸ļ มา +جÙħ اÙĦ +à¸Ķัà¸ĩ à¸ģลà¹Īาว +س ÙĥÙĨ +س ÙĨ +Ġözellik le +з еÑĢ +rz ÄĻ +×ŀ ×ķר×Ķ +Ġl ạ +×ŀ ×Ļ׳×Ļ +ר ×Ļת +ãģĿãĤĮ ãģĮ +ãģĭ ãĤĮ +ĠÙĬÙħÙĥÙĨ Ùĥ +öff entlich +г ан +ĠاÙĦØŃ ÙĦ +ĠmiÄĻd zy +ĠÑĩа ÑģÑĤи +ujÄħ cy +ĠbaÄŁ lı +ĠiliÅŁ ki +Ùģ Ø§Ø¡ +ãĥª ãĥ³ãĤ° +Ġhã ng +ĠконÑĤ ÑĢ +ĠконÑĤÑĢ Ð¾Ð» +к оп +ש ×Ļ×¢ +ש×Ļ×¢ ×ķר +ĠÐĴ аÑĪ +Ġ×Ķ ×ª×§ +ÙħÙĨ ع +ĠpolÃŃt ico +Ġг олов +ĠØ¥ ÙĬ +Ø¥ ÙĨتاج +à¸ļ ิ +Ġг овоÑĢ +ĠговоÑĢ Ð¸ÑĤ +Ġph á»ķ +ĠÑģем ÑĮ +ãģ¯ ãģĤãĤĬãģ¾ãģĽãĤĵ +ĠÙĪ Ø§Ø³Øª +×ŀש פ×ĺ +з ем +×ŀ×ĵ ×ijר +Ġíģ ° +ĠìĿ´ ë²Ī +ê°Ģ ëĬĶ +Ġì§Ģ ìĽIJ +Ġca ÅĤy +Ġgeli ÅŁtir +Ñģк ое +pos é +Ġkh ô +à¸ķิà¸Ķ à¸ķาม +miss ão +Ġ׾ ×ŀר +Ġ׾×ŀר ×ķת +Ġb 
ó +à¸ķรวà¸Ī สà¸Ńà¸ļ +Ġngh á»ģ +Ġб из +Ġбиз неÑģ +ÑģÑĤ еÑĢ +ÙĪ Ùİ +楽 ãģĹãģ +楽ãģĹãģ ¿ +ãģĵãĤĮ ãģĭãĤī +wiÄħ zan +ส à¸Ńà¸Ļ +Ùħ ÙĪØ± +׳×ĵ ׾ +Ġ×Ķ×IJ ×ĵ×Ŀ +Ġм олод +ØŃ Ùħا +ØŃÙħا ÙĬØ© +ÑģÑĤ ÑĢан +Ġbu á»ķi +ת×Ļ ×Ļ×Ŀ +abile ceÄŁi +L İ +à¹Ģย à¸Ńะ +à¸Ī ร +س ÙĥاÙĨ +à¸Ļ ัà¸Ķ +Ġm ấy +ĠÐij а +s ÅĤaw +ĠÙģ ÙĦا +ĠкоÑĤоÑĢ Ð¾Ð¹ +Ġпло Ñī +ĠплоÑī ад +ãĤĤ ãģĤãĤĬ +sz czÄĻ +×Ļפ ×ķ +ש×ŀ ת +owa ÅĤa +Ġn ông +צ×ij ×IJ +ĠìŀĪ ìĹĪ +ãģ¾ ãģ¨ +ãģ¾ãģ¨ ãĤģ +ÙĤÙĪ Ø§Øª +ãģ¿ ãĤĵãģª +Ġ׼ ×ŀ×¢×ĺ +Ġx úc +ï¼ Ĩ +r ÄĻ +rÄĻ cz +×ĵ ×ŀ×Ļ +Ġt áºŃn +à¸Ķ วà¸ĩ +ê²½ ìłľ +п ÑĥÑĤ +Ø£ ربع +Ġ×ŀ שת×ŀש +ãĤ¿ãĤ¤ ãĥĹ +Ġìłľ ê°Ģ +Ġ׾ ׼ף +ĠобÑĢаз ом +ÙĬÙĥ ا +w ÅĤ +wÅĤ asn +ĠاÙĦÙĪØ·ÙĨ ÙĬØ© +بÙĬ ب +×ŀ ׾×Ļ +к ÑĢаÑĤ +기 ìĹIJ +ÙĤ اد +ĠÙĦ دÙī +à¸Ħวาม รูà¹ī +×ŀ×ĵ×Ļ׳ ×Ļ×ķת +ê² ¨ +Ġíĺ Ħìŀ¬ +ש ת×Ļ +м ол +Ġmá i +à¸ŀิ ม +à¸ŀิม à¸ŀ +à¸ŀิมà¸ŀ à¹Į +หล วà¸ĩ +Ġx uyên +×Ĺ ×¡×¨ +رÙĪ ÙĨ +ãģĿãģĨ ãģĦãģĨ +ãģĿãĤĮ ãģŀ +ãģĿãĤĮãģŀ ãĤĮ +Ġ׼ ש×Ķ +ÐŁ ÑĢав +×ŀ×ij צע +ع رب +Ġbü yü +פ×Ļת ×ķ×Ĺ +à¸Ī à¸ļ +ĠØ£ Ùĥبر +שר ת +×ŀ׼ ש×Ļר +ĠÙĪ Ùħع +ãģ® ãģŁãĤģãģ« +à¸Ļ ัà¸ļ +ì° ° +ãĥª ãĥķãĤ© +ãĥªãĥķãĤ© ãĥ¼ãĥł +Ġc ưá»Ŀng +ĠìłĢ íĿ¬ +ÙħÙĨظ ÙħØ© +Ġhiç bir +ãģ§ãģ¯ ãģĤãĤĬãģ¾ãģĽãĤĵ +ร à¸Ńย +ëIJľ ëĭ¤ +ãģĻãģIJ ãģ« +к ла +Ġürün ler +Ġki á»ĥu +ĠëĤĺ ëĬĶ +ÑĤ ки +Ñģ им +Ġchá»ī nh +ãĤĤ ãģªãģĦ +ศ รี +æĽ¿ ãģĪ +ta ÅŁ +Ġب ÙĥÙĦ +Ġ×ķ ×Ļש +vis ão +ä¼ Ŀ +ä¼Ŀ ãģĪ +ÙĦ د +׾ ×Ļ×ŀ +׾×Ļ×ŀ ×ķ×ĵ +t ória +د Ùij +اÙħ ر +Ġê·¸ëłĩ ê²Į +Ġmateria ÅĤ +à¸Ĺ รา +à¸Ĺรา à¸ļ +ã쮿ĸ¹ ãģĮ +ãģ¦ ãģįãģŁ +ض غ +ضغ Ø· +ĠÙĬ عÙĨÙĬ +ел о +×IJ×Ķ ×ij×Ķ +×¢ ×ŀ +ÅŁ ık +ìŀIJ ëĬĶ +ãĤ¿ ãĥ³ +Ġb áºŃt +×ŀשפ ×Ĺ×Ķ +к ÑĢи +б ли +สั à¸ķ +สัà¸ķ วà¹Į +ĠسÙĨ ÙĪØ§Øª +ĠPh ương +ãģ¦ãģĹãģ¾ ãģ£ãģŁ +ãģª ãģľ +Ġ×ij×IJ ×ķ +Ġc án +س جÙĦ +Ġl ẽ +ãĤ± ãĥ¼ãĤ¹ +Ġ×§ ×Ļ×ij׾ +à¸ļà¸Ĺ à¸Ħวาม +Ġ×ķ ׼ף +ĠпÑĢедÑģÑĤав лен +Ġn á»iji +Ġcoment ário +ени ем +Ġtá» ı +l Ãł +Ġש×Ķ ×Ļ×Ķ +Ñģл ав +ĠاÙĦ ÙĪÙĦا +ĠاÙĦÙĪÙĦا ÙĬات +ÙĦج ÙĨØ© +×§×ķר ×IJ +бÑĭ ÑĤ +Ġì ¦ +Ġì¦ ī +ãģ§ãģĻ ãģĹ +หรืà¸Ń à¹Ħมà¹Ī +за ÑīиÑĤ +ÙģÙĦ سطÙĬÙĨ +Ġmi á»ħn +à¹Ģย à¹ĩà¸Ļ +ĠçalÄ±ÅŁ an +×Ļ×Ĵ ×Ķ +ĠE ÄŁ +ĠEÄŁ itim +ãĥĥãĤ· ãĥ¥ +Ġоп Ñĭ +ĠопÑĭ ÑĤ +ر غ +رغ ب +ĠÑģво иÑħ +à¸Ľà¸£à¸° à¸ķ +à¸Ľà¸£à¸°à¸ķ ู +Ġ×ŀ×IJ ×ĵ +׼ ×ķ׳×Ļ×Ŀ +à¸Ļ ี +ĠвÑĭ Ñħод +ãģ®ä¸Ń ãģ« +פ ׾×IJ +ĠÙĪ ÙĦÙĬس +פ×ķר ס +פ×ķרס ×Ŀ +Ùħ سÙĦÙħ +Ġng ôi +×ĵ ×ŀ×ķת +ãĤĴ使 ãģ£ãģ¦ +ĠпомоÑī ÑĮÑİ +Ø£ سر +бл ок +ÙĤ Ùĩ +ãģĹãģ¾ ãģĦ +ãģ¨ ãģĹãģŁ +Ġп еÑģ +ãĥī ãĥ« +×Ĺ ×Ŀ +ãģĹãģª ãģĮãĤī +ĠÐŁ ÑĢед +ãĥģãĤ§ ãĥĥãĤ¯ +å¼· ãģĦ +ש ×Ļר×ķת +д аеÑĤ +×Ļ×ij ×ķ +Ġgen ç +ил аÑģ +илаÑģ ÑĮ +ĠبÙĦ د +æĤ ª +æĤª ãģĦ +Ġ×ŀ שת +æ§ĺ ãĢħ +æ§ĺãĢħ ãģª +à¸ĺรรม à¸Ĭาà¸ķิ +ĠÙĥ اÙħÙĦ +ĠاÙĦس Ùħ +×ij×ĺ ×Ļ×Ĺ +c á +g ência +ãĤ¹ãĤ¿ ãĥ¼ +à¸Ĺำ à¸ģาร +×Ļ׾ ת +Ġ×Ļ ×ķצ×IJ +w ój +à¸ļุ à¸Ħ +à¸ļุà¸Ħ à¸Ħล +ع تÙħ +عتÙħ د +ãģĿãĤĮ ãģ« +ĠاÙĦت ارÙĬØ® +ÙĤر اء +Ġyönet im +×§ שר +ĠÑģп оÑĢÑĤ +Ġר×IJש ×ķף +Ġseñ al +Ġch ắn +çĦ¡ ãģĦ +ĠдоÑģÑĤ аÑĤ +ĠдоÑģÑĤаÑĤ оÑĩно +Ġá gua +à¸ģร à¸ĵ +à¸ģรà¸ĵ ี +Ġ×ŀש ×ķ +Ġtr ải +ë² Į +ujÄħ cych +Ù쨱 د +à¹ĥ à¸ģล +à¹ĥà¸ģล à¹ī +ãĤĭ ãģ®ãģ¯ +ר×ķ ×ķ×Ĺ +ÙĨ Ùĥ +ĠاÙĦÙĨ ÙĤ +ãģ®ãģ§ ãģĹãĤĩãģĨ +ãģ®ãģ§ãģĹãĤĩãģĨ ãģĭ +Ùħ عرÙģ +ÙħعرÙģ Ø© +ÑĥÑī е +Ġ×ij×¢ ×Ļקר +ت صÙĦ +Ġ×Ķ×IJ ר +Ġ×Ķ×IJר ×¥ +ĠÅŀ i +à¸Ĥา à¸Ķ +íŀ ĺ +ãģªãĤĵ ãģ¨ +ĠìĤ¬ëŀ ij +l Ã¼ÄŁÃ¼ +ب اء +ĠاÙĦØ¢ خر +Ġfam ÃŃlia +ĠTh áng +Ñī ениÑı +ãĤ¯ ãĥŃ +ĠTh ứ +æĽ¸ ãģį +ен ной +ìŀ ¡ +бл аг +благ о +п ов +à¹ģ ว +à¸ĩ à¸Ħà¹Į +à¸Ńัà¸Ļ à¸Ķัà¸ļ +ãģĤ ãģĴ +ร à¹īาย +ün ün +Ġ×Ļ׼×ķ׾ ×Ķ +з он +ĠÐľ и +маÑĤ еÑĢиал +Ġë³´ ë©´ +ØŃÙģ Ø¸ +ê Ìģ +ãģ« ãģĻãĤĭ +Ġת ×IJ +Ġ×Ķס ×ķ +ĠÑģÑĤ оÑĢ +ĠÑģÑĤоÑĢ Ð¾Ð½ +ãĥĪ ãĥĥãĥĹ +ÅĤo ÅĽÄĩ +ëħ ¼ +ëĵ Ŀ +ĠÙĪØ§ÙĦ ع +ì¶ Ķ +Ġ×Ļצ ×IJ +ĠÑĢаз дел +алÑĮ наÑı +×IJ׳ ש×Ļ +spo ÅĤ +spoÅĤ ec +spoÅĤec zn +Ø¥ عÙĦ +إعÙĦ اÙĨ +ÙĤÙĪ Ùī +íķĺë©´ ìĦľ +تط ÙĪØ± +Ġsi êu +Ỽ t +д ви +дви ж +Ġqu ần +k ıl +ĠпÑĢи зна +ĠH ã +ĠHã y +ĠباÙĦ ت +man ın +ãĤ« ãĥ« +Ġk á»· +×§ ׾×Ļ +ëIJĺ ì§Ģ +تعÙĦ Ùħ +ìĭľ ìĦ¤ +ìĭ ¶ +íĺ ¼ +Ùĥ ÙĬÙģ +売 ãĤĬ +วิ à¸Ĭา +б ал 
+ĠØ£ ØŃ +Ġдолж ен +รา à¸ĩ +ราà¸ĩ วั +ราà¸ĩวั ล +Ùħ اء +ج ار +Å ļ +Ġ×ŀ×IJ ×ĸ +ר ×ŀ×Ķ +ãģĭãĤĤãģĹãĤĮ ãģªãģĦ +ét ude +czÄħ c +Ġg ór +×ł×¡ ×Ķ +Ùħ ÙĬد +ĠÐŁ еÑĢе +Ø£ خر +ãģĿãģ® å¾Į +à¹Ģà¸Ķียว à¸ģัà¸Ļ +×ŀ ×Ĵ×ķ +×ŀ×Ĵ×ķ ×ķף +д ов +mas ına +×¢ ׳×Ķ +ãĤ± ãĥĥãĥĪ +ס ×¢ +סע ×Ļ×£ +ĠT ư +Ġt óc +íĻľ ëıĻ +ĠÐŀ д +ĠÐŀд нако +Ġdol ayı +ؤ Ùĥد +ê³Ħ íļį +׾ ר +в еÑĩ +Ġkh ợi +Ġth á»§y +×ĵ ף +ร à¸ģ +à¸ļั à¸ķร +à¹Ģà¸ģ à¹Īา +ĠاÙĦØ« اÙĦ +ĠاÙĦثاÙĦ Ø« +Ġpod rá +ער ×Ļ +ÙĨج اØŃ +Ġkh ắc +ì¸ ¡ +İ M +ãĤ» ãĥĥãĥĪ +ż enia +Ġ׾×Ĺ ×ijר +er Ãł +ì ´Ī +Ġkü ç +Ġküç ük +ات ÙĩÙħ +à¸ĭ à¹Į +Ùħشار ÙĥØ© +ĠاÙĦ بط +Ġd ây +ен нÑĭм +à¸Ĺีà¹Ī à¹Ħมà¹Ī +ÙĤ Ùİ +Ġv ượt +Ġtr ì +Ġwp ÅĤyw +A Åŀ +з о +ĠاÙĦس ÙĬد +à¸Ĺะ à¹Ģล +ĠÑģодеÑĢж а +ع Ø·ÙĬ +ĠاÙĦع ÙĨ +èĢħ ãģĮ +à¹Ģ หà¸Ļ +à¹Ģหà¸Ļ ืà¸Ń +Ġb ÃŃ +Ġüzer inden +ĠV Å© +Ġnu ôi +ÙĨ Ùħ +алÑĮ ного +×¢ ×Ļף +ØŃ ضر +ĠоÑĤ дел +ëª ĩ +ìķ ¡ +ĠÙĦدÙĬ Ùĩ +ìĻ ľ +Ġse ktör +Ġвозмож но +ĠÐĶ Ð¶ +Ġh ô +äºĭ ãģĮ +иÑĢов ание +алÑĮ ной +Ġ미 êµŃ +ر ØŃÙĦ +ĠÑįк Ñģ +пÑĢав лÑı +Ġnh á»Ŀ +ĠÄij ẩ +ĠÄijẩ y +Ùģ Ùĥر +ĠÙĪØ£ ضاÙģ +ãĥIJ ãĤ¹ +ת×ķ׼ ׳×Ļת +ÑĤел ей +ĠØ¥ÙĦÙĬ Ùĩ +ãģ¨è¨Ģ ãģ£ãģ¦ +Ġдв е +Ġch ấp +ĠL ö +à¸Ħล ิ +à¸Ħลิ à¸Ľ +Ġس ÙĪØ± +ĠسÙĪØ± ÙĬا +×ŀ×Ĺ ×ķ +st ä +д об +Ġni á»ĩm +ãģ® å¤§ +פר×ķ ×Ļ×§ +פר×ķ×Ļ×§ ×ĺ +ĠCh âu +Ġ×ŀ×Ķ ×Ŀ +Ñģк им +ĠполÑĥÑĩ иÑĤÑĮ +ÙĬ ÙĪÙħ +Ø« ÙĪØ± +פ×ķ׾ ×Ļ×ĺ +פ×ķ׾×Ļ×ĺ ×Ļ +ĠмеÑģÑı ÑĨ +åħ¨ ãģ¦ +ĠاÙĦÙħ جÙĦس +ĠاÙĦت اÙĦÙĬ +Ġ׊ר +åIJij ãģij +׼ ×ŀ×Ķ +б ед +Ø£ عض +أعض اء +ÙĪÙĦ د +วà¹Īา à¸Īะ +Ġb ánh +à¸Ļิ ย +à¸Ļิย ม +à¸Ľà¸£à¸° à¸ģัà¸Ļ +ÑģÑĤав иÑĤÑĮ +à¸ŀ à¸Ļัà¸Ļ +ĠÑį ÑĦÑĦ +ĠÑįÑĦÑĦ екÑĤив +Ġав ÑĤоÑĢ +ĠÄIJ Äĥng +Ġth Æ°á»Łng +ãĤĴ æĦŁãģĺ +à¸ģัà¸ļ à¸ģาร +å¾Į ãģ« +Ġya ÄŁ +ست اÙĨ +Ġli á»ģn +ãģĦ ãģ¾ +i êu +à¹Ĥà¸Ķ à¸Ļ +ĠÙĦ ذÙĦÙĥ +à¹Ĥรà¸ĩ à¹Ģรียà¸Ļ +צ ×Ļ×Ĵ +ĠاÙĦÙħ عÙĦÙĪÙħات +ç§ģ ãģŁãģ¡ +à¸Ĺีà¹Ī à¸Ħุà¸ĵ +ãģ«ãģª ãģ£ãģ¦ãģĦãĤĭ +×ŀ×ĵ ×Ļ׳×Ķ +ס ׼×Ŀ +Ġв не +à¸ŀ à¸Ļัà¸ģà¸ĩาà¸Ļ +ÑĢ ÐµÐ¹ +à¹Ģà¸Īà¹īา หà¸Ļà¹īาà¸Ĺีà¹Ī +ĠHi á»ĩn +Ġméd ico +ĠتØŃ ÙĤÙĬÙĤ +ÑĮ ÑĤе +miÅŁ ti +ÙĤÙĬ ادة +ãĤı ãģĭãĤĬ +มา à¸Īาà¸ģ +ëħ Ģ +ãģ«éĸ¢ ãģĻãĤĭ +×IJר×Ĵ ×ķף +m ètre +Ġעצ ×ŀ×Ļ +ĠCh úa +รูà¹ī à¸Ī +รูà¹īà¸Ī ัà¸ģ +ì£ Ħ +ëĭ µ +à¹ģà¸Ĺ à¹ī +Ġgeç en +Ġlan ça +ĠاÙĦ بØŃØ« +×ĵ ×ŀ×ķ +ãģ¯ ãģĺ +ãģ¯ãģĺ ãĤģ +Ġdön Ã¼ÅŁ +è¿ij ãģı +à¹Ģส ม +à¹Ģสม à¸Ń +ëĿ ½ +Ġü ç +á» ŀ +ÑĪ Ð°Ñı +à¸Ĺ ร +ØŃ ÙĤÙĬÙĤØ© +à¸Ĥà¸Ńà¸ĩ à¸ģาร +Ġ무 ìĹĩ +Ġ×Ķ ×Ľ×¨ +ĠاÙĦص ÙĬÙĨ +ĠлÑİ Ð´Ð¸ +à¸ķ าย +ب ÙĪÙĦ +Ġvi êm +Ġthi á»ĩu +à¸ģ à¸Ķ +Ġ׾ ×ĵ×ijר +פ ׳×Ķ +×IJר ×ij×¢ +س Ùī +ĠاÙĦسÙĬ اس +ĠاÙĦسÙĬاس ÙĬØ© +yd ı +ÙĪØŃØ¯ Ø© +ĠдеÑıÑĤелÑĮ ноÑģÑĤи +Ġ×ķ×Ķ ×ŀ +п еÑĩ +пеÑĩ аÑĤ +иÑĢов аниÑı +ĠÑģ ог +ĠÑģог лаÑģ +Ġ׼ ×ĵ +Ġ׼×ĵ ×IJ×Ļ +ĠиÑģполÑĮзов аÑĤÑĮ +ס פ×ķר×ĺ +Ġil çe +exp érience +ĠTh á»Ŀi +İ K +à¹Ħà¸Ł à¸Łà¹īา +ëĵ¤ ìĹIJê²Į +à¸Ľà¸£à¸° à¹Ģà¸ł +à¸Ľà¸£à¸°à¹Ģà¸ł à¸Ĺ +Ġmü mk +Ġmümk ün +Ġ×IJ×ķת ׳×ķ +ìĦ± ìĿĦ +ĠìĿ´ ìľł +زÙĬ ارة +Ġolduk ça +r ób +ĠØ£ ÙĨا +Ġ×Ķ ×ij×Ļ +Ñģ ен +×¢ ×Ļקר +×Ļ×ĵ ×ķ×¢ +d zÄħ +Ùħ عÙĦÙĪÙħات +Ø´ اب +Ġpar ça +à¸Ļะ à¸Ħะ +ب اس +ĠÑĤоÑĢ Ð³ +ĠÑĤоÑĢг ов +Ġ×Ĺ ×ĵר +׼ ר×ĺ +׼ר×ĺ ×Ļס +ĠA yrıca +ÃªÌ £ +ìľ ¨ +ĠÑĤак ие +Ġ×ŀצ ×ķ×Ļ +ãĥ©ãĥ³ ãĤŃãĥ³ãĤ° +ש×Ļ×ķ ×ķ×§ +åīį ãģ® +ĠB ảo +Ñī Ñĥ +æĹ© ãģı +ĠPh òng +à¸ŀระ ราà¸Ĭ +פ ×Ĺ×ķת +Ġг л +Ġгл аз +à¸Ĺ à¹Īา +Ġd ạy +ÑĢ Ð¾ÑģÑĤ +à¹Ĥà¸Ķย à¹Ģà¸īà¸ŀาะ +Ġqu áºŃn +Ġ×Ĺ×ijר ×ķת +m ême +mÄ±ÅŁ tı +ĠاÙĦت داÙĪÙĦ +Ġn ạn +Ġ×Ķ ×ĵ×Ļ +ĠاÙĦØ· رÙĬÙĤ +×Ĵ ×ķת +Ġ×Ķ ×ĵר×ļ +ujÄħ ce +Ġch ữ +ãĤĤãģ® ãģ® +ë° Ľ +ãģķãĤĵ ãģ¯ +Ġyard ım +ĠاÙĦع Ùħ +Ġì§Ħ íĸī +Ġ×Ļ ×Ĺ +Ġ×Ļ×Ĺ ×¡×Ļ +ĠاÙĦÙħ دÙĬÙĨØ© +Ġc ú +à¸ģี ฬ +à¸ģีฬ า +Ġni ên +mis ión +׳×Ļס ×Ļ +׳×Ļס×Ļ ×ķף +Ġвоз ÑĢаÑģÑĤ +Ġ×¢×ķש ×Ķ +ĠÙħ دÙĬر +Ñı ÑģÑĮ +ØŃ جÙħ +íĻĺ ê²½ +ĠاÙĦØ£ خرÙī +u ÃŁer +ĠاÙĦعاÙĦÙħ ÙĬØ© +ĠNg á»įc +êµIJ íļĮ +ä¸Ĭ ãģ§ +×Ļ×Ķ ×ķ×ĵ +×Ļ×Ķ×ķ×ĵ ×Ļ×Ŀ +Ùħس اعدة +Ġжиз нÑĮ +ĠпоÑĤ омÑĥ +ĠاÙĦÙħ ÙħÙĦ +ĠاÙĦÙħÙħÙĦ ÙĥØ© +ĠG ör +ر ÙIJ +×ŀ×§ ×ķ×ŀ×ķת +åĩºæĿ¥ ãĤĭ +ÑĦ ÑĤ +ĠìĿ´ ìłľ +ĠÑĢ ÐµÐ¼ +ĠÑĢем онÑĤ +ת ×ķ×ļ +æĻĤ ãģ¯ +ãĤīãĤĮ 
ãģªãģĦ +alt ı +å®¶ ãģ® +ĠاÙĦØ¥ عÙĦاÙħ +리 ëĬĶ +ãģĭãĤī ãģ¯ +ĠH ạ +ãģĤ ãģ® +×ĵ×Ļ ×ķף +رÙĬ س +Ġsoci etÃł +ĠاÙĦÙĥ بÙĬر +Ġ×ij ×ŀס +Ġ×ij×ŀס ×Ĵר +Ġ×ij×ŀס×Ĵר ת +ĠìŀĪ ìľ¼ë©° +Ġn ặng +Ùĩ Ùī +ĠB Ãł +×ŀר ×ķ +Ġj ÄĻ +ĠjÄĻ zy +ĠjÄĻzy k +Ġ׼ ×ŀ×ķ×ijף +×¢ ׾×Ķ +à¸Ĺีà¹Ī à¹Ħà¸Ķà¹ī +ãģ¾ ãģĹãĤĩãģĨ +×ŀס פר +Т Ðŀ +سÙĬاس Ø© +Ġкажд Ñĭй +ë² ł +t ım +y á»ĩn +ร ีà¹Ī +ĠдеÑĤ Ñģк +วิà¸ĺี à¸ģาร +m ówi +×ĺ×¢ ×Ŀ +×Ķצ׾ ×Ĺ×Ķ +ض ÙĬÙģ +ĠÑħоÑĤ Ñı +ãĤĵãģ§ ãģĦãĤĭ +à¸Ħา à¸Ķ +à¸Ħร à¸ļ +Ġк ÑĥÑĢÑģ +ĠbaÅŁ arı +×ijר ×ķ +ÙĬع Ø© +ĠÐĿ Ñĥ +à¸Ħวาม à¹Ģà¸Ľà¹ĩà¸Ļ +Ġ׾ ×ŀש׾ +Ġì¢ĭ ìĿĢ +Ùħؤس س +Ùħؤسس ات +Ġpréc is +Ġth ảo +à¸ģà¹ĩ à¸Ħืà¸Ń +Ġש ׼׾ +führ ung +ãģĦ ãģ§ +à¹ģละ มี +à¸ģà¹ĩ มี +Ġש ש +м ел +Ġкни г +ĠباÙĦ ÙĨ +ĠباÙĦÙĨ سبة +Ġald ı +ÑĤ ай +Ġ×Ĺ×ĵ ש×Ļ×Ŀ +å®Ł ãģ¯ +ع ÙĪØ§ +ĠìĿĺ 미 +из м +ÑĢабоÑĤ аÑĤÑĮ +Ùģ Øµ +Ġ×ij׳ ×ķסף +ãģ¨ãģĹãģ¦ ãĤĤ +à¹Ģà¸Ľà¹ĩà¸Ļ à¸Ĺีà¹Ī +ĠÑģлед ÑĥеÑĤ +èĢĥãģĪ ãģ¦ +Ġ׼ ×Ļ×ķ×Ŀ +ÑģÑĤ Ñĭ +׼׾׼ ׾×Ļ +æµģ ãĤĮ +ãĤĴ ãģ¤ãģij +Ñĩ аÑĤ +×Ļ׼ ×ķף +×Ļר ×Ļ +ları yla +ãĤ¤ ãĥ¡ +ãĤ¤ãĥ¡ ãĥ¼ãĤ¸ +׳×ĸ ×§ +Ġci ò +Ġs ın +Ġsın ır +à¸Ļ à¸Ħร +к аÑĤ +Ġl á»Ĺi +ëŀ Į +تÙģ Ø§Øµ +تÙģØ§Øµ ÙĬÙĦ +ëĨ ĵ +ĠÙħ ض +il miÅŁ +بار Ùĥ +ÐĿ Ðĺ +Ġth ẩm +Ġ×IJ×ķת ×ļ +ĠпÑĢин им +ĠпÑĢиним а +Ġyö nt +Ġyönt em +Ġ×ŀ×§ ×ij׾ +Ġktó rego +ê· Ģ +شر Ùģ +د اÙħ +ãģĦãĤį ãģĦãĤį +ĠAl ém +Ġgör ü +Ġgörü nt +Ġgörünt ü +د س +ÑĪ ÐºÐ¸ +г ÑĢад +Ġl ạc +Ġs ữa +ãĤīãĤĮ ãģ¾ãģĻ +o Ãłi +Ñī ен +ãģĭ ãģªãģĦ +Ġп оп +Ġпоп Ñĥ +ĠпопÑĥ лÑıÑĢ +ĠاÙĦÙħ ÙĪÙĤع +rä g +ï¼ ¡ +íķ Ħ +ãĤĴè¦ĭ ãĤĭ +اÙħ ا +ĠاÙĦØŃ رب +ĠÐŁ а +Ġ׾ ×IJתר +Ġt á»ijc +×ij ׾×Ķ +ر ئÙĬس +в Ñĥ +ÙĬ دÙĬ +каз ан +Ġ׊ש×ij×ķף +h ôtel +×¢ ×ķ׳×Ķ +ب ÙĨÙĬ +×ŀ ×ķ׾ +Ġд нÑı +éĽ£ ãģĹãģĦ +вед ениÑı +Ġ×ķ ×ŀת +н апÑĢимеÑĢ +ÙĤ ابÙĦ +Ġrésult at +ĠÑĢазвиÑĤ иÑı +ر Ùij +ìłĦ 문 +ĠاÙĦÙħ زÙĬد +ĠìľĦ íķ´ìĦľ +ëĨ į +íĻ ķ +ĠThi ết +íĮ ¨ +malı dır +Ġcz ÅĤ +ĠczÅĤ owie +ĠczÅĤowie k +ĠÙĦ بÙĨ +ĠÙĦبÙĨ اÙĨ +üs ü +ãģªãĤĵ ãģł +Ġżyc ie +ĠÑħоÑĢоÑĪ Ð¾ +æĸ¹ ãģ« +ëĭ¤ ë©´ +иÑĩеÑģ каÑı +ער ×Ļ׼ +ער×Ļ׼ ת +ãģ¾ãģĽãĤĵ ãģ§ãģĹãģŁ +ĠÑģоб ой +Ġg á»Ĺ +Ġдел аÑĤÑĮ +da Äĩ +аÑĢ Ð° +róż ni +à¹Ģล ีà¹ī +à¹Ģลีà¹ī ย +à¹Ģลีà¹īย à¸ĩ +à¸Ŀ าà¸ģ +Ġت ÙĤ +ĠتÙĤ دÙĬ +ĠتÙĤدÙĬ Ùħ +หà¸Ļ ุà¹Īม +Ġmü cade +Ġmücade le +ì§Ģ 를 +ãĤ¤ ãĤ¹ +ĠØ£ ساس +jÄħce go +ĠÅŁ eh +н ÑĤеÑĢ +ÑĨи Ñİ +ï» » +ÑİÑī его +à¹Ĥà¸Ľà¸£ à¹ģ +à¹Ĥà¸Ľà¸£à¹ģ à¸ģรม +Ġmie Äĩ +ØŃÙĥÙĪÙħ Ø© +ãģ§ãģĹãģŁ ãģĮ +×Ļס ×Ķ +ãĤĤãģ® ãĤĴ +Ġ×ŀ ×IJת +สุà¸Ķ à¸Ĺà¹īาย +Ġc Å© +ÙĨ سب +ĠпÑĢ Ð¾Ñĩ +Ġд ней +ĠÑįÑĤи Ñħ +׾ ×ŀת +нÑı Ñı +Ñį к +Ġì§Ģ ëĤľ +มหา วิà¸Ĺยา +มหาวิà¸Ĺยา ล +มหาวิà¸Ĺยาล ัย +d ão +ĠMá y +ĠêµŃ ê°Ģ +à¸ļุ รี +×Ĵ ×Ļ׾ +ĠÑĤÑĭ ÑģÑı +ĠÑĤÑĭÑģÑı Ñĩ +Ùģ Ùĥ +ĠÐĺ Ñģ +è¡Į ãĤıãĤĮ +פר ×ĵ +ãģ¤ ãģį +à¸Ħร à¸Ńà¸ļ +à¸Ħรà¸Ńà¸ļ à¸Ħรัว +à¸Ĥึà¹īà¸Ļ มา +ä»ĬæĹ¥ ãģ¯ +ĠìĤ¬ëŀĮ ìĿ´ +עצ ×ŀ×Ķ +п оÑĢ +ĠK ỳ +Ġ Æ¡n +Ġth Äĥm +Ùģ Ø§ÙĤ +ãģļ ãģ« +Ġ׾ קר +Ġ׾קר ×ķ×IJ +اÙģ ÙĬØ© +Ùħ ÙİØ§ +г аÑĢ +ص ÙĦا +صÙĦا Ø© +Ġ×ŀ ×ĸ×Ķ +lı ģını +Ġ×IJ ×Ļ׳×Ķ +к ÑĢо +Ġng ươi +Ġв ним +Ġвним ание +jÄħ cy +ÙĢÙĢÙĢÙĢ ÙĢ +Ñģ Ñħод +ãģªãĤĵ ãģĭ +×ŀ ×Ļ׾ +Ġ×Ķ×IJ ×Ĺ +ãĤı ãģªãģĦ +ع سÙĥر +ĠìĦ¸ ê³Ħ +ĠÑĩ его +ĠÑģÑĢед ÑģÑĤва +ĠÐł аÑģ +ãģª ãģģ +ÙĨ Ù쨳 +ר×Ļ ×ķף +Ñģ Ñĥд +ĠìĿ¸ ê°Ħ +ĠاÙĦÙħ ÙĤبÙĦ +ÙĨ عÙħ +تÙĪ Ù쨱 +ש ×ij×¢ +ı lm +ılm Ä±ÅŁ +Ġ×ľ×ª ת +تص Ùģ +×Ķפ ×ķ×ļ +à¹ĥà¸Ļ à¸Ľà¸µ +ìĿ´ ê³ł +Ùģ ÙĪØ² +à¸ľà¸¥ à¸ĩาà¸Ļ +ĠGi áo +à¸ļà¸Ńà¸ģ วà¹Īา +Ġd Ä±ÅŁ +ĠdÄ±ÅŁ ında +ì£ ½ +Ġdzie ÅĦ +к ÑĨии +и ÑĨе +ãģ® ä¸Ģ +ع Ø´ +пÑĢ ÐµÑģÑģ +หà¸Ļ à¹Īà¸Ńย +ลัà¸ģษ à¸ĵะ +Ġpossibilit Ãł +à¹Ħà¸Ķà¹īรัà¸ļ à¸ģาร +หย ุà¸Ķ +Ġphi ên +çĶŁ ãģ¾ãĤĮ +Ø· ÙĪÙĦ +ÑĦ ин +f ür +ØŃ ÙĬاة +íĸ ĪìĬµëĭĪëĭ¤ +׼ ׳×ķת +à¸Ľà¸£à¸° ส +à¸Ľà¸£à¸°à¸ª à¸ļ +à¸Ľà¸£à¸°à¸ªà¸ļ à¸ģารà¸ĵà¹Į +ëIJĺ ìĹĪ +Ġkaż dy +Ġl uyá»ĩn +ĠоÑĢганиз аÑĨии +å°ij ãģªãģı +ÑģÑĤÑĢо ен +Ġtécn ico +×§ ×Ķ׾ +Ġ×ķ×IJ ×Ĺ +ĠعÙĦÙĬ Ùĥ +Ñī ение +Ġ×Ķ ×Ļ׾×ĵ×Ļ×Ŀ +ÙĪØ³ ائÙĦ +Ġ×ķ ×Ķת +تÙħ ÙĬز +ĠÑģ казал +Ġпол и +Ġ×Ķ×ŀ ס +ÙĦÙij Ùİ +Ùħؤس سة +Ġ×ŀ ×Ļ×ĵ +ãģ£ ãģ¡ +ĠëĦĪ ë¬´ +à¸ŀ ี +Ġt ặng +Ġt ấn +ר ש×Ŀ 
+Ġméd ica +Ġ×¢ ×ķ×ŀ +Ġ×¢×ķ×ŀ ×ĵ +ÑĦ оÑĢ +Ùħر Ø© +Ġvat anda +Ġvatanda ÅŁ +Ġдел о +à¸Ļ ม +ãģ¨ åIJĮãģĺ +Ùģ Ùī +Ñģ оÑĢ +Ġ×Ķס ר×ĺ +Ġép oca +ìłķ ì±ħ +ĠÑģвÑıз ан +ض رب +ĠÙĦ ÙĨا +Ġuży wa +ĠاÙĦج ÙĬØ´ +Ñİ ÑĢ +×ijס ×ķ×£ +Ġм Ñĥ +ĠмÑĥ зÑĭк +bilit é +Ġma ç +س Ùİ +ت ÙĦÙĥ +ãģ ¬ +ÙĬ ÙĦا +ÑĪ Ð»Ð° +ÙĢÙĢ ÙĢ +Ġод ной +зв ан +ĠÑģ ÑĢаз +ĠÑģÑĢаз Ñĥ +ÙĨ ظÙħ +را Ùĩ +ĠÙĦÙĩ ذا +׼ ×ķר +Ġ×Ķש ×ij×ķ×¢ +Ġ×Ķש ת +ĠQu ảng +ãĥ« ãĥ¼ +ãģĪ ãģªãģĦ +×ĺ ×IJ +Ġmi á»ģn +ĠPh áºŃt +ĠاÙĦس ÙĪÙĤ +Ä Ĥ +ĠاÙĦج Ùħع +ĠاÙĦجÙħع Ø© +ÑİÑī ей +a ÅĤem +عت ÙĤد +Ø£ ÙĦÙħ +Ñģ ке +ĠìĿ´ íķ´ +ÙĨس Ø® +è¨Ģ ãģĦ +д обав +سب ÙĤ +×¢×ķר ר +ÑĤи п +ãģĿãģĵ ãģ§ +vis ión +عÙĪØ¯ Ø© +ë¨ ¹ +×ŀ ×ĸר×Ĺ +ĠØ¥ ØŃ +Ġ׾×ij ×Ļף +Ġ׾צ ×IJת +Ġyard ı +Ġyardı mc +Ġyardımc ı +İ Z +×§ פ×Ķ +tr é +liÄŁ ini +клÑİÑĩ а +Ġüret im +Ġa yrı +ĠkiÅŁ iler +à¸Ħ à¹īà¸Ļ +à¸Ħà¹īà¸Ļ หา +ĠS á»± +Ġ׼ ס +Ġ×Ľ×¡ ×£ +ĠÑĤак иÑħ +ĠXu ân +Ġл ег +Ġлег ко +Ø«ÙĤ اÙ쨩 +ÐĿ Ðŀ +ãĤ¹ãĤ¿ ãĥĥ +ãĤ¹ãĤ¿ãĥĥ ãĥķ +åIJĪ ãģĦ +Ġ×Ķש ×Ļ×ŀ×ķש +man ız +ĠÐĴ аÑģ +g ün +ìľĦìĽIJ íļĮ +Ġwsp óln +ĠÑģв ое +í ĥģ +à¹Ģà¸Ļ ีย +ÙĪØ¨ Ø© +в Ñıз +ı dır +ëIJĺ ìĹĪëĭ¤ +ĠdeÄŁi ÅŁtir +ãĤĭ ãģĵãģ¨ãģĮ +Ġ×Ĺ×ĵ ש×Ķ +ãĤīãĤĮ ãģ¦ãģĦãĤĭ +×Ĺ×Ļ ×Ļ×ij +ĠÐļ аÑĢ +׳×Ļת ×ķ×Ĺ +Ġ×§×ĺ ף +ר ×ĸ +ÙĪ Øº +èªŃ ãģ¿ +Ġت ÙĤÙĪÙħ +ĠÙĥ اÙĦ +à¸Ŀ ึà¸ģ +Ġë°ľ ìĥĿ +ológ ico +ر اع +à¹ģà¸ģà¹ī à¹Ħà¸Ĥ +ĠÑĢабоÑĤ Ñĥ +ÙĨÙij Ùİ +à¸Ńยูà¹Ī à¸Ĺีà¹Ī +ĠاÙĦØ« اÙĨÙĬØ© +ĠNh ân +Ñħ ваÑĤ +ö ne +Ġع دة +à¹ģ สà¸ĩ +ÑĤ оп +пÑĥÑģ ка +شر اء +ĠÐļ ом +Ġפע ×ķ׾×Ķ +ìĤ¬ ìĿ´ +ìĤ¬ìĿ´ íĬ¸ +è¡Į ãģ£ãģ¦ +Ġ×Ķ ×Ķת +ĠÑģÑĤ оÑĢо +ĠÑģÑĤоÑĢо нÑĭ +در س +à¸ĭ ู +à¸ķà¹Ī ำ +ĠØ£ بÙĬ +под об +ãģ« ãģ¦ +ار تÙģØ§Ø¹ +ĠÙħ ؤ +ик ов +ge führt +มืà¸Ń à¸ĸืà¸Ń +ĠÙĦ ÙĤد +ĠØ£ÙĨ Ùij +سÙĬ طر +ãģ¾ãģļ ãģ¯ +ס ×ĵ +Ñģк олÑĮко +ãģ¿ãģŁãģĦ ãģª +×ĵר ×Ĵ +×¢ ×Ļ×ĵ +à¹ĥหà¹ī à¸ļริà¸ģาร +ĠÐĶ Ð¸ +×ij×¢ ×Ļ×ķת +Ġ×Ķ×Ĺ ×ķ +пиÑģ ÑĮ +ĠاÙĦØ® ÙĦ +б ав +Ġİ lk +ĠاÙĦØ® Ùħ +ĠاÙĦØ®Ùħ ÙĬس +ĠÙĬ ÙĤÙĪÙħ +æĻĤ ãģ® +ĠsÅĤ ow +ĠØ£ ÙĩÙħ +Ø®ÙĦ ÙĤ +ĠØ£ صبØŃ +Ġchứ a +Ġth ác +Ùģ Ø§ÙĦ +Ġch á»Ŀ +ĠاÙĦØ® ار +ĠاÙĦخار ج +ĠاÙĦخارج ÙĬØ© +Ø· ائر +Ġt Ãł +ĠtÃł u +à¸ģล à¹īà¸Ńà¸ĩ +ĠاÙĦÙħر Ø£ +ĠاÙĦÙħرأ Ø© +åħ¨ ãģı +ĠÃĸ n +çļĦ ãģ«ãģ¯ +Ġpiè ce +×Ĵ ×Ļ×ij +ĠاÙĦ ÙĪØ§ÙĤع +ä»Ĭ ãģ® +ĠاÙĦÙħ ÙĤ +cz nÄħ +Ù쨹 اÙĦ +ен ного +ĠÑĦак ÑĤ +ìĭł ì²Ń +ĠÐŀ ни +ĠاÙĦبÙĦ اد +ов иÑĩ +ëı Į +ÑĦ ÑĥнкÑĨи +Ġìĸ´ ëĬIJ +ãĥķãĤ© ãĥ¼ +d ÃŃ +ил оÑģÑĮ +Ùħ Ùī +ĠاÙĦØ£ÙħرÙĬ Ùĥ +ĠاÙĦØ£ÙħرÙĬÙĥ ÙĬØ© +×ĺ ×Ļפ×ķ׾ +íĶĦ ë¡ľê·¸ +íĶĦë¡ľê·¸ ëŀ¨ +Ġש ×ķ׳×ķת +Ø´ ÙħÙĦ +ĠпаÑĢ Ð° +Ġ×Ķ×Ĺ ×ķ×§ +ÙĪØ² ارة +ãģ¨ ãģĻãĤĭ +Ġqu ảng +ĠaÄŁ ır +ĠاÙĦÙĦ ج +ĠاÙĦÙĦج ÙĨØ© +ê¸ ´ +ĠT ân +ج ÙħÙĦ +д ол +à¹ģà¸ŀ à¸Ĺย +à¹ģà¸ŀà¸Ĺย à¹Į +Ġר×IJ ש×Ļ +Ñī ей +Ġçev re +Ġкомп лекÑģ +Ġ×ij ×ŀש×ļ +Ġalt ın +ĠØ£ عÙħاÙĦ +ĠÑģво его +ãĤĪ ãģĦ +×Ĺ׾ ×Ļ×ĺ +×ŀ׳ ×¢ +Ġר ×ij×Ķ +ĠØ£ÙĬضا Ùĭ +×ĸ ׾ +ĠاÙĦسÙĬ اسÙĬ +æĢĿ ãģĨ +קר ×§ +קרק ×¢ +ĠاÙĦÙģ Ø±ÙĬÙĤ +б иÑĤ +×§ ׳×Ķ +ĠØ¥ ÙĨÙĩ +ĠÐĴ ам +Ðł Ðŀ +ãĥĪ ãĥª +å¿ħè¦ģ ãģª +Ġch âu +ç¶ļ ãģij +Ġçöz üm +gÅĤ ow +ع ÙĤÙĦ +売 ãĤĭ +i ết +à¸Ĭิ à¹īà¸Ļ +ĠØŃÙĤ ÙĪÙĤ +Ø·ÙĦ ع +ĠÄij en +ĠÙĥ اÙ쨩 +ãģ® ãģĶ +Ġë ¬ +Ġë¬ ¼ +Ġ물 ë¡ł +Ġرس ÙĪÙĦ +з ам +зам ен +Ġkullan ıcı +×¢ ×ķ׾ +èī² ãĢħ +ÑĪи ÑĢ +Ġ׊ש +Ġwy gl +Ġwygl Äħda +ש ×Ļ×ŀ×ķש +å¿ĺ ãĤĮ +×¢ ×Ļצ×ķ×ij +ĠاÙĦس ÙĪØ±ÙĬ +å°ij ãģªãģĦ +Ġпо иÑģк +สำ à¸Ļัà¸ģà¸ĩาà¸Ļ +Ġ×ŀצ ×ĵ +Ġmü ÅŁ +ĠmÃ¼ÅŁ ter +ĠmÃ¼ÅŁter i +ĠÙħÙĨ ÙĩÙħ +à¸ķำ à¹ģ +à¸ķำà¹ģ หà¸Ļ +à¸ķำà¹ģหà¸Ļ à¹Īà¸ĩ +ÅĽ mie +Ġש ×ł×ª +Ġ×Ķ ×¤×Ļ +פר ש +×¢×ijר ×Ļת +สà¸Ļ ัà¸ļ +สà¸Ļัà¸ļ สà¸Ļุ +สà¸Ļัà¸ļสà¸Ļุ à¸Ļ +è¨Ģ ãģ£ãģ¦ +à¸ģาร à¸Īัà¸Ķ +ĠMo że +из аÑĨии +ứ t +ĠÙĪØ¨ عد +ĠdeÄŁ ild +ĠdeÄŁild ir +Ġת ×ŀ +Ġ×ŀ×ŀ ׳×ķ +話 ãĤĴ +ĠÑĨ ена +Ġth úc +×Ļ×ŀ ×ķף +ĠB áo +ãĤĴ åıĸãĤĬ +å®ī ãģĦ +Ġ×¢×ķש ×Ļ×Ŀ +èĩªåĪĨ ãģĮ +l ée +ãĤĭ ãģ®ãģ§ +иÑĢÑĥ еÑĤ +ãģ¦ ãĤĭ +ست ر +ĠاÙĦØŃ ÙĬ +×Ļ׾ ×ķת +Ġ×Ĺ ×ij +ÙĤر Ø£ +تÙħ ÙĥÙĨ +س ائÙĦ +prü f +ãģĭ ãģijãģ¦ +ĠÑģоб ÑģÑĤвенно +ĠìľĦ íķĺìŬ +׾ ×Ļ×ĺ +ãģĮ å¤ļãģı +ÙĬت Ùĩا +ç«ĭ ãģ¦ +ม à¸Ńà¸ļ +ìĭľ ìŀ¥ +оÑĢ Ð° +Ġs avaÅŁ +×ĺ×Ļ×ij ×Ļ +×ij ׳×ķ 
+Ùħا ذا +기 ê°Ħ +ãģªãģ© ãģ§ +Ġ×ŀ ת×Ĺ×Ļ׾ +Ġnhi á»ħ +Ġnhiá»ħ m +ка ÑĢ +каÑĢ ÑĤ +Ġ׾×Ķ ×©×ª×ŀש +׳ ×Ļ×Ĺ +اد ÙĬØ© +ราย à¸ĩาà¸Ļ +Ġprzy kÅĤad +Ñī ий +ØŃض ÙĪØ± +Ġh ôn +à Ŀ +ת ×ķצ×IJ×ķת +راب Ø· +Ġb ếp +ĠполÑĥÑĩ и +åĩºä¼ļãģĦ ç³» +à¸Ľà¸¥ à¹Īà¸Ńย +ĠاÙĦØ´ باب +اÙĩ ÙĦ +ä»Ĭ ãģ¾ãģ§ +رج ع +ãĤ¶ ãĥ¼ +ÙĤ Ùģ +ĠGro ÃŁ +ĠíļĮ ìĽIJ +اج ر +Ġ×ij×ŀ קר×Ķ +Ġseg urança +fü hl +ãģ¦ ãģĦãģı +หม à¸Ń +ĠкоÑĤоÑĢ Ð¾Ð¼ +ĠN Äĥm +ĠdÅĤ ugo +ÙħÙĨ ØŃ +ש×ķ ×ķ×Ļ +ĠØ£ÙĬ اÙħ +ส à¸łà¸²à¸ŀ +r zÄħ +شر Ùĥات +ãĤĴ èĢĥãģĪ +д аÑĢ +à¸Ľà¸£à¸° à¸Ĭุม +Ġ×ķ×IJ ×ĸ +i á»ĩn +Ġt ươi +ש ×Ļ×Ĺ +à¸Ń à¹Īà¸Ńà¸Ļ +æĽ¸ ãģĦãģ¦ +Ġng ữ +×ij×Ļ×ĺ ×Ĺ +×ij×Ļ×ĺ×Ĺ ×ķף +Ġs ẵ +Ġsẵ n +ì§Ģ ëıĦ +ĠпÑĢ ÐµÐ¿ +ĠпÑĢеп аÑĢаÑĤ +Ġна ÑĥÑĩ +ĠÃľ nivers +ĠÃľnivers ites +ĠÃľniversites i +Ġ×Ĵ×ĵ ×ķ׾×Ķ +Ġ×Ķ ×ł×ª +Ġ×Ķ×ł×ª ×ij×¢ +ãģ§ãģĤ ãģ£ãģŁ +Ġmies iÄħ +ĠmiesiÄħ c +г ÑĢам +гÑĢам м +Ġبش Ø£ÙĨ +ĠÑħ ÑĢ +×§ ×Ļ×ĵ +×§×Ļ×ĵ ×ķ×Ŀ +Ø´ Ùĥر +Ġ á»ķ +Ġá»ķ n +ãģĮãģĤ ãģ£ãģ¦ +ãģķãĤĮ ãģ¾ãģĻ +Ġ×Ĺ ×ķ×ĵ +Ġ×Ĺ×ķ×ĵ ש×Ļ×Ŀ +ÙħÙĪØ§ جÙĩ +ÙħÙĪØ§Ø¬Ùĩ Ø© +أش خاص +ب غ +à¹Ģรียà¸Ļ รูà¹ī +ãģĹãģ¦ ãģĦãģı +Ġs ạn +å¿ħ ãģļ +׳ ×Ļ×Ĵ +׳×Ļ×Ĵ ×ķ×ĵ +باÙĦ غ +׊ש×ŀ +×Ĺש×ŀ ׾ +Ġnap raw +Ġnapraw dÄĻ +Ø´Ùĩ اد +×IJ ×ķ×Ķ +×IJ×ķ×Ķ ×ij +и ÑĨÑĭ +Ġ×Ķ ×¨×Ľ×ij +ëŀ ij +Ġת ×¢ +Ġ×Ķ ×Ļש +Ġ×Ķ×Ļש ר×IJ +Ġ×Ķ×Ļשר×IJ ׾×Ļ +Ø£ ÙħÙĨ +ÑİÑī аÑı +sk ór +LER İ +Ġ×Ķ×IJ×Ĺר ×ķף +×¢ ׳ק +ĠÙĪ ÙĥÙĦ +ãģĵãģĵ ãģ§ +Ġqu án +liÄŁ in +à¸ģà¸İ หมาย +Ø· Ùħ +Ø£ جÙĩ +أجÙĩ زة +ĠEr doÄŁan +ãģ§ ãģĬ +Ġв ÑĢа +ĠвÑĢа Ñĩ +ĠPh ó +à¸Ĭั à¹Īว +à¸Ĭัà¹Īว à¹Ĥม +à¸Ĭัà¹Īวà¹Ĥม à¸ĩ +Ġph úc +×Ļפ ×ķת +×¢×Ļ ×ķף +Ġduż o +ãĥģ ãĥ¼ãĥł +ĠÙĬ Ùİ +Ġзад аÑĩ +Ġ×Ĵ×ij×ķ×Ķ ×Ķ +Ġ׼ ׼׾ +лож ен +ét at +Ġng Äĥn +èµ· ãģį +ĠTi ến +ص عب +Ġexperi ência +Ø® Ùħ +à¸ģาร à¸Ĺำà¸ĩาà¸Ļ +س ÙĬد +ĠD á»± +ĠкоÑĤоÑĢ Ð¾Ð³Ð¾ +lad ıģı +Ġkh á»ķ +Ġê³Ħ ìĨį +Ñī ик +สà¹Īวà¸Ļ à¸ķัว +з оÑĢ +ÙĨ Ùı +Ġ à¸Ķัà¸ĩ +Ġà¸Ķัà¸ĩ à¸Ļัà¹īà¸Ļ +Ġc ấu +ĠÄij á»ijc +о ÑĦ +ĠاÙĦØ£ عÙħاÙĦ +ãģªãģı ãģ¦ãĤĤ +×ķ׼ ×Ļ×Ŀ +à¹ģ à¸Ľ +ĠB ên +ãĥ¯ ãĥ³ +Ġgi ám +ĠÅŀ u +Ġd áng +ع ÙĦÙĬ +à¹Ģà¸ģ ษ +à¹Ģà¸ģษ à¸ķร +ÙĪØ¬ ب +н нÑĭе +ÙĤ ضاء +à¸Ħว à¸ļ +à¸Ħวà¸ļ à¸Ħุ +à¸Ħวà¸ļà¸Ħุ ม +ãģ¤ ãģ¤ +ĠVi á»ĩc +×ŀ×ij ×ĺ +ש×Ļת ×ķ×£ +Ġв едÑĮ +k aza +kaza ÅĤ +à¸ķำ รวà¸Ī +ãĤ¿ ãĥ« +Ġпов Ñĭ +ĠповÑĭ ÑĪен +ĠS ợ +ĠìĦ¤ ëªħ +ĠÃĩ ünkü +ìĥĿ íĻľ +Ö ¾ +ãĤĮ ãģ¦ãģĦãĤĭ +Ġ×ij ר×IJש +ר ×ķ×Ĵ +Ġо ÑĦи +ĠоÑĦи ÑĨиалÑĮн +ĠÑĥ ÑģÑĤанов +ĠÑĥÑģÑĤанов лен +ĠاÙĦÙħ صر +ĠاÙĦÙħصر ÙĬØ© +ĠÐŁÐ¾ ÑįÑĤомÑĥ +ÙĨ صÙģ +ĠÙĪØ§ÙĦ ÙĨ +Ġh Ãłi +à¸Ħ ิ +ĠApr ès +ì³ IJ +à¹Ģà¸ĭ ีย +×ĵ ×ŀ×Ķ +activ ité +à¸Ħิà¸Ķ วà¹Īา +ÑĤ ÑĢен +à¹Ģ ฮ +ãĥı ãĤ¤ +ãģĮ å¢ĹãģĪ +ен наÑı +Ġìĺ¤ ëĬĺ +ãĥ¢ ãĥ³ +Ġкон еÑĩно +ĠÙħÙĤ ابÙĦ +cl é +Ġh ü +Ġth ẳng +ìłģ ìĿ´ +ĠÐIJ лекÑģ +ĠÐIJлекÑģ ан +ĠÐIJлекÑģан дÑĢ +ãĥŀãĥ³ ãĤ·ãĥ§ãĥ³ +ãģ²ãģ¨ ãģ¤ +ãģª ãģĬ +à¹Ģà¸Īà¹īา à¸Ĥà¸Ńà¸ĩ +ëĵľ 리 +Ø´ اء +ĠsaÄŁ lık +ĠÅŁ imdi +×Ļ×IJ ׾ +تأ Ø«ÙĬر +Ø£ سب +أسب اب +ĠвÑĭполн ен +л ок +ש ×Ļ×ij×Ķ +Ġl ắm +ĠTr Æ°á»Ľc +Ġ×Ķ×¢ ׾ +리 를 +ĠÑĢ ÐµÐ¶ +ĠÑĢеж им +int é +inté gr +×Ĵ ׳×Ļ +ĠاÙĦØ´ عر +Ġmil hões +Ġpeque ño +ãĤ³ ãĥ¼ãĤ¹ +×ķ׼ ×Ĺ +à¹Ģà¸Ĭ à¹īา +شر ÙĤ +Ġh ương +รัà¸IJ à¸ļาล +à¸ģล าย +à¸ģลาย à¹Ģà¸Ľà¹ĩà¸Ļ +Ġпод Ñħод +תש ×ķ×ij×Ķ +ãģıãģª ãģ£ãģ¦ +ĠاÙĦØ£Ùħ Ùħ +ĠH á»įc +ĠwspóÅĤ pr +ĠwspóÅĤpr ac +Ñĩ Ñĥв +ÑĩÑĥв ÑģÑĤв +ÃŃst ico +à¹Ģà¸ģ าะ +ìĽ Ģ +Ġназ ад +ãĤĭ ãĤĪãģĨãģ« +ĠС Ш +ĠСШ ÐIJ +м он +ĠAs ÃŃ +×ķר ×Ĵ +полн ен +×ŀס ׾ +×ŀ×¡×ľ ×ķ׾ +à¹Ģลืà¸Ń à¸Ķ +à¹Ģริà¹Īม à¸ķà¹īà¸Ļ +ĠاÙĦØ¥ Ùħ +ĠاÙĦØ¥Ùħ ارات +צ×Ķ ×¨ +ãĥ¡ãĥª ãĥĥãĥĪ +ĠпоÑĤ ом +в из +ĠÙģ ØªØ±Ø© +å¾Į ãģ® +ÐĿ ÐIJ +×ŀס ר +ÙĬر ÙĬ +pr é +Ġte ÅŁek +ĠteÅŁek kür +Ġöd eme +د اÙĨ +ãģ¾ ãģĹãģ¦ +缮 ãģ« +ĠÑĤ еÑĩение +l ard +lard ır +à¹Ģรา à¸Īะ +ס פ×Ļ +ĠÙĪÙĥ ذÙĦÙĥ +Ġh át +Ġt á»Ļc +à¸Ħุ ย +Ġb ức +ØŃ ÙĬÙĨ +èģŀ ãģĦãģ¦ +Ùħؤ شر +ĠNh ư +Ġмен ее +ละ à¸Ħร +Ñģ ин +ĠÑĢ ÐµÐº +ĠÑĢек л +ĠÑĢекл ам +ĠÙģ ÙĩÙĪ +Ġ׾ ×ĸ +×Ļ׳ ×ķת +ĠÅŁ art +ÑģÑĤав ка +Ġíı¬ íķ¨ +ãģ«è¡Į ãģı +ï¼ Ŀ +ĠпозволÑı еÑĤ +Ġת×ķ׼ ׾×ķ +ов ал +صÙĦ Ø© +Ġ׾ש ׳×ķת +ĠÐĺ гÑĢ +ÙħÙĨتج 
ات +Ġsat Ä±ÅŁ +Ñģ ко +ĠاÙĦØ«ÙĦاث اء +Ġ×Ķ×ĵ×ijר ×Ļ×Ŀ +ãģĹãģ¾ ãģĹãĤĩãģĨ +بÙĤ Ùī +åĬĽ ãĤĴ +ĠÃĩ ok +ãĥģ ãĥ¥ +à¹Ģà¸Ĭ ืà¹īà¸Ń +ยุ à¸Ħ +ศา ล +Ġ×§×ķ×ĵ ×Ŀ +×ĸר ×Ļ×Ŀ +ãģ® åł´åIJĪ +ĠìķĬ ìķĺ +ãģĤãĤĬãģ¾ãģĻ ãģĮ +×IJ שר +è¡Į ãģı +ãģ» ãģĭ +æ°Ĺ ãģ«ãģªãĤĭ +й деÑĤ +íķĺìĺĢ ëĭ¤ +ستÙħر ار +ĠÐŁÑĢ Ðµ +ĠÑģ боÑĢ +ĠìķĦ 무 +ç§ģ ãĤĤ +ع ص +Ġн иÑĩ +ĠниÑĩ его +ĠпÑĢи ем +×§ ×ķ×ŀ +ĠìĪĺ ëıĦ +Ġì ¡´ +Ġì¡´ ìŀ¬ +ĠØ£ Ø«ÙĨ +ĠأثÙĨ اء +ĠÙĪØ§ÙĦ ØŃ +ãģĮ ãģ§ãģįãĤĭ +Ġת ×Ķ +Ġת×Ķ ×Ļ×Ķ +ר ף +ĠÑģвÑıз и +×Ĵ שת +Ñģп екÑĤ +ס ×ij×Ļ×ij +ס×ij×Ļ×ij ×Ķ +ĠíķĦìļĶ íķľ +ت خصص +Ġж ив +Ġжив оÑĤ +ĠMay ıs +تع ا +تعا ÙĪÙĨ +ĠعÙĨ Ùĩا +ów ki +ĠاÙĦÙģÙĦسطÙĬÙĨ ÙĬ +ãģłãģijãģ§ ãģªãģı +ìĿ¸ ì§Ģ +ĠاÙĦس ÙĪØ¯ +ĠاÙĦسÙĪØ¯ اÙĨ +إجراء ات +Ġkö tü +Ġ×Ļ ×ª×¨ +×Ĵ ×Ļש×Ķ +Ġצ ×ķר×ļ +รà¸ĸ ย +รà¸ĸย à¸Ļà¸ķà¹Į +Ñħ оÑĤ +Ðł ÐIJ +ÙĪ Ø·ÙĨ +Ġsay ısı +ס ×Ĺר +Ùħ ÙĪÙĦ +ãĤĴæĮģ ãģ£ãģ¦ +ع اÙĨ +Ġt á»Ļi +ĠвÑĭ ÑĪе +Ġt ầm +ãĥĪ ãĥ¬ +×Ļצ ×ķ +ม ุม +س ÙĪØ¯ +ìłĦ ìŀIJ +ãĤµ ãĥŃãĥ³ +ìĤ° ìĹħ +ĠоÑģнов ан +Ø® Ù쨶 +רצ ×Ķ +بÙĬ ض +×ķÖ ¹ +ס×Ļ ×Ļ×¢ +Ġש ×IJ×Ļ +ĠاÙĦÙĤر Ø¢ÙĨ +ĠТак же +×ŀש ×ŀ×¢×ķת +س ÙĩÙĦ +Ġ×Ķ ×ł×Ķ +ãĤĴ ãģĹãģ¦ãģĦãĤĭ +×Ļ ×Ļס +×Ķ ×ķ×IJ +ĠB ÃŃ +Ġмал о +ĠëͰëĿ¼ ìĦľ +Ġר ×Ĺ×ij +ãģĮ é«ĺãģĦ +ÙĪ Ø§Ø³ +ìĤ ¼ +׳ ×¢ +ãģ£ ãģ¡ãĤĥ +ĠT üm +à¸Ńีà¸ģ à¸Ķà¹īวย +ãģĹãģ¦ ãģıãģłãģķãģĦ +ÙĨØ´ اط +ãĥĹ ãĥ©ãĥ³ +али ÑģÑĮ +×ĵ ×ľ×ª +Ġwc zeÅĽ +ĠwczeÅĽ niej +ĠÑįÑĤ им +Ġthá»ĭ t +à¸ļ ัà¸į +à¸ļัà¸į à¸Ĭี +ãģļ ãģ£ãģ¨ +ÑĢ Ð¸Ð½ +Ġswo jÄħ +íķĺëĬĶ ëį° +Ġë§Įëĵ¤ ìĸ´ +تش Ùĥ +تشÙĥ ÙĬÙĦ +ائ Ùĩ +Ġ׾פ ×Ĺ×ķת +ãĥĭ ãĥ¥ +ãĥĭãĥ¥ ãĥ¼ãĤ¹ +׼×IJ ף +ãģ§ãģį ãģŁ +зв он +Ġsta ÅĤ +×Ĺ×ijר ת×Ļ +ĠØ£ عÙĦÙĨ +à¹ģà¸ļà¸ļ à¸Ļีà¹ī +بد Ø¡ +ãĤģ ãģŁ +Ġ×ŀש ×ŀ×¢×ķת +Ġ×ŀש×ŀ×¢×ķת ×Ļ +ör ü +Ġh ạnh +z ähl +ĠL ý +Ġ×ij ×Ķת +Ġ×ij×Ķת ×IJ×Ŀ +б аÑĢ +ì¦ Ī +ä»ĬåĽŀ ãģ® +Ġy ü +Ġyü ks +Ġyüks el +ãĤ½ ãĥ¼ +ãģĤ ãĤĮ +ת ׾×ŀ×Ļ×ĵ +ãģ¤ ãģª +×ij ׳×Ļ×Ŀ +Ġx ếp +ĠмÑĥж Ñĩин +ĠاÙĦÙĥ تاب +׼ ×ŀ×ķת +Ġç e +Ġçe ÅŁ +ĠçeÅŁ it +ĠçeÅŁit li +×ĵ ×Ļר×ķת +à¸ļุ à¸į +ĠاÙĦØ¥ ÙĦÙĥ +ĠاÙĦØ¥ÙĦÙĥ ترÙĪ +ĠاÙĦØ¥ÙĦÙĥترÙĪ ÙĨÙĬ +ĠباÙĦØ¥ ض +ĠباÙĦإض اÙ쨩 +Ġyö nel +Ġyönel ik +mys ÅĤ +à¸Ķà¹īวย à¸ģาร +à¸ģาร à¸Ĺำ +ов Ñĭм +Ø£ زÙħØ© +æİ¢ ãģĹ +íļ ¨ +Ġ×ķ×IJ ×Ŀ +Ġnghi êm +ÑĪ Ð¸Ð½ +ка л +Ġcrian ças +èĩªåĪĨ ãģ§ +Ġн ай +Ġнай ÑĤи +ĠS á»ij +ĠÃ¶ÄŁrenc iler +ãĥ¶ æľĪ +Ñģ ан +ĠJ á +ĠkonuÅŁ ma +شر Ø· +ëĪ Ī +ar rière +ضر ÙĪØ±Ø© +ãĥĶ ãĥ³ +×¢ שר +аÑĢ ÑĮ +جÙħ اع +Ġdé co +Ġ×Ļ×Ķ ×ķ×ĵ×Ļ +à¸ŀ ลาà¸Ķ +ĠÙĬ ÙĥÙĨ +Ġج اÙħعة +Ø· بÙĤ +Ġbo ÅŁ +×ķ ×ķ×IJ +×ŀ×ĵ ×¢ +×§×ij×ķצ ת +פ ×Ļר +jÄħc ym +ÙħØ´ ا +Ùħشا ÙĥÙĦ +צ פ×ķף +Ø¥ ست +×ŀ׼ ר +سÙħ ع +Ġкак ой +ÑĤ воÑĢ +ØŃ ج +Ù쨱 ض +пÑĢав лен +Ġник ак +Ġmi á»ĩ +Ġmiá»ĩ ng +ü ÃŁ +иÑĢов ал +׾ ×ŀ×ķת +次 ãģ® +ÙĦ Ø· +à¸ķ ัà¸Ļ +×Ķ ×ª×Ĺ×Ļ׾ +Ġfoto ÄŁ +ĠfotoÄŁ raf +طر ØŃ +à¸Ńà¸Ńà¸ģ à¹Ħà¸Ľ +Ġy ên +Ġп ок +Ġпок Ñĥп +ĠпокÑĥп а +ÑĨ Ñĥ +Ġкомп ÑĮÑİ +ĠкомпÑĮÑİ ÑĤеÑĢ +ĠاÙĦÙĥ رÙĬÙħ +تص Ùħ +تصÙħ ÙĬÙħ +Ġоказ а +Ġzar ówn +Ġzarówn o +ëĮĢ ì¶ľ +ãĤ»ãĥ³ ãĤ¿ãĥ¼ +Ġjako ÅĽci +æĤ © +æĤ© ãģ¿ +Ø£ÙĨ ÙĪ +Ø£ÙĨÙĪ Ø§Ø¹ +ë¹ ł +Ġìłķ ë§IJ +Ġk ẻ +ĠÑģай ÑĤа +Ġ×Ķ ×¢×¨×ij +Ùĩ ز +pres ión +ĠÑģÑĤ ен +ãģ£ãģ¦ ãĤĭ +Ġhız lı +Ðļ ÐIJ +×ŀשפ ×Ĺת +ĠÙĨ Ùĩا +ĠÙĨÙĩا ÙĬØ© +ãģ¾ ãģĦ +о ÑħÑĢан +ร à¹īà¸Ńย +ล ึà¸ģ +ĠÙĪØ¨ اÙĦ +ãĤĤãģ® ãģĮ +ר׼ ×Ļ×ij +ãĤ¤ ãĥ¤ +س ؤ +سؤ اÙĦ +ĠÙĦØ£ÙĨ Ùĩ +ĠkonuÅŁ tu +Ðļ ÑĥпиÑĤÑĮ +Ġש×IJת ×Ķ +ĠÙĪØ§ÙĦ س +Ġmożliwo ÅĽci +Ġpró b +ëĶ ° +ãģ© ãĤĮ +ĠÐľ ин +ĠоÑĢганиз м +ãģ«å¯¾ ãģĻãĤĭ +ĠPr é +Ġpriv é +ch è +ãģĦãģŁãģł ãģį +สà¸Ļุ à¸ģ +ajÄħ ce +ĠD zi +ĠDzi ÄĻki +ÅĤat w +r än +rän k +æĿ¥ ãģŁ +Ġ×Ķ×Ļ×Ķ ×ķ×ĵ×Ļ +ãĤ¬ ãĥ¼ +ĠÑĢаР´ +ĠÑĢад и +к ÑĤив +Ø£ Ùĩد +Ø£Ùĩد اÙģ +ש ×IJ×Ļר +ãģ¦ ãģĦãģªãģĦ +Ġfr üh +Ġок ол +Ġокол о +Ġreg ião +ĠÑĩиÑģ ле +Ġpon iew +Ġponiew aż +ìĦ¼ íĦ° +Ġb ầu +Ġê · +Ġê· ľ +Ġê·ľ ìłķ +ĠH òa +ĠÑĤ оÑĤ +ãĤĤ å¤ļãģĦ +ĠاÙĦإسÙĦاÙħ ÙĬØ© +ãģĭ ãģĦ +Ñį н +ĠÑĥказ ан +ĠÑĤак ое +ï¼ ³ +ëĮĢ íķĻ +Ġgen iÅŁ +ĠاÙĦØ® ÙĬ +ĠاÙĦØ®ÙĬ ارات +ãĤĴè¡Į ãģĨ +ש ×ŀ×Ķ +ĠLÃł m +ÙĪÙĨ ÙĬ +Ġ×IJ ׾×Ļ×ķ +Ä ĺ +à¹Ħมà¹Ī สามารà¸ĸ +人 ãģ¨ 
+بر ز +×Ļס ×ķ×ĵ +×Ĵ ׾×Ļ +ĠÙĬ ÙĨا +ĠÙĬÙĨا ÙĬر +ĠкаÑĢÑĤ ин +Ġt ôn +à¹Ģ à¸ģร +à¸Ħ à¸Ķี +Ġ׾×IJ ×ķר×ļ +ãĤĤãĤī ãģĨ +ãģĭ ãģĭãĤĭ +ани и +Ġara ÅŁtırma +ÙĦاØŃ ظ +ãģĦ ãĤĦ +ĠT Ãłi +Ġ à¸Ļà¸Ńà¸ģà¸Īาà¸ģ +Ġà¸Ļà¸Ńà¸ģà¸Īาà¸ģ à¸Ļีà¹ī +ĠÄIJ ảng +ãģ£ãģ¦ ãģįãģŁ +Ġà¸ĭึà¹Īà¸ĩ à¹Ģà¸Ľà¹ĩà¸Ļ +Ġt ả +Ġmożliwo ÅĽÄĩ +ĠS ản +Ġİ ki +Ġc ắt +س Ø£ÙĦ +Ġbak ım +Ø´ ب +à¸ķ ีà¹ī +à¸ŀ ยาย +à¸ŀยาย าม +สั à¸Ľ +à¸ªà¸±à¸Ľ à¸Ķา +à¸ªà¸±à¸Ľà¸Ķา หà¹Į +ë° Ģ +еÑĢ Ñĭ +Ġc ánh +Ġthu ế +ت بع +ãģ«åħ¥ ãĤĮ +Ñİ ÑģÑĮ +íļĮ ìĿĺ +ç°¡ åį +ç°¡åį ĺ +ç°¡åįĺ ãģ« +Ġtr úc +ĠاÙĦÙĥ ÙĪÙĬ +ĠاÙĦÙĥÙĪÙĬ ت +ãĤıãģij ãģ§ãģĻ +ĠÑģв об +ĠÑģвоб од +ĠÑĥÑĩаÑģÑĤ ник +สิ à¹īà¸Ļ +ĠпÑĢо ÑĦеÑģÑģиона +ĠпÑĢоÑĦеÑģÑģиона лÑĮн +Ñģп оÑĢ +×Ĺ ×ķ×ij×Ķ +Ùħع ÙĨÙī +ĠاÙĦÙģ ØªØ±Ø© +สูà¸ĩ สุà¸Ķ +ãĤı ãģļ +ĠÄij è +ĠÄijè n +æ¯Ķ ãģ¹ +า à¸ĺิ +Ġmoż emy +à¹ģ à¸ĭ +à¸Īะ à¹Ħมà¹Ī +Ġs ắp +Ðļ Ðŀ +Ġprá ctica +ÙĪÙĥ اÙĦØ© +è¾¼ ãĤĵãģ§ +ológ ica +Ġе Ñī +ĠеÑī Ñij +تع دÙĬÙĦ +ĠØ£ Ùĥد +Ġצר ×Ļ׼ +Ġצר×Ļ׼ ×Ļ×Ŀ +Ø« Ùħ +Ġк ÑĢÑĥ +ĠкÑĢÑĥ п +×ij×Ļ×§ ×ķרת +Ġì¡° ê¸Ī +ãģ¨ãģį ãģ¯ +Ġb ạc +ĠÑĢаÑģ пол +ĠÑĢаÑģпол ож +ĠÑĢаÑģполож ен +ز ÙĬÙĨ +ĠÐļ ÑĢоме +ĠاÙĦÙĨ ظر +×Ķ ×ķ×ĵ +ĠاÙĦس بت +ã썿ĢĿ ãģĦ +Ġpa ÅĦst +ĠpaÅĦst w +ĠÙĦÙĬ ست +ĠбÑĥд Ñĥ +à¸Ĺัà¸Ļ à¸Ĺี +ร าม +ØŃ صÙĪÙĦ +ãģĹãģ¦ãģıãĤĮ ãĤĭ +ĠاÙĦØ¥ سرائÙĬÙĦ +ĠاÙĦإسرائÙĬÙĦ ÙĬ +ãģĵãĤĮ ãģ¾ãģ§ +ìĤ¬ 를 +Ġs ürü +à¹Ģว à¸Ńรà¹Į +à¹Ģà¸ĭ à¸Ńรà¹Į +Ġutilis é +ĠÑģиÑģÑĤем а +Ġdw ó +Ġdwó ch +Ġpróp rio +Ġëĵ± ìĿĦ +arr êt +ĠЧ а +×IJ×ŀ ׳×ķת +عار ض +à¹Ģà¸ģม สà¹Į +Ġ׾×Ķ ×ij×Ļף +Ġ׾ ×ij×Ĺ +Ġ׾×ij×Ĺ ×ķר +สา à¸Ĥา +ĠÐľÐ¾Ñģк ве +ب عد +ĠاÙĦÙĤر ار +ĠÄIJ á»ĭa +Ġ×Ĺ ×Ĵ +Ùģ ØªØ± +ÙĪÙĨ Ø© +Ġ×Ķ×ĸ ×IJת +å¸Ĥ ãģ® +ãģ» ãģĹãģĦ +Ġ×ij×¢ ×Ļר +ĠÑĤеп еÑĢÑĮ +ìĬµ ëĭĪê¹Į +à¹Ħม à¹Īว +à¹Ħมà¹Īว à¹Īา +à¹Ħมà¹Īวà¹Īา à¸Īะ +×ŀ ×IJ×Ķ +æĥħ åł± +æĥħåł± ãĤĴ +غ ÙĨ +Ġпо Ñı +ĠпоÑı ви +éģİ ãģĶ +تش غ +تشغ ÙĬÙĦ +в ел +Ġ×Ĺ ×ŀ +ãģ¨ãģªãĤĬ ãģ¾ãģĻ +Ġra ÄŁ +ĠraÄŁ men +ãģĭ ãģ©ãģĨ +ãģĭãģ©ãģĨ ãģĭ +ен ко +ì§Ģ ê³ł +Ġ×IJ׾ ×Ļ×Ķ +ĠØ£ ÙĦ +à¸Īำ หà¸Ļ +à¸Īำหà¸Ļ à¹Īาย +nız ı +Ġ׾ק ×Ĺת +Ø£ ÙĩÙħ +Ø£ÙĩÙħ ÙĬØ© +ت غÙĬر +ש ×Ĺר +ס×ķפ ר +×ĵ ×Ļר +èī¯ ãģĭãģ£ãģŁ +×ŀ׾×Ĺ ×ŀ×Ķ +ÑģÑĤв ие +ÑĤ ÑĢаÑĤ +ĠاÙĦØ£ Ø® +ĠاÙĦأخ ÙĬرة +ĠاÙĦØŃ صÙĪÙĦ +Ġcréd ito +צ ×Ļ×¢ +ãĥ¬ ãĥĻãĥ« +بر ÙĬ +ëIJ IJ +ãģł ãģ£ãģ¦ +Ġreal tÃł +س Ù쨱 +×ķ׳ ×ķ +×Ĵ ×ķ×ĵ +×Ĵ×ķ×ĵ ׾ +ฮ า +ãģĹãģ¦ ãģĬãĤĬãģ¾ãģĻ +Ġg Ãł +Ġ׾×ij צע +å¼ķ è¶ĬãģĹ +Ġ×ŀ ×Ļ׾×Ļ +Ġ×ŀ×Ļ׾×Ļ ×ķף +Ùħ در +Ùħدر سة +פ ×ķ×ĺ +à¸Ļà¹īำ มัà¸Ļ +ëģ Ŀ +ع Ùĥس +ĠÙĤ ض +ĠÑĢÑĭ б +خط Ø· +×ŀ×ķס ×ĵ +Ġ׼׾ ׾×Ļ +ĠкоÑĤоÑĢ Ð¾Ðµ +צ×Ļ ×ķף +ĠмеÑģÑĤ а +ãģĭ ãģ¤ +г ÑĢÑĥпп +׾ ×Ļ׾ +ת ×ķ×IJר +ë³µ ì§Ģ +à¹ģà¸ľ à¹Īà¸Ļ +Ġ×ij×¢ ת +æĻĤéĸĵ ãĤĴ +ï¼ £ +ãģ¨ãģĦãģĨãģĵãģ¨ ãģ§ +Ġ׾×Ķ ×§ +Ġ׾ ×ĸ×Ķ +ĠìłĢ ëĬĶ +ĠاÙĦØ¥ رÙĩاب +ĠìŀĪëĬĶ ëį° +ĠÑĤ огда +Ġ×Ķ ×¦×Ļ +×ķ׾ ×ĺ +Ġר פ×ķ×IJ×Ļ +ãģĵãģ¨ ãģ§ãģĻ +ĠÄij ÃŃch +ØŃ ÙĬا +Ġ×Ķ×ŀש ×Ĺ×§ +ãģľ ãģ² +Ġ×ŀ×IJ פשר +ãģ¿ ãģ¾ãģĹãģŁ +ĠاÙĦØ£ÙħÙĬر ÙĥÙĬ +Ùħج تÙħع +Ġس اب +Ġساب ÙĤ +׼ ×Ļ׾ +Ạ¾ +ãĥª ãĤ¹ãĥĪ +Ġì ĥ +Ġìĥ Ī +ĠìĥĪ ë¡ľ +ĠìĥĪë¡ľ ìļ´ +ĠD á»ĭch +à¹Ģหมาะ สม +ĠاÙĦÙĨ بÙĬ +׾ ׾ +ÙĨ ع +Ðĵ лав +Ðĵлав наÑı +Ùħر ض +Ġ×ķ ×ĵ +ت ÙĤÙĬ +تÙĤÙĬ ÙĬÙħ +Ġb ảng +ĠÙģ ÙĤاÙĦ +×¢ ×ŀ×Ļ +д ÑĢа +Ġsu á»ijt +سر عة +Ġc á»Ń +Ġ×Ķ ×Ļ×Ĺ×Ļ×ĵ +سع ÙĬد +à¸Ńา à¸Ĭีà¸ŀ +Ġس ÙĪØ§Ø¡ +ãĤ½ ãĥķãĥĪ +Ġл иÑĩно +ĠÐļ оÑĢ +اÙĩ تÙħ +اÙĩتÙħ اÙħ +à¸Ń à¸Ķี +à¸Ńà¸Ķี à¸ķ +ãģIJ ãĤīãģĦ +Ġiht iya +Ġihtiya ç +ãģ¾ãģ§ ãģ® +ìĭľ ìĬ¤ +ìĭľìĬ¤ íħľ +ÑĢÑĥ ÑĪ +ãĤĦ ãģ£ãģ± +ãĤĦãģ£ãģ± ãĤĬ +к еÑĢ +Ġ ży +Ġży w +кл он +Ġl ượt +à ¾ +да Ñĩи +tür k +غ ÙĪ +ĠигÑĢ Ð¾Ðº +Ġph ê +Ġש ×¢×ľ +ĠاÙĦÙħ دÙĨÙĬ +ĠìŬ룬 ë¶Ħ +ער ×Ļ×Ŀ +Ñħод ÑıÑĤ +Ġx ứ +ÐĹ Ð° +ĠÙģ Ø±Øµ +à¸Īะ à¸Ĺำà¹ĥหà¹ī +íģ ´ +×¢ ×ij×ķר +à¹Ģหลà¹Īา à¸Ļีà¹ī +èĢĥãģĪ ãĤĭ +ÑĢ ÐµÑģÑĤ +н нÑĭй +Ġc ầm +دا Ø®ÙĦ +ĠÙħÙĦÙĬ ار +ĠÐIJ л +ĠвÑĢем ен +à¸Ĭà¹Īวย à¹ĥหà¹ī +ר×Ļ ×ķת +ëĵ ¯ +飲 ãģ¿ +׳ ׾ +שת ×£ +ĠاÙĦسعÙĪØ¯ ÙĬ +u ÃŁ +ìĿ¸ ëį° +ĠìĿ¼ ë°ĺ +ÅĤ ÄĻ +Ġm á»iji +×ŀ ×Ļ׳ +ĠاÙĦØ£ Ø·Ù쨧ÙĦ +Ġçı kan +é cole +×§ ×Ļש +×§×Ļש ×ķר +ĠоÑģ ÑĥÑīеÑģÑĤв +ĠоÑģÑĥÑīеÑģÑĤв лÑı +×ij ×IJר 
+à¹Ħà¸Ľ à¸Ķà¹īวย
+Ġ×¢ ×ķ׾×Ķ
+à¸ģà¹ĩ à¹Ħมà¹Ī
+ãĥ¢ ãĥĩ
+ãĥ¢ãĥĩ ãĥ«
[... several thousand further "+" lines omitted: one byte-level BPE merge pair ("left right") per added line, as in a GPT-2-style tokenizer merges file. The pairs cover multilingual subwords (Thai, Hebrew, Arabic, Russian, Japanese, Korean, Turkish, Polish, Vietnamese, Portuguese, Spanish, German, French, among others) and are written in the byte-to-unicode alphabet such files use, e.g. "Ġ" stands for a leading space, so the entries look like mojibake but are intact data, not corruption ...]
+Ġ×Ķ×¢×ľ×Ļ ×ķף
+lä uft
+ãĥĻ ãĤ¹ãĥĪ
+Ġr ê +Ġrê ve +×IJ ×ij×Ļ×ij +×Ļ ×Ļ×ļ +ë¶ Ļ +ãĤ¤ãĥ³ ãĥī +ÅĤo ży +ÅĤoży Äĩ +ع ائÙĦ +عائÙĦ Ø© +Ø£ ÙĪØ± +Ø£ÙĪØ± اÙĤ +à¸Ĺà¹īà¸Ńà¸ĩ à¸ĸ +à¸Ĺà¹īà¸Ńà¸ĩà¸ĸ ิà¹Īà¸Ļ +Ġä hn +Ġähn lich +ãĥŁ ãĥĭ +à¸ľ ู +à¸ľà¸¹ à¹īà¸Ļ +à¸ľà¸¹à¹īà¸Ļ ำ +ĠмаÑĤеÑĢиал Ñĭ +Ġкап иÑĤ +ĠкапиÑĤ ал +ï¼ ¦ +Ġseç il +Ġh ứng +Ġintéress ant +ãģ£ãģ¦ ãģĦãģı +Ġe ÄŁer +ëIJĺ ìĹĪìĬµëĭĪëĭ¤ +Ġan laÅŁma +ãģĶ åĪ©ç͍ +Ġ×ij ×ĸ׼ +Ġ×ij×ĸ׼ ×ķת +ëĿ¼ ë©´ +ĠÙĬ ÙĪØ³ +ĠÙĬÙĪØ³ Ùģ +أسÙĦ ØŃØ© +ĠGef ühl +ĠноÑĢм алÑĮн +ãĥĻ ãĥ³ +ãģķãĤĮ ãĤĭãģĵãģ¨ +ĠÐij еÑģ +ãģ¨ãģĦ ãģĪãģ° +ĠÙħ ÙĩÙħ +ĠÙħÙĩÙħ Ø© +ãģ§ãģĹãĤĩãģĨ ãģŃ +ĠêµŃ ëĤ´ +à¹Ģม à¹ĩà¸Ķ +×ŀ×ij קר +ĠاÙĦد ÙĨÙĬ +ĠاÙĦدÙĨÙĬ ا +à¸Ĭ ู +к ÑĢÑĥÑĤ +Ġtho áng +Ġ׳ ×ĵר +Ġ׳×ĵר ש +ĠÑĢаÑģÑģ казал +ĠAu ÃŁerdem +פ ×IJר +פ×IJר ×§ +Ġ×ŀש×Ĺ×§ ×Ļ×Ŀ +צ ר׼×Ļ×Ŀ +×ŀ×ĵ ×ķ +×ŀ×ĵ×ķ ×Ļ×§ +èĭ¦ ãģĹ +ĠÑģ иг +ĠÑģиг нал +ĠM á»įi +Ġtr ữ +Ġnast ÄĻp +ĠnastÄĻp nie +Ġì¶Ķ ì§Ħ +ĠاÙĦÙģ ÙĨد +ĠاÙĦÙģÙĨد ÙĤ +koÅĦ czyÅĤ +ส ีà¹Ī +×§ ×Ļ×ij +×§×Ļ×ij ×ķ×¥ +ĠнÑĥж нÑĭ +大 åĪĩ +大åĪĩ ãģª +æıĽ ãģĪ +ת ×ķס +ת×ķס פת +ãģ£ãģ¦ ãģĦãģªãģĦ +Ġм Ñı +ĠмÑı г +ĠмÑıг к +Ġjak ie +Ġjakie ÅĽ +à¸ķำ à¸ļ +à¸ķำà¸ļ ล +ĠìŀĪ ì§Ģ +×ij×ĺ ×IJ +ĠоÑĤлиÑĩ но +ÙĤ ÙIJ +ĠавÑĤом об +ĠавÑĤомоб и +ĠавÑĤомоби лÑı +دÙĬÙħÙĤرا Ø·ÙĬ +ĠاÙĦ ÙĪØ§ +ĠاÙĦÙĪØ§ ØŃد +Ġس ÙĪØ±ÙĬØ© +Ø£ غÙĦ +أغÙĦ ب +ĠÑįк ÑĢан +ãĥĹ ãĥ©ãĤ¤ +Ġjeste ÅĽ +ãĥIJ ãĥª +Ġ×Ķ×IJ ×ķ×ķ×Ļר +ائ Ùĥ +à¸Ńยà¹Īาà¸ĩ ยิà¹Īà¸ĩ +ÑĢ ÐµÐºÑĤ +Ġum o +Ġumo ż +Ġumoż li +Ġumożli w +Ġumożliw ia +Ġnäch ste +ĠìŀĪ ì§Ģë§Į +ĠпÑĢед н +ĠпÑĢедн аз +ĠпÑĢедназ наÑĩен +Ġma çı +Ġp omi +Ġpomi ÄĻd +ĠpomiÄĻd zy +ĠاÙĦÙĦ ÙĤاء +à¹Ģà¸Ķ à¸Ńะ +Ġнов оÑģÑĤи +×ŀ׊׾×Ķ +رÙĬاض ÙĬ +à¸Ķ à¸Ļ +à¸Ķà¸Ļ à¸ķรี +ب صر +ìĬ¤ íĥĢ +scri pción +Ġnap isa +Ġnapisa ÅĤ +Ġ׳ש ×ŀ×¢ +ĠاÙĦÙħØŃ ÙĦÙĬ +Ġhi á»ĥn +×IJ ×Ĺ +×IJ׊ר×IJ×Ļ +Ġг ÑĢаниÑĨ +æīĭ ç¶ļãģį +Ùĥ سب +Ġà¹ģà¸ķà¹Ī à¸ĸà¹īา +à¸Ķาว à¸Ļà¹Į +à¸Ķาวà¸Ļà¹Į à¹Ĥหลà¸Ķ +ãĤĭãģĵãģ¨ãģĮãģ§ãģį ãģ¾ãģĻ +åŁºæľ¬ çļĦãģ« +ÙĪÙĦ اد +rä ume +د ÙģØ§Ø¹ +×Ļצ ×¢ +ĠO czy +ĠOczy wiÅĽcie +ĠÅ ģ +ĠÅģ a +اÙĦÙĬ اب +اÙĦÙĬاب اÙĨ +áºł I +ĠBir liÄŁi +×Ķ ×ķצ +×Ķ×ķצ ×IJת +ĠÄij ua +Ġê·¸ëŁ¬ ëĭĪê¹Į +Ġréal ité +ع ÙĦاÙĤات +J este +Jeste ÅĽ +Ġмн ож +Ġмнож еÑģÑĤво +ï¼ « +ãĥĹãĥŃ ãĤ¸ãĤ§ +ãĥĹãĥŃãĤ¸ãĤ§ ãĤ¯ãĥĪ +ĠÑĦ л +ظ ÙĨ +×Ĵ׾ ×Ĵ׾ +ĠmÅĤod zie +ĠmÅĤodzie ż +à¸Ļà¹īำ à¸ķา +à¸Ļà¹īำà¸ķา ล +ÐĽ Ðķ +×ij ×ķ×ĺ +Ġ׾×Ķ ×Ĵ×Ļ×ĵ +ãģĵãģ¨ãĤĤ ãģĤãĤĭ +ز اد +×ŀ×Ļ×ĵ ×¢ +ĠgÅĤówn ie +ãĥı ãĤ¦ +ãĥıãĤ¦ ãĤ¹ +б ел +Ġét ape +ðŁĺ Ģ +Ġмод елÑĮ +a ģını +ש ×Ĺ×§ +ש×Ĺ×§ ף +Ġni ño +à¸Ĭ à¹īาà¸ĩ +à¹Ģล ีย +ĠÑĦоÑĢм е +ĠاÙĦØ´ رÙĬÙģ +ĠÑĥд аÑĢ +arr iv +arriv ée +Ġmies iÄĻ +ĠmiesiÄĻ cy +ØŃ رÙĥ +ØŃرÙĥ ات +ĠDi á»ħn +ÐĿ Ы +ãģ¾ãģ£ãģŁ ãģı +Ġ×Ļ ×¨×ķ×§ +еÑģÑĤ еÑģÑĤв +еÑģÑĤеÑģÑĤв енн +Ġê·¸ ëŁ¼ +ĠاÙĦÙħ تÙĪ +ĠاÙĦÙħتÙĪ Ø³Ø· +Ġbéné fic +Ġbénéfic ie +Ġwy bra +Ġwybra Äĩ +ĠاÙĦز ÙħÙĨ +ĠпÑĢин Ñı +ĠпÑĢинÑı л +Ù쨱 ØŃ +Ġk sz +Ġksz taÅĤ +ĠksztaÅĤ t +ק׾ ×ĺ +×ij×ĵ×Ļ×§ ת +Ġgi ấ +Ġgiấ c +Ġpropriet Ãł +деÑĢж ан +ĠKö ln +ĠGü zel +×Ļפ ×ķ×Ļ +ĠCu á»Ļc +ÑįÑĤ аж +تر ÙĥÙĬ +ترÙĥÙĬ ز +лож ений +Ġп Ñĥ +ĠпÑĥ ÑĤи +اخت ÙĦاÙģ +åĩºãģ¦ ãģıãĤĭ +à¸ļุ à¸ģ +âĿ ¤ +ÑĦ ан +פש ×ĺ +à¸ļัà¸Ļ à¹Ģà¸Ĺ +à¸ļัà¸Ļà¹Ģà¸Ĺ ิà¸ĩ +ĠاÙĦس اد +ĠاÙĦساد س +ĠاÙĦÙĤ ÙĪÙħ +ĠاÙĦÙĤÙĪÙħ ÙĬ +Ġyönet ici +Ùĩ ÙĪØ§Øª +ÙĩÙĪØ§Øª Ùģ +Ġrespons ável +Ġпод деÑĢжива +ĠاÙĦسÙĦ Ø· +ĠاÙĦسÙĦØ· ات +ãģĹãģ¦ ãģĬãģı +ãĥļ ãĥĥãĥĪ +à¸Ľ ุà¹Īม +Ġogl Äħda +ÙĨا ÙĤ +ÙĨاÙĤ Ø´ +à¸Ħà¸Ńà¸Ļ à¹Ĥà¸Ķ +ĠMü sl +ĠMüsl ü +ĠMüslü man +ĠMo ż +ĠMoż na +Ġnum érique +Ġv á»ı +ĠسÙĬ تÙħ +Ġyer leÅŁ +монÑĤ аж +Ġgo ût +ãģ¦ ãģĬãĤĬãģ¾ãģĻ +ĠKh ánh +Ġе дин +Ġедин ÑģÑĤв +اÙĨ Ø®Ùģ +اÙĨØ®Ùģ Ø§Ø¶ +ìĭľ íĹĺ +Ġl ặng +ĠÑĢ Ð¾Ð»ÑĮ +à¸ķัว à¹ģà¸Ĺà¸Ļ +à¸Ħà¹Īา à¹ĥà¸Ĭà¹ī +à¸Ħà¹Īาà¹ĥà¸Ĭà¹ī à¸Īà¹Īาย +Ġver füg +Ġverfüg bar +ìĻĶ ëĭ¤ +ãģĦ ãģļ +ãģĦãģļ ãĤĮ +ĠиÑģÑģлед ованиÑı +меÑī а +×Ķ ×Ĺ +×Ķ×Ĺ ×ĸר +à¹ģà¸Ł à¸Ĭัà¹Īà¸Ļ +ت صرÙģ +Ø¥ رÙĩاب +Ġexerc ÃŃcio +Ġé lev +Ġélev é +สัà¸įà¸įา à¸ĵ +Ãĸ Z +ãĥĹ ãĥŃãĤ° +ãĥĹãĥŃãĤ° ãĥ© 
+ãĥĹãĥŃãĤ°ãĥ© ãĥł +Ġw ewnÄĻtrzn +Ġhen üz +é£Ľ ãģ³ +à¹Ģà¸Ķ à¸Ńรà¹Į +Ñģ Ñĥж +ÑģÑĥж ден +شع ÙĪØ¨ +ãģ²ãģ¨ ãĤĬ +Ġwy ÅĤÄħ +ĠwyÅĤÄħ cznie +Ġпло Ñħо +ÐĶ Ðķ +Ạ¦ +Ù쨹 اÙĦÙĬ +ÙģØ¹Ø§ÙĦÙĬ ات +ĠاÙĦع شر +ÑģÑĤÑĥп ил +Ġy arg +Ġyarg ı +нÑİ Ñİ +×ķ×IJ ×ij +Ġu ç +Ġuç ak +ë² ½ +تÙĪ ÙĤÙĬ +تÙĪÙĤÙĬ ع +Ġì¤ij ìĭ¬ +׳×Ļ×ķ ×ķ×ĺ +Ø£ ÙĥÙĦ +ç½® ãģĦãģ¦ +éłĤ ãģį +Ġ×Ķת ×ij +Ġ×Ķת×ij ×Ļ×¢×Ķ +Ġdür fen +Ùħ ÙĤاÙĦ +ÙħÙĤاÙĦ ات +Ġز ÙħÙĨ +à¸ŀฤ ศ +à¸ŀฤศ à¸Ī +à¸ŀฤศà¸Ī ิà¸ģ +à¸ŀฤศà¸Īิà¸ģ ายà¸Ļ +ĠнеÑģк олÑĮ +ĠнеÑģколÑĮ ки +ĠнеÑģколÑĮки Ñħ +Ġcrian ça +มิ à¸ķร +×ŀ׼ ×Ļר×ķת +à¸ģาร à¸ļริหาร +Ġtélé charg +Ġ×IJ×ķ×Ķ ×ijת +ĠBü ro +ä½ľ ãģ£ãģŁ +ĠKi ÅŁi +ç¾İåij³ ãģĹ +à¹Ģลย à¸Ħà¹Īะ +à¸ŀà¸ļ à¸ģัà¸ļ +à¸Ī à¹īา +Ġç er +Ġçer ç +Ġçerç eve +ãĤĴä½ľ ãģ£ãģ¦ +ĠпеÑĢв ÑĥÑİ +×ŀצ ר×Ļ×Ŀ +×IJ׾ ×ķ×Ķ +×IJ׾×ķ×Ķ ×Ļ×Ŀ +Ġagr é +Ġagré able +Ġay ır +İL İ +ãĤ ¥ +Ġíĺ Ħ +ĠíĺĦ ìĭ¤ +ثاÙĦ Ø« +ת ×ĸ +ת×ĸ ×ķ׳×Ķ +ãģ¨ãģĦ ãģ£ãģ¦ +ãģ¨ãģĦãģ£ãģ¦ ãĤĤ +Ġا بÙĪ +ĠÑģоб ак +é£Łãģ¹ ãģŁ +Ġдан ном +à¹Ģล ิ +à¹Ģลิ ศ +Ġí ļ +Ġíļ ¨ +Ġíļ¨ ê³¼ +ãĤĤãĤī ãģĪãĤĭ +׳ צ׾ +ÑĦ ик +ÑĦик Ñģ +Ġjeste ÅĽmy +ת×Ĺ×ķש ×Ķ +à¹Ħมà¹Ī à¸Ħวร +ĠØŃ سÙĬÙĨ +à¸ģาร ลà¸ĩà¸Ĺุà¸Ļ +ë´ ¤ +ĠÐĺ менно +à¸ļ à¸Ńรà¹Į +à¸ļà¸Ńรà¹Į à¸Ķ +ĠC ảnh +ìĦľ ë¹ĦìĬ¤ +Ġпол ов +Ġполов ин +Ġзам еÑĩа +ãģĦãĤį ãĤĵãģª +Ġ×ij ×Ļ×§ +Ġ×ij×Ļ×§ ש +л ÑĥÑĪ +ãĤĴ è¿İ +ãĤĴè¿İ ãģĪ +جرÙĬ ÙħØ© +Ġt ây +ĠاÙĦÙĨ ÙĪ +ĠاÙĦÙĨÙĪ ÙĪÙĬ +ÃĤ N +ì¿ ł +หà¸Ļ าว +Ġ×ij׊ש×ij×ķף +ز ار +à¸Ķ าร +à¸Ķาร า +ĠÅĽ l +ĠÅĽl ub +มีà¸Ħวาม สุà¸Ĥ +Ġn hu +Ġnhu áºŃn +ÙħØŃ طة +à¹Ģสืà¹īà¸Ń à¸ľà¹īา +ĠТ олÑĮко +ĠÙĥ س +ĠÙĥس ارة +ÙħØ´ رÙĪØ¹ +niÄĻ cia +×¢ ׼ש×Ļ×ķ +ت ÙĦÙģ +تÙĦÙģ Ø²ÙĬ +تÙĦÙ쨲ÙĬ ÙĪÙĨ +Ġl Æ°á»Ľi +ĠÐľÐ¾Ñģк вÑĭ +Ġré serve +Ġan laÅŁ +ĠanlaÅŁ ıl +Ġed eceÄŁi +รà¸Ńà¸ĩ à¹Ģà¸Ĺà¹īา +Ġب Ø· +Ġبط رÙĬ +ĠبطرÙĬ ÙĤØ© +ãģ¦ãģĹãģ¾ ãģ£ãģ¦ +ãĤĤãĤī ãģ£ãģ¦ +بر ج +æ± ļ +æ±ļ ãĤĮ +Ġch oc +Ġchoc ia +Ġchocia ż +Ġzob ac +Ġzobac zyÄĩ +пÑĢ Ñı +пÑĢÑı жен +ĠÑĨ иÑĦ +ĠÑĨиÑĦ ÑĢ +Ġм ам +Ġвз ÑıÑĤÑĮ +Ġch ạm +ج سÙħ +ØŃÙħ اس +à¹Ģล à¹Īม +à¸ŀิ ษ +×Ķפ ׼×ķ +à¸Ĭà¹Īà¸Ńà¸ĩ à¸Ĺาà¸ĩ +Ġв ек +Ġвек а +Æ¡ Ìģ +Æ¡Ìģ i +ĠTi á»ģn +Ġtr ầm +мÑĭ ÑĪ +мÑĭÑĪ Ð» +ĠÑĤ Ñĥ +ĠÑĤÑĥ ÑĢиÑģÑĤ +Ġch c +Ġchc Äħ +Ġав г +Ġавг ÑĥÑģÑĤ +ĠавгÑĥÑģÑĤ а +ס ×IJ×ķת +Ġר ×Ĵ׾ +à¸ľà¸¥ à¸ģระà¸Ĺ +à¸ľà¸¥à¸ģระà¸Ĺ à¸ļ +å¤īãĤı ãĤĭ +Ġ×Ķ×IJ×Ĺר ×ķ׳×Ļ×Ŀ +سÙģ ÙĬر +ĠÑĩа Ñīе +ãģĦ ãĤī +ãģĦãĤī ãģ£ +ãģĦãĤīãģ£ ãģĹãĤĥ +×ķ×ŀ ׳×Ļ×Ŀ +Ġart tır +ĠCh á»ĭ +Ġì¡° ì§ģ +ĠÑĥÑģп еÑħ +Ġ×¢ ×ķס +Ġ×¢×ķס ×§ +ĠìĥĿ ëªħ +ÑĨ иÑĤ +Ġreg ión +Ðŀ ÐĿ +ĠdoÄŁ um +ĠyaÅŁ ad +ĠyaÅŁad ıģı +à¸Ĺà¸Ķ ลà¸Ńà¸ĩ +Ġgöz ü +ש ×Ļר×Ķ +дÑĥм ал +Ġda ģı +Ġdaģı t +à¸Ĺีม à¸ĩาà¸Ļ +Ġti á»ģm +ĠاÙĦÙĥ بر +ĠاÙĦÙĥبر Ùī +ì¹ Ń +ĠGü nc +ĠGünc elle +ĠGüncelle me +ê¹ Ĭ +ĠобоÑĢÑĥд ование +ĠÑĢеÑĪ Ð° +á» ¤ +Ġп иÑĤ +ĠпиÑĤ аниÑı +à¹Ģรีย à¸ļ +×Ľ×ª ×Ļ×ij×Ķ +Ġп он +Ġпон ÑĢав +ĠпонÑĢав и +Ġ×Ķ ×ķ׾×ĵ +Ġ×Ķ×ķ׾×ĵ ת +Ġê² ģ +Ġê²ģ ëĭĪëĭ¤ +ĠпеÑĢв ой +ãĥ©ãĤ¤ ãĥķ +ĠÅŁi ir +kr ÄĻ +krÄĻ c +Ġthi á»ĥu +à¹Ģลย à¸Ĺี +à¹Ģลยà¸Ĺี à¹Ģà¸Ķียว +×ĺ×¢ ׳×ķת +ائ ÙĩÙħ +Ġ×IJ ס×ķר +ĠплаÑĤ еж +تر دد +Ġmożli we +Ġkh Ỽ +ĠkhỼ p +تÙģØ§Ø¹ ÙĦ +ĠÑĪ ÐºÐ¾Ð»ÑĮ +ĠÑĪколÑĮ н +ĠÙĤ صة +Ġmét ier +nÄĻ ÅĤa +หล à¹Īà¸Ń +Ġ á»§ng +Ġprz egl +Ġprzegl Äħd +ĠاÙĦÙħ تعÙĦ +ĠاÙĦÙħتعÙĦ ÙĤØ© +ĠÑģÑĭ н +Ġв олн +ãĥĩ ãĥ¼ãĥĪ +ĠÐŃ ÑĤи +Ġк ÑĢоме +à¸Ħ ารà¹Į +׳ק ×ķ×ĵ×Ķ +Ġ׾ש×ŀ ×ķ×¢ +Ġ×ĸ ×ķ׼ר +ï¼ § +ÙĬ ÙİØ§ +Ġgi á»ıi +åĥį ãģı +ĠÑģ ни +ĠÑģни жен +à¹ģà¸Ķ à¸Ķ +รุ à¸Ļ +รุà¸Ļ à¹ģรà¸ĩ +Ġhi á»ĩp +ograf ÃŃa +à¹Ģà¸Ī à¸Ńรà¹Į +Ġдв иг +Ġдвиг аÑĤ +ĠдвигаÑĤ ел +Ġü y +Ġüy eler +Ġüyeler i +Ġб Ñĥк +ĠбÑĥк в +ãĤĤ å¤ļãģı +Ġthi á»ĩt +ĠPa ÃŃs +ĠØ· بÙĬعÙĬ +à¹ģà¸Ī à¸ģ +ĠاÙĦص ØŃÙĬØŃ +Ġapp ré +Ġappré ci +Ġdecis ión +Ġë°ĺ ëĵľ +Ġë°ĺëĵľ ìĭľ +ĠÑĤеб е +ãĤ· ãĥ¼ãĤº +ãĤ·ãĥ¼ãĤº ãĥ³ +Ġд алÑĮн +ĠìĬ ¤ +ĠìĬ¤ ìĬ¤ +ĠìĬ¤ìĬ¤ ë¡ľ +ĠTh á»ĥ +Ġkar ÅŁ +ĠkarÅŁ ıs +ĠkarÅŁÄ±s ında +ĠK ön +ĠKön ig +ив ание +×ij ×ķצע +г лаÑģ +Ġtw ó +Ġtwó rc +à¸Ľà¸ģ à¸Ħร +à¸Ľà¸ģà¸Ħร à¸Ńà¸ĩ +ĠG ÅĤ +ĠGÅĤ ówn +ĠUnter stüt +ĠUnterstüt 
zung +Ġд ÑĥÑħ +ĠдÑĥÑħ ов +Ø£ ÙħاÙĨ +×Ĺש ש +ت ظ +تظ اÙĩر +ĠлÑİб ом +à¸ķ าร +à¸ķาร าà¸ĩ +Ġkr ól +Ø£ ØŃدث +ì¡Į ëĭ¤ +Ðļ ÑĥÑĢÑģ +ãĥĥ ãĥĦ +×ŀ×§ ×ķ×ij׾ +ĠÑģимв ол +Ġdés orm +Ġdésorm ais +w üns +wüns che +Ñĥ ни +Ñĥни ÑĨип +ÑĥниÑĨип алÑĮн +หลัà¸ģ สูà¸ķร +ÙĨت شر +Ġа л +Ġал к +Ġалк ог +Ġалког ол +ĠÑĥ ÑĩиÑĤÑĭва +à¸ģำ à¸ģัà¸ļ +Ġ׾ פע×ķ׾ +ĠìŰ ê²° +s Äħd +ĠاÙĦØ£ ÙĬ +ĠاÙĦØ£ÙĬ اÙħ +غÙĬ اب +Ġна ÑĢ +ĠнаÑĢ ÐºÐ¾ +×ŀ×ķ×ĵ ×¢ +ĠÑģеÑĢ Ð¸Ð¸ +пиÑģ Ñĭва +สิ ว +ç¶ļ ãģĦãģ¦ +çͳãģĹ è¾¼ãģ¿ +Ġ׾ ×Ĵר +Ġ׾×Ĵר ×ķ×Ŀ +Ġд ем +Ġдем о +Ġë³´ ëĤ´ +تÙĩ دÙĬد +ĠÙħØ´ ÙĬرا +Ġdu y +Ġduy á»ĩt +ĠwiÄĻks ze +Ùħع اÙĬ +ÙħعاÙĬ ÙĬر +ĠG da +ĠGda ÅĦsk +Ġr ah +Ġrah ats +Ġrahats ız +ר ×ķצ×Ķ +l ös +lös ung +ĠТак им +ÑĪ ÐµÐ´ +ÑĪед ÑĪ +ع زÙĦ +Ġרש ×Ļ×ŀת +Ġ׾×Ķ ×Ļ׼ +Ġ׾×Ķ×Ļ׼ ×ł×¡ +Ġп ÑĥÑĤ +ĠпÑĥÑĤ еÑĪ +ĠпÑĥÑĤеÑĪ ÐµÑģÑĤв +Ġnot ÃŃcia +Ġal Ä±ÅŁ +ĠalÄ±ÅŁ ver +ĠalÄ±ÅŁver iÅŁ +ĠwÅĤ os +ĠwÅĤos ów +Ġب غ +Ġبغ داد +Ġver öffent +Ġveröffent licht +ĠKh á +Ġt án +ëIJĺ 기 +Ġë°© 문 +Ùģ ÙĬÙĦ +à¹Ģà¸ģิà¸Ķ à¸Īาà¸ģ +åı¯ æĦĽ +åı¯æĦĽ ãģĦ +à¸ĸ ุà¸ĩ +Ġz ewnÄĻtrzn +à¸łà¸²à¸©à¸² à¸Ńัà¸ĩà¸ģฤษ +Ġmá xima +Ġul us +Ġulus lararası +Ġ׳×Ķ ×ł +à¸Ĥà¹Īาว สาร +ĠìĿĺ ìĤ¬ +à¹Ģหล ืà¸Ńà¸ĩ +Ġد ÙĤ +ĠدÙĤ ائÙĤ +สืà¹Īà¸Ń สาร +ë¨ ¼ +ĠÑģоÑģÑĤоÑı нии +สมา à¸Ħม +á» Ĥ +ĠÐľÐ¾Ñģ ков +ĠÐľÐ¾Ñģков Ñģк +×ŀס ×ķ×Ĵ׾ +ãģĭ ãģĭãĤĬ +ĠTr uyá»ģn +à¹ģà¸Ĥà¹ĩà¸ĩ à¹ģรà¸ĩ +×ŀ×Ĺ ×ĸ×Ļ×§ +à¹Ĥà¸ģ à¹ī +ÙĬس ر +ìĶ © +×IJ ×ķ×§ +×IJ×ķ×§ ×ĺ +×IJ×ķ×§×ĺ ×ķ×ijר +Ġprox imité +ÙħÙĨ Ùĩج +ĠاÙĦج ز +ĠاÙĦجز ائ +ĠاÙĦجزائ رÙĬ +ĠÄIJi á»ĥm +Ġден еж +Ġденеж н +ÙģØŃ ص +Ùģ Ø¦ +ĠÐij Ñĥд +×Ĵ×Ļ×ĵ ×ķ׾ +ĠÐĴ едÑĮ +عÙĦ اÙħØ© +Ġ×IJ×Ĺר ×ķ׳×ķת +ãģĦãģŁãģł ãģĦãģ¦ +سÙĦ ØŃ +ØŃ ÙĦÙħ +ز ÙĪØ§Ø± +Ùĥ سر +×ĺ קס +Ġб ан +Ġбан ков +ĠпÑĢ Ð¾Ð¶ +ĠпÑĢож ива +li wo +liwo ÅĽci +ĠTi ếp +ĠاÙĦÙħÙĨ اسب +ĠاÙĦØ® ÙĬار +ãģĬ ãģĭ +ãģĬãģĭ ãģĴ +à¸Ķà¸Ńà¸ģ à¹Ħมà¹ī +ä mp +ämp fe +à¸ķัà¹īà¸ĩ à¹ĥà¸Ī +Ġза ÑīиÑĤ +ĠзаÑīиÑĤ Ñĭ +ĠTh ưá»Ŀng +Ġص Ùģ +ĠصÙģ ØŃØ© +×Ĺ×ķר ×£ +ãĥIJ ãĥĥãĤ° +Ġ×ĵ ×Ļ×Ĵ +Ġ×ĵ×Ļ×Ĵ ×Ļ×ĺ +Ġ×ĵ×Ļ×Ĵ×Ļ×ĺ ׾×Ļ +Ġ×Ķ×Ĺ ×ķ׾×Ļ×Ŀ +в еÑī +веÑī а +Ġк ÑĥлÑĮÑĤ +ĠкÑĥлÑĮÑĤ Ñĥ +ĠкÑĥлÑĮÑĤÑĥ ÑĢÑĭ +ĠاÙĦاÙĨ ترÙĨت +Ġhö ch +Ġhöch st +Ġíĺ ķ +Ġíĺķ íĥľ +Ġв ой +Ġвой нÑĭ +ÐĽ Ðŀ +ìĭł ìļ© +Ġ×ŀ×ij ×ķס +Ġ×ŀ×ij×ķס ס +×ŀ׳ ×Ļ×¢ +Ġfiyat ı +ĠÑģл Ñĥж +ĠÑģлÑĥж бÑĭ +à¸Ĺั ศ +à¸Ĺัศ à¸Ļ +ãģĵãģ¨ãģĮ å¤ļãģĦ +Ġ×Ķ×ŀש ת +Ġ×Ķ×ŀשת ×ŀש +å¯Ħ ãģĽ +×ŀש׾ ×ķ×Ĺ +æĻĤ çĤ¹ +æĻĤçĤ¹ ãģ§ +à¸ŀร ี +à¸ŀรี à¹Ģมีย +à¸ŀรีà¹Ģมีย รà¹Į +à¸ŀรีà¹Ģมียรà¹Į ลีà¸ģ +Ġdiffic olt +Ġdifficolt Ãł +ãĥ¬ ãĤ¹ãĥĪ +ãĥ¬ãĤ¹ãĥĪ ãĥ©ãĥ³ +สม à¹Ģà¸Ķà¹ĩ +สมà¹Ģà¸Ķà¹ĩ à¸Ī +Ġж ид +Ġжид к +Ġzu peÅĤ +ĠzupeÅĤ nie +ĠÙħ جر +ĠÙħجر د +ãģĮ å§ĭ +ãģĮå§ĭ ãģ¾ +ãĤŃãĥ£ ãĥ© +Ġ×IJ ×ķ×ķ×Ļר +ãģĬ äºĴ +ãģĬäºĴ ãģĦ +Ġpot rÃł +ĠPa ÅĦst +ĠPaÅĦst wo +Ġب ÙĬاÙĨ +ĠبÙĬاÙĨ ات +Ġин огда +ĠÑĢ Ð° +ĠÑĢа ÑģÑĤв +ĠÑĢаÑģÑĤв оÑĢ +Ġ×ĸ ×ŀ׳ +ยิ à¹īม +Ä Ĩ +ãģ¾ ãģķ +ãģ¾ãģķ ãģ« +ãĥķãĤ¡ ãĤ¤ãĥ« +Ġgörd Ã¼ÄŁÃ¼ +สà¸ĩ à¸Ħร +สà¸ĩà¸Ħร าม +ĠArk adaÅŁ +ĠrozwiÄħz ania +×ŀ ×ķ×ĺ +pi ÄĻ +piÄĻ t +ص غر +ส ย +สย าม +ãĤĨ ãģ£ãģıãĤĬ +Ġtr ần +Ġeconom ÃŃa +Ġgeh ören +ãĤ·ãĥ§ ãĥ¼ +ĠsÅĤ ucha +à¸ŀà¸Ń à¹ĥà¸Ī +ĠоÑĤмеÑĤ ил +ÙĨت ÙĤÙĦ +Ġprop ósito +ĠваÑĪ ÐµÐ³Ð¾ +Ġnh ắn +à¹ģà¸ĸ ว +Ġком иÑģ +ĠкомиÑģ Ñģи +waż nie +Ġy avaÅŁ +×ŀ ×Ļ×§ +×ŀ×Ļ×§ ×ķ×Ŀ +ש×IJ׾ ת +Ġyıll arda +ĠÐ ® +ĠЮ ÑĢ +×ł×¡ ×Ļ×ij×ķת +ת צ +תצ ×ķ×Ĵ +Ġод нÑĥ +Ġ à¸Ńยà¹Īาà¸ĩà¹Ħร +Ġà¸Ńยà¹Īาà¸ĩà¹Ħร à¸ģà¹ĩà¸ķาม +ëģ ¼ +à¹Ħล à¹Ī +تس ÙĦÙĬÙħ +بÙĦ اغ +Ġì ī +Ġìī ½ +Ġìī½ ê²Į +ãĥļ ãĥ³ +зв ÑĥÑĩ +ĠW äh +ĠWäh rend +Ġ×Ļ ×Ļת +Ġ×Ļ×Ļת ׼ף +Ġkh uyên +Ġv ẽ +Ġа меÑĢ +ĠамеÑĢ Ð¸Ðº +ĠамеÑĢик ан +ĠамеÑĢикан Ñģк +ع جب +ãĥĽãĥ¼ãĥł ãĥļãĥ¼ãĤ¸ +Ġник ÑĤо +ĠÙĤ Ùİ +ĠÙĤÙİ Ø§ÙĦ +ĠÙĤÙİØ§ÙĦ Ùİ +ÐIJ ÐĹ +Ùħ جÙħÙĪØ¹ +ÙħجÙħÙĪØ¹ ات +Ġnecess itÃł +Ġpob li +Ġpobli żu +Ġph ấn +ĠСо обÑī +ÙħÙĤ اط +ÙħÙĤاط ع +Ġ×Ķצ ×ķר×ļ +la ÅŁtırma +ว ิà¸Ķ +วิà¸Ķ ี +วิà¸Ķี à¹Ĥà¸Ń +Ġ그리 ìĬ¤ +Ġ그리ìĬ¤ ëıĦ +ãĤ¿ãĤ¤ ãĥŁ +ãĤ¿ãĤ¤ãĥŁ ãĥ³ãĤ° +×§×ĺ ×Ĵ×ķר +×§×ĺ×Ĵ×ķר ×Ļ×Ķ +Ġ×Ĺ ×ķפ +Ġ×Ĺ×ķפ ש×Ļ +Ø£ جر 
+Ġим ени +ĠÑĢан ее +à¹Ģà¸ŀืà¹Īà¸Ńà¸Ļ à¹Ĩ +ĠJes ús +Ñģо един +Ñģоедин ен +Ġר ×Ĺ×ķ×§ +à¹Ĥà¸ļ รา +à¹Ĥà¸ļรา à¸ĵ +ĠH Æ¡n +Ġth áºŃp +تع ÙĬÙĬÙĨ +Ġtart Ä±ÅŁ +ĠtartÄ±ÅŁ ma +ĠGes pr +ĠGespr äch +תר ×ķפ +תר×ķפ ×ķת +Ġcat égorie +Ġоказ Ñĭва +ĠналиÑĩ ие +Ġprésent é +Ġk ull +Ġkull and +Ġkulland ı +Ġü nl +Ġünl ü +ĠÙģ Ùĥرة +из аÑĤоÑĢ +×IJ ×ķ׳ +×IJ×ķ׳ ×Ļ×ij +×IJ×ķ׳×Ļ×ij רס +×IJ×ķ׳×Ļ×ijרס ×Ļ×ĺת +ĠÑĢаÑģÑģ маÑĤ +ĠÑĢаÑģÑģмаÑĤ ÑĢ +ĠÑĢаÑģÑģмаÑĤÑĢ Ð¸Ð²Ð° +تÙĥÙĦ Ùħ +Ùĥت رÙĪ +ÙĥترÙĪ ÙĨÙĬ +ĠÑģо ÑĩеÑĤ +ĠÑģоÑĩеÑĤ а +ãĤĴè¦ĭ ãģĽ +Ġng ừa +ĠÐł еÑģп +ĠÐłÐµÑģп Ñĥб +ĠÐłÐµÑģпÑĥб лик +ãĤ¦ ãĤ© +ãĤ¦ãĤ© ãĥ¼ +ĠÐľ еждÑĥ +ĠìŀĪ ê²Į +Ġm â +ĠìļĶ ì²Ń +ض ار +ลุ à¹īà¸Ļ +ëĮĢ íķĻêµIJ +×ĸ ×Ļ׼ +×ĸ×Ļ׼ ר×ķף +ãĤ¹ ãĥļ +ãĤ¹ãĥļ ãĥ¼ãĤ¹ +ĠкÑĢаÑģ оÑĤ +ï¼ ¨ +ê¼ Ń +ãĤĴ éĽĨ +ãĤĴéĽĨ ãĤģ +ë° Ŀ +Ġ×Ķ׳ ×IJ +Ġ×Ķ׳×IJ ש×Ŀ +Ġê°Ģ ìļ´ +Ġê°Ģìļ´ ëį° +تÙĥÙĦ Ù쨩 +ĠØŃ ÙĤÙĬÙĤÙĬ +Ġh alk +Ġhalk ın +ÑİÑī ÑĥÑİ +ĠÑģп ин +סר×ĺ ף +ĠпеÑĢв ого +Ġпол ож +Ġполож иÑĤелÑĮн +Ġд л +Ġдл иÑĤелÑĮн +ĠV Ä©nh +ê´ ´ +ĠÑģÑĭ ÑĢ +ĠíĨµ íķĺìŬ +ë³ij ìĽIJ +à¹Ĥรà¸ĩ à¸ĩาà¸Ļ +รัà¸ļ à¸ľà¸´à¸Ķ +รัà¸ļà¸ľà¸´à¸Ķ à¸Ĭà¸Ńà¸ļ +تج ÙĨب +s ÅĤ +sÅĤ uch +ãĤ¢ãĥ« ãĥIJ +ãĤ¢ãĥ«ãĥIJ ãĥł +ëī´ ìĬ¤ +Ġpat ië +Ġpatië nt +Ġìĺ ¤í +Ġìĺ¤í ŀ +Ġìĺ¤íŀ Ī +Ġìĺ¤íŀĪ ëł¤ +ĠDer ne +ĠDerne ÄŁi +wró ci +wróci Äĩ +Ġоб Ñī +ĠобÑī еÑģÑĤв +ĠобÑīеÑģÑĤв енно +ĠêµIJ ìĪĺ +tıģ ımız +Ġ×Ķ×ŀש ×Ļ×ij +k örper +Ġпозв ол +Ġпозвол иÑĤ +ĠChi ến +أخ ÙĪ +ĠAy dın +à¸Ķà¹īาà¸Ļ ล +à¸Ķà¹īาà¸Ļล à¹Īาà¸ĩ +Ġdr u +Ġdru ż +Ġdruż yn +Ġë°ľ íijľ +ĠTh ảo +جÙĩ اد +à¸ģระà¸Ĺ ูà¹ī +Ġк ÑĢов +ĠкÑĢов и +Ġiçer ik +Ġnad zie +Ġnadzie jÄĻ +ĠС моÑĤÑĢ +Ġph ức +ج تÙħاع +جتÙħاع ÙĬØ© +ком пон +компон енÑĤ +Ġб ил +Ġбил еÑĤ +ãĥIJ ãĥ³ãĥī +ĠPol ÃŃcia +اÙĦ تÙĩ +اÙĦتÙĩ اب +ØŃر Ùģ +ت خط +تخط ÙĬØ· +ãĤ³ ãĥ¼ãĥ +ãĤ³ãĥ¼ãĥ Ĵ +ãĤ³ãĥ¼ãĥĴ ãĥ¼ +・・ ï½¥ +à¸ĭ à¸Ńย +Ġcréd it +è²· ãģ£ãģŁ +ĠпоÑĢ Ñıд +ĠпоÑĢÑıд ке +Ġph ó +Ġw ida +Ġwida Äĩ +جر ائÙħ +à¸ľ ี +ĠbÄĻd ÄĻ +Ġ×ŀ פת×Ĺ +ãĥij ãĥ¼ãĥ +ãĥijãĥ¼ãĥ Ĩ +ãĥijãĥ¼ãĥĨ ãĤ£ +ãĥijãĥ¼ãĥĨãĤ£ ãĥ¼ +ĠKa ż +ĠKaż dy +ĠнеобÑħодим оÑģÑĤи +à¸Ł à¸Ńรà¹Į +à¸Łà¸Ńรà¹Į ม +Ġмал ÑĭÑĪ +Ġпл оÑĤ +ĠÑĥ ÑģÑĤÑĢой +ĠÑĥÑģÑĤÑĢой ÑģÑĤва +à¸ĸ à¸Ńà¸Ļ +ĠoluÅŁtur ul +ĠÅĽwi ad +ĠÅĽwiad om +Ùħع Ùĩد +ĠпÑĢоиз веден +Æ ł +ר ×Ļש +Ùħست Ø« +Ùħستث Ùħر +׳×Ļ ×Ļר +pa ñ +Ġ; -) +Ġë°ľ 견 +Ġgör üyor +Ùħؤ ÙĦÙģ +ĠÄIJ á»ģ +ĠاÙĦÙĨ ÙĪØ§Ø¨ +×Ĺ×§ ×Ļר×Ķ +Ġm á»ıi +è¿° ãģ¹ +ÐĿ ик +ìŀĸ ìķĦ +ìŀĸìķĦ ìļĶ +prowadzi ÅĤ +l óg +lóg ica +פס ×ĺ +פס×ĺ ×Ļ×ij׾ +Ġ×ŀ ×ĵ×Ķ +Ġ×ŀ×ĵ×Ķ ×Ļ×Ŀ +ãģĵãģĵ ãģ¾ãģ§ +×Ķ ×ª×Ĺ +×Ķת׊׾×Ķ +Ġפ ×ķס +Ġפ×ķס ×ĺ×Ļ×Ŀ +Ġн ев +Ġнев оз +Ġневоз можно +ĠdostÄĻp ny +Ġغ اÙĦ +ĠغاÙĦ ب +Ġbez pieczeÅĦst +ĠbezpieczeÅĦst wa +åĪĨ ãģĭãĤĭ +ĠF ührung +à¸ģ ีà¹ī +gem Ã¤ÃŁ +à¸Ĭà¹Īวà¸ĩ à¹Ģวลา +Ġìļ°ë¦¬ ëĤĺ +Ġìļ°ë¦¬ëĤĺ ëĿ¼ +ãģ¥ ãģıãĤĬ +ĠاÙĦÙħ سÙĦ +ĠاÙĦÙħسÙĦ ØŃØ© +Ġlibert é +клÑİÑĩ ение +Ġzam ów +Ġzamów ienia +รà¸ĸ à¹Ħà¸Ł +Ø£ ÙģÙĦ +Ø£ÙģÙĦ اÙħ +Ùħ راج +Ùħراج عة +Ġë¹Ħ êµIJ +ĠاÙĦت اب +ĠاÙĦتاب عة +Ġë§Į ëĤĺ +Ġб Ñĥм +ĠбÑĥм аг +Ġgé nero +Ġìŀĺ 못 +×ŀ פ×ķר×ĺ +è²·ãģĦ çī© +ĠÙĦدÙĬ Ùĥ +Ġ×ľ×¢ ×Ļת +Ġ×ľ×¢×Ļת ×Ļ×Ŀ +ĠsÅĤ ab +ĠпÑĢедÑģÑĤав лÑı +ãĤ¿ ãĤ¤ãĥĪ +ãĤ¿ãĤ¤ãĥĪ ãĥ« +Ùħ ص +Ùħص Ø·Ùģ +ÙħصطÙģ Ùī +Ġdifficult é +ãĥĨãĤ£ ãĥĸ +Ġpew noÅĽci +ĠpewnoÅĽci Äħ +Ġ무 ìĬ¨ +Ø¥ رس +إرس اÙĦ +Ġд алÑĮ +ĠдалÑĮ ÑĪе +Ġ׾ ×ł×¡ +Ġ×ľ×ł×¡ ×ķת +หมูà¹Ī à¸ļà¹īาà¸Ļ +×ŀס×ŀ ׼×Ļ +أسÙĦ ÙĪØ¨ +Ġzw ÅĤ +ĠzwÅĤ as +ĠzwÅĤas zc +ĠzwÅĤaszc za +ĠпÑĢ ÐµÐ¶ +ĠпÑĢеж де +ĠоÑĢганиз аÑĨиÑı +Ġdön emin +Ġdönemin de +Ġ Ủ +ĠỦ y +ä¸ĭ ãģĴ +ĠпоÑģлед ние +Ġgü ne +Ġgüne ÅŁ +Ġ×IJ ×ĸר +Ġ×IJ×ĸר ×Ĺ×Ļ +ãģ§ãģĤ ãĤįãģĨ +ĠÙĨ ÙĤ +ĠÙĨÙĤ اط +æŃ£ ãģĹãģĦ +ĠÑĢ ÐµÐ³ +ĠÑĢег иона +ĠFör der +ê²½ ìĺģ +dıkl ar +dıklar ını +trzym aÄĩ +أش Ùĥ +أشÙĥ اÙĦ +×Ķת ×IJ +×Ķת×IJ ×ŀ×Ķ +à¸Ĺำà¹ĥหà¹ī à¹Ģà¸ģิà¸Ķ +ĠGeb ä +ĠGebä ude +ĠСеÑĢ Ð³ +ĠСеÑĢг ей +Ġз доÑĢов +ĠздоÑĢов ÑĮÑı +Ġr ãi +ĠпÑĢед ÑĥÑģ +ĠпÑĢедÑĥÑģ моÑĤÑĢ +ĠпÑĢедÑĥÑģмоÑĤÑĢ ÐµÐ½ +Ġ×Ķצ ×Ļ×ij +Ġ×Ķצ×Ļ×ij ×ķר×Ļ +Ġdés 
ir +Ġн оÑĩ +ĠноÑĩ ÑĮ +möglich keiten +Ġ×IJ×Ĺר ×ķ׳×Ļ×Ŀ +Ġsoir ée +ĠNh áºŃn +Ù ª +à¸Ľà¸£à¸°à¸§à¸±à¸ķิ ศาสà¸ķรà¹Į +êµIJ íĨµ +ĠØ£ Ø®ÙĬ +Ġdé cid +Ġdécid é +Ġwy ja +Ġwyja ÅĽni +Ġ สิ +Ġสิ à¸ĩ +Ġสิà¸ĩ หา +Ġสิà¸ĩหา à¸Ħม +à¹ģ à¸Ńรà¹Į +หà¸Ļà¹īา à¸Īà¸Ń +ס תר +Ġê ¶ +Ġê¶ Į +Ġê¶Į 리 +pl ätze +ب Ø·ÙĦ +ê±´ ìĦ¤ +Ġ×IJ ×Ļ×ŀ×Ļ +Ġ×IJ×Ļ×ŀ×Ļ ×Ļ׾ +ãģ ½ +تر اث +×IJ׾ ×Ļ×ŀ×ķת +Ġdispon ÃŃveis +Ġz ale +Ġzale ży +à¸Ľà¸£à¸°à¸Ĭา สัมà¸ŀัà¸Ļà¸ĺà¹Į +ĠÅļw iat +Ġpor ówn +Ġporówn a +Ġ׾×ĺ ×ķ×ijת +×Ķ×ĸ ×ŀ׳×Ķ +Ġ×Ľ×ª ×ķצ×IJ×Ķ +Ġ×ij ק׾ +Ġ×ijק׾ ×ķת +ĠоÑĤ кÑĢ +ĠоÑĤкÑĢ Ñĭва +ãĥij ãĥ¯ãĥ¼ +ë¿IJ ë§Į +Ġв ÑģÑı +ĠвÑģÑı к +ãģ¨ãģª ãģ£ãģ¦ãģĦãĤĭ +Ġgi áºŃn +Ġок ÑĢÑĥ +ĠокÑĢÑĥ жа +ĠокÑĢÑĥжа ÑİÑī +ĠUnivers ität +ĠÑĢ Ð¾Ð¶ +ĠÑĢож д +ĠÑĢожд ениÑı +Ø® ÙĬÙĦ +Ġкомпани й +ĠÑĢазлиÑĩ нÑĭе +ĠЦ ена +׳×Ļ ×ķ×ĸ +׳×Ļ×ķ×ĸ ׾ +׳×Ļ×ķ×ĸ׾ ×ĺר +Ġê³µ ê°Ħ +Ġê°ľ ëħIJ +landır ma +ĠÑĥдал ен +à¸ŀัà¸ģ à¸ľ +à¸ŀัà¸ģà¸ľ à¹Īà¸Ńà¸Ļ +Ġprote cción +Ġb ÅĤ +ĠbÅĤ ÄĻd +Ã Ī +Ġíĸī ë³µ +ĠÅŁ ü +ĠÅŁÃ¼ phe +Ġí Ķ +ĠíĶ ¼ +Ġíͼ íķ´ +Ġëĭ¤ 르 +à¹Ħมà¹Ī à¹Ģà¸ģิà¸Ļ +ãģ¿ ãģª +ãģ¿ãģª ãģķãĤĵ +ĠпоÑĤ ÑĢеб +ĠпоÑĤÑĢеб иÑĤел +ĠاÙĦÙĥÙĦ اÙħ +ìķĦ ë²Ħ +ìķĦë²Ħ ì§Ģ +ãĤĴ使 ãģ£ãģŁ +Ġbụ i +ĠпоÑĤ еÑĢ +ĠпоÑĤеÑĢ Ñı +ĠØ¢ ÙĦاÙģ +ĠнаÑģÑĤоÑıÑī ее +ãģıãģªãĤĬ ãģ¾ãģĹãģŁ +clus ão +ãĤ³ ãĥĶãĥ¼ +צ פ×Ļ +צפ×Ļ ×Ļ×Ķ +Ø® ÙĦا +Ø®ÙĦا ص +ล à¹īำ +ãĥ¯ ãĤ¤ãĥ³ +Ġมี à¸Ļา +Ġมีà¸Ļา à¸Ħม +Ø´ خص +شخص ÙĬات +Ġ×ĸ ×§ +Ġ×ĸ×§ ×ķ×§ +×Ļ ×Ļצ +×Ļ×Ļצ ×Ĵ +èĢĥãģĪ æĸ¹ +Ġürün ü +ĠиÑģп ол +ĠиÑģпол ни +Ġcompañ ero +×§ צ×Ķ +×ŀ×¢ ׳×Ļ×§ +Ùħ ØŃÙħد +Ġc ámara +Ġп ед +Ġпед аг +Ġпедаг ог +м аÑĢ +маÑĢ Ðº +×Ķת ׳×Ĵ×ĵ +ĠìĨĮ ê°ľ +Ġcom unitÃł +ê³ ¤ +ĠNg Ãłi +สà¸ĩ à¸ļ +ĠmieszkaÅĦ ców +ĠÙĨ ÙĩائÙĬ +iv ité +Ġи де +Ġиде алÑĮн +ĠØ£ سبÙĪØ¹ +Ġ×Ļ ×¢×ľ +Ġ׾ ר×IJש +Ġ׾ר×IJש ×ķ׳×Ķ +ĠзапиÑģ и +ĠкоÑĢ Ð¿ÑĥÑģ +วà¸ĩ ศ +วà¸ĩศ à¹Į +ĠÐĶ Ð¼ +ĠÐĶм иÑĤ +ĠÐĶмиÑĤ ÑĢ +Ġkön nt +Ġböl ges +Ġbölges inde +׼ ×Ļ׼ +׼×Ļ׼ ר +ĠاÙĦØ¥ Ø«ÙĨ +ĠاÙĦإثÙĨ ÙĬÙĨ +Ġng á»Ļ +ì¹ ł +د راج +Ġu da +Ġuda ÅĤo +ìº IJ +بر ÙĨاÙħج +ĠÑģÑĥд еб +ĠÑģÑĥдеб н +Ġzun ächst +ĠEduc ación +ãģ¨ãģª ãģ£ãģ¦ãģĦãģ¾ãģĻ +Ġ×Ķ×IJ ×ŀ×Ļת×Ļ +Ġİ nt +Ġİnt ernet +ĠcaÅĤ ego +ãĥĹãĥª ãĥ³ +Ø¥ بد +إبد اع +ĠпоÑĢ ÑĤал +à¹Ĥà¸ķ à¹ī +Ġ×Ķ×§ ש×ķר +пл од +ĠÙħ د +ĠÙħد رÙĬد +×ŀסע ×ĵ×Ķ +ĠØ´ÙĬ ئ +ĠØ´ÙĬئ ا +à¸ģà¹Īà¸Ń สรà¹īาà¸ĩ +Ġì°¸ ê³ł +à¹Ģà¸Ĺ ร +à¹Ģà¸Ĺร à¸Ķ +Ġ×ij×ŀ קר×Ļ×Ŀ +Ġb ât +Ġbât iment +åij¼ ãģ³ +ç´ł æķµ +ç´łæķµ ãģª +przedsiÄĻbior st +przedsiÄĻbiorst w +Ġ×ł×ª ×ķ׳×Ļ×Ŀ +×Ĺ׾ ×ķ×Ŀ +ร วย +Ùħ ÙĪØ¶ÙĪØ¹ +ĠÑģоб ÑĢан +вед ÑĥÑī +ĠÑĤе аÑĤ +ĠÑĤеаÑĤ ÑĢ +m eye +meye ceÄŁi +Ġpien iÄħ +ĠpieniÄħ d +ĠpieniÄħd ze +ÑĢез иденÑĤ +ØŃ صر +ìĺ ¥ +à¹Ģย ืà¸Ńà¸Ļ +ĠÑĥ ни +ĠÑĥни веÑĢ +ĠÑĥнивеÑĢ Ñģ +ĠÑĥнивеÑĢÑģ иÑĤеÑĤ +ĠاÙĦر ØŃ +ĠاÙĦرØŃ ÙħÙĨ +ĠÑĤеÑħ нолог +ĠÑĤеÑħнолог ии +ìĹIJ ëĦĪ +ìĹIJëĦĪ ì§Ģ +Ġíķ Ń +ĠíķŃ ìĥģ +à¸ĺ า +à¸ĺา à¸ķุ +ĠEspañ ol +×ĵ×Ĵ ש +Ġêµ ī +Ġêµī ìŀ¥ +Ġêµīìŀ¥ íŀĪ +ĠÅĤ at +ĠÅĤat wo +Ġk á»ĭch +Ø¥ ز +إز اÙĦØ© +ĠдейÑģÑĤв ие +ĠsaÄŁ layan +สุà¸Ķ ยà¸Ńà¸Ķ +Ġzosta Äĩ +Ġdispon ÃŃvel +ïº į +ver ständ +verständ lich +tw or +twor zyÄĩ +ع جز +à¹Ģà¸Ĥ à¹īม +ยà¹Ī à¸Ńม +Ġstrat ég +Ġstratég ie +à¸ľà¸¥ à¹Ħมà¹ī +Ġê°ģ ì¢ħ +ĠÙħ ÙĪØ§ +ĠÙħÙĪØ§ ض +ĠÙħÙĪØ§Ø¶ ÙĬع +اØŃ تج +اØŃتج اج +Ġ Ấ +ĠẤ n +×ŀ ×ŀש׾×Ķ +ĠÅŁek il +×ŀ ×Ĺ׾ +×ŀ×Ĺ׾ ×ķת +Ġ à¸ĺ +Ġà¸ĺ ัà¸Ļ +Ġà¸ĺัà¸Ļ วา +Ġà¸ĺัà¸Ļวา à¸Ħม +Ġìĭ¤ ìłľ +Ġìĭ¤ìłľ ë¡ľ +ì¤ij ìķĻ +ëįĶ ëĿ¼ +ĠÑĪ Ð¸ÑĢ +ĠÑĪиÑĢ Ð¾ÐºÐ¾ +Ġsol ución +วาà¸ĩ à¹ģà¸ľà¸Ļ +×IJ×ķ×ĺ ×ķ×ŀ +×IJ×ķ×ĺ×ķ×ŀ ×ĺ×Ļ +ĠÑĢ ÐµÑģÑĤ +ĠÑĢеÑģÑĤ оÑĢ +ĠÑĢеÑģÑĤоÑĢ Ð°Ð½ +ëį ¸ +ÑĤ ÑĢад +ÑĤÑĢад и +ÑĤÑĢади ÑĨион +ÑĤÑĢадиÑĨион н +มะ à¹Ģรà¹ĩ +มะà¹Ģรà¹ĩ à¸ĩ +à¹Ĥ ส +Ġol masını +×ŀ×ķס ר +ĠоÑĤноÑĪ ÐµÐ½Ð¸Ð¸ +Ġê°ĢëĬ¥ ìĦ± +Ġy uk +Ġyuk arı +ìĨ Ķ +ĠÑģ ÑĦ +ĠÑģÑĦ еÑĢе +Ġ×§ ×ķפ +ãĤ± ãĥ¼ãĤ +ãĤ±ãĥ¼ãĤ Ń +âĢķ âĢķ +ĠاÙĦØ£ ÙĦÙħ +ĠاÙĦØ£ÙĦÙħ اÙĨÙĬ +Ả N +ת×ķ׼ ׳×Ļ×ķת +ĠÑģÑĥÑīеÑģÑĤв ÑĥеÑĤ +æĪij ãĢħ +ĠاÙĦص ادر +ĠTr á»įng +Ġа д +Ġад миниÑģÑĤ +ĠадминиÑģÑĤ ÑĢа +ĠадминиÑģÑĤÑĢа ÑĨи 
+ĠдÑĢÑĥг ими +Ñģп еÑĪ +عÙĦاÙħ ات +Ġа б +Ġаб Ñģол +ĠабÑģол ÑİÑĤ +ĠабÑģолÑİÑĤ но +ฤ à¸Ķู +é tr +étr anger +нÑı ÑĤи +нÑıÑĤи е +×¢ ×ķ׳ +×¢×ķ׳ ש +ĠÙĤ ائ +ĠÙĤائ ÙĦا +Ġм аÑģ +ĠмаÑģ ло +ãĥī ãĤ¤ +ãĥīãĤ¤ ãĥĦ +å¿ħè¦ģ ãģĮãģĤãĤĬãģ¾ãģĻ +×ŀ×ķ×ĸ ×Ļ×IJ +×ŀ×ķ×ĸ×Ļ×IJ ×ķף +ĠNgo ại +Ġkê nh +à¸ģาร à¸Ńà¸Ńà¸ģà¹ģà¸ļà¸ļ +×ŀ פק +×ŀפק ×ĵ +ÙħÙĨ از +ÙħÙĨاز ÙĦ +ë· ° +íĹ ¤ +ÙħÙĩ ارات +Ġpropri été +פ×Ĵ ×Ļש×Ķ +Ñĩ ÑĢ +ÑĩÑĢ ÐµÐ¶ +ÑĩÑĢеж ден +×Ķ ×ķצ×IJ×Ķ +ØŃÙĥ ÙĬÙħ +ĠíĻ Ī +ĠíĻĪ íİĺìĿ´ì§Ģ +åİ ³ +åݳ ãģĹãģĦ +×¢ ×ŀ×ĵ×Ķ +ĠAu ÃŁen +سÙĪ Ø¡ +ë¹ Ī +ĠÙĪ Ø® +ĠÙĪØ® اصة +ин ÑĤеÑĢ +инÑĤеÑĢ ÐµÑģ +èĩ´ ãģĹãģ¾ãģĻ +Ġhük üm +à¹Ħà¸Ĥ มัà¸Ļ +Ġdav ran +Ġdavran Ä±ÅŁ +à¹Ģà¸ķ ียà¸ĩ +в ÑĢем +вÑĢем енно +à¹Ģà¸Ĺศ à¸ģา +à¹Ģà¸Ĺศà¸ģา ล +å¼ķ ãģ£ +å¼ķãģ£ è¶ĬãģĹ +×IJר ×ķ×Ĺ +×IJר×ķ×Ĺ ×ª +à¹Ģ วิ +à¹Ģวิ รà¹Į +à¸Ńยà¹Īาà¸ĩ รวà¸Ķà¹Ģรà¹ĩว +ĠìŬ íĸī +ĠÑĢан ÑĮ +ĠÑĢанÑĮ ÑĪе +Ġzob ow +Ġzobow iÄħ +ĠzobowiÄħ z +Ġ×ķ׼ ×ŀ×ķ×ijף +ĠاÙĦÙħ Ùĩ +ĠاÙĦÙħÙĩ ÙĨÙĬ +ãĤ¢ ãĤ¸ +ãĤ¢ãĤ¸ ãĤ¢ +ë°© ìĨ¡ +à¸Ńà¸Ńà¸ģ à¸ģำลัà¸ĩ +à¸Ńà¸Ńà¸ģà¸ģำลัà¸ĩ à¸ģาย +am éli +améli orer +å½ĵãģŁãĤĬ åīį +Ġreg elm +Ġregelm Ã¤ÃŁig +ãģĬ åĭ +ãģĬåĭ § +ãģĬåĭ§ ãĤģ +Ġm ưá»Ŀi +بر Ùħج +ĠNat ürlich +ĠD Å©ng +ĠاÙĦر جاÙĦ +Ġthé p +Ġol muÅŁtur +×ŀ×ķס ×Ļ×§×Ķ +f älle +주 íĥĿ +ĠاÙĦÙģ Ø±Øµ +Ġnaj wiÄĻks +ĠnajwiÄĻks zy +Ġça ÄŁ +ĠçaÄŁ rı +ì¸ ł +ĠvÃŃ ct +ĠvÃŃct ima +ĠÑģовеÑĢ ÑĪен +×Ķ×Ļ ×Ļת×Ļ +à¹Ģà¸Ķ ี +à¹Ģà¸Ķี à¹ĭ +à¹Ģà¸Ķีà¹ĭ ยว +ü yü +Ġд оп +Ġдоп олн +Ġдополн иÑĤелÑĮно +à¹ģà¸ķà¸ģà¸ķà¹Īาà¸ĩ à¸ģัà¸Ļ +Ġá l +Ġál bum +à¸Ľà¸£à¸°à¸Īำ à¸Ľà¸µ +ĠÑĦ едеÑĢ +ĠÑĦедеÑĢ Ð°Ð»ÑĮн +Ġobs ÅĤ +ĠobsÅĤ ugi +à¹Ģร ืà¹Ī +à¹Ģรืà¹Ī à¸Ńย +à¹Ģรืà¹Īà¸Ńย à¹Ĩ +ëģ Į +Ġngh ìn +ĠBaÅŁkan lıģı +تأ سÙĬ +تأسÙĬ س +Ġ×ij×ij ×ķקר +Ġ×¢×ij×ķ×ĵ ×ķת +Ġبص ÙĪØ±Ø© +ãĤıãģij ãģ§ãģ¯ãģªãģĦ +führ er +ãĤ¹ ãĤŃ +ãĤ¹ãĤŃ ãĥ« +ĠاÙĦÙĤ ض +ĠاÙĦÙĤض ÙĬØ© +Ġдолж ноÑģÑĤ +ÙģØ§Ø± ÙĤ +Ġcomeç ou +Ġorganis é +Ġxu ân +ĠÑģообÑī аеÑĤ +ĠпÑĢи д +ĠпÑĢид еÑĤÑģÑı +TÃľ RK +ãĥ¬ ãĥ¼ãĤ·ãĥ§ãĥ³ +Kh ông +است Ùģ +استÙģ Ø§Ø¯Ø© +ä¸ĬãģĮ ãģ£ãģ¦ +Ġum ie +Ġumie jÄĻ +ĠumiejÄĻ tn +ĠumiejÄĻtn oÅĽci +ëĤ ¸ +à¹Ģà¸Ļ à¸Ńรà¹Į +×ĵ×ķ ×ķ×Ĺ +ÃŃs imo +I ÃĬ +IÃĬ N +Ġalcan ç +Ġ à¸ķุ +Ġà¸ķุ ลา +Ġà¸ķุลา à¸Ħม +ש׾ ×ĺ×ķף +Ġél è +Ġélè ves +ĠÄij u +ĠÄiju á»ķi +ĠØ£ Ùģ +ĠØ£Ùģ Ø±ÙĬ +ĠØ£Ù쨱ÙĬ ÙĤÙĬ +ĠØ£Ù쨱ÙĬÙĤÙĬ ا +ãĤĴæİ¢ ãģĻ +ĠпÑĢед ложениÑı +ج اد +ĠÑħоÑĤ ÑĮ +Ñģ ал +Ñģал он +à¸Ľà¸£à¸° à¹Ģม +à¸Ľà¸£à¸°à¹Ģม ิà¸Ļ +ãĤŃ ãĥĥãĥģ +ãĤŃãĥĥãĥģ ãĥ³ +×ij×ĵ×Ļ×§ ×ķת +Ġch ù +Ġchù a +ÐĴ иде +ÐĴиде о +иÑĢов ка +ĠÑħоÑĤ иÑĤе +Ġspéc ifique +รส à¸Ĭาà¸ķิ +è¾¼ ãĤĵãģł +伸 ãģ³ +×Ķצ׾ ×Ĺת +ãģ©ãģ® ãĤĪãģĨãģ« +سع ادة +Ġл ид +Ġлид еÑĢ +ม à¸ĩ +มà¸ĩ à¸Ħล +ØŃ اÙħÙĦ +หล ุà¸Ķ +à¸Ńยà¹Īาà¸ĩ à¸ķà¹Īà¸Ń +à¸Ńยà¹Īาà¸ĩà¸ķà¹Īà¸Ń à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ +ãģķãģĽãģ¦ éłĤ +تس ÙĪÙĬ +تسÙĪÙĬ ÙĤ +ĠaÅŁaģı d +ĠaÅŁaģıd aki +ĠÑĨ елÑĮ +ĠÑĨелÑĮ Ñİ +ĠAra ÅŁtırma +à¸Ĥัà¸ļ รà¸ĸ +Ùĩ ذÙĩ +ลà¸ĩ à¸Ĺะ +ลà¸ĩà¸Ĺะ à¹Ģà¸ļ +ลà¸ĩà¸Ĺะà¹Ģà¸ļ ียà¸Ļ +تÙĥ اÙħÙĦ +Ġc io +Ġcio è +ãģ¦ ãģĬãģı +ĠاÙĦصØŃ ÙģÙĬ +ĠíĬ¹ ìłķ +полн иÑĤÑĮ +ãĤĵ ãģĺãĤĥãģªãģĦ +ãĤĵãģĺãĤĥãģªãģĦ ãģĭ +ĠاÙĦج Ùĩ +ĠاÙĦجÙĩ ات +ĠÑĥÑģпеÑĪ Ð½Ð¾ +Ġв ок +Ġвок ÑĢÑĥг +ĠÑģиÑĤÑĥ аÑĨиÑı +Ġ×Ķ×IJ ×ŀר +Ġ×Ķ×IJ×ŀר ×Ļ×§ +Ġ×Ķ×IJ×ŀר×Ļ×§ ×IJ×Ļ +×ŀ ×Ĵ×ĸ +×ŀ×Ĵ×ĸ ×Ļף +Ġак ÑĤÑĥ +ĠакÑĤÑĥ алÑĮн +é ta +éta is +Ġmog ÅĤa +ĠÑĤоÑĩ ки +Ġ×ŀ×Ķ ×ŀ×¢ +Ġ×ŀ×Ķ×ŀ×¢ ×¨×Ľ×ª +มี à¸Ľà¸£à¸°à¸ªà¸´à¸Ĺà¸ĺà¸´à¸łà¸²à¸ŀ +×Ļר ×Ļ×ĵ×Ķ +×Ĵר ×ŀ׳ +×Ĵר×ŀ׳ ×Ļ×Ķ +Ġг лав +Ġглав ное +Ġ미 ëŀĺ +Ġ׳׼ ×ķ׳×Ķ +ĠÙĪ Ø·ÙĨÙĬ +op port +opport unitÃł +Ġh á»§y +ĠÙĦ تØŃ +ĠÙĦتØŃ ÙĤÙĬÙĤ +Ġó rg +Ġórg ão +ãĤ¹ ãĥĶ +ãĤ¹ãĥĶ ãĥ¼ãĥī +Ġön ü +Ġönü ne +Ùħع اÙħÙĦ +ש×ŀ ×Ļר×Ķ +ĠвеÑģÑĮ ма +ĠwiÄĻks zo +ĠwiÄĻkszo ÅĽÄĩ +Ġاست راتÙĬج +ĠاستراتÙĬج ÙĬØ© +ĠÙģ Ø¥ +ĠÙ쨥 ذا +à¹Ģà¸Ĭืà¹Īà¸Ń ม +à¹Ģà¸Ĭืà¹Īà¸Ńม à¸ķà¹Īà¸Ń +Ġ׾ פר +Ġ׾פר ×ĺ×Ļ×Ŀ +Ùħض ÙĬ +ĠGer çek +Ġçocuk ların +ÙĪØ« ائÙĤ +ĠÙħساء Ùĭ +Ġunterstüt zt +Ġpré st +Ġprést amo +ĠÐłÐ°Ð· меÑĢ +ĠÅŁ eker +Ġsé culo +×ij×Ķ ×Ļר +Ø´Ùĩ ÙĪØ± +Ġ à¸Ńีà¸ģ 
+Ġà¸Ńีà¸ģ à¸Ĺัà¹īà¸ĩ +Ġlleg ó +à¸¨à¸´à¸¥à¸Ľ ะ +æĪij ãģĮ +æĪijãģĮ å®¶ +ع ÙĤÙĪ +عÙĤÙĪ Ø¨Ø§Øª +ĠF älle +Ġs ÅĤuż +ĠsÅĤuż b +ĠاÙĦØŃÙĤ ÙĪÙĤ +Ġпл иÑĤ +Ġи ноÑģÑĤ +ĠиноÑģÑĤ ÑĢан +ĠиноÑģÑĤÑĢан н +à¹ĥà¸Ļ à¸Ĥà¸ĵะà¸Ĺีà¹Ī +ãĤ« ãĥĨ +ãĤ«ãĥĨ ãĤ´ +ãĤ«ãĥĨãĤ´ ãĥª +à¸Ńิ ส +à¸Ńิส ระ +à¹Ģà¸ľà¸¢ à¹ģ +à¹Ģà¸ľà¸¢à¹ģ à¸ŀร +à¹Ģà¸ľà¸¢à¹ģà¸ŀร à¹Ī +ãģĬ ãģĦ +ãģĬãģĦ ãģĹãģĦ +است ÙĤÙĦ +استÙĤÙĦ اÙĦ +تØŃ ض +تØŃض ÙĬر +åĬ© ãģij +Ùħر اÙģÙĤ +Ġ×ĵ ×ķר +Ġ×ĵ×ķר ש +×ŀת×Ļ ×Ļ×Ĺס +ס ×Ļ׼ +ס×Ļ׼ ×ķ×Ŀ +íĮĮ íĬ¸ +Ġwy ÅĽ +ĠwyÅĽ w +ĠwyÅĽw iet +ĠwyÅĽwiet l +ĠاÙĦاÙĨ ساÙĨ +ĠStra ÃŁen +ï¼ ¬ +ãģ« åŁº +ãģ«åŁº ãģ¥ +Ġcap ÃŃtulo +ลุ ย +Ġ×Ķ×ŀ×§ צ×ķ×¢×Ļ +ãģĤãĤĭ ç¨ĭ度 +á» ¢ +ĠاÙĦ ÙĦا +ĠاÙĦÙĦا زÙħØ© +æķĻ ãģĪ +Ġרש ×IJ×Ļ +з ав +зав иÑģ +завиÑģ им +à¸Ľà¸±à¸Ī à¸Īัย +à¹Ģà¸ĭ ล +à¹Ģà¸ĭล ลà¹Į +Ġdiffé rence +ĠAlt ın +Ġк ÑĢай +ĠкÑĢай не +Ġз ло +Ġgün ümüz +Ġн аÑĤÑĥÑĢ +ĠнаÑĤÑĥÑĢ Ð°Ð»ÑĮн +×Ĵ×ķ׾ ש×Ļ×Ŀ +Ġк аÑĤегоÑĢ +ĠкаÑĤегоÑĢ Ð¸Ð¸ +Ġз нак +à¸ģà¹Īà¸Ńà¸Ļ หà¸Ļà¹īา +à¸ģà¹Īà¸Ńà¸Ļหà¸Ļà¹īา à¸Ļีà¹ī +ĠÙħÙĨ ت +ĠÙħÙĨت خب +ãĥĽ ãĥ¼ãĥ« +Ġе вÑĢо +ส ว +สว ม +ĠìľĦ ìĽIJ +ĠìľĦìĽIJ ëĭĺ +ĠاÙĦØŃ ÙĪØ« +ĠاÙĦØŃÙĪØ« ÙĬ +ĠÑģодеÑĢж иÑĤ +ãĥķãĤ¡ ãĥĥãĤ·ãĥ§ãĥ³ +Ġ à¸ģัà¸Ļ +Ġà¸ģัà¸Ļ ย +Ġà¸ģัà¸Ļย ายà¸Ļ +ãĤª ãĥª +ãĤªãĥª ãĤ¸ +ãĤªãĥªãĤ¸ ãĥĬãĥ« +Ġб ÑĢенд +ãĤĴæĮģ ãģ£ãģ¦ãģĦãĤĭ +Ġinvers ión +Ġê° ĸ +Ġê°ĸ ê³ł +Ġnov itÃł +ê´Ģ ê´ij +Ġà¸ŀ ฤษ +Ġà¸ŀฤษ à¸łà¸² +Ġà¸ŀà¸¤à¸©à¸łà¸² à¸Ħม +×ķר ×Ĺ×Ļ×Ŀ +׼׾ ×ķ׾ +Ġng ạc +×Ļ ×Ļש +×Ļ×Ļש ×ķ×ij +f äll +fäll ig +ĠÑĤÑĢеб ÑĥеÑĤÑģÑı +Ġcar á +Ġcará cter +Ġprinc ÃŃpio +ĠÅĤ az +ĠÅĤaz ien +ĠÅĤazien k +Ġgi ãn +ÑģÑĤÑĢа ива +Ùħس اب +Ùħساب ÙĤØ© +à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩ à¸Ķืà¹Īม +ترÙĥ ÙĬب +vol ução +ĠÐŁ оÑĩ +ĠÐŁÐ¾Ñĩ ем +ĠÐŁÐ¾Ñĩем Ñĥ +казал оÑģÑĮ +ĠпÑĢимен ениÑı +à¹Ģà¸Ĺ ียม +íĮ Ķ +à¸Ĥà¹īà¸Ń à¹Ģสà¸Ļà¸Ń +à¸Ľà¸±à¸į à¸įา +Ġоб ÑĥÑĩ +ĠобÑĥÑĩ ениÑı +ĠÑģеÑĢ Ð¸ +ĠÑģеÑĢи ал +Ġingl és +ĠÙĦ Ùĥرة +Ġ×ĺ ׾ +Ġ×ĺ׾ פ×ķף +Ġìł ij +Ġìłij ê·¼ +×IJ ×ķ×Ĵ +×IJ×ķ×Ĵ ×ķס +×IJ×ķ×Ĵ×ķס ×ĺ +ĠболÑĮÑĪ Ð¾Ðµ +ĠÐļон еÑĩно +×¢×Ļת ×ķ׳ +×¢×Ļת×ķ׳ ×IJ×Ļ +Ġкноп к +Ġз н +Ġзн аÑĤÑĮ +ĠÄij á»± +ĠÄijá»± ng +вл аж +влаж н +×ŀ ×Ļ×ĺ×ij +ãĤ¬ ãĤ¤ +ãĤ¬ãĤ¤ ãĥī +........ .. 
+Ġà¸ģ ุม +Ġà¸ģุม à¸łà¸²à¸ŀ +Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀ ัà¸Ļ +Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļ à¸ĺ +Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļà¸ĺ à¹Į +be z +bez pieczeÅĦst +bezpieczeÅĦst w +ãĥijãĥij æ´» +ع اط +عاط Ùģ +ĠÄij áºŃm +Ġз ÑĢ +ĠзÑĢ ÐµÐ½Ð¸Ñı +Ġbor ç +Ġнед ел +Ġнедел Ñİ +Ġh á»ı +Ġhá»ı ng +ìŀ¥ ìķł +ìŀ¥ìķł ìĿ¸ +ĠاÙĦع ÙĦاÙĤØ© +Ġíģ ¬ +Ġíģ¬ ê²Į +à¹Ħร à¹Ī +à¸ļา à¸Ķ +à¸ļาà¸Ķ à¹Ģà¸Īà¹ĩà¸ļ +à¸Ŀ รั +à¸Ŀรั à¹Īà¸ĩ +à¸Ŀรัà¹Īà¸ĩ à¹Ģศ +à¸Ŀรัà¹Īà¸ĩà¹Ģศ ส +ר ×¢×Ļ +רע×Ļ ×ķ׳×ķת +Ġë Į +ĠëĮ ĵ +ĠëĮĵ ê¸Ģ +Ġnaj b +Ġnajb li +Ġnajbli ż +Ġnajbliż sz +ĠиÑģполÑĮз ÑĥеÑĤÑģÑı +Ġcient ÃŃf +ĠcientÃŃf ico +×¢ ×ŀ×§ +Ġg ợi +Ø´ ØŃÙĨ +ĠÅĽ m +ĠÅĽm ier +ĠÅĽmier ci +à¸Ħาสิà¹Ĥà¸Ļ à¸Ńà¸Ńà¸Ļà¹Ħลà¸Ļà¹Į +×Ĺש×ij ת×Ļ +Ġn ingu +Ġningu ém +è¾¼ ãĤģ +ãģ · +ĠÑĥ г +ĠÑĥг ол +ï½ ° +פת ×Ļ×Ĺ +פת×Ļ×Ĺ ×ª +Ġ×Ķר×IJש ×ķ׳×Ļ×Ŀ +p ósito +ãĤŃ ãĥ¬ãĤ¤ +ãģ© ãģĵãĤį +à¹Ģà¸Ĺà¹Īา à¹Ħ +à¹Ģà¸Ĺà¹Īาà¹Ħ หร +à¹Ģà¸Ĺà¹Īาà¹Ħหร à¹Ī +ĠинÑĤеÑĢ ÑĮеÑĢ +ĠØŃ اج +ĠØŃاج Ø© +สี à¸Ĥาว +ìĸ ¼ +Ġn á»Ļ +Ġná»Ļ p +ĠÃŃ nd +ĠÃŃnd ice +สำ รวà¸Ī +Ġкажд ой +Ġhot éis +Ġnast ÄĻ +ĠnastÄĻ pn +Ġ×Ķ×§ ×ķ×ĵ +Ġ×Ķ×§×ķ×ĵ ×Ŀ +פ ×ķפ +פ×ķפ ×ķ׾ +פ×ķפ×ķ׾ ר×Ļ +вÑĪ ÐµÐ¹ +ãĤ·ãĥ³ ãĥĹ +ãĤ·ãĥ³ãĥĹ ãĥ« +ĠzdjÄĻ Äĩ +ĠгÑĢÑĥпп а +Ġпом еÑī +ĠпомеÑī ениÑı +ãģ©ãģĨ ãģĦãģĨ +ĠиÑģп ÑĭÑĤа +Ġog ÅĤ +ĠogÅĤ os +ĠogÅĤos zen +ĠogÅĤoszen i +สรà¹īาà¸ĩ สรร +สรà¹īาà¸ĩสรร à¸Ħà¹Į +à¸ŀร รà¸ĵ +Ġçık Ä±ÅŁ +ĠÑĩаÑģÑĤ ноÑģÑĤи +Ġ×ķ ×Ļ×ķתר +ç¶ļãģį ãĤĴ +ç¶ļãģįãĤĴ èªŃ +ç¶ļãģįãĤĴèªŃ ãĤĢ +à¸ģร ั +à¸ģรั ม +г ÑĢаÑĦ +Ġв лад +Ġвлад елÑĮ +ĠвладелÑĮ ÑĨ +Ġistedi ÄŁ +ĠistediÄŁ iniz +×ij׾ ×¢ +×ij×ľ×¢ ×ĵ×Ļ +ÙħÙĪ Ø§Ùģ +ÙħÙĪØ§Ùģ ÙĤØ© +Ġ×Ļ ×ķר +Ġ×Ļ×ķר ×§ +ãĤ«ãĥ¼ãĥī ãĥŃãĥ¼ãĥ³ +ĠاÙĦÙħØ´ ÙĥÙĦ +ĠاÙĦÙħØ´ÙĥÙĦ Ø© +ĠêµŃ íļĮ +ס פ×ĺ +ספ×ĺ ×ŀ +ספ×ĺ×ŀ ×ijר +Ġìĸ´ ëłµ +Ùĥ اÙħ +ÙĥاÙħ ÙĬرا +sch lü +schlü sse +ĠØ« ÙĨ +ĠØ«ÙĨ ائÙĬ +ìī ½ +ĠÐŀ Ñģоб +ĠÐŀÑģоб енно +Ġин веÑģÑĤи +ĠинвеÑģÑĤи ÑĨи +اØŃ تÙħ +اØŃتÙħ اÙĦ +E Äŀ +EÄŀ İ +íķĺ ê²łëĭ¤ +Ġ×IJ ×ijר×Ķ +Ġ×IJ×ijר×Ķ ×Ŀ +Ġ×ij×Ĺ ×Ļ׳×Ŀ +Ø£ ÙĪØ¶ +Ø£ÙĪØ¶ اع +Ġdé l +Ġdél ai +Ġ×IJ×ķ×Ķ ×ij×Ļ×Ŀ +ĠÑģо Ñħ +ĠÑģоÑħ ÑĢ +ĠÑģоÑħÑĢ Ð°Ð½Ð¸ +ĠдоÑģÑĤ иж +ĠдоÑģÑĤиж ени +สิà¹Īà¸ĩ à¹ģ +สิà¹Īà¸ĩà¹ģ วà¸Ķ +สิà¹Īà¸ĩà¹ģวà¸Ķ ล +สิà¹Īà¸ĩà¹ģวà¸Ķล à¹īà¸Ńม +ĠاÙĦÙħ باشر +ĠÑĦ иг +ĠÑĦиг ÑĥÑĢ +мож ем +׾×ŀ×Ļ×ĵ ×Ķ +Ġcin é +Ġciné ma +Ġb ada +Ġbada ÅĦ +جب ÙĩØ© +Ġд еп +Ġдеп ÑĥÑĤ +ĠдепÑĥÑĤ аÑĤ +Ġdist ância +ĠاÙĦÙħ عار +ĠاÙĦÙħعار ضة +thè se +ü nc +ünc ü +Ġдан ного +ĠBel gi +ĠBelgi ë +Ġ×ij ×ij×§ +Ġ×ij×ij×§ ש×Ķ +ย à¹Īาà¸Ļ +Ġsol ução +Ġ×Ķצ ×ĺר +Ġ×Ķצ×ĺר פ×ķ +ĠØ£ÙĨ ØŃ +ĠØ£ÙĨØŃ اء +Ġد ÙħØ´ +ĠدÙħØ´ ÙĤ +มั à¹ī +มัà¹ī ย +Ùħ غرب +است عÙħاÙĦ +ĠS ÅĤow +ĠëıĻ ìĭľ +ĠëıĻìĭľ ìĹIJ +ĠÑģ оÑģ +ĠÑģоÑģ ед +ì²Ń ìĨĮ +ì²ŃìĨĮ ëħĦ +Ġг ÑĢаÑĦ +ĠгÑĢаÑĦ ик +Ġìŀij ìĿĢ +Ġyet i +Ġyeti ÅŁtir +ĠìĿ´ê²ĥ ìĿ´ +ห à¹Īาà¸ĩ +Ø¥ ÙħÙĥاÙĨ +Ø¥ÙħÙĥاÙĨ ÙĬØ© +است عراض +ÙħØ® در +ĠÑĩ ÑĥÑĤÑĮ +Ùħ دÙĬر +ÙħدÙĬر ÙĬØ© +Ġà¹Ģม ษ +Ġà¹Ģมษ ายà¸Ļ +Ġм еÑħ +ĠмеÑħ аниз +ĠмеÑħаниз м +ĠÑģ Ñĥм +ĠÑģÑĥм мÑĥ +Ġv ö +Ġvö ll +Ġvöll ig +Ġд ÑĢÑĥз +ĠдÑĢÑĥз ÑĮÑı +ãĤĴåĪ©ç͍ ãģĹãģ¦ +à¸ļรร à¸Īุ +po życz +×ŀש ׼ +×ŀש׼ ×ł×ª +×ŀ×©×Ľ×ł×ª ×IJ +Ġeuropé en +Ġpropri é +Ġproprié taire +Ġkh ấu +ãģĦãģŁãģł ãģijãĤĭ +Ġtec rü +Ġtecrü be +×Ķ ×ij +×Ķ×ij ׳×Ķ +Ġcu Ì +ĠcuÌ ī +ĠcuÌī a +×IJ ×ķ×ķ +×IJ×ķ×ķ ×Ļר×Ķ +Ġ׼×ķ׾ ×ķ +U lus +Ulus lararası +Ġ׳ ×ķת +Ġ׳×ķת ף +ãģ« åIJij +ãģ«åIJij ãģijãģ¦ +ë¹ Ľ +à¸Ĺ ัà¸ģษ +à¸Ĺัà¸ģษ ะ +س ÙĤÙĪ +سÙĤÙĪ Ø· +Ġв н +Ġвн еÑĪ +ĠвнеÑĪ Ð½Ðµ +Ġur z +Ġurz ÄĻd +Ġá mb +Ġámb ito +à¸Ń à¸ĺิ +à¸Ńà¸ĺิ à¸ļาย +Ġ ÅĤad +ĠÅĤad n +ê±´ ì¶ķ +wód zt +wództ w +Ġquest ões +Ġש ×§ +Ġשק ×Ļ×ij׾ +Ġmiejsc owoÅĽci +Ġв ал +Ġвал ÑİÑĤ +hä user +หà¸Ļ à¸Ńà¸ĩ +ãģ¨ åħ± +ãģ¨åħ± ãģ« +ãĥı ãĥ¼ãĥī +Ġê°ľ ìµľ +ĠоÑģнов ном +Ġм ÑıÑģ +اع ت +اعت ÙĤاÙĦ +สà¸ĸ ิ +สà¸ĸิ à¸ķิ +N gu +Ngu á»ĵn +ĠÙħ جÙĦ +ĠÙħجÙĦ Ø© +à¹ģà¸Ĥ à¸Ļ +ĠاÙĦÙĦÙĬ بÙĬ +פע×Ļ׾ ×ķ×Ļ×ķת +Ġ×Ķר פ×ķ×IJ×Ļ +פר ×ķפ +פר×ķפ ×Ļ׾ +×§ ׾×IJ +ק׾×IJ ס×Ļ +Ùĥت Ø´Ùģ +ãģ«ãģª ãģ£ãģ¦ãģĹãģ¾ãģĨ +à¹Ģà¸Ħล 
à¹ĩà¸Ķ +à¹Ģà¸Ħลà¹ĩà¸Ķ ลัà¸ļ +Ġì» ´ +Ġì»´ íĵ¨ +Ġì»´íĵ¨ íĦ° +Ġ×Ĺ×Ļ ×ķ×ij×Ļ +Ġnä m +Ġnäm lich +åij¼ ãģ° +åij¼ãģ° ãĤĮ +ĠÑĢ Ð¾Ð» +ĠÑĢол и +Ġspécial isé +à¸Ļ วัà¸ķ +à¸Ļวัà¸ķ à¸ģรรม +ÙĨص ÙĪØµ +пеÑĢ ÐµÐ´ +пеÑĢед аÑĩ +thè que +Ġר×IJ ×Ļת×Ļ +ãĥĢ ãĤ¦ãĥ³ +ãĤı ãģĭ +ãĤıãģĭ ãģ£ãģ¦ +беÑĢ ÐµÐ¶ +ĠÑģ ек +ĠÑģек ÑĢ +ĠÑģекÑĢ ÐµÑĤ +ĠпоÑģÑĤоÑıн н +à¸Ĥà¸Ļ สà¹Īà¸ĩ +Ġm ük +Ġmük em +Ġmükem mel +еÑĤ еÑģÑĮ +ĠاÙĦسÙĨ ÙĪØ§Øª +ĠìłĦ íĺĢ +Ġ×Ķ×ŀ×§ ×ķר×Ļ +Ġmü d +Ġmüd ah +Ġmüdah ale +Ġwy b +Ġwyb ór +Ġtend ência +Ø¥ دار +إدار ÙĬØ© +Ġunterstüt zen +ת ×ijר +ת×ijר ר +Ġdi á +Ġdiá logo +ĠÃĸ nce +ĠÃĸnce ki +ãĤ¹ãĥĿ ãĥĥãĥĪ +ëĦ £ +ĠG eli +ĠGeli ÅŁ +ãĤĴ éĢļ +ãĤĴéĢļ ãģĹãģ¦ +ĠFuÃŁ ball +Ġsal ari +Ġsalari é +ĠпÑĢодÑĥк ÑĤов +صÙģ ÙĤØ© +รว à¸ļ +รวà¸ļ รวม +à¹ĥà¸Ļ à¸IJาà¸Ļ +à¹ĥà¸Ļà¸IJาà¸Ļ ะ +Ġkay na +Ġkayna ģı +Ġìŀij íĴĪ +ĠвÑĭ ÑĢаж +ĠвÑĭÑĢаж ен +ĠÑģÑĤ еп +ĠÑģÑĤеп ени +ĠاÙĦÙħ ÙĪØ¬ÙĪØ¯ +ĠاÙĦÙħÙĪØ¬ÙĪØ¯ Ø© +ล à¹īม +Ġnaj czÄĻ +ĠnajczÄĻ ÅĽcie +ĠnajczÄĻÅĽcie j +Ġz wy +Ġzwy k +Ġzwyk ÅĤ +Ġê·¸ëłĩ ì§Ģ +à¸ģระ à¸Ī +à¸ģระà¸Ī าย +Ġëĭ µ +Ġëĭµ ë³Ģ +ĠÑĢе ак +ĠÑĢеак ÑĨи +ĠÅĽwie ż +ĠÑģÑĤоим оÑģÑĤи +ÙħÙĨ اÙĤ +ÙħÙĨاÙĤ Ø´ +ÙħÙĨاÙĤØ´ Ø© +ĠÑħоÑĩ Ñĥ +ãĥľ ãĥ¼ãĥī +Ġróż nic +Ġк ÑĢÑĭ +ĠкÑĢÑĭ ÑĪ +âľ ĵ +ãĤ³ãĥ³ ãĥĨãĥ³ +ãĤ³ãĥ³ãĥĨãĥ³ ãĥĦ +ĠпÑĢед поÑĩ +×ŀר ×ij×Ļת +ĠØ´ Ùĥ +ĠØ´Ùĥ را +Ġд ал +Ġдал ек +Ġдалек о +بر ÙĬØ· +برÙĬØ· اÙĨÙĬا +ع ÙĨا +عÙĨا ÙĬØ© +ĠÑĢаÑģÑģ каз +ĠÑĢаÑģÑģказ Ñĭва +Ø£ ÙĦÙĪ +Ø£ÙĦÙĪ Ø§ÙĨ +æĮģ ãģ£ãģ¦ +æĮģãģ£ãģ¦ ãģĦ +Ùħباد ئ +×Ķ ×¢×ijר +×Ķ×¢×ijר ת +Ġyay ı +Ġyayı ml +Ġyayıml a +m át +mát icos +à¸ģ ัà¸ĩ +à¸ģัà¸ĩ วล +Ġ׾ פת +Ġ×ľ×¤×ª ×ķ×Ĺ +à¸ŀฤ à¸ķิ +à¸ŀฤà¸ķิ à¸ģรรม +í Ĥ¬ +Ġок ÑĢÑĥг +Ġ×ŀצ ×ķ×ķ×Ķ +ÐĽ ени +ÐĽÐµÐ½Ð¸ н +ĠTri á»ģu +ãĤ³ãĥŁ ãĥ¥ +ãĤ³ãĥŁãĥ¥ ãĥĭ +ãĤ³ãĥŁãĥ¥ãĥĭ ãĤ± +ãĤ³ãĥŁãĥ¥ãĥĭãĤ± ãĥ¼ãĤ·ãĥ§ãĥ³ +Ùĥ ÙĨÙĬ +ÙĥÙĨÙĬ سة +ãĤĴ ä¸Ńå¿ĥ +ãĤĴä¸Ńå¿ĥ ãģ« +ĠmiÄĻd z +ĠmiÄĻdz yn +ĠmiÄĻdzyn ar +ĠmiÄĻdzynar od +ĠmiÄĻdzynarod ow +ÙĦ ÙĨ +ÙĦÙĨ دا +بر Ø´ +برش ÙĦÙĪÙĨ +برشÙĦÙĪÙĨ Ø© +à¸ģระ à¸ķุ +à¸ģระà¸ķุ à¹īà¸Ļ +Ġg ı +Ġgı da +à¸Ľà¸£à¸° à¸Ĺัà¸ļ +à¸Ľà¸£à¸°à¸Ĺัà¸ļ à¹ĥà¸Ī +Ġë¶Ī 구 +Ġë¶Ī구 íķĺê³ł +ĠÙĨ Ø· +ĠÙĨØ· اÙĤ +ĠÐľ ожеÑĤ +Pr äs +Präs ident +ĠÑģк оÑĢ +ĠÑģкоÑĢ Ð¾ÑģÑĤÑĮ +Ġ×Ķ×ij ×ķקר +еÑħ аÑĤÑĮ +Ġg ạo +Ġש×IJ ×Ļ׳×Ŀ +Ġ×ij׳ ×ķ×Ĵ +Ġ×ij׳×ķ×Ĵ ×¢ +Ġо пиÑģание +Ġucz ni +Ġuczni ów +à¹Ģà¸Ń à¹ĩà¸Ļ +Ġت Ø´ +Ġتش رÙĬÙĨ +Ġnh ãn +ë¹ ¨ +Ġcaract ère +×¢ ׾×Ļ +×¢×ľ×Ļ ×Ļ×Ķ +楽ãģĹ ãĤģãĤĭ +ĠÑģ аÑħ +ĠÑģаÑħ аÑĢ +дÑĥм аÑĤÑĮ +ĠÐĴоз можно +ص ÙĬاÙĨ +صÙĬاÙĨ Ø© +öm ür +ส ล +สล à¹ĩ +สลà¹ĩ à¸Ń +สลà¹ĩà¸Ń à¸ķ +ë¡ ¯ +Ġth ói +gr Ã¶ÃŁe +Ġksi ÄĻ +ĠksiÄĻ g +ĠÑĢ Ð¾Ð¼ +ĠÑĢом ан +ÙĤ اسÙħ +×ŀ×ij ×ķ×Ĵ +×ŀ×ij×ķ×Ĵ ר×Ļ×Ŀ +bes ch +besch äft +beschäft ig +×Ķצע ×Ķ +ĠÃģ rea +ĠзаÑıв к +Ä ¹ +ĠлÑİб ого +Ġ ม +Ġม à¸ģร +Ġมà¸ģร าà¸Ħม +ÑĦ из +ÑĦиз иÑĩеÑģк +ин ÑĦ +инÑĦ ек +инÑĦек ÑĨи +اÙĦ Ø· +اÙĦØ· ائÙģ +Ġкол л +Ġколл екÑĤив +ез жа +Ġس بØŃ +ĠسبØŃ اÙĨ +ĠسبØŃاÙĨ Ùĩ +sch lä +schlä ge +Ġд и +Ġди аг +Ġдиаг ноÑģÑĤ +ĠоÑĤмеÑĤ иÑĤÑĮ +Т Ь +ĠاÙĦ در +ĠاÙĦدر اسÙĬ +עצ ×ŀ +עצ×ŀ ×IJ×ķת +Ġdém arch +Ġdémarch e +Ġ×ĺ ×ķ×¢ +Ġ×ĺ×ķ×¢ ף +Ġfuncion ários +á» µ +׾ ׼×IJ +׾׼×IJ ×ķר×Ķ +à¸ĭ à¹Ī +à¸ĭà¹Ī à¸Ńม +ĠÑĩ Ñĥв +ĠÑĩÑĥв ÑģÑĤво +âĸ ¼ +п ÑĥÑī +пÑĥÑī ен +Ġм еÑĢ +ĠмеÑĢ Ð¾Ð¿ +ĠмеÑĢоп ÑĢи +ĠмеÑĢопÑĢи ÑıÑĤиÑı +Ġu çu +Ġuçu ÅŁ +ãĤĴåĪ©ç͍ ãģĻãĤĭ +a ÄŁ +aÄŁ lı +ìĺĪ ìĪł +à¹ģ ยà¹Ī +ĠاÙĦÙĥ Ùħ +ĠاÙĦÙĥÙħ بÙĬ +ĠاÙĦÙĥÙħبÙĬ ÙĪØªØ± +ت ÙĪÙĬ +تÙĪÙĬ تر +à¹Ģà¸Ĭ ีà¹Īยว +à¹Ģà¸Ĭีà¹Īยว à¸Ĭา +à¹Ģà¸Ĭีà¹Īยวà¸Ĭา à¸į +á» Ķ +Ġhi ếm +ذا Ùĥرة +Ġ×Ķ×ŀ×Ļ ×ķ×Ĺ×ĵ +ĠìĪ ľ +ĠìĪľ ê°Ħ +ĠK ı +ĠKı sa +Ġgele ceÄŁi +пÑĢо ÑĦеÑģÑģиона +пÑĢоÑĦеÑģÑģиона л +Ġog ó +Ġogó le +ĠgÅĤ ów +ĠgÅĤów ne +ĠÑģÑĤ илÑĮ +×IJ פ׾ +×IJפ׾ ×Ļ×§ +×IJפ׾×Ļ×§ צ×Ļ×Ķ +สม ารà¹Į +สมารà¹Į à¸Ĺ +สมารà¹Įà¸Ĺ à¹Ĥà¸Ł +สมารà¹Įà¸Ĺà¹Ĥà¸Ł à¸Ļ +Ġth ánh +ÐŁ од +ÐŁÐ¾Ð´ ÑĢоб +ÐŁÐ¾Ð´ÑĢоб нее +ĠاÙĦت ÙĪÙĨ +ĠاÙĦتÙĪÙĨ سÙĬ +Ġbah çe +à¹ģà¸ģà¹ī à¸Ľà¸±à¸įหา +é ducation +eu rop +europ ä +europä 
ische +ĠK si +ĠKsi ÄĻ +ĠëĦ ĺ +ĠëĦĺ ìĸ´ +Ġv üc +Ġvüc ud +Ġyay g +Ġyayg ın +Ġnie kt +Ġniekt óry +Ġniektóry ch +ãģŃ ãģĩ +Ġк аж +Ġкаж еÑĤÑģÑı +к аж +каж еÑĤ +ĠاÙĦ دÙĬÙħÙĤرا +ĠاÙĦدÙĬÙħÙĤرا Ø· +ĠاÙĦدÙĬÙħÙĤراط ÙĬØ© +æŃ © +æŃ© ãģĦãģ¦ +Ġv az +Ġvaz ge +Ġvazge ç +Ġмин ималÑĮ +ĠминималÑĮ н +ãĥij ãĤ¿ +ãĥijãĤ¿ ãĥ¼ãĥ³ +Ġë Ĭ +ĠëĬ IJ +ĠëĬIJ ëĤĮ +ãģ¡ ãĤĩãģĨ +ãģ¡ãĤĩãģĨ ãģ© +Ġ à¸ģร +Ġà¸ģร à¸ģà¸İ +Ġà¸ģรà¸ģà¸İ าà¸Ħม +تج دÙĬد +ĠØ´ اÙħÙĦ +หลัà¸ģ à¸IJาà¸Ļ +ĠмаÑĢ ÑĪ +ĠмаÑĢÑĪ ÑĢÑĥÑĤ +Ġv ÃŃt +ĠvÃŃt ima +Ġquiz á +ay gı +×ĵ×ijר ×Ļ×ķ +Ġиз д +Ġизд ели +Ġиздели Ñı +п ла +пла Ñĩ +плаÑĩ ива +ä»» ãģĽ +Ġéquip é +ä¹ħ ãģĹãģ +ä¹ħãģĹãģ ¶ +ä¹ħãģĹãģ¶ ãĤĬ +Ġк аÑĤ +ĠкаÑĤ ал +ĠкаÑĤал ог +ส à¹īม +ĠÑĢ ÐµÐ¹ +ĠÑĢей ÑĤ +ĠÑĢейÑĤ инг +Ġth uyá»ģn +ĠاÙĦÙħ ÙĤدس +esp ère +ãģ«åħ¥ ãģ£ãģŁ +หมาย à¹Ģลà¸Ĥ +ת×Ĺ×ķש ת +à¸Ļ à¹Īะ +Ġpe ÅĤ +ĠpeÅĤ ne +Ġpé rd +Ġpérd ida +หม วà¸Ķ +หมวà¸Ķ หมูà¹Ī +иÑĩеÑģк ÑĥÑİ +çµĤ ãĤı +çµĤãĤı ãģ£ãģŁ +Ġ×Ĵ ×ķ×Ĵ׾ +à¸Ĺำ à¸Ħวาม +à¸Ĺำà¸Ħวาม สะà¸Ńาà¸Ķ +Hot éis +Ġз аÑĢ +ĠзаÑĢ ÐµÐ³Ð¸ÑģÑĤ +ĠзаÑĢегиÑģÑĤ ÑĢи +ĠзаÑĢегиÑģÑĤÑĢи ÑĢова +ĠÑģ обÑĭÑĤи +ĠÑģобÑĭÑĤи Ñı +Ġ×ĸ ׼×IJ +ÙħÙĨظ ÙĪÙħØ© +Ġ×Ķ×ŀ צ +Ġ×Ķ×ŀצ ×Ļ×IJ×ķת +Ùħ ÙĥÙĪÙĨ +ÙħÙĥÙĪÙĨ ات +ä¸ĬãģĮ ãĤĭ +Ġm ÄĻ +ĠmÄĻ sk +หรืà¸Ń à¹Ģà¸Ľà¸¥à¹Īา +ëĤ ® +Ġnok tas +Ġnoktas ı +ĠболÑĮÑĪ Ð¸Ð¼ +ĠлÑĥÑĩ ÑĪиÑħ +Ø´Ùĩ ÙĬد +à¸Ńำ à¸Ļ +à¸Ńำà¸Ļ วย +à¸Ńำà¸Ļวย à¸Ħวาม +à¸Ńำà¸Ļวยà¸Ħวาม สะà¸Ķวà¸ģ +Ġе в +Ġев ÑĢ +ĠевÑĢ Ð¾Ð¿ +ĠевÑĢоп ей +à¸ī าย +ìĦ Ń +Ùħ Ù쨧 +ÙħÙ쨧 ÙĪØ¶ +ÙħÙ쨧ÙĪØ¶ ات +ë¹ Į +赤 ãģ¡ãĤĥãĤĵ +ĠÑĥдал оÑģÑĮ +ĠÐ¥ оÑĤ +ĠХоÑĤ Ñı +przedsiÄĻbior c +ĠH ôm +íķĺìĺĢ ìĬµëĭĪëĭ¤ +Ġн аг +Ġнаг ÑĢÑĥз +ĠнагÑĢÑĥз к +Ġ×ij×Ļ׳ ׾×IJ×ķ×ŀ×Ļ +Ġê°ĢëĬ¥ íķľ +ĠH ữu +à¸Ń ุà¸Ķ +à¸Ńุà¸Ķ ม +ת ×ķפ +ת×ķפ ×¢×Ķ +Ġmi ÅĤo +ĠmiÅĤo ÅĽci +ksi Äħż +ksiÄħż ka +ĠاÙĦÙĦ عبة +à¸ī าà¸ģ +สะ สม +×ŀ תר +×ŀתר ×Ĺש +Ġlég ère +Ġ׾צ פ +Ġ׾צפ ×Ļ×Ķ +ĠиÑģÑĤоÑĢ Ð¸Ñı +Ġ ãĥĪãĥ© +ĠãĥĪãĥ© ãĥĥãĤ¯ +ĠãĥĪãĥ©ãĥĥãĤ¯ ãĥIJãĥĥãĤ¯ +Ġк а +Ġка ÑĦе +×ŀס×ŀ ×ļ +Ġc üm +Ġcüm le +à¹Ģà¸Ħลืà¹Īà¸Ńà¸Ļ à¹Ħหว +ãģĬ ãģĿ +ãģĬãģĿ ãĤīãģı +ìŀIJ ëıĻ +ìŀIJëıĻ ì°¨ +à¸Ńั à¸ķ +à¸Ńัà¸ķ à¹Ĥà¸Ļ +à¸Ńัà¸ķà¹Ĥà¸Ļ มั +à¸Ńัà¸ķà¹Ĥà¸Ļมั à¸ķิ +ĠÅŁ ik +ĠÅŁik ay +ĠÅŁikay et +extr ême +kr ä +krä fte +ëĤ Ļ +íķ ij +ì² Ļ +íĺ Ī +ì° į +âĻ ¡ +ìŀ Ķ +ë¢ ° +íĿ Ķ +íĿ IJ +âĩ Ĵ +ë§ Ľ +ìĬ Ī +á» Ĵ +ìĺ µ +âĹ İ +í Ĥ¨ +ê¿ Ī +ìĪ ¨ +ìĽ ¨ +ë§ ¥ +ï½ Ģ +ï¼ ª +Ạ¨ +ãħ İ +Ñ Ĺ +ìĦ ¬ +ì¹ ¼ +ï¼ ¶ +ìĽ ł +ëŁ ´ +Å ĥ +ëĤ ¼ +ëĭ IJ +âĢ ¹ +ë¦ Ń +ì§ IJ +âĢ ¤ +à ħ +ëľ ¨ +íĦ ¸ +íľ ĺ +ê² ģ +ë´ ħ +à ĺ +ëŃ Ķ +ëĺ ij +âĹ ĩ +ìĹ ĺ +ï» ´ +ë§ ¹ +ï¾ Ŀ +ìĬ · +íĥ ķ +ï¼ ł +ì» ´ +ëł Į +ì½ ľ +ï» ¹ +ãħ ł +ì¡ ¸ +ëħ ¹ +âĤ º +âĸ ¶ +íĥ IJ +êµ ´ +íij ¸ +Ñ Ķ +íĶ ½ +Ð ħ +ë° ¤ +Ô ģ +ì² ¨ +ì¶ ĺ +ë² Ĺ +ë© ¸ +ï¼ » +ï¼ ½ +ï¼ · +ì° Į +à Ĵ +íı ´ +ìĵ ¸ +ì´ Į +ëģ Ķ +ëĶ © +ëĩ Į +ë© Ģ +ë² ¨ +ï¼ µ +ë§ ¡ +ëĭ « +ภ¿ +ãģ ± +ìĩ ¼ +ìº ł +ë® ¤ +ê± ± +ì» ¬ +âĦ ĥ +ëĶ ± +ëĥ Ī +ìĭ ± +íĻ Ī +ëŀ IJ +ìħ Ģ +ìł ł +Ð Ĩ +ëł ī +ï½ ħ +ï½ ı +íĻ Ģ +ëĽ ° +á» ® +í Ĥ¹ +ê½ ĥ +ï» ¤ +ïº Ķ +êº ¼ +ìķ ī +âĻ ¦ +ï½ ģ +ìĵ ´ +ãĢ ī +ì° ® +ì¤ ĺ +á» ª +ëģ Ħ +ëIJ ¨ +ìķ Į +íĿ ĺ +íħ IJ +ãĢ Ī +ê² ª +ëĭ ¥ +ê² ¼ +á» Į +ë§ ¨ +ëģ Ĭ +ë² ¤ +ëij Ķ +íĿ ¡ +á» ¬ +ë¬ ĺ +ãģ ī +ëŀ « +íĶ Ī +í ħį +ìŀ ĥ +ï½ ī +ìģ ľ +âĸ ½ +ë¬ » +âĸ ³ +ï¼ ¸ +ìģ ĺ +ì¶ ° +ìĬ ´ +ìķ ± +ìĩ Ħ +Ạ® +ï´ ¿ +ï´ ¾ +âĤ ½ +ëĦ ĵ +ë£ © +ì³ ¤ +ê´ ľ +Ã Ļ +á» ľ +ï¿ £ +ëĵ Ń +ë© ĺ +ê» ´ +ëł ´ +Ð ĥ +ë¬ µ +ì§ Ŀ +ãģ º +ðŁĺ Ĥ +ëŀ ¬ +ìł Ĭ +ê´ Ħ +ìŀ Ĭ +íŀ Į +ìĦ ¯ +âĪ Ģ +âĸ ¡ +ëĢ Į +ëŀ Ļ +ï½ ĥ +Ạ¶ +ï¾ Ħ +ïº ĺ +ë¹ ¼ +à Į +âĸ · +ê¸ į +ë© ĭ +ãģ ĥ +ìĺ Ĩ +ìĺ ® +ëª ¬ +ë¡ ¤ +ëł ¬ +ëĬ ¦ +âĸ ª +ì¼ ĵ +ìľ Ī +ì§ § +ï½ ½ +ëĥ ī +ï¾ Į +ëĺ IJ +ï¼ ĥ +á» Ħ +ì´ ¬ +ì¶ ¤ +ï¼ ¹ +ï» Ń +âĤ « +ï½ ĩ +ìĺ · +ëĸ ¨ +âī « +ë¦ ¿ +âľ ¨ +Ù ± +ì¯ ¤ +ê¹ Ķ +ðŁĺ Ĭ +ìĪ « +ê³ ± +êµ ³ +ï½ ĭ +ภĮ +Ä ł +ëĶ ¸ +ë° ij +ìħ ĭ +íİ ´ +âľ ħ +íĥ ij +ëĪ ĩ +íı ¼ +ðŁĺ į +ìĺ Ľ +ï» £ +Ñ ĺ +ì© Į +ë¦ ħ +ìĿ į +ï½ ¸ +ëį ľ +ãģ ħ +íİ ¼ +ëĭ Ŀ +ë¿ Į +ì¼ ° +ìĭ « +ë° ¥ +íĽ Į +ì¨ 
Į +ë¹ Ļ +ï½ İ +ë´ Ħ +ìĦ ¹ +ï½ ² +ìĮ ĵ +Ò ij +ë° į +ëł Ģ +íĨ ¤ +ï½ ¯ +ë¤ Ħ +ê½ ¤ +ï½ Ĵ +ìķ ¨ +ï½ ¼ +ê¹ IJ +íģ IJ +âĦ ĸ +ë§ º +ïº ® +ëħ ģ +ê² ¸ +ï» ł +íĬ ľ +Å ¹ +ë¥ Ń +ëĪ ī +ï½ Ķ +íĮ ¬ +ìŀ ĩ +ï ¬ģ +ï» ¨ +ëij ¥ +ëŀ Ħ +Ù ¬ +íĭ ´ +ìŀ ī +Ú ¾ +ìĽ ħ +ï» ® +ëĭ ī +âī ª +âĹ Ħ +ëĪ Į +íĽ ¼ +ì¤ į +Å ¸ +ì¤ ¬ +ì¾ Į +ï½ ĵ +ï¾ Ĭ +ðŁı » +ï¾ ī +Ð ģ +íĺ IJ +ï¾ Ļ +ê¼ ¬ +íŀ IJ +âĢ ¥ +ëŁ Ń +ë§ ŀ +ìĥ ¤ +ïº Ĵ +íĭ ± +ë½ ij +à ķ +âĪ ļ +ëĤ Ħ +ê¹ Ŀ +ëĨ Ī +Ạº +ìħ Ī +ìĮ į +âĢ ¡ +ï¼ ± +ìģ ¨ +âĺ º +ëĴ · +ìĺ ³ +ðŁij į +ëª ½ +ëĤ Ń +ïº Ń +ë© Ī +á» Ī +íķ Ģ +ëĭ Ļ +ë¦ ĩ +ìķ ¤ +ìį ¼ +ãĥ µ +Ñ £ +ìľ Ĺ +â ŃIJ +ï¾ ĺ +íĹ ¬ +ê¾ ¼ +ìķ Ĺ +ï» Į +ê± · +ëħ ķ +ë¡ ± +ìķ Ĭ +ï¾ Ģ +ìĩ ł +íĮ © +ïº ª +ë§ Ļ +ï¼ ¿ +ê¿ Ķ +íİ ľ +ë£ ¸ +íĶ Ķ +ï» ³ +ëı ķ +ìĭ ¼ +á» İ +ë§ ĺ +ì¢ ĭ +íĨ ¡ +ï½ ± +íĿ ij +á» ¸ +ì¦ Į +ì¹ ¸ +ëŃ ĺ +ï¾ Ĺ +ï» ĭ +íĬ Ģ +ë¥ Ļ +ì½ © +ëģ Ĺ +ëį ´ +ìħ ľ + ¸ +ë» IJ +ìĥ µ +ê² IJ +ëĵ ¬ +ë£ ° +ãħ ĭ +ìĹ ī +á» ĸ +ëĦ Į +ï½ ¶ +ë´ ĩ +ëĤ ³ +ãĤ ľ +ëĸ » +íİ Ģ +ëį © +íķ ¸ +à · +ê¼ ¼ +ëĶ ľ +ë° ´ +ë© į +âĹ ¯ +ìĹ ij +ìĻ ¼ +ïº ij +ë¶ ķ +ë¡ ¬ +ï½ Į +íĨ ¨ +ïº ´ +ëł ĺ +ê° ¤ +ìĪ ² +Ñ ĵ +ìħ ī +ï» ĵ +ëĪ Ķ +ëį § +âĢ ¼ +ï» ² +ê° ± +ê¿ Ģ +ëĭ · +Ạ¸ +Ạª +Æ Ĵ +ëį ¤ +ìĪ Ń +ï½ Ĥ +ï½ Ī +Å ł +ë£ ¬ +Ñ µ +ëĸ ¡ +ëĥ Ħ +ìĦ ° +ëĵ Ī +ï¾ ĥ +ëĩ ¨ +ï½ IJ +êµ ½ +ìĹ ½ +ëĤ Ģ +ë¬ ¶ +ï½ · +ìı Ł +íĺ Ķ +ê¼ Ī +ëģ Ī +ì¥ IJ +ïº Ĺ +Ä Į +ëĪ ł +ëĸ ¼ +íĢ ´ +âī ¥ +ëĭ Ń +ì± Ļ +ê» ı +ë© ¤ +ìĥ ĺ +ëį ® +ë£ ¡ +ìĤ ½ +ãĪ ľ +Ä ¨ +âĢ § +ï½ º +Ä £ +ì¦ ī +ï¼ ¼ +Û © +âĪ Ļ +ë° ı +ë¹ ħ +ðŁĺ Ľ +íĪ ´ +ðŁĴ ķ +ãĢ Ĵ +ìŀ ĺ +ïº ¤ +ï½ ĸ +ë© ľ +ë² ¼ +ëĿ Ħ +ëļ ľ +ï» ĺ +ìĥ Į +ï½ Ħ +ì© Ķ +ï½ Ļ +ïº © +Û ŀ +âĺ İ +ìł ¤ +ëIJ © +Å Ŀ +âŀ ¡ +ï» § +Ð ı +ì« ĵ +ê³ ½ +É ij +ãĥ ² +ëĤ « +ë¦ ī +ì¢ ģ +ë° Ń +ðŁĺ ģ +ë¹ µ +ì² © +ì» µ +ðŁĺ ĺ +ë± ħ +âī Ī +ë¹ ļ +ï» ľ +ðŁĻ ı +íģ ° +ìĦ ŀ +ï¾ ļ +ìĺ ¹ +ë¼ Ī +ëĤ ¯ +ëŀ © +íļ ¡ +ï½ ķ +íĥ ĵ +ëĿ ł +ê³ ģ +ëĵ Ģ +ìĹ ł +ï¼ º +ë§ ij +ëĭ ¿ +ì¿ ¨ +ãİ ¡ +Ð Ĭ +íĦ ± +Å ¨ +ïº ³ +ï¾ ı +âĭ ħ +ê¼ ´ +âī ¤ +íĮ ģ +Î © +ê¶ ¤ +ìĪ į +âľ ¿ +ì½ ¤ +ëĪ ħ +íĨ ± +ãħ ľ +áIJ ħ +Å Ĵ +ðŁij ī +ï» ¦ +Ð ª +ë¥ ľ +íķ « +ï¾ ĭ +âĻ « +ê¹ ľ +ë° ¸ +ëĶ ĺ +íĿ ī +ï¾ ģ +ï¾ Ľ +ëł Ľ +ê² ¹ +ì¿ ¼ +ï» ¬ +âŀ ¤ +ðŁĻ ģ +ïº ł +ëĨ ¨ +ë¯ ¹ +ê¸ ĭ +ë» Ķ +ê¹ ĥ +ëij ij +íĭ ¸ +íİ Ļ +âŀ ĸ +ãĥ ½ +ì§ ļ +ï½ ¬ +ï» ¥ +íĮ ½ +âĢ Ĵ +ì ĮĢ +ìŃ ī +ëļ ± +ãĤ ŀ +íĭ Ī +ãĤ IJ +ëī ĺ +Î £ +ê³ ° +ë¹ Ĺ +ï¾ İ +ðŁĺ Ń +íĿ ł +ìĹ ¿ +ê° ļ +ì¤ Į +ë§ µ +ï½ ³ +ãģ ¢ +ï» Ĺ +âī ¦ +Ú ¤ +ë łģ +ê¼ ½ +ï» « +âī § +ì´ Ľ +ìł Ŀ +Ạ° +âĻ £ +ìº ĺ +âĪ ĩ +ê² ī +ë° Ł +ï» Ķ +íĸ ĩ +âĸ Ĵ +ðŁij ı +à ŀ +ðŁĺ Ĩ +ïº ¼ +âĿ Ĺ +ìº Ķ +ì¹ © +ëĸ ¤ +ëĥ ħ +âĶ ľ +ï½ » +Î Ķ +áĥ ¦ +ìŀ İ +âĺ Ģ +âĪ ¼ +ðŁĶ ¥ +ë° Į +ìł ĸ +íĹ Ľ +Î ķ +ïº ĥ +ë¶ ī +âĪ ŀ +íĥ Ń +à ĭ +âģ Ħ +ãħ ĩ +ëĦ ¥ +ëĭ ® +ëł · +íĮ Ŀ +ìº ¡ +ë· Ķ +ì© į +íĤ ´ +ëļ « +âĵ Ĵ +íķ į +âĻ Ĥ +ï¾ Ĩ +âĨ © +ìį © +ïº ķ +íĿ Ļ +Ñ ľ +íĤ · +íĿ ° +íĥ ± +ëķ IJ +ï¾ Ĵ +× ĥ +ëĮ Ħ +ìĺ ´ +ìķ µ +ê¹ ¥ +ëŀ Ń +ìª ¼ +ãİ Ŀ +ðŁĺ ħ +ëı ĭ +ëª « +ïº ¸ +ë® ¬ +ë² ħ +ëij ł +ìħ ° +ì» · +ëĶ ª +ëħ Ķ +ãħ ¡ +ìĶ » +íķ ı +ëį ± +ïº ¨ +ï¾ į +ï½ µ +ì¢ Ģ +íİ Į +ï» ° +ïº £ +Æ £ +ðŁ¤ £ +ï· º +ëĤ ļ +âĭ Ĩ +ë³ į +ðŁĺ Ħ +ìĸ Ģ +ìĻ ł +ëĨ Ķ +íĹ ¨ +ï» Ľ +ï» Ŀ +á» ¶ +ìĸ ĺ +ìİ Ħ +Ú Ĩ +ï» ŀ +ëĢ IJ +ê² Ķ +ï» µ +âĹ ¦ +íļ Ł +ê¹ ģ +ê° ĵ +ëĶ ´ +ìı ĺ +ëļ Ŀ +á» ł +ëŀ ´ +ëĦ ī +âĺ ŀ +ï½ ĺ +Å ½ +ë¦ İ +âĸ ¬ +ëŃ ī +âĩ Ľ +ìį ¬ +ïº Ł +Ë ľ +ë¶ ĵ +ìĽ ° +Å ľ +ëŃ ĩ +á» ² +Ë ļ +ëķ Ģ +âĺ ij +ðŁı ¼ +ìĸ ½ +âĮ Ĵ +Ð İ +É ¾ +íĮ ¡ +ï¾ ħ +ìŀ Ń +ï½ ¨ +ì¹ « +ìľ Į +Ò Ľ +êµ ¿ +ëĭ ¦ +âĶ Ķ +ï¾ ij +ì§ ĸ +ìº Ħ +ãĢ ĥ +Ê ¼ +ê² Ł +ï½ § +Ä ¢ +íİ ł +ë§ · +ê° ĩ +ìĭ ¹ +ðŁĴ ¦ +ï¾ ľ +ëĬ Ļ +ë² ¡ +Å ¿ +ðŁĺ ĭ +ðŁĴ ª +ì¿ Ħ +ë© ķ +ìŃ ¤ +ëĬ Ħ +ðŁĮ ¸ +ãĤ Ŀ +Ç İ +ï½ ļ +Ä Ĺ +ëģ ĵ +ê¶ IJ +áµ ī +ãĥ Ĥ +ê» į +ðŁĺ ¦ +ãĢ Ŀ +ðŁ¤ Ĺ +Ñ Ł +ìĹ İ +âľ Į +ìī IJ +à Ĩ +íĹ IJ +ðŁİ ī +Î ij +ï½ Ń +ðŁĴ Ļ +ìĽ ¬ +íĢ ĺ +ï» ¢ +ðŁĺ İ +íij ¼ +íĿ © +ï» Ħ +íħ Ģ +ëł IJ +ì¥ ¬ +Ð ĭ +ìĥ · +ëľ ¬ +ðŁĺ ĥ +ëĦ ¬ +ë¥ ¨ 
+ìĽ į +ï½ Ĩ +ï½ ´ +ãĥ ħ +à ı +ï» ª +âĻ ł +ëĬ ¬ +ë± Ģ +ë° ĭ +ìĥ Ģ +ï½ ¾ +ëĤ ± +ì» ¸ +ðŁĴ ĸ +ðŁij Į +Ñ ŀ +ì§ ± +Ë Ĩ +ðŁĵ ļ +âŃ ķ +ï¬ Ĥ +ï» ¡ +ëij ¬ +íĪ ¼ +âĸ ¸ +ê° ¯ +ê¹ ħ +ï½ ® +ëĺ ¥ +Ä ¡ +íĮ Ł +Ð Į +ìĨ Ł +ïº ĵ +ï» ¼ +à Ľ +ãĥ ¾ +ëĮ ĵ +íĴ ĭ +ìķ ĵ +ï½ ¹ +ëĤ ¡ +ðŁij ĩ +Ạ¼ +ãĢ Ł +ðŁĮ Ł +íĥ ł +ãĢ Ĩ +âĢ Ł +ë¸ IJ +ðŁĮ ¹ +ìł ¼ +ðŁĵ Į +ìĶ ¬ +âĹ Ģ +ðŁĴ ĵ +ê¹ İ +ìĤ IJ +ìĶ Į +Ñ Ľ +âĶ Ī +ë² ³ +ãİ ŀ +Õ ¡ +íĤ µ +ðŁ¤ Ķ +ëĢ Ķ +ìĬ IJ +íĻ ī +âľ ¦ +ëľ ¯ +ìł ¯ +ëĶ § +Î ¦ +Ë Ī +ìī ¼ +âĹ Ĭ +ëľ © +ëľ ° +ï¾ IJ +ë¿ Ķ +ìĹ ® +ì· Į +ïº § +Î Ĵ +ëµ Ļ +ï» Ĭ +ì° Ķ +íİ Ħ +ðŁĴ Ĺ +Ạ´ +ì° ¢ +íľ ¼ +ê½ Ĥ +ì± Ķ +ìī ´ +âĸ ¾ +íĪ ° +ëĭ Ľ +âĿ £ +ï½ ª +ðŁĴ ľ +Ë ĺ +ãħ ¤ +âĨ Ĺ +íĸ Ħ +âĻ ¬ +ìķ ° +ïº ľ +âī ¡ +ãĢ ĵ +ìij ¥ +íĮ į +íī ģ +ë» Ĺ +íľ ł +íľ © +âľ Ī +íĢ Ħ +ìĸ ĩ +ì¢ ĩ +íŀ Ļ +ëª ¹ +ãĤ Ľ +ðŁĺ ± +ëį Ł +๠ħ +êµ ¶ +Ù « +ìĶ ģ +âľ ª +ï¾ Ī +ðŁĻ Į +âļ ¡ +Î ļ +ì¼ Ī +ï¾ Ķ +ï¾ Ĥ +êµ ī +ïº » +ðŁĴ ĭ +á¹ £ +Ó Ļ +ìĨ ľ +ìĹ £ +âľ © +ìľ Ļ +ïº ° +Ạ² +ìŀ £ +âĿ Į +âĺ ģ +ìķ İ +Ä ½ +Û ģ +ãĦ ± +ëŁ ¿ +íĮ ¸ +ê½ ī +ìı ł +ðŁį Ģ +âĨ Ķ +ëŃ ¡ +ï» ģ +ï¼ Ħ +ðŁĴ ¥ +âĺ Ľ +íĹ · +ëij ¡ +Î ł +Î ¤ +âĦ ĵ +ïº · +Î Ļ +ëı Ķ +ì§ ¤ +âĶ ĥ +ãĦ · +Ç Ĵ +ðŁ¥ ° +ëĶ ķ +ìļ ¥ +ì¸ Ħ +íĽ Ķ +ïº ĩ +ïº ¬ +ðŁĺ ¢ +ë¹ ¡ +ìĶ ¹ +Å ³ +Ë Ŀ +íİ ij +ï¾ ĵ +ðŁĴ ļ +ëĬ ij +êº ¾ +íĨ ° +à ¿ +Ð Ħ +ëĮ IJ +ë½ Ģ +ì· Ħ +ðŁ ĵį +ðŁĻ Ī +âĹ Ī +ê¿ ĩ +ì¼ Ħ +íİ « +ðŁĩ · +âĶ ĭ +âļ ł +ë± ī +ì į° +ìĻ Ī +É ª +ïº ĭ +ðŁĺ ľ +Î Ł +ðŁ ĻĤ +âļ ½ +Å Ī +ë¹ Ķ +íĮ ľ +๠ı +ìĸ ¹ +íĪ Ń +ðŁ¥ ĩ +ãĦ ´ +ëĶ ¥ +ìŃ Ī +âĪ Ĩ +ëĸ ³ +ë± ĥ +ìŀ ¦ +ï» IJ +Î ľ +âľ § +Ï į +ìł ĵ +âĹ ķ +ëĴ Ģ +ï» Ģ +ðŁĶ ´ +ê½ ģ +ëĮ Ī +ëİ Į +ãĤ İ +⦠ģ +ì½ § +ï¯ ¾ +âĿ ¯ +ภħ +ðŁĻ Ħ +âĿ Ģ +ðŁĶ ¹ +âĩ IJ +êµ µ +âĩ Ķ +ë¶ IJ +ðŁĴ Ľ +Î ¾ +íĥ ¬ +âĿ Ħ +Ò £ +ãĢ ° +âĪ ij +âĺ ¼ +âī ł +Ò ¯ +ïº ¯ +ê¿ ¨ +âľ ĸ +Ê ĸ +íĢ Ģ +ê¾ Ģ +íĹ Ŀ +âĶ £ +ãİ ľ +ëĶ Ľ +ëľ ¸ +ï º« +ê¿ ° +ðŁĩ ¹ +Ç IJ +Û Ĵ +ë£ » +ïº ĸ +Ñ ļ +ëĬ ł +Û ķ +ê¹ ¡ +ë¿ ľ +ì² ¼ +ï¨ ij +ë¥ µ +ìį ¸ +íħ ħ +íij ¹ +Ö Ģ +ï³ Į +ãħ £ +ìij ¤ +ì½ ķ +ëķ ł +ðŁĮ ¿ +íĥ Ķ +ìĽ ģ +Î ¶ +âŀ ľ +ìĬ ĺ +íĽ Ĺ +ë© § +ìī ĺ +Õ ¶ +á¹ ĩ +ðŁİ ģ +ï½ ¿ +ï¼ Ĥ +á¼ IJ +âľ ķ +âŀ ¢ +ëĦ ¨ +ì» « +ì¯ Ķ +ì° ľ +ðŁĴ ° +íħ Ŀ +ãİ ı +ë³ ¶ +Ò ĵ +âĨ ³ +ìĥ ´ +íģ ĺ +âĸ Ģ +ë² Ļ +ภĥ +á½ ¶ +Ä ķ +⬠ĩ +ë¤ ĺ +ðŁİ µ +âľ ļ +ïº ı +Î ¡ +âĹ ī +ðŁĴ « +Ð Ī +ìĸ Ħ +ì§ Ļ +ï» ĥ +ðĿij Ĵ +ëŃ Ħ +âĿ ¥ +âĿ ĸ +âĺ Ŀ +Ê ¹ +Ḡ¥ +âĢ ¿ +ãħ ħ +ê¸ ģ +ëķ ¡ +ëį ¥ +âĪ © +ê» Ħ +ë® Į +Ò ± +âĪ Ĺ +ëł Ļ +ïº Į +Ë IJ +ðŁĺ ³ +ðŁij © +ðŁİ ¶ +ì¿ µ +ðŁ¤ © +ê· ¤ +ëĮ Ķ +ïº IJ +Ï İ +ì¶ ¥ +ï½ Ĭ +á¹ Ń +ë¤ ¼ +âĸ « +ì§ ł +á¼ Ģ +ê» ij +ëĮ ģ +íĢ ¸ +âĻ Ľ +ðŁĴ ŀ +âĸ ° +ðĿij ĸ +ëĿ ¤ +ठ¦ +ì´ ĺ +ðŁĺ ĩ +ëĶ ¤ +Î Ĺ +ðŁĻ ĩ +Ë Ľ +ì© ¡ +âĪ § +Õ ¥ +Ñ Ļ +ëIJ ¬ +ëĸ Ħ +ðŁĮ · +ìĹ Į +ðŁĺ ¥ +ëĪ ´ +ï» ļ +É Ľ +ïº Ħ +ï» ı +Å Į +ë² ļ +ìĭ £ +ïº Ģ +Î ĵ +ðŁĺ Į +Ë Ļ +ëŀ ı +ðŁĶ ¸ +ðŁĵ · +ëģ ½ +íģ ½ +ðŁĴ ¡ +ðŁĮ ± +ëº ı +ìģ ł +ìĥ IJ +ëı Ĺ +ì¸ ° +ëĪ ķ +Î Ŀ +âģ ī +ðŁĮ ¼ +íĮ ł +âĭ ¯ +áĥ ĺ +âľ ¤ +ê± Ķ +íĮ İ +ðŁĴ ¯ +ìı Ļ +íĹ ī +Ù Ń +ì½ ° +ïº ¿ +ï» ± +ì± Į +âĺ ķ +ðŁİ Ģ +Ä Ŀ +ë° § +ìĤ ¿ +áij ķ +ðŁį ĥ +âĩ ¨ +Î Ľ +ë§ ´ +ë³ ķ +á ijIJ +âĸ ĵ +ðĿ ijľ +âĻ » +íĤ ¥ +Õ ¸ +ãĪ ± +ëº Ģ +ì² ¸ +ïº Ľ +ðŁı Ĩ +ðŁĩ ª +âĿ ĵ +Ä Ģ +ì½ ¥ +ðŁĩ § +á½ · +âľ Ĥ +ìŀ ¼ +ï§ ¡ +ðŁĵ ¸ +âĻ ¯ +É Ķ +á½ ¸ +âĮ ª +ï» ĸ +ï¥ § +âļ « +âĶ Ĺ +ðŁĮ Ī +ï» © +ðŁĵ ² +Ï Ī +ðŁĺ ¡ +ðĿij İ +ìľ ½ +ì§ ¬ +ì§ Ĭ +á½ ³ +ìĮ ¤ +ëĤ į +âī Ĵ +ðŁij ¨ +âĺ ĺ +Ó © +âĤ ĵ +âĪ Ĥ +ï¹ ģ +ðŁĴ IJ +íħ ĥ +ðŁı ½ +ê· Ħ +ðŁĺ ı +ðŁĮ º +ðŁĺ Ķ +ï½ « +âľ İ +ëµ Ī +ðŁĩ ¸ +âĢ £ +âŀ Ķ +ëĺ ĺ +ìĥ ¬ +Ê ĥ +⬠ħ +ì© IJ +ðŁĻ Ĩ +ðŁİ Ħ +Ä ¾ +⣠¶ +áĥ IJ +âĺ » +ì± ķ +ìģ © +ë½ ķ +ìº £ +ðŁij Ī +ðŁĻ ĭ +ï¾ ĸ +Ò ļ +Õ « +ìĮ Ī +ë² § +ðŁĩ ® +ï½ Ŀ +ðŁį ģ +ìĹ ¥ +Ä ³ +ë½ IJ +íį ½ +íĽ ij +âĤ ¹ +ãħ ģ +ìĶ ½ +ðŁĶ ģ +ठ¯ +ê¾ ¹ +ëī ľ +âĹ ¡ +íķ Į +Î ĺ +ë£ ¹ +ìĻ ĵ +ðŁĩ ¦ +ðŁij Ģ +âĶ Į +á¿ ¦ +ëĦ Ľ +ìĦ £ +ìŃ Ļ +ï± ł +Î ŀ +Ê » +á¿ ¶ +âĿ Ŀ +ê± Ģ +ëĸ ´ +ãĦ ¹ +ðŁĴ İ +Ï ¹ +⼠ħ +ï» ķ +ãĥ ± +ï½ Ľ +ëĮ ķ 
+ë¹ ½ +ì¥ Ķ +ì¿ ¤ +ðŁĸ ¤ +Ñ Ĵ +ê¹ į +ëİ Ģ +ìĭ ¯ +ë» ¤ +ðŁĵ ŀ +ðŁĵ £ +ðŁĺ Ŀ +ìį ¹ +ìĹ ¡ +ì° IJ +á½ IJ +ï» Ī +âľ į +Ä ı +ðŁĮ ŀ +âĦ ¦ +ê½ Ŀ +ë» ĺ +ìĪ ± +âĶ ĺ +ðŁĮ » +âĤ ´ +âŀ ¨ +íIJ ģ +ê ¶Ī +âĺ ¢ +ðŁĺ Ī +ï½ © +âĦ Ĺ +ê° Ń +ê° ¸ +ë» ij +ì¥ ´ +ì» ¥ +ï¤ Ĭ +ï» Ĵ +ðŁĺ ķ +âĺ Ķ +ìĺ IJ +ðŁļ Ĺ +ëĹ Ħ +ë§ ı +Õ ½ +âĸ » +⣠µ +ìī ° +ï» ij +âĻ © +Î ¥ +ðŁĺ £ +âĬ Ĥ +ãħ Ĥ +ìħ ¸ +íı Ħ +âľ ½ +ì¦ Ļ +âĸ £ +ê± į +ê¿ ĭ +ì« Ħ +ìº ĩ +ðŁĩ µ +ðŁij ij +âľ ĺ +ðĿij Ľ +ìį ½ +ìº ī +ï¬ µ +ðŁĶ º +âĦ ® +íĥ ¤ +ðŁĩ º +ðŁĴ µ +íħ ¨ +ï½ ij +Î ¨ +ìĥ ¹ +ìĸ ķ +ì¹ µ +ðŁĵ ± +ठµ +ðŁij Ĭ +ðŁĴ Ħ +ðŁĴ Ŀ +ãĮ Ķ +ìĻ ģ +Ð ĩ +à® IJ +âĸ ¹ +á´ Ľ +âĹ ĺ +ëº ¨ +íĥ ī +ìĸ Į +ðŁIJ ¶ +ãĤ ij +Ë ĩ +Å ı +á½ ¹ +ìħ § +ï¹ ° +ðĿij ¡ +ðŁĶ Ŀ +ðŁĺ » +ðŁĴ ĥ +ðŁ¤ ¦ +ðŁį Ĵ +íĢ µ +âľ Ĩ +ë¹ ´ +ï§ ¤ +ï» Ļ +á´ Ĺ +ðŁĮ ´ +Í ¾ +ëĮ ij +ì¨ ĭ +ìµ ¸ +ðŁİ Ī +ðŁı ł +á½ ± +Û Ĩ +á¿ ĸ +âĢ Ľ +ì° ¼ +íķ ¥ +íĹ ´ +ðŁĩ ¬ +ì° Ŀ +âĪ ł +ï¼ ĩ +âĬ Ļ +âĿ ij +ëĦ ĭ +ëŀ Ĺ +ë° ī +ìĹ Ĭ +ì¢ Ĩ +íĮ ¥ +ï° ² +ðŁĵ ĸ +ðŁĺ ® +âļ ª +ðŁĺ ļ +âĿ ŀ +ðĿij Ł +ðŁİ Ĥ +Å ķ +áIJ Ī +êº ½ +ì± ł +ïº Ŀ +ê¿ ī +áĥ ł +ðŁı ĥ +ðŁĴ ¸ +âĿ ģ +âĹ ¾ +Ú ª +á¹ ĥ +íĬ ¬ +ðŁĩ ± +íİ Ń +ðŁĺ ŀ +ë¾ ° +á¹ Ľ +ëĽ ¸ +âĿ Ĥ +êĴ ³ +âĶ IJ +íĵ ° +âŀ ł +ê´ ĺ +ëħ ĺ +ë» ¥ +ì¾ ħ +ðŁĺ IJ +âĪ ª +ðŁij ģ +âĪ ´ +âĹ ģ +ëº IJ +ìŀ ¤ +ì± Ĺ +ðŁı ¾ +Î § +á½ » +âŀ ¥ +ìŁ Ī +ï» ī +âĸ Į +ãĥ ® +ðŁ¤ ¤ +âĩ ĵ +ì¼ ł +á´ ı +ë§ ¬ +ë» £ +ðŁĴ ¬ +ðŁį ĵ +Ä ¸ +Ù ¹ +Ê ¿ +á½ ° +ëķ ľ +ì° ¡ +ì° » +íİ į +ðŁİ ¯ +ðŁį Ĥ +ðŁij § +âĻ ¢ +áĨ ŀ +âĻ § +âļ ľ +âľ ī +ëĵ ¦ +ëŃ £ +ìĪ ı +ìĵ ± +Å Ń +Ê Ĭ +âĴ ¸ +âĩ © +ðŁĴ Ķ +Õ µ +Ð ī +Ò » +ë§ £ +ìĽ ľ +ì¿ ¡ +íĽ ħ +íĽ ¤ +ïº ¢ +âľ ĭ +âĪ Ī +ðŁĮ į +Ê ľ +ëĬ ª +ëĴ ¹ +ïº ² +âĸ Ħ +ãħ Ī +ëļ ¤ +íİ © +âĪ ¨ +ðŁ¤ ª +áĥ ļ +ê³ ¶ +íĬ ķ +ðŁĺ ¬ +âĪ « +ðŁij ĭ +Ò IJ +íĬ ¿ +ðŁĶ µ +ðŁĴ ¨ +ðŁĮ Ļ +ëĩ © +âľ ³ +ë¨ ģ +ëº Ħ +ìĻ ij +ìº ħ +íı Ī +ðĿij Ļ +ðŁĴ ĺ +ãİ ¥ +âĿ ı +âľ ° +ï¯ ¿ +ëµ IJ +ì¼ IJ +ïº ± +Õ ´ +ï¬ Ģ +âľ ´ +ðŁ¤ Ń +ðŁij Ĩ +âĽ Ķ +ê· ĵ +ìĮ Į +ðŁ¤ · +Û Ķ +ðŁ§ ¡ +ðŁĺ ĵ +Î ĸ +âı ° +ê² ľ +ëĭ ³ +ëİ ħ +ë° Ī +ï® IJ +ðŁı ¡ +âĨ ª +âĵ Ķ +âľ Ĭ +Ï ² +Ü IJ +ðŁĩ ³ +Ö Ĥ +âľ ı +ìĸ Ĺ +ì« Ļ +ðŁĺ ² +Ä Ń +âĻ Ń +âĶ ı +âĹ Į +ðŁĺ ¯ +áµ Ĵ +íĬ ł +Ä · +Ê ģ +à¤ Ł +á¹ ģ +á¼ ° +á¿ Ĩ +â « +â« ¸ +ëį « +ì³ ĩ +ì¼ ¤ +íĽ ¨ +ðŁĴ Ł +Ê Ģ +Ê ³ +ëĵ IJ +âķ ° +âĿ ĩ +Ç Ģ +Ç Ķ +É ´ +âĺ ļ +âĺ ľ +ê¶ Ĥ +ì« Ĵ +ì± Ī +ðŁĩ ¨ +ðŁİ ¥ +ðŁĵ Ŀ +Ä § +ðĿ ijIJ +Û Ī +ठ¬ +ì¬ IJ +íĹ ¥ +âĻ ¨ +ðŁį ´ +ï¹ ı +Ë ĭ +ðŁ¥ º +âĸ ¨ +íĻ ĭ +âĪ ħ +ëģ Ļ +ëŀ ł +ìĨ ¥ +âĢ ĸ +ðŁ¤ ĺ +ðŁIJ » +áµ ķ +Ç Ŀ +âĺ ı +ïº ļ +ï» Ĥ +ðŁļ © +ìĪ Ł +Ë Ĭ +⤠µ +ðŁĴ § +ã ħį +ë© © +Æ ¬ +Î ĩ +âĩ § +âĵ ļ +ìĤ ¯ +ìĪ ¯ +ëĨ ĭ +âľ ¯ +ðŁļ Ģ +Ú ĺ +Ú ¨ +âľ Ń +ê² ħ +íĮ ° +íľ Ļ +ðŁĮ Ĭ +ðŁİ ĵ +ðŁĺ Ļ +Ë ĥ +ðŁĴ ģ +ðŁij İ +âĺ ¹ +ðŁĺ « +ðŁĴ » +ëĤ µ +ìĿ Ĭ +íĮ » +Ò ³ +á½ ² +âŀ ŀ +ëĤ ij +ëĿ Ī +ì£ ¤ +ï» ¯ +ðŁĩ © +ðŁ¥ ³ +âĴ ¼ +ðŁ¦ ĭ +âĺ Ĥ +ðŁĺ ° +ðŁĻ ĥ +ðŁĺ Ĵ +Û İ +Ï ķ +Ḡ¤ +ë£ ½ +ìĬ ¥ +ðĿij ī +É IJ +ðŁį İ +âķ ¯ +âķ ¹ +ຠ² +ï¾ ł +ë¹ ķ +ïº Ĩ +Ê º +Ó § +âĨ ł +ëĥ ĩ +ìİ Ī +ìŁ ¤ +ï± ¢ +âķ ¬ +âĺ ł +ðŁİ Ĭ +ãį į +ãİ İ +âĺ ° +âľ ĥ +ãħ ī +ë¯ Ī +ë¹ ¤ +ìı Ń +ðĿij ¢ +ðŁIJ ¾ +Å ĭ +ðŁij ¶ +âĶ Ľ +ï¿ ¢ +áĥ ¡ +Ä ¼ +Å Ĩ +Ñ IJ +ìĥ Ľ +ìĺ Į +ì± ¤ +íħ ģ +íļ ĥ +ï³ Ĭ +ðĿij Ķ +ðŁĩ « +âĭ ° +ðŁĺ ¨ +âĤ © +Õ ¬ +Ḡį +á» ´ +âĨ ĺ +âĺ ¯ +ãħ ı +ìł ¬ +âĻ Ķ +ðŁĶ Ķ +ðŁĺ ł +ðŁĻ Ĭ +à® ľ +á¹ ħ +âĹ IJ +âĿ Ī +âŀ ½ +ìĥ ħ +ðĿij ł +Æ ¢ +âĭ Ļ +ê° Ľ +ëĿ µ +ë£ Ł +ìı ľ +ïº ģ +ðŁĴ Ń +âĬ ĥ +ðŁIJ ° +ãħ Į +Ü ĵ +âŀ ķ +á½ ģ +ìķ ³ +ðĿij Ŀ +ðŁİ ¬ +É ¡ +à¤ Ĺ +áIJ ī +ì© ľ +ì¶ § +ï³ ī +ï» ħ +ðĿIJ ŀ +ठ¶ +ðŁĵ ¢ +ðŁį ĭ +ðŁĴ ħ +ï¾ ķ +⬠Ĩ +âĪ µ +ðŁ¤ ij +áĥ £ +Æ Ħ +Ñ ¹ +á¼ Ķ +ê° ł +ê´ Į +ê· IJ +ëĽ ´ +ì± ĺ +ï® Ń +ïº ¹ +ïº ¾ +âľ Ĺ +âĿ ¦ +ðŁij ¦ +áĥ Ĺ +Ù ² +á½ ´ +âĪ ı +âľ ® +ê¹ ° +ë² µ +ìĦ Ģ +ì© Ŀ +ïº ŀ +ïº ½ +ðŁĩ Ń +Ë Ĥ +ðŁį ij +ðŁį Į +ðŁĶ » +ê¹ ¬ +ìĬ Ń +ìľ · +ðŁĽ ij +Ç § +ë¼ Ľ +ïº ¡ +ïº º +ðĿij ļ +ðŁĵ ¦ +ðŁĶ İ +ðŁĹ ĵ +áĥ Ķ +âľ Ĵ +âľ ¡ +ðŁĮ µ +âĶ ķ +ëĢ Ŀ +ðŁį Ĭ +âĺ ĥ +ìĺ 
ħ +ঠ¬ +ðŁ¦ ģ +âİ ¯ +ðŁIJ ķ +Ñ ¿ +ॠ¤ +༠ĭ +ê· Ī +ì« Į +ðŁĩ ° +âĿ ī +ì« Ģ +íĿ Ħ +ðĿIJ ¢ +ðŁļ ¨ +âĻ ¤ +ðŁĺ © +ðŁį į +ðŁĺ ij +ðŁļ ļ +Ö Ħ +ë « +ë« ¼ +ठı +á¿ · +âĮ © +âĺ IJ +âŀ £ +ê¸ ± +ê¼ ¿ +ëĦ Ŀ +ìı ´ +ìļ ¤ +ì¿ ± +íİ IJ +ðŁĴ ¢ +ì´ IJ +âĩ ij +âĶ ĵ +âģ ¾ +Ü Ŀ +ðŁ į° +â´ ° +Æ ı +Ï Ł +Ú º +Û ĥ +áĦ Ĵ +âĪ Ł +âĿ į +ãĦ ² +ìľ ħ +ì¤ ı +ðŁĩ ² +êº Ħ +ðŁİ ¤ +âľ £ +⸠Ŀ +ï¸ µ +ຠ§ +áĢ Ļ +âķ ł +Õ ¯ +âı © +ðĿij £ +ðŁĴ £ +Å ĺ +ॠIJ +âģ ĥ +âĮ ĺ +ê» Į +ìĮ Ķ +ðĿij ĺ +ðŁ¤ ĵ +Õ ¿ +à¤ Ń +âĮ ļ +âľ Ŀ +ðŁIJ ¼ +Ë Į +âķ ļ +ï¦ Ĺ +âĿ ķ +âķ £ +ðŁIJ ± +à® ¤ +Ñ ¾ +ठļ +ठľ +ìĪ Ħ +ìļ ľ +ðŁİ ® +É Ĵ +Ú · +ຠį +âĨ µ +â Īĺ +âĿ Ĭ +ë¿ į +ìIJ Ī +ìļ ĺ +ì¯ § +íĥ ¯ +ìĸ ı +ï¸ ° +ðŁĩ ¯ +ðŁ§ ļ +ðŁĺ µ +ðŁĺ · +ðŁĮ ³ +ຠ¥ +Ä ī +Ä ¥ +âľ ¶ +á¿ ¾ +âĬ ± +âĺ ¾ +ê° ī +ê¼ ° +ëº ij +ðŁĶ Ĭ +ðŁĸ IJ +Å ¤ +Ò « +à® ® +âĮ Ī +âĹ Ĺ +ëĦ µ +ëħ ľ +ëľ ¹ +ðĿij ¥ +ðŁĴ ¿ +ðŁĽ Ĵ +Ê Ĵ +áŀ ĵ +ðŁIJ Ŀ +ðŁ¦ Ħ +ðŁį · +âĺ Ł +ï¸ ¶ +ðŁ¤ Ł +Ô ± +âĨ ² +âĪ İ +âľ « +ëĩ ½ +ëı IJ +ëķ Ħ +ï¦ ³ +ï§ Ŀ +ïº Ļ +ðŁij » +ðŁĵ º +êµ ¼ +ìĮ © +ðŁĮ ² +È ± +íĶ ķ +ðŁĺ ¤ +ãĮ ¢ +Ê Ķ +ठ¡ +á¼ Ī +ëİ ĥ +ë© ± +ë® Ī +ðĿIJ « +âĬ ķ +ëĥ ł +ë» ¬ +íĭ Ķ +Õ ¤ +á¼ ± +âľ ¥ +âĺ Ħ +âĪ ¥ +âļ ķ +ðŁij Ħ +ðŁİ ħ +àº Ļ +âĶ ¬ +á½ µ +Õ ¾ +Ö ģ +âĹ Ķ +ê¿ į +ëĸ µ +ë© İ +ë® ´ +ìķ ´ +áĥ ľ +á¼ ¡ +âĶ Ĭ +âķ ® +âĹ ¼ +ðŁį ¾ +ðŁĽ į +ðŁij Ĺ +ðŁ¤ ŀ +âľ Ħ +Õ Ģ +ঠ² +Ë ī +⣠¨ +Ä ¯ +Ï Ĭ +á´ ľ +ë¹ ³ +ï³ ĭ +ï¿ ł +Ä ª +âĤ ¸ +âľ ± +ê» IJ +ëĭ » +ë§ ¸ +ìŀ ¿ +ì© ¨ +ì ŃIJ +ì° ¿ +íħ Ł +ðĿIJ § +ðĿij ij +ðŁĮ İ +ðŁĵ ® +ðŁķ Ķ +âĹ Ļ +âĹ » +âŀ § +ìŁ Ŀ +âľ ¬ +ãĥ ° +âģ Ī +â ĵĺ +ðŁ ĴĮ +ï¬ ĥ +àº Ķ +ìĶ ° +ðŁĺ ª +× Ģ +ìĥ ¨ +ïŃ ĭ +ðŁį ķ +ðŁĺ ´ +Ï ³ +á¼ Ħ +á½ ħ +âĩ ¢ +âķ Ń +ìĺ » +íĬ ¤ +Ü ĺ +⤠´ +âĹ į +áŀ Ł +ðŁį º +áŀ ļ +ðŁı Ĭ +ðŁIJ · +Ê Į +á½ º +âģ » +ê½ Į +ëĪ Ĺ +ë Ĺı +ì¿ ° +íĢ ¼ +íį ħ +ï· ² +ðŁĮ ı +ðŁį « +ðŁį ³ +ðŁİ ° +ðŁij ° +ðŁĴ ² +á¥ Ļ +ðŁIJ Ł +ï¿ ¡ +ðŁĹ £ +ðŁį ľ +âľ ² +ãİ ¢ +ðŁĶ ° +á¼ ¸ +á½ ij +Ä İ +áĦ Ģ +âĻ ķ +ëł Ŀ +ìĪ ´ +ïŃ Ń +Ó ľ +Ô Ģ +ëĢ ľ +ëĥ Ķ +ìĬ Ľ +ì« ij +ìº ¥ +ìº ¬ +ðĿij ¦ +ðŁĶ ¶ +ì¾ ¨ +ðĿIJ ļ +ðŁį » +ðŁĴ į +ðŁ¤ ¡ +ðŁķ Ĭ +â½ ĩ +âĵ IJ +ðŁį Ń +ðŁį ª +ðŁĶ Ĩ +Ò ¡ +á´ ĩ +É Ĺ +Ü Ķ +âĦ İ +âĿ ĥ +ëĹ Ģ +ï² Ķ +ïº Ī +ðĿIJ » +ðŁĴ Ĭ +ðŁļ « +Ñ ° +Ñ ³ +ठ· +âĹ ł +ðŁij ¤ +ï¾ ĩ +âĺ ĵ +ðŁį µ +ðŁ¤ ¨ +âĸ Ń +à® ´ +Ü ¢ +Ü ¬ +à´ ® +ðŁķ º +Ô ¹ +Õ £ +à´ ¯ +á ´Ģ +âĮ ī +âľ IJ +âŀ ¦ +ê¹ ½ +ëĮ ľ +ðŁı ¥ +ðŁĵ © +Ò ¹ +Ó ĺ +ठħ +âĿ § +Æ Ĺ +âĹ ½ +ðŁij « +ðŁİ § +ðŁij £ +âľ » +ðŁĻ ħ +ðŁĺ ĸ +ðŁĴ ® +ຠ° +ðŁĶ ľ +ðŁį Ħ +ðŁ¤ Ŀ +á ĥĿ +áŀ Ģ +âĩ ¦ +Ê ¾ +Ò ® +Õ ¼ +ठĨ +âĹ ħ +âļ ĵ +âļ ĸ +ê¿ © +ë¯ Ħ +ìIJ IJ +ìŀ ° +ì§ Ń +íĭ ĭ +íİ ¨ +íĻ § +ï² ij +ðŁİ Ĺ +Ù ³ +ðŁij ¸ +ঠ® +ðŁij ķ +Ú µ +âĢ ¾ +âŀ ° +ðŁij ¯ +ðŁİ ¼ +ðŁı ģ +Ä º +Ê ı +Ú ³ +âı ± +ê½ Ī +ëĿ Į +ìĮ ī +ìĹ · +ìŀ ´ +íĹ ¹ +íľ ¨ +ðĿĹ ² +ðŁĮ IJ +ðŁİ Ļ +ðŁı µ +íĽ Ļ +ðĿij ħ +ðŁĺ ¶ +âĵ ħ +âķ ¥ +ðŁį ı +ï¦ İ +Õ © +ðĿIJ Ħ +Ó £ +Ú ¿ +âĻ ļ +ðŁĶ Ĺ +Ḡ« +âĭ ® +âĸ ¦ +⼠½ +âľ µ +ãħ Ĩ +ãħ Ĭ +ëĦ Ļ +ëĿ ¨ +ë¥ Ħ +ìĦ ¦ +ì§ ° +ì§ ¹ +íī Ī +ï§ ij +ï» ĩ +ðŁĮ ¾ +ðŁı ĸ +ðŁIJ ij +ðŁĴ ³ +ðŁĵ Ĩ +Û ĩ +Ü ķ +á½ ½ +ëĦ ľ +à´ ² +à´ ³ +àº Ń +áĥ Ľ +âĿ Ķ +âij ħ +áĥ ¥ +ðŁĵ ħ +âŀ ³ +á´ µ +ï¹ ¡ +ï¹ ¶ +Î Ĩ +ठ¥ +áī µ +âĿ Ļ +âĿ ± +ëī ł +ëİ ł +ëı Ľ +ë¿ ħ +ìĶ ¸ +íij ¯ +íŀ ī +íŀ Ľ +ï§ Ħ +ïŃ ĺ +ïº ¦ +ï» ¸ +ðĿij Ĥ +ðĿij ı +Ï ij +Ú ł +áĢ Ķ +áŀ Ķ +á¹ ¢ +ëĦ ¸ +ðĿIJ ¨ +ðŁĩ ´ +Õ ° +ðŁij ł +ðŁį Ĩ +ðŁı Ģ +ðŁ ijIJ +ðŁį ĩ +ðŁIJ £ +áĪ Ń +Ü ª +ðŁ ĮĢ +áŀ ĺ +âĩ Ħ +ðĿIJ Ģ +Ê Ļ +âĶ ¼ +ðŁı ¿ +Æ · +È ł +Ñ ½ +âĤ ¨ +ê´ Ń +ê¹ » +ëĶ ¨ +ìĪ Ģ +ì¾ ° +íĨ Ī +ï® § +ï¯ ½ +ðŁĶ ħ +ðŁĶ ® +Å ¢ +Ê ° +Ñ ¸ +ठ£ +âĬ Ĺ +ëª Ħ +ï¹ · +ïº ħ +ðĿIJ µ +ðŁĮ ¶ +ðŁĵ ° +ðŁĶ · +ðŁĸ Ĵ +ðŁ¤ ² +ëī © +ðŁİ Ĩ +ðŁ§ IJ +ðŁį ® +âĨ º +âĿ ¢ +ðŁij ª +ðŁij ± +âĨ ¡ +áŀ ı +Ú ķ +ðŁį ¹ +ðŁĴ Ģ +Ë ® +Ó ¨ +Ö ħ +ठĩ +âĤ ¡ +âĪ ķ +âĺ ī +ê¹ ¼ +ê¼ IJ +ì½ ¸ +ðĿIJ ¬ +ðŁı ħ +ðŁij Ļ +ðŁĴ ī +ðŁ¤ Ļ +È ĺ +É ³ +É ¹ +Ù º +áĢ Ħ +á¿ ³ +âļ ĺ +âĿ Ĩ +ëĨ ī +ìĸ į +ìĺ ĩ +ì¥ ĺ +íĸ ħ +íĻ ij +ï® Ĭ +ï¿ Ń 
+ðĿĴ IJ +ðĿĹ ¢ +ðŁĶ ĸ +ðŁĶ ¨ +ðŁļ ij +ðŁļ ² +Æ ¸ +âĹ ¥ +ðĿIJ Ń +ðŁį ½ +âĹ ij +âĵ ĩ +ðŁĶ ± +âľ ¼ +ï¹ ĥ +âķ ± +ãĢ Ĺ +ðŁı ĭ +ðŁļ ´ +ðĿIJ ® +Ä ļ +Õ ı +Ä ¶ +áĥ ij +á¹ ¬ +Ä Ī +Ä Ĵ +Ò ° +Ó ķ +â IJ +âIJ £ +âĹ ¢ +âļ Ļ +ãħ Ĺ +ê° ¬ +ê³ ª +ê» Ģ +ëĦ ´ +ëİ ģ +ëĿ Ķ +ë¬ ½ +ëŃ į +ìĩ ³ +ì° ¹ +íĮ ¹ +íŀ Ŀ +ï® ĭ +ï ¶Ī +ðĿĴ Ĥ +ðŁ¥ Ģ +ðŁ¦ ħ +Ê ĺ +á¼ ij +âģ İ +ðŁį ŀ +âĨ ĸ +âĨ Ļ +ðŁİ ĥ +âĦ ¡ +âĭ ± +ðŁĶ į +ಠ¨ +áµ ĥ +âĶ « +⦠¿ +ðŁĩ » +Æ ¤ +Ò ı +Ò · +Û ī +à® ķ +Ḡ³ +ï¬ ± +ðŁĨ Ķ +Ú Ń +Û ¦ +áħ ¡ +âĦ ¹ +ê¿ İ +ëķ Ķ +ë¼ ī +ìļ § +ì² µ +ì´ ¨ +íĬ Ī +íĸ IJ +ðĿĹ ĺ +ðŁĩ ¿ +ðŁİ ĸ +ðŁij ħ +ðŁ ĵĺ +ðŁļ Ļ +ðŁĽ µ +à¶ ½ +⼠µ +ðĿIJ ³ +ðĿIJ ¸ +âļ Ķ +ðŁij Ń +Ó ij +âĶ ¯ +ðŁħ ¿ +ðŁĺ ¹ +ï¿ « +â¼ ¤ +ðŁĴ ĩ +ðŁĵ İ +ðŁĸ ĭ +ঠ¸ +ðĿIJ į +Ä ² +Ï ĭ +Ñ ¬ +Ú ¬ +Ü Ĵ +á´ ¬ +ï¨ Ħ +É £ +Ë ij +Ï µ +Ò Ŀ +Û ¥ +Ü ł +๠Ľ +áĥ ķ +áĬ ķ +á¾ ¶ +âĤ · +âĩ ¾ +âķ © +âĸ IJ +âĺ ª +âĺ ® +âĿ ļ +âĿ Ń +âŀ ± +âµ İ +ãı Ĭ +ë© ĵ +ìĹ ¾ +ìª Ħ +íĵ Į +íķ ¼ +ïŃ ¬ +ðĿij Ĩ +ðĿij ŀ +ðĿĸ Ĭ +ðŁİ ¸ +ðŁı Ħ +ðŁij µ +ðŁĴ ł +ðŁĶ ĺ +ðŁ¥ Ĥ +Å ª +à· ĥ +á´ ¼ +âĬ ° +ë³ ı +ë´ £ +ï¥ ľ +ðŁĵ Ī +ðŁķ ¯ +ðŁ§ Ģ +âĻ IJ +ðŁĨ Ĺ +ðŁĵ ķ +ðŁ§ ģ +Ü « +âĿ IJ +Õ ķ +འķ +âŀ Ŀ +ঠķ +ðĿIJ ¶ +É ¢ +Î Ħ +áĨ ¢ +âĤ ± +Õ į +à¡ ķ +á´ ° +Ḡ© +⼠· +âĿ ® +ê¡ ĵ +ëı ¤ +ëĹ IJ +ëµ Į +ìij Ī +íı ¿ +íĹ µ +ðĿIJ İ +ðŁĨ ĺ +ðŁı Ł +É ¥ +Õ » +à¡ Ķ +ठĸ +á´ ¸ +âİ Ļ +âİ ¥ +âı ³ +ëģ ķ +ëĬ ī +ì¡ į +ì¹ ¡ +ï¦ ¶ +ï¬ Ł +ï® « +ï® ¯ +ï± ĥ +ï ·» +ïº µ +ðĿĹ Ķ +ðĿĹ ¡ +ðŁİ ¨ +ðŁĶ Ĵ +Ú Ľ +ठ§ +âŀ ¹ +áĢ Ģ +ðŁį ħ +âĹ ¤ +ठł +ðŁIJ ¥ +áĥ Ĵ +ðŁı Ŀ +ðŁį ¼ +ãĮ § +âĿ Ľ +ðŁIJ Ī +ঠ¯ +áĢ ŀ +ãĢ ĸ +áŀ Ļ +ঠª +Õ Ĩ +âĬ Ĩ +âľ ¾ +ðŁIJ Ĺ +ï¹ ¿ +Ä ¦ +Ü Ł +ಠł +ಠ¥ +áŀ ī +á´ ¥ +á´ © +á½ Ģ +á½ ¡ +âĨ ķ +âŀ ¯ +ê¡ ij +ëij £ +ë± Į +ìĪ ij +ìľ Ķ +ìŀ ½ +ì¨ į +ðĿij Ģ +ðŁĮ Į +ðŁį ¦ +ðŁį © +ðŁIJ ļ +ðŁĵ Ĵ +ðŁĵ ¹ +ðŁ¥ ij +Ä ĭ +Ë Ĺ +Ñ « +Õ ¢ +Ú ° +â ĮĢ +âĹ Ĥ +âĹ £ +âľ Ľ +âĿ Ĵ +âĿ ĺ +âŀ Ļ +âŀ ² +ãİ į +ê¡ IJ +ëŀ ĸ +ìĬ Ŀ +ìĽ ¤ +ì¡ ĭ +ì¨ ° +íĹ Ļ +ï¥ ¸ +ï³ į +ï» İ +ðĿij ĵ +ðŁĵ Ĭ +ðŁļ ¼ +ï¦ ģ +ðĿķ Ĵ +ðŁ ijľ +ðŁij ¿ +ðŁĩ ½ +à· Ħ +âĸ ´ +ãį ī +âĬ ĩ +ðŁ§ ¸ +Ú ¡ +â¾ ĥ +ðŁĹ » +âĵ ij +ðŁ¤ ¸ +ðŁ¤ ¯ +êĴ ° +ðĿIJ ĵ +âĶ ´ +êĴ ± +áĢ ĺ +â ĽĦ +ï¹ ¹ +Ó Ķ +áĥ ± +Ü ¡ +ß ŀ +âĻ ı +âľ ¸ +ìij ¨ +ðĿIJ Ŀ +ðĿIJ ¥ +ðŁį ī +ðŁij ¼ +ðŁ¥ Ŀ +Æ Ķ +Ý ¬ +ठ« +ຠļ +á´ ´ +á½ ĸ +âĤ ¶ +âİ ¢ +âĿ ħ +⣠« +ãİ Ľ +ë® ¨ +ëº Į +ë¼ ĺ +ìĨ Ŀ +ìľ ³ +ìŀ Į +ì£ Ĺ +ìª ĺ +ì» ¹ +ï· ¼ +ïº Ĥ +ðĿIJ ´ +ðĿIJ ¼ +ðŁĮ ļ +ðŁı « +ðŁĴ ¤ +ðŁĴ ¶ +ðŁĴ ¼ +Ê ķ +Ê ½ +â² Ł +ãī ł +ê¡ Ĵ +ëľ Ģ +ìĥ ¾ +ì¸ ¤ +ï¥ ģ +ðĿļ Ĭ +ðŁļ ĥ +âŀ Ľ +ìħ ´ +áĦ ĭ +âĩ Ĺ +ï§ · +âĺ ĸ +ðŁIJ ¦ +⸠ľ +ðŁĴ ´ +ðŁ¤ ļ +ãĬ Ĺ +âĮ Ľ +áĪ Ľ +༠º +â½ ī +ðŁı ¢ +âĵ ŀ +âĺ ½ +ãĢ Ļ +ðŁ¤ ® +Å IJ +áĥ ¬ +ðĿĹ » +ðŁį ĸ +Æ Ĭ +Ê Ł +ß ĭ +ठĭ +áµ Ķ +á¿ ĥ +âĦ ī +âĮ ĭ +âı ² +âĵ Ī +âĵ ¢ +âķ Ķ +âļ ij +âĿ ĭ +âĿ İ +â µľ +âµ £ +ëĴ Ī +ëľ ģ +ë¶ ĩ +ìį » +ìĺ Ń +ì§ ¢ +íĹ Ģ +ï§ Ĭ +ï ¬¸ +ï± ¡ +ðĿIJ º +ðĿij § +ðĿĺ ¦ +ðŁĵ ¥ +ðŁĺ Ł +ðŁ¥ IJ +Ä ĸ +É ¨ +áĢ IJ +áĥ ĵ +Ạĵ +á¼ ¶ +á½ Ħ +âĤ ¤ +âĮ ľ +âĮ Ł +âİ ł +⼠¸ +âµ į +âµ ı +âµ ĵ +ãĢ ĺ +ë ·¸ +íħ ¼ +ï¦ Į +ïŃ Ħ +ïŃ İ +ðĿĻ ļ +ðĿļ ĺ +༠ĵ +ëŃ ħ +áIJ Ľ +ãİ ¾ +ï¨ Ģ +ðŁĹ ½ +âĻ ŀ +Ë ĸ +âĹ ŀ +ðŁ¤ « +ðŁĺ Ĺ +ï½ ¦ +ðŁ¤ ¢ +âģ ĩ +ãĢ µ +ðŁį Ķ +áĬ ł +ðŁĺ ¼ +ðĿĹ ® +ðŁIJ ³ +ðĿIJ ĭ +ðŁĨ ļ +ðŁĶ Ľ +Ñ » +Ü ¨ +à® ² +âľ ŀ +âµ Ļ +êµ £ +ì¸ ¨ +ðĿ IJľ +ðĿĺ ° +ðŁĶ ½ +Ç » +Ç ¿ +Ê ĩ +Î IJ +Ð Ģ +Ñ ¡ +Ñ ² +Ò Ĵ +Ù ¶ +ß ķ +à¶ ± +áIJ ģ +âģ ŀ +âĸ § +âĽ Ī +âľ ľ +âľ ¹ +⣠¹ +⤠ĩ +ê² Ĭ +ê¾ ľ +ë¯ IJ +ë³ IJ +ìħ © +ìIJ ¬ +ìij ¹ +ï¤ Ķ +ï¦ ļ +ï¬ ł +ïŃ Ķ +ïº ¶ +ðĿĴ ı +ðĿĸ Ĩ +ðĿĹ ¶ +ðŁı Ĥ +ðŁIJ ½ +ðŁĴ © +ðŁĵ ½ +ðŁĹ ¨ +ðŁĹ º +ðŁĺ ¸ +ðŁ¥ § +Å Ĺ +Ê İ +Ò Ļ +× ² +à¤ Ī +á¼ ´ +á¿ ij +âµ ī +ãħ ĵ +ì½ ´ +ðĿĸ ĵ +ðŁĵ Ĺ +ðŁĶ ª +ðŁĸ į +Ï Ĵ +ðŁij ¬ +áĥ Ļ +âĨ ¬ +âĶ ¤ +⼠¹ +âĻ Ł +ðŁļ ¶ +ðŁij ¾ +âĪ ĭ +ðŁIJ ¯ +à¼ İ +âľ · +ï¨ Ļ +âĶ » +ðŁij ¹ +áĦ ī +ຠª +â¾ ı +â½ ħ +ãİ ĸ +Ñ ´ +Õ ® +Ú ¼ +áĢ ķ +áĨ ¼ +ëŃ ı +ðŁIJ ¸ +ðŁļ £ +Æ Ŀ +Ô » +áĥ ¢ +ðŁį ¯ +É ¦ +Õ ¦ +âĻ ĭ +ï¬ « +ðĿĹ ¦ +Ç ļ +É ± +ठī +á´ 
Ħ +âĻ ĵ +⼠° +⣠ª +ëĥ ĺ +ë¢ ¸ +ìĤ ij +ï® Ķ +ðĿķ ĸ +ðĿĹ § +ðŁĩ ¼ +ðŁĵ ĭ +ðŁļ ľ +ðŁ¥ ¤ +Ä ® +Å · +ß Ĭ +ॠ¥ +à® ª +áŀ Ħ +áµ Ģ +Ḡħ +á¼ ¢ +âĪ Ŀ +âĬ ¹ +âĴ ¶ +âķ ´ +⼠± +⼠³ +⼠º +âŀ Ł +ãı Ħ +ê¸ Ķ +ê¹ Ł +ëĩ ° +ë¹ » +ìĤ ¥ +ìĽ » +ì° Ł +íĥ ° +íĨ º +íļ ½ +ï¤ ´ +ï¥ ¾ +ï³ Ŀ +ðĿIJ ¦ +ðĿĴ ľ +ðĿĴ Ł +ðĿļ Ĺ +ðŁİ Ń +ðŁı ĵ +ðŁı ³ +ðŁı º +ðŁIJ į +ðŁij ĥ +ðŁĴ ı +ðŁ¤ ĸ +ðŁ¤ µ +Õ ² +âµ Ķ +ëĺ ¬ +ï¦ £ +Ê Ĥ +áĨ « +áŀ ij +ðĿĸ İ +ðĿĹ ĸ +áĦ ĥ +âĩ ł +áĢ ¡ +འĦ +âŀ ¸ +ï¦ Ļ +âĩ ļ +ðŁIJ ¬ +ðŁIJ ¢ +â¾ Ĵ +ðŁIJ ¤ +ðŁĶ « +ãĢ ŀ +ï¸ º +ðŁĺ º +â½ ´ +ðŁĨ ķ +âģ ¿ +ðŁį ¨ +ಠķ +ðŁļ ĺ +áŀ ħ +ঠħ +áŀ ¢ +ਠľ +â ļĮ +ãĢ ½ +à· ´ +âĵ Ľ +áĢ ľ +ìĨ ¨ +Ë © +Ü Ĺ +âĭ ¼ +ðŁĻ ī +Å Ĭ +É ĵ +Ê ² +Î ° +Ñ ¼ +Ô ¿ +à¡ IJ +༠ľ +འ¦ +á¶ ľ +âĤ ² +âĨ ¨ +âĬ ¥ +âķ § +âĻ ľ +ãĭ ¡ +ë´ ¬ +ë¶ ij +ìī ¿ +ìİ ħ +ìł ± +ì° § +ï² ¡ +ðĿĴ Ľ +ðĿķ £ +ðĿĹ ľ +ðŁį ² +ðŁİ © +ðŁIJ IJ +ðŁIJ ł +ðŁij ½ +ðŁĴ ij +ðŁĵ ľ +ðŁķ µ +ðŁ ļĮ +ðŁĽ £ +Ê ĭ +Ó ¯ +Ù ¸ +ß Ķ +ß Ļ +à¡ ĵ +á´ į +Ḡ¿ +âı º +âĸ ¥ +ë¤ ½ +íľ ij +ðĿIJ ¹ +ðĿĸ Ķ +ðĿļ İ +ðŁĵ Ħ +ðŁ¦ · +Æ ĥ +à¦ Ł +âĮ Ĥ +âĺ Ń +â² ļ +ëĿ ķ +ðŁİ £ +à® ĩ +འĨ +áħ µ +áĹ ľ +âĢ ½ +âĮ £ +âģ ½ +ðŁĵ ¬ +ðŁ¤ § +âĩ ª +â½ £ +âĹ Ł +ï¨ Ĺ +êĴ ª +ðŁĽ Ģ +Ç Ĥ +ðŁ¥ ¶ +ðŁİ į +ï¿ © +ðŁij Ĵ +áµ Ī +ï¸ ¿ +áħ © +â¾ ¦ +à° ¤ +á´ ĸ +ਠ¬ +àº Ĺ +༠» +Ñ º +ਠª +á´ ³ +ðĿIJ Ī +à» Ģ +á´ ¿ +âĤ į +âĩ ¡ +⼠ª +ðĿIJ Ĥ +ðĿĴ ķ +ðŁ IJľ +Ê į +Ñ ± +འĥ +ë® IJ +ìĽ ¡ +ìľ ģ +ðĿIJ ¿ +ðĿķ ł +ðŁij Ľ +Æ ª +Ï º +Ó ¬ +Ù ¿ +Ý £ +ઠī +à® ¹ +འij +áĨ ¯ +áµ ĩ +âĩ ¥ +âı ª +âĻ ° +âļ Ń +âļ ¾ +ãħ Ħ +êĢ ° +ê° Ĺ +ê² ĭ +ê² » +ê¶ ľ +ê¼ ĩ +ê½ ¹ +ëĤ Ł +ëħ Ī +ëĭ ¢ +ë§ Ł +ëª Ĩ +ëµ Ģ +ì½ ± +íĩ ĺ +íľ ľ +ï§ ¾ +ï± µ +ï² ¢ +ï² ¤ +ðĿĴ Ĭ +ðĿĺ ¯ +ðŁį Ĺ +ðŁı į +ðŁIJ ĺ +ðŁĵ ¡ +ðŁĶ ŀ +ðŁ¤ ³ +ðŁ¥ ģ +ðŁ¥ Ĺ +ðŁ¦ Ĭ +Ä µ +Æ ¦ +Ç µ +É ¯ +Î ı +Õ Ħ +Ü ¥ +འģ +ᨠł +âķ « +ãİ ī +ë· ´ +ìĨ İ +ìİ Į +ì£ µ +íĽ ł +ï§ ª +ï³ ı +ï» º +ðĿij ģ +ðĿij ĩ +ðĿĴ Ĩ +ðŁİ ł +ðŁIJ Ķ +ðŁij Ł +Å ĸ +ठĮ +á¾ ½ +ê¦ Ĵ +à® Ł +á´ ± +ðŁı ° +ðŁIJ ŀ +à½ Ģ +áĢ ħ +âĬ ¿ +ðŁIJ § +ἠģ +â¼ Ī +âĶ ¿ +ðŁ¥ ´ +â¼ ¿ +ðŁ§ ľ +ãħ ¿ +âĦ « +ãĢ ³ +ãĬ Ļ +â¼ Ģ +ï ¦¬ +ðŁı ¬ +ðŁĵ » +áĬ Ľ +áĦ ħ +ຠĬ +ຠĽ +áħ ³ +ðŁij ® +à® ± +âĺ ĩ +ðĿIJ ı +à´ µ +à» ģ +འı +འ¢ +ᥠ± +âĤ £ +ï¥ ¦ +ïŃ Ļ +ï´ © +ï¹ Ĥ +ðŁį £ +ðŁķ ¹ +Ï ĸ +à¶ ¸ +ຠ¢ +áĭ Ń +âİ Ŀ +âĹ Ŀ +âĻ Ī +âĻ İ +ê½ ¥ +ì³ Ķ +ì¼ ij +ï± ° +ðĿij ĥ +ðŁĮ ª +ðŁį ¡ +Å İ +Ê ¦ +Ñ § +Ó İ +Ô ´ +Ú Ī +ß ĵ +ß § +à¤ Ķ +áĪ « +áĪ µ +áĹ © +á´ ł +á¼ ł +âĢ Ĺ +âģ ij +âĦ ı +âĸ ĩ +â² £ +ãĦ ³ +ãī ® +ê³ Ĺ +ëĦ Ĵ +ëĸ « +ë¡ Ħ +ë¹ ° +ë½ ģ +ìĦ ģ +ìĮ ĺ +ìŁ Į +ì³ ī +ì¼ ķ +ï¬ » +ï³ İ +ï¹ ¸ +ï¹ ¾ +ðĿIJ Ĩ +ðĿij · +ðĿĽ ¼ +ðŁİ ı +ðŁİ ŀ +ðŁIJ Ļ +ðŁij Ĥ +ðŁĵ ģ +ðŁĸ ± +ðŁļ į +ðŁļ § +ðŁĽ ¡ +ðŁ¤ Ĵ +ðŁ¥ ŀ +ðŁ¥ © +ðŁ¦ Ģ +ðŁ¦ ĸ +Ë ¢ +Ü ļ +à® µ +áĢ ģ +áī ° +âı Ń +âĻ ¿ +ê³ ĺ +ëı Ŀ +ëķ ĥ +ìħ Į +ìĴ ¸ +ìĽ Ł +íħ Ħ +íľ « +ï§ ĺ +ï¿ ¬ +ðŁı · +ðŁĶ § +ðŁ¥ Ī +Æ ĸ +áŀ ĩ +áŀ ĸ +âģ º +âĹ ľ +âŀ © +ê¦ Ń +ëĻ ¤ +ïŃ ¼ +ðĿĻ ĸ +ðĿĻ £ +ðĿĻ ¤ +ðŁĮ Ŀ +ðŁĶ ij +ðŁĽ ł +ຠĩ +âĺ £ +ãĦ ¨ +ðĿĸ Ĺ +Ó ĵ +âĨ £ +ðŁ¥ ī +ðŁĮ ł +ðŁĺ ½ +ãİ ł +Å § +ðŁIJ Ĵ +ï§ IJ +ðŁĺ ¿ +âĪ ¬ +ðŁIJ ® +⣠± +ಠ¡ +â¾ ¼ +à° ² +Ë ¶ +âĸ ¿ +Õ Ī +áŀ İ +áħ ¥ +áŀ Ĺ +Õ § +ðŁ¤ IJ +ðŁį ł +ঠ¤ +à¶ º +âĻ į +ìĺ Ļ +íĺ ĵ +ï¹ º +ðŁĽ ³ +Å ī +á´ İ +âı ľ +âĶ ³ +ê¸ · +ì¡ Ķ +ðĿĴ Ī +ðĿĴ į +ðĿĴ ¹ +ðĿĵ ĩ +ðĿķ Ł +ðĿĹ ¹ +ðŁĮ ħ +ðŁı ´ +Ä Ķ +Ä ¤ +Å µ +Ç ¾ +Ï ŀ +Ï ¶ +Ô ³ +Ü Ĩ +ß © +à¡ Ĵ +ठĺ +à¶ ļ +འĸ +áģ Ĭ +áĥ ŀ +áĦ Ĥ +áĭ « +á´ º +Ḡ£ +Ḡª +á¹ Ĥ +á¼ · +á¿ ĩ +âĩ Į +âı ¬ +âĻ Į +â® Ł +â´ » +âµ Ł +ê¦ ķ +ê¦ ª +ê¦ ® +ê² Ħ +ê¾ IJ +ëĥ ij +ëķ ĭ +ë¡ ¸ +ë¬ Ģ +ìĩ ¤ +ìĪ © +ìľ ķ +ìŃ ĺ +ì· ° +ì ·¸ +íľ Ģ +ï¤ £ +ï§ į +ï± Ħ +ï³ ij +ðĿIJ ¤ +ðĿĴ ĵ +ðĿĴ ¶ +ðĿĹ ¼ +ðĿĻ Ĭ +ðŁĩ ¾ +ðŁĮ Ľ +ðŁĮ ® +ðŁİ ĩ +ðŁİ ² +ðŁı Ľ +ðŁij ¥ +ðŁij ´ +ðŁĴ Ĩ +ðŁĵ Ĥ +ðŁĵ § +ðŁķ IJ +ðŁĸ ķ +ðŁĺ § +ðŁĻ Ģ +ðŁļ Ĵ +ðŁĽ « +ðŁ¤ ł +ðŁ¥ ļ +ðŁ¥ Ľ +ðŁ¥ £ +Ç ¯ +È § +Î Ĭ +Ò ² +× ° +Û ij +áĥ © +áĦ Į +áĪ į +áī ¥ +áı Ĥ +âģ ± +âĬ ¢ +âĹ ĵ +âĿ ° +ë¿ ¡ +ìĽ © +íģ Ń +íĨ ³ +íĬ Ħ +íĵ ¸ +ï¥ £ +ï¥ 
´ +ï± IJ +ï± ¯ +ï³ ļ +ðĿĸ ĺ +ðĿĺ Ģ +ðŁIJ Ĭ +ðŁIJ Į +ðŁij ļ +ðŁĵ ĥ +ðŁļ Ľ +ðŁļ ª +ðŁ¤ ° +Ä ´ +áĥ ® +áĹ ¨ +âĻ ® +â² ŀ +ãĪ Ķ +ì ħį +ãħ ĥ +ï¥ ¡ +ຠ¡ +Õ İ +Õ º +⬠Ľ +â½ ¤ +ðĿIJ ² +âŀ µ +áĢ Ľ +âĶ ħ +âĨ Ł +â¼ Ĭ +ðŁĮ ½ +ðŁļ ¿ +ï¦ Ĭ +ãĦ £ +⼠© +ï© Ľ +ðŁį ± +â¾ ¨ +à´ ¤ +áŀ ģ +ຠŀ +Ê ļ +ðĿIJ Ĵ +à´ ± +áŀ ľ +à® © +à° Ĺ +à´ ļ +âĩ £ +ï¦ ķ +Õ ħ +Æ ĺ +âĤ ¦ +âĶ Ħ +ï¦ Ł +ï¦ « +ðĿIJ ģ +ðĿIJ ĥ +ðŁį ¸ +ðŁIJ ² +Å ¶ +É ĸ +ß ĺ +ภ¦ +à½ Ķ +áĨ · +âģ ķ +âĵ Ĥ +âĿ ľ +ï¥ ¥ +ï¬ ® +ðĿĹ Ŀ +ðĿĹ ¿ +ðŁİ ¾ +ðŁĹ Ŀ +ðŁ¦ Į +Æ ħ +Ç ª +Ò Ĺ +Ü Ľ +ß ł +à¡ ij +áī £ +áĬ Ń +á¹ ¡ +âŀ ¼ +âŀ ¾ +â´ ± +ãī ¡ +ê³ ¯ +ë½ Ī +ìĤ ĺ +ìī ij +ì «ĺ +íĮ ĥ +íĻ ° +ï¤ Ĺ +ðŁĮ ¬ +ðŁĮ ° +ðŁį ¤ +Ä » +Å ĩ +Æ ¨ +É ķ +Ò ¢ +Ò º +Ö į +× ± +Ú ± +Ú ½ +Û IJ +ठĽ +à· Ģ +๠ļ +ຠ« +á´ ¹ +á ½Ķ +á¾ ³ +âĤ Ĵ +âĨ ´ +âĩ Ŀ +âī ħ +â Į¨ +âĵ ĵ +âĸ ¢ +âļ ¬ +âŀ Ń +â² Ĵ +ãİ ¿ +ê¿ ´ +ëĪ ± +ëį ¬ +ëİ IJ +ëIJ « +ëĶ « +ë± ģ +ìĥ ¥ +íĮ ¼ +ïŃ ĵ +ï® ¥ +ï² ° +ðĿIJ ĩ +ðĿIJ ij +ðĿij Į +ðĿĵ ª +ðĿķ ļ +ðĿĺ ª +ðĿĺ ¼ +ðĿļ Ľ +ðŁĩ ¶ +ðŁĮ Ħ +ðŁĮ ķ +ðŁĮ ¤ +ðŁĮ § +ðŁį ¬ +ðŁİ ĭ +ðŁİ » +ðŁı ¨ +ðŁIJ ĩ +ðŁij ĵ +ðŁĵ IJ +ðŁĵ Ļ +ðŁĶ ¼ +ðŁķ Ĵ +ðŁĸ ı +ðŁĸ ¥ +ðŁ¤ ¬ +ðŁ¥ Ĭ +ðŁ¥ Ĵ +ß Į +ຠĦ +á¼ µ +âķ ¡ +â² ¤ +â´ ¼ +âµ ¢ +ãĪ ¯ +ëĵ ¸ +ëŁ ĩ +ëº į +ðĿĻ § +ðŁį Ī +ðŁĶ ¬ +ðŁĸ Ĭ +ðŁ¤ ¾ +Ë ¡ +Ü © +âĮ ¡ +âŃ ij +â² ¦ +ë© ī +ì¼ Ń +ï¿ ¤ +ðĿĴ İ +ðĿĹ ¥ +ðŁIJ µ +ðŁķ ¶ +ðŁķ ¸ +ðŁ¤ ľ +Õ ª +áĪ ĭ +ðŁ¥ µ +ï° ģ +áµ IJ +âķ ĵ +áĢ ĸ +âĭ Ī +É ŀ +âŀ ® +ॠ° +ãĨ ģ +ðŁĴ ± +ðŁı Ń +áĨ ¨ +ðŁį ļ +ðŁ¦ IJ +á´ » +âĺ Į +à´ ķ +Õ ± +áħ ® +ðĿIJ Į +Å ¦ +ຠķ +âľ Ļ +Ë ³ +Ô µ +âķ Ĵ +ðĿĹ Ĺ +ðĿĹ ł +Ú ļ +ঠ§ +âĨ Ŀ +âĻ ī +ãĮ » +ì¹ Ĭ +ðĿĹ º +ðŁ§ ĺ +ì³ £ +ï¬ Ŀ +ðŁij º +Ç Ł +Î Ī +Î « +Ñ ¥ +Ô ² +Õ ¨ +Ü ¦ +ঠĨ +ঠ¥ +áIJ ¢ +á¼ ģ +á¼ ĺ +á¼ ¦ +âĵ Ŀ +ãĪ ° +ãİ Ĺ +ê² ¡ +ë¨ Ģ +ì£ Ķ +ì´ ¤ +ìµ Ŀ +ï§ ´ +ïŃ Ĭ +ï² Ł +ðĿIJ · +ðĿij ĭ +ðĿĵ ī +ðĿĺ µ +ðŁĴ · +ðŁĽ © +ðŁ§ ¹ +Å Ķ +Ê ŀ +Ë ¥ +Î Į +Ñ © +Ó IJ +Ó ł +Ú ij +Ú Ĵ +ß ¨ +àª Ī +áIJ ĥ +á¹ ¯ +âĤ ĭ +âĤ µ +âĦ ħ +âĦ ł +âĪ £ +âī º +âī » +âĬ Ľ +âĮ IJ +âİ ĵ +âĺ ¸ +âĻ Ĵ +âļ Ĵ +âľ ĩ +âľ ł +â´ · +âµ ĸ +ãĦ ¸ +ãī ¢ +ãī ° +êĩ ´ +ê´ ¸ +êº ł +ëĤ ı +ëĤ ¢ +ëIJ Ģ +ëº ´ +ìĥ ľ +ìį ħ +ì¤ « +ì± ¦ +ìº ij +ì¼ ģ +ì¿ ³ +íĤ ģ +íħ ¡ +íĴ Ĥ +íĴ ī +íľ Ħ +ïŃ ª +ï® ¬ +ï¯ ¦ +ï± ª +ï² ı +ï ´Ģ +ï» Ĩ +ï¿ ¦ +ðĿij Ĺ +ðĿĸ Ļ +ðŁĮ ¡ +ðŁį Ŀ +ðŁį § +ðŁİ « +ðŁı ĺ +ðŁı ª +ðŁIJ ĭ +ðŁIJ Ľ +ðŁIJ º +ðŁij ĸ +ðŁij ŀ +ðŁij · +ðŁĵ Ģ +ðŁ ĶĦ +ðŁĶ Į +ðŁķ Ļ +ðŁĻ į +ðŁĻ İ +ðŁ¦ į +Ç ° +É Ł +Ê Ĩ +Ô ¼ +Ú ľ +ঠ¡ +ঠ¶ +áĴ ĥ +á¼ © +âĵ ķ +â² Ī +ê° ° +ê¹ ł +êº ħ +ëĦ ¹ +ë¯ ĵ +íIJ Ī +ï§ ¶ +ï® ij +ï² ¨ +ðĿĴ ī +ðĿĴ Ķ +ðĿĹ ¨ +ðĿĻ ŀ +ðĿļ Ĵ +ðĿļ ķ +ðŁIJ İ +ðŁ¤ ķ +ðŁ§ Ķ +Ï ° +Ô Ŀ +âĮ Ĭ +âĴ ¾ +ãī £ +ïŃ © +ðĿļ ŀ +Ê ij +ঠ¦ +áĦ ĩ +âī ĥ +â² Ģ +ìŁ İ +ðĿij ¶ +ðĿĵ ² +ðŁ İ· +ðŁļ ¹ +ຠģ +áł ł +ãĦ ļ +ðŁIJ ¿ +ἠļ +âķ ³ +ðŁIJ Ń +âĴ ¹ +ðĿĸ ļ +âĻ ĸ +ãĪ ² +âĨ ¾ +áĦ Ĩ +âķ Ľ +ðŁ¤ į +â½ ¥ +ðŁ Į¨ +âĪ ® +ãĮ ĺ +ãį ij +ï¹ Ģ +âĵ Ĺ +âĬ Ħ +ðŁı ¹ +Ë Ĵ +ðŁ¤ ± +ãı ľ +ðŁİ Į +ï¥ Ń +ঠ£ +ðŁİ ¹ +ãĬ Ł +à´ ° +ðĿIJ Ķ +à´ ¨ +འļ +âľ º +Õ · +ðŁij ³ +ঠľ +âĺ ĭ +âĻ Ĭ +ãĢ Ľ +È ĭ +à® ° +áĥ ¨ +âĦ ķ +íij Ģ +ðĿĵ ĥ +ðŁ¦ Ķ +Ä ¿ +Å Ģ +Æ ³ +É ļ +Ö ĥ +Ü £ +ß Ł +à¦ Ń +à§ ¡ +à¶ » +ຠ£ +འĩ +Ḡ¨ +á½ Ī +â½ ¬ +ê¡ Ķ +ì³ Ħ +ï¨ ī +ðĿIJ ¡ +ðĿĺ ¢ +ðŁį ¿ +ðŁİ Ł +ðŁı ī +ðŁĶ IJ +ðŁļ ħ +ðŁ¤ ½ +Æ į +Ç « +Ç ½ +È ļ +Î ī +Ó ¤ +Ó ª +Õ Ĭ +Ù ¼ +Ú ´ +ß Ŀ +à¶ ľ +á¼ ķ +á¿ ¥ +âİ ŀ +ãĢ ļ +ãī ¤ +ê³ ¸ +ê· ģ +ëĵ Ħ +ëĵ ķ +ì¨ Ķ +ì± ¨ +ðĿIJ ¾ +ðĿij » +ðĿĶ ¼ +ðĿķ Ŀ +ðĿĺ Ń +ðŁĨ Ļ +ðŁĵ ¤ +ðŁĶ Ł +ðŁĹ ¼ +Ä ľ +Æ ģ +Æ ¿ +Ç ³ +Ç · +É ĥ +É ł +Ê ī +Ê § +Ë ² +Ï ´ +Õ ģ +Õ ŀ +Ö ĩ +Û Ĥ +Û ĵ +ß Ĺ +ß ¦ +ঠ¹ +à® ³ +à´ ¸ +à» Ĥ +áĪ Ŀ +áĪ ª +áĭ µ +áIJ Ĭ +áĴ ª +áļ ĸ +áŀ Ľ +á´ ¢ +áµ ı +áµ Ń +á¶ « +Ḡı +ẠĴ +á¼ ¥ +á½ ķ +á½ ¼ +âĤ Ĭ +âĦ Ĥ +âĦ © +âĩ ī +âī £ +âĮ ł +âİ Ł +âı ® +âķ ĺ +âĹ ĸ +âĺ © +âĻ ij +âĻ ² +âļ Ľ +ãĦ Ł +ãī ± +ãİ ļ +ê¡ ķ +êª ĸ +ê° ¹ +ê² Ĩ +êµ Ħ +ëĩ ¬ +ëĭ ¯ +ëı ł +ëĴ ¬ +ëĸ Ī +ëĸ ½ +ëĺ Ķ +ëŀ ¸ +ë¸ ħ +ë» ł +ë¿ Ł +ìĤ µ +ìĬ ī +ìľ ° 
+ìł ĭ +ìł Ķ +ì¥ ¡ +ìŃ Ŀ +ì¼ ¬ +íĪ ĩ +íī ľ +íį Ħ +íĽ ¾ +íĿ £ +ï¤ © +ï¤ ¯ +ï¦ ľ +ï¦ § +ï§ ľ +ï¨ Ī +ï¬ ª +ï ¬´ +ïŃ ½ +ï® ī +ï¯ ŀ +ï° Ĵ +ï± ĩ +ï¿ Ħ +ðĿIJ ħ +ðĿij Ħ +ðĿij º +ðĿĴ Ĺ +ðĿĵ ® +ðĿķ Ľ +ðĿķ ŀ +ðĿĸ ij +ðĿĺ ģ +ðĿĺ Ĩ +ðĿĺ ¶ +ðĿĻ ¢ +ðĿļ ľ +ðŁĮ ĥ +ðŁĮ ¦ +ðŁį Ł +ðŁİ İ +ðŁı Ļ +ðŁIJ © +ðŁIJ « +ðŁIJ ´ +ðŁij Ķ +ðŁĵ ī +ðŁĵ Ľ +ðŁĶ ī +ðŁĸ ¼ +ðŁĹ ĥ +ðŁĹ ¯ +ðŁļ ĩ +ðŁļ IJ +ðŁļ µ +ðŁ¤ ¶ +ðŁ¥ ĭ +ðŁ¥ ĵ +ðŁ¥ ® +ðŁ¦ İ +ðŁ¦ ł +ðŁ§ Ĵ +ðŁ§ ¨ +Æ IJ +Ç į +Ó Ģ +Ô Ľ +ಠ° +à´ Ļ +áĢ Ĵ +ê² Ŀ +ê¹ ¹ +ë© ¥ +ìĸ Ķ +ï¤ ģ +ï¤ ı +ï¦ ī +ï¦ ĵ +ï§ ī +ï² Ŀ +ðĿĹ ŀ +ðĿĹ ± +ðŁĮ ĭ +ðŁį ¶ +ঠļ +ìķ ľ +ðĿIJ ¯ +ðĿļ Ŀ +à° ¨ +འĺ +འł +á¡ ¥ +á¾ ° +âģ į +âĶ ° +⬠ľ +ðĿIJ ł +ðĿij ¯ +ðĿĹ Ľ +ðĿĵ » +ðĿĸ Ī +âŀ » +áŀ ł +â¡ ± +â» ij +ðŁ§ µ +ï¦ ¢ +ðŁij ĺ +ãĤ Ķ +â¼ Ł +ãĬ ¤ +ï¦ Ŀ +ãĮ ¦ +âĢ ¸ +ðŁĶ Ļ +ã ¹ +ã¹ ¦ +ï¹ ħ +ï© Į +ãī ¨ +ï¸ ½ +âį ¥ +ðŁļ ī +ðŁ¥ ľ +âĵ ľ +â» Ŀ +ï¨ ľ +ðŁĴ Ĵ +áĦ ij +â¾ ŀ +ï¨ ģ +à´ ª +áĦ İ +âŀ ´ +ঠ· +áħ ¬ +áŀ § +âĨ ¢ +âķ ¦ +âľ ij +Ë ¬ +Õ IJ +à¼ Ķ +Ê ¤ +Ë ¨ +ठŀ +à» ĥ +༠ļ +âĵ ¥ +âķ ľ +ðŁIJ ĸ +á¼ Ļ +á¼ ¤ +ìĨ ° +È Ĥ +Ê ± +à® ļ +áĥ § +á´ ĭ +á´ ® +âĿ ¡ +âŀ · +ëĿ ¡ +ï§ ¢ +ï¯ ¡ +ðĿķ ķ +ðŁħ ° +ðŁ¦ ¸ +Ç ¸ +Ó ŀ +Ô ¶ +Ö Ĩ +Ú ģ +Û ĭ +áİ ¥ +á¾ ¿ +âĶ Ń +âĶ ® +êĢ Ģ +ê± ĺ +ëIJ Ń +ë½ Ħ +ìĶ IJ +ì¸ Į +íģ ł +íĻ ± +ï¥ ī +ï¨ ĸ +ðĿij ´ +ðĿĸ Ĵ +ðĿĺ ¨ +ðĿ ļĮ +ðŁIJ ¡ +ðŁij ¢ +ðŁĵ Ķ +Å ħ +Æ İ +È © +Ò ª +Ô ĥ +áĥ « +Ḡĩ +âĽ Ł +ê» Ń +ë¨ Ħ +ìŁ Ģ +ì¤ ´ +íļ IJ +ï¤ ³ +ðŁŁ ¢ +Æ § +È ¼ +Ê Ŀ +Ë Ħ +Ë ħ +Ë į +Ë § +Ò ¥ +Õ Ķ +Ø ı +Ø ¼ +ß IJ +ß ľ +ठĵ +à¦ Ļ +à® ĵ +à¶ ´ +༠į +༠Ĵ +འ£ +áĢ Ĥ +áĢ Ĭ +áĦ Ħ +á Īĺ +áĭ Ĭ +áĮ į +áij ĭ +áŀ Ĥ +áł ¢ +á¡ Ŀ +á´ ¦ +áµ į +áµ ¨ +Ḡ¡ +Ḡ¯ +á¼ £ +âģ Ĥ +âĦ ĺ +âĦ ľ +âĦ ³ +âĦ µ +âĨ ¦ +âĩ Ĩ +âĪ · +âĬ ļ +âĮ « +âĮ ¯ +âİ Ľ +âİ ľ +âİ ¤ +âİ ¦ +âİ ® +âij ī +âĶ ī +âķ Ļ +âĸ Ĥ +âĹ Ń +âĺ Ĭ +âĺ į +âĺ Ĵ +âļ Ĩ +⼠§ +⼠² +âŀ ĺ +⥠Ħ +â´ ³ +â´ ½ +âµ Ī +ãī ¯ +ãİ ij +ã§ ¬ +êĻ ¬ +ê§ ģ +ê³ ¬ +ê´ ŀ +ê» ľ +ëħ ĵ +ëĭ ¼ +ëį ĸ +ëĸ ± +ëĿ ° +ë¡ ¹ +ë¢ ´ +ë£ Ģ +ë¤ ł +ë¨ ķ +ëŃ ¥ +ìĦ ¶ +ìħ ¤ +ìĮ ķ +ìį ª +ìı © +ìĴ Ģ +ìĶ ¯ +ìĿ Ķ +ìĿ ľ +ìł Ń +ì§ ¦ +ì¨ © +ì² ¬ +ì³ ¥ +ì¼ ¯ +íĢ « +íĢ Ń +íĥ ¸ +íĵ ģ +íķ ¬ +íĹ ¸ +íĽ ķ +íľ Ń +íĿ Ĺ +ï¤ Į +ï¤ ª +ï§ ¿ +ï¬ Ħ +ï¬ ħ +ïŃ ij +ïŃ « +ïŃ º +ï® Ĥ +ï® ¢ +ï® ¨ +ï° İ +ï° ł +ï² £ +ï³ IJ +ï³ Ĵ +ï³ ĺ +ï³ ľ +ï¹ ¼ +ï¿ ¨ +ðĿIJ © +ðĿĴ ļ +ðĿķ Ķ +ðĿķ ¤ +ðĿĸ Į +ðĿĹ £ +ðĿĹ ° +ðĿĹ ´ +ðĿĺ Ĥ +ðĿĺ ¥ +ðĿĺ ® +ðĿĺ ¸ +ðĿĻ Ģ +ðĿĽ ¾ +ðĿľ ı +ðŁĮ ģ +ðŁĮ ľ +ðŁĮ ¥ +ðŁĮ ¯ +ðŁį IJ +ðŁİ Ĵ +ðŁı Ķ +ðŁı ķ +ðŁı ® +ðŁIJ Ĥ +ðŁIJ ī +ðŁIJ ¹ +ðŁĶ ķ +ðŁĶ ļ +ðŁķ ij +ðŁķ £ +ðŁĹ ŀ +ðŁĹ ¡ +ðŁĹ ¿ +ðŁļ Ĩ +ðŁļ Ĭ +ðŁļ ĵ +ðŁļ ķ +ðŁļ ¾ +ðŁĽ ģ +ðŁĽ İ +ðŁĽ ı +ðŁ¤ ´ +ðŁ¥ ķ +ðŁ¥ ĸ +ðŁ¥ ł +ðŁ¥ ¥ +ðŁ¦ Ĩ +ðŁ¦ ī +ðŁ¦ ļ +ðŁ§ ij +ðŁ§ ¥ +ðŁ§ ¿ +Å ° +Æ º +É § +ઠĩ +à® £ +áĪ Ī +áĬ ¤ +áĭ ® +áĮ Ī +áĮ µ +ᥠ² +âĵ Ł +êĻ ³ +ê° Ĭ +ëķ ģ +ëķ ¨ +ìĬ ģ +ï¦ µ +ï¬ ² +ðĿĸ į +ðĿĺ Į +ðĿĺ ³ +ðĿĻ © +ðŁį Ļ +ðŁĸ ĸ +áī ³ +áĭ ¨ +áĸ ĩ +áŀ Į +á¹ § +âķ ª +âŀ ļ +â² ĺ +ê ķ +êķ ¥ +ï¤ · +ï® £ +ï¯ ł +ðĿĴ ĸ +ðĿķ ĺ +ðĿĸ ĩ +ðĿĹ Ł +ðĿĹ ª +ðĿĹ ¯ +ðĿĻ ł +ðŁĵ ı +à¦ Ĺ +âĴ » +â² ł +ðĿĵ µ +Ê £ +à° ľ +áĬ ¢ +áŀ IJ +Ḡ· +âĦ Ľ +âĩ Ģ +âĩ Ĭ +êĴ ¦ +ê¦ ł +ï® ¤ +ðŁį Ľ +ðŁ¤ Ľ +ᨠ¾ +âŀ º +áķ ¯ +ἠı +âĩ Ĥ +âĶ ¹ +âĻ Ĺ +ðŁĸ ¨ +ê¦ ı +ઠ° +áļ ¨ +ðŁ¤ ¥ +ðŁ§ ¢ +ãIJ Ĥ +ãĦ ¥ +ðŁĸ Į +â¼ Ĵ +ãĬ § +âį © +ðŁ¦ ij +âĶ · +ï© IJ +ï© ¡ +ðĵ Ī +ðĵĪ Ĵ +â» Ħ +ï¨ Ĵ +âĦ ª +Ò § +Ú Į +âĢ ¶ +⺠ł +â» ģ +âĨ ¸ +áĦ IJ +ãħ IJ +à» Ħ +áĹ ª +âĨ ¼ +âĩ ĭ +âĩ ĺ +âĮ ij +âĸ © +ðĿIJ Ĺ +Ä Ĭ +ঠī +ìī ł +É ¤ +ß į +ß ı +áµ Ĺ +âĤ ¥ +âĵ ī +âĶ ł +âĶ ¨ +âķ Ħ +ä ¤ +ä¤ Ģ +ê» ¸ +ï® ģ +ðĵ Ĥ +ðĵĤ ĥ +ðŁ¦ ķ +Æ Ľ +ঠĩ +ãı ĺ +ï® ¼ +Ú ĵ +Ú Ŀ +ঠĵ +à¶ ¯ +á´ ħ +á½ Ļ +âģ ¼ +âĸ İ +â¼ © +ä Ķ +äĶ Ģ +ë» ¡ +ìĽ ½ +íģ Ħ +ï¥ ¼ +ï± ī +ï¹ » +ðĿĸ ĭ +ðĿĻ Ī +ðĿĻ ª +ðĿ ϶ +ðŁIJ Ħ +ðŁIJ Ĩ +áİ ¢ +ḠĮ +âĿ ´ +ðŁı ¸ +È Ŀ +É ¸ +Î ħ +Ï ľ +Ó ¢ +Õ ¹ +à´ ħ +àº Ī +áĭ ° +áij İ +áł µ +á¡ ł +á´ ī +Ḡµ +á¿ ´ +âĵ £ +âĶ ¶ +â½ ¯ +ê² ¥ +ê¿ ĺ +ëģ İ +ëİ Ī +ëĶ ¯ +ë² ° +ìĺ ¯ +ìĽ ¸ +ìŀ Ĺ 
+ì§ ĺ +ì¬ ¬ +ì· ¬ +íģ ħ +íĵ Ķ +íĽ Ŀ +ï¤ ® +ï¤ ¹ +ï¥ ² +ï¯ ĸ +ðĿĵ ħ +ðĿĻ Ħ +ðŁĵ ¶ +ðŁĹ Ĵ +ðŁ¥ Ķ +ðŁ¥ Ń +Å ® +Å ´ +Æ ī +Æ « +Ç ģ +Ç £ +Ç º +Ç ¼ +È į +È ¯ +É ľ +Ê ¬ +Ë ģ +Ë ¤ +Ë µ +Ï Ľ +Ò ¤ +Ò ¬ +Ó ı +Ó Ľ +Ó ¡ +Ó ³ +Ô Į +Ô ¬ +Õ ³ +Ù » +Ú ī +Ú § +Ü ľ +ß ª +ठĿ +ঠĽ +ਠĨ +ઠķ +ઠ¡ +à® İ +à° ¬ +ൠ» +ൠ¼ +à¶ ł +à¶ Ń +à¶ ¶ +à· Ĩ +༠½ +áĢ ļ +áħ ¢ +áĨ ¸ +áĪ Ģ +áĪ ķ +áĪ ° +áī ¡ +áī ¤ +áĬ ¦ +áĬ « +áĭ ĭ +áĭ į +áİ ¯ +áij Ń +áķ Ĺ +ᣠĽ +ᥠĴ +á© ī +áŃ º +á´ ¡ +áµ ĺ +áµ Ľ +á¶ ł +Ḡģ +Ḡĭ +á¹ Ļ +á¹ Ŀ +á¹ ¦ +Ạħ +á¼ Ĥ +á½ ĥ +á½ į +á½ § +á¾ · +âĢ µ +âĤ İ +âĦ Ŀ +âħ Ģ +âĨ ŀ +âĨ § +âĩ ħ +âĪ ĥ +âī ı +âī ½ +âĬ ŀ +âĬ ¡ +âĬ § +â Ĭ¶ +âĭ Ħ +âİ Ĵ +âİ ¡ +âİ £ +âİ ª +âı İ +âĵ ĥ +âĵ ĸ +âĵ ¨ +âķ ĭ +âķ ĸ +âķ ¢ +âķ ² +âĸ Ĩ +âĸ Ĭ +âĸ į +âĸ ® +âĺ ¡ +âĺ ¦ +âĺ ± +âĺ ¿ +âĻ ĺ +âĻ Ŀ +âļ ° +⼠ij +âŀ ª +⤠Ŀ +⤠¢ +⤠· +â§ « +â¨ Ń +⨠¯ +â± £ +â² İ +âµ Ľ +ãħ Ķ +ãĪ ı +ãī ² +ãī ³ +ãĬ ij +ãĭ Ľ +ãİ IJ +ê² ¤ +ê· ¿ +ê¹ ŀ +ê» ¨ +ê¼ į +ê¿ ¸ +ëĥ ¬ +ëĩ IJ +ëĭ ł +ëį ¯ +ëĹ Į +ëĹ ij +ë¥ Ģ +ëª ĥ +ëª ¯ +ë± ¡ +ë³ ĵ +ë³ ½ +ë µľ +ìĤ ³ +ìħ ¥ +ìĩ ½ +ìı ¨ +ìı ¸ +ìķ į +ìĸ ĸ +ìŁ ¨ +ì¢ ĥ +ì¢ į +ì¥ ij +ì§ ¼ +ì© ĥ +ì® ľ +ì® ¸ +ì³ ij +ì´ ¥ +ì¾ ĥ +íħ ¦ +íĪ ¿ +íĵ ½ +íķ ³ +íĸ ı +íĹ ł +íĿ « +ï¤ ĵ +ï¤ ĺ +ï¥ İ +ï¥ ¶ +ï¦ ħ +ï¦ ½ +ï§ ĩ +ï¬ Ĩ +ï¬ ³ +ï® ĩ +ï® Ī +ï® Ŀ +ï® © +ï® ± +ï¯ ĺ +ï¯ Ļ +ï¯ ¢ +ï¯ £ +ï¯ ¤ +ï¯ ¥ +ï± Ĥ +ï² Ĩ +ï² ª +ï´ ¼ +ïº ī +ïº Ĭ +ïº ¥ +ðĿij ¨ +ðĿij © +ðĿij ² +ðĿ ĴĮ +ðĿĴ ª +ðĿĴ ® +ðĿĵ Ĥ +ðĿĵ Ī +ðĿĵ ¯ +ðĿĶ ¨ +ðĿķ Ģ +ðĿķ Ĩ +ðĿķ ¦ +ðĿķ § +ðĿķ « +ðĿķ · +ðĿĹ µ +ðĿĹ ¸ +ðĿĺ Ħ +ðĿĺ Ļ +ðĿĺ ł +ðĿĺ ¬ +ðĿĻ į +ðĿĻ ij +ðĿĻ ¡ +ðĿ ύ +ðĿĻ · +ðĿļ į +ðĿĽ ¿ +ðŁ ĥ +ðŁĥ ı +ðŁħ ĺ +ðŁ ī +ðŁī ij +ðŁİ ¡ +ðŁİ ª +ðŁİ ± +ðŁİ ³ +ðŁİ º +ðŁı İ +ðŁı Ĺ +ðŁı ļ +ðŁı ŀ +ðŁı ¦ +ðŁı § +ðŁIJ ģ +ðŁIJ ħ +ðŁIJ ĵ +ðŁĴ Ĥ +ðŁĵ ij +ðŁĵ ĵ +ðŁĵ ¨ +ðŁĵ « +ðŁĶ ĭ +ðŁĶ Ń +ðŁĶ ¯ +ðŁķ Ĺ +ðŁļ Ĥ +ðŁļ ¢ +ðŁļ ¦ +ðŁļ ¬ +ðŁĽ ĭ +ðŁĽ Į +ðŁĽ ¬ +ðŁĽ ¶ +ðŁŁ ¡ +ðŁ¥ ĺ +ðŁ¥ Ł +ðŁ¥ ¦ +ðŁ¦ ĩ +ðŁ¦ Ī +ðŁ§ Ĭ +ðŁ§ Ĺ +ðŁ§ ¤ +Ê · +Ë ¹ +á¹ ļ +á½ ¥ +âĦ Ł +ê² ¯ +ê» « +ë° · +ìĥ Ĩ +ìĽ Ŀ +ì¨ ī +ì« ı +ï¯ ķ +ðĿľ ĭ +É ² +Ò Ń +Ó Ī +འĽ +áĭ ĵ +áĻ Ń +áł © +á¹ ® +âĦ Ĵ +âĨ » +âµ ĥ +ëĢ ¨ +ëł § +ìī ¥ +ìĮ ľ +ìĹ ¶ +ì¨ Ī +ìª ¾ +íı ½ +íļ Ķ +íĽ µ +ï¤ ¸ +ï¦ IJ +ï§ Ĺ +ï§ ļ +ï¬ ¯ +ðĿIJ Ĭ +ðĿķ Ĺ +ðĿĹ ļ +ðĿļ ĸ +ðŁħ ´ +È ĥ +É Ŀ +Ï ± +Ó Ĺ +ठ¢ +áħ ł +áī ¦ +áij Į +áĴ ¼ +áŀ ¡ +áł ¨ +áł Ń +ᨠħ +á¨ Ķ +á´ ĺ +á¶ ¦ +á¸ İ +á¼ ħ +á¼ ¹ +âĨ ¯ +âĵ İ +ãı Į +ê ī +êī Ĥ +ëĨ § +ëĿ ± +ì¢ ¡ +íĪ ½ +ï¤ ĩ +ï¤ Ľ +ðĿIJ ķ +ðĿĵ ¸ +ðĿĵ ¼ +ðĿĹ ķ +ðĿĺ Ī +ðŁı £ +ðŁı ¤ +ðŁĹ Ħ +Ñ · +Ò ł +áµ ĸ +á¼ ¨ +ë¬ Ħ +ï° ´ +âĪ ½ +Õ Ń +Ú ¹ +à¥ Ł +áĢ Ĩ +áŀ Ĵ +ãĢ ¶ +ê¦ « +ï¸ ĵ +ðĿIJ Ľ +ðĿĺ Ĺ +ðŁı ľ +ì« Ń +ðŁ§ ŀ +འĤ +âĨ ¿ +âĩ ı +âĵ ģ +âĶ § +âķ ģ +âķ ¤ +ê¦ Ĺ +ê¦ ¤ +ðŁı Ī +áŀ ķ +Ô ½ +àª Ĺ +ଠĨ +âķ ķ +ï½ ł +â¼ ¦ +â¼ ¯ +â¾ · +âĶ ĸ +ଠĵ +âĺ Ĺ +âį ĭ +ï¨ Ŀ +â¼ ¥ +ï¦ ª +âĦ Ĭ +ãĢ ´ +âį ¢ +ð¡ Ī +ð¡Ī ½ +ï© ¨ +ãĢ » +ãı ĥ +ï¦ ¡ +ï¨ ĺ +ðŁIJ ĥ +ðŁĨ ĸ +ðŁĹ ¾ +ãĦ ĩ +Þ ĭ +â¼ ¼ +ï¨ Ń +Þ Ģ +Þ Ħ +Þ Ī +Þ IJ +âĮ Ħ +â» ĺ +ãŁ ¢ +á ħ§ +ðIJĮ ¿ +Ë » +à² Ĺ +áĢ ĩ +áŀ Ĭ +âķ ĩ +ãĩ ¼ +ãİ ° +Õ Ĵ +Ü Ī +ß ¥ +à¿ IJ +áĢ Ł +âĨ ¥ +âķ Į +â½ Ģ +â½ ° +â¾ Ĭ +ä Ħ +äĦ Ģ +ðĵ IJ +ðĵIJ į +ðŁİ ¦ +âĤ ¯ +âĬ ĺ +âĦ į +Ê µ +Ñ ¶ +Ú ĥ +à¦ Ķ +à´ ¦ +áİ ¶ +áĵ ķ +á¹ ¨ +âĤ ł +âĩ ° +âĹ Ĵ +â¿ Ĭ +ê· ± +ì¹ ķ +íĪ © +ïŃ Ģ +ðĿĴ ¸ +ðĿĵ Ĭ +ðĿĺ © +Ç ¦ +É « +áĬ ¨ +È ¹ +Ê ¯ +Î ª +Ú Ģ +áĮ ¸ +áİ » +áı ķ +áı ´ +á² Ĥ +á½ ¨ +âı Ŀ +âĺ Ļ +ëĥ ¨ +ëĦ ¼ +ëĪ Ļ +ë£ ħ +ìĶ ¼ +ìķ Ŀ +ìļ ¬ +ìľ ± +ï¥ Ĥ +ï¦ ¹ +ï¬ ¹ +ïŃ ģ +ï³ Ī +ðĿĶ ħ +ðĿĺ ¤ +ðĿĻ ı +ðĿĻ Ļ +ðŁķ ī +ðŁ§ Ļ +Ḡij +ê´ ¼ +ëģ į +ëĹ ´ +ëĿ ³ +ë° ŀ +ë° ¢ +ëµ ĺ +ìĤ Ķ +ìĦ Ħ +ì¼ ļ +íĢ ł +íĬ ± +íĮ ĸ +ï¤ ij +ï¦ ´ +ï¦ ¸ +ï´ į +ðĿĺ · +Ä ¬ +Å ¬ +Æ Ģ +Æ ĭ +Æ ľ +Ç ij +Ç ĺ +Ç ŀ +Ç ¥ +Ç ® +É ° +É ¶ +É · +É ½ +Ê Ī +Ê IJ +Ë İ +Ë Ł +Ë ¦ +Ë ¯ +Ï IJ +Ï ĵ +Ï ¢ +Ï ¤ +Ï ª +Ï Ń +Ï ® +Ï » +Ñ ł +Ñ Ń +Ò ¨ +Ó Ŀ +Ô ¡ +Ô · +Õ ī +Õ ĵ +Õ ĸ +Õ ļ +Õ Ŀ +Ö İ +Ø ¿ +Ú ħ +Ú į +Ú Ķ +Û Ĭ +Û ¾ +Ü Ļ +Ý Ĵ 
+Ý ĺ +ß Ĵ +ß ĸ +ठĬ +ठIJ +ঠı +ঠĸ +à§ Ł +ઠ® +ઠ¹ +à® ħ +à® Ĩ +à° ¡ +à° ° +ಠļ +ಠ® +ಠ¯ +à´ Ł +à´ · +ൠ¾ +à¶ ij +à¶ ŀ +༠¼ +འĵ +áĢ ĵ +áĤ ¦ +áĥ ĸ +áĥ Ń +áĥ ¯ +áħ ¨ +áħ ª +áĨ ° +áĪ ģ +áĪ İ +áĪ ĵ +áĪ ¥ +áĪ ² +áĪ ´ +áĪ » +áī ł +áī ² +áī ¶ +áĬ £ +áĬ ¥ +áĬ ª +áĭ ĺ +áĭ ² +áĭ ¶ +áĮ £ +áį ¡ +áį £ +áİ ¬ +áİ ¾ +áIJ ¡ +áķ ķ +áĸ ± +áĹ IJ +áĹ Ń +áĺ ī +áļ ± +áĽ Ł +áŀ ¥ +áŁ Ķ +áł £ +áł ª +áł ° +áł ´ +ᤠĸ +ᥠ£ +á ® +á® ł +á ¯ +á¯ Ļ +á ° +á° į +á´ Ĭ +á´ ¾ +áµ ģ +áµ İ +áµ ŀ +áµ ¤ +á¶ ħ +á¶ ĺ +á¶ Ł +á¶ ¢ +á¶ ¤ +á¶ ± +á¶ » +Ḡī +Ḡŀ +Ḡº +á¹ ĵ +á¹ Ĺ +á¹ ª +ẠĬ +Ạı +ẠĽ +á¼ ĥ +á¼ Į +á¼ ¿ +á½ Ĥ +á½ ĵ +á½ Ĺ +á½ ¦ +á¾ ± +á¾ ´ +á¿ ĺ +á¿ Ł +á¿ ¸ +âģ ĺ +âĤ ij +âĤ Ľ +âĤ ¿ +âĦ ĩ +âĦ ŀ +âĦ ± +âĩ Ł +âĩ ² +âĪ ¤ +âĪ ¶ +âī Ĥ +âī ¾ +âĬ ¨ +âĬ ³ +âĬ · +âĭ Į +âĭ ĺ +âĮ ķ +âĮ ¥ +âĮ µ +âĮ º +âį £ +âį ² +âį µ +âİ ĩ +âı ĥ +âı IJ +âı ł +âı ¤ +âı ¶ +âı ¸ +âı ¹ +âij Ĥ +âĴ · +âĴ º +âĵ ¡ +âĵ ¤ +âĶ ¾ +âĸ ĺ +âĸ µ +âĹ ª +âĹ · +âĺ ¨ +âĺ « +âĺ ² +âĺ ³ +âĻ Ĩ +âļ ¤ +âļ ¥ +⼠ĵ +⼠´ +⼠¾ +âŀ « +âŀ ¿ +⣠· +⤠ij +⤠« +⤠¶ +⤠½ +â§ ª +â¨ Ģ +â ©½ +⬠¡ +⬠¢ +⬠¤ +â² ĸ +â² ª +âµ Ģ +⸠® +⸠½ +ãĢ ł +ãĢ · +ãĦ Į +ãĦ ĺ +ãħ ij +ãĪ İ +ãĪ IJ +ãĬ ľ +ãĮ ĵ +ãĮ ł +ãİ Ł +ãİ ¤ +ãİ § +㬠® +ä Ī +äĪ Ģ +ä ° +ä° Ģ +ê ħ +êħ ī +êĩ Ĺ +ê Ī +êĪ į +ê§ Ĥ +ê§ Ĭ +êª Ģ +ê² Ī +ê² į +ê³ Ģ +êµ ł +ê½ IJ +ê¾ Ī +ê¿ ± +ëĥ ı +ëĦ ij +ëħ ¤ +ëĩ ¸ +ëĪ ¼ +ëī ħ +ëĬ £ +ëĭ º +ëį ŀ +ëIJ Į +ëķ ¸ +ëĺ ł +ëĻ ĩ +ëĻ Ī +ëľ ½ +ëŀ Ķ +ëł ľ +ë£ IJ +ë§ Ģ +ë§ Ĭ +ëª Ģ +ë¬ Ń +ë¯ ¾ +ë³ ľ +ë´ Ĭ +ëµ ī +ë· ľ +ë¸ Ģ +ë¹ ĭ +ìģ Ħ +ìĤ £ +ìĤ » +ìĦ µ +ìħ Ĵ +ìī Ī +ìī Ķ +ìĬ Į +ìĬ Ļ +ìIJ ´ +ìĵ º +ìķ ļ +ìķ º +ìĸ ľ +ìĹ ª +ìĺ ľ +ìĻ ¤ +ìļ Ľ +ìļ º +ìĿ ħ +ìĿ ı +ìĿ Ń +ìĿ ¶ +ìł Ľ +ì¡ Ī +ì¢ ī +ì¢ Ķ +ì© ł +ìŃ Į +ì¯ © +ì´ £ +ì¸ ķ +ì¹ Ł +ì¾ ¡ +ì¿ Ļ +íģ ĩ +íģ ī +íĩ Ģ +íĪ ¶ +íĸ ij +íĸ ¤ +íĹ ħ +íľ ı +íĿ Ŀ +ï¤ Ĵ +ï¤ ķ +ï¤ ¬ +ï¥ ħ +ï¥ ĩ +ï¥ ı +ï¥ ļ +ï¥ Ł +ï¦ Ħ +ï¦ Ī +ï¦ ¨ +ï¦ © +ï¦ ² +ï§ ģ +ï§ ĥ +ï§ Ķ +ï§ ł +ï§ £ +ï§ ® +ï ŃIJ +ïŃ ĸ +ïŃ ¦ +ïŃ ´ +ïŃ µ +ïŃ ¶ +ïŃ ¸ +ï® Į +ï® İ +ï® ŀ +ï® Ł +ï® ¡ +ï® ª +ï¯ Ķ +ï¯ Ĺ +ï¯ ļ +ï¯ Ľ +ï¯ Ŀ +ï¯ Ł +ï¯ § +ï¯ ¨ +ï¯ « +ï¯ ¯ +ï¯ ° +ï¯ ± +ï¯ ² +ï¯ ³ +ï¯ ´ +ï¯ µ +ï¯ ¶ +ï° Ģ +ï± ħ +ï± Ķ +ï± ´ +ï² ģ +ï³ ķ +ï· ½ +ï¸ ķ +ï¸ ± +ï¹ £ +ï¹ ½ +ï» į +ï¾ ± +ðĿIJ Ļ +ðĿIJ ½ +ðĿij ¤ +ðĿij ® +ðĿij µ +ðĿĴ ĥ +ðĿĴ Ħ +ðĿĵ Ń +ðĿĵ · +ðĿĶ ĸ +ðĿĶ ŀ +ðĿĶ ¢ +ðĿĶ ¦ +ðĿĶ ¬ +ðĿķ Ħ +ðĿķ Ĭ +ðĿķ İ +ðĿķ Ļ +ðĿķ ľ +ðĿķ Ń +ðĿķ ³ +ðĿķ ¸ +ðĿķ ¾ +ðĿ ĸī +ðĿĸ ı +ðĿĺ ĩ +ðĿĺ ī +ðĿĺ ĸ +ðĿĺ Ľ +ðĿĺ ŀ +ðĿĺ « +ðĿĺ ¾ +ðĿĻ ĩ +ðĿĻ ī +ðĿĻ ĭ +ðĿĻ İ +ðĿĻ ĺ +ðĿĻ ¥ +ðĿļ ĥ +ðĿļ IJ +ðĿļ Ķ +ðĿľ ĥ +ðŁĦ · +ðŁħ Ŀ +ðŁħ ¾ +ðŁĨ Ĥ +ðŁĨ ĵ +ðŁĮ Ĥ +ðŁĮ Ĩ +ðŁĮ ī +ðŁĮ ij +ðŁĮ ĺ +ðŁĮ © +ðŁĮ « +ðŁį ¢ +ðŁį ¥ +ðŁİ Ľ +ðŁİ ¢ +ðŁİ ´ +ðŁij ¡ +ðŁĴ ¾ +ðŁĵ Ń +ðŁĶ Ī +ðŁĶ ¦ +ðŁĶ ² +ðŁĶ ³ +ðŁķ ĵ +ðŁķ ķ +ðŁķ ĺ +ðŁķ Ł +ðŁķ · +ðŁĹ ³ +ðŁļ Ħ +ðŁļ Ķ +ðŁļ ĸ +ðŁĽ IJ +ðŁĽ ¤ +ðŁĽ ¸ +ðŁ ł +ðŁł ³ +ðŁ¤ ¹ +ðŁ¥ ĥ +ðŁ¥ ¨ +ðŁ¥ ª +ðŁ¥ ¾ +ðŁ¦ ĥ +ðŁ¦ Ĵ +ðŁ¦ Ļ +ðŁ¦ ¶ +ðŁ§ ł +ðŁ§ ª +ðŁ§ Ń +ðŁ§ ² +𣠷 +ð£· Ń +ð¦ ĺ +ð¦ĺ Ĵ +Æ ij +Ç Ļ +È ® +Ø ł +Ú Ħ +Ü Ģ +ß ¢ +áī Ģ +áĬ IJ +áİ ł +Ạŀ +ëĪ ŀ +ëķ Ł +ë£ ģ +ë¤ Ĺ +ìĦ ¥ +ìħ ij +ìĸ IJ +ìĽ Ľ +ì£ ķ +íİ ı +íĽ ĵ +ï¥ º +ï³ Ľ +ï´ « +ðĸ § +ðĸ§ · +ðĿķ ģ +ðŁIJ ª +ðŁĴ Ī +ðŁĵ ł +ðŁķ Ľ +ðŁķ ´ +Ñ Ŀ +Ó Ĭ +ॠ² +ઠª +áĥ ¤ +áį IJ +á¶ ° +á¼ Ŀ +á½ © +âĭ ĭ +âĴ ½ +âĻ ¾ +â ½Ķ +â¾ ¯ +ãĦ Ĵ +ãħ ļ +ëIJ į +ë· ģ +ìĭ Ģ +ìļ Ŀ +ì¥ ° +ìº ´ +íĭ ī +íĿ ½ +ï¦ Ģ +ï¦ ¿ +ï§ ħ +ï§ ĵ +ïŃ ¯ +ï® Ĩ +ðIJ¤ ķ +ðĿIJ Ł +ðĿĴ ħ +ðĿĵ ľ +ðĿĶ ° +ðĿĶ » +ðĿĺ į +ðĿĻ ¯ +ðŁĦ ½ +ðŁħ Ĥ +ðŁħ Ķ +ðŁħ ½ +ðŁĵ ´ +ðŁ§ ĸ +Ó Ĵ +Ḡ² +ëī ¼ +Ç ı +È ĵ +Ê ¸ +Õ Ĥ +Û ħ +ß ¡ +ß £ +à® ¯ +à° Ī +ಠ¸ +ຠ® +༠ķ +áĢ İ +áĨ ¡ +áIJ ĭ +áIJ ķ +áij ¯ +áŀ Ĩ +ᨠķ +á© Ī +âģ ħ +âĨ ļ +âĶ İ +âł © +â² Ĥ +â² Ķ +â² ¨ +ãĬ ļ +íĵ ² +ðĿij Ī +ðĿij ¬ +ðĿij ¹ +ðĿĴ ¾ +ðĿĵ ± +ðĿĵ ½ +ðĿķ ¯ +ðĿķ » +ðĿĺ ½ +ðĿļ Ĩ +ðŁĦ ° +ðŁIJ ¨ +Ò ķ +ಠħ +ï¨ Ĩ +ðĿij ° +ðŁĦ ¸ +Ô İ +Ø į +Ù µ +ಠ¶ +áĢ Ī +áĺ Ĺ +áł ¸ +á¡ ¡ +ᨠ² +á© ģ +á´ · +áµ § +âķ ¨ 
+âļ ģ +â¾ Ŀ +ãĢ ¼ +ãĦ ı +êĴ « +ê¦ ¥ +ê¦ © +ê¦ ² +ìĺ ¼ +íĵ IJ +ðĵ ĩ +ðĵĩ ¼ +ðĿķ ¿ +ðŁĽ ´ +ë¨ ľ +ಠµ +à´ İ +à¼ Ģ +âĩ ĸ +ãĪ « +âĵ Ģ +áħ ´ +áļ ¾ +ἠŀ +ἠ« +ᥠ´ +âĨ Ľ +âĨ ¶ +âĩ ¤ +âķ Ł +âĺ · +âļ IJ +ðŁ§ ´ +á¹ ³ +âĶ į +âĶ Ĵ +âĶ © +âĶ ¦ +â¾ µ +ઠľ +ઠ¤ +âĩ Ļ +âĶ ± +âķ Ģ +â½ Ĭ +ï½ Ł +ଠ¡ +ðł ® +ðł® · +âķ ĥ +â° Ķ +ãĬ ¦ +ðŁİ IJ +ãĩ ° +â¼ Ŀ +â¾ Ķ +â½ Ĵ +âł Ĵ +ï¨ ¦ +ï© Ĵ +ï¨ ² +ï© ĸ +ðĵı ¸ +ãĮ ĥ +ðĸ ¤ +ðĸ¤ IJ +ï¦ Ń +âĬ ħ +â¾ ³ +ä´ ¥ +ï© ķ +ðŁĮ Ķ +áŀ ĭ +âļ į +â¼ ĭ +ãİ ĺ +ðIJĮ ² +É © +áİ ij +âĨ ® +âĩ ĥ +âļ İ +ãĩ ± +ãĭ © +ãĮ ¶ +êĻ ª +ëİ ¬ +ï¨ IJ +ï¨ Ľ +ï© Ĭ +ï© į +ðĵ ħ +ðĵħ º +Ï ¡ +È ij +É Ĥ +Ô ĵ +ß İ +à´ § +áĢ ī +áĢ ĭ +áĢ ij +áĢ ł +áļ Ļ +ᨠĦ +ᨠ© +ᨠ¹ +á© ĵ +ᬠľ +á´ Ļ +áµ ij +âĤ Ń +âĨ ° +âľ ģ +â½ IJ +ãĭ ¯ +ãĮ ½ +íĨ ¢ +ï¤ ¿ +ðŁ Ĥ +ðŁĤ » +È Ĵ +Í º +Ô ¥ +Õ ij +Ú ¶ +à§ İ +à¶ ® +ຠĸ +ຠľ +ຠ½ +áĥ » +áħ ¯ +áĭ ŀ +áĸ ķ +á ´Ī +á¶ Ĩ +Ḡľ +á¹ ¼ +á¿ ¨ +âĦ ĭ +âĦ Ń +âĪ ± +âĮ ĵ +âĶ ĩ +âĶ ¢ +â± ® +â² Ħ +ãĩ ¾ +ãĪ ¬ +ë¸ ¡ +ìIJ ī +íĻ Ľ +ðĿķ ª +Æ ¹ +Í ² +Ó ģ +Û ¼ +ঠ« +áħ Ł +áī Ĩ +áį Ī +Ạĸ +á½ ī +âĶ ¸ +â½ © +ê ľ +êľ ¥ +êµ ħ +ëĤ Ķ +ëĦ ł +ëĩ Ĺ +ëĻ Ŀ +ìļ ¯ +ìļ · +ìŁ Ľ +ì· IJ +íŁ ¬ +íŁ ® +íŁ ° +ï¦ Ĩ +ï¦ ± +ï² ŀ +ï³ ¤ +ï³ ¥ +ðIJĮ ¸ +ðĿĶ ı +ðĿķ ® +ðĿĺ £ +à¦ Ī +âı ı +ãĦ ĸ +ê² ĩ +ëĸ ĺ +ëľ · +ëŀ Ĵ +ë¡ ĵ +ë¢ ī +ë£ ĥ +ë§ ĭ +ë² ĭ +ìĤ · +ìĪ ķ +ì Į¨ +ìĵ » +ìĸ Ĭ +ìĻ ¬ +ìĿ » +ì¦ ģ +ìµ ¤ +ì· ĥ +íĢ ľ +íħ ī +íį ł +íı ħ +íij ± +íķ ķ +íĸ ł +íĿ ķ +Æ Ļ +Æ ļ +Æ ŀ +Ç ĥ +Ç Ĭ +Ç ľ +Ç ¤ +Ç Ń +Ç ¹ +È Ģ +È ģ +È ħ +È ī +È Ĺ +È Ł +È ¤ +È ¥ +È ¨ +È µ +È º +È » +É Į +É ® +Ê ħ +Ê ¥ +Ê ¨ +Ë ĵ +Ë Ķ +Ë ł +Ë £ +Ë ¸ +Í ´ +Ï Ĺ +Ï ĺ +Ï Ļ +Ï ļ +Ï Ŀ +Ï ¨ +Ï ¬ +Ï ¾ +Ï ¿ +Ñ ª +Ò Ģ +Ò ľ +Ò ¼ +Ò ½ +Ó Ĥ +Ó ħ +Ó ĩ +Ó į +Ó ĸ +Ó Ł +Ó « +Ó ± +Ô Ĩ +Ô ĩ +Ô º +Õ ĭ +Ö ī +Ø Ī +Ø Ĭ +Ø ½ +Ø ¾ +Ù · +Ú Ĥ +Ú Ĭ +Ú ĸ +Ú Ĺ +Ú £ +Ú « +Ú ¸ +Û Ģ +Û į +Û ½ +Ü ī +Ü ¤ +Ý § +Ý ´ +Þ ĥ +Þ ¤ +Þ ¥ +ß ļ +ß Ľ +ß ¤ +àł į +àł ĵ +àł ³ +à¡ ¢ +ॠł +à§ ł +à§ º +ਠĬ +ਠIJ +ਠ® +ਠ¯ +ਠ° +ਠ¸ +ઠĨ +ઠ³ +ઠµ +ઠ½ +ଠĮ +ଠĺ +ଠ½ +à® ĥ +à® ¸ +à° Ĩ +à° ķ +à° ¦ +ಠĨ +ಠĬ +ಠĮ +ಠIJ +ಠĽ +ಠ¤ +ಠ¦ +ಠª +ಠ² +ಠ¹ +à´ Ĩ +à´ ı +à´ Ĺ +à´ « +à´ ¹ +ൠº +ൠ½ +à¶ ħ +à¶ Ĭ +à¶ Ķ +à¶ § +à¶ « +à¶ ° +༠Ħ +༠ħ +༠Ĭ +à½ Ļ +འ¡ +འ§ +à¿ Ģ +à¿ Ļ +áĢ Ŀ +áĢ § +áĢ © +áĢ ¿ +áģ µ +áĤ ģ +áĤ ½ +áĥ Ĥ +áĥ ª +áĦ Ĭ +áĦ ¢ +áħ ¦ +áħ Ń +áĨ ® +áĨ ± +áĨ » +á ĩ +áĩ Ĥ +áĪ ħ +áĪ ī +áĪ Į +áĪ IJ +áĪ Ĵ +áĪ Ļ +áĪ ļ +áĪ ľ +áĪ ŀ +áĪ © +áĪ ³ +áĪ º +áĪ ½ +áī ħ +áī ¢ +áī ± +áī ´ +áĬ ĥ +áĬ į +áĬ ĸ +áĬ ® +áĬ ¸ +áĭ Ľ +áĭ Ŀ +áĭ ³ +áĮ ģ +áĮ ħ +áĮ ¥ +áĮ ¦ +á Į¨ +áį Ĭ +áį į +áį ķ +áį ĸ +áį ¢ +áį ¤ +áİ Ĵ +áİ ª +áı ģ +áı IJ +áı Ł +áIJ Ĥ +áIJ ĸ +áIJ Ŀ +áIJ ŀ +áIJ Ł +áIJ ł +áij ĸ +áĴ ĭ +áĴ į +áĴ ¡ +áĵ « +áĶ ķ +áķ ĭ +áķ ij +áķ Ļ +áķ ļ +áķ Ľ +áķ ¤ +áķ ¦ +áķ ® +áķ ¼ +áĸ ĵ +áĹ Ĺ +áĹ ¢ +áĹ ¯ +áĹ · +áĺ Ħ +áĺ ij +ἠĤ +áĽ Ļ +áŀ į +áł Ĩ +áł ¡ +áł ¦ +áł ® +áł ¯ +áł ² +áł · +á¡ į +á¡ ŀ +á¡ ¤ +á ¡´ +á¡ µ +ᤠĵ +ᥠĸ +ᥠ° +ᨠ¦ +ᨠ§ +ᨠ¨ +ᨠª +ᨠ¬ +ᨠ¯ +ᨠ³ +ᨠµ +á© ĥ +ᬠķ +áŃ £ +á ± +á± ļ +á² ł +á´ ĵ +á´ ¶ +áµ Ĥ +áµ Į +áµ ¥ +áµ ´ +á¶ ĩ +á¸ Ī +Ḡł +Ḡ§ +Ḡ´ +Ḡ¾ +á¹ Ģ +á¹ ĸ +á¹ Ł +á¹ ł +á¹ « +á¹ ± +á¹ · +á¹ ¿ +ẠĦ +Ạį +Ạij +áº Ĺ +á¼ ī +á¼ ĵ +á¼ Ń +á½ ĭ +á½ Ĵ +á½ ł +á½ £ +á¾ Ħ +á¾ ı +á¾ ij +á¾ Ĺ +á¾ ¦ +á¾ § +á¾ ¾ +á¿ Ħ +á¿ ĵ +á¿ ¡ +á¿ ¬ +âģ ļ +âĤ Į +âĦ ģ +âĦ Ķ +âĦ £ +âĦ § +âĦ ¯ +âĦ ° +âĦ ´ +âħ ħ +âĨ ľ +âĨ « +âĨ Ń +âĨ ± +âĨ ¹ +âĨ ½ +âĩ ĩ +âĩ ľ +âĩ µ +âĪ ī +âĪ Ĭ +âĪ ĸ +âĪ ľ +âĪ ¾ +âī Ģ +âī ĭ +âī Į +âī ĵ +âī ľ +âī ´ +âī ¿ +âĬ Ĭ +âĬ ĭ +âĬ Ķ +âĬ ĸ +âĬ £ +âĬ ¦ +âĭ İ +âĭ ª +âĭ ² +âĮ ¦ +âĮ § +âį º +âİ Ī +âİ ¨ +âİ ¬ +âİ ³ +âİ ¼ +âİ ¾ +âı Į +âı ļ +âı « +âı ¯ +âı µ +âĴ ľ +âĴ Ŀ +âĴ « +âĵ Ħ +âĵ Ĭ +âĵ Ļ +âĵ © +âĶ ij +âĶ Ļ +âĶ ļ +âĶ ¥ +âķ ħ +âķ ī +âķ į +âķ ı +âķ ŀ +âĸ ļ +âĸ ¯ +âĹ ĥ +âĹ ļ +âĹ ¬ +âĹ ´ +âĺ Ī +âĺ ¤ +âĺ ¥ +âĺ § +âĺ ¬ +âĻ ģ +âĻ ± +âļ ĥ +âļ Ħ +âļ ħ +âļ ı +âļ ļ +âļ ŀ +âļ Ł +âļ ± +âļ ² +âľ Ģ +âľ Ł +âľ ¢ +âĿ µ +⣠¡ +⣠¦ +⣠§ +⣠³ +⣠¾ +⣠¿ +âł ĩ +⤠Ħ +⤠º +⥠Ĥ +⥠¹ 
+â§ ī +â§ ¼ +â§ ½ +⨠į +⬠Ĭ +â¬ Ł +âŃ ŀ +â® ŀ +â® ³ +â¯ Ī +⯠ij +â± ł +â± ± +â² Ń +â´ ¹ +âµ ķ +⸠¾ +â º« +â¼ Ĩ +â¼ ł +â½ Ł +â½ ¼ +â¾ Ľ +â¾ § +â¿ ĥ +â¿ » +ãĤ ķ +ãĤ Ł +ãĦ Ľ +ãĦ ¡ +ãĦ ¶ +ãĦ º +ãħ Ĵ +ãħ Ł +ãĨ Ģ +ãĩ » +ãĪ ij +ãĪ Ń +ãĪ ® +ãĪ ³ +ãĪ ¹ +ãī ¥ +ãī ¦ +ãī ¹ +ãī ¿ +ãĬ ŀ +ãĬ ¨ +ãĭ ij +ãĭ ¥ +ãĭ ´ +ãĭ º +ãİ Ħ +ãİ ķ +ãİ ¯ +ãı Ĥ +ãı Ī +ãı ĵ +ãı ĸ +ãı ± +ãIJ ± +ãŁ ģ +ã ¢ +㢠¨ +ã ¨ +㨠³ +ã« ª +ã« ´ +ã¶ ³ +㺠¾ +ä Ģ +äĢ Ģ +ä ĭ +äĭ Į +ä ĮĢ +äIJ Ģ +ä łĢ +ä ł +äł ¼ +ä § +ä§ ŀ +ä¨ ° +ä¨ º +ä ´Ģ +ä · +ä· ħ +ä ·¸ +ê Ĥ +êĤ « +ê Į +êĮ ¼ +ê į +êį ² +êĴ µ +ê ĵ +êĵ ½ +êĻ Ń +êĿ Ľ +êĿ ¥ +ê ŀ +êŀ Ĭ +ê¦ Ĩ +ê¦ ĩ +ê¦ Ł +ê¦ ¨ +ê§ Ī +ê © +ê© Ł +êª ĭ +êª ij +êª ķ +êª Ĺ +êª ľ +êª ® +êª ± +êª » +êª ¼ +ê« Ģ +ê« Ŀ +ê° ĥ +ê° ĺ +ê± ľ +ê² ĵ +ê² ļ +ê³ Ļ +ê³ ¾ +ê´ Ĺ +ê´ Ļ +êµ Ľ +ê¶ ĥ +ê¶ ķ +ê¶ ¨ +ê¸ © +ê¸ ¿ +ê ¹Ħ +ê¹ Ĩ +ê¹ ī +ê¹ ĵ +ê¹ ¢ +ê¹ £ +ê¹ ¸ +êº ³ +ê¿ ı +ê¿ ķ +ê¿ § +ëĢ © +ëģ ħ +ëĥ µ +ëĦ ĸ +ëĦ Ĺ +ëĦ ¢ +ëħ Ĥ +ëĨ IJ +ëĩ ľ +ëĪ ĭ +ëĪ ļ +ëī į +ëī ¨ +ëĬ ļ +ëĬ ¡ +ëĭ ľ +ëĭ ª +ëĮ ĺ +ëĮ ¤ +ëĮ ¸ +ëİ Ł +ëı ¨ +ëIJ Ħ +ëIJ ı +ëIJ ´ +ëIJ ¸ +ëij ģ +ëij ¿ +ëĴ ¨ +ëĵ · +ëĶ ® +ëĶ ² +ëķ § +ëĸ Ķ +ëĸ ª +ëĺ Ń +ëļ Ģ +ëļ ł +ëĽ Ķ +ëĽ © +ëľ ħ +ëŀ ķ +ëŀ ° +ëŁ IJ +ëł ¡ +ë¡ ŀ +ë¡ £ +ë¡ µ +ë£ Ħ +ë£ į +ë¤ ³ +ë¦ į +ë¦ ı +ë¦ ³ +ë§ Ħ +ë§ Ĩ +ë§ į +ë§ ľ +ë§ « +ë§ » +ë¨ ® +ë© Ĥ +ë© Ń +ëª ´ +ë¬ ľ +ë¬ ł +ë¬ « +ë¬ ¾ +ëŃ ¬ +ë® ĺ +ë® ¹ +ë¯ ķ +ë¯ ľ +ë° ¨ +ë° ª +ë± Ķ +ë² ĺ +ë² Ľ +ë² ± +ë² ´ +ë´ ½ +ëµ ¤ +ëµ ¨ +ë· Ĺ +ë· ĺ +ë¸ ĵ +ë¸ ľ +ë¹ ª +ëº ĥ +ëº ĺ +ëº µ +ë» ´ +ë¼ IJ +ë¾ Ķ +ìģ Ń +ìĤ ł +ìĤ ® +ìĥ ı +ìĥ Ļ +ìĦ º +ìħ ¢ +ìĨ Ģ +ìĨ ħ +ìĨ ¤ +ìĨ ¦ +ìĨ ¬ +ìĩ ± +ìĪ µ +ìĭ ¨ +ìĭ ´ +ìĮ ° +ìį ľ +ìİ Ĺ +ìİ ĺ +ìİ ¼ +ìij ī +ìij Ŀ +ìij » +ìĴ Ķ +ìĴ ¯ +ìĵ © +ìķ IJ +ìķ ĸ +ìĸ ł +ìĸ ¾ +ìĹ ĥ +ìĹ Ĺ +ìĹ ľ +ìĹ ¨ +ìĺ Ĥ +ìĺ Ħ +ìĺ ı +ìĺ ¾ +ìĺ ¿ +ìľ § +ìĿ IJ +ìĿ ĸ +ìĿ · +ìŀ į +ìŀ ı +ìŀ ¨ +ìŀ ª +ìŀ ³ +ìł ¡ +ìł ´ +ìł ¹ +ì¡ Ģ +ì¡ ª +ì¡ µ +ì¢ IJ +ì¢ ¨ +ì£ Į +ì£ Ļ +ì£ ³ +ì¦ ij +ì§ ¥ +ì§ ´ +ì§ ¾ +ì¨ ĵ +ì¨ ķ +ì© ° +ì© » +ì© ¼ +ìª Ĺ +ì¬ Ķ +ì¬ ĺ +ì® ® +ì¯ ķ +ì¯ ĺ +ì° İ +ì° ¯ +ì± ĥ +ì± µ +ì² § +ì² ® +ì² ¯ +ì³ ¬ +ì´ ĭ +ì´ ¢ +ìµ ¥ +ì¶ £ +ì¸ Ī +ì¸ Ļ +ìº ¤ +ìº Ń +ì» ½ +ì¼ Ļ +ì½ ¬ +ì¾ Ģ +ì¿ ħ +ì¿ ½ +íĢ ħ +íģ ¦ +íĤ ħ +íĥ ¶ +íĥ ¹ +íĦ Ķ +íħ £ +íĨ Ħ +íĨ § +íĨ ¹ +íĩ ¼ +íī ¤ +íĬ ½ +íĭ Ĥ +íĭ ij +íį Ī +íį Ļ +íį ¿ +íİ ¶ +íIJ Ŀ +íĴ ľ +íĵ Ŀ +íĵ ª +íĵ ± +íĵ · +íĵ ¼ +íĶ Ļ +íĶ ł +íķ ļ +íķ Ľ +íķ ŀ +íķ Ł +íķ § +íķ ¶ +íĸ Ĭ +íĸ ĭ +íĸ į +íĸ Ķ +íĸ ĺ +íĸ ¡ +íĸ ¬ +íĹ £ +íĹ ¿ +íĺ ĸ +íĺ Ń +íļ ° +íĽ į +íĽ ½ +íĿ Ł +íĿ Ń +íĿ ´ +íŀ ľ +ï¤ ī +ï¤ Ń +ï¤ ² +ï¤ µ +ï¤ ¼ +ï¥ Ģ +ï¥ ij +ï¥ Ĵ +ï¥ ķ +ï¥ ĺ +ï¥ Ļ +ï¥ « +ï¥ ¬ +ï¥ ° +ï ¥¿ +ï¦ ĭ +ï¦ ı +ï¦ Ķ +ï¦ ĸ +ï¦ ĺ +ï¦ Ľ +ï¦ ł +ï¦ ® +ï¦ ¯ +ï¦ º +ï¦ » +ï¦ ¾ +ï§ Ĩ +ï§ ĸ +ï§ Ľ +ï§ ŀ +ï§ Ł +ï§ § +ï§ ³ +ï§ º +ï§ ½ +ï¨ ĥ +ï¨ ļ +ï¨ ¢ +ï© Ł +ï¬ ¤ +ï¬ ¬ +ï¬ ¼ +ïŃ Ĵ +ïŃ ķ +ïŃ Ľ +ïŃ Ŀ +ïŃ ŀ +ïŃ Ł +ïŃ ¤ +ïŃ § +ïŃ ¨ +ïŃ ® +ïŃ ° +ïŃ ± +ïŃ · +ïŃ ¹ +ïŃ » +ï® Ģ +ï® ĥ +ï® Ħ +ï® ħ +ï® į +ï® Ĵ +ï® ĵ +ï® ķ +ï® ¦ +ï® ® +ï® ° +ï¯ ĵ +ï¯ ľ +ï¯ © +ï¯ ª +ï¯ ¬ +ï¯ Ń +ï¯ ® +ï¯ · +ï¯ ¹ +ï¯ » +ï¯ ¼ +ï° ĥ +ï° Į +ï° IJ +ï° ĺ +ï° Ļ +ï° ľ +ï° ŀ +ï° ¢ +ï° ® +ï° ° +ï° ¼ +ï° ¿ +ï± Ģ +ï± ģ +ï± Ī +ï± ĭ +ï± ı +ï± Ń +ï² Ģ +ï² ĩ +ï² Ī +ï² ĭ +ï² İ +ï² Ĵ +ï² ľ +ï² ł +ï² ¬ +ï² » +ï³ ĩ +ï³ Ķ +ï³ £ +ï³ « +ï´ ĺ +ï´ ° +ï´ ½ +ï ¶ +ï¶ ° +ï¸ ĸ +ï¸ ´ +ï¸ ¹ +ï¹ į +ï¹ Ĺ +ï¹ ¢ +ï¹ ¤ +ï¹ © +ï¹ ± +ï¾ ° +ï¿ Ĥ +ï¿ ® +ðIJĮ ° +ðIJĮ ¹ +ðIJĮ º +ðIJĮ ½ +ðIJį Ĥ +ðIJį ĥ +ðIJį Ħ +ðIJ İ +ðIJİ ¹ +ðIJ¤ Ĥ +ðIJ¤ į +ðIJ¤ ı +ðIJ¤ ĵ +ðIJŃ ī +ðIJŃ į +ðIJ° ĩ +ðIJ° ° +ðij Ĥ +ðijĤ Ħ +ðij ĺ +ðijĺ ģ +ðĴ Ģ +ðĴĢ ¸ +ðĴ ģ +ðĴģ º +ðĴ Ħ +ðĴĦ · +ðĴ Ĭ +ðĴĬ ij +ðĴ ĭ +ðĴĭ Ĺ +ð ĴĮ +ðĴĮ ¨ +ðĵĥ ¢ +ðĵĥ ° +ðĸ ł +ðĸł ļ +ðĿĦ ĥ +ðĿĦ ħ +ðĿĦ ķ +ðĿĦ Ļ +ðĿĦ ± +ðĿĦ ´ +ðĿĦ ¹ +ðĿħ İ +ðĿħ ª +ðĿĨ £ +ðĿĨ ³ +ðĿĨ ¹ +ðĿĩ Ĭ +ðĿĩ Ĺ +ðĿĩ ļ +ðĿĩ ľ +ðĿĩ ł +ðĿIJ ī +ðĿIJ ĸ +ðĿIJ ĺ +ðĿIJ £ +ðĿIJ ± +ðĿij Ĭ +ðĿij Ń +ðĿij ¼ +ðĿij 
½ +ðĿĴ ° +ðĿĴ · +ðĿĴ ¿ +ðĿĵ ģ +ðĿĵ ĭ +ðĿĵ İ +ðĿĵ Ĵ +ðĿ ĵĺ +ðĿĵ ¢ +ðĿĵ ¦ +ðĿĵ « +ðĿĵ ¿ +ðĿĶ İ +ðĿĶ ± +ðĿĶ ´ +ðĿĶ · +ðĿĶ ¸ +ðĿĶ ½ +ðĿķ Ĥ +ðĿķ ĥ +ðĿķ ĭ +ðĿķ ı +ðĿķ IJ +ðĿķ ¥ +ðĿķ ´ +ðĿķ º +ðĿĸ IJ +ðĿĸ Ľ +ðĿĸ Ŀ +ðĿĸ ŀ +ðĿĹ © +ðĿĹ ³ +ðĿĹ ½ +ðĿĺ Ĭ +ðĿĺ ĭ +ðĿĺ Ķ +ðĿĺ ± +ðĿĺ ´ +ðĿĺ ¿ +ðĿĻ Ĵ +ðĿĻ Ŀ +ðĿĻ Ł +ðĿĻ ¬ +ðĿĻ Ń +ðĿĻ » +ðĿĻ ¾ +ðĿļ Ī +ðĿļ ĭ +ðĿļ ij +ðĿļ Ł +ðĿļ ł +ðĿļ £ +ðĿĽ ½ +ðĿľ Ĥ +ðĿľ Ķ +ðĿľ Ļ +ðŁ Ģ +ðŁĢ Ħ +ðŁĦ ² +ðŁĦ ¶ +ðŁħ IJ +ðŁħ ĸ +ðŁħ ļ +ðŁħ Ľ +ðŁħ ¦ +ðŁħ ¶ +ðŁħ » +ðŁħ ¼ +ðŁĨ ĥ +ðŁĨ Ĩ +ðŁĨ İ +ðŁĪ ¯ +ðŁĪ ² +ðŁĪ ¹ +ðŁĮ ĩ +ðŁĮ ĵ +ðŁį ĺ +ðŁİ ij +ðŁİ ¿ +ðŁı ı +ðŁı Ĵ +ðŁı © +ðŁı ¯ +ðŁIJ Ģ +ðŁij Ŀ +ðŁĴ ¹ +ðŁĴ º +ðŁĵ Ł +ðŁĵ ª +ðŁĵ ¼ +ðŁĶ Ģ +ðŁĶ Ĥ +ðŁĶ ĥ +ðŁĶ ĩ +ðŁĶ ĵ +ðŁĶ ¢ +ðŁĶ ¤ +ðŁĶ © +ðŁķ ĸ +ðŁķ ļ +ðŁķ ľ +ðŁķ Ŀ +ðŁķ ŀ +ðŁķ ł +ðŁķ ¢ +ðŁķ ³ +ðŁĸ ĩ +ðŁĸ ij +ðŁĸ ¶ +ðŁĹ ģ +Ñ ¨ +Ú İ +á¡ Į +Ḡ° +áº Ģ +á¼ ® +á½ Ŀ +âĦ ¬ +âļ § +⼠¤ +ã³ ¬ +êĻ ĭ +ê¸ ij +ëĶ ī +ëĹ į +ë¡ ij +ë¯ ij +ë» ħ +ë¼ Ŀ +ìĦ IJ +ìī ¡ +ìĭ ² +ìı ± +ìĹ ¤ +ìĿ © +ìĿ ¿ +ìŁ Ļ +ìł ° +ì¥ ī +íĬ Ń +íķ ® +ï® ı +ðŁħ ± +ðŁĨ Ĵ +ðŁķ ĭ +É ĺ +Ê ĵ +Õ ĥ +à´ ´ +འħ +áĨ º +áĪ Ĭ +áĪ ¨ +áĪ ¾ +áī IJ +áĮ ĥ +áĮ ½ +áĶ Ń +áł Ĥ +áł ¬ +ᨠ¸ +á© ĭ +á¶ ı +á¾ Ķ +á¿ IJ +á¿ ļ +âĻ Ļ +âļ Ĥ +âļ Ĺ +â¡ ¢ +⤠¦ +ëĸ ° +ë¤ Ĥ +ë§ ł +ë± ĭ +ë± IJ +ìĽ ¢ +ìľ ¾ +ì³ ħ +ì» ģ +íģ » +íĥ Ļ +íĵ ĸ +íĵ Ń +íķ ± +íĽ ľ +ï¤ ħ +ï¤ Ĩ +ï¦ ĥ +ï§ © +ï¨ Ĥ +ðIJ¤ Ķ +ðIJŃ ĵ +ðIJ° ¼ +ðĿĵ ŀ +ðĿĵ ° +ðĿĻ ľ +ðĿļ ģ +ðŁħ ¢ +ðŁı ĩ +È ² +Ê ¶ +Ô Ī +Ô ij +Ý ĵ +Ý ¥ +ठij +ॠ± +ଠī +à° ³ +à° µ +à² Ł +áĢ ı +áģ ¼ +áī ¨ +áĬ Ĵ +áĭ © +áĮ Ħ +áĮ Ķ +áIJ § +á ĴĮ +áĶ ħ +áĶ Ĭ +áł Ħ +ᨠģ +Ḡĥ +Ḡ» +âĶ ŀ +âĺ µ +âļ £ +â² ¢ +ãĪ ª +ä¶ µ +ê² Ļ +ê² ´ +ê³ Ĥ +ë¡ ¼ +ìĨ Ĭ +ì¼ ĩ +íĭ į +íĵ ¬ +íĵ ® +íĵ ¶ +íĵ » +ï¤ ¦ +ï¥ ł +ï¥ ± +ïŃ ² +ðIJŃ Ĭ +ðIJ ±ħ +ðĸ ¥ +ðĸ¥ ¨ +ðĿij ³ +ðĿĵ ķ +ðĿĵ ¬ +ðĿĵ ¹ +ðĿĵ ¾ +ðĿĶ ĵ +ðĿķ į +ðĿķ ¡ +ðĿķ ± +ðĿĸ ĸ +ðĿĺ ı +ðĿĺ IJ +ðĿĺ ļ +ðĿĻ ® +ðĿĻ ° +ðĿĻ ¸ +ðĿĻ º +ðĿĻ ¼ +ðĿĻ ½ +ðĿĻ ¿ +ðĿļ Ħ +ðĿļ ı +ðŁħ ħ +ðŁħ ĵ +Æ Ī +àł Į +áĻ ³ +á ļĮ +ἠħ +ἠIJ +ᤠĬ +ḠĬ +âĶ ½ +âķ Ĭ +⼠ĩ +⼠ı +âĿ ª +âĿ « +⣠° +ãĦ į +ãĦ ĵ +ãĦ § +ãħ ĸ +ãī « +ê¦ Ķ +ï± Ĭ +ຠĤ +áħ £ +á¥ Ķ +ᥠ¤ +âĨ ¤ +âĨ · +âĩ ŀ +âĸ ¤ +âŀ ¶ +ãĪ ¼ +ï¨ · +ðĵı § +âĶ ² +âĢ ´ +âĴ Ł +âĴ ¡ +â° Ĥ +â° į +â° İ +â° IJ +â° ij +â° Ł +â° ł +â° ¡ +â¼ Ń +ãĬ ¥ +âĴ ł +â½ º +ãĩ º +ãĩ ½ +ï¨ Ĭ +áķ · +âį ¨ +âº Ł +â½ Ĺ diff --git a/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json b/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json new file mode 100644 index 000000000..67688e82c --- /dev/null +++ b/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json @@ -0,0 +1,241 @@ +{ + "add_bos_token": false, + "add_prefix_space": false, + "added_tokens_decoder": { + "151643": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151644": { + "content": "<|im_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151645": { + "content": "<|im_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151646": { + "content": "<|object_ref_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151647": { + "content": "<|object_ref_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151648": { + "content": "<|box_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151649": { + "content": "<|box_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true 
+ }, + "151650": { + "content": "<|quad_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151651": { + "content": "<|quad_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151652": { + "content": "<|vision_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151653": { + "content": "<|vision_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151654": { + "content": "<|vision_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151655": { + "content": "<|image_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151656": { + "content": "<|video_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151657": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151658": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151659": { + "content": "<|fim_prefix|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151660": { + "content": "<|fim_middle|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151661": { + "content": "<|fim_suffix|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151662": { + "content": "<|fim_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151663": { + "content": "<|repo_name|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151664": { + "content": "<|file_sep|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151665": { + "content": "<|img|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151666": { + "content": "<|endofimg|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151667": { + "content": "<|meta|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151668": { + "content": "<|endofmeta|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|im_start|>", + "<|im_end|>", + "<|object_ref_start|>", + "<|object_ref_end|>", + "<|box_start|>", + "<|box_end|>", + "<|quad_start|>", + "<|quad_end|>", + "<|vision_start|>", + "<|vision_end|>", + "<|vision_pad|>", + "<|image_pad|>", + "<|video_pad|>" + ], + "bos_token": null, + "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' 
}}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n", + "clean_up_tokenization_spaces": false, + "eos_token": "<|im_end|>", + "errors": "replace", + "extra_special_tokens": {}, + "model_max_length": 131072, + "pad_token": "<|endoftext|>", + "processor_class": "Qwen2_5_VLProcessor", + "split_special_tokens": false, + "tokenizer_class": "Qwen2Tokenizer", + "unk_token": null +} diff --git a/comfy/text_encoders/qwen25_tokenizer/vocab.json b/comfy/text_encoders/qwen25_tokenizer/vocab.json new file mode 100644 index 000000000..4783fe10a --- /dev/null +++ b/comfy/text_encoders/qwen25_tokenizer/vocab.json @@ -0,0 +1 @@ 
+{"!":0,"\"":1,"#":2,"$":3,"%":4,"&":5,"'":6,"(":7,")":8,"*":9,"+":10,",":11,"-":12,".":13,"/":14,"0":15,"1":16,"2":17,"3":18,"4":19,"5":20,"6":21,"7":22,"8":23,"9":24,":":25,";":26,"<":27,"=":28,">":29,"?":30,"@":31,"A":32,"B":33,"C":34,"D":35,"E":36,"F":37,"G":38,"H":39,"I":40,"J":41,"K":42,"L":43,"M":44,"N":45,"O":46,"P":47,"Q":48,"R":49,"S":50,"T":51,"U":52,"V":53,"W":54,"X":55,"Y":56,"Z":57,"[":58,"\\":59,"]":60,"^":61,"_":62,"`":63,"a":64,"b":65,"c":66,"d":67,"e":68,"f":69,"g":70,"h":71,"i":72,"j":73,"k":74,"l":75,"m":76,"n":77,"o":78,"p":79,"q":80,"r":81,"s":82,"t":83,"u":84,"v":85,"w":86,"x":87,"y":88,"z":89,"{":90,"|":91,"}":92,"~":93,"¡":94,"¢":95,"£":96,"¤":97,"¥":98,"¦":99,"§":100,"¨":101,"©":102,"ª":103,"«":104,"¬":105,"®":106,"¯":107,"°":108,"±":109,"²":110,"³":111,"´":112,"µ":113,"¶":114,"·":115,"¸":116,"¹":117,"º":118,"»":119,"¼":120,"½":121,"¾":122,"¿":123,"À":124,"Á":125,"Â":126,"Ã":127,"Ä":128,"Å":129,"Æ":130,"Ç":131,"È":132,"É":133,"Ê":134,"Ë":135,"Ì":136,"Í":137,"Î":138,"Ï":139,"Ð":140,"Ñ":141,"Ò":142,"Ó":143,"Ô":144,"Õ":145,"Ö":146,"×":147,"Ø":148,"Ù":149,"Ú":150,"Û":151,"Ü":152,"Ý":153,"Þ":154,"ß":155,"à":156,"á":157,"â":158,"ã":159,"ä":160,"å":161,"æ":162,"ç":163,"è":164,"é":165,"ê":166,"ë":167,"ì":168,"í":169,"î":170,"ï":171,"ð":172,"ñ":173,"ò":174,"ó":175,"ô":176,"õ":177,"ö":178,"÷":179,"ø":180,"ù":181,"ú":182,"û":183,"ü":184,"ý":185,"þ":186,"ÿ":187,"Ā":188,"ā":189,"Ă":190,"ă":191,"Ą":192,"ą":193,"Ć":194,"ć":195,"Ĉ":196,"ĉ":197,"Ċ":198,"ċ":199,"Č":200,"č":201,"Ď":202,"ď":203,"Đ":204,"đ":205,"Ē":206,"ē":207,"Ĕ":208,"ĕ":209,"Ė":210,"ė":211,"Ę":212,"ę":213,"Ě":214,"ě":215,"Ĝ":216,"ĝ":217,"Ğ":218,"ğ":219,"Ġ":220,"ġ":221,"Ģ":222,"ģ":223,"Ĥ":224,"ĥ":225,"Ħ":226,"ħ":227,"Ĩ":228,"ĩ":229,"Ī":230,"ī":231,"Ĭ":232,"ĭ":233,"Į":234,"į":235,"İ":236,"ı":237,"IJ":238,"ij":239,"Ĵ":240,"ĵ":241,"Ķ":242,"ķ":243,"ĸ":244,"Ĺ":245,"ĺ":246,"Ļ":247,"ļ":248,"Ľ":249,"ľ":250,"Ŀ":251,"ŀ":252,"Ł":253,"ł":254,"Ń":255,"ĠĠ":256,"ĠĠĠĠ":257,"in":258,"Ġt":259,"ĠĠĠĠĠĠĠĠ":260,"er":261,"ĠĠĠ":262,"on":263,"Ġa":264,"re":265,"at":266,"st":267,"en":268,"or":269,"Ġth":270,"ĊĊ":271,"Ġc":272,"le":273,"Ġs":274,"it":275,"an":276,"ar":277,"al":278,"Ġthe":279,";Ċ":280,"Ġp":281,"Ġf":282,"ou":283,"Ġ=":284,"is":285,"ĠĠĠĠĠĠĠ":286,"ing":287,"es":288,"Ġw":289,"ion":290,"ed":291,"ic":292,"Ġb":293,"Ġd":294,"et":295,"Ġm":296,"Ġo":297,"ĉĉ":298,"ro":299,"as":300,"el":301,"ct":302,"nd":303,"Ġin":304,"Ġh":305,"ent":306,"id":307,"Ġn":308,"am":309,"ĠĠĠĠĠĠĠĠĠĠĠ":310,"Ġto":311,"Ġre":312,"--":313,"Ġ{":314,"Ġof":315,"om":316,");Ċ":317,"im":318,"čĊ":319,"Ġ(":320,"il":321,"//":322,"Ġand":323,"ur":324,"se":325,"Ġl":326,"ex":327,"ĠS":328,"ad":329,"Ġ\"":330,"ch":331,"ut":332,"if":333,"**":334,"Ġ}":335,"em":336,"ol":337,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":338,"th":339,")Ċ":340,"Ġ{Ċ":341,"Ġg":342,"ig":343,"iv":344,",Ċ":345,"ce":346,"od":347,"Ġv":348,"ate":349,"ĠT":350,"ag":351,"ay":352,"Ġ*":353,"ot":354,"us":355,"ĠC":356,"Ġst":357,"ĠI":358,"un":359,"ul":360,"ue":361,"ĠA":362,"ow":363,"Ġ'":364,"ew":365,"Ġ<":366,"ation":367,"()":368,"Ġfor":369,"ab":370,"ort":371,"um":372,"ame":373,"Ġis":374,"pe":375,"tr":376,"ck":377,"âĢ":378,"Ġy":379,"ist":380,"----":381,".ĊĊ":382,"he":383,"Ġe":384,"lo":385,"ĠM":386,"Ġbe":387,"ers":388,"Ġon":389,"Ġcon":390,"ap":391,"ub":392,"ĠP":393,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":394,"ass":395,"int":396,">Ċ":397,"ly":398,"urn":399,"Ġ$":400,";ĊĊ":401,"av":402,"port":403,"ir":404,"->":405,"nt":406,"ction":407,"end":408,"Ġde":409,"ith":410,"out":411,"turn":412,"our":413,"ĠĠĠĠĠ":414,"lic":415,"res":416,"pt":417,"==":418,"Ġthis":419,"Ġwh":420,"Ġif":421,"ĠD":42
2,"ver":423,"age":424,"ĠB":425,"ht":426,"ext":427,"=\"":428,"Ġthat":429,"****":430,"ĠR":431,"Ġit":432,"ess":433,"ĠF":434,"Ġr":435,"os":436,"and":437,"Ġas":438,"ect":439,"ke":440,"rom":441,"Ġ//":442,"con":443,"ĠL":444,"(\"":445,"qu":446,"lass":447,"Ġwith":448,"iz":449,"de":450,"ĠN":451,"Ġal":452,"op":453,"up":454,"get":455,"Ġ}Ċ":456,"ile":457,"Ġan":458,"ata":459,"ore":460,"ri":461,"Ġpro":462,";čĊ":463,"ĉĉĉĉ":464,"ter":465,"ain":466,"ĠW":467,"ĠE":468,"Ġcom":469,"Ġreturn":470,"art":471,"ĠH":472,"ack":473,"import":474,"ublic":475,"Ġor":476,"est":477,"ment":478,"ĠG":479,"able":480,"Ġ-":481,"ine":482,"ill":483,"ind":484,"ere":485,"::":486,"ity":487,"Ġ+":488,"Ġtr":489,"elf":490,"ight":491,"('":492,"orm":493,"ult":494,"str":495,"..":496,"\",":497,"Ġyou":498,"ype":499,"pl":500,"Ġnew":501,"Ġj":502,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":503,"Ġfrom":504,"Ġex":505,"ĠO":506,"ld":507,"Ġ[":508,"oc":509,":Ċ":510,"Ġse":511,"Ġle":512,"--------":513,".s":514,"{Ċ":515,"',":516,"ant":517,"Ġat":518,"ase":519,".c":520,"Ġch":521,"":589,"ust":590,"que":591,"Ġres":592,"))":593,"'s":594,"Ġk":595,"ans":596,"yst":597,"unction":598,"********":599,"Ġi":600,"Ġus":601,"pp":602,"one":603,"ail":604,"====":605,"name":606,"Ġstr":607,"Ġ/":608,"Ġ&":609,"ach":610,"div":611,"ystem":612,"ell":613,"Ġhave":614,"err":615,"ould":616,"ull":617,"pon":618,"ĠJ":619,"_p":620,"Ġ==":621,"ign":622,"St":623,".Ċ":624,"Ġpl":625,");ĊĊ":626,"form":627,"put":628,"ount":629,"}ĊĊ":630,"dd":631,"ite":632,"Ġget":633,"rr":634,"ome":635,"ĠâĢ":636,"aram":637,"cc":638,"Ġ*/":639,"ER":640,"In":641,"les":642,"_s":643,"ong":644,"ie":645,"Ġcan":646,"ĠV":647,"erv":648,"pr":649,"Ġun":650,"row":651,"ber":652,"Ġdo":653,"ll":654,"Ġel":655,"Ġself":656,"ated":657,"ary":658,"Ġ.":659,"']":660,"ud":661,"Ġen":662,"ĠTh":663,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":664,"te":665,"_c":666,"uct":667,"Ġab":668,"ork":669,".get":670,"Ġ#":671,"aw":672,"ress":673,"ob":674,"Name":675,"app":676,"['":677,"Ġall":678,"ory":679,"ition":680,"ance":681,"ear":682,"Ġcont":683,"vent":684,"ia":685,"Ġwill":686,"IN":687,"ĠĠĠĠĠĠĠĠĠ":688,"return":689,"Ġ":755,"\",Ċ":756,"ec":757,"ĠIn":758,"ph":759,"Ġ|":760,"_f":761,"Ġvar":762,"ence":763,"Id":764,"ree":765,"ink":766,"lect":767,"ug":768,"eth":769,"Ġelse":770,"----------------":771,"cont":772,"Ġso":773,"atic":774,"Ġlo":775,"pro":776,"ton":777,"ss":778,"own":779,"abel":780,"oint":781,"ous":782,"eld":783,"ST":784,"The":785,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":786,"RE":787,"\":":788,"olor":789,"tp":790,"eg":791,"key":792,"ude":793,"ĠSt":794,"ound":795,"Ġar":796,"\");Ċ":797,"ener":798,"ser":799,"bject":800,"essage":801,"fer":802,"Ġmore":803,"ations":804,"ents":805,"Ġhis":806,"Ġthey":807,".S":808,"ĠY":809,"use":810,"ne":811,"ish":812,"old":813,"_d":814,"io":815,"ield":816,"Ġper":817,"Cont":818,"ings":819,"####":820,"Ġdata":821,"Ġsa":822,"ef":823,"fo":824,"Ġone":825,"eng":826,"Ġdis":827,"AT":828,"Ġname":829,"Ġtrue":830,"val":831,"led":832,".f":833,"Ġne":834,"Ġend":835,".T":836,"cre":837,"ark":838,"log":839,"Ex":840,"error":841,"_id":842,"urre":843,"ange":844,"Ġnull":845,"rray":846,"Ġmy":847,"pan":848,"ict":849,"ator":850,"View":851,"List":852,"ĉreturn":853,"âĢĿ":854,"Ġpre":855,"Ġx":856,"clude":857,"arg":858,"ov":859,".h":860,"Ġ>":861,"Ġtheir":862,"')":863,"irst":864,"ick":865,"gh":866,"LE":867,"OR":868,"Ġprivate":869,"tem":870,"čĊčĊ":871,"user":872,"Ġ)":873,"com":874,".A":875,"\";Ċ":876,"Ġid":877,"read":878,"Ġwho":879,"_b":880,"\">Ċ":881,"Ġtime":882,"Ġman":883,"ry":884,"========":885,"roup":886,"rop":887,"public":888,"vel":889,"umber":890,"ble":891,"Ġwhich":892,"****************":893,"Ġany":8
94,"Ġfalse":895,"we":896,"Ġvalue":897,"Ġli":898,"\")":899,"nder":900,"gr":901,"Ġno":902,"param":903,"fig":904,".com":905,"Ġapp":906,"_l":907,"ions":908,".D":909,"ĠCh":910,"Ġabout":911,"Ġadd":912,"Ġsu":913,"Ġstring":914,"ID":915,"Ġover":916,"string":917,".l":918,"ource":919,"_C":920,"]Ċ":921,"Ġqu":922,"ĠString":923,"ca":924,"SE":925,"Ġro":926,"sh":927,"ual":928,"Type":929,"son":930,"new":931,"ern":932,"Ġag":933,"AR":934,"];Ċ":935,"].":936,"Ġ?":937,"ical":938,"Ġdes":939,"uth":940,"ix":941,"ays":942,"Ġtype":943,"'t":944,"ault":945,"Ġinter":946,"var":947,".b":948,"Ġpart":949,".d":950,"urrent":951,"IT":952,"EN":953,"enc":954,"(f":955,"ra":956,"value":957,"cho":958,"utton":959,"ose":960,"Ġ!=":961,"ater":962,"é":963,"reate":964,"oll":965,"pos":966,"yle":967,"ng":968,"AL":969,"using":970,"ames":971,"Ġ{čĊ":972,"ates":973,"ely":974,"Ġwork":975,"Ġem":976,"inal":977,"Ġsp":978,"Ġwhen":979,".set":980,"ĠĠĠĠĠĠ":981,"):Ċ":982,"to":983,"quire":984,"indow":985,"lement":986,"pect":987,"ash":988,"[i":989,"Ġuse":990,".F":991,"pec":992,"Ġad":993,"ove":994,"ception":995,"ength":996,"include":997,"ader":998,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":999,"atus":1000,"Th":1001,"itle":1002,"rit":1003,"void":1004,"().":1005,"(Ċ":1006,"Ġoff":1007,"Ġother":1008,"Ġ&&":1009,"';Ċ":1010,"ms":1011,"Ġbeen":1012,"Ġte":1013,"ml":1014,"co":1015,"nc":1016,"ervice":1017,"Ġ%":1018,"**Ċ":1019,"ann":1020,"ade":1021,"ĊĊĊĊ":1022,"lock":1023,"const":1024,"ponse":1025,"Ġsup":1026,"++":1027,"date":1028,"Ġacc":1029,"Ġhad":1030,"Ġbu":1031,"ĠRe":1032,"Ġwere":1033,"Ġfile":1034,"Ġwould":1035,"ĠâĢľ":1036,"ven":1037,"iss":1038,"Ġour":1039,"class":1040,"raw":1041,"Ġyear":1042,"Data":1043,"Ġval":1044,"Ġsome":1045,"fter":1046,"ys":1047,"Ġ///":1048,"round":1049,"view":1050,"Ġpe":1051,"Ġthere":1052,"Ġsaid":1053,"du":1054,"of":1055,"line":1056,"/*":1057,"duct":1058,"Ġher":1059,"ĠĠĠĠĠĠĠĠĠĠĠĠĠ":1060,"Res":1061,"Ġco":1062,"Ġcomm":1063,"ise":1064,"min":1065,"ĠĠĠĠĊ":1066,"#include":1067,"ethod":1068,".P":1069,"ute":1070,"Ġass":1071,"Int":1072,"ask":1073,"loc":1074,"Ġlike":1075,"ody":1076,"Ġlet":1077,"load":1078,"Ġam":1079,"rol":1080,"Ġgr":1081,"yp":1082,"Ġalso":1083,"ĠIt":1084,"url":1085,"ific":1086,"ors":1087,"_P":1088,"_n":1089,"igh":1090,"Ġthan":1091,"Com":1092,"AN":1093,"UL":1094,"ating":1095,"ĠThis":1096,"ref":1097,"_S":1098,"Ġstatic":1099,"roll":1100,"Ġjust":1101,"Ġresult":1102,"ian":1103,"idth":1104,"Ġthem":1105,"));Ċ":1106,"der":1107,"reak":1108,"Con":1109,"://":1110,"ule":1111,"...":1112,"arch":1113,"ement":1114,"Ġ<<":1115,"ush":1116,"ense":1117,"arr":1118,"Ġinto":1119,"cess":1120,"amp":1121,"ied":1122,"ument":1123,"Ġ\\":1124,"],":1125,"wo":1126,"als":1127,"Ġwhat":1128,"anc":1129,"Value":1130,"='":1131,"olum":1132,"Ġpos":1133,"ages":1134,"ayer":1135,"Ġsc":1136,"ues":1137,"\")Ċ":1138,"_T":1139,"Ġlist":1140,"(s":1141,"Ġcase":1142,"Ch":1143,"ĉĉĉĉĉ":1144,"////////":1145,"ponent":1146,"Ġz":1147,"Ġkn":1148,"let":1149,"DE":1150,"red":1151,"Ġfe":1152,"Ġ},Ċ":1153,"Ġ,":1154,"(t":1155,"Ġfirst":1156,"');Ċ":1157,"word":1158,"Ġimport":1159,"Ġact":1160,"Ġchar":1161,"CT":1162,"ĠTr":1163,"ople":1164,"={":1165,"ĉf":1166,"ient":1167,"cent":1168,".j":1169,"lection":1170,"))Ċ":1171,"Ġonly":1172,"Ġprint":1173,"mer":1174,".W":1175,"ock":1176,"Ġ--":1177,"Text":1178,"Ġop":1179,"ank":1180,"Ġits":1181,"Ġback":1182,"[\"":1183,"Ġneed":1184,"Ġcl":1185,"Ġsub":1186,"Ġla":1187,"((":1188,".\"":1189,"Object":1190,"Ġstart":1191,"file":1192,"(self":1193,"ner":1194,"ey":1195,"Ġuser":1196,"Ġent":1197,"ĠCom":1198,"its":1199,"ĠCon":1200,"ouble":1201,"ower":1202,"item":1203,"very":1204,"ĠWe":1205,"lick
":1206,"ĠQ":1207,"php":1208,"ttp":1209,"':":1210,"ics":1211,"Ġunder":1212,"Ġ*Ċ":1213,".L":1214,");":1215,"ices":1216,"Ġreg":1217,")čĊ":1218,"ĉpublic":1219,"SS":1220,"Ġthen":1221,"reat":1222,"ious":1223,".G":1224,"ek":1225,"irect":1226,"heck":1227,"cript":1228,"ning":1229,"ĠUn":1230,"Ġmay":1231,"ĠWh":1232,"Bo":1233,"Item":1234,"struct":1235,".st":1236,"ream":1237,"ible":1238,"loat":1239,"Ġorg":1240,"und":1241,"sum":1242,"_in":1243,"../":1244,"_M":1245,"Ġhow":1246,"rite":1247,"'Ċ":1248,"To":1249,"ww":1250,"Ġpeople":1251,"index":1252,".n":1253,"http":1254,"(m":1255,"ector":1256,"Ġind":1257,"Ġjav":1258,"],Ċ":1259,"ĠHe":1260,"_st":1261,"ful":1262,"ole":1263,"){Ċ":1264,"Ġshould":1265,"opy":1266,"elp":1267,"ier":1268,"_name":1269,"erson":1270,"ION":1271,"ote":1272,"Ġtest":1273,"Ġbet":1274,"rror":1275,"ular":1276,"ãĢ":1277,"ĠÐ":1278,"bs":1279,"ting":1280,"Ġmake":1281,"Tr":1282,"Ġafter":1283,"arget":1284,"RO":1285,"olumn":1286,"rc":1287,"_re":1288,"define":1289,"Ġright":1290,"right":1291,"day":1292,"Ġlong":1293,"[]":1294,"(p":1295,"td":1296,"cond":1297,"ĠPro":1298,"Ġrem":1299,"ptions":1300,"vid":1301,".g":1302,"Ġext":1303,"Ġ__":1304,"')Ċ":1305,"pace":1306,"mp":1307,"Ġmin":1308,"stance":1309,"air":1310,"action":1311,"wh":1312,"type":1313,"util":1314,"ait":1315,"ĊĊ":1339,"Ġshe":1340,"\"]":1341,"aph":1342,"Ġexp":1343,"erty":1344,"ĠSe":1345,"Ġpar":1346,"unc":1347,"ET":1348,"Ġread":1349,"print":1350,"Ġrel":1351,"Ġform":1352,"Ġdr":1353,"Exception":1354,"input":1355,"Ġtrans":1356,"########":1357,"order":1358,"By":1359,"Ġaw":1360,"ities":1361,"uff":1362,"play":1363,".add":1364,"ĠâĢĵ":1365,"Ġwant":1366,"Ġcomp":1367,"ments":1368,"Ġ||":1369,"az":1370,"be":1371,"Ġnumber":1372,"Ġrequire":1373,"ĠEx":1374,"Ġcol":1375,"Ġkey":1376,"ember":1377,"Ġtwo":1378,"Ġsize":1379,"Ġwhere":1380,"UT":1381,"result":1382,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":1383,"ough":1384,"orld":1385,"ood":1386,"uch":1387,"ative":1388,"ger":1389,"arent":1390,"Ġ/*":1391,"Ġarg":1392,"Ġwhile":1393,"(this":1394,"Ġrec":1395,"Ġdif":1396,"State":1397,"Ġspec":1398,"ride":1399,"_F":1400,"Ġlook":1401,"AM":1402,"ility":1403,"eter":1404,"âĢĻt":1405,"ĊĊĊ":1406,"ayout":1407,"--------------------------------":1408,"ager":1409,"Ġcould":1410,"Ġbr":1411,"ends":1412,"ures":1413,"Ġknow":1414,"ets":1415,"ĠIf":1416,"ĠSh":1417,".w":1418,"back":1419,"Ġser":1420,"Ġ+=":1421,"Ġfr":1422,"());Ċ":1423,"Ġhand":1424,"Ind":1425,"ULL":1426,"Im":1427,"();ĊĊ":1428,"Ġmost":1429,"Ġtry":1430,"Ġnow":1431,"rough":1432,">čĊ":1433,"ackage":1434,"Ġhim":1435,"._":1436,"ify":1437,"Ġbreak":1438,"Ġ);Ċ":1439,"ren":1440,"#define":1441,"itt":1442,"Ġap":1443,"ĉc":1444,"(n":1445,"ĠYou":1446,":ĊĊ":1447,"-m":1448,"Ġevery":1449,"ustom":1450,"lient":1451,"ocument":1452,"cription":1453,"Error":1454,"-b":1455,"о":1456,"][":1457,"trans":1458,"Ġpoint":1459,"Ġstd":1460,"Ġfil":1461,"Time":1462,"Ġmod":1463,"Ġ->":1464,"Ġerror":1465,"ah":1466,"Ġtext":1467,"roller":1468,"lose":1469,"ql":1470,"Ġpol":1471,"><":1784,".B":1785,"-c":1786,"Ġopen":1787,"Ġest":1788,"ĠĠĠĠĠĠĠĠĊ":1789,"Ġnext":1790,"IM":1791,"ÑĤ":1792,"OT":1793,"ó":1794,"Ġfollow":1795,"content":1796,"ĠĠĠĠĠĠĠĠĠĠĠĠ":1797,"Ġinclud":1798,"HE":1799,"ĠRes":1800,"Ġhref":1801,"и":1802,"Ġcar":1803,"ypes":1804,"image":1805,"Un":1806,"Ġbool":1807,"AD":1808,"Ġgame":1809,".Form":1810,"rows":1811,"*/":1812,"velop":1813,".Drawing":1814,"Ġpath":1815,"ision":1816,"Ġeach":1817,"ĠPl":1818,"_type":1819,"Path":1820,"nection":1821,"Ġav":1822,"').":1823,"Ġsupport":1824,"ENT":1825,"rem":1826,"\").":1827,"Ġown":1828,"Ġcor":1829,"count":1830,"miss":1831,"ually":1832,"Ġmem":1833,"std":1
834,"ience":1835,"search":1836,"\"ĊĊ":1837,"Form":1838,"Ġsex":1839,"ename":1840,"Ġsign":1841,"Ġet":1842,"ĠĠĠĠĠĠĠĠĠĠ":1843,"','":1844,"ĠApp":1845,"Ġthose":1846,"off":1847,"Ġerr":1848,"Ġsystem":1849,"Ġbest":1850,"code":1851,"Ġsame":1852,"Ġdi":1853,"uss":1854,"Ġcreate":1855,"ather":1856,"Array":1857,".in":1858,"fe":1859,"Service":1860,"UN":1861,"ats":1862,"ĠZ":1863,"alth":1864,"Ġmade":1865,"true":1866,"AB":1867,"Ġmark":1868,"rid":1869,"ified":1870,",čĊ":1871,"yn":1872,"press":1873,"Ġgroup":1874,"Ġfin":1875,"ĠLicense":1876,"Field":1877,"eger":1878,"Ġworld":1879,"iness":1880,"ty":1881,"Ġprocess":1882,"(b":1883,"Ġcre":1884,"arn":1885,"ives":1886,"Ġmain":1887,"ideo":1888,"_g":1889,"AG":1890,"valid":1891,"img":1892,"PI":1893,"Ġcolor":1894,"Ġreport":1895,"Ġtake":1896,"rib":1897,"OM":1898,"Ġday":1899,"Request":1900,"Ġsk":1901,"bers":1902,"ĉs":1903,".Add":1904,"oot":1905,"Image":1906,"Ġcomple":1907,"ollection":1908,"Ġtop":1909,"Ġfree":1910,"AS":1911,"De":1912,"ĠOn":1913,"IG":1914,"eta":1915,"Date":1916,"Ġaction":1917,"Over":1918,"itor":1919,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":1920,"not":1921,"Ġindex":1922,"her":1923,"icon":1924,"On":1925,";čĊčĊ":1926,"ivity":1927,"mand":1928,".Windows":1929,"OL":1930,"Ġreal":1931,"Ġmax":1932,"land":1933,"....":1934,"raph":1935,"Ġbuild":1936,"leg":1937,"assword":1938,"?ĊĊ":1939,"â̦":1940,"ook":1941,"uck":1942,"Ġmessage":1943,"test":1944,"ivers":1945,"Ġinput":1946,"Ġart":1947,"Ġbetween":1948,"Get":1949,"enter":1950,"ground":1951,"ene":1952,"á":1953,".length":1954,"Node":1955,"(i":1956,"Class":1957,"for":1958,"ĠâĢĶ":1959,"ten":1960,"oin":1961,"Ġke":1962,"ui":1963,"ĠIN":1964,"Ġtable":1965,"sub":1966,"ĠLe":1967,"Ġhead":1968,"Ġmust":1969,"////////////////":1970,".util":1971,"Context":1972,"Ġorder":1973,"Ġmov":1974,"over":1975,"Ġcontin":1976,"Ġsay":1977,"static":1978,".Text":1979,"ĠclassName":1980,"pany":1981,"Ġter":1982,"head":1983,"rg":1984,"Ġproduct":1985,"This":1986,".âĢĿ":1987,"ĠBut":1988,"loy":1989,"Ġdouble":1990,"sg":1991,"Ġplace":1992,".x":1993,"message":1994,"Ġinformation":1995,"private":1996,"Ġoper":1997,"ced":1998,"db":1999,"\">":2179,"aterial":2180,"iled":2181,"Ġput":2182,"Qu":2183,"ÑĢ":2184,"ung":2185,"map":2186,"ĉĉĉĉĉĉĉĉ":2187,"Ġlevel":2188,"Component":2189,"book":2190,"creen":2191,"_RE":2192,"Ġconfig":2193,"ãģ":2194,"Or":2195,".data":2196,"Ġdocument":2197,"\",\"":2198,"tribute":2199,"ux":2200,"Log":2201,"ference":2202,"post":2203,"_e":2204,"Ġlocal":2205,"andom":2206,"assert":2207,"Val":2208,"lected":2209,"ina":2210,"atabase":2211,"Add":2212,"Ġcontent":2213,".print":2214,"signed":2215,"ric":2216,".\"ĊĊ":2217,"Ġfa":2218,"!ĊĊ":2219,"-f":2220,"ived":2221,"Ġquest":2222,".ex":2223,"Ġfloat":2224,"Ġdevelop":2225,"оÐ":2226,"Map":2227,"ading":2228,"Ġposs":2229,"UE":2230,"namespace":2231,"_O":2232,"ĉb":2233,".Get":2234,">(":2235,"json":2236,"etails":2237,"Ġtoo":2238,"Ġextends":2239,"ĠNone":2240,"Ġfore":2241,"(String":2242,"format":2243,"Ġgreat":2244,"inter":2245,"cale":2246,"Ñģ":2247,"ron":2248,"iving":2249,"Ent":2250,"ency":2251,"xt":2252,"oy":2253,"Ġmonth":2254,"Ġhapp":2255,"Ġsuper":2256,"bar":2257,"default":2258,"_de":2259,"ords":2260,"ln":2261,"({Ċ":2262,"ĠInd":2263,"ases":2264,"Ġtitle":2265,"Ġcontext":2266,"oh":2267,"-p":2268,"Em":2269,"Ġmet":2270,"Test":2271,"Ġlife":2272,"_v":2273,"ĠUS":2274,"UI":2275,"ocation":2276,"md":2277,"Ġ[Ċ":2278,"Ġ]":2279,"sw":2280,"Ġincre":2281,"script":2282,"ential":2283,"ways":2284,".de":2285,"Ġsrc":2286,"Ġcatch":2287,"ĠAmeric":2288,"//Ċ":2289,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":2290,"Ġpay":2291,"plit":2292,"âĢĶ":2293,"Ġcoun":2294,"obj":2295,".php":2296,
"Ġchange":2297,"ething":2298,"'re":2299,"aster":2300,"los":2301,"lation":2302,"ĠĠĊ":2303,"Le":2304,"ä":2305,"({":2306,"ready":2307,"ĠNo":2308,"Ġposition":2309,"Ġold":2310,"Ġbook":2311,"abled":2312,"bug":2313,"Hand":2314,"};ĊĊ":2315,"isplay":2316,"aving":2317,"Ġgover":2318,"Ġversion":2319,"System":2320,"nect":2321,"response":2322,"Style":2323,"Up":2324,"angu":2325,"Ġthree":2326,"init":2327,"ero":2328,"Ġlaw":2329,"endif":2330,"Ġbase":2331,"email":2332,"(l":2333,"_V":2334,"Ġconf":2335,"ATE":2336,"Ġduring":2337,"tes":2338,"Ġconsole":2339,"ĠPr":2340,"Ġspe":2341,"ves":2342,"path":2343,"ialog":2344,"dition":2345,"_to":2346,"ards":2347,"Ġagainst":2348,"etwork":2349,"ĠPh":2350,"_L":2351,"cur":2352,"imit":2353,"With":2354,"Ġpower":2355,"ium":2356,"';ĊĊ":2357,"Ġwom":2358,"left":2359,"ources":2360,"atri":2361,"ĠIm":2362,"ĠMan":2363,"orth":2364,"${":2365,"quals":2366,"ese":2367,"_size":2368,"Ġiss":2369,"otal":2370,"-g":2371,"ique":2372,"rame":2373,"Ġwidth":2374,"erg":2375,")(":2376,"ittle":2377,"TR":2378,"ĠThey":2379,"ences":2380,"rl":2381,"ons":2382,"Ġlabel":2383,".y":2384,"-t":2385,"update":2386,"anel":2387,"sc":2388,".to":2389,"Ġproject":2390,"ü":2391,"Ġelement":2392,"Ġsuccess":2393,"ĉĉĊ":2394,".sh":2395,"ram":2396,"ched":2397,"())Ċ":2398,"Ġ(Ċ":2399,"Ġdate":2400,"Ġtot":2401,"_ST":2402,"All":2403,"ification":2404,"ĉvar":2405,"Ġtri":2406,"chem":2407,"my":2408,"Ġbig":2409,"ĠAd":2410,"ĠAt":2411,"ots":2412,"num":2413,"Act":2414,"Ġmap":2415,"era":2416,"cope":2417,".$":2418,",âĢĿ":2419,"Ġpop":2420,"Ġfew":2421,"Ġlen":2422,"uid":2423,"eters":2424,"ules":2425,"ÃŃ":2426,"source":2427,"https":2428,"Ġdem":2429,"Ġear":2430,"################":2431,"Ġmatch":2432,"ories":2433,"aces":2434,"ĠCl":2435,"Ġnode":2436,"irc":2437,"local":2438,"unity":2439,"};Ċ":2440,"Ġanother":2441,"<<":2442,"ogle":2443,"Ġsit":2444,"ework":2445,"TE":2446,".I":2447,"NS":2448,"ology":2449,"ought":2450,".Cont":2451,">>":2452,"Ġcare":2453,"state":2454,"ĉprivate":2455,"Ġeffect":2456,"++)":2457,"_file":2458,"ending":2459,"Line":2460,"For":2461,"ior":2462,"ĠSc":2463,"Ġfun":2464,".Size":2465,"ĉelse":2466,"])":2467,"start":2468,"vious":2469,"Ġ},":2470,"ours":2471,"Ġleg":2472,"Ġservice":2473,"Ġsince":2474,"iron":2475,"Label":2476,"Ġnon":2477,"Ġlos":2478,"iction":2479,"Ġfull":2480,"acter":2481,"board":2482,"gress":2483,"Ġturn":2484,"ither":2485,".size":2486,"Ġbody":2487,"resh":2488,"eturn":2489,"(_":2490,"yles":2491,"ormal":2492,"pi":2493,"Ġsomething":2494,"!--":2495,"uint":2496,"Ġprodu":2497,"Ġstand":2498,"Ġproble":2499,"Ġavailable":2500,"mt":2501,"ĠBl":2502,"Ġ...":2503,"Ġblock":2504,"Input":2505,"Ġkeep":2506,"Count":2507,"open":2508,"Ġ['":2509,"Ġthrow":2510,"uilder":2511,"Action":2512,"Ġthings":2513,"True":2514,"Ġurl":2515,"ĠBo":2516,"printf":2517,"Ġred":2518,"js":2519,".create":2520,"ĠOr":2521,"Status":2522,"Instance":2523,"Ġcontrol":2524,"Ġcome":2525,"Ġcustom":2526,"location":2527,"model":2528,"ĠčĊ":2529,"Ġsource":2530,"Ġeas":2531,".out":2532,"]ĊĊ":2533,"oney":2534,"Ġawait":2535,"Ġpartic":2536,"AP":2537,"ublish":2538,"odes":2539,"_pro":2540,"ply":2541,"riter":2542,"Ġprov":2543,"Ġmill":2544,"HT":2545,"])Ċ":2546,"Ġchang":2547,"Ġask":2548,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":2549,"Ġoutput":2550,"Ġemail":2551,".push":2552,"Ġ}čĊčĊ":2553,"ination":2554,"atrix":2555,"Table":2556,"uccess":2557,"]);Ċ":2558,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":2559,"Ġdisc":2560,"([":2561,"Ġbusiness":2562,"height":2563,".html":2564,"ta":2565,"field":2566,"Ġrequired":2567,"_R":2568,"Ġgovern":2569,"}čĊčĊ":2570,"lex":2571,".,":2572,"ĠSet":2573,"urch":2574,"///":2575,"ts":2576,"af":2577,
"Ġmight":2578,"istory":2579,"Str":2580,"Ġnever":2581,"Response":2582,"arse":2583,"ada":2584,"ĠHow":2585,"Ġ*)":2586,"Ġ;":2587,"Ġhard":2588,"Ad":2589,"Ġintern":2590,"used":2591,"(data":2592,"mod":2593,"annel":2594,"Ġnp":2595,"ugg":2596,"Ġ/>Ċ":2597,"Ġcalled":2598,"body":2599,"Ġcho":2600,"(r":2601,"_set":2602,"ird":2603,"Ġ>=":2604,"Ġ};Ċ":2605,"Ġoptions":2606,"ĠGener":2607,"Ġheight":2608,"Point":2609,"You":2610,"ety":2611,"Click":2612,"Ġsmall":2613,"Ġide":2614,"Ġaccess":2615,"anguage":2616,"Ġprotected":2617,"Ġjob":2618,"ĠThere":2619,"Def":2620,"Ġaddress":2621,"Ġuint":2622,"Not":2623,"oo":2624,"aps":2625,"":2759,"ĉĠĠĠ":2760,"\"))":2761,"Content":2762,"_W":2763,"plement":2764,"Ġwon":2765,"Ġvideo":2766,"adi":2767,"point":2768,"%%":2769,"Ġgl":2770,"erved":2771,"viron":2772,"IF":2773,"uted":2774,"ãĥ":2775,"'m":2776,"Ġcert":2777,"Ġprof":2778,"Ġcell":2779,"ari":2780,"Ġplayer":2781,"ais":2782,"Ġcost":2783,"Ġhum":2784,"(R":2785,"Ġoffic":2786,"ks":2787,".text":2788,"atures":2789,"Ġtotal":2790,"Ġ*/ĊĊ":2791,"ope":2792,"Ġstat":2793,"UM":2794,"Ġload":2795,"ights":2796,"Ġclear":2797,"uro":2798,"Ġtechn":2799,"upport":2800,"IR":2801,"Ġrow":2802,"Ġseem":2803,"Ġq":2804,"Ġshort":2805,"ĠNot":2806,"ipp":2807,"Group":2808,"section":2809,"max":2810,"irl":2811,"Ġoverride":2812,"Ġcompany":2813,"Ġdone":2814,"\");čĊ":2815,"Ġgre":2816,".Re":2817,"Ġbelie":2818,"rist":2819,"Ġhealth":2820,"ANT":2821,"()ĊĊ":2822,"ĠBe":2823,".value":2824,"ĠGr":2825,"ottom":2826,"Ġargs":2827,"PT":2828,"status":2829,"func":2830,"uments":2831,"-h":2832,"Number":2833,":čĊ":2834,"ĠLog":2835,"erver":2836,"Ġ),Ċ":2837,"ament":2838,"Ġobj":2839,"inc":2840,"Ġchildren":2841,"icy":2842,"IZ":2843,"ands":2844,"ably":2845,"Ġdistrib":2846,"Ġcur":2847,"erial":2848,"Ġdays":2849,"reated":2850,"rect":2851,"-l":2852,"irm":2853,"idden":2854,"omb":2855,"Ġinitial":2856,".js":2857,"Ġâ":2858,"Query":2859,"Ġonline":2860,"imal":2861,".con":2862,"au":2863,"Url":2864,"control":2865,"irection":2866,"Ġinstance":2867,"ORT":2868,"ĠFr":2869,"where":2870,"Ġjavax":2871,"Ġorgan":2872,"apter":2873,"Ġreason":2874,"options":2875,"ĠMar":2876,"(a":2877,"Ġwithin":2878,".âĢĿĊĊ":2879,"ODE":2880,"_DE":2881,"admin":2882,"ended":2883,"Ġdesign":2884,"ĠData":2885,"une":2886,"ĠFile":2887,"root":2888,"Ġcent":2889,"Ġarr":2890,"_add":2891,"len":2892,"page":2893,",'":2894,"_str":2895,"Ġbro":2896,"ability":2897,"outh":2898,"/c":2899,"pose":2900,"irtual":2901,"earch":2902,"_url":2903,"argin":2904,"Http":2905,"Ġschool":2906,"ava":2907,"Ġconsider":2908,".label":2909,"ĠArray":2910,"web":2911,"opt":2912,".println":2913,"ulation":2914,"Ġfunc":2915,"PL":2916,"Ġ\"\\":2917,"ĠText":2918,"actory":2919,"(function":2920,"null":2921,"Ġeng":2922,"down":2923,"Ġinclude":2924,"ĠEn":2925,"ĠDr":2926,"Ġdb":2927,"!!":2928,"side":2929,"Ġinit":2930,"quired":2931,"ĠShe":2932,"Column":2933,"react":2934,"Ġann":2935,"Ġstop":2936,"Ġlater":2937,"ĠThat":2938,"ention":2939,"df":2940,"UG":2941,"ILE":2942,"Ġclient":2943,"raft":2944,"ffer":2945,"POST":2946,"elper":2947,"Ġlove":2948,"quote":2949,"oud":2950,"Ġjson":2951,"Ġable":2952,"Ġmen":2953,"AX":2954,"ĠCopyright":2955,"ö":2956,"avig":2957,"req":2958,"Client":2959,"});Ċ":2960,".Com":2961,"erc":2962,"ilt":2963,"pecial":2964,"_com":2965,"room":2966,".Name":2967,"Ġgive":2968,"amb":2969,"ike":2970,"Ġcondition":2971,"client":2972,"ators":2973,":\"":2974,"Ġcopy":2975,"uture":2976,"iversity":2977,"ernal":2978,"{{":2979,"ĠCan":2980,"ounc":2981,"do":2982,"Ġocc":2983,"Ġappro":2984,"thers":2985,"ze":2986,"Ġeither":2987,"ĠFl":2988,"Ġimportant":2989,"Ġlead":2990,"attr":2991,"ART":2992,"Equal":2993,
"Ġda":2994,"etch":2995,"entity":2996,"Ġfamily":2997,"adding":2998,"Ġoption":2999,"Ġexist":3000,"ica":3001,"ĠObject":3002,"'ve":3003,"vers":3004,"itional":3005,"output":3006,"ĠTrue":3007,"ĠOF":3008,"_time":3009,"Ġoffer":3010,"Ġ});ĊĊ":3011,"HER":3012,"egin":3013,"\"\"":3014,"Ġwater":3015,"Ġche":3016,"ĠMy":3017,"ored":3018,"Ġstep":3019,"ances":3020,"CK":3021,"AY":3022,"à¸":3023,"struction":3024,"(C":3025,"ouch":3026,"Stream":3027,"active":3028,"ama":3029,"Entity":3030,"product":3031,"(){Ċ":3032,"Ġgovernment":3033,"ĠID":3034,"ajor":3035,"And":3036,"Ġdisplay":3037,"л":3038,"Ġtimes":3039,"Ġfour":3040,"Ġfar":3041,"Ġpresent":3042,"ĠNS":3043,"Ġ\\Ċ":3044,"uest":3045,"Ġbas":3046,"echo":3047,"child":3048,"ifier":3049,"Handler":3050,"Ġlib":3051,"Property":3052,"translation":3053,"Ġroom":3054,"Ġonce":3055,"Ġ[]":3056,"center":3057,"================================":3058,"Ġresults":3059,"Ġcontinue":3060,"Ġtalk":3061,"_get":3062,"Ġgrow":3063,".sw":3064,"eb":3065,"ĠPublic":3066,"OP":3067,"ecute":3068,"ols":3069,"Ġ**":3070,"\");ĊĊ":3071,"Ġmass":3072,"ured":3073,".class":3074,"omic":3075,"Ġmean":3076,"ips":3077,"Ġaut":3078,");čĊčĊ":3079,"Ġuntil":3080,"Ġmarket":3081,"Ġarea":3082,"uit":3083,"Ġlength":3084,"ĠWith":3085,"structor":3086,"event":3087,"\"><":3088,"ĠSp":3089,"IV":3090,"Ġmus":3091,"iff":3092,"Ġkind":3093,"author":3094,"ounds":3095,"mb":3096,"_key":3097,"width":3098,"pository":3099,"Ġlight":3100,"uk":3101,"Row":3102,"ohn":3103,"alf":3104,"vironment":3105,"apper":3106,"ollections":3107,"Ġside":3108,"_info":3109,"Ġexample":3110,"imary":3111,"Ġwr":3112,"Ġcamp":3113,"cribe":3114,"\"/":3115,"Ġmiss":3116,"way":3117,"Ġbased":3118,"Ġplan":3119,"Vis":3120,"omain":3121,"unk":3122,"Ġaway":3123,"UP":3124,"":3370,"Ġden":3371,"obile":3372,"change":3373,"ĠĠĠĠĠĠĠĠĠĠĠĠĊ":3374,"ici":3375,"na":3376,"ĠForm":3377,"Ġsort":3378,"Select":3379,"pare":3380,"Ġthought":3381,"_con":3382,"Ġtask":3383,"ocus":3384,"ĠDE":3385,"ĠMin":3386,"Ġopt":3387,"ĉbreak":3388,"umer":3389,"KE":3390,"then":3391,"Ġdet":3392,"ĠTest":3393,"ports":3394,"Ġreview":3395,"('/":3396,"move":3397,"Ġswitch":3398,"ERT":3399,"patch":3400,"annot":3401,"ãĤ":3402,"Ġabove":3403,"itive":3404,"Ġquestion":3405,"ĠQu":3406,"ãĢĤĊĊ":3407,"gle":3408,"Ġword":3409,"Ġprovide":3410,"ĠReturn":3411,"Ġresearch":3412,"ão":3413,"ustr":3414,"Ġpublish":3415,"chema":3416,"}}":3417,"ĠCON":3418,"-in":3419,"allback":3420,"Ġcover":3421,"\\\\":3422,"color":3423,"ĠIS":3424,"Ġwhether":3425,"imate":3426,"isc":3427,"Bar":3428,"Ġdiv":3429,"Be":3430,"ourn":3431,"Ġhaving":3432,"lem":3433,"player":3434,"abs":3435,"amera":3436,"ney":3437,"Ġexc":3438,"gether":3439,"plied":3440,"ao":3441,"[$":3442,"Ġ++":3443,"ipe":3444,"show":3445,"/d":3446,"[:":3447,"agement":3448,"lev":3449,"_ID":3450,"rary":3451,"ades":3452,"_se":3453,"ause":3454,"Ġemploy":3455,"Ġ*/čĊ":3456,"Ġfre":3457,"Ġ'@":3458,"Ġcomplet":3459,"Ġlarge":3460,"ral":3461,"\\x":3462,"Ġfac":3463,">":3578,"Ġface":3579,"CTION":3580,"Ġsave":3581,"Ġtyp":3582,"dev":3583,"(\"#":3584,"AGE":3585,"container":3586,"edit":3587,"QL":3588,"Ġitems":3589,"Ġsocial":3590,"ien":3591,"ĠReact":3592,").ĊĊ":3593,"Ġmar":3594,"Ġredu":3595,"ĠRE":3596,".put":3597,"Ġmajor":3598,"Cell":3599,"next":3600,"Ġexpected":3601,"Ġyet":3602,"Ġindiv":3603,"tributes":3604,"atis":3605,"amed":3606,"Ġfood":3607,"Source":3608,"(string":3609,"Ġ+Ċ":3610,"ites":3611,"dr":3612,"Ġmembers":3613,"Ġcomb":3614,"items":3615,"ĠPer":3616,"TH":3617,"=True":3618,"Ġbar":3619,"_SE":3620,"comm":3621,"(w":3622,")ĊĊĊ":3623,"Ġsend":3624,"Ġinc":3625,"unsigned":3626,"FA":3627,"Ġparams":3628,"apping":3629,"ros":3630,"u
gin":3631,"fa":3632,"Ġconnection":3633,"Ġ};ĊĊ":3634,"Ġbecome":3635,"Mode":3636,"Ġev":3637,"Ġdiff":3638,"ĠUnited":3639,"Height":3640,"fully":3641,"images":3642,"Ġmakes":3643,"Ġglobal":3644,"Ġcontact":3645,"':Ċ":3646,"Ġabs":3647,"аÐ":3648,"float":3649,"Ġexcept":3650,"ĠPol":3651,"Child":3652,"typ":3653,"Ġcertain":3654,"ión":3655,"OUT":3656,"Ġimpro":3657,"iles":3658,"Ġ-->Ċ":3659,"ĠPart":3660,"values":3661,"oss":3662,"/**":3663,"ilit":3664,"ĠEvent":3665,"curity":3666,"ster":3667,"Ġcharacter":3668,"Ġnews":3669,"Ġ\",":3670,"Ġdevice":3671,"cel":3672,"login":3673,"heet":3674,"Default":3675,"@\"":3676,"ĉĠ":3677,"click":3678,"(value":3679,"ĠAb":3680,"Ġprevious":3681,"ERROR":3682,"ocal":3683,"Ġmaterial":3684,"Ġbelow":3685,"ĠChrist":3686,"Ġmedia":3687,"cover":3688,"ĠUI":3689,"Ġfail":3690,"Ġblack":3691,"Ġcomponent":3692,"ĠAmerican":3693,"Ġadded":3694,"Ġbuy":3695,"stit":3696,"Ġcame":3697,"Ġdelete":3698,"property":3699,"oding":3700,"Ġcard":3701,"rops":3702,"Ġhttps":3703,"Ġroot":3704,"Ġhandle":3705,"CC":3706,"Back":3707,"emplate":3708,"Ġgetting":3709,"_by":3710,"mail":3711,"_sh":3712,".assert":3713,"ĠDec":3714,"(true":3715,"Ġcomput":3716,"Ġclaim":3717,"'=>":3718,"ĠSub":3719,"Ġair":3720,"ops":3721,"nav":3722,"ements":3723,"(id":3724,"Ġenter":3725,"anged":3726,"End":3727,"Ġlocation":3728,"Ġnight":3729,"Ġdoing":3730,"ĠRed":3731,"lin":3732,"}ĊĊĊ":3733,"vider":3734,"Ġpick":3735,"Ġwatch":3736,"essages":3737,"Ġhuman":3738,"Ġdam":3739,"pend":3740,"dir":3741,"Ġtax":3742,"Ġgirl":3743,"reet":3744,"Ġbox":3745,"Ġstrong":3746,"(v":3747,"rel":3748,"Ġinterface":3749,"Ġmsg":3750,"fect":3751,"_at":3752,"Ġhouse":3753,"Ġtrack":3754,"');ĊĊ":3755,"je":3756,"ĠJohn":3757,"istr":3758,"(S":3759,"ube":3760,"Ġce":3761,"itted":3762,"VER":3763,"*)":3764,"parent":3765,"Ġapplication":3766,"any":3767,".swing":3768,"Ġpack":3769,"\\u":3770,"Ġpract":3771,"Ġsection":3772,"ctx":3773,"Ġunsigned":3774,".Point":3775,"ĠOne":3776,"ı":3777,"iple":3778,"aid":3779,"Ñĥ":3780,"Vector":3781,"byte":3782,"Ġwait":3783,"ĠÃł":3784,"Ã¥":3785,"Ġtogether":3786,"Ġthrows":3787,"FO":3788,"'))":3789,"host":3790,"ising":3791,".view":3792,"Ġterms":3793,"framework":3794,"-r":3795,"Ġapply":3796,"Ġsession":3797,"Options":3798,"uggest":3799,"Ġothers":3800,"witter":3801,"Ġfund":3802,"Init":3803,"__(":3804,"ensor":3805,"GET":3806,"Ġseveral":3807,"ii":3808,"[j":3809,"IO":3810,"Ġtemplate":3811,"Position":3812,"Ġecon":3813,"achine":3814,"Ġil":3815,".spring":3816,"main":3817,"elt":3818,"iment":3819,"Rec":3820,"mm":3821,"ĠUniversity":3822,"ursor":3823,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":3824,"GL":3825,"icture":3826,"ithub":3827,"cer":3828,"cast":3829,"From":3830,"ales":3831,"Ġsubject":3832,"password":3833,"ny":3834,"Ġesc":3835,".write":3836,"ï¼Į":3837,"What":3838,".H":3839,"Ġhistory":3840,"ĠFe":3841,"Ġindividual":3842,"unit":3843,"Ġ-->":3844,"Ġdu":3845,"IST":3846,"Ġusers":3847,"fs":3848,"false":3849,"unt":3850,"Title":3851,"Ġmot":3852,"Ġfuture":3853,"ached":3854,"Ġstarted":3855,"Ġmode":3856,"Ġ'<":3857,"_array":3858,"Ġax":3859,"'];Ċ":3860,"ires":3861,"There":3862,"ught":3863,"tml":3864,"posed":3865,"icult":3866,"Ġtook":3867,"Ġgames":3868,"Ġ}}":3869,"Ġ?>Ċ":3870,"Ġproducts":3871,"Is":3872,"Ġbad":3873,"ĠDes":3874,".path":3875,"'ĊĊ":3876,"ĠPost":3877,"avel":3878,"(:":3879,"Ġneeds":3880,"Ġknown":3881,"Fl":3882,"Ġexec":3883,"Ġseen":3884,"ume":3885,"Ġborder":3886,"Ġlive":3887,"temp":3888,"Per":3889,"Ġvariable":3890,"iet":3891,"ĠDef":3892,"Ġge":3893,"eme":3894,"_back":3895,"first":3896,"Ġprovided":3897,"////////////////////////////////":3898,"Ġfilename":3899,"Ġhope":3900,"uly":3901,"auto":3902,"find":
3903,"_string":3904,"btn":3905,"itude":3906,"Attribute":3907,"Ġyoung":3908,".txt":3909,"Ġwebsite":3910,"ĠProp":3911,"Ġey":3912,">();Ċ":3913,"ional":3914,"ARR":3915,"ictionary":3916,"urther":3917,".":3997,"tx":3998,"Ġpur":3999,"uel":4000,"ymbol":4001,"uation":4002,"anger":4003,"Ġbackground":4004,"ecess":4005,"efined":4006,"........":4007,"Ġdescription":4008,"Ġrepresent":4009,"\"));Ċ":4010,"pression":4011,"rowser":4012,"Ġseries":4013,"wards":4014,"($_":4015,"aise":4016,"Ġhot":4017,"acity":4018,"ries":4019,"actions":4020,"Create":4021,"adio":4022,"amples":4023,"Ġoriginal":4024,"ensive":4025,"font":4026,"stream":4027,"using":4028,".springframework":4029,"server":4030,"Ġbill":4031,"ACK":4032,"ilename":4033,"Ġframe":4034,"Ġ=Ċ":4035,"Edit":4036,"adius":4037,"Ġdraw":4038,"anks":4039,"Ġdeter":4040,"Ġcomes":4041,"_int":4042,"Ġforeach":4043,"angle":4044,"Ġelect":4045,"pected":4046,"Header":4047,"istration":4048,"False":4049,"ĠGame":4050,"Ġfilter":4051,"Activity":4052,"Ġlarg":4053,"inition":4054,"Ġ\"<":4055,"ised":4056,"Ġremove":4057,"ĠTrans":4058,"met":4059,"see":4060,"Format":4061,"Command":4062,"ĠEX":4063,"None":4064,"Ġfront":4065,"ASE":4066,"ĠRec":4067,"oundation":4068,"Ġvo":4069,"=\\\"":4070,"(*":4071,"Change":4072,".Write":4073,"group":4074,"ients":4075,"uy":4076,"****************************************************************":4077,"Ġdig":4078,"hr":4079,"(-":4080,"Ġgen":4081,"number":4082,"vec":4083,"urope":4084,"entry":4085,"LL":4086,"Ġste":4087,"Valid":4088,"'],":4089,"_param":4090,"Ġselected":4091,"Ġaccording":4092,"ĠDis":4093,"Ġutil":4094,"Buffer":4095,"_error":4096,"Ġassoci":4097,"_SIZE":4098,"Ġwor":4099,"Ġprintf":4100,"rag":4101,"Âł":4102,"DD":4103,"ĠVal":4104,"Ġactiv":4105,"Eng":4106,"etime":4107,"Ġvirtual":4108,"aign":4109,"aur":4110,"ĠPres":4111,"ĠException":4112,"Ġanything":4113,"ĠOff":4114,"Ġhours":4115,"Ġwar":4116,"Args":4117,"aging":4118,"Ġmodels":4119,"ĠTime":4120,"Ob":4121,"ams":4122,"joy":4123,"Ġearly":4124,".read":4125,"Ġcenter":4126,"ĠInitial":4127,"Ġlanguage":4128,"length":4129,"xy":4130,"Ġsn":4131,"Ġinf":4132,"Post":4133,"Ġago":4134,"Ġeasy":4135,"_code":4136,"ĠANY":4137,"_ch":4138,"Ġdownload":4139,"(T":4140,"aved":4141,"âĢĵ":4142,"Ġstudents":4143,"Ġfig":4144,"light":4145,"xx":4146,"Ġbuffer":4147,"ĠDep":4148,"ĠMath":4149,"ITH":4150,"Ġvari":4151,"Ġdue":4152,"Factory":4153,"Ġpor":4154,"Ġep":4155,"otype":4156,"Ġcannot":4157,"Ġwhite":4158,"čĊ":4424,".annot":4425,"Ġcollection":4426,"'.":4427,"Ġsimilar":4428,"Ġtaken":4429,"(\"%":4430,"Order":4431,"']Ċ":4432,"-md":4433,"ĠTH":4434,"aced":4435,"Ġisn":4436,"/j":4437,"Ġson":4438,"graph":4439,"ĠInteger":4440,"Ġnecess":4441,"reen":4442,"Ġum":4443,"Ġ\\<":4444,"Ġmoment":4445,"Ġbring":4446,"Ġindic":4447,"ysis":4448,"Level":4449,"verse":4450,"urrenc":4451,"_test":4452,"Ġentire":4453,"Down":4454,"Ġ}ĊĊĊ":4455,"(result":4456,"ĠRead":4457,"è":4458,"Mod":4459,"Ġtrying":4460,"\"),Ċ":4461,"Ġmember":4462,"ĠCor":4463,"ODO":4464,"-control":4465,"untime":4466,"ĠSim":4467,"Dialog":4468,"plot":4469,"_on":4470,"Ġphys":4471,"}/":4472,"Ġnamespace":4473,"ĉčĊ":4474,"acc":4475,"Player":4476,"ARE":4477,"Ġfoot":4478,"Ġboard":4479,"part":4480,"Ġsus":4481,"wise":4482,"ĠMc":4483,"Ġpush":4484,"ATA":4485,"Ġplease":4486,"ried":4487,"weet":4488,"bit":4489,"ided":4490,"VE":4491,"ĠSw":4492,"UB":4493,"Ġtypes":4494,"edia":4495,"Ġclos":4496,"acebook":4497,"When":4498,"Ġedit":4499,"igger":4500,"Ġenerg":4501,"Container":4502,"Ġphot":4503,"ĠCount":4504,"ĠEurope":4505,".Is":4506,"ĠRuss":4507,"peed":4508,"ĠStr":4509,"Ġpy":4510,"Ġcult":4511,"Ġdefined":4512,"ccount":4513,"Ġobt":451
4,".Location":4515,"Ġthread":4516,"ille":4517,"Ġinstead":4518,"strong":4519,"ĠSec":4520,"URE":4521,"Ġidea":4522,".se":4523,"emy":4524,"selected":4525,"Connection":4526,"acing":4527,"thread":4528,".next":4529,"Ġcoll":4530,"Ġfilm":4531,"istic":4532,"Ġcompet":4533,"Ġconn":4534,"though":4535,"Ġcompan":4536,"ocket":4537,"Ġteach":4538,"=(":4539,"Ġphone":4540,"Ġactive":4541,"delete":4542,"tries":4543,"Ġmo":4544,"Ġdeath":4545,"});ĊĊ":4546,"ocol":4547,"Widget":4548,"Ġarticle":4549,"rodu":4550,"andid":4551,"Ñĭ":4552,"ĠCr":4553,"ka":4554,"():":4555,"lood":4556,"ĉĉĉĊ":4557,"Ġalmost":4558,"Ġsell":4559,"ervlet":4560,"rip":4561,"Unit":4562,"Ġapplic":4563,"Ġconnect":4564,"Ġfeature":4565,"Ġvia":4566,"'),":4567,"Ġlim":4568,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":4569,"ĠGu":4570,"Engine":4571,"Ġens":4572,"Ġenvironment":4573,"block":4574,"HERE":4575,"NULL":4576,"gy":4577,"tag":4578,")).":4579,"exp":4580,"Ġcompl":4581,"Ġinstall":4582,"Ġcomplete":4583,"queue":4584,"atural":4585,"Ġgeneral":4586,"thon":4587,"Ġasked":4588,"ores":4589,"(res":4590,"Ġreserved":4591,"SP":4592,"Ġâ̦":4593,"ÅĤ":4594,"Ġsignific":4595,"Off":4596,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":4597,"ĠAg":4598,"ĠJust":4599,"ĠError":4600,"Ġinfl":4601,"adata":4602,"Ġicon":4603,"asks":4604,"''":4605,"_LO":4606,"?.":4607,"account":4608,"Ġ(*":4609,"')ĊĊ":4610,"rap":4611,"_var":4612,"ĠFOR":4613,"Ġparty":4614,"ĠYour":4615,"cat":4616,"stry":4617,".new":4618,"boot":4619,"ĠNov":4620,"Ġvector":4621,"Ġnormal":4622,"Ġfurther":4623,"Repository":4624,"Ġdatabase":4625,"attle":4626,"Ġmusic":4627,"Ġspeed":4628,"Ġdoc":4629,"process":4630,"IGHT":4631,".parse":4632,"Ġtaking":4633,"Ġviol":4634,"ceed":4635,"ĠAfter":4636,"Ġforward":4637,"Ġcrit":4638,"\"/>Ċ":4639,"rot":4640,"Ġfailed":4641,"efore":4642,"Ġconcern":4643,"oe":4644,"ba":4645,"Ġsender":4646,"Ġterm":4647,"has":4648,"=\"#":4649,"Ġpotential":4650,"Num":4651,"Ġpublished":4652,".close":4653,"ĠImage":4654,"straint":4655,"UD":4656,"ĠOb":4657,"Ġprobably":4658,"lim":4659,"\":Ċ":4660,"olume":4661,"Ġconsum":4662,"ague":4663,"ensions":4664,"Ġinvestig":4665,"-year":4666,"');":4667,"-sm":4668,"Ġenjoy":4669,"orig":4670,"ering":4671,"cp":4672,"leased":4673,"plements":4674,"Ġreturns":4675,"pat":4676,"BO":4677,"ĠHouse":4678,".Label":4679,"Ġweight":4680,"ighb":4681,"Ġconditions":4682,"Ġexception":4683,"description":4684,"Ġtrad":4685,"-to":4686,"Ġ{}":4687,"Ġmodule":4688,"END":4689,".ap":4690,".props":4691,"Ġconstructor":4692,"aves":4693,"Ġfavor":4694,"ĠNow":4695,";i":4696,"ĠMain":4697,"_k":4698,"eries":4699,"âĢĻll":4700,"transform":4701,"imestamp":4702,"Pre":4703,"Ġmer":4704,".res":4705,"stant":4706,"Location":4707,"_NAME":4708,"Ġloss":4709,"ĠĊĊ":4710,"net":4711,"Ġengine":4712,"Block":4713,"Ġissues":4714,"Ġparse":4715,"ĠBar":4716,"Ġstay":4717,"ĠJSON":4718,"Ġdom":4719,"airs":4720,"wner":4721,"Ġlower":4722,"\",čĊ":4723,"ĠDem":4724,"ufact":4725,"Ġps":4726,"Ġperfect":4727,"RL":4728,"Ġeduc":4729,"ls":4730,"emory":4731,"ARRANT":4732,"uge":4733,"Ġexact":4734,".key":4735,"alled":4736,"ech":4737,"ief":4738,"\\/":4739,"oke":4740,"Ġformer":4741,"alloc":4742,"Ġsix":4743,"ida":4744,"Ġmargin":4745,"Ġheart":4746,"ald":4747,"pack":4748,".getElementById":4749,"ĠWARRANT":4750,"Ġrather":4751,"Ġbuilding":4752,"erman":4753,"lice":4754,"Ġquestions":4755,"izes":4756,"lege":4757,"irectory":4758,"Ġje":4759,"Ġcas":4760,"props":4761,"utf":4762,"Ġsecurity":4763,"Ġhowever":4764,"weight":4765,"Ġinside":4766,"Ġpresident":4767,"Char":4768,"ĠWITH":4769,".map":4770,"Ġgraph":4771,"Ġtag":4772,"_status":4773,"Ġattempt":4774,"opp":4775,"uses":4776,"ĉconst":4777,"Ġround
":4778,",$":4779,"Ġfriends":4780,"Email":4781,"?>":4782,"Resource":4783,"KEY":4784,"osp":4785,".query":4786,"ĠNorth":4787,"ables":4788,"istrib":4789,"_class":4790,"ello":4791,"That":4792,"к":4793,"pecially":4794,"ĠPresident":4795,"Ġcampaign":4796,"Ġalt":4797,"area":4798,"Ġchall":4799,"Ġopport":4800,".Con":4801,"Ġenergy":4802,"like":4803,".string":4804,"ington":4805,")*":4806,"yy":4807,"Ġprofession":4808,"irth":4809,"Ġseg":4810,"æľ":4811,"Ġhor":4812,"iers":4813,"can":4814,"Ġbehind":4815,"Product":4816,"fg":4817,"ĠSk":4818,".jpg":4819,"?:":4820,"];ĊĊ":4821,"Ġcallback":4822,"ĠHttp":4823,"ÑĮ":4824,"long":4825,"MS":4826,"ATH":4827,"Ġraise":4828,"Ġwanted":4829,"rown":4830,"utor":4831,"lt":4832,"]=":4833,"eline":4834,"MA":4835,"Ġsepar":4836,"cs":4837,"semb":4838,"Dis":4839,"bserv":4840,"ĠWill":4841,"Ġpolicy":4842,"Ġthird":4843,"phone":4844,"Ġbed":4845,"/g":4846,".__":4847,"ĠInc":4848,"izing":4849,".remove":4850,"instance":4851,".type":4852,"Ġserv":4853,"Each":4854,"Ġhar":4855,"ĠMessage":4856,"(key":4857,"SELECT":4858,"Pos":4859,"));čĊ":4860,"Ġrecomm":4861,"Ġtraining":4862,"ĠEnt":4863,"ĠChar":4864,"icht":4865,"(file":4866,"Ġprior":4867,"Game":4868,"Ġexit":4869,"Params":4870,".core":4871,"PC":4872,"nes":4873,"anced":4874,"(request":4875,"Password":4876,"}>Ċ":4877,"Ġmag":4878,"Ġrelease":4879,"Ġshall":4880,"udent":4881,"ĠSouth":4882,"ando":4883,":'":4884,".TabIndex":4885,"sk":4886,"anner":4887,"isset":4888,"Ġoutside":4889,"ledge":4890,"Ġå":4891,"ĠRob":4892,"Ġimm":4893,"!Ċ":4894,"ĠWeb":4895,"Des":4896,"BC":4897,"ancial":4898,"Route":4899,"Dec":4900,"ferences":4901,"Ġpurch":4902,"ĠModel":4903,"ctor":4904,"gn":4905,"_start":4906,"_un":4907,".*":4908,"ises":4909,"Ġground":4910,"Ġunique":4911,"Ġbeaut":4912,"{\"":4913,"Ġpour":4914,"ĠOct":4915,"Ġtree":4916,"sets":4917,"_res":4918,"')->":4919,"_reg":4920,"(\"\\":4921,"Ġbyte":4922,"Bl":4923,"Ġdating":4924,"Ġmatter":4925,"ĠRem":4926,"Ġ'../":4927,"ĠAug":4928,"ĠLa":4929,"Ġ$(":4930,"ournal":4931,"iam":4932,"Ġshows":4933,"write":4934,"Ġball":4935,"Ġsimply":4936,"Ġfast":4937,"Ġmemory":4938,"ASS":4939,"ĠOf":4940,"oved":4941,"ante":4942,"aul":4943,"istry":4944,")));Ċ":4945,"Ġfit":4946,"_":5129,"\")ĊĊ":5130,"ox":5131,"application":5132,"Ġ]Ċ":5133,"ĊĊĊĊĊĊ":5134,"Ġsoon":5135,"ctions":5136,"inger":5137,"Ġjoin":5138,"ĠPe":5139,"Ġë":5140,"Ġlas":5141,".E":5142,"css":5143,"/or":5144,"ĠStart":5145,"ĠTO":5146,"Ġsubs":5147,"conn":5148,"components":5149,"DEBUG":5150,"quare":5151,"Function":5152,"endar":5153,".index":5154,"Ġfill":5155,"ÄĻ":5156,"Ġchoose":5157,"how":5158,"ĠAmerica":5159,"assets":5160,"------------":5161,"ĠValue":5162,"Ġoffice":5163,"Ġveh":5164,"Ġtransform":5165,"ĠArt":5166,"Ġinde":5167,"Ġfn":5168,"Ġimplements":5169,"ango":5170,"plete":5171,"+\"":5172,"tmp":5173,"amily":5174,"Ġhash":5175,"missions":5176,"EST":5177,"gt":5178,"Provider":5179,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":5180,"Ġflag":5181,"Ġparticip":5182,"den":5183,"ĠReturns":5184,"Ġnote":5185,"ür":5186,"pm":5187,"ideos":5188,"Ġspecified":5189,"ĠEN":5190,"ester":5191,"olid":5192,"Ġupon":5193,"(std":5194,"ĉv":5195,"Ġ'\\":5196,"uz":5197,"Ġvert":5198,"Ġvict":5199,"ĉself":5200,"Ġ\"$":5201,".k":5202,"Ġgroups":5203,"github":5204,"lang":5205,"Ġmut":5206,"TO":5207,"Ġve":5208,"ĠPlease":5209,";ĊĊĊ":5210,"access":5211,"Ġ{\"":5212,"rea":5213,"Ġrisk":5214,"icker":5215,"oggle":5216,"ĉwhile":5217,"ANG":5218,".send":5219,"Ġwoman":5220,"Ġgets":5221,"Ġign":5222,"ĠId":5223,"_log":5224,"ONE":5225,"Ġevid":5226,"ĠHar":5227,"_sub":5228,"Ġendl":5229,"Ġincluded":5230,"());ĊĊ":5231,"ĠAp":5232,"igr":5233,"Ġsem":5234,"ĠBlack":5235,"doc":5236,"_table
":5237,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":5238,"-up":5239,"Ġcause":5240,"Ġ..":5241,"Ġvan":5242,"_dict":5243,"Ġfocus":5244,"IND":5245,"CESS":5246,".Log":5247,"Ġmultiple":5248,"ido":5249,"Ġregard":5250,"-M":5251,"andler":5252,"ourse":5253,"Ġdeg":5254,".U":5255,"Ġaddition":5256,"Ġvarious":5257,"Ġreceive":5258,"ен":5259,"ĠHT":5260,"Obj":5261,"DF":5262,"Ġincrease":5263,"ĠOpen":5264,"];":5265,"Ġcommit":5266,"?Ċ":5267,"ategories":5268,"atory":5269,"ship":5270,"ĠMich":5271,"Ġhtml":5272,"romise":5273,"Ġleave":5274,"Ġstrateg":5275,"aven":5276,"ĠConsole":5277,"known":5278,"-n":5279,"_LE":5280,".component":5281,"Ġbre":5282,"Session":5283,"iance":5284,"Ġalign":5285,"typedef":5286,"_result":5287,"ĠWHERE":5288,".split":5289,"Ġreading":5290,"FAULT":5291,"Ġclo":5292,"Ġnotice":5293,"_pr":5294,"arter":5295,"Ġlock":5296,"Ġstandard":5297,"etic":5298,"ellow":5299,"Ġpadding":5300,"ĠHis":5301,"Ġstates":5302,"_cast":5303,"(P":5304,"aa":5305,"Ġinternal":5306,"ean":5307,"ĠPRO":5308,"ĠKey":5309,"Ġespecially":5310,"ming":5311,"Ġcross":5312,"Ġnational":5313,"_object":5314,"filter":5315,"Ġscript":5316,".update":5317,"_i":5318,"ĠAssert":5319,"/core":5320,"%%%%":5321,"Ġproblems":5322,"istor":5323,"Ġ.=":5324,"Ġarch":5325,"Ġwritten":5326,"Ġmilit":5327,"MENT":5328,".ch":5329,"cape":5330,"ĠMus":5331,"_config":5332,"ĠAPI":5333,"foot":5334,"Ġimages":5335,"endl":5336,".In":5337,"First":5338,"Ġplatform":5339,".prot":5340,"Option":5341,"ste":5342,"ĠTODO":5343,"Ġforce":5344,".cont":5345,"ĉecho":5346,"ĠDav":5347,"Ptr":5348,"(B":5349,"RT":5350,"ĠBase":5351,"]['":5352,"Ġannounc":5353,"console":5354,"ĠPy":5355,"ds":5356,".as":5357,"Ġprevent":5358,"apan":5359,"Ġ{'":5360,"}'":5592,"Ġdead":5593,"VAL":5594,"QUE":5595,"************************************************************************":5596,"Ġcharg":5597,"Return":5598,"Ġful":5599,"dom":5600,"Ġrules":5601,"Ġmodify":5602,"Ġeval":5603,"ham":5604,"atement":5605,"\\<":5606,"ula":5607,"=False":5608,"RA":5609,"Ġcontains":5610,"Ġstack":5611,"mar":5612,"Ġ{}Ċ":5613,"Ġundefined":5614,"Ass":5615,"ĠChina":5616,"vey":5617,"*Ċ":5618,"Ġplaying":5619,")/":5620,"actor":5621,"Ġbottom":5622,"lier":5623,"ĠNumber":5624,"Ġcouple":5625,"DC":5626,"ĠSO":5627,"gor":5628,".setText":5629,"success":5630,"command":5631,"Filter":5632,"ĠOur":5633,"_item":5634,"Ġctx":5635,"Ġroad":5636,"Version":5637,"case":5638,"urt":5639,"avior":5640,"ych":5641,"sembly":5642,"ĠProduct":5643,"Ġheld":5644,"afe":5645,"Ġincludes":5646,"&":5789,"CON":5790,"Ġrepl":5791,"Ġregular":5792,"Storage":5793,"ramework":5794,"Ġgoal":5795,"Ġtouch":5796,".widget":5797,"Ġbuilt":5798,"des":5799,"Part":5800,"(re":5801,"Ġworth":5802,"hib":5803,"game":5804,"Ġв":5805,"acion":5806,"ĠWhite":5807,"(type":5808,"(`":5809,"Ġnatural":5810,"Ġinj":5811,"Ġcalcul":5812,"ĠApril":5813,".List":5814,"Ġassociated":5815,"ĉSystem":5816,"~~":5817,"=[":5818,"Ġstorage":5819,"Ġbytes":5820,"Ġtravel":5821,"Ġsou":5822,"Ġpassed":5823,"!=":5824,"ascript":5825,".open":5826,"Ġgrid":5827,"Ġbus":5828,"Ġrecogn":5829,"Ab":5830,"Ġhon":5831,"ĠCenter":5832,"Ġprec":5833,"build":5834,"HTML":5835,"ĠSan":5836,"Ġcountries":5837,"aled":5838,"token":5839,"kt":5840,"Ġqual":5841,"Last":5842,"adow":5843,"Ġmanufact":5844,"idad":5845,"jango":5846,"Next":5847,"xf":5848,".a":5849,"Ġporno":5850,"ĠPM":5851,"erve":5852,"iting":5853,"_th":5854,"ci":5855,"=None":5856,"gs":5857,"Ġlogin":5858,"atives":5859,"']);Ċ":5860,"Äħ":5861,"Ġill":5862,"IA":5863,"children":5864,"DO":5865,"Ġlevels":5866,"Ġ{{":5867,"Ġlooks":5868,"Ġ\"#":5869,"ToString":5870,"Ġnecessary":5871,"ĠĠĠĊ":5872,"cell":587
3,"Entry":5874,"Ġ'#":5875,"Ġextrem":5876,"Selector":5877,"Ġplaceholder":5878,"Load":5879,"Ġreleased":5880,"ORE":5881,"Enumer":5882,"ĠTV":5883,"SET":5884,"inq":5885,"Press":5886,"ĠDepartment":5887,"Ġproperties":5888,"Ġrespond":5889,"Search":5890,"ael":5891,"Ġrequ":5892,"ĠBook":5893,"/Ċ":5894,"(st":5895,"Ġfinancial":5896,"icket":5897,"_input":5898,"Ġthreat":5899,"(in":5900,"Strip":5901,"ìĿ":5902,"ção":5903,"Ġevidence":5904,"));":5905,"ĠBro":5906,"Ġ[];Ċ":5907,"Ġou":5908,"buf":5909,"Script":5910,"dat":5911,"Ġrule":5912,"#import":5913,"=\"/":5914,"Serial":5915,"Ġstarting":5916,"[index":5917,"ae":5918,"Ġcontrib":5919,"session":5920,"_new":5921,"utable":5922,"ober":5923,"Ġ\"./":5924,"Ġlogger":5925,"Ġrecently":5926,"Ġreturned":5927,"ččĊ":5928,")))Ċ":5929,"itions":5930,"Ġseek":5931,"Ġcommunic":5932,"Ġ\".":5933,"Ġusername":5934,"ECT":5935,"DS":5936,"Ġotherwise":5937,"ĠGerman":5938,".aw":5939,"Adapter":5940,"ixel":5941,"Ġsystems":5942,"Ġdrop":5943,"Ġstructure":5944,"Ġ$(\"#":5945,"encies":5946,"anning":5947,"ĠLink":5948,"ĠResponse":5949,"Ġstri":5950,"ż":5951,"ĠDB":5952,"æĹ":5953,"android":5954,"submit":5955,"otion":5956,"(@":5957,".test":5958,"ĊĊĊĊĊĊĊĊ":5959,"];čĊ":5960,"Ġdirectly":5961,"Ġ\"%":5962,"ris":5963,"elta":5964,"AIL":5965,"){čĊ":5966,"mine":5967,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":5968,"(k":5969,"bon":5970,"asic":5971,"pite":5972,"___":5973,"Max":5974,"Ġerrors":5975,"ĠWhile":5976,"Ġarguments":5977,"Ġensure":5978,"Right":5979,"-based":5980,"Web":5981,"Ġ-=":5982,"Ġintrodu":5983,"ĠInst":5984,"ĠWash":5985,"ordin":5986,"join":5987,"Database":5988,"Ġgrad":5989,"Ġusually":5990,"ITE":5991,"Props":5992,"?>Ċ":5993,"ĠGo":5994,"@Override":5995,"REF":5996,"Ġip":5997,"ĠAustral":5998,"Ġist":5999,"ViewById":6000,"Ġserious":6001,"Ġcustomer":6002,".prototype":6003,"odo":6004,"cor":6005,"Ġdoor":6006,"ĠWITHOUT":6007,"Ġplant":6008,"Ġbegan":6009,"Ġdistance":6010,"()).":6011,"Ġchance":6012,"Ġord":6013,"came":6014,"pragma":6015,"Ġprotect":6016,"ragment":6017,"ĠNode":6018,"ening":6019,"Ñĩ":6020,"Ġroute":6021,"ĠSchool":6022,"hi":6023,"Ġneighb":6024,"After":6025,"licit":6026,"Ġcontr":6027,"Ġprimary":6028,"AA":6029,".WriteLine":6030,"utils":6031,"Ġbi":6032,"Red":6033,".Linq":6034,".object":6035,"Ġleaders":6036,"unities":6037,"Ġgun":6038,"onth":6039,"ĠDev":6040,"FILE":6041,"Ġcomments":6042,"_len":6043,"arrow":6044,"amount":6045,"Range":6046,"sert":6047,"GridView":6048,"Ġupdated":6049,"ĠMo":6050,"Ġinform":6051,"ociety":6052,"ala":6053,"Access":6054,"Ġhab":6055,"Ġcreat":6056,"_arg":6057,"ĠJanuary":6058,"ĠDay":6059,"\")čĊ":6060,"uple":6061,"document":6062,"gorith":6063,"menu":6064,"ĠOver":6065,"bb":6066,".title":6067,"_out":6068,"Ġled":6069,"uri":6070,"Ġ?>Ċ":6107,"run":6108,"Ġscene":6109,"(array":6110,"device":6111,"_title":6112,"agon":6113,"]čĊ":6114,"aby":6115,"Ġbecame":6116,"boolean":6117,"Ġpark":6118,"ĠCode":6119,"upload":6120,"riday":6121,"ĠSeptember":6122,"Fe":6123,"Ġsen":6124,"cing":6125,"FL":6126,"Col":6127,"uts":6128,"_page":6129,"inn":6130,"Ġimplied":6131,"aling":6132,"Ġyourself":6133,".Count":6134,"conf":6135,"Ġaud":6136,"_init":6137,".)":6138,"Ġwrote":6139,"NG":6140,".Error":6141,"ä»":6142,".for":6143,"Ġequal":6144,"ĠRequest":6145,"Ġserial":6146,"Ġallows":6147,"XX":6148,"Ġmiddle":6149,"chor":6150,"ø":6151,"erval":6152,".Column":6153,"reading":6154,"Ġescort":6155,"ĠAugust":6156,"Ġquickly":6157,"Ġweap":6158,"ĠCG":6159,"ropri":6160,"ho":6161,"Ġcop":6162,"(struct":6163,"ĠBig":6164,"Ġvs":6165,"Ġfrequ":6166,".Value":6167,"Ġactions":6168,"Ġproper":6169,"Ġinn":6170,"Ġobjects":6171,"Ġmatrix":6172,"avascript":6173,"Ġones":6174,"
.group":6175,"Ġgreen":6176,"Ġpaint":6177,"ools":6178,"ycl":6179,"encode":6180,"olt":6181,"comment":6182,".api":6183,"Dir":6184,"Ġune":6185,"izont":6186,".position":6187,"Ġdesigned":6188,"_val":6189,"avi":6190,"iring":6191,"tab":6192,"Ġlayer":6193,"Ġviews":6194,"Ġreve":6195,"rael":6196,"ĠON":6197,"rics":6198,"np":6199,"Ġcore":6200,"());čĊ":6201,"Main":6202,"Ġexpert":6203,"ĉĉčĊ":6204,"_en":6205,"Ġ/>":6206,"utter":6207,"IAL":6208,"ails":6209,"ĠKing":6210,"*/ĊĊ":6211,"ĠMet":6212,"_end":6213,"addr":6214,"ora":6215,"Ġir":6216,"Min":6217,"Ġsurpr":6218,"Ġrepe":6219,"Ġdirectory":6220,"PUT":6221,"-S":6222,"Ġelection":6223,"haps":6224,".pre":6225,"cm":6226,"Values":6227,"Ġ\"Ċ":6228,"column":6229,"ivil":6230,"Login":6231,"inue":6232,"Ġbeautiful":6233,"Ġsecret":6234,"(event":6235,"Ġchat":6236,"ums":6237,"Ġorigin":6238,"Ġeffects":6239,"Ġmanagement":6240,"illa":6241,"tk":6242,"Ġsetting":6243,"ĠCour":6244,"Ġmassage":6245,"ĉend":6246,"Ġhappy":6247,"Ġfinish":6248,"Ġcamera":6249,"ĠVer":6250,"ĠDemocr":6251,"ĠHer":6252,"(Q":6253,"cons":6254,"ita":6255,"Ġ'.":6256,"{}":6257,"ĉC":6258,"Ġstuff":6259,"Ġ:Ċ":6260,"ĠAR":6261,"Task":6262,"hidden":6263,"eros":6264,"IGN":6265,"atio":6266,"ĠHealth":6267,"olute":6268,"Enter":6269,"'>":6270,"ĠTwitter":6271,"ĠCounty":6272,"scribe":6273,"Ġ=>Ċ":6274,"Ġhy":6275,"fit":6276,"Ġmilitary":6277,"Ġsale":6278,"required":6279,"non":6280,"bootstrap":6281,"hold":6282,"rim":6283,"-old":6284,"ĠDown":6285,"Ġmention":6286,"contact":6287,"_group":6288,"oday":6289,"Ġtown":6290,"Ġsolution":6291,"uate":6292,"elling":6293,"]->":6294,"otes":6295,"ental":6296,"omen":6297,"ospital":6298,"ĠSup":6299,"_EN":6300,"Ġslow":6301,"SESSION":6302,"Ġblue":6303,"ago":6304,"Ġlives":6305,"Ġ^":6306,".un":6307,"inst":6308,"enge":6309,"Ġcustomers":6310,"Ġcast":6311,"udget":6312,"ï¼ģ":6313,"icens":6314,"Ġdetermin":6315,"Selected":6316,"_pl":6317,"ueue":6318,"Ġdark":6319,"//ĊĊ":6320,"si":6321,"thern":6322,"ĠJapan":6323,"/w":6324,"PU":6325,"ĠEast":6326,"ovie":6327,"Ġpackage":6328,"Ġnor":6329,"Ġapi":6330,"bot":6331,"\"];Ċ":6332,"_post":6333,"ulate":6334,"Ġclub":6335,"'));Ċ":6336,"Ġloop":6337,"PIO":6338,"ione":6339,"shot":6340,"Initial":6341,"Ġplayed":6342,"register":6343,"rought":6344,"_max":6345,"acement":6346,"match":6347,"raphics":6348,"AST":6349,"Ġexisting":6350,"Ġcomplex":6351,"DA":6352,".Ch":6353,".common":6354,"mo":6355,"Ġ'../../":6356,"ito":6357,"Ġanalysis":6358,"Ġdeliver":6359,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":6360,"idx":6361,"Ãł":6362,"ongo":6363,"ĠEnglish":6364,"Ċ":9992,"_default":9993,"ĠDatabase":9994,"rep":9995,"ESS":9996,"nergy":9997,".Find":9998,"_mask":9999,"Ġrise":10000,"Ġkernel":10001,"::$":10002,".Q":10003,"Ġoffering":10004,"decl":10005,"ĠCS":10006,"Ġlisted":10007,"Ġmostly":10008,"enger":10009,"Ġblocks":10010,"olo":10011,"Ġgoverning":10012,"\\F":10013,"Ġconcent":10014,".getText":10015,"Ġmb":10016,"Ġoccurred":10017,"Ġchanging":10018,"Scene":10019,"_CODE":10020,"Beh":10021,"\"The":10022,"Ġtile":10023,"ĠAssociation":10024,"ĉP":10025,"alty":10026,"_ad":10027,"odies":10028,"iated":10029,"Ġprepared":10030,"possible":10031,"Ġmort":10032,"TEST":10033,"Ġignore":10034,"Ġcalc":10035,"Ġrs":10036,"ĠassertEquals":10037,"Ġsz":10038,"ĠTHIS":10039,".\"Ċ":10040,"Ġcanvas":10041,"java":10042,"Ġdut":10043,"VALID":10044,".sql":10045,".input":10046,"Ġaux":10047,"Sup":10048,"Ġartist":10049,"Vec":10050,"_TIME":10051,".stringify":10052,"etween":10053,"ĠCategory":10054,"Ġ[-":10055,"ĠDevExpress":10056,"ĠJul":10057,"Ġring":10058,".ed":10059,"YY":10060,"Let":10061,"TextField":10062,"Ġflat":10063,"_print":10064,"ĠOTHER":10065,"adian":10066,"Ġcheck
ed":10067,"ele":10068,"Align":10069,"standing":10070,"Ġ[],":10071,"Ġlab":10072,"ucky":10073,"ĠChristmas":10074,"(image":10075,".module":10076,"Ġlots":10077,"Ġslightly":10078,"(final":10079,"erge":10080,"è¿":10081,"ĠPolice":10082,"ĠRight":10083,"Ġaward":10084,"ĠOS":10085,"Ġ{}ĊĊ":10086,"Ġptr":10087,"oves":10088,"icated":10089,"ем":10090,"Ġmanage":10091,"oliday":10092,"Amount":10093,"oolStrip":10094,"tbody":10095,"Nav":10096,"wrap":10097,"BB":10098,"Ġwatching":10099,"arios":10100,"Ġoptional":10101,"_K":10102,"ĠLicensed":10103,".Map":10104,"Timer":10105,"ĠAP":10106,"ĠRev":10107,"(o":10108,",c":10109,"umin":10110,"etailed":10111,"ĠHy":10112,"Ġblank":10113,"agger":10114,"ĠSelf":10115,"()[":10116,".make":10117,"earn":10118,"channel":10119,";Ċ":10133,"World":10134,"Ġpython":10135,"Ġlif":10136,"Ġtrav":10137,"Ġconven":10138,"company":10139,"ĠClub":10140,"Ver":10141,"Btn":10142,"Ġzone":10143,"products":10144,"ĠEduc":10145,"Ġverify":10146,"ĠMil":10147,"ono":10148,"]);ĊĊ":10149,"ENCE":10150,"Ġpacket":10151,"Ġcer":10152,"Ġenumer":10153,"Ġpars":10154,"formed":10155,"Ġoccup":10156,"tre":10157,"Ġexercise":10158,"Day":10159,"_sum":10160,"Ġasking":10161,"aption":10162,"Ġorders":10163,"Ġspending":10164,"ĠERR":10165,".Dis":10166,"ĠUtil":10167,"âĢľI":10168,"\\'":10169,"?)":10170,"/>Ċ":10171,"Ġemot":10172,"Ġinfluence":10173,"ĠAfrica":10174,"atters":10175,"Ùħ":10176,".session":10177,"Ġchief":10178,"ĉĉĉĉĉĉĉĉĉĉĉ":10179,"Ġtom":10180,"cluded":10181,"serial":10182,"_handler":10183,".Type":10184,"aped":10185,"Ġpolicies":10186,"-ex":10187,"-tr":10188,"blank":10189,"merce":10190,"Ġcoverage":10191,"Ġrc":10192,"_matrix":10193,"_box":10194,"Ġcharges":10195,"ĠBoston":10196,"Pe":10197,"Ġcircum":10198,"Ġfilled":10199,"Ġnorth":10200,"ictureBox":10201,"ĉres":10202,"è®":10203,"Ġtermin":10204,"Ġ[â̦":10205,"IRECT":10206,"Ġber":10207,"Ġ\"../../":10208,"retch":10209,".code":10210,"_col":10211,"ĠGovernment":10212,"Ġargv":10213,"ĠLord":10214,"asi":10215,"Exec":10216,"ĉlet":10217,"vertis":10218,"Ġdiscussion":10219,"enance":10220,"outube":10221,"typeof":10222,"Ġserved":10223,"ĠPut":10224,"ĉx":10225,"Ġsweet":10226,"Before":10227,"ategy":10228,".of":10229,"ĠMaterial":10230,"Sort":10231,"ONT":10232,"igital":10233,"Why":10234,"Ġsust":10235,"Ġç":10236,"abet":10237,"Ġsegment":10238,"Ġ[],Ċ":10239,"ĠMuslim":10240,"ĠfindViewById":10241,"cut":10242,"_TEXT":10243,"ĠMary":10244,"Ġloved":10245,"Ġlie":10246,"ĠJO":10247,"Ġisset":10248,"month":10249,"Ġprime":10250,"ti":10251,"ĠCarol":10252,"Use":10253,"ĠPop":10254,"ĠSave":10255,"Interval":10256,"execute":10257,"dy":10258,"ĠIran":10259,"_cont":10260,"ĉT":10261,"Ġphase":10262,"checkbox":10263,"week":10264,"Ġhide":10265,"Ġtil":10266,"Ġju":10267,"Custom":10268,"burg":10269,"/M":10270,"TON":10271,"Ġquant":10272,"Ġrub":10273,"ixels":10274,"Ġinstalled":10275,"Ġdump":10276,"Ġproperly":10277,"(List":10278,"Ġdecide":10279,"apply":10280,"Has":10281,"Ġkeeping":10282,"Ġcitizens":10283,"Ġjoint":10284,"pool":10285,"Socket":10286,"_op":10287,"Ġweapon":10288,"gnore":10289,"ĠExec":10290,"otten":10291,"ĠMS":10292,"Ġ(-":10293,"ĠReview":10294,"Ġexamples":10295,"Ġtight":10296,"!(":10297,"DP":10298,"ĠMessageBox":10299,"Ġphotograph":10300,"URI":10301,"ét":10302,"low":10303,"ĠGrand":10304,".persistence":10305,"Ġmaintain":10306,"Ġnums":10307,"Ġzip":10308,"ials":10309,"ĠGets":10310,"peg":10311,"ĠBuffer":10312,"~~~~":10313,"rastructure":10314,"ĠPL":10315,"uen":10316,"obby":10317,"sizeof":10318,"Ġpic":10319,"Ġseed":10320,"Ġexperienced":10321,"Ġodd":10322,"Ġkick":10323,"Ġprocedure":10324,"avigator":10325,"-on":10326,",j":10327,"ĠAlth
ough":10328,"ĠuserId":10329,"accept":10330,"Blue":10331,"IColor":10332,"layer":10333,"available":10334,"Ġends":10335,".table":10336,"Ġdataset":10337,"bus":10338,"Ġexplain":10339,"(pro":10340,"ĠCommittee":10341,"Ġnoted":10342,"]:Ċ":10343,"Dim":10344,"stdio":10345,".\",Ċ":10346,"_source":10347,"ĠWeek":10348,"ĠEdge":10349,"Ġoperating":10350,"Ġeste":10351,"ipl":10352,"agination":10353,"Ġproceed":10354,"Ġanimation":10355,".Models":10356,"ĠWatch":10357,"iat":10358,"Ġoppon":10359,"/A":10360,"Report":10361,"Ġsounds":10362,"_buf":10363,"IELD":10364,"Ġbund":10365,"ĉget":10366,".pr":10367,"(tmp":10368,"Ġkid":10369,">ĊĊĊ":10370,"Ġyang":10371,"NotFound":10372,"ÑĨ":10373,"math":10374,"@gmail":10375,"ĠLIMIT":10376,"redients":10377,"Ġvent":10378,"avigate":10379,"Look":10380,"Ġreligious":10381,"Ġrand":10382,"rio":10383,"(GL":10384,"_ip":10385,"uan":10386,"iciency":10387,"ĠChange":10388,">čĊčĊ":10389,"ĠEntity":10390,"Ġrencontre":10391,"ĠRet":10392,"plan":10393,"én":10394,"BOOL":10395,"uries":10396,"train":10397,"Definition":10398,"============":10399,"zz":10400,"Animation":10401,"ĠOK":10402,"_menu":10403,".bl":10404,"_score":10405,"Ġacad":10406,"(System":10407,"Ġrefresh":10408,"'=>$":10409,".Graphics":10410,"amento":10411,"pid":10412,"tc":10413,"Ġtips":10414,"Ġhomes":10415,"Ġfuel":10416,"âĸ":10417,"_helper":10418,"ĠĠčĊ":10419,"ĠRoom":10420,".Close":10421,"_attr":10422,"ĠMount":10423,"ĠEv":10424,"arser":10425,"_top":10426,"eah":10427,"ĠDelete":10428,"ãĢį":10429,"uke":10430,"Ġusage":10431,"aria":10432,"_dev":10433,"Ġtexture":10434,"Ġconversation":10435,"eper":10436,"Bean":10437,"done":10438,"nonatomic":10439,"ĠSecond":10440,"Ġshooting":10441,"_pre":10442,"Components":10443,"Ġ]ĊĊ":10444,"__,":10445,"stitution":10446,".Char":10447,">();ĊĊ":10448,"Ġpresented":10449,"Ġwa":10450,"oker":10451,"-ĊĊ":10452,"iner":10453,"Ġbecoming":10454,"Ġincident":10455,"Att":10456,"Ġrevealed":10457,"forc":10458,"Ġboot":10459,".page":10460,"Enumerator":10461,"_->":10462,"Photo":10463,"Ġspring":10464,".\",":10465,"ĠDictionary":10466,"BJECT":10467,"Ġlocations":10468,"Ġsamples":10469,"InputStream":10470,"ĠBrown":10471,"Ġstats":10472,"quality":10473,"Ñħ":10474,"-dis":10475,"Ġhelping":10476,"Ġped":10477,"(se":10478,"ĠWho":10479,"alian":10480,"internal":10481,"Ġft":10482,">().":10483,"->{":10484,"Ġmine":10485,"Ġsector":10486,"Ġgro":10487,"Ġopportunities":10488,"Ġü":10489,"Ġmp":10490,"Ġalleged":10491,"Ġdoubt":10492,"Mouse":10493,"About":10494,"_part":10495,"Ġchair":10496,"Ġstopped":10497,"loop":10498,"entities":10499,"Ġapps":10500,"ansion":10501,"Ġmental":10502,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":10503,"FR":10504,"Ġdefend":10505,"care":10506,"Ġideal":10507,"/api":10508,"urface":10509,"Ġele":10510,"ulator":10511,"ĠRights":10512,"anguages":10513,"Ġfunds":10514,"Ġadapt":10515,"Attributes":10516,"Ġdeploy":10517,"opts":10518,"Ġvalidation":10519,"Ġconcerns":10520,"uce":10521,".num":10522,"ulture":10523,"ila":10524,"Ġcup":10525,"Ġpure":10526,".Fore":10527,"ĠHashMap":10528,".valueOf":10529,"asm":10530,"MO":10531,"Ġcs":10532,"Ġstores":10533,"Ġ************************************************************************":10534,"Ġcommunication":10535,"mem":10536,".EventHandler":10537,".Status":10538,"_right":10539,".setOn":10540,"Sheet":10541,"Ġidentify":10542,"enerated":10543,"ordered":10544,"Ġ\"[":10545,"Ġswe":10546,"Condition":10547,"ĠAccording":10548,"Ġprepare":10549,"Ġrob":10550,"Pool":10551,"Ġsport":10552,"rv":10553,"ĠRouter":10554,"Ġalternative":10555,"([]":10556,"ĠChicago":10557,"ipher":10558,"ische":10559,"ĠDirector":10560,"kl":10561,"ĠWil
":10562,"keys":10563,"Ġmysql":10564,"Ġwelcome":10565,"king":10566,"ĠManager":10567,"Ġcaught":10568,")}Ċ":10569,"Score":10570,"_PR":10571,"Ġsurvey":10572,"hab":10573,"Headers":10574,"ADER":10575,"Ġdecor":10576,"Ġturns":10577,"Ġradius":10578,"errupt":10579,"Cor":10580,"Ġmel":10581,"Ġintr":10582,"(q":10583,"ĠAC":10584,"amos":10585,"MAX":10586,"ĠGrid":10587,"ĠJesus":10588,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":10589,".DE":10590,"Ġts":10591,"Ġlinked":10592,"free":10593,"ĠQt":10594,"Ġ/**čĊ":10595,"Ġfaster":10596,"ctr":10597,"_J":10598,"DT":10599,".Check":10600,"Ġcombination":10601,"Ġintended":10602,"-the":10603,"-type":10604,"ectors":10605,"ami":10606,"uting":10607,"Ġuma":10608,"XML":10609,"UCT":10610,"Ap":10611,"ĠRandom":10612,"Ġran":10613,".sort":10614,"Ġsorted":10615,".Un":10616,"_PER":10617,"itory":10618,"Ġpriority":10619,"ĠGal":10620,"ĠOld":10621,"hot":10622,"ĠDisplay":10623,"(sub":10624,"_TH":10625,"_Y":10626,"ĠCare":10627,"loading":10628,"Kind":10629,"_handle":10630,",,":10631,"rase":10632,"_replace":10633,".addEventListener":10634,"ĠRT":10635,"Ġentered":10636,"gers":10637,"Ġich":10638,"(start":10639,"/app":10640,"Ġbrother":10641,"Memory":10642,"Outlet":10643,"Ġutf":10644,"prec":10645,"Ġnavigation":10646,"ORK":10647,"Ġdst":10648,"Detail":10649,"Ġaudience":10650,"Ġdur":10651,"Ġcluster":10652,"unched":10653,"Ġ],":10654,"Ġcomfortable":10655,".values":10656,"ĠTotal":10657,"Ġsnap":10658,"Ġstandards":10659,"Ġperformed":10660,"hand":10661,"(\"@":10662,"åŃ":10663,"Ġphil":10664,"ibr":10665,"trim":10666,"Ġforget":10667,"Ġdoctor":10668,".TextBox":10669,"icons":10670,",s":10671,"ĠOp":10672,"Sm":10673,"Stop":10674,"ĉList":10675,"ĉu":10676,"Comment":10677,"_VERSION":10678,".Xtra":10679,"Person":10680,"rb":10681,"LOB":10682,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":10683,"ĠCentral":10684,"ICK":10685,"raq":10686,"Ġputting":10687,"Ġmd":10688,"ĠLove":10689,"Program":10690,"Border":10691,"oor":10692,"Ġallowing":10693,"after":10694,"Ġentries":10695,"ĠMaybe":10696,"]).":10697,"ĠShort":10698,")\\":10699,".now":10700,"friend":10701,"Ġprefer":10702,"ĠGPIO":10703,"osis":10704,"ĠGameObject":10705,"Ġskip":10706,"Ġcompetition":10707,"_match":10708,"lications":10709,"_CONT":10710,".groupBox":10711,"Ġals":10712,"\"We":10713,"_eq":10714,"lan":10715,"_search":10716,"ĠMusic":10717,"asis":10718,"Ġbind":10719,"ĠIsland":10720,"rum":10721,"(E":10722,"Ġseat":10723,"Video":10724,"Ġack":10725,"reek":10726,"={()":10727,"Ġrating":10728,"Ġrestaurant":10729,"DEX":10730,"(buf":10731,"pping":10732,"uality":10733,"Ġleague":10734,"Ġfocused":10735,"apon":10736,"$data":10737,"CLUD":10738,"CLUDING":10739,"Ġabsolute":10740,"(query":10741,"Ġtells":10742,"Ang":10743,"Ġcommunities":10744,"Ġhonest":10745,"oking":10746,"Ġapart":10747,"arity":10748,"/$":10749,"_module":10750,"ĠEnc":10751,".an":10752,".Config":10753,"Cre":10754,"Ġshock":10755,"ĠArab":10756,"IENT":10757,"/re":10758,"Ġretrie":10759,"ycler":10760,"isa":10761,"ĠOrgan":10762,".graph":10763,"Ġí":10764,"ĠBAS":10765,"Enum":10766,"Ġpossibly":10767,"ÑĢаÐ":10768,"ĠJapanese":10769,"Ġcraft":10770,"ĠPlace":10771,"Ġtalent":10772,"Ġfunding":10773,"Ġconfirmed":10774,"Ġcycle":10775,"/x":10776,"GE":10777,"Ġhearing":10778,"Ġplants":10779,"Ġmouth":10780,"pages":10781,"oria":10782,"ĠRemove":10783,"_total":10784,"Ġod":10785,"ollapse":10786,"door":10787,"Ġbought":10788,"Ġaddr":10789,"ARCH":10790,"_dim":10791,"dden":10792,"Ġdecades":10793,"REQUEST":10794,"Ġversions":10795,"fire":10796,"Ġmoves":10797,"fb":10798,"Ġcoffee":10799,".connect":10800,"ĠRow":10801,"Ġschema":10802,"Scope":10803,"-Type":10804,"Ġfighting":10805,"Ġreta
il":10806,"Ġmodified":10807,"TF":10808,"Files":10809,"nie":10810,"_command":10811,"stone":10812,"ĠÑĤ":10813,"_thread":10814,"Ġbond":10815,"ĠDevelopment":10816,"Ġpt":10817,"FORM":10818,"plet":10819,"Ġidentified":10820,"cpp":10821,"Ġcoding":10822,"oked":10823,"ĠMaster":10824,"IDTH":10825,"Ġresidents":10826,"redit":10827,"ĠPhoto":10828,"=-":10829,"unte":10830,"ateur":10831,"_STATE":10832,"ĠSing":10833,"Ġsheet":10834,".val":10835,"orse":10836,"Ġhers":10837,"Ġdetermined":10838,"Common":10839,"Ġwed":10840,"_queue":10841,"PH":10842,"ĠAtl":10843,"cred":10844,"/LICENSE":10845,"Ġmes":10846,"Ġadvanced":10847,".java":10848,".Sh":10849,"Go":10850,"kill":10851,"fp":10852,"_settings":10853,"Ġpal":10854,"Ġtruck":10855,"Ġcombined":10856,"Ġ\"${":10857,"ĠCorpor":10858,"Ġjoined":10859,"ĠJose":10860,"ĠCup":10861,"uns":10862,"estival":10863,"levision":10864,"Ġbroken":10865,"Ġmarriage":10866,"ĠWestern":10867,"Ġrepresents":10868,"ĠTitle":10869,"Ġss":10870,".Ass":10871,"ongoose":10872,"iento":10873,"<>();Ċ":10874,"Ġabsolutely":10875,"Ġsmooth":10876,"TERN":10877,"ĠUnless":10878,"Word":10879,"Ġmerge":10880,"igan":10881,"ĠVol":10882,"Ġnn":10883,".getId":10884,"Ġз":10885,"Ġsexy":10886,"Ġseeking":10887,"Single":10888,".this":10889,"Ġkom":10890,"bound":10891,";\"":10892,"ĠfontSize":10893,"_df":10894,"Ġinjury":10895,"(H":10896,"Ġissued":10897,"_END":10898,":self":10899,"Ġpatch":10900,"Ġleaves":10901,"Ġadopt":10902,"FileName":10903,"ãĢIJ":10904,"Ġexecutive":10905,"ĠByte":10906,"]))Ċ":10907,"Ġnu":10908,"outing":10909,"cluding":10910,"-R":10911,".options":10912,"Ġsubstant":10913,"avax":10914,"ĠBUT":10915,"Ġtechnical":10916,"Ġtwice":10917,"Ġmás":10918,"Ġunivers":10919,"yr":10920,"Ġdrag":10921,"ĠDC":10922,"Ġsed":10923,"Ġbot":10924,"ĠPal":10925,"ĠHall":10926,"forcement":10927,"Ġauch":10928,".mod":10929,"notation":10930,"_files":10931,".line":10932,"_flag":10933,"[name":10934,"Ġresolution":10935,"Ġbott":10936,"(\"[":10937,"ende":10938,"(arr":10939,"Free":10940,"(@\"":10941,"ĠDistrict":10942,"PEC":10943,":-":10944,"Picker":10945,"ĠJo":10946,"ĠĠĠĠĠĊ":10947,"ĠRiver":10948,"_rows":10949,"Ġhelpful":10950,"Ġmassive":10951,"---Ċ":10952,"Ġmeasures":10953,"ĠRuntime":10954,"Ġworry":10955,"ĠSpec":10956,"ĉD":10957,"ãĢij":10958,"Ġ){Ċ":10959,"Ġworse":10960,"(filename":10961,"Ġlay":10962,"Ġmagic":10963,"ĠTheir":10964,"oul":10965,"stroy":10966,"ĠWhere":10967,"Ġsudden":10968,"Ġdefe":10969,"Ġbinding":10970,"Ġflight":10971,"ĠOnInit":10972,"ĠWomen":10973,"ĠPolicy":10974,"Ġdrugs":10975,"ishing":10976,"('../":10977,"ĠMel":10978,"peat":10979,"tor":10980,"Ġproposed":10981,"Ġstated":10982,"_RES":10983,"Ġeast":10984,"ĠCONDITION":10985,"_desc":10986,"Ġwinning":10987,"folio":10988,"Mapper":10989,"ĠPan":10990,"ĠAnge":10991,".servlet":10992,"Ġcopies":10993,"LM":10994,"Ġvm":10995,"åį":10996,"Ġdictionary":10997,"Seg":10998,"elines":10999,"ĠSend":11000,"Ġiron":11001,"ĠFort":11002,".domain":11003,"Ġdebate":11004,"NotNull":11005,"eq":11006,"acher":11007,"lf":11008,"ĉfmt":11009,"Ġlawy":11010,"ÄŁ":11011,"ĠMen":11012,"Ġtrim":11013,"(NULL":11014,"Ġ!!":11015,"Ġpad":11016,"Ġfollows":11017,"\"][\"":11018,"requ":11019,"ĠEp":11020,".github":11021,"(img":11022,"eto":11023,"('\\":11024,"Services":11025,"umbnail":11026,"_main":11027,"pleted":11028,"fortunately":11029,"Ġwindows":11030,"Ġplane":11031,"ĠConnection":11032,".local":11033,"uard":11034,"}\\":11035,"==\"":11036,"andon":11037,"ĠRoy":11038,"west":11039,"iginal":11040,"emies":11041,"itz":11042,"'):Ċ":11043,"ĠPeter":11044,"Ġtough":11045,"Ġreduced":11046,"Ġcalculate":11047,"Ġrapid":11048,"customer":11049,"Ġefficient":110
50,"Ġmedium":11051,"Ġfell":11052,".ref":11053,"ĠCas":11054,"Ġfeedback":11055,"Speed":11056,"(output":11057,"aje":11058,"Ġcategories":11059,"Ġfee":11060,"};":11061,"Ġdeleted":11062,"reh":11063,"Ġproof":11064,"Desc":11065,"Build":11066,"Ġsides":11067,".ArrayList":11068,"-%":11069,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":11070,"ر":11071,".match":11072,"ли":11073,"Ġfeels":11074,"Ġachieve":11075,"Ġclim":11076,"_ON":11077,"ĠCD":11078,"Ġteacher":11079,"_current":11080,"bn":11081,"_PL":11082,"isting":11083,"Enable":11084,"GEN":11085,"Ġtv":11086,"Ġsock":11087,"Ġplays":11088,"Ġdiscount":11089,"ĠKE":11090,"ĠDebug":11091,"Fore":11092,"ĠIraq":11093,"Ġappearance":11094,"Mon":11095,"Ġstyled":11096,"ĠHuman":11097,"iot":11098,"ĠHistory":11099,"Ġsac":11100,"ĠCollection":11101,"Ġrecommended":11102,".Selected":11103,"Ġorganizations":11104,"Ġdiscovered":11105,"cohol":11106,"adas":11107,"ĠThomas":11108,"May":11109,"Ġconserv":11110,"Ġdomin":11111,"ĠFollow":11112,"ĠSection":11113,"ĠThanks":11114,"Username":11115,"Ġrecipe":11116,"Ġwonderful":11117,".sleep":11118,"_if":11119,"ĉĊĉĊ":11120,"orno":11121,"Ġru":11122,"_target":11123,".\"\"":11124,"à¦":11125,"EventArgs":11126,"Ġinputs":11127,"Ġfif":11128,"Ġvision":11129,"cy":11130,"ĠSeries":11131,")(((":11132,"Ġtrading":11133,"Ġmarker":11134,"Begin":11135,"Ġtypically":11136,"Ġcauses":11137,"dropdown":11138,"_DEBUG":11139,"Ġdetect":11140,"country":11141,"!\");Ċ":11142,"ĉR":11143,"appy":11144,"Ġcref":11145,"('<":11146,"\"=>":11147,"ĠLE":11148,"reader":11149,"Ġadministr":11150,"õ":11151,"ucket":11152,"Ġfashion":11153,".char":11154,"izar":11155,"Ġdisable":11156,"Ġsuc":11157,"ĠLive":11158,"issue":11159,"Ġmetadata":11160,"flags":11161,"ĠðŁ":11162,"Ġcommitted":11163,"Ġva":11164,"Ġrough":11165,"Ġ'''Ċ":11166,"Ġhighlight":11167,"_vars":11168,"VO":11169,"Ġencoding":11170,"-Z":11171,"_sign":11172,"$(\"#":11173,"Ġrain":11174,"reatest":11175,"ĠEND":11176,"Selection":11177,"Ġcandidates":11178,"Ġsav":11179,".Empty":11180,"Ġdecisions":11181,"Ġcollabor":11182,"ridge":11183,"feed":11184,"ression":11185,"Ġpersons":11186,"VM":11187,"ega":11188,"_BIT":11189,"According":11190,"acked":11191,"Ġdollars":11192,"_loss":11193,"ĠCost":11194,"}\"Ċ":11195,"Notification":11196,"Ġprostit":11197,"Ġauthority":11198,".rec":11199,"Ġspokes":11200,"ĠToday":11201,"istant":11202,"ĠHead":11203,"âĢĿ.":11204,"ertainment":11205,"cean":11206,"culate":11207,"Ġven":11208,"However":11209,"_arr":11210,"Ġtokens":11211,"Graph":11212,"ĠJud":11213,"ĠVirgin":11214,"ĠSerial":11215,"unning":11216,"Mutable":11217,"agers":11218,".csv":11219,"Ġdeveloping":11220,"Ġinstructions":11221,"Ġpromise":11222,"Ġrequested":11223,"_encode":11224,"/\"":11225,"ĠIcon":11226,"uilt":11227,"-day":11228,"Ġintelligence":11229,".IS":11230,"ĠObservable":11231,"ĠHard":11232,"Bool":11233,"idential":11234,".Anchor":11235,"Ġselling":11236,"CI":11237,"AGES":11238,"tle":11239,"bur":11240,"UFFER":11241,"RY":11242,"Ġbigger":11243,"Ġrat":11244,"Ġfamous":11245,"Ġtypename":11246,"Ġexplained":11247,"}}Ċ":11248,"Ġnuclear":11249,"-N":11250,"Ġcrisis":11251,"ĠEnter":11252,"Ġanswers":11253,"/${":11254,"/pl":11255,"Ġsequ":11256,"_next":11257,"mask":11258,"Ġstanding":11259,"Ġplenty":11260,"ĠCross":11261,"ĉret":11262,"dro":11263,"ĠCast":11264,"=true":11265,"ĠChris":11266,"icio":11267,"ĠMike":11268,"Decimal":11269,"addComponent":11270,"Len":11271,"Ġcock":11272,"Ġ#{":11273,"URN":11274,"":11403,"Ġ*=":11404,"ĠPS":11405,"Ġdangerous":11406,"[p":11407,"OME":11408,"Other":11409,"ĠStringBuilder":11410,"Points":11411,"heading":11412,"Ġcurrency":11413,"Ġpercentage":11414,"_API":11415
,"Ġclassic":11416,"thead":11417,"ĠMO":11418,"FE":11419,"Idx":11420,"await":11421,"Ġè":11422,"Ġaccident":11423,"Ġvariant":11424,"Ġmyst":11425,"ĠLand":11426,"ĠBre":11427,"Ġharm":11428,"ĠAcc":11429,"Ġcharged":11430,"iones":11431,"Visibility":11432,"arry":11433,"ĠLanguage":11434,"Ġwalking":11435,"\".ĊĊ":11436,"ifer":11437,"Ġleadership":11438,".From":11439,"ynam":11440,"Ġtimestamp":11441,"ipt":11442,"ĠHas":11443,"REFER":11444,"ĠIts":11445,"Ġlistener":11446,"UTE":11447,"_description":11448,"Ġexperiences":11449,"Ġcreates":11450,"RS":11451,"cart":11452,"black":11453,"Ġchoices":11454,"war":11455,"Ġ'''":11456,"Ġordered":11457,"Ġevening":11458,"Ġpil":11459,"Ġtun":11460,"ĠBad":11461,"(app":11462,"random":11463,"Ġexplicit":11464,"Ġarrived":11465,"Ġfly":11466,"Ġeconom":11467,"-mail":11468,"Ġlists":11469,"Ġarchitect":11470,"ĠPay":11471,"Ġds":11472,"ĠSol":11473,"Ġvehicles":11474,"Hz":11475,"-com":11476,"Ġking":11477,"_equal":11478,"ĠHelp":11479,"Ġabuse":11480,"--;Ċ":11481,"Ġextr":11482,"Ġchemical":11483,"ä¿":11484,"Ġorient":11485,"Ġbreath":11486,"ĠSpace":11487,"(element":11488,"wait":11489,"DED":11490,"igma":11491,"Ġentr":11492,"Ġsob":11493,"-name":11494,"Ġaffected":11495,"ika":11496,"Ġcoal":11497,"_work":11498,"Ġhundreds":11499,"Ġpolitics":11500,"subject":11501,"Ġconsumer":11502,"ANGE":11503,"Ġrepeated":11504,"Send":11505,"Ġ#[":11506,"Ġprotocol":11507,"Ġleads":11508,"useum":11509,"Every":11510,"Import":11511,"(count":11512,"Ġchallenges":11513,"Ġnovel":11514,"Ġdepart":11515,"bits":11516,".Current":11517,"Ġ`${":11518,"oting":11519,"(\\":11520,"Ġcreative":11521,"Ġbuff":11522,"Ġintroduced":11523,"usic":11524,"modules":11525,"Are":11526,"-doc":11527,"language":11528,"_cache":11529,"Ġtod":11530,"?>{{":11764,"ĠResource":11765,"ĠStandard":11766,"ĠPrem":11767,"updated":11768,"ivalent":11769,"Ġassets":11770,"_temp":11771,"Ġinterests":11772,"Ġhardware":11773,"ĠRom":11774,"ĠShare":11775,"Ġ''Ċ":11776,"Ġ*,":11777,"ĠTake":11778,"ĠImages":11779,"_CHECK":11780,"(typeof":11781,"ĠJun":11782,"\\<^":11783,"Ġliqu":11784,"Ġworst":11785,"ymbols":11786,"ĉĉĉĠĠĠ":11787,"Ġdrivers":11788,"ĠDocument":11789,"eno":11790,"ĠTechnology":11791,"Ġapproved":11792,"umps":11793,"Ġsnow":11794,"formance":11795,"_ASSERT":11796,"uits":11797,"ÙĨ":11798,"Ġdifferences":11799,".Visible":11800,"ĉĉĉčĊ":11801,"ĠPs":11802,"_fetch":11803,"Ġtodo":11804,".',Ċ":11805,"Ġsel":11806,"urers":11807,"invalid":11808,"Ġtweet":11809,"VEL":11810,"Ġresearchers":11811,"Ġsprintf":11812,"ĠRO":11813,"Ġpel":11814,".Trans":11815,"Ġillegal":11816,"dialog":11817,"smarty":11818,"lg":11819,"_MIN":11820,"Ġhero":11821,"final":11822,"Ġpp":11823,".Le":11824,"Ġci":11825,"ĉRT":11826,"Ġsuggested":11827,"pdf":11828,"aching":11829,"ĠRo":11830,"ĠProperties":11831,"ĠSi":11832,"Ġbuying":11833,"Ġmu":11834,"Ġlands":11835,"ifiers":11836,"ĠFILE":11837,"ROUP":11838,"Ġholder":11839,"ĠSon":11840,"Ġsympt":11841,".route":11842,")?":11843,"Ġargc":11844,"Ġfort":11845,"Ġcasino":11846,"_category":11847,"Ġforum":11848,"prefix":11849,"apture":11850,"Tube":11851,"ems":11852,"imize":11853,"Ġnue":11854,"aus":11855,"course":11856,"ATOR":11857,"()),":11858,"Advertis":11859,"INGS":11860,"Ġacknow":11861,"ĠKorea":11862,"pling":11863,"Ġworker":11864,"PLIED":11865,"hal":11866,"ĠRichard":11867,"Elements":11868,"ĉĉĉĠ":11869,"star":11870,"Ġrelationships":11871,"Ġcheap":11872,"ACH":11873,"ĠXML":11874,",&":11875,"ĠLouis":11876,"Ġride":11877,"_FAIL":11878,"Ġchunk":11879,"[s":11880,"_OUT":11881,"Ġchosen":11882,"_[":11883,"/(":11884,"ĠJeff":11885,"_sl":11886,"priv":11887,"ĠCanadian":11888,"Ġunable":11889,"_FLAG":11890,"Ġnos"
:11891,"high":11892,"Ġlift":11893,"fun":11894,"(){":11895,"elly":11896,"yclerView":11897,"_as":11898,"_LIST":11899,"Ġradi":11900,".getValue":11901,"ĠAngeles":11902,"ĠSpan":11903,"_instance":11904,"itors":11905,"Ġmigration":11906,"AK":11907,"Oh":11908,"®":11909,".selected":11910,"ĠGT":11911,"Ġadvance":11912,"ĠStyle":11913,".DataGridView":11914,"ection":11915,"Ñİ":11916,"pio":11917,"rog":11918,"Ġshopping":11919,"ĠRect":11920,"Illuminate":11921,"OU":11922,"ĉarray":11923,"Ġsubstantial":11924,"Ġpregn":11925,"Ġpromote":11926,"IEW":11927,".Layout":11928,"Ġsigns":11929,"/.":11930,"Ġletters":11931,"Board":11932,"ctrl":11933,"\"\\":11934,"ĠJones":11935,"Ġvertex":11936,"Ġja":11937,"Ġaffili":11938,"Ġwealth":11939,"ĉdefault":11940,"Ġsignificantly":11941,"Ġec":11942,"Ġxs":11943,"actual":11944,".per":11945,"_step":11946,"anvas":11947,"mac":11948,"Ġtransl":11949,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":11950,"Iterator":11951,"Ġoch":11952,"agnostic":11953,"ĠDuring":11954,"ĠDEFAULT":11955,"Ġtill":11956,"Ġsignature":11957,"Ġbird":11958,"ĠOl":11959,"ĠIr":11960,"HS":11961,"avatar":11962,"ESSAGE":11963,"Ġelev":11964,"Ġmt":11965,"ĠNav":11966,"Ġrelax":11967,"Ġplate":11968,"ITEM":11969,"(date":11970,".not":11971,"Ġgrade":11972,"Ġ}),Ċ":11973,"?\"ĊĊ":11974,"iences":11975,"High":11976,"ĠDIS":11977,"disabled":11978,"QUI":11979,"Ġnoise":11980,"aux":11981,"ĠUP":11982,"osa":11983,"Ġvoc":11984,"Ġ))":11985,"ocom":11986,"_OFF":11987,"ĠDb":11988,"Lock":11989,".eclipse":11990,",d":11991,"ĠDraw":11992,"Ġ\"(":11993,"Ġvisited":11994,"ĠâĪ":11995,"Ġsucceed":11996,"Ġimpossible":11997,"aire":11998,"ĠTurn":11999,"Ġdish":12000,"FG":12001,"Ġsensor":12002,"ANN":12003,"aba":12004,"Ġsurg":12005,"]);čĊ":12006,"Ġfp":12007,"_an":12008,"-J":12009,"-G":12010,"ĠJob":12011,"Convert":12012,"ĠKEY":12013,"Ġauthors":12014,"_server":12015,"\\r":12016,"Ġ-*-":12017,"flex":12018,"Ġsoc":12019,"Ret":12020,"Ġsalt":12021,"Ġâ̦ĊĊ":12022,"ĠClear":12023,"(page":12024,"-danger":12025,"Ġrooms":12026,"conv":12027,"#{":12028,".op":12029,"ĠArea":12030,"_SC":12031,"hen":12032,"Ġbegins":12033,"-y":12034,"Ġexcited":12035,"Ġignored":12036,"Ġbonus":12037,"student":12038,"ĠMember":12039,"Ġrelatively":12040,"ĠLow":12041,"ĠProdu":12042,"ateway":12043,"posure":12044,"Ġthick":12045,"aniel":12046,"(view":12047,"ĠCrush":12048,"Extension":12049,"Il":12050,"eed":12051,"LOC":12052,".im":12053,".Items":12054,"Ġconflict":12055,".prevent":12056,"ĠonCreate":12057,"uv":12058,"iser":12059,"Ġwave":12060,"Mar":12061,"ĠCommunity":12062,"iche":12063,"ĠNothing":12064,"[m":12065,"ĠLee":12066,"riends":12067,"ère":12068,"!!!":12069,"anz":12070,".result":12071,"ĠSK":12072,"_PARAM":12073,"Ġdemocr":12074,"BackColor":12075,".exists":12076,"\"It":12077,"(options":12078,"razy":12079,"aser":12080,"\\Database":12081,"alendar":12082,"_ass":12083,";}Ċ":12084,"vertex":12085,"inecraft":12086,"Warning":12087,"argo":12088,"Ġactor":12089,"ĠInstead":12090,"ĠUsing":12091,"Self":12092,"@interface":12093,"Ġspeaking":12094,"ĠParis":12095,"ĠLICENSE":12096,".node":12097,"ĠFood":12098,"EIF":12099,"ĠBi":12100,".Start":12101,"ĠIB":12102,"Ġuniversity":12103,"ĠHeader":12104,".product":12105,"Copy":12106,"etc":12107,"rical":12108,"Ġ>>>":12109,"books":12110,"Ġalgorithm":12111,"Ġ'__":12112,"(javax":12113,"Ġnumerous":12114,"Share":12115,"Have":12116,"Ġrecru":12117,"Ġprove":12118,".substring":12119,"health":12120,"ел":12121,"Ġdecimal":12122,"Ġcommission":12123,"scription":12124,"xC":12125,"Ġsummary":12126,"atted":12127,"Ġcloser":12128,"finished":12129,"()){Ċ":12130,"ĠWood":12131,"_fields":12132,"ku":12133,
"_items":12134,"Flag":12135,"Ġconfidence":12136,"ĠFederal":12137,"dux":12138,"Ġcompat":12139,"Ġvertical":12140,"й":12141,"ès":12142,";\">Ċ":12143,"_manager":12144,"()))Ċ":12145,"IDE":12146,":\",":12147,"__Ċ":12148,"ĠWay":12149,"ÑĪ":12150,"Temp":12151,"ĠSTR":12152,"ritten":12153,"Sync":12154,"ĠAV":12155,"ĠCEO":12156,"ĠGuid":12157,"Ġenvironmental":12158,"Ġcorresponding":12159,"ĉconsole":12160,"Ġjustice":12161,"ĠJS":12162,"Ġlived":12163,"gar":12164,"ĠGraph":12165,"ĠStat":12166,"ĠiPhone":12167,".al":12168,"ĠHD":12169,"Ġoccur":12170,"Ġthreshold":12171,"Ġonclick":12172,"REG":12173,".GraphicsUnit":12174,"Meta":12175,"ž":12176,"Ġcum":12177,".gnu":12178,"ë":12179,"Ġobtained":12180,"Ġcomplaint":12181,"Ġeating":12182,"Ġtar":12183,"_task":12184,"Ġopts":12185,"(to":12186,"Pass":12187,"Ġplastic":12188,"tility":12189,"ĠWin":12190,".preventDefault":12191,"pile":12192,"ĠGar":12193,"Ġquantity":12194,"_last":12195,"Ġgreatest":12196,"Dao":12197,"_DIS":12198,"ĠUsed":12199,"ĠHP":12200,"riting":12201,"SION":12202,"blue":12203,"domain":12204,"Ġscores":12205,"Normal":12206,"_admin":12207,"ĠASSERT":12208,"Then":12209,"***":12210,"dist":12211,"lon":12212,"Ġhate":12213,"shal":12214,"ImageView":12215,"database":12216,"Ġpand":12217,"Ġlogic":12218,"=false":12219,"bg":12220,"ĠConfiguration":12221,"Ġnur":12222,"OG":12223,"Ġmarried":12224,":+":12225,"Ġdropped":12226,"Ġregistration":12227,"ом":12228,"ultiple":12229,"izers":12230,"shape":12231,".copy":12232,"Ġwearing":12233,"ĠCath":12234,"Ġdedicated":12235,"Ġ...Ċ":12236,"Ġadvoc":12237,"ĠFamily":12238,"Ġstatements":12239,"ematic":12240,"ampionship":12241,"Ġmotiv":12242,"ĠHave":12243,"Ġblow":12244,"Job":12245,"cert":12246,"_vector":12247,"install":12248,"ĠCOPY":12249,"embed":12250,"DIR":12251,"ĠSpring":12252,"Ġexhib":12253,"cdn":12254,"ĠComment":12255,"ĠOptional":12256,".player":12257,"ĠDark":12258,"(pos":12259,"ĠShould":12260,"Ġcentre":12261,"ĠGuard":12262,"ów":12263,"Ġtrouble":12264,"ENER":12265,"(unsigned":12266,"_service":12267,"Ġns":12268,"uling":12269,"ĠMexico":12270,"ĠNY":12271,"mysql":12272,"Ġlic":12273,"åľ":12274,"Mr":12275,"-fl":12276,"ĠCustomer":12277,"idi":12278,"Ġ?>ĊĊ":12279,"rible":12280,"ĠпÑĢ":12281,"Ġsizes":12282,"_STRING":12283,"validation":12284,"ĠJon":12285,"(Http":12286,"addClass":12287,"Nodes":12288,"Ġfragment":12289,"Ġspoke":12290,"Ġwaste":12291,"Join":12292,"Ġillustr":12293,"eli":12294,"cient":12295,"Ġaid":12296,"Ġprosec":12297,"'){Ċ":12298,"Ġpassing":12299,"Ġfaces":12300,"Shape":12301,"_Z":12302,"iti":12303,"Ġalle":12304,"Ġrobot":12305,"ĠĠĠĠĠĠĠĊ":12306,"ĠSpe":12307,"Ġreceiving":12308,"ĠDetails":12309,"Ġ\")":12310,"mg":12311,"_REF":12312,"Ġcomparison":12313,"*,":12314,"ĠFound":12315,"_session":12316,"(U":12317,"/F":12318,"Ġxxx":12319,"Network":12320,"ders":12321,"Ġcapture":12322,"Ġcorre":12323,"ĠLtd":12324,"ĠAdv":12325,"[@":12326,"Ġclip":12327,"Mill":12328,"ĠProfile":12329,"Ġendif":12330,"Ġoblig":12331,"describe":12332,".element":12333,"riterion":12334,"LD":12335,"ered":12336,"Ġfavour":12337,"score":12338,"ĠFilter":12339,"attributes":12340,"Ġchecks":12341,"Inflater":12342,"ĠPlus":12343,"Ġscientific":12344,"Ġprivacy":12345,"Head":12346,"Ġfeat":12347,"Ġdegrees":12348,"ĠPale":12349,";\">":12350,"Ġfilms":12351,"ĠAudio":12352,"ĠTag":12353,"ĠEnergy":12354,"itar":12355,"parator":12356,"Ġfellow":12357,"Ġevt":12358,"ĠTri":12359,"ĠDAM":12360,"cloud":12361,"ĠPassword":12362,"ĠDemocrats":12363,"ĠAcad":12364,"$lang":12365,"Ġreb":12366,"())ĊĊ":12367,"нÑĭ":12368,"ĠBur":12369,"readcr":12370,"Ġhex":12371,"Console":12372,"ctl":12373,"ousel":12374,"ĠWilliam":12375,"Ġaz":123
76,"_PORT":12377,"Ġpractices":12378,"Ġanywhere":12379,"ĠPosition":12380,"Ġ->Ċ":12381,"iams":12382,".username":12383,"placeholder":12384,"Ġoder":12385,"ĠSecretary":12386,"ĠiT":12387,"mond":12388,"events":12389,"?âĢĿ":12390,".Sub":12391,"Ġattached":12392,"Ġnão":12393,"Ġestate":12394,".action":12395,"Ġfigures":12396,"Ġ});čĊ":12397,"Ġsubscri":12398,".tag":12399,"nam":12400,".plot":12401,"noon":12402,"liament":12403,"Character":12404,".tab":12405,"Ġwinter":12406,"ĠVariable":12407,"Ġtrees":12408,"Ġproud":12409,"(V":12410,"_load":12411,"Ġhier":12412,"ĠEcon":12413,"Ġfd":12414,"Ġvictims":12415,"Rest":12416,"iana":12417,"Ġfake":12418,".Println":12419,"Ġstrlen":12420,"Ġsad":12421,"Ġble":12422,"Prot":12423,"Ġbuttons":12424,"Ġtelevision":12425,"Ġlogo":12426,"extension":12427,"ĉj":12428,"stein":12429,"aciones":12430,"Ġ\"\"\"ĊĊ":12431,"Ġsimp":12432,"Ġrecorded":12433,"Ġbrings":12434,"Ġprincipal":12435,"Ġfees":12436,"(source":12437,"kdir":12438,"Ġutils":12439,"Ġcorrectly":12440,"fil":12441,"Ġwel":12442,"Pair":12443,"-button":12444,"scale":12445,"verify":12446,"[c":12447,"Ġ---":12448,"Ġescape":12449,"ikes":12450,"LowerCase":12451,"ician":12452,"Ġchapter":12453,"ĠTYPE":12454,"Ġshadow":12455,"Ġawesome":12456,"WE":12457,"elif":12458,"Ġlambda":12459,"Ġdistinct":12460,"Ġbare":12461,"-off":12462,"Ġcolour":12463,".appendChild":12464,"olec":12465,"aga":12466,".fill":12467,"ĉsuper":12468,"Ġadj":12469,"(position":12470,".getItem":12471,"Short":12472,"Ġtotally":12473,"VD":12474,"ĠTre":12475,"_ep":12476,"vements":12477,"ĠSolution":12478,"Ġfundament":12479,"Follow":12480,"Ġfacility":12481,"Ġhappening":12482,"OF":12483,".textBox":12484,"Span":12485,"Ġ«":12486,"iden":12487,"Ġexceed":12488,"(parent":12489,"Ġcp":12490,"ç»":12491,"Ġhasn":12492,"Ġpri":12493,"Ġconsequ":12494,"nen":12495,"ĠINTO":12496,"Ignore":12497,"ĠFuture":12498,"Ġcarbon":12499,"ĠSteel":12500,"fmt":12501,"okie":12502,"Ġspl":12503,"(title":12504,"-info":12505,"Ġdeals":12506,"Ġfixture":12507,"ea":12508,"Div":12509,"Ġtested":12510,"_return":12511,")ĊĊĊĊ":12512,"upported":12513,"ĠCook":12514,"Ġpaying":12515,"ĠIll":12516,"Ġarrested":12517,"ĠPrime":12518,"_callback":12519,">,Ċ":12520,"driver":12521,"Once":12522,"abb":12523,"_bytes":12524,"ĠSets":12525,"(Object":12526,"Ġcc":12527,"Ġshell":12528,"alo":12529,");//":12530,"(log":12531,"ctors":12532,")":13004,"Ġ$(\".":13005,".pos":13006,"Ġboys":13007,"Ġwedding":13008,"Ġagents":13009,"=\"_":13010,"ĠArmy":13011,"Ġhint":13012,"vision":13013,"Ġtech":13014,"ĠConnect":13015,"Ġlegend":13016,"ĠBet":13017,".Base":13018,"Subject":13019,"Ġlit":13020,"Remove":13021,"Ġ\":":13022,"ĠFinal":13023,"pearance":13024,"ĠiTunes":13025,"Ġparticipants":13026,"ĠPython":13027,"Ġbusy":13028,"iel":13029,"vertices":13030,"ĠtemplateUrl":13031,"ĠClose":13032,"Img":13033,"ĠCorporation":13034,"timestamp":13035,"Ġextend":13036,"Ġwebsites":13037,"Ġpossibility":13038,"оÑĤ":13039,"Ġkö":13040,"Ġmeat":13041,"Ġrepresentation":13042,"Ġĉĉ":13043,"_START":13044,".apply":13045,"ĠValley":13046,"ĠSuccess":13047,"Hi":13048,"Ġnob":13049,"ĠIEnumerable":13050,"_select":13051,"geo":13052,".\")Ċ":13053,"Ġturning":13054,"Ġfabric":13055,"(\"\");Ċ":13056,"Ġperspective":13057,"éĹ":13058,"ĠSn":13059,"Thank":13060,";j":13061,".Parameters":13062,"ĉĠĠĠĠĠĠĠĠĠĠĠ":13063,"Ġfacts":13064,"Ġunt":13065,".instance":13066,"################################################################":13067,"-end":13068,"ĠJOIN":13069,"ĠHen":13070,"Ġuri":13071,"åIJį":13072,"Ġна":13073,"ĠInfo":13074,"Ġconducted":13075,"ĠÃ¥":13076,"OURCE":13077,"Ġwine":13078,"John":13079,".Errorf":13080,"ĠAge":13081,"ound
ed":13082,"Ġrealize":13083,"Ġ];":13084,"Ġsubsequ":13085,",m":13086,"(User":13087,"iano":13088,"Ġaccompl":13089,"isp":13090,".std":13091,"éĩ":13092,"ĠBed":13093,".setAttribute":13094,"BR":13095,"keep":13096,"ĠALL":13097,"Ġisol":13098,"amma":13099,"Package":13100,"Ġoccasion":13101,"-success":13102,"ед":13103,"ĠLIMITED":13104,"strip":13105,"()ĊĊĊ":13106,"istribution":13107,"Colors":13108,"Ġ+:+":13109,"DidLoad":13110,"aler":13111,"Ġtid":13112,"ĠLED":13113,"ĠLinked":13114,"ĠCart":13115,"())čĊ":13116,"_READ":13117,"Ġkilling":13118,"ĠPHP":13119,"fection":13120,"Ġinstances":13121,"cv":13122,"\"/>":13123,"Ġsf":13124,"Ġtaxes":13125,"_location":13126,"ĠBitcoin":13127,"uable":13128,"rank":13129,"ignore":13130,"track":13131,"ка":13132,"Ġshouldn":13133,"ĠOP":13134,"=>{Ċ":13135,"Ġkm":13136,"Ġhelper":13137,"_head":13138,"ĠWhether":13139,"oco":13140,"_bl":13141,"Ġstatistics":13142,"Ġbeauty":13143,"Ġtog":13144,"tip":13145,"ëĭ¤":13146,"Ġcsv":13147,"(sql":13148,"stdlib":13149,"weak":13150,"Ġlikes":13151,"Äį":13152,"Ġrepeat":13153,"Ġapartment":13154,"Ġemph":13155,"_edit":13156,"Ġvit":13157,"ĉtype":13158,"Even":13159,"uten":13160,"Ġcircumstances":13161,"bian":13162,"Ġsugar":13163,"Windows":13164,"ìŀ":13165,"Ġobserved":13166,"/data":13167,"Ġcalendar":13168,"Ġstrike":13169,"ĠRES":13170,"_sc":13171,"fony":13172,"orem":13173,"(z":13174,"power":13175,"etect":13176,"ĠSat":13177,".description":13178,"Ġgang":13179,"ĠSports":13180,"ongs":13181,"ĠBundle":13182,".sum":13183,"once":13184,"Ġaccused":13185,"Ġexplore":13186,"Ġapproximately":13187,"Ġlosing":13188,"thesis":13189,"ĠFund":13190,"Ġdiagn":13191,"Autowired":13192,"properties":13193,"Ġ_.":13194,"Ġcnt":13195,"cedure":13196,"Ġyy":13197,"Ġgrant":13198,"sock":13199,".innerHTML":13200,"Ġ]);Ċ":13201,"ĠCONFIG":13202,"='$":13203,"]];Ċ":13204,"UND":13205,"Ġglob":13206,"Ġdire":13207,"uffle":13208,"_MEM":13209,"Ġauthentic":13210,">(\"":13211,"Ġdecade":13212,"ĠImport":13213,"Ġoriginally":13214,"ĠjQuery":13215,"Ġindicate":13216,"Ġourselves":13217,"Sw":13218,".lbl":13219,"enerate":13220,"Ġbasically":13221,"ĠHom":13222,"Ġ+#+":13223,"ĠBritain":13224,"ĠKar":13225,"toEqual":13226,".stop":13227,"Ġmodal":13228,"isi":13229,"Ġsuggests":13230,"Ġdtype":13231,"Ġtur":13232,"bf":13233,"Ġconnections":13234,"ĠBefore":13235,"isted":13236,"mouse":13237,"Ġpulled":13238,".build":13239,"Ġlegislation":13240,"Ġforth":13241,"pad":13242,"ego":13243,".Now":13244,"Ġexciting":13245,"}ĊĊĊĊ":13246,"Ġcompr":13247,"Ġshares":13248,"Ġrig":13249,"green":13250,"_vec":13251,"Ġenumerate":13252,"Auto":13253,"icator":13254,"ĠRay":13255,"asse":13256,"Ġholiday":13257,"Ġnullable":13258,"gun":13259,"_details":13260,"Ġwrapper":13261,"seq":13262,"ĠYoung":13263,"juana":13264,"Ġ\"__":13265,"license":13266,"serve":13267,"^(":13268,"iders":13269,".Remove":13270,"ropdown":13271,"'S":13272,"pin":13273,"(token":13274,".Default":13275,"Ġreasonable":13276,"ampion":13277,"ĠSociety":13278,"Ġbei":13279,"erves":13280,"rad":13281,"ĠFox":13282,"_images":13283,"Ġwheel":13284,"')[":13285,"Ġcfg":13286,"(By":13287,"Constructor":13288,"Ġvary":13289,".swift":13290,"Ġproxy":13291,"ĉH":13292,"ĠAnother":13293,"ĠPen":13294,"Ġchecking":13295,"Ġjest":13296,"manager":13297,"Origin":13298,"ugs":13299,"oir":13300,">čĊ":15956,"Ġrelief":15957,"lap":15958,"quer":15959,"_parent":15960,"heap":15961,"LOSE":15962,"Ġcombine":15963,"ĠRose":15964,"owers":15965,"Ġprocedures":15966,"ĠSort":15967,"anim":15968,"variant":15969,"ehicle":15970,"Ġsigning":15971,"Primary":15972,"currency":15973,"Ġsexe":15974,"oen":15975,"theta":15976,"eman":15977,"Ġimpressive":15978,"('_":1
5979,"ĉU":15980,"ĠTextStyle":15981,"_cnt":15982,"Ġslice":15983,"(':":15984,"Ġunderstood":15985,"His":15986,"Ġinformed":15987,"Ġnick":15988,"(TAG":15989,"hd":15990,"Ġelections":15991,"esture":15992,"ĠSanta":15993,"ĠCoast":15994,".pdf":15995,"inciple":15996,".clone":15997,"born":15998,"uta":15999,"Ġlicensed":16000,"Cr":16001,"Ġbread":16002,"ĠHouston":16003,"Ġnod":16004,"Ġhopes":16005,"ĠCGRect":16006,"Ġguilty":16007,".gif":16008,"Ġrose":16009,".Common":16010,"Tip":16011,"ANK":16012,"ĠFC":16013,"During":16014,"ĠSymfony":16015,"Ġdefensive":16016,"km":16017,")>":16018,"archive":16019,"ĠURI":16020,"ycling":16021,"-o":16022,"ĠWebsite":16023,"AMP":16024,"ishment":16025,"Ġdoctors":16026,"Direct":16027,"ARI":16028,"ĠRedirect":16029,"ieren":16030,"_dist":16031,"yo":16032,"ĠProgress":16033,"Ġzum":16034,"Ġmemor":16035,"ĠED":16036,"Ġjur":16037,"æį®":16038,"_TABLE":16039,"Ġuuid":16040,"Expr":16041,".head":16042,"('%":16043,"pointer":16044,"Ġestimate":16045,"ĠGreg":16046,"Ġloader":16047,"ĠiOS":16048,"Ġmens":16049,"[y":16050,"Ġrefused":16051,"Ġprecision":16052,"isch":16053,"ĠACTION":16054,"Cloud":16055,"sWith":16056,"(ret":16057,"_ADDR":16058,"_conf":16059,"(df":16060,"Ġlocked":16061,"Ġrising":16062,"ãĥ»ãĥ»":16063,"ĠMs":16064,"Ġscenes":16065,"_EXT":16066,"_raw":16067,"_the":16068,"people":16069,"Ġrecon":16070,"ĠFun":16071,"Ġbless":16072,"ĠUpdated":16073,"ün":16074,"ĠĠĠĠĠĠĠĠĠĠĠĠčĊ":16075,"pection":16076,"Release":16077,".logger":16078,"ĠSY":16079,"Ġcounsel":16080,"urd":16081,"_true":16082,"Ġeverybody":16083,"ivot":16084,"Ġhence":16085,"ĠNAS":16086,"Ġopposed":16087,"unknown":16088,"ĠDESC":16089,"ĠChair":16090,"failed":16091,"ĠINCLUDING":16092,"Ġwriters":16093,"{}Ċ":16094,"ÃŃt":16095,"_copy":16096,"}:":16097,"ĠBat":16098,"Ġconverted":16099,"eding":16100,"placement":16101,"ĠHost":16102,"Sound":16103,"им":16104,"Ġsought":16105,"mid":16106,"Ġsalary":16107,"ogg":16108,"âĦ¢":16109,"bul":16110,"Ġwir":16111,"validator":16112,"_STAT":16113,".store":16114,"ĠBattle":16115,"ın":16116,"Ġ-->ĊĊ":16117,"Trump":16118,"dot":16119,"ĠCONT":16120,".fetch":16121,"Ġcontinu":16122,"was":16123,"Ġfraud":16124,"_tmp":16125,"mitter":16126,".pictureBox":16127,"GA":16128,"Ġtournament":16129,".Input":16130,"[r":16131,"exion":16132,"centage":16133,"ĠKorean":16134,"undef":16135,"ĠAvailable":16136,"reshape":16137,"Ġkit":16138,"ĠStruct":16139,"ĠSUB":16140,"Answer":16141,"_lib":16142,".twitter":16143,"Ġore":16144,"ĠDragon":16145,".Ext":16146,",k":16147,"Ġexplanation":16148,"refs":16149,"ĠDrive":16150,"ĠTraining":16151,".Has":16152,"intage":16153,"big":16154,"ologist":16155,"ennis":16156,"Ùĩ":16157,"Ġchicken":16158,"ĠĠĠĠĠĠĠĠĠĠĊ":16159,"çĽ":16160,"ãģ§":16161,"Ġpeak":16162,"Ġdrinking":16163,"Ġencode":16164,"ĠNEW":16165,"malloc":16166,"ĉfprintf":16167,"Ġ=================================================================":16168,"including":16169,"Ġprinciples":16170,"ĠMah":16171,"storage":16172,"-key":16173,"Ġkeyword":16174,"%;":16175,"Ġtrained":16176,".contrib":16177,"Ġkv":16178,"__':Ċ":16179,"ĠBoy":16180,"parameter":16181,"Ġsuite":16182,"Ġthousand":16183,"Ġcoordinate":16184,"-generated":16185,"íķĺ":16186,"generated":16187,"Ġadmitted":16188,"Ġpussy":16189,"#w":16190,"Ġswim":16191,"union":16192,"Na":16193,"ĠRoyal":16194,".channel":16195,"Updated":16196,"_ROOT":16197,"Ġvital":16198,"raction":16199,"ĠCrusher":16200,"Ġpreced":16201,"Ġhorizontal":16202,"Blueprint":16203,"Ġattrs":16204,"Ġsmoke":16205,"ÐĴ":16206,".Equals":16207,"FB":16208,"ĠResources":16209,"rolling":16210,"Ġpasses":16211,"ĠNum":16212,"rotate":16213,"etype":16214,"\\\",":16215,"Ġsensitive":16
216,"Ġtall":16217,"?âĢĿĊĊ":16218,"Proxy":16219,"iy":16220,"_section":16221,"âĢĶâĢĶâĢĶâĢĶ":16222,"brid":16223,"Ġcircuit":16224,"atan":16225,"ENC":16226,"Ġdriven":16227,"Ġvoted":16228,"Ġeducational":16229,"Ġinteraction":16230,"abetes":16231,"Ġtone":16232,"ĠInitializeComponent":16233,"Ġmerely":16234,"Ġìŀ":16235,"cookie":16236,"_div":16237,"ĠUILabel":16238,"vely":16239,"});čĊ":16240,"_ENT":16241,"#+#+":16242,"articles":16243,"ĠSouthern":16244,"Ġstronger":16245,"ĠGiven":16246,"ĠEric":16247,"ĠIR":16248,"abstract":16249,"Under":16250,"nable":16251,"Ġincrement":16252,"oven":16253,"Ġcoin":16254,"_timer":16255,"Ġsuffered":16256,"ĠFREE":16257,"'].\"":16258,"ĠQueen":16259,"stats":16260,"Ġmeetings":16261,"Ġentering":16262,"Ġalongside":16263,"(session":16264,"itals":16265,"Ġfoundation":16266,"ĠCredit":16267,".div":16268,"_ALL":16269,"pcion":16270,"_stat":16271,"icking":16272,"Defaults":16273,"_src":16274,"Ġoutputs":16275,"/B":16276,"Ġenthus":16277,"-bl":16278,".ForeColor":16279,"ĉtemp":16280,"Face":16281,"Ġinteract":16282,"Ġweird":16283,"Mount":16284,"rell":16285,"udents":16286,"Ġrequirement":16287,"ĠSus":16288,"IER":16289,"Ġelected":16290,"reference":16291,"ĠME":16292,"Ġservers":16293,".wait":16294,"Ġsnapshot":16295,"ilton":16296,"Ġtries":16297,"Ġtipo":16298,".Time":16299,">w":16300,"Ġmountain":16301,"Ġpounds":16302,"Ġ[...":16303,"exists":16304,"ĠngOn":16305,"_MAP":16306,"Ġflying":16307,"xiety":16308,"ĉvalue":16309,"_DB":16310,"uno":16311,"Ġseats":16312,"TURN":16313,".author":16314,"!)":16315,"orce":16316,"Ġindicated":16317,".sin":16318,"Ġassignment":16319,"imiento":16320,"ĠFrame":16321,"_gen":16322,"inery":16323,"_)":16324,"messages":16325,".settings":16326,"ĠMean":16327,"ĠMuseum":16328,"irq":16329,"attach":16330,"ĠPalestin":16331,"_QU":16332,"_tags":16333,"Ġcasual":16334,"emen":16335,"ASSWORD":16336,"$s":16337,"ĠCirc":16338,"ой":16339,"etric":16340,"/P":16341,"Ġepoch":16342,"The":16357,"ĠAk":16358,"Ġgrass":16359,"/*čĊ":16360,"(dis":16361,"Ġguns":16362,"Ġtb":16363,"ĠKevin":16364,".args":16365,"ĠAh":16366,"oped":16367,"(J":16368,"columns":16369,"arguments":16370,"ĠWithEvents":16371,"_full":16372,"ĠDefense":16373,"Simple":16374,"Ġdeaths":16375,"Ġextensive":16376,"ĠStill":16377,"ĠExpression":16378,"ĠAgency":16379,"Ġperforming":16380,"FX":16381,"Ġusuario":16382,"UAL":16383,"Side":16384,"odos":16385,"aptop":16386,"Ġcredentials":16387,"_cap":16388,"atient":16389,"ĠDisney":16390,"Ġai":16391,"Ġchip":16392,"Ġvolt":16393,".makeText":16394,"%%%%%%%%%%%%%%%%":16395,"Ġbelief":16396,"_LOC":16397,"ĠCivil":16398,"Navigation":16399,"Ġreveal":16400,"Ġviolent":16401,"ĠFil":16402,"Ġcatalog":16403,"emed":16404,"scan":16405,".control":16406,"Ġconstitution":16407,"Country":16408,"Separator":16409,"_APP":16410,"topic":16411,"uetooth":16412,"MIN":16413,"Ġdescriptor":16414,"yt":16415,"ETHER":16416,"Ġdistribute":16417,"'}Ċ":16418,".trim":16419,".Line":16420,"Ġlbl":16421,"assertEquals":16422,"ĠDet":16423,"ombok":16424,"(width":16425,"Ġtort":16426,"ĠEXPRESS":16427,"aco":16428,"Using":16429,"ĠBrand":16430,"wall":16431,"EMENT":16432,"ĠCommunic":16433,"(Ċ":17055,"?>\"":17056,"Ġ///Ċ":17057,"Ġeiner":17058,"Ġweekly":17059,"ĉlogger":17060,"_pop":17061,"_man":17062,"Ġmigrations":17063,"Ġasks":17064,"Ġbs":17065,"Ġfalls":17066,".Where":17067,"-height":17068,"_feature":17069,".Min":17070,"Ġhyper":17071,"Ġvolatile":17072,"Ġtwenty":17073,"Typography":17074,"Unable":17075,"Det":17076,",f":17077,"-mod":17078,"Ġsettlement":17079,"Ġcontracts":17080,"nome":17081,"Bad":17082,"ĠBrian":17083,"(username":17084,"!!!!":17085,"Ġhack":17086,".Field":17087,
"HR":17088,"ĠJordan":17089,"iza":17090,"ĠÂł":17091,"ĠSher":17092,".header":17093,"(other":17094,"ĠDub":17095,"(op":17096,"ĠRound":17097,"Ġvie":17098,"Ġappl":17099,"ĉJ":17100,"ĠInsert":17101,"ĠLP":17102,"regon":17103,"ĠMPI":17104,"Ġanchor":17105,"aca":17106,"ør":17107,"Ġade":17108,"anchor":17109,"quee":17110,"ĠTreeNode":17111,"Ġtargeted":17112,"Ġlaid":17113,"ABEL":17114,"vet":17115,"ĠOrigin":17116,"Ant":17117,".');Ċ":17118,"expect":17119,"edReader":17120,"ĠMajor":17121,"Ġinch":17122,"Compar":17123,"Ġpreview":17124,"Ġillness":17125,"ĠCONTRACT":17126,"ĠIndepend":17127,"uuid":17128,"Ġnome":17129,"Ġtc":17130,"ĠAvenue":17131,"isan":17132,"Ġphrase":17133,"_move":17134,"\")[":17135,"Ġprovision":17136,"Ġconcentr":17137,"_IR":17138,"ĠUt":17139,"()+":17140,"Ġnas":17141,"!,":17142,"ĠRobin":17143,"iations":17144,"atitude":17145,"Ġpx":17146,"ĠWithout":17147,"/bash":17148,"ekt":17149,"reement":17150,"Observer":17151,"ĠRegion":17152,"UBLIC":17153,"Ġ{//":17154,"KN":17155,"å·":17156,"GameObject":17157,"å¾":17158,"encoding":17159,"Ġ***":17160,"projects":17161,"Ġtk":17162,"Ġcheese":17163,"EMPL":17164,"aro":17165,"ĠاÙĦ":17166,"Ġconsists":17167,"refresh":17168,"ureau":17169,"ĠScanner":17170,"Ġsoil":17171,"Ġflavor":17172,"DataSource":17173,"Execute":17174,"ение":17175,"Ġshit":17176,"åĪĨ":17177,"Ċ":17419,"Ġsubsequent":17420,"posable":17421,"-fluid":17422,"Ġthorough":17423,"Ġpublicly":17424,"apters":17425,"ĠWilson":17426,"_PRE":17427,"yard":17428,"ä¼":17429,"ĉin":17430,"Ġrevers":17431,"Ġbullet":17432,"cribed":17433,"nesota":17434,"Ġ($_":17435,"annon":17436,"cursor":17437,"Ġclothing":17438,"ĠMulti":17439,":',":17440,"Ġvess":17441,"ordinator":17442,"Ġeinem":17443,"Cannot":17444,"Ġarmed":17445,"ĉV":17446,"ä¸Ĭ":17447,".Flat":17448,"ĠSep":17449,"ĠSubject":17450,"_font":17451,"Ġcharacteristics":17452,"Done":17453,"eln":17454,"############":17455,"POS":17456,"Ġdensity":17457,"ĠPlatform":17458,"-items":17459,"Ġovers":17460,"Ġpushing":17461,"ç¤":17462,".Connection":17463,"_term":17464,"Ġinitialization":17465,"________________________________":17466,"ç¬":17467,".document":17468,"lesh":17469,"ĉdocument":17470,"ĠPin":17471,"ça":17472,"Ġdefinitions":17473,".Path":17474,"_WRITE":17475,"ĠĉĊ":17476,"?>ĊĊ":17477,"Ġterrible":17478,"bean":17479,"ickets":17480,"ĠSV":17481,"Buy":17482,"(task":17483,"Ġregime":17484,"google":17485,"Ġcrack":17486,".visit":17487,"NUM":17488,"energy":17489,"Ġstruck":17490,"_sample":17491,".payload":17492,"Ġrevis":17493,"ĠScene":17494,"Ġpg":17495,"Ġbreakfast":17496,"URRENT":17497,".charAt":17498,"_exception":17499,"ĠAnton":17500,"Ġguidelines":17501,"Ġexhaust":17502,"ĠFinancial":17503,"Ġindent":17504,"Ġdesktop":17505,"Hidden":17506,"Failure":17507,"Ġprinciple":17508,"Ġiv":17509,"Ġseks":17510,"network":17511,"ĠnumberOf":17512,"ĠAlbert":17513,"ĉlong":17514,",.":17515,"Ġzeros":17516,"fade":17517,"ĠTyp":17518,"ĠTerm":17519,"ĠArts":17520,".Application":17521,"Ġbehalf":17522,"æĪ·":17523,"Ġmere":17524,"(`${":17525,"Ġawareness":17526,"elpers":17527,"flix":17528,"Ġweigh":17529,"Ġestimates":17530,".child":17531,"/O":17532,"ĠBitmap":17533,".bottom":17534,"Ġ**************************************************************************":17535,"Expect":17536,"ento":17537,"ĠForum":17538,"veral":17539,"Ġjail":17540,"Ġabilities":17541,"ĠHOLD":17542,"ĠCit":17543,"Ġdynam":17544,"Ġgray":17545,"ĉĉĉĉĉĉĉĉĉĉĉĉĉ":17546,".nextInt":17547,"antly":17548,"ĠARISING":17549,"(private":17550,"Ġrejected":17551,"ĠNic":17552,"Ġleather":17553,"={Ċ":17554,"alytics":17555,"thetic":17556,".Top":17557,".Page":17558,"={`":17559,"Ġ;čĊ":17560,"depth":17561,
"mann":17562,"WD":17563,"ĠSom":17564,".Right":17565,"Ġ)}Ċ":17566,"Ġtrait":17567,"ÃĹ":17568,"iac":17569,"Ġrv":17570,"Sample":17571,".Xml":17572,"opped":17573,"ĠÑĦ":17574,"lists":17575,"Ġtear":17576,"iversary":17577,".collection":17578,"ĠConstitution":17579,"ĠHttpResponse":17580,"Ġbrill":17581,"ĠProm":17582,"hover":17583,"ĠMiami":17584,"Ġargue":17585,"_float":17586,"ĠãĤ":17587,"Ġnat":17588,"ĠTal":17589,"Ġintegration":17590,"(cur":17591,"Ġremoving":17592,"Ġcoeff":17593,"ĠThough":17594,"Ġforecast":17595,"ĠVegas":17596,"Site":17597,"Ġtrab":17598,"ĠHenry":17599,"-i":17600,"Ġinvolves":17601,"BT":17602,"Ġslo":17603,"Invoke":17604,"Ġlucky":17605,"rat":17606,"Ġ?Ċ":17607,"Ġhandled":17608,"(fd":17609,"contents":17610,"ĠOFF":17611,"RF":17612,"Ġsty":17613,"ĠMotor":17614,"tery":17615,"tax":17616,"MAP":17617,"ĠMrs":17618,"Ġphones":17619,"ĠUIView":17620,"\")));Ċ":17621,"(dev":17622,"ĠIrish":17623,"Ġws":17624,"DI":17625,"_OFFSET":17626,"ĠEvents":17627,"Ġstages":17628,"Ġ}//":17629,"Ġhaben":17630,"STANCE":17631,"ĠSin":17632,"ĠMoney":17633,"(top":17634,"Ġappointment":17635,"VERSION":17636,"metadata":17637,"_comment":17638,"Ġcolleagues":17639,"maps":17640,"âĺ":17641,"ĊĉĊ":17642,"(al":17643,"_req":17644,"Ġfut":17645,"Ġarchitecture":17646,"ĠWHETHER":17647,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":17648,"_screen":17649,"ĠstyleUrls":17650,"Ġmonster":17651,".up":17652,"phia":17653,"Ġprocessor":17654,"ĠTerr":17655,"=',":17656,"ĠManufact":17657,"ĠNT":17658,"kel":17659,"ibern":17660,"ĉfile":17661,"Ali":17662,"rientation":17663,"Ġ//!":17664,"apore":17665,"aneous":17666,"ĠCreat":17667,"folder":17668,"Ġhay":17669,"Suppress":17670,"(left":17671,"Ġeuro":17672,"Ġdisclaimer":17673,"ustry":17674,"ships":17675,"_fd":17676,"ĠFa":17677,"_insert":17678,"Ġrol":17679,"ifting":17680,"ĠComments":17681,"_br":17682,"Ġlosses":17683,"ĠAdded":17684,"charg":17685,"Ġпо":17686,"_system":17687,"ĠSometimes":17688,"ĠSpain":17689,"(group":17690,"ialis":17691,"Ġdollar":17692,"ĠArgs":17693,"quires":17694,"ĠTen":17695,".scss":17696,"Ġsurvive":17697,"usage":17698,"Ġjun":17699,"imiter":17700,"ï¼ģĊĊ":17701,"Ġfifth":17702,"toggle":17703,"Ġdecline":17704,"($\"":17705,"(Long":17706,"inge":17707,"Ġpilot":17708,"-light":17709,"-radius":17710,"Ġpodcast":17711,"Ġnaturally":17712,"Pages":17713,"为":17714,"ĠDespite":17715,"Ġlighting":17716,"Ġcrate":17717,"ĠBinary":17718,"Ġreducing":17719,"Ġeleg":17720,"ĠMouse":17721,"ĠTestBed":17722,"ĠbeforeEach":17723,"_ARRAY":17724,"Redirect":17725,"Ġflood":17726,"Ġships":17727,"Ġelectricity":17728,")*(":17729,"ê¸":17730,"ĠViet":17731,"hero":17732,"Ġdia":17733,"ĠKent":17734,"heart":17735,"Ġthreats":17736,"_acc":17737,"Ġsymbols":17738,"ischen":17739,"_inst":17740,"Criterion":17741,"ĠTIM":17742,".Height":17743,"ĠâĢĻ":17744,"();ĊĊĊ":17745,"Products":17746,"_SP":17747,"ĠCy":17748,"Ġdependent":17749,"este":17750,"Ġdatos":17751,"dit":17752,"ав":17753,"IGNAL":17754,"Ġlesson":17755,"\">'":17756,"ĠCover":17757,"ĠHope":17758,"ĠTimer":17759,"Ġdad":17760,"viders":17761,"ĠPhot":17762,"/?":17763,"ropy":17764,"oming":17765,"asion":17766,"Ġ\\(":17767,"ĠET":17768,"ĠReading":17769,"Ġepisodes":17770,"lm":17771,"echa":17772,"Ġneuro":17773,"Ġharmon":17774,"Ġliberal":17775,"-ind":17776,"DATA":17777,"Ġeveryday":17778,"Ġdivided":17779,"ĠActiveRecord":17780,"figure":17781,"UA":17782,"ä¹":17783,"riendly":17784,"tech":17785,".gameObject":17786,"иÑĤÑĮ":17787,"Ġmoon":17788,"ftime":17789,"Ġnoch":17790,"ĠTORT":17791,"ĠVM":17792,".initial":17793,"(child":17794,"Ġmusical":17795,"Ġoc":17796,"bas":17797,"ĠHay":17798,"_long":17799,"Ġmemset":17800,"iley"
:17801,"adelphia":17802,"SV":17803,"roat":17804,"_tx":17805,"Ġlon":17806,"ĠngOnInit":17807,"bp":17808,"ĠGolden":17809,"ACHE":17810,"Ġworried":17811,"azi":17812,"Ear":17813,"Take":17814,"(fp":17815,"burgh":17816,"_Data":17817,"gres":17818,"ĠOnt":17819,"pus":17820,"Ġtransparent":17821,"Ġpocket":17822,"Ġram":17823,"igrations":17824,".čĊčĊ":17825,"Ġ[(":17826,"Ġadopted":17827,"Ġreportedly":17828,"ĠDream":17829,"Ġ}));Ċ":17830,"losing":17831,"Ġteeth":17832,"ĠBooks":17833,"\",&":17834,"enny":17835,"LEMENT":17836,"Ġgel":17837,"ĠPlant":17838,"!âĢĿ":17839,".host":17840,"ĠReply":17841,"rength":17842,"Ġrecognition":17843,"Ġ}}>Ċ":17844,"LA":17845,"Ġmirror":17846,"Ġassistant":17847,"(device":17848,"Ġspiritual":17849,"builder":17850,"§":17851,"Ġoutr":17852,"Ġtt":17853,"ĠPER":17854,"Ġradical":17855,"Methods":17856,"Ġpace":17857,"udy":17858,"Ġgut":17859,"ĠGreek":17860,"Ġnonatomic":17861,"ĠPaper":17862,"_GPIO":17863,"Ġobst":17864,".Ad":17865,"vironments":17866,"ĠSov":17867,"(con":17868,"ĠTransaction":17869,".assign":17870,"ĉcatch":17871,"elter":17872,"Ġbitcoin":17873,"_GR":17874,"ĠčĊ":17989,"metic":17990,"Ġtransformation":17991,"åı·":17992,"Ġrgb":17993,"istributions":17994,"Ġimplicit":17995,"/in":17996,"destination":17997,"аÑĤÑĮ":17998,"Zero":17999,"Ġunset":18000,".where":18001,".go":18002,"Ġformation":18003,"Ġdeclaration":18004,"()čĊčĊ":18005,"ĠExpl":18006,"ĉĉĉĠĠ":18007,"/pro":18008,".JSON":18009,"Ġdesk":18010,".substr":18011,"//----------------------------------------------------------------------------":18012,"lyn":18013,"pson":18014,"disable":18015,"ĠFunc":18016,"ĉAssert":18017,"ĠMARK":18018,"Ġdefeat":18019,"Ġblind":18020,"Ġconstants":18021,".headers":18022,"UILD":18023,"Ġexpenses":18024,"Pixel":18025,"Ġhr":18026,"Ġfel":18027,"ĠEastern":18028,"_del":18029,"ĠCub":18030,"Ġsq":18031,"ĉcount":18032,"ĠDirectory":18033,"Ġexclus":18034,"Ġhistoric":18035,"Ġ------------------------------------------------":18036,"Ġcomposition":18037,"ĠdataGridView":18038,"ĠBurn":18039,"ĠBC":18040,"Master":18041,"Ġspawn":18042,"Ġbearing":18043,".SetActive":18044,"ilo":18045,"Ġgallery":18046,"Ġfounded":18047,"Ġavailability":18048,".sqrt":18049,"Ġpes":18050,"ĠDOM":18051,"mate":18052,"Oct":18053,"Ġmatched":18054,"itivity":18055,"Ġanxiety":18056,".price":18057,"ĠInstant":18058,"ìĬ":18059,"Ġtut":18060,"ICollection":18061,".shared":18062,"_sql":18063,"tbl":18064,"library":18065,"_destroy":18066,"ermal":18067,"ĠNotes":18068,"ĠEin":18069,"Ġsouthern":18070,"ĠOTHERWISE":18071,"Ġmacro":18072,".lower":18073,"cls":18074,"ContentView":18075,".link":18076,"constant":18077,"ĠBes":18078,"Ġsomebody":18079,"nb":18080,"\">{":18081,"(local":18082,".....":18083,"ĠNull":18084,"mx":18085,"Ġç":18086,"Ġpause":18087,"-----------":18088,"_MO":18089,"ĠCM":18090,"ĠforKey":18091,"ĠDVD":18092,"Ġclosest":18093,"_DEVICE":18094,"ĠStephen":18095,"ĠBBC":18096,"ĠTravel":18097,"Paint":18098,"ĠResults":18099,"ĠRule":18100,"Ġtp":18101,"Ġratings":18102,"cin":18103,"csv":18104,">/":18105,"ĠGOP":18106,"lad":18107,"ĠÑĢ":18108,"ĠindexPath":18109,"matrix":18110,"=f":18111,"arsed":18112,"Ġ});":18113,"ĠCos":18114,"ĠScore":18115,"Ġtak":18116,"ĠESP":18117,"ĠINC":18118,"_NULL":18119,"-flex":18120,"\"][":18121,"into":18122,"eland":18123,"Authorization":18124,"_FALSE":18125,"Ġgate":18126,"Ġvid":18127,"istent":18128,"TIME":18129,"Ġrewrite":18130,"Ġtie":18131,"Ġarchive":18132,".events":18133,".getParameter":18134,"ĠPermission":18135,"Ġprogramme":18136,"Ġé":18137,"jud":18138,"Ġcameras":18139,"(sys":18140,"ĠSyrian":18141,"Ġimprovements":18142,"Ġhip":18143,"Ġsuicide":18144,"Ġscholar":18145
,"Ġcompatible":18146,"remote":18147,".down":18148,"FUNCTION":18149,"Ġmanaging":18150,"ĠUIKit":18151,".raw":18152,">>>>":18153,"Ġdemands":18154,"ellite":18155,"Ġdent":18156,"ĠMicro":18157,"åıĸ":18158,"'][$":18159,"ĠIE":18160,"imension":18161,"Ġtrem":18162,"Ġgained":18163,".with":18164,".ok":18165,"hou":18166,"Ġbom":18167,"ampaign":18168,"Ġjoining":18169,"fish":18170,"ĠaddSubview":18171,"Ġnorthern":18172,".cor":18173,"oret":18174,"Die":18175,"inish":18176,"_comp":18177,"Ġattended":18178,"Ġcollapse":18179,"ĠSS":18180,"acent":18181,"_EQUAL":18182,"ĠDeep":18183,"RGB":18184,"ĉtest":18185,"olves":18186,"uset":18187,"UnityEngine":18188,"writer":18189,"Resolver":18190,",%":18191,"ifference":18192,"_remove":18193,"onda":18194,"Ġfemme":18195,"decode":18196,"Branch":18197,"Ġflush":18198,"Ġinnovative":18199,"Tests":18200,"Ġ['./":18201,"Ġcovering":18202,".admin":18203,"ultipart":18204,"(lambda":18205,"namespace":18206,"ĠSport":18207,"Ġ!(":18208,"acles":18209,"Ġdepression":18210,"ĠKong":18211,"Ġpert":18212,"ĠConn":18213,"ĠOtherwise":18214,"/home":18215,"supported":18216,"Ġpink":18217,"Ġinvited":18218,"ños":18219,"_enabled":18220,"Ġ-Ċ":18221,"FW":18222,"eners":18223,"ĠMY":18224,"Ġsuggestions":18225,"Canvas":18226,"Ġfer":18227,"ĠMarketing":18228,"@Test":18229,"untu":18230,"ĠVen":18231,"ĠCou":18232,"ivals":18233,"Donald":18234,"limited":18235,"ĉĉĉĉĉĉĊ":18236,"Ġanalyst":18237,"(entry":18238,"Ġrepresentative":18239,"_attributes":18240,"Ġfur":18241,".hide":18242,"resp":18243,"adores":18244,"rides":18245,"ĠJosh":18246,"robot":18247,"ĠNAT":18248,"Ġsesso":18249,"Ġintegrated":18250,":true":18251,"parts":18252,"Ġstupid":18253,":event":18254,"@endsection":18255,"Ġpu":18256,".Table":18257,"ĠYii":18258,"`;ĊĊ":18259,"Ġclang":18260,"=\"\">":18261,"engan":18262,"_parameters":18263,".internal":18264,"ĠModern":18265,"Ġmetric":18266,"Ġsemi":18267,"={{Ċ":18268,".amazon":18269,"ĠBB":18270,"ainty":18271,"viewport":18272,"ĠstartActivity":18273,"dispatch":18274,"*****":18275,"Ġflav":18276,"ifferent":18277,"[this":18278,"Ġstake":18279,"Ġargued":18280,"viously":18281,".work":18282,"ĠOak":18283,"Old":18284,"(async":18285,"notes":18286,"Ġflip":18287,"Ġdisag":18288,"ĠTE":18289,"ĉerror":18290,"<'":18291,"Ġ»ĊĊ":18292,"Ġfiltered":18293,"ĠMach":18294,"Ġhung":18295,"_dump":18296,"_samples":18297,"-dismiss":18298,"Ġray":18299,"Implemented":18300,"DK":18301,"Ġjed":18302,"Ġbreaks":18303,"Ġfits":18304,".gr":18305,"ĠZero":18306,"oro":18307,"Ġequally":18308,"Ġ'[":18309,"Ġconcerning":18310,"<":18407,"Ġpromot":18408,"Ġincl":18409,"_only":18410,"를":18411,"ĠAttorney":18412,"-date":18413,"Ġlandscape":18414,"Ġfu":18415,"SY":18416,".prop":18417,"ĠArr":18418,"pag":18419,"ParallelGroup":18420,"':čĊ":18421,"Ġlogs":18422,"aunch":18423,"unci":18424,"nama":18425,"TableCell":18426,"issues":18427,".{":18428,"ecurity":18429,"_exec":18430,"olds":18431,"Ġhosts":18432,"Ġproto":18433,"_import":18434,"_sort":18435,"ĠBow":18436,"ĠNormal":18437,"ĠFarm":18438,".createParallelGroup":18439,"Rotation":18440,".err":18441,"Ġpleased":18442,"itage":18443,".Wh":18444,"ĉĉĠĠĠĠ":18445,"MR":18446,"ĠMORE":18447,"ĠNatural":18448,"_transform":18449,"BASE":18450,"eneral":18451,"utdown":18452,".commons":18453,"WT":18454,"Ġaan":18455,".Result":18456,"dog":18457,"Ġclicking":18458,"),ĊĊ":18459,"#line":18460,"Operator":18461,"Ġciv":18462,"Ġmerg":18463,"obuf":18464,"ngthen":18465,"Ġ[{":18466,"Ġcancell":18467,"trigger":18468,".:":18469,"WORK":18470,"declare":18471,"Ġdecrease":18472,"ÅĽci":18473,"loom":18474,".None":18475,"ĠMI":18476,"ĠJason":18477,"Ġhealthcare":18478,"iamond":18479,"sylvania
":18480,"*x":18481,"ĠRa":18482,"[b":18483,"Ġprinting":18484,"phabet":18485,"ĠLabour":18486,"opper":18487,"Ġzijn":18488,"-target":18489,"_FUNCTION":18490,"Ġoct":18491,"ениÑı":18492,"åľ¨":18493,"Ġwestern":18494,"Ġcomputers":18495,"ĠRET":18496,"HashMap":18497,"[String":18498,"getValue":18499,"_DATE":18500,".Next":18501,"ĠFif":18502,"él":18503,"icked":18504,"æİ":18505,"-MM":18506,"Ġ{ĊĊĊ":18507,"Ġcontacts":18508,"Ġdigits":18509,"Produ":18510,"Ġunusual":18511,"Ġrapidly":18512,"tures":18513,"Ġangry":18514,"cancel":18515,"xxxx":18516,"_parser":18517,"idity":18518,"_PREFIX":18519,"Ġmehr":18520,"Ġrarely":18521,"ethe":18522,"opes":18523,"Ġ%.":18524,"works":18525,"Ġtheta":18526,"Ġcontribution":18527,"ĠTony":18528,"Ġsquad":18529,"ай":18530,"Ġîn":18531,"there":18532,"outed":18533,"ĉq":18534,"ĻĤ":18535,"good":18536,"LI":18537,"页":18538,"ĠLiving":18539,"izabeth":18540,"Ġkt":18541,"ĠDallas":18542,"]],Ċ":18543,"Ġ/>ĊĊ":18544,"Ġraising":18545,"/router":18546,"_game":18547,"ĠCUR":18548,"zens":18549,".es":18550,"ĠfontWeight":18551,"(func":18552,"notification":18553,"Ġ'../../../":18554,"Ġblame":18555,"ãĢĤĊĊĊĊ":18556,"anco":18557,"Identity":18558,"follow":18559,"Ġarts":18560,"xs":18561,"Ġofficially":18562,"ĠStudio":18563,"Ġrecommendations":18564,"Ġlocale":18565,"Ġamateur":18566,"ĠEnable":18567,"Ġcaps":18568,".End":18569,"-add":18570,"_gshared":18571,"ĠCT":18572,"Force":18573,"ĊĠĠĠĠĠĠĠĠĠĠĠĠĊ":18574,"Ġorange":18575,"Ġlp":18576,"Ġanswered":18577,".Grid":18578,"Ġdual":18579,"Ġstrategic":18580,"Ġnobody":18581,"Ġfatal":18582,"_est":18583,"(el":18584,"Ġìł":18585,"ĠBudd":18586,"AIT":18587,"_factor":18588,"-one":18589,"ĠHAVE":18590,"\"čĊčĊ":18591,"Prof":18592,"Ġär":18593,"strings":18594,"Ġdirty":18595,"ĠFace":18596,"ĠBegin":18597,"ĠBus":18598,"Ġwis":18599,"åŃĹ":18600,"Ġspeaker":18601,"Ġcarrier":18602,"ĠOm":18603,"Ġhadn":18604,"Allow":18605,"::__":18606,"Ġverb":18607,"ĠComplete":18608,"ĠEasy":18609,"Ġbills":18610,"ĠĠĊĊ":18611,"Vertical":18612,"Ġpron":18613,"ĠDefine":18614,"Ġlookup":18615,"variables":18616,"Ġpandas":18617,"umes":18618,"Ġinnoc":18619,"ĠsetUp":18620,"ĠChampionship":18621,"artist":18622,"ĠCType":18623,"Foundation":18624,"à¹Ī":18625,"ĠSetup":18626,"Ġrecipes":18627,"ĠUIColor":18628,"ĠFight":18629,"Ġauthorized":18630,"_click":18631,"_success":18632,"angan":18633,"ĠMountain":18634,"ĠDoctor":18635,"Ġegg":18636,"ĠMedicine":18637,"cles":18638,"`.Ċ":18639,"[int":18640,"dashboard":18641,"ĠAppro":18642,"-dr":18643,"Ġproduces":18644,"Ġrental":18645,"Ġreload":18646,"Ġarrival":18647,"spot":18648,"Ġundert":18649,"Ġequipped":18650,"Ġproved":18651,"Ġcenters":18652,"Ġdefines":18653,"also":18654,"Ġopacity":18655,"ĠUnfortunately":18656,"ĠIllinois":18657,"Ġне":18658,"ĠTemple":18659,"ĠTrail":18660,"ĠKelly":18661,"Ġmeasurement":18662,"Ġseparated":18663,"-circle":18664,"Hey":18665,"ĠREAD":18666,"igits":18667,"Ġib":18668,"ĠMOD":18669,"attery":18670,"аз":18671,"Ġvend":18672,"енÑĤ":18673,"ĠHttpClient":18674,"safe":18675,"_ASS":18676,"icit":18677,"ĠConstruct":18678,"ĠClo":18679,"ĠSix":18680,"_TOKEN":18681,"(block":18682,"Ġwarned":18683,"/*!":18684,"!Ċ":18769,"Ġinnovation":18770,"_\"":18771,"Ġ);čĊčĊ":18772,"Ġspots":18773,"Ġchoosing":18774,".cs":18775,"Ġflexible":18776,"UInt":18777,"Ġscratch":18778,"-al":18779,"Ġfestival":18780,"Ġoutstanding":18781,"================================================":18782,"Mean":18783,"ĠOregon":18784,"symbol":18785,".account":18786,"dney":18787,"'''":18788,"!\",":18789,"Ġparticle":18790,"Ãĥ":18791,"[MAX":18792,"IVER":18793,"ERENCE":18794,"NSMutable":18795,"ĠColumbia":18796,"_ĊĊ":18797,".fr":18798,"Ġcogn":18
799,"VR":18800,"ĠMethods":18801,"ĠMade":18802,"ĠBR":18803,"ĠElse":18804,"Ġeggs":18805,"Ġswing":18806,"ĠInv":18807,"Ġdiseases":18808,"Ġfirms":18809,"Ġlemma":18810,"}`);Ċ":18811,"lings":18812,"Ġgym":18813,"uminum":18814,".Trim":18815,"Mem":18816,"Ġcriticism":18817,"ibernate":18818,"_TX":18819,"ioni":18820,"Ġguidance":18821,"Ġrepeatedly":18822,"Ġsupplier":18823,"Ġpainting":18824,".Fragment":18825,"edException":18826,"Ġwiring":18827,"Ġcourts":18828,"WEB":18829,"æľī":18830,"\\.":18831,"illance":18832,"Ġbrows":18833,"ĠPattern":18834,"PLICATION":18835,"ĠSummer":18836,"Chain":18837,"Ġcute":18838,"mercial":18839,"Ġdil":18840,"ĠFranklin":18841,"ĉglobal":18842,"INCLUDING":18843,"history":18844,"Ġlst":18845,"Qt":18846,"SDL":18847,"alia":18848,"iere":18849,"(...":18850,"ĉcin":18851,"iffs":18852,"velope":18853,"ĠRoot":18854,"cluster":18855,"UserName":18856,"igne":18857,"()Ċ":18949,"Ġapplying":18950,"Ġpromised":18951,"Ġox":18952,"ncia":18953,"ĠValidation":18954,"orts":18955,"_cur":18956,"elect":18957,"eye":18958,"(Data":18959,"Ġreporter":18960,"ĠBuff":18961,"Ġsr":18962,"Ġ\";":18963,"icky":18964,"Ġtempor":18965,"SN":18966,"Ġresident":18967,"pires":18968,"ysical":18969,"Ġendorse":18970,"ĠSong":18971,"isEmpty":18972,"leet":18973,"_util":18974,"Ġdistingu":18975,"ĠTalk":18976,"ĠMot":18977,"(default":18978,".Arg":18979,"gorithms":18980,"_words":18981,"immer":18982,"_reset":18983,"family":18984,"WW":18985,"Ġsavings":18986,"ĠâĢĿ":18987,"_enable":18988,"sidebar":18989,"Running":18990,"Ġali":18991,"Ġtestim":18992,"Ġwarnings":18993,"ĠChem":18994,"ĠExit":18995,"Ġfounder":18996,"pector":18997,"Ġrm":18998,"_dataset":18999,"ĠDas":19000,"Ġhan":19001,"Getty":19002,"ál":19003,"Ġny":19004,"Ġpoverty":19005,"Ġresulted":19006,".by":19007,"ĠVisit":19008,"Ġobtaining":19009,"/'.$":19010,"ĠĠĠĠĠĠĠĠĠĠĠĊ":19011,"shall":19012,"_LEFT":19013,"UIImage":19014,"_Name":19015,"have":19016,"ĠNob":19017,"lr":19018,"-footer":19019,"Ġnaked":19020,"ĠGarden":19021,"\\Facades":19022,"Ġgraduate":19023,"Ġfranchise":19024,"plane":19025,"Ġcontributions":19026,"ĠstringWith":19027,"Ġcrypto":19028,"Ġmovements":19029,"athers":19030,"Ġlifetime":19031,"Ġcommunicate":19032,"jar":19033,"ĠFragment":19034,"_IF":19035,"ĠNavy":19036,"ĠFigure":19037,"Ġsimulation":19038,"_stop":19039,"Ġreporters":19040,"Ġversus":19041,"aja":19042,"Ġα":19043,"Ġgovernor":19044,"ListItem":19045,"Ġsealed":19046,".Background":19047,"edi":19048,"ashing":19049,"Ġlip":19050,"ĠIh":19051,"merge":19052,"Ġnec":19053,"elocity":19054,"ATEG":19055,"Ġseeds":19056,"Ġfloating":19057,"_FA":19058,"walk":19059,"ĉuser":19060,"_depth":19061,"Ġwage":19062,"@app":19063,"Nil":19064,"([\"":19065,"(vector":19066,"Ġsecretary":19067,"ĠjPanel":19068,"vez":19069,"³³³³":19070,"direction":19071,"ĠEP":19072,"Ġhunt":19073,"JsonProperty":19074,"ĠPORT":19075,"]\",":19076,"ап":19077,"ĠForeign":19078,"panic":19079,"Ġtrials":19080,"ĠAle":19081,"Ġrural":19082,"-value":19083,"authorized":19084,"ĠScotland":19085,".drop":19086,"ĠMT":19087,"ç±":19088,"rowth":19089,"FilePath":19090,"Ġrecall":19091,"ifle":19092,"Ġcel":19093,"ĠSELECT":19094,"kn":19095,"_case":19096,"Ġcrop":19097,"sure":19098,"pot":19099,"ICS":19100,"Ġstem":19101,"Ġindustries":19102,"Put":19103,"Ġaber":19104,"roadcast":19105,"Icons":19106,")\")Ċ":19107,"æĪIJåĬŁ":19108,"gui":19109,"Ġassumed":19110,"Ġrx":19111,"EA":19112,"è§":19113,"ELL":19114,"Ġdose":19115,"Ġine":19116,"Ġdeeper":19117,"lider":19118,"Ġordinary":19119,"Ġgolf":19120,"_IMAGE":19121,"ĠNAME":19122,"(module":19123,"Ġatom":19124,"Ġbelt":19125,"Ġoffices":19126,"beta":19127,"Ġphilosophy":19128,"(JSON":19129,"
-field":19130,"Ġintroduce":19131,"Ġconvenience":19132,"optim":19133,">\"Ċ":19134,"athy":19135,"Ġemployer":19136,"quate":19137,"Ġedited":19138,"Arguments":19139,"ĠNations":19140,"__)":19141,"Ġnose":19142,"ĠSample":19143,"')ĊĊĊ":19144,"Ġcake":19145,".getAttribute":19146,"HD":19147,"Modified":19148,"Ġpredicted":19149,"ÅĦ":19150,"anie":19151,"Sorry":19152,"(doc":19153,"wind":19154,"ieve":19155,"Ġprovisions":19156,"ATER":19157,"OTE":19158,"MY":19159,".Autowired":19160,"ĠBath":19161,".Boolean":19162,"Ġbackend":19163,".Mouse":19164,"ateral":19165,"paper":19166,"Const":19167,"ĠVR":19168,"_entity":19169,"_CTRL":19170,"ĠProtection":19171,"ĠGM":19172,"ĠStudy":19173,"Ġsoup":19174,"otime":19175,"'use":19176,"]\"":19177,"/users":19178,"aug":19179,"ĠHong":19180,"_norm":19181,"ãģ¨":19182,"Ġsecre":19183,"(Build":19184,"ĠContract":19185,"olas":19186,"Ġsauce":19187,"Ġaggressive":19188,"Ġracial":19189,"character":19190,"@@":19191,"Ġcompile":19192,"ĠVoid":19193,"_rem":19194,"_memory":19195,"kk":19196,"Ġmic":19197,"Same":19198,"Utility":19199,"ĠHtml":19200,"ĠXml":19201,"Ready":19202,"Ġgall":19203,"Ġallegedly":19204,"ĉĉĉĉĠĠĠ":19205,"ĠMetal":19206,"ĠPersonal":19207,"ĠborderRadius":19208,"rxjs":19209,"objects":19210,"Ġwanting":19211,"Ġbowl":19212,"vendor":19213,"offsetof":19214,"ĠRs":19215,"ĠRating":19216,"Ġrally":19217,"_NODE":19218,"ĠMix":19219,"Ġadvertis":19220,"Ġnarrative":19221,"sal":19222,"Ġmc":19223,"SError":19224,"Ġfingers":19225,"Ġaccompany":19226,"Ġtired":19227,"Ġstride":19228,"Ġgui":19229,"elist":19230,"Locale":19231,"Ġreleases":19232,"iking":19233,"Ġanger":19234,")))ĊĊ":19235,"allest":19236,"Summary":19237,"(O":19238,"(for":19239,"Ġbasketball":19240,"Ġroads":19241,"ĠInstall":19242,"ĠFab":19243,"itmap":19244,"Ġ))Ċ":19245,"Ġintersection":19246,"ighbor":19247,"ĠBry":19248,"ĠHERE":19249,"Software":19250,"elfare":19251,"acs":19252,"Ġtrailer":19253,".getClass":19254,"chars":19255,"Ġregulation":19256,"Ġrefers":19257,"Ġdestruction":19258,"Ġcontinuous":19259,"ĠAustin":19260,"é¢":19261,"akan":19262,".window":19263,"ĠTemplates":19264,"Ġabsence":19265,":n":19266,"Ġdisorder":19267,"flash":19268,"Ġdelet":19269,"boards":19270,"ĠĠĉ":19271,"ROP":19272,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":19273,"Ġacqu":19274,"Ġlawsuit":19275,"ĠReviews":19276,"Ġgarage":19277,"timer":19278,"Ġej":19279,"ĠRectangle":19280,"Ġflowers":19281,"ilst":19282,"ĠInstance":19283,"Super":19284,"det":19285,"disposing":19286,"ĠES":19287,"ĠIC":19288,"vere":19289,"Sk":19290,"_channels":19291,"puted":19292,"/null":19293,"nnen":19294,"ĠGallery":19295,"_global":19296,"Authentication":19297,"ĠRank":19298,"Ġblocked":19299,"Ġcalm":19300,"market":19301,"ĉval":19302,"Ġaug":19303,"period":19304,"ĠConstant":19305,"Ġ?>\">Ċ":19306,"Ġlobby":19307,"pal":19308,"Ġsink":19309,"iah":19310,"С":19311,"urname":19312,"Ġconver":19313,"Ġinvestigate":19314,"Christ":19315,"Hub":19316,"ĠIND":19317,"ĠPed":19318,"uras":19319,"ĉurl":19320,"ĠTro":19321,"Ġpreferences":19322,"Ġguaranteed":19323,"`ĊĊ":19324,"Ġportions":19325,"Ġevalu":19326,"'>;ĊĊ":19421,".AutoScaleMode":19422,"Ġcats":19423,"Ġregistry":19424,"ulus":19425,"FI":19426,"payload":19427,"-search":19428,"Ġstaying":19429,"acious":19430,"Decoration":19431,"Review":19432,"Inf":19433,"Keep":19434,"itis":19435,",String":19436,"Coord":19437,"Ġpero":19438,"Sex":19439,"ĠAtlanta":19440,"uesta":19441,"Argb":19442,">*":19443,"}_":19444,"Footer":19445,"Ġemployed":19446,"_bound":19447,"vide":19448,".func":19449,"$scope":19450,"Ġspo":19451,"ĠAnal":19452,"ounced":19453,"around":19454,"Ġrestriction":19455,"Ġshops"
:19456,"åĢ":19457,"ĠLatin":19458,"-col":19459,"Ġbarely":19460,"ĠEuro":19461,"Er":19462,"Ġfaire":19463,"_distance":19464,"_unlock":19465,"Quote":19466,"IVATE":19467,"ĠåĪ":19468,"Ġaimed":19469,"ĠRetrie":19470,".iter":19471,"Ġwrapped":19472,"Ġagreements":19473,"strument":19474,"(product":19475,"Ġstudied":19476,".setValue":19477,"Ġye":19478,"ĠCache":19479,"MBOL":19480,"Ġquarterback":19481,"Ġsyntax":19482,".getElementsBy":19483,".version":19484,"website":19485,"Runner":19486,"_single":19487,"ativ":19488,"ĠAltern":19489,"ĠBeautiful":19490,"rightarrow":19491,"Ġdiversity":19492,"plash":19493,"(co":19494,".Fill":19495,"Ġtyping":19496,"Ġclar":19497,"Hit":19498,"OO":19499,"acco":19500,"worth":19501,"Ġscripts":19502,"ĠMuslims":19503,"ĠLL":19504,"erving":19505,"(boolean":19506,"Ġbaseball":19507,"ĠCAN":19508,"MAIL":19509,"depend":19510,"Ġrespective":19511,"Ġconstexpr":19512,".*;ĊĊ":19513,"']))Ċ":19514,"Ġyard":19515,"Ġidentical":19516,"ifecycle":19517,"USH":19518,"upiter":19519,".validate":19520,"cli":19521,"ISTER":19522,"Indicator":19523,"Fail":19524,"Ġdemocracy":19525,".var":19526,"Ġsatisfied":19527,"-------------":19528,"encer":19529,"hor":19530,"Ġrounds":19531,"DAO":19532,"oa":19533,"Ġflask":19534,"=c":19535,"[]Ċ":19536,"/dist":19537,"Ġparte":19538,"Ġconfirmation":19539,"eron":19540,"aware":19541,"":19542,"Ġdependencies":19543,"ĠVideos":19544,"-row":19545,"Ġ**/Ċ":19546,"Ġnou":19547,"Ġhover":19548,"æŀ":19549,"Ġnin":19550,"ĠUSD":19551,"Mac":19552,"_Load":19553,"Ġoutcomes":19554,"_socket":19555,"Ġqueries":19556,"wm":19557,"Ġhitting":19558,"inux":19559,"Mich":19560,"udge":19561,"ATAB":19562,"Ġvulnerable":19563,"ä¾":19564,"Ġportfolio":19565,":YES":19566,"ĉmap":19567,"Bound":19568,"Ġiteration":19569,"incess":19570,"Ġactors":19571,"ĠQual":19572,"_clean":19573,"ãĢijãĢIJ":19574,"MSG":19575,"Green":19576,"ĠOfficer":19577,"Ġsmoking":19578,">',":19579,"ĠFlo":19580,"++;":19581,"olygon":19582,"Ġbulk":19583,"Ġdrama":19584,"Ġexceptions":19585,"osed":19586,"Ġ+čĊ":19587,"Ġlegacy":19588,"CV":19589,"Ġcontributed":19590,"ĠTerms":19591,"Ġbt":19592,"Ġuntuk":19593,"Ġalien":19594,"===Ċ":19595,"ĉVector":19596,"Ġls":19597,"Online":19598,".facebook":19599,"numeric":19600,"ockets":19601,"Aut":19602,"bury":19603,"-redux":19604,"ĠRedistributions":19605,"GLOBALS":19606,"urrencies":19607,"Ġtons":19608,"âĢĻ,":19609,"Ġê":19610,"(col":19611,"ĠSymbol":19612,"Ġstayed":19613,"ĠML":19614,"Ġmunicip":19615,"Ġsexo":19616,"Sen":19617,"nr":19618,"Ġgains":19619,"Ġshortly":19620,".Menu":19621,"ý":19622,"KNOWN":19623,"Ġoperators":19624,"-V":19625,"ĠPatrick":19626,"/add":19627,"_CO":19628,"iration":19629,"(post":19630,"Posts":19631,"/_":19632,"Ġplug":19633,"Ġintellectual":19634,"Ġmetab":19635,"Ġpregnancy":19636,"ĠPremier":19637,"nm":19638,"Ġprediction":19639,"ĠMinistry":19640,"Three":19641,"valuate":19642,"ĠMini":19643,"bu":19644,"оз":19645,"\";čĊ":20078,"ĠSav":20079,".Bold":20080,"Ġenables":20081,"ĉtmp":20082,"Ġmanually":20083,"ĠSqu":20084,"userid":20085,".function":20086,".cache":20087,"LOPT":20088,".Services":20089,"ddit":20090,"tim":20091,">>":20154,"station":20155,"lore":20156,"atype":20157,"ishop":20158,"/****************************************************************":20159,"ComboBox":20160,"Ġvacation":20161,"Ġinitiative":20162,"ĠdefaultValue":20163,"concat":20164,"ĠKh":20165,"ĠWelcome":20166,"izedName":20167,"Migration":20168,"Ġgradient":20169,"Hot":20170,"Ġhardly":20171,"elo":20172,"ĠStudents":20173,"Ġloose":20174,"atz":20175,".Send":20176,"'/":20177,"Ġuniversal":20178,"Ġenterprise":20179,"Ġregex":20180,"Ġvisitor":20181,"ĠFly":20182,"Seq":
20183,"à¸Ļ":20184,"ĠVisual":20185,"Ġlibraries":20186,"atoes":20187,"Payment":20188,"Ġpent":20189,"Ġgathered":20190,"VRTX":20191,"ĠDM":20192,"Split":20193,"Ġletting":20194,"ÐĿ":20195,"_errors":20196,"epoch":20197,"PARAM":20198,"cu":20199,"ÑģÑĤв":20200,"olutions":20201,"Editing":20202,"fonts":20203,"Ġallocated":20204,"ĠBased":20205,"(Y":20206,"ĠJudge":20207,"Ġbrothers":20208,"FILES":20209,"ço":20210,"wb":20211,"_PI":20212,"'^":20213,"Ġsword":20214,".services":20215,"Ġnl":20216,"Tim":20217,"igg":20218,"ĠMoore":20219,"Ġcryptoc":20220,"åĩº":20221,"_posts":20222,"otate":20223,"?'":20224,"....ĊĊ":20225,"Ġkl":20226,"=\"$":20227,"Ġdecoration":20228,"ạ":20229,"ĠDIRECT":20230,"GUI":20231,")=>{Ċ":20232,"Ġnewsletter":20233,"Ġprecis":20234,"(point":20235,"ĠEquipment":20236,"uty":20237,"ĠDave":20238,"Ġparticipation":20239,"uarios":20240,"xit":20241,".As":20242,"ETER":20243,"orous":20244,"Ġshield":20245,"[]>":20246,"ilitary":20247,".origin":20248,"Ġpromotion":20249,"Unt":20250,"Ġct":20251,"TRA":20252,"ViewHolder":20253,"Ġsigma":20254,"delta":20255,"arehouse":20256,"contract":20257,"(Vector":20258,"Ġcompete":20259,"/form":20260,"/components":20261,"Ġnr":20262,"ĠIndones":20263,"ĠоÑĤ":20264,"ĠVolume":20265,".files":20266,"(resp":20267,"/models":20268,"Ġsurf":20269,"standard":20270,"/o":20271,"ĠXCTAssert":20272,"VICES":20273,".Code":20274,"SED":20275,"Ġactivate":20276,"Delta":20277,"Ġlimitation":20278,"rij":20279,"Ġpregnant":20280,":^(":20281,"Ġsour":20282,"pie":20283,"Ġexpense":20284,"ication":20285,"ĠLarge":20286,"Ġ±":20287,"ĠBowl":20288,"(models":20289,"/N":20290,"Pa":20291,".reload":20292,"Ġwondering":20293,"Execution":20294,"ĉĠĠĠĠĠĠ":20295,"ĠGraphics":20296,"ĠContin":20297,"_job":20298,"ĠgetName":20299,"ĠMagn":20300,"ĠDWORD":20301,"mad":20302,"Ġnh":20303,"features":20304,"}\");Ċ":20305,"heets":20306,"(train":20307,"zn":20308,"Ġrecruit":20309,".connection":20310,"Ġbarrel":20311,"Ġsteam":20312,"_setting":20313,"Ġangular":20314,"aneously":20315,"Ġbil":20316,"ĠNorm":20317,"(!$":20318,"ibt":20319,"%(":20320,"Ġposit":20321,"ĠFather":20322,"intendo":20323,"Live":20324,"Ġports":20325,"Ġmej":20326,"Ġlanding":20327,"ponder":20328,"Ġcod":20329,"_HEADER":20330,".Margin":20331,"Ġballs":20332,"Ġdiscussions":20333,"Ġblend":20334,"Hex":20335,"Ġfarmers":20336,"Ġmaintaining":20337,"ĠĠĠčĊ":20338,"syn":20339,"[T":20340,"rus":20341,"uffers":20342,"Ġcontributors":20343,"_sys":20344,".Debug":20345,"Ġconstructed":20346,"omes":20347,"?id":20348,"slider":20349,"Ġsuppliers":20350,"scriber":20351,"pes":20352,"Ðŀ":20353,"\":čĊ":20354,"\\Controller":20355,"))ĊĊĊ":20356,"Ġlua":20357,"Multi":20358,"ENS":20359,"Src":20360,"Ġpetition":20361,"Ġslave":20362,"looking":20363,"VERT":20364,"ĉvector":20365,"Special":20366,"hh":20367,"anne":20368,"ĠNiger":20369,"/views":20370,"zing":20371,"endant":20372,"(":20591,".Product":20592,"Forms":20593,"NEW":20594,"Pay":20595,"ĉboolean":20596,"_contact":20597,"ĠElectric":20598,"skip":20599,"Ġwur":20600,"Ġchronic":20601,"_driver":20602,"ĠSab":20603,"ĠUlt":20604,"ĠRad":20605,"STATUS":20606,"ĠLewis":20607,"OB":20608,"Ġgifts":20609,".Rec":20610,"TRUE":20611,"Ġintensity":20612,"Marker":20613,".compare":20614,"ffic":20615,"Cookie":20616,"ĠBaby":20617,"ĠBigDecimal":20618,"ilet":20619,"ĠHOLDERS":20620,"ĠLady":20621,"Ġlung":20622,"ĠAlabama":20623,"Ġdess":20624,"`);Ċ":20625,"ĠBuilder":20626,"_region":20627,"Ġneutral":20628,"Both":20629,"Ġhp":20630,"Ġhorn":20631,"Ġsegments":20632,"ĠEC":20633,"\"=>\"":20634,"(rec":20635,"ĠPi":20636,"GM":20637,"Ġlaptop":20638,"Scalar":20639,"isd":20640,"-dialog":20641,"ĠAnderson":2064
2,"Ġmistakes":20643,"ĠHan":20644,"jes":20645,"estination":20646,"Ġpromises":20647,"bid":20648,"ĠScient":20649,"GIN":20650,"ĠPerformance":20651,"bage":20652,".users":20653,"leading":20654,"Ġoral":20655,"Graphics":20656,"_PTR":20657,"hang":20658,"Ġinev":20659,"processing":20660,"Factor":20661,"ĠNA":20662,"$string":20663,"Ġgrounds":20664,".SaveChanges":20665,"clock":20666,"cripcion":20667,"ĠNewton":20668,"gc":20669,".includes":20670,"Ġblast":20671,"Ġ'-'":20672,"Ġpuede":20673,".Session":20674,"Ġgrep":20675,"_final":20676,"ĠGay":20677,"ĠGive":20678,"iri":20679,"-star":20680,"ĠUIImage":20681,"_epoch":20682,"ubb":20683,"enth":20684,"Ġelite":20685,"Ġcampaigns":20686,"ĠPorno":20687,"_assign":20688,"Protocol":20689,"ĠBeing":20690,"ĠAirport":20691,"Ġconventional":20692,"ĠWat":20693,"ĠCI":20694,"ETA":20695,"ĠAnthony":20696,"Ġtablet":20697,"(format":20698,"Ġconsistently":20699,"ĠIowa":20700,"Ġavatar":20701,".cursor":20702,"![":20703,"Ġhanging":20704,"Her":20705,"Such":20706,"';ĊĊĊ":20707,"orgeous":20708,"()==":20709,"ĠviewModel":20710,"Ġãĥ":20711,"Ġels":20712,"ĠAgent":20713,"Fetch":20714,"apor":20715,"Ġcx":20716,"pread":20717,"ĠPier":20718,"oeff":20719,"Sn":20720,"ĠVirtual":20721,"Apr":20722,".White":20723,"_MOD":20724,"ĠPoints":20725,"失":20726,"Ġgenes":20727,"Ġvendor":20728,"Ġmainstream":20729,"Ċ":20758,"Filename":20759,"Ġsne":20760,"ĠFootball":20761,"Ġrival":20762,"Ġdisaster":20763,"ionic":20764,"ĠDamage":20765,".Resource":20766,"-en":20767,"ĠTypes":20768,"getString":20769,"(board":20770,"Ġbol":20771,"plain":20772,"zym":20773,"า":20774,"Ġscanner":20775,"ilder":20776,"_msgs":20777,"æı":20778,"(intent":20779,"Ġdestruct":20780,"Ġbust":20781,"ĠEmploy":20782,"oni":20783,"ĠUIViewController":20784,"Ġodds":20785,"earer":20786,"Geometry":20787,"Ġyii":20788,"_EXPORT":20789,"ĠAttack":20790,"Ġniet":20791,"Ġimpression":20792,"ĠGil":20793,"_prob":20794,"ĠCF":20795,"ĠExperience":20796,"/plugins":20797,".Method":20798,"Ġbeliefs":20799,"Native":20800,"_build":20801,"Ġvig":20802,"Ġranks":20803,"covered":20804,"such":20805,"Guard":20806,".pack":20807,"adder":20808,"ivia":20809,"lng":20810,"ĠвÑĭ":20811,"Timestamp":20812,"_now":20813,"Ġpoker":20814,"Ġunc":20815,"Ġshapes":20816,"-types":20817,"_period":20818,"pk":20819,"Ġveteran":20820,"Ġsono":20821,"Ġappointed":20822,"overflow":20823,".driver":20824,"_cat":20825,"utt":20826,"plant":20827,"imb":20828,"ĠAccept":20829,"Ġconcert":20830,"ĉnode":20831,"ĉz":20832,"?>čĊ":20833,"Ġbanned":20834,"ĉĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":20835,"Ġtoxic":20836,"Ġdisappe":20837,"ÈĽ":20838,"Ġgrace":20839,"ateful":20840,"Reply":20841,"ĠCruz":20842,"Ġscrap":20843,"Ġkeywords":20844,"simp":20845,"Ġmortgage":20846,"Ġcyber":20847,"ĠExecute":20848,"Ġlatitude":20849,"ifu":20850,".COM":20851,"dbo":20852,"Ġsorts":20853,"ĠGas":20854,"omial":20855,".Local":20856,"Cells":20857,".Replace":20858,"Strings":20859,".fit":20860,"ĠThird":20861,"%\",Ċ":20862,"Ġ{}\".":20863,"ĠSony":20864,"Ġ[:":20865,"Ġfallen":20866,".')Ċ":20867,"inh":20868,"ĠMC":20869,"Ġredis":20870,"Codes":20871,"Ġprofiles":20872,"hook":20873,"Reducer":20874,"_FUNC":20875,"Ġnavigate":20876,"strlen":20877,"Ġhorm":20878,"áŀ":20879,"ĠSR":20880,".boot":20881,"Ġdigest":20882,"ĉheader":20883,".findOne":20884,"æģ":20885,"DbType":20886,"nia":20887,"_merge":20888,"Ġdonne":20889,"/Getty":20890,"_CHAR":20891,"Ġbands":20892,".URL":20893,"artial":20894,"Ġfreq":20895,"Ġsist":20896,"Ng":20897,"Ġrendering":20898,"\\Core":20899,"Widgets":20900,"ĠVA":20901,"Ġactivists":20902,"Ste":20903,"=_":20904,"alla":20905,"Stamp":20906,"Ġloads":20907,"Ġxx":20908,"ĠLearning":20909,".Mvc":20910,
"uir":20911,"(\"$":20912,"Ġconnecting":20913,"ReadOnly":20914,"uru":20915,"ĠEag":20916,"BIT":20917,"_DEL":20918,"å§":20919,"arrass":20920,"external":20921,"ĠYOUR":20922,"ĠBrew":20923,"ĠFive":20924,"Ġresize":20925,"igid":20926,"eration":20927,"ĠÑį":20928,"åĬł":20929,"ĠCatch":20930,"Ùģ":20931,"ĠLeon":20932,"amil":20933,".Body":20934,"Clip":20935,"/list":20936,".br":20937,"EditText":20938,"ĉdb":20939,".Game":20940,"(BuildContext":20941,"backend":20942,".Red":20943,"facebook":20944,".urls":20945,"mr":20946,"rolled":20947,"-------":20948,"Ġintervention":20949,"Ġretirement":20950,"ĠKit":20951,"ĠPRE":20952,"UpperCase":20953,"ĠSocket":20954,"Ġ:-":20955,"Ġstudying":20956,"ĠMetro":20957,"arded":20958,"Ġconversations":20959,"Called":20960,"Ġexamine":20961,"ertificate":20962,".gz":20963,"-responsive":20964,"Ġrefund":20965,"_network":20966,"allowed":20967,"empt":20968,"Ġmeals":20969,"Categories":20970,"Ġtraveling":20971,"Ġkg":20972,"Ġshame":20973,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":20974,"Ġexplicitly":20975,"Ġmathematic":20976,"ĠSuite":20977,"ĠRGB":20978,"******/":20979,"Ġmixture":20980,"learning":20981,".template":20982,"atts":20983,"wx":20984,"ĉctx":20985,".properties":20986,"Ġdrinks":20987,"ĠEither":20988,"setText":20989,".getData":20990,".zip":20991,"Ġreveals":20992,".Ċ":21005,"Ġranked":21006,"_impl":21007,"ĠHandles":21008,"Ġhosted":21009,"Ġupdating":21010,"album":21011,"éĿ":21012,"Ġshader":21013,"Editors":21014,"-round":21015,"[]{":21016,"Ġsep":21017,"ĠHi":21018,"TEM":21019,"lookup":21020,".man":21021,"_INPUT":21022,"Ġthreatened":21023,"_IMPORT":21024,"Ġdrops":21025,"ruit":21026,"sid":21027,"both":21028,"ĠExcel":21029,"Ġjer":21030,"ordinary":21031,"ей":21032,"VIEW":21033,"reply":21034,"Ġ):Ċ":21035,"colors":21036,"verified":21037,"_Tr":21038,"_parse":21039,"Ġcongress":21040,"Promise":21041,"ints":21042,"ĠMother":21043,".Api":21044,"ĠDuration":21045,"ĠfirstName":21046,"inheritdoc":21047,"ĠMars":21048,"Ġapr":21049,"ODY":21050,"Ġvisits":21051,"Ġhealing":21052,"letters":21053,")));čĊ":21054,"future":21055,".Framework":21056,"Ġkiss":21057,"Ġinvolve":21058,"Ġsilent":21059,"adows":21060,"Ġanybody":21061,"sch":21062,"Ġsolely":21063,"-img":21064,"Ġpropri":21065,"Ġinstruct":21066,"Ġlicenses":21067,"Ġmeth":21068,"Ġcondem":21069,"ĠDomain":21070,"ĠHarris":21071,"ĠsÃ¥":21072,"CEPT":21073,"Batch":21074,"@extends":21075,"ĠCONTRIBUT":21076,".DataFrame":21077,"_packet":21078,"recision":21079,"Ġfocusing":21080,".ht":21081,"__\":Ċ":21082,":Get":21083,"ĠKC":21084,"Ġpassage":21085,"Segment":21086,"_center":21087,"-zA":21088,"_BL":21089,"Ġconvin":21090,"Ġclassified":21091,"ĠNSMutable":21092,"_ap":21093,"tile":21094,"Rectangle":21095,"(nums":21096,"vens":21097,"ĠUIButton":21098,"ĠFeder":21099,"amo":21100,"Ġoutline":21101,"ĠParser":21102,"Ġâī":21103,"ĠWorks":21104,".Schema":21105,"Ġengines":21106,"_common":21107,"_old":21108,"ĠsetContentView":21109,"Ġ///<":21110,"ĠBT":21111,"fm":21112,"Ġdivers":21113,"_weights":21114,"emark":21115,"ĠACT":21116,"Ġproportion":21117,"overlay":21118,".dirname":21119,"ĠGit":21120,"_REFERENCE":21121,"<>":21122,"lb":21123,"_rule":21124,"è´¥":21125,"ĠPutin":21126,"Ġsleeping":21127,"():čĊ":21128,"Ġpreserve":21129,"Ġparliament":21130,"ĠLooking":21131,"Ġpicking":21132,"ĠDispatch":21133,"Ġslip":21134,"ëĵ":21135,"ĠLyn":21136,"_signal":21137,"configuration":21138,"ĠPitt":21139,"aden":21140,"procedure":21141,"Ġenthusi":21142,"fight":21143,"ĠConsider":21144,"Ġtorn":21145,"Connected":21146,".cos":21147,"_groups":21148,"ĠThink":21149,"Ġdeliber":21150,"Ġresid":21151,"working":21152,".colum
ns":21153,"ĠCalled":21154,"Ġeslint":21155,">\",":21156,"_DOWN":21157,"hist":21158,"ĠAdvanced":21159,"Ġrewards":21160,"actors":21161,"Ġsilence":21162,"Ġmyth":21163,"Ġneur":21164,"Ġauction":21165,".GetString":21166,"eks":21167,"(project":21168,"ĉmsg":21169,"ĉoutput":21170,"Ġcomplaints":21171,",S":21172,"Ġtbl":21173,"Ġ,ĊĊ":21174,"riors":21175,"ahren":21176,"Ġlawyers":21177,"redux":21178,"_symbol":21179,"offee":21180,"_RESULT":21181,"(Name":21182,"UTC":21183,".currentTime":21184,"Ġorganis":21185,".arg":21186,"Ġminim":21187,"wick":21188,"Ġreceives":21189,"Balance":21190,"Ġspeaks":21191,"ĠDays":21192,"ĠBelow":21193,"tipo":21194,"Present":21195,"Ġreserv":21196,"hp":21197,"Ġrit":21198,"_RIGHT":21199,"--)":21200,"Ġchairman":21201,"DIS":21202,"ĠBOOST":21203,"Ġexperiments":21204,"__);Ċ":21205,"Ġstamp":21206,"Ġfert":21207,"Ġfond":21208,"Ter":21209,"elve":21210,"uren":21211,"+i":21212,"endency":21213,"Ġvirtually":21214,"...\"":21215,"ï½ŀ":21216,"-cent":21217,"_unique":21218,"Ġpricing":21219,"mic":21220,"RESH":21221,"Ġ:::":21222,"Ġannotation":21223,"ĠCircle":21224,"ongodb":21225,"itas":21226,"Ġ%(":21227,"(component":21228,"Ġоб":21229,"(port":21230,"-hour":21231,".obj":21232,"LBL":21233,"Ġjury":21234,"GBT":21235,"Ġspy":21236,"ĠProfessional":21237,"Ġ\"\";ĊĊ":21238,"Ġstriking":21239,"Ġdiscrimination":21240,"Ġpays":21241,"lict":21242,"entes":21243,"Ġthrowing":21244,"ĠPlugin":21245,"(def":21246,"ĠRuntimeException":21247,"ĠMigration":21248,"Ġdic":21249,"bag":21250,"onia":21251,"Ġcorruption":21252,"(Map":21253,"Ġprz":21254,".dto":21255,"Ġacquire":21256,"StateToProps":21257,"Ġloving":21258,"ож":21259,"_pattern":21260,"Ġemotions":21261,"Ġpublisher":21262,"_be":21263,"Ġcouples":21264,"oj":21265,"ĠChart":21266,"Ġtrop":21267,".tool":21268,"Ġestablishment":21269,"Ġdol":21270,"Ġtower":21271,"Ġlane":21272,"ĠSydney":21273,"Ġfilling":21274,"claimed":21275,"Ġdialogue":21276,"Ġconvention":21277,"booking":21278,"parency":21279,"æ±":21280,"ĠGeneric":21281,"\\Schema":21282,"Ġranges":21283,"/ch":21284,"Ġpanels":21285,"Ġruled":21286,"çĶŁ":21287,".ts":21288,"_sets":21289,"Ġcleanup":21290,"Previous":21291,"ĠAnimal":21292,"($(":21293,"ĠAve":21294,"ollar":21295,"_eval":21296,"ĉName":21297,"(tree":21298,"Ġ\"]":21299,"Ġduties":21300,"='/":21301,"Clicked":21302,"Ġdifferently":21303,"ĠClark":21304,"Ġdit":21305,"ologists":21306,"Ġsynd":21307,"Ġsends":21308,"-known":21309,"kb":21310,"ĠModal":21311,"itative":21312,"Ġracing":21313,"Ġhighlights":21314,"ĠSimon":21315,"ĠCaptain":21316,"ä¿¡":21317,"ĠCB":21318,"contin":21319,"aran":21320,"Ġphysics":21321,"retty":21322,"etal":21323,".md":21324,"axios":21325,"Ġspeakers":21326,"Ġprep":21327,"Ġawarded":21328,"ì§Ģ":21329,"ĠCorn":21330,"ĠNature":21331,"UDIO":21332,"Ġproj":21333,"-pre":21334,"[u":21335,"Features":21336,"ĠisEqual":21337,"Binary":21338,"sig":21339,"Ġconfusion":21340,"ĠHat":21341,"Ġktó":21342,".configure":21343,"MON":21344,"/edit":21345,"_Add":21346,",true":21347,"Ġcli":21348,"ErrorMessage":21349,"-loader":21350,"Dimensions":21351,"ultiply":21352,"Ġ{!!":21353,"ĠSqlCommand":21354,"Ġspoken":21355,"Ġpics":21356,"Ġtoy":21357,"(Key":21358,"ĠLoop":21359,"ب":21360,"EATURE":21361,"inction":21362,"_setup":21363,"wrapper":21364,"Ġtong":21365,"cular":21366,"Opt":21367,".Pl":21368,"=\",":21369,"(length":21370,"umn":21371,"Ġchrom":21372,"Ġsevent":21373,"ĠIllegalArgumentException":21374,"ĉstart":21375,"Ġbegun":21376,"CEPTION":21377,"dataset":21378,"ĠFailed":21379,"cols":21380,"Ġknee":21381,"imore":21382,".splice":21383,"shell":21384,"iggers":21385,"Ġthemes":21386,"ĠDJ":21387,"ĠAssistant":21388,"-$":21
389,"Maybe":21390,"Ġordering":21391,"ĠIntelligence":21392,"ĠMassachusetts":21393,"Ġfailing":21394,"elson":21395,"Great":21396,"=i":21397,".rest":21398,"Ġinvite":21399,"-disable":21400,".GroupBox":21401,"âĢĻest":21402,"Ġtackle":21403,"gv":21404,"etter":21405,"Ġ),čĊ":21406,"_rules":21407,".warn":21408,"functions":21409,"ĠChristians":21410,"Ġbacked":21411,"Ġslider":21412,"Ġenjoying":21413,"nest":21414,"Ġhij":21415,"_ms":21416,"//*":21417,"Annotations":21418,"ĠVariables":21419,"":21620,"cycle":21621,"ĠBull":21622,"paths":21623,"Ġunp":21624,"ĠviewDidLoad":21625,"_Model":21626,"ĠassertTrue":21627,"Ġrated":21628,"Decl":21629,"verted":21630,"ĠDat":21631,"brew":21632,"Ġpointing":21633,"Ms":21634,"ĠPointer":21635,")'":21636,"_non":21637,"ĠSEC":21638,"Ġyeah":21639,"gency":21640,"initialize":21641,"fly":21642,"[pos":21643,",g":21644,"Tele":21645,"Ġjoke":21646,"Ġclause":21647,".findById":21648,"enes":21649,"(instance":21650,"£":21651,"Ġslic":21652,"_home":21653,"Ġ*/}Ċ":21654,"_pages":21655,"(service":21656,"RP":21657,"ĠAmong":21658,".getCurrent":21659,"ãĤ¹":21660,"Ġslee":21661,"=[Ċ":22071,"oler":22072,"Ġlibert":22073,"Ġ`Ċ":22074,"Ġwenn":22075,"lated":22076,"Ġimmune":22077,"(Node":22078,"ĠProblem":22079,"ĠAbs":22080,"logs":22081,"Ġ../":22082,"ĠADC":22083,"Ġ}}\">Ċ":22084,">');Ċ":22085,"=b":22086,"ĠWind":22087,"lahoma":22088,"Ġallocate":22089,"orian":22090,"Ġprescription":22091,"-quality":22092,"ĠMayor":22093,"inely":22094,"endforeach":22095,"ĠComplex":22096,"kom":22097,"TY":22098,"]].":22099,".Style":22100,"_many":22101,"','$":22102,"Ġbarrier":22103,"ĠFetch":22104,"ĠMarvel":22105,"Ġresist":22106,"ого":22107,"bidden":22108,"ĠRunnable":22109,":false":22110,"Ġbuilds":22111,"ĠStage":22112,"Ġdub":22113,"empo":22114,".site":22115,";ĊĊĊĊ":22116,"ĠDenver":22117,"Ġrevel":22118,"Ġtriggered":22119,"Ġdice":22120,"_fail":22121,"Ġgc":22122,"ĉX":22123,"ĠThrowable":22124,".router":22125,"ĠRevolution":22126,"ÑĢа":22127,"_NON":22128,"Ł¥":22129,"Ġelder":22130,"Ġabroad":22131,"Ġе":22132,"ĠAdult":22133,"blr":22134,"glyphicon":22135,"Ġpromoting":22136,"Ġiz":22137,"ĠSolid":22138,"_loader":22139,"early":22140,".enabled":22141,"-edit":22142,"ĠUL":22143,"_play":22144,"ĠInterrupt":22145,"Ġadvantages":22146,"ucle":22147,"Ġmechanical":22148,".tableLayoutPanel":22149,"ĠWorking":22150,"Ġanonymous":22151,"Rating":22152,"igious":22153,"_phone":22154,".addActionListener":22155,"Ġfran":22156,"unden":22157,"Ġ*)&":22158,"_bool":22159,"ulative":22160,"Ġcone":22161,"ĠMult":22162,"Ġmö":22163,"ĠForward":22164,"]):Ċ":22165,"Ġconvinced":22166,"acted":22167,"ãģĵ":22168,"ĠConfigure":22169,"Ġceiling":22170,"Der":22171,"Ġpassengers":22172,"Groups":22173,"Ġsoccer":22174,"/W":22175,"aviors":22176,"swith":22177,"ĠZone":22178,".Options":22179,"ĠMom":22180,"ieder":22181,"Arrays":22182,"Ġtreatments":22183,"Ġprotecting":22184,"fac":22185,"Ġpickle":22186,"ButtonItem":22187,"Ġblocking":22188,"strar":22189,"ò":22190,"ĠExport":22191,"Ġthrew":22192,"otta":22193,"ĠBASE":22194,".ws":22195,".LEADING":22196,"orderBy":22197,"_delay":22198,"ĠPu":22199,".dll":22200,"ĠChoose":22201,"Police":22202,"ĠBEGIN":22203,"boxes":22204,"Ġdiamond":22205,",l":22206,"Ġĉĉĉ":22207,"Ġcurious":22208,"tv":22209,"Ġerotische":22210,"ackages":22211,"ĉSet":22212,"Tick":22213,".border":22214,"staticmethod":22215,"Ġcher":22216,"invoice":22217,"Ġcru":22218,"Ġdefect":22219,"_metadata":22220,"relation":22221,"ikan":22222,"[N":22223,"(Qt":22224,"(Base":22225,"æģ¯":22226,"beat":22227,"ĠEmpty":22228,"ĉo":22229,"_shift":22230,"Ġregret":22231,"Those":22232,"Cent":22233,"ĠPortug":22234,"ĠIslands":22235,"
ĠTIME":22236,"Management":22237,"-sp":22238,"ême":22239,"Ġnotion":22240,"unifu":22241,"PK":22242,"è¡Į":22243,"ĠCURLOPT":22244,"\\\"\\":22245,"UV":22246,"çº":22247,"dra":22248,"cou":22249,"=`":22250,"ĠDestroy":22251,"rp":22252,".cancel":22253,"GG":22254,"runtime":22255,"ĠVue":22256,"Ġprogressive":22257,"/services":22258,"Ġrunner":22259,"_FRAME":22260,".ToolStripMenuItem":22261,"Ġ','":22262,"delay":22263,"=utf":22264,"Ġscreening":22265,"Ġpulling":22266,"omas":22267,"Ġanth":22268,"-new":22269,"/local":22270,"ĠiPad":22271,"Ġtwitter":22272,"Ġdying":22273,"Ġheaven":22274,"ĠUInt":22275,"ĠSenator":22276,"Ġpresum":22277,"ĠWalker":22278,"Ġovercome":22279,"etection":22280,"Ġembarrass":22281,"China":22282,"Include":22283,"ROLL":22284,"ĠdataType":22285,"David":22286,"ร":22287,"lop":22288,"-month":22289,"Ġscar":22290,"ĠSafe":22291,"Ġ****************************************************************":22292,"Ġaccessories":22293,"Ġramp":22294,"_USE":22295,"Ġcontrad":22296,"))]Ċ":22297,"Ġprest":22298,"ĠHR":22299,"ĠRap":22300,"Ġusize":22301,"Ġcapability":22302,"Ġcort":22303,"-next":22304,"Ġburden":22305,"_reader":22306,"Ġ@@":22307,"regular":22308,"ĠKa":22309,"MAN":22310,"Ġastr":22311,"Ġ'')Ċ":22312,"Ġfed":22313,"Ġparsing":22314,"ĠYears":22315,"Ġbroker":22316,"\":{\"":22317,"Ġakt":22318,"Inventory":22319,"abeled":22320,"Ġargparse":22321,"*******Ċ":22322,"versation":22323,"Ġcord":22324,"ĠTi":22325,"Ġhopefully":22326,"Ġah":22327,"verb":22328,"Ġstolen":22329,".Entry":22330,"Ġexpecting":22331,"Orientation":22332,"Ġpowered":22333,"Ġpersist":22334,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":22335,"']);":22336,"')),Ċ":22337,"ĠCash":22338,"ĉitem":22339,"grades":22340,"ropol":22341,"basic":22342,"Ġ\");čĊ":22343,"Ġawards":22344,"(range":22345,"-all":22346,"ĠIBOutlet":22347,"ĠIndeed":22348,"----------------------------------------------------------------------------":22349,"Ġstomach":22350,"Ġflower":22351,"Ġsew":22352,"_times":22353,"avis":22354,"QString":22355,"ĠRoutes":22356,"_prot":22357,"Ġcomedy":22358,"Ġlogout":22359,"Ġwooden":22360,"Ġposter":22361,"piece":22362,".Join":22363,"ĠPok":22364,"celona":22365,"mutex":22366,";čĊčĊčĊ":22367,"Ġstrikes":22368,"Loaded":22369,")arg":22370,"esa":22371,"United":22372,"Ep":22373,"PELL":22374,"ĠAtlantic":22375,"ullet":22376,"apple":22377,"Ġsettled":22378,"acon":22379,"Ġprinter":22380,"ĠGC":22381,"å®ļ":22382,"Ġrendered":22383,",âĢĻ":22384,"heit":22385,"social":22386,".ge":22387,"ĠRick":22388,"ĠUtah":22389,"got":22390,"onical":22391,"ĠScroll":22392,"ĠSciences":22393,"Ġjug":22394,"Ġampl":22395,"enti":22396,"LEFT":22397,"Ġtabs":22398,"Ġenormous":22399,".getKey":22400,"locate":22401,".EX":22402,".storage":22403,".We":22404,"Ġtoast":22405,"ĠAdditionally":22406,"ĠNOW":22407,"_UPDATE":22408,"Ġtransferred":22409,"tha":22410,".Display":22411,"_ui":22412,"IDEO":22413,"Ġmeaningful":22414,"ĠMoscow":22415,",this":22416,"ĠVictoria":22417,"æĶ¹":22418,"ĠÐŁ":22419,".stack":22420,"ĠBarn":22421,"paredStatement":22422,":string":22423,"Ġbij":22424,"ĠSTATE":22425,"Ġemployers":22426,"ĉinput":22427,"(|":22428,"Ġlex":22429,"invoke":22430,"ĉnum":22431,"++,":22432,"atial":22433,"orses":22434,"Ġfork":22435,"_txt":22436,"ĠAntonio":22437,"Ġ(<":22438,"averse":22439,"Ġdevast":22440,"ãĢĢ":22441,".Dec":22442,"ĠGard":22443,"/ui":22444,".%":22445,"tri":22446,"Ġrolled":22447,"ValuePair":22448,"itten":22449,"ĠTher":22450,"Ġvrou":22451,"ĠFlow":22452,"ĠFinance":22453,"ĠComb":22454,"HC":22455,".setVisible":22456,"isl":22457,"Ġpk":22458,"Ġupset":22459,"(raw":22460,"ĠVice":22461,"eatures":22462,"ĠL
ang":22463,"Looking":22464,"ĠAST":22465,"Ġtrips":22466,"ĠJustin":22467,"browser":22468,"=\"'.$":22469,".vertices":22470,"-co":22471,"}/{":22472,"Ġ?,":22473,"ĠDomin":22474,"ĠBelg":22475,"\"<":22476,"Ġsuppose":22477,"addy":22478,"Ġwalks":22479,"ERRU":22480,"_filters":22481,"Preferred":22482,"scene":22483,"еÑģ":22484,"ĠAffairs":22485,"Ġ\"#{":22486,"ĠonSubmit":22487,"Ġstocks":22488,"/view":22489,"gree":22490,"-get":22491,"hit":22492,"Jo":22493,".getC":22494,"Initialized":22495,"ÑĤи":22496,"cuts":22497,"(Type":22498,"ĠAgreement":22499,"ĠVietnam":22500,"Ġ/*!":22501,"Ġpizza":22502,"-view":22503,"_em":22504,"Ġlhs":22505,"Ġmuy":22506,"ĠIdent":22507,"ĠFriends":22508,"Ġabund":22509,"_AD":22510,".timestamp":22511,"-'":22512,"Ġduplicate":22513,"Ġhunting":22514,"Ġregulatory":22515,"iao":22516,"amous":22517,"ĠEntertainment":22518,"[A":22519,"iatric":22520,"_CLIENT":22521,"ĠKids":22522,"/pkg":22523,"Break":22524,")));ĊĊ":22525,"ĠShape":22526,"Ġrelating":22527,"Interrupt":22528,"ableOpacity":22529,"embre":22530,"Ġmystery":22531,"Ġjournalists":22532,"ritable":22533,".Link":22534,"Ġstopping":22535,"CRET":22536,".DB":22537,"Ġpopularity":22538,"Ġgew":22539,"Ġimpr":22540,"setValue":22541,"FLAG":22542,"ĉmax":22543,"Ġbake":22544,"wy":22545,"ĠEconomic":22546,"Ġencontr":22547,"Ġfname":22548,"/de":22549,"Rank":22550,"Ġbugs":22551,".sm":22552,"Ġmedian":22553,"DOWN":22554,"ĠSure":22555,"AtIndex":22556,"ĠDick":22557,"Ġ(__":22558,".delta":22559,"Fr":22560,"Ġsuggesting":22561,"ĠRecyclerView":22562,",e":22563,"START":22564,"/****************************************************************************":22565,"xford":22566,"Ġreceipt":22567,"CLAIM":22568,"readonly":22569,"Ġengaging":22570,"Ca":22571,"asma":22572,"Ġensuring":22573,"English":22574,"ĠVancouver":22575,"hyth":22576,"Ġpurchasing":22577,"ĠPI":22578,".word":22579,"(sp":22580,".home":22581,":def":22582,"Ġgig":22583,"ĠVe":22584,"forum":22585,"ĠMitch":22586,"Bay":22587,"_FL":22588,"Ġsoll":22589,"_columns":22590,"Ġminority":22591,"bird":22592,"Ġhanded":22593,"SSL":22594,"STAT":22595,"Ġnervous":22596,"ĥ½":22597,"ĠfilePath":22598,"CREATE":22599,"Aw":22600,"Ġpens":22601,"seed":22602,"ĠCompute":22603,"olk":22604,"ĠAsset":22605,"reach":22606,"'),čĊ":22607,"navigation":22608,"LF":22609,"/util":22610,"ĠPub":22611,"ĠâĶ":22612,"cion":22613,"##Ċ":22614,"III":22615,"TagName":22616,"Ġamid":22617,"permission":22618,"ifiable":22619,"xFFFFFFFF":22620,"ни":22621,".Buffer":22622,"_irq":22623,"dark":22624,"Ġretval":22625,".fire":22626,"production":22627,".listen":22628,"ĠWeather":22629,"Ġbuyers":22630,".ne":22631,"erp":22632,"ĠPent":22633,"Ġwelfare":22634,"ĠpageSize":22635,"ĠStadium":22636,"erta":22637,"Ġlev":22638,"ampa":22639,"Pager":22640,"Ġcharging":22641,"ĠNetflix":22642,"|null":22643,"_random":22644,".xpath":22645,"Ġstere":22646,"ĠISIS":22647,"ponses":22648,"(loc":22649,"eyond":22650,"ĠOfficial":22651,"ĠMaryland":22652,"DataType":22653,"_par":22654,"{},":22655,"ĠEnjoy":22656,"_SHIFT":22657,"ĠAwards":22658,"_ENTRY":22659,"Ġseemingly":22660,"enticate":22661,"Ġhearts":22662,"_;ĊĊ":22663,"ĠHIV":22664,"Ġindivid":22665,"ĠFlag":22666,"_ctrl":22667,"ĠCallback":22668,",z":22669,"ĠGPU":22670,"ĉobj":22671,"ĠPhoenix":22672,"ĠBUS":22673,"Ġrubber":22674,"_AUTH":22675,"ĠSolutions":22676,"(location":22677,"Variables":22678,".setEnabled":22679,"_high":22680,"WO":22681,"Gesture":22682,"Ġretry":22683,"ĠobjectForKey":22684,"alloween":22685,"Ġmos":22686,"ĠCele":22687,"Ġikke":22688,"(cell":22689,"ĠMODE":22690,"rena":22691,"Ġdescribing":22692,"Ġphi":22693,"Ġrd":22694,"Ġdeserve":22695,"Ġwheels":22696,"å¸Ĥ"
:22697,"Ġcritics":22698,"Namespace":22699,"ĠFra":22700,"ĠĊĊĊĊ":22701,"Ġalla":22702,"Ġrequiring":22703,"æľŁ":22704,"utation":22705,"Ġdelayed":22706,"Ġadministrative":22707,"Ġbay":22708,".hidden":22709,"Tex":22710,"Ġboundaries":22711,"Ġ]);ĊĊ":22712,"ĠFollowing":22713,"~/":22714,"Fi":22715,"_conv":22716,"_TITLE":22717,"Ġdesde":22718,"ICollectionView":22719,"Alias":22720,"Ġbite":22721,"patient":22722,"_COMMAND":22723,"Completed":22724,"ĉelif":22725,"(<":22726,"Business":22727,"ĠPool":22728,"Ġpursue":22729,"ĠBan":22730,"_steps":22731,"_DECL":22732,"umble":22733,"Ġcombo":22734,"ĠLayer":22735,".xr":22736,"Ġdup":22737,"---------":22738,"Ġmodifier":22739,"rob":22740,"rez":22741,"Ġathletes":22742,"Used":22743,"wear":22744,"Ġlegitimate":22745,"Ġ\"ĊĊ":22746,"Ġhv":22747,"Std":22748,"ĠHold":22749,"Ġsurviv":22750,"ĠAlliance":22751,"ĠEarly":22752,"Behavior":22753,"(font":22754,"/libs":22755,"Ġrectangle":22756,"Ġsinger":22757,"Ġamp":22758,"EqualTo":22759,"Ġ\".\"":22760,"Ġgirlfriend":22761,"å±":22762,"linear":22763,"observ":22764,"Ġpiù":22765,"Ġcomplement":22766,"WithValue":22767,"(password":22768,"take":22769,"Blank":22770,"ĠCompar":22771,"'\",":22772,"_policy":22773,"mongoose":22774,"_FAILED":22775,".report":22776,"Ratio":22777,".PerformLayout":22778,"usable":22779,"mers":22780,"_render":22781,"PEED":22782,"Ġlesb":22783,"ĉE":22784,"_tool":22785,"Ġladies":22786,"оÑģ":22787,"))))Ċ":22788,";;;;":22789,".dot":22790,"Ġnest":22791,"peak":22792,"ukkit":22793,"eca":22794,"_SW":22795,"Ġ&(":22796,"ĠOklahoma":22797,"Ġbanking":22798,"ĠNintendo":22799,"Ġreproduce":22800,"_elements":22801,"_mac":22802,"proxy":22803,"Ġremarkable":22804,"}/${":22805,"Ġouts":22806,".hasNext":22807,"MODE":22808,"Ġanime":22809,".conn":22810,"Unique":22811,"Dom":22812,"Ġimportantly":22813,"itty":22814,"Ġjuice":22815,"Tw":22816,"ĠPartners":22817,"Ġattacking":22818,"Ġportable":22819,"amiento":22820,".PictureBox":22821,".gen":22822,"Ġoptimal":22823,"Ġrecre":22824,"Ġjournalist":22825,"ĠExtract":22826,"ĠMoreover":22827,"ĠmarginTop":22828,".Ap":22829,"Ġfiring":22830,"NaN":22831,"ĉtemplate":22832,"ад":22833,".En":22834,"Ġdefence":22835,"ĠTel":22836,"ilen":22837,"jan":22838,"=data":22839,"ĠUrl":22840,"ĠReuters":22841,"(total":22842,"ĠFifth":22843,"Ġessays":22844,"Ġinterpretation":22845,"Ġcharity":22846,"ĠRules":22847,"Ġsubsection":22848,"styled":22849,"azer":22850,"lags":22851,"LIST":22852,"Ġuploaded":22853,"Ġtrash":22854,"Ġregistr":22855,"Ġseller":22856,">';čĊ":22857,"ĠstartTime":22858,"çĻ":22859,"sy":22860,"(HttpServletRequest":22861,"Ġtrap":22862,"GC":22863,"Ġembedded":22864,"Ġsurrounded":22865,"imits":22866,"TX":22867,"ylinder":22868,"ĠFal":22869,"Ġsentences":22870,"ĠJa":22871,"IFICATION":22872,"weapon":22873,"ovation":22874,"Ġcoat":22875,"Ġinterpol":22876,"Ġlips":22877,"ĠKy":22878,"Ġvectors":22879,"_am":22880,"Ġintake":22881,".world":22882,"Ġinbox":22883,"ĠMAC":22884,"_ab":22885,"(nameof":22886,"Ġentert":22887,"Ġgathering":22888,"ĠSIM":22889,"++.":22890,"nya":22891,"'}}":22892,"ĠUPDATE":22893,"Ġpac":22894,"(html":22895,"ĠSant":22896,"iating":22897,"ĠIdeas":22898,"Ġspray":22899,"ĠHart":22900,"Ġverification":22901,"adesh":22902,"/modules":22903,"ĠMind":22904,"ĠSizedBox":22905,"Ġshelter":22906,"Ġheroes":22907,"atty":22908,"Ġcertified":22909,"sj":22910,"Ġêtre":22911,"ÅĤo":22912,"Ġpublishing":22913,"ĠMalays":22914,".getUser":22915,"ĠProvider":22916,"ĠLinkedList":22917,"ĠBor":22918,"ROUND":22919,"did":22920,"tain":22921,"pire":22922,"ĠJenn":22923,"tel":22924,"ande":22925,"_front":22926,"ĠMcG":22927,"TestMethod":22928,"à¸Ń":22929,"Ġoccasionally":22930,
"ĠWales":22931,"Ġexercises":22932,"ĠÐĴ":22933,"-plus":22934,"Ġvalidator":22935,"Ġprayer":22936,"LATED":22937,"_author":22938,"Ġlabour":22939,"++Ċ":22940,"-equiv":22941,"ĠGPL":22942,"Ġfacebook":22943,"simple":22944,"gly":22945,"Processor":22946,"ipy":22947,"Ġ*>":22948,"Ġcleared":22949,"ĠPush":22950,"Ġpenis":22951,"Structure":22952,"lij":22953,"ĠMorgan":22954,"Ġhandful":22955,"\".Ċ":22956,"|\\":22957,"Ġ********************************":22958,"ĠAqu":22959,"_IC":22960,".loads":22961,"Ġmeter":22962,"ĠMarine":22963,"::{":22964,"ĠTS":22965,"ĠArrays":22966,".Title":22967,"GRAM":22968,"termin":22969,"Ġcoinc":22970,"Else":22971,"_states":22972,"-run":22973,"members":22974,"astro":22975,"ĠonPress":22976,"Ġbeings":22977,"Ġabandoned":22978,"Ġtaxp":22979,"owners":22980,".mode":22981,"Ġdiagnosis":22982,"Ġ_Ċ":22983,"ĠKnight":22984,"ĉA":22985,"Ġobserve":22986,"),'":22987,"!\")Ċ":22988,"ĠPara":22989,"Ġvariation":22990,"(False":22991,"ĠAnti":22992,"Ġgri":22993,"Ġhomeless":22994,"?v":22995,"Ġbez":22996,".Server":22997,"release":22998,"ĠPatri":22999,"Ġchars":23000,"Ġranking":23001,"activation":23002,"Ġwides":23003,"qr":23004,".Sql":23005,"acular":23006,"ĠBot":23007,"_sync":23008,"Ġhappiness":23009,"Ġvolunteers":23010,"Ġsits":23011,"/<":23012,"[e":23013,"(fileName":23014,"Ġcapac":23015,"ĠMaria":23016,"father":23017,"Ġgram":23018,"*i":23019,"Ġcaso":23020,"_draw":23021,"ĠRaw":23022,"ĠIterator":23023,"ĠPadding":23024,"PD":23025,"BOX":23026,"ĠSPECIAL":23027,"Ġfecha":23028,"Ġvide":23029,"ĠLeader":23030,"以":23031,"$(\".":23032,"Ġdiameter":23033,"Ġmild":23034,"Ġrocks":23035,"appings":23036,"directory":23037,".flush":23038,"ĠJess":23039,"UNIT":23040,"ĠPear":23041,"Ġmandatory":23042,"Sur":23043,"qt":23044,"Ġstreams":23045,"Ġcooperation":23046,"ĠSac":23047,"Ġcheaper":23048,"ĉch":23049,"animation":23050,"fare":23051,"(height":23052,"(True":23053,"NY":23054,"Ġwrest":23055,"Ġpolls":23056,"Ġencountered":23057,"ĠMarketable":23058,"_PASSWORD":23059,"_SELECT":23060,"ĠArabia":23061,"_clock":23062,"Ġvoy":23063,"Ġиз":23064,"Ġstir":23065,"isible":23066,"-effect":23067,".created":23068,"Ġtoys":23069,"ĠTradable":23070,"Ġrust":23071,"Ġstrcpy":23072,"_timestamp":23073,"Ġtalented":23074,",null":23075,"ĠJobs":23076,"ĠPortland":23077,"Ġweakness":23078,"Throw":23079,"ĠAngel":23080,"ä¿®":23081,"Ġuncert":23082,"ï¼īĊ":23083,"ĠìĿ´":23084,"Which":23085,"Ġ[-]:":23086,"Something":23087,"Ġconvicted":23088,"kle":23089,"edium":23090,"Ġbranches":23091,"Ġbases":23092,"ç®":23093,"Ġcomplexity":23094,"ĠFig":23095,".reshape":23096,"$db":23097,"_CONST":23098,"ĠTes":23099,".runtime":23100,"Ġdeny":23101,"ĠBSD":23102,"Ġkr":23103,"hatt":23104,"ĠStatic":23105,"Ġuniversities":23106,"Replace":23107,"Ġdrove":23108,"Ġadoles":23109,"_plugin":23110,"ĠLGBT":23111,"Ġtex":23112,"duction":23113,"EDI":23114,"ĠTed":23115,"_URI":23116,"Ġreception":23117,"arten":23118,".Single":23119,"rice":23120,"scious":23121,"_bg":23122,"Ġwages":23123,"ĠServlet":23124,"UILayout":23125,"Ġformatted":23126,".Mod":23127,"',Ċ":23174,"Ġexpanding":23175,"ĠHamilton":23176,"ĠContrib":23177,".Tables":23178,"Activ":23179,"HH":23180,"ocommerce":23181,"_;":23182,"Ġamongst":23183,"owing":23184,"ĠCold":23185,"APH":23186,"Ġpsychological":23187,"_tensor":23188,"Ġpackaging":23189,"ĠSweden":23190,"Ġpare":23191,"Ġaggregate":23192,"Ġmoderate":23193,"_hand":23194,"Ġdesignated":23195,"Ġdrum":23196,"ĠgetUser":23197,"ĠCreek":23198,"_scope":23199,"ĠTransfer":23200,"ĠMarg":23201,"Ġfighters":23202,"Wnd":23203,"ĠSel":23204,"ĠLaunch":23205,"Ġemerging":23206,"iframe":23207,"ĠAdditional":23208,"Ġfears":23209,"Ġsatellite"
:23210,"_:":23211,"Ġdisposing":23212,"GetValue":23213,"HttpPost":23214,"ATIVE":23215,"ulary":23216,"Views":23217,"Ġattending":23218,"ĠTennessee":23219,"ĠMission":23220,"Ġmedication":23221,"ĠWy":23222,"ĠAnna":23223,"ع":23224,"ĠVertex":23225,".types":23226,"Organ":23227,".DataGridViewTextBoxColumn":23228,"ĠRS":23229,"Ġtempo":23230,"(App":23231,"VersionUID":23232,".point":23233,"ĠDutch":23234,"Hours":23235,"LU":23236,"Ġquoted":23237,".builder":23238,"ĠPerfect":23239,"ĠAlways":23240,"_two":23241,"Ġexclusively":23242,"ĠCra":23243,"ificar":23244,"ĠAWS":23245,"ingham":23246,"complex":23247,"kernel":23248,"Ġgravity":23249,"Ġwi":23250,"Ġoverview":23251,"ĠWant":23252,"ĠWP":23253,"(sh":23254,".rotation":23255,"States":23256,"ĠTeen":23257,"_components":23258,"ìĪĺ":23259,"Received":23260,"Ġlyrics":23261,"rites":23262,"ĉĉĉĉĉĠ":23263,"-American":23264,"[num":23265,"/python":23266,"ĠUART":23267,"Ġapple":23268,"ĠJonathan":23269,"Ġmomentum":23270,"ั":23271,"Ĥ¹":23272,"Ġmich":23273,"andra":23274,"Ġbiological":23275,"ĠMens":23276,"Ġ%%":23277,"elsea":23278,"ĠMexican":23279,".randint":23280,"Ġtale":23281,"ĠValidate":23282,"Ġdefeated":23283,".htm":23284,"Ġcopper":23285,"=/":23286,"cosystem":23287,"Ġrip":23288,"decimal":23289,".VISIBLE":23290,"ĠTa":23291,"ĉĉĉĉĉĉĉĉĉĉĉĉĉĉ":23292,"Ġdownloaded":23293,"environment":23294,"Ġnomine":23295,"building":23296,"ĠSpot":23297,"ipheral":23298,"Ġalto":23299,"quet":23300,"ĠFT":23301,"/get":23302,"/master":23303,"WIN":23304,"åħĥ":23305,"West":23306,"argc":23307,"Ġproducers":23308,"ĠMuch":23309,"_storage":23310,"credit":23311,"CONT":23312,"Ġvet":23313,"Ġvoices":23314,"('',":23315,"Ġinstruments":23316,"ĠMSG":23317,"esse":23318,"repository":23319,"omics":23320,"Ġdealer":23321,"Still":23322,"Ġbanner":23323,"ascii":23324,"Ġremarks":23325,"[js":23326,"Ġshorter":23327,"gulp":23328,"Ġmyster":23329,"Ġkun":23330,"ĠBird":23331,"Ġtiene":23332,"nut":23333,"ĠUm":23334,"Ġwise":23335,"Yeah":23336,"INESS":23337,"_begin":23338,"-heading":23339,"Course":23340,"ĠčĊčĊ":23341,"ombie":23342,"graded":23343,"ĠGPS":23344,"Ġże":23345,"Fit":23346,"caption":23347,"ön":23348,"/image":23349,"lia":23350,"(mod":23351,"Ġleak":23352,"enza":23353,"/H":23354,"ĠHappy":23355,"Dist":23356,"nx":23357,"ĠGovernor":23358,"(last":23359,"teacher":23360,"ĠSent":23361,"support":23362,"jectory":23363,"ĠÙħ":23364,"Registration":23365,"ĠGray":23366,",false":23367,"Ġadjusted":23368,"(settings":23369,"'Ċ":23431,"-fold":23432,"æĬ":23433,"ĠBetter":23434,"Ġ\"\\<":23435,"spacing":23436,"Ġfurnished":23437,"oser":23438,"]}Ċ":23439,"Ġ$\"":23440,"pull":23441,".Post":23442,"(ip":23443,"Ĺı":23444,".front":23445,"nte":23446,"ĠFM":23447,"guid":23448,"Ġnegotiations":23449,"agonal":23450,"Ġtremend":23451,"ungeon":23452,"Adv":23453,"carousel":23454,"ÃŁe":23455,"_DESC":23456,"Ġhammer":23457,"áºŃ":23458,"ĠĠĠĠĠĠĠĠĊĊ":23459,"-core":23460,"-service":23461,"Ġcorners":23462,"ĠSF":23463,"pred":23464,">A":23465,"ĠJLabel":23466,"Ġromantic":23467,"Ġtestimony":23468,"osc":23469,"ĠGeneration":23470,"asures":23471,"_internal":23472,"Ġprints":23473,"Ġ])Ċ":23474,"ĠCleveland":23475,"repo":23476,"Disc":23477,"Ġ\">Ċ":23478,"����":23479,"Ġnearest":23480,"_tb":23481,"(require":23482,"EOF":23483,"-child":23484,"Ġbudd":23485,".XtraEditors":23486,"alties":23487,"\\\":\\\"":23488,"Words":23489,"Ġlocally":23490,"Ġpurchases":23491,"Drawer":23492,"extract":23493,"Ġexecut":23494,"}'.":23495,"userdata":23496,"Ġfocuses":23497,"-minute":23498,"ĠPublish":23499,"ogo":23500,"Ġmountains":23501,"Bot":23502,"}>{":23503,"Ġtension":23504,"rod":23505,"mesh":23506,"Ġtransformed":23507,",R"
:23508,"()}Ċ":23509,".long":23510,"Ġgorgeous":23511,"ĠSchedule":23512,"Ġoldest":23513,"Ġsubprocess":23514,"(IN":23515,"yect":23516,"ĠCooper":23517,"arness":23518,"ĠMonitor":23519,".part":23520,"ĠNBC":23521,"Ġcotton":23522,"Ġhol":23523,"Ġrgba":23524,"ĠBio":23525,"Continue":23526,"Pod":23527,"Ġparticipating":23528,"clusions":23529,"(ByVal":23530,"ì":23531,"ĠHOW":23532,"_setopt":23533,"Ġaccompanying":23534,"aton":23535,"Ġ/\\":23536,"ĠAuthentication":23537,"ién":23538,"ĠBarack":23539,"/*.":23540,"Ġeager":23541,"ĠCancel":23542,"$":23586,"OLEAN":23587,"OKIE":23588,"IBILITY":23589,"UAGE":23590,"ĠSurvey":23591,"Ġresign":23592,"wing":23593,"Ġsecrets":23594,"Ġchips":23595,"JSONObject":23596,"Desktop":23597,"_SYMBOL":23598,"(resource":23599,"ĠĊ":23600,"Ġnewest":23601,"uli":23602,"Ġdesert":23603,"Ġdip":23604,"ĠPow":23605,"Ġequation":23606,"Ġpossibilities":23607,"ĠFed":23608,"osph":23609,"Ġ[%":23610,"Ġbubble":23611,"etherlands":23612,"Ġcement":23613,".auto":23614,"_AN":23615,"âĢĻ.":23616,"selection":23617,"ĠBond":23618,"Den":23619,"-O":23620,".getType":23621,".Window":23622,"pres":23623,"Ġswinger":23624,"\"})Ċ":23625,"Ġpip":23626,"Ġmice":23627,"Ġcompound":23628,"-plugin":23629,"iko":23630,"Ġcenturies":23631,"icular":23632,"-inline":23633,"ĉkey":23634,">\\<":23635,"ENSION":23636,"Ġ[čĊ":23637,"Ġprecisely":23638,"Ġété":23639,"ĠPast":23640,"ĠCambridge":23641,"-full":23642,"Ġanalyze":23643,"ĠSteven":23644,"Ġnem":23645,"due":23646,"oren":23647,"Ġmuscles":23648,"ijing":23649,"/-":23650,"ĠKennedy":23651,"RM":23652,"ossible":23653,"Ġactress":23654,"Ġdolor":23655,"å½ķ":23656,"Need":23657,".toggle":23658,"ĠRace":23659,"wers":23660,".material":23661,"ĠDue":23662,"ĠPel":23663,"#print":23664,"Ġindependence":23665,"exus":23666,"Shadow":23667,"Ġencoder":23668,"(level":23669,"ĠSwift":23670,".doc":23671,"_selection":23672,"ĠserialVersionUID":23673,"Labels":23674,"Ġperformances":23675,".Tag":23676,"ĠNHL":23677,"izen":23678,"/UIKit":23679,"_CONTROL":23680,"Ġearnings":23681,"ĠAlt":23682,"_HANDLE":23683,"Ctx":23684,"Ġpersu":23685,"Ġtran":23686,"ç¨":23687,"_CHANNEL":23688,"Ġsatisfaction":23689,"ĠGP":23690,"iox":23691,"mitt":23692,"lando":23693,"Ġpig":23694,"inals":23695,"ência":23696,"Surface":23697,"ĠUUID":23698,"Ġbeneficial":23699,"Ġsequences":23700,"ĉmemset":23701,"Ġmagical":23702,"«":23703,"Ġworn":23704,"ASC":23705,"popup":23706,"COMP":23707,"_before":23708,"eness":23709,"Ui":23710,"Les":23711,".require":23712,".Serializable":23713,"addGap":23714,"Ġauthorization":23715,".pyplot":23716,"urray":23717,"latitude":23718,"frames":23719,"ajs":23720,"Ġcompass":23721,"Ġobservations":23722,"_sup":23723,".environ":23724,"Ġtriple":23725,"ĠRuby":23726,"Ġdrain":23727,"_FILTER":23728,"San":23729,"UMP":23730,"NullException":23731,"ĠGab":23732,"owe":23733,"ĠTurkish":23734,"_sequence":23735,"ĠGrant":23736,"uela":23737,"Ġwo":23738,"Ġcube":23739,"iq":23740,"Ġdisorders":23741,"Ġextraordinary":23742,"Ġctrl":23743,"ĠSeq":23744,"entr":23745,"Ġsanctions":23746,"utsch":23747,"Reports":23748,"Ġinherit":23749,"Period":23750,"Ġphotography":23751,"ĠFramework":23752,"Ġspecialist":23753,"Ġ?ĊĊ":23754,"_selected":23755,".Player":23756,"Ġallocation":23757,"(account":23758,"Ġstructural":23759,"vable":23760,"-offset":23761,".AppCompatActivity":23762,"ам":23763,".AddWithValue":23764,"Ġicons":23765,"Ġshutdown":23766,"_low":23767,"ĠCompare":23768,"ĠCe":23769,"=head":23770,"lam":23771,".predict":23772,"_DEC":23773,"ĠSleep":23774,"ĠGratis":23775,"Ġsuggestion":23776,"ĠDEL":23777,"caff":23778,"avirus":23779,"Nothing":23780,"ŀĭ":23781,"Ġwidespread":23782,"Ġmechanism
s":23783,"ĠtextAlign":23784,"occup":23785,"ĠRail":23786,":NS":23787,"Ġfiber":23788,"Ġmk":23789,"Ġvintage":23790,"-long":23791,".reduce":23792,".Entities":23793,"(record":23794,"Ġpleasant":23795,"FRING":23796,".Cells":23797,"OTT":23798,"ĉelseif":23799,"_confirm":23800,"ĠViewGroup":23801,"sym":23802,"Ġpray":23803,"Ġsuspected":23804,"Contains":23805,"Ġborders":23806,"ĠcomponentDid":23807,"ASSERT":23808,"Ġinfinite":23809,"-order":23810,"Ġhello":23811,"ĠGrade":23812,".currentTimeMillis":23813,"apolis":23814,"zh":23815,"ĉObject":23816,":\\\\":23817,"HO":23818,"valuation":23819,"Ġvocab":23820,"Ġcoupon":23821,"atabases":23822,".GetType":23823,"Learn":23824,"]=\"":23825,"ĠGary":23826,"otive":23827,"Ġash":23828,"Ġbib":23829,"XXXX":23830,"Ġbalanced":23831,"VALUE":23832,"ĠNat":23833,"_Ad":23834,"<":23976,"Ġfool":23977,"Ġesk":23978,".Null":23979,"ĠDies":23980,"_OUTPUT":23981,"_TYPED":23982,"Ġpainted":23983,"Ġsophistic":23984,"ĠBear":23985,"*n":23986,"_PACK":23987,"Ġdelivering":23988,"ĠCOUNT":23989,"åįķ":23990,"Ġjeg":23991,"-car":23992,"fname":23993,"Ġranging":23994,"ĠNeg":23995,"/******/":23996,"ĠCHAR":23997,"Ġultra":23998,"Grad":23999,"=t":24000,"Ġjudges":24001,"ĠDise":24002,"anners":24003,"Ġscal":24004,"_cal":24005,"ĠCONNECTION":24006,"_embed":24007,"(fn":24008,"ĠCraft":24009,"ĠPas":24010,"\")->":24011,".convert":24012,".resource":24013,"ĠSTATUS":24014,"ông":24015,"ĠTit":24016,"Ġclassroom":24017,"ĠArchitect":24018,"ĠKings":24019,"Ġsteady":24020,"/*!Ċ":24021,"ĠGene":24022,")\";Ċ":24023,"icia":24024,"stan":24025,"ĠConstruction":24026,"umper":24027,"wc":24028,"ĠCBS":24029,"inging":24030,"-party":24031,"(driver":24032,"MARK":24033,"Ġnested":24034,"eward":24035,"Ġdependency":24036,"Ġmales":24037,"ĠONE":24038,"ĠProduction":24039,"][$":24040,"ãĥ¼ãĥ":24041,"_LOAD":24042,"ĠBol":24043,"elry":24044,"łéϤ":24045,"ĠRequire":24046,"Ġplacing":24047,"xxx":24048,"CALE":24049,"Ġthumb":24050,"Choose":24051,"Ġprototype":24052,"VOID":24053,"Ġlesbian":24054,"Ġtraits":24055,"Sharp":24056,"Ġconsume":24057,"Truth":24058,"ĠactionPerformed":24059,"ĠEnvironmental":24060,"ĠDean":24061,"Ġestado":24062,"same":24063,"Ġnumeric":24064,"Ġtransit":24065,".Email":24066,"-side":24067,"_RUN":24068,"ĠVillage":24069,"_OPEN":24070,"è¦":24071,".rem":24072,"-warning":24073,"anya":24074,"PropertyChanged":24075,"Ġ(!_":24076,"(check":24077,"ilia":24078,"ĠSoft":24079,"steps":24080,"ĠMadrid":24081,"MemoryWarning":24082,"Ġhandlers":24083,"Ġexperiencing":24084,"Ġinspect":24085,"buttons":24086,"ReceiveMemoryWarning":24087,"chemy":24088,"Links":24089,"Ġurllib":24090,".SystemColors":24091,"ĠEigen":24092,"Ġpunishment":24093,":UIControl":24094,"bara":24095,"-set":24096,"Ġ}čĊčĊčĊ":24097,"Ġtolerance":24098,"Ġinterfaces":24099,".redirect":24100,"ighbors":24101,"csrf":24102,"_background":24103,".Utils":24104,"_HT":24105,"ĠInterest":24106,"imos":24107,"Ġgrants":24108,"Ġexamined":24109,"ÐĶ":24110,"Ġcf":24111,"forge":24112,"backs":24113,"ĠObjects":24114,"_sent":24115,".entry":24116,"ĠTHEN":24117,"ellido":24118,"cia":24119,",res":24120,"/stdc":24121,".nd":24122,"(Int":24123,"ĠAuthors":24124,"ĠAppCompatActivity":24125,"'{":24126,"Ġmedi":24127,"Music":24128,"igm":24129,"ceipt":24130,"Ġauss":24131,"Ġtargeting":24132,"ĠKeys":24133,"hn":24134,":]Ċ":24135,"Ġmineral":24136,"î":24137,".ca":24138,"omed":24139,"Ġsheets":24140,"Ġcamb":24141,"Ġdeadly":24142,".inject":24143,"(unit":24144,"ĠSelection":24145,".gms":24146,"(connection":24147,"Ġ$(\"":24148,"émon":24149,"ĠCurrently":24150,"pte":24151,"_paths":24152,"leaf":24153,"Ġimplications":24154,"posal":24155,"ä½į":24156,"[/":24
157,"ancia":24158,"éĽ":24159,"mul":24160,"cie":24161,"Ġgeile":24162,"imals":24163,"UIView":24164,"Ġsurre":24165,"serialize":24166,"ISO":24167,"Ġarbitrary":24168,"Ġsockaddr":24169,".fn":24170,"ĠMerc":24171,"Ġcasting":24172,"KeyDown":24173,"ĠnewValue":24174,"opens":24175,"Todo":24176,"Ġflexibility":24177,"ĉĉĉĉĠĠ":24178,"Velocity":24179,"ún":24180,"rowing":24181,"Ġcomputed":24182,"`)Ċ":24183,"statement":24184,"Ġri":24185,"_cart":24186,"Low":24187,"transfer":24188,".nav":24189,"Ġgrave":24190,"ĠDoor":24191,"ĉalert":24192,".subscribe":24193,"-profile":24194,"ĉbase":24195,"ĠâĪĴ":24196,"__ĊĊ":24197,"Ġengineers":24198,"Ġexplosion":24199,"Ġdari":24200,"ĉLog":24201,"onal":24202,"Ġisolated":24203,"{i":24204,"ĠMsg":24205,"Future":24206,"Ġracist":24207,"-wrap":24208,"ĠVers":24209,"borg":24210,"ISION":24211,"ĠÑĢаÐ":24212,"ĠYan":24213,"initWith":24214,"Ġnomin":24215,"(empty":24216,"ÃŃn":24217,"ãĤ¤":24218,"ĉwidth":24219,"Ġchamber":24220,"/ajax":24221,"EMP":24222,"Ġneces":24223,"ivos":24224,"logic":24225,"*)&":24226,"cripts":24227,"RowAt":24228,"iblings":24229,"Ġears":24230,"Ġcomputing":24231,"Ġmaker":24232,"ĠNeither":24233,"breadcrumb":24234,"Ġserialize":24235,"ĠWithin":24236,"Ġdell":24237,"_TRACE":24238,"=a":24239,"Ġwishes":24240,"-inch":24241,"ĠDor":24242,"Ġinnocent":24243,"ĠDol":24244,"Ġintens":24245,"forced":24246,"ĠBIT":24247,"Ġphotographs":24248,"Ġcasa":24249,"ĠLen":24250,"\\Framework":24251,".Simple":24252,"Ġdear":24253,")/(":24254,"ippi":24255,"Ġowns":24256,"Players":24257,"Ġproposals":24258,".pi":24259,"usalem":24260,"Damage":24261,"Ġcalories":24262,"ĠCreative":24263,"Ġ[$":24264,"Ġ//čĊ":24265,"AndView":24266,"ème":24267,".custom":24268,"_factory":24269,"commands":24270,"_look":24271,"Ġstrcmp":24272,"YN":24273,"aired":24274,"Ġaudit":24275,"оÑģÑĤ":24276,"ĠReverse":24277,"ropriate":24278,"etics":24279,"';Ċ":24352,"Ġpepper":24353,"Ġshed":24354,"ĠMedium":24355,"ĠCookie":24356,"Ġoverseas":24357,"edor":24358,"asurement":24359,"åŃĺ":24360,"Ġ'.'":24361,"Ġphp":24362,"ĠPROC":24363,"Ġexceptional":24364,"(th":24365,"ĠJet":24366,"Ġoccupied":24367,".setImage":24368,"ĠRelated":24369,"ucker":24370,"Members":24371,"PRINT":24372,"ĠGlo":24373,"_VIEW":24374,"}\",Ċ":24375,"Ġadoption":24376,"[])Ċ":24377,"ĠMissouri":24378,"ĠLincoln":24379,"erald":24380,"Popup":24381,"Ġfate":24382,"-bootstrap":24383,"fections":24384,"ĠPoll":24385,"_ARGS":24386,"inance":24387,"-home":24388,".),":24389,"_done":24390,":ĊĊĊ":24391,"Ġdiscussing":24392,"ĠSQLException":24393,"Ġelectro":24394,"ĉreq":24395,"Ġzw":24396,"Ġlui":24397,"Ġovernight":24398,"$user":24399,"ĠWAY":24400,"Ġallerg":24401,"Ġdisappointed":24402,"Ġradiation":24403,"Ġimpressed":24404,"ificates":24405,"Ġtob":24406,"CLASS":24407,"Ġcuda":24408,"_det":24409,"-post":24410,"ulu":24411,"Translation":24412,"-hand":24413,".year":24414,"ĠMongo":24415,"Ġunclear":24416,".engine":24417,"WEBPACK":24418,"rices":24419,"_ACCESS":24420,"Ġholidays":24421,"percent":24422,".Identity":24423,"ĠGov":24424,"Ġpassionate":24425,"!!.":24426,"ĠGreece":24427,"plusplus":24428,"'));":24429,"GP":24430,"Ġexcit":24431,".tabPage":24432,"_cond":24433,"Ġsponsor":24434,"MODULE":24435,"_proc":24436,"Ġ$Ċ":24437,"Ġrational":24438,".Tool":24439,"Ġihr":24440,"cca":24441,"åĵģ":24442,"ĠEstate":24443,"IBUTE":24444,"ActionPerformed":24445,"ĠSolar":24446,"¦Ĥ":24447,"Ġequity":24448,"tid":24449,"Ġrecip":24450,".simple":24451,"mk":24452,"ĠLuke":24453,"ĠGuardian":24454,"Ġencrypted":24455,"Ġdominant":24456,".place":24457,"ĠNV":24458,"Ġtongue":24459,"(Get":24460,"Ġstainless":24461,".Play":24462,"Ġeb":24463,"aci":24464,".buffer":24465,"
readcrumbs":24466,"Ġvaccine":24467,"prom":24468,"ĠuserInfo":24469,"Ġslug":24470,"SerializedName":24471,"-wide":24472,"Ġreactions":24473,"ĠYang":24474,"ĠAdds":24475,"(userId":24476,"Ġplates":24477,"ĠMEM":24478,"Ġbail":24479,"Inside":24480,"eted":24481,"Ġelsif":24482,"Ġsake":24483,"Ġcycles":24484,"ĠìĹ":24485,"ĉI":24486,"-collapse":24487,"ĠGMT":24488,"Declaration":24489,"Ġgros":24490,"Ġreaches":24491,"Ġcustody":24492,"Until":24493,"tu":24494,"ĠChen":24495,"Ġnx":24496,"(addr":24497,"ĠOffer":24498,"Ġcolleg":24499,"assador":24500,"Ġmapper":24501,"ĠSIGNAL":24502,"ĠBloom":24503,"ĠHoll":24504,"ĠImper":24505,"-des":24506,"_site":24507,"Proc":24508,"Equ":24509,"Ġatomic":24510,"ĠWoman":24511,"sent":24512,"scar":24513,"Ġintelligent":24514,"ĠGetting":24515,"ĠRegistration":24516,"ĠPhill":24517,"Ġkiller":24518,"unicode":24519,"ĊĉĉĊ":24520,"ĠJacob":24521,"ĠConst":24522,"Ġlocate":24523,"Ġcaus":24524,"ĠScholar":24525,"Ġconstitutional":24526,"Ġinflation":24527,"ĠGot":24528,"=array":24529,"endum":24530,"Ġtranslated":24531,"Ġdivorce":24532,"Entries":24533,"Ġsor":24534,"ĠQuote":24535,"irlines":24536,"UK":24537,"Ġexcel":24538,"(opt":24539,"ĠADV":24540,",:,":24541,"Ġcontacted":24542,"ĠDA":24543,"Ġrings":24544,"ĠIndustrial":24545,".getContext":24546,"Ġforgotten":24547,"ĠTan":24548,"Ġpants":24549,"Ġov":24550,"Ġdecoder":24551,"ĠPartial":24552,"Ġvc":24553,"Ġbattles":24554,"Arial":24555,"FRINGEMENT":24556,"irates":24557,",w":24558,"aintenance":24559,"ĠOd":24560,"ĠTechnologies":24561,"åīį":24562,"ĠCarter":24563,".findAll":24564,"Nome":24565,"Ben":24566,"ĠUsage":24567,"ĠPicture":24568,"Ġbadly":24569,"_panel":24570,"Ġpatent":24571,"ĠProtocol":24572,"lotte":24573,"ĉplayer":24574,"jections":24575,"Ġdou":24576,"_release":24577,"urniture":24578,"_tax":24579,"ĠFields":24580,".dataset":24581,"_master":24582,"CLUDE":24583,"ĠPharm":24584,"bst":24585,"Ġoperational":24586,".cell":24587,"Ġidentifying":24588,"Ġjwt":24589,"tuple":24590,"ĠTC":24591,"ĠCro":24592,"ixmap":24593,"-components":24594,"general":24595,"Ġoz":24596,"_De":24597,"_double":24598,"ĠToo":24599,".ViewGroup":24600,"gate":24601,"dings":24602,"photos":24603,"Ġgrande":24604,"ollect":24605,"_lin":24606,"Ġawful":24607,"filters":24608,"Ġalternate":24609,"esp":24610,"Ġcompress":24611,"eo":24612,"ĠScale":24613,"Ġindirect":24614,"Ġinvoice":24615,"ĊĊĊĊĊĊĊĊĊĊĊĊĊĊĊĊ":24616,"Starting":24617,"ĠPlayers":24618,"iele":24619,".then":24620,"Ord":24621,"ĠTuple":24622,"Ġbout":24623,"ĠStatistics":24624,"Preview":24625,"Ġpuzzle":24626,"ĠWidth":24627,"STATE":24628,"Ġoverlay":24629,"ĉon":24630,"Ġinfr":24631,"Ġsmallest":24632,"locked":24633,"ÑĤо":24634,"ssl":24635,"Ġdeemed":24636,"Ġsco":24637,"reck":24638,"ĠjButton":24639,"Ġmissions":24640,"ç§°":24641,".SelectedIndex":24642,"TABLE":24643,"Sept":24644,"Ġacknowledge":24645,"Ġstrtotime":24646,"ĠTell":24647,"ĠDak":24648,"Ġaluminum":24649,"Ġfence":24650,"ĠStars":24651,"CONFIG":24652,"Ġretrofit":24653,"Ġemphasis":24654,"/header":24655,"ĠSomething":24656,"inished":24657,"='\".$":24658,"ĠValidators":24659,"Ġpolar":24660,"sections":24661,".aspx":24662,"Ġaspir":24663,".Mock":24664,"CodeGen":24665,"Ġpeut":24666,"Ġaccepting":24667,"Ġbacking":24668,"Picture":24669,"/ap":24670,"ег":24671,"_SEC":24672,"-use":24673,"annotation":24674,"Ġcognitive":24675,"Ġgrip":24676,"hour":24677,"ĠLegal":24678,"Ġepic":24679,".toolStrip":24680,".notify":24681,".Last":24682,"ORIZ":24683,"Middleware":24684,"criptions":24685,"lash":24686,"_FOUND":24687,"ĠLiverpool":24688,"Ġ{}\",":24689,"Install":24690,"Ġnit":24691,"Ġfigured":24692,"[len":24693,".Win":24694,".platform":24695,"Ġgambl
ing":24696,"(dt":24697,"avery":24698,"ĉinclude":24699,"Whether":24700,"Routing":24701,"Ġtherap":24702,"Remote":24703,"ĠLoss":24704,"yll":24705,"Ġapproached":24706,"ĠVehicle":24707,"ĠAlpha":24708,"Ġvocê":24709,"answers":24710,"NSDictionary":24711,"consider":24712,"unused":24713,"ĠFan":24714,"orable":24715,"fre":24716,"ĠDISCLAIM":24717,"ĠActor":24718,".]":24719,"toHave":24720,".userId":24721,"Ġspeeds":24722,"eway":24723,"Ġrecurs":24724,"Ġг":24725,"_priv":24726,"!âĢĿĊĊ":24727,"Choice":24728,"Ġsettle":24729,"Ġplanes":24730,"'},":24731,"Tom":24732,"ITER":24733,"!\"Ċ":24734,"å»":24735,"achelor":24736,"Ġseparation":24737,"Ġdal":24738,"adj":24739,"Ġregisters":24740,"riz":24741,"ĠNotice":24742,"Ġlu":24743,"Ġcourage":24744,"Ġaxes":24745,"cellent":24746,".async":24747,"Ġcompatibility":24748,"ç«":24749,"Ġ!ĊĊ":24750,"ĉtitle":24751,"YLE":24752,"ĉmessage":24753,"UUID":24754,"OLDER":24755,"ĠHH":24756,"ĠStyleSheet":24757,"Ġaccessed":24758,".validation":24759,"tasks":24760,"Ġpollution":24761,".canvas":24762,"Ġingredient":24763,"ĠCabin":24764,"Ah":24765,"oldown":24766,"ĠNOI":24767,"ĠÃĹ":24768,"[f":24769,"educ":24770,"yalty":24771,"(not":24772,"_State":24773,"amen":24774,"Ġdao":24775,"udad":24776,"ellers":24777,"}&":24778,"licity":24779,"_WINDOW":24780,"Ġtatto":24781,"valor":24782,".Range":24783,"Ġreferenced":24784,"ĠReserve":24785,"Money":24786,"SCRIPT":24787,"/product":24788,"choices":24789,"Ġtin":24790,"ãĤĵ":24791,"Ġseparator":24792,"Ġpkg":24793,"ammed":24794,"ĠMAT":24795,"!!ĊĊ":24796,"Ġraid":24797,"Ġmotivation":24798,"ĠXP":24799,"ĠBackground":24800,"ĠQuaternion":24801,".defineProperty":24802,"iker":24803,"ĉparent":24804,"ĠOriginally":24805,"antage":24806,"ĠHans":24807,"Ġtimeline":24808,".cur":24809,"opic":24810,"ĠSequ":24811,"must":24812,"ĠCoal":24813,"Ġformatter":24814,"_RGB":24815,"Ġ_(\"":24816,"'}),Ċ":24817,"Ġ=================":24818,"ĠFUNCTION":24819,"Ġlng":24820,"icates":24821,"live":24822,"_engine":24823,"Ġtowns":24824,"'))ĊĊ":24825,"ĠPK":24826,"(api":24827,"ĉscanf":24828,"packet":24829,".phone":24830,"áĢ":24831,"ĠAndy":24832,"_NAMES":24833,"PLY":24834,"Ġmins":24835,"imi":24836,"Ġbrick":24837,"Ġblade":24838,".stdout":24839,"}`;Ċ":24840,"Shift":24841,"ĉsb":24842,"ĠChecks":24843,"Ġphenomenon":24844,"Avatar":24845,"Ġministry":24846,"rose":24847,"ĉFile":24848,"Ġtitled":24849,"(LOG":24850,"Ġgan":24851,"design":24852,"(),čĊ":24853,"Ġbones":24854,"stm":24855,"ÅĽÄĩ":24856,"ĠInputStream":24857,"Ġvolunt":24858,"ĠSerializable":24859,"Ġfighter":24860,"ĠDrag":24861,"Twitter":24862,"Ġsubsid":24863,"ç¼":24864,"Ġforums":24865,".loading":24866,"logged":24867,"_this":24868,"Ġterrain":24869,"Ġirre":24870,"ĠIng":24871,"ĠCN":24872,"_objects":24873,".uid":24874,"Ġconsciousness":24875,"TINGS":24876,"ĠGall":24877,"Ġportray":24878,"ĠDeveloper":24879,"Ġparticipant":24880,"Ġ\";čĊ":24881,"/model":24882,"ĠOperations":24883,"^\\":24884,"ĠLater":24885,"Ġraises":24886,"-none":24887,".meta":24888,"='.$":24889,"Finished":24890,"Ġreplacing":24891,"Ġsampling":24892,"ĠJen":24893,"\"There":24894,"REAL":24895,"ALE":24896,"ìĬ¤":24897,"Orders":24898,"_parameter":24899,"ĠOlympic":24900,"Ġtrès":24901,"Ġarena":24902,"iol":24903,";?>":24904,"Ġimpacts":24905,"ĠWS":24906,":get":24907,"Ġflights":24908,"ĠRussell":24909,"camera":24910,"Fn":24911,"sigma":24912,"Ġforcing":24913,"Ġlocals":24914,"Ġdeparture":24915,"Ġcelebration":24916,"ĠSay":24917,"ï¼Ĵ":24918,"ĠHills":24919,".hasOwnProperty":24920,"Ġtypings":24921,".API":24922,"Ġdonation":24923,"OperationException":24924,".Activity":24925,"cplusplus":24926,"ĠCharlie":24927,"Ġimported":24928,"Ġdann":2492
9,"Ġoccasions":24930,"Ġimplementing":24931,"Ġpurple":24932,".dialog":24933,"SQLException":24934,"erno":24935,"Ġwars":24936,"Ġpaste":24937,"Ġdecreased":24938,"Ġharsh":24939,"Ġelabor":24940,"inputs":24941,"ĠViews":24942,"ĠerrorMessage":24943,"_mul":24944,"ĉwrite":24945,"ĠCop":24946,"ĠAnnual":24947,"(button":24948,"Ġvida":24949,"bars":24950,"ĠHarvard":24951,"ĉexpect":24952,"Ġindexes":24953,"Ġdocumentary":24954,"Ġflesh":24955,"ORLD":24956,"ĠDelta":24957,"MAND":24958,"Brush":24959,"-column":24960,"Ġdevelopments":24961,"methodVisitor":24962,"slice":24963,"ĠPDO":24964,"Ġinvesting":24965,"irable":24966,"Ġxmlns":24967,"ï¼Ľ":24968,"arta":24969,"Ġtheories":24970,"_city":24971,"Ġ$__":24972,"Creating":24973,"(pr":24974,"Dropdown":24975,"ismatch":24976,"ĠNET":24977,"'])){Ċ":24978,"ĠValues":24979,"ĠSEO":24980,"ĠSTAT":24981,"Ġecosystem":24982,"Ġtempt":24983,"Ġ\\\\":24984,"Ġ//{Ċ":24985,"ĠChristopher":24986,"ĠKentucky":24987,"ĠHttpServletResponse":24988,"Ġhybrid":24989,"yon":24990,"Ġfeeding":24991,"ĠExtra":24992,"Norm":24993,"ITCH":24994,"ĠSean":24995,"ĠUpload":24996,"mun":24997,"pur":24998,"Ġpersistent":24999,"ĠIDC":25000,"ĠPerform":25001,".merge":25002,"_room":25003,"Meanwhile":25004,"!='":25005,"ĠWel":25006,"ArgsConstructor":25007,".Database":25008,"Ġcounting":25009,"()*":25010,"ĶåĽŀ":25011,"ĠTOP":25012,"mill":25013,"ĠDT":25014,"IGNED":25015,"ĠKB":25016,"Ġcomply":25017,"South":25018,"_collection":25019,"Chapter":25020,"Ġexplaining":25021,"_AM":25022,"_ts":25023,"cards":25024,"Ġquel":25025,"Ġpole":25026,"Ġtouchdown":25027,"ĠOthers":25028,"Ġpeers":25029,"ĠTypeError":25030,"Ġsixth":25031,"Ġcheer":25032,"Ġdispute":25033,"usc":25034,")],":25035,"thumb":25036,"Ġhiding":25037,"ĠSIG":25038,"likes":25039,"ĠPAGE":25040,".Reflection":25041,"Ġheadquarters":25042,"TING":25043,"ĠGhost":25044,"MLE":25045,"$Ċ":25046,"Ġcontrary":25047,"extend":25048,"']).":25049,"FFECT":25050,"ĠPinterest":25051,"úmero":25052,"ricane":25053,"ĉsession":25054,"Ġcrystal":25055,"-Control":25056,"overnment":25057,"ograf":25058,"-action":25059,"volume":25060,"ften":25061,"Ġuncon":25062,"Ġanimate":25063,"Ġlease":25064,"scr":25065,"Ġrefuse":25066,"ãĢĭ":25067,"ftp":25068,"information":25069,"Ġevaluated":25070,"Ġinjection":25071,"Ġjack":25072,"Ġworkshop":25073,"注":25074,"PTH":25075,"ĠTs":25076,"offer":25077,"ĉos":25078,"Ġkingdom":25079,"Missing":25080,"Ġlawmakers":25081,"extField":25082,"Ġsinging":25083,"abi":25084,"/client":25085,".media":25086,"ATEGORY":25087,"Signature":25088,"%',Ċ":25089,"ĠFuck":25090,"][:":25091,"Ġsensors":25092,"/com":25093,"ĠPrimary":25094,".SQL":25095,"_program":25096,"Ġpills":25097,"Ġintegral":25098,"Ġfleet":25099,"Ġdropping":25100,".sl":25101,"Been":25102,"Ġpets":25103,"Ġadvised":25104,"Ġdragon":25105,"_EDIT":25106,"(im":25107,"FER":25108,"ĠDrug":25109,"(random":25110,"Ġcompression":25111,"oust":25112,"[%":25113,"Ġbuyer":25114,"hop":25115,"Roles":25116,"manage":25117,"Ġpainful":25118,"ĠBranch":25119,"-modal":25120,"enant":25121,"ĠMesh":25122,"/font":25123,"ĠGraham":25124,"Ġâĺ":25125,"Ġnc":25126,"ĠFrancis":25127,"Ġspecification":25128,"Ġdamages":25129,"-config":25130,"Ġtheoret":25131,"secure":25132,"_multi":25133,"aceutical":25134,"Ġdemanding":25135,"enne":25136,"ISTS":25137,"()));ĊĊ":25138,"Reason":25139,"Recent":25140,"phase":25141,"Ġpsy":25142,"_MAN":25143,"Ġvolunteer":25144,"å¿":25145,"istributed":25146,"lio":25147,"Ġproductivity":25148,"_comm":25149,"Spring":25150,"nis":25151,".weight":25152,"ĠCancer":25153,"Alloc":25154,"ĠTweet":25155,"Ġseparately":25156,"ĉcheck":25157,"_properties":25158,".Unit":25159,"_CLK":25160,"Ġ
gt":25161,"Ġ();ĊĊ":25162,"Ġhandy":25163,"ĠThompson":25164,"Ġunnecessary":25165,"ĠReader":25166,"GN":25167,"=request":25168,"ĠUtility":25169,".Repository":25170,"ĠAx":25171,"hydr":25172,"ieu":25173,"Ġthy":25174,"Ġlt":25175,"_mail":25176,"ä¿®æĶ¹":25177,"ailand":25178,"ĠPhilip":25179,"Ġbitter":25180,"Ġbetting":25181,"Ġtimed":25182,"ocks":25183,"'a":25184,"Ġalgorithms":25185,"Ġreinterpret":25186,"Ġtoss":25187,"rogen":25188,"Ġhoped":25189,"(selected":25190,"Ġventure":25191,"TEX":25192,"ĠLeave":25193,".Substring":25194,"Ġgrateful":25195,"uka":25196,"ĠConsumer":25197,"Ġaggreg":25198,"Circle":25199,"à¸ģ":25200,"_blocks":25201,"Ġlegally":25202,"Ġ\"|":25203,"ãĥĥ":25204,".board":25205,".Ab":25206,"Functions":25207,"recipe":25208,"èĩ":25209,"ĠOxford":25210,"Ġwholes":25211,".Build":25212,"_changed":25213,"hai":25214,"Ġdepartments":25215,"Imp":25216,"Ġcoalition":25217,"INFRINGEMENT":25218,"Ġempower":25219,"itches":25220,"North":25221,"Ġinflamm":25222,"ONSE":25223,"Ġmissile":25224,"ĠRaj":25225,"ĠIssue":25226,"Ġatoi":25227,"caled":25228,".Controllers":25229,"ĠWolf":25230,"Ġcrushers":25231,"á»ĩ":25232,".Auth":25233,".addAttribute":25234,"his":25235,"Ġboots":25236,".clean":25237,"camp":25238,"Ġtenant":25239,"Ġtune":25240,"Ġ{}'.":25241,"Ġworkout":25242,"Repo":25243,"Ġpartially":25244,"MISSION":25245,"jamin":25246,"ĠSB":25247,"Ġdetermination":25248,"Ġ'');Ċ":25249,"ĠBeng":25250,"Ġvos":25251,"Ġinhab":25252,"/lang":25253,"sburgh":25254,"Executor":25255,"hone":25256,"ĠChallenge":25257,"_links":25258,".Level":25259,"Ġunderground":25260,"-code":25261,"Ġoptimization":25262,"logging":25263,"_dest":25264,"Ġsnake":25265,"Ġchemicals":25266,"_IMPORTED":25267,"adoop":25268,"ĠTHAT":25269,"managed":25270,"Ġreduces":25271,"ĠREAL":25272,"ĠGuy":25273,"_GENERIC":25274,"/********************************":25275,".amount":25276,"Ġdere":25277,"getTime":25278,"Ġpant":25279,"anonymous":25280,"Ġharmony":25281,"ĠAlan":25282,"Ġscenarios":25283,"Ġdirt":25284,"htags":25285,"Mc":25286,"Shell":25287,"rin":25288,"{čĊčĊ":25289,".pow":25290,"ĉclient":25291,"Ġconspiracy":25292,"Ġadmission":25293,"ĠRegional":25294,"ĠViewController":25295,"ĠPhilippines":25296,"Ġdepos":25297,"Ġpap":25298,"ĠPad":25299,"Paul":25300,".ComboBox":25301,"Ġtutor":25302,"ĠRecipe":25303,"writing":25304,"Ġcontributor":25305,"OTH":25306,"Small":25307,"VI":25308,"Ġhacer":25309,"equ":25310,"ĠExamples":25311,"human":25312,".messages":25313,"ĉtyp":25314,"Ġ(čĊ":25315,"ĠSSL":25316,"LEN":25317,"ĠRomney":25318,"(grid":25319,"ĉmin":25320,"Ġ>ĊĊ":25321,"Ġfruits":25322,"Ġvoter":25323,"Inline":25324,"pane":25325,"ĠCollections":25326,"charset":25327,"Ġspam":25328,"zb":25329,"itemap":25330,"Ġsucceeded":25331,"_COL":25332,"Ġelapsed":25333,"imeter":25334,"Ġrecovered":25335,"Tensor":25336,"hattan":25337,".setup":25338,"isto":25339,"(head":25340,"ĠSIZE":25341,"Ġtactics":25342,"Ġdistur":25343,"Ġpreval":25344,"icios":25345,"(Value":25346,"_cols":25347,"ĠFat":25348,"Ġseal":25349,"Ġsons":25350,"Ġensures":25351,"Ġpressing":25352,"=&":25353,"igenous":25354,"Ġharassment":25355,"_JSON":25356,"Ġignor":25357,"ynomial":25358,"omer":25359,"_static":25360,"Ġsignificance":25361,"Ġcircles":25362,"_System":25363,"Ġdiscipline":25364,"Ġdressed":25365,"Ġsphere":25366,"Ġclimb":25367,"_actions":25368,"ĠBab":25369,"Ġ'=',":25370,"_schema":25371,"\"use":25372,"Ġunders":25373,"Ġcups":25374,".screen":25375,"/new":25376,"Ġappearing":25377,"TOP":25378,"vised":25379,"clang":25380,"Ġinvestigators":25381,"Ġmysterious":25382,"Ġpromising":25383,"Ġqualify":25384,"Ġcave":25385,"Ġequip":25386,"=x":25387,"GT":25388,"(link":25389,"
.velocity":25390,".erase":25391,"oter":25392,"++++++++":25393,"profit":25394,"Ġzones":25395,"_uid":25396,"-ser":25397,"Ġobjectives":25398,"Ġmilf":25399,"webkit":25400,"(match":25401,"neh":25402,"ĠAssociated":25403,"ĠTodo":25404,"=d":25405,"Cam":25406,"Ġvocal":25407,"Ġsudo":25408,"(EX":25409,"Ġtrou":25410,"ABC":25411,".bean":25412,"ĠGround":25413,"ĠREST":25414,"weets":25415,"Ing":25416,"imon":25417,"_bus":25418,"ĠCOLOR":25419,"unto":25420,"Ġfoss":25421,"ĠLinks":25422,"äng":25423,"/forms":25424,"prises":25425,"Ġachievement":25426,"CALL":25427,"елÑĮ":25428,"ĠVerify":25429,"_SOURCE":25430,"aptcha":25431,"IDD":25432,"_reference":25433,"Gold":25434,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":25435,"Receiver":25436,"Ġaj":25437,"_direction":25438,"}]":25439,"ĠCompet":25440,"Ġbang":25441,"ĠCass":25442,"-url":25443,"techn":25444,"ĠJerusalem":25445,"longitude":25446,"');čĊčĊ":25447,"Ġwinners":25448,"Tasks":25449,"ĠDMA":25450,"Ġtooltip":25451,"İ·":25452,"ĠBra":25453,"_duration":25454,"cury":25455,"parents":25456,"---->(":25526,"ĠKir":25527,"Ġintros":25528,"Ġsketch":25529,"Ġskilled":25530,"Ġimmer":25531,"Ġadequate":25532,"_rep":25533,"(header":25534,"_like":25535,"Ġperceived":25536,"ssh":25537,"Ġassuming":25538,"Ġff":25539,"_uuid":25540,"ulas":25541,"Ġdemocratic":25542,".entities":25543,"Series":25544,"aphore":25545,"Ġnewer":25546,"}(":25547,"SEC":25548,"airo":25549,"Ġcommod":25550,"Ġprivilege":25551,"Ġdeux":25552,"ĠHop":25553,".'/":25554,"ctic":25555,".';Ċ":25556,"C":25630,"ĠWarren":25631,"Ġoptimizer":25632,"ĠSERVICES":25633,"_oper":25634,"getAttribute":25635,"ĠMcK":25636,"_self":25637,".rs":25638,"\")ĊĊĊ":25639,"GetComponent":25640,"erce":25641,"Ġtous":25642,"units":25643,"']);čĊ":25644,"Zoom":25645,"/E":25646,"Ġobsc":25647,"Ġfastest":25648,"online":25649,"Ġpeaceful":25650,"ffen":25651,"Ġcargo":25652,"ĉpr":25653,"Ġseeks":25654,"zu":25655,"Trim":25656,"Ġward":25657,"Ġverd":25658,"Ġblogs":25659,".exceptions":25660,"ĠPremium":25661,"ĠNetherlands":25662,"Safe":25663,"Finish":25664,"ĠAlbum":25665,"_ACC":25666,"=this":25667,"virtual":25668,"]>":25669,"_LABEL":25670,"ĠNich":25671,"_win":25672,"ĠAaron":25673,"WP":25674,";$":25675,"aims":25676,"ĠImageView":25677,"Ġendless":25678,"ERA":25679,"_DISABLE":25680,"Ġcancelled":25681,"-us":25682,"Ġinspection":25683,"emin":25684,"ĠGrey":25685,"-open":25686,"Ġiterations":25687,".owner":25688,"Ġkeras":25689,".Password":25690,"ĠRy":25691,"ĠINS":25692,"Air":25693,"ĠSeveral":25694,".TabStop":25695,"INGLE":25696,"ĠHair":25697,"ĠCanvas":25698,"AAAA":25699,"Ġflaw":25700,"cedes":25701,".Report":25702,"íĬ":25703,"ĠTips":25704,"criptors":25705,".transaction":25706,".Spring":25707,"Ġviewer":25708,"Ġinsights":25709,"è¾ĵ":25710,"ordion":25711,"UINT":25712,"seek":25713,"ĠAuf":25714,"ìŀIJ":25715,"Ġstrain":25716,"Tooltip":25717,"Ġdz":25718,"ignal":25719,"adt":25720,"Ġuc":25721,"finite":25722,"Ġnm":25723,".cmd":25724,"ĠMySql":25725,"[data":25726,".jackson":25727,".tree":25728,"RequestParam":25729,"_agent":25730,"\")]čĊ":25731,"Ġassass":25732,"(Constants":25733,":ss":25734,"ĠMAN":25735,"+-+-":25736,"ĠBottom":25737,"prints":25738,"ĠSame":25739,"@Autowired":25740,"swap":25741,"ición":25742,"Ġprotesters":25743,"Ġhoney":25744,"ĠVeter":25745,"(Calendar":25746,"-ad":25747,"ĠBrooklyn":25748,"Life":25749,"_VAR":25750,"zech":25751,"ĠCALL":25752,"_CAST":25753,"ĠElection":25754,"Ġthickness":25755,"Very":25756,"_INTEGER":25757,"-dev":25758,"))))":25759,"apat":25760,"oooo":25761,"demo":25762,"ĠparseFloat":25763,"ĠRather":25764,"STIT":25765,"maker":25766,"[current":25767,"chrono":25768,"Ġchrist":25769,"ãģª":25770
,"ĠDetail":25771,"ưá»":25772,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":25773,"Ġsul":25774,"idency":25775,"Que":25776,"Ġelegant":25777,"apons":25778,"Ġdishes":25779,"Ġintegers":25780,"(read":25781,"findViewById":25782,"ĠAmount":25783,"ĠSkip":25784,"Ġhabits":25785,"*)(":25786,"Ġmonsters":25787,"MAC":25788,":end":25789,"Ġfrank":25790,"Assembly":25791,"Ġdfs":25792,"Ġneut":25793,"_TYPES":25794,"equal":25795,"loyd":25796,"(uri":25797,"Ġchi":25798,"Ġdefendant":25799,"Ġconflicts":25800,"Ġvil":25801,"-js":25802,"ĠPeace":25803,"Ġmutable":25804,")sender":25805,"ĠFocus":25806,"建":25807,"Ġappreciated":25808,"sleep":25809,"ĠRED":25810,"Culture":25811,"Ġdesigners":25812,"_generator":25813,"codes":25814,"/ex":25815,".GetValue":25816,"umbled":25817,".scalajs":25818,"peror":25819,"Ġveterans":25820,"Ġ})čĊ":25821,"Ġunfortunately":25822,"_CREATE":25823,"Mass":25824,"ĠCLAIM":25825,"ĠMeet":25826,"_support":25827,"Bank":25828,"().Ċ":25829,"Dark":25830,"_LOW":25831,"ĠMining":25832,"ĠOwner":25833,"iera":25834,"Cliente":25835,"Ġencouraging":25836,">S":25837,"Ġboyfriend":25838,"ĠHalf":25839,"ĠACC":25840,"Aff":25841,"_ar":25842,"-life":25843,"cx":25844,".JButton":25845,"izado":25846,".zero":25847,".openqa":25848,"oton":25849,".textContent":25850,"Ġtoll":25851,"atie":25852,"Ġballot":25853,"-number":25854,".Exception":25855,"ĉparams":25856,"circle":25857,"-map":25858,"Ġnap":25859,"ĠRobot":25860,"ĠIch":25861,"registration":25862,"Amazon":25863,"rollment":25864,"(exp":25865,"Ġtanks":25866,"ĠGordon":25867,"Ġmachinery":25868,"Ġbaseline":25869,"æĭ":25870,"Ø©":25871,"ĠConvention":25872,"ĉconfig":25873,"ookies":25874,"mult":25875,"Records":25876,"ĠEST":25877,"Ġgarbage":25878,"Ġconform":25879,"idal":25880,"Ġbarg":25881,"Ġsurvived":25882,"Ġinvestigations":25883,".containsKey":25884,"--------------------------------------------------------------------------Ċ":25885,"ortion":25886,"Ġhorr":25887,"_http":25888,"Ġmant":25889,"];čĊčĊ":25890,"binary":25891,"empl":25892,"Ġinquiry":25893,"ĠMeanwhile":25894,"Ġcollecting":25895,".EntityFramework":25896,"\",ĊĊ":25897,"ĠPic":25898,"@Inject":25899,"ickness":25900,"ĠBinding":25901,"Ġcontrolling":25902,"reverse":25903,"Ġchairs":25904,"sembled":25905,"(add":25906,"Disabled":25907,"anas":25908,".translate":25909,"-----------Ċ":25910,"Ġreflected":25911,"\"]ĊĊ":25912,"External":25913,"Arrow":25914,"Singleton":25915,"%x":25916,"ĠÅ":25917,"Ġancest":25918,"ĠOrleans":25919,"ĉcmd":25920,"Ġprohibited":25921,"ithmetic":25922,"(channel":25923,"_css":25924,"Forward":25925,".socket":25926,"Ġluc":25927,"âĨ":25928,"ĠFirefox":25929,"ĠMovies":25930,")_":25931,".ends":25932,"(shape":25933,"Ġdealt":25934,"Ġsaves":25935,"Ġglory":25936,"Ġmejor":25937,"Ġbreathing":25938,"Ġeller":25939,"getData":25940,"Ġangles":25941,"Ġtoolbar":25942,"Ġspacing":25943,"IPS":25944,"Ġfloors":25945,"_ACTIVE":25946,"Ġshuffle":25947,"/shared":25948,"ĠEle":25949,"edish":25950,"Ġwebcam":25951,".expect":25952,"iloc":25953,"ĠIncludes":25954,"Ġtweeted":25955,"Ġ:)":25956,"ĠEssay":25957,"Fix":25958,"-between":25959,"_web":25960,".conv":25961,"Ġracism":25962,"Ġreflects":25963,"umm":25964,"иÑĤе":25965,"_footer":25966,"/docs":25967,"ĠPour":25968,"NgModule":25969,".initialize":25970,"patterns":25971,"_In":25972,"ĠAbb":25973,"*čĊ":25974,"Ġsentiment":25975,"buff":25976,"_counts":25977,"Ġreuse":25978,"chunk":25979,"Ġimposed":25980,"PrimaryKey":25981,"Foreground":25982,"Ġconsumed":25983,"?!":25984,"Ġdick":25985,"Ġchron":25986,"ĠFern":25987,"Ġresponsive":25988,"Ġinsect":25989,"iculty":25990,"Ġrw":25991,"Ġalike":25992,"Ġsubset":25993,"ĠCookies
":25994,"ĠPair":25995,"Ġtier":25996,"IFO":25997,"avour":25998,"ĠQU":25999,",sizeof":26000,"Ġmerged":26001,"mv":26002,"itol":26003,"ylon":26004,"Ġjumped":26005,".role":26006,"ensaje":26007,"Rules":26008,"Ġbrowse":26009,"Animator":26010,"Ġyoga":26011,"Ġvariants":26012,"Ġcourtesy":26013,"uran":26014,"pbs":26015,"elseif":26016,"Alt":26017,"ĠLane":26018,"CLK":26019,"IMARY":26020,"_PROPERTY":26021,"ï¼IJ":26022,"Ġchan":26023,"Ġgradually":26024,"Ġshake":26025,"Ġblonde":26026,"...\");Ċ":26027,"-sex":26028,"Ġgameplay":26029,"acies":26030,".refresh":26031,"USB":26032,"ĠPlot":26033,"Was":26034,"issippi":26035,"ĠTensor":26036,"Ġcryptocurrency":26037,"Ġdifficulties":26038,"Deleted":26039,"Without":26040,"_append":26041,"_ver":26042,"\"))čĊ":26043,"Ġhonestly":26044,"Ġpivot":26045,"Ġtemps":26046,"_ps":26047,"ĠUnlike":26048,"[:-":26049,"VS":26050,"_inf":26051,"Ġjunior":26052,"Ġanimations":26053,"Ġfilepath":26054,"?{{$":26076,"Ġunicode":26077,"places":26078,"ĠCoffee":26079,".SE":26080,"ĠPAR":26081,"(txt":26082,"gebra":26083,"Ġfires":26084,"MainWindow":26085,"medium":26086,"Ġ(âĢľ":26087,"Ġlg":26088,"Ġcmp":26089,"/base":26090,"_layers":26091,"_entries":26092,"Ġadminister":26093,"ĠSUCH":26094,"BP":26095,"ĠScottish":26096,"ĉčĊĉčĊ":26097,"guard":26098,"ĠStrong":26099,"Insn":26100,"ĠCAP":26101,"asury":26102,"ĠSEE":26103,"Clock":26104,"erie":26105,"\\models":26106,"Ġ$$":26107,"ĠCab":26108,"Ġwurde":26109,"Ġsoldier":26110,"Ġclips":26111,"Ġarrangement":26112,"ĠWonder":26113,"ĠHorn":26114,"Ġscared":26115,"Ġcure":26116,"mkdir":26117,"Ġaligned":26118,"ĠPink":26119,"Ġlanded":26120,"Dimension":26121,"ScrollPane":26122,".chat":26123,".With":26124,"ĠTrain":26125,"].Ċ":26126,"Ġthirty":26127,"Ġdurable":26128,"Ġld":26129,"Ġlateinit":26130,"Ġcharts":26131,"Ġinsult":26132,".Fatal":26133,"_ct":26134,"Ġmasks":26135,"CLUDED":26136,"President":26137,"Ġcolours":26138,"gments":26139,".attributes":26140,"ĠFlex":26141,"ĠClock":26142,"ÃŃcul":26143,"imen":26144,"JO":26145,"ĠRegex":26146,"_LINK":26147,"Ġcouch":26148,"ĠINPUT":26149,"Ġbeating":26150,"business":26151,"preced":26152,".unit":26153,"ĠFel":26154,"Never":26155,"ospel":26156,".startswith":26157,"ĠEPA":26158,".only":26159,"Ġpreventing":26160,"yer":26161,"ColumnName":26162,"Ġelevation":26163,"flu":26164,"icycle":26165,"Ġoffline":26166,"Toolbar":26167,"Ġcompeting":26168,")].":26169,"Ġmog":26170,"ĠisValid":26171,"Ask":26172,"_av":26173,"_lat":26174,"ANC":26175,"ĠJoh":26176,"kers":26177,"Ġguards":26178,"Ġchains":26179,"ĠSimpleDateFormat":26180,".static":26181,"Ġvessel":26182,"Ġmud":26183,"Ġstabil":26184,"Ġstret":26185,"gm":26186,"amation":26187,"çľ":26188,"-with":26189,"Ġros":26190,"_PA":26191,"Ġresultado":26192,"Ġconfidential":26193,"ĠTokyo":26194,"ĉusing":26195,"ĠMathf":26196,"ombine":26197,"ĠESPN":26198,"Ġdealers":26199,"Ġdismissed":26200,"TRY":26201,"Ġteens":26202,"records":26203,"Ġwings":26204,"gallery":26205,"accounts":26206,"_LIB":26207,"Ġjacket":26208,"ĠNSObject":26209,"Ġstones":26210,"ĠDelivery":26211,"ĠDiet":26212,"/watch":26213,"Ġtoilet":26214,"ĠGuest":26215,".day":26216,"Ġintval":26217,"Visit":26218,"Ġinvestigated":26219,"Ġpentru":26220,"ĠTheatre":26221,"andidates":26222,"Lang":26223,"ĠServ":26224,"Ġcontrollers":26225,"ĠsetTitle":26226,"NP":26227,"amy":26228,"flat":26229,"(ui":26230,"_document":26231,"èĥ½":26232,"ĠCoin":26233,"ĠAdams":26234,"ptic":26235,"Ġproductive":26236,"Ġaccomplished":26237,"čĊčĊčĊčĊ":26238,"Ġdeferred":26239,"ientes":26240,"Ġsinc":26241,"olars":26242,"Rightarrow":26243,"Ġvariations":26244,"(offset":26245,".LayoutInflater":26246,"Ġsuspend":26247,"Ġpreventi
on":26248,"_private":26249,"_js":26250,"âĺħ":26251,"Ġwieder":26252,"atum":26253,"ĴĮ":26254,"Ġappearances":26255,".Document":26256,"Ġvalidates":26257,"calendar":26258,"}\";Ċ":26259,".demo":26260,"conut":26261,"Ġcorrection":26262,"ĠDeal":26263,"Ġbatteries":26264,".duration":26265,",\\":26266,"_marker":26267,"multi":26268,"Ġhalt":26269,"Ġcms":26270,"Ġshaped":26271,"Bro":26272,"reduce":26273,"Ġ####":26274,"CTOR":26275,"ĠBenef":26276,"Ġiconic":26277,"Ġpiano":26278,"Ġeffectiveness":26279,"|.Ċ":26280,"Ġajax":26281,"Ġvolumes":26282,"ม":26283,"Ġcljs":26284,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":26285,"aths":26286,"raits":26287,"大":26288,"Ñĸ":26289,"_mult":26290,"Ġfascinating":26291,"Average":26292,"Ġpré":26293,"ĠChairman":26294,".findElement":26295,"_pin":26296,"Ġcomparing":26297,"Ġdarkness":26298,"-Fi":26299,"-server":26300,"Ġselecting":26301,"sterdam":26302,"ĠParts":26303,"FORMATION":26304,"Ġnoting":26305,"Ġpile":26306,"ogs":26307,"Ġpalette":26308,"_do":26309,"itize":26310,"()(":26311,"Ġdefining":26312,"Ġremainder":26313,"Units":26314,"_TASK":26315,"HttpClient":26316,"Social":26317,"Ġfundra":26318,"NR":26319,"chest":26320,"Currency":26321,".adapter":26322,"Ġdop":26323,"unting":26324,"ANGUAGE":26325,"\"He":26326,"ĉindex":26327,"_package":26328,".Icon":26329,"Ġrepet":26330,"mass":26331,"=\".$":26332,"ĠSud":26333,"Ġlid":26334,"province":26335,"ìľ":26336,"GPIO":26337,"Ðļ":26338,"ĠMySQL":26339,"Ġdocs":26340,"ĠGA":26341,"Ġipsum":26342,"Kernel":26343,"Ġaccepts":26344,"Ġfitting":26345,"Ġcuando":26346,"Ġduplic":26347,"ĠBrother":26348,"ĠKle":26349,"nums":26350,"Ġmorph":26351,"Ġ########":26352,"ĠCGPoint":26353,"manual":26667,"ĠTechnical":26668,"Ġcorporation":26669,"ĠHW":26670,"anka":26671,"TAIL":26672,"istas":26673,"Ġperforms":26674,"ĠBehavior":26675,".For":26676,"_ORDER":26677,"ĠKick":26678,"Ġcallbacks":26679,"_dr":26680,"uego":26681,"hub":26682,"ufficient":26683,"sky":26684,"Ġbp":26685,"htable":26686,"ĠONLY":26687,"ĠAUTHORS":26688,".Argument":26689,"\"};Ċ":26690,"ĠThunder":26691,"ĠKom":26692,".Should":26693,"AUTH":26694,"ahu":26695,"_payment":26696,"Ġstarter":26697,"ìĦľ":26698,"ìļ©":26699,"Blog":26700,".patch":26701,"Ġgoverned":26702,"assy":26703,"-found":26704,"Ġtheater":26705,"ĠFontWeight":26706,"ĠBatman":26707,"\"If":26708,".Random":26709,"_delta":26710,"ĠCE":26711,"Authenticated":26712,"Ġdrone":26713,"Ġcous":26714,"radius":26715,"Mer":26716,"(None":26717,"ĠNJ":26718,"_headers":26719,"Ġamer":26720,"pytest":26721,"ĠActions":26722,"ĉĉĉĠĠĠĠ":26723,"Ġett":26724,"Ġholy":26725,"Ġuncomfort":26726,"ĠNin":26727,"ĠDecimal":26728,"ĠMessages":26729,".sender":26730,"]])Ċ":26731,"Ġembrace":26732,"Though":26733,"/sp":26734,"Ġcultures":26735,"Ġhighway":26736,"tar":26737,".fail":26738,"_hidden":26739,"ĠcomponentDidMount":26740,"ĠWright":26741,"Ġjag":26742,"_il":26743,"../../../":26744,"igu":26745,"Food":26746,"Ġace":26747,"Ġaños":26748,"USD":26749,"Ġmutual":26750,"Logic":26751,"Ġtemple":26752,"Ġbriefly":26753,"ĠTrip":26754,"classmethod":26755,"defaults":26756,"Ġchunks":26757,",,,,":26758,"ĠReason":26759,"$id":26760,"-ups":26761,"Ġdamn":26762,"Ġtrucks":26763,"Ġunlimited":26764,"Ġsculpt":26765,"ĠCards":26766,"Ġautor":26767,"ĠTesting":26768,"Ġdiese":26769,"shops":26770,"ç´":26771,"(payload":26772,"ĠPATH":26773,"ĠMemorial":26774,"Ġridiculous":26775,"egree":26776,"-winning":26777,"Ġrehab":26778,"Ġsophisticated":26779,"wpdb":26780,"ĉpath":26781,"!\";Ċ":26782,"_SYS":26783,".speed":26784,"Ġsoap":26785,"suffix":26786,"Wrap":26787,"Ġenhancement":26788,"Ãī":26789,"úb":26790,"Ġplaylist":26791,"Ġmixing":26792,"antidad":26793,"=\"\";Ċ":26794,"ĠRevision":
26795,"ĠBeat":26796,".inc":26797,"-way":26798,"encias":26799,"ulers":26800,"Cat":26801,"idel":26802,"ĠShip":26803,".setColor":26804,"Ġthreatening":26805,".modules":26806,"Ġafterwards":26807,"ĠDashboard":26808,"ĊĠĊ":26809,"Signal":26810,"Ġprimer":26811,"orneys":26812,"iciary":26813,"Ġligne":26814,"_predict":26815,"Ġaest":26816,"_https":26817,">:":26818,"ĠLex":26819,"Ġrencontres":26820,"egral":26821,"scala":26822,"_family":26823,"ÃŁen":26824,"_sym":26825,"Ġuncertainty":26826,"ĠVALUE":26827,"Ġ};čĊčĊ":26828,"Ġbroader":26829,"Ġhorses":26830,"ãģĿ":26831,"ĠKal":26832,"oba":26833,"_INET":26834,"ĠKill":26835,"jquery":26836,"amination":26837,"[@\"":26838,"Ġmuj":26839,"###Ċ":26840,"FirstOrDefault":26841,"thenReturn":26842,"Che":26843,"/footer":26844,"Ġparks":26845,"asje":26846,"ĠGulf":26847,"Ġmodest":26848,".Init":26849,"ï¼ŁĊĊ":26850,"Ġprospects":26851,"Ġsvg":26852,"Ġåı":26853,".Dialog":26854,"_NET":26855,"Ġ(($":26856,"Ġek":26857,"ĠWarning":26858,"ĠMK":26859,"":27166,"ĠRepair":27167,"_BE":27168,"Brand":27169,"uart":27170,"preview":27171,"Ġinitiatives":27172,"running":27173,"bang":27174,"ĉupdate":27175,"ĠCoach":27176,"Rich":27177,"Ġyoutube":27178,"Ġritual":27179,"appa":27180,"ĠRobinson":27181,"precision":27182,"////////////////////////////////////////////////////////////////////////////":27183,"=[]Ċ":27184,"Ġcelebrated":27185,"OTO":27186,"Ġinclusion":27187,"JP":27188,"';čĊčĊ":27189,"Ġnotable":27190,"(_.":27191,"Managed":27192,"Ġguides":27193," ":27194,"atedRoute":27195,"ĠAdjust":27196,"Ġcolored":27197,"_scores":27198,"ĠTesla":27199,"_progress":27200,".inst":27201,"['_":27202,".flags":27203,"Ġfclose":27204,"_OPER":27205,"ży":27206,"_note":27207,"Ġtransgender":27208,"åķ":27209,"RIPT":27210,"Ġabsent":27211,"Ġamet":27212,"Ġoperand":27213,"ë©":27214,"Ġhood":27215,"toLowerCase":27216,"avo":27217,"ĠCircuit":27218,"ĠLind":27219,"--}}Ċ":27220,"=m":27221,"Ġsuppress":27222,"ĠMAP":27223,"iang":27224,"-admin":27225,"Ġsidebar":27226,"ĠBu":27227,"ĠHex":27228,",F":27229,"ĠSignal":27230,"Ġtransparency":27231,"ĠFederation":27232,"/V":27233,"Req":27234,"Ġpulse":27235,"Ġtends":27236,"Numbers":27237,"%'":27238,"Ġdeport":27239,"datas":27240,"_UINT":27241,"_tra":27242,"oko":27243,"Ġ\"?":27244,"compet":27245,"solete":27246,"undry":27247,"Ġoverlap":27248,"}`,Ċ":27249,".ly":27250,"_summary":27251,"ĠLost":27252,".Center":27253,"Ġdisability":27254,".Serialization":27255,"Ġgeom":27256,"Ġ?:":27257,"ĠWo":27258,"Ġshipped":27259,"Ĥæķ°":27260,"Ġugly":27261,"Ġexcitement":27262,"Ġexterior":27263,"Ġcheckout":27264,"Ġkur":27265,",D":27266,"ĠAlaska":27267,"Ġsynthetic":27268,"ĠBudget":27269,"ĠSubscribe":27270,"Ġ&Ċ":27271,"ÈĻi":27272,"ĠYu":27273,"ĉquery":27274,"}.Ċ":27275,"Ġtraged":27276,"assen":27277,"Ġaccommodation":27278,"Ġphysician":27279,"Ġrenamed":27280,"Ġtidak":27281,"zÄħ":27282,"Ġminus":27283,"nych":27284,"_EXCEPTION":27285,"threads":27286,"Ġtire":27287,"_created":27288,"ensure":27289,"Ġworthy":27290,"Ġexcuse":27291,"Ġcloth":27292,".parentNode":27293,"/platform":27294,"ĠUFC":27295,"ĠGtk":27296,"unny":27297,"Ġgibt":27298,"keley":27299,"hum":27300,"(tx":27301,"ĉdev":27302,"Ġoutfit":27303,"doors":27304,"Ġfon":27305,"icut":27306,"volatile":27307,"Ġhomosex":27308,"Maximum":27309,"Ġexpend":27310,"Ġ});ĊĊĊ":27311,"Eq":27312,"onders":27313,"department":27314,"ĠPhysics":27315,"\"});Ċ":27316,"Ġparad":27317,".Str":27318,"Ġsele":27319,"IFIED":27320,"Ġdelivers":27321,"ivan":27322,"Ġresponsibilities":27323,"Ġadvocates":27324,"èµ":27325,"ĠRID":27326,".parameters":27327,"Metrics":27328,"ronics":27329,"ĠUITableViewCell":27330,"Absolute":27331,"ipse":27
332,"ylum":27333,"MLElement":27334,"_VALID":27335,"\\<^":27530,"Ġios":27531,"sound":27532,"\"];":27533,"Ġfreed":27534,"rottle":27535,"ĠLower":27536,"[count":27537,"åĿ":27538,"Ġpale":27539,"ĠWayne":27540,"earth":27541,"_categories":27542,"UCK":27543,".metadata":27544,"Ġsummon":27545,"HOME":27546,"олÑĮз":27547,"Ġmanufactured":27548,"Ġdock":27549,"Ġcompetitors":27550,"_MODEL":27551,"okia":27552,"ĠHey":27553,"ο":27554,"Ġbackward":27555,"ĠPOSS":27556,"ropa":27557,"Ġcri":27558,"_OBJ":27559,"Transport":27560,"-high":27561,"Ġerotik":27562,"_slot":27563,"Ġartic":27564,"_framework":27565,"-serif":27566,"ĠSqlDbType":27567,"')(":27568,"+\"/":27569,"Ġwore":27570,"Sil":27571,"Ġstoring":27572,"ĠPhase":27573,"uant":27574,"Ġbump":27575,"inho":27576,"Ġdign":27577,"Ġbacks":27578,"qq":27579,"(hash":27580,"Ġgeo":27581,"Ġtender":27582,"Logo":27583,"!)Ċ":27584,"ĠMX":27585,"ĠArthur":27586,"essoa":27587,"_Ch":27588,"Ġbedrooms":27589,"=\"#\"><":27590,"Ġthroat":27591,"insic":27592,".integer":27593,"Ġprimitive":27594,"Truthy":27595,"Ġfacilitate":27596,"Ġcreativity":27597,"ĠDNS":27598,"Ġgra":27599,"uez":27600,"Ġcountless":27601,"ĠPoland":27602,"'M":27603,"ĠDist":27604,"Ġvest":27605,"Ġcertification":27606,"á»ij":27607,"held":27608,"extensions":27609,"(static":27610,"Ġgrades":27611,"ĠUber":27612,"ãģŁ":27613,"Ġ[])Ċ":27614,"datos":27615,"ĠgetData":27616,"ĠCharg":27617,"ĠBS":27618,".microsoft":27619,".video":27620,".direction":27621,"->{'":27622,"lua":27623,"apest":27624,"Ġboiler":27625,"erek":27626,"Ġdecides":27627,".jar":27628,"ISC":27629,"ĠWords":27630,"(CON":27631,"EMPLATE":27632,"reeze":27633,"shots":27634,"apps":27635,"unted":27636,".setName":27637,"::<":27638,"-bold":27639,"ê²":27640,"å¯Ĩ":27641,"Longrightarrow":27642,"Ġunfair":27643,"Ġearning":27644,"Ġshelf":27645,"UREMENT":27646,"Ġidle":27647,"_MENU":27648,".Custom":27649,"AGER":27650,"-\"":27651,"_switch":27652,"because":27653,")view":27654,"mare":27655,"_condition":27656,"ĠStarting":27657,"Mvc":27658,"(pre":27659,"dump":27660,"_LOCK":27661,"atetime":27662,".callback":27663,"ĠCer":27664,"opol":27665,"ibrary":27666,"Ġreservation":27667,"ĉĉĉĉĉĉĉĊ":27668,"lector":27669,"graduate":27670,"Ġgenerous":27671,"Ġion":27672,"ricao":27673,"mq":27674,"_complete":27675,"(cursor":27676,"ĠFormControl":27677,":center":27678,"Ġsubstitute":27679,"ĠPlanning":27680,"Ġpension":27681,"Ġrecommendation":27682,"ĠTags":27683,"Ġgef":27684,"Ġalbums":27685,"Ġwashing":27686,"roc":27687,"Ġtrains":27688,"atings":27689,"Ġexponent":27690,"ackbar":27691,"-ln":27692,"ág":27693,".DataAnnotations":27694,"ĠEIF":27695,"ĠMalaysia":27696,"ĉPORT":27697,"onus":27698,"Ġclever":27699,"Ġpeu":27700,">ĊĊĊĊ":27701,"ĠArguments":27702,"Ġdebugging":27703,"(right":27704,"'D":27705,"compute":27706,"Ġfinest":27707,"ORAGE":27708,"Ġspectacular":27709,"phrase":27710,"Ġindia":27711,"Ġlegendary":27712,"birth":27713,"Ġcomposite":27714,"Ġgrows":27715,"ĠTD":27716,"Ġepid":27717,"Ġlaunching":27718,"]][":27719,"Minutes":27720,"ĠCha":27721,"Ġcleaned":27722,"Ġwitnesses":27723,"ukan":27724,"ĉType":27725,"Ġhabe":27726,"paragraph":27727,"ĠJPanel":27728,"ĠHann":27729,"Ġvaried":27730,"ĠPokemon":27731,"ĠMUST":27732,"åĬ¨":27733,".visibility":27734,"opup":27735,"^[":27736,".expand":27737,"Ġ\"',":27738,".fasterxml":27739,"_auto":27740,"ĠSheet":27741,"marker":27742,"Parcel":27743,"ews":27744,"ĠStrategy":27745,"-making":27746,"Ġunve":27747,"Ġtrailing":27748,"Ġclicks":27749,"ĠGetComponent":27750,"ĉcontent":27751,"IGENCE":27752,"ERNEL":27753,"NSMutableArray":27754,"Ġbreat":27755,"Ġharmful":27756,"¶Ī":27757,"Ġbesides":27758,"Ġboring":27759,"Ġbrut
al":27760,"vang":27761,"(parse":27762,"quick":27763,"Ġpytest":27764,"Ġswitching":27765,"()]Ċ":27766,"ĠìĦ":27767,"LER":27768,"ĉfont":27769,"Ġnett":27770,")]ĊĊ":27771,"(/\\":27772,"æŀľ":27773,"toArray":27774,"Ġbreed":27775,"ĠCAR":27776,"ĠWeapon":27777,"Abs":27778,"tot":27779,"ĠsetName":27780,"aptive":27781,"Ġ:,":27782,"Ġescaped":27783,"orden":27784,"ĠPri":27785,"thumbnail":27786,"Ġdescriptions":27787,"/styles":27788,"ĠPCI":27789,"Ġalphabet":27790,"asticsearch":27791,"NOTE":27792,"Ġcialis":27793,"ĠGriff":27794,"Ġporque":27795,"Ġproteins":27796,"plays":27797,"Ġstating":27798,"Ġimagination":27799,"Ġfacial":27800,"ĠMechan":27801,"Ġarranged":27802,"_used":27803,"Ġarrangements":27804,"ĠPipe":27805,"hostname":27806,"Ġprovinc":27807,"Tit":27808,".FlatStyle":27809,"ĠSplit":27810,"ĠLoader":27811,".cc":27812,"Ġclinic":27813,"----------------------------":27814,"Ġbaking":27815,"ĠENT":27816,"neath":27817,"ãĢģĊĊ":27818,"ANE":27819,".EntityFrameworkCore":27820,"appers":27821,".ic":27822,"ĠNgModule":27823,"ĠFORM":27824,"Ġ';":27825,"-profit":27826,"hw":27827,"enemy":27828,"ĠEye":27829,"Ġcaution":27830,"town":27831,"Ġurged":27832,"ĠJimmy":27833,"ynchronous":27834,"-sized":27835,"making":27836,",{":27837,"]',":27838,"_Object":27839,"ahoma":27840,"Ġactivist":27841,"INVAL":27842,"ĠCommercial":27843,"ĠOrlando":27844,"(tab":27845,"Ġب":27846,"Algorithm":27847,"Ġheritage":27848,"GetMapping":27849,"Ġfailures":27850,"rios":27851,"ativa":27852,"Ġtet":27853,"Ġcarpet":27854,"(Z":27855,"three":27856,"Ġdisclosure":27857,".ERROR":27858,"_called":27859,"Ġdial":27860,"Ġoccasional":27861,".Err":27862,"Ġfuncion":27863,"caffold":27864,"Ġreleasing":27865,"ï¼īĊĊ":27866,"_Value":27867,"ĠVari":27868,"yellow":27869,"Ġstruggles":27870,".cal":27871,"ĠDakota":27872,"ĉclose":27873,"Ġsandwich":27874,"Ġanalytics":27875,"Ġ**)":27876,"&#":27877,"ĠJos":27878,"Ġpassive":27879,"ATTR":27880,"Throwable":27881,"ĠMun":27882,"ĠUint":27883,"(disposing":27884,"arak":27885,"ĠLeaders":27886,"Ġaffecting":27887,"ĠitemView":27888,"Ġeconomics":27889,"fv":27890,"à¹Ģ":27891,".rb":27892,"ĠOverall":27893,"Ġwealthy":27894,"Ġevolved":27895,"nda":27896,"ĠHus":27897,"restrict":27898,"umen":27899,"ĠAgricult":27900,"!ĊĊĊ":27901,"Ġexpires":27902,"Ġspokesperson":27903,"interval":27904,"Ġâ":27905,"Ġqueen":27906,"(nil":27907,"ingo":27908,"Heap":27909,"Ùİ":27910,"Ġcomplain":27911,"Sym":27912,"ĠClone":27913,"ĠRu":27914,"ĠWILL":27915,"ĠCrystal":27916,"/content":27917,"ingen":27918,"ointment":27919,"LastName":27920,"avicon":27921,"ĠIBM":27922,"ĠDimension":27923,"anh":27924,"icipants":27925,"ĠAnne":27926,".progress":27927,"Ġalgo":27928,"obil":27929,"ĠVoice":27930,"ĠFE":27931,"Ġgli":27932,"Ġved":27933,"Ġprevents":27934,"\\Column":27935,"Ġfolk":27936,"etti":27937,"Ġmn":27938,"ĠCLASS":27939,"Ġdisplaying":27940,"ĠKl":27941,"ĠFerr":27942,"duto":27943,".ib":27944,"Ġdados":27945,"'name":27946,"-space":27947,"Ġitalian":27948,"Ġinverse":27949,"Ġdense":27950,"uter":27951,"ĠIEnumerator":27952,"-sign":27953,"Ġnationwide":27954,"Ġpersona":27955,"Ġsolved":27956,"Ġdramatically":27957,"Logout":27958,"Ġgrav":27959,"Ġanalyses":27960,"ollo":27961,"Ġlamp":27962,".team":27963,"ĠErot":27964,"=[\"":27965,"Ġdancing":27966,"Ġ?>/":27967,"Ġcater":27968,"ffe":27969,"ĠSha":27970,"ĠBos":27971,"ĠREQUIRE":27972,"ĠMonster":27973,"ĠRB":27974,"ĠIDE":27975,"Ġsuits":27976,"ĠformData":27977,"(theta":27978,"Ġspatial":27979,"=NULL":27980,"ĠSqlConnection":27981,"Ġà":27982,"ĠVenez":27983,"ĠMorning":27984,"Ġpublications":27985,"ĠNONINFRINGEMENT":27986,"firstName":27987,"uds":27988,"Would":27989,"_HEAD":27990,"Ġinvested
":27991,"stable":27992,"fred":27993,"Ġcommander":27994,"SES":27995,"âĢĶa":27996,"anche":27997,"ĠMovement":27998,"ë³":27999,"Suite":28000,"Ġjurisdiction":28001,"리":28002,"ĠBeth":28003,"jQuery":28004,"ĠIsa":28005,"Ġdental":28006,",*":28007,"ĠLimit":28008,"iliation":28009,"=\"{":28010,"bast":28011,"Ġturb":28012,"isy":28013,"OOK":28014,"Ġadvocate":28015,"imag":28016,"LECTION":28017,"лÑĮ":28018,"(category":28019,".dec":28020,"Ġuniqu":28021,"_sn":28022,"Ġattracted":28023,"ĠÃī":28024,"ĠRunning":28025,"_edges":28026,"ĠDisable":28027,"_AS":28028,"åĽ¾":28029,"Ġnetworking":28030,"_branch":28031,"Having":28032,"toBeTruthy":28033,"GI":28034,"Ġcamps":28035,"sep":28036,"-part":28037,"Ġ)ĊĊĊĊĊĊĊĊ":28038,"ustralia":28039,"ĠReports":28040,"rito":28041,"Ġwaist":28042,"_plus":28043,"ĠWW":28044,"-person":28045,"April":28046,"Ġsar":28047,".tar":28048,"Ġagricultural":28049,"tic":28050,"Ġtcp":28051,"ĠsetValue":28052,"agento":28053,"ĠAppe":28054,"piler":28055,"CADE":28056,"Ġanche":28057,"atcher":28058,"Ġcomics":28059,"Ġlbs":28060,"_segment":28061,"']=$":28062,"itters":28063,"icher":28064,"GINE":28065,"Ġutilize":28066,"ĠCursor":28067,"_expression":28068,"Ġdag":28069,"x":28257,".Task":28258,"money":28259,"ibaba":28260,"'});Ċ":28261,"ĠSpecific":28262,"ĠLinear":28263,"_OPT":28264,"HashCode":28265,"(Player":28266,".ContainsKey":28267,"Ġcollapsed":28268,"transparent":28269,"_RANGE":28270,"Viewer":28271,"(cfg":28272,"Ġsorting":28273,"Ġinfected":28274,"ĠNach":28275,"Ġaccommodate":28276,".elements":28277,"_PART":28278,"ĠSexy":28279,"=get":28280,"(year":28281,"Ġxhr":28282,":]":28283,"owski":28284,"Ġsummar":28285,"Ġ¿":28286,"Ġinte":28287,"Ġworkflow":28288,"ĠTaiwan":28289,"versions":28290,"åıij":28291,"Ġsurprisingly":28292,"Ġoptical":28293,"Ġproces":28294,"Ġdisagree":28295,"Ġnuevo":28296,"ĠCAM":28297,"sorted":28298,"leases":28299,"istle":28300,"Ident":28301,"ĉevent":28302,"jected":28303,"Chunk":28304,"Vars":28305,".provider":28306,"Ġproceedings":28307,"Ġinclusive":28308,"Ġartwork":28309,"endants":28310,"ï¼ļĊ":28311,"seen":28312,"Ġlig":28313,"Ġmakers":28314,"_fun":28315,"Ġlengths":28316,"PathVariable":28317,"[item":28318,"ี":28319,"Dead":28320,"FFFFFF":28321,"ĠUrban":28322,"uples":28323,"ichen":28324,"(nullptr":28325,".spec":28326,",System":28327,"URATION":28328,"(job":28329,"å¼ı":28330,"Ġtracker":28331,"ÅĻ":28332,"ĠMR":28333,"ĠSQLite":28334,"Ġdto":28335,"Ġ;;Ċ":28336,"Ġmint":28337,"ĠIntroduction":28338,"cao":28339,"Ġquestioned":28340,"Ġfitted":28341,"revision":28342,"sq":28343,"Ġmig":28344,"_units":28345,"_async":28346,"Ġflick":28347,"});ĊĊĊ":28348,"Ġnotre":28349,"}`,":28350,"Filters":28351,"Ġmundo":28352,"_days":28353,"Ġfrm":28354,"utc":28355,"Ġvals":28356,"ewidth":28357,"ĠGenerator":28358,"ĠArtist":28359,"ĠIDs":28360,"ĠArticles":28361,"reater":28362,"ĠComponentFixture":28363,".=":28364,"Ġrou":28365,"-no":28366,".bukkit":28367,"egg":28368,"ĠDiff":28369,"atics":28370,"ÑĥÑĩ":28371,"âĢĶĊĊ":28372,"ĠCharlotte":28373,"bye":28374,"Ġ});čĊčĊ":28375,"ĠVik":28376,"ĠBrow":28377,"Ġlv":28378,"ĠGib":28379,"-wing":28380,"GLIGENCE":28381,"(Il":28382,"ĠEngineer":28383,".Wait":28384,"ĠPictures":28385,"Ġrhet":28386,"Ġthermal":28387,"Ġpraise":28388,"<>();ĊĊ":28389,"ĠSpider":28390,"Pause":28391,"ĠBaker":28392,"Ġslower":28393,"Ġ}]Ċ":28394,"_enqueue":28395,"Ġdisappeared":28396,"ĠTicket":28397,"INUX":28398,"_LOCAL":28399,"аÑģÑģ":28400,"@Injectable":28401,"community":28402,"GestureRecognizer":28403,"åĽ½":28404,"Ġscales":28405,"Ġ-(":28406,"/'+":28407,"ĠSit":28408,"Ġexecutives":28409,"arding":28410,"Ġadvers":28411,"Ġbackwards":28412,"ĉcontext":28413,"ĠHam
p":28414,"ĠPF":28415,"ĠDeck":28416,"ĠCraig":28417,"American":28418,"Ġbell":28419,"Ġprol":28420,"ufen":28421,"Ġrng":28422,"arshal":28423,"ĠSimply":28424,"firstname":28425,"shore":28426,"July":28427,"Ġmortality":28428,"ĠâĨĴĊĊ":28429,"Helpers":28430,"Ġbenchmark":28431,"emade":28432,"Ġorganisations":28433,".gson":28434,"ĠTextField":28435,"Ġcivilians":28436,".Arrays":28437,"ĠMississippi":28438,"Ġintermediate":28439,"getUser":28440,"_cluster":28441,"Relative":28442,"foreign":28443,".querySelectorAll":28444,"ForeignKey":28445,"Ġreasonably":28446,"---------Ċ":28447,"Cards":28448,"ĠKam":28449,"ĠThor":28450,"Ġroller":28451,"-element":28452,"ĠCurrency":28453,"ddie":28454,"ALLY":28455,"ĠRA":28456,"Ġpermet":28457,"aaaa":28458,"Ġhomework":28459,"ĠVit":28460,"Ġmold":28461,"ĠFer":28462,"[start":28463,"Ġstatistical":28464,"Ġscary":28465,"_HOME":28466,".Begin":28467,"Construct":28468,"ogenic":28469,"ĠDEALINGS":28470,"Ġtambién":28471,"ixon":28472,".ind":28473,"acre":28474,"Ġtransforms":28475,"ĠNap":28476,".Block":28477,"ussia":28478,"piration":28479,"ulent":28480,"Ġceil":28481,"Clause":28482,"naire":28483,"TES":28484,"Ġneat":28485,"STD":28486,"ĠRegExp":28487,"perform":28488,":)":28489,"Ġunions":28490,"Ġsublic":28491,"Ġwinds":28492,"loating":28493,"glich":28494,"Ġpagination":28495,"Skill":28496,"Apply":28497,"ĠOperator":28498,"istogram":28499,"Ġqualities":28500,"Cross":28501,"Ġdecom":28502,"],\"":28503,"ĠJuan":28504,".modal":28505,".Child":28506,"ĠRoger":28507,"STITUTE":28508,":CGRectMake":28509,"alette":28510,"Ġsta":28511,"aside":28512,"Ġblur":28513,"ĠWa":28514,"ifetime":28515,"reed":28516,"controls":28517,"Ġbins":28518,"Ġпол":28519,"*/,Ċ":28520,"UIS":28521,"ĠRou":28522,"ĠDemo":28523,"-awesome":28524,"ĠChain":28525,"Ġhasta":28526,"ĠBart":28527,".KEY":28528,"Ġvendors":28529,"nofollow":28530,"ĠDest":28531,"_builder":28532,"Ġargues":28533,"_answer":28534,"goto":28535,"ĠRESULT":28536,"ĠMON":28537,"Ġpoder":28538,"oons":28539,"_CASE":28540,"Ġreplic":28541,"Ġfinancing":28542,"ĠDATE":28543,"cern":28544,"_track":28545,"ties":28546,"/logo":28547,"ĠNEGLIGENCE":28548,"getType":28549,">T":28550,"bet":28551,"girl":28552,"ĠINCIDENTAL":28553,"-site":28554,".trigger":28555,"ĠLisa":28556,"_inputs":28557,"Ġrelatives":28558,"LoggedIn":28559,"Configure":28560,"IK":28561,".accept":28562,"Resume":28563,"ĠDraft":28564,"Ġ*>(":28565,"ĠWA":28566,"edian":28567,"erness":28568,"ĠLayoutInflater":28569,"*/čĊčĊ":28570,"othy":28571,"Ġobligation":28572,"Subscribe":28573,"Ġthumbnail":28574,"exist":28575,"Ġinsisted":28576,"ĠUICollectionView":28577,"ĠAngular":28578,"Ġtablets":28579,"ĠImpact":28580,"ãĢįĊĊ":28581,"aho":28582,"Ġcharacteristic":28583,"gd":28584,"Ġ=================================================":28585,"ourt":28586,"`.":28587,"Appro":28588,"Coordinate":28589,"Remember":28590,"Ġmarine":28591,"]=='":28592,"ĠAdministrator":28593,".getDefault":28594,"Ġforgot":28595,"ĠStructure":28596,"Vue":28597,"arsing":28598,"moment":28599,"kw":28600,"_cursor":28601,"Attack":28602,"Ġathletic":28603,"Ġdiagnosed":28604,"Ġende":28605,"åĪłéϤ":28606,"House":28607,"ĠPARAM":28608,"Ġwiki":28609,"ĠOpp":28610,"Ġconservation":28611,"Ġsnd":28612,"_tem":28613,"substr":28614,"ĠCape":28615,".sim":28616,"UTION":28617,"anan":28618,"âĢĻun":28619,"Ġgy":28620,"-work":28621,"Ġcompelling":28622,"='#":28623,"ĉsub":28624,"Ġdirectories":28625,"íĬ¸":28626,"Ġtouches":28627,"outines":28628,".Collection":28629,"schedule":28630,".lat":28631,"ĠDoctrine":28632,"CAA":28633,"ĠRefer":28634,"Ġshifts":28635,"Ġlikelihood":28636,"preter":28637,"ĠFemale":28638,"Ġintercept":28639,"Ġlou":28640
,"çĻ»":28641,"Ġrug":28642,"ĠCrown":28643,"Ġ****************************************************************************":28644,"-product":28645,"Ġprompted":28646,"ungle":28647,"docker":28648,"ĠTu":28649,"ĠUnique":28650,"_Error":28651,"ulos":28652,"ĠâĦ":28653,"Ġ(`":28654,"Getting":28655,"_scal":28656,"ĠEnh":28657,"üt":28658,"Ġsustained":28659,"Ġpatches":28660,"Ġprosper":28661,"ĠGaza":28662,"_light":28663,"Ġincons":28664,"--------Ċ":28665,"ĉĉĠĠĠĠĠĠ":28666,"SF":28667,"CN":28668,":\";Ċ":28669,"ĠCollins":28670,"(*)":28671,"Ġcompilation":28672,"']čĊ":28673,"Ġconsequence":28674,",...":28675,"Ġdm":28676,"ĠBLOCK":28677,"Cluster":28678,"Ġski":28679,"(argc":28680,"Tuple":28681,"Ġjoins":28682,"ĠSheriff":28683,"War":28684,"indi":28685,"Ġcommented":28686,"HOST":28687,"Ġinvitation":28688,"apanese":28689,"Ġpermits":28690,"precedented":28691,"_zone":28692,"ĠAmy":28693,"_RD":28694,"Minimum":28695,"Ġinvocation":28696,".enable":28697,"ichten":28698,"-owned":28699,"\"id":28700,"_POINTER":28701,"Fac":28702,"Ġspecifications":28703,"Ġnomination":28704,"Ġgp":28705,"<(":28706,"Ġrobots":28707,"ĠJerry":28708,"Ġholders":28709,"Ġwand":28710,"cms":28711,"Ġ}))Ċ":28712,".Toast":28713,"ĠIList":28714,"Based":28715,"zoom":28716,"/style":28717,"ĠBeck":28718,"Men":28719,"Ġcontributing":28720,"Ġundo":28721,"ĠOH":28722,"ĠaddObject":28723,"Ġeigen":28724,"signup":28725,"éĶĻ":28726,"Ġdistant":28727,"PARATOR":28728,"ĠMari":28729,"Ġmá":28730,"Emp":28731,"ós":28732,"ĠìĪĺ":28733,"evt":28734,"+j":28735,"park":28736,"ĠStay":28737,"ĠDun":28738,"Ġsoy":28739,">%":28740,"azines":28741,"Ġtiempo":28742,"(me":28743,"present":28744,".This":28745,"Ġeditors":28746,"FIELD":28747,".Work":28748,"ĠUniverse":28749,"Ġdrunk":28750,".timer":28751,"Ġaltered":28752,"ĠNar":28753,"ëł¥":28754,".Active":28755,"idor":28756,"çŃ":28757,".deltaTime":28758,"Ġawkward":28759,""":28760,"ĠSafari":28761,"Ġtricks":28762,"MENTS":28763,"division":28764,"Ġvarying":28765,"ĠHighway":28766,"Ġphotographer":28767,"ĠStewart":28768,"Ġlasting":28769,".Pre":28770,".amazonaws":28771,"ĠLuck":28772,".Description":28773,"ĠNaz":28774,"neg":28775,"Ġcó":28776,"<<\"\\":28777,"ĠSurv":28778,"ĠUnc":28779,"Recipe":28780,".BorderStyle":28781,"Ġmodifications":28782,"-at":28783,"ATFORM":28784,"hdr":28785,"ako":28786,"Ġsublicense":28787,"ĠJump":28788,"Ġbeim":28789,"ĠManhattan":28790,".bool":28791,"_hw":28792,"ÑĤÑĮ":28793,"Bin":28794,"Ġgateway":28795,"\"\":":28796,"ĠUIS":28797,":\"+":28798,"-def":28799,"ĠRegular":28800,"/testing":28801,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":28802,"stringstream":28803,"Ġdispar":28804,"Ġmobil":28805,"-read":28806,"ĠAdapter":28807,"ĠChampions":28808,"Ġscheduler":28809,"Ġkills":28810,"ĠMultiple":28811,"irror":28812,"Ġgods":28813,"ADO":28814,"akte":28815,"ĠUsuario":28816,".circular":28817,"Ġrecept":28818,"ĠExpr":28819,"Ġelderly":28820,"Ġnicely":28821,"Ġbeste":28822,"Want":28823,"Ġclassical":28824,".sprite":28825,"objc":28826,"ĠMason":28827,"Ġsistema":28828,".Black":28829,"eso":28830,"ĠZeit":28831,"Ġdivid":28832,"Ġenters":28833,"_subject":28834,"ĠPlanet":28835,".warning":28836,"ĠGram":28837,"_tokens":28838,"Ġhouseholds":28839,"_customer":28840,"userName":28841,"cross":28842,"Ġpione":28843,"Ġassists":28844,"_SM":28845,"ibo":28846,"Ġloyal":28847,"Ġuseless":28848,"#elif":28849,"ĠUltimate":28850,"Come":28851,"gel":28852,"Ġdich":28853,"xyz":28854,"ikel":28855,"obra":28856,"_scan":28857,"ĠInterior":28858,"ĠNice":28859,"Ġplac":28860,"ĉtarget":28861,"Ġviral":28862,"asso":28863,"()/":28864,"unde":28865,"ĠAdobe":28866,"Os":28867,"visited":28868,"ĠOW":28869,"ĠFeed"
:28870,"ĠSequence":28871,"Ġmanages":28872,"inson":28873,"ĠLouisiana":28874,"{})":28875,"ĠHab":28876,"ĠLD":28877,"Ġbip":28878,"prites":28879,"(elem":28880,".hibernate":28881,"élé":28882,"Ġohne":28883,"_transaction":28884,"Ġannunci":28885,"Published":28886,"ĠHonda":28887,"ĠTam":28888,"ĠPacket":28889,"_selector":28890,"Ġchallenged":28891,"Processing":28892,"-hover":28893,"Ġtrainer":28894,"_cancel":28895,"ĠNSDictionary":28896,"abric":28897,"ĠMLS":28898,"_sensor":28899,"Ġshrink":28900,"ĠFX":28901,"threshold":28902,"ĉHX":28903,"-mark":28904,"`.`":28905,"Scheme":28906,"(full":28907,"_writer":28908,"ĠSys":28909,"Ġfled":28910,"ĠCin":28911,"-widget":28912,"ĠPrevious":28913,"Gender":28914,"_question":28915,"Feed":28916,"Ġscrut":28917,"(prefix":28918,"ãĢĤãĢĤ":28919,"Ġinfections":28920,"Parts":28921,"Ġhierarchy":28922,"_DELETE":28923,"ĠPatient":28924,"_pay":28925,"Ġpromoted":28926,"Ġìĭ":28927,"Ġcivilian":28928,"Ġagriculture":28929,"ĠPiece":28930,"Ġstance":28931,"utsche":28932,"Assign":28933,".ACTION":28934,"Fig":28935,"_radius":28936,"ĠSync":28937,"ducer":28938,"failure":28939,"ensed":28940,"ptime":28941,"BM":28942,"_datetime":28943,"quivo":28944,"QUEUE":28945,"èĢħ":28946,"Appear":28947,"Ġsummit":28948,":void":28949,"Ġvine":28950,"认":28951,"onne":28952,"_TRANS":28953,".green":28954,"_cc":28955,"Ġhungry":28956,"Ġ\">":28957,"());čĊčĊ":28958,"Extract":28959,"izens":28960,"Ġsolver":28961,"Notify":28962,"Ġenglish":28963,"ĠShopping":28964,"interfaces":28965,"REQ":28966,"Ġilleg":28967,"ĠUIImageView":28968,"Ġdisconnect":28969,"ĠUntil":28970,"ĠConservative":28971,"@Column":28972,"Ġshifted":28973,"Ġ:čĊ":28974,"Ġfich":28975,"Ġdla":28976,"Ġshoe":28977,"\"),čĊ":28978,"ularity":28979,"_RESP":28980,"Weather":28981,"UIApplication":28982,".iterator":28983,"Ġaging":28984,".Parent":28985,"owie":28986,"(equal":28987,"ĠConv":28988,"/default":28989,"Ġmeasuring":28990,".prev":28991,".IsValid":28992,".Fat":28993,"ĠsÄĥ":28994,"keywords":28995,"without":28996,"Ġsovere":28997,"Ġexchanges":28998,"Ġmelt":28999,"Ġislands":29000,"ĠIntegr":29001,"Ġjumping":29002,"Ġgle":29003,"Ġjournalism":29004,"Ġdated":29005,"Localized":29006,"ĠRefresh":29007,"Particle":29008,"Ġaa":29009,"ĠSTRICT":29010,"Ġbod":29011,".Process":29012,"_AUTO":29013,"ĠPublished":29014,"every":29015,"Ġtechnological":29016,"lsx":29017,"Ġirrit":29018,"Additional":29019,"Ġdelimiter":29020,"_language":29021,"-area":29022,"boys":29023,"ĠTube":29024,"Ġwat":29025,"Ġmechanics":29026,"_owner":29027,"Spell":29028,"ĠStories":29029,".AppendLine":29030,"TableView":29031,"hem":29032,"stick":29033,"ollower":29034,"IFF":29035,"ĠUV":29036,"ollision":29037,"SUB":29038,"Ġcomparable":29039,"Ġdonde":29040,"sales":29041,"llvm":29042,"Ġ}],Ċ":29043,"OTTOM":29044,"ĠPurpose":29045,"Lab":29046,"Ġinterviewed":29047,"ois":29048,"asil":29049,".setId":29050,"ĠInstruction":29051,"-->":29052,"ĠModified":29053,"ationally":29054,"ĠMeeting":29055,"误":29056,"#region":29057,"Ġrouting":29058,".focus":29059,"ĠYouth":29060,"<":29348,"Ġunto":29349,"ologically":29350,"ĠMul":29351,"VIDIA":29352,"Ġslim":29353,"ĠCommissioner":29354,"(on":29355,"Ġunderneath":29356,"/db":29357,"vote":29358,"(Message":29359,"ĠPope":29360,"Defined":29361,"Ġswift":29362,"urf":29363,"Ġadapted":29364,"SEL":29365,"Ġrevenues":29366,"Ġdivine":29367,"=y":29368,"Gradient":29369,"_act":29370,"Ġ/*!<":29371,"Ġpolygon":29372,"ĠFDA":29373,"ĠCarr":29374,"atables":29375,"(stdout":29376,"Ġrefriger":29377,"Ġcoordin":29378,"avorites":29379,"ÑĪи":29380,"Ġcompassion":29381,"ĠPOSSIBILITY":29382,"-secondary":29383,"uracy":29384,"Ġcompromise":29385,"_AV":2938
6,"_os":29387,"Ġbeside":29388,"ĥĿ":29389,"Ġln":29390,".plugins":29391,"Capacity":29392,"alah":29393,".bin":29394,"ĠCRC":29395,"_balance":29396,"ĠflexDirection":29397,"Ġambit":29398,"Ġnickname":29399,"ĠForces":29400,"CLE":29401,"ĠShell":29402,"Ġsail":29403,"ĠWriter":29404,"ĠAlice":29405,"dw":29406,"ĠIndians":29407,"ĠMarshall":29408,"_SRC":29409,"Ġnormalized":29410,"ĠJag":29411,"ãĤĴ":29412,"zeit":29413,"rpc":29414,"ÃŃc":29415,".inline":29416,"Ġtravers":29417,"_numeric":29418,"Ġutilities":29419,"Ġevac":29420,"INPUT":29421,"ĉregister":29422,"MX":29423,"ĠCampbell":29424,"Ġdatasets":29425,"Ġdemanded":29426,"ĠinitialState":29427,"gan":29428,"Ġei":29429,"Unexpected":29430,"-web":29431,"trait":29432,",Y":29433,"ĠTodd":29434,"Ġskeleton":29435,"Ġoptimize":29436,"第":29437,"ĠUpon":29438,"ĠStObject":29439,"Ġaplic":29440,".'P":29478,"vron":29479,".UN":29480,"Ġpainter":29481,"izarre":29482,"Ġlav":29483,"Ġpom":29484,"preg":29485,"=function":29486,"(serial":29487,"ifica":29488,"uming":29489,"åľ°":29490,"ãģĤ":29491,"-op":29492,"UCH":29493,"ĠHend":29494,".propTypes":29495,"Ġyo":29496,"Ġroutines":29497,"Ġcaring":29498,"Sem":29499,"Ġreserves":29500,"Ġpriorities":29501,"redits":29502,"ISTR":29503,"ContentType":29504,"ĠSchw":29505,"/media":29506,"Ġestr":29507,"Ġclimbing":29508,"-week":29509,"cherche":29510,"sensor":29511,"ToArray":29512,"ĠMontreal":29513,"Ġclouds":29514,"ĠInjectable":29515,"ĠRice":29516,"Ġpropaganda":29517,"_provider":29518,"Ġindoor":29519,"Ġinaug":29520,"Ġdiplom":29521,"Ġmessaging":29522,"_mut":29523,"å¦Ĥ":29524,"Ġkw":29525,"ONS":29526,"arians":29527,"RPC":29528,")]čĊ":29529,"-ray":29530,"ĠSor":29531,"mall":29532,"Ġmarketplace":29533,"Ġvtk":29534,"Ma":29535,"ogan":29536,"igi":29537,"Ġsponsored":29538,"ĠDani":29539,".SEVER":29540,">'.$":29541,"multipart":29542,"ĠWol":29543,"ĠtableName":29544,"ĠUsername":29545,"BackgroundColor":29546,"Ġfright":29547,"_EMAIL":29548,"September":29549,"_vals":29550,"opia":29551,"Ġspotted":29552,"-Ch":29553,"ĠdataSource":29554,"/\"Ċ":29555,"екÑĤ":29556,"ĠRequestMethod":29557,"ĠReplace":29558,"-do":29559,"ahn":29560,"ĠPhD":29561,"].ĊĊ":29562,"NON":29563,"gement":29564,"ĠThr":29565,"Ġquietly":29566,"Ġtorture":29567,"Ġteas":29568,"ĠCY":29569,"Ġatr":29570,"development":29571,"-detail":29572,"Ġlighter":29573,"Ġarguing":29574,"Ġdeserves":29575,"Ġcurriculum":29576,"_CONTEXT":29577,"ÅĤy":29578,"HITE":29579,"ĉID":29580,"/uploads":29581,"Ġtits":29582,"reo":29583,"_drop":29584,".UTF":29585,"Ġpickup":29586,"Ġgrocery":29587,"ĠPure":29588,"Ġeasiest":29589,"Phil":29590,".feature":29591,"(\"*":29592,"Ġinvestor":29593,"tok":29594,"Ġjar":29595,"Los":29596,"âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ":29597,".queue":29598,"-speed":29599,"Mal":29600,"umblr":29601,"ĠCONST":29602,"ĠHRESULT":29603,"ĠDance":29604,"(filePath":29605,"Ġattributed":29606,"à¥į":29607,"ĠBund":29608,"coins":29609,"Ġsão":29610,"Ġpir":29611,"personal":29612,"Ġprelim":29613,"Ġpropose":29614,"ĠTL":29615,"]])":29616,"ĠSubscription":29617,"ĠKre":29618,",len":29619,".FirstOrDefault":29620,")--":29621,"_products":29622,".GetBytes":29623,"Ship":29624,"Ġencrypt":29625,"ĠSG":29626,"ĠMyst":29627,"hir":29628,"Ġiterate":29629,"Ġintend":29630,".mockito":29631,"Ġchapters":29632,"(angle":29633,"ĠVlad":29634,"设":29635,"'.ĊĊ":29636,"ResponseBody":29637,"ĠAbd":29638,"deal":29639,"Ġbarriers":29640,"-outline":29641,"bill":29642,"ĠFalls":29643,"_second":29644,".include":29645,".ceil":29646,"Ġoccupation":29647,"phony":29648,".moveTo":29649,"ĠJennifer":29650,"ASTER":29651,";\"><":29652,"ĠEnabled":29653,"Ġterminate":29654,"ĠIo":29655,"lations":29656,"ĠTHEORY":
29657,"Ġearliest":29658,"Ġrack":29659,"ĠScar":29660,"shake":29661,"chip":29662,"Ġuv":29663,"Ġalliance":29664,"пиÑģ":29665,"ĠGOODS":29666,"zione":29667,"ĠVI":29668,"Ġ{-":29669,"Ġfiltering":29670,"Ġmiscon":29671,".DockStyle":29672,"Ġbush":29673,"Ġjunk":29674,"æĮ":29675,"ĠQUE":29676,"Ġhooks":29677,"Ġfirmware":29678,"Ġmiddleware":29679,"dic":29680,"ĠOakland":29681,"Ġarrives":29682,"Payload":29683,"pixel":29684,"]|":29685,"ĠstartDate":29686,".PRO":29687,"_audio":29688,"Ġmidfield":29689,"igidbody":29690,"ĠSwiss":29691,"ĠClip":29692,"ĠDump":29693,"ĠTextBox":29694,"Ġgeh":29695,"yield":29696,"ods":29697,"Ġreferendum":29698,"Backend":29699,"ĠCream":29700,"Ġdominated":29701,"ĠArchive":29702,"Ġriders":29703,".prepareStatement":29704,"Ġquando":29705,"Ġchef":29706,"wiki":29707,"inel":29708,"ampling":29709,"(\"\\\\":29710,"Ġsag":29711,"_proxy":29712,"ãģķ":29713,"pdo":29714,".getElementsByTagName":29715,"Ġdemonstration":29716,"ĠNPC":29717,"Ġarchivo":29718,"endance":29719,"Ġefficiently":29720,"(actual":29721,".tableView":29722,"Ġmush":29723,"Ġbears":29724,"_threads":29725,"jas":29726,"ahun":29727,"Ġneural":29728,"Ġdesigning":29729,"ĠGDP":29730,"Ġlifted":29731,"缮":29732,"ĠJoint":29733,"ĠInclude":29734,"ĠGiants":29735,"Ġwithdrawal":29736,"ĠRent":29737,"native":29738,"ĠSeek":29739,"gression":29740,"_CPU":29741,"\\S":29742,"ĠShield":29743,"Ġsolic":29744,"Ġboom":29745,"yecto":29746,"Ġmanufacture":29747,"ĠâĢĭ":29748,"Ġbbox":29749,"Ġearthqu":29750,"ollectors":29751,":@\"%":29752,"Ġloops":29753,"Je":29754,"alking":29755,"ĠWhats":29756,"ĠBoys":29757,".book":29758,"ARGE":29759,"_pixel":29760,"Ġsuspects":29761,"ι":29762,"usp":29763,"ĠBMW":29764,"ieces":29765,"(person":29766,"å¼Ģ":29767,"é»":29768,"ĠPodcast":29769,"Ġbou":29770,"(Item":29771,"û":29772,"(Input":29773,"HttpGet":29774,"Ġburg":29775,")^":29776,"BOARD":29777,"*/,":29778,"Ġgulp":29779,"ĠBenn":29780,"Ġdecks":29781,".statusCode":29782,"Ġacute":29783,"Ġhug":29784,"ugu":29785,"Ġpled":29786,",\"%":29787,"hape":29788,"Ġзап":29789,"ĠMaine":29790,".real":29791,"Ġdalam":29792,"ĠMinor":29793,".Float":29794,"disp":29795,"Ġtl":29796,"Ġencount":29797,"=>$":29798,"Ġfg":29799,"tees":29800,"ĠRecomm":29801,"äl":29802,"Ġchemistry":29803,"Blocks":29804,"OID":29805,"Ġforex":29806,"ĠAppend":29807,"Ġ{*":29808,"ĠSupply":29809,"CGFloat":29810,"(bl":29811,"Ġate":29812,"adora":29813,"Ġgust":29814,"Associ":29815,">.Ċ":29816,"FETCH":29817,".serial":29818,"widgets":29819,"ardless":29820,"iefs":29821,"_FULL":29822,"ernetes":29823,"ĠPred":29824,"ØŃ":29825,"äºĭ":29826,"ubernetes":29827,"ĠLaura":29828,"Ġlabeled":29829,"Highlight":29830,"Ġannoying":29831,"/update":29832,"(description":29833,"Ġintimid":29834,"$c":29835,"\")))Ċ":29836,".AP":29837,"Ġ[]*":29838,"ĠEXIT":29839,".Host":29840,"ĠOPEN":29841,".sendMessage":29842,"_camera":29843,"_tile":29844,"Ġtherm":29845,"onomous":29846,"Ġdisadv":29847,"Ġnaar":29848,"indexOf":29849,"ĠPP":29850,".protocol":29851,"AFE":29852,"Ġtextures":29853,"################################################":29854,"umbai":29855,".stats":29856,"ĠGE":29857,"Ġie":29858,"ĠSTD":29859,"ĠMann":29860,".reflect":29861,"KB":29862,"Ġdive":29863,".wav":29864,"/*----------------------------------------------------------------":29865,"/settings":29866,".lifecycle":29867,"Ġdaughters":29868,"orus":29869,"uber":29870,"NING":29871,"stri":29872,"ĠTip":29873,"Ġzn":29874,"Ġswitched":29875,"inet":29876,"uffy":29877,"ĠTransportation":29878,"(conf":29879,"frica":29880,"ĠXL":29881,"ĠLead":29882,"_percent":29883,"__":29899,"permissions":29900,"ĠDetermine":29901,".Man":29902,"Ġadvances":29903,"
.InputStream":29904,"Ġstrongest":29905,"ĠeBay":29906,"Ġ#-":29907,"Ġdirname":29908,"ĠSMS":29909,"Ġmedications":29910,"Ġamended":29911,"Ġchurches":29912,"ĠImperial":29913,"$row":29914,"ĠMadison":29915,"ĠInsp":29916,"Ġaffair":29917,"Ġpsychology":29918,"vh":29919,"Ġseverity":29920,"âĢIJ":29921,"Ġstrips":29922,"AH":29923,"vertising":29924,"Ġconse":29925,"IMAGE":29926,"ĠStats":29927,"ĉsc":29928,".Cursor":29929,"Ġfreeze":29930,"sson":29931,"(xml":29932,"ĠSusan":29933,".tile":29934,"eded":29935,"ĠĠĠĠĉĉĉ":29936,"uelle":29937,"ĠMitchell":29938,"based":29939,"Operand":29940,"½æķ°":29941,"ĠFF":29942,"ĉstrcpy":29943,"ounces":29944,"ildo":29945,".executeQuery":29946,"Ġapproaching":29947,"ĠSeven":29948,"Ġnuts":29949,"Ġric":29950,"assignment":29951,"Ġcalculator":29952,"ĠMurphy":29953,"ĠBou":29954,"íĦ":29955,"Ġbutt":29956,"Ġticks":29957,"Projects":29958,"ilib":29959,".textColor":29960,"mov":29961,"_logo":29962,"(template":29963,"ĠINIT":29964,"ĠimageView":29965,"scriptions":29966,"ORITY":29967,"Consumer":29968,"Ġunprecedented":29969,"Ġtourist":29970,"Ġbron":29971,"Ġcontractor":29972,"Ġlicence":29973,"ĠNam":29974,"æ¯":29975,"(transform":29976,"_ATT":29977,"Pref":29978,"ĠGam":29979,"Ġvessels":29980,"Ġhav":29981,"Later":29982,".ToLower":29983,"Ġurls":29984,"Ġbreakdown":29985,"Ġpenalties":29986,"Ġfoster":29987,"ĠUE":29988,"Ġclue":29989,"comed":29990,"åIJįç§°":29991,"-main":29992,"Ġpts":29993,"Ġcounted":29994,"icts":29995,"/post":29996,"Ġgetattr":29997,"Ġping":29998,"ANCEL":29999,"Ġpec":30000,"Ñħод":30001,"antom":30002,"ĠBlueprint":30003,"ĠEventEmitter":30004,"Ġlä":30005,"æ²":30006,"Ġstraw":30007,"(comp":30008,"'une":30009,">N":30010,"-client":30011,"esModule":30012,"-base":30013,"Ġretreat":30014,"_simple":30015,"ĉĉĉĉĉĉĠ":30016,"fee":30017,"')čĊčĊ":30018,"ControlItem":30019,"Ġsubscribers":30020,"please":30021,"ĠEff":30022,"Ġpound":30023,"ĠBytes":30024,"ĠTea":30025,"_activity":30026,"Ġmaxim":30027,"Ġopcode":30028,"BSD":30029,".constant":30030,";}":30031,"ombres":30032,"Ġcareers":30033,").ĊĊĊĊ":30034,"Ġspreading":30035,"-expanded":30036,"ĠOrd":30037,"amarin":30038,"Ġmobility":30039,"Unfortunately":30040,"akk":30041,"NL":30042,"_redirect":30043,"ĠPG":30044,"ĠSensor":30045,"bol":30046,"tap":30047,"_MEMORY":30048,"ĠUIAlert":30049,"plitude":30050,"Website":30051,"ĠLogo":30052,"love":30053,"[ind":30054,"Ġaltogether":30055,"Ġwondered":30056,"Ġesper":30057,"ĠLiberal":30058,"Ġoss":30059,"Ġelit":30060,"Ġstiff":30061,"odox":30062,"_mentions":30063,"ĠDouglas":30064,"_pid":30065,"ĠCK":30066,"ĠinitWithFrame":30067,".blog":30068,"pkg":30069,"anghai":30070,"QUIRED":30071,"uu":30072,"Ġmkdir":30073,"ATAL":30074,"Ġunh":30075,"inces":30076,"sth":30077,"Ġhypothesis":30078,"Ġcata":30079,"ĠTB":30080,"ĠClar":30081,"Ġpredecess":30082,"Ġsituated":30083,"-world":30084,"))/":30085,"Ġheadlines":30086,".stat":30087,"Ġoutbreak":30088,"spath":30089,"_FLAGS":30090,"ĠServletException":30091,"Sun":30092,"FROM":30093,"ĠDir":30094,"ãĥ»ãĥ»ãĥ»":30095,"_coord":30096,"ĠOptim":30097,"Monitor":30098,".bit":30099,"XXX":30100,"Ġtodas":30101,"feld":30102,"ÑĢи":30103,"imir":30104,"Ġpolitically":30105,"Ġmolecular":30106,"Ġtraded":30107,"Ġ{{$":30108,"ĠSwedish":30109,"Ġ'@/":30110,"_REAL":30111,"Ġwarehouse":30112,"today":30113,",L":30114,"orp":30115,"false":30392,"Ġspa":30393,"ĠNear":30394,"ìķ":30395,"Ġintrig":30396,"_members":30397,"wave":30398,"Ġanalysts":30399,"_OS":30400,"edin":30401,"ĠFri":30402,"Ġretrieved":30403,"Regular":30404,"_obs":30405,"EXPORT":30406,"')}}\"":30407,"\"class":30408,"__((":30409,"bucket":30410,"Ġstro":30411,"ĠPatch":30412,"ystick":30413,
"fulness":30414,"apos":30415,"Da":30416,"ĉĉĉĉĉĠĠĠ":30417,"Ġenrich":30418,"unordered":30419,"hole":30420,"Cong":30421,"';ĊĊ":30463,"STRUCT":30464,"QR":30465,"IDs":30466,"(arguments":30467,"_aux":30468,"(Event":30469,"_PRIVATE":30470,"ĠTrek":30471,"Ġdownloads":30472,"mutable":30473,"_STRUCT":30474,"(wx":30475,"Ġdomains":30476,"jspx":30477,"ĠViagra":30478,"Commands":30479,"Js":30480,".cfg":30481,"ContentPane":30482,"ĠEditText":30483,"à¥įà¤":30484,"Attach":30485,"ĠARM":30486,"positive":30487,"ĠGenerated":30488,"Ġseized":30489,"=:":30490,"Ġelectronics":30491,"ĠAppComponent":30492,"/',Ċ":30493,".equalsIgnoreCase":30494,"Doctrine":30495,"disk":30496,"ĠPolitical":30497,"CHO":30498,"":30584,"ĠBeauty":30585,"Ġ`<":30586,"Ġtouching":30587,"Ġ|--":30588,"ĉflag":30589,"normalize":30590,"Ġtrapped":30591,"Ġestablishing":30592,"/build":30593,"AJ":30594,"fy":30595,"-react":30596,"avn":30597,"RIPTION":30598,"Ġkut":30599,"ĠFashion":30600,"ĠInform":30601,"curities":30602,"{Ċ":30634,"Ġgarlic":30635,"Ġrepr":30636,"Ġreplies":30637,"(prop":30638,"Ġspirits":30639,"Ġinspire":30640,"Ġbasement":30641,".reject":30642,"Ġhints":30643,"Ġpolling":30644,"ĉĠĊ":30645,"_rating":30646,"Ġcath":30647,"avier":30648,"Ġcompressed":30649,"ĠVS":30650,"]'":30651,"Ġjudicial":30652,"ĠTrend":30653,"training":30654,"ESTAMP":30655,"ognition":30656,"Äģ":30657,"SENT":30658,"ventions":30659,"Ġconsultant":30660,"umph":30661,"ĠuserService":30662,",NULL":30663,"kh":30664,"Dear":30665,"_BAD":30666,"itations":30667,"Ġmetaph":30668,"'é":30669,"andise":30670,"-font":30671,".chart":30672,"Ġsg":30673,"_Controller":30674,".jpeg":30675,"ĠULONG":30676,"ĉgame":30677,"(ss":30678,"ĠMaj":30679,"ĉgo":30680,"ĠSad":30681,"ĠBerg":30682,"ĠMine":30683,"Pack":30684,"Ġresistant":30685,"ĠROM":30686,"Ġpeg":30687,"ĠStanford":30688,"ĠYahoo":30689,"Ġscaled":30690,"Ġlan":30691,"=[]":30692,"\"/>ččĊ":30736,"Ġsud":30737,"ĉbackground":30738,"Ġscholars":30739,"-muted":30740,"ará":30741,"Ġ=====":30742,"Ġ____":30743,"Creat":30744,"enever":30745,"/wp":30746,"ĠVPN":30747,"ErrorCode":30748,")],Ċ":30749,"(builder":30750,"ĠEnemy":30751,"Sensor":30752,"usa":30753,"Ġtriggers":30754,"Ġplayoffs":30755,"_REQ":30756,"Ġ(~":30757,"ĠBarry":30758,"Ġpermanently":30759,"ĠRUN":30760,"Ġbure":30761,".Fatalf":30762,"Ġchick":30763,"ĉpanic":30764,"psi":30765,"oka":30766,"éĢī":30767,">[":30768,"Ġunderstands":30769,"ĠJunior":30770,"ĠINFO":30771,"=mysqli":30772,"ustain":30773,"-source":30774,"serv":30775,"ĠCREATE":30776,".au":30777,"Ġsells":30778,"ĠĠĊĠĠĊ":30779,"Europe":30780,"zw":30781,"preh":30782,"ĠNSA":30783,"Ġxy":30784,"ิ":30785,"ĠBeyond":30786,"Instead":30787,"NonQuery":30788,"Ġarise":30789,"Ġavoided":30790,".emplace":30791,"_models":30792,"}),Ċ":30793,"Ġhid":30794,"Ġ&_":30795,".points":30796,".getWidth":30797,".Exec":30798,"Ġ////":30799,"ĠSessions":30800,"...\\":30801,"ĠColomb":30802,"Ġacceleration":30803,"restore":30804,"Ġile":30805,"obic":30806,"}Ċ":31296,"plaint":31297,"getText":31298,"Ġindividually":31299,"Ġcheckbox":31300,"UY":31301,"ĠLamb":31302,"Ġdysfunction":31303,"ĠLar":31304,"à°":31305,"ĠCreating":31306,"');ĊĊĊ":31307,"\"They":31308,"locations":31309,"_CORE":31310,"Interaction":31311,"umbnails":31312,"ĠPartner":31313,"brit":31314,"Ġlesser":31315,"ĠSlot":31316,"setAttribute":31317,"ĠWave":31318,".po":31319,"/store":31320,"Ġbrowsing":31321,"_pd":31322,"sume":31323,"sed":31324,"Curve":31325,"Ġplasma":31326,"Ġsuspicious":31327,"ìĿ¸":31328,"ĠBah":31329,"ĠExplicit":31330,"_CC":31331,".ClientSize":31332,"\\View":31333,"Ġsubstit":31334,"loon":31335,"ĠGAME":31336,"ĠBrid":31337,"Ľå»º":31338,"_User":3
1339,"Ġsquares":31340,"fone":31341,"Ġsacred":31342,"ughs":31343,"]interface":31344,"ĠThrow":31345,"ĠKirk":31346,"Ġempire":31347,"Ġassessed":31348,"Tax":31349,"ĠHeaven":31350,"-buffer":31351,"_STATIC":31352,"éné":31353,"-bordered":31354,"Ġpunct":31355,"(mode":31356,"Ġkeine":31357,"Sent":31358,"ĠCalcul":31359,"ĠEve":31360,"Ġstylish":31361,"Ġoils":31362,".TestCase":31363,"Ġtrademark":31364,"Ġliterary":31365,"Ġconcentrations":31366,"ĠRelations":31367,"(Class":31368,"Ġstdin":31369,"Ġvæ":31370,"backup":31371,".VERSION":31372,".AutoScaleDimensions":31373,"starter":31374,"Transactional":31375,"-panel":31376,"Studio":31377,"kc":31378,"ĠChamber":31379,"ĠSpiel":31380,"Ġrho":31381,"اÙĦ":31382,"!'":31383,".Attributes":31384,"Ġmurdered":31385,"apeutic":31386,"Ġintimate":31387,"ĠtextField":31388,"ĠBuffalo":31389,"dummy":31390,"\"%":31391,"ĠLiberty":31392,"obar":31393,"ĠTank":31394,"ĠPopular":31395,"ervisor":31396,"ĠIniti":31397,"ĠMall":31398,"ĠPrior":31399,"CAP":31400,"ĠClay":31401,"ĠCertificate":31402,".Lock":31403,"-strip":31404,"-driven":31405,"/all":31406,"ĠMessageBoxButtons":31407,"_SECRET":31408,"_pb":31409,"Ġrats":31410,"ाà¤":31411,"Ġnt":31412,".Router":31413,"_topic":31414,"Ġtennis":31415,"ĠPUBLIC":31416,"ĠActivatedRoute":31417,"Ġ',Ċ":31418,"Ġcostume":31419,"Ġjokes":31420,".Handle":31421,"ĉbyte":31422,"Ġflavors":31423,"(cc":31424,"Ġpersonas":31425,"ĉimage":31426,"ĠNazi":31427,"Ġgrammar":31428,"Ġúlt":31429,"Ġvalve":31430,"Ġvic":31431,"ĠRachel":31432,"_invalid":31433,"Prefs":31434,"stdint":31435,"(route":31436,"Ġhtmlspecialchars":31437,"Ġpeoples":31438,"pline":31439,"Ġnv":31440,"ĠQuant":31441,"oppers":31442,"ĠcurrentUser":31443,"ĠCatal":31444,"Ġreconc":31445,"Ġconjunction":31446,"lx":31447,"amburg":31448,"Ġinfluential":31449,"danger":31450,"inders":31451,"Ġ%@\",":31452,".configuration":31453,"osome":31454,".identity":31455,"Ġpicker":31456,"nost":31457,"ĠDIY":31458,"August":31459,"ablo":31460,"Leaf":31461,"ĠReco":31462,"cko":31463,"DOC":31464,"ĠHerm":31465,":any":31466,"ĠInterview":31467,"ĠTex":31468,"xfe":31469,"(work":31470,"Ġleap":31471,"Heading":31472,"Ġquarters":31473,"\\Bundle":31474,"reb":31475,"Perhaps":31476,"ĠGmbH":31477,"Birth":31478,"ĉsum":31479,"ĠWatson":31480,".nil":31481,"ç¡":31482,"{}ĊĊ":31483,"icaid":31484,"Getter":31485,"\"name":31486,"Ġ\"čĊ":31487,"_none":31488,"zm":31489,"acute":31490,"uesto":31491,"Ġsous":31492,"Ġrebuild":31493,"Ġnewspapers":31494,"ĠHaz":31495,"Ġkits":31496,"ifo":31497,"Blur":31498,"Ġsuited":31499,"-In":31500,"à¯":31501,"ĠKeith":31502,"ĠNorway":31503,"INIT":31504,"ireccion":31505,"ieties":31506,"_usage":31507,"ĠDoug":31508,"rise":31509,"Ġtrillion":31510,"imited":31511,"ĠREL":31512,"alic":31513,"Ġcriticized":31514,"theorem":31515,"Ġcease":31516,"Ġsidew":31517,"ĠTerry":31518,"Ġsubsidi":31519,"Ġfirmly":31520,"Ġaws":31521,"Ġhott":31522,"Ġdressing":31523,"badge":31524,"ĠApplications":31525,"è¿ĶåĽŀ":31526,"Ġlaughed":31527,"Ġhobby":31528,"Ġmusicians":31529,"Ġ*.":31530,".placeholder":31531,"Ġcounters":31532,"ĠCapitol":31533,"SDK":31534,"Ġhelmet":31535,"andbox":31536,"quit":31537,"Ġcriminals":31538,"Ġteenager":31539,"(update":31540,"Gl":31541,".selection":31542,"Ġdischarge":31543,"Ġpresenting":31544,"ufacturer":31545,"_UNKNOWN":31546,"Ġstressed":31547,"åύ":31548,"Proto":31549,"_correct":31550,"haus":31551,"Ġrenov":31552,"Ġfirearms":31553,"Ġtechnically":31554,"-browser":31555,"Ġcandy":31556,"Stroke":31557,"Ġexecutor":31558,"Ġoccurrence":31559,"ĠIPv":31560,"_INTERFACE":31561,"ĠRetrieve":31562,".bad":31563,"Exchange":31564,"Navbar":31565,"ĠKid":31566,"(getApplicationContext"
:31567,"_STOP":31568,"ĠBoss":31569,"Listeners":31570,"Ġshooter":31571,"ĠAlb":31572,"äch":31573,"Ġpix":31574,".keyCode":31575,"alone":31576,"Ġabsurd":31577,"ĠCum":31578,"ĠNewtonsoft":31579,"ikt":31580,"Ġlaughing":31581,"Ġcapitalism":31582,"reeNode":31583,"Tx":31584,"_QUERY":31585,".Sleep":31586,"(login":31587,"WebElement":31588,"Ġcelebrating":31589,"Ġdeprecated":31590,"Ġmaar":31591,"Ġartistic":31592,"_ASSOC":31593,"ĠBorderRadius":31594,"ĉwp":31595,"Ġsurvivors":31596,"Inner":31597,"-red":31598,"Ġprosecution":31599,"_pp":31600,"(\"$":31682,"Ġcomma":31683,"unchecked":31684,"graphics":31685,"rors":31686,"GROUND":31687,"(public":31688,"Ġcustomized":31689,"ĠArkansas":31690,"ĠRew":31691,"Ġexpiration":31692,"×ķ":31693,"ĠCul":31694,"Ġnons":31695,".Filter":31696,"Ġsenator":31697,"_definition":31698,"ashington":31699,"ymph":31700,"/J":31701,"Ġfuse":31702,"ramid":31703,"ĠSupplier":31704,"Ġautocomplete":31705,"Ġ}),":31706,".\"ĊĊĊ":31707,"_functions":31708,"ĉto":31709,".eval":31710,"ĠTObject":31711,"References":31712,"Ġheated":31713,"HAL":31714,"Ġ))}Ċ":31715,"}$":31716,"ĠBarr":31717,"_UNIT":31718,"+$":31719,"ĠgetValue":31720,"iped":31721,"chied":31722,"(vm":31723,"cue":31724,"_integer":31725,"_course":31726,"third":31727,"Ġrevised":31728,"**/Ċ":31729,"_DIRECT":31730,"OutOf":31731,"(\"(":31732,"ĠFeel":31733,"Ġreass":31734,"Ġsubtitle":31735,"peri":31736,"nf":31737,"Ġenjoys":31738,"Ġtreats":31739,")this":31740,"-tabs":31741,"ancers":31742,"Ġcontinent":31743,"Ġcardio":31744,"Ser":31745,".question":31746,"Ġphrases":31747,"Validators":31748,"Ġpopul":31749,"ĠlÃŃ":31750,"song":31751,"_INTERNAL":31752,"Ġadviser":31753,"Ġpuzz":31754,"Ġambitious":31755,"ĠTob":31756,"ĠDP":31757,"Ġpresidency":31758,"Ġsurrender":31759,"Ġwatches":31760,"_binary":31761,"ĠSoon":31762,"Ġcanada":31763,"(\"\")Ċ":31764,"]='":31765,"ĠBrandon":31766,"epsilon":31767,"rw":31768,".addChild":31769,".Copy":31770,"Principal":31771,"Photos":31772,"Ġmarginal":31773,"Ġbasics":31774,"eing":31775,"Must":31776,"_String":31777,"Ġole":31778,"Magento":31779,".customer":31780,"(prev":31781,"ล":31782,"Ġloyalty":31783,"Cog":31784,"Ġprotocols":31785,"ĠCompanies":31786,"Ġtheoretical":31787,"Ġaccessing":31788,"ĠZen":31789,".ones":31790,"attice":31791,"_world":31792,"zes":31793,"Ġtattoo":31794,"Ġmenos":31795,"Ġintersect":31796,"\"];ĊĊ":31797,"belie":31798,"Ġinactive":31799,".readline":31800,"-labelled":31801,".done":31802,"lickr":31803,"ĠWORK":31804,"Ġderivative":31805,"Ġdatabases":31806,"âĤĤ":31807,"Ġsx":31808,".isArray":31809,"Ġys":31810,"Ġpada":31811,"ĠBullet":31812,"(`/":31813,"isActive":31814,"ĠCGSize":31815,"(equalTo":31816,"ĠColumbus":31817,"Ġmarry":31818,"DEV":31819,"_limits":31820,"rones":31821,"IAS":31822,"Ġtau":31823,"mino":31824,"_Write":31825,"ĠWine":31826,"Ġ[['":31827,"ĠPull":31828,"riters":31829,"rients":31830,"Ġshifting":31831,"upp":31832,"_TIMER":31833,"ĠConditions":31834,"ấ":31835,"ĠOrders":31836,"ĠStrength":31837,"æīĢ":31838,"Ġvalidity":31839,"Ġfot":31840,"etur":31841,"Ġbolt":31842,"åĨħ":31843,"ĠAlong":31844,"oshi":31845,"Ġassumptions":31846,"Ġmagazines":31847,"_SPI":31848,"Ġpunt":31849,"_PRODUCT":31850,"Ġrelay":31851,"ĠJavascript":31852,".te":31853,"-es":31854,"Ġwidgets":31855,"(fs":31856,"\";":31923,"atching":31924,"ĠKnowledge":31925,"ĉThe":31926,";margin":31927,"lessness":31928,"opard":31929,"umatic":31930,"()));čĊ":31931,"Ġfals":31932,"(cache":31933,"TypeId":31934,"éĢļ":31935,"_choice":31936,"ĠGoth":31937,"ĠSites":31938,"MG":31939,"_border":31940,"Indices":31941,"Comparer":31942,"ĠRedistribution":31943,"Ġcloset":31944,"Ġversatile":31945,"Input
s":31946,"********************":31947,"Ġobesity":31948,"quiz":31949,"gra":31950,"(global":31951,"åĬ¡":31952,"Ġcollector":31953,"Ġkor":31954,"ovable":31955,"ADC":31956,"ĠEventHandler":31957,".nc":31958,"Ġplayback":31959,"ientos":31960,"_perm":31961,"_WARNING":31962,"ĠOlympics":31963,".norm":31964,"ĠBroadcast":31965,"_small":31966,"drive":31967,".iloc":31968,"Ġtyped":31969,"MEM":31970,"_cons":31971,"DMETHOD":31972,"Ġlun":31973,".distance":31974,"(par":31975,"poon":31976,"Ġbast":31977,"activities":31978,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":31979,":čĊčĊ":31980,"SER":31981,")&&":31982,"_lst":31983,"ĠPolish":31984,"Ġknocked":31985,"Ġfrustration":31986,"aukee":31987,"Ġphosph":31988,"iquid":31989,"_coeff":31990,"æŃ¤":31991,"Latest":31992,"ĠDust":31993,"Tipo":31994,"Ġmaintains":31995,"Ġmarsh":31996,"incinn":31997,"lbl":31998,"Care":31999,"Ġneighborhoods":32000,"_gpio":32001,"ĠArsenal":32002,"Dem":32003,"ĠWhe":32004,"_hook":32005,"Ġldc":32006,"ĠHarper":32007,"ĠBerkeley":32008,"Ġgraduated":32009,"Percent":32010,"Ġarriving":32011,"ĠAdventure":32012,"(scope":32013,"('*":32014,"quarter":32015,"ĠMarie":32016,"Speaking":32017,"_codegen":32018,"Ġimmun":32019,"caster":32020,"ãĤĮ":32021,"åķĨ":32022,"ĠDimensions":32023,".record":32024,"Ġtexto":32025,"ĠMichelle":32026,"Pending":32027,"(by":32028,"_PAR":32029,"ucht":32030,"bee":32031,".Thread":32032,"ampire":32033,"know":32034,"ĠClinical":32035,"ĠmarginBottom":32036,"Ġdistinguish":32037,".Full":32038,".undefined":32039,"ĠSequelize":32040,"############################################################################":32041,"Ġeducated":32042,"_OVER":32043,"åºı":32044,"ĠÂłĠÂł":32045,"_each":32046,"Ġurge":32047,"depart":32048,"Ġdonors":32049,"ĠAu":32050,"Ġbillions":32051,"Ġbelonging":32052,"_age":32053,"_Int":32054,"Ġsubstances":32055,"machine":32056,"!!!ĊĊ":32057,"Ġjsonify":32058,"ibbean":32059,"ĠCad":32060,"ĠendTime":32061,"Ġcycling":32062,"ĠUITextField":32063,"Ġleverage":32064,"Ġvanilla":32065,"eat":32066,"Launch":32067,"(pt":32068,"states":32069,"ĠControls":32070,"ĠRespons":32071,"ĠJake":32072,"Ġasleep":32073,"fortunate":32074,".nextLine":32075,"SizeMode":32076,"ìĿ¼":32077,"TestingModule":32078,"German":32079,"ĠInvestig":32080,".reverse":32081,"ĠBACK":32082,"(DateTime":32083,"Ġnonprofit":32084,"ĠExpect":32085,"Ġtanto":32086,"']),":32087,"ĉthe":32088,"Multiple":32089,"(getActivity":32090,"_WAIT":32091,"Ġjá":32092,"decor":32093,"levance":32094,"ĠGitHub":32095,"mination":32096,"_quantity":32097,".Scanner":32098,"ĠLion":32099,"éĶĻ误":32100,"Ġdre":32101,"Ġtantra":32102,"ĠcontentType":32103,"Ġfid":32104,"_alt":32105,"NSIndexPath":32106,"-pl":32107,"åĮĸ":32108,"Ġantibiot":32109,"tables":32110,"acial":32111,"ĠRegistry":32112,"Ġolive":32113,"igers":32114,"Ġsubscriber":32115,"_pres":32116,"ĠSyntax":32117,"Ġlovers":32118,".Byte":32119,"olders":32120,"_forward":32121,"always":32122,"Caption":32123,"Priv":32124,"ĠTampa":32125,"isateur":32126,"-labelledby":32127,"ĠToString":32128,"ĠìĤ¬":32129,"Ġinitiated":32130,"WF":32131,"Ġinstitutional":32132,"inject":32133,"ĠScr":32134,"Ġdoctrine":32135,"Ġspacious":32136,"isure":32137,"ĠAna":32138,"\"time":32139,"essaging":32140,"Ġcid":32141,"ĠNan":32142,"Ġincomplete":32143,"TAG":32144,"-build":32145,"December":32146,"Ġresidual":32147,"(PDO":32148,"ĠListen":32149,"Ġglyph":32150,"Ġgaps":32151,"nea":32152,".Rect":32153,"Ġsau":32154,"ĠPhotograph":32155,"Ġexecutable":32156,"ĠExpert":32157,"Coroutine":32158,"_sizes":32159,"ĠNL":32160,".isValid":32161,");}Ċ":32162,"-reg":32163,"Ġciting":32164,"cwd":32165,"ĠOttawa":32166,
"ĠBatt":32167,"Ġrenewable":32168,"Ġpreliminary":32169,"Ġasylum":32170,"Ġwrist":32171,"Ġutiliz":32172,"Ġdetention":32173,"Fast":32174,"Ġange":32175,"incinnati":32176,"Ġsteering":32177,"ĠNaN":32178,"iosity":32179,"/page":32180,"Ġè¿":32181,"sterol":32182,"Ġdisg":32183,"(DB":32184,"ĠDESCRIPTION":32185,"Ġ_$":32186,"Ġobstacle":32187,"Ġbizarre":32188,"Ġextraction":32189,"_expected":32190,"Ġloses":32191,"ĠCelebr":32192,"ĠhtmlFor":32193,"Ġexploit":32194,"олÑĮзов":32195,"XYZ":32196,"Ġmagnet":32197,"amped":32198,"Ġatoms":32199,"Sources":32200,"pectives":32201,"Ñģли":32202,"Ġ=čĊ":32203,"Ġdare":32204,"ĠWalter":32205,"Ġbrightness":32206,"Ġannotations":32207,"ëı":32208,"iske":32209,"Schedule":32210,".images":32211,"rosso":32212,"Ġ\"..":32213,"gamma":32214,"Ġinstructor":32215,"Ġoverwrite":32216,"-am":32217,"Ġdevastating":32218,"ĠSaints":32219,"Ġhs":32220,"Ġbonuses":32221,"$output":32222,"ijd":32223,"(ActionEvent":32224,"monitor":32225,"Ġmattress":32226,"January":32227,".jp":32228,"Ġcaracter":32229,"Ġimpose":32230,"_rest":32231,"ĠSignature":32232,"Ġcoronavirus":32233,"ãģĬ":32234,"_compare":32235,"Measure":32236,"itated":32237,"elijk":32238,"igos":32239,"esar":32240,"Ġrushed":32241,"metry":32242,"_SEPARATOR":32243,"_WE":32244,"_ATTRIBUTE":32245,"Ġyaml":32246,"Ġspecs":32247,"ĠRah":32248,"pheric":32249,"ĠInvestment":32250,"äll":32251,"Ġappealing":32252,"Ġviewport":32253,"ç©":32254,"ĠmarginLeft":32255,"Ġsubtract":32256,"ĠEDIT":32257,"ĉArrayList":32258,"grading":32259,"ĠFailure":32260,"asper":32261,"EEK":32262,"(now":32263,")Ċ":32279,"Collision":32280,"ĠGreater":32281,"ĠRacing":32282,"alan":32283,"Ġmonetary":32284,",new":32285,"ĠSorry":32286,".Enable":32287,"ĠInstantiate":32288,"ollen":32289,"ë©´":32290,"ĠCalling":32291,"_hour":32292,"ADA":32293,"Ġshy":32294,")**":32295,"Ġ==>":32296,"Ġespecial":32297,"Ġinterpreted":32298,"!=\"":32299,"Ġpharmacy":32300,".single":32301,"ĠCialis":32302,"Ġparas":32303,".toUpperCase":32304,"ĠDemon":32305,"Prime":32306,"Ġrankings":32307,"Adding":32308,"_HASH":32309,"ĠExam":32310,"Ú©":32311,"ĠVictor":32312,"Okay":32313,"\"];čĊ":32314,"Ġfortune":32315,"ĠFETCH":32316,"expand":32317,".Interop":32318,"Ġbarn":32319,"æ¶Ī":32320,"uevo":32321,"Ġspeculation":32322,"âĶĢâĶĢâĶĢâĶĢ":32323,"ĠNu":32324,"ĠBlues":32325,"(fname":32326,"Ġinhabit":32327,"Ġ\\\"%":32328,"CES":32329,"ulario":32330,"_cr":32331,"Ġvalidated":32332,"Ġmidnight":32333,"anking":32334,"Ġincorporate":32335,"Ġpursuit":32336,"EXP":32337,"prime":32338,"Pid":32339,"-US":32340,"ĠNurs":32341,"ĠWheel":32342,"éĺ":32343,"Ġinp":32344,"Ġsupportive":32345,".member":32346,"ĠShot":32347,".CheckBox":32348,"Ġaffirm":32349,"Tor":32350,"FullYear":32351,"Ġconsiderably":32352,"credentials":32353,"_opts":32354,"Roll":32355,"(round":32356,"Ġcoment":32357,"_UART":32358,"Ġextending":32359,"RG":32360,"resultado":32361,"itu":32362,".getSession":32363,"Ġattraction":32364,"&D":32365,"$html":32366,"ĠJessica":32367,"ĠAssociate":32368,"añ":32369,"_ed":32370,"ĠLag":32371,"Ġorigins":32372,"())->":32373,"addEventListener":32374,"IALOG":32375,"åIJ¦":32376,".Compare":32377,"Album":32378,"ĠKu":32379,"\";ĊĊ":32423,"quisite":32424,"channels":32425,"/res":32426,"ĠAnalytics":32427,".appcompat":32428,"/to":32429,"ĠonError":32430,"(attr":32431,"IRM":32432,"Ġragaz":32433,"-as":32434,".Second":32435,"oriented":32436,"Ġdonn":32437,"Ġlightning":32438,"fid":32439,"ĠPle":32440,"ãģ¾ãģĻ":32441,"tro":32442,".True":32443,"Observable":32444,"×Ļ":32445,"umbing":32446,"Ġprospective":32447,"-filter":32448,"Ġpursuant":32449,"(points":32450,".Bind":32451,"Ġpalm":32452,"clearfix":32453,"ös":3
2454,"ĠGonz":32455,"Ġweaken":32456,"Drive":32457,"enido":32458,"lld":32459,"obox":32460,"anean":32461,"Got":32462,"ä¿Ŀ":32463,"Regex":32464,"æĥ":32465,"Ġsalad":32466,"assis":32467,"\"net":32468,"inheritDoc":32469,"ĠRV":32470,"quier":32471,"Ġclazz":32472,"Ä±ÅŁ":32473,"osterone":32474,"Ġairline":32475,".listdir":32476,"Ġdownloading":32477,"ĠPalm":32478,"waukee":32479,"<":32480,".BL":32481,"_INLINE":32482,"offs":32483,"<<(":32484,"_news":32485,"Ġchase":32486,"/><":32487,"Ġeuros":32488,"ĠEgyptian":32489,"ĠStainless":32490,"_BOOL":32491,"ĠGuild":32492,"ĠDynam":32493,"[indexPath":32494,"Ġï":32495,"Ġmemorable":32496,"ĠChampion":32497,"ResourceManager":32498,".Login":32499,"ĠFormer":32500,"yped":32501,"Ġlleg":32502,";\",":32503,"DWORD":32504,"Ġtaxi":32505,"Ġbombs":32506,"rah":32507,".tags":32508,"_tests":32509,"stones":32510,"âĢĿ)":32511,"[g":32512,"rtype":32513,"Ġvu":32514,"Ġhostile":32515,"Chars":32516,"ĠPatriots":32517,"/status":32518,"());Ċ":32872,"ajÄħ":32873,"_OCC":32874,"Ġplanets":32875,"æŁ¥":32876,"ĠDublin":32877,"Ġserie":32878,".printf":32879,"deep":32880,"`)":32881,"Ġ\\$":32882,"Ġμ":32883,"_VIDEO":32884,"endors":32885,"ĠCrypto":32886,"Far":32887,".Transparent":32888,".TR":32889,"iasm":32890,"_training":32891,"Ġteaches":32892,"ĠBelt":32893,"Ġlimiting":32894,"ĠKath":32895,"ĠIndexPath":32896,"Ġachievements":32897,"Ġserá":32898,"interopRequire":32899,"Ġdisse":32900,".If":32901,"arming":32902,"ulsion":32903,"Po":32904,"_DETAIL":32905,"Prototype":32906,"ĠCAL":32907,"Ġagrees":32908,".vo":32909,".ExecuteNonQuery":32910,"ĠTopic":32911,"Ġ'{}":32912,"Arm":32913,"Ġecc":32914,"Mag":32915,"Ġserialized":32916,"ĉconn":32917,"cached":32918,"=tf":32919,"ĠByteArray":32920,"protobuf":32921,"varchar":32922,"ĉASSERT":32923,"Ġliste":32924,"_trigger":32925,"·¸":32926,"Feel":32927,"Tahoma":32928,"ĠLik":32929,"Ġstructured":32930,"ergus":32931,".Initial":32932,"_ge":32933,"cljs":32934,".contact":32935,"Ġandere":32936,"$stmt":32937,"_CURRENT":32938,"ĠDiscover":32939,"$res":32940,"formatter":32941,"Ha":32942,"vangst":32943,"Ġemerge":32944,"ãĢĤâĢĿ":32945,"ĠCabinet":32946,"-square":32947,"éĥ¨":32948,"Ġrage":32949,"ĠAJ":32950,"ĠVT":32951,"shadow":32952,"ĠFaith":32953,"enames":32954,"pretty":32955,"hasil":32956,"party":32957,"Ġvarchar":32958,"Ġfotos":32959,"Ġalum":32960,"ĠBelgium":32961,".ylabel":32962,"Ġdej":32963,"_numbers":32964,"Ġhu":32965,".setAdapter":32966,"ĠUsually":32967,"(sample":32968,".Shared":32969,"Ġbooked":32970,"Ġ>>=":32971,"Ġminerals":32972,"\">":32991,"prog":32992,"boo":32993,"_md":32994,"_pack":32995,"(express":32996,"utz":32997,"\\Auth":32998,",id":32999,"ĠChile":33000,"actice":33001,"Ġrecruitment":33002,"Ġposes":33003,"Ġvulnerability":33004,"instanc":33005,"orum":33006,"dess":33007,"Ġxl":33008,"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%":33009,"(fig":33010,"Ġdeleting":33011,".del":33012,")')Ċ":33013,"ĠWeekly":33014,"???":33015,"(strcmp":33016,"smith":33017,"Ġpursuing":33018,"-so":33019,"ĠApps":33020,"/'Ċ":33021,"Ġdecis":33022,"FORE":33023,"Everyone":33024,"Ġlanes":33025,"Virtual":33026,".attach":33027,"(Log":33028,"ĠMedicaid":33029,"(Path":33030,"ĠTurner":33031,"/application":33032,"Ġportrait":33033,"Ġoppose":33034,"checkout":33035,"Ġfinishes":33036,"_ME":33037,"Barrier":33038,"Song":33039,"VAR":33040,"Earlier":33041,"rella":33042,"Ġhast":33043,"azar":33044,"Ġpulls":33045,"ngx":33046,"Ġinspiring":33047,"ÑĥÑİ":33048,"-direction":33049,"Ġexplosive":33050,"ĠcreatedAt":33051,"sto":33052,"Ġwheat":33053,"ĠBuilt":33054,"'ai":33055,"Ġtracked":33056,"hammad":33057,"RowAtIndexPath":33058,"_heap":33059,"Due":33060,"Ġconn
ects":33061,".publish":33062,"emu":33063,"Ġbullets":33064,"BAR":33065,"olate":33066,"Ġinternally":33067,"Ġcatching":33068,"-password":33069,"ouched":33070,"æĢ§":33071,"eous":33072,"Ġxrange":33073,"Quality":33074,"vv":33075,"Manage":33076,"(($":33077,"acements":33078,"ĠBrothers":33079,"ĠHEAD":33080,"ĠUnsupported":33081,"san":33082,"esi":33083,"***Ċ":33084,"Ġadaptation":33085,"ĠWorker":33086,"']/":33087,".savefig":33088,"(trans":33089,"ج":33090,"nee":33091,"Correct":33092,"...\")Ċ":33093,"Ġsubmitting":33094,"-path":33095,"ĉlast":33096,"issan":33097,".xlabel":33098,"ĠSepar":33099,"/no":33100,"_best":33101,"ĠMills":33102,"_sock":33103,"(flag":33104,"Ġdestinations":33105,"emption":33106,"ĠFAIL":33107,"åĴĮ":33108,"Ġrp":33109,"fact":33110,"ĉlen":33111,"DAY":33112,"Ġseiz":33113,"_dst":33114,"lip":33115,".Linear":33116,"ĠBasket":33117,"$t":33118,"$i":33119,"-brand":33120,"ĠNeil":33121,"ĠEq":33122,"Ġthou":33123,"ogene":33124,"Ġscholarship":33125,"æĽ´":33126,"Ġswo":33127,"aginator":33128,"eni":33129,"(book":33130,"Ġblink":33131,"thus":33132,"ĠcancellationToken":33133,"ĠPalestinians":33134,"Ġprofitable":33135,"Ġbackpack":33136,"enson":33137,"true":33284,"ĠNYC":33285,"Ġbored":33286,"ĠDetect":33287,"Ġappar":33288,"Ġjeans":33289,"ĠTak":33290,"IOD":33291,"ĠHorse":33292,"(FILE":33293,"(?":33294,"rique":33295,"optimizer":33296,"nat":33297,"loys":33298,"ĉToken":33299,"oubted":33300,"uess":33301,"ocoa":33302,"DataMember":33303,"_POWER":33304,"classList":33305,"PushButton":33306,"ĠWiFi":33307,".Stream":33308,".guild":33309,"Ġnog":33310,"ĠPortugal":33311,"ĠUnter":33312,"Primitive":33313,"boss":33314,"ĠDeutsch":33315,"Ġerotic":33316,"Ġstrconv":33317,".TryParse":33318,"Ġgrams":33319,".Success":33320,"_pk":33321,"ĠHarvey":33322,"-minded":33323,".country":33324,"[]\"":33325,"Ġangel":33326,"Ġbeats":33327,"ĠVor":33328,"ilio":33329,".master":33330,"something":33331,"ĠPACK":33332,"(if":33333,"RequestBody":33334,"Ġantes":33335,"/widget":33336,"Ġmodo":33337,"ĠAW":33338,"finder":33339,"Ġoptimized":33340,"Ġmissiles":33341,"NB":33342,"ĉinternal":33343,"tex":33344,"ĠSri":33345,"Ġdamaging":33346,"ĠMais":33347,"-Allow":33348,"ĠZh":33349,"-alt":33350,"Ġ));ĊĊ":33351,"èī":33352,"Ġinfluences":33353,"Ġcatal":33354,"_REGISTER":33355,"ĠAPIs":33356,"-century":33357,"Ġbiology":33358,"ĠActual":33359,"Ġheels":33360,"TRACE":33361,"_DIG":33362,"Dataset":33363,"ĠMatter":33364,"Ġclassifier":33365,".wikipedia":33366,"ĠRogers":33367,"Ġdonated":33368,"rawler":33369,"enen":33370,"Ġcasinos":33371,"ortal":33372,"Ġprive":33373,"spe":33374,"ducers":33375,".ep":33376,"Ġgrasp":33377,"acji":33378,"Ġdairy":33379,"Ġbuses":33380,".comm":33381,".ins":33382,"ĠIRS":33383,"ĠBeer":33384,"adc":33385,"oard":33386,"_MET":33387,"Ġ'+'":33388,"rans":33389,"Ġkinda":33390,"ĠâĶĤ":33391,"ĠMaur":33392,"аг":33393,"Ġbandwidth":33394,"ibus":33395,"ĠDifferent":33396,"(mat":33397,"ĠResume":33398,"_UNS":33399,"establish":33400,"Ġfonction":33401,"Subscription":33402,"_company":33403,"Ġlightly":33404,".confirm":33405,".yaml":33406,"ĠBoost":33407,"Commerce":33408,"-template":33409,"_DELAY":33410,"ĠHI":33411,"Ġnavig":33412,"(Sender":33413,"ĠHS":33414,"_\"+":33415,"ĠREQUEST":33416,"Ġwifi":33417,"=\"\"Ċ":33418,"])->":33419,"Ġrope":33420,"Ġviolated":33421,"Ġglance":33422,"ĠKurd":33423,"Ġè®":33424,"deck":33425,"ĠISBN":33426,"Ġinfect":33427,"ĠFoo":33428,"Ġgetter":33429,"Ġtener":33430,"appe":33431,".hh":33432,"_hot":33433,"\".$":33643,"Ġrelies":33644,"(Console":33645,"International":33646,"->{$":33647,"Mid":33648,"Ġdissert":33649,"dds":33650,"Ġdeposits":33651,"ĉdriver":33652,"#ga":33653,
"prising":33654,"println":33655,"Ġpresenter":33656,"Ġmines":33657,"CSS":33658,"ĠDual":33659,"(!(":33660,"Ġkam":33661,"ĠisLoading":33662,"ĠProtect":33663,".upper":33664,"arium":33665,"]:ĊĊĊ":33666,"Yii":33667,"-shirt":33668,"ĠIMAGE":33669,"_colors":33670,"Ġurgent":33671,".Container":33672,"!(Ċ":33673,"Saturday":33674,"Ġsocieties":33675,"ĠThan":33676,"ĠCod":33677,"=@":33678,"Ġattachments":33679,".mobile":33680,"Ġspite":33681,"Ġbounce":33682,"rawl":33683,"instancetype":33684,"ĠTruck":33685,"Ġmanipulation":33686,"(Config":33687,"-inst":33688,"Ġstor":33689,"itution":33690,"PreferredGap":33691,"ĠmainAxisAlignment":33692,"Ġlistened":33693,"'''ĊĊ":33694,"ottage":33695,"-project":33696,".APPLICATION":33697,"ĉroot":33698,"Ġwhit":33699,"Ġbilder":33700,"Ġker":33701,"Ġappliances":33702,"rowave":33703,"ìĿĢ":33704,"ematics":33705,"ĠOrg":33706,"oping":33707,"_SEARCH":33708,"Ġcham":33709,"addContainerGap":33710,"Ġ().":33711,"ĠArrow":33712,"Illegal":33713,"Currently":33714,"Ġusa":33715,"Ġpasswords":33716,"Ġrenown":33717,"avern":33718,"ĠEvil":33719,"Ġconcat":33720,"Ġduo":33721,"Ġvale":33722,"ĠBean":33723,"Ġindicators":33724,"cmath":33725,"ĠPump":33726,"November":33727,"ificant":33728,"_DOMAIN":33729,"regar":33730,"ĠPortal":33731,"\"$":33732,"Ġformerly":33733,"\"]:Ċ":33734,"ĠVisibility":33735,".getElementsByClassName":33736,"_RED":33737,"Ġchampions":33738,"à´":33739,"Valor":33740,"_es":33741,"*a":33742,"-repeat":33743,"Band":33744,".stage":33745,"Ġbureauc":33746,"Cnt":33747,"eten":33748,"-function":33749,"Ġmuito":33750,"PID":33751,"_editor":33752,"Ġcrashed":33753,"dead":33754,"kat":33755,"agh":33756,"ĠEXT":33757,"asser":33758,"-small":33759,"Ġrealiz":33760,"(Entity":33761,"ús":33762,"ĠActually":33763,"ĠElite":33764,"Ġhelm":33765,"(nonatomic":33766,"asher":33767,"Community":33768,"alleng":33769,"iry":33770,"ĠGrowth":33771,"Ġsue":33772,"Ġfrequencies":33773,"_descriptor":33774,".Attribute":33775,"Ġrecipients":33776,"_NS":33777,"/\"+":33778,"iban":33779,"Ġathlete":33780,"ĠIgn":33781,"_DMA":33782,"(ds":33783,"ĠRequirements":33784,"ADI":33785,"erez":33786,"\\Admin":33787,"braska":33788,"ĠRust":33789,"Relation":33790,"COD":33791,"ĠVERSION":33792,"emma":33793,")){":33794,".Duration":33795,"ĠCamb":33796,"-logo":33797,"Ġreadable":33798,"Ġcreators":33799,"()];Ċ":33800,"UpDown":33801,"-half":33802,".getMonth":33803,"(sf":33804,"Pic":33805,"Ġhunger":33806,".tx":33807,"Ġexceeded":33808,"_seed":33809,"(^":33810,"_sk":33811,".perform":33812,"Ġ>::":33813,"Ġmongo":33814,"=float":33815,"bindParam":33816,"Smart":33817,"ifa":33818,"Ġsecurities":33819,"Ġprejud":33820,"Ġ,\"":33821,"Ġcorps":33822,"Ġvra":33823,"amacare":33824,"iterr":33825,"(Media":33826,"uche":33827,"Ġcob":33828,"Ġliber":33829,".geometry":33830,"Locator":33831,"Ġsliding":33832,"Ġsurgical":33833,"_CUR":33834,"Ġconsect":33835,"[*":33836,"ĠResort":33837,"Stub":33838,"_DOUBLE":33839,"ĠSoph":33840,"Ġelectoral":33841,"_disable":33842,"ĠÑģо":33843,"ĠLightning":33844,"Ġmentions":33845,"ocy":33846,"Ġleaked":33847,"Ġrelaxing":33848,"Presenter":33849,"vsp":33850,"Ġguilt":33851,"=-=-":33852,".reply":33853,"ĠMirror":33854,"Camp":33855,"Ġ+#+#+#+":33856,"Ġ+#+#+#+#+#+":33857,".Author":33858,"Ġdirective":33859,"-hook":33860,"íĦ°":33861,"}ĊĊĊĊĊ":33862,"@pytest":33863,"_rand":33864,"mis":33865,"Ġcolorful":33866,"uje":33867,"lasses":33868,"ĠClasses":33869,".have":33870,"%),":33871,"é¢ĺ":33872,"Ġdisturbing":33873,"substring":33874,"ĠKoh":33875,"Invest":33876,"purchase":33877,"Ġrecycling":33878,"ĠART":33879,"ierarchy":33880,"Ġfps":33881,".checkBox":33882,"íķ´":33883,"_material":33884,"duc
ation":33885,"Ġfw":33886,"udit":33887,"Ġreviewing":33888,"ĠSid":33889,"Syntax":33890,"ĠWritten":33891,"argar":33892,"UME":33893,"/q":33894,"Classifier":33895,"Official":33896,"Ġjazz":33897,"Ġomega":33898,"Physics":33899,"Ġlugar":33900,"_accessor":33901,".commands":33902,"Ability":33903,"ĠBatch":33904,"RAM":33905,"Ġencounters":33906,".Qu":33907,"BYTE":33908,"ĠDistribution":33909,"Ġuso":33910,"ĠRecovery":33911,"approved":33912,"Ġdenial":33913,"/share":33914,"LinkedList":33915,")čĊčĊčĊ":33916,"uddy":33917,"Ġfines":33918,"Ġry":33919,"Unicode":33920,"ĉrender":33921,"Ġpremises":33922,"Ġpon":33923,"aliases":33924,"/Foundation":33925,"cuda":33926,"ĠCock":33927,",:)":33928,"(folder":33929,"Ġméd":33930,"drag":33931,"Ġtalents":33932,"ĠĠĠĊĊ":33933,"еÑģÑĤв":33934,"mob":33935,".yml":33936,"Ġaster":33937,"Ġdiscre":33938,"goal":33939,"ĠGTX":33940,"ĠSUCCESS":33941,"ĠLONG":33942,"(find":33943,"Ġsingular":33944,"_sz":33945,"ĠEthereum":33946,"..Ċ":33947,"Ġirres":33948,"')){Ċ":33949,"Ġministers":33950,"Steps":33951,"iversal":33952,"ĠNevertheless":33953,"-led":33954,"Ġ(%)":33955,"ç¡®":33956,"Ġtimezone":33957,"Ġstranger":33958,"(render":33959,"Ġshutil":33960,"Ġmph":33961,"Ġtrio":33962,"ppy":33963,"Ġpredomin":33964,"Ġendors":33965,"ĠRussians":33966,"ĉrow":33967,"Ġwizard":33968,".serialize":33969,"Ġcomplained":33970,"Ġsido":33971,"Ġdelighted":33972,"-me":33973,"ĠRav":33974,"Human":33975,"adays":33976,"recv":33977,"Working":33978,"Jump":33979,"ĠÃ¥r":33980,"ĠAutomatic":33981,"_Base":33982,"æł¼":33983,"aurants":33984,"¯":33985,"æ¸":33986,"(CType":33987,"IFI":33988,"(amount":33989,"Ġbelieving":33990,"=mysql":33991,"Ġfir":33992,"Ġrestoration":33993,"ereco":33994,"Т":33995,"_'+":33996,"Ġebook":33997,"Ġdebris":33998,"(inputs":33999,"AYOUT":34000,"Ġscreaming":34001,"avia":34002,"lander":34003,"Ġdistress":34004,"Ġassembled":34005,"ĠAvoid":34006,"(thread":34007,"ĠRPC":34008,"_EXIT":34009,"(queue":34010,"иÑģÑĤ":34011,"Dll":34012,"Ġskull":34013,"_pub":34014,"chez":34015,"minate":34016,"ensen":34017,"Ġinsane":34018,"bounds":34019,"ĠRosen":34020,"Ġconditioning":34021,"processed":34022,"videos":34023,"four":34024,".Conv":34025,"|;Ċ":34026,"Personal":34027,"cerpt":34028,":UIControlStateNormal":34029,"Ġdoses":34030,"ĠKarl":34031,"ĠFrequ":34032,".BASE":34033,"ĠVote":34034,"Ġconcurrent":34035,"ĠMessageBoxIcon":34036,"ĠÃĸ":34037,"ĠDubai":34038,"ĠRetail":34039,":number":34040,"ĠObserver":34041,"ĠBigInteger":34042,"_origin":34043,"_WORK":34044,"Frames":34045,"Ġnotably":34046,".âĢľ":34047,"Ġtropical":34048,"Ġniche":34049,"amina":34050,".sys":34051,"(tokens":34052,"modify":34053,"osit":34054,"strom":34055,"ĠComics":34056,"OPTION":34057,"Ticket":34058,"Ġfactories":34059,"Ġdisput":34060,"_File":34061,"ĠFinn":34062,"eee":34063,"ĠDiscord":34064,"_money":34065,".tpl":34066,"_safe":34067,"LB":34068,"Ġglut":34069,"JK":34070,".flow":34071,"-cont":34072,"gos":34073,"Ġhorizon":34074,"ĠRush":34075,"::*":34076,"Pipe":34077,"ulla":34078,"borough":34079,"heimer":34080,"(move":34081,"(Text":34082,"});čĊčĊ":34083,"welcome":34084,"ĠComponents":34085,"Ġgovernance":34086,"closed":34087,"ĉmargin":34088,"Ġlaundry":34089,"ĠTerminal":34090,"izards":34091,".âĢĶ":34092,".remote":34093,".radius":34094,"ĠQuebec":34095,"Ġdh":34096,"Tech":34097,"ĠMist":34098,"seller":34099,"_literal":34100,"Ġgenius":34101,"Ġbrains":34102,"gem":34103,"ĠMeasure":34104,"Ġcatast":34105,"rance":34106,".TextField":34107,"Ġconsuming":34108,"Ġ'\\''":34109,"oubtedly":34110,"ĠCertain":34111,"Ev":34112,"erti":34113,"being":34114,"Experience":34115,"Ġ//[":34116,"ĠArabic":34117,"ĠCrist":34118,"ĠA
zure":34119,"Ġhora":34120,"ladesh":34121,"\\Blueprint":34122,"dar":34123,".rel":34124,"Ġsuprem":34125,"ĠReagan":34126,"ĠAttributes":34127,"-sidebar":34128,"ĠuseStyles":34129,"ĠAirlines":34130,"Ġhills":34131,"/xhtml":34132,"vinc":34133,"_mock":34134,"ĊĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":34135,"ĠPill":34136,".LayoutStyle":34137,"ĠCommander":34138,"]<":34139,"signature":34140,"Ġ{}čĊ":34141,"Ġhatred":34142,"Ġëĭ":34143,"olesterol":34144,"Ġ********":34145,"ancellor":34146,"crop":34147,"TIM":34148,"ĉĉĊĊ":34149,"ysqli":34150,"uitive":34151,"ĉunset":34152,"_sel":34153,"Ġmenus":34154,"tick":34155,"Ġconstitute":34156,"ĠElements":34157,"ĠRedis":34158,"aggio":34159,"_fp":34160,"_depend":34161,"emas":34162,"CAST":34163,"orange":34164,"jon":34165,"ĠEmily":34166,"Ġpotatoes":34167,"Ġreceptor":34168,"ĠElectronic":34169,"ĠLights":34170,"Ġcombining":34171,"ĠSomeone":34172,"Ġ########.":34173,"ĠTOD":34174,"/show":34175,"Xd":34176,".\"'":34177,"afx":34178,"Ġtragic":34179,"Styled":34180,"ĠMarco":34181,"Gallery":34182,"dale":34183,".âĢĿĊĊĊĊ":34184,"érie":34185,"/service":34186,"äºĨ":34187,"Ġambient":34188,"_SETTINGS":34189,".Adapter":34190,"lene":34191,"Ġtravels":34192,"Notice":34193,"Ġcleans":34194,"ĠFem":34195,"chair":34196,"Ñĥн":34197,"/my":34198,"_bad":34199,"ĠEconomics":34200,"ISA":34201,"_CNT":34202,"(Menu":34203,"äºİ":34204,"ĠRidge":34205,"Ġlengthy":34206,"Dot":34207,"Ġjumps":34208,"Ġhey":34209,"$pdf":34210,"Ġworm":34211,"Ġsut":34212,"Ġsher":34213,"iamo":34214,"ĠCalc":34215,"trieve":34216,"Ġcops":34217,"ĠChrom":34218,"Ġregulated":34219,"reatment":34220,"ĠHigher":34221,"oks":34222,"Ġdeze":34223,"LOCATION":34224,"ongsTo":34225,"Ġfinite":34226,"Ġvaries":34227,"Ġpositioned":34228,"'il":34229,"éĩij":34230,"Ġhike":34231,"(done":34232,"playlist":34233,"Ġada":34234,"Ġcoastal":34235,"ĠNancy":34236,".DateTimeField":34237,"CppCodeGen":34238,"ĠSimilarly":34239,"reur":34240,"ĠContr":34241,"ĠHidden":34242,"ĠBeta":34243,"atched":34244,"_install":34245,".Output":34246,"Lookup":34247,"ĠRichmond":34248,"quared":34249,"Ġmanga":34250,"-controls":34251,"ĠBernard":34252,"Large":34253,"Ġslices":34254,"Ġoffence":34255,"ĠMega":34256,"Ġestar":34257,"Ġjoints":34258,"Ġsumm":34259,"_platform":34260,"Buff":34261,".addSubview":34262,"Ġretained":34263,"Letter":34264,".dim":34265,"Ġessere":34266,"ĠScaffold":34267,"EXPECT":34268,"ĉRE":34269,".longitude":34270,"ünd":34271,"Ġstatue":34272,".addWidget":34273,"ĠCaribbean":34274,"addPreferredGap":34275,"ilde":34276,"UILabel":34277,"ĠOpport":34278,"Ġimperial":34279,"ursion":34280,"Ġmandate":34281,"Ġpromotional":34282,"Ġvk":34283,"iaÅĤ":34284,"Ġpyl":34285,"ĠCreation":34286,"озд":34287,"Ġsimpler":34288,".what":34289,"ĠRecent":34290,"Storm":34291,".quantity":34292,"ĠLov":34293,"\"-":34294,"ubbles":34295,"_notification":34296,"(world":34297,"urger":34298,"*(-":34299,":\"Ċ":34300,"hm":34301,"anship":34302,"ĠAlmost":34303,"Ġmotorcycle":34304,"_fee":34305,"Ġabsorb":34306,"ĠVincent":34307,"Ġsounded":34308,"ÃŃst":34309,"Ġpharmaceutical":34310,"htag":34311,"ĠKindle":34312,"italize":34313,"ĠEmperor":34314,"oustic":34315,"Ġspecialists":34316,"åħ¬":34317,"BorderStyle":34318,"/\\":34319,"RELATED":34320,"(',',":34321,"(expr":34322,"Ġht":34323,"åįĪ":34324,"_Create":34325,"Ġspecially":34326,"Ġ[];čĊ":34327,"Ġheel":34328,"Ġsept":34329,"_arch":34330,"(initial":34331,"%.ĊĊ":34332,"\\\",\\\"":34333,"Ġdiscusses":34334,"Ġupt":34335,"Ġ[&":34336,"Ġmanus":34337,".hand":34338,"ĠMAIN":34339,"ĠDenmark":34340,"Ġ],čĊ":34341,"Ġcryst":34342,"Ġnack":34343,"Coords":34344,"_inner":34345,"Ġmidst":34346,"Ġawake":34347,"ĠÐŀ":34348,"-break":34349,"ÃŃvel"
:34350,"_PASS":34351,"ĠParams":34352,"Ġdetr":34353,"Ġspider":34354,"ĠConcept":34355,"Ġprend":34356,"CHED":34357,".Exit":34358,"Ġpopulated":34359,"Ġvirtue":34360,"_SESSION":34361,"Ġnouvel":34362,"oauth":34363,"ĠданнÑĭ":34364,"rink":34365,".HeaderText":34366,"aturated":34367,"Ġerst":34368,"Ġåħ":34369,"à¥ĩ":34370,"_visible":34371,"eyer":34372,"Ġliable":34373,"Ġdebe":34374,"Ġbw":34375,"{-#":34376,"_WIN":34377,"dfs":34378,"Hover":34379,"ĠPUT":34380,"-angle":34381,"Ġnoble":34382,"Ġtraces":34383,"encv":34384,"ĠuserData":34385,"_ins":34386,"ĠSuz":34387,"Ġnewsletters":34388,"ĠModi":34389,"Ġentrepreneurs":34390,"Ġtribute":34391,"Ġrumors":34392,"Ġrr":34393,"ĠQuarter":34394,"ê³ł":34395,"Ġfeeds":34396,"óg":34397,"Ġenvelope":34398,"Ġlear":34399,"Ġkø":34400,"developer":34401,"Similar":34402,":\")Ċ":34403,"subscription":34404,"Modifier":34405,"italic":34406,"Ġnasty":34407,"Ġtermination":34408,"Ġcharming":34409,"ĠâŁ":34410,"tons":34411,".trace":34412,"hots":34413,"ĠUR":34414,"Mont":34415,"Ġjustified":34416,"ĠGang":34417,"inea":34418,"Ġbog":34419,"(ap":34420,"_$":34421,"Ġcontamin":34422,".Dot":34423,"ĉDebug":34424,"(exports":34425,"Ġpaired":34426,"ĠAssignment":34427,"Ġautomobile":34428,"ĵį":34429,"Ġphases":34430,"vw":34431,"@SuppressWarnings":34432,"=\\":34433,"rant":34434,"-ed":34435,"ĉawait":34436,"Ġcertificates":34437,"'>\"":34438,"Ġintact":34439,"CTRL":34440,"Mike":34441,"gregation":34442,"ATTERN":34443,"Ġrepublic":34444,"_upper":34445,"iliary":34446,"Ġcomputation":34447,"hire":34448,"ĠShin":34449,"_ANY":34450,"ĠManufacturer":34451,"ĠCarm":34452,"Ġbearings":34453,"_comb":34454,"cad":34455,"uristic":34456,"Ġwholesale":34457,"Ġdonor":34458,".interfaces":34459,"presso":34460,"ĠBrun":34461,"-close":34462,"prove":34463,"_SK":34464,"ĉframe":34465,"etros":34466,"ĠPain":34467,"_EXP":34468,"ĠLT":34469,"_fs":34470,".datas":34471,"ĉss":34472,"voir":34473,"ĠAxis":34474,"Major":34475,"=\"<":34476,"[h":34477,"Ġprofess":34478,"igrate":34479,"(score":34480,"Keyword":34481,"\"os":34482,"ĠĠĠĠĉĊ":34483,"analysis":34484,"Ġreplay":34485,".pass":34486,"\\d":34487,"tls":34488,"Ġsanct":34489,".light":34490,"_mobile":34491,"ÑģÑĤÑĮ":34492,"ĉtotal":34493,"uity":34494,"Ġpaused":34495,"NAS":34496,"Ġencore":34497,"loe":34498,"Ġ-*-ĊĊ":34499,".high":34500,"ampler":34501,"ĠSecure":34502,"Ġfragments":34503,"_vel":34504,"illary":34505,"ĠStein":34506,"ĠDawn":34507,"Ġmaximize":34508,"ย":34509,"Ġ/^":34510,"Ġcontinually":34511,"Ġshadows":34512,"ĉĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":34513,"ĠIActionResult":34514,"Ġinformación":34515,"CHECK":34516,".SelectedItem":34517,"bundle":34518,"olley":34519,"<":34681,"Ġtrajectory":34682,"_ring":34683,"Ġhydrogen":34684,"tron":34685,"Ġstatute":34686,"Ġconditional":34687,"Ġtray":34688,"-school":34689,"(widget":34690,"$config":34691,"Ġrequesting":34692,".uint":34693,"eton":34694,"brities":34695,"OfType":34696,"ADMIN":34697,"predict":34698,"Ġgegen":34699,"ĠHapp":34700,"OCUMENT":34701,"ĠApart":34702,"Ġ-----":34703,"roe":34704,"uide":34705,"justify":34706,"ĠSquad":34707,"Ġprofes":34708,".bot":34709,"_currency":34710,"innen":34711,"ĠMumbai":34712,"ĠNumbers":34713,"avanaugh":34714,"agnitude":34715,"âĢľThere":34716,"=http":34717,"çīĩ":34718,"Ġvb":34719,"+'{{$":34802,"Ġinode":34803,"sil":34804,"Ġhace":34805,"Ġseverely":34806,"ĠOverview":34807,"Ġspraw":34808,"Ġbeaches":34809,":left":34810,"·»":34811,"(${":34812,"ĠFIRST":34813,"ĠSpa":34814,"-ass":34815,"Ġbaise":34816,"ĠNODE":34817,"ĠPizza":34818,"Pet":34819,"(seq":34820,"\\\">Ċ":34821,"CppMethodPointer":34822,"Ġvp":34823,"Ġia":34824,"_seconds":34825,"emet":34826,"/blob":34827,"_THRES
H":34828,"...čĊ":34829,"Dest":34830,"ĠNH":34831,".dataSource":34832,"ités":34833,"ĠJak":34834,"sell":34835,"Ġworkshops":34836,"\",Ċ":35452,"_Pin":35453,"uese":35454,"Ġoverrides":35455,"_ready":35456,"Advanced":35457,"Ġopi":35458,"-cart":35459,"(\"/\",":35460,"ĠDeb":35461,"CRY":35462,"ĠVertical":35463,"ĠOVER":35464,"ĠCorporate":35465,"Ġ\"\";":35466,"Ġstepping":35467,"ej":35468,"Ġaccusations":35469,"Ġoraz":35470,"_tail":35471,"Ġinduced":35472,"Ġelastic":35473,"Ġblown":35474,",//":35475,"Ġbackgrounds":35476,"âĢĻune":35477,"-sdk":35478,"ĠsetInterval":35479,"Ġincentives":35480,"Ġvegetable":35481,"_On":35482,"expanded":35483,"pix":35484,"_shader":35485,"ĠSPDX":35486,"@example":35487,"ĠWrapper":35488,".Zero":35489,"Positive":35490,"Ġspinner":35491,"Ġinvented":35492,"ĠGates":35493,"оÑĤоÑĢ":35494,"Ġcomparisons":35495,"è·":35496,".primary":35497,"dataProvider":35498,"additional":35499,"ĉoptions":35500,"snapshot":35501,".setHorizontal":35502,"Ġ\"{}":35503,"ĠFisher":35504,"halten":35505,"":35538,"ĠRegistered":35539,"INED":35540,"kal":35541,"parison":35542,"Ġobjeto":35543,"Vi":35544,"manda":35545,"Ġrenewed":35546,"ĠSof":35547,"essel":35548,".ndarray":35549,"Ġcrap":35550,"管":35551,".abspath":35552,"(up":35553,"Ġclearance":35554,"ĠTW":35555,"_COPY":35556,"ĠĠĠĠĠĠĠĠĠĠĠĠĉ":35557,"Ġforests":35558,"Ġarguably":35559,"ĠASS":35560,"hey":35561,"amel":35562,"_fore":35563,"ĠSoutheast":35564,"Ġabused":35565,"Ġpracticing":35566,"akedirs":35567,"主":35568,"_resources":35569,"Ġpond":35570,".Fixed":35571,"LastError":35572,"ĠPsychology":35573,"Ġ\"//":35574,"!:":35575,"Reusable":35576,"Ġmensaje":35577,"Ġrospy":35578,"Ġbour":35579,"Ġvarieties":35580,"Ġempath":35581,"(({":35582,"_org":35583,"ĠMes":35584,"ĠMagento":35585,"ISTORY":35586,"Unless":35587,"Ġhj":35588,"ĠDuty":35589,"Jun":35590,",size":35591,"Ġpaintings":35592,"Ġdispens":35593,"dart":35594,"Ġbehavioral":35595,"Ġrpc":35596,"calculate":35597,"fruit":35598,"_mm":35599,"ĉpthread":35600,"MaxLength":35601,"Ġcurrencies":35602,"_capacity":35603,"ĠOz":35604,"Ġfirearm":35605,"Ġcoefficient":35606,"Ġbankruptcy":35607,"wart":35608,"Ġfatigue":35609,"AVA":35610,"Ġespa":35611,"_pc":35612,"ĠQuotes":35613,"_LIGHT":35614,"ĠTickets":35615,"Ġrelates":35616,"Ġpublishers":35617,"Ġunlocked":35618,"Ġ//----------------------------------------------------------------":35619,"ĠInterruptedException":35620,"Ġoutlook":35621,"rn":35622,"Ġrebels":35623,"Written":35624,"Ġasian":35625,"otto":35626,"Ġĉĉĉĉ":35627,"_gpu":35628,"Txt":35629,".ImageView":35630,"Ġsuis":35631,"_tables":35632,".RecyclerView":35633,"Ġwhatsoever":35634,"èģ":35635,"]++;Ċ":35636,"assertTrue":35637,"_verify":35638,"ĠRivers":35639,"Ġ][":35640,"Jet":35641,"idian":35642,"Sibling":35643,"Ġgenres":35644,".Access":35645,"OPS":35646,"Ġtrivial":35647,"ส":35648,"alen":35649,"вед":35650,"ĠSword":35651,"Ġscrutiny":35652,"(cb":35653,"Ġcommerce":35654,"Ġguarantees":35655,"_adv":35656,"ĠLET":35657,"recio":35658,"Ġhilar":35659,"Ġbackyard":35660,"ãĢı":35661,"Ġillustrated":35662,"/vendor":35663,".Util":35664,"Ġwow":35665,"LOY":35666,"ĠMarshal":35667,"\">'.$":35668,"ĠBak":35669,"Ġmodifiers":35670,"dictionary":35671,"ĠStre":35672,"multiple":35673,"\")),":35674,"ĠCort":35675,"']\").":35676,"(admin":35677,"ĠCreator":35678,"Internet":35679,"(ms":35680,"logy":35681,"DECLARE":35682,"ĠMarcus":35683,"<<<<":35684,"ãģł":35685,"_my":35686,"(inst":35687,"Ġsciences":35688,"NDER":35689,".enter":35690,"Ġitu":35691,"Ġbehave":35692,"Pan":35693,"ombies":35694,"='<":35695,"'));čĊ":35696,"ĠMENU":35697,"ĠWorkers":35698,".NoError":35699,"Ġbindings":35700,"Ġdisabilities":
35701,"{\\":35702,"ĠMunicip":35703,"Ġcores":35704,"urple":35705,"ĠNokia":35706,"usions":35707,"ĠFitness":35708,".handleChange":35709,"Ġjavascript":35710,"ìļĶ":35711,"(dec":35712,"Ġpacking":35713,"-depend":35714,"Ġtranscript":35715,"zeros":35716,"_alert":35717,"?\",Ċ":35718,"libs":35719,"±Ð¾ÑĤ":35720,"Ġ|ĊĊ":35721,"trained":35722,"ĠGent":35723,"ĠRab":35724,"xp":35725,"_configuration":35726,"天":35727,"_accept":35728,".recyclerview":35729,":url":35730,"ĠMuhammad":35731,"Ġprivileges":35732,"_bank":35733,"uku":35734,"wallet":35735,"ĠROOT":35736,"Ġencuent":35737,"?family":35738,"ĉposition":35739,"Ġcg":35740,"Ġprecip":35741,"methods":35742,"_fast":35743,"increment":35744,"ĠTiger":35745,"_OCCURRED":35746,"quip":35747,"ĠHAS":35748,"_dom":35749,"Ġwreck":35750,"bj":35751,"Ġdern":35752,"Ġorgans":35753,".entries":35754,"Ġ_('":35755,"ramento":35756,"ĠJamie":35757,"Ġpunk":35758,"IPP":35759,"Ġprograma":35760,"Ġattain":35761,"Ġproves":35762,"/sign":35763,"Ġanswering":35764,"Ġladder":35765,"****************************":35766,"ĠWalmart":35767,"ĠCONTENT":35768,"ductor":35769,"Ġverbal":35770,"ĠPID":35771,"crypto":35772,"_CALLBACK":35773,"Ġ=================================":35774,"Ġpotent":35775,"Ġshorts":35776,".Uri":35777,".uniform":35778,";border":35779,"ĠWer":35780,"Ġherein":35781,"lla":35782,"ĠIhr":35783,"Pixmap":35784,"literal":35785,"!)ĊĊ":35786,"generic":35787,"rust":35788,"_scripts":35789,"osto":35790,"itus":35791,"ĠCoalition":35792,"Ġremot":35793,"deploy":35794,"ĠEagle":35795,"ãĢģãĢĮ":35796,"Ġimportante":35797,"ĉobject":35798,"Ġseasonal":35799,"nej":35800,"aidu":35801,"BindView":35802,"ĠSierra":35803,"-bg":35804,"ĠmakeStyles":35805,"[offset":35806,"Games":35807,"Ġhormone":35808,"ARIO":35809,"heads":35810,"(select":35811,"ĠStarted":35812,"@param":35813,"_decl":35814,"_blog":35815,"Ġaño":35816,"\\Api":35817,"ĠMilwaukee":35818,"Provid":35819,"Animated":35820,"Ġcooler":35821,"ĠSeed":35822,".Edit":35823,"ÏĦ":35824,"ĠTaking":35825,"ĠborderColor":35826,"-founder":35827,".LoggerFactory":35828,"Ġ\"\"ĊĊ":35829,"ALT":35830,"ĠLate":35831,"EDIATE":35832,"Ġ);ĊĊĊ":35833,"afa":35834,"Ġcancellation":35835,"Atom":35836,"ĠBirmingham":35837,"empresa":35838,"HEMA":35839,"ascal":35840,"Ġupside":35841,".Version":35842,"ĠFolder":35843,"ĠEight":35844,"ĠVintage":35845,"ĠAppDelegate":35846,"ĠPrevention":35847,".separator":35848,"STM":35849,"(room":35850,"generator":35851,"Ġcattle":35852,"ĉZ":35853,"ĠParticle":35854,"'};Ċ":35855,"Ġneighbours":35856,"ĠStateless":35857,"Ġaltitude":35858,"Ġsaint":35859,"обав":35860,"Ġconvinc":35861,"ĠContents":35862,"Ġjeune":35863,"(ts":35864,"Serialization":35865,"(collection":35866,"ĠJazz":35867,"ĠDod":35868,"ĠRoch":35869,"acio":35870,"commended":35871,"DEFINE":35872,".onload":35873,"Ġspecialty":35874,"PLACE":35875,"_MOVE":35876,"Ġaccountable":35877,"Reuters":35878,"Ġficken":35879,"Ġdepr":35880,"Wow":35881,"Void":35882,".space":35883,"à¸Ĺ":35884,"Ġtq":35885,"ĠPets":35886,"<$":35887,"(Current":35888,"berries":35889,"planation":35890,"ĠlistOf":35891,"ĠThu":35892,"ĠPRINT":35893,"Ġmismo":35894,"Ġdoi":35895,"chk":35896,"ĠUnicode":35897,"(role":35898,"Ġvirgin":35899,"-->Ċ":36360,"Vol":36361,"ĠSSD":36362,"))),":36363,".Optional":36364,"Ġnurses":36365,"Ġorb":36366,"_pe":36367,");čĊčĊčĊ":36368,"placed":36369,"esser":36370,"Ġtherapeutic":36371,"Ġwhitespace":36372,"Ġaston":36373,"Successful":36374,"Ġpraised":36375,"ĠWes":36376,"Ġeighth":36377,"iral":36378,"Ġvrouw":36379,"Ġfaction":36380,"_bias":36381,"Ġwitch":36382,"Ġnpc":36383,"(sb":36384,"ĠRodrig":36385,"_big":36386,"Dependency":36387,"ĠAbraham":36388,"ard
i":36389,"CAR":36390,"nos":36391,"Ġabundance":36392,"Ġnutrients":36393,"instein":36394,".Vert":36395,"ĠISS":36396,"D":36495,"Ġservlet":36496,"bastian":36497,"Ġ>&":36498,"SID":36499,"_clk":36500,"Ġdivisions":36501,"}',Ċ":36502,"Ġdildo":36503,"Ġparade":36504,"major":36505,"Ġaboard":36506,";++":36507,"Ġfusion":36508,"\"},{\"":36509,"ĠDialogResult":36510,"ĉarr":36511,"-em":36512,"_nr":36513,"(handler":36514,".NET":36515,".XtraReports":36516,"ĠShah":36517,"ĠBrief":36518,"-,":36519,"Ġprecio":36520,"ĉĉĉĠĠĠĠĠĠ":36521,"Ġtant":36522,"ĠGrande":36523,"/xml":36524,"_ICON":36525,"ĠRetro":36526,"unque":36527,"Ġnag":36528,"toFixed":36529,"XL":36530,"Ġdeclaring":36531,"ĠConcrete":36532,"ĠAmazing":36533,"ĉprintk":36534,"Ġdebates":36535,"DATED":36536,"Ġaesthetic":36537,"emetery":36538,"RoutingModule":36539,"ĠNashville":36540,"WAYS":36541,"Ġwolf":36542,"Ġobservers":36543,"OTA":36544,"anson":36545,"Ġea":36546,"Ġgreenhouse":36547,"ĵįä½ľ":36548,"Ġstair":36549,"Ġimmigrant":36550,"_apply":36551,"peare":36552,"ĠBloomberg":36553,"_PLAYER":36554,"Resp":36555,"æŃ£":36556,"Chooser":36557,"ĠICollection":36558,"Peter":36559,"Erro":36560,".detectChanges":36561,"Maps":36562,"Ġsqueeze":36563,"ĠHomes":36564,"wegian":36565,"Ġformatting":36566,"Ġnegotiate":36567,"uld":36568,"ĠNep":36569,"ĠQB":36570,"Ġeconomies":36571,"Ġ*/,":36572,"Ġredund":36573,"ĠAber":36574,".IsNullOrWhiteSpace":36575,"ycled":36576,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":36577,"_Sh":36578,"Ġskept":36579,"Ġrecreated":36580,"ĠgetType":36581,"Ġmargins":36582,"Ġcolonial":36583,"charts":36584,"//@":36585,"Ġprocessors":36586,"说":36587,"batis":36588,"æĦı":36589,"atorio":36590,"mentioned":36591,"Patient":36592,"Ġprey":36593,"Checkbox":36594,"_xpath":36595,".skip":36596,"ĠMormon":36597,"ĠMemoryStream":36598,"CREMENT":36599,"Ġku":36600,"meld":36601,"\\Data":36602,"ĠKernel":36603,"iltr":36604,"éĢģ":36605,"(profile":36606,"Carbon":36607,"ROLE":36608,"(pl":36609,"]*(":36610,".memory":36611,"Ġmedal":36612,"Ġadvisor":36613,"ität":36614,"Ġhdr":36615,"ierung":36616,"ĠProvides":36617,"(alpha":36618,"Ġteenagers":36619,"-parser":36620,".LatLng":36621,"]()Ċ":36622,"Ġfelony":36623,"ĉĉĉĊĉĉĉĊ":36624,"BOOK":36625,"Ġslash":36626,"Ġclearfix":36627,"ĠProphet":36628,"容":36629,"rightness":36630,"-fi":36631,".kind":36632,"erton":36633,"Jim":36634,"Ġmanipulate":36635,"Ġworksheet":36636,"olin":36637,"stars":36638,"Ġartifact":36639,"_EMPTY":36640,"ĉmain":36641,"-------------';":36709,"Ġexpressing":36710,"ĠIQ":36711,"ĠFact":36712,"/*******************************************************************************Ċ":36713,"_mass":36714,")):":36715,"Ġcondom":36716,"ĠcreateState":36717,"ometown":36718,"Ġirr":36719,"Ġ>(":36720,">B":36721,"iteration":36722,"ãĥª":36723,"Ġshirts":36724,"ounty":36725,"->$":36726,"_SIGN":36727,"ĠDale":36728,"Ġjj":36729,"Easy":36730,"Fre":36731,"ĠNy":36732,"Ġchlor":36733,"matched":36734,"ĠGerm":36735,"-UA":36736,"ĠNathan":36737,"education":36738,"-yard":36739,"-che":36740,"houses":36741,"ritional":36742,"Ġproximity":36743,"Ġdiesem":36744,"áºŃp":36745,"Ġdrought":36746,".audio":36747,"ĠLeo":36748,"Ġfavorable":36749,"inch":36750,"ĠDaw":36751,"ribly":36752,"_student":36753,"idable":36754,"OVE":36755,"Ġlacks":36756,"ouncing":36757,".business":36758,"Ġreopen":36759,"maybe":36760,"_GLOBAL":36761,"Ġdresses":36762,"ĠEdwards":36763,"ensible":36764,"ĠHardware":36765,"ĠExcellent":36766,"ĠTimeUnit":36767,"CTIONS":36768,"Ġschedules":36769,"Ġsegue":36770,"Opens":36771,"ammen":36772,"-Identifier":36773,"Ġstaring":36774,"Ġhappily":36775,"ĠHob":36776,"'_":36777,"Ġ\");":36778,"amentos":36779,"etched":36780,"Ġ
/>}Ċ":36781,".Users":36782,"Ġinterrupted":36783,"Contacts":36784,"Ġregistro":36785,"inburgh":36786,"CHA":36787,"_imp":36788,"phis":36789,"say":36790,"Ġretailer":36791,".NODE":36792,"/maps":36793,"_LAST":36794,"ĠCharge":36795,"_guard":36796,"Collider":36797,"ĠStatelessWidget":36798,"\":[\"":36799,"(\"../../":36800,"ioxide":36801,"ĠSund":36802,"Ġ'';":36803,"unset":36804,"addWidget":36805,"лÑİ":36806,"elles":36807,"alker":36808,"Arc":36809,"Ġdeduct":36810,"GUILayout":36811,"ĠVilla":36812,"Ġforbidden":36813,"_where":36814,"Ġ\\/":36815,"ĠTib":36816,"_AX":36817,"]čĊčĊ":36818,"ĠBir":36819,"Ġbend":36820,"ĠMAKE":36821,"ĠMET":36822,"Ġfutures":36823,"Ġweighted":36824,"\"\"\"čĊ":36825,"Ġauthorize":36826,"(program":36827,"},{\"":36828,"Ġcoefficients":36829,"ês":36830,"PerPage":36831,"ĠBathroom":36832,"ĠPublishing":36833,"GPL":36834,"Ġsubmissions":36835,"ĠNUMBER":36836,"jÄħ":36837,"Ġadditionally":36838,"empre":36839,"ĠShel":36840,"otyp":36841,"Solution":36842,"Ġthunder":36843,"_ec":36844,"ĠĊĠĠĠĠĊ":36845,"ĠFellow":36846,"Ġkay":36847,"ĠnewState":36848,"ONTAL":36849,"Implementation":36850,".Look":36851,"Ġents":36852,"Ġlors":36853,"ĠBIG":36854,"fab":36855,"Ġaveraged":36856,"ĠFeedback":36857,"ĠWells":36858,"Ġmartial":36859,"Ġindul":36860,"ĠCommunist":36861,"ĠForex":36862,"ĠAgriculture":36863,"\"[":36864,"Ġquar":36865,"ĠKont":36866,"ĉview":36867,".Bytes":36868,"desktop":36869,"ĠMakes":36870,"akespeare":36871,".Nullable":36872,"Ġspotlight":36873,"VB":36874,"owy":36875,"(torch":36876,"tridge":36877,"_bounds":36878,"Ġapologize":36879,".addItem":36880,"antd":36881,"*);Ċ":36882,",u":36883,"(gen":36884,"ç»ĵ":36885,"reator":36886,"ĠCord":36887,"oupper":36888,".metro":36889,"Ġew":36890,"ĠWORD":36891,".After":36892,"Ġdetained":36893,"ĠHammer":36894,"existing":36895,"Ġost":36896,"Ġmonument":36897,"-custom":36898,"UserID":36899,"ĠNom":36900,"Ġrejection":36901,"(dim":36902,"Ġsingleton":36903,"ĉdie":36904,"ariance":36905,"reports":36906,"]!=":36907,"elda":36908,"Ġprevalence":36909,"_regs":36910,".\".":36911,"Ġfeminist":36912,"Codec":36913,"Ġ**Ċ":36914,"(labels":36915,"_MARK":36916,"FAILED":36917,"Ġadministered":36918,"WN":36919,"ĠĠĠĠĠĠĠĠĉĉ":36920,"Ġnoun":36921,"wig":36922,"Ġgotta":36923,"Ġrif":36924,"-im":36925,"ĠPaulo":36926,"ĠCommandType":36927,"]))ĊĊ":36928,"-zero":36929,"Training":36930,"Ġlord":36931,"_art":36932,"reddit":36933,"Cert":36934,"Ġpeso":36935,"Rot":36936,"Ġendanger":36937,".dr":36938,"userInfo":36939,"unts":36940,"nv":36941,"ĠTrailer":36942,"-first":36943,"(make":36944,"Ġbenefici":36945,"-black":36946,"iÃŁ":36947,"Ġundoubtedly":36948,"Ġmex":36949,"ĠAncient":36950,"(as":36951,"Ġdescent":36952,"Pick":36953,"Ġreplica":36954,"$obj":36955,"ähr":36956,"Ġarrows":36957,"fty":36958,"ĠLibya":36959,"uga":36960,"charged":36961,"Tur":36962,"Ġhomic":36963,"issen":36964,"ĠFake":36965,"Ġbeers":36966,"Ġscattered":36967,"(Time":36968,"UTIL":36969,"Ġbureaucr":36970,"/plain":36971,"Ġsticking":36972,"FAIL":36973,"ĠCovid":36974,"Third":36975,"_present":36976,"ĠPierre":36977,"Ġëª":36978,"Ġ[...]ĊĊ":36979,"Prob":36980,"ĠTraffic":36981,"icao":36982,"doctor":36983,"Ġ),ĊĊ":36984,"Tabs":36985,"alu":36986,"ï¼ļâĢľ":36987,"Ġinherent":36988,"_No":36989,"ritis":36990,"ĠProof":36991,".basename":36992,"ä¼ļ":36993,"Ġchim":36994,"ĠProtected":36995,"crit":36996,"Ġprone":36997,"Ġкон":36998,"ĠHeroes":36999,"Ġanxious":37000,"Ġanos":37001,"Ġweekends":37002,"Ġsext":37003,"Ġreducer":37004,"=UTF":37005,"half":37006,"ĠSaw":37007,".mm":37008,"Ġnueva":37009,".currentTarget":37010,".lua":37011,"_EXTENSION":37012,"ĉreg":37013,"ĠCtrl":37014,"_align":37015,"a
cceptable":37016,"Ġrushing":37017,"frac":37018,"Ġboasts":37019,"Five":37020,"±":37021,"ĠTemperature":37022,">):":37023,"Ġcharter":37024,"REATED":37025,"Ġsubjected":37026,"Ġopc":37027,"healthy":37028,"使ç͍":37029,"ĠScientific":37030,"Ġfrau":37031,"riages":37032,"à¸Ķ":37033,".inventory":37034,"ationale":37035,"Mad":37036,"minutes":37037,">>();Ċ":37038,"ĠEnv":37039,"Ġrecordings":37040,"Ġsuspicion":37041,"sqlite":37042,"ĉread":37043,"ãģ¦":37044,"Ġworries":37045,".putString":37046,"ĠShanghai":37047,"(uid":37048,"rer":37049,"ĠvÃŃde":37050,"\"):":37051,"Ġmethodology":37052,"ĠкоÑĤоÑĢ":37053,"ccc":37054,"avad":37055,"Ġinduction":37056,"ĉThread":37057,",string":37058,"ại":37059,"nehmen":37060,"uition":37061,"Ġ*__":37062,".emf":37063,"Ġìľ":37064,"/themes":37065,"ĠNine":37066,".One":37067,"ĠEmbed":37068,"Ġfaz":37069,"uations":37070,"Ġprivately":37071,"Ġling":37072,"[F":37073,"ushi":37074,"Ġlaunches":37075,"(KEY":37076,"GMT":37077,"Ġaiming":37078,"patible":37079,"ĠBiden":37080,"iw":37081,"ĠDegree":37082,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":37083,"Ġ$('<":37084,"ários":37085,"toUpperCase":37086,"ìłľ":37087,"ĠEUR":37088,"Ġoversight":37089,"Ġtablesp":37090,"Updates":37091,".makedirs":37092,"Ġhumidity":37093,"/template":37094,"Always":37095,"(IS":37096,"_cert":37097,"Dig":37098,"Ġunderway":37099,"orton":37100,"ĠHurricane":37101,"Ġspends":37102,"ĠSegment":37103,"Ġflies":37104,"ĠToggle":37105,"ĠLynch":37106,"Ġsenses":37107,"ĠKos":37108,"setEnabled":37109,"istically":37110,"Ġtester":37111,"Ġadministrators":37112,"Ġtagged":37113,"Ðĵ":37114,"Ġshortcut":37115,"ĠResolution":37116,"Ġsupervision":37117,"ĠAshley":37118,"Tracking":37119,"ulatory":37120,"andel":37121,"isten":37122,"Ġunre":37123,"(diff":37124,"ANTS":37125,"Ġrider":37126,"ĠsÄħ":37127,".Series":37128,"_orders":37129,"ORIZONTAL":37130,"Ġretention":37131,"ãĢĤčĊčĊ":37235,"Ġdiagonal":37236,"ĠCancellationToken":37237,"_Internal":37238,"Ġruin":37239,".Qt":37240,"ocratic":37241,"Tel":37242,"ĠAnswers":37243,"matic":37244,"Ġxp":37245,"atem":37246,"_jobs":37247,"_any":37248,"Ġseniors":37249,"Ġlandmark":37250,"ĠQList":37251,"Ġmaneu":37252,"otify":37253,"/\";Ċ":37254,"/server":37255,"ĠPhilosoph":37256,"utenant":37257,"(io":37258,"hz":37259,"Ġauthenticated":37260,"dv":37261,"-Compatible":37262,"Originally":37263,",function":37264,"ãĢĤčĊ":37265,"ĠRepresentative":37266,"asily":37267,"ircuit":37268,".dt":37269,"(math":37270,".Marshal":37271,"[,":37272,"ĠCities":37273,"_turn":37274,"|)Ċ":37275,"Ġcantidad":37276,"alter":37277,"ĉui":37278,"ĠNebraska":37279,"Ġskirt":37280,".bg":37281,"SharedPreferences":37282,"(style":37283,"Ġgrief":37284,"gew":37285,"Ġsafeg":37286,"olang":37287,"_lists":37288,"ìĽ":37289,"Ġgranite":37290,"Ġhottest":37291,".jdbc":37292,".Customer":37293,"Ġâī¤":37294,"Ġwaar":37295,"_scene":37296,"+'/":37297,"ĠJTextField":37298,"Ġseating":37299,"Ġwears":37300,"Ġ`/":37301,"Cases":37302,"ĠYoutube":37303,"ım":37304,"Ġbalcon":37305,",G":37306,"MetaData":37307,"-price":37308,"SCR":37309,"Unity":37310,"Ġtrunk":37311,"={`${":37312,"Ġearthquake":37313,"Partial":37314,"Ġsubst":37315,"Ġelimin":37316,"=\"'.":37317,"//*[@":37318,"Ġsupervisor":37319,"vrolet":37320,"_article":37321,"Ġpane":37322,"bio":37323,"Ġmotors":37324,"NM":37325,"Frank":37326,"Ġonion":37327,"-word":37328,"ItemClickListener":37329,"Ġbrit":37330,"endencies":37331,"Computer":37332,"_running":37333,"(day":37334,"-he":37335,"(named":37336,"ĠSach":37337,"оÑĩ":37338,"campaign":37339,".Abstract":37340,"(wrapper":37341,".pay":37342,"Ġuw":37343,"Geo":37344,"rails":37345,"/select":37346,"
ichte":37347,"sons":37348,"EVENT":37349,"Ġaliment":37350,"Providers":37351,"Await":37352,"_INTERVAL":37353,".off":37354,"Ġgluten":37355,"_cloud":37356,"Ġwen":37357,".extract":37358,"ĉbutton":37359,"/MM":37360,"Party":37361,"Ġdemographic":37362,"_errno":37363,"Ġhiking":37364,"('')Ċ":37365,"\",@\"":37366,"Ġwit":37367,"rá":37368,"ologie":37369,"ĠStyles":37370,"ĠBrowserModule":37371,".RequestMapping":37372,"icans":37373,"PAGE":37374,"creation":37375,"ĠFerguson":37376,"uded":37377,"numbers":37378,"ĠGTK":37379,"Ġpresentations":37380,"ĠBobby":37381,"_span":37382,"estyle":37383,"Ġillegally":37384,"abela":37385,"Ġbattlefield":37386,"capacity":37387,"terror":37388,"]\");Ċ":37389,"Ġwarrior":37390,"leader":37391,"ĠDBG":37392,"ĠRevenue":37393,"Ġvigil":37394,"Ġcounterparts":37395,"(Error":37396,"ACTER":37397,"Ġheeft":37398,"Ġselections":37399,"zeug":37400,"tom":37401,"-two":37402,".;Ċ":37403,"_statement":37404,"ĠAid":37405,"ĠVul":37406,"_rgb":37407,"Ġprizes":37408,"Ġeditable":37409,"ĉform":37410,"ını":37411,".decor":37412,"Demo":37413,"lices":37414,"Ġenctype":37415,"ratulations":37416,"ĠROS":37417,"_chars":37418,"ĠJahr":37419,"partial":37420,"ÑĥÑĤ":37421,"ĠReceive":37422,"ĠLands":37423,"APTER":37424,"Ġchopped":37425,"..\"":37426,"ĠAnaly":37427,"ĠUID":37428,"ĠRadeon":37429,"ĠBee":37430,"Ġunm":37431,">M":37432,".findall":37433,"Tokenizer":37434,"ĠWHAT":37435,"Ġsj":37436,"Drawing":37437,"Ess":37438,"OND":37439,"Ĭ¶":37440,"(packet":37441,"âĢĶbut":37442,"Invocation":37443,"ĠNuclear":37444,"?;Ċ":37445,"Ġgrandes":37446,"ĠCrypt":37447,"remark":37448,"Ġ'../../../../":37449,"Ġinability":37450,"magic":37451,"cats":37452,"Ġsimulate":37453,":${":37454,"inflate":37455,"Ġener":37456,":NO":37457,"iples":37458,"Ġmerit":37459,"ĠRated":37460,"Ġglue":37461,"/blog":37462,"Ġgren":37463,"Ġthrilled":37464,".CH":37465,"uncan":37466,"ĠPRIMARY":37467,"Ġpersec":37468,"Ġfeared":37469,".MIN":37470,"ĠTheater":37471,"éĴ":37472,"ategorie":37473,"段":37474,"Ġappetite":37475,"square":37476,"ĠAlexand":37477,".UserId":37478,"_gt":37479,"_enter":37480,"Ġgraduates":37481,"FragmentManager":37482,"Authorize":37483,"-NLS":37484,"(My":37485,"Ġtriumph":37486,"usting":37487,"_PARAMS":37488,"Characters":37489,"(:,:,":37490,"_BUILD":37491,"MHz":37492,"Ġwashed":37493,"Ġuncle":37494,"Steve":37495,"ardown":37496,"${":37680,"_confirmation":37681,"Ġtrophy":37682,"Works":37683,"ĠElectronics":37684,"ĠMediterranean":37685,"_metrics":37686,"Ġannouncing":37687,"ĠDAY":37688,"_proto":37689,"Ġpear":37690,"baseUrl":37691,"ĉĉĉĉĉĉĉĉĊ":37692,"Ġcoordination":37693,":N":37694,".animate":37695,"ĠCotton":37696,"_hit":37697,"âľ":37698,"Ġjetzt":37699,"ifter":37700,"(fields":37701,"ownload":37702,"ificacion":37703,".cuda":37704,"ĠLiu":37705,">equals":37706,"ĠAce":37707,"ÑĢам":37708,"ĠSuperman":37709,"ĠGarcia":37710,"Ġarrests":37711,"agar":37712,"Ġ{})":37713,"Ġmacros":37714,"roupe":37715,"être":37716,"Ġtwisted":37717,"struments":37718,"_(\"":37719,"_vertices":37720,"ĠTransition":37721,"ик":37722,"[max":37723,"mind":37724,"ĠaccessToken":37725,"Ġunle":37726,"mus":37727,"cop":37728,"ĠFactor":37729,"Ġconced":37730,"Ġretr":37731,".linalg":37732,"-slider":37733,"obl":37734,"_StaticFields":37735,"Ġzombie":37736,"selling":37737,"Ġchap":37738,"Ġshaking":37739,"ĠTranslate":37740,"ĠAmsterdam":37741,"ĠETH":37742,"_EXTERN":37743,"kd":37744,"_disc":37745,"Ġpreceding":37746,"Ġprix":37747,"ObjectName":37748,"_modified":37749,"ardware":37750,"Ġ?>\">":37751,"ĠDW":37752,"`${":37753,"Ġ?>\">ĊĊ":37859,"Ġspinning":37860,"_pending":37861,"Matchers":37862,".Keys":37863,"ĠPV":37864,"enus":37865,"antis
":37866,"Ġdiscard":37867,"Ġhaul":37868,"Ġempir":37869,"Ġpathway":37870,"Ġoak":37871,"мен":37872,"-induced":37873,"Ġimpair":37874,"ĠCalgary":37875,".isHidden":37876,"dz":37877,"_include":37878,"Ġgm":37879,"Ġ'('":37880,"PY":37881,"uggestions":37882,"Ġcommodity":37883,"cro":37884,"/sub":37885,"ĠgetInstance":37886,"ĠLegacy":37887,"ĠKil":37888,"Bal":37889,"(short":37890,"Inform":37891,"+x":37892,"*r":37893,"ĠHopefully":37894,"orate":37895,"Ġmachen":37896,"Ġtreaty":37897,"ĠOri":37898,".public":37899,"-horizontal":37900,"Ġtactic":37901,"Ġbord":37902,"wares":37903,"Ġammo":37904,"ĠLists":37905,"Ġequations":37906,"/her":37907,"ĠNSW":37908,"Bounding":37909,"_Collections":37910,"Ġavail":37911,".DropDown":37912,"è°":37913,"Ġhh":37914,"ĠlÃł":37915,".pb":37916,"Ġmemorial":37917,"ĠATTR":37918,"Ġexhausted":37919,"Ġtsp":37920,"ĉredirect":37921,"Ġlikewise":37922,"STER":37923,"Ljava":37924,"Ġcondemned":37925,"ocaust":37926,"(strict":37927,"Ġexempt":37928,"Ġsms":37929,"Ġexagger":37930,"SYS":37931,"Ġlounge":37932,":^":37933,"Ġtodd":37934,"deb":37935,"atorial":37936,"ĠPorter":37937,"Ġtuition":37938,"Ġexempl":37939,"Ġparen":37940,".lineTo":37941,"Ġkidney":37942,"Ġça":37943,"Ġcui":37944,"ï¼Į请":37945,"XC":37946,"Ġmoż":37947,"Ġnominated":37948,"lung":37949,"ImGui":37950,"ĠBuzz":37951,"Ġstereo":37952,"portal":37953,"resas":37954,"Ġklass":37955,"Ġdrafted":37956,"Ġprojectile":37957,"/gpl":37958,"(parameters":37959,"*)Ċ":37960,"Ġassisted":37961,"ĠNSInteger":37962,"sitemap":37963,":nth":37964,".Views":37965,".ArgumentParser":37966,"Ġmeer":37967,"zier":37968,"ĠDig":37969,"Ċ":38036,"Ġplag":38037,"pine":38038,"Ġblanket":38039,"Ġ:-":38643,"Ġlcd":38644,"---------------":38645,"(\"\"":38646,"Ġtactical":38647,"ĠRonald":38648,"extr":38649,"ĠFest":38650,"Ġfuer":38651,"-navigation":38652,"Ġkb":38653,"ghost":38654,"ĠhandleChange":38655,"_cls":38656,"()!=":38657,"Comparator":38658,".vm":38659,"ĠCox":38660,"_review":38661,"/@":38662,"_cookie":38663,"Ġrecognised":38664,"ldap":38665,"Threads":38666,"ĠSexual":38667,"ĠBearing":38668,"(SQL":38669,"Ġxr":38670,"Ġthigh":38671,"URLConnection":38672,"ĠSUV":38673,"ĠmContext":38674,"Ġincidence":38675,"ĠEste":38676,".sup":38677,"_te":38678,"(EXIT":38679,"CMD":38680,"/\">":38681,"Almost":38682,"ĠUne":38683,"Ġanderen":38684,"ĠSingleton":38685,"Ġbore":38686,"Think":38687,"Ġnarc":38688,"]initWith":38689,"_shop":38690,"(strategy":38691,"!',":38692,"herits":38693,"ĠDesk":38694,"_machine":38695,".netty":38696,"ında":38697,"=<":38698,"ĠQR":38699,"ĠSidebar":38700,".splitContainer":38701,"ĠonSuccess":38702,"Ġmonkey":38703,"Enjoy":38704,"(nodes":38705,"pectrum":38706,"Ġ(*(":38707,"ĉUINT":38708,",height":38709,"ĠNetworks":38710,".tail":38711,".linspace":38712,"Ġ\"...":38713,"Listen":38714,"Æ¡":38715,".Channel":38716,"-defined":38717,"Repeat":38718,"adjust":38719,"ERM":38720,"_application":38721,".assertNotNull":38722,"-stream":38723,"Ġrabbit":38724,"Ġpositioning":38725,"Ġwoke":38726,"Ġfing":38727,"Ġmultiplayer":38728,"Ġregistering":38729,"until":38730,"Ã¥n":38731,"(::":38732,"ussions":38733,"Ġpotato":38734,"ĠEquals":38735,".Sup":38736,"/apache":38737,"Ġ(=":38738,".\")":38739,".ptr":38740,"ĠSpeech":38741,".clip":38742,"ĠGabriel":38743,"Ġmusician":38744,"/issues":38745,".shop":38746,"ĠHier":38747,"_RET":38748,"_bucket":38749,"ãĥ¡":38750,"avs":38751,"Ġroz":38752,"flower":38753,"WriteBarrier":38754,"ĠMilan":38755,"Ġlegislature":38756,"ĠDoll":38757,"Ġproving":38758,".concatenate":38759,"âķIJ":38760,"Ġgchar":38761,"cdnjs":38762,"bles":38763,"ĠListing":38764,"ло":38765,".xrLabel":38766,"ĠSak":38767,"justice":38768,"
ĠValentine":38769,"unless":38770,"Ġpiger":38771,"(run":38772,"Ġtestified":38773,"ANA":38774,"ĠRemoves":38775,"))));Ċ":38776,"recated":38777,"ĠRuntimeMethod":38778,"Ġconqu":38779,"ãĤ¢":38780,"Ġtissues":38781,"ailer":38782,"été":38783,"-Star":38784,"Ġflames":38785,".setIcon":38786,"Ġsupern":38787,"Ġvagina":38788,"-variable":38789,"Ġwellness":38790,"CUR":38791,"Ġbelle":38792,".getRequest":38793,"Ġpoco":38794,"benh":38795,"agens":38796,"Ġspill":38797,"ĠJur":38798,"Ġdispatcher":38799,"ного":38800,"emonic":38801,"(dirname":38802,"ĠÐĶ":38803,"Ġpasse":38804,"Ġganz":38805,"ricing":38806,"EU":38807,"Ġmujeres":38808,"essen":38809,".attribute":38810,"jj":38811,"ĉĉĠĊ":38812,"[^":38813,"Ġstrtolower":38814,"lexer":38815,"ectar":38816,"hotel":38817,".square":38818,"Ġrall":38819,"Ġlowered":38820,"handled":38821,"Market":38822,"ĠUses":38823,"ivas":38824,".Business":38825,"ãģĹãģ¦":38826,"DIV":38827,"Ġwasted":38828,"Ġavoir":38829,"êm":38830,"_ACCOUNT":38831,".et":38832,"ĉSDL":38833,"kap":38834,"Ġfox":38835,"uppet":38836,"{},Ċ":38837,"\",'":38838,"Favorite":38839,"PEND":38840,"ĠAES":38841,"}),":38842,"Ġdeduction":38843,"ĠpolÃŃt":38844,"ĠcomponentWill":38845,"ĠTelerik":38846,"_SELF":38847,"Ġmuse":38848,"Craft":38849,"Ġdens":38850,"ि":38851,"(tp":38852,"Ġtasty":38853,"Ġbalances":38854,"Ġdedication":38855,"ĠWallace":38856,"Ġunlaw":38857,"\\\">\\":38858,"Ġmum":38859,"-update":38860,"emente":38861,"Ġsoda":38862,"Republic":38863,"asmine":38864,"éric":38865,"(Status":38866,"ĠJsonConvert":38867,"ĠDisk":38868,".Redirect":38869,"Ġfilming":38870,"/mol":38871,"Ro":38872,"Ġville":38873,"Ġtrabaj":38874,"Ġsynthesis":38875,"rega":38876,"Ġrl":38877,"Scheduler":38878,"ISHED":38879,"currentUser":38880,"(errors":38881,"'h":38882,"_bot":38883,"ximo":38884,"ĠUSART":38885,"_super":38886,"_DECREF":38887,"ной":38888,"_ROW":38889,"Ġpromotes":38890,"ĠTA":38891,"Ġhoras":38892,"ĠRepresents":38893,"Ġnameof":38894,"ĠExc":38895,"ĠGarage":38896,"Ġseine":38897,",#":38898,"Ġherb":38899,"/resources":38900,"Ġpleaded":38901,".radioButton":38902,"Ġæĺ":38903,"Ops":38904,"ĠNest":38905,"cstring":38906,"ĠDefence":38907,"Ġrefere":38908,"_leaf":38909,"Ġrevelation":38910,"ë§":38911,".executeUpdate":38912,"_WORLD":38913,"Ġexpans":38914,"(\"\\\"":38915,"jab":38916,"Ġdoubts":38917,"ĠGeometry":38918,"Ġintroduces":38919,"Ġsenators":38920,"Ġcanal":38921,".helper":38922,"ĠBiology":38923,"_SENS":38924,".previous":38925,"-touch":38926,"abit":38927,"Ġimpacted":38928,"Ġbrackets":38929,".direct":38930,"accum":38931,"Ġtestosterone":38932,"ĉaction":38933,"ĠChance":38934,"Ġpeaks":38935,"CppCodeGenWriteBarrier":38936,"Ġunbelie":38937,"_press":38938,".Rel":38939,"angled":38940,"/templates":38941,"-->čĊ":38942,"lime":38943,"Ġsufficiently":38944,"_nt":38945,"Expand":38946,".isfile":38947,"ĠisEmpty":38948,"Ġqt":38949,"Ġmulher":38950,"acob":38951,"George":38952,"常":38953,"Ġassim":38954,"aso":38955,"Ġcomprised":38956,"OV":38957,"(CONFIG":38958,"ĉwriter":38959,"Ġdesp":38960,"Ġtenure":38961,"(cr":38962,".pool":38963,"ĠBrend":38964,"Ġcensor":38965,"(timeout":38966,"Ġplea":38967,".Wrap":38968,"Ġtightly":38969,"ĠWere":38970,"ĠIgnore":38971,"abei":38972,"Ġbridges":38973,"Ġcondemn":38974,"Ġsimplicity":38975,"Ġroutinely":38976,"Ġblacks":38977,"jb":38978,"ĠPit":38979,"Utf":38980,"Ġ/Ċ":38981,"reload":38982,"ĠsetObject":38983,"/global":38984,"Ġfatty":38985,"Ġsocks":38986,"Couldn":38987,"Ġerotisk":38988,"æĿ¡":38989,"ĠPressure":38990,"ĠMaz":38991,"npos":38992,"tolower":38993,"ĠEQ":38994,"uteur":38995,"ĠMoment":38996,"Ġeta":38997,"{{--":38998,"Ġgraphs":38999,"ĠGuar":39000,"rine":39001,"(--":
39002,"ĠHttpStatus":39003,"(student":39004,"*np":39005,"Ġrailway":39006,"Ġasynchronous":39007,"_vm":39008,"'],'":39009,",text":39010,"merchant":39011,"(Guid":39012,"ĠGra":39013,"ixer":39014,"fetchAll":39015,".addListener":39016,"flip":39017,"*$":39018,">(),":39019,"Ġsunlight":39020,"assigned":39021,"Ġabc":39022,"ĠCOLUMN":39023,"ĠðŁĻĤĊĊ":39024,")...":39025,"Ġensemble":39026,"Ġnewline":39027,"_SINGLE":39028,"iedad":39029,"Ġdarker":39030,"ormap":39031,"Ġlion":39032,"plits":39033,"Ġillustration":39034,"ĠIEEE":39035,"Ġvista":39036,"ousands":39037,"*******":39038,"ĠTommy":39039,"Ġhue":39040,"Sel":39041,"Ġaura":39042,"ĠTherapy":39043,"Ġanimator":39044,".constraints":39045,"Ġvague":39046,"(\"\")":39047,"Ġvillain":39048,"Ġblessing":39049,"ĠstringBuilder":39050,"ĠMisc":39051,"ĠDIR":39052,"fax":39053,"-node":39054,"ĠWalking":39055,"ĠAU":39056,"sess":39057,"Ġgrill":39058,"VERTISE":39059,"ĠFoods":39060,"Ġtournaments":39061,"Ãĵ":39062,"ĠMarsh":39063,"Ġwonders":39064,"Longitude":39065,".CommandText":39066,"=input":39067,"_encoder":39068,"pageSize":39069,"ĠgetState":39070,">>Ċ":39071,".grey":39072,"pod":39073,"Ġreadings":39074,"Ġreconsider":39075,"Startup":39076,"Ġexcer":39077,".balance":39078,"_cycle":39079,"_Time":39080,"LOCAL":39081,"ĠEFI":39082,"ĠReyn":39083,".setForeground":39084,"byn":39085,"Ġdisconnected":39086,"ACTIVE":39087,"Ġembedding":39088,"ickers":39089,"Ġsurroundings":39090,"*c":39091,"Ġgarant":39092,"Ġbf":39093,"Ġwipe":39094,"Ġä¸ĭ":39095,"_TRA":39096,"adox":39097,"çķ":39098,"Ġsucks":39099,"ĠSongs":39100,"ĠAssociates":39101,"ĠBald":39102,"ĠBrett":39103,"venile":39104,"Ġvt":39105,"Ġinade":39106,"Ġresigned":39107,"ĠGlenn":39108,".pattern":39109,".DataBind":39110,"Ñĥм":39111,"LayoutInflater":39112,"chet":39113,"ĠTestament":39114,".ms":39115,"Ġpav":39116,"ĠReactDOM":39117,"urdy":39118,"ADATA":39119,"Mu":39120,"/actions":39121,"ĠJs":39122,"_extract":39123,"ĠBring":39124,":id":39125,"strt":39126,"ivation":39127,"Ġoutright":39128,"azu":39129,"loyment":39130,"иÑı":39131,"aldo":39132,"ĠPublisher":39133,"Education":39134,"Palette":39135,"_drv":39136,"Ġ($(":39137,"ĠAnda":39138,"Ġremedy":39139,"Ġinconsistent":39140,"tection":39141,"Ġregulators":39142,"Ġshortest":39143,"(pair":39144,"ĠInstallation":39145,"Ġdefendants":39146,"Ġ();":39147,"-large":39148,"Mel":39149,"Ġthreaten":39150,"нÑı":39151,"Ġfetish":39152,"otine":39153,"_dic":39154,"Ġ<$":39155,"Ġstagger":39156,"spi":39157,"$response":39158,"Serv":39159,"-born":39160,"jos":39161,"ĉimg":39162,"ĉWHERE":39163,"_lt":39164,"å½ĵ":39165,".cost":39166,"ĠTue":39167,".labels":39168,"ĠLV":39169,"wcsstore":39170,"ĠJesse":39171,"ห":39172,"Trade":39173,"Ġpredecessor":39174,"ëĤ":39175,"finally":39176,"_general":39177,"oggler":39178,"_REGION":39179,"nement":39180,"Ġblogger":39181,"ĠHarbor":39182,"ĠDataset":39183,"[w":39184,"Ġattendees":39185,".ico":39186,"maximum":39187,".Unlock":39188,"_SYNC":39189,"ágina":39190,"Ġdowns":39191,"ĠWii":39192,"])/":39193,"Ġkicking":39194,"unication":39195,"ĠDAC":39196,"ĠIDS":39197,"ĠRental":39198,"ĠcurrentTime":39199,"Ġvaccines":39200,"ĠDevil":39201,"Ġnors":39202,"_mouse":39203,"urrection":39204,"(no":39205,"Ġ>čĊ":39206,"Ġaggression":39207,"Ġbreeding":39208,".symbol":39209,"iman":39210,"AbsolutePath":39211,"ĠWHO":39212,"_flush":39213,"-root":39214,"arna":39215,"&M":39216,"Ġfathers":39217,"ĠRocket":39218,"iveau":39219,"Ġwander":39220,"Ġcompos":39221,"ĠWarrior":39222,"ĠSeat":39223,"ĠClinic":39224,"_invoice":39225,"(dispatch":39226,"Producto":39227,"aturing":39228,"ossier":39229,"ĠMAY":39230,"Ġdagger":39231,"Ġsanitized":39232,"ĠRFC":39233,"
Ġproph":39234,"Ġurine":39235,"Ġgrind":39236,"ĠExpanded":39237,"descripcion":39238,"-fw":39239,"ĠKerry":39240,"=name":39241,"Ġchk":39242,"Ġnationally":39243,"Ġthee":39244,"Inc":39245,"Ġ?>>":39246,".RadioButton":39247,".HttpServletResponse":39248,"/Y":39249,"ĉfield":39250,"Ġhomme":39251,"yper":39252,"Physical":39253,"=v":39254,"Ġdriv":39255,"ĠErrors":39256,"ĠcÄĥ":39257,"Death":39258,"ĠWINDOW":39259,"Ġpoet":39260,"ĠSharp":39261,"ĠImmutable":39262,"ĉcreate":39263,"Ġgeht":39264,"ĠReform":39265,"aiser":39266,"ĠInitialization":39267,"Ġimmunity":39268,".compose":39269,"Ġlatency":39270,"ĠLebanon":39271,"ĠParad":39272,"Ġfuels":39273,"ĠExhib":39274,"coh":39275,"%\">Ċ":39276,"ĠCLI":39277,")initWith":39278,"-Za":39279,"_CLEAR":39280,"regn":39281,"Ġfinances":39282,".standard":39283,"_CATEGORY":39284,".library":39285,"Ġtravelers":39286,"_wp":39287,"ĠEvaluation":39288,"starting":39289,"Ġ)),Ċ":39290,"episode":39291,"ĠVariant":39292,"Ġdaemon":39293,"ĠJulia":39294,"ĠNR":39295,"Ġdoubles":39296,"'":39526,"Ġqueryset":39527,";}čĊ":39528,"ĠPopulation":39529,"utedString":39530,"resident":39531,"_FONT":39532,"ĠRespond":39533,"Ġobscure":39534,"Ġobservable":39535,"ĠContributors":39536,"kon":39537,"ĠMusk":39538,"exao":39539,"ĠTub":39540,"BootApplication":39541,"SOR":39542,".Horizontal":39543,".findBy":39544,".power":39545,"Ġpositively":39546,"venience":39547,"ĠJong":39548,"Ġwhistle":39549,"ĠзнаÑĩ":39550,"Ġlending":39551,"Ġdestructive":39552,"ĠonDelete":39553,"authorization":39554,"();?>":39555,"_original":39556,"science":39557,"atra":39558,"?,?,":39559,"ĠAsc":39560,"Ġconvincing":39561,"$a":39562,"orgen":39563,"_Date":39564,"ĠProvide":39565,"Ġlonely":39566,")'Ċ":39567,"exchange":39568,";?>Ċ":39569,".fast":39570,"Samples":39571,"London":39572,"'])čĊ":39573,"ĠIonic":39574,"Ġpesso":39575,"ĠKnights":39576,"ĠRaf":39577,"_attrs":39578,"Ġrepeal":39579,">Main":39580,"ĠOrdered":39581,"_New":39582,"=\"\">\";Ċ":39663,"ĠSERVER":39664,"ĠHEADER":39665,"_velocity":39666,"ĠInvoke":39667,".timestamps":39668,"Ġsulf":39669,"IQUE":39670,"Ġinhabitants":39671,"phins":39672,"azzo":39673,"Ġmono":39674,"Legend":39675,"Ġnonce":39676,"IFE":39677,";\";Ċ":39678,"-create":39679,"\"\",Ċ":39680,"permit":39681,"ĠImmigration":39682,"Ġpathname":39683,"ffective":39684,"âĻĢâĻĢ":39685,"Ġexams":39686,"-event":39687,"ĠTill":39688,"[mid":39689,"FIX":39690,";color":39691,"(Order":39692,"_traits":39693,"ĠorderBy":39694,"Ġsunt":39695,"ĠNicholas":39696,"ز":39697,"Ġsunny":39698,"iners":39699,"Ġaccessibility":39700,"ĠHB":39701,".comp":39702,"ĉop":39703,"Ġminorities":39704,"etheus":39705,"Ġcollaborative":39706,"prit":39707,"HIR":39708,"Ġwraps":39709,"ĉdraw":39710,"god":39711,"ĠIX":39712,".apps":39713,"ĠNM":39714,"Ġirrelevant":39715,"ĠTigers":39716,"Ġdiag":39717,"GV":39718,"ĠAccessories":39719,"kont":39720,"Ġsimplify":39721,"ĠFavorite":39722,"_tools":39723,"([]);Ċ":39724,"Ġtowers":39725,"Bes":39726,"Ġhunter":39727,"Ġsalon":39728,"(buff":39729,"ĉdebug":39730,"Ġmalware":39731,"Moving":39732,"-options":39733,")+'":39734,"ĠLOVE":39735,"_SOCKET":39736,"_fin":39737,"ĠDelaware":39738,"Ġsheriff":39739,"-invalid":39740,"ĠFULL":39741,"Ġпод":39742,"elas":39743,"\"strings":39744,"ĠRepresentatives":39745,"surface":39746,"resolved":39747,"htdocs":39748,")):čĊ":39749,"Ġpressures":39750,"Ġnorms":39751,"Ġpla":39752,"Ġsurname":39753,"Ġpostal":39754,"ĠDepart":39755,"Ġslaughter":39756,"orida":39757,"Ġhebben":39758,"Ġdesar":39759,"compact":39760,"_LANG":39761,"åIJĪ":39762,"opoly":39763,"_rad":39764,"ĠSTDMETHOD":39765,"Lazy":39766,"ĠĠĠĉ":39767,"...,":39768,"(web":39769,"ĠPont":39770,"Ġetwa
s":39771,"Ġupward":39772,"_hat":39773,"Ġ],ĊĊ":39774,"ĠbaseUrl":39775,"Ġworrying":39776,"-addon":39777,"(getClass":39778,"SPI":39779,"Ġcapturing":39780,")},Ċ":39781,"Effects":39782,"Ġcompetent":39783,"Ġfoul":39784,"Ġsubscribing":39785,"ĠOBJECT":39786,"IXEL":39787,"bucks":39788,"(edge":39789,"(pass":39790,"ĠPeterson":39791,"Ġboobs":39792,"ĠDelay":39793,"_square":39794,"elim":39795,"oters":39796,"_PC":39797,"%E":39798,"onclick":39799,"ĠSVG":39800,"Ġtopped":39801,"Ġfist":39802,"smart":39803,"ĠRalph":39804,"(owner":39805,"jours":39806,"Ġbronze":39807,"ĠArgumentException":39808,"(original":39809,"_SCALE":39810,"_cp":39811,"Ġrecommends":39812,".setStyle":39813,"Sure":39814,"LAND":39815,"Ġrepeating":39816,"Matt":39817,".Visibility":39818,"Ġenterprises":39819,".Setup":39820,"(scene":39821,"ĠReactive":39822,"urge":39823,"bw":39824,".Put":39825,"persist":39826,".cookie":39827,"ĠAudi":39828,"`s":39829,"supplier":39830,"(Form":39831,"¡":39832,"_so":39833,"ĮĢ":39834,"ĠLegion":39835,"tte":39836,"Nd":39837,"Loss":39838,"(attrs":39839,".scatter":39840,"Ġgroom":39841,"Ġglimpse":39842,"Ġnails":39843,"Ġcumulative":39844,"Ġfazer":39845,"_services":39846,".Num":39847,"ibilit":39848,"_resolution":39849,"ĠTx":39850,"uminium":39851,"opa":39852,".schedule":39853,"smtp":39854,"à¸ķ":39855,"urry":39856,"ük":39857,"goog":39858,"_signature":39859,".into":39860,"ĠSteps":39861,"Ġhomeowners":39862,"ĠNSURL":39863,"ĠPAC":39864,"ĠĠĠĠĠĠĠĠĠĠĠĠĊĊ":39865,">')Ċ":39866,"enh":39867,"Ġincap":39868,"$MESS":39869,"Ġmoins":39870,"ĠFi":39871,"Ġoffseason":39872,"pressions":39873,">.Ċ":39945,"ĠGrass":39946,"ĠGoal":39947,"_pdf":39948,"Handlers":39949,"Ġstacks":39950,".getFullYear":39951,"=[];Ċ":39952,"车":39953,",V":39954,"(split":39955,"Ñĥнк":39956,"Ġbakeca":39957,"Ġ~/.":39958,"pez":39959,"tails":39960,"ĠGlen":39961,"ĠsetImage":39962,"ĠComic":39963,"BLOCK":39964,"ĉThis":39965,"oader":39966,"Ġcapitalist":39967,"_STEP":39968,"(Boolean":39969,"ĠCorrect":39970,"rina":39971,"Ġconcaten":39972,"å®ŀ":39973,"():ĊĊ":39974,"Ġunanim":39975,"lli":39976,"alars":39977,"-ne":39978,"Ġdivor":39979,"ĠKickstarter":39980,"]._":39981,"*'+":40622,"åĿĢ":40623,"acency":40624,"(URL":40625,"_half":40626,"=l":40627,"ĠlistView":40628,"(section":40629,".toArray":40630,"+/":40631,"ĠRodriguez":40632,"istream":40633,"Ġeligibility":40634,"::-":40635,".newInstance":40636,"PB":40637,"ĠAssets":40638,"ĠComposite":40639,"ĠLabs":40640,"ĠHamas":40641,"++);Ċ":40642,"Ġblk":40643,"ĠNeo":40644,"Luc":40645,"@login":40646,"Ġunaware":40647,".met":40648,"_RELEASE":40649,"(ST":40650,"AMIL":40651,"rike":40652,"Ġ(){Ċ":40653,"(sprintf":40654,"ĠAccounts":40655,"ĠVIEW":40656,"ĠAj":40657,"ãĤ°":40658,"Ġwhisk":40659,"Ġidi":40660,"Ġrode":40661,"Ġihn":40662,"ĠElementary":40663,"Qty":40664,"Ġintriguing":40665,"Ġå¤":40666,"Jobs":40667,"ĉoffset":40668,"ĠAhmed":40669,"ĠTaliban":40670,"Ġèİ·åıĸ":40671,"Ġinjected":40672,".Authentication":40673,"_linear":40674,".Decimal":40675,"Ġapples":40676,"Ġshareholders":40677,"Ġbaked":40678,".diff":40679,"ĠEddie":40680,"okers":40681,"Ġconfronted":40682,"voices":40683,"Ġtus":40684,"ĠSpin":40685,"NODE":40686,"_Un":40687,"CTX":40688,"/google":40689,"Temperature":40690,"Ġ'').":40691,"Ġmagnificent":40692,"ĠstartIndex":40693,"sembles":40694,"Anyone":40695,"zk":40696,"ehen":40697,"ĠDame":40698,".strict":40699,"Ġreplaces":40700,"Ġlineback":40701,"Ġpushes":40702,"Ġcheek":40703,"ĠShi":40704,"_BYTES":40705,"REA":40706,"ản":40707,"_CONNECTION":40708,"Gateway":40709,"ĠTravis":40710,"ĠAX":40711,"ĠBasically":40712,"ĠUpgrade":40713,"àª":40714,"themes":40715,"ermo":40716,"kor":40717,"Fe
male":40718,"_attach":40719,"ĠìĤ¬ìļ©":40720,"Ġpoz":40721,"==============Ċ":40722,"(symbol":40723,"ĠSector":40724,"__)ĊĊ":40725,"_padding":40726,"ï¼ļ\"":40727,"Ġfabs":40728,"Ġranged":40729,"setName":40730,"Ġperror":40731,"âĹ":40732,"ĠFileReader":40733,"Ġfulfilled":40734,"_Current":40735,"Ġdominate":40736,"Ġsmugg":40737,"PostMapping":40738,"_force":40739,"Ġbloc":40740,"ĠGiant":40741,"(video":40742,"ĠCU":40743,"SystemService":40744,"Ġelf":40745,"Ġkontakt":40746,"ëª":40747,"kees":40748,"gtk":40749,"ĠparamInt":40750,"Ġmarkup":40751,"uales":40752,"Ġaccounted":40753,"Ġgangbang":40754,"RYPT":40755,"ĠWrong":40756,"Ġcredited":40757,"ĠMESSAGE":40758,"Ġflaws":40759,"Ġbbw":40760,"Ġmetabolic":40761,"ĠOEM":40762,"/event":40763,"(Collectors":40764,"monton":40765,"appear":40766,"Ġopted":40767,"Ġcheat":40768,"Ġdav":40769,"ĠProceed":40770,"Ġê¸":40771,"anked":40772,"из":40773,"ansk":40774,"ĠHang":40775,"ĠCler":40776,"Ġdisgu":40777,"Ġcmap":40778,".cljs":40779,"Ġaument":40780,"lez":40781,"ĠJoined":40782,"_received":40783,"Ġaerial":40784,"otel":40785,"Ġgreet":40786,"\"s":40787,"ĠGenesis":40788,"ĠCalif":40789,"panion":40790,"Ġtailored":40791,"mapping":40792,"andExpect":40793,".track":40794,"atomy":40795,"ĠOw":40796,"ullah":40797,".Yes":40798,"ĠSimpleName":40799,"dbh":40800,"'en":40801,"Ġnonsense":40802,"Ġphilosophical":40803,"(getContext":40804,"Ġisso":40805,"ĠACE":40806,"startDate":40807,"ĠbÄĻd":40808,"ĠAUTHOR":40809,"ĠGlobe":40810,"Ġinsects":40811,"_Al":40812,"ushing":40813,"è®°":40814,"/Home":40815,"ĠLocalDate":40816,"needed":40817,"hesive":40818,"Ġillusion":40819,"äºĮ":40820,"Ġtrat":40821,"xo":40822,"/detail":40823,"_MATCH":40824,"Ġbroadband":40825,"Ġwal":40826,"ĠIllegalStateException":40827,"IRECTION":40828,"Ġnortheast":40829,"esium":40830,"ĠCliente":40831,"ulance":40832,"nty":40833,"Ġtecn":40834,"Devices":40835,"Ġgrains":40836,"ĠOg":40837,"ĠSEL":40838,"udiant":40839,"Ġ++;Ċ":40840,"Ġexplanations":40841,"occo":40842,"Ġdiets":40843,"Ġcohort":40844,"(controller":40845,".Iterator":40846,"-rich":40847,"rocess":40848,"GD":40849,"Ġcarbohydr":40850,"Ġfried":40851,"ĠEmployment":40852,"ìŀ¥":40853,"ĠLeonard":40854,"_${":40855,"quares":40856,"Ġcompanions":40857,"Ġparis":40858,"Ġstimulation":40859,"ĠZoo":40860,"Ġrelevance":40861,"ĠColour":40862,"Ġspear":40863,"otional":40864,"ĠLite":40865,"ĠKosten":40866,"Ġó":40867,"_attachment":40868,"orphic":40869,"Ġdamit":40870,"Ġdlg":40871,"Ġthrive":40872,"CHANGE":40873,"ĠApparently":40874,"Ġatual":40875,"Ġrooted":40876,"(images":40877,"awi":40878,"ariat":40879,"Ġcherry":40880,"STATIC":40881,"mnt":40882,"ĠUserId":40883,"illet":40884,"ĠHispanic":40885,"Ġnak":40886,"Ġcentro":40887,"Ġdims":40888,"_initialize":40889,"ık":40890,"ĠCenters":40891,"REN":40892,"Ġevolutionary":40893,"ĠTopics":40894,"_damage":40895,"emer":40896,"Ġrund":40897,"Ġpunished":40898,"Ġcubic":40899,"fair":40900,"[];ĊĊ":40901,"Ġinstantiate":40902,"Ġoversee":40903,"-delete":40904,"unteer":40905,"startTime":40906,"ĠPipeline":40907,"_GAME":40908,"ĠCir":40909,"ĉNull":40910,".Formatting":40911,"ucumber":40912,"ĠRide":40913,"Ġzoo":40914,"Ġchecker":40915,"åIJĮ":40916,"=C":40917,"Ġgrit":40918,"\");//":40919,"_xy":40920,"ĠDeclaration":40921,"Ġcallable":40922,"Foo":40923,"ĠListItem":40924,"Ġinaccur":40925,"mlin":40926,"ĉData":40927,"Ġevolving":40928,"awan":40929,"Ġcafe":40930,"folk":40931,"_IDX":40932,"ĠAnything":40933,"ĠPalestine":40934,"ĠGridView":40935,"Ġcolony":40936,"ĠGermans":40937,"(+":40938,".pid":40939,".jsx":40940,"ĠSuperior":40941,"Christian":40942,"ĠLect":40943,"ĉGame":40944,"Ġinstrumental":40945,"Animations":40946,"да
л":40947,"ĠMoses":40948,"ĉĉčĊĉĉčĊ":40949,"zs":40950,"kte":40951,"ä¸ļ":40952,"_DIST":40953,"bitmap":40954,"dB":40955,"Ġpersistence":40956,"ÑĢоÑģ":40957,"$l":40958,"Bron":40959,"Ġ{|":40960,"_chart":40961,"ĠConsum":40962,"Ġhemp":40963,"Ġ\"))Ċ":40964,"Ġattackers":40965,"Ġknowledgeable":40966,"Ġcet":40967,"Ġviruses":40968,"'I":40969,"Ġpitcher":40970,"Ġsweeping":40971,"=list":40972,"aptops":40973,".depth":40974,"Ġinstructed":40975,"ĠRus":40976,"benhavn":40977,"Ġин":40978,"Sports":40979,"Ġonset":40980,"æĿĥ":40981,".RED":40982,"_si":40983,"ĠPST":40984,".onChange":40985,">tag":40986,"ĠRoh":40987,"_character":40988,"ĠLaws":40989,"ĠBachelor":40990,"_swap":40991,".reactivex":40992,"Ġrewarding":40993,"Medium":40994,"-[":40995,"ĠRecently":40996,"Joint":40997,"partition":40998,"ĠMinutes":40999,"Ġindo":41000,"Ġabsorbed":41001,"ĠGN":41002,"_IND":41003,"Ġsaber":41004,"Spawn":41005,"outputs":41006,"ĠJeffrey":41007,"Ġmedieval":41008,"hed":41009,"Guide":41010,"Ġpsycho":41011,"Ġglam":41012,"Elim":41013,"ädchen":41014,"_plain":41015,"ĠSau":41016,"-four":41017,"Ġanalyzing":41018,"QUERY":41019,"Ġtomato":41020,"_buttons":41021,"VEN":41022,".setStatus":41023,".Url":41024,"+ĊĊ":41025,"Ġcomplaining":41026,"degree":41027,"confirmed":41028,"Ġsubt":41029,"parsed":41030,"Ġtorque":41031,"Ġtroubled":41032,"ĠTARGET":41033,"Ġtrademarks":41034,"ĠCoordinate":41035,"ĠViv":41036,"Ġ//}ĊĊ":41037,"Ġaprès":41038,".getPosition":41039,"(KeyCode":41040,"ĠSilva":41041,"Ġmeteor":41042,"Ġendorsement":41043,"Overview":41044,"ĠPoss":41045,".Inject":41046,"Ġevenly":41047,"Ġvisualization":41048,"Ġwchar":41049,"ĠHDMI":41050,"Ġfunct":41051,"ickname":41052,"','','":41053,"Ġforwards":41054,"ManagedObject":41055,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":41056,"ĉserver":41057,"ĠOutlook":41058,"ĠChronicle":41059,"Ġdubbed":41060,"Ġdok":41061,"ĠWear":41062,".AL":41063,"paren":41064,".Interface":41065,"Interfaces":41066,".cod":41067,"Ġdib":41068,".Globalization":41069,"ĠAcademic":41070,"Ġassms":41071,"Autom":41072,"Ġlw":41073,"ĠNW":41074,"Ġ&&čĊ":41075,"Ġproblema":41076,"ĠManufacturing":41077,"limits":41078,"-mobile":41079,"Ġfilme":41080,"/map":41081,"Ġdoit":41082,"ĠInk":41083,"Ġsued":41084,".arr":41085,"Ġundermin":41086,"ĠProc":41087,"crollView":41088,"__$":41089,"Ġsidewalk":41090,"(that":41091,"ื":41092,"[q":41093,"grammar":41094,"Ġtë":41095,"quito":41096,"Ġspiral":41097,"extended":41098,"Ġfocal":41099,"Ġdigging":41100,"pas":41101,"ĠTall":41102,".proxy":41103,"itures":41104,"TRACT":41105,"ĠRealm":41106,"Ġfeder":41107,"Ġoriented":41108,"ĠAlternative":41109,"Ġowe":41110,"Ġsourced":41111,"inker":41112,".det":41113,"Sep":41114,"ĠQui":41115,"ĠPalmer":41116,"(_,":41117,"samples":41118,"oyer":41119,"ullan":41120,"quez":41121,"Edges":41122,"Ġshout":41123,"ĠAchie":41124,"Ġhaar":41125,"_Construct":41126,"Ġpremature":41127,"Ġrevert":41128,"').Ċ":41129,"Ġschn":41130,"filtered":41131,"nullptr":41132,"Saved":41133,"itecture":41134,"CLA":41135,"Ġvl":41136,"stell":41137,"ĉMe":41138,"ĠLip":41139,"national":41140,"Ġwholly":41141,"Ġsprings":41142,".Timer":41143,"ĉsrc":41144,"elsen":41145,"åħ¶":41146,"Ġcommunicating":41147,"ĠQuiz":41148,"Ġteng":41149,"Ġgez":41150,"ĠOutside":41151,".Sign":41152,"(cs":41153,"Ġdisputes":41154,"ĠWeiss":41155,"annes":41156,">No":41157,"ĠBach":41158,".removeAll":41159,"refer":41160,"/dashboard":41161,"ĠAjax":41162,"IndexChanged":41163,"ĠWeak":41164,"'\"Ċ":41165,"Ġsights":41166,"accessToken":41167,"ĠJoi":41168,"(domain":41169,"ĉcv":41170,"Ġcontinuation":41171,"Ġplum":41172,"adir":41173,".setMessage":41174,"Ġï¼Į"
:41175,"Ġswallow":41176,"ĠLamp":41177,"Ġqw":41178,"Ġuu":41179,"Coin":41180,"ubic":41181,"ĠDeals":41182,"race":41183,"Ġdictator":41184,"Ġmeme":41185,"turned":41186,"ĠJulie":41187,".gridColumn":41188,"Ġpuppy":41189,"Ġpam":41190,"Ġ){čĊ":41191,"Ġinviting":41192,"Ġfrench":41193,"vim":41194,"Ġwrapping":41195,"Ġ#-}Ċ":41196,"([-":41197,"Early":41198,"Ġshiny":41199,".faces":41200,"Ġrebell":41201,"abcdef":41202,"ält":41203,"Ġestimation":41204,"phys":41205,"losures":41206,"_REL":41207,"Ġexclusion":41208,"ĠSkype":41209,"weise":41210,"-stop":41211,"nothing":41212,"ĠEgg":41213,"isors":41214,"Richard":41215,"Ġcounseling":41216,"Ġcommem":41217,"ĠQMessageBox":41218,"ĠSynd":41219,"ĠFrost":41220,"ĠCompetition":41221,"ĠAwake":41222,"Ġted":41223,"iciones":41224,"ĠDevComponents":41225,"VERTISEMENT":41226,"otti":41227,".runner":41228,"Ġuniquely":41229,".flag":41230,"ĉrs":41231,"_generic":41232,"Ġ```Ċ":41233,"ACHINE":41234,"Ġmein":41235,"(Application":41236,"(br":41237,"Ġratios":41238,":,":41239,"ĠXCTest":41240,"ustainable":41241,"-www":41242,"itles":41243,"_TEMP":41244,"Ġsyst":41245,"umericUpDown":41246,"ĉassertTrue":41247,"Ġwf":41248,".peek":41249,"ĠBulg":41250,"Ġterrifying":41251,".MODE":41252,"ĠGW":41253,"ár":41254,"Ġfic":41255,"Ġcommitments":41256,"-tech":41257,"ĠLiquid":41258,"opez":41259,"zheimer":41260,"aña":41261,"-media":41262,"(animated":41263,"_goal":41264,"Ġgum":41265,"ystone":41266,".SET":41267,"ĠWend":41268,"setCellValue":41269,"Ġmsgs":41270,"cash":41271,"ALLOC":41272,"/aws":41273,"Ġmicrowave":41274,".Pointer":41275,"ĉConsole":41276,"_sorted":41277,"ĠFilip":41278,"Prod":41279,"Ġ//!<":41280,"ingroup":41281,"Ġks":41282,"_TRI":41283,"Ġteaspoon":41284,"ĠATT":41285,"Ġrecovering":41286,"ĠGLOBAL":41287,".Par":41288,"Ġ/>;Ċ":41289,"Ġmarble":41290,"ulators":41291,"ĠCycle":41292,"Ġherbs":41293,"_metric":41294,")!":41295,"_CLOCK":41296,"_Button":41297,"Harry":41298,"è¿Ľ":41299,"Ġstrains":41300,"ĠAppBar":41301,"ĠChan":41302,"/video":41303,"Ġbam":41304,".Progress":41305,"$f":41306,"lemen":41307,"Ġirregular":41308,"ĠDuncan":41309,"ĠMint":41310,"-video":41311,"া":41312,"ówn":41313,"ĠEMPTY":41314,"Ġstacked":41315,"ĠHA":41316,"_cut":41317,"Ġwherein":41318,"ĠWays":41319,"(counter":41320,"è¯ķ":41321,"FormGroup":41322,"Ġblew":41323,"courses":41324,"Ġproductos":41325,"rys":41326,"ĠRestr":41327,"Ġstyling":41328,">s":41329,"Ġpiv":41330,"Ġitertools":41331,"getRepository":41332,"ĠIk":41333,"_devices":41334,"layui":41335,"Ġhalfway":41336,"Ġfranç":41337,"Ġtuning":41338,"OA":41339,"_Node":41340,"arde":41341,"Ġfierce":41342,"licted":41343,"#čĊ":41344,"Ġbreakthrough":41345,"ĠErik":41346,"Ġbride":41347,"Ġ.\"":41348,"culus":41349,"inside":41350,"ĠIndianapolis":41351,"ĠEE":41352,"Ġyog":41353,"urret":41354,".fs":41355,".grad":41356,"_cards":41357,"_accuracy":41358,"_epi":41359,"queda":41360,"/org":41361,"éªĮ":41362,"Ġcompte":41363,"))[":41364,"Outside":41365,"Greater":41366,"ĠRenderer":41367,".actor":41368,"Accounts":41369,"Idle":41370,"_hours":41371,"erner":41372,"Joined":41373,"Ġmenj":41374,"requires":41375,"ĠOPER":41376,".removeChild":41377,"ĉsp":41378,"Ġesse":41379,"rift":41380,"xFE":41381,"ĠShakespeare":41382,"____________":41383,"Ġbudgets":41384,"ModelState":41385,"fillable":41386,"-component":41387,"ocos":41388,"ĠBUTTON":41389,"/io":41390,",out":41391,"sms":41392,"Thomas":41393,"ĠArmed":41394,"resume":41395,"Ġrotating":41396,"ĠVault":41397,"Ġseus":41398,".(*":41399,"Ġamino":41400,"Ġ[]);ĊĊ":41401,"Ġprovoc":41402,"nox":41403,".GetEnumerator":41404,"=======Ċ":41405,"æĸĻ":41406,"_scroll":41407,"Ġfilmed":41408,"ĠSoci":41409,"gap":4
1410,"gro":41411,"Vote":41412,"\"But":41413,"_RC":41414,"Animal":41415,"ÂĢ":41416,"ibile":41417,"Ġawaken":41418,"orest":41419,"inja":41420,"ĠIvan":41421,"(Command":41422,"Ġ*****":41423,"η":41424,"Ġkvinder":41425,"/helpers":41426,"_cases":41427,"tg":41428,"ìĦ¸":41429,"Registered":41430,"ĉpass":41431,"_digits":41432,"Ġcontour":41433,"Ġinfants":41434,"Ġjustification":41435,"ĠFortunately":41436,"Contr":41437,"ĠonCreateView":41438,"_SAMPLE":41439,"ĠallowNull":41440,"Ġnud":41441,"Ġfetched":41442,"_equ":41443,"ĠUnable":41444,"=\\\"\"":41445,">{Ċ":41446,"Ġcommittees":41447,"istema":41448,"+\".":41449,"ÃŃan":41450,"mant":41451,"Ġsoutheast":41452,"ï¼ĮĊ":41453,"dialogs":41454,"PROJECT":41455,"charger":41456,"-port":41457,"(uuid":41458,".export":41459,"Six":41460,"ĠRP":41461,"Prem":41462,"Ġconscience":41463,"ĠmarginRight":41464,"_distribution":41465,"yaml":41466,"resizing":41467,"Dock":41468,"ĠLocations":41469,"GY":41470,"Seed":41471,"BUFFER":41472,"ossip":41473,"ullen":41474,"Things":41475,"-self":41476,".poll":41477,"PLAYER":41478,"Ġå®":41479,"GROUP":41480,"ĠAway":41481,"Ġgospel":41482,"xfd":41483,"Mary":41484,"ĠPortable":41485,"TURE":41486,"Ġutilis":41487,"Ġseit":41488,"Ġstrand":41489,"Ġtransc":41490,"Ġ(^":41491,"ĠAlfred":41492,".mem":41493,".circle":41494,"Ġ~/":41495,"forcing":41496,"Ġriot":41497,"prox":41498,"THON":41499,"ización":41500,"ĠNI":41501,"rost":41502,"Ġdispro":41503,"_instances":41504,"ï¼ĮâĢľ":41505,"ographer":41506,"endas":41507,"ĠIsaac":41508,"ĠPine":41509,"/dis":41510,"ĠcolorWith":41511,"iterate":41512,"_stride":41513,"Ġpunto":41514,".EventArgs":41515,"(center":41516,"Ġneighboring":41517,"ĠPrison":41518,"ĠMessenger":41519,"Ġepidemic":41520,"dao":41521,"_complex":41522,"Ġgravel":41523,"_DIP":41524,"ément":41525,"ĠAri":41526,"_bitmap":41527,".quit":41528,"(valid":41529,"Ġpend":41530,"Ġrespiratory":41531,"Ġrebound":41532,"DefaultValue":41533,"ãĥŃ":41534,"Ġcommits":41535,".tests":41536,"_fr":41537,"itet":41538,".sf":41539,"Ġspacecraft":41540,"critical":41541,"Ġdepressed":41542,"ĠAnyObject":41543,"Ġunb":41544,"Ġdiscern":41545,"(mysql":41546,"Latin":41547,"ĠBog":41548,"ĠWildlife":41549,"ToFile":41550,"ioxid":41551,"@RestController":41552,"Ġ\"$(":41553,"Ġ<<\"":41554,"Ġdefects":41555,"Ġdatum":41556,"hin":41557,"Ġrealizar":41558,"anyahu":41559,"ĠSig":41560,"@Data":41561,"adaptive":41562,"ĠCatherine":41563,".cr":41564,"ĠCOOKIE":41565,"Ġpictured":41566,"ĠFighter":41567,"Queryable":41568,"ĠAnyway":41569,"ĠGLFW":41570,"_namespace":41571,"_ft":41572,"Ġ])":41573,"Organization":41574,"Ġconstitutes":41575,"Ġquand":41576,"(chunk":41577,"\"/>čĊ":41578,"ĠLakes":41579,"mainwindow":41580,"Carthy":41581,"spin":41582,"(csv":41583,":red":41584,"-commerce":41585,"ู":41586,"Ġdiscovering":41587,"Ġeco":41588,"_fac":41589,"inceton":41590,"ĠGreens":41591,"jwt":41592,"ص":41593,"ĠBroncos":41594,"ĠGoods":41595,"(GTK":41596,"ĠreturnValue":41597,"Ġsiempre":41598,"Ġneutr":41599,"went":41600,"ĠNatal":41601,"Ġenthusiastic":41602,"á»į":41603,"FN":41604,"/database":41605,"Catalog":41606,"Ġbrun":41607,"ĠKash":41608,"_Pl":41609,"iscrim":41610,",width":41611,"Ġinmates":41612,"Assignment":41613,"ĠHaven":41614,"Ġplayground":41615,"exam":41616,"@Controller":41617,"uliar":41618,".getParent":41619,"Ġ\";ĊĊ":41620,":size":41621,"issors":41622,"Ġfis":41623,"Ġalc":41624,"ensation":41625,"ĠNixon":41626,"Ġmighty":41627,"-str":41628,"_special":41629,"_ADC":41630,"ĠTwig":41631,"umbling":41632,"-address":41633,"Ġheroin":41634,"YTE":41635,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":41636,"Friend":41637,"Ġave":41638,"ĠPNG":41639,"ĠKurdish":41640,"DataSetChanged":41
641,"Ġblades":41642,"bral":41643,"Steam":41644,"Ġsigu":41645,"IRTUAL":41646,"acos":41647,"UDP":41648,"(database":41649,"hec":41650,"ĠStrings":41651,"_scalar":41652,"ĉdesc":41653,"ĠTLS":41654,";\"Ċ":41655,"ĠCorbyn":41656,"SimpleName":41657,"uell":41658,"ĠEntre":41659,"ellites":41660,"-place":41661,"Ġfrankly":41662,"ĠErf":41663,"CEL":41664,"ĠpaÃŃs":41665,"Ġhedge":41666,"Ġlatent":41667,"ĠIRQ":41668,"ĠHerald":41669,"ĠPrec":41670,"ë³´":41671,".TEXT":41672,"Salary":41673,"Ġautumn":41674,"Ġtravail":41675,".Sum":41676,"Ġcared":41677,"Mor":41678,"Ġintuitive":41679,"Ġjournals":41680,"_IT":41681,"ĠTrou":41682,"ä¼ł":41683,"HasColumnName":41684,"Composite":41685,"Ġspice":41686,"_disk":41687,"_CODES":41688,"ĠIntroduced":41689,"iona":41690,"Ġnuestra":41691,"oct":41692,"ĠĠĠĠĊĠĠĠĠĊĠĠĠĠĊ":41693,"(parameter":41694,"Ġstudios":41695,"ĠprojectId":41696,"Ġbdsm":41697,".SqlClient":41698,"imizer":41699,"ĠCARD":41700,"+t":41701,"aan":41702,".sol":41703,"_Adjust":41704,"Ġrighteous":41705,"ĠLogging":41706,".filters":41707,"_TAB":41708,"ĉsys":41709,"rophic":41710,"otherapy":41711,"ĠBrowse":41712,"keyboard":41713,"RON":41714,"+\\":41715,"ropped":41716,"Ġextensively":41717,"fk":41718,"Ġlime":41719,"years":41720,"Exc":41721,"Ġsph":41722,"Ġcheating":41723,"andro":41724,"ÃŃo":41725,"Ġprince":41726,"oire":41727,"ĠDestination":41728,"ĠConverts":41729,"Ġupstream":41730,"oled":41731,"Ġservants":41732,"Ġsemantic":41733,"Ġcrunch":41734,"Ġeventual":41735,"runner":41736,"/error":41737,"Spin":41738,"Ġsecretly":41739,"Ġassemble":41740,".Person":41741,"enderror":41742,"_<":41743,"Ġpendant":41744,"Sleep":41745,"ĠChemistry":41746,"Ġbosses":41747,"lk":41748,"))),Ċ":41749,"Blockly":41750,"DEVICE":41751,"Ġreflecting":41752,"Ġample":41753,"Milliseconds":41754,"ĠPresidential":41755,"Ġusuarios":41756,"ĠNZ":41757,"ĠSalary":41758,"ĠAmanda":41759,"_np":41760,"jury":41761,"Ġkön":41762,"Ġtherapist":41763,"Ġhomosexual":41764,"ĠDrake":41765,"-window":41766,"ĠLocated":41767,".Driver":41768,"ĠVIDEO":41769,"Ġmerchants":41770,"ĠChest":41771,"-lock":41772,"/php":41773,"Ġmilano":41774,"_STYLE":41775,"arger":41776,"idea":41777,"GUID":41778,"advanced":41779,"meal":41780,"OptionsItemSelected":41781,"='%":41782,"ĠCham":41783,":data":41784,"(stat":41785,"WillAppear":41786,"Ġinformal":41787,"aji":41788,"Ġreproductive":41789,"ĠCAS":41790,"ãģ£":41791,"FUNC":41792,"ĠRuth":41793,")+(":41794,"CONST":41795,"ĠFans":41796,"ĠgroupId":41797,"xffffffff":41798,"Ġsampler":41799,"Ġ}}\">":41800,".the":41801,"Ġhollow":41802,"WAY":41803,"ĠFaculty":41804,"AttributedString":41805,"ĠLooks":41806,"ĠRex":41807,"jk":41808,"ĠMIL":41809,"Ġbard":41810,".Long":41811,"Ġlivest":41812,"Ġskal":41813,"icism":41814,"MAIN":41815,"Ġmucho":41816,"BODY":41817,"Ġese":41818,"ĉuse":41819,"Foot":41820,".SQLException":41821,"Ġinheritance":41822,"received":41823,"Ġputas":41824,"edis":41825,"alsa":41826,"ĠErrorMessage":41827,"Booking":41828,"Ġtract":41829,"acz":41830,"ĠCant":41831,"_regex":41832,"Ġideological":41833,"Ġjihad":41834,"hos":41835,"/sys":41836,"colm":41837,"(pool":41838,"Ġestán":41839,"ĠPending":41840,"emás":41841,"Ġktóry":41842,"));ĊĊĊ":41843,"transactions":41844,"Ġwield":41845,"itere":41846,"erture":41847,"_ss":41848,"Ġstretching":41849,"Ġprisoner":41850,".ReadAll":41851,"Ġbesch":41852,"--;čĊ":41853,"Ġcrisp":41854,"_SCAN":41855,"Ġae":41856,"Strict":41857,"ĠMinneapolis":41858,"ĠBoeing":41859,"aris":41860,"rek":41861,"_pipe":41862,"Ġpriests":41863,"(EIF":41864,"ehicles":41865,"ĠInteractive":41866,"between":41867,"ĉNullCheck":41868,"ĠBlair":41869,"ĠLt":41870,"_inline":41871,"ethyl":41872,"¼":4
1873,"_packages":41874,"Ġbarrels":41875,"_he":41876,"Ġregexp":41877,"_pts":41878,"_Handler":41879,"ingular":41880,"ĠNissan":41881,"ĠRanch":41882,"Ġperch":41883,"Unsupported":41884,"Smith":41885,"ĠLegends":41886,"Mi":41887,"Ġgf":41888,"steder":41889,"Ġacquiring":41890,"Ġsimulator":41891,"(),\"":41892,"receive":41893,"Ġinplace":41894,"ACTION":41895,"ĠWebDriver":41896,"filesystem":41897,"'+Ċ":41909,"Ġcredible":41910,"amat":41911,"playing":41912,".setImageResource":41913,"quel":41914,"Ġpodr":41915,"geom":41916,"Ek":41917,"ĠQatar":41918,"Ġgeld":41919,"?',Ċ":41920,"Ġcyl":41921,"(ax":41922,"ĠWI":41923,"urally":41924,"ĠBrasil":41925,"Ġsenza":41926,"aley":41927,"onen":41928,"Ġbah":41929,"Ġmolecule":41930,"Rad":41931,"è¿°":41932,"ANCH":41933,"-background":41934,"-agent":41935,"Ġprolifer":41936,":boolean":41937,"Ġtide":41938,"erializer":41939,"_;čĊ":41940,"Fee":41941,"**)":41942,"ergy":41943,"ĠHonor":41944,".Logging":41945,"iris":41946,"Ġundermine":41947,"ĠDy":41948,"Ġtyr":41949,"Ġdeque":41950,"Ġdamer":41951,"([])Ċ":41952,".layoutControlItem":41953,"peated":41954,"CAN":41955,"ragments":41956,"Land":41957,")]);Ċ":41958,"ĠSah":41959,"ĠDECL":41960,"Within":41961,"ĠNamespace":41962,"another":41963,"sembling":41964,".describe":41965,"Consum":41966,"ĠFear":41967,"given":41968,"Orange":41969,"This":41993,"ĠdataIndex":41994,"Ġprintable":41995,"ĠEyes":41996,"_targets":41997,"(Py":41998,".over":41999,"Ġbru":42000,"ampton":42001,"Ġplaintiff":42002,");Ċ":42013,"invest":42014,".*ĊĊ":42015,"Ġtélé":42016,"Ġsuperf":42017,"Ġcascade":42018,"DTD":42019,"Ġvivid":42020,"Ġsubsidies":42021,"ĠHass":42022,"Ġcollaps":42023,"Ġceramic":42024,"{}\".":42025,"ĠLeakage":42026,"-trash":42027,"collapsed":42028,"-social":42029,"ĠChad":42030,"Ġinclined":42031,"Ġsto":42032,"Ġstoryboard":42033,".payment":42034,"stackoverflow":42035,"ĠRaiders":42036,"Ġ#'":42037,"olicies":42038,"ìľ¼ë¡ľ":42039,"emap":42040,"Ġkj":42041,"Ġquota":42042,"ĠGardens":42043,"ë²Ī":42044,"ĠAngels":42045,"Ġoft":42046,"Ġlowercase":42047,"ĠiParam":42048,"Ġcheapest":42049,"unta":42050,"_pkt":42051,"icators":42052,"Ġleurs":42053,"Ġdecreases":42054,"ĉdefine":42055,"PREC":42056,"ammers":42057,"ĠPreparedStatement":42058,"(direction":42059,"Ġcrews":42060,"arked":42061,"ĠMemphis":42062,"ĠSell":42063,"GTK":42064,"Ġmaid":42065,":disable":42066,"éĽĨ":42067,"ĠPf":42068,"Ġalbeit":42069,"openh":42070,"?>\">Ċ":42071,".getSource":42072,"(scale":42073,"Du":42074,"ĠPIL":42075,"_refresh":42076,"Ġbets":42077,"(car":42078,"ĠVon":42079,"|--------------------------------------------------------------------------Ċ":42080,"ĠGrat":42081,"Much":42082,"(Dialog":42083,".stopPropagation":42084,"Ġtek":42085,"Ġexits":42086,"'],$":42087,"ĠphoneNumber":42088,"ucs":42089,"ecimal":42090,"--------------":42091,"inp":42092,".pojo":42093,"Ġcorpus":42094,"Ġpractitioners":42095,".pic":42096,"\"testing":42097,"ĠstringBy":42098,".NotNull":42099,"Ġrang":42100,".Dynamic":42101,"_Render":42102,"аÑĤа":42103,"Waiting":42104,"ĠWik":42105,"Ġoverwhelmed":42106,"%\">":42107,"ĠAE":42108,"}}>Ċ":42109,"uw":42110,"_typ":42111,"Ġbuckets":42112,"Ġgreeting":42113,"Ġlaughter":42114,"Ġantagon":42115,"uggestion":42116,"-email":42117,"ĉtop":42118,"Ġeros":42119,"_tri":42120,"Ġissuing":42121,"Ġhá":42122,"Ġisolate":42123,"Overflow":42124,",E":42125,"Ġnutritional":42126,"ĠAbbott":42127,"Ġnf":42128,".touch":42129,".fetchall":42130,"_zip":42131,"\")}Ċ":42132,"Ġamat":42133,"ĠCisco":42134,"ĠnÃ¥":42135,"PLEX":42136,"Ġsei":42137,"foto":42138,".toJson":42139,"å¤ļ":42140,"ĠKlein":42141,"Ġlibc":42142,"Ġminers":42143,"å¢":42144,"-print":42145,"Ġ
Pride":42146,"Todos":42147,"Ġmasked":42148,"ĠsetData":42149,"Ġtelefon":42150,"Ġunhappy":42151,"ĠTables":42152,"geb":42153,"(debug":42154,"_allowed":42155,"-access":42156,"Ġlogistics":42157,"Ġgems":42158,"ĠMature":42159,"Ġrsp":42160,"ĠAlle":42161,".getBytes":42162,"\\web":42163,"ynchronized":42164,"Paragraph":42165,"Ġthrottle":42166,".sqlite":42167,"consulta":42168,"ĠSeah":42169,"Ce":42170,"Ġsubmar":42171,"ERE":42172,"Vous":42173,"Ġreddit":42174,"Ġsqlalchemy":42175,"-mile":42176,"ocide":42177,"Pour":42178,"}}\">Ċ":42179,"stead":42180,"Ġ@(":42181,"Ġ[])":42182,"ĠAds":42183,"Ġoverload":42184,"ridden":42185,"ĠDesert":42186,"ĠWrap":42187,"ĠPortuguese":42188,"etz":42189,"ĉfirst":42190,"Ġmilestone":42191,"æĹł":42192,"ÑĥÑī":42193,"(success":42194,"\")Ċ":42363,"ĠDollar":42364,"Ġemoji":42365,"Carousel":42366,"-player":42367,"Ġadjusting":42368,"Ġjuga":42369,"allenges":42370,"gene":42371,"(bodyParser":42372,"lopedia":42373,"ĠBehind":42374,"Ġsleeves":42375,"Ġdragging":42376,"ĠChevrolet":42377,"Ġbiz":42378,"ivities":42379,"ĠFrequency":42380,",char":42381,".WHITE":42382,"_preview":42383,")';Ċ":42384,"_ax":42385,"IONS":42386,".cpu":42387,".inputs":42388,"UBE":42389,"_feed":42390,"ĠSupplement":42391,"!).":42392,"esus":42393,"ĠUDP":42394,"Ġmicrophone":42395,"Ġconfirms":42396,".isNotEmpty":42397,"\":\"\",Ċ":42398,"_SCREEN":42399,"ĉexpected":42400,"+-+-+-+-":42401,"ĠHait":42402,"fastcall":42403,"Ġdepict":42404,"vb":42405,"_picture":42406,"ĉdescription":42407,"ĠWife":42408,"uci":42409,"Ġvicious":42410,"ä»ĸ":42411,"ueba":42412,"ĠsetUser":42413,"ãģ¡":42414,"Ġdiving":42415,"Ġopera":42416,"usercontent":42417,"arah":42418,")},":42419,"yun":42420,"velt":42421,"Ġuncovered":42422,"Ġhips":42423,"Ġoscill":42424,"Ġasserting":42425,"ĠXi":42426,".restore":42427,"kea":42428,"Ġspelling":42429,"Ġderive":42430,"abwe":42431,"ĠDow":42432,".setType":42433,"_vs":42434,"Ġcozy":42435,".categories":42436,"Org":42437,"_mgr":42438,"Ġdungeon":42439,"collectionView":42440,"ĠBlank":42441,"acias":42442,"ää":42443,"_cleanup":42444,"_ACTIVITY":42445,"Ġtriangles":42446,".MenuItem":42447,"Ġiphone":42448,"ĠWon":42449,"]]ĊĊ":42450,"ĠComparison":42451,".Doc":42452,"Ġcanonical":42453,"ĠSudan":42454,"'){":42455,"UpInside":42456,"builtin":42457,"ENCY":42458,"xbe":42459,"Ġchuck":42460,"Ġcontradict":42461,"Ġnuestro":42462,"Ġarchitectural":42463,"ĠFib":42464,"Ġcompares":42465,"*k":42466,"Cfg":42467,"çĦ¡":42468,"nten":42469,"Matches":42470,"ĠDOWNLOAD":42471,"_HANDLER":42472,"management":42473,"[S":42474,"ENG":42475,"ÂĢÂ":42476,"fang":42477,"Ġslipped":42478,"ĠLanka":42479,"escaping":42480,"Ġtackles":42481,"ĠPedro":42482,".Prop":42483,".''":42484,".Generated":42485,".NewGuid":42486,"atrigesimal":42487,"illon":42488,"Ġstatistic":42489,"species":42490,"holding":42491,"Drupal":42492,"Ġfundamentally":42493,"Ġbondage":42494,"Ġresolutions":42495,"InlineData":42496,"\\Type":42497,"estion":42498,".wrap":42499,"Ġwarriors":42500,"ĠLOCAL":42501,"Archive":42502,"Ġembraced":42503,"á»§":42504,".Ver":42505,"ĠAffordable":42506,"olesale":42507,"ĠApplied":42508,"ĠConversion":42509,"mega":42510,"_cam":42511,"Ġceremon":42512,"aurus":42513,"ĠVolk":42514,".opens":42515,"/about":42516,"ĠStd":42517,"journal":42518,"()){čĊ":42519,",\"\\":42520,"(Arrays":42521,"ĠDense":42522,"aseña":42523,"änner":42524,"/stat":42525,"userData":42526,"Ġgerman":42527,"Ġtz":42528,"worthy":42529,"FormatException":42530,"pherd":42531,"Ġsmiles":42532,"ĠWhenever":42533,"(adapter":42534,".badlogic":42535,"Ġbriefing":42536,".GridColumn":42537,"-char":42538,"dimension":42539,"ĠCopper":42540,"Ġninth":42541,"Ġ'
{{":42542,"Ġrav":42543,"_Table":42544,"Ġderivatives":42545,"ĠRaise":42546,"ĠFut":42547,"armor":42548,"-padding":42549,"Ġremin":42550,"ĉstyle":42551,"ĠMembership":42552,"Ġspreads":42553,"Ġgalleries":42554,"ĠClarke":42555,"Ġconception":42556,"minute":42557,"Ġabusive":42558,"_adj":42559,"Ġterrific":42560,"Ġovert":42561,"ourcing":42562,"Ġentrada":42563,"levels":42564,"Ġcritique":42565,"Ġrespects":42566,"ĠMMA":42567,"iene":42568,"Ġencaps":42569,"ĠRaymond":42570,"Divider":42571,"ivable":42572,"baz":42573,"Ġ@_;Ċ":42574,"ĠClaire":42575,"Ġurging":42576,"CEE":42577,"Ġtransformer":42578,"discord":42579,"ĠJourney":42580,"tos":42581,"Ġcompetitions":42582,"ĠOBJ":42583,"ĠBis":42584,"Ġrelaxation":42585,"idy":42586,"_INSTANCE":42587,"ĠPref":42588,"dados":42589,"iciencies":42590,"ĠMediaQuery":42591,"ĠCube":42592,"ĠStrange":42593,"gpu":42594,"(days":42595,"_InitStruct":42596,"Ġfingerprint":42597,"emat":42598,"ĠGecko":42599,"Ġrails":42600,"ĠLum":42601,"straction":42602,"igung":42603,"(movie":42604,"_dictionary":42605,"_interrupt":42606,"ĠQC":42607,"iked":42608,"appendChild":42609,"recipient":42610,"ré":42611,"Ve":42612,"Ġtowel":42613,".lastIndexOf":42614,"Ġplacebo":42615,"ĠWie":42616,".esp":42617,"(Debug":42618,"operative":42619,"Ġdeceased":42620,"&id":42621,"ĉmutex":42622,"elic":42623,"Ġbapt":42624,"ĉčĊčĊ":42625,"Ġfarther":42626,"Half":42627,".disable":42628,".menuStrip":42629,"leccion":42630,"ĠresultCode":42631,"Ġcans":42632,"-election":42633,"female":42634,"_FIX":42635,"ausible":42636,"ĠPOWER":42637,"Ġreconstruction":42638,"Ġscans":42639,".XtraBars":42640,"âĢĺs":42641,"Removed":42642,"Ġparagraphs":42643,"_margin":42644,"Ġlymph":42645,"Ġbos":42646,"lington":42647,"ĠBaptist":42648,"Ġadvertisements":42649,"ĠManage":42650,"/yyyy":42651,"IOUS":42652,"ENCES":42653,"ĠFiction":42654,"ĉmenu":42655,"ĠFileOutputStream":42656,"ovan":42657,"ĠFeng":42658,"Ġskipping":42659,"getClass":42660,"anni":42661,"Ġrebounds":42662,"Ġpublicity":42663,"Ġingres":42664,"usement":42665,"Ġthoughtful":42666,".Chart":42667,"Ġhatte":42668,"passport":42669,"Ġhooked":42670,"ĠLens":42671,"Ġflagship":42672,"Ġstip":42673,"ĠGEN":42674,"Ġclues":42675,"ipv":42676,"ĠRise":42677,"ĠGew":42678,"tablename":42679,"Ġforemost":42680,"_validate":42681,"_analysis":42682,"olla":42683,"Ġqualifications":42684,"Ġdistributions":42685,"ĠFlower":42686,"Ġtense":42687,"Ġthankful":42688,"Ġclutch":42689,"Ġunified":42690,"roads":42691,"Ġsiti":42692,"Ġstall":42693,"_PRIORITY":42694,"cstdlib":42695,"_USERNAME":42696,".bytes":42697,"?page":42698,"ermalink":42699,"ĠVeget":42700,"/vnd":42701,"-author":42702,".NONE":42703,"ĠConcurrent":42704,"ĠCry":42705,"Ġstarters":42706,"ĠInteraction":42707,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":42708,"ĠLEVEL":42709,"Ell":42710,"ĠcomboBox":42711,"ĠTheresa":42712,"tek":42713,"_Handle":42714,"Ġaby":42715,".gdx":42716,",end":42717,"(Local":42718,"Ol":42719,"knife":42720,"arial":42721,"ĠHoff":42722,"Ġprostituerade":42723,"Doctor":42724,"Instances":42725,".SetValue":42726,"ĉfrom":42727,"Ġluxurious":42728,"Indent":42729,"Allocator":42730,"_DRAW":42731,"(\",\",":42732,"ĠFrances":42733,"ĠgroupBox":42734,"(schema":42735,"Printf":42736,"ORIES":42737,"-gradient":42738,"Ġreput":42739,"arin":42740,"_DONE":42741,"incre":42742,"ignty":42743,"Ġexert":42744,"Ġ-.":42745,"/App":42746,"-through":42747,"Ġdeclining":42748,"Ġdessert":42749,"Ġincumb":42750,"Ġdesignation":42751,".PORT":42752,",strong":42753,"Ġsandbox":42754,"Ġwines":42755,"ĠPav":42756,"$str":42757,"askell":42758,"Ġhö":42759,"ĠPY":42760,"GetInstance":42761,"TextInput"
:42762,"gameObject":42763,"/events":42764,"createdAt":42765,"ĠlocalVar":42766,"ĠWHITE":42767,"pered":42768,"ilege":42769,"efficient":42770,",color":42771,"cate":42772,"ĠCafe":42773,"Ġsimilarities":42774,"Ġpumps":42775,"ĠHungary":42776,".Username":42777,"Ġskate":42778,"Ġtouchdowns":42779,"Ġaccelerate":42780,"ĠHelen":42781,"OMEM":42782,"ĠKun":42783,"_vol":42784,"ĠfindAll":42785,"ĠMenschen":42786,"ahead":42787,");\"":42788,"kommen":42789,"Ġpossessed":42790,".argmax":42791,".transition":42792,"ARP":42793,"OLUME":42794,"(script":42795,"ĠÐĺ":42796,"ĠFinding":42797,"onces":42798,"Io":42799,"Bold":42800,"Ġrenewal":42801,"_DIALOG":42802,"Ġdisreg":42803,"INTERN":42804,"Ġtoute":42805,"Ġelectr":42806,"ĠGross":42807,"ĉtrue":42808,".Fields":42809,"ĠWIDTH":42810,"ĠDent":42811,"ĠÃģ":42812,"NSNotification":42813,"Ġaos":42814,"Ġmelee":42815,".Validation":42816,"ĠDEC":42817,"-dependent":42818,"Ġsuic":42819,"Traits":42820,"$message":42821,"ĠDear":42822,"ĉFILE":42823,"languages":42824,".Prot":42825,".addr":42826,"-generation":42827,"ICON":42828,"Ġtransplant":42829,"-description":42830,"Ġchasing":42831,"Ġchees":42832,"Ġ}*/Ċ":42833,"Trad":42834,"queries":42835,"/widgets":42836,"subpackage":42837,"Ġespec":42838,"Ġcracked":42839,"Ġcompetitor":42840,"Purchase":42841,"-team":42842,"olecular":42843,"orThunk":42844,"&P":42845,"Ġrelent":42846,"/#{":42847,"ĠproductId":42848,"Ġè¾":42849,"ĠLav":42850,"ĠAlter":42851,".Mode":42852,"ADIO":42853,"grp":42854,"æ·»åĬł":42855,"Quit":42856,"Ġdepths":42857,"-category":42858,"ĠDATABASE":42859,"SPELL":42860,"ĠFalcon":42861,"ĠQStringList":42862,"Ġ''.":42863,"ĠInstitution":42864,"damage":42865,"azor":42866,"belongsTo":42867,"verages":42868,"ĠNONE":42869,"ippets":42870,",\\Ċ":42871,"Ġfootprint":42872,"_archive":42873,"nak":42874,".getField":42875,"ĠReflection":42876,"Ġ']":42877,"ĠHBO":42878,"_discount":42879,"Ġincest":42880,"ĠDodge":42881,"ĠWade":42882,".NO":42883,"\"encoding":42884,"ĠBlockchain":42885,"Ġlawsuits":42886,"ĠMaint":42887,"chten":42888,"Ġétait":42889,"Ġktóre":42890,"_ctl":42891,"(timer":42892,"Battle":42893,"izo":42894,"ayed":42895,"IOR":42896,"ĠGlasgow":42897,"Ġsynth":42898,"_logs":42899,".pose":42900,"_AdjustorThunk":42901,"((&":42902,"Ġunsure":42903,"ystate":42904,"íķĺëĬĶ":42905,"OULD":42906,".ng":42907,"Ġdefaultdict":42908,"workspace":42909,"Ġselective":42910,"PickerController":42911,"YNAMIC":42912,".methods":42913,"Ġpathways":42914,"ĠFew":42915,"KG":42916,"CRYPT":42917,"following":42918,"ĠDLC":42919,"ĠSara":42920,"Ġpreset":42921,"estructor":42922,"ĠKurt":42923,"Ġairplane":42924,"Ġomp":42925,"ĠParents":42926,"ĠMartinez":42927,".complete":42928,"Ġbroadly":42929,"Ġscare":42930,"ĠMé":42931,"Ġelimination":42932,"Ġpoured":42933,"/sw":42934,"Ġcomun":42935,"Ġmasc":42936,"ĠOrganic":42937,"ĠStringUtils":42938,"ilateral":42939,"Ġreluctant":42940,"-age":42941,"Ġnz":42942,".\"\\":42943,"Ġpastor":42944,"alez":42945,"Ġefect":42946,"prov":42947,"/init":42948,"Ġpenn":42949,"unds":42950,"Ġssize":42951,"ĠProj":42952,"basename":42953,"Ġshells":42954,"ĠNeck":42955,"ĠEnforcement":42956,"vided":42957,"stown":42958,"Sphere":42959,"$r":42960,"ussen":42961,"afil":42962,"ĠTelegram":42963,"Ġanalytical":42964,"нÑĭе":42965,"usually":42966,"xn":42967,"Ġhistorian":42968,"ĠGregory":42969,"olph":42970,"ĠUna":42971,"Ġcontributes":42972,"%-":42973,"antiago":42974,"ÑĢед":42975,".region":42976,"Ġabrupt":42977,"ĠUnsupportedOperationException":42978,"ĠTASK":42979,"_finish":42980,"Ġnotorious":42981,"ĠVs":42982,"ĠMQ":42983,"Ġsunset":42984,"Ġunacceptable":42985,"arcer":42986,"Ġillumin":42987,"ĠOrb":42988,"Ġbh":4
2989,"Este":42990,"_dispatch":42991,"Ġripped":42992,"Ġtoujours":42993,"ĠParcel":42994,"_ll":42995,".userName":42996,".classes":42997,"SOURCE":42998,"(Number":42999,"елÑı":43000,"Ġheadphones":43001,"(side":43002,"constitution":43003,"annah":43004,"čĊĠĠĠĠĠĠĠĠčĊ":43005,"Ġcliff":43006,"-ref":43007,"Ġmostrar":43008,"ĠPowell":43009,"+y":43010,"ĠBG":43011,"_fragment":43012,".Port":43013,"Ġrealizing":43014,"paramref":43015,"Ġhometown":43016,"@Table":43017,"+\"--}}Ċ":43196,"French":43197,"EntityManager":43198,"ĠPlain":43199,"////////////////////////////////////////////////////////////////////":43200,"³":43201,"(RE":43202,"capt":43203,"Ġorganisms":43204,"Ġjets":43205,"olocation":43206,"ĠAppRoutingModule":43207,"Ġglorious":43208,"æľį":43209,"Ġdiscarded":43210,"ĉĉĉĉĠĠĠĠĠ":43211,"ĠArnold":43212,"lug":43213,"Ġparl":43214,"Ġhormones":43215,"Ġmah":43216,"ĠSonic":43217,"Ġorganizers":43218,"_PLATFORM":43219,".inv":43220,"Ġchord":43221,"ventional":43222,"ĉof":43223,"Episode":43224,".Enum":43225,"unkt":43226,"ĠDh":43227,"ĠJared":43228,"ĠNak":43229,"Ġintends":43230,"Endian":43231,"Ġaustralia":43232,"_cv":43233,"(resolve":43234,"Ġclinics":43235,"liked":43236,"ASHINGTON":43237,"inha":43238,"'*":43239,"ĠNP":43240,"_beh":43241,"Ġhf":43242,"Ġwür":43243,"categoria":43244,"$form":43245,"Ġsubway":43246,"ĠisActive":43247,"popular":43248,"Cour":43249,"Ġcooldown":43250,"Ġainsi":43251,"ĠGLuint":43252,"ereal":43253,"ĠarrayOf":43254,"Ġhatch":43255,"==========":43256,"resses":43257,"_PP":43258,".^":43259,"_decay":43260,"ĠBless":43261,"metrics":43262,"ĠCOPYING":43263,"ĠDumpster":43264,"ĠJosé":43265,"ĠDesigns":43266,"<":43269,"Ġ\"}Ċ":43270,"timezone":43271,"Ġeer":43272,"maxcdn":43273,"ĠESC":43274,"igaret":43275,"_connected":43276,"_reverse":43277,"Ġquestionable":43278,"ĠUSC":43279,"Ġtutti":43280,"Ġdropout":43281,"ĠActivities":43282,"ĠWinds":43283,"')));Ċ":43284,"Ġcongest":43285,"ģı":43286,"Ġprolonged":43287,"è¿Ļ":43288,"ĠCrossAxisAlignment":43289,"LEEP":43290,"ĠVALID":43291,"ĠGaz":43292,"Ġdependence":43293,"ĠPrix":43294,".CompilerServices":43295,"jump":43296,"Ġstrat":43297,"circ":43298,"ĠCUSTOM":43299,"xaa":43300,"Ġbmp":43301,"Ġbureau":43302,"Ġwaren":43303,"NX":43304,"(Window":43305,"ĠChristie":43306,"_FE":43307,"Ġtn":43308,"ĠOmega":43309,"communications":43310,"HomePage":43311,"completion":43312,"Ġsupplying":43313,"YPES":43314,"ável":43315,"åζ":43316,"(click":43317,"\\Contracts":43318,"/questions":43319,"Ġez":43320,"AMS":43321,".mesh":43322,"Ġ'\\Ċ":43373,"Robot":43374,"JsonObject":43375,"ĠDF":43376,"ĠProcessor":43377,"_should":43378,".protobuf":43379,"-users":43380,"Ġembry":43381,"FONT":43382,"Ġstartups":43383,"ĠDataSource":43384,")#":43385,"uros":43386,"_Color":43387,"Ġstandalone":43388,"}[":43389,"jd":43390,"Ġforgive":43391,"Ġngx":43392,"ĠGenerally":43393,"Ġconfigurable":43394,"/order":43395,"Ġvas":43396,"')\";Ċ":43397,"ĠRR":43398,"ĠTroy":43399,"Ġcompromised":43400,"ĠSwan":43401,"intendent":43402,"Central":43403,"_keeper":43404,"Ġarquivo":43405,"ĠReadOnly":43406,"_curve":43407,"kv":43408,"entin":43409,"è±":43410,"ĠEy":43411,".imread":43412,"ĠPam":43413,"iffe":43414,"ativity":43415,"xbc":43416,"Ġgrim":43417,"-filled":43418,"namese":43419,"']:":43420,"Ġaur":43421,"ĠGibson":43422,".MouseEvent":43423,"Ġlado":43424,"avadoc":43425,"Ġfamil":43426,"ĠModer":43427,"fps":43428,"ãĢĢãĢĢ":43429,"-example":43430,"ĠAlzheimer":43431,"ĠUtf":43432,"_arguments":43433,"Conclusion":43434,"textContent":43435,"remaining":43436,"Ġinterrupts":43437,"ĠBackup":43438,"ĠMong":43439,"Ġreceptors":43440,"histor":43441,".coroutines":43442,"Ġshouted":43443,"A
larm":43444,"Ġcombust":43445,"Ġgrote":43446,"ultural":43447,"(ids":43448,"--------------------------------------------------------------------------------":43449,"iplinary":43450,"Opts":43451,"ĠYale":43452,"localStorage":43453,"Ġequival":43454,"ĠFleet":43455,"\\b":43456,"*pi":43457,"ĠQLabel":43458,"æ¡":43459,"Ġvx":43460,"ĠACL":43461,"Ġsucesso":43462,"Ġperc":43463,"ĠNotre":43464,"Ġanarch":43465,"Ring":43466,"spb":43467,"Ġstrpos":43468,"stores":43469,"ĠMaple":43470,"(MainActivity":43471,"(\"\"))":43472,"ĠviewHolder":43473,"Quad":43474,"Ġigual":43475,"orsche":43476,".margin":43477,"Ġindie":43478,"Ġfranc":43479,"ĠFormBuilder":43480,"ĠParticip":43481,".flash":43482,"Ġstorms":43483,"Ult":43484,"Ġfen":43485,"[new":43486,"Ever":43487,"=\"Ċ":43488,"Ġlocalized":43489,"_follow":43490,"Ġnave":43491,"Ġdominance":43492,"(tile":43493,"Journal":43494,"ĠVC":43495,"Ġpenetration":43496,"ï¼ķ":43497,"Ġcompartment":43498,"Ġbids":43499,"Formatted":43500,"******/ĊĊ":43501,"(city":43502,"âĢĶit":43503,"[C":43504,"ĠuseCallback":43505,"aub":43506,")?.":43507,"ĠVAR":43508,"ĠSebastian":43509,"ĠMoss":43510,"Ġabundant":43511,"Greg":43512,"ÑĤа":43513,"_ci":43514,"Ġbibli":43515,"CRM":43516,"ĠAttempt":43517,"isme":43518,"dash":43519,"ãĢİ":43520,"_mu":43521,".FormattingEnabled":43522,"Indeed":43523,"-direct":43524,"Ġsucking":43525,"Ġpne":43526,"ocabulary":43527,"ĠPackers":43528,".Navigation":43529,"Ġpied":43530,"cribing":43531,"ĠStuart":43532,".ToDouble":43533,"ĠSecondary":43534,"Saving":43535,"ĠDut":43536,"ĠMadd":43537,"Magic":43538,",H":43539,".documentElement":43540,"ĠBST":43541,"Ġdiffers":43542,"Ġmoreover":43543,"_nd":43544,"SEARCH":43545,"пÑĢав":43546,"æ´":43547,"toMatch":43548,"Ġdecreasing":43549,"-member":43550,"ampus":43551,"(boost":43552,"Daily":43553,"DataGridView":43554,"ĠHttpContext":43555,"Ġhipp":43556,"_workers":43557,"-language":43558,"éĵ":43559,"Ġconsisted":43560,"athing":43561,"ĠMercury":43562,"$content":43563,"Ġpracticed":43564,"ĠModules":43565,"_DAY":43566,"Ġweaknesses":43567,"ĠLodge":43568,"Ġnar":43569,"ĠMate":43570,"Ġjp":43571,"ĠHttpHeaders":43572,"Ġsmo":43573,"ĠTOKEN":43574,"])(":43575,"Ġaqui":43576,"swagen":43577,"Ġsrv":43578,"ĉans":43579,"Around":43580,"ĠManuel":43581,"Ġfictional":43582,"ĠIMG":43583,"Ġ.'":43584,"ĠBerry":43585,"Ġwallpaper":43586,"sexual":43587,"iero":43588,"ĠçļĦ":43589,"ìĨĮ":43590,"BackingField":43591,"ĠAdrian":43592,"BASEPATH":43593,"Ġrepeats":43594,"Ġblues":43595,"Ġunpredict":43596,"_coll":43597,"stacle":43598,"ĠTumblr":43599,"ĠElf":43600,"Ġassurance":43601,"Ġcensus":43602,"ĠIMPORT":43603,"ENDER":43604,"anos":43605,"Ġ=(":43606,"ĠEllis":43607,"\"ĊĊĊĊ":43608,".win":43609,"ĠAbove":43610,"alon":43611,"_tick":43612,"Ġrepresentations":43613,"Ġæķ":43614,"wid":43615,"ĠArms":43616,"Lista":43617,"_failure":43618,"_cm":43619,".FlatAppearance":43620,"Ġthrone":43621,"Patch":43622,"ĠVoy":43623,"engl":43624,"Ġnegotiating":43625,">`":43626,"Ġshoots":43627,"ĠFPS":43628,".Year":43629,"ĠKiss":43630,"ención":43631,"reeting":43632,"FromFile":43633,"Ġresignation":43634,"Ø·":43635,"Ġtwins":43636,"ượ":43637,"Ġgebru":43638,".getContent":43639,".Tree":43640,"ĠEmployees":43641,"ĠFIFA":43642,"Ġcertainty":43643,"(Cl":43644,"Ġtotals":43645,"editable":43646,"à¥Ģ":43647,".Reporting":43648,"Mas":43649,"quiet":43650,".rules":43651,"ĠVO":43652,"conexion":43653,",K":43654,"Ġallocator":43655,"ĠPowder":43656,"\\Repository":43657,"Beat":43658,"_tipo":43659,"Ġ['',":43660,"_INTR":43661,"Ġ<<<":43662,"\");čĊ":43691,"dropIfExists":43692,"ĠBeg":43693,"_HAL":43694,"ĠcrossAxisAlignment":43695,"ĠEvidence":43696,"Ġpeculiar":43697,"Ġ
institute":43698,"veis":43699,"Ġfft":43700,"Ãģ":43701,"Ġzoekt":43702,"analy":43703,"ĠHomeland":43704,"Ġpenetr":43705,"uddenly":43706,"ĉelement":43707,"ĠBren":43708,"ĠTrudeau":43709,"ĠCuban":43710,"jam":43711,"uslim":43712,"_ev":43713,"Ġstems":43714,"}%":43715,"Ŀå§ĭ":43716,"Ġbranding":43717,"Ġcorrespondence":43718,".jquery":43719,"¢åįķ":43720,"ĠReads":43721,"(HttpStatusCode":43722,"assin":43723,"(slot":43724,"ĠGraduate":43725,"///<":43726,"Ġinformations":43727,"ENABLE":43728,"Ġpuis":43729,"Ġfinder":43730,"ĠBris":43731,"Ġnettsteder":43732,"_mid":43733,"Ġogs":43734,"ĠSterling":43735,"Ġarrog":43736,"strftime":43737,"|ĊĊ":43738,"Ġvox":43739,"ĠRegardless":43740,"Ġeso":43741,"ĠComfort":43742,".BooleanField":43743,"Ġuh":43744,"ACY":43745,"Ġsqueez":43746,"ĠVic":43747,"contro":43748,".lo":43749,"Ġire":43750,"ĠComedy":43751,"ë¶":43752,"Ġoriginated":43753,"Ġshipment":43754,"|max":43755,"_guid":43756,"levation":43757,"наÑı":43758,"(undefined":43759,"ĠDDR":43760,"Ġshootings":43761,"ĠLatino":43762,"ENDOR":43763,"Ġaveraging":43764,"Ġgreeted":43765,"Ġtheaters":43766,"ое":43767,"ĠdB":43768,"Ġgst":43769,"Ġdefinite":43770,".Storage":43771,".her":43772,"Ġafore":43773,"ĠReality":43774,"ĠGods":43775,"versed":43776,"Ġhandsome":43777,"Ġexcluding":43778,"(ad":43779,"Quotes":43780,"ĠScheme":43781,"?q":43782,"ĠTamil":43783,"Ticks":43784,"Ġpest":43785,"'n":43786,"Ġpornography":43787,"_modal":43788,"Ġ----------":43789,"Ġdisposable":43790,"FREE":43791,"Ġshark":43792,"CHE":43793,"Ġdepicted":43794,"Ġdemonstrations":43795,"ĠKilled":43796,"ĠRULE":43797,"Ġobsessed":43798,"Ġsimplified":43799,"Postal":43800,"Ġconceptual":43801,"Ġpst":43802,"Las":43803,"_PROJECT":43804,"ucceeded":43805,"olu":43806,"ÄŁi":43807,"Ġpersonalities":43808,"Ġreshape":43809,"Ġenclosed":43810,"ĉptr":43811,"Ġtutorials":43812,"Ġexploded":43813,"_DIRECTORY":43814,"åĨħ容":43815,"Ġcanon":43816,"Ġrecognise":43817,"PAD":43818,"ĠApprox":43819,"ĠRestore":43820,"ĠImportant":43821,"Ġheavier":43822,".Sequential":43823,"Earth":43824,"ĠMilk":43825,".setRequest":43826,".tem":43827,"Ġreconstruct":43828,"Ġskeptical":43829,"_Private":43830,"BUF":43831,"qua":43832,":a":43833,"Ġsek":43834,"Ġdwell":43835,"ossa":43836,"Ġrewarded":43837,"ий":43838,"(topic":43839,"_partition":43840,"Ġ__________________":43841,"Keywords":43842,"ĠFranco":43843,"Lite":43844,"Ġnaken":43845,"Ġза":43846,"OBJECT":43847,"Ġcrafts":43848,"ĠSwap":43849,".Xna":43850,".Connect":43851,"Ġbalcony":43852,"(real":43853,"ĠBarnes":43854,"bir":43855,"ĠTwenty":43856,"ayan":43857,"atars":43858,"ĠPropel":43859,"ĠIhnen":43860,"Upgrade":43861,"Ġcurb":43862,"-second":43863,"Ġneph":43864,".pres":43865,"ìŀħ":43866,".seq":43867,"Ġpadded":43868,"\"?":43869,"jl":43870,"ãĥ¬":43871,"')a":43875,"Coordinates":43876,"Ġenacted":43877,"ENTS":43878,"Ġlac":43879,".final":43880,"ĠPhpStorm":43881,"called":43882,"Ġinquiries":43883,".middleware":43884,"ĠDowntown":43885,"/';Ċ":43886,"Ġkilomet":43887,"accel":43888,"Ġquien":43889,"wstring":43890,"setData":43891,"Ġmanera":43892,"Ġmodular":43893,"rimp":43894,"Ġtariffs":43895,"âĢĻil":43896,"_THROW":43897,"/color":43898,"ĠHTMLElement":43899,"Ġcarro":43900,"Ġprere":43901,"Ġplotting":43902,"ĠPositive":43903,"ĠMachines":43904,"OTES":43905,"Ỽ":43906,"pleasant":43907,"Ġalte":43908,"Ġainda":43909,"these":43910,"Ġcors":43911,"ipay":43912,"ĠAdvisory":43913,"ĠRubio":43914,"jq":43915,"Ġlimestone":43916,"Ġdetached":43917,"设置":43918,"tenant":43919,"ĠDepth":43920,"alore":43921,"ĠÑģÑĤÑĢок":43922,"ĠFORE":43923,"ĠLay":43924,"presentation":43925,")');Ċ":43926,".subplots":43927,"Ïĥ":43928,"NOW":43929,"Gar":43930,"h
andles":43931,"abra":43932,"puties":43933,"ĠElectrical":43934,"Middle":43935,"ropic":43936,"ĠJD":43937,"ĠDyn":43938,"ĠBristol":43939,"ĠMcCarthy":43940,"Ġstriker":43941,"Ġenumerable":43942,"ĠEvan":43943,".defaults":43944,"quences":43945,")||":43946,"ĉtoken":43947,"âĹı":43948,"-dropdown":43949,"STORE":43950,"ĠGraphic":43951,"(pp":43952,"Expl":43953,"Ġupwards":43954,"ĠDistributed":43955,"ĠWEB":43956,"Jer":43957,"isNaN":43958,"çĶŁæĪIJ":43959,">R":43960,"üssen":43961,"efs":43962,"Ġuncover":43963,"Ġlud":43964,".calculate":43965,"Ġintptr":43966,"Ġmidfielder":43967,".Headers":43968,"Ġmf":43969,"eref":43970,".Metro":43971,"ĠSpeaking":43972,":b":43973,"Ġcryptocurrencies":43974,"Ġdemons":43975,"ĉEXPECT":43976,"Ġwicked":43977,"youtube":43978,":Int":43979,"ĠHindi":43980,"ĠCAT":43981,"Ġع":43982,"rar":43983,"omore":43984,"/per":43985,"/license":43986,"Ġreim":43987,"Ġawaiting":43988,"Ġlethal":43989,"ĠEF":43990,"rounded":43991,"ĠPlatinum":43992,"ĠвÑģе":43993,".coords":43994,".Device":43995,"/item":43996,"ĠWenn":43997,"compileComponents":43998,"ĠKinder":43999,".removeItem":44000,"Ġanda":44001,"bnb":44002,"Ġpra":44003,"(transaction":44004,"Ġembarrassing":44005,"ĉBOOL":44006,".contentView":44007,"Ġeventdata":44008,"atore":44009,"ĠprovidedIn":44010,"irma":44011,"Ġzona":44012,"_HW":44013,"æĻ":44014,"Ġstove":44015,"Ġcounterpart":44016,"_Product":44017,"_MANAGER":44018,"Ġinfring":44019,"ĠERA":44020,"_party":44021,"Ñij":44022,"Ġinici":44023,"_Request":44024,"Ġmiracle":44025,"ĠcancelButton":44026,"Spy":44027,"ató":44028,"Ġpolish":44029,"ĠNicole":44030,".displayName":44031,"\\Requests":44032,"ĠuseHistory":44033,"RouterModule":44034,"Ġstared":44035,"IDER":44036,"ÑĥнкÑĨи":44037,"Ġnota":44038,"$arr":44039,"pecified":44040,"Ġtopp":44041,"_DRIVER":44042,"/ng":44043,"åł":44044,"_tm":44045,"%timeout":44046,"\"":44488,"tlement":44489,"$(\"":44490,"FromString":44491,"ĠBild":44492,"Ġconventions":44493,"_native":44494,"ĠInspector":44495,"ĠPist":44496,"ubar":44497,"Ġregs":44498,"ĠPilot":44499,"Thus":44500,">'+":44501,"Ġcela":44502,".news":44503,"(Product":44504,"Living":44505,"Russia":44506,"Ġfacet":44507,"etical":44508,"Ġ['$":44509,"/[":44510,"ĠDire":44511,"Ġgases":44512,"ĠINFORMATION":44513,"ĠEat":44514,"ĠForums":44515,"ĠCharacters":44516,"_met":44517,"Ġìĭľ":44518,"Ġkings":44519,"achie":44520,"ĠLambda":44521,"Ġtimers":44522,"ĠLighting":44523,"ĠCasey":44524,"addir":44525,"andex":44526,".answer":44527,"ĠHip":44528,"ĠPrincip":44529,"StartDate":44530,"ĠãĢĮ":44531,"tres":44532,"Ġ&#":44533,".MaxValue":44534,"ĠProblems":44535,"Ġlatex":44536,"OfClass":44537,"ĠLynn":44538,"//'":44539,"Ġvoyage":44540,"Ġshuttle":44541,"ĠRoller":44542,"ĠRuntimeError":44543,"uya":44544,"Dic":44545,"ĉbuilder":44546,"Ġbullying":44547,"Ġsimplest":44548,".called":44549,"ĠLR":44550,"Ġmorality":44551,"Ġsturdy":44552,"tracking":44553,".swagger":44554,"_BIND":44555,"ITOR":44556,"-urlencoded":44557,"ĠÑħ":44558,"ĠTrinity":44559,"Ġtraps":44560,"Ġ|-":44561,"ĠsetText":44562,"Ġbargain":44563,"Ġbrakes":44564,".getCode":44565,"Ġmigrate":44566,"Ġribbon":44567,")return":44568,"Ġcharger":44569,"acom":44570,"ADIUS":44571,"ĠAmbassador":44572,"-after":44573,"Ġanni":44574,"ĉspin":44575,"Concept":44576,"ĠHenderson":44577,"ĠHOST":44578,".rank":44579,"ĠNortheast":44580,"Ġberlin":44581,"Ġrequis":44582,".feed":44583,"ĠsourceMapping":44584,"ĠRencontre":44585,".ajax":44586,"nestjs":44587,"Ġtrek":44588,"ĠNacional":44589,"Ġ&[":44590,"Ġpayable":44591,"ortex":44592,"Ġdept":44593,"fieldName":44594,"Ġcompletes":44595,"ĠRVA":44596,"Ġonions":44597,"alignment":44598,"Formats":44599,"Ġ'{$":44600
,"HashSet":44601,"ĠBod":44602,".InvariantCulture":44603,"Ġsettlements":44604,"Ġhydr":44605,".updated":44606,"venth":44607,"(seconds":44608,"=\"/\"":44609,"Ġwebpage":44610,"(ĊĊ":44611,"Ġtir":44612,"Ġtoes":44613,"ĠBrick":44614,"Ġambition":44615,"Pot":44616,"=max":44617,"ETIME":44618,"Ġdepot":44619,"calls":44620,"ĠNorwegian":44621,"`:":44622,"Ġburger":44623,"Ġprofessors":44624,"ĠAllocate":44625,"-thirds":44626,"-chart":44627,"Ġford":44628,"*N":44629,".kotlin":44630,"Ġpaperwork":44631,"ĠDEVICE":44632,"%@\",":44633,"respect":44634,"(mp":44635,"é«ĺ":44636,"-if":44637,"Ġcushion":44638,"obot":44639,"Ġparc":44640,"SPACE":44641,"ĠNetanyahu":44642,"Ġselfish":44643,"feat":44644,"Ġclientes":44645,"-tools":44646,"Ġporch":44647,"Ġjq":44648,".verbose":44649,"Ġliberals":44650,"])ĊĊĊ":44651,"pies":44652,"NotBlank":44653,"(term":44654,"ÈĽi":44655,"_Params":44656,".normalize":44657,"Bullet":44658,"ASIC":44659,"(hex":44660,"_cliente":44661,"+,":44662,"_DI":44663,"Ġforthcoming":44664,"}\")]Ċ":44665,"seo":44666,"Um":44667,">Name":44668,"Ġcomfortably":44669,"irectional":44670,"WITH":44671,"/pr":44672,"ĠPoor":44673,"ĠVitamin":44674,"vic":44675,"GH":44676,"Ġpriorit":44677,"ĠNN":44678,"ĠClosed":44679,"¤í":44680,"ĠisOpen":44681,"\\Console":44682,"AndFeel":44683,".SUCCESS":44684,"_OPERATION":44685,"polation":44686,"ĠTas":44687,"psz":44688,">'.":44689,"CURRENT":44690,"Vendor":44691,"hosts":44692,"ĠErd":44693,">tagger":44694,"ĠsourceMappingURL":44695,"Ġmarathon":44696,"_closed":44697,"Ġexemption":44698,"Ġrecognizes":44699,"ideshow":44700,"'$":44701,"('/');Ċ":44702,"mits":44703,"warz":44704,"ĠCherry":44705,"µ¬":44706,"nor":44707,"porte":44708,"Ġwl":44709,"_backup":44710,".getBoolean":44711,".getResource":44712,"Ġdefinitive":44713,".EditText":44714,"ĠsÃŃ":44715,".CONT":44716,"ĠPLAYER":44717,".cards":44718,"ĠShore":44719,"('/')Ċ":44720,"cluir":44721,"WebDriver":44722,"(month":44723,"-release":44724,"Ġinspector":44725,"å£":44726,"ĠNF":44727,"_clip":44728,"åŃIJ":44729,"Ġinteracting":44730,".tmp":44731,"Ġ'''ĊĊ":44732,"Ġdee":44733,"Ġfrost":44734,"\"]))Ċ":44735,"ĠPlaces":44736,"Throws":44737,"fork":44738,"/day":44739,"iPhone":44740,"ĠMIC":44741,"Ġfolding":44742,"Ġcrore":44743,"ĠChiefs":44744,"pherical":44745,"(price":44746,".WriteString":44747,"Ġexiting":44748,"]',Ċ":44749,"ighting":44750,"Ingredient":44751,"(vertex":44752,"ĠscrollView":44753,"hf":44754,":new":44755,"SEN":44756,"sector":44757,"Ġspins":44758,"ĠScheduler":44759,"otechn":44760,"semicolon":44761,"FontOfSize":44762,"ĠSpecifically":44763,"flamm":44764,".ObjectId":44765,"Ġconta":44766,"_permissions":44767,"ĉFROM":44768,"ICODE":44769,"/kg":44770,"ĠHotels":44771,"-med":44772,"ĠDin":44773,"Ġnavy":44774,"getParam":44775,"Ġmend":44776,"Ġportrayed":44777,"ĠMetropolitan":44778,"Painter":44779,"Ġreferral":44780,"_good":44781,"Ġmarvel":44782,"osaic":44783,">(&":44784,".ur":44785,"Ġestos":44786,"William":44787,"Ġtimber":44788,"Ġquelques":44789,"ĠDocuments":44790,".Xaml":44791,"Ġbatches":44792,"éģĵ":44793,"ĠReleased":44794,"Tail":44795,"COOKIE":44796,"heid":44797,"_station":44798,"ĠVia":44799,"Sale":44800,"ĠRepeat":44801,"Ġpromin":44802,"ĠZo":44803,"-forward":44804,"ĠIon":44805,"itary":44806,"Ġjus":44807,"-request":44808,"Ġproudly":44809,"ĠStreaming":44810,"(MouseEvent":44811,"ĠSprint":44812,"_rotation":44813,"Repositories":44814,"Ġtart":44815,"ĠÑģв":44816,"Ġmappings":44817,"èª":44818,"Cu":44819,"Cycle":44820,"Ġbun":44821,"ĉlua":44822,"ãĥī":44823,"Ġ((!":44824,"Ġcollectively":44825,"ĠCond":44826,"Ġwszyst":44827,"(lib":44828,"openhagen":44829,"_skip":44830,".ColumnHeader":44831,"éĤ
":44832,"perienced":44833,"ıè¿°":44834,"_props":44835,"Ġcontrace":44836,"Ġmatchup":44837,"abetic":44838,".members":44839,"RECT":44840,"(dat":44841,"Ġsog":44842,"renom":44843,"_Method":44844,"Customers":44845,"fullname":44846,"ZN":44847,"retry":44848,"Ġkap":44849,"ĠNeu":44850,"èĬ":44851,"addChild":44852,"willReturn":44853,"_permalink":44854,"Ġenergetic":44855,"ĠWet":44856,"ĠMorr":44857,"Ġgcd":44858,"counts":44859,",type":44860,"dig":44861,"(Login":44862,"Ġcracks":44863,"Ġbacterial":44864,"ĠMeat":44865,"ĠArmstrong":44866,"ĠBronze":44867,"Ġapproximate":44868,"_dirs":44869,"liga":44870,"ÅĤad":44871,"Ġkindness":44872,"Ġcontre":44873,"ĠEVERY":44874,"MET":44875,"Ġannouncements":44876,"gpio":44877,"ĠWaitForSeconds":44878,"ĠPhotoshop":44879,"Ġdiscontin":44880,"/dd":44881,"Ġtopology":44882,"anical":44883,".interface":44884,"aucoup":44885,".HashSet":44886,"ARIANT":44887,"(routes":44888,"ĠTeh":44889,"Ġhype":44890,"]\").":44891,"Ġslam":44892,"Ġbroth":44893,"-inter":44894,"ĠRid":44895,"-manager":44896,"Cancelar":44897,"ĠPagination":44898,"Ġsoundtrack":44899,"Ġposterior":44900,"Ġscrub":44901,"creating":44902,"-*":44903,"irteen":44904,".dy":44905,".symmetric":44906,"Ġ\"\".":44907,"===============":44908,"Ġchassis":44909,"ĠnumberOfRows":44910,"Developer":44911,"_bins":44912,"ĠOUR":44913,"rieb":44914,"Pros":44915,"ĠwiÄĻ":44916,"\"d":44917,"Ġasyncio":44918,"zeigen":44919,"_spi":44920,".ALL":44921,"Ġscrews":44922,"Chinese":44923,"ĠapiKey":44924,"Ġunsuccessful":44925,"ĠSeahawks":44926,"ORG":44927,"竳":44928,"Ġprofessionally":44929,"ĠCoupon":44930,"åŃĹæ®µ":44931,"Convention":44932,"Ġpolym":44933,"æīĭ":44934,"Ġsalvation":44935,"Ġengineered":44936,"ĠWrest":44937,"ĠGCC":44938,"Ġwarmer":44939,"LayoutConstraint":44940,"Ġaggrav":44941,"Scripts":44942,"venture":44943,"Ġrefrigerator":44944,"Ġinnovations":44945,"ĠRunner":44946,"NIC":44947,"ĠRolling":44948,"ControlEvents":44949,"Ġloos":44950,"pac":44951,"ĉpanel":44952,"efe":44953,"ĠBuddha":44954,"--------------Ċ":44955,"åºĵ":44956,"(forKey":44957,"Ġlumin":44958,"Ġ(?":44959,"ĠAIDS":44960,",user":44961,"imientos":44962,"contentType":44963,"antlr":44964,"é¦":44965,"ĠWelt":44966,"Production":44967,"might":44968,"ĠVII":44969,"\",(":44970,"Ġobserving":44971,"Ġdeliberate":44972,"(control":44973,"Ġwithd":44974,"Ġsemana":44975,"STACK":44976,"uchen":44977,"Nice":44978,"ĠDeutschland":44979,"ĠSpecifies":44980,"dma":44981,"izio":44982,"ĠFacts":44983,"_popup":44984,"ĠDirectors":44985,"{:":44986,"[R":44987,"ĠÑįлеменÑĤ":44988,"Ġplat":44989,"Ġdirecting":44990,"ä¸ī":44991,"ĠGilbert":44992,"â̦.ĊĊ":44993,".qml":44994,"Ġthereafter":44995,"Ġdisposition":44996,"draft":44997,"Ġsurgeon":44998,"ĠInsider":44999,"Blend":45000,"ĠTrev":45001,"trinsic":45002,"Topics":45003,"rieve":45004,"_FILENAME":45005,"Ġautres":45006,"Jose":45007,"Producer":45008,"erus":45009,"Ġpetit":45010,"ĠNEXT":45011,"ĠFilters":45012,"Ġreplicate":45013,"\"]).":45014,"Ġlenders":45015,"]\",Ċ":45016,";charset":45017,"CppObject":45018,"Ġfloral":45019,"ĠTipo":45020,"Ġcircuits":45021,"easy":45022,"(&$":45023,"itta":45024,"eryl":45025,"_COMMON":45026,"'}}>Ċ":45027,"-backed":45028,"(variable":45029,"(Index":45030,"Ġvoir":45031,"_locations":45032,"++){":45033,"ĠLouisville":45034,"Ġgratitude":45035,".Mockito":45036,"ĠPowers":45037,"ieurs":45038,"Ġgeographic":45039,"rale":45040,"Ġcra":45041,"ĠSpurs":45042,"iphertext":45043,"ACION":45044,"-common":45045,"Ġvictories":45046,"ĠFinals":45047,".shuffle":45048,"-million":45049,"_PROC":45050,"assume":45051,"Ġils":45052,"DBC":45053,"BootTest":45054,"Ġlavor":45055,".testing":45056,".ast":45057,"\"]/"
:45058,"moid":45059,"Ġqualification":45060,"gesch":45061,"ĉput":45062,"Ġairports":45063,"JI":45064,"Teacher":45065,"_uniform":45066,"Ġnama":45067,"ĠBast":45068,"ertype":45069,"capture":45070,"getAll":45071,"ĠReynolds":45072,"ooled":45073,".comments":45074,"Ġchin":45075,").*":45076,"Ġили":45077,"tgl":45078,"udos":45079,"ĠdÃŃas":45080,"chai":45081,".program":45082,"Ġpsz":45083,"ĉicon":45084,"phil":45085,"entral":45086,"_WRAP":45087,"ovi":45088,"Ġnostalg":45089,"Infinity":45090,"ĉyield":45091,"Ġvitamins":45092,"Quaternion":45093,"Sink":45094,"_goods":45095,"Ġ........":45096,"ĠWings":45097,"uridad":45098,"-story":45099,"\"])ĊĊ":45100,"idelity":45101,"TypeDef":45102,"Gtk":45103,"ĠíĮ":45104,"_Main":45105,"Ġchez":45106,"ĠRaven":45107,"Ġpayroll":45108,"Ġfreelance":45109,"LLU":45110,"ĠMend":45111,"eday":45112,"ApiModelProperty":45113,".FormBorderStyle":45114,"Ġeconomist":45115,"stanbul":45116,"Ġfreight":45117,"-Agent":45118,"(meta":45119,"Ġsymmetry":45120,"Ġ'..":45121,".Calendar":45122,"-aut":45123,"gf":45124,"pent":45125,"yclopedia":45126,"Ġwishing":45127,"ĊĊĊĊĊĊĊĊĊĊĊĊ":45128,"Ġgentleman":45129,"Ġê³":45130,"=#":45131,"Ġlectures":45132,"âĢľIn":45133,"Ġ!_":45134,"Ġhb":45135,"ĠVendor":45136,"Recently":45137,"_notes":45138,"æıIJ示":45139,"\"My":45140,"HeadersHeight":45141,"_SO":45142,"Ġunwilling":45143,"Ġsuperhero":45144,"gio":45145,"psy":45146,"ĠPeer":45147,"javax":45148,"&apos":45149,"ĠCrisis":45150,"ordinal":45151,"Memcpy":45152,"++++++++++++++++":45153,"-val":45154,"Ġworkbook":45155,"-ap":45156,"=k":45157,"Ġmetallic":45158,"_peer":45159,"ByPrimaryKey":45160,"_SD":45161,"uator":45162,"_SHADER":45163,")Math":45164,".Transform":45165,"Ġcows":45166,"Phi":45167,"ĠClem":45168,"(_(\"":45169,"ĠLud":45170,"-delay":45171,"ĠSecurities":45172,"ĠOrthodox":45173,"Symfony":45174,"(report":45175,"Ġentertain":45176,"EPS":45177,"izoph":45178,"exual":45179,"IRD":45180,"ä»İ":45181,"Ġlith":45182,"Ġsanitize":45183,"Ġfeminine":45184,"ISBN":45185,".authentication":45186,"_pipeline":45187,"/constants":45188,"ĠCONF":45189,"Ġlucr":45190,"ricia":45191,".ttf":45192,".setContent":45193,"Ġstan":45194,"orean":45195,"ĠLloyd":45196,".rawValue":45197,"Ġgor":45198,"ĠBrowns":45199,"Regression":45200,"Ġlowering":45201,"naissance":45202,"Ġblows":45203,"Ġamazed":45204,"Ġunrelated":45205,"Reviews":45206,"Ġruby":45207,"ĠModifier":45208,"Ġgiants":45209,".thread":45210,"Ġcontainment":45211,"ĠStartCoroutine":45212,"umat":45213,"orelease":45214,"ĠRandy":45215,"@endif":45216,"Digest":45217,"Ġsuburban":45218,"=\");Ċ":45219,"Ġannonce":45220,".variable":45221,"\\Foundation":45222,"Ġacre":45223,"Van":45224,"Ġtuples":45225,"dns":45226,"ĠStanding":45227,"_large":45228,"Ġboxing":45229,"SupportActionBar":45230,"ĠFortune":45231,"ĠRum":45232,"_multiple":45233,"archical":45234,"Ġfwrite":45235,"_quote":45236,"Ġfoolish":45237,"Ġcomprising":45238,"Ġоп":45239,"-selected":45240,"vf":45241,"maid":45242,"Nama":45243,"(datetime":45244,"Ġindirectly":45245,"gart":45246,"fixtures":45247,"chos":45248,"ĠHalo":45249,"Ġrecurring":45250,"-news":45251,"vil":45252,"ĠNursing":45253,"-produ":45254,"ĠHQ":45255,"\\HttpFoundation":45256,"enci":45257,"auen":45258,"Ġvy":45259,"ocracy":45260,"Ġdelegation":45261,"Ġasphalt":45262,"ĠsetSelected":45263,"kok":45264,"/rest":45265,"metics":45266,"ĠNSDate":45267,"Ġtravelled":45268,"Ġrecib":45269,"Ġmime":45270,"CLIENT":45271,"ĠGU":45272,"ĠHANDLE":45273,"/Q":45274,"[z":45275,"Ġbothered":45276,"ĠBBQ":45277,"ças":45278,"_examples":45279,"_FIN":45280,"ĠwhiteColor":45281,"Ġastronom":45282,"-dir":45283,"Ġsovereign":45284,"Ġbreeze":45285,"Ġinning"
:45286,"ĠEdmonton":45287,"gli":45288,".blogspot":45289,"jsx":45290,"Ġversa":45291,"ĠMohammed":45292,".Job":45293,"-toggler":45294,"ĠполÑĮзоваÑĤ":45295,"ardon":45296,"Ġnewborn":45297,"Ġnaval":45298,"noteq":45299,"Ġtumblr":45300,"Ġhentai":45301,"ĠTypically":45302,"Ġloot":45303,".Sprite":45304,"Flight":45305,"Ġwavelength":45306,"-sk":45307,"ĠElle":45308,"_exports":45309,"ĠÑı":45310,"ĠIH":45311,"izophren":45312,"Ġíģ":45313,"_primary":45314,"Ġmois":45315,"ĠBN":45316,"Ġsystemic":45317,"Ġdiferentes":45318,"INCT":45319,"Ġ''ĊĊ":45320,"$q":45321,"WidgetItem":45322,"clide":45323,"$file":45324,"Lemma":45325,"/table":45326,"agrid":45327,"ĠMongoDB":45328,"inte":45329,"Ġapprent":45330,"ÂŃing":45331,".Db":45332,"ĠÃĤ":45333,"hammer":45334,"='';Ċ":45335,"Ġbrokers":45336,"itlement":45337,"semblies":45338,"Ele":45339,"{x":45340,"Ġlastname":45341,"<-":45342,"Ġflatten":45343,"_band":45344,".Root":45345,".readFileSync":45346,"======":45347,".rx":45348,"?čĊ":45349,"Ġmetaphor":45350,"Ti":45351,"conte":45352,"Ġdebit":45353,"Ġcontempt":45354,"CppType":45355,"æĶ¯":45356,"FormField":45357,"ratio":45358,"osopher":45359,"Ġimplant":45360,"PURE":45361,"Ġalta":45362,"_management":45363,"Ġrefine":45364,"ĠCheckBox":45365,"ĠCharl":45366,"-version":45367,"conditional":45368,"venues":45369,"Ġrifles":45370,"Ġoffspring":45371,"Ġmilling":45372,"Ġsharply":45373,"Ġunderwater":45374,"(origin":45375,"_Control":45376,"Ġ.$":45377,"Plugins":45378,"Ġdrying":45379,"Ġillustrates":45380,"-u":45381,"Ġvegetarian":45382,"npc":45383,"Heart":45384,";',Ċ":45385,"comma":45386,"teenth":45387,"asan":45388,"/spec":45389,"_moves":45390,"-margin":45391,"Ġingen":45392,"³³³":45393,"Ġprojet":45394,"Ġotra":45395,"Ġbras":45396,".utc":45397,"Ġslept":45398,"=sub":45399,"abilit":45400,"poster":45401,"Ġsdk":45402,"ouncill":45403,"Ġwd":45404,"PreparedStatement":45405,"ĠDrum":45406,"(attribute":45407,"ĠEthernet":45408,"ĉDB":45409,"California":45410,"cube":45411,"[I":45412,".Created":45413,"ĠHM":45414,"Ġtracing":45415,"FormsModule":45416,"-you":45417,".currency":45418,"feeding":45419,"Ġtbody":45420,"Li":45421,"accion":45422,"nas":45423,"Ġtrouver":45424,"NONE":45425,"\"},čĊ":45426,"Ġftp":45427,"WithIdentifier":45428,"polate":45429,"FileInfo":45430,"Ġpursued":45431,"ĠĠĠĠčĊĠĠĠĠčĊ":45432,"DESCRIPTION":45433,"}*/Ċ":45434,"FromNib":45435,"Ġdecorative":45436,"_SSL":45437,"(chat":45438,"TLS":45439,"Ġsurprises":45440,"alculate":45441,"ĠSplash":45442,"(Configuration":45443,"ĠSEM":45444,"imson":45445,"/library":45446,"":45521,"GED":45522,"faq":45523,"Ġoptionally":45524,"_Dis":45525,"ĠSuccessful":45526,"ĠCensus":45527,"Ġincarcer":45528,"_CARD":45529,"Ġaviation":45530,"ĠGym":45531,"Authority":45532,".Bean":45533,"shader":45534,"NotExist":45535,"_TextChanged":45536,"ĠSTOP":45537,"(team":45538,"\"H":45539,"wg":45540,"Ġgrinder":45541,"Ġstripe":45542,"Ġpreservation":45543,"Claim":45544,"aversal":45545,"warehouse":45546,"targets":45547,"Trust":45548,"Ġallev":45549,",www":45550,"ousse":45551,"_chan":45552,"_Size":45553,"systems":45554,"Ġobjection":45555,"ĠKane":45556,"Ġcorros":45557,"ĠDSL":45558,"Ġua":45559,"ĠMH":45560,"ĠStrategic":45561,"_tcp":45562,"Ġê°Ĵ":45563,"Ġborrowed":45564,"ĠAch":45565,"ĉcommand":45566,"Ġgps":45567,"leston":45568,"ichever":45569,"ĠUA":45570,"Ġassaulted":45571,"Ġspecializes":45572,"ĉsearch":45573,"Hotel":45574,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠčĊ":45575,"ĠPitch":45576,"ĠÙģ":45577,"READY":45578,"Ġparental":45579,"Ġgéné":45580,"Ġdonnées":45581,"Ġdetain":45582,"TARGET":45583,"Ġprotagonist":45584,"ĠclearInterval":45585,"ĠIconButton":45586,"ĠGetAll":45587,"TypeInfo":45588,"EH":4
5589,"âĢľThey":45590,"Ġ{[":45591,"Ġgag":45592,"ĠÚ©":45593,"ĠDropdown":45594,".free":45595,"gone":45596,"imens":45597,"Ġinstal":45598,"ĉcurl":45599,"_CAN":45600,"ĠBone":45601,"ï¼Ķ":45602,"onyms":45603,"-government":45604,".bindingNavigator":45605,"ĠDans":45606,"ĠMcL":45607,"(en":45608,">(_":45609,"ÐĴÑĭ":45610,".*;čĊ":45611,"=j":45612,"-cor":45613,"Son":45614,".ToolStripItem":45615,"-around":45616,"_XML":45617,"endDate":45618,"Ġslack":45619,"Ġrotated":45620,"Ġnoqa":45621,"Ġcottage":45622,"Ġencontrar":45623,"_skill":45624,"houette":45625,"!čĊ":45626,".weather":45627,"Ġemphasized":45628,"å®¶":45629,"ĠÑģпиÑģ":45630,"ĠCompiler":45631,"(android":45632,"ĠâĢº":45633,".turn":45634,"Ġsuppression":45635,"_calls":45636,"Ġ*@":45637,"(strlen":45638,".hex":45639,"ĠBills":45640,"ĠRSA":45641,"ÏĤ":45642,"ĠEscape":45643,"ementia":45644,"Ġfrontend":45645,"Ġpint":45646,"_exc":45647,"zzo":45648,"[],Ċ":45649,"Ġ\"','\"":45650,".Environment":45651,"Ġaforementioned":45652,"Ġendure":45653,"prototype":45654,"therapy":45655,"ssi":45656,"Deg":45657,"_plugins":45658,".userInfo":45659,"Printer":45660,"ĠPROGRAM":45661,"Ġruins":45662,"Ġempirical":45663,"Ġcrawl":45664,"ĠBoiler":45665,"-comment":45666,".subplot":45667,"_et":45668,"Ġ'.',":45669,"minor":45670,"ĠCustoms":45671,"Ġyaw":45672,"underline":45673,"ĠComo":45674,"(('":45675,"(mean":45676,"Ġchaque":45677,"ĠBlocks":45678,".rad":45679,"ilibrium":45680,"Ġwebdriver":45681,"Ġmelhor":45682,"dana":45683,"ĠAbuse":45684,"ĠSouthwest":45685,"ĠParen":45686,"PERTIES":45687,"ĉIL":45688,"Ġscream":45689,"vu":45690,"Ġincomes":45691,"Ġnim":45692,"Ġlace":45693,"Ġcompensate":45694,"Reverse":45695,"Dat":45696,"_attack":45697,"Ġnour":45698,"achen":45699,"cek":45700,"\"+":45957,"Ġtokenizer":45958,"Ġsovereignty":45959,"ĠPence":45960,"()\");Ċ":45961,"Ġpessoas":45962,".Ge":45963,"ĠIncluded":45964,"Ġpagina":45965,"Ġexposing":45966,"еÑĪ":45967,"_SCRIPT":45968,"/$',":45969,"Thumbnail":45970,"×Ķ":45971,"webElementX":45972,"webElementXpaths":45973,"pressure":45974,"ĠCurry":45975,"_CP":45976,"OLUTION":45977,"ILES":45978,"protect":45979,"oola":45980,"Workspace":45981,"{};Ċ":45982,"ĠUNS":45983,"Ġsympathy":45984,"roker":45985,"Ġremodel":45986,"ĉcell":45987,"Ġatop":45988,".FullName":45989,"Ġfaut":45990,"ĠEasily":45991,"_dynamic":45992,"Ġframed":45993,"Ġmotive":45994,"è·¯":45995,"sam":45996,"Ġmarca":45997,"ĠTextEditingController":45998,"Ġdestructor":45999,"cream":46000,"Ġrude":46001,"ĠBold":46002,"ĠIndigenous":46003,"Ġgens":46004,"Ġrelacion":46005,"(system":46006,"ĠUIFont":46007,"_charge":46008,"USTER":46009,"EV":46010,".Namespace":46011,"Ġmerger":46012,"Ġcalloc":46013,"gang":46014,"BadRequest":46015,"Ġsper":46016,"-design":46017,"Ġâĩ":46018,"Chan":46019,"Ġorganism":46020,",)":46021,"=id":46022,"_plane":46023,"ĠCases":46024,"elfast":46025,"ĠLegislature":46026,"ĠFaker":46027,"Ġinvoking":46028,"-utils":46029,"().'":46030,".face":46031,"Ġguardian":46032,"myModal":46033,"Ġclipboard":46034,"ĠATM":46035,"Ġpeas":46036,"ĠSylv":46037,".calc":46038,"ĠContacts":46039,"intValue":46040,"Ġmodifying":46041,"ĠBarb":46042,".loss":46043,"_percentage":46044,"Asked":46045,"(lst":46046,"ategorical":46047,"-files":46048,"ĠRomania":46049,".Ac":46050,"Ġhai":46051,"ĠFlying":46052,"Ġż":46053,"jp":46054,"ĠTrainer":46055,".arc":46056,"_deg":46057,"Ġtraceback":46058,"OrFail":46059,"FLOW":46060,".old":46061,"oya":46062,"gmt":46063,"isempty":46064,"Ġvaccination":46065,"Ġobsolete":46066,"recognized":46067,"Ġruined":46068,"ĠRein":46069,"ĠTracking":46070,"xfb":46071,"اÛĮ":46072,"Ġvære":46073,"Ġbryster":46074,"ĠITS":46075,"Ġdestiny":46076,"Ġs
wear":46077,"Ġredes":46078,"Ġclf":46079,"Ġflipped":46080,"ĉhead":46081,"Bluetooth":46082,"ĠOverrides":46083,":Boolean":46084,"_=":46085,"_lr":46086,"spawn":46087,":index":46088,"VALUES":46089,"iskey":46090,"?\");Ċ":46091,".synthetic":46092,"ĠChecking":46093,"structures":46094,"iping":46095,"Ġvocals":46096,"-Up":46097,"ĠManufacturers":46098,"ĠMarriage":46099,"代çłģ":46100,"Ġgarner":46101,"_Client":46102,"parallel":46103,"RIEND":46104,"Ġvinegar":46105,"segue":46106,"JB":46107,"Ġcontacting":46108,"ĠCarroll":46109,"Ġoutreach":46110,"tensor":46111,"_variant":46112,"Ġtheat":46113,"licable":46114,"{|":46115,"tiny":46116,"_letter":46117,"Ġpencil":46118,"HeadersHeightSizeMode":46119,"iltro":46120,".autoconfigure":46121,".drag":46122,".useState":46123,"ĠBMI":46124,"hint":46125,"Compile":46126,"*\\":46127,"enary":46128,"Ġlvl":46129,".Cache":46130,"+=\"":46131,"_tv":46132,"ruitment":46133,"Ġfread":46134,"Articles":46135,"fila":46136,"Ġpackaged":46137,"âĺĨ":46138,"ATHER":46139,"ĠPlanned":46140,"scheme":46141,"Ġdiary":46142,"Ġoffenses":46143,"/F":46460,"ĠStick":46461,"Ġcerc":46462,"ĠSlee":46463,"ĉĉĠĠĠĠĠĠĠĠ":46464,"":46639,"ĉcol":46640,"VG":46641,"_boolean":46642,"recent":46643,"Ġ*)ĊĊ":46644,"ĠRainbow":46645,"ommen":46646,"Ġlur":46647,"Ġoppression":46648,"(\",\");Ċ":46649,"ĠFacility":46650,"DEFINED":46651,"Ġneon":46652,"Ġoffender":46653,"AFP":46654,"ĠCleaning":46655,"[]):":46656,"Ġundocumented":46657,".Repositories":46658,"ĠGuitar":46659,"аÑģÑģив":46660,"Skills":46661,"Ġtestimon":46662,"ryptography":46663,"ĠAmber":46664,"ĠStalin":46665,"Ġlone":46666,"Ġapenas":46667,"Ġdieses":46668,"ĠArduino":46669,"转":46670,"==-":46671,"_Act":46672,"Ġcoded":46673,"âĸł":46674,"amburger":46675,"-links":46676,"Ġarmour":46677,".High":46678,"getContent":46679,"stag":46680,"Ġheck":46681,"ĠìĹĨ":46682,"ĠMcConnell":46683,"ĠConcert":46684,"ĠAlloc":46685,"äre":46686,".replaceAll":46687,"Ġpartitions":46688,"rott":46689,"ĠFle":46690,"_TREE":46691,"reasonable":46692,"ĠReporting":46693,"Ġbillionaire":46694,"scores":46695,"mins":46696,"-eye":46697,"MORE":46698,"abort":46699,"ĠSWT":46700,"Ġinverted":46701,"ĠTeachers":46702,";n":46703,"Ġastro":46704,"нов":46705,"аниÑĨ":46706,"producto":46707,"countries":46708,"ĠOwen":46709,"Ġcontamination":46710,"Ġvibe":46711,"ĠElli":46712,".script":46713,"ĠOlive":46714,"DMA":46715,"vier":46716,":semicolon":46717,"-module":46718,"gressive":46719,"agu":46720,"_players":46721,"Ġresultados":46722,"started":46723,"scrollTop":46724,"=====":46725,"Ġweighing":46726,"Ġ[[[":46727,"zahl":46728,"(NS":46729,"ĠAssertion":46730,"league":46731,".setTextColor":46732,"ĉMessage":46733,"Ġmoms":46734,"_AF":46735,".wh":46736,"ALS":46737,"Ġautre":46738,"]ĊĊĊĊ":46739,".opacity":46740,"ĠBuddhist":46741,"Ġdeaf":46742,"ĠOrganisation":46743,"(Global":46744,"ensch":46745,"Ġheadache":46746,"ĠAlien":46747,"_inode":46748,"ĠStark":46749,"Ġæī":46750,"-lnd":46751,"oref":46752,"_feat":46753,"Ġpedestrian":46754,"Ġnominal":46755,"Ġballoon":46756,"Ġsprites":46757,"PrototypeOf":46758,"ĠApost":46759,"ĠFEATURE":46760,"OH":46761,"Ġrecess":46762,"ĠDonna":46763,"consumer":46764,"$GLOBALS":46765,"ĠGIF":46766,"-frame":46767,"Inicio":46768,"Ġpassages":46769,"DateString":46770,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":46771,".byte":46772,"Bug":46773,"initializer":46774,"pkt":46775,"odium":46776,"ĠDER":46777,".ops":46778,"leri":46779,"Ġgifted":46780,"Ġdetach":46781,"terrain":46782,"elters":46783,"ãģı":46784,".loader":46785,"ĠNGO":46786,"strncmp":46787,"Kh":46788,"(fontSize":46789,"rocket":46790,"Ġprecedent":46791,"ĠAurora":4
6792,"ĠExperiment":46793,"isphere":46794,"Encoded":46795,"ĠâĢĵĊĊ":46796,"Ġpyramid":46797,"ĠAnniversary":46798,"ofil":46799,"ëŁ":46800,"(plugin":46801,"Coeff":46802,"Ġcooperate":46803,"Ġpredominantly":46804,"ISM":46805,"Phrase":46806,"_DEFINE":46807,"Flip":46808,"AMILY":46809,"ĠMarkets":46810,"ĠStreamReader":46811,"ĠCombine":46812,"Ġmanuscript":46813,"zza":46814,",tp":46815,"Whatever":46816,"ITICAL":46817,"ighbour":46818,"DataProvider":46819,".Texture":46820,"privacy":46821,".SDK":46822,"Ġrecharge":46823,"Ġcpp":46824,"ĠCFG":46825,"(holder":46826,"(py":46827,"mot":46828,"Ġsavoir":46829,"ĠRosa":46830,"ĠPCs":46831,"ĠíĻ":46832,".heroku":46833,"Ġfren":46834,"ĠRiley":46835,"agate":46836,"Ġsond":46837,".xlsx":46838,"Ġhacked":46839,"stad":46840,"Gi":46841,"Ġsanity":46842,"ĠSqlDataAdapter":46843,"...\",":46844,"ĠPussy":46845,"Ġ****************":46846,"Ġhassle":46847,"_PARENT":46848,"ĠUAE":46849,"Ġbeginners":46850,"(Client":46851,"Ġstatistically":46852,".hour":46853,"edelta":46854,"Ġtraction":46855,"uelve":46856,"arat":46857,"Ġsauna":46858,"INVALID":46859,"Ġindictment":46860,"ALLE":46861,"Ġdissent":46862,"ĠTypography":46863,"Ġintentional":46864,"sit":46865,"ĠAnimals":46866,"Ġcountryside":46867,"Ġuart":46868,"}\\\"":46869,"Ġseamless":46870,"¾ç¤º":46871,"Ġautos":46872,"Ġ\"'\";Ċ":46873,"Flush":46874,"ANNOT":46875,"Ġalgebra":46876,"assoc":46877,"ĠWaters":46878,"Ġpreparations":46879,"ronym":46880,"[,]":46881,"Sans":46882,"Ġarmies":46883,"ipeg":46884,"Ġcreamy":46885,".art":46886,"etre":46887,"ĠAnimated":46888,"Ġunpleasant":46889,"emean":46890,"great":46891,"iÄħ":46892,"ĠEarlier":46893,"Ġchic":46894,"Ġpreserving":46895,"(exec":46896,"ĠInvestigation":46897,"ĉGPIO":46898,"Ġrigorous":46899,"ijo":46900,"=num":46901,"ĠtoolStrip":46902,")set":46903,"+\"&":46904,"ĠAcceler":46905,"Ġdevelopmental":46906,"isposable":46907,"Ġflawed":46908,"rene":46909,"Updating":46910,"Ġwatchdog":46911,"Ġdenominator":46912,"Ġsuburbs":46913,"Ġ...)":46914,"Ġconvictions":46915,"closure":46916,".IP":46917,"Ġtranslates":46918,".swt":46919,".Trace":46920,"Ġmettre":46921,".isEnabled":46922,"ĠEffective":46923,".toInt":46924,"Ġenchant":46925,"Ġstunned":46926,"Ġpoi":46927,"/code":46928,"adm":46929,".databinding":46930,"ĠLorem":46931,"________________________________________________________________":46932,"Ġledger":46933,"Ġcara":46934,"ĠGir":46935,"Ġwaits":46936,"Uno":46937,"Ġcwd":46938,"è¾ij":46939,"ĠTResult":46940,"Ġrejo":46941,"Ġemitted":46942,"ĠWestminster":46943,"ä¸Ģ个":46944,"nek":46945,"_Tis":46946,"Ġenact":46947,"ĉwith":46948,"orgia":46949,"Ġjue":46950,"Perform":46951,"SPATH":46952,".topic":46953,"ĠDaten":46954,"ầ":46955,"Ġsitio":46956,"_MM":46957,"\"So":46958,"bial":46959,"Ġscoped":46960,"Requires":46961,"ĠTOTAL":46962,"ĠChancellor":46963,"(contents":46964,"Ġstealth":46965,"devices":46966,"-pass":46967,"ilih":46968,"ĠMalcolm":46969,"ĠDepot":46970,"Ġconfigur":46971,"aussian":46972,"_constraint":46973,"веÑĤ":46974,"GRA":46975,"ĠRates":46976,".dataGridViewTextBoxColumn":46977,"ĠNobel":46978,"itics":46979,"Ġignorant":46980,"ĠReporter":46981,"ĠEbola":46982,"ĠShock":46983,"_relation":46984,"ĠNinja":46985,")c":46986,"Ġticker":46987,".isChecked":46988,"ĠSuppliers":46989,"ĠRapid":46990,"Levels":46991,"âĤ¬âĦ¢":46992,"ĉqueue":46993,"Ġchop":46994,"ĠUnix":46995,"reject":46996,"-calendar":46997,"(sort":46998,"ène":46999,"ercicio":47000,"Ġhect":47001,"CALLTYPE":47002,"roupon":47003,"Ġrentals":47004,"authors":47005,"{name":47006,"ĠFIFO":47007,"Ġlassen":47008,"ĠNous":47009,"Ġsnapped":47010,"Ġfertility":47011,"\"log":47012,"clicked":47013,"Ġplanting":47014
,"Ġgb":47015,"/output":47016,"PEAT":47017,"Ġcategoria":47018,"Ġbach":47019,"Professor":47020,"inth":47021,"\"]čĊ":47022,"Recorder":47023,"serde":47024,"ĠTransmission":47025,"trad":47026,"Ġturbo":47027,"_VERTEX":47028,"\\Event":47029,"ilver":47030,"Ġbodily":47031,"ĠSources":47032,"Ġkillings":47033,".xrTableCell":47034,"Ġfolded":47035,"/legal":47036,"uner":47037,"ĠRifle":47038,"ĠMIDI":47039,"_SelectedIndexChanged":47040,".SizeType":47041,"ĠWebSocket":47042,"Ġseleccion":47043,"Sand":47044,"otros":47045,"Ġenvision":47046,"/etc":47047,"ĠMelissa":47048,"Spot":47049,"ное":47050,"_ARM":47051,"Attempt":47052,"ĠBI":47053,"ãģĶ":47054,"ĠDU":47055,"Ġbacklash":47056,"stride":47057,"/classes":47058,"ĠtextColor":47059,"_staff":47060,"oblin":47061,"agenta":47062,".collections":47063,"illage":47064,"'čĊčĊ":47065,"flatten":47066,"_sales":47067,"_MASTER":47068,"TW":47069,"_da":47070,"Pitch":47071,"phies":47072,"Ġzombies":47073,"ĠVERY":47074,"ĠPharmacy":47075,"ĠprogressBar":47076,"Ġhashtag":47077,"Sidebar":47078,"@stop":47079,"(pc":47080,"олж":47081,"MAKE":47082,"ĠCoron":47083,"Ġkvinner":47084,"ĠMaid":47085,"bob":47086,".titleLabel":47087,"Ġsuccesses":47088,"ĠDemocracy":47089,"ĠSurgery":47090,"Ġcougar":47091,"Ġcurso":47092,"Ġloro":47093,"istency":47094,"Senior":47095,"æk":47096,"ĠAAA":47097,"ĠBOOK":47098,"ко":47099,"WSTR":47100,"Ġ*/,Ċ":47101,"oyal":47102,".vector":47103,"ĠSPEC":47104,"SSF":47105,"Ġcompuls":47106,"ĠAppeals":47107,"ĠWinston":47108,"ĠMockito":47109,"contrib":47110,".available":47111,"entityManager":47112,"arias":47113,"_sale":47114,"_rs":47115,"Ġdecoding":47116,"Ġlocator":47117,"olith":47118,"Ġkol":47119,"Ġascii":47120,"ĠRut":47121,"/interface":47122,"ĉĉĉĉĉĉĠĠĠ":47123,"ĠNumer":47124,".flip":47125,"-del":47126,"Ġbolster":47127,"onomic":47128,"Ġzm":47129,"LG":47130,"FindBy":47131,"Ġadaptive":47132,"loo":47133,"Ġvue":47134,"(reverse":47135,"_canvas":47136,".roles":47137,"ificado":47138,"venient":47139,"\"As":47140,"ĠEntr":47141,"aligned":47142,"Ġbereits":47143,"///ĊĊ":47144,".gwt":47145,".employee":47146,"_cli":47147,"Ġanticipate":47148,"éĻIJ":47149,"Ġpik":47150,"Ġmushrooms":47151,"(tt":47152,"Ġoma":47153,"ĠSanchez":47154,"_google":47155,".Valid":47156,"ĠFileName":47157,"ivative":47158,"ked":47159,"-war":47160,"Ġmaturity":47161,"ид":47162,"Ġminer":47163,"Reducers":47164,"ĠLatLng":47165,"_STD":47166,"Digits":47167,"Calc":47168,"-upload":47169,"Ġhandic":47170,"ีà¹Ī":47171,"egrated":47172,"ĠSTM":47173,"Clients":47174,"ĠTurbo":47175,"SYNC":47176,"Ġphotographers":47177,".Out":47178,".character":47179,"BUILD":47180,".unlock":47181,"Ġarises":47182,"ĠCommands":47183,"(\"\");čĊ":47184,"_FORE":47185,";',":47186,"+\"'":47187,".Images":47188,"\"){":47189,"ĠMeyer":47190,"Ġnegatively":47191,"ĠDLL":47192,"Ġexe":47193,"Ġdeficiency":47194,"Ġwildly":47195,"-switch":47196,"construction":47197,"Ġexceptionally":47198,"ĠLiz":47199,"/java":47200,"Ġtheirs":47201,"ĠContemporary":47202,"lis":47203,".fillRect":47204,"ĠNFC":47205,"Ġrehe":47206,"(numbers":47207,"Ġraster":47208,"Ġfiguring":47209,"Ġshowc":47210,"ĠJill":47211,"Ġarcade":47212,"ĠConstructs":47213,"mdl":47214,"('|":47215,"Ġidentifiers":47216,"Ġstellar":47217,"(Connection":47218,"Ġ\"{{":47219,"yor":47220,"(mysqli":47221,"Ġdove":47222,"OfBirth":47223,".disconnect":47224,"_hi":47225,"Ġzwischen":47226,"ĠGrund":47227,"iros":47228,"_Array":47229,".onclick":47230,"ansom":47231,"Answers":47232,"ĉremove":47233,"Fa":47234,"Ġhurry":47235,"-inf":47236,"ĠgetClass":47237,"ĠRegulation":47238,"ĠFLAGS":47239,"misc":47240,"Ken":47241,"_heading":47242,"GHz":47243,"-entry":47244,"Ġbiogra
phy":47245,"Sig":47246,"-mf":47247,"Watcher":47248,"âĢľA":47249,"}px":47250,"Ġspicy":47251,"_sq":47252,"Lost":47253,"(track":47254,"али":47255,"Descending":47256,"((":47453,"survey":47454,"Ġíĺ":47455,"...')Ċ":47456,"ĠDivider":47457,"osl":47458,"_CANCEL":47459,"_prepare":47460,"stin":47461,"ĠHeath":47462,".PrimaryKey":47463,"ĠâĨIJ":47464,"ĠLocalDateTime":47465,"Ġcooperative":47466,"Learning":47467,".enqueue":47468,"Ġgoog":47469,"ĠRegression":47470,"imates":47471,"Ġvoyeur":47472,"ĠDrink":47473,"plug":47474,"Ġlender":47475,"mana":47476,"Ġpersonnes":47477,"ypse":47478,"Ġunlink":47479,"ĠRavens":47480,"Ġhurd":47481,"Ġperiodically":47482,"ARGS":47483,"ĠGH":47484,"characters":47485,"...\"ĊĊ":47486,"-establish":47487,"Ġdn":47488,"(condition":47489,"ĠGravity":47490,"Ġestas":47491,"_focus":47492,"Creature":47493,"(site":47494,"Ġcarr":47495,"ĠRL":47496,"ĠRI":47497,"ĠMoto":47498,"ASF":47499,"ĠLuckily":47500,"ĉRoute":47501,"Ġentropy":47502,"(\",\"":47503,"Collect":47504,"(contact":47505,"ĠFlorence":47506,"Ġpremiums":47507,"Ġlifecycle":47508,"Ġbans":47509,"xef":47510,"WebKit":47511,"ĠFloating":47512,"Ġcosa":47513,"Specific":47514,"ĠLoans":47515,"bread":47516,"Ġdescriptors":47517,"Ġ{:.":47518,"THREAD":47519,"ĠTrent":47520,"Ġscop":47521,"QA":47522,"ĠAntar":47523,"pel":47524,"_difference":47525,"_changes":47526,"(...)":47527,"ĠRotation":47528,"ĠLGPL":47529,"ĠJUST":47530,"(Task":47531,"_subset":47532,"ĠTRANS":47533,"åĬĽ":47534,"ĠScout":47535,"-popup":47536,"Ġsmoked":47537,"_Class":47538,"Ġturnover":47539,"brakk":47540,"ĠRocky":47541,"tas":47542,".RegularExpressions":47543,"ĠElliott":47544,"ĠSpinner":47545,"DUCTION":47546,"Ġlibre":47547,"Ġmolto":47548,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ":47549,"ĠFTP":47550,"mpeg":47551,"(features":47552,"Ġbald":47553,"ĠVid":47554,"Ġshouting":47555,"Lint":47556,"Ġsockets":47557,"Ġprow":47558,"Ġnouvelle":47559,"iscard":47560,"ĠSponsor":47561,"Ġconsulta":47562,")));":47563,"Indian":47564,"ĠRaspberry":47565,"Ġteammate":47566,"ĠJWT":47567,"ĠGhana":47568,"Ġcakes":47569,"primer":47570,"forma":47571,"ergarten":47572,"_Manager":47573,"Ġpreseason":47574,"GAME":47575,"|\"":47576,"ĠBrock":47577,"Ġoccupy":47578,"Ġdecorations":47579,"ánd":47580,"Ġcot":47581,"Ġparan":47582,"Disk":47583,"remain":47584,">?":47585,"Strong":47586,"Ġfrance":47587,"ĠEra":47588,"-cr":47589,".BufferedReader":47590,"ĠParadise":47591,"ĠVAT":47592,"ĠAnders":47593,"Ġlimb":47594,"ampoo":47595,"Ġimperative":47596,"UTILITY":47597,"ĠRecognition":47598,"Ġragazze":47599,"Ġpops":47600,"ypress":47601,"Ġembargo":47602,"//{Ċ":47603,"Ġsyll":47604,"PTR":47605,"åŃĺåľ¨":47606,"Ġdidnt":47607,"Mailer":47608,"Ġacademics":47609,"ĠFrauen":47610,"neider":47611,"-rel":47612,"Ġrainbow":47613,"(In":47614,"Ġsliced":47615,"=============Ċ":47616,"(send":47617,"NSMutableDictionary":47618,"vos":47619,"(package":47620,"Ġordinance":47621,"viewer":47622,"ĠSantos":47623,"-selling":47624,"Ġgov":47625,"ettle":47626,"Ġfounders":47627,"Ġwaking":47628,"slashes":47629,"-pound":47630,"recht":47631,"ات":47632,".onClick":47633,"Ġnord":47634,"ständ":47635,"_when":47636,"UTERS":47637,"icc":47638,"Ġcapsule":47639,"ĠWid":47640,"Marc":47641,"ุ":47642,"rored":47643,"UGE":47644,"LOUD":47645,"ĠAudit":47646,"ipients":47647,"opian":47648,"ĠSue":47649,"Ġwurden":47650,".Helpers":47651,"Ġfactions":47652,"[np":47653,"-than":47654,"Ġreco":47655,"Ġkas":47656,"Ġcmds":47657,"/network":47658,"xbf":47659,"getColor":47660,"Ġbiased":47661,"ĠLak":47662,"Datas":47663,"vents":47664,"Ġë²":47665,"_PS":47666,".Validate":47667,"Invoker":47668,"Ġneuen":47669
,"Ġjuvenile":47670,"VISION":47671,"Ġdevote":47672,"Ġlinha":47673,"Ġdiscounted":47674,"\\Config":47675,"Ġworthwhile":47676,"Ġskinny":47677,"ĠCourses":47678,"leys":47679,"ĠMortgage":47680,"Kevin":47681,"Ġannounces":47682,"])*":47683,"reservation":47684,"Ġæķ°":47685,"Ġprejudice":47686,"ĠStringComparison":47687,"Ġbeard":47688,"-win":47689,"ĠSão":47690,"ĉms":47691,"jal":47692,"ĠEarn":47693,"_ports":47694,"ĠNombre":47695,"_COR":47696,"ĠBUILD":47697,".sound":47698,"Yellow":47699,"Ġlinebacker":47700,"Ġcharitable":47701,"jug":47702,"_NONNULL":47703,"ĠDental":47704,"\">${":47705,"ĉmatch":47706,"Russian":47707,"Ġversch":47708,"Ġpinned":47709,"Ġadopting":47710,"OptionsMenu":47711,"Pag":47712,"Ġpairing":47713,"Ġtread":47714,"ercises":47715,"ĠSpread":47716,")i":47717,"ĠBAD":47718,"_tf":47719,"UIImageView":47720,"populate":47721,"bab":47722,"ĠÏĥ":47723,"[++":47724,"Ġopioid":47725,"Ġ##Ċ":47726,"dtype":47727,"ĠStarts":47728,"('/')":47729,"Ġpersonals":47730,"-market":47731,"Ġredundant":47732,"ĠEssential":47733,"Ġscrapy":47734,"Ġим":47735,"acl":47736,"Ġcrear":47737,"ĠBend":47738,"Ġrelieve":47739,"-room":47740,"wife":47741,"ĠvÃł":47742,"ĠQPoint":47743,"Ġquasi":47744,"ĠmethodName":47745,"\\xc":47746,"ĠPeru":47747,"/The":47748,".orm":47749,"Ġviz":47750,"/pdf":47751,"Located":47752,"Ġconfrontation":47753,"ĠChampionships":47754,"Ġhypert":47755,"Ġdj":47756,"ĠUserInfo":47757,"ĠåĪĽå»º":47758,"\\xb":47759,"(sim":47760,"Ġ==Ċ":47761,"Ġstaging":47762,"Ġdrastically":47763,"åѦ":47764,"lords":47765,".less":47766,"ведиÑĤе":47767,"ĠBucket":47768,"ĠMam":47769,".term":47770,"_pi":47771,"czy":47772,".pub":47773,"precio":47774,"ĠVirt":47775,"Ġroman":47776,"itat":47777,"Lex":47778,"_infos":47779,"İ":47780,".other":47781,"VELO":47782,"Ġponder":47783,"Ġhanno":47784,"(Page":47785,"doi":47786,"Ġpolite":47787,"Ġprogrammer":47788,"Dies":47789,"$d":47790,"Ġreplication":47791,"addColumn":47792,"frican":47793,"Ġleng":47794,"beer":47795,"oit":47796,"Ġwasting":47797,"ylim":47798,"measure":47799,"Neg":47800,"Ġpartie":47801,".console":47802,"ĠGuinea":47803,"TEL":47804,"_fact":47805,".chunk":47806,"Ġlent":47807,"Ġaller":47808,"Ġà¤ķ":47809,"_idle":47810,"Ġadmissions":47811,"JSONArray":47812,"Ġvibration":47813,".helpers":47814,"å¤ĸ":47815,"Ġhen":47816,"john":47817,"ĠìĥĿ":47818,"Ġjudgement":47819,"Ġgeen":47820,"terra":47821,"^{":47822,"ĠIz":47823,"Ġcâ":47824,"instances":47825,"Ġthreatens":47826,"Ġmüssen":47827,"KindOfClass":47828,"Ġstorytelling":47829,"_demo":47830,"rias":47831,"Privacy":47832,"hift":47833,"ĠYi":47834,"esor":47835,"íķł":47836,"ensitivity":47837,".Writer":47838,"à¸Ĥ":47839,"District":47840,".getJSONObject":47841,"Impro":47842,"(getResources":47843,"ĠSPELL":47844,"roduce":47845,"Ġslowed":47846,"Ġlinewidth":47847,"Ġhonesty":47848,"ĠCoord":47849,"ĠFork":47850,"ĠDispatchQueue":47851,"ĠCliff":47852,"ĠWiring":47853,"_TIMESTAMP":47854,"ollah":47855,"avoid":47856,"++];Ċ":47857,"semantic":47858,"-css":47859,"Ġveto":47860,"ĠMerr":47861,"Ġlegislators":47862,"CEEDED":47863,"Ġquestionnaire":47864,"ĠPills":47865,"Calculate":47866,"(core":47867,"'e":47868,"Ġdislike":47869,"ĠPreferences":47870,"_EXTERNAL":47871,"è°ĥ":47872,"Ġdodge":47873,"æľįåĬ¡":47874,".names":47875,".drawImage":47876,"_prom":47877,"uckland":47878,"Ġ<$>":47879,"ız":47880,"/site":47881,"项":47882,"rophe":47883,"Ġcompelled":47884,"Ġlaptops":47885,"Ġuni":47886,"CLOSE":47887,"Ġcasualties":47888,"ĠUniform":47889,"Terminal":47890,".\",\"":47891,"DAT":47892,"(TreeNode":47893,"ĠGandhi":47894,"(stmt":47895,"AXB":47896,"*M":47897,"Ġumbrella":47898,"animal":47899,"Ġgrpc":47900,"Ġwhereby":479
01,"Ġfloats":47902,"ĉarg":47903,"Ġdbg":47904,"Ġexceeding":47905,"EventType":47906,".SaveChangesAsync":47907,"Ġ{{{":47908,"Ġowed":47909,"ahrenheit":47910,"Ġì§":47911,"Ġequipo":47912,"urai":47913,"Ġidol":47914,"]\")Ċ":47915,"_major":47916,"Ġentirety":47917,"ingerprint":47918,"ços":47919,"/account":47920,"ĉright":47921,"ursos":47922,"ĠEDT":47923,"_INSERT":47924,"Ġshining":47925,"Ġ<:":47926,"EdgeInsets":47927,"Ġcolonies":47928,".IM":47929,"ĉĠĉ":47930,"ROAD":47931,"CCCC":47932,"placing":47933,"ĠgetActivity":47934,"emacs":47935,"'%(":47936,".clicked":47937,"ĠThem":47938,"isia":47939,"Buscar":47940,".rename":47941,"Ġoath":47942,"Ġafterward":47943,"ĠUFO":47944,"APS":47945,"ĠJacksonville":47946,".some":47947,"Confirmed":47948,".scan":47949,"igInteger":47950,"Decorator":47951,"shield":47952,"ressive":47953,".did":47954,"请è¾ĵåħ¥":47955,"Ġshutter":47956,"Dam":47957,"Ġparenting":47958,"eyed":47959,"$item":47960,"-develop":47961,"Ġextracts":47962,"Ġdecentralized":47963,"ĠElsa":47964,"_spin":47965,"])+":47966,"-initial":47967,"Ġmultitude":47968,"Ġsensory":47969,"ĠMODEL":47970,"Ġsafeguard":47971,"ì¹":47972,"Ġhunters":47973,"ĠTiny":47974,"INO":47975,"decorate":47976,"ĠNoSuch":47977,"Ho":47978,"(Response":47979,"Ġruler":47980,"ĉshort":47981,"Ġcaster":47982,"ĠclientId":47983,"Ġpdb":47984,"ëıĦ":47985,"itic":47986,"ĠGameState":47987,"ĠnewItem":47988,")ĊĊĊĊĊĊ":47989,"ouis":47990,"noc":47991,".BLACK":47992,"_VECTOR":47993,"----------();":48281,".getP":48282,"anye":48283,"Ġneuron":48284,"ifold":48285,"ĠKnown":48286,"Bitcoin":48287,"Anyway":48288,"ayette":48289,"Ġ'['":48290,"Ãłnh":48291,"mgr":48292,"Ġcorrelated":48293,"Ġnause":48294,"Ġmentality":48295,"hasMany":48296,"ĠFG":48297,"ampie":48298,"ITU":48299,"Fs":48300,".Sp":48301,"_between":48302,"Dependencies":48303,"oug":48304,"Placeholder":48305,"=text":48306,"ĠManaging":48307,"ocalypse":48308,"åĮĹ":48309,"_mag":48310,"fld":48311,"âij":48312,"CAM":48313,"ĠHelpers":48314,"Ġdost":48315,"/out":48316,"Ġassassination":48317,".getImage":48318,"ĠKenny":48319,".')ĊĊ":48320,"){//":48321,"ĠRanger":48322,"Ġgek":48323,"Ġsincere":48324,"čĊ":48527,".getResources":48528,"Ġlump":48529,"_consts":48530,"(ext":48531,"ĉdir":48532,"âĿ":48533,"ĠpaddingTop":48534,"Ġobsession":48535,"Ġbanning":48536,"ĠAppModule":48537,"Ġpartisan":48538,"Ġcatalogue":48539,"Ġminors":48540,"Ġpitches":48541,"weep":48542,"Ġundertake":48543,"Ġthemed":48544,"audit":48545,".scrollTop":48546,"Ġrer":48547,"Ġsymptom":48548,"Ġopenings":48549,".blocks":48550,"openid":48551,"Ġassh":48552,"-save":48553,"ĠPig":48554,"Ġregain":48555,"Ġinicial":48556,"/favicon":48557,"ĉexp":48558,"Ġspices":48559,"iska":48560,"claims":48561,"mak":48562,"definitions":48563,"Ġcorrespondent":48564,"ĠCannabis":48565,"__,Ċ":48566,"ĠLucky":48567,"ĠGaussian":48568,"ĠNearly":48569,"CAD":48570,"']]Ċ":48571,"Ġadequately":48572,"ĠTITLE":48573,"constitutional":48574,"-mm":48575,"_override":48576,"Ġblas":48577,".readyState":48578,"Ġreminis":48579,"Ġreinforced":48580,"ĠCollabor":48581,"Ġdecorating":48582,"Ġbachelor":48583,"ERRUPT":48584,"Ġupright":48585,"ipation":48586,"ĠNoble":48587,"ĠvalueForKey":48588,"ĠsetLoading":48589,".Ignore":48590,"åģ":48591,"Globals":48592,"ĠMent":48593,"ASSES":48594,"Ġlimbs":48595,"ĠHUD":48596,"inci":48597,".iv":48598,"ĠQModelIndex":48599,"Fuse":48600,"Ġpedal":48601,"_FREQ":48602,"(verbose":48603,"Ġlongitud":48604,"ĠCharter":48605,"ê·¸":48606,"Ġbundles":48607,".ignore":48608,"umbo":48609,"EMA":48610,".......":48611,"sx":48612,".Card":48613,"Ġheute":48614,"Ġsteer":48615,"jumlah":48616,"Ġ{_":48617,"_Checked":48618,"Ġfax":48619,"
ĠGust":48620,"itchens":48621,"Ġ))ĊĊ":48622,"Ġremarkably":48623,"/XML":48624,"-remove":48625,"_bt":48626,"Ġincub":48627,".package":48628,".currentThread":48629,"ĠHighlander":48630,".side":48631,"splash":48632,"Ġici":48633,"=D":48634,"Ġpuck":48635,"Ġballots":48636,"Ġhugely":48637,"coeff":48638,"ĠpData":48639,".COLUMN":48640,"ĠHealing":48641,"Ġordin":48642,"!),":48643,"Ġ'',čĊ":48644,"(md":48645,"ĠSask":48646,"čĊ":48668,"Ġrá":48669,"Ġblunt":48670,"ĠImageIcon":48671,"ifik":48672,"RTC":48673,"Ġfibers":48674,"Ġtoile":48675,".sent":48676,"ĠPyQt":48677,"$app":48678,"Ġmedio":48679,"Ġgranting":48680,"Ġtslint":48681,"ĠMö":48682,"(figsize":48683,"Ġhurricane":48684,"Ġlifes":48685,"ĠÃĦ":48686,"rocessing":48687,"_standard":48688,"-option":48689,"')))":48690,"Ġvacant":48691,"å·¥":48692,"ĠHollow":48693,"handleChange":48694,"Ġdivider":48695,"ĠEngineers":48696,"Ġsvens":48697,"Ġcompliant":48698,"tanggal":48699,"ĠCredits":48700,"ĠEmirates":48701,"RuleContext":48702,"Ġrealization":48703,"Ġdistracted":48704,"]+=":48705,"Ġaugment":48706,"ĠDw":48707,"otp":48708,"orrent":48709,"Editar":48710,".stock":48711,"Study":48712,"pections":48713,"ĠGameManager":48714,"=cut":48715,"Ġflock":48716,"ĠRomans":48717,"them":48718,"-hop":48719,"Ġscreenshots":48720,"Ġ/*!Ċ":48721,"Ġconversions":48722,"Ġnormalization":48723,"(configuration":48724,"Ġaeros":48725,"_security":48726,"!'Ċ":48727,"Bonus":48728,"ĠDRIVER":48729,"ĉDate":48730,"tie":48731,"ĠWyoming":48732,"Stand":48733,"itre":48734,"Ġshoppers":48735,"Ġdisadvantage":48736,"Ġliking":48737,"ç¬ij":48738,"Ġunderstandable":48739,"SEE":48740,"Ġhoy":48741,"Ġninete":48742,"Ġconfer":48743,"Ġnowrap":48744,"ĠVern":48745,",čĊčĊ":48746,"imestep":48747,"LayoutManager":48748,"à·":48749,"ĉwait":48750,"PLETED":48751,"Japan":48752,"Ġinduce":48753,"Ġå¯":48754,"озв":48755,"_ENDPOINT":48756,".horizontal":48757,"Ġaccelerated":48758,"rimon":48759,"IVES":48760,"Transactions":48761,"Lean":48762,"ĠSOUR":48763,"whether":48764,"yg":48765,"Ġoid":48766,"ĠEntityManager":48767,"OUNTRY":48768,"Ġfila":48769,"OLUMNS":48770,"INUE":48771,"ĠAnchor":48772,"TRAN":48773,"woo":48774,"blockquote":48775,"ĠNurse":48776,"ĠCarp":48777,"Ġredeem":48778,".try":48779,"ĠJP":48780,"Ġtimestamps":48781,"Ġ?>\"><":48782,"ĠREMOVE":48783,"ĠStarbucks":48784,"Really":48785,"Ġflooded":48786,".Callback":48787,"DropDown":48788,"ipro":48789,"Ġtended":48790,"lte":48791,"Ġproportions":48792,"-te":48793,"ĠRena":48794,"licate":48795,"forces":48796,".extra":48797,".authenticate":48798,"вод":48799,"¡°":48800,"ĠforControlEvents":48801,"Ġsenha":48802,"Ġkein":48803,"Ġminist":48804,"ĠPreference":48805,"ĠTelegraph":48806,"Ñĥп":48807,"strpos":48808,"Ġillnesses":48809,"Ġpigs":48810,"ĠgetIntent":48811,"Sol":48812,"Ġ¡":48813,"(cpu":48814,"[prop":48815,"screens":48816,"');?>":48817,"ĠActs":48818,"Ġstrdup":48819,"Ġaverages":48820,"anal":48821,"ĠCasual":48822,"GroupBox":48823,"ĠHandbook":48824,"/comments":48825,"Ġnumbered":48826,"Ġbroadcasting":48827,"çĽij":48828,".nativeElement":48829,".mu":48830,"ĠupdatedAt":48831,"ĠDoesn":48832,".AC":48833,".coll":48834,"Ġrecorder":48835,"_sha":48836,"Bg":48837,"bil":48838,"Ġbolts":48839,"Ġç¬":48840,"Ġimposing":48841,"ĠInformationen":48842,"_flashdata":48843,"economic":48844,"Remark":48845,"ucas":48846,"ĠOfficers":48847,"ĠTER":48848,"Walk":48849,"Ġmercado":48850,"_generate":48851,"HY":48852,"Calling":48853,"snap":48854,"scriptId":48855,".operation":48856,"ĠFlame":48857,"liness":48858,"Ġrented":48859,"_toggle":48860,"-changing":48861,"ĠTY":48862,"'util":48863,"EEP":48864,"Ġgraphql":48865,"ĠUni":48866,"Ġimpulse":48867,".Basic":4
8868,"Ġenergies":48869,"MARY":48870,"ĠMarcel":48871,"Ġmortal":48872,"Ġfres":48873,"mens":48874,"motion":48875,"Ġsampled":48876,"âĢľThat":48877,"iday":48878,"quipment":48879,"getInt":48880,"ĠAbsolute":48881,",'\"":48882,"uned":48883,".share":48884,"Ġ})(":48885,"mmm":48886,"ĠRising":48887,"ä»»":48888,"Ġunemployed":48889,"xfa":48890,".follow":48891,"ĉĉĉĉĠĠĠĠĠĠ":48892,"slt":48893,".Phone":48894,"Ġknives":48895,"Ġeve":48896,"onClick":48897,"]))čĊ":48898,"ĠWitness":48899,"ĉNS":48900,"ĠEOS":48901,"ĠStefan":48902,"ĠPriest":48903,"âĢĶwhich":48904,"GetString":48905,".By":48906,"Ġupstairs":48907,"Ġdetriment":48908,"broken":48909,"embro":48910,"Ġnicotine":48911,"ilion":48912,"Ġastonishing":48913,"_aff":48914,"ĠLesson":48915,"Ġaccidental":48916,"odor":48917,"Ġdecir":48918,"ĠnewName":48919,"+.":48920,"缸":48921,"igslist":48922,"ĠGithub":48923,"Ġsuccessive":48924,"racial":48925,"Ġenviron":48926,"éªĮè¯ģ":48927,"Ġredirected":48928,"TOTAL":48929,"Ġgrabbing":48930,"ĠLance":48931,"Ġforfe":48932,"_CB":48933,"å¾®":48934,"Elapsed":48935,"_way":48936,"(DialogInterface":48937,"_measure":48938,"xbb":48939,"Dog":48940,"Depart":48941,"-src":48942,"resolver":48943,"withstanding":48944,"_shell":48945,"ĠLastName":48946,"ĠAviation":48947,"Ġbeginner":48948,"(\"%.":48949,"(tool":48950,"Ġнов":48951,":init":48952,"(API":48953,"ĠMorrison":48954,"vtColor":48955,"Ġstaple":48956,"/INFO":48957,"Ġsupernatural":48958,"Ġsteak":48959,"timeline":48960,"zzle":48961,"\"`ĊĊ":48962,"Secondary":48963,"ĠNepal":48964,".StringUtils":48965,"Ġadam":48966,"Ġ(...":48967,"Ġsubstitution":48968,"Ġboarding":48969,"ĠKeyword":48970,"ĠAssault":48971,"dbcTemplate":48972,"ĠorderId":48973,"(engine":48974,".assertThat":48975,"ĠVenus":48976,"Ġhomicide":48977,"ĠAval":48978,"Ġgutter":48979,"ĠSupported":48980,"/part":48981,"Ġacclaimed":48982,"Histor":48983,"Ġmeses":48984,"über":48985,"ĠRenew":48986,"Ġgras":48987,"ĠEk":48988,"Ġinfile":48989,"indy":48990,".music":48991,".Scroll":48992,"ĠAges":48993,"ĠNaruto":48994,"ĠGather":48995,"Ġconfirming":48996,"=(\"":48997,"Ġpitched":48998,"oley":48999,"France":49000,"+'\"":49001,"$total":49002,"Ġonde":49003,"Ġditch":49004,"_sigma":49005,"Ġcontinuity":49006,"reward":49007,"-load":49008,"Ġproceso":49009,"Locked":49010,"staw":49011,"Ġspinal":49012,"lazy":49013,"!==":49014,"jest":49015,"Ġdun":49016,"ĠRodgers":49017,"ĉgrid":49018,"Ġlogos":49019,"ĠBengal":49020,".super":49021,"Provides":49022,"Ġnutrient":49023,".Timestamp":49024,"IZATION":49025,"åĨĮ":49026,"Ġfats":49027,"ĠXxx":49028,"ctica":49029,"Targets":49030,"Ġcontours":49031,"Ġreordered":49032,":Array":49033,"Ġtolerate":49034,"Vir":49035,"Ġterribly":49036,"Ġbricks":49037,"(&_":49038,"hb":49039,"Portal":49040,"ĠBread":49041,".which":49042,"ÂŃt":49043,"asInstanceOf":49044,"Ġjobject":49045,"ĉlength":49046,"_MT":49047,";\">čĊ":49048,"_EXIST":49049,"Ġmaternal":49050,"REL":49051,"Ġê²½ìļ°":49052,"hee":49053,"Ġlayouts":49054,"ĠLap":49055,"aisy":49056,"Ġstumbled":49057,"ĠUIG":49058,"ĠSco":49059,"Ġimpaired":49060,"RESSED":49061,"Ġabuses":49062,"VF":49063,"ARB":49064,".NAME":49065,"rch":49066,"primir":49067,"_completed":49068,"Ġpenny":49069,"Chrome":49070,"(begin":49071,"ernen":49072,"-checkbox":49073,"PlainOldData":49074,"ĠLPC":49075,"rade":49076,"spir":49077,"Ġconceived":49078,"Tips":49079,"ĠIoT":49080,"ĠGan":49081,"èģĶ":49082,"Ġbiases":49083,"Ġconsultants":49084,"pled":49085,"_ht":49086,"associated":49087,"],ĊĊ":49088,"Ġdelightful":49089,"ĠÑĤек":49090,"Helvetica":49091,"(load":49092,"-expand":49093,"_WIDGET":49094,"toa":49095,"ĠAkt":49096,"Ġomn":49097,"Ġclauses":49098,"Intel":49099,"
*/}Ċ":49100,"_registration":49101,"ĠoldValue":49102,"Ġrestoring":49103,"Ġunreal":49104,"OVER":49105,"ĉĊĉĊĉĊ":49106,"ATS":49107,"_probe":49108,"Ġdivisor":49109,".updateDynamic":49110,"å¹³":49111,"Produces":49112,"stamp":49113,".jboss":49114,"ĉtask":49115,"!(:":49116,"Ġpsychic":49117,"@class":49118,"Martin":49119,"ĠPassed":49120,"clarations":49121,"hel":49122,"аÑĩ":49123,"ĉcopy":49124,"-bin":49125,"zan":49126,"igram":49127,"াà¦":49128,"(sig":49129,"ĠCaval":49130,"_##":49131,"Ġ%=":49132,"outlined":49133,"ĠAcid":49134,"Ġunpredictable":49135,"-dashboard":49136,"HexString":49137,"+c":49138,".Public":49139,"ẩ":49140,"Ġconveyor":49141,"ĠEB":49142,"Ġselects":49143,"Ġknocking":49144,"ĠCec":49145,"IBUTES":49146,"owaÄĩ":49147,"gatsby":49148,"*v":49149,"entropy":49150,"Ġdispatched":49151,"Ġcamel":49152,"ĠSaturn":49153,"Ġoverweight":49154,"(phone":49155,"parable":49156,"%B":49157,"_vectors":49158,"Ġbrewing":49159,"ĠTk":49160,"ĠDownloads":49161,"ĠSaved":49162,".Price":49163,"Ġcurved":49164,"ĠParenthood":49165,"è¶":49166,".pnl":49167,"pletely":49168,".Day":49169,"Ġadvertisers":49170,"Ġejec":49171,"Ġprzed":49172,"ë¯":49173,"!';Ċ":49174,"ĠKush":49175,"ĠTAB":49176,"Ġquests":49177,"Ġcoincidence":49178,"ummies":49179,"ĠKashmir":49180,"ĠEthics":49181,"_growth":49182,"Ġaktiv":49183,"Ġgrouping":49184,"å¢ŀ":49185,"_truth":49186,"åIJ¬":49187,"todos":49188,"iset":49189,"TexCoord":49190,"ätt":49191,"ĠZur":49192,"roys":49193,"_MAGIC":49194,"Ġbrewery":49195,"(State":49196,"ĠSMALL":49197,"ĠPlants":49198,"itbart":49199,"eacher":49200,"ĠAdelaide":49201,"Lu":49202,"Ġfick":49203,"undles":49204,"_loaded":49205,"ие":49206,"Poll":49207,"ritic":49208,"ELY":49209,"Ġ+'":49210,"ĠProfession":49211,"Ġstamps":49212,"ĠSew":49213,"scrollView":49214,"Ġcommunist":49215,"/problems":49216,"}čĊčĊčĊčĊ":49217,",o":49218,"Ġudp":49219,"Ġobese":49220,"approve":49221,"ancellation":49222,"_Game":49223,"ĠHashtable":49224,"adaptiveStyles":49225,"Ġpossesses":49226,".matcher":49227,"functional":49228,"Mrs":49229,"ĉsave":49230,"ĠDbType":49231,"Ġken":49232,"getContext":49233,"Ġmans":49234,"(rel":49235,"ĠBrotherhood":49236,")`Ċ":49237,"è§£":49238,".Information":49239,"OutOfRangeException":49240,"ĠSek":49241,"Cas":49242,"Ġbloggers":49243,"Either":49244,"(\"\"\"":49245,"Ġpinch":49246,"Ġcoarse":49247,")p":49248,"ĠPulse":49249,"Ġlearnt":49250,"Ġdentist":49251,"Ġonchange":49252,"Ġdirectives":49253,"(actions":49254,"nyder":49255,"ĠShir":49256,"Trait":49257,"_dep":49258,"ĠPET":49259,"ĠREP":49260,".AppSettings":49261,"cuador":49262,"idenav":49263,"Ġenvi":49264,"Ġslammed":49265,"ĠShoot":49266,"ĠdateFormat":49267,".joda":49268,"veys":49269,"Ġ).ĊĊ":49270,"Ġcareg":49271,"ĠParallel":49272,"_translation":49273,".functions":49274,".obs":49275,"RuntimeException":49276,"[]=":49277,"overview":49278,"ĠSchl":49279,"Ġnoisy":49280,"ĠOnPropertyChanged":49281,"Sending":49282,"Ġunfamiliar":49283,"Upon":49284,"ĠPrints":49285,".typ":49286,"Ġfleeing":49287,"ĉmove":49288,"(Un":49289,"Ġqr":49290,"׾":49291,"_beta":49292,"Ġskies":49293,"ĉme":49294,"WND":49295,"Ġstickers":49296,"blas":49297,"Ġinserts":49298,"Ġverses":49299,"ĠDew":49300,"Ġtangible":49301,"Ġhecho":49302,"POL":49303,"Ġteardown":49304,"omnia":49305,"IBE":49306,".cover":49307,"_strategy":49308,"^-":49309,"setPosition":49310,"uale":49311,"Signed":49312,"Ġiface":49313,"aseline":49314,".setTime":49315,"ĠMineral":49316,"ĠFighting":49317,"skins":49318,"Ġdiscrimin":49319,"Ġdansk":49320,"ĠPrinceton":49321,"acist":49322,"Ġ());Ċ":49323,"tracks":49324,"imonial":49325,"adecimal":49326,"EPROM":49327,"uggle":49328,".Notification":49329,"$
mail":49330,"cantidad":49331,"ĠJung":49332,"Ġseekers":49333,"Ġplausible":49334,"tier":49335,"еж":49336,"Ġrapper":49337,"ĠMana":49338,"ĠHttpStatusCode":49339,"Ġburnt":49340,"loses":49341,"ĠFoto":49342,"ĠJsonObject":49343,"Instagram":49344,"Ġsyscall":49345,"Ġrealities":49346,"ĠMATLAB":49347,":^{Ċ":49348,"TERM":49349,"ĠCbd":49350,"ĠParagraph":49351,"Ġtravés":49352,"Ġconstructing":49353,"Ġswal":49354,"Ġpige":49355,"LLLL":49356,"-existing":49357,"Gets":49358,"Ġmelted":49359,"Ġmitigate":49360,"Hen":49361,"Ġhm":49362,"imas":49363,"ĠAo":49364,"ĠPerez":49365,"ĠDAL":49366,"Ġëĭ¤":49367,"Ġdivis":49368,"StoryboardSegue":49369,"ĠModify":49370,"ĠÃľber":49371,"_OVERRIDE":49372,".pem":49373,"untos":49374,"Ġespañ":49375,"Ġ{?":49376,"ĠPAY":49377,"_ipv":49378,"ĠFury":49379,"__.__":49380,"elow":49381,"-centered":49382,"checks":49383,"_Reg":49384,"-Javadoc":49385,"ĉload":49386,"ĠLikewise":49387,"اÙħ":49388,"UNE":49389,".sem":49390,"xcb":49391,"ĠCave":49392,"_sleep":49393,"Ġsilently":49394,"ĠExtreme":49395,".ToUpper":49396,"ĉCHECK":49397,"Ġcue":49398,"ĠQByteArray":49399,"Ġcorrupted":49400,"ĠDé":49401,"Ġimped":49402,"GetName":49403,"Ġinaccurate":49404,"Ġsober":49405,"ее":49406,"Ġbarcode":49407,"--){Ċ":49408,"inki":49409,"Ġép":49410,"Ġdri":49411,"ĠALT":49412,">>>>>>>>":49413,"onta":49414,"[L":49415,"Ġinteres":49416,"verting":49417,"Ġdiagnostics":49418,"pdev":49419,"è©":49420,"ĠIntegrated":49421,").'":49422,"_gc":49423,"$text":49424,".games":49425,"ĠTerra":49426,"'Re":49427,".transfer":49428,"_FIFO":49429,"getModel":49430,"Ġbland":49431,"ĠColeman":49432,"Ġprimes":49433,"ĠæĪ":49434,"Ġcrosses":49435,"nk":49436,"GING":49437,"Ġ'^":49438,"ĠBlob":49439,"Ġintercourse":49440,"ĠBlvd":49441,"Ġweighs":49442,"_regular":49443,"ĠPerth":49444,"Ġseparating":49445,"Ġbilled":49446,".tabControl":49447,"Ġpuppet":49448,"Ġutilization":49449,"Ġâĸł":49450,"Ġsucces":49451,"Ġlamps":49452,"_proj":49453,"Eric":49454,"Ġrenovation":49455,"ĠFamilies":49456,"ĠBits":49457,"partials":49458,"-Men":49459,"solution":49460,"Ġdwarf":49461,".INTEGER":49462,"ĠLOCK":49463,".ct":49464,"Ġexcerpt":49465,"ĠPix":49466,"ĠFirstName":49467,"ANTED":49468,"ĠAdmir":49469,"-help":49470,"Prior":49471,"ĠAlign":49472,".INSTANCE":49473,"LineEdit":49474,"('/:":49475,"Ġinet":49476,"odus":49477,".pkl":49478,"ĠKY":49479,"upert":49480,"Ġnerves":49481,"_gradient":49482,"}','":49483,"_unref":49484,"Ġsaturated":49485,"ĠConnected":49486,"ĠFN":49487,"EXIT":49488,"Ġteleport":49489,"Ġavait":49490,"PageRoute":49491,"Ġdivorced":49492,"(lang":49493,"fst":49494,"ĠTyr":49495,"Ġmessenger":49496,"ifstream":49497,"XS":49498,"ĠBanking":49499,"Ġinfectious":49500,"ĠMons":49501,"_LOOP":49502,"Ġzurück":49503,"Ġobtener":49504,"/repos":49505,"Vel":49506,"acro":49507,"ĠuserRepository":49508,"styleType":49509,"ĠSRC":49510,"VMLINUX":49511,"recursive":49512,"/bar":49513,"_chip":49514,"ominated":49515,"ĠNit":49516,"âĢĶto":49517,"ĠBuddh":49518,"омеÑĢ":49519,"ĠMAG":49520,"ĠCHE":49521,"_den":49522,".raises":49523,"_degree":49524,"Ġpumpkin":49525,"_templates":49526,"_MEDIA":49527,"ĠTimeline":49528,"Ġbots":49529,"ObjectType":49530,"Ġbuys":49531,".posts":49532,"CAL":49533,"waiting":49534,"ĠDaniels":49535,"Ġdabei":49536,"ĠSigma":49537,"ilor":49538,"igel":49539,",W":49540,"ADS":49541,"(panel":49542,"ì²´":49543,"itating":49544,".palette":49545,"Ġmosquito":49546,"Ġtego":49547,"(parseInt":49548,"Ġdespués":49549,"promise":49550,"Ġwij":49551,"typescript":49552,"ĠTv":49553,"_IDENTIFIER":49554,").ĊĊĊ":49555,"_flat":49556,"itsu":49557,"USR":49558,"experience":49559,"-fit":49560,"phinx":49561,"_thresh":49562,"Ġideally":4
9563,"ĠFreeman":49564,",DB":49565,"_rw":49566,"çŃī":49567,"Ub":49568,"_statistics":49569,"=\"\"><":49570,"Ġchore":49571,"Ġyork":49572,"installed":49573,"Additionally":49574,"Ġpstmt":49575,"ylko":49576,"::Ċ":49577,"Forest":49578,"Ġheadset":49579,"Ġgallon":49580,"ÑĢем":49581,"Ġwithdrawn":49582,"ĠCandidate":49583,"Ġmelting":49584,"Ġfreezer":49585,"Ġhl":49586,"_HELP":49587,"mime":49588,"(/*":49589,"Ġthirst":49590,"$return":49591,"memberof":49592,"еб":49593,"ĠHttpServletRequest":49594,"(ob":49595,"_Result":49596,"Ġasserted":49597,"Ġfulfilling":49598,"Ġstretches":49599,"parated":49600,"-funded":49601,"ĠåĽ":49602,"ingles":49603,"_ca":49604,".condition":49605,"ĠDisplays":49606,"Ġorang":49607,"ĠCRE":49608,"ĠglBind":49609,"ĠSelector":49610,"/type":49611,"ĠAlexa":49612,"chedules":49613,"ĠPeninsula":49614,"Ġparity":49615,"ĉdest":49616,"ĠDoors":49617,"čĊĉčĊ":49618,"_dimension":49619,"Ġaload":49620,".StoredProcedure":49621,"(paren":49622,"ĠBurke":49623,"')]Ċ":49624,"-engine":49625,"Ġquir":49626,"ĠHybrid":49627,"ĠDoe":49628,"Ġoutlines":49629,"ĠTrends":49630,"_NV":49631,"periments":49632,"ĠHin":49633,"?',":49634,"ĉText":49635,"FUL":49636,"Ġsmells":49637,"Ġslick":49638,"Ġmiserable":49639,"ĠArrayAdapter":49640,"ĠparamString":49641,"Hom":49642,"_literals":49643,"usuarios":49644,"Ġprompting":49645,"_lazy":49646,"ĠActivation":49647,"_oc":49648,"Weak":49649,"Ġanecd":49650,"ĠUCLA":49651,"=re":49652,"issement":49653,"ĠEscorts":49654,"Excellent":49655,"ĠPause":49656,"Ġrepositories":49657,"TOR":49658,"ariate":49659,"_iso":49660,"updates":49661,"halb":49662,"udiante":49663,"ë¡Ŀ":49664,"Ġnaive":49665,"ĠPeg":49666,"ĠLounge":49667,"ARGIN":49668,"(bin":49669,"OnClickListener":49670,"ĠFAILED":49671,"Ġlite":49672,"Ġdzie":49673,"ĠLiteral":49674,"ivor":49675,"fcntl":49676,"Ġeats":49677,"Ġqed":49678,"Unlock":49679,"riding":49680,"undai":49681,"=M":49682,"ATTER":49683,"ConfigureAwait":49684,"icias":49685,"ustomed":49686,"Ġsuccession":49687,"endTime":49688,"ĠJupiter":49689,"Ġjudging":49690,"dration":49691,"_docs":49692,".mo":49693,"Ġeducators":49694,"ĠVine":49695,"Cond":49696,"[out":49697,"qb":49698,"\\Validator":49699,"Ġmeanings":49700,"Ġpresently":49701,"Ġdividing":49702,"ottenham":49703,"ascular":49704,"Ġtrailers":49705,"ĠCLOSE":49706,"ами":49707,"âĢĻai":49708,"ĠGain":49709,"wor":49710,"Ġplanner":49711,"Ġdistributing":49712,"vat":49713,"months":49714,"xlabel":49715,"HF":49716,"Viol":49717,".BASELINE":49718,"еÑĤÑģÑı":49719,"ĠRotate":49720,"Ġtxn":49721,":bold":49722,"Ġbloss":49723,"Forgery":49724,"(embed":49725,"Ġjako":49726,"sprintf":49727,"their":49728,"Ġexhibits":49729,"-static":49730,"hecy":49731,"getActiveSheet":49732,".clients":49733,"ãģį":49734,"_hide":49735,"[word":49736,"Cb":49737,"addItem":49738,"axe":49739,"_radio":49740,"alion":49741,"modifier":49742,"Ġsaturation":49743,"Ġdenom":49744,"_pixels":49745,"mess":49746,"(fl":49747,"atif":49748,"Ġsecs":49749,"Ġprostitution":49750,"Ġgrandchildren":49751,"Ġparadise":49752,"ĠFeld":49753,"_BINARY":49754,"itous":49755,"à¹Ħ":49756,"Ġflashing":49757,"-sided":49758,"Ġcontradiction":49759,"/*ĊĊ":49760,"ylabel":49761,"ĠTet":49762,"Ġadmire":49763,"reso":49764,"Ġletz":49765,"ĠSEARCH":49766,"slots":49767,"ĠRewards":49768,"ĠHog":49769,"ĠNSData":49770,"stash":49771,"Fall":49772,"ĠAmer":49773,"LinearLayout":49774,"/photos":49775,"Ġfeather":49776,"Ġ|čĊ":49777,"Downloads":49778,".StartsWith":49779,"Ġ//#":49780,"ineTransform":49781,"Ġaffid":49782,"Vtbl":49783,"ĠRogue":49784,"scribed":49785,"Ġfauc":49786,"ĠMonroe":49787,"Ġdeclares":49788,"modern":49789,"reon":49790,"aybe":49791,"PASS":49792,
"fers":49793,"_MULTI":49794,"ĠMathematics":49795,"Ġsudah":49796,"_ATTACH":49797,"ĠnumberWith":49798,"ĠSolomon":49799,"jin":49800,"ografia":49801,"öl":49802,"_design":49803,"culated":49804,"ĠLuna":49805,"iesz":49806,"Ġ=>'":49807,"Ġrevelations":49808,"Along":49809,"(ed":49810,"ĠFilename":49811,"Ġylabel":49812,"Secure":49813,"Ġbusca":49814,"agnosis":49815,"_RECE":49816,"Ġoverlapping":49817,"Extent":49818,"Ġanticipation":49819,"Checks":49820,"ĠALSO":49821,"orc":49822,"ilingual":49823,"itational":49824,"Ġadvancement":49825,"ouro":49826,"ĠPredicate":49827,"å¾Ĺ":49828,"eria":49829,"ĠPierce":49830,"orio":49831,"Ġmerits":49832,"Ġpeanut":49833,".Package":49834,"ĠConduct":49835,"_SENSOR":49836,"Ġboiling":49837,"Ġintra":49838,"ĠIGN":49839,"ĠFur":49840,".Refresh":49841,"ĠReach":49842,"_decoder":49843,".Exp":49844,"ĠÑĤак":49845,"pill":49846,",Q":49847,"ĠGrill":49848,"Ġpopping":49849,".Ag":49850,"Ġproyecto":49851,"Ġmileage":49852,"Ġecological":49853,"]]);Ċ":49854,"ĠÂŃ":49855,"subplot":49856,"acad":49857,"ĠTrying":49858,"recipes":49859,"$criteria":49860,"ĠPersian":49861,"-bound":49862,"MASK":49863,"ĠGesture":49864,"Ġkk":49865,"ĠPVC":49866,"Ġprohibition":49867,"Ġcomando":49868,"ĠLOOK":49869,"Shopping":49870,"Ġdistortion":49871,"čĊ":49917,".Dependency":49918,".QueryString":49919,".Owner":49920,"Ġexpiry":49921,"Thu":49922,"(Vec":49923,"Ġhazardous":49924,"Ġrpm":49925,"APON":49926,"ĠaddTarget":49927,"sville":49928,"pNet":49929,"ĠImg":49930,"ĠTIMER":49931,".Animation":49932,"Ġbek":49933,"Ġassort":49934,"Ġlebih":49935,"ĠbodyParser":49936,"Ġvibrating":49937,"IDL":49938,"Ġbutterknife":49939,"inters":49940,"Ġpersuade":49941,"ĠLGBTQ":49942,"èĭ":49943,".soft":49944,"Ġbeams":49945,"_sur":49946,".Def":49947,"Ġlabs":49948,"ĉplt":49949,"Ġskins":49950,"Ġtransferring":49951,"Ġimaginary":49952,"_End":49953,";background":49954,"Ġlaps":49955,"_COMMENT":49956,"(SDL":49957,"onds":49958,".Record":49959,"ĠImplements":49960,"_ticks":49961,"()))ĊĊ":49962,"Ġarose":49963,"]?":49964,"ĠMp":49965,"ĠICommand":49966,"Ġsculpture":49967,"Ġcontracted":49968,"\">'":50446,"kinson":50447,"Ġкол":50448,"ognitive":50449,"_li":50450,"Ġimminent":50451,"Ġaffinity":50452,".signal":50453,"Ġnotch":50454,"ĠSteelers":50455,"maxlength":50456,"KK":50457,"ĠEugene":50458,"_PWM":50459,"roi":50460,"ĠâĹı":50461,"ĠHamburg":50462,".Must":50463,"Ġaxe":50464,"enef":50465,"Ġambitions":50466,"ĠSpecies":50467,"ĠStress":50468,"Ġawhile":50469,"ĠбÑĥд":50470,"Ġwithstand":50471,"ĠDecoder":50472,"_inventory":50473,"Ġ{ččĊ":50474,"Ġtgt":50475,"Ġrailroad":50476,"WASHINGTON":50477,"Ġnegotiated":50478,"NST":50479,"-phone":50480,",U":50481,"Ġexercising":50482,"ụ":50483,"_PIXEL":50484,"avors":50485,"iterated":50486,"Ġvampire":50487,"adal":50488,"Ingrese":50489,"Ġung":50490,"jective":50491,".cells":50492,"Ġnano":50493,"Ġmarkdown":50494,"_RULE":50495,"(events":50496,"Ġluggage":50497,"MESSAGE":50498,"igkeit":50499,"$count":50500,"AttributeName":50501,"IGINAL":50502,"_Ent":50503,"ĠBF":50504,"ĠCOMMENT":50505,"_ini":50506,"ĠEuropeans":50507,"ĠBelle":50508,"åij½":50509,")['":50510,"åºĶ":50511,"ĠUseful":50512,".reference":50513,"()\",":50514,"_grade":50515,"ĠKaw":50516,"Ġsentencing":50517,"Ġsocialism":50518,"monster":50519,"_LAYER":50520,"Ġdeepest":50521,"wk":50522,"ĠNoise":50523,"###ĊĊ":50524,"Ġpréc":50525,"otle":50526,"ÑĤе":50527,"auf":50528,"ibal":50529,"Ġconquer":50530,">Email":50531,"Ġambulance":50532,"OAD":50533,"Ġ(\"%":50534,"ĠFI":50535,".fixture":50536,"Ġterse":50537,"ĠĠĠĠĉĉĉĉ":50538,"Ġsanctuary":50539,"ugi":50540,"ĠComparator":50541,"Definitions":50542,"Ġasthma":50543,"Ġlact":50544
,"Ġhardwood":50545,".clock":50546,"Ġattracting":50547,"ĠMour":50548,"(distance":50549,"icits":50550,"Ġbonne":50551,"ĠACCESS":50552,".DeserializeObject":50553,"ĠTyped":50554,"Ġjeu":50555,"ĠappId":50556,"ĠClara":50557,"ĠHF":50558,"ĠReich":50559,"ipples":50560,"//--------------------------------------------------------------------------------":50561,"_delivery":50562,"erialization":50563,"Ġplaintiffs":50564,"Scient":50565,"shopping":50566,"ĠDummy":50567,"ĠWald":50568,"GroupName":50569,"Ġinscription":50570,"elog":50571,"::::::::":50572,"_ld":50573,"BackPressed":50574,".Raw":50575,"ĠOnTrigger":50576,"Ġmuseums":50577,"ĠBeen":50578,"ĠAdventures":50579,"Ġslate":50580,"Ġlett":50581,"Ġsund":50582,"ĠGin":50583,"ĠMechanical":50584,".ship":50585,"AppComponent":50586,"Ġdestined":50587,"Ġdwelling":50588,"Profiler":50589,"Prepare":50590,"zeich":50591,"Ġsilicon":50592,"(has":50593,"Ġ#%":50594,"VIDEO":50595,"Ġcollaborate":50596,"Lin":50597,"Ġscopes":50598,"(className":50599,"(sd":50600,"andin":50601,".ham":50602,"ServiceImpl":50603,"-described":50604,"Ġirony":50605,"stial":50606,"ĠHuawei":50607,"(repo":50608,"Ġunexpectedly":50609,"ĠKai":50610,".install":50611,"\\xf":50612,"Ġexhibited":50613,"_TCP":50614,"ĠOx":50615,"_CHO":50616,"Ġprostituerte":50617,"Ġvä":50618,"Ġsito":50619,"Ġconstituents":50620,"ĠContinued":50621,"ĠSAVE":50622,"rss":50623,"/message":50624,"ubes":50625,"Ġmisdemean":50626,"Ġtaxation":50627,"Ġstoryline":50628,"hair":50629,"ĠFinds":50630,"SIG":50631,"verification":50632,"~=":50633,".hp":50634,"Iterable":50635,"Ñĭе":50636,"atori":50637,"Ġctr":50638,"Rx":50639,"_);ĊĊ":50640,"dag":50641,".pin":50642,"Ġpseud":50643,"Ġinvo":50644,"ÑģÑĤÑĢ":50645,"_pix":50646,"为空":50647,"Ġsworn":50648,"âĢĶor":50649,"_registry":50650,"Ġdisasters":50651,"ĠROI":50652,"ĠâĢķ":50653,"aktu":50654,"forest":50655,"beiten":50656,"âĢĶI":50657,"ueva":50658,"egt":50659,"Ġspikes":50660,"URES":50661,"ĠRecommended":50662,"Ġexploited":50663,"ĠFrederick":50664,"_COMPLETE":50665,"ĠDrugs":50666,"!!!!!!!!":50667,"ĠRiv":50668,"STOP":50669,"ROOM":50670,"ĠPASSWORD":50671,"Cookies":50672,".El":50673,"á»Ń":50674,"ĠBert":50675,"Ġhashed":50676,"icester":50677,"Ġdecorator":50678,"ĠqueryString":50679,":;Ċ":50680,"Ġ\"[\"":50681,"otope":50682,"-Americ":50683,"ĠMatthews":50684,"URAL":50685,"âĢľ,":50686,"Summer":50687,"fos":50688,"_CONTAINER":50689,"_ACK":50690,"Ġfiltr":50691,"_disp":50692,"_Re":50693,"Ġfacile":50694,"аÑĪ":50695,"ĠìķĬ":50696,"Ġeben":50697,"Ġsprink":50698,"ĠQuint":50699,">V":50700,"Ġhistorians":50701,"ourmet":50702,"ĠMonitoring":50703,"ledger":50704,"cott":50705,"Ġware":50706,"GGLE":50707,"cars":50708,"ĠMEDIATEK":50709,"Ġvolupt":50710,"_View":50711,"HEL":50712,"(copy":50713,"(stats":50714,"Ġchromosome":50715,"ĠCurtis":50716,"-conf":50717,"(asset":50718,"Ġhvor":50719,"FileSystem":50720,"<>();čĊ":50721,"ocoder":50722,"ĠCannon":50723,")x":50724,"ĠSmooth":50725,"ĠSAS":50726,"_ce":50727,"ĉprev":50728,"_movie":50729,"Ec":50730,"_wall":50731,".ĊĊ":51278,"ogenesis":51279,"ĠOPTIONS":51280,"uptools":51281,"Ġmilitant":51282,"Ġexited":51283,"igar":51284,"ĠCOMM":51285,"ĠDisposable":51286,"aycast":51287,"Ġrowspan":51288,"Ġsynthes":51289,"Ġsondern":51290,"ĠĊ":54769,"ĠJacket":54770,"RATION":54771,".getSelectedItem":54772,"-init":54773,"ĠRegisters":54774,"_sep":54775,"ĠToolkit":54776,".dict":54777,"Ġxlabel":54778,"\\Table":54779,"toc":54780,"_combo":54781,"ĠCompact":54782,"Ġrugged":54783,"à¥ĩà¤":54784,"-management":54785,"')}}\">Ċ":54786,"ĠStamp":54787,"ıl":54788,"rox":54789,"Ġlandscapes":54790,"_NOTE":54791,"monary":54792,"cab":54793,"Ġmoet":54794,"x
af":54795,"rcode":54796,"-cli":54797,"_gate":54798,"[event":54799,"SPORT":54800,"gia":54801,"ĠSUPER":54802,"/Login":54803,"_shutdown":54804,"interrupt":54805,"Ġpretending":54806,"Ġfringe":54807,"ĠReds":54808,"ĠCUDA":54809,"ĠUNIX":54810,"vit":54811,"Ġbrig":54812,"drv":54813,"ĠConnector":54814,"Therefore":54815,"Ġlia":54816,"Detection":54817,"_actor":54818,"Ġtempfile":54819,"Ġeccentric":54820,"-role":54821,"Ġpadx":54822,"dent":54823,"Western":54824,"Ġê·¸":54825,"ĠApplicationRecord":54826,"Ġcampaigning":54827,"_runner":54828,"ĠCivic":54829,"aleigh":54830,"Ġdirekt":54831,".sul":54832,"ĠĠĉĉĉ":54833,"anten":54834,"Ġissuer":54835,"Ġassertions":54836,"(orig":54837,"ATIO":54838,"Ġleaned":54839,"äs":54840,".DTO":54841,"explode":54842,".Observable":54843,"Ġstaggering":54844,"Ġkidnapped":54845,"Ġprogrammers":54846,"ĠInnov":54847,".parameter":54848,"Ġdomination":54849,"Ġskeptic":54850,"Ġæĺ¯":54851,"Ġavoids":54852,".Verify":54853,"ubby":54854,"ĠASN":54855,"Ġformato":54856,"ĠBeatles":54857,"_brand":54858,"Ġinset":54859,"youtu":54860,"Ġtoc":54861,"-final":54862,"Showing":54863,"ĠDoub":54864,"ĠMesa":54865,"Adj":54866,"_medium":54867,"Creates":54868,"(endpoint":54869,"ĉUP":54870,"bbie":54871,"Ġstalk":54872,".databind":54873,".Scan":54874,"agents":54875,"$,":54876,"individual":54877,"+)/":54878,"ĉvm":54879,"(notification":54880,"Ġinex":54881,"ĠClassification":54882,"reno":54883,"Ġolig":54884,"-rated":54885,"Ġformulation":54886,"',{":54887,"Ġacept":54888,"_unpack":54889,"_CA":54890,".Pow":54891,"ĉim":54892,"Ġaluminium":54893,"ANO":54894,"Ġxn":54895,"Ġcómo":54896,"ĠIngredient":54897,"Ġseizures":54898,"åħ±":54899,"ificador":54900,"Ġsiguiente":54901,"ĠInfragistics":54902,"Ġduplicated":54903,"ĠDee":54904,"Ġnø":54905,"ĠACCEPT":54906,"(crate":54907,"иÑĤелÑĮ":54908,"-less":54909,"Ġinfinity":54910,"Analyzer":54911,"-Day":54912,"ritt":54913,"(cin":54914,"ĠGy":54915,"Ġmultiplied":54916,"uchi":54917,"ĠBaldwin":54918,"/ip":54919,"Ġshortcuts":54920,".ADD":54921,"Ġvigor":54922,"_instruction":54923,"(;":54924,"_eta":54925,"è¿ŀ":54926,"utorials":54927,"Ġboosting":54928,"bv":54929,"Ġacknowledges":54930,"Listening":54931,"FAQ":54932,";b":54933,"((-":54934,"Ġarchitects":54935,"Ġzwe":54936,"Ġpuls":54937,"ĠgetCount":54938,"verbs":54939,"ãĢľ":54940,"(Collection":54941,"kre":54942,"Ġjurisdictions":54943,"_bridge":54944,"ĠCrack":54945,"ĠDifficulty":54946,"KO":54947,"Reservation":54948,"_requires":54949,"Tour":54950,"ãģĹãģŁ":54951,".setCurrent":54952,"Ġky":54953,"ĠAlbany":54954,"Ġè§":54955,"ller":54956,"agna":54957,"workers":54958,".blank":54959,"ĠPrayer":54960,"MIC":54961,"Ġresilience":54962,"TeX":54963,"ĠLanguages":54964,"study":54965,"ĉcurr":54966,"Ġenzymes":54967,"Slug":54968,"ĠíĮĮ":54969,"stral":54970,"Ġtumors":54971,"Ġsegunda":54972,"='{":54973,"instruction":54974,"ĠLisp":54975,"/info":54976,"Ġ\"{$":54977,",:),":54978,"Ġgv":54979,"(ErrorMessage":54980,"Ġ'=":54981,"}-${":54982,".Documents":54983,"\"Well":54984,"Ġreminiscent":54985,"Ġgaz":54986,"iropr":54987,"ehr":54988,"Ġsuppressed":54989,"ersh":54990,".scrollTo":54991,"Ġcadena":54992,"ĠgameState":54993,"ÃŃm":54994,"(conv":54995,"ĠTomorrow":54996,"ĠCCT":54997,"Mongo":54998,"ulg":54999,".Camera":55000,".handlers":55001,"mph":55002,"Ġstk":55003,"Ġgenetics":55004,"ACING":55005,"Trivia":55006,"ĠBam":55007,"(marker":55008,".Stretch":55009,"ĠSunni":55010,"ĠBetty":55011,".tolist":55012,"unlikely":55013,".Rectangle":55014,"obsolete":55015,"ILON":55016,"innerText":55017,"embourg":55018,"aN":55019,"ĠVehicles":55020,"unlock":55021,":utf":55022,"nob":55023,"ĠSeeing":55024,"ĠNEVER":55025,"Ġtl
s":55026,"Ġfilles":55027,"Ġbenefited":55028,"ĠClint":55029,"*/),":55030,".fold":55031,"Ġposible":55032,"ADED":55033,"thouse":55034,".DAL":55035,"ĠOdd":55036,"rokes":55037,"ĠSunny":55038,"ĠPartialEq":55039,"_Buffer":55040,"ĠLevi":55041,"longrightarrow":55042,"eldon":55043,"gages":55044,"_warn":55045,".CreateTable":55046,"ĠDip":55047,"_questions":55048,".logic":55049,"Ġ#\"":55050,"={()=>":55051,"Ġtep":55052,"Ġjuicy":55053,"ìĤ¬":55054,"enko":55055,"ialect":55056,"Ùī":55057,"Ġonboard":55058,"Ġæı":55059,"ĉrt":55060,"_UTF":55061,"ĠQAction":55062,"âĢŀ":55063,"(Component":55064,"(audio":55065,".hit":55066,"gte":55067,"Ġprogrammed":55068,"stateParams":55069,"Ġpolyester":55070,"fires":55071,"byss":55072,"]=(":55073,"_quality":55074,"OfDay":55075,"ĠFairy":55076,"Ġyelled":55077,"opl":55078,"(userName":55079,"ĠDifference":55080,"Ġevaluations":55081,"iffany":55082,"Ġcyclists":55083,"Ġcidade":55084,"Ġtextbook":55085,"Ġprofiling":55086,"__),":55087,"dea":55088,".activate":55089,"Ġindications":55090,"Ðķ":55091,"TouchUpInside":55092,"Ġinvaluable":55093,"ĠMASK":55094,"Ġcontend":55095,"Freq":55096,"Ġrecruits":55097,"(interval":55098,"ĠUserProfile":55099,"Ġ'./../":55100,"edu":55101,"_Callback":55102,"Ġanalogy":55103,"ĠTrophy":55104,"apphire":55105,"Videos":55106,"ĠCher":55107,"ĠHav":55108,"â̦\"":55109,".validator":55110,"gfx":55111,"ĠUObject":55112,"classnames":55113,"triangle":55114,"ĠEncoder":55115,".spy":55116,"Ġpredators":55117,"=status":55118,"-safe":55119,":\",Ċ":55120,"ĠIncluding":55121,"Ġ{};čĊ":55122,"*cos":55123,"Ġendured":55124,".sulake":55125,"Ġnursery":55126,"Ġfragrance":55127,"Ġrebuilding":55128,"Ġnth":55129,"ĠFraser":55130,".setDate":55131,"ĠVince":55132,"_REST":55133,"Ġventilation":55134,"æµ·":55135,"cribes":55136,".asm":55137,"lpVtbl":55138,"ĠAbe":55139,"uisine":55140,",array":55141,"ĉclassName":55142,"errals":55143,"Ġ'ĊĊ":55144,"Checkout":55145,"Ġsolicit":55146,"Aux":55147,"_capture":55148,"Ġribs":55149,"ragon":55150,"viol":55151,"topics":55152,"FunctionFlags":55153,"ĠMarty":55154,"bike":55155,"ĠTucker":55156,"(kernel":55157,"ĠOps":55158,"CloseOperation":55159,"/demo":55160,"ilda":55161,"ĠlÃŃnea":55162,"APPING":55163,"Ġsuites":55164,".visitVarInsn":55165,"urus":55166,"ĠMinute":55167,"(manager":55168,"Ġbutterfly":55169,"Ġapare":55170,"Ġwolves":55171,"JWT":55172,"ĠSalon":55173,"ĉdelay":55174,"-eslint":55175,"isations":55176,".rpc":55177,")|(":55178,"ĠSnapchat":55179,"/mm":55180,"MN":55181,"ceries":55182,".textAlignment":55183,"ĠFrankfurt":55184,"Ġado":55185,"(newValue":55186,"(access":55187,"(Expression":55188,"ĠSignIn":55189,"ĠHaiti":55190,"_tp":55191,".setParameter":55192,"Minute":55193,"Ġmanuals":55194,"ricanes":55195,"ĠPTR":55196,"ĠOuter":55197,"Ġgetline":55198,"ocations":55199,"_CD":55200,"ĠLyon":55201,"/gui":55202,"_live":55203,"idan":55204,".geom":55205,"ĠborderBottom":55206,"imuth":55207,"_checkpoint":55208,"Ġmeu":55209,"ĠIrving":55210,"Ġpeuvent":55211,"(MAX":55212,"ĠARCH":55213,"Ġpov":55214,".sourceforge":55215,"Ġjamais":55216,"Ġark":55217,"ĠBaghdad":55218,"ĠCLEAR":55219,"MenuBar":55220,"Ġtrois":55221,"CHEDULE":55222,"Ġ#čĊ":55223,"(Call":55224,"$order":55225,"(Material":55226,"Ġencontrado":55227,"$list":55228,"ĠMETHODS":55229,".beginTransaction":55230,"_MAG":55231,"StyleSheet":55232,"Ġmajors":55233,"Ġindefinitely":55234,"cleanup":55235,"Ġhomeland":55236,"(dto":55237,"Dates":55238,"Presentation":55239,"ĠDK":55240,"={`/":55241,"ĉKey":55242,"(Block":55243,"_checkbox":55244,"needs":55245,"ĠonComplete":55246,"rico":55247,"Ġgleich":55248,"Ġxm":55249,"OOD":55250,"Better":55251,"ĠSQLITE":55252,"
.Book":55253,"xad":55254,"ĠGone":55255,"ĉdp":55256,"Ġdevotion":55257,"Ġstm":55258,"Ġobsess":55259,"ĠBackend":55260,"Queries":55261,"Ik":55262,"//****************************************************************":55263,"Ġdividends":55264,".parentElement":55265,"}\")ĊĊ":55266,"ĠMaterialPageRoute":55267,":num":55268,"Ġexplic":55269,"ĠOL":55270,"least":55271,"Oops":55272,"imentos":55273,"Ġinsurers":55274,"Ġheroic":55275,"ĉfields":55276,".imgur":55277,".btnCancel":55278,"ĠDetective":55279,"(sm":55280,"ĠMutableLiveData":55281,".lab":55282,"(([":55283,"Ġhairst":55284,"ĠTransactions":55285,"å¼Ģå§ĭ":55286,"ĠstdClass":55287,"uento":55288,"GIS":55289,"_cod":55290,"Instructions":55291,"Calls":55292,"PointerType":55293,"ĠRw":55294,"Ġassortment":55295,"ĠDIG":55296,"+r":55297,"_CERT":55298,"Ġinstability":55299,"Ġvib":55300,"onas":55301,"Ġroku":55302,"apellido":55303,"Ġangl":55304,"preneur":55305,"Ġfluids":55306,"isease":55307,"Ġdeed":55308,"quist":55309,"_CONSTANT":55310,"Ġequilibrium":55311,"_delegate":55312,"ĠQuantum":55313,"rei":55314,"Capabilities":55315,"rectangle":55316,"?><":55317,"alien":55318,"ĠJug":55319,"DNA":55320,"Tickets":55321,"Occurs":55322,"ĠHawk":55323,".setHorizontalGroup":55324,"\\Collection":55325,"ffiti":55326,"Ġrearr":55327,".setVerticalGroup":55328,"Ġcavity":55329,"Ġadulte":55330,"Facade":55331,"-wh":55332,"ĠLOL":55333,"ذ":55334,"Ġgrandparents":55335,"Swift":55336,"ĉwx":55337,"æīĢæľī":55338,"ifen":55339,"ffset":55340,"Beyond":55341,"//}ĊĊ":55342,"Ġwager":55343,"Ġbury":55344,"Ġcommence":55345,"registro":55346,"scient":55347,"ĠPercent":55348,"Ġдолж":55349,"(identifier":55350,".setModel":55351,"Ġseldom":55352,"nton":55353,"Ġappliance":55354,"amus":55355,"rysler":55356,"Ġpanties":55357,"enguins":55358,"Ġmimic":55359,"ĠonChanged":55360,"Ġalcoholic":55361,".reloadData":55362,"Charge":55363,"ĠFax":55364,"ĠjScrollPane":55365,"Empresa":55366,"Ġshattered":55367,"xba":55368,"Fonts":55369,"?s":55370,"Ġpostseason":55371,"retain":55372,"_rates":55373,"ĠrequestCode":55374,".todo":55375,"´s":55376,"CHK":55377,"ĠKeeping":55378,"engeance":55379,"Ġvscode":55380,"IPPING":55381,"DefaultCloseOperation":55382,"_raise":55383,"ĠOculus":55384,"ograms":55385,"raj":55386,"pci":55387,"Ġcorrosion":55388,".handleSubmit":55389,"Accessible":55390,"ĠPiano":55391,"little":55392,"ACL":55393,"Äĩe":55394,".unwrap":55395,"ĠConvers":55396,"ĠLeben":55397,"ioneer":55398,"ĠMerchant":55399,"ĠJorge":55400,"Ġembracing":55401,"Ġventa":55402,"ást":55403,"Ġviene":55404,"Ċ":55556,"-growing":55557,"Ġdeepcopy":55558,"Ack":55559,"eggies":55560,"Ġ__(\"":55561,"Ġnoir":55562,"terrorism":55563,"Ġanthem":55564,"agency":55565,"_PACKAGE":55566,"ĠClosure":55567,".registry":55568,"Ġmammals":55569,"L":55600,"Ġbluetooth":55601,".Deep":55602,"-standing":55603,"ácil":55604,"Ġrooft":55605,"ĠPaths":55606,"_iterations":55607,"InvalidArgumentException":55608,".spi":55609,"ĠUIAlertAction":55610,"uye":55611,"signin":55612,".priority":55613,"ĠEssays":55614,"='{$":55615,"Ġè¿ĶåĽŀ":55616,"_signed":55617,".persist":55618,"Ġredesign":55619,"ToLower":55620,"ĠNewman":55621,"=start":55622,"ĠIsraelis":55623,"asiswa":55624,"Speech":55625,"Ġnumeros":55626,"handlers":55627,"ĠWong":55628,"ĠмеÑĤод":55629,"Weights":55630,"ĠGujar":55631,"teil":55632,"ĠNonetheless":55633,"_EFFECT":55634,"Ġvect":55635,"ĠOsc":55636,"Ġcoats":55637,"ĠWheat":55638,"Ġgeek":55639,"ĠPROPERTY":55640,"worm":55641,"_constants":55642,"ĠBoulder":55643,"ĠParm":55644,"cole":55645,"ĠdefaultCenter":55646,"ĠRouge":55647,":A":55648,"xcf":55649,"ĠVenice":55650,"median":55651,"Ġredemption":55652,"Fresh":5565
3,"Ġcosm":55654,"Ġfigur":55655,"Ġrefurb":55656,"COPE":55657,".cd":55658,"Ġchords":55659,"ĠSgt":55660,"Åį":55661,"VPN":55662,"ĠSEND":55663,"ainen":55664,"_accounts":55665,"Ġtenth":55666,"Ġdissolved":55667,"":55907,"Ġlegitimacy":55908,"Ġoo":55909,"Slinky":55910,"Ġnationals":55911,".words":55912,";p":55913,"trap":55914,"omanip":55915,"Ġcues":55916,"Ġgraduating":55917,"Ġsemaphore":55918,"\"]);ĊĊ":55919,"acey":55920,"REET":55921,"Grab":55922,"ĠFelix":55923,"(Id":55924,"_neighbors":55925,"Ġmeaningless":55926,"(del":55927,"Ġjeder":55928,"ĠContentValues":55929,".absolute":55930,"/cl":55931,"Ġxb":55932,"datum":55933,"Ġtortured":55934,"Ġrubbing":55935,"Scores":55936,"ĠðŁĺī":55937,"Ġavons":55938,"Ġamsterdam":55939,"EOS":55940,"Hal":55941,"Ġtrustworthy":55942,"#=":55943,".EXTRA":55944,"Ġmano":55945,"isicing":55946,"-support":55947,"ĉcursor":55948,"ĠSpo":55949,"aimassage":55950,"Mission":55951,"[]{\"":55952,"Ġprinters":55953,"GREEN":55954,"Ġteg":55955,"Ġabdominal":55956,"!ĊĊĊĊĊĊ":55957,".Short":55958,"азв":55959,"ĠGifts":55960,"}\")":55961,"(binding":55962,"xce":55963,"âĢij":55964,"infos":55965,"FormData":55966,"Ġdart":55967,"Ġelems":55968,"(inv":55969,"YL":55970,"tin":55971,"GENER":55972,"ữ":55973,"ĠTaken":55974,"uckle":55975,":e":55976,"Ġspectral":55977,".baidu":55978,"/');Ċ":55979,"Ġgreedy":55980,"esion":55981,",,,,,,,,":55982,"Ġ/>,Ċ":55983,"InternalServerError":55984,"NSNotificationCenter":55985,"ĠAi":55986,"Ġspit":55987,"Ġaugmented":55988,"ĠstandardUserDefaults":55989,"FINITY":55990,"Race":55991,":C":55992,"ĠRECORD":55993,"ĠHighlight":55994,"Ġ'`":55995,"Ġdeficits":55996,"Ġnei":55997,"Ġresearched":55998,"Ta":55999,"Ġcopp":56000,".GetHashCode":56001,"):čĊčĊ":56002,"OnClick":56003,"ĠWellington":56004,"Ġrevival":56005,"æ¯Ķ":56006,"éĹ®":56007,"ĠNSS":56008,"Ġforn":56009,"Ġinté":56010,"ĠKuwait":56011,"_flip":56012,"_bo":56013,"_\\":56014,"Ġoccurrences":56015,"ĠScientists":56016,"SRC":56017,"ogens":56018,"igrant":56019,"REMOTE":56020,"ĠSID":56021,".opts":56022,"uve":56023,"()])Ċ":56024,"Ġlibertarian":56025,"ĠGlide":56026,"lesen":56027,"Ġforme":56028,"owania":56029,"Ġannoyed":56030,"Defs":56031,"ĠExecutor":56032,"Ġcasts":56033,".setChecked":56034,"ĠSharing":56035,".SerializeObject":56036,"Ġselectors":56037,"_OTHER":56038,"미":56039,"(super":56040,"(OS":56041,"_VERIFY":56042,"idunt":56043,"';Ċ":56045,"Ġvidéo":56046,"ĠNegro":56047,"ĠLords":56048,"ĠTours":56049,"Ġsoftly":56050,".receive":56051,"ĠERC":56052,"ĠdataSet":56053,"Badge":56054,"ĉEvent":56055,"Ġperl":56056,"Ġ{}\\":56057,"(sentence":56058,"OrUpdate":56059,"Ġdiminish":56060,"PIN":56061,"(draw":56062,".ToDateTime":56063,".EqualTo":56064,"(pin":56065,"-pencil":56066,"luent":56067,"ĠCaller":56068,"Ġplayful":56069,"-'+":56070,"xca":56071,"swick":56072,"){}Ċ":56073,"}:${":56074,"ĠMeth":56075,".getCell":56076,".break":56077,"Ġymax":56078,"='Ċ":56291,"ĠHiro":56292,"(TRUE":56293,"asurer":56294,"Ġcuer":56295,"Uber":56296,".Operation":56297,"Ġolan":56298,"Ġthrilling":56299,"'.":56321,"ĉvalid":56322,"\"\",":56323,"Instrument":56324,">J":56325,"Ġnostr":56326,"ĠRift":56327,"_Port":56328,"Ġveces":56329,"[['":56330,"Ġrallies":56331,"-series":56332,"Ġvv":56333,".uc":56334,"Ġrtn":56335,"StateChanged":56336,"(ins":56337,"ĠCla":56338,"------------Ċ":56339,"cus":56340,"ĠReload":56341,"//------------------------------------------------------------------------------------------------":56342,".seconds":56343,"_destination":56344,"Ġscrewed":56345,">c":56346,"Thickness":56347,"Designer":56348,"Ġgrids":56349,"nÄħ":56350,"(cookie":56351,"Trip":56352,"-Mobile":56353,"Ġvoll":56354,"
Ġgenital":56355,"Ġconfisc":56356,"ĠConfederate":56357,"ĠwebView":56358,"Ġmise":56359,"Ġcler":56360,"(selection":56361,"$date":56362,"Ġsharpen":56363,"ragen":56364,"AndUpdate":56365,"Ġremix":56366,"Ġhtons":56367,"RW":56368,"MPI":56369,"Ġretrieval":56370,"Ġrichest":56371,".Decode":56372,":initComponents":56373,"ĠTValue":56374,"Saint":56375,"@include":56376,"ĠPERSON":56377,".sep":56378,"ĠLDAP":56379,"gba":56380,"ĠgroÃŁe":56381,"Ġreliably":56382,"ĠDFS":56383,".getItemId":56384,"Ġprésent":56385,".getToken":56386,"Ġchinese":56387,"ĠMeal":56388,"YOU":56389,"\">>ĊĊ":56948,"bower":56949,"Ġswapped":56950,"/install":56951,"Ġsinks":56952,"etrize":56953,"Ġdeclines":56954,"ĉmysql":56955,"ĠCString":56956,"ĠMotionEvent":56957,".Language":56958,"Road":56959,"ÑĤеÑĢ":56960,"ascimento":56961,"'))->":56962,".about":56963,"(editor":56964,"ĠRatings":56965,"income":56966,"Å¡e":56967,".dequeueReusableCell":56968,"ĠAustrian":56969,"Ġsulla":56970,"ĠTribunal":56971,"ĠDidn":56972,"оваÑĢ":56973,"Ġinspections":56974,"Boss":56975,"Ġcocktails":56976,"Ġapologized":56977,"_subplot":56978,"opal":56979,"+=(":56980,"Ġresonance":56981,"ibu":56982,"Ġ리":56983,"roma":56984,"reserve":56985,"pls":56986,"ĠTah":56987,"axies":56988,"OPLE":56989,"ĠDarren":56990,"ĠZombie":56991,"_Map":56992,"Ġ])ĊĊ":56993,"ĠQi":56994,"ĠSail":56995,"Ġrestrictive":56996,"Ġerosion":56997,"-par":56998,"WHITE":56999,"Ġoldu":57000,"Ġaperture":57001,"Ġbitcoins":57002,"texto":57003,"ĠComcast":57004,"Ġtimeless":57005,"enkins":57006,"Ġfeeder":57007,"/tmp":57008,"resden":57009,"+'_":57010,".Destroy":57011,"Ġçok":57012,"ĠDOCUMENT":57013,".lng":57014,".tagName":57015,"Ġkullan":57016,"egrate":57017,"Ġ(*.":57018,"ç¼ĸè¾ij":57019,"Ġhandshake":57020,"soc":57021,"_geometry":57022,"ĠDamascus":57023,"Minor":57024,"ĠKafka":57025,"ìŬ":57026,"Florida":57027,"_compute":57028,".expr":57029,"Ġparalle":57030,"ĠDiaz":57031,"cir":57032,"[target":57033,"Ġjoking":57034,"Ġglor":57035,"(setq":57036,"_handlers":57037,"Hang":57038,"Ġferr":57039,"riminal":57040,"ĉĠĠĠĠĉĉ":57041,"enties":57042,"defines":57043,"-tax":57044,"jsonp":57045,"ĠUPS":57046,"metro":57047,"__;Ċ":57048,"ĠUganda":57049,"])):Ċ":57050,"_td":57051,"xae":57052,"lw":57053,".OS":57054,"ĠLogged":57055,"acid":57056,"ĠMayo":57057,"aspect":57058,"Ġvaginal":57059,"Ġinitializing":57060,"Ġsteroids":57061,"fiction":57062,"GRE":57063,"gend":57064,"Ġliabilities":57065,"ĠLets":57066,"Mech":57067,"(nc":57068,"(change":57069,"Ġconnectors":57070,":k":57071,"Ġtast":57072,"!\");ĊĊ":57073,"things":57074,"rophy":57075,"luetooth":57076,"ĠSignUp":57077,".ctrl":57078,"Ġtherein":57079,"orda":57080,".escape":57081,"igator":57082,"Ġpetrol":57083,"Ġspecimen":57084,"Ġdebuted":57085,"-Pro":57086,"Ġcrises":57087,".addView":57088,"ëıĻ":57089,"-door":57090,"Ġmonet":57091,"Ġmillis":57092,"Ġvier":57093,"InternalEnumerator":57094,"Ġadmins":57095,"ĠLair":57096,"zin":57097,"getQuery":57098,"umbles":57099,"LIMIT":57100,"ĠVig":57101,"_song":57102,"":57415,"Ġpasado":57416,"thank":57417,"_Delete":57418,"ĠBrighton":57419,",unsigned":57420,"ä½ľèĢħ":57421,"Ġaspirations":57422,"-how":57423,"Rose":57424,"=((":57425,"_needed":57426,"_plural":57427,">ĊĊ":57545,"Ġsurfaced":57546,"ĠìłĢìŀ¥":57547,"platz":57548,"ĉemail":57549,"ceptors":57550,"\">(":57551,"Ġepile":57552,"读":57553,"ĠDebt":57554,"åijĬ":57555,"NOP":57556,"\"https":57557,":j":57558,"FormItem":57559,"_LICENSE":57560,".getDouble":57561,"ĠAgenda":57562,"ĉfinally":57563,"(filters":57564,"(av":57565,"ç¾İ":57566,"APER":57567,"Ġlava":57568,"еÑĢж":57569,"))))ĊĊ":57570,"Ġfaulty":57571,"_nm":57572,"Ġtrava":57573,"(Bitmap":575
74,"Ġspeeding":57575,">').":57576,"Ġscreened":57577,"_roll":57578,"ĠMacBook":57579,"ĠAUD":57580,"Ġdiagnose":57581,".Generate":57582,"Ġ^^":57583,"Ġstrs":57584,"[Test":57585,"Ġransom":57586,"ĠDHCP":57587,"elden":57588,"Ġinterpretations":57589,"()].":57590,"flatMap":57591,"ĠlineHeight":57592,"_mount":57593,"ĠWizards":57594,"Ġsluts":57595,"ehler":57596,"odal":57597,"Ġmilitia":57598,"å²":57599,"earned":57600,"Ġmisery":57601,"intval":57602,"fund":57603,"Ġhides":57604,"Ġdiarr":57605,"ĠWesley":57606,"Ġxmm":57607,"Ġquem":57608,"ĠArabs":57609,"ifth":57610,"ategorized":57611,"Disposable":57612,"Pure":57613,"_NOTIFY":57614,"snippet":57615,"ĠGarrett":57616,".running":57617,".weights":57618,"Ġ(--":57619,"Ġinvariant":57620,"äºĭä»¶":57621,"ĠAllowed":57622,"dirs":57623,"Ġpassions":57624,"Ġlad":57625,"ĠFlush":57626,"menus":57627,":block":57628,"Ġcompra":57629,".chomp":57630,"allocator":57631,"Ġcurated":57632,"ĠKnowing":57633,"ĠPatterson":57634,"Ġtelah":57635,"'ex":57636,"Ġdoomed":57637,"Ġphilanth":57638,"otty":57639,".styles":57640,"Owned":57641,"Ġallergies":57642,"=params":57643,"ocese":57644,"itelist":57645,"ĠSending":57646,"bef":57647,"orrar":57648,"ĠNão":57649,"ĠFargo":57650,"ĠLub":57651,"ĠCombined":57652,"_given":57653,"ĉĉĉĉĉĠĠĠĠ":57654,"Ġreconciliation":57655,"Patterns":57656,"azard":57657,"Ġbiomass":57658,"ĠHouses":57659,"respuesta":57660,"cco":57661,"/topics":57662,"ĠYuk":57663,"Ġweakened":57664,"_calendar":57665,"Ġmulheres":57666,"ĠMarl":57667,"Ġsine":57668,"ĠTil":57669,"ĠSouls":57670,"ĠDeutsche":57671,"ĠFOLLOW":57672,"Ġpipelines":57673,"ĠBeverly":57674,"_DIPSETTING":57675,"\"#":57676,"ĠProto":57677,".big":57678,"ĠSavings":57679,"ĠTanz":57680,"jun":57681,"ĠGamma":57682,"ĠSadd":57683,"Ġadvisors":57684,"Ġroast":57685,"Ġunters":57686,"udies":57687,"_lon":57688,"-pointer":57689,"ĠElementRef":57690,"\\Builder":57691,"exampleInput":57692,".webdriver":57693,"dataType":57694,"ĠQuite":57695,"ĠCeltics":57696,"uil":57697,"-defense":57698,"bish":57699,"ĠUIWindow":57700,"ĠSuddenly":57701,".hot":57702,".reason":57703,"Ġgör":57704,"AMD":57705,".Multi":57706,"authenticated":57707,"regions":57708,";(":57709,"аÑĢам":57710,"ĠKirby":57711,"$route":57712,"PRECATED":57713,"ĠDurham":57714,"owo":57715,"ĠPerforms":57716,"Ġdisregard":57717,"nst":57718,"ĠPols":57719,"ĠgetP":57720,"\"]:":57721,"-colored":57722,"(Keys":57723,"ĠAlleg":57724,"_modify":57725,"_loading":57726,"strained":57727,"Ġatroc":57728,"_phr":57729,"":58721,"ceph":58722,".DateTimePicker":58723,".\";ĊĊ":58724,"ĠTie":58725,",item":58726,"Ġmenn":58727,"Gas":58728,"ocha":58729,"_virtual":58730,"Ġmasterpiece":58731,"_sequences":58732,"LTE":58733,"ĠSubmission":58734,"Caller":58735,"$\\":58736,"Sport":58737,"agus":58738,"ConstraintMaker":58739,"Ġcoloc":58740,"Ġwig":58741,"ĠУ":58742,"ĉArray":58743,"Looks":58744,"ĠGTA":58745,".steps":58746,"atchewan":58747,"_ranges":58748,"extAlignment":58749,"ĠBrennan":58750,"Ġabstraction":58751,"ulerAngles":58752,".misc":58753,"Ġantibodies":58754,"Ġexponential":58755,"ĠCHANNEL":58756,"expense":58757,"'y":58758,"Ġdetectives":58759,"Ġpurported":58760,"YSTEM":58761,"Ġradioactive":58762,"ĠLatina":58763,".Encoding":58764,".TAG":58765,"xin":58766,"Degree":58767,"uracion":58768,"prices":58769,"ĠReferentialAction":58770,"Ġrarity":58771,"Ġpiles":58772,"gende":58773,"_projects":58774,"_globals":58775,".startTime":58776,"Ġ구":58777,"SECTION":58778,"_publish":58779,"Fault":58780,"DDL":58781,"_prior":58782,"Mom":58783,"Ġthicker":58784,"Ġsequelize":58785,"Ġessentials":58786,"stras":58787,"intr":58788,">(()":58789,".management":58790,"eil":58791,"éĹ
Ń":58792,"Aware":58793,".City":58794,"ĠArbit":58795,"_DM":58796,"_keyboard":58797,"LObject":58798,"-webpack":58799,"ĠNewport":58800,"ĠprincipalColumn":58801,"legant":58802,"Ġpallet":58803,"Ġfracture":58804,"Ġgmail":58805,".Meta":58806,"Above":58807,".KeyEvent":58808,"jit":58809,"_macro":58810,"_PUSH":58811,"ứ":58812,"/controller":58813,"åĬłè½½":58814,"Ġsuperficial":58815,"exterity":58816,"Ġmensagem":58817,"Wind":58818,"iston":58819,".openapi":58820,"иÑĢов":58821,"ĠSerializer":58822,"uctive":58823,"Ġzar":58824,"Places":58825,".Static":58826,"Ba":58827,"Ġinadvert":58828,"ĠIndonesian":58829,"_IPV":58830,"(horizontal":58831,"ĠgetTitle":58832,"idepress":58833,"ĠConsoleColor":58834,"ipers":58835,"$out":58836,"Ġfestive":58837,"Ġevenings":58838,".GetData":58839,"uitka":58840,"ĠManuals":58841,"ussed":58842,"_Max":58843,".Chat":58844,"ĠAircraft":58845,"=com":58846,"FOUND":58847,"apro":58848,"Ġtreasures":58849,"_alive":58850,"Ġgadget":58851,"eking":58852,"ButtonDown":58853,"Browsable":58854,".PERMISSION":58855,"PASSWORD":58856,"ĠHASH":58857,"fé":58858,"\\TestCase":58859,"LOSS":58860,"others":58861,",J":58862,"Ġasshole":58863,"werk":58864,"Ġmã":58865,".ie":58866,"evil":58867,"kontakte":58868,"////////////////////////////////////////////////////////////////////////////////Ċ":58869,"=sys":58870,"ĉlock":58871,"--;ĊĊ":58872,"_FUN":58873,"FillColor":58874,"óa":58875,"prend":58876,"Ġcompressor":58877,"Mother":58878,"ĠArcher":58879,".goto":58880,"Ġwürde":58881,"Ġbamboo":58882,"ï¼İ":58883,"ĠTrees":58884,"Ġbumper":58885,"Ġsausage":58886,"ĠElasticsearch":58887,"Ġhorizontally":58888,"ĠGul":58889,"Immutable":58890,"Ġloser":58891,"Ġaborted":58892,"-demo":58893,"ĠHatch":58894,"Ġunde":58895,"Ġprocesso":58896,"-call":58897,"Income":58898,"åĥ":58899,"_returns":58900,"'].\"'":58901,"(sw":58902,"CBS":58903,"amilies":58904,"ĠYourself":58905,"ĠHolt":58906,".MON":58907,"à§ĩ":58908,"ÑĪе":58909,"anon":58910,"ĠFontAwesome":58911,"producer":58912,"jr":58913,"Ġmau":58914,"ĉinter":58915,"Ġdishonest":58916,"Ġmagna":58917,"ĠCollective":58918,"Ġvraiment":58919,"Ġchoix":58920,"stay":58921,"Ġwelding":58922,"rising":58923,",min":58924,"ĠFate":58925,"glob":58926,"RGBA":58927,"Ġdette":58928,"Ven":58929,"Ġembarrassment":58930,".DELETE":58931,"gregar":58932,"-render":58933,"(bucket":58934,"\">ĊĊĊ":58935,".waitKey":58936,"Busy":58937,"Ġdifferentiation":58938,"ĠCST":58939,".Constant":58940,"ĠlineNumber":58941,"(matches":58942,"Ġwebsocket":58943,"Ġbarred":58944,"Ġpuedes":58945,"Mono":58946,"CORE":58947,"IID":58948,"ĠĠĠĠčĊčĊ":58949,"Ġpúblico":58950,"leaning":58951,"Ġcleansing":58952,"Ġcris":58953,"ĠDevils":58954,"_SETTING":58955,"untary":58956,".);Ċ":58957,"ĊĠĠĠĊ":58958,"[curr":58959,"tsy":58960,"ĠAlexis":58961,"ritel":58962,"Ġpetroleum":58963,".preprocessing":58964,"matter":58965,"ForResult":58966,"-license":58967,"Ġtravellers":58968,"ĠDispatcher":58969,"ennifer":58970,"Ġdigestive":58971,"PED":58972,"hibition":58973,"MASConstraintMaker":58974,"ĠWatt":58975,"Benef":58976,".setView":58977,"dto":58978,"TEE":58979,"ĠPelosi":58980,"_EXTRA":58981,"Ġmedals":58982,"xhr":58983,"forecast":58984,"Ġnargin":58985,"ouns":58986,"-fill":58987,"_CURSOR":58988,"Ġsupervised":58989,"Ġturf":58990,"ĠEdgar":58991,"POSITION":58992,"ĠcategoryId":58993,"âī":58994,"_ER":58995,"á»§a":58996,"Shown":58997,".ll":58998,"_POLICY":58999,"(),'":59000,"ĠPrev":59001,"ĠStringField":59002,"ĉGlobal":59003,"assed":59004,"Throughout":59005,"ostringstream":59006,".awtextra":59007,"Ġslopes":59008,"ĠSequential":59009,"Ġgiorn":59010,"Ġzelf":59011,"Ġversatility":59012,"leneck":59013,".cgi
":59014,"Ġdoubling":59015,"ĠBangkok":59016,"Ġbuurt":59017,"Ġusuário":59018,"studio":59019,"Ġjeunes":59020,"Ġmuted":59021,"Ġips":59022,"_fraction":59023,"&&(":59024,"Ġstunt":59025,"');?>čĊ":59049,"Ġevapor":59050,"bable":59051,"ĠPRICE":59052,"Ġæ³":59053,"lucent":59054,"Ġvamp":59055,"ĠTechnician":59056,"Ġuniqueness":59057,"Mes":59058,"urban":59059,".parametrize":59060,"ĠReplay":59061,"Sessions":59062,"embr":59063,"-Americans":59064,"_PROXY":59065,"Ġpian":59066,"Ġtrie":59067,"ĠDestructor":59068,"GameState":59069,"ĠIMF":59070,"chin":59071,"Ġporte":59072,"ĠSwal":59073,"åŁİ":59074,"Substring":59075,"iming":59076,"/Library":59077,"Ġfrightened":59078,"writes":59079,"Ġrecursos":59080,"arResult":59081,"_INITIALIZ":59082,"ĠBadge":59083,"_crc":59084,"Eight":59085,"ĠDISTINCT":59086,"Ġthro":59087,"@Xml":59088,"ĠLegendary":59089,"-twitter":59090,"_easy":59091,"Ġ+++":59092,"(DATA":59093,".Locale":59094,"Ġkä":59095,"Ġnurt":59096,"Ġcruis":59097,"_ios":59098,"Ġsensing":59099,"_Line":59100,"ĊĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":59101,"pong":59102,"oleon":59103,"Ġwildcard":59104,"ç͍æĪ·åIJį":59105,"Ġbegging":59106,"Rod":59107,"ĠÃİ":59108,"_CELL":59109,"Researchers":59110,".selector":59111,"_ing":59112,"Ġaspiring":59113,"Ġimmortal":59114,"Ġymin":59115,"_robot":59116,"Ġplur":59117,"BTC":59118,"ĠDID":59119,"Ġpiercing":59120,"*u":59121,"_DEFINED":59122,"ĠThi":59123,"itaire":59124,"(media":59125,"-ons":59126,"Ġchefs":59127,"Ġ\"*.":59128,"/AP":59129,"Ġrazor":59130,"ĠsearchData":59131,"Ġ=&":59132,"ĠãĢĤ":59133,"Ġmourn":59134,"tingham":59135,"Ġoli":59136,"ĠVernon":59137,"_RS":59138,"ŀæĢ§":59139,"Ġfácil":59140,"angen":59141,"celain":59142,"Ġail":59143,"lest":59144,"ĠQCOMPARE":59145,"gain":59146,"Ġε":59147,"ĠKob":59148,"ĠFault":59149,"_configs":59150,"ç»ĵæŀľ":59151,".+":59152,"calar":59153,"(colors":59154,"Mul":59155,"_ART":59156,"Ġexperimenting":59157,"ermen":59158,"ĠAnglo":59159,".FixedSingle":59160,"Sea":59161,"Ġctxt":59162,".slider":59163,"Collapse":59164,"Grey":59165,"Ġfld":59166,"-proof":59167,".capacity":59168,"getParent":59169,"ĠCompliance":59170,"Ġburgl":59171,"-rec":59172,"Ġoverwritten":59173,"MU":59174,"Ġrouters":59175,"ĉModel":59176,"Ġfantasies":59177,"avian":59178,"_prec":59179,"ĠScandin":59180,"Ġ//<":59181,"/oct":59182,"Ġceremonies":59183,"Months":59184,"undy":59185,"Ġqued":59186,"ĠNou":59187,"ĠVibr":59188,".rgb":59189,"Ġcitrus":59190,"Ġbraces":59191,"-uppercase":59192,"getTable":59193,"Ġdopo":59194,"ĠKerr":59195,"_CHILD":59196,"-cloud":59197,"ĉMatrix":59198,"Ġgardening":59199,"Sing":59200,"almost":59201,"Requirements":59202,"uguay":59203,"(Property":59204,"subscriber":59205,"FAST":59206,"reaction":59207,"(lp":59208,")})Ċ":59209,"`).":59210,".wallet":59211,"_exchange":59212,".Maximum":59213,"ĠVerb":59214,"âĶģ":59215,"()<":59216,"ï¼ĽĊ":59217,"ROT":59218,"CARD":59219,"ubit":59220,"{@":59221,"_kel":59222,"ĠTooltip":59223,"MySQL":59224,"MainActivity":59225,"arf":59226,"Ġmalign":59227,"Ġseinen":59228,"apist":59229,"Ġ<%":59230,"MethodImpl":59231,"Mil":59232,"ĠMick":59233,".depend":59234,">&":59267,"ĉok":59268,"-low":59269,".usuario":59270,"nested":59271,"XB":59272,"OURS":59273,".BorderColor":59274,"Ġbrow":59275,"ĠÐķ":59276,"corr":59277,"ĠRedskins":59278,".getTag":59279,".getTransaction":59280,"Ġstigma":59281,"hardt":59282,"ĠPlayerPrefs":59283,"alsy":59284,"ucson":59285,"Languages":59286,"ĠOlivia":59287,"Ġtac":59288,"Ġbli":59289,"Ġcaval":59290,"Ġconsolidated":59291,"Ġperil":59292,"Ġdele":59293,"Ġformulated":59294,"Ġhighways":59295,".spawn":59296,"==$":59297,"ĠNiet":59298,"Ġveggies":59299,"ypo":59300,"-rule":59301,"ĠVie":59302,"/ep
l":59303,"Ġenfants":59304,"stringLiteral":59305,"Ġtoughest":59306,"buyer":59307,"Ġcovariance":59308,"Ġili":59309,"ĠSophie":59310,"ĠBAB":59311,"Ġ\"),":59312,"ĠUk":59313,"currentIndex":59314,"_userdata":59315,".codec":59316,"ĠPunjab":59317,"ĠSNP":59318,"lol":59319,"advance":59320,"Ġcomfy":59321,"JsonIgnore":59322,"Ġfashionable":59323,"ĠICON":59324,"Ġora":59325,"ĠPricing":59326,"E":59384,"tering":59385,"/screens":59386,"Ġheightened":59387,"аÑĢÑĤ":59388,"Authorities":59389,"_bbox":59390,"ünst":59391,".fontSize":59392,"ĠBOOLEAN":59393,"divide":59394,"ĠSloven":59395,"ucer":59396,"ÙĴ":59397,"stub":59398,"Ġnavigating":59399,":animated":59400,"_NOW":59401,"_vect":59402,"}{Ċ":59403,"@(":59404,"Ġtelecom":59405,"Ġcontracting":59406,"ĠAssange":59407,"Ġextracting":59408,"Ġgrö":59409,"cobra":59410,".DIS":59411,"Ġcrab":59412,"Ġtwitch":59413,"Ġverts":59414,"Ġrejects":59415,"ĉformat":59416,"Ġregeneration":59417,".Sys":59418,"solve":59419,"ĉdialog":59420,"shi":59421,"meter":59422,"(best":59423,"validators":59424,"Ġonwards":59425,"Ġguru":59426,"Ġmoderator":59427,"owied":59428,"experiment":59429,"rub":59430,"Ġmqtt":59431,"ĠCaucas":59432,"Ġnationalism":59433,"Ġmange":59434,"ĉImGui":59435,"/Edit":59436,"Ġinh":59437,"Ġintellig":59438,"erokee":59439,"ĉexport":59440,"Ġdiscriminate":59441,"subtract":59442,"ĠMoodle":59443,"enser":59444,"ĠGuides":59445,"RAP":59446,"-hot":59447,"_grp":59448,".picture":59449,"XA":59450,"ĠinitView":59451,"_Comm":59452,"Ġoverdose":59453,"Ġ+ĊĊ":59454,"ĠSilent":59455,"shows":59456,"Ġinterpolate":59457,"Formation":59458,"Ġbisc":59459,"markets":59460,"(SC":59461,"Ze":59462,"ĠNetworking":59463,"Ġadrenal":59464,"ĠGuns":59465,"eteor":59466,"Declared":59467,"orgetown":59468,"Ġkarena":59469,"/password":59470,"_addresses":59471,"ITERAL":59472,"Buzz":59473,"ĠConway":59474,"(case":59475,"PWD":59476,"heiro":59477,"(act":59478,"**čĊ":59479,"());ĊĊĊ":59480,"Ġanv":59481,"Ġ..ĊĊ":59482,"(MenuItem":59483,"(mail":59484,"_sections":59485,"ĉnet":59486,"Ġplut":59487,"Ġwrench":59488,"/object":59489,"ĠIst":59490,"ĠVIS":59491,"/pub":59492,"alten":59493,"Ġguitars":59494,"Ġantibiotic":59495,"ï¼ĸ":59496,"¹":59497,"Ġ\"+\"":59498,"formula":59499,"Ġbabes":59500,"ĠPrompt":59501,"Ġenim":59502,"/player":59503,"ĉref":59504,"ĠbyÄĩ":59505,"Ġconsumes":59506,"ĠHast":59507,"ĠTao":59508,"Ġ'))Ċ":59509,"Ġclam":59510,"Ġthighs":59511,"Ġmotif":59512,"ApiOperation":59513,"ĠWL":59514,"getC":59515,"ĉflags":59516,"ointments":59517,"Ġeconomical":59518,"needle":59519,"xls":59520,"practice":59521,"utzer":59522,"timeofday":59523,"-output":59524,"ĠfindById":59525,"ĠBuddy":59526,"ÐŀÑĤ":59527,"Seven":59528,"ĠBark":59529,"Ġenvoy":59530,"_algorithm":59531,"åĪ©":59532,"Ġballistic":59533,"ç§»":59534,"rades":59535,"ĉdoc":59536,"roducing":59537,"ĠEating":59538,"Unmount":59539,"/dataTables":59540,"_bonus":59541,"Ġlitt":59542,"pps":59543,")localObject":59544,"perf":59545,"ĠHelvetica":59546,"shutdown":59547,"/ml":59548,".tokens":59549,"ĠHardcore":59550,",row":59551,"/bg":59552,"Scaler":59553,"âĢĶas":59554,"_logits":59555,"âĢĻint":59556,"ĉApp":59557,"Implicit":59558,".Fprintf":59559,"ETO":59560,"Ġterra":59561,"Ġpossessing":59562,".rstrip":59563,",),":59564,"=yes":59565,"ĠStripe":59566,"?=":59567,"neutral":59568,".good":59569,"Ġkennen":59570,"ĠSung":59571,"fault":59572,"ystatechange":59573,"Canadian":59574,"','\".$":59575,"ĠMits":59576,"ænd":59577,"ĠSTRUCT":59578,"ĠURLWithString":59579,"ĠCompass":59580,"Ġ--ĊĊ":59581,"ĠNSLayoutConstraint":59582,"|min":59583,"-adjust":59584,"Ġrebuilt":59585,"LIGHT":59586,"/se":59587,"-mount":59588,"vpn":59589,"validated":5959
0,"(QObject":59591,"Ġignition":59592,"ĠChargers":59593,"RYPTO":59594,"]initWithFrame":59595,"ĠFluid":59596,"Ġcadre":59597,"Ġnominations":59598,"Neill":59599,"ĠHou":59600,"Ġcurrents":59601,"_gene":59602,"(inp":59603,"Paris":59604,"zÄĻ":59605,"aggregate":59606,"Ġassoc":59607,"weeted":59608,"errat":59609,"âĢĵĊĊ":59610,"Ġ'/',Ċ":59611,"fixture":59612,"ĠHighest":59613,"ambient":59614,"Ġchmod":59615,"Ġconte":59616,"Ġsensual":59617,"Ġgarment":59618,"zers":59619,"ĠPowered":59620,"domains":59621,"Reward":59622,"iomanip":59623,"Ġcockpit":59624,"outfile":59625,"Ġbuiltin":59626,"Ġinsisting":59627,".vars":59628,"zipcode":59629,"Ġ����":59630,"fails":59631,"Ġconsolidation":59632,"_oid":59633,"Planet":59634,"Ġ=\",":59635,"ĉel":59636,"UILT":59637,"ätz":59638,"afari":59639,"ĠMcCl":59640,"Timeline":59641,"Esta":59642,"Ġfram":59643,"YE":59644,"Ġcerebral":59645,"OfMonth":59646,"ĠPregn":59647,"ĠклаÑģÑģ":59648,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":59649,"ĠFres":59650,"Approved":59651,".Special":59652,"ĠProtestant":59653,"Ġallergy":59654,"_pcm":59655,"ĉCopyright":59656,"ĠsuperClass":59657,"\"strconv":59658,"ĠMohamed":59659,"Ġ'//":59660,"ForeColor":59661,"Arthur":59662,"ĠJungle":59663,"Ġveins":59664,"Sad":59665,"Ġbackups":59666,"ĠOpinion":59667,"ût":59668,"Ġintermitt":59669,"odyn":59670,"ĠChristina":59671,"Ġandre":59672,"Ġevacuation":59673,"palette":59674,"horse":59675,"ĠResident":59676,"ĠHassan":59677,".Nil":59678,"Ġaisle":59679,"ĠGrowing":59680,"Ġbloginfo":59681,"/sql":59682,"_ioctl":59683,"Scaling":59684,"ĠMonad":59685,"_cpp":59686,"ĠHutch":59687,"ĠAppleWebKit":59688,"Expense":59689,"_JOB":59690,"Ġpointless":59691,"FromBody":59692,"antal":59693,"Ġdepicting":59694,"ĠCELL":59695,"Ġrefin":59696,"ĠCNC":59697,"ì¹ĺ":59698,"_dimensions":59699,"ĠSAN":59700,"Ġaft":59701,"Ġfootsteps":59702,"ccoli":59703,"_PHONE":59704,"/math":59705,"-kind":59706,"ĠMeans":59707,"ichael":59708,".guna":59709,"Ġinauguration":59710,"-driving":59711,"(delete":59712,"ĠtotalCount":59713,"_MC":59714,".Extension":59715,"Commercial":59716,"ĠzIndex":59717,"$":59849,"Ġebay":59850,"Ġcaptive":59851,"pliant":59852,"ĠCalculates":59853,"olta":59854,"esting":59855,"_revision":59856,"Ġmús":59857,"+m":59858,"\",\"\",\"":59859,"WHAT":59860,"Ġcompassionate":59861,"harga":59862,"[random":59863,"Ġmodulo":59864,"(sn":59865,"Ġoccupations":59866,"////Ċ":59867,"ĉboard":59868,"ĠBalk":59869,"wiÄħ":59870,"ĠWifi":59871,".Profile":59872,":maj":59873,"ĉmat":59874,"LOCKS":59875,"(jButton":59876,"Ġ('$":59877,"Mur":59878,"æĮī":59879,"bble":59880,"Ġfrog":59881,"-hide":59882,"Ġbroadcaster":59883,"à¸ŀ":59884,"haled":59885,"Ġamusing":59886,"_predictions":59887,"_intr":59888,"Ġeagle":59889,"аÑĤелÑĮ":59890,"ĠgetList":59891,"psilon":59892,"Ġcharacterization":59893,"ARDS":59894,"Ġrelocation":59895,"Ġrulers":59896,"PAY":59897,"ĠDefinitely":59898,"_Action":59899,"Ġclosures":59900,"Ġfactual":59901,"odynamic":59902,"Ġprecautions":59903,"niej":59904,"ĠParties":59905,"ĠSubaru":59906,"Ġcousins":59907,"arbeit":59908,".money":59909,"gunta":59910,"(and":59911,"getitem":59912,".StylePriority":59913,"Ġslid":59914,"singleton":59915,"Ġgarn":59916,"ĠPAS":59917,"Ġdazz":59918,"aż":59919,"Ġbogus":59920,"ĠMog":59921,"Ġrivalry":59922,"isol":59923,"Ġlandmarks":59924,"ñas":59925,"Bern":59926,"ĠSachs":59927,"Ġ\")ĊĊ":59928,"Ġhostility":59929,"_mex":59930,"mere":59931,"Mot":59932,"pictureBox":59933,"Defense":59934,"Ġaffidavit":59935,"otherwise":59936,".directory":59937,"_UnityEngine":59938,"-blog":59939,".skin":59940,"phem":59941,"Apellido":59942,"erchant":59943,"[class":59944,"Ġwart":59945,".\"[":59946,"aleur
":59947,"/back":59948,"ĠĠĠĠĉĠĠĠ":59949,"Ġprecipitation":59950,"Ġobstruction":59951,"ĠpObj":59952,"Ġrupt":59953,"UCKET":59954,"aye":59955,"æİĴ":59956,"gx":59957,"Ġecl":59958,"Ġsecrecy":59959,"/Header":59960,"ĠLesb":59961,"Ġlei":59962,"ĠBulletin":59963,"Ġgiveaway":59964,".Home":59965,"_ROOM":59966,"\"W":59967,"Ġcowork":59968,"_ra":59969,"ĠCycling":59970,"ĠPaw":59971,"Ġpupil":59972,"/arch":59973,"ĠFileUtils":59974,"é¦ĸ":59975,"rsp":59976,"Ġfreedoms":59977,"ĠLear":59978,"}`).":59979,"Ġbowls":59980,"/block":59981,"_logging":59982,"Ġmethane":59983,"Ġhorns":59984,"Ġwonderfully":59985,"Ġalterations":59986,"Ġexile":59987,"lsen":59988,"_pause":59989,"_LANGUAGE":59990,"ĠUSDA":59991,"_mysql":59992,"_AMOUNT":59993,"ĠLIFE":59994,"Ġyoungsters":59995,"Ġriots":59996,"[E":59997,"Ġunforgettable":59998,",},Ċ":59999,"Disposed":60000,"ĠAssassin":60001,"UNG":60002,"ĠNewsp":60003,"UserService":60004,":aload":60005,"+',":60006,"Ġsettlers":60007,"Ġscreams":60008,"Ġinconvenience":60009,".Rotate":60010,"Ġjars":60011,"ĠPuzzle":60012,"Ġmest":60013,"arsi":60014,"ĠSharma":60015,"|(":60016,".ds":60017,"ĠSacred":60018,"_evt":60019,"Ġexpresses":60020,"Ġhoch":60021,"ĠDuch":60022,".calls":60023,"thr":60024,"ĠSheffield":60025,".AlertDialog":60026,"Ġradically":60027,"Ġtrous":60028,"Ġprevailing":60029,"ĠWWII":60030,"âĢĻn":60031,"ensely":60032,"ĠYesterday":60033,"ĠSirius":60034,"Ġkillers":60035,"ĠFFT":60036,"Ġoval":60037,"'):čĊ":60038,"Ġìłķë³´":60039,"ourage":60040,"ĠCheckbox":60041,"Workbook":60042,".defer":60043,"_floor":60044,"Ġcouncill":60045,"Ġnorske":60046,"moil":60047,"orea":60048,"Ġmarketed":60049,"_SUR":60050,"xAA":60051,"Ġstained":60052,"eut":60053,"ĠMeng":60054,"Ġieee":60055,".extern":60056,"egie":60057,"Ġrapp":60058,"ĠPyongyang":60059,"'class":60060,"Mob":60061,"ĠinitialValue":60062,"_wave":60063,"Ġjab":60064,"Ġmasculine":60065,"Ġamplifier":60066,"Ġtty":60067,"PathComponent":60068,"_xt":60069,"ĠGFP":60070,"/sec":60071,"ĉdispatch":60072,"markdown":60073,"ĠSchn":60074,"bole":60075,"··":60076,"mousemove":60077,"ĠerrMsg":60078,"Ġasign":60079,"_mono":60080,"ToSelector":60081,"ĠZu":60082,"(Rect":60083,"ĠErrorCode":60084,"latin":60085,"angible":60086,"vtk":60087,"CGSize":60088,"Pokemon":60089,"Ġclassmates":60090,"Ġattracts":60091,"ĠTatto":60092,"ultan":60093,"ológ":60094,"Ġhalted":60095,"न":60096,"ĠKart":60097,"Ġue":60098,"_InitStructure":60099,"TestClass":60100,"ĠAirbnb":60101,"_\",":60102,"Ġcharcoal":60103,"Ġipc":60104,"ĠStretch":60105,".glide":60106,"latesAutoresizingMaskIntoConstraints":60107,"Ġpotion":60108,"ITTLE":60109,"Ġcountert":60110,"_hd":60111,"prepared":60112,"Ads":60113,"ĠVampire":60114,"robots":60115,".CreateIndex":60116,"StatusLabel":60117,"Ġtucked":60118,"afür":60119,"Ut":60120,"Ġsweater":60121,"_FN":60122,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĉ":60123,"ataka":60124,"Ġeyebrows":60125,"acoes":60126,"uden":60127,".LinearLayoutManager":60128,"Ġsway":60129,"Ġmultin":60130,"())))Ċ":60131,"ĠNSUInteger":60132,"ĠMyBase":60133,"Partner":60134,"utschen":60135,"ĠCater":60136,".setBackgroundColor":60137,"Ġaccomplishment":60138,"_problem":60139,".dtd":60140,"ĠpageNumber":60141,"Ġjackets":60142,"Ġcropped":60143,"uels":60144,"ĠHep":60145,"Ġcapped":60146,"*Math":60147,"_callbacks":60148,"Ġpubb":60149,"ĠBrunswick":60150,".respond":60151,"[\"_":60152,"Ġbedding":60153,"hythm":60154,"OX":60155,"(speed":60156,"Ġpesticides":60157,"Ġ-------":60158,".Blue":60159,"Ġnoodles":60160,"ĠGoes":60161,"Ġsaver":60162,"oxy":60163,"_completion":60164,"ĠSwinger":60165,"ĠgetDate":60166,"Ġminded":60167,"integration":60168,"ĠLotus":60169,"(stop":60170,"(',');Ċ":60171,"
Ġfloods":60172,"ĠWorkflow":60173,"Ġerupted":60174,"Macro":60175,"ĠSauce":60176,"ĠeventName":60177,"\\Input":60178,"Breaking":60179,"ĉwhen":60180,"_pw":60181,"INDER":60182,"ĠWellness":60183,"Ġvoxel":60184,"ĠMell":60185,"ĠMEDIA":60186,"SENS":60187,"ĠFunds":60188,"ĠMild":60189,"Ċ":60198,"Ġtempting":60199,"Ġtestament":60200,"Ġbible":60201,"Ġconsulted":60202,"ĠIndexError":60203,"è¨ĺ":60204,"Ġkeypad":60205,"izzo":60206,"(ok":60207,"Ġwhatsapp":60208,"ĠRemoteException":60209,"Ġteamed":60210,"âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ":60211,"»,":60212,"ĠgetTime":60213,"diag":60214,"issy":60215,"Ġhed":60216,"Ġknots":60217,"jom":60218,"Ġfunnel":60219,"-mails":60220,"Ġexporting":60221,"ĠVL":60222,"ĠKarn":60223,"ĠBuddhism":60224,"ĠAllan":60225,"_RADIUS":60226,"Ġwording":60227,"ĠForget":60228,"ĠCorona":60229,"iphy":60230,"Ġlimburg":60231,"uggy":60232,"ĠUserRepository":60233,"imin":60234,"(ele":60235,"Ġlabelled":60236,"社":60237,"ĠHerman":60238,".qq":60239,"Ġ\"));Ċ":60240,"ieber":60241,".Translate":60242,"ryn":60243,"Ġdesenv":60244,"umd":60245,"Simply":60246,"ĉmode":60247,"Rpc":60248,"ĠValencia":60249,"Ġstaffers":60250,"Ġselv":60251,"ĠSpike":60252,"Ġdelic":60253,"Ġeru":60254,"_DT":60255,"Judge":60256,"á»ķ":60257,"ĠBasin":60258,".mutable":60259,"\"url":60260,"Ġtariff":60261,"ĠSleeve":60262,"Ġflare":60263,".dropout":60264,"Ġbrides":60265,")),čĊ":60266,"_constraints":60267,"destruct":60268,"Outline":60269,"Ġdisappears":60270,"_locked":60271,"ĠNSLocalizedString":60272,"cke":60273,"ĉnull":60274,"adresse":60275,"Ġtopping":60276,"ĠJoker":60277,"bishop":60278,"ноÑģÑĤÑĮ":60279,"andering":60280,"_amp":60281,"=time":60282,"_Space":60283,"_PULL":60284,"'=":60285,"Ġantiqu":60286,"Ġcach":60287,"___ĊĊ":60288,"ONES":60289,"оÑı":60290,"Ġunread":60291,".policy":60292,"oooooooo":60293,"룬":60294,"Ġusted":60295,"ĠRece":60296,"Ġallem":60297,"ãĥ¼ãĤ¹":60298,"ĠThoughts":60299,"veillance":60300,"istrate":60301,"_lane":60302,"Ġfamed":60303,".GetName":60304,"Ġsmoother":60305,"ĠQualified":60306,"azers":60307,"_geo":60308,"Fax":60309,"ĠMinds":60310,"ĠRaises":60311,"Ġtranscripts":60312,"Conversation":60313,"Ġremarked":60314,"ëĤĺ":60315,"dling":60316,"Ġdeploying":60317,"ĠsharedApplication":60318,"Ġkp":60319,"FontAwesomeIcon":60320,"_dummy":60321,"reiben":60322,"ĠJaneiro":60323,"Directions":60324,".getBean":60325,"sass":60326,"Ġcommanders":60327,"vation":60328,"errorCode":60329,"ĠAlloy":60330,".localized":60331,"Ðij":60332,"Ġdishwasher":60333,"ĠSoup":60334,"Nu":60335,"_Default":60336,"Ġuneven":60337,"Ġ/>\";Ċ":60338,"-Based":60339,"Ġseamlessly":60340,"-null":60341,"ĠXC":60342,"Ġstew":60343,"(delay":60344,"ATORS":60345,"ĠWheeler":60346,"\"H":60500,"east":60501,".air":60502,"âĢľBut":60503,"ObjectContext":60504,"successfully":60505,"_land":60506,"Ġfolds":60507,"_COORD":60508,"Ġsubpo":60509,".getAddress":60510,"instr":60511,"Materials":60512,"ÑĥÑģÑĤ":60513,"deposit":60514,"-last":60515,"_GRAY":60516,"=find":60517,"Ġmutant":60518,"Ġlesbienne":60519,"letcher":60520,"ROUGH":60521,"ureka":60522,".capture":60523,"Ġenn":60524,"Ġ([[":60525,"ĠFlu":60526,"ĠtaskId":60527,"ĠHussein":60528,".folder":60529,"Ġausterity":60530,"ISTRATION":60531,"_Impl":60532,"注æĦı":60533,"Ġdecree":60534,"-chat":60535,"Ġimplication":60536,"Ġguesses":60537,"ulkan":60538,"Analytics":60539,".plus":60540,"COMMAND":60541,"ели":60542,"»ĊĊ":60543,"_SITE":60544,"ĠequalTo":60545,"SupportFragmentManager":60546,"ĠRecording":60547,"å®ĮæĪIJ":60548,"Ġbaggage":60549,"Ġpitchers":60550,"ĠEh":60551,"oque":60552,"ĉcnt":60553,"Ġ=>$":60554,"/foo":60555,"IRA":60556,"ĠSatellite":60557,"borah
":60558,"Ġ}}\"Ċ":60559,"ĠEnds":60560,"ĠSpray":60561,",param":60562,".Chrome":60563,"*q":60564,"thought":60565,"ibrated":60566,"Ġthieves":60567,"Ġbeneficiaries":60568,"Entered":60569,"ottesville":60570,"Ġveterin":60571,"ByID":60572,"quipe":60573,"umption":60574,"-unit":60575,"ExecutionContext":60576,"@s":60577,"ĠGiov":60578,".ToolTip":60579,"_friend":60580,"(attributes":60581,"Ġdumping":60582,"ĠJC":60583,"_DOCUMENT":60584,"ĠArmour":60585,"(insert":60586,".HorizontalAlignment":60587,"ĠQed":60588,"ãģĦãģ¾ãģĻ":60589,"/git":60590,"ĠYYYY":60591,"ĠCardiff":60592,"Ġapa":60593,"organic":60594,"ĠWhereas":60595,"ĠæĿ":60596,"ĠMia":60597,"Ġdemolition":60598,"Ġscars":60599,"Ġpai":60600,"Ġretries":60601,"Ġrq":60602,"ĠDenis":60603,"(Utils":60604,"Ġalleviate":60605,"ĠPIC":60606,"idue":60607,"Ġacknowledging":60608,"Ġ//////////////////////////////////":60609,"ç¡®å®ļ":60610,"Ä«":60611,"\\Json":60612,".binary":60613,"Ġxtype":60614,"signals":60615,"ĠAppearance":60616,"&r":60617,"}s":60618,"Ci":60619,"ĠIllum":60620,"porate":60621,"hog":60622,"ĠindexOf":60623,"\\Command":60624,"_parallel":60625,"ĠSherlock":60626,"íĥ":60627,"Ġ\"\")čĊ":60628,"////////////////////////////////////////////////////////////////////////////////////////////////":60629,"Ġcriticize":60630,"ĠSoap":60631,"ĠMatcher":60632,"Ġgrilled":60633,"*T":60634,"Ġadore":60635,"ulling":60636,"Ġjedoch":60637,"_refs":60638,"leanup":60639,"ĠJAXB":60640,"Ġroses":60641,"ĠLiam":60642,"sizei":60643,"Ġgetchar":60644,"Ġtarde":60645,"-tooltip":60646,"Ġqualifier":60647,"ĠIntermediate":60648,"_Window":60649,"ĠMalta":60650,"Disconnect":60651,"ewhere":60652,"Campo":60653,"Ġirrational":60654,"ledo":60655,"ĠDN":60656,"ARGV":60657,"Ġoutro":60658,"Ġthirteen":60659,"Joseph":60660,"MAR":60661,"/gl":60662,"Jess":60663,"ĠPsychiat":60664,"ĠpaddingBottom":60665,"-loop":60666,"/fonts":60667,"_seen":60668,"Teams":60669,"ReactDOM":60670,"(man":60671,"(xpath":60672,".getSimpleName":60673,">(*":60674,"ĠPvt":60675,"Ġelders":60676,"Ġpies":60677,".userAgent":60678,"-region":60679,"ĠGreeks":60680,"(fragment":60681,"stu":60682,"Ġcouncils":60683,"Ġstamina":60684,"ĠGoddess":60685,"西":60686,"Ġphilosophers":60687,"Ġpersone":60688,"ĠLose":60689,"ĠCLR":60690,"ĠDocs":60691,"Ġsoak":60692,"ĠHOLDER":60693,"Ġbells":60694,"hashCode":60695,"RATE":60696,"_WEIGHT":60697,"inous":60698,"endra":60699,"ophobic":60700,"Ġprose":60701,"Ġfinely":60702,"/oauth":60703,"(space":60704,"adge":60705,"ĠMama":60706,"ĠstringBuffer":60707,"Ġstint":60708,"Ġmisma":60709,"Ġvillains":60710,"ĠCrimea":60711,"Ġdiploma":60712,"ĠпоÑģл":60713,"ĠBea":60714,"(join":60715,"Ġíķ´":60716,"CHAT":60717,"pering":60718,"ĠCros":60719,"Ġmonkeys":60720,"Ġpreds":60721,"yla":60722,",,,":60723,"Ġvibrator":60724,"ĠNU":60725,"åħĪ":60726,"fant":60727,"zet":60728,"Ġbietet":60729,"unft":60730,"sworth":60731,".Flow":60732,"Ġpsyched":60733,"ĠContinental":60734,">t":60735,"Ġquilt":60736,".UP":60737,"Ġexpansive":60738,"Dispose":60739,"(language":60740,"Caps":60741,"_ZONE":60742,"Ġrecycle":60743,"ĠManaged":60744,"currentColor":60745,".broadcast":60746,"signIn":60747,".prom":60748,"llu":60749,"ueblo":60750,"Ġpunches":60751,"Ġautomat":60752,"Ġassigning":60753,"ĠcreateUser":60754,"ĠAllied":60755,"Ġconductor":60756,"Ĥ¨":60757,"Ġsaddle":60758,"Ġdni":60759,"omedical":60760,"-West":60761,"PositiveButton":60762,"Ġitalic":60763,"?[":60764,"(trigger":60765,"Ġelephants":60766,"\":\"\",\"":60767,"Ġcaliber":60768,"rafted":60769,"digits":60770,"Ġmarshal":60771,"milliseconds":60772,"markers":60773,"mom":60774,"/place":60775,"Ġholistic":60776,":t":60777,"#,":60778,"Ġboto":60
779,"Ġnausea":60780,"ĠShooting":60781,"itech":60782,"ĠtextStatus":60783,"())Ċ":61004,"ADDRESS":61005,"BST":61006,"etzt":61007,"ĠQgs":61008,"Sense":61009,"ExceptionHandler":61010,"ĠChu":61011,".getOwnProperty":61012,"Ġexercised":61013,"iotic":61014,"ĠReleases":61015,"Ġpinterest":61016,"olie":61017,"isoft":61018,"Ġsequencing":61019,"Ġpadre":61020,"]));čĊ":61021,"(radius":61022,".med":61023,"ainties":61024,".ObjectModel":61025,"Ġemple":61026,"Ġseguro":61027,"Stars":61028,"Ġqualitative":61029,"lemn":61030,"á»±":61031,">\").":61032,"Ġgx":61033,"-cert":61034,"ĠASTM":61035,"Ġfullname":61036,"Ġtelemetry":61037,"ĠCambodia":61038,"_ul":61039,"ĠClare":61040,"CUSTOM":61041,"QC":61042,"ĠUns":61043,"ĠHTTPS":61044,"ĠParkinson":61045,"ancybox":61046,"','.":61047,"Tue":61048,".getLast":61049,"Ġabi":61050,"Äħd":61051,"Ast":61052,"ĠEditing":61053,".Unity":61054,"jmp":61055,"Ġmats":61056,"ĠsharedPreferences":61057,"Captain":61058,".pageSize":61059,"Ġrtl":61060,"Ġanmeld":61061,"RuntimeObject":61062,"Ġdemande":61063,"(\";":61064,"seite":61065,"-headed":61066,"ĠKra":61067,"ĠFONT":61068,"`\\":61069,"ClassNotFoundException":61070,".avg":61071,"atical":61072,"Aj":61073,"Ġpermitting":61074,"Proj":61075,"ERRQ":61076,"Ġcreampie":61077,"ĠBuyer":61078,"-modules":61079,"ĠSundays":61080,"|`Ċ":61081,"Ġdaytime":61082,"Ġ+(":61083,"Ġglitch":61084,"ĠOperand":61085,"Ġtoxins":61086,"inya":61087,"DNS":61088,"ĠSas":61089,"Cake":61090,"ĠNationals":61091,".addTo":61092,"Ġsinking":61093,"Ġcomprehension":61094,"Ġscor":61095,"agements":61096,"Ġtard":61097,"Ġmarching":61098,"ĠMTV":61099,"Ġsane":61100,"CreateInfo":61101,"ắ":61102,"ĠendIndex":61103,"ĉlayout":61104,"ĠåIJį":61105,"SITE":61106,"ĠTHERE":61107,"Ġ[{'":61108,"opathic":61109,"Ġtransmitter":61110,"/body":61111,"Ġpund":61112,"ĠClosing":61113,"Ġsetattr":61114,"Ġbounded":61115,"Atlas":61116,"suming":61117,"(times":61118,"parer":61119,"ynom":61120,"feit":61121,"Ġfrem":61122,"-leg":61123,"ĠBras":61124,">#":61125,"Ġì¶ľëł¥":61126,"ĠINSTANCE":61127,"ĠCouch":61128,"_hosts":61129,"likelihood":61130,".Marker":61131,"ĠMasks":61132,"Ġcereal":61133,"utilities":61134,"Ġelemental":61135,"Ġdistorted":61136,"inactive":61137,"cry":61138,"WL":61139,"UPPORTED":61140,".Throws":61141,"/schema":61142,"serie":61143,".\"',":61144,"ĠBenedict":61145,"-picker":61146,"iggs":61147,"ĠPirate":61148,"åij¨æľŁ":61149,"ĠThema":61150,"ĠSouthampton":61151,"ĠarrayWith":61152,"ĠPaula":61153,"Ġpredictor":61154,"-Ass":61155,".userid":61156,"Ġperi":61157,"Ġexaggerated":61158,"urate":61159,"arseille":61160,"ĠConcent":61161,"ĠPik":61162,"Ġ@_;ĊĊ":61163,"Ġformations":61164,"Ġdenomin":61165,"\"/>.Ċ":61166,"endedor":61167,"Ġpancre":61168,"Ġamt":61169,"ĠonResume":61170,"onDelete":61171,"ĠBCH":61172,")(\"":61173,"movement":61174,"Ġpotassium":61175,"":69726,"ĠPPC":69727,"isz":69728,"akeFromNib":69729,"ĠDisp":69730,"ĠAthletics":69731,"Ġnightclub":69732,"GOOD":69733,".setGeometry":69734,"+[":69735,"/send":69736,"Ġbinaries":69737,"Ġráp":69738,":req":69739,"-consuming":69740,"ertime":69741,"UPDATED":69742,"_nullable":69743,"VIN":69744,"ulia":69745,"cyan":69746,"Ġmisunderstanding":69747,"orical":69748,"degrees":69749,"Leading":69750,".AR":69751,"ickest":69752,"Nuevo":69753,"uforia":69754,"Ġgoodies":69755,"Ġfores":69756,"()<<\"":69757,"ademic":69758,"ActionCreators":69759,"servername":69760,"(nt":69761,"dbContext":69762,"Ġairborne":69763,"Ġexhibitions":69764,"cele":69765,"Ġtela":69766,"":69782,".setPreferredSize":69783,"ĠMID":69784,"ĠAless":69785,"Ġhorsepower":69786,"Ġatm":69787,"ĠPackaging":69788,"Ġciphertext":69789,"RequestMethod":69790,
"Ġbeiden":69791,"è£":69792,"ĠPOW":69793,".WriteHeader":69794,"director":69795,"-but":69796,"ãģłãģķãģĦ":69797,"incer":69798,"_dn":69799,"!!!!!":69800,"Ġmanufactures":69801,".TextUtils":69802,"Ġconsciously":69803,"Ġbounced":69804,"culture":69805,"ĠSpar":69806,"ĠPiper":69807,".press":69808,"-owner":69809,"Ġevaluator":69810,"ĠSTREAM":69811,".PictureBoxSizeMode":69812,"Ġsugars":69813,"ScreenWidth":69814,"ĠnextState":69815,"Ġivory":69816,"Ġbrunch":69817,"density":69818,"_OW":69819,"ĠCoronavirus":69820,"ĠCFR":69821,"bak":69822,"\\Category":69823,"æķ°ç»Ħ":69824,"Ġinvokevirtual":69825,"}()Ċ":69826,"Ġsujet":69827,"-marker":69828,"isdigit":69829,"ĠMobil":69830,"ĠJsonRequestBehavior":69831,"_REMOTE":69832,".existsSync":69833,"Ġriches":69834,".presenter":69835,"ĠglColor":69836,"Ġhanya":69837,"Ġfortress":69838,"Ġflashed":69839,"viz":69840,"requently":69841,"buat":69842,"$con":69843,">|":69844,".Func":69845,"Ġhumorous":69846,"uem":69847,".ZERO":69848,"ĠSTL":69849,"ĠBuk":69850,"/sample":69851,"ĠGros":69852,"Recipes":69853,"Ġinflated":69854,"Ġswung":69855,":F":69856,"Facing":69857,".Theme":69858,"ник":69859,"Ġsplendid":69860,"ĠrequestId":69861,".CenterScreen":69862,"/autoload":69863,"embedded":69864,"_depart":69865,"ĠPorts":69866,"à¹ĥ":69867,"айд":69868,"discussion":69869,"_consum":69870,"Ġscouts":69871,"Ġcolabor":69872,".Stage":69873,".nano":69874,"eldorf":69875,"Ġgemacht":69876,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":69877,"Ġpolicymakers":69878,"_PKT":69879,",Th":69880,"oky":69881,"_UID":69882,"Ping":69883,"Ġorchest":69884,"Ġoptics":69885,"uhan":69886,"ĠXOR":69887,"Ġespañol":69888,"ĠAdidas":69889,"rng":69890,"mans":69891,".vstack":69892,"Ġgetaway":69893,"Ġhierarchical":69894,"anoia":69895,"ĠBitmapFactory":69896,"realm":69897,"ĉap":69898,"_apps":69899,"-divider":69900,".drawer":69901,"ĠHARD":69902,"'];?>Ċ":69903,"-packed":69904,"æ²»":69905,"_STRUCTURE":69906,"[Y":69907,"iParam":69908,"(eq":69909,"Ġencompasses":69910,"Ġ\\ĊĊ":69911,"->[":69912,"&utm":69913,"groupon":69914,"strate":69915,"DY":69916,"omorphic":69917,"':[":69918,"Ġgravitational":69919,"ĠMicha":69920,"ĠTencent":69921,"Ġcoached":69922,"ì¶ľ":69923,"ÑĥменÑĤ":69924,"/mobile":69925,"MouseDown":69926,"bud":69927,"ĠYas":69928,"ĠProviders":69929,"NZ":69930,"ĉreport":69931,"errmsg":69932,"ĠimagePath":69933,"acterial":69934,"ĠManga":69935,"wicklung":69936,"(usuario":69937,"\"));čĊčĊ":69938,"/***":69939,"Ġorganise":69940,"Indexed":69941,"_QUAL":69942,"(PyObject":69943,"Ġsurrendered":69944,"POCH":69945,"ĠNOTES":69946,"\\\\\"":69947,"-job":69948,"Ġseventy":69949,"####Ċ":69950,"ĠManor":69951,"Ġdownright":69952,"Ġtimeframe":69953,"insurance":69954,"checker":69955,"ĠSECRET":69956,"Ġechoes":69957,"ĠCarmen":69958,".setHorizontalAlignment":69959,"ĠisChecked":69960,"ĠTOR":69961,"_nn":69962,"('(":69963,"FetchRequest":69964,"ĠPrinted":69965,"Fluid":69966,"ĠSTACK":69967,"GES":69968,"aigned":69969,"igor":69970,".Unknown":69971,"CBC":69972,"ĠCarlson":69973,".URI":69974,"Ġplight":69975,"/start":69976,"ĠPersonnel":69977,"ĠPREFIX":69978,",**":69979,"Ġlimite":69980,"_heat":69981,"%ï¼Į":69982,"ĠDonne":69983,"getNode":69984,"ĠScientology":69985,"Ġcomet":69986,"Ġwenig":69987,"Aside":69988,"ĠMPEG":69989,"'?":69990,"variably":69991,".endDate":69992,"Ġuncont":69993,"ĠScores":69994,"ĠLoginForm":69995,".generated":69996,",ch":69997,"-mar":69998,"ĠNed":69999,"ĠeventId":70000,"+p":70001,"ĠSIN":70002,"/reset":70003,".REACT":70004,"ĠMessi":70005,"_RANK":70006,".writeFile":70007,"Ġcripp":70008,"esthetic":70009,"ERSIST":70010,"Ġreimbursement":70011,"CurrentValue":70012,"Ġunin":70013,"DownLatch":70
014,"ĠpaddingRight":70015,"Ġstocked":70016,"/'.":70017,"Ġrepayment":70018,"trak":70019,"/backend":70020,"Ġизмен":70021,"CSR":70022,"Ġpreventive":70023,"Ġpantalla":70024,"_trim":70025,"Pedido":70026,"hospital":70027,"Ġmanageable":70028,"routeParams":70029,"textures":70030,"......ĊĊ":70031,"Ġsélection":70032,"NameValuePair":70033,"Ġpollut":70034,"Modes":70035,"ĠLaud":70036,"jay":70037,"ĠUrs":70038,"Ġsigner":70039,"ĠJJ":70040,"ĠCherokee":70041,"_EXISTS":70042,"Ġdwar":70043,"Ġ($('#":70044,"Ġreef":70045,">{$":70046,"ĠBaylor":70047,"ĠModelState":70048,"-_":70049,"ĠStructures":70050,"Ġsouvent":70051,"Specify":70052,"(pipe":70053,"Ġfracking":70054,"ĠGPA":70055,"Ġbele":70056,"ĉĉĉĉĉĉĉĠĠĠ":70057,"ĠMinority":70058,"Ġtud":70059,"Ġopenness":70060,"ĠIllustrated":70061,"Ġoxidation":70062,"ĠNK":70063,"ĉUpdate":70064,"ĠEMS":70065,"ĠTeddy":70066,"Ġgenerals":70067,"ĉMat":70068,"Ġradios":70069,"ĠAntique":70070,"conomy":70071,"ĠSquadron":70072,")','":70073,"声":70074,"Ġyoure":70075,"ĠMainPage":70076,"Ġbehaviours":70077,"enght":70078,"(@\"%@\",":70079,"Ġtestcase":70080,"ĠCompilation":70081,"Ġflavours":70082,"ĠExtend":70083,"illator":70084,"Ġcoh":70085,"Ġspline":70086,"ĠKG":70087,"-pay":70088,"Ġcommunism":70089,"ĠBusinesses":70090,"ocking":70091,".MaxLength":70092,"assandra":70093,"quiring":70094,"adden":70095,"ĠJeb":70096,"_fault":70097,"[file":70098,"Ġprominence":70099,"disciplinary":70100,"âĢĶthey":70101,"_extent":70102,"ĠVIC":70103,"Ġentails":70104,".partner":70105,"Ġhippoc":70106,"League":70107,"çĶ·":70108,"wipe":70109,"-spinner":70110,"Ġsalute":70111,"ĠSurgical":70112,"(outputs":70113,"worked":70114,"[strlen":70115,"appointed":70116,"ĠHeg":70117,"ĠACPI":70118,"([^":70119,"uala":70120,"_tol":70121,"ĠRit":70122,".Payment":70123,"kowski":70124,"Ġwalmart":70125,"requirements":70126,"ĠFINSEQ":70127,"_BACKGROUND":70128,"ĠOsborne":70129,"(errorMessage":70130,"Reporting":70131,"Ġauctions":70132,"Ġcombos":70133,"ĠNoticed":70134,"_oct":70135,"Ġprimero":70136,"taire":70137,"_hr":70138,"Ġмод":70139,"Ġcontradictory":70140,"=\"@":70141,"achines":70142,"(optarg":70143,"ĠPenguin":70144,"ĠAbbas":70145,"Ġsublime":70146,"Ġpageable":70147,"ĠDefensive":70148,"Ġdistinctly":70149,"ĠAutomatically":70150,"Understanding":70151,"EqualityComparer":70152,"gota":70153,"Ġ\"::":70154,"Ġpulver":70155,"ĠBattles":70156,"Ġunparalleled":70157,"TCHA":70158,"Ġconstrued":70159,"-aff":70160,"Ġprecursor":70161,"-lfs":70162,"Ġmaduras":70163,"ĠDaisy":70164,"ĠArbeits":70165,".Management":70166,"ĉIn":70167,"Ġrobes":70168,"Ġspéc":70169,"âĢľ(":70170,"Ġmaternity":70171,"extent":70172,"ĠSpacer":70173,"DidAppear":70174,"ĉus":70175,".getRequestDispatcher":70176,"(cols":70177,"Ġplummet":70178,"ìħ":70179,"Ġ{ĊĊĊĊ":70180,"érica":70181,"ĠSizes":70182,".enum":70183,".Highlight":70184,"Ġ!!}ĊĊĊ":70193,"Wenn":70194,"Ġclimax":70195,"Ġcrem":70196,"_that":70197,"[â̦":70198,"_domains":70199,"_REPLY":70200,"Ġcompleta":70201,"VEST":70202,"_particle":70203,"Ġsop":70204,"Ġfatalities":70205,"implify":70206,"ĠSKF":70207,"Ġinfusion":70208,"ĠJavier":70209,"Ġballet":70210,"Ġamigo":70211,".want":70212,"Ġcollagen":70213,"ĠLawyer":70214,".Statement":70215,".rt":70216,"baar":70217,"EndPoint":70218,"ĠBek":70219,"SHIP":70220,"Ġpatriarch":70221,"ĠAunt":70222,"_TM":70223,"ĠmÃŃn":70224,"Ġmastered":70225,"WXYZ":70226,"Ġespos":70227,"=logging":70228,"Ġrighteousness":70229,"torrent":70230,"Ġbst":70231,"_CHAIN":70232,"Ġoutskirts":70233,"(rotation":70234,"Ġ'.')":70235,"igrants":70236,"+lsi":70237,"ĠCCTV":70238,"_PHASE":70239,".azure":70240,"_Process":70241,"vae":70242,"ĠTropical":70243,"ĠAnkara"
:70244,"imageView":70245,"_RUNNING":70246,"Ġ*)__":70247,"ến":70248,"(cli":70249,"scatter":70250,"Ġsche":70251,"Registrar":70252,"Ġairing":70253,"Ġpyplot":70254,"isión":70255,"/customer":70256,"Ġsimplement":70257,"Ġclassy":70258,"ĠDWC":70259,"ĠBashar":70260,"ĠDEVELO":70261,"ĠVick":70262,"avail":70263,"ĠHö":70264,"_extend":70265,"drFc":70266,".isNotBlank":70267,"Ġplais":70268,"|}Ċ":70269,"Ġpornofil":70270,"labs":70271,"Ġhaus":70272,"Ġoriginating":70273,"Ġsurrounds":70274,"ĠQUAL":70275,"meg":70276,"/logger":70277,"[obj":70278,"Ġirresponsible":70279,"ĠPublicKey":70280,"HONE":70281,":'/":70282,"ibox":70283,"ĠFVector":70284,"|{Ċ":70285,"ataloader":70286,"hawks":70287,"HDR":70288,"Ġescalation":70289,"ĠPodsDummy":70290,"elite":70291,"Ġpresup":70292,"Cached":70293,">G":70294,".optimizer":70295,"ĠVisible":70296,"´Ģ":70297,"Ġnen":70298,"Ġpcs":70299,"ĠIdle":70300,"[Any":70301,"Ġkeyboards":70302,"ĠCOMPONENT":70303,"Ġtitanium":70304,"(mut":70305,"ĠLedger":70306,"Ġprosperous":70307,"etrofit":70308,"_LL":70309,"_patient":70310,"Ġpdata":70311,"Ġkontakte":70312,"Swipe":70313,"Ġcheerful":70314,"ĠHonduras":70315,"\"][$":70316,"Ġhemorrh":70317,"\":\"+":70318,"Ġleasing":70319,"Ġinstalls":70320,"ĠPax":70321,"ĠLogistics":70322,"Ġkinetic":70323,"ĠPhon":70324,"_movement":70325,"ĉbytes":70326,"Ġcinco":70327,"ĠMadness":70328,"\")+":70329,"ĠJE":70330,"_ij":70331,"SceneManager":70332,"ĠBust":70333,"ptest":70334,"aea":70335,"Ġbesser":70336,"ÃŃg":70337,"дин":70338,"(tasks":70339,"(\"(\"":70340,"setType":70341,"(outfile":70342,"ĉreset":70343,"ĠARC":70344,"Ġmúsica":70345,"ĠShelf":70346,"ĠminY":70347,"pch":70348,"Ġweiber":70349,"issor":70350,"Ġtrouve":70351,"ĉButton":70352,"Ġregenerated":70353,"Å£i":70354,"imachinery":70355,"blocking":70356,".dataTables":70357,"_frac":70358,"ĠAdvantage":70359,".visitMethod":70360,"éĩįæĸ°":70361,"Ġextrapol":70362,"Ġteasing":70363,"ĠHitch":70364,"ĠGeek":70365,"ESCO":70366,"Ġwich":70367,"ĉax":70368,"_decor":70369,"ĠscreenWidth":70370,"ĠSophia":70371,"Forgot":70372,".uni":70373,"ĠVenture":70374,"_collision":70375,"Ġlawmaker":70376,"(Edit":70377,"blers":70378,"ĠgetNext":70379,"âĢĶyou":70380,"MediaPlayer":70381,"ĠHorde":70382,"ĠCongressman":70383,"observations":70384,"ĉproperty":70385,"Ġ<--":70386,"CreatedAt":70387,"ubyte":70388,"Ġquarantine":70389,"Ġdistressed":70390,"_APB":70391,"ĠGoodman":70392,"ãĤ«":70393,"Ġrecomend":70394,"_PRINTF":70395,"DONE":70396,"Bindable":70397,"rstrip":70398,"centaje":70399,"ĠUnexpected":70400,"ĠSCHOOL":70401,"ĠProfessionals":70402,"ĠGPUs":70403,"Lesson":70404,"Exclusive":70405,"Ġatrav":70406,"ĠDank":70407,"ĠLawyers":70408,"ĠWalton":70409,">[]":70410,"Ġaloud":70411,"=\"../../../":70412,"Ġdebating":70413,"ĠAVG":70414,"_VOL":70415,"/cgi":70416,".deg":70417,":g":70418,".Infof":70419,"MeasureSpec":70420,".song":70421,"mtree":70422,"ulls":70423,"Jordan":70424,"ĠCovers":70425,"Ġattributable":70426,"Ġjedis":70427,"iatrics":70428,"Ġrotterdam":70429,"Ġmeld":70430,"ĠContentType":70431,"Ġmantle":70432,"Ġalice":70433,"_duplicate":70434,"/Internal":70435,"Ġfilesize":70436,"ĉfire":70437,"rese":70438,"ondere":70439,"Ġfamiliarity":70440,"ĠCrest":70441,"Ġkarma":70442,"Ġtorino":70443,"Ġmesa":70444,"/temp":70445,"Ġchir":70446,"ĠOverflow":70447,"Ġtenemos":70448,"unik":70449,"NEXT":70450,"Alle":70451,"Ġnxt":70452,"Mart":70453,"Ġatl":70454,"Ġperiodo":70455,"_you":70456,"Ġ})).":70457,"intestinal":70458,".AdapterView":70459,"Ġhesitant":70460,"Ġcomparatively":70461,".UInt":70462,"(viewModel":70463,"Ġsangat":70464,"ĠResponsive":70465,"ĠZack":70466,"âħ":70467,"JAVA":70468,"ĠFuller":70469,"ĠâĿ¤":
70470,".Consumer":70471,"Ġank":70472,"Ġreactors":70473,"fuck":70474,"_rat":70475,"ĠsessionFactory":70476,"_backward":70477,"Ġscrambled":70478,"ĉth":70479,"Ġinsensitive":70480,"Ġchamps":70481,"Ġnginx":70482,"Ġconhec":70483,"ĠJasper":70484,".fm":70485,"StrictEqual":70486,"achsen":70487,"-Nov":70488,"lassen":70489,".integration":70490,"(lbl":70491,"Compose":70492,"ĠFon":70493,"Ãļ":70494,"Gratis":70495,"ĠLime":70496,"ĠAdapterView":70497,"Ġpoisoned":70498,"anchors":70499,"设计":70500,"']?>\"":70501,"Ġprocur":70502,"Italy":70503,".MONTH":70504,"ĠLUA":70505,"ĠLithuania":70506,"ĠHeads":70507,"_CHUNK":70508,"ĠPUSH":70509,"AspectRatio":70510,"Ġweg":70511,"Ġvids":70512,"ĠWein":70513,"ĉINT":70514,"sessionId":70515,"Industry":70516,"Ġdenounced":70517,"JKLM":70518,"ĠVanessa":70519,".Identifier":70520,"propri":70521,"Ġиг":70522,"Ġtécn":70523,"Ġmosaic":70524,"StreamReader":70525,"-Th":70526,"forth":70527,"Ġadherence":70528,"bate":70529,"Ġknights":70530,"sounds":70531,"Ġsalle":70532,"OMET":70533,"ãĤ¹ãĥĪ":70534,"-tm":70535,"ĠRhe":70536,".FileOutputStream":70537,"åĪĨç±»":70538,"ĠENG":70539,"holiday":70540,"ĠCongratulations":70541,")(Ċ":70542,"Ġaggregates":70543,"HOOK":70544,"ewire":70545,"Senator":70546,"Ġembeddings":70547,"epy":70548,"(COM":70549,"Ġrobber":70550,"äter":70551,"wang":70552,"_teacher":70553,"Ġresentment":70554,"Ġlettuce":70555,"erreur":70556,"(ic":70557,"ĠTactical":70558,"ĠContracts":70559,"Ġmænd":70560,"Ġsitios":70561,"Ġbastante":70562,"Ġnuevos":70563,"ĉNdrFc":70564,"ĠprivateKey":70565,"ucch":70566,"MMdd":70567,"Ġè¾ĵåĩº":70568,"umba":70569,"@foreach":70570,":\");ĊĊ":70571,"Ġslippery":70572,"ĠKeystone":70573,"Ġpioneering":70574,"_triangle":70575,"(\"Ċ":70576,"ĉĉĉĉĉĉĉĉĠĠ":70577,"ĠIntervention":70578,"SCI":70579,"ĠcJSON":70580,"Ġterminating":70581,"ë¹Ħ":70582,"Ġbabys":70583,"Subset":70584,"Ġë¡":70585,"Ġseulement":70586,"Ġmuestra":70587,"Entre":70588,"以ä¸Ĭ":70589,"ngo":70590,"\"bytes":70591,"QRST":70592,"Ġypos":70593,"persona":70594,"ĠDeploy":70595,"cee":70596,"Ġà®":70597,".goal":70598,"Ġhabitats":70599,"ĠisAdmin":70600,"Ġexploiting":70601,"Ġventil":70602,"ĠBalls":70603,"اب":70604,"Ġmindfulness":70605,"(kwargs":70606,"Ġresembling":70607,"Ġchoir":70608,"ĠonBackPressed":70609,"ĠSECURITY":70610,"/gtest":70611,"Ġjustices":70612,"ĠintegerValue":70613,"blah":70614,"ĠAim":70615,"_finalize":70616,"keh":70617,"ĠComplexity":70618,"Ġaugust":70619,"getElementsByTagName":70620,"Ġpreach":70621,"Ġpronunciation":70622,"ĠTrash":70623,"-percent":70624,"_PRIV":70625,"ĠHunts":70626,"ĠCurse":70627,"uellen":70628,"Ġheavyweight":70629,"Xi":70630,"ĉselected":70631,"ĠMcCoy":70632,"å¼Ĥ常":70633,"|=Ċ":70634,"ĠBattlefield":70635,"ItemImage":70636,"Ġdeductions":70637,"ĠElemental":70638,"());//":70639,"ĠBurk":70640,"})čĊčĊ":70641,"swift":70642,"/function":70643,"Usually":70644,"_St":70645,"_feats":70646,"ĠIsValid":70647,"Ġzad":70648,"ImageContext":70649,"Ġclassname":70650,"Ġdonner":70651,"Ġ-->ĊĊĊ":70652,"Ġmotorcycles":70653,"+'/'+":70654,"ĠsetBackground":70655,"\\CMS":70656,".AllArgsConstructor":70657,"ĠLexington":70658,".examples":70659,"ĠPurs":70660,"PushMatrix":70661,"Ġ==============================================================":70662,".addTarget":70663,"pora":70664,"Fullscreen":70665,"Ġgoof":70666,"hlen":70667,"äge":70668,"ĠCURL":70669,"ĠInteresting":70670,"Ġretrieves":70671,"_Obj":70672,"inness":70673,"-----ĊĊ":70674,".tsv":70675,"(IM":70676,"ĠBraves":70677,"_ISR":70678,"osti":70679,"á»ĵ":70680,"ĠExterior":70681,"ĠCourtney":70682,"Ġresidues":70683,"Tier":70684,".*;čĊčĊ":70685,":black":70686,"webView":70687,"\"path":70688,
"Ġmasa":70689,"]!='":70690,"ĠMatching":70691,"dur":70692,"Jvm":70693,"=context":70694,"_RING":70695,"Ġproponents":70696,"ĠQStringLiteral":70697,"Ġinflate":70698,"\">čĊ":70931,"_COST":70932,"ilinear":70933,"ĠWorkspace":70934,"Ġspel":70935,"agogue":70936,"ĠMillennium":70937,"ĠPopulate":70938,"Ġnid":70939,".parseColor":70940,"Solar":70941,"ĠGad":70942,"Ġì¤ij":70943,"ĠKamp":70944,"ĉrm":70945,"Ġbenz":70946,"ĠHonestly":70947,"Ġelectrode":70948,"ĠPrairie":70949,"ĠPROFILE":70950,"ĠOriental":70951,"ĠOLED":70952,"/copyleft":70953,"awaii":70954,"(products":70955,")\\<":70956,"-created":70957,".ManyToMany":70958,"\"How":70959,"ĠвÑĭп":70960,"Ġmitochondrial":70961,"_testing":70962,"(created":70963,"ĠgetField":70964,"_EVAL":70965,"].\"":70966,"ĠFSM":70967,"ĠRita":70968,"ĠåıĤæķ°":70969,"Ġcôt":70970,"ĠInsight":70971,"ĉmysqli":70972,"_timing":70973,"IDO":70974,")))))Ċ":70975,"COVERY":70976,".imag":70977,"CDF":70978,"lust":70979,"ickt":70980,"_FP":70981,".','":70982,"gcc":70983,"Ġkurz":70984,"_pwm":70985,"Ġodpowied":70986,"ĠBarrier":70987,"/***************************************************************************Ċ":70988,"pak":70989,"-Israel":70990,"ĠRutgers":70991,"ĠselectedItem":70992,"ĠRamirez":70993,"Farm":70994,"Ġcalendars":70995,"gzip":70996,"Ġblockbuster":70997,"ĠPlymouth":70998,"çľĮ":70999,"responses":71000,".DialogInterface":71001,"-grand":71002,"ĠgetSource":71003,"Ġdejtings":71004,"Ġtieten":71005,"Ġcondemnation":71006,"Ġcontinuar":71007,".MockMvc":71008,"/english":71009,"ĠMediaPlayer":71010,"computed":71011,"ĠClippers":71012,"(delegate":71013,".Slf":71014,"Ġë¡ľ":71015,"ĠTide":71016,"Ġihrem":71017,"ĠWan":71018,"ÑĥÑİÑī":71019,"}><":71020,"Discussion":71021,"Ġwatts":71022,"-minus":71023,"ĠJuliet":71024,"éĽħ":71025,"Ġconcluding":71026,"andscape":71027,"Ġúltima":71028,"ĠDERP":71029,"ĠsignUp":71030,"ĠSecondly":71031,"WAIT":71032,"lds":71033,".callbacks":71034,"(hour":71035,"imators":71036,"volent":71037,"AAF":71038,"edriver":71039,"ĠMathematic":71040,"'":71042,"{j":71043,"_ABORT":71044,"Ether":71045,"Ġeducator":71046,"Ġprecaution":71047,"Ġfingertips":71048,"getVar":71049,"camatan":71050,"-debug":71051,"ĠRAF":71052,"[arg":71053,"Ġraced":71054,"Ġtsunami":71055,".flink":71056,"Ġglyc":71057,"uko":71058,"ĠMultiply":71059,"Ġredistribution":71060,"AGO":71061,"ĠRoutine":71062,"Ġopr":71063,"(lower":71064,"ĠFunktion":71065,".dk":71066,"Ġegt":71067,"_BASIC":71068,"syscall":71069,"ĠLSD":71070,"ĠDuplicate":71071,"_sell":71072,"ĠerrorHandler":71073,"_ips":71074,"Ġerv":71075,"annie":71076,"(resourceName":71077,"Ġbottled":71078,"Ġcrawling":71079,"egment":71080,".setTag":71081,"Ġrss":71082,"ĠQuarry":71083,"_exact":71084,".jwt":71085,"ĠBoards":71086,"opi":71087,"Ġnasal":71088,"ĠXYZ":71089,".ud":71090,"Northern":71091,"Ġactivating":71092,"edx":71093,"ovah":71094,"Ġindx":71095,"AlertDialog":71096,"Ġtienes":71097,"annya":71098,"_pan":71099,"(decimal":71100,".Dict":71101,"Ġsubsidiaries":71102,"ProductName":71103,"Few":71104,"dato":71105,"odied":71106,"-under":71107,"Ġê²ĥ":71108,"çīĪæľ¬":71109,"atism":71110,"[Math":71111,".'<":71112,"(infile":71113,"Ġdenotes":71114,"$class":71115,"_SECURITY":71116,"Ġsewage":71117,"melon":71118,"(Character":71119,"/github":71120,"Ġglaring":71121,".Guid":71122,"_sparse":71123,"ĠMargin":71124,"_dns":71125,"Ġmeiner":71126,"Ġleftist":71127,"ĉloc":71128,"abytes":71129,"Ġequipments":71130,"expo":71131,"ĠSomerset":71132,"EK":71133,"æį¢":71134,"Ġlecturer":71135,"Ġmemiliki":71136,"æł¸":71137,"ç´ł":71138,"pron":71139,":pointer":71140,"borrow":71141,"ĠProtective":71142,"_cf":71143,"ĠÐķÑģли":71144,"bpp":7
1145,"';ĊĊĊĊ":71146,"aturally":71147,"_NAV":71148,"Ġpeptide":71149,">d":71150,"Ġifstream":71151,"_FACTORY":71152,"');//":71153,"joined":71154,"mong":71155,"Ġtimespec":71156,"Ġdestabil":71157,"Ġautop":71158,"-limit":71159,"publication":71160,"ĠDenn":71161,".Memory":71162,"(skb":71163,"ĠAnaheim":71164,"_RETURNTRANSFER":71165,"oueur":71166,"(_('":71167,"legt":71168,"istingu":71169,"ĉpriv":71170,"Ġredirects":71171,"Mt":71172,"Ġalleen":71173,"ĠPointF":71174,"Ġomin":71175,"Ġcitt":71176,"ĠTage":71177,"ĠWalls":71178,"á»ī":71179,"Ġoccupying":71180,"xBF":71181,"rangle":71182,"Ġrelational":71183,"-org":71184,"Ġjpg":71185,"-derived":71186,"Ġmalfunction":71187,"ĠBenson":71188,"(scroll":71189,"ĠXD":71190,"Holy":71191,"(commands":71192,"Ġtipping":71193,"Ġprimitives":71194,"Ġsexle":71195,"CallCheck":71196,"ĠMASTER":71197,"_TEAM":71198,".setRequestHeader":71199,"_specs":71200,"Ġserge":71201,".Master":71202,"Ġims":71203,".SpringBootTest":71204,"paypal":71205,"ĠWANT":71206,".Inst":71207,"ĠCarpet":71208,"Ġwrongly":71209,"($('.":71210,"Ġbild":71211,".Roll":71212,"ĠUrb":71213,"-can":71214,"ãģıãģłãģķãģĦ":71215,"oliberal":71216,"čĊčĊ":71610,"ĠMahm":71611,"}\";ĊĊ":71612,"Ġdq":71613,"ĠPublishers":71614,"ĠAmpl":71615,"ĠDanielle":71616,"Ġtern":71617,"èµ·":71618,"noÅĽÄĩ":71619,"ein":71620,"ĠAsyncStorage":71621,"unger":71622,"rouw":71623,"Ġscissors":71624,"/assert":71625,".bucket":71626,"/archive":71627,"_Man":71628,"Ġintoler":71629,"Ġ()=>":71630,"ĠÐĴÑĭ":71631,"Ġsai":71632,".xy":71633,".\"čĊ":71634,"Ġurinary":71635,"esub":71636,"ISTICS":71637,"Ġκ":71638,"Ġcompliments":71639,"ĠtypingsJapgolly":71640,"ihar":71641,"Expansion":71642,"ĠServing":71643,"_students":71644,"ĠXBOOLE":71645,"(il":71646,"Ġì²ĺ":71647,"Ġjó":71648,"(tol":71649,"(JS":71650,"ĉCG":71651,"ĠDRAW":71652,"twig":71653,"Ġoat":71654,"_smooth":71655,"ĠCSL":71656,"Ġosob":71657,"Ġensuing":71658,"Ġbanker":71659,"ĠBackpack":71660,"_ping":71661,"Ġwishlist":71662,"=ax":71663,"ĉĠĠĠĊ":71664,"Disney":71665,"steady":71666,"\">%":71667,"Ġprophets":71668,"ĠZX":71669,"Ġminimalist":71670,".PLAIN":71671,"Seattle":71672,".ordinal":71673,"ĠPIPE":71674,"Ġretorna":71675,"Ġjugador":71676,"ĠBret":71677,"ĠâĶľ":71678,"Ġplush":71679,"ULATOR":71680,"Sorting":71681,".gridy":71682,"ectomy":71683,"_activ":71684,"rack":71685,"Interactive":71686,"ĠAntarctica":71687,"Ġvengeance":71688,"enso":71689,"_known":71690,"upplier":71691,".Modules":71692,"ĠConnectionState":71693,"éļIJèĹı":71694,"@FindBy":71695,"Ġplacer":71696,"\\model":71697,"<()>":71698,".isSuccessful":71699,"-good":71700,"bz":71701,"ĠDraco":71702,"Assistant":71703,"-extra":71704,"аблиÑĨ":71705,"Ġhypocrisy":71706,"Ġtst":71707,"ĠAgr":71708,"$txt":71709,"Ġlogistic":71710,"licensed":71711,"ĠHof":71712,"Ġtat":71713,"(iv":71714,"Ġintoxic":71715,"postId":71716,"_strike":71717,"Ġhumiliation":71718,"pcodes":71719,"\"sync":71720,"(recipe":71721,"+N":71722,"rente":71723,"ĉClient":71724,"ycopg":71725,"ĠZurich":71726,"ĠProfiles":71727,"Countries":71728,"Ġpict":71729,"Ġrollout":71730,"requencies":71731,"Ġpatched":71732,"Ġcartridges":71733,"Ġshading":71734,"Jar":71735,"Ġsalvage":71736,"ĠTaxes":71737,"Ġstandby":71738,"aporan":71739,"Eigen":71740,".angular":71741,"ĠNested":71742,"享":71743,"ĠisVisible":71744,"ĠDwight":71745,"_BRANCH":71746,".Delay":71747,"Ġkend":71748,"Ġfacilitated":71749,".flatMap":71750,"Ġsanta":71751,"ĉSend":71752,"/messages":71753,"ĠofType":71754,"ĉswap":71755,"#plt":71756,"ĠTurks":71757,"NES":71758,"Ġprogressively":71759,"ĠResidence":71760,"ĠTREE":71761,"Ġnoen":71762,"dio":71763,"Ġnelle":71764,"Ġsogar":71765,"itti":71766,"weekly":
71767,"Ġambiguity":71768,"_Settings":71769,"Ware":71770,".neo":71771,"_DST":71772,"Ġæĸ¹":71773,"prep":71774,"lobby":71775,"@email":71776,"/movie":71777,"Ġfunkc":71778,"ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĊ":71779,"ÂŃs":71780,"Ġguardians":71781,"-pos":71782,"Ġconfiguring":71783,"ĠCPS":71784,"ĠDeus":71785,"Ġvidéos":71786,"_empresa":71787,"Ġslapped":71788,"',Ċ":71820,"_XDECREF":71821,"ĠBuzzFeed":71822,"_MARGIN":71823,"PLOY":71824,".small":71825,"ĠmimeType":71826,"Ġholog":71827,"ĉcamera":71828,"lias":71829,"Ġsuspense":71830,"odynam":71831,"bau":71832,"Ġgraveyard":71833,"_named":71834,"\":\"'":71835,"Ġ************************************************":71836,"ĠgameOver":71837,"ĠLENGTH":71838,"ĉscreen":71839,"ĠdoInBackground":71840,"_dependencies":71841,"Ġrtc":71842,"/up":71843,"_ROM":71844,"Hall":71845,"Ġdeficiencies":71846,"(te":71847,"'#":71848,"_equiv":71849,"Ġpreorder":71850,"ĠAxe":71851,"омÑĥ":71852,".sendFile":71853,"Ġfilt":71854,"ĠLimits":71855,"ĠCavaliers":71856,".discount":71857,"âĨIJ":71858,"ĠWit":71859,"QRSTUV":71860,"Ġij":71861,"Ġtegen":71862,"Ġ:\",":71863,"difficulty":71864,"punkt":71865,"ĠEmails":71866,"chlor":71867,"(fun":71868,".Uint":71869,"ĠStall":71870,"_verified":71871,"uD":71872,"FileType":71873,"Ġpleasures":71874,"Ġjudiciary":71875,"Ġsham":71876,"ipur":71877,"_PLUS":71878,"offers":71879,"(foo":71880,"_GT":71881,"ĉcore":71882,"ENTION":71883,"ĠLiberation":71884,"CommandLine":71885,"_department":71886,".Ar":71887,"_neighbor":71888,"ĠSubmitted":71889,"ĠĊ":96121,"Ġdroits":96122,"Ġhomosexuals":96123,"Ġabduction":96124,"ĉwidget":96125,"$headers":96126,"ĠDAR":96127,"Ġfla":96128,"threat":96129,"Ġlouis":96130,".GetProperty":96131,"\"Just":96132,"(frames":96133,"ryo":96134,"profession":96135,"|i":96136,"íķ´ìĦľ":96137,"(sv":96138,"Ġunrecognized":96139,"Ionic":96140,"Fashion":96141,"ScreenState":96142,"ĠIncoming":96143,"NotNil":96144,"Ġsyncing":96145,"emie":96146,"Ġthermo":96147,"_procs":96148,"Ġinconsistency":96149,"religious":96150,".mj":96151,"Ġpersonn":96152,"Ġmomentos":96153,"orarily":96154,"ĠæĬ":96155,"_neurons":96156,"Illustr":96157,"imoto":96158,"ilik":96159,"ĠWoj":96160,"Trading":96161,"Ġappare":96162,"Ġentreprises":96163,"achat":96164,"Ġ¬":96165,"Ġneigh":96166,"BUTTONDOWN":96167,"ĠMaher":96168,"aghan":96169,"-hash":96170,"\"f":96171,"Ġclientele":96172,".addButton":96173,"ĉSP":96174,"Qi":96175,"Ġgrated":96176,"POSITE":96177,":>":96178,"ĠHowell":96179,"ĠComparative":96180,"ĠISC":96181,"ÂŃi":96182,"Ocean":96183,"Davis":96184,"ĠFilme":96185,"Wins":96186,"ĠJIT":96187,"occer":96188,"ĠCorm":96189,"ENCHMARK":96190,"rchive":96191,"icação":96192,"Ġmata":96193,"Ġchildbirth":96194,"ĠOptionally":96195,"Ens":96196,"Ġxhttp":96197,"Ġelucid":96198,"_OscInitStruct":96199,"))):Ċ":96200,"Ġintuit":96201,"ĠDonate":96202,"Ġcorrelates":96203,">Delete":96204,"Ġequipe":96205,"Ġboca":96206,"Ġinflatable":96207,"erah":96208,"ĠDateTimeKind":96209,"Ġcalves":96210,"\\Lib":96211,"Ġemlrt":96212,"ĠTrilogy":96213,"ĠPanc":96214,"ĠDuis":96215,"ĠpelÃŃcula":96216,"WARDS":96217,"_DETECT":96218,"-sectional":96219,"dhcp":96220,"ForRow":96221,"-destruct":96222,"ĠPresenter":96223,"/slick":96224,",on":96225,"ĠCitadel":96226,"loggedin":96227,"_subtype":96228,"Ġsigue":96229,"Ġcuring":96230,"ĠFirewall":96231,"Ġfluorescence":96232,"ĠItalians":96233,"иÑĤÑģÑı":96234,".getStyle":96235,"InSeconds":96236,"jie":96237,"-Smith":96238,"Ġxlink":96239,"Ġsubmissive":96240,"онÑĤ":96241,"arbonate":96242,"ĠFaul":96243,"_goals":96244,"ĠCommissioners":96245,"chartInstance":96246,"_POSTFIELDS":96247,"Ġmedial":96248,"Ġmanos":96249,"Ġdelt":96250,"svm":9625
1,".Apis":96252,"ephy":96253,"Ġasympt":96254,"ĠappDelegate":96255,"Ġimprobable":96256,"cka":96257,"simd":96258,"/Error":96259,".âĢĵ":96260,"ĠPTS":96261,"deer":96262,"Ġsina":96263,"magnitude":96264,"IDADE":96265,"']}'":96266,"Ġmayores":96267,"ĉcomment":96268,"/console":96269,"\"@":96270,"volt":96271,".sell":96272,"ĠMacy":96273,"Ġmelod":96274,"Ġimágenes":96275,"_chg":96276,"Ġinout":96277,"idente":96278,")'),Ċ":96279,"dni":96280,".blob":96281,"Ġtypography":96282,"Ġeerie":96283,"_OID":96284,"pesan":96285,"ajan":96286,"Ġchopping":96287,"Ġbluff":96288,"adf":96289,"_bases":96290,".Formatter":96291,"Ġ\\%":96292,"ĠPageInfo":96293,"Carrier":96294,"ĠCalibration":96295,"como":96296,"-bodied":96297,"Ġfinancier":96298,"ĠINA":96299,".ERR":96300,"Ġhoodie":96301,"ĠSanity":96302,"guarded":96303,".opendaylight":96304,"ISMATCH":96305,"Highlights":96306,"ünk":96307,"aniem":96308,"angered":96309,"assignments":96310,"Ġregistrado":96311,"ĠUPPER":96312,"ampilkan":96313,"ashire":96314,"ĠNikola":96315,"ĠCFL":96316,"ĠHDC":96317,"Ġpoids":96318,"ĠIPs":96319,"Ġpreventative":96320,"ipsoid":96321,"ifix":96322,".camel":96323,".ga":96324,"Volumes":96325,"-ste":96326,"Yahoo":96327,"_sibling":96328,"Highest":96329,"optgroup":96330,"Ġkvinna":96331,"âĢĿãĢĤĊĊ":96332,"ĠAppliances":96333,"Ġ\"><":96334,"')\")Ċ":96335,"htt":96336,"ĠIdentified":96337,"Ġpencils":96338,"ĠmemberId":96339,"ĠappendString":96340,".loadData":96341,"ĠmockMvc":96342,"Ġjub":96343,"ĠSlut":96344,"ĠTaipei":96345,"statt":96346,"Polit":96347,"Ġpartager":96348,"DidChange":96349,"Increases":96350,")}.":96351,"ĠBaba":96352,"_CLIP":96353,"[unit":96354,"ĠклÑİÑĩ":96355,"Ġalcuni":96356,"ĠLola":96357,"Ġclinging":96358,"@PostMapping":96359,"(concat":96360,"Ġssid":96361,"ĠFauc":96362,"okit":96363,"ĠRecorded":96364,"ález":96365,"($('<":96366,".assertIsNot":96367,"Ġkali":96368,"Volt":96369,"Ġwarmly":96370,"Ġscares":96371,"getti":96372,"führt":96373,"_does":96374,".EMAIL":96375,"imations":96376,"Ġspringfox":96377,"ĠDecom":96378,"arcy":96379,"Ġglitches":96380,"ĠMoff":96381,"ĠVoll":96382,".between":96383,"Ġcoorden":96384,"ĠParticularly":96385,"GBP":96386,"Ġsemble":96387,"Eastern":96388,"_MSB":96389,"]){čĊ":96390,"morgan":96391,"ĠEVAL":96392,"dere":96393,"HOUSE":96394,"moire":96395,"istique":96396,"_lstm":96397,"-commit":96398,"ysterious":96399,"Ġtwink":96400,"-thumbnails":96401,"enÃŃ":96402,":'',":96403,"Ġblackout":96404,"ĠFloors":96405,"Ġsofas":96406,"Ġoui":96407,"leshoot":96408,"ĠRaq":96409,"-abs":96410,"Ġkra":96411,"Mining":96412,"shaft":96413,".setColumns":96414,"Clazz":96415,"PRETTY":96416,".playlist":96417,"éĸ¢":96418,"-Saharan":96419,"MING":96420,"ĉbl":96421,"è®®":96422,"jf":96423,"DOCKER":96424,"hopefully":96425,"(ignore":96426,"ĠUsersController":96427,"ĠMitarbeiter":96428,"ĠLES":96429,"Hamilton":96430,"-metadata":96431,"ĠKK":96432,"iktig":96433,"Ġwollte":96434,"egrator":96435,"]bool":96436,",current":96437,"ĠvalueType":96438,"Ġexcavation":96439,"oland":96440,"Ġverv":96441,"/filepath":96442,"AuthProvider":96443,"Ġprocrast":96444,"ĉULONG":96445,"_MEMBERS":96446,"Ġuplift":96447,"ĠAutonomous":96448,"Ġartworks":96449,"ĠOutreach":96450,"Ġpore":96451,"Homepage":96452,"DialogTitle":96453,"ĠGenerating":96454,"PARSE":96455,"Ġsemanas":96456,"Ġhumano":96457,"JSGlobalScope":96458,"Ġvolte":96459,"Ġbella":96460,"(isinstance":96461,"Ġplc":96462,"\\Catalog":96463,"Ġesteemed":96464,"鼷":96465,"(suffix":96466,"Ġsweeps":96467,"ĉORDER":96468,"Ġdoivent":96469,"ĠSwarm":96470,"ĠCompiled":96471,"getPage":96472,"ADR":96473,".RichTextBox":96474,"ĠNaming":96475,"agged":96476,"ĠGANG":96477,"rasing":96
478,"odeled":96479,"Ġgala":96480,"ĠJSName":96481,"ddf":96482,"Ġillust":96483,"ĠLansing":96484,"[port":96485,"-death":96486,"Ġdinheiro":96487,"ĠEighth":96488,"Ġbian":96489,"stÃ¥":96490,"Ġversión":96491,"ĠLinearGradient":96492,"ĠHarding":96493,".*)":96494,"eczy":96495,"$header":96496,"ĠvÃ¥r":96497,"Unchecked":96498,"Ġkoje":96499,"ĠPaladin":96500,"())),":96501,"Giving":96502,"()})Ċ":96503,"Ġdips":96504,"Friendly":96505,"Ġportrays":96506,"Ġhelium":96507,"Ġinsurgency":96508,"_expiry":96509,"ĠstringByAppendingString":96510,"Ġaantal":96511,"slope":96512,"mast":96513,".getInteger":96514,"Ġ########################":96515,"_PIPELINE":96516,"Ġdensely":96517,"Ġmutating":96518,"midi":96519,"ĠSeit":96520,"ayne":96521,"NOWLED":96522,"ĠDesmond":96523,"ĠFName":96524,"ĠNairobi":96525,"\\Context":96526,"Ġcalcular":96527,"-den":96528,"Ġcott":96529,"]):čĊ":96530,"ĠRecommendation":96531,"ĠRolex":96532,"ĠvalidationResult":96533,".pat":96534,"ĠnÃły":96535,"ĠRestClient":96536,"ĠGPI":96537,"ĠAsheville":96538,"ĠOSP":96539,"ĠPERMISSION":96540,"ÐĶаÑĤа":96541,"/notification":96542,"Knight":96543,"_Word":96544,"ĠBender":96545,"ranking":96546,"Ġpartida":96547,"_reservation":96548,"ÌĢ":96549,"ĠmName":96550,"Ġgetch":96551,"Ġborr":96552,"Ġdiligent":96553,"Discuss":96554,"æŃ£åľ¨":96555,"apeake":96556,"ioned":96557,"-Nazi":96558,".cum":96559,"ĠKron":96560,"=$('#":96561,"/single":96562,"Ġerotisch":96563,"ĠVib":96564,"Ġratified":96565,"Ġconcerted":96566,"ĠREGARD":96567,"Ġdobr":96568,".DriverManager":96569,"'r":96570,"Portable":96571,"ĉsuite":96572,"Ġrelaciones":96573,"ĠDop":96574,"emploi":96575,"DOB":96576,"Ġcrumbs":96577,"Ġxls":96578,"_Application":96579,"(':',":96580,"Ġ------------------------------------------------------------------------Ċ":96581,"mse":96582,"Ġberk":96583,"ĠReturnValue":96584,"ĠBelly":96585,"Ġcamar":96586,"ĠPeek":96587,"elsing":96588,"Ġnotifies":96589,"ĠTristan":96590,"ĠGAR":96591,"emme":96592,"ĠElevated":96593,"_CSV":96594,"(chalk":96595,"Ġtwenties":96596,"ĠSearchResult":96597,"=search":96598,"ĠMixing":96599,"ýt":96600,"Ġrecruiter":96601,"ĠIDEOGRAPH":96602,"ĠAgo":96603,"(Operation":96604,"$values":96605,"Ġworldly":96606,"ĠRosenberg":96607,"ĠConfigureServices":96608,">*Ċ":96705,"Ġsnork":96706,"_opacity":96707,"ĠinitWithNibName":96708,"iado":96709,"AAC":96710,"Ġ]).":96711,";z":96712,"_paragraph":96713,"Ġnoses":96714,"stands":96715,"ifr":96716,"_mE":96717,"Iraq":96718,".Predicate":96719,"enaire":96720,"]]];Ċ":96721,"Ġunidad":96722,"Ġretirees":96723,"_hello":96724,"Ġmodele":96725,"ĠUITableViewController":96726,"fwrite":96727,"_numero":96728,"_visited":96729,"Ġrecebe":96730,"(Notification":96731,"Fantastic":96732,"_submenu":96733,"ĠPEM":96734,"ĠCupertino":96735,"approximately":96736,"classed":96737,".ReadString":96738,"Ġdomicile":96739,"_PW":96740,"Ġballpark":96741,"ĠKale":96742,"contra":96743,"_favorite":96744,"/of":96745,"Quite":96746,"ĠOTA":96747,"Ġaccelerometer":96748,"didn":96749,"|^":96750,"ĠRohingya":96751,"ivicrm":96752,"annabin":96753,"обÑĭÑĤи":96754,"orado":96755,"')+":96756,"Haunted":96757,",ID":96758,"(UIAlertAction":96759,"urv":96760,"_bel":96761,"ĠMexicans":96762,"/terms":96763,"ĠPainter":96764,"InputLabel":96765,"ĠVinci":96766,"ĠRosie":96767,"\\uc":96768,"":96929,"_gs":96930,"Ġcompil":96931,"nard":96932,"-exc":96933,"Ġrhyme":96934,"Ġbutto":96935,"says":96936,"antasy":96937,"ë¸":96938,"ĠcittÃł":96939,"Ġcheg":96940,"TimeString":96941,"Ġpositivity":96942,"ĠDabei":96943,"Ġwang":96944,"Ġescre":96945,"\"c":96946,"ĉvideo":96947,"ĠRanked":96948,".strings":96949,">>>(":96950,"ĠинÑĤеÑĢ":96951,"Ġresta":96952
,"[:,:":96953,"Ġrendre":96954,"Ġdeser":96955,"Jos":96956,"Ġdisruptions":96957,"ĠопеÑĢ":96958,"sampling":96959,"suppress":96960,"ĠcontainerView":96961,"ĠSeamless":96962,"Ġairy":96963,"Ġonload":96964,".WindowManager":96965,"ĠPLA":96966,"braco":96967,".setPositiveButton":96968,"Ġpdu":96969,"Ġgsi":96970,"ĠCli":96971,"_gradients":96972,"Ñıд":96973,"ĠWhisper":96974,"cstdint":96975,"Ġläng":96976,"Ġformulations":96977,"énom":96978,"ournemouth":96979,"[$_":96980,"Ġordinarily":96981,".setUsername":96982,"Ġfaculties":96983,"MITTED":96984,"/values":96985,"Ġweir":96986,"ĠApt":96987,"MZ":96988,"ĉcf":96989,"ucken":96990,"ĉĉĉĉĉĉĉĉĉĉĉĉĉĉĉĉĉĉĉĉ":96991,"defense":96992,"[iVar":96993,"ĠBusinessException":96994,"Selectors":96995,"(coordinates":96996,"ĠResets":96997,"ĠDrinks":96998,"oleans":96999,"(stypy":97000,"_IOC":97001,".xxx":97002,"ĠSlater":97003,"ĠBelize":97004,"Ġ/************************************************************************":97005,"addin":97006,"_episodes":97007,"Ġischem":97008,"legalArgumentException":97009,"Danny":97010,"Ġpared":97011,".codehaus":97012,"ĠAssy":97013,"ĉRect":97014,"âŀ":97015,".lista":97016,"ĠваÑĪ":97017,"Ġvets":97018,"HWND":97019,"isoner":97020,"Ġxo":97021,"Ġorally":97022,"ĠStmt":97023,".rnn":97024,"ĠDPI":97025,"ĠStrikes":97026,".setViewportView":97027,"ĠèĩªåĬ¨çĶŁæĪIJ":97028,"YELLOW":97029,"GLenum":97030,"partners":97031,"ĠImplicit":97032,"Ġtako":97033,"âĢĻelle":97034,"Ġermög":97035,"totalCount":97036,"Gil":97037,"ĉwork":97038,"Ġpratic":97039,"inati":97040,"abies":97041,"ĠSkinner":97042,"Ġspirited":97043,"Ġpancreatic":97044,"Ġhdf":97045,"'em":97046,"Ġpsychosis":97047,"olicit":97048,"Ġ\"{\"":97049,"_atual":97050,"Ġélect":97051,"TEAM":97052,"Ġdak":97053,"ĠSWAT":97054,".FragmentManager":97055,"Ġprovisioning":97056,"lifetime":97057,"_EXTENSIONS":97058,"ĠCASCADE":97059,"Ġ![":97060,"(KP":97061,"Ġvem":97062,"ĠInterracial":97063,"']},Ċ":97064,"spacer":97065,"_kv":97066,"Warehouse":97067,"RDD":97068,"_fsm":97069,".StretchImage":97070,",Yes":97071,"ĠRefugee":97072,"ĠBringing":97073,"Ġválido":97074,".intersection":97075,"Ġspooky":97076,"_portal":97077,"Ġmoth":97078,"ĠZodiac":97079,"ĠSOCIAL":97080,"MimeType":97081,"']}}":97200,"_Blue":97201,"Ġbotanical":97202,"Ġfrags":97203,"Ġfamilial":97204,"-du":97205,"Ġseizing":97206,"(blocks":97207,".rd":97208,".checkNotNull":97209,"Ġmiser":97210,"Ġmaxx":97211,"ĠKnee":97212,"ViewItem":97213,"InnerHTML":97214,"Danger":97215,"((__":97216,"Ġprzypad":97217,"createUrl":97218,"**,":97219,"ĠDecorating":97220,"ATEGY":97221,"?>/":97222,".Designer":97223,"hexdigest":97224,"ĠEverywhere":97225,"alleries":97226,".TEXTURE":97227,".Blocks":97228,"zell":97229,"Ġpreço":97230,"Suddenly":97231,"inputEmail":97232,"(sync":97233,".bd":97234,"golden":97235,">');":97236,"ĠDickinson":97237,">>(Ċ":97238,"ĠQUEUE":97239,"ĠgetColumn":97240,"ĠSAND":97241,".piece":97242,"licer":97243,"Flutter":97244,"ĠgetVersion":97245,"ĠresourceId":97246,"ogl":97247,"ÅĤaw":97248,".Branch":97249,"ĉweb":97250,"Ġframerate":97251,"PPP":97252,"Ġfray":97253,"CNT":97254,"Ġinformatie":97255,"']čĊčĊ":97256,"neas":97257,"HeaderCode":97258,"Ġæ¸":97259,"Ġtrg":97260,"rawtypes":97261,"Honda":97262,"Ġmarketer":97263,"ĠrequestData":97264,"ĠPg":97265,"ĉnot":97266,"ĠpageInfo":97267,"Ġaktuellen":97268,"ãģķãĤĵ":97269,"ĠAMS":97270,"pushViewController":97271,"ĉAL":97272,"Ġvests":97273,"produce":97274,"-même":97275,"ĠRahman":97276,"Funny":97277,"EZ":97278,"_Valid":97279,"Ġsquadron":97280,"Ġlash":97281,"Ġirm":97282,"iasco":97283,"ĠParan":97284,"Ġpetites":97285,"ĠDecay":97286,"Ġuninitialized":97287,"privileged":97288,"Ġmb
edtls":97289,"å¤ĩ注":97290,"Ġ^.":97291,"Ġecstatic":97292,"Detroit":97293,"Ġparten":97294,"Ġsouvenir":97295,".getLogin":97296,"моÑĤÑĢ":97297,"enção":97298,"ĠmÃŃnimo":97299,"ĠAccessed":97300,"rió":97301,"Mic":97302,"ĠVocal":97303,".SetString":97304,"Ġmensajes":97305,"åĢį":97306,"Ġattravers":97307,"ĠAph":97308,"Ġ');čĊ":97309,"ünde":97310,"Ġenchanted":97311,"ĠRootState":97312,"ĠCLOSED":97313,"ĉĉĉĉĉĉĉĉčĊ":97314,"Ġcaliente":97315,"orris":97316,"Ġphysicists":97317,"hwnd":97318,"_vi":97319,"Ġrápido":97320,"Ġcapitalized":97321,"edBy":97322,"Ġmachining":97323,"Ġhubby":97324,"ĠStacy":97325,".Bus":97326,"drink":97327,"Hur":97328,"Ġpropia":97329,"UnitTest":97330,"Ġmisconception":97331,"__));Ċ":97332,"/dc":97333,"ĠMayweather":97334,"_mC":97335,".createFrom":97336,"ĠQPainter":97337,"ropsych":97338,"innitus":97339,"ayas":97340,"Ġgeg":97341,"(dw":97342,"Ġusado":97343,"Ġtrickle":97344,"Ġannihil":97345,"ĠPasta":97346,"Ġ++Ċ":97347,"(ExpectedConditions":97348,".postValue":97349,"icap":97350,"ĠDonetsk":97351,"_soup":97352,"-publish":97353,"ĠPb":97354,"mentions":97355,"ACCEPT":97356,".Pull":97357,",âĢĻâĢĻ":97358,"Ġretarded":97359,"_ATOM":97360,"ĠTerminator":97361,"-court":97362,"ĠCLLocationCoordinate":97363,"Ġreverence":97364,"ĠSSC":97365,"utely":97366,"ĠWON":97367,"ĠGSL":97368,"frei":97369,".getLongitude":97370,"ĠopenFileDialog":97371,".Butter":97372,"-important":97373,"_MANY":97374,"ĠGong":97375,"âĢľHow":97376,"Ġgorge":97377,"=msg":97378,"ĠEzek":97379,"createCommand":97380,":checked":97381,"Ġinfographic":97382,".WEST":97383,"Dirs":97384,"Ġguarda":97385,"Ġbeetle":97386,"Loading":97460,"_mA":97461,".getRandom":97462,"blings":97463,"Ġcheeses":97464,"tti":97465,".âĢ¢":97466,"ĠBurgess":97467,"enderit":97468,".',čĊ":97469,"(\"\"+":97470,"acb":97471,"%p":97472,"indexed":97473,"_predicate":97474,"nesia":97475,"Ġbied":97476,"ĠCIT":97477,"(Pos":97478,"_radi":97479,"ä»·æł¼":97480,"Biz":97481,"ĠAdolescent":97482,"Ġviên":97483,"cycl":97484,"_Cancel":97485,"Ġconclusive":97486,"Ġappellate":97487,"informatics":97488,"SJ":97489,"Ġelective":97490,"roleId":97491,"Fetcher":97492,"ĉCommand":97493,"(\"(%":97494,"Ġfart":97495,"ILA":97496,"getBlock":97497,"AUSE":97498,"Ġдан":97499,"ĠArte":97500,"Ġnotifying":97501,"Ġgele":97502,".same":97503,"ĠRegel":97504,"ĠBaÅŁ":97505,".creation":97506,"ĠVN":97507,"_community":97508,"Ġunsustainable":97509,"SEX":97510,"ĠgridSize":97511,"rescia":97512,"aversable":97513,"(',')[":97514,"ĠPhelps":97515,"á»ķi":97516,"ANCELED":97517,"-IS":97518,".runners":97519,"ĠStokes":97520,".Produ":97521,"Ġwhipping":97522,"_acquire":97523,"Ġinvestigación":97524,"fried":97525,".copyWith":97526,"ĠHardcover":97527,"-Se":97528,"áŀ¶áŀ":97529,"invitation":97530,"lesai":97531,"ĠDorm":97532,"ĠÑģпиÑģка":97533,"Ġconcatenated":97534,"ophil":97535,"Ġthinker":97536,"/fontawesome":97537,"ĠLeopard":97538,"Ġ\"/\");Ċ":97539,"Ġresiduals":97540,"ĠMicrowave":97541,"Ġconforme":97542,"throp":97543,"Ġdisemb":97544,"ĠOMG":97545,"ĠDiscipline":97546,"ĠAcrobat":97547,"/repository":97548,"dfa":97549,"_MED":97550,"bufio":97551,"Ġméthode":97552,"_HOLD":97553,"iasi":97554,"_legacy":97555,")ččĊ":97556,"æ£Ģ":97557,"GetProcAddress":97558,"Ġyay":97559,"otence":97560,"orderid":97561,"-tw":97562,"Ġdearly":97563,"Incoming":97564,"/il":97565,"Ġneurop":97566,"ucz":97567,");čččĊ":97568,"ĠInnovative":97569,"Ġprofund":97570,"igmat":97571,"SelectionMode":97572,"relevant":97573,".GO":97574,"Ġbruises":97575,"Ġsach":97576,"odef":97577,"Ġreimb":97578,"/desktop":97579,"-spot":97580,"undance":97581,"Entropy":97582,"\\core":97583,"Ġsuger":97584,"ĠMvc":97585,"ĠGNOME":9758
6,"_indx":97587,"ĠYYSTYPE":97588,"ĠMatlab":97589,"ĠCIF":97590,"Ġ*))":97591,"ĠproductList":97592,"ĠAlright":97593,"acemark":97594,"ÑĤив":97595,"modification":97596,"international":97597,"Ġhomers":97598,"Ġdicts":97599,"ĠQFont":97600,".SQLite":97601,"Ġtransplantation":97602,"ĠMessageBoxButton":97603,"ĠElves":97604,"']])Ċ":97605,"(QIcon":97606,"Ġcinemas":97607,"COORD":97608,"-China":97609,"Ġkhẩu":97610,"æĪijçļĦ":97611,"Ġskulls":97612,"Ġpainstaking":97613,"fce":97614,".XRLabel":97615,"Ġspecifier":97616,"Ġpreferring":97617,"/activity":97618,"(Photo":97619,"ált":97620,".lot":97621,"''.":97622,"annonce":97623,".googlecode":97624,"-pdf":97625,"ĠPoke":97626,"_ACL":97627,"Ġendowed":97628,"discover":97629,".omg":97630,"Ġwoodland":97631,".Magic":97632,"Ġvolont":97633,"NotAllowed":97634,"Ġchave":97635,"BMW":97636,"','=',":97637,"ĠSIX":97638,"æĪij们":97639,"Ġkosher":97640,"Ġaspiration":97641,"intl":97642,"_refptr":97643,"'+Ċ":97644,"mentor":97645,".club":97646,"WindowState":97647,".ARR":97648,"Ġzza":97649,"ĠmessageType":97650,".equ":97651,"Thor":97652,"Ġinjust":97653,"Ġgums":97654,"ĠborderSide":97655,"/////":97656,"ĠTransmit":97657,"Ġbufsize":97658,"Ġhak":97659,"Ġellas":97660,"RANDOM":97661,"ĉmc":97662,"Ġpea":97663,"eko":97664,"documento":97665,"Ġhysteria":97666,"Ġarenas":97667,"Ġgunmen":97668,"Ġmike":97669,"Ġimpunity":97670,"atisation":97671,"_Zero":97672,"_COMPANY":97673,"ĠGors":97674,"ĠuseClass":97675,"(redis":97676,"ĠRUNNING":97677,"ĠBair":97678,"velte":97679,"Ġ','.":97680,"аÑĤÑĮÑģÑı":97681,"öst":97682,"encodeURIComponent":97683,"_restrict":97684,"Ġdecals":97685,"ĠPedido":97686,"Ġaltercation":97687,"Displays":97688,"ĠApplicants":97689,"CUS":97690,"Textarea":97691,"ĠAngola":97692,".future":97693,"ĠUSHORT":97694,"Ġsuppressing":97695,"Ġsetzen":97696,"APolynomial":97697,"Ġtoch":97698,"Ġhallmark":97699,"Ġ$$$":97700,"ĠCHARSET":97701,".rpm":97702,"ĠDich":97703,"--------------------":97704,"_parm":97705,"è¿ĺ":97706,"acciones":97707,"hait":97708,"WARDED":97709,"_routing":97710,"ĠNOM":97711,"Ġenclave":97712,"ĠLotto":97713,"ĉfr":97714,"complexContent":97715,"ĠBallard":97716,"kube":97717,"/win":97718,".getColumnModel":97719,"_REPLACE":97720,"HeaderValue":97721,"Ġestudiantes":97722,"Ġapis":97723,"Ġbpm":97724,"ĠTypeName":97725,"AndGet":97726,"rita":97727,"Plans":97728,">Note":97729,"Ġfetisch":97730,"Ġtoned":97731,"_goto":97732,"onsense":97733,"Ġmolds":97734,"Ġinfiltration":97735,"ĠGuerrero":97736,"ubbo":97737,"cki":97738,"($(\".":97739,"_activities":97740,"(changes":97741,"ĠofApp":97742,"ĠKepler":97743,"ĠDemp":97744,"ĠContinent":97745,".Ticks":97746,"ĠUnsigned":97747,"ĠJahres":97748,"Ġfreshmen":97749,"ĠArchived":97750,"ĠкоÑĤоÑĢÑĭй":97751,"Ġ'::":97752,"Tutorial":97753,"Cc":97754,"ĠtableLayoutPanel":97755,"fromJson":97756,".levels":97757,"_transient":97758,"Ġendorsing":97759,"ĠDIC":97760,"lauf":97761,"Ġshred":97762,"_EMIT":97763,"ificantly":97764,"ALA":97765,"/proto":97766,"Ġnarrowing":97767,"Utc":97768,"Factors":97769,"Ġsentient":97770,"æŀIJ":97771,"lixir":97772,"ĠCROSS":97773,"meteor":97774,"Ġgroin":97775,"Ġmdb":97776,"ĠRotterdam":97777,"Ġcomida":97778,"ĠOpCode":97779,"ĠDefaultValue":97780,"PermissionsResult":97781,"Ġheterogeneous":97782,"Ġmoot":97783,"Ġdeceived":97784,"-independent":97785,"ĠObjectOutputStream":97786,"Ġoverpower":97787,".dup":97788,"Ġldb":97789,"Ġdomestically":97790,"Ġbestellen":97791,"Ġlov":97792,"ĠContractors":97793,"Triangles":97794,"Ġfodder":97795,"Ġfilmes":97796,"ä¼ģ":97797,"Ġrevolver":97798,"StartupScript":97799,"/validation":97800,"ĠResourceType":97801,"iÅŁ":97802,"ĠLaz":97803,"fef":97804,"Ġl
stm":97805,"{*":97806,".attachment":97807,".hits":97808,"ewith":97809,"DOG":97810,"Alabama":97811,"Ġmediums":97812,".mContext":97813,"-cols":97814,"åıĭ":97815,".notice":97816,"Ġattn":97817,"ĠPacking":97818,"ĠLn":97819,"_COMPLEX":97820,"/Users":97821,".savetxt":97822,"ĠRounds":97823,"?,?,?,?,":97824,"Ġingl":97825,"ĠROC":97826,"_female":97827,"ĠStard":97828,"]];":97829,"Ġwrestlers":97830,"Ġtorrents":97831,"Ġsinh":97832,"ĊĊ":97833,"ë³µ":97834,"sense":97835,"however":97836,".Physics":97837,"Infrastructure":97838,"ĠSacr":97839,"Fel":97840,"ĠDISTRIBUT":97841,"éments":97842,"ĠValidates":97843,"############################################################":97844,"Ġ|/":97845,"Ġesl":97846,"Ġréseau":97847,"ĠBip":97848,"BYTES":97849,"_WATER":97850,"Turning":97851,"ELS":97852,"Ġjuxtap":97853,"Ġlesbische":97854,"ých":97855,"(Unknown":97856,"Neo":97857,"@JsonProperty":97858,"Ġalumnos":97859,"ĠRaqqa":97860,"imei":97861,".getBounds":97862,".MouseEventHandler":97863,"#######":97864,"GenericType":97865,"/cms":97866,"Ġturno":97867,"Ġмин":97868,"Ġfolklore":97869,"ĠEvo":97870,"Ġconductivity":97871,"Ġleben":97872,"Ġgearbox":97873,"-vs":97874,"ĠÏĨ":97875,"Ġdrinkers":97876,"Ġconexao":97877,"ĠTeeth":97878,"ĠgetArguments":97879,"ĠRAT":97880,"entious":97881,"Educ":97882,"+W":97883,"ĠInstitutional":97884,"ĠBord":97885,"isEqual":97886,"(pwd":97887,"Ġignited":97888,"ĠRousse":97889,"Ġimpactful":97890,"ĠMalk":97891,"Ġgeral":97892,"ĠPivot":97893,"Ġazt":97894,"Ġcsvfile":97895,"ĠRope":97896,"ĠSOLUTION":97897,"ĠArbitrary":97898,"Ġletto":97899,".MouseAdapter":97900,"Ġ}}}":97901,"ĠSailor":97902,"dera":97903,"Putting":97904,"Ġconcentrates":97905,"ĠauthDomain":97906,"âĢĿçļĦ":97907,"-finals":97908,",strlen":97909,"Muon":97910,"ĠOrdinary":97911,"firefox":97912,"ĠLaTeX":97913,"ĠHund":97914,"engineering":97915,"/blue":97916,"edTextBox":97917,"(\"\");":97918,"ĠCDDL":97919,"kept":97920,"ĠGetString":97921,"Kir":97922,"()='":97923,"ĠOCD":97924,"antium":97925,"$menu":97926,"ĠAppalachian":97927,"Secretary":97928,"ë¥ĺ":97929,"ีย":97930,"Semantic":97931,"Ġ*[":97932,"estone":97933,"ungkin":97934,"MaxY":97935,"-tone":97936,"\"};čĊ":97937,"_Part":97938,"ĊĊ":98140,"Lic":98141,"ĠMirage":98142,"ĠAssemblyFileVersion":98143,"TeV":98144,"ĠValueEventListener":98145,"-solving":98146,"Tho":98147,"roulette":98148,"_WP":98149,"Ġuninterrupted":98150,"ĠfieldType":98151,".Typed":98152,"Ġamour":98153,"Ġmockery":98154,"(vol":98155,"ĠSubcommittee":98156,"ĠRuf":98157,"erox":98158,":UIButtonTypeCustom":98159,"ĠBlur":98160,"Ġwykon":98161,"nces":98162,"ASHBOARD":98163,"!!\");Ċ":98164,"Ġmurderers":98165,".daily":98166,"ĠDIAG":98167,"jing":98168,"Ġdolphin":98169,"Ġlòng":98170,"Ġbö":98171,"ĠVocabulary":98172,".StObject":98173,"')\">":98174,"Ġzun":98175,"Ġscrimmage":98176,"tréal":98177,"ĠLig":98178,"[vi":98179,"Cole":98180,"Ġfrosting":98181,".Players":98182,"-translate":98183,"Feels":98184,"=\\\"/":98185,".ButterKnife":98186,"Ġ?>;Ċ":98187,"Ġavi":98188,"innie":98189,".Failure":98190,"Ġspindle":98191,"ConfigurationException":98192,"_hop":98193,"Ġposição":98194,"ĠAwait":98195,"UIImagePickerController":98196,"ĉday":98197,"Ġgenom":98198,"Cab":98199,"ĠÑĢезÑĥлÑĮÑĤаÑĤ":98200,"ORIGINAL":98201,"Ġejaculation":98202,"(tcp":98203,"SECOND":98204,"Ġtonic":98205,"ĠListBox":98206,"ĠĉĉĊ":98207,"()>Ċ":98208,"Ġquatre":98209,"ượng":98210,"withErrors":98211,".Maybe":98212,",â̦":98213,"tokenId":98214,"_UNDEF":98215,"Ġfreshness":98216,"ĠAmendments":98217,".mapbox":98218,".CV":98219,"(blog":98220,"_gettime":98221,".quest":98222,"sparse":98223,"Ġresale":98224,"Ġenthusiastically":98225,"ĠProstitut
as":98226,"Wa":98227,"Cargo":98228,".Parcelable":98229,"SENSOR":98230,"ĠRyu":98231,"Laughs":98232,"_Native":98233,"/pg":98234,"ysts":98235,"Ġphotoc":98236,"ç®Ģ":98237,"adopt":98238,".species":98239,"conciliation":98240,"Adjusted":98241,".FirebaseAuth":98242,"uttle":98243,"ordination":98244,"Ġmunch":98245,"ĠStake":98246,".ping":98247,"anker":98248,"(QStringLiteral":98249,"Ġsubscript":98250,"ĠĠĉĊ":98251,"ĠMCC":98252,"_Cmd":98253,"sexy":98254,"iou":98255,"ĠMANY":98256,"Ġnanny":98257,"TRAIN":98258,"Ġflourishing":98259,"ĠWatches":98260,"ĠQMap":98261,"ĠFerm":98262,"Ġwasm":98263,"ĠAbed":98264,"_UD":98265,"ĠGlasses":98266,"+v":98267,"Attend":98268,".Chain":98269,"Ġdecency":98270,"ĠSupplementary":98271,"hunter":98272,"-txt":98273,"Ġ\"}\";Ċ":98274,".setWindowTitle":98275,"(\"":98377,"Ġmascara":98378,"(Profile":98379,"åĬŁèĥ½":98380,"imité":98381,"Ġwildfires":98382,"-ROM":98383,".isOn":98384,"(groupId":98385,"Repair":98386,"accumulate":98387,"Ġ<\",":98388,"Ġhandwritten":98389,"Ġacheter":98390,"ĠMGM":98391,"ĠIrma":98392,"->{_":98393,"gee":98394,"criminal":98395,"Ġèĭ¥è¦ģ":98396,"Ġmomentarily":98397,"\")!=":98398,"_lit":98399,"ĠexpiresIn":98400,".\").":98401,"éķ¿åº¦":98402,"Ġfrække":98403,"vlc":98404,"Ġorbs":98405,"),$":98406,"Ġventured":98407,"/>\\":98408,"charm":98409,"Nuitka":98410,"eldig":98411,"atonin":98412,"Witness":98413,"-lat":98414,"ĠsetHidden":98415,"Ġrelics":98416,"Ġconsulate":98417,".IGNORE":98418,"\"After":98419,"ĠsetAddress":98420,"Ġbesteht":98421,"Ġ'')ĊĊ":98422,".xaxis":98423,"Ġserão":98424,"Ġmisled":98425,"_UNIFORM":98426,"ĠVIA":98427,"incr":98428,"Ġzenith":98429,"Ġviscosity":98430,"Ġthinly":98431,".getSharedPreferences":98432,".ErrorCode":98433,"\"),\"":98434,"ĠMillionen":98435,"Ġ/>)Ċ":98436,"ScrollIndicator":98437,"-seeking":98438,"ĠPOLITICO":98439,"asca":98440,"_rl":98441,"Navig":98442,"(fullfile":98443,"Ġsolitude":98444,"Ġjuven":98445,"Ġhauling":98446,"ĠMacros":98447,"ĠGry":98448,"Ġexercitation":98449,"ĠATTACK":98450,"TickCount":98451,"Ġrites":98452,"Ġdoe":98453,"ParticleSystem":98454,"Ġslu":98455,"WindowText":98456,"ĠClassName":98457,"Ġslander":98458,"ĉPort":98459,"jong":98460,"?a":98461,".Dial":98462,"âĢĶat":98463,"$objPHPExcel":98464,"Ġsoar":98465,"ENN":98466,"appeared":98467,"Ġquotid":98468,"emachine":98469,"Ġnip":98470,"Ġmicrotime":98471,"ĠAlma":98472,";!":98473,"------------------------------------------------------------------------------------------------":98474,"ĠPassage":98475,"Ġdumpsters":98476,"ĠExclude":98477,"Ġsuggestive":98478,"ĠCircularProgressIndicator":98479,"_clr":98480,"ArrayType":98481,"ILLA":98482,"ElapsedTime":98483,"Driven":98484,"ĠresourceName":98485,"ĠGarrison":98486,"serir":98487,"-ahead":98488,"Ġpinnacle":98489,"ĠEspresso":98490,"Sparse":98491,"Ġassays":98492,"ĠGirlfriend":98493,"imid":98494,"]='\\":98495,"ONGLONG":98496,"Ġportraying":98497,"Lane":98498,"Ġbúsqueda":98499,"Ġreinforcements":98500,"ĠSpreadsheet":98501,"ĠArrayCollection":98502,",arr":98503,"lightbox":98504,"icana":98505,"<\"":98506,"builders":98507,"Kid":98508,"ĠMatSnackBar":98509,"EXPR":98510,"odcast":98511,"ĠFoundations":98512,"Ġinds":98513,"='${":98514,"Fizz":98515,"-functional":98516,"(workspace":98517,"Ġstemmed":98518,"_patches":98519,"ĠJarvis":98520,"READING":98521,"Ġdisrespectful":98522,"ĠQDom":98523,"Ġ${Ċ":98524,"estatus":98525,"Reached":98526,"!.ĊĊ":98527,"ILT":98528,"ĠNDEBUG":98529,"ĠCourage":98530,"birthdate":98531,"ĠTing":98532,"Ġutilizado":98533,"ánchez":98534,"Outdoor":98535,"Ġhandguns":98536,"RefCount":98537,"ÉĻ":98538,"romo":98539,"Ġtts":98540,".She":98541,"ĠPane":98542,"ãĢij,ãĢ
IJ":98543,"ĠIOCTL":98544,"/black":98545,"inscription":98546,"Ġbiopsy":98547,"ĠTimeInterval":98548,".TestCheck":98549,"ĠGUIStyle":98550,"ĠCapability":98551,"ĠBeitrag":98552,"donnees":98553,"Treatment":98554,".backup":98555,"Ġsignings":98556,"ĠBoca":98557,"drm":98558,".MAIN":98559,"Ġgoede":98560,"ĠMarkup":98561,"GREE":98562,"ĠBaseService":98563,".Creator":98564,"Ġjails":98565,"ĠKahn":98566,"IpAddress":98567,"ACHI":98568,"Ġinhibited":98569,"Ġ@$_":98570,"ĠAssass":98571,"Ġenviado":98572,"Heroes":98573,"ÐŁÐµÑĢ":98574,"ĠMaven":98575,".ls":98576,"Ġive":98577,"|RF":98578,"ĠresizeMode":98579,"Ġrumpe":98580,"_attachments":98581,"TU":98582,"Ġtactile":98583,"Attempting":98584,"Ġrobin":98585,"yaw":98586,"Ġmercenaries":98587,"ĠHabitat":98588,"enddate":98589,"Ġoxy":98590,"ĉRandom":98591,"ohon":98592,"IsNull":98593,"ĠValidationResult":98594,"ãĥļ":98595,"umbed":98596,"ppv":98597,"Ġarp":98598,"ichick":98599,"_rnn":98600,"ĠTFT":98601,"TexImage":98602,"\"On":98603,"ĠSampler":98604,"topl":98605,"Ġjane":98606,"yling":98607,"ĠUNICODE":98608,"TabIndex":98609,"<{Ċ":98610,"suspend":98611,"uvian":98612,",application":98613,"олиÑĩеÑģÑĤво":98614,"yat":98615,"ezier":98616,"ĠCHUNK":98617,"ĠAdler":98618,"/Add":98619,"ĠKeyValue":98620,"Ġsposób":98621,"Sampling":98622,"chers":98623,"_AMD":98624,"Ru":98625,".MustCompile":98626,"Nation":98627,"Assoc":98628,"Managing":98629,"ĠEngl":98630,"_GB":98631,"Ġsuccinct":98632,"Ġdisliked":98633,"ĠIke":98634,"Bulletin":98635,"_ARCHIVE":98636,"Proposal":98637,"Ġjogging":98638,".CREATED":98639,"Ġchol":98640,"è£ħ":98641,"Į¨":98642,"-push":98643,"Ġreserva":98644,"corev":98645,"ètre":98646,"THR":98647,"Ġincompetence":98648,"Ġcharisma":98649,"æĦŁ":98650,"Ġ\"==":98651,"BTN":98652,"ĠLocator":98653,"ivet":98654,"('.')Ċ":98655,"ĠforIndexPath":98656,"ôme":98657,"Ġcapacit":98658,"waters":98659,"ĠWRONG":98660,"hoa":98661,"ĠMIPS":98662,"Ġemiss":98663,"ĠJacqueline":98664,"(cmp":98665,"Ġeens":98666,"Leo":98667,".timing":98668,"CLUSION":98669,"Ġ(\"-":98670,"åĵĪ":98671,".kode":98672,"ĠUndert":98673,"Ġbewild":98674,"ĠEssen":98675,".hd":98676,"Ġrenegot":98677,"Ġmower":98678,"Ġlsp":98679,"Ġpenchant":98680,"Ġmanoe":98681,"Ġagli":98682,"Ġrecal":98683,"ĠOPERATION":98684,"(^)(":98685,"Ġν":98686,"ĠScoped":98687,"Ġ@\"Ċ":98688,"=label":98689,"[loc":98690,"Intl":98691,"ĠNz":98692,"tablet":98693,".ColumnName":98694,"ĠscreenSize":98695,"DBus":98696,"cooked":98697,"-registration":98698,"âĢľOne":98699,"-non":98700,"ĠwiÄĻc":98701,"Ġcosta":98702,".addTab":98703,".conditions":98704,"ĠHess":98705,"MEMORY":98706,"ĠAvalanche":98707,"()}}Ċ":98708,"Ġtriplet":98709,"Ġlabyrinth":98710,"ĠNodeList":98711,"ĠNYT":98712,"Ġyeni":98713,"dff":98714,".HtmlControls":98715,"AVIS":98716,"/Math":98717,"Ġmemcmp":98718,"اء":98719,"оÑģÑĮ":98720,"crap":98721,"(pages":98722,"Ġlxml":98723,"ĠQDateTime":98724,"_tcb":98725,"Ġopenid":98726,"Ġsynaptic":98727,"ĠMDMA":98728,"(slug":98729,"igmatic":98730,"enor":98731,"Ġcramped":98732,"GOP":98733,"ŃIJ":98734,".isFile":98735,"ĠDifferential":98736,"Ġ=\"\";Ċ":98737,"ĉĉĉĠĠĠĠĉ":98738,"ĠCooke":98739,"ĉUFUNCTION":98740,"Ġperseverance":98741,"RelativeLayout":98742,"IMPORTANT":98743,"Ġexon":98744,"Ġон":98745,"ibase":98746,"(CONT":98747,"novation":98748,"ä½ķ":98749,"[sub":98750,"AdminController":98751,"HTTPHeader":98752,"crear":98753,"ĠNIR":98754,"ĠDropDownList":98755,"Ġvalide":98756,"Ġdehydration":98757,".']":98758,"(WIN":98759,"Ġ...\\":98760,"Ġphotoshop":98761,"ĉInit":98762,"_cou":98763,"ĠtimeZone":98764,"darwin":98765,"romatic":98766,"NavigationItemSelectedListener":98767,"brates":98768,"]--;Ċ":98769,"Ġtragedies":9
8770,"ĠPediatrics":98771,"SMART":98772,"-API":98773,"ĠMessageLookup":98774,"ĉvo":98775,"Ġprejudices":98776,"ĠmA":98777,"Ups":98778,"ĠMISSING":98779,"ĉad":98780,"Cream":98781,"ĠTb":98782,"ĠMona":98783,"_ghost":98784,"ĉtypes":98785,"Emb":98786,"ĠDocumentary":98787,"');ĊĊĊĊ":98788,"Ġlup":98789,"_Reference":98790,"ĠBATCH":98791,"Ġintertwined":98792,"":98915,"Ġfoyer":98916,"'utilisation":98917,"ĠMüller":98918,"ĠFetish":98919,"ĠdefaultManager":98920,"Ġbacktrack":98921,"Bah":98922,"Explicit":98923,"_ASCII":98924,"ĠmActivity":98925,"(Msg":98926,"Ġê²Į":98927,"ĠTERMS":98928,"ĠAngie":98929,"HSV":98930,"ĠMosque":98931,".Names":98932,"íĬ¼":98933,"reste":98934,"_parms":98935,"Ġgaping":98936,"Ġcropping":98937,"DataFrame":98938,"Ġresponsiveness":98939,"_undo":98940,"_tran":98941,".terminate":98942,"Ġitaliane":98943,"Ġwalkthrough":98944,"Ġattractiveness":98945,"де":98946,"_STS":98947,"_learn":98948,"Ġchocolates":98949,"ierarchical":98950,"-thinking":98951,"Ġ)))":98952,"ishments":98953,".Logf":98954,"ĠTMZ":98955,"ĠCanary":98956,"foil":98957,"ĠVaccine":98958,".vx":98959,"ĠSurround":98960,"Intermediate":98961,"Ġiov":98962,"vais":98963,"';\";Ċ":98964,"ï½ŀĊĊ":98965,"éĢģæĸĻ":98966,"â̦it":98967,"Seats":98968,"Clar":98969,"Wars":98970,"ĠHutchinson":98971,"ĠHasan":98972,"!')ĊĊ":98973,"ĠRichie":98974,"cheiden":98975,"($('":98976,"York":98977,"Ġlids":98978,"Ġalphanumeric":98979,"ĠGlock":98980,".shapes":98981,"Ġsparking":98982,"_epsilon":98983,"uplicated":98984,".dirty":98985,"])==":98986,"ĠìľĦì¹ĺ":98987,"Ġscn":98988,"Ġ/****************************************************************":98989,"_PREVIEW":98990,"_HC":98991,"ielding":98992,"fgets":98993,"ĠAddison":98994,"ĠproductService":98995,"-figure":98996,"(retval":98997,"zano":98998,"Ġautob":98999,"ĉsd":99000,"_numer":99001,"ĠSetLastError":99002,"ĠFior":99003,"ificance":99004,"Untitled":99005,"Ġinfield":99006,"Ġ{}));Ċ":99007,"Ġspac":99008,"Ġrookies":99009,"(describing":99010,"ngen":99011,"ிà®":99012,".rdf":99013,".Mutex":99014,"Ġkneeling":99015,"ĠQE":99016,"setMax":99017,"ReadStream":99018,"Ġventas":99019,"sut":99020,"cmpeq":99021,".WriteAllText":99022,"ĠExperienced":99023,"$__":99024,"Ġkaum":99025,"ĠLIS":99026,"Ġdocumentos":99027,"_HEALTH":99028,"icontains":99029,"Ġartisans":99030,"OWNER":99031,"Ġblinked":99032,"getDisplay":99033,"Ġtoen":99034,"ĠrowNum":99035,"Ġavril":99036,"Ġinvis":99037,"ĠKear":99038,"toBeInTheDocument":99039,"apur":99040,"Ġracked":99041,"ĠMcMaster":99042,"_ATTRIB":99043,"Haz":99044,"Ġfactura":99045,"/ts":99046,"ĠÑĢазмеÑĢ":99047,"Ġzf":99048,"Ġshortfall":99049,".fasta":99050,"ĠCONSTANT":99051,".managed":99052,"gems":99053,"SharedPointer":99054,"Ġblurry":99055,"brightness":99056,"(components":99057,"Ġ...\"ĊĊ":99058,"SELL":99059,"ĠIllustrator":99060,".getChannel":99061,"Ġtrouvé":99062,"ysters":99063,"Ġvois":99064,"ĠLinden":99065,"Ġemojis":99066,"Ġbrawl":99067,"ĠMSR":99068,"ĠElo":99069,"ĠCroatian":99070,"PopupMenu":99071,"Lewis":99072,".JWT":99073,"Ġastonished":99074,"Bush":99075,"(itemId":99076,"Ġdetachment":99077,"ĠEncore":99078,"å°Ķ":99079,"Ġrekl":99080,"Ġcram":99081,")$/":99082,".getHost":99083,"_recommend":99084,"-HT":99085,"_calibration":99086,"Authenticate":99087,".firebaseapp":99088,"UNIX":99089,"ĉCamera":99090,"ĠHEAP":99091,"Ideal":99092,".office":99093,"Ġgoofy":99094,"(Symbol":99095,"Ġjouer":99096,"_partitions":99097,"Ġrapidement":99098,"ĠGNUNET":99099,"idUser":99100,"Ġsupervise":99101,"(Contact":99102,"AWN":99103,"ãģĺ":99104,"Ġnaam":99105,"Ġaust":99106,"åľ¨çº¿":99107,"_softmax":99108,"AllowAnonymous":99109,"ammable":99110,"ROUTE":99111,"*D":9
9112,"Ġaden":99113,"ĠCristina":99114,"ĠCristiano":99115,"Ġbloodstream":99116,"subclass":99117,"_persona":99118,"CHILD":99119,"-know":99120,"ĠnavigationOptions":99121,"ĠZukunft":99122,"ĠPixar":99123,"Tyler":99124,"Ġunderworld":99125,"Ġsincerity":99126,"Ġdispenser":99127,"Ġkter":99128,"idders":99129,".addNode":99130,"-checked":99131,"Ġkeyst":99132,"ĠWTO":99133,".signals":99134,"Ġadventurer":99135,"ĠPang":99136,"\\R":99137,"=pos":99138,"Ġdispensaries":99139,"ĠCloset":99140,"(\"{\\\"":99141,"ideon":99142,"Ġnécessaire":99143,"()\"Ċ":99144,"_RECEIVED":99145,"Ġrésultats":99146,"Ġmoden":99147,"ĠIcelandic":99148,";d":99149,".allowed":99150,"(newUser":99151,"Ġmerciless":99152,".WaitFor":99153,"Ġdaycare":99154,"ĠConveyor":99155,"çĸ":99156,"ð¬":99157,"çĥ":99158,"çĹ":99159,"çł":99160,"èĦ":99161,"é²":99162,"å¦":99163,"çĿĢ":99164,"å¾Ī":99165,"éħ":99166,"çĭ":99167,"éª":99168,"æĤ":99169,"é¥":99170,"èħ":99171,"æĥ³":99172,"å¨":99173,"é¹":99174,"çĤ":99175,"åĴ":99176,"çĮ":99177,"è´¨":99178,"æ¢":99179,"æ°Ķ":99180,"ð«":99181,"æķĻ":99182,"çŁ":99183,"åĦ":99184,"åıijå±ķ":99185,"åĪĽ":99186,"èij":99187,"æħ":99188,"åŀ":99189,"åģļ":99190,"æĪĺ":99191,"æIJ":99192,"强":99193,"æ·±":99194,"åĩł":99195,"ç¿":99196,"å©":99197,"èŀ":99198,"å§Ķ":99199,"åIJĦ":99200,"èİ":99201,"é¸":99202,"éº":99203,"åıĹ":99204,"èģĮ":99205,"åĺ":99206,"æ½":99207,"é£İ":99208,"èIJ¥":99209,"åħļ":99210,"èľ":99211,"éĤ£":99212,"é¢Ĩ":99213,"çij":99214,"é³":99215,"æľ¯":99216,"ä»Ģ":99217,"æĪ¿":99218,"ç²¾":99219,"åª":99220,"éĨ":99221,"太":99222,"èĤ¡":99223,"èĽ":99224,"åħī":99225,"æŀģ":99226,"åĬŀ":99227,"èĵ":99228,"çĺ":99229,"å´":99230,"åĹ":99231,"èĬ±":99232,"çłĶ":99233,"å¿«":99234,"å¸Ī":99235,"è¶Ĭ":99236,"è§Ĥ":99237,"æ¤":99238,"æ¦":99239,"çŀ":99240,"èĤ²":99241,"çα":99242,"çϽ":99243,"ä¸ĸ":99244,"ä»Ģä¹Ī":99245,"çľ¼":99246,"å³":99247,"èĴ":99248,"æĵ":99249,"被":99250,"å¹²":99251,"çĹħ":99252,"士":99253,"çĴ":99254,"è¸":99255,"æ¾":99256,"å·¥ä½ľ":99257,"让":99258,"çĥŃ":99259,"è¾ĥ":99260,"åĦ¿":99261,"åĬ©":99262,"积":99263,"ç³":99264,"çĵ":99265,"ç£":99266,"åĤ":99267,"è¹":99268,"èļ":99269,"å·±":99270,"çϾ":99271,"åĬ¿":99272,"èµĽ":99273,"æ¨":99274,"æ¿":99275,"èĸ":99276,"æĿij":99277,"带":99278,"å¢ĥ":99279,"æĬ¤":99280,"éŃ":99281,"å«":99282,"èĩªå·±":99283,"æµİ":99284,"ä½İ":99285,"åĮ»":99286,"éĺ²":99287,"åĨľ":99288,"èĨ":99289,"çĨ":99290,"é«":99291,"åĨĽ":99292,"æĪı":99293,"åįĩ":99294,"æĸ¯":99295,"ä½ı":99296,"èIJ½":99297,"åħ»":99298,"èĩ´":99299,"çĬ":99300,"çĩ":99301,"çħ":99302,"èĶ":99303,"ä¼ģä¸ļ":99304,"åĽ¢":99305,"æīį":99306,"æł¡":99307,"åĩĨ":99308,"å¥ĩ":99309,"åī¯":99310,"é¼":99311,"æ¼Ķ":99312,"马":99313,"èµ°":99314,"ç¥ŀ":99315,"åħĭ":99316,"æľĽ":99317,"æ²¹":99318,"è¾¹":99319,"åįĥ":99320,"å¾Ģ":99321,"åĪĩ":99322,"æ©":99323,"ç¶":99324,"åĻ":99325,"éĻħ":99326,"çīĮ":99327,"社ä¼ļ":99328,"游æĪı":99329,"æĸ½":99330,"çħ§":99331,"æİ§":99332,"满":99333,"è¯Ĩ":99334,"éĩįè¦ģ":99335,"è¶³":99336,"çķĻ":99337,"ç»Ĩ":99338,"åįı":99339,"éĢĤ":99340,"æĩ":99341,"æ§":99342,"éĦ":99343,"èĿ":99344,"å¸Ĥåľº":99345,"ç»ıæµİ":99346,"ä¹ł":99347,"æĸĩåĮĸ":99348,"éļ¾":99349,"ä¹IJ":99350,"åĨ³":99351,"欢":99352,"è§ī":99353,"åĽŃ":99354,"åħ´":99355,"åħħ":99356,"举":99357,"æī¹":99358,"èķ":99359,"æĬĬ":99360,"æĬĢæľ¯":99361,"ç©¶":99362,"第ä¸Ģ":99363,"便":99364,"åĵį":99365,"çİ©":99366,"åĿļ":99367,"èŀį":99368,"åįĬ":99369,"åĸľ":99370,"å±Ĥ":99371,"离":99372,"ä»ħ":99373,"éŁ":99374,"åij³":99375,"念":99376,"åŃ£":99377,"ç´§":99378,"ä¹ħ":99379,"é¤":99380,"éŀ":99381,"è¤":99382,"åĢĻ":99383,"åĨµ":99384,"çŁ³":99385,"åģ¥":99386,"æĢİ":99387,"å®Ŀ":99388,"è¡Ģ":99389,"åŁŁ":99390,"æĹ©":99391,"çŁ¥éģĵ":99392,"è´Ł":99393,"åįļ":99394,"å·´":99395,"亲":99396,"å±ŀ":99
397,"严":99398,"äºī":99399,"å¯Ł":99400,"èº":99401,"ç°":99402,"建设":99403,"产ä¸ļ":99404,"åIJĥ":99405,"åŃ©":99406,"æĹħ":99407,"æł¹":99408,"æĿIJ":99409,"ä¼Ĺ":99410,"éļı":99411,"å®ĺ":99412,"åºķ":99413,"彩":99414,"å¯Į":99415,"温":99416,"åį«":99417,"åī§":99418,"çĽĬ":99419,"æĬĹ":99420,"è´¢":99421,"纪":99422,"æĨ":99423,"çĶŁæ´»":99424,"红":99425,"çĶŁäº§":99426,"è¿ľ":99427,"éĴ±":99428,"åĶ®":99429,"群":99430,"çıŃ":99431,"楼":99432,"éĩĩ":99433,"èīº":99434,"å±ħ":99435,"åģĩ":99436,"è°Ī":99437,"æĻļ":99438,"é¬":99439,"èĪª":99440,"害":99441,"èĹ":99442,"çį":99443,"åµ":99444,"çİĭ":99445,"康":99446,"èİ·":99447,"ç»Ń":99448,"äºļ":99449,"é£Ł":99450,"åİĭ":99451,"æĭĽ":99452,"èĮĥ":99453,"许":99454,"åĽ´":99455,"é½":99456,"éĻį":99457,"纳":99458,"åĵª":99459,"æķĻèĤ²":99460,"å·²ç»ı":99461,"å¾·":99462,"æŀĹ":99463,"å®īåħ¨":99464,"é¾Ļ":99465,"大家":99466,"éĿĴ":99467,"åºľ":99468,"æ²³":99469,"åı¤":99470,"èį¯":99471,"åĿĩ":99472,"æĻº":99473,"乡":99474,"çķ¥":99475,"åĨ·":99476,"ç¦ı":99477,"室":99478,"ç»´":99479,"æī¿":99480,"å±Ĭ":99481,"è¯ī":99482,"åĪ»":99483,"èŁ":99484,"æª":99485,"å°±æĺ¯":99486,"è¿Ļ个":99487,"ä¸Ńå¿ĥ":99488,"ä¸ĸçķĮ":99489,"åŁİå¸Ĥ":99490,"éĿŀ常":99491,"åĪĴ":99492,"åıĮ":99493,"æĢİä¹Ī":99494,"åΰäºĨ":99495,"æľĥ":99496,"åı²":99497,"ä¾Ĩ":99498,"å¾ĭ":99499,"å¥ĸ":99500,"ç»Ī":99501,"åªĴ":99502,"å®ģ":99503,"课":99504,"èģĮä¸ļ":99505,"åħį":99506,"æµĭ":99507,"æĢ¥":99508,"æķij":99509,"çĭ¬":99510,"èѦ":99511,"é¤IJ":99512,"æĦ¿":99513,"è´«":99514,"çĸij":99515,"åļ":99516,"她":99517,"åıĪ":99518,"åĽłä¸º":99519,"ä¸įæĺ¯":99520,"å¤Ł":99521,"æĸ¹éĿ¢":99522,"éķĩ":99523,"äºĴ":99524,"éħĴ":99525,"讲":99526,"çĸĹ":99527,"æĺ¥":99528,"æ¹ĸ":99529,"å¤ľ":99530,"责任":99531,"人æ°ij":99532,"åħ°":99533,"çŁŃ":99534,"æķħ":99535,"åĩı":99536,"æĻ®":99537,"亮":99538,"ä¾Ŀ":99539,"åį°":99540,"éĿĻ":99541,"åĢĭ":99542,"å¾ģ":99543,"åIJ¸":99544,"缺":99545,"æĶ»":99546,"åĩĢ":99547,"åħ¸":99548,"åĽº":99549,"访":99550,"ç¹":99551,"çĢ":99552,"æıIJä¾Ľ":99553,"ç»ĩ":99554,"å¾Īå¤ļ":99555,"çłĶç©¶":99556,"è·Ł":99557,"主è¦ģ":99558,"æĥħåĨµ":99559,"çŃĸ":99560,"æŃ»":99561,"大åѦ":99562,"æĶ¿åºľ":99563,"å½±åĵį":99564,"ä¹°":99565,"åħŃ":99566,"éĻ©":99567,"åħ«":99568,"æŁIJ":99569,"è´¨éĩı":99570,"åįł":99571,"å·®":99572,"æĽ´å¤ļ":99573,"æľĭ":99574,"éĿ©":99575,"宣":99576,"çł´":99577,"è½»":99578,"座":99579,"æĺ¾":99580,"稳":99581,"è´µ":99582,"èĥĮ":99583,"èī¯":99584,"çĸ«":99585,"æ¯Ĵ":99586,"ä¹İ":99587,"åĢŁ":99588,"è¿·":99589,"çŃĶ":99590,"æ¿Ģ":99591,"åij¼":99592,"äºĨä¸Ģ":99593,"è¶£":99594,"ä¼´":99595,"ä¼Ļ":99596,"è¼":99597,"ð¬Ń":99598,"åĽ½å®¶":99599,"æ´»åĬ¨":99600,"çİ°åľ¨":99601,"ç§ijæĬĢ":99602,"åį¡":99603,"ä¸įåIJĮ":99604,"个人":99605,"è®°èĢħ":99606,"ä¸įæĸŃ":99607,"éĹ»":99608,"ä¹Ŀ":99609,"èijĹ":99610,"综":99611,"ä¸ĥ":99612,"æłij":99613,"æľĭåıĭ":99614,"åįĸ":99615,"伤":99616,"æ²Ļ":99617,"åĸĦ":99618,"å¥Ĺ":99619,"è½®":99620,"ç©¿":99621,"è¡¥":99622,"ä¸Ģå®ļ":99623,"çªģ":99624,"çĿ£":99625,"追":99626,"å¨ģ":99627,"åı¦":99628,"åĽ°":99629,"æŀ¶":99630,"ç»Ŀ":99631,"æķ£":99632,"æİ¢":99633,"æ´Ĺ":99634,"临":99635,"ä¼¼":99636,"è´¸":99637,"丰":99638,"æĺ¯ä¸Ģ":99639,"ç«ŀ":99640,"è¿İ":99641,"èģļ":99642,"è«":99643,"æįŁ":99644,"æī§":99645,"驾":99646,"è¿Ŀ":99647,"è¥":99648,"èł":99649,"ä»ĸ们":99650,"æĹ¶åĢĻ":99651,"å®ĥ":99652,"人åijĺ":99653,"è¿Ļæł·":99654,"å·¥ç¨ĭ":99655,"åĪĽæĸ°":99656,"åŃ©åŃIJ":99657,"å¸Į":99658,"éĥ¨åĪĨ":99659,"éĵ¶":99660,"代表":99661,"é¦Ļ":99662,"帮":99663,"æİ¨è¿Ľ":99664,"çĽĺ":99665,"积æŀģ":99666,"éĥ¨éŨ":99667,"åŁ¹":99668,"æŃ¦":99669,"ä¸įä¼ļ":99670,"çŃij":99671,"éĢĻ":99672,"çݩ家":99673,"æĭ¿":99674,"åİĤ":99675,"æ¯Ľ":99676,"çģµ":99677,"æŃĮ":99678,"绿":99679,"å¦Ī":99680,"缼":99681,"é¦Ĩ":99682,"顺":99683,"èĦ¸":99684,"å°¼":99685,"丽":99686,"奥":99687,"éģĩ":9968
8,"è¯į":99689,"å°ģ":99690,"ä¸Ŀ":99691,"好çļĦ":99692,"æĭħ":99693,"èĦ±":99694,"æģ¶":99695,"åİļ":99696,"åĬ³":99697,"缣":99698,"æĬĺ":99699,"åı¥":99700,"æĢĢ":99701,"æŁĵ":99702,"书记":99703,"åĨł":99704,"é²ľ":99705,"æ¦Ĥ":99706,"éļIJ":99707,"å¹ħ":99708,"èµŀ":99709,"å¹ķ":99710,"æ¥Ń":99711,"éģĹ":99712,"åΤ":99713,"èĺ":99714,"å¶":99715,"æĬķèµĦ":99716,"è¡Įä¸ļ":99717,"äºij":99718,"çݯå¢ĥ":99719,"åѦçĶŁ":99720,"åIJĪä½ľ":99721,"åģ¥åº·":99722,"é£ŀ":99723,"ä¸ĢæŃ¥":99724,"ä¸Ģ缴":99725,"åıijçĶŁ":99726,"éĺ¿":99727,"é¢Ĩ导":99728,"åĸľæ¬¢":99729,"åºĶ该":99730,"çĤº":99731,"è®Ń":99732,"æĿĢ":99733,"港":99734,"交éĢļ":99735,"éĺ¶":99736,"éĴ¢":99737,"令":99738,"å°½":99739,"æ¯į":99740,"è¡£":99741,"ç²ī":99742,"é¡¶":99743,"ä¹Łä¸į":99744,"æĬĵ":99745,"èĭ¦":99746,"幸":99747,"礼":99748,"第ä¸ī":99749,"大çļĦ":99750,"éģİ":99751,"çĥŁ":99752,"éģ¿":99753,"ä»į":99754,"åºĨ":99755,"æĢķ":99756,"è°¢":99757,"çĽĸ":99758,"å°Ħ":99759,"éľ²":99760,"æĸĹ":99761,"çĬ¶":99762,"åѸ":99763,"æ¯ķ":99764,"å·¨":99765,"çŁ¿":99766,"çļĩ":99767,"å¸Ń":99768,"çĹĩ":99769,"æī¬":99770,"å»¶":99771,"ä¾§":99772,"æ·¡":99773,"çļĦä¸Ģ":99774,"ç¶²":99775,"æ´ģ":99776,"ç¸":99777,"è§Ī":99778,"çѹ":99779,"ç§ĺ":99780,"è¯Ĭ":99781,"çı¾":99782,"èªī":99783,"毫":99784,"ð¨":99785,"åį´":99786,"æĪIJ为":99787,"èĥ½åĬĽ":99788,"é»Ħ":99789,"æĹħ游":99790,"èά":99791,"æ¯Ķè¾ĥ":99792,"èµ·æĿ¥":99793,"äºĨè§£":99794,"èĩªçĦ¶":99795,"ä¸Ģ次":99796,"åŁºæľ¬":99797,"æĽ¾":99798,"综åIJĪ":99799,"èıľ":99800,"è§īå¾Ĺ":99801,"第äºĮ":99802,"è·ij":99803,"æ³¢":99804,"åĢĴ":99805,"ç¡Ģ":99806,"åħµ":99807,"èįī":99808,"çͳ":99809,"çͰ":99810,"æĤ£":99811,"è§Ħå®ļ":99812,"èĥľ":99813,"èµĦ产":99814,"梦":99815,"æľĿ":99816,"è¿ĻéĩĮ":99817,"夫":99818,"æĮ¥":99819,"ä½Ľ":99820,"å®Ī":99821,"鼶":99822,"æĸ¼":99823,"ç¯ĩ":99824,"å²Ľ":99825,"åĵ¥":99826,"éŃĶ":99827,"ä¸įåΰ":99828,"æīĺ":99829,"åºĬ":99830,"欧":99831,"èį£":99832,"æ±ĩ":99833,"æī©":99834,"åģı":99835,"å¢Ļ":99836,"讯":99837,"å©ļ":99838,"æĥł":99839,"æ´ĭ":99840,"å®ľ":99841,"润":99842,"æħ¢":99843,"éĢı":99844,"宽":99845,"顾":99846,"ç´¯":99847,"污":99848,"çĪĨ":99849,"ç§Ł":99850,"æĥĬ":99851,"涨":99852,"饰":99853,"éĺµ":99854,"饮":99855,"æļĸ":99856,"åºŁ":99857,"æĹĹ":99858,"éļĶ":99859,"ç¶ĵ":99860,"åĭĻ":99861,"實":99862,"éĢĶ":99863,"æī«":99864,"çĥĪ":99865,"鼻":99866,"åĪij":99867,"éĹľ":99868,"éĹª":99869,"å¥ĭ":99870,"åĤ¨":99871,"缩":99872,"ä¾µ":99873,"å¬":99874,"ð¬¶":99875,"åĽ½éĻħ":99876,"ç»Ħç»ĩ":99877,"ä¸ĵä¸ļ":99878,"åıijçݰ":99879,"å¸ĮæľĽ":99880,"ç»ıèIJ¥":99881,"åı«":99882,"æĿ¥è¯´":99883,"éļľ":99884,"ä»»ä½ķ":99885,"交æĺĵ":99886,"éĩįçĤ¹":99887,"çļ®":99888,"ç»į":99889,"æ´¾":99890,"ç§ijåѦ":99891,"åºĶç͍":99892,"建çŃij":99893,"èĤī":99894,"æĶ¹éĿ©":99895,"åŁºç¡Ģ":99896,"æ±ī":99897,"åĩºæĿ¥":99898,"è¿Ļä¹Ī":99899,"åĪļ":99900,"åĿIJ":99901,"ä¸įä»ħ":99902,"ä¼ļè®®":99903,"éĿł":99904,"åªĴä½ĵ":99905,"æ°¸":99906,"åĨ²":99907,"èĭı":99908,"央":99909,"çζ":99910,"åłĤ":99911,"å®ŀéĻħ":99912,"è¡Ĺ":99913,"ç«¥":99914,"éĺħ":99915,"äºĭæĥħ":99916,"åİŁåĽł":99917,"éħ¸":99918,"以æĿ¥":99919,"娱":99920,"宫":99921,"åĿĹ":99922,"绩":99923,"éĩİ":99924,"ä¸įå¾Ĺ":99925,"ä¼łå¥ĩ":99926,"硬":99927,"åİħ":99928,"æĹ¢":99929,"ç»ĥ":99930,"èĦij":99931,"å¼±":99932,"æİĮ":99933,"è´´":99934,"æĮĤ":99935,"åħ³éĶ®":99936,"å°ļ":99937,"é¥Ń":99938,"åºĦ":99939,"çϼ":99940,"åľĭ":99941,"æİĪ":99942,"个æľĪ":99943,"äºĪ":99944,"å¸ģ":99945,"è·Ŀ":99946,"æ²ī":99947,"竣":99948,"åĨ¬":99949,"æĬ½":99950,"éĨĴ":99951,"å¼Ł":99952,"触":99953,"èģĺ":99954,"è±Ĩ":99955,"æļ´":99956,"åijĬè¯ī":99957,"豪":99958,"èµ¢":99959,"è·¨":99960,"è³ĩ":99961,"çΏ":99962,"æĬ±":99963,"浪":99964,"麻":99965,"仪":99966,"è¡¡":99967,"奶":99968,"çģ¾":99969,"èµ¶":99970,"èĤ¥":99971,"å§IJ":99972,"åĢº":99973,"éľĩ":99974,"订":99975,"æ¬Ĭ":99976,"ç·":99977,"å»ī
":99978,"ä¿Ĺ":99979,"å¿ĺ":99980,"å¦ĩ":99981,"ç¼ĵ":99982,"åŃķ":99983,"漫":99984,"è£ģ":99985,"çĩĥ":99986,"é»ĺ":99987,"çī¢":99988,"çĪ·":99989,"æĬµ":99990,"宾":99991,"æľīä¸Ģ":99992,"迹":99993,"è¿«":99994,"è²Į":99995,"æľīçļĦ":99996,"ð¬ĺ":99997,"è¿ĺæĺ¯":99998,"æīĢ以":99999,"ä¹Łæĺ¯":100000,"è¿ĻäºĽ":100001,"对äºİ":100002,"åIJ§":100003,"缮åīį":100004,"èĩªå·±çļĦ":100005,"èĥ½å¤Ł":100006,"å¦Ĥä½ķ":100007,"æľºæŀĦ":100008,"åıªæĺ¯":100009,"ç½ijç«Ļ":100010,"åħ¨éĿ¢":100011,"为äºĨ":100012,"å¼Ģåıij":100013,"æĸ°éĹ»":100014,"éĩijèŀį":100015,"ç»§":100016,"客æĪ·":100017,"ä¸Ģèµ·":100018,"èĮ¶":100019,"åħ³æ³¨":100020,"æ°´å¹³":100021,"åİĨåı²":100022,"å¢ŀéķ¿":100023,"é±":100024,"åŁºéĩij":100025,"åºŃ":100026,"åı¶":100027,"ä¿ĥ":100028,"鼨":100029,"æ¶Īè´¹":100030,"èι":100031,"çŁ¥è¯Ĩ":100032,"æĪĺçķ¥":100033,"ç»ıéªĮ":100034,"å³°":100035,"æĽ²":100036,"èĦļ":100037,"åĨ°":100038,"å¤ı":100039,"å½Ĵ":100040,"ç¬Ķ":100041,"èĻij":100042,"çͲ":100043,"åľĪ":100044,"è¯Ĺ":100045,"é½IJ":100046,"容æĺĵ":100047,"çłĶåıij":100048,"骨":100049,"纸":100050,"è·µ":100051,"æĹ§":100052,"çķ¶":100053,"åΏ":100054,"è´·":100055,"åı¬":100056,"ç§ĭ":100057,"æ¶²":100058,"è¡ĮæĶ¿":100059,"çĮ®":100060,"èĤ¤":100061,"éĢIJ":100062,"è¶ĬæĿ¥":100063,"è¶ĬæĿ¥è¶Ĭ":100064,"æĦıè§ģ":100065,"èĪŀ":100066,"åīĤ":100067,"æ¶ī":100068,"ç¨ĭ度":100069,"åħ¬åħ±":100070,"械":100071,"æľ«":100072,"纯":100073,"åͱ":100074,"æ´²":100075,"æĬ¢":100076,"æ¤į":100077,"å¿Ļ":100078,"ä¼°":100079,"å¼¹":100080,"æ³ī":100081,"æľĢ大":100082,"è¶ĭ":100083,"å·§":100084,"ç¦ģ":100085,"æī¶":100086,"åį±":100087,"çıł":100088,"çĨŁ":100089,"æĭľ":100090,"主ä¹ī":100091,"æĿĤ":100092,"éĻĦ":100093,"éģį":100094,"æIJŃ":100095,"æĮ¯":100096,"å¤ļå¹´":100097,"æķ¬":100098,"æijĦ":100099,"纷":100100,"å¼ĥ":100101,"湿":100102,"å¨ĺ":100103,"æ¡£":100104,"é©¶":100105,"æľĹ":100106,"æ®ĸ":100107,"æ¦ľ":100108,"åĵ¡":100109,"ä¸Ģä½ĵ":100110,"æŁ¥çľĭ":100111,"ç¹ģ":100112,"æµĵ":100113,"åħ¬å®ī":100114,"æ½ľ":100115,"è´¯":100116,"éªĹ":100117,"æIJľ":100118,"å·¡":100119,"è¬":100120,"éĬ":100121,"å§Ķä¼ļ":100122,"æĤł":100123,"åī©":100124,"æıŃ":100125,"åŃ£åº¦":100126,"ð«ĺ":100127,"ð¬¬":100128,"ä´":100129,"ðª":100130,"ä½Ĩæĺ¯":100131,"éĥ½æĺ¯":100132,"å¹³åı°":100133,"åŃ¦ä¹ł":100134,"åĵģçīĮ":100135,"ä¸Ķ":100136,"è¿Ļç§į":100137,"æĶ¿çŃĸ":100138,"æĭ¬":100139,"认为":100140,"ä¸Ģèά":100141,"æłĩåĩĨ":100142,"æĶ¯æĮģ":100143,"模å¼ı":100144,"åħ³ç³»":100145,"çļĦæĺ¯":100146,"è¿Ļä¸Ģ":100147,"ä¸įè¦ģ":100148,"çĶļ":100149,"ç²¾ç¥ŀ":100150,"æĭ¥":100151,"åĪ©ç͍":100152,"ä¿ĿæĬ¤":100153,"ä½ľç͍":100154,"èĭ¥":100155,"åĽ½åĨħ":100156,"ä»ĭç»į":100157,"ä¸Ģä¸ĭ":100158,"å·¥ä¸ļ":100159,"缮æłĩ":100160,"æľĢåIJİ":100161,"ä»·å̼":100162,"å°į":100163,"éĵģ":100164,"è°ģ":100165,"ç»ĵæŀĦ":100166,"éĽª":100167,"æĻºèĥ½":100168,"ä¼łç»Ł":100169,"ä½ĵèĤ²":100170,"çĶŁæĢģ":100171,"æĭį":100172,"æİª":100173,"åĨľä¸ļ":100174,"çī¹èī²":100175,"è§Ħ模":100176,"æĹ¶ä»£":100177,"è¿ĩç¨ĭ":100178,"éĴĪ":100179,"æĿ¾":100180,"åĶIJ":100181,"åĮ»çĸĹ":100182,"çģ¯":100183,"åζéĢł":100184,"æł¸å¿ĥ":100185,"ä¸įåı¯":100186,"ç³»åĪĹ":100187,"åIJī":100188,"åľ£":100189,"åĢij":100190,"ä½³":100191,"æĿ¥çľĭ":100192,"æ¯ĶèµĽ":100193,"ä¸ĭæĿ¥":100194,"åĩºäºĨ":100195,"å¹²éĥ¨":100196,"微信":100197,"å½ĵåľ°":100198,"åį·":100199,"åį«çĶŁ":100200,"ä¼Ł":100201,"çĸ«æĥħ":100202,"è°·":100203,"åĩłä¸ª":100204,"éĺ´":100205,"çĶŁçī©":100206,"å°¤":100207,"ä¼Ĭ":100208,"èĤ¯":100209,"éĿ¢ç§¯":100210,"åĪĽéĢł":100211,"æı¡":100212,"åľĨ":100213,"æĻĵ":100214,"æĪIJäºĨ":100215,"åĩ¡":100216,"çĸ¾":100217,"ç«ŀäºī":100218,"讨":100219,"主é¢ĺ":100220,"é²ģ":100221,"迪":100222,"ä¿Ħ":100223,"æĢª":100224,"並":100225,"èĻļ":100226,"æ½®":100227,"çĥ§":100228,"è̳":100229,"æ±ł":100230,"éĢĤåIJĪ":100231,"æł¹æľ¬":100232
,"åĬłçĽŁ":100233,"ç͵è§Ĩ":100234,"æ··":100235,"ç¼ĺ":100236,"çªĹ":100237,"çĬ¯":100238,"æĥ¯":100239,"æĦıä¹ī":100240,"åĬŀæ³ķ":100241,"ä¼ij":100242,"æ»ij":100243,"åĭĩ":100244,"æķ¢":100245,"寻":100246,"è¦Ĩ":100247,"éĢĥ":100248,"ç»ıçIJĨ":100249,"åĿı":100250,"æ³½":100251,"ä¹ĺ":100252,"åĪº":100253,"å±ı":100254,"é¡¿":100255,"亡":100256,"éĤĢ":100257,"åħ¼":100258,"åĭ¤":100259,"æ®ĭ":100260,"æĺł":100261,"æ¯ķä¸ļ":100262,"æĪª":100263,"è·Į":100264,"å£ģ":100265,"åı¦ä¸Ģ":100266,"羣å®ŀ":100267,"磨":100268,"è¯ļ":100269,"å¿ħè¦ģ":100270,"æģĭ":100271,"æĩĤ":100272,"å¾Ĵ":100273,"è°ĵ":100274,"æķı":100275,"æĻ¨":100276,"èĥ¸":100277,"æĭ¼":100278,"å¦Ļ":100279,"诸":100280,"èģĬ":100281,"æĤī":100282,"麼":100283,"åĩŃ":100284,"èĪĴ":100285,"æ¶Ĥ":100286,"è¿ģ":100287,"沿":100288,"å¡ij":100289,"æĽ¿":100290,"æ¾³":100291,"å¿į":100292,"èĢĹ":100293,"龸":100294,"åĩłå¹´":100295,"åĪĬ":100296,"èĦī":100297,"èħIJ":100298,"æ¡Į":100299,"çºł":100300,"æ»ļ":100301,"æĤ²":100302,"åĨĴ":100303,"妹":100304,"çķħ":100305,"纵":100306,"æijĩ":100307,"夺":100308,"è·¯ä¸Ĭ":100309,"忽":100310,"èĸª":100311,"æģIJ":100312,"æĦıæĢĿ":100313,"å«Į":100314,"æı´":100315,"æ°§":100316,"èĢĢ":100317,"éĺ»":100318,"轨":100319,"å¹»":100320,"æįķ":100321,"åĿ¦":100322,"åĵĪåĵĪ":100323,"çĭIJ":100324,"滨":100325,"è²»":100326,"è¿Ł":100327,"人éĥ½":100328,"ç»ĺ":100329,"åı¹":100330,"çµIJ":100331,"æī°":100332,"æ»ĭ":100333,"å¥ij":100334,"åĭŁ":100335,"確":100336,"ð¦":100337,"éĽĨåĽ¢":100338,"æĿİ":100339,"å¼Ģå±ķ":100340,"æıIJåįĩ":100341,"åħ¨åĽ½":100342,"汽车":100343,"åŃ¦æł¡":100344,"æł¹æį®":100345,"è¿Ļæĺ¯":100346,"åĩºçݰ":100347,"éĻĪ":100348,"ç½Ĺ":100349,"èİ·å¾Ĺ":100350,"åĪĺ":100351,"éĶĢåĶ®":100352,"æľªæĿ¥":100353,"éľĢæ±Ĥ":100354,"å®ŀæĸ½":100355,"åĿļæĮģ":100356,"åħ¨çIJĥ":100357,"éĵ¶è¡Į":100358,"æİ§åζ":100359,"é¡»":100360,"åľ°åĮº":100361,"æīĵéĢł":100362,"çļĦè¯Ŀ":100363,"帮åĬ©":100364,"ä½ĵç³»":100365,"è¾¾åΰ":100366,"è§ĦåĪĴ":100367,"åŁ¹è®Ń":100368,"两个":100369,"æĬ¥åijĬ":100370,"åľ°æĸ¹":100371,"å®Įåħ¨":100372,"æİī":100373,"ç»ĵåIJĪ":100374,"å®£ä¼ł":100375,"æ³ķå¾ĭ":100376,"èīºæľ¯":100377,"ç͵影":100378,"說":100379,"ä¸ĢçĤ¹":100380,"è¶ħè¿ĩ":100381,"ç͵åŃIJ":100382,"æĢĿæĥ³":100383,"æķĻåѦ":100384,"éĺ¶æ®µ":100385,"åķĨä¸ļ":100386,"çµģ":100387,"åĪĽä¸ļ":100388,"æĸ¹æ¡Ī":100389,"çݰ代":100390,"æ¡¥":100391,"èIJ½å®ŀ":100392,"带æĿ¥":100393,"产çĶŁ":100394,"ç§Ģ":100395,"æ³°":100396,"ä¹±":100397,"åħ·ä½ĵ":100398,"åĸĿ":100399,"èĵĿ":100400,"å®Ĺ":100401,"åįĩ级":100402,"æ·±åħ¥":100403,"ä¿ĿéĻ©":100404,"ç®Ģåįķ":100405,"çĹĽ":100406,"稳å®ļ":100407,"è¾Ĩ":100408,"å±ŀäºİ":100409,"å·Ŀ":100410,"ä¸įå°ij":100411,"åĴ¨":100412,"ä¸ľè¥¿":100413,"å½¢å¼ı":100414,"娱ä¹IJ":100415,"æŃ£å¸¸":100416,"鸡":100417,"åħħåĪĨ":100418,"å®ŀè·µ":100419,"éĩĮéĿ¢":100420,"è·³":100421,"èĻİ":100422,"æĪIJéķ¿":100423,"æļĹ":100424,"çĿ¡":100425,"罪":100426,"çIJĨ念":100427,"æĮij":100428,"èµĦæľ¬":100429,"å¤ļå°ij":100430,"ä¸ĭéĿ¢":100431,"å¸Ŀ":100432,"åħ¬å¼Ģ":100433,"æ¸IJ":100434,"éķ·":100435,"å±ĭ":100436,"欢è¿İ":100437,"å¿ĥçIJĨ":100438,"çĤİ":100439,"æ¹¾":100440,"è®ĵ":100441,"éĤĦ":100442,"ç³ĸ":100443,"ä¹Į":100444,"åĬ±":100445,"çīĻ":100446,"èħ¿":100447,"å²Ĺ":100448,"ä¼į":100449,"æĪIJåijĺ":100450,"åŃĶ":100451,"å°ıç¼ĸ":100452,"èij£":100453,"泡":100454,"åħĪè¿Ľ":100455,"åħ§":100456,"åĺ´":100457,"è´Ŀ":100458,"è»":100459,"æIJŀ":100460,"æ³Ľ":100461,"鸣":100462,"ç½²":100463,"èĽĭ":100464,"主任":100465,"缮çļĦ":100466,"ä¹ı":100467,"æ´¥":100468,"æĪ´":100469,"ä¸¥æł¼":100470,"çħ¤":100471,"çĮ«":100472,"å͝":100473,"å°Ĭ":100474,"çĶľ":100475,"åŀĥ":100476,"åľ¾":100477,"æĭŁ":100478,"çĦ¦":100479,"é«Ķ":100480,"å®ı":100481,"æ©Ł":100482,"é©»":100483,"æĹģ":100484,"å½»":100485,"éĥ½ä¸į":100486,"æij©":100487,"ä»ĵ":100488,"ä¹³":1
00489,"岸":100490,"è°ĭ":100491,"大å¤ļ":100492,"çģŃ":100493,"èħ¾":100494,"æŁľ":100495,"èĪį":100496,"åħļçļĦ":100497,"å°ĺ":100498,"åįģå¹´":100499,"æĭĴ":100500,"裡":100501,"æŁĶ":100502,"å¹¼":100503,"éĶģ":100504,"ä¸ĵ项":100505,"æīİ":100506,"驾驶":100507,"ç¢İ":100508,"è¢ĭ":100509,"éĶĭ":100510,"壮":100511,"å°ĸ":100512,"çĶµæ±ł":100513,"è¿Ķ":100514,"æ¼ı":100515,"循":100516,"èıĮ":100517,"èĥĥ":100518,"è¾ħ":100519,"éĢĴ":100520,"èĥİ":100521,"éĻª":100522,"寿":100523,"å¥Ķ":100524,"çĮĽ":100525,"纹":100526,"çŁ¥åIJį":100527,"å¿Ĩ":100528,"æ¡ĥ":100529,"æ£ĭ":100530,"éĢĨ":100531,"çĤ¼":100532,"ç±į":100533,"çī§":100534,"æł·çļĦ":100535,"è¾Ľ":100536,"åłĨ":100537,"å®ŀåľ¨":100538,"ä¼ı":100539,"宿":100540,"èµı":100541,"è£Ĥ":100542,"åįĬå¹´":100543,"å̾":100544,"满æĦı":100545,"梯":100546,"æĦıåij³":100547,"åѤ":100548,"ç¥Ŀ":100549,"æĻ¶":100550,"èµĶ":100551,"åģ¿":100552,"èĦĤ":100553,"ç½ļ":100554,"ç¢į":100555,"æ²ĥ":100556,"æĵį":100557,"å´ĩ":100558,"æļĤ":100559,"è·ĥ":100560,"æIJ¬":100561,"å©Ĩ":100562,"éī":100563,"éī´":100564,"åħ´è¶£":100565,"èIJ¥ä¸ļ":100566,"è®Ĭ":100567,"èĦı":100568,"è¾Ī":100569,"å·ŀå¸Ĥ":100570,"è´«åĽ°":100571,"ç©·":100572,"ä¸Ńå°ı":100573,"æ¼Ĥ":100574,"çĻĮ":100575,"èľľ":100576,"ä¼Ļä¼´":100577,"çīµ":100578,"æĤŁ":100579,"éĻ·":100580,"èµĽåŃ£":100581,"樣":100582,"åģ¶":100583,"æĺĨ":100584,"è¢Ń":100585,"æįIJ":100586,"èī°":100587,"æĤ¬":100588,"çĶ¢":100589,"èij¡":100590,"çĽĹ":100591,"å©´":100592,"å°İ":100593,"纽":100594,"åĢ¡":100595,"æī®":100596,"è¨Ń":100597,"æĬij":100598,"ç¡ķ":100599,"è¾ĸ":100600,"éĥģ":100601,"辩":100602,"éĤ»":100603,"çݰåĩº":100604,"è¦ı":100605,"å½¹":100606,"éĺĶ":100607,"åīµ":100608,"诱":100609,"æĥij":100610,"æ·Ģ":100611,"é¢Ī":100612,"侦":100613,"æģ°":100614,"æ£Ģå¯Ł":100615,"éĨ«":100616,"çĦ¶æĺ¯":100617,"åĭĥ":100618,"èĮ«":100619,"äĵ":100620,"ð¬¸":100621,"ä½ľä¸º":100622,"çļĦ人":100623,"éĤ£ä¹Ī":100624,"ç¾İåĽ½":100625,"è¿ĺæľī":100626,"æıIJé«ĺ":100627,"èϽ":100628,"åħ·æľī":100629,"åĮħæĭ¬":100630,"æĪĸèĢħ":100631,"ä¸įè¿ĩ":100632,"ä¸Ĭæµ·":100633,"åĮ»éĻ¢":100634,"èµĦéĩij":100635,"çĶļèĩ³":100636,"åĪ¶åº¦":100637,"è§£åĨ³":100638,"èģĶç½ij":100639,"ç»§ç»Ń":100640,"建ç«ĭ":100641,"è¿Ľä¸ĢæŃ¥":100642,"æĿIJæĸĻ":100643,"ä»Ĭ天":100644,"å¿ħé¡»":100645,"åIJĦç§į":100646,"çİ°åľº":100647,"ä»ĸçļĦ":100648,"å¢ŀåĬł":100649,"é¢ĨåŁŁ":100650,"åıĤä¸İ":100651,"æĮģç»Ń":100652,"ä¹ĭä¸Ģ":100653,"çī¹åĪ«":100654,"é±¼":100655,"åħ±åIJĮ":100656,"åĬª":100657,"çİī":100658,"人们":100659,"åħĪçĶŁ":100660,"ä¼ĺåĬ¿":100661,"ä¿ĿæĮģ":100662,"ä½ľåĵģ":100663,"çīĽ":100664,"æĪIJæľ¬":100665,"æĶ¶åħ¥":100666,"åıĬæĹ¶":100667,"è´Łè´£":100668,"æİ¥åıĹ":100669,"èįIJ":100670,"åıªè¦ģ":100671,"羣çļĦ":100672,"导èĩ´":100673,"æľºåζ":100674,"è¡ĮåĬ¨":100675,"æĸ°çļĦ":100676,"å®ĮåĸĦ":100677,"为ä»Ģä¹Ī":100678,"ä¸Ń央":100679,"æĪIJç«ĭ":100680,"æĦŁè§ī":100681,"åıĺåĮĸ":100682,"åıĹåΰ":100683,"å¹¶ä¸į":100684,"åŃĻ":100685,"æĸ½å·¥":100686,"æĺİæĺ¾":100687,"è¿ĩåİ»":100688,"åıijæĮ¥":100689,"羣æŃ£":100690,"åŁºåľ°":100691,"æĺİç¡®":100692,"èĥ¡":100693,"许å¤ļ":100694,"ä¸Ģå¹´":100695,"æĸ¹åIJij":100696,"æģ©":100697,"çĽ¸ä¿¡":100698,"åľ³":100699,"详ç»Ĩ":100700,"äºĭä¸ļ":100701,"çĶŁåij½":100702,"åĴ¨è¯¢":100703,"æĸĩæĺİ":100704,"çijŀ":100705,"绿èī²":100706,"èİ«":100707,"æĦıè¯Ĩ":100708,"æĬķåħ¥":100709,"åĬłå¿«":100710,"æ¢ħ":100711,"ç¿»":100712,"å¼ĢæĶ¾":100713,"æĻ®éĢļ":100714,"åįıä¼ļ":100715,"æĪIJ绩":100716,"ä»Ļ":100717,"å¯Ĵ":100718,"è¯ģåΏ":100719,"认è¯Ĩ":100720,"丹":100721,"大éĩı":100722,"è¿ħ":100723,"åģļåΰ":100724,"设æĸ½":100725,"è´¸æĺĵ":100726,"èĥ½æºIJ":100727,"æĹ¶æľŁ":100728,"ä¸Ģ天":100729,"æ²»çIJĨ":100730,"åĺī":100731,"å®ĩ":100732,"丰å¯Į":100733,"举è¡Į":100734,"æĪIJæŀľ":100735,"èĤ¯å®ļ":100736,"çĭĹ":100737,"åĬ¨åĬĽ":100738,"森":100739,"åĩ
łä¹İ":100740,"åĽłç´ł":100741,"æ°ijæĹı":100742,"æ´ŀ":100743,"ç½ijåıĭ":100744,"åIJĪçIJĨ":100745,"广大":100746,"æ®Ĭ":100747,"æ´Ľ":100748,"æĿ¯":100749,"èĴĻ":100750,"ç͍äºİ":100751,"èŀįèµĦ":100752,"ç¥ĸ":100753,"æľºæ¢°":100754,"举åĬŀ":100755,"èĩªåĬ¨":100756,"åĬŀåħ¬":100757,"é»ŀ":100758,"éĽĦ":100759,"å̼å¾Ĺ":100760,"çĮª":100761,"以为":100762,"æĺĮ":100763,"è·Ŀ离":100764,"åIJ¸å¼ķ":100765,"ç»ķ":100766,"éļĨ":100767,"计ç®Ĺ":100768,"éĺŁä¼į":100769,"大ä¼ļ":100770,"å¼ķèµ·":100771,"çī¹çĤ¹":100772,"èĥ¶":100773,"å¹´è½»":100774,"æľ¬èº«":100775,"æľºåħ³":100776,"å®ĺæĸ¹":100777,"éĥij":100778,"æµĻ":100779,"è§Ĵèī²":100780,"èij£äºĭ":100781,"为主":100782,"æĹłè®º":100783,"ä¹łæĥ¯":100784,"æ¥ļ":100785,"æĭĵ":100786,"ç»Łè®¡":100787,"åħĦ":100788,"å¹¿æ³Ľ":100789,"åįĢ":100790,"污æŁĵ":100791,"è«ĭ":100792,"èĬĤ缮":100793,"伦":100794,"è¦ĨçĽĸ":100795,"èĢIJ":100796,"æī¶è´«":100797,"ç»ıåİĨ":100798,"éĩįè¦ģçļĦ":100799,"èĤ¡ä¸ľ":100800,"æĭĽèģĺ":100801,"åĽĽä¸ª":100802,"æĩī":100803,"èĥŀ":100804,"æijĨ":100805,"é«ĺéĢŁ":100806,"麦":100807,"åİŁåĪĻ":100808,"èݱ":100809,"æĽ´å¥½":100810,"éķľ":100811,"åĩĮ":100812,"åŀĥåľ¾":100813,"é̲":100814,"çģ°":100815,"éĵº":100816,"äºĭæķħ":100817,"çĶĺ":100818,"空æ°Ķ":100819,"é¾Ħ":100820,"èı²":100821,"çĵ¶":100822,"æĺ¨":100823,"æĹ¥æĬ¥":100824,"æµ®":100825,"åľ°åĽ¾":100826,"åijĪ":100827,"大åĬĽ":100828,"绪":100829,"å¸ħ":100830,"æľįåĭĻ":100831,"ä¸įéĶĻ":100832,"乡æĿij":100833,"å±¥":100834,"å¹³æĸ¹":100835,"éĹ²":100836,"æī£":100837,"ç´łè´¨":100838,"èµ´":100839,"éģŃ":100840,"èIJ¨":100841,"èĩªä¸»":100842,"éĩijå±ŀ":100843,"èī¯å¥½":100844,"两年":100845,"æ³¥":100846,"é¢ľ":100847,"精彩":100848,"ä¸Ńåįİ":100849,"æĻĭ":100850,"ä¹łè¿ij":100851,"ä¹łè¿ijå¹³":100852,"æĪĺ士":100853,"åģļçļĦ":100854,"éªij":100855,"æ»´":100856,"çĵľ":100857,"çīĪæĿĥ":100858,"èĤł":100859,"æľĥåĵ¡":100860,"çıį":100861,"種":100862,"仿":100863,"çī©ä¸ļ":100864,"åĢĭ人":100865,"妻":100866,"伸":100867,"æ±Ĺ":100868,"æĹº":100869,"çIJĨæĥ³":100870,"æij¸":100871,"è¿Ŀæ³ķ":100872,"å®Įæķ´":100873,"åݦ":100874,"è¸ı":100875,"æĸij":100876,"æ¡Ĥ":100877,"ä½ĵåζ":100878,"師":100879,"æĿĨ":100880,"殿":100881,"æ¯ģ":100882,"é¦Ī":100883,"è§Ĵ度":100884,"欣":100885,"çĥ¦":100886,"èĤº":100887,"éĩĩ访":100888,"æijĺ":100889,"æĮ¡":100890,"æ·ĺ":100891,"åħ»èĢģ":100892,"çĤ¸":100893,"è¿Ī":100894,"åİī":100895,"åĿĬ":100896,"è¾£":100897,"åĩĿ":100898,"泪":100899,"çĸı":100900,"æİĺ":100901,"åĥıæĺ¯":100902,"éĽķ":100903,"ç¼Ŀ":100904,"èį·":100905,"æį·":100906,"åł¡":100907,"åı¥è¯Ŀ":100908,"çĸ¼":100909,"æłı":100910,"éģµ":100911,"碳":100912,"å·¥åķĨ":100913,"æIJº":100914,"åĪ¥":100915,"ä¹Ļ":100916,"æĹĭ":100917,"æĥľ":100918,"ä¸Ģ大":100919,"å±Ĥ次":100920,"èµĸ":100921,"æĬ¬":100922,"æ¨Ĥ":100923,"è¯ŀ":100924,"åħĴ":100925,"篮":100926,"èĤĥ":100927,"å§¿":100928,"æĬļ":100929,"çĵ·":100930,"ç͵åĬ¨":100931,"æĸ°åĨł":100932,"æ¶µ":100933,"ç¢ij":100934,"æ·®":100935,"æĹ¨":100936,"踪":100937,"æ¸Ķ":100938,"æĦĪ":100939,"åıĶ":100940,"åįĹçľģ":100941,"義":100942,"å§Ķ书记":100943,"貸":100944,"æ¶Į":100945,"è«ĸ":100946,"èIJĦ":100947,"æıı":100948,"å¿§":100949,"辦":100950,"å¦Ĩ":100951,"æīŃ":100952,"åijµ":100953,"éģ¥":100954,"許":100955,"ä»ĩ":100956,"åįģä¸ī":100957,"åī²":100958,"èªį":100959,"èΰ":100960,"é¢ĩ":100961,"饱":100962,"çĭł":100963,"é«ĺçļĦ":100964,"çµ±":100965,"æħİ":100966,"é¢ģ":100967,"åIJĪéĢĤ":100968,"æµ´":100969,"èµĭ":100970,"æĬ¼":100971,"妥":100972,"éĻ¢éķ¿":100973,"èĢķ":100974,"辨":100975,"æħ°":100976,"åįģåĽĽ":100977,"æľµ":100978,"èĵĦ":100979,"æŀ¢":100980,"å»·":100981,"æĤĦ":100982,"涯":100983,"磩":100984,"åŃIJéĩĮ":100985,"çĬ¹":100986,"å±Ģéķ¿":100987,"éIJ":100988,"å¥ł":100989,"ä¼ļéķ¿":100990,"æĵļ":100991,"ä¸įåıĬ":100992,"åįģä¹Ŀ":100993,"欺":100994,"躺":100995,"éĺIJ":100996,"çºĮ":10
0997,"註":100998,"åĨĬ":100999,"èŃĺ":101000,"é«ĺçŃī":101001,"èħº":101002,"å¤ķ":101003,"ç»ij":101004,"åͤ":101005,"èķ´":101006,"çķľ":101007,"æħĭ":101008,"åıĻ":101009,"åıĥ":101010,"峡":101011,"人大":101012,"éħ¿":101013,"éģ©":101014,"奢":101015,"åı£æ°Ķ":101016,"éĮĦ":101017,"éı":101018,"åĭĺ":101019,"è´¿":101020,"éļª":101021,"éĭ":101022,"éļ¶":101023,"ð¥":101024,"ð¬£":101025,"ð£":101026,"ð«į":101027,"ð¬³":101028,"ð«ĵ":101029,"ð«Ħ":101030,"ð«Ł":101031,"ð¨±":101032,"äĹ":101033,"以åıĬ":101034,"æľīéĻIJ":101035,"åij¢":101036,"åIJĹ":101037,"çľĭåΰ":101038,"计åĪĴ":101039,"è¿Ľåħ¥":101040,"缴æİ¥":101041,"åĪĨæŀIJ":101042,"åıªæľī":101043,"设å¤ĩ":101044,"åħ¶å®ŀ":101045,"åĬłå¼º":101046,"ä¸ŃçļĦ":101047,"ä¿Ŀéļľ":101048,"èĢģå¸Ī":101049,"人æīį":101050,"å¾Ĺåΰ":101051,"é£İéĻ©":101052,"ä¸Ģç§į":101053,"空éĹ´":101054,"æĪijåĽ½":101055,"ä¹ĭåīį":101056,"ä¸ĵå®¶":101057,"æĿ¨":101058,"æĹ¥æľ¬":101059,"群ä¼Ĺ":101060,"åıĤåĬł":101061,"æķĪæŀľ":101062,"æľīåħ³":101063,"å®¶åºŃ":101064,"åĮºåŁŁ":101065,"åĬªåĬĽ":101066,"éļıçĿĢ":101067,"æĹłæ³ķ":101068,"交æµģ":101069,"è¡Į为":101070,"æ£ĢæŁ¥":101071,"æľŁéĹ´":101072,"å¦ĤæŃ¤":101073,"èĤ¡ä»½":101074,"å½ĵæĹ¶":101075,"è£ħå¤ĩ":101076,"åĩĨå¤ĩ":101077,"éħĴåºĹ":101078,"è¿IJåĬ¨":101079,"æıIJåĩº":101080,"å·¦åı³":101081,"æİªæĸ½":101082,"é£Łåĵģ":101083,"æ¶Īè´¹èĢħ":101084,"åѦéĻ¢":101085,"æĮĩ导":101086,"è¿IJèIJ¥":101087,"éĩį大":101088,"åĨľæĿij":101089,"éĢłæĪIJ":101090,"æĶ¿æ²»":101091,"éĴĪ对":101092,"æŃ£å¼ı":101093,"åıĸå¾Ĺ":101094,"éĤ£ä¸ª":101095,"éĽĨä¸Ń":101096,"åıªèĥ½":101097,"å¿«éĢŁ":101098,"身ä½ĵ":101099,"åħļåijĺ":101100,"èģĶåIJĪ":101101,"åĬĽéĩı":101102,"éĥ½æľī":101103,"æħ§":101104,"å¡Ķ":101105,"åĪ«äºº":101106,"表çݰ":101107,"æķħäºĭ":101108,"ä¸ĢåĪĩ":101109,"å°ĩ":101110,"èµĦæĸĻ":101111,"åŁ¹åħ»":101112,"éĺħ读":101113,"æľī人":101114,"èIJ¥éĶĢ":101115,"çĽijçĿ£":101116,"çݯä¿Ŀ":101117,"èĢĥèĻij":101118,"æ·±åľ³":101119,"严éĩį":101120,"èĮĥåĽ´":101121,"å§Ķåijĺ":101122,"çĽij管":101123,"ä¸ī个":101124,"è£ħä¿®":101125,"åħ¬éĩĮ":101126,"åĪĨåĪ«":101127,"çIJĨè§£":101128,"飩":101129,"åĬłå·¥":101130,"è®¤çľŁ":101131,"ä¸į好":101132,"åݻ年":101133,"éĻįä½İ":101134,"æľºä¼ļ":101135,"åįıè®®":101136,"符åIJĪ":101137,"å¢ŀ强":101138,"æĬĢèĥ½":101139,"é¦ĸåħĪ":101140,"秦":101141,"ä¸ģ":101142,"å°¾":101143,"æľīäºĨ":101144,"åľ°äº§":101145,"æ¸ł":101146,"æĸ¹ä¾¿":101147,"ç§»åĬ¨":101148,"éĢŁåº¦":101149,"å°¤åħ¶":101150,"éĢļçŁ¥":101151,"åĿĽ":101152,"éģ¿åħį":101153,"æģ¢":101154,"è´¡":101155,"èģĮå·¥":101156,"å®ŀåĬĽ":101157,"æĺ¯ä¸Ģç§į":101158,"åIJ¯åĬ¨":101159,"çĸ¾çĹħ":101160,"æĿ¥äºĨ":101161,"çĽ¸å¯¹":101162,"çݰå®ŀ":101163,"èŀįåIJĪ":101164,"åIJĮæł·":101165,"åħ¬åijĬ":101166,"ç®Ĭ":101167,"ç´«":101168,"ä¸ĭåİ»":101169,"ä¼łæĴŃ":101170,"æľĢ好":101171,"ä¼ĺè´¨":101172,"æ²Ĵ":101173,"æĮº":101174,"æĹ¦":101175,"诺":101176,"ä¸ĢåIJį":101177,"éģĵè·¯":101178,"示èĮĥ":101179,"è¿ĩæĿ¥":101180,"åIJĮåѦ":101181,"é¼ĵ":101182,"æĿŃ":101183,"æľ¬æ¬¡":101184,"åIJĮæĦı":101185,"ä¸ĸ纪":101186,"ç¾Ĭ":101187,"欲":101188,"å·¥èīº":101189,"çĵ¦":101190,"人士":101191,"æľīæīĢ":101192,"ä»İäºĭ":101193,"æľīå¾Īå¤ļ":101194,"ä¸įäºĨ":101195,"å²Ĺä½į":101196,"åıĺå¾Ĺ":101197,"åĬ³åĬ¨":101198,"å¤Ħäºİ":101199,"å¹³åĿĩ":101200,"形象":101201,"å¡ŀ":101202,"åħ±äº«":101203,"çĿĽ":101204,"åĪ©æ¶¦":101205,"æŃ£æĺ¯":101206,"å¾Ģå¾Ģ":101207,"缸æ¯Ķ":101208,"横":101209,"åĪ·":101210,"æµĻæ±Ł":101211,"大éĥ¨åĪĨ":101212,"å¤ļ个":101213,"æĤ¨çļĦ":101214,"ç͵åķĨ":101215,"å¾®åįļ":101216,"å§ĭç»Ī":101217,"çĬ¯ç½ª":101218,"æĺ¯åľ¨":101219,"ç»ĦåIJĪ":101220,"åİŁæĿ¥":101221,"æ¸ħæ¥ļ":101222,"åIJĦåľ°":101223,"æĦŁåıĹ":101224,"å½ĵä¸Ń":101225,"è¶ĭåĬ¿":101226,"æĻ¯åĮº":101227,"羣æĺ¯":101228,"ä¾ĽåºĶ":101229,"转åŀĭ":101230,"çĭĤ":101231,"èĨľ":101232,"èĭĹ":101233,"å¿ł":101234,"å¾Ī大":101235,"èĤ¡
æĿĥ":101236,"ç¾İåħĥ":101237,"æİĴåIJį":101238,"åĬ¨çī©":101239,"éĶħ":101240,"墨":101241,"主å¸Ń":101242,"å¾Ī好":101243,"ç»Ŀ对":101244,"æĿľ":101245,"转载":101246,"çĴĥ":101247,"æĿijæ°ij":101248,"åIJ¨":101249,"åĽŃåĮº":101250,"é«ĺ度":101251,"çī©è´¨":101252,"è¾ī":101253,"æĹ¥å¸¸":101254,"æıĴ":101255,"ä¸īå¹´":101256,"ä½ĵçݰ":101257,"æīįæĺ¯":101258,"代çIJĨ":101259,"ä¸į管":101260,"æģĴ":101261,"åľ°ä½į":101262,"ç²®":101263,"èĸĦ":101264,"æĺİçϽ":101265,"ä¸Ģèĩ´":101266,"æĽ¼":101267,"åĵŃ":101268,"åĩ¤":101269,"åĬ²":101270,"æķĮ":101271,"æĪĺæĸĹ":101272,"主ä½ĵ":101273,"åħ¬å¸ĥ":101274,"åıĤèĢĥ":101275,"èĪªç©º":101276,"寺":101277,"åѦä¼ļ":101278,"åıįæĺł":101279,"ç¾İ丽":101280,"太éĺ³":101281,"建æĪIJ":101282,"æħ¢æħ¢":101283,"åIJĦ个":101284,"éĤ¦":101285,"ç»ĦæĪIJ":101286,"ä¸ī大":101287,"éͦ":101288,"大å¤ļæķ°":101289,"æ¦Ĥ念":101290,"éŃĤ":101291,"åħ¬çĽĬ":101292,"èįĴ":101293,"身份":101294,"æ·±åĪ»":101295,"åħ©":101296,"ç»ıåħ¸":101297,"åIJĦ项":101298,"èĻķ":101299,"è¿ĽæŃ¥":101300,"åįģäºĮ":101301,"æī§æ³ķ":101302,"æĥ³åΰ":101303,"æĦŁæŁĵ":101304,"åķĨåĬ¡":101305,"å°ıç»Ħ":101306,"èͬ":101307,"çıŃåŃIJ":101308,"åIJĮå¿Ĺ":101309,"éĿ¢ä¸´":101310,"çĤĴ":101311,"å¤ļç§į":101312,"è§ĤçĤ¹":101313,"åĵªéĩĮ":101314,"å°Ŀ":101315,"å§Ĩ":101316,"èħ¹":101317,"åŁİåĮº":101318,"太å¤ļ":101319,"çĹħæ¯Ĵ":101320,"åľ¨äºİ":101321,"æīĢè°ĵ":101322,"æĻ°":101323,"æŀĿ":101324,"æĭĸ":101325,"å®ħ":101326,"æķ´æ²»":101327,"ä½ıæĪ¿":101328,"åģ·":101329,"çĨĬ":101330,"èµģ":101331,"æ°Ľ":101332,"æł¼å±Ģ":101333,"åŁºç¡Ģä¸Ĭ":101334,"èĥĨ":101335,"åħ½":101336,"鼶åĶ®":101337,"åĿ¡":101338,"女åŃ©":101339,"æĴŀ":101340,"åħ¨åĬĽ":101341,"åĴĸ":101342,"èĤ©":101343,"çľī":101344,"èĩ³äºİ":101345,"åħļç»Ħ":101346,"ä¸Ģä»¶":101347,"æĭĨ":101348,"äºĭå®ŀ":101349,"åĤ³":101350,"æ¹ĺ":101351,"ç¶²ç«Ļ":101352,"循çݯ":101353,"åIJĮæ¯Ķ":101354,"æĭĶ":101355,"åĮ»èį¯":101356,"åħ»æ®ĸ":101357,"åĽºå®ļ":101358,"å®ŀéĻħä¸Ĭ":101359,"è®°å¾Ĺ":101360,"åĪ©äºİ":101361,"æĤ¦":101362,"æĭ³":101363,"èĤĿ":101364,"æķĪçĽĬ":101365,"該":101366,"æ°ij主":101367,"çĹĩçĬ¶":101368,"風":101369,"å¹¼åĦ¿":101370,"å§ij":101371,"æĪĴ":101372,"ä¸ĭçļĦ":101373,"渡":101374,"å¹´åºķ":101375,"è®°å¿Ĩ":101376,"åIJIJ":101377,"大å¹ħ":101378,"å¾½":101379,"åħ¬ä¼Ĺ":101380,"ä¿¡å¿ĥ":101381,"çİĽ":101382,"ä¼ļä¸Ĭ":101383,"ä¹Ķ":101384,"æijĦå½±":101385,"æ£ĭçīĮ":101386,"éĻķ":101387,"åºĶæĢ¥":101388,"æĶ¶è´¹":101389,"æİ§èĤ¡":101390,"仪å¼ı":101391,"çŀ¬":101392,"æīĢåľ¨":101393,"碰":101394,"å§ĵ":101395,"é¡Į":101396,"æĶ¯éĥ¨":101397,"使åij½":101398,"çĤī":101399,"å¯Ħ":101400,"翼":101401,"åľ°ä¸ĭ":101402,"è¾ŀ":101403,"俱":101404,"主æĮģ":101405,"è´§å¸ģ":101406,"æģ¨":101407,"èĤĮ":101408,"çĽĪ":101409,"éĶ»":101410,"å¿ĹæĦ¿":101411,"类似":101412,"æĮĸ":101413,"éĢ»":101414,"總":101415,"纪念":101416,"åķ¥":101417,"弯":101418,"åIJįåŃĹ":101419,"åģ¥èº«":101420,"çļĦå¿ĥ":101421,"驱":101422,"èĥĮåIJİ":101423,"æ³ķå¸Ī":101424,"ç²Ĵ":101425,"èĥ½éĩı":101426,"è¾°":101427,"èī³":101428,"å½¼":101429,"段æĹ¶éĹ´":101430,"åIJĪæ³ķ":101431,"æĵ¦":101432,"ç¾½":101433,"åݨ":101434,"æĪij说":101435,"äºĭåĬ¡":101436,"åĩłå¤©":101437,"åħģ":101438,"ç¼´":101439,"åįĵ":101440,"两ç§į":101441,"çĭ¬çī¹":101442,"帶":101443,"éĴ»":101444,"æĥ©":101445,"é¢ĨåħĪ":101446,"è¶³å¤Ł":101447,"壳":101448,"æĦıåij³çĿĢ":101449,"åĪĨå¸ĥ":101450,"ä¹ĥ":101451,"éģĭ":101452,"佩":101453,"è°±":101454,"çģ£":101455,"èį¡":101456,"贯彻":101457,"å¹¾":101458,"ç£ģ":101459,"åħ¸åŀĭ":101460,"åīĩ":101461,"åĨ»":101462,"æ¬ł":101463,"ä¸įä¹ħ":101464,"浦":101465,"éŃħ":101466,"å¼ĢäºĨ":101467,"使ç͍èĢħ":101468,"è¿Ļ款":101469,"å°Ī":101470,"èĦ±è´«":101471,"æĶ»åĿļ":101472,"ç®Ĺæĺ¯":101473,"ç¨Ģ":101474,"æĹłäºº":101475,"åłµ":101476,"å¥ı":101477,"éĥ½å¸Ĥ":101478,"åı¯è§ģ":101479,"ä¸įåĩº":101480,"æ·»":101481,"äºı":101482,"ç¾İ好":101483,"è
ĥĸ":101484,"飵":101485,"æłĩå¿Ĺ":101486,"èĬĤèĥ½":101487,"æĬ«":101488,"å°º":101489,"寸":101490,"ä¸Ģ代":101491,"é¢Ĺ":101492,"è̶":101493,"èĴ¸":101494,"åĸ®":101495,"滿":101496,"çĮľ":101497,"æµĨ":101498,"åŁĥ":101499,"åįĥä¸ĩ":101500,"èµĮ":101501,"èģ²":101502,"ä½ľé£İ":101503,"質":101504,"寨":101505,"年人":101506,"åį°è±¡":101507,"æ¡¶":101508,"æĴ¤":101509,"åįģäºĶ":101510,"æ¯ħ":101511,"沪":101512,"åĽ½æľī":101513,"大éĩıçļĦ":101514,"御":101515,"å¯ĵ":101516,"è¦ĸ":101517,"æ¼Ĥ亮":101518,"çľł":101519,"çĤŃ":101520,"é»İ":101521,"èϹ":101522,"åĪ©äºļ":101523,"èŃī":101524,"æµı":101525,"åįģåħ«":101526,"丢":101527,"è¾½":101528,"æľīä¸ĢäºĽ":101529,"æħĪ":101530,"åģľè½¦":101531,"å®ł":101532,"è§£æĶ¾":101533,"æľīå¤ļ":101534,"éĤĬ":101535,"常è§ģ":101536,"æĬ¹":101537,"纤":101538,"親":101539,"æ¡Ĩ":101540,"èİŀ":101541,"æ°§åĮĸ":101542,"è¿Ļä»¶":101543,"åĩ°":101544,"æŁ´":101545,"åıijç͵":101546,"é¼ł":101547,"转åĮĸ":101548,"å¨ĥ":101549,"æĮ¤":101550,"罩":101551,"å¯ĨåĪĩ":101552,"æĪijä¸į":101553,"é«ĺæĸ°":101554,"ä¸Ģç¯ĩ":101555,"è¿Ľç¨ĭ":101556,"è¡°":101557,"è¿ĺä¸į":101558,"çħĮ":101559,"æĸ°åįİ":101560,"èĤ¿":101561,"滩":101562,"ä¸Ģæµģ":101563,"è¯Ī":101564,"å®ŀä½ĵ":101565,"å¤ĸåĽ½":101566,"躲":101567,"èµł":101568,"覺":101569,"æ¢Ŀ":101570,"ä¸įè§ģ":101571,"è¨Ĭ":101572,"åĮ¹":101573,"åįµ":101574,"çĩ¥":101575,"æħķ":101576,"齿":101577,"å®´":101578,"饼":101579,"èij¡èIJĦ":101580,"å°ıå¿ĥ":101581,"æģ¼":101582,"éĻĮ":101583,"æĺĤ":101584,"åĥ¹":101585,"èĬĿ":101586,"æ¯ı个人":101587,"åīįæıIJ":101588,"ä½ĵä¼ļ":101589,"æ¨Ļ":101590,"æIJľçĭIJ":101591,"对åħ¶":101592,"丧":101593,"èľĤ":101594,"浸":101595,"調":101596,"åĿª":101597,"é¢ĸ":101598,"åIJį为":101599,"笼":101600,"èĪĮ":101601,"æľ¬ä¹¦":101602,"èģ¯":101603,"纺":101604,"ç®Ģ缴":101605,"éĽ¢":101606,"ç¾İçļĦ":101607,"éļ¨":101608,"é«ĺå³°":101609,"è¿Ļå®¶":101610,"åĤ¬":101611,"å°¸":101612,"ç¡ķ士":101613,"èŃ·":101614,"è°¨":101615,"æĺı":101616,"æĶ¿åįı":101617,"è¡Ķ":101618,"ç¿Ĵ":101619,"åľĴ":101620,"åĽ½æ°ij":101621,"主è§Ĵ":101622,"è£ķ":101623,"伪":101624,"åºŀ":101625,"æ°ijèIJ¥":101626,"æĥ§":101627,"ç§ĺ书":101628,"çĹķ":101629,"çϾåĪĨ":101630,"溶":101631,"æĹłçĸij":101632,"çļĦçľ¼":101633,"æĵİ":101634,"ä¼Łå¤§":101635,"å½°":101636,"åħ¬å®īå±Ģ":101637,"ç³ķ":101638,"å¼¥":101639,"åĤĻ":101640,"ä¹¾":101641,"毫ä¸į":101642,"注æĺİ":101643,"å̻":101644,"æĦī":101645,"æķ¦":101646,"馨":101647,"æĶĢ":101648,"éĢĿ":101649,"åı¯éĿł":101650,"夸":101651,"åľĺ":101652,"éĿ¢ä¸Ĭ":101653,"æĬĸ":101654,"èĦĨ":101655,"é©°":101656,"ä¼IJ":101657,"妨":101658,"å®ļäºĨ":101659,"ç³Ĭ":101660,"æŃ¡":101661,"éĥ¨éķ¿":101662,"ç§ī":101663,"èĪĨ":101664,"åĪijäºĭ":101665,"åIJµ":101666,"æ¤Ĵ":101667,"è¡ĵ":101668,"豫":101669,"èı©":101670,"åѵ":101671,"饲":101672,"就好":101673,"åłª":101674,"ä¸īè§Ĵ":101675,"åľºæ¯ĶèµĽ":101676,"ä¸įåģľ":101677,"æĵħ":101678,"åħ¨æĸĩ":101679,"æ³ģ":101680,"åѦä½į":101681,"æ±°":101682,"éłĺ":101683,"åıł":101684,"éļĽ":101685,"å¸IJ":101686,"çľĭåĩº":101687,"åĮł":101688,"å±ĢéĿ¢":101689,"æ³Į":101690,"è°Ĭ":101691,"åIJĮæľŁ":101692,"æĬķæłĩ":101693,"奴":101694,"æĿ¥çľĭçľĭ":101695,"èĦ¾":101696,"èŀº":101697,"æŃī":101698,"çĽ¯":101699,"ç¨İåĬ¡":101700,"å»Ĭ":101701,"æİ©":101702,"æħ¨":101703,"çĽ¼":101704,"èĬĴ":101705,"è®Ģ":101706,"æĮ£":101707,"èĮħ":101708,"æĸ¥":101709,"æ¤ħ":101710,"åΰæĿ¥":101711,"èijĹä½ľ":101712,"çĭ±":101713,"äºĮæīĭ":101714,"ä»İæĿ¥":101715,"çĸ²":101716,"åºĬä¸Ĭ":101717,"æĸ°æµª":101718,"æ³Ħ":101719,"å¢ŀå̼":101720,"丼":101721,"æļij":101722,"ä»İä¸ļ":101723,"æ·ĭ":101724,"å¤ļæł·":101725,"æľ´":101726,"份é¢Ŀ":101727,"æŀ£":101728,"西çľģ":101729,"æľ¬è´¨":101730,"深深":101731,"èīĩ":101732,"绵":101733,"产å̼":101734,"æ¼ł":101735,"èħ»":101736,"çŃĽ":101737,"åİĮ":101738,"æģŃ":101739,"å«Įçĸij":101740,"æĪ¶":101741,"æ»ŀ":10174
2,"èĨĢ":101743,"åĬ£":101744,"座è°Ī":101745,"常æĢģ":101746,"çļĦæĥħ":101747,"覽":101748,"å¯Ĥ":101749,"åĮĨ":101750,"èĩº":101751,"顯":101752,"çķı":101753,"éģ£":101754,"åįľ":101755,"çŃīå¥ĸ":101756,"責":101757,"溯":101758,"éİ":101759,"çĤ¹å¤´":101760,"èĵ¬":101761,"決":101762,"éħ¬":101763,"éģĬ":101764,"è³¼":101765,"註åĨĬ":101766,"æľ¬æĬ¥":101767,"çµķ":101768,"æ´»æĢ§":101769,"åħij":101770,"éĮ¯":101771,"åĨ¶":101772,"åĸ»":101773,"æºĸ":101774,"èĤ¢":101775,"æºĥ":101776,"æĹ¬":101777,"åīĬ":101778,"çIJĨäºĭ":101779,"å±ł":101780,"æ²§":101781,"èļĢ":101782,"鼻åŃIJ":101783,"为æŃ¢":101784,"常å§Ķ":101785,"çµĤ":101786,"éĬ·":101787,"çĭĢ":101788,"ä¾£":101789,"èĥĢ":101790,"èѰ":101791,"çĶ¨è½¦":101792,"åĻª":101793,"æŃ·":101794,"åįĶ":101795,"åι":101796,"竣æĺ¯":101797,"é©Ĺ":101798,"èIJĿ":101799,"çĻ«":101800,"çĹ«":101801,"æŃ§":101802,"å¼Ĭ":101803,"媽":101804,"çıĬ":101805,"è¡·":101806,"éľī":101807,"åŁºçĿ£":101808,"éļ±":101809,"æ°¨":101810,"绸":101811,"å°¼æĸ¯":101812,"çĥĺ":101813,"æľŁåĨħ":101814,"è°ħ":101815,"éĽĩ":101816,"éļĻ":101817,"åĸī":101818,"åī¥":101819,"çĹĺ":101820,"æĮ½":101821,"çĵ£":101822,"æ¹Ľ":101823,"樱":101824,"æ¾İ":101825,"æ¹ĥ":101826,"åĨ¬å¥¥":101827,"棵":101828,"å®°":101829,"åŀĴ":101830,"æ§ĭ":101831,"ä¾Ī":101832,"èĮĦ":101833,"åĺ¿":101834,"èıĩ":101835,"çĻĤ":101836,"åĬĥ":101837,"éį":101838,"èͽ":101839,"çŀŃ":101840,"æķŀ":101841,"ä¹ĸ":101842,"飧":101843,"è¾ľ":101844,"æĩĪ":101845,"ä½£":101846,"çŀ»":101847,"åŁĶ":101848,"èĪħ":101849,"å®ŀäºĭ":101850,"é¨":101851,"å§¥":101852,"絡":101853,"åĺ»":101854,"çķ¢":101855,"æ²ĥå°Ķ":101856,"è¿Ħ":101857,"èĤĩ":101858,"æħij":101859,"ã§":101860,"äı":101861,"ðł":101862,"ð¬ĩ":101863,"ð«Ń":101864,"ð«IJ":101865,"ã³":101866,"©½":101867,"ð«ł":101868,"ãĽ":101869,"ð¬į":101870,"é¿":101871,"ð¬Ĵ":101872,"ãĻ":101873,"ð¬¤":101874,"ð¬´":101875,"ð«ĸ":101876,"ð¤":101877,"ã¬":101878,"ä²":101879,"ð«Ķ":101880,"ð«ļ":101881,"è¦ģæ±Ĥ":101882,"ä¸ĢäºĽ":101883,"å®ŀçݰ":101884,"èĢĮä¸Ķ":101885,"åĽłæŃ¤":101886,"çͱäºİ":101887,"åħ³äºİ":101888,"çĦ¶åIJİ":101889,"æİ¨åĬ¨":101890,"ä¸Ģæł·":101891,"æĮīçħ§":101892,"è¿Ļæł·çļĦ":101893,"å½¢æĪIJ":101894,"æľīäºĽ":101895,"æĽ´åĬł":101896,"ç»ıè¿ĩ":101897,"建议":101898,"æ²»çĸĹ":101899,"ä½łä»¬":101900,"æīįèĥ½":101901,"ä¿ĥè¿Ľ":101902,"åijĺå·¥":101903,"ä½ĵéªĮ":101904,"èĪĩ":101905,"åģļ好":101906,"ä¿Ŀè¯ģ":101907,"æķ´ä¸ª":101908,"æĺ¯ä¸Ģ个":101909,"éĩĩç͍":101910,"çIJĨ论":101911,"æ¯Ķå¦Ĥ":101912,"ä¸ĬçļĦ":101913,"æİ¨èįIJ":101914,"çĶ³è¯·":101915,"天空":101916,"éĥ¨èIJ½":101917,"åįģåĪĨ":101918,"æĿ¥èĩª":101919,"ä¹ĭéĹ´":101920,"è°ĥæķ´":101921,"æ¯ı天":101922,"è°ĥæŁ¥":101923,"æĤ£èĢħ":101924,"è¿ĩç¨ĭä¸Ń":101925,"é¦Ļ港":101926,"广åijĬ":101927,"éĿ¢å¯¹":101928,"满足":101929,"éķ¿æľŁ":101930,"è§ĦèĮĥ":101931,"æķ´ä½ĵ":101932,"æĶ¹åıĺ":101933,"æĻºæħ§":101934,"å¦Īå¦Ī":101935,"å¦Ĥä»Ĭ":101936,"åIJĪåIJĮ":101937,"éĥ½ä¼ļ":101938,"åĦ¿ç«¥":101939,"åĩıå°ij":101940,"éŁ³ä¹IJ":101941,"ç»ı常":101942,"ä¸Ĭå¸Ĥ":101943,"ä¼ĺç§Ģ":101944,"çļĦéĩįè¦ģ":101945,"ä¸ĢæĿ¡":101946,"æµ·å¤ĸ":101947,"åı¦å¤ĸ":101948,"ä¸Ģå®¶":101949,"åİĭåĬĽ":101950,"大åŀĭ":101951,"çľĭçĿĢ":101952,"åĪĢ":101953,"幸ç¦ı":101954,"æİ¨å¹¿":101955,"åIJĽ":101956,"å¾IJ":101957,"æī¾åΰ":101958,"äºİæĺ¯":101959,"èĩªèº«":101960,"ä¸Ģä½į":101961,"åľŁåľ°":101962,"åĬłåħ¥":101963,"æİ¢ç´¢":101964,"æ¢ģ":101965,"主åĬ¨":101966,"å°±ä¸ļ":101967,"女æĢ§":101968,"çªģçł´":101969,"ä¸įåIJĮçļĦ":101970,"è¿IJè¾ĵ":101971,"èĩªçͱ":101972,"å±ħæ°ij":101973,"æŃ¤æ¬¡":101974,"çļĦæĹ¶éĹ´":101975,"å®¶éķ¿":101976,"ä¸Ģ个人":101977,"æ£Ģæµĭ":101978,"åĨħéĥ¨":101979,"广å·ŀ":101980,"缴æĴŃ":101981,"ä»İèĢĮ":101982,"贷款":101983,"åı¬å¼Ģ":101984,"æĶ¹éĢł":101985,"人çĶŁ":101986,"å±ķ示":101987,"æ¯ıå¹´":101988,"女人":101989,"çļĦæĸ¹å¼ı":101990,"æķĪçİĩ":101991,"å±±ä¸ľ
":101992,"æ¸łéģĵ":101993,"ä¼¼ä¹İ":101994,"æ¡Īä»¶":101995,"åĪ©çĽĬ":101996,"çľĭçľĭ":101997,"å¿ĥéĩĮ":101998,"ç»´æĬ¤":101999,"å®Ŀå®Ŀ":102000,"ç½ijä¸Ĭ":102001,"论åĿĽ":102002,"å°±åı¯ä»¥":102003,"ä¸įè¶³":102004,"æģ¢å¤į":102005,"å¸ĥå±Ģ":102006,"è´¡çĮ®":102007,"ä¸ĭéĻį":102008,"æİĮæı¡":102009,"çļ®èĤ¤":102010,"å·¥åħ·":102011,"éĩįåºĨ":102012,"åĵģè´¨":102013,"æİ¨åĩº":102014,"çĶ·äºº":102015,"æī¿æĭħ":102016,"çªģåĩº":102017,"èĢĮè¨Ģ":102018,"æ²Ł":102019,"åįıè°ĥ":102020,"æĺ¯ä»Ģä¹Ī":102021,"汤":102022,"æĴij":102023,"çĭ¬ç«ĭ":102024,"çݯèĬĤ":102025,"æī©å¤§":102026,"æ´ª":102027,"æĿ°":102028,"çĽIJ":102029,"ä»ģ":102030,"æ¶īåıĬ":102031,"èĢģ人":102032,"åį³ä½¿":102033,"åįĹ京":102034,"éħįåIJĪ":102035,"鬼":102036,"çĪ¶äº²":102037,"ç½Ĺæĸ¯":102038,"å°ıåĮº":102039,"æķĻæİĪ":102040,"åĨ³çŃĸ":102041,"é¢Ħ计":102042,"æľ¬äºº":102043,"伯":102044,"竹":102045,"åΰåºķ":102046,"å¸Ĥæ°ij":102047,"åĩºåı£":102048,"éĩĩè´Ń":102049,"æĢ»ç»ĵ":102050,"æŃ¦æ±ī":102051,"åĬłå¤§":102052,"å¹¿ä¸ľ":102053,"æµģç¨ĭ":102054,"人åı£":102055,"å¦Ĥæŀľä½ł":102056,"åĩºåİ»":102057,"åĩī":102058,"åĨľæ°ij":102059,"çݰ象":102060,"åĬĽåº¦":102061,"ç»ĻäºĪ":102062,"åħļå§Ķ":102063,"è¯Ńè¨Ģ":102064,"线ä¸Ĭ":102065,"æĢİæł·":102066,"åĦ¿åŃIJ":102067,"ç¡®å®ŀ":102068,"ä¹ĭå¤ĸ":102069,"éĥ½åľ¨":102070,"èī¾":102071,"çļĦæĥħåĨµ":102072,"éĩĮçļĦ":102073,"åĽ´ç»ķ":102074,"æĽ´å¤ļçļĦ":102075,"ä¾Ŀæ³ķ":102076,"åħ¬åĽŃ":102077,"å®¶éĩĮ":102078,"æ¯į亲":102079,"ä¸įåĨį":102080,"èĭ¹":102081,"æ³ķéĻ¢":102082,"éŁ©åĽ½":102083,"缸å½ĵ":102084,"ä¸įçŁ¥":102085,"è¯Ħä¼°":102086,"ä¸įç͍":102087,"顺åĪ©":102088,"éĩįè§Ĩ":102089,"è´¢åĬ¡":102090,"ä»ĸåĢij":102091,"åıijè¡Į":102092,"ä¸ĵéŨ":102093,"åħ·å¤ĩ":102094,"å¹¶ä¸įæĺ¯":102095,"è¶³çIJĥ":102096,"éŀĭ":102097,"åıij表":102098,"æ°¸è¿ľ":102099,"èIJ¥åħ»":102100,"éħįå¥Ĺ":102101,"æķ´åIJĪ":102102,"è´º":102103,"åĽŀçŃĶ":102104,"æĶ¶çĽĬ":102105,"ä¹Łè®¸":102106,"è»Ĭ":102107,"æİ¥è§¦":102108,"æĶ»åĩ»":102109,"åĽĽå·Ŀ":102110,"æĢ§èĥ½":102111,"åĽŀåΰ":102112,"èħ°":102113,"ä¹Łæ²¡æľī":102114,"å¼Ħ":102115,"设ç«ĭ":102116,"éĺ²æİ§":102117,"æĬĢå·§":102118,"éĢļ常":102119,"è´¢æĶ¿":102120,"éĥ¨ç½²":102121,"åľºæĻ¯":102122,"æ±Łèĭı":102123,"表达":102124,"åĸ·":102125,"女åĦ¿":102126,"èζ":102127,"給":102128,"ä¼ļåijĺ":102129,"æĪĸ许":102130,"亩":102131,"举æĸ¹":102132,"天津":102133,"è¿ijå¹´":102134,"çľĭæĿ¥":102135,"æ¯Ķä¾ĭ":102136,"岩":102137,"éĵľ":102138,"çİ»":102139,"å®ŀéªĮ":102140,"æĢĿç»´":102141,"æĭħå¿ĥ":102142,"æ²Ī":102143,"身边":102144,"æ·±åĮĸ":102145,"ç²¾åĩĨ":102146,"ç§ģæľį":102147,"æ¶Īéĺ²":102148,"åİ»äºĨ":102149,"ç»Ĩèĥŀ":102150,"çIJĥéĺŁ":102151,"æĺİæĺŁ":102152,"é£Łçī©":102153,"å¾Īå¿«":102154,"è®©ä½ł":102155,"ä¿¡ç͍":102156,"å͝ä¸Ģ":102157,"åħ¶å®ĥ":102158,"çŃīæĸ¹éĿ¢":102159,"å¾ĭå¸Ī":102160,"æŃ»äº¡":102161,"æŁ³":102162,"ä¸Ģæī¹":102163,"ä¸Ĭ涨":102164,"æľºåľº":102165,"å½¢åĬ¿":102166,"æĦ¿æĦı":102167,"éĽĨä½ĵ":102168,"æĸ°åŀĭ":102169,"æįŁå¤±":102170,"æĽ¸":102171,"ä¸ĭåįĪ":102172,"æ¯ı次":102173,"æĪIJå°±":102174,"åħ¬è·¯":102175,"èĻ«":102176,"åĴ±":102177,"西å®ī":102178,"æľĢä½³":102179,"ç§ijçłĶ":102180,"å¤įæĿĤ":102181,"æľºåύ":102182,"çαæĥħ":102183,"çħ§çīĩ":102184,"å¹´é¾Ħ":102185,"è³ĩæĸĻ":102186,"ç²Ĺ":102187,"åĩĨç¡®":102188,"åĬłä¸Ĭ":102189,"åĩºçīĪ":102190,"è°IJ":102191,"å®¶å±ħ":102192,"èĥĮæĻ¯":102193,"ä¸Ģ线":102194,"äºĭ项":102195,"åĬ¨ä½ľ":102196,"祥":102197,"æĢ»ä½ĵ":102198,"æĪ¿åŃIJ":102199,"ä¹Łå°±æĺ¯":102200,"大æ¦Ĥ":102201,"é«ĺæķĪ":102202,"åIJ¹":102203,"æİĪæĿĥ":102204,"éĻĦè¿ij":102205,"æ¡Īä¾ĭ":102206,"éĹ¹":102207,"çΏçΏ":102208,"彩票":102209,"æĢĴ":102210,"举æĬ¥":102211,"æĻ®éģį":102212,"çķĻä¸ĭ":102213,"è¡£æľį":102214,"æĹłè®ºæĺ¯":102215,"åħħ满":102216,"深度":102217,"æ¡ij":102218,"æĪªèĩ³":102219,"带æĿ¥çļĦ":102220,"éϵ":102221,"æĦŁæĥħ":102222,"èµļ":102223,"åĵªäºĽ":10
2224,"æķ´æĶ¹":102225,"æĪIJçĨŁ":102226,"å¨ľ":102227,"é¼»":102228,"磼":102229,"çĽ¾":102230,"好好":102231,"ç¬¬åĽĽ":102232,"åĨłåĨĽ":102233,"è´¢å¯Į":102234,"æľĢ好çļĦ":102235,"车åŀĭ":102236,"éĸĢ":102237,"åį³å°Ĩ":102238,"åĪĨ为":102239,"éĿĴå²Ľ":102240,"纷纷":102241,"ä»ĬæĹ¥":102242,"平衡":102243,"å¹³æĸ¹ç±³":102244,"éĤ£ç§į":102245,"åĩºçĶŁ":102246,"éĿĴæĺ¥":102247,"人群":102248,"人工":102249,"ä¹ĭä¸ĭ":102250,"æ¹ĸåĮĹ":102251,"åľ¨æŃ¤":102252,"åįļ士":102253,"æĹ¶åĪ»":102254,"æ²³åĮĹ":102255,"æĶ¾å¼ĥ":102256,"éĢļéģĵ":102257,"森æŀĹ":102258,"çĸĨ":102259,"æķ¸":102260,"èĬ³":102261,"æīĵåĩ»":102262,"æĽ¹":102263,"åĮĸåѦ":102264,"æĥ³è±¡":102265,"ä¸ĩ人":102266,"è´¢ç»ı":102267,"åħĥç´ł":102268,"ä¼ļ计":102269,"åħ¨ä½ĵ":102270,"æĦĽ":102271,"é«ĺä¸Ń":102272,"æľºéģĩ":102273,"å£°éŁ³":102274,"æĹħè¡Į":102275,"浩":102276,"æŁ±":102277,"å°ijå¹´":102278,"åĽ½å¤ĸ":102279,"èijĹåIJį":102280,"çĶŁåŃĺ":102281,"å§ľ":102282,"带é¢Ĩ":102283,"é¢ľèī²":102284,"ä¸Ĭä¸ĭ":102285,"产ä¸ļéĵ¾":102286,"æĽ´å¥½çļĦ":102287,"å²Ń":102288,"ä¼ĺæĥł":102289,"便æĺ¯":102290,"åħ§å®¹":102291,"ä¸Ģåıª":102292,"çIJ´":102293,"梦æĥ³":102294,"ç§Łèµģ":102295,"å¼ĢåIJ¯":102296,"è´Ńçī©":102297,"åĮħåIJ«":102298,"åĪ©çİĩ":102299,"èµ·äºĨ":102300,"æľīåĬĽ":102301,"éĤ£éĩĮ":102302,"审æī¹":102303,"对æīĭ":102304,"çݰéĩij":102305,"天çĦ¶":102306,"çĽĴ":102307,"çν":102308,"å¿ħçĦ¶":102309,"åĮĸå·¥":102310,"ä¸ĵåĪ©":102311,"åķ¡":102312,"å¼Ģå¿ĥ":102313,"人ä½ĵ":102314,"éģĵ士":102315,"æĢģ度":102316,"空è°ĥ":102317,"æĭĽåķĨ":102318,"å§»":102319,"第äºĶ":102320,"æ£Ĵ":102321,"ä¸Ģç³»åĪĹ":102322,"å᱿ľº":102323,"转åıĺ":102324,"åľºæīĢ":102325,"鸣":102326,"æĪ¿éĹ´":102327,"é̼":102328,"è¯ķçĤ¹":102329,"对å¤ĸ":102330,"åĩºåı°":102331,"åľ¨è¿Ļ":102332,"åİĤå®¶":102333,"巨大":102334,"ç®Ģä»ĭ":102335,"çľĭäºĨ":102336,"åħļ建":102337,"æĮĩæĮ¥":102338,"çŁ³æ²¹":102339,"ä¸įåı¯èĥ½":102340,"èݲ":102341,"ä¸į太":102342,"åĪĽæĦı":102343,"第ä¸Ģ个":102344,"è´µå·ŀ":102345,"è¿ĩäºĨ":102346,"æľ¬æĿ¥":102347,"éģĵå¾·":102348,"çŃĶæ¡Ī":102349,"é϶":102350,"ä¸Ģè·¯":102351,"èĤĸ":102352,"æ¸ħæ´ģ":102353,"æľīæľº":102354,"åIJįåįķ":102355,"æĿ±":102356,"åij¼åIJ¸":102357,"ä¸Ī":102358,"ç¦ı建":102359,"è¯ķéªĮ":102360,"å¼ķåıij":102361,"ä¹Łæ²¡":102362,"ä¸įä½ı":102363,"çĨŁæĤī":102364,"èIJ¬":102365,"ä¸įèī¯":102366,"çłĸ":102367,"èĩ´åĬĽ":102368,"çŃ¾è®¢":102369,"åIJĬ":102370,"侯":102371,"çĺ¦":102372,"å§ijå¨ĺ":102373,"æĸ¤":102374,"妻åŃIJ":102375,"æĺ¥èĬĤ":102376,"çά":102377,"æĽĿ":102378,"çĥŃæĥħ":102379,"éķ¿æ²Ļ":102380,"èIJ¥éĢł":102381,"éħ·":102382,"éĵĿ":102383,"åŁºæľ¬ä¸Ĭ":102384,"åij¨åĽ´":102385,"ä»Ģ麼":102386,"认åı¯":102387,"åĪĨåŃIJ":102388,"ä¸Ģæĸ¹éĿ¢":102389,"è½´":102390,"å¼·":102391,"马ä¸Ĭ":102392,"éĽ¾":102393,"èĩ£":102394,"å°¿":102395,"çĶŁæĦı":102396,"å®īå¾½":102397,"ç¥ŀç»ı":102398,"åĩºå¸Ń":102399,"èį¯åĵģ":102400,"çIJĨçͱ":102401,"åįıåIJĮ":102402,"æµģåĬ¨":102403,"åıijåĬ¨":102404,"åĿļå®ļ":102405,"表æĺİ":102406,"åIJİéĿ¢":102407,"ä¹īåĬ¡":102408,"å¦ĸ":102409,"æľīåı¯èĥ½":102410,"年轻人":102411,"大éĻĨ":102412,"å²³":102413,"ä¸įèµ·":102414,"çŀ¬éĹ´":102415,"ä¸įå¾Ĺä¸į":102416,"çŃ¾çº¦":102417,"åIJĪæł¼":102418,"åħļæĶ¯éĥ¨":102419,"æµİåįĹ":102420,"便åĪ©":102421,"éļıæĹ¶":102422,"å¥ī":102423,"称为":102424,"产æĿĥ":102425,"åIJķ":102426,"çĽĨ":102427,"课åłĤ":102428,"ç·ļ":102429,"æ£ī":102430,"线ä¸ĭ":102431,"èĩªè¡Į":102432,"举æİª":102433,"åݦéŨ":102434,"èĩªä¿¡":102435,"å½±è§Ĩ":102436,"ä»Ķ":102437,"çĶŁæ´»ä¸Ń":102438,"æĿĥçĽĬ":102439,"çϽèī²":102440,"å°±ä¸į":102441,"è¿Ľå±ķ":102442,"æ¯ıæĹ¥":102443,"ä¾Ľç»Ļ":102444,"æĿĥåĪ©":102445,"æĹłæķ°":102446,"çIJĨè´¢":102447,"ä¾ĿæĹ§":102448,"ä¸ĬåįĪ":102449,"è¯ĨåĪ«":102450,"çĽĪåĪ©":102451,"çłĤ":102452,"许åı¯":102453,"åIJĮäºĭ":102454,"åĺĽ":102455,"éģ¸":102456,"çĿĢåĬĽ":102457,"éŨåı£":102458,"ä¸įå¤ļ":102459,"åħ¶æ¬¡":102
460,"碧":102461,"çī©çIJĨ":102462,"åĨħå¿ĥ":102463,"çϾå§ĵ":102464,"æĢ»ç»Ł":102465,"å¹²åĩĢ":102466,"积累":102467,"åıįé¦Ī":102468,"æłijç«ĭ":102469,"社交":102470,"ç§©":102471,"åįģä¸Ģ":102472,"éĤĵ":102473,"驱åĬ¨":102474,"å±ķè§Ī":102475,"èĪĴéĢĤ":102476,"åŁºåĽł":102477,"å·®å¼Ĥ":102478,"转让":102479,"å°ıå§IJ":102480,"æł·åŃIJ":102481,"ç¿Ķ":102482,"é«ĺåħ´":102483,"å½±åĵįåĬĽ":102484,"æīĭç»Ń":102485,"缸åIJĮ":102486,"缸åºĶ":102487,"æĻĴ":102488,"è§Ģ":102489,"å¸Ĥå§Ķ":102490,"èĬ¯":102491,"å±ķçݰ":102492,"åľ°çIJĥ":102493,"éĤª":102494,"ä¸Ģå®ļçļĦ":102495,"åħģ许":102496,"ä¿¡ä»»":102497,"æīij":102498,"éĻ¢æł¡":102499,"ç®Ģç§°":102500,"åģļæ³ķ":102501,"ä¹ĭè·¯":102502,"æĹĹä¸ĭ":102503,"èħĶ":102504,"æ¶Ī失":102505,"ä¸ĸçķĮä¸Ĭ":102506,"åŁİ乡":102507,"èĪŀåı°":102508,"å¾Ī大çļĦ":102509,"绣çѹ":102510,"åħ¬å¹³":102511,"èĤ¾":102512,"çļĦ好":102513,"æ±ģ":102514,"çľ¼åīį":102515,"éĽ£":102516,"å¹½":102517,"åħ±äº§":102518,"主åĬŀ":102519,"å¤Ħç½ļ":102520,"åºĻ":102521,"éģĵçIJĨ":102522,"å¼µ":102523,"æİ¥çĿĢ":102524,"çĮİ":102525,"çģĮ":102526,"çͱæŃ¤":102527,"人åĬĽ":102528,"æµģè¡Į":102529,"ä¾ł":102530,"åı¯ä»¥è¯´":102531,"èĴĭ":102532,"å½¢æĢģ":102533,"æĹ¥åŃIJ":102534,"æ¼Ĩ":102535,"çķĻåѦ":102536,"缸éĹľ":102537,"æľĢå¤ļ":102538,"åĩŃåĢŁ":102539,"åħ¬äº¤":102540,"æĮĸæİĺ":102541,"æĿĤå¿Ĺ":102542,"主人":102543,"éļľç¢į":102544,"æł¡éķ¿":102545,"æĸ¹ä½į":102546,"ä¸ĬçıŃ":102547,"å¤ļåħĥ":102548,"èĥģ":102549,"éŃħåĬĽ":102550,"èĮĤ":102551,"åħħç͵":102552,"强大":102553,"çĥ¤":102554,"å¥ĭæĸĹ":102555,"å®ŀç͍":102556,"éĺģ":102557,"ç»ĻäºĨ":102558,"æľ¬ç§ij":102559,"æłĭ":102560,"æĭ¨":102561,"æķĻç»ĥ":102562,"éĥ½çŁ¥éģĵ":102563,"æ¯ķä¸ļçĶŁ":102564,"ç¢Ĺ":102565,"åŀĤ":102566,"讼":102567,"å®ģæ³¢":102568,"åѦèĢħ":102569,"谢谢":102570,"åŁİéķĩ":102571,"æĢİä¹ĪåĬŀ":102572,"éģĶ":102573,"æĪIJ交":102574,"æ½ľåĬĽ":102575,"åį§":102576,"æĸ°å¼Ģ":102577,"éħįå¤ĩ":102578,"主åĬĽ":102579,"åij³éģĵ":102580,"çĥĤ":102581,"é£ŀè¡Į":102582,"å«ģ":102583,"大大":102584,"ç»Ļ大家":102585,"å¤ĸéĿ¢":102586,"éĨī":102587,"åıijè¨Ģ":102588,"æĹ©é¤IJ":102589,"åIJĦèĩª":102590,"å®Ļ":102591,"èį£èªī":102592,"æĬ«éľ²":102593,"é¡ŀ":102594,"åĨħçļĦ":102595,"èĤª":102596,"è¾IJ":102597,"æ³µ":102598,"æĬĽ":102599,"æĺŁæľŁ":102600,"ä¸Ģ带":102601,"çĶŁç´ł":102602,"ç»ıéĶĢ":102603,"åĩ¶":102604,"åľ°ä¸Ĭ":102605,"åij½è¿IJ":102606,"åĵ²":102607,"ä¸Ĭåİ»":102608,"æĸĩçī©":102609,"è¯ij":102610,"æĮ¯åħ´":102611,"éķ¿æĹ¶éĹ´":102612,"ç¥Ń":102613,"åIJĪèĤ¥":102614,"è¿Ŀè§Ħ":102615,"èģª":102616,"ä½İäºİ":102617,"éĢĤå½ĵ":102618,"æľīåºı":102619,"æľ¬ç½ij":102620,"çķĻè¨Ģ":102621,"æĥ³æ³ķ":102622,"çŃ¾ç½²":102623,"å§ļ":102624,"æĢ§æł¼":102625,"èĴĻåı¤":102626,"æŁı":102627,"åŀ«":102628,"åѦåİĨ":102629,"ä»ħä»ħ":102630,"讲è¯Ŀ":102631,"éĶIJ":102632,"æĢĸ":102633,"åīª":102634,"èĭį":102635,"åIJĵ":102636,"强çĥĪ":102637,"åģ¥åħ¨":102638,"çĸ¯":102639,"åı¤ä»£":102640,"å¥Ī":102641,"ä¸įçĦ¶":102642,"乡éķĩ":102643,"æľĭåıĭ们":102644,"åĤħ":102645,"èģ½":102646,"个æĢ§":102647,"æ³ķè§Ħ":102648,"å°ıéķĩ":102649,"çĶ»éĿ¢":102650,"第åħŃ":102651,"網路":102652,"åīįæĻ¯":102653,"åIJ¬è¯´":102654,"ä¼łåªĴ":102655,"æĿ¡ä¾ĭ":102656,"åĪ«çļĦ":102657,"ä¸įæĩĤ":102658,"顾éĹ®":102659,"强度":102660,"éĺ¿éĩĮ":102661,"èµ°åĬ¿":102662,"帽":102663,"çļĦç¡®":102664,"åĮºåĪ«":102665,"éĮ¢":102666,"主管":102667,"ä¸Ģçľĭ":102668,"æĸľ":102669,"åŃĺåľ¨çļĦ":102670,"仲":102671,"åį±å®³":102672,"éĵŃ":102673,"游æĪıä¸Ń":102674,"éħ±":102675,"é¾Ļ头":102676,"人å¿ĥ":102677,"éĢĢä¼ij":102678,"æµıè§Ī":102679,"åĬ«":102680,"éĺ²æ²»":102681,"ç®Ń":102682,"å±Ī":102683,"è¾½å®ģ":102684,"壤":102685,"è¿İæĿ¥":102686,"éŀį":102687,"ç͍æĿ¥":102688,"å¤§åľ°":102689,"ä»°":102690,"éĢļ讯":102691,"å¼Ģå·¥":102692,"裤":102693,"å¦ĤåIJĮ":102694,"骤":102695,"éĺŁåijĺ":102696,"轩":102697,"ç¾İæľ¯":102698,"èĻŁ":102699,"åIJĮ
ä¸Ģ":102700,"åľĸ":102701,"书æ³ķ":102702,"æīĵåį°":102703,"åIJ«æľī":102704,"éĽĨæĪIJ":102705,"éĹ·":102706,"å¸Ĥåľºä¸Ĭ":102707,"æĹģè¾¹":102708,"åľ°æĿ¿":102709,"产çĶŁçļĦ":102710,"粤":102711,"éĩįç»Ħ":102712,"è¡Ģæ¶²":102713,"çŃĭ":102714,"åĬŀäºĭ":102715,"常è§ģçļĦ":102716,"ä¸ĬåįĬå¹´":102717,"å±ıå¹ķ":102718,"åIJīæŀĹ":102719,"å·©":102720,"åĸľçα":102721,"ç¿ł":102722,"ä¸īç§į":102723,"æ¡Ĩæŀ¶":102724,"举èİŀ":102725,"çĶĺèĤĥ":102726,"èĬ¬":102727,"åĽ¾ä¹¦":102728,"åĩ¤åĩ°":102729,"æ°ĶåĢĻ":102730,"å°´":102731,"å°¬":102732,"两天":102733,"è¾ħ导":102734,"åĢŁæ¬¾":102735,"æĹ¥èµ·":102736,"æ´Ĵ":102737,"ä¸Ģ度":102738,"è¹Ī":102739,"æ½Ń":102740,"æīĩ":102741,"çĻľ":102742,"æĸ°åħ´":102743,"åĤ²":102744,"诸å¤ļ":102745,"è´ª":102746,"éĻ·åħ¥":102747,"èĪŁ":102748,"èĤºçĤİ":102749,"ä¸Ģæł·çļĦ":102750,"åİĺ":102751,"åľ°çIJĨ":102752,"æĬķæ³¨":102753,"éļĬ":102754,"åħīä¼ı":102755,"ä¿Ŀåģ¥":102756,"åħĶ":102757,"åħ¬åĬ¡":102758,"æīĵçł´":102759,"çĶ·åŃ©":102760,"åĬ³åĬ¡":102761,"ä½łä¼ļ":102762,"çĶ¨åľ°":102763,"溢":102764,"åıijè¾¾":102765,"èĤļ":102766,"è¿ĩäºİ":102767,"èĩĤ":102768,"éĢĻæ¨£":102769,"轻轻":102770,"ä¸Ńåħ±":102771,"åIJĦåĽ½":102772,"åĶĩ":102773,"å®ŀä¹ł":102774,"èϾ":102775,"æ§½":102776,"ä¸įä¸Ĭ":102777,"åħįçĸ«":102778,"åįłæį®":102779,"å·¥ä¼ļ":102780,"åĽĬ":102781,"èĪªå¤©":102782,"åı¯çα":102783,"æĸĹäºī":102784,"çĺ¤":102785,"å¦Ĥæľī":102786,"éĽĸ":102787,"对æĪij":102788,"åĩºç§Ł":102789,"好çľĭ":102790,"太大":102791,"æ°´åĪ©":102792,"åĬ¿åĬĽ":102793,"åħ¨æ°ij":102794,"ç½¢":102795,"èµ¢å¾Ĺ":102796,"çĶµä¿¡":102797,"车éĹ´":102798,"æĻĤåĢĻ":102799,"å°ijæķ°":102800,"éĵ¸":102801,"åħ³èģĶ":102802,"ä¸įä»ħä»ħ":102803,"为æĤ¨":102804,"åĴ¸":102805,"æľºåĬ¨":102806,"è£Ļ":102807,"åĵįåºĶ":102808,"éģł":102809,"è²·":102810,"ç©´":102811,"å¢ħ":102812,"éĶ¡":102813,"çµĦ":102814,"çģ«è½¦":102815,"è³ĩè¨Ĭ":102816,"åĨ³èµĽ":102817,"污水":102818,"èªŀ":102819,"å´Ľ":102820,"ç´§å¯Ĩ":102821,"缺å°ij":102822,"å¤ļ人":102823,"æĢ»ä¹¦è®°":102824,"éĶĪ":102825,"èijĽ":102826,"å¿ĺè®°":102827,"éĻĮçĶŁ":102828,"éķ¿å¤§":102829,"åħĪè¿ĽçļĦ":102830,"ç¡ħ":102831,"åıijæĺİ":102832,"å©´åĦ¿":102833,"æīİå®ŀ":102834,"èĽĭçϽ":102835,"ä¸ĢçϾ":102836,"缮åħī":102837,"æħĮ":102838,"åĬłæ²¹":102839,"åIJŀ":102840,"ä¸Ģ群":102841,"ä¸Ńä»ĭ":102842,"å¸ĸ":102843,"å¿Į":102844,"èģĮèĥ½":102845,"广æĴŃ":102846,"çĽijå¯Ł":102847,"ç§ĺå¯Ĩ":102848,"çĭ®":102849,"è¿ĻæĿ¡":102850,"éĢ¢":102851,"æĢ¨":102852,"åįģåħŃ":102853,"試":102854,"说åΰ":102855,"åĩĿèģļ":102856,"æĮĩ示":102857,"æ°¢":102858,"å¼ĺ":102859,"éĺĢ":102860,"æĸ©":102861,"éłħ":102862,"ä¸Ģå¼Ģå§ĭ":102863,"æİĴè¡Į":102864,"åľ¨æĪij":102865,"纪å½ķ":102866,"æĬĦ":102867,"æłª":102868,"说æ³ķ":102869,"ä¸Ńèį¯":102870,"好å¤ļ":102871,"åıªä¸įè¿ĩ":102872,"çķĻåľ¨":102873,"个å°ıæĹ¶":102874,"è®¤çŁ¥":102875,"çķ«":102876,"è§ģè¿ĩ":102877,"å°ıå¾®":102878,"ä½Ľå±±":102879,"çľ¾":102880,"讲述":102881,"梳":102882,"ç§°åı·":102883,"æĹ¥æĻļ":102884,"è¢ĸ":102885,"åķ¤":102886,"æľªç»ı":102887,"æľĢæĹ©":102888,"æī®æ¼Ķ":102889,"è¡Ģ管":102890,"纱":102891,"æĥħèĬĤ":102892,"第ä¸ĥ":102893,"æį§":102894,"ä»Ĺ":102895,"æ¿ĢçĥĪ":102896,"æĹłçº¿":102897,"ä¸į容æĺĵ":102898,"å¼Ģå¹ķ":102899,"æĸ°çĶŁ":102900,"ä¸ĵ注":102901,"èij±":102902,"åįĹæµ·":102903,"çĩŁ":102904,"èµ·ä¾Ĩ":102905,"æ´¾åĩº":102906,"åĦĴ":102907,"侨":102908,"è¼ĥ":102909,"åįļè§Ī":102910,"é̾":102911,"åĮĢ":102912,"ç»ıæµİåѦ":102913,"æ¸Ĺ":102914,"ä¿ĿèŃ·":102915,"çīº":102916,"çī²":102917,"çİ«":102918,"çij°":102919,"æľĢåIJİä¸Ģ":102920,"æĶ¿åĬ¡":102921,"æ§Ľ":102922,"èĻķçIJĨ":102923,"éļIJæĤ£":102924,"æī¿åĮħ":102925,"極":102926,"æ¡©":102927,"çĽ²":102928,"导åIJij":102929,"èĩ´å¯Į":102930,"ç¼Ĩ":102931,"æģĭçα":102932,"ä¸įåĬ¨":102933,"ç»Ļ人":102934,"å·¢":102935,"表æĥħ":102936,"举åįĹ":102937,"åĨħå¤ĸ":102938,"è¾ĪåŃIJ":102939,"åıī":102940
,"åįļä¼ļ":102941,"åĬŁæķĪ":102942,"渴":102943,"屬":102944,"æİĴéϤ":102945,"éĢĽ":102946,"ä¸Ģä¼ļ":102947,"ä¸įå¼Ģ":102948,"å¼Ģå¥ĸ":102949,"é»ijé¾Ļ":102950,"é»ijé¾Ļæ±Ł":102951,"å¿«ä¸ī":102952,"度åģĩ":102953,"åĿ¤":102954,"éĤ®ä»¶":102955,"æĩĴ":102956,"ä¾Ľç͵":102957,"廣":102958,"好è¯Ħ":102959,"ç§ĺ书éķ¿":102960,"æĪĺåľº":102961,"好å¥ĩ":102962,"ä¾µæĿĥ":102963,"æĨ¾":102964,"æľĢåĪĿ":102965,"æī¹åıij":102966,"åİķ":102967,"è¼ķ":102968,"æŀ¯":102969,"ä¸ļåĨħ":102970,"è´ŃæĪ¿":102971,"ä¸įåľ¨":102972,"纪å§Ķ":102973,"æīĢéľĢ":102974,"å¸Ĥéķ¿":102975,"è³½":102976,"å¼ķæĵİ":102977,"çģµéŃĤ":102978,"éĬĢ":102979,"滤":102980,"çĿIJ":102981,"å¤ļ项":102982,"åĽŀ头":102983,"èīĺ":102984,"å¤įå·¥":102985,"éĥ¨ä»¶":102986,"ç´§ç´§":102987,"æŁIJç§į":102988,"使åħ¶":102989,"æĸ°äºº":102990,"æŀļ":102991,"æ³ķå®ļ":102992,"å·´å·´":102993,"æ¶µçĽĸ":102994,"稻":102995,"æĭ¾":102996,"æĻķ":102997,"轿":102998,"éĢļè¡Į":102999,"åĵĢ":103000,"æ³Ĭ":103001,"温馨":103002,"éĽĨèģļ":103003,"çĨĻ":103004,"åĩij":103005,"åįģä¸ĥ":103006,"æ°Ķæģ¯":103007,"æıIJä¾ĽçļĦ":103008,"æ³³":103009,"奥è¿IJ":103010,"çģ¾å®³":103011,"åĩĢåĮĸ":103012,"è·¨è¶Ĭ":103013,"åĵªæĢķ":103014,"éŁ¿":103015,"å¢ŀæ·»":103016,"çĦĬ":103017,"æ®ĭçĸ¾":103018,"ç¢Į":103019,"æĤĶ":103020,"è§ģè¯ģ":103021,"è¾ĸåĮº":103022,"å¿ĥèĦı":103023,"éļ§":103024,"åį¸":103025,"åı¯èĥ½æĢ§":103026,"æľīè¶£":103027,"åī¯ä¹¦è®°":103028,"åĮĸå¦Ĩ":103029,"ä¿Ĥ":103030,"æ£ļ":103031,"éĨĩ":103032,"带头":103033,"éłĪ":103034,"追究":103035,"æijĶ":103036,"è¿Ļéĥ¨":103037,"ä¸į论":103038,"祸":103039,"å³»":103040,"éģķ":103041,"çĶŁèĤ²":103042,"å¤ł":103043,"å¤ĸ交":103044,"è¯Ħ为":103045,"ä»İå°ı":103046,"å°ıå°ı":103047,"饿":103048,"æĴ¼":103049,"è·¨å¢ĥ":103050,"被åijĬ":103051,"åįĹå®ģ":103052,"身å¿ĥ":103053,"åĨįçĶŁ":103054,"æīĢ说":103055,"æĹ¶éĹ´åĨħ":103056,"åĪĹåħ¥":103057,"éĿĴæµ·":103058,"çα好":103059,"çªĦ":103060,"èĪĪ":103061,"è¿ĩ渡":103062,"æ¿Ł":103063,"éĽĢ":103064,"审议":103065,"åĽ½èµĦ":103066,"æŃ¥ä¼IJ":103067,"轨éģĵ":103068,"信念":103069,"ä¸īåĪĨ":103070,"çĨ¬":103071,"åѵåĮĸ":103072,"ç¼ł":103073,"éĥĬ":103074,"èĪĴæľį":103075,"纪æ£Ģ":103076,"ä¸Ģä¸ĭåŃIJ":103077,"éĽ»è©±":103078,"è²ł":103079,"éĴ¥":103080,"åĮĻ":103081,"çĹ´":103082,"è¶ģ":103083,"绣":103084,"çε":103085,"è½°":103086,"éªĦ":103087,"姨":103088,"æĭĺ":103089,"çĮ´":103090,"è®¶":103091,"è¿Ļ座":103092,"çį¨":103093,"æ·ĺæ±°":103094,"çĹħä¾ĭ":103095,"æ²Ļåıij":103096,"è§Ĩ为":103097,"头æĿ¡":103098,"å¿ħè¦ģçļĦ":103099,"åı¯è°ĵ":103100,"è¯Ŀ说":103101,"ç¯Ħ":103102,"æĹ©çĤ¹":103103,"æŀ¢çº½":103104,"羡":103105,"çĪ±åĽ½":103106,"çªģåıij":103107,"éĢĬ":103108,"æ½į":103109,"èį£èĢĢ":103110,"èŁ¹":103111,"æ¦Ĥçİĩ":103112,"å¾Īä¹ħ":103113,"æĥķ":103114,"訴":103115,"åľĨ满":103116,"çļ±":103117,"åĪĨæ³Į":103118,"åħħè¶³":103119,"çľĭæ³ķ":103120,"è¾Ł":103121,"æĭ¦":103122,"æĭ©":103123,"对åºĶ":103124,"ä¸ºæł¸å¿ĥ":103125,"èħĬ":103126,"å¤ļä¹Ī":103127,"æµij":103128,"å®ıè§Ĥ":103129,"èĦĸ":103130,"åIJĪèµĦ":103131,"çĶŁæ¶¯":103132,"å®ŀè´¨":103133,"ä¼ĺçĤ¹":103134,"çĶ¨æ°´":103135,"寿åij½":103136,"沫":103137,"åIJģ":103138,"詹":103139,"åĽ½éĺ²":103140,"å´©":103141,"åĿİ":103142,"èĨı":103143,"ä¸Ģè½®":103144,"éģĹ产":103145,"æ¹¾åĮº":103146,"ç»İ":103147,"åįķ纯":103148,"æ¾Ħ":103149,"åīįåĪĹ":103150,"身影":103151,"é»ĺé»ĺ":103152,"æįī":103153,"çĴ°":103154,"èıĬ":103155,"æĢľ":103156,"åħĭæĢĿ":103157,"æĢ»å±Ģ":103158,"çĩĥæĸĻ":103159,"ä¸ļæĢģ":103160,"åIJĦæł·":103161,"åĴ½":103162,"åĩºèī²":103163,"åĪĿå¿ĥ":103164,"åıĽ":103165,"çłĶ讨":103166,"è¡«":103167,"åİĨç¨ĭ":103168,"禽":103169,"è¶³å¤ŁçļĦ":103170,"èįĨ":103171,"çľĭå¾ħ":103172,"è´©":103173,"åĨ³å¿ĥ":103174,"裹":103175,"å¸ĪèĮĥ":103176,"åŀĦ":103177,"æĿł":103178,"åĩ¸":103179,"çĬ¹è±«":103180,"çĥŃè¡Ģ":103181,"åIJĪä¼Ļ":103182,"éħµ":103183,"èIJ½åľ¨":103184,"åįłåľ°":103185,"衬
":103186,"èĵī":103187,"æĦ¤":103188,"æ¸Ĭ":103189,"åĪĨæķ°":103190,"ç¬ijçĿĢ":103191,"太平":103192,"çĤ«":103193,"æİ¨ä»ĭ":103194,"æĸ¯åĿ¦":103195,"形容":103196,"æĵĬ":103197,"æĦŁåħ´è¶£":103198,"åĨĽäºº":103199,"åĩĮæĻ¨":103200,"对çħ§":103201,"åıijçĹħ":103202,"å·¾":103203,"èĪī":103204,"檢":103205,"ç¬ijäºĨ":103206,"ç¡®è¯Ĭ":103207,"è´ŁåĢº":103208,"壮大":103209,"æĪļ":103210,"äºĴèģĶ":103211,"課":103212,"èħ¦":103213,"æĹ±":103214,"åıĹæ¬¢è¿İ":103215,"åįī":103216,"éϢ士":103217,"æ©¡":103218,"ä¸Ģ对":103219,"è¾±":103220,"æ²Ĥ":103221,"åı²ä¸Ĭ":103222,"æIJı":103223,"å´ĸ":103224,"代谢":103225,"磷":103226,"é¡ĺ":103227,"æµĩ":103228,"常ç͍":103229,"åįij":103230,"åĩºåĽ½":103231,"è¯ł":103232,"稳æŃ¥":103233,"ç»ı纪":103234,"å¤ļå¤ļ":103235,"æīĢå¾Ĺ":103236,"为主é¢ĺ":103237,"ä¸ĢåĪĨ":103238,"æł½":103239,"é¡§":103240,"纲":103241,"åĥħ":103242,"å£ĵ":103243,"åĦª":103244,"ç¿°":103245,"æİĢ":103246,"人为":103247,"媳":103248,"æ´½":103249,"èĿ¶":103250,"å¤įåħ´":103251,"ä¼ļå½±åĵį":103252,"åIJĦçķĮ":103253,"éĤ£ä¸Ģ":103254,"颤":103255,"çĢı":103256,"çĢı覽":103257,"å¯ŀ":103258,"åı¯æĢķ":103259,"åį³æĹ¶":103260,"çķ´":103261,"ä¸ĭåįĬå¹´":103262,"ç¬Ķè®°":103263,"éĻĦåĬł":103264,"çĥŃæ°´":103265,"奸":103266,"ç£ħ":103267,"æĿī":103268,"æ¸ħåįİ":103269,"éĸ±":103270,"ç°¡":103271,"å¤Ħå¤Ħ":103272,"åIJĪéĩij":103273,"æ²³æµģ":103274,"ç´°":103275,"è´ŁéĿ¢":103276,"çļĦ羣å®ŀ":103277,"åĻ¨æ¢°":103278,"èĴIJ":103279,"西äºļ":103280,"å·ħ":103281,"ç²¹":103282,"åİŁæĸĩ":103283,"æŀķ":103284,"è¡Ģåİĭ":103285,"åļ´":103286,"å¸ĺ":103287,"åĨĢ":103288,"æĮ«":103289,"çĶµè·¯":103290,"å°ıä¼Ļä¼´":103291,"èĿ´":103292,"æľĢå¿«":103293,"æĭĮ":103294,"宪":103295,"æĸ·":103296,"ç¿ħ":103297,"åĴ³":103298,"åĹ½":103299,"ç¾ŀ":103300,"èººåľ¨":103301,"èµĽè½¦":103302,"æ²IJ":103303,"éĻIJ度":103304,"为ä¸Ģä½ĵ":103305,"èĴľ":103306,"幫":103307,"æIJħ":103308,"åĭĭ":103309,"åīĸ":103310,"纳ç¨İ":103311,"éķ¿æķĪ":103312,"ç½ķ":103313,"åľ¬":103314,"ç©į":103315,"éĴ©":103316,"ç¹¼":103317,"åĽ½åľŁ":103318,"è¼ī":103319,"ä¸įå¿ĺ":103320,"èŃ¦ç¤º":103321,"çģ¿":103322,"å¿ĥå¾Ĺ":103323,"æĦļ":103324,"忽çķ¥":103325,"åĽŀäºĭ":103326,"åįłæľī":103327,"æ·Ħ":103328,"çī¡":103329,"çĽijäºĭ":103330,"ç¿¡":103331,"éĴĪ对æĢ§":103332,"çªĥ":103333,"製":103334,"èĨĿ":103335,"ç³Ł":103336,"港澳":103337,"太太":103338,"澡":103339,"ç»ĨåĮĸ":103340,"åĶ®åIJİ":103341,"å®ŀåľ¨æĺ¯":103342,"ç«£":103343,"çį²":103344,"å̾åIJij":103345,"å¼ķç͍":103346,"é¹ħ":103347,"ç¬ij容":103348,"ä¹IJè¶£":103349,"æ°ijæĶ¿":103350,"éŨæĪ·":103351,"å±ģ":103352,"迷失":103353,"éĶĮ":103354,"å°ı康":103355,"åĭī":103356,"æ³¼":103357,"ä¾ĭåŃIJ":103358,"ä¸īä½į":103359,"å»ł":103360,"èĶĵ":103361,"广éĺĶ":103362,"èĢį":103363,"èĢģèĻİ":103364,"åĭŁéĽĨ":103365,"èĦļæŃ¥":103366,"æĭ¯":103367,"åŃĹåı·":103368,"çĦ°":103369,"é¢ł":103370,"èļĤ":103371,"èļģ":103372,"飯":103373,"人æĢ§":103374,"æĴ°":103375,"åİ¢":103376,"å±ĢéĻIJ":103377,"æľªæĪIJ":103378,"åĵªåĦ¿":103379,"大åıij":103380,"ä¸įå®ļ":103381,"å¾ģæ±Ĥ":103382,"éĥµ":103383,"åĢºæĿĥ":103384,"çĪ±ä½ł":103385,"èºģ":103386,"ä»ħä¾Ľ":103387,"è¿ľå¤Ħ":103388,"éĨĽ":103389,"åĥµ":103390,"积æŀģæĢ§":103391,"æİ¡":103392,"åīįä¸ī":103393,"äºİä¸Ģä½ĵ":103394,"çŀĦ":103395,"çĿģ":103396,"沸":103397,"åħ±èµ¢":103398,"éĢĢå½¹":103399,"è´Ŀå°Ķ":103400,"æİı":103401,"æĪ²":103402,"è¡į":103403,"éĶĤ":103404,"ä¸ĩä½Ļ":103405,"ç§ijåĪĽ":103406,"æ¼Ķåͱ":103407,"欧åħĥ":103408,"æ·¡æ·¡":103409,"éĿĴå±±":103410,"èĹĿ":103411,"绽":103412,"令çīĮ":103413,"éĽĨ群":103414,"ä½ľçī©":103415,"çĢij":103416,"夯":103417,"ç½ij游":103418,"åħ«å¤§":103419,"éªļ":103420,"èªĵ":103421,"ä¼ļå±ķ":103422,"åħļåı²":103423,"æ£Ģå¯ŁéĻ¢":103424,"åĸĺ":103425,"éĺ±":103426,"èĢĮåĩº":103427,"éĢļ车":103428,"éĴĵ":103429,"æĥħ人":103430,"æ¸Ľ":103431,"ä¸Ńç§ĭ":103432,"çĪŃ":103433,"åıªåī©":103434,"æĺĶ"
:103435,"éĩİçĶŁ":103436,"ç¡«":103437,"èIJĿåįľ":103438,"æĬµæĬĹ":103439,"çĻ«çĹ«":103440,"éĻĢ":103441,"èĶļ":103442,"å¸ľ":103443,"满满":103444,"èı±":103445,"éļĨéĩį":103446,"æĺŁçº§":103447,"æ½ĩ":103448,"åħ¬åħĥ":103449,"è°£":103450,"æ¯Ķäºļ":103451,"æ¡ĮåŃIJ":103452,"èµ£":103453,"è²¼":103454,"æĦ¿æľĽ":103455,"顽":103456,"æ´¾éģ£":103457,"ç¥Ľ":103458,"åªļ":103459,"éĺľ":103460,"èij«":103461,"èĬ¦":103462,"æ³»":103463,"å¡Į":103464,"çĭŃ":103465,"å»īæĶ¿":103466,"å¥ijæľº":103467,"æĹĹèΰ":103468,"æĥ«":103469,"严åİī":103470,"åıĭæĥħ":103471,"å¦Ĭ":103472,"å¨ł":103473,"åĵªå®¶":103474,"èĨ¨":103475,"è¶Ł":103476,"æĮª":103477,"èĻIJ":103478,"éłģ":103479,"çŀ©":103480,"éºŁ":103481,"稣":103482,"èģĶéĢļ":103483,"åı®":103484,"çİĭèĢħ":103485,"ä¸įç¡®å®ļ":103486,"çijľ":103487,"è°İ":103488,"çī¢è®°":103489,"碼":103490,"æĬ¤èĤ¤":103491,"é¡·":103492,"çĦķ":103493,"åģļ强":103494,"éļ±ç§ģ":103495,"éļ±ç§ģæ¬Ĭ":103496,"åıĹ害":103497,"ä¸įçͱ":103498,"çĥ¹":103499,"饪":103500,"驳":103501,"ä¼½":103502,"ä¸Ŀ绸":103503,"è¥Ħ":103504,"åįģä½Ļ":103505,"éºĹ":103506,"æ¬ĬåĪ©":103507,"èģŀ":103508,"åı¤èĢģ":103509,"éģı":103510,"åIJĦå¼ı":103511,"å°±è¡Į":103512,"åħ¥å¢ĥ":103513,"çĥģ":103514,"èľĺ":103515,"èĽĽ":103516,"纬":103517,"磫":103518,"è»Ł":103519,"æ´Ĺè¡£":103520,"æĦ§":103521,"é¢Ħæ¡Ī":103522,"éľĨ":103523,"æ·±åİļ":103524,"éĺ¿æĭī":103525,"åĨĻåŃĹ":103526,"åį¦":103527,"éķĢ":103528,"æ¨¡æł·":103529,"åĤį":103530,"æIJį":103531,"èĸ¯":103532,"åłħ":103533,"åħ¬ç§¯":103534,"è¨İ":103535,"ä¼łæŁĵ":103536,"毯":103537,"çIJĨå·¥":103538,"åĨ·éĵ¾":103539,"ç«ĭæĸ¹":103540,"æ¢Ń":103541,"åľ£è¯ŀ":103542,"综èīº":103543,"çİ©ç¬ij":103544,"æĥ³ä¸įåΰ":103545,"æijĩ头":103546,"æ·¹":103547,"åģĩæĹ¥":103548,"åĢĺ":103549,"è̽":103550,"èİĵ":103551,"åŁ·":103552,"èĩªè´¸":103553,"åįĬ天":103554,"æªĶ":103555,"æ¾İæ¹ĥ":103556,"éķij":103557,"丫":103558,"éĩĮç¨ĭ":103559,"å¼ĢèįĴ":103560,"èıı":103561,"å®Ŀè´µ":103562,"èѬ":103563,"åķŁ":103564,"æŁł":103565,"檬":103566,"é©Ń":103567,"æ±Ľ":103568,"çĨĬçĮ«":103569,"èķī":103570,"éļıä¹ĭ":103571,"å±ij":103572,"è¾ĥ强":103573,"èĥ³":103574,"èĨĬ":103575,"éĿĻéĿĻ":103576,"åĴª":103577,"æĭĽåij¼":103578,"代è¨Ģ":103579,"ä¿¡ç®±":103580,"è£ħéħį":103581,"æĤį":103582,"åįķ车":103583,"èIJİ":103584,"å¤ļ彩":103585,"éϏ":103586,"ä»İ严":103587,"æ©Ħ":103588,"æ¦Ħ":103589,"éĢ®":103590,"éĩĮæĸ¯":103591,"å§¿æĢģ":103592,"太æŀģ":103593,"éĩĿ":103594,"æºī":103595,"è¿Ń":103596,"秸":103597,"ç§Ĩ":103598,"å·¥å§Ķ":103599,"æ±ķ":103600,"èģĨ":103601,"佬":103602,"ç¼ħ":103603,"ç͏":103604,"åī¯å±Ģéķ¿":103605,"éĹº":103606,"誤":103607,"è¤IJ":103608,"ä¸įéĻIJ":103609,"èħķ":103610,"åijķ":103611,"磶":103612,"åĨľå®¶":103613,"管å§Ķä¼ļ":103614,"饺":103615,"èĬľ":103616,"æ¾Ī":103617,"è©¢":103618,"å¨ģå°¼æĸ¯":103619,"ä½ķåĨµ":103620,"å°ıä¼Ļ":103621,"奢ä¾Ī":103622,"è¿Ļç¯ĩ":103623,"诵":103624,"竳ç¨ĭ":103625,"ç´Ģ":103626,"éIJĺ":103627,"éĤ¢":103628,"ç³Ļ":103629,"ç¼Ģ":103630,"ä¹Ĵ":103631,"ä¹ĵ":103632,"çī¢åĽº":103633,"åĿŀ":103634,"å¼Ī":103635,"ä¾ĭå¤ĸ":103636,"廳":103637,"è§Ħ竳":103638,"èĬĻ":103639,"篷":103640,"躯":103641,"æłĪ":103642,"åĿļå®ŀ":103643,"åŁºå»º":103644,"çĿĢçľ¼":103645,"ç·´":103646,"èij©":103647,"ç¼ļ":103648,"æ¦Ĩ":103649,"主åĭķ":103650,"ç¥Ģ":103651,"äºĴéĢļ":103652,"尤为":103653,"å®Ľ":103654,"骼":103655,"æ±²":103656,"ä¾ĥ":103657,"æĤłä¹ħ":103658,"æij§":103659,"æĭĩ":103660,"é«ĵ":103661,"éºĴ":103662,"éĻĽ":103663,"æŀ¸":103664,"æĿŀ":103665,"è´¬":103666,"å°ıé¾Ļ":103667,"åĵ®":103668,"èĵ¬åĭĥ":103669,"åĮĪ":103670,"çķľçī§":103671,"娩":103672,"个å¤ļ":103673,"æ²¥":103674,"æĺ§":103675,"çĦļ":103676,"æĬijéĥģ":103677,"çĸ¡":103678,"èĺij":103679,"éģİç¨ĭ":103680,"橱":103681,"éĿĵ":103682,"大çIJĨ":103683,"髦":103684,"åĪĨ辨":103685,"渤":103686,"çĸ¤":103687,"åĬ¨èĥ½":103688,"å¼łå®¶":103689
,"ä¸ĩåįĥ":103690,"滥":103691,"饥":103692,"åºŁå¼ĥ":103693,"帳":103694,"æ¼³":103695,"è±IJ":103696,"ä»ij":103697,"å«ī":103698,"å¦Ĵ":103699,"çŀĴ":103700,"è¡ħ":103701,"çĭ¸":103702,"å¾ģç¨ĭ":103703,"éĤ¯":103704,"éĥ¸":103705,"ç¥Ī":103706,"祷":103707,"è¶´":103708,"ç»ĵæŀĦæĢ§":103709,"è§ĨåIJ¬":103710,"è¬Ŀ":103711,"çĴĢ":103712,"çĴ¨":103713,"åĩºå¤Ħ":103714,"è¯Ģ":103715,"å¾ĺ":103716,"å¾Ĭ":103717,"羨":103718,"åĸĩ":103719,"åıŃ":103720,"åĺ²":103721,"çķ¸":103722,"å¹²äºĭ":103723,"æļ§":103724,"æ²Ľ":103725,"åĦĦ":103726,"å»ĵ":103727,"åİ¿éķ¿":103728,"èĥļ":103729,"çIJ¢":103730,"çŃ·":103731,"éĩĭ":103732,"ä¾®":103733,"åIJ©":103734,"åĴIJ":103735,"åĮ¿":103736,"æĬ¬èµ·":103737,"æ³£":103738,"涤":103739,"麽":103740,"æĽĻ":103741,"åī¯éĻ¢éķ¿":103742,"åħļåĴĮ":103743,"æķ£åıij":103744,"润æ»ij":103745,"åĵº":103746,"æĥ¬":103747,"漫éķ¿":103748,"ä¸įæĩĪ":103749,"åŁł":103750,"åĹĵ":103751,"èĢģçĪ·":103752,"讽":103753,"æĪĺç»ĦåIJĪ":103754,"æ£ł":103755,"åħ¨åŁŁ":103756,"èł¢":103757,"诡":103758,"åīįçŀ»":103759,"æķĽ":103760,"ä¸Ģå°ģ":103761,"å¹Ĥ":103762,"èİĨ":103763,"è¯Ŀè¯Ń":103764,"ç»ĨåĪĻ":103765,"屿":103766,"åµĮ":103767,"éĢį":103768,"åĺ±":103769,"渲":103770,"çĥ¯":103771,"çĿ¹":103772,"é¦Ĵ":103773,"èħ¥":103774,"æĬĹåĩ»":103775,"çĿ«":103776,"èįĶ":103777,"éļİ":103778,"æ³īæ°´":103779,"è¬Ĥ":103780,"çĤ¬":103781,"åĩıæİĴ":103782,"è¸Ĭ":103783,"è·»":103784,"æ·Į":103785,"éľ¾":103786,"å¥ĩ纳":103787,"å¯Ŀ":103788,"æ¤İ":103789,"æŁ¬":103790,"æĸ¯åŁº":103791,"åħ¬ç«ĭ":103792,"è¨ĵ":103793,"é£Ļ":103794,"é©¿":103795,"åĤµ":103796,"èĽĻ":103797,"ç¯ĩ竳":103798,"åĪĨæĶ¯":103799,"ä¸Ĭå¹´":103800,"çŃĿ":103801,"缤":103802,"èĢģæĹ§":103803,"åϬ":103804,"æľ¦":103805,"èĥ§":103806,"æ¶Īè²»":103807,"æĵĶ":103808,"榴":103809,"æ¿Ĵ":103810,"糯":103811,"泸":103812,"æįĨ":103813,"ç»ļ":103814,"èµİ":103815,"çIJIJ":103816,"èµĤ":103817,"æħ®":103818,"æ²Į":103819,"çĦĻ":103820,"æĴŃæĬ¥":103821,"æ·ĩ":103822,"åĪĩåħ¥":103823,"çijķ":103824,"çĸµ":103825,"éģ´":103826,"ç¨ļ":103827,"ç©©":103828,"èŀĥ":103829,"æ£ķ":103830,"æĨ§":103831,"æĨ¬":103832,"伺":103833,"æ¯Ĺ":103834,"æįį":103835,"æĬī":103836,"ç´Ĭ":103837,"å¼Ľ":103838,"æĭŃ":103839,"æĹıèĩªæ²»":103840,"åĿ·":103841,"ç«¶":103842,"詳":103843,"è¿Ħä»Ĭ":103844,"è°´":103845,"çŀŃè§£":103846,"æŁ¿":103847,"é¢Ĭ":103848,"ç°§":103849,"çĥŁèĬ±":103850,"ä¾¥":103851,"çĿ¦":103852,"éħĿ":103853,"æ°ĵ":103854,"çIJī":103855,"å§Ĭ":103856,"æ²®":103857,"æħ·":103858,"èľķ":103859,"çijļ":103860,"éĩĩçŁ¿":103861,"åł°":103862,"åºķèķ´":103863,"èĨ³":103864,"è¾ķ":103865,"éŁŃ":103866,"åĴĻ":103867,"ç²½":103868,"åīĶ":103869,"沦":103870,"èĤ´":103871,"éķ¶":103872,"æĺ¼":103873,"è¾Ĺ":103874,"婪":103875,"åĮ®":103876,"æĸĵ":103877,"æ±¶":103878,"éĥ´":103879,"éł»":103880,"çªĴ":103881,"袱":103882,"åĽ±":103883,"èĢĺ":103884,"èļĮ":103885,"çĭĻ":103886,"çĹ¹":103887,"ç¥ī":103888,"æı®":103889,"æ·Ĩ":103890,"ç£ĭ":103891,"éĺª":103892,"æ«":103893,"ã¸":103894,"϶":103895,"ãij":103896,"ð£²":103897,"ä¢":103898,"ãŃ":103899,"ð¬¨":103900,"ð¬Ģ":103901,"ð¬®":103902,"ð¬¯":103903,"ð¬ľ":103904,"ðª¨":103905,"ð«Ĺ":103906,"ð¬Ĭ":103907,"ð¬±":103908,"ð¬Ł":103909,"äİ":103910,"ð¡":103911,"äĥ":103912,"ãł":103913,"ð©":103914,"ð©¾":103915,"ð¬º":103916,"ð¬Ļ":103917,"ãĢĶ":103918,"ãĢķ":103919,"çļĦæĹ¶åĢĻ":103920,"æľīéĻIJåħ¬åı¸":103921,"ä¹ĭåIJİ":103922,"ä¸ļåĬ¡":103923,"åķĬ":103924,"èϽçĦ¶":103925,"æĭ¥æľī":103926,"äºĴèģĶç½ij":103927,"éĤ£äºĽ":103928,"ä½łçļĦ":103929,"åĨ³å®ļ":103930,"éϤäºĨ":103931,"åĽ¢éĺŁ":103932,"åı¯æĺ¯":103933,"以åIJİ":103934,"社åĮº":103935,"çļĦéĹ®é¢ĺ":103936,"å¹¶ä¸Ķ":103937,"æķĻå¸Ī":103938,"å°±ä¼ļ":103939,"天空éĥ¨èIJ½":103940,"æľĢç»Ī":103941,"å½ĵçĦ¶":103942,"ä¹Łæľī":103943,"ç¡®ä¿Ŀ":103944,"æĥ³è¦ģ":103945,"è´Ńä¹°":103946,"人çļĦ":103947,"åIJ´":103948,"ç
ļĦåıijå±ķ":103949,"ä¸įçŁ¥éģĵ":103950,"软件":103951,"æĪij们çļĦ":103952,"çζæ¯į":103953,"åīij":103954,"èĢĮæĺ¯":103955,"å®īæİĴ":103956,"åIJİæĿ¥":103957,"çļĦåľ°æĸ¹":103958,"èµµ":103959,"èĢĥè¯ķ":103960,"çªģçĦ¶":103961,"ä¸Ģå®ļè¦ģ":103962,"åĪ¶ä½ľ":103963,"è¯Ħä»·":103964,"åħįè´¹":103965,"è´¹ç͍":103966,"绣ä¸Ģ":103967,"çĦ¶èĢĮ":103968,"è¿Ļ次":103969,"éĿĴå¹´":103970,"人类":103971,"亦":103972,"让人":103973,"è´Łè´£äºº":103974,"éĩĩåıĸ":103975,"çļĦäºĭæĥħ":103976,"ä¹Łä¼ļ":103977,"车è¾Ĩ":103978,"æĽ´æĺ¯":103979,"强åĮĸ":103980,"æĪijåĢij":103981,"以åīį":103982,"ä¼ĺåĮĸ":103983,"å§Ķåijĺä¼ļ":103984,"åĽ°éļ¾":103985,"年度":103986,"ä½įäºİ":103987,"æĮĩåĩº":103988,"åĨῬ¡":103989,"åĬŀçIJĨ":103990,"æ¯ı个":103991,"对æĸ¹":103992,"è¿Ľè¡ĮäºĨ":103993,"æľĢé«ĺ":103994,"课ç¨ĭ":103995,"身ä¸Ĭ":103996,"æĽ¾ç»ı":103997,"åĮ»çĶŁ":103998,"å®īè£ħ":103999,"æľ±":104000,"è¿IJè¡Į":104001,"åıĮæĸ¹":104002,"æľĢ大çļĦ":104003,"æŀĦ建":104004,"è¿ŀç»Ń":104005,"çļĦå°ı":104006,"她çļĦ":104007,"çŃīçŃī":104008,"æĶ¹åĸĦ":104009,"åIJĦç±»":104010,"éģĩåΰ":104011,"æľīçĿĢ":104012,"人çī©":104013,"æĢ»æĺ¯":104014,"è¿ħéĢŁ":104015,"åζå®ļ":104016,"å®ĥ们":104017,"å®ĺç½ij":104018,"è¿ĺè¦ģ":104019,"ç»Īäºİ":104020,"æĪ¿åľ°äº§":104021,"è¯ģæĺİ":104022,"èĤ¡ç¥¨":104023,"åºĶå½ĵ":104024,"èĭ±åĽ½":104025,"è¿IJç͍":104026,"æľĢæĸ°":104027,"享åıĹ":104028,"让æĪij":104029,"æĻļä¸Ĭ":104030,"å¾ŀ":104031,"å°ı说":104032,"å°¤åħ¶æĺ¯":104033,"è®Ńç»ĥ":104034,"åħ¨å¸Ĥ":104035,"æĮijæĪĺ":104036,"æľīçĤ¹":104037,"带çĿĢ":104038,"çļĦä¸ľè¥¿":104039,"é£İæł¼":104040,"é»Ħéĩij":104041,"å¼ķ导":104042,"æŃ¤å¤ĸ":104043,"æľĢè¿ij":104044,"追æ±Ĥ":104045,"强è°ĥ":104046,"ä¹Łåı¯ä»¥":104047,"æĦŁåΰ":104048,"èĩªæĪij":104049,"çī¹åĪ«æĺ¯":104050,"æĪIJéĥ½":104051,"éĢIJæ¸IJ":104052,"å¿«ä¹IJ":104053,"ä¹ĭä¸Ń":104054,"æĬķèµĦèĢħ":104055,"ä»ĸ们çļĦ":104056,"æ°ı":104057,"å·¥ä½ľäººåijĺ":104058,"äºĨä¸Ģ个":104059,"åķ¦":104060,"ä¸ĢåĢĭ":104061,"åŁºå±Ĥ":104062,"æ²ŁéĢļ":104063,"第ä¸Ģ次":104064,"并没æľī":104065,"çļĦå·¥ä½ľ":104066,"åľ¨è¿ĻéĩĮ":104067,"æŀª":104068,"æĶ¯æĴij":104069,"æĹ¶å°ļ":104070,"æĿ¥åΰ":104071,"æĶ¶è´Ń":104072,"éĿ©åij½":104073,"æĺ¯ä¸įæĺ¯":104074,"讨论":104075,"ä¸ļ绩":104076,"å°±èĥ½":104077,"ç«ĭåį³":104078,"è¡Ĺéģĵ":104079,"åľ¨ä¸Ģèµ·":104080,"æľĪ份":104081,"é«ĺ端":104082,"å¾Īéļ¾":104083,"ä¿Ħç½Ĺæĸ¯":104084,"æīĭ段":104085,"åģļåĩº":104086,"ä¼Ĺå¤ļ":104087,"å®ŀè¡Į":104088,"æīĵå¼Ģ":104089,"游客":104090,"ä¾ĿçĦ¶":104091,"å°±åĥı":104092,"离å¼Ģ":104093,"说éģĵ":104094,"æĸ°èĥ½æºIJ":104095,"溪":104096,"äºķ":104097,"令人":104098,"ä¸Ģåľº":104099,"æĪijæĥ³":104100,"两人":104101,"èĩ³å°ij":104102,"çļĦçĶŁæ´»":104103,"æĺ¯ä¸ª":104104,"èĭ±è¯Ń":104105,"æ²Ĵæľī":104106,"æĢĿèĢĥ":104107,"éĻIJåζ":104108,"åı°æ¹¾":104109,"ä¸ĢæĹ¦":104110,"çļĦä¸Ģ个":104111,"é«ĺ级":104112,"åĬŀåħ¬å®¤":104113,"å¾·åĽ½":104114,"æĪijå°±":104115,"å®ļä½į":104116,"éĢĤåºĶ":104117,"æĮĩæłĩ":104118,"åħ¨çľģ":104119,"ä¸Ĭè¿°":104120,"å®ĥçļĦ":104121,"åĽŀå®¶":104122,"欧洲":104123,"éĵģè·¯":104124,"é¼ĵåĬ±":104125,"çļĦå½±åĵį":104126,"é«ĺæł¡":104127,"天ä¸ĭ":104128,"é«ĺè´¨éĩı":104129,"æĿŃå·ŀ":104130,"èµĦ讯":104131,"æĶ¾åľ¨":104132,"æľīä¸Ģ个":104133,"å°±è¦ģ":104134,"ä¸ĬéĿ¢":104135,"è§£éĩĬ":104136,"éĢIJæŃ¥":104137,"尽管":104138,"æľīä»Ģä¹Ī":104139,"çļĦäºĭ":104140,"çĻ»è®°":104141,"人æ°ijå¸ģ":104142,"è§Ĥä¼Ĺ":104143,"è§Ĥå¯Ł":104144,"ç͵èĦij":104145,"çļĦåIJĮæĹ¶":104146,"ä½ľä¸ļ":104147,"宣å¸ĥ":104148,"çļĦä½ľç͍":104149,"åĽŀæĿ¥":104150,"éļ¾ä»¥":104151,"æīĢæľīçļĦ":104152,"å°ıåѦ":104153,"æıIJåīį":104154,"æ¤įçī©":104155,"åĩ¯":104156,"ä¸ĬäºĨ":104157,"å°±åľ¨":104158,"åħĪåIJİ":104159,"æīĭæľ¯":104160,"éĥŃ":104161,"éĿ¢åīį":104162,"æ¯ķ竣":104163,"äºĮæĺ¯":104164,"红èī²":104165,"éĺ³åħī":104166,"èĭ¹æŀľ":104167,"å¾Īå¤ļ人":104168,"ç»ĻæĪij":104169,"åĵ¦":104170,"çľ¼çĿĽ":104171,"éłŃ":104172,"ä¸Ģ
æĺ¯":104173,"åıijå±ķçļĦ":104174,"åıįåºĶ":104175,"æĪ¿å±ĭ":104176,"æľŁå¾ħ":104177,"ç§įæ¤į":104178,"æĸĩåѦ":104179,"åį³åı¯":104180,"é¦ĸ次":104181,"èĭ±éĽĦ":104182,"å¤ļ次":104183,"åĮħè£ħ":104184,"æ²³åįĹ":104185,"ä¹ĭéĹ´çļĦ":104186,"ä»įçĦ¶":104187,"åIJ¬åΰ":104188,"èij£äºĭéķ¿":104189,"è§ĦåĪĻ":104190,"ä¸Ģ份":104191,"大ä¼Ĺ":104192,"使å¾Ĺ":104193,"è¿Ľåı£":104194,"ä¸Ģçīĩ":104195,"æĢ§çļĦ":104196,"çļĦ大":104197,"æĪijæĺ¯":104198,"äºĴåĬ¨":104199,"æ°£":104200,"çļĨ":104201,"åħ¬åı¸çļĦ":104202,"ä¸Ģè¾¹":104203,"åıĬåħ¶":104204,"èī¯å¥½çļĦ":104205,"æĭĵå±ķ":104206,"å½ĵå¹´":104207,"å¹¿åľº":104208,"åģļäºĨ":104209,"åŁºäºİ":104210,"æıIJéĨĴ":104211,"åħĦå¼Ł":104212,"èĢģæĿ¿":104213,"è¿ijæĹ¥":104214,"çĬ¶åĨµ":104215,"注éĩį":104216,"åĪļåĪļ":104217,"è°ĥçłĶ":104218,"å¿ĥä¸Ń":104219,"æĬĬæı¡":104220,"éļıåIJİ":104221,"ä¸įå¤Ł":104222,"åĪĽä½ľ":104223,"ç«Ļåľ¨":104224,"缸äºĴ":104225,"çĸ«æĥħéĺ²æİ§":104226,"年代":104227,"带åĬ¨":104228,"伤害":104229,"竣çĦ¶":104230,"å¼ķè¿Ľ":104231,"累计":104232,"让æĪij们":104233,"åĽŀæĶ¶":104234,"æĬ¥åIJį":104235,"åĬ©åĬĽ":104236,"èģĶ缣":104237,"çŃĸçķ¥":104238,"åij¨è¾¹":104239,"åĭĴ":104240,"è¿ĺåľ¨":104241,"æµģéĩı":104242,"寻æī¾":104243,"ç͵åĬĽ":104244,"èιèζ":104245,"è¿ĺèĥ½":104246,"æĭħä»»":104247,"çļĦæĥħåĨµä¸ĭ":104248,"çļĦåİŁåĽł":104249,"缺ä¹ı":104250,"çIJĥåijĺ":104251,"å²ģçļĦ":104252,"çĶ·åŃIJ":104253,"å·¥èµĦ":104254,"è¿ijå¹´æĿ¥":104255,"åijĢ":104256,"æıIJä¾ĽäºĨ":104257,"她们":104258,"å®¶åħ·":104259,"çĩķ":104260,"è½»æĿ¾":104261,"æł¡åĽŃ":104262,"èĢĥæł¸":104263,"åį±éĻ©":104264,"åħļç»Ħç»ĩ":104265,"æĢ»ç»ıçIJĨ":104266,"çļĦæĸ°":104267,"çİ»çĴĥ":104268,"è¿Ļä½į":104269,"对æŃ¤":104270,"家人":104271,"çļĦè¦ģæ±Ĥ":104272,"温度":104273,"æĮĩæķ°":104274,"缴åΰ":104275,"æŃ¤æĹ¶":104276,"æ¹ĸåįĹ":104277,"éĥ½è¦ģ":104278,"ä½ľåĩº":104279,"åIJĦä½į":104280,"èĢĥçĶŁ":104281,"ä¾Ŀæį®":104282,"说è¯Ŀ":104283,"æĪijä¹Ł":104284,"å·¥åİĤ":104285,"åıĺæĪIJ":104286,"ä»ĸ人":104287,"æĪijè§īå¾Ĺ":104288,"åIJĦ级":104289,"ä¼łå¥ĩç§ģæľį":104290,"ä¸Ĭåįĩ":104291,"好åĥı":104292,"åĬłéĢŁ":104293,"äºĮåįģ":104294,"è¢ģ":104295,"è£ħ饰":104296,"éĥ½èĥ½":104297,"ä¸Ģå¼ł":104298,"åĬ¨æĢģ":104299,"å¹´çļĦ":104300,"è¿Ļå°±æĺ¯":104301,"ä¹Łè¦ģ":104302,"èµĦæł¼":104303,"æĪĺäºī":104304,"æĦŁè°¢":104305,"åŁ¹èĤ²":104306,"天æ°Ķ":104307,"女士":104308,"åı¯èĥ½ä¼ļ":104309,"çļĦ产åĵģ":104310,"ä¹Łå°±":104311,"主è¦ģæĺ¯":104312,"åĪºæ¿Ģ":104313,"ç»Ļä½ł":104314,"大æķ°æį®":104315,"åĮ»åѦ":104316,"åΤæĸŃ":104317,"ä»ĸ说":104318,"表æ¼Ķ":104319,"äºļæ´²":104320,"ä¸ĵé¢ĺ":104321,"ç«ŀäºīåĬĽ":104322,"éĤ£æł·":104323,"å±ķå¼Ģ":104324,"å¹³æĹ¶":104325,"æİ¥ä¸ĭæĿ¥":104326,"æī¿è¯º":104327,"æ³ķåĽ½":104328,"åħ³å¿ĥ":104329,"ä¼ļæľī":104330,"éĤĢ请":104331,"é¢Ħéĺ²":104332,"对æİ¥":104333,"好äºĨ":104334,"åĴ±ä»¬":104335,"çļĦæĦŁè§ī":104336,"æĢĿè·¯":104337,"éĥ½æ²¡æľī":104338,"çļĦæĸ¹æ³ķ":104339,"女åŃIJ":104340,"åı¸æ³ķ":104341,"è¿ĺä¼ļ":104342,"è¶ĬæĿ¥è¶Ĭå¤ļ":104343,"åĽłçĤº":104344,"æµ·åįĹ":104345,"人æķ°":104346,"å°Ĩä¼ļ":104347,"ä¸ļ主":104348,"é¤IJ饮":104349,"å±ħä½ı":104350,"åıijåĩº":104351,"è¿ijæľŁ":104352,"å¼ķé¢Ĩ":104353,"æľºåĻ¨äºº":104354,"åĩºæĿ¥çļĦ":104355,"çľĭè§ģ":104356,"ä¿Ĭ":104357,"让ä»ĸ":104358,"ä¸įæĥ³":104359,"å·¥ä½ľçļĦ":104360,"è¡¥åħħ":104361,"æµħ":104362,"çī¹å¾ģ":104363,"ä¸Ĭå¸Ĥåħ¬åı¸":104364,"ç¾İé£Ł":104365,"广西":104366,"æ¯ıä¸Ģ个":104367,"èIJ½åľ°":104368,"åĵģç§į":104369,"åĴĮè°IJ":104370,"å½»åºķ":104371,"é«ĺèĢĥ":104372,"æĺ¨å¤©":104373,"åīįå¾Ģ":104374,"çĽijæµĭ":104375,"çĻ¾åº¦":104376,"åľ¨ä¸ŃåĽ½":104377,"çļĦéľĢæ±Ĥ":104378,"亿ç¾İåħĥ":104379,"åŃ¦æľ¯":104380,"æĶ¶åΰ":104381,"æĿ¿åĿĹ":104382,"ä¸Ģ段":104383,"æŀĦæĪIJ":104384,"ä¼ģä¸ļçļĦ":104385,"表éĿ¢":104386,"æķ´çIJĨ":104387,"ç»ĵå©ļ":104388,"人家":104389,"åģľæŃ¢":104390,"åѦç§ij":104391,"æĺ¾å¾Ĺ":104392,"ä¼ijæģ¯":104393,"é¢ĦæľŁ":104394,"
æĪĸæĺ¯":104395,"çļĦ主è¦ģ":104396,"åºĶ对":104397,"èµ°äºĨ":104398,"ä¸ŃéĹ´":104399,"èµ°è¿Ľ":104400,"åijĪçݰ":104401,"æIJŃéħį":104402,"é¹ı":104403,"æĺ¯åĽłä¸º":104404,"æĥħ绪":104405,"å®ļæľŁ":104406,"社ä¼ļ主ä¹ī":104407,"çŃī级":104408,"çŁĽçĽ¾":104409,"é£ŀæľº":104410,"èĩ³ä»Ĭ":104411,"æĶ¶éĽĨ":104412,"çļĦæķħäºĭ":104413,"åĪĩå®ŀ":104414,"å®ŀçݰäºĨ":104415,"å½¢æĪIJäºĨ":104416,"åįĹæĸ¹":104417,"ä¸ŃåѦ":104418,"æµ·æ´ĭ":104419,"åIJ¦åĪĻ":104420,"æĭįæijĦ":104421,"大åѦçĶŁ":104422,"åĩºçݰäºĨ":104423,"æĦıå¤ĸ":104424,"ä¹Łèĥ½":104425,"çļĦèĥ½åĬĽ":104426,"åĿIJåľ¨":104427,"åĪĻæĺ¯":104428,"èĢĥå¯Ł":104429,"å°Ĭéĩį":104430,"éĺ²æŃ¢":104431,"ç´§å¼ł":104432,"读书":104433,"åĩºè¡Į":104434,"å°±æľī":104435,"å±¥è¡Į":104436,"çݰ代åĮĸ":104437,"åĽ½åĬ¡":104438,"åĽ½åĬ¡éĻ¢":104439,"ç»´ä¿®":104440,"åİŁåĪĽ":104441,"æĺ¯æĮĩ":104442,"ä¼ijéĹ²":104443,"çĤ®":104444,"æĸ°æĹ¶ä»£":104445,"éĢĻåĢĭ":104446,"ä¸įæķ¢":104447,"å®Įç¾İ":104448,"ç»ĨèĬĤ":104449,"éŃı":104450,"èͬèıľ":104451,"é¢Ĩ导çıŃåŃIJ":104452,"è¶ħ级":104453,"è¡Įæĥħ":104454,"人工æĻºèĥ½":104455,"åį°åº¦":104456,"åŁºç¡Ģ设æĸ½":104457,"åıĪæĺ¯":104458,"èį¯çī©":104459,"åIJ¸æĶ¶":104460,"åį´æĺ¯":104461,"éĥİ":104462,"å¥ĸåĬ±":104463,"çļĦæľĭåıĭ":104464,"ä¿ĿçķĻ":104465,"è§Ħå¾ĭ":104466,"æĸ°çĸĨ":104467,"è¿ĺåı¯ä»¥":104468,"æİ¥è¿ij":104469,"æŃ¤åīį":104470,"æī¹åĩĨ":104471,"æĢİä¹Īæł·":104472,"çļĦä½įç½®":104473,"ä¸ĢåĿĹ":104474,"æĭĴç»Ŀ":104475,"顾客":104476,"ä¹Łåľ¨":104477,"ä¸ĢçĶŁ":104478,"éĥ¨éĺŁ":104479,"å¹´åīį":104480,"æĸ¹éĿ¢çļĦ":104481,"å°Ŀè¯ķ":104482,"羣æŃ£çļĦ":104483,"ç¦ģæŃ¢":104484,"è¿ĺ没æľī":104485,"æ°ijçĶŁ":104486,"èµ°åIJij":104487,"èĦ¸ä¸Ĭ":104488,"å½ĵ天":104489,"éĽĨåĽ¢åħ¬åı¸":104490,"çļĦä¸Ģç§į":104491,"西æĸ¹":104492,"åĽŀåºĶ":104493,"ä¸Ģ声":104494,"常常":104495,"æıIJåΰ":104496,"èħ¾è®¯":104497,"æľįè£ħ":104498,"为ä½ķ":104499,"äºijåįĹ":104500,"å°±ç®Ĺ":104501,"ä¼łæī¿":104502,"åıįèĢĮ":104503,"ä¸ĩåIJ¨":104504,"财产":104505,"å¦Ĥä¸ĭ":104506,"æĹ¥åīį":104507,"åİŁæľ¬":104508,"æľĢéĩįè¦ģçļĦ":104509,"认è¯ģ":104510,"ä¸Ģéģĵ":104511,"ä¿¡æģ¯åĮĸ":104512,"å¾ĹåΰäºĨ":104513,"é̲è¡Į":104514,"æĪijè¦ģ":104515,"éĢļä¿¡":104516,"室åĨħ":104517,"èµļéĴ±":104518,"æĶ¶èĹı":104519,"è§£åĨ³æĸ¹æ¡Ī":104520,"æĪ¿äº§":104521,"çĭ¼":104522,"æ´»åĬĽ":104523,"ç»ıæµİåıijå±ķ":104524,"çŃīå¾ħ":104525,"ä¹Łå¾Ī":104526,"åĿij":104527,"å¾Ī好çļĦ":104528,"éļ¾åº¦":104529,"ä¸įå¦Ĥ":104530,"人æ°ijæĶ¿åºľ":104531,"åĩºåıij":104532,"åīįæľŁ":104533,"æ¼Ķåijĺ":104534,"女çĶŁ":104535,"èģļçĦ¦":104536,"审计":104537,"é¢Ħæµĭ":104538,"ä¾Ŀæīĺ":104539,"äºĶå¹´":104540,"补贴":104541,"æ¸ħæĻ°":104542,"éªĤ":104543,"çľĭèµ·æĿ¥":104544,"çļĦåŃ©åŃIJ":104545,"é¢ijéģĵ":104546,"ä½ıå®ħ":104547,"éĿ¢åIJij":104548,"æľĢä½İ":104549,"æĹ¢çĦ¶":104550,"ä¸Ģå¥Ĺ":104551,"æķ°åѦ":104552,"群ä½ĵ":104553,"åĮĹ京å¸Ĥ":104554,"å±ħçĦ¶":104555,"æ°ĽåĽ´":104556,"éĢĶå¾Ħ":104557,"çļĦåŁºç¡Ģä¸Ĭ":104558,"èģĮè´£":104559,"åı¯èĥ½æĺ¯":104560,"åĨĽäºĭ":104561,"æĪIJæķĪ":104562,"åŃ©åŃIJ们":104563,"计ç®Ĺæľº":104564,"赤":104565,"产ä¸ļåıijå±ķ":104566,"巨大çļĦ":104567,"工人":104568,"çĶŁéķ¿":104569,"éĥ½åı¯ä»¥":104570,"çļĦæľºä¼ļ":104571,"èµĦè´¨":104572,"çĹĽèĭ¦":104573,"ç²īä¸Ŀ":104574,"å¢ĵ":104575,"å¹³å®ī":104576,"管éģĵ":104577,"è·ŁçĿĢ":104578,"é¥®é£Ł":104579,"åķĨå®¶":104580,"å¤ļå®¶":104581,"åı¸æľº":104582,"åºĶ该æĺ¯":104583,"éĢıéľ²":104584,"认å®ļ":104585,"è¡Įä¸ļçļĦ":104586,"çļĦä¼ģä¸ļ":104587,"æ¯ıä¸Ģ":104588,"èĮĥåĽ´åĨħ":104589,"è¾ĥ大":104590,"è´¤":104591,"å¤§èµĽ":104592,"å¤ļäºĨ":104593,"鸿":104594,"临åºĬ":104595,"åľ¨è¿Ļ个":104596,"çļĦåĨħ容":104597,"éĶĢéĩı":104598,"å¾Īå°ij":104599,"åŃŁ":104600,"ç»´æĮģ":104601,"åĴĸåķ¡":104602,"æľ¬åľ°":104603,"èī²å½©":104604,"å¹¶éĿŀ":104605,"èĢĮå·²":104606,"温æļĸ":104607,"èIJ§":104608,"æĬĵä½ı":104609,"èĢĮä¸įæĺ¯":104610,"åĸĬ":104611,"çļĦåħ³ç³»":104612,"çī©åĵģ":104613,"
éĤ£æĺ¯":104614,"åĨľäº§åĵģ":104615,"è¿ĻæĹ¶":104616,"å©ļå§»":104617,"æ°´æŀľ":104618,"æĶ¶èİ·":104619,"ä»ĺåĩº":104620,"客æĪ·ç«¯":104621,"æ¼Ķåĩº":104622,"åħ¨æĸ°":104623,"è¿Ļä¹Łæĺ¯":104624,"æĺ¯çͱ":104625,"è§Ĥ念":104626,"æľī个":104627,"éĢłåŀĭ":104628,"èĥľåĪ©":104629,"ä¸īæĺ¯":104630,"è¶ħå¸Ĥ":104631,"åħļå»ºå·¥ä½ľ":104632,"æĶ¾å¿ĥ":104633,"线路":104634,"æĭĽçĶŁ":104635,"åIJĥé¥Ń":104636,"è½ī":104637,"å°½éĩı":104638,"è§ģåΰ":104639,"åIJĮæ¯Ķå¢ŀéķ¿":104640,"åįİ为":104641,"æĪijå¸Ĥ":104642,"æıIJåĩºäºĨ":104643,"æ°ijèѦ":104644,"åįļçī©":104645,"åįļçī©é¦Ĩ":104646,"è¯ļä¿¡":104647,"åīįéĿ¢":104648,"山西":104649,"è¾ħåĬ©":104650,"转移":104651,"æĽ´ä¸º":104652,"丰å¯ĮçļĦ":104653,"åį¢":104654,"å¿«éĢĴ":104655,"æĺ¾èijĹ":104656,"çī©èµĦ":104657,"åĪ°è¾¾":104658,"æľīåĪ©äºİ":104659,"åijĨ":104660,"åŃ©åŃIJçļĦ":104661,"ä¸įä½Ĩ":104662,"çłĶç©¶éĻ¢":104663,"çͳæĬ¥":104664,"æļ¨":104665,"æ°ijéĹ´":104666,"åį»":104667,"çļĦå£°éŁ³":104668,"å¸ĤåľºçļĦ":104669,"ä¸Ģåı¥":104670,"çľģ级":104671,"æĿ¥çļĦ":104672,"åĵªä¸ª":104673,"æīįä¼ļ":104674,"åĪĨéħį":104675,"èĶ¡":104676,"ä»ĸåľ¨":104677,"åħ±æľī":104678,"å¡ĺ":104679,"èĴĤ":104680,"éľį":104681,"åıĤè§Ĥ":104682,"ä¸Ī夫":104683,"ä¾ĿéĿł":104684,"æľīæĹ¶":104685,"äºĨå¾Īå¤ļ":104686,"ä¸ĸçķĮæĿ¯":104687,"å®¶æĹı":104688,"ä¸įéľĢè¦ģ":104689,"大å¸Ī":104690,"èŀįåħ¥":104691,"éĿŀæ³ķ":104692,"çĹħ人":104693,"åIJİæľŁ":104694,"大家éĥ½":104695,"ç½ijåĿĢ":104696,"åİŁæĸĻ":104697,"ä¾¿å®ľ":104698,"æ¶Ľ":104699,"ä»¿ä½Ľ":104700,"å·®è·Ŀ":104701,"åı¦ä¸Ģæĸ¹éĿ¢":104702,"产åĵģçļĦ":104703,"赫":104704,"æĥħåĨµä¸ĭ":104705,"éĴ¢éĵģ":104706,"æľ¬ç«Ļ":104707,"纳åħ¥":104708,"å·²æľī":104709,"æľī没æľī":104710,"估计":104711,"é£ĺ":104712,"æľŁè´§":104713,"åĢĭ人è³ĩæĸĻ":104714,"ä¸ĵä¸ļçļĦ":104715,"çĪĨåıij":104716,"èĩ´åĬĽäºİ":104717,"çİ°åľ¨çļĦ":104718,"æľīåĵªäºĽ":104719,"çł´åĿı":104720,"æķ°åŃĹåĮĸ":104721,"åľ°éĿ¢":104722,"é»ijèī²":104723,"å¹¼åĦ¿åĽŃ":104724,"çļĦç²¾ç¥ŀ":104725,"äºŃ":104726,"导æ¼Ķ":104727,"çݰæľī":104728,"æŃ¦åύ":104729,"èĭıå·ŀ":104730,"çİĦ":104731,"æ±Łè¥¿":104732,"延伸":104733,"论æĸĩ":104734,"è¾ĥ为":104735,"çİ©æ³ķ":104736,"é¼İ":104737,"åIJĮæŃ¥":104738,"éĩĬæĶ¾":104739,"æĽĿåħī":104740,"åĿļåĨ³":104741,"å§Ķæīĺ":104742,"å°Ĩåľ¨":104743,"äºĪ以":104744,"ä½ľæĸĩ":104745,"èĢĮåľ¨":104746,"ä¼ĺåħĪ":104747,"åĽŀåİ»":104748,"ä¿®å¤į":104749,"åĽ½åĨħå¤ĸ":104750,"çŃĸåĪĴ":104751,"åıijæĶ¾":104752,"å¿ĥæĥħ":104753,"çļĦåİĨåı²":104754,"éĿ¢è¯ķ":104755,"举åĮĹ":104756,"ä¿¡åı·":104757,"ç²®é£Ł":104758,"è¯ģ书":104759,"æŁIJäºĽ":104760,"è¿IJä½ľ":104761,"åĨ²åĩ»":104762,"çĥŃçĤ¹":104763,"æĹ¶æĹ¶":104764,"æĹ¶æĹ¶å½©":104765,"åľ°çĤ¹":104766,"ä¸Ģä½ĵåĮĸ":104767,"éļ¾é¢ĺ":104768,"æĽ°":104769,"ç«ĭåĪ»":104770,"æĺ¯éĿŀ常":104771,"åħ±åĴĮ":104772,"åħ±åĴĮåĽ½":104773,"æ¿ĢåĬ±":104774,"æľīæķĪçļĦ":104775,"å¤Ħç½®":104776,"该åħ¬åı¸":104777,"æ£ĢéªĮ":104778,"èѦæĸ¹":104779,"è´¾":104780,"äºĨä¸Ģä¸ĭ":104781,"ä»ĬåIJİ":104782,"çħ®":104783,"ç͍åĵģ":104784,"读èĢħ":104785,"æĪijåľ¨":104786,"åĽŀå¤į":104787,"ä¸Ģ座":104788,"è¿ĺ没":104789,"å®ļåζ":104790,"没æĥ³åΰ":104791,"夹":104792,"ä¼łéĢĴ":104793,"ä¸Ģ款":104794,"强大çļĦ":104795,"çļĦè¡Į为":104796,"å¤ı天":104797,"åıijåĬ¨æľº":104798,"é¢ĨåŁŁçļĦ":104799,"å®ŀéªĮ室":104800,"ä¸ĢæĬĬ":104801,"æĺ¯ä¸ºäºĨ":104802,"éĻķ西":104803,"æĭħä¿Ŀ":104804,"è¾¾æĪIJ":104805,"è¦ģæĺ¯":104806,"æĺİ天":104807,"ç»Ļä»ĸ":104808,"建ç«ĭäºĨ":104809,"ä¸įè¡Į":104810,"ä¸Ńæĸĩ":104811,"åľ°è¯´":104812,"åIJİçļĦ":104813,"çĽijæİ§":104814,"é̏":104815,"æĢ»éĥ¨":104816,"æľ¬æĸĩ":104817,"鹿":104818,"æĻ¯è§Ĥ":104819,"çļĦ缮æłĩ":104820,"èĽĩ":104821,"åĨ¯":104822,"ä¸ŃåĮ»":104823,"æķĪåºĶ":104824,"产éĩı":104825,"åŃĿ":104826,"è´¦æĪ·":104827,"è¿Ŀåıį":104828,"èij£äºĭä¼ļ":104829,"äº¬ä¸ľ":104830,"责任ç¼ĸè¾ij":104831,"åķıé¡Į":104832,"çαå¿ĥ":104833,"èŃ¦å¯Ł":104834,"é¤IJåİħ":104835,"å¸ĤæĶ
¿åºľ":104836,"天天":104837,"æĸ°é²ľ":104838,"éĥijå·ŀ":104839,"è¶ħè¶Ĭ":104840,"å½Ń":104841,"çŁ¥è¯Ĩ产æĿĥ":104842,"åĽŀå¿Ĩ":104843,"路线":104844,"å»īæ´ģ":104845,"éĿĴå°ijå¹´":104846,"åıĸå¾ĹäºĨ":104847,"çľĭåΰäºĨ":104848,"馬":104849,"ç²¾åĵģ":104850,"åľ°éĵģ":104851,"æĮģæľī":104852,"ä¸ĭäºĨ":104853,"æľīæĹ¶åĢĻ":104854,"ä¸Ģ人":104855,"æĴĴ":104856,"ä»Ķç»Ĩ":104857,"èĢģåħ¬":104858,"äºĭå®ŀä¸Ĭ":104859,"èģĶèµĽ":104860,"ä¾ĽåºĶéĵ¾":104861,"é¢Ħç®Ĺ":104862,"åζéĢłä¸ļ":104863,"å®īåħ¨çĶŁäº§":104864,"俱ä¹IJ":104865,"俱ä¹IJéĥ¨":104866,"çļĦæł¸å¿ĥ":104867,"æīĵç®Ĺ":104868,"å½±çīĩ":104869,"æIJŃ建":104870,"ä¹Łä¸įä¼ļ":104871,"æĭħå½ĵ":104872,"å±ĤéĿ¢":104873,"åѦåijĺ":104874,"临æĹ¶":104875,"缸ç»ĵåIJĪ":104876,"对æ¯Ķ":104877,"ä»ĸæĺ¯":104878,"æĸ°åĮº":104879,"è¿Ľåİ»":104880,"çϾ年":104881,"ä¿©":104882,"尽快":104883,"ç͵åŃIJåķĨåĬ¡":104884,"æĽ´æľī":104885,"æ¸ħçIJĨ":104886,"åı¦ä¸Ģ个":104887,"åĤ»":104888,"ä»Ģä¹Īæł·çļĦ":104889,"æĺ¯æľĢ":104890,"åij¨å¹´":104891,"å¾Ī容æĺĵ":104892,"åĽ¢ç»ĵ":104893,"ç´Ħ":104894,"æĹ©å·²":104895,"çļĦåıĺåĮĸ":104896,"éľŀ":104897,"æĹ¥ä¸ĬåįĪ":104898,"失åİ»":104899,"ä¸Ńåľĭ":104900,"çļĦä¸ĢäºĽ":104901,"å°ıåŃ©":104902,"ä¸ĭè·Į":104903,"éĶ»çĤ¼":104904,"éij":104905,"éij«":104906,"å¿ĹæĦ¿èĢħ":104907,"èĤ¡å¸Ĥ":104908,"èµĽäºĭ":104909,"许åı¯è¯ģ":104910,"åı¯æĮģç»Ń":104911,"åijĬè¯īè®°èĢħ":104912,"éĢ»è¾ij":104913,"å¼ķåħ¥":104914,"çļĦè¿ĩç¨ĭä¸Ń":104915,"è§Ĩè§ī":104916,"èĩªæ²»åĮº":104917,"è¯ģæį®":104918,"è£ħç½®":104919,"第ä¸īæĸ¹":104920,"å¹´æĿ¥":104921,"å¹¿ä¸ľçľģ":104922,"带æĿ¥äºĨ":104923,"éķ¿æ±Ł":104924,"访éĹ®":104925,"å·®ä¸įå¤ļ":104926,"æĺ¯æĪij":104927,"éģŃéģĩ":104928,"æĬĵ好":104929,"é«ĺè¾¾":104930,"å¹¶åľ¨":104931,"èĩªè§ī":104932,"ä¾ĽåºĶåķĨ":104933,"æĥħæĦŁ":104934,"ä½ıäºĨ":104935,"çļĦèģĮä¸ļ":104936,"çļĩå¸Ŀ":104937,"西éĥ¨":104938,"åĴĮå¹³":104939,"çļĦåĬĽéĩı":104940,"汪":104941,"åħħåĪĨåıijæĮ¥":104942,"æĬķè¯ī":104943,"èµ·åΰ":104944,"äºĴ缸":104945,"æ¾³éŨ":104946,"æİ¥åΰ":104947,"æ°´æ³¥":104948,"模åŀĭ":104949,"ä¸ĢåįĬ":104950,"ç§©åºı":104951,"æĪijä»¬åľ¨":104952,"æī¿è®¤":104953,"ä¸Ģéĥ¨åĪĨ":104954,"åįłæ¯Ķ":104955,"å¦ĩ女":104956,"ç²ĺ":104957,"äºĨè§£åΰ":104958,"ä¸Ģå®ļä¼ļ":104959,"åIJĦ大":104960,"èµ°åĩº":104961,"为大家":104962,"é«ĺéĵģ":104963,"åı¯ä»¥åľ¨":104964,"ä½Ĩåľ¨":104965,"çĶŁæĢģçݯå¢ĥ":104966,"èı¯":104967,"çļĦä»·æł¼":104968,"麻çĥ¦":104969,"æ¿Ģåıij":104970,"éĤ£å°±":104971,"çļĦæł·åŃIJ":104972,"为æŃ¤":104973,"å¤©åľ°":104974,"çļĦ缮çļĦ":104975,"åĢºåΏ":104976,"å·²ç¶ĵ":104977,"åĽĽå¤§":104978,"åIJĮæĹ¶ä¹Ł":104979,"å½¼æŃ¤":104980,"æĭ¿åΰ":104981,"åIJ«éĩı":104982,"åįģ大":104983,"éļ¾éģĵ":104984,"å¼Ĺ":104985,"ä¸Ģ段æĹ¶éĹ´":104986,"çħ§é¡¾":104987,"æķ°æį®æĺ¾ç¤º":104988,"æĪIJ为äºĨ":104989,"èµ°åΰ":104990,"æľ¬åħ¬åı¸":104991,"ç»Ī端":104992,"ä¹Łä¸įæĺ¯":104993,"头åıij":104994,"大约":104995,"é£İæĻ¯":104996,"æ¶ĪèĢĹ":104997,"å®¡æŁ¥":104998,"äºīåıĸ":104999,"æ³ķæ²»":105000,"äºĭçī©":105001,"ç¼ĵè§£":105002,"æĥ¨":105003,"缸åºĶçļĦ":105004,"çļĦæķĪæŀľ":105005,"åıįå¤į":105006,"åıijçĶŁäºĨ":105007,"éĢĻäºĽ":105008,"ç»ĥä¹ł":105009,"åݨæĪ¿":105010,"å¼Ģæĭĵ":105011,"欣èµı":105012,"夫妻":105013,"ä¸įä¸Ģæł·":105014,"产èĥ½":105015,"èĬ¯çīĩ":105016,"è¦ģç´ł":105017,"åıį对":105018,"çİĩåħĪ":105019,"è´§çī©":105020,"æĹ¥ç͵":105021,"ä½ľå®¶":105022,"æĶ¹è¿Ľ":105023,"æĪIJåĪĨ":105024,"åĽłèĢĮ":105025,"åĩıèĤ¥":105026,"æ½ĺ":105027,"å±±ä¸ľçľģ":105028,"åĬĿ":105029,"åŁĭ":105030,"æŃ¦è£ħ":105031,"æ±ĩæĬ¥":105032,"ä¸Ģ个æľĪ":105033,"çĥŃéŨ":105034,"大éģĵ":105035,"æ´»åĭķ":105036,"éĥ½å¾Ī":105037,"çĶµæ¢¯":105038,"ç´§æĢ¥":105039,"åĢºåĬ¡":105040,"客æľį":105041,"ä¸Ģéĥ¨":105042,"ä½łæĺ¯":105043,"çݰçĬ¶":105044,"æŃ£ç¡®çļĦ":105045,"ä¹ĭå¤Ħ":105046,"ç¼ĸåζ":105047,"ä½łåı¯ä»¥":105048,"çŃīåľ°":105049,"èİī":105050,"对è¯Ŀ":105051,"æ·ĺå®Ŀ":105052,"è°ĥèĬĤ":105053,"æİĴæĶ¾":105054,"åº
ĵåŃĺ":105055,"ç´ļ":105056,"çļĦä¼ĺåĬ¿":105057,"æĿĥå¨ģ":105058,"以ä¸ĭç®Ģç§°":105059,"ä¸Ģ项":105060,"èģļéĽĨ":105061,"ä¼łç»ŁçļĦ":105062,"æ··åIJĪ":105063,"è¿Ļä¸ĢçĤ¹":105064,"ä¸Ģçľ¼":105065,"æĹłéĻIJ":105066,"èİ·å¾ĹäºĨ":105067,"éĢīæīĭ":105068,"åζåĵģ":105069,"åįıä½ľ":105070,"çĭ¬çī¹çļĦ":105071,"ä¸Ģ级":105072,"è¿Ļ个éĹ®é¢ĺ":105073,"æĸĮ":105074,"æĺ¯æĪij们":105075,"æķĮ人":105076,"æ¸ħæ´Ĺ":105077,"ä¸ĢçĽ´åľ¨":105078,"å°ıç±³":105079,"çļĦè¿ĩç¨ĭ":105080,"åľ¨åĮĹ京":105081,"ä¸ĢæĶ¯":105082,"æĹ©ä¸Ĭ":105083,"æĸĩèīº":105084,"ç¦ıåĪ©":105085,"é£Łç͍":105086,"æĦŁåĬ¨":105087,"åħ¨ç¨ĭ":105088,"æĶ¯åĩº":105089,"æĸ°å»º":105090,"å¸ķ":105091,"æĺ¾çĦ¶":105092,"羣çļĦæĺ¯":105093,"æĸ°éĹ»ç½ij":105094,"èĥ½åIJ¦":105095,"åįıåĬ©":105096,"亲èĩª":105097,"å¾Īæľī":105098,"çϼå±ķ":105099,"æĦı大":105100,"æĦı大åĪ©":105101,"ç͵ç½ij":105102,"æĹ¥çĽĬ":105103,"çĨ±":105104,"èĤĮèĤ¤":105105,"çĶ·æĢ§":105106,"ç»Ħ建":105107,"çŃīéĹ®é¢ĺ":105108,"æ¶ĪéϤ":105109,"æĬ¤çIJĨ":105110,"å¡ijæĸĻ":105111,"ä¹Įåħĭ":105112,"ä¹Įåħĭåħ°":105113,"åķĨæłĩ":105114,"çIJ³":105115,"æĸ°æīĭ":105116,"çļĦçī¹çĤ¹":105117,"åĴ¬":105118,"å½ĵä¸ĭ":105119,"设计å¸Ī":105120,"èµĶåģ¿":105121,"第åįģ":105122,"æĻºèĥ½åĮĸ":105123,"å¼ĢåıijåĮº":105124,"åı¯ä»¥éĢļè¿ĩ":105125,"åħ±äº§åħļ":105126,"åİī害":105127,"ç쵿´»":105128,"æĹ¶åħī":105129,"éĥ¨ä½į":105130,"人æĸĩ":105131,"è¿ĽæĿ¥":105132,"ä¹ĭæīĢ以":105133,"ä¸īåįģ":105134,"çļĦåѦçĶŁ":105135,"éĺ²æĬ¤":105136,"åĽ½äº§":105137,"æ·±åľ³å¸Ĥ":105138,"éĤ£å°±æĺ¯":105139,"åΰä½į":105140,"çľĹ":105141,"çľĹæĻ®":105142,"å®ŀæĹ¶":105143,"åı°çģ£":105144,"èĢĮä¸į":105145,"æĮĩå®ļ":105146,"åĿĿ":105147,"èħIJè´¥":105148,"çī¹å®ļ":105149,"å¢ŀéĢŁ":105150,"æłĩçѾ":105151,"æĪ¿ä»·":105152,"æĦģ":105153,"贯彻èIJ½å®ŀ":105154,"æĢ§è´¨":105155,"çłĶç©¶çĶŁ":105156,"ç¾İ容":105157,"æī¹è¯Ħ":105158,"究竣":105159,"人åĬĽèµĦæºIJ":105160,"éĸĭå§ĭ":105161,"åĽŀå½Ĵ":105162,"èIJ¥åķĨ":105163,"èIJ¥åķĨçݯå¢ĥ":105164,"ä¸ŃåĽ½äºº":105165,"çļĦåŁºæľ¬":105166,"è¯Ŀé¢ĺ":105167,"æłĩåĩĨåĮĸ":105168,"西èĹı":105169,"åĭ¾":105170,"çļĦ设计":105171,"ç®ĢåįķçļĦ":105172,"å¤įåζ":105173,"æ¸IJæ¸IJ":105174,"以å¤ĸ":105175,"èģĶåĬ¨":105176,"两次":105177,"æĢ§åĴĮ":105178,"æĽ´å¤§":105179,"çļĦåIJįåŃĹ":105180,"飦":105181,"ä½łè¦ģ":105182,"å¢ĥå¤ĸ":105183,"æĹ©æľŁ":105184,"åĪĿæŃ¥":105185,"è´¦åı·":105186,"害æĢķ":105187,"æĺ¨æĹ¥":105188,"åĪļæīį":105189,"ç¥ŀç§ĺ":105190,"ç²¾å¿ĥ":105191,"æµģéĢļ":105192,"åħ¨æĸ¹ä½į":105193,"以å¾Ģ":105194,"ä¹Łå°Ĩ":105195,"æĺ¯ä¸ŃåĽ½":105196,"åĽ½å®¶çº§":105197,"å°ĨåĨĽ":105198,"æijĬ":105199,"æľĢ为":105200,"第ä¸ĢæĹ¶éĹ´":105201,"æ¶Īæ¯Ĵ":105202,"å°Ĩäºİ":105203,"å¨ģèĥģ":105204,"èĭ±æĸĩ":105205,"æīĭä¸Ń":105206,"çIJĥè¿·":105207,"è§Ĥçľĭ":105208,"离å©ļ":105209,"æľ¬åľŁ":105210,"åĪĨæķ£":105211,"æĻ´":105212,"è¦ģ注æĦı":105213,"浪费":105214,"管æİ§":105215,"åĩºåĶ®":105216,"æĢ»è£ģ":105217,"ä¸Ģéĺµ":105218,"å¨ĩ":105219,"äºĶ个":105220,"å½ĵåĪĿ":105221,"çºłçº·":105222,"ä¸ĵç͍":105223,"å¤ĩæ¡Ī":105224,"åĪĿæľŁ":105225,"å®ĥæĺ¯":105226,"åĮºåĿĹ":105227,"åĮºåĿĹéĵ¾":105228,"大è¿ŀ":105229,"è¿Ļç±»":105230,"åıĺæĪIJäºĨ":105231,"éĤĦæĺ¯":105232,"åįļ客":105233,"çı¾åľ¨":105234,"ä¸Ģæĸ¹":105235,"å®ĮæĪIJäºĨ":105236,"è¿Ļ个æĹ¶åĢĻ":105237,"åħ¨å¹´":105238,"ä¸Ĭ线":105239,"ç½IJ":105240,"ç«ŀèµĽ":105241,"åĩºçīĪ社":105242,"åĵ¥åĵ¥":105243,"寫":105244,"å¾Ĺ以":105245,"èĬ±åĽŃ":105246,"äºĨèµ·æĿ¥":105247,"èĦ±è´«æĶ»åĿļ":105248,"çļĦåİŁåĪĻ":105249,"讲解":105250,"æ¶ĪåĮĸ":105251,"æįŁå®³":105252,"æļĤæĹ¶":105253,"å¾ĹçŁ¥":105254,"éĢĤç͍":105255,"éŨåºĹ":105256,"解读":105257,"æĻ®åıĬ":105258,"人æ°ijæ³ķéĻ¢":105259,"åī¯ä¸»ä»»":105260,"å¿ĥçģµ":105261,"è¯ĬæĸŃ":105262,"ç¾İ女":105263,"æŁ¯":105264,"年以æĿ¥":105265,"æ´»è·ĥ":105266,"åĢŁåĬ©":105267,"åħ±å»º":105268,"è¯ī讼":105269,"æĶ¾æĿ¾":105270,"çªĹåı£":105271,"ä¼ģæ¥Ń":105272,"åĬłæĭ¿":105273,"åĬłæĭ¿å¤§":105274,"ä¹
°äºĨ":105275,"主æµģ":105276,"æĩĤå¾Ĺ":105277,"å°Ĩåħ¶":105278,"éĢıæĺİ":105279,"å·¥ä½ľä¸Ń":105280,"èĤ¡ä»·":105281,"æ¡£æ¡Ī":105282,"没æľīä»»ä½ķ":105283,"åijĬçŁ¥":105284,"å¹´åĪĿ":105285,"æĹ¥ä¸ĭåįĪ":105286,"åİĤåķĨ":105287,"èĬĤå¥ı":105288,"主导":105289,"è£Ŀ":105290,"åħ³éĶ®è¯į":105291,"èģĬ天":105292,"åĨĻä½ľ":105293,"æĶ¹éĿ©å¼ĢæĶ¾":105294,"æľīæľĽ":105295,"éĢļæĬ¥":105296,"èIJĮ":105297,"æĢ»é¢Ŀ":105298,"çŁŃæľŁ":105299,"ä¸Ģçķª":105300,"çĶŁæ´»çļĦ":105301,"åĮĸçļĦ":105302,"æĺ¥å¤©":105303,"è¿Ļåľº":105304,"æĸ°å¼Ģä¼łå¥ĩ":105305,"æĺ¯è¦ģ":105306,"å°ļæľª":105307,"åıĺæĽ´":105308,"ä¸Ģåij¨":105309,"客è§Ĥ":105310,"æĹ¥èĩ³":105311,"é¹°":105312,"çݲ":105313,"å°ĨæĿ¥":105314,"客人":105315,"åıĺéĿ©":105316,"说äºĨ":105317,"åİŁçIJĨ":105318,"èģĮåĬ¡":105319,"åıĪæľī":105320,"ä¸Ģåı¥è¯Ŀ":105321,"æĦŁåıĹåΰ":105322,"ç¬ĶèĢħ":105323,"ç§»æ°ij":105324,"西åįĹ":105325,"ä¹ĥèĩ³":105326,"æŃ£è§Ħ":105327,"åĪĿä¸Ń":105328,"çĬ¬":105329,"å½ĵäºĭ":105330,"å½ĵäºĭ人":105331,"æĪij们è¦ģ":105332,"åħ¥åı£":105333,"éĤ£æĹ¶":105334,"æľīéĻIJ责任":105335,"å°ij女":105336,"è¿Ļä¹Īå¤ļ":105337,"åĪĨåħ¬åı¸":105338,"å®ĩå®Ļ":105339,"çļĦéĢīæĭ©":105340,"å§IJå§IJ":105341,"åıijèµ·":105342,"è»į":105343,"æĽ´å¥½åľ°":105344,"éĻĨç»Ń":105345,"æľ¬æľįåĭĻ":105346,"å«©":105347,"èµ¶ç´§":105348,"èĦĤèĤª":105349,"第äºĮ天":105350,"æĪijä¼ļ":105351,"两ä½į":105352,"æķ²":105353,"åħ¬å®īæľºåħ³":105354,"ç§ijæĬĢåĪĽæĸ°":105355,"尺寸":105356,"è¾IJå°Ħ":105357,"å®ĹæķĻ":105358,"转æį¢":105359,"åĩºçİ°åľ¨":105360,"ä¸Ģé¢Ĺ":105361,"æľŁéĻIJ":105362,"åIJĮåѦ们":105363,"åĮĹæĸ¹":105364,"ä½łå°±":105365,"ä¸Ģ带ä¸Ģè·¯":105366,"èĢģå©Ĩ":105367,"游æĪıçݩ家":105368,"çļĦç»ĵæŀľ":105369,"è¡¥åģ¿":105370,"å¤ĸè´¸":105371,"对å¾ħ":105372,"ç»´çĶŁç´ł":105373,"ç»ıéĶĢåķĨ":105374,"è¿ĺå°Ĩ":105375,"åŃIJ女":105376,"æĽ´é«ĺ":105377,"ä¸į大":105378,"éī´å®ļ":105379,"让ä»ĸ们":105380,"æīĢè°ĵçļĦ":105381,"æŃ»äºĨ":105382,"帮æī¶":105383,"åĵ²åѦ":105384,"以ä¸ĬçļĦ":105385,"çļĦåħ³éĶ®":105386,"æĹ©å°±":105387,"æĬ¥ä»·":105388,"éģµå®Ī":105389,"æī©å¼ł":105390,"æĺ¯å¾Ī":105391,"å¼ĢéĢļ":105392,"æĸ°åĬł":105393,"æĸ°åĬłåĿ¡":105394,"ç¿»è¯ij":105395,"询éĹ®":105396,"é¸Ń":105397,"ä½ĵåĨħ":105398,"两个人":105399,"çι":105400,"éľľ":105401,"乡æĿijæĮ¯åħ´":105402,"çĿ¡è§ī":105403,"å®ĺåijĺ":105404,"åĪĽå§ĭ":105405,"åĪĽå§ĭ人":105406,"ä¼Ĺ人":105407,"åį³ä¾¿":105408,"çĸ«èĭĹ":105409,"ä¼ģä¸ļå®¶":105410,"渣":105411,"ç²¾åĬĽ":105412,"å¤ĸéĥ¨":105413,"èģªæĺİ":105414,"è¿Ļä¹Ł":105415,"å½ķåıĸ":105416,"åĨ²çªģ":105417,"åħ¨èº«":105418,"åŃ£èĬĤ":105419,"忽çĦ¶":105420,"çļĦæĢģ度":105421,"åĤ¨å¤ĩ":105422,"ä¿Ŀåħ»":105423,"çļĦæĥ³æ³ķ":105424,"ä¸Ĭæµ·å¸Ĥ":105425,"æIJºæīĭ":105426,"çļĦä¿¡æģ¯":105427,"åķĨåľº":105428,"çļĦæĢĿæĥ³":105429,"æĿĥåĬĽ":105430,"毫æĹł":105431,"æĢĢåŃķ":105432,"硬件":105433,"åĨħèĴĻåı¤":105434,"æİ¢è®¨":105435,"åħ»çĶŁ":105436,"çļĦ表çݰ":105437,"空ä¸Ń":105438,"æģIJæĢĸ":105439,"å¾Īé«ĺ":105440,"ç»ıæµİ社ä¼ļ":105441,"ä¸ĬæĿ¥":105442,"å»¶ç»Ń":105443,"éĩįå¤į":105444,"éĺ²èĮĥ":105445,"çļĦå½¢å¼ı":105446,"æľĪåºķ":105447,"èĢģ年人":105448,"绿åĮĸ":105449,"å±±åĮº":105450,"æĭ¿åĩº":105451,"æĹħ客":105452,"æĽ´æį¢":105453,"åħ¬ä¸»":105454,"èĬĤ约":105455,"åħ¨åİ¿":105456,"åĽŀæĬ¥":105457,"çIJĨæĢ§":105458,"çĸ¯çĭĤ":105459,"æ¶īå«Į":105460,"åī§æĥħ":105461,"åĨ¬åŃ£":105462,"åIJİç»Ń":105463,"è¿Ļæĺ¯ä¸Ģ个":105464,"æ¼Ķ讲":105465,"ä¸Ģå±Ĥ":105466,"æľīåħ³éĥ¨éŨ":105467,"æĹłå¥Ī":105468,"ç§įç±»":105469,"缸åħ³çļĦ":105470,"æĪĸèĢħæĺ¯":105471,"æī¶æĮģ":105472,"å¤ļæķ°":105473,"çļĦä½ľåĵģ":105474,"ä¸ĭä¸ĢæŃ¥":105475,"å¸ĪåĤħ":105476,"é«ĺéĢŁåħ¬è·¯":105477,"好åıĭ":105478,"ä¼ĺç§ĢçļĦ":105479,"è¿ĽäºĨ":105480,"æģIJæĢķ":105481,"äºĨåIJ§":105482,"大è§Ħ模":105483,"çļĦä¸ĸçķĮ":105484,"æĢĢçĸij":105485,"å··":105486,"åħ´å¥ĭ":105487,"æĪ°":105488,"æĿijéĩĮ":105489,"æľĭåıĭåľĪ":105490,"åĨ¬å¤©":105491,"ä¸Ńåįİ人æ°ij":10
5492,"åįıåķĨ":105493,"è¯ĦéĢī":105494,"æĹŃ":105495,"å¢ŀåĬłäºĨ":105496,"åıĹ伤":105497,"ä¸ĢèĤ¡":105498,"便æį·":105499,"ä¸ij":105500,"鹤":105501,"å¤ĸè§Ĥ":105502,"å·¥ç¨ĭå¸Ī":105503,"åĴĮåħ¶ä»ĸ":105504,"è¿Ļå°±":105505,"ä¸Ńå°ıä¼ģä¸ļ":105506,"西åĮĹ":105507,"åĽ½æľīä¼ģä¸ļ":105508,"èĭ¥æĺ¯":105509,"åı¯æĥľ":105510,"çĶŁæĹ¥":105511,"åĩ½":105512,"ä¹°åįĸ":105513,"ç¥Ŀç¦ı":105514,"人æ°ij群ä¼Ĺ":105515,"åħīæĺİ":105516,"åħ¬å¯ĵ":105517,"æĺ¯è°ģ":105518,"æĪijçŁ¥éģĵ":105519,"è¯Ńæĸĩ":105520,"æķıæĦŁ":105521,"ä¸įéĶĻçļĦ":105522,"æĿ¥è®²":105523,"æ³¢åĬ¨":105524,"çļĦ第ä¸Ģ":105525,"åľ°éľĩ":105526,"åľ¨åħ¨åĽ½":105527,"骨干":105528,"å®īç½®":105529,"å®¶ç͵":105530,"ä¸İæŃ¤":105531,"ä¸İæŃ¤åIJĮæĹ¶":105532,"åıĹçģ¾":105533,"çĥŃ线":105534,"çļĦæĬĢæľ¯":105535,"æµĭéĩı":105536,"ä¾Ŀèµĸ":105537,"ä¸ŃåĽ½çļĦ":105538,"ç̧":105539,"è¾ĥé«ĺ":105540,"踩":105541,"ä¼ļåľ¨":105542,"建éĢł":105543,"导èĪª":105544,"æĥ³èµ·":105545,"åħ¨ä¸ĸçķĮ":105546,"建æĿIJ":105547,"ç¯Ģ":105548,"çļĦåŁºç¡Ģ":105549,"èĩªåĬ¨åĮĸ":105550,"åīįåIJİ":105551,"çĿ¡çľł":105552,"æİ¨è¡Į":105553,"æį®äºĨè§£":105554,"ä»Ģä¹ĪæĹ¶åĢĻ":105555,"ä¸įåĸľæ¬¢":105556,"çħ¤çĤŃ":105557,"éĤ£ä¹Īå¤ļ":105558,"å¸ĤåľºåĮĸ":105559,"ä¸į管æĺ¯":105560,"ç«ĭåľº":105561,"éĥ½æ²¡":105562,"课é¢ĺ":105563,"æĪij们å°Ĩ":105564,"è¿ĩçļĦ":105565,"åĨįåĬłä¸Ĭ":105566,"çξ":105567,"身æĿIJ":105568,"çͷ女":105569,"è¿ľè¿ľ":105570,"çĶ·çĶŁ":105571,"èĩªèº«çļĦ":105572,"è´Łæĭħ":105573,"çϾä¸ĩ":105574,"西çıŃ":105575,"西çıŃçīĻ":105576,"åĩĢåĪ©æ¶¦":105577,"澳大":105578,"澳大åĪ©äºļ":105579,"ä¸įåİ»":105580,"æī¿åıĹ":105581,"楼çĽĺ":105582,"å¢ĥåĨħ":105583,"æ··åĩĿ":105584,"æ··åĩĿåľŁ":105585,"æĢĿæĥ³æĶ¿æ²»":105586,"å¸ĤåĮº":105587,"æĭĽæłĩ":105588,"åĽ¢ä½ĵ":105589,"è¿Ľåº¦":105590,"åĨĽéĺŁ":105591,"åıįå¼¹":105592,"äºĨä¸ĢäºĽ":105593,"æİ¥å¾ħ":105594,"çļĦåŃ¦ä¹ł":105595,"éħįéĢģ":105596,"é£Łåĵģå®īåħ¨":105597,"æĽ¿ä»£":105598,"æĺ¯ä»¥":105599,"éĢļç͍":105600,"çłĶç©¶æīĢ":105601,"ç¦ħ":105602,"æīĶ":105603,"éļĶ离":105604,"ä¸ĩå¹³æĸ¹ç±³":105605,"çļĦè§Ħå®ļ":105606,"ç»ĻæĪij们":105607,"æ¿Ģåħī":105608,"ä¼ļåĩºçݰ":105609,"çŁŃä¿¡":105610,"ç©¿çĿĢ":105611,"æ²Īéĺ³":105612,"æķĻæĿIJ":105613,"éĺ²çĸ«":105614,"ä¼ĺèī¯":105615,"约å®ļ":105616,"æĪijçľģ":105617,"åħ¬æ°ij":105618,"é쏿ĵ":105619,"é쏿ĵĩ":105620,"å·²æĪIJ为":105621,"ä¸įå¿ħ":105622,"ç¥ĸåĽ½":105623,"å¹¶æľª":105624,"åľŁå£¤":105625,"å¾®ç¬ij":105626,"äºĭä¸ļåįķä½į":105627,"çļĦ游æĪı":105628,"åħ¬ç¤º":105629,"åIJĪçIJĨçļĦ":105630,"çªĿ":105631,"æ°Ķ象":105632,"å®¶ä¸Ń":105633,"äº®çĽ¸":105634,"å᫿ĺŁ":105635,"è®°è½½":105636,"è§Ĩéĩİ":105637,"åľ°åĮºçļĦ":105638,"ä½Ĩä»ĸ":105639,"èĤĮèĤī":105640,"äºıæįŁ":105641,"åĬŀåѦ":105642,"ä¸Ģè¡Į":105643,"è¯ŀçĶŁ":105644,"åıijå¸ĥçļĦ":105645,"çļĦæľįåĬ¡":105646,"çļĦçłĶç©¶":105647,"åij¨æľ«":105648,"产ä¸ļåĽŃ":105649,"é«ĺ温":105650,"æĪIJåĬŁçļĦ":105651,"æŃ¥éª¤":105652,"åŃĺåĤ¨":105653,"åŃIJåħ¬åı¸":105654,"让她":105655,"ä¸Ńæľī":105656,"åĺī宾":105657,"妮":105658,"æĺİå¹´":105659,"äºĨåIJĹ":105660,"äºīè®®":105661,"æĪĪ":105662,"ä¸Ģæľ¬":105663,"ç¾İ丽çļĦ":105664,"ä½łè¯´":105665,"大人":105666,"æĶ»çķ¥":105667,"ä¸įæľĥ":105668,"å¾ħéģĩ":105669,"ä¸Ģè¾Ĩ":105670,"çīĪæĿĥæīĢæľī":105671,"æ°ijä¼Ĺ":105672,"åĬŁå¤«":105673,"å±ķä¼ļ":105674,"大èĦij":105675,"æ¯ıæľĪ":105676,"å°ı麦":105677,"æµĻæ±Łçľģ":105678,"çļĦæīĢæľī":105679,"ä¸ĭæ»ij":105680,"èĵĿèī²":105681,"è¦ģæĥ³":105682,"åѦçĶŁçļĦ":105683,"å½ĵä½ł":105684,"ä½ľæĪĺ":105685,"家乡":105686,"å¤ļåIJį":105687,"é«ĺäºİ":105688,"åĿļ强":105689,"è¿ŀéĶģ":105690,"åIJİæŀľ":105691,"人äºĭ":105692,"ç´ħ":105693,"æ¿ĢåĬ¨":105694,"è¿ĽæĶ»":105695,"ç©Ĩ":105696,"ä¸ĺ":105697,"让èĩªå·±":105698,"以æŃ¤":105699,"夫人":105700,"å¼Ģ设":105701,"æ°Ķè´¨":105702,"鸡èĽĭ":105703,"çĦ¡æ³ķ":105704,"åIJĥäºĨ":105705,"åĪĨåĪ«ä¸º":105706,"èģĶåIJĪåĽ½":105707,"å½ĵ代":105708,"å¦Ĥæŀľæĺ¯":105709,"è¿ľç¨ĭ":10571
0,"åĸĤ":105711,"è®°ä½ı":105712,"æ¸ħåįķ":105713,"åIJĪä½ľä¼Ļä¼´":105714,"åİ»åģļ":105715,"æķħéļľ":105716,"模æĭŁ":105717,"å¸ĪçĶŁ":105718,"åīįæĿ¥":105719,"ç͵è§Ĩåī§":105720,"çĥŃçα":105721,"éľ²åĩº":105722,"é«ĺå±Ĥ":105723,"ç͵åύ":105724,"纪å¾ĭ":105725,"å¼ĢåıijåķĨ":105726,"éķ¿å®ī":105727,"è½½ä½ĵ":105728,"çļĦå°±æĺ¯":105729,"被人":105730,"åıĹçIJĨ":105731,"篮çIJĥ":105732,"èİİ":105733,"交ç»Ļ":105734,"æľªæĿ¥çļĦ":105735,"两大":105736,"åIJķå¸ĥ":105737,"çŃī人":105738,"çļĦæĹ¥åŃIJ":105739,"åIJĪä½ľç¤¾":105740,"æĮijéĢī":105741,"åŃĺæ¬¾":105742,"ç³»ç»ŁçļĦ":105743,"æĬĬå®ĥ":105744,"没æľīä»Ģä¹Ī":105745,"ä»İæŃ¤":105746,"ä¸ŃåįĪ":105747,"çĸ¼çĹĽ":105748,"å·©åĽº":105749,"浪漫":105750,"缸åħ³éĥ¨éŨ":105751,"éķ¿åŁİ":105752,"纤维":105753,"ä¸ĬéŨ":105754,"çĪĨçĤ¸":105755,"èµ·çĤ¹":105756,"çļĦéĢļçŁ¥":105757,"èĢĮæĿ¥":105758,"çļĦèĢģ":105759,"æīĭéĩĮ":105760,"è¯ŃéŁ³":105761,"è¾Ľèĭ¦":105762,"æ±Łèĭıçľģ":105763,"ç͍äºĨ":105764,"身份è¯ģ":105765,"æľīåĬ©":105766,"æľīåĬ©äºİ":105767,"çī©èģĶç½ij":105768,"åĩºéŨ":105769,"å¼ŁåŃIJ":105770,"æĥ¹":105771,"è¿Ļä»¶äºĭ":105772,"æĪij们åı¯ä»¥":105773,"çļĦçĶŁåij½":105774,"æľīä¸Ģç§į":105775,"åºĹéĵº":105776,"åıĮæīĭ":105777,"çļĦæ¶Īæģ¯":105778,"èĢIJå¿ĥ":105779,"å°´å°¬":105780,"éĤ£å¤©":105781,"é¦ĸæī¹":105782,"æĺ¯ä¸Ģå®¶":105783,"人æ°Ķ":105784,"åıįæŃ£":105785,"æĪijåĴĮ":105786,"å®łçī©":105787,"ä¸į对":105788,"寻æ±Ĥ":105789,"çĽ¸ä¼¼":105790,"åľ¨ç¾İåĽ½":105791,"åı«åģļ":105792,"åĹİ":105793,"ç«ĭè¶³":105794,"ç͍éĢĶ":105795,"åħĨ":105796,"大æ°Ķ":105797,"åIJijä¸Ĭ":105798,"ä»ĸå°±":105799,"é¡¹çĽ®å»ºè®¾":105800,"èĭ¥å¹²":105801,"æĺ¯æľī":105802,"æ¿Ģæĥħ":105803,"çļĦæĦıä¹ī":105804,"æĺŃ":105805,"严éĩįçļĦ":105806,"å¯ĨéĽĨ":105807,"èĪŀè¹Ī":105808,"èį£èİ·":105809,"èİ·æĤī":105810,"æ±ŁåįĹ":105811,"åģĩå¦Ĥ":105812,"æĪ·å¤ĸ":105813,"线索":105814,"ç§ģ人":105815,"转åŀĭåįĩ级":105816,"çļĦä»·å̼":105817,"åįķçĭ¬":105818,"èĢģçϾå§ĵ":105819,"å°įæĸ¼":105820,"åĽ½éĻħåĮĸ":105821,"ä¼°å̼":105822,"æľįåĬ¡ä¸ļ":105823,"èĩŃ":105824,"æİīäºĨ":105825,"è§£åĨ³äºĨ":105826,"ä¹Łä¸įèĥ½":105827,"åħ¹":105828,"æĸ¯çī¹":105829,"æķħæĦı":105830,"è¿ĩ度":105831,"èĬĤæĹ¥":105832,"çϽçĻľ":105833,"çϽçĻľé£İ":105834,"ç»§æī¿":105835,"äºĨä¸įå°ij":105836,"äºĮ人":105837,"è§ģéĿ¢":105838,"æĥ³æĥ³":105839,"å¤įåIJĪ":105840,"康å¤į":105841,"åİ¿åŁİ":105842,"åľ¨åĽ½åĨħ":105843,"åľºåľ°":105844,"é϶çĵ·":105845,"è¿Ļ项":105846,"çľ¼ä¸Ń":105847,"糸":105848,"æĦŁè§īåΰ":105849,"æŀľçĦ¶":105850,"æĶ¾åħ¥":105851,"约æĿŁ":105852,"æİĴæŁ¥":105853,"车主":105854,"çļĦæĦıæĢĿ":105855,"æĸ°åŁİ":105856,"æĥ³çĿĢ":105857,"éģĤ":105858,"èĮ¶åı¶":105859,"ä¹°æĪ¿":105860,"åĨľæĪ·":105861,"é«ĺæīĭ":105862,"çİīç±³":105863,"æĸ°åĨłèĤºçĤİ":105864,"çħ§æĺİ":105865,"æĮĩåįĹ":105866,"踢":105867,"æķijæı´":105868,"æĻ¯çĤ¹":105869,"ç¨İæĶ¶":105870,"çļĦæīĭ":105871,"æŃ£å¥½":105872,"è¦ģæĬĬ":105873,"éļıæĦı":105874,"åħ¶å®ŀæĺ¯":105875,"ç»Ļèĩªå·±":105876,"è°ĪåΤ":105877,"æ¯ı天éĥ½":105878,"æĢģåĬ¿":105879,"é¢Ħ约":105880,"åİĨåı²ä¸Ĭ":105881,"å®Ŀè´Ŀ":105882,"åīįè¿Ľ":105883,"ä¹Łå°±æĺ¯è¯´":105884,"çļĦæĦıè§ģ":105885,"åı£ç½©":105886,"åİĺç±³":105887,"èĬ±è´¹":105888,"ä½ĵèĤ²æĬķæ³¨":105889,"åħ¬ä¼Ĺåı·":105890,"èijĹåIJįçļĦ":105891,"å¼ĢæĪ·":105892,"æĭįåįĸ":105893,"å²ģæľĪ":105894,"åĨħæ¶µ":105895,"å®Įæķ´çļĦ":105896,"é«ĺåİĭ":105897,"åħ¬åĬ¡åijĺ":105898,"使ç͍çļĦ":105899,"çĶŁäº§çº¿":105900,"妹妹":105901,"走访":105902,"æĺ¯åı¯ä»¥":105903,"åľ¨å®¶":105904,"æļ´åĬĽ":105905,"æ³°åĽ½":105906,"è´¨çĸij":105907,"ä¸įéģİ":105908,"天çĦ¶æ°Ķ":105909,"缺çĤ¹":105910,"å°ıåŀĭ":105911,"ä¸įä»ħæĺ¯":105912,"é»ijæļĹ":105913,"梨":105914,"æĸĩæĹħ":105915,"è¦ģæľī":105916,"ä¸Ńå±±":105917,"çļĦæķ°æį®":105918,"å¾Ĺå¾Ī":105919,"以便":105920,"对ä»ĸ":105921,"åĬłä»¥":105922,"çϼçı¾":105923,"设å®ļ":105924,"èĤļåŃIJ":105925,"éĿĸ":105926,"å¥īçĮ®":105927,"ä¸įåıĺ":105928,"åı£ç¢ij
":105929,"åľ¨åĵªéĩĮ":105930,"ä½IJ":105931,"è¿Ļ两个":105932,"çļĦæĸ¹åIJij":105933,"æŀ«":105934,"äºĮ次":105935,"çīĩåĮº":105936,"éłIJ":105937,"ç£Ĭ":105938,"æĭ¿çĿĢ":105939,"å·²ç»ıæĪIJ为":105940,"ä¹ĭä¸Ĭ":105941,"å®ĹæĹ¨":105942,"奶奶":105943,"é«ĺæĸ°åĮº":105944,"社æľĥ":105945,"è·Łè¸ª":105946,"æľįåĬ¡ä¸Ńå¿ĥ":105947,"æī¯":105948,"æīĭæĮĩ":105949,"礼çī©":105950,"宿èĪį":105951,"ç͍å¿ĥ":105952,"æıIJé«ĺäºĨ":105953,"亮çĤ¹":105954,"ä¸įæĦ¿æĦı":105955,"æĴѿ;":105956,"å¤ļå°ijéĴ±":105957,"没ä»Ģä¹Ī":105958,"æķ°åįģ":105959,"æĢ»çĽij":105960,"çļĦåŁİå¸Ĥ":105961,"æī¾åΰäºĨ":105962,"åĨħåľ°":105963,"åΰçİ°åľ¨":105964,"æĪĺæĸĹåĬĽ":105965,"åİŁå§ĭ":105966,"åĥ§":105967,"åĢĴæĺ¯":105968,"æľĢåħ·":105969,"è´«åĽ°æĪ·":105970,"éĢģåΰ":105971,"级åĪ«":105972,"åĩºèµĦ":105973,"æĪªæŃ¢":105974,"ç§įåŃIJ":105975,"èĥ½ä¸įèĥ½":105976,"幸è¿IJ":105977,"èĸĩ":105978,"项éĵ¾":105979,"æĮĤçīĮ":105980,"ä¸Ģ樣":105981,"ä¹ĺ客":105982,"èIJ½åIJİ":105983,"ä½ĨæĪij":105984,"æĹ©åľ¨":105985,"åĬ¨æ¼«":105986,"å¹³çŃī":105987,"å¯¹ä½ł":105988,"ä¸įæĢķ":105989,"å¤ĸçķĮ":105990,"å¤ļå¹´æĿ¥":105991,"é¦ĸ个":105992,"æ²³åįĹçľģ":105993,"æĪĸåħ¶ä»ĸ":105994,"éķľå¤´":105995,"åįĹæĺĮ":105996,"ä¸ĢéĿ¢":105997,"éĢłæĪIJçļĦ":105998,"å´Ķ":105999,"çŃĴ":106000,"æķĻèĤ²éĥ¨":106001,"åľ°åŁŁ":106002,"æĺĨæĺİ":106003,"å·´é»İ":106004,"æīĭ游":106005,"ä¸ĢæĹ¶":106006,"çłį":106007,"顶级":106008,"åħ±è®¡":106009,"åİŁæ²¹":106010,"è¾īçħĮ":106011,"说æĺ¯":106012,"æĸ°åįİ社":106013,"ç»ıåİĨäºĨ":106014,"ä¸įæŃ¢":106015,"è¦ģä¹Ī":106016,"èĢħçļĦ":106017,"æĢ»æĬķèµĦ":106018,"è¡Įé©¶":106019,"ä¸Ĭå¸Ŀ":106020,"年纪":106021,"çIJ¼":106022,"ä¼łè¯´":106023,"ç²¾èĭ±":106024,"æĸ¹éĴĪ":106025,"æ±Łæ¹ĸ":106026,"æĪIJçĤº":106027,"æĢ»éĩı":106028,"æĬķæĶ¾":106029,"åĬ¨çĶ»":106030,"èŤ":106031,"ç͵æºIJ":106032,"éĴĻ":106033,"åIJĮè¡Į":106034,"æĻ®éĢļçļĦ":106035,"åĽ¾ä¹¦é¦Ĩ":106036,"è¯ĪéªĹ":106037,"æħĪåĸĦ":106038,"è¿Ļ份":106039,"主æĮģ人":106040,"å°±è¿Ļæł·":106041,"èĢĮæĪIJ":106042,"èĩªè¡Į车":106043,"ä¸ŃåĽ½çī¹èī²":106044,"èĤ¿çĺ¤":106045,"åIJ¾":106046,"å¼Łå¼Ł":106047,"åıĹçĽĬ":106048,"éĢīæĭ©äºĨ":106049,"æĺİæĺ¾çļĦ":106050,"æĬ¥èĢĥ":106051,"ç¬ijéģĵ":106052,"éĽĸçĦ¶":106053,"温å·ŀ":106054,"éĿŀæ´²":106055,"ç§įç§į":106056,"åıĤåĬłäºĨ":106057,"è´§è¿IJ":106058,"éļı便":106059,"就没æľī":106060,"縣":106061,"央è§Ĩ":106062,"ç©¿è¶Ĭ":106063,"çļĦçݰ象":106064,"åĩłæ¬¡":106065,"çļĦé£İéĻ©":106066,"æŃĮæĽ²":106067,"æľ¬å±Ĭ":106068,"å¹´åĨħ":106069,"ä¸įè¶ħè¿ĩ":106070,"è¿ĩå¤ļ":106071,"å¿ħé¡»è¦ģ":106072,"ç»ĵ论":106073,"åĢŁéī´":106074,"ç¥ŀå¥ĩ":106075,"æľŁæľĽ":106076,"ä¸ĵ享":106077,"éĿŀ常éĩįè¦ģ":106078,"æĦıè¯Ĩåΰ":106079,"åIJĪå¹¶":106080,"æĬĬèĩªå·±":106081,"å¥Ĺè£ħ":106082,"éŃĶæ³ķ":106083,"å¤ıåŃ£":106084,"ä¸įåĥı":106085,"å¢ĥçķĮ":106086,"æĥĬåĸľ":106087,"æľīä¸Ģ天":106088,"çĦ¦çĤ¹":106089,"æĪij认为":106090,"åħ°å·ŀ":106091,"ç͵æ°Ķ":106092,"èģĶç³»æĪij们":106093,"ç§ijæĻ®":106094,"她说":106095,"çļĦæĸĩ竳":106096,"å¥ĩæĢª":106097,"åıĭ好":106098,"饮æĸĻ":106099,"çļĦæĶ¯æĮģ":106100,"çŃĶåºĶ":106101,"éĩįéĩı":106102,"çij¶":106103,"åĩıè½»":106104,"ç§ijåѦ家":106105,"巴西":106106,"éĩijèŀįæľºæŀĦ":106107,"åħļå§Ķ书记":106108,"貸款":106109,"ç²¾èĩ´":106110,"ä»İæľª":106111,"åį°åĪ·":106112,"åĽŀ顾":106113,"é¦ĸéĥ½":106114,"åıijèĤ²":106115,"éĹ®éģĵ":106116,"è¾¾åΰäºĨ":106117,"å¿įä¸įä½ı":106118,"æīįæľī":106119,"æįIJèµł":106120,"ä½ĽæķĻ":106121,"ä¸įæ¸ħ":106122,"éĺŁéķ¿":106123,"缸åıį":106124,"æĬ¥èѦ":106125,"大åħ¨":106126,"æ¬§çĽŁ":106127,"帮å¿Ļ":106128,"çļĦæĻĤåĢĻ":106129,"缮å½ķ":106130,"足以":106131,"èī°éļ¾":106132,"ä»ĸä¹Ł":106133,"å·¥ä½ľèĢħ":106134,"头èĦij":106135,"缺éĻ·":106136,"æĪIJç«ĭäºĨ":106137,"å°±å¼Ģå§ĭ":106138,"认åIJĮ":106139,"é»Ħèī²":106140,"çĹħæĥħ":106141,"覺å¾Ĺ":106142,"è¿Ļ两":106143,"ä¿¡ä»°":106144,"åľĭå®¶":106145,"ä¸įä»ħä»ħæĺ¯":106146,"çĭ¬å®¶":106147,"èάçļĦ":106148,"æĿIJè´¨":1
06149,"æµ·ä¸Ĭ":106150,"çĤºäºĨ":106151,"æľºåĬ¨è½¦":106152,"缸å½ĵäºİ":106153,"å¤ļåħĥåĮĸ":106154,"æĽ´å¤§çļĦ":106155,"èĽ®":106156,"åģĩæľŁ":106157,"å¼ıçļĦ":106158,"交éĢļè¿IJè¾ĵ":106159,"çľģå§Ķ":106160,"ä¸įç®Ĺ":106161,"æĶ¾ä¸ĭ":106162,"éĹ¯":106163,"äººåľ¨":106164,"港åı£":106165,"æĹ¨åľ¨":106166,"åij½ä»¤":106167,"æŁIJ个":106168,"平稳":106169,"åıªå¥½":106170,"人人":106171,"äºŀ":106172,"äºĮç»´":106173,"äºĮç»´çłģ":106174,"æŀģ为":106175,"åĪ«å¢ħ":106176,"åħ¶ä½Ļ":106177,"大äºĭ":106178,"主管éĥ¨éŨ":106179,"æĹłéĶ¡":106180,"éŵ":106181,"éģŃåΰ":106182,"说è¿ĩ":106183,"ä¸ºä½ł":106184,"è§£çŃĶ":106185,"éªĮæĶ¶":106186,"çļĦç»ıéªĮ":106187,"åĮ¹éħį":106188,"çģ«ç®Ń":106189,"豪åįİ":106190,"æŁIJæŁIJ":106191,"çļĦæĹ¶ä»£":106192,"书éĿ¢":106193,"æģĴ大":106194,"å»¶éķ¿":106195,"ä¸ĢåIJĮ":106196,"æľªèĥ½":106197,"交æį¢":106198,"çĶ¢åĵģ":106199,"çŃīåΰ":106200,"åĪĨ离":106201,"æīĵç͵è¯Ŀ":106202,"å¹²çĩ¥":106203,"è¾ĥå¤ļ":106204,"å¤ļå¹´çļĦ":106205,"èĥĮæĻ¯ä¸ĭ":106206,"为ä¾ĭ":106207,"æijĺè¦ģ":106208,"å´Ľèµ·":106209,"æŃ¤åĪ»":106210,"æľīæľºä¼ļ":106211,"æĿ¡æ¬¾":106212,"é¢Ĩ导å°ıç»Ħ":106213,"çļĦ身ä½ĵ":106214,"åįķä¸Ģ":106215,"央è¡Į":106216,"ä¸įæĸŃæıIJé«ĺ":106217,"ä»·å̼è§Ĥ":106218,"èĬ½":106219,"èIJį":106220,"æ³ķå¾ĭæ³ķè§Ħ":106221,"ä¸įéĶĪ":106222,"ä¸įéĶĪéĴ¢":106223,"åĩºäºİ":106224,"èĻļæĭŁ":106225,"æį®æĤī":106226,"çĥ¦æģ¼":106227,"åħ¨æĸ°çļĦ":106228,"æī«æıı":106229,"çĻ»éĻĨ":106230,"èīºæľ¯å®¶":106231,"çļĦé£Łçī©":106232,"çļĦåŃĺåľ¨":106233,"客åİħ":106234,"æĪij们就":106235,"æŁ¥çľĭæĽ´å¤ļ":106236,"è¯Ħ审":106237,"å¸Ĥåł´":106238,"è¬Ľ":106239,"巨头":106240,"ä¸ŃåĽ½ç»ıæµİ":106241,"äºĨèĩªå·±çļĦ":106242,"åĨ³è®®":106243,"çĽijçĿ£ç®¡çIJĨ":106244,"æĬķ票":106245,"åĨį度":106246,"è¡ĮçĤº":106247,"注åħ¥":106248,"ä½ľä¸ºä¸Ģ个":106249,"æ¯ı个人éĥ½":106250,"åįķåħĥ":106251,"è¦ģçŁ¥éģĵ":106252,"被称为":106253,"ä¹ĭéĻħ":106254,"è§£éϤ":106255,"丸":106256,"溫":106257,"ä¸īæĺŁ":106258,"é²ľæĺİ":106259,"ä¹Łéĥ½":106260,"æĹ¶æľº":106261,"åĩºæīĭ":106262,"æĥħå½¢":106263,"åķĨè´¸":106264,"éĢī举":106265,"对èĩªå·±":106266,"çĶŁåĬ¨":106267,"åħĭæľį":106268,"个ä½ĵ":106269,"èĭij":106270,"稱":106271,"大åݦ":106272,"æĺ¯å¯¹":106273,"åĪ©æģ¯":106274,"è¿IJåĬ¨åijĺ":106275,"åĮĸè§£":106276,"åīįæ²¿":106277,"æĦŁæģ©":106278,"æĢ»ä¹ĭ":106279,"é«ĺæĸ°æĬĢæľ¯":106280,"åĿĩ为":106281,"åħ¨åĮº":106282,"æ°Ķæ°Ľ":106283,"åı¯ä»¥è¯´æĺ¯":106284,"ä½ı宿":106285,"åħļåijĺå¹²éĥ¨":106286,"åĹ¯":106287,"è·µè¡Į":106288,"çļĦä¸ĵä¸ļ":106289,"èĢĥéªĮ":106290,"èķ¾":106291,"åħ¬åŃIJ":106292,"çļĦçĬ¶æĢģ":106293,"æ½®æµģ":106294,"ä¿¡æīĺ":106295,"è´¼":106296,"åIJĦæĸ¹":106297,"æķijåĬ©":106298,"éĿŀ常çļĦ":106299,"æ¡¥æ¢ģ":106300,"åħ¬æĸ¤":106301,"ä¼¼çļĦ":106302,"çľĭ好":106303,"å±Ģéĥ¨":106304,"å®īéĿĻ":106305,"éħįä»¶":106306,"常è§Ħ":106307,"å¼Ģ车":106308,"第äºĮ次":106309,"ä¸Ĭ级":106310,"åıĤèµĽ":106311,"å®¶å±ŀ":106312,"强åĬ¿":106313,"åľ¨ä»ĸ":106314,"åIJijåīį":106315,"ä¹ĭåľ°":106316,"éĥ¡":106317,"è¡Įç¨ĭ":106318,"èѦåijĬ":106319,"è§Ħå®ļçļĦ":106320,"åķĨåŁİ":106321,"äºĶ大":106322,"æķĻ室":106323,"åįģè¶³":106324,"æīĢä»¥åľ¨":106325,"å°Ĩç»§ç»Ń":106326,"çŃīæĸ¹å¼ı":106327,"å®¶ä¼ģä¸ļ":106328,"交ä»ĺ":106329,"çĤ¹è¯Ħ":106330,"ç»ĵç®Ĺ":106331,"ä¹Łåı¯":106332,"å¤ĸæ±ĩ":106333,"è¿Ļç§įæĥħåĨµ":106334,"æİĪäºĪ":106335,"å¸ĥç½®":106336,"æĪIJç«ĭäºİ":106337,"é¢ĦèѦ":106338,"管çIJĨ人åijĺ":106339,"å©ļ礼":106340,"ç»ĵæĿŁåIJİ":106341,"åħ¥éĢī":106342,"æĹłæ¯Ķ":106343,"åĴĮåıijå±ķ":106344,"çϽéħĴ":106345,"çİ©åħ·":106346,"ä¸ĩç¾İåħĥ":106347,"çļĦæĪIJ绩":106348,"æĭįçħ§":106349,"èĢĥèĻijåΰ":106350,"ä¼ģä¸ļåıijå±ķ":106351,"äºĨ个":106352,"çĶŁæ°Ķ":106353,"çļĦ女人":106354,"äºĶåįģ":106355,"çĪ·çĪ·":106356,"纽约":106357,"éĥ½è¢«":106358,"ä¸Ĭ课":106359,"çĽ¡":106360,"ä¼łç»ŁæĸĩåĮĸ":106361,"æ½ľåľ¨":106362,"åıijå°Ħ":106363,"ä¸Ģ身":106364,"éĺ²å®Ī":106365,"åĪ®":106366,"é¢ĺ缮":106367,"åľ¨åĨ
ħçļĦ":106368,"ç¾İ好çļĦ":106369,"è¿ĻéĩĮçļĦ":106370,"ä¸Ģä¸Ŀ":106371,"人åĿĩ":106372,"å̡坼":106373,"身åIJİ":106374,"æī©å±ķ":106375,"大éŨ":106376,"就被":106377,"è¯¥é¡¹çĽ®":106378,"æŀ¶æŀĦ":106379,"ä¸Ģåı£":106380,"ä¿¡æģ¯æĬĢæľ¯":106381,"å¼Ģä¸ļ":106382,"æĶ¶åıĸ":106383,"ç½ij页":106384,"æĶ¯æı´":106385,"å°ģéĹŃ":106386,"å¡ijéĢł":106387,"大èĥĨ":106388,"å¿«éĢŁåıijå±ķ":106389,"çľĭä¼¼":106390,"æ¸Ŀ":106391,"è¿Ļæł·ä¸Ģ个":106392,"模åĿĹ":106393,"注æĦıåΰ":106394,"çł´è§£":106395,"èĩªä»İ":106396,"åijµåijµ":106397,"ä¹ĭå¾Į":106398,"ä¹ĭæĹħ":106399,"è·ŁæĪij":106400,"æ³ķ人":106401,"æİĴè¡Įæ¦ľ":106402,"åĿļå®Ī":106403,"好å¤Ħ":106404,"çŁ³å¤´":106405,"å¹¶å°Ĩ":106406,"èα":106407,"æŃĩ":106408,"两岸":106409,"å¤ļä¹ħ":106410,"象å¾ģ":106411,"个æĢ§åĮĸ":106412,"çļĦè§Ĵ度":106413,"å¸Ĩ":106414,"ç¦ıå·ŀ":106415,"æŁ¥å¤Ħ":106416,"ä¸¤åĽ½":106417,"åIJ¸å¼ķäºĨ":106418,"é¦ĸå¸Ń":106419,"大åĵ¥":106420,"é¤Ĭ":106421,"涨å¹ħ":106422,"éĢīç͍":106423,"許å¤ļ":106424,"èIJ½æĪ·":106425,"åĵĪå°Ķ":106426,"åĵĪå°Ķ滨":106427,"åģļä»Ģä¹Ī":106428,"以åħį":106429,"é¾į":106430,"æĹłéľĢ":106431,"åΰåºķæĺ¯":106432,"æĢ¡":106433,"åijĬè¯īä½ł":106434,"éĺ²æ°´":106435,"è¿ĻæĹ¶åĢĻ":106436,"欢ä¹IJ":106437,"转åIJij":106438,"è¿Ļä¸ªåľ°åĽ¾":106439,"åħ¥é©»":106440,"èįīåİŁ":106441,"æĹ¶ä»£çļĦ":106442,"åıĺåĬ¨":106443,"åĬłå¼ºå¯¹":106444,"åģ¶å°Ķ":106445,"å®ĪæĬ¤":106446,"æ°Ķ温":106447,"人éĹ´":106448,"æľĿé²ľ":106449,"ç»ıè´¹":106450,"åĽŃæŀĹ":106451,"å·¥åľ°":106452,"è§Ħæł¼":106453,"åĩłåįģ":106454,"è¯ķåĽ¾":106455,"å¦ĥ":106456,"éĤ£æĹ¶åĢĻ":106457,"å¼ĺæī¬":106458,"ä¸ļçķĮ":106459,"çļĦéĢŁåº¦":106460,"ä¼ļä¸įä¼ļ":106461,"èIJ¥æĶ¶":106462,"å°ıå¾®ä¼ģä¸ļ":106463,"çľĭè¿ĩ":106464,"æĬĬä»ĸ":106465,"éģµå¾ª":106466,"è¿Ļè¾¹":106467,"没æľī人":106468,"壶":106469,"æ¹ĸåįĹçľģ":106470,"æŀģåħ¶":106471,"çļĦ人çĶŁ":106472,"ä»ĸè¿ĺ":106473,"转åĮĸ为":106474,"èµ°è¿ĩ":106475,"æĬ±çĿĢ":106476,"çīĽå¥¶":106477,"ä¸ĩ亩":106478,"å¿ĥæĢģ":106479,"æĹ¥å¸¸çĶŁæ´»":106480,"ä½ĵæ£Ģ":106481,"æĻĥ":106482,"çŃīé¢ĨåŁŁ":106483,"æĩī該":106484,"åı¯ä»¥çľĭåΰ":106485,"æī¾ä¸įåΰ":106486,"èĢģå¹´":106487,"æĬĬæĪij":106488,"积åĪĨ":106489,"梳çIJĨ":106490,"绳":106491,"çļĦæĶ¿æ²»":106492,"å¸ĿåĽ½":106493,"éĻªä¼´":106494,"æ´Ľéĺ³":106495,"åħ¬æŃ£":106496,"å¼Ģåı£":106497,"çī¹èī²çļĦ":106498,"åĽ°å¢ĥ":106499,"ä¸Ĭæľī":106500,"ç«ĭä½ĵ":106501,"æīĵå·¥":106502,"åķ¤éħĴ":106503,"åľ¨éĤ£éĩĮ":106504,"éĤ£è¾¹":106505,"个åĪ«":106506,"ä¸Ģå®ļæĺ¯":106507,"çļĦéĩįè¦ģæĢ§":106508,"ä¸»å¼ł":106509,"åĴĮæľįåĬ¡":106510,"ä¸Ĭç½ij":106511,"è¡¥åĬ©":106512,"åıªéľĢ":106513,"弦":106514,"éģ®":106515,"åĬĽäºī":106516,"度è¿ĩ":106517,"èij¬":106518,"é¡¿æĹ¶":106519,"éĦī":106520,"纺ç»ĩ":106521,"åľ°åĿĹ":106522,"ä¿¡ç͍åį¡":106523,"ç½ļ款":106524,"åijĬè¯īæĪij":106525,"éĽĻ":106526,"书çĶ»":106527,"è¨Ńè¨Ī":106528,"æĢ»ä¼ļ":106529,"åΤåĨ³":106530,"ä¿¡èªī":106531,"个èĤ¡":106532,"平常":106533,"æĢİ麼":106534,"ä½ĵçİ°åľ¨":106535,"é»Ħæ²³":106536,"åĽĽå·Ŀçľģ":106537,"羣缸":106538,"åIJĦé¡¹å·¥ä½ľ":106539,"åĬ¨åijĺ":106540,"å³°ä¼ļ":106541,"ä¸ĢæľŁ":106542,"æľīä¸Ģå®ļçļĦ":106543,"é«ĺ度éĩįè§Ĩ":106544,"ç¹ģèį£":106545,"åıijçݰäºĨ":106546,"ç½ij红":106547,"æīĭæ³ķ":106548,"å®¶åĽŃ":106549,"仪åύ":106550,"è¾ĥä½İ":106551,"çļĦå®īåħ¨":106552,"æ¡IJ":106553,"ä»ĺ款":106554,"æĬijåζ":106555,"åįĵè¶Ĭ":106556,"æŃ£éĿ¢":106557,"åĵij":106558,"强åζ":106559,"ä»Ĭ天çļĦ":106560,"æĪĺèĥľ":106561,"楼å¸Ĥ":106562,"æĭ¿ä¸ĭ":106563,"é¢ľå̼":106564,"举éĥ¨":106565,"çłĶåζ":106566,"çļĦæĪĺçķ¥":106567,"åľ¨ä¸Ģ个":106568,"ä¸ī人":106569,"å®ĮäºĨ":106570,"æĸ°æĬĢæľ¯":106571,"ç»ıæµİæķĪçĽĬ":106572,"å¯Įæľī":106573,"澳洲":106574,"åĬ©çIJĨ":106575,"é¢Ĩåıĸ":106576,"è°Ń":106577,"çĩĥçĥ§":106578,"ç´łåħ»":106579,"éĤĦæľī":106580,"è¿ĽèĢĮ":106581,"ä»Ģä¹Īæĺ¯":106582,"çłĶç©¶ä¸Ńå¿ĥ":106583,"éĢĤç͍äºİ":106584,"æİ¥æĶ¶":106585,"å¤±æľĽ":106586,"äºĮ级":106587,"éĹ´çļĦ":10
6588,"åİŁæłĩé¢ĺ":106589,"èªįçĤº":106590,"æį¡":106591,"对çĿĢ":106592,"对éĿ¢":106593,"ä¸ŃåİŁ":106594,"éĵĥ":106595,"çĶŁäº§çļĦ":106596,"åıijå¸ĥä¼ļ":106597,"士åħµ":106598,"è¿Ļåı¥è¯Ŀ":106599,"缴纳":106600,"ä¸Ģ个个":106601,"åѸçĶŁ":106602,"çĸijéĹ®":106603,"交èѦ":106604,"示èĮĥåĮº":106605,"天使":106606,"åľ¨ä¸Ĭæµ·":106607,"åIJĮæĻĤ":106608,"è½»æĺĵ":106609,"å͝ä¸ĢçļĦ":106610,"çĥŃéĹ¹":106611,"ä¹IJè§Ĥ":106612,"çļĦ身份":106613,"åĸĦäºİ":106614,"大åİħ":106615,"èĤ¯å®ļæĺ¯":106616,"éĺ²çģ«":106617,"å¤ĸåĩº":106618,"æį®è¯´":106619,"é¡¹çĽ®çļĦ":106620,"ä¸Ģåı°":106621,"èĻļåģĩ":106622,"ä¸Ģç¬Ķ":106623,"ç«ĭæ³ķ":106624,"严èĤĥ":106625,"æī¿åĬŀ":106626,"åįģåĩł":106627,"çļĦ空éĹ´":106628,"æľ¬ç½ijç«Ļ":106629,"åģļå¾Ĺ":106630,"ä¿Ŀ温":106631,"æľĪåĪĿ":106632,"åľ¨ç½ijä¸Ĭ":106633,"åIJĦæĸ¹éĿ¢":106634,"ä¸ī天":106635,"交æĺĵæīĢ":106636,"è§£æŀIJ":106637,"åħļä¸Ń央":106638,"è¿Ľåĩºåı£":106639,"åĴĮ社ä¼ļ":106640,"次æķ°":106641,"ä¹ĭå®¶":106642,"维度":106643,"æ´¾åĩºæīĢ":106644,"产çĶŁäºĨ":106645,"带æľī":106646,"å¾Ī强":106647,"æľīäºĽäºº":106648,"å¹´åIJİ":106649,"äºĨ许å¤ļ":106650,"å¯Ĩ度":106651,"åŃ¦æľŁ":106652,"çıłæµ·":106653,"æľĢå¤ļçļĦ":106654,"è¾¹ç¼ĺ":106655,"容éĩı":106656,"第äºĮ个":106657,"ä¸Ģ缴æĺ¯":106658,"ä¸įç¦ģ":106659,"æŃ²":106660,"ä»ĭç»įäºĨ":106661,"ä¼ĺéĽħ":106662,"æ¯Ķè¼ĥ":106663,"èģĮä½į":106664,"温æŁĶ":106665,"æľīéĴ±":106666,"æľĢé«ĺçļĦ":106667,"åįļè§Īä¼ļ":106668,"ä¸įæĪIJ":106669,"éĶĻäºĨ":106670,"è¯ģçĽij":106671,"è¯ģçĽijä¼ļ":106672,"æĪIJ人":106673,"åĿĩåĮĢ":106674,"æľīåĪ©":106675,"è¶ĬåįĹ":106676,"æīĵäºĨ":106677,"好åIJĥ":106678,"系統":106679,"è·Łéļı":106680,"çļĦåľ°ä½į":106681,"æŃ£å¦Ĥ":106682,"ç¨įå¾®":106683,"åį°åıij":106684,"åĪĽç«ĭ":106685,"é£İåħī":106686,"å°ĨæĪIJ为":106687,"ä¸įé«ĺ":106688,"é¢ijç¹ģ":106689,"设æľī":106690,"ä¼ŀ":106691,"æĭĨéϤ":106692,"å½±åĥı":106693,"æ¸ĹéĢı":106694,"å¹´å¼Ģå§ĭ":106695,"ç½ijæĺĵ":106696,"è¦ģåģļ":106697,"ç͵åĬ¨è½¦":106698,"羣å¿ĥ":106699,"æµ·åĨĽ":106700,"ä¼łæĿ¥":106701,"å·®åĪ«":106702,"è°¨æħİ":106703,"çĥŁåı°":106704,"åįĥå¹´":106705,"è¯ģå®ŀ":106706,"çIJª":106707,"çļĦåħ·ä½ĵ":106708,"åΰå¤Ħ":106709,"ä¸įå®ľ":106710,"èľĢ":106711,"èĥ½åĬĽåĴĮ":106712,"çīºçī²":106713,"çļĦéĴ±":106714,"大éĺŁ":106715,"é¦ĸè¦ģ":106716,"ä¸įæĦ¿":106717,"çİ«çij°":106718,"人æ°ijç½ij":106719,"è¿ĺæĺ¯è¦ģ":106720,"åĽĽå¹´":106721,"æįŁä¼¤":106722,"çļĦåģļæ³ķ":106723,"éĿĪ":106724,"è¡Ķæİ¥":106725,"åIJĪæĪIJ":106726,"没人":106727,"éĹ¨æ§Ľ":106728,"ä¿¡è´·":106729,"çļĦ缸åħ³":106730,"举é£İ":106731,"社ä¿Ŀ":106732,"ä¸ĭ游":106733,"åĿĹéĴ±":106734,"è¿ĩåIJİ":106735,"çļĦåºĶç͍":106736,"饶":106737,"é¢ģåıij":106738,"ä¸Ģå¤Ħ":106739,"åįİå¤ı":106740,"为ä¼ģä¸ļ":106741,"åıªä¼ļ":106742,"侵害":106743,"çļĦåĬŁèĥ½":106744,"åѸç¿Ĵ":106745,"ä¸Ńåįİæ°ijæĹı":106746,"åıijå¸ĥäºĨ":106747,"è¿İæİ¥":106748,"æĪijèĩªå·±":106749,"è¿ĺéľĢè¦ģ":106750,"太éĺ³èĥ½":106751,"åİ»ä¸ĸ":106752,"æĺ¯ä½ł":106753,"åIJĪåĬĽ":106754,"ç»ĺçĶ»":106755,"åı°åĮĹ":106756,"çĿ£ä¿ĥ":106757,"åĮĹéĥ¨":106758,"æľīå¤ļå°ij":106759,"å¾Īéĩįè¦ģ":106760,"åĪĴåĪĨ":106761,"åı·çº¿":106762,"æĶ¾å¤§":106763,"ä¼ļ被":106764,"èİ·å¥ĸ":106765,"ä¹ĭåĨħ":106766,"失åİ»äºĨ":106767,"çݩ家们":106768,"éĩĩéĽĨ":106769,"壹":106770,"å®¶ä¼Ļ":106771,"çϽ天":106772,"åĽłä¸ºä»ĸ":106773,"社ä¼ļæ²»çIJĨ":106774,"å¼ĢåĪĽ":106775,"ç͵ç¼Ĩ":106776,"æĸ°ä¸Ģ代":106777,"å¹¶è´Ń":106778,"就已ç»ı":106779,"çļĦ社ä¼ļ":106780,"éϤéĿŀ":106781,"åı¯ä»¥ç͍":106782,"å©ī":106783,"æ¯Ķè¾ĥ好":106784,"å®ŀä¸ļ":106785,"åĪĽåĬŀ":106786,"æıIJèµ·":106787,"é»ĥ":106788,"ä½ıåľ¨":106789,"å¸ĤæĶ¿":106790,"éĿ¢ä¸´çļĦ":106791,"èĥ½åľ¨":106792,"çŁŃçŁŃ":106793,"çľŁäºº":106794,"æĺİæĺİ":106795,"èµĦåĬ©":106796,"çļĦä¸įåIJĮ":106797,"å°ıæľĭåıĭ":106798,"é¢ĺæĿIJ":106799,"ç¾İåij³":106800,"æĺŁåº§":106801,"ä¸įä¸Ģæł·çļĦ":106802,"çľĭä¸Ĭåİ»":106803,"ä¸Ģæł¹":106804,"广å·ŀå¸Ĥ":106805,"åıijçĶŁçļĦ":10680
6,"é«ĺç§ijæĬĢ":106807,"ä¸Ģè¾ĪåŃIJ":106808,"交åıī":106809,"ä½ĵ系建设":106810,"åĽłä¸ºæĪij":106811,"çıįæĥľ":106812,"ä¸ĬåѦ":106813,"æĪĺæľ¯":106814,"æŃ¤ç±»":106815,"交å¾Ģ":106816,"æĮīæij©":106817,"人们çļĦ":106818,"åħ¶å¯¦":106819,"åİŁæĿIJæĸĻ":106820,"æ¸´æľĽ":106821,"缸å¤Ħ":106822,"微微":106823,"æ®·":106824,"ä¹ĺåĿIJ":106825,"å¼Ģå±ķäºĨ":106826,"é«ĺåĵģè´¨":106827,"æĹłäººæľº":106828,"ä¸įæĺ¯å¾Ī":106829,"çļĦæĬķèµĦ":106830,"èĬĤçľģ":106831,"èĩī":106832,"ç²¾éĢī":106833,"çļĦæłĩåĩĨ":106834,"åįĹéĥ¨":106835,"认è¯Ĩåΰ":106836,"å¹³éĿĻ":106837,"èĹ¥":106838,"æī«é»ij":106839,"æī«é»ijéϤ":106840,"æī«é»ijéϤæģ¶":106841,"éĢĻ種":106842,"建çŃijéĿ¢ç§¯":106843,"ç¡®ç«ĭ":106844,"管çIJĨåĬŀæ³ķ":106845,"æĦıå¿Ĺ":106846,"丨":106847,"让åŃ©åŃIJ":106848,"æķijçģ¾":106849,"å½ĵä»Ĭ":106850,"çģ«çģ¾":106851,"åIJĦéĥ¨éŨ":106852,"ä¾µçĬ¯":106853,"æ¯ıåij¨":106854,"æı½":106855,"ä¸Ģ次æĢ§":106856,"åħ¶ä»ĸ人":106857,"éĶĻè¿ĩ":106858,"ä¸İåħ¶":106859,"åĭĩæ°Ķ":106860,"çĩĥæ°Ķ":106861,"é¦ĸå±Ĭ":106862,"æľį饰":106863,"ç²¥":106864,"å®Įæ¯ķ":106865,"å°±æĬĬ":106866,"åĬŀäºĭå¤Ħ":106867,"ä¸Ģä¼ļåĦ¿":106868,"离ä¸įå¼Ģ":106869,"å¦ĤæŀľæĤ¨":106870,"ä»ĵåºĵ":106871,"导å¸Ī":106872,"åIJĪéĢĤçļĦ":106873,"毫米":106874,"å®īåħ¨æĢ§":106875,"ä¾Ŀçħ§":106876,"产ä¸ļåĮĸ":106877,"ä½łçľĭ":106878,"羣çļĦå¾Ī":106879,"åѤçĭ¬":106880,"éĺ²å¾¡":106881,"å¾Īç®Ģåįķ":106882,"é£İæ°´":106883,"ä½Ĩä¹Ł":106884,"æİ¨åĩºäºĨ":106885,"æ°ijèIJ¥ä¼ģä¸ļ":106886,"çłģ头":106887,"å¤įæĿĤçļĦ":106888,"ç»ĦæĪIJéĥ¨åĪĨ":106889,"åħħ满äºĨ":106890,"è¿ijåĩłå¹´":106891,"çľģæĶ¿åºľ":106892,"æľīå¿ħè¦ģ":106893,"éϳ":106894,"ä¹ĭç±»":106895,"ä¹ĭç±»çļĦ":106896,"æĢ§ä»·":106897,"æĢ§ä»·æ¯Ķ":106898,"åķĨåºĹ":106899,"å¸Ĥå̼":106900,"人æīįåŁ¹åħ»":106901,"æ·±åıĹ":106902,"管çIJĨå±Ģ":106903,"æģIJæĥ§":106904,"ä»ħæľī":106905,"æĬµè¾¾":106906,"æµ·åħ³":106907,"èµĭäºĪ":106908,"äºĭåĦ¿":106909,"ä»·éĴ±":106910,"æīĭä¸Ĭ":106911,"èĩªå¾ĭ":106912,"åħ³çα":106913,"享æľī":106914,"éģĹæĨ¾":106915,"å¾Īå¿«å°±":106916,"æĽ´å¿«":106917,"æłĩè¯Ĩ":106918,"åºĨç¥Ŀ":106919,"ä¹Łå¥½":106920,"ä¸įæĺĵ":106921,"æĪijå¾Ī":106922,"æĶ¹éĿ©åıijå±ķ":106923,"å¤ĸåľ°":106924,"æĬµæĬ¼":106925,"è¯Ĺ人":106926,"åİķæīĢ":106927,"æĸ°åªĴä½ĵ":106928,"èĸĽ":106929,"è°Īè¯Ŀ":106930,"ä¸Ģå®ļç¨ĭ度":106931,"èµ°åľ¨":106932,"æľĢ强":106933,"åĬŁçİĩ":106934,"åħ±è¯Ĩ":106935,"大桥":106936,"ä¸ĭæĸ¹":106937,"å¤ĸèµĦ":106938,"碱":106939,"å·¡è§Ĩ":106940,"æ¹ĸåĮĹçľģ":106941,"个çϾåĪĨ":106942,"个çϾåĪĨçĤ¹":106943,"çļĦ责任":106944,"çļĦåĵģçīĮ":106945,"åĬ©æİ¨":106946,"åĪĽéĢłäºĨ":106947,"ä»»èģĮ":106948,"å¿«æį·":106949,"æĿijåºĦ":106950,"åİ»çľĭ":106951,"æīįèĥ½å¤Ł":106952,"層":106953,"æĪijå®¶":106954,"æĺ¯ä¸Ģ款":106955,"ç¾ħ":106956,"åĨ°éĽª":106957,"æŀģ大":106958,"çģ¯åħī":106959,"éĨĭ":106960,"ä¸İåħ¶ä»ĸ":106961,"æıIJåĩºçļĦ":106962,"éĿłè¿ij":106963,"è°ĥåĬ¨":106964,"å°½åı¯èĥ½":106965,"åıijåĬĽ":106966,"ç»Ļ她":106967,"éĢĤéĩı":106968,"è·¨åĽ½":106969,"åħĪè¡Į":106970,"æĸ°æĿIJæĸĻ":106971,"ä½ľäºĨ":106972,"满äºĨ":106973,"ä¸į满":106974,"çļĦçľ¼çĿĽ":106975,"çľĭå¾Ĺ":106976,"è¿Ļä¸Ģ次":106977,"é½IJåħ¨":106978,"çļĦä¸Ģéĥ¨åĪĨ":106979,"ä¸Ļ":106980,"æ¸ħæĸ°":106981,"說æĺİ":106982,"身边çļĦ":106983,"æīĢæľī人":106984,"å½°æĺ¾":106985,"è±¹":106986,"åį¿":106987,"è¿IJ转":106988,"æĮĩå¼ķ":106989,"å¸Ĥåħ¬å®īå±Ģ":106990,"åıĤå±ķ":106991,"ä¹ĭæĹ¶":106992,"éĩijèŀįæľįåĬ¡":106993,"èµĦæľ¬å¸Ĥåľº":106994,"èĥ½è®©":106995,"å¿ĺäºĨ":106996,"天åłĤ":106997,"æ¯Ķå¦Ĥ说":106998,"éĬĢè¡Į":106999,"èĽĭç³ķ":107000,"çĶ©":107001,"æł¸å®ŀ":107002,"æĻ®äº¬":107003,"ä¼ĺç¾İ":107004,"åı£èħĶ":107005,"漫çĶ»":107006,"çľ¼éĩĮ":107007,"äºĨä¸ĭæĿ¥":107008,"æĪijä»¬ä¹Ł":107009,"ä¾į":107010,"为ä¸Ńå¿ĥ":107011,"å¥ĩ迹":107012,"éĿĴçĿIJ":107013,"æĪªèĩ³çĽ®åīį":107014,"åĩºä¾Ĩ":107015,"æĢ»åħ¬åı¸":107016,"弥补":107017,"ç®Ĺæ³ķ":107018,"å·¥ä½ľå®¤":107019,"æīĢ以æĪij":107020,"æ°´åĪĨ"
:107021,"æīĢå±ŀ":107022,"ä¸į说":107023,"ä½Ĩæĺ¯åľ¨":107024,"è¦ģåİ»":107025,"åĪĽä¸ļèĢħ":107026,"ä¸įæ¸ħæ¥ļ":107027,"åĽĽåij¨":107028,"æĺ¯ä»İ":107029,"çļĦæł¹æľ¬":107030,"çģ¶":107031,"æ¯Ľæ³½":107032,"æ¯Ľæ³½ä¸ľ":107033,"æµ·åı£":107034,"åĽĽåįģ":107035,"ä¹Łè¢«":107036,"èģ·":107037,"ä¸Ģæīĭ":107038,"绩æķĪ":107039,"çļĦçĶ·äºº":107040,"书ç±į":107041,"ä¸ĢèĦ¸":107042,"大äºİ":107043,"鼶éĥ¨ä»¶":107044,"åħ³æĢĢ":107045,"平米":107046,"æļ´éľ²":107047,"å¾Ĺå¤ļ":107048,"ä¸ī级":107049,"æľ¬åij¨":107050,"两èĢħ":107051,"对ä¸ŃåĽ½":107052,"åıªè§ģ":107053,"欧ç¾İ":107054,"å¦Ĥæŀľæľī":107055,"å·²ç»ıæĺ¯":107056,"çľĭå®Į":107057,"çģ«éĶħ":107058,"èµIJ":107059,"ä¸Ģéģį":107060,"æĦŁåĨĴ":107061,"ç»ĵå±Ģ":107062,"ä»ĵåĤ¨":107063,"å®ŀåľ°":107064,"å̻ç»ıçIJĨ":107065,"ä¹Łä¸įçŁ¥éģĵ":107066,"碰åΰ":107067,"åIJĪ计":107068,"客æĪ·çļĦ":107069,"ç½Ĺ马":107070,"æĦīå¿«":107071,"é£Ľ":107072,"çĥŃçĥĪ":107073,"伦æķ¦":107074,"åĮ»ä¿Ŀ":107075,"éĺ¿éĩĮå·´å·´":107076,"åĨį说":107077,"ä¸ºåŁºç¡Ģ":107078,"çĶŁäº§ç»ıèIJ¥":107079,"è¿ĻäºĽäºº":107080,"åĪĹ车":107081,"æ²³åĮĹçľģ":107082,"è¿Ļ段":107083,"æ´»åĬ¨ä¸Ń":107084,"å©·":107085,"çĶŁçIJĨ":107086,"ä¸ŃåĽ½äººæ°ij":107087,"éĦĤ":107088,"åIJ¬åıĸ":107089,"å¤įä¹ł":107090,"æľīçĽĬ":107091,"æĶ¶æĭ¾":107092,"å¾Īåı¯èĥ½":107093,"ç½ijç»ľæ¸¸æĪı":107094,"们çļĦ":107095,"èµĭèĥ½":107096,"éļ¾å¾Ĺ":107097,"åĪĨæīĭ":107098,"羣è¯ļ":107099,"åħ¬åı¸åľ¨":107100,"åĿĩè¡¡":107101,"åı£åij³":107102,"çīµå¤´":107103,"ä¸ĢèάçļĦ":107104,"轿车":107105,"çŃīäºİ":107106,"æ²īé»ĺ":107107,"æĪijéĥ½":107108,"å°ıç¨ĭåºı":107109,"ä¸Ģåī¯":107110,"æī¿è½½":107111,"åľ°è´¨":107112,"çķĮéĿ¢":107113,"çĶµæľº":107114,"çĦ¦èĻij":107115,"éĶĢåĶ®é¢Ŀ":107116,"æĸ°è½¦":107117,"ä¸Ĭ游":107118,"主æ¼Ķ":107119,"éļIJç§ģ":107120,"åıijå±ķæĪĺçķ¥":107121,"çļĦåĬªåĬĽ":107122,"å¼Ģåħ³":107123,"è§£åĨ³éĹ®é¢ĺ":107124,"çĿ£å¯¼":107125,"对æĬĹ":107126,"å¾Īå¤ļ人éĥ½":107127,"æĹłæķĪ":107128,"产åĵģè´¨éĩı":107129,"å®īå¿ĥ":107130,"åįİ人":107131,"ä¸į符åIJĪ":107132,"èĩªå®¶":107133,"éĺµå®¹":107134,"çļĦåIJĦç§į":107135,"çļĦçIJĨ念":107136,"çļĦæĸĩåĮĸ":107137,"为èĩªå·±":107138,"山水":107139,"游泳":107140,"éľĩèį¡":107141,"çĶŁæ´»æĸ¹å¼ı":107142,"è¿ľç¦»":107143,"çŁ³åĮĸ":107144,"æŃ¤äºĭ":107145,"æĺ¯çľŁçļĦ":107146,"çļĦæ¯Ķä¾ĭ":107147,"ç͍ç͵":107148,"奥è¿IJä¼ļ":107149,"ä¿Ŀå®ī":107150,"èĽĭçĻ½è´¨":107151,"çļĦå¿ĥçIJĨ":107152,"å·«":107153,"åı·çłģ":107154,"æ°Ķä½ĵ":107155,"åıijæĶ¹":107156,"åıijæĶ¹å§Ķ":107157,"åĮ»å¸Ī":107158,"æ¶ĤæĸĻ":107159,"æĺĬ":107160,"å¸Ĥ级":107161,"ä¸ĸçķĮçļĦ":107162,"åĪĨåĪ«æĺ¯":107163,"çł´äº§":107164,"ä¸ĢæĿ¯":107165,"æĭīå¼Ģ":107166,"å¹³åĩ¡":107167,"çļĦåıijçĶŁ":107168,"åĬ¨æīĭ":107169,"ä¸ĢçĽ´ä»¥æĿ¥":107170,"æīĭå·¥":107171,"éĩĮéĿ¢çļĦ":107172,"æĹłåħ³":107173,"ä»ĭåħ¥":107174,"èµ°ä¸Ĭ":107175,"å°±æĺ¯è¦ģ":107176,"å¹´éĹ´":107177,"åĩºçı¾":107178,"å½±éŁ¿":107179,"å¹ħ度":107180,"éĽģ":107181,"éģĵåħ·":107182,"缮çļĦåľ°":107183,"åIJİèĢħ":107184,"ä¸Ĭæ¼Ķ":107185,"äºĨåĩł":107186,"æ®ĭçĸ¾äºº":107187,"å¿Ļç¢Į":107188,"æĺ¯åIJ¦æľī":107189,"并对":107190,"ä¼ļ导èĩ´":107191,"æ°´åºĵ":107192,"ç»Ĩèĩ´":107193,"åIJİæĤĶ":107194,"å¿ĥæĢĿ":107195,"åģļäºĭ":107196,"åİĤæĪ¿":107197,"çĿ¿":107198,"è¿IJèIJ¥åķĨ":107199,"头éĥ¨":107200,"çļĦè§Ĵèī²":107201,"æĺ¯ä»ĸ":107202,"æĹ¢æľī":107203,"å°ıæĹ¶åĢĻ":107204,"强åĬ²":107205,"主æĴŃ":107206,"åħ¨åĽ½åIJĦåľ°":107207,"æįı":107208,"æįŁåĿı":107209,"åķĨä¼ļ":107210,"ä¿Ŀç½Ĺ":107211,"çľģå¸Ĥ":107212,"éļ§éģĵ":107213,"æľīä¸įå°ij":107214,"è¦ģåľ¨":107215,"å»ºè®¾é¡¹çĽ®":107216,"ç³ĸå°¿":107217,"ç³ĸå°¿çĹħ":107218,"æĿ¡ä»¶ä¸ĭ":107219,"ä¼ĺè´¨çļĦ":107220,"é¦ĸåıij":107221,"å½ĵæĹ¶çļĦ":107222,"丰çͰ":107223,"大çĽĺ":107224,"缸继":107225,"å®ģå¤ı":107226,"åħ¥ä½ı":107227,"æĪijè¿ĺ":107228,"åħĭæĸ¯":107229,"å®ļä»·":107230,"å¹³æĸ¹åħ¬éĩĮ":107231,"çļĦçŁ¥è¯Ĩ":107232,"æĪij们ä¼ļ":107233,"åħĥå®Ŀ":107234,"ä½ĵéĩ
į":107235,"è³£":107236,"对æĪij们":107237,"çŁ³å®¶":107238,"çŁ³å®¶åºĦ":107239,"ç²¾åįİ":107240,"å½¢çĬ¶":107241,"åıĹåΰäºĨ":107242,"修订":107243,"ç¾İåľĭ":107244,"é«ĺæ¸ħ":107245,"çľ¼éķľ":107246,"è§īå¾Ĺèĩªå·±":107247,"带ç»Ļ":107248,"åͮ价":107249,"éĹ¨ç¥¨":107250,"åŃķå¦ĩ":107251,"ç͵è§Ĩåı°":107252,"åıijä½ľ":107253,"çļĦåij³éģĵ":107254,"éķ¿è¿ľ":107255,"åħ¬åħ±æľįåĬ¡":107256,"æŃ£å¸¸çļĦ":107257,"æľīè¿ĩ":107258,"é£İæĥħ":107259,"æ¯Ķéĩį":107260,"åIJ»":107261,"管çIJĨå·¥ä½ľ":107262,"综åIJο̧":107263,"已被":107264,"说起":107265,"æİĴæ°´":107266,"ä¸įæĸŃåľ°":107267,"æĥħæĢĢ":107268,"è¾ĵéĢģ":107269,"è¿ĩæķı":107270,"çļĦåı¯èĥ½æĢ§":107271,"æľįç͍":107272,"æľī许å¤ļ":107273,"å§Ķåī¯ä¹¦è®°":107274,"åĮĸå¦Ĩåĵģ":107275,"æļĤåģľ":107276,"æĬķèµĦ人":107277,"çıŃ级":107278,"说çĿĢ":107279,"åįĹåĮĹ":107280,"åĪĨè¡Į":107281,"çıłå®Ŀ":107282,"寶":107283,"å¢ŀå¤ļ":107284,"被åĬ¨":107285,"ç®ĬçļĦ":107286,"éĹľä¿Ĥ":107287,"çļĦèĦ¸":107288,"æĥŁ":107289,"ä¸įä¸Ģå®ļ":107290,"ç¶Ń":107291,"çģ«çĪĨ":107292,"ç§Łéĩij":107293,"çŀ§":107294,"éĩį建":107295,"è·ª":107296,"ä¸Ģ種":107297,"çļĦåIJĪä½ľ":107298,"å®īæħ°":107299,"ä»įæĺ¯":107300,"ä¸ĵä¸ļåĮĸ":107301,"è°ĥè§£":107302,"ä¸į妨":107303,"éĢĻæĺ¯":107304,"å¿ħéłĪ":107305,"ä¼ĬæľĹ":107306,"å¾ĹäºĨ":107307,"æľįåĬ¡å¹³åı°":107308,"姬":107309,"åħĪéĶĭ":107310,"çİĭåŃIJ":107311,"çļĦä¸ĢåĪĩ":107312,"æĢ»çIJĨ":107313,"åĵ¼":107314,"çªij":107315,"çļĦå¿ĥæĥħ":107316,"çļĦéĩį大":107317,"çijŁ":107318,"ä¸Ģç¬ij":107319,"åıijå±ķä¸Ń":107320,"åģ¥åº·åıijå±ķ":107321,"åĵģçīĮçļĦ":107322,"禮":107323,"ä½Ļ人":107324,"ä»Ĭ年以æĿ¥":107325,"æķ°çłģ":107326,"çѾè¯ģ":107327,"åİ»æī¾":107328,"åŁºéĩijä¼ļ":107329,"æĬ±æĢ¨":107330,"æŃ£å½ĵ":107331,"çıŃåŃIJæĪIJåijĺ":107332,"ä¸įåIJĪæł¼":107333,"åζå®ļäºĨ":107334,"ç¼ĵæħ¢":107335,"åĪ¶çº¦":107336,"æłı缮":107337,"å¸Ĥåľºç»ıæµİ":107338,"ç»ĦæĪIJçļĦ":107339,"严峻":107340,"æĹ¥è®¯":107341,"ä¸ĢçĤ¹çĤ¹":107342,"æĺ¯æĢİä¹Ī":107343,"çļĦçħ§çīĩ":107344,"éĺ»æŃ¢":107345,"模ç³Ĭ":107346,"缸":107347,"éģķåıį":107348,"æIJ¬è¿ģ":107349,"éĩijéĴ±":107350,"彬":107351,"ä¸įå®ī":107352,"æĪĺçķ¥åIJĪä½ľ":107353,"å¡«åĨĻ":107354,"讲究":107355,"åħħåĪĨåĪ©ç͍":107356,"èĥ½å¤ł":107357,"èij¡èIJĦéħĴ":107358,"éĩĩç͍äºĨ":107359,"åľ¨ä»Ĭå¹´":107360,"ä¸Ńå°ıåѦ":107361,"åľ¨æĦı":107362,"çļĦåİĭåĬĽ":107363,"ä¸į幸":107364,"åζèį¯":107365,"åı¯ä»¥è®©":107366,"被è¯Ħ为":107367,"ç»ĨèıĮ":107368,"æĪıåī§":107369,"åįĬ导":107370,"åįĬ导ä½ĵ":107371,"è§Ĩè§Ĵ":107372,"åĸľæŃ¡":107373,"å¾ģæĶ¶":107374,"è°ĭåĪĴ":107375,"æŀģ大çļĦ":107376,"çĤ¹èµŀ":107377,"è®°èĢħä»İ":107378,"两åIJį":107379,"èĩªåĬ©":107380,"èµ·æŃ¥":107381,"æĬ¤å£«":107382,"å®Ŀ马":107383,"太åŃIJ":107384,"å°ıå°ıçļĦ":107385,"温æ³ī":107386,"åĩºç§Łè½¦":107387,"ç§ŁæĪ¿":107388,"两家":107389,"éľĩæĴ¼":107390,"ç§īæī¿":107391,"ä¸Ģä»¶äºĭ":107392,"çĥĪ士":107393,"å®ĺåħµ":107394,"转身":107395,"ä¹IJåĽŃ":107396,"çĻĮçĹĩ":107397,"模èĮĥ":107398,"æĦ£":107399,"è¿ĩåİ»çļĦ":107400,"代价":107401,"çļĦæ¦Ĥ念":107402,"åĩłçϾ":107403,"è´µéĺ³":107404,"æĭħå¿§":107405,"éĢĤå®ľ":107406,"çݯå¢ĥä¿ĿæĬ¤":107407,"çĥ«":107408,"ä½łæĥ³":107409,"æŃ¤åIJİ":107410,"ä½łä¹Ł":107411,"çįİ":107412,"éϤæŃ¤":107413,"éϤæŃ¤ä¹ĭå¤ĸ":107414,"è°ĥ度":107415,"ç§ij缮":107416,"æīĢ说çļĦ":107417,"åĬĩ":107418,"忽è§Ĩ":107419,"ä¸ī次":107420,"ä¸ĢæĹ¥":107421,"åŀĤ缴":107422,"ç«ŀæĬĢ":107423,"éĿ¢åĮħ":107424,"大æĪĺ":107425,"æIJºå¸¦":107426,"å¦Ĥæŀľæ²¡æľī":107427,"åħ»æĪIJ":107428,"åĩºè¡Ģ":107429,"çα好èĢħ":107430,"æīĵéĢļ":107431,"èµ·è¯ī":107432,"åijĪçݰåĩº":107433,"æŃĮæīĭ":107434,"åľ¨å¤ĸ":107435,"é¢Ĩ导干éĥ¨":107436,"åĨ¥":107437,"èĪĨ论":107438,"æıIJåıĸ":107439,"éĺ¿å°Ķ":107440,"æľĽçĿĢ":107441,"ä¸īäºļ":107442,"財":107443,"åĪ·æĸ°":107444,"æĻļæĬ¥":107445,"è¿ĺæľīä¸Ģ个":107446,"åĨ°ç®±":107447,"ç½ijçĤ¹":107448,"åĩºåħ·":107449,"强çĥĪçļĦ":107450,"æĪijçĽ¸ä¿¡":107451,"å¸ĮæľĽèĥ½":107452,"çīĻ齿"
:107453,"äºĭå®ľ":107454,"ä¸ļåĨħ人士":107455,"ä»£æĽ¿":107456,"åıĺå½¢":107457,"éĽ²":107458,"è°ĥæİ§":107459,"åĪĽæĸ°åĪĽä¸ļ":107460,"æĭĨè¿ģ":107461,"æł¸æŁ¥":107462,"éĢĹ":107463,"åħ¥åѦ":107464,"æĦıåIJij":107465,"æıĽ":107466,"ä¸ĭ次":107467,"ä¼łè¾ĵ":107468,"ä»ĸä»¬åľ¨":107469,"èĢĮä¸Ķè¿ĺ":107470,"æĹ¥åľ¨":107471,"æķĻè®Ń":107472,"æ´»çĿĢ":107473,"çļĦæľīæķĪ":107474,"å¤įå·¥å¤į":107475,"å¤įå·¥å¤į产":107476,"æĺ¯ä¸Ģä»¶":107477,"çŃīçĿĢ":107478,"復":107479,"åĭĩæķ¢":107480,"éģŃåıĹ":107481,"å¥Ķé©°":107482,"讲座":107483,"说å®Į":107484,"ç»Ļåĩº":107485,"è°¦":107486,"è¯ĬçĸĹ":107487,"çĽ²çĽ®":107488,"客è¿IJ":107489,"å°±è¿ŀ":107490,"å¼Ģåħĥ":107491,"å¼Ģåħĥæ£ĭçīĮ":107492,"ä¸įæĸŃæıIJåįĩ":107493,"ç͍æĪ·çļĦ":107494,"æĴķ":107495,"ä¾Ľæ°´":107496,"ç¶ĵæ¿Ł":107497,"ä¸ŃåĮ»èį¯":107498,"èģĶæĥ³":107499,"åħ¬äº¤è½¦":107500,"èĪªçıŃ":107501,"æĬĢè¡ĵ":107502,"å¼ķèµ·çļĦ":107503,"å°¹":107504,"èµĦæ·±":107505,"åĽ½èµĦå§Ķ":107506,"èĺŃ":107507,"é¼»åŃIJ":107508,"éĹ½":107509,"æİĴéĺŁ":107510,"è§Ĥåħī":107511,"éģĹåĿĢ":107512,"ä¸ľäº¬":107513,"é¥ŃåºĹ":107514,"ä¸įæĸŃçļĦ":107515,"å°±æĺ¯ä¸Ģ个":107516,"éķ¿ä¹ħ":107517,"çļĦè§ĤçĤ¹":107518,"娶":107519,"æĪijçİ°åľ¨":107520,"çķ°":107521,"å¾Ĺåĩº":107522,"å¿ħå®ļ":107523,"ä¸įåıĹ":107524,"åıªéľĢè¦ģ":107525,"åĽ°æī°":107526,"ç§ijåѦæĬĢæľ¯":107527,"çīĽèĤī":107528,"è¾ĥé«ĺçļĦ":107529,"è·ijæŃ¥":107530,"æ²¾":107531,"èı©èIJ¨":107532,"æľĢå¾Į":107533,"ä¿Ŀå¯Ĩ":107534,"æ²»å®ī":107535,"éĤ±":107536,"常è¯Ĩ":107537,"èĦ¸èī²":107538,"åĮĹ大":107539,"æ±ĩèģļ":107540,"æijĨèĦ±":107541,"é¾Ļ头ä¼ģä¸ļ":107542,"女åıĭ":107543,"çŃīå·¥ä½ľ":107544,"ä¸Ńç¾İ":107545,"èģĮåľº":107546,"èĦijè¢ĭ":107547,"åĨĻçļĦ":107548,"饲æĸĻ":107549,"åĬ³åĬ¨åĬĽ":107550,"屯":107551,"æĮģèĤ¡":107552,"åĽ¾åĥı":107553,"è¿ĩåİ»äºĨ":107554,"貨":107555,"è¾²":107556,"éĹ®æĪij":107557,"è·Łä½ł":107558,"çĶŁæŃ»":107559,"审ç¾İ":107560,"é¢Ĺç²Ĵ":107561,"ä¸Ńæĸ¹":107562,"åĬłçĥŃ":107563,"æĹħè¡Į社":107564,"çϼçĶŁ":107565,"ä¸įåłª":107566,"åĤ·":107567,"æ¥ł":107568,"åĬŀæ¡Ī":107569,"æŁĦ":107570,"æĹ¢æĺ¯":107571,"å¤ĦåĪĨ":107572,"羣å®ŀçļĦ":107573,"æĬ¥çº¸":107574,"å¸Īçζ":107575,"å®īå¾½çľģ":107576,"åī¯ä¸»å¸Ń":107577,"ä¹ĭéģĵ":107578,"导弹":107579,"åŃ¦æł¡çļĦ":107580,"åŁİå¸ĤçļĦ":107581,"è°Īåΰ":107582,"æ¢Ĺ":107583,"å¹³éĿ¢":107584,"说ä»Ģä¹Ī":107585,"é¢ijçİĩ":107586,"éķ¿ä¸īè§Ĵ":107587,"çļĦåĪ©çĽĬ":107588,"黨":107589,"è±ĨèħIJ":107590,"å®ŀéĻħæĥħåĨµ":107591,"æŀĹä¸ļ":107592,"纪æ£ĢçĽijå¯Ł":107593,"ä½ıéĻ¢":107594,"çļĦæķ´ä½ĵ":107595,"åīįè¡Į":107596,"æĮ¨":107597,"çħ¤çŁ¿":107598,"å̻è£ģ":107599,"å°ıåIJĥ":107600,"æŀģ端":107601,"å©Ĩå©Ĩ":107602,"çݰ货":107603,"è¯ĹæŃĮ":107604,"éĴ¥åĮĻ":107605,"缩çŁŃ":107606,"ä½Ĩè¿Ļ":107607,"æĸ°åĵģ":107608,"è¿Ļ对":107609,"çŁ¥åIJį度":107610,"å¿ĹæĦ¿æľįåĬ¡":107611,"大å±Ģ":107612,"è¡¡éĩı":107613,"ä½ĵçݰäºĨ":107614,"æ¡ĥèĬ±":107615,"åIJ¸å¼ķåĬĽ":107616,"åł¤":107617,"æĵħéķ¿":107618,"åĴĴ":107619,"çĽ¸æľº":107620,"ä¸Ģç«Ļ":107621,"ä¸Ģç«Ļå¼ı":107622,"æľĢç¾İ":107623,"æ°¸ä¹ħ":107624,"çļĦéĥ¨åĪĨ":107625,"åĪĨå·¥":107626,"å·¥ç¨ĭ建设":107627,"æIJŃè½½":107628,"æ°´ä¸Ń":107629,"èĮ¨":107630,"çļĦæĵįä½ľ":107631,"ç»Łæ²»":107632,"çķħéĢļ":107633,"åħļçļĦåįģ":107634,"輸":107635,"測":107636,"ç¾İè§Ĥ":107637,"ä¸įåĪ©":107638,"åıįæĢĿ":107639,"éªĦåĤ²":107640,"æłĩçļĦ":107641,"æĿĢ人":107642,"éĺ¿å§¨":107643,"é£ŁæĿIJ":107644,"åIJĥçļĦ":107645,"åIJİåĨį":107646,"çŁ£":107647,"两侧":107648,"æ¸ħæ°´":107649,"è¿ĽçIJĥ":107650,"å¼Ģå§ĭäºĨ":107651,"åIJ¬äºĨ":107652,"çĦĬæİ¥":107653,"磮":107654,"å¨Ł":107655,"为人":107656,"éĢģç»Ļ":107657,"åĨĴéĻ©":107658,"æķ·":107659,"ç»ĪæŃ¢":107660,"æīįçŁ¥éģĵ":107661,"è¿IJæ°Ķ":107662,"éĢļé£İ":107663,"æĥĬè®¶":107664,"ç§ijåѦéĻ¢":107665,"æıIJéĹ®":107666,"太åİŁ":107667,"缸åIJĮçļĦ":107668,"ä»ķ":107669,"èģĸ":107670,"æĥħæ³ģ":107671,"é¢Ĩ导人":107672,"åĩºæĿ¥äºĨ":107673,"沿线"
:107674,"éϽ":107675,"æĦŁè¦º":107676,"ä»įåľ¨":107677,"æ©Ļ":107678,"约为":107679,"åĸĿéħĴ":107680,"ç͍èį¯":107681,"ä¸ĭä¸Ģ":107682,"æ³ķå®ĺ":107683,"顺åºı":107684,"åģļä¸Ģ个":107685,"åĭ¢":107686,"æŃª":107687,"ç͵ç«ŀ":107688,"ä¼´éļıçĿĢ":107689,"ä¹ĭåĬĽ":107690,"ä¹ĭ人":107691,"äºij计ç®Ĺ":107692,"åĪ«äººçļĦ":107693,"ç§ijåѦåıijå±ķ":107694,"第åħ«":107695,"å¹²æī°":107696,"女ç¥ŀ":107697,"è¿Ļæł·åģļ":107698,"å¤Ħåľ¨":107699,"æ°´è´¨":107700,"éķ¿æĺ¥":107701,"å¸ĤåľºéľĢæ±Ĥ":107702,"ç»´æĿĥ":107703,"èĢ³æľµ":107704,"æĸĩåĮĸçļĦ":107705,"奶ç²ī":107706,"ä¼łè¾¾":107707,"æīĭæľºçīĪ":107708,"æĽ¾åľ¨":107709,"äºĮæľŁ":107710,"åİŁåĽłæĺ¯":107711,"æºIJ头":107712,"åıĪèĥ½":107713,"裸":107714,"æĬĢæľ¯åĪĽæĸ°":107715,"æĸĩåĮĸæĹħ游":107716,"åıij票":107717,"年级":107718,"ä½łä¸į":107719,"ä¹ĭå¿ĥ":107720,"æķ°çϾ":107721,"åIJijå¾Ģ":107722,"èĢģå®¶":107723,"åľĭéļĽ":107724,"çļĦé«ĺ度":107725,"æľĿéĺ³":107726,"æ¸ħéϤ":107727,"èĩªæľī":107728,"书ä¸Ń":107729,"游æĪıè£ħå¤ĩ":107730,"ä¸ĩå¤ļ":107731,"驾驶åijĺ":107732,"ä½łçŁ¥éģĵ":107733,"åĽ½åºĨ":107734,"é£ŁåłĤ":107735,"æİ¥åı£":107736,"æĢ»æķ°":107737,"åħ¶ä»ĸçļĦ":107738,"çĶŁåij½çļĦ":107739,"ä½łåľ¨":107740,"çļĦ缮åħī":107741,"è¿Ļæĸ¹éĿ¢":107742,"éĥ½è¯´":107743,"çĸĹæ³ķ":107744,"åĭĩ士":107745,"åľ¨åħ¨çIJĥ":107746,"ä¿ĿéĻ©åħ¬åı¸":107747,"çĿ£æŁ¥":107748,"åĸĦèī¯":107749,"表彰":107750,"è¹²":107751,"路段":107752,"æľĥåĵ¡è¦ı":107753,"æľĥåĵ¡è¦ıç¯Ħ":107754,"æĪ·åŀĭ":107755,"ä¿ĥ使":107756,"修建":107757,"é«ĺæ°´å¹³":107758,"åģļåĩºäºĨ":107759,"ä¸»åľº":107760,"è¡Įèµ°":107761,"空çϽ":107762,"æľī人说":107763,"è¿Ļ个ä¸ĸçķĮ":107764,"åIJįä¹ī":107765,"å®Įç¾İçļĦ":107766,"羡æħķ":107767,"åıĬåħ¶ä»ĸ":107768,"åı¯ç͍":107769,"æĭIJ":107770,"è¾ĥ大çļĦ":107771,"æĬĢæľ¯åĴĮ":107772,"å°¼äºļ":107773,"çĻ¾è´§":107774,"æıī":107775,"éĢīè´Ń":107776,"éĺŁåıĭ":107777,"ä¼łæĦŁ":107778,"ä¼łæĦŁåύ":107779,"åıªè¦ģä½ł":107780,"为ä»Ģä¹Īè¦ģ":107781,"ä¸ĵ注äºİ":107782,"ä½Ļé¢Ŀ":107783,"åħ¸åŀĭçļĦ":107784,"缮åīįå·²":107785,"æ¬²æľĽ":107786,"èģĶ绾":107787,"æµģä¼ł":107788,"çļĦå®¶åºŃ":107789,"åı·åı¬":107790,"çıįè´µ":107791,"ä¼Łå¤§çļĦ":107792,"éī´äºİ":107793,"è·Łä»ĸ":107794,"产çī©":107795,"ä¸įå·²":107796,"è¿Ŀæ³ķè¡Į为":107797,"头ä¸Ĭ":107798,"åĪĨè§£":107799,"åı¯ä»¥çľĭåĩº":107800,"æł¡åĮº":107801,"åŃĹä½ĵ":107802,"ä¿®çĤ¼":107803,"çĶļèĩ³æĺ¯":107804,"微信åħ¬ä¼Ĺ":107805,"åıĸ代":107806,"èIJ¥ä¸ļæĶ¶åħ¥":107807,"æ½įåĿĬ":107808,"ä½łèĥ½":107809,"社ä¼ļä¿Ŀéļľ":107810,"æ¯ĶèµĽä¸Ń":107811,"污水å¤ĦçIJĨ":107812,"夫å¦ĩ":107813,"ä¸Ģå¹ħ":107814,"沿海":107815,"åı£æĦŁ":107816,"ä½Ĩåį´":107817,"å½ĵæĹ¥":107818,"çļĦæľĢ大":107819,"æ¯ıä¸Ģä½į":107820,"没äºĭ":107821,"çī¹åĪ¥":107822,"å¼ĢåѦ":107823,"è·¯éĿ¢":107824,"å¿ĥçIJĨåѦ":107825,"æĶ¾ç½®":107826,"éĩįåºĨå¸Ĥ":107827,"ä½łèĩªå·±":107828,"æ¶Īè´¹èĢħçļĦ":107829,"ä¸Ģæ³¢":107830,"èѦæĥķ":107831,"åį§å®¤":107832,"注å°Ħ":107833,"é£İ鼨":107834,"沿çĿĢ":107835,"åijĬ訴":107836,"表çݰåĩº":107837,"åĽĽæĺ¯":107838,"åı¤åħ¸":107839,"æĽ´éĩįè¦ģçļĦ":107840,"好äºĭ":107841,"çľ¼æ³ª":107842,"æ¨ĵ":107843,"审åΤ":107844,"碰æĴŀ":107845,"车ç«Ļ":107846,"è¿Ľåħ¥äºĨ":107847,"éĽĨåIJĪ":107848,"æł¼å¤ĸ":107849,"宾é¦Ĩ":107850,"æĶ¯ä»ĺå®Ŀ":107851,"她æĺ¯":107852,"æĺ¯å¦Ĥä½ķ":107853,"人次":107854,"çļĦæĪIJåĬŁ":107855,"æĹłåĬĽ":107856,"æµ·æĭĶ":107857,"æĺ¥åŃ£":107858,"éĥ½ä¸įä¼ļ":107859,"çŃīå¤ļç§į":107860,"ä¸Ģ个å°ı":107861,"åģľè½¦åľº":107862,"è®©æĽ´å¤ļ":107863,"è¿ĻçĤ¹":107864,"æĪIJåĵģ":107865,"éĴī":107866,"éģĩè§ģ":107867,"çıŃ主任":107868,"æĦıæĦ¿":107869,"çļĦåIJĮåѦ":107870,"游è§Ī":107871,"åİĭ缩":107872,"åľ¨ä¼łå¥ĩ":107873,"å¼¹æĢ§":107874,"æĹ¥åĨħ":107875,"ç¦ı建çľģ":107876,"è§ĴèIJ½":107877,"åĪĨå¼Ģ":107878,"ä¼ļ让":107879,"å¤ĸåĽ´":107880,"çĨŁæĤīçļĦ":107881,"çĨĶ":107882,"ä¸ĩè¾Ĩ":107883,"å¤ľéĹ´":107884,"车身":107885,"ä¸ŃæľŁ":107886,"å®ĮåĸĦçļĦ":107887,"åĵģç±»":107888,"åıĭè°Ĭ":107889,"éĢīæĭĶ":107890,"éªi
j士":107891,"彦":107892,"çļĦçľĭæ³ķ":107893,"åĽ½çİĭ":107894,"è¾£æ¤Ĵ":107895,"åıijå¸ĥæĹ¶éĹ´":107896,"åı¤åŁİ":107897,"éļıæľº":107898,"ç«ĸ":107899,"å¼Ģè¾Ł":107900,"ä¼ĹçĶŁ":107901,"没åĬŀæ³ķ":107902,"åįĥéĩĮ":107903,"æĿ¥æºIJäºİ":107904,"çļĦæĿĥåĪ©":107905,"æ¯ĶåĪĨ":107906,"满æĦıçļĦ":107907,"ä¿®è¡Į":107908,"åĿł":107909,"大海":107910,"èݹ":107911,"åĩºèº«":107912,"è«ĩ":107913,"åħ³èĬĤ":107914,"åIJį人":107915,"éľĢè¦ģ注æĦı":107916,"æĹ©æĻ¨":107917,"å¤ĸåįĸ":107918,"åıĪè¦ģ":107919,"æ¶īæ¡Ī":107920,"çĶ³è¯·äºº":107921,"éĻĦè¿ijçļĦ":107922,"åĬłå¿«æİ¨è¿Ľ":107923,"æĸ°å¹´":107924,"大è¡Ĺ":107925,"ä¸Ģé»ŀ":107926,"èĭıå®ģ":107927,"æĤĦæĤĦ":107928,"èĦ¾æ°Ķ":107929,"å¸ĮèħĬ":107930,"éļıåį³":107931,"æķ¢äºİ":107932,"å®ŀè·µä¸Ń":107933,"æĺ¯æ²¡æľī":107934,"æľīè¶£çļĦ":107935,"æĿ¥èĩªäºİ":107936,"è£ģåΤ":107937,"女åŃ©åŃIJ":107938,"èĩ³åħ³":107939,"èĩ³åħ³éĩįè¦ģ":107940,"æĻºåĬĽ":107941,"èµ°åĩºåİ»":107942,"çŁŃæĿ¿":107943,"å¤§åĽ½":107944,"çļĦ认è¯Ĩ":107945,"å¹´å¤ľ":107946,"åĨįåΰ":107947,"åIJĮæł·çļĦ":107948,"å¯Ĩå°ģ":107949,"å¤ĸ交éĥ¨":107950,"çĶŁæķĪ":107951,"æĤ¨åı¯ä»¥":107952,"ä½łåĢij":107953,"è¿ĩå¹´":107954,"å¼ĵ":107955,"è¡ĮæĿİ":107956,"æ¯Ķèµ·":107957,"身é«ĺ":107958,"è¿Ļ个人":107959,"ä¸Ńå¤ĸ":107960,"éģĵæŃī":107961,"çĽ¯çĿĢ":107962,"亲åŃIJ":107963,"éŸ":107964,"çϽäºij":107965,"èĦĸåŃIJ":107966,"ä¸ĢåĪĩéĥ½":107967,"æ·ij":107968,"è°ľ":107969,"åģ¶çĦ¶":107970,"éĿłè°±":107971,"é«ĺ管":107972,"ä¸ĭåıij":107973,"æĶ¾åΰ":107974,"ç±»åĪ«":107975,"ä¸ĭåĪĹ":107976,"æ··ä¹±":107977,"åIJĪæ³ķæĿĥçĽĬ":107978,"çݯçIJĥ":107979,"æľīæķĪåľ°":107980,"åķĨæĪ·":107981,"æ¹ĸ人":107982,"海岸":107983,"æĬķ产":107984,"两个æľĪ":107985,"éĥ½éĿŀ常":107986,"å¢ŀ强äºĨ":107987,"æĿ¥åΰäºĨ":107988,"åī©ä½Ļ":107989,"æĤ¨çļĦåŃ©åŃIJ":107990,"æµģæ°´":107991,"æŃ£ä¹ī":107992,"天çĮ«":107993,"åģļè¿ĩ":107994,"ä½ķæĹ¶":107995,"æĪijåİ»":107996,"çľģ份":107997,"å¥ĸéĩij":107998,"该å¦Ĥä½ķ":107999,"ä¸ĭçıŃ":108000,"åģ¶åĥı":108001,"æijĨæĶ¾":108002,"æĸ°æ¨¡å¼ı":108003,"æĬķè³ĩ":108004,"è·¯åı£":108005,"åĨľæ°ijå·¥":108006,"大åѸ":108007,"ä»¶äºĭ":108008,"æł¹æľ¬ä¸į":108009,"æµĵ度":108010,"æµĵåİļ":108011,"è½®èĥİ":108012,"æĪ¿ä¼ģ":108013,"éĿŀ常好":108014,"ä»İä¸Ń":108015,"äººæł¼":108016,"ç¿ģ":108017,"æĹ¶éĹ´åĴĮ":108018,"è¿Ļä¸įæĺ¯":108019,"åΏåķĨ":108020,"æĥĬ人":108021,"åύå®ĺ":108022,"åĩĨåĪĻ":108023,"æĥħæĻ¯":108024,"æĽ´é«ĺçļĦ":108025,"åѦ家":108026,"泡沫":108027,"åľ°æĸ¹æĶ¿åºľ":108028,"å°±çŁ¥éģĵ":108029,"åij¼åIJģ":108030,"ç»ıè´¸":108031,"èĬ±éĴ±":108032,"æľīä¸Ģ次":108033,"æĦŁæħ¨":108034,"ä¸Ģåįĥ":108035,"å¤ľæĻļ":108036,"詹å§Ĩ":108037,"詹å§Ĩæĸ¯":108038,"è¦ģéĹ»":108039,"ç»Ĵ":108040,"æºIJäºİ":108041,"çļĦè´¨éĩı":108042,"注æĦıäºĭ项":108043,"æħ¢æĢ§":108044,"稳å®ļçļĦ":108045,"建设åĴĮ":108046,"æĻ¯è±¡":108047,"éĩıåĮĸ":108048,"çļĦ話":108049,"è¯Ħ级":108050,"æºľ":108051,"红åĮħ":108052,"éĢļéģİ":108053,"社ä¼ļ责任":108054,"æĸ°äº§åĵģ":108055,"åĨ·éĿĻ":108056,"çľĭä¸įåΰ":108057,"èģĶéĤ¦":108058,"éŃĦ":108059,"çļĦåīįæıIJ":108060,"çļĦåīįæıIJä¸ĭ":108061,"è¾ĥ好":108062,"çļĦæĦŁæĥħ":108063,"客æĪ·æıIJä¾Ľ":108064,"çĭ¬èĩª":108065,"å¢ŀæĶ¶":108066,"æĸĩçĮ®":108067,"æĭ¼åij½":108068,"管çIJĨåĴĮ":108069,"æµģåĬ¨æĢ§":108070,"åħ¨å®¶":108071,"ä¸Ĭæĸ¹":108072,"æİ¨åĩºçļĦ":108073,"ä¸īåĽ½":108074,"ä¸Ģ个æĺ¯":108075,"æĸ°ä¸Ģè½®":108076,"æĸĩåĮĸéģĹ产":108077,"殺":108078,"大湾åĮº":108079,"éĥ½éľĢè¦ģ":108080,"çļĦå®ŀéĻħ":108081,"ç·Ĭ":108082,"大å¥ĸ":108083,"åħīèĬĴ":108084,"便äºİ":108085,"çļĦ表æĥħ":108086,"æ¼Ķç»İ":108087,"红åĨĽ":108088,"å½ĵæĪij":108089,"æ²»æĦĪ":108090,"é¢Ŀ度":108091,"éĿľ":108092,"ä»»ä½ķ人":108093,"è¡Ĺ头":108094,"çĸ¯":108095,"çĸ¯æĭī":108096,"åĮ»çĸĹæľºæŀĦ":108097,"ç»ĻåŃ©åŃIJ":108098,"è§Ħ磩":108099,"è£ľ":108100,"çļĦ身影":108101,"ä¸ĵæłı":108102,"æĿ¥ä¸´":108103,"童年":108104,"å¤įèĭı":108105,"è¨Ĥ":108106,"åŀĭåı·":108107,"åĽ¾æ¡Ī":108108,"ç®ĢåİĨ":
108109,"æĭ±":108110,"èį·åħ°":108111,"ä»»æĦı":108112,"æī¿æİ¥":108113,"è¿Ļæīį":108114,"客车":108115,"æľĿçĿĢ":108116,"éłħ缮":108117,"åı°é£İ":108118,"çļĦæĪ¿åŃIJ":108119,"éªı":108120,"æĿ±è¥¿":108121,"éģĹä¼ł":108122,"è¶Ĭå¤ļ":108123,"äºĨä»ĸçļĦ":108124,"ä¸Ĭåij¨":108125,"管çIJĨåĪ¶åº¦":108126,"失ä¸ļ":108127,"çĶ·åıĭ":108128,"æİ¥ç§į":108129,"å¨ģåIJį":108130,"çĴ°å¢ĥ":108131,"åıijçĶŁåľ¨":108132,"ä¸ªåĽ½å®¶":108133,"åĪĽæĸ°åıijå±ķ":108134,"æĶ¹åıĺäºĨ":108135,"åģ¥åº·çļĦ":108136,"å̼å¾Ĺä¸Ģ":108137,"å̼å¾Ĺä¸ĢæıIJ":108138,"åĽ¢ä¼Ļ":108139,"åģĩ设":108140,"åı°ä¸Ĭ":108141,"è§ĦèĮĥåĮĸ":108142,"éĻªåIJĮ":108143,"座æ¤ħ":108144,"åı¯æĢľ":108145,"åħĭæĢĿ主ä¹ī":108146,"æ³ķå¾ĭ责任":108147,"ä¸Ģé¡¿":108148,"æĬ¬å¤´":108149,"为éĩįçĤ¹":108150,"è¿ľæ´ĭ":108151,"éĢıè¿ĩ":108152,"åħ¨çIJĥåĮĸ":108153,"è¶£åij³":108154,"票æĪ¿":108155,"æ¯ı人":108156,"åIJĦç§įåIJĦæł·":108157,"äºĨåĩºæĿ¥":108158,"ç»Ŀ对æĺ¯":108159,"ä¸ĭå±ŀ":108160,"ä¸ĢåıĮ":108161,"è¿ĻåĿĹ":108162,"æĬĹçĸ«":108163,"è¦ģçĤ¹":108164,"å½¢æĪIJçļĦ":108165,"æĪijçľĭ":108166,"ä¸ĩéĩĮ":108167,"èĢĥçłĶ":108168,"为åħ¶":108169,"æ°ij宿":108170,"å¤ļä½į":108171,"大èĩ´":108172,"ä»ĺè´¹":108173,"åħ¥æīĭ":108174,"å±ħå®¶":108175,"æīĢåľ¨åľ°":108176,"人身":108177,"è¿ĩå¾Ĺ":108178,"è¯ķè¯ķ":108179,"访è°Ī":108180,"åĬłéĩį":108181,"å°±ä¸įä¼ļ":108182,"çĶŁäº§ä¼ģä¸ļ":108183,"åĽŀåĽ½":108184,"åºķ线":108185,"èµ¶åΰ":108186,"æĶ¯éĺŁ":108187,"æĪij们éĥ½":108188,"éĤ®æĶ¿":108189,"缴èĩ³":108190,"éĴ¢çIJ´":108191,"åħľ":108192,"çłĶ讨ä¼ļ":108193,"æľĪ亮":108194,"åĿļæĮģ以":108195,"åħ¬å®īéĥ¨":108196,"éĴ¢ç®¡":108197,"å°ıçϽ":108198,"ç½®ä¸ļ":108199,"èģĭ":108200,"书åĨĻ":108201,"æĿı":108202,"éħįæĸ¹":108203,"èĢĮåıĪ":108204,"çijŀ士":108205,"çķĮçļĦ":108206,"èĢģ大":108207,"æĪIJçĨŁçļĦ":108208,"å¹²ä»Ģä¹Ī":108209,"ä¸ĵ项æĸĹäºī":108210,"çŃīå¤ļ个":108211,"èĦ±ç¦»":108212,"ä¸ī个æľĪ":108213,"çłĶç©¶åijĺ":108214,"æĹĭ转":108215,"æŀģèĩ´":108216,"åħįè´£":108217,"åħį责声æĺİ":108218,"å¾Īå¤ļçݩ家":108219,"车ä¸Ĭ":108220,"交äºĴ":108221,"å·²æĺ¯":108222,"ä¸Ģå°ı":108223,"çļĦéĩįçĤ¹":108224,"èĬ±äºĨ":108225,"ä¸įæĺİ":108226,"æľīåħ³è§Ħå®ļ":108227,"çĬ¹å¦Ĥ":108228,"羸":108229,"寡":108230,"çļĦè¡£æľį":108231,"åĮħ裹":108232,"身åŃIJ":108233,"å¸ĪèĮĥ大åѦ":108234,"äºĭåħĪ":108235,"线æĿ¡":108236,"æ³ķåζ":108237,"åħ»æĬ¤":108238,"稳å®ļæĢ§":108239,"éĤµ":108240,"åŀĦæĸŃ":108241,"é¡į":108242,"èĢĥåı¤":108243,"æĿłæĿĨ":108244,"èĭıèģĶ":108245,"æ°´ç͵":108246,"åħ·ä½ĵçļĦ":108247,"æ¿Ģæ´»":108248,"æĪijæł¡":108249,"åĪļå¼Ģå§ĭ":108250,"åĩ¸æĺ¾":108251,"禾":108252,"åħ¼èģĮ":108253,"éĢıéģİ":108254,"åľ¨æ¸¸æĪıä¸Ń":108255,"社ä¼ļåıijå±ķ":108256,"好çİ©":108257,"å¹»æĥ³":108258,"ä¸į代表":108259,"注æĦıåĬĽ":108260,"æ£į":108261,"ç͍æīĭ":108262,"ç¾İ人":108263,"许å¤ļ人":108264,"å¾Īæĺ¯":108265,"çļĦçłĶåıij":108266,"æīĵåĩº":108267,"åIJĪä¼Ļ人":108268,"ä¸Ģå¤ľ":108269,"ç¼ĵç¼ĵ":108270,"ä¿®æŃ£":108271,"æĦŁçŁ¥":108272,"ç»Ī身":108273,"æ¿Ģç´ł":108274,"çݯå¢ĥä¸ĭ":108275,"次ä¼ļè®®":108276,"ç»ıæµİå¢ŀéķ¿":108277,"æīĽ":108278,"åıijéħµ":108279,"åĪĨæŀIJå¸Ī":108280,"åľ¨æľªæĿ¥":108281,"主è¦ģæľī":108282,"ä¸ĢåŃ£åº¦":108283,"çļĦ说æ³ķ":108284,"ä»İæĿ¥æ²¡æľī":108285,"货车":108286,"缩å°ı":108287,"太è¿ĩ":108288,"æķĪåĬĽ":108289,"ä¸įä¸ĭ":108290,"æĬķ稿":108291,"èį¯ä¸ļ":108292,"ç»Ħéķ¿":108293,"ç«ĻçĤ¹":108294,"å¾Īåĸľæ¬¢":108295,"éIJµ":108296,"åĬ¿å¤´":108297,"æ¼ıæ´ŀ":108298,"æĦ¤æĢĴ":108299,"åħħå®ŀ":108300,"åĪĽä¸ļæĿ¿":108301,"çĪª":108302,"æľªå¿ħ":108303,"åºķéĥ¨":108304,"å¾ĹåĪĨ":108305,"人æ°ijåĮ»éĻ¢":108306,"äºĮæīĭæĪ¿":108307,"å·²ç»ı被":108308,"大楼":108309,"æĸ°æĪ¿":108310,"辦æ³ķ":108311,"ç͍åĬĽ":108312,"æĭĵ宽":108313,"åĨħåľ¨":108314,"æĴŃåĩº":108315,"饰æ¼Ķ":108316,"ä¹Łè®©":108317,"ä½ľçĤº":108318,"çī©ä¸ļ管çIJĨ":108319,"åį´ä¸į":108320,"为ä¸ŃåĽ½":108321,"å±ĢåĬ¿":108322,"ä¸įèĤ¯":108323,"æľĢæĸ°çļĦ":108324,"åı¯ä»¥éĢīæĭ©":108325,"æĺ¾çݰ
":108326,"å°±ç®Ĺæĺ¯":108327,"åľ¨æł¡":108328,"é¾Ł":108329,"两æĿ¡":108330,"çļĦå®ŀåĬĽ":108331,"è¶Ĭ好":108332,"å¥¹åľ¨":108333,"å¿łè¯ļ":108334,"ä¹ŁéľĢè¦ģ":108335,"游æĪıæĵįä½ľ":108336,"è¶ħåĩº":108337,"å¦Ĥæŀľä¸į":108338,"æīĢåľ¨çļĦ":108339,"ä½łè¿ĺ":108340,"以åĨħ":108341,"æľīä¸Ģå®ļ":108342,"åı¯è¾¾":108343,"è·ijåΰ":108344,"åīĽ":108345,"建ç«ĭåģ¥åħ¨":108346,"æķ´è½¦":108347,"åīįæĸ¹":108348,"éĹ´æİ¥":108349,"çѹå¤ĩ":108350,"çĸ²åĬ³":108351,"离å¼ĢäºĨ":108352,"æ±Ŀ":108353,"éĿ¢éĥ¨":108354,"ä¹ĭåīįçļĦ":108355,"åıĺ为":108356,"å¦Ĥæŀľè¯´":108357,"对ä»ĺ":108358,"åĿĩåı¯":108359,"被åijĬ人":108360,"ç²¾ç¾İ":108361,"èģļä¼ļ":108362,"çĿ̥̿":108363,"è°·æŃĮ":108364,"ä¸Ģåı·":108365,"红åĪ©":108366,"ä¼łå¥ĩ游æĪı":108367,"å»ĸ":108368,"è´ŀ":108369,"ä¹°åΰ":108370,"éŃļ":108371,"ä½ĵè´¨":108372,"å°ijäºĨ":108373,"æ³īå·ŀ":108374,"åIJŁ":108375,"ç»Ŀä¸į":108376,"é»ijæģ¶":108377,"é»ijæģ¶åĬ¿åĬĽ":108378,"ä¸Ĭæĺł":108379,"çļĦè¯Ŀé¢ĺ":108380,"ä¸ĩ人次":108381,"ä¸ĸéĹ´":108382,"ç͍工":108383,"贯穿":108384,"å®ĿçŁ³":108385,"ä½łå¥½":108386,"åĪĩåī²":108387,"å¼ºåĽ½":108388,"åĽŀèIJ½":108389,"æ°´æĻ¶":108390,"模仿":108391,"洪水":108392,"éĢĻ麼":108393,"åįģä¸īäºĶ":108394,"ä½ij":108395,"éĻĦä»¶":108396,"çļĦå¢ŀéķ¿":108397,"éĻĦå±ŀ":108398,"çݰ已":108399,"å¸®ä½ł":108400,"éĩijçīĮ":108401,"é«ĺåİŁ":108402,"åľ¨å®¶éĩĮ":108403,"éĺ²èħIJ":108404,"ç¡®å®ŀæĺ¯":108405,"宣讲":108406,"天æīį":108407,"ç»ıèIJ¥ç®¡çIJĨ":108408,"éĶħçĤī":108409,"åIJĪä¸Ģ":108410,"è§Ĥèµı":108411,"éķ¿è¾¾":108412,"主ä¹īæĢĿæĥ³":108413,"éĤ£éº¼":108414,"é£İäºij":108415,"为主çļĦ":108416,"æļijåģĩ":108417,"æĮģä¹ħ":108418,"å¼Ĥåľ°":108419,"å¼ĢéŨ":108420,"模æĿ¿":108421,"æī¹æ¬¡":108422,"ä¸į便":108423,"天çĶŁ":108424,"åĩłä¸ªæľĪ":108425,"ä¸ĵç§ij":108426,"åı¦æľī":108427,"åħ¬å¸ĥçļĦ":108428,"æĩ·":108429,"åľºåIJĪ":108430,"çļĦå¿ĥæĢģ":108431,"è¿ĺ好":108432,"å®ŀæĪĺ":108433,"èĢģå¸ĪçļĦ":108434,"åħ©åĢĭ":108435,"åı¯åľ¨":108436,"éĤ£ä½į":108437,"å¥łå®ļäºĨ":108438,"ä¿ĥéĶĢ":108439,"æı´åĬ©":108440,"ä¸ĩçī©":108441,"æĥħæĬ¥":108442,"é¦ĸåħĪè¦ģ":108443,"æĸĩåĮĸåĴĮ":108444,"éĥ½å·²ç»ı":108445,"ä¸Ĭä¸ĸ纪":108446,"åĨľåľº":108447,"大æī¹":108448,"æĺİçϽäºĨ":108449,"çļĦæĪIJéķ¿":108450,"çļĦæ¯ĶèµĽ":108451,"失误":108452,"åģļæĪIJ":108453,"ä»Ĭ天å°ıç¼ĸ":108454,"é¢Ĩè¢ĸ":108455,"æıIJåįĩäºĨ":108456,"å¾IJå·ŀ":108457,"ä»įæľī":108458,"è¿ĩ滤":108459,"å¹½é»ĺ":108460,"çĥŃéĩı":108461,"ä¸Ģé¦ĸ":108462,"æ¼Ĥ亮çļĦ":108463,"åĩłç§į":108464,"åĢ¡è®®":108465,"å°±åı¯ä»¥äºĨ":108466,"æİĴåĪĹ":108467,"éĩįéĩį":108468,"ä¼ģä¸ļåĴĮ":108469,"ä¸ĵå±ŀ":108470,"çħİ":108471,"亲æĪļ":108472,"çϾåĪĨä¹ĭ":108473,"稿件":108474,"è¿ĺå¾Ĺ":108475,"人åĵ¡":108476,"äºī夺":108477,"æĽ´å®¹æĺĵ":108478,"大èĩªçĦ¶":108479,"鼻èħ¦":108480,"太空":108481,"åľ°å¤Ħ":108482,"夢":108483,"ä»ĸ对":108484,"å¿ħå°Ĩ":108485,"ä¸įå½ĵ":108486,"严谨":108487,"åĩºåľº":108488,"å·²ç»ıæľī":108489,"é¢ĨåĨĽ":108490,"é«ĺæ¡£":108491,"ä¸ĢæīĢ":108492,"æłĹ":108493,"让åѦçĶŁ":108494,"æĽ¹æĵį":108495,"æŁIJä¸Ģ":108496,"伸åĩº":108497,"èĬ±åįī":108498,"æ¸ħéĨĴ":108499,"èģĶç³»æĸ¹å¼ı":108500,"åĪĨå±Ģ":108501,"èħ³":108502,"æ©¡èĥ¶":108503,"éķ¿å¾Ĺ":108504,"ç»¿åľ°":108505,"è¢į":108506,"çļĦèīºæľ¯":108507,"女æľĭåıĭ":108508,"ä¸Ńè¶ħ":108509,"离åŃIJ":108510,"å¤ļæł·åĮĸ":108511,"éĺ³åı°":108512,"ä½İ碳":108513,"ä¸Ģç±»":108514,"çŃīæĸ¹éĿ¢çļĦ":108515,"å¾Ĺ好":108516,"模åħ·":108517,"ä¸ĩ亿":108518,"çķĻæĦı":108519,"临æ²Ĥ":108520,"å°ijéĩı":108521,"çľĭåIJij":108522,"ç»ıèIJ¥èĢħ":108523,"çķĻä¸ĭäºĨ":108524,"åĿıäºĨ":108525,"åijĬåĪ«":108526,"羣çIJĨ":108527,"ç¼´è´¹":108528,"æĬĬä½ł":108529,"çļĦä»»åĬ¡":108530,"æĪij对":108531,"ä¹°åħ¥":108532,"çĻ»ä¸Ĭ":108533,"æľī两个":108534,"ä¸Ģ头":108535,"æĵįæİ§":108536,"åħ¨è¦ĨçĽĸ":108537,"çĿĢæīĭ":108538,"å¢ĻéĿ¢":108539,"å¤ļæĸ¹":108540,"åı¯çαçļĦ":108541,"ä¹Łåı¯èĥ½":108542,"æľĢæľī":108543,"è¿ĻäºĽéĥ½æĺ¯":108544,"æĥ¡":108
545,"å®®":108546,"å¾Īå°ı":108547,"éĹ®é¢ĺæĺ¯":108548,"åĿĩæľī":108549,"å¾ģéĽĨ":108550,"说åĩº":108551,"æľīæĦı":108552,"é¢Ĥ":108553,"æī¬å·ŀ":108554,"åķĨä¸ļ模å¼ı":108555,"çĶŁèĤĸ":108556,"æįIJ款":108557,"å²Ĥ":108558,"ç¾İæĻ¯":108559,"è¿ĺ羣":108560,"æĭ¥æĬ±":108561,"身ä½ĵåģ¥åº·":108562,"æ·±å¤Ħ":108563,"çľ¼ç¥ŀ":108564,"çļĦ形象":108565,"ä¼ĺè¶Ĭ":108566,"å½ĵæĪIJ":108567,"åĮºåĪĨ":108568,"åİ»éϤ":108569,"注å®ļ":108570,"å§IJ妹":108571,"åĮºåĨħ":108572,"é©ļ":108573,"æļĹ示":108574,"æĺİ亮":108575,"æħ°éĹ®":108576,"å¸Ĥåľºä»½é¢Ŀ":108577,"çĮªèĤī":108578,"çļĦèµĦéĩij":108579,"åİĨç»ı":108580,"å§ĭç»ĪåĿļæĮģ":108581,"çĶŁæľº":108582,"ä¸į顾":108583,"éĩijåĪļ":108584,"大声":108585,"éĻķ西çľģ":108586,"é²į":108587,"åĨľä¸ļåĨľæĿij":108588,"æľī害":108589,"éŨè¯Ĭ":108590,"æ¯ıä¸Ģ次":108591,"çļĦåĽłç´ł":108592,"é¢Ŀå¤ĸ":108593,"åݿ级":108594,"çļĩåIJİ":108595,"åĽ½ä¼ģ":108596,"é¦ĸéĢī":108597,"ç¼ĸåĨĻ":108598,"æĭ¿èµ·":108599,"åģ·åģ·":108600,"ä¸İä¸ŃåĽ½":108601,"åįĸå®¶":108602,"ç»Ļä»ĸ们":108603,"ç¥ŀè¯Ŀ":108604,"åŃ¸æł¡":108605,"æĪijä¸Ģ缴":108606,"çŁ¥éģĵäºĨ":108607,"åįĴ":108608,"åĴĮåľ°åĮº":108609,"ä»Ģä¹Īéĥ½":108610,"çͻ家":108611,"æľ¬çĿĢ":108612,"ä½ĻåIJį":108613,"审çIJĨ":108614,"ä¸ĢåIJij":108615,"åıijå±ķè¶ĭåĬ¿":108616,"åĮºéĹ´":108617,"注åĨĮèµĦæľ¬":108618,"çIJ¦":108619,"ä¸įåı¯ä»¥":108620,"çļĦåĦ¿åŃIJ":108621,"å̼çıŃ":108622,"ä¸¥æł¼çļĦ":108623,"å®ŀä½ĵç»ıæµİ":108624,"æľīæĿĥ":108625,"æĪijåıĪ":108626,"éĵ¶æ²³":108627,"ç«ĭ马":108628,"æĿĢäºĨ":108629,"åĮħ容":108630,"管家":108631,"身é«Ķ":108632,"éĵħ":108633,"å°ıåŃIJ":108634,"管çIJĨç³»ç»Ł":108635,"æľīçļĦ人":108636,"é£İç͵":108637,"æĻºèĥ½åζéĢł":108638,"精确":108639,"æĭĽåķĨå¼ķ":108640,"æĭĽåķĨå¼ķèµĦ":108641,"äºĮæīĭ车":108642,"åİ¿å§Ķ":108643,"èīºäºº":108644,"å¥ķ":108645,"è¿İæĿ¥äºĨ":108646,"ç»ĵæĿŁäºĨ":108647,"çļĦä¼łç»Ł":108648,"æĭ¼æIJı":108649,"奥迪":108650,"çĸijæĥij":108651,"ä¹ĭæĹ¥èµ·":108652,"æłĩå¿ĹçĿĢ":108653,"åľ°åįĢ":108654,"è¯łéĩĬ":108655,"åĪ°æľŁ":108656,"åħ¨éĥ½":108657,"çŁŃæļĤ":108658,"æĺ¯æĪijåĽ½":108659,"æĪijå·²ç»ı":108660,"æ»´æ»´":108661,"天èµĭ":108662,"对她":108663,"åį«çĶŁéĹ´":108664,"çĶŁäº§åŁºåľ°":108665,"æĹ¥è®°":108666,"çļĦæķĻåѦ":108667,"åĵĩ":108668,"æ°ijäºĭ":108669,"è¿ĺåİŁ":108670,"æīĭä¸ŃçļĦ":108671,"çļĦèī¯å¥½":108672,"æ·«":108673,"ä¸Ńåħ±ä¸Ń央":108674,"åĪĥ":108675,"åĵĦ":108676,"åľ¨ä»ĸçļĦ":108677,"å°Īæ¥Ń":108678,"åľºéĿ¢":108679,"éĤ»å±ħ":108680,"çĹĴ":108681,"å¦Ħ":108682,"å¤ĸç§ij":108683,"ä¸įéĢĤ":108684,"举åĬŀçļĦ":108685,"éĤ¹":108686,"åħļçļĦ建设":108687,"çĻ¼è¡¨":108688,"è·¨çķĮ":108689,"æ²īæ·Ģ":108690,"大çīĩ":108691,"è¶Ĭé«ĺ":108692,"å°Ĩæĺ¯":108693,"è§īéĨĴ":108694,"åĤ¨åŃĺ":108695,"å¢ŀ大":108696,"ä¸į让":108697,"æķ´å½¢":108698,"å¹³åı°ä¸Ĭ":108699,"åĩłä½į":108700,"è¯īæ±Ĥ":108701,"好ä¸į好":108702,"åľį":108703,"æĸĩæľ¬":108704,"é̲åħ¥":108705,"ç´į":108706,"æł¹æĵļ":108707,"èįīæ¡Ī":108708,"åħŃ个":108709,"åĭ¿":108710,"åζæĪIJ":108711,"饮水":108712,"æ°¸æģĴ":108713,"èĩªæĿĢ":108714,"åı¸é©¬":108715,"éļ¾çĤ¹":108716,"为æĪij们":108717,"å¼§":108718,"åī©ä¸ĭçļĦ":108719,"åĩĨå¤ĩ好":108720,"çļĦæľĢä½³":108721,"èģĶåIJĪä¼ļ":108722,"æĤ£èĢħçļĦ":108723,"æĪijä¸įçŁ¥éģĵ":108724,"ä¸ĭä¸Ģ个":108725,"åıijå±ķæĸ¹åIJij":108726,"笨":108727,"æīĢ以æĪij们":108728,"åĨĻäºĨ":108729,"éĢłæĪIJäºĨ":108730,"æ²Ļæ¼ł":108731,"çŃĽéĢī":108732,"çģ¾åĮº":108733,"ä¸Ĭçľĭ":108734,"éħ¶":108735,"æ»ļåĬ¨":108736,"éļ¾åħį":108737,"åIJīåĪ©":108738,"ä¸Ģä¸Ģ":108739,"ç²¾å¯Ĩ":108740,"伸æīĭ":108741,"礼仪":108742,"åħ¨æĺ¯":108743,"è¶Ĭ大":108744,"ä¸Ńæłĩ":108745,"åıĸåĨ³":108746,"åıĸåĨ³äºİ":108747,"éĢĶä¸Ń":108748,"讨åİĮ":108749,"æīĭåĨĮ":108750,"第ä¹Ŀ":108751,"åŃĶåŃIJ":108752,"çĦ¶å¾Į":108753,"ä¸Ģåħ±":108754,"æµ·æĬ¥":108755,"款å¼ı":108756,"æķ´å¤©":108757,"è¾¹çķĮ":108758,"路边":108759,"æĻĭ级":108760,"åIJIJæ§½":108761,"çļĦåħ³æ³¨":108762,"æĪij没æľī":108763,"å
°±æĺ¯åľ¨":108764,"缮çļĦæĺ¯":108765,"åį³ä½¿æĺ¯":108766,"é¡¶å°ĸ":108767,"å·²ç»ıåľ¨":108768,"å®īåħ¨éļIJæĤ£":108769,"æłĩæĿĨ":108770,"åįĹéĢļ":108771,"ä¼ļ对":108772,"座ä½į":108773,"èµ¢å¾ĹäºĨ":108774,"åİŁæĿ¥çļĦ":108775,"身为":108776,"书åºĹ":108777,"è¢Ńåĩ»":108778,"ä»ĬæĻļ":108779,"以èī²":108780,"以èī²åĪĹ":108781,"æĬĸéŁ³":108782,"åį´æ²¡æľī":108783,"丧失":108784,"çļĦå±ĢéĿ¢":108785,"åįģåĽĽäºĶ":108786,"çŃī缸åħ³":108787,"æ±ĩæĢ»":108788,"å¤ĸ表":108789,"为æ°ij":108790,"éľĩæĥĬ":108791,"å¥Ĺè·¯":108792,"çĬ¯ç½ªå«Įçĸij":108793,"å°Ĩ以":108794,"çİĩé¢Ĩ":108795,"éħĴåIJ§":108796,"è¡Įä¸ļåıijå±ķ":108797,"å¹´èĩ³":108798,"åύæĿIJ":108799,"åĴĮæĬĢæľ¯":108800,"æľĢå°ı":108801,"è¿Ļä¸ĢåĪĩ":108802,"èģĮç§°":108803,"å½ĵä½ľ":108804,"æİĢèµ·":108805,"åĴĭ":108806,"ä¸Ńéĥ¨":108807,"æīĭèĩĤ":108808,"ç½¢äºĨ":108809,"媳å¦ĩ":108810,"æ´½è°Ī":108811,"æĹ¶ä»£ä¸ŃåĽ½":108812,"人çĶŁçļĦ":108813,"æŀģéĻIJ":108814,"ç¦Ħ":108815,"åĮºæĶ¿åºľ":108816,"æľ¬éĴ±":108817,"礼åĵģ":108818,"çļĦéĤ£ä¸ª":108819,"ä¾¦æŁ¥":108820,"太å¤ļçļĦ":108821,"å®ŀæĸ½æĸ¹æ¡Ī":108822,"é«ĺæłĩåĩĨ":108823,"æĮĩæĮ¥éĥ¨":108824,"å̾æĸľ":108825,"çī¹èī²ç¤¾ä¼ļ":108826,"çµIJæŀľ":108827,"éĴ»çٳ":108828,"ç§»æ¤į":108829,"çī¹ç§į":108830,"èĩªæĦ¿":108831,"æĭľçĻ»":108832,"åįķ身":108833,"åį´åıĪ":108834,"åĪ¥äºº":108835,"åIJĪè§Ħ":108836,"æľºç͵":108837,"çĦı":108838,"å½ĵåīįä½įç½®":108839,"ä¹°å®¶":108840,"åIJĪ约":108841,"èĤ©èĨĢ":108842,"为åĩĨ":108843,"å®¶è£ħ":108844,"çļĦçĥŃæĥħ":108845,"éĿŀéģĹ":108846,"çļĦéŃħåĬĽ":108847,"åİŁåijĬ":108848,"社ä¼ļåIJĦçķĮ":108849,"ä¹°çļĦ":108850,"å¤ļåIJĥ":108851,"éĽķå¡ij":108852,"èµ·ä¹ī":108853,"åĬłåī§":108854,"éĤ£ä¸ĢåĪ»":108855,"å°Ĩè¿Ľä¸ĢæŃ¥":108856,"æ¡ĤæŀĹ":108857,"æĽ´å¼º":108858,"对ä¼ģä¸ļ":108859,"æĹłæĦı":108860,"ä¹łè¿ijå¹³æĸ°":108861,"æµģ失":108862,"微软":108863,"çĽ¸å¯¹äºİ":108864,"座è°Īä¼ļ":108865,"主èIJ¥ä¸ļ":108866,"主èIJ¥ä¸ļåĬ¡":108867,"ç§ģåĭŁ":108868,"å±ķ示äºĨ":108869,"常æĢģåĮĸ":108870,"è²´":108871,"符åı·":108872,"å¹´è½»çļĦ":108873,"å°±éľĢè¦ģ":108874,"ä¹ŁæĽ¾":108875,"çļĦæĥħ绪":108876,"è¾¾æłĩ":108877,"èĩ¨":108878,"ä½įå±ħ":108879,"ä»ħ为":108880,"é¦ĸå®¶":108881,"éĺ´éĺ³":108882,"ä¸įåĨįæĺ¯":108883,"åĽłä¸ºå®ĥ":108884,"ä¼ģä¸ļåľ¨":108885,"çĺ¾":108886,"åIJ¬è§ģ":108887,"åİŁæľī":108888,"åζè£ģ":108889,"å¯Ĥå¯ŀ":108890,"éĢļè¿ĩ对":108891,"æ»ijéĽª":108892,"è¿Ļå¼ł":108893,"çļĦçIJĨè§£":108894,"æĸ°ä¸ŃåĽ½":108895,"è¿ĻåĦ¿":108896,"ä½İä»·":108897,"æĥ³è¿ĩ":108898,"çļĦä¿¡å¿ĥ":108899,"建çŃijçī©":108900,"çļĦé¢ľèī²":108901,"ä¸įåºĶ该":108902,"æĹłçĸijæĺ¯":108903,"å¼ķèµ·äºĨ":108904,"åħ¨åijĺ":108905,"æĿ°åĩº":108906,"è¿Ļæĺ¯æĪij":108907,"誰":108908,"èĺĩ":108909,"éĺµåľ°":108910,"åħħå̼":108911,"çŁ¿ä¸ļ":108912,"çĿĢä»ĸ":108913,"信访":108914,"ä¸ĩè¾¾":108915,"æij©æĵ¦":108916,"å¼Ģ端":108917,"èı²å¾ĭ":108918,"èı²å¾ĭ宾":108919,"车åŃIJ":108920,"æľ¬èº«çļĦ":108921,"çģ«è½¦ç«Ļ":108922,"常å·ŀ":108923,"为代表":108924,"为代表çļĦ":108925,"广ç͵":108926,"亲人":108927,"åı³æīĭ":108928,"éĽĨè£ħ":108929,"éĽĨè£ħç®±":108930,"çļĦåį°è±¡":108931,"æ©Łæľĥ":108932,"åĮĨåĮĨ":108933,"åħīç͵":108934,"大æĸ¹":108935,"è¿ĺæľª":108936,"åΩ好":108937,"ç»Ŀ大å¤ļæķ°":108938,"åľ¨è¿Ļç§į":108939,"ä¸Ģç»Ħ":108940,"æĸ°èĤ¡":108941,"转åıij":108942,"æ³ķåºŃ":108943,"æĹłæīĢ":108944,"éģĵè·¯ä¸Ĭ":108945,"çŁ¿å±±":108946,"èijī":108947,"æĶ¶åĽŀ":108948,"ç§°ä¹ĭ":108949,"ç§°ä¹ĭ为":108950,"æıŃéľ²":108951,"åı£å²¸":108952,"åIJ¼":108953,"å¿ĥæĥ³":108954,"çļĦ梦æĥ³":108955,"éĽ¯":108956,"ä¹ĭåĪĿ":108957,"å¥ĸ项":108958,"订éĺħ":108959,"èĵĿ天":108960,"åĿ¦åħĭ":108961,"ç«ĭæ¡Ī":108962,"èģĶæīĭ":108963,"ä½Ĩæĺ¯æĪij":108964,"帮æĪij":108965,"ä»ħ代表":108966,"说æĪij":108967,"çļĦè¶ĭåĬ¿":108968,"æ¯Ķè¾ĥ大":108969,"èµ°å»Ĭ":108970,"éĩįçĤ¹é¡¹çĽ®":108971,"èµĮåľº":108972,"åIJįçīĩ":108973,"æĦŁåı¹":108974,"åľ¨åľ°ä¸Ĭ":108975,"åıijçĥŃ":108976,"èĮĥçķ´":108977,"çļĦéģĵè·¯":1
08978,"éĩijèī²":108979,"ä»ĸåıĪ":108980,"ä¼ļ产çĶŁ":108981,"æ°ijåĽ½":108982,"å®ĺæĸ¹ç½ijç«Ļ":108983,"æĶ¶çĽĬçİĩ":108984,"çļĦåΰæĿ¥":108985,"çļĦåĬŀæ³ķ":108986,"æĶ¹åζ":108987,"ä¸ĩç§ij":108988,"ä¸įäºĪ":108989,"è¿ĻäºĽéĹ®é¢ĺ":108990,"çαä¸Ĭ":108991,"çIJĥåľº":108992,"责令":108993,"æİĪ课":108994,"åľ¨é¦Ļ港":108995,"ç»Ĩèħ»":108996,"å¤ļä¸ĩ":108997,"åIJĮå¹´":108998,"大使":108999,"æĸĭ":109000,"ä¹Łä¸º":109001,"æĥłå·ŀ":109002,"åIJī祥":109003,"çͰåĽŃ":109004,"åĽ½å®¶éĺŁ":109005,"éĩįçĶŁ":109006,"åľ¨åħ¶":109007,"é¦Ļåij³":109008,"è´Łèį·":109009,"亲åĪĩ":109010,"èĩªè±ª":109011,"没éĶĻ":109012,"åĽłä¸ºåľ¨":109013,"æĺŁæĺŁ":109014,"éĤij":109015,"è¿ĺæľīå¾Īå¤ļ":109016,"æij©æīĺ":109017,"æij©æīĺ车":109018,"æŃ¥è¡Į":109019,"管çIJĨä½ĵç³»":109020,"èĦļä¸ĭ":109021,"éģİåİ»":109022,"æ±īè¯Ń":109023,"对ä¸įèµ·":109024,"çļĦç»ıåİĨ":109025,"åıĬ缸åħ³":109026,"ä¸įå°ij人":109027,"éĩįç£ħ":109028,"åĬ³åĬ¨èĢħ":109029,"大åĬĽåıijå±ķ":109030,"æĢİä¹Īåģļ":109031,"çĭĹçĭĹ":109032,"举åįĹäºļ":109033,"åĭĩäºİ":109034,"åħ¬éĸĭ":109035,"çĵ·çłĸ":109036,"åıĤçħ§":109037,"广æĴŃç͵è§Ĩ":109038,"举åĬ¨":109039,"æ±Łè¥¿çľģ":109040,"æķĪèĥ½":109041,"å͝æľī":109042,"éĿ¢è²Į":109043,"èĩªåĬ¨é©¾é©¶":109044,"æ¦ľåįķ":109045,"å½ĵæĪij们":109046,"仲è£ģ":109047,"æľ¨æĿIJ":109048,"ç±³åħ°":109049,"çϽéĵ¶":109050,"çļĦ人éĥ½":109051,"å°±åĥıæĺ¯":109052,"æŃ¥åħ¥":109053,"åįłç͍":109054,"åĩ»è´¥":109055,"让大家":109056,"ä¼ļè®©ä½ł":109057,"åİ¿æĶ¿åºľ":109058,"è¦ģç͍":109059,"çŃīå½¢å¼ı":109060,"åįĩé«ĺ":109061,"责任æĦŁ":109062,"å¤ĩç͍":109063,"ä»ĸ认为":109064,"æ¸ħåįİ大åѦ":109065,"ä»ĸèĩªå·±":109066,"éĸ±è®Ģ":109067,"太平æ´ĭ":109068,"éĶģå®ļ":109069,"çŃĨ":109070,"è¿Ļçīĩ":109071,"æī§æĶ¿":109072,"è¿ĶåĽŀæIJľçĭIJ":109073,"å°±æŃ¤":109074,"éģĩåΰäºĨ":109075,"å¼Ģå¹ķå¼ı":109076,"管çIJĨéĥ¨éŨ":109077,"å§¿åĬ¿":109078,"设æĥ³":109079,"åĽĽåŃ£":109080,"æĬĢæľ¯äººåijĺ":109081,"å·®çĤ¹":109082,"è¾ŀèģĮ":109083,"èĢģ師":109084,"çļĦæĦŁåıĹ":109085,"ä¹ŁéĿŀ常":109086,"å¹´ä¸ĬåįĬå¹´":109087,"æĢªçī©":109088,"èĮĥæĸĩ":109089,"æĪĺå½¹":109090,"åIJ«ä¹ī":109091,"åħ¨è¿ĩç¨ĭ":109092,"èĢĮéĿŀ":109093,"éĢļ讯åijĺ":109094,"è¿Ļæł·æīįèĥ½":109095,"æľºç»Ħ":109096,"è£ı":109097,"çķ¶çĦ¶":109098,"èµĮåįļ":109099,"åIJĦæľī":109100,"å·¥ä½ľæľºåζ":109101,"äºĭåIJİ":109102,"åī§éĻ¢":109103,"å±ĬæĹ¶":109104,"åĺ´éĩĮ":109105,"主线":109106,"ä¸ĢåľĪ":109107,"主è¦ģåİŁåĽł":109108,"å°¸ä½ĵ":109109,"åĮ»çĸĹåĻ¨æ¢°":109110,"ä½łæĢİä¹Ī":109111,"ä½Ĩçͱäºİ":109112,"æĹ¶ç©º":109113,"çĶ·æľĭåıĭ":109114,"çĶľèľľ":109115,"é«ĺåľ°":109116,"æĻĸ":109117,"èĴIJéĽĨ":109118,"åĩĿèģļåĬĽ":109119,"å¤ĩåıĹ":109120,"æĸĩåĪĽ":109121,"马æĿ¥":109122,"马æĿ¥è¥¿äºļ":109123,"æŁ´æ²¹":109124,"使人":109125,"æķĻä¼ļ":109126,"ç§ĭ天":109127,"æĺİçıł":109128,"åħŃåįģ":109129,"çݯå¢ĥä¸Ń":109130,"æ¸ħæĻ¨":109131,"积æŀģåıĤä¸İ":109132,"å·ħå³°":109133,"ä¸ºæľŁ":109134,"çѾåŃĹ":109135,"æĦŁæ¿Ģ":109136,"ç§ĭåŃ£":109137,"æĿijåŃIJ":109138,"æ¢ħ西":109139,"æļ´éĽ¨":109140,"çĶŁæ´»åľ¨":109141,"çªĹæĪ·":109142,"æģ¶åĬ£":109143,"纯粹":109144,"åľ¨æİ¥åıĹ":109145,"没èĥ½":109146,"è¡Į人":109147,"åĭº":109148,"æĭ¨æīĵ":109149,"ä½ľåĩºäºĨ":109150,"çļĦ主é¢ĺ":109151,"æľªä¾Ĩ":109152,"ä¸ŃæľĢ":109153,"æ¾ľ":109154,"é«ĺè¡Ģåİĭ":109155,"åħ´èµ·":109156,"æŃ£èĥ½éĩı":109157,"åŁ¹è®ŃçıŃ":109158,"æİ¥åħ¥":109159,"çĦ¶åIJİåĨį":109160,"åѦçĶŁä»¬":109161,"é¢ĨåħĪçļĦ":109162,"çģ«çĥŃ":109163,"ä¸ĵèģĮ":109164,"æĪĸèĢħ说":109165,"建è¨Ń":109166,"é»ı":109167,"对åħ¬åı¸":109168,"çľīçļĦ":109169,"åħīèį£":109170,"å½ĵåľº":109171,"éĿ¢åŃIJ":109172,"èµĦ产管çIJĨ":109173,"æĹ¶æľŁçļĦ":109174,"çŀİ":109175,"åįİ举":109176,"åıĪä¸Ģ次":109177,"èĥİåĦ¿":109178,"å®ļçĤ¹":109179,"头çĹĽ":109180,"æ¶²ä½ĵ":109181,"æĺ¯ä¸Ģä½į":109182,"帽åŃIJ":109183,"å¹´èµ·":109184,"ä¸įä½İäºİ":109185,"è¾ĥå°ij":109186,"éĿ¢ä¸´çĿĢ":109187,"å±Ĥå±Ĥ":109188,"èĿ´èĿ¶":109189,"èī°èĭ¦":109190,"éĺ¿æł¹":109
191,"éĺ¿æł¹å»·":109192,"æ¦Ĥæĭ¬":109193,"请éĹ®":109194,"èµ·åºĬ":109195,"å±Ģå±Ģéķ¿":109196,"稳åģ¥":109197,"å¦ĤæŀľæĪij们":109198,"éħĴç²¾":109199,"æĪ·åı£":109200,"æĦŁæĤŁ":109201,"æĪij们éľĢè¦ģ":109202,"æĬĢèīº":109203,"èĩªåªĴä½ĵ":109204,"è¿ĽåĮĸ":109205,"æ¿ĢçĥĪçļĦ":109206,"ä½ĵ温":109207,"èļķ":109208,"èĩ´è¾ŀ":109209,"宪æ³ķ":109210,"ä¸ĢçŃīå¥ĸ":109211,"çĵ¶é¢Ī":109212,"æĥłæ°ij":109213,"èµ°è·¯":109214,"çݰ任":109215,"åķĨéĩı":109216,"ä¸ĭ车":109217,"åĪł":109218,"責任":109219,"èŀįåIJĪåıijå±ķ":109220,"ç´łæĿIJ":109221,"油价":109222,"åģļ人":109223,"çŀª":109224,"æĶ¹éĿ©åĪĽæĸ°":109225,"çļĦåĮºåĪ«":109226,"è·¨å¢ĥç͵åķĨ":109227,"æ¶īåıĬåΰ":109228,"æīĺ管":109229,"æĪijè¿ĺæĺ¯":109230,"åĿIJæłĩ":109231,"ç½ij讯":109232,"å½ĵåľ°çļĦ":109233,"追溯":109234,"åľŁè̳":109235,"åľŁè̳åħ¶":109236,"åºķä¸ĭ":109237,"åĩłåįģå¹´":109238,"ç©¿è¿ĩ":109239,"çĶŁæĢģæĸĩæĺİ":109240,"æİ¨èĸ":109241,"æİ¨èĸ¦":109242,"éłĨ":109243,"åĴ³åĹ½":109244,"åĪĨæĪIJ":109245,"çĹķ迹":109246,"æĪ·ç±į":109247,"éĥ½ä¸įèĥ½":109248,"æĻļä¼ļ":109249,"åĢ©":109250,"ä½ĵåĬĽ":109251,"è¿Ļ个èģĮä¸ļ":109252,"æĹłå½¢":109253,"åıªæĥ³":109254,"è¿Ľåıĸ":109255,"æĿ̿ѻ":109256,"èĦĬ":109257,"äºijåįĹçľģ":109258,"æľªçŁ¥":109259,"ç¾İèģĶ":109260,"ç¾İèģĶåĤ¨":109261,"å¤ĸå½¢":109262,"诱æĥij":109263,"çĽ£":109264,"è¡Į使":109265,"åłĨ积":109266,"çĨŁç»ĥ":109267,"éĺIJè¿°":109268,"æľĢ大éĻIJ度":109269,"å·¡æŁ¥":109270,"夺åĨł":109271,"ä¼ģä¸ļæĸĩåĮĸ":109272,"çĭ®åŃIJ":109273,"ä¿Ŀå®Ī":109274,"ä¸ºæł¸å¿ĥçļĦ":109275,"æī©æķ£":109276,"åζéĢłåķĨ":109277,"æŁĶ软":109278,"为ä¸Ģä½ĵçļĦ":109279,"游çİ©":109280,"çĶŁçĹħ":109281,"幫åĬ©":109282,"åͱæŃĮ":109283,"æīįåı¯ä»¥":109284,"宽æĿ¾":109285,"è¦ģæ¯Ķ":109286,"æĺ¯æĢİæł·":109287,"çģ°èī²":109288,"çİĭåĽ½":109289,"æIJħæĭĮ":109290,"计éĩı":109291,"åij¨åĽ´çļĦ":109292,"æĻºèĥ½æīĭæľº":109293,"常åĬ¡":109294,"常åĬ¡åī¯":109295,"é©´":109296,"å°Ĩè¿ij":109297,"寻常":109298,"ä¸ŃåĽ½å¸Ĥåľº":109299,"容åύ":109300,"å±±ä¸Ĭ":109301,"èĥĮåIJİçļĦ":109302,"亲å¯Ĩ":109303,"æīĢ以说":109304,"éİ®":109305,"çļĦçIJĨçͱ":109306,"大åŁİå¸Ĥ":109307,"常年":109308,"æĹħ游ä¸ļ":109309,"å°±æĺ¯è¿Ļæł·":109310,"åĨįæĿ¥":109311,"é«ĺä½į":109312,"åĨħ饰":109313,"æŀĦéĢł":109314,"ä¸Ģèµ·æĿ¥":109315,"çͳè«ĭ":109316,"å·²ç»ıå¼Ģå§ĭ":109317,"çļĦåĬ¨ä½ľ":109318,"被迫":109319,"éģįå¸ĥ":109320,"åīĸæŀIJ":109321,"å°ıäºĭ":109322,"å¿ĥä¸ŃçļĦ":109323,"ä½ĵåζæĶ¹éĿ©":109324,"çļĩå®¶":109325,"æķĻåłĤ":109326,"åIJĥå®Į":109327,"åĽ½æ°ijåħļ":109328,"æĺİç¡®äºĨ":109329,"åıijå±ķè§ĦåĪĴ":109330,"第ä¸ĢæŃ¥":109331,"å¾Ĺèµ·":109332,"åľ¨åĵª":109333,"çļĦè·¯ä¸Ĭ":109334,"é»Ķ":109335,"çķ¶æĻĤ":109336,"大åĬĽæĶ¯æĮģ":109337,"åıĮéĩį":109338,"çŁ¥éģĵèĩªå·±":109339,"åIJĪä½ľåįıè®®":109340,"æ°ĶåĬ¿":109341,"éķ¿æķĪæľºåζ":109342,"ç½ķè§ģ":109343,"åĽŀæĿ¥äºĨ":109344,"ä»ĸä¼ļ":109345,"ä¸Ńæĸ°":109346,"ä¸Ńæĸ°ç½ij":109347,"çļĦåķĨåĵģ":109348,"èµłéĢģ":109349,"決å®ļ":109350,"å¸ĤåľºçĽij管":109351,"çķĻåѦçĶŁ":109352,"ç͵åİĭ":109353,"äºļ马":109354,"äºļ马éĢĬ":109355,"è¿ĺæĺ¯æ¯Ķè¾ĥ":109356,"ä¿ĥè¿ĽäºĨ":109357,"æµģåħ¥":109358,"æijĦåĥı":109359,"æijĦåĥı头":109360,"æıIJåıĬ":109361,"åıijæİĺ":109362,"æī¾åĩº":109363,"æ¢Ŀä»¶":109364,"ç¹¼çºĮ":109365,"æĪijåĸľæ¬¢":109366,"å¥İ":109367,"æ¦ľæł·":109368,"å¼ĢèĬ±":109369,"æ²īéĩį":109370,"åŁºåĩĨ":109371,"ä»ħä»ħæĺ¯":109372,"轨éģĵ交éĢļ":109373,"åĶIJå±±":109374,"çŃīä¸Ģç³»åĪĹ":109375,"ä¸įè¿ĩæĺ¯":109376,"åŃĺåľ¨çĿĢ":109377,"èĬ±çĶŁ":109378,"夷":109379,"ç»Īç©¶":109380,"ä¹Łæĺ¯ä¸Ģ个":109381,"åįģåŃĹ":109382,"èĸªéħ¬":109383,"伤å¿ĥ":109384,"æĺ¥ç§ĭ":109385,"åĨ·åį´":109386,"ç²¾çģµ":109387,"çļĦåľ°åĽ¾":109388,"æ¯Ķçī¹":109389,"æ¯Ķçī¹å¸ģ":109390,"æĢ§åĪ«":109391,"ä½Ļä¸ĩåħĥ":109392,"ä¸įå¿ĺåĪĿå¿ĥ":109393,"å¿ĥçĸ¼":109394,"æĽ²çº¿":109395,"é«ĺä½İ":109396,"è¦ıå®ļ":109397,"æĻ¯èī²":109398,"è¦ģ说":109399,"åħ¬åı¸å°Ĩ":109400,"æ¶²åİĭ":109401,"è¿Ŀ约":109402,"åİļ度":109403,"
åºŀ大çļĦ":109404,"è¿ĺæĺ¯å¾Ī":109405,"é¦ĸåħĪæĺ¯":109406,"çµ²":109407,"åĬ¡å®ŀ":109408,"並ä¸Ķ":109409,"å¢ŀè¿Ľ":109410,"ç»Ħç»ĩå¼Ģå±ķ":109411,"èµ·æĿ¥äºĨ":109412,"è¾ĥå°ı":109413,"导游":109414,"ä¸¤åľ°":109415,"ç¿ĺ":109416,"çģ¿çĥĤ":109417,"é£İéĩĩ":109418,"æĶ¯çº¿":109419,"æĶ¯çº¿ä»»åĬ¡":109420,"娱ä¹IJåľĪ":109421,"天津å¸Ĥ":109422,"åĮħåĽ´":109423,"æľ¬èµĽåŃ£":109424,"éĩįè¦ģ讲è¯Ŀ":109425,"åıĮåIJij":109426,"åįİ丽":109427,"éͤ":109428,"åĦ¿å¥³":109429,"åįĸåĩº":109430,"ä¾Ĩ說":109431,"ä»ĭç»įä¸Ģä¸ĭ":109432,"åIJ¦è®¤":109433,"åĭĿ":109434,"æĻ®éĢļ人":109435,"çļĦåĬ¨åĬĽ":109436,"涨åģľ":109437,"åŁºéĩij管çIJĨ":109438,"ä¸Ģ个éĩįè¦ģ":109439,"è¿IJæ²³":109440,"çħŀ":109441,"è´¢æĶ¿éĥ¨":109442,"è¡Įä¸ļåįıä¼ļ":109443,"éĥ½å°Ĩ":109444,"è¨Ģ论":109445,"ä¸ĭä¾Ĩ":109446,"墨西":109447,"墨西åĵ¥":109448,"åĽłä¸ºä»ĸ们":109449,"æĢİä¹ĪåĽŀäºĭ":109450,"åĬłå¤§å¯¹":109451,"èĬŃ":109452,"çīĮåŃIJ":109453,"ä¼ļ使":109454,"妹åŃIJ":109455,"ç«Ļéķ¿":109456,"å¿ħå¤ĩ":109457,"æłijæľ¨":109458,"æģ¶æĦı":109459,"æ²³éģĵ":109460,"å¯Įè£ķ":109461,"ç¹ģåįİ":109462,"ä»£è¡¨åĽ¢":109463,"æµij身":109464,"é¦ĸä½į":109465,"èĪªç©ºåħ¬åı¸":109466,"éĽ»å½±":109467,"ä¸ĵè¾ij":109468,"æ°´æºIJ":109469,"ä¸Ńæ¯Ĵ":109470,"並ä¸į":109471,"èĢĮåİ»":109472,"éĥĿ":109473,"äºİæŃ¤":109474,"æĸĩåĮĸ建设":109475,"èĤ¯å®ļä¼ļ":109476,"å¸ĮæľĽå¤§å®¶":109477,"æııåĨĻ":109478,"ä½İè°ĥ":109479,"æĸ°åħ´äº§ä¸ļ":109480,"æ·Ħåįļ":109481,"æĶ¾å¼Ģ":109482,"çļĦæĢ§æł¼":109483,"çĸ¾çĹħçļĦ":109484,"æķ´é¡¿":109485,"线ä¸Ĭ线ä¸ĭ":109486,"éĢī项":109487,"çļĦ认åı¯":109488,"æķ´é½IJ":109489,"çĶļä¹Ī":109490,"çľģåĨħ":109491,"åı¤äºº":109492,"æ°ijä¿Ĺ":109493,"çī¡ä¸¹":109494,"éŨçªĹ":109495,"éĤ£æł·çļĦ":109496,"çĽijäºĭä¼ļ":109497,"ç¿¡ç¿ł":109498,"禹":109499,"åįĥä¸ĩä¸įè¦ģ":109500,"æĶ¶ç¼©":109501,"çļĦæĸĩåŃĹ":109502,"åĴĮå°ļ":109503,"æĮĩ令":109504,"åħ±äº§åħļåijĺ":109505,"çļĦçĪ¶äº²":109506,"å®Įå·¥":109507,"åĬ¡å·¥":109508,"马æĭī":109509,"马æĭīæĿ¾":109510,"æµĭè¯Ħ":109511,"å²ļ":109512,"ä¸įåģļ":109513,"ä¸ĥå¹´":109514,"åĿĩä»·":109515,"主è§Ĥ":109516,"å¾Īä¸įéĶĻ":109517,"èĤ¡ä¸ľå¤§ä¼ļ":109518,"äºĶä¸Ģ":109519,"é£İåIJ¹":109520,"å¼Ģéĩĩ":109521,"è¿Ļä¹Ī大":109522,"èĥ½çľĭåΰ":109523,"èĢĥè¯Ħ":109524,"åį³ä¾¿æĺ¯":109525,"çݰ代åĨľä¸ļ":109526,"æ¯Ķè¾ĥé«ĺ":109527,"è¦ģçľĭ":109528,"没äºĨ":109529,"解決":109530,"çݯæ¯Ķ":109531,"åĨ²åĬ¨":109532,"æ·±å¤ľ":109533,"åĩłåįĥ":109534,"ä¿ı":109535,"ç½ijæ°ij":109536,"就没":109537,"ä»ĸ表示":109538,"éĩıåŃIJ":109539,"æĹ©é¤IJåĬłçĽŁ":109540,"åįĬå²Ľ":109541,"æIJŀç¬ij":109542,"ä¸ĬæĬ¥":109543,"審":109544,"é¢Ħ订":109545,"èľĤèľľ":109546,"æŁ¥æī¾":109547,"ä¼ĹæīĢ":109548,"ä¼ĹæīĢåij¨":109549,"ä¼ĹæīĢåij¨çŁ¥":109550,"æĹ©æĹ¥":109551,"åıijæī¬":109552,"åĴĮ个人":109553,"åĬłåħ¥äºĨ":109554,"åĸ®ä½į":109555,"åĪĨæĺİ":109556,"第ä¸Ģæī¹":109557,"ç¾İåĨĽ":109558,"æĿĢæīĭ":109559,"éŨå¤ĸ":109560,"åķĨåľĪ":109561,"ä¸ĢåĪ»":109562,"çļĦçľ¼ç¥ŀ":109563,"éľĦ":109564,"äºĽä»Ģä¹Ī":109565,"åĬłæ·±":109566,"æ¯ıä½į":109567,"å¸ĤéĿ¢ä¸Ĭ":109568,"åıĶåıĶ":109569,"çļĦéĤ£ç§į":109570,"粤港澳":109571,"è´´å¿ĥ":109572,"æĸĩåĮĸ产ä¸ļ":109573,"红æĹĹ":109574,"åĺīåħ´":109575,"æĶ¶çĽĺ":109576,"å®ĮæĪIJåIJİ":109577,"ä¼ģä¸ļ管çIJĨ":109578,"纵横":109579,"ä¸įä¿¡":109580,"æĪIJéĥ½å¸Ĥ":109581,"æ´Ĺ澡":109582,"举è¡ĮçļĦ":109583,"çĶ¢çĶŁ":109584,"ç©¿ä¸Ĭ":109585,"åĪļ好":109586,"åħī线":109587,"æīĵæŀ¶":109588,"è¿Ļæľ¬ä¹¦":109589,"åĶ®åIJİæľįåĬ¡":109590,"åĩłåĪĨ":109591,"ä¸Ĭ次":109592,"ä¸įåĪĨ":109593,"产åIJİ":109594,"éģ¿å¼Ģ":109595,"ç»Īæŀģ":109596,"代表大ä¼ļ":109597,"æ¼ĶæĬĢ":109598,"åĽŀè´Ń":109599,"åŃ¦è´¹":109600,"éĺ»ç¢į":109601,"ä¸Ģ大æī¹":109602,"竣工":109603,"åĨ³å®ļäºĨ":109604,"ä½Ĩå¦Ĥæŀľ":109605,"ç͵æµģ":109606,"ä¸Ŀ毫":109607,"èĥ½å¤Łåľ¨":109608,"éĶĢåĶ®æĶ¶åħ¥":109609,"åľ¨åŃ¦æł¡":109610,"æ°´åĩĨ":109611,"è§Ĩ线":109612,"èĩªåľ¨":109613,"åķĨä¸ļéĵ¶è¡Į":109614,"为äºĨ让":109615,"çį²å¾Ĺ":109616,"çݩ家æľĭ
åıĭ":109617,"éĿ¢èĨľ":109618,"åĪĨåī²":109619,"åī§æľ¬":109620,"ç«Ń":109621,"说å¾Ĺ":109622,"æĥ³çŁ¥éģĵ":109623,"çļĦ人çī©":109624,"èĮħåı°":109625,"åIJĮä¸Ģ个":109626,"æķ°æį®ä¸Ńå¿ĥ":109627,"çĶĦ":109628,"åĸľæĤ¦":109629,"ä¸ĭæĿ¥çļĦ":109630,"å®ļåIJij":109631,"æŀģåħ·":109632,"çļĦåľŁåľ°":109633,"éĤ£åĢĭ":109634,"æijĦåħ¥":109635,"äºĨæĪijçļĦ":109636,"马路":109637,"åħ¨ç¤¾ä¼ļ":109638,"è®®æ¡Ī":109639,"å±ĭåŃIJ":109640,"åIJįåı«":109641,"åĮª":109642,"åľ¨å¤ĸéĿ¢":109643,"åįİåįĹ":109644,"åıijè´§":109645,"å¯ĴåĨ·":109646,"é«ĺçŃīæķĻèĤ²":109647,"详ç»ĨçļĦ":109648,"ä¸ªé¡¹çĽ®":109649,"çĶŁäº§åĬĽ":109650,"æĹ¶å¸¸":109651,"å°±æľĥ":109652,"ä¸ĩèĤ¡":109653,"éĻĮçĶŁäºº":109654,"æııç»ĺ":109655,"å½ĵçĦ¶æĺ¯":109656,"æĭīåĬ¨":109657,"éĵ¾æĿ¡":109658,"æī£éϤ":109659,"ä¸Ģ缴éĥ½":109660,"å°ıåŃ©åŃIJ":109661,"伤åı£":109662,"第äºĮå±Ĭ":109663,"è´Ńç½®":109664,"çļĩ马":109665,"æĹłèģĬ":109666,"表åĨ³":109667,"诸å¦Ĥ":109668,"åĵįèµ·":109669,"é£İæļ´":109670,"ä¸ĢæµģçļĦ":109671,"ç·¨":109672,"è§£æĶ¾åĨĽ":109673,"室å¤ĸ":109674,"å°±è¿Ļä¹Ī":109675,"å³¶":109676,"æīĢæľī人éĥ½":109677,"æIJľç´¢å¼ķæĵİ":109678,"çļĦæĪIJæľ¬":109679,"åħļæĶ¿":109680,"åıijè¡Į人":109681,"çļĦäºĭå®ŀ":109682,"对该":109683,"åıĹæįŁ":109684,"ä¿Ħä¹Į":109685,"é²ľèĬ±":109686,"åĨľèį¯":109687,"æŀģéĢŁ":109688,"æĢ¥æĢ§":109689,"两ä¼ļ":109690,"ä¸ĢèάæĿ¥è¯´":109691,"æµ·é²ľ":109692,"åĨĪ":109693,"çĶ¨äºº":109694,"çĶ¨äººåįķä½į":109695,"åĢª":109696,"åĦªæĥł":109697,"æł¹æºIJ":109698,"åĽ¢è´Ń":109699,"ç¾İæ´²":109700,"ä¸ĭè¡Į":109701,"å¹´æľ«":109702,"èľ¡":109703,"è¯ģä»¶":109704,"åľ¨æĪijåĽ½":109705,"ä¸įåºĶ":109706,"æĮīæĹ¶":109707,"åłªç§°":109708,"åľºä¸Ĭ":109709,"å¹²éĥ¨èģĮå·¥":109710,"æľīå¾Ī大çļĦ":109711,"æķ°åŃĹç»ıæµİ":109712,"æ¼Ķç»ĥ":109713,"æį®ç»Łè®¡":109714,"å¾ĢæĿ¥":109715,"广åijĬæľįåĬ¡":109716,"çļĦè·Ŀ离":109717,"æŃ¸":109718,"è¨Ģè¯Ń":109719,"被èªī":109720,"被èªī为":109721,"åĭī强":109722,"å°Ĭæķ¬":109723,"ä¸ĩ亿åħĥ":109724,"ä¸ŃåĽ½åĽ½éĻħ":109725,"å¹²é¢Ħ":109726,"年产":109727,"èĢķåľ°":109728,"èĮİ":109729,"å᳿ĺ¯":109730,"æĺ¨æĻļ":109731,"æĪIJ为ä¸Ģ个":109732,"çºłæŃ£":109733,"åij½åIJį":109734,"é¢ģå¸ĥ":109735,"çĮľæµĭ":109736,"ä¿ĿèŃ·æĶ¿çŃĸ":109737,"æĭ¢":109738,"活泼":109739,"çŃīéĥ¨éŨ":109740,"åѦåΰ":109741,"å¢ŀå̼ç¨İ":109742,"èĪªçº¿":109743,"åĨ¤":109744,"åįģåĩłå¹´":109745,"æİ§èĤ¡èĤ¡ä¸ľ":109746,"ä¸ĢéŨ":109747,"ä¸ªå·¥ä½ľ":109748,"ä¸ªå·¥ä½ľæĹ¥":109749,"æĸ°è¥¿":109750,"æĸ°è¥¿åħ°":109751,"论è¯ģ":109752,"ä»Ĩ":109753,"åı¦å¤ĸä¸Ģ个":109754,"æĶ¹ç¼ĸ":109755,"严ç¦ģ":109756,"åĸľå¥½":109757,"个人信æģ¯":109758,"满æĦı度":109759,"åĵ¨":109760,"å¸ĪèµĦ":109761,"æĶ¹ä¸º":109762,"ç«ŀäºī对æīĭ":109763,"åĩºçĤī":109764,"åķĨ人":109765,"大æ£ļ":109766,"æĮĩ导ä¸ĭ":109767,"å¦ĩç§ij":109768,"輪":109769,"æīģ":109770,"åIJĮæĹ¶è¿ĺ":109771,"å¹¶éĢļè¿ĩ":109772,"æĪĺéĺŁ":109773,"èĶĵå»¶":109774,"ä¿ŀ":109775,"éĢĤå½ĵçļĦ":109776,"åīįè¾Ī":109777,"åĵģåij³":109778,"æ¹¿åľ°":109779,"æĪIJåŀĭ":109780,"ä¸įåıªæĺ¯":109781,"æĥ©ç½ļ":109782,"åĩºåı°äºĨ":109783,"çݩ游æĪı":109784,"æīįåıijçݰ":109785,"åºĶèģĺ":109786,"å¤ĸæĿ¥":109787,"åįłé¢Ĩ":109788,"å±ķæľĽ":109789,"å«Ĥ":109790,"港èĤ¡":109791,"æ¡Įä¸Ĭ":109792,"æĶ¯æŁ±":109793,"çļĦæĥħå½¢":109794,"广éĺĶçļĦ":109795,"æĶ¯è¡Į":109796,"å´©æºĥ":109797,"æľĪä¸Ń":109798,"æľĪä¸ŃæĹ¬":109799,"ç»įåħ´":109800,"临è¿ij":109801,"æĬ¤æłı":109802,"æļ®":109803,"åįķèģĮä¸ļ":109804,"è¾¹å¢ĥ":109805,"æĹ¥çħ§":109806,"ä¸ĢåłĨ":109807,"缴å¾Ħ":109808,"åħ±åIJĮä½ĵ":109809,"æĸ°åįİç½ij":109810,"æīĵ好":109811,"ç͵åĬ¨æ±½è½¦":109812,"ä¸įæĺİçϽ":109813,"éĢĻ裡":109814,"çĽĽå¤§":109815,"çİĭæľĿ":109816,"åĨįä¸Ģ次":109817,"åĬŀåħ¬åİħ":109818,"è´¨æĬ¼":109819,"åIJĪåĩ»":109820,"人们对":109821,"éĽ¶é£Ł":109822,"éĥ½ä¸įçŁ¥éģĵ":109823,"çļĦè¯Ńè¨Ģ":109824,"åĭŁéĽĨèµĦéĩij":109825,"åĬ¨èĦī":109826,"彤":109827,"è¿Ļåĩłå¹´":109828,"çŁŃè§Ĩé¢ij":109829,"太é«ĺ":109830,"常å§Ķä¼
ļ":109831,"åĬłçıŃ":109832,"éĩįå¿ĥ":109833,"åªĴä½ĵæĬ¥éģĵ":109834,"没æ³ķ":109835,"éĹ»åIJį":109836,"çĥŃ度":109837,"å¹¿æ³ĽçļĦ":109838,"åħŃ大":109839,"çī©ä½ĵ":109840,"ä¸į该":109841,"é¢ĺ主":109842,"精彩çļĦ":109843,"ä¸ºè¿Ľä¸ĢæŃ¥":109844,"èĻŀ":109845,"åĽºçĦ¶":109846,"è´µå·ŀçľģ":109847,"çºłç»ĵ":109848,"代çIJĨ人":109849,"æ³ķå®ļ代表":109850,"åı¦ä¸Ģç§į":109851,"ä¸įåIJ«":109852,"æĭ¯æķij":109853,"ä¼ļç»Ļ":109854,"è¯Ĺè¯į":109855,"åIJĮç±»":109856,"å¾Ĺä¸įåΰ":109857,"æĬĵç´§":109858,"以åħ¶":109859,"åħ¥åħļ":109860,"è¿ĺåı¯":109861,"æľŁåĪĬ":109862,"å¾Īå¤ļæĹ¶åĢĻ":109863,"æĹ¥åIJİ":109864,"åħ¬çº¦":109865,"ä¸Ģ举":109866,"æ¯Ķè¾ĥå¤ļ":109867,"éĩijæ²Ļ":109868,"æįŀ":109869,"æİĴåĩº":109870,"æŃ¦æľ¯":109871,"ä¸įæĸ·":109872,"ä¸ŃèĢĥ":109873,"ä¿¡èµĸ":109874,"ä»İä¸ļ人åijĺ":109875,"çģ«çĦ°":109876,"éĨĴæĿ¥":109877,"ä½İ温":109878,"éĢ¾æľŁ":109879,"åĬ±å¿Ĺ":109880,"éħ¥":109881,"åı¯è°ĵæĺ¯":109882,"è¿ĻæĦıåij³çĿĢ":109883,"é¢łè¦Ĩ":109884,"åĮĹ京大åѦ":109885,"ä¸ĵ线":109886,"åıĬ以ä¸Ĭ":109887,"訪":109888,"èĢĮåIJİ":109889,"çŁ¥ä¹İ":109890,"ä¸Ģ对ä¸Ģ":109891,"å¨ĥå¨ĥ":109892,"çģ¾éļ¾":109893,"åħ¨å±Ģ":109894,"æīĢå¾Ĺç¨İ":109895,"å®ŀæĥł":109896,"èļĤèļģ":109897,"ä¹ŁçŁ¥éģĵ":109898,"温åĴĮ":109899,"èIJ½ä¸ĭ":109900,"åŀĭä¼ģä¸ļ":109901,"åĨįä¹Ł":109902,"ä¾ĽçĥŃ":109903,"é«ĺæ½®":109904,"çĢı覽åύ":109905,"çļĦ巨大":109906,"åħĪ天":109907,"å¹´ä¸ŃåĽ½":109908,"类似çļĦ":109909,"çIJĨäºĭä¼ļ":109910,"空éĸĵ":109911,"ç쵿ĦŁ":109912,"åĬĽæ°Ķ":109913,"带ä¸Ĭ":109914,"ä¸į好æĦıæĢĿ":109915,"æľīä½ķ":109916,"å·²åľ¨":109917,"åıĸåĩº":109918,"è¿Ŀæ³ķçĬ¯ç½ª":109919,"åŃ¦ä¹łè´¯å½»":109920,"åľ°å¸¦":109921,"楼梯":109922,"çŃīæĥħåĨµ":109923,"ä»İåīį":109924,"çļĦä¹łæĥ¯":109925,"ç³Łç³ķ":109926,"å°±èĥ½å¤Ł":109927,"è©ķ":109928,"ä¸Ģå¾ĭ":109929,"æĮ«æĬĺ":109930,"åİŁæĸĩåľ°åĿĢ":109931,"å½ĵå±Ģ":109932,"ä¸įéĢļ":109933,"æķ°åįĥ":109934,"éĺŁä¼į建设":109935,"æĹ¶èĬĤ":109936,"åģļèµ·":109937,"çļĦè®°å¿Ĩ":109938,"ç½ij绾å®īåħ¨":109939,"åĩ¡æĺ¯":109940,"æ°¯":109941,"éĽķåĪ»":109942,"åŁĥåıĬ":109943,"æĪijåı¯ä»¥":109944,"çĽijçIJĨ":109945,"æĽ´åħ·":109946,"åŁİ管":109947,"èĭ¯":109948,"åı¥åŃIJ":109949,"èĭ¥æľī":109950,"ä»İæĿ¥ä¸į":109951,"缸åħ³è´Łè´£":109952,"å®īåħ¨æĦŁ":109953,"æĽ´è¦ģ":109954,"çļĦæĥħæĦŁ":109955,"çī¢çī¢":109956,"è¾ĥ好çļĦ":109957,"æ°®":109958,"ç¬ijè¯Ŀ":109959,"车å±ķ":109960,"ä¹ĭç¾İ":109961,"ç®Ģ约":109962,"ç±»åŀĭçļĦ":109963,"èĢģåĮĸ":109964,"çľĭä½ł":109965,"è¿ĩåĪĨ":109966,"éŨåīį":109967,"ä¸ĢéĹ´":109968,"æĥ³åİ»":109969,"åªĽ":109970,"åľŁè±Ĩ":109971,"åıĪç§°":109972,"ä¸Ńä¿¡":109973,"åŃĺéĩı":109974,"马äºij":109975,"èĩ´ä½¿":109976,"åħĪåīį":109977,"èĢģåŃIJ":109978,"æīĵæī®":109979,"æ¯ķä¸ļäºİ":109980,"æ¯ķä¸ļåIJİ":109981,"ç¾İ好çĶŁæ´»":109982,"å·¥ä¸ļä¼ģä¸ļ":109983,"就好äºĨ":109984,"èħIJèļĢ":109985,"çıįçıł":109986,"åΰè¿ĻéĩĮ":109987,"æīĢéľĢçļĦ":109988,"è¿Ļæĺ¯åĽłä¸º":109989,"çIJĨæĥ³çļĦ":109990,"å·®å¼ĤåĮĸ":109991,"é®":109992,"é®®":109993,"äºļ太":109994,"æĹłç©·":109995,"æıIJçݰ":109996,"ä¸ĵä¸ļæĬĢæľ¯":109997,"çĶ¢æ¥Ń":109998,"åѦåŃIJ":109999,"ç§ijå¹»":110000,"åįłåľ°éĿ¢ç§¯":110001,"ä¸įåĩĨ":110002,"æľªæĪIJ年人":110003,"æĶ¶å½ķ":110004,"è¿ĺ款":110005,"éĴ¢çŃĭ":110006,"æ¼¢":110007,"å¾ĹæĦı":110008,"综åIJĪä½ĵ":110009,"æŀģé«ĺ":110010,"åįķè¯į":110011,"é«ĺæķĪçļĦ":110012,"骨头":110013,"æī§çĿĢ":110014,"缼ä¸ĸ":110015,"模çī¹":110016,"æĽ´èĥ½":110017,"ç»ĿæľĽ":110018,"对åºĶçļĦ":110019,"æ¨Ĭ":110020,"æĸ°ä¸ī":110021,"æĸ°ä¸īæĿ¿":110022,"æģ°æģ°":110023,"åIJįå®¶":110024,"æł¸å¿ĥæĬĢæľ¯":110025,"个å°ı":110026,"æĢİä¹Īä¼ļ":110027,"说ä¸įå®ļ":110028,"西çĵľ":110029,"åĵİ":110030,"ç¢Ł":110031,"å¿ħä¸įåı¯":110032,"å¿ħä¸įåı¯å°ij":110033,"ä¹ĭéĸĵ":110034,"åĪĨ管":110035,"交éĢļäºĭæķħ":110036,"å¼ĢåĬŀ":110037,"å¾ģæ±ĤæĦıè§ģ":110038,"亨":110039,"鼻åŃIJéĥµ":110040,"鼻åŃIJéĥµä»¶":110041,"ä¿¡æģ¯æľįåĬ¡":110042,"ä½łè§īå¾Ĺ":110043,"缴è§Ĥ":110044,"å·
²å®ĮæĪIJ":110045,"åĪĨä¼ļ":110046,"åĽŀåįĩ":110047,"éļ»":110048,"好人":110049,"äºĨè§£ä¸Ģä¸ĭ":110050,"å᫿µ´":110051,"æľĢçα":110052,"åºŀ大":110053,"客æĪ¿":110054,"çijŀåħ¸":110055,"éĥ½ä¸įæĺ¯":110056,"館":110057,"èĹī":110058,"çļĦåIJĦ项":110059,"ä¸ºçĽ®æłĩ":110060,"çļĦè®¤çŁ¥":110061,"å½±åĵįåĬĽçļĦ":110062,"å¤¸å¼ł":110063,"佩æĪ´":110064,"æ±ĩçİĩ":110065,"çļĦçαæĥħ":110066,"æĺ¥é£İ":110067,"æĺ¯æĪijçļĦ":110068,"樹":110069,"åįĬå°ıæĹ¶":110070,"å±±åİ¿":110071,"山西çľģ":110072,"èĢĮè¿Ļ":110073,"æĽ´å¤ļä¿¡æģ¯":110074,"è¿ĺæľīä¸ĢäºĽ":110075,"ç²¾ç»ĨåĮĸ":110076,"ç¾İåѦ":110077,"çͱæĸ¼":110078,"ä»ħä¾ĽåıĤèĢĥ":110079,"å¾Īé«ĺçļĦ":110080,"åıłåĬł":110081,"è¿Ļä¹Ī说":110082,"å±ķåĩº":110083,"åĽĽå¤Ħ":110084,"ä¸ĩå®¶":110085,"æĭĽåĭŁ":110086,"çļĦ强大":110087,"æĤ£æľī":110088,"å°ıäºİ":110089,"ä¹Łè®¸æĺ¯":110090,"对èĩªå·±çļĦ":110091,"èģĮä¸ļæķĻèĤ²":110092,"æĿ¥è¿Ľè¡Į":110093,"档次":110094,"æīĵèµ¢":110095,"éĥ½æľīçĿĢ":110096,"庸":110097,"è¯Ńæ°Ķ":110098,"çͲéĨĽ":110099,"空åĨĽ":110100,"车åĨħ":110101,"åĽłä¸ºä½ł":110102,"å®ŀæķĪ":110103,"æĥħä¾£":110104,"åıijè¾¾åĽ½å®¶":110105,"éķľåŃIJ":110106,"æ¯įå©´":110107,"ä½Ĩæĺ¯ä»ĸ":110108,"积æŀģæİ¨è¿Ľ":110109,"大å¹ħ度":110110,"çļĦ女åĦ¿":110111,"é¤IJæ¡Į":110112,"åIJ¬å¾Ĺ":110113,"çļĦ积æŀģæĢ§":110114,"好åIJ§":110115,"æĹ¥æ¶Īæģ¯":110116,"æľīä»»ä½ķ":110117,"æ¯Ĵåĵģ":110118,"æĹ©çĤ¹åĬłçĽŁ":110119,"第ä¸Ģ天":110120,"å°½åĬĽ":110121,"æłĸ":110122,"主æīĵ":110123,"æĺ¯ä¸ĢåIJį":110124,"çĪĨæĸĻ":110125,"äºĭä¸ļåıijå±ķ":110126,"å¾®åķĨ":110127,"äºİä¸Ģä½ĵçļĦ":110128,"çĶŁçĮª":110129,"èĩªçĦ¶èµĦæºIJ":110130,"çŀĦåĩĨ":110131,"è§Ħ模åĮĸ":110132,"å¹¶ä¸İ":110133,"èĤ¥èĥĸ":110134,"å®¶ç͍":110135,"大çĪ·":110136,"é¢ĦåijĬ":110137,"æĿ¥åģļ":110138,"éĺ³åİ¿":110139,"æŀĦçŃij":110140,"é¢ģå¥ĸ":110141,"åİĨåı²æĸĩåĮĸ":110142,"æľįåĭĻæĪĸ":110143,"æĢ»åĨ³èµĽ":110144,"åıijåŀĭ":110145,"æĪij羣çļĦ":110146,"æĽ¦":110147,"åıĤä¼ļ":110148,"èĦĨå¼±":110149,"åĩĨåħ¥":110150,"èħ¹éĥ¨":110151,"åı¸ä»¤":110152,"æĤ²åī§":110153,"天ä¸Ĭ":110154,"åı£ä¸Ń":110155,"ä¸ĩ个":110156,"åѦä¸ļ":110157,"æıIJåĢ¡":110158,"两边":110159,"大èĤ¡ä¸ľ":110160,"åı¤éķĩ":110161,"è¡Ģç³ĸ":110162,"çļĦç¨ĭ度":110163,"æ£īèĬ±":110164,"åIJİåı°":110165,"å°±åĮ»":110166,"æķ´æķ´":110167,"èĴ²":110168,"çĽĪåĪ©èĥ½åĬĽ":110169,"ç±½":110170,"èĦ«":110171,"çľĭéĩį":110172,"å®¶éķ·":110173,"èģĺç͍":110174,"èµĽéģĵ":110175,"åīįèĢħ":110176,"建èѰ":110177,"å¾ĭå¸ĪäºĭåĬ¡":110178,"èīºæľ¯åĵģ":110179,"æľīèĩªå·±çļĦ":110180,"åIJ¦å®ļ":110181,"ç¤¾åĽ¢":110182,"åij¨äºĶ":110183,"带åΰ":110184,"å·¥ä½ľä¼ļè®®":110185,"èĤ¡æľ¬":110186,"å¤ĸåĮħ":110187,"å®¶åħ¬åı¸":110188,"çĽijçĭ±":110189,"èĪĬ":110190,"åIJįæł¡":110191,"西æ¹ĸ":110192,"è¶ħè¿ĩäºĨ":110193,"åįĹå±±":110194,"ç»Ħä»¶":110195,"å̼å¾Ĺ注æĦı":110196,"æĮ£æīİ":110197,"äºĭ迹":110198,"ç¶ĵçĩŁ":110199,"ç§ij室":110200,"好åIJĹ":110201,"æ¤ħåŃIJ":110202,"åľĪåŃIJ":110203,"ä½Ĩ她":110204,"æµģçķħ":110205,"åIJĦèĩªçļĦ":110206,"èģĮåijĺ":110207,"è¡įçĶŁ":110208,"åħ¨åľº":110209,"æĴ¤éĶĢ":110210,"åį´è¢«":110211,"å®ģéĿĻ":110212,"åīįæīĢ":110213,"åīįæīĢæľª":110214,"åīįæīĢæľªæľī":110215,"主ä¸ļ":110216,"åĮĹç¾İ":110217,"è¯Ħå®ļ":110218,"åĵģå°Ŀ":110219,"大家éĥ½åľ¨":110220,"主å¸ħ":110221,"ç»Ĩå¿ĥ":110222,"ä¿¡æģ¯æĬ«éľ²":110223,"çļĦç«ŀäºī":110224,"éĢĻæ¨£çļĦ":110225,"ç§ijåĪĽæĿ¿":110226,"éĩĩæijĺ":110227,"票æį®":110228,"éĢIJå¹´":110229,"èĭ±è¶ħ":110230,"è¡Įä¸ļåĨħ":110231,"人寿":110232,"åIJİåĭ¤":110233,"å¦ĤæĦı":110234,"ç¬Ķè¯ķ":110235,"æ·¡æ·¡çļĦ":110236,"ä¸įèĪĴæľį":110237,"ä½ĵ积":110238,"ä¹Łä¸įè¦ģ":110239,"éĿ¢æĸĻ":110240,"æł·æľ¬":110241,"ç¥ģ":110242,"æĮīè§Ħå®ļ":110243,"大æ¦Ĥæĺ¯":110244,"æĥħåĨµè¿Ľè¡Į":110245,"åIJĦåįķä½į":110246,"çļĦç¬ij容":110247,"åĩºèī²çļĦ":110248,"代表æĢ§":110249,"çļĦç¾İ好":110250,"éĴ¦":110251,"å¾®çĶŁçī©":110252,"è¶Ĭæĺ¯":110253,"æĸ¹åı¯":110254,"å¹²èĦĨ":110255,"éģĬæĪ²":110256,"çļĦåħ
´è¶£":110257,"éĹ®è´£":110258,"åĽłä¸ºæĪij们":110259,"èĢĥéĩı":110260,"çĶŁçĶŁ":110261,"éĺ»åĬĽ":110262,"ä¸įåħģ许":110263,"æıIJè®®":110264,"åĩıæĮģ":110265,"åıªæĺ¯ä¸Ģ个":110266,"æĪijæĬĬ":110267,"åıijçݰèĩªå·±":110268,"å¢ŀå¹ħ":110269,"å¦į":110270,"èĹĿè¡ĵ":110271,"ä¸Ģ家人":110272,"åĪĨ级":110273,"çļĦæķ°éĩı":110274,"è½®èŀįèµĦ":110275,"çŃīåĽłç´ł":110276,"大夫":110277,"èģĺ请":110278,"é£İæľº":110279,"绽æĶ¾":110280,"ä»»ä½ķä¸Ģ个":110281,"éłĤ":110282,"éĺ¶çº§":110283,"æĬĬ她":110284,"è¿ĽåĨĽ":110285,"èĥ½åģļåΰ":110286,"åŁ¹è®ŃæľºæŀĦ":110287,"çĸĻ":110288,"ç«¥è¯Ŀ":110289,"æĮĩ导æĦıè§ģ":110290,"éĺ®":110291,"æ·±åħ¥æİ¨è¿Ľ":110292,"ä¸»æľº":110293,"æ¸Ķä¸ļ":110294,"ä¸įæľį":110295,"æµĵéĥģ":110296,"è¡Ĺä¸Ĭ":110297,"ä¾Ŀ次":110298,"æĹ¶æ®µ":110299,"梵":110300,"çļĦåĸľçα":110301,"å¾Īéķ¿":110302,"åĪĿ级":110303,"æŀľæĸŃ":110304,"æĬ¢æķij":110305,"é¼ĵèĪŀ":110306,"ä¾ĽéľĢ":110307,"æ·±åħ¥å¼Ģå±ķ":110308,"产ä¸ļéĽĨ群":110309,"åĻªéŁ³":110310,"åIJ¬çĿĢ":110311,"æ·±åĪ»çļĦ":110312,"å¿įåıĹ":110313,"ç͵ç£ģ":110314,"强èĢħ":110315,"æ»ĭåij³":110316,"æĽ¼èģĶ":110317,"åı¯ä»¥çĽ´æİ¥":110318,"大米":110319,"æŃ·åı²":110320,"æĶ¿åĬ¡æľįåĬ¡":110321,"åħ¬å¼ı":110322,"社群":110323,"éģĵ士èģĮä¸ļ":110324,"ä¹ĭæĥħ":110325,"æµ·æ°´":110326,"æ¼Ķå¥ı":110327,"åºĹéĩĮ":110328,"迹象":110329,"åıijå±ķçIJĨ念":110330,"é«ĺ空":110331,"åij¨åĪĬ":110332,"åĽŀåΰäºĨ":110333,"ä¸įéĢĤåIJĪ":110334,"åłµå¡ŀ":110335,"åĬĪ":110336,"æ°´ä¸Ĭ":110337,"çĢijå¸ĥ":110338,"纳ç¨İ人":110339,"çĩĥæ²¹":110340,"å·¥ç¨ĭé¡¹çĽ®":110341,"峡谷":110342,"æľīéĴĪ对æĢ§":110343,"åľĨå½¢":110344,"æľ¬å¸Ĥ":110345,"è¿Ļè¯Ŀ":110346,"管çIJĨèĢħ":110347,"ç¡®è¯ĬçĹħä¾ĭ":110348,"æĬĬæīĭ":110349,"彩èī²":110350,"ä¸Ĭåīį":110351,"夯å®ŀ":110352,"ç¾ĬèĤī":110353,"å¾Ģå¹´":110354,"æĵħèĩª":110355,"迷人":110356,"èĪªæ¯į":110357,"ç²¾ç»Ĩ":110358,"åľ¨æĪijçļĦ":110359,"åĪĽæĬķ":110360,"麦åħĭ":110361,"æľĪç»ı":110362,"åĮĹæµ·":110363,"ä¹ĭæĺŁ":110364,"åı¶åŃIJ":110365,"å¸Ĥåľºç«ŀäºī":110366,"è¿Ļäºĭ":110367,"åıĥèĪĩ":110368,"äº§åľ°":110369,"åĶī":110370,"åķĨåĵģæĪ¿":110371,"èĪªè¿IJ":110372,"ä¼ĺå¼Ĥ":110373,"ä»ĸ们æĺ¯":110374,"éĽ¨æ°´":110375,"è¯įæ±ĩ":110376,"åĨľçͰ":110377,"欧éĺ³":110378,"çŁŃ线":110379,"管ç½ij":110380,"æł¹åŁº":110381,"åıªæľīä¸Ģ个":110382,"éŀĭåŃIJ":110383,"å¸Ĥå§Ķ书记":110384,"åĪ»æĦı":110385,"è¡Į车":110386,"åıĪ被":110387,"åı¯éĿłæĢ§":110388,"è´±":110389,"ä»»åij½":110390,"åºĶåľ¨":110391,"å°±å¾Ĺ":110392,"æľįåĬ¡ä½ĵç³»":110393,"æĶ¿æĿĥ":110394,"åıijè¨Ģ人":110395,"è¿ĩå¾Ģ":110396,"两åıª":110397,"èĻ½è¯´":110398,"éĢģä¸Ĭ":110399,"ä»Ģä¹Īäºĭ":110400,"æķ£æĸĩ":110401,"æİĮæİ§":110402,"èĸĦå¼±":110403,"ä¸ĭéĿ¢å°±":110404,"主è¦ģåĨħ容":110405,"å¾Īéĩįè¦ģçļĦ":110406,"就说":110407,"çϽèī²çļĦ":110408,"éĤ£ä¸ªæĹ¶åĢĻ":110409,"ç»ı纪人":110410,"çļĦæ¯į亲":110411,"ç¬Ķè®°æľ¬":110412,"åºķå±Ĥ":110413,"è¿ij代":110414,"解说":110415,"è²łè²¬":110416,"æľĢ大åĮĸ":110417,"åķĨéĵº":110418,"æł¡åıĭ":110419,"æ²ģ":110420,"ä¸įåĩºæĿ¥":110421,"éĻ·éĺ±":110422,"ç¨ħ":110423,"åħ¬å¸ĥäºĨ":110424,"åĩĢå̼":110425,"çĽ¸å¯¹è¾ĥ":110426,"笼":110427,"æł¸ç®Ĺ":110428,"åįİ侨":110429,"æĢ¥æķij":110430,"æĮºå¥½":110431,"åħĴç«¥":110432,"äºĮèĥİ":110433,"åĩºèĩª":110434,"åĿŁ":110435,"æīĭä¸ĭ":110436,"屡":110437,"åĪĽéĢłæĢ§":110438,"ä¸¥æł¼æĮīçħ§":110439,"åĨįåİ»":110440,"举缣":110441,"人æµģ":110442,"äºĨä¸Ģ声":110443,"å°ıæĹ¶åīį":110444,"è´µæĹı":110445,"éľĸ":110446,"ä¹Łæĺ¯éĿŀ常":110447,"é̱":110448,"çľĭäºĨçľĭ":110449,"ç¹ģæ®ĸ":110450,"èĩ³æŃ¤":110451,"é¢Ħå¤ĩ":110452,"å¾Īæĺİæĺ¾":110453,"æ¼Ķèīº":110454,"åĿIJçĿĢ":110455,"ä¿ĦåĨĽ":110456,"åľ¨è¿ĩåİ»":110457,"ä¹ĭäºĭ":110458,"æĬĵèİ·":110459,"åĿIJä¸ĭ":110460,"çͱä¸ŃåĽ½":110461,"ä¹Łå¼Ģå§ĭ":110462,"çŃĶå¤į":110463,"åŀĥåľ¾åĪĨç±»":110464,"éĴĵé±¼":110465,"åIJĦ種":110466,"缸éģĩ":110467,"ä¸įåģľçļĦ":110468,"æī¹éĩı":110469,"éĩįè¦ģä½ľç͍":110470,"å§Ķå±Ī":110471,"åħŃå¹´":110472,"ä¸ĥ
åįģ":110473,"ä¹ĭæĪĺ":110474,"é£İéĻ©ç®¡çIJĨ":110475,"éŁ³æ¨Ĥ":110476,"è¡ĮæĶ¿å¤Ħç½ļ":110477,"æľ¬äºĭ":110478,"æĴ°åĨĻ":110479,"èģļåIJĪ":110480,"éĢĤæĹ¶":110481,"æIJ¬å®¶":110482,"ç¢İçīĩ":110483,"çĽĽå®´":110484,"ç®Ģæ´ģ":110485,"åı¬éĽĨ":110486,"ç®ĢåĮĸ":110487,"åĮĹ京æĹ¶éĹ´":110488,"第ä¸īå±Ĭ":110489,"æĿ¥åĽŀ":110490,"常ç͍çļĦ":110491,"京津":110492,"京津åĨĢ":110493,"梦幻":110494,"è¯ķè¡Į":110495,"æľºåºĬ":110496,"åΰæľĢåIJİ":110497,"åĬ©æīĭ":110498,"åĪĨ彩":110499,"åĩºåĵģ":110500,"åĪ¹è½¦":110501,"åIJ¯åıij":110502,"ä¾§éĿ¢":110503,"æ¯ıå½ĵ":110504,"缸åħ³è§Ħå®ļ":110505,"ä¸ĸ人":110506,"è´Ń车":110507,"å¿ĥ缮":110508,"å¿ĥ缮ä¸Ń":110509,"äºĶéĩij":110510,"è¿ĺè®°å¾Ĺ":110511,"ä¾ĿçĦ¶æĺ¯":110512,"æıIJæ¡Ī":110513,"ç͵åķĨå¹³åı°":110514,"åģļåΰäºĨ":110515,"æĿľç»Ŀ":110516,"å®īåįĵ":110517,"ä¸ĸçķĮåIJĦåľ°":110518,"åīįéĢĶ":110519,"æ´ĹåĩĢ":110520,"å¥ĭåĬĽ":110521,"åŁİå¸Ĥ建设":110522,"å¤ļåĬŁèĥ½":110523,"ä¼ļéĢłæĪIJ":110524,"åıijå¸ĥä¼ļä¸Ĭ":110525,"究竣æĺ¯":110526,"åĪĨ红":110527,"çŁ¥èŃĺ":110528,"éĿ¢æĿ¿":110529,"æĹłå£°":110530,"æĢ¥éľĢ":110531,"å¤±çľł":110532,"çΏå¦Ī":110533,"äºĤ":110534,"åħ¨æĻ¯":110535,"ç»ıåħ¸çļĦ":110536,"åī§ä¸Ń":110537,"é¢Ĩ导ä¸ĭ":110538,"åħļåĨħ":110539,"åħ¥ä¾µ":110540,"æĭīæĸ¯":110541,"ä¸Ģå¹ķ":110542,"åĬłä¹ĭ":110543,"èĤĨ":110544,"èĭ±æł¼":110545,"èĭ±æł¼åħ°":110546,"å·§åħĭ":110547,"å·§åħĭåĬĽ":110548,"ä¸Ģå¿ĥ":110549,"èģĤ":110550,"å¾Ģå¾Ģæĺ¯":110551,"管çIJĨå±Ĥ":110552,"çĻ»åħ¥":110553,"建ç«ĭèµ·":110554,"å»ºåĽ½":110555,"åŃIJ宫":110556,"åºĶä»ĺ":110557,"æİ¢ç©¶":110558,"第ä¸Ģä½į":110559,"ä½Ļå®¶":110560,"çŃīæ´»åĬ¨":110561,"æīĢèĩ´":110562,"è¾ĥå¿«":110563,"æĺ¯éĿŀ":110564,"æıIJåIJį":110565,"äºĮèĢħ":110566,"åıªåī©ä¸ĭ":110567,"åħ¶ä¸ŃåĮħæĭ¬":110568,"ç¼ĸç¨ĭ":110569,"çł´ç¢İ":110570,"ä¸Ń举":110571,"å·¥ä½ľæĬ¥åijĬ":110572,"çѾåIJį":110573,"éħĴä¸ļ":110574,"çŁ¥æĻĵ":110575,"çĥŃå¿ĥ":110576,"éĿŀåĩ¡":110577,"èIJ¥ä¸ļæī§":110578,"èIJ¥ä¸ļæī§çħ§":110579,"人大代表":110580,"ä¸Ģ个æĸ°çļĦ":110581,"å¨ģæµ·":110582,"éĤ£äºº":110583,"涨价":110584,"æ¶ĪçģŃ":110585,"éļ¾å¿ĺ":110586,"ç¶ĵé©Ĺ":110587,"åı£è¢ĭ":110588,"ç³»æķ°":110589,"æĸĩä¸Ń":110590,"好转":110591,"æĸ°éĽ¶åĶ®":110592,"讲述äºĨ":110593,"å¼ĢçĽĺ":110594,"çķĻç»Ļ":110595,"æħ¢æħ¢çļĦ":110596,"æĤ²ä¼¤":110597,"æľ¬æľŁ":110598,"äºĨå¤ļå°ij":110599,"è¿Ļ让":110600,"åIJĮçŃī":110601,"æ¸ħæĺİ":110602,"个åŁİå¸Ĥ":110603,"æºĸåĤĻ":110604,"åĩłä¹İæĺ¯":110605,"强åĬĽ":110606,"俯":110607,"水稻":110608,"åĽºå®ļçļĦ":110609,"æł¸åĩĨ":110610,"说æľį":110611,"顯示":110612,"è¿Ļå¥Ĺ":110613,"æĻºæħ§åŁİå¸Ĥ":110614,"å±ĭé¡¶":110615,"ä¸įæĿ¥":110616,"çĶŁé²ľ":110617,"çŁ¥æĥħ":110618,"æĬķ身":110619,"åijĬè¯īæĪij们":110620,"ä¸īåĽĽ":110621,"ä¸ĩä¸Ģ":110622,"è¾Ĩ车":110623,"为ä¹ĭ":110624,"åΰæĹ¶åĢĻ":110625,"è¿Ļæīįæĺ¯":110626,"åIJįçīĮ":110627,"åºŁæ°´":110628,"åݻ年åIJĮæľŁ":110629,"å¹´éĻIJ":110630,"éģĭåĭķ":110631,"åıĮçľ¼":110632,"è¦ģç´§":110633,"对çŃĸ":110634,"åľºé¦Ĩ":110635,"çϾç§ij":110636,"è¶Ĭéĩİ":110637,"å¯ĮåIJ«":110638,"大å¤ļæķ°äºº":110639,"æľĢå°ij":110640,"åı¬åͤ":110641,"åħ¸èĮĥ":110642,"åĨľæľº":110643,"æŃ£æĸĩ":110644,"åºĶç͍äºİ":110645,"æ·±èĢķ":110646,"ä¿Ń":110647,"ä»Ģä¹Īä¸ľè¥¿":110648,"å¥Ĺé¤IJ":110649,"å½ĵéĢī":110650,"å·¦æīĭ":110651,"è°ĥçIJĨ":110652,"æĻļé¤IJ":110653,"éļ¾åħ³":110654,"åĩŃè¯ģ":110655,"çĪ±äºº":110656,"æĮĩè´£":110657,"è´£ç¼ĸ":110658,"çļĦä¸Ģ款":110659,"éĵ²":110660,"åįģ个":110661,"èĢ»":110662,"æľįåĬ¡åķĨ":110663,"åľ°çĭ±":110664,"è¿ŀå¿Ļ":110665,"åĽ°æĥij":110666,"çļĵ":110667,"ä¸įåIJĥ":110668,"çİ°åľ¨å·²ç»ı":110669,"çĽĺçĤ¹":110670,"ä¸įåģľåľ°":110671,"管çIJĨ模å¼ı":110672,"è¿Ļ段æĹ¶éĹ´":110673,"椰":110674,"礼åĮħ":110675,"æµģ转":110676,"æī«çłģ":110677,"éĽĨä¸Ńåľ¨":110678,"æ±ĤåĬ©":110679,"åįĬ个":110680,"å¿«éĢŁå¢ŀéķ¿":110681,"å¾Ģä¸ĭ":110682,"è¯ĦåĪĨ":110683,"å°±æĥ³":110684,"åķĨåĬ¡éĥ¨":110685,"æľīéĹ®é¢ĺ":110686,"èİ·åĪ©":110687,
"æ¯ĽçĹħ":110688,"æĦŁåºĶ":110689,"è̧":110690,"åĪĨæŃ§":110691,"åĨī":110692,"æĪij们çİ°åľ¨":110693,"è¦ģåĬłå¼º":110694,"å·§å¦Ļ":110695,"èŀºæĹĭ":110696,"åĪĩæį¢":110697,"çĭĦ":110698,"顺çķħ":110699,"å°¤åħ¶æĺ¯åľ¨":110700,"èĬĿ麻":110701,"éļ¾è¿ĩ":110702,"æĹĹå¸ľ":110703,"å¤įåį°":110704,"å¤įåį°ä»¶":110705,"å¿ħéľĢ":110706,"对å¤ĸå¼ĢæĶ¾":110707,"éļ¾åıĹ":110708,"åİŁæĿ¥æĺ¯":110709,"ç®ĹäºĨ":110710,"é«ĺå±±":110711,"离èģĮ":110712,"çµĦç¹":110713,"çµĦç¹Ķ":110714,"å±ģèĤ¡":110715,"çϾ家":110716,"éģĩä¸Ĭ":110717,"æĺĶæĹ¥":110718,"ä¸į容":110719,"çĽij管éĥ¨éŨ":110720,"主æĦı":110721,"æµģåŁŁ":110722,"è·Įå¹ħ":110723,"èĩ³ä¸Ĭ":110724,"åĪ«è¯´":110725,"æĺ¯æ¯Ķè¾ĥ":110726,"å®ıè§Ĥç»ıæµİ":110727,"å¸Ĥåľºä¸»ä½ĵ":110728,"污æŁĵçī©":110729,"æķijæ²»":110730,"丰æĶ¶":110731,"åŃĺæĶ¾":110732,"åĩĦ":110733,"éĩijå±±":110734,"æį¢äºĨ":110735,"ä¸ĵ人":110736,"éĹľæĸ¼":110737,"æĹ¢è¦ģ":110738,"åĽ½è¶³":110739,"éļĭ":110740,"åıįåĩ»":110741,"起身":110742,"åħĪæĺ¯":110743,"å¸ĮæľĽèĥ½å¤Ł":110744,"åĪ¶è®¢":110745,"åºĹéĿ¢":110746,"åĸĢ":110747,"æķĻä½ł":110748,"éĻ῏©":110749,"åĬĽæ±Ĥ":110750,"ä¸īçϾ":110751,"çī©ä»·":110752,"丢失":110753,"å¢Ļä¸Ĭ":110754,"éĥ¨ä»½":110755,"æł·æĿ¿":110756,"ä¹ĭæĦı":110757,"ç½ijå°ıç¼ĸ":110758,"ä¸ĸä¸Ĭ":110759,"è°ĥè¯ķ":110760,"污æŁĵéĺ²æ²»":110761,"å½±éĻ¢":110762,"å®Įåħ¨åı¯ä»¥":110763,"éĢļåħ³":110764,"ä¹īåĬ¡æķĻèĤ²":110765,"没æľīåĬŀæ³ķ":110766,"èĢ¿":110767,"妳":110768,"æĹłæĥħ":110769,"å¾ĹçĽĬ":110770,"å¾ĹçĽĬäºİ":110771,"æľŁçĽ¼":110772,"娱ä¹IJåľº":110773,"çͲæĸ¹":110774,"ä¸Ģæ±½":110775,"çŰ":110776,"çĸijä¼¼":110777,"æĸ°æµªå¾®åįļ":110778,"强è¡Į":110779,"å½ĵä»ĸ":110780,"èĥº":110781,"ç͍æĪ·æıIJä¾Ľ":110782,"åĮºå§Ķ":110783,"æĦ¿æĻ¯":110784,"æĬĺæī£":110785,"失踪":110786,"è¿«åĪĩ":110787,"åŃĹæ¯į":110788,"åĴ¯":110789,"èªįèŃĺ":110790,"ä»Ģä¹ĪæĦıæĢĿ":110791,"çĽĴåŃIJ":110792,"å½ķéŁ³":110793,"建设工ç¨ĭ":110794,"ä¸ļä½Ļ":110795,"å®ŀ践活åĬ¨":110796,"çľŁç©º":110797,"çĤĸ":110798,"åľ¨è·¯ä¸Ĭ":110799,"主è¦ģåĮħæĭ¬":110800,"该æĢİä¹Ī":110801,"æĢ»æľī":110802,"æĢ§æĦŁ":110803,"æ°ijèĪª":110804,"å¼ĢåºĹ":110805,"欺éªĹ":110806,"çªģåĩ»":110807,"缺失":110808,"æī§ä¸ļ":110809,"åľ°éģĵ":110810,"å¹¶æĹł":110811,"æ°ijåĬŀ":110812,"ç»Ħç»ĩçĶŁæ´»":110813,"æĪijå¦Ī":110814,"è¨ĺèĢħ":110815,"管åζ":110816,"æī¾ä¸ª":110817,"èĹ»":110818,"çĤİçĹĩ":110819,"äºĴåĬ©":110820,"æµıè§Īåύ":110821,"çݩ家æĿ¥è¯´":110822,"éĻįä½İäºĨ":110823,"è£Ķ":110824,"æĮ£éĴ±":110825,"åķĨæľº":110826,"æĶ¹è£ħ":110827,"æµģ浪":110828,"æĶ¿æ³ķ":110829,"èĢģ头":110830,"çĶŁäº§åĴĮ":110831,"ç©Ĺ":110832,"亲çα":110833,"亲çαçļĦ":110834,"å±¥èģĮ":110835,"åŁİéĩĮ":110836,"ç»ĨåĪĨ":110837,"åĬ³åĬ¨åIJĪåIJĮ":110838,"åľ¨æĹ¥æľ¬":110839,"å¨ģå°Ķ":110840,"åį«è§Ĩ":110841,"éĢ£çµIJ":110842,"çĿĢéĩį":110843,"æĬĺ磨":110844,"åĽ¾ä¸º":110845,"çľ·":110846,"å·¥åºı":110847,"æĵģ":110848,"æĵģæľī":110849,"ç½ijç«Ļåľ°åĽ¾":110850,"çļĦä¸Ģ大":110851,"ç»Ħç»ĩå®ŀæĸ½":110852,"æĬĽå¼ĥ":110853,"åĴĮæĶ¯æĮģ":110854,"æ³ķåĪĻ":110855,"浪潮":110856,"çݰæľīçļĦ":110857,"åĩłçİĩ":110858,"为客æĪ·":110859,"åįģä¸ĩ":110860,"è¹Ħ":110861,"çªģåĩºéĹ®é¢ĺ":110862,"åıĥåĬł":110863,"éĥ½ä¼ļæľī":110864,"缤":110865,"è°ģéĥ½":110866,"æīĭåĬ¨":110867,"çĽ´è¾¾":110868,"çĤ¹å¤ļ":110869,"éĺ¶å±Ĥ":110870,"ä¸įä½³":110871,"éĤ£æ®µ":110872,"滨海":110873,"æĺ¯åĽ½åĨħ":110874,"æĪijå¸ĮæľĽ":110875,"åIJĽåŃIJ":110876,"è§ĤéŁ³":110877,"åģļé¥Ń":110878,"æ±½è»Ĭ":110879,"åħ³ç¨İ":110880,"çľ¼åīįçļĦ":110881,"æ°´éĿ¢":110882,"èĢ³æľº":110883,"追踪":110884,"æİ¨éĢģ":110885,"éĴ±åĮħ":110886,"æģ¶å¿ĥ":110887,"æµ·åŁŁ":110888,"å·į":110889,"å¼ĢæĿ¥":110890,"表æĢģ":110891,"仪表":110892,"å¹³åİŁ":110893,"åįģå¤ļå¹´":110894,"ä¹ŁæĹłæ³ķ":110895,"åħ¼é¡¾":110896,"è¡£æŁľ":110897,"æł½åŁ¹":110898,"æĪ¿æºIJ":110899,"设ç«ĭäºĨ":110900,"ä¸ĩåIJį":110901,"æķ°é¢Ŀ":110902,"è¦ģåĿļæĮģ":110903,"åIJīæŀĹçľģ":110904,"请èģĶç³»":110905,"ç»ıå
İĨè¿ĩ":110906,"çļĦæľ¬è´¨":110907,"åħ¥éŨ":110908,"æľ¬æ¡Ī":110909,"çİĩè¾¾åΰ":110910,"åı°éĺ¶":110911,"éĴŀ":110912,"æĪijèĥ½":110913,"èݲèĬ±":110914,"éĴł":110915,"ä¸Ģäºĭ":110916,"åİŁæľīçļĦ":110917,"æ¯ıåĢĭ":110918,"æ¯Ķäºļ迪":110919,"æ£ĭçīĮ游æĪı":110920,"ä¸įä¼ļæľī":110921,"å½ĴæĿ¥":110922,"äºĶçϾ":110923,"è¿ĩé«ĺ":110924,"éĽ·è¾¾":110925,"ä¸Ģèµ·åİ»":110926,"æķĻ导":110927,"å°±è¯Ĭ":110928,"å°±å¾Ī":110929,"ä¸įåIJĮäºİ":110930,"俺":110931,"å¸ĸåŃIJ":110932,"æĶ¿åįıå§Ķåijĺ":110933,"çĸ«æĥħå½±åĵį":110934,"åĪĨè£Ĥ":110935,"为ä»Ģä¹Īä¼ļ":110936,"äºĶæĺŁ":110937,"å°ijåĦ¿":110938,"æĬ¢éĻ©":110939,"梦è§ģ":110940,"è®°èĢħéĩĩ访":110941,"山路":110942,"æĪij个人":110943,"æ²Ļ滩":110944,"è¹Ń":110945,"æĶ¹è®Ĭ":110946,"æĸ°åŀĭåĨł":110947,"æĸ°åŀĭåĨłçĬ¶":110948,"åĮ»æĬ¤":110949,"åĮ»æĬ¤äººåijĺ":110950,"æµ·å°Ķ":110951,"åħ³äºİæĪij们":110952,"éϤå¤ĸ":110953,"åºļ":110954,"宣åijĬ":110955,"ä¸īåįĥ":110956,"榨":110957,"ç§ijæĬĢ大åѦ":110958,"ä¸ĥåħ«":110959,"顺åºĶ":110960,"çΏçΏå¦Īå¦Ī":110961,"éĢīåıĸ":110962,"åī§çĥĪ":110963,"乡æĿijæĹħ游":110964,"积æŀģæİ¢ç´¢":110965,"表çݰ为":110966,"å¾Īæ¸ħæ¥ļ":110967,"大åĨĽ":110968,"æĿ¥ç͵":110969,"å¥ĹæĪ¿":110970,"çݰè¡Į":110971,"享åıĹåΰ":110972,"çľĭçĤ¹":110973,"åĽºå®ļèµĦ产":110974,"以人为":110975,"ä»¥äººä¸ºæľ¬":110976,"ä¸įå®Į":110977,"éĻį鼨":110978,"åģļçļĦäºĭæĥħ":110979,"å¹¶äºİ":110980,"顽强":110981,"è̏":110982,"åĺ´å·´":110983,"缸åħ³ä¿¡æģ¯":110984,"æĪij没":110985,"æĪĺçķ¥æĢ§":110986,"æĢĿ念":110987,"åĪĺå¤ĩ":110988,"åĬ©æĶ»":110989,"é£İè²Į":110990,"éĿ¢å¯¹éĿ¢":110991,"积æŀģå¼Ģå±ķ":110992,"çĸĹæķĪ":110993,"çľĭ书":110994,"缺åı£":110995,"åĽ½æ°ijç»ıæµİ":110996,"使ç͍æĿĥ":110997,"éģ¥è¿ľ":110998,"å¡«è¡¥":110999,"第ä¸ī人":111000,"åįĬå¤ľ":111001,"æŃ¦æ±īå¸Ĥ":111002,"æĪijåıijçݰ":111003,"ä¼ĺæĥłæĶ¿çŃĸ":111004,"é£İåı£":111005,"å°±ä¸įèĥ½":111006,"为主è¦ģ":111007,"æµģåĩº":111008,"å´ĩæĭľ":111009,"å¹¶ä¸įèĥ½":111010,"é«ĺä¸ī":111011,"ä¸ĸçķĮä¸ĬæľĢ":111012,"æĥ³å¿ħ":111013,"åħ¶æīĢ":111014,"åĢĻéĢī":111015,"åĢĻéĢī人":111016,"ä¸įçα":111017,"åī¯ä½ľç͍":111018,"人æ°ijæĹ¥æĬ¥":111019,"æĪijä¸įæĺ¯":111020,"å®ŀçī©":111021,"ç͵åİĤ":111022,"ä¹Łç®Ĺæĺ¯":111023,"æľīéĹľ":111024,"æľīèĥ½åĬĽ":111025,"æĮĤåľ¨":111026,"çľ¼ä¸ĭ":111027,"约翰":111028,"å°ıåѦçĶŁ":111029,"èµ·åΰäºĨ":111030,"工夫":111031,"åIJĮå¿ĥ":111032,"åĿ¦è¨Ģ":111033,"çłĮ":111034,"åıijæĮ¥äºĨ":111035,"èģĮä¸ļéģĵå¾·":111036,"è¿ĻäºĽå¹´":111037,"念头":111038,"èĢģé¼ł":111039,"åħ¨èµĦ":111040,"åħ¨èµĦåŃIJ":111041,"ä¸Ģåij³":111042,"å¤ļä¸ĩåħĥ":111043,"æł¼æľĥ":111044,"éķ¿éĢĶ":111045,"带走":111046,"èĭ±å¯¸":111047,"æĸĩä½ĵ":111048,"对ä»ĸ们":111049,"åĵŃäºĨ":111050,"å¡«æĬ¥":111051,"çīĪæĿĥ声æĺİ":111052,"çĶµçº¿":111053,"è´Ńçī©ä¸Ńå¿ĥ":111054,"饱满":111055,"ä½İ头":111056,"强迫":111057,"ä¿Ŀæ´ģ":111058,"欧åĨł":111059,"缸è¿ŀ":111060,"认è´Ń":111061,"ç쫿ĺŁ":111062,"é«ĺå°Ķ":111063,"é«ĺå°Ķ夫":111064,"èij«èĬ¦":111065,"æłĩ注":111066,"çļĦçIJĨæĥ³":111067,"æł¸éħ¸":111068,"æł¸éħ¸æ£Ģæµĭ":111069,"åĬī":111070,"ä¸Ģèάæĺ¯":111071,"æĢĿç´¢":111072,"轨迹":111073,"çĥŃ带":111074,"éĻ£":111075,"åĩĨç¡®æĢ§":111076,"æĪ´çĿĢ":111077,"åľ¨çĶŁæ´»ä¸Ń":111078,"æīĢèĥ½":111079,"æľ¯åIJİ":111080,"å¸¦ä½ł":111081,"ç¥ł":111082,"æ®ĭéħ·":111083,"ä¹Łåıªæĺ¯":111084,"çͳè´Ń":111085,"举åĬŀäºĨ":111086,"æľīæĦıä¹ī":111087,"æĹºçĽĽ":111088,"åľ¨ç¶²":111089,"åľ¨ç¶²è·¯ä¸Ĭ":111090,"å¾Ī大ç¨ĭ度":111091,"管è¾ĸ":111092,"çĸ«æĥħæľŁéĹ´":111093,"触æij¸":111094,"éĺ¶æ®µæĢ§":111095,"ä¼ļè§īå¾Ĺ":111096,"çļĦçĶ»éĿ¢":111097,"æİ¥åıĹäºĨ":111098,"表达äºĨ":111099,"éĤĵå°ı":111100,"éĤĵå°ıå¹³":111101,"åħļé£İ":111102,"åħļé£İå»īæĶ¿":111103,"åķĨåѦéĻ¢":111104,"åħijæį¢":111105,"é£Łåĵģèį¯åĵģ":111106,"éĿŀ常好çļĦ":111107,"çľ¯":111108,"纳米":111109,"åĬ¨æijĩ":111110,"åĽŀéģ¿":111111,"çľĭèijĹ":111112,"款项":111113,"åħ«å¹´":111114,"åģļ个":111115,"æĸĩæ¡£":111116,"éĩijèŀįç§ijæĬĢ":111117,"åħ¶ä¸Ńæľī":111118
,"äºĨä¸Ģç³»åĪĹ":111119,"æĹĹèΰåºĹ":111120,"ç§°èµŀ":111121,"éĽ¢éĸĭ":111122,"åζåĨ·":111123,"å®¶éŨåı£":111124,"åįģå¤ļ":111125,"ä¼´ä¾£":111126,"çľĭçĹħ":111127,"æĭīçĿĢ":111128,"æīĴ":111129,"çĸ²æĥ«":111130,"å°ijæķ°æ°ijæĹı":111131,"åĽ¾å½¢":111132,"è½§":111133,"å¢ŀéĩı":111134,"饲åħ»":111135,"çģ«å±±":111136,"æ¯ı个æľĪ":111137,"ä½ľä¸ºä¸ĢåIJį":111138,"è½´æī¿":111139,"æĸĩ书":111140,"ç¼ķ":111141,"åħ·ä½ĵæĥħåĨµ":111142,"çĹĽçĤ¹":111143,"缴éĶĢ":111144,"å¡Ĭ":111145,"ä¹Łæľĥ":111146,"çĥŃæ½®":111147,"å¹³æ°ij":111148,"æ¼Ķåͱä¼ļ":111149,"æķĻçłĶ":111150,"éĢĥéģ¿":111151,"ä¸Ģè´¯":111152,"å°±è¶Ĭ":111153,"å®ŀå®ŀåľ¨":111154,"å®ŀå®ŀåľ¨åľ¨":111155,"ä¹łè¿ijå¹³æĢ»":111156,"溺":111157,"å¿ĥåºķ":111158,"éķ¿å¾ģ":111159,"媽媽":111160,"第ä¸ī次":111161,"åĩºæ¼Ķ":111162,"çĭĢæ³ģ":111163,"å°Ķæĸ¯":111164,"代çIJĨåķĨ":111165,"çĨı":111166,"çļĦ对象":111167,"ç͵éĩı":111168,"è¡ĮåĪĹ":111169,"åĽ½äºº":111170,"è·ijäºĨ":111171,"åįĶåĬ©":111172,"èIJ¥è¿IJ":111173,"å¸ĪåħĦ":111174,"榮":111175,"æĥ³åĥı":111176,"æĢ§å¼º":111177,"ç§ijåѦçłĶç©¶":111178,"å»¶å®ī":111179,"ä¸¥æł¼èIJ½å®ŀ":111180,"é¢Ĩä¼ļ":111181,"çĽ¸å·®":111182,"路人":111183,"çĶ«":111184,"æľīä»·å̼":111185,"æľīä»·å̼çļĦ":111186,"ç¾İåĽ¢":111187,"æ°ij主çĶŁæ´»":111188,"æĪijæīį":111189,"ç¾İåĽ½äºº":111190,"æ°Ķåij³":111191,"åıįå°Ħ":111192,"çļĦåĨ³å¿ĥ":111193,"大è±Ĩ":111194,"交代":111195,"è¿Ľåĩº":111196,"åıįæĬĹ":111197,"æĮĩçļĦæĺ¯":111198,"ä»·ä½į":111199,"è¿Ľé©»":111200,"ä¸ĬçϾ":111201,"ä½įåĪĹ":111202,"ä¸ŃåĽ½ä¼ģä¸ļ":111203,"çļĦ好å¤Ħ":111204,"主ç¼ĸ":111205,"汽油":111206,"ä½ĨæĪij们":111207,"æĢİä¹Īçľĭ":111208,"é»Ħå±±":111209,"å¤ļåªĴä½ĵ":111210,"åIJİåį«":111211,"èİ·å¾ĹæĽ´å¤ļ":111212,"åĬ¡å¿ħ":111213,"为å¥ijæľº":111214,"é¦ĸ饰":111215,"ä¸ĩåįļ":111216,"è¶ĬæĿ¥è¶Ĭ大":111217,"ä¸ĵ项è¡ĮåĬ¨":111218,"å¥ĭè¿Ľ":111219,"ä»įçĦ¶æĺ¯":111220,"è´¨æĦŁ":111221,"å¦Ĥæŀľä¸įæĺ¯":111222,"ç«Ļèµ·æĿ¥":111223,"ä¹¾éļĨ":111224,"åı¯æĢķçļĦ":111225,"å¯Įè´µ":111226,"æ¸ħç®Ĺ":111227,"åIJijä¸ĭ":111228,"åĢļ":111229,"çļĦçŃĶæ¡Ī":111230,"èιä¸Ĭ":111231,"çļĦ羣å®ŀæĢ§":111232,"çŃīåĬŁèĥ½":111233,"åĸľåī§":111234,"å¨ģåĬĽ":111235,"æĸ°é¢ĸ":111236,"æł¸ç͵":111237,"æĬ¥éĶĢ":111238,"æķħ乡":111239,"ä¼´éļı":111240,"éŀŃ":111241,"å¦Ĭå¨ł":111242,"åĪĨåĮĸ":111243,"æľīå¾Ī大":111244,"æĢİä¹Ī说":111245,"æĻĤ代":111246,"产åĩº":111247,"ä»ĭç»į说":111248,"å¤ĦçIJĨåύ":111249,"èĨ¨èĥĢ":111250,"åī¯å¸Ĥéķ¿":111251,"çļĦ妻åŃIJ":111252,"æł·åĵģ":111253,"åIJĮæ¯Ķä¸ĭéĻį":111254,"åħĥå·¦åı³":111255,"ç͍èĩªå·±çļĦ":111256,"é«ĺéĽĦ":111257,"æĺ¥æĻļ":111258,"ä¹Łæľīå¾Īå¤ļ":111259,"çľ¼çIJĥ":111260,"æķ£æŃ¥":111261,"ä»ĸ们éĥ½":111262,"第ä¸Ģå®¶":111263,"åĬŀ好":111264,"å®īéĺ²":111265,"ä¸Ģä¸ĩ":111266,"åľ¨éĩĮéĿ¢":111267,"éŁ³é¢ij":111268,"åı£åı·":111269,"ä¸Ģè¶Ł":111270,"ç¦ıçī¹":111271,"é³ŀ":111272,"æĥĬèī³":111273,"æĸ°å¨ĺ":111274,"绿èī²åıijå±ķ":111275,"ä¸Ńå¼ı":111276,"ä¹Łåıªæľī":111277,"çݰ身":111278,"åı¯ä¾Ľ":111279,"æ¯ıä¸Ģ个人":111280,"第ä¸īèĢħ":111281,"åľ°å½¢":111282,"éĴ¢ç»ĵæŀĦ":111283,"çĽijçĿ£æ£ĢæŁ¥":111284,"åı«æĪij":111285,"èĩ´æķ¬":111286,"æ´Ĺæīĭ":111287,"ä¸ĭè°ĥ":111288,"康çĨĻ":111289,"æĪIJ交éĩı":111290,"ä¹ŁæĪIJ为":111291,"åħīæ»ij":111292,"å®Įæķ´æĢ§":111293,"çģ¼":111294,"ç¶²éłģ":111295,"éķ¿å¯¿":111296,"éģ©ç͍":111297,"çļĦä¸Ģ项":111298,"çŀ©çĽ®":111299,"æĬĬèĩªå·±çļĦ":111300,"éĵ¶è¡Įåį¡":111301,"å°±å¿ħé¡»":111302,"ç¾İçϽ":111303,"éŀįå±±":111304,"æľ¬é¢Ĩ":111305,"ä¸Ģç¢Ĺ":111306,"æīĵæ³ķ":111307,"æĤ¨å¥½":111308,"对åŃ©åŃIJ":111309,"æĬ¥éģĵç§°":111310,"ä¼łåĩº":111311,"大èĩ£":111312,"ç¬ĭ":111313,"çĽı":111314,"é¾ļ":111315,"çĽ´çº¿":111316,"æĻºåºĵ":111317,"ç§Łè½¦":111318,"é£İåij³":111319,"çľĭä¸Ģä¸ĭ":111320,"æİ¨éĶĢ":111321,"éĥ¨éĥ¨éķ¿":111322,"è´¨éĩıåĴĮ":111323,"åĪĬçĻ»":111324,"å·¥ä¸ļåĮĸ":111325,"çİĩ为":111326,"鼶件":111327,"硬åĮĸ":111328,"ä¸Ĭåįĥ":111329,"ç»ıéªĮå̼":111330,"å¹³è¡Į":111331,"声
éģĵ":111332,"æľįåĬ¡è´¨éĩı":111333,"çĶŁçĶ¢":111334,"æľĢ容æĺĵ":111335,"ä¸Ģæŀļ":111336,"å¹´æĬ¥":111337,"åħ¬ç½ij":111338,"åħ¬ç½ijå®ī":111339,"åħ¬ç½ijå®īå¤ĩ":111340,"çļĦèĥ½éĩı":111341,"å®ŀéĻħè¡ĮåĬ¨":111342,"è¦ģä¸įè¦ģ":111343,"æĹ¥æľ¬äºº":111344,"èĢ¶ç¨£":111345,"ç¼ĸåī§":111346,"æ¶©":111347,"åį°å°¼":111348,"ä¸Ĭä¸ĭ游":111349,"åĩłåı¥":111350,"ä¸Ńéĵģ":111351,"ç°¡åĸ®":111352,"èĩªå¸¦":111353,"çĶŁäºİ":111354,"ä¸Ģåı£æ°Ķ":111355,"åĭ¤å¥ĭ":111356,"éĻįä»·":111357,"å±ķçݰäºĨ":111358,"å¸ĥæĭī":111359,"ä¼ļéĢīæĭ©":111360,"çļĦç»ıåħ¸":111361,"好æľĭåıĭ":111362,"车éģĵ":111363,"æķ´åĢĭ":111364,"åľĵ":111365,"éķ¿æľŁä»¥æĿ¥":111366,"æĬķå½±":111367,"çļĩåĨł":111368,"è¿ĩ大":111369,"åijĬè¯īä»ĸ":111370,"ä¼ģä¸ļæıIJä¾Ľ":111371,"æĬ½è±¡":111372,"éĢĤ度":111373,"çļĦ女åŃ©":111374,"èµ·ä¼ı":111375,"çļĦåĬŁæķĪ":111376,"ä¸ĵ项æķ´æ²»":111377,"åı¯éĢļè¿ĩ":111378,"ä¸įåIJĮç¨ĭ度":111379,"å¼Ĥè®®":111380,"åĩĢèµĦ产":111381,"åijĹ":111382,"ä»Ģä¹Īåij¢":111383,"å·¡éĢ»":111384,"è¸ıä¸Ĭ":111385,"ä½Ĩå®ĥ":111386,"精度":111387,"管å±Ģ":111388,"第ä¸ĢåIJį":111389,"åĨħåŃĺ":111390,"æijĨåľ¨":111391,"åī©ä¸ĭ":111392,"主ä½ĵ责任":111393,"çĤ¹åįĬ":111394,"以èĩ³äºİ":111395,"åħ»èĢģä¿ĿéĻ©":111396,"æĦŁåıĹåΰäºĨ":111397,"çŁ¥åIJįçļĦ":111398,"å¯Į豪":111399,"妥åĸĦ":111400,"åŃĻåŃIJ":111401,"éĵĤ":111402,"说èĩªå·±":111403,"让æĤ¨":111404,"æķ°æİ§":111405,"çļĦçľ¼åħī":111406,"注éĶĢ":111407,"çļĦçģµéŃĤ":111408,"è¿ĺä¸įéĶĻ":111409,"éĹ®ä»ĸ":111410,"èĩªä¸»çłĶåıij":111411,"èĵĭ":111412,"ç´«èī²":111413,"åĽ½å®¶å®īåħ¨":111414,"è¾½å®ģçľģ":111415,"ä¹Łæ¯Ķè¾ĥ":111416,"ç¾İèĤ¡":111417,"ä¸įç¡®å®ļæĢ§":111418,"å¿ĥ头":111419,"æĪ³":111420,"级åĪ«çļĦ":111421,"论述":111422,"çļĦåĽŀçŃĶ":111423,"ä¿Ŀè¯ģéĩij":111424,"çŃīè¡Įä¸ļ":111425,"幸ç¦ıæĦŁ":111426,"æŃ§è§Ĩ":111427,"æľºç¥¨":111428,"派人":111429,"èĩ´åij½":111430,"åĺ´è§Ĵ":111431,"æĸ°éĹ»ä¸Ńå¿ĥ":111432,"æĶ¾å¼ĥäºĨ":111433,"å®ľå±ħ":111434,"åĨĻä¸ĭ":111435,"éĹ®çŃĶ":111436,"è¿ĻéĩĮæĺ¯":111437,"å¤ļåľ°":111438,"åĮºåŁŁåĨħ":111439,"åĸ°":111440,"çľĭä»ĸ":111441,"æī§æ³ķ人åijĺ":111442,"åĬ¨æľº":111443,"éŁ³åĵį":111444,"çļĦåij½è¿IJ":111445,"é¡¶éĥ¨":111446,"åĵŁ":111447,"éĥ½æľĥ":111448,"æīĵéĢłæĪIJ":111449,"æĦıåĽ¾":111450,"çļĸ":111451,"åĢĴåħ¥":111452,"å·´èIJ¨":111453,"åĬ©åѦ":111454,"å¤įåı¤":111455,"åIJ¯ç͍":111456,"åĽ½éĻħå¸Ĥåľº":111457,"åĤ¨èĥ½":111458,"é»ijé¾Ļæ±Łçľģ":111459,"ä¹ĺ车":111460,"è¿IJåĬ¨ä¼ļ":111461,"ä¿ĿåĪ©":111462,"çŁ³æĿIJ":111463,"çµ®":111464,"çĤĴä½ľ":111465,"çļĦä¿¡ä»»":111466,"å°±æĪIJäºĨ":111467,"åı¯è§Ĥ":111468,"çļĩä¸Ĭ":111469,"è¿Ļåĩłå¤©":111470,"ä¸ĢéĶ®":111471,"åĨ·åĨ»":111472,"ä¿Ŀåį«":111473,"æł¸æ¡ĥ":111474,"åIJĪä½ľåħ³ç³»":111475,"éĢģåĩº":111476,"æĹĹä¸ĭçļĦ":111477,"åľ¨ä¹İ":111478,"为广大":111479,"åįĪé¤IJ":111480,"ä¸ĵ访":111481,"æĪĸå°Ĩ":111482,"éĿĴå²Ľå¸Ĥ":111483,"å¥Ķè·ij":111484,"æĹ¥æĬ¥éģĵ":111485,"å¥ijåIJĪ":111486,"æĸ°æĺ¥":111487,"ä¸įå°ıå¿ĥ":111488,"两ä¸ī":111489,"æĦıæĢĿæĺ¯":111490,"åĨ·èĹı":111491,"çļĦçĹĩçĬ¶":111492,"æĢ§åij½":111493,"è¶ħæłĩ":111494,"å¯Ĩ碼":111495,"ç§ijæĬĢèĤ¡ä»½":111496,"äºĨä¸Ģæī¹":111497,"çĿ£å¯Ł":111498,"åªĴä»ĭ":111499,"å°Ħæīĭ":111500,"ä¿®åħ»":111501,"çīĩåĪ»":111502,"éĢĤåIJĪèĩªå·±":111503,"åıªè¦ģæĺ¯":111504,"åIJĥè¿ĩ":111505,"éĩijéĵ¶":111506,"缴å±ŀ":111507,"åѦéĹ®":111508,"åİĭåζ":111509,"çªĹå¤ĸ":111510,"æĶ¶åΰäºĨ":111511,"åħ¨åĽ½äººå¤§":111512,"ä½Ĩæĺ¯å¯¹äºİ":111513,"åľ¨æķ´ä¸ª":111514,"çļĦèĥĮåIJİ":111515,"åĩıå°ijäºĨ":111516,"åıįèħIJ":111517,"åıįèħIJåĢ¡":111518,"åıįèħIJåĢ¡å»ī":111519,"æĹ·":111520,"åĪĨæľŁ":111521,"åľ¨æ·±åľ³":111522,"æīĵçĿĢ":111523,"æī«ä¸Ģ":111524,"æī«ä¸Ģæī«":111525,"æĶ¿åºľéĥ¨éŨ":111526,"æİ¥è¿ŀ":111527,"å±ŀäºİèĩªå·±":111528,"åŃIJå¼¹":111529,"åIJĮæł·æĺ¯":111530,"æĢ»åħ±":111531,"车ä¼ģ":111532,"æ¢ĵ":111533,"åħ¬é¡·":111534,"åıij声":111535,"éĴĽ":111536,"èµ°åĬ¿åĽ¾":111537,"主èIJ¥":111538,"åĸĶ":111
539,"æķ°æį®åĪĨæŀIJ":111540,"ä¸įè¿ľ":111541,"æľīåIJį":111542,"æľīåIJįçļĦ":111543,"åģ¿è¿ĺ":111544,"å¾Īä½İ":111545,"è®ĵ人":111546,"èĿī":111547,"é«ĺè´µ":111548,"å°ij许":111549,"æ°Ł":111550,"å¹¢":111551,"亲æĥħ":111552,"è¿Ļä»¶äºĭæĥħ":111553,"ç͍é¤IJ":111554,"缸åħ³æĸ°éĹ»":111555,"å°±åºĶ该":111556,"ç»ĪçĤ¹":111557,"æĺ¯å¤ļå°ij":111558,"çĻ»åľº":111559,"è¯ķ管":111560,"è¯ķ管婴åĦ¿":111561,"åģļ大":111562,"åģļ大åģļ强":111563,"çļĦä¾ĭåŃIJ":111564,"åħ«ä¸ª":111565,"æĺİæĹ¥":111566,"çĤ³":111567,"èµ°åİ»":111568,"éģº":111569,"墩":111570,"ä½ĵä¼ļåΰ":111571,"åĴı":111572,"ä¸ĭè¾¾":111573,"å¤įåıij":111574,"追éĢIJ":111575,"æīĵåĵį":111576,"çļĦéļ±ç§ģæ¬Ĭ":111577,"åħ·æľīä¸Ģå®ļ":111578,"è¿Ļä¹Īå¤ļå¹´":111579,"æłijæŀĹ":111580,"æľĢéķ¿":111581,"åIJĮèĥŀ":111582,"åħīæ³½":111583,"åŁŁåIJį":111584,"æĮĩåIJij":111585,"åıĹ害èĢħ":111586,"æłijèĦĤ":111587,"æľīå¤ļ大":111588,"大éĿ¢ç§¯":111589,"æĹłç¼Ŀ":111590,"æĶ¹æŃ£":111591,"æĽ´å¤ļçļĦæĺ¯":111592,"æľŁæľ«":111593,"æŃ¼":111594,"ä¹īä¹Į":111595,"éĤ£ä½ł":111596,"çļĦ第ä¸Ģ个":111597,"èĮµ":111598,"å°§":111599,"èį«":111600,"ä¸įä»ħåı¯ä»¥":111601,"æ¶Įçݰ":111602,"æĢ»éĿ¢ç§¯":111603,"æĸ°éĹ»åıijå¸ĥ":111604,"æ°ijç͍":111605,"就读":111606,"æīĵè´¥":111607,"å¤ĸè¯Ń":111608,"æĪij们ä¸Ģèµ·":111609,"é¢Ħå®ļ":111610,"çĥ¹é¥ª":111611,"æľĢ主è¦ģ":111612,"æľĢ主è¦ģçļĦ":111613,"çīĮçħ§":111614,"åĽłåħ¶":111615,"ä½İä¸ĭ":111616,"ä¼ļåIJĮ":111617,"è§ģè§£":111618,"éĹ´éļĶ":111619,"æķĻç¨ĭ":111620,"å°ī":111621,"å¸Ĥä¸Ńå¿ĥ":111622,"åħ³éĶ®æĺ¯":111623,"æµ·åįĹçľģ":111624,"çī¹åĪ«æĺ¯åľ¨":111625,"ä¸ŃåĽ½å¤§éĻĨ":111626,"åħħè¶³çļĦ":111627,"æĹ¢èĥ½":111628,"åĤ³çµ±":111629,"çijľä¼½":111630,"åħ¥åĽ´":111631,"æħ¢æħ¢åľ°":111632,"æĬ¥éħ¬":111633,"æī¹å¤į":111634,"å·¥ä¸ļåĽŃåĮº":111635,"ä¸İåıijå±ķ":111636,"èĥ¸éĥ¨":111637,"åľ¨ç½ij绾":111638,"åľ¨ç½ij绾ä¸Ĭ":111639,"交è°Ī":111640,"æĽ´æĶ¹":111641,"åįłæľīçİĩ":111642,"ä¸Ŀ绸ä¹ĭè·¯":111643,"è¡Ľ":111644,"çłĶåΤ":111645,"åĪª":111646,"åĪªéϤ":111647,"è¿Ļåıª":111648,"çļĦæ°Ķæģ¯":111649,"åĬłå·ŀ":111650,"éĴ§":111651,"çIJĨäºĭéķ¿":111652,"ä¸ĸå®¶":111653,"æµģè¡ĮçļĦ":111654,"å¾Īæľīåı¯èĥ½":111655,"们éĥ½":111656,"ç»ıèIJ¥æ¨¡å¼ı":111657,"è¡Įä¸ļä¸Ń":111658,"éĢļçŁ¥ä¹¦":111659,"åij½é¢ĺ":111660,"æľ¬ç¶²ç«Ļ":111661,"æ²Ļçī¹":111662,"åıijåħī":111663,"é«ĺä»·":111664,"å·²çĦ¶":111665,"åıĮåįģä¸Ģ":111666,"ä¸Ĭè¯ī":111667,"ç¿ħèĨĢ":111668,"è¿Ļä¸Ģå¹´":111669,"大ä¼ļä¸Ĭ":111670,"éĩī":111671,"å®Įåħ¨æĺ¯":111672,"å¾Ĺ太":111673,"ä¸ĢèĪ¬äºº":111674,"è¿ĺç®Ĺ":111675,"æĬĺåıł":111676,"æĬķæľº":111677,"çĤ¹çĩĥ":111678,"çݰéĩijæµģ":111679,"åħĶåŃIJ":111680,"ç½ijæł¼":111681,"æİ¥è¿ĩ":111682,"ä¾Ľè´§":111683,"éĺ´å½±":111684,"åİŁåħĪ":111685,"æį£":111686,"左侧":111687,"åħĭæĭī":111688,"æīĵåį¡":111689,"ç§ijæ¯Ķ":111690,"æ±ĩéĽĨ":111691,"åľ°çIJĨä½įç½®":111692,"è¯Ħå§Ķ":111693,"ç»ĵåIJĪèµ·æĿ¥":111694,"è¿Ľåħ¥åΰ":111695,"åı¯è¡Į":111696,"åı¯è¡ĮæĢ§":111697,"让å®ĥ":111698,"åĪ¶åº¦æĶ¹éĿ©":111699,"çĶĺèĤĥçľģ":111700,"åĵĹ":111701,"åģıåģı":111702,"è¡£çī©":111703,"ç¥Ŀè´º":111704,"æºIJèĩª":111705,"å¹¶ä¸į代表":111706,"åĽ½åº¦":111707,"好åĿı":111708,"æĿĸ":111709,"æĿŃå·ŀå¸Ĥ":111710,"湿度":111711,"鲸":111712,"åįļ彩":111713,"æ³°å±±":111714,"æĿijèIJ½":111715,"æĸ°èģŀ":111716,"èĤĭ":111717,"åı¤èĢģçļĦ":111718,"çļĦç§ĺå¯Ĩ":111719,"ä¸Ģ个éĹ®é¢ĺ":111720,"éģıåζ":111721,"åįĥ亿":111722,"è¿ĩ硬":111723,"å°Ħåĩ»":111724,"èĩªçĦ¶æĺ¯":111725,"产åĮº":111726,"çĤ¹çĤ¹å¤´":111727,"åı¯ä»¥å¸®åĬ©":111728,"说å®ŀ":111729,"说å®ŀè¯Ŀ":111730,"æĪijåıªæĺ¯":111731,"ä¹ĭä½Ļ":111732,"åIJĮæĹ¶ä¹Łæĺ¯":111733,"ä¸ŃåĽ½éĺŁ":111734,"建æĪIJåIJİ":111735,"ä¹IJè§Ĩ":111736,"åij¨å²ģ":111737,"èį¯åºĹ":111738,"éĩijåįİ":111739,"严éĩįå½±åĵį":111740,"è´¨åľ°":111741,"æĹħéģĬ":111742,"åħµåύ":111743,"æķĻèĤ²æķĻåѦ":111744,"离åİ»":111745,"åIJĦå¼ıåIJĦæł·":111746,"ä»ĭç´":111747,"ä»ĭç´¹":111748,"å¼Ģ头":111749,"å°Ĩèĩªå·
±çļĦ":111750,"åIJ¬åĬĽ":111751,"ä¿¡æģ¯ç³»ç»Ł":111752,"ä»İæł¹æľ¬":111753,"ä»İæł¹æľ¬ä¸Ĭ":111754,"æİĮ声":111755,"欢åĸľ":111756,"å±ķåĮº":111757,"åķ¸":111758,"太å¤ļäºĨ":111759,"éĹ²ç½®":111760,"èĥ¡èIJĿåįľ":111761,"å§Ķå®£ä¼ł":111762,"å§Ķå®£ä¼łéĥ¨":111763,"åįĹéĺ³":111764,"å·ŀåĮº":111765,"ä¸İæĹ¶":111766,"ä¸İæĹ¶ä¿±":111767,"ä¸İæĹ¶ä¿±è¿Ľ":111768,"å«Įçĸij人":111769,"èī¯å¿ĥ":111770,"头顶":111771,"è´¢æĬ¥":111772,"ä½Ľæ³ķ":111773,"å¾µ":111774,"åİŁä»¶":111775,"åĭŀ":111776,"çĶ·ç¯®":111777,"å¤ĸåĽ½äºº":111778,"è¿Ŀ纪":111779,"æī¾äºĨ":111780,"æįķæįī":111781,"缸è¯Ĩ":111782,"æIJľéĽĨ":111783,"çļĦä¼Łå¤§":111784,"ä¸īç»´":111785,"å°±è¡ĮäºĨ":111786,"çĭIJæľĪ":111787,"çĭIJæľĪå±±":111788,"å¸ĮæľĽéĢļè¿ĩ":111789,"èĢĮ对äºİ":111790,"éĿ¢å°į":111791,"åĨĽåĽ¢":111792,"è¡ĹåĮº":111793,"æĤ¬æĮĤ":111794,"便ç§ĺ":111795,"æľīä¸ĢçĤ¹":111796,"ä¼ļè®®ä¸Ĭ":111797,"ä¸ĭæīĭ":111798,"廣åijĬ":111799,"äºĶè¡Į":111800,"çŃīåĢĻ":111801,"ç´§ç´§åĽ´ç»ķ":111802,"æĭ¿äºĨ":111803,"æ¡ĮéĿ¢":111804,"ç¥ŀæĥħ":111805,"éĽĦåİļ":111806,"çŀ³":111807,"楼ä¸ĭ":111808,"彪":111809,"äºĭåıij":111810,"åĨįè§ģ":111811,"é¤ĺ":111812,"é¢ĦåĶ®":111813,"åİ»çľĭçľĭ":111814,"æĪij们åºĶ该":111815,"ä¸īå®¶":111816,"æµĬ":111817,"ä¹IJéĺŁ":111818,"çľĭä¸įè§ģ":111819,"èĦijåŃIJ":111820,"æĮģæľīçļĦ":111821,"çϽèıľ":111822,"éĹªçĥģ":111823,"åĸĿæ°´":111824,"æİ§åĪ¶ç³»ç»Ł":111825,"ä¸ĵåĮº":111826,"æľĿå»·":111827,"æĪijå¿ĥéĩĮ":111828,"å±ķåİħ":111829,"èľĺèĽĽ":111830,"åĨ»ç»ĵ":111831,"粪":111832,"åºIJ":111833,"åIJij社ä¼ļ":111834,"åĨ³çŃĸéĥ¨ç½²":111835,"çŁŃæľŁåĨħ":111836,"æĸ°ä¸ļæĢģ":111837,"æľĶ":111838,"æĹ¶æĬ¥":111839,"使ä¹ĭ":111840,"åĽłåŃIJ":111841,"åıĤä¸İèĢħ":111842,"çļĦ年轻人":111843,"æīĭ表":111844,"å°ģéĶģ":111845,"为ä»Ģä¹Īä¸į":111846,"åIJ¸çĥŁ":111847,"æ¯Ĵç´ł":111848,"åĪijæ³ķ":111849,"磫æŃ£":111850,"身æĹģ":111851,"åİŁè°ħ":111852,"çĽijæĬ¤":111853,"æŃ¤å¤Ħ":111854,"éĻĤ":111855,"æŀľå®ŀ":111856,"åĮ»çĸĹæľįåĬ¡":111857,"ä¸įåIJĪçIJĨ":111858,"æIJŀ好":111859,"çļĦèĦļæŃ¥":111860,"å¤ĸå¥Ĺ":111861,"ç¶ĵéģİ":111862,"æĶ¾ç¼ĵ":111863,"åģľçķĻ":111864,"æĺŁçIJĥ":111865,"çļĦä¸ĢéĿ¢":111866,"åĩłä½ķ":111867,"è½®åĽŀ":111868,"æ¯Ľå·¾":111869,"ä¿®çIJĨ":111870,"ä¸įçŁ¥ä¸į":111871,"ä¸įçŁ¥ä¸įè§ī":111872,"æķ´ä¸ªäºº":111873,"æ¯ģçģŃ":111874,"åı°å·ŀ":111875,"使çĶ¨å¯¿åij½":111876,"é»ijçϽ":111877,"æij¸ç´¢":111878,"é¼łæłĩ":111879,"éĿ©æĸ°":111880,"麵":111881,"ä¸ĵéĹ¨ä¸º":111882,"å¾Īå¤ļæľĭåıĭ":111883,"å·¥ä½ľç»Ħ":111884,"åIJĪå½±":111885,"çĤºä»Ģ麼":111886,"æŀģ度":111887,"çļĦè¿ĽæŃ¥":111888,"å½ĵä¹ĭ":111889,"å½ĵä¹ĭæĹł":111890,"å½ĵä¹ĭæĹłæĦ§":111891,"è´´è¿ij":111892,"尺度":111893,"åľ¨çİ°åľº":111894,"éĻį临":111895,"åħ»èĢģéĩij":111896,"ç£ķ":111897,"åı¯ä»¥ä½¿":111898,"管çIJĨæ°´å¹³":111899,"æľ¬æĬ¥è®°èĢħ":111900,"æ³ķ令":111901,"åį¡è½¦":111902,"ä¸ľæµ·":111903,"å¤ļéĩį":111904,"åħ¶éĹ´":111905,"ç´Ļ":111906,"éĩįå¤§é¡¹çĽ®":111907,"æ±Ĺæ°´":111908,"ç»Ħå§Ķä¼ļ":111909,"ä¿¡æģ¯åħ¬å¼Ģ":111910,"ä¸į论æĺ¯":111911,"ä¸ĢåIJ¬":111912,"èĴ¸æ±½":111913,"æıŃç§ĺ":111914,"è¶ħéģİ":111915,"触åıij":111916,"婦":111917,"åħ³èģĶ交æĺĵ":111918,"å°±ç»Ļ大家":111919,"好ä¹ħ":111920,"åĢŁè´·":111921,"游æĪıè§Ĵèī²":111922,"å¼ĢåIJ¯äºĨ":111923,"æİł":111924,"åħļçļĦåįģä¹Ŀ":111925,"ä¸ĭ鼨":111926,"çŁŃæĹ¶éĹ´åĨħ":111927,"å¯ħ":111928,"导åħ¥":111929,"å·¥ä½ľç»ıéªĮ":111930,"ä¹Łåıªèĥ½":111931,"鼷éľĨ":111932,"è·Łè¿Ľ":111933,"åį¡éĢļ":111934,"é¢ĩæľī":111935,"æľºä½ĵ":111936,"æĪĺ士èģĮä¸ļ":111937,"女主":111938,"ä½ĵåĪ¶æľºåζ":111939,"è¶³åįı":111940,"èĪĴéĢĤçļĦ":111941,"åĢŁåı£":111942,"æī¹åΤ":111943,"æķ°å̼":111944,"諾":111945,"éĺ¿æĭī伯":111946,"åĺİ":111947,"æħ¶":111948,"达人":111949,"å¼Ģæ°´":111950,"å¤§éĽ¨":111951,"温室":111952,"ä½İè¿·":111953,"ä»įæĹ§":111954,"éªĹåŃIJ":111955,"亲å±ŀ":111956,"çIJĨæĻº":111957,"æľ¬åŁºéĩij":111958,"å¨ħ":111959,"åĨĻåŃĹæ¥¼":111960,"å¢Ļå£ģ":111961,"宵":111962,"è
ϽçĦ¶æĺ¯":111963,"顺çĿĢ":111964,"åħ«åį¦":111965,"åķĨç͍":111966,"ä¸į失":111967,"è¿·èĮ«":111968,"顺便":111969,"æļijæľŁ":111970,"æ¬ºè´Ł":111971,"é¢ijé¢ij":111972,"è¯¥æł¡":111973,"æĸĻçIJĨ":111974,"æ·±æĥħ":111975,"åīįéĶĭ":111976,"ä¿ĿèŃī":111977,"èģĮä¸ļçĶŁæ¶¯":111978,"åħ¬å¼Ģåıij":111979,"åħ¬å¼Ģåıijè¡Į":111980,"åħ¥æĪ·":111981,"éłĵ":111982,"å̾åIJ¬":111983,"éŃģ":111984,"æĦīæĤ¦":111985,"åĽŀåIJĪ":111986,"åħ¨åĬĽä»¥":111987,"åħ¨åĬĽä»¥èµ´":111988,"åĥ¹å̼":111989,"èĥ½åĬĽå¼º":111990,"ç»ıå¼Ģ":111991,"ç»ıå¼ĢåĮº":111992,"è¿ľæĸ¹":111993,"çļĦéģĵçIJĨ":111994,"缴åįĩ":111995,"缴åįĩæľº":111996,"为主é¢ĺçļĦ":111997,"ç»ĻæĤ¨":111998,"è¿ĺæĥ³":111999,"æ¯ĶæĪij":112000,"åĨľçī§":112001,"æµ·åºķ":112002,"çŃ¾è®¢äºĨ":112003,"对äºİæĪij们":112004,"æĹ¶è®¸":112005,"éĶ®çĽĺ":112006,"å®ŀéĻħæİ§åζ":112007,"çļĦæ¨¡æł·":112008,"åıįæĺłäºĨ":112009,"代åĬŀ":112010,"åĮ»ç͍":112011,"éĽĨç»ĵ":112012,"åıijå±ķåīįæĻ¯":112013,"æĮĩçĿĢ":112014,"åįİåĮĹ":112015,"è¿Ļåĩłä¸ª":112016,"åIJįæ°Ķ":112017,"åĤįæĻļ":112018,"èĩªåıij":112019,"æ³¢åħ°":112020,"大åĬĽæİ¨è¿Ľ":112021,"èĩªç§°":112022,"èįĨå·ŀ":112023,"æIJį害":112024,"äºĨä¸Ģåı¥":112025,"æľĢåĪĿçļĦ":112026,"éĩijèŀįå᱿ľº":112027,"æĢĢ念":112028,"è¡Įåĭķ":112029,"女æİĴ":112030,"ä¸įè§£":112031,"ä¼łéĶĢ":112032,"转载请":112033,"饰åĵģ":112034,"åıªä¸º":112035,"ä¸İä¼Ĺ":112036,"ä¸İä¼Ĺä¸įåIJĮ":112037,"èĥ½èĢĹ":112038,"èı©æıIJ":112039,"è¿ij两年":112040,"è¿Ķ乡":112041,"马ä¸Ĭå°±":112042,"äºĮçŃīå¥ĸ":112043,"水管":112044,"æ³ķåѦ":112045,"çģŃçģ«":112046,"大å§IJ":112047,"åij¨è½¬":112048,"æľīæľŁ":112049,"æľīæľŁå¾Ĵ":112050,"æľīæľŁå¾ĴåĪij":112051,"å°įæĸ¹":112052,"ç¥ŀèī²":112053,"æ²¹èĦĤ":112054,"ä¸īçĤ¹":112055,"ä¸įåĪ©äºİ":112056,"äºĭä¸ļéĥ¨":112057,"å°±è·Ł":112058,"å¼ĢæĶ¯":112059,"å°ı女åŃ©":112060,"åħ±åIJĮåĬªåĬĽ":112061,"çĶļèĩ³è¿ĺ":112062,"è¿ĻåIJį":112063,"è¿Ļç¬Ķ":112064,"çݯåį«":112065,"æľīç§į":112066,"è§ĨåĬĽ":112067,"çĨŁçŁ¥":112068,"åħ¬ç§¯éĩij":112069,"æ¶Īéĺ²å®īåħ¨":112070,"é¢ĩ为":112071,"大èħ¿":112072,"éĿ¶":112073,"çķĪ":112074,"æľįåĬ¡åĮº":112075,"å¼Ģåĩº":112076,"深度èŀįåIJĪ":112077,"æĹłå¿§":112078,"æŁ¥éĺħ":112079,"ç»Īç»ĵ":112080,"ä¿Ŀç¨İ":112081,"è¨İè«ĸ":112082,"å½ĵåģļ":112083,"è·³èĪŀ":112084,"寧":112085,"女çİĭ":112086,"è®°èĢħåľ¨":112087,"åħ¨äº§ä¸ļéĵ¾":112088,"è´¯éĢļ":112089,"åħ´ä¸ļ":112090,"éĻįåΰ":112091,"å°ģéĿ¢":112092,"åħ¨éĿ¢æİ¨è¿Ľ":112093,"奶èĮ¶":112094,"éĢīåĿĢ":112095,"äºĨä¸Ģåľº":112096,"åIJĮä¼´":112097,"议论":112098,"æIJĵ":112099,"诸èijĽ":112100,"诸èijĽäº®":112101,"å¹²åĺĽ":112102,"æµģæĦŁ":112103,"ä¸ĵä¸ļçŁ¥è¯Ĩ":112104,"ç͵ç«Ļ":112105,"åĩıå¼±":112106,"åĩºåħ¥":112107,"åIJĦçľģ":112108,"éĿŀ常é«ĺ":112109,"åľ°æ¯¯":112110,"åıijæĸĩ":112111,"çĦī":112112,"çĥ§çĥ¤":112113,"å£ģ纸":112114,"æģ¶åĮĸ":112115,"èĬ¸":112116,"èĥĸåŃIJ":112117,"çĩĴ":112118,"çľģéĴ±":112119,"çĻ¾å¼º":112120,"çIJĨ工大åѦ":112121,"éĴ¢æĿIJ":112122,"åĽ½æľīèµĦ产":112123,"æĪĺæľº":112124,"æ³Ħéľ²":112125,"åIJİéĿ¢çļĦ":112126,"æ°´èµĦæºIJ":112127,"æ¢ħèĬ±":112128,"åĨĻçĿĢ":112129,"ä¹ĭ声":112130,"æĹłåı¯":112131,"æĺİæľĿ":112132,"ç«ĭæĸ¹ç±³":112133,"ç·£":112134,"æĶ¾è¿ĩ":112135,"ç¦ıçͰ":112136,"å¾Ĺä½ı":112137,"åıĹä¼Ĺ":112138,"ä¸Ń级":112139,"çĹħåıĺ":112140,"ä¸Ģçŀ¬éĹ´":112141,"æĿĥéĩį":112142,"人æĢ§åĮĸ":112143,"åĮ»çĸĹåį«çĶŁ":112144,"ä¸įåΰä½į":112145,"æĻºèĥ½å®¶å±ħ":112146,"饮ç͍":112147,"æ¼Ķåıĺ":112148,"é«ĺç´łè´¨":112149,"ä¹Ļæĸ¹":112150,"åģľçķĻåľ¨":112151,"èİ·æī¹":112152,"ç©¿æ¢Ń":112153,"å®¢åľº":112154,"æĮ½åĽŀ":112155,"京åŁİ":112156,"çĶŁåij½åĬĽ":112157,"實éļĽ":112158,"çĩĪ":112159,"åĨįçݰ":112160,"çݰå®ŀä¸Ń":112161,"æľīä¿¡å¿ĥ":112162,"çĸıéĢļ":112163,"åĺ´åĶĩ":112164,"鼷éĶĭ":112165,"èıľåįķ":112166,"éħ¯":112167,"è¶ħé«ĺ":112168,"å¾Īé«ĺåħ´":112169,"çĶŁæ®ĸ":112170,"éĢłä»·":112171,"误åĮº":112172,"æĨĭ":112173,"好æ¶Īæģ¯":112174,"å´Ń":112175,"以èĩ´":112176,"å¼Ģçİ©ç¬ij
":112177,"çĽijè§Ĩ":112178,"å·¡å¯Ł":112179,"å¾·å·ŀ":112180,"æĹ©æĹ©":112181,"éĹªç͵":112182,"æĪªåĽ¾":112183,"åı¯ä»¥æł¹æį®":112184,"æīĭèīº":112185,"æİ¥è½¨":112186,"ç§įæĹı":112187,"æĢĢéĩĮ":112188,"åİ»åĮ»éĻ¢":112189,"ä¸ĢäºĮ":112190,"å¼ĢéĺĶ":112191,"åĩıéĢŁ":112192,"ä½Ĩä»İ":112193,"éĢĻä¸Ģ":112194,"åĩıåħį":112195,"主é¢ĺæķĻèĤ²":112196,"å¼Ģ工建设":112197,"蹦":112198,"æľĪ饼":112199,"ä¸ĭæ²ī":112200,"å°Ĭ严":112201,"éĻĩ":112202,"å®ŀæľ¨":112203,"å»łåķĨ":112204,"声称":112205,"èĢĥåľº":112206,"å¸ĥé²ģ":112207,"èĩªæĿ¥":112208,"èĩªæĿ¥æ°´":112209,"éĴ¾":112210,"年以ä¸Ĭ":112211,"大åıĶ":112212,"ä»ĸå·²ç»ı":112213,"åħ¨æĿij":112214,"èģĶç³»ç͵è¯Ŀ":112215,"为导åIJij":112216,"åΤå¤Ħ":112217,"对éĺµ":112218,"缮æ¨Ļ":112219,"åIJįé¢Ŀ":112220,"客æ°Ķ":112221,"横åIJij":112222,"çŃīåĨħ容":112223,"åĩłçĤ¹":112224,"è°Ī论":112225,"ä¸įä¹ı":112226,"å±ķçݰåĩº":112227,"è¾ĥéķ¿":112228,"éĢĨ转":112229,"å°ıæĻĤ":112230,"æĺ¯å¤ļä¹Ī":112231,"æľ¬æľĪ":112232,"è¿ijè§Ĩ":112233,"æĪIJç«ĭ以æĿ¥":112234,"代表çĿĢ":112235,"æĬ¥å¤į":112236,"æĪıæĽ²":112237,"è¨ŃåĤĻ":112238,"åħ¥èĤ¡":112239,"å¾ģæľį":112240,"é«ĺåĩº":112241,"èĪŀåı°ä¸Ĭ":112242,"å¿ĥåĬ¨":112243,"两çĤ¹":112244,"缸çķ¶":112245,"èĻĽ":112246,"主页":112247,"åĩłå®¶":112248,"æĹłä¸į":112249,"åįıå®ļ":112250,"æĸIJ":112251,"å¯ĵæĦı":112252,"åħ¨çº¿":112253,"æįķé±¼":112254,"åı¯ä»¥ä»İ":112255,"æľīè¿Ļæł·çļĦ":112256,"æģ¶éŃĶ":112257,"åĮħåŃIJ":112258,"æģ¤":112259,"å¼Ģå¥ĸç»ĵæŀľ":112260,"ä¸įæŃ»":112261,"èĹį":112262,"å¼¯æĽ²":112263,"海峡":112264,"éĶĢæ¯ģ":112265,"çļĦçĭ¬çī¹":112266,"示æĦı":112267,"ä¸įèĥ½åĨį":112268,"èĥ½æĬĬ":112269,"éĺ²çº¿":112270,"ä¸įå°ijäºİ":112271,"æ±Ģ":112272,"çļĦéĤ£ä¸Ģ":112273,"羣æĥħ":112274,"åŀ®":112275,"被æīĵ":112276,"åĽ½å®ī":112277,"ç¾İå¦Ļ":112278,"è¿Ļåĩł":112279,"åĩºéģĵ":112280,"æľįåĬ¡äºİ":112281,"æĪIJæŀľè½¬åĮĸ":112282,"æīįåįİ":112283,"天é¹ħ":112284,"åĩłä¸ªäºº":112285,"åĢĺèĭ¥":112286,"èĢ½è¯¯":112287,"æĬĹæĪĺ":112288,"è¡ĮéĬ·":112289,"æĿ¥è¢Ń":112290,"åĢŁéĮ¢":112291,"èįīèİĵ":112292,"ä¸¥æł¼æī§è¡Į":112293,"举è¡ĮäºĨ":112294,"å¤ĸç±į":112295,"已达":112296,"æĿijåħļæĶ¯éĥ¨":112297,"è¡Ŀ":112298,"éĻįèĩ³":112299,"æµ·éĩı":112300,"é¤IJé¦Ĩ":112301,"æĢ¥å¿Ļ":112302,"æ·±è¿ľ":112303,"å¾Ģè¿Ķ":112304,"ç¨İåĬ¡å±Ģ":112305,"å¹¿æ³ĽåºĶç͍":112306,"è®®åijĺ":112307,"æĹłæķĮ":112308,"çľ¼åħī":112309,"çĥŃè¡Ģä¼łå¥ĩ":112310,"æŃIJ":112311,"äºĨäºĽ":112312,"è¿ĿèĥĮ":112313,"è¿Ļæĺ¯ä¸Ģç§į":112314,"ä¸į稳å®ļ":112315,"大家åĪĨ享":112316,"表çı¾":112317,"åīįåįģ":112318,"è·¯è¿ĩ":112319,"æĴ©":112320,"åIJĮæĥħ":112321,"ä¹łä¿Ĺ":112322,"åıijè´¢":112323,"åºĶæľīçļĦ":112324,"æĿİæŁIJ":112325,"èĤĽ":112326,"马åħĭ":112327,"éĢļåijĬ":112328,"巨人":112329,"ä¸ĢåĽ¢":112330,"éĢĻæ¬¡":112331,"ä¸įäºĨè§£":112332,"æĸ½è¡Į":112333,"èij¡èIJĦçīĻ":112334,"åıĺå¾ĹæĽ´åĬł":112335,"æı£":112336,"åĪĽæĸ°èĥ½åĬĽ":112337,"çķħéĶĢ":112338,"表æī¬":112339,"æ¯ĶåĪ©":112340,"æ¯ĶåĪ©æĹ¶":112341,"åĮ»çĸĹä¿ĿéĻ©":112342,"æĵį纵":112343,"伤亡":112344,"æµİå®ģ":112345,"åıĺäºĨ":112346,"æľ¬æ¬¡æ´»åĬ¨":112347,"åľŁè±ª":112348,"æĥ³åĬŀæ³ķ":112349,"æĺķ":112350,"å½ĵæĻļ":112351,"åĩºå±Ģ":112352,"çĥŃè®®":112353,"è°Īè°Ī":112354,"æĻĭåįĩ":112355,"åĬ¿å¿ħ":112356,"çϻ山":112357,"éĤ£åĦ¿":112358,"åIJĥåΰ":112359,"ä¹ĭåŁİ":112360,"å¿«æĿ¥":112361,"æ¹Ľæ±Ł":112362,"第ä¸ī个":112363,"åħ¨éĿ¢æıIJåįĩ":112364,"å¥ĸåѦ":112365,"å¥ĸåѦéĩij":112366,"æĬķåħ¥ä½¿ç͍":112367,"é½IJé²ģ":112368,"åı¯ä»¥æĬĬ":112369,"åĴĮä»ĸçļĦ":112370,"è´ŃæĪ¿èĢħ":112371,"æŃ£å¼ıåIJ¯åĬ¨":112372,"åįİæ¶¦":112373,"ä¸įæĸŃå®ĮåĸĦ":112374,"éĴ¢æĿ¿":112375,"累积":112376,"满èĦ¸":112377,"åĽĽæĸ¹":112378,"è´¢çī©":112379,"ä»ĸ们ä¼ļ":112380,"å¤ıæĹ¥":112381,"éĤ£ä¸ªäºº":112382,"éĿłçĿĢ":112383,"çĤ¹äºĨ":112384,"çĤ¹äºĨçĤ¹å¤´":112385,"æ©ĭ":112386,"åıĪ好":112387,"åıĪ好åıĪ":112388,"åıĪ好åıĪå¿«":112389,"éĺµéĺµ":112390,"å°ģ建":112391,"æľ¬çͰ":112392,"çī©ä¸ļæľįåĬ¡"
:112393,"èĩªè´¸åĮº":112394,"åIJı":112395,"便åĪ©åºĹ":112396,"åĽ½å®¶æłĩåĩĨ":112397,"éĿ¢ç²ī":112398,"èī°è¾Ľ":112399,"æĶ»åħ³":112400,"æīĵåĮħ":112401,"车éĺŁ":112402,"人éĢī":112403,"åı¯ä¸įæĺ¯":112404,"äºĮåįģå¹´":112405,"åIJįå¸Ī":112406,"æµ¦ä¸ľ":112407,"åħ¬è¯ģ":112408,"è¿IJéĢģ":112409,"æĺ¯æľĢ好çļĦ":112410,"æŁĶåĴĮ":112411,"çİĭæŁIJ":112412,"çĹħæĪ¿":112413,"åĨ¶éĩij":112414,"ä¸Ģä»¶äºĭæĥħ":112415,"åį¤":112416,"åı¯æİ§":112417,"çīŁ":112418,"æĭĤ":112419,"å·²äºİ":112420,"人éĢł":112421,"çĶŁçī©åĮ»èį¯":112422,"ä½ĵçݰåĩº":112423,"èĤ²åĦ¿":112424,"èĢģå®ŀ":112425,"åľĸçīĩ":112426,"諸":112427,"ç´¯äºĨ":112428,"æĦŁåħ´è¶£çļĦ":112429,"åĽ¾çīĩæĿ¥æºIJ":112430,"ä¹Łæĺ¯ä¸Ģç§į":112431,"æ¾İæ¹ĥæĸ°éĹ»":112432,"æĹ¶è¡¨ç¤º":112433,"åħīè¾ī":112434,"æĬ¥åºŁ":112435,"å²ģæĹ¶":112436,"éħ®":112437,"æ£Ģä¿®":112438,"åıĺéĢŁ":112439,"åıĺéĢŁç®±":112440,"åľ¨èģĮ":112441,"éı¡":112442,"æįĤ":112443,"çĿ£åĬŀ":112444,"æ°¸ä¸į":112445,"åģļä¸ĢäºĽ":112446,"åİĨæĹ¶":112447,"å·¥ç¨ĭæľºæ¢°":112448,"æģ°å½ĵ":112449,"å°±åľ¨äºİ":112450,"ç§°åij¼":112451,"éĢļ常æĺ¯":112452,"æł·å¼ı":112453,"åij¨ä¸Ģ":112454,"èĭ±éķij":112455,"åĿĩ线":112456,"ä¼łéĹ»":112457,"ç͍æĪ·ä½ĵéªĮ":112458,"èµŀåIJĮ":112459,"骨æĬĺ":112460,"为主ä½ĵ":112461,"æ±Łå±±":112462,"æ¸ħæľĿ":112463,"æĶĢåįĩ":112464,"ä¸įçĽ¸ä¿¡":112465,"éĿ´":112466,"æŃ¦åĬŁ":112467,"åĭ¤åĬ³":112468,"æĿ¥æī¾":112469,"å°ĨæĮģç»Ń":112470,"丫头":112471,"æ¨Ļæºĸ":112472,"裴":112473,"深深çļĦ":112474,"åŃķèĤ²":112475,"è§ĦåĪĴ建设":112476,"æ¸ħçν":112477,"ç²¾åĩĨæī¶è´«":112478,"æīĵçł´äºĨ":112479,"è¿Ļä¸Ģ天":112480,"å·¥ä½ľæĢ»ç»ĵ":112481,"æĹħç¨ĭ":112482,"举èIJ¥":112483,"æĶ¾å°Ħ":112484,"æľīåĩłä¸ª":112485,"éĿŀçī©è´¨":112486,"åIJĥå¾Ĺ":112487,"åŨ":112488,"ä¼ļåıijçĶŁ":112489,"篮æĿ¿":112490,"å¼Ģå°ģ":112491,"麻å°Ĩ":112492,"èııæ³½":112493,"ä¸įåIJĪ":112494,"ç³»åĪĹ产åĵģ":112495,"èѬå¦Ĥ":112496,"ç¾İèªī":112497,"èĩªå·±åĸľæ¬¢":112498,"交æĺĵä¸Ńå¿ĥ":112499,"åIJĪåͱ":112500,"使æĪij":112501,"åĥıç´ł":112502,"带éĺŁ":112503,"ä½Ĩ对äºİ":112504,"æĬĬè¿Ļ个":112505,"èĤĿèĦı":112506,"åįķ纯çļĦ":112507,"æĶ»åĿļæĪĺ":112508,"缼ä¼ļ":112509,"åijµæĬ¤":112510,"æªĢ":112511,"èµ¶ä¸Ĭ":112512,"æ¥Ĭ":112513,"ä¹ħäºĨ":112514,"ç¡Ŀ":112515,"çŃĶé¢ĺ":112516,"ä¿ĿæĮģçĿĢ":112517,"è§ģè¯Ĩ":112518,"çĤ¹åĦ¿":112519,"åįĬ个æľĪ":112520,"æ»ĩ":112521,"浸泡":112522,"ä¼łéĢģ":112523,"åľ¨å¸Ĥåľºä¸Ĭ":112524,"ä¹ĭ乡":112525,"çī¹éķ¿":112526,"éĽŀ":112527,"èªł":112528,"身å¤Ħ":112529,"æŁłæª¬":112530,"身穿":112531,"çľģåħ¬å®ī":112532,"çľģåħ¬å®īåİħ":112533,"åıĻåĪ©äºļ":112534,"åĩłåĪĨéĴŁ":112535,"人åĢij":112536,"åľ°æ®µ":112537,"èĩªåѦ":112538,"ä¹Łè¶ĬæĿ¥è¶Ĭ":112539,"èģĮæĿĥ":112540,"æĸ§":112541,"èĩ»":112542,"å½Ĵ纳":112543,"驾é©Ń":112544,"éĥ¨åĪĨåľ°åĮº":112545,"没æľīæĥ³åΰ":112546,"æĴĩ":112547,"ä¹Įé²ģ":112548,"ä¹Įé²ģæľ¨":112549,"ä¹Įé²ģæľ¨é½IJ":112550,"èĤ²äºº":112551,"çļĦæŃ¥ä¼IJ":112552,"å»¶æľŁ":112553,"æ²¹æ°Ķ":112554,"åģļå®Į":112555,"åľ£åľ°":112556,"丰åİļ":112557,"宽带":112558,"åı¯éĿłçļĦ":112559,"åºŃéĻ¢":112560,"åŃľ":112561,"å°ı康社ä¼ļ":112562,"å®īåħ¨ç®¡çIJĨ":112563,"年第":112564,"æİĴ污":112565,"èĥĮåĮħ":112566,"å®¶ä½ı":112567,"åħ¶å®ŀå°±æĺ¯":112568,"ä¼ļè§ģ":112569,"帮åĬ©ä¼ģä¸ļ":112570,"ç½ijè´Ń":112571,"æĺ¯ä¸įä¼ļ":112572,"飯åºĹ":112573,"æŃ»åİ»":112574,"åħįçĸ«åĬĽ":112575,"æľķ":112576,"åĸĿäºĨ":112577,"轻微":112578,"个æľĪåĨħ":112579,"ç»ĦåĽ¢":112580,"åĴĮå®ĮåĸĦ":112581,"鸽":112582,"æıIJéĢŁ":112583,"西å®īå¸Ĥ":112584,"ä¸Ńå¿ĥ主任":112585,"æĹ¶éĹ´ä¸º":112586,"æľŁæĿĥ":112587,"è¶ķ":112588,"ä¸įä»ħè¦ģ":112589,"æľįä»İ":112590,"é¡ĺæĦı":112591,"ä¸įå°ı":112592,"ä¸įå°ıçļĦ":112593,"ç°ĩ":112594,"窦":112595,"åĪĩæĪIJ":112596,"åĵĪåĪ©":112597,"å¤©çľŁ":112598,"ä¸Ģ次次":112599,"éĩijå¸ģ":112600,"æĢİä¹Īèĥ½":112601,"ç½ijè´·":112602,"ä¼ļ计å¸Ī":112603,"çŁŃ缺":112604,"对æłĩ":112605,"åıĺå¾ĹæĽ´":112606,"åīįåĩłå¤©":112607,"éĺ²æ±Ľ":112608,"
彩èϹ":112609,"åĵģä½į":112610,"è¡¨æł¼":112611,"严å¯Ĩ":112612,"æ¯ĽåĪ©çİĩ":112613,"çļĦåį±å®³":112614,"å½ķåζ":112615,"æ°´åĬ¡":112616,"èĥ½å¤Łè®©":112617,"å¹³æĿ¿":112618,"ä¹³æĪ¿":112619,"è¸ıå®ŀ":112620,"é¦ĸåĪĽ":112621,"é¦Ļèķī":112622,"æĬ¥è¡¨":112623,"ä¸ĢæĬ¹":112624,"åĩºçĶŁäºİ":112625,"è²»ç͍":112626,"åĩºè®©":112627,"åIJĪæ³ķæĢ§":112628,"å°¼åħĭ":112629,"åĨ°åĨ·":112630,"é¦Ļæ°Ķ":112631,"åı·ç§°":112632,"èµ·çłģ":112633,"åŁİåİ¿":112634,"çİ©èĢį":112635,"ä¸ĬéĻIJ":112636,"ä¼ļ议精ç¥ŀ":112637,"æĹģè¾¹çļĦ":112638,"便ä¼ļ":112639,"æıŃæĻĵ":112640,"çİ©æĦı":112641,"éĽªå±±":112642,"åIJijçĿĢ":112643,"ä½ĵèĤ²åľ¨çº¿":112644,"说æĺİ书":112645,"åĮĸèĤ¥":112646,"åħļç»Ħ书记":112647,"åĬ¨äºº":112648,"ä¹ĭæīĢ":112649,"æľĪèĩ³":112650,"æľĢå¿«çļĦ":112651,"èĬĤåģĩæĹ¥":112652,"ä¸ĵåľº":112653,"èĢĥä¸Ĭ":112654,"çªŁ":112655,"é²ľè¡Ģ":112656,"è¾ĥ强çļĦ":112657,"æĤĦçĦ¶":112658,"å¤ļä¸ªåĽ½å®¶":112659,"çªĹå¸ĺ":112660,"æŀģå¤§åľ°":112661,"ä¸įç͍æĭħå¿ĥ":112662,"è¿Ļä¹Īåģļ":112663,"åĥ¹æł¼":112664,"ç¾İ丽乡æĿij":112665,"å°ıæĹ¶åĨħ":112666,"ç´§è¿«":112667,"大çģ«":112668,"èĥ³èĨĬ":112669,"æĵįä½ľç³»ç»Ł":112670,"æ®ĭçķĻ":112671,"åĨĻåĩº":112672,"ç¦ģå¿Į":112673,"åĬłçĽŁåºĹ":112674,"è¿ijçϾ":112675,"便åı¯":112676,"æķ´æĶ¹æİªæĸ½":112677,"éĩĩ访æĹ¶":112678,"åĶIJ代":112679,"æ·±åĮĸæĶ¹éĿ©":112680,"çŁ¢":112681,"éĥ½åĸľæ¬¢":112682,"è¶ĬæĿ¥è¶Ĭé«ĺ":112683,"èĬ±æľµ":112684,"头çĸ¼":112685,"å®ī康":112686,"å¢ŀéķ¿çİĩ":112687,"çľ¼çľĭ":112688,"å°±æĺ¯ä¸ºäºĨ":112689,"èĢĮ导èĩ´":112690,"åĬłå¿«å»ºè®¾":112691,"èĬ±æł·":112692,"åĨħå¿ĥçļĦ":112693,"æĺĨå±±":112694,"è³ĩæºIJ":112695,"åĽŀåΰ家":112696,"èıĬèĬ±":112697,"æ°´éĩı":112698,"å¾ģä¿¡":112699,"è¡ĮæĶ¿åĮº":112700,"ä¹ĥæĺ¯":112701,"æĬķèµĦé¡¹çĽ®":112702,"å«ģç»Ļ":112703,"ç¥ŀåľ£":112704,"稳":112705,"æľ¬æĿ¥å°±":112706,"éĢIJä¸Ģ":112707,"èģĮä¸ļæĬĢæľ¯":112708,"ä¸įèī¯ä¿¡æģ¯":112709,"æīĺè¿IJ":112710,"åIJ¯ç¤º":112711,"ä¹ĭåħ§å®¹":112712,"飶":112713,"奢åįİ":112714,"æıŃ示":112715,"æĪIJ为ä¸ŃåĽ½":112716,"æ¶Īè´¹åĵģ":112717,"åħ¬ç͍":112718,"æIJŀå®ļ":112719,"è¯·ä½ł":112720,"æŁļ":112721,"åĨħè¡£":112722,"ä½Ĩä»ĸ们":112723,"ä¿Ŀ湿":112724,"该åİ¿":112725,"饱åĴĮ":112726,"æİ¨åIJij":112727,"èµĦæĸĻæĺ¾ç¤º":112728,"ä¸įå½±åĵį":112729,"人人éĥ½":112730,"åıijå±ķ壮大":112731,"åħ»èĢģæľįåĬ¡":112732,"çĶŁæ´»æ°´å¹³":112733,"åIJĦåİ¿":112734,"ä½łéľĢè¦ģ":112735,"说çļĦæĺ¯":112736,"å¤ĸåªĴ":112737,"æŃ¤äºº":112738,"次è¦ģ":112739,"追赶":112740,"åºĶ该å¦Ĥä½ķ":112741,"æĹ¥åĩĮæĻ¨":112742,"çķ¥æľī":112743,"éĥ½æĥ³":112744,"游ä¹IJ":112745,"è¿Ļ款游æĪı":112746,"平淡":112747,"æĺ¯ä¸ĢåĢĭ":112748,"å¤ĩèĢĥ":112749,"åζæŃ¢":112750,"ä¸Ģå®ļèĥ½":112751,"å¾Ĵå¼Ł":112752,"以çĤº":112753,"åįĥåħĥ":112754,"äºĶåħŃ":112755,"迪士":112756,"迪士尼":112757,"éĺ³æĢ§":112758,"åĨ¬å¥¥ä¼ļ":112759,"å°±æĺ¯åĽłä¸º":112760,"æĮĤéĴ©":112761,"æ¦ĤåĨµ":112762,"åıªè¦ģæľī":112763,"æ²¹çĶ»":112764,"åľ°æłĩ":112765,"ä¸Ĭè°ĥ":112766,"产ä¸ļåĽŃåĮº":112767,"åħ«åįģ":112768,"棱":112769,"æ¶²æĻ¶":112770,"æĿijå§Ķä¼ļ":112771,"çŃ¾çº¦ä»ªå¼ı":112772,"è¿Ļåħ¶ä¸Ń":112773,"åĨĻéģĵ":112774,"示èĮĥåŁºåľ°":112775,"éĩİçĶŁåĬ¨çī©":112776,"鼻åŃIJä¿¡ç®±":112777,"åĽ½éĻħè´¸æĺĵ":112778,"人æĿĥ":112779,"ä¿Ŀ管":112780,"èĭ¥æĤ¨":112781,"åİĭæĬij":112782,"黼":112783,"åľ°çľĭçĿĢ":112784,"éϰ":112785,"ä¸Ģå¹´å¤ļ":112786,"ä»İ容":112787,"ä¸ŃæĸŃ":112788,"å¯Łè§ī":112789,"移交":112790,"é͝":112791,"æĪĸ许æĺ¯":112792,"ç¶ł":112793,"两项":112794,"æľĢåĸľæ¬¢":112795,"æľĢåĸľæ¬¢çļĦ":112796,"å¤ľéĩĮ":112797,"åIJĮä»ģ":112798,"åĪĽæĸ°é©±åĬ¨":112799,"è°ģèĥ½":112800,"飾":112801,"åħīåѦ":112802,"åİĦ":112803,"èĦ±é¢ĸ":112804,"èĦ±é¢ĸèĢĮåĩº":112805,"迦":112806,"æĺ¯ä¸įåı¯èĥ½":112807,"窥":112808,"èĥ½æ»¡è¶³":112809,"宽度":112810,"伦çIJĨ":112811,"åı¯ä»¥èİ·å¾Ĺ":112812,"转ä¼ļ":112813,"å±±æĿij":112814,"éĵºè®¾":112815,"åĩºåĩ»":112816,"æĸĩåĮĸèīºæľ¯":112817,"ä¼ļ议室":112818,"æŃĮ声":112819,"æ»Ķ":112820,"
èIJİ缩":112821,"æľįåĬ¡åijĺ":112822,"åıij表äºĨ":112823,"æĸ¼æĺ¯":112824,"æĺİç¡®è§Ħå®ļ":112825,"ç»´å¥ĩ":112826,"水产":112827,"æĬķä¿Ŀ":112828,"éĺ´éģĵ":112829,"èµ¶å¿«":112830,"夺å¾Ĺ":112831,"ä¸ĭåįķ":112832,"çµģåħ¬åı¸":112833,"çݯç»ķ":112834,"å½Ī":112835,"ä½ľé£İ建设":112836,"æĹħ游æĻ¯åĮº":112837,"æľīæĽ´å¤ļçļĦ":112838,"丰å¯Įå¤ļ彩":112839,"çIJĨ财产åĵģ":112840,"åĩºå·®":112841,"ä»İ严治":112842,"ä»İ严治åħļ":112843,"çĽ¸å¹²":112844,"æ»ĭ润":112845,"主åĬŀæĸ¹":112846,"åī§åľº":112847,"æ»ļçIJĥ":112848,"æ©Ħæ¦Ħ":112849,"èĩªä¸»åĪĽæĸ°":112850,"éĢļå¾Ģ":112851,"æł¼å°Ķ":112852,"çļĦä¼ĺçĤ¹":112853,"èĥĮä¸Ĭ":112854,"çªľ":112855,"çĪĨåĩº":112856,"å¹³æķ´":112857,"ä¸ĢèĦļ":112858,"åħ¨ä½ĵåijĺå·¥":112859,"éĻIJå®ļ":112860,"åŁİéķĩåĮĸ":112861,"æ·³":112862,"éĢ®æįķ":112863,"è¡ĮåĬ¨è®¡åĪĴ":112864,"æīĵå¾Ĺ":112865,"åİļéĩį":112866,"纪å½ķçīĩ":112867,"åĿļä¿¡":112868,"央ä¼ģ":112869,"åĨįä¹Łä¸į":112870,"天涯":112871,"åıĤèĢĥèµĦæĸĻ":112872,"æľīæ¯Ĵ":112873,"åIJ¸çº³":112874,"è¶Ĭåıij":112875,"éĩįè¦ģæĦıä¹ī":112876,"åĽ½éĺ²éĥ¨":112877,"è¿Ļ个è¡Įä¸ļ":112878,"æĻ®æŁ¥":112879,"å¼ĤæĢ§":112880,"å»¶è¿Ł":112881,"å°ıå¹ħ":112882,"èĥħ":112883,"综åIJĪæ²»çIJĨ":112884,"æŃ£æĺ¯åĽłä¸º":112885,"产ä¸ļç»ĵæŀĦ":112886,"çłĶç©¶æĬ¥åijĬ":112887,"åģľä¸ĭ":112888,"éķ¿èĢģ":112889,"éĩĿå°į":112890,"åįĹ京å¸Ĥ":112891,"çģĮæºī":112892,"转è¿IJ":112893,"欺è¯Ī":112894,"éĢłåģĩ":112895,"åĪĨå¸ĥå¼ı":112896,"æĦŁè§¦":112897,"æĪijå½ĵæĹ¶":112898,"åıijè§ī":112899,"åĽ¾çº¸":112900,"æĶ¹èī¯":112901,"çĭłçĭł":112902,"åĨ²åĪº":112903,"æĸ°äº¬":112904,"æĸ°äº¬æĬ¥":112905,"ç¥ŀåύ":112906,"秸ç§Ĩ":112907,"çĪº":112908,"å°Ĩè¿İæĿ¥":112909,"工信":112910,"工信éĥ¨":112911,"éĻIJéĩı":112912,"æŃ¢æįŁ":112913,"åѦä¼ļäºĨ":112914,"åįİ缼":112915,"åįİçĽĽé¡¿":112916,"å¾Įä¾Ĩ":112917,"ä¸ĭéĿ¢æĺ¯":112918,"ä¸ĭéĿ¢æĺ¯å°ı":112919,"æIJ¬è¿IJ":112920,"ç¾İæľ¯é¦Ĩ":112921,"æ¸ħåĩī":112922,"å¤ļå¹´åīį":112923,"è©ŀ":112924,"åįĥç±³":112925,"表述":112926,"æ±ŁéŨ":112927,"åĬłæ²¹ç«Ļ":112928,"æľ¬èĥ½":112929,"导读":112930,"åĽ´è§Ĥ":112931,"å¹¶åIJij":112932,"åŁºæľ¬æĥħåĨµ":112933,"æīĵå¼ĢäºĨ":112934,"è¿Ļä¸ī个":112935,"æ±ķ头":112936,"强æľīåĬĽ":112937,"强æľīåĬĽçļĦ":112938,"è¿Ľåľº":112939,"ä¹Ŀæ±Ł":112940,"çIJĥæĺŁ":112941,"好çľĭçļĦ":112942,"大æĪ·":112943,"湯":112944,"å¥ĩå¦Ļ":112945,"ä¹IJåύ":112946,"æĪijçļĦå¿ĥ":112947,"çľī头":112948,"åĨľä¸ļçĶŁäº§":112949,"ç¼ĸçłģ":112950,"åŁºç¤":112951,"åŁºç¤İ":112952,"天æĸĩ":112953,"åĢĭ人è³ĩè¨Ĭ":112954,"åİ»è¿ĩ":112955,"èģĨåIJ¬":112956,"æĶ¾åģĩ":112957,"ä¸įåħ·å¤ĩ":112958,"æ·Ģç²ī":112959,"大佬":112960,"åħ¨å¤©":112961,"åħ¨éĿ¢å»ºæĪIJ":112962,"éļIJå½¢":112963,"ç¼ħç͏":112964,"åIJ³":112965,"è¡ĮæĶ¿æī§æ³ķ":112966,"åŁİåł¡":112967,"èİ«æĸ¯":112968,"èİ«æĸ¯ç§ij":112969,"æīĢæľīæĿĥ":112970,"éĽĨåľĺ":112971,"å±Ģåī¯å±Ģéķ¿":112972,"åĩłä¹İ没æľī":112973,"æ´ģåĩĢ":112974,"ç͵影èĬĤ":112975,"åŃ©ç«¥":112976,"æīĢåģļçļĦ":112977,"æ¸ħ代":112978,"æĸ°çīĪ":112979,"éĵĿåIJĪéĩij":112980,"为æĬĵ":112981,"为æĬĵæīĭ":112982,"åΤå®ļ":112983,"çī¹äº§":112984,"æīĭæ©Ł":112985,"ä¸įåı¯æĪĸ":112986,"ä¸įåı¯æĪĸ缺":112987,"å¸Ĥåľºè§Ħ模":112988,"åĿ¯":112989,"åĮ»åѦéĻ¢":112990,"å¿«è¦ģ":112991,"èĮľ":112992,"æĬĺèħ¾":112993,"äºĨè¿ĩæĿ¥":112994,"æĬ¥åijĬæľŁåĨħ":112995,"çī©ç§į":112996,"ç»Łè®¡å±Ģ":112997,"æī©å»º":112998,"æ¶ħ":112999,"责任人":113000,"éĺİ":113001,"è¯Ħè®®":113002,"å¾Ģäºĭ":113003,"æīĢ示":113004,"æķ´æ´ģ":113005,"éĹºèľľ":113006,"æĹħéĢĶ":113007,"å®ŀè®Ń":113008,"ä¹ĭç§°":113009,"巴士":113010,"éĢŁåº¦å¿«":113011,"ä¸įä»ħå¦ĤæŃ¤":113012,"å®Ŀè´µçļĦ":113013,"åºŁçī©":113014,"河水":113015,"æİ¥çº³":113016,"ç²¾æ¹Ľ":113017,"åħ¶æ¬¡æĺ¯":113018,"顺德":113019,"åħ¬åħ±åį«çĶŁ":113020,"è¤IJèī²":113021,"ä¸įæĥľ":113022,"æĬĢæľ¯æľįåĬ¡":113023,"æİ·":113024,"æ±ĤèģĮ":113025,"ä¸ī峡":113026,"æĬķåħ¥åΰ":113027,"太åIJİ":113028,"åIJ¯åĬ¨ä»ªå¼ı":113029,"缴æİ¥å½±åĵį":113030,"æĸ°æ¬¾":113031,"
个乡éķĩ":113032,"çĻ¾äº¿":113033,"庫":113034,"ä¹ŁæŃ£æĺ¯":113035,"åı¶çīĩ":113036,"æľĢæĹ©çļĦ":113037,"æĪĺ绩":113038,"å·¥æľŁ":113039,"æĻļæľŁ":113040,"è¿Ļæł·è¯´":113041,"è¯įè¯Ń":113042,"ä¾Ħ":113043,"æķ£çĥŃ":113044,"éĽĨæĪIJçĶµè·¯":113045,"åIJįè¯į":113046,"æĻºåķĨ":113047,"æĭ¥åłµ":113048,"çĭĤ欢":113049,"è¿Ļèά":113050,"浴室":113051,"åijķåIJIJ":113052,"æľªæĿ¥åıijå±ķ":113053,"ä¸īä½įä¸Ģä½ĵ":113054,"åªĴé«Ķ":113055,"ä¸įå¾Ĺ转载":113056,"åĽłä¸ºå¥¹":113057,"æĺ¾ç¤ºå±ı":113058,"ä¾Ľæļĸ":113059,"éĨ«éĻ¢":113060,"æľīæĦıæĢĿ":113061,"æľīæĦıæĢĿçļĦ":113062,"娱ä¹IJåŁİ":113063,"åįµå·¢":113064,"åĪĽéĢłåĬĽ":113065,"竳èĬĤ":113066,"人大常å§Ķ":113067,"èĢĮçİ°åľ¨":113068,"å¤ĸå©Ĩ":113069,"å¢ŀæĮģ":113070,"äºĶåįĥ":113071,"èĢģå¸Ī们":113072,"æ´ĽæĿī":113073,"æ´ĽæĿī磶":113074,"æİĮæı¡äºĨ":113075,"ä¸ŃåĽ½æĸĩåĮĸ":113076,"æĸ°æĶ¿":113077,"主è¦ģç͍äºİ":113078,"åıijçĥ§":113079,"类似äºİ":113080,"åĮĹæŀģ":113081,"æĪij们认为":113082,"弥漫":113083,"åħ¨çIJĥç»ıæµİ":113084,"é¢IJ":113085,"ä¸Ģèµ·è£ħä¿®":113086,"æĶĴ":113087,"æĭīèIJ¨":113088,"帶ä¾Ĩ":113089,"åĨ·æ°´":113090,"ä¸īåĨľ":113091,"æĿ¿æĿIJ":113092,"è¿ŀè¿ŀ":113093,"éĵ®":113094,"ç»ıèIJ¥çIJĨ念":113095,"山顶":113096,"å¾Īæĥ³":113097,"çĺ«":113098,"å§ĭç»Īä¿ĿæĮģ":113099,"åľ¨å¹¿å·ŀ":113100,"ä¸įåIJĮæĦı":113101,"åıĺåİĭ":113102,"åıĺåİĭåύ":113103,"产éĶĢ":113104,"表éĿ¢ä¸Ĭ":113105,"æīĢ以ä»ĸ":113106,"ç»ıéªĮ丰å¯Į":113107,"éĥ¨å§Ķ":113108,"åħµåĽ¢":113109,"æīĢè¿°":113110,"æķ¦çħĮ":113111,"ç»ıèIJ¥èĮĥåĽ´":113112,"åı£è¯Ń":113113,"失信":113114,"æ¯ı个人çļĦ":113115,"æīĭæĮģ":113116,"æģIJæħĮ":113117,"åł¡åŀĴ":113118,"é¦ħ":113119,"éĵ¸éĢł":113120,"æĭ¿åĩºæĿ¥":113121,"æİ¢æµĭ":113122,"大家ä¸Ģèµ·":113123,"奧":113124,"å®ŀè´¨æĢ§":113125,"å°ıåĦ¿":113126,"èĩºåįĹ":113127,"èĩºåįĹå¸Ĥ":113128,"å¼ĢåıijèĢħ":113129,"åı¯æł¹æį®":113130,"ç®±åŃIJ":113131,"饺åŃIJ":113132,"å¿ĻçĿĢ":113133,"æĿ¥ä¸įåıĬ":113134,"çĽ¸ä¼ł":113135,"åĽ½ç½ij":113136,"èħ¹æ³»":113137,"è¿ĻéĩĮæľī":113138,"é£İæĻ¯åĮº":113139,"åıĤä¿Ŀ":113140,"æŃ»èĢħ":113141,"æĪ´ä¸Ĭ":113142,"æ©Łæ§ĭ":113143,"è¯ķéªĮåĮº":113144,"ä¼łæİĪ":113145,"æµ·è¾¹":113146,"泪水":113147,"缸åħ³åĨħ容":113148,"éĥijå·ŀå¸Ĥ":113149,"åħijçݰ":113150,"两åij¨":113151,"èĬľæ¹ĸ":113152,"ç͵åŃIJä¿¡æģ¯":113153,"红å¤ĸ":113154,"æĹħ游å±Ģ":113155,"å¾Ģå¾Ģä¼ļ":113156,"è¿ħçĮĽ":113157,"ä¼łçľŁ":113158,"æ¸ħæ¾Ī":113159,"å°±è¿ij":113160,"微信群":113161,"ç³»åĪĹæ´»åĬ¨":113162,"ç»ı常ä¼ļ":113163,"è§Ĥæµĭ":113164,"å¿ĥå¾Ĺä½ĵä¼ļ":113165,"éĻĪåĪĹ":113166,"åĮĹæĸĹ":113167,"è«®":113168,"諮詢":113169,"è¿ĺæĺ¯ä¼ļ":113170,"æµĭç®Ĺ":113171,"æĺŁç©º":113172,"宽容":113173,"çī©ä¸ļåħ¬åı¸":113174,"æĪĴæĮĩ":113175,"å¸ħæ°Ķ":113176,"ä¸ĢæŃ¥æŃ¥":113177,"åħ±é¸£":113178,"åĨ³ä¸į":113179,"æİ¥ç®¡":113180,"å¦ĩèģĶ":113181,"æ¯Ķåĸ»":113182,"é²ģè¿ħ":113183,"æĮģçºĮ":113184,"çĽ¸äº²":113185,"å¨ģå°¼æĸ¯äºº":113186,"ç«ĭ项":113187,"åĪĿå§ĭ":113188,"èĩªåζ":113189,"è¿Īè¿Ľ":113190,"ä¸Ĭæ±½":113191,"å®ıä¼Ł":113192,"æł¹æľ¬æ²¡æľī":113193,"æĸ°åĨłçĹħæ¯Ĵ":113194,"åĵªç§į":113195,"康åħ»":113196,"è¡°èĢģ":113197,"å½ķåĥı":113198,"é«Ķé©Ĺ":113199,"ç»ijå®ļ":113200,"é¢Ŀ头":113201,"äºĶæľĪ":113202,"èĬ±å¼Ģ":113203,"ä¸Ģ线åŁİå¸Ĥ":113204,"åĪ°åľº":113205,"æĬķéĻį":113206,"çĹĺçĹĺ":113207,"åıĹä¸įäºĨ":113208,"æīİæł¹":113209,"æĽ´ä½ķåĨµ":113210,"æĬ½æŁ¥":113211,"åĩºè·¯":113212,"审议éĢļè¿ĩ":113213,"ä¸įåĥħ":113214,"èī²è°ĥ":113215,"çϾä½Ļ":113216,"èĤłéģĵ":113217,"æ·±åİļçļĦ":113218,"马åĬĽ":113219,"æĹ©æĻļ":113220,"æŃĮèĪŀ":113221,"éĺ²æĻĴ":113222,"æľĢåIJİä¸Ģ个":113223,"樱èĬ±":113224,"å°ıä¼ĻåŃIJ":113225,"åľ¨å½ĵåľ°":113226,"å°ıä¼Ļ伴们":113227,"èµ·æºIJ":113228,"åħ¨åªĴä½ĵ":113229,"ç°½":113230,"éħ±æ²¹":113231,"æĹłè®ºå¦Ĥä½ķ":113232,"裤åŃIJ":113233,"åģľäº§":113234,"ä¸įçͱå¾Ĺ":113235,"çīµå¼ķ":113236,"ä¼łåĬ¨":113237,"ä¹Ŀé¾Ļ":113238,"åĬłåĽº":113239,"ä¹Łä¸įæķ¢":113240,"æĬĢæľ¯æĶ¯æĮģ":113241,"ä¸Ĭå²Ĺ":113242,"ç»ıé
ªĮåĴĮ":113243,"æł¼æŀĹ":113244,"åIJ¸éĻĦ":113245,"æľªæĪIJå¹´":113246,"奢ä¾Īåĵģ":113247,"追æį§":113248,"好ä¸į容æĺĵ":113249,"èķ´åIJ«":113250,"ä¿Ŀå®ļ":113251,"æĬ¥ä¸ļ":113252,"æµ·åĨħå¤ĸ":113253,"ä½łçİ°åľ¨":113254,"æ²¹èĢĹ":113255,"è´¨éĩı管çIJĨ":113256,"æ½ľæ°´":113257,"ä¸½æ±Ł":113258,"转åħ¥":113259,"è¿Ļä¹Īä¹ħ":113260,"æĺİ代":113261,"责任åζ":113262,"éĩįå·¥":113263,"大巴":113264,"触åıĬ":113265,"èµ·åĪĿ":113266,"大å¦Ī":113267,"æĸ¯å¡Ķ":113268,"åĨĽå·¥":113269,"书éĻ¢":113270,"峨":113271,"æİ¨çIJĨ":113272,"è¿Ļç¯ĩæĸĩ竳":113273,"è¿ģç§»":113274,"åľ¨åIJĮä¸Ģ":113275,"ç»Ĩç»Ĩ":113276,"åīĬå¼±":113277,"书æĪ¿":113278,"ç¶ĵ常":113279,"è¯ķé¢ĺ":113280,"æĤ£ä¸Ĭ":113281,"çĻ«çĹ«çĹħ":113282,"åĨ²æ´Ĺ":113283,"å¤ĸæı´":113284,"åħĭåζ":113285,"åįģæľĪ":113286,"åģļä¸įåΰ":113287,"ç¾İåĮĸ":113288,"å¦ĤæľŁ":113289,"è¿ĺéľĢ":113290,"å¤©åºľ":113291,"å°±æĦıåij³çĿĢ":113292,"çļĦç¡®æĺ¯":113293,"éªĹå±Ģ":113294,"å°ıç»ĦèµĽ":113295,"è©©":113296,"ä¹Ŀå¹´":113297,"æĻĵå¾Ĺ":113298,"çłĶ究人åijĺ":113299,"大éħĴåºĹ":113300,"ç§ijåѸ":113301,"åħŃåIJĪ":113302,"çķĮå®ļ":113303,"车载":113304,"å¼ĢçĿĢ":113305,"毫æĹłçĸij":113306,"毫æĹłçĸijéĹ®":113307,"è¿IJç»´":113308,"ç¦ģåĮº":113309,"èĦ±èIJ½":113310,"讲å¸Ī":113311,"产ä¸ļåŁºåľ°":113312,"é«ĺæĢ§èĥ½":113313,"åħī彩":113314,"çݰéĺ¶æ®µ":113315,"åĩ¿":113316,"è¾ĥå·®":113317,"饮çĶ¨æ°´":113318,"éĸĭçϼ":113319,"ç½ijåIJ§":113320,"çĮ´åŃIJ":113321,"æŃ¦æŀĹ":113322,"å®īåİ¿":113323,"ä¸įåı¯æĢĿ":113324,"ä¸įåı¯æĢĿè®®":113325,"éĬ·åĶ®":113326,"è´«ç©·":113327,"为åķ¥":113328,"éºĵ":113329,"å¹¾åĢĭ":113330,"è§Ħ模以ä¸Ĭ":113331,"æıļ":113332,"è¢«åĽ°":113333,"缺å¸Ń":113334,"å¿«é¤IJ":113335,"æĬ¢åįł":113336,"æĻŁ":113337,"å¤įæ´»":113338,"æľ¬æĬ¥è®¯":113339,"åĪĽä¸ĭ":113340,"海滩":113341,"éĩı产":113342,"å¦Ĥä½ķåİ»":113343,"车ä½į":113344,"å¯ĩ":113345,"äºĮåįģåĽĽ":113346,"ç»ıæµİæįŁå¤±":113347,"éħįå¥Ĺ设æĸ½":113348,"åŁºæľ¬éĿ¢":113349,"äºī论":113350,"就好åĥı":113351,"çłĶç©¶æĪIJæŀľ":113352,"éĻĪè¿°":113353,"æīĵåĬ¨":113354,"ä¸ĭå·´":113355,"ç§ĴéĴŁ":113356,"对人ä½ĵ":113357,"æĬĢæľ¯çłĶåıij":113358,"åİŁåŃIJ":113359,"æĺ¯ä¸Ģ项":113360,"äºĨä¸Ģ份":113361,"æĮĩçͲ":113362,"ç͍éĩı":113363,"è¿ĺä¸įå¤Ł":113364,"æĶ¿åºľéĩĩè´Ń":113365,"çŁ¥è¯ĨçĤ¹":113366,"ä¸ŃåĽ½æ¢¦":113367,"å¾Īå¼Ģå¿ĥ":113368,"礼è²Į":113369,"éĿŀ常å¤ļ":113370,"éĿŀ常å¤ļçļĦ":113371,"åĽļ":113372,"æĹħé¦Ĩ":113373,"å°½æĥħ":113374,"æŃĮåͱ":113375,"æ²Ļé¾Ļ":113376,"车åİ¢":113377,"客æµģ":113378,"åģıå·®":113379,"积累äºĨ":113380,"æ¡Ķ":113381,"çĶ»çĶ»":113382,"ä¹ŁåºĶ该":113383,"åºĶç͍ç¨ĭåºı":113384,"èĥĥèĤł":113385,"以å¾Į":113386,"豪å®ħ":113387,"æ·±åĬłå·¥":113388,"缴è¨Ģ":113389,"åĮĸçŁ³":113390,"åĽ½éģĵ":113391,"ä¸ĥ个":113392,"ä»İèĢĮ使":113393,"èĤłèĥĥ":113394,"æĹ¥è¶ĭ":113395,"çζåŃIJ":113396,"ç·©":113397,"æĭĽçīĮ":113398,"产å¦ĩ":113399,"çķªèĮĦ":113400,"æĪijéĻ¢":113401,"建çŃijå·¥ç¨ĭ":113402,"å±ķè§Īä¼ļ":113403,"å®¶éķ¿ä»¬":113404,"åĨľä½ľçī©":113405,"æĹ¥å¤ľ":113406,"æĶ»æĵĬ":113407,"è§Ħéģ¿":113408,"èĪŁå±±":113409,"便æ°ij":113410,"åħ«åŃĹ":113411,"ä¸įæĽ¾":113412,"æĶ¯éħį":113413,"çĨ¬å¤ľ":113414,"人é¡ŀ":113415,"ç´ĢéĮĦ":113416,"ç»ıèIJ¥æ´»åĬ¨":113417,"大涨":113418,"å¸Ĥå§Ķ常å§Ķ":113419,"åĪĨéIJĺ":113420,"ä¸Ģ个èģĮä¸ļ":113421,"çĹħåĽł":113422,"è¿Ļ对äºİ":113423,"ä¸įå¾Ĺä¸į说":113424,"åıijçĶµæľº":113425,"æľīæīĢ帮åĬ©":113426,"缮æłĩä»»åĬ¡":113427,"åĽłåľ°":113428,"åĽłåľ°åζ":113429,"åĽłåľ°åĪ¶å®ľ":113430,"å°Ĩè¾¾åΰ":113431,"ç²Ĺç³Ļ":113432,"ç¨³åĽº":113433,"å«£":113434,"çİ°åľ¨å¾Īå¤ļ":113435,"ä¸ĸçķĮ级":113436,"å¼łæŁIJ":113437,"çĤ¹ç¼Ģ":113438,"èijµ":113439,"社ä¼ļç»Ħç»ĩ":113440,"å¾ĢåIJİ":113441,"åĬłæģ¯":113442,"åĻªå£°":113443,"æľīåħ´è¶£":113444,"为æĤ¨æıIJä¾Ľ":113445,"æ²¹æ¼Ĩ":113446,"ç¬¬åĽĽå±Ĭ":113447,"çļĩ宫":113448,"ä¹Ĵä¹ĵ":113449,"ä¹Ĵä¹ĵçIJĥ":113450,"éļ¨èijĹ":113451,"éģ©åIJĪ":113452,"åįĹéĿŀ":113453,"æĵ´":113454,"西æ´ĭ":113455,"åĬłå¯Ĩ":113456,"æĪ
IJåĬŁä¸¾åĬŀ":113457,"åı£æ°´":113458,"æĪIJ年人":113459,"æīĢæıIJä¾ĽçļĦ":113460,"éļĶå£ģ":113461,"åľ¨äº¬":113462,"å½ĵåľ°æĹ¶éĹ´":113463,"çŃīåIJĦç§į":113464,"é£İæ°Ķ":113465,"å±ĭéĩĮ":113466,"ä¸ĢåŃĹ":113467,"çļĦæĹ¶éĹ´éĩĮ":113468,"åĺ¿åĺ¿":113469,"快讯":113470,"ä¸Ńåľº":113471,"ä¸Ģçĵ¶":113472,"æ»ķ":113473,"é¢Ĩè·ij":113474,"好èݱ":113475,"好èݱåĿŀ":113476,"没åħ³ç³»":113477,"åĩºå¢ĥ":113478,"ä¸įæĺ¯ä¸Ģ个":113479,"éĥ½æĺ¯éĿŀ常":113480,"éľĩåĬ¨":113481,"èİ·èĥľ":113482,"åįļå¼Ī":113483,"æĬļåħ»":113484,"对ç«ĭ":113485,"æľįåĬ¡æľºæŀĦ":113486,"è°£è¨Ģ":113487,"社ä¼ļç§ijåѦ":113488,"åIJ¬è¯´è¿ĩ":113489,"æī³":113490,"æīĵ磨":113491,"åı£æľį":113492,"好åĥıæĺ¯":113493,"以åıĬåħ¶ä»ĸ":113494,"çī¹è´¨":113495,"亲è¿ij":113496,"ä¸Ģç»ı":113497,"æ¶Ŀ":113498,"éŃĶæľ¯":113499,"éģĵ路交éĢļ":113500,"è§Ħ模æľĢ大":113501,"å®ŀæĸ½æĦıè§ģ":113502,"ä¹ŀ":113503,"ä¸Ģä¸ĸ":113504,"åŁ·è¡Į":113505,"è±Ĩçĵ£":113506,"åĪĹ为":113507,"æķħ宫":113508,"çĶŁåij½åij¨æľŁ":113509,"ä¸īç§įèģĮä¸ļ":113510,"详ç»Ĩä»ĭç»į":113511,"å®Įå¤ĩ":113512,"å²©çŁ³":113513,"éļıæīĭ":113514,"飲":113515,"æķĪæŀľåĽ¾":113516,"ç§ĭåĨ¬":113517,"åĬŁå¾·":113518,"è§Ħ竳åĪ¶åº¦":113519,"æĹ¥æ¸IJ":113520,"æīĢéľĢè¦ģ":113521,"æīĢéľĢè¦ģçļĦ":113522,"å²Ľä¸Ĭ":113523,"åĩºåľŁ":113524,"åĽ¾æĸĩ":113525,"ç§ijæĬĢè¿ĽæŃ¥":113526,"éĢļèĥĢ":113527,"èĢģ太太":113528,"èĭĹæľ¨":113529,"éĵ¶å·Ŀ":113530,"å¸IJ篷":113531,"éĿŀè¦ģ":113532,"éħįç͵":113533,"å¤Ħå¢ĥ":113534,"èĤ¡æĿĥæĬķèµĦ":113535,"ä¸Ģ缴åΰ":113536,"åĿĩçͱ":113537,"æĬĹæĹ¥":113538,"æį®ä»ĭç»į":113539,"ä½łåĸľæ¬¢":113540,"åĪĽæĸ°åŀĭ":113541,"åıĺè¿ģ":113542,"è§Ĩå¯Ł":113543,"å®Įåħ¨æ²¡æľī":113544,"åħĥæĹ¦":113545,"åı¯ä¿¡":113546,"åı¦è¡Į":113547,"æĿij级":113548,"åħ¥åľº":113549,"æIJŃæ¡£":113550,"ä¹ŁåĽłæŃ¤":113551,"æį¢æĪIJ":113552,"ä¸įè´Ł":113553,"äºĨ大éĩıçļĦ":113554,"éģĶåΰ":113555,"å¸Ĥåİ¿":113556,"å¹´è¼ķ":113557,"å¿«æīĭ":113558,"å¸Įå°Ķ":113559,"èĩªèIJ¥":113560,"éĽªèĬ±":113561,"æIJģ":113562,"çľ¼ç§ij":113563,"æŃ£ç¢º":113564,"çļĦå§¿æĢģ":113565,"åĿļå®ŀçļĦ":113566,"æĮĩ纹":113567,"æªĶæ¡Ī":113568,"ç½®äºİ":113569,"佩æľį":113570,"豪éŨ":113571,"åĵĴ":113572,"æģ°å¥½":113573,"æª¢æŁ¥":113574,"åĪĿè¡·":113575,"大åĶIJ":113576,"约ä¼ļ":113577,"èĴ¸åıij":113578,"çѹåĪĴ":113579,"å¹´ç»Ī":113580,"è¡Įæ¥Ń":113581,"åħ±éĿĴ":113582,"åħ±éĿĴåĽ¢":113583,"ä¼ļå¼ķèµ·":113584,"ä¸Ńç§ij":113585,"ä¸Ńç§ijéĻ¢":113586,"æĮ¯åĬ¨":113587,"åį´åıijçݰ":113588,"ä¸įåĬ¨äº§":113589,"èĮ¹":113590,"æĪ¿éĹ´éĩĮ":113591,"è´§å¸ģæĶ¿çŃĸ":113592,"æ²»çĻĤ":113593,"æħİéĩį":113594,"å¡ŀå°Ķ":113595,"åĽ½ç±į":113596,"åĽłæŀľ":113597,"çŃīçī¹çĤ¹":113598,"山谷":113599,"ä¸ĭè¼ī":113600,"è®ĵæĪij":113601,"饮éħĴ":113602,"è¿Ļ个游æĪı":113603,"ç»Ŀ大éĥ¨åĪĨ":113604,"åĴ¨è¯¢æľįåĬ¡":113605,"干活":113606,"è®®ä¼ļ":113607,"æ¦Ĥè¿°":113608,"åĪĨåĮº":113609,"æŃ»åIJİ":113610,"ç«ĻçĿĢ":113611,"主è¦ģé¢Ĩ导":113612,"åIJĮåŁİ":113613,"大æłij":113614,"对åѦçĶŁ":113615,"社ä¼ļä¿ĿéĻ©":113616,"å¢ŀèµĦ":113617,"主人åħ¬":113618,"å®£ä¼łæķĻèĤ²":113619,"æĸĩåĮĸ交æµģ":113620,"客æĪ¶":113621,"çŁ¥åIJįåĵģçīĮ":113622,"æ»ŀåIJİ":113623,"äºĴè¡¥":113624,"æĦŁäºº":113625,"åī¿":113626,"åIJİ代":113627,"äºī龸":113628,"æķĻèĤ²åٹè®Ń":113629,"éĿĻèĦī":113630,"ä¹ıåĬĽ":113631,"说åĩºæĿ¥":113632,"çİĭèĢħèį£èĢĢ":113633,"åĢ«":113634,"åįĩèµ·":113635,"éķģ":113636,"åĩºæ¸¸":113637,"éĢļè¡Įè¯ģ":113638,"å·¥ä½ľå²Ĺä½į":113639,"åĮłå¿ĥ":113640,"æĭ¿æĿ¥":113641,"æ´Ĺè¡£æľº":113642,"æĪijä¸įæĥ³":113643,"é¢Ħè§ģ":113644,"æ¼Ķ示":113645,"ä¸ĢçĽ´æ²¡æľī":113646,"è·Łå¥¹":113647,"对çħ§æ£ĢæŁ¥":113648,"ç°¿":113649,"ä¸ĵå¿ĥ":113650,"è®®äºĭ":113651,"åīį端":113652,"åį¡å°Ķ":113653,"è¨Ńå®ļ":113654,"设置äºĨ":113655,"å©ļ纱":113656,"åľ¨åĽ½å¤ĸ":113657,"åı³ä¾§":113658,"è³¼çī©":113659,"å¥ĩèij©":113660,"å¢ŀåĬłå̼":113661,"好è¿IJ":113662,"åĽ½éĻħæľºåľº":113663,"ä¸ĭç§°":113664,"缮åīį为æŃ¢":113665,"ç¥ŀä»Ļ":113666,"å®ĥåı¯ä»¥":113667,"æ¾Ħ
æ¸ħ":113668,"èĥ½ä½¿":113669,"游åĩ»":113670,"游åĩ»éĺŁ":113671,"åĩ¹":113672,"ä¸įè¦ģåĨį":113673,"åĨ³èĥľ":113674,"åĨ³æĪĺ":113675,"æĭ½":113676,"缼åħ¸":113677,"å¾Īå¥½åľ°":113678,"æľĢç¾İçļĦ":113679,"åĥļ":113680,"å·´åŁº":113681,"å·´åŁºæĸ¯åĿ¦":113682,"æľĢéĢĤåIJĪ":113683,"é«ĺèģĮ":113684,"ä¿Ŀå§Ĩ":113685,"æİĪæ¬Ĭ":113686,"说åΰè¿ĻéĩĮ":113687,"æİ¨å¼Ģ":113688,"çİĩè¾¾":113689,"ä¸īåĪĨä¹ĭä¸Ģ":113690,"管çIJĨä¸Ńå¿ĥ":113691,"交æ±ĩ":113692,"森æŀĹåħ¬åĽŃ":113693,"å¾Ģä¸Ĭ":113694,"éªijè¡Į":113695,"æį®æŃ¤":113696,"纽带":113697,"ç»ŀ":113698,"ä¸īæĸ¹":113699,"æĦıä¹īä¸ĬçļĦ":113700,"æİ¨è¿Ł":113701,"å¤ļæł·æĢ§":113702,"æĥ³èµ·äºĨ":113703,"æİĴåIJį第":113704,"å·¨é¢Ŀ":113705,"æĿŁç¼ļ":113706,"å®īå®ļ":113707,"äºĭ實":113708,"çļĦæĦ¿æľĽ":113709,"è£ħå¤ĩåζéĢł":113710,"人å±ħ":113711,"人å±ħçݯå¢ĥ":113712,"å¿ĺè®°äºĨ":113713,"该游æĪı":113714,"楼ä¸Ĭ":113715,"å¼Ģä¼ļ":113716,"æģ³":113717,"åıĭæĥħéĵ¾æİ¥":113718,"ç¡Ĵ":113719,"ç»ĻäºĪäºĨ":113720,"åģı好":113721,"åĵī":113722,"交éĢļå®īåħ¨":113723,"éĽĮ":113724,"æ²»çĹħ":113725,"è§īå¾Ĺå¾Ī":113726,"衬衫":113727,"å¿ĥæĦ¿":113728,"æ´ŀå¯Ł":113729,"æ°ijæ£Ģå¯ŁéĻ¢":113730,"æıIJçĤ¼":113731,"è¦ģè¿Ľä¸ĢæŃ¥":113732,"驾车":113733,"æĻ®æĥł":113734,"æķĸ":113735,"ç¦ıéŁ³":113736,"éĢģè¾¾":113737,"è§ĦåĪĴ设计":113738,"æīĭå¥Ĺ":113739,"å®īä¿Ŀ":113740,"è¿ĺä¸įå¦Ĥ":113741,"åīįè¿°":113742,"æłĩè®°":113743,"ç´§æİ¥çĿĢ":113744,"æ§IJ":113745,"æ·±æ·±åľ°":113746,"满满çļĦ":113747,"æĺ¥è¿IJ":113748,"æĹ¥äº§":113749,"çαæĬ¤":113750,"åħ¨æĹ¥":113751,"åħ¨æĹ¥åζ":113752,"转åĬ¨":113753,"ç¥Ńç¥Ģ":113754,"ä¹°ä¸ľè¥¿":113755,"å¯¹æľªæĿ¥":113756,"æ¶Ī失äºĨ":113757,"åļ´éĩį":113758,"ä¸īæĿ¡":113759,"éħ¸å¥¶":113760,"éĽĨåĽ¢èĤ¡ä»½":113761,"西路":113762,"åıªå¾Ĺ":113763,"éĢģåİ»":113764,"çĭłæĬĵ":113765,"åĪ©ç͍çİĩ":113766,"ä¸ĭåij¨":113767,"å¥ĭæĪĺ":113768,"æĺ¥èĬĤæľŁéĹ´":113769,"è´Łè´£ä»»":113770,"æĺĤè´µ":113771,"尾巴":113772,"ç¯ĩæĸĩ竳":113773,"åħ®":113774,"è®ĬæĪIJ":113775,"å¹¹":113776,"çĻ»éĮĦ":113777,"ä½Ī":113778,"å·¥åĮł":113779,"åĵªæĢķæĺ¯":113780,"åıįåĵį":113781,"ç§ĥ":113782,"åĩºè½¨":113783,"æĹ¥åĨĽ":113784,"åIJįèªī":113785,"æķıéĶIJ":113786,"æľįåĬ¡æ°´å¹³":113787,"çħ§å°Ħ":113788,"ä¼Ĭæĭī":113789,"ä¼Ĭæĭīåħĭ":113790,"åĨħéĺģ":113791,"èĬĴæŀľ":113792,"ä¸ĩåĪĨ":113793,"éĢĢæ¬¾":113794,"缴æĴŃéĹ´":113795,"æĭ¿åΰäºĨ":113796,"å°İèĩ´":113797,"空æ°Ķä¸Ń":113798,"客æĪ·æľįåĬ¡":113799,"è¿IJåĬ¿":113800,"ç»ĵçŁ³":113801,"ä¸įå¿ħè¦ģçļĦ":113802,"èĥ¶åĽĬ":113803,"çIJĨä¼ļ":113804,"æĬ½åĩº":113805,"空æ°Ķè´¨éĩı":113806,"æ¯ķ竣æĺ¯":113807,"åĨ·æ¼ł":113808,"ä¸Ģå¦Ĥ":113809,"ä¸Ģå¦ĤæĹ¢":113810,"ä¸Ģå¦ĤæĹ¢å¾Ģ":113811,"æĤ£çĹħ":113812,"åĬłæĮģ":113813,"èµŀåĬ©":113814,"é«®":113815,"åij½ä¸Ń":113816,"æĦıä¹īä¸Ĭ":113817,"ä¸įèĪį":113818,"å쬦":113819,"æīĵæī«":113820,"æĺŁåħī":113821,"æĸŃè£Ĥ":113822,"åħ¨å¥Ĺ":113823,"è£ģå®ļ":113824,"马åħĭæĢĿ":113825,"骨骼":113826,"ä¸Ģè·¯ä¸Ĭ":113827,"å®ļæĹ¶":113828,"å·¥ç¨ĭæĬĢæľ¯":113829,"å½¼å¾Ĺ":113830,"æ±²åıĸ":113831,"ä¸Ģè§Ī":113832,"åIJµæŀ¶":113833,"ä¿Ĺç§°":113834,"æłªæ´²":113835,"åºŁæĹ§":113836,"è¡ĮæĺŁ":113837,"åıijçĶŁåıĺåĮĸ":113838,"é¦ĸä»ĺ":113839,"åįģåĪĨéĩįè¦ģ":113840,"æĬĬè¿ĻäºĽ":113841,"ç¥ŀå·ŀ":113842,"æıIJä¾ĽåķĨ":113843,"楷":113844,"å±İ":113845,"çĬ¶åħĥ":113846,"åŁİå¢Ļ":113847,"çľĭä¸Ģçľĭ":113848,"çĶŁäº§èĥ½åĬĽ":113849,"åŁºæľ¬ä¸Ĭéĥ½":113850,"æīĵæī°":113851,"åĪĿ次":113852,"åĩºç¤º":113853,"åħ¶ä¸Ńä¸Ģ个":113854,"çĶŁæĢģç³»ç»Ł":113855,"æīĭæİĮ":113856,"æµİåįĹå¸Ĥ":113857,"åľĭåħ§":113858,"æŃ£å̼":113859,"å¹¾ä¹İ":113860,"æİ¨èįIJéĺħ读":113861,"è¿Ń代":113862,"è°ĥä¾ĥ":113863,"饮åĵģ":113864,"å¢Ļä½ĵ":113865,"åıĺçݰ":113866,"äºĨ好":113867,"äºĨ好åĩł":113868,"ä¸įçķĻ":113869,"çβ":113870,"å°½æĹ©":113871,"æŃ£åľ¨è¿Ľè¡Į":113872,"åĩºéĻ¢":113873,"æĿĢ害":113874,"æıIJ款":113875,"åıijå±ķ空éĹ´":113876,"åīį身":113877,"ä¸įæĸŃå¢ŀ强":113878,"æ·±å±Ĥ次":113879,"容纳":113880,"éĤ£ä»
½":113881,"å·¥ä½ľæķĪçİĩ":113882,"æľ¬åĽ½":113883,"失èIJ½":113884,"æŃ£åĽłä¸º":113885,"èĬĤæ°´":113886,"ä¸ĭä¸Ģ代":113887,"çłĶåıijä¸Ńå¿ĥ":113888,"ä¸įçIJĨ":113889,"å®Į好":113890,"ä¿ĿæĬ¤åĮº":113891,"ç»ĵæŀĦè°ĥæķ´":113892,"å¥łå®ļ":113893,"宣称":113894,"éĺ»æĮ¡":113895,"æĴ¤ç¦»":113896,"ä¸įæĸ¹ä¾¿":113897,"åĴķ":113898,"ç¬ijäºĨç¬ij":113899,"çݯå¢ĥ污æŁĵ":113900,"ä½ıæĪ·":113901,"ç»Ŀç¼ĺ":113902,"éϤå°ĺ":113903,"é«ĺå°ļ":113904,"æĢİä¹Īåı¯èĥ½":113905,"éĿ¢èī²":113906,"åķĨæ¥Ń":113907,"çĸ¹":113908,"èµĦæºIJä¼ĺåĬ¿":113909,"è¾ĸåĮºåĨħ":113910,"èĢĢçľ¼":113911,"æij§æ¯ģ":113912,"ä¸ĸçķĮç»ıæµİ":113913,"å¼ķæĿ¥":113914,"ä¸ĢåĪĻ":113915,"æĭĩæĮĩ":113916,"æĬµå¾¡":113917,"éĽį":113918,"åĩĨå¤ĩå·¥ä½ľ":113919,"çıłä¸īè§Ĵ":113920,"ç¨ĢåľŁ":113921,"èİ·å¾ĹæĦŁ":113922,"æĪIJåĬŁçİĩ":113923,"ç½ij约":113924,"ç½ij约车":113925,"èĦIJ":113926,"æķ¬ä¸ļ":113927,"éĩijä»·":113928,"ç²¾é«ĵ":113929,"买车":113930,"åħ³åı£":113931,"åĨįå¤ļ":113932,"æŀģåĵģ":113933,"åIJĦå®¶":113934,"举æĬ¥ç͵è¯Ŀ":113935,"èļĬ":113936,"æĸ¹å½¢":113937,"ç§ijæĬĢæĪIJæŀľ":113938,"æľĢ好æĺ¯":113939,"éĹ®åĢĻ":113940,"红éħĴ":113941,"åĽĽç§į":113942,"ç¿Ĵæħ":113943,"ç¿Ĵæħ£":113944,"åŀ¦":113945,"éĤ£åıª":113946,"é¢ĨæĤŁ":113947,"çľ¼éĥ¨":113948,"æ³°å®ī":113949,"ä»»æľŁ":113950,"磨æįŁ":113951,"æĽ¿æį¢":113952,"åħ¸ç¤¼":113953,"符åIJĪæĿ¡ä»¶":113954,"è¿ĺæľīä»Ģä¹Ī":113955,"åħ±äº«åįķ车":113956,"åı¯åĪĨ为":113957,"åŃ£åIJİ":113958,"åŃ£åIJİèµĽ":113959,"举èİŀå¸Ĥ":113960,"å¿ĥæĦı":113961,"æīŃæĽ²":113962,"ä½ľä¸ºä¸Ģç§į":113963,"è¿Ļéĥ¨åĪĨ":113964,"åıĤä¸İåΰ":113965,"ç½ijçIJĥ":113966,"實çı¾":113967,"ç»Ħè£ħ":113968,"åIJijå¤ĸ":113969,"å·¥ä½ľæĸ¹æ¡Ī":113970,"åįģæĿ¡":113971,"課ç¨ĭ":113972,"颤æĬĸ":113973,"åĵ©":113974,"éĤ®å¯Ħ":113975,"亢":113976,"åħįè²»":113977,"秤":113978,"åºĶæĢ¥ç®¡çIJĨ":113979,"åĽĽäºĶ":113980,"éºĴéºŁ":113981,"å¾ĴæŃ¥":113982,"è¨ĺå¾Ĺ":113983,"çĴIJ":113984,"æĺ¯åIJ¦ä¼ļ":113985,"æĦıè§ģåıįé¦Ī":113986,"éļ¾æĢª":113987,"çªį":113988,"交æİ¥":113989,"两åįĥ":113990,"æĩīç͍":113991,"æľŁéĸĵ":113992,"æIJ¬åΰ":113993,"è®®é¢ĺ":113994,"碧æ¡Ĥ":113995,"碧æ¡ĤåĽŃ":113996,"åģļçĶŁæĦı":113997,"éĻĽä¸ĭ":113998,"è·ĭ":113999,"èĢģ人家":114000,"带åĽŀ":114001,"æŀ¸æĿŀ":114002,"è¡Įéķ¿":114003,"åĨħ容ç®Ģä»ĭ":114004,"梢":114005,"æĮĩæİ§":114006,"éĩįçĹĩ":114007,"ç½ijåıĭ们":114008,"çı¾ä»£":114009,"类产åĵģ":114010,"å¥Ķæ³¢":114011,"渺":114012,"ç²īç¢İ":114013,"è¿Ļåıªæĺ¯":114014,"æ£Ģå¯Łæľºåħ³":114015,"é½Ĭ":114016,"æĪ¿ç§Ł":114017,"å¾·æĭī":114018,"å²ģ以ä¸Ĭ":114019,"纯åĩĢ":114020,"åĪĨå¸ĥåľ¨":114021,"èĥ½å¾Ĺåΰ":114022,"ä¸įå°½":114023,"ç«ŀä»·":114024,"çļĦ带é¢Ĩ":114025,"çļĦ带é¢Ĩä¸ĭ":114026,"ä¸Ńè᝿ĿIJ":114027,"æĿijéķĩ":114028,"ä¸įåı¯éģ¿åħį":114029,"éľ²å¤©":114030,"å°ıå§ijå¨ĺ":114031,"çī©ä»¶":114032,"èijĹä½ľæĿĥ":114033,"æĭĺçķĻ":114034,"éĥ½è§īå¾Ĺ":114035,"æĽ²æĬĺ":114036,"æ·»åĬłåīĤ":114037,"åı¬åĽŀ":114038,"æīİå®ŀæİ¨è¿Ľ":114039,"æĬĦè¢Ń":114040,"åĮĸ身":114041,"缴èIJ¥":114042,"ä¹Łå¸ĮæľĽ":114043,"èį£èªīç§°åı·":114044,"åįĸç»Ļ":114045,"æľīä¸įåIJĮçļĦ":114046,"å¥ĩçī¹":114047,"éĥ½è®¤ä¸º":114048,"å¦ŀ":114049,"æĪIJéķ¿ä¸º":114050,"辩æĬ¤":114051,"主æķĻç»ĥ":114052,"æ³ķå¸ĪèģĮä¸ļ":114053,"æ¤įåħ¥":114054,"索尼":114055,"åIJ¬è¿ĩ":114056,"ä¹łæĥ¯äºĨ":114057,"夺åıĸ":114058,"éŁĵ":114059,"æľ¬è´¨ä¸Ĭ":114060,"æİ¥åĬĽ":114061,"äºij端":114062,"è¦ģåģļ好":114063,"è·¯çģ¯":114064,"åįıåIJĮåıijå±ķ":114065,"æľīå¾ħ":114066,"æ°´åŁŁ":114067,"æIJľçĭIJé¦ĸ页":114068,"è´¨éĩıå®īåħ¨":114069,"åįģäºĮäºĶ":114070,"åĵ®åĸĺ":114071,"èĵ¬åĭĥåıijå±ķ":114072,"åIJį声":114073,"身亡":114074,"çİĭåºľ":114075,"åİŁåĪĻä¸Ĭ":114076,"çĥĺå¹²":114077,"éģĹæ¼ı":114078,"éĿ¢çĽ®":114079,"åĽ½ä¼ļ":114080,"ä¸Ģ缴éĥ½æĺ¯":114081,"æľīä¸Ģä½į":114082,"éħįæľī":114083,"éĻªçĿĢ":114084,"ä¼ģåĽ¾":114085,"æĮīä¸ĭ":114086,"èĵĿåĽ¾":114087,"æ©ĺ":114088,"大å¤ļæĺ¯":114089,"辩论":114090,"æĹĭå¾ĭ":114091,"æĬ¥éĢģ":11409
2,"æĿ¡è§Ħå®ļ":114093,"åĬ¨éĿĻ":114094,"åĮĪ奴":114095,"æĭľè®¿":114096,"ä¸ĢåĪĢ":114097,"ä»ĸçŁ¥éģĵ":114098,"主æĿĥ":114099,"ä»ĸæĽ¾":114100,"æĴŃç§į":114101,"å£ģåŀĴ":114102,"çī¢è®°ä½¿åij½":114103,"åľ¨è¿Ļæĸ¹éĿ¢":114104,"æīĭèħķ":114105,"æĶ¯æŀ¶":114106,"ä¾Ĩèĩª":114107,"éĩįå¡ij":114108,"å¤ļå±Ĥ次":114109,"ä»ĭè´¨":114110,"éĿ¢åŃĶ":114111,"潮湿":114112,"åİ¿åŁŁ":114113,"游æĪıå½ĵä¸Ń":114114,"å£ŀ":114115,"åĪĹåĩº":114116,"èµĽåĮº":114117,"å¤ļåįĬ":114118,"éĩįçĤ¹å·¥ä½ľ":114119,"æĪij们å¿ħé¡»":114120,"æŁıæŀĹ":114121,"é²ģèĥ½":114122,"æĸ½å±ķ":114123,"åIJĦåĮº":114124,"åħįç¨İ":114125,"èµĽåIJİ":114126,"æľĢéĩįè¦ģ":114127,"ä¸Ģ个好çļĦ":114128,"è¿Ŀæ³ķè¿Ŀè§Ħ":114129,"äºĨè§£æĽ´å¤ļ":114130,"æķ¬è¯·":114131,"ç¬ijçĿĢ说":114132,"ä¸įæĸŃåıijå±ķ":114133,"æijĦå½±å¸Ī":114134,"以éĺ²":114135,"çĤ¸å¼¹":114136,"声åĵį":114137,"ç¤ģ":114138,"æĩ¿":114139,"èĪĨæĥħ":114140,"èĩªçĶ±è´¸æĺĵ":114141,"æķıæį·":114142,"ä¸ī大éĺ¶æ®µ":114143,"èĭĶ":114144,"æĹºåŃ£":114145,"ä¸į满æĦı":114146,"微信åı·":114147,"修为":114148,"çł´è£Ĥ":114149,"éĢĥ离":114150,"æ¯ıèĤ¡":114151,"è¾¾ä¸įåΰ":114152,"æ¯ıå¹´éĥ½":114153,"çģ¯ç¬¼":114154,"æŃ¤åŁºç¡Ģä¸Ĭ":114155,"åĥı个":114156,"åĪĨ娩":114157,"æĻ¾":114158,"ä¸įèĩ³äºİ":114159,"红线":114160,"误解":114161,"ä¸ľè·¯":114162,"æ·®å®ī":114163,"产åѦ":114164,"产åѦçłĶ":114165,"è»ĭ":114166,"è»ĭçĹħ":114167,"åīįæıIJæĺ¯":114168,"æ¯ıä¸Ģ天":114169,"ä¸ĥ大":114170,"æłijåı¶":114171,"èµ°å¾Ĺ":114172,"è¿Ļ两ç§į":114173,"æİıåĩº":114174,"æİIJ":114175,"é¢Ĩ导èĢħ":114176,"ä¸Ģæľµ":114177,"个å¤ļæľĪ":114178,"ä¸Ńåħ³":114179,"ä¸Ńåħ³æĿij":114180,"课åłĤæķĻåѦ":114181,"大åĴĸ":114182,"éģĭç͍":114183,"è¯ļæĦı":114184,"ç»ĦåĽ¾":114185,"è¯ķçĿĢ":114186,"ä¹Ķæ²»":114187,"è¿ĺä¸įæĺ¯":114188,"æľīæĽ´å¥½çļĦ":114189,"åIJİå¤ĩ":114190,"æĸ°çĶŁåĦ¿":114191,"æ°Ķè¡Ģ":114192,"æ²¥éĿĴ":114193,"å±ıéļľ":114194,"æ¥ŃåĭĻ":114195,"æĪij以为":114196,"éķ¿çĽ¸":114197,"èĢģçΏ":114198,"éķĩæ±Ł":114199,"æľºæ¢°è®¾å¤ĩ":114200,"ä½Ĩæĺ¯å¦Ĥæŀľ":114201,"åĿļå®ļä¸į":114202,"åĿļå®ļä¸įç§»":114203,"åĨ²éĶĭ":114204,"ç®Ģ缴æĺ¯":114205,"åĤ¨èĵĦ":114206,"纯ç͵åĬ¨":114207,"漫æŃ¥":114208,"举起":114209,"æģ¶æĢ§":114210,"è¨ĺéĮĦ":114211,"èģĮèĥ½éĥ¨éŨ":114212,"åħ¨éķ¿":114213,"鼻è¦ĸ":114214,"ä¹³èħº":114215,"ä½ķå¤Ħ":114216,"æ¶Īæŀģ":114217,"æŃ£å¤Ħäºİ":114218,"å®īå®ģ":114219,"æĪIJéķ·":114220,"åıĻè¿°":114221,"æºĥçĸ¡":114222,"ä½Ĩçİ°åľ¨":114223,"女æĺŁ":114224,"å©´å¹¼åĦ¿":114225,"æĬķèŀįèµĦ":114226,"éĹ®éĹ®":114227,"æıŃå¼Ģ":114228,"è¯ı":114229,"åIJįå½ķ":114230,"èĺijèıĩ":114231,"åIJĬé¡¶":114232,"æ¹ĸåĮº":114233,"åįĸåľº":114234,"建ç¯":114235,"建ç¯ī":114236,"èݽ":114237,"åIJ¬åIJ¬":114238,"ç«ŀäºīä¼ĺåĬ¿":114239,"åĩºä»»":114240,"æľī两ç§į":114241,"æ©±æŁľ":114242,"褪":114243,"è¯ķåį·":114244,"ç»ıæµİæĬĢæľ¯":114245,"æ·±å±Ĥ":114246,"éĩįè¦ģåĨħ容":114247,"é£İæİ§":114248,"çĬ¶æĢģä¸ĭ":114249,"éĥ¨éĸĢ":114250,"广汽":114251,"è§Ĥæij©":114252,"éģĹçķĻ":114253,"转账":114254,"æĮģä»ĵ":114255,"æĢ»è®¡":114256,"åľĺéļĬ":114257,"æĪ¿ä¸ľ":114258,"éĺĢéŨ":114259,"åħ¬åħ³":114260,"åħ³åĪĩ":114261,"èĤĺ":114262,"æķ¸æĵļ":114263,"ä¸īåįģå¹´":114264,"è§ģè¯ģäºĨ":114265,"å±Ĩ":114266,"çģ°å°ĺ":114267,"æ¦ľé¦ĸ":114268,"è¦ĨçĽĸçİĩ":114269,"ä»Ļ女":114270,"çĶŁäº§æĢ»":114271,"çĶŁäº§æĢ»å̼":114272,"æĪ¿è´·":114273,"æ±ŁåĮº":114274,"åħħçĶµæ¡©":114275,"çϾåIJĪ":114276,"確èªį":114277,"转移åΰ":114278,"éĥ½æĹłæ³ķ":114279,"纪念é¦Ĩ":114280,"çŃ¾ç½²äºĨ":114281,"å¹¶ä¸įå¤ļ":114282,"æĮł":114283,"ä¸į太好":114284,"ä¸ĸ代":114285,"误导":114286,"é«ĺ峰论åĿĽ":114287,"åħ¼å®¹":114288,"龸æ°Ķ":114289,"æĿ¥è®¿":114290,"æīĢ带æĿ¥çļĦ":114291,"æĺ¯ä¸Ģéĥ¨":114292,"æĻļé¥Ń":114293,"åİĨ代":114294,"åIJ¦åīĩ":114295,"ä¹ħä¹ħ":114296,"æľīæķĪæľŁ":114297,"诱åıij":114298,"æĢ»èµĦ产":114299,"æľ¬èº«å°±æĺ¯":114300,"çĶŁäº§åİĤå®¶":114301,"æĹ¶é«¦":114302,"èĢIJç͍":114303,"ä»İå°ıå°±":114304,"æĿ¡çº¦":114305,"èĭ±åĭĩ":114306,"ä¿Ĺè
¯Ŀ说":114307,"寺åºĻ":114308,"å¿ĥçIJĨåģ¥åº·":114309,"ä»Ģä¹Īäºĭæĥħ":114310,"æ±īåŃĹ":114311,"çķĻä½ı":114312,"åįĹè·¯":114313,"ä¸ī项":114314,"丢äºĨ":114315,"æĥ³åΰäºĨ":114316,"çѹéĽĨ":114317,"éĻĦåĬłå̼":114318,"西è£ħ":114319,"ä¹ĭä½ľ":114320,"åģļçļĦäºĭ":114321,"çķ¶æĤ¨":114322,"çķ¶æĤ¨åľ¨":114323,"é¦ĸ款":114324,"ä¸įåľ¨ä¹İ":114325,"å·¥ç¨ĭæĸ½å·¥":114326,"éļIJéļIJ":114327,"åıĺ身":114328,"沿éĢĶ":114329,"æĤłæĤł":114330,"ä¿Ŀæļĸ":114331,"çĶŁæ´»åŀĥåľ¾":114332,"渤海":114333,"æŃ¦ä¾ł":114334,"女主è§Ĵ":114335,"举ä¾ĭ":114336,"æ·¨":114337,"çϽé¢Ĩ":114338,"è£ĻåŃIJ":114339,"è¿Ķè¿ĺ":114340,"è¿Īåĩº":114341,"é¾ĻéŨ":114342,"ç»ıæµİä½ĵ":114343,"æĶ¶å®ĺ":114344,"çķĮéĻIJ":114345,"è·³åĩº":114346,"åįĩå̼":114347,"绵éĺ³":114348,"çĸ¤çĹķ":114349,"çľĭæ¸ħ":114350,"æĭĴçµķ":114351,"è¥Ħéĺ³":114352,"课å¤ĸ":114353,"åŃIJåŃĻ":114354,"æŃĮè¯į":114355,"æĪIJåIJį":114356,"溶液":114357,"åĦĴå®¶":114358,"åķĨä¸ļåĮĸ":114359,"辨åĪ«":114360,"å¤ļè¾¾":114361,"ç½ijåºĹ":114362,"ä¹Ŀ大":114363,"ä¹Ŀ大精ç¥ŀ":114364,"æŃ¤ä¸¾":114365,"è¿ŀè½½":114366,"ä¸ĢåĢĭ人":114367,"è³½":114368,"æ¶µçĽĸäºĨ":114369,"è¦ıåĬĥ":114370,"åĽ½æĥħ":114371,"åį«çĶŁåģ¥åº·":114372,"积æŀģåĵįåºĶ":114373,"æĭĻ":114374,"åζåĬ¨":114375,"æĥ³è±¡åĬĽ":114376,"çļĦä¹IJè¶£":114377,"å¼łå®¶çķĮ":114378,"å´İ":114379,"éĩįåŀĭ":114380,"å¤ĸå¢Ļ":114381,"æĶ¾åѦ":114382,"è®¤çľŁåŃ¦ä¹ł":114383,"è´¬å̼":114384,"æ³ķæ¡Ī":114385,"æĬ¤èĤ¤åĵģ":114386,"éĻ·åħ¥äºĨ":114387,"请æĤ¨":114388,"åŀ¢":114389,"æķĻèĤ²èµĦæºIJ":114390,"交æĺĵå¹³åı°":114391,"æĹ¶è£ħ":114392,"ä¼łæŁĵçĹħ":114393,"æ¹ĸæ³Ĭ":114394,"èµĦ管":114395,"åݨå¸Ī":114396,"éĹľéį":114397,"éĹľéįµ":114398,"åĵĪåĵĪåĵĪ":114399,"çĽĹçªĥ":114400,"çĶľç¾İ":114401,"åºĦåĽŃ":114402,"缮åīįå·²ç»ı":114403,"è¾¹ä¸Ĭ":114404,"çģ«èĬ±":114405,"æĬ¥è®°èĢħ":114406,"æģĭæĥħ":114407,"ç´§åĩij":114408,"æ°´æµģ":114409,"è¿Ļæĺ¯æĪij们":114410,"æ³¥åľŁ":114411,"æĽ¾ä»»":114412,"æĸ¹è¨Ģ":114413,"åij¨åħŃ":114414,"åı·æ¥¼":114415,"ä¼ijåģĩ":114416,"误ä¼ļ":114417,"åĽ½åĢº":114418,"åīįå¤ķ":114419,"ä¸¤å¼ł":114420,"éĹ«":114421,"éŃĶ鬼":114422,"æĬĬæĮģ":114423,"èĬĤèĥ½çݯä¿Ŀ":114424,"æ¸ħæ´ģèĥ½æºIJ":114425,"èĤ¥æĸĻ":114426,"é«ĺé¢ij":114427,"å°±æľīäºĨ":114428,"交ä¼ļ":114429,"没éĴ±":114430,"éĽħæĢĿ":114431,"è¦ģåıĬæĹ¶":114432,"åŁ¹åħ»åѦçĶŁ":114433,"欣åĸľ":114434,"çĥŃæ°´åύ":114435,"é¾Ļæ¹ĸ":114436,"äºĮ楼":114437,"æĸ°æµªè´¢ç»ı":114438,"æĸ°åĬ¨èĥ½":114439,"èµ£å·ŀ":114440,"æĭ³å¤´":114441,"æµģåIJij":114442,"ä¹Łæĺ¯å¾Ī":114443,"åıijåĶ®":114444,"ä¸ŃåIJ«æľī":114445,"åIJĵå¾Ĺ":114446,"å·¨æĺŁ":114447,"æĹłæīĢè°ĵ":114448,"æ¯ĽåŃĶ":114449,"åħ¬åħ±äº¤éĢļ":114450,"çĤİçĥŃ":114451,"èµ·èįī":114452,"åĬłçĽŁåķĨ":114453,"说ä¸įåĩº":114454,"大åѦæ¯ķä¸ļ":114455,"å·¥ä¸ļåĽŃ":114456,"éłĺåŁŁ":114457,"åºĨåħ¸":114458,"æµģ产":114459,"èģ²éٳ":114460,"ä¼¼ä¹İæĺ¯":114461,"è´§æºIJ":114462,"æ·±åĪĩ":114463,"æ²»çĸĹæĸ¹æ³ķ":114464,"èµĦæºIJéħįç½®":114465,"ç¶²åıĭ":114466,"çĶ£":114467,"亥":114468,"èº²åľ¨":114469,"社ç§ij":114470,"è»Łé«Ķ":114471,"女è£ħ":114472,"æŃ¡è¿İ":114473,"综åIJĪå®ŀåĬĽ":114474,"æł¼å°ĩ":114475,"åħļåı²åŃ¦ä¹ł":114476,"æľĢåŁºæľ¬":114477,"æľĢåŁºæľ¬çļĦ":114478,"çľĭæľĽ":114479,"åıĹè´¿":114480,"ä¸įä»ħèĥ½":114481,"ä½ķå¿ħ":114482,"ä¸Ģ个å°ıæĹ¶":114483,"ç¾Į":114484,"æĭĽæĶ¶":114485,"çĤĴèĤ¡":114486,"æĿijå¹²éĥ¨":114487,"缸çα":114488,"æ½ľèĥ½":114489,"ä¹į":114490,"æĹ¶è¾°":114491,"欣æħ°":114492,"éĵ¶è¡Įä¸ļ":114493,"çĭŃçªĦ":114494,"éĩįçĤ¹é¢ĨåŁŁ":114495,"çݰå®ŀçĶŁæ´»":114496,"éĮ¯èª¤":114497,"æĸ°è§Ħ":114498,"滥ç͍":114499,"æĹ¶ä¸į":114500,"æĹ¶ä¸įæĹ¶":114501,"帳èĻŁ":114502,"ç¨Ģ缺":114503,"åIJij举":114504,"ä¿Ŀåģ¥åĵģ":114505,"çıŃéķ¿":114506,"äºĴåĭķ":114507,"笼罩":114508,"æ½Ľ":114509,"æļĸå¿ĥ":114510,"è½°çĤ¸":114511,"åºĨ幸":114512,"è²Įä¼¼":114513,"æĵº":114514,"èĢIJ磨":114515,"ä¸ĵä¸ļ人士":114516,"ä¸Ģèάéĥ½æĺ¯":114517,"æ¼³å·ŀ":114518,"åħ¨èĩªåĬ¨":114519,"å½ķç͍
":114520,"大è·Į":114521,"æľīæķο̧":114522,"èĩªåĭķ":114523,"ä¸ī个æĸ¹éĿ¢":114524,"港åĮº":114525,"信貸":114526,"éĢļè¯Ŀ":114527,"é«ĺ涨":114528,"æ³Ħæ¼ı":114529,"éħįä¸Ĭ":114530,"åħļå·¥å§Ķ":114531,"被认为":114532,"被认为æĺ¯":114533,"ä¸įä¼ļåĨį":114534,"è°ĥåīĤ":114535,"åıĤèĤ¡":114536,"èĦ±åıij":114537,"å¿łå®ŀ":114538,"åĨħåĪĨæ³Į":114539,"ç¹ģå¿Ļ":114540,"åıĮåĪĽ":114541,"é©»æĿij":114542,"åĪĴç®Ĺ":114543,"éģİä¾Ĩ":114544,"åľ£ç»ı":114545,"èıľé¸Ł":114546,"æĭ¼å¤ļå¤ļ":114547,"ä¸ŃåĽ½æ±½è½¦":114548,"çĥŁèįī":114549,"缴æµģ":114550,"äºĨä¸Ģåı£æ°Ķ":114551,"ä½İæĪIJæľ¬":114552,"æī¾åĽŀ":114553,"èĩªåįij":114554,"總æĺ¯":114555,"æĸĩåĮĸåĪĽæĦı":114556,"天河":114557,"樱æ¡ĥ":114558,"éªijåħµ":114559,"éĩĮéĿ¢æľī":114560,"çİ®":114561,"èĥ½æī¾åΰ":114562,"éĢĥè·ij":114563,"åĪĩå°Ķ":114564,"åĪĩå°Ķ西":114565,"以ä¸ĭæĺ¯":114566,"å²³éĺ³":114567,"çļĦæ¦Ĥçİĩ":114568,"æĬµåζ":114569,"å¸ĪäºĭåĬ¡":114570,"å¸ĪäºĭåĬ¡æīĢ":114571,"åĩĨæĹ¶":114572,"屬æĸ¼":114573,"订è´Ń":114574,"åįłæį®äºĨ":114575,"ä¸ŃéĢĶ":114576,"å°ĭ":114577,"é»ij马":114578,"åİ¿åħ¬å®īå±Ģ":114579,"ä¸ĥæľĪ":114580,"èī²ç´ł":114581,"å¿ĥèĦıçĹħ":114582,"æĹ¶éĻIJ":114583,"æ¯įåħ¬åı¸":114584,"å¹ķåIJİ":114585,"ä¸Ĭæ¦ľ":114586,"å̾åIJijäºİ":114587,"纸ä¸Ĭ":114588,"æ¡ĵ":114589,"éĽĨä½ĵç»ıæµİ":114590,"æĥħå¢ĥ":114591,"è¦ģåģļåΰ":114592,"ç©į極":114593,"åıªæĢķ":114594,"æ¹ĺ西":114595,"çļ±çº¹":114596,"åħ¨åľĭ":114597,"çĦ¡è«ĸ":114598,"好æĦŁ":114599,"åįķä»·":114600,"è¿Ľç¨ĭä¸Ń":114601,"æĺĨä»ij":114602,"åĪĽå®¢":114603,"åħħæĸ¥":114604,"åħĪæĬĬ":114605,"该æĢİä¹ĪåĬŀ":114606,"åĵģå¾·":114607,"åħ¨éĿ¢åıijå±ķ":114608,"è¨ĪåĬĥ":114609,"æĢ»å·¥ä¼ļ":114610,"ä½Ľå±±å¸Ĥ":114611,"æĬĹè¡¡":114612,"å¼Ģåľº":114613,"éĴ±å¸ģ":114614,"åıĭ们":114615,"å«īå¦Ĵ":114616,"ç´¢èµĶ":114617,"è®ĬåĮĸ":114618,"æĮ¤åİĭ":114619,"æĮijè¡ħ":114620,"çŃīä¸Ģæī¹":114621,"æĿ¨æ¬¢":114622,"ä¸ĵå®¶åѦèĢħ":114623,"èĥ½è¾¾åΰ":114624,"èµ°è¿ij":114625,"è´«åĽ°åľ°åĮº":114626,"éĻIJæľŁ":114627,"ä¸į平衡":114628,"åĽ½åĨħå¸Ĥåľº":114629,"èµĽåľº":114630,"éħįèµĦ":114631,"è¦ģèĢĥèĻij":114632,"ä¸ĩåı°":114633,"æľĪæľ«":114634,"éĶ¥":114635,"åŃ«":114636,"æİ¥è§¦åΰ":114637,"åĩºäº§":114638,"æķĻåѸ":114639,"ä½ľå¼Ĭ":114640,"çļĦæľĢåIJİä¸Ģ":114641,"ä¿ĥæĪIJ":114642,"åIJ¸åıĸ":114643,"æ½ľèīĩ":114644,"被éªĹ":114645,"è¾ĵäºĨ":114646,"çĭIJçĭ¸":114647,"åįĩéĻį":114648,"è¿ĻäºĽä¸ľè¥¿":114649,"æĬķèµĦåŁºéĩij":114650,"çĶŁçī©åѦ":114651,"ç½ij绾èIJ¥éĶĢ":114652,"åIJijè®°èĢħ":114653,"èįīåľ°":114654,"æĢ¯":114655,"æľįåĬ¡èĥ½åĬĽ":114656,"éĥģéĹ·":114657,"åįķåĵģ":114658,"å¾Ĺ罪":114659,"æĺĵäºİ":114660,"个å¤ļå°ıæĹ¶":114661,"éĩįä»»":114662,"ä¸Ĭå®ĺ":114663,"æľ¬éĩij":114664,"çı¾åł´":114665,"溢价":114666,"æĺŁè¾°":114667,"æ´»åĬ¨çİ°åľº":114668,"丹麦":114669,"å¸Ŀçİĭ":114670,"æŁ¥æĺİ":114671,"åŃĺåľ¨äºİ":114672,"é¦Ļæ°´":114673,"æĬ½æ£Ģ":114674,"å®ŀéĻħä¸Ĭæĺ¯":114675,"æĸ°å¾ģç¨ĭ":114676,"è´¢åĬ¡ç®¡çIJĨ":114677,"æİĽ":114678,"åĨľåİĨ":114679,"éĥ½èĥ½å¤Ł":114680,"éĤ¯éĥ¸":114681,"çľŁå¯¦":114682,"ç»Ĭ":114683,"åĨµä¸Ķ":114684,"置身":114685,"ç¥Ī祷":114686,"çĿģå¼Ģ":114687,"æĮĩçĤ¹":114688,"å¼Ģæľº":114689,"西å®ģ":114690,"åĮĹ约":114691,"积水":114692,"åĩºåĬ¨":114693,"åıijå±ķ模å¼ı":114694,"转æĬĺ":114695,"èĢĥçĤ¹":114696,"æľīç½ijåıĭ":114697,"è´«åĽ°æĿij":114698,"æĪijä»¬çŁ¥éģĵ":114699,"åĪĨéĶĢ":114700,"å±±èĦī":114701,"æ¯ĶæĭŁ":114702,"ä¼°ç®Ĺ":114703,"æĶ¹å»º":114704,"壮è§Ĥ":114705,"ç§īæĮģ":114706,"æıª":114707,"ç¦Ģ":114708,"åĮĸåѦåĵģ":114709,"ä¸ŃåĽ½åζéĢł":114710,"ä¸Ģæŀ¶":114711,"æīįè¡Į":114712,"æĭĽå¾ħ":114713,"åıĺæį¢":114714,"åīį线":114715,"幸好":114716,"è¿Ļæł·çļĦè¯Ŀ":114717,"å¿ĥè¡Ģ管":114718,"æĢ§çĸ¾çĹħ":114719,"åħ¨èĥ½":114720,"åĪij侦":114721,"ä¿¡æģ¯åıijå¸ĥ":114722,"æĺ¾çĦ¶æĺ¯":114723,"éĿĴéĵľ":114724,"åIJĥä»Ģä¹Ī":114725,"ç͵价":114726,"æ³ķå¾ĭè§Ħå®ļ":114727,"çħ²":114728,"çĵ·åύ":114729,"èĤīç±»":114730,"æıĴåħ¥":114731,"åĹľ":114
732,"è¿Łè¿Ł":114733,"ä¸ĢçĤ¹éĥ½ä¸į":114734,"è¿ĺåĮħæĭ¬":114735,"èĪįä¸įå¾Ĺ":114736,"æłĩå¿ĹæĢ§":114737,"æľĪ以æĿ¥":114738,"ç³ĸæŀľ":114739,"éĥ½åºĶ该":114740,"çݯå¢ĥåį«çĶŁ":114741,"èĪªè¡Į":114742,"éĥijéĩį":114743,"ç½ijæĬķ":114744,"åįģä½³":114745,"ç§ģä¸ĭ":114746,"æļ´è·Į":114747,"åĬłå¿«åıijå±ķ":114748,"产åĵģçłĶåıij":114749,"åĪĽéĢłåĩº":114750,"æĢ»è§īå¾Ĺ":114751,"åºķçĽĺ":114752,"èķĬ":114753,"åĩºå¸Ńä¼ļè®®":114754,"主æĿ¿":114755,"æĹ¥æĻļéĹ´":114756,"å®ĺæĸ¹å¾®åįļ":114757,"å¼ķç͍æĹ¥æľŁ":114758,"åķĻæİĪ":114759,"ç͵åŃIJ产åĵģ":114760,"è¡°éĢĢ":114761,"çķĻåŃĺ":114762,"çģ«åĬĽ":114763,"çĴ§":114764,"çļĤ":114765,"åħ¼åħ·":114766,"éĩįè¿Ķ":114767,"é¢Ĩçķ¥":114768,"åĪĩéϤ":114769,"åĨįçĶŁèĥ½æºIJ":114770,"å®ŀåľ¨å¤ª":114771,"çIJĨ论ä¸Ĭ":114772,"ä¸īå±Ĥ":114773,"ä¸ĸçķĮåIJĦåĽ½":114774,"å®ľæĺĮ":114775,"èĢ³è¾¹":114776,"宽æķŀ":114777,"æ±īæĹı":114778,"çϽçϽ":114779,"è¿ĻéĩĮéĿ¢":114780,"çĶŁæ´»ä¹łæĥ¯":114781,"èµŀèµı":114782,"çͷ士":114783,"ä¸Ńä¿Ħ":114784,"车祸":114785,"åīĤéĩı":114786,"éϤåİ»":114787,"左边":114788,"çŃijçī¢":114789,"çīĽå¸Ĥ":114790,"å®¶åĬ¡":114791,"åķĥ":114792,"ç½®æį¢":114793,"ç´«å¤ĸ":114794,"ç´«å¤ĸ线":114795,"å¾Ģåīį":114796,"åĬĽåѦ":114797,"ç´§è·Ł":114798,"缮çļĦåľ¨äºİ":114799,"ç»®":114800,"ç¥Ĥ":114801,"宣è¨Ģ":114802,"äºĮæ°§åĮĸ":114803,"äºĮæ°§åĮĸ碳":114804,"æĹłç¼ĺ":114805,"ç²¾éĢļ":114806,"診":114807,"å¼ķåıijäºĨ":114808,"æľĢåħĪ":114809,"派驻":114810,"ä¸įå¿į":114811,"æĪijçΏ":114812,"å¹´ä¸ĭåįĬå¹´":114813,"æ·ĭå·´":114814,"没éĹ®é¢ĺ":114815,"åºĹåĨħ":114816,"è·ŁæĪij说":114817,"çĶŁäº§çĶŁæ´»":114818,"è§ĤæľĽ":114819,"æ¸į":114820,"被æī§è¡Į":114821,"被æī§è¡Į人":114822,"èĪľ":114823,"æİº":114824,"ä¸Ģç§Ĵ":114825,"èįīåĿª":114826,"åij¼åĴĮ":114827,"åij¼åĴĮ浩":114828,"åij¼åĴĮ浩çī¹":114829,"人æ°ijéĵ¶è¡Į":114830,"çĦķåıij":114831,"è¯ģåĪ¸äº¤æĺĵ":114832,"çķĶ":114833,"æľºèĥ½":114834,"妾":114835,"æĻļå¹´":114836,"å·¥åķĨèģĶ":114837,"åİŁåŀĭ":114838,"è§Ĵ度çľĭ":114839,"æĬ¥ç¤¾":114840,"è¯įæĿ¡":114841,"躲éģ¿":114842,"éĩįåIJ¯":114843,"å¤ķéĺ³":114844,"èĤ¡æĿĥ转让":114845,"åľ¨ä¸Ģ":114846,"åľ¨ä¸ĢæĹģ":114847,"社ä¼ļåĮĸ":114848,"åıijå±ķåİĨç¨ĭ":114849,"æĭĸæ¬ł":114850,"使èĢħ":114851,"ä¸İåIJ¦":114852,"æĸ°å±ĢéĿ¢":114853,"ä»Ĭ天æĪij们":114854,"é½IJèģļ":114855,"对æĪij说":114856,"éĢĴ交":114857,"æľªæĽ¾":114858,"èİĬ":114859,"éĸī":114860,"亲æīĭ":114861,"è§ĴéĢIJ":114862,"æľīé»ŀ":114863,"ç¨İçİĩ":114864,"ä½İ声":114865,"é»ĺå¥ij":114866,"æĻ®æ³ķ":114867,"大ä¸ĵ":114868,"第äºĮ大":114869,"ä½ıåĿĢ":114870,"æĶ¾è¿Ľ":114871,"äºĮæĪĺ":114872,"亲身":114873,"åĽºåĮĸ":114874,"ä¸ĭ乡":114875,"åħ³éĶ®æĬĢæľ¯":114876,"åĽŀæĥ³":114877,"æĬ¥åĪĬ":114878,"æ¶ĤæĬ¹":114879,"èĹıçĿĢ":114880,"ç¥ĿæĦ¿":114881,"åįĩ温":114882,"çĶļèĩ³è¿ŀ":114883,"åħ¬åħĥåīį":114884,"ç¾İæĸ¹":114885,"è¯ļå®ŀ":114886,"æĹłåģ¿":114887,"å¥Ń":114888,"å°ıå¿ĥ翼":114889,"å°ıå¿ĥ翼翼":114890,"两æīĭ":114891,"温馨æıIJ示":114892,"ä»¿çľŁ":114893,"æĥ¶":114894,"èĥ¡åŃIJ":114895,"å·¥ä½ľç«Ļ":114896,"硬çĽĺ":114897,"ç«¿":114898,"åĤ³éĢģ":114899,"åħ¨æł¡":114900,"é²ľæ´»":114901,"çĴĢçĴ¨":114902,"ç»ĵå°¾":114903,"æį¢æĿ¥":114904,"æĪĢ":114905,"ä½İä½į":114906,"ä¸ĩåħĥ以ä¸Ĭ":114907,"åĬłåĪĨ":114908,"æİ¨ä»ĭä¼ļ":114909,"çIJĨèµĶ":114910,"å¾·å°Ķ":114911,"æĬĹè®®":114912,"æ´¼":114913,"åĸ§":114914,"åŁİéĻħ":114915,"å¾Īæ£Ĵ":114916,"人æŃ»äº¡":114917,"ä¼ļå±ķä¸Ńå¿ĥ":114918,"äºĴèģĶäºĴéĢļ":114919,"èĸĦèĨľ":114920,"éĩįé»ŀ":114921,"ç¦ģæ¯Ĵ":114922,"åĨ·ç¬ij":114923,"大家åı¯ä»¥":114924,"é¦ĸ缸":114925,"è¿ijè·Ŀ离":114926,"æµ®çݰ":114927,"ç§ĺè¯Ģ":114928,"èµ·é£ŀ":114929,"æIJ¶":114930,"羣åģĩ":114931,"æģķ":114932,"å°ıåºĹ":114933,"æ°ijçľ¾":114934,"åıijå¸ĥåħ¬åijĬ":114935,"ä¾§éĩį":114936,"å¾ĺå¾Ĭ":114937,"æĢĶ":114938,"æªIJ":114939,"æķ°çĽ®":114940,"åī¯ç§ĺ书éķ¿":114941,"两åı¥":114942,"éļIJçŀĴ":114943,"åıĮåıĮ":114944,"æīĭæĦŁ":114945,"èij¡äº¬":114946,"éģĹå¿ĺ":114947,"鬥"
:114948,"è¿Ļä¸ªåľ°æĸ¹":114949,"说çļĦè¯Ŀ":114950,"å·¡åĽŀ":114951,"è¿Ŀ竳":114952,"æī¾å·¥ä½ľ":114953,"æĶ¯çIJĥéĺŁ":114954,"裡éĿ¢":114955,"æĺ¾ç¤ºåĩº":114956,"èĩ³å°Ĭ":114957,"两级":114958,"åīįæ®µæĹ¶éĹ´":114959,"çĺ¦èº«":114960,"èĤ¢ä½ĵ":114961,"æ¯į親":114962,"æīĭç»Ńè´¹":114963,"汽车è¡Įä¸ļ":114964,"æİ©çĽĸ":114965,"æİ§èĤ¡éĽĨåĽ¢":114966,"åı£å¾Ħ":114967,"æĶ¿çŃĸæİªæĸ½":114968,"海绵":114969,"åħ¨éķĩ":114970,"äºĭåħ³":114971,"å¸Ńæī§è¡Į":114972,"å¸Ńæī§è¡Įå®ĺ":114973,"éĤ£æ¬¡":114974,"åı¯èĥ½åĩºçݰ":114975,"ä¸Ńå¿ĥåŁİå¸Ĥ":114976,"翻身":114977,"ä¹Łç®Ĺ":114978,"ä¾µçķ¥":114979,"åĸĩåıŃ":114980,"æ¯ı次éĥ½":114981,"è§ħ":114982,"éĻ¢éĻ¢éķ¿":114983,"å§ĭäºİ":114984,"èѦåĬ¡":114985,"è᝿ĿIJ":114986,"å±łæĿĢ":114987,"æľ¬èº«å°±":114988,"éļıæĹ¶éļı":114989,"éļıæĹ¶éļıåľ°":114990,"åĶ®åįĸ":114991,"æĹłäººé©¾é©¶":114992,"é¢ħ":114993,"åĵģ質":114994,"åĺ²ç¬ij":114995,"è·ijåİ»":114996,"åħĭéĩĮæĸ¯":114997,"çķ¸å½¢":114998,"修饰":114999,"磩éĺµ":115000,"éŁ³ä¹IJä¼ļ":115001,"æŁ³å·ŀ":115002,"齡":115003,"ä¼ļè°Ī":115004,"æŃ£çīĪ":115005,"ä¹ŁåIJĮæł·":115006,"æļ§æĺ§":115007,"è¡ĮæĶ¿éĥ¨éŨ":115008,"ä¹ĸä¹ĸ":115009,"èĤ¤èī²":115010,"æĹ¶ä»»":115011,"羣åĪĩ":115012,"æľĪä¸ĭ":115013,"æľĪä¸ĭæĹ¬":115014,"举æĸ¹è´¢å¯Į":115015,"è£ħä¿®åħ¬åı¸":115016,"éĢĢè¿ĺ":115017,"åĭĺå¯Ł":115018,"åĵ¥ä¼¦":115019,"åĵ¥ä¼¦æ¯Ķäºļ":115020,"çĭ¬ä¸Ģ":115021,"çĭ¬ä¸ĢæĹł":115022,"çĭ¬ä¸ĢæĹłäºĮ":115023,"è°ĥåij³":115024,"åİĭè¿«":115025,"åħ¨çIJĥæľĢ大":115026,"åł¡éķ¿":115027,"æĽ´ä½İ":115028,"åĪĨéĴŁåIJİ":115029,"åĽŀä¾Ĩ":115030,"åζåīĤ":115031,"åijĬè¯ī大家":115032,"çĤ¹éĴŁ":115033,"åįģä¸īå±Ĭ":115034,"åij¨åĽĽ":115035,"è¿Ļæł·ä¸Ģ":115036,"è¿Ļæł·ä¸ĢæĿ¥":115037,"èĭŁ":115038,"æľĽåİ»":115039,"æĪIJè¯Ń":115040,"å½ĵåį³":115041,"ç¬ij声":115042,"ä¹ĭåĬ¿":115043,"åĪijäºĭæ¡Īä»¶":115044,"æĮĤçĿĢ":115045,"ä½ķç§į":115046,"å°ı游æĪı":115047,"åĽ½å®¶æĪĺçķ¥":115048,"åĨ·åĨ·":115049,"å®ľå®¾":115050,"æIJºç¨ĭ":115051,"è¶ĭäºİ":115052,"åıįçľģ":115053,"常说":115054,"ä¸ĩæĪ·":115055,"åĥµå°¸":115056,"åįĥä¸ĩåĪ«":115057,"åıijçݰéĹ®é¢ĺ":115058,"åı¯çŁ¥":115059,"éŨæĪ·ç½ijç«Ļ":115060,"åģ¥åº·äº§ä¸ļ":115061,"åı³è¾¹":115062,"æµ·è¿IJ":115063,"è¿ijä¹İ":115064,"åĮ»æ²»":115065,"æĢ»ç®Ĺ":115066,"ä¸ĢåĪĨéĴŁ":115067,"æĭ§":115068,"ä¹Łæľīä¸ĢäºĽ":115069,"ä¾Ľç͵åħ¬åı¸":115070,"å»īä»·":115071,"帮ä»ĸ":115072,"æŃ¤æ¬¡æ´»åĬ¨":115073,"åıªèĥ½è¯´":115074,"èĬĭ":115075,"çīĩ段":115076,"åŃĺåľ¨éĹ®é¢ĺ":115077,"ä½łä¼ļåıijçݰ":115078,"è½®å»ĵ":115079,"ç½ijéĢļ":115080,"æ»¨æ±Ł":115081,"æİĪä¿¡":115082,"é»İæĺİ":115083,"ä¸įå±ŀäºİ":115084,"约åįł":115085,"éķ¿æ²Ļå¸Ĥ":115086,"èĥļèĥİ":115087,"åħĥä»¶":115088,"éĻĨåĨĽ":115089,"購買":115090,"æĮĩæľĽ":115091,"å®ŀä¹łçĶŁ":115092,"çī¹çĤ¹æĺ¯":115093,"çıłæ±Ł":115094,"çľĭä¸įåĩº":115095,"ä¸įè§ģäºĨ":115096,"ç¼ī":115097,"éĺµèIJ¥":115098,"åĶIJæľĿ":115099,"没å¿ħè¦ģ":115100,"åĽ½åľŁèµĦæºIJ":115101,"ç»ıæµİåѦ家":115102,"åIJĪèĤ¥å¸Ĥ":115103,"çIJ¢ç£¨":115104,"ç¡®åĪĩ":115105,"åŁİå¸Ĥåıijå±ķ":115106,"çŃ·åŃIJ":115107,"人æ°ijæľįåĬ¡":115108,"满åĪĨ":115109,"è¿·ä¿¡":115110,"ä½ľèĢħæľ¬äºº":115111,"æĸĩ竳æĿ¥æºIJ":115112,"ç«Ļç«ĭ":115113,"æŀĦæĪIJäºĨ":115114,"è¾Ľåĭ¤":115115,"è¶ħ强":115116,"éĶļ":115117,"åīįä¸īåŃ£åº¦":115118,"å°±è§īå¾Ĺ":115119,"å´ĩé«ĺ":115120,"è¶Ĭä¾Ĩ":115121,"è¶Ĭä¾Ĩè¶Ĭ":115122,"å¸ĤåľºèIJ¥éĶĢ":115123,"综åIJĪç´łè´¨":115124,"åŃļ":115125,"侮辱":115126,"äºĮåŃĹ":115127,"å·¥ä½ľä»»åĬ¡":115128,"åı²ä¸ĬæľĢ":115129,"æľĢä¼ĺ":115130,"åIJ©åĴIJ":115131,"表çϽ":115132,"èİ«åIJį":115133,"èİ«åIJįåħ¶":115134,"èİ«åIJįåħ¶å¦Ļ":115135,"å¹£":115136,"åIJĮå¿Ĺ们":115137,"建设çĶ¨åľ°":115138,"åĦĢ":115139,"éħįåģ¶":115140,"弩":115141,"åͱçīĩ":115142,"æīĭèĦļ":115143,"åħ¼ä»»":115144,"åģľæĶ¾":115145,"æŃ£å®Ĺ":115146,"æĸ°åĨľæĿij":115147,"åĤ¬çĶŁ":115148,"æīĢåŃ¦æł¡":115149,"å¿µä½Ľ":115150,"åͤéĨĴ":115151,"åħ±åĪĽ":115152,"æĭīä¸ģ":115153,"èĥĮçĿĢ":11515
4,"çĶŁæĢģä¿ĿæĬ¤":115155,"åı£å¤´":115156,"æĸ¹åIJijçĽĺ":115157,"調æķ´":115158,"æĭĽèģĺä¿¡æģ¯":115159,"åħ¶ä»ĸåĽ½å®¶":115160,"ç®Ģæĺĵ":115161,"åĮ¿åIJį":115162,"è¯Ħæµĭ":115163,"æĺ¯ä¸Ģ座":115164,"çīĭ":115165,"足迹":115166,"çIJĨè§£åĴĮ":115167,"æľĢåıĹ":115168,"å¿ĥè·³":115169,"çĪ¶è¦ª":115170,"éĿŀ常åĸľæ¬¢":115171,"èĭ¦éļ¾":115172,"æĬĢå¸Ī":115173,"æ°ijæĦı":115174,"æĪĺåĽ½":115175,"æĽ¿è¡¥":115176,"津贴":115177,"ä¸ŃåĽ½ä¼łç»Ł":115178,"åIJĦè¡Į":115179,"åIJĦè¡ĮåIJĦ":115180,"åIJĦè¡ĮåIJĦä¸ļ":115181,"第äºĶå±Ĭ":115182,"èį·èĬ±":115183,"æĦıèŃĺ":115184,"票价":115185,"åĪĨæµģ":115186,"æĿİçϽ":115187,"æ±ŁåĮĹ":115188,"æİĴæĸ¥":115189,"ä½ĵéĩı":115190,"åĮħåIJ«äºĨ":115191,"åĪĺæŁIJ":115192,"çݰå¦Ĥä»Ĭ":115193,"å·¥èīºåĵģ":115194,"è¿Ļç§įæĸ¹æ³ķ":115195,"åĬŀåħ¬æ¥¼":115196,"ç͵工":115197,"çħĻ":115198,"åį¡çīĩ":115199,"å¹´å¹´åºķ":115200,"ä¸ĵ项èµĦéĩij":115201,"åĮ»ç§ij":115202,"åĮ»ç§ij大åѦ":115203,"åĽŀ头çľĭ":115204,"ä¸įå±ij":115205,"èĩªé©¾":115206,"没æĶ¶":115207,"æīĵçĮİ":115208,"èĦ¸éĥ¨":115209,"åıĥèĢĥ":115210,"å°Ĩ士":115211,"è´«åĽ°äººåı£":115212,"çIJĨæĥ³ä¿¡å¿µ":115213,"é£İå°ļ":115214,"人æīįéĺŁä¼į":115215,"çij¾":115216,"æĿ¥è¿ĻéĩĮ":115217,"æ´Ĺ涤":115218,"å¹´èĸª":115219,"èĭįçϽ":115220,"ä¸ĩäºĭ":115221,"è¯¾æľ¬":115222,"åºĵéĩĮ":115223,"ç´¾":115224,"ç´¾åijĺ":115225,"èµŀç¾İ":115226,"ç©¿æĪ´":115227,"è£½ä½ľ":115228,"èµŀæĪIJ":115229,"ä¸Ģä¾§":115230,"å½ĵåľ°äºº":115231,"æĭİ":115232,"纸质":115233,"ä½Ļ个":115234,"éĶĤçĶµæ±ł":115235,"æľºåŀĭ":115236,"éĻ¢éϢ士":115237,"åģļå·¥":115238,"å¼łè´´":115239,"ç¥Ľæĸij":115240,"æ®ĸæ°ij":115241,"å¥ij约":115242,"æ¹ĺæ½Ń":115243,"æIJĸ":115244,"åŃĺè´§":115245,"交éĢļ大åѦ":115246,"è¶ģçĿĢ":115247,"æĸĩçī©ä¿ĿæĬ¤":115248,"å¤ĩæĪĺ":115249,"éĩĩ纳":115250,"åįĬæľĪ":115251,"æľĢåħ³éĶ®":115252,"æľĢåħ³éĶ®çļĦ":115253,"æİ¥éĢģ":115254,"æĶ¶åī²":115255,"åıįåĢĴ":115256,"çĥĽ":115257,"æ½Ķ":115258,"ä¼Łå¤§å¤įåħ´":115259,"çļĦè¯Ŀè¯Ń":115260,"容å¿į":115261,"å®ļéĩı":115262,"æķĹ":115263,"åĵģçīĮ形象":115264,"æīŃ转":115265,"åĽ½å®¶éĩįçĤ¹":115266,"èĨĿçĽĸ":115267,"ä¸Ģ楼":115268,"大éϏ":115269,"éĤªæģ¶":115270,"åĽŀåij³":115271,"çĮ¿":115272,"çĿ¡åīį":115273,"æĹłè¾ľ":115274,"çĹħæ¯ĴæĦŁæŁĵ":115275,"æľºæ¢°åĮĸ":115276,"çĤ¹äº®":115277,"溶解":115278,"åĩłä¹İæīĢæľī":115279,"è·ijéģĵ":115280,"ç͵è§Ĩæľº":115281,"åı¨":115282,"æijĩäºĨ":115283,"æijĩäºĨæijĩ头":115284,"èĩªè´Ł":115285,"综åIJĪåĪ©ç͍":115286,"èĩªå¦Ĥ":115287,"åİŁä¾Ĩ":115288,"ä¹Łä¸įæĥ³":115289,"èĬĤ课":115290,"è¿ĩåī©":115291,"çͲçĬ¶":115292,"çͲçĬ¶èħº":115293,"æĸ°ä¸ĸ纪":115294,"èĩªä¸»åĵģçīĮ":115295,"é«ĺå±Ĥ次":115296,"ä¸Ģè§Ĵ":115297,"è¡Įäºĭ":115298,"ç¥ĸåħĪ":115299,"å©ļåIJİ":115300,"éĹ´éļĻ":115301,"ç¼ĿéļĻ":115302,"è¿ĻæĶ¯":115303,"ä¸įæĸŃåĪĽæĸ°":115304,"å¾®åŀĭ":115305,"æĽĻåħī":115306,"享ç͍":115307,"ä¸ŃåĽ½ç§»åĬ¨":115308,"éĹŃçݯ":115309,"æī§æĦı":115310,"åıijå±ķæł¼å±Ģ":115311,"æł¸å¿ĥåĮº":115312,"éªļæī°":115313,"åħļåĴĮåĽ½å®¶":115314,"ä¸ŃåĽ½æĶ¿åºľ":115315,"帶èijĹ":115316,"ä¸ĩåįĥçĵ¦":115317,"åħ©äºº":115318,"äºİæĺ¯æĪij":115319,"åĽºä½ĵ":115320,"çªģå¦Ĥ":115321,"çªģå¦Ĥåħ¶":115322,"çªģå¦Ĥåħ¶æĿ¥":115323,"éĩĮç¨ĭç¢ij":115324,"çαç¾İ":115325,"æŁ¥éªĮ":115326,"åıĮèµ¢":115327,"éĹªåħī":115328,"楼å®ĩ":115329,"æĻı":115330,"æľīè¶³å¤ŁçļĦ":115331,"æŁĶæĢ§":115332,"ä¿¡æģ¯å®īåħ¨":115333,"管线":115334,"å¹¶ä¸įä¼ļ":115335,"åύ件":115336,"ä½łåºĶ该":115337,"çĿĢå®ŀ":115338,"æĺİæ¸ħ":115339,"æĬĹçĶŁç´ł":115340,"æīĵæŃ»":115341,"å®Įåħ¨ä¸įåIJĮ":115342,"èĬ±æ¤Ĵ":115343,"æĶ¾å®½":115344,"ä½İ端":115345,"åĽĽèĤ¢":115346,"åĮĹäº¬èµĽè½¦":115347,"éĽĨå¸Ĥ":115348,"æľªå©ļ":115349,"大å¹ħæıIJåįĩ":115350,"建çŃij设计":115351,"çĭ¬æľīçļĦ":115352,"æİ¢éĻ©":115353,"æ²³æµģåŁŁ":115354,"æħķ容":115355,"被çĽĹ":115356,"åĵºä¹³":115357,"èıģ":115358,"æĥ¬æĦı":115359,"è¶ĬæĿ¥è¶Ĭ好":115360,"广大群ä¼Ĺ":115361,"å¾·èĤ²":115362,"å¸Ĥåľºä»·æł¼":115363,"奥巴":11536
4,"奥巴马":115365,"èĬĤ缮ä¸Ń":115366,"两款":115367,"ä¸ĩä½Ļåħĥ":115368,"ç»´å°Ķ":115369,"çĶŁçī©ç§ijæĬĢ":115370,"åIJ¬èµ·æĿ¥":115371,"çłļ":115372,"æĭŁå®ļ":115373,"æ²¹çͰ":115374,"声èªī":115375,"建çŃijä¸ļ":115376,"éĻIJè´Ń":115377,"çīĩåŃIJ":115378,"çķľç¦½":115379,"ç½ijé¦ĸ页":115380,"ä¼Ĺçѹ":115381,"æĴŀåĩ»":115382,"åīįä¸įä¹ħ":115383,"åīįä¸ĸ":115384,"åĽĽä¸ªæĦıè¯Ĩ":115385,"æµĭç»ĺ":115386,"éĺ²ç©º":115387,"漫éķ¿çļĦ":115388,"æ²IJæµ´":115389,"æ¯Ķè¾ĥç®Ģåįķ":115390,"æµĭå®ļ":115391,"åĽŀè°ĥ":115392,"让人们":115393,"èĴĭä»ĭ":115394,"èĴĭä»ĭçŁ³":115395,"ç»ĵæĻ¶":115396,"å¢ŀæ·»äºĨ":115397,"æĿ¡è¯Ħ论":115398,"åī¯ä¼ļéķ¿":115399,"ä½ıæīĢ":115400,"ç»ĻåĩºäºĨ":115401,"è°ĥéħį":115402,"æ²ĸ":115403,"æľīç͍":115404,"æľīç͍çļĦ":115405,"ä¸ĢæĿ¡é¾Ļ":115406,"éĩİå¤ĸ":115407,"ç¼ĺåĪĨ":115408,"æ°¸è¿ľä¸įä¼ļ":115409,"æŀľæłij":115410,"大åıijå¿«ä¸ī":115411,"麻éĨī":115412,"äºijéĽĨ":115413,"åİ»åĵªéĩĮ":115414,"åħ¥å¸Ĥ":115415,"ä»»æĢ§":115416,"建档":115417,"建档ç«ĭ":115418,"建档ç«ĭåį¡":115419,"ä¸Ģ棵":115420,"社åįĢ":115421,"çĽ¸ä¼´":115422,"åļ·":115423,"å¡«åħħ":115424,"ä¸ĢæĹı":115425,"ç¾ģ":115426,"åıĸè¯ģ":115427,"èΰéĺŁ":115428,"åİĤåĮº":115429,"è¡·å¿ĥ":115430,"åıijå±ķéĺ¶æ®µ":115431,"é«ĺ强度":115432,"åĹĵåŃIJ":115433,"é¢Ĩè¡Ķ":115434,"楼主":115435,"大èĴľ":115436,"æŀķ头":115437,"粮油":115438,"é»Ħçĵľ":115439,"æĵĴ":115440,"å°ıçĭĹ":115441,"æĶ¹éĿ©å§Ķ":115442,"åįģåĪĨéĴŁ":115443,"é²ľèī³":115444,"åħ³ç¾½":115445,"çĭĢæħĭ":115446,"å®ŀç͍æĢ§":115447,"å°ijè§ģ":115448,"é£ŀæī¬":115449,"çͰéĩİ":115450,"æIJĤ":115451,"è¿Ļ个è¯į":115452,"åºĶæĢ¥é¢Ħæ¡Ī":115453,"è§Ĵ度æĿ¥çľĭ":115454,"æķ¬çķı":115455,"æ³ķå®Ŀ":115456,"åĸĦæĦı":115457,"æīĵæĸŃ":115458,"对åĨ³":115459,"çµķå°į":115460,"åĢŁæŃ¤":115461,"å¼ĢæºIJ":115462,"å°ı說":115463,"祺":115464,"å²ģ以ä¸ĭ":115465,"éĢĢå½¹åĨĽäºº":115466,"ä¸įä¹ħåīį":115467,"åĩºåİĤ":115468,"讽åĪº":115469,"æĿ¥çľĭçľĭåIJ§":115470,"éŃĶåħ½":115471,"çķĻä¸ĭæĿ¥":115472,"å±ħ室":115473,"åłħæĮģ":115474,"çľĭäºĨä¸Ģ":115475,"çľĭäºĨä¸Ģçľ¼":115476,"éĽĨåĽ¢æĹĹä¸ĭ":115477,"æĪĺæĪĺç»ĦåIJĪ":115478,"è®¤çľŁèIJ½å®ŀ":115479,"汽车产ä¸ļ":115480,"çī©çIJĨåѦ":115481,"æķµ":115482,"éĴĿ":115483,"åĽ¢éķ¿":115484,"ä¸įæĸŃæī©å¤§":115485,"èĤ©è´Ł":115486,"åıijå±ķ缮æłĩ":115487,"è³ĩéĩij":115488,"åīįç½®":115489,"ä¸ŃåĽ½åı¤ä»£":115490,"æŃ»åĪij":115491,"åħħåĪĨä½ĵçݰ":115492,"åħ³éŨ":115493,"ç¾İæĦŁ":115494,"æīĵåħ¥":115495,"æĬijéĥģçĹĩ":115496,"å°ijçĪ·":115497,"æłijæŀĿ":115498,"æ¶Īæģ¯ç§°":115499,"æ´Ľåħĭ":115500,"åį¯":115501,"è¿ĪåIJij":115502,"æİ¨åĭķ":115503,"ä»İä¸ļèĢħ":115504,"åݻ买":115505,"欢快":115506,"æĭ¥æĮ¤":115507,"马桶":115508,"æĬĬæİ§":115509,"æĶ¿åħļ":115510,"å¼łæī¬":115511,"客æłĪ":115512,"红æĺŁ":115513,"éĢģæĿ¥":115514,"åħ¨åŁŁæĹħ游":115515,"èĩªç§ģ":115516,"åįģäºĮæĿ¡":115517,"åı¹æģ¯":115518,"ä¸Ģèīĺ":115519,"ä¿Ŀè´¹":115520,"æĸ½å·¥çİ°åľº":115521,"æľī幸":115522,"ç»ŃèĪª":115523,"åı¯èĥ½æľĥ":115524,"èĥĮåıĽ":115525,"ä½£éĩij":115526,"ä¸īçŃīå¥ĸ":115527,"å¾Ī满æĦı":115528,"游æĪıåľ¬":115529,"群éĩĮ":115530,"æŀĦä»¶":115531,"åºıå¹ķ":115532,"太æ¹ĸ":115533,"æľ¨è´¨":115534,"æĻĭæ±Ł":115535,"çµĤæĸ¼":115536,"è·³è·ĥ":115537,"åĢºæĿĥ人":115538,"çŃī诸å¤ļ":115539,"æĶ¾åĩº":115540,"åħ³éĶ®æĹ¶åĪ»":115541,"æĦŁæŁĵèĢħ":115542,"é£ŀè¡Įåijĺ":115543,"èĥĨåĽº":115544,"èĥĨåĽºéĨĩ":115545,"æĬ±æŃī":115546,"åij¨äºĮ":115547,"æĸ°æĹ¶æľŁ":115548,"åĨ·éĵ¾çµģ":115549,"è¿Ļç§įæĸ¹å¼ı":115550,"该æĿij":115551,"åĽŀé¦Ī":115552,"åŁºçĿ£æķĻ":115553,"人åıĤ":115554,"æŀ¯çĩ¥":115555,"æī¹åıijå¸Ĥåľº":115556,"åħħåĪĨèĤ¯å®ļ":115557,"å¸ĤæĶ¿åįı":115558,"äºĭæ¥Ń":115559,"龸çİĭ":115560,"çĥŃæIJľ":115561,"åįģä¹Ŀ大":115562,"ä¼´æľī":115563,"ç¾İåĽ½æĢ»ç»Ł":115564,"åŁİå¸Ĥ管çIJĨ":115565,"ä¸ĭ令":115566,"èĥ¸åı£":115567,"åıªçŁ¥éģĵ":115568,"åij¨ä¸ī":115569,"ç͍æĪ¶":115570,"éѝ":115571,"å¿ĥè¡Ģ":115572,"带头人":115573,"åĮ»åĬ¡":115574,"åĮ»åĬ¡äººåijĺ":115575,"æİ
§åζåύ":115576,"ä½ľåĵģåĨħ容":115577,"æĪĺåıĭ":115578,"åİĨå¹´":115579,"ä¸įåħĭ":115580,"ä¸įåħĭä¸įåıĬ":115581,"æĹ¥æŃ£å¼ı":115582,"è±IJå¯Į":115583,"ç¨İè´¹":115584,"æĹ¶æķĪ":115585,"å±ķä½į":115586,"è¡¡éĺ³":115587,"æĪ¿è²¸":115588,"çĪĨ款":115589,"ä¹IJæĦı":115590,"çͷ䏻":115591,"寬":115592,"æľĥèѰ":115593,"ä¹ĭå¤ľ":115594,"åIJĮ樣":115595,"ä¸įè¦ģ太":115596,"ä¼Ĭæĸ¯":115597,"ä¼Ĭæĸ¯åħ°":115598,"åŁºæľ¬åİŁåĪĻ":115599,"åİ»æİī":115600,"ä½İä¿Ŀ":115601,"个交æĺĵ":115602,"个交æĺĵæĹ¥":115603,"èģĬèģĬ":115604,"åĽĽä½į":115605,"åħļç»ĦæĪIJåijĺ":115606,"主è¦ģä»İäºĭ":115607,"å½±éŁ³":115608,"åĨĴåĩº":115609,"åij¼åIJ¸éģĵ":115610,"è¾¾å°Ķ":115611,"æľ¨åľ°æĿ¿":115612,"诡å¼Ĥ":115613,"çģ¯åħ·":115614,"çģ«çĥ§":115615,"è§£èĦ±":115616,"æĦĪåıij":115617,"æ¹ĸå·ŀ":115618,"é£İä¿Ĺ":115619,"æĸ°å½¢åĬ¿":115620,"æĸ°å½¢åĬ¿ä¸ĭ":115621,"è²Ŀ":115622,"èĦĵ":115623,"åĬ¨åĬĽçĶµæ±ł":115624,"é£ŀèι":115625,"飧æĢ§":115626,"åĪ©çī©":115627,"åĪ©çµ¦":115628,"ä¸į认è¯Ĩ":115629,"ç¼ĸç»ĩ":115630,"ä½ľåĿĬ":115631,"èģĮä¸ļæĬĢèĥ½":115632,"çľĭè¦ĭ":115633,"åĽ´æ£ĭ":115634,"æĺıè¿·":115635,"å½Ĵå±ŀäºİ":115636,"æĤ¬å´ĸ":115637,"éĨ«çĻĤ":115638,"å®ĭ代":115639,"åºĦæĿij":115640,"èĹķ":115641,"çĮĽçĦ¶":115642,"çĩĥæĸĻçĶµæ±ł":115643,"å®ŀä½ĵåºĹ":115644,"ä¸į足以":115645,"æĥħç·":115646,"æĥħç·Ĵ":115647,"å»ĬåĿĬ":115648,"ç͵åı°":115649,"åºĶåĬĽ":115650,"ä¸Ńå°ıåѦçĶŁ":115651,"èĥ¡åIJĮ":115652,"éī´åĪ«":115653,"åĨħç½®":115654,"乱象":115655,"æ¬ĬçĽĬ":115656,"å¼ĢæĶ¾å¼ı":115657,"åįļæĸĩ":115658,"讲课":115659,"çŃīåİŁåĽł":115660,"穷人":115661,"äº¤æĽ¿":115662,"æĬ¤çħ§":115663,"åıijå±ķæľºéģĩ":115664,"客åķĨ":115665,"åıįä¹ĭ":115666,"ç±³é¥Ń":115667,"å¹¶åıij":115668,"å¹¶åıijçĹĩ":115669,"æ±īåŃIJ":115670,"æŀľåĽŃ":115671,"对æĪijæĿ¥è¯´":115672,"åģıåIJij":115673,"æī¹ç¤º":115674,"读åIJİ":115675,"读åIJİæĦŁ":115676,"æĺİæĻº":115677,"åĽ´çĿĢ":115678,"åıį转":115679,"æĿ¨å¹Ĥ":115680,"ä¸ĵåįĸ":115681,"ä¸ĵåįĸåºĹ":115682,"åıĹéĻIJ":115683,"åºŁè¯Ŀ":115684,"æŀģå°ij":115685,"åįĪåIJİ":115686,"è¿Ľä¿®":115687,"åīĬåĩı":115688,"æľ¬ç§ijçĶŁ":115689,"ä¼ĺéĢī":115690,"åħīçħ§":115691,"åıĻäºĭ":115692,"åıĸæļĸ":115693,"åĮĹè·¯":115694,"æ¦ķ":115695,"èİĨçͰ":115696,"楼å±Ĥ":115697,"天èĬ±":115698,"天èĬ±æĿ¿":115699,"çĤľ":115700,"å·²ç»ıæľīäºĨ":115701,"è¶¾":115702,"çͳåįļ":115703,"ç͵éĺ»":115704,"åĬŁè¯¾":115705,"æŃ¥æŃ¥":115706,"éĤ£ä¹Ī容æĺĵ":115707,"æŃ¤æĸĩ":115708,"ä½°":115709,"计è¾ĥ":115710,"çīĩéĿ¢":115711,"ç͵影éĻ¢":115712,"ä¸įåħ¬å¹³":115713,"ä¸īæľŁ":115714,"æĹħ游èµĦæºIJ":115715,"å¤ļç§įå½¢å¼ı":115716,"è£Ĥç¼Ŀ":115717,"åIJİæİĴ":115718,"硬度":115719,"åĽŀæļĸ":115720,"éģĵæķĻ":115721,"è´«è¡Ģ":115722,"æ¸ħé¦Ļ":115723,"伤çĹħ":115724,"æĦı義":115725,"çļĦç¼ĺ":115726,"çļĦç¼ĺæķħ":115727,"åºĦ严":115728,"åıªæĺ¯ä¸ºäºĨ":115729,"æīĵæĬĺ":115730,"以ä¾Ĩ":115731,"滿足":115732,"çİĽä¸½":115733,"風éļª":115734,"æĸĩç§ij":115735,"éħįå¤ĩäºĨ":115736,"è¿Ľé£Ł":115737,"æ¶¡":115738,"è·¯ç¨ĭ":115739,"åı«å£°":115740,"ä¸Ńå¿ĥåŁİåĮº":115741,"æľīæīĢä¸įåIJĮ":115742,"張貼":115743,"é¢ĦæĬ¥":115744,"æľīå¤ļä¹Ī":115745,"è¿Ľè¡Įåħ¨éĿ¢":115746,"æĽ¾ç¶ĵ":115747,"ä¸ī代":115748,"å®ı大":115749,"æ¸ħæī«":115750,"éĢīåĩº":115751,"åĵªä¸Ģ个":115752,"主義":115753,"ä¾Ŀæĵļ":115754,"çļ®éĿ©":115755,"èµ¶æĿ¥":115756,"çŃĽæŁ¥":115757,"æ¨Ł":115758,"ä¿ĿèįIJ":115759,"åIJĥæĥĬ":115760,"æľĭåıĭ们对":115761,"ä»ĸæĺ¯ä¸Ģ个":115762,"åºŁæ°Ķ":115763,"æ»ħ":115764,"è´¢ç¨İ":115765,"æĿijæĿijæ°ij":115766,"èµĦäº§è´ŁåĢº":115767,"å®īå¨ľ":115768,"缮åīįåĽ½åĨħ":115769,"æĦŁè§īèĩªå·±":115770,"çµIJåIJĪ":115771,"éͦæłĩ":115772,"éͦæłĩèµĽ":115773,"æĽ´æ·±":115774,"åŁºæķ°":115775,"éħ¿éħĴ":115776,"çī¹èī²äº§ä¸ļ":115777,"åİĭå®ŀ":115778,"ä¾Ŀæ³ķ追究":115779,"æ·¡å®ļ":115780,"ç®ĢçĽ´å°±æĺ¯":115781,"å£ĵåĬĽ":115782,"æ°ijå¿ĥ":115783,"ä¸įåIJĪéĢĤ":115784,"çͱæŃ¤åı¯è§ģ":115785,"èµŀèªī":115786,"澤":115787,"åĩłå¹´åīį":115788,"åIJīä»ĸ":115
789,"çł´æįŁ":115790,"è½»è½»åľ°":115791,"å²Ľå±¿":115792,"æĦıå¢ĥ":115793,"ä»Ģä¹Īåı«":115794,"åģĩè£ħ":115795,"éĢģè´§":115796,"å¹ķå¢Ļ":115797,"妥åįı":115798,"åĽ½æĹĹ":115799,"äºĨå¾Īä¹ħ":115800,"åĪĨ辨çİĩ":115801,"ç´Ķ":115802,"éĺ³åĮº":115803,"åĩŃçĿĢ":115804,"åģľè½¦ä½į":115805,"京éĥ½":115806,"éĶ£":115807,"æĵ¾":115808,"è¿ĽéŨ":115809,"åĪĺæµ·":115810,"åĽĽçº§":115811,"女足":115812,"è¡ĮæĶ¿å®¡æī¹":115813,"éģ¥æİ§":115814,"ä¸įéĮ¯":115815,"å¾Ĺå¾Ī好":115816,"ä¸ºçĽ®çļĦ":115817,"ä»įæľª":115818,"ç²¾è£ħ":115819,"éĢįéģ¥":115820,"尽头":115821,"çºłç¼ł":115822,"éłĺå°İ":115823,"æĭħè´Ł":115824,"æĪĸèĢħåħ¶ä»ĸ":115825,"åıªä¸įè¿ĩæĺ¯":115826,"åı®åĺ±":115827,"åģĩåĨĴ":115828,"æļĸæ°Ķ":115829,"çĽIJåŁİ":115830,"被è§Ĩ为":115831,"诺è´Ŀå°Ķ":115832,"ç»ĻäºĨæĪij":115833,"è¿ijåįĥ":115834,"éĩįåĽŀ":115835,"éĨĴäºĨ":115836,"çĶµè§£":115837,"忽çķ¥äºĨ":115838,"èĥĮéĥ¨":115839,"æĸĩæĺİåŁİå¸Ĥ":115840,"æºħ":115841,"è²ĵ":115842,"æĬµæĮ¡":115843,"åĸľæ¬¢åIJĥ":115844,"éĿĻéĿĻåľ°":115845,"å¾Īæ·±":115846,"åŁºç¡ĢçŁ¥è¯Ĩ":115847,"è¿ĩéĶĻ":115848,"çIJĨç§ij":115849,"交æµģåIJĪä½ľ":115850,"èĪĶ":115851,"èª¿æŁ¥":115852,"æħĪæĤ²":115853,"éĴ°":115854,"èĩ´ç͵":115855,"å®£ä¼łæ´»åĬ¨":115856,"åıĺéĩı":115857,"çļĦ人æĿ¥è¯´":115858,"æĹ¶éļĶ":115859,"ä¸įç®¡ä½ł":115860,"缸è¿ij":115861,"è´µéĩijå±ŀ":115862,"ä¹Łä¸įåı¯èĥ½":115863,"ç²īæľ«":115864,"åįĹçĵľ":115865,"çϽ马":115866,"åħīæºIJ":115867,"éĩijå¥ĸ":115868,"çĭ¬è§Ĵ":115869,"çĭ¬è§Ĵåħ½":115870,"妨ç¢į":115871,"ç»ĻåĬĽ":115872,"ä½Ĩä»į":115873,"å¼łå®¶åı£":115874,"èIJ¬åħĥ":115875,"渲æŁĵ":115876,"éķ¿å¤§äºĨ":115877,"è®°èĢħäºĨè§£":115878,"æĢĢçĿĢ":115879,"è¦ģåѦä¼ļ":115880,"游æĪı代":115881,"游æĪı代ç»ĥ":115882,"äºĮçϾ":115883,"æĦıè¯Ĩå½¢æĢģ":115884,"çݺ":115885,"计åĪĴçĶŁèĤ²":115886,"æī¾åĩĨ":115887,"åħ°èĬ±":115888,"è¿Ļ座åŁİå¸Ĥ":115889,"污泥":115890,"å®ĺæĸ¹å¾®ä¿¡":115891,"å½Ĵå±ŀ":115892,"æ°§æ°Ķ":115893,"éģİç¨ĭä¸Ń":115894,"åį°è±¡æ·±åĪ»":115895,"稳妥":115896,"çµIJæĿŁ":115897,"åŃķæľŁ":115898,"çĿĥ":115899,"åĿļåĽº":115900,"顺åĬ¿":115901,"æŀľèͬ":115902,"éĨ«å¸«":115903,"åİ®":115904,"ä¹Łæĺ¯å¦ĤæŃ¤":115905,"é¦Ĵ头":115906,"缸åĬ©":115907,"干线":115908,"ä¸Ģæľ¬ä¹¦":115909,"绥":115910,"æĮ¯å¥ĭ":115911,"èĤ¾èĦı":115912,"åĭķçī©":115913,"é£ŀè·ĥ":115914,"èıľåĵģ":115915,"å¤ļä½Ļ":115916,"å¤ļä½ĻçļĦ":115917,"éĢĿä¸ĸ":115918,"æģĭ人":115919,"å¼ĢåıijåĪ©ç͍":115920,"顺丰":115921,"éĩİå¿ĥ":115922,"æł¡å¤ĸ":115923,"æģIJé¾Ļ":115924,"éĿ¢åħ·":115925,"éķ¿è¾Ī":115926,"éļıå¤Ħ":115927,"éļıå¤Ħåı¯è§ģ":115928,"紧缺":115929,"éĩįä¸Ń":115930,"éĩįä¸Ńä¹ĭ":115931,"éĩįä¸Ńä¹ĭéĩį":115932,"奥æĸ¯":115933,"奥æĸ¯åį¡":115934,"ä¸Ģ个å¤ļ":115935,"ä¸Ģ个å¤ļæľĪ":115936,"ä¸įåı¯ç¼ºå°ij":115937,"æĸ°æł¼å±Ģ":115938,"æıIJæĮ¯":115939,"è¡Įè´¿":115940,"æ¼Ĥæµģ":115941,"èģĬåŁİ":115942,"åħ´å»º":115943,"è´¨æ£Ģ":115944,"ç§ģæľį游æĪı":115945,"æĽ´éĩįè¦ģ":115946,"è´®":115947,"çħľ":115948,"转åıĺ为":115949,"è¿Ļ两年":115950,"ä¿Ŀé²ľ":115951,"æī§æķĻ":115952,"çĥ¨":115953,"å¼Ģåıij建设":115954,"è¿IJèIJ¥ç®¡çIJĨ":115955,"误差":115956,"京åī§":115957,"å¸IJåı·":115958,"å·¥ä½ľä½ľé£İ":115959,"ä¸ĸä¿Ĺ":115960,"çϽ宫":115961,"å¤©åĽ½":115962,"å¤©åĽ½ç»§ç»Ń":115963,"å·´æĸ¯":115964,"èIJ¥åĪ©":115965,"åĵģæł¼":115966,"æĿijæ°ij们":115967,"æĪ¿è½¦":115968,"çŃīçĹĩçĬ¶":115969,"å¦Ĥå®ŀ":115970,"宸":115971,"å±Ĥ级":115972,"éĶĻè¿ĩäºĨ":115973,"ç»ĵå®ŀ":115974,"ç¬ijèĦ¸":115975,"羣å®ŀæĢ§":115976,"éĥ½å¸ĤæĬ¥":115977,"é¥Ńèıľ":115978,"åºĶ注æĦı":115979,"æĬ½çĥŁ":115980,"伪éĢł":115981,"åīįä¸Ģ天":115982,"éŃĶé¾Ļ":115983,"éŃĶé¾Ļ令çīĮ":115984,"约è°Ī":115985,"绣çѹæİ¨è¿Ľ":115986,"让ç͍æĪ·":115987,"åħ¨éĿ¢èIJ½å®ŀ":115988,"å¼Ħå¾Ĺ":115989,"è°Īæģĭçα":115990,"鸣æĪIJéķ¿":115991,"鸣æĪIJéķ¿è®°":115992,"æ´ĭæ´ĭ":115993,"çĸıæķ£":115994,"éĿ¢ç§¯çº¦":115995,"æµĵ缩":115996,"æĸ¯é¡¿":115997,"çĶŁæĢģåľĪ":115998,"æī§å¯¼":115999,"ç§»éĢģ":116000,"齿轮":116001,"æł¹æľ¬å°±ä¸į"
:116002,"缩åĩı":116003,"èµ°ä¸ĭåİ»":116004,"çĿ«æ¯Ľ":116005,"ä¹Łä¸įéĶĻ":116006,"åıįæĺłåĩº":116007,"èĭ¦æģ¼":116008,"缸åħ³æĶ¿çŃĸ":116009,"é«ĺ楼":116010,"ç²īèī²":116011,"æĬķèµĦé¢Ŀ":116012,"ä¸įç»ı":116013,"ä¸įç»ıæĦı":116014,"å®ģæĦ¿":116015,"èĪĮ头":116016,"æ»ĭçĶŁ":116017,"å®ģåİ¿":116018,"åīįåĪĹèħº":116019,"åĩ³":116020,"é£Łæ¬²":116021,"åıĸèĥľ":116022,"éĻ¢åŃIJ":116023,"ç´łè´¨æķĻèĤ²":116024,"滨å·ŀ":116025,"æĬ¢æĬĵ":116026,"å¼Ĥåij³":116027,"åĴļ":116028,"åĬį":116029,"宽éĺĶ":116030,"æļ´æ¶¨":116031,"æĥłåıĬ":116032,"è§Ħç¨ĭ":116033,"ä¾Ľåħ»":116034,"éĢģå¾Ģ":116035,"å±±åºĦ":116036,"举äºļ":116037,"å±ķé¦Ĩ":116038,"è§£éĶģ":116039,"æĹłè§Ĩ":116040,"éĻįèIJ½":116041,"è¿ŀäºij":116042,"è¿ŀäºij港":116043,"åıĤè°ĭ":116044,"çİĸ":116045,"ç¬ĥ":116046,"èĢĹè´¹":116047,"æī¿å¾·":116048,"社ä¼ļæķĪçĽĬ":116049,"åįĹæµ·ç½ij":116050,"åĪĽä¼¤":116051,"èIJ±":116052,"åħħæ²Ľ":116053,"ç½ijç«Ļ建设":116054,"大åºĨ":116055,"åĨįéĢł":116056,"åŃĹæł·":116057,"åħ¨æ°ijåģ¥èº«":116058,"èĮ«èĮ«":116059,"æµ®åĬ¨":116060,"åīįåı°":116061,"å¢ŀ设":116062,"éĢĽè¡Ĺ":116063,"åĢĴéĹŃ":116064,"æ³ķå¾ĭ顾éĹ®":116065,"çĸ®":116066,"çĹħçĹĩ":116067,"空åīį":116068,"请æķĻ":116069,"èĥľä»»":116070,"æĿĢèıĮ":116071,"æĪĺæĸĹæľº":116072,"ç»ĺåζ":116073,"å¤Ħæĸ¹":116074,"çªģåĽ´":116075,"çĮ«åĴª":116076,"æĬ¥åijĬæĺ¾ç¤º":116077,"ç¿Ł":116078,"çķ¶åľ°":116079,"æľĢéļ¾":116080,"纪å§Ķ书记":116081,"ä½İåİĭ":116082,"èĻļ空":116083,"è¿Ļéĥ¨ç͵影":116084,"产ä¸ļåįĩ级":116085,"è°·çα":116086,"è°·çαåĩĮ":116087,"æĬ¼éĩij":116088,"女æĸ¹":116089,"éĴ»çłĶ":116090,"æļĹæļĹ":116091,"è¿·ä½ł":116092,"æīĢè¬Ĥ":116093,"å¨ģå»ī":116094,"å¼ĢæľĹ":116095,"å²Ķ":116096,"çģ«çĤ¬":116097,"åIJĪçIJĨæĢ§":116098,"åħ¬åĬŀ":116099,"ä¼ļä¼ļéķ¿":116100,"éĺ´è°ĭ":116101,"å¼Ģå±Ģ":116102,"æĻ®éĢļè¯Ŀ":116103,"å᡿ĭī":116104,"å°ijåIJĥ":116105,"éĹªèĢĢ":116106,"æŀľæ±ģ":116107,"æī§è¡ĮåĬĽ":116108,"è°Ľ":116109,"æĬ¢åĬ«":116110,"é«ĺéĢŁåıijå±ķ":116111,"飬":116112,"åįĹæ²Ļ":116113,"é«ĺçŃīåŃ¦æł¡":116114,"æį¢ä¸ª":116115,"åı¯èĥ½åŃĺåľ¨":116116,"æĬĴ":116117,"è°±åĨĻ":116118,"被æĬĵ":116119,"æĿ¯åŃIJ":116120,"èĬĤèĥ½åĩıæİĴ":116121,"æ°ĶåĢĻåıĺåĮĸ":116122,"åĪĨåĪ¥":116123,"ä¸Ńæŀ¢":116124,"欢åij¼":116125,"åħī纤":116126,"è¿Ļ群":116127,"çľ¼çķĮ":116128,"åħ±åIJĮåıijå±ķ":116129,"çݰä»Ĭ":116130,"éĹ»è¨Ģ":116131,"çī¹èī²å°ıéķĩ":116132,"æķij人":116133,"éĻįæ°´":116134,"ä¸ĸçķĮä¸Ģæµģ":116135,"å°±é¤IJ":116136,"çŀ¥":116137,"å¤įä»ĩ":116138,"ç¾½æ¯Ľ":116139,"ç¾½æ¯ĽçIJĥ":116140,"è´©åįĸ":116141,"æºIJæ³ī":116142,"æĢ»ä½ĵè§ĦåĪĴ":116143,"åĬ¨æĦŁ":116144,"ä¸Ģ审":116145,"åĢŁéĴ±":116146,"è§ģæķĪ":116147,"èĬ±èįī":116148,"åIJĮä¸ļ":116149,"æŁ¥è©¢":116150,"åĽ½éĻħåIJĪä½ľ":116151,"ä¾ĽåĽ¾":116152,"åģ´":116153,"æłĵ":116154,"缸éĢļ":116155,"è°ĪåıĬ":116156,"è¿ĩç¨ĭå½ĵä¸Ń":116157,"é¦Ļèıĩ":116158,"åįģåĽĽæĿ¡":116159,"ä¸Ģå¼Ģå§ĭå°±":116160,"ä¸ĵåijĺ":116161,"æĺİ顯":116162,"æīĵéĢłåĩº":116163,"ä¸ĭéĿ¢æĪij们":116164,"æľºæ²¹":116165,"åı°è¯į":116166,"åŃIJå¼Ł":116167,"æľĢ常è§ģçļĦ":116168,"æĪijè®°å¾Ĺ":116169,"ç»°":116170,"æĤ¬æµ®":116171,"è¿ĺ羣æĺ¯":116172,"æĮĤåı·":116173,"åıĭåĸĦ":116174,"éĩį伤":116175,"çħ§äº®":116176,"æŃ¦èѦ":116177,"åĩºçݰéĹ®é¢ĺ":116178,"è¸Ĭè·ĥ":116179,"åľ°çIJĥä¸Ĭ":116180,"å¸Ĥ人大":116181,"åıĹ害人":116182,"å²IJ":116183,"åIJĮåѸ":116184,"éĩijèŀįå¸Ĥåľº":116185,"æľīçļĦçݩ家":116186,"å¸ĤæķĻèĤ²":116187,"å¸ĤæķĻèĤ²å±Ģ":116188,"åIJĦå¼Ĥ":116189,"ç·ļä¸Ĭ":116190,"æģº":116191,"æľī大éĩıçļĦ":116192,"åķĨæĬ¥":116193,"åįķåįķ":116194,"åħ¨é¢Ŀ":116195,"ä¾ĿæĹ§æĺ¯":116196,"好åĩłä¸ª":116197,"åĸµ":116198,"éĩįæķ´":116199,"çĶŁæ´»è´¨éĩı":116200,"æİ¢è®¿":116201,"åį°èĬ±":116202,"缼è¡Į":116203,"å¾®è§Ĥ":116204,"èĪįå¾Ĺ":116205,"åºŁå¼ĥçī©":116206,"积èĵĦ":116207,"å®ļå±ħ":116208,"æĤ¼":116209,"èĮ¸":116210,"çļĦ帮åĬ©":116211,"çļĦ帮åĬ©ä¸ĭ":116212,"亿åIJ¨":116213,"åŃĶéĽĢ":116214,"è¿ĻæĿ¡è·¯":116215,"饵":1
16216,"æĦĪåĬł":116217,"éķį":116218,"ä½ľæ¡Ī":116219,"èįĶæŀĿ":116220,"太å°ij":116221,"跻身":116222,"åħ¬çĽĬæ´»åĬ¨":116223,"çϽæĸij":116224,"æĬĢæľ¯æ°´å¹³":116225,"帧":116226,"æĹłçŁ¥":116227,"åºĶ该æĢİä¹Ī":116228,"éĢĢå¸Ĥ":116229,"æ¸Ń":116230,"åħ»çĮª":116231,"驼":116232,"ç¾¤å²Ľ":116233,"大åį«":116234,"ä¹ĺçĶ¨è½¦":116235,"èı²å°Ķ":116236,"è´´åIJ§":116237,"åģľä¸ĭæĿ¥":116238,"æľīæľºç»ĵåIJĪ":116239,"åĪ»èĭ¦":116240,"çļĦåľ°":116241,"çļĦåľ°æŃ¥":116242,"è¯ĬæīĢ":116243,"å¼ĢæĪĺ":116244,"èĢģçīĮ":116245,"çѹçłģ":116246,"åħ«å¤§ä»¥æĿ¥":116247,"楼æĪ¿":116248,"åŃĻæĤŁ":116249,"åŃĻæĤŁç©º":116250,"åħĴåŃIJ":116251,"第ä¸ĢæĿ¡":116252,"社交åªĴä½ĵ":116253,"æĥ³èµ·æĿ¥":116254,"大æ´ĭ":116255,"æĭ¼éٳ":116256,"è¿Ľåįļä¼ļ":116257,"è¿ĩåħ³":116258,"æ²¼":116259,"ç©¿æIJŃ":116260,"éĤ£ä¸Ģ天":116261,"çł´éŨ":116262,"æĬķæłĩ人":116263,"赢家":116264,"èĻļå¼±":116265,"æ¿ĥ":116266,"å®īæ£Ģ":116267,"客家":116268,"çĭ¬ç«ĭèij£äºĭ":116269,"æīĭåĬ¿":116270,"åīµéĢł":116271,"åľĨ满å®ĮæĪIJ":116272,"为主线":116273,"好å¥ĩå¿ĥ":116274,"é¢ĨåľŁ":116275,"çªĸ":116276,"åħ¸åŀĭæ¡Īä¾ĭ":116277,"çªģåıijäºĭä»¶":116278,"åºķæ°Ķ":116279,"头æĻķ":116280,"å®Ľå¦Ĥ":116281,"觸":116282,"æ¸ħæ·¡":116283,"åļ¼":116284,"åģľç͵":116285,"ç²īå°ĺ":116286,"éĻįä½İæĪIJæľ¬":116287,"æĶ¾æīĭ":116288,"è®°èĢħ表示":116289,"æĭĸå»¶":116290,"éªĩ":116291,"æ®ĭå¿į":116292,"çľģæķĻèĤ²":116293,"çľģæķĻèĤ²åİħ":116294,"é«ĺé¢Ŀ":116295,"éĦĻ":116296,"æ¥ŀ":116297,"åĨħç§ij":116298,"èIJ¥ä¸ļé¢Ŀ":116299,"åŁºçŁ³":116300,"æµģæ·Į":116301,"主æĹ¨":116302,"éĺIJéĩĬ":116303,"建åįİ":116304,"æĥĬåı¹":116305,"çī¢åĽºæłijç«ĭ":116306,"æĺ¯åIJ¦åŃĺåľ¨":116307,"建åĨĽ":116308,"éĽ¾éľ¾":116309,"åħ¬è®¤":116310,"åħ¬è®¤çļĦ":116311,"æ°¨åŁº":116312,"æ°¨åŁºéħ¸":116313,"åīįåĩłå¹´":116314,"åιéĤ£":116315,"æ±Łä¸ľ":116316,"å·¥æ¥Ń":116317,"ä¸ĢçĤ¹ä¹Łä¸į":116318,"修士":116319,"äºĨä¸Ģéģį":116320,"åĪģ":116321,"æ»ļæ»ļ":116322,"åĪĨæł¡":116323,"羣çα":116324,"è¡ĢèĦī":116325,"æĢ¥åī§":116326,"ä¸Ģ群人":116327,"羯":116328,"æĪIJé¾Ļ":116329,"ç²¾ç¥ŀçĹħ":116330,"缸åħ³äººåijĺ":116331,"éĿĵ丽":116332,"ä¸īåŃ£åº¦":116333,"åĪĴå®ļ":116334,"ä¸ĸçķĮ第ä¸Ģ":116335,"éĢļä¿Ĺ":116336,"åķĨä¸ļåľ°äº§":116337,"åĬŁèĥ½æĢ§":116338,"èµĦæľ¬ä¸»ä¹ī":116339,"详è§ģ":116340,"æĬĵæįķ":116341,"æĸĩæĺĮ":116342,"å®Ŀå®ī":116343,"è£ħéħįå¼ı":116344,"æºIJæºIJ":116345,"æºIJæºIJä¸įæĸŃ":116346,"çĶŁæĢķ":116347,"纵åIJij":116348,"壽":116349,"çľ¼è¢ĭ":116350,"èĤīä½ĵ":116351,"åı¤ä»Ĭ":116352,"èŀįåªĴä½ĵ":116353,"åģī":116354,"æł¼æľĥåĵ¡":116355,"çĥ·":116356,"åĬŁç͍":116357,"æīŃ磩":116358,"绿èī²éĢļéģĵ":116359,"åī§ç»Ħ":116360,"å¼±åĬ¿":116361,"è´¨éĩıéĹ®é¢ĺ":116362,"éĻIJé¢Ŀ":116363,"éªĨ":116364,"éģµä¹ī":116365,"å¯Ŀ室":116366,"æĥ³å¿µ":116367,"åł±åijĬ":116368,"ä»ħ次":116369,"ä»ħ次äºİ":116370,"èŀįåĪĽ":116371,"æĭĽèģĺä¼ļ":116372,"åºĬåŀ«":116373,"转åŀĭåıijå±ķ":116374,"ä¸ŃåĽ½çĶµä¿¡":116375,"åIJ¬è¯Ŀ":116376,"è«ĭæ±Ĥ":116377,"大éĥ¨åĪĨ人":116378,"æ´»å¾Ĺ":116379,"åĵŃæ³£":116380,"è¶Ļ":116381,"åıijçĹħçİĩ":116382,"ä¸į符":116383,"åĨĽå®ĺ":116384,"é¢Īæ¤İ":116385,"æĸ°åĨłçĸ«æĥħ":116386,"æŁ¬åŁĶ":116387,"æŁ¬åŁĶ寨":116388,"ä»»ä½ķå½¢å¼ı":116389,"人éĻħ":116390,"人éĻħåħ³ç³»":116391,"æĢ»æī¿åĮħ":116392,"å¹³åĿĩæ¯ı":116393,"æģŃåĸľ":116394,"åĦĺ":116395,"åħµé©¬":116396,"è¿Łåΰ":116397,"工伤":116398,"çīĪæĿĥå½Ĵ":116399,"çīĪæĿĥå½ĴåİŁ":116400,"æĭ¥æĬ¤":116401,"ç³Ĭæ¶Ĥ":116402,"å¹²æ¶ī":116403,"å°ijä¸įäºĨ":116404,"æĥ³æī¾":116405,"è´¹çİĩ":116406,"该éĻ¢":116407,"èŀįåĮĸ":116408,"è¿İåIJĪ":116409,"è§ĨåIJ¬èĬĤ缮":116410,"æł¼ç¶²ç«Ļ":116411,"çľīæ¯Ľ":116412,"欢è¿İ大家":116413,"å®¶åºŃæķĻèĤ²":116414,"ä¾µèļĢ":116415,"ç»Ļä½łä»¬":116416,"è¡Ģ液循çݯ":116417,"å¯Ħæīĺ":116418,"å°ĸåı«":116419,"以ä¸ĭåĩłä¸ª":116420,"è¿ĺ以为":116421,"åħ¶ä»ĸçݩ家":116422,"ç¬ijç¬ij":116423,"æīĵåIJ¬":116424,"èĩªçĦ¶ç§ijåѦ":116425,"åŁºç«Ļ":116426,"ä¹Ŀå·ŀ":116427,"ä¿Ŀ驾":116428,"ä¿
Ŀ驾æĬ¤":116429,"ä¿Ŀ驾æĬ¤èĪª":116430,"æĶ¾çľ¼":116431,"çŁ¥åIJįä¼ģä¸ļ":116432,"縮":116433,"稽":116434,"æļĩ":116435,"使çĶ¨ç¶²è·¯":116436,"é¢ĦçķĻ":116437,"大象":116438,"åıijæĺİä¸ĵåĪ©":116439,"æĸĩ娱":116440,"éĢłç¦ı":116441,"湿润":116442,"éĿ¢æĿ¡":116443,"æ¶Īè´¹åįĩ级":116444,"è®Ĭå¾Ĺ":116445,"åĩłåIJį":116446,"ä»Ħ":116447,"认æ¸ħ":116448,"è¿ľæĻ¯":116449,"æıĴ座":116450,"诸侯":116451,"åıĺæĢģ":116452,"ç¦ı彩":116453,"è´§æŀ¶":116454,"失æİ§":116455,"ç§»åĬ¨ç«¯":116456,"ä¸Ĭåı¸":116457,"éĢłçº¸":116458,"å¸ĥæľĹ":116459,"çĴĩ":116460,"åı°åįĹ":116461,"åĮĹ京åĨ¬å¥¥":116462,"èĵĿçīĻ":116463,"éķ¿çŁŃ":116464,"æĬĺå°Ħ":116465,"ç»ijæŀ¶":116466,"å¯Ĵåģĩ":116467,"è½¬åŁºåĽł":116468,"æĢ¥äºİ":116469,"æŃ£åĵģ":116470,"åħħ滿":116471,"大纲":116472,"æĬĹä½ĵ":116473,"è¨ĵç·´":116474,"æĶ¶ç´§":116475,"æ¯Ķè³½":116476,"åħµåĬĽ":116477,"æľ¬æĽ¸":116478,"äºĮ代":116479,"æĢ¥è¯Ĭ":116480,"æĸĩæ¡Ī":116481,"ç»ıåķĨ":116482,"æĻ¨æĬ¥":116483,"æ£ĺ":116484,"æĢ»ä¹¦è®°åľ¨":116485,"åıĹéĤĢ":116486,"äºĶåĽĽ":116487,"å²ŃåįĹ":116488,"çαåIJĥ":116489,"åŁĥå°Ķ":116490,"å¿ĥå¢ĥ":116491,"è¦ĨçĽĸéĿ¢":116492,"å®ŀåľ¨æĺ¯å¤ª":116493,"æł¹åºķ":116494,"纷纷表示":116495,"åĹħ":116496,"éļıçĿĢæĹ¶éĹ´":116497,"åİĨåı²æĤłä¹ħ":116498,"éħī":116499,"æĢ»éĺŁ":116500,"主é¢ĺæ´»åĬ¨":116501,"éĹ®åį·":116502,"é©¿ç«Ļ":116503,"æı¡ä½ı":116504,"åı¯èĥ½å¯¼èĩ´":116505,"æ°ijéĸĵ":116506,"éĸĭåķŁ":116507,"ä½Ĩä¸įéĻIJ":116508,"ä½Ĩä¸įéĻIJäºİ":116509,"åįģéĩĮ":116510,"娥":116511,"æįŁèĢĹ":116512,"çĸı导":116513,"çݯ氧":116514,"ç¥ŀéĢļ":116515,"çαå°Ķ":116516,"çαå°Ķåħ°":116517,"æľ´å®ŀ":116518,"å¿«æĬ¥":116519,"æĶ¶åıĹ":116520,"æĪĸ許":116521,"èĥĮéĿ¢":116522,"æĸĩåĮĸä¼łåªĴ":116523,"ä¸īåĢĭ":116524,"æĶ»åĬ¿":116525,"å®ī举":116526,"å®īä¸ľå°¼":116527,"åĿĩå·²":116528,"顾èĻij":116529,"éĦŃ":116530,"è¿Ļå®¶åħ¬åı¸":116531,"åħ¬åijĬç§°":116532,"æıIJä¾Ľä¼ĺè´¨":116533,"稳æŃ¥æİ¨è¿Ľ":116534,"å¤įè¯ķ":116535,"å°Ĩé¢Ĩ":116536,"è°Īèµ·":116537,"å¨Ħ":116538,"è¿ŀ线":116539,"æ©ŁéĹľ":116540,"åºĶçĶ¨åľºæĻ¯":116541,"çĶ»åĥı":116542,"è´¢è¿IJ":116543,"ä¿Ŀéļª":116544,"çĹħçIJĨ":116545,"æ¯Ľä¸»å¸Ń":116546,"ä¸Ŀ毫ä¸į":116547,"çαå¥ĩ":116548,"çαå¥ĩèīº":116549,"ä¸ĵå®¶ç»Ħ":116550,"åij¼åͤ":116551,"éĭ¼":116552,"çģ¸":116553,"é¢ĨåħĪåľ°ä½į":116554,"æıIJæĭĶ":116555,"龸éģĵ":116556,"å±±åĿ¡":116557,"èĿİ":116558,"沸èħ¾":116559,"该项":116560,"ä»ĬçĶŁ":116561,"ä¸Ģç¯ĩæĸĩ竳":116562,"æĸ¹å¼ıè¿Ľè¡Į":116563,"é»ij客":116564,"æĶ¹åĬ¨":116565,"主é¡Į":116566,"æķ£å¸ĥ":116567,"ä»Ģä¹Īåľ°æĸ¹":116568,"åĮĸåIJĪ":116569,"åĮĸåIJĪçī©":116570,"éĿĻç͵":116571,"æĢ»æĶ¶åħ¥":116572,"å§Ķç»Ħç»ĩ":116573,"å§Ķç»Ħç»ĩéĥ¨":116574,"éĿĻæĢģ":116575,"èĢģåŃĹåı·":116576,"室åıĭ":116577,"éĥ½ä¸įæķ¢":116578,"æŀ¶åŃIJ":116579,"ç쵿ķı":116580,"审è§Ĩ":116581,"æĤ£åĦ¿":116582,"山寨":116583,"èĸªèµĦ":116584,"é©°æı´":116585,"éĥ¨åĪĨåĨħ容":116586,"好似":116587,"æĪIJåijĺåĽ½":116588,"åľ¨æĪijçľĭæĿ¥":116589,"åħ³æ³¨åº¦":116590,"éĻĪæŁIJ":116591,"è¿Ļç§įäºĭæĥħ":116592,"éĢīå®ļ":116593,"ç²¾åŃIJ":116594,"å£ģçĶ»":116595,"æ±Łæ·®":116596,"é«ĺæĺĤ":116597,"æł¼åĬĽ":116598,"輩":116599,"åѦåłĤ":116600,"æĤ¨åIJĮæĦı":116601,"ä¸ĢåĪĩéĥ½æĺ¯":116602,"潤":116603,"éĸĥ":116604,"å¸ĮæľĽèĩªå·±":116605,"ä¿ĺ":116606,"æ±Łåİ¿":116607,"æ³¾":116608,"ç§ijæķĻ":116609,"æīĵè¿Ľ":116610,"ä¸įæħİ":116611,"å¯ĴåĨ¬":116612,"æ¸Ķæ°ij":116613,"鼷æĸ¯":116614,"主宰":116615,"æĹħ游度åģĩ":116616,"ç͵åŃIJéĤ®ä»¶":116617,"æ±Ĥå©ļ":116618,"éļİæ®µ":116619,"åģ¥èº«æĪ¿":116620,"注æĺİåĩºå¤Ħ":116621,"äºĭæķħåıijçĶŁ":116622,"级以ä¸Ĭ":116623,"åŃĺæ´»":116624,"æĸ½èĤ¥":116625,"èľľèľĤ":116626,"嵩":116627,"æĮĸæİĺæľº":116628,"æĬĹæĭĴ":116629,"ä¼łå¯¼":116630,"æĺ¯ä»Ģä¹Īåij¢":116631,"ä¸Ĭå¹´åIJĮæľŁ":116632,"建åħļ":116633,"çĶŁæħĭ":116634,"ä¿Ŀä½ı":116635,"款车åŀĭ":116636,"人èĦī":116637,"éļIJèͽ":116638,"失æķĪ":116639,"éģ¿åŃķ":116640,"ç®Ģ便":116641,"è°¢è°¢ä½ł":116642,"å®Īä½ı":116643,"æĶ¾æĺł"
:116644,"è¨Īçķ«":116645,"çݰ代çµģ":116646,"é¤IJ廳":116647,"æķħå±ħ":116648,"大大å°ı":116649,"大大å°ıå°ı":116650,"çī¹åΫ声æĺİ":116651,"éģįåıĬ":116652,"å¿ĥçIJĨåĴ¨è¯¢":116653,"è³´":116654,"çĮ®è¡Ģ":116655,"å·²ç»ıè¾¾åΰ":116656,"æīĵæĭĽåij¼":116657,"åıĮè¾¹":116658,"ä¸Ģæĸ¹éĿ¢æĺ¯":116659,"å´ĩå°ļ":116660,"éĺ¿å¯Į":116661,"éĺ¿å¯Įæ±Ĺ":116662,"æĮģæľī人":116663,"è±ģ":116664,"é£İçŃĿ":116665,"åĬ¨èį¡":116666,"äºĨä¸Ģä¼ļ":116667,"äºĨä¸Ģä¼ļåĦ¿":116668,"ä¸ĩ象":116669,"çľĭç͵è§Ĩ":116670,"åįģä¸īæĿ¡":116671,"çĮĽçĥĪ":116672,"è¦ģä¸įçĦ¶":116673,"太æŀģæĭ³":116674,"å¼ķçĪĨ":116675,"ç»ıè¿ĩå¤ļå¹´":116676,"游æĪıéĩĮçļĦ":116677,"é¾Ļæ³ī":116678,"æłĩéħį":116679,"è®ĵä»ĸåĢij":116680,"éĢłæŀĹ":116681,"åĮºåŁŁæĢ§":116682,"亿ä¸ĩ":116683,"æĪĺçķ¥å¸ĥå±Ģ":116684,"éķĩæĶ¿åºľ":116685,"åĶ®ç¥¨":116686,"çĶŁäº§å·¥èīº":116687,"éķĩåħļå§Ķ":116688,"ä¸Ńå°ıåŀĭ":116689,"æľ¨è̳":116690,"河边":116691,"èĦ¾èĥĥ":116692,"欢è¿İæĤ¨":116693,"åıĺå¼Ĥ":116694,"缤纷":116695,"åŀĥåľ¾æ¡¶":116696,"辩è¯ģ":116697,"车åºĵ":116698,"æ¯Ķçİĩ":116699,"åħ´æĹº":116700,"详ç»ĨäºĨè§£":116701,"å®īå±ħ":116702,"çħ§æĸĻ":116703,"æĸ¹æīį":116704,"赦":116705,"åĨķ":116706,"å¥Ķèµ´":116707,"å®Ŀ鸡":116708,"åľºåĿĩ":116709,"缮åīįæŃ£åľ¨":116710,"åIJŀåϬ":116711,"è¿°èģĮ":116712,"æĩµ":116713,"å¥ĩçijŀ":116714,"ä»įå°Ĩ":116715,"èĪī辦":116716,"å·¥åķĨå±Ģ":116717,"å¡ijèĥ¶":116718,"åĬŀå®ŀäºĭ":116719,"æĸ¹æĸ¹éĿ¢":116720,"æĸ¹æĸ¹éĿ¢éĿ¢":116721,"æĸĩåĮĸèĬĤ":116722,"åħ¥èģĮ":116723,"鸥":116724,"ç©¿éĢı":116725,"ä»¥ä¹łè¿ijå¹³":116726,"åį±éļª":116727,"æľ¦èĥ§":116728,"åİĨåı²æĢ§":116729,"æķŀå¼Ģ":116730,"ä¼Ļä¼´åħ³ç³»":116731,"çŁ¿åĮº":116732,"åĽ½éĻħåľ¨çº¿":116733,"ä¼łå¥ĩéĩĮéĿ¢":116734,"è¿ijäºĽ":116735,"è¿ijäºĽå¹´":116736,"åĬ£åĬ¿":116737,"æĶ»åĩ»åĬĽ":116738,"æĻºéĢł":116739,"禧":116740,"çİĭåħĪçĶŁ":116741,"éĨ«çĶŁ":116742,"åĽĽé¡¹":116743,"å®ŀæĻ¯":116744,"åĪĿåĪĽ":116745,"å¿ĥ裡":116746,"æĻ¶ä½ĵ":116747,"交éĻħ":116748,"让æ¶Īè´¹èĢħ":116749,"课æĸĩ":116750,"æİĴæ°Ķ":116751,"å¹¶ä¸įæĦıåij³":116752,"çĽ¸å£°":116753,"第ä¸Ģå±Ĭ":116754,"åİŁèijĹ":116755,"鼾":116756,"没æľī太大":116757,"补水":116758,"çµģä¼ģä¸ļ":116759,"第äºĮæī¹":116760,"åħ¶å®ĥéĹ®é¢ĺ":116761,"æİĮéŨ":116762,"责任å¿ĥ":116763,"é¤IJåħ·":116764,"ç¾Ĭæ¯Ľ":116765,"没æľīå¿ħè¦ģ":116766,"ä¹IJåĽ¢":116767,"è¿ĽåŁİ":116768,"ä¸ĢçĤ¹åĦ¿":116769,"身形":116770,"çļ®èĤ¤çĹħ":116771,"æĺ±":116772,"å¢ŀèĩ³":116773,"è첿ĺİ":116774,"æıIJè´¨":116775,"ä½ĵèĤ²åľº":116776,"çŃ¹å»º":116777,"é¬Ĩ":116778,"车çīĮ":116779,"éļĶéŁ³":116780,"è´Łè´£åIJĮå¿Ĺ":116781,"丰ç¡ķ":116782,"ä½ĽéĻĢ":116783,"äºīåIJµ":116784,"庶":116785,"æ·¡æ°´":116786,"å°ıçĶ·åŃ©":116787,"ç§ģèĩª":116788,"åĮĸè¿Ľç¨ĭ":116789,"æĪĺ士æĿ¥è¯´":116790,"æ²¹èħ»":116791,"èĦ±è´«èĩ´å¯Į":116792,"æĹ¥å¸¸å·¥ä½ľ":116793,"交èŀį":116794,"åĨľè´¸":116795,"åĨľè´¸å¸Ĥåľº":116796,"åĵĪçĻ»":116797,"çĶµè´¹":116798,"èµĺ":116799,"åıĮèħ¿":116800,"æĵĶå¿ĥ":116801,"æĿ¥å½¢å®¹":116802,"使åij½æĦŁ":116803,"éĤ£ä¹Īç®Ģåįķ":116804,"èĬĻèĵī":116805,"åĢŁæ¬¾äºº":116806,"ç§Ģ丽":116807,"è®ĵä»ĸ":116808,"严åİīæīĵåĩ»":116809,"è³ŀ":116810,"æļ«":116811,"çħ¤æ°Ķ":116812,"çάä¸Ĭ":116813,"æ½ĩæ´Ĵ":116814,"太ä¹ħ":116815,"åij½åIJį为":116816,"è·¯çͱ":116817,"è·¯çͱåύ":116818,"驯":116819,"æıIJæĹ©":116820,"æĬĹåĩ»çĸ«æĥħ":116821,"åĩĽ":116822,"交åıĭ":116823,"éĶĢåĶ®æ¸łéģĵ":116824,"毫ä¸įçĬ¹è±«":116825,"èIJ¥åľ°":116826,"çłĶ究表æĺİ":116827,"鱼类":116828,"æį¢å±Ĭ":116829,"æİ¡åıĸ":116830,"çīĨ":116831,"缼å¼Ģ":116832,"æ²§æ¡ij":116833,"åºŃ审":116834,"ç»ıæŁ¥":116835,"åĬłå¼·":116836,"缸æ¯Ķäºİ":116837,"ä¸ĵçıŃ":116838,"ä½ĵåŀĭ":116839,"被害":116840,"被害人":116841,"æĶ¶æ¬¾":116842,"åħ·æľīèī¯å¥½":116843,"é«ĺå³°æľŁ":116844,"åģıä½İ":116845,"åĦŁ":116846,"åĨľä¸ļç§ijæĬĢ":116847,"ç®ĬæĥħåĨµ":116848,"å¦Ĥæŀľçݩ家":116849,"éķ¿çº¦":116850,"第åħŃå±Ĭ":116851,"åħ¬å¼ĢæĭĽèģĺ":116852,"åĪĩæĸŃ":116853,"迫使":116854,"çĸĹç¨ĭ":116855,"第
äºĮç§į":116856,"ä¸įåħį":116857,"å¹²èѦ":116858,"çŁ³æ¦´":116859,"åĹ£":116860,"两类":116861,"çε士":116862,"åŁİ乡å±ħæ°ij":116863,"æŃ¤é¡¹":116864,"缴è¾ĸ":116865,"缴è¾ĸå¸Ĥ":116866,"åij¼åºĶ":116867,"éĴ¯":116868,"ç¦ıå¾·":116869,"æľºèº«":116870,"æĵįåľº":116871,"æ¿Ĵ临":116872,"人群ä¸Ń":116873,"èĤ¡æ°ij":116874,"åѽ":116875,"æ³ķåħ°":116876,"é¨İ":116877,"糯米":116878,"æĢ»çļĦ":116879,"æĢ»çļĦæĿ¥è¯´":116880,"åħ¸éĽħ":116881,"æĸ°éĻĪ":116882,"æĸ°éĻĪ代谢":116883,"缮çĿ¹":116884,"é¢Ħè¨Ģ":116885,"è·Įçł´":116886,"æĸ°ç¯ĩ竳":116887,"æ¯ĴæĢ§":116888,"åĸĿèĮ¶":116889,"æŁ¥èİ·":116890,"亮丽":116891,"çĶŁäº§åķĨ":116892,"æĶ¹æĪIJ":116893,"为äºĨæĽ´å¥½":116894,"深交":116895,"深交æīĢ":116896,"æİĥ":116897,"ä¹ĻèĤĿ":116898,"泸å·ŀ":116899,"åħĪè¿ĽæĬĢæľ¯":116900,"è¾ĵç»Ļ":116901,"æķ£æĪ·":116902,"æĢĿç»´æĸ¹å¼ı":116903,"åºĹ主":116904,"è°ĭæ±Ĥ":116905,"游æĪıæĬĢå·§":116906,"ä¸Ģ年级":116907,"çľ¼è§Ĵ":116908,"ä¸Ńä»ĭæľºæŀĦ":116909,"å·§åIJĪ":116910,"éĺ²çĽĹ":116911,"导è´Ń":116912,"æĪĬ":116913,"æĽ´éĢĤåIJĪ":116914,"åŁºæľ¬ä¿¡æģ¯":116915,"马ä¸ģ":116916,"åħ»æ®ĸåľº":116917,"åıįè¿ĩæĿ¥":116918,"æİ¨å´ĩ":116919,"å¯ĨåĪĩåħ³æ³¨":116920,"åŁºéĩijç»ıçIJĨ":116921,"æĮīéĶ®":116922,"åĨħéĥ¨æİ§åζ":116923,"æĪIJåijĺåįķä½į":116924,"æľ¯è¯Ń":116925,"åζæľį":116926,"åĪļéľĢ":116927,"æ£Ģç´¢":116928,"大大æıIJé«ĺ":116929,"åģ¥åº·ç®¡çIJĨ":116930,"èĩªæŃ¤":116931,"客æĪ·éľĢæ±Ĥ":116932,"丰èĥ¸":116933,"èµ·éĩį":116934,"èµ·éĩįæľº":116935,"æ¬łç¼º":116936,"æ¡ĪåŃIJ":116937,"æĥħ人èĬĤ":116938,"åħļæł¡":116939,"è¢ľ":116940,"该åī§":116941,"è¿·å¤±ä¼łå¥ĩ":116942,"ç»ļ丽":116943,"åķª":116944,"æĹłç§ģ":116945,"é̲ä¸ĢæŃ¥":116946,"第ä¸Ģ竳":116947,"åύåħ·":116948,"åĨľèµĦ":116949,"確實":116950,"åºıåĪĹ":116951,"娱ä¹IJå¹³åı°":116952,"èŀįèµĦç§Łèµģ":116953,"èµĦæºIJåħ±äº«":116954,"èģ½åΰ":116955,"æIJŀå¾Ĺ":116956,"ç»§ç»Ńä¿ĿæĮģ":116957,"åIJ¯èĴĻ":116958,"çľº":116959,"ä¸Ŀè·¯":116960,"设æĸ½å»ºè®¾":116961,"æİ¥åľ°":116962,"æİ¥åľ°æ°Ķ":116963,"第ä¸īåŃ£åº¦":116964,"åŁºè°ĥ":116965,"åıijéŁ³":116966,"社ä¼ļèµĦæľ¬":116967,"éĽĩ主":116968,"è¿ŀèĥľ":116969,"没åķ¥":116970,"廢":116971,"èµ¶èµ´":116972,"æ¼ĶåĮĸ":116973,"åı¤æĢª":116974,"çİĭçĪ·":116975,"é¢ĦåħĪ":116976,"å¼Ģåħ·":116977,"åĽŀé¦ĸ":116978,"åľ°ä¸ĭæ°´":116979,"å°ıç¼ĸä¸Ģèµ·":116980,"èµİåĽŀ":116981,"åľ°è²Į":116982,"åĪĿä¸ī":116983,"åı¯ç͍äºİ":116984,"éģĹ迹":116985,"è¿Ļæī¹":116986,"èĸªæ°´":116987,"å¿ħçĦ¶ä¼ļ":116988,"æ²½":116989,"éįĭ":116990,"第ä¸Ģéĥ¨":116991,"åĪĬçī©":116992,"å®ŀä¾ĭ":116993,"æ¸ħåĩĢ":116994,"ä¸ĬèµĽåŃ£":116995,"åĽ¾è¡¨":116996,"éĤ®è½®":116997,"åĵªè£¡":116998,"缸è§ģ":116999,"æī°ä¹±":117000,"æ¯ıæ¯ı":117001,"è¿Ļè¾ĪåŃIJ":117002,"ç¡«éħ¸":117003,"äºī缸":117004,"溯æºIJ":117005,"åĩºä¼Ĺ":117006,"çİīçŁ³":117007,"åħ±çĶŁ":117008,"æĹ¶éĹ´æ®µ":117009,"éĩįè¦ģæĮĩ示":117010,"æ¶Īè´¹éľĢæ±Ĥ":117011,"éķ¿éķ¿":117012,"éķ¿éķ¿çļĦ":117013,"å®īæĬļ":117014,"å¢ŀé«ĺ":117015,"æľ¬è½®":117016,"äº²çľ¼":117017,"é£İæ³¢":117018,"èĢģå¦Ī":117019,"æĶ¶è´¹æłĩåĩĨ":117020,"åĨħéĻĨ":117021,"æĮ¥åıij":117022,"åįĩåѦ":117023,"èĥ¸åīį":117024,"åģıè¿ľ":117025,"纯æ´ģ":117026,"æĸ½å·¥åįķä½į":117027,"身价":117028,"è´¢åĬĽ":117029,"纶":117030,"è£ħçͲ":117031,"æĺ¾ç¤ºåύ":117032,"毫åįĩ":117033,"æ·±çŁ¥":117034,"è̶ç©":117035,"è̶ç©Į":117036,"è¾ĥéĩı":117037,"åľ¨è¿ĩ渡":117038,"åľ¨è¿ĩæ¸¡æľŁ":117039,"èĮĹ":117040,"ä¸Ģ个æĺŁæľŁ":117041,"èĬ·":117042,"è´¿èµĤ":117043,"æ¿ķ":117044,"æĩĤäºĭ":117045,"ç§§":117046,"åħħå½ĵ":117047,"åĽ½ç«ĭ":117048,"èĬ±çĵ£":117049,"éĤĦè¦ģ":117050,"åħ¬åľĴ":117051,"触åĬ¨":117052,"æ³°å·ŀ":117053,"ä»Ģä¹Īæł·":117054,"æ»ĭåħ»":117055,"è¯ĦåΤ":117056,"æĮ¥æīĭ":117057,"èĦĪ":117058,"姥姥":117059,"è¿IJè´¹":117060,"æ¯ħåĬĽ":117061,"å¿ĥæĻº":117062,"ä¸įæİĴéϤ":117063,"第ä¸ī代":117064,"éĢĢè´§":117065,"æĺŁéĻħ":117066,"æ°¸åĪ©":117067,"æĬ¤åį«":117068,"çıŃ车":117069,"è¨Ģè¡Į":117070,"繪":117071,"主åĬ¨æĢ§":117072,"å
·¥ç¨ĭè´¨éĩı":117073,"éĥĬåĮº":117074,"ä¸Ģæłĭ":117075,"ä½Ĩå®ŀéĻħä¸Ĭ":117076,"ä¸ī大èģĮä¸ļ":117077,"åij¼åı«":117078,"女åħĴ":117079,"è¯ģåΏæĬķèµĦ":117080,"èĢĥæħ®":117081,"çĤ«èĢĢ":117082,"治好":117083,"åĺ¶":117084,"èĥ¤":117085,"åħīä¼ıåıijç͵":117086,"åĩłæŃ¥":117087,"æīĢæīĢ":117088,"æīĢæīĢéķ¿":117089,"çħ§æł·":117090,"åĵ¥ä»¬":117091,"è¯Ľ":117092,"è¿Ļä¸ĢåĪ»":117093,"çŁ¿çī©è´¨":117094,"ä¸įå¾Ĺå·²":117095,"åIJĮ缣":117096,"ç»Ĩå¾®":117097,"è·¯èĻİ":117098,"çϾèĬ±":117099,"æ··æ²Į":117100,"ä¸Ĭæµ·è¯ģåΏ":117101,"éĢĢç¨İ":117102,"èµŀåı¹":117103,"æī®æ¼Ķ游æĪı":117104,"åIJįåĪĹ":117105,"åIJįåĪĹåīį":117106,"åIJįåĪĹåīįèĮħ":117107,"ç±³å°Ķ":117108,"ä»Ģä¹ĪåİŁåĽł":117109,"å®īåħ¨ä¿Ŀéļľ":117110,"ä¸Ģåıªæīĭ":117111,"ä¹³ä¸ļ":117112,"ä¸įçĶĺ":117113,"æĥħåķĨ":117114,"æĮ¡ä½ı":117115,"åİŁåĽłä¹ĭä¸Ģ":117116,"è¿Ļ两天":117117,"çĥĺçĦĻ":117118,"豬":117119,"ä½łä»¥ä¸º":117120,"没è§ģè¿ĩ":117121,"åĵªå®¶å¥½":117122,"åīįä»»":117123,"è¿Ľè´§":117124,"éĢĢåĽŀ":117125,"串èģĶ":117126,"èĩ³æĸ¼":117127,"åĨ°æ·ĩ":117128,"åĨ°æ·ĩæ·ĭ":117129,"æŁ¥çľĭ详æĥħ":117130,"çı¾å¯¦":117131,"æİ¨æµĭ":117132,"æİ¥æīĭ":117133,"éļ¶å±ŀäºİ":117134,"åŁİå¸Ĥ群":117135,"æĿİåħĪçĶŁ":117136,"çŁ¿æ³īæ°´":117137,"çī¹ä»·":117138,"æĽ´å¤ļ精彩":117139,"ç¨ĭå¼ı":117140,"读æĩĤ":117141,"å±ıèͽ":117142,"奥æŀĹ":117143,"奥æŀĹåĮ¹":117144,"奥æŀĹåĮ¹åħĭ":117145,"红èĸ¯":117146,"奮":117147,"å®Ŀçİī":117148,"網絡":117149,"è²§":117150,"欧å¼ı":117151,"çϽç³ĸ":117152,"èĩªçĦ¶çģ¾å®³":117153,"åijĬè¯ī她":117154,"å»ļ":117155,"çĤ¹åĩ»æŁ¥çľĭ":117156,"é£İ湿":117157,"èµĦ产éĩįç»Ħ":117158,"ä¹Łä¸įä¾ĭå¤ĸ":117159,"åįĬ个å°ıæĹ¶":117160,"åIJ¸å¼ķæĽ´å¤ļ":117161,"æĹ¶éĹ´èĬĤçĤ¹":117162,"æĶ¶çº³":117163,"åIJ¸æ¯Ĵ":117164,"èĢģ乡":117165,"çIJħ":117166,"æľĢçµĤ":117167,"åıįæĦŁ":117168,"çĶ¨å¾®ä¿¡":117169,"çĶ¨å¾®ä¿¡æī«":117170,"éĢŁçİĩ":117171,"大çĨĬçĮ«":117172,"åı¯æĥ³":117173,"åı¯æĥ³èĢĮ":117174,"åı¯æĥ³èĢĮçŁ¥":117175,"åĴ§":117176,"èµ°åħ¥":117177,"碳éħ¸":117178,"èĮĥåĨ°":117179,"èĮĥåĨ°åĨ°":117180,"被åΤ":117181,"积æŀģæİ¨åĬ¨":117182,"足足":117183,"ç²ĴåŃIJ":117184,"大å®Ĺ":117185,"大å®ĹåķĨåĵģ":117186,"ç½ij绾ç§ijæĬĢ":117187,"æĽ¼åŁİ":117188,"å·²ä¹ħ":117189,"å·²ä¹ħçļĦ":117190,"秦çļĩ":117191,"秦çļĩå²Ľ":117192,"ä»»æķĻ":117193,"å͝ç¾İ":117194,"æ·¡åĮĸ":117195,"æ¡ĤèĬ±":117196,"çŁ¥è¯ĨåĪĨåŃIJ":117197,"æĩĴå¾Ĺ":117198,"主åħ¬":117199,"设计çIJĨ念":117200,"賺":117201,"æīĢæıIJä¾Ľ":117202,"æīĢæıIJä¾Ľä¹ĭ":117203,"æĶ»åħĭ":117204,"åĤ¾":117205,"è¯Ńæ³ķ":117206,"åįĥåı¤":117207,"éĸĭæĶ¾":117208,"第ä¸ĢèĬĤ":117209,"éĤĦæ²Ĵ":117210,"éĢĥçĶŁ":117211,"æ³Ĺ":117212,"åİ¿å§Ķ书记":117213,"ä½ľèĢħæīĢæľī":117214,"çħ½":117215,"ç»ħ":117216,"æłħ":117217,"æľ´ç´ł":117218,"çijķçĸµ":117219,"åĮħåĮħ":117220,"æ°ij主åħļ":117221,"ä¸įè¿ľå¤Ħ":117222,"å¥ĩå¼Ĥ":117223,"åĺ»åĺ»":117224,"æī¼":117225,"ç¿»å¼Ģ":117226,"æĢİèĥ½":117227,"éģ´éĢī":117228,"è§£éĩĭ":117229,"å¹¼ç¨ļ":117230,"è¦ģ好好":117231,"è¶´åľ¨":117232,"ç´¢åıĸ":117233,"ç»ĪçĶŁ":117234,"åħ¨æµģç¨ĭ":117235,"éģ©çķ¶":117236,"åįıè°ĥåıijå±ķ":117237,"æĬ¥ä»ĩ":117238,"ç§ijæĬĢåĽŃ":117239,"ä»Ģä¹Īéĥ½ä¸į":117240,"æľĢåIJİä¸Ģ次":117241,"ç»Ļ人ä¸Ģç§į":117242,"æł¸å®ļ":117243,"被åĪĹåħ¥":117244,"æĦıæĥ³ä¸įåΰ":117245,"èĢĥæŁ¥":117246,"åľ¨æŃ¤ä¹ĭåīį":117247,"æīĵçIJĥ":117248,"è¶ĬæĿ¥è¶Ĭå°ij":117249,"å®ļå¾ĭ":117250,"è¡ĮæĶ¿æľºåħ³":117251,"ä½ıæĪ¿åħ¬ç§¯":117252,"å°ıå§IJå§IJ":117253,"ä¸īèı±":117254,"修补":117255,"èŀĥèŁ¹":117256,"西çͲ":117257,"æĢł":117258,"çŃīå¤ļ项":117259,"产ä¸ļéĽĨèģļ":117260,"ä»·æł¼ä¸Ĭ涨":117261,"åħ¬åħ±åľºæīĢ":117262,"è¢ĭåŃIJ":117263,"æĨ§æĨ¬":117264,"çļĦæĸ¹å¼ıæĿ¥":117265,"åĪ°è´¦":117266,"çģ½":117267,"å·´èı²":117268,"å·´èı²çī¹":117269,"æ¼Ķä¹ł":117270,"èŃ¦ç¤ºæķĻèĤ²":117271,"çķıæĥ§":117272,"å¼ķæµģ":117273,"æĶ¶æĶ¯":117274,"å±Ĥåĩº":117275,"å±Ĥåĩºä¸į":117276,"å±Ĥåĩºä¸įç©·":117277,"æijĩæ»ļ":117278,"辦çIJĨ":117279,"纵è§Ĥ":117280,"æķijæµ
İ":117281,"å®¶éĥ½çŁ¥éģĵ":117282,"åĮ¯":117283,"å°ı鸣":117284,"ä»»åĭĻ":117285,"计åħ¥":117286,"ç«ŀéĢī":117287,"å¼ĢèįĴæĹ¶æľŁ":117288,"åij¨æģ©":117289,"åij¨æģ©æĿ¥":117290,"交ç»ĩ":117291,"çķ¢æ¥Ń":117292,"æł¹æį®èĩªå·±":117293,"æĸ°äººçݩ家":117294,"åѵåĮĸåύ":117295,"éĩĩæļĸ":117296,"å¹³åĿĩæ°´å¹³":117297,"åħ¬å¼Ģ课":117298,"失åĪ©":117299,"伺æľį":117300,"çĬģ":117301,"忽æĤł":117302,"主è¦ģéĽĨä¸Ń":117303,"æ¤įæłij":117304,"æ¯ĹéĤ»":117305,"èĩºçģ£":117306,"åĩºåĽ½çķĻåѦ":117307,"æĬĹéľĩ":117308,"æĥ©æĪĴ":117309,"å¹´åºķåīį":117310,"åĴ¸éĺ³":117311,"æ°ijå±ħ":117312,"大çIJĨçŁ³":117313,"éĿ³":117314,"éķĸ":117315,"æ¸ħè¿ľ":117316,"è£ħè½½":117317,"èĩĢ":117318,"å½±ä¸ļ":117319,"å¼ŁåħĦ":117320,"æĤ²è§Ĥ":117321,"çĿĢçľ¼äºİ":117322,"æįįåį«":117323,"åī¥å¤º":117324,"ç¯Ĩ":117325,"å¾Īéķ¿æĹ¶éĹ´":117326,"è¥Ł":117327,"第ä¸ĢçϾ":117328,"ä¸ĢåĪĨéĴ±":117329,"æĸ°éĹ»è®°èĢħ":117330,"éķ·æľŁ":117331,"æ³ķæĪĺç»ĦåIJĪ":117332,"è°ģçŁ¥éģĵ":117333,"èħ°éĥ¨":117334,"æ±īåł¡":117335,"åħ¥çĿ¡":117336,"åįĸæİī":117337,"æ¶Īè²»èĢħ":117338,"æĥ¯ä¾ĭ":117339,"æĥ³äºĨ":117340,"æĥ³äºĨæĥ³":117341,"èĢģæĹ§å°ıåĮº":117342,"ä¼łè¨Ģ":117343,"åĪĨæķ°çº¿":117344,"æµģ泪":117345,"ç»Ħç»ĩé¢Ĩ导":117346,"äºļåĨĽ":117347,"å¢ŀå̼æľįåĬ¡":117348,"å¾¹":117349,"ä¼¶":117350,"äºĽè®¸":117351,"å¸ĥèݱ":117352,"强æĤį":117353,"宫廷":117354,"绿èĮ¶":117355,"åĮ¡":117356,"å¾ĪæŃ£å¸¸":117357,"æĺ¥å¤ı":117358,"æ¯Ļ":117359,"è¯Ħæ¯Ķ":117360,"åĩ¡äºĭ":117361,"æĬīæĭ©":117362,"åĢĴéľī":117363,"éĩį度":117364,"åįıä¼ļä¼ļéķ¿":117365,"å¿§èĻij":117366,"ä¸ĭä¸Ģç¯ĩ":117367,"沪深":117368,"æĪİ":117369,"æīĵä»Ĺ":117370,"åįĪé¥Ń":117371,"å¹´é¾Ħ段":117372,"ä¸ŃåĽ½è¶³çIJĥ":117373,"设计æĸ¹æ¡Ī":117374,"åºĶçĶ¨æŁ¥çľĭ":117375,"é¢ĦæĸĻ":117376,"åĹ¡":117377,"ç¥ĸçζ":117378,"çļĦä¸Ģåijĺ":117379,"æ´Ĺå¹²åĩĢ":117380,"åİĨåı²æĸ°":117381,"åİĨåı²æĸ°é«ĺ":117382,"çĭ¬åħ·":117383,"æħĭ度":117384,"æīĵ交":117385,"æīĵ交éģĵ":117386,"é»ĦçŁ³":117387,"çĽ¼æľĽ":117388,"çī§åľº":117389,"转弯":117390,"åįĩåįİ":117391,"åĨįä¹Łæ²¡æľī":117392,"èĭ±æīį":117393,"æĽ´åIJį为":117394,"åĢŁç͍":117395,"çºłéĶĻ":117396,"ç»Ŀ对ä¸įä¼ļ":117397,"çİĭçīĮ":117398,"çĽĨåľ°":117399,"失è°ĥ":117400,"好象":117401,"é³¥":117402,"ä¿Ŀä¿®":117403,"åĽĽä¸ªèĩªä¿¡":117404,"头çļ®":117405,"åİŁåīĩ":117406,"æĬ¥æ¡Ī":117407,"奴éļ¶":117408,"å³Ļ":117409,"è°ĥæĸĻ":117410,"ä¹Łè¨±":117411,"èIJ½åΰ":117412,"èIJ½åΰå®ŀ":117413,"èIJ½åΰå®ŀå¤Ħ":117414,"çĦļçĥ§":117415,"çĶŁæ´»çݯå¢ĥ":117416,"åºĶåıĬæĹ¶":117417,"è¶Ĭè¿ĩ":117418,"æĦŁè¬Ŀ":117419,"æĻ¯å¾·":117420,"æĻ¯å¾·éķĩ":117421,"çĬĢ":117422,"身éĤĬ":117423,"ç¨İåĬ¡æĢ»å±Ģ":117424,"åĩĢåľŁ":117425,"ä¾µåįł":117426,"åĬ¨å·¥":117427,"å¹´ä¹ĭ":117428,"å¹´ä¹ĭä¹ħ":117429,"第äºĮèĬĤ":117430,"åĬ¨çī©åĽŃ":117431,"第ä¸Ģ书记":117432,"éħļ":117433,"çĶŁäº§è®¾å¤ĩ":117434,"æŁIJç§įç¨ĭ度":117435,"åľŃ":117436,"åĩŃåĢŁçĿĢ":117437,"éĺħè§Ī":117438,"çϽæ²Ļ":117439,"æ²¹çĥŁ":117440,"çªģçł´åı£":117441,"åıĹå½±åĵį":117442,"åı¯ä»¥æĽ´å¥½":117443,"å³°å̼":117444,"æĿĤè´¨":117445,"宿è¿ģ":117446,"çĽĺæ´»":117447,"æ¿Ģèµ·":117448,"åĦ¿ç§ij":117449,"åĿIJèIJ½åľ¨":117450,"æĮªå¨ģ":117451,"æµ·å²Ľ":117452,"绣绣":117453,"éύ":117454,"ä¼ĺäºİ":117455,"å°Īå®¶":117456,"ä¸ĢéĤĬ":117457,"èIJĬ":117458,"äºĨä¸Ģåı£":117459,"æ²ĥå°Ķæ²ĥ":117460,"æŃ£å¸¸ä½¿ç͍":117461,"æĻ®éģįåŃĺåľ¨":117462,"丰满":117463,"çĶ»åį·":117464,"åºĶæĶ¶":117465,"åºĶæĶ¶è´¦":117466,"åºĶæĶ¶è´¦æ¬¾":117467,"å®Įæķ´çĥŃ":117468,"å®Įæķ´çĥŃæ¦ľ":117469,"注è§Ĩ":117470,"çĨĦ":117471,"躬":117472,"éĶĢåĶ®äººåijĺ":117473,"è¶ĭåIJij":117474,"çĦ¦æĢ¥":117475,"åįģå¹´åīį":117476,"ä¼łç»Łäº§ä¸ļ":117477,"質éĩı":117478,"åĩ¤åĩ°ç½ij":117479,"èµĦæºIJæķ´åIJĪ":117480,"æ¶Įåħ¥":117481,"æĸĩåĮĸä¼łæĴŃ":117482,"çķĮ第ä¸Ģ":117483,"æ°´æ³µ":117484,"宫殿":117485,"æİ¢å¯»":117486,"ä¿®åīª":117487,"æĦıè¦ĭ":117488,"ç´Ĭä¹±":117489,"æĽī":117490,"çĻ½è¡£":117491,"èĻİåį«":117492,"ç´§æī£"
:117493,"å¤Ħå¤Ħéķ¿":117494,"åĪĽå»ºå·¥ä½ľ":117495,"红æŀ£":117496,"饼干":117497,"äºĨåįĬ天":117498,"ä¼ļå½±åĵįåΰ":117499,"çĽ¸ä¿¡å¤§å®¶":117500,"èħ¾é£ŀ":117501,"å°±å¦ĤåIJĮ":117502,"ä¸ĭéĿ¢å°ıç¼ĸ":117503,"æ°ijèIJ¥ç»ıæµİ":117504,"æĻ¦":117505,"è£ħæī®":117506,"é»ijå¤ľ":117507,"常德":117508,"å·¥ä¸ļ大åѦ":117509,"æĺİçŁ¥":117510,"éĺŁåijĺ们":117511,"åIJ¬è¯¾":117512,"æ¯ıéļĶ":117513,"羣æĺ¯å¤ª":117514,"åIJĪä½ľåħ±èµ¢":117515,"çIJĨåıij":117516,"æīįå¹²":117517,"çľĭèµ·ä¾Ĩ":117518,"殿ä¸ĭ":117519,"å®īéĺ³":117520,"æīĢ产çĶŁçļĦ":117521,"éĽĩä½£":117522,"æĬ¬èµ·å¤´":117523,"æį®æĬ¥éģĵ":117524,"éļĨéĩį举è¡Į":117525,"交éĶĻ":117526,"è¶ħé¢Ŀ":117527,"åĮĸçĸĹ":117528,"é¡Ĩ":117529,"纵深":117530,"çĪ±åĽ½ä¸»ä¹ī":117531,"éĻ¢åī¯éĻ¢éķ¿":117532,"讳":117533,"羣æŃ£åģļåΰ":117534,"åѤåįķ":117535,"èĩªçĦ¶èĢĮ":117536,"èĩªçĦ¶èĢĮçĦ¶":117537,"修身":117538,"èĬ¹":117539,"æģ¯æģ¯":117540,"æģ¯æģ¯çĽ¸åħ³":117541,"é©¾æł¡":117542,"æİ©é¥°":117543,"æ³½è¿ŀ":117544,"æ³½è¿ŀæĸ¯åŁº":117545,"举æŃ¢":117546,"管çIJĨä½ĵåζ":117547,"åħ¶ä¸Ńä¹ĭä¸Ģ":117548,"æĿ¾å¼Ľ":117549,"æĭ¦æĪª":117550,"åį«åģ¥":117551,"åį«åģ¥å§Ķ":117552,"ä»İåݻ年":117553,"åĤ¢":117554,"è´Ń票":117555,"åĽ¾æłĩ":117556,"河西":117557,"æ°ijæĶ¿å±Ģ":117558,"ç§ģèIJ¥":117559,"å¤ĸåĽ½è¯Ń":117560,"干货":117561,"æĵ¦æĭŃ":117562,"åľ°ä¸Ń":117563,"åľ°ä¸Ńæµ·":117564,"æµĵæµĵ":117565,"æµĵæµĵçļĦ":117566,"å§ĭ建":117567,"å§ĭ建äºİ":117568,"ç¶ĵæŃ·":117569,"è·¯æ¼Ķ":117570,"æļ´é£İ":117571,"åŁºè¾ħ":117572,"æī¶è´«å·¥ä½ľ":117573,"ä¸Ģ缴å¤Ħäºİ":117574,"æĥħè¶£":117575,"äºĮåŃ£åº¦":117576,"åİĮæģ¶":117577,"顺åĪ©å®ĮæĪIJ":117578,"æŁ¥å°ģ":117579,"顶端":117580,"ä¸įåŃķ":117581,"ä¸Ģ大åłĨ":117582,"被æ·ĺæ±°":117583,"æĺ¯ç͍æĿ¥":117584,"æľĢåIJĪéĢĤ":117585,"äº®çľ¼":117586,"å¹¶ä¸įæĺ¯å¾Ī":117587,"ç§ijçłĶéĻ¢":117588,"ç§ijçłĶéĻ¢æīĢ":117589,"ç²Ł":117590,"é¢Īéĥ¨":117591,"é»ĺé»ĺåľ°":117592,"é«ĺä¸ŃçĶŁ":117593,"æĹıèĩªæ²»åİ¿":117594,"æķĻåŃ¦è´¨éĩı":117595,"æĪĺçģ«":117596,"åĿİåĿ·":117597,"æIJŃä¹ĺ":117598,"è¯ĹæĦı":117599,"åĪijèѦ":117600,"åĩºæ±Ĺ":117601,"åįģåħŃæĿ¡":117602,"请åıĬæĹ¶":117603,"åĨľä¸ļ大åѦ":117604,"èIJ½åı¶":117605,"æĢ»èĢĮè¨Ģ":117606,"æĢ»èĢĮè¨Ģä¹ĭ":117607,"æĿľåħ°":117608,"æĿľåħ°çī¹":117609,"éĻªä½ł":117610,"åħ¬æĬ¥":117611,"çķĻè¨ĢæĿ¿":117612,"éĺħåİĨ":117613,"ç«¶çĪŃ":117614,"ç»ĻåĪ«äºº":117615,"æĹ¥æĬ¥ç¤¾":117616,"åĿIJèIJ½":117617,"åĿIJèIJ½äºİ":117618,"éĩijåŃĹ":117619,"éĩijåŃĹå¡Ķ":117620,"åĽ¤":117621,"è¯Ŀåī§":117622,"æĮģç»Ńæİ¨è¿Ľ":117623,"æ¼ıæ°´":117624,"詳細":117625,"æĢĢæĬ±":117626,"åıĺå¹»":117627,"饥饿":117628,"éļIJ身":117629,"ä¸ªèµĽåŃ£":117630,"åĵ¡å·¥":117631,"æģ¢å¤įæŃ£å¸¸":117632,"äºĨ好å¤ļ":117633,"æĺŁå·´":117634,"æĺŁå·´åħĭ":117635,"åħīçݯ":117636,"å¸ħåĵ¥":117637,"çĻ½éĽª":117638,"ç¨įç¨į":117639,"计æıIJ":117640,"æĦĽæĥħ":117641,"éİĸ":117642,"ä¿¡éĺ³":117643,"è§Ģå¯Ł":117644,"å¦Ĥæŀľä½łæĥ³":117645,"缸æ¯Ķä¹ĭä¸ĭ":117646,"è§£å¼Ģ":117647,"æīĵåį°æľº":117648,"身躯":117649,"ç²¾ç¥ŀæĸĩæĺİ":117650,"èĤ¡æĮĩ":117651,"å¾®åĪĽ":117652,"红èĮ¶":117653,"èĩ´çĻĮ":117654,"æģ©æĸ½":117655,"èħ¿éĥ¨":117656,"大åŀĭå¤ļ人":117657,"å®īåĢį":117658,"è¾ħ导åijĺ":117659,"èĪªéģĵ":117660,"å¸ĥå°Ķ":117661,"åįĹå®ģå¸Ĥ":117662,"ä¸ĬçıŃæĹı":117663,"ä¾§ç»ĵæŀĦæĢ§":117664,"追éļı":117665,"å½ĵåľ°æĶ¿åºľ":117666,"èµ°åĩºæĿ¥":117667,"éĩijèŀįä¸ļ":117668,"ä¸Ľä¹¦":117669,"é¡¹çĽ®ç»ıçIJĨ":117670,"è¿ĩæĪ·":117671,"骨æŀ¶":117672,"è¡Ļ":117673,"ä»Ģ麽":117674,"èħĭ":117675,"è¦ģ害":117676,"åľ¨åºĬä¸Ĭ":117677,"代è¨Ģ人":117678,"並å°ĩ":117679,"åIJĦ个æĸ¹éĿ¢":117680,"è°´è´£":117681,"åħ±æĮ¯":117682,"åį³å°ĨåΰæĿ¥":117683,"èĤºçĻĮ":117684,"ä¾ĽéĶĢ":117685,"丼æŀĹ":117686,"èµĥ":117687,"åįģä½Ļå¹´":117688,"åĭĺæİ¢":117689,"飵åij³":117690,"èĭ¦ç¬ij":117691,"æľĢ大ç¨ĭ度":117692,"éĩįçĤ¹åħ³æ³¨":117693,"ä¹ĭ举":117694,"满æĢĢ":117695,"åıĹåΰ影åĵį":117696,"æĭĽæĬķæłĩ":117697,"è¡¥é½IJ":117698,"西红":117699,"è¥¿çº¢æŁ¿":117700,"
鬧":117701,"è£ħåį¸":117702,"éĤ»éĩĮ":117703,"èĤĩäºĭ":117704,"æİĴæ¯Ĵ":117705,"åѤåĦ¿":117706,"鼶è·Ŀ离":117707,"å®ŀå¹²":117708,"çľĭæŁ¥çľĭ":117709,"æĶ¶è´¹ç«Ļ":117710,"ç»·":117711,"åħ¬çĽĬæĢ§":117712,"éĢĴç»Ļ":117713,"æĶ»æīĵ":117714,"æĺŁçº§éħĴåºĹ":117715,"æĺİåªļ":117716,"çį¨ç«ĭ":117717,"è¯Ŀè¯ŃæĿĥ":117718,"ä¸ĢæŃ¥ä¸ĢæŃ¥":117719,"书æ³ķå®¶":117720,"æľªç»ıæİĪæĿĥ":117721,"çŁ³èĨı":117722,"åĩŃä»Ģä¹Ī":117723,"çļĦæĹ¥":117724,"çļĦæĹ¥åŃIJéĩĮ":117725,"诱人":117726,"çϾåĪĨçϾ":117727,"èĪĪè¶£":117728,"å¼łåħĪçĶŁ":117729,"èĢģçĪ·åŃIJ":117730,"æ³¢çī¹":117731,"åŁºéĩij份é¢Ŀ":117732,"æ²Ļåıijä¸Ĭ":117733,"å¥ĭæĸĹ缮æłĩ":117734,"æ°¢èĥ½":117735,"æ²ĥå°ĶçİĽ":117736,"義åĭĻ":117737,"éŁ³ç®±":117738,"æ²ī浸":117739,"æ²īæµ¸åľ¨":117740,"èĭ±åľĭ":117741,"çģ¯çģ«":117742,"è¿Ľé¡¹":117743,"两端":117744,"ä¹Ķ丹":117745,"èĦ¸é¢Ĭ":117746,"åıijå±ķæ½ľåĬĽ":117747,"åĭķä½ľ":117748,"åĵĪä½Ľ":117749,"å®´ä¼ļ":117750,"æ§į":117751,"ç«ĭå¿Ĺ":117752,"ç¡ķ士åѦä½į":117753,"åĭĭ竳":117754,"è¿Ļåľºæ¯ĶèµĽ":117755,"æĮģå¹³":117756,"éķĢéĶĮ":117757,"èĭ±çī¹":117758,"èĭ±çī¹å°Ķ":117759,"æķĻèģĮå·¥":117760,"åĬŁåĬĽ":117761,"该æ¡Ī":117762,"ä¸Ģæ¢Ŀ":117763,"åĺīå¹´":117764,"åĺīå¹´åįİ":117765,"è¿«ä¸įåıĬ":117766,"è¿«ä¸įåıĬå¾ħ":117767,"è¿Ļ个æĹ¶ä»£":117768,"精彩æĴŃæĬ¥":117769,"人èĦ¸":117770,"人èĦ¸è¯ĨåĪ«":117771,"æ£Ģå¯Łå®ĺ":117772,"å°ıèħ¿":117773,"éĨĴ缮":117774,"åħļæĢ»":117775,"åħļæĢ»æĶ¯":117776,"æĪŁ":117777,"èĮ«çĦ¶":117778,"è±ĨæµĨ":117779,"主治":117780,"éĿĴæµ·çľģ":117781,"åĪijäºĭ责任":117782,"çł°":117783,"ä¹ĭæ¬ĬåĪ©":117784,"äºĶå®ĺ":117785,"è¿·æĥij":117786,"åħ¥åºĵ":117787,"家纺":117788,"弹簧":117789,"åįģäºĶæĿ¡":117790,"ç»Ļå®Ŀå®Ŀ":117791,"èĪªç©ºèĪªå¤©":117792,"å¾Ģå¤ĸ":117793,"å¼ķåĬĽ":117794,"çľ¼çļ®":117795,"æ¶īè¶³":117796,"æĿ¥å®¾":117797,"åľ¨çº¿è§Ĵèī²":117798,"çĥŃéĶĢ":117799,"æµģéĢĿ":117800,"泡泡":117801,"éĻįå¹ħ":117802,"è´ŁéĿ¢å½±åĵį":117803,"红楼":117804,"红楼梦":117805,"éļĶçĿĢ":117806,"侥幸":117807,"许ä¹ħ":117808,"åĴĮçĿ¦":117809,"èѽ":117810,"使ç͍èĢħæĪĸ":117811,"ä¹°åįķ":117812,"è¿´":117813,"é£İæīĩ":117814,"æķĻ師":117815,"æ¡ĮåŃIJä¸Ĭ":117816,"å¾Īæ¼Ĥ亮":117817,"åł±å°İ":117818,"第ä¸ĢåŃ£åº¦":117819,"ç©©å®ļ":117820,"æĤ²åĵĢ":117821,"çĿĢåĬĽæīĵéĢł":117822,"æĮŁ":117823,"路桥":117824,"åijIJ":117825,"åľ£è¯ŀèĬĤ":117826,"çļĩåŃIJ":117827,"ä»ĩæģ¨":117828,"éħĿéħ¿":117829,"ä¸įéĹ´":117830,"ä¸įéĹ´æĸŃ":117831,"æĮĩå°ĸ":117832,"ä¸ŃåĽ½ç½ij游":117833,"åŀ£":117834,"æĦıè§ģ建议":117835,"æ¯ħçĦ¶":117836,"亮度":117837,"èģĶè°Ĭ":117838,"å½ķåħ¥":117839,"åĦ²":117840,"å¨ĺå®¶":117841,"ç§ijå°Ķ":117842,"ä¹Łæ²¡ä»Ģä¹Ī":117843,"æł¹æį®ä¸įåIJĮ":117844,"åı¶ä¿®":117845,"å̼å®Ī":117846,"æľ«ç«¯":117847,"å΍":117848,"åĤµåĭĻ":117849,"èģ¯åIJĪ":117850,"å¥ĩå¹»":117851,"èĻļæŀĦ":117852,"é»Ħæĺı":117853,"å¹³åĿ¦":117854,"æµģæ°ĵ":117855,"æĸ°åŁºå»º":117856,"æĮ½æķij":117857,"åįİå°Ķ":117858,"åįİå°Ķè¡Ĺ":117859,"æľĢåıĹæ¬¢è¿İ":117860,"ç»Ń约":117861,"å¼Ĭ端":117862,"éŃĶæ³ķå¸Ī":117863,"éŃĶæ³ķå¸ĪåĴĮ":117864,"åħ·ä½ĵåĨħ容":117865,"çIJīçĴĥ":117866,"æī©å®¹":117867,"èĮ¶åĽŃ":117868,"主ä¹īèĢħ":117869,"ç«ĭéĿ¢":117870,"æİ¥åıĹéĩĩ访":117871,"åĩºåħ¥å¢ĥ":117872,"ç§ijåįı":117873,"éĴ³":117874,"çµIJæ§ĭ":117875,"ç»ĵæŀľæĺ¾ç¤º":117876,"åı°è´¦":117877,"å°±æĿ¥çľĭçľĭ":117878,"èĩªæķij":117879,"åıįæĩī":117880,"åİ»åĵªåĦ¿":117881,"è¿Ļé¦ĸ":117882,"è¿Ļé¦ĸæŃĮ":117883,"åIJ¬ä¼Ĺ":117884,"å¤ĸ壳":117885,"ä½ĵèĤ²é¦Ĩ":117886,"實æĸ½":117887,"èŀºä¸Ŀ":117888,"æĭīåįĩ":117889,"çĮĽåľ°":117890,"åħ¨åĽ½äººæ°ij":117891,"æĤīå°¼":117892,"æĹı群":117893,"åĽ¢åijĺ":117894,"两个å°ıæĹ¶":117895,"åľ¨çݩ家":117896,"åľ¨çݩ家ä¸Ń":117897,"çĶľçĶľ":117898,"æĬķè¡Į":117899,"åįĶæľĥ":117900,"éĻ¡":117901,"åĬłå·¥åİĤ":117902,"æ¦ĨæŀĹ":117903,"æŃ»è§Ĵ":117904,"åĨħå¹ķ":117905,"æīĢæľīæĥħèĬĤ":117906,"åĪ·åį¡":117907,"æ°´èĤ¿":117908,"èĥĥåı£":117909,"å«Įå¼ĥ":117910,"沮丧":117911,"ä¸ī年级":117912,"æ¶Ĥå±
Ĥ":117913,"å¿ĥ仪":117914,"å¿ĥ仪çļĦ":117915,"å¤Ń":117916,"é¦ĸè½®":117917,"æĹłè®ºæĺ¯åħ¶":117918,"éĢıæ°Ķ":117919,"äºĮåįģäºĶ":117920,"箫":117921,"åĬŁåĬ³":117922,"çѾä¸ĭ":117923,"æ²īè¿·":117924,"æķijåij½":117925,"éĹªéĹª":117926,"åIJĥäºı":117927,"å±ķåĵģ":117928,"åį³æĹ¶åıijçĶŁ":117929,"ç¶ľ":117930,"ç¶ľåIJĪ":117931,"æłĩæĺİ":117932,"çľĭç͵影":117933,"åħ¬ç«ł":117934,"éĺ¿æ£®":117935,"éĺ¿æ£®çº³":117936,"身åĪĽéĢł":117937,"身åĪĽéĢłçļĦ":117938,"æ¸Ľå°ij":117939,"å̼å¾Ĺåħ³æ³¨":117940,"鼶åĶ®åķĨ":117941,"æįĨç»ij":117942,"è¸ıåħ¥":117943,"èĽŁ":117944,"æŁ´çº³":117945,"èĢģåħµ":117946,"绿èī²çݯä¿Ŀ":117947,"é¹Ń":117948,"éº»æľ¨":117949,"æıŃçīĮ":117950,"è¿Ļ款车":117951,"ç¾İå¾·":117952,"ç¾İå¾·åħ¬åı¸":117953,"æ¶§":117954,"è°ģçŁ¥":117955,"æ´ĭèij±":117956,"æ¯įæł¡":117957,"ä¸ĢéĹª":117958,"çͷ䏻è§Ĵ":117959,"æĹłçº¿ç͵":117960,"å±łå®°":117961,"æĺ¯éŁ©åĽ½":117962,"æĺ¯éŁ©åĽ½å¨±":117963,"容è²Į":117964,"åĿĩ使åħ¶":117965,"太快":117966,"å¹´çͱ":117967,"å¹´çĶ±çĽĽ":117968,"èĭ¦èĭ¦":117969,"åĬĽè¿ĺæĺ¯":117970,"åĬĽè¿ĺæĺ¯èĩª":117971,"æĨ©":117972,"èģ¯çµ¡":117973,"å;":117974,"åħ·æľīæĪĺ士":117975,"追éĹ®":117976,"åłĨæĶ¾":117977,"åıį驳":117978,"å®ŀäºĭæ±Ĥ":117979,"å®ŀäºĭæ±Ĥæĺ¯":117980,"åѸéĻ¢":117981,"åįģåĩłä¸ª":117982,"æķijæĬ¤":117983,"æķijæĬ¤è½¦":117984,"ç½ijç»ľä¼łæĴŃ":117985,"åįģåħ«å±Ĭ":117986,"éĥ¨åī¯":117987,"éĥ¨åī¯éĥ¨éķ¿":117988,"çĹ´è¿·":117989,"管çIJĨæĿ¡ä¾ĭ":117990,"èŀį为ä¸Ģä½ĵ":117991,"æĢ»äº§å̼":117992,"è³ĵ":117993,"ä¸ĥæĺŁ":117994,"çıŃç»Ħ":117995,"绣é¢Ĩ":117996,"请大家":117997,"éĩijéϵ":117998,"èĪħèĪħ":117999,"æµ·æ¹¾":118000,"æĸ½çŃĸ":118001,"享èªī":118002,"麥":118003,"端åįĪ":118004,"绿åŁİ":118005,"確ä¿Ŀ":118006,"å·´æĭī":118007,"åĨĴçĿĢ":118008,"æħ·æħ¨":118009,"个人è§ĤçĤ¹":118010,"ä¹Ļçĥ¯":118011,"ç¡ħè°·":118012,"éĸĭå±ķ":118013,"å°ļ书":118014,"åĿļ飧":118015,"庵":118016,"èĢģé¾Ħ":118017,"èĢģé¾ĦåĮĸ":118018,"çľ¨çľ¼":118019,"绿水":118020,"绿水éĿĴå±±":118021,"书é¦Ļ":118022,"主åĬĽåĨĽ":118023,"æīįæĺ¯çľŁæŃ£":118024,"æĬ¢åħĪ":118025,"æĪIJå°±æĦŁ":118026,"éĩįæŀĦ":118027,"éĴ¢åİĤ":118028,"æĪIJ份":118029,"èĬ±çº¹":118030,"ä¹ĭäºī":118031,"å¹²ç»Ĩèĥŀ":118032,"æĹ¢åı¯ä»¥":118033,"ç¹ģçIJIJ":118034,"æĦļèł¢":118035,"éĿŀ常æĺİæĺ¾":118036,"ä½ĵ彩":118037,"æĬĢæ³ķ":118038,"æĿĨèıĮ":118039,"å¹¿æ³Ľåħ³æ³¨":118040,"åĮĹå®ĭ":118041,"å§Ĭ妹":118042,"åįıåĬŀ":118043,"æ·®åįĹ":118044,"çĥı":118045,"æ´ĹèĦ¸":118046,"åıĹ访":118047,"åıĹ访èĢħ":118048,"éĩįè¦ģåĽłç´ł":118049,"å½±è§Ĩåī§":118050,"综èīºèĬĤ缮":118051,"èľķåıĺ":118052,"äºĮ线":118053,"äºĮ线åŁİå¸Ĥ":118054,"ä¼Ĭå§ĭ":118055,"çıĬçijļ":118056,"èĩªæŁ¥":118057,"åħ¥åĽŃ":118058,"åĩ¶æīĭ":118059,"åħ¬è¯ī":118060,"éģĩéļ¾":118061,"éĩĩçŁ¿çŃī":118062,"èĩªçIJĨ":118063,"åĸ·æ¶Ĥ":118064,"æī©åħħ":118065,"éĢıè§Ĩ":118066,"é«ĺéĢŁå¢ŀéķ¿":118067,"åĽ¾çĶ»":118068,"ç¾¹":118069,"èĤĩåºĨ":118070,"è¾ľè´Ł":118071,"èµĶä»ĺ":118072,"è·¡":118073,"åģ¥åº·æĪIJéķ¿":118074,"以ä¸ĬåѦåİĨ":118075,"åıĸå¾Ĺ以åıĬ":118076,"æ²ī积":118077,"åįģä¹Ŀå±Ĭ":118078,"缸éĹľæľįåĭĻ":118079,"æī§åĭ¤":118080,"åī¯åİ¿éķ¿":118081,"寰":118082,"åģľæ»ŀ":118083,"淹没":118084,"çŁ³çģ°":118085,"çį¸":118086,"å̦":118087,"ç¾İåªĴ":118088,"æķĻæ¡Ī":118089,"åĬłçĽĸ":118090,"åħ¬å¼ĢèµĽ":118091,"å¥łåŁº":118092,"æĺĨèĻ«":118093,"çŀħ":118094,"磷éħ¸":118095,"äºīåĪĽ":118096,"çİĭæĻĵ":118097,"ç¼ĵåĨ²":118098,"åİļåİļ":118099,"åİļåİļçļĦ":118100,"æŀ£åºĦ":118101,"ç²¾çĽĬ":118102,"ç²¾çĽĬæ±Ĥ":118103,"ç²¾çĽĬæ±Ĥç²¾":118104,"åĪĨæĶ¯æľºæŀĦ":118105,"å®ŀæĸ½ç»ĨåĪĻ":118106,"æĸ°èµĽåŃ£":118107,"總統":118108,"éĢłè¡Ģ":118109,"é¢ĩåħ·":118110,"é»ĦåŁĶ":118111,"è¡ĢèĦĤ":118112,"交éĢļå·¥åħ·":118113,"å³¥":118114,"æĹıèĩªæ²»å·ŀ":118115,"寺éĻ¢":118116,"確å®ļ":118117,"æ¦Ĥ念èĤ¡":118118,"æĦŁå®ĺ":118119,"æŁľåı°":118120,"åĶĶ":118121,"çŀŃ解並":118122,"æĢ»ä»·":118123,"åIJ¸åħ¥":118124,"æĢ¼":118125,"æĻļéĹ´":118126,"å±Ĭæ¯ķä¸ļçĶŁ":118127,"çĶŁå
§ľ":118128,"éĺħ读åħ¨æĸĩ":118129,"å¾ĹåΰæľīæķĪ":118130,"æIJľæķij":118131,"åİĨæĿ¥":118132,"èŃīæĺİ":118133,"åĥ»":118134,"èĨ³é£Ł":118135,"åĦĦåħĥ":118136,"æīĵåİĭ":118137,"宾客":118138,"åķ¼":118139,"ä¸ĢçϾå¤ļ":118140,"æ·±åħ¥äººå¿ĥ":118141,"æ¢ħå·ŀ":118142,"çłĶåѦ":118143,"åħ³ä¹İ":118144,"è¼Ľ":118145,"亲åıĭ":118146,"éħįæĸĻ":118147,"æĪijçĪ±ä½ł":118148,"è´¸æĺĵæĪĺ":118149,"æľīèī²":118150,"æľīèī²éĩijå±ŀ":118151,"æįIJåĬ©":118152,"为é¦ĸ":118153,"为é¦ĸçļĦ":118154,"å¯ĮåĬĽ":118155,"çĶ·ç¥ŀ":118156,"é³³":118157,"æµĩæ°´":118158,"åIJ±":118159,"æĺİç¡®æıIJåĩº":118160,"åı¹äºĨ":118161,"åı¹äºĨåı£æ°Ķ":118162,"礼æĭľ":118163,"è¿Ļ个åIJįåŃĹ":118164,"ä¿¡å¾Ĵ":118165,"å¿Ĺ强":118166,"éĻIJæĹ¶":118167,"æĶ¶è²»":118168,"åĨľå®¶ä¹IJ":118169,"å°ıé¾ĻèϾ":118170,"èIJ½å¹ķ":118171,"æ§Ł":118172,"åѦ龸":118173,"æĪĸå¤ļ":118174,"æĪĸå¤ļæĪĸ":118175,"æĪĸå¤ļæĪĸå°ij":118176,"座è°Īä¼ļä¸Ĭ":118177,"æ¶¼":118178,"éŃĶçİĭ":118179,"å²±":118180,"é¡¶å±Ĥ":118181,"é¡¶å±Ĥ设计":118182,"èĦijåŃIJéĩĮ":118183,"éĻ¢åŃIJéĩĮ":118184,"轩è¾ķ":118185,"身å¿ĥåģ¥åº·":118186,"èħij":118187,"éĹľæ³¨":118188,"åıĤåĬłä¼ļè®®":118189,"ä¸ŃåįİæĸĩåĮĸ":118190,"追寻":118191,"å®īçĦ¶":118192,"é£Ļåįĩ":118193,"éŁŃèıľ":118194,"鸦":118195,"åĤ¨éĩı":118196,"çĶ·æĸ¹":118197,"å¤ĩ份":118198,"æijĶåĢĴ":118199,"润æ»ijæ²¹":118200,"é̼è¿ij":118201,"çͳè¯ī":118202,"鸣类":118203,"çŁ³æ²¹åĮĸå·¥":118204,"åĿļæŀľ":118205,"è¿Ļå®¶ä¼Ļ":118206,"æĭĴä¸į":118207,"羣çļ®":118208,"è·ĿéĽ¢":118209,"è¿ĺæĮº":118210,"éĽķåĥı":118211,"åĪĿæģĭ":118212,"æıIJä¾ĽæĽ´å¤ļ":118213,"æŁ¥çľĭåħ¨æĸĩ":118214,"æķ°åŃĹè´§å¸ģ":118215,"åĸīåĴĻ":118216,"åı¦ä¸Ģä½į":118217,"åĤ¬åĮĸ":118218,"åĤ¬åĮĸåīĤ":118219,"ä»İæĿ¥æ²¡":118220,"å¯ĨåĪĩ缸åħ³":118221,"éĥ¨ä¸»ä»»":118222,"产åĵģç»ıçIJĨ":118223,"並åIJĮæĦı":118224,"èIJ½åħ¥":118225,"å±ıå¹ķä¸Ĭ":118226,"åħ¬åı¸ç«łç¨ĭ":118227,"æį¢åı¥è¯Ŀ":118228,"æį¢åı¥è¯Ŀ说":118229,"ä½įæĸ¼":118230,"ä½Ķ":118231,"åĩ»æĿĢ":118232,"缸è¾ĥ":118233,"缸è¾ĥäºİ":118234,"ç²½åŃIJ":118235,"åįĹæŀģ":118236,"宫é¢Ī":118237,"è£ģåijĺ":118238,"æĺİç»Ĩ":118239,"ä»·å̼éĵ¾":118240,"åĽĽä¸ªæĸ¹éĿ¢":118241,"æĥħåĨµæĿ¥çľĭ":118242,"æĮijåīĶ":118243,"æ®ĺ":118244,"æŀģåĬĽ":118245,"çĸijéļ¾":118246,"æĬµæĬĹåĬĽ":118247,"æĢ¥éĢŁ":118248,"æĪĮ":118249,"ä½İä¼°":118250,"éĹªè¿ĩ":118251,"æģ¬":118252,"èµŀæī¬":118253,"ä»ĸå¦Ī":118254,"æĪIJ为ä¸ĢåIJį":118255,"æ´Ĺ礼":118256,"é¢Ħ计å°Ĩ":118257,"åħĪè¿Ľåįķä½į":118258,"è¼Ķ":118259,"éĢĥèĦ±":118260,"çݰåŃĺ":118261,"èĢģèĻİæľº":118262,"åįģä¸ĥæĿ¡":118263,"åı¦ä¸ĢåįĬ":118264,"温æĥħ":118265,"åī¥ç¦»":118266,"ä¸ĸè´¸":118267,"å®ĺåı¸":118268,"å¾Īå·®":118269,"éĹ´è·Ŀ":118270,"请注æĦı":118271,"åı²è¯Ĺ":118272,"åĪ©åύ":118273,"è¿IJç®Ĺ":118274,"沦为":118275,"該使ç͍èĢħ":118276,"èĮ¬":118277,"éĶ¦ç»£":118278,"åı²æĸĻ":118279,"ç쵿´»æĢ§":118280,"èģĶ社":118281,"æĹłåĬ©":118282,"æĬĹæ°§åĮĸ":118283,"èıľèĤ´":118284,"éĢłèι":118285,"æİīèIJ½":118286,"å¤įæŁ¥":118287,"åĭĥåĭĥ":118288,"åij¼å£°":118289,"給äºĪ":118290,"åIJĮäºĭ们":118291,"ç½°":118292,"è¯ķæİ¢":118293,"åħ³éĶ®åŃĹ":118294,"æįIJçĮ®":118295,"ç»Łè®¡æķ°æį®":118296,"åĪĽä½ľèĢħ":118297,"ä¸ĭåįĬ":118298,"ä¸ĭåįĬåľº":118299,"æī¿æĭħ责任":118300,"端æŃ£":118301,"ç©¿è¡£":118302,"ä¼łçIJĥ":118303,"åĬ©éķ¿":118304,"åĩ±":118305,"éķ¶åµĮ":118306,"é£ŀç¿Ķ":118307,"è¾ĵåįµ":118308,"è¾ĵåįµç®¡":118309,"ä¸ĩåħ¬éĩĮ":118310,"æİ¨å¹¿åºĶç͍":118311,"å¿«æ¨Ĥ":118312,"ç§½":118313,"èī°å·¨":118314,"åIJ¬å®Į":118315,"åĿļ硬":118316,"å¥¥åľ°":118317,"å¥¥åľ°åĪ©":118318,"é¢ĵ":118319,"èĻIJå¾ħ":118320,"ä¾Ľæ±Ĥ":118321,"éľīç´ł":118322,"伪è£ħ":118323,"ä¹¡åľŁ":118324,"åĩ¡æľ¬ç½ij":118325,"åĩ¡æľ¬ç½ij注":118326,"ä¼ĬåĪ©":118327,"è¡¡æ°´":118328,"æĽ´åĥıæĺ¯":118329,"åĪĨéĴŁå·¦åı³":118330,"è¦ı模":118331,"äºĶåĪĨéĴŁ":118332,"åºĹåĬłçĽŁ":118333,"åĽ°éĽ£":118334,"åħ³åģľ":118335,"æĢĿ绪":118336,"åĴ½åĸī":118337,"缸符":118338,"çĥ¦èºģ":118339,"æĻĤæľŁ
":118340,"åijĪçı¾":118341,"è§£æķ£":118342,"诱导":118343,"éļĶçĥŃ":118344,"çĮ¶":118345,"åįĹå®ĭ":118346,"æ·±åħ¥äºĨè§£":118347,"çŃĶçĸij":118348,"æĺ¼å¤ľ":118349,"åįĥä¼ı":118350,"åĬ³åĬ¡æ´¾éģ£":118351,"红è±Ĩ":118352,"åĿıäºĭ":118353,"çĤ¹æ»´":118354,"å°±ä¸ļå²Ĺä½į":118355,"约åIJĪ":118356,"åħįéϤ":118357,"éĢĨåĬ¿":118358,"éĩįéĩijå±ŀ":118359,"å®ĺ宣":118360,"ä½İå»ī":118361,"æģ¨ä¸įå¾Ĺ":118362,"å¾Ĺ天":118363,"å¾Ĺ天çĭ¬":118364,"å¾Ĺ天çĭ¬åİļ":118365,"ä¸Ģå°ģä¿¡":118366,"æĬ½å¥ĸ":118367,"è¾Ĺ转":118368,"çķĻå®Ī":118369,"çķĻå®ĪåĦ¿ç«¥":118370,"çŃĶåį·":118371,"å·¨åŀĭ":118372,"æľĢ好ä¸įè¦ģ":118373,"æµĻæ±Łå¤§åѦ":118374,"æĨ¨":118375,"æı¡æīĭ":118376,"éĴĪç»ĩ":118377,"æİĴ骨":118378,"çĤ½":118379,"å°ģè£ħ":118380,"åįĢåŁŁ":118381,"空æ°ĶåĩĢåĮĸ":118382,"åħīå½±":118383,"åĢĴå¡Į":118384,"å§ļæĺİ":118385,"æ¤į被":118386,"åѦåīį":118387,"åѦåīįæķĻèĤ²":118388,"èĬĿåĬł":118389,"èĬĿåĬłåĵ¥":118390,"缩水":118391,"ä½Ł":118392,"åľ¨çº¿åĴ¨è¯¢":118393,"èµıæŀIJ":118394,"éĿĴèĽĻ":118395,"æĬ±ä½ı":118396,"èĮĤåIJį":118397,"åħ¨åĬĽæīĵéĢł":118398,"åįļ士åѦä½į":118399,"æ²§å·ŀ":118400,"åĻ¢":118401,"æĿĤçī©":118402,"åĪ»çĶ»":118403,"æįħ":118404,"å¾®éĩı":118405,"å¾®éĩıåħĥç´ł":118406,"ä¸ĢåĽŀäºĭ":118407,"鸡èĤī":118408,"åĪ©æ¶¦çİĩ":118409,"æīįç®Ĺ":118410,"å¾®å¦Ļ":118411,"棵æłij":118412,"贪婪":118413,"åĩıå̼":118414,"梦å¢ĥ":118415,"åı¯è§Ĩ":118416,"åı¯è§ĨåĮĸ":118417,"广大å¸Ĥæ°ij":118418,"ä¸ĵä¸ļä»İäºĭ":118419,"ç»ı纬":118420,"ç´§çĽ¯":118421,"çŁ¥å·±":118422,"è¤ļ":118423,"æĸĩåĮĸåºķèķ´":118424,"åݦéŨå¸Ĥ":118425,"临港":118426,"对åħ¶çľŁå®ŀ":118427,"岸边":118428,"è¦ĸçĤº":118429,"æĬĹçĻĮ":118430,"åĶIJå®ĩ":118431,"ä¸įå¾Ĺè¶ħè¿ĩ":118432,"å¨ģæħij":118433,"æ¡Ĩæŀ¶åįıè®®":118434,"èµ°ç§ģ":118435,"åĽ¢å§Ķ":118436,"夸大":118437,"æ¬Ħ":118438,"ç¥ŀç»ıç³»ç»Ł":118439,"æijĦå½±ä½ľåĵģ":118440,"èĬ¥":118441,"å®īåºĨ":118442,"海滨":118443,"æŀĦæĢĿ":118444,"çĮĤ":118445,"åı©":118446,"éĺIJæĺİ":118447,"éģģ":118448,"精油":118449,"ç©´ä½į":118450,"æĬ¤èº«":118451,"æĬ¤èº«ç¬¦":118452,"æĮĩå°İ":118453,"åŃĺåľ¨ä¸Ģå®ļ":118454,"å¯ĤéĿĻ":118455,"æµ·å¤ĸå¸Ĥåľº":118456,"éĿ¡":118457,"综åIJĪå¾ģ":118458,"ä¿IJ":118459,"è¨Īç®Ĺ":118460,"æĺİæľĹ":118461,"äºļè¿IJ":118462,"äºļè¿IJä¼ļ":118463,"åīįçŀ»æĢ§":118464,"åĮ®ä¹ı":118465,"产ä¸ļæī¶è´«":118466,"èĦijæµ·":118467,"èĦijæµ·ä¸Ń":118468,"åħļçļĦé¢Ĩ导":118469,"åĪĺéĤ¦":118470,"æµģæĺŁ":118471,"æĵĤ":118472,"æĶĢçĻ»":118473,"åĴĶ":118474,"ä¸Ģä¸ĭåŃIJå°±":118475,"è¯Ĭæ²»":118476,"使åĬ²":118477,"åīµä½ľ":118478,"éĵŃè®°":118479,"éĴ±è´¢":118480,"æĹ¥æĬ¥è®°èĢħ":118481,"çĥŁçģ«":118482,"èĥľè´Ł":118483,"åįļ主":118484,"ä¸ŃåĽ½èģĶéĢļ":118485,"ç½ijç«Ļé¦ĸ页":118486,"å°±å¤Ł":118487,"å°±å¤ŁäºĨ":118488,"æīijåħĭ":118489,"å±ħå§Ķä¼ļ":118490,"è°¬":118491,"å®īåħ¨äºĭæķħ":118492,"åķĨçĶ¨è½¦":118493,"循çݯç»ıæµİ":118494,"æ·¤":118495,"èĢĥè¯ģ":118496,"å®ĿèĹı":118497,"å®Įç»ĵ":118498,"çłĶåıijæĬķåħ¥":118499,"å²ij":118500,"æģŃæķ¬":118501,"离éĢĢä¼ij":118502,"水墨":118503,"å©¶":118504,"è¯Ĺåı¥":118505,"å®ģæ³¢å¸Ĥ":118506,"å¼±çĤ¹":118507,"åģľçīĮ":118508,"奶油":118509,"å¥ĩ纳河":118510,"æĨĤ":118511,"社ä¼ļå®ŀè·µ":118512,"è´Ŀ壳":118513,"çłĤæµĨ":118514,"èιåıª":118515,"宣æī¬":118516,"综åIJĪæķ´æ²»":118517,"åĤij":118518,"æ°ijæĹıæĸĩåĮĸ":118519,"éĩįçݰ":118520,"积æ·Ģ":118521,"åħ¬çĦ¶":118522,"çħī":118523,"缸èģļ":118524,"æ±¾":118525,"纹çIJĨ":118526,"çĩĥçħ¤":118527,"æŃ¤ç§į":118528,"ç¾İå¦Ĩ":118529,"åįĥçĵ¦":118530,"çIJĽ":118531,"驾驶è¯ģ":118532,"éĺ¶æ¢¯":118533,"ä¸Ŀä¸Ŀ":118534,"å¾Īå¤ļäºĭæĥħ":118535,"åħīéĺ´":118536,"èijĹä½ľæ¬Ĭ":118537,"åħ§éĥ¨":118538,"çĽ¸å¯¹æĿ¥è¯´":118539,"éĸĴ":118540,"éľĩæħij":118541,"說話":118542,"æĨij":118543,"ç«¥è£ħ":118544,"ä½ıæĪ¿åĴĮ":118545,"ä½ıæĪ¿åĴĮåŁİ":118546,"å·²ç»ıè¶ħè¿ĩ":118547,"ä¾¦å¯Ł":118548,"çŁ¿çī©":118549,"ä¾Ľå¤§å®¶":118550,"çī¹éĤĢ":118551,"ç¨ĭåºıåijĺ":118552,"çķľçī§ä¸ļ":118553,"æ°ª
":118554,"çijª":118555,"åĢĴåľ¨":118556,"åĢĴåľ¨åľ°":118557,"æ¯Ģ":118558,"梯éĺŁ":118559,"æİ¥èijĹ":118560,"æĬĹèıĮ":118561,"è¤ĩ":118562,"ç¬Ļ":118563,"æ¯Ķä¸Ĭå¹´":118564,"鸡汤":118565,"åŃ¦ä¹łæĪIJ绩":118566,"æĸijæĸĵ":118567,"åħĪ导":118568,"åĪĹ举":118569,"è°ĥæŁ¥æĺ¾ç¤º":118570,"æ©«":118571,"ä¹Ŀåįģ":118572,"è°¢éŁµ":118573,"è·¨è¶Ĭå¼ı":118574,"女æĢ§æľĭåıĭ":118575,"èIJ¥åħ»ä»·å̼":118576,"å®ŀè·µç»ıéªĮ":118577,"èĭıå·ŀå¸Ĥ":118578,"çĵ¶åŃIJ":118579,"æĸ°çļĦä¸Ģ":118580,"æĸ°çļĦä¸Ģå¹´":118581,"æĺİæĻ°":118582,"å®łçα":118583,"åŃĹ第":118584,"æľĹ诵":118585,"纳æĸ¯":118586,"éĢĨè¡Į":118587,"è«ĭæĤ¨":118588,"è«ĭæĤ¨æıIJä¾Ľ":118589,"èĥ¸æĢĢ":118590,"第ä¸ĥå±Ĭ":118591,"强壮":118592,"代åŃķ":118593,"æ±¶å·Ŀ":118594,"å®¶åĸ»":118595,"å®¶åĸ»æĪ·":118596,"å®¶åĸ»æĪ·æĻĵ":118597,"èħ®":118598,"åIJ¯è¿ª":118599,"æĹłéļľç¢į":118600,"èĻķçIJĨåıĬ":118601,"æĿ¥åİĨ":118602,"å®ŀåĬ¡":118603,"ä¹Łéļıä¹ĭ":118604,"æĬĢèĥ½åٹè®Ń":118605,"åѤç«ĭ":118606,"åīģ":118607,"éĥ´å·ŀ":118608,"æĶ¶æķĽ":118609,"éł»éģĵ":118610,"èį£å¹¸":118611,"èİ«è¿ĩäºİ":118612,"æŃ¤æĻĤ":118613,"纪å§ĶçĽij":118614,"纪å§ĶçĽijå§Ķ":118615,"缸éĤ»":118616,"åı¦ä¸Ģè¾¹":118617,"çªĴæģ¯":118618,"æľīå¾Īå¤ļç§į":118619,"æ¯ıéĢ¢":118620,"éĹ®ä¸ĸ":118621,"累累":118622,"éĿĴæĺ¥æľŁ":118623,"è·¯åĨµ":118624,"åħĭèݱ":118625,"è¿Ħä»Ĭ为æŃ¢":118626,"æĥĬå¥ĩ":118627,"跨度":118628,"éħ¿éĢł":118629,"åĩĭ":118630,"è¿ijä¸īå¹´":118631,"åĨħ马":118632,"åĨħ马å°Ķ":118633,"æıį":118634,"è¿Ľå±ķæĥħåĨµ":118635,"èĮ§":118636,"æľīåºıæİ¨è¿Ľ":118637,"æĢ»åĨłåĨĽ":118638,"æĪIJ绩åįķ":118639,"éĽ»è©±åıĬ":118640,"ç´§å¯Ĩç»ĵåIJĪ":118641,"åºĬä½į":118642,"é¹Ĭ":118643,"æķ£åıijçĿĢ":118644,"åĭŁèµĦ":118645,"æ°¨éħ¸":118646,"彩ç¥ŀ":118647,"è®Ģåıĸ":118648,"éĩ῏©":118649,"ä¸ŃåŃĺåľ¨çļĦ":118650,"ç¾İéºĹ":118651,"ä¸įæĸŃå¢ŀåĬł":118652,"è½®æµģ":118653,"æİ¥åIJ¬":118654,"年产å̼":118655,"åįĥåħĭ":118656,"æĪĺåľºä¸Ĭ":118657,"çħ§é¡§":118658,"å¹²éĥ¨éĺŁä¼į":118659,"åį°ç«ł":118660,"ä¸Ģèĩ´æĢ§":118661,"è¿ŀå¤ľ":118662,"åħħè£ķ":118663,"é»ijåIJįåįķ":118664,"åĩĢæ°´":118665,"ä¸Ģ大æĹ©":118666,"åĮħ袱":118667,"çĬ¯è§Ħ":118668,"çIJĨè«ĸ":118669,"æŀģæĺĵ":118670,"骸":118671,"å¨ĺå¨ĺ":118672,"åĽ¢åľĨ":118673,"亿åħĥ以ä¸Ĭ":118674,"åĪ©ç͍æĤ¨çļĦ":118675,"带æĿ¥æĽ´å¤ļ":118676,"ä¸Ń央空è°ĥ":118677,"æľĪèĸª":118678,"çĮľæĥ³":118679,"åĪºå®¢":118680,"ä½ľæģ¯":118681,"åįķè°ĥ":118682,"äºĴåĪ©":118683,"å¦Ĥæľīä¾µæĿĥ":118684,"å°ıå·§":118685,"åįģåł°":118686,"åĵĪåĵĪåĵĪåĵĪ":118687,"è¾¹éĻħ":118688,"æłĩè¯Ń":118689,"åĪĩåħ¥çĤ¹":118690,"éĢĨè¢Ń":118691,"è¯ķåīĤ":118692,"绿è±Ĩ":118693,"è®ļ":118694,"åŁºçĿ£å¾Ĵ":118695,"壬":118696,"åħ¨æĺİæĺŁ":118697,"éĢīç§Ģ":118698,"èĪĮå°ĸ":118699,"ä¸įåIJĮç±»åŀĭ":118700,"çĥŁåĽ±":118701,"ç쵿°Ķ":118702,"åĮºç®¡å§Ķä¼ļ":118703,"åĨľåī¯":118704,"åĨľåī¯äº§åĵģ":118705,"èĶļæĿ¥":118706,"沪æĮĩ":118707,"åħ»æ®ĸæĪ·":118708,"æĸĹå¿Ĺ":118709,"é¦ĸé¢Ĩ":118710,"è¡Ģèħ¥":118711,"åĬłç´§":118712,"ä¸Ģèĩ´å¥½è¯Ħ":118713,"第ä¸īèĬĤ":118714,"æī¬å°ĺ":118715,"交éĢļæŀ¢çº½":118716,"鼶ç¢İ":118717,"é»ijæ´ŀ":118718,"çľĭä¸įæĩĤ":118719,"å±ŀå®ŀ":118720,"主åŁİåĮº":118721,"å¨Ľ":118722,"å¨Ľæ¨Ĥ":118723,"ç¬ijæĦı":118724,"èĻ¹æ¡¥":118725,"åIJĦ个çݯèĬĤ":118726,"çķ¥å¾®":118727,"èĢķèĢĺ":118728,"æľ¬åľºæ¯ĶèµĽ":118729,"æĪIJè´¥":118730,"éĢīèĤ¡":118731,"èªŀè¨Ģ":118732,"çŃĶ辩":118733,"èĩªä¹ł":118734,"棺":118735,"ä¸ĩ欧åħĥ":118736,"åģľå·¥":118737,"对åħ¶è¿Ľè¡Į":118738,"积æŀģéħįåIJĪ":118739,"ä¹¾åĿ¤":118740,"å¦ĸæĢª":118741,"èļĮåŁł":118742,"èµĦ产è¯Ħä¼°":118743,"è°ĥçļ®":118744,"éϤå¤ķ":118745,"åĽ´å¢Ļ":118746,"æľįå½¹":118747,"æ·±æ¸Ĭ":118748,"é¢Ħåζ":118749,"çĥ½":118750,"å®ī稳":118751,"建æŀĦ":118752,"çĭĻåĩ»":118753,"主åĭķ註åĨĬ":118754,"éĥ½æľīèĩªå·±":118755,"æİĴåIJį第ä¸Ģ":118756,"麻辣":118757,"çĢļ":118758,"çĥŁèĬ±çĪĨ":118759,"çĥŁèĬ±çĪĨ竹":118760,"èĩªçĦ¶ä¿ĿæĬ¤":118761,"ä»Ļå¢ĥ":118762,"为äºĨéģ¿åħį":118763,"åĨ·åºĵ":118764,"è§£
æĶ¾æĢĿæĥ³":118765,"åĪĿäºĮ":118766,"ä½ĵè´´":118767,"é¦ĸå¯Į":118768,"迪æĭľ":118769,"æļĤç¼ĵ":118770,"æĶ¯æĮģåĬĽåº¦":118771,"侦æİ¢":118772,"马åĪº":118773,"åĮĹæ±½":118774,"ç¹ŀ":118775,"è°İè¨Ģ":118776,"éĢ£çºĮ":118777,"å·³":118778,"ä»»ä½ķæĹ¶åĢĻ":118779,"车èģĶç½ij":118780,"åįķ项":118781,"å¸Ńåį·":118782,"建çŃijæĿIJæĸĻ":118783,"ä¸Ńç§ĭèĬĤ":118784,"ç¡ķ士çłĶç©¶":118785,"ç§ģç«ĭ":118786,"åħļåĴĮæĶ¿åºľ":118787,"æľ¬æ¬¡äº¤æĺĵ":118788,"èººåľ¨åºĬä¸Ĭ":118789,"ç½ijåıĭè¯Ħ论":118790,"å¦Ŀ":118791,"害ç¾ŀ":118792,"åħ¬ç«ĭåĮ»éĻ¢":118793,"ä¸ŀ":118794,"çĶŁçī©è´¨":118795,"åºĶéĤĢ":118796,"æĬ½åıĸ":118797,"åĩłå¼ł":118798,"æijĺç¼ĸ":118799,"ç»ĺæľ¬":118800,"详解":118801,"强硬":118802,"æľĢåħĪè¿ĽçļĦ":118803,"æĭĽèĤ¡":118804,"æĭĽèĤ¡ä¹¦":118805,"åįĥæĸ¹":118806,"åįĥæĸ¹çϾ":118807,"åįĥæĸ¹çĻ¾è®¡":118808,"éħįéŁ³":118809,"驾çħ§":118810,"å¾ģæĪĺ":118811,"èªĵè¨Ģ":118812,"æĭľå¸Ī":118813,"æĭľå¸ĪåѦ":118814,"æĭľå¸ĪåѦèīº":118815,"æĬ±åĽ¢":118816,"ç±³ç²ī":118817,"éĿŀ常éĢĤåIJĪ":118818,"èĪªæµ·":118819,"履约":118820,"åįģåħ«æĿ¡":118821,"éĶ»éĢł":118822,"éĩįè¦ģ举æİª":118823,"åıijæĮ¥ä½ľç͍":118824,"æ·ļ":118825,"人社":118826,"人社å±Ģ":118827,"è¯ķçĤ¹å·¥ä½ľ":118828,"éĺľéĺ³":118829,"æ¡ĥåľĴ":118830,"æ°ijä¼ģ":118831,"æ´ģçϽ":118832,"贵宾":118833,"åħ¬ç¤¾":118834,"è§īæĤŁ":118835,"è®°å¿ĨåĬĽ":118836,"æľĥåĵ¡è¨»åĨĬ":118837,"æŃ¤æ¡Ī":118838,"麻çĹ¹":118839,"çıĢ":118840,"æĸ©èİ·":118841,"çĶ·åŃ©åŃIJ":118842,"å±ĢéĻIJäºİ":118843,"åĭĺæŁ¥":118844,"åIJĥ饱":118845,"èĬ¬åħ°":118846,"æ£ķèī²":118847,"ç¦ıç¥ī":118848,"çͳèĬ±":118849,"æµ·çĽĹ":118850,"èĶij":118851,"æĸĩåѸ":118852,"æ´»æĢ§çĤŃ":118853,"缴éĢļ车":118854,"è°¢éĤĢ":118855,"躺çĿĢ":118856,"åľĥ":118857,"æ¯ıæĹ¥ç»ıæµİ":118858,"åħ¬åħ±æĸĩåĮĸ":118859,"讲æķħäºĭ":118860,"å¯Łçľĭ":118861,"æĤłéĹ²":118862,"åľ°åĿª":118863,"æ¶Įçݰåĩº":118864,"é«ĺçŃīéĻ¢æł¡":118865,"èĮĦåŃIJ":118866,"éĺ²åį«":118867,"ä¾ĭè¡Į":118868,"æĺ¾éľ²":118869,"æĸ°å¸¸æĢģ":118870,"ç»Ŀä½³":118871,"å¯Įæ°ij":118872,"以人æ°ij":118873,"以人æ°ij为":118874,"éĤ¢åı°":118875,"å±ķæ¼Ķ":118876,"çϼå¸ĥ":118877,"è´Łè½½":118878,"åģı离":118879,"æ°¸éģł":118880,"éĩįè¦ģåİŁåĽł":118881,"åįıä¼ļä¼ļåijĺ":118882,"é﾿°ij":118883,"çĶŁäº§è½¦éĹ´":118884,"çģµåĬ¨":118885,"两年åīį":118886,"æĸ¹åľĨ":118887,"æ´»ä¸ĭåİ»":118888,"ä¸ĸçķĮè§Ĥ":118889,"éªĹåıĸ":118890,"ç¾İè²Į":118891,"èĥ½çľĭåĩº":118892,"çϼæı®":118893,"è§Ĥå½±":118894,"åīĥ":118895,"åIJĪèµĦåħ¬åı¸":118896,"å©§":118897,"å¹²æĹ±":118898,"åħŃ个æľĪ":118899,"尤为éĩįè¦ģ":118900,"èĤ½":118901,"ç§¦åĽ½":118902,"æīĺç¦ı":118903,"建çŃijå¸Ī":118904,"åįĩ级æĶ¹éĢł":118905,"å°ıé¢Ŀ":118906,"å°ıé¢Ŀ贷款":118907,"两个维æĬ¤":118908,"æĭįæĭį":118909,"åı¯çĸij":118910,"æį¢åıĸ":118911,"æŃ¦å£«":118912,"èµĸ以":118913,"èµĸ以çĶŁåŃĺ":118914,"æĮļ":118915,"殿åłĤ":118916,"èĩªçĦ¶çķĮ":118917,"ç£ģåľº":118918,"å¦Ĥä½ķçľĭå¾ħ":118919,"ä»ĬæĹ¥å¤´æĿ¡":118920,"è¥¿åŁŁ":118921,"èİ·è¯Ħ":118922,"é¢¨æł¼":118923,"ä¿ĦåĽ½":118924,"æīĵæĭ¼":118925,"å®£ä¼łçīĩ":118926,"å¾Īæĸ¹ä¾¿":118927,"ä¾Ľç»Ļä¾§":118928,"纪念ç¢ij":118929,"毫åħĭ":118930,"èĬ³é¦Ļ":118931,"å·¥åķĨéĵ¶è¡Į":118932,"请çĤ¹åĩ»":118933,"缪":118934,"æĹłæķ°æ¬¡":118935,"èį¯å¸Ī":118936,"èħ¸":118937,"游èīĩ":118938,"åĮ¾":118939,"å·¡èĪª":118940,"æ²»çIJĨä½ĵç³»":118941,"èIJ¥éĢłèī¯å¥½":118942,"æ··æ·Ĩ":118943,"éĢļçķħ":118944,"åĬ³ç´¯":118945,"ä»ĵä½į":118946,"å¢ŀéķ·":118947,"éļIJ约":118948,"æĿĤå¿Ĺ社":118949,"åħ»èĤ²":118950,"åı¯èĥ½åıijçĶŁ":118951,"èĢĥ試":118952,"西侧":118953,"åĬłåĢį":118954,"主æĮģåı¬å¼Ģ":118955,"çķ¢ç«Ł":118956,"éĹ®è¯¢":118957,"æµ·æ£ł":118958,"èĹ©":118959,"注æĺİæĿ¥æºIJ":118960,"æ£Ģçĸ«":118961,"请åģĩ":118962,"æĬļæij¸":118963,"èĵĦçĶµæ±ł":118964,"è·Łä¸įä¸Ĭ":118965,"çݰ代社ä¼ļ":118966,"çѹèµĦ":118967,"ä½ĵèĤ²å½©ç¥¨":118968,"延误":118969,"è¾Ľè¾£":118970,"éĿ¢å®¹":118971,"åį°è®°":118972,"çģŃ亡":118973,"ç´łé£Ł":118974,"åħ´èĩ´":118975,"éľĢè¦ģç͍":1
18976,"éľĢè¦ģç͍åΰ":118977,"å®Ŀå¦Ī":118978,"ç£ĭåķĨ":118979,"éļ¶å±ŀ":118980,"è´¡çĮ®åĬĽéĩı":118981,"åħ¬åħ±èµĦæºIJ":118982,"大éĺª":118983,"åĨĽè®Ń":118984,"æĤ¬å¿µ":118985,"社ä¼ļ稳å®ļ":118986,"å¹²äºĭåĪĽä¸ļ":118987,"æľīæĿ¡ä»¶":118988,"æľīæĿ¡ä»¶çļĦ":118989,"ä¸Ģå¹´ä¸Ģ度":118990,"åİ¥":118991,"强奸":118992,"豪车":118993,"æİĮæŁľ":118994,"æ°´åΩ工ç¨ĭ":118995,"峪":118996,"积æŀģä½ľç͍":118997,"æµ·æ·Ģ":118998,"æµ·æ·ĢåĮº":118999,"çĥŃæĴŃ":119000,"åĿļæĮģä¸įæĩĪ":119001,"åıĮèĦļ":119002,"绣æĪĺ":119003,"ä»»ä½ķ人éĥ½":119004,"åľ°ä¸ĭ室":119005,"åĨ¶çĤ¼":119006,"è°ħè§£":119007,"æ¸Ķèι":119008,"太éĺ³åŁİ":119009,"被æįķ":119010,"计ç®Ĺåύ":119011,"西åĮ»":119012,"èĪĴå¿ĥ":119013,"桦":119014,"éģ²":119015,"åĬij":119016,"è¨Ĺ":119017,"èݺ":119018,"åĸ¬":119019,"çĵ¯":119020,"åĺĺ":119021,"åłķ":119022,"æķĿ":119023,"åij¦":119024,"èĭŀ":119025,"æŃ¹":119026,"æĵ¬":119027,"æ£Ħ":119028,"èε":119029,"奪":119030,"çļĭ":119031,"æĶ¸":119032,"åľ©":119033,"ç¤Ļ":119034,"ç¢ĺ":119035,"éıĪ":119036,"æĦķ":119037,"ç¹³":119038,"èĺ¸":119039,"è²Ĥ":119040,"æ¼²":119041,"æij¹":119042,"æĶĿ":119043,"åŃ¢":119044,"èķŃ":119045,"騰":119046,"æ½¼":119047,"éħ°":119048,"æĴ¥":119049,"蹬":119050,"é¨Ļ":119051,"踹":119052,"éģIJ":119053,"çĺĢ":119054,"èĽ¤":119055,"æĤĸ":119056,"çĴŀ":119057,"ç£IJ":119058,"æİ°":119059,"è¾Ĭ":119060,"å¾ij":119061,"æİĸ":119062,"éģŀ":119063,"éĤ¸":119064,"éĽı":119065,"æĨİ":119066,"æľ½":119067,"çį»":119068,"ç®Ķ":119069,"褶":119070,"æļ¢":119071,"æĺµ":119072,"çıĤ":119073,"æĤ¸":119074,"åģµ":119075,"åĻľ":119076,"壯":119077,"æĴ®":119078,"æģį":119079,"å©ķ":119080,"篱":119081,"éĺĻ":119082,"çīł":119083,"è£ĺ":119084,"è³¢":119085,"éĩľ":119086,"éĵł":119087,"èİĺ":119088,"æ®Ĩ":119089,"çϏ":119090,"è´ı":119091,"ç²±":119092,"å«¡":119093,"åĨ¢":119094,"è¤Ĵ":119095,"æĩĬ":119096,"éľĵ":119097,"塵":119098,"æĭ£":119099,"å»Ł":119100,"飽":119101,"é¢Į":119102,"åļİ":119103,"æ·º":119104,"èĨł":119105,"åİŃ":119106,"åļĩ":119107,"åijĥ":119108,"çĴĭ":119109,"çѱ":119110,"æĭ·":119111,"èį§":119112,"éͰ":119113,"åѰ":119114,"èĵĵ":119115,"èĨ½":119116,"æŀī":119117,"åĸ½":119118,"çĽĶ":119119,"çŃIJ":119120,"ç¾ļ":119121,"èħĮ":119122,"辫":119123,"æ³ĵ":119124,"çͬ":119125,"èŁ²":119126,"åĸª":119127,"å¦ĵ":119128,"è¬Ģ":119129,"çĤĬ":119130,"æĽľ":119131,"æ±IJ":119132,"è´Ī":119133,"èįĢ":119134,"æĬł":119135,"碾":119136,"æ«ĥ":119137,"éŀł":119138,"èijĨ":119139,"祯":119140,"å½Ŀ":119141,"é¦į":119142,"åĮ£":119143,"æľŃ":119144,"åĿĤ":119145,"ä¿ij":119146,"èĵ®":119147,"çijĽ":119148,"æīī":119149,"èĩŁ":119150,"貫":119151,"çİ¥":119152,"æ·¼":119153,"åݲ":119154,"é³Į":119155,"å³Ń":119156,"åijĽ":119157,"é§":119158,"é§IJ":119159,"éģ·":119160,"俪":119161,"æĢĤ":119162,"è¾į":119163,"å±į":119164,"åĭģ":119165,"å¥ļ":119166,"éļħ":119167,"éĴ´":119168,"è¼Ŀ":119169,"宦":119170,"èIJĥ":119171,"çĺĭ":119172,"æĨ¶":119173,"æĤħ":119174,"è¾Ļ":119175,"åijľ":119176,"çłº":119177,"éĢŀ":119178,"æµļ":119179,"éĸ£":119180,"èĸ©":119181,"éĻĭ":119182,"çĤĻ":119183,"èªķ":119184,"丣":119185,"é¹½":119186,"ç±Į":119187,"è´°":119188,"éĭª":119189,"çľ©":119190,"æĴIJ":119191,"èĨº":119192,"éŀĺ":119193,"ç¾²":119194,"窮":119195,"ç´IJ":119196,"æ®´":119197,"纾":119198,"èºį":119199,"ç´ĭ":119200,"çĦĸ":119201,"çĶº":119202,"çī½":119203,"çĤ¯":119204,"ç¼Ķ":119205,"æ¯ĵ":119206,"嬰":119207,"梧":119208,"äºŁ":119209,"è¢ħ":119210,"çįĦ":119211,"è¿¥":119212,"æ¼¾":119213,"çĿij":119214,"績":119215,"é¦ĭ":119216,"é¤ħ":119217,"æ¹Ħ":119218,"æĺĩ":119219,"æŀŃ":119220,"èĸ°":119221,"æŁij":119222,"榻":119223,"åĻĹ":119224,"åĻ´":119225,"棣":119226,"åͧ":119227,"çĨ¹":119228,"輯":119229,"å¢Ł":119230,"é²²":119231,"æĪĽ":119232,"èī¦":119233,"èĬ®":119234,"åĺŁ":119235,"帥":119236,"å¿»":119237,"çĮĿ":119238,"寵":119239,"賦":119240,"èĽ¾":119241,"滾":1
19242,"çĤķ":119243,"éĵ¬":119244,"èĴ¿":119245,"éĴ¨":119246,"çĥĻ":119247,"ç²ķ":119248,"æĥ¦":119249,"溧":119250,"é¢į":119251,"éħ£":119252,"峦":119253,"ç±ģ":119254,"çĥĥ":119255,"åĨĹ":119256,"åıģ":119257,"缧":119258,"ç½µ":119259,"éĴĹ":119260,"å¬ī":119261,"è°ı":119262,"ç³§":119263,"è¾Ń":119264,"æ·¬":119265,"èŁĴ":119266,"诩":119267,"è¦ĥ":119268,"çĻĸ":119269,"é½Ĵ":119270,"çĪIJ":119271,"ç®į":119272,"ç¼İ":119273,"磺":119274,"诫":119275,"褲":119276,"æĵł":119277,"èIJ¦":119278,"çĿ¬":119279,"è°į":119280,"éĦ°":119281,"æł¾":119282,"é¡ı":119283,"縱":119284,"桨":119285,"éĨ¬":119286,"襲":119287,"讪":119288,"婺":119289,"èįŁ":119290,"åĮĿ":119291,"çĨł":119292,"èĽĬ":119293,"æ¸ļ":119294,"å´½":119295,"鲤":119296,"åķ°":119297,"åĮķ":119298,"ä¸IJ":119299,"讥":119300,"åı½":119301,"åı¼":119302,"çļ¿":119303,"è¿Ĥ":119304,"åIJĨ":119305,"å±¹":119306,"èĩ¼":119307,"讹":119308,"é©®":119309,"纫":119310,"æ±ŀ":119311,"æĬ¡":119312,"èĭĩ":119313,"åIJł":119314,"åIJŃ":119315,"åIJ®":119316,"å²ĸ":119317,"ä½ĥ":119318,"çĭĪ":119319,"åºĩ":119320,"åIJĿ":119321,"éŰ":119322,"æ±¹":119323,"忱":119324,"æĭĦ":119325,"æĭĹ":119326,"èĮī":119327,"èĭĽ":119328,"èĮģ":119329,"çŁ¾":119330,"èĻı":119331,"åij»":119332,"åĴĦ":119333,"å¿¿":119334,"èĤ®":119335,"çĭŀ":119336,"çĸŁ":119337,"çĸĻ":119338,"çĸļ":119339,"æ³ŀ":119340,"å¸ļ":119341,"å±ī":119342,"è¿¢":119343,"驹":119344,"çİ·":119345,"çıĬó":119346,"çıĬół":119347,"çıĬółĦ":119348,"çıĬółĦģ":119349,"æĮİ":119350,"æĭ´":119351,"åŀĽ":119352,"èį¤":119353,"æ®ĥ":119354,"çĽ¹":119355,"åĵĨ":119356,"è´»":119357,"毡":119358,"çĭ°":119359,"çĭ¡":119360,"æŁĴ":119361,"æģĥ":119362,"诬":119363,"è¢Ħ":119364,"诲":119365,"èļ¤":119366,"èĢĻ":119367,"åŁĤ":119368,"æįİ":119369,"æįĮ":119370,"æ¢Ĩ":119371,"éħĮ":119372,"çł¾":119373,"æ®ī":119374,"åĶł":119375,"æĻĮ":119376,"èļ£":119377,"èļª":119378,"èļĵ":119379,"鸯":119380,"åĶģ":119381,"åĶĨ":119382,"åĢĶ":119383,"èĪĢ":119384,"豺":119385,"èĥ°":119386,"鸵":119387,"鸳":119388,"é¦ģ":119389,"ç¾Ķ":119390,"æ¶£":119391,"æ¶ķ":119392,"æĤ¯":119393,"诽":119394,"è°Ĩ":119395,"ç¥Ł":119396,"绢":119397,"æįº":119398,"æį¶":119399,"æį»":119400,"æİĤ":119401,"èıł":119402,"èIJ¤":119403,"éħĹ":119404,"çľ¶":119405,"åķĦ":119406,"èļ¯":119407,"èĽĢ":119408,"åͬ":119409,"帷":119410,"éĵIJ":119411,"éĵĽ":119412,"åģİ":119413,"å¾Ļ":119414,"èĦ¯":119415,"è±ļ":119416,"çĮĸ":119417,"çĹĬ":119418,"æ¶®":119419,"æĥŃ":119420,"æĤ´":119421,"æĥĭ":119422,"è°ļ":119423,"æı©":119424,"æIJĢ":119425,"æIJĶ":119426,"æ¦Ķ":119427,"æ¤Ń":119428,"éĽ³":119429,"åĸ³":119430,"è·Ľ":119431,"èľĵ":119432,"èľĴ":119433,"é¹ĥ":119434,"éĶĦ":119435,"çĶ¥":119436,"çŃı":119437,"çĮ©":119438,"çĮ¬":119439,"çĮ¾":119440,"çĹ¢":119441,"çĹª":119442,"æĥ°":119443,"çªĺ":119444,"è°¤":119445,"éļĺ":119446,"å©¿":119447,"é¹ī":119448,"çijĻ":119449,"æĸŁ":119450,"椿":119451,"éħª":119452,"éĽ¹":119453,"åŦ":119454,"è··":119455,"è·º":119456,"è·¤":119457,"èľĪ":119458,"èľĹ":119459,"å¹Į":119460,"é¦ı":119461,"èªĬ":119462,"æ¼ĵ":119463,"è¤Ĥ":119464,"èĶĹ":119465,"èͼ":119466,"åħ¢":119467,"裳":119468,"èľ»":119469,"èĿĩ":119470,"åĺĢ":119471,"é͹":119472,"ç®ķ":119473,"箩":119474,"çĺ©":119475,"çĺŁ":119476,"æ¼±":119477,"寥":119478,"骡":119479,"æĴµ":119480,"æĴ¬":119481,"è±Į":119482,"åĺ¹":119483,"èĿł":119484,"èĿĮ":119485,"èĿĹ":119486,"èĿĻ":119487,"éķIJ":119488,"稼":119489,"ç¯ĵ":119490,"èĨĽ":119491,"鲫":119492,"çĺª":119493,"鲨":119494,"æĨĶ":119495,"ç¿©":119496,"褥":119497,"ç¼Ń":119498,"åĻ©":119499,"çĵ¢":119500,"éľİ":119501,"踱":119502,"è¹Ĥ":119503,"èŁĨ":119504,"鹦":119505,"篡":119506,"çĺ¸":119507,"窿":119508,"ç¼°":119509,"èĹIJ":119510,"è¹ĭ":119511,"èŁĭ":119512,"èŁĢ":119513,"赡":119514,"èĩĬ":119515,"é³Ħ":119516,"ç³ł":119517,"æĩ¦":119518,"åļ£":119519,"éķ°":11952
0,"é³į":119521,"ç°¸":119522,"çĻ£":119523,"é³ĸ":119524,"é¬ĵ":119525,"èłķ":119526,"éľ¹":119527,"èºı":119528,"黯":119529,"çĵ¤":119530,"çŁĹ":119531,"ä¹Ĥ":119532,"ä¹ľ":119533,"åħĢ":119534,"å¼ĭ":119535,"åŃij":119536,"åŃĵ":119537,"幺":119538,"äºĵ":119539,"廿":119540,"ä¸ı":119541,"åįħ":119542,"ä»ĥ":119543,"ä»ī":119544,"ä»Ĥ":119545,"åĪĪ":119546,"çĪ»":119547,"åįŀ":119548,"éĹ©":119549,"讣":119550,"夬":119551,"çĪ¿":119552,"æ¯ĭ":119553,"éĤĹ":119554,"éĤĽ":119555,"èī½":119556,"èī¿":119557,"åıµ":119558,"ä¸ķ":119559,"åĮľ":119560,"åĬ¢":119561,"åįŁ":119562,"åı±":119563,"åı»":119564,"仨":119565,"代":119566,"仡":119567,"仫":119568,"ä»ŀ":119569,"åį®":119570,"æ°IJ":119571,"çĬ°":119572,"åĪį":119573,"éĤĿ":119574,"éĤĻ":119575,"讦":119576,"è®§":119577,"讫":119578,"å°»":119579,"éĺ¡":119580,"å°ķ":119581,"å¼ģ":119582,"èĢĴ":119583,"çİİ":119584,"çİij":119585,"åľ¬":119586,"æī¦":119587,"åľª":119588,"åľ¹":119589,"æīª":119590,"åľ®":119591,"åľ¯":119592,"èĬĬ":119593,"èĬį":119594,"èĬĦ":119595,"èĬ¨":119596,"èĬij":119597,"èĬİ":119598,"èĬĹ":119599,"äºĺ":119600,"åİį":119601,"夼":119602,"æĪį":119603,"å°¥":119604,"乩":119605,"æĹ¯":119606,"æĽ³":119607,"å²Į":119608,"屺":119609,"åĩ¼":119610,"åĽ¡":119611,"éĴĩ":119612,"ç¼¶":119613,"æ°ĺ":119614,"æ°ĸ":119615,"çīĿ":119616,"ä¼İ":119617,"ä¼Ľ":119618,"ä¼¢":119619,"佤":119620,"仵":119621,"ä¼¥":119622,"ä¼§":119623,"ä¼ī":119624,"伫":119625,"åĽŁ":119626,"æ±Ĩ":119627,"åĪĸ":119628,"å¤Ļ":119629,"æĹ®":119630,"åĪİ":119631,"çĬ·":119632,"çĬ¸":119633,"èĪĽ":119634,"åĩ«":119635,"éĤ¬":119636,"饧":119637,"æ±Ķ":119638,"æ±ľ":119639,"æ±Ĭ":119640,"å¿ĸ":119641,"å¿ı":119642,"è®´":119643,"讵":119644,"è®·":119645,"èģ¿":119646,"èī®":119647,"åݾ":119648,"å¦ģ":119649,"纡":119650,"纣":119651,"纥":119652,"纨":119653,"çİķ":119654,"çİĻ":119655,"æĬŁ":119656,"æĬĶ":119657,"åľ»":119658,"åĿį":119659,"æĬĥ":119660,"ã§IJ":119661,"èĬ«":119662,"èĬ¾":119663,"èĭĪ":119664,"èĭ£":119665,"èĭĭ":119666,"èĬ¼":119667,"èĭĮ":119668,"èĭģ":119669,"èĬ©":119670,"èĬª":119671,"èĬ¡":119672,"èĬŁ":119673,"èĭĦ":119674,"èĭİ":119675,"èĭ¡":119676,"æĿĮ":119677,"æĿĵ":119678,"æĿĪ":119679,"å¿ij":119680,"åŃĽ":119681,"éĤ´":119682,"éĤ³":119683,"å¥ģ":119684,"è±ķ":119685,"å¿Ĵ":119686,"欤":119687,"轫":119688,"è¿ĵ":119689,"éĤ¶":119690,"å¿IJ":119691,"åį£":119692,"éĤº":119693,"æĹ°":119694,"åijĭ":119695,"åijĴ":119696,"åijĵ":119697,"åijĶ":119698,"åijĸ":119699,"æĹ¸":119700,"åIJ¡":119701,"èϬ":119702,"åIJ½":119703,"åIJ£":119704,"åIJ²":119705,"å¸ı":119706,"å²Ī":119707,"å²ĺ":119708,"åħķ":119709,"åĽµ":119710,"åĽ«":119711,"éĴĬ":119712,"éĴĭ":119713,"éĴĮ":119714,"è¿ķ":119715,"æ°Ļ":119716,"æ°ļ":119717,"çī¤":119718,"ä½ŀ":119719,"ä½ļ":119720,"ä½Ŀ":119721,"ä½Ĺ":119722,"å½·":119723,"ä½ĺ":119724,"ä½¥":119725,"豸":119726,"åĿĮ":119727,"èĤŁ":119728,"å¥Ĥ":119729,"åĬ¬":119730,"çĭģ":119731,"鸳":119732,"饨":119733,"饩":119734,"饫":119735,"饬":119736,"åºij":119737,"åºĭ":119738,"çĸĶ":119739,"çĸĸ":119740,"èĤĵ":119741,"éű":119742,"éĹ³":119743,"çĤĢ":119744,"æ²£":119745,"æ²ħ":119746,"æ²Ķ":119747,"沤":119748,"æ²ı":119749,"æ²ļ":119750,"汩":119751,"汨":119752,"沨":119753,"æ±´":119754,"æ²Ĩ":119755,"沩":119756,"æ³IJ":119757,"æĢĥ":119758,"æĢĦ":119759,"å¿¡":119760,"忤":119761,"忾":119762,"æĢħ":119763,"忪":119764,"æĢĨ":119765,"å¿Ń":119766,"忸":119767,"è¯Ĥ":119768,"è¯ĥ":119769,"è¯ħ":119770,"è¯ĭ":119771,"è¯Į":119772,"è¯Ĵ":119773,"éĻĤ":119774,"éĻī":119775,"妩":119776,"妪":119777,"妣":119778,"å¦Ĺ":119779,"妫":119780,"å§Ĵ":119781,"妤":119782,"åĬŃ":119783,"åĪŃ":119784,"éĤ°":119785,"çºŃ":119786,"纰":119787,"纴":119788,"çİ¡":119789,"çİŃ":119790,"çİł":119791,"çİ¢":119792,"çݦ":119793,"çĽĤ":119794,"å¿Ŀ":119795,"åĮ¦":119796,"åĿ©":119797,"æĬ¨":119798,"æĭ¤":119799,"åĿ«":
119800,"æĭĪ":119801,"åŀĨ":119802,"æĬ»":119803,"åĬ¼":119804,"æĭĥ":119805,"æĭĬ":119806,"åĿ¼":119807,"åĿ»":119808,"ã§Ł":119809,"åĿ¨":119810,"åĿŃ":119811,"æĬ¿":119812,"åĿ³":119813,"èĭ·":119814,"èĭ¤":119815,"èĮı":119816,"èĭ«":119817,"èĭľ":119818,"èĭ´":119819,"èĭĴ":119820,"èĭĺ":119821,"èĮĮ":119822,"èĭ»":119823,"èĭĵ":119824,"èĮļ":119825,"èĮĨ":119826,"èĮij":119827,"èĮĵ":119828,"èĮĶ":119829,"èĮķ":119830,"èĮĢ":119831,"èĭķ":119832,"æŀ¥":119833,"æŀĩ":119834,"æĿª":119835,"æĿ³":119836,"æŀ§":119837,"æĿµ":119838,"æŀ¨":119839,"æŀŀ":119840,"æŀĭ":119841,"æĿ»":119842,"æĿ·":119843,"æĿ¼":119844,"磸":119845,"çłĢ":119846,"åγ":119847,"å¥Ħ":119848,"æ®ģ":119849,"éĥı":119850,"è½Ń":119851,"éĥħ":119852,"鸢":119853,"缱":119854,"æĺĻ":119855,"æĿ²":119856,"æĺĥ":119857,"åĴĤ":119858,"åij¸":119859,"æĺĢ":119860,"æĹ»":119861,"æĺī":119862,"çĤħ":119863,"çķĢ":119864,"èĻ®":119865,"åĴĢ":119866,"åij·":119867,"黾":119868,"åij±":119869,"åij¤":119870,"åĴĨ":119871,"åĴĽ":119872,"åij¶":119873,"åij£":119874,"åĴĿ":119875,"å²¢":119876,"岿":119877,"岬":119878,"岫":119879,"å¸Ļ":119880,"å²£":119881,"å³ģ":119882,"åĪ¿":119883,"å²·":119884,"åīĢ":119885,"å¸Ķ":119886,"å³Ħ":119887,"æ²ĵ":119888,"åĽ¹":119889,"ç½Ķ":119890,"éĴį":119891,"éĴİ":119892,"éĴı":119893,"éĴĴ":119894,"éĴķ":119895,"éĤ¾":119896,"è¿®":119897,"çī¦":119898,"竺":119899,"迤":119900,"ä½¶":119901,"ä¾ij":119902,"ä¾ī":119903,"èĩ¾":119904,"ä¾Ĺ":119905,"ä¾ı":119906,"侩":119907,"ä½»":119908,"ä½¾":119909,"侪":119910,"ä½¼":119911,"佯":119912,"侬":119913,"å¸Ľ":119914,"ä¾Ķ":119915,"å¾Ĥ":119916,"åν":119917,"éĥĦ":119918,"ç±´":119919,"çĵ®":119920,"æĪĹ":119921,"èĤ¼":119922,"äıĿ":119923,"èĤ±":119924,"èĤ«":119925,"è¿©":119926,"éĥĩ":119927,"çĭİ":119928,"çĭį":119929,"çĭĴ":119930,"åĴİ":119931,"饯":119932,"饴":119933,"åĨ½":119934,"åĨ¼":119935,"åºĸ":119936,"çĸł":119937,"çĸĿ":119938,"åħĸ":119939,"åĬ¾":119940,"ð¬ī":119941,"ð¬ī¼":119942,"çĤĺ":119943,"çĤĿ":119944,"çĤĶ":119945,"æ³Ķ":119946,"æ²Ń":119947,"æ³·":119948,"æ³±":119949,"æ³ħ":119950,"æ³ł":119951,"泺":119952,"æ³ĸ":119953,"泫":119954,"æ³®":119955,"æ²±":119956,"泯":119957,"æĢĻ":119958,"æĢµ":119959,"æĢ¦":119960,"æĢĽ":119961,"æĢı":119962,"æĢį":119963,"ã¤":119964,"ã¤ĺ":119965,"æĢ©":119966,"æĢ«":119967,"æĢ¿":119968,"å®ķ":119969,"穹":119970,"å®ĵ":119971,"è¯ĵ":119972,"è¯Ķ":119973,"è¯ĸ":119974,"è¯ĺ":119975,"æĪ¾":119976,"è¯Ļ":119977,"æĪ½":119978,"éĥĵ":119979,"è¡©":119980,"ç¥Ĩ":119981,"ç¥İ":119982,"ç¥ĩ":119983,"è¯ľ":119984,"è¯Ł":119985,"诣":119986,"诤":119987,"诧":119988,"诨":119989,"æĪķ":119990,"éĻĶ":119991,"妲":119992,"妯":119993,"å§Ĺ":119994,"å¸ij":119995,"åŃ¥":119996,"驽":119997,"èϱ":119998,"迨":119999,"ç»Ģ":120000,"ç»ģ":120001,"ç»Ĥ":120002,"é©·":120003,"驸":120004,"ç»ī":120005,"ç»Į":120006,"éªĢ":120007,"ç;":120008,"çıı":120009,"çıIJ":120010,"çıij":120011,"çݳ":120012,"顸":120013,"çıī":120014,"çıĪ":120015,"æĭ®":120016,"åŀŃ":120017,"æĮĿ":120018,"æĮŀ":120019,"åŀ¤":120020,"èµ³":120021,"è´²":120022,"åŀ±":120023,"åŀĮ":120024,"åŀ§":120025,"åŀĵ":120026,"æĮ¦":120027,"åŀł":120028,"èįļ":120029,"èįij":120030,"è´³":120031,"èįľ":120032,"èİĴ":120033,"èĮ¼":120034,"èĮ´":120035,"èĮ±":120036,"èİĽ":120037,"èįŀ":120038,"èĮ¯":120039,"èįı":120040,"èįĩ":120041,"èįĥ":120042,"èįł":120043,"èĮŃ":120044,"åŀ©":120045,"èį¥":120046,"èį¦":120047,"èį¨":120048,"èį©":120049,"åīĭ":120050,"èįª":120051,"èį¬":120052,"èį®":120053,"æŁ°":120054,"æłī":120055,"æŁĺ":120056,"æłĬ":120057,"æŁ©":120058,"æŀ°":120059,"æłĮ":120060,"æŁĻ":120061,"æŀµ":120062,"æŀ³":120063,"æŁŀ":120064,"æŁĿ":120065,"æłĢ":120066,"æŁ¢":120067,"æłİ":120068,"æŁĪ":120069,"æŁģ":120070,"æŀ·":120071,"æŁ½":120072,"åīĮ":120073,"éħĬ":120074,"éĥ¦":120075,"çĶŃ":120076,"çłĹ":12007
7,"çłĺ":120078,"çłĴ":120079,"æĸ«":120080,"çłŃ":120081,"çłľ":120082,"èĢ·":120083,"èĻº":120084,"æ®Ĥ":120085,"æ®ĩ":120086,"æ®Ħ":120087,"è½±":120088,"è½²":120089,"è½³":120090,"è½¶":120091,"轸":120092,"èĻ¿":120093,"æ¯ĸ":120094,"è§ĩ":120095,"å°ľ":120096,"åĵIJ":120097,"çľĦ":120098,"çľį":120099,"ðł³":120100,"ðł³IJ":120101,"éĥ¢":120102,"çľĩ":120103,"çľĬ":120104,"çľĪ":120105,"禺":120106,"åĵĤ":120107,"åĴ´":120108,"æĽ·":120109,"æĺ´":120110,"åĴ¦":120111,"åĵĵ":120112,"åĵĶ":120113,"çķİ":120114,"åij²":120115,"èĥĦ":120116,"çķĭ":120117,"çķĪ":120118,"èϼ":120119,"èĻ»":120120,"çĽħ":120121,"åĴ£":120122,"åĵķ":120123,"åīIJ":120124,"éĥ§":120125,"åĴ»":120126,"åĽ¿":120127,"åĴ¿":120128,"åĵĮ":120129,"åĵĻ":120130,"åĵļ":120131,"åĴ©":120132,"åĴ¤":120133,"åĵĿ":120134,"åĵı":120135,"åĵŀ":120136,"å³£":120137,"ç½ĺ":120138,"å³Ĵ":120139,"峤":120140,"å³ĭ":120141,"è´¶":120142,"éĴļ":120143,"éĴ¡":120144,"éĴ£":120145,"éĴ¤":120146,"éĴ«":120147,"æ°¡":120148,"çī¯":120149,"éĥľ":120150,"ç§ķ":120151,"ç§Ń":120152,"竽":120153,"ç¬Ī":120154,"俦":120155,"俨":120156,"ä¿ħ":120157,"åıŁ":120158,"åŀ¡":120159,"çī®":120160,"ä¿£":120161,"ä¿ļ":120162,"çļĪ":120163,"ä¿Ł":120164,"éĢħ":120165,"å¾ĩ":120166,"å¾ī":120167,"èĪ¢":120168,"éĥĹ":120169,"ä¿İ":120170,"éĥ¤":120171,"çΰ":120172,"éĥĽ":120173,"çĵ´":120174,"èĥ¨":120175,"èĥª":120176,"èĥĽ":120177,"èĥĤ":120178,"èĥĻ":120179,"èĥį":120180,"èĥĹ":120181,"èĥĿ":120182,"æľIJ":120183,"èĥ«":120184,"鸨":120185,"åĮį":120186,"çĭ¨":120187,"çĭ¯":120188,"é£ij":120189,"çĭ©":120190,"çĭ²":120191,"è¨ĩ":120192,"éĢĦ":120193,"æĺĿ":120194,"饷":120195,"饸":120196,"饹":120197,"åŃª":120198,"å¨Ī":120199,"庥":120200,"çĸ¬":120201,"çĸ£":120202,"çĸ¥":120203,"çĸŃ":120204,"åºł":120205,"ç«ij":120206,"é£Ĵ":120207,"éĹ¼":120208,"éĹ¾":120209,"éĹ¿":120210,"éĺĤ":120211,"ç¾ij":120212,"迸":120213,"ç±¼":120214,"éħĭ":120215,"çĤ»":120216,"çĥĢ":120217,"çĤ·":120218,"æ´±":120219,"æ´¹":120220,"æ´§":120221,"æ´Į":120222,"æµĥ":120223,"æ´ĩ":120224,"æ´Ħ":120225,"æ´Ļ":120226,"æ¶İ":120227,"æ´İ":120228,"æ´«":120229,"æµį":120230,"æ´®":120231,"æ´µ":120232,"æµĴ":120233,"æµĶ":120234,"æµķ":120235,"æ´³":120236,"æģ¸":120237,"æģĵ":120238,"æģ¹":120239,"æģ«":120240,"æģ»":120241,"æģĤ":120242,"æģª":120243,"æģ½":120244,"宥":120245,"æīĥ":120246,"衲":120247,"衽":120248,"è¡¿":120249,"è¢Ĥ":120250,"ç¥ľ":120251,"ç¥ĵ":120252,"ç¥ļ":120253,"诮":120254,"ç¥Ĺ":120255,"祢":120256,"诰":120257,"诳":120258,"鸩":120259,"æĺ¶":120260,"åĴ«":120261,"å¼Ń":120262,"çīģ":120263,"èĥ¥":120264,"éĻŁ":120265,"å§®":120266,"å¨Ĩ":120267,"å§Ŀ":120268,"å§£":120269,"å§ĺ":120270,"å§¹":120271,"羿":120272,"çĤ±":120273,"磾":120274,"ç»Ķ":120275,"éªģ":120276,"éªħ":120277,"ç»Ĺ":120278,"综":120279,"éªĪ":120280,"èĢĸ":120281,"æĮĪ":120282,"çı¥":120283,"çıĻ":120284,"顼":120285,"çı°":120286,"çı©":120287,"çı§":120288,"çı£":120289,"çıŀ":120290,"çIJ¤":120291,"çı²":120292,"æģļ":120293,"åŁķ":120294,"åŁĺ":120295,"åŁĻ":120296,"åŁļ":120297,"æĮ¹":120298,"èĢĨ":120299,"èĢĦ":120300,"åŁĴ":120301,"æįĭ":120302,"è´½":120303,"åŀ¸":120304,"æįĥ":120305,"çĽį":120306,"èį¸":120307,"èݳ":120308,"èİ´":120309,"èݪ":120310,"èİł":120311,"èİľ":120312,"èİħ":120313,"èį¼":120314,"èİ©":120315,"èį½":120316,"èݸ":120317,"èį»":120318,"èݨ":120319,"鸪":120320,"èݼ":120321,"æł²":120322,"æł³":120323,"æ¡¡":120324,"æ¡İ":120325,"æ¡¢":120326,"桤":120327,"æ¢ĥ":120328,"æłĿ":120329,"æ¡ķ":120330,"æ¡ģ":120331,"æ¡§":120332,"æ¡ħ":120333,"æłŁ":120334,"æ¡ī":120335,"æł©":120336,"éĢij":120337,"éĢĭ":120338,"å½§":120339,"鬲":120340,"è±ĩ":120341,"éħIJ":120342,"é̦":120343,"åİĿ":120344,"åѬ":120345,"çłĿ":120346,"çł¹":120347,"çł§":120348,"çł·":120349,"糣":120350,"çł¼":120351,"çł¥":120352,"çł£":120353,"åīŀ":120354,"çł»"
:120355,"è½¼":120356,"è½¾":120357,"è¾Ĥ":120358,"鸫":120359,"趸":120360,"é¾Ģ":120361,"鸬":120362,"èĻĶ":120363,"羬":120364,"åĶĽ":120365,"çľĻ":120366,"åĵ§":120367,"åĵ½":120368,"æĻģ":120369,"鸮":120370,"è¶µ":120371,"è¶¿":120372,"çķĽ":120373,"èļ¨":120374,"èļľ":120375,"èļį":120376,"èļĭ":120377,"èļ¬":120378,"èļĿ":120379,"èļ§":120380,"åĶ¢":120381,"åľĦ":120382,"åĶ£":120383,"åĶı":120384,"çĽİ":120385,"åĶij":120386,"å´Ĥ":120387,"å´ĥ":120388,"罡":120389,"ç½Ł":120390,"è§Ĭ":120391,"èµħ":120392,"éĴ²":120393,"éĴµ":120394,"éĴ¹":120395,"éĴº":120396,"éĴ½":120397,"éĴ¼":120398,"éĴ¿":120399,"éĵĢ":120400,"éĵĦ":120401,"éĵĨ":120402,"éĵĪ":120403,"éĵī":120404,"éĵĬ":120405,"éĵĭ":120406,"éĵĮ":120407,"éĵį":120408,"ä¥":120409,"䥽":120410,"éĵİ":120411,"æ°©":120412,"æ°¤":120413,"æ°¦":120414,"毪":120415,"èĪIJ":120416,"ç§£":120417,"ç§«":120418,"çĽī":120419,"ç¬Ħ":120420,"ç¬ķ":120421,"ç¬Ĭ":120422,"ç¬ı":120423,"ç¬Ĩ":120424,"俸":120425,"俵":120426,"åģĮ":120427,"俳":120428,"ä¿¶":120429,"å̬":120430,"åĢı":120431,"æģģ":120432,"åĢŃ":120433,"俾":120434,"åĢľ":120435,"éļ¼":120436,"éļ½":120437,"åĢĮ":120438,"åĢ¥":120439,"èĩ¬":120440,"éĥ«":120441,"å̍":120442,"è¡Ħ":120443,"é¢Ģ":120444,"å¾ķ":120445,"èĪ«":120446,"衾":120447,"èĥ¯":120448,"èĥ±":120449,"èĥ´":120450,"èĥŃ":120451,"èĦį":120452,"èĥ¼":120453,"èĦĴ":120454,"鸱":120455,"鸲":120456,"çĭ·":120457,"çĮģ":120458,"çĭ³":120459,"çĮĥ":120460,"çĭº":120461,"éĢĸ":120462,"æ¡Ģ":120463,"饽":120464,"åĩĩ":120465,"æĮĽ":120466,"亳":120467,"çĸ³":120468,"çĸ´":120469,"çĸ¸":120470,"çĸ½":120471,"çĹĪ":120472,"çĸ±":120473,"çĹĤ":120474,"çĹī":120475,"è¡®":120476,"é¢ĥ":120477,"æģ£":120478,"æĹĨ":120479,"æĹĦ":120480,"æĹĥ":120481,"éĺĥ":120482,"éĺĦ":120483,"è¨ļ":120484,"éĺĨ":120485,"æģĻ":120486,"ç²ij":120487,"çĥľ":120488,"çĥ©":120489,"çĥĬ":120490,"åī¡":120491,"éĥ¯":120492,"çĥ¬":120493,"æ¶ij":120494,"浯":120495,"æ¶ŀ":120496,"æ¶Ł":120497,"å¨ij":120498,"æ¶ł":120499,"æµŀ":120500,"æ¶ĵ":120501,"æµ¥":120502,"æ¶Ķ":120503,"æµľ":120504,"æµł":120505,"æµ£":120506,"æĤļ":120507,"æĤŃ":120508,"æĤĿ":120509,"æĤĴ":120510,"æĤĮ":120511,"æĤĽ":120512,"çªĪ":120513,"åīľ":120514,"诹":120515,"诼":120516,"è¢Ĵ":120517,"袢":120518,"诿":120519,"è°Ģ":120520,"è°Ĥ":120521,"è°Ħ":120522,"è°ĩ":120523,"å±IJ":120524,"å±Ļ":120525,"éϬ":120526,"åĭIJ":120527,"å¥ĺ":120528,"çīĤ":120529,"èļ©":120530,"éϲ":120531,"å¨Į":120532,"å¨ī":120533,"娲":120534,"娴":120535,"娣":120536,"å¨ĵ":120537,"å©Ģ":120538,"çķļ":120539,"éĢ¡":120540,"绳":120541,"éªĬ":120542,"绡":120543,"éªĭ":120544,"绦":120545,"绨":120546,"éªİ":120547,"éĤķ":120548,"鸶":120549,"å½Ĺ":120550,"èĢľ":120551,"çĦĺ":120552,"èĪĤ":120553,"çIJı":120554,"çIJĩ":120555,"麸":120556,"æı¶":120557,"åŁ´":120558,"åŁ¯":120559,"æį¯":120560,"æİ³":120561,"æİ´":120562,"åŁ¸":120563,"åŁµ":120564,"èµ§":120565,"åŁ¤":120566,"æįŃ":120567,"é̵":120568,"åŁĿ":120569,"åłĭ":120570,"åłį":120571,"æİ¬":120572,"鸷":120573,"æį½":120574,"æİĬ":120575,"åłī":120576,"æİ¸":120577,"æį©":120578,"æİ®":120579,"æĤ«":120580,"åŁŃ":120581,"åŁ½":120582,"æİĩ":120583,"æİ¼":120584,"èģĥ":120585,"èIJģ":120586,"èıĺ":120587,"åłĩ":120588,"èIJĺ":120589,"èIJĭ":120590,"èı½":120591,"èıĸ":120592,"èIJľ":120593,"èIJ¸":120594,"èIJij":120595,"棻":120596,"èıĶ":120597,"èıŁ":120598,"èIJı":120599,"èı¹":120600,"èıª":120601,"èıħ":120602,"èıĢ":120603,"èı°":120604,"èı¡":120605,"梿":120606,"æ¢ı":120607,"è§ĭ":120608,"æ¡´":120609,"æ¡·":120610,"æ£ģ":120611,"æ¡«":120612,"æ£Ĥ":120613,"åķ¬":120614,"éĥ¾":120615,"æķķ":120616,"è±ī":120617,"éĦĦ":120618,"éħŀ":120619,"ç¡İ":120620,"ç¡Ń":120621,"ç¡ĸ":120622,"ç¡Ĺ":120623,"ç¡IJ":120624,"ç¡ĩ":120625,"ç¡Į":120626,"鸸":120627,"çĵł":120628,"åĮı":120629,"åİ©":120630,"æ®Ĵ":120631,"æ®ĵ":120632,"æ®į
":120633,"èµī":120634,"鼩":120635,"è¾Ħ":120636,"åłij":120637,"çľŃ":120638,"羦":120639,"åķ§":120640,"æĻ¡":120641,"æĻ¤":120642,"çľµ":120643,"åľĬ":120644,"åĸı":120645,"åķī":120646,"åĭĸ":120647,"æĻŀ":120648,"å͵":120649,"æĻĹ":120650,"åķŃ":120651,"çķ¦":120652,"趺":120653,"åķ®":120654,"è·Ħ":120655,"èļ¶":120656,"èĽĦ":120657,"èĽİ":120658,"èĽĨ":120659,"èļ°":120660,"åľī":120661,"èļ±":120662,"èĽī":120663,"èĽı":120664,"èļ´":120665,"åķģ":120666,"åķķ":120667,"åĶ¿":120668,"åķIJ":120669,"åͼ":120670,"åĶ·":120671,"åķĸ":120672,"åķµ":120673,"åķ¶":120674,"åķ·":120675,"åͳ":120676,"åͰ":120677,"åķľ":120678,"帻":120679,"å´ļ":120680,"å´¦":120681,"帼":120682,"å´®":120683,"å´¤":120684,"å´Ĩ":120685,"èµĩ":120686,"èµĪ":120687,"èµĬ":120688,"éĵij":120689,"éĵĴ":120690,"éĵĹ":120691,"éĵĻ":120692,"éĵŁ":120693,"éĵ¡":120694,"éĵ¢":120695,"éĵ£":120696,"éĵ¤":120697,"éĵ§":120698,"éĵ¨":120699,"éĵ©":120700,"éĵª":120701,"éĵ«":120702,"éĵ¯":120703,"éĵ°":120704,"éĵ±":120705,"éĵ³":120706,"éĵµ":120707,"éĵ·":120708,"çī¾":120709,"鸹":120710,"ç§¾":120711,"é̶":120712,"笺":120713,"çŃĩ":120714,"笸":120715,"笪":120716,"笮":120717,"笳":120718,"笥":120719,"笤":120720,"笳":120721,"笾":120722,"ç¬ŀ":120723,"åģ¾":120724,"åģĥ":120725,"åģķ":120726,"åģĪ":120727,"åĤĢ":120728,"åģ¬":120729,"åģ»":120730,"çļij":120731,"çļİ":120732,"鸻":120733,"å¾ľ":120734,"èΏ":120735,"èĪ»":120736,"èĪ´":120737,"èĪ·":120738,"é¾Ľ":120739,"ç¿İ":120740,"èĦ¬":120741,"èĦĺ":120742,"èĦ²":120743,"åĮIJ":120744,"çĮĹ":120745,"çĮ¡":120746,"çĮŀ":120747,"æĸĽ":120748,"çĮķ":120749,"é¦Ĺ":120750,"é¦ĥ":120751,"é¦Ħ":120752,"鸾":120753,"庹":120754,"庾":120755,"çĹĶ":120756,"çĹį":120757,"ç¿Ĭ":120758,"æĹĮ":120759,"æĹİ":120760,"袤":120761,"éĺĩ":120762,"éĺĪ":120763,"éĺī":120764,"éĺĬ":120765,"éĺĭ":120766,"éĺį":120767,"éĺı":120768,"ç¾Ł":120769,"ç²Ŀ":120770,"çĦIJ":120771,"çĦĵ":120772,"çĦĹ":120773,"æ·ħ":120774,"æ·ŀ":120775,"æ¸İ":120776,"æ¶¿":120777,"æ·ĸ":120778,"æĮ²":120779,"æ·ł":120780,"涸":120781,"æ¸ij":120782,"æ·¦":120783,"æ·Ŀ":120784,"涪":120785,"æ·Ļ":120786,"æ¶«":120787,"æ¸Į":120788,"æĤ»":120789,"æĤ±":120790,"æĥĿ":120791,"æĥĺ":120792,"æĥĨ":120793,"æĥļ":120794,"æĥĩ":120795,"æĥ®":120796,"çªķ":120797,"è°Į":120798,"æīĪ":120799,"çļ²":120800,"è°ij":120801,"è£Ĩ":120802,"袷":120803,"è£ī":120804,"è°Ĵ":120805,"è°Ķ":120806,"è°ķ":120807,"è°ĸ":120808,"è°Ĺ":120809,"è°Ļ":120810,"è°Ŀ":120811,"é̝":120812,"éĥ¿":120813,"éļĪ":120814,"ç²ľ":120815,"éļį":120816,"éļĹ":120817,"å©Ĭ":120818,"娼":120819,"å©¢":120820,"婵":120821,"èĥ¬":120822,"è¢Ī":120823,"ç¿Į":120824,"æģ¿":120825,"欸":120826,"绫":120827,"éªIJ":120828,"绯":120829,"ç»±":120830,"éªĴ":120831,"绲":120832,"éªĵ":120833,"ç»¶":120834,"绺":120835,"ç»»":120836,"绾":120837,"éªĸ":120838,"ç¼ģ":120839,"èĢł":120840,"çIJ«":120841,"çIJµ":120842,"çIJ¶":120843,"çIJ¥":120844,"çIJ¨":120845,"çIJ°":120846,"çIJ®":120847,"çIJ¯":120848,"çIJ¬":120849,"çIJļ":120850,"è¾ĩ":120851,"é¼ĭ":120852,"æı³":120853,"åłŀ":120854,"æIJ½":120855,"æı¸":120856,"æıł":120857,"åłĻ":120858,"è¶Ħ":120859,"æıĸ":120860,"é¢ī":120861,"å¡Ħ":120862,"æı¿":120863,"èĢĭ":120864,"æıĦ":120865,"èĽ©":120866,"èĽ°":120867,"å¡Ĩ":120868,"æijĴ":120869,"æıĨ":120870,"æİ¾":120871,"èģĴ":120872,"èijij":120873,"èijļ":120874,"éĿ°":120875,"éĿ¸":120876,"èij³":120877,"èijº":120878,"èij¸":120879,"èIJ¼":120880,"èij¶":120881,"èĴĮ":120882,"èijŃ":120883,"楮":120884,"棼":120885,"æ¤Ł":120886,"棹":120887,"椤":120888,"棰":120889,"èµį":120890,"æ¤ĭ":120891,"æ¤ģ":120892,"椪":120893,"æ¤IJ":120894,"é¹ģ":120895,"éħ¤":120896,"éħ¢":120897,"éħ¡":120898,"é¹Ĥ":120899,"æ®ļ":120900,"æ®Ľ":120901,"鼱":120902,"è¾ĭ":120903,"æ¤ł":120904,"è¾İ":120905,"çĿĦ":120906,"çĿĩ":120907,"çĿĥ":120908,"æĪ¢":120909,"åĸĭ":1209
10,"åĹĴ":120911,"åĸĥ":120912,"åĸ±":120913,"åĸ¹":120914,"æĻ·":120915,"åĸĪ":120916,"è·ĸ":120917,"è·Ĺ":120918,"è·ŀ":120919,"è·ļ":120920,"è·İ":120921,"è·ı":120922,"è·Ĩ":120923,"èĽ±":120924,"èĽ²":120925,"èĽŃ":120926,"èĽ³":120927,"èĽIJ":120928,"èĽĶ":120929,"èĽŀ":120930,"èĽ´":120931,"èĽĺ":120932,"åĸģ":120933,"åĸŁ":120934,"åķ¾":120935,"åĹĸ":120936,"åĸij":120937,"åĹŁ":120938,"åĹŀ":120939,"åĸĻ":120940,"åµĺ":120941,"åµĸ":120942,"å´´":120943,"éģĦ":120944,"è©Ī":120945,"åµİ":120946,"嵬":120947,"åµĽ":120948,"嵯":120949,"åµĿ":120950,"嵫":120951,"å¹Ħ":120952,"åµĭ":120953,"èµķ":120954,"éĵ»":120955,"éĵ¼":120956,"éĵ¿":120957,"éĶĥ":120958,"éĶĨ":120959,"éĶĩ":120960,"éĶī":120961,"éĶı":120962,"éĶij":120963,"éĶĴ":120964,"éĶĶ":120965,"éĶķ":120966,"æİ£":120967,"磬":120968,"æ°°":120969,"毳":120970,"毽":120971,"çĬĬ":120972,"çĬĦ":120973,"çĬĭ":120974,"é¹Ħ":120975,"çĬį":120976,"åµĩ":120977,"é»į":120978,"ç¨ĥ":120979,"ç¨Ĥ":120980,"çŃļ":120981,"çѵ":120982,"çŃĮ":120983,"åĤ£":120984,"åĤĪ":120985,"èĪĦ":120986,"çīį":120987,"åĤ¥":120988,"åĤ§":120989,"éģij":120990,"åĤ©":120991,"徨":120992,"åªŃ":120993,"çķ²":120994,"å¼ij":120995,"ç¿ķ":120996,"é¹Ĩ":120997,"èħĪ":120998,"èħĵ":120999,"èħĨ":121000,"èħ´":121001,"èħļ":121002,"èħ±":121003,"鱿":121004,"é²Ģ":121005,"é²Ĥ":121006,"çĮ¢":121007,"çĮ¹":121008,"çĮ¥":121009,"é£ĵ":121010,"è§ŀ":121011,"è§ļ":121012,"çĮ±":121013,"é¢İ":121014,"飧":121015,"é¦ĩ":121016,"é¦Ĭ":121017,"亵":121018,"èĦĶ":121019,"è£Ĵ":121020,"çĹ£":121021,"çŨ":121022,"çŦ":121023,"çĹŀ":121024,"çŤ":121025,"çŧ":121026,"èµĵ":121027,"竦":121028,"çĵ¿":121029,"åķ»":121030,"é¢ı":121031,"é¹ĩ":121032,"éĺij":121033,"éĺĴ":121034,"éĺķ":121035,"ç²ŀ":121036,"éģĴ":121037,"åѳ":121038,"çĦ¯":121039,"çĦľ":121040,"çĦ±":121041,"é¹Ī":121042,"渫":121043,"æ¹®":121044,"æ¹İ":121045,"æ¹ľ":121046,"æ¹į":121047,"湫":121048,"溲":121049,"æ¹Ł":121050,"æºĨ":121051,"æ¹²":121052,"æ¹Ķ":121053,"æ¹ī":121054,"渥":121055,"æ»ģ":121056,"æĦł":121057,"æĥº":121058,"æĦ¦":121059,"æĥ´":121060,"æĦĢ":121061,"æĦİ":121062,"æĦĶ":121063,"åĸ¾":121064,"å¯IJ":121065,"è°Ł":121066,"裢":121067,"è£İ":121068,"裥":121069,"祾":121070,"è°ł":121071,"è°¡":121072,"è°¥":121073,"è°§":121074,"åѱ":121075,"å¼¼":121076,"å·½":121077,"éªĺ":121078,"媪":121079,"å·¯":121080,"ç¿ļ":121081,"çļ´":121082,"éªĽ":121083,"ç¼Ĥ":121084,"ç¼ĥ":121085,"ç¼Ħ":121086,"å½ĺ":121087,"ç¼ĩ":121088,"ç¼Ī":121089,"ç¼Į":121090,"ç¼ij":121091,"ç¼Ĵ":121092,"ç¼Ĺ":121093,"飨":121094,"èĢ¢":121095,"çijģ":121096,"çijĹ":121097,"çijĦ":121098,"éģ¨":121099,"éªľ":121100,"飫":121101,"é«¡":121102,"塬":121103,"éĦ¢":121104,"è¶Ķ":121105,"è¶ij":121106,"æijħ":121107,"æijģ":121108,"èľĩ":121109,"æIJĭ":121110,"æIJª":121111,"æIJIJ":121112,"æIJĽ":121113,"æIJł":121114,"æijĪ":121115,"å½Ģ":121116,"æ¯Ĥ":121117,"æIJ¦":121118,"æIJ¡":121119,"èĵģ":121120,"æĪ¡":121121,"èĵį":121122,"éĦŀ":121123,"èĵIJ":121124,"èĵ¦":121125,"é¹ĭ":121126,"èĴ½":121127,"èĵĸ":121128,"èĵĬ":121129,"èĴ¯":121130,"èĵŁ":121131,"èĵij":121132,"èĴº":121133,"èĵł":121134,"èĴŁ":121135,"èĴ¡":121136,"èĴ¹":121137,"èĴ´":121138,"èĴĹ":121139,"èĵ¥":121140,"æ¥Ķ":121141,"æ¥Ĥ":121142,"æ¥Ŀ":121143,"楫":121144,"楸":121145,"椴":121146,"æ§Į":121147,"楯":121148,"çļĻ":121149,"æ¦Ī":121150,"æ§İ":121151,"æ¦ī":121152,"楦":121153,"楣":121154,"楹":121155,"椽":121156,"åī½":121157,"éħ©":121158,"èľĥ":121159,"ç¢Ľ":121160,"ç¢ĵ":121161,"硼":121162,"ç¢ī":121163,"ç¢ļ":121164,"ç¢ĩ":121165,"ç¢ľ":121166,"é¹Į":121167,"è¾ı":121168,"é¾ĥ":121169,"é¾ħ":121170,"訾":121171,"ç²²":121172,"çĿļ":121173,"åĹª":121174,"éŁª":121175,"åĹ·":121176,"åĹī":121177,"çĿ¨":121178,"çĿ¢":121179,"éĽİ":121180,"çĿ¥":121181,"åĹij":121182,"åĹ«":121183,"åŬ":121184,"åĹĶ":121185,"åĹĿ":121186,"æĪ¥":1211
87,"åĹĦ":121188,"çħ¦":121189,"æļĦ":121190,"éģ¢":121191,"æļĮ":121192,"è·¬":121193,"è·¶":121194,"è·¸":121195,"è·IJ":121196,"è·£":121197,"è·¹":121198,"èĽ¸":121199,"èľĬ":121200,"èľį":121201,"èľī":121202,"èľ£":121203,"çķ¹":121204,"èĽ¹":121205,"åĹ¥":121206,"åĹ²":121207,"åĹ³":121208,"åĹĮ":121209,"åĹį":121210,"åĹIJ":121211,"åŤ":121212,"åŵ":121213,"罨":121214,"åµĬ":121215,"åµ´":121216,"骰":121217,"éĶĹ":121218,"éĶĽ":121219,"éĶľ":121220,"éĶĿ":121221,"éĶŀ":121222,"éĶŁ":121223,"éĶ¢":121224,"é͍":121225,"éĶ©":121226,"éĶŃ":121227,"éͱ":121228,"éĽī":121229,"æ°²":121230,"çĬı":121231,"æŃĥ":121232,"ç¨ŀ":121233,"ç¨Ĺ":121234,"ç¨Ķ":121235,"çŃł":121236,"çŃ¢":121237,"çŃ®":121238,"çѲ":121239,"çīĴ":121240,"æķ«":121241,"å¾Ń":121242,"æĦĨ":121243,"èīĦ":121244,"è§İ":121245,"毹":121246,"è²Ĭ":121247,"è²ħ":121248,"è²ī":121249,"é¢Ķ":121250,"èħł":121251,"èħ©":121252,"èħ¼":121253,"èħŃ":121254,"èħ§":121255,"å¡į":121256,"媵":121257,"é²ħ":121258,"é²Ĩ":121259,"é²ĩ":121260,"é²Ī":121261,"é²ĭ":121262,"é²IJ":121263,"èĤĦ":121264,"é¹IJ":121265,"é£ķ":121266,"è§¥":121267,"éģĽ":121268,"é¦IJ":121269,"é¹ij":121270,"亶":121271,"çĺĥ":121272,"çű":121273,"çĹ¼":121274,"çĹ¿":121275,"çĺIJ":121276,"çĺģ":121277,"çĺĨ":121278,"éºĤ":121279,"æŃĨ":121280,"æĹĴ":121281,"éĺĸ":121282,"éĺĹ":121283,"ç¾§":121284,"è±¢":121285,"ç²³":121286,"çĮ·":121287,"çħ³":121288,"çħ¨":121289,"çħħ":121290,"çħĬ":121291,"çħ¸":121292,"çħº":121293,"æ»Ł":121294,"溱":121295,"æºĺ":121296,"æ¼Ń":121297,"滢":121298,"溥":121299,"溽":121300,"è£Ł":121301,"溻":121302,"溷":121303,"æ»Ĺ":121304,"滫":121305,"溴":121306,"æ»ı":121307,"æ»ĥ":121308,"滦":121309,"æºı":121310,"æ»Ĥ":121311,"æ»ĵ":121312,"æºŁ":121313,"滪":121314,"æĦ«":121315,"æħĬ":121316,"é²İ":121317,"éªŀ":121318,"çªł":121319,"窣":121320,"裱":121321,"裨":121322,"裾":121323,"裰":121324,"ç¦Ĭ":121325,"è°©":121326,"è°ª":121327,"媾":121328,"å««":121329,"媲":121330,"å«Ĵ":121331,"å«Ķ":121332,"媸":121333,"ç¼Ļ":121334,"ç¼ľ":121335,"ç¼Ľ":121336,"è¾Ķ":121337,"éªĿ":121338,"ç¼Ł":121339,"缡":121340,"ç¼¢":121341,"ç¼£":121342,"éªŁ":121343,"èĢ¥":121344,"çĴĪ":121345,"çijŃ":121346,"çįĴ":121347,"è§ı":121348,"æħĿ":121349,"å«ł":121350,"åıĨ":121351,"æij½":121352,"å¢ģ":121353,"æĴĤ":121354,"æijŀ":121355,"æĴĦ":121356,"ç¿¥":121357,"è¸ħ":121358,"æijŃ":121359,"å¢ī":121360,"å¢Ĵ":121361,"æ¦ĸ":121362,"綦":121363,"èĶ«":121364,"èĶ·":121365,"éĿº":121366,"éĿ¼":121367,"éŀħ":121368,"éĿ¿":121369,"çĶį":121370,"è͏":121371,"èĶŁ":121372,"èĶº":121373,"æĪ¬":121374,"èķĸ":121375,"èĶ»":121376,"èĵ¿":121377,"æĸ¡":121378,"é¹ķ":121379,"èĵ¼":121380,"æ¦Ľ":121381,"榧":121382,"榫":121383,"æ¦Ń":121384,"æ§Ķ":121385,"榱":121386,"æ§ģ":121387,"æ§ł":121388,"榷":121389,"åĥ°":121390,"éħ½":121391,"éħ¹":121392,"碡":121393,"碴":121394,"碣":121395,"碲":121396,"èĩ§":121397,"豨":121398,"殡":121399,"éľģ":121400,"èľļ":121401,"é¾ĩ":121402,"é¾Ī":121403,"äģ":121404,"äģĸ":121405,"çĿ½":121406,"åĺŀ":121407,"åĺĪ":121408,"åĺĮ":121409,"åĺģ":121410,"æļĿ":121411,"è¸Į":121412,"è¸ī":121413,"èľŀ":121414,"èľ¥":121415,"èľ®":121416,"èĿĪ":121417,"èľ´":121418,"èľ±":121419,"èľ©":121420,"èľ·":121421,"èľ¿":121422,"èŀĤ":121423,"èľ¢":121424,"åĺ¡":121425,"é¹Ĺ":121426,"åĺ£":121427,"åĺ¤":121428,"åĺļ":121429,"åĹ¾":121430,"åĺ§":121431,"ç½´":121432,"ç½±":121433,"å¹Ķ":121434,"å¶Ĥ":121435,"å¹Ľ":121436,"èµĻ":121437,"ç½Ĥ":121438,"骷":121439,"骶":121440,"é¹ĺ":121441,"éͲ":121442,"éĶ´":121443,"éͶ":121444,"éĶ·":121445,"é͏":121446,"é͵":121447,"éķĤ":121448,"çĬĴ":121449,"ç®IJ":121450,"箦":121451,"ç®§":121452,"箸":121453,"箬":121454,"ç®ħ":121455,"箪":121456,"箾":121457,"箢":121458,"ç®ĵ":121459,"åĥĸ":121460,"åĦĨ":121461,"åĥ³":121462,"åĥŃ":121463,"åĬģ":121464,"åĥ®":121465,"éŃĥ":121466,"éŃĨ":12146
7,"çĿ¾":121468,"èīĭ":121469,"éĦ±":121470,"èĨĪ":121471,"èĨij":121472,"é²ij":121473,"é²Ķ":121474,"é²ļ":121475,"é²Ľ":121476,"é²Ł":121477,"çįIJ":121478,"è§«":121479,"éĽĴ":121480,"夤":121481,"é¦ij":121482,"éĬ®":121483,"塾":121484,"çĺĮ":121485,"çĺĬ":121486,"çĺĺ":121487,"çĺĻ":121488,"æĹĸ":121489,"èĨĤ":121490,"éĺļ":121491,"éĦ¯":121492,"é²ŀ":121493,"粿":121494,"ç²¼":121495,"ç³ģ":121496,"æ§Ĭ":121497,"é¹ļ":121498,"çĨĺ":121499,"çĨ¥":121500,"æ½¢":121501,"æ¼ķ":121502,"滹":121503,"漯":121504,"æ¼¶":121505,"æ½ĭ":121506,"æ½´":121507,"漪":121508,"æ¼ī":121509,"漩":121510,"æ¾ī":121511,"æħµ":121512,"æIJ´":121513,"窨":121514,"寤":121515,"ç¶®":121516,"è°®":121517,"褡":121518,"è¤Ļ":121519,"è¤ĵ":121520,"è¤Ľ":121521,"è¤Ĭ":121522,"è°¯":121523,"è°°":121524,"è°²":121525,"å±£":121526,"é¹Ľ":121527,"嫱":121528,"å«ĸ":121529,"嫦":121530,"å«ļ":121531,"å«ĺ":121532,"é¼IJ":121533,"çŀĢ":121534,"é¹ľ":121535,"éªł":121536,"ç¼¥":121537,"缦":121538,"ç¼§":121539,"缨":121540,"骢":121541,"缫":121542,"è̦":121543,"ȩ̀":121544,"çĴľ":121545,"çĴİ":121546,"çĴģ":121547,"å¥Ń":121548,"髯":121549,"é««":121550,"æĴ·":121551,"æĴħ":121552,"èµŃ":121553,"æĴ¸":121554,"éĭĨ":121555,"æĴĻ":121556,"æĴº":121557,"å¢Ģ":121558,"èģ©":121559,"è§IJ":121560,"éŀij":121561,"èķĻ":121562,"éŀĴ":121563,"èķĪ":121564,"èķ¨":121565,"èķ¤":121566,"èķŀ":121567,"èķº":121568,"çŀ¢":121569,"èķĥ":121570,"èķ²":121571,"èµľ":121572,"æ§¿":121573,"樯":121574,"æ§Ń":121575,"æ¨Ĺ":121576,"æ¨ĺ":121577,"æ§²":121578,"éĨĮ":121579,"éĨħ":121580,"éĿ¥":121581,"éŃĩ":121582,"é¤į":121583,"ç£Ķ":121584,"ç£Ļ":121585,"éľĪ":121586,"è¾ĺ":121587,"é¾ī":121588,"é¾Ĭ":121589,"è§ij":121590,"çŀĮ":121591,"çŀĭ":121592,"çŀij":121593,"åĺŃ":121594,"åĻİ":121595,"å϶":121596,"é¢Ļ":121597,"æļ¹":121598,"åĻĺ":121599,"è¸Ķ":121600,"è¸Ŀ":121601,"è¸Ł":121602,"è¸Ĵ":121603,"踬":121604,"踮":121605,"踯":121606,"踺":121607,"è¸ŀ":121608,"èĿ½":121609,"èĿ¾":121610,"èĿ»":121611,"èĿ°":121612,"èĿ®":121613,"èŀĭ":121614,"èĿĵ":121615,"èĿ£":121616,"èĿ¼":121617,"åĺ¬":121618,"é¢ļ":121619,"åĻį":121620,"åĻĻ":121621,"åĻĮ":121622,"åĻĶ":121623,"é¢Ľ":121624,"å¹ŀ":121625,"幡":121626,"å¶Ļ":121627,"å¶Ŀ":121628,"骺":121629,"éķĬ":121630,"éķī":121631,"éķĮ":121632,"éķı":121633,"éķĴ":121634,"éķĵ":121635,"éķĶ":121636,"稷":121637,"ç®´":121638,"ç¯ij":121639,"ç¯ģ":121640,"ç¯Į":121641,"çīĸ":121642,"åĦĭ":121643,"èĻ¢":121644,"é¹ŀ":121645,"èĨĺ":121646,"é²ł":121647,"鲡":121648,"é²¢":121649,"é²£":121650,"é²¥":121651,"é²§":121652,"鲩":121653,"çįĹ":121654,"çįł":121655,"觯":121656,"é¦ĵ":121657,"é¦Ķ":121658,"麾":121659,"å»Ľ":121660,"çĺĽ":121661,"çĺ¼":121662,"çĺ¢":121663,"çĺł":121664,"é½ij":121665,"ç¾°":121666,"ð¥»":121667,"ð¥»Ĺ":121668,"ç³Į":121669,"ç³į":121670,"ç³ħ":121671,"çĨľ":121672,"çĨµ":121673,"æ¾į":121674,"æ¾Į":121675,"潸":121676,"潦":121677,"æ½²":121678,"éĭĪ":121679,"æ½Ł":121680,"潺":121681,"寮":121682,"窳":121683,"è°³":121684,"褴":121685,"è¤Ł":121686,"褫":121687,"è°µ":121688,"çĨ¨":121689,"屦":121690,"åĭ°":121691,"æĪ®":121692,"èĿ¥":121693,"缬":121694,"ç¼®":121695,"缯":121696,"骣":121697,"çķ¿":121698,"èĢ©":121699,"è̍":121700,"èĢª":121701,"çĴŁ":121702,"éĿĽ":121703,"çĴł":121704,"çĴĺ":121705,"èģ±":121706,"èŀ¯":121707,"é«»":121708,"é«Ń":121709,"髹":121710,"æĵĢ":121711,"çĶı":121712,"æĵŀ":121713,"縳":121714,"磬":121715,"é¢ŀ":121716,"èķ»":121717,"é¢Ł":121718,"èĸ¤":121719,"èĸ¨":121720,"æªł":121721,"èĸı":121722,"èĸ®":121723,"èĸľ":121724,"èĸħ":121725,"樾":121726,"æ©Ľ":121727,"æ©ĩ":121728,"樵":121729,"æªİ":121730,"橹":121731,"樽":121732,"樨":121733,"橼":121734,"墼":121735,"æ©IJ":121736,"ç¿®":121737,"éĨIJ":121738,"éĨį":121739,"éĨļ":121740,"磲":121741,"èµĿ":121742,"殪":121743,"éľı":121744,"éĮ¾":121745,"è¾ļ":121746,"éģ½":121747,"æ°ħ":
121748,"çŀŁ":121749,"çŀł":121750,"çŀ°":121751,"åļĦ":121752,"åļĨ":121753,"åϤ":121754,"æļ¾":121755,"è¹Ģ":121756,"踵":121757,"踽":121758,"è¹ī":121759,"è¹ģ":121760,"èŀ¨":121761,"èŀĪ":121762,"èŀħ":121763,"èŀŃ":121764,"èŀł":121765,"èŀŁ":121766,"åϱ":121767,"åĻ«":121768,"åĻ»":121769,"åϼ":121770,"ç½¹":121771,"åľľ":121772,"ä¦":121773,"ä¦ĥ":121774,"éķĹ":121775,"éķĺ":121776,"éķļ":121777,"éķĽ":121778,"éķĿ":121779,"éķŀ":121780,"éķł":121781,"æ°ĩ":121782,"æ°Ĩ":121783,"ç©ij":121784,"ç¯Ŀ":121785,"篥":121786,"篦":121787,"篪":121788,"ç¯Ļ":121789,"çĽ¥":121790,"åĬĵ":121791,"翱":121792,"éŃī":121793,"éŃĪ":121794,"å¾¼":121795,"æŃĻ":121796,"èĨ¦":121797,"èĨĻ":121798,"é²®":121799,"é²±":121800,"é²³":121801,"é²´":121802,"é²µ":121803,"é²·":121804,"é²»":121805,"çį´":121806,"çįŃ":121807,"çį¬":121808,"éĤĤ":121809,"é¹§":121810,"廨":121811,"èµŁ":121812,"çĺ°":121813,"廪":121814,"çĺ¿":121815,"çĺµ":121816,"çĺ´":121817,"çĻĥ":121818,"çĺ³":121819,"éºĩ":121820,"éºĪ":121821,"嬴":121822,"å£ħ":121823,"ç³Ĺ":121824,"çĶij":121825,"çĩİ":121826,"çĩł":121827,"çĩĶ":121828,"çĩ§":121829,"æ¿ij":121830,"æ¿ī":121831,"æ½ŀ":121832,"æ¾§":121833,"æ¾¹":121834,"æ¾¥":121835,"æ¾¶":121836,"æ¿Ĥ":121837,"褰":121838,"窸":121839,"å¬ĸ":121840,"çĬŁ":121841,"éļ°":121842,"å¬Ĺ":121843,"颡":121844,"ç¼±":121845,"ç¼²":121846,"ç¼³":121847,"çĴ©":121848,"çĴª":121849,"èŀ«":121850,"æĵ¤":121851,"å£ķ":121852,"è§³":121853,"ç½Ħ":121854,"æĵ¢":121855,"èĸ¹":121856,"éŀ¡":121857,"éŀ¬":121858,"èĸ·":121859,"èĹĵ":121860,"èĹģ":121861,"æªĦ":121862,"檩":121863,"æĩĭ":121864,"éĨ¢":121865,"翳":121866,"ç¤ħ":121867,"磴":121868,"鹩":121869,"é¾ĭ":121870,"é¾Į":121871,"è±³":121872,"å£ij":121873,"é»»":121874,"åļı":121875,"åļħ":121876,"è¹ij":121877,"è¹Ĵ":121878,"è¹Ĭ":121879,"èŁ¥":121880,"èŀ¬":121881,"èŀµ":121882,"çĸĥ":121883,"èŀ³":121884,"èŁij":121885,"åļĵ":121886,"ç½½":121887,"ç½¾":121888,"å¶·":121889,"黾":121890,"é»Ŀ":121891,"é«ģ":121892,"é«Ģ":121893,"éķ¡":121894,"éķ¢":121895,"éķ£":121896,"éķ¦":121897,"éķ§":121898,"éķ©":121899,"éķª":121900,"éķ«":121901,"ç½ħ":121902,"ç°Į":121903,"篾":121904,"篼":121905,"ç°ĸ":121906,"ç°ĭ":121907,"é¼¢":121908,"åĦ¡":121909,"鹪":121910,"é¼¾":121911,"çļ¤":121912,"éŃį":121913,"é¾ł":121914,"ç¹ĩ":121915,"è²ĺ":121916,"éĤĪ":121917,"è²Ķ":121918,"èĩĮ":121919,"èĨ»":121920,"èĩĨ":121921,"èĩĥ":121922,"é²¼":121923,"é²½":121924,"é³Ģ":121925,"é³ĥ":121926,"é³ħ":121927,"é³ĩ":121928,"é³Ĭ":121929,"èŀ½":121930,"çĩ®":121931,"鹫":121932,"ç³ľ":121933,"縻":121934,"çĻį":121935,"éºĭ":121936,"æĩij":121937,"æ¿¡":121938,"æ¿®":121939,"æ¿ŀ":121940,"æ¿ł":121941,"濯":121942,"è¹ĩ":121943,"è¬ĩ":121944,"éĤĥ":121945,"è¥ģ":121946,"æªĹ":121947,"æĵĺ":121948,"åŃº":121949,"éļ³":121950,"嬷":121951,"èŁĬ":121952,"鹬":121953,"éįª":121954,"éıĬ":121955,"é¬Ī":121956,"é¬ĥ":121957,"çŀ½":121958,"éŀ¯":121959,"éŀ¨":121960,"éŀ«":121961,"éŀ§":121962,"éŀ£":121963,"èĹľ":121964,"èĹł":121965,"éĨª":121966,"è¹Ļ":121967,"ç¤ĵ":121968,"çĩ¹":121969,"餮":121970,"çŀ¿":121971,"æĽĽ":121972,"颢":121973,"èºĩ":121974,"è¹ļ":121975,"èŁĽ":121976,"èŁª":121977,"èŁł":121978,"èŁ®":121979,"é¹®":121980,"黳":121981,"黣":121982,"é«ħ":121983,"é«Ĥ":121984,"éķ¬":121985,"éķŃ":121986,"éķ¯":121987,"馥":121988,"ç°Ł":121989,"ç°ª":121990,"鼬":121991,"鼳":121992,"èīŁ":121993,"é³İ":121994,"é³ı":121995,"é³IJ":121996,"çĻŀ":121997,"çĻĶ":121998,"糨":121999,"蹩":122000,"éİı":122001,"éĤĭ":122002,"é¬ı":122003,"æĶī":122004,"éŀ²":122005,"éŀ´":122006,"èĹ¿":122007,"èĺ§":122008,"èĺħ":122009,"éĨ®":122010,"éĨ¯":122011,"éħĥ":122012,"éľª":122013,"éľŃ":122014,"龨":122015,"黼":122016,"åļ¯":122017,"è¹°":122018,"è¹¶":122019,"è¹½":122020,"è¹¼":122021,"è¹´":122022,"è¹¾":122023,"蹿":122024,"èłĸ":122025,"èłĵ":122026,"èŁ¾
":122027,"èłĬ":122028,"黢":122029,"é«ĭ":122030,"é«Į":122031,"éķ²":122032,"ç±Ģ":122033,"é½ģ":122034,"éŃij":122035,"èī¨":122036,"é³ĵ":122037,"é³Ķ":122038,"é³ķ":122039,"é³Ĺ":122040,"é³Ļ":122041,"éıĸ":122042,"羸":122043,"ã¸Ĩ":122044,"çĢ£":122045,"çĢĽ":122046,"襦":122047,"è°¶":122048,"è¥ŀ":122049,"骥":122050,"ç¼µ":122051,"çĵĴ":122052,"æĶĺ":122053,"èĺ©":122054,"èĺĸ":122055,"éĨ´":122056,"éľ°":122057,"éħĨ":122058,"çŁį":122059,"èºħ":122060,"é¼į":122061,"å·ī":122062,"黩":122063,"黥":122064,"黪":122065,"éķ³":122066,"éķ´":122067,"é»§":122068,"çºĤ":122069,"çĴº":122070,"鼯":122071,"èĩľ":122072,"é³ľ":122073,"é³Ŀ":122074,"é³Ł":122075,"çį¾":122076,"åŃĢ":122077,"骧":122078,"çĵĺ":122079,"é¼Ļ":122080,"éĨº":122081,"礴":122082,"颦":122083,"æĽ©":122084,"é³¢":122085,"éºĿ":122086,"å¤Ķ":122087,"çĪĿ":122088,"çģı":122089,"禳":122090,"éIJ¾":122091,"ç¾¼":122092,"èł¡":122093,"è̱":122094,"é¹³":122095,"æ°į":122096,"é¥ķ":122097,"èºIJ":122098,"é«ij":122099,"éķµ":122100,"ç©°":122101,"é¥Ķ":122102,"鬻":122103,"鬣":122104,"è¶±":122105,"æĶ«":122106,"æĶ¥":122107,"颧":122108,"èºľ":122109,"é¼¹":122110,"çϝ":122111,"èł²":122112,"èł¹":122113,"èºŀ":122114,"è¡¢":122115,"çģŀ":122116,"襻":122117,"çºĽ":122118,"鬣":122119,"æĶ®":122120,"åĽĶ":122121,"é¦ķ":122122,"æĪĨ":122123,"ç΍":122124,"é½ī":122125,"äºį":122126,"å°¢":122127,"å½³":122128,"åį¬":122129,"殳":122130,"ðłĻ¶":122131,"æ¯Į":122132,"éĤĺ":122133,"æĪĭ":122134,"åľ¢":122135,"æ°ķ":122136,"ä¼ĭ":122137,"ä»Ŀ":122138,"åĨ®":122139,"æ°¿":122140,"æ±Ī":122141,"æ°¾":122142,"å¿ī":122143,"å®Ħ":122144,"ð¬£Ļ":122145,"è®±":122146,"æīŀ":122147,"åľ²":122148,"åľ«":122149,"èĬı":122150,"èĬĥ":122151,"æľ³":122152,"æľ¸":122153,"ð¨Ļ":122154,"ð¨Ļ¸":122155,"éĤ¨":122156,"åIJĴ":122157,"åIJĸ":122158,"å±¼":122159,"å±¾":122160,"辿":122161,"éĴĨ":122162,"仳":122163,"ä¼£":122164,"ä¼Ī":122165,"çĻ¿":122166,"çĶª":122167,"éĤł":122168,"çĬ´":122169,"åĨ±":122170,"éĤ¡":122171,"ð¬ĩķ":122172,"æ±ĭ":122173,"äľ":122174,"äľ£":122175,"è®»":122176,"ð¬£ŀ":122177,"åŃĸ":122178,"ð¬ĺĵ":122179,"纩":122180,"çİĴ":122181,"çİĵ":122182,"çİĺ":122183,"çİļ":122184,"åά":122185,"ð«ŃŁ":122186,"åĿľ":122187,"åĿī":122188,"æī½":122189,"ð«Ń¢":122190,"åĿĭ":122191,"æīº":122192,"ã§ij":122193,"æ¯IJ":122194,"èĬ°":122195,"èĬ£":122196,"èĭĬ":122197,"èĭī":122198,"èĬĺ":122199,"èĬ´":122200,"èĬł":122201,"ð«ĩ":122202,"ð«ĩŃ":122203,"èĬ¤":122204,"æĿķ":122205,"æĿĻ":122206,"æĿĦ":122207,"æĿ§":122208,"æĿ©":122209,"å°ª":122210,"å°¨":122211,"轪":122212,"ð«IJĦ":122213,"åĿĴ":122214,"èĬĪ":122215,"æĹ´":122216,"æĹµ":122217,"åijĻ":122218,"ãķ":122219,"ãķ®":122220,"å²į":122221,"ð«µ":122222,"𫵷":122223,"å²ł":122224,"å²ľ":122225,"åijĩ":122226,"åĨı":122227,"è§ĥ":122228,"å²Ļ":122229,"ä¼¾":122230,"ãijĩ":122231,"ä¼Ń":122232,"ä½ĸ":122233,"ä¼²":122234,"ä½ģ":122235,"é£ı":122236,"çĭĥ":122237,"éŶ":122238,"æ±§":122239,"汫":122240,"ð£²ĺ":122241,"ð£²Ĺ":122242,"æ²Ħ":122243,"æ²ĺ":122244,"ð¬ĩĻ":122245,"æ±Ń":122246,"ã³ĩ":122247,"æ²ĩ":122248,"å¿®":122249,"忳":122250,"忺":122251,"𬣡":122252,"ç¥ĥ":122253,"è¯ĩ":122254,"éĤ²":122255,"è¯İ":122256,"è¯IJ":122257,"å±ĥ":122258,"ð«¸":122259,"𫸩":122260,"å²Ĭ":122261,"éĺ½":122262,"䢺":122263,"éĺ¼":122264,"妧":122265,"å¦ĺ":122266,"ð¨ļ":122267,"ð¨ļķ":122268,"纮":122269,"驲":122270,"ð«ĺľ":122271,"纻":122272,"ð¬ĺĺ":122273,"ð«ĺĿ":122274,"纼":122275,"çݤ":122276,"çİŀ":122277,"çݱ":122278,"çİŁ":122279,"éĤ½":122280,"éĤ¿":122281,"åĿ¥":122282,"åĿ°":122283,"åĿ¬":122284,"åĿ½":122285,"å¼Ĩ":122286,"è̵":122287,"䢼":122288,"ð¦Ń":122289,"ð¦Ńľ":122290,"èĮĭ":122291,"èĭ§":122292,"èĭ¾":122293,"èĭł":122294,"æŀħ":122295,"ãŃİ":122296,"æŀĺ":122297,"æŀį":122298,"çŁ¼":122299,"磻":122300,"åĮ¼":122301,"ð¬¨Ĥ":122302,"ð¬Ģ©":122303,"ð¬Ģª":
122304,"æĹ¿":122305,"æĺĦ":122306,"æĺĴ":122307,"æĺĪ":122308,"åĴī":122309,"åĴĩ":122310,"åĴį":122311,"å²µ":122312,"å²½":122313,"岨":122314,"å²ŀ":122315,"å³Ĥ":122316,"ãŁ":122317,"ãŁĥ":122318,"åĽ·":122319,"𬬩":122320,"éĴIJ":122321,"éĴĶ":122322,"éĴĸ":122323,"çī¥":122324,"ä½´":122325,"åŀĪ":122326,"ä¾ģ":122327,"ä¾¹":122328,"佸":122329,"佺":122330,"éļ¹":122331,"ãijĬ":122332,"ä¾Ĥ":122333,"ä½½":122334,"ä¾ĺ":122335,"éĥĪ":122336,"èĪł":122337,"éĥIJ":122338,"éĥĥ":122339,"æĶ½":122340,"èĤŃ":122341,"èĤ¸":122342,"èĤ·":122343,"çĭī":122344,"çĭĿ":122345,"饳":122346,"å¿ŀ":122347,"çĤĮ":122348,"çĤĨ":122349,"æ³Ļ":122350,"沺":122351,"æ³Ĥ":122352,"æ³ľ":122353,"æ³ĥ":122354,"æ³ĩ":122355,"æĢĬ":122356,"å³ĥ":122357,"穸":122358,"ç¥ĭ":122359,"ç¥Ĭ":122360,"ð«į£":122361,"𬣳":122362,"𬩽":122363,"鸤":122364,"å¼¢":122365,"弨":122366,"éĻij":122367,"𬮿":122368,"éĻİ":122369,"ð¬¯Ģ":122370,"åįº":122371,"乸":122372,"å¦Ń":122373,"å§Ī":122374,"ð«°":122375,"ð«°Ľ":122376,"迳":122377,"åıķ":122378,"𬳵":122379,"驵":122380,"𬳶":122381,"äĮ":122382,"äĮ¹":122383,"驺":122384,"ð«łĬ":122385,"ç»ĭ":122386,"ç»IJ":122387,"çłī":122388,"èĢĶ":122389,"ãĽĥ":122390,"çݶ":122391,"çıĩ":122392,"çıħ":122393,"ð¬įĽ":122394,"çıĭ":122395,"çݹ":122396,"çıĮ":122397,"çİ¿":122398,"飨":122399,"åŀļ":122400,"åŀ¯":122401,"åŀĻ":122402,"åŀ²":122403,"åŁı":122404,"åŀį":122405,"èĢĩ":122406,"é¿į":122407,"åŀİ":122408,"åŀ´":122409,"åŀŁ":122410,"åŀŀ":122411,"æĮĵ":122412,"åŀµ":122413,"åŀı":122414,"æĭ¶":122415,"èįĸ":122416,"èįģ":122417,"èįĻ":122418,"èįĽ":122419,"èĮĪ":122420,"èĮ½":122421,"èįĦ":122422,"èĮº":122423,"ð¬ľ¬":122424,"èįĵ":122425,"èĮ³":122426,"ð¦°":122427,"𦰡":122428,"èĮĽ":122429,"èįŃ":122430,"ãŃķ":122431,"æŁ·":122432,"æŁĥ":122433,"æŁĬ":122434,"æŀ¹":122435,"æłIJ":122436,"æŁĸ":122437,"éĥļ":122438,"åīħ":122439,"ä´ĵ":122440,"迺":122441,"åİĸ":122442,"çłĨ":122443,"çłij":122444,"çłĦ":122445,"èĢı":122446,"å¥ĵ":122447,"ä¶":122448,"ä¶®":122449,"è½µ":122450,"è½·":122451,"è½¹":122452,"轺":122453,"æĺº":122454,"ðª¾":122455,"𪾢":122456,"æĺ½":122457,"缷":122458,"åĴ¡":122459,"åĴº":122460,"æĺ³":122461,"æĺ£":122462,"æĺ¤":122463,"æĺ«":122464,"æĺ¡":122465,"åĴ¥":122466,"æĺª":122467,"èĻ·":122468,"èϏ":122469,"åĵĥ":122470,"å³ĺ":122471,"èĢij":122472,"å³Ľ":122473,"𪨰":122474,"å³Ĺ":122475,"å³§":122476,"帡":122477,"éĴĺ":122478,"ð«ĵ§":122479,"éĴľ":122480,"𬬮":122481,"𬬱":122482,"ð¬¬Ń":122483,"éĴª":122484,"éĴ¬":122485,"éĴŃ":122486,"磧":122487,"秬":122488,"ä¿«":122489,"èĪģ":122490,"ä¿ľ":122491,"ä¿Ļ":122492,"ä¿į":122493,"åŀķ":122494,"è¡İ":122495,"èĪ£":122496,"å¼ĩ":122497,"ä¾´":122498,"鸧":122499,"äı¡":122500,"èĥł":122501,"ð¦Ļ¶":122502,"èĥĪ":122503,"èĥ©":122504,"èĥ£":122505,"æľı":122506,"é£IJ":122507,"è¨Ħ":122508,"饻":122509,"庤":122510,"çĸ¢":122511,"çĤ£":122512,"çĤŁ":122513,"ã¶":122514,"ã¶²":122515,"æ´Ń":122516,"æ´ĺ":122517,"æ´ĵ":122518,"æ´¿":122519,"ã³ļ":122520,"æ³ļ":122521,"æµĪ":122522,"æµī":122523,"æ´¸":122524,"æ´ij":122525,"æ´¢":122526,"æ´Ī":122527,"æ´ļ":122528,"æ´º":122529,"æ´¨":122530,"æµIJ":122531,"ã³ĺ":122532,"æ´´":122533,"æ´£":122534,"æģĶ":122535,"宬":122536,"çªĢ":122537,"æīĤ":122538,"è¢Ĩ":122539,"ç¥ı":122540,"ç¥IJ":122541,"ç¥ķ":122542,"åıļ":122543,"éϧ":122544,"éĻŀ":122545,"å¨Ģ":122546,"å§ŀ":122547,"å§±":122548,"姤":122549,"å§¶":122550,"å§½":122551,"æŀ²":122552,"ç»ĸ":122553,"éªĥ":122554,"ð¬ĺ¡":122555,"𬳽":122556,"ð¬ĺ©":122557,"ð«Ħ§":122558,"å½ĸ":122559,"éªī":122560,"æģĿ":122561,"çıª":122562,"çıĽ":122563,"çı¹":122564,"çIJĬ":122565,"çݼ":122566,"çıĸ":122567,"ðªŁ":122568,"ðªŁĿ":122569,"çı½":122570,"çı¦":122571,"çı«":122572,"çıĴ":122573,"ð¬į¤":122574,"çı¢":122575,"çıķ":122576,"çıĿ":122577,"ð«Ń¼":122578,"åŁĹ":122579,"åŀ¾":122580,"åŀº":122581,
"åŁĨ":122582,"åŀ¿":122583,"åŁĮ":122584,"åŁĩ":122585,"èݰ":122586,"èĮĿ":122587,"ð¬ľ¯":122588,"éĦĢ":122589,"èݶ":122590,"èİĿ":122591,"äĵĸ":122592,"èİĻ":122593,"æł»":122594,"æ¡ł":122595,"ð¬Ĥ":122596,"ð¬Ĥ©":122597,"æ¡Ħ":122598,"æ¢ł":122599,"æł´":122600,"梴":122601,"æłĴ":122602,"éħİ":122603,"éħı":122604,"ð«łĨ":122605,"çłµ":122606,"çłł":122607,"çł«":122608,"糬":122609,"ç¡ģ":122610,"æģ§":122611,"ç¿ĥ":122612,"éĥª":122613,"ð¨IJ":122614,"ð¨IJĪ":122615,"è¾Ģ":122616,"è¾ģ":122617,"ð¬Į":122618,"ð¬ĮĹ":122619,"åīķ":122620,"èµĢ":122621,"åĵ¢":122622,"æĻħ":122623,"æĻĬ":122624,"åĶĿ":122625,"åĵ³":122626,"åĵ±":122627,"åĨĶ":122628,"æĻĶ":122629,"æĻIJ":122630,"çķĸ":122631,"èļĦ":122632,"èļĨ":122633,"ð«ij":122634,"ð«ij¡":122635,"帱":122636,"å´ģ":122637,"峿":122638,"𪨶":122639,"å´Ħ":122640,"帨":122641,"å´Ģ":122642,"èµĨ":122643,"𬬸":122644,"éĴ·":122645,"𬬻":122646,"𬬹":122647,"𬬿":122648,"ð¬Ńģ":122649,"çľļ":122650,"çĶ¡":122651,"笫":122652,"åĢ»":122653,"åĢ´":122654,"èĦ©":122655,"åĢ®":122656,"åĢķ":122657,"åĢŀ":122658,"ð«¢":122659,"𫢸":122660,"åĢĵ":122661,"å̧":122662,"è¡ĥ":122663,"èĻĴ":122664,"èĪŃ":122665,"èΝ":122666,"èĪ¥":122667,"çĵŀ":122668,"鬯":122669,"鸰":122670,"èĦİ":122671,"æľĵ":122672,"èĥ²":122673,"èĻĵ":122674,"é±½":122675,"çĭ´":122676,"å³±":122677,"çĭ»":122678,"çľ¢":122679,"ð«Ĺ§":122680,"åĭį":122681,"çĹĦ":122682,"çĸ°":122683,"çĹĥ":122684,"ç«ĺ":122685,"ç¾ĸ":122686,"ç¾ĵ":122687,"æ¡Ĭ":122688,"æķī":122689,"çĥł":122690,"çĥĶ":122691,"çĥ¶":122692,"çĥ»":122693,"ð¬ĬĪ":122694,"æ¶į":122695,"浡":122696,"æµŃ":122697,"浬":122698,"æ¶Ħ":122699,"æ¶¢":122700,"æ¶IJ":122701,"æµ°":122702,"æµŁ":122703,"æµĽ":122704,"æµ¼":122705,"æµ²":122706,"æ¶ĺ":122707,"æĤĪ":122708,"æĤĥ":122709,"æĤ¢":122710,"ð¬ĴĪ":122711,"å®§":122712,"çªħ":122713,"çªĬ":122714,"çªİ":122715,"æīħ":122716,"æīĨ":122717,"袪":122718,"è¢Ĺ":122719,"袯":122720,"祧":122721,"éļº":122722,"åł²":122723,"çĸį":122724,"ð¨º":122725,"ð¨ºĻ":122726,"éĻ´":122727,"çĥĿ":122728,"çł®":122729,"ãĽļ":122730,"åĵ¿":122731,"ç¿Ģ":122732,"ç¿Ĥ":122733,"åīŁ":122734,"𬳿":122735,"ð«Ħ¨":122736,"绤":122737,"éªį":122738,"ð¬ĺ«":122739,"äĤ":122740,"äĤ®":122741,"çIJİ":122742,"çı¸":122743,"çıµ":122744,"çIJĦ":122745,"çIJĪ":122746,"çIJĢ":122747,"çıº":122748,"æİŃ":122749,"åłİ":122750,"åłIJ":122751,"åŁ¼":122752,"æİİ":122753,"åŁ«":122754,"åłĮ":122755,"æĻ¢":122756,"ð«®":122757,"ð«®ĥ":122758,"æİŀ":122759,"åŁª":122760,"壸":122761,"ãĻį":122762,"èģį":122763,"èıĿ":122764,"èIJļ":122765,"èı¥":122766,"èİ¿":122767,"äĵ«":122768,"åĭļ":122769,"äĵ¬":122770,"èIJĨ":122771,"èıĤ":122772,"èıį":122773,"èı¼":122774,"èIJ£":122775,"äĵ¨":122776,"èıī":122777,"äĵĽ":122778,"梼":122779,"梽":122780,"桲":122781,"梾":122782,"桯":122783,"梣":122784,"æ¢Į":122785,"桹":122786,"æķĶ":122787,"åİ£":122788,"ç¡Ķ":122789,"é¿İ":122790,"ç¡Ļ":122791,"ç¡ļ":122792,"ç¡Ĭ":122793,"ç¡į":122794,"åĭĶ":122795,"ä´ķ":122796,"é¾ģ":122797,"éĢ´":122798,"åĶª":122799,"åķ«":122800,"ç¿Ī":122801,"ã«":122802,"ã«°":122803,"æĻĻ":122804,"çķ¤":122805,"ð¬±ĸ":122806,"è¶¼":122807,"è·Ĥ":122808,"èĽĥ":122809,"èļ²":122810,"ð¬Ł½":122811,"èļº":122812,"åķ´":122813,"äİĥ":122814,"å´§":122815,"å´Ł":122816,"å´ŀ":122817,"å´Ĵ":122818,"å´Į":122819,"å´¡":122820,"éĵı":122821,"ð«ĵ¯":122822,"ð«Ł¹":122823,"éĵķ":122824,"ð«Ł¼":122825,"éĵĸ":122826,"éĵĺ":122827,"éĵļ":122828,"éĵŀ":122829,"éĵ¥":122830,"éĵ´":122831,"çī»":122832,"çī¿":122833,"ç¨Ĩ":122834,"笱":122835,"笯":122836,"åģ°":122837,"åģ¡":122838,"鸺":122839,"åģŃ":122840,"åģ²":122841,"åģģ":122842,"ã¿":122843,"ã¿ł":122844,"éĦħ":122845,"åģĵ":122846,"å¾Ľ":122847,"è¡Ĵ":122848,"èγ":122849,"èβ":122850,"鸼":122851,"æĤĨ":122852,"éĦĥ":122853,"çĵ»":122854,"äĿ":122855,"äĿĻ":122856,"èĦ¶":122857,"èĦŀ":1228
58,"èĦŁ":122859,"äı²":122860,"é±¾":122861,"çĮĩ":122862,"çĮĬ":122863,"çĮĦ":122864,"è§ĸ":122865,"ðłħ":122866,"ðłħ¤":122867,"庱":122868,"庼":122869,"庳":122870,"çĹĵ":122871,"ä´Ķ":122872,"ç««":122873,"åłĥ":122874,"éĺĮ":122875,"ç¾Ŀ":122876,"ç¾ķ":122877,"çĦĨ":122878,"çĥº":122879,"çĦĮ":122880,"æ·ı":122881,"ð¬ĩ¹":122882,"æ·Ł":122883,"æ·ľ":122884,"æ·´":122885,"æ·¯":122886,"æ¹´":122887,"æ¶´":122888,"ð¬į¡":122889,"ã¥":122890,"ã¥Ħ":122891,"æĥĽ":122892,"æĥĶ":122893,"æĤ°":122894,"æĥĻ":122895,"å¯ģ":122896,"éĢŃ":122897,"ð¬¤ĩ":122898,"ð«į¯":122899,"袼":122900,"è£Ī":122901,"祲":122902,"ð¬¤Ĭ":122903,"ð«į²":122904,"è°ŀ":122905,"èī´":122906,"弸":122907,"å¼¶":122908,"ð¬¯İ":122909,"éļĥ":122910,"å©ŀ":122911,"娵":122912,"婼":122913,"åªĸ":122914,"婳":122915,"å©į":122916,"å©Į":122917,"å©«":122918,"婤":122919,"å©ĺ":122920,"å©ł":122921,"ð¬ĺ¬":122922,"ð¬ĺŃ":122923,"ð¬´Ĥ":122924,"ð«ĺ¦":122925,"绹":122926,"ð«Łħ":122927,"ð¬ĺ¯":122928,"éªķ":122929,"ð«ĺ§":122930,"絾":122931,"çı·":122932,"çIJ²":122933,"çIJ¡":122934,"çIJŁ":122935,"çIJĶ":122936,"çIJŃ":122937,"åł¾":122938,"åł¼":122939,"æıķ":122940,"ãĻĺ":122941,"åł§":122942,"åĸĨ":122943,"åł¨":122944,"å¡ħ":122945,"åłł":122946,"çµ·":122947,"ðª£":122948,"𪣻":122949,"ð¡İ":122950,"ð¡İļ":122951,"èijľ":122952,"æĥİ":122953,"èIJ³":122954,"èijĻ":122955,"éĿ¬":122956,"èij´":122957,"èĴĩ":122958,"èĴĪ":122959,"éĦļ":122960,"èĴī":122961,"èĵĩ":122962,"èIJ©":122963,"èij°":122964,"èijİ":122965,"éĦij":122966,"èĴİ":122967,"èijĸ":122968,"èĴĦ":122969,"èIJ¹":122970,"棤":122971,"棽":122972,"棫":122973,"æ¤ĵ":122974,"æ¤ij":122975,"ð¬ĥ":122976,"ð¬ĥĬ":122977,"é¹Ģ":122978,"æ¤Ĩ":122979,"æ£ĵ":122980,"棬":122981,"棪":122982,"æ¤Ģ":122983,"æ¥Ĺ":122984,"ð¬·":122985,"ð¬·ķ":122986,"çͦ":122987,"éħ¦":122988,"è§Į":122989,"奡":122990,"çļķ":122991,"硪":122992,"欹":122993,"è©Ł":122994,"ð«IJIJ":122995,"è¾Į":122996,"æ£IJ":122997,"é¾Ĥ":122998,"ð¬¹":122999,"𬹼":123000,"黹":123001,"çīļ":123002,"çĿİ":123003,"æĻ«":123004,"æĻª":123005,"æĻ±":123006,"ð§":123007,"ð§¿":123008,"ð§¿¹":123009,"èĽij":123010,"çķ¯":123011,"æĸĿ":123012,"åĸ¤":123013,"å´¶":123014,"åµģ":123015,"ð«¶":123016,"ð«¶ĩ":123017,"å´¾":123018,"åµħ":123019,"å´¿":123020,"åµļ":123021,"ç¿Ļ":123022,"ð«ĸ®":123023,"åľĮ":123024,"åľIJ":123025,"èµij":123026,"èµĴ":123027,"é¿ı":123028,"éĵ¹":123029,"ð¬ŃĬ":123030,"éĵ½":123031,"ð¨±ĩ":123032,"ð«ĵ¶":123033,"éĶĬ":123034,"éĶį":123035,"éĶİ":123036,"ð¬Ńİ":123037,"éĶĵ":123038,"çĬĩ":123039,"é¢ĭ":123040,"ç¨Į":123041,"çŃĢ":123042,"çŃĺ":123043,"çŃľ":123044,"çŃ¥":123045,"çŃħ":123046,"åĤĥ":123047,"åĤī":123048,"ç¿Ľ":123049,"åĤĴ":123050,"åĤķ":123051,"èξ":123052,"çķ¬":123053,"ð«ĸ¯":123054,"èĦ¿":123055,"èħĺ":123056,"äIJ":123057,"äIJĥ":123058,"èħĻ":123059,"èħĴ":123060,"ð¬±Ł":123061,"é²ĥ":123062,"çĮ°":123063,"ð«Ľ":123064,"ð«ĽŃ":123065,"çĮ¯":123066,"ãº":123067,"ãºĦ":123068,"é¦ī":123069,"åĩĵ":123070,"éĦĹ":123071,"ð«·":123072,"ð«··":123073,"å»ĭ":123074,"å»Ĩ":123075,"éĦĮ":123076,"ç²¢":123077,"éģĨ":123078,"æĹIJ":123079,"𬮱":123080,"çĦŀ":123081,"ð¬Ĭ¤":123082,"欻":123083,"ð£¸":123084,"𣸣":123085,"æºļ":123086,"æºģ":123087,"æ¹Ŀ":123088,"渰":123089,"æ¹ĵ":123090,"ã´":123091,"ã´Ķ":123092,"æ¸Ł":123093,"æºł":123094,"渼":123095,"æºĩ":123096,"æ¹£":123097,"æ¹ij":123098,"æºŀ":123099,"æĦIJ":123100,"æĦĥ":123101,"æķ©":123102,"ç͝":123103,"棨":123104,"æīĬ":123105,"裣":123106,"祼":123107,"å©»":123108,"åªĨ":123109,"åªŀ":123110,"ãĽ¹":123111,"åªĵ":123112,"åªĤ":123113,"åªĦ":123114,"毵":123115,"çŁŀ":123116,"ð¬´ĥ":123117,"ð«ĺ¨":123118,"ç¼Ĭ":123119,"ç¼IJ":123120,"éªĻ":123121,"çijĥ":123122,"çijĵ":123123,"çijħ":123124,"çijĨ":123125,"ä´ĸ":123126,"çijĸ":123127,"çijĿ":123128,"çijĶ":123129,"çijĢ":123130,"ð¤§":123131,"ð¤§Ľ"
:123132,"çij³":123133,"çijĤ":123134,"å¶ħ":123135,"çijij":123136,"éģĺ":123137,"é«¢":123138,"å¡¥":123139,"åł½":123140,"赪":123141,"æijĽ":123142,"å¡Ŀ":123143,"æIJĴ":123144,"æIJĮ":123145,"èĴ±":123146,"èĴ¨":123147,"èĵı":123148,"èĶĢ":123149,"èĵ¢":123150,"èĵĤ":123151,"èĴ»":123152,"èĵ£":123153,"椹":123154,"楪":123155,"æ¦ĥ":123156,"æ¦ħ":123157,"æ¥Ĵ":123158,"楩":123159,"æ¦ĩ":123160,"椸":123161,"æ¥Ļ":123162,"æŃħ":123163,"ð¬ª":123164,"𬪩":123165,"ç¢ĥ":123166,"ç¢ı":123167,"ð¬ĴĶ":123168,"ç¢Ī":123169,"äĥħ":123170,"ç¡¿":123171,"éĦł":123172,"è¾Ĵ":123173,"ð¬¨İ":123174,"ð«IJĵ":123175,"é¾Ĩ":123176,"è§ľ":123177,"ä£":123178,"ä£ĺ":123179,"æļķ":123180,"é¹į":123181,"ð««":123182,"ð««ĩ":123183,"ã¬Ĭ":123184,"æļħ":123185,"è·±":123186,"èľIJ":123187,"èľİ":123188,"åµ²":123189,"èµĹ":123190,"骱":123191,"éĶĸ":123192,"ð«ĵ¹":123193,"éĶĺ":123194,"éͳ":123195,"éͧ":123196,"éĶª":123197,"ð¬Ńļ":123198,"éĶ«":123199,"éͬ":123200,"ð¬ŃĽ":123201,"ç¨ij":123202,"ç¨Ļ":123203,"äħ":123204,"äħŁ":123205,"ð¬ķ":123206,"ð¬ķĤ":123207,"çŃ»":123208,"çѼ":123209,"çѶ":123210,"çѦ":123211,"çѤ":123212,"åĤº":123213,"é¹İ":123214,"åĥĩ":123215,"èīħ":123216,"èīī":123217,"è°¼":123218,"è²Ĩ":123219,"èħ½":123220,"èħ¨":123221,"èħ¯":123222,"é²ī":123223,"é²Ĭ":123224,"é²Į":123225,"ä²Ł":123226,"ð¬¶ĭ":123227,"ð¬¶į":123228,"é²ı":123229,"éĽĬ":123230,"çĮº":123231,"é£Ķ":123232,"è§Ł":123233,"ð¦Ŀ¼":123234,"é¦Į":123235,"è£Ľ":123236,"å»Ĵ":123237,"çĺħ":123238,"éĦĺ":123239,"é¹Ĵ":123240,"éĦľ":123241,"éºĢ":123242,"éĦ£":123243,"éĺĺ":123244,"ð«Ķ¶":123245,"çħģ":123246,"çħĥ":123247,"çħ´":123248,"çħĭ":123249,"çħŁ":123250,"çħĵ":123251,"æ»ł":123252,"æºį":123253,"溹":123254,"æ»Ĩ":123255,"æ»ī":123256,"溦":123257,"溵":123258,"æ¼·":123259,"æ»§":123260,"æ»ĺ":123261,"æ»į":123262,"æĦŃ":123263,"æħ¥":123264,"æħĨ":123265,"塱":123266,"ð«ĮĢ":123267,"裼":123268,"ç¦ĭ":123269,"ç¦Ķ":123270,"ç¦ĺ":123271,"ç¦Ĵ":123272,"è°«":123273,"é¹Ķ":123274,"ð«ĸ³":123275,"æĦį":123276,"å«Ħ":123277,"媱":123278,"æĪ¤":123279,"åĭł":123280,"æĪ£":123281,"ð«ĺª":123282,"ð«ĺ¬":123283,"ç¼ŀ":123284,"è̤":123285,"çij§":123286,"ð«ŀ":123287,"ð«ŀ©":123288,"çij¨":123289,"çij±":123290,"çij·":123291,"çij¢":123292,"æĸł":123293,"æijı":123294,"å¢ķ":123295,"å¢Ī":123296,"å¢IJ":123297,"å¢ĺ":123298,"æij´":123299,"éĬİ":123300,"ð¡IJ":123301,"ð¡IJĵ":123302,"å¢ļ":123303,"æĴĸ":123304,"ðª¤":123305,"ðª¤Ĺ":123306,"éĿ½":123307,"éŀģ":123308,"èĶĮ":123309,"èĶĪ":123310,"èĵ°":123311,"è͹":123312,"èĶĬ":123313,"åĺı":123314,"榰":123315,"æ¦ij":123316,"æ§ļ":123317,"ð£Ĺ":123318,"ð£Ĺĭ":123319,"æ§ľ":123320,"æ¦į":123321,"çĸIJ":123322,"ð¬¸ĺ":123323,"éħº":123324,"éħ¾":123325,"éħ²":123326,"éħ´":123327,"碶":123328,"äĥİ":123329,"ð¬ĴĹ":123330,"碨":123331,"ð¥Ķ":123332,"ð¥Ķ²":123333,"碹":123334,"碥":123335,"åĬĤ":123336,"ð«ļĸ":123337,"ä´Ĺ":123338,"夥":123339,"çŀį":123340,"é¹ĸ":123341,"ã¬İ":123342,"è·½":123343,"èľ¾":123344,"å¹ĸ":123345,"å¶į":123346,"åľĻ":123347,"ð¨±ı":123348,"éĶº":123349,"éͼ":123350,"éͽ":123351,"ð¬Ń¤":123352,"é;":123353,"éĶ¿":123354,"éķĥ":123355,"éķĦ":123356,"éķħ":123357,"é¦Ŀ":123358,"é¹Ļ":123359,"箨":123360,"ç®ĸ":123361,"åĬĦ":123362,"åĥ¬":123363,"åĥ¦":123364,"åĥĶ":123365,"åĥİ":123366,"æ§ĥ":123367,"ãϦ":123368,"é²Ĵ":123369,"é²ķ":123370,"ð«ļķ":123371,"é²ĸ":123372,"é²Ĺ":123373,"é²ĺ":123374,"é²Ļ":123375,"ð¬¶IJ":123376,"ð¬¶ı":123377,"ð©½":123378,"𩽾":123379,"å¤IJ":123380,"çįį":123381,"é£Ĺ":123382,"ð¬¸ļ":123383,"åĩĺ":123384,"å»ij":123385,"å»Ļ":123386,"çĺĹ":123387,"çĺ¥":123388,"çĺķ":123389,"é²Ŀ":123390,"éĦ«":123391,"çĨĩ":123392,"æ¼¹":123393,"æ¼ĸ":123394,"æ½Ĩ":123395,"漤":123396,"潩":123397,"æ¼¼":123398,"æ¼´":123399,"ã½":123400,"ã½ı":123401,"æ¼Ī":123402,"æ¼ĭ":123403,"æ¼»":123404,"æħ¬":123405,"窬":1
23406,"çªŃ":123407,"ã®":123408,"㮾":123409,"ð¬¤Ŀ":123410,"è¤ķ":123411,"禼":123412,"ç¦ļ":123413,"éļ©":123414,"å«ķ":123415,"å«Ń":123416,"å«ľ":123417,"嫪":123418,"ð¬ĻĤ":123419,"ã»":123420,"㻬":123421,"麹":123422,"çĴĨ":123423,"漦":123424,"åıĩ":123425,"墣":123426,"墦":123427,"墡":123428,"åĬIJ":123429,"èĸģ":123430,"èķ°":123431,"èĶĥ":123432,"é¼Ĵ":123433,"æ§±":123434,"é¹Ŀ":123435,"ç£ı":123436,"ç£ī":123437,"殣":123438,"æħŃ":123439,"éľħ":123440,"æļµ":123441,"æļ²":123442,"æļ¶":123443,"踦":123444,"踣":123445,"äĹĸ":123446,"èĿĺ":123447,"èĿ²":123448,"èĿ¤":123449,"åĻĩ":123450,"åĻĤ":123451,"åĻĢ":123452,"ç½¶":123453,"å¶²":123454,"å¶ĵ":123455,"ãłĩ":123456,"å¶Ł":123457,"å¶Ĵ":123458,"éķĨ":123459,"éķĪ":123460,"éķĭ":123461,"éķİ":123462,"ð¬Ń©":123463,"éķķ":123464,"稹":123465,"åĦĩ":123466,"çļŀ":123467,"çļĽ":123468,"ä´ĺ":123469,"èīİ":123470,"èīı":123471,"é¹Ł":123472,"ð©¾ĥ":123473,"鲦":123474,"鲪":123475,"鲬":123476,"æ©¥":123477,"è§Ń":123478,"é¹ł":123479,"鹡":123480,"ç³ĩ":123481,"ç³Ī":123482,"翦":123483,"é¹¢":123484,"é¹£":123485,"çĨĽ":123486,"æ½ĸ":123487,"æ½µ":123488,"ãµ":123489,"ãµIJ":123490,"æ¾Ĥ":123491,"æ¾Ľ":123492,"çij¬":123493,"æ½½":123494,"æ½¾":123495,"æ½ı":123496,"æĨŃ":123497,"æĨķ":123498,"𬸣":123499,"æĪŃ":123500,"褯":123501,"禤":123502,"ð«į½":123503,"嫽":123504,"éģ¹":123505,"ð¬´Ĭ":123506,"çĴ¥":123507,"çĴ²":123508,"çĴĴ":123509,"æĨĻ":123510,"æĵIJ":123511,"éĦ¹":123512,"èĸ³":123513,"éŀĶ":123514,"é»ĩ":123515,"ð¬ŀ":123516,"ð¬ŀŁ":123517,"èķĹ":123518,"èĸ¢":123519,"èķ¹":123520,"æ©ŀ":123521,"æ©ij":123522,"橦":123523,"éĨij":123524,"è§±":123525,"磡":123526,"ð¥ķ":123527,"ð¥ķ¢":123528,"ç£ľ":123529,"è±®":123530,"ð«Ł¦":123531,"ð¬ºĪ":123532,"ð«łľ":123533,"é¹¾":123534,"èϤ":123535,"æļ¿":123536,"æĽĮ":123537,"æĽĪ":123538,"ã¬ļ":123539,"è¹ħ":123540,"踶":123541,"äĹĽ":123542,"èŀĹ":123543,"çĸģ":123544,"ãłĵ":123545,"幪":123546,"ðª©":123547,"ðª©ĺ":123548,"嶦":123549,"ð¬Ń¬":123550,"ð¨±ij":123551,"ð¬Ń¯":123552,"é¦ŀ":123553,"ç©Ħ":123554,"ç¯ļ":123555,"篯":123556,"ç°ī":123557,"é¼½":123558,"è¡ł":123559,"缦":123560,"èŀ£":123561,"縢":123562,"é²Ń":123563,"鲯":123564,"é²°":123565,"鲺":123566,"é²¹":123567,"ð«Ĺ´":123568,"亸":123569,"çĻĢ":123570,"çĺŃ":123571,"𬸦":123572,"ç¾±":123573,"ç³Ĵ":123574,"çĩĭ":123575,"çĨ»":123576,"çĩĬ":123577,"çĩļ":123578,"çĩı":123579,"æ¿©":123580,"æ¿ĭ":123581,"澪":123582,"æ¾½":123583,"æ¾´":123584,"æ¾Ń":123585,"æ¾¼":123586,"æĨ·":123587,"æĨº":123588,"æĩĶ":123589,"é»ī":123590,"å¬Ľ":123591,"鹨":123592,"翯":123593,"ð«Ħ·":123594,"çĴ±":123595,"𤩽":123596,"çĴ¬":123597,"çĴ®":123598,"髽":123599,"æĵ¿":123600,"èĸ¿":123601,"èĸ¸":123602,"æªij":123603,"æ«Ĩ":123604,"æªŀ":123605,"éĨ¨":123606,"ç¹Ħ":123607,"磹":123608,"磻":123609,"çŀ«":123610,"çŀµ":123611,"è¹IJ":123612,"èŁı":123613,"ãĺ":123614,"ãĺİ":123615,"ð¬Ń³":123616,"éķ¤":123617,"ð¬Ń¶":123618,"ð«Ķį":123619,"éķ¥":123620,"éķ¨":123621,"ð¬Ń¸":123622,"ð¨±Ķ":123623,"ð¬Ń¼":123624,"ð«Ķİ":123625,"磰":123626,"ç©Ļ":123627,"穾":123628,"穣":123629,"ç°ķ":123630,"ç°ĥ":123631,"ç°ı":123632,"åĦ¦":123633,"éŃĭ":123634,"æĸ¶":123635,"èīļ":123636,"𬸪":123637,"è°¿":123638,"ä²ł":123639,"ð¬¶Ł":123640,"é²¾":123641,"ð¬¶ł":123642,"鲿":123643,"é³ģ":123644,"é³Ĥ":123645,"é³Ī":123646,"é³ī":123647,"çį¯":123648,"äĹª":123649,"é¦ĺ":123650,"è¥ķ":123651,"è¥ļ":123652,"𬶨":123653,"èŀ±":123654,"çĶĵ":123655,"嬬":123656,"嬥":123657,"ð¦Ī":123658,"ð¦Ī¡":123659,"ð«Ħ¸":123660,"çĵĢ":123661,"éĩIJ":123662,"鬶":123663,"çĪĩ":123664,"éŀ³":123665,"éŀ®":123666,"ð¬Łģ":123667,"èĹŁ":123668,"èŦ":123669,"èŨ":123670,"é¹²":123671,"檫":123672,"黡":123673,"ç¤ŀ":123674,"ç¤Į":123675,"ð¥ĸ":123676,"ð¥ĸ¨":123677,"è¹¢":123678,"è¹ľ":123679,"èŁ«":123680,"äĹ´":123681,"åļļ":123682,"é«ĥ":123683,"éķ®":123684,"éķ±":
123685,"éħĤ":123686,"馧":123687,"ç°ł":123688,"ç°Ŀ":123689,"ç°°":123690,"鼫":123691,"鼩":123692,"çļ¦":123693,"èĩij":123694,"ä²¢":123695,"é³ij":123696,"é³Ĵ":123697,"é¹±":123698,"鹯":123699,"çĻĹ":123700,"ð¦Ĵ":123701,"ð¦Ĵį":123702,"æĹŀ":123703,"ç¿·":123704,"åĨģ":123705,"äİĸ":123706,"çĢĶ":123707,"çĢį":123708,"çĢĮ":123709,"è¥ľ":123710,"ä´Ļ":123711,"ð¬ĻĬ":123712,"åļŃ":123713,"ã°":123714,"ã°Ģ":123715,"鬷":123716,"éĨŃ":123717,"蹯":123718,"èłĭ":123719,"翾":123720,"é³ĺ":123721,"åĦ³":123722,"åĦ´":123723,"é¼Ĺ":123724,"ð¬¶Ń":123725,"ð©¾Į":123726,"é³ļ":123727,"é³Ľ":123728,"éºij":123729,"éºĸ":123730,"èłĥ":123731,"å½Ł":123732,"嬿":123733,"é¬Ĵ":123734,"èĺĺ":123735,"æ¬Ĥ":123736,"éĨµ":123737,"颥":123738,"çĶĹ":123739,"ð¨Ł":123740,"ð¨Łł":123741,"å·ĩ":123742,"éħħ":123743,"é«İ":123744,"çĬ¨":123745,"𬶮":123746,"ð¨Ń":123747,"ð¨Ńī":123748,"ã¸Į":123749,"çĪĶ":123750,"ç̱":123751,"ç̹":123752,"ç̼":123753,"ç̵":123754,"襫":123755,"åŃħ":123756,"骦":123757,"ð¬Ļĭ":123758,"ḛ̀":123759,"ð¤«":123760,"ð¤«ī":123761,"çĵĸ":123762,"é¬ĺ":123763,"趯":123764,"ð¬ºĵ":123765,"ç½į":123766,"é¼±":123767,"é³ł":123768,"鳡":123769,"é³£":123770,"çĪŁ":123771,"çĪļ":123772,"çģĪ":123773,"éŁĤ":123774,"ç³µ":123775,"èĺ¼":123776,"礵":123777,"é¹´":123778,"èºĶ":123779,"çļŃ":123780,"é¾¢":123781,"鳤":123782,"亹":123783,"ç±¥":123784,"é¼·":123785,"ð«ļŃ":123786,"çİĥ":123787,"éĨ¾":123788,"é½ĩ":123789,"è§¿":123790,"èł¼":123791,"×§":123792,"פ":123793,"׼":123794,"×ķת":123795,"ס":123796,"×Ļ×Ŀ":123797,"צ":123798,"×Ĵ":123799,"×ĺ":123800,"×ķר":123801,"×Ŀ":123802,"×ķ׾":123803,"×ĸ":123804,"à¹Ĥ":123805,"ïº":123806,"ðŁį":123807,"ðŁIJ":123808,"×Ļר":123809,"ï»":123810,"ðŁij":123811,"ðĿIJ":123812,"ðŁı":123813,"ðŁĶ":123814,"ðŁĮ":123815,"ðŁİ":123816,"ðŁĵ":123817,"ף":123818,"ðĿij":123819,"×ķ×ĵ":123820,"ï¦":123821,"Ġ×ķ":123822,"×ķ×ij":123823,"à¸Ńà¸ĩ":123824,"ðĿĺ":123825,"×Ļת":123826,"ðĿķ":123827,"à¸Ĺีà¹Ī":123828,"ائ":123829,"ð٤":123830,"×ķף":123831,"رÙĬ":123832,"×Ļ׾":123833,"ระ":123834,"าย":123835,"ï¯":123836,"ï®":123837,"าม":123838,"âĩ":123839,"ðŁ¥":123840,"ïŃ":123841,"ðĿĻ":123842,"×ķ׳":123843,"á½":123844,"Ġ׼":123845,"ðŁļ":123846,"âļ":123847,"ï§":123848,"×ijר":123849,"×Ļ׳":123850,"á´":123851,"Ġ×Ĺ":123852,"á¼":123853,"ðĿĹ":123854,"Ġ×¢":123855,"×Ļ×Ķ":123856,"ãģ£ãģŁ":123857,"ãģĵãģ¨":123858,"á¸":123859,"ÙĬÙĨ":123860,"ãģªãģĦ":123861,"اع":123862,"ศ":123863,"à¹Īà¸ĩ":123864,"×Ļ×ĵ":123865,"×ŀש":123866,"áĪ":123867,"׳×Ļ":123868,"×Ļ×ij":123869,"ï¥":123870,"ðĿĵ":123871,"Ġ×Ļ":123872,"×ļ":123873,"ัà¸ĩ":123874,"âĵ":123875,"ï¤":123876,"ĠاÙĦØ£":123877,"าà¸ģ":123878,"à¹īà¸Ļ":123879,"à¹Ģร":123880,"×ķ×Ŀ":123881,"á¹":123882,"ึ":123883,"×Ļ×§":123884,"à¸ĭ":123885,"à¸Ħร":123886,"à¸ĺ":123887,"ัà¸ģ":123888,"ðŁķ":123889,"ÙĪÙĨ":123890,"à¸Ńย":123891,"âĬ":123892,"ðĿĴ":123893,"ĠاÙĦع":123894,"าà¸Ļ":123895,"×Ļף":123896,"ÙĦÙĬ":123897,"×Ļש":123898,"à¸Ľà¸£à¸°":123899,"à¹Ģà¸Ľ":123900,"Ġ׳":123901,"×ķס":123902,"à¸ł":123903,"ÙħÙĨ":123904,"×ķ×¢":123905,"×ķ×ŀ":123906,"âĮ":123907,"ð٧":123908,"à¹ĩà¸Ļ":123909,"à¸į":123910,"ãİ":123911,"áµ":123912,"ĠاÙĦس":123913,"×ķ×§":123914,"หล":123915,"ðŁĩ":123916,"âı":123917,"ð٦":123918,"Ġ×Ķ×ŀ":123919,"ÙĪØ§":123920,"Ġת":123921,"ר×IJ":123922,"à¸Ńà¸Ļ":123923,"ษ":123924,"à¹Īว":123925,"×ķצ":123926,"íĹ":123927,"ãĦ":123928,"ï¨":123929,"ï¹":123930,"âİ":123931,"ï²":123932,"ðĿļ":123933,"ðIJ":123934,"à¸Ħว":123935,"หà¸Ļ":123936,"Ġר":123937,"بÙĬ":123938,"รà¹Į":123939,"را":123940,"شر":123941,"×ķ×Ĺ":123942,"×ķפ":123943,"×ķש":123944,"×ķ×Ĵ":123945,"íĿ":123946,"âĽ":123947,"à¸ķิ":123948,"à¹Ģà¸ģ":123949,"ï³":123950,"ï±":123951,"à¸Ķà¹ī":123952,"ë¹":123953,"ï¬":123954,"á¿":123955,"ðŁĽ":123956,"ðĿĸ":123957,"à¹Īาà¸ĩ":123958,"ูà¹ī
":123959,"Ġ×Ķ×IJ":123960,"ĠاÙĦØŃ":123961,"פר":123962,"ÙĪÙħ":123963,"à¹Ģล":123964,"íĸ":123965,"×Ļ×¢":123966,"ìĪ":123967,"íĵ":123968,"ðŁħ":123969,"áł":123970,"à¸Ħวาม":123971,"à¸Īะ":123972,"׳×Ķ":123973,"Ġ×§":123974,"à¸Ł":123975,"à¹īà¸ĩ":123976,"หม":123977,"تÙħ":123978,"׾×Ļ":123979,"ÙĬد":123980,"à¹Īà¸Ļ":123981,"×Ĺר":123982,"שר":123983,"à¹Ģà¸Ĺ":123984,"×ŀר":123985,"ëĸ":123986,"عÙĦ":123987,"×ŀ×¢":123988,"â²":123989,"׾×Ķ":123990,"Ġפ":123991,"à¸Ńà¸ģ":123992,"سÙĦ":123993,"×Ļ×ŀ":123994,"ÙĤÙĬ":123995,"íİ":123996,"تØŃ":123997,"×Ļס":123998,"×Ļ×Ĺ":123999,"íĽ":124000,"ï°":124001,"â½":124002,"áī":124003,"áĬ":124004,"á¨":124005,"Ùĩا":124006,"Ġ׾×Ķ":124007,"×ķ×IJ":124008,"Ùħا":124009,"à¹īà¸Ńà¸ĩ":124010,"رب":124011,"ĠاÙĦج":124012,"×ŀ×ĵ":124013,"ÙħÙĦ":124014,"تر":124015,"à¹Ģà¸Ķ":124016,"קר":124017,"íħ":124018,"ì¼":124019,"ê¿":124020,"ãĪ":124021,"áIJ":124022,"ðŁĹ":124023,"ê¦":124024,"áĭ":124025,"ðĿĶ":124026,"à¹Ģà¸Ľà¹ĩà¸Ļ":124027,"à¹ĥห":124028,"มา":124029,"วà¹Īา":124030,"มี":124031,"ีà¹ī":124032,"à¹Ħมà¹Ī":124033,"ÙĨÙĬ":124034,"ؤ":124035,"รา":124036,"×ķ×Ļ":124037,"ãĤĪãģĨ":124038,"ิà¸Ķ":124039,"×Ļפ":124040,"×Ĺ׾":124041,"ÙĤد":124042,"à¹Ģส":124043,"×Ļ×ĺ":124044,"à¸ģล":124045,"ר׼":124046,"×ķ׼":124047,"×Ļ׼":124048,"ëĪ":124049,"ëĥ":124050,"ðŁĸ":124051,"áħ":124052,"â¼":124053,"ãī":124054,"à¹Ħà¸Ķà¹ī":124055,"ת×Ļ":124056,"×Ļ×IJ":124057,"ĠاÙĦØ¥":124058,"à¸łà¸²":124059,"ริ":124060,"ÙĤØ©":124061,"ØŃد":124062,"ê»":124063,"ì±":124064,"ת×Ĺ":124065,"ìº":124066,"âĭ":124067,"áĦ":124068,"á¾":124069,"âµ":124070,"â¾":124071,"ĠÙĪØ§ÙĦ":124072,"׳×ķ":124073,"ÙĢ":124074,"ÙĬا":124075,"à¸ģà¹ĩ":124076,"×ŀ×Ķ":124077,"ãģĦãĤĭ":124078,"عد":124079,"ĠاÙĦÙĨ":124080,"Ġ×Ķש":124081,"ئ":124082,"ัà¹īà¸ĩ":124083,"รัà¸ļ":124084,"ÙĪÙĤ":124085,"ãģ§ãģį":124086,"à¹Ģà¸ŀ":124087,"׼׾":124088,"×ĺר":124089,"ัà¸Ķ":124090,"à¸Ńา":124091,"ì¢":124092,"à¸Ńà¸ļ":124093,"à¸ķร":124094,"à¹Ģà¸Ĭ":124095,"ìĶ":124096,"ãģĹãģ¾":124097,"ëģ":124098,"ëķ":124099,"ðŁĻ":124100,"âĴ":124101,"á¶":124102,"à¹ģล":124103,"ÙĨا":124104,"à¹ĥหà¹ī":124105,"à¹Ħà¸Ľ":124106,"×£":124107,"ัว":124108,"าà¸ĩ":124109,"×ĵר":124110,"×ij׾":124111,"פ×Ļ":124112,"Ġ×ĵ":124113,"ĠاÙĦÙģ":124114,"à¹Ģà¸Ĥ":124115,"ש×Ķ":124116,"×IJר":124117,"ë¬":124118,"ãģ«ãģª":124119,"ÑĢо":124120,"วิ":124121,"Ùħر":124122,"×IJת":124123,"Ùĥر":124124,"سب":124125,"ÙĨت":124126,"ãģĹãģĦ":124127,"اج":124128,"à¸Ńรà¹Į":124129,"ÙĥÙĦ":124130,"سÙħ":124131,"สิ":124132,"×Ļצ":124133,"ëĿ":124134,"íľ":124135,"ìī":124136,"áĨ":124137,"ÙĩÙħ":124138,"à¸Ļีà¹ī":124139,"ãģĤãĤĭ":124140,"ãģĦãģ¦":124141,"سÙĬ":124142,"׾×IJ":124143,"در":124144,"ãģļ":124145,"ÙĪØ¬":124146,"ĠاÙĦØ®":124147,"صر":124148,"íı":124149,"à¹īาà¸ĩ":124150,"ุà¸Ķ":124151,"×ķ×ĺ":124152,"×ij×¢":124153,"íĨ":124154,"à¸Ĭา":124155,"รม":124156,"ש×ŀ":124157,"×ŀס":124158,"ê´":124159,"ì´":124160,"ëľ":124161,"ì¿":124162,"ì©":124163,"ë»":124164,"â¤":124165,"ðŁĨ":124166,"áĮ":124167,"áķ":124168,"ذا":124169,"à¸Ĺำ":124170,"à¸ķà¹Ī":124171,"ĠاÙĦÙĤ":124172,"ÙĦÙĥ":124173,"ูà¹Ī":124174,"à¸Ħุ":124175,"ÙĬÙħ":124176,"׳×Ļ×Ŀ":124177,"ืà¹Īà¸Ń":124178,"ÙĪØ¹":124179,"ãĤĩ":124180,"اÙĤ":124181,"Ġ×ij×¢":124182,"à¹Ģม":124183,"جÙħ":124184,"ừ":124185,"ãģĵãģ¨ãģĮ":124186,"بد":124187,"×ķ×Ķ":124188,"ש׾":124189,"Ùĩر":124190,"à¹Ģà¸Ļ":124191,"ãģ¹":124192,"íĭ":124193,"ì»":124194,"ì½":124195,"ëŃ":124196,"ìĮ":124197,"íĢ":124198,"ëĮ":124199,"ëº":124200,"ãĬ":124201,"à¹ĥà¸Ļ":124202,"Ġ×Ĵ":124203,"à¹Ĩ":124204,"à¸Īาà¸ģ":124205,"วย":124206,"à¹ĥà¸Ĭ":124207,"à¸ĩาà¸Ļ":124208,"ĠاÙĦØ´":124209,"اØŃ":124210,"à¹īาà¸Ļ":124211,"ืà¹Īà¸Ńà¸ĩ":124212,"×IJ×Ļ":124213,"بÙĦ":124214,"ã썿ĢĿ":124215,"×ł×¡":124216,"ãģ¾ãģĽ":124217,"ÙĥÙĨ":124218,"ער":124219,"ĠاÙĦد":124220,"
שת":124221,"íŀ":124222,"Ùħس":124223,"صÙĦ":124224,"×ķ׳×Ķ":124225,"ارة":124226,"ÙĦÙħ":124227,"สม":124228,"Ø£ÙĨ":124229,"תר":124230,"×IJ×ŀ":124231,"عب":124232,"خت":124233,"ãĤĥ":124234,"ì¡":124235,"ì£":124236,"ива":124237,"สั":124238,"ึà¸ģ":124239,"ì¸":124240,"ëĨ":124241,"алÑĮн":124242,"ì³":124243,"ìį":124244,"ê¼":124245,"ê½":124246,"ìı":124247,"ãĮ":124248,"ãı":124249,"ï©":124250,"êª":124251,"áİ":124252,"Ġ×ĸ":124253,"à¸ģัà¸Ļ":124254,"×Ļ×ķ":124255,"à¸Ħà¸Ļ":124256,"׳×ķת":124257,"à¸ľà¸¹à¹ī":124258,"à¹ĥà¸Ī":124259,"ãģĦãģŁ":124260,"Ù쨱":124261,"×ĺ×Ļ":124262,"צ×Ļ":124263,"ãĤĤãģ®":124264,"ĠاÙĦص":124265,"ãģ¾ãģĽãĤĵ":124266,"دة":124267,"×ij×Ļ":124268,"ĠاÙĦر":124269,"Ġ×ŀ×IJ":124270,"สำ":124271,"à¹Ģห":124272,"عر":124273,"ãģªãģı":124274,"à¸ģระ":124275,"×ij×ĵ":124276,"à¹Ģà¸Ī":124277,"×Ļ×ļ":124278,"×Ĺ×Ļ":124279,"ÙĬع":124280,"ש×ij":124281,"ÙĨØ©":124282,"ÙĪØ¶":124283,"ÙĦÙģ":124284,"ÙĢÙĢ":124285,"פע":124286,"íĪ":124287,"×ŀ×§":124288,"à¸IJ":124289,"ØŃØ©":124290,"اص":124291,"Ñĭва":124292,"à¸Ħม":124293,"วั":124294,"à¸Ľà¸¥":124295,"ìŁ":124296,"íļ":124297,"ë´":124298,"ëij":124299,"ëī":124300,"ëĩ":124301,"ì¨":124302,"ë±":124303,"ëİ":124304,"â¬":124305,"á¥":124306,"áĹ":124307,"áĽ":124308,"áį":124309,"Å©":124310,"à¸Ķี":124311,"ôi":124312,"Ġס":124313,"׾×ķ":124314,"á»Ŀi":124315,"à¸Ħุà¸ĵ":124316,"ây":124317,"à¸Ļา":124318,"×Ĺ×ĵ":124319,"×ĵ×Ļ":124320,"หา":124321,"جÙĦ":124322,"à¹Ģว":124323,"ãĤĩãģĨ":124324,"ÙħØ©":124325,"ĠاÙĦÙĥ":124326,"Ġ×Ķ×¢":124327,"جر":124328,"×ĸר":124329,"اط":124330,"×Ľ×ª":124331,"×ķ׳×Ļ×Ŀ":124332,"ØŃÙħ":124333,"ê¶":124334,"رÙĥ":124335,"Ġ×ľ×¢":124336,"×ķ×ĸ":124337,"สร":124338,"צ׾":124339,"Ø¢":124340,"است":124341,"à¹Īม":124342,"خر":124343,"צע":124344,"×Ļר×ķת":124345,"ادة":124346,"شار":124347,"×ŀ×Ĺ":124348,"íĴ":124349,"à¹Ģรีย":124350,"×Ĺ×§":124351,"اث":124352,"รà¸ĩ":124353,"à¹Ģà¸ķ":124354,"à¸Īำ":124355,"à¸Ŀ":124356,"à¹Īาย":124357,"à¸Ħล":124358,"ÙĤÙĪ":124359,"иÑĩеÑģк":124360,"à¸ĵà¹Į":124361,"ัย":124362,"Ùħع":124363,"ë¨":124364,"ë¿":124365,"ë®":124366,"ï´":124367,"ì¥":124368,"ì«":124369,"ëµ":124370,"á¡":124371,"âį":124372,"ðĵ":124373,"â°":124374,"à¸Ĥà¸Ńà¸ĩ":124375,"Ùĭ":124376,"à¸ģัà¸ļ":124377,"ãģ®ãģ§":124378,"à¹īว":124379,"à¸Ńยà¹Īาà¸ĩ":124380,"ãģŃ":124381,"á»ĩt":124382,"à¸ķà¹īà¸Ńà¸ĩ":124383,"×ŀ×Ļ":124384,"à¹ģà¸ļ":124385,"×Ĵר":124386,"ÙĪÙģ":124387,"ÙĤÙĦ":124388,"à¸łà¸²à¸ŀ":124389,"ר×Ļ":124390,"ลา":124391,"ÙĬس":124392,"Ġצ":124393,"ÙĬÙģ":124394,"Ġ×ĺ":124395,"à¸ľà¸¥":124396,"áng":124397,"รว":124398,"Ġ×ŀש":124399,"×IJ×ķת":124400,"×ĸ×Ķ":124401,"ูà¸ģ":124402,"à¸Ļัà¸ģ":124403,"اÙĨÙĬ":124404,"دا":124405,"ãģ³":124406,"׼ף":124407,"ãĤīãĤĮ":124408,"ãĤĮãģ°":124409,"תק":124410,"úc":124411,"ÙĪØ²":124412,"×Ļר×Ķ":124413,"Ġngh":124414,"ánh":124415,"Ġ×ķ×IJ":124416,"á»ħ":124417,"สุà¸Ķ":124418,"ëį°":124419,"اض":124420,"اÙĦÙĬ":124421,"بار":124422,"عÙħ":124423,"à¸ļา":124424,"تج":124425,"à¸ŀร":124426,"×ķר×Ķ":124427,"ảng":124428,"Ø®ÙĦ":124429,"à¸ī":124430,"ắc":124431,"ש×Ļ×Ŀ":124432,"íĶ":124433,"Ù쨳":124434,"×Ļ×Ĵ":124435,"пÑĢ":124436,"ĠاÙĦØ«":124437,"سط":124438,"รูà¹ī":124439,"ีà¹Īย":124440,"à¸Ńà¸Ķ":124441,"ãģªãĤĬ":124442,"×Ĵ×ĵ":124443,"ãģĦãģ¾ãģĹãģŁ":124444,"סק":124445,"خص":124446,"laÅŁ":124447,"енно":124448,"بØŃ":124449,"สà¸Ļ":124450,"ฮ":124451,"ר×IJש":124452,"ÙħÙĪ":124453,"دÙĬد":124454,"ษา":124455,"×ķ×ļ":124456,"ãĥ§ãĥ³":124457,"à¸ķุ":124458,"Ġêµ":124459,"ĠÑģво":124460,"צ×ij":124461,"à¸Ńม":124462,"à¸Ľà¸£":124463,"تع":124464,"×Ķת":124465,"اÙħÙĦ":124466,"×ŀ׳":124467,"ç¶ļ":124468,"ฤ":124469,"íį":124470,"ëĺ":124471,"ë¤":124472,"ìij":124473,"â´":124474,"ãĭ":124475,"ĠباÙĦ":124476,"á»ģu":124477,"ĠاÙĦÙĦ":124478,"à¸ķัว":124479,"ذÙĩ":124480,"ึà¸ĩ":
124481,"à¹ĥà¸Ĭà¹ī":124482,"á»ĵng":124483,"à¸Ļั":124484,"มาà¸ģ":124485,"ãĥŁ":124486,"×ŀ×ķ":124487,"à¸Ĺย":124488,"á»Ļi":124489,"ằ":124490,"ảo":124491,"à¹Ĥà¸Ķ":124492,"×IJ׾":124493,"สาม":124494,"ÙĪØ¨":124495,"à¸Ĺุ":124496,"ยัà¸ĩ":124497,"עת":124498,"×ķ׳×ķת":124499,"à¸Ĥึ":124500,"à¸Ĥึà¹īà¸Ļ":124501,"à¸ģà¹Ī":124502,"ẫ":124503,"á»ijc":124504,"ãģĹãĤĩãģĨ":124505,"á»ĭch":124506,"Ġ×IJ×ķת":124507,"Ġש×IJ":124508,"׼×ķ׾":124509,"á»Ļc":124510,"عة":124511,"à¸Ĺี":124512,"à¹Ģà¸Ń":124513,"Ùĥت":124514,"ãģ»":124515,"ẻ":124516,"ìĹħ":124517,"à¸Ńà¸Ńà¸ģ":124518,"اÙĨت":124519,"à¹Ħร":124520,"Ġ×IJ×Ĺר":124521,"طر":124522,"ÙĨد":124523,"ืà¹īà¸Ń":124524,"Ø·ÙĦ":124525,"×IJ×Ķ":124526,"uyên":124527,"íĸī":124528,"×ij×Ķ":124529,"à¸Ħà¹Ī":124530,"à¸Ĭà¹Īว":124531,"ãģĤãĤĬãģ¾ãģĻ":124532,"ÙĬب":124533,"ק׾":124534,"ãĥĻ":124535,"Ä©":124536,"سر":124537,"าว":124538,"ãĤ±":124539,"à¸ļริ":124540,"ר×Ĵ":124541,"á»ĥu":124542,"ØŃت":124543,"×ķ×ŀ×Ļ":124544,"بÙĨ":124545,"êµIJ":124546,"ÄŁu":124547,"ãģªãĤĵ":124548,"×ij×§":124549,"Ġפר":124550,"ắn":124551,"ØŃÙĦ":124552,"×ij×Ĺ":124553,"ấu":124554,"×ij×ķ×ĵ":124555,"ãĥ¯":124556,"Ġ׾ק":124557,"ัà¸į":124558,"à¸ŀิ":124559,"×Ĺ×Ķ":124560,"×ĸ׼":124561,"ãĥ¼ãĥł":124562,"ÑĤелÑĮ":124563,"×ŀ×Ļ×ĵ":124564,"ÙĬØ®":124565,"ẳ":124566,"تص":124567,"à¸ĺิ":124568,"è¾¼":124569,"ìĵ":124570,"ÙĥØ©":124571,"ÙĤب":124572,"à¸Ħà¹Į":124573,"à¹īาย":124574,"à¸ĵะ":124575,"าะ":124576,"ëĴ":124577,"ê¾":124578,"ë·":124579,"ìĩ":124580,"êº":124581,"ìģ":124582,"ëĢ":124583,"ì¾":124584,"ë½":124585,"ëļ":124586,"ìŃ":124587,"ìİ":124588,"áij":124589,"ëĹ":124590,"êĴ":124591,"à¡":124592,"à¬":124593,"ðIJĮ":124594,"ãĩ":124595,"ðĿĦ":124596,"Ġ׾×IJ":124597,"ãģ¨ãģĦãģĨ":124598,"Ġnhi":124599,"×Ļ×ķת":124600,"Ġש×Ķ":124601,"à¹ģลà¹īว":124602,"Æ°á»Ľc":124603,"à¸Ķà¹īวย":124604,"à¸Ĺาà¸ĩ":124605,"×ł×ª":124606,"פת":124607,"à¹ģà¸ķà¹Ī":124608,"ưng":124609,"à¸Ńยูà¹Ī":124610,"à¹īำ":124611,"Ġ×IJ׾":124612,"ÙĥÙħ":124613,"ấp":124614,"ลà¸ĩ":124615,"ãģŁãĤģ":124616,"×Ĵ׾":124617,"หร":124618,"ĠÑĢе":124619,"à¹Ģà¸Ĥà¹īา":124620,"ÙĤر":124621,"Ġ×Ķס":124622,"ÙĪÙĬ":124623,"สามาร":124624,"สามารà¸ĸ":124625,"Äĥn":124626,"à¸Ńี":124627,"פ×ķ":124628,"×Ļ׳×ķ":124629,"วัà¸Ļ":124630,"ặc":124631,"íķĻ":124632,"×ŀת":124633,"êu":124634,"ẹ":124635,"ÙģÙĬ":124636,"×ŀצ":124637,"à¸Ħา":124638,"ãģĿãģĨ":124639,"ãĢħ":124640,"از":124641,"اÙĩ":124642,"ר×Ļ×Ŀ":124643,"ấn":124644,"หาร":124645,"ạt":124646,"ÙĨÙĩ":124647,"à¹Ģà¸Ħร":124648,"جÙĩ":124649,"׼×Ļ":124650,"ắt":124651,"à¸Ħà¹īา":124652,"رة":124653,"ãĥı":124654,"ÙĥÙĪÙĨ":124655,"ứng":124656,"Ġìļ°":124657,"ยà¹Į":124658,"à¹Īวà¸Ļ":124659,"à¸ģำ":124660,"ثر":124661,"Ñģи":124662,"ĠاÙĦØ·":124663,"Ġ×Ķצ":124664,"ĠØ·":124665,"ĠاÙĦÙĪ":124666,"ê¹Į":124667,"ØŃÙĬ":124668,"ارات":124669,"à¹Ģà¸ĭ":124670,"با":124671,"гÑĢ":124672,"รี":124673,"ืà¸Ńà¸Ļ":124674,"عت":124675,"ÙĤاÙĦ":124676,"دÙħ":124677,"Ø¡":124678,"Ġ×ŀ×§":124679,"×ĵ×Ļ×Ŀ":124680,"×¢×ľ":124681,"ãģĴ":124682,"ëĭĺ":124683,"×¢×Ķ":124684,"Ġìĸ´":124685,"ÑģÑĮ":124686,"ÙĤØ·":124687,"ãĥĽ":124688,"èĢĥãģĪ":124689,"à¹ģà¸Ļ":124690,"ÙĪØ§Øª":124691,"âu":124692,"ĠìĤ¬ëŀ":124693,"หว":124694,"ĠاÙĦØ£Ùħ":124695,"Ġ×Ķ×ŀש":124696,"بÙĪ":124697,"à¸Ĭà¸Ļ":124698,"ãĤĵãģ§ãģĻ":124699,"วà¸Ļ":124700,"à¸ģรรม":124701,"×ŀ×ķ×ĵ":124702,"ÙĥاÙĨ":124703,"×ķ×£":124704,"олог":124705,"تÙĨ":124706,"à¸ķà¹Į":124707,"ê²ĥ":124708,"ר×ĺ":124709,"ừng":124710,"×ķ×ij×Ķ":124711,"ÙħØŃ":124712,"ĠЧ":124713,"פ×Ĵ":124714,"สà¸ĸ":124715,"ãģĭãĤĬ":124716,"ınız":124717,"à¹Ģย":124718,"ãĥ¼ãĥ³":124719,"ãģĬãĤĬ":124720,"פש":124721,"ิà¸ķ":124722,"Ø·ÙĨ":124723,"×Ļת×Ļ":124724,"×IJ׳":124725,"çek":124726,"ìª":124727,"×ŀ×ij":124728,"ศา":124729,"ãĤ¹ãĤ¿":124730,"à¸ļุ":124731,"×ĵ×ijר":124732,"ãģĦãģı":1
24733,"สะ":124734,"à¹Ģหล":124735,"ิà¸ĩ":124736,"à¸ŀัà¸Ļ":124737,"ãģĦãģŁãģł":124738,"ãĤĤãĤī":124739,"à¹īม":124740,"ãģĵãģ¨ãģĮãģ§ãģį":124741,"ารà¹Į":124742,"ุà¸ĩ":124743,"íij":124744,"ì¯":124745,"ë¼":124746,"íĤ":124747,"ì·":124748,"ê¡":124749,"áı":124750,"áĴ":124751,"ðĿľ":124752,"á©":124753,"ðŁĦ":124754,"ðIJ¤":124755,"Ġש׾":124756,"Ġ×ŀ×Ķ":124757,"à¹ģละ":124758,"Ġ׼׾":124759,"ẽ":124760,"á»Ļng":124761,"ذÙĬ":124762,"ле":124763,"×¥":124764,"ãģªãģ©":124765,"ĠÙĪØ£":124766,"หà¸Ļà¹īา":124767,"ãģ¾ãģ§":124768,"à¸ķà¹Īà¸Ń":124769,"à¸Ĺัà¹īà¸ĩ":124770,"ãģłãģij":124771,"à¹ģà¸ļà¸ļ":124772,"à¹Ģรา":124773,"פ׾":124774,"ãģŁãģĦ":124775,"à¹Ģลย":124776,"ãģ£ãģ¦ãģĦãĤĭ":124777,"ếp":124778,"ึà¹Īà¸ĩ":124779,"ê´Ģ":124780,"ê³Ħ":124781,"׼×ķ":124782,"à¹Ģรืà¹Īà¸Ńà¸ĩ":124783,"×§×Ļ":124784,"êµŃ":124785,"פס":124786,"تÙĬ":124787,"ãĥĦ":124788,"Ġ×Ķ×Ĺ":124789,"ги":124790,"ר×IJ׾":124791,"×ŀ׾":124792,"ĠØ£ÙĬ":124793,"ĠعÙĦÙĬ":124794,"ãģĭãģ£ãģŁ":124795,"ש×Ļ":124796,"дÑĥ":124797,"×ŀף":124798,"׳×ĺ":124799,"׳×Ļת":124800,"miÅŁ":124801,"׼×Ŀ":124802,"Ġ×ijר":124803,"Ġ׾×ij":124804,"ĠÐĽ":124805,"çe":124806,"×ķ׳×Ļ":124807,"ãĤĪãģĨãģ«":124808,"פ×ķר":124809,"ãĥį":124810,"ÙĥÙĬ":124811,"×Ĺת":124812,"ÙģÙĦ":124813,"Ġ×Ķ×§":124814,"Ġ×Ķ×ij":124815,"Ġ×ŀס":124816,"à¹Īาà¸Ļ":124817,"пеÑĢ":124818,"à¹Īาว":124819,"Ġ×ij×IJ":124820,"ĠÙĪÙĩ":124821,"à¸Ļำ":124822,"Ġ×ijש":124823,"׳ק":124824,"ãģ©ãģĨ":124825,"ש×ķת":124826,"×ĵ×Ķ":124827,"à¹Ģà¸ļ":124828,"ÙĨس":124829,"Ġìļ°ë¦¬":124830,"สà¹Īวà¸Ļ":124831,"ลัà¸ĩ":124832,"جز":124833,"Ġ×Ĺ×Ļ":124834,"Ùĥثر":124835,"ละ":124836,"Ùĩد":124837,"ĠÙĪØ¨":124838,"اÙĦÙħ":124839,"à¹ģม":124840,"Æ¡i":124841,"Ġ×ij×Ĺ":124842,"ữa":124843,"à¹Ģà¸Ĺศ":124844,"à¸ķัà¹īà¸ĩ":124845,"огда":124846,"׾ק":124847,"دد":124848,"สรà¹īาà¸ĩ":124849,"à¸Ĭี":124850,"Ù쨶":124851,"à¹ģห":124852,"uyá»ĩn":124853,"รัà¸ģ":124854,"á»ĩm":124855,"สา":124856,"פק":124857,"ียà¸ĩ":124858,"à¸ķà¹Īาà¸ĩ":124859,"à¸Ħรัà¹īà¸ĩ":124860,"ØŃÙĤ":124861,"à¹Ģà¸Ńà¸ĩ":124862,"ائÙĬ":124863,"×ĺ×¢":124864,"اÙĦØ©":124865,"ิà¹Īม":124866,"ãĤ½":124867,"دÙī":124868,"Ġר×IJ":124869,"ãģ£ãģ¨":124870,"ãĥĥãĥĹ":124871,"ÙĬرة":124872,"ê±´":124873,"×ŀ×IJ":124874,"×ķ×ķ":124875,"بع":124876,"ãģ²":124877,"ราย":124878,"×ĵ×Ŀ":124879,"تÙģ":124880,"à¸ķà¸ģ":124881,"ạng":124882,"ãĤĴè¦ĭ":124883,"à¸Ĭั":124884,"Æ°á»Ł":124885,"Æ°á»Łng":124886,"جب":124887,"×ķ×ŀר":124888,"ĠìĤ¬ëŀĮ":124889,"óng":124890,"รั":124891,"Ġ×Ķ×ĸ":124892,"רצ":124893,"Ġ×Ĺ×ĵ":124894,"ذÙĦÙĥ":124895,"×ķר×Ļ":124896,"ãģ¡ãĤĥ":124897,"Ù쨹":124898,"Ġ׾צ":124899,"ái":124900,"à¹ĩà¸ļ":124901,"ãģİ":124902,"à¸ģิ":124903,"ạc":124904,"ë©°":124905,"ãģªãĤĭ":124906,"×ķ׾×Ŀ":124907,"à¹ģà¸Ĺ":124908,"×ķ×¥":124909,"меÑĤ":124910,"Ã¼ÅŁ":124911,"ÑĢÑı":124912,"à¸Ĵ":124913,"ÑģÑĤоÑı":124914,"عÙĪØ¯":124915,"Ùħار":124916,"طة":124917,"à¸ŀื":124918,"кÑĢ":124919,"à¹ģà¸ģ":124920,"à¹Ĥรà¸ĩ":124921,"×ij×Ļ×ĺ":124922,"ê²ł":124923,"×ķ׾×Ķ":124924,"ØŃر":124925,"ืà¹Īà¸Ńà¸Ļ":124926,"×ķ×ijר":124927,"×Ĺש":124928,"ãĥķãĤ¡":124929,"×ŀ×ĺ":124930,"út":124931,"Ġdön":124932,"ắng":124933,"ëłĩ":124934,"ẳng":124935,"วà¸ģ":124936,"صد":124937,"خط":124938,"à¸Ńั":124939,"ãĤıãĤĮ":124940,"سÙĦاÙħ":124941,"à¹Ģรà¹ĩ":124942,"×Ļש×Ļ":124943,"جاÙĦ":124944,"ãģijãĤĭ":124945,"à¸Ĭาà¸ķิ":124946,"ÙĪØ§ÙĤ":124947,"à¹Ĥà¸Ļ":124948,"ãģ¦ãģĹãģ¾":124949,"اعة":124950,"ãĤŃãĥ£":124951,"à¸įา":124952,"ÙĦاÙĤ":124953,"ิà¸ģ":124954,"ĠÑģов":124955,"ÑĢак":124956,"×Ļ׳×Ļ":124957,"Ã¼ÄŁ":124958,"Ã¼ÄŁÃ¼":124959,"×§×ij":124960,"à¹Īà¸Ńà¸ĩ":124961,"Ġgerçek":124962,"à¸Ĺั":124963,"ованиÑı":124964,"×ŀ׼":124965,"سة":124966,"×Ļ×£":124967,"leÅŁ":124968,"Ùħؤ":124969,"ĠìĿĺ":124970,"à¸IJาà¸Ļ":124971,"ĠÑģоб":124972,"ĠêµŃ":124973,"עצ":124974,"зв":124975,"สà¸ĩ":124976,"زÙĦ":12
4977,"ãģıãĤĮ":124978,"иÑĢÑĥ":124979,"تأ":124980,"полн":124981,"ìĺĢ":124982,"ÙĨØ´":124983,"׼×IJ":124984,"ÙħØ´":124985,"à¸Ķà¹Į":124986,"ÙĪÙĬÙĦ":124987,"à¹ģà¸Ĥ":124988,"ãģ£ãģ¦ãģĹãģ¾":124989,"ноÑģÑĤ":124990,"вл":124991,"ÙħÙĤ":124992,"راج":124993,"å¤ī":124994,"ëĽ":124995,"â¸":124996,"ìIJ":124997,"à»":124998,"áļ":124999,"â»":125000,"êĻ":125001,"â§":125002,"ðĴ":125003,"ðĿĩ":125004,"Ġ×IJת":125005,"ĠÙĦÙĦ":125006,"ĠØ£ÙĨ":125007,"Ġ×ķ×Ķ":125008,"ãģ«ãģ¯":125009,"Ġ×Ļש":125010,"تÙĩ":125011,"ÃŃnh":125012,"ÙĬات":125013,"Ġ×ij×ŀ":125014,"à¸Ļัà¹īà¸Ļ":125015,"à¸Ļà¹īำ":125016,"Ãło":125017,"à¸ķาม":125018,"ãģ®ãģ¯":125019,"dır":125020,"Ġnghi":125021,"ặt":125022,"×ŀ×Ļ×Ŀ":125023,"ãģ¦ãģĦãĤĭ":125024,"Ġ×ijת":125025,"หรืà¸Ń":125026,"ĠسÙĬ":125027,"ãģªãĤī":125028,"à¹Ĥà¸Ķย":125029,"ıyor":125030,"à¸Ńีà¸ģ":125031,"á»ĩnh":125032,"Ñĭм":125033,"à¸Ĺุà¸ģ":125034,"Ġ׾×Ĺ":125035,"Ġ×Ķר":125036,"Ġ×Ķ×Ļ":125037,"à¸ŀระ":125038,"à¹Ģวลา":125039,"Ġغ":125040,"ẫn":125041,"mÄ±ÅŁ":125042,"׼×Ķ":125043,"á»ijn":125044,"ãģ§ãģĹãĤĩãģĨ":125045,"ãĥ¢":125046,"à¸Ľà¸µ":125047,"ס×Ļ":125048,"ãģĵãĤį":125049,"Ġ׾פ":125050,"รà¸ĸ":125051,"ê¸Ī":125052,"à¸ģวà¹Īา":125053,"무":125054,"á»įng":125055,"ãĤĵãģ§":125056,"ãĤĪãģĨãģª":125057,"á»ĵi":125058,"ãĤ¬":125059,"สà¹Īà¸ĩ":125060,"×Ļ׳×Ķ":125061,"à¸ĸูà¸ģ":125062,"à¸Īัà¸Ķ":125063,"Ġ×Ķ×Ĵ":125064,"ãĥľ":125065,"×ŀ×ķת":125066,"ÙĪÙĥ":125067,"ëĭ¨":125068,"ĠØ«":125069,"ãģ®ãģĮ":125070,"à¹Ģหà¹ĩà¸Ļ":125071,"عا":125072,"à¸Ļิ":125073,"Åŀ":125074,"à¸Ńะ":125075,"ãģĪãĤĭ":125076,"Ø«ÙĦ":125077,"ØŃÙħد":125078,"à¹Ģà¸ģิà¸Ķ":125079,"פשר":125080,"פ×Ķ":125081,"มิ":125082,"ئÙĬس":125083,"à¸Ĺำà¹ĥหà¹ī":125084,"×¢×ĵ":125085,"ìĭ¤":125086,"à¸Ĭà¹Īวย":125087,"ĠاÙĦÙħÙĨ":125088,"زÙĬ":125089,"عÙĬ":125090,"Ġ׼×IJ":125091,"ạnh":125092,"ỹ":125093,"ãĤĵãģª":125094,"สู":125095,"צר":125096,"Æ°á»Ľng":125097,"×ķ×ķ×Ķ":125098,"à¹Ĥล":125099,"ĠاÙĦÙĩ":125100,"วา":125101,"หลาย":125102,"Ñīе":125103,"à¸Ĥà¹īà¸Ń":125104,"à¹īà¸Ńย":125105,"بط":125106,"каÑı":125107,"ĠØ¢":125108,"ĠиÑģ":125109,"ĠاÙĦغ":125110,"à¸ģา":125111,"à¸Ļà¹Īา":125112,"ÙĬÙĪ":125113,"×ij×ķר":125114,"á»ħn":125115,"วà¸ĩ":125116,"×Ļ×ĸ":125117,"ì²Ń":125118,"ним":125119,"룰":125120,"×Ĵ×ķר":125121,"صØŃ":125122,"ÙĦÙĪ":125123,"×Ĺ×ķת":125124,"สุ":125125,"رÙĬÙĤ":125126,"ס×ĺ":125127,"Ġ×ŀ×¢":125128,"ãĥĨãĤ£":125129,"à¸Ħิà¸Ķ":125130,"ãĤįãģĨ":125131,"à¹Ħล":125132,"à¸Ļà¹Į":125133,"á»ıi":125134,"ÑģÑĤÑĢо":125135,"สà¸Ķ":125136,"สาร":125137,"ÙĪÙĦØ©":125138,"ầm":125139,"รà¹Īว":125140,"รà¹Īวม":125141,"รุ":125142,"ĠاÙĦسÙĬ":125143,"ìĺģ":125144,"Ġ×ŀ×ij":125145,"פ×ĺ":125146,"à¸ķิà¸Ķ":125147,"×ĺ×Ļ×Ŀ":125148,"Ġ무":125149,"ÙĤدÙħ":125150,"ĠdÃ¼ÅŁ":125151,"ائÙĦ":125152,"мÑĭ":125153,"ØŃس":125154,"ÙĪØµ":125155,"×Ļ×§×Ķ":125156,"ãģ§ãģ¯ãģªãģĦ":125157,"à¹Ģหม":125158,"оÑĢÑĤ":125159,"íĨµ":125160,"ãģIJ":125161,"кÑĢа":125162,"ียว":125163,"عار":125164,"ئة":125165,"íĥĢ":125166,"ãģ«ãģªãĤĬ":125167,"جة":125168,"ÙĪÙĤع":125169,"ÑĮÑı":125170,"×ķצ×Ķ":125171,"ש×Ŀ":125172,"بÙĤ":125173,"Ġ×Ļ×Ķ":125174,"ÙĬØ·":125175,"ımız":125176,"деÑĢж":125177,"×Ļשר×IJ׾":125178,"غÙĬر":125179,"รà¸Ńà¸ĩ":125180,"à¹Ģรียà¸Ļ":125181,"Ġ×Ķ×ĺ":125182,"หมาย":125183,"ÙħÙĩ":125184,"اÙ쨩":125185,"ĠоÑĢг":125186,"ÙĪÙī":125187,"ãĥ©ãĤ¤":125188,"×ŀ׳×Ķ":125189,"ĠÄijo":125190,"ĠгоÑĢ":125191,"اÙħØ©":125192,"楽":125193,"Ø«ÙĬر":125194,"à¸ģิà¸Ī":125195,"á»ĵn":125196,"ÙĨب":125197,"ÑĢÑĥд":125198,"ìĹĪ":125199,"Ġ×Ĺ×ijר":125200,"ÑĢаж":125201,"ạch":125202,"تÙĪ":125203,"à¹Ĥม":125204,"×ij×Ļ×ij":125205,"ĠíĨµ":125206,"acaģı":125207,"جÙĦس":125208,"à¹Ģà¸Ľà¸¥":125209,"วà¸Ķ":125210,"à¸Ńล":125211,"ãģŁãĤĬ":125212,"à¸Ľà¸±à¸į":125213,"ĠìķĮ":125214,"عرÙģ":125215,"à¹Ħà¸Ł":125216,"أخ":125217,"å¤ļãģĦ":125218,"à¸Ķัà¸ĩ":125219,"Ø´Ùģ":
125220,"ãģ£ãģ¦ãģĦãģ¾ãģĻ":125221,"×Ľ×ł×¡":125222,"ÑĨе":125223,"еÑģп":125224,"ÙħاÙħ":125225,"à¸ŀืà¹īà¸Ļ":125226,"иÑĩеÑģки":125227,"خد":125228,"ÙĥÙĪÙħ":125229,"Ġ×Ķר×IJש":125230,"تاب":125231,"é£Łãģ¹":125232,"ืà¸Ļ":125233,"оÑĢо":125234,"Ġböl":125235,"×ķ×Ĺ×ĵ":125236,"دÙĬر":125237,"ắm":125238,"دع":125239,"ãģķãģĽ":125240,"à¸ĺร":125241,"à¸ĺรรม":125242,"ãģĭãĤĤ":125243,"å¤ļãģı":125244,"rä":125245,"سع":125246,"×Ļ׾×Ķ":125247,"ضر":125248,"ĠاÙĦشر":125249,"×ĸ×ķר":125250,"×¢×ijר":125251,"ạm":125252,"алÑĮно":125253,"رÙĨ":125254,"اÙħج":125255,"׼×ļ":125256,"dıģ":125257,"ден":125258,"ضا":125259,"ÙĦÙĬÙħ":125260,"Ġê·¸ëŁ¬":125261,"تÙħاع":125262,"ارÙĬØ®":125263,"à¹Ĥà¸ķ":125264,"ĠÑģÑĢед":125265,"Ġ׳×ķס":125266,"ÙĤبÙĦ":125267,"оÑĤов":125268,"leÅŁtir":125269,"ĠмеÑģÑĤ":125270,"سÙĦÙħ":125271,"Ġעצ":125272,"ĠاÙĦسÙĦ":125273,"еÑĤÑĮ":125274,"ابة":125275,"нак":125276,"สà¸ĸาà¸Ļ":125277,"Ġ×ij׳":125278,"à¸ļัà¸Ļ":125279,"׼׳":125280,"ĠÃ¶ÄŁ":125281,"ãģ¨è¨Ģ":125282,"uyến":125283,"diÄŁ":125284,"áºŃu":125285,"ÑĢаÑģ":125286,"ãĤ·ãĥ§ãĥ³":125287,"nız":125288,"×ķ×ĵ×Ķ":125289,"تس":125290,"ÙħاÙĦ":125291,"à¹Ģหà¸ķุ":125292,"ยว":125293,"à¸ŀัà¸ģ":125294,"ãģĦãģªãģĦ":125295,"ĠкаÑĩ":125296,"ลà¹Į":125297,"×¨×Ľ×ª":125298,"ÅŁtur":125299,"×ŀ×ķס":125300,"ãģ¥":125301,"бол":125302,"عÙħاÙĦ":125303,"×ķרת":125304,"ÑĨион":125305,"ศึà¸ģ":125306,"à¸ı":125307,"ÑĢен":125308,"اسÙĬ":125309,"ائر":125310,"à¹Ĥà¸Ľà¸£":125311,"Ġseç":125312,"غÙĬ":125313,"ÑįÑĤ":125314,"енн":125315,"ãģªãģ®":125316,"×Ļש×Ķ":125317,"×Ļפ×ķר":125318,"ãģŁãĤģãģ«":125319,"زة":125320,"Ġçoc":125321,"ãĤ¯ãĥª":125322,"ÑĪен":125323,"ãĤıãģij":125324,"رÙĬد":125325,"ĠÑĢаÑģÑģ":125326,"Ùĥات":125327,"สà¸Ńà¸ļ":125328,"ceÄŁi":125329,"ãĤ¿ãĤ¤":125330,"à¸ļร":125331,"ĠاÙĦبر":125332,"׳×ķ×¢":125333,"rün":125334,"راض":125335,"ศาส":125336,"à¸ķรà¹Į":125337,"ãģįãģŁ":125338,"×ķ׾×ĵ":125339,"еÑĢи":125340,"íĹĺ":125341,"ắp":125342,"تعÙĦ":125343,"Ùĥد":125344,"иÑĤелÑĮно":125345,"Ø·Ùģ":125346,"ĠавÑĤом":125347,"Ġ×ŀצ":125348,"ÑĪиÑħ":125349,"اتÙģ":125350,"ĠÑħоÑĤ":125351,"ÙİØ§":125352,"ãģıãĤĭ":125353,"×Ķפ":125354,"à¹Ĥà¸Ĺ":125355,"à¹ģà¸ŀ":125356,"à¹Īà¸Ńย":125357,"ĠاÙĦÙħØ´":125358,"à¸ģารà¸ĵà¹Į":125359,"аниз":125360,"×Ķ׾":125361,"ظÙħ":125362,"ยุ":125363,"liÄŁ":125364,"à¹Ħà¸Ĥ":125365,"à¸ĸืà¸Ń":125366,"öz":125367,"ãģijãģ¦":125368,"à¹Ģà¸ľ":125369,"ุม":125370,"ãĥĹãĥ¬":125371,"Ġ×Ķ×IJ×Ĺר":125372,"ختÙĦÙģ":125373,"à¸İ":125374,"ÙĦاØŃ":125375,"Ġdüzen":125376,"צ×Ķ":125377,"ساء":125378,"×ķר×ļ":125379,"×ķ×ĵ×Ļ":125380,"ÑĢаÑĦ":125381,"ÅŁtır":125382,"ãģ«åħ¥":125383,"ãģĪãģ°":125384,"صÙĪÙĦ":125385,"ĠÐľÐ¾Ñģ":125386,"اÙĩر":125387,"ãģ£ãģ":125388,"ĠлÑİб":125389,"×Ļ×¢×Ķ":125390,"Ġ×Ķ×ŀ×§":125391,"สิà¸Ĺ":125392,"สิà¸Ĺà¸ĺิ":125393,"×Ļ׳×Ŀ":125394,"ÙĦاÙģ":125395,"à¸ŀัà¸Ļà¸ĺ":125396,"×ķ×IJ×Ķ":125397,"มั":125398,"à¸Ĥà¸ĵะ":125399,"доÑĢ":125400,"ãģ¨ãģª":125401,"à¸ģระà¸Ĺ":125402,"acı":125403,"×ķ׾×ķ×Ĵ":125404,"ÑĥÑĪ":125405,"ãĥ¥ãĥ¼":125406,"ãĥ¦":125407,"Ùħست":125408,"ĠaÅŁ":125409,"שק":125410,"פת×Ĺ":125411,"ายà¸Ļ":125412,"íĩ":125413,"ë¢":125414,"ï·":125415,"íī":125416,"ìµ":125417,"ì¬":125418,"ðĿĽ":125419,"ìĴ":125420,"ëĻ":125421,"ê§":125422,"áĸ":125423,"â¨":125424,"â±":125425,"áĺ":125426,"ðĸ":125427,"àł":125428,"áĶ":125429,"ðIJŃ":125430,"ững":125431,"Å©ng":125432,"Ġ×Ķת":125433,"ĠاÙĦا":125434,"Ġ×ŀת":125435,"à¸ĸึà¸ĩ":125436,"òn":125437,"á»ĭnh":125438,"нÑĭм":125439,"Ġcả":125440,"à¸Ķู":125441,"Ġà¹ģà¸ķà¹Ī":125442,"Ġ×ij×Ķ":125443,"ói":125444,"ãģ¨ãģĹãģ¦":125445,"úng":125446,"Ġذ":125447,"Ġ×Ķ׳":125448,"ĠبÙĨ":125449,"ÙĦاÙĦ":125450,"à¹Ħà¸Ĺย":125451,"á»ĩp":125452,"tı":125453,"มัà¸Ļ":125454,"ằng":125455,"á»ijt":125456,"ком":125457,"à¸ĭึà¹Īà¸ĩ":125458,"à¸Ħรัà¸ļ":125459,"à¸ļà¹īาà¸Ļ":1254
60,"ĠاÙĦÙĬ":125461,"lü":125462,"ÙĪØ³":125463,"ãģłãģ£ãģŁ":125464,"à¹Ģà¸ĩ":125465,"Ġê³µ":125466,"нÑĥ":125467,"ãĤĪãĤĬ":125468,"мÑĥ":125469,"à¹Ģà¸Ĥา":125470,"ãĤĢ":125471,"ние":125472,"ãģ«ãģªãĤĭ":125473,"áºŃy":125474,"ĠÙĪØ§":125475,"볤":125476,"ש×ķ":125477,"áp":125478,"×ĵ×ķ":125479,"ãģ§ãģĹãģŁ":125480,"عض":125481,"Ñģкой":125482,"æĦŁãģĺ":125483,"ÑİÑĤÑģÑı":125484,"Ġ×Ļ׼×ķ׾":125485,"ãĤĵãģł":125486,"ви":125487,"à¹Ģลà¹Īà¸Ļ":125488,"ìĿ´ëĭ¤":125489,"ĠÙĦÙĩ":125490,"à¸Ħืà¸Ń":125491,"تÙĥ":125492,"ÙħÙĥÙĨ":125493,"aģı":125494,"׳×ĵ":125495,"민":125496,"à¹Ħว":125497,"สำห":125498,"สำหรัà¸ļ":125499,"Ñģлед":125500,"tır":125501,"ĠÙĦÙĬ":125502,"ĠاÙĦعÙħÙĦ":125503,"×ij×ķת":125504,"×ij×Ļ×Ŀ":125505,"à¸Ħำ":125506,"à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩ":125507,"lıģı":125508,"ืà¸Ńà¸ĩ":125509,"جد":125510,"íŀĪ":125511,"ìĭ¬":125512,"×¢×ķת":125513,"สิà¸Ļ":125514,"Ñĩи":125515,"رض":125516,"à¹Ģà¸Ľà¸´à¸Ķ":125517,"à¸Ħà¹Īา":125518,"ìĦł":125519,"ÙĪØ±Ø©":125520,"×§×ĺ":125521,"ìľł":125522,"عÙħÙĦ":125523,"×IJ×Ļ×Ŀ":125524,"׾×Ļ×Ŀ":125525,"à¹ĥหà¸į":125526,"à¹ĥหà¸įà¹Ī":125527,"ừa":125528,"á»įi":125529,"ãģ¶":125530,"ÃŃch":125531,"ãĥĩãĤ£":125532,"×ķר×Ļ×Ŀ":125533,"Ñģо":125534,"ìķ½":125535,"ова":125536,"ÑĩаÑģÑĤ":125537,"à¹Ģà¸Īà¹īา":125538,"пÑĢо":125539,"Ġ×ŀ×Ĺ":125540,"ãĥİ":125541,"×ķ×Ļ×ķת":125542,"Ġде":125543,"ë§Ī":125544,"ì§ģ":125545,"×Ļפ×Ķ":125546,"ĠاÙĦعاÙĦÙħ":125547,"르":125548,"ר×IJ×Ķ":125549,"uyá»ĥn":125550,"×¢×Ļ":125551,"มืà¸Ń":125552,"Ø¥ÙĨ":125553,"รู":125554,"Ġز":125555,"×Ļ×ķ×Ŀ":125556,"à¸ķà¹īà¸Ļ":125557,"ãģ¦ãģĦãģ¾ãģĻ":125558,"ÙħاÙĨ":125559,"ĠÐ¥":125560,"à¸Ľà¸£à¸°à¹Ģà¸Ĺศ":125561,"ỳ":125562,"׾×ij":125563,"à¹Ģà¸Ķà¹ĩ":125564,"ãģŁãģ¡":125565,"à¸Ĺีม":125566,"à¸Ļะ":125567,"ìŰ":125568,"ĠìłĢ":125569,"ÙĦÙĩ":125570,"ợi":125571,"ĠاÙĦز":125572,"دار":125573,"ãĤ³ãĥ³":125574,"мин":125575,"à¹ģหà¹Īà¸ĩ":125576,"à¸Ķัà¸ļ":125577,"׼ר":125578,"жа":125579,"íĸĪ":125580,"×ŀ×ĸ":125581,"ợi":125582,"à¸Ķา":125583,"Ġعبد":125584,"à¹ģร":125585,"×IJתר":125586,"×¢×ł×Ļ":125587,"à¹Ģà¸Ħ":125588,"×ķצר":125589,"ì§Ģë§Į":125590,"ائÙħ":125591,"أس":125592,"uyá»ģn":125593,"Ġ×IJ׳":125594,"×Ĺ׳×ķ":125595,"×ĸ×Ļ":125596,"รà¹īาà¸Ļ":125597,"ĠÐłÐ¾Ñģ":125598,"ĠÐłÐ¾ÑģÑģ":125599,"ربÙĬØ©":125600,"tür":125601,"ãĤĭãģĵãģ¨":125602,"ظر":125603,"бÑĭ":125604,"à¸Ĺีà¹Īสุà¸Ķ":125605,"Ġצר":125606,"èĩªåĪĨ":125607,"лаÑģ":125608,"ĠÑıв":125609,"ĠÑıвлÑı":125610,"à¸ŀรà¹īà¸Ńม":125611,"à¸Ńาà¸Ī":125612,"à¸ļริà¸ģาร":125613,"Ġçı":125614,"ëįĺ":125615,"ĠاÙĦÙħست":125616,"تش":125617,"ש×ķ×ij":125618,"ãĤ´":125619,"Ġyapıl":125620,"ĠاÙĦذ":125621,"ุà¹Īม":125622,"à¸ĸà¹īา":125623,"ìĦ¤":125624,"ì°¨":125625,"ваÑĢ":125626,"à¹Ģà¸ŀิà¹Īม":125627,"Æ°á»Ľi":125628,"Ùĥس":125629,"à¸Ńยาà¸ģ":125630,"ãģ¦ãĤĤ":125631,"Ġгод":125632,"ÙĬار":125633,"à¸ķà¸Ńà¸Ļ":125634,"ĠигÑĢ":125635,"à¹Ħà¸Ķà¹īรัà¸ļ":125636,"ĠاÙĦÙħر":125637,"ÙĤت":125638,"Ġëĺ":125639,"ĠëĺIJ":125640,"ẩn":125641,"ãģĻãĤĭãģĵãģ¨":125642,"×Ĵ×Ŀ":125643,"Ġ×ij×ij":125644,"تد":125645,"ÙĪØ§Ø±":125646,"ãĤ®":125647,"пол":125648,"Ġмог":125649,"ترÙĥ":125650,"ÙĪØ«":125651,"Ġçık":125652,"اة":125653,"à¹Ģà¸Ķียว":125654,"มีà¸Ħวาม":125655,"Ġ×ŀ×Ĵ":125656,"صÙģ":125657,"ĠТак":125658,"Ġ×Ľ×ª":125659,"×Ļ×ĵ×Ļ":125660,"овоÑĢ":125661,"ầy":125662,"สิà¹Īà¸ĩ":125663,"بت":125664,"ürü":125665,"ÙĨج":125666,"หลัà¸ģ":125667,"×Ļ×Ķ×Ŀ":125668,"ÙĤص":125669,"зÑĭ":125670,"×Ľ×ª×ij":125671,"ưu":125672,"mız":125673,"ĠìĦ¸":125674,"лог":125675,"ÙħÙĬÙĦ":125676,"ÙĬج":125677,"íĴĪ":125678,"à¸ŀà¸ļ":125679,"หัว":125680,"зна":125681,"רק":125682,"à¹Ĥร":125683,"Ġ×ijס":125684,"ĠBaÅŁkan":125685,"ĠëͰ":125686,"à¸Ńัà¸Ļ":125687,"ีà¹Īยว":125688,"неÑģ":125689,"à¹Ģà¸Ķิà¸Ļ":125690,"ÙĬاÙĨ":125691,"×ķ׾×Ļ":125692,"اخت":125693,"צ×ķת":125694,"ãģĵãģĵ":125695,"ĠاÙĦاÙĨ":125696
,"ĠпÑĢоÑĨ":125697,"ãģ¾ãģł":125698,"×Ľ×¡":125699,"ĠاÙĦØ¢":125700,"ÙĬز":125701,"ĠاÙĦدÙĪÙĦ":125702,"ĠíķĺëĤĺ":125703,"ضع":125704,"ê»ĺ":125705,"ÅĽwi":125706,"ยิ":125707,"ãģ¡ãĤĥãĤĵ":125708,"ĠÙħØ´":125709,"à¸ĺี":125710,"ãģ¨ãģį":125711,"׳×Ļ×ķת":125712,"Ġë¯":125713,"Ġ미":125714,"Ġsı":125715,"ëĭĪê¹Į":125716,"Ġпл":125717,"غÙĦ":125718,"à¹ģรà¸ĩ":125719,"بÙĬر":125720,"ãģĤãĤĬãģ¾ãģĽãĤĵ":125721,"ê·¼":125722,"Ġyüz":125723,"ĠdeÄŁer":125724,"åł´åIJĪ":125725,"ỡ":125726,"маÑĤ":125727,"ราà¸Ĭ":125728,"ÙĪØ±ÙĬ":125729,"жен":125730,"ãģ¾ãĤĬ":125731,"ãģ®ä¸Ń":125732,"×Ļ×ĵ×¢":125733,"à¸Ńุ":125734,"à¸ļà¸Ńล":125735,"à¸Ľà¸±à¸įหา":125736,"زÙħ":125737,"ÄŁa":125738,"à¸Ńืà¹Ī":125739,"à¸Ńืà¹Īà¸Ļ":125740,"пл":125741,"ĠнеобÑħодим":125742,"׼×ij":125743,"à¹Ģศ":125744,"קר×Ķ":125745,"ì²ĺ":125746,"볨":125747,"×ŀ×§×ķ×Ŀ":125748,"jÄħc":125749,"ÙĩÙĦ":125750,"Ġ×¢×ij×ķ×ĵ":125751,"à¹Ħมà¹ī":125752,"à¸ģลัà¸ļ":125753,"×ķ׼׾":125754,"×§×ĵ":125755,"اÙĦÙĬØ©":125756,"رÙĩ":125757,"ãģijãĤĮãģ°":125758,"ĠÙĨÙ쨳":125759,"ãĤ¢ãĥ«":125760,"ìĹĪëĭ¤":125761,"×§×ķר":125762,"неÑĢ":125763,"باب":125764,"ãĤ¶":125765,"سبب":125766,"ÙĦÙĬÙĦ":125767,"صÙĨ":125768,"صدر":125769,"ếm":125770,"à¸Ĭà¹Īวà¸ĩ":125771,"ØŃÙĨ":125772,"Ġ×ij×Ĵ":125773,"×ŀ×ķ×¢":125774,"׾×Ĺ":125775,"大ãģį":125776,"تب":125777,"неÑĤ":125778,"×Ļ×ij×Ķ":125779,"бл":125780,"ãĥĹãĥª":125781,"اصة":125782,"ãģ¤ãģij":125783,"×Ļ×ŀ×ķש":125784,"ãģĮãģĤ":125785,"ëĭ´":125786,"ãģĭãĤĤãģĹ":125787,"ãģĭãĤĤãģĹãĤĮ":125788,"ãģ¡ãĤī":125789,"×ij×ĺ":125790,"ĠbaÄŁ":125791,"×Ļ×Ĺס":125792,"×ij×ķ×¢":125793,"ลี":125794,"פע×Ļ׾":125795,"ими":125796,"gÅĤ":125797,"Ġиме":125798,"خداÙħ":125799,"×IJ×Ļר":125800,"Ġyapt":125801,"ãģ¨ãģĦ":125802,"à¸ĩà¹Īาย":125803,"׾×Ļ×ķ":125804,"ØŃدث":125805,"راÙĤ":125806,"ĠÄIJi":125807,"ادر":125808,"ãģĵãģ¨ãĤĤ":125809,"×ij×Ļר":125810,"Ġвз":125811,"ضاÙģ":125812,"ת×ķ׼":125813,"ÑĢом":125814,"رات":125815,"à¹Ģà¸Ĺà¹Īา":125816,"ãģĺãĤĥ":125817,"ãģĿãģĵ":125818,"اجتÙħاع":125819,"à¹īà¸Ńà¸Ļ":125820,"ÙĤÙħ":125821,"본":125822,"Äŀ":125823,"ש×Ļ×ķ":125824,"×ij׳×Ļ":125825,"ìľĦìĽIJ":125826,"à¹ģà¸Ī":125827,"×Ĺ×ķר":125828,"دÙĬÙĨØ©":125829,"تط":125830,"ằm":125831,"òa":125832,"ยà¸Ńà¸Ķ":125833,"Ġëĭ¹":125834,"สุà¸Ĥ":125835,"×ĵר×ļ":125836,"دÙĨ":125837,"سÙĬÙĨ":125838,"ÙĪÙĤÙģ":125839,"ÑĨÑĭ":125840,"гоÑĤов":125841,"еждÑĥ":125842,"à¸ŀวà¸ģ":125843,"اÙĤتص":125844,"اÙĤتصاد":125845,"czÄĻ":125846,"niÄĻ":125847,"ÑĢеб":125848,"ØŃÙĪ":125849,"à¸Ĺà¹Į":125850,"ãĤĪãģŃ":125851,"дж":125852,"à¸ģลà¹Īาว":125853,"دÙĬØ«":125854,"ãĤ³ãĥŁ":125855,"ÙĤÙĪÙħ":125856,"ĠتØŃ":125857,"à¹Ģà¸ķิ":125858,"اÙ쨏":125859,"à¸Īุ":125860,"رÙĬاض":125861,"×ŀש×ļ":125862,"à¹Ĥย":125863,"еÑĢе":125864,"ãģ¿ãģŁãģĦ":125865,"ìĿ´ëĿ¼":125866,"ĠاÙĦÙħÙĪ":125867,"ĠÑģÑĤо":125868,"à¹Ģรà¹ĩว":125869,"ĠдеÑĤ":125870,"ĠÑģдел":125871,"à¹Ģà¸Ĭืà¹Īà¸Ń":125872,"פ׳×Ļ":125873,"ÙĪØ¶ÙĪØ¹":125874,"×ijס":125875,"à¹ģà¸Ķ":125876,"óc":125877,"ริม":125878,"ÑĢад":125879,"ìĪł":125880,"ãĥ¼ãĤº":125881,"ãģ«ãģĬ":125882,"ино":125883,"פ×Ļ׾":125884,"à¸Ĭัà¹Īà¸Ļ":125885,"×Ĺ×ĵש":125886,"à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ":125887,"׳×Ļס":125888,"غرب":125889,"ãĤ¸ãĥ£":125890,"สัà¸ĩ":125891,"à¹Ģà¸Ĺีà¹Ī":125892,"à¹Ģà¸Ĺีà¹Īยว":125893,"ëŁ¼":125894,"à¹ģà¸Ł":125895,"ãĥ¼ãĤ·":125896,"ãĥ¼ãĤ·ãĥ§ãĥ³":125897,"Ġвозмож":125898,"جÙħÙĪØ¹":125899,"×ijר×Ļ×Ŀ":125900,"ãĥĪãĥ©":125901,"ĠкаÑĩеÑģÑĤв":125902,"Ø·ÙĬ":125903,"ÑĤÑı":125904,"צ×ķ×¢":125905,"ģını":125906,"عÙĦÙī":125907,"اذ":125908,"ÙĪØ§ÙĤع":125909,"ÙħÙĪØ§":125910,"ائÙĬÙĦ":125911,"кол":125912,"á»ģm":125913,"à¸ľà¸¥à¸´à¸ķ":125914,"×Ļ׳×ĺר":125915,"سÙĥ":125916,"ש×Ļר":125917,"ศึà¸ģษา":125918,"à¸ļั":125919,"ÑĩаÑģ":125920,"×ķפ×Ķ":125921,"×Ļפ×ķ׾":125922,"ĠاÙĦساب":125923,"رÙĬب":125924,"ĠاÙĦبÙĬ":125925,"ãĤ¹ãĥĨ":125926,"Ñĩен":125927,"à¹ģà¸
ľ":125928,"Ġ׳ש":125929,"زÙĬد":125930,"ØŃاد":125931,"ëįĶ":125932,"رÙĪØ¹":125933,"à¸Ĺุà¸Ļ":125934,"สมา":125935,"czeÅĦ":125936,"×Ļ×ĵ×Ķ":125937,"ãģ§ãģĤ":125938,"Ġçocuk":125939,"خب":125940,"à¸ļาย":125941,"à¸Ľà¸£à¸°à¸Ĭา":125942,"×ŀש׾":125943,"ãģªãģĭ":125944,"à¸ģาย":125945,"ãĥģãĥ£":125946,"аÑĢи":125947,"ĠÑĩа":125948,"à¸Ķำ":125949,"à¸Ĺัà¹Īว":125950,"ÑĥÑħ":125951,"Ġöz":125952,"Ġì¢ĭ":125953,"جرÙĬ":125954,"ائÙĤ":125955,"à¸łà¸±à¸¢":125956,"طار":125957,"دارة":125958,"Ä©nh":125959,"Ø«ÙĨ":125960,"zellik":125961,"اÙĦت":125962,"Ġgeli":125963,"ãĥķãĤ©":125964,"олод":125965,"ربع":125966,"שת×ŀש":125967,"à¸ļรร":125968,"íĿ¬":125969,"Ġürün":125970,"Ġê·¸ëłĩ":125971,"ศาสà¸ķรà¹Į":125972,"ãģľ":125973,"×Ļ×ij׾":125974,"ĠпÑĢедÑģÑĤав":125975,"سطÙĬÙĨ":125976,"ãĤĴ使":125977,"ĠпомоÑī":125978,"×ķקר":125979,"ãĥ¯ãĥ¼":125980,"Ġyönet":125981,"×Ļקר":125982,"à¸Ĥา":125983,"еÑĢиал":125984,"ØŃÙģ":125985,"Ġ×Ļצ":125986,"à¸Ĺิ":125987,"売":125988,"à¸Ļà¸Ńà¸ģ":125989,"×ķ׼ר":125990,"íĻľ":125991,"á»§y":125992,"ĠاÙĦÙĤر":125993,"×Ļ×ij×ķת":125994,"ÅĽni":125995,"Ùħشار":125996,"ượt":125997,"ĠÙĦدÙĬ":125998,"ÑĤел":125999,"ĠØ¥ÙĦÙĬ":126000,"عÙĦÙĪÙħ":126001,"ìķĺ":126002,"виÑĤ":126003,"à¸Ħะ":126004,"yrı":126005,"ãģ¨ãģ£ãģ¦":126006,"à¹Ģà¸ī":126007,"à¸ĸาม":126008,"ÙĤار":126009,"عÙĦاÙħ":126010,"ặng":126011,"ÙħÙĴ":126012,"×Ļ×ŀת":126013,"سبة":126014,"ãĤ¯ãĥ©":126015,"×ķסף":126016,"ĠпÑĢин":126017,"ãģĦãĤį":126018,"ساس":126019,"عتبر":126020,"วิà¸Ĺย":126021,"วิà¸Ĺยา":126022,"سÙĥر":126023,"ãĤ·ãĥ§":126024,"ãģģ":126025,"ัà¸ģษ":126026,"×ij×ķ×Ķ":126027,"หย":126028,"ãģ¾ãĤĮ":126029,"ĠоÑĢганиз":126030,"казал":126031,"ĠÑģвÑıз":126032,"uyết":126033,"ĠпÑĢоиз":126034,"Ġ×§×ĺ":126035,"à¹ģà¸ģà¹ī":126036,"пÑĥÑģ":126037,"Ġê·¸ê²ĥ":126038,"ëĬIJ":126039,"лекÑģ":126040,"ãĥ¼ãĥĹ":126041,"à¸ķำ":126042,"ת×Ĺ×Ļ׾":126043,"à¸Ńà¸ĩà¸Ħà¹Į":126044,"ẵ":126045,"׳צ":126046,"أش":126047,"Ø´Ùĩ":126048,"ยะ":126049,"à¸ģà¸İ":126050,"ĠاÙĦإسÙĦاÙħ":126051,"едÑĮ":126052,"ãģ²ãģ¨":126053,"ëıĦë¡Ŀ":126054,"ãģ©ãģ®":126055,"Ñĥв":126056,"еÑĩение":126057,"ĠاÙĦتج":126058,"ãģ«è¡Į":126059,"Ġпозв":126060,"ãĤıãĤĬ":126061,"ÙĦاث":126062,"íķĺìĺĢ":126063,"ĠмаÑĢ":126064,"ĠkonuÅŁ":126065,"ãĥ¬ãĤ¹":126066,"ãĤĴæĮģ":126067,"ĠоÑģнов":126068,"×Ĺ×ij":126069,"ÙĪØ¬ÙĪØ¯":126070,"פ×ķף":126071,"воÑĢ":126072,"Ġник":126073,"ãģĭãĤĭ":126074,"ÅŁtırma":126075,"×Ļס×ĺ":126076,"Ø£ÙĦ":126077,"หà¹Į":126078,"иона":126079,"лÑĮн":126080,"ĠгоÑģ":126081,"ĠÐľÐ¾Ñģк":126082,"ÑĢоб":126083,"×ķ×IJ×Ļ":126084,"ãģĬãĤĬãģ¾ãģĻ":126085,"ãģ£ãģ±":126086,"кл":126087,"à¸Ļà¸Ķà¹Į":126088,"رÙĬÙģ":126089,"اسب":126090,"ĠÑĢеÑĪ":126091,"Ġдол":126092,"ãģ¹ãģį":126093,"×Ļ×ij×ķר":126094,"меÑī":126095,"ĠнаÑĪ":126096,"à¹ģà¸Ľà¸¥":126097,"ÑĢиÑĤ":126098,"кÑĥÑģ":126099,"иÑĢа":126100,"аÑĤÑĥÑĢ":126101,"ÙĪØ§ØµÙĦ":126102,"à¹Ģà¸ľà¸¢":126103,"à¸Ńำ":126104,"à¹Ģà¸ģิà¸Ļ":126105,"غÙħ":126106,"ãģĻãģİ":126107,"lıkl":126108,"ÅĦsk":126109,"견":126110,"×Ļ׼×Ķ":126111,"×Ĺש×ij":126112,"ÙĪØ±ÙĬØ©":126113,"ĠдейÑģÑĤв":126114,"×Ĺ׾×ĺ":126115,"Ġ׾×ŀ×¢":126116,"צ׾×Ļ×Ĺ":126117,"еÑĩа":126118,"ÙģØ§Ø¹":126119,"×Ĵ×Ļ×ĵ":126120,"áºŃm":126121,"ÄĻb":126122,"شع":126123,"ãģıãĤĬ":126124,"à¸ŀุ":126125,"едеÑĢ":126126,"à¸Ĥà¸Ļ":126127,"à¸Ħาร":126128,"ĠболÑĮÑĪ":126129,"ãģıãģªãĤĬ":126130,"à¸ĵา":126131,"×ĵ×ķ×Ĵ":126132,"Ġмн":126133,"ä¸ĬãģĮ":126134,"ç¶ļãģį":126135,"ฤษ":126136,"à¸Ĩ":126137,"Ø®ÙĬ":126138,"à¹Ģà¸Ĺà¸ŀ":126139,"สัม":126140,"à¹Ģสà¸Ļ":126141,"à¹Ģสà¸Ļà¸Ń":126142,"ãĥ´":126143,"ĠиÑģÑĤ":126144,"باشر":126145,"ĠÑĥÑĢов":126146,"×ŀ×ķ×ĸ":126147,"abı":126148,"waż":126149,"×ķצ×IJ×Ķ":126150,"ÑĤвеÑĢ":126151,"à¸ŀัà¸Ļà¸ĺà¹Į":126152,"׳×Ĵ×ĵ":126153,"ãĤĭãģĵãģ¨ãģĮãģ§ãģį":126154,"ĠÑĤÑĢеб":126155,"à¸ģรุà¸ĩ":126156,"ØŃتاج":126157,"à¹Ģà¸Ħล":126158,"ãĨ"
:126159,"ÄĻtr":126160,"Ġszczeg":126161,"Ġרש":126162,"à¸Ĺà¸ĺ":126163,"Ġнек":126164,"ĠнекоÑĤоÑĢ":126165,"вÑĪ":126166,"Ь":126167,"à¹Īวย":126168,"ลุ":126169,"бÑĢÑı":126170,"หมูà¹Ī":126171,"à¹ģà¸ķà¸ģ":126172,"ר׼×Ļ×Ŀ":126173,"Ġíĸī":126174,"ãi":126175,"Ùĥرة":126176,"âŃ":126177,"íIJ":126178,"ãį":126179,"áģ":126180,"â®":126181,"â¥":126182,"ì®":126183,"à¿":126184,"â¿":126185,"áĤ":126186,"á¤":126187,"âł":126188,"íŁ":126189,"ðIJį":126190,"ðIJ°":126191,"ðĿĨ":126192,"ðŁĪ":126193,"Ġ×¢×ľ":126194,"ĠعÙĨ":126195,"ĠÙħع":126196,"Ġ×ĸ×Ķ":126197,"ĠÙħا":126198,"ĠmÃł":126199,"Ġdụ":126200,"á»ĩc":126201,"аÑħ":126202,"sı":126203,"íķĺê³ł":126204,"Ġ×ķ×ij":126205,"ĠÐŁÐ¾":126206,"×ķתר":126207,"ĠÙĦÙħ":126208,"Ġ×ķ׾":126209,"ãģĹãģ¦ãģĦãĤĭ":126210,"Ġ×ŀ×Ļ":126211,"ĠبÙĬÙĨ":126212,"за":126213,"ĠÙĥاÙĨ":126214,"Ġ×Ķ×Ļ×Ķ":126215,"ëħĦ":126216,"×IJ×ķ":126217,"ди":126218,"ĠпеÑĢе":126219,"dı":126220,"Ġ׾ש":126221,"Ġש×ŀ":126222,"ãģĮãģĤãĤĭ":126223,"ãģĦãģĦ":126224,"ÑĢе":126225,"×§×ķ":126226,"или":126227,"ме":126228,"ÙĬت":126229,"ãģ§ãģĤãĤĭ":126230,"Ġво":126231,"à¹ĥหม":126232,"à¹ĥหมà¹Ī":126233,"Ġש×ij":126234,"Ġà¹Ĥà¸Ķย":126235,"ÙĬÙĩ":126236,"ãģ§ãģĻãģĮ":126237,"ãģ¨ãģ¯":126238,"ר×ķ":126239,"Ġà¸ĭึà¹Īà¸ĩ":126240,"ãģ§ãģįãĤĭ":126241,"мо":126242,"à¹Ģà¸ŀืà¹Īà¸Ń":126243,"צ×ķ":126244,"×ĺ×ķ":126245,"ìķĪ":126246,"Ġhá»į":126247,"à¹Ģà¸ĩิà¸Ļ":126248,"ĠاÙĦب":126249,"Ġมี":126250,"물":126251,"Ñģе":126252,"ëĵ¤ìĿ´":126253,"Ġë§IJ":126254,"ĠlỼ":126255,"aÅĤ":126256,"×Ĺ×ijר":126257,"Ġdá»±":126258,"ÙĬØ«":126259,"Ġthá»ĭ":126260,"à¸ģà¹Īà¸Ńà¸Ļ":126261,"Ġ×ij׼׾":126262,"ãģ¸":126263,"ã썿ĢĿãģĦãģ¾ãģĻ":126264,"ảnh":126265,"ยา":126266,"Ù쨧":126267,"สี":126268,"à¸ķา":126269,"ë²ķ":126270,"ãĥªãĥ¼":126271,"ราà¸Ħา":126272,"Ġ×ķ׾×IJ":126273,"ãģ¨ãģĵãĤį":126274,"à¹Ģลืà¸Ń":126275,"diÄŁi":126276,"ÙĪØ§ÙĨ":126277,"Ġ׾×Ķת":126278,"รวม":126279,"פ×Ļ×Ŀ":126280,"à¸ľà¸¡":126281,"жи":126282,"cı":126283,"ÑĢод":126284,"ĠkarÅŁÄ±":126285,"×Ĵ×ķ":126286,"ãģ«ãģ¤":126287,"ãģ«ãģ¤ãģĦãģ¦":126288,"rÃł":126289,"×Ļ×ķתר":126290,"ĠìĨĮ":126291,"×§×Ķ":126292,"ÑģÑĤво":126293,"ãģijãģ©":126294,"gé":126295,"à¸Ķà¹īาà¸Ļ":126296,"çļĦãģ«":126297,"ĠÙĬÙħÙĥÙĨ":126298,"ìĨį":126299,"ÙĬÙĥ":126300,"à¹Ħวà¹ī":126301,"Ñģкий":126302,"ìm":126303,"Ġ׾×IJ×Ĺר":126304,"à¸Ńาหาร":126305,"Ġà¹Ģà¸ŀ":126306,"ราะ":126307,"ลูà¸ģ":126308,"ÑģÑĤа":126309,"Ġìľł":126310,"ÙĤÙĪÙĦ":126311,"боÑĢ":126312,"Ñģкого":126313,"หลัà¸ĩ":126314,"à¸Ĥà¹Īาว":126315,"à¹Ģมืà¸Ńà¸ĩ":126316,"ê°ģ":126317,"tÃł":126318,"ÙĬÙĬÙĨ":126319,"عرض":126320,"ë°©":126321,"ĠëıĻ":126322,"Ġà¹Ģà¸Ľ":126323,"Ġà¹Ģà¸Ľà¹ĩà¸Ļ":126324,"çi":126325,"liÄŁi":126326,"ìĹIJê²Į":126327,"ãĤ¿ãĥ¼":126328,"Ġ×ľ×ª":126329,"פ×ķת":126330,"à¸Ĥà¸Ń":126331,"رس":126332,"ìłIJ":126333,"à¸ľà¹Īาà¸Ļ":126334,"ÑĦи":126335,"جÙĨ":126336,"ì¢ħ":126337,"Ġ×Ķפ":126338,"Ġngo":126339,"á»ĭa":126340,"Ġtá»ķ":126341,"Ġ그리":126342,"à¹Ģมืà¹Īà¸Ń":126343,"ذÙĥر":126344,"ìĸij":126345,"ìĹŃ":126346,"×ĺ׾":126347,"kı":126348,"ĠعÙħÙĦ":126349,"ĠعÙĨد":126350,"à¸ĭืà¹īà¸Ń":126351,"Ġê±°":126352,"ве":126353,"rü":126354,"à¹Ģà¸Ńา":126355,"สà¹Į":126356,"à¸Īà¸Ļ":126357,"סת":126358,"Ġgiả":126359,"ãĤĭãģ¨":126360,"à¸ģำลัà¸ĩ":126361,"ней":126362,"à¸Īริ":126363,"à¸Īริà¸ĩ":126364,"Ġëį":126365,"ĠëįĶ":126366,"à¸Ħà¹Īะ":126367,"ìn":126368,"Ġsüre":126369,"Ġquy":126370,"à¸ļาà¸ĩ":126371,"åıĸãĤĬ":126372,"ר×Ĺ":126373,"×ijת":126374,"ãģĮãģĤãĤĬãģ¾ãģĻ":126375,"רש":126376,"ìĹIJëĬĶ":126377,"Ġ×IJפשר":126378,"ayı":126379,"ãģĮãĤī":126380,"ØŃب":126381,"анÑģ":126382,"سÙĪ":126383,"ĠпÑĢе":126384,"دÙĪ":126385,"ãģ«ãĤĪ":126386,"à¹Ģà¸ģม":126387,"สูà¸ĩ":126388,"makt":126389,"maktad":126390,"maktadır":126391,"Ġönem":126392,"×Ļ×ŀ×Ļ×Ŀ":126393,"бо":126394,"ÙĪÙĬØ©":126395,"à¸£à¸¹à¸Ľ":126396,"à¹Ĥลà¸ģ":1
26397,"ÙħÙĬع":126398,"ÑģÑĤÑĥп":126399,"à¹Ĥà¸Ń":126400,"دÙĬÙĨ":126401,"ì¤ij":126402,"ãģĹãģı":126403,"à¹Ģสีย":126404,"вÑĭ":126405,"Ùħت":126406,"íĺĦ":126407,"ãĥIJãĥ¼":126408,"اش":126409,"קס":126410,"Ġtụ":126411,"ลà¸Ķ":126412,"Ù쨩":126413,"íijľ":126414,"رج":126415,"kÅĤad":126416,"ĠÅŁey":126417,"ĠØ£Ùħ":126418,"Ġà¹Ģม":126419,"ĠبÙĦ":126420,"ÑģкаÑı":126421,"ãģ¨ãģ®":126422,"Ġìĭ¤":126423,"ấm":126424,"หà¹īà¸Ńà¸ĩ":126425,"à¸Ĭม":126426,"dü":126427,"Ġçek":126428,"Ġê³ł":126429,"×Ĵ×ij":126430,"à¸Ĭีวิ":126431,"à¸Ĭีวิà¸ķ":126432,"Ù쨶ÙĦ":126433,"ฯ":126434,"çı":126435,"Ġبش":126436,"ĠÙĩÙĨا":126437,"ãģįãģ¾ãģĹãģŁ":126438,"tü":126439,"Ġìĺģ":126440,"ĠTürk":126441,"кÑĤ":126442,"פרס":126443,"ãģ¨ãģĦãģĨãģĵãģ¨":126444,"íĶĦ":126445,"à¹ģรà¸ģ":126446,"ר×ķף":126447,"Ġaras":126448,"×ŀצ×IJ":126449,"Ġtá»ī":126450,"سا":126451,"à¸ŀà¸Ń":126452,"ĠاÙĦÙħØŃ":126453,"ãĥ¤":126454,"ĠاÙĦاست":126455,"ÙģÙĨ":126456,"×Ļ×ŀ×Ķ":126457,"رت":126458,"ãģ¨ãĤĤ":126459,"ĠнаÑģ":126460,"пÑĢи":126461,"Ġ×Ĺ×ķ":126462,"ила":126463,"ÙĬØ´":126464,"Ġgöz":126465,"Ġ×ij׳×Ļ":126466,"ımı":126467,"ĠÑĤеÑħ":126468,"Ġhá»Ļ":126469,"غر":126470,"кон":126471,"اØŃت":126472,"Ġà¸ŀ":126473,"à¸Ńà¸Ńà¸Ļ":126474,"à¸Ńà¸Ńà¸Ļà¹Ħล":126475,"à¸Ńà¸Ńà¸Ļà¹Ħลà¸Ļà¹Į":126476,"Ñħо":126477,"Ñıв":126478,"à¹ģสà¸Ķ":126479,"à¹ģสà¸Ķà¸ĩ":126480,"à¹Ģà¸ŀียà¸ĩ":126481,"ÑĤов":126482,"اÙĬ":126483,"Ġ×Ķ×ĵ":126484,"Ġ×ķ׼":126485,"ãĤīãģĦ":126486,"×ķפף":126487,"Ġë¶Ī":126488,"ลà¸Ńà¸ĩ":126489,"طاÙĦ":126490,"Ġни":126491,"ĠÙħست":126492,"ếc":126493,"Ġש׼":126494,"ĠëķĮ문":126495,"วัà¸Ļà¸Ĺีà¹Ī":126496,"×Ļ׾×ĵ":126497,"ØŃا":126498,"еÑĨ":126499,"Ġcứ":126500,"×ĵ×ķר":126501,"ĠÙħØŃ":126502,"ר׼×ij":126503,"بÙĬع":126504,"нии":126505,"ĠاÙĦØ£ÙĪÙĦ":126506,"à¸Ħวร":126507,"ã썿ĢĿãģĨ":126508,"ĠСо":126509,"ائÙĬØ©":126510,"راء":126511,"оÑģоб":126512,"ĠبأÙĨ":126513,"×¢×ķ×ĵ":126514,"ĠÑĤе":126515,"ãģĵãģĨ":126516,"ÑģÑĤÑĢа":126517,"айн":126518,"Ġsöz":126519,"تÙĨا":126520,"à¸Ńิ":126521,"ặp":126522,"ĠìķĦëĭĪ":126523,"íķŃ":126524,"Ġר×IJש":126525,"Ġà¹Ħà¸Ķà¹ī":126526,"Ġ×Ĵ×ĵ":126527,"Ġספר":126528,"обÑīе":126529,"ĠÙĪØ¥":126530,"adaÅŁ":126531,"ãģ¡ãĤĩ":126532,"×§×ķ׾":126533,"ÑĢез":126534,"ĠdÃ¼ÅŁÃ¼n":126535,"Ġ×ij×IJ×ŀ":126536,"Ġìĸ´ëĸ":126537,"ער×ij":126538,"нее":126539,"ĠÑģÑĤÑĢан":126540,"ساÙĨ":126541,"ynı":126542,"ĠاÙĦرئÙĬس":126543,"ãģĹãģª":126544,"Ġ×ł×ª":126545,"ãģ«ãģªãģ£ãģŁ":126546,"gü":126547,"åıĹãģij":126548,"×ľ×ª":126549,"ìłĪ":126550,"ëĬĶëį°":126551,"Ø®ÙĬر":126552,"à¸ķà¹īà¸Ńà¸ĩà¸ģาร":126553,"ĠÙĦØ£ÙĨ":126554,"Ġchá»ĭ":126555,"ÙĪØ©":126556,"à¹ĥส":126557,"ë¶ĢíĦ°":126558,"íķĺë©´":126559,"ữu":126560,"à¹Ģหมืà¸Ńà¸Ļ":126561,"беÑĢ":126562,"ĠìĿ´ìļ©":126563,"ĠÑģеб":126564,"wiÄĻks":126565,"Ġ×ł×¢":126566,"ÑĤÑĥÑĢ":126567,"ĠnghÄ©":126568,"ש×ķ×ĺ":126569,"tiÄŁi":126570,"ĠdeÄŁi":126571,"×IJ×ij":126572,"Ġ×ŀ×ŀ":126573,"ãĥĹãĥŃ":126574,"waÅĤ":126575,"à¸Īึà¸ĩ":126576,"خدÙħ":126577,"×IJ×Ŀ":126578,"Ä±ÅŁÄ±":126579,"czÄħ":126580,"ר×ĵ":126581,"ĠÑĢÑĥб":126582,"خرÙī":126583,"ã쮿ĸ¹":126584,"ĠденÑĮ":126585,"×Ĺ×Ļ×Ŀ":126586,"еÑĤе":126587,"ëĤľ":126588,"×IJ×Ĵ":126589,"×¢×ķר":126590,"ë³Ħ":126591,"åIJĮãģĺ":126592,"ãĤ²":126593,"ר×ļ":126594,"×ķש×IJ":126595,"ìľ¡":126596,"اخ":126597,"צ×Ļ×Ķ":126598,"á»±a":126599,"ãģĪãģ¦":126600,"ש×Ķ×ķ":126601,"анÑĤ":126602,"ลาà¸Ķ":126603,"инг":126604,"ë¡ł":126605,"اعد":126606,"ÙĪØ³Ø·":126607,"Ġвоп":126608,"ĠвопÑĢоÑģ":126609,"ÙħÙĬÙĨ":126610,"à¸Ħà¸ĩ":126611,"×Ļר×Ļ×Ŀ":126612,"ców":126613,"격":126614,"Ġê·¸ëŁ°":126615,"Ġì§Ħ":126616,"Ġש׾×Ķ":126617,"à¹Ģริà¹Īม":126618,"à¸Ĭà¸Ńà¸ļ":126619,"деÑĤ":126620,"ÑİÑīиÑħ":126621,"à¸ļà¸Ńà¸ģ":126622,"æĢĿãģĦ":126623,"عÙĬد":126624,"ס×ŀ":126625,"×Ĵ×Ļ×¢":126626,"צ×ĵ":126627,"بات":126628,"ĠëͰëĿ¼":126629,"à¸Īัà¸ĩ":126630,"ãģłãģijãģ§":1266
31,"×¢×Ļר":126632,"ĠÑĩел":126633,"ĠÑĩелов":126634,"ĠÑĩеловек":126635,"ãĥĥãĥģ":126636,"à¹Ģà¸ģีà¹Īยว":126637,"à¸Ķิ":126638,"Ġפע":126639,"×Ļ×ŀ×Ļ":126640,"ë°ĺ":126641,"خار":126642,"×ij×Ļת":126643,"×¢×Ļ×Ŀ":126644,"üyor":126645,"ãĤģãģ¦":126646,"клад":126647,"Ġà¸Īาà¸ģ":126648,"à¹Ģà¸Ħย":126649,"สà¸Ńà¸ĩ":126650,"à¹ģà¸Ħà¹Ī":126651,"ẫu":126652,"หà¸Ļัà¸ĩ":126653,"ש׾×ķ×Ŀ":126654,"اÙĨÙĬØ©":126655,"åĩºä¼ļ":126656,"åĩºä¼ļãģĦ":126657,"à¸łà¸²à¸¢":126658,"à¸ļาà¸Ĺ":126659,"à¸Ĭาว":126660,"muÅŁ":126661,"Ġ׾ק×ij׾":126662,"ãĤ·ãĥ£":126663,"ĠÄ°ÅŁ":126664,"×Ĵ×ĵ×ķ׾":126665,"جعÙĦ":126666,"ë³Ģ":126667,"ยิà¹Īà¸ĩ":126668,"à¸Ļาย":126669,"à¸Ļีà¹Ī":126670,"วิà¸ĺี":126671,"ãĤīãģªãģĦ":126672,"ëłĪ":126673,"Ġë¬¸ìłľ":126674,"Ġà¸ģ":126675,"à¸Ĺำà¸ĩาà¸Ļ":126676,"à¹Ģวà¹ĩà¸ļ":126677,"ÑĦе":126678,"楽ãģĹ":126679,"สำà¸Ħ":126680,"สำà¸Ħัà¸į":126681,"رÙħ":126682,"ãģķãĤĮãģ¦":126683,"Ġобла":126684,"ר×IJ×Ļ":126685,"หมà¸Ķ":126686,"ÙĨÙĬØ©":126687,"лин":126688,"ĠeÄŁ":126689,"itim":126690,"ëł¹":126691,"صاÙĦ":126692,"ÅĽl":126693,"à¸ľà¸´à¸Ķ":126694,"ãĥŀãĥ³":126695,"åħ¥ãĤĮ":126696,"à¹Ģà¸ķà¸Ńรà¹Į":126697,"ارÙĬ":126698,"ĠЦ":126699,"dür":126700,"สวย":126701,"립":126702,"رÙĥØ©":126703,"Ġhã":126704,"×Ļת×Ķ":126705,"à¸Ĥà¸Ļา":126706,"à¸Ĥà¸Ļาà¸Ķ":126707,"à¸Īำà¸Ļ":126708,"à¸Īำà¸Ļวà¸Ļ":126709,"ש×ķ×§":126710,"Ġдом":126711,"ì±ħ":126712,"ãģĭãģij":126713,"פ×ķ׾":126714,"à¸Ĭาย":126715,"ÑģмоÑĤÑĢ":126716,"ÑģлÑĥж":126717,"ש×IJ׾":126718,"кÑĢÑĭÑĤ":126719,"Ġìŀĺ":126720,"é«ĺãģĦ":126721,"ĠÑĢÑĥк":126722,"ÙĨص":126723,"дав":126724,"ưỡ":126725,"ưỡng":126726,"راÙħ":126727,"×Ļ׳×Ļ×Ŀ":126728,"ãĥ©ãĥ¼":126729,"ëĦ¤":126730,"Ġتع":126731,"lke":126732,"好ãģį":126733,"æĮģãģ¡":126734,"Ġë§İ":126735,"Ġyük":126736,"ĠÑģоÑģÑĤав":126737,"енÑĤÑĢ":126738,"peÅĤ":126739,"à¹Ģà¸Ľà¸¥à¸µà¹Īย":126740,"à¹Ģà¸Ľà¸¥à¸µà¹Īยà¸Ļ":126741,"íıī":126742,"ãĤĦãģĻ":126743,"×Ĺ×ĸ":126744,"×ijר×Ķ":126745,"루":126746,"ìĶĢ":126747,"بØŃØ«":126748,"à¹Ģà¸ķà¹ĩ":126749,"ówi":126750,"بÙĩ":126751,"ãģįãģ¾ãģĻ":126752,"Ġ×¢×ŀ":126753,"×Ĵ×ķ׾":126754,"езд":126755,"ÙĬÙ쨩":126756,"สà¸Ļà¹ĥà¸Ī":126757,"Ġ×ª×ľ":126758,"ÑıÑī":126759,"ĠسÙĨ":126760,"ĠÙĪØ§ØŃد":126761,"ĠÑģм":126762,"ladı":126763,"ıld":126764,"×Ļרת":126765,"ียà¸Ļ":126766,"ת×Ĺת":126767,"Ġжиз":126768,"à¸ŀั":126769,"à¸ŀัà¸Ĵ":126770,"à¸ŀัà¸Ĵà¸Ļา":126771,"à¸Ĭิ":126772,"اخÙĦ":126773,"ãģ£ãģ¦ãģĦãģŁ":126774,"รัà¸IJ":126775,"ãĤģãĤĭ":126776,"à¹Ĥà¸ģ":126777,"ĠTá»ķ":126778,"Ġhakk":126779,"رÙģ":126780,"ìłĢ":126781,"Ñģоб":126782,"ãģªãģijãĤĮãģ°":126783,"ÙĩÙĪ":126784,"Ġë²ķ":126785,"ãĤĨ":126786,"ĠاÙĦسعÙĪØ¯":126787,"Ġ×IJתר":126788,"اغ":126789,"Ġ׾×ĵ":126790,"à¹ģà¸ķ":126791,"à¹ģà¸ķà¹Īà¸ĩ":126792,"íĮĮ":126793,"ÑĥпиÑĤÑĮ":126794,"à¸ŀืà¹īà¸Ļà¸Ĺีà¹Ī":126795,"×ijת×Ļ":126796,"à¹ĩà¸ģ":126797,"ÅĤat":126798,"Ġê°ľìĿ¸":126799,"ìłķë³´":126800,"ÑĤал":126801,"Ġgüven":126802,"Ġİl":126803,"Ġê°ģ":126804,"Ġبت":126805,"×ŀ×ķ׳×Ķ":126806,"ĠاÙĦØŃÙĥÙĪÙħ":126807,"ÙĤات":126808,"à¹ģà¸ģà¹Ī":126809,"หาà¸ģ":126810,"нÑĮ":126811,"à¸Ľà¸£à¸±à¸ļ":126812,"มาà¸ĵ":126813,"ĠнеÑģк":126814,"Ġض":126815,"สมั":126816,"สมัà¸Ħร":126817,"ãģĮãģĤãĤĬ":126818,"меÑģÑĤ":126819,"Ġ×IJצ׾":126820,"Ġкомпани":126821,"סר":126822,"ÙĬÙħØ©":126823,"ĠÑħоÑĢо":126824,"ĠÑħоÑĢоÑĪ":126825,"Ġ×Ļ×ķ×ĵ":126826,"üs":126827,"×Ĵ×Ļש":126828,"à¸ļà¸Ĺ":126829,"تÙĨظ":126830,"วาà¸ĩ":126831,"มหา":126832,"Ġ׼×ķ׾":126833,"à¸Ĥà¹īาà¸ĩ":126834,"ë°ľ":126835,"год":126836,"дан":126837,"ãģĭãĤĤãģĹãĤĮãģ¾ãģĽãĤĵ":126838,"ãģĵãģ¡ãĤī":126839,"ãĥIJãĤ¤":126840,"eceÄŁi":126841,"دÙĬدة":126842,"ÙĨÙī":126843,"Ġëĭ¤ìĿĮ":126844,"วี":126845,"غا":126846,"лиз":126847,"à¹Ģà¸Ķิ":126848,"à¹Ģà¸Ķิม":126849,"ĠÙĬست":126850,"Ġyılı":126851,"koÅĦ":126852,"ãģ§ãģĹãĤĩãģĨãģĭ":126853,"ãģĤãģª":126854,"ãģĤãģªãģŁ":126855,"ÑĨен":126856,"ĠÙ
ĪØ²":126857,"×IJ×Ļש":126858,"à¹Īà¸Ń":126859,"رØŃ":126860,"ê´ij":126861,"ÑĢаÑģÑĤ":126862,"Ġ×Ķ׾":126863,"ãģĹãģ¦ãĤĤ":126864,"×ŀר׼":126865,"×ŀר׼×ĸ":126866,"éģķãģĦ":126867,"ãģŁãģı":126868,"ĠÑģÑĥд":126869,"веÑģÑĤи":126870,"ĠíķĦìļĶ":126871,"ãĥķãĤ§":126872,"ÑĤелÑĮно":126873,"à¹Ģà¸ŀืà¹Īà¸Ńà¸Ļ":126874,"ÅĤuż":126875,"à¹Ģà¸Ķิà¸Ļà¸Ĺาà¸ĩ":126876,"ש×ķר":126877,"Ġ×ŀ×ĵ":126878,"×ķ×¢×ľ":126879,"ÙĦاÙħ":126880,"à¹Ħà¸ĭ":126881,"лей":126882,"кÑĥÑĢ":126883,"Ả":126884,"à¸Ĺาà¸Ļ":126885,"ì§ij":126886,"ĠгоÑĢод":126887,"רס":126888,"׾×ķ×Ĵ":126889,"masını":126890,"ĠлÑĥÑĩ":126891,"ลà¹Īา":126892,"ìļ¸":126893,"ש×ĺ":126894,"ĠÐĺн":126895,"íĤ¤":126896,"ÙĪÙĦا":126897,"ìķł":126898,"ĠØ£ÙĬضا":126899,"Ùĥار":126900,"ĠاÙĦتع":126901,"สูà¹Ī":126902,"ãĤ¼":126903,"×ij×Ļ×IJ":126904,"ยà¸ģ":126905,"ĠØŃÙĤ":126906,"ربÙĬ":126907,"ãģĺãĤĥãģªãģĦ":126908,"รัà¸ģษา":126909,"ÑħодиÑĤ":126910,"à¸ķà¸Ńà¸ļ":126911,"׳×ĺ×Ļ":126912,"ĠاÙĦÙħج":126913,"تÙħع":126914,"оваÑĤÑĮ":126915,"ÙĦÙĬÙĨ":126916,"×Ļ×ŀ×ķת":126917,"Ġmù":126918,"nÄĻ":126919,"ĠدÙĬ":126920,"׼ש×Ļ×ķ":126921,"Ġhiç":126922,"ëijIJ":126923,"ÙĪØ§Ø¡":126924,"ÙĪØ·":126925,"ĠاÙĦبÙĦ":126926,"à¹ģมà¹ī":126927,"×§×ķת":126928,"ÙĪØ¬Ø¯":126929,"å§ĭãĤģ":126930,"ÙĬئة":126931,"Ġ매":126932,"صبØŃ":126933,"פ×IJ":126934,"гоÑĢ":126935,"ס×Ķ":126936,"بÙĬÙĤ":126937,"ยาà¸ģ":126938,"Ġнад":126939,"ÙĬÙij":126940,"ĠبÙĪ":126941,"ס×ķר":126942,"ÙħÙĥاÙĨ":126943,"ר×ij":126944,"×Ĵ×ĸ":126945,"צת":126946,"bilit":126947,"лаг":126948,"ĠNgo":126949,"×IJ×ķר":126950,"à¸ķà¸Ļ":126951,"íĬ¹":126952,"à¸Ĺีà¹Īà¸Ķี":126953,"à¸Ľà¸£à¸°à¸Īำ":126954,"ование":126955,"ãģĦãģ¤":126956,"ãĥĥãĤ¯ãĤ¹":126957,"åIJĪãĤı":126958,"åIJĪãĤıãģĽ":126959,"×Ļ׳×ķ×Ļ":126960,"ạy":126961,"Ø«ÙĤ":126962,"ĠпÑĢоб":126963,"ĠпÑĢоблем":126964,"ÅŁeh":126965,"ÅŁehir":126966,"عادة":126967,"اÙĨÙĪÙĨ":126968,"à¸ķัวà¹Ģà¸Ńà¸ĩ":126969,"ì¶ķ":126970,"ılan":126971,"бан":126972,"ãĥ³ãĥī":126973,"à¸Īี":126974,"Ġ×Ķש׳×Ļ":126975,"поÑĤ":126976,"×ķ׾×Ļ×Ŀ":126977,"ลัà¸ļ":126978,"ĠÑįÑĤи":126979,"×ijקש":126980,"ë¹ĦìĬ¤":126981,"à¸Ńยà¹Īาà¸ĩà¹Ħร":126982,"×Ļ׾×Ļ":126983,"à¹ĥà¸Ĭà¹Ī":126984,"ĠاÙĦÙĥÙĦ":126985,"ãĥļãĥ¼ãĤ¸":126986,"صة":126987,"ÑĤиÑĢ":126988,"ãĤĵãģ©":126989,"зÑĭк":126990,"wyż":126991,"ÙĩÙĬ":126992,"ĠÙħÙĦÙĬ":126993,"Ġвиде":126994,"ظاÙħ":126995,"داÙĪÙĦ":126996,"×ŀת×Ļ":126997,"Ġsık":126998,"à¹Ģà¸ķิม":126999,"ãĤ¢ãĤ¤":127000,"каÑħ":127001,"צ×Ļ׾":127002,"à¹Ģà¸Ĭà¹Īà¸Ļ":127003,"маг":127004,"магаз":127005,"магазин":127006,"à¸Ľà¸±":127007,"à¸Ľà¸±à¸Ī":127008,"Ġש×Ļר×ķת":127009,"ียม":127010,"ãĥĸãĥ«":127011,"ĠدÙĪÙĦ":127012,"קר×Ļ×Ŀ":127013,"ÙĩÙı":127014,"ово":127015,"Ġüret":127016,"دÙĪÙĨ":127017,"à¹ģà¸Ļว":127018,"à¹Ģà¸Ļืà¹īà¸Ń":127019,"ĠÑĦоÑĤ":127020,"ãĥĺ":127021,"ãģ¤ãģĭ":127022,"ÑıÑģ":127023,"ĠíķĺëĤĺëĭĺ":127024,"ائع":127025,"ĠплаÑĤ":127026,"ìĺĪ":127027,"ĠdostÄĻp":127028,"ÙĪØ¬Ùĩ":127029,"Ġ×Ķ×Ĺ×Ļ":127030,"׳×Ļ×§":127031,"дей":127032,"íĽĦ":127033,"ıy":127034,"بØŃر":127035,"à¹Ģสริม":127036,"Ġ׾×Ĵ":127037,"ذÙĩب":127038,"جÙĬÙĦ":127039,"رÙĥز":127040,"Ġëħ":127041,"Ġëħ¸":127042,"פ×Ļ׾×ķ":127043,"ãģ¾ãģļ":127044,"iriÅŁ":127045,"ĠÙĥÙĬÙģ":127046,"Ġ×ijצ":127047,"ĠêµIJ":127048,"ÑĢоÑģÑģ":127049,"ĠØ´ÙĬ":127050,"Ġiçer":127051,"×Ĵ×ķ×ij×Ķ":127052,"менно":127053,"×¢×ij×Ļר":127054,"×ķ×ŀ×Ķ":127055,"ãĤīãģĹãģĦ":127056,"ãģ¼":127057,"Ñīин":127058,"è²·ãģĦ":127059,"جÙħÙĪØ¹Ø©":127060,"Ġdönem":127061,"Ġ×ij×IJר":127062,"веÑģÑĤ":127063,"×ķר×ķת":127064,"سÙģ":127065,"à¹ģà¸Ĺà¸Ļ":127066,"ĠдокÑĥменÑĤ":127067,"ĠاÙĬ":127068,"جاÙĨ":127069,"צ×ķ×¢×Ļ":127070,"ĠоÑģоб":127071,"ĠاÙĦÙħس":127072,"ÑĢаб":127073,"à¸łà¸¹":127074,"à¸Ķาว":127075,"лекÑĤ":127076,"عÙĤ":127077,"×ķ×ĵ×ķת":127078,"Ġolu":127079,"ĠoluÅŁtur":127080,"ãģ¾ãģ¾":127081,"един":127082,"à¹Ģà¸Ńà¸ģ":127083,"ãĤ
µãĤ¤":127084,"ëĦĪ":127085,"Ø·ÙĨÙĬ":127086,"Ø·ÙĤØ©":127087,"ĠÐłÐ°Ð·":127088,"ÙĦÙij":127089,"Ñĩем":127090,"Ġ׾×ĺ":127091,"สัà¹Īà¸ĩ":127092,"سرائÙĬÙĦ":127093,"Ġפר×ĺ×Ļ":127094,"деÑģÑĮ":127095,"Ġ׳׼":127096,"اÙĨب":127097,"ÙĬاة":127098,"Ùħبر":127099,"Ġkı":127100,"à¸Ľà¸ı":127101,"à¸Ľà¸ıิ":127102,"à¸ļัà¸ķิ":127103,"×ł×ª×Ļ":127104,"ìĨ¡":127105,"راب":127106,"à¹ĥà¸ķ":127107,"à¹ĥà¸ķà¹ī":127108,"×Ļ×ł×ª":127109,"ÙĪÙĬر":127110,"Ġ×Ķ×ŀ×Ļ":127111,"ейÑĩаÑģ":127112,"×§×ķ×ij":127113,"دراس":127114,"ĠÙħÙĤ":127115,"رÙĬÙĨ":127116,"خاص":127117,"ãģĬéĩij":127118,"Ġجدا":127119,"ãģĨãģ¡":127120,"ëħ¸":127121,"ırım":127122,"æ§ĺ":127123,"ãģ«å¯":127124,"ãģ«å¯¾":127125,"ÑĨев":127126,"Ġvard":127127,"ĠÐIJн":127128,"eÄŁ":127129,"ÑģÑĤвенно":127130,"Ш":127131,"سد":127132,"à¸ģุ":127133,"à¹ģà¸ľà¸Ļ":127134,"รูà¹īส":127135,"รูà¹īสึà¸ģ":127136,"اتØŃاد":127137,"ÑijÑĤ":127138,"×Ĺ×ķ×§":127139,"ãģĻãģIJ":127140,"Ø·ÙĦاÙĤ":127141,"Ġ×§×ķ×ĵ":127142,"à¹ĥà¸Ĭà¹īà¸ĩ":127143,"à¹ĥà¸Ĭà¹īà¸ĩาà¸Ļ":127144,"ãĥ¼ãĤ¿":127145,"Ġsür":127146,"ÑĢок":127147,"ë³ij":127148,"สมาà¸Ĭ":127149,"สมาà¸Ĭิà¸ģ":127150,"ãĥķãĥ¬":127151,"è¾¼ãģ¿":127152,"ãĤ»ãĥ³":127153,"Ġê°Ģì§Ģ":127154,"à¸ľà¹īา":127155,"ÑįÑĤомÑĥ":127156,"иÑĤел":127157,"à¸łà¸±":127158,"à¸ij":127159,"ãĥĸãĥ©":127160,"×Ľ×ª×ķ×ij":127161,"׳×Ŀ":127162,"еннÑĭе":127163,"×¢×¨×Ľ×ª":127164,"ĠìĤ":127165,"ĠìĤ´":127166,"à¸Ĥà¹īา":127167,"׳×ķס":127168,"ãĥ¬ãĥĵ":127169,"ÑĢеÑģ":127170,"à¹Ģลà¸Ĥ":127171,"ثاÙĦ":127172,"ìĹĨ":127173,"ĠÑĩаÑģÑĤ":127174,"าศ":127175,"ãĥªãĤ¢":127176,"uç":127177,"×Ļ׼×ķת":127178,"ลà¹īาà¸Ļ":127179,"ië":127180,"ãĤ¸ãĤ§":127181,"à¸Īà¸Ń":127182,"ÙĪØŃØ¯":127183,"×Ļצ×ķ×ij":127184,"Ġ×ijש׾":127185,"око":127186,"ضة":127187,"ذر":127188,"ĠÑĥд":127189,"İL":127190,"×ķצ×Ļ×Ŀ":127191,"×ĸ×ŀף":127192,"à¸Ľà¸ģ":127193,"íķĻêµIJ":127194,"ساÙħ":127195,"à¹Ħà¸Ķ":127196,"ละà¹Ģà¸Ń":127197,"ละà¹Ģà¸Ńีย":127198,"ละà¹Ģà¸Ńียà¸Ķ":127199,"ảy":127200,"аÑĨион":127201,"ãĤ¹ãĤ¯":127202,"פ×ķס":127203,"รà¹Īาà¸ĩ":127204,"еннÑĭй":127205,"عÙĨ":127206,"عÙĦÙĨ":127207,"ائÙģ":127208,"dÄĻ":127209,"ؤÙĪÙĦ":127210,"׾×ķ×ķ":127211,"Ġ×ijש×ij":127212,"ä»ĬåĽŀ":127213,"ĠاÙĦجÙĨ":127214,"داد":127215,"waÄĩ":127216,"ãĥªãĥ³":127217,"ĠìŀIJìĭł":127218,"اÙĨÙĬا":127219,"ãĥ¡ãĥª":127220,"ÙĦÙĪÙĨ":127221,"à¸Ĺà¹Īà¸Ńà¸ĩ":127222,"à¸Ĺà¹Īà¸Ńà¸ĩà¹Ģà¸Ĺีà¹Īยว":127223,"اÙģÙĬ":127224,"ĠлиÑĪ":127225,"ÙħÙĬØ©":127226,"оÑĤвеÑĤ":127227,"Ñĩин":127228,"ÃĬ":127229,"ãĥ¡ãĥ³":127230,"å®Ł":127231,"éļĽãģ«":127232,"ĠÑĢай":127233,"ãĤ¦ãĥ³":127234,"×Ļר×ķש":127235,"×Ļר×ķש׾×Ļ×Ŀ":127236,"มะ":127237,"Ġara":127238,"казаÑĤÑĮ":127239,"à¸ķัà¸Ķ":127240,"ÑĥÑİÑĤ":127241,"Ġüst":127242,"×Ĵ×ķ×ij":127243,"×Ĵ×ķ×ij×ķת":127244,"malı":127245,"егод":127246,"егоднÑı":127247,"اÙģÙĤ":127248,"à¸Ĭà¹Īà¸Ńà¸ĩ":127249,"Ġözellik":127250,"×Ļצ×ķר":127251,"ĠmiÄĻd":127252,"ĠiliÅŁ":127253,"ĠнаÑħод":127254,"×¢×ĸר":127255,"×ľ×Ľ×ª":127256,"ÙĨتاج":127257,"ĠÑģем":127258,"à¸Īà¹Īาย":127259,"à¸ķรว":127260,"à¸ķรวà¸Ī":127261,"פר×ķ":127262,"à¸Ĥัà¸ļ":127263,"ãģŀ":127264,"Ġпло":127265,"колÑĮ":127266,"×ŀ×¢×ĺ":127267,"íķĺìĭľ":127268,"jÄħce":127269,"ÙĨاÙĨ":127270,"ลีà¸ģ":127271,"нÑĥÑĤ":127272,"ĠобÑĢаз":127273,"Ùĥبر":127274,"ĠاÙĦÙĪØ·ÙĨ":127275,"ãģķãģĽãģ¦":127276,"ÙĤاء":127277,"×ŀ×ĵ×Ļ׳":127278,"yü":127279,"פ×Ļת":127280,"׳×ķף":127281,"ÙħÙĨظ":127282,"หà¸Ļัà¸ģ":127283,"ìŀĪ":127284,"ãĤ«ãĥ¼ãĥī":127285,"عÙĨÙĬ":127286,"под":127287,"ضاء":127288,"à¸Ļà¸ķà¹Į":127289,"×ŀשפ":127290,"วà¹Į":127291,"ר×ķ×§":127292,"สืà¹Īà¸Ń":127293,"פק×Ļ×ĵ":127294,"ãģªãĤīãģªãģĦ":127295,"ĠìŬ룬":127296,"ÙĦج":127297,"ÑīиÑĤ":127298,"ãĥĥãĤ·":127299,"ÙĦÙĬس":127300,"ĠÙĦÙħا":127301,"ìłij":127302,"×ij×Ļף":127303,"ãĥģãĤ§":127304,"Ġgüç":127305,"Ġchứ":127306,"×ķצ×IJ":127307,"קר×ij":127308,"à¹Ĥà¸ŀ":127309,"о
Ñĩно":127310,"סק×Ļ":127311,"ש׾×Ŀ":127312,"صرÙģ":127313,"ĠLÃł":127314,"×¢×Ļת":127315,"á»·":127316,"à¹Ĥà¸Ńà¸ģ":127317,"à¹Ĥà¸Ńà¸ģา":127318,"à¹Ĥà¸Ńà¸ģาส":127319,"Ġ×Ķ×ĵ×ijר":127320,"à¸Ļัà¹Īà¸Ļ":127321,"زر":127322,"нако":127323,"íļį":127324,"ãĤĤãģ¡":127325,"ãĤĤãģ¡ãĤį":127326,"ãĤĤãģ¡ãĤįãĤĵ":127327,"اÙħت":127328,"عداد":127329,"инÑĭ":127330,"ÅĤyw":127331,"à¸Ħà¸ĵะ":127332,"à¸Ĺะ":127333,"ktör":127334,"×Ļ×Ĺ×Ķ":127335,"Ġме":127336,"ĠмеÑģÑı":127337,"׳×Ķ×Ĵ":127338,"ĠÑģÑĥÑīеÑģÑĤв":127339,"à¸Ļัà¸Ļ":127340,"ÑĦÑĦ":127341,"екÑĤив":127342,"عÙĦÙĪÙħات":127343,"бÑĥд":127344,"à¸Ļัà¸ģà¸ĩาà¸Ļ":127345,"หà¸Ļà¹īาà¸Ĺีà¹Ī":127346,"ÙĤÙĬÙĤ":127347,"ãĤ·ãĥ³":127348,"ãģ«éĸ¢":127349,"×IJר×Ĵ":127350,"ĠпÑĢоÑĤ":127351,"ĠпÑĢоÑĤив":127352,"ĠìŀĪìĸ´":127353,"ÙĤÙĬÙĤØ©":127354,"ìĹĩ":127355,"kür":127356,"ãģ«ãģªãĤĬãģ¾ãģĹãģŁ":127357,"ĠдеÑıÑĤ":127358,"ĠдеÑıÑĤелÑĮ":127359,"פ×ķר×ĺ":127360,"à¸Łà¹īา":127361,"à¹Ģà¸ł":127362,"ĠавÑĤомаÑĤ":127363,"×ĸ×Ļ×§":127364,"Ġolduk":127365,"عاÙħ":127366,"ĠÑĤоÑĢ":127367,"yrıca":127368,"êÌ":127369,"ãĤŃãĥ³ãĤ°":127370,"ãģ«ãģ¨ãģ£ãģ¦":127371,"à¹Ģà¸īà¸ŀ":127372,"à¹Ģà¸īà¸ŀาะ":127373,"ãģ¯ãģļ":127374,"×ŀ×IJ×Ļ":127375,"สะà¸Ķ":127376,"สะà¸Ķวà¸ģ":127377,"ìľ¼ë©°":127378,"à¸ģี":127379,"ฬ":127380,"Ġ×¢×ķש":127381,"à¸łà¸²à¸©à¸²":127382,"à¸Ĺัà¸Ļ":127383,"acakt":127384,"acaktır":127385,"اعدة":127386,"ĠÑĥÑģлÑĥг":127387,"סר×ĺ":127388,"×ķ×ŀ×ķת":127389,"×Ķ×ķר":127390,"×ŀ×ķ×ij":127391,"×ŀ×ķ×ijף":127392,"سÙĬاس":127393,"اتÙ쨧ÙĤ":127394,"×Ķצ׾":127395,"Ùħؤس":127396,"Ġpó":127397,"Ġкни":127398,"×Ļ׼×ķ׾":127399,"à¹Ģหลืà¸Ń":127400,"׼׾׼":127401,"׳×ĸ":127402,"ÑĪие":127403,"rès":127404,"ĠاÙĦØŃÙĤ":127405,"лÑıÑĢ":127406,"หà¸į":127407,"หà¸įิà¸ĩ":127408,"ר×Ĵ×Ļש":127409,"à¹Ģสà¹īà¸Ļ":127410,"ש×ij×ķף":127411,"ôtel":127412,"апÑĢ":127413,"апÑĢимеÑĢ":127414,"ابÙĦ":127415,"ĠÑĢазвиÑĤ":127416,"ĠполÑĮз":127417,"ĠСеÑĢ":127418,"×ķ×ij×Ļ":127419,"róż":127420,"ìĭŃ":127421,"ãĤ¯ãĥĪ":127422,"ãģĹãĤĪãģĨ":127423,"à¸ģรม":127424,"ØŃÙĥÙĪÙħ":127425,"à¹Ĥà¸ļ":127426,"à¸Ĺà¹īาย":127427,"ĠMá":127428,"ĠÑĤÑĭ":127429,"à¸Ħรัว":127430,"ÑĢÑĥб":127431,"ạp":127432,"ĠmÅĤ":127433,"ĠmÅĤod":127434,"ĠgörÃ¼ÅŁ":127435,"ĠgeliÅŁ":127436,"ươi":127437,"×ŀשק":127438,"ÙĢÙĢÙĢÙĢ":127439,"ราว":127440,"ãģĹãģ£":127441,"ãģĹãģ£ãģĭãĤĬ":127442,"ĠÐļон":127443,"Ġkê":127444,"à¹Ĥà¸Ĺร":127445,"èIJ½ãģ¡":127446,"åĩºãģ¦":127447,"ลัà¸ģษ":127448,"Ġ×Ĵ×ij×ķ×Ķ":127449,"ãĥĻãĥ«":127450,"ê±°ëĤĺ":127451,"ë§IJ":127452,"×Ļ׾×ĵ×Ļ×Ŀ":127453,"ĠëĦĪ":127454,"×ŀר×Ļ":127455,"รส":127456,"ãĥŃãĥ³":127457,"ило":127458,"ноÑģÑĤÑĮÑİ":127459,"×ĸר×Ĺ":127460,"пон":127461,"Ġ×Ķש׾":127462,"ê²łìĬµëĭĪëĭ¤":127463,"ĠkiÅŁ":127464,"ĠÐļи":127465,"วร":127466,"داع":127467,"ÅŁim":127468,"ÙĨÙij":127469,"ваÑĤ":127470,"راÙĥ":127471,"باÙĦ":127472,"иде":127473,"Ġ×Ķ×ŀ×Ĺ":127474,"ìĸµ":127475,"تÙģØ§Ø¹":127476,"أت":127477,"ëĬĺ":127478,"ש×Ļת":127479,"ستÙħر":127480,"ĠÑĦак":127481,"ĠاÙĦØ£ÙħرÙĬ":127482,"ëŀ¨":127483,"اسÙħ":127484,"ĠaÄŁ":127485,"Ġçev":127486,"ÙĥÙĪØ±":127487,"ãģķãģ¾":127488,"Ġçöz":127489,"Ġرس":127490,"Äħda":127491,"สà¸Ļุ":127492,"ãģĹãģ¦ãģıãĤĮ":127493,"нÑİ":127494,"leÅŁme":127495,"ãĤªãĥ³":127496,"ãģ¨ãģªãĤĬ":127497,"avaÅŁ":127498,"×ĺ×Ļ×ij":127499,"ØŃض":127500,"×ķצ×IJ×ķת":127501,"ÙĨÙħÙĪ":127502,"ıt":127503,"ĠÑħа":127504,"ĠÑħаÑĢак":127505,"ĠÑħаÑĢакÑĤеÑĢ":127506,"ĠdÅĤ":127507,"ãĥĹãĥ©":127508,"à¸Ĭุม":127509,"à¹Īà¸Ńà¸Ļ":127510,"×ķ×ij׾":127511,"Ñģол":127512,"×ĵ×Ĵ":127513,"аÑĢаÑĤ":127514,"nivers":127515,"ĠgerçekleÅŁtir":127516,"ĠاÙĦÙĦÙĬ":127517,"ระยะ":127518,"ĠÙħختÙĦÙģ":127519,"Ġgönder":127520,"ÙģØ§Ø±":127521,"doÄŁ":127522,"doÄŁan":127523,"صÙĦاØŃ":127524,"Ġyayın":127525,"ãĥĨãĥ³":127526,"รวà¸Ī":127527,"×Ļ×Ĺ×Ļ×ĵ":127528,"ünkü":127529,"ÑĨиалÑĮн":127530,"à¸ļู":127531,"มุ
":127532,"hä":127533,"Ø®Ùģ":127534,"å¢Ĺ":127535,"å¢ĹãģĪ":127536,"еÑĩно":127537,"ĠاÙĦسÙĨ":127538,"à¸Ĥาว":127539,"imdi":127540,"Ы":127541,"à¸Ļà¸Ńà¸ģà¸Īาà¸ģ":127542,"à¸ļาล":127543,"תש":127544,"Ġdüzenle":127545,"мÑĭÑģл":127546,"ãģıãģª":127547,"żu":127548,"ĠwspóÅĤ":127549,"Ġназ":127550,"ındaki":127551,"ترة":127552,"ÅŁek":127553,"Ġöd":127554,"ĠÙĪÙĥ":127555,"ĠпозволÑı":127556,"Ġת×ķ׼":127557,"ÙħÙĨتج":127558,"ë§ī":127559,"ĠاÙĦØ«ÙĦاث":127560,"аÑĨиÑİ":127561,"ÙĪØ±ÙĪ":127562,"ÑĭваеÑĤ":127563,"خصص":127564,"ĠاÙĦÙģÙĦ":127565,"ĠاÙĦÙģÙĦسطÙĬÙĨ":127566,"إجر":127567,"إجراء":127568,"اÙĨتخ":127569,"اÙĨتخاب":127570,"ارÙĬØ©":127571,"×ķÖ":127572,"Ø¢ÙĨ":127573,"×ŀ×¢×ķת":127574,"Ġмал":127575,"Ġ×IJ×Ĺ":127576,"à¸Ĺà¹īà¸Ńà¸ĩ":127577,"zeÅĽ":127578,"Ġë§Įëĵ¤":127579,"رÙĬع":127580,"äºĭãĤĴ":127581,"à¸ļริหาร":127582,"׾×ŀ×Ļ×ĵ":127583,"ĠмÑĥж":127584,"ترÙĪ":127585,"ĠباÙĦØ¥":127586,"פ×Ļ×§":127587,"زÙħØ©":127588,"ĠÃ¶ÄŁrenc":127589,"ãĥ¶":127590,"اÙħعة":127591,"×§×ij×ķצ":127592,"×ŀ׳×ķת":127593,"رÙĬÙħ":127594,"Ġоказ":127595,"ãģłãģijãģ©":127596,"Ġhız":127597,"Ġש×IJת":127598,"ãĤ¢ãĥ¼":127599,"Ġmożliwo":127600,"ìĦ¼":127601,"ÙĪØ§Ø¨":127602,"огÑĢаÑĦ":127603,"ĠعبداÙĦ":127604,"ãĤĴè¡Į":127605,"بÙĬÙĦ":127606,"Ġİç":127607,"ยาย":127608,"ĠÑĥÑĩаÑģÑĤ":127609,"ÑĦеÑģÑģ":127610,"ÑĦеÑģÑģиона":127611,"Ấ":127612,"ÙĨÙĬÙĨ":127613,"عدÙĦ":127614,"สรร":127615,"دÙĬÙĦ":127616,"×ij×Ļ×§":127617,"czyÅĤ":127618,"ÑĢоме":127619,"Ġмед":127620,"ìĻĶ":127621,"ãĥ©ãĤ¤ãĥ³":127622,"ĠÑĤеп":127623,"еÑĢÑĮ":127624,"iÄŁi":127625,"вели":127626,"ÑĢиÑģÑĤ":127627,"ס×ķפ":127628,"×ŀ׾×Ĺ":127629,"ĠاÙĦØ¥ÙĨ":127630,"Ġ׾×Ķש":127631,"è¶ĬãģĹ":127632,"ĠÑĢÑĭ":127633,"×ķ×IJר":127634,"رÙĩاب":127635,"פ×ķ×IJ×Ļ":127636,"ĠгоÑģÑĥд":127637,"ĠгоÑģÑĥдаÑĢ":127638,"ĠгоÑģÑĥдаÑĢÑģÑĤв":127639,"ĠاÙĦØ£ÙħÙĬر":127640,"Ùħج":127641,"à¹Ģหมาะ":127642,"ÑĢев":127643,"à¸Ĭีà¸ŀ":127644,"ãĥķãĥĪ":127645,"иÑĩно":127646,"ĠاÙĦÙħؤ":127647,"Ġiht":127648,"íħľ":127649,"دÙĨÙĬ":127650,"رص":127651,"лаÑģÑĤ":127652,"à¹Ģหลà¹Īา":127653,"ılır":127654,"รà¸ĵà¹Į":127655,"×ŀש×Ļ×ļ":127656,"Ġdá»ĭ":127657,"Ø·Ù쨧ÙĦ":127658,"×ĺ×ķף":127659,"Ġ×ij×Ļ׳":127660,"ãģ¾ãģ£ãģŁ":127661,"ложениÑı":127662,"تØŃر":127663,"باØŃ":127664,"à¹Ģสืà¹īà¸Ń":127665,"ãģĻãģĶ":127666,"ltür":127667,"à¸ĩาม":127668,"Ġtü":127669,"ĠпÑĢим":127670,"ĠпÑĢимен":127671,"Ġhayat":127672,"ëĥIJ":127673,"ëĭĮ":127674,"׳×Ļ×ķ":127675,"веден":127676,"ìħ¨":127677,"à¸Īัย":127678,"à¸ģà¹Īà¸Ń":127679,"Ġвод":127680,"оÑģÑĤоÑı":127681,"наÑĤ":127682,"à¹ģหล":127683,"سÙħÙĬ":127684,"à¸Ķำà¹Ģà¸Ļ":127685,"à¸Ķำà¹Ģà¸Ļิà¸Ļ":127686,"wód":127687,"öyle":127688,"ãĥĢãĤ¤":127689,"ÑĪий":127690,"меÑīен":127691,"ãģĹãģ¾ãģĨ":127692,"ãĥīãĥ©":127693,"ÙĪØ¶ØŃ":127694,"à¸Ńà¸Ļุ":127695,"ĠاÙĦاجتÙħاع":127696,"laÅŁma":127697,"à¸Ħà¸Ńà¸Ļ":127698,"×ŀר×Ļ×Ŀ":127699,"ÙĨاÙħج":127700,"שר×ķת":127701,"اÙĦØ£":127702,"ĠksiÄħż":127703,"Ġан":127704,"ÑĢай":127705,"اÙĩرة":127706,"×ŀ×ĵ×Ķ":127707,"ä¸Ģç·":127708,"ä¸Ģç·Ĵ":127709,"ä¸Ģç·Ĵãģ«":127710,"ÑĢиÑĤоÑĢ":127711,"dıkl":127712,"à¹ģà¸ĸ":127713,"à¹ģà¸Ĥà¹Īà¸ĩ":127714,"екÑĤоÑĢ":127715,"×ŀסע":127716,"ÑĢакÑĤи":127717,"uÄŁu":127718,"×ķ×ijת":127719,"สูà¸ķร":127720,"ĠçalÄ±ÅŁm":127721,"ĠçalÄ±ÅŁmalar":127722,"Ġана":127723,"ãĥĽãĥ¼ãĥł":127724,"Ġbölüm":127725,"Ġبص":127726,"олоÑģ":127727,"ĠìķĬëĬĶ":127728,"à¹Īะ":127729,"ÙĪØªØ±":127730,"ä¹Ĺ":127731,"ستخداÙħ":127732,"פ×Ļ×Ļס":127733,"פ×Ļ×Ļס×ij":127734,"פ×Ļ×Ļס×ij×ķ×§":127735,"ĠкÑĢаÑģ":127736,"лик":127737,"رÙĬØŃ":127738,"×ŀש׾×Ķ":127739,"à¹Ģยีà¹Īย":127740,"à¹Ģยีà¹Īยม":127741,"виÑģ":127742,"омн":127743,"ÄŁun":127744,"ãĥŃãĥ¼ãĥ³":127745,"أتÙĬ":127746,"à¸ķรี":127747,"çͳãģĹ":127748,"تÙħر":127749,"ìĹĪìĬµëĭĪëĭ¤":127750,"ĠÙĪØºÙĬر":127751,"redni":127752,"ĠاÙĦصÙģ":127753,"ĠнаÑģÑĤоÑı":
127754,"ĠнаÑģÑĤоÑıÑī":127755,"à¸ķรา":127756,"ĠÑĥÑģлов":127757,"ĠÑĥÑģловиÑı":127758,"ÑĨеп":127759,"×Ķ×Ĺ׾×ĺ":127760,"Ø·ÙĬع":127761,"ĠBakan":127762,"ĠاÙĦرÙĪ":127763,"илÑĮно":127764,"ĠмеÑĤ":127765,"à¸Ķà¸Ńà¸ģ":127766,"ãģĭãĤīãģªãģĦ":127767,"ĠпоÑģÑĤоÑı":127768,"ĠпоÑģÑĤоÑıн":127769,"ĠÑĩаÑģ":127770,"üc":127771,"wró":127772,"бÑĥÑĢ":127773,"ãĥIJãĥĥãĤ¯":127774,"ãĥ©ãĥ³ãĥī":127775,"ĠогÑĢ":127776,"สัà¸į":127777,"สัà¸įà¸įา":127778,"มัà¹Īà¸Ļ":127779,"à¸Ħà¸Ńม":127780,"alık":127781,"Ġнед":127782,"ümüz":127783,"ĠÅĽwie":127784,"ério":127785,"×Ļ×IJ×Ķ":127786,"دÙħات":127787,"ırl":127788,"ĠоÑĤз":127789,"ĠоÑĤзÑĭв":127790,"ä»ĺãģį":127791,"Ġkażde":127792,"миниÑģÑĤ":127793,"ãĤ°ãĥ«":127794,"ë°ĸ":127795,"езн":127796,"اÙĦÙģ":127797,"Ġשק׾":127798,"Ùħض":127799,"ãĥĿãĥ¼ãĥĪ":127800,"ÙħÙĨت":127801,"ÙĤÙĬاÙħ":127802,"Ø´ÙĨ":127803,"×Ļר×ķ×¢":127804,"ãĤŃãĥ£ãĥ³":127805,"доÑĢов":127806,"×ŀ×Ļת×Ļ":127807,"ÙĪÙĦÙĪØ¬":127808,"ÙĥاÙģ":127809,"ĠÑĢазлиÑĩ":127810,"иÑĤеÑĤ":127811,"нолог":127812,"ลà¸ĩà¸Ĺุà¸Ļ":127813,"ĠyaklaÅŁ":127814,"ãĥ¬ãĤ¤":127815,"ê²łëĭ¤":127816,"æ±ĤãĤģ":127817,"رÙĪÙģ":127818,"ĠíĬ":127819,"ĠíĬ¹":127820,"ãģ£ãģıãĤĬ":127821,"à¸Ħวามà¸Ħิà¸Ķ":127822,"×Ķ×Ļס×ĺ":127823,"Ø¥ÙĤ":127824,"ãģ¦ãģĦ":127825,"à¹Ĥà¸Ĭ":127826,"ĠBüyük":127827,"ĠФедеÑĢ":127828,"ÑĨин":127829,"ÑĢова":127830,"ĠاÙĦاÙĤتصاد":127831,"Ġchá":127832,"à¸ĺาà¸Ļ":127833,"ë¥ł":127834,"à¹Ħà¸ķ":127835,"ÃŃpio":127836,"Ùĭا":127837,"ĠобÑıз":127838,"Ùĩج":127839,"Ġì¤ijìļĶ":127840,"ãģ®ãģ§ãģ¯ãģªãģĦ":127841,"باراة":127842,"ãĤ¤ãĥ«":127843,"ĠноÑĢм":127844,"á»īnh":127845,"mö":127846,"möglich":127847,"ÑĨип":127848,"ãĤ¢ãĤ¯":127849,"×Ķ×Ļ":127850,"ÑĨиалÑĮно":127851,"ĠÅĽwi":127852,"تÙĤ":127853,"ĠÑģÑĤоим":127854,"بÙĬعÙĬ":127855,"Ġ׾ש×ŀ":127856,"глÑı":127857,"глÑıд":127858,"ãģ¦ãģıãĤĮ":127859,"ÄĻdzi":127860,"à¸Ĥั":127861,"à¸Ĥัà¹īà¸Ļ":127862,"Ø·ÙĤ":127863,"ĠìĹŃ":127864,"ãģ£ãģ¦ãģĹãģ¾ãģĨ":127865,"ĠdeÄŁerl":127866,"ĠdeÄŁerlendir":127867,"Ġülk":127868,"Ġмног":127869,"à¹ĭ":127870,"ë¿IJ":127871,"ĠУкÑĢа":127872,"ÄŁini":127873,"Ġбезоп":127874,"ĠбезопаÑģ":127875,"à¸Ńà¸Ńà¸ģà¹ģà¸ļà¸ļ":127876,"اظ":127877,"ØŃداث":127878,"леÑĢ":127879,"×Ļ×¥":127880,"×Ļ׳×ĺר׳×ĺ":127881,"larınız":127882,"ØŃÙĬØŃ":127883,"żeli":127884,"à¸Ńัà¸ĩ":127885,"à¸Ńัà¸ĩà¸ģ":127886,"à¸Ńัà¸ĩà¸ģฤษ":127887,"ĠоÑĤлиÑĩ":127888,"ัส":127889,"ëŀį":127890,"ожно":127891,"ãĤ¹ãĥĿ":127892,"ĠÑħоÑĩ":127893,"Ġкап":127894,"еÑĩен":127895,"ØŃÙĦØ©":127896,"ÙĬاÙĩ":127897,"нал":127898,"×ķצר×Ļ×Ŀ":127899,"Ġkald":127900,"åĥį":127901,"ĠاÙĦشخص":127902,"Ġзна":127903,"Ġwzgl":127904,"życz":127905,"ê°Ŀ":127906,"à¸ŀลัà¸ĩ":127907,"íģ¼":127908,"Ġöl":127909,"Ġbụ":127910,"Ø´Ùĩر":127911,"Ġзам":127912,"Ġдев":127913,"×Ļ×ĺת":127914,"تعÙĦÙĤ":127915,"ÙĪÙħØ©":127916,"ãĤĴä½ľ":127917,"ãģįãģ¦":127918,"íĥĿ":127919,"rasında":127920,"ãĤĴæİ¢":127921,"ĠÙħباشر":127922,"راجع":127923,"Ġвозд":127924,"ÙħØŃا":127925,"×ķשר":127926,"ĠиÑģÑĤоÑĢ":127927,"มัà¸ģ":127928,"tıģ":127929,"ثار":127930,"ترÙĨت":127931,"à¹ģà¸Ĥà¹ĩ":127932,"à¹ģà¸Ĥà¹ĩà¸ĩ":127933,"поÑĩ":127934,"Ġ×ij×IJ×ķת":127935,"ë¯Ģ":127936,"ëĿ¼ëıĦ":127937,"à¸Ĭัà¸Ķ":127938,"สà¸ķà¹Į":127939,"ãĥĭãĥĥãĤ¯":127940,"иденÑĤ":127941,"ĠгÑĢÑĥпп":127942,"تخ":127943,"áºł":127944,"ยืà¸Ļ":127945,"ยัà¸Ļ":127946,"óry":127947,"TÃľ":127948,"ãģĹãĤĥ":127949,"ĠпÑĢовед":127950,"лÑıеÑĤ":127951,"ÙħØ®":127952,"ยà¸Ńม":127953,"×Ľ×ł×¡×ª":127954,"ĠاÙĦÙħÙĨت":127955,"Ġolmad":127956,"ר׼×ĸ×Ļ":127957,"ĠвÑģÑĤÑĢ":127958,"ĠиÑģÑģлед":127959,"ÑĤвеÑĢж":127960,"بدÙĪ":127961,"еÑĢÑĤ":127962,"ï»·":127963,"±ħ":127964,"สัมà¸ŀัà¸Ļà¸ĺà¹Į":127965,"ิà¹Īà¸Ļ":127966,"צ×Ļ×ij":127967,"wiÄĻt":127968,"Ġì°¸":127969,"ĠzwiÄħz":127970,"سبÙĪØ¹":127971,"ãĥĥãĤ°":127972,"à¸Ľà¸¥à¸Ńà¸Ķ":127973,"à¸Ľà¸¥à¸Ńà¸Ķà¸łà¸±à¸¢":1279
74,"ãĤĤãĤĬ":127975,"ÙĤدس":127976,"Ġsprz":127977,"Ġsprzeda":127978,"Ġistedi":127979,"Ġkhu":127980,"Ġден":127981,"ĠkoÅĦ":127982,"Ġ×ij×Ĺ×Ļ":127983,"à¹Ģà¸Ĺà¹īา":127984,"×ķס×Ļ×£":127985,"ãĥĭãĥ¥ãĥ¼":127986,"ĠпÑĢедоÑģÑĤ":127987,"ĠпÑĢедоÑģÑĤав":127988,"à¹Ĥà¸Ł":127989,"év":127990,"ĠاÙĦصØŃ":127991,"صØŃاب":127992,"à¹Ģà¸Īà¹ĩà¸ļ":127993,"влек":127994,"วัà¸ķ":127995,"à¸ĸุ":127996,"ãģĵãģ¨ãģĮãģ§ãģįãģ¾ãģĻ":127997,"ÙĤÙĬÙĤÙĬ":127998,"×ķ×Ĺר":127999,"ÑĭÑĪ":128000,"ĠоÑĤно":128001,"ĠоÑĤноÑĪ":128002,"обилÑĮ":128003,"ÙģØŃ":128004,"ınt":128005,"ıntı":128006,"Ġ׾×ij×ĵ":128007,"íİĺìĿ´ì§Ģ":128008,"ãĥĬãĥ«":128009,"ĠÙħساء":128010,"×Ļ×ĺ×ij":128011,"ÑĮеÑĢ":128012,"ëĦ·":128013,"ÑĭÑĤа":128014,"ĠоÑĩеÑĢ":128015,"à¸Ķืà¹Ī":128016,"à¸Ķืà¹Īม":128017,"ĠNgh":128018,"تعب":128019,"ÙĦاÙĤات":128020,"×ķ׾×ķ×Ĵ×Ļ×Ķ":128021,"ĠìĿ´ê²ĥ":128022,"Ġ×Ķ×ijר":128023,"ìľµ":128024,"à¹Ģà¸Ħลืà¹Īà¸Ńà¸Ļ":128025,"ÙĩØ©":128026,"à¸Īำà¹Ģà¸Ľà¹ĩà¸Ļ":128027,"å¤īãģĪ":128028,"wiÅĽcie":128029,"chod":128030,"chodzÄħ":128031,"вÑĢо":128032,"×ŀ×Ĺ×Ļר":128033,"Ġyı":128034,"Ġyıll":128035,"ì¡Į":128036,"à¹Ħหว":128037,"ãģªãģıãģª":128038,"ĠзавиÑģ":128039,"ĠìĺĪìĪĺ":128040,"Ù쨰":128041,"á»§ng":128042,"à¸ŀุà¸Ĺà¸ĺ":128043,"зн":128044,"layan":128045,"ãĤ¡":128046,"à¸ģà¹ĩà¸ķาม":128047,"ĠsaÄŁlam":128048,"รà¸ĵ":128049,"ĠÑģиÑĤ":128050,"ĠÑģиÑĤÑĥ":128051,"ĠاÙĦتÙĨ":128052,"×Ķ×ĸ":128053,"ĠØ·ÙĪÙĬÙĦ":128054,"taÅĤ":128055,"Ġgörd":128056,"å¤īãĤı":128057,"ëĥ¥":128058,"à¸Ħà¹Īà¸Ńย":128059,"×IJ×ķ×ĺ":128060,"ëħIJ":128061,"ãĥ©ãĥ³ãĤ¹":128062,"วัà¸Ĵ":128063,"วัà¸Ĵà¸Ļ":128064,"ĠoluÅŁ":128065,"פע×ķ׾":128066,"ĠszczegóÅĤ":128067,"à¸Ħาสิ":128068,"à¸Ħาสิà¹Ĥà¸Ļ":128069,"powied":128070,"ĠÑĤеб":128071,"หà¸Ļà¹Īวย":128072,"Ġмил":128073,"ØŃÙĥ":128074,"à¸Ĺà¸Ķ":128075,"ĠмаÑĤеÑĢиал":128076,"ÅĤow":128077,"à¹Ģà¸ģีย":128078,"ĠÑģовеÑĢ":128079,"ãĤ©":128080,"à¸Ľà¸£à¸´":128081,"ĠиÑİ":128082,"наÑĩен":128083,"ÑĢенд":128084,"muÅŁtur":128085,"ĠпÑĢодÑĥк":128086,"зд":128087,"ÑıÑĤи":128088,"ÑıÑĤиÑı":128089,"à¹Ģมีย":128090,"راتÙĬج":128091,"Ġamacı":128092,"ש×ķ׾":128093,"ש×ķ׾×Ĺ":128094,"สะà¸Ńา":128095,"สะà¸Ńาà¸Ķ":128096,"פ×Ĵ×¢":128097,"عبة":128098,"dın":128099,"íħĶ":128100,"Ġ×ŀש×Ĺ×§":128101,"Ġfiyat":128102,"ĠзаÑı":128103,"ĠзаÑıв":128104,"à¹Ĥหล":128105,"à¹Ĥหลà¸Ķ":128106,"à¸ģรุà¸ĩà¹Ģà¸Ĺà¸ŀ":128107,"צ×Ļ×Ļף":128108,"ìļ±":128109,"Ùħب":128110,"Ùħباد":128111,"landır":128112,"ĠвеÑģÑĮ":128113,"Ġhük":128114,"ĠÐĴоз":128115,"ÑĩиÑĤÑĭва":128116,"วล":128117,"×ķצע":128118,"à¸Ĥà¸ĵะà¸Ĺีà¹Ī":128119,"ĠaÅŁaģı":128120,"׾×IJ×ķ×ŀ×Ļ":128121,"trzym":128122,"Ã¤ÃŁig":128123,"owoÅĽci":128124,"ãģĿãĤĤ":128125,"ĠrozwiÄħz":128126,"ĠgÅĤówn":128127,"монÑĤ":128128,"×ŀ×ķ×ŀ":128129,"ĠÑģÑĤан":128130,"ÙĦاÙĤØ©":128131,"prowad":128132,"prowadzi":128133,"ĠÑģоÑģÑĤоÑı":128134,"×Ļ×IJ×ķת":128135,"rı":128136,"gı":128137,"ãĥijãĥij":128138,"ĠналиÑĩ":128139,"×Ķצע":128140,"Ġ׳×Ķ":128141,"à¸Ħัà¸ļ":128142,"عراض":128143,"иж":128144,"ÙĩائÙĬ":128145,"ãĤīãģı":128146,"ожеÑĤ":128147,"ĠобоÑĢ":128148,"ĠобоÑĢÑĥд":128149,"أسÙĦ":128150,"à¹ĩà¸Ķ":128151,"ÑĢÑĥÑĤ":128152,"دÙĬÙħÙĤ":128153,"دÙĬÙħÙĤرا":128154,"Ġjeste":128155,"×ķ×ķ×Ļר":128156,"×ij×ĵ×Ļ×§":128157,"деÑĢжива":128158,"ãģĬãģı":128159,"ewnÄĻtr":128160,"ewnÄĻtrzn":128161,"à¸ŀฤ":128162,"Ġ×IJ×ķ×Ķ":128163,"ת×Ĺ×ķש":128164,"Ġzob":128165,"дÑĥм":128166,"ĠÑģÑĭ":128167,"ÙĬرا":128168,"ĠwiÄĻks":128169,"à¹ģà¸ķà¸ģà¸ķà¹Īาà¸ĩ":128170,"lararas":128171,"lararası":128172,"íĺĢ":128173,"ëī´":128174,"×ķ×Ĵ׾":128175,"ĠоÑĤмеÑĤ":128176,"ĠÑĢан":128177,"تÙĥÙĦ":128178,"иÑĤелÑĮн":128179,"à¸Ľà¸£à¸°à¸§à¸±":128180,"à¸Ľà¸£à¸°à¸§à¸±à¸ķิ":128181,"ìŀĸ":128182,"можно":128183,"pieczeÅĦ":128184,"pieczeÅĦst":128185,"못":128186,"ìĬ¨":128187,"×ŀס×ŀ":128188,"Ủ":128189,"ศิ":128190,"ศิล":128191
,"à¸¨à¸´à¸¥à¸Ľ":128192,"ĠÅļw":128193,"ãĥĥãĤ·ãĥ§ãĥ³":128194,"unitÃł":128195,"Ġmieszka":128196,"ĠmieszkaÅĦ":128197,"przed":128198,"przedsi":128199,"przedsiÄĻb":128200,"przedsiÄĻbior":128201,"à¸Ľà¸£à¸°à¸ªà¸´à¸Ĺà¸ĺิ":128202,"à¸Ľà¸£à¸°à¸ªà¸´à¸Ĺà¸ĺà¸´à¸łà¸²à¸ŀ":128203,"ยà¹Ī":128204,"ìķĻ":128205,"รวà¸Ķ":128206,"รวà¸Ķà¹Ģรà¹ĩว":128207,"å½ĵãģŁãĤĬ":128208,"älle":128209,"ÑĥеÑĤÑģÑı":128210,"ãn":128211,"ëłµ":128212,"thè":128213,"ãĤĴåĪ©ç͍":128214,"ìµľ":128215,"íĵ¨":128216,"à¸Ĺัà¸ļ":128217,"าà¸Ħม":128218,"ãģĩ":128219,"ëĤĮ":128220,"à¹Ģà¸Ľà¸¥à¹Īา":128221,"â¦":128222,"ë¾":128223,"êĢ":128224,"êĩ":128225,"â¡":128226,"ðŁŁ":128227,"ãIJ":128228,"âº":128229,"áŃ":128230,"áĻ":128231,"áĵ":128232,"á²":128233,"ðĵı":128234,"á¬":128235,"â¯":128236,"ä¨":128237,"êĿ":128238,"ê«":128239,"ðij":128240,"ðĵĥ":128241,"ðĿħ":128242,"":128244,"":128245,"":128247,"ĠعÙĦÙī":128248,"Ġmá»Ļt":128249,"ĠvỼi":128250,"Ġngưá»Ŀi":128251,"ĠØ¥ÙĦÙī":128252,"Ġnhững":128253,"Ġthá»ĥ":128254,"Ġ×IJ×ķ":128255,"Ġ×¢×Ŀ":128256,"اÙĭ":128257,"Ġà¹ģละ":128258,"ĠÙĦا":128259,"Ġnhư":128260,"ĠاÙĦتÙĬ":128261,"Ġ×Ķ×ķ×IJ":128262,"ĠÄijến":128263,"ĠØ£ÙĪ":128264,"Ġvá»ģ":128265,"ĠlÃłm":128266,"Ġsẽ":128267,"ĠcÅ©ng":128268,"Ġợ":128269,"ĠÄijó":128270,"Ġnhiá»ģu":128271,"Ġtại":128272,"Ġtrên":128273,"Ġ×Ĵ×Ŀ":128274,"ĠnhÃł":128275,"Ġ׼×Ļ":128276,"Ġsá»±":128277,"ĠÄijầu":128278,"Ġbá»ĭ":128279,"ĠÙĩذا":128280,"Ġnhất":128281,"Ġphải":128282,"Ġhiá»ĩn":128283,"Ġdụng":128284,"ĠÄijá»Ļng":128285,"ĠاÙĦÙĦÙĩ":128286,"ĠØĮ":128287,"ĠÙĥÙĦ":128288,"Ġviá»ĩc":128289,"ĠnÄĥm":128290,"Ġthì":128291,"Ġhá»įc":128292,"ĠÙĪØª":128293,"té":128294,"ĠاÙĨ":128295,"Ġtôi":128296,"Ġ×IJ׳×Ļ":128297,"Ġ׾×Ļ":128298,"Ġ×ŀ×ķ":128299,"ĠngÃły":128300,"ĠnÆ°á»Ľc":128301,"Ġ×Ķ×Ļ×IJ":128302,"Ġ×IJ×Ļ":128303,"ĠhÆ¡n":128304,"ĠÙĩذÙĩ":128305,"ĠÙĪÙĬ":128306,"ĠاÙĦذÙĬ":128307,"Ġ×ķ×ŀ":128308,"Ġgiá":128309,"Ġnhân":128310,"ĠchÃŃnh":128311,"Ġmình":128312,"ĠÐĿа":128313,"Ġthế":128314,"Ġ×Ļ×ķתר":128315,"Ġ×IJ×Ŀ":128316,"Ġnên":128317,"Ġhợ":128318,"Ġhợp":128319,"Ġcòn":128320,"ĠÙĩÙĪ":128321,"ĠcÆ¡":128322,"Ġrất":128323,"ĠViá»ĩt":128324,"Ġبعد":128325,"Ġש×Ļ":128326,"Ġthá»Ŀi":128327,"Ġcách":128328,"ĠÄijá»ĵng":128329,"Ġно":128330,"Ġtrưá»Ŀng":128331,"ØŁ":128332,"ĠÄijá»ĭnh":128333,"ĠÄijiá»ģu":128334,"×Ļ×Ļ×Ŀ":128335,"Ġthá»±c":128336,"nın":128337,"Ġhình":128338,"Ġnói":128339,"Ġcùng":128340,"Ġ×Ķ×Ķ":128341,"ĠØ¥ÙĨ":128342,"Ġ×IJ×ij׾":128343,"Ġnhưng":128344,"Ġbiết":128345,"Ġже":128346,"Ġchúng":128347,"ĠÄijang":128348,"ĠذÙĦÙĥ":128349,"Ġlên":128350,"Ġkhách":128351,"ĠnÃło":128352,"Ġsá»Ń":128353,"Ġkhác":128354,"Ġë°ı":128355,"Ġlý":128356,"×Ļ×Ļ":128357,"ĠÄijây":128358,"Ġ׾×ŀ":128359,"Ġcần":128360,"Ġtrình":128361,"Ġphát":128362,"ãģ«ãĤĤ":128363,"по":128364,"ĠnÄĥng":128365,"Ġbá»Ļ":128366,"Ġvụ":128367,"ĠÄijá»Ļ":128368,"Ñĩе":128369,"ĠnháºŃn":128370,"ĠtrÆ°á»Ľc":128371,"Ġ×¢×ĵ":128372,"ĠhÃłnh":128373,"ĠØ®ÙĦاÙĦ":128374,"Ġlượng":128375,"Ġcấp":128376,"Ġtá»±":128377,"Ġvì":128378,"Ġtư":128379,"Ġchất":128380,"Ġ׼×ŀ×ķ":128381,"Ġgì":128382,"Ġש׳":128383,"Ġtế":128384,"ת×ķ":128385,"Ġnghiá»ĩp":128386,"Ġmặt":128387,"ĠÙĥÙħا":128388,"Ġ×ij×Ļף":128389,"Ġרק":128390,"Ġthấy":128391,"Ġmáy":128392,"ĠÙģÙī":128393,"Ġdân":128394,"Ġ×IJ×Ĺ×ĵ":128395,"Ġtâm":128396,"Ġ׼×ļ":128397,"Ġ׾×ķ":128398,"во":128399,"Ġtác":128400,"ĠtoÃłn":128401,"ĠÙĪÙħ":128402,"Ġkết":128403,"Ġหรืà¸Ń":128404,"ĠÙĪØ§ÙĦÙħ":128405,"ĠÄijiá»ĥm":128406,"Ġ×ĸ×ķ":128407,"Ġ×ij×ķ":128408,"׼×ķת":128409,"Ġhá»Ļi":128410,"Ġbằng":128411,"تÙĩا":128412,"Ġ׼×ĵ×Ļ":128413,"Ġ×Ķ×Ŀ":128414,"Ġxuất":128415,"ĠÙĤد":128416,"Ġbảo":128417,"Ġtá»ijt":128418,"Ġtình":128419,"ĠÙĩÙĬ":128420,"ĠÄijá»iji":128421,"Ġthiết":128422,"Ġhiá»ĩu":128423,"Ġtiếp":128424,"Ġtạo":128425,"ת×Ķ":1
28426,"Ġchá»§":128427,"oÅĽÄĩ":128428,"Ġgiú":128429,"Ġgiúp":128430,"Ġý":128431,"Ġquả":128432,"Ġloại":128433,"Ġcô":128434,"Ġô":128435,"Ġông":128436,"Ġ×Ķ×ķ":128437,"ĠاÙĦÙĬÙĪÙħ":128438,"ĠtÃŃnh":128439,"га":128440,"Ġphòng":128441,"ĠÄĥn":128442,"ĠعاÙħ":128443,"Ġvá»ĭ":128444,"larını":128445,"rÃŃa":128446,"ĠtỼi":128447,"ĠÄijưá»Ŀng":128448,"ĠgiỼi":128449,"Ġbản":128450,"Ġcầu":128451,"Ġnhiên":128452,"Ġbá»ĩnh":128453,"Ġthưá»Ŀng":128454,"Ġ×IJ×Ļף":128455,"ĠÄijá»ģ":128456,"Ġhá»ĩ":128457,"Ġ×Ļשר×IJ׾":128458,"Ġquá":128459,"ĠÐĹа":128460,"ãģ®ãģ§ãģĻãģĮ":128461,"ĠÐŁÑĢи":128462,"Ġphần":128463,"ĠÙĪÙĦا":128464,"ĠlỼn":128465,"Ġtrá»ĭ":128466,"Ġcảm":128467,"Ġмо":128468,"Ġdùng":128469,"ĠاÙĦÙī":128470,"ĠعÙĦÙĬÙĩ":128471,"ĠìŀĪìĬµëĭĪëĭ¤":128472,"ÙĬÙĤ":128473,"ĠÙĤبÙĦ":128474,"Ġhoặc":128475,"ĠØŃÙĬØ«":128476,"Ġà¸Ĺีà¹Ī":128477,"ĠغÙĬر":128478,"ĠÄijại":128479,"Ġsá»ijng":128480,"нÑĭми":128481,"Ġthức":128482,"Ġפ×Ļ":128483,"ĠÄijiá»ĩn":128484,"ãģªãģĭãģ£ãģŁ":128485,"Ġgiải":128486,"Ġvẫn":128487,"ĠиÑħ":128488,"Ġönce":128489,"ĠváºŃy":128490,"Ġmuá»ijn":128491,"Ġảnh":128492,"à¹ĥà¸Ļà¸ģาร":128493,"ĠQuá»ijc":128494,"Ġkế":128495,"׳×IJ":128496,"Ġס×Ļ":128497,"Ġyêu":128498,"ãģ®ãģĭ":128499,"ĠÄijẹ":128500,"ĠÄijẹp":128501,"Ġchức":128502,"Ġyıl":128503,"ĠTürkiye":128504,"dé":128505,"ĠÙĤاÙĦ":128506,"Ġdá»ĭch":128507,"ĠolduÄŁu":128508,"Ġchá»įn":128509,"ĠتÙħ":128510,"หà¸Ļึà¹Īà¸ĩ":128511,"ãģķãĤĮãģŁ":128512,"Ġpháp":128513,"ìĽĶ":128514,"Ġtiá»ģn":128515,"ãģĹãģ¾ãģĹãģŁ":128516,"Ġש׾×IJ":128517,"ÙĦØ©":128518,"Ġ׾פ׳×Ļ":128519,"Ġ×ij×Ļת":128520,"ĠHÃł":128521,"ĠØŃت":128522,"ĠØŃتÙī":128523,"Ġ×¢×ķ×ĵ":128524,"Ġnó":128525,"Ġtháng":128526,"à¹Ģลืà¸Ńà¸ģ":128527,"ר×Ķ":128528,"ĠtÄĥng":128529,"Ġcái":128530,"Ġtriá»ĥn":128531,"Ġ×IJ×ķת×ķ":128532,"ìłģìĿ¸":128533,"ĠCông":128534,"Ġ׾×Ķ×Ļ×ķת":128535,"Ġгода":128536,"иÑİ":128537,"Ġبعض":128538,"Ġà¸ģาร":128539,"èī¯ãģĦ":128540,"ÙĪØª":128541,"Ġliên":128542,"ĠÐĿо":128543,"ĠÐĿе":128544,"çļĦãģª":128545,"ĠÙħت":128546,"ĠÑĤакже":128547,"ĠкоÑĤоÑĢÑĭе":128548,"Ġ×Ļ×ĵ×Ļ":128549,"Ġtrá»įng":128550,"ãĤµãĤ¤ãĥĪ":128551,"ìłģìľ¼ë¡ľ":128552,"ĠtáºŃp":128553,"Ġש׾×Ļ":128554,"íķĺê²Į":128555,"ĠtÃłi":128556,"ĠЯ":128557,"Ġrá»ĵi":128558,"اÙĥ":128559,"Ġthương":128560,"Ġ×Ķ×ĸ×Ķ":128561,"ĠÙĪÙħÙĨ":128562,"à¸Ĺีà¹Īมี":128563,"Ġcuá»Ļc":128564,"Ġbüyük":128565,"ãģ¨ãģĭ":128566,"Ġ×ij×Ļ×ķתר":128567,"Ġlần":128568,"Ġgöre":128569,"Ġtrợ":128570,"Ġ×ĺ×ķ×ij":128571,"ÑĤÑĮÑģÑı":128572,"Ġthá»ijng":128573,"Ġ׼ש":128574,"Ġtiêu":128575,"Ġ×ŀ×IJ×ķ×ĵ":128576,"ØĽ":128577,"kÄħ":128578,"Ġà¹ĥà¸Ļ":128579,"Ġvấn":128580,"Ġש׾×ķ":128581,"ĠÄijá»ģu":128582,"ÙģØª":128583,"Ġê²ĥìĿ´":128584,"Ġhóa":128585,"ĠاÙĦعاÙħ":128586,"ĠÙĬÙĪÙħ":128587,"кой":128588,"Ġbiá»ĩt":128589,"ÑģÑĤо":128590,"Ġ×Ķ×Ļ×ķ":128591,"à¸Ĺีà¹Īà¸Īะ":128592,"Ġ×ĵ×Ļ":128593,"Ġ×IJ×ļ":128594,"Ġán":128595,"صÙĪØ±":128596,"ĠtrÃŃ":128597,"ĠÐŁÑĢо":128598,"Ġlá»±c":128599,"ãģĹãģ¦ãģĦãģ¾ãģĻ":128600,"ĠbÃłi":128601,"Ġ×ĸ×IJת":128602,"Ġbáo":128603,"à¸ļà¸Ļ":128604,"ĠëĮĢíķľ":128605,"Ġtiế":128606,"Ġtiếng":128607,"Ġbên":128608,"ãģķãĤĮãĤĭ":128609,"sión":128610,"Ġtìm":128611,"×¢×ķ":128612,"mé":128613,"ниÑı":128614,"ãģ»ãģ©":128615,"Ġà¹Ģà¸ŀราะ":128616,"بة":128617,"Ġë¶Ħ":128618,"Ġ×IJ×ĸ":128619,"à¸Ĺà¹Īาà¸Ļ":128620,"ת×Ŀ":128621,"Ġthêm":128622,"Ġhoạt":128623,"yı":128624,"×ĸ×ķ":128625,"Ġgiá»Ŀ":128626,"Ġbán":128627,"à¸Ĥาย":128628,"Ñĩа":128629,"Ġà¹Ĩ":128630,"ĠاÙĦÙħت":128631,"ĠоÑĩенÑĮ":128632,"Ġbất":128633,"Ġtrẻ":128634,"ÑĤÑĢ":128635,"ĠØ£ÙĨÙĩ":128636,"ĠØ«Ùħ":128637,"Ġ׼×ŀ×Ķ":128638,"Ġkhó":128639,"Ġrằng":128640,"ĠÙĪÙģÙĬ":128641,"ний":128642,"ĠhoÃłn":128643,"tó":128644,"Ġ×IJשר":128645,"ĠìĥĿê°ģ":128646,"Ñģа":128647,"Ġ׼×ijר":128648,"ĠÑįÑĤом":128649,"larının":128650,"Ġchưa":128651,"зи":
128652,"Ġdẫn":128653,"ĠÐļак":128654,"جÙĪ":128655,"ĠбÑĭло":128656,"ĠÙĬت":128657,"nı":128658,"ÅĤam":128659,"ĠÙĪÙĩÙĪ":128660,"×ij×ķ":128661,"пи":128662,"רת":128663,"Ġquá»ijc":128664,"жд":128665,"ĠÄijÆ¡n":128666,"Ùĥتب":128667,"Ġmắt":128668,"ระà¸ļ":128669,"ระà¸ļà¸ļ":128670,"ĠÙĥاÙĨت":128671,"Ġthân":128672,"สิà¸Ļà¸Ħà¹īา":128673,"×Ĵ×Ļ":128674,"Ġphương":128675,"à¹Ħมà¹Īà¹Ħà¸Ķà¹ī":128676,"ĠìĦ±":128677,"ĠCác":128678,"Ġ×Ķ×ŀ×ķ":128679,"ĠÑĤем":128680,"Ġ×ĵ×ķ":128681,"à¸Ńะà¹Ħร":128682,"ĠvÄĥn":128683,"ãģªãģ®ãģ§":128684,"ĠNá»Ļi":128685,"Ġ×¢×ķ":128686,"ãĤīãĤĮãĤĭ":128687,"Ġsáng":128688,"Ġgöster":128689,"ãģĵãģ¨ãĤĴ":128690,"Ġtarafından":128691,"Ġма":128692,"ĠпоÑģле":128693,"Ġ׳×Ļת":128694,"Ġ׳×Ļ×ª×Ł":128695,"ĠлеÑĤ":128696,"Ġ׾׳×ķ":128697,"ÑģÑģ":128698,"Ġ×Ļ×ķ":128699,"пе":128700,"ĠÙĪÙĦÙĥ":128701,"ĠÙĪÙĦÙĥÙĨ":128702,"ĠngoÃłi":128703,"ĠÄijá»ĭa":128704,"rzÄħd":128705,"dziaÅĤ":128706,"ĠÙħر":128707,"иÑĤÑĮÑģÑı":128708,"Ġ×IJ×Ĺר×Ļ":128709,"Ġ׾׼׾":128710,"à¸Ĥà¹īà¸Ńม":128711,"à¸Ĥà¹īà¸Ńมูล":128712,"Ġбол":128713,"Ġболее":128714,"جÙħع":128715,"леÑĤ":128716,"Ġlá»ĭch":128717,"ĠÙħØ«ÙĦ":128718,"Ġê·¸ë¦¬ê³ł":128719,"Ġthứ":128720,"ĠdeÄŁil":128721,"ÙĪØŃ":128722,"Ġש׾×ļ":128723,"ĠÙħØŃÙħد":128724,"Ġnếu":128725,"ĠÄijá»ķi":128726,"Ġvừa":128727,"Ġmá»įi":128728,"Ġони":128729,"Ġlúc":128730,"ĠÙĬÙĥÙĪÙĨ":128731,"ì§Ī":128732,"Ġש׾׳×ķ":128733,"ĠÐĶо":128734,"Ġש׳×Ļ":128735,"ลิ":128736,"×IJפשר":128737,"Ġsức":128738,"ê¶Į":128739,"Ġứng":128740,"à¹Ħมà¹Īมี":128741,"Ø·ÙĦب":128742,"ĠÑĩем":128743,"Ġchuyên":128744,"ĠthÃŃch":128745,"Ġ×ķ×Ļ":128746,"íķ©":128747,"ĠÙħصر":128748,"до":128749,"ĠÄijất":128750,"Ġchế":128751,"à¸Ĭืà¹Īà¸Ń":128752,"Ġìĭł":128753,"Ġإذا":128754,"ĠرئÙĬس":128755,"Ġש×Ļש":128756,"Ġgiảm":128757,"Ñģка":128758,"larında":128759,"Ġsợ":128760,"ĠtÃŃch":128761,"ĠÙĦÙĥÙĨ":128762,"ĠبÙħ":128763,"×¢×ķ×ij":128764,"×¢×ķ×ij×ĵ":128765,"ÅĤÄħcz":128766,"larına":128767,"Ġש×Ŀ":128768,"ĠÙĦت":128769,"Ġש×Ķ×ķ×IJ":128770,"tów":128771,"Ġëĭ¤ë¥¸":128772,"ĠØ£Ùĥثر":128773,"ãģ®ãģ§ãģĻ":128774,"׼×Ļ×Ŀ":128775,"ĠolduÄŁunu":128776,"ãģĭãģª":128777,"ãĤĤãģĨ":128778,"ÙĬØŃ":128779,"Ġnhìn":128780,"Ġnghá»ĩ":128781,"ãģ«ãģªãģ£ãģ¦":128782,"па":128783,"Ġquyết":128784,"ÙĦÙĤ":128785,"tá":128786,"Ġluôn":128787,"ĠÄijặc":128788,"Ġ×IJר":128789,"Ġtuá»ķi":128790,"são":128791,"ìϏ":128792,"رد":128793,"ĠبÙĩا":128794,"Ġ×Ķ×Ļ×ķ×Ŀ":128795,"×ķ×ķ×Ļ":128796,"ãģ§ãģĻãģŃ":128797,"ĠÑĤого":128798,"Ġthá»§":128799,"ãģĹãģŁãģĦ":128800,"رÙĤ":128801,"Ġbắt":128802,"гÑĥ":128803,"Ġtá»Ń":128804,"ÑĪа":128805,"Ġà¸Ľà¸µ":128806,"Ġ×Ķ×IJ×Ŀ":128807,"íı¬":128808,"ża":128809,"Ġ×IJת×Ķ":128810,"Ġná»Ļi":128811,"ĠphÃŃ":128812,"ĠÅŁekilde":128813,"Ġlá»Ŀi":128814,"dıģı":128815,"Ġ׼×IJף":128816,"Ġtüm":128817,"Ġmạnh":128818,"ĠMỹ":128819,"ãģĿãĤĵãģª":128820,"Ġnhá»ı":128821,"ãģªãģĮãĤī":128822,"Ġbình":128823,"ıp":128824,"à¸ŀา":128825,"ĠÄijánh":128826,"ĠÙĪÙĦ":128827,"ר×ķת":128828,"Ġ×IJ×Ļ×ļ":128829,"Ġchuyá»ĥn":128830,"Ùĥا":128831,"ãĤĮãĤĭ":128832,"à¹ģมà¹Ī":128833,"ãĤĪãģı":128834,"ĠÙĪÙĤد":128835,"íĸĪëĭ¤":128836,"ĠnÆ¡i":128837,"ãģ«ãĤĪãģ£ãģ¦":128838,"Ġviết":128839,"Ġà¹Ģà¸ŀืà¹Īà¸Ń":128840,"ëIJĺëĬĶ":128841,"ادÙĬ":128842,"ĠÙ쨥ÙĨ":128843,"ì¦Ŀ":128844,"ĠÄijặt":128845,"ĠhÆ°á»Ľng":128846,"Ġxã":128847,"Ġönemli":128848,"ãģłãģ¨":128849,"Ġmẹ":128850,"Ġ×ij×Ļ":128851,"Ġ×ĵ×ijר":128852,"ĠváºŃt":128853,"ĠÄijạo":128854,"Ġdá»±ng":128855,"ĠÑĤом":128856,"ĠÙģÙĬÙĩا":128857,"ĠجÙħÙĬع":128858,"ĠthuáºŃt":128859,"stÄĻp":128860,"Ġtiết":128861,"Ø´ÙĬ":128862,"ĠеÑīе":128863,"ãģĻãĤĭãģ¨":128864,"ĠmÃłu":128865,"ĠÑįÑĤого":128866,"Ġvô":128867,"ĠÐŃÑĤо":128868,"ĠtháºŃt":128869,"Ġnữa":128870,"Ġbiến":128871,"Ġnữ":128872,"Ġ׾׼×Ŀ":128873,"×Ļ×Ļף":128874,"Ġست":128875,"ĠÐŀÑĤ":128876,"Ġphụ":128877,"ê¹Įì§Ģ"
:128878,"Ġ׾×ļ":128879,"Ġkỳ":128880,"à¹ĥà¸Ħร":128881,"Ġgây":128882,"ĠÙĦÙĦÙħ":128883,"Ġtục":128884,"تÙĬÙĨ":128885,"Ġtrợ":128886,"Ġ׾פ×Ļ":128887,"Ġbá»ij":128888,"ĠÐļа":128889,"ĠÄijình":128890,"owÄħ":128891,"sında":128892,"Ġkhiến":128893,"sız":128894,"Ġкогда":128895,"×¡×ľ":128896,"ĠбÑĭл":128897,"à¸Ļà¹īà¸Ńย":128898,"обÑĢаз":128899,"Ġê²ĥìĿ´ëĭ¤":128900,"ëĵ¤ìĿĢ":128901,"ãģ¸ãģ®":128902,"Ġà¹Ģมืà¹Īà¸Ń":128903,"Ġphục":128904,"Ġ×Ĺ׾ק":128905,"Ġhết":128906,"ĠÄija":128907,"à¹Ģà¸Ķà¹ĩà¸ģ":128908,"íĺķ":128909,"lÃŃ":128910,"ê¸ī":128911,"Ġعدد":128912,"ĠÄijá»ĵ":128913,"Ġgần":128914,"Ġ×Ļ×ķ×Ŀ":128915,"ĠsÄ©":128916,"ÑĢÑıд":128917,"Ġquyá»ģn":128918,"Ġ×IJ׾×IJ":128919,"ÙĩÙħا":128920,"׳×Ļ×Ķ":128921,"׾×ķת":128922,"Ġ×Ķר×ij×Ķ":128923,"Ġtiên":128924,"Ġalın":128925,"Ġdá»ħ":128926,"人ãģĮ":128927,"ноÑģ":128928,"лÑģÑı":128929,"ĠÄijưa":128930,"สาว":128931,"иÑĢован":128932,"Ġ×ŀספר":128933,"×Ĵף":128934,"Ġkiến":128935,"ĠШ":128936,"pé":128937,"бÑĥ":128938,"овой":128939,"ба":128940,"ĠØ¥ÙĦا":128941,"×IJ׾×Ļ":128942,"Ġxây":128943,"Ġbợi":128944,"Ġש×ķ":128945,"人ãģ®":128946,"×§×Ļ×Ŀ":128947,"à¹Ģà¸Ķืà¸Ńà¸Ļ":128948,"Ġkhá":128949,"Ġ×ķ׾×Ķ":128950,"×ĵ×ķת":128951,"Ġ×¢×ij×ķר":128952,"ĠبشÙĥÙĦ":128953,"ĠÙĩÙĨاÙĥ":128954,"ÑĤÑĢа":128955,"ĠíķĺëĬĶ":128956,"รà¸Ńà¸ļ":128957,"owaÅĤ":128958,"hé":128959,"Ġdiá»ħn":128960,"Ġ×Ķ׼׾":128961,"Ġأس":128962,"Ġchuyá»ĩn":128963,"ระà¸Ķัà¸ļ":128964,"ĠNhững":128965,"Ġ×IJ×Ĺת":128966,"ĠØŃÙĪÙĦ":128967,"лов":128968,"׳ר":128969,"Ġ×ķ׳":128970,"ĠchÆ¡i":128971,"Ġiçinde":128972,"ÑģÑĤвÑĥ":128973,"Ġphá»ij":128974,"ĠÑģÑĥ":128975,"ç§ģãģ¯":128976,"Ġchứng":128977,"Ġvá»±c":128978,"à¹ģà¸Ń":128979,"ĠláºŃp":128980,"Ġtừng":128981,"å°ijãģĹ":128982,"ĠNguy":128983,"ĠNguyá»ħn":128984,"ĠÙģÙĬÙĩ":128985,"Ġба":128986,"×Ļ×Ļת":128987,"Ġ×ľ×¢×©×ķת":128988,"Ġ×ŀ׼":128989,"Ġnghiá»ĩm":128990,"Ġмного":128991,"Ġее":128992,"ëIJĺìĸ´":128993,"Ġlợi":128994,"Ġ׾׾×IJ":128995,"Ġ׼ף":128996,"ĠchÃŃ":128997,"ãģ§ãģ®":128998,"×Ĺ×ķ":128999,"ש×ķ×Ŀ":129000,"Ġ×ŀר":129001,"ĠÐĶлÑı":129002,"Åģ":129003,"Ġ׼×IJשר":129004,"ĠMá»Ļt":129005,"ĠÙĪØ§ÙĦت":129006,"ĠìĿ´ëٰ":129007,"ÅŁa":129008,"Ġchiến":129009,"Ġarasında":129010,"Ġ×ij×IJתר":129011,"ãģķãĤĮãģ¦ãģĦãĤĭ":129012,"Ø´ÙĥÙĦ":129013,"Ġtượng":129014,"Ġتت":129015,"ĠCó":129016,"Ġbá»ı":129017,"Ġtá»īnh":129018,"ĠkhÃŃ":129019,"ĠпÑĢоÑģÑĤ":129020,"ĠпÑĢоÑģÑĤо":129021,"ĠÙĪÙĤاÙĦ":129022,"Ġgiáo":129023,"ĠNếu":129024,"×IJ×ŀר":129025,"×¢×ł×Ļ×Ļף":129026,"íݸ":129027,"ÙĩدÙģ":129028,"ĠBá»Ļ":129029,"ĠbÃłn":129030,"Ġnguyên":129031,"Ġgüzel":129032,"สาย":129033,"ì²ľ":129034,"×ŀ×ķר":129035,"Ġphân":129036,"ספק":129037,"×§×ij׾":129038,"ĠاÙĦÙħتØŃ":129039,"ĠاÙĦÙħتØŃدة":129040,"ائد":129041,"Ġ×IJ×ŀר":129042,"ĠkiÅŁi":129043,"ì¤Ģ":129044,"Ġtruyá»ģn":129045,"ĠÙĦÙĩا":129046,"ĠÐľÐ°":129047,"à¸ļริษ":129048,"à¸ļริษั":129049,"à¸ļริษัà¸Ĺ":129050,"Ġש׳×Ļ×Ŀ":129051,"ĠменÑı":129052,"ÅŁe":129053,"Ġdiá»ĩn":129054,"Ġ×IJ׳×Ĺ׳×ķ":129055,"kü":129056,"Ġcá»ķ":129057,"Ġmá»Ĺi":129058,"wä":129059,"ÙħÙĬ":129060,"Ġhiá»ĥu":129061,"ëĭ¬":129062,"Ġ×Ķ×Ĺ׾":129063,"Ġtên":129064,"Ġkiá»ĩn":129065,"ÙĨÙĤÙĦ":129066,"Ġvá»ĩ":129067,"×ĵת":129068,"ĠÐłÐ¾ÑģÑģии":129069,"лÑĥ":129070,"ĠاÙĦعربÙĬØ©":129071,"ĠطرÙĬÙĤ":129072,"Ġ×Ķ×ij×Ļת":129073,"ÑģеÑĢ":129074,"Ġмне":129075,"äu":129076,"Ġtriá»ĩu":129077,"ĠÄijá»§":129078,"Ġר×ij":129079,"تÙĩÙħ":129080,"à¸ĭี":129081,"Ġì§Ģê¸Ī":129082,"liÅĽmy":129083,"دعÙħ":129084,"ãģłãĤįãģĨ":129085,"Ñģкие":129086,"Ġhá»ıi":129087,"Ġ×§×ķ":129088,"ÑĢÑĥÑģ":129089,"ÙĨظر":129090,"ãģ®ãĤĤ":129091,"Ġ×Ķ׼×Ļ":129092,"ĠìĽIJ":129093,"ÙĪÙĩ":129094,"ĠÙĪÙİ":129095,"ĠBạn":129096,"плаÑĤ":129097,"Ġ×ŀ×ŀש":129098,"лÑİб":129099,"ĠнÑĥжно":129100,"Ġthư":129101,"ãģµ":129102,"ãģıãĤīãģĦ":129103,"رش":129104,"ר×ķ×Ĺ":129105
,"ĠÙĬتÙħ":129106,"Ġצר×Ļ×ļ":129107,"Ġphá":129108,"มà¸Ńà¸ĩ":129109,"Ġ×ij×IJ×ķפף":129110,"Ġcảnh":129111,"Ġíķľëĭ¤":129112,"Ġ×Ķ×ŀת":129113,"à¸ķà¹Īาà¸ĩà¹Ĩ":129114,"มีà¸ģาร":129115,"ÑģкиÑħ":129116,"ĠÐĴÑģе":129117,"ĠاÙĪ":129118,"جÙĬ":129119,"ãģĵãģ¨ãģ¯":129120,"ĠdÃłi":129121,"Ġhá»ĵ":129122,"èĩªåĪĨãģ®":129123,"à¹Ħหà¸Ļ":129124,"ëĵ¤ìĿĦ":129125,"ĠVÄĥn":129126,"Ġдаж":129127,"Ġдаже":129128,"Ñĭми":129129,"лаÑģÑĮ":129130,"ÙĬÙĪÙĨ":129131,"ÙĨÙĪ":129132,"có":129133,"ãģĹãģ¦ãģĦãģŁ":129134,"ãģłãģĭãĤī":129135,"طاÙĦب":129136,"Ġcá»Ńa":129137,"пÑĢоÑģ":129138,"ãģªãģ©ãģ®":129139,"รุà¹Īà¸Ļ":129140,"Ġchiếc":129141,"лÑĭ":129142,"ĠÑıвлÑıеÑĤÑģÑı":129143,"Ġná»ķi":129144,"ãģ®ãģĬ":129145,"Ġ×IJת×Ŀ":129146,"ĠëķĮ문ìĹIJ":129147,"à¸ģลาà¸ĩ":129148,"ĠbaÅŁka":129149,"ìĦĿ":129150,"ĠÑĨел":129151,"ÙģÙĤ":129152,"ãģ«ãĤĪãĤĭ":129153,"ÙĤا":129154,"Ġçıkar":129155,"Ġcứu":129156,"طا":129157,"Ġשת":129158,"à¹Ĥà¸Ħ":129159,"Ġ×ŀ׾":129160,"Ġ×Ķפר":129161,"Ġгде":129162,"Ġخط":129163,"åīįãģ«":129164,"cjÄĻ":129165,"Ġ×Ĺש×ķ×ij":129166,"ר×Ĵ×¢":129167,"Ġkhoảng":129168,"ĠÄijá»Ŀi":129169,"ĠÐłÐµ":129170,"Ġона":129171,"Ġ×IJ׳×ķ":129172,"ãģ®ãģ«":129173,"ĠاÙĦذÙĬÙĨ":129174,"кÑĥп":129175,"ãĤµãĥ¼ãĥ":129176,"ãĤµãĥ¼ãĥĵ":129177,"ãĤµãĥ¼ãĥĵãĤ¹":129178,"вал":129179,"ге":129180,"Ġgiữa":129181,"ĠKhông":129182,"ĠâĹĭ":129183,"à¸ģลุà¹Īม":129184,"ĠÙħÙĨذ":129185,"à¸Ńà¹Īาà¸Ļ":129186,"ĠÑģпоÑģоб":129187,"ĠÄijá»Ļi":129188,"ĠdiÄŁer":129189,"Ġà¸ĸà¹īา":129190,"ÙħØ«ÙĦ":129191,"Ġ×Ķ×IJ×Ļ":129192,"ĠدÙĪÙĨ":129193,"ÙĬراÙĨ":129194,"Ñīи":129195,"بÙĨاء":129196,"Ġآخر":129197,"ظÙĩر":129198,"Ġ×ij׼":129199,"ĠاÙĦÙħع":129200,"ãĥĴ":129201,"Ġtất":129202,"Ġmục":129203,"ĠdoÄŁru":129204,"ãģŁãĤī":129205,"Ġס×ķ":129206,"Ġxác":129207,"รà¸Ń":129208,"ĠcÄĥn":129209,"Ġонл":129210,"Ġонлайн":129211,"Ġký":129212,"Ġchân":129213,"Ġà¹Ħมà¹Ī":129214,"اØŃØ©":129215,"rán":129216,"׳×Ļ×Ļ×Ŀ":129217,"Ġ×ijף":129218,"ĠÐĸ":129219,"à¸ķรà¸ĩ":129220,"дÑĭ":129221,"Ġsắc":129222,"ÙĦت":129223,"ãĥŃãĥ¼":129224,"ĠÙĦÙĨ":129225,"Ġר×ķ":129226,"ĠdÆ°á»Ľi":129227,"à¹Ģà¸ĺ":129228,"à¹Ģà¸ĺà¸Ń":129229,"eÄŁi":129230,"Ġ×ķש":129231,"ĠÙĦØ£":129232,"Ġgặp":129233,"Ġcá»ij":129234,"ãģ¨ãģ¦ãĤĤ":129235,"رÙĪØ³":129236,"Ġ׾×Ķ×Ļ":129237,"Ġ본":129238,"ä¸ĬãģĴ":129239,"Ġmức":129240,"Ñħа":129241,"Ġìŀ¬":129242,"à¸īัà¸Ļ":129243,"ÑĢÑĥж":129244,"Ġaçık":129245,"ÙĪØ§ÙĦ":129246,"Ġ×ĸ×ŀף":129247,"人ãģ¯":129248,"عÙĬÙĨ":129249,"ÑıÑħ":129250,"Ġ×Ĵ×ĵ×ķ׾":129251,"ר×ķ×ij":129252,"gó":129253,"ëĿ¼ê³ł":129254,"ĠarkadaÅŁ":129255,"ÙĨشر":129256,"ĠгодÑĥ":129257,"ĠболÑĮÑĪе":129258,"ãģ¡ãĤĩãģ£ãģ¨":129259,"Ġcâu":129260,"Ġsát":129261,"íͼ":129262,"Ġtiến":129263,"íķ´ìķ¼":129264,"ĠÙĪØ£ÙĨ":129265,"à¸Ļาà¸Ļ":129266,"Ġ×ij×IJ×ŀצע":129267,"Ġ×ij×IJ×ŀצע×ķת":129268,"Ġ׾ר":129269,"Ġquản":129270,"ĠÙĪØ§ÙĦØ£":129271,"Ġ×IJ×ķת×Ķ":129272,"Ġìĸ´ëĸ¤":129273,"Ġê²ĥìĿĢ":129274,"ØŃسÙĨ":129275,"Ġmất":129276,"à¸Ħูà¹Ī":129277,"ãĥ¬ãĥ¼":129278,"ĠÐĶа":129279,"Ġolması":129280,"Ġthuá»Ļc":129281,"׳×Ĺ":129282,"íĨł":129283,"Ġsöyle":129284,"ãģĿãģĨãģ§ãģĻ":129285,"ĠتÙĥÙĪÙĨ":129286,"лÑĥÑĩ":129287,"׾×Ļ×ļ":129288,"ĠØ£ØŃد":129289,"лиÑģÑĮ":129290,"ĠвÑģего":129291,"Ġ×Ķר×ij":129292,"Ġ못":129293,"oÄŁ":129294,"oÄŁlu":129295,"ĠìĦł":129296,"ĠкаÑĢ":129297,"à¸łà¸²à¸Ħ":129298,"eÅĦ":129299,"Ġà¸ģà¹ĩ":129300,"Ġaynı":129301,"ĠbÃł":129302,"ãģªãĤĵãģ¦":129303,"Ġ모ëĵł":129304,"ÙĤرار":129305,"ãģĹãģªãģĦ":129306,"ĠÐĴо":129307,"ĠÙĪÙĩÙĬ":129308,"ники":129309,"ãĤĮãģŁ":129310,"Ġchuẩn":129311,"רע":129312,"Ù쨱ÙĬÙĤ":129313,"ãĤĴåıĹãģij":129314,"ĠÄijúng":129315,"бе":129316,"׼×ķ×Ĺ":129317,"пÑĥ":129318,"Ġ×ķ×Ĵ×Ŀ":129319,"×ŀ׳×Ļ":129320,"íĸ¥":129321,"צ×Ļ×Ŀ":129322,"à¸ĭิ":129323,"ÙĩÙĨ":129324,"нем":129325,"Ġ×ij×ij×Ļת":129326,"رع":129327,"Ġส":129328,"ĠÄIJÃł":129329,"íķĺëĭ¤":129330,"Ġấy
":129331,"×Ĺ×ķ×ĵ":129332,"×Ĺ×ķ×ĵש":129333,"ĠÑĩеÑĢез":129334,"Ñĥл":129335,"ĠBình":129336,"Ġê²ĥìĿĦ":129337,"Ġ×Ĵר":129338,"ä»ĺãģij":129339,"×Ĺ׾ק":129340,"ĠتÙĦÙĥ":129341,"à¹ĥสà¹Ī":129342,"szÄħ":129343,"ÙĤاÙħ":129344,"دÙĪØ±":129345,"ĠÙģÙĤØ·":129346,"Ġhữu":129347,"ĠмогÑĥÑĤ":129348,"Ġgá»įi":129349,"Ġקר":129350,"à¸Īะมี":129351,"تÙĤدÙħ":129352,"Ġعبر":129353,"Ġ׾×Ķ×Ŀ":129354,"ĠÑģамо":129355,"ס×ĵר":129356,"ĠcÃłng":129357,"rÃŃ":129358,"Ġìŀ¥":129359,"ëĵ¤ìĿĺ":129360,"ĠÙĦÙĥ":129361,"поÑĢÑĤ":129362,"Ġkhả":129363,"ĠÑģебÑı":129364,"׳ף":129365,"ĠدÙĪØ±":129366,"Ġmợ":129367,"Ġcây":129368,"Ġfark":129369,"Ġfarklı":129370,"аÑİÑĤ":129371,"Ġtrá»±c":129372,"wiÄĻksz":129373,"Ġthuá»ijc":129374,"ĠتØŃت":129375,"تÙĦ":129376,"овÑĭе":129377,"ëĤł":129378,"Ġвам":129379,"بÙĦغ":129380,"Ġê°ĻìĿĢ":129381,"íĮIJ":129382,"ÙĦب":129383,"Ġnasıl":129384,"Ġодин":129385,"ман":129386,"ĠعÙĦÙĬÙĩا":129387,"би":129388,"Ġפש×ķ×ĺ":129389,"×ijר×Ļ":129390,"Ġש׳×Ķ":129391,"ĠëıĦ":129392,"ĠÄIJại":129393,"Ġ×IJ×ķת×Ŀ":129394,"ĠاÙĦØŃر":129395,"Ġбо":129396,"à¸Īุà¸Ķ":129397,"Ġrõ":129398,"ĠdeÄŁiÅŁ":129399,"Ġëĭ¨":129400,"ĠÑģлÑĥÑĩа":129401,"ĠÑģлÑĥÑĩае":129402,"Ġ×IJ׳ש×Ļ×Ŀ":129403,"×ĵ×£":129404,"ש×ijת":129405,"Ġש׾׼×Ŀ":129406,"Ġchú":129407,"ników":129408,"Ġtanı":129409,"Ġcáo":129410,"ĠÄijá":129411,"Ġ×IJ×ĵ×Ŀ":129412,"Ġê°ķ":129413,"Ġnhiá»ĩm":129414,"Ġ×ľ×¡":129415,"Ġ×Ľ×ª×ij":129416,"Ġ×Ķספר":129417,"ĠÄijÄĥng":129418,"ĠëijIJ":129419,"à¸ľà¸´":129420,"à¸ľà¸´à¸§":129421,"جا":129422,"Ġê°IJ":129423,"رأ":129424,"ستخدÙħ":129425,"ãģ«ãģªãĤĬãģ¾ãģĻ":129426,"Ġtá»·":129427,"×ĺ×ķר":129428,"говоÑĢ":129429,"ĠвоÑģ":129430,"ĠÙħÙĨÙĩا":129431,"иÑĢоваÑĤÑĮ":129432,"ĠÄijầy":129433,"׳×Ĵ":129434,"ĠÙħÙĪ":129435,"ĠÙħÙĪÙĤع":129436,"ר׼×Ļ":129437,"تÙı":129438,"모":129439,"Ġת×ķ":129440,"ÙĬاÙĭ":129441,"à¹ĥà¸Ķ":129442,"ãĤĬãģ¾ãģĻ":129443,"à¸Ńยูà¹Īà¹ĥà¸Ļ":129444,"ĠØ£ÙĪÙĦ":129445,"ĠأخرÙī":129446,"Ġcư":129447,"صار":129448,"×ŀ×Ĺש×ij":129449,"бÑĢа":129450,"ÅĦski":129451,"бÑĢ":129452,"ĠÙĬÙı":129453,"à¸ģิà¸Ļ":129454,"Ġchá»ijng":129455,"ÙħÙı":129456,"Ġà¸Ħืà¸Ń":129457,"ĠتÙĨ":129458,"tÃŃ":129459,"yÄĩ":129460,"Ġmạng":129461,"ÙģÙĪ":129462,"Ġdünya":129463,"קר×IJ":129464,"Ġק׾":129465,"ĠØŃاÙĦ":129466,"cÃŃa":129467,"Ġà¹Ģรา":129468,"Ġר×ķצ×Ķ":129469,"Ġáp":129470,"ë°ķ":129471,"اÙĤØ©":129472,"ниÑİ":129473,"Ġ×IJ׾×ķ":129474,"Ġ×ŀס×ķ":129475,"ãģ§ãģ¯ãģªãģı":129476,"Ġtrả":129477,"Ġקשר":129478,"miÅŁtir":129479,"Ġlưu":129480,"Ġhá»Ĺ":129481,"ĠбÑĭли":129482,"Ġlấy":129483,"عÙĦÙħ":129484,"Ġözel":129485,"æ°ĹãģĮ":129486,"Ġ×ĵר×ļ":129487,"Ùħد":129488,"sını":129489,"׳×ķש×IJ":129490,"rów":129491,"ÑĩеÑĢ":129492,"êµIJìľ¡":129493,"ĠÐľÐ¾":129494,"лег":129495,"ĠVỼi":129496,"วัà¸Ļà¸Ļีà¹ī":129497,"ÑİÑīие":129498,"ãģĬãģĻ":129499,"ãģĬãģĻãģĻ":129500,"ãģĬãģĻãģĻãĤģ":129501,"ëıħ":129502,"Ġ×Ļ×Ķ×Ļ×Ķ":129503,"×ŀ×ĺר":129504,"Ñıми":129505,"Ġlá»±a":129506,"ĠÄijấu":129507,"à¹Ģสียà¸ĩ":129508,"Ġtương":129509,"ëĵ±":129510,"ĠÑģÑĤаÑĢ":129511,"à¹ĥà¸ļ":129512,"วัà¸Ķ":129513,"Ġİstanbul":129514,"Ġà¸Īะ":129515,"à¸ķลาà¸Ķ":129516,"ĠبÙĬ":129517,"à¹ģà¸Ļะ":129518,"à¹ģà¸Ļะà¸Ļำ":129519,"ساعد":129520,"Ġبأ":129521,"Ġkiá»ĥm":129522,"ØŃسب":129523,"à¸Ĭัà¹īà¸Ļ":129524,"Ġ×ķ×¢×ķ×ĵ":129525,"овÑĭÑħ":129526,"оÑģнов":129527,"ĠtrÆ°á»Łng":129528,"צ×ij×¢":129529,"ĠÃŃt":129530,"Ġkỹ":129531,"cré":129532,"Ñıм":129533,"êµ°":129534,"ãģĮãģªãģĦ":129535,"ÙĬÙĦØ©":129536,"ãĥķãĤ£":129537,"رÙī":129538,"ĠÙĬجب":129539,"Ġ×IJ×£":129540,"Ġcá»±c":129541,"ãĤīãĤĮãģŁ":129542,"Ġà¸ľà¸¹à¹ī":129543,"Ġà¸Ń":129544,"larımız":129545,"Ġkadın":129546,"Ġê·¸ëŀĺ":129547,"Ġê·¸ëŀĺìĦľ":129548,"ĠëĺIJëĬĶ":129549,"ĠÄijả":129550,"ĠÄijảm":129551,"Ġ×IJ×ķ×ŀר":129552,"Ġyếu":129553,"ciÄħ":129554,"ciÄħg":129555,"Ġtá»ij":129556,"Ġש×IJ׳×Ļ":1
29557,"ĠdziaÅĤa":129558,"Ñīа":129559,"ĠÄijÃłn":129560,"sına":129561,"ãģĵãĤĮãģ¯":129562,"Ġ×ij׾×Ļ":129563,"Ġ×ij×Ļשר×IJ׾":129564,"лоÑģÑĮ":129565,"Ġgiữ":129566,"ê°IJ":129567,"ÑĢон":129568,"تجار":129569,"глав":129570,"вин":129571,"Ġhạn":129572,"Ġyapılan":129573,"بس":129574,"Ġà¸ŀรà¹īà¸Ńม":129575,"ê´Ģ리":129576,"mÄ±ÅŁtır":129577,"bü":129578,"rück":129579,"ĠBaÅŁkanı":129580,"ĠÙĦÙĬس":129581,"ĠsÆ¡":129582,"à¸Īัà¸ĩหว":129583,"à¸Īัà¸ĩหวัà¸Ķ":129584,"داء":129585,"Ġ×Ķ׼":129586,"vÃŃ":129587,"ש×IJר":129588,"ĠhÆ°á»Łng":129589,"Ġbóng":129590,"ĠChÃŃnh":129591,"Äħc":129592,"à¹Ģà¸ģีà¹Īยวà¸ģัà¸ļ":129593,"Ġtứ":129594,"Ġtức":129595,"ĠÑĨвеÑĤ":129596,"Ġtá»iji":129597,"ĠnghÄ©a":129598,"ÙĦاعب":129599,"دÙĦ":129600,"Ġפע×Ŀ":129601,"hör":129602,"à¸Ĭุà¸Ķ":129603,"à¸ŀู":129604,"à¸ŀูà¸Ķ":129605,"паÑģ":129606,"ĠÅŁu":129607,"ĠtÆ°á»Łng":129608,"خارج":129609,"Ġâm":129610,"ĠинÑĤеÑĢеÑģ":129611,"еннÑĭÑħ":129612,"×IJ׳×Ļ":129613,"بدأ":129614,"ëĿ¼ëĬĶ":129615,"ì¹´":129616,"æĸ¹ãģĮ":129617,"лив":129618,"Ġà¸Ħà¸Ļ":129619,"ער×ļ":129620,"à¸Ĥà¸Ńà¸ĩà¸Ħุà¸ĵ":129621,"пад":129622,"Ġcạnh":129623,"ĠëĤ¨":129624,"ĠÄijâu":129625,"Ġbiá»ĥu":129626,"ãĤĤãģĤãĤĭ":129627,"׾×Ĵ":129628,"Ġสำหรัà¸ļ":129629,"Ġxuá»ijng":129630,"ס×ķ":129631,"Ġذات":129632,"ĠÐľÐµ":129633,"عاÙĦÙħ":129634,"×IJס":129635,"بÙĬØ©":129636,"شا":129637,"ием":129638,"ĠNgưá»Ŀi":129639,"íĺij":129640,"Ñģлов":129641,"Ġпа":129642,"Ġmẫu":129643,"ĠпÑĢоÑĨеÑģÑģ":129644,"ĠNhÃł":129645,"пÑĢоиз":129646,"пÑĢоизвод":129647,"à¸łà¸²à¸¢à¹ĥà¸Ļ":129648,"Ġà¸ļาà¸Ĺ":129649,"×ŀ׳×ķ":129650,"ĠоÑĢган":129651,"רצ×ķ":129652,"×ķ×ŀ×Ļ×Ŀ":129653,"Ġyazı":129654,"Ġdù":129655,"ãĥ¬ãĥ³":129656,"ÙĪÙĦÙĬ":129657,"ยู":129658,"Ġtrò":129659,"à¹Ģà¸ŀลà¸ĩ":129660,"Ġ×ŀ׾×IJ":129661,"à¸ķล":129662,"à¸ķลà¸Ńà¸Ķ":129663,"ĠÄijạt":129664,"Ġ×Ĺ×ĵש":129665,"póÅĤ":129666,"Ġ×ŀ×ĵ×Ļ":129667,"ujÄħc":129668,"×ŀ׳×Ķ׾":129669,"Ġש×ij×ķ":129670,"Ġ×Ķ×ŀשפ×ĺ":129671,"Ġ×IJ׾×Ķ":129672,"ĠÙĪØ°ÙĦÙĥ":129673,"à¹Ģà¸ŀราะ":129674,"ĠÄijoÃłn":129675,"Ġíķ¨ê»ĺ":129676,"Ġdục":129677,"شت":129678,"Ġula":129679,"ĠulaÅŁ":129680,"Ġquý":129681,"Ġ×Ķ×Ĵ×ĵ×ķ׾":129682,"à¸ķัà¹īà¸ĩà¹ģà¸ķà¹Ī":129683,"Ġשר":129684,"Ø´Ùĩد":129685,"׳ש×Ļ×Ŀ":129686,"à¸ŀล":129687,"رÙĪØ§":129688,"ãĤĮãģ¦":129689,"ĠниÑħ":129690,"Ġдела":129691,"ãģ§ãģįãģªãģĦ":129692,"ÅĤoż":129693,"×IJ×Ĺר":129694,"ì½Ķ":129695,"ãĤ¢ãĥĥãĥĹ":129696,"دÙ쨹":129697,"Ġtiá»ĩn":129698,"Ġkhá»ı":129699,"Ġkhá»ıe":129700,"ĠاÙĦعاÙħØ©":129701,"ãģ«ãģĤãĤĭ":129702,"ĠÄijá»Ļc":129703,"족":129704,"Ġcụ":129705,"йÑĤе":129706,"Ġзакон":129707,"ĠпÑĢоекÑĤ":129708,"ìĸ¸":129709,"ÙĦØŃ":129710,"ĠçalÄ±ÅŁma":129711,"ãĤĴãģĻãĤĭ":129712,"Ñħи":129713,"عاد":129714,"Ġ׳×ŀצ×IJ":129715,"Ġר×Ļ":129716,"à¸Ńà¸Ńà¸ģมา":129717,"ĠTôi":129718,"Ġthần":129719,"ĠÙĬا":129720,"ลาย":129721,"ĠавÑĤо":129722,"Ġsıra":129723,"ĠÙĥØ«ÙĬر":129724,"ÙħÙĬز":129725,"ĠاÙĦعÙĦÙħ":129726,"æĸ¹ãģ¯":129727,"×ķ×¢×ĵ":129728,"ĠоблаÑģÑĤи":129729,"×Ļ׾×Ļ×Ŀ":129730,"ãģĮåĩº":129731,"à¸ĺุ":129732,"à¸ĺุร":129733,"à¸ĺุรà¸ģิà¸Ī":129734,"ÙĤتÙĦ":129735,"ר×IJ×ķ":129736,"Ġngu":129737,"Ġnguá»ĵn":129738,"Ġมา":129739,"Ġплан":129740,"tório":129741,"Ġcuá»iji":129742,"Ñģком":129743,"ĠاÙĦÙħاض":129744,"ĠاÙĦÙħاضÙĬ":129745,"Ġ×ij×¢×ľ":129746,"Ġר×ij×Ļ×Ŀ":129747,"ĠluáºŃn":129748,"ÙĥÙĪ":129749,"à¸Ĺัà¹īà¸ĩหมà¸Ķ":129750,"ван":129751,"Ġthoại":129752,"à¹Ħà¸Ń":129753,"биÑĢ":129754,"ĠاÙĦض":129755,"تا":129756,"ĠÑĢод":129757,"ĠVÃł":129758,"×ŀ×Ļף":129759,"ĠбÑĭла":129760,"ками":129761,"ĠÐĶе":129762,"tık":129763,"קר×Ļ":129764,"ĠeÄŁitim":129765,"ĠÙĥبÙĬر":129766,"بÙĥ":129767,"ĠÙĦÙĪ":129768,"вой":129769,"Ġãģĵãģ®":129770,"ĠÑĤÑĢÑĥд":129771,"myÅĽl":129772,"Ġsư":129773,"à¸ŀีà¹Ī":129774,"Ġà¹ģลà¹īว":129775,"×¢×§":129776,"Ġ×Ĺ×ijרת":129777,"ระหว":129778,"ระหวà¹Īาà¸ĩ":1
29779,"×Ļ×Ļ×Ķ":129780,"ĠاÙĦÙĨاس":129781,"ünü":129782,"Ġ׾×ŀ×Ķ":129783,"Ġchương":129784,"ĠHá»ĵ":129785,"ارت":129786,"ãĤĪãģĨãģ§ãģĻ":129787,"lá":129788,"×§×Ļ×Ļ×Ŀ":129789,"æľ¬å½ĵ":129790,"æľ¬å½ĵãģ«":129791,"ãģĵãĤĵãģª":129792,"Ñģов":129793,"Ġ×ķ×Ĺ":129794,"à¹Ģà¸ģà¹ĩà¸ļ":129795,"ĠкÑĤо":129796,"à¹Ĥรà¸Ħ":129797,"ĠشرÙĥØ©":129798,"عزÙĬ":129799,"عزÙĬز":129800,"Ø·ÙĦÙĤ":129801,"пÑĥÑģÑĤ":129802,"ÙģØªØŃ":129803,"ëŀĢ":129804,"Ġhãy":129805,"ضÙħ":129806,"린":129807,"åł´åIJĪãģ¯":129808,"ãĤªãĥ¼":129809,"Ġhắn":129810,"Ġ×IJ×ij×Ļ×ij":129811,"Ġש׾×Ķ×Ŀ":129812,"Ġ×Ķ×Ļ×Ļת×Ķ":129813,"ĠاÙĦدÙĪÙĦØ©":129814,"ĠاÙĦÙĪÙĤ":129815,"ĠاÙĦÙĪÙĤت":129816,"ãģĤãģ¾ãĤĬ":129817,"ĠtaÅŁÄ±":129818,"İN":129819,"עסק":129820,"ãģ¦ãģĦãģŁ":129821,"Ġtá»ķng":129822,"ĠاÙĦØ¥ÙĨس":129823,"ĠاÙĦØ¥ÙĨساÙĨ":129824,"ÑĢеÑĪ":129825,"Ġgái":129826,"ĠÑĨен":129827,"ĠÙģÙĤد":129828,"Ùħات":129829,"ãģķãĤĵãģ®":129830,"Ġphù":129831,"×ĺ×Ķ":129832,"ĠÙĪØ§ÙĦتÙĬ":129833,"ĠبÙĥ":129834,"ìĿ´ëĤĺ":129835,"кÑģ":129836,"ÙħÙĬر":129837,"Ġvùng":129838,"ĠاÙĦشعب":129839,"ĠNhưng":129840,"ãĥĢãĥ¼":129841,"Ġ×Ĺ×Ļ×Ļ×Ŀ":129842,"Ġشخص":129843,"×§×ķ×ĵ":129844,"ê²Ģ":129845,"עש":129846,"×¢×ķ׾×Ŀ":129847,"צ×ķר":129848,"عÙĤد":129849,"ĠiÅŁlem":129850,"Ġ×Ķ×ij×IJ":129851,"Ġdưỡng":129852,"à¸Łà¸£à¸µ":129853,"ĠphÃŃa":129854,"ãģ®ä¸Ńãģ§":129855,"Ġпи":129856,"ĠngÃłnh":129857,"нима":129858,"ĠÙĩÙĦ":129859,"Ġ×ķ×IJת":129860,"ĠÄijáng":129861,"équipe":129862,"ĠÑįÑĤоÑĤ":129863,"Ġgörev":129864,"매":129865,"Ġquân":129866,"å¼ķãģį":129867,"æĻĤãģ«":129868,"ĠبÙħا":129869,"×ŀ×Ļת":129870,"Ġülke":129871,"Ġ×ŀ×§×ķ×Ŀ":129872,"×ijף":129873,"æ°ĹæĮģãģ¡":129874,"Ġë§İìĿĢ":129875,"Ġyüksek":129876,"ÑĨенÑĤÑĢ":129877,"ĠÙħجÙĦس":129878,"ç§ģãģ®":129879,"ÙĤدر":129880,"Ġë¶Ģë¶Ħ":129881,"Ġì°¨":129882,"خرج":129883,"ãģĭãģªãĤĬ":129884,"ë³´ëĭ¤":129885,"Ġ×ŀ×Ļ×ĵ×¢":129886,"peÅĤni":129887,"Ġxá»Ń":129888,"ìĹIJìĦľëĬĶ":129889,"ĠباÙĦÙħ":129890,"ĠÙĪÙħا":129891,"ĠÑįÑĤой":129892,"بÙĬÙĨ":129893,"nü":129894,"ØŃز":129895,"ØŃزب":129896,"ĠÑĢабоÑĤа":129897,"ĠNháºŃt":129898,"ÙĦاء":129899,"Ġëĵ¤":129900,"Ġëĵ¤ìĸ´":129901,"ãĤĦãģĻãģĦ":129902,"×Ĺ×ĸ×§":129903,"Ġ×Ķ×Ĺ×ijר×Ķ":129904,"пиÑĤ":129905,"ãģĭãĤīãģ®":129906,"Ġë§IJìĶĢ":129907,"Ġפ×ķ":129908,"ÙĦÙİ":129909,"à¹Ģà¸ķà¹ĩม":129910,"ĠÐļо":129911,"Ġmówi":129912,"ĠtÃŃn":129913,"ר×Ĵש":129914,"פרק":129915,"Ġtrạng":129916,"ĠÐŀн":129917,"×Ĺ×ķ×¥":129918,"ĠعÙĨدÙħا":129919,"Ġبر":129920,"使ãģĦ":129921,"Ġrá»Ļng":129922,"ëĮĢë¡ľ":129923,"íά":129924,"Ġktórych":129925,"вид":129926,"ลูà¸ģà¸Ħà¹īา":129927,"ĠmogÄħ":129928,"Ġש×Ĺ":129929,"×ij×Ĺר":129930,"ãĥĸãĥŃãĤ°":129931,"ĠThÃłnh":129932,"Ġ×Ķר×Ļ":129933,"ĠÑģÑĤаÑĤÑĮ":129934,"ĠHá»Ļi":129935,"à¸ļà¹īาà¸ĩ":129936,"çī¹ãģ«":129937,"ĠÄIJức":129938,"èĢħãģ®":129939,"×¢×ŀ×ķ×ĵ":129940,"×ĺר×Ķ":129941,"Ð¥":129942,"ĠÙħÙħا":129943,"ĠeÅŁ":129944,"ĠнеобÑħодимо":129945,"ников":129946,"Ġüzerinde":129947,"aÅĤa":129948,"Ġchá»ĭu":129949,"ĠاÙĦدÙĬÙĨ":129950,"أخبار":129951,"ĠÄijau":129952,"ãģĮå¤ļãģĦ":129953,"jÄħcych":129954,"دخÙĦ":129955,"larınd":129956,"larından":129957,"Ġsẻ":129958,"à¸ŀิà¹Ģศ":129959,"à¸ŀิà¹Ģศษ":129960,"×ª×Ł":129961,"tıģı":129962,"ĠluáºŃt":129963,"ĠÅŀe":129964,"ãĤ«ãĥ¼":129965,"ãģ®ãģĤãĤĭ":129966,"Ġ×Ķ×IJתר":129967,"ĠاÙĦØ¢ÙĨ":129968,"ıldı":129969,"Ġáo":129970,"ĠнаÑĩал":129971,"Ġviá»ĩn":129972,"Ġ×ij×¢×ķ׾×Ŀ":129973,"знаÑĩ":129974,"×Ļ×ĺ×Ķ":129975,"кам":129976,"ĠÐĺз":129977,"à¹Ģà¸Ĥียà¸Ļ":129978,"à¸Ļà¹īà¸Ńà¸ĩ":129979,"ÑĤÑĢо":129980,"à¹Ģà¸Ł":129981,"Ġжизни":129982,"Ġสà¹Īวà¸Ļ":129983,"ĠváºŃn":129984,"Ġê´Ģ볨":129985,"Ġlâu":129986,"ס×ĺר":129987,"קש":129988,"سÙĬر":129989,"Ġ×IJ×ķת×Ļ":129990,"Ġmôi":129991,"ائب":129992,"ĠоÑģÑĤа":129993,"Ġmón":129994,"Ġ×ij×ŀ×§×ķ×Ŀ":129995,"ĠداخÙĦ":129996,"Ġ×IJ×ķר":129997,"ĠваÑģ":129998,"ÙĥØ´Ùģ"
:129999,"ìĺ¨":130000,"à¸ĸà¹Īาย":130001,"Ġkullanıl":130002,"Ġtô":130003,"ãģ«ãĤĪãĤĬ":130004,"ĠëĺIJíķľ":130005,"Ġ×¢×ij×ķ×ĵ×Ķ":130006,"Ġriê":130007,"Ġriêng":130008,"Ġyakın":130009,"زا":130010,"Å»":130011,"×IJ×ķ׼׾":130012,"شارÙĥ":130013,"ĠбеÑģ":130014,"×´":130015,"ĠابÙĨ":130016,"ĠTá»ķng":130017,"ÙĨظ":130018,"ÅĽwiad":130019,"ãĤµãĥ¼":130020,"หาย":130021,"ĠGün":130022,"Ġhakkında":130023,"à¹Ģà¸Ĥà¹īามา":130024,"زÙĨ":130025,"ĠÐłÐ¾":130026,"Ġbiá»ĥn":130027,"ãģ©ãģĵ":130028,"Ù쨹ÙĦ":130029,"زع":130030,"פר×ĺ":130031,"Ġ×Ķף":130032,"Ø£ÙĩÙĦ":130033,"Ġthất":130034,"ØŃÙħÙĦ":130035,"ÑĩÑĥ":130036,"ĠìĤ¬ìĭ¤":130037,"ì°¸":130038,"ĠìľĦíķ´":130039,"ÙĪØ¸":130040,"ĠÐŁÐ¾Ð´":130041,"Ġkhoản":130042,"ÑĤен":130043,"ĠÙ쨧ÙĦ":130044,"Ñģад":130045,"à¸Ļà¸Ńà¸Ļ":130046,"ĠاÙĦسعÙĪØ¯ÙĬØ©":130047,"\"ØĮ":130048,"ĠاÙĦÙĴ":130049,"ãĤīãģļ":130050,"Ġtoán":130051,"Ġchắc":130052,"׼×Ļר":130053,"méd":130054,"média":130055,"زÙĪ":130056,"Ġyanı":130057,"פ׳×Ļ×Ŀ":130058,"ØŃظ":130059,"ĠбеÑģп":130060,"ĠбеÑģплаÑĤ":130061,"ĠбеÑģплаÑĤно":130062,"ĠØ£ÙħاÙħ":130063,"à¸Ńาย":130064,"à¸Ńายุ":130065,"רשת":130066,"Ġgá»ĵ":130067,"Ġgá»ĵm":130068,"Ġuá»ijng":130069,"صب":130070,"kır":130071,"ãĥijãĥ¼":130072,"Ġ׾×ĵעת":130073,"ĠкÑĥпиÑĤÑĮ":130074,"׾×ķ×Ĺ":130075,"ÙĪØ¶Ø¹":130076,"ÙĤÙĬÙħ":130077,"à¸Ľà¸²":130078,"жив":130079,"à¸Ķิà¸Ļ":130080,"×IJ×ķפ":130081,"à¹Ģลà¹ĩà¸ģ":130082,"ãĥĥãĥī":130083,"иÑĩеÑģкиÑħ":130084,"ĠChá»§":130085,"кÑĢаÑģ":130086,"ÙĪØµÙĦ":130087,"pÅĤat":130088,"моÑĢ":130089,"Ġ×Ķ×IJ×ķ":130090,"à¸Ńิà¸Ļ":130091,"ĠíķľêµŃ":130092,"гÑĢе":130093,"Ġìłľê³µ":130094,"ì°½":130095,"Ġê°ľìĿ¸ìłķë³´":130096,"Ġnghá»ĭ":130097,"à¸ĭา":130098,"ØŃساب":130099,"ĠbyÅĤa":130100,"ÙħÙĦÙĥ":130101,"иÑĩеÑģкие":130102,"Ġbác":130103,"ضØŃ":130104,"길":130105,"ש×ŀ×¢":130106,"Ġìĸ´ëĸ»":130107,"Ġìĸ´ëĸ»ê²Į":130108,"ìĽĮ":130109,"اتÙĩ":130110,"à¹Ĥรà¸ĩà¹ģ":130111,"à¹Ĥรà¸ĩà¹ģรม":130112,"خدÙħØ©":130113,"ĠÐłÐ°":130114,"׼×ķ׾×Ŀ":130115,"×ŀש×Ĺ×§":130116,"ĠÙĪÙĥاÙĨ":130117,"ס×ķ×£":130118,"ĠاÙĦØŃÙĥÙĪÙħØ©":130119,"Ġ×ij×ĺ":130120,"ĠtráºŃn":130121,"Ġ×Ķ×¢×ķ׾×Ŀ":130122,"ĠÃŃch":130123,"tÄħ":130124,"ש×ŀ×ķ":130125,"Ġ×Ķר×IJש×ķף":130126,"Ġíķĺê³ł":130127,"ãģķãĤī":130128,"ãģķãĤīãģ«":130129,"ãģ«ãģĹãģ¦":130130,"Ġà¸ľà¸¡":130131,"ãģ®ãĤĪãģĨãģª":130132,"ĠÙĪÙĤت":130133,"ãĥįãĥĥãĥĪ":130134,"ÙĦعب":130135,"ÙĪØ´":130136,"ìĺ¬":130137,"Ġหาà¸ģ":130138,"ĠmiaÅĤ":130139,"à¸Ĺà¸Ńà¸ĩ":130140,"иÑĤа":130141,"اصر":130142,"илÑģÑı":130143,"зе":130144,"à¸Ľà¸£à¸°à¸¡à¸²à¸ĵ":130145,"ãģĿãĤĮãģ¯":130146,"Ġbır":130147,"Ġbırak":130148,"صÙĨاع":130149,"Ю":130150,"شعر":130151,"Ġ׳×Ĵ×ĵ":130152,"Ġبسبب":130153,"ãĥĿãĤ¤":130154,"ãĥĿãĤ¤ãĥ³ãĥĪ":130155,"ĠاÙĦجÙĪ":130156,"ĠнеÑģколÑĮко":130157,"Ġkiếm":130158,"ÙģÙİ":130159,"Ġضد":130160,"×ij×Ļ×ĺ×ķ×Ĺ":130161,"تابع":130162,"ÙĨز":130163,"ĠBản":130164,"Ġaçıkl":130165,"Ġaçıklama":130166,"Ġà¸Ħุà¸ĵ":130167,"à¸Ĺา":130168,"ÅĤów":130169,"طب":130170,"ÙĨØŃÙĨ":130171,"Ġ×ŀ×§×ķר":130172,"Ġİs":130173,"Ġдома":130174,"Ġวัà¸Ļ":130175,"ĠdÃłnh":130176,"Ñıн":130177,"миÑĢ":130178,"Ġmô":130179,"ĠvÃłng":130180,"صاب":130181,"sının":130182,"à¸Ħืà¸Ļ":130183,"خبر":130184,"×ĸ׼×ķ":130185,"Ġ×ŀש×Ķ×ķ":130186,"mü":130187,"Ġкомпании":130188,"Ġ×Ķ×¢×Ļר":130189,"ĠÙĥÙĪ":130190,"ÙĤÙĦب":130191,"ĠlỼp":130192,"ики":130193,"׳×ij":130194,"à¹Ĥà¸Ħร":130195,"à¹Ĥà¸Ħรà¸ĩ":130196,"à¹Ĥà¸Ħรà¸ĩà¸ģาร":130197,"×ŀ×ķ×¢×ĵ":130198,"ÑıÑĤÑģÑı":130199,"หลัà¸ĩà¸Īาà¸ģ":130200,"ениÑİ":130201,"Ġשע":130202,"ĠbÆ°á»Ľc":130203,"ãĥ¡ãĥ¼ãĥ«":130204,"ãĤĦãĤĬ":130205,"Ġ×Ļ×ķ×ĵ×¢":130206,"Ġê´Ģíķľ":130207,"ĠاÙĦØ£Ùħر":130208,"Ġbölge":130209,"ĠÑģвой":130210,"ÙĦس":130211,"Ġ×ŀ×Ļ×ķ×Ĺ×ĵ":130212,"ĠëĤ´ìļ©":130213,"ĠأجÙĦ":130214,"ĠÄIJông":130215,"Ġ×ŀ×ł×ª":130216,"Ġìĭľê°Ħ":130217,"ÙĥÙİ":130218,"ãģ¨ãģĦãģĨãģ®ãģ¯":130219
,"Ġnależy":130220,"تÙĨظÙĬÙħ":130221,"ĠÑģозда":130222,"Ġphé":130223,"Ġphép":130224,"ãģ§ãģįãģ¾ãģĻ":130225,"ĠعÙĦÙħ":130226,"大ãģįãģª":130227,"ãĤ²ãĥ¼ãĥł":130228,"íħĮ":130229,"Ġ׼×ķ׾׾":130230,"ĠинÑĤеÑĢнеÑĤ":130231,"ĠTừ":130232,"ãģ¨ãģªãĤĭ":130233,"زاÙĦ":130234,"Ġktórym":130235,"Ġnhé":130236,"ìĪľ":130237,"нев":130238,"деÑĢ":130239,"ãĤ¢ãĥĹãĥª":130240,"iá»ĩu":130241,"×ij×Ļ׾":130242,"Ġتس":130243,"ĠÄIJây":130244,"ĠاÙĦخاصة":130245,"Ġà¹Ģà¸Ĭ":130246,"Ġà¹Ģà¸Ĭà¹Īà¸Ļ":130247,"صاد":130248,"Ġdạng":130249,"سعر":130250,"Ġש×Ļ×ŀ×ķש":130251,"×Ĵ×Ļ×Ŀ":130252,"ãģĮãģĤãģ£ãģŁ":130253,"пÑĢов":130254,"пÑĢовод":130255,"Ġ×IJ×Ļ׳×ķ":130256,"Ġ׾ר×IJ":130257,"Ġ׾ר×IJ×ķת":130258,"ĠØ£Ù쨶ÙĦ":130259,"ĠØŃÙĦ":130260,"ĠأبÙĪ":130261,"ê°ķ":130262,"Ġì§ij":130263,"ãģ®ãĤĪãģĨãģ«":130264,"Ġפ׳×Ļ":130265,"ס×Ļ×Ŀ":130266,"ĠÙĪÙĩذا":130267,"Ġkaç":130268,"Ġéén":130269,"Ġê±´":130270,"ë°Ķ":130271,"Ñĥз":130272,"à¸Ĥà¸Ńà¸ĩà¹Ģรา":130273,"iÅĤ":130274,"ĠÐľÑĭ":130275,"Ġchết":130276,"ĠاÙĦثاÙĨÙĬ":130277,"×IJ×§":130278,"Ġ×ķ×¢×ľ":130279,"ĠاÙĦطب":130280,"×ij×ĺ×Ĺ":130281,"ĠجدÙĬدة":130282,"ĠعدÙħ":130283,"عز":130284,"สิà¹Īà¸ĩà¸Ĺีà¹Ī":130285,"ãģĻãĤĮãģ°":130286,"ĠÄijô":130287,"ì£ł":130288,"دÙĤ":130289,"номÑĥ":130290,"Ġká»ĥ":130291,"ãĤ¢ãĥ³":130292,"å¤ļãģıãģ®":130293,"à¸Ľà¸£à¸°à¸ģ":130294,"à¸Ľà¸£à¸°à¸ģà¸Ńà¸ļ":130295,"פע×Ļ׾×ķת":130296,"ĠÑģÑĤол":130297,"mayı":130298,"ãģ¤ãģĦ":130299,"Ġyılında":130300,"Ġà¸Īึà¸ĩ":130301,"koÅĦcz":130302,"ĠThông":130303,"ĠакÑĤив":130304,"нÑģÑĤ":130305,"нÑģÑĤÑĢÑĥ":130306,"ĠÃĸz":130307,"Ġת×ŀ×Ļ×ĵ":130308,"ĠÙĥÙĨت":130309,"ÑģиÑģÑĤем":130310,"prés":130311,"présent":130312,"Ġnâ":130313,"Ġnâng":130314,"gÅĤos":130315,"ĠÙĪØ²ÙĬر":130316,"ØŃصÙĦ":130317,"ĠимееÑĤ":130318,"ØŃرÙĥØ©":130319,"à¸ŀà¹Īà¸Ń":130320,"ãĤĴãģĬ":130321,"ĠاستخداÙħ":130322,"×IJ×Ļר×ķ×¢":130323,"ä»ĸãģ®":130324,"Ġש×Ķ×Ŀ":130325,"ãģĹãģŁãĤī":130326,"ש×ŀ×Ļ":130327,"Ñģла":130328,"mı":130329,"Ġbazı":130330,"Ġíķĺì§Ģë§Į":130331,"×ĵ׾":130332,"Ġyaptıģı":130333,"ãĥĬãĥ¼":130334,"׾×Ļ׾×Ķ":130335,"ãģ¨ãģĦãģ£ãģŁ":130336,"ändig":130337,"ĠÅŁa":130338,"ĠÙģÙĬÙħا":130339,"иÑĤелÑı":130340,"×ŀ×ķש":130341,"à¸Ĥà¸Ńà¸ļ":130342,"lük":130343,"Ġhá»ĵi":130344,"Ġëªħ":130345,"ĠاÙĦÙĥØ«ÙĬر":130346,"צ×IJ":130347,"Ġhazır":130348,"طرÙģ":130349,"اÙĬا":130350,"ĠÄijôi":130351,"енд":130352,"ÙĦغ":130353,"×Ĺ×ĸ×ķר":130354,"ĠвÑģег":130355,"ĠвÑģегда":130356,"ëIJĺê³ł":130357,"×ĵ×ķ×ĵ":130358,"ана":130359,"دÙĪÙĦØ©":130360,"Ġhoạch":130361,"عÙĦا":130362,"عÙĦاج":130363,"Ġ×ķ×¢×ĵ":130364,"×Ķ×Ŀ":130365,"кий":130366,"ÙĦÙIJ":130367,"Ġ×¢×ľ×Ļ×ķ":130368,"ÑİÑīий":130369,"Ġngá»§":130370,"صÙĨع":130371,"ĠاÙĦعراÙĤ":130372,"à¸ķà¹Īà¸Ńà¹Ħà¸Ľ":130373,"ãģŁãģıãģķãĤĵ":130374,"Ġphạm":130375,"ÙĦاÙĨ":130376,"اتÙĩا":130377,"Ġböyle":130378,"تÙĨÙģÙĬ":130379,"تÙĨÙģÙĬذ":130380,"Ġש×Ķ×Ļ×IJ":130381,"ÑģÑĥ":130382,"ยาว":130383,"Ġש×ķ׳×Ļ×Ŀ":130384,"Ġ×ŀ×ķ׾":130385,"ĠÑģил":130386,"Ġ×IJ×Ĺר×Ļ×Ŀ":130387,"Ġphá»§":130388,"ÙĤطع":130389,"ĠThá»§":130390,"à¸Ľà¸£à¸°à¹Ģà¸Ĺศà¹Ħà¸Ĺย":130391,"ÙĨÙĤ":130392,"ĠÄijoạn":130393,"Ġبإ":130394,"пÑĢедел":130395,"×ķת×ķ":130396,"Ġyarı":130397,"пÑĢе":130398,"ĠczÄĻÅĽci":130399,"ØŃÙĥÙħ":130400,"×ķ׳×Ļת":130401,"×¤×¢×ľ":130402,"ãĤĴãģĹãģ¦":130403,"Ġktórzy":130404,"׾×Ŀ":130405,"ĠÄIJiá»ģu":130406,"ĠкоÑĤоÑĢаÑı":130407,"ĠìĿ´ìĥģ":130408,"ãģĤãģ£ãģŁ":130409,"Ġ×ŀ×ĵ×ķ×ijר":130410,"פ×ķ×¢×ľ":130411,"dım":130412,"éĢļãĤĬ":130413,"ĠбÑĥдÑĥÑĤ":130414,"à¹Ģวà¹ĩà¸ļà¹Ħà¸ĭ":130415,"à¹Ģวà¹ĩà¸ļà¹Ħà¸ĭà¸ķà¹Į":130416,"اخر":130417,"×Ĺ×Ļ׾":130418,"Ġ×Ļ׾":130419,"Ġ×Ļ׾×ĵ×Ļ×Ŀ":130420,"×Ĺ×Ļפ":130421,"×Ĺ×Ļפ×ķש":130422,"Ġdòng":130423,"Ġש×ĸ×Ķ":130424,"ÑĮе":130425,"ãģĤãģ¨":130426,"ìŀIJê°Ģ":130427,"×IJ×ĵ":130428,"Ġüz":130429,"Ġüzere":130430,"ظÙĦ":130431,"Ġ×IJ×ķ׾×Ļ":130432,"Ġ×ij×Ļ×ķ×Ŀ":130433,"ÙĦات":13043
4,"Ġmê":130435,"침":130436,"تØŃد":130437,"تØŃدث":130438,"Ġخاصة":130439,"ĠبرÙĨ":130440,"ĠبرÙĨاÙħج":130441,"ĠHÃłn":130442,"×Ĺס":130443,"ĠÙĪÙĦÙħ":130444,"×¢×Ŀ":130445,"Ġmı":130446,"à¸Łà¸±à¸ĩ":130447,"שע×Ķ":130448,"ÙĪÙģÙĤ":130449,"ס×ij×Ļר":130450,"алÑĮнÑĭй":130451,"×Ĺש×ķ×ij":130452,"ĠnÃłng":130453,"ë³¼":130454,"ĠкоÑĤоÑĢÑĭÑħ":130455,"Ġ×Ĺ×ķ×§":130456,"tör":130457,"ĠлÑĥÑĩÑĪе":130458,"ãĥijãĥ³":130459,"ลà¹Īาสุà¸Ķ":130460,"ĠجدÙĬد":130461,"ÙĬدة":130462,"à¸Ĺรà¸ĩ":130463,"ãĤĪãĤĬãĤĤ":130464,"ÙĦÙĦ":130465,"ãĤĤãģ£ãģ¨":130466,"ש×ĺ×Ĺ":130467,"Ġ×ķ×IJ×Ļ":130468,"Ġgiá»ijng":130469,"إضاÙģ":130470,"קת":130471,"ë§Ŀ":130472,"ĠzostaÅĤ":130473,"ÑĢоз":130474,"×Ļפ×Ļ×Ŀ":130475,"Ġ׼׾׾":130476,"ת×ķ׼ף":130477,"dıģını":130478,"ÙĤسÙħ":130479,"ĠÑģÑĩиÑĤ":130480,"ĠÑģÑĩиÑĤа":130481,"×ĺ×ķת":130482,"Ġưu":130483,"ĠØ¢ÙĦ":130484,"Ġмом":130485,"ĠмоменÑĤ":130486,"ĠاÙĦتعÙĦÙĬÙħ":130487,"×¢×ľ×ķת":130488,"Ġchữa":130489,"Ġyön":130490,"ĠtrÃł":130491,"ĠØŃÙĬÙĨ":130492,"à¸ĭั":130493,"ĠCá":130494,"×¢×ĸ":130495,"ĠاÙĦØ£ÙħÙĨ":130496,"cÃŃ":130497,"Ġvá»ijn":130498,"Ġà¸Ļาย":130499,"обÑĢа":130500,"×§×IJ":130501,"Ġthiếu":130502,"ãĥŀãĥ¼":130503,"สวà¸Ļ":130504,"Ġgá»Ń":130505,"Ġgá»Ńi":130506,"Ġê¹":130507,"Ġê¹Ģ":130508,"Ġthiá»ĩn":130509,"ÙĤع":130510,"wÄĻ":130511,"Ġнам":130512,"ÑĤол":130513,"Ġsân":130514,"ס×ķ×Ĵ":130515,"Ġgeçir":130516,"ÑĤон":130517,"ева":130518,"ĠÙĪØ¶Ø¹":130519,"Ġعشر":130520,"Ñģло":130521,"à¸Īัà¸ļ":130522,"ãĤ·ãĥ¼":130523,"ãĤĤãģĤãĤĬãģ¾ãģĻ":130524,"Ġvẻ":130525,"ĠÄIJá»ĥ":130526,"رÙ쨹":130527,"ĠاÙĦØ£ÙĪÙĦÙī":130528,"ÑĤаÑĢ":130529,"ãģªãģıãģ¦":130530,"ÙħÙİ":130531,"quÃŃ":130532,"×¢×ł×Ļ×Ļ׳":130533,"ген":130534,"Ġhôm":130535,"à¸Īา":130536,"ĠnhỼ":130537,"ĠاÙĦعربÙĬ":130538,"×IJף":130539,"Ġlá»Ļ":130540,"ĠjeÅĽli":130541,"à¹Ģà¸Ĺà¹Īาà¸Ļัà¹īà¸Ļ":130542,"ĠØ£ÙĨÙĩا":130543,"Ġtuy":130544,"Ġtuyá»ĩt":130545,"Ġتص":130546,"ĠتصÙĨÙĬ":130547,"ĠتصÙĨÙĬÙģ":130548,"Ġê·¸ëŁ¬ëĤĺ":130549,"оÑĨен":130550,"à¸ģิà¸Īà¸ģรรม":130551,"ãĤĦãģ£ãģ¦":130552,"Ġkhá»ıi":130553,"Ġlá»ĩ":130554,"ĠاÙĦÙħجتÙħع":130555,"à¸Ńาà¸Īà¸Īะ":130556,"à¸Īะà¹Ģà¸Ľà¹ĩà¸Ļ":130557,"овÑĭй":130558,"ר×Ŀ":130559,"รà¹īà¸Ńà¸Ļ":130560,"ש×ŀש":130561,"人ãģ«":130562,"Ġüzerine":130563,"פר×Ļ":130564,"duÄŁu":130565,"Ñĩик":130566,"Ġmùa":130567,"Ġ×ŀת×ķ×ļ":130568,"ĠcáºŃp":130569,"ĠتارÙĬØ®":130570,"×ij×ľ×ª×Ļ":130571,"Ġì¢Ģ":130572,"ÙĦع":130573,"باÙĨ":130574,"Ġchút":130575,"Ġ×Ķ×ĸ×ŀף":130576,"née":130577,"ĠLiên":130578,"ĠÙĦÙĦØ£":130579,"ØŃدÙĪØ¯":130580,"Ġ×¢×Ľ×©×Ļ×ķ":130581,"воз":130582,"Ġyaptı":130583,"Ġобо":130584,"à¹ĥหà¹īà¸ģัà¸ļ":130585,"Ġ×ij×Ķ×Ŀ":130586,"ãģıãģ¦":130587,"رأس":130588,"ĠÑģÑĢедÑģÑĤв":130589,"ĠBÃłi":130590,"ãģĵãģ¨ãģ«":130591,"ĠìĤ¬íļĮ":130592,"Ġ모ëijIJ":130593,"×ij×IJ":130594,"Ġtrắng":130595,"ĠاÙĦبÙĦد":130596,"ĠHoÃłng":130597,"либо":130598,"ĠдÑĢÑĥгиÑħ":130599,"İR":130600,"Ñĥма":130601,"ĠJeÅĽli":130602,"ãĤĤãģĹ":130603,"Ġvòng":130604,"Ġ×IJתר×Ļ×Ŀ":130605,"ĠÄijá»įc":130606,"ĠвоÑĤ":130607,"ãģłãģĮ":130608,"ë°°":130609,"à¸Ķูà¹ģล":130610,"Ġ×ŀ׼׾":130611,"ìĹIJëıĦ":130612,"газ":130613,"Ġ׳×ķספ×Ļ×Ŀ":130614,"ãģĵãģ¨ãģ§":130615,"ĠتÙĪ":130616,"ãģ§ãģĤãĤĬ":130617,"à¸Ļัà¹Īà¸ĩ":130618,"ĠможеÑĤе":130619,"szÄĻ":130620,"ãģ®ãģł":130621,"ĠÙħÙĨÙĩ":130622,"Ġbá»ķ":130623,"Ġbüt":130624,"Ġbütün":130625,"ë³´ê³ł":130626,"Ġchá»ĵng":130627,"à¹ģà¸Īà¹īà¸ĩ":130628,"ĠVì":130629,"ĠØŃر":130630,"Ġgiản":130631,"ĠÙħدÙĬÙĨØ©":130632,"تطبÙĬÙĤ":130633,"à¸Īิ":130634,"æĹ¥ãģ®":130635,"бил":130636,"à¸ģà¸Ńà¸ĩ":130637,"ê³³":130638,"ĠØ£Ùħا":130639,"ìĨIJ":130640,"Ġtrái":130641,"ĠвÑģем":130642,"ĠسÙĨØ©":130643,"ĠÑģайÑĤ":130644,"ĠгоÑĤов":130645,"пÑĭ":130646,"ĠëIJł":130647,"ĠاÙĦخط":130648,"ĠاÙĦرئÙĬسÙĬØ©":130649,"Ġíķ©ëĭĪëĭ¤":130650,"ĠìķĦëĭĪëĿ¼":130651,"ĠìĿ´ëłĩ":130652,"ĠìĿ´ëłĩê²Į":1306
53,")ØĮ":130654,"hält":130655,"ĠØ£Ùħر":130656,"ĠعÙħر":130657,"à¸ģà¹ĩà¸Īะ":130658,"Ġà¸Ĺำà¹ĥหà¹ī":130659,"Ġcân":130660,"Ġ×ij׾":130661,"Ġ×ij׾×ij×ĵ":130662,"פסק":130663,"ĠÙĬÙĤÙĪÙĦ":130664,"нÑĥÑĤÑĮ":130665,"à¹ģà¸Ħ":130666,"Ġקצת":130667,"Ġnằm":130668,"Ġhòa":130669,"bilitÃł":130670,"ĠìĹĨëĭ¤":130671,"Ġ׼פ×Ļ":130672,"ÑĢож":130673,"лага":130674,"Ġ×Ķש×Ļ":130675,"ĠNgoÃłi":130676,"ĠÙĪØ¬":130677,"ĠÙĪØ¬ÙĪØ¯":130678,"ĠìľĦíķľ":130679,"ĠusÅĤug":130680,"Ġtuần":130681,"dź":130682,"×ŀ×ķף":130683,"ĠاÙĦعدÙĬد":130684,"Ġchẳng":130685,"สุà¸Ĥà¸łà¸²à¸ŀ":130686,"Ġ×ij×ĵר×ļ":130687,"ĠÑģебе":130688,"ĠìŀĪìĿĦ":130689,"ĠاÙĦØŃاÙĦ":130690,"Ġdá":130691,"Ġcưá»Ŀi":130692,"Ġnghiên":130693,"ieÅĦ":130694,"ĠDương":130695,"ï¼ħ":130696,"شد":130697,"ãģĦãģ¤ãĤĤ":130698,"ĠвÑĭбоÑĢ":130699,"Ġcá»Ļng":130700,"ש×Ļ׳×ķ×Ļ":130701,"Ġchạy":130702,"Ġ×ij×¢×ľ×Ļ":130703,"اخبار":130704,"íķĺë©°":130705,"żÄħ":130706,"جاز":130707,"Ġ׳ר×IJ×Ķ":130708,"ศู":130709,"ศูà¸Ļ":130710,"ศูà¸Ļยà¹Į":130711,"×Ĵ×¢":130712,"Ġ×¢×ĵ×Ļ":130713,"Ġ×¢×ĵ×Ļ×Ļף":130714,"برا":130715,"ÑĨий":130716,"ĠÄIJá»ĵng":130717,"ÙĤاÙĨÙĪÙĨ":130718,"ĠÄijứng":130719,"ãģĹãģŁãĤĬ":130720,"Ġ×Ĺ×Ļ×Ļ":130721,"ĠëIJľ":130722,"ĠëIJľëĭ¤":130723,"ĠмеждÑĥ":130724,"à¸ŀวà¸ģà¹Ģà¸Ĥา":130725,"ĠBắc":130726,"ลำ":130727,"ë°±":130728,"ĠíĻķ":130729,"มาà¸ģม":130730,"มาà¸ģมาย":130731,"банк":130732,"à¸Ńาà¸ģาร":130733,"ĠhÃł":130734,"Ġ׾׳":130735,"à¸Ńà¸Ń":130736,"Ġë°Ķë¡ľ":130737,"лом":130738,"mática":130739,"ĠØŃد":130740,"ابت":130741,"à¸Ĺีà¹Īà¸Ļีà¹Ī":130742,"ĠcoÅĽ":130743,"ÙģÙĬدÙĬ":130744,"ÙģÙĬدÙĬÙĪ":130745,"ĠмеÑģÑĤо":130746,"Ġphút":130747,"มาà¸ģà¸ģวà¹Īา":130748,"×IJפ":130749,"بÙIJ":130750,"ĠPhú":130751,"ì±Ħ":130752,"ĠÙĪØ³ÙĦÙħ":130753,"à¸Īีà¸Ļ":130754,"поÑĤÑĢеб":130755,"Ġ×Ĺ×ĵש×ķת":130756,"Ø´ÙĪ":130757,"Ġעצ×ŀ×ķ":130758,"ĠعÙħÙĦÙĬØ©":130759,"à¸Ħุà¸ĵà¸łà¸²à¸ŀ":130760,"ãģ¾ãģĻãģĮ":130761,"دعÙĪ":130762,"طرÙĤ":130763,"à¹Ħมà¹Īà¸ķà¹īà¸Ńà¸ĩ":130764,"ë²Ķ":130765,"ìĬ¹":130766,"ĠkÃŃch":130767,"ĠìĹĨëĬĶ":130768,"ĠÑĤам":130769,"ĠÙĨØŃÙĪ":130770,"ĠاÙĦÙĤاÙĨÙĪÙĨ":130771,"×Ĺ×ķ×Ŀ":130772,"Ġkız":130773,"Ġ×ĵ×Ļף":130774,"ĠвÑĢемени":130775,"ãģ£ãģŁãĤĬ":130776,"ĠØ´Ùĩر":130777,"ĠìĦľë¹ĦìĬ¤":130778,"עש×Ķ":130779,"Ġgiác":130780,"ĠاÙĦسÙĦاÙħ":130781,"Ġ×IJש":130782,"ĠполÑĥÑĩа":130783,"à¸Īัà¸Ķà¸ģาร":130784,"коÑĢ":130785,"Ġ×Ķ×ĺ×ķ×ij":130786,"รายà¸ģาร":130787,"주ìĿĺ":130788,"à¹ģà¸ķà¹Īละ":130789,"Ġê·¸ëŁ°ëį°":130790,"à¸Ĺีà¹Īà¹Ģà¸Ľà¹ĩà¸Ļ":130791,"Ġת×ķ×ļ":130792,"بÙĬاÙĨ":130793,"ÐĻ":130794,"oÅĽciÄħ":130795,"ÑĤок":130796,"ĠÃĶ":130797,"ĠÃĶng":130798,"à¹Ħมà¹Īà¹ĥà¸Ĭà¹Ī":130799,"ãģ¿ãģ¦":130800,"ÐŁÐ¾":130801,"ĠЧÑĤо":130802,"íĻ©":130803,"×ĺ×ij×¢":130804,"меÑĤÑĢ":130805,"Ġ×ij×ŀ×Ķ":130806,"Ġ×ij×ŀ×Ķ׾":130807,"Ġ×ij×ŀ×Ķ׾×ļ":130808,"ÑĩÑĮ":130809,"קש×Ķ":130810,"знак":130811,"знаком":130812,"ujÄĻ":130813,"×Ļצר":130814,"ĠاÙĦÙħÙĦÙĥ":130815,"ıyla":130816,"×IJ×ŀת":130817,"à¸Ľà¸´à¸Ķ":130818,"×IJ×Ĺ×ĵ":130819,"راد":130820,"ĠmáºŃt":130821,"ëĭ¤ëĬĶ":130822,"Ġlạnh":130823,"ש׾×ķש":130824,"ØŃدÙĬØ«":130825,"تز":130826,"å¹´ãģ®":130827,"ĠкваÑĢ":130828,"ĠкваÑĢÑĤиÑĢ":130829,"ä½ľãĤĬ":130830,"رÙĪØ¨":130831,"ован":130832,"ĠТе":130833,"à¸Īำà¸ģ":130834,"à¸Īำà¸ģัà¸Ķ":130835,"باط":130836,"×Ĵת":130837,"ĠмаÑĪ":130838,"ĠмаÑĪин":130839,"×Ļצ×Ķ":130840,"ãģ»ãģ¨":130841,"ãģ»ãģ¨ãĤĵãģ©":130842,"ÃŃdo":130843,"ĠÑıзÑĭк":130844,"à¸ļิà¸Ļ":130845,"สà¸ĸาà¸Ļà¸Ĺีà¹Ī":130846,"ĠìĹ´":130847,"ãĤ¦ãĤ§":130848,"ĠcÃł":130849,"пан":130850,"åı£ãĤ³ãĥŁ":130851,"Ġرد":130852,"اÙĤت":130853,"ĠÙĥب":130854,"ĠÙĥبÙĬرة":130855,"ÑģÑĤал":130856,"ש×ŀ×Ĺ":130857,"posición":130858,"ĠÙħÙĦÙĬÙĪÙĨ":130859,"ĠìĿ´ìķ¼":130860,"ĠìĿ´ìķ¼ê¸°":130861,"Ġhút":130862,"ĠÅĽwiat":130863,"Ġë°©ë²ķ":130864,"ĠÑģвеÑĤ":130865,"Ġвидео":130866,"ĠاÙĦÙĨظاÙħ":130867,"Ġtrá»Ŀi":130868,"Ġë
ĮĢíķ´ìĦľ":130869,"ר×ŀת":130870,"تداÙĪÙĦ":130871,"×ķר×ĵ":130872,"ת×ŀ":130873,"ת×ŀ×ķ׳×ķת":130874,"Ġ×ŀף":130875,"Ġдва":130876,"Ġ×Ķ×§×ķ":130877,"æĹ¥ãģ«":130878,"Ġ×Ķ×Ĵ×Ļ×¢":130879,"à¹Ģà¸ŀิà¹Īมà¹Ģà¸ķิม":130880,"Ùħارس":130881,"Ġê²ĥìŀħëĭĪëĭ¤":130882,"ãģªãģĦãģ¨":130883,"Ġnhiá»ĩt":130884,"ëIJ©ëĭĪëĭ¤":130885,"Ġ×ij׳×ķש×IJ":130886,"Ġê°Ģìŀ¥":130887,"Ġvợ":130888,"ĠÄijóng":130889,"צ×Ļ׾×ķ×Ŀ":130890,"ê´Ģê³Ħ":130891,"ваÑı":130892,"×IJ×Ļ×ĸ":130893,"×IJ×Ļ×ĸ×Ķ":130894,"ĠÙĨظاÙħ":130895,"ÙħØŃاÙ쨏":130896,"Ġtải":130897,"기ëıĦ":130898,"à¸Ľà¸±à¸Īà¸Īุ":130899,"à¸Ľà¸±à¸Īà¸Īุà¸ļัà¸Ļ":130900,"׼×ĵ×ķר":130901,"ĠìķĦìĿ´":130902,"׼׳×Ļס":130903,"à¹Ģà¸ķร":130904,"à¹Ģà¸ķรียม":130905,"Ġngoại":130906,"ĠدÙĪÙĦار":130907,"Ġrẻ":130908,"ĠkhÄĥn":130909,"عدد":130910,"شعب":130911,"czyÄĩ":130912,"ĠاÙĦÙĥر":130913,"ĠÑĩеловека":130914,"ĠÙĪØ¥ÙĨ":130915,"×IJ×ĺ":130916,"ĠthÆ¡":130917,"ĠاÙĦرÙĬاض":130918,"опÑĢедел":130919,"опÑĢеделен":130920,"×Ķ×ŀש×ļ":130921,"ĠÐĿово":130922,"зÑĭва":130923,"ĠاÙĦدÙĪÙĦÙĬ":130924,"ĠÄijáp":130925,"ĠкÑĢед":130926,"ĠкÑĢедиÑĤ":130927,"ового":130928,"Ġmôn":130929,"à¸Ľà¸£à¸°à¹Ĥย":130930,"à¸Ľà¸£à¸°à¹Ĥยà¸Ĭà¸Ļ":130931,"à¸Ľà¸£à¸°à¹Ĥยà¸Ĭà¸Ļà¹Į":130932,"ÑģÑĤе":130933,"ĠThá»ĭ":130934,"دÙĬØ©":130935,"×ŀצ×ķ":130936,"ÙģØ§Øª":130937,"×§×ĵ×Ŀ":130938,"ìĿ´ëĿ¼ê³ł":130939,"ÙĪØ®":130940,"Ġ×Ĺ×ĸ":130941,"ĠÑĦоÑĤо":130942,"׾×Ļת":130943,"تÙİ":130944,"ÙĪØ¨Ø±":130945,"йÑĤи":130946,"ĠÃ¶ÄŁren":130947,"Ġ×Ķ×ĸ×ķ":130948,"Ġvá»įng":130949,"ÙĤÙĪØ©":130950,"ĠTây":130951,"ĠÐĿи":130952,"Ġש×ķ×ij":130953,"ãģ¨è¨ĢãĤıãĤĮ":130954,"ãģ©ãĤĵãģª":130955,"×Ĺצ×Ļ":130956,"ï½ľ":130957,"Ġ×ķ×Ķ×ķ×IJ":130958,"ä¸Ģãģ¤":130959,"ĠÑģÑĤоиÑĤ":130960,"niÄħ":130961,"×ĺר×Ļ":130962,"ĠдеÑĤей":130963,"нÑıÑĤÑĮ":130964,"ĠÑģделаÑĤÑĮ":130965,"Ġë§İìĿ´":130966,"ä½ķãģĭ":130967,"ãģĽãĤĭ":130968,"à¹Ħหม":130969,"à¸ķิà¸Ķà¸ķà¹Īà¸Ń":130970,"Ġ×ijת×Ĺ":130971,"Ġ×ijת×Ĺ×ķ×Ŀ":130972,"ìĻĦ":130973,"ì§ĢëĬĶ":130974,"ÑģÑĤаÑĤ":130975,"ÑıÑģн":130976,"üb":130977,"Ġthả":130978,"Ġ×ij×IJ×ŀת":130979,"Ġtuyến":130980,"×ĵ×Ļר×Ķ":130981,"Ġ×IJ×Ļש×Ļ":130982,"×ĸ׼ר":130983,"ãģ°ãģĭãĤĬ":130984,"Ġxét":130985,"׼×Ļ×ķ":130986,"׼×Ļ×ķ×ķף":130987,"diÄŁini":130988,"ĠاÙĦÙħÙĪØ¶ÙĪØ¹":130989,"ĠháºŃu":130990,"à¸Īาà¸ģà¸ģาร":130991,"×ijס×Ļס":130992,"Ġ×ŀ×Ĵ×Ļ×¢":130993,"×ij×Ļ×¢":130994,"ĠÙĪØ¬Ùĩ":130995,"à¹ģà¸Ķà¸ĩ":130996,"à¸Ļาà¸ĩ":130997,"ĠÅŀa":130998,"ì¡´":130999,"ë¡Ģ":131000,"à¸ķะ":131001,"Ġ×Ķ×Ĺ×Ļ×Ļ×Ŀ":131002,"ÙģÙĬد":131003,"ãģ§ãģĻãģĭãĤī":131004,"ê·ľ":131005,"źni":131006,"ĠлÑİдей":131007,"Ġyüzde":131008,"ıyorum":131009,"ĠاÙĦبØŃر":131010,"eño":131011,"паÑĢ":131012,"ÙĬÙĤØ©":131013,"обÑĢ":131014,"ר×ķ×ļ":131015,"تÙĪÙĤع":131016,"ĠاÙĦØ´ÙĬØ®":131017,"åĪĿãĤģãģ¦":131018,"ĠÑĤелеÑĦ":131019,"ĠÑĤелеÑĦон":131020,"Ġthôi":131021,"Ġ×Ļ׼×ķ׾×Ļ×Ŀ":131022,"ĠÅŁirk":131023,"ĠÅŁirket":131024,"Ġìļ°ë¦¬ê°Ģ":131025,"ĠÄijông":131026,"Ġת×ķ×ĵ×Ķ":131027,"ÑģмоÑĤÑĢеÑĤÑĮ":131028,"ĠÙĦÙĩÙħ":131029,"Ġ׾׼":131030,"ĠNó":131031,"ĠØŃاÙĦØ©":131032,"ãģĦãģij":131033,"קר×ķ":131034,"azı":131035,"ãĤ³ãĥ¼":131036,"ĠÙĦÙĦت":131037,"sınız":131038,"ĠHải":131039,"기ìĪł":131040,"ยัà¸ĩà¹Ħมà¹Ī":131041,"ëĭ¤ê³ł":131042,"פ×Ĺ":131043,"Ġ׾×Ĵ×ij×Ļ":131044,"ĠعÙĨÙĩ":131045,"Ġказ":131046,"Ġказино":131047,"بÙĪØ±":131048,"ÑĦеÑĢ":131049,"Ġê°ĻìĿ´":131050,"تسجÙĬÙĦ":131051,"ĠاÙĦÙħرÙĥز":131052,"ĠThái":131053,"даÑĤÑĮ":131054,"×ŀ×Ļ×Ļ׾":131055,"ĠpaylaÅŁ":131056,"ãģ¤ãģ®":131057,"à¹Ģรืà¸Ń":131058,"nça":131059,"׳×ķ×Ĺ":131060,"Ġ×IJפ×Ļ׾×ķ":131061,"ãģ¨èĢĥãģĪ":131062,"ãģ¨ãģĹãģ¦ãģ¯":131063,"à¹Ģà¸Īà¸Ń":131064,"×ŀפ":131065,"ĠgiriÅŁ":131066,"лиÑĤ":131067,"ÑĤелÑı":131068,"Ñijн":131069,"æ°Ĺãģ«":131070,"Ġgó":131071,"Ġgóp":131072,"åĪĩãĤĬ":131073,"Ġ×Ķ×Ĺ×ĵש":131074,"жал":131075,"Ġ×ĵעת":131076,"éģķãģĨ":131077,"à¹Ģà¸Ĥà¹īาà¹Ħà¸Ľ":131078,"Ġסר×ĺ":13
1079,"eña":131080,"æĸ°ãģĹãģĦ":131081,"رÙİ":131082,"ĠÐIJÑĢ":131083,"Ġphản":131084,"à¸Īะà¹Ħà¸Ķà¹ī":131085,"Ġ×ijצ×ķר×Ķ":131086,"شاÙĩ":131087,"شاÙĩد":131088,"ÙĪØ±Ø¯":131089,"à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩà¸Īาà¸ģ":131090,"илиÑģÑĮ":131091,"à¹ģละà¸ģาร":131092,"Ġ×Ķ×ĸ׼":131093,"Ġ×Ķ×ĸ׼×ķ×Ļ×ķת":131094,"eiÃŁ":131095,"ãĥ¨":131096,"ìĥĪ":131097,"ĠÃĩa":131098,"Ư":131099,"ש×Ĵ":131100,"ÙĬÙĨØ©":131101,"รà¹īà¸Ńà¸ĩ":131102,"ãĤµãĥ³":131103,"ÑĢоÑģÑģий":131104,"ÑĢоÑģÑģийÑģк":131105,"aÄŁa":131106,"ĠнаÑĩина":131107,"ĠصÙĦÙī":131108,"à¸Ĺุà¸ģà¸Ħà¸Ļ":131109,"íļĮìĤ¬":131110,"ĠлиÑĨ":131111,"Ø´ÙĬر":131112,"ĠØ´ÙĬØ¡":131113,"ÙĬÙĨا":131114,"Ġפ×Ĺ×ķת":131115,"Ġiçeris":131116,"Ġiçerisinde":131117,"ĠØ£ØŃÙħد":131118,"Ġżeby":131119,"ì´Ŀ":131120,"Ġпоказ":131121,"Ġименно":131122,"หà¸Ļัà¸ĩส":131123,"หà¸Ļัà¸ĩสืà¸Ń":131124,"ĠÑĤÑĢе":131125,"สัà¸ĩà¸Ħม":131126,"Ø¥ÙIJ":131127,"ãģĮå¿ħè¦ģ":131128,"ÙĬÙijØ©":131129,"פצ":131130,"íĭ°":131131,"ĠÙħجاÙĦ":131132,"׳פש":131133,"кан":131134,"×Ĺ×ķפ":131135,"×Ĺ×ķפש":131136,"ì²ĺëŁ¼":131137,"оваÑı":131138,"зов":131139,"Ġhạ":131140,"ĠdziÄĻki":131141,"×Ļר×ķ":131142,"Ġ׾×ŀצ":131143,"Ġ׾×ŀצ×ķ×IJ":131144,"×Ļ×ĵ×ķ":131145,"Ġsợ":131146,"Ġ׾×Ķ×Ĵ×Ļ×¢":131147,"×§×ij×¢":131148,"Ġchiá»ģu":131149,"ãĥŀãĤ¤":131150,"ĠdÃłng":131151,"à¹ģà¸Łà¸Ļ":131152,"Ġüye":131153,"×Ļ׳×Ĵ":131154,"à¹Ģรียà¸ģ":131155,"ç§ģãģĮ":131156,"thé":131157,"ĠÑĦилÑĮ":131158,"ĠÑĦилÑĮм":131159,"ĠNgÃły":131160,"Ġжен":131161,"ĠженÑīин":131162,"جÙĬد":131163,"nç":131164,"à¸Ľà¸£à¸²":131165,"×Ļ×ŀ×ķ":131166,"Ġná»ģn":131167,"×IJ×ķ׾×Ŀ":131168,"ĠвозможноÑģÑĤÑĮ":131169,"Ġëĭ¤ìĭľ":131170,"è¦ĭãģŁ":131171,"à¸ĸà¸Ļ":131172,"à¸ĸà¸Ļà¸Ļ":131173,"mızı":131174,"ĠÙħجÙħÙĪØ¹Ø©":131175,"cjÄħ":131176,"ĠÐłÐ¤":131177,"à¸ģำหà¸Ļ":131178,"à¸ģำหà¸Ļà¸Ķ":131179,"ĠìĹ¬ê¸°":131180,"landı":131181,"ниÑĨ":131182,"ÑģÑĤве":131183,"Ġ×ĵ×ijר×Ļ×Ŀ":131184,"ĠskÅĤad":131185,"ãĤĬãģ¾ãģĹãģŁ":131186,"ĠоÑĤкÑĢÑĭÑĤ":131187,"нÑıÑĤ":131188,"ĠÑģвоей":131189,"à¸Īิà¸ķ":131190,"ĠкаÑĩеÑģÑĤве":131191,"ĠettiÄŁi":131192,"ìĤ¬íķŃ":131193,"ĠاÙĦÙĬÙħÙĨ":131194,"иÑĩеÑģкий":131195,"ë¸Į":131196,"Ġ×ij×IJרץ":131197,"ĠاسÙħ":131198,"ĠизвеÑģÑĤ":131199,"rão":131200,"ĠattivitÃł":131201,"à¹Ģà¸Ľà¹ĩà¸Ļà¸ģาร":131202,"ĠاÙĦدÙĥت":131203,"ĠاÙĦدÙĥتÙĪØ±":131204,"ĠÙĪØ§ØŃدة":131205,"ĠÑģÑĩеÑĤ":131206,"ĠпÑĢиÑĩ":131207,"ĠпÑĢиÑĩин":131208,"ĠÙĪØ²Ø§Ø±Ø©":131209,"Ġhuyá»ĩn":131210,"ĠÙĥتاب":131211,"à¹ģà¸Ļà¹Īà¸Ļ":131212,"à¹ģà¸Ļà¹Īà¸Ļà¸Ńà¸Ļ":131213,"Ġgünü":131214,"гÑĢÑĥз":131215,"ĠاÙĦخاص":131216,"Ġgörül":131217,"׾×ŀ×ĵ":131218,"ĠìłķëıĦ":131219,"×ķ×ij×Ļ׾":131220,"Ġ×ŀקצ×ķ×¢×Ļ":131221,"ĠоÑģобенно":131222,"à¸Ľà¸£à¸°à¸ģา":131223,"à¸Ľà¸£à¸°à¸ģาศ":131224,"acaģını":131225,"ë¶ģ":131226,"à¸łà¸¹à¸¡à¸´":131227,"ĠÑįлекÑĤ":131228,"ĠÑįлекÑĤÑĢо":131229,"Ġקש×Ķ":131230,"سÙĦØ·":131231,"à¸Ĭà¸Ļะ":131232,"×¢×Ļ׾":131233,"ĠЧе":131234,"à¹ģà¸Ļà¹Ī":131235,"lıģ":131236,"lıģın":131237,"Ġ×ŀ×¢×¨×Ľ×ª":131238,"好ãģįãģª":131239,"มาà¸ģà¸Ĥึà¹īà¸Ļ":131240,"×ŀ×¢×ijר":131241,"ĠاÙĦÙħغرب":131242,"ĠпеÑĢи":131243,"ĠпеÑĢиод":131244,"Ġnhạc":131245,"اÙĪÙĬ":131246,"ĠÙĪØ¹ÙĦÙī":131247,"أخذ":131248,"ĠCô":131249,"תר×ij×ķת":131250,"×Ĵ×Ķ":131251,"Ġktórej":131252,"×IJ×Ļת":131253,"×ij×ķ×IJ":131254,"делÑĮ":131255,"รีวิ":131256,"รีวิว":131257,"жÑĥ":131258,"Ġ×ij×Ĺ×ķ":131259,"еÑĪÑĮ":131260,"ĠØ£ÙĦÙģ":131261,"ĠاÙĦÙĪØ·ÙĨÙĬ":131262,"ĠاÙĦÙħÙĨØ·ÙĤØ©":131263,"nÄħÄĩ":131264,"Ġthiên":131265,"иÑĩеÑģкой":131266,"ĠاÙĦÙħÙĦ":131267,"ĠعÙħ":131268,"ספר":131269,"Ġnhóm":131270,"ÙĪØµÙģ":131271,"ĠChúng":131272,"ĠرÙĤÙħ":131273,"ãģ¾ãģĹãģŁãģĮ":131274,"alité":131275,"ลม":131276,"ĠëĤ´ê°Ģ":131277,"׾ק×ķ×Ĺ":131278,"ĠSÆ¡n":131279,"posição":131280,"miÄĻ":131281,"Ġtránh":131282,"ĠÄIJá»Ļ":131283,"׼×Ĺ":131284,"ãģĤãģ£ãģ¦":131285,"à¸Ńยà¹Īา":131286,"Ġ×ŀ×Ĺ×Ļר":131287,"Ġ×Ķ×Ļ
ת×Ķ":131288,"à¸Ľà¹Īา":131289,"à¸Ńืà¹Īà¸Ļà¹Ĩ":131290,"Ø´ÙĤ":131291,"×ł×¡×Ļ":131292,"림":131293,"ãģ¦ãģĹãģ¾ãģĨ":131294,"Ġ×ŀצ×ij":131295,"ãģ«åĩº":131296,"ÙħÙĪØ§Ø·ÙĨ":131297,"ยัà¸ĩมี":131298,"алÑĮнÑĭе":131299,"sanız":131300,"إسرائÙĬÙĦ":131301,"ĠvÃłi":131302,"ì¤Ħ":131303,"ã썿ĢĿãģ£ãģ¦":131304,"×Ļ×ķ׳×Ļ":131305,"çĶŁãģį":131306,"Ġsâu":131307,"ÑĩиÑģÑĤ":131308,"Ġlá»ħ":131309,"ĠGiá":131310,"à¸Ńà¸¸à¸Ľ":131311,"à¸Ńà¸¸à¸Ľà¸ģร":131312,"à¸Ńà¸¸à¸Ľà¸ģรà¸ĵà¹Į":131313,"Ġnhẹ":131314,"rö":131315,"ס×ĺ×Ļ":131316,"ãģķãĤĵãģĮ":131317,"Ġdầu":131318,"عÙİ":131319,"ترا":131320,"×Ĵ×ĵ׾":131321,"Ġtécnica":131322,"׼׳×Ļ×Ŀ":131323,"תקש":131324,"תקש×ķרת":131325,"Ġнего":131326,"était":131327,"Ġmá»ģm":131328,"ÑģеÑĤ":131329,"ĠnháºŃt":131330,"Ġ×ŀ×¢×ľ":131331,"Ġ×Ķ×¢×ij×ķ×ĵ":131332,"Ġ×Ķ×¢×ij×ķ×ĵ×Ķ":131333,"Ġ×Ĵ×Ļ׾":131334,"ãģ¯ãģªãģĦ":131335,"ائØŃ":131336,"ĠздеÑģÑĮ":131337,"×IJ×Ļ׳×ĺר":131338,"ÙħÙIJ":131339,"Ġ×Ļ×Ĺ×ĵ":131340,"راÙģ":131341,"ì²ĺ리":131342,"×ĵ×¢×ķת":131343,"ì¹ľ":131344,"ĠТо":131345,"ĠThế":131346,"ì¶©":131347,"Ġ׳׼×ķף":131348,"عÙĬØ´":131349,"низ":131350,"ĠجاÙĨب":131351,"×ŀקצ×ķ×¢":131352,"à¹Ĥà¸ĭ":131353,"ÑģÑĥÑĤ":131354,"ìĸ´ìļĶ":131355,"ãĤĴè¦ĭãģ¦":131356,"ارد":131357,"Ġaçıl":131358,"ĠاÙĦØŃÙĬاة":131359,"à¸ģà¹ĩà¹Ħà¸Ķà¹ī":131360,"ãģĿãĤĮãĤĴ":131361,"عضÙĪ":131362,"ĠгÑĢаж":131363,"ĠгÑĢаждан":131364,"à¸Īะà¸ķà¹īà¸Ńà¸ĩ":131365,"ĠìĿ´ë٬":131366,"ĠìĿ´ë٬íķľ":131367,"Ġtrách":131368,"ÙĨÙİ":131369,"Ġkısa":131370,"ÃĶ":131371,"ÑĪка":131372,"ãģ®äºº":131373,"ĠÐŁÐ¾Ñģ":131374,"ĠÐŁÐ¾Ñģле":131375,"ÑĥлÑĮ":131376,"ÙĪØ§Ø¬Ùĩ":131377,"ÙĤرب":131378,"à¸Ľà¸ıิà¸ļัà¸ķิ":131379,"ê°Ļ":131380,"Ġ×ŀ׳":131381,"ĠÑģвои":131382,"براÙħج":131383,"ĠرÙĪ":131384,"пÑĢод":131385,"пÑĢодаж":131386,"ĠbyÅĤy":131387,"วัย":131388,"Ġgörün":131389,"ĠÃĪ":131390,"ÑİÑīим":131391,"ĠÑĤакой":131392,"ÙģÙĪØ±":131393,"ĠÙ쨹ÙĦ":131394,"Ġбел":131395,"ëIJł":131396,"erÃŃa":131397,"ĠÑģвоÑİ":131398,"Ġlã":131399,"Ġlãnh":131400,"à¹Ģà¸ŀืà¹Īà¸Ńà¹ĥหà¹ī":131401,"ÙĤÙĨ":131402,"تطÙĪÙĬر":131403,"Ġsayı":131404,"ĠÑģейÑĩаÑģ":131405,"Ġ×IJ×Ĺרת":131406,"×§×ķפ×Ķ":131407,"×§×ķרס":131408,"ĠسÙħ":131409,"Ġ×ĺ×Ļפ×ķ׾":131410,"ìĿ´ëĿ¼ëĬĶ":131411,"دراسة":131412,"èµ·ãģĵ":131413,"×Ĺ×Ļ׳":131414,"×Ĺ×Ļ׳×ķ×ļ":131415,"×ĵ×§":131416,"Ġë§ŀ":131417,"Ġкоманд":131418,"ĠÐijо":131419,"ĠигÑĢÑĭ":131420,"à¸ļี":131421,"ĠØ£Ùİ":131422,"вен":131423,"ĠاÙĦجدÙĬد":131424,"ĠÙĦØ¥":131425,"Ġ×ķ×IJ׳×Ļ":131426,"Ġ×Ķס×Ļ":131427,"иÑĩеÑģкого":131428,"رÙĪØŃ":131429,"à¸ģารศึà¸ģษา":131430,"ĠTrưá»Ŀng":131431,"игÑĢа":131432,"ılması":131433,"ĠмаÑģÑģ":131434,"ãģ¨ãģįãģ«":131435,"à¸Ĺีà¹Īà¸ľà¹Īาà¸Ļ":131436,"à¸Ĺีà¹Īà¸ľà¹Īาà¸Ļมา":131437,"ĠاÙĦسابÙĤ":131438,"Ġ×ŀ×¢×ĺ":131439,"ваÑĤÑĮ":131440,"mÃ¼ÅŁ":131441,"Ġ׾׼×ļ":131442,"Ġtá»ĭch":131443,"ÙģÙĩÙħ":131444,"تدرÙĬب":131445,"Ø´Ùĥ":131446,"Ġ×ij×ŀ×Ļ":131447,"Ġ×ij×ŀ×Ļ×ķ×Ĺ×ĵ":131448,"ÙĤطاع":131449,"ãģªãģĹ":131450,"×ķצ×Ļ×IJ":131451,"ĠÙĪØ³ÙĬ":131452,"зÑĥ":131453,"Ġyat":131454,"Ġyatırım":131455,"ë§İ":131456,"Ġthắng":131457,"ãģĬ客":131458,"ãģĬ客æ§ĺ":131459,"ĠThiên":131460,"ãģ«å¯¾ãģĹãģ¦":131461,"ÑĢиÑģ":131462,"ÙĨتائ":131463,"ÙĨتائج":131464,"Ġ×ŀשר":131465,"Ġ×ŀשר×ĵ":131466,"ĠتعاÙĦ":131467,"ĠتعاÙĦÙī":131468,"ש׳×Ļ":131469,"ÙĩاÙħ":131470,"×IJ׳ש×Ļ×Ŀ":131471,"Ġżycia":131472,"ĠÑĢÑĥблей":131473,"ÙĬض":131474,"Ġkatıl":131475,"ĠÙħÙĪØ¶ÙĪØ¹":131476,"Ġvardır":131477,"ĠÙħÙĨØ·ÙĤØ©":131478,"ĠTrần":131479,"ĠвеÑģ":131480,"üp":131481,"ÙħÙĪÙĨ":131482,"ÑĪли":131483,"Ġnóng":131484,"Ø®ÙĦÙģ":131485,"ĠСÑĤа":131486,"ĠдоÑĢ":131487,"ĠдоÑĢог":131488,"ĠwÅĤaÅĽnie":131489,"eÄŁin":131490,"Ġhiá»ĥm":131491,"ĠСам":131492,"ê»ĺìĦľ":131493,"ĠÑĦа":131494,"ãģ»ãģĨ":131495,"ãģ»ãģĨãģĮ":131496,"×ķפ×Ļ×¢":131497,"ê°Ī":131498,"دÙĪÙĦ":131499,"Ġthuê":131500,"Ġchá»Ĺ":131501,"Ġëĭ¹ìĭł":131502,"ãģijãĤĮ"
:131503,"ãģijãĤĮãģ©":131504,"ë³´íĺ¸":131505,"ãģķãĤĮãģ¦ãģĦãģ¾ãģĻ":131506,"Ġнадо":131507,"ĠìĤ¬ëŀĮëĵ¤":131508,"à¹Ģà¸Ĥà¸ķ":131509,"สมัย":131510,"zÅĤ":131511,"تÙĪØ±":131512,"Ġשת×Ļ":131513,"vê":131514,"Ġ×ijת×ķ×ļ":131515,"à¸Ĭัย":131516,"ãģĦãģ£ãģŁ":131517,"ìĿij":131518,"Ġtầ":131519,"Ġtầng":131520,"ש׼ר":131521,"Ġê¸Ģ":131522,"Ġ×Ķש׳×Ķ":131523,"ĠاÙĨÙĩ":131524,"ç«ĭãģ¡":131525,"rés":131526,"führen":131527,"رØŃÙħ":131528,"ê·¹":131529,"ĠâĢ«":131530,"Ġsuất":131531,"à¸Łà¸´":131532,"ÙĬÙĩا":131533,"ĠاÙĦاتØŃاد":131534,"Ġtuyá»ĥn":131535,"ãģ¾ãĤĭ":131536,"Ġmại":131537,"Ġngân":131538,"ãĤ°ãĥ©":131539,"欲ãģĹãģĦ":131540,"سار":131541,"ãĤĤãģ®ãģ§ãģĻ":131542,"кие":131543,"Ġseçim":131544,"åħ¥ãĤĬ":131545,"ãģªãģ©ãĤĴ":131546,"ÑĤÑĢи":131547,"ĠÑģпеÑĨ":131548,"Ġأد":131549,"Ġодно":131550,"ÑĪел":131551,"ãĥĩãĥ¼ãĤ¿":131552,"ãĤ·ãĤ¹ãĥĨ":131553,"ãĤ·ãĤ¹ãĥĨãĥł":131554,"è¡Įãģį":131555,"ã썿ĢĿãģ£ãģŁ":131556,"à¹Ģà¸ģิà¸Ķà¸Ĥึà¹īà¸Ļ":131557,"ĠÑĤож":131558,"ĠÑĤоже":131559,"Ġsạch":131560,"ĠÑģÑĢок":131561,"ĠклиенÑĤ":131562,"ĠÙħشرÙĪØ¹":131563,"Ġaltında":131564,"Ġì·¨":131565,"ä¸Ńãģ®":131566,"ãģķãģĽãĤĭ":131567,"ãģĻãģ¹":131568,"ãģĻãģ¹ãģ¦":131569,"ê°ľë°ľ":131570,"ĠÄijêm":131571,"ãģªãģĦãģ®ãģ§":131572,"ì²ł":131573,"×¢×ij×ĵ":131574,"Ġdấu":131575,"à¸Ħà¸Ļà¸Ĺีà¹Ī":131576,"ĠCách":131577,"تعÙĦÙĬÙħ":131578,"Ġhại":131579,"ãĤ»ãĥķãĥ¬":131580,"ĠÙĨÙ쨳Ùĩ":131581,"ĠíĨµíķ´":131582,"ÑĪло":131583,"ĠнапÑĢав":131584,"ĠнапÑĢавлен":131585,"ÑĢÑĥÑĩ":131586,"íĶĮ":131587,"Ġ×ijר×Ļ×IJ":131588,"ãģ®ãģ¿":131589,"ãģ«ãģĬãģĦãģ¦":131590,"×ij׳ק":131591,"ãĤ¨ãĥ³":131592,"Ø«ÙĦاث":131593,"Ġmỹ":131594,"ĠÑģайÑĤе":131595,"ĠемÑĥ":131596,"تغÙĬ":131597,"تغÙĬÙĬر":131598,"خصÙĪØµ":131599,"ÑĤели":131600,"Ġ×ķ׾׼ף":131601,"פע×Ŀ":131602,"ĠпоÑįÑĤомÑĥ":131603,"راÙĨ":131604,"иÑĤелей":131605,"пиÑģан":131606,"×¢×¥":131607,"ĠìĤ¬ìĹħ":131608,"Ùħز":131609,"جÙħÙĬع":131610,"ë©´ìĦľ":131611,"à¸ľà¸¥à¸´à¸ķà¸łà¸±":131612,"à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵ":131613,"à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵà¸ij":131614,"à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵà¸ijà¹Į":131615,"ĠпÑĢимеÑĢ":131616,"ãĤŃãĥ¼":131617,"lâ":131618,"ĠchÄĥm":131619,"缮ãģ®":131620,"ãģĦãģĭ":131621,"ãģ¨è¨ĢãģĨ":131622,"×ĸ×ķ×Ĵ":131623,"Ġ×ij×ĵ×Ļ":131624,"Ġ×ij×ĵ×Ļ×ķ×§":131625,"ãģĬåºĹ":131626,"à¸ķà¸Ńà¸Ļà¸Ļีà¹ī":131627,"Ġphá»iji":131628,"пÑĤ":131629,"สà¸Ļาม":131630,"Ø·ÙĪ":131631,"صاØŃ":131632,"صاØŃب":131633,"ĠDü":131634,"ĠDünya":131635,"Ġпока":131636,"пал":131637,"ĠÄijảo":131638,"ĠاÙĦÙģÙĪØ±":131639,"ĠاÙĦÙģÙĪØ±Ùĥس":131640,"Ġmáu":131641,"кÑĢеп":131642,"ĠاÙĦساعة":131643,"ĠгоÑĢода":131644,"Ù쨵ÙĦ":131645,"айÑĤе":131646,"Ġдог":131647,"ĠдоговоÑĢ":131648,"Ġإذ":131649,"Ġ×ij׼׾׾":131650,"ÙĬتÙĩ":131651,"×Ĵ×ijר":131652,"Ġbirç":131653,"Ġbirçok":131654,"문íĻĶ":131655,"ãģĿãģĨãģª":131656,"راØŃ":131657,"ĠÙħرة":131658,"ĠденÑĮги":131659,"fä":131660,"à¸Ĥà¹īาว":131661,"ĠÑģовÑĢем":131662,"ĠÑģовÑĢеменн":131663,"׾×Ĺ×¥":131664,"èī¯ãģı":131665,"ĠÙ쨣":131666,"Ġ×ķ×ĸ×Ķ":131667,"Ġзани":131668,"Ġзанима":131669,"Ġê°Ģì§Ģê³ł":131670,"ĠhÆ¡i":131671,"ãģªãģ®ãģĭ":131672,"ãĥĨãĥ¬ãĥĵ":131673,"Ġר×ij×ķת":131674,"à¸ķี":131675,"Ġ×ij×©×ł×ª":131676,"ĠTại":131677,"ĠthuáºŃn":131678,"Ñģел":131679,"Ñijм":131680,"dziÄĩ":131681,"ĠÑģка":131682,"ĠÑģкаÑĩ":131683,"ĠÑģкаÑĩаÑĤÑĮ":131684,"×ķ×ŀ×ķ":131685,"гла":131686,"ĠминÑĥÑĤ":131687,"åĩºãģĻ":131688,"Ġ×Ĺ×Ļ×Ļ×ij":131689,"Ġת×Ĵ×ķ×ij×Ķ":131690,"à¸£à¸¹à¸Ľà¹ģà¸ļà¸ļ":131691,"ниÑĨа":131692,"Ġİn":131693,"Ġأع":131694,"ĠضÙħÙĨ":131695,"ÙħثاÙĦ":131696,"ĠyaÅŁan":131697,"ĠìĹ°êµ¬":131698,"ĠLê":131699,"ש׾×Ĺ":131700,"ãģıãģªãĤĭ":131701,"ìĹĨìĿ´":131702,"ĠÑĤÑĢи":131703,"ĠÑĩаÑģÑĤо":131704,"ĠобÑĢаÑĤ":131705,"пло":131706,"دخ":131707,"دخÙĪÙĦ":131708,"سÙĩ":131709,"à¸Ńาà¸ģ":131710,"à¸Ńาà¸ģาศ":131711,"Ġ׼×ĸ×Ķ":131712,"Ġ×Ķעסק":131713,"ĠاÙĦØ£ÙĨ":131714,"å¹´ã
ģ«":131715,"עש×ķ":131716,"Ġשע×ķת":131717,"ĠmÃłn":131718,"×IJר×Ļ":131719,"sıyla":131720,"Ù쨱ÙĤ":131721,"ниÑħ":131722,"Ġتست":131723,"è¦ĭãģ¦":131724,"ØŃاÙĪÙĦ":131725,"×IJ×Ļ׼×ķת":131726,"ĠbaÅŁladı":131727,"stÄħ":131728,"stÄħpi":131729,"à¸Ĺีà¹Īà¹Ģรา":131730,"ÙĤرر":131731,"جاب":131732,"Ġ×ijר×ķר":131733,"à¹Ģà¸Ĥà¹īาà¹ĥà¸Ī":131734,"×ŀ×Ĺקר":131735,"alım":131736,"Ġס×Ļפ×ķר":131737,"ãģ§ãģĤãĤĮãģ°":131738,"Ġש×ŀ×ķר×ķת":131739,"Ġ×ķ×ŀ×Ķ":131740,"ãģĵãģĿ":131741,"idée":131742,"ä¸ĭãģķãģĦ":131743,"تÙĨاÙĪÙĦ":131744,"Ġลà¹īาà¸Ļ":131745,"Ġìļ°ë¦¬ëĬĶ":131746,"اÙĨا":131747,"ÑģÑĤой":131748,"боÑĤ":131749,"ĠyaÅŁam":131750,"köy":131751,"Ø¥ÙĦ":131752,"ÑĢÑĭв":131753,"기ìĹħ":131754,"Ġ×Ķ×ŀ×ĵ":131755,"Ġ×Ķ×ŀ×ĵ×Ļ׳×Ķ":131756,"دب":131757,"×¢×Ļ׳×Ļ":131758,"×ŀת×Ĺ":131759,"Ġפר×Ļ":131760,"ãĥĭãĥ¼":131761,"اÙħÙĬ":131762,"Ġnhằm":131763,"ãĤĮãģªãģĦ":131764,"تعرÙģ":131765,"Ġë§ĪìĿĮ":131766,"ìĵ°":131767,"Ġhấp":131768,"ר×Ĵ×Ļ׾":131769,"بÙİ":131770,"ĠrÄĥng":131771,"glÄħd":131772,"ĠÑģиÑģÑĤемÑĭ":131773,"Ġkhóa":131774,"ãģ§ãģĻãĤĪãģŃ":131775,"大ãģįãģı":131776,"기를":131777,"Ġkéo":131778,"ÙĪØ¡":131779,"جاÙħ":131780,"جاÙħع":131781,"Ġ×¢×Ļצ×ķ×ij":131782,"téri":131783,"Ġתש":131784,"Ġ×IJ×ij×Ļ":131785,"ĠChương":131786,"à¸ļริà¹Ģว":131787,"à¸ļริà¹Ģวà¸ĵ":131788,"ãģ¤ãģı":131789,"Ġ×Ĺ×ķ׾":131790,"עת×Ļ×ĵ":131791,"ש×Ļ×ŀ×Ķ":131792,"ëĤ¨":131793,"Ġש×IJ×Ļף":131794,"ĠÙĪØ§ÙĦØ¥":131795,"ÑĦа":131796,"Ġkhám":131797,"Ġ×ĺ×ķ×ij×Ķ":131798,"ĠвÑĭÑģ":131799,"ĠвÑĭÑģоко":131800,"ĠاÙĦØŃدÙĬØ«":131801,"人ãĤĤ":131802,"dÃ¼ÄŁÃ¼":131803,"×Ļ×Ĺ×ķ×ĵ":131804,"تعÙĦÙĬ":131805,"تعÙĦÙĬÙĤ":131806,"lö":131807,"تØŃدÙĬد":131808,"него":131809,"ĠÑĥдоб":131810,"Ġ׾×ŀ×Ļ":131811,"Ġר×ķצ×Ļ×Ŀ":131812,"Ġجاء":131813,"Ġ×ij×ĸ×ŀף":131814,"à¸Ľà¸ģà¸ķิ":131815,"é«ĺãģı":131816,"à¸Ľà¸¥à¸²":131817,"Ġartık":131818,"Ġbugün":131819,"ק׳×Ļ":131820,"Ġkhoá":131821,"ĠÙħرÙĥز":131822,"ĠìŀIJ기":131823,"درجة":131824,"×ŀשר×ĵ":131825,"Ġgiấy":131826,"Ġchóng":131827,"קפ":131828,"ÙĬبة":131829,"ĠczÄĻsto":131830,"вали":131831,"Ùĥب":131832,"ìŁģ":131833,"สà¸ļาย":131834,"à¸Ľà¸£à¸°à¸Ĭาà¸Ĭà¸Ļ":131835,"×Ĵ×ķ×£":131836,"ëŁī":131837,"ãģ®ãģĵãģ¨":131838,"ลà¸Ń":131839,"Ġnghá»ī":131840,"åŃIJãģ©":131841,"åŃIJãģ©ãĤĤ":131842,"à¹Ħà¸Ķà¹īà¸Ńย":131843,"à¹Ħà¸Ķà¹īà¸Ńยà¹Īาà¸ĩ":131844,"×ĵ×¢":131845,"ĠاÙĦتÙī":131846,"ĠÑģовеÑĤ":131847,"ĠqualitÃł":131848,"åĩºãģĹ":131849,"ĠÑĢÑĥков":131850,"ĠÑĢÑĥковод":131851,"รายละà¹Ģà¸Ńียà¸Ķ":131852,"ãģªãģĭãģªãģĭ":131853,"기ê´Ģ":131854,"Ġ×Ĺ×ķש":131855,"Ġ×Ĺ×ķש×ij":131856,"лоÑĤ":131857,"à¸Ļะà¸Ħรัà¸ļ":131858,"×§×ij×ķצ×Ķ":131859,"Ġthái":131860,"Ġש×ij×Ķ":131861,"ĠÑĪкол":131862,"ĠÙĦÙĥÙĦ":131863,"à¹ĥà¸Ļà¸Ĭà¹Īวà¸ĩ":131864,"ĠÙħÙĥاÙĨ":131865,"ëķĮ":131866,"Ġcải":131867,"ĠChÃŃ":131868,"ÑĥÑĩа":131869,"ìĿµ":131870,"Ġxảy":131871,"à¸Ĭà¸Ļิà¸Ķ":131872,"ĠcáºŃu":131873,"кÑĢов":131874,"ssé":131875,"ĠÙĨÙĪØ¹":131876,"ĠТа":131877,"Ø®Ùħس":131878,"פ×ķס×ĺ":131879,"Ġmắc":131880,"ĠÄijem":131881,"à¸ģารà¹ĥà¸Ĭà¹ī":131882,"ר×ķס":131883,"ĠÐĽÐµ":131884,"Ġthá»Ń":131885,"รà¹Īาà¸ĩà¸ģาย":131886,"üzü":131887,"æĹ¥æľ¬ãģ®":131888,"ê³¼ìłķ":131889,"ש×Ļ×IJ":131890,"ĠìŀĪê³ł":131891,"×ij×ķ׾":131892,"ìķħ":131893,"ĠÙĪØ§ÙĦا":131894,"ĠÐĽÐ¸":131895,"ĠвÑģÑij":131896,"Ġużytkow":131897,"×Ĺ×ķ׾":131898,"رÙ쨶":131899,"Ġsonuç":131900,"ãģĦãģ¾ãģĽãĤĵ":131901,"ìĤ¬ìĹħ":131902,"ëĪĦ":131903,"ÑĤек":131904,"ĠudziaÅĤ":131905,"лез":131906,"Ġ×Ķ×Ļ×Ļת×Ļ":131907,"ãĤīãĤĮãģ¦":131908,"ÙħسؤÙĪÙĦ":131909,"رار":131910,"ÑĤан":131911,"ĠÄijÃło":131912,"Ġר×ķ×ij":131913,"Ġ×ijש×ij×Ļ׾":131914,"ä»ĬåĽŀãģ¯":131915,"ãĤ¸ãĥ¥":131916,"Ġ×¢×ijר":131917,"ãģĽãģ¦":131918,"полÑĮ":131919,"aklı":131920,"ĠkÃŃnh":131921,"دت":131922,"ложение":131923,"ĠاÙĦÙħص":131924,"ĠاÙĦÙħصرÙĬ":131925,"à¸Īริà¸ĩà¹Ĩ":131926,"ĠاÙĦشرÙĥØ©":131927,"ĠÄijá»ı":131928,"ãĥĽ
ãĥĨ":131929,"ãĥĽãĥĨãĥ«":131930,"Ñįкон":131931,"Ñįконом":131932,"ĠÙĪØ¹ÙĨ":131933,"Ġ×ª×ł":131934,"Ġ×ª×ł×IJ×Ļ":131935,"ĠاÙĦدÙĪÙĦÙĬØ©":131936,"Ġì§ĢìĹŃ":131937,"ãģ§ãģĻãģĭ":131938,"ĠваÑĢи":131939,"ĠваÑĢианÑĤ":131940,"ĠاÙĦعرب":131941,"ела":131942,"ĠtÆ°á»Ľng":131943,"skÄħ":131944,"Ġmặc":131945,"สัà¸ģ":131946,"ãĥĵãĥ¼":131947,"Ġ×ij×Ĵ׾":131948,"Ġ×ij×Ĵ׾׾":131949,"ãĥķãĤ¡ãĥ³":131950,"×ij×Ļצ":131951,"×ij×Ļצ×ķ×¢":131952,"лиÑģÑĤ":131953,"à¸Łà¸¸":131954,"à¸Łà¸¸à¸ķ":131955,"à¸Łà¸¸à¸ķà¸ļà¸Ńล":131956,"à¸Ŀà¹Īาย":131957,"ìŀIJìĿĺ":131958,"ĠسÙĪÙģ":131959,"Ġש×Ķת":131960,"Ġ걸":131961,"×¢×ij×ķ×ĵ":131962,"ãģĻãĤĭãģĵãģ¨ãģĮ":131963,"ĠÑĩаÑģÑĤÑĮ":131964,"ãĤ¢ãĥ¡ãĥª":131965,"ãĤ¢ãĥ¡ãĥªãĤ«":131966,"Ġtakım":131967,"ĠsỼ":131968,"ĠsỼm":131969,"שר×Ķ":131970,"è¨ĢãģĨ":131971,"лан":131972,"커":131973,"׼׳×Ķ":131974,"ÙĪÙģÙĬ":131975,"íĹĪ":131976,"luÄŁu":131977,"ĠëĮĢíķ´":131978,"Ġ׾×ij×Ļת":131979,"Ġ×Ķר×IJש×ķ׳×Ķ":131980,"صÙħ":131981,"Ġsöyled":131982,"Ġsöyledi":131983,"à¸Ľà¸²à¸ģ":131984,"Ġardından":131985,"ãģĪãģŁ":131986,"à¸Ĺัà¹Īวà¹Ħà¸Ľ":131987,"Ġ׳×ķסף":131988,"болÑĮ":131989,"ãĤĵãģ§ãģĻãģijãģ©":131990,"ĠлиÑĪÑĮ":131991,"Ġ×ij×IJ×Ļ":131992,"ĠбÑĭÑģÑĤÑĢо":131993,"สัà¸Ļ":131994,"Ġ×ijפ׳×Ļ":131995,"леÑĩ":131996,"ĠاÙĦخبر":131997,"Ġsóc":131998,"Ġthú":131999,"ĠпÑıÑĤ":132000,"ãģĬé¡ĺ":132001,"ãģĬé¡ĺãģĦ":132002,"ÑĤин":132003,"ãģ«ãģ¤ãģĦãģ¦ãģ¯":132004,"פף":132005,"ĠдвÑĥÑħ":132006,"à¸įีà¹Ī":132007,"à¸įีà¹Īà¸Ľ":132008,"à¸įีà¹Īà¸Ľà¸¸":132009,"à¸įีà¹Īà¸Ľà¸¸à¹Īà¸Ļ":132010,"опеÑĢ":132011,"ĠاÙĦبشر":132012,"ĠاÙĦÙħاÙĦ":132013,"ıyoruz":132014,"تØŃÙħÙĬÙĦ":132015,"à¸ģะ":132016,"éĸĵãģ«":132017,"×Ĺ×ķש":132018,"ĠNguyên":132019,"ãģĦãģ¦ãģĦãĤĭ":132020,"дÑĥÑĪ":132021,"שפע":132022,"ÑĪÑĥ":132023,"å®ŁéļĽãģ«":132024,"ĠÑĢайон":132025,"ĠChá»ī":132026,"ÙĨصر":132027,"Ġìļ´":132028,"Ġìļ´ìĺģ":132029,"Ġ×Ķ×ĵ×Ļף":132030,"ØŃدد":132031,"رز":132032,"ĠاÙĦدÙħ":132033,"ĠPháp":132034,"ÑĤÑģÑı":132035,"è¦ĭãģĪ":132036,"Ġtiá»ĥu":132037,"Ġsá»Ńa":132038,"аÑİÑĤÑģÑı":132039,"ĠBá":132040,"Ġ×ķ׼׾":132041,"Ðĸ":132042,"ÑĪим":132043,"ìĿ´ëĬĶ":132044,"лев":132045,"dık":132046,"Ġprésente":132047,"Ġaraç":132048,"صدÙĤ":132049,"Ġпомог":132050,"ĠاÙĦشرÙĤ":132051,"ĠÙĪØ§ÙĦذÙĬ":132052,"رÙĬا":132053,"×ij׳×ķת":132054,"Ġngá»ĵi":132055,"ר×ķפ":132056,"ר×ķפ×IJ":132057,"Ġthấp":132058,"ãĤĦãģ¯":132059,"ãĤĦãģ¯ãĤĬ":132060,"ĠاÙĦجدÙĬدة":132061,"éĿŀ常ãģ«":132062,"ÙĬÙĦÙĬ":132063,"쪽":132064,"تعاÙħÙĦ":132065,"ãģłã썿ĢĿãģĦãģ¾ãģĻ":132066,"ÙħÙħ":132067,"иÑĤели":132068,"ãĤµãĤ¤ãĤº":132069,"ادات":132070,"ĠاÙĦÙħاÙĦÙĬØ©":132071,"Ùĥاتب":132072,"кли":132073,"веÑĢÑħ":132074,"ниÑĩ":132075,"Ġ×ľ×¢×ij×ķ×ĵ":132076,"׾×Ļ×Ķ":132077,"ØŃÙİ":132078,"ãĤ¤ãĥĻ":132079,"ãĤ¤ãĥĻãĥ³ãĥĪ":132080,"Ġת×Ĵ×ķ×ij×ķת":132081,"ÑĦон":132082,"ĠдÑĢÑĥгие":132083,"×IJ×ĸ×ķר":132084,"Ġperò":132085,"ìķŀ":132086,"åĢŁãĤĬ":132087,"רצ×Ļ":132088,"×IJ×ĸ":132089,"алÑĮнÑĭÑħ":132090,"Ġê²ĥìľ¼ë¡ľ":132091,"ĠпÑĢаво":132092,"ĠاÙĦأرض":132093,"à¹Ģà¸Ĺà¸Ħ":132094,"à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļ":132095,"à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥล":132096,"à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥลย":132097,"à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥลยี":132098,"צר×Ļ":132099,"ĠÐļÑĥ":132100,"ılma":132101,"決ãĤģ":132102,"اÙĪ":132103,"Ġ×ĵ×§×ķת":132104,"à¸Ħรู":132105,"ĠÙħستÙĪÙī":132106,"à¸Ľà¹īà¸Ńà¸ĩ":132107,"à¸Ľà¹īà¸Ńà¸ĩà¸ģัà¸Ļ":132108,"×ĵ×ķ×ŀ×Ķ":132109,"ĠÑģегоднÑı":132110,"سÙĪÙĤ":132111,"ר×Ĺ×ķ×ij":132112,"Ġإدارة":132113,"Ñħож":132114,"éģİãģİ":132115,"à¸Ħà¸Ń":132116,"нÑĥл":132117,"×ķ׼×Ķ":132118,"ÙĪØ§ÙģÙĤ":132119,"׼׾׾":132120,"Ġ×Ķ×ĵ×ķ":132121,"ĠlÄ©nh":132122,"Ġkhảo":132123,"×IJ×ŀצע":132124,"머":132125,"Ġ׼×Ļצ":132126,"Ġ׼×Ļצ×ĵ":132127,"ĠдолжнÑĭ":132128,"หวัà¸ĩ":132129,"ãĥĩãĤ¶":132130,"ãĥĩãĤ¶ãĤ¤ãĥ³":132131,"Ġngá»Ŀ":132132,"ä¸Ńãģ«":132133,"à¸ģลัà¸ļมา":132134,"جÙħاÙĦ":132135,"à¸Ķัà¸ĩà¸ģลà¹Īาว":132136,"سÙĥÙ
Ĩ":132137,"سÙĨ":132138,"Ġözellikle":132139,"зеÑĢ":132140,"rzÄĻ":132141,"×ŀ×ķר×Ķ":132142,"Ġlạ":132143,"×ŀ×Ļ׳×Ļ":132144,"ר×Ļת":132145,"ãģĿãĤĮãģĮ":132146,"ãģĭãĤĮ":132147,"ĠÙĬÙħÙĥÙĨÙĥ":132148,"öffentlich":132149,"ган":132150,"ĠاÙĦØŃÙĦ":132151,"ĠmiÄĻdzy":132152,"ĠÑĩаÑģÑĤи":132153,"ujÄħcy":132154,"ĠbaÄŁlı":132155,"ĠiliÅŁki":132156,"ÙģØ§Ø¡":132157,"ãĥªãĥ³ãĤ°":132158,"Ġhãng":132159,"ĠконÑĤÑĢ":132160,"ĠконÑĤÑĢол":132161,"коп":132162,"ש×Ļ×¢":132163,"ש×Ļ×¢×ķר":132164,"ĠÐĴаÑĪ":132165,"Ġ×Ķתק":132166,"ÙħÙĨع":132167,"ĠpolÃŃtico":132168,"Ġголов":132169,"ĠØ¥ÙĬ":132170,"Ø¥ÙĨتاج":132171,"à¸ļิ":132172,"ĠговоÑĢ":132173,"ĠговоÑĢиÑĤ":132174,"Ġphá»ķ":132175,"ĠÑģемÑĮ":132176,"ãģ¯ãģĤãĤĬãģ¾ãģĽãĤĵ":132177,"ĠÙĪØ§Ø³Øª":132178,"×ŀשפ×ĺ":132179,"зем":132180,"×ŀ×ĵ×ijר":132181,"Ġíģ°":132182,"ĠìĿ´ë²Ī":132183,"ê°ĢëĬĶ":132184,"Ġì§ĢìĽIJ":132185,"ĠcaÅĤy":132186,"ĠgeliÅŁtir":132187,"Ñģкое":132188,"posé":132189,"Ġkhô":132190,"à¸ķิà¸Ķà¸ķาม":132191,"missão":132192,"Ġ׾×ŀר":132193,"Ġ׾×ŀר×ķת":132194,"Ġbó":132195,"à¸ķรวà¸Īสà¸Ńà¸ļ":132196,"Ġnghá»ģ":132197,"Ġбиз":132198,"ĠбизнеÑģ":132199,"ÑģÑĤеÑĢ":132200,"ÙĪÙİ":132201,"楽ãģĹãģ":132202,"楽ãģĹãģ¿":132203,"ãģĵãĤĮãģĭãĤī":132204,"wiÄħzan":132205,"สà¸Ńà¸Ļ":132206,"ÙħÙĪØ±":132207,"׳×ĵ׾":132208,"Ġ×Ķ×IJ×ĵ×Ŀ":132209,"Ġмолод":132210,"ØŃÙħا":132211,"ØŃÙħاÙĬØ©":132212,"ÑģÑĤÑĢан":132213,"Ġbuá»ķi":132214,"ת×Ļ×Ļ×Ŀ":132215,"abileceÄŁi":132216,"Lİ":132217,"à¹Ģยà¸Ńะ":132218,"à¸Īร":132219,"سÙĥاÙĨ":132220,"à¸Ļัà¸Ķ":132221,"Ġmấy":132222,"ĠÐijа":132223,"sÅĤaw":132224,"ĠÙģÙĦا":132225,"ĠкоÑĤоÑĢой":132226,"ĠплоÑī":132227,"ĠплоÑīад":132228,"ãĤĤãģĤãĤĬ":132229,"szczÄĻ":132230,"×Ļפ×ķ":132231,"ש×ŀת":132232,"owaÅĤa":132233,"Ġnông":132234,"צ×ij×IJ":132235,"ĠìŀĪìĹĪ":132236,"ãģ¾ãģ¨":132237,"ãģ¾ãģ¨ãĤģ":132238,"ÙĤÙĪØ§Øª":132239,"ãģ¿ãĤĵãģª":132240,"Ġ׼×ŀ×¢×ĺ":132241,"Ġxúc":132242,"ï¼Ĩ":132243,"rÄĻ":132244,"rÄĻcz":132245,"×ĵ×ŀ×Ļ":132246,"ĠtáºŃn":132247,"à¸Ķวà¸ĩ":132248,"ê²½ìłľ":132249,"пÑĥÑĤ":132250,"أربع":132251,"Ġ×ŀשת×ŀש":132252,"ãĤ¿ãĤ¤ãĥĹ":132253,"Ġìłľê°Ģ":132254,"Ġ׾׼ף":132255,"ĠобÑĢазом":132256,"ÙĬÙĥا":132257,"wÅĤ":132258,"wÅĤasn":132259,"ĠاÙĦÙĪØ·ÙĨÙĬØ©":132260,"بÙĬب":132261,"×ŀ׾×Ļ":132262,"кÑĢаÑĤ":132263,"기ìĹIJ":132264,"ÙĤاد":132265,"ĠÙĦدÙī":132266,"à¸Ħวามรูà¹ī":132267,"×ŀ×ĵ×Ļ׳×Ļ×ķת":132268,"겨":132269,"ĠíĺĦìŀ¬":132270,"שת×Ļ":132271,"мол":132272,"Ġmái":132273,"à¸ŀิม":132274,"à¸ŀิมà¸ŀ":132275,"à¸ŀิมà¸ŀà¹Į":132276,"หลวà¸ĩ":132277,"Ġxuyên":132278,"×Ĺסר":132279,"رÙĪÙĨ":132280,"ãģĿãģĨãģĦãģĨ":132281,"ãģĿãĤĮãģŀ":132282,"ãģĿãĤĮãģŀãĤĮ":132283,"Ġ׼ש×Ķ":132284,"ÐŁÑĢав":132285,"×ŀ×ijצע":132286,"عرب":132287,"Ġbüyü":132288,"פ×Ļת×ķ×Ĺ":132289,"à¸Īà¸ļ":132290,"ĠØ£Ùĥبر":132291,"שרת":132292,"×ŀ׼ש×Ļר":132293,"ĠÙĪÙħع":132294,"ãģ®ãģŁãĤģãģ«":132295,"à¸Ļัà¸ļ":132296,"ì°°":132297,"ãĥªãĥķãĤ©":132298,"ãĥªãĥķãĤ©ãĥ¼ãĥł":132299,"Ġcưá»Ŀng":132300,"ĠìłĢíĿ¬":132301,"ÙħÙĨظÙħØ©":132302,"Ġhiçbir":132303,"ãģ§ãģ¯ãģĤãĤĬãģ¾ãģĽãĤĵ":132304,"รà¸Ńย":132305,"ëIJľëĭ¤":132306,"ãģĻãģIJãģ«":132307,"кла":132308,"Ġürünler":132309,"Ġkiá»ĥu":132310,"ĠëĤĺëĬĶ":132311,"ÑĤки":132312,"Ñģим":132313,"Ġchá»īnh":132314,"ãĤĤãģªãģĦ":132315,"ศรี":132316,"æĽ¿ãģĪ":132317,"taÅŁ":132318,"ĠبÙĥÙĦ":132319,"Ġ×ķ×Ļש":132320,"visão":132321,"ä¼Ŀ":132322,"ä¼ĿãģĪ":132323,"ÙĦد":132324,"׾×Ļ×ŀ":132325,"׾×Ļ×ŀ×ķ×ĵ":132326,"tória":132327,"دÙij":132328,"اÙħر":132329,"Ġê·¸ëłĩê²Į":132330,"ĠmateriaÅĤ":132331,"à¸Ĺรา":132332,"à¸Ĺราà¸ļ":132333,"ã쮿ĸ¹ãģĮ":132334,"ãģ¦ãģįãģŁ":132335,"ضغ":132336,"ضغط":132337,"ĠÙĬعÙĨÙĬ":132338,"ело":132339,"×IJ×Ķ×ij×Ķ":132340,"×¢×ŀ":132341,"ÅŁÄ±k":132342,"ìŀIJëĬĶ":132343,"ãĤ¿ãĥ³":132344,"ĠbáºŃt":132345,"×ŀשפ×Ĺ×Ķ":132346,"кÑĢи":132347,"бли":132348,"สัà¸ķ":132349,"สัà¸ķวà¹Į":132350,"ĠسÙĨÙĪØ§Ø
ª":132351,"ĠPhương":132352,"ãģ¦ãģĹãģ¾ãģ£ãģŁ":132353,"ãģªãģľ":132354,"Ġ×ij×IJ×ķ":132355,"Ġcán":132356,"سجÙĦ":132357,"Ġlẽ":132358,"ãĤ±ãĥ¼ãĤ¹":132359,"Ġ×§×Ļ×ij׾":132360,"à¸ļà¸Ĺà¸Ħวาม":132361,"Ġ×ķ׼ף":132362,"ĠпÑĢедÑģÑĤавлен":132363,"Ġná»iji":132364,"Ġcomentário":132365,"ением":132366,"Ġtá»ı":132367,"lÃł":132368,"Ġש×Ķ×Ļ×Ķ":132369,"Ñģлав":132370,"ĠاÙĦÙĪÙĦا":132371,"ĠاÙĦÙĪÙĦاÙĬات":132372,"ÙĦجÙĨØ©":132373,"×§×ķר×IJ":132374,"бÑĭÑĤ":132375,"Ġì¦":132376,"Ġì¦ī":132377,"ãģ§ãģĻãģĹ":132378,"หรืà¸Ńà¹Ħมà¹Ī":132379,"заÑīиÑĤ":132380,"ÙģÙĦسطÙĬÙĨ":132381,"Ġmiá»ħn":132382,"à¹Ģยà¹ĩà¸Ļ":132383,"ĠçalÄ±ÅŁan":132384,"×Ļ×Ĵ×Ķ":132385,"ĠEÄŁ":132386,"ĠEÄŁitim":132387,"ãĥĥãĤ·ãĥ¥":132388,"ĠопÑĭ":132389,"ĠопÑĭÑĤ":132390,"رغ":132391,"رغب":132392,"ĠÑģвоиÑħ":132393,"à¸Ľà¸£à¸°à¸ķ":132394,"à¸Ľà¸£à¸°à¸ķู":132395,"Ġ×ŀ×IJ×ĵ":132396,"׼×ķ׳×Ļ×Ŀ":132397,"à¸Ļี":132398,"ĠвÑĭÑħод":132399,"ãģ®ä¸Ńãģ«":132400,"פ׾×IJ":132401,"ĠÙĪÙĦÙĬس":132402,"פ×ķרס":132403,"פ×ķרס×Ŀ":132404,"ÙħسÙĦÙħ":132405,"Ġngôi":132406,"×ĵ×ŀ×ķת":132407,"ãĤĴ使ãģ£ãģ¦":132408,"ĠпомоÑīÑĮÑİ":132409,"أسر":132410,"блок":132411,"ÙĤÙĩ":132412,"ãģĹãģ¾ãģĦ":132413,"ãģ¨ãģĹãģŁ":132414,"ĠпеÑģ":132415,"ãĥīãĥ«":132416,"×Ĺ×Ŀ":132417,"ãģĹãģªãģĮãĤī":132418,"ĠÐŁÑĢед":132419,"ãĥģãĤ§ãĥĥãĤ¯":132420,"å¼·ãģĦ":132421,"ש×Ļר×ķת":132422,"даеÑĤ":132423,"×Ļ×ij×ķ":132424,"Ġgenç":132425,"илаÑģ":132426,"илаÑģÑĮ":132427,"ĠبÙĦد":132428,"æĤª":132429,"æĤªãģĦ":132430,"Ġ×ŀשת":132431,"æ§ĺãĢħ":132432,"æ§ĺãĢħãģª":132433,"à¸ĺรรมà¸Ĭาà¸ķิ":132434,"ĠÙĥاÙħÙĦ":132435,"ĠاÙĦسÙħ":132436,"×ij×ĺ×Ļ×Ĺ":132437,"cá":132438,"gência":132439,"ãĤ¹ãĤ¿ãĥ¼":132440,"à¸Ĺำà¸ģาร":132441,"×Ļ×ľ×ª":132442,"Ġ×Ļ×ķצ×IJ":132443,"wój":132444,"à¸ļุà¸Ħ":132445,"à¸ļุà¸Ħà¸Ħล":132446,"عتÙħ":132447,"عتÙħد":132448,"ãģĿãĤĮãģ«":132449,"ĠاÙĦتارÙĬØ®":132450,"ÙĤراء":132451,"Ġyönetim":132452,"קשר":132453,"ĠÑģпоÑĢÑĤ":132454,"Ġר×IJש×ķף":132455,"Ġseñal":132456,"Ġchắn":132457,"çĦ¡ãģĦ":132458,"ĠдоÑģÑĤаÑĤ":132459,"ĠдоÑģÑĤаÑĤоÑĩно":132460,"Ġágua":132461,"à¸ģรà¸ĵ":132462,"à¸ģรà¸ĵี":132463,"Ġ×ŀש×ķ":132464,"Ġtrải":132465,"ë²Į":132466,"ujÄħcych":132467,"ÙģØ±Ø¯":132468,"à¹ĥà¸ģล":132469,"à¹ĥà¸ģลà¹ī":132470,"ãĤĭãģ®ãģ¯":132471,"ר×ķ×ķ×Ĺ":132472,"ÙĨÙĥ":132473,"ĠاÙĦÙĨÙĤ":132474,"ãģ®ãģ§ãģĹãĤĩãģĨ":132475,"ãģ®ãģ§ãģĹãĤĩãģĨãģĭ":132476,"ÙħعرÙģ":132477,"ÙħعرÙ쨩":132478,"ÑĥÑīе":132479,"Ġ×ij×¢×Ļקר":132480,"تصÙĦ":132481,"Ġ×Ķ×IJר":132482,"Ġ×Ķ×IJרץ":132483,"ĠÅŀi":132484,"à¸Ĥาà¸Ķ":132485,"íŀĺ":132486,"ãģªãĤĵãģ¨":132487,"ĠìĤ¬ëŀij":132488,"lÃ¼ÄŁÃ¼":132489,"باء":132490,"ĠاÙĦآخر":132491,"ĠfamÃŃlia":132492,"ĠTháng":132493,"ÑīениÑı":132494,"ãĤ¯ãĥŃ":132495,"ĠThứ":132496,"æĽ¸ãģį":132497,"енной":132498,"ìŀ¡":132499,"благ":132500,"благо":132501,"пов":132502,"à¹ģว":132503,"à¸ĩà¸Ħà¹Į":132504,"à¸Ńัà¸Ļà¸Ķัà¸ļ":132505,"ãģĤãģĴ":132506,"รà¹īาย":132507,"ünün":132508,"Ġ×Ļ׼×ķ׾×Ķ":132509,"зон":132510,"ĠÐľÐ¸":132511,"маÑĤеÑĢиал":132512,"Ġë³´ë©´":132513,"ØŃÙ쨏":132514,"êÌģ":132515,"ãģ«ãģĻãĤĭ":132516,"Ġת×IJ":132517,"Ġ×Ķס×ķ":132518,"ĠÑģÑĤоÑĢ":132519,"ĠÑģÑĤоÑĢон":132520,"ãĥĪãĥĥãĥĹ":132521,"ÅĤoÅĽÄĩ":132522,"ëħ¼":132523,"ëĵĿ":132524,"ĠÙĪØ§ÙĦع":132525,"ì¶Ķ":132526,"Ġ×Ļצ×IJ":132527,"ĠÑĢаздел":132528,"алÑĮнаÑı":132529,"×IJ׳ש×Ļ":132530,"spoÅĤ":132531,"spoÅĤec":132532,"spoÅĤeczn":132533,"إعÙĦ":132534,"إعÙĦاÙĨ":132535,"ÙĤÙĪÙī":132536,"íķĺë©´ìĦľ":132537,"تطÙĪØ±":132538,"Ġsiêu":132539,"Ỽt":132540,"дви":132541,"движ":132542,"Ġquần":132543,"kıl":132544,"ĠпÑĢизна":132545,"ĠHã":132546,"ĠHãy":132547,"ĠباÙĦت":132548,"manın":132549,"ãĤ«ãĥ«":132550,"Ġká»·":132551,"ק׾×Ļ":132552,"ëIJĺì§Ģ":132553,"تعÙĦÙħ":132554,"ìĭľìĦ¤":132555,"ìĭ¶":132556,"íĺ¼":132557,"ÙĥÙĬÙģ":132558,"売ãĤĬ":132559,"วิà¸Ĭา":132560,"бал":132561,"ĠØ£ØŃ":132562,"Ġдолжен":13256
3,"ราà¸ĩ":132564,"ราà¸ĩวั":132565,"ราà¸ĩวัล":132566,"Ùħاء":132567,"جار":132568,"Åļ":132569,"Ġ×ŀ×IJ×ĸ":132570,"ר×ŀ×Ķ":132571,"ãģĭãĤĤãģĹãĤĮãģªãģĦ":132572,"étude":132573,"czÄħc":132574,"Ġgór":132575,"×ł×¡×Ķ":132576,"ÙħÙĬد":132577,"ĠÐŁÐµÑĢе":132578,"أخر":132579,"ãģĿãģ®å¾Į":132580,"à¹Ģà¸Ķียวà¸ģัà¸Ļ":132581,"×ŀ×Ĵ×ķ":132582,"×ŀ×Ĵ×ķ×ķף":132583,"дов":132584,"masına":132585,"×¢×ł×Ķ":132586,"ãĤ±ãĥĥãĥĪ":132587,"סע":132588,"סע×Ļ×£":132589,"ĠTư":132590,"Ġtóc":132591,"íĻľëıĻ":132592,"ĠÐŀд":132593,"ĠÐŀднако":132594,"Ġdolayı":132595,"ؤÙĥد":132596,"ê³Ħíļį":132597,"׾ר":132598,"веÑĩ":132599,"Ġkhợi":132600,"Ġthá»§y":132601,"×ĵף":132602,"รà¸ģ":132603,"à¸ļัà¸ķร":132604,"à¹Ģà¸ģà¹Īา":132605,"ĠاÙĦثاÙĦ":132606,"ĠاÙĦثاÙĦØ«":132607,"Ġpodrá":132608,"ער×Ļ":132609,"ÙĨجاØŃ":132610,"Ġkhắc":132611,"측":132612,"İM":132613,"ãĤ»ãĥĥãĥĪ":132614,"żenia":132615,"Ġ׾×Ĺ×ijר":132616,"erÃł":132617,"ì´Ī":132618,"Ġküç":132619,"Ġküçük":132620,"اتÙĩÙħ":132621,"à¸ĭà¹Į":132622,"ÙħشارÙĥØ©":132623,"ĠاÙĦبط":132624,"Ġdây":132625,"еннÑĭм":132626,"à¸Ĺีà¹Īà¹Ħมà¹Ī":132627,"ÙĤÙİ":132628,"Ġvượt":132629,"Ġtrì":132630,"ĠwpÅĤyw":132631,"AÅŀ":132632,"зо":132633,"ĠاÙĦسÙĬد":132634,"à¸Ĺะà¹Ģล":132635,"ĠÑģодеÑĢжа":132636,"عطÙĬ":132637,"ĠاÙĦعÙĨ":132638,"èĢħãģĮ":132639,"à¹Ģหà¸Ļ":132640,"à¹Ģหà¸Ļืà¸Ń":132641,"ĠbÃŃ":132642,"Ġüzerinden":132643,"ĠVÅ©":132644,"Ġnuôi":132645,"ÙĨÙħ":132646,"алÑĮного":132647,"×¢×Ļף":132648,"ØŃضر":132649,"ĠоÑĤдел":132650,"ëªĩ":132651,"ìķ¡":132652,"ĠÙĦدÙĬÙĩ":132653,"ìĻľ":132654,"Ġsektör":132655,"Ġвозможно":132656,"ĠÐĶж":132657,"Ġhô":132658,"äºĭãģĮ":132659,"иÑĢование":132660,"алÑĮной":132661,"Ġ미êµŃ":132662,"رØŃÙĦ":132663,"ĠÑįкÑģ":132664,"пÑĢавлÑı":132665,"Ġnhá»Ŀ":132666,"ĠÄijẩ":132667,"ĠÄijẩy":132668,"ÙģÙĥر":132669,"ĠÙĪØ£Ø¶Ø§Ùģ":132670,"ãĥIJãĤ¹":132671,"ת×ķ׼׳×Ļת":132672,"ÑĤелей":132673,"ĠØ¥ÙĦÙĬÙĩ":132674,"ãģ¨è¨Ģãģ£ãģ¦":132675,"Ġдве":132676,"Ġchấp":132677,"ĠLö":132678,"à¸Ħลิ":132679,"à¸Ħà¸¥à¸´à¸Ľ":132680,"ĠسÙĪØ±":132681,"ĠسÙĪØ±ÙĬا":132682,"×ŀ×Ĺ×ķ":132683,"stä":132684,"доб":132685,"Ġniá»ĩm":132686,"ãģ®å¤§":132687,"פר×ķ×Ļ×§":132688,"פר×ķ×Ļ×§×ĺ":132689,"ĠChâu":132690,"Ġ×ŀ×Ķ×Ŀ":132691,"Ñģким":132692,"ĠполÑĥÑĩиÑĤÑĮ":132693,"ÙĬÙĪÙħ":132694,"Ø«ÙĪØ±":132695,"פ×ķ׾×Ļ×ĺ":132696,"פ×ķ׾×Ļ×ĺ×Ļ":132697,"ĠмеÑģÑıÑĨ":132698,"åħ¨ãģ¦":132699,"ĠاÙĦÙħجÙĦس":132700,"ĠاÙĦتاÙĦÙĬ":132701,"Ġ×Ĺר":132702,"åIJijãģij":132703,"׼×ŀ×Ķ":132704,"бед":132705,"أعض":132706,"أعضاء":132707,"ÙĪÙĦد":132708,"วà¹Īาà¸Īะ":132709,"Ġbánh":132710,"à¸Ļิย":132711,"à¸Ļิยม":132712,"à¸Ľà¸£à¸°à¸ģัà¸Ļ":132713,"ÑģÑĤавиÑĤÑĮ":132714,"à¸ŀà¸Ļัà¸Ļ":132715,"ĠÑįÑĦÑĦ":132716,"ĠÑįÑĦÑĦекÑĤив":132717,"ĠавÑĤоÑĢ":132718,"ĠÄIJÄĥng":132719,"ĠthÆ°á»Łng":132720,"ãĤĴæĦŁãģĺ":132721,"à¸ģัà¸ļà¸ģาร":132722,"å¾Įãģ«":132723,"ĠyaÄŁ":132724,"ستاÙĨ":132725,"Ġliá»ģn":132726,"ãģĦãģ¾":132727,"iêu":132728,"à¹Ĥà¸Ķà¸Ļ":132729,"ĠÙĦذÙĦÙĥ":132730,"à¹Ĥรà¸ĩà¹Ģรียà¸Ļ":132731,"צ×Ļ×Ĵ":132732,"ĠاÙĦÙħعÙĦÙĪÙħات":132733,"ç§ģãģŁãģ¡":132734,"à¸Ĺีà¹Īà¸Ħุà¸ĵ":132735,"ãģ«ãģªãģ£ãģ¦ãģĦãĤĭ":132736,"×ŀ×ĵ×Ļ׳×Ķ":132737,"×¡×Ľ×Ŀ":132738,"Ġвне":132739,"à¸ŀà¸Ļัà¸ģà¸ĩาà¸Ļ":132740,"ÑĢей":132741,"à¹Ģà¸Īà¹īาหà¸Ļà¹īาà¸Ĺีà¹Ī":132742,"ĠHiá»ĩn":132743,"Ġmédico":132744,"ĠتØŃÙĤÙĬÙĤ":132745,"ÑĮÑĤе":132746,"miÅŁti":132747,"ÙĤÙĬادة":132748,"ãĤıãģĭãĤĬ":132749,"มาà¸Īาà¸ģ":132750,"ëħĢ":132751,"ãģ«éĸ¢ãģĻãĤĭ":132752,"×IJר×Ĵ×ķף":132753,"mètre":132754,"Ġעצ×ŀ×Ļ":132755,"ĠChúa":132756,"รูà¹īà¸Ī":132757,"รูà¹īà¸Īัà¸ģ":132758,"ì£Ħ":132759,"ëĭµ":132760,"à¹ģà¸Ĺà¹ī":132761,"Ġgeçen":132762,"Ġlança":132763,"ĠاÙĦبØŃØ«":132764,"×ĵ×ŀ×ķ":132765,"ãģ¯ãģĺ":132766,"ãģ¯ãģĺãĤģ":132767,"ĠdönÃ¼ÅŁ":132768,"è¿ijãģı":132769,"à¹Ģสม":132770,"à¹Ģสมà¸Ń":132771,"ëĿ½":132772,"Ġüç":132773,"á»ŀ":132774,"ÑĪаÑı":1
32775,"à¸Ĺร":132776,"ØŃÙĤÙĬÙĤØ©":132777,"à¸Ĥà¸Ńà¸ĩà¸ģาร":132778,"Ġ무ìĹĩ":132779,"Ġ×Ķ׼ר":132780,"ĠاÙĦصÙĬÙĨ":132781,"ĠлÑİди":132782,"à¸ķาย":132783,"بÙĪÙĦ":132784,"Ġviêm":132785,"Ġthiá»ĩu":132786,"à¸ģà¸Ķ":132787,"Ġ׾×ĵ×ijר":132788,"פ׳×Ķ":132789,"×IJר×ij×¢":132790,"سÙī":132791,"ĠاÙĦسÙĬاس":132792,"ĠاÙĦسÙĬاسÙĬØ©":132793,"ydı":132794,"ÙĪØŃØ¯Ø©":132795,"ĠдеÑıÑĤелÑĮноÑģÑĤи":132796,"Ġ×ķ×Ķ×ŀ":132797,"пеÑĩ":132798,"пеÑĩаÑĤ":132799,"иÑĢованиÑı":132800,"ĠÑģог":132801,"ĠÑģоглаÑģ":132802,"Ġ׼×ĵ":132803,"Ġ׼×ĵ×IJ×Ļ":132804,"ĠиÑģполÑĮзоваÑĤÑĮ":132805,"ספ×ķר×ĺ":132806,"Ġilçe":132807,"expérience":132808,"ĠThá»Ŀi":132809,"İK":132810,"à¹Ħà¸Łà¸Łà¹īา":132811,"ëĵ¤ìĹIJê²Į":132812,"à¸Ľà¸£à¸°à¹Ģà¸ł":132813,"à¸Ľà¸£à¸°à¹Ģà¸łà¸Ĺ":132814,"Ġmümk":132815,"Ġmümkün":132816,"Ġ×IJ×ķ×ª×ł×ķ":132817,"ìĦ±ìĿĦ":132818,"ĠìĿ´ìľł":132819,"زÙĬارة":132820,"Ġoldukça":132821,"rób":132822,"ĠØ£ÙĨا":132823,"Ġ×Ķ×ij×Ļ":132824,"Ñģен":132825,"×¢×Ļקר":132826,"×Ļ×ĵ×ķ×¢":132827,"dzÄħ":132828,"ÙħعÙĦÙĪÙħات":132829,"شاب":132830,"Ġparça":132831,"à¸Ļะà¸Ħะ":132832,"باس":132833,"ĠÑĤоÑĢг":132834,"ĠÑĤоÑĢгов":132835,"Ġ×Ĺ×ĵר":132836,"׼ר×ĺ":132837,"׼ר×ĺ×Ļס":132838,"ĠAyrıca":132839,"ệ":132840,"ìľ¨":132841,"ĠÑĤакие":132842,"Ġ×ŀצ×ķ×Ļ":132843,"ãĥ©ãĥ³ãĤŃãĥ³ãĤ°":132844,"ש×Ļ×ķ×ķ×§":132845,"åīįãģ®":132846,"ĠBảo":132847,"ÑīÑĥ":132848,"æĹ©ãģı":132849,"ĠPhòng":132850,"à¸ŀระราà¸Ĭ":132851,"פ×Ĺ×ķת":132852,"Ġгл":132853,"Ġглаз":132854,"à¸Ĺà¹Īา":132855,"Ġdạy":132856,"ÑĢоÑģÑĤ":132857,"à¹Ĥà¸Ķยà¹Ģà¸īà¸ŀาะ":132858,"ĠquáºŃn":132859,"Ġ×Ĺ×ijר×ķת":132860,"même":132861,"mÄ±ÅŁtı":132862,"ĠاÙĦتداÙĪÙĦ":132863,"Ġnạn":132864,"Ġ×Ķ×ĵ×Ļ":132865,"ĠاÙĦطرÙĬÙĤ":132866,"×Ĵ×ķת":132867,"Ġ×Ķ×ĵר×ļ":132868,"ujÄħce":132869,"Ġchữ":132870,"ãĤĤãģ®ãģ®":132871,"ë°Ľ":132872,"ãģķãĤĵãģ¯":132873,"Ġyardım":132874,"ĠاÙĦعÙħ":132875,"Ġì§Ħíĸī":132876,"Ġ×Ļ×Ĺ":132877,"Ġ×Ļ×Ĺס×Ļ":132878,"ĠاÙĦÙħدÙĬÙĨØ©":132879,"Ġcú":132880,"à¸ģีฬ":132881,"à¸ģีฬา":132882,"Ġniên":132883,"misión":132884,"׳×Ļס×Ļ":132885,"׳×Ļס×Ļ×ķף":132886,"ĠвозÑĢаÑģÑĤ":132887,"Ġ×¢×ķש×Ķ":132888,"ĠÙħدÙĬر":132889,"ÑıÑģÑĮ":132890,"ØŃجÙħ":132891,"íĻĺê²½":132892,"ĠاÙĦأخرÙī":132893,"uÃŁer":132894,"ĠاÙĦعاÙĦÙħÙĬØ©":132895,"ĠNgá»įc":132896,"êµIJíļĮ":132897,"ä¸Ĭãģ§":132898,"×Ļ×Ķ×ķ×ĵ":132899,"×Ļ×Ķ×ķ×ĵ×Ļ×Ŀ":132900,"Ùħساعدة":132901,"ĠжизнÑĮ":132902,"ĠпоÑĤомÑĥ":132903,"ĠاÙĦÙħÙħÙĦ":132904,"ĠاÙĦÙħÙħÙĦÙĥØ©":132905,"ĠGör":132906,"رÙIJ":132907,"×ŀ×§×ķ×ŀ×ķת":132908,"åĩºæĿ¥ãĤĭ":132909,"ÑĦÑĤ":132910,"ĠìĿ´ìłľ":132911,"ĠÑĢем":132912,"ĠÑĢемонÑĤ":132913,"ת×ķ×ļ":132914,"æĻĤãģ¯":132915,"ãĤīãĤĮãģªãģĦ":132916,"altı":132917,"å®¶ãģ®":132918,"ĠاÙĦإعÙĦاÙħ":132919,"리ëĬĶ":132920,"ãģĭãĤīãģ¯":132921,"ĠHạ":132922,"ãģĤãģ®":132923,"×ĵ×Ļ×ķף":132924,"رÙĬس":132925,"ĠsocietÃł":132926,"ĠاÙĦÙĥبÙĬر":132927,"Ġ×ij×ŀס":132928,"Ġ×ij×ŀס×Ĵר":132929,"Ġ×ij×ŀס×Ĵרת":132930,"ĠìŀĪìľ¼ë©°":132931,"Ġnặng":132932,"ÙĩÙī":132933,"ĠBÃł":132934,"×ŀר×ķ":132935,"ĠjÄĻ":132936,"ĠjÄĻzy":132937,"ĠjÄĻzyk":132938,"Ġ׼×ŀ×ķ×ijף":132939,"×¢×ľ×Ķ":132940,"à¸Ĺีà¹Īà¹Ħà¸Ķà¹ī":132941,"ãģ¾ãģĹãĤĩãģĨ":132942,"×ŀספר":132943,"ТÐŀ":132944,"سÙĬاسة":132945,"ĠкаждÑĭй":132946,"ë²ł":132947,"tım":132948,"yá»ĩn":132949,"รีà¹Ī":132950,"ĠдеÑĤÑģк":132951,"วิà¸ĺีà¸ģาร":132952,"mówi":132953,"×ĺ×¢×Ŀ":132954,"×Ķצ׾×Ĺ×Ķ":132955,"ضÙĬÙģ":132956,"ĠÑħоÑĤÑı":132957,"ãĤĵãģ§ãģĦãĤĭ":132958,"à¸Ħาà¸Ķ":132959,"à¸Ħรà¸ļ":132960,"ĠкÑĥÑĢÑģ":132961,"ĠbaÅŁarı":132962,"×ijר×ķ":132963,"ÙĬعة":132964,"ĠÐĿÑĥ":132965,"à¸Ħวามà¹Ģà¸Ľà¹ĩà¸Ļ":132966,"Ġ׾×ŀש׾":132967,"Ġì¢ĭìĿĢ":132968,"Ùħؤسس":132969,"Ùħؤسسات":132970,"Ġprécis":132971,"Ġthảo":132972,"à¸ģà¹ĩà¸Ħืà¸Ń":132973,"Ġש׼׾":132974,"führung":132975,"ãģĦãģ§":132976,"à¹ģละมี":132977,"à¸ģà¹ĩมี":132978,"Ġשש":132979,"мел":132980,"Ġкниг":132981,"ĠباÙĦÙĨ":13
2982,"ĠباÙĦÙĨسبة":132983,"Ġaldı":132984,"ÑĤай":132985,"Ġ×Ĺ×ĵש×Ļ×Ŀ":132986,"å®Łãģ¯":132987,"عÙĪØ§":132988,"ĠìĿĺ미":132989,"изм":132990,"ÑĢабоÑĤаÑĤÑĮ":132991,"Ù쨵":132992,"Ġ×ij׳×ķסף":132993,"ãģ¨ãģĹãģ¦ãĤĤ":132994,"à¹Ģà¸Ľà¹ĩà¸Ļà¸Ĺีà¹Ī":132995,"ĠÑģледÑĥеÑĤ":132996,"èĢĥãģĪãģ¦":132997,"Ġ׼×Ļ×ķ×Ŀ":132998,"ÑģÑĤÑĭ":132999,"׼׾׼׾×Ļ":133000,"æµģãĤĮ":133001,"ãĤĴãģ¤ãģij":133002,"ÑĩаÑĤ":133003,"×Ļ׼×ķף":133004,"×Ļר×Ļ":133005,"larıyla":133006,"ãĤ¤ãĥ¡":133007,"ãĤ¤ãĥ¡ãĥ¼ãĤ¸":133008,"׳×ĸ×§":133009,"Ġciò":133010,"Ġsın":133011,"Ġsınır":133012,"à¸Ļà¸Ħร":133013,"каÑĤ":133014,"Ġlá»Ĺi":133015,"ëŀĮ":133016,"تÙģØ§Øµ":133017,"تÙģØ§ØµÙĬÙĦ":133018,"ëĨĵ":133019,"ĠÙħض":133020,"ilmiÅŁ":133021,"بارÙĥ":133022,"ÐĿÐĺ":133023,"Ġthẩm":133024,"Ġ×IJ×ķת×ļ":133025,"ĠпÑĢиним":133026,"ĠпÑĢинима":133027,"Ġyönt":133028,"Ġyöntem":133029,"Ġ×ŀ×§×ij׾":133030,"Ġktórego":133031,"ê·Ģ":133032,"شرÙģ":133033,"داÙħ":133034,"ãģĦãĤįãģĦãĤį":133035,"ĠAlém":133036,"Ġgörü":133037,"Ġgörünt":133038,"Ġgörüntü":133039,"دس":133040,"ÑĪки":133041,"гÑĢад":133042,"Ġlạc":133043,"Ġsữa":133044,"ãĤīãĤĮãģ¾ãģĻ":133045,"oÃłi":133046,"Ñīен":133047,"ãģĭãģªãģĦ":133048,"Ġпоп":133049,"ĠпопÑĥ":133050,"ĠпопÑĥлÑıÑĢ":133051,"ĠاÙĦÙħÙĪÙĤع":133052,"räg":133053,"A":133054,"íķĦ":133055,"ãĤĴè¦ĭãĤĭ":133056,"اÙħا":133057,"ĠاÙĦØŃرب":133058,"ĠÐŁÐ°":133059,"Ġ׾×IJתר":133060,"Ġtá»ijc":133061,"×ij׾×Ķ":133062,"رئÙĬس":133063,"вÑĥ":133064,"ÙĬدÙĬ":133065,"казан":133066,"Ġ×Ĺש×ij×ķף":133067,"hôtel":133068,"×¢×ķ׳×Ķ":133069,"بÙĨÙĬ":133070,"×ŀ×ķ׾":133071,"ĠднÑı":133072,"éĽ£ãģĹãģĦ":133073,"ведениÑı":133074,"Ġ×ķ×ŀת":133075,"напÑĢимеÑĢ":133076,"ÙĤابÙĦ":133077,"Ġrésultat":133078,"ĠÑĢазвиÑĤиÑı":133079,"رÙij":133080,"ìłĦ문":133081,"ĠاÙĦÙħزÙĬد":133082,"ĠìľĦíķ´ìĦľ":133083,"ëĨį":133084,"íĻķ":133085,"ĠThiết":133086,"íĮ¨":133087,"malıdır":133088,"ĠczÅĤ":133089,"ĠczÅĤowie":133090,"ĠczÅĤowiek":133091,"ĠÙĦبÙĨ":133092,"ĠÙĦبÙĨاÙĨ":133093,"üsü":133094,"ãģªãĤĵãģł":133095,"Ġżycie":133096,"ĠÑħоÑĢоÑĪо":133097,"æĸ¹ãģ«":133098,"ëĭ¤ë©´":133099,"иÑĩеÑģкаÑı":133100,"ער×Ļ׼":133101,"ער×Ļ×Ľ×ª":133102,"ãģ¾ãģĽãĤĵãģ§ãģĹãģŁ":133103,"ĠÑģобой":133104,"Ġgá»Ĺ":133105,"ĠделаÑĤÑĮ":133106,"daÄĩ":133107,"аÑĢа":133108,"różni":133109,"à¹Ģลีà¹ī":133110,"à¹Ģลีà¹īย":133111,"à¹Ģลีà¹īยà¸ĩ":133112,"à¸Ŀาà¸ģ":133113,"ĠتÙĤ":133114,"ĠتÙĤدÙĬ":133115,"ĠتÙĤدÙĬÙħ":133116,"หà¸Ļุà¹Īม":133117,"Ġmücade":133118,"Ġmücadele":133119,"ì§Ģ를":133120,"ãĤ¤ãĤ¹":133121,"Ġأساس":133122,"jÄħcego":133123,"ĠÅŁeh":133124,"нÑĤеÑĢ":133125,"ÑĨиÑİ":133126,"ï»»":133127,"ÑİÑīего":133128,"à¹Ĥà¸Ľà¸£à¹ģ":133129,"à¹Ĥà¸Ľà¸£à¹ģà¸ģรม":133130,"ĠmieÄĩ":133131,"ØŃÙĥÙĪÙħØ©":133132,"ãģ§ãģĹãģŁãģĮ":133133,"×Ļס×Ķ":133134,"ãĤĤãģ®ãĤĴ":133135,"Ġ×ŀ×IJת":133136,"สุà¸Ķà¸Ĺà¹īาย":133137,"ĠcÅ©":133138,"ÙĨسب":133139,"ĠпÑĢоÑĩ":133140,"Ġдней":133141,"ĠÑįÑĤиÑħ":133142,"׾×ŀת":133143,"нÑıÑı":133144,"Ñįк":133145,"Ġì§ĢëĤľ":133146,"มหาวิà¸Ĺยา":133147,"มหาวิà¸Ĺยาล":133148,"มหาวิà¸Ĺยาลัย":133149,"dão":133150,"ĠMáy":133151,"ĠêµŃê°Ģ":133152,"à¸ļุรี":133153,"×Ĵ×Ļ׾":133154,"ĠÑĤÑĭÑģÑı":133155,"ĠÑĤÑĭÑģÑıÑĩ":133156,"ÙģÙĥ":133157,"ĠÐĺÑģ":133158,"è¡ĮãĤıãĤĮ":133159,"פר×ĵ":133160,"ãģ¤ãģį":133161,"à¸Ħรà¸Ńà¸ļ":133162,"à¸Ħรà¸Ńà¸ļà¸Ħรัว":133163,"à¸Ĥึà¹īà¸Ļมา":133164,"ä»ĬæĹ¥ãģ¯":133165,"ĠìĤ¬ëŀĮìĿ´":133166,"עצ×ŀ×Ķ":133167,"поÑĢ":133168,"ĠKỳ":133169,"ĠÆ¡n":133170,"ĠthÄĥm":133171,"Ù쨧ÙĤ":133172,"ãģļãģ«":133173,"Ġ׾קר":133174,"Ġ׾קר×ķ×IJ":133175,"اÙģÙĬØ©":133176,"ÙħÙİØ§":133177,"гаÑĢ":133178,"صÙĦا":133179,"صÙĦاة":133180,"Ġ×ŀ×ĸ×Ķ":133181,"lıģını":133182,"Ġ×IJ×Ļ׳×Ķ":133183,"кÑĢо":133184,"Ġngươi":133185,"Ġвним":133186,"Ġвнимание":133187,"jÄħcy":133188,"ÙĢÙĢÙĢÙĢÙĢ":133189,"ÑģÑħод":133190,"ãģªãĤĵãģĭ":133191,"×ŀ×Ļ׾":133192,"Ġ×Ķ×IJ×Ĺ":133193,"ãĤıãģªãģĦ
":133194,"عسÙĥر":133195,"ĠìĦ¸ê³Ħ":133196,"ĠÑĩего":133197,"ĠÑģÑĢедÑģÑĤва":133198,"ĠÐłÐ°Ñģ":133199,"ãģªãģģ":133200,"ÙĨÙ쨳":133201,"ר×Ļ×ķף":133202,"ÑģÑĥд":133203,"ĠìĿ¸ê°Ħ":133204,"ĠاÙĦÙħÙĤبÙĦ":133205,"ÙĨعÙħ":133206,"تÙĪÙ쨱":133207,"ש×ij×¢":133208,"ılm":133209,"ılmÄ±ÅŁ":133210,"Ġ×ľ×ª×ª":133211,"تصÙģ":133212,"×Ķפ×ķ×ļ":133213,"à¹ĥà¸Ļà¸Ľà¸µ":133214,"ìĿ´ê³ł":133215,"ÙģÙĪØ²":133216,"à¸ľà¸¥à¸ĩาà¸Ļ":133217,"ĠGiáo":133218,"à¸ļà¸Ńà¸ģวà¹Īา":133219,"ĠdÄ±ÅŁ":133220,"ĠdÄ±ÅŁÄ±nda":133221,"죽":133222,"ĠdzieÅĦ":133223,"кÑĨии":133224,"иÑĨе":133225,"ãģ®ä¸Ģ":133226,"عش":133227,"пÑĢеÑģÑģ":133228,"หà¸Ļà¹Īà¸Ńย":133229,"ลัà¸ģษà¸ĵะ":133230,"ĠpossibilitÃł":133231,"à¹Ħà¸Ķà¹īรัà¸ļà¸ģาร":133232,"หยุà¸Ķ":133233,"Ġphiên":133234,"çĶŁãģ¾ãĤĮ":133235,"Ø·ÙĪÙĦ":133236,"ÑĦин":133237,"für":133238,"ØŃÙĬاة":133239,"íĸĪìĬµëĭĪëĭ¤":133240,"׼׳×ķת":133241,"à¸Ľà¸£à¸°à¸ª":133242,"à¸Ľà¸£à¸°à¸ªà¸ļ":133243,"à¸Ľà¸£à¸°à¸ªà¸ļà¸ģารà¸ĵà¹Į":133244,"ëIJĺìĹĪ":133245,"Ġkażdy":133246,"Ġluyá»ĩn":133247,"ĠоÑĢганизаÑĨии":133248,"å°ijãģªãģı":133249,"ÑģÑĤÑĢоен":133250,"Ġtécnico":133251,"×§×Ķ׾":133252,"Ġ×ķ×IJ×Ĺ":133253,"ĠعÙĦÙĬÙĥ":133254,"Ñīение":133255,"Ġ×Ķ×Ļ׾×ĵ×Ļ×Ŀ":133256,"ÙĪØ³Ø§Ø¦ÙĦ":133257,"Ġ×ķ×Ķת":133258,"تÙħÙĬز":133259,"ĠÑģказал":133260,"Ġполи":133261,"Ġ×Ķ×ŀס":133262,"ÙĦÙijÙİ":133263,"Ùħؤسسة":133264,"Ġ×ŀ×Ļ×ĵ":133265,"ãģ£ãģ¡":133266,"ĠëĦĪ무":133267,"à¸ŀี":133268,"Ġtặng":133269,"Ġtấn":133270,"רש×Ŀ":133271,"Ġmédica":133272,"Ġ×¢×ķ×ŀ":133273,"Ġ×¢×ķ×ŀ×ĵ":133274,"ÑĦоÑĢ":133275,"Ùħرة":133276,"Ġvatanda":133277,"ĠvatandaÅŁ":133278,"Ġдело":133279,"à¸Ļม":133280,"ãģ¨åIJĮãģĺ":133281,"ÙģÙī":133282,"ÑģоÑĢ":133283,"Ġ×Ķסר×ĺ":133284,"Ġépoca":133285,"ìłķì±ħ":133286,"ĠÑģвÑıзан":133287,"ضرب":133288,"ĠÙĦÙĨا":133289,"Ġużywa":133290,"ĠاÙĦجÙĬØ´":133291,"ÑİÑĢ":133292,"×ijס×ķ×£":133293,"ĠмÑĥ":133294,"ĠмÑĥзÑĭк":133295,"bilité":133296,"Ġmaç":133297,"سÙİ":133298,"تÙĦÙĥ":133299,"ãģ¬":133300,"ÙĬÙĦا":133301,"ÑĪла":133302,"ÙĢÙĢÙĢ":133303,"Ġодной":133304,"зван":133305,"ĠÑģÑĢаз":133306,"ĠÑģÑĢазÑĥ":133307,"ÙĨظÙħ":133308,"راÙĩ":133309,"ĠÙĦÙĩذا":133310,"׼×ķר":133311,"Ġ×Ķש×ij×ķ×¢":133312,"Ġ×Ķשת":133313,"ĠQuảng":133314,"ãĥ«ãĥ¼":133315,"ãģĪãģªãģĦ":133316,"×ĺ×IJ":133317,"Ġmiá»ģn":133318,"ĠPháºŃt":133319,"ĠاÙĦسÙĪÙĤ":133320,"ÄĤ":133321,"ĠاÙĦجÙħع":133322,"ĠاÙĦجÙħعة":133323,"ÑİÑīей":133324,"aÅĤem":133325,"عتÙĤد":133326,"Ø£ÙĦÙħ":133327,"Ñģке":133328,"ĠìĿ´íķ´":133329,"ÙĨسخ":133330,"è¨ĢãģĦ":133331,"добав":133332,"سبÙĤ":133333,"×¢×ķרר":133334,"ÑĤип":133335,"ãģĿãģĵãģ§":133336,"visión":133337,"عÙĪØ¯Ø©":133338,"먹":133339,"×ŀ×ĸר×Ĺ":133340,"ĠØ¥ØŃ":133341,"Ġ׾×ij×Ļף":133342,"Ġ׾צ×IJת":133343,"Ġyardı":133344,"Ġyardımc":133345,"Ġyardımcı":133346,"İZ":133347,"קפ×Ķ":133348,"tré":133349,"liÄŁini":133350,"клÑİÑĩа":133351,"Ġüretim":133352,"Ġayrı":133353,"ĠkiÅŁiler":133354,"à¸Ħà¹īà¸Ļ":133355,"à¸Ħà¹īà¸Ļหา":133356,"ĠSá»±":133357,"Ġ×Ľ×¡":133358,"Ġ×Ľ×¡×£":133359,"ĠÑĤакиÑħ":133360,"ĠXuân":133361,"Ġлег":133362,"Ġлегко":133363,"Ø«ÙĤاÙ쨩":133364,"ÐĿÐŀ":133365,"ãĤ¹ãĤ¿ãĥĥ":133366,"ãĤ¹ãĤ¿ãĥĥãĥķ":133367,"åIJĪãģĦ":133368,"Ġ×Ķש×Ļ×ŀ×ķש":133369,"manız":133370,"ĠÐĴаÑģ":133371,"gün":133372,"ìľĦìĽIJíļĮ":133373,"Ġwspóln":133374,"ĠÑģвое":133375,"íĥģ":133376,"à¹Ģà¸Ļีย":133377,"ÙĪØ¨Ø©":133378,"вÑıз":133379,"ıdır":133380,"ëIJĺìĹĪëĭ¤":133381,"ĠdeÄŁiÅŁtir":133382,"ãĤĭãģĵãģ¨ãģĮ":133383,"Ġ×Ĺ×ĵש×Ķ":133384,"ãĤīãĤĮãģ¦ãģĦãĤĭ":133385,"×Ĺ×Ļ×Ļ×ij":133386,"ĠÐļаÑĢ":133387,"׳×Ļת×ķ×Ĺ":133388,"Ġ×§×ĺף":133389,"ר×ĸ":133390,"ÙĪØº":133391,"èªŃãģ¿":133392,"ĠتÙĤÙĪÙħ":133393,"ĠÙĥاÙĦ":133394,"à¸Ŀึà¸ģ":133395,"Ġë°ľìĥĿ":133396,"ológico":133397,"راع":133398,"à¹ģà¸ģà¹īà¹Ħà¸Ĥ":133399,"ĠÑĢабоÑĤÑĥ":133400,"ÙĨÙijÙİ":133401,"à¸Ńยูà¹Īà¸Ĺีà¹Ī":133402,"ĠاÙĦثاÙĨÙĬØ©":133403,"ĠNhân":13
3404,"ÑħваÑĤ":133405,"öne":133406,"Ġعدة":133407,"à¹ģสà¸ĩ":133408,"ÑĤоп":133409,"пÑĥÑģка":133410,"شراء":133411,"ĠÐļом":133412,"Ġפע×ķ׾×Ķ":133413,"ìĤ¬ìĿ´":133414,"ìĤ¬ìĿ´íĬ¸":133415,"è¡Įãģ£ãģ¦":133416,"Ġ×Ķ×Ķת":133417,"ĠÑģÑĤоÑĢо":133418,"ĠÑģÑĤоÑĢонÑĭ":133419,"درس":133420,"à¸ĭู":133421,"à¸ķà¹Īำ":133422,"ĠأبÙĬ":133423,"подоб":133424,"ãģ«ãģ¦":133425,"ارتÙģØ§Ø¹":133426,"ĠÙħؤ":133427,"иков":133428,"geführt":133429,"มืà¸Ńà¸ĸืà¸Ń":133430,"ĠÙĦÙĤد":133431,"ĠØ£ÙĨÙij":133432,"سÙĬطر":133433,"ãģ¾ãģļãģ¯":133434,"ס×ĵ":133435,"ÑģколÑĮко":133436,"ãģ¿ãģŁãģĦãģª":133437,"×ĵר×Ĵ":133438,"×¢×Ļ×ĵ":133439,"à¹ĥหà¹īà¸ļริà¸ģาร":133440,"ĠÐĶи":133441,"×ij×¢×Ļ×ķת":133442,"Ġ×Ķ×Ĺ×ķ":133443,"пиÑģÑĮ":133444,"ĠاÙĦØ®ÙĦ":133445,"бав":133446,"Ġİlk":133447,"ĠاÙĦØ®Ùħ":133448,"ĠاÙĦØ®ÙħÙĬس":133449,"ĠÙĬÙĤÙĪÙħ":133450,"æĻĤãģ®":133451,"ĠsÅĤow":133452,"ĠØ£ÙĩÙħ":133453,"Ø®ÙĦÙĤ":133454,"ĠأصبØŃ":133455,"Ġchứa":133456,"Ġthác":133457,"Ù쨧ÙĦ":133458,"Ġchá»Ŀ":133459,"ĠاÙĦخار":133460,"ĠاÙĦخارج":133461,"ĠاÙĦخارجÙĬØ©":133462,"طائر":133463,"ĠtÃł":133464,"ĠtÃłu":133465,"à¸ģลà¹īà¸Ńà¸ĩ":133466,"ĠاÙĦÙħرأ":133467,"ĠاÙĦÙħرأة":133468,"åħ¨ãģı":133469,"ĠÃĸn":133470,"çļĦãģ«ãģ¯":133471,"Ġpièce":133472,"×Ĵ×Ļ×ij":133473,"ĠاÙĦÙĪØ§ÙĤع":133474,"ä»Ĭãģ®":133475,"ĠاÙĦÙħÙĤ":133476,"cznÄħ":133477,"ÙģØ¹Ø§ÙĦ":133478,"енного":133479,"ĠÑĦакÑĤ":133480,"ìĭłì²Ń":133481,"ĠÐŀни":133482,"ĠاÙĦبÙĦاد":133483,"овиÑĩ":133484,"ëıĮ":133485,"ÑĦÑĥнкÑĨи":133486,"Ġìĸ´ëĬIJ":133487,"ãĥķãĤ©ãĥ¼":133488,"dÃŃ":133489,"илоÑģÑĮ":133490,"ÙħÙī":133491,"ĠاÙĦØ£ÙħرÙĬÙĥ":133492,"ĠاÙĦØ£ÙħرÙĬÙĥÙĬØ©":133493,"×ĺ×Ļפ×ķ׾":133494,"íĶĦë¡ľê·¸":133495,"íĶĦë¡ľê·¸ëŀ¨":133496,"Ġש×ķ׳×ķת":133497,"Ø´ÙħÙĦ":133498,"ĠпаÑĢа":133499,"Ġ×Ķ×Ĺ×ķ×§":133500,"ÙĪØ²Ø§Ø±Ø©":133501,"ãģ¨ãģĻãĤĭ":133502,"Ġquảng":133503,"Ġaģır":133504,"ĠاÙĦÙĦج":133505,"ĠاÙĦÙĦجÙĨØ©":133506,"긴":133507,"ĠTân":133508,"جÙħÙĦ":133509,"дол":133510,"à¹ģà¸ŀà¸Ĺย":133511,"à¹ģà¸ŀà¸Ĺยà¹Į":133512,"Ġר×IJש×Ļ":133513,"Ñīей":133514,"Ġçevre":133515,"ĠкомплекÑģ":133516,"Ġ×ij×ŀש×ļ":133517,"Ġaltın":133518,"ĠأعÙħاÙĦ":133519,"ĠÑģвоего":133520,"ãĤĪãģĦ":133521,"×Ĺ׾×Ļ×ĺ":133522,"×ŀ×ł×¢":133523,"Ġר×ij×Ķ":133524,"ĠØ£ÙĬضاÙĭ":133525,"×ĸ׾":133526,"ĠاÙĦسÙĬاسÙĬ":133527,"æĢĿãģĨ":133528,"קרק":133529,"קרקע":133530,"ĠاÙĦÙ쨱ÙĬÙĤ":133531,"биÑĤ":133532,"ק׳×Ķ":133533,"ĠØ¥ÙĨÙĩ":133534,"ĠÐĴам":133535,"ÐłÐŀ":133536,"ãĥĪãĥª":133537,"å¿ħè¦ģãģª":133538,"Ġchâu":133539,"ç¶ļãģij":133540,"Ġçözüm":133541,"gÅĤow":133542,"عÙĤÙĦ":133543,"売ãĤĭ":133544,"iết":133545,"à¸Ĭิà¹īà¸Ļ":133546,"ĠØŃÙĤÙĪÙĤ":133547,"Ø·ÙĦع":133548,"ĠÄijen":133549,"ĠÙĥاÙ쨩":133550,"ãģ®ãģĶ":133551,"Ġë¬":133552,"Ġ물":133553,"Ġë¬¼ë¡ł":133554,"ĠرسÙĪÙĦ":133555,"зам":133556,"замен":133557,"Ġkullanıcı":133558,"×¢×ķ׾":133559,"èī²ãĢħ":133560,"ÑĪиÑĢ":133561,"Ġ×Ĺש":133562,"Ġwygl":133563,"ĠwyglÄħda":133564,"ש×Ļ×ŀ×ķש":133565,"å¿ĺãĤĮ":133566,"×¢×Ļצ×ķ×ij":133567,"ĠاÙĦسÙĪØ±ÙĬ":133568,"å°ijãģªãģĦ":133569,"ĠпоиÑģк":133570,"สำà¸Ļัà¸ģà¸ĩาà¸Ļ":133571,"Ġ×ŀצ×ĵ":133572,"ĠmÃ¼ÅŁ":133573,"ĠmÃ¼ÅŁter":133574,"ĠmÃ¼ÅŁteri":133575,"ĠÙħÙĨÙĩÙħ":133576,"à¸ķำà¹ģ":133577,"à¸ķำà¹ģหà¸Ļ":133578,"à¸ķำà¹ģหà¸Ļà¹Īà¸ĩ":133579,"ÅĽmie":133580,"Ġ×©×ł×ª":133581,"Ġ×Ķפ×Ļ":133582,"פרש":133583,"×¢×ijר×Ļת":133584,"สà¸Ļัà¸ļ":133585,"สà¸Ļัà¸ļสà¸Ļุ":133586,"สà¸Ļัà¸ļสà¸Ļุà¸Ļ":133587,"è¨Ģãģ£ãģ¦":133588,"à¸ģารà¸Īัà¸Ķ":133589,"ĠMoże":133590,"изаÑĨии":133591,"ứt":133592,"ĠÙĪØ¨Ø¹Ø¯":133593,"ĠdeÄŁild":133594,"ĠdeÄŁildir":133595,"Ġת×ŀ":133596,"Ġ×ŀ×ŀ׳×ķ":133597,"話ãĤĴ":133598,"ĠÑĨена":133599,"Ġthúc":133600,"×Ļ×ŀ×ķף":133601,"ĠBáo":133602,"ãĤĴåıĸãĤĬ":133603,"å®īãģĦ":133604,"Ġ×¢×ķש×Ļ×Ŀ":133605,"èĩªåĪĨãģĮ":133606,"lée":133607,"ãĤĭãģ®ãģ§":133608,"иÑĢÑĥеÑĤ":133609,"ãģ¦ãĤĭ":133610,"ستر":133611,"ĠاÙĦØŃÙĬ":133612,"×Ļ׾×ķת":133
613,"Ġ×Ĺ×ij":133614,"ÙĤرأ":133615,"تÙħÙĥÙĨ":133616,"سائÙĦ":133617,"prüf":133618,"ãģĭãģijãģ¦":133619,"ĠÑģобÑģÑĤвенно":133620,"ĠìľĦíķĺìŬ":133621,"׾×Ļ×ĺ":133622,"ãģĮå¤ļãģı":133623,"ÙĬتÙĩا":133624,"ç«ĭãģ¦":133625,"มà¸Ńà¸ļ":133626,"ìĭľìŀ¥":133627,"оÑĢа":133628,"ĠsavaÅŁ":133629,"×ĺ×Ļ×ij×Ļ":133630,"×ij׳×ķ":133631,"Ùħاذا":133632,"기ê°Ħ":133633,"ãģªãģ©ãģ§":133634,"Ġ×ŀת×Ĺ×Ļ׾":133635,"Ġnhiá»ħ":133636,"Ġnhiá»ħm":133637,"каÑĢ":133638,"каÑĢÑĤ":133639,"Ġ׾×Ķשת×ŀש":133640,"׳×Ļ×Ĺ":133641,"ادÙĬØ©":133642,"รายà¸ĩาà¸Ļ":133643,"ĠprzykÅĤad":133644,"Ñīий":133645,"ØŃضÙĪØ±":133646,"Ġhôn":133647,"ÃĿ":133648,"ת×ķצ×IJ×ķת":133649,"رابط":133650,"Ġbếp":133651,"ĠполÑĥÑĩи":133652,"åĩºä¼ļãģĦç³»":133653,"à¸Ľà¸¥à¹Īà¸Ńย":133654,"ĠاÙĦشباب":133655,"اÙĩÙĦ":133656,"ä»Ĭãģ¾ãģ§":133657,"رجع":133658,"ãĤ¶ãĥ¼":133659,"ÙĤÙģ":133660,"ĠGroÃŁ":133661,"ĠíļĮìĽIJ":133662,"اجر":133663,"Ġ×ij×ŀקר×Ķ":133664,"Ġsegurança":133665,"fühl":133666,"ãģ¦ãģĦãģı":133667,"หมà¸Ń":133668,"ĠкоÑĤоÑĢом":133669,"ĠNÄĥm":133670,"ĠdÅĤugo":133671,"ÙħÙĨØŃ":133672,"ש×ķ×ķ×Ļ":133673,"ĠØ£ÙĬاÙħ":133674,"à¸ªà¸łà¸²à¸ŀ":133675,"rzÄħ":133676,"شرÙĥات":133677,"ãĤĴèĢĥãģĪ":133678,"даÑĢ":133679,"à¸Ľà¸£à¸°à¸Ĭุม":133680,"Ġ×ķ×IJ×ĸ":133681,"iá»ĩn":133682,"Ġtươi":133683,"ש×Ļ×Ĺ":133684,"à¸Ńà¹Īà¸Ńà¸Ļ":133685,"æĽ¸ãģĦãģ¦":133686,"Ġngữ":133687,"×ij×Ļ×ĺ×Ĺ":133688,"×ij×Ļ×ĺ×Ĺ×ķף":133689,"Ġsẵ":133690,"Ġsẵn":133691,"ì§ĢëıĦ":133692,"ĠпÑĢеп":133693,"ĠпÑĢепаÑĢаÑĤ":133694,"ĠнаÑĥÑĩ":133695,"ĠÃľnivers":133696,"ĠÃľniversites":133697,"ĠÃľniversitesi":133698,"Ġ×Ĵ×ĵ×ķ׾×Ķ":133699,"Ġ×Ķ×ł×ª":133700,"Ġ×Ķ×ł×ª×ij×¢":133701,"ãģ§ãģĤãģ£ãģŁ":133702,"ĠmiesiÄħ":133703,"ĠmiesiÄħc":133704,"гÑĢам":133705,"гÑĢамм":133706,"ĠبشأÙĨ":133707,"ĠÑħÑĢ":133708,"×§×Ļ×ĵ":133709,"×§×Ļ×ĵ×ķ×Ŀ":133710,"Ø´Ùĥر":133711,"Ġá»ķ":133712,"Ġá»ķn":133713,"ãģĮãģĤãģ£ãģ¦":133714,"ãģķãĤĮãģ¾ãģĻ":133715,"Ġ×Ĺ×ķ×ĵ":133716,"Ġ×Ĺ×ķ×ĵש×Ļ×Ŀ":133717,"ÙħÙĪØ§Ø¬Ùĩ":133718,"ÙħÙĪØ§Ø¬ÙĩØ©":133719,"أشخاص":133720,"بغ":133721,"à¹Ģรียà¸Ļรูà¹ī":133722,"ãģĹãģ¦ãģĦãģı":133723,"Ġsạn":133724,"å¿ħãģļ":133725,"׳×Ļ×Ĵ":133726,"׳×Ļ×Ĵ×ķ×ĵ":133727,"باÙĦغ":133728,"×Ĺש×ŀ":133729,"×Ĺש×ŀ׾":133730,"Ġnapraw":133731,"ĠnaprawdÄĻ":133732,"Ø´Ùĩاد":133733,"×IJ×ķ×Ķ":133734,"×IJ×ķ×Ķ×ij":133735,"иÑĨÑĭ":133736,"Ġ×Ķר׼×ij":133737,"ëŀij":133738,"Ġתע":133739,"Ġ×Ķ×Ļש":133740,"Ġ×Ķ×Ļשר×IJ":133741,"Ġ×Ķ×Ļשר×IJ׾×Ļ":133742,"Ø£ÙħÙĨ":133743,"ÑİÑīаÑı":133744,"skór":133745,"LERİ":133746,"Ġ×Ķ×IJ×Ĺר×ķף":133747,"×¢×ł×§":133748,"ĠÙĪÙĥÙĦ":133749,"ãģĵãģĵãģ§":133750,"Ġquán":133751,"liÄŁin":133752,"à¸ģà¸İหมาย":133753,"Ø·Ùħ":133754,"أجÙĩ":133755,"أجÙĩزة":133756,"ĠErdoÄŁan":133757,"ãģ§ãģĬ":133758,"ĠвÑĢа":133759,"ĠвÑĢаÑĩ":133760,"ĠPhó":133761,"à¸Ĭัà¹Īว":133762,"à¸Ĭัà¹Īวà¹Ĥม":133763,"à¸Ĭัà¹Īวà¹Ĥมà¸ĩ":133764,"Ġphúc":133765,"×Ļפ×ķת":133766,"×¢×Ļ×ķף":133767,"Ġdużo":133768,"ãĥģãĥ¼ãĥł":133769,"ĠÙĬÙİ":133770,"ĠзадаÑĩ":133771,"Ġ×Ĵ×ij×ķ×Ķ×Ķ":133772,"Ġ׼׼׾":133773,"ложен":133774,"état":133775,"ĠngÄĥn":133776,"èµ·ãģį":133777,"ĠTiến":133778,"صعب":133779,"Ġexperiência":133780,"Ø®Ùħ":133781,"à¸ģารà¸Ĺำà¸ĩาà¸Ļ":133782,"سÙĬد":133783,"ĠDá»±":133784,"ĠкоÑĤоÑĢого":133785,"ladıģı":133786,"Ġkhá»ķ":133787,"Ġê³ĦìĨį":133788,"Ñīик":133789,"สà¹Īวà¸Ļà¸ķัว":133790,"зоÑĢ":133791,"ÙĨÙı":133792,"Ġà¸Ķัà¸ĩ":133793,"Ġà¸Ķัà¸ĩà¸Ļัà¹īà¸Ļ":133794,"Ġcấu":133795,"ĠÄijá»ijc":133796,"оÑĦ":133797,"ĠاÙĦأعÙħاÙĦ":133798,"ãģªãģıãģ¦ãĤĤ":133799,"×ķ׼×Ļ×Ŀ":133800,"à¹ģà¸Ľ":133801,"ĠBên":133802,"ãĥ¯ãĥ³":133803,"Ġgiám":133804,"ĠÅŀu":133805,"Ġdáng":133806,"عÙĦÙĬ":133807,"à¹Ģà¸ģษ":133808,"à¹Ģà¸ģษà¸ķร":133809,"ÙĪØ¬Ø¨":133810,"ннÑĭе":133811,"ÙĤضاء":133812,"à¸Ħวà¸ļ":133813,"à¸Ħวà¸ļà¸Ħุ":133814,"à¸Ħวà¸ļà¸Ħุม":133815,"ãģ¤ãģ¤":133816,"ĠViá»ĩc":133817,"×ŀ×ij×ĺ":133818,"ש×Ļת×ķ×£":133819,"Ġве
дÑĮ":133820,"kaza":133821,"kazaÅĤ":133822,"à¸ķำรวà¸Ī":133823,"ãĤ¿ãĥ«":133824,"ĠповÑĭ":133825,"ĠповÑĭÑĪен":133826,"ĠSợ":133827,"ĠìĦ¤ëªħ":133828,"ĠÃĩünkü":133829,"ìĥĿíĻľ":133830,"Ö¾":133831,"ãĤĮãģ¦ãģĦãĤĭ":133832,"Ġ×ijר×IJש":133833,"ר×ķ×Ĵ":133834,"ĠоÑĦи":133835,"ĠоÑĦиÑĨиалÑĮн":133836,"ĠÑĥÑģÑĤанов":133837,"ĠÑĥÑģÑĤановлен":133838,"ĠاÙĦÙħصر":133839,"ĠاÙĦÙħصرÙĬØ©":133840,"ĠÐŁÐ¾ÑįÑĤомÑĥ":133841,"ÙĨصÙģ":133842,"ĠÙĪØ§ÙĦÙĨ":133843,"ĠhÃłi":133844,"à¸Ħิ":133845,"ĠAprès":133846,"ì³IJ":133847,"à¹Ģà¸ĭีย":133848,"×ĵ×ŀ×Ķ":133849,"activité":133850,"à¸Ħิà¸Ķวà¹Īา":133851,"ÑĤÑĢен":133852,"à¹Ģฮ":133853,"ãĥıãĤ¤":133854,"ãģĮå¢ĹãģĪ":133855,"еннаÑı":133856,"Ġìĺ¤ëĬĺ":133857,"ãĥ¢ãĥ³":133858,"ĠконеÑĩно":133859,"ĠÙħÙĤابÙĦ":133860,"clé":133861,"Ġhü":133862,"Ġthẳng":133863,"ìłģìĿ´":133864,"ĠÐIJлекÑģ":133865,"ĠÐIJлекÑģан":133866,"ĠÐIJлекÑģандÑĢ":133867,"ãĥŀãĥ³ãĤ·ãĥ§ãĥ³":133868,"ãģ²ãģ¨ãģ¤":133869,"ãģªãģĬ":133870,"à¹Ģà¸Īà¹īาà¸Ĥà¸Ńà¸ĩ":133871,"ëĵľë¦¬":133872,"شاء":133873,"ĠsaÄŁlık":133874,"ĠÅŁimdi":133875,"×Ļ×IJ׾":133876,"تأثÙĬر":133877,"أسب":133878,"أسباب":133879,"ĠвÑĭполнен":133880,"лок":133881,"ש×Ļ×ij×Ķ":133882,"Ġlắm":133883,"ĠTrÆ°á»Ľc":133884,"Ġ×Ķ×¢×ľ":133885,"리를":133886,"ĠÑĢеж":133887,"ĠÑĢежим":133888,"inté":133889,"intégr":133890,"×Ĵ׳×Ļ":133891,"ĠاÙĦشعر":133892,"Ġmilhões":133893,"Ġpequeño":133894,"ãĤ³ãĥ¼ãĤ¹":133895,"×ķ׼×Ĺ":133896,"à¹Ģà¸Ĭà¹īา":133897,"شرÙĤ":133898,"Ġhương":133899,"รัà¸IJà¸ļาล":133900,"à¸ģลาย":133901,"à¸ģลายà¹Ģà¸Ľà¹ĩà¸Ļ":133902,"ĠподÑħод":133903,"תש×ķ×ij×Ķ":133904,"ãģıãģªãģ£ãģ¦":133905,"ĠاÙĦØ£ÙħÙħ":133906,"ĠHá»įc":133907,"ĠwspóÅĤpr":133908,"ĠwspóÅĤprac":133909,"ÑĩÑĥв":133910,"ÑĩÑĥвÑģÑĤв":133911,"ÃŃstico":133912,"à¹Ģà¸ģาะ":133913,"ìĽĢ":133914,"Ġназад":133915,"ãĤĭãĤĪãģĨãģ«":133916,"ĠСШ":133917,"ĠСШÐIJ":133918,"мон":133919,"ĠAsÃŃ":133920,"×ķר×Ĵ":133921,"полнен":133922,"×ŀ×¡×ľ":133923,"×ŀ×¡×ľ×ķ׾":133924,"à¹Ģลืà¸Ńà¸Ķ":133925,"à¹Ģริà¹Īมà¸ķà¹īà¸Ļ":133926,"ĠاÙĦØ¥Ùħ":133927,"ĠاÙĦØ¥Ùħارات":133928,"צ×Ķר":133929,"ãĥ¡ãĥªãĥĥãĥĪ":133930,"ĠпоÑĤом":133931,"виз":133932,"ĠÙģØªØ±Ø©":133933,"å¾Įãģ®":133934,"ÐĿÐIJ":133935,"×ŀסר":133936,"ÙĬرÙĬ":133937,"pré":133938,"ĠteÅŁek":133939,"ĠteÅŁekkür":133940,"Ġödeme":133941,"داÙĨ":133942,"ãģ¾ãģĹãģ¦":133943,"缮ãģ«":133944,"ĠÑĤеÑĩение":133945,"lard":133946,"lardır":133947,"à¹Ģราà¸Īะ":133948,"ספ×Ļ":133949,"ĠÙĪÙĥذÙĦÙĥ":133950,"Ġhát":133951,"Ġtá»Ļc":133952,"à¸Ħุย":133953,"Ġbức":133954,"ØŃÙĬÙĨ":133955,"èģŀãģĦãģ¦":133956,"Ùħؤشر":133957,"ĠNhư":133958,"Ġменее":133959,"ละà¸Ħร":133960,"Ñģин":133961,"ĠÑĢек":133962,"ĠÑĢекл":133963,"ĠÑĢеклам":133964,"ĠÙģÙĩÙĪ":133965,"Ġ׾×ĸ":133966,"×Ļ׳×ķת":133967,"ĠÅŁart":133968,"ÑģÑĤавка":133969,"Ġíı¬íķ¨":133970,"ãģ«è¡Įãģı":133971,"ï¼Ŀ":133972,"ĠпозволÑıеÑĤ":133973,"Ġת×ķ׼׾×ķ":133974,"овал":133975,"صÙĦØ©":133976,"Ġ׾ש׳×ķת":133977,"ĠÐĺгÑĢ":133978,"ÙħÙĨتجات":133979,"ĠsatÄ±ÅŁ":133980,"Ñģко":133981,"ĠاÙĦØ«ÙĦاثاء":133982,"Ġ×Ķ×ĵ×ijר×Ļ×Ŀ":133983,"ãģĹãģ¾ãģĹãĤĩãģĨ":133984,"بÙĤÙī":133985,"åĬĽãĤĴ":133986,"ĠÃĩok":133987,"ãĥģãĥ¥":133988,"à¹Ģà¸Ĭืà¹īà¸Ń":133989,"ยุà¸Ħ":133990,"ศาล":133991,"Ġ×§×ķ×ĵ×Ŀ":133992,"×ĸר×Ļ×Ŀ":133993,"ãģ®åł´åIJĪ":133994,"ĠìķĬìķĺ":133995,"ãģĤãĤĬãģ¾ãģĻãģĮ":133996,"×IJשר":133997,"è¡Įãģı":133998,"ãģ»ãģĭ":133999,"æ°Ĺãģ«ãģªãĤĭ":134000,"йдеÑĤ":134001,"íķĺìĺĢëĭ¤":134002,"ستÙħرار":134003,"ĠÐŁÑĢе":134004,"ĠÑģбоÑĢ":134005,"ĠìķĦ무":134006,"ç§ģãĤĤ":134007,"عص":134008,"ĠниÑĩ":134009,"ĠниÑĩего":134010,"ĠпÑĢием":134011,"×§×ķ×ŀ":134012,"ĠìĪĺëıĦ":134013,"Ġì¡´":134014,"Ġì¡´ìŀ¬":134015,"ĠأثÙĨ":134016,"ĠأثÙĨاء":134017,"ĠÙĪØ§ÙĦØŃ":134018,"ãģĮãģ§ãģįãĤĭ":134019,"Ġת×Ķ":134020,"Ġת×Ķ×Ļ×Ķ":134021,"רף":134022,"ĠÑģвÑıзи":134023,"×Ĵשת":134024,"ÑģпекÑĤ":134025,"ס×ij×Ļ×ij":134026,"ס×ij×Ļ×ij×Ķ":
134027,"ĠíķĦìļĶíķľ":134028,"تخصص":134029,"Ġжив":134030,"ĠживоÑĤ":134031,"ĠMayıs":134032,"تعا":134033,"تعاÙĪÙĨ":134034,"ĠعÙĨÙĩا":134035,"ówki":134036,"ĠاÙĦÙģÙĦسطÙĬÙĨÙĬ":134037,"ãģłãģijãģ§ãģªãģı":134038,"ìĿ¸ì§Ģ":134039,"ĠاÙĦسÙĪØ¯":134040,"ĠاÙĦسÙĪØ¯Ø§ÙĨ":134041,"إجراءات":134042,"Ġkötü":134043,"Ġ×Ļתר":134044,"×Ĵ×Ļש×Ķ":134045,"Ġצ×ķר×ļ":134046,"รà¸ĸย":134047,"รà¸ĸยà¸Ļà¸ķà¹Į":134048,"ÑħоÑĤ":134049,"ÐłÐIJ":134050,"ÙĪØ·ÙĨ":134051,"Ġsayısı":134052,"ס×Ĺר":134053,"ÙħÙĪÙĦ":134054,"ãĤĴæĮģãģ£ãģ¦":134055,"عاÙĨ":134056,"Ġtá»Ļi":134057,"ĠвÑĭÑĪе":134058,"Ġtầm":134059,"ãĥĪãĥ¬":134060,"×Ļצ×ķ":134061,"มุม":134062,"سÙĪØ¯":134063,"ìłĦìŀIJ":134064,"ãĤµãĥŃãĥ³":134065,"ìĤ°ìĹħ":134066,"ĠоÑģнован":134067,"Ø®Ù쨶":134068,"רצ×Ķ":134069,"بÙĬض":134070,"×ķÖ¹":134071,"ס×Ļ×Ļ×¢":134072,"Ġש×IJ×Ļ":134073,"ĠاÙĦÙĤرآÙĨ":134074,"ĠТакже":134075,"×ŀש×ŀ×¢×ķת":134076,"سÙĩÙĦ":134077,"Ġ×Ķ׳×Ķ":134078,"ãĤĴãģĹãģ¦ãģĦãĤĭ":134079,"×Ļ×Ļס":134080,"×Ķ×ķ×IJ":134081,"ĠBÃŃ":134082,"Ġмало":134083,"ĠëͰëĿ¼ìĦľ":134084,"Ġר×Ĺ×ij":134085,"ãģĮé«ĺãģĦ":134086,"ÙĪØ§Ø³":134087,"ìĤ¼":134088,"×ł×¢":134089,"ãģ£ãģ¡ãĤĥ":134090,"ĠTüm":134091,"à¸Ńีà¸ģà¸Ķà¹īวย":134092,"ãģĹãģ¦ãģıãģłãģķãģĦ":134093,"ÙĨشاط":134094,"ãĥĹãĥ©ãĥ³":134095,"алиÑģÑĮ":134096,"×ĵ×ľ×ª":134097,"ĠwczeÅĽ":134098,"ĠwczeÅĽniej":134099,"ĠÑįÑĤим":134100,"Ġthá»ĭt":134101,"à¸ļัà¸į":134102,"à¸ļัà¸įà¸Ĭี":134103,"ãģļãģ£ãģ¨":134104,"ÑĢин":134105,"ĠswojÄħ":134106,"íķĺëĬĶëį°":134107,"Ġë§Įëĵ¤ìĸ´":134108,"تشÙĥ":134109,"تشÙĥÙĬÙĦ":134110,"ائÙĩ":134111,"Ġ׾פ×Ĺ×ķת":134112,"ãĥĭãĥ¥":134113,"ãĥĭãĥ¥ãĥ¼ãĤ¹":134114,"׼×IJף":134115,"ãģ§ãģįãģŁ":134116,"звон":134117,"ĠstaÅĤ":134118,"×Ĺ×ijרת×Ļ":134119,"ĠأعÙĦÙĨ":134120,"à¹ģà¸ļà¸ļà¸Ļีà¹ī":134121,"بدء":134122,"ãĤģãģŁ":134123,"Ġ×ŀש×ŀ×¢×ķת":134124,"Ġ×ŀש×ŀ×¢×ķת×Ļ":134125,"örü":134126,"Ġhạnh":134127,"zähl":134128,"ĠLý":134129,"Ġ×ij×Ķת":134130,"Ġ×ij×Ķת×IJ×Ŀ":134131,"баÑĢ":134132,"ì¦Ī":134133,"ä»ĬåĽŀãģ®":134134,"Ġyü":134135,"Ġyüks":134136,"Ġyüksel":134137,"ãĤ½ãĥ¼":134138,"ãģĤãĤĮ":134139,"×ª×ľ×ŀ×Ļ×ĵ":134140,"ãģ¤ãģª":134141,"×ij׳×Ļ×Ŀ":134142,"Ġxếp":134143,"ĠмÑĥжÑĩин":134144,"ĠاÙĦÙĥتاب":134145,"׼×ŀ×ķת":134146,"Ġçe":134147,"ĠçeÅŁ":134148,"ĠçeÅŁit":134149,"ĠçeÅŁitli":134150,"×ĵ×Ļר×ķת":134151,"à¸ļุà¸į":134152,"ĠاÙĦØ¥ÙĦÙĥ":134153,"ĠاÙĦØ¥ÙĦÙĥترÙĪ":134154,"ĠاÙĦØ¥ÙĦÙĥترÙĪÙĨÙĬ":134155,"ĠباÙĦإض":134156,"ĠباÙĦإضاÙ쨩":134157,"Ġyönel":134158,"Ġyönelik":134159,"mysÅĤ":134160,"à¸Ķà¹īวยà¸ģาร":134161,"à¸ģารà¸Ĺำ":134162,"овÑĭм":134163,"أزÙħØ©":134164,"æİ¢ãģĹ":134165,"íļ¨":134166,"Ġ×ķ×IJ×Ŀ":134167,"Ġnghiêm":134168,"ÑĪин":134169,"кал":134170,"Ġcrianças":134171,"èĩªåĪĨãģ§":134172,"Ġнай":134173,"ĠнайÑĤи":134174,"ĠSá»ij":134175,"ĠÃ¶ÄŁrenciler":134176,"ãĥ¶æľĪ":134177,"Ñģан":134178,"ĠJá":134179,"ĠkonuÅŁma":134180,"شرط":134181,"ëĪĪ":134182,"arrière":134183,"ضرÙĪØ±Ø©":134184,"ãĥĶãĥ³":134185,"עשר":134186,"аÑĢÑĮ":134187,"جÙħاع":134188,"Ġdéco":134189,"Ġ×Ļ×Ķ×ķ×ĵ×Ļ":134190,"à¸ŀลาà¸Ķ":134191,"ĠÙĬÙĥÙĨ":134192,"ĠجاÙħعة":134193,"طبÙĤ":134194,"ĠboÅŁ":134195,"×ķ×ķ×IJ":134196,"×ŀ×ĵ×¢":134197,"×§×ij×ķצת":134198,"פ×Ļר":134199,"jÄħcym":134200,"Ùħشا":134201,"ÙħشاÙĥÙĦ":134202,"צפ×ķף":134203,"إست":134204,"×ŀ׼ר":134205,"سÙħع":134206,"Ġкакой":134207,"ÑĤвоÑĢ":134208,"ØŃج":134209,"ÙģØ±Ø¶":134210,"пÑĢавлен":134211,"Ġникак":134212,"Ġmiá»ĩ":134213,"Ġmiá»ĩng":134214,"Ã¼ÃŁ":134215,"иÑĢовал":134216,"׾×ŀ×ķת":134217,"次ãģ®":134218,"ÙĦØ·":134219,"à¸ķัà¸Ļ":134220,"×Ķת×Ĺ×Ļ׾":134221,"ĠfotoÄŁ":134222,"ĠfotoÄŁraf":134223,"طرØŃ":134224,"à¸Ńà¸Ńà¸ģà¹Ħà¸Ľ":134225,"Ġyên":134226,"Ġпок":134227,"ĠпокÑĥп":134228,"ĠпокÑĥпа":134229,"ÑĨÑĥ":134230,"ĠкомпÑĮÑİ":134231,"ĠкомпÑĮÑİÑĤеÑĢ":134232,"ĠاÙĦÙĥرÙĬÙħ":134233,"تصÙħ":134234,"تصÙħÙĬÙħ":134235,"Ġоказа":134236,"Ġzarówn":13423
7,"Ġzarówno":134238,"ëĮĢì¶ľ":134239,"ãĤ»ãĥ³ãĤ¿ãĥ¼":134240,"ĠjakoÅĽci":134241,"æĤ©":134242,"æĤ©ãģ¿":134243,"Ø£ÙĨÙĪ":134244,"Ø£ÙĨÙĪØ§Ø¹":134245,"ë¹ł":134246,"Ġìłķë§IJ":134247,"Ġkẻ":134248,"ĠÑģайÑĤа":134249,"Ġ×Ķער×ij":134250,"Ùĩز":134251,"presión":134252,"ĠÑģÑĤен":134253,"ãģ£ãģ¦ãĤĭ":134254,"Ġhızlı":134255,"ÐļÐIJ":134256,"×ŀשפ×Ĺת":134257,"ĠÙĨÙĩا":134258,"ĠÙĨÙĩاÙĬØ©":134259,"ãģ¾ãģĦ":134260,"оÑħÑĢан":134261,"รà¹īà¸Ńย":134262,"ลึà¸ģ":134263,"ĠÙĪØ¨Ø§ÙĦ":134264,"ãĤĤãģ®ãģĮ":134265,"ר׼×Ļ×ij":134266,"ãĤ¤ãĥ¤":134267,"سؤ":134268,"سؤاÙĦ":134269,"ĠÙĦØ£ÙĨÙĩ":134270,"ĠkonuÅŁtu":134271,"ÐļÑĥпиÑĤÑĮ":134272,"Ġש×IJת×Ķ":134273,"ĠÙĪØ§ÙĦس":134274,"ĠmożliwoÅĽci":134275,"Ġprób":134276,"ëͰ":134277,"ãģ©ãĤĮ":134278,"ĠÐľÐ¸Ð½":134279,"ĠоÑĢганизм":134280,"ãģ«å¯¾ãģĻãĤĭ":134281,"ĠPré":134282,"Ġprivé":134283,"chè":134284,"ãģĦãģŁãģłãģį":134285,"สà¸Ļุà¸ģ":134286,"ajÄħce":134287,"ĠDzi":134288,"ĠDziÄĻki":134289,"ÅĤatw":134290,"rän":134291,"ränk":134292,"æĿ¥ãģŁ":134293,"Ġ×Ķ×Ļ×Ķ×ķ×ĵ×Ļ":134294,"ãĤ¬ãĥ¼":134295,"ĠÑĢад":134296,"ĠÑĢади":134297,"кÑĤив":134298,"Ø£Ùĩد":134299,"Ø£ÙĩداÙģ":134300,"ש×IJ×Ļר":134301,"ãģ¦ãģĦãģªãģĦ":134302,"Ġfrüh":134303,"Ġокол":134304,"Ġоколо":134305,"Ġregião":134306,"ĠÑĩиÑģле":134307,"Ġponiew":134308,"Ġponieważ":134309,"ìĦ¼íĦ°":134310,"Ġbầu":134311,"Ġê·":134312,"Ġê·ľ":134313,"Ġê·ľìłķ":134314,"ĠHòa":134315,"ĠÑĤоÑĤ":134316,"ãĤĤå¤ļãģĦ":134317,"ĠاÙĦإسÙĦاÙħÙĬØ©":134318,"ãģĭãģĦ":134319,"Ñįн":134320,"ĠÑĥказан":134321,"ĠÑĤакое":134322,"ï¼³":134323,"ëĮĢíķĻ":134324,"ĠgeniÅŁ":134325,"ĠاÙĦØ®ÙĬ":134326,"ĠاÙĦØ®ÙĬارات":134327,"ãĤĴè¡ĮãģĨ":134328,"ש×ŀ×Ķ":134329,"ĠLÃłm":134330,"ÙĪÙĨÙĬ":134331,"Ġ×IJ׾×Ļ×ķ":134332,"Äĺ":134333,"à¹Ħมà¹Īสามารà¸ĸ":134334,"人ãģ¨":134335,"برز":134336,"×Ļס×ķ×ĵ":134337,"×Ĵ׾×Ļ":134338,"ĠÙĬÙĨا":134339,"ĠÙĬÙĨاÙĬر":134340,"ĠкаÑĢÑĤин":134341,"Ġtôn":134342,"à¹Ģà¸ģร":134343,"à¸Ħà¸Ķี":134344,"Ġ׾×IJ×ķר×ļ":134345,"ãĤĤãĤīãģĨ":134346,"ãģĭãģĭãĤĭ":134347,"ании":134348,"ĠaraÅŁtırma":134349,"ÙĦاØŃظ":134350,"ãģĦãĤĦ":134351,"ĠTÃłi":134352,"Ġà¸Ļà¸Ńà¸ģà¸Īาà¸ģ":134353,"Ġà¸Ļà¸Ńà¸ģà¸Īาà¸ģà¸Ļีà¹ī":134354,"ĠÄIJảng":134355,"ãģ£ãģ¦ãģįãģŁ":134356,"Ġà¸ĭึà¹Īà¸ĩà¹Ģà¸Ľà¹ĩà¸Ļ":134357,"Ġtả":134358,"ĠmożliwoÅĽÄĩ":134359,"ĠSản":134360,"Ġİki":134361,"Ġcắt":134362,"سأÙĦ":134363,"Ġbakım":134364,"شب":134365,"à¸ķีà¹ī":134366,"à¸ŀยาย":134367,"à¸ŀยายาม":134368,"à¸ªà¸±à¸Ľ":134369,"à¸ªà¸±à¸Ľà¸Ķา":134370,"à¸ªà¸±à¸Ľà¸Ķาหà¹Į":134371,"ë°Ģ":134372,"еÑĢÑĭ":134373,"Ġcánh":134374,"Ġthuế":134375,"تبع":134376,"ãģ«åħ¥ãĤĮ":134377,"ÑİÑģÑĮ":134378,"íļĮìĿĺ":134379,"ç°¡åį":134380,"ç°¡åįĺ":134381,"ç°¡åįĺãģ«":134382,"Ġtrúc":134383,"ĠاÙĦÙĥÙĪÙĬ":134384,"ĠاÙĦÙĥÙĪÙĬت":134385,"ãĤıãģijãģ§ãģĻ":134386,"ĠÑģвоб":134387,"ĠÑģвобод":134388,"ĠÑĥÑĩаÑģÑĤник":134389,"สิà¹īà¸Ļ":134390,"ĠпÑĢоÑĦеÑģÑģиона":134391,"ĠпÑĢоÑĦеÑģÑģионалÑĮн":134392,"ÑģпоÑĢ":134393,"×Ĺ×ķ×ij×Ķ":134394,"ÙħعÙĨÙī":134395,"ĠاÙĦÙģØªØ±Ø©":134396,"สูà¸ĩสุà¸Ķ":134397,"ãĤıãģļ":134398,"ĠÄijè":134399,"ĠÄijèn":134400,"æ¯Ķãģ¹":134401,"าà¸ĺิ":134402,"Ġmożemy":134403,"à¹ģà¸ĭ":134404,"à¸Īะà¹Ħมà¹Ī":134405,"Ġsắp":134406,"ÐļÐŀ":134407,"Ġpráctica":134408,"ÙĪÙĥاÙĦØ©":134409,"è¾¼ãĤĵãģ§":134410,"ológica":134411,"ĠеÑī":134412,"ĠеÑīÑij":134413,"تعدÙĬÙĦ":134414,"ĠØ£Ùĥد":134415,"Ġצר×Ļ׼":134416,"Ġצר×Ļ׼×Ļ×Ŀ":134417,"Ø«Ùħ":134418,"ĠкÑĢÑĥ":134419,"ĠкÑĢÑĥп":134420,"×ij×Ļ×§×ķרת":134421,"Ġì¡°ê¸Ī":134422,"ãģ¨ãģįãģ¯":134423,"Ġbạc":134424,"ĠÑĢаÑģпол":134425,"ĠÑĢаÑģполож":134426,"ĠÑĢаÑģположен":134427,"زÙĬÙĨ":134428,"ĠÐļÑĢоме":134429,"ĠاÙĦÙĨظر":134430,"×Ķ×ķ×ĵ":134431,"ĠاÙĦسبت":134432,"ã썿ĢĿãģĦ":134433,"ĠpaÅĦst":134434,"ĠpaÅĦstw":134435,"ĠÙĦÙĬست":134436,"ĠбÑĥдÑĥ":134437,"à¸Ĺัà¸Ļà¸Ĺี":134438,"ราม":134439,"ØŃصÙĪÙĦ":134440,"ãģĹãģ¦ãģıãĤĮãĤĭ":134441,"ĠاÙĦإسرا
ئÙĬÙĦ":134442,"ĠاÙĦإسرائÙĬÙĦÙĬ":134443,"ãģĵãĤĮãģ¾ãģ§":134444,"ìĤ¬ë¥¼":134445,"Ġsürü":134446,"à¹Ģวà¸Ńรà¹Į":134447,"à¹Ģà¸ĭà¸Ńรà¹Į":134448,"Ġutilisé":134449,"ĠÑģиÑģÑĤема":134450,"Ġdwó":134451,"Ġdwóch":134452,"Ġpróprio":134453,"Ġëĵ±ìĿĦ":134454,"arrêt":134455,"ĠЧа":134456,"×IJ×ŀ׳×ķת":134457,"عارض":134458,"à¹Ģà¸ģมสà¹Į":134459,"Ġ׾×Ķ×ij×Ļף":134460,"Ġ׾×ij×Ĺ":134461,"Ġ׾×ij×Ĺ×ķר":134462,"สาà¸Ĥา":134463,"ĠÐľÐ¾Ñģкве":134464,"بعد":134465,"ĠاÙĦÙĤرار":134466,"ĠÄIJá»ĭa":134467,"Ġ×Ĺ×Ĵ":134468,"ÙģØªØ±":134469,"ÙĪÙĨØ©":134470,"Ġ×Ķ×ĸ×IJת":134471,"å¸Ĥãģ®":134472,"ãģ»ãģĹãģĦ":134473,"Ġ×ij×¢×Ļר":134474,"ĠÑĤепеÑĢÑĮ":134475,"ìĬµëĭĪê¹Į":134476,"à¹Ħมà¹Īว":134477,"à¹Ħมà¹Īวà¹Īา":134478,"à¹Ħมà¹Īวà¹Īาà¸Īะ":134479,"×ŀ×IJ×Ķ":134480,"æĥħåł±":134481,"æĥħåł±ãĤĴ":134482,"غÙĨ":134483,"ĠпоÑı":134484,"ĠпоÑıви":134485,"éģİãģĶ":134486,"تشغ":134487,"تشغÙĬÙĦ":134488,"вел":134489,"Ġ×Ĺ×ŀ":134490,"ãģ¨ãģªãĤĬãģ¾ãģĻ":134491,"ĠraÄŁ":134492,"ĠraÄŁmen":134493,"ãģĭãģ©ãģĨ":134494,"ãģĭãģ©ãģĨãģĭ":134495,"енко":134496,"ì§Ģê³ł":134497,"Ġ×IJ׾×Ļ×Ķ":134498,"ĠØ£ÙĦ":134499,"à¸Īำหà¸Ļ":134500,"à¸Īำหà¸Ļà¹Īาย":134501,"nızı":134502,"Ġ׾ק×Ĺת":134503,"Ø£ÙĩÙħ":134504,"Ø£ÙĩÙħÙĬØ©":134505,"تغÙĬر":134506,"ש×Ĺר":134507,"ס×ķפר":134508,"×ĵ×Ļר":134509,"èī¯ãģĭãģ£ãģŁ":134510,"×ŀ׾×Ĺ×ŀ×Ķ":134511,"ÑģÑĤвие":134512,"ÑĤÑĢаÑĤ":134513,"ĠاÙĦأخ":134514,"ĠاÙĦأخÙĬرة":134515,"ĠاÙĦØŃصÙĪÙĦ":134516,"Ġcrédito":134517,"צ×Ļ×¢":134518,"ãĥ¬ãĥĻãĥ«":134519,"برÙĬ":134520,"ëIJIJ":134521,"ãģłãģ£ãģ¦":134522,"ĠrealtÃł":134523,"سÙ쨱":134524,"×ķ׳×ķ":134525,"×Ĵ×ķ×ĵ":134526,"×Ĵ×ķ×ĵ׾":134527,"ฮา":134528,"ãģĹãģ¦ãģĬãĤĬãģ¾ãģĻ":134529,"ĠgÃł":134530,"Ġ׾×ijצע":134531,"å¼ķè¶ĬãģĹ":134532,"Ġ×ŀ×Ļ׾×Ļ":134533,"Ġ×ŀ×Ļ׾×Ļ×ķף":134534,"Ùħدر":134535,"Ùħدرسة":134536,"פ×ķ×ĺ":134537,"à¸Ļà¹īำมัà¸Ļ":134538,"ëģĿ":134539,"عÙĥس":134540,"ĠÙĤض":134541,"ĠÑĢÑĭб":134542,"خطط":134543,"×ŀ×ķס×ĵ":134544,"Ġ׼׾׾×Ļ":134545,"ĠкоÑĤоÑĢое":134546,"צ×Ļ×ķף":134547,"ĠмеÑģÑĤа":134548,"ãģĭãģ¤":134549,"гÑĢÑĥпп":134550,"׾×Ļ׾":134551,"ת×ķ×IJר":134552,"ë³µì§Ģ":134553,"à¹ģà¸ľà¹Īà¸Ļ":134554,"Ġ×ijעת":134555,"æĻĤéĸĵãĤĴ":134556,"ï¼£":134557,"ãģ¨ãģĦãģĨãģĵãģ¨ãģ§":134558,"Ġ׾×Ķ×§":134559,"Ġ׾×ĸ×Ķ":134560,"ĠìłĢëĬĶ":134561,"ĠاÙĦإرÙĩاب":134562,"ĠìŀĪëĬĶëį°":134563,"ĠÑĤогда":134564,"Ġ×Ķצ×Ļ":134565,"×ķ׾×ĺ":134566,"Ġרפ×ķ×IJ×Ļ":134567,"ãģĵãģ¨ãģ§ãģĻ":134568,"ĠÄijÃŃch":134569,"ØŃÙĬا":134570,"Ġ×Ķ×ŀש×Ĺ×§":134571,"ãģľãģ²":134572,"Ġ×ŀ×IJפשר":134573,"ãģ¿ãģ¾ãģĹãģŁ":134574,"ĠاÙĦØ£ÙħÙĬرÙĥÙĬ":134575,"ÙħجتÙħع":134576,"Ġساب":134577,"ĠسابÙĤ":134578,"׼×Ļ׾":134579,"Ế":134580,"ãĥªãĤ¹ãĥĪ":134581,"Ġìĥ":134582,"ĠìĥĪ":134583,"ĠìĥĪë¡ľ":134584,"ĠìĥĪë¡ľìļ´":134585,"ĠDá»ĭch":134586,"à¹Ģหมาะสม":134587,"ĠاÙĦÙĨبÙĬ":134588,"׾׾":134589,"ÙĨع":134590,"Ðĵлав":134591,"ÐĵлавнаÑı":134592,"Ùħرض":134593,"Ġ×ķ×ĵ":134594,"تÙĤÙĬ":134595,"تÙĤÙĬÙĬÙħ":134596,"Ġbảng":134597,"ĠÙģÙĤاÙĦ":134598,"×¢×ŀ×Ļ":134599,"дÑĢа":134600,"Ġsuá»ijt":134601,"سرعة":134602,"Ġcá»Ń":134603,"Ġ×Ķ×Ļ×Ĺ×Ļ×ĵ":134604,"سعÙĬد":134605,"à¸Ńาà¸Ĭีà¸ŀ":134606,"ĠسÙĪØ§Ø¡":134607,"ãĤ½ãĥķãĥĪ":134608,"ĠлиÑĩно":134609,"ĠÐļоÑĢ":134610,"اÙĩتÙħ":134611,"اÙĩتÙħاÙħ":134612,"à¸Ńà¸Ķี":134613,"à¸Ńà¸Ķีà¸ķ":134614,"ãģIJãĤīãģĦ":134615,"Ġihtiya":134616,"Ġihtiyaç":134617,"ãģ¾ãģ§ãģ®":134618,"ìĭľìĬ¤":134619,"ìĭľìĬ¤íħľ":134620,"ÑĢÑĥÑĪ":134621,"ãĤĦãģ£ãģ±":134622,"ãĤĦãģ£ãģ±ãĤĬ":134623,"кеÑĢ":134624,"Ġży":134625,"Ġżyw":134626,"клон":134627,"Ġlượt":134628,"þ":134629,"даÑĩи":134630,"türk":134631,"غÙĪ":134632,"ĠигÑĢок":134633,"Ġphê":134634,"Ġ×©×¢×ľ":134635,"ĠاÙĦÙħدÙĨÙĬ":134636,"ĠìŬ룬ë¶Ħ":134637,"ער×Ļ×Ŀ":134638,"ÑħодÑıÑĤ":134639,"Ġxứ":134640,"ÐĹа":134641,"ĠÙģØ±Øµ":134642,"à¸Īะà¸Ĺำà¹ĥหà¹ī":134643,"íģ´":134644,"×¢×ij×ķר":134645,"à¹Ģหลà¹Īาà¸Ļีà¹ī":134646,"èĢĥãģĪãĤĭ":134647,"ÑĢеÑģÑĤ":13464
8,"ннÑĭй":134649,"Ġcầm":134650,"داخÙĦ":134651,"ĠÙħÙĦÙĬار":134652,"ĠÐIJл":134653,"ĠвÑĢемен":134654,"à¸Ĭà¹Īวยà¹ĥหà¹ī":134655,"ר×Ļ×ķת":134656,"ëĵ¯":134657,"飲ãģ¿":134658,"׳׾":134659,"שתף":134660,"ĠاÙĦسعÙĪØ¯ÙĬ":134661,"uÃŁ":134662,"ìĿ¸ëį°":134663,"ĠìĿ¼ë°ĺ":134664,"ÅĤÄĻ":134665,"Ġmá»iji":134666,"×ŀ×Ļ׳":134667,"ĠاÙĦأطÙ쨧ÙĦ":134668,"Ġçıkan":134669,"école":134670,"×§×Ļש":134671,"×§×Ļש×ķר":134672,"ĠоÑģÑĥÑīеÑģÑĤв":134673,"ĠоÑģÑĥÑīеÑģÑĤвлÑı":134674,"×ij×IJר":134675,"à¹Ħà¸Ľà¸Ķà¹īวย":134676,"Ġ×¢×ķ׾×Ķ":134677,"à¸ģà¹ĩà¹Ħมà¹Ī":134678,"ãĥ¢ãĥĩ":134679,"ãĥ¢ãĥĩãĥ«":134680,"تØŃÙĪÙĦ":134681,"Ġодного":134682,"ת×Ĺ×Ļ×ľ×ª":134683,"Ġتخ":134684,"Ġchcia":134685,"ĠchciaÅĤ":134686,"ãĥIJãĥ³":134687,"èĢħãģ¯":134688,"ĠÙħØŃÙĦ":134689,"Ñģлож":134690,"Ñģложн":134691,"ĠtÄĻ":134692,"Ġçıkt":134693,"Ġçıktı":134694,"ĠCÆ¡":134695,"à¹Ħà¸Ķà¹īà¹Ģลย":134696,"ırken":134697,"à¹Ģà¸Ĥà¹īาสูà¹Ī":134698,"ÙħØŃÙĥ":134699,"ÙħØŃÙĥÙħØ©":134700,"à¸Ħุà¹īม":134701,"à¸Ļà¹Īาà¸Īะ":134702,"лÑİд":134703,"деÑģÑı":134704,"деÑģÑıÑĤ":134705,"ĠлÑİбой":134706,"تØŃرÙĬر":134707,"צע×ĵ":134708,"ĠеÑij":134709,"ĠاÙĦØŃÙĥÙħ":134710,"ĠصباØŃ":134711,"à¹Ģà¸ļà¸Ńรà¹Į":134712,"Ġróżnych":134713,"гиб":134714,"ĠÑģоÑĤ":134715,"ĠÑģоÑĤÑĢÑĥд":134716,"ĠÑģоÑĤÑĢÑĥдник":134717,"ĠобÑĬем":134718,"פ×ĺר":134719,"ãģĻãģĶãģı":134720,"ãģ«éĸ¢ãģĹãģ¦":134721,"вол":134722,"Ø«ÙħاÙĨ":134723,"Ġdần":134724,"æĬľ":134725,"æĬľãģij":134726,"Ġעש":134727,"Ġעש×ķ×Ļ":134728,"ס×ķף":134729,"ãģªãģ®ãģ§ãģĻ":134730,"ãģ¯ãģ©ãģĨ":134731,"×ŀער×ij":134732,"ï¼°":134733,"Ùħصر":134734,"ÙħÙĨاسب":134735,"ÙħÙĨاسبة":134736,"ä¸Ĭãģ®":134737,"×IJ×Ļש×ķר":134738,"ĠìĦ¤ì¹ĺ":134739,"×ŀ×ĵ×Ļ׳×ķת":134740,"×ŀרת":134741,"ãĤĭãģ®ãģĮ":134742,"دÙİ":134743,"ĠاÙĦشرÙĥات":134744,"ìĭľê°Ħ":134745,"ĠÑĢеÑĪение":134746,"ãģĻãĤĭãģ®ãģ¯":134747,"ĠìŀIJìĭłìĿĺ":134748,"׾×ŀ×ķ":134749,"ãģ¨ãģĵãĤįãģ§":134750,"Ġקצר":134751,"Ġmãi":134752,"Ġkültür":134753,"ãĥ©ãĤ¤ãĥĸ":134754,"à¸ľà¸¹à¹īหà¸įิà¸ĩ":134755,"æĻĤéĸĵãģĮ":134756,"клÑİÑĩи":134757,"diÄŁiniz":134758,"มาà¸ģà¹Ĩ":134759,"تØŃÙħÙĦ":134760,"Ġhạt":134761,"ãĤ¦ãĤ£":134762,"пле":134763,"×ŀ׾×IJ":134764,"ÅĤó":134765,"Ġgá»ijc":134766,"Ġ×IJ×ķ×ĵ×ķת":134767,"หวาà¸Ļ":134768,"ĠاÙĦÙĪØ²":134769,"ĠاÙĦÙĪØ²Ø±Ø§Ø¡":134770,"ëĵ¤ê³¼":134771,"ĠصØŃ":134772,"ĠصØŃÙĬÙ쨩":134773,"Ġмм":134774,"تدخÙĦ":134775,"Ġpersönlich":134776,"ĠزÙĬ":134777,"ĠزÙĬادة":134778,"ãĤ·ãĤ¢":134779,"Ġngắn":134780,"à¸Ħลิà¸ģ":134781,"Ġsông":134782,"Ġtüket":134783,"ÑįÑĦÑĦ":134784,"ÑįÑĦÑĦекÑĤ":134785,"ש×Ļ×ij":134786,"Ġاعت":134787,"تض":134788,"تضÙħÙĨ":134789,"ĠاÙĦÙħشرÙĪØ¹":134790,"Ġprodução":134791,"ĠпÑĢименÑı":134792,"ниÑĨÑĭ":134793,"주ëĬĶ":134794,"رÙı":134795,"ĠmÆ¡":134796,"Ġhayatı":134797,"ëŁ½":134798,"Ġücret":134799,"Ġyanında":134800,"Ġprática":134801,"×ij×Ļ×§×ķר":134802,"ÃľN":134803,"ÑģоÑĤ":134804,"ãĤıãģijãģ§":134805,"Ġдолго":134806,"×ª×Ľ×ķ":134807,"ĠìķĦëĭĮ":134808,"ëį°ìĿ´":134809,"Ġçiz":134810,"ĠchoÄĩ":134811,"Ġ×Ķ×Ļת":134812,"Ġ×Ķ×Ļתר":134813,"Ġsoát":134814,"׼×ij×ĵ":134815,"à¹Ģลà¹Īา":134816,"ĠдеÑĢ":134817,"ĠдеÑĢев":134818,"ãĤĴåħ¥ãĤĮ":134819,"×Ĺ×ķס":134820,"×Ĺ×ķסר":134821,"جÙĬÙĨ":134822,"tón":134823,"onné":134824,"ĠполноÑģÑĤÑĮÑİ":134825,"人ãģŁãģ¡":134826,"Ġprêt":134827,"본":134828,"Ġdécembre":134829,"cılar":134830,"Ġתת":134831,"Ġê²½ìļ°ìĹIJëĬĶ":134832,"ÙĪØ¹Ø¯":134833,"è¦ĭãĤĭ":134834,"วิà¸Īัย":134835,"ë¶Ī":134836,"زÙĪØ§":134837,"زÙĪØ§Ø¬":134838,"dì":134839,"ãģ§ãģĻãĤĪ":134840,"Ġводо":134841,"ĠÙĬÙĪØ¬Ø¯":134842,"ÑģоÑģÑĤоÑı":134843,"ÐŀС":134844,"ĠÄIJó":134845,"×Ĺפש":134846,"Ġצ×Ļ×ij×ķר":134847,"ĠاÙĦÙĤØ·":134848,"ĠاÙĦÙĤطاع":134849,"ĠимеÑİÑĤ":134850,"ĠpháºŃn":134851,"×Ľ×¡×¤×Ļ":134852,"полниÑĤелÑĮ":134853,"éĻIJãĤĬ":134854,"ĠÑģÑĢав":134855,"ĠÑģÑĢавн":134856,"ÙħاÙĦÙĥ":134857,"×ĵר×ķ×Ŀ":13485
8,"çļĨãģķãĤĵ":134859,"ØŃÙĤÙĤ":134860,"à¹ģหลà¹Īà¸ĩ":134861,"ĠاÙĦرسÙħÙĬ":134862,"оÑĩки":134863,"×ĺ×ij×Ĺ":134864,"Ġcanlı":134865,"Ġ׾׾":134866,"Ġ׾׾×ŀ×ķ×ĵ":134867,"×ŀ×ij×ķ":134868,"×ª×Ľ":134869,"×ª×Ľ×ł×Ļת":134870,"ĠاÙĦÙħشار":134871,"ĠاÙĦÙħشارÙĥØ©":134872,"İÅŀ":134873,"ĠسÙĬاسÙĬ":134874,"волÑĮ":134875,"ĠÑģпÑĢав":134876,"æĿ¥ãģ¦":134877,"פ×ķר×ķ×Ŀ":134878,"สำà¹Ģรà¹ĩ":134879,"สำà¹Ģรà¹ĩà¸Ī":134880,"ĠÅŁÃ¶yle":134881,"ĠzostaÅĤa":134882,"ĠHü":134883,"ר×ķש":134884,"دÙĦÙĬÙĦ":134885,"ÑĢид":134886,"שף":134887,"×ŀ×§×ķר":134888,"ĠÑĥÑĩ":134889,"ĠÑĥÑĩеб":134890,"ĠÑįÑĤа":134891,"кова":134892,"à¸ķà¸Ļà¹Ģà¸Ńà¸ĩ":134893,"ÙĨÙIJ":134894,"à¸Ńีà¸ģà¸Ħรัà¹īà¸ĩ":134895,"ระà¸ļุ":134896,"Ġdữ":134897,"ĠاÙĦØŃاÙĦÙĬ":134898,"׼×ķ׼":134899,"׼×ķ׼×ij":134900,"Ġ×ŀ×IJשר":134901,"Ġtrụ":134902,"ÑĤелем":134903,"Ġвли":134904,"ĠвлиÑı":134905,"Ġש×IJת×Ŀ":134906,"Ġuwag":134907,"ĠuwagÄĻ":134908,"×ĺ×Ļת":134909,"×IJ×ĵ×Ŀ":134910,"à¸Ķุ":134911,"Ġ×Ķ×IJ׾×Ķ":134912,"ĠkarÄ±ÅŁ":134913,"ĠÄIJá»iji":134914,"даÑİÑĤ":134915,"ãģªãģ®ãģ«":134916,"Äħcych":134917,"à¹Ģà¸Ļà¹īà¸Ļ":134918,"ãģĹãģ¦ãģĹãģ¾ãģĨ":134919,"intérieur":134920,"ĠfÃŃsica":134921,"ĠÐŁÐ¾Ð»":134922,"ãģĹãģķ":134923,"à¸Ĺำà¹Ħม":134924,"ĠLâm":134925,"ĠاÙĦÙħسÙĦÙħ":134926,"ĠاÙĦÙħسÙĦÙħÙĬÙĨ":134927,"صØŃØ©":134928,"ìĹĦ":134929,"à¹Ģà¸Ķà¹ĩà¸Ķ":134930,"ĠÑĥÑĩеÑĤ":134931,"âÌģ":134932,"ĠبÙĦا":134933,"ĠاÙĦاجتÙħاعÙĬ":134934,"פרס×Ŀ":134935,"ãĥķãĥ©":134936,"ĠÐļогда":134937,"mieÅĽci":134938,"ĠبÙĬÙĨÙħا":134939,"Ġ×ŀ×IJ×ŀר×Ļ×Ŀ":134940,"Ġ×ij×IJ×ĸ×ķר":134941,"×ķש×Ļ×Ŀ":134942,"ĠÑģдела":134943,"entrée":134944,"à¹Ģà¸Ħà¹īา":134945,"Ñĥгл":134946,"ĠاÙĦÙģÙĨÙĬ":134947,"ĠÐĴоÑĤ":134948,"à¸Ĺีà¹Īมา":134949,"×ķצ×Ĵ":134950,"ÙĤدرة":134951,"Ġ목":134952,"Ġ목ìłģ":134953,"íıīê°Ģ":134954,"ĠاÙĦأربع":134955,"ĠاÙĦأربعاء":134956,"פס×Ļ×§":134957,"ĠÑıвлÑıÑİÑĤÑģÑı":134958,"بÙĪÙĨ":134959,"ì°¾":134960,"×ŀ×¢×¨×Ľ":134961,"×ŀ×¢×¨×Ľ×ķת":134962,"ãĤ·ãĤ§":134963,"ĠباÙĦØ£":134964,"íĸĪëįĺ":134965,"ĠاÙĦبرÙĨاÙħج":134966,"ĠاÙĦØ£ØŃد":134967,"ĠmÅ©":134968,"ĠmÅ©i":134969,"паÑĤ":134970,"بث":134971,"ĠÑĨенÑĭ":134972,"Ġ×ij×ª×ľ":134973,"è¨ĢãĤıãĤĮ":134974,"ĠاÙĦÙħجاÙĦ":134975,"ĠìĦ¸ìĥģ":134976,"Ġ×Ĵ×ķפ":134977,"ĠнаÑĪей":134978,"ĠкомпаниÑı":134979,"бин":134980,"ölü":134981,"×Ļ×Ļ×ĺ":134982,"Ġ×ŀספ×Ļ×§":134983,"ยัà¸ĩà¸Ħà¸ĩ":134984,"ĠЧи":134985,"ĠанÑĤи":134986,"ĠÑģÑĢеди":134987,"สà¹Īวà¸Ļà¹ĥหà¸įà¹Ī":134988,"оÑĩка":134989,"íĬ¹ë³Ħ":134990,"วà¹Īาà¸ĩ":134991,"гоÑĢод":134992,"باÙĥ":134993,"à¹Ģสีà¹Īย":134994,"à¹Ģสีà¹Īยà¸ĩ":134995,"ãĤĤãĤīãģĦ":134996,"×§×ķ×Ŀ":134997,"ãģĽãģļ":134998,"ĠاÙĦÙĤاÙĩرة":134999,"Ġ×ij׼×ļ":135000,"ÙħشارÙĬع":135001,"باØŃØ«":135002,"ĠпоÑĩ":135003,"ĠпоÑĩÑĤи":135004,"ĠÑĦоÑĢма":135005,"Sİ":135006,"Ġ×ŀצ×Ļ×¢":135007,"ลื":135008,"ลืม":135009,"ĠÑĤеÑĢ":135010,"ĠÑĤеÑĢÑĢиÑĤоÑĢ":135011,"ĠÑĤеÑĢÑĢиÑĤоÑĢии":135012,"ĠвмеÑģÑĤ":135013,"ĠвмеÑģÑĤе":135014,"dıkları":135015,"opération":135016,"à¹Ĥห":135017,"صدÙĬ":135018,"صدÙĬÙĤ":135019,"íĸīìłķ":135020,"تجا":135021,"تجاÙĪØ²":135022,"Ġsuç":135023,"Ġarty":135024,"Ġartyku":135025,"ĠartykuÅĤ":135026,"ãĤ·ãĥ§ãĥĥãĥĹ":135027,"שפ":135028,"שפ×Ļ×¢":135029,"Ġ×Ķש×Ļר×ķת":135030,"à¹ģà¸ĸม":135031,"ë¸Ķ":135032,"ĠukÅĤad":135033,"Ġ×ķ׼×Ļ":135034,"หลาà¸ģ":135035,"หลาà¸ģหลาย":135036,"æĸ¹ãĤĤ":135037,"Ġpodróż":135038,"ĠEÄŁer":135039,"ĠкомнаÑĤ":135040,"ĠÑģамÑĭÑħ":135041,"ĠвкÑĥÑģ":135042,"беж":135043,"Ġ×ij×§×ķ":135044,"æİĽãģij":135045,"ãģ¿ãĤĭãģ¨":135046,"ĠiliÅŁkin":135047,"ĠÙĬعÙħÙĦ":135048,"ĠподаÑĢ":135049,"Ġyazılı":135050,"ãĤĴå¾Ĺ":135051,"ĠwystÄĻp":135052,"à¸Ĺีà¹Īà¹ĥà¸Ĭà¹ī":135053,"ØŃادث":135054,"ÙĪÙĬد":135055,"кÑĥлÑĮÑĤ":135056,"кÑĥлÑĮÑĤÑĥÑĢ":135057,"à¸ģารà¹ģà¸Ĥà¹Īà¸ĩ":135058,"à¸ģารà¹ģà¸Ĥà¹Īà¸ĩà¸Ĥ":135059,"à¸ģารà¹ģà¸Ĥà¹Īà¸ĩà¸Ĥัà¸Ļ":135060,"ÙħÙĪØ¸":135061,"ÙħÙĪØ¸Ùģ":13
5062,"ÙĬÙħÙĬ":135063,"ãĤĵãģ§ãģĻãģĮ":135064,"diÄŁim":135065,"diÄŁimiz":135066,"ĠÐŁÐµÑĢ":135067,"ĠÐŁÐµÑĢв":135068,"Ġmão":135069,"ĠÑģез":135070,"ĠÑģезон":135071,"Ġ×Ķ×ŀ×¢":135072,"ÙħجÙħÙĪØ¹Ø©":135073,"ĠинÑĦоÑĢмаÑĨии":135074,"iếc":135075,"ãng":135076,"ĠÄijấy":135077,"ãģĶç´":135078,"ãģĶç´¹":135079,"ãģĶç´¹ä»ĭ":135080,"Ġadım":135081,"à¹Ħหล":135082,"ĠпÑĢакÑĤи":135083,"ĠпÑĢакÑĤиÑĩ":135084,"ĠпÑĢакÑĤиÑĩеÑģ":135085,"ĠпÑĢакÑĤиÑĩеÑģки":135086,"ĠاÙĦÙĨÙ쨳":135087,"ĠÑĢабоÑĤе":135088,"ÙĦÙĬÙģ":135089,"ĠاÙĦجÙĨÙĪØ¨":135090,"ĠводÑĭ":135091,"ì¹Ļ":135092,"ĠмиÑĢа":135093,"ĠÄijừng":135094,"ĠпÑĢоÑĤиво":135095,"ĠÑģÑĤÑĢанÑĭ":135096,"ลู":135097,"ìĤ¶":135098,"kreÅĽl":135099,"Ġbulund":135100,"ĠbulunduÄŁu":135101,"à¹ģสà¸Ļ":135102,"ãĤ±ãĤ¢":135103,"ת×Ĺ×ķ×ŀ×Ļ":135104,"ר׼×Ķ":135105,"Ġ׾ק×ķ×Ĺ":135106,"Ġ׾ק×ķ×Ĺ×ķת":135107,"Ġ×Ľ×ª×ķ×ijת":135108,"ĠÙĦÙĥÙħ":135109,"بشر":135110,"ĠrÃłng":135111,"Ġ×ŀ×Ķ×ŀ":135112,"Ġ×IJ×Ĺר×ķת":135113,"Ġбон":135114,"ĠбонÑĥÑģ":135115,"ï½Ĺ":135116,"à¹ģยà¸ģ":135117,"ãģĤãģªãģŁãģ®":135118,"ĠÑĥÑĩаÑģÑĤие":135119,"ĠEyl":135120,"ĠEylül":135121,"ĠçalÄ±ÅŁmaları":135122,"خطر":135123,"ìĿ½":135124,"à¸ģารà¹ĥà¸Ĭà¹īà¸ĩาà¸Ļ":135125,"Ġанализ":135126,"תק×ij׾":135127,"нием":135128,"Ġİns":135129,"Ġİnsan":135130,"ĠبÙĪØ§Ø³":135131,"ĠبÙĪØ§Ø³Ø·Ø©":135132,"Ġ×ł×Ľ×ł×¡":135133,"Ġ×Ķ×ŀ×Ļ×ĵ×¢":135134,"Ġço":135135,"ĠçoÄŁu":135136,"á»ĺ":135137,"ĠêµŃ민":135138,"ãĤĤãģĦãģĦ":135139,"Ġ׼׾×Ļ":135140,"ĠÑģÑĢедне":135141,"gÅĤo":135142,"gÅĤoÅĽ":135143,"Ġnegó":135144,"Ġnegócio":135145,"ĠÑĢегиÑģÑĤ":135146,"ĠÑĢегиÑģÑĤÑĢа":135147,"ĠÑĢегиÑģÑĤÑĢаÑĨии":135148,"Ġtrá»ĵng":135149,"ĠпÑĢÑı":135150,"ĠпÑĢÑıмо":135151,"ëłĪìĿ´":135152,"Ġkém":135153,"кле":135154,"à¸Ļำมา":135155,"ĠÑĦин":135156,"ĠÑĦинанÑģ":135157,"ĠÑĦинанÑģов":135158,"Ġkiá»ĩm":135159,"ยัà¸ĩà¹Ħ":135160,"ยัà¸ĩà¹Ħà¸ĩ":135161,"ยิà¸ĩ":135162,"à¹Ĥà¸Ľ":135163,"ĠполÑĥÑĩил":135164,"×Ļ×ĸ×Ŀ":135165,"à¹ģละà¸Ħวาม":135166,"ĠвообÑīе":135167,"صÙĬر":135168,"ãĥıãĥ³":135169,"ĠاÙĦÙĤاد":135170,"ĠاÙĦÙĤادÙħ":135171,"ĠبدÙĪÙĨ":135172,"عظÙħ":135173,"×ª×ł×ķ×¢":135174,"×ª×ł×ķ×¢×Ķ":135175,"Ø£ÙħÙĦ":135176,"ãģķãģĪ":135177,"ÑĤем":135178,"ÑĤемпеÑĢ":135179,"ÑĤемпеÑĢаÑĤÑĥÑĢ":135180,"Ġ׾×Ļצ×ķר":135181,"ĠrÄĻk":135182,"رسÙĦ":135183,"ìŀIJ를":135184,"Ġ×Ļצ×Ļרת":135185,"ÙĨبÙĬ":135186,"ÑĩнаÑı":135187,"تØŃÙĦÙĬÙĦ":135188,"Ġмик":135189,"ĠмикÑĢо":135190,"ĠSöz":135191,"Ġforça":135192,"Ñģон":135193,"ĠاÙĦعرا":135194,"ĠاÙĦعراÙĤÙĬ":135195,"ĠHá»ĵng":135196,"ãģĻãĤĭãģŁãĤģãģ«":135197,"à¸Ĺีà¹Īà¸Ńยูà¹Ī":135198,"Ġ×ķ×IJ×£":135199,"صÙĬد":135200,"ĠìķĬê³ł":135201,"รัà¸ĩ":135202,"ĠاÙĦتÙĪØ§ØµÙĦ":135203,"à¹Ģมà¸ķร":135204,"ÑĥÑģÑĤÑĢой":135205,"ÑĥÑģÑĤÑĢойÑģÑĤв":135206,"mıyor":135207,"ĠباسÙħ":135208,"Ġ×ķ׼×ķ":135209,"ĠGül":135210,"á»IJ":135211,"Ãītat":135212,"غاÙĦ":135213,"Ø¥ÙĨØ´":135214,"Ø¥ÙĨشاء":135215,"Tİ":135216,"à¸Ĥà¹īาม":135217,"Ġtroch":135218,"ĠtrochÄĻ":135219,"إص":135220,"إصابة":135221,"ĠثاÙĨÙĬ":135222,"ĠاÙĦصØŃØ©":135223,"Ġ×ĸ×Ķ×ķ":135224,"jÄħcej":135225,"ãĥĢãĥ³":135226,"ìĿ¸ìĿ´":135227,"ĠволоÑģ":135228,"ëIJĺë©´":135229,"ĠzakÅĤad":135230,"ãģĻãģĵãģ¨":135231,"以ä¸Ĭãģ®":135232,"Ġ×Ķ×ŀ×§×ķ×Ŀ":135233,"ÙħشاÙĩ":135234,"ÙħشاÙĩدة":135235,"Ñĩив":135236,"بش":135237,"ยà¹īาย":135238,"Ġsürdür":135239,"ĠNẵ":135240,"ĠNẵng":135241,"ĠигÑĢаÑĤÑĮ":135242,"Ġê·¸ëŁ¬ë©´":135243,"ãĥķãĥ«":135244,"ลà¹Īะ":135245,"Ġtendrá":135246,"ĠbÃły":135247,"à¹Ģà¸Ľà¹ĩà¸Ļà¸ľà¸¹à¹ī":135248,"Ġoko":135249,"ĠokoÅĤo":135250,"wÅĤa":135251,"wÅĤaÅĽci":135252,"wÅĤaÅĽciw":135253,"æĢĿãĤı":135254,"ĠYaÅŁ":135255,"ĠBá»ĩnh":135256,"íıŃ":135257,"بÙĬد":135258,"קרף":135259,"à¹Ģศร":135260,"à¹Ģศรษ":135261,"à¹Ģศรษà¸IJ":135262,"à¹Ģศรษà¸IJà¸ģิà¸Ī":135263,"ĠاÙĦØ£ÙĪØ±ÙĪ":135264,"ĠاÙĦØ£ÙĪØ±ÙĪØ¨ÙĬ":135265,"fläche":135266,"ä¹ĹãĤĬ":135267,"Ġbá»ģn"
:135268,"Ùĩب":135269,"æľĢãĤĤ":135270,"Ġsaç":135271,"à¸Ńำà¹Ģà¸ł":135272,"à¸Ńำà¹Ģà¸łà¸Ń":135273,"Ġأج":135274,"ĠاÙĦداخÙĦ":135275,"ĠاÙĦداخÙĦÙĬØ©":135276,"×ĺ×ķ×ij":135277,"ãĤĤãģªãģı":135278,"ĠлиÑĨа":135279,"à¹ģลà¹īวà¸ģà¹ĩ":135280,"×ĸ׼×Ļר":135281,"ĠquÃł":135282,"ĠÙĥذÙĦÙĥ":135283,"صØŃÙģ":135284,"ĠÃĤu":135285,"ÙĪØ¨Ø§":135286,"à¹Ģà¸Ľà¸¥à¸µà¹Īยà¸Ļà¹ģà¸Ľà¸¥":135287,"à¹Ģà¸Ľà¸¥à¸µà¹Īยà¸Ļà¹ģà¸Ľà¸¥à¸ĩ":135288,"à¸ķัวà¸Ńยà¹Īาà¸ĩ":135289,"Ġrápida":135290,"Ġtasar":135291,"Ġtasarım":135292,"ĠعÙĦÙĬÙĩÙħ":135293,"ס×ķ׾":135294,"cılı":135295,"cılık":135296,"ĠرغÙħ":135297,"ìĭľíĤ¤":135298,"Ġ×IJ׾ק":135299,"Ġ×IJ׾ק×ĺר":135300,"Ġ×IJ׾ק×ĺר×ķ׳×Ļ":135301,"à¹ģà¸ļà¹Īà¸ĩ":135302,"Ġhạng":135303,"ãģ£ãģ¦ãģıãĤĮ":135304,"ĠÙĨتÙĬ":135305,"ĠÙĨتÙĬجة":135306,"ıklı":135307,"غاÙĨ":135308,"à¸Ĥà¹īà¸Ńà¸Ħวาม":135309,"à¸Ľà¸¥à¸²à¸¢":135310,"ĠØ£Ùħس":135311,"à¸Ĺีà¹Īà¹Ģà¸ģีà¹Īยว":135312,"à¸Ĺีà¹Īà¹Ģà¸ģีà¹Īยวà¸Ĥ":135313,"à¸Ĺีà¹Īà¹Ģà¸ģีà¹Īยวà¸Ĥà¹īà¸Ńà¸ĩ":135314,"Ġdéfin":135315,"Ġdéfini":135316,"ÙģÙĨاد":135317,"ÙģÙĨادÙĤ":135318,"à¹Ħà¸Ķà¹īวà¹Īา":135319,"ãģªãģĦãĤĪãģĨãģ«":135320,"Ġprópria":135321,"ĠPhát":135322,"ãĤĦãģĻãģı":135323,"สวยà¸ĩาม":135324,"ê³łìļĶ":135325,"ÑıеÑĤ":135326,"ãģĭãĤĤãģĹãĤĮãģ¾ãģĽãĤĵãģĮ":135327,"ترجÙħ":135328,"ĠкÑĢаÑģив":135329,"Ġ×ŀר×IJש":135330,"деж":135331,"ĠÙĬÙĪÙĨ":135332,"ĠÙĬÙĪÙĨÙĬÙĪ":135333,"ÑģкоÑĢ":135334,"ĠKasım":135335,"ê³Ħìķ½":135336,"коÑģ":135337,"ĠнаÑĢÑĥ":135338,"ĠнаÑĢÑĥÑĪен":135339,"Ġduże":135340,"accès":135341,"Ġhá»ĵng":135342,"ĠvÅ©":135343,"ãģĦãģŁãģĹãģ¾ãģĻ":135344,"Ġ×ĺ×Ļ":135345,"Ġ×ĺ×Ļ×ķ׾":135346,"lıkları":135347,"Ġquê":135348,"ëħ¸ëıĻ":135349,"ìķĶ":135350,"CIÃĵN":135351,"Ġtắc":135352,"pressão":135353,"ĠìŀĪìľ¼":135354,"สิà¸Ĺà¸ĺิà¹Į":135355,"íĥĦ":135356,"Ġ×Ķ×ŀ×ŀש׾×Ķ":135357,"å¬īãģĹãģĦ":135358,"ĠÄIJặc":135359,"ÙĨزÙĦ":135360,"ĠдÑĢÑĥгой":135361,"дÑĥÑĤ":135362,"ìĪĻ":135363,"Ġthụ":135364,"à¹Ģสร":135365,"à¹Ģสรà¹ĩ":135366,"à¹Ģสรà¹ĩà¸Ī":135367,"Ġtoplant":135368,"Ġtoplantı":135369,"×IJ×ŀף":135370,"×ķ×ľ×ª":135371,"помн":135372,"ĠyoÄŁun":135373,"ÅĦskiego":135374,"ì°©":135375,"ĠØ«ÙĦاث":135376,"ĠØ«ÙĦاثة":135377,"Ġlắng":135378,"릴":135379,"ราà¸Ĭà¸ģาร":135380,"ĠÑģлова":135381,"á»Ĩ":135382,"à¸Ķีà¸ģวà¹Īา":135383,"ãģĶãģĸãģĦãģ¾ãģĻ":135384,"Ġдиз":135385,"Ġдизайн":135386,"férence":135387,"lıklar":135388,"ãģªãĤĵãģ§ãģĻ":135389,"ajÄħcy":135390,"Ġëĭ¤ìĸij":135391,"Ġëĭ¤ìĸijíķľ":135392,"×§×Ļר":135393,"ØŃار":135394,"สูà¹ī":135395,"Ġzro":135396,"Ġzrobi":135397,"ĠzrobiÄĩ":135398,"×ŀ×Ļ׼×Ķ":135399,"à¸Ĭà¹Īวยà¹Ģหลืà¸Ń":135400,"ĠÑįÑĤÑĥ":135401,"ë´ī":135402,"楽ãģĹãģĦ":135403,"سÙĪØ±":135404,"íķĺê±°ëĤĺ":135405,"ÙħؤتÙħر":135406,"ĠpoczÄħ":135407,"ĠpoczÄħtk":135408,"ĠpoczÄħtku":135409,"ĠعربÙĬ":135410,"اÙĦأر":135411,"اÙĦأردÙĨ":135412,"à¸Ķร":135413,"Åĵuvre":135414,"ĠÙĪÙĥاÙĨت":135415,"ĠÅĽredni":135416,"خضر":135417,"Ġchuyến":135418,"нÑĤ":135419,"ĠìķĮê³ł":135420,"Ġvá»Ŀi":135421,"Ġ×ij×Ļ×ĵ×Ļ":135422,"×ŀ×ĵ×ķ×ijר":135423,"ÙĪÙ쨱":135424,"ÙĬØ¡":135425,"×ł×Ľ×¡":135426,"ĠÐĽÐ°":135427,"лон":135428,"Ġxấu":135429,"ÙģÙĬÙĨ":135430,"Ġfévrier":135431,"ĠÐŀна":135432,"ĠVá»ģ":135433,"ĠÅŁeyler":135434,"ĠполÑĥÑĩен":135435,"зад":135436,"Ġnét":135437,"à¹Ħà¸Ľà¸¢à¸±à¸ĩ":135438,"×Ĺש×ij×ķ":135439,"à¸ļัà¸Ļà¸Ĺ":135440,"à¸ļัà¸Ļà¸Ĺึà¸ģ":135441,"ĠgerçekleÅŁ":135442,"иÑĩеÑģкое":135443,"ìĪĺê°Ģ":135444,"ثبت":135445,"ãģ¤ãģ¾ãĤĬ":135446,"ĠÑĥÑģловиÑıÑħ":135447,"ëĭ¤ê°Ģ":135448,"รายà¹Ħà¸Ķà¹ī":135449,"׼×IJ×ij":135450,"à¹Ĥà¸Ľà¸£à¹Ĥม":135451,"à¹Ĥà¸Ľà¸£à¹Ĥมà¸Ĭัà¹Īà¸Ļ":135452,"jähr":135453,"jährige":135454,"ק׳×Ļ×Ŀ":135455,"×ŀ×ķ×§":135456,"×ŀ×ķ×§×ĵ":135457,"ãģ«è¡Įãģ£ãģ¦":135458,"Ø¢ÙĦ":135459,"ведение":135460,"Ġ×ľ×Ľ×ª×ķ×ij":135461,"جÙħÙĩ":135462,"جÙħÙĩÙĪØ±ÙĬØ©":135463,"à¸īà¸ļ":135464,"à¸īà¸ļัà¸ļ":135465,"ĠCòn":135466,"à¸ľ
à¸ªà¸¡":135467,"ãģªãģ©ãģĮ":135468,"×IJ×Ķ×ij":135469,"ĠдейÑģÑĤвиÑı":135470,"yız":135471,"à¹Ħมà¹Īà¹Ģà¸Ħย":135472,"جÙĪØ²":135473,"×Ķ×Ĺ׾×ĺ×Ķ":135474,"fällt":135475,"ãĥĵãĤ¸":135476,"ãĥĵãĤ¸ãĥį":135477,"ãĥĵãĤ¸ãĥįãĤ¹":135478,"Ġ×IJ×Ļ׳×Ŀ":135479,"ĠнаÑħодиÑĤÑģÑı":135480,"ĠdziÅĽ":135481,"ستطÙĬع":135482,"׾×Ļף":135483,"Ø®ÙĦاÙģ":135484,"ÙĩÙIJ":135485,"Ġatrás":135486,"íĺģ":135487,"ãĤĴãģĶ":135488,"Ġ×Ķ×ŀ×ķצר":135489,"ĠBakanlıģı":135490,"ÑİÑīее":135491,"ÙħÙĨاط":135492,"ÙħÙĨاطÙĤ":135493,"Ù쨝":135494,"à¸Ļำà¹Ħà¸Ľ":135495,"Ġваж":135496,"Ġважно":135497,"Ġmạch":135498,"׼׳×ķ":135499,"بعث":135500,"lanması":135501,"Ġayr":135502,"Ġayrıl":135503,"ìĤ¬íļĮ":135504,"dÃŃa":135505,"pÅĤyw":135506,"اÙħÙĬØ©":135507,"íĺľ":135508,"×IJ׳×Ĵ׾":135509,"×IJ׳×Ĵ׾×Ļת":135510,"ĠìŀĪëĭ¤ëĬĶ":135511,"Ġساعة":135512,"ĠëĤĺíĥĢ":135513,"bö":135514,"à¸Ħัà¸Ļ":135515,"ĠdziaÅĤania":135516,"Ø©Ùĭ":135517,"ĠngÅ©":135518,"׳צ×Ĺ":135519,"ãģ¯ãģĤãĤĭ":135520,"ĠyaÅŁÄ±nda":135521,"stück":135522,"caracter":135523,"caracterÃŃsticas":135524,"Ġrá»Ńa":135525,"ĠÙħختÙĦÙ쨩":135526,"ãģ«ãģĬãģijãĤĭ":135527,"à¹ģà¸ŀà¸ĩ":135528,"วิà¹Īà¸ĩ":135529,"תפ×ķ":135530,"ساÙĩÙħ":135531,"使ãģĨ":135532,"ÙĥرÙĬ":135533,"×IJפ×Ļ":135534,"...............":135535,"ĠÑĤаким":135536,"×Ļ׼×ķ×Ļ":135537,"شبÙĩ":135538,"جÙĬر":135539,"ãģĿãģ®ãģ¾ãģ¾":135540,"acjÄĻ":135541,"ĠاÙĦترÙĥ":135542,"ĠاÙĦترÙĥÙĬ":135543,"ĠпÑĢавилÑĮно":135544,"ĠتعÙħÙĦ":135545,"à¸ģลà¹īา":135546,"Ġbiên":135547,"Ġ×ij׳×Ļ×Ļת":135548,"ĠклÑĥб":135549,"Ġ×ŀש×Ķ":135550,"вÑĪий":135551,"ãģĵãģ¨ãģĮãģ§ãģįãĤĭ":135552,"à¸ŀัà¸Ļà¸ĺุ":135553,"à¸ŀัà¸Ļà¸ĺุà¹Į":135554,"ר×ķ×Ŀ":135555,"ĠاÙĦÙ쨱ÙĨ":135556,"ĠاÙĦÙ쨱ÙĨسÙĬ":135557,"à¹Ģà¸Ľà¹ĩà¸Ļà¸Ħà¸Ļ":135558,"ãģĹãģ¦ãģĬãĤĬ":135559,"Ġthầy":135560,"ãĤĵãģłãģijãģ©":135561,"ì͍":135562,"ÙħدÙĨ":135563,"تÙĪÙĨ":135564,"ĠмеÑĤал":135565,"ĠмеÑĤалл":135566,"ĠinÃŃcio":135567,"à¸Ńà¸Ńà¸ģà¸Īาà¸ģ":135568,"ëĴ¤":135569,"Ġcuá»ijn":135570,"Ġbuá»Ļc":135571,"ÙĨسÙĬ":135572,"ächt":135573,"×ŀ×Ļ׳×Ļ×Ŀ":135574,"ãģķãģ¦":135575,"ãģĮãģ§ãģį":135576,"ÑĬем":135577,"Ġtái":135578,"ĠЧÑĤ":135579,"ĠЧÑĤобÑĭ":135580,"à¸Ľà¸¥à¸¹à¸ģ":135581,"à¸Ĭุมà¸Ĭà¸Ļ":135582,"нÑģкий":135583,"Ġvững":135584,"Ġ×Ķ׾×ij":135585,"ële":135586,"Ġשע×ijר":135587,"ваÑĤÑĮÑģÑı":135588,"бой":135589,"عÙĪÙĨ":135590,"à¹ģà¸Ķà¸Ļ":135591,"Ġספר×Ļ×Ŀ":135592,"Ġtuyên":135593,"Ġnhiêu":135594,"ĠQuý":135595,"Ġhuyết":135596,"ãĤıãģĭãĤīãģªãģĦ":135597,"Ġ×ŀ׼ף":135598,"Ġ×Ķק׾":135599,"Ġ׾×IJ×ķר":135600,"ĠÄIJiá»ĩn":135601,"شؤ":135602,"شؤÙĪÙĨ":135603,"Ġ×ŀ×Ĺפש":135604,"ĠпоÑģÑĤоÑıнно":135605,"×ŀ×Ļר":135606,"ìħĶ":135607,"ÐŀÑģ":135608,"ÐŀÑģнов":135609,"×ĸ×Ļת":135610,"ĠHá":135611,"ĠÑĩаÑģов":135612,"×IJ×ķ׾×Ļ":135613,"Ġmát":135614,"خرÙĪ":135615,"خرÙĪØ¬":135616,"ÙĤضا":135617,"ÙĤضاÙĬا":135618,"à¹Ģà¸Ľà¸Ńรà¹Į":135619,"ĠÙĬÙĪÙĦ":135620,"ĠÙĬÙĪÙĦÙĬÙĪ":135621,"à¹Ĥà¸Ĺษ":135622,"׳פ׾":135623,"ת×ķש":135624,"ת×ķש×ij×Ļ":135625,"Ġvários":135626,"×ŀר×IJ×Ķ":135627,"ëĿ¼ìĿ´":135628,"ÙĨغ":135629,"×ijצע":135630,"гон":135631,"ĠÄIJược":135632,"عÙı":135633,"пÑĥÑģк":135634,"ĠÙĪØ§ÙĦÙģ":135635,"ücü":135636,"×Ļ×§×Ļ×Ŀ":135637,"ĠسبÙĬÙĦ":135638,"׾×ijף":135639,"ĠاÙĦÙĤرÙĨ":135640,"ס×ķת":135641,"ĠQuáºŃn":135642,"ãģĵãĤĮãģĮ":135643,"ãĥĸãĥ©ãĥ³ãĥī":135644,"×Ĵ×ŀר":135645,"ĠwartoÅĽci":135646,"ĠÙĪØ¨ÙĬÙĨ":135647,"Ġdạ":135648,"ÐIJв":135649,"ÐIJвÑĤо":135650,"Ġolacaktır":135651,"à¸Ļà¸Ĺà¹Į":135652,"Ùħطار":135653,"Ġ×¢×§×ij":135654,"Ġתפ":135655,"ãģĹãģ¦ãģĦãģ¦":135656,"צ×ŀ×Ĺ":135657,"à¸Īà¸Ńà¸ĩ":135658,"Ġöde":135659,"ìį¨":135660,"ÙĨاس":135661,"調ãģ¹":135662,"ĠогÑĢомн":135663,"ë³´íĹĺ":135664,"×ĺ×§":135665,"×ĺקס×ĺ":135666,"ĠbaÅŁv":135667,"ĠbaÅŁvuru":135668,"Ġpomys":135669,"ĠpomysÅĤ":135670,"ãģ«ä¹Ĺ":135671,"Ġש׼ף":135672,"ĠاÙĦÙħسؤÙĪÙĦ":135673,"Ġзан":135674,"ĠзанÑıÑĤ":135675,"Ġdương
":135676,"ãĥĹãĥ¬ãĤ¤":135677,"ลà¸ļ":135678,"ÑĤика":135679,"ĠAralık":135680,"Ġнедо":135681,"Ġmá»Ļ":135682,"Ġoran":135683,"Ġoranı":135684,"Ġktór":135685,"ĠktórÄħ":135686,"Ġ×Ķ×IJ×Ĺר×ķ׳×ķת":135687,"ائÙĨ":135688,"ÅĦs":135689,"ÅĦska":135690,"åĽ½ãģ®":135691,"×ŀ×ĺ×Ļ":135692,"ĠвопÑĢоÑģÑĭ":135693,"à¸Ńà¸ĩà¸Ħà¹Įà¸ģร":135694,"×ŀ×ķצ×IJ":135695,"Ġpóź":135696,"Ġpóźniej":135697,"ש×ŀ×IJ׾":135698,"Ġkaps":135699,"Ġkapsam":135700,"Ġkapsamında":135701,"Ġmáquina":135702,"ĠÅĽwiecie":135703,"ĠhoÃłng":135704,"Ġözgü":135705,"×Ĵ×ķר×Ŀ":135706,"ãģĤãģŁãĤĬ":135707,"à¸ķัà¸Ķสิà¸Ļ":135708,"à¸ķัà¸Ķสิà¸Ļà¹ĥà¸Ī":135709,"бÑĢи":135710,"ãģ«ãģªãĤĭãģ¨":135711,"تÙĥÙĪÙĨ":135712,"Ġ×ķ×Ķ×Ļ×IJ":135713,"Ġchiếu":135714,"ÑģÑĤанав":135715,"ÑģÑĤанавли":135716,"ÑģÑĤанавлива":135717,"×ŀ×ķ×Ĵ":135718,"cité":135719,"ĠKörper":135720,"Ġש×Ĵ×Ŀ":135721,"عظ":135722,"عظÙĬÙħ":135723,"Ġ×Ķ×IJ×Ļש×Ļ":135724,"Ġmatière":135725,"ĠÙģÙĪÙĤ":135726,"Ġkto":135727,"ĠktoÅĽ":135728,"à¸Ļà¹Ĥย":135729,"à¸Ļà¹Ĥยà¸ļาย":135730,"å¾ħãģ¡":135731,"à¹Ģมà¸Ļ":135732,"à¹Ģมà¸Ļู":135733,"AÃĩÃĥO":135734,"Ġtù":135735,"Ġtùy":135736,"ãĥĪãĥ³":135737,"ĠоÑĤказ":135738,"Ġ×ŀ×ķצר":135739,"ülü":135740,"ãģķãĤĵãģ«":135741,"Ġ×Ĺ×ķ×ij":135742,"קר×Ļ×IJ×Ķ":135743,"ĠاÙĦخدÙħات":135744,"ĠÙĦÙħدة":135745,"رؤ":135746,"رؤÙĬØ©":135747,"ãĤĴè¦ĭãģ¤ãģij":135748,"à¸Łà¸²":135749,"Ġréussi":135750,"à¸Ļัà¸ģà¹Ģรียà¸Ļ":135751,"ĠÑĩиÑģл":135752,"à¸ģารà¹Ģลà¹Īà¸Ļ":135753,"Ġhazırl":135754,"Ġhazırlan":135755,"ĠпеÑĢвÑĭй":135756,"лим":135757,"ĠоÑĤзÑĭвÑĭ":135758,"ĠwyjÄħ":135759,"ĠwyjÄħtk":135760,"ĠØ£ÙĤÙĦ":135761,"ס×ļ":135762,"Ġê²°ìłķ":135763,"Ġ׾×ŀעש×Ķ":135764,"Ġlắp":135765,"à¹ģà¸ļร":135766,"à¹ģà¸ļรà¸Ļà¸Ķà¹Į":135767,"วà¹Īาà¹Ģà¸Ľà¹ĩà¸Ļ":135768,"Ġبدا":135769,"ĠبداÙĬØ©":135770,"ãģ¨ãģĦãģĨãģ®ãģĮ":135771,"иÑĩеÑģким":135772,"à¸ģารà¸ŀัà¸Ĵà¸Ļา":135773,"ĠbÃło":135774,"ĠmiaÅĤa":135775,"ywaÄĩ":135776,"ĠMärz":135777,"ĠÙĨسبة":135778,"Ġéconomique":135779,"×ĸ×ŀ":135780,"×ĸ×ŀ׳×Ļ×Ŀ":135781,"æŃ¢ãĤģ":135782,"Ġtá»§":135783,"íķĺìĭł":135784,"Ġkażdego":135785,"straÃŁe":135786,"à¸Ĭีà¹ī":135787,"à¹Ģà¸ļา":135788,"ÑĢеÑģÑĥÑĢÑģ":135789,"евой":135790,"شباب":135791,"à¸ķà¹Īาà¸ĩà¸Ľà¸£à¸°à¹Ģà¸Ĺศ":135792,"Ġ×IJ×Ļש":135793,"Ġ×IJ×Ļש×Ļת":135794,"×Ļ×ķפ":135795,"×Ļ×ķפ×Ļ":135796,"ĠìļĶ구":135797,"ì¡°ìĤ¬":135798,"ãģ£ãģŁãĤī":135799,"׾×Ļ×§":135800,"миниÑģÑĤÑĢ":135801,"ãĤĤãģ®ãģ¯":135802,"Ġlương":135803,"Ġнаи":135804,"Ġнаибол":135805,"Ġнаиболее":135806,"íİĺ":135807,"à¹ģà¸ŀà¹ī":135808,"ãĤŃãĥ¥":135809,"ĠкоÑĤоÑĢÑĭм":135810,"à¹ģà¸Ĺà¸ĩ":135811,"à¹ģà¸Ĺà¸ĩà¸ļà¸Ńล":135812,"Ġ׳×Ļ×Ķ":135813,"Ġ׳×Ļ×Ķ×ķ׾":135814,"âĤª":135815,"ĠGiải":135816,"ĠиÑģполÑĮзова":135817,"ëł¥ìĿĦ":135818,"ãģĹãģĭãĤĤ":135819,"à¸ģà¹ĩà¸ķà¹īà¸Ńà¸ĩ":135820,"ĠÑĢеб":135821,"ĠÑĢебен":135822,"ĠÑĢебенка":135823,"تÙĪØ§ØµÙĦ":135824,"ãĤ°ãĥ«ãĥ¼ãĥĹ":135825,"ãĤĦãĤī":135826,"à¹Ģà¸Ľà¸´à¸Ķà¸ķัว":135827,"бÑĢо":135828,"ë°ĸìĹIJ":135829,"ÙĨÙİØ§":135830,"×Ķ×Ĵ":135831,"×Ķ×Ĵ׳×Ķ":135832,"à¸Ĺรั":135833,"à¸Ĺรัà¸ŀ":135834,"à¸Ĺรัà¸ŀยà¹Į":135835,"Ġkhá»iji":135836,"עצ×ŀ×ķ":135837,"болезн":135838,"Ġë°ĽìķĦ":135839,"มà¸Ļ":135840,"มà¸Ļุ":135841,"มà¸Ļุษ":135842,"มà¸Ļุษยà¹Į":135843,"âĹĨ":135844,"×ŀצ׾×Ļ×Ĺ":135845,"Ñıвление":135846,"ÙħØ·ÙĦ":135847,"ÙħØ·ÙĦÙĪØ¨":135848,"خاÙĦÙģ":135849,"تÙĪÙĤÙģ":135850,"ãģ§ãģįãģ¾ãģĽãĤĵ":135851,"оÑģÑĤей":135852,"меÑĩа":135853,"기ëĬĶ":135854,"תשע":135855,"صÙĬب":135856,"Ġ×ij×¢×ķ×ĵ":135857,"à¸Ĥà¸Ńà¸ĩà¹Ģà¸Ĥา":135858,"ÑĤÑıж":135859,"ĠÑĥпÑĢав":135860,"ĠÑĥпÑĢавлениÑı":135861,"Ġgénér":135862,"ĠthÃŃ":135863,"פ×ļ":135864,"ĠرÙħض":135865,"ĠرÙħضاÙĨ":135866,"Ġtruyá»ĩn":135867,"إعداد":135868,"ãĤµãĥĿãĥ¼ãĥĪ":135869,"Ġполно":135870,"خاÙħ":135871,"ÐŁÐµÑĤ":135872,"ÐŁÐµÑĤеÑĢ":135873,"ÐŁÐµÑĤеÑĢбÑĥÑĢ":135874,"ÐŁÐµÑĤеÑĢбÑĥÑĢг":135875,"ÙħÙĨتدÙī":135876,"ãģķãĤĮã
ģ¾ãģĹãģŁ":135877,"ĠëĮĢíķĺìŬ":135878,"à¸ľà¸¹à¹īà¸Ĺีà¹Ī":135879,"Ġ×ŀ×IJ×ķ":135880,"׾׳×ĵ":135881,"оÑĩнÑĭе":135882,"ĠнаÑĩала":135883,"Ġ׾×Ļ׾×ĵ×Ļ×Ŀ":135884,"овое":135885,"ãģĻãĤĭãģĵãģ¨ãģ§":135886,"ĠاÙĦÙĨÙģ":135887,"ĠاÙĦÙĨÙ쨷":135888,"ìŀĪëĬĶ":135889,"غÙĨÙĬ":135890,"פ×ĵ":135891,"ãĤ¾":135892,"ĠCré":135893,"ãģ©ãģ¡ãĤī":135894,"ثاÙĨ":135895,"ÑĢабаÑĤ":135896,"ÑĢабаÑĤÑĭва":135897,"Ġê°Ļëĭ¤":135898,"à¸Īั":135899,"à¸Īัà¸ģร":135900,"Ġchụ":135901,"Ġchụp":135902,"ĠмаÑģÑĤ":135903,"ĠмаÑģÑĤеÑĢ":135904,"Ġnắm":135905,"ĠÑģÑĤали":135906,"Ġ×Ķ×IJ×Ļר×ķ×¢":135907,"ãĤ½ãĥ³":135908,"åĪĨãģĭãĤĬ":135909,"طبع":135910,"بدا":135911,"gráfico":135912,"геÑĢ":135913,"à¸Ķำà¹Ģà¸Ļิà¸Ļà¸ģาร":135914,"Ġsaldır":135915,"Ġsaldırı":135916,"вÑĪиÑħ":135917,"ãģĭãģ£ãģŁãģ§ãģĻ":135918,"Ġyapıyor":135919,"ĠاÙĦÙģØª":135920,"צרפת":135921,"здоÑĢов":135922,"×ij×¢×ľ":135923,"Ġ×IJ×ŀ×Ļת×Ļ":135924,"ĠобÑĭ":135925,"ĠобÑĭÑĩ":135926,"ĠобÑĭÑĩно":135927,"Ġ׾×ķ×ŀר":135928,"تÙĥÙĨ":135929,"تÙĥÙĨÙĪÙĦÙĪØ¬":135930,"تÙĥÙĨÙĪÙĦÙĪØ¬ÙĬا":135931,"Ġhakkı":135932,"ĠÑĢав":135933,"ĠÑĢавно":135934,"رÙĬÙĥ":135935,"Ġ×ij×ŀ×Ļ×ĵ":135936,"Ġ×ij×ŀ×Ļ×ĵ×Ķ":135937,"à¹ģà¸ģà¹īว":135938,"Ġìĸĺ":135939,"Ġìĸĺ기":135940,"ãģĹãģ¦ãģĦãģ¾ãģĹãģŁ":135941,"Ġkısm":135942,"Ġkısmı":135943,"걸":135944,"åĨħãģ®":135945,"ì§ķ":135946,"à¹Ģหมืà¸Ńà¸Ļà¸ģัà¸Ļ":135947,"ĠÙģÙIJ":135948,"ĠÙģÙIJÙĬ":135949,"ÙĤاعدة":135950,"Ġmożesz":135951,"ÙħصاÙĦ":135952,"ÙħصاÙĦØŃ":135953,"ãģ¾ãģŁãģ¯":135954,"бег":135955,"Ġsıc":135956,"Ġsıcak":135957,"ÑĩиÑģ":135958,"ÑĩиÑģлен":135959,"Ġног":135960,"ãĥģãĥ£ãĥ³":135961,"ãĥ«ãĥī":135962,"Ġgió":135963,"Ġsını":135964,"Ġsınıf":135965,"иваÑĤÑĮ":135966,"Ġquên":135967,"Ġìłģ":135968,"Ġìłģìļ©":135969,"ĠJoão":135970,"ÙģØ§Ø¯":135971,"ĠGlück":135972,"à¸Ĺà¸Ńà¸Ķ":135973,"Ġgói":135974,"ï¼Ĭ":135975,"Ġdétail":135976,"ĠدÙĬسÙħ":135977,"ĠدÙĬسÙħبر":135978,"ë¡ľìĦľ":135979,"×ŀ×ķ×Ĺ":135980,"à¹Ħฮ":135981,"ĠоÑĤд":135982,"ĠоÑĤдÑĭÑħ":135983,"Ġkhuyến":135984,"à¸Ħà¸Ńย":135985,"ĠجÙĨÙĬ":135986,"ĠجÙĨÙĬÙĩ":135987,"ĠاÙĦدÙģØ§Ø¹":135988,"à¸Ļà¹īำหà¸Ļัà¸ģ":135989,"ĠìĤ¬ëŀĮëĵ¤ìĿ´":135990,"Ġthừa":135991,"ĠÃ¶ÄŁrenci":135992,"ĠпомоÑīи":135993,"ĠczÄĻÅĽÄĩ":135994,"ש×ĺר":135995,"ĠNhi":135996,"ĠNhiá»ģu":135997,"׳צ×Ļ":135998,"ĠнаÑĪем":135999,"ĠkarÅŁÄ±laÅŁ":136000,"Ġ×Ķש׳×Ļ×Ŀ":136001,"ĠÄIJưá»Ŀng":136002,"Ġtrú":136003,"ĠÑĢазлиÑĩнÑĭÑħ":136004,"ĠاÙĦØ´Ùĩر":136005,"Ġ×ľ×¢×ķ׾×Ŀ":136006,"ØŃجر":136007,"ĠÄijá»ķ":136008,"ĠìĿĺíķ´":136009,"à¸ļà¹Īà¸Ńย":136010,"Ġ×Ķ×Ļ׾×ĵ":136011,"ãģ¨ãģªãģ£ãģŁ":136012,"Ġ×Ĺ×ķ×ķת":136013,"Ġש×Ļר×ķת×Ļ":136014,"Äħcy":136015,"سرÙĬ":136016,"Kİ":136017,"פ׳×ķ":136018,"ÑģÑĤÑĢÑĥкÑĤÑĥÑĢ":136019,"ÑĤÑĢÑĥд":136020,"Ġ×Ķקר":136021,"Ġ×Ķקר×ķ×ij":136022,"ĠtháºŃm":136023,"èģŀãģį":136024,"ÙĤÙĪÙĬ":136025,"клÑİÑĩен":136026,"ÑĤеÑħ":136027,"ÑĤеÑħнолог":136028,"è¡Įãģ£ãģŁ":136029,"Ġ×ķ×IJ×Ļף":136030,"ĠÅŁeklin":136031,"ĠÅŁeklinde":136032,"rô":136033,"ÑĢог":136034,"ĠновÑĭе":136035,"Ġס×ij×Ļ×ij":136036,"ĠtecnologÃŃa":136037,"×¡×Ľ":136038,"×¡×Ľ×ķ×Ŀ":136039,"ĠÅŀub":136040,"ĠÅŀubat":136041,"Ġ×Ķ×ŀ׾×IJ":136042,"Ġwypos":136043,"Ġwyposaż":136044,"ãģ¯ä½ķ":136045,"ãĤ¬ãĥ³":136046,"ê°ĸ":136047,"Ġкакие":136048,"Ġçocuklar":136049,"Ġ׾צ×ĵ":136050,"Ġkayıt":136051,"ĠмеÑģÑĤе":136052,"ÙħدÙĬÙĨØ©":136053,"Ġ׼×Ĵ":136054,"Ġ׼×Ĵ×ķף":136055,"ãģĹãģ¦ãĤĭ":136056,"ĠÙħاÙĬÙĪ":136057,"ãģ£ãģ¦ãģĹãģ¾ãģ£ãģŁ":136058,"ĠпÑĢогÑĢаммÑĭ":136059,"à¹ģลà¸Ļà¸Ķà¹Į":136060,"ãĥ¯ãĤ¤":136061,"ער×ķ×¥":136062,"Ñģид":136063,"ĠBöyle":136064,"Ġì²ĺìĿĮ":136065,"Ġתפק×Ļ×ĵ":136066,"ĠTrên":136067,"íĥĪ":136068,"ĠÐłÐ¾ÑģÑģий":136069,"ĠÐłÐ¾ÑģÑģийÑģкой":136070,"ĠsÃłn":136071,"Ġrègle":136072,"ĠyaklaÅŁÄ±k":136073,"à¹Ģลิà¸ģ":136074,"ĠدائÙħ":136075,"Ġ×ķ×Ĵ":136076,"ابر":136077,"Ġbè":136078,"ĠاÙĦÙĤدÙħ":136079,"ĠÑĢеÑĪениÑı":136080,"hiên":136081
,"ÑĤик":136082,"ÄĦ":136083,"à¸ļรรยาà¸ģ":136084,"à¸ļรรยาà¸ģาศ":136085,"רצ×ķף":136086,"åĭķãģį":136087,"ĠGäste":136088,"Ġ기본":136089,"ĠÙĬعرÙģ":136090,"ĠSá»Ń":136091,"gÅĤÄĻb":136092,"à¹Ģà¸Ńส":136093,"×IJ×ŀ×Ļף":136094,"ĠпÑĥнк":136095,"ĠпÑĥнкÑĤ":136096,"Ġ×Ļ×ķ×ĵ×¢×Ļ×Ŀ":136097,"ãĤ«ãĥ©ãĥ¼":136098,"Ġ×ijס×ĵר":136099,"Ġbuá»ĵn":136100,"йÑĤ":136101,"йÑĤеÑģÑĮ":136102,"ãĤĴæ±ĤãĤģ":136103,"Ġ×IJ×ª×Ľ×Ŀ":136104,"Ġ모르":136105,"ظرÙĪÙģ":136106,"ÑĩеÑģÑĤво":136107,"ìĸ´ìĦľ":136108,"Ġодна":136109,"Ġkapı":136110,"Ġëħ¸ëł¥":136111,"ĠKüche":136112,"ĠاÙĦتش":136113,"Ø·ÙĬب":136114,"ĠíĬ¹íŀĪ":136115,"ĠвÑĭпÑĥÑģ":136116,"ĠвÑĭпÑĥÑģк":136117,"×ĵת×Ļ":136118,"ĠuÄŁ":136119,"ĠuÄŁra":136120,"ائÙĩا":136121,"Ġthoát":136122,"ãģªãĤĤãģ®":136123,"ÑijÑĢ":136124,"기ê°Ģ":136125,"ĠgeliÅŁme":136126,"تØŃÙĤ":136127,"تØŃÙĤÙĤ":136128,"ĠопаÑģ":136129,"бÑĢоÑģ":136130,"หุ":136131,"หุà¹īà¸Ļ":136132,"ì¼Ģ":136133,"ãĤ¹ãĥŀ":136134,"ãĤ¹ãĥŀãĥĽ":136135,"Ø£Ù쨱":136136,"Ø£ÙģØ±Ø§Ø¯":136137,"ĠThá»±c":136138,"Ġthắ":136139,"ãĥªãĥ³ãĤ¯":136140,"Ġniá»ģm":136141,"ĠHöhe":136142,"عÙħار":136143,"ÙĥÙĪØ±ÙĪÙĨ":136144,"ÙĥÙĪØ±ÙĪÙĨا":136145,"ĠÄIJến":136146,"ĠÑģамом":136147,"ĠÑĤеле":136148,"ĠÄijoán":136149,"à¸Ħวามà¸Ħิà¸Ķà¹Ģหà¹ĩà¸Ļ":136150,"ĠдиÑģк":136151,"أطÙ쨧ÙĦ":136152,"มารà¹Į":136153,"à¸Ĺหาร":136154,"à¸Ĺà¸Ļ":136155,"ĠبعÙĬد":136156,"ĠاÙĦÙĩÙĨد":136157,"åĩºãģĹãģ¦":136158,"Ġkarde":136159,"ĠkardeÅŁ":136160,"×Ķ×Ļס×ĺ×ķר":136161,"×Ķ×Ļס×ĺ×ķר×Ļ×Ķ":136162,"éģ¸ãģ³":136163,"عاÙħÙĦ":136164,"à¸Ĥยาย":136165,"Ġtürl":136166,"Ġtürlü":136167,"ĠìĿ¼ìĿ´":136168,"Ġmatéria":136169,"Ġ׼׾×ķ×ŀר":136170,"ãĥģãĥ£ãĥ¼":136171,"جÙħاعة":136172,"ĠÑģвоим":136173,"Ø¥ÙĤاÙħØ©":136174,"ä¾ĭãģĪãģ°":136175,"ساب":136176,"آخر":136177,"ÙĤدÙĬر":136178,"×IJ×ŀ×Ļ":136179,"ìĸ»":136180,"Ġ׳×ķספת":136181,"ĠÐĴлад":136182,"ĠÐĴладим":136183,"ĠÐĴладимиÑĢ":136184,"Ġestará":136185,"ãģĵãģĨãģĦãģĨ":136186,"ãĤĴ使ç͍":136187,"มาà¸ķร":136188,"มาà¸ķรà¸IJาà¸Ļ":136189,"ãģ£ãģ½":136190,"Ġnú":136191,"Ġnúi":136192,"ยาà¸ĩ":136193,"ĠاÙĦجÙĨس":136194,"Ġüstün":136195,"ëľ»":136196,"ãĤ»ãĥ«":136197,"ãģ¦ãģĦãģįãģ¾ãģĻ":136198,"Ġ×Ĺ×ķ×ĸ":136199,"Ġ×Ĺ×ķ×ĸר":136200,"ĠÐĵлав":136201,"à¹Ĥà¸Ĭà¸Ħ":136202,"íıIJ":136203,"ÙĨتظر":136204,"Ġ×Ĵ×ij×Ļ":136205,"عÙĤب":136206,"intér":136207,"intérêt":136208,"×ŀפ×Ĵ":136209,"×ŀפ×Ĵש":136210,"Ġthù":136211,"اÙģØª":136212,"Ġ×ŀשפ":136213,"Ġ×ŀשפ×ĺ×Ļ":136214,"ĠÙħÙĪØ§ÙĤع":136215,"è¦ļ":136216,"è¦ļãģĪ":136217,"×ĵ×Ļף":136218,"à¹Ģรืà¹Īà¸Ńà¸ĩราว":136219,"ãģ¾ãģĤ":136220,"Ġghế":136221,"иÑĢÑĥÑİÑĤ":136222,"à¸ģว":136223,"à¸ģวà¹īาà¸ĩ":136224,"ĠповеÑĢ":136225,"ĠповеÑĢÑħ":136226,"ĠповеÑĢÑħноÑģÑĤ":136227,"׳×ĵר":136228,"ĠконÑĨе":136229,"Ġдолжна":136230,"Ġ×Ļש×Ļר":136231,"acaģız":136232,"ìĹĶ":136233,"ĠnÃŃvel":136234,"Ġör":136235,"Ġörnek":136236,"ÙĥÙģ":136237,"ĠФедеÑĢаÑĨии":136238,"Ġ구ìĦ±":136239,"หัวà¹ĥà¸Ī":136240,"ĠVáºŃy":136241,"мед":136242,"меди":136243,"медиÑĨин":136244,"медиÑĨинÑģк":136245,"ازÙĬ":136246,"×Ĵ×ij×ķ׾":136247,"ÑĦÑĢ":136248,"Ġzusätzlich":136249,"à¸ģà¸ģ":136250,"ĠاÙĦاÙĤتصادÙĬØ©":136251,"Ġhè":136252,"luÄŁun":136253,"جÙİ":136254,"à¹Ħà¸Łà¸¥à¹Į":136255,"ÄIJT":136256,"ãģĿãģ®ä»ĸ":136257,"à¸Ĺิà¹īà¸ĩ":136258,"ĠاÙĦØ£ÙĪ":136259,"رسÙħ":136260,"æ°Ĺãģ¥":136261,"ìĿ´ë©°":136262,"ÑĮев":136263,"صط":136264,"ĠاÙĦاستث":136265,"ĠاÙĦاستثÙħار":136266,"à¸Ńาà¸Ħาร":136267,"ĠÑĤоÑĩно":136268,"ĠVân":136269,"à¸Ńร":136270,"à¸Ńรà¹Īà¸Ńย":136271,"ĠاÙĦسÙĨØ©":136272,"ĠcÆ°á»Ľi":136273,"×Ļ×Ķף":136274,"íį¼":136275,"話ãģĹ":136276,"âĹĭ":136277,"ĠìķĬìĿĢ":136278,"ãĥ¡ãĥ¼ãĤ":136279,"ãĥ¡ãĥ¼ãĤ«":136280,"ãĥ¡ãĥ¼ãĤ«ãĥ¼":136281,"ĠÑĤепло":136282,"å½¼ãĤī":136283,"Ġİz":136284,"Ġİzmir":136285,"íĻį":136286,"Ġrượ":136287,"Ġrượu":136288,"æĢĿãģĦåĩº":136289,"ĠPhạm":136290,"Ġcháu":136291,"צ×Ļ×ķת":136292,"ĠìĿ¼ë³¸":
136293,"ìĤ¬ëĬĶ":136294,"ĠÑģоздан":136295,"Ġaracı":136296,"Ġער":136297,"Ġער×Ļ׼×Ķ":136298,"ĠíķĺëĤĺëĭĺìĿĺ":136299,"dziÅĤ":136300,"à¸Ľà¸£à¸°à¸ĺาà¸Ļ":136301,"ĠserÃŃa":136302,"ĠìŀĪëıĦë¡Ŀ":136303,"درج":136304,"íķľëĭ¤ëĬĶ":136305,"à¸Ńาà¸Ĺ":136306,"à¸Ńาà¸Ĺิà¸ķ":136307,"à¸Ńาà¸Ĺิà¸ķยà¹Į":136308,"ÑĤелÑĮнÑĭй":136309,"ĠخدÙħات":136310,"×ŀ׳×ĺ":136311,"Ġlược":136312,"ĠSÃłi":136313,"ĠÙĪØ§Ø¶":136314,"ĠÙĪØ§Ø¶ØŃ":136315,"غاز":136316,"ĠdoÄŁal":136317,"Ġ×ijש×Ŀ":136318,"Ġдлин":136319,"Ġإطار":136320,"Ġ×ijספר":136321,"ãĤĴä¸İ":136322,"ãĤĴä¸İãģĪ":136323,"Ġë²ķë¥ł":136324,"ĠÑĥвели":136325,"ĠÑĥвелиÑĩи":136326,"สà¹Ħà¸ķ":136327,"สà¹Ħà¸ķลà¹Į":136328,"à¹Ħà¸ģล":136329,"×ij×Ĺף":136330,"ĠìĿ´íĽĦ":136331,"Ġmunic":136332,"ĠmunicÃŃpio":136333,"تÙħØ«ÙĦ":136334,"ĠÄijáo":136335,"Hôtel":136336,"Ġlá»Ńa":136337,"ĠÄijẳng":136338,"Ñĩки":136339,"شرÙĪ":136340,"شرÙĪØ·":136341,"ĠìĿ´ë¥¼":136342,"ÙĬÙĭا":136343,"×ŀ׾×ļ":136344,"×ŀ×Ķ×Ļר×ķת":136345,"ĠобÑıзаÑĤелÑĮ":136346,"ĠобÑıзаÑĤелÑĮно":136347,"énergie":136348,"Ġmudança":136349,"Ġmụ":136350,"Ġmụn":136351,"Ġnº":136352,"ĠاÙĦتعا":136353,"ĠاÙĦتعاÙĪÙĨ":136354,"ĠاÙĦاجتÙħاعÙĬØ©":136355,"ĠплаÑģÑĤ":136356,"Ġëĵ±ìĿĺ":136357,"ãĥIJãĤ¤ãĤ¯":136358,"ÙĩجÙĪÙħ":136359,"ĠSaúde":136360,"Ġì¤ijìļĶíķľ":136361,"Ġ×Ķצ×Ļ×ij×ķר":136362,"×ª×§×Ł":136363,"ĠاÙĦعاÙĦÙħÙĬ":136364,"ĠболÑĮÑĪой":136365,"ĠÙĥÙĦÙħ":136366,"ĠÙĥÙĦÙħØ©":136367,"ãģ®ãģ§ãģ¯ãģªãģĦãģ§ãģĹãĤĩãģĨãģĭ":136368,"ĠÙħباراة":136369,"Ġש×IJ׳":136370,"Ġש×IJ׳×Ĺ׳×ķ":136371,"ãĤ¹ãĤ¿ãĤ¤ãĥ«":136372,"ĠSaÄŁ":136373,"ĠSaÄŁlık":136374,"Ġhư":136375,"׳×Ĺ×Ķ":136376,"Ġ×ijקר×ij":136377,"طعÙħ":136378,"หิà¸Ļ":136379,"à¸Ĺุà¸ģวัà¸Ļ":136380,"à¸Ħรัà¹īà¸ĩà¸Ĺีà¹Ī":136381,"ĠlÃłnh":136382,"Ġdonné":136383,"ãģĽãģĦ":136384,"جزÙĬرة":136385,"доÑĢож":136386,"ì¼ľ":136387,"تÙĨظÙĬÙģ":136388,"ãĥģãĥ§":136389,"Ġaldıģı":136390,"جاج":136391,"ĠÑĤомÑĥ":136392,"à¸Ľà¸´":136393,"Ġ×ijרשת":136394,"ãģıãģªãĤĬãģ¾ãģĻ":136395,"ĠпÑĢинÑĨип":136396,"Ġ×Ĺ׾×ķ":136397,"ëı¼":136398,"×ķ×Ĵש":136399,"سس":136400,"à¸Ľà¸¹":136401,"Ġhầu":136402,"æĦŁãģĺãĤĭ":136403,"ï¼´":136404,"دÙĪØ§":136405,"ĠÑģмог":136406,"scrição":136407,"ĠtháºŃn":136408,"Ġר×ķ×IJ×Ķ":136409,"обÑĢажен":136410,"ĠاÙĦتجارÙĬØ©":136411,"طبÙĬع":136412,"jÄħcÄħ":136413,"íĸīìľĦ":136414,"ĠновÑĭй":136415,"Ġ×ŀ×Ĺ×ĵש":136416,"æĮ¯ãĤĬ":136417,"gué":136418,"Ġ×IJ×Ļר×ķ×¢":136419,"Ġ×IJ×Ļר×ķ×¢×Ļ×Ŀ":136420,"ĠاÙĦذÙĩب":136421,"×ĵ×IJ":136422,"تاÙĨ":136423,"ãģłãģĹ":136424,"à¸Ńัà¸ķรา":136425,"à¹Ĥà¸Ī":136426,"بÙĦاد":136427,"×Ķ×Ļ×Ļ׳×ķ":136428,"ĠÑģпе":136429,"ĠÑģпеÑĨиалÑĮно":136430,"ĠÅĽwiata":136431,"ãĤĵãģ§ãģĻãĤĪ":136432,"شرÙĥØ©":136433,"ĠpÅĤyt":136434,"Ġsitué":136435,"Ġ׼×IJ׾×Ķ":136436,"ס×ijר":136437,"Ġkażd":136438,"Ġkażdym":136439,"ãĤĴæĮģãģ¤":136440,"׾×Ķ׾":136441,"׾×Ķ׾ף":136442,"ĠwÅĤas":136443,"ĠwÅĤasne":136444,"ĠsaÄŁlan":136445,"×ŀ×¢×ľ×Ķ":136446,"ĠاÙĦاÙĪÙĦ":136447,"ìĹIJìĦľëıĦ":136448,"×IJ×Ļר×ķפ×Ķ":136449,"تÙĤÙĨÙĬØ©":136450,"Ùħائ":136451,"Ùħائة":136452,"ĠcompañÃŃa":136453,"Ġsürek":136454,"Ġsürekli":136455,"ĠиÑģкÑĥÑģ":136456,"ĠиÑģкÑĥÑģÑģÑĤв":136457,"ĠBürger":136458,"ת×Ĺר":136459,"ת×Ĺר×ķת":136460,"à¸ŀรà¹īà¸Ńมà¸ģัà¸ļ":136461,"Ø´Ùħ":136462,"à¸ĸืà¸Ńวà¹Īา":136463,"è¾¼ãĤĢ":136464,"ä¼ijãģ¿":136465,"ĠاÙĦأب":136466,"ĠÑģÑĤоимоÑģÑĤÑĮ":136467,"ĠпÑĢава":136468,"mayın":136469,"หวย":136470,"ĠاÙĦطبÙĬعÙĬ":136471,"à¸Ĺีà¹Īà¸ŀัà¸ģ":136472,"ĠEstá":136473,"ÑĭваÑİÑĤ":136474,"بسÙĬ":136475,"بسÙĬØ·":136476,"Ġ×ij×¢×ijר":136477,"åı¯èĥ½ãģ§ãģĻ":136478,"Ġ×ĵ×ķ׾":136479,"Ġ×ĵ×ķ׾ר":136480,"ÙĩÙİØ§":136481,"воÑĢоÑĤ":136482,"ãģ¦ãģĦãģ¾ãģĹãģŁ":136483,"à¹Ĥà¸Ĺรศ":136484,"à¹Ĥà¸Ĺรศั":136485,"à¹Ĥà¸Ĺรศัà¸ŀ":136486,"à¹Ĥà¸Ĺรศัà¸ŀà¸Ĺà¹Į":136487,"Ġק׳":136488,"ĠاÙĦØ«ÙĨ":136489,"ĠاÙĦØ«ÙĨائÙĬØ©":136490,"Ġcoût":136491,"à¸ķิà¸Ķà¸ķัà¹īà¸ĩ":136492,"Ġörg":13649
3,"Ġörgüt":136494,"ĠاÙĦØ®ÙĦÙĬ":136495,"ĠاÙĦØ®ÙĦÙĬج":136496,"Ġbá»įn":136497,"×ķ׾×ķ×Ĵ×Ļ":136498,"ëŀľ":136499,"ĠÐijолÑĮ":136500,"ĠÐijолÑĮÑĪ":136501,"×Ĵ×ijר×Ļ×Ŀ":136502,"ÙĤÙĬد":136503,"×ij×Ļ×ĺ×ķ×Ļ":136504,"æīĵãģ¡":136505,"ĠolmuÅŁ":136506,"fäh":136507,"fähig":136508,"ลาà¸Ļ":136509,"ĠÙĤطر":136510,"שפ×Ķ":136511,"èªŃãĤĵãģ§":136512,"à¸Ĥวา":136513,"Ġchiếm":136514,"ãĤ¤ãĥ³ãĤ¿":136515,"ãĤ¤ãĥ³ãĤ¿ãĥ¼ãĥ":136516,"ãĤ¤ãĥ³ãĤ¿ãĥ¼ãĥį":136517,"ãĤ¤ãĥ³ãĤ¿ãĥ¼ãĥįãĥĥãĥĪ":136518,"Ġ׾ש×ŀ×ķר":136519,"ĠترÙĥ":136520,"ĠترÙĥÙĬا":136521,"ר×ķ×ĺ":136522,"ã썿ĢĿãģĦãģ¾ãģĹãģŁ":136523,"ĠاÙĦتÙĤ":136524,"Ġdư":136525,"ãģ¦ãģıãĤĮãĤĭ":136526,"ãģĹãģŁãģĵãģ¨":136527,"Ġróżne":136528,"ĠاÙĦØ·ÙģÙĦ":136529,"ĠPosté":136530,"Ġ×ŀש×ķ×Ŀ":136531,"ÑįÑĢ":136532,"ĠÑĢабоÑĤаеÑĤ":136533,"ãĤ·ãĥª":136534,"ãĤ·ãĥªãĥ¼ãĤº":136535,"Ġ×ij×Ķ×Ĺ׾×ĺ":136536,"×§×Ķ×Ļ׾×Ķ":136537,"ãĤ«ãĥ¡":136538,"ãĤ«ãĥ¡ãĥ©":136539,"O":136540,"ĠìĤ¬ìĿ´":136541,"Ġkì":136542,"ĠthÆ°á»Ľc":136543,"ضبط":136544,"ÙĤبÙĪÙĦ":136545,"åĪ¥ãģ®":136546,"Ġparticulière":136547,"ĠÑģвоем":136548,"Ġעסק":136549,"Ġעסק×Ļ×Ŀ":136550,"×ij×Ĺ×Ļר×ķת":136551,"×ij×Ļ׳×ķ":136552,"à¸ĭà¸Ń":136553,"Ġ×¢×ķ×ijר":136554,"ãģłãģ£ãģŁãģ®ãģ§":136555,"ıldıģı":136556,"Ùħدار":136557,"Ùħدارس":136558,"주ìĭľ":136559,"à¸Ńาศ":136560,"à¸Ńาศัย":136561,"Ġtấm":136562,"à¸ŀิà¸Ī":136563,"à¸ŀิà¸Īาร":136564,"à¸ŀิà¸Īารà¸ĵา":136565,"ÑĤелÑĮнÑĭе":136566,"ÑģкÑĥÑİ":136567,"ÐľÐĺ":136568,"à¹Ģà¸ģา":136569,"à¹Ģà¸ģาหล":136570,"à¹Ģà¸ģาหลี":136571,"×ĵ×Ĺ":136572,"à¹Ģà¸Ĭิà¸ĩ":136573,"ĠدÙĤÙĬÙĤØ©":136574,"íķĻìĥĿ":136575,"Ġש×IJ׾×Ķ":136576,"Ġcontrôle":136577,"Ġsituação":136578,"à¸Ĥà¸Ńà¸ĩà¸ľà¸¹à¹ī":136579,"ÙĨØ·ÙĤ":136580,"ê³¼íķĻ":136581,"หลายà¸Ħà¸Ļ":136582,"Ġnắng":136583,"ÙĤÙı":136584,"ì¡°ê±´":136585,"Ñķ":136586,"ãĥĥãģ¨":136587,"×ŀ×Ļ׾×Ķ":136588,"Grün":136589,"×Ļ×Ļ×¢":136590,"×Ļ×Ļ×¢×ķ×¥":136591,"×ŀ׳׼":136592,"ëŃIJ":136593,"×ŀ×¢×ŀ×ĵ":136594,"สำà¸Ļัà¸ģ":136595,"جدد":136596,"à¸Ħัà¸Ķ":136597,"Ġ×Ķ×ŀשפ":136598,"Ġ×Ķ×ŀשפ×Ĺ×Ķ":136599,"×ŀשק׾":136600,"ÙĦÙı":136601,"Ġtytu":136602,"ĠtytuÅĤ":136603,"ÑĪей":136604,"ĠìĿ¼ë¶Ģ":136605,"ÑĪение":136606,"Ġphóng":136607,"ĠìĹŃìĤ¬":136608,"ãĤ«ãĥ³":136609,"Ġtúi":136610,"ĠÙĨÙĪÙģ":136611,"ĠÙĨÙĪÙģÙħبر":136612,"grün":136613,"ĠاÙĦØ´ÙħاÙĦ":136614,"ÅĽwiadc":136615,"ÅĽwiadczenie":136616,"ער×Ķ":136617,"Ġ×¢×ķ×ij":136618,"Ġ×¢×ķ×ij×ĵ×Ļ×Ŀ":136619,"×ĵ×ķ×Ĵ×ŀ×IJ":136620,"ä»Ĭãģ¯":136621,"Ġvão":136622,"ĠТем":136623,"ÑģилÑĮ":136624,"Ġchợ":136625,"Ùħرا":136626,"ÙħراÙĤب":136627,"à¹Ħมà¹Īรูà¹ī":136628,"Ġرائع":136629,"×IJ׳×Ĺ׳×ķ":136630,"สà¹Īà¸ĩà¹Ģสริม":136631,"צ×Ĺ":136632,"ĠìŀĪìĸ´ìĦľ":136633,"Ġkurulu":136634,"ĠkuruluÅŁ":136635,"ĠÃĸzellik":136636,"ĠÃĸzellikle":136637,"Ġת×Ļ×§":136638,"Ġghé":136639,"ĠsprzÄĻ":136640,"ĠsprzÄĻt":136641,"ער×ķת":136642,"راØŃØ©":136643,"ãģ£ãģį":136644,"ãģ£ãģįãĤĬ":136645,"ĠìķĦëŀĺ":136646,"stituição":136647,"Ġдолжно":136648,"×Ķרש":136649,"×Ķרש×ŀ×Ķ":136650,"×Ķ׾×ļ":136651,"ãģ¡ãģª":136652,"ãģ¡ãģªãģ¿":136653,"ãģ¡ãģªãģ¿ãģ«":136654,"פ×Ĺ×ĵ":136655,"ĠاÙĦجÙħÙĬع":136656,"×ij×¢×ľ×Ļ":136657,"Ġtrùng":136658,"Ġפת×Ĺ":136659,"×ŀ׾×Ĺ×ŀת":136660,"ãĥĨãĥ¼ãĥ":136661,"ãĥĨãĥ¼ãĥŀ":136662,"Ùħتاب":136663,"Ùħتابعة":136664,"Ġ모ìĬµ":136665,"ÙĬص":136666,"åIJĪãģĨ":136667,"ĠYap":136668,"ĠYapı":136669,"ĠÑģказаÑĤÑĮ":136670,"몰":136671,"à¸Ĺีà¹Īสำà¸Ħัà¸į":136672,"ĠìĹĨìĬµëĭĪëĭ¤":136673,"Ġnhắc":136674,"Ġülkeler":136675,"Ġмногие":136676,"íķĺìħ¨":136677,"มาà¸ģà¸Ĺีà¹Īสุà¸Ķ":136678,"à¸ģà¹īา":136679,"à¸ģà¹īาว":136680,"Ġİyi":136681,"леж":136682,"лежа":136683,"ãĤ¸ãĥ§":136684,"à¸Ĺัà¸ŀ":136685,"اÙĪØ±":136686,"Ġ×Ĺ×ijר×Ļ":136687,"Ġ׾ש×Ŀ":136688,"첫":136689,"ĠTá»Ń":136690,"×ŀ×ķ׳×Ļ":136691,"ÙĤÙĪØ¯":136692,"à¸ģระà¹Ģà¸Ľ":136693,"à¸ģระà¹Ģà¸Ľà¹ĭ":136694,"à¸ģระà¹Ģà¸Ľà¹ĭา":136695,"ĠпÑĢоблемÑĭ":136696,"Ġaçıs":136697,"
Ġaçısından":136698,"Ġ×Ķ×ŀ׼":136699,"ĠÙħعظÙħ":136700,"ÙĤÙĬاس":136701,"ĠпÑĢодолж":136702,"ĠпÑĢодолжа":136703,"ĠverdiÄŁi":136704,"ĠпÑĢедмеÑĤ":136705,"ãģĦãģ¾ãģĻãģĮ":136706,"ĠëĶ°ë¥¸":136707,"ĠاÙĦÙĤÙĬاÙħ":136708,"ĠØ¥ÙĦÙĬÙĩا":136709,"ТÐIJ":136710,"поз":136711,"ãĤ·ãĥ¥":136712,"ä¸ĬãģĮãĤĬ":136713,"à¹Ģà¸Ķิมà¸ŀัà¸Ļ":136714,"à¸ģุล":136715,"ØŃرÙĬØ©":136716,"×§×ij×ķצ×ķת":136717,"믿":136718,"ĠاÙĦÙħÙĨا":136719,"ĠاÙĦÙħÙĨاطÙĤ":136720,"ĠвÑĭпол":136721,"ĠвÑĭполнÑı":136722,"ãĥĭãĤ¢":136723,"Ġê²°êµŃ":136724,"×Ĺ×ķ×ŀ":136725,"×Ĺ×ķ×ŀר×Ļ×Ŀ":136726,"ĠУкÑĢаинÑĭ":136727,"หà¸Ńม":136728,"ר×Ļס":136729,"ĠÑħоÑĤел":136730,"ĠобÑĢазованиÑı":136731,"Ġkhẳng":136732,"Ġmưa":136733,"Ġgörme":136734,"Ġgüçlü":136735,"سعÙī":136736,"มัà¹Īà¸Ļà¹ĥà¸Ī":136737,"íķĺê²łìĬµëĭĪëĭ¤":136738,"ĠполÑĥ":136739,"Ġfünf":136740,"ã썿ĢĿãģ£ãģ¦ãģĦãģ¾ãģĻ":136741,"Ġê·¸ê²ĥìĿĢ":136742,"ĠdÃ¼ÅŁÃ¼nce":136743,"ìŀł":136744,"ĠHÆ°á»Ľng":136745,"ĠTiá»ĥu":136746,"Ġçift":136747,"ãģijãģ°":136748,"à¸Īà¸Ļà¸ĸึà¸ĩ":136749,"à¸Ĺำà¹Ħà¸Ķà¹ī":136750,"ĠìŀIJì²´":136751,"Ġdõ":136752,"Ġdõi":136753,"à¸Īัà¸Ļ":136754,"à¸Īัà¸Ļà¸Ĺ":136755,"à¸Īัà¸Ļà¸Ĺรà¹Į":136756,"eceÄŁini":136757,"׳×ķער":136758,"غار":136759,"ĠاÙĦØ£ÙħرÙĬÙĥÙĬ":136760,"داعش":136761,"ĠбезопаÑģноÑģÑĤи":136762,"ĠбÑİ":136763,"ĠбÑİдж":136764,"ĠбÑİджеÑĤ":136765,"ãĥĬãĤ¤":136766,"à¸ŀà¸ļวà¹Īา":136767,"daÄŁ":136768,"×IJ×ķפף":136769,"íĹĮ":136770,"ãĥĢãĤ¤ãĤ¨":136771,"ãĥĢãĤ¤ãĤ¨ãĥĥãĥĪ":136772,"ĠëĮĢíĨµ":136773,"ĠëĮĢíĨµëł¹":136774,"Dİ":136775,"Ø£ØŃداث":136776,"ĠAÄŁ":136777,"ĠAÄŁust":136778,"ĠAÄŁustos":136779,"ØŃÙĦÙĪÙĦ":136780,"ĠwÅĽ":136781,"ĠwÅĽród":136782,"ĠÑģооÑĤвеÑĤ":136783,"ĠÑģооÑĤвеÑĤÑģÑĤв":136784,"ĠÑģооÑĤвеÑĤÑģÑĤвии":136785,"ĠLuáºŃt":136786,"Ġ׼׾פ×Ļ":136787,"ĠвеÑī":136788,"ĠвеÑīеÑģÑĤв":136789,"×§×Ļ×¥":136790,"ĠبÙĩذا":136791,"عاش":136792,"à¹Ģà¸Ľà¹ĩà¸Ļà¹Ģรืà¹Īà¸Ńà¸ĩ":136793,"ТÐķ":136794,"Ġ×ij×IJ×Ļ׳×ĺר׳×ĺ":136795,"سعد":136796,"Ġ×Ķ×ĺ×Ļפ×ķ׾":136797,"פ×Ļס":136798,"à¸ĩà¹Īายà¹Ĩ":136799,"ĠGerät":136800,"׾×Ļ×ĵ×Ķ":136801,"ĠÑĢиÑģк":136802,"׾ק×Ĺ":136803,"ннаÑı":136804,"ר×Ļ×ĵ":136805,"пÑĢакÑĤи":136806,"пÑĢакÑĤик":136807,"à¸Ĥัà¹īà¸Ļà¸ķà¸Ńà¸Ļ":136808,"à¸Ļà¹Īารัà¸ģ":136809,"larınızı":136810,"à¸Ńà¸Ļุà¸įา":136811,"à¸Ńà¸Ļุà¸įาà¸ķ":136812,"ĠzdjÄĻcia":136813,"Ġbây":136814,"ÑģÑĢ":136815,"ÑģÑĢоÑĩ":136816,"ãĥĭãĥ³ãĤ°":136817,"Ġöner":136818,"Ġöneri":136819,"ĠновÑĭÑħ":136820,"دعÙĪØ©":136821,"Ġgắn":136822,"ĠاÙĦÙĦبÙĨ":136823,"ĠاÙĦÙĦبÙĨاÙĨÙĬ":136824,"ãĥĨãĤ£ãĥ¼":136825,"ĠصØŃÙĬØŃ":136826,"емÑĭÑħ":136827,"çĸ²ãĤĮ":136828,"ĠпÑĢоиÑģ":136829,"ĠпÑĢоиÑģÑħодиÑĤ":136830,"สà¸ķิ":136831,"ĠTết":136832,"Ġ×Ķ׾׾×ķ":136833,"à¹Ģรืà¹Īà¸Ńà¸ĩà¸Ļีà¹ī":136834,"×ŀ×ij׳×Ķ":136835,"Ġconteúdo":136836,"Ġاخت":136837,"ĠاختÙĬار":136838,"ÙħسÙĦ":136839,"ÙħسÙĦسÙĦ":136840,"ëıĪ":136841,"Ġ׾×Ļ×ĵ":136842,"à¸ŀิà¸ĺี":136843,"ĠÑģовÑģ":136844,"ĠÑģовÑģем":136845,"ãģĮãģĤãĤĬãģ¾ãģĹãģŁ":136846,"Ġsóng":136847,"إصÙĦاØŃ":136848,"ë§ģ":136849,"ÙģÙĬر":136850,"ĠJeżeli":136851,"ìłľëıĦ":136852,"dÅĤug":136853,"ìĥģìĿĦ":136854,"ĠcáºŃn":136855,"Ġhá»įp":136856,"أست":136857,"أستاذ":136858,"Ġ×ŀ×Ļש×Ķ":136859,"Ġ×ŀ×Ļש×Ķ×ķ":136860,"ĠdÃły":136861,"ĠchÃłng":136862,"ãģ¡ãĤĥãĤĵãģ¨":136863,"ĠÄijám":136864,"Ġswój":136865,"Ġpoderá":136866,"ĠоÑĤлиÑĩа":136867,"Ġpériode":136868,"ündig":136869,"×ĺ×¢×Ł":136870,"ÑģÑĤÑĢоиÑĤелÑĮ":136871,"רת×Ļ":136872,"Ġ×Ļ×Ķ×Ļ×ķ":136873,"×ľ×¡":136874,"ĠاÙĦÙħÙĨزÙĦ":136875,"à¸Ļิà¹īว":136876,"иÑĦика":136877,"иÑĦикаÑĨи":136878,"ðŁĺī":136879,"Ġadına":136880,"ãĢĤãĢĤãĢĤ":136881,"×IJ×Ļף":136882,"ס×Ļר":136883,"ĠÙĬعد":136884,"çŃĶãģĪ":136885,"اÙĦجز":136886,"اÙĦجزائر":136887,"енÑĮк":136888,"รห":136889,"รหัส":136890,"ĠTürkçe":136891,"꾸":136892,"Ġ×Ļ×ķ׼׾":136893,"Ġש×ķ׳×Ķ":136894,"Ġ×ij×ŀצ×ij":136895,"ĠдейÑģÑĤвиÑĤелÑĮно":136896,"ĠبأÙĨÙĩ":1368
97,"×ŀ×§×ĵ":136898,"Ġ×Ķשק":136899,"Ø®ÙĬارات":136900,"Ġfı":136901,"Ġfırs":136902,"Ġfırsat":136903,"ëijĺ":136904,"ĠìĦľìļ¸":136905,"Ġ×Ķ×Ĵ×ķ×£":136906,"رعا":136907,"رعاÙĬØ©":136908,"ĠKết":136909,"кÑģи":136910,"ĠÑĥÑģлÑĥги":136911,"ноÑģÑĤей":136912,"ìļ´ëıĻ":136913,"ĠобÑĬÑı":136914,"ĠобÑĬÑıвл":136915,"неж":136916,"×Ķפ×ļ":136917,"Ġ×ij×¢×Ļ׳×Ļ":136918,"ëĨĴ":136919,"ĠпÑĢоÑĨед":136920,"ĠпÑĢоÑĨедÑĥÑĢ":136921,"Ġihtiy":136922,"Ġihtiyacı":136923,"Ġë°Ķëŀį":136924,"Ġë°ĶëŀįëĭĪëĭ¤":136925,"à¸ģลัว":136926,"ĠÑģложно":136927,"×§×Ļ×Ļ×ŀת":136928,"ĠÄIJình":136929,"ĠÙħÙĦÙģ":136930,"Ġà¹Ĥà¸Ķยมี":136931,"Ġkatkı":136932,"تØŃÙĪÙĬÙĦ":136933,"à¹Ħà¸ŀ":136934,"ĠHá»į":136935,"ñe":136936,"ĠдоÑħод":136937,"Ġthoải":136938,"íķĺìŬìķ¼":136939,"ãĤ¹ãĥĿãĥ¼ãĥ":136940,"ãĤ¹ãĥĿãĥ¼ãĥĦ":136941,"ĠGòn":136942,"Ġkè":136943,"Ġkèm":136944,"é̲ãĤģ":136945,"ãĤ¹ãĥ¼ãĥ":136946,"ãĤ¹ãĥ¼ãĥij":136947,"ãĤ¹ãĥ¼ãĥijãĥ¼":136948,"ĠgiÃłu":136949,"Ġإعادة":136950,"Ġ׾×ķ×§":136951,"Ġ׾×ķ×§×Ĺ":136952,"ĠÑħоÑĩеÑĤ":136953,"×ĺ׾×ķ×ķ":136954,"×ĺ׾×ķ×ķ×Ļ×ĸ":136955,"×ĺ׾×ķ×ķ×Ļ×ĸ×Ļ×Ķ":136956,"Ġthuyết":136957,"ãģĿãĤĮãģ§":136958,"Ġvardı":136959,"à¹Ħรà¹ī":136960,"عبد":136961,"ĠRepública":136962,"ãĥ¼ãĤ¿ãĥ¼":136963,"Ġ×ŀ×IJ×ķת":136964,"à¹Ħà¸Ľà¹ģลà¹īว":136965,"Ġyapılacak":136966,"ãĤ¹ãĤ¿ãĥ¼ãĥĪ":136967,"ãģ»ãģ¼":136968,"ĠkoÅŁ":136969,"ĠмаÑĤеÑĢи":136970,"Ġsiècle":136971,"ĠاÙĦÙħختÙĦÙģ":136972,"ĠاÙĦÙħختÙĦÙ쨩":136973,"Ġ׾קר×IJ":136974,"Ġ׾קר×IJת":136975,"Ġ×Ķפ×ķ×¢×ľ":136976,"Ġtòa":136977,"ĠrÆ¡i":136978,"åij¨ãĤĬ":136979,"à¸Ŀà¸Ļ":136980,"jÅĽÄĩ":136981,"ĠìķĬìĿĦ":136982,"اÙĨتÙĤاÙĦ":136983,"ëĸł":136984,"иваеÑĤ":136985,"ãĥĪãĥ«":136986,"ĠاÙĦÙģÙĦسطÙĬÙĨÙĬØ©":136987,"à¸ģลà¹Īาววà¹Īา":136988,"اÙĥت":136989,"ĠÃĸl":136990,"ĠÑĢеÑĪи":136991,"ĠÑĢеÑĪил":136992,"Ġ׳×ķספ×ķת":136993,"Ġìłķì¹ĺ":136994,"влеÑĩен":136995,"ÙħرØŃÙĦØ©":136996,"Ġcomeça":136997,"Ġyık":136998,"ìĤ´":136999,"à¸ĺà¸Ļา":137000,"à¸ĺà¸Ļาà¸Ħาร":137001,"à¸Ńà¸Ļา":137002,"à¸Ńà¸Ļาà¸Ħ":137003,"à¸Ńà¸Ļาà¸Ħà¸ķ":137004,"Ġpequeña":137005,"ä»ķäºĭãĤĴ":137006,"ĠبذÙĦÙĥ":137007,"Ġнового":137008,"ãģĹãģ¦ãģĦãģªãģĦ":137009,"ĠاÙĦÙħÙĬاÙĩ":137010,"à¸ģà¹ĩà¹Ģà¸Ľà¹ĩà¸Ļ":137011,"ĠжÑĥÑĢ":137012,"ĠжÑĥÑĢнал":137013,"веÑģ":137014,"ختار":137015,"Ġ매ìļ°":137016,"ĠMã":137017,"ĠавÑĤомаÑĤÑĭ":137018,"ضعÙģ":137019,"ĠاÙĦÙģÙĥر":137020,"ãģ§ãģĻãģ®ãģ§":137021,"ãĥ¡ãĥ³ãĥIJãĥ¼":137022,"ĠкÑĢÑĥг":137023,"ĠاÙĦسÙĦطة":137024,"à¸Ħรัà¹īà¸ĩà¹ģรà¸ģ":137025,"à¸ģระà¸Ĺรว":137026,"à¸ģระà¸Ĺรวà¸ĩ":137027,"ÑĨов":137028,"éķ·ãģĦ":137029,"大ãģįãģĦ":137030,"ĠgeçmiÅŁ":137031,"ìĦ±ìĿ´":137032,"Ġצר×Ļ׼×Ķ":137033,"ĠмоÑī":137034,"ĠмоÑīн":137035,"Ġ×§×Ļש":137036,"Ġ×§×Ļש×ķר×Ļ×Ŀ":137037,"ĠNasıl":137038,"гÑĢан":137039,"Ġ×ŀ×ķצר×Ļ×Ŀ":137040,"Ġ×ŀס×ķ×Ĵ":137041,"Ġyür":137042,"Ġyürüt":137043,"Ġ׾×Ĺצ×ķ":137044,"×ķÖ¼":137045,"ĠìŀĪìĹĪëĭ¤":137046,"Ġterör":137047,"ĠThương":137048,"ĠÙĪÙĬÙħ":137049,"ĠÙĪÙĬÙħÙĥÙĨ":137050,"جÙĪÙĨ":137051,"ĠÙĪØºÙĬرÙĩا":137052,"×ŀפ×ķ":137053,"×Ĵ×ķר×ŀ×Ļ×Ŀ":137054,"׼×ij×Ļש":137055,"ĠاÙĦÙĦغ":137056,"ĠاÙĦÙĦغة":137057,"شرÙĥ":137058,"ĠاÙĦراب":137059,"ĠاÙĦرابع":137060,"ĠпÑĢек":137061,"ĠпÑĢекÑĢаÑģ":137062,"ĠпÑĢекÑĢаÑģн":137063,"ĠenergÃŃa":137064,"×§×ĵ×ŀ×Ļ":137065,"ãģıãģªãģ£ãģŁ":137066,"ĠÄijứ":137067,"ĠÄijứa":137068,"Servi":137069,"Serviço":137070,"Ġkaldır":137071,"åĥįãģį":137072,"Ġодеж":137073,"Ġодежд":137074,"물ìĿĦ":137075,"ãģĿãģĨãģ§":137076,"ãģĮãģĤãĤĮãģ°":137077,"ìĻķ":137078,"צ×ĵ×§":137079,"Ġartır":137080,"Ġileti":137081,"ĠiletiÅŁim":137082,"ãĤĪãģĨãģ§":137083,"ãĥĪãĥ¼":137084,"ãĤ¢ãĥĭ":137085,"ãĤ¢ãĥĭãĥ¡":137086,"×ĺ×Ļ×Ļ׾":137087,"ãĥķãĥªãĥ¼":137088,"ãĥĿãĥ³":137089,"ÐŁÑĢо":137090,"ĠعاÙĦÙĬØ©":137091,"ĠÃ¶ÄŁret":137092,"ĠÃ¶ÄŁretmen":137093,"ĠкаÑĩеÑģÑĤва":137094,"Ġ×Ķ×ĺ×ij×¢":137095,"ĠзнаÑİ":137096,"ãģ¦ãģıãĤĭ":137097,"Ġmừng":137098,"Ùħ
ÙĪØª":137099,"ש×ķ×ŀר":137100,"×Ĺ׾×ij":137101,"ĠwzglÄĻ":137102,"ĠwzglÄĻdu":137103,"ë²Ī째":137104,"Ġtá»ĵ":137105,"Ġtá»ĵn":137106,"ãĥ¯ãĥ¼ãĤ¯":137107,"Ġpożycz":137108,"Ġpożyczk":137109,"×Ļ×ķצר×Ļ×Ŀ":137110,"ÙĥرÙħ":137111,"ĠгаÑĢ":137112,"ĠгаÑĢан":137113,"ĠгаÑĢанÑĤи":137114,"ลà¹īาà¸ĩ":137115,"ĠìĺģíĻĶ":137116,"×ĺ×Ļס":137117,"Ġthẻ":137118,"ĠìŀĪëĭ¤ê³ł":137119,"اÙĦتز":137120,"اÙĦتزاÙħ":137121,"ĠнаÑĪи":137122,"isée":137123,"ãģĵãĤĮãĤĴ":137124,"Ġmẽ":137125,"ضÙĦ":137126,"بÙĪØª":137127,"Ġ׼׼×Ķ":137128,"hợ":137129,"ĠاÙĦسÙĪØ±ÙĬØ©":137130,"Ġ×ľ×¢×ķ×ŀ":137131,"Ġ×ľ×¢×ķ×ŀת":137132,"ĠbaÅŁar":137133,"ĠbaÅŁarılı":137134,"еÑģÑĤÑĮ":137135,"à¸Ħรี":137136,"à¸Ħรีม":137137,"ĠìłĦì²´":137138,"ĠسÙĬÙĥÙĪÙĨ":137139,"Ġ×ŀ×ĵ×ķ×¢":137140,"ĠëķĮ문ìĿ´ëĭ¤":137141,"Ġcứng":137142,"gerät":137143,"ĠмиÑĢ":137144,"ĠмиÑĢе":137145,"ĠÙĥÙĬÙģÙĬØ©":137146,"Ġפר×ĺ×Ļ×Ŀ":137147,"ĠgoÅĽci":137148,"иÑĤеÑģÑĮ":137149,"ÑĥÑĪки":137150,"ؤÙħÙĨ":137151,"Ġ×IJ׼ף":137152,"ĠاÙĦرجÙĦ":137153,"Ġlá»įc":137154,"à¹Ģรียà¸ģวà¹Īา":137155,"ãģĵãģ®ãĤĪãģĨãģª":137156,"ë§Įíģ¼":137157,"ĠпеÑĩ":137158,"ÙĪÙĦات":137159,"ĠÃľye":137160,"liÄŁinde":137161,"à¸Ħะà¹ģà¸Ļ":137162,"à¸Ħะà¹ģà¸Ļà¸Ļ":137163,"ãĤĭãģĵãģ¨ãģ¯":137164,"วิà¹Ģà¸Ħร":137165,"วิà¹Ģà¸Ħราะ":137166,"วิà¹Ģà¸Ħราะหà¹Į":137167,"ĠвозможноÑģÑĤи":137168,"ĠاÙĦÙĨساء":137169,"ãĥīãĥ©ãĥŀ":137170,"Ġgüc":137171,"Ġgücü":137172,"Ġtưá»Ŀng":137173,"Ġacompaña":137174,"ãĤ¤ãĥ©":137175,"קצ×ij":137176,"ĠYö":137177,"ĠYönet":137178,"ĠYönetim":137179,"à¸ªà¸±à¸¡à¸ľ":137180,"à¸ªà¸±à¸¡à¸ľà¸±à¸ª":137181,"à¸Ļาม":137182,"ĠÄijợi":137183,"à¹ģหà¹Īà¸ĩà¸Ĭาà¸ķิ":137184,"ãģĿãĤĮãģ§ãĤĤ":137185,"ätig":137186,"ת×ķ×Ŀ":137187,"ĠbaÅŁlat":137188,"ĠвÑģей":137189,"ת×Ļ×§":137190,"ת×Ļ×§×ķף":137191,"ĠNgô":137192,"ĠGeschä":137193,"ĠGeschäfts":137194,"Ø£Ùħ":137195,"Ø£Ùħراض":137196,"à¹Ģà¸Ĺà¸Ħà¸Ļ":137197,"à¹Ģà¸Ĺà¸Ħà¸Ļิ":137198,"à¹Ģà¸Ĺà¸Ħà¸Ļิà¸Ħ":137199,"ĠменÑĮ":137200,"ĠменÑĮÑĪе":137201,"Ġölç":137202,"Ġölçü":137203,"ĠÙĬجعÙĦ":137204,"ĠÄijỡ":137205,"ש×Ļ׾":137206,"ש×Ļ׾×ķ×ij":137207,"ĠGrÃ¶ÃŁe":137208,"ĠÙĩاتÙģ":137209,"รà¹īาà¸Ļà¸Ńาหาร":137210,"×Ķ׾×Ļ׼":137211,"×Ķ׾×Ļ׼×Ļ":137212,"иÑĢÑĥÑİÑī":137213,"èĭ¥ãģĦ":137214,"ĠÃĸzel":137215,"ãģĦãģŁãĤī":137216,"à¸Ħำà¸ĸาม":137217,"ĠzostaÅĤy":137218,"Ġ×Ķס×Ļפ×ķר":137219,"×Ķ×ķ׾":137220,"×Ķ×ķ׾×ļ":137221,"à¹Ģà¸Ĭà¹Īà¸Ļà¸ģัà¸Ļ":137222,"à¹Ĥà¸Ĩ":137223,"à¹Ĥà¸Ĩษ":137224,"à¹Ĥà¸Ĩษà¸ĵา":137225,"×IJרצ×ķת":137226,"×Ĵרפ×Ļ":137227,"Ġaoût":137228,"ĠÙĬرÙĬد":137229,"تÙĪØ¬":137230,"تÙĪØ¬ÙĬÙĩ":137231,"ĠÑįÑĤап":137232,"ãĤ¹ãĤ¿ãĥ³":137233,"Ġkró":137234,"Ġkrótk":137235,"ãĤĴ使ãģĨ":137236,"ì·¨":137237,"éĸ¢ãĤı":137238,"à¸Ķà¹īวยà¸Ħวาม":137239,"à¸Ļำà¹Ģสà¸Ļà¸Ń":137240,"Ġayrıca":137241,"à¸Īà¹īาà¸ĩ":137242,"ĠÑĦоÑĤогÑĢаÑĦ":137243,"ĠвеÑĩ":137244,"ĠвеÑĩеÑĢ":137245,"åĩºãģĹãģŁ":137246,"ĠХо":137247,"Ġ×ŀר×Ĵ×Ļש":137248,"à¹ĥหà¹īà¹Ģà¸Ľà¹ĩà¸Ļ":137249,"ãĤĴ缮":137250,"ãĤĴ缮æĮĩ":137251,"׾×ŀ×Ļ×Ŀ":137252,"nÄħÅĤ":137253,"ĠÑģÑĤанд":137254,"ĠÑģÑĤандаÑĢÑĤ":137255,"ĠSüd":137256,"ĠTâm":137257,"اختبار":137258,"à¹Ģà¸ģà¸Ńรà¹Į":137259,"ÙħسرØŃ":137260,"Ġbiá»ĩn":137261,"بÙı":137262,"ĠصاÙĦ":137263,"ĠصاÙĦØŃ":137264,"ĠPhụ":137265,"íľ´":137266,"ãĥ¬ãĥĵãĥ¥ãĥ¼":137267,"Ġbụng":137268,"Ġrégime":137269,"ĠأشÙĩر":137270,"ĠÑĢабоÑĤник":137271,"à¸Ŀัà¸Ļ":137272,"اعتÙħ":137273,"اعتÙħاد":137274,"ĠзамеÑĤ":137275,"ãģ¾ãģ£ãģ¦":137276,"Ġchặt":137277,"æĿ¥ãĤĭ":137278,"ĠاÙĦÙĤÙĪØ§Øª":137279,"ãģ«åħ¥ãģ£ãģ¦":137280,"تØŃاÙĦÙģ":137281,"ÙħزÙĬد":137282,"ĠÙĬصÙĦ":137283,"ìĹ¼":137284,"à¹Ģà¸Ĭà¹ĩ":137285,"à¹Ģà¸Ĭà¹ĩà¸Ħ":137286,"Ġká»ĭ":137287,"Ġká»ĭp":137288,"ĠìķĦì§ģ":137289,"×IJ׳×Ĵ":137290,"ĠоблаÑģÑĤÑĮ":137291,"ĠpomocÄħ":137292,"Ġ×ķש׾":137293,"ëĵłì§Ģ":137294,"ĠGiám":137295,"ĠStück":137296,"Ġcháy":137297,"ĠëĤĺìĺ¤":137298,"ש×Ļ×ĺת":137299,"×ŀ×ĵר":137300,"×ŀ×ĵר×Ļ×ļ
":137301,"Ġsüreç":137302,"ква":137303,"×ij׾×Ļ×Ŀ":137304,"×Ķת×Ļ":137305,"×Ķת×Ļ×Ļ×Ĺס":137306,"ÙĤباÙĦ":137307,"Ġס×ķ×Ĵ":137308,"Ġס×ķ×Ĵ×Ļ":137309,"ÑģÑĤолÑĮ":137310,"ä½ķãĤĤ":137311,"×ĸ׼×ķר":137312,"è²·ãģĨ":137313,"å®īãģı":137314,"à¸Ħรัà¹īà¸ĩà¸Ļีà¹ī":137315,"köp":137316,"ĠÑģеÑĢвиÑģ":137317,"оÑĩнÑĭÑħ":137318,"ê±°ëŀĺ":137319,"تأÙĥ":137320,"تأÙĥÙĬد":137321,"×ĵ׾ק":137322,"ĠпоÑĩем":137323,"ĠпоÑĩемÑĥ":137324,"пиÑģаÑĤÑĮ":137325,"×ijשר":137326,"ĠHÃłng":137327,"ĠTìm":137328,"Ġtrừ":137329,"ãĤ»ãĥĥãĤ¯ãĤ¹":137330,"×ķ׳×Ĵ":137331,"mızda":137332,"пÑģи":137333,"ĠìŀĪ기":137334,"Ġrút":137335,"زاÙĨ":137336,"تÙĨÙĪØ¹":137337,"ÙħÙĤا":137338,"ÙħÙĤاÙĪÙħØ©":137339,"Ġ׾צ×ķר×ļ":137340,"Ġ×ij×Ļר×ķש׾×Ļ×Ŀ":137341,"ãĥ´ãĤ£":137342,"ebile":137343,"ebileceÄŁi":137344,"ãĥ¦ãĥ¼ãĤ":137345,"ãĥ¦ãĥ¼ãĤ¶":137346,"ãĥ¦ãĥ¼ãĤ¶ãĥ¼":137347,"ãĤĴä½ľãĤĭ":137348,"ÑģмеÑĢ":137349,"ÑģмеÑĢÑĤ":137350,"Ġì§ģ":137351,"Ġì§ģìłij":137352,"ĠÐŁÐ°ÑĢ":137353,"ØŃاض":137354,"ØŃاضر":137355,"ÙħÙĥاÙģ":137356,"ÙħÙĥاÙģØŃØ©":137357,"ลิà¸Ļ":137358,"ãģ¦ãģįãģ¦":137359,"ÑĢоÑģл":137360,"ĠÄ°ÅŁte":137361,"ÙĤصÙĬر":137362,"Ġ×ij×Ĵ×Ļ׾":137363,"Ġ×ŀת×IJ×Ļ×Ŀ":137364,"Ġ×Ķ×Ĺ×ĵ":137365,"Ġ×Ķ×Ĺ×ĵש×Ķ":137366,"ר×ķ×¢":137367,"Ġproduktów":137368,"ĠÙħصدر":137369,"неÑĨ":137370,"ĠاÙĦعÙħÙĦات":137371,"Ġçıkma":137372,"ĠدبÙĬ":137373,"×§×Ļף":137374,"ת×IJר":137375,"ת×IJר×Ļ×ļ":137376,"׳×Ļ×Ļ×ĵ":137377,"صراع":137378,"lève":137379,"צ×Ļר":137380,"à¸Ķัà¸Ļ":137381,"à¹ĥหà¹īà¹Ħà¸Ķà¹ī":137382,"ãĤ¿ãĤ¤ãĥł":137383,"Ġgiảng":137384,"Ð¡ÐŁ":137385,"ĠاÙĦÙħØŃÙĦ":137386,"ĠاÙĦÙħØŃÙĦÙĬØ©":137387,"ĠTất":137388,"׾×ķ×ĺ":137389,"há»ķ":137390,"Ġaméric":137391,"Ġaméricain":137392,"Ġ×ijש׾×ij":137393,"Ġ׾×IJ×ķ×ŀ×Ļ":137394,"Ġpeça":137395,"ĠÑĢазнÑĭÑħ":137396,"ãģĦãĤĭãģ¨":137397,"ãĥĩãĥ³":137398,"סקר":137399,"Ġ×Ķ×ŀ×Ĺ×Ļר":137400,"ãģ¨ãģĦãģĨãĤĤãģ®":137401,"رتبط":137402,"ĠиÑģÑĤоÑĩ":137403,"ĠиÑģÑĤоÑĩник":137404,"สมัà¸Ħรสมาà¸Ĭิà¸ģ":137405,"Ġà¸Ĺัà¹īà¸ĩ":137406,"Ġà¸Ĺัà¹īà¸ĩà¸Ļีà¹ī":137407,"ĠTáºŃp":137408,"ãģ£ãģ¦ãģĦãģĨ":137409,"ĠاÙĦÙĪØµÙĪÙĦ":137410,"Ġdécada":137411,"ĠоÑĦоÑĢм":137412,"ĠоÑĦоÑĢмлен":137413,"สำหรัà¸ļà¸ģาร":137414,"Ġogóln":137415,"ãģĨãģ¡ãģ«":137416,"Ġvárias":137417,"ãģĻãģİãĤĭ":137418,"ÙĪÙĩا":137419,"à¹Ĥà¸Ľà¸£à¸Ķ":137420,"ĠÐłÐ¾ÑģÑģиÑı":137421,"人ãĢħ":137422,"ãģĹãģ¦ãģįãģŁ":137423,"Ġsırasında":137424,"Ġngôn":137425,"سÙĨØ©":137426,"تÙħتع":137427,"×ŀ׼×ij×Ļ":137428,"Ġnhấn":137429,"×¢×ŀ×Ļ×ĵ":137430,"Ứ":137431,"жиÑĤÑĮ":137432,"ãĤīãģĽ":137433,"gráf":137434,"gráfica":137435,"ĠÙĤÙĪÙĦ":137436,"ĠÙĤÙĪÙĦÙĩ":137437,"ëĭ¨ì²´":137438,"หà¹īา":137439,"หà¹īาม":137440,"使ãģ£ãģ¦":137441,"ת×Ļ×ij":137442,"ת×Ļ×ijת":137443,"iá»ĥu":137444,"à¹ģà¸Ĭม":137445,"à¹ģà¸Ĭà¸¡à¸Ľ":137446,"à¹ģà¸Ĭà¸¡à¸Ľà¹Į":137447,"Ậ":137448,"ĠëĤĺëĿ¼":137449,"ĠÙħباشرة":137450,"ĠtrÄĥm":137451,"سÙĥÙĪ":137452,"ĠاÙĦذÙī":137453,"Ġbiç":137454,"Ġbiçim":137455,"تراجع":137456,"ĠобеÑģп":137457,"ĠобеÑģпеÑĩ":137458,"ĠобеÑģпеÑĩива":137459,"ĠвоздÑĥÑħ":137460,"ÑĭваÑĤÑĮ":137461,"ÙĦØŃÙĤ":137462,"ĠMüdü":137463,"ĠMüdürl":137464,"ĠMüdürlÃ¼ÄŁÃ¼":137465,"Ġyaptır":137466,"Ġפרס":137467,"Ġפרס×ķ×Ŀ":137468,"Ø·ÙĪØ±":137469,"ÑģÑĤвоваÑĤÑĮ":137470,"ìŀ¥ìĿĦ":137471,"à¸Ĺีà¹Īà¸Ķีà¸Ĺีà¹Īสุà¸Ķ":137472,"à¸Ńัล":137473,"ÑĢÑİ":137474,"ÙħستÙĤبÙĦ":137475,"ÑģлÑĥÑĪ":137476,"ÑģлÑĥÑĪа":137477,"èªįãĤģ":137478,"Ġ׾×Ļ×ŀ":137479,"Ġ׾×Ļ×ŀ×ķ×ĵ×Ļ":137480,"תש×ķ×ij":137481,"תש×ķ×ij×ķת":137482,"ĠgerçekleÅŁtiril":137483,"ĠاÙĦاتÙ쨧ÙĤ":137484,"ĠÑĥÑĢовне":137485,"ĠÑĤÑĢав":137486,"Ġ×Ķ×ŀ×ķף":137487,"ØŃÙģØ§Ø¸":137488,"ĠÙħÙIJ":137489,"ĠÙħÙIJÙĨ":137490,"ĠÙħÙIJÙĨÙĴ":137491,"Ġdemás":137492,"×ŀ×ķ×ĸ×Ļ×§×Ķ":137493,"ש×Ļ×Ĺ×Ķ":137494,"Ġbú":137495,"алÑĮнÑĭм":137496,"ãĤıãģŁ":137497,"ãĤıãģŁãģĹ":137498,"ĠاÙĦÙħÙĪØ§Ø¯":137499,"×ª×Ľ×ł":137500,"×ª×Ľ×ł×ķף":137501,"ãĥŃãĥĥãĤ¯":137502,"
hiếu":137503,"ĠÑĥме":137504,"ÙħØŃاÙĪÙĦØ©":137505,"×IJ×ķשר":137506,"ĠконкÑĥÑĢ":137507,"ĠконкÑĥÑĢÑģ":137508,"Ġ×ŀ×ij×Ĺ":137509,"Ġ×ŀ×ij×Ĺ×Ļ×ł×ª":137510,"Ġanlam":137511,"Ġanlamı":137512,"Ġliá»ĩt":137513,"ĠвÑħод":137514,"ĠHình":137515,"ĠÙĨÙĬ":137516,"ĠÙĨÙĬÙĪØ²":137517,"ãĤ¸ãĥ£ãĥ¼":137518,"×ij×Ļ×¥":137519,"ÑĤелÑĮнÑĭÑħ":137520,"à¸Ĺุà¸ģà¸Ńยà¹Īาà¸ĩ":137521,"ĠkiÅŁinin":137522,"Ø£Ùĥثر":137523,"ĠиÑģÑĤоÑĢии":137524,"Ġë³ĢíĻĶ":137525,"×¤×ľ×¡×ĺ":137526,"×¤×ľ×¡×ĺ×Ļ׳×Ļ":137527,"ĠÑģеÑĤ":137528,"ĠÑģеÑĤи":137529,"dıģımız":137530,"íķĺëıĦë¡Ŀ":137531,"×Ķר":137532,"×Ķר×ij×Ķ":137533,"ãģĻãĤĭãģĵãģ¨ãģ¯":137534,"Ġphiếu":137535,"تØŃسÙĬÙĨ":137536,"ĠÅĽrod":137537,"ĠÅĽrodow":137538,"ĠÅĽrodowisk":137539,"ĠÑĢаÑģÑħод":137540,"برÙĬد":137541,"ĠرÙĬ":137542,"ĠرÙĬاÙĦ":137543,"Ġ×ķ׼×ļ":137544,"ì§ĢìļĶ":137545,"׼×ŀ×ķ":137546,"Ġ×¢×ľ×Ļ×Ķ×Ŀ":137547,"fÃŃcio":137548,"Ġkararı":137549,"tıģını":137550,"ĠСов":137551,"ĠСовеÑĤ":137552,"ãģĬéĩijãĤĴ":137553,"междÑĥ":137554,"междÑĥна":137555,"междÑĥнаÑĢод":137556,"междÑĥнаÑĢодн":137557,"Ġmá»Ŀi":137558,"ĠاÙĦØ¥ÙĬر":137559,"ĠاÙĦØ¥ÙĬراÙĨÙĬ":137560,"ĠاÙĦرÙĪØ³ÙĬ":137561,"صÙĨد":137562,"صÙĨدÙĪÙĤ":137563,"ĠاÙĦØ¥ÙĨترÙĨت":137564,"Ġtắm":137565,"ĠÑĤакого":137566,"Ġ×ij׾×ķ×Ĵ":137567,"Ġücrets":137568,"Ġücretsiz":137569,"×Ĺ×ĸ×Ļר":137570,"ìĸ´ìķ¼":137571,"ĠPhần":137572,"ï¼ľ":137573,"Ġ×ĺ×ij×¢":137574,"Ġ×ĺ×ij×¢×Ļ":137575,"×IJ×ŀ×IJ":137576,"اÙĤÙĦ":137577,"Ġcondições":137578,"ÙĤاتÙĦ":137579,"ĠÑĢезÑĥлÑĮÑĤаÑĤе":137580,"ĠÑģвоими":137581,"צ×ij×Ļ×¢":137582,"géni":137583,"Ġzes":137584,"Ġzespo":137585,"ĠzespoÅĤ":137586,"ÑĪив":137587,"Ġפר×ĺ×Ļ×ķת":137588,"ÙħستشÙģ":137589,"ÙħستشÙģÙī":137590,"شرع":137591,"ĠkoÅĽci":137592,"Ġ×Ķ×IJ×Ļ׳×ĺר׳×ĺ":137593,"ĠЧеÑĢ":137594,"поÑĩÑĤ":137595,"Ġactivités":137596,"çŁ¥ãģ£ãģ¦":137597,"Ġ×ij×ĸ×Ķ":137598,"Ġyüzden":137599,"ãģªãĤĬãģ¾ãģĽãĤĵ":137600,"Ġíĺ¹":137601,"Ġíĺ¹ìĿĢ":137602,"Ġ×ŀש׳×Ķ":137603,"ĠÐĴеÑĢ":137604,"Ġ×ij×IJ×ķת×ķ":137605,"éĿ¢çϽ":137606,"éĿ¢çϽãģĦ":137607,"شرØŃ":137608,"gründe":137609,"Ù쨴":137610,"Ù쨴ÙĦ":137611,"Ġséjour":137612,"ë´IJ":137613,"Ġrôle":137614,"شعار":137615,"емÑĭе":137616,"ĠاÙĦجسÙħ":137617,"алÑĮное":137618,"Ġìĥģíĥľ":137619,"D":137620,"ë¯Ģë¡ľ":137621,"ĠÙĨÙĤØ·":137622,"ĠÙĨÙĤطة":137623,"ãģĿãģĨãģł":137624,"ãģĻãĤĭãģ®ãģĮ":137625,"หู":137626,"Ġnhá»ĭ":137627,"Ġeconómica":137628,"ס×ĺ×ķ×ĵ":137629,"ס×ĺ×ķ×ĵ׳×ĺ":137630,"มีà¹Ĥà¸Ńà¸ģาส":137631,"Ġgestão":137632,"รูà¹īวà¹Īา":137633,"Ġloạt":137634,"ĠاÙĦÙħÙı":137635,"ĠاÙĦØŃÙħÙĦ":137636,"ĠاÙĦعÙħÙĦÙĬØ©":137637,"Ġê²ĥëıĦ":137638,"ĠÐľÐ¾Ñģква":137639,"×§×ĺ×ķר":137640,"ĠподÑĢоб":137641,"ĠподÑĢобн":137642,"Ġlưng":137643,"تÙ쨳":137644,"تÙ쨳ÙĬر":137645,"ĠاÙĦبع":137646,"ĠاÙĦبعض":137647,"ئت":137648,"ÐķÐĿ":137649,"ìĹ°êµ¬":137650,"à¹ĥหà¹īà¸Ħุà¸ĵ":137651,"ãģĤãĤĬãģ¾ãģĹãģŁ":137652,"Ġbirka":137653,"Ġbirkaç":137654,"Ġİsl":137655,"Ġİslam":137656,"çĹĽãģ¿":137657,"Ġhảo":137658,"ĠмаÑı":137659,"ĠiÅŁÃ§i":137660,"ש×":137661,"ש×ģ":137662,"à¸ģารà¹Ģมืà¸Ńà¸ĩ":137663,"×ķ×Ķר":137664,"Ġchó":137665,"ëĨĢ":137666,"Ġyanlı":137667,"ĠyanlÄ±ÅŁ":137668,"幸ãģĽ":137669,"×IJר×Ĵ×ķ׳×Ļ":137670,"à¸Ńาà¸Īาร":137671,"à¸Ńาà¸Īารยà¹Į":137672,"ĠинÑĦоÑĢмаÑĨиÑİ":137673,"ÐĵÐŀ":137674,"׳×Ĺש":137675,"ĠìķĮìķĦ":137676,"ĠÑħаÑĢакÑĤеÑĢиÑģÑĤ":137677,"ĠÑħаÑĢакÑĤеÑĢиÑģÑĤик":137678,"à¸Ħุà¸ĵสามารà¸ĸ":137679,"è¦ĭãģĪãĤĭ":137680,"à¸Ĭัà¸Ķà¹Ģà¸Ī":137681,"à¸Ĭัà¸Ķà¹Ģà¸Īà¸Ļ":137682,"ĠdziaÅĤal":137683,"ĠdziaÅĤalnoÅĽci":137684,"à¹Ĥà¸ŀสà¸ķà¹Į":137685,"ĠÐļол":137686,"ĠÙģÙĩÙĬ":137687,"Ġ×ŀפ׳×Ļ":137688,"Ġ×Ķקשר":137689,"ÙħرÙĥ":137690,"ÙħرÙĥز":137691,"Ġhoá":137692,"Ġапп":137693,"ĠаппаÑĢаÑĤ":137694,"Ġpami":137695,"ĠpamiÄĻ":137696,"ĠpamiÄĻta":137697,"Ġçünkü":137698,"×ĵ×ķף":137699,"ãģ¯ãģĵãģ¡ãĤī":137700,"ĠMÃł":137701,"ĠÙĬÙĤدÙħ":137702,"ĠпÑĢез":137703
,"ĠпÑĢезиденÑĤ":137704,"à¸Ńุà¸ķ":137705,"à¸Ńุà¸ķสา":137706,"à¸Ńุà¸ķสาห":137707,"à¸Ńุà¸ķสาหà¸ģรรม":137708,"ì§ĢìĽIJ":137709,"Ġ×IJפשר×ķת":137710,"schüt":137711,"schütz":137712,"ĠTiên":137713,"Ġsayılı":137714,"ĠгÑĢÑĥппÑĭ":137715,"оÑĩнÑĭй":137716,"Ġ×ľ×¢×ŀ×ķ×ĵ":137717,"ĠwrzeÅĽ":137718,"ĠwrzeÅĽnia":137719,"ĠÄIJầu":137720,"à¹Ģà¸Ĥà¹īารà¹Īวม":137721,"nızda":137722,"Ø®ÙĬص":137723,"Ġgünc":137724,"Ġgüncel":137725,"ĠÙĦÙĩذÙĩ":137726,"ĠÙĬعتبر":137727,"légi":137728,"ãĤıãģĭãĤĭ":137729,"Ġrừng":137730,"ظÙĩ":137731,"ظÙĩÙĪØ±":137732,"Ġ×ŀ×ij×Ļף":137733,"Ġ기íĥĢ":137734,"åĪĩãĤĮ":137735,"lanmÄ±ÅŁ":137736,"à¸Ĺีà¹Īมีà¸Ħวาม":137737,"Ġhá»ģ":137738,"تÙĪØ¬Ùĩ":137739,"ĠاÙĦإدارة":137740,"Ġútil":137741,"ספ×ķ":137742,"à¸Ħวามรัà¸ģ":137743,"à¹Ĥฮ":137744,"ĠполиÑĤ":137745,"ĠполиÑĤик":137746,"Ġsatın":137747,"ĠÅŀimdi":137748,"×ŀ×ķר×Ļ×Ŀ":137749,"ìķĺëĭ¤":137750,"×Ĺ×ķ×ķ":137751,"×Ĺ×ķ×ķ×Ļ×Ķ":137752,"à¸Ħà¸Ńมà¸ŀิ":137753,"à¸Ħà¸Ńมà¸ŀิว":137754,"à¸Ħà¸Ńมà¸ŀิวà¹Ģà¸ķà¸Ńรà¹Į":137755,"Ġاذا":137756,"تخاذ":137757,"ãĤ¨ãĥ«":137758,"Ġpossibilité":137759,"ยืà¸Ļยัà¸Ļ":137760,"Ġünivers":137761,"Ġüniversite":137762,"ĠاÙĦدÙĪØ±ÙĬ":137763,"ĠìķĬëĬĶëĭ¤":137764,"ĠìĦľë¡ľ":137765,"ØŃاÙĦ":137766,"Ġë¨":137767,"Ġ먼":137768,"Ġ먼ìłĢ":137769,"à¸Ĺีà¹Īà¸ĸูà¸ģ":137770,"ì§ľ":137771,"Ġskóry":137772,"лÑĮÑĨ":137773,"à¹ĥà¸Ĭà¹īà¹Ģวลา":137774,"×ijקשת":137775,"ĠذÙĪ":137776,"æĹ¥ãĢħ":137777,"ĠкоÑĤоÑĢÑĥÑİ":137778,"ĠÑĥÑĢовенÑĮ":137779,"깨":137780,"à¹Ħà¸Ĺ":137781,"ãĤµãĥĹãĥª":137782,"ãĤ¸ãĥ§ãĥ³":137783,"ãģĻãģ¹ãģį":137784,"ĠGór":137785,"ãĥĪãĤ¤":137786,"ãĥĪãĤ¤ãĥ¬":137787,"ĠyaÅŁama":137788,"Ġdá»ĭp":137789,"Ġbữa":137790,"à¸ĭุ":137791,"Ġölüm":137792,"ãģ£ãģ¦ãģıãĤĭ":137793,"à¸ģารà¸Ħà¹īา":137794,"שער":137795,"ĠÑĤипа":137796,"ĠгеÑĢ":137797,"ĠгеÑĢо":137798,"רקע":137799,"Ġuważ":137800,"Ġuważa":137801,"ש×ŀף":137802,"Ġhastalık":137803,"ãĤıãĤĮãĤĭ":137804,"baÅŁÄ±":137805,"ÑĩÑĤо":137806,"Ġ×ij×ŀר׼×ĸ":137807,"Ġìļ°ë¦¬ìĿĺ":137808,"ĠÙĥاÙĨÙĪØ§":137809,"Ġأبر":137810,"ĠأبرÙĬÙĦ":137811,"층":137812,"à¹Ħà¸Ĥà¹Ī":137813,"ĠÙĪÙĦÙĪ":137814,"à¸Ĺัว":137815,"à¸Ĺัวรà¹Į":137816,"ĠÙĪØ£Ùĥد":137817,"à¸Ĭวà¸Ļ":137818,"׾×ķ×§":137819,"æį¨":137820,"æį¨ãģ¦":137821,"Ġİçin":137822,"péri":137823,"Ġyal":137824,"Ġyalnız":137825,"ÑĮÑıн":137826,"Ġgắng":137827,"à¸ģà¹ĩยัà¸ĩ":137828,"ĠУкÑĢаин":137829,"ĠÑģами":137830,"ĠпÑĢоведен":137831,"à¸ķà¸ģà¹ģà¸ķà¹Īà¸ĩ":137832,"ĠQuân":137833,"éparation":137834,"ĠbaÅŁÄ±nda":137835,"Ġznale":137836,"Ġznaleź":137837,"ĠznaleźÄĩ":137838,"ãĤ±ãĥ¼":137839,"ãĥİãĥ¼":137840,"à¸ĸูà¸ģà¸ķà¹īà¸Ńà¸ĩ":137841,"몸":137842,"ĠëıĮ":137843,"ĠëıĮìķĦ":137844,"ĠSchüler":137845,"ĠподгоÑĤов":137846,"ĠподгоÑĤовк":137847,"عرÙĪ":137848,"عرÙĪØ¶":137849,"laÅŁtır":137850,"ĠÑģоÑģÑĤавлÑıеÑĤ":137851,"ĠпÑĢоизвод":137852,"ĠпÑĢоизводÑģÑĤва":137853,"ĠоÑģнове":137854,"ĠØ´ÙħاÙĦ":137855,"à¸ģรี":137856,"ĠgörÃ¼ÅŁme":137857,"оÑĩек":137858,"Ġ×Ĺ×ijר×Ļ×Ŀ":137859,"Ùħخاط":137860,"Ùħخاطر":137861,"ï¼Ń":137862,"רפ×IJ":137863,"ĠMẹ":137864,"ยà¸Ńมรัà¸ļ":137865,"Ġvết":137866,"خذ":137867,"ĠاÙĦتط":137868,"ĠاÙĦتطبÙĬÙĤ":137869,"à¸Ļึà¸ģ":137870,"Ġ×Ķ×Ľ×ł×¡×ª":137871,"ĠогÑĢани":137872,"ĠогÑĢаниÑĩен":137873,"ĠÃĩalÄ±ÅŁ":137874,"ĠاÙĦÙħÙĨتدÙī":137875,"à¸Īำà¸Ļวà¸Ļมาà¸ģ":137876,"ĠÑĤоÑĢÑĢ":137877,"ĠÑĤоÑĢÑĢенÑĤ":137878,"ĠìĤ´ìķĦ":137879,"à¸ŀลัà¸ĩà¸ĩาà¸Ļ":137880,"à¸Ĭัà¸Ļ":137881,"ĠÐIJндÑĢ":137882,"Ġréalisé":137883,"×ŀש×IJ":137884,"à¹ģà¸Ĭ":137885,"à¹ģà¸Ĭรà¹Į":137886,"Ġбог":137887,"มาà¹ģลà¹īว":137888,"ĠاÙĦÙĨار":137889,"Ġolmadıģı":137890,"×ĵ×¢×Ķ":137891,"ĠÑĥвеÑĢ":137892,"ĠÑĥвеÑĢен":137893,"ãĤĭãĤĤãģ®":137894,"أد":137895,"أدÙĪØ§Øª":137896,"Ġ×Ķ×ĸ×ķ×Ĵ":137897,"إعÙĦاÙħ":137898,"há»ı":137899,"ĠNähe":137900,"ĠÑĤеÑģÑĤ":137901,"Ġ×ŀ×ķ׼ר":137902,"Ġë¬¸ìłľê°Ģ":137903,"ת×ķצ×IJ×Ķ":137904,"mó":137905,"m
óvel":137906,"ĠاÙĦتجارة":137907,"ĠмногиÑħ":137908,"обÑīа":137909,"Ġעסק×Ļ":137910,"ĠEducação":137911,"קש×Ļ×Ŀ":137912,"établ":137913,"établissement":137914,"Ġделе":137915,"иÑĢÑĥеÑĤÑģÑı":137916,"آثار":137917,"Ġ×Ķ×ŀר׼×ĸ×Ļ":137918,"ãĥIJãĥ«":137919,"ĠвÑģÑĤÑĢеÑĩ":137920,"ãģĴãĤĭ":137921,"ĠciÄħ":137922,"ĠciÄħgu":137923,"ÙĬست":137924,"à¸łà¸²à¸§":137925,"à¸łà¸²à¸§à¸°":137926,"Ø£Ùħر":137927,"Ġожи":137928,"Ġожида":137929,"Ġá»§y":137930,"ãĥŀãĥ«":137931,"راس":137932,"оÑĩной":137933,"ת×Ĵ×ķ×ij×ķת":137934,"تعرÙĬÙģ":137935,"ĠÑģоÑĨиалÑĮно":137936,"ãĤĴéĸĭ":137937,"ĠиÑģÑģледова":137938,"Ġdú":137939,"Ġdúvida":137940,"ĠskÅĤ":137941,"ĠskÅĤada":137942,"Ġhäufig":137943,"ĠвÑĭбÑĢ":137944,"ĠвÑĭбÑĢаÑĤÑĮ":137945,"ãģ®ãģ§ãģ¯ãģªãģĦãģĭ":137946,"ĠÑģилÑĮно":137947,"ÑĤвеÑĢжден":137948,"רפ":137949,"רפ×ķ×IJ×Ķ":137950,"æĢĿãģĦãģ¾ãģĻ":137951,"ØŃرص":137952,"ש×ķתף":137953,"Ùħسجد":137954,"à¹Ĥà¸Ĭวà¹Į":137955,"емÑģÑı":137956,"вÑĪие":137957,"Ġмл":137958,"Ġмлн":137959,"Ġ׾×Ķ×ij×Ļ×IJ":137960,"ĠÙĬتعÙĦÙĤ":137961,"à¸ķูà¹ī":137962,"ĠпÑĢаз":137963,"ĠпÑĢазд":137964,"ĠпÑĢаздник":137965,"Ġнем":137966,"Ġнемного":137967,"ĠsÃłng":137968,"تÙĨسÙĬ":137969,"تÙĨسÙĬÙĤ":137970,"Ġtá»Ŀ":137971,"Ġмеди":137972,"ã쫿Ī":137973,"ã쫿λ":137974,"à¸Ħวà¹īา":137975,"ãģĭãģijãĤĭ":137976,"×ij׾×ķת":137977,"ĠÑįкÑģп":137978,"ĠÑįкÑģпеÑĢÑĤ":137979,"ĠдевÑĥÑĪ":137980,"ĠдевÑĥÑĪк":137981,"ĠØŃص":137982,"ÙĨشأ":137983,"ãģĮãģĤãĤĭãģ®ãģ§":137984,"ĠتراÙħ":137985,"ĠتراÙħب":137986,"أسÙĪØ§ÙĤ":137987,"Ġ׾פ׳×ķת":137988,"Ġاﻷ":137989,"ãģ«ãģı":137990,"ãģ«ãģıãģĦ":137991,"ĠأعÙĦÙī":137992,"Ġ׾×Ķ×ŀש×Ļ×ļ":137993,"räu":137994,"ש×ŀ×Ļ×Ŀ":137995,"åĪĨãģij":137996,"ãģĻãģ§":137997,"ãģĻãģ§ãģ«":137998,"×Ķ׾׼×Ķ":137999,"×Ĺ׾×Ļ×£":138000,"Ġì±ħ":138001,"Ġì±ħìŀĦ":138002,"à¹Ģà¸Īริ":138003,"à¹Ģà¸Īริà¸į":138004,"éģĬãģ³":138005,"جسد":138006,"สาà¸ĺ":138007,"สาà¸ĺาร":138008,"สาà¸ĺารà¸ĵ":138009,"Ġbasın":138010,"ÑĢаг":138011,"гад":138012,"ĠhoÅŁ":138013,"íķµ":138014,"×ij×Ĺ×Ļר×Ķ":138015,"×ŀס×ļ":138016,"ĠìłľíĴĪ":138017,"تÙħÙĪÙĬÙĦ":138018,"ĠLưu":138019,"ë¡ľë¶ĢíĦ°":138020,"Ġпоб":138021,"Ġпобед":138022,"ÙħÙĨذ":138023,"常ãģ«":138024,"ÙĤس":138025,"ĠاÙĦÙħصدر":138026,"ĠÙĪØ§ÙĦاست":138027,"Ġkhắp":138028,"ĠاÙĦجاÙĨب":138029,"Ġnguyá»ĩn":138030,"éĸĵéģķãģĦ":138031,"ĠÑģÑĤÑĢа":138032,"ĠÑģÑĤÑĢаÑħ":138033,"ĠÑģÑĤÑĢаÑħов":138034,"รีà¸ļ":138035,"Ġxương":138036,"Ġì°¾":138037,"Ġì°¾ìķĦ":138038,"Ġngại":138039,"гал":138040,"à¸ĭีà¹Ī":138041,"Ġ×ijפ×Ļ×Ļס×ij×ķ×§":138042,"ЦенÑĤÑĢ":138043,"Ġavaliação":138044,"Ġeconómico":138045,"×ĸף":138046,"ĠÐľÐ°Ðº":138047,"Ġinterés":138048,"à¸ģลิà¹Īà¸Ļ":138049,"ÑģÑĤÑĮÑİ":138050,"ĠÄijương":138051,"å¼·ãģı":138052,"ĠKhách":138053,"à¹Ģà¸Ļืà¹īà¸Ńหา":138054,"ĠYazı":138055,"è²·ãģ£ãģ¦":138056,"ÐłÐķ":138057,"à¹Ģà¸ŀิà¹Īมà¸Ĥึà¹īà¸Ļ":138058,"สมà¸ļู":138059,"สมà¸ļูรà¸ĵà¹Į":138060,"ĠмиÑĢов":138061,"×Ĵ׳×Ļ×Ŀ":138062,"ĠÄijức":138063,"à¸Ńารà¹Į":138064,"صاص":138065,"ãģĬãĤĪ":138066,"ãģĬãĤĪãģ³":138067,"êÌī":138068,"ĠاÙĦÙħؤتÙħر":138069,"ĠاÙĦÙħرØŃÙĦØ©":138070,"สà¸Ńà¸ļà¸ĸาม":138071,"Ġà¸Īาà¸ģà¸Ļัà¹īà¸Ļ":138072,"Ġتعد":138073,"ãģĿãģ®ãģŁãĤģ":138074,"Ġkháng":138075,"à¸Ļิà¸Ķ":138076,"ãĥĬãĥ³":138077,"ëĦ¤ìļĶ":138078,"ĠاÙĦاØŃت":138079,"ĠاÙĦاØŃتÙĦاÙĦ":138080,"ìļķ":138081,"Ġмодели":138082,"ĠпÑĢоÑĨенÑĤ":138083,"à¸ŀวà¸ģà¹Ģรา":138084,"Ġ×Ķצ×ĵ":138085,"Ġ×Ķצ×ĵ×ĵ×Ļ×Ŀ":138086,"stände":138087,"׳×Ĵר":138088,"Ġdotyc":138089,"ĠdotyczÄħ":138090,"ĠdotyczÄħce":138091,"ĠÅĽwiÄĻt":138092,"×ŀר×Ķ":138093,"ãģĻãģĶãģĦ":138094,"ãĥĩãĤ£ãĥ³ãĤ°":138095,"à¸ģารสรà¹īาà¸ĩ":138096,"ëĤ¬":138097,"Ġì°¸ìŬ":138098,"ÑģÑħ":138099,"ÑģÑħем":138100,"ÙħÙĪØ³":138101,"Ġnấu":138102,"Ġ׾×ŀ×¢×ľ×Ķ":138103,"à¹Ģà¸Ľà¹īา":138104,"à¹Ģà¸Ľà¹īาหมาย":138105,"Ġmùi":138106,"ائز":138107,"íĽĪ":138108,"×Ĺ×ij×ķר×Ķ":138109,"à¸ľà¸¹à
¹īà¹ĥà¸Ĭà¹ī":138110,"Ġpaź":138111,"Ġpaździ":138112,"Ġpaździern":138113,"Ġpaździernika":138114,"ลà¸ĩà¹Ħà¸Ľ":138115,"ÙĤاع":138116,"ĠcháºŃm":138117,"Ġözellikleri":138118,"ĠÄIJo":138119,"ĠÄIJoÃłn":138120,"жение":138121,"Ġhẳ":138122,"Ġhẳn":138123,"ĠaÅŁk":138124,"ï½į":138125,"ãĥijãĤ¹":138126,"×Ķ×ķר×IJ×ķת":138127,"ĠÅ»":138128,"ĠÅ»y":138129,"×ŀ×ĸ׾":138130,"ĠÑĥкÑĢа":138131,"ĠÑĥкÑĢаин":138132,"à¹Ģà¸Ĭิ":138133,"à¹Ģà¸Ĭิà¸į":138134,"ÐłÐĺ":138135,"ĠzwiÄħzku":138136,"×Ķ×Ĺ׾×ĺת":138137,"ãĤĵãģ§ãģĻãĤĪãģŃ":138138,"ãģ¦ãģĬãĤĬ":138139,"ложиÑĤÑĮ":138140,"×ŀ×ķ׳×Ļ×Ŀ":138141,"ฮิ":138142,"ì°¬":138143,"ĠاÙĦÙħشترÙĥ":138144,"ĠdÃ¼ÅŁÃ¼k":138145,"агенÑĤ":138146,"ĠاÙĦأسبÙĪØ¹":138147,"ĠÙĤرÙĬب":138148,"инд":138149,"индив":138150,"индивид":138151,"индивидÑĥ":138152,"индивидÑĥалÑĮн":138153,"förder":138154,"Ġseçen":138155,"Ġseçenek":138156,"Ġétant":138157,"ĠлÑİбим":138158,"казÑĭваеÑĤ":138159,"วิà¸Ļ":138160,"Ġ×Ķ×ij×IJ×Ļ×Ŀ":138161,"Ġдов":138162,"ĠдоволÑĮ":138163,"ĠдоволÑĮно":138164,"×¢×ĵ×Ļ×£":138165,"Ġokre":138166,"ĠokreÅĽ":138167,"ĠokreÅĽlon":138168,"ĠترÙĬد":138169,"à¹Ģมืà¹Īà¸Ńวัà¸Ļà¸Ĺีà¹Ī":138170,"ãĤĪãģĭãģ£ãģŁ":138171,"Cumh":138172,"Cumhur":138173,"Cumhurba":138174,"CumhurbaÅŁ":138175,"CumhurbaÅŁkan":138176,"CumhurbaÅŁkanı":138177,"Ġnợ":138178,"à¸ľà¸¹à¹īà¹Ģลà¹Īà¸Ļ":138179,"Ġcomplète":138180,"à¹Ģà¸ŀศ":138181,"دÙIJ":138182,"Ġdüz":138183,"Ġdüzey":138184,"ãģ§ãģĤãĤĭãģĵãģ¨":138185,"extérieur":138186,"׳":138187,"Ġinformação":138188,"ãĤ¯ãĥªãĥĭãĥĥãĤ¯":138189,"ĠPubli":138190,"ĠPublié":138191,"ר×ķ×ĵ":138192,"à¸Ħà¸§à¸²à¸¡à¸Ľà¸¥à¸Ńà¸Ķà¸łà¸±à¸¢":138193,"ĠØ£ÙĬض":138194,"ĠØ£ÙĬضÙĭا":138195,"تسبب":138196,"ãģ¤ãĤĤãĤĬ":138197,"изма":138198,"à¸Ĥึà¹īà¸Ļà¹Ħà¸Ľ":138199,"ÙĥÙIJ":138200,"ÙĦÙĪÙħ":138201,"Ġשצר":138202,"Ġשצר×Ļ×ļ":138203,"ãģ¯ãĤĤãģ¡ãĤįãĤĵ":138204,"Ġкан":138205,"Ġканал":138206,"ãģ«ãģªãģ£ãģ¦ãģĦãģ¾ãģĻ":138207,"ĠاÙĦØ£Ùĥثر":138208,"تاØŃ":138209,"ÙĨتÙĩ":138210,"ÙĨتÙĩاء":138211,"اÙĪÙĬØ©":138212,"ĠBugün":138213,"нÑģкого":138214,"à¸Ķà¹Īวà¸Ļ":138215,"évolution":138216,"ãģ£ãģ¦ãģĦãģ¾ãģĹãģŁ":138217,"ãĤħ":138218,"ĠVương":138219,"à¸łà¸²à¸ŀย":138220,"à¸łà¸²à¸ŀยà¸Ļ":138221,"à¸łà¸²à¸ŀยà¸Ļà¸ķรà¹Į":138222,"Ġ×Ķצ׾×Ļ×Ĺ":138223,"ĠاÙĦإسÙĦاÙħÙĬ":138224,"ÙĦÙĬب":138225,"Ġedição":138226,"ÑģÑĤÑĢел":138227,"Ġkhúc":138228,"ÙĨÙħÙĪØ°":138229,"ÙĨÙħÙĪØ°Ø¬":138230,"׾צ×Ķ":138231,"ÑģÑĤавил":138232,"à¸ĸา":138233,"สรà¹īาà¸ĩà¸Ħวาม":138234,"ãģĦãģ£ãģ±":138235,"ãģĦãģ£ãģ±ãģĦ":138236,"ÑģÑĤавлен":138237,"ĠاÙĦÙĤدس":138238,"Ġngược":138239,"بخ":138240,"สหร":138241,"สหรั":138242,"สหรัà¸IJ":138243,"Ġأغ":138244,"Ġأغسط":138245,"Ġأغسطس":138246,"ãģĨãģ¾":138247,"ãģĨãģ¾ãģı":138248,"ĠêµŃìłľ":138249,"ØŃضار":138250,"Ġdừng":138251,"æĬ¼ãģĹ":138252,"تÙĪØ§":138253,"تÙĪØ§Ø¬Ø¯":138254,"ש×ŀ×Ĺ×Ķ":138255,"ãģıãĤĵ":138256,"Ġ×ijעצ":138257,"Ġ×ijעצ×Ŀ":138258,"×ŀ׳×Ļ×ķת":138259,"×ķ×Ļ×ĵ":138260,"×ķ×Ļ×ĵ×IJ×ķ":138261,"à¸Ĭิà¸ĩ":138262,"ĠpracÄĻ":138263,"ĠзаÑĤ":138264,"ĠзаÑĤем":138265,"ĠìŀIJìľł":138266,"Ġì¤Ģ":138267,"Ġì¤Ģë¹Ħ":138268,"ĠbáºŃ":138269,"ĠbáºŃc":138270,"Ġ×Ķ×ŀצ×ij":138271,"ĠÙĤÙĬÙħØ©":138272,"à¹Ģà¸Ńà¹Ģà¸Ĭ":138273,"à¹Ģà¸Ńà¹Ģà¸Ĭีย":138274,"Ġperchè":138275,"ĠاÙĦعسÙĥر":138276,"ĠاÙĦعسÙĥرÙĬØ©":138277,"جÙĬب":138278,"ëŀµ":138279,"ÙħÙĩر":138280,"ÙħÙĩرجاÙĨ":138281,"ÙħراÙĥ":138282,"ÙħراÙĥز":138283,"Ġоднако":138284,"à¸Ķีà¹Ĩ":138285,"Ġצפ×ķ":138286,"Ġkullanılan":138287,"Ġкино":138288,"ãĥĨãĤ£ãĥ³ãĤ°":138289,"ĠGiỼi":138290,"تÙĪØ²":138291,"تÙĪØ²ÙĬع":138292,"ยิà¸Ļ":138293,"ยิà¸Ļà¸Ķี":138294,"ĠcÅĵur":138295,"ĠiÅŁaret":138296,"Ġ×ij×¢×ĸר":138297,"Ġ×ij×¢×ĸרת":138298,"ĠпаÑĨи":138299,"ĠпаÑĨиенÑĤ":138300,"ãģ¿ãģŁãģĦãģ§ãģĻ":138301,"вез":138302,"лина":138303,"оде":138304,"Ġ×IJ×ķ×ª×Ł":138305,"dıģınız":138306,"ĠÐIJв":138307,"ĠÐIJвÑĤоÑĢ":138308,"ï¼®"
:138309,"ĠCần":138310,"ĠاÙĦاخ":138311,"ĠاÙĦاخبار":138312,"Ġê±°ìĿĺ":138313,"Ġatenção":138314,"ĠgeldiÄŁi":138315,"ãĤªãĤ¹":138316,"ãĤªãĤ¹ãĤ¹":138317,"ãĤªãĤ¹ãĤ¹ãĥ¡":138318,"евÑĭе":138319,"кÑĢÑĭл":138320,"à¹Ģà¸Ĭียà¸ĩ":138321,"à¹Ģà¸Ĭียà¸ĩà¹ĥหมà¹Ī":138322,"Ġmarço":138323,"ĠاÙĦÙħادة":138324,"Ġгол":138325,"Ġsprzedaży":138326,"Ġíķ´ê²°":138327,"ĠÐķго":138328,"ê¹Ģ":138329,"Ġ׾ק×ij×ľ×ª":138330,"ĠاÙĦÙģÙĨاÙĨ":138331,"Ġcomunicación":138332,"à¹Ģสà¹īà¸Ļà¸Ĺาà¸ĩ":138333,"íĺ¹":138334,"à¸Ĭำ":138335,"à¸Ĭำระ":138336,"Ġ׼×IJ×ŀ":138337,"Ġ׼×IJ×ŀ×ķר":138338,"à¸Ĭà¹Īาà¸ĩ":138339,"زÙĩر":138340,"Ġklientów":138341,"иваÑİÑĤ":138342,"анг":138343,"׳×ļ":138344,"Ġgá»įn":138345,"ÃľR":138346,"ìĺģìĥģ":138347,"Ġغزة":138348,"ìĿĮìĿĦ":138349,"Ġbezpo":138350,"ĠbezpoÅĽ":138351,"ĠbezpoÅĽredni":138352,"ĠاÙĦÙħÙĪØ§":138353,"ĠاÙĦÙħÙĪØ§Ø·ÙĨ":138354,"ĠاÙĦÙħÙĪØ§Ø·ÙĨÙĬÙĨ":138355,"ãĤĮãģ¾ãģĻ":138356,"ĠмаÑĤÑĩ":138357,"×IJ×ķף":138358,"ĠرسÙħÙĬ":138359,"ĠÑįкон":138360,"ĠÑįконом":138361,"ĠÑįкономиÑĩеÑģк":138362,"ãĥľãĥ¼":138363,"ĠдиÑĢ":138364,"ĠдиÑĢекÑĤоÑĢ":138365,"ĠÑģкоÑĢо":138366,"à¸ļำ":138367,"à¸ļำร":138368,"à¸ļำรุà¸ĩ":138369,"ĠÑĦÑĥÑĤ":138370,"ĠÑĦÑĥÑĤбол":138371,"Ġ×IJ×Ļ׾":138372,"Ġì¤ijêµŃ":138373,"ìľ¤":138374,"eÄŁe":138375,"à¹Ħà¸ģà¹Ī":138376,"traî":138377,"traîn":138378,"ĠÑĤÑĢÑĥб":138379,"à¹Ģà¸ļื":138380,"à¹Ģà¸ļืà¹īà¸Ńà¸ĩ":138381,"à¹ģมà¸Ļ":138382,"ĠتØŃدÙĬØ«":138383,"Ġ×Ľ×¢×ª":138384,"ØŃاسب":138385,"lıģa":138386,"×§×Ļ×Ļ×ŀ×Ļ×Ŀ":138387,"оÑģÑĤÑĮÑİ":138388,"à¸Ŀั":138389,"à¸Ŀัà¹Īà¸ĩ":138390,"شغÙĦ":138391,"ìĽ¹":138392,"Ġкаждого":138393,"Ġbölümü":138394,"หà¸Ļี":138395,"ĠistediÄŁi":138396,"Ġtrưng":138397,"ãĥĮ":138398,"ฮà¸Ń":138399,"Ø£ÙĨØ´":138400,"Ø£ÙĨشطة":138401,"ĠاÙĦÙħسÙĬ":138402,"ĠاÙĦÙħسÙĬØŃ":138403,"ลัà¸ģษà¸ĵà¹Į":138404,"Ġná»Ńa":138405,"à¸Ĺีà¹Īà¸ķà¹īà¸Ńà¸ĩà¸ģาร":138406,"ÑĪек":138407,"лÑij":138408,"Ġש×Ļ×Ķ":138409,"Ġש×Ļ×Ķ×Ļ×Ķ":138410,"Ġkhuôn":138411,"ĠÑĤÑĢебованиÑı":138412,"Ġ×ľ×¢×ĸ×ķר":138413,"ĠاÙĦعÙħر":138414,"ราà¸Ħาà¸ĸูà¸ģ":138415,"ÙĩÙıÙħÙĴ":138416,"üst":138417,"üstü":138418,"Ġденег":138419,"Ġnạ":138420,"à¸Ĥà¸Ļม":138421,"Ġблаг":138422,"Ġблагод":138423,"ĠблагодаÑĢ":138424,"ĠблагодаÑĢÑı":138425,"إسÙĦاÙħ":138426,"à¸Ļิว":138427,"çŁ¥ãĤīãģªãģĦ":138428,"Ø«ÙĤØ©":138429,"ĠголоÑģ":138430,"×IJ×ķר×Ĺ":138431,"Ġtrứng":138432,"Ġодном":138433,"ĠkoÅĦcu":138434,"Ġ×ķרק":138435,"WiÄĻ":138436,"WiÄĻcej":138437,"Ġ×IJ×Ļ׼×ķת":138438,"Ġ×IJ×Ļ׼×ķת×Ļ":138439,"ÑģоÑģ":138440,"Ġjeżeli":138441,"以ä¸ĭãģ®":138442,"å°ıãģķ":138443,"å°ıãģķãģª":138444,"ологии":138445,"ĠобÑģлÑĥж":138446,"ĠобÑģлÑĥжива":138447,"Ùĥتابة":138448,"Ġê´Ģìĭ¬":138449,"עש×Ļר":138450,"Ġarasındaki":138451,"ĠÑĢайона":138452,"ÙĪØ§Ø¬Ø¨":138453,"Ġ×ij×Ĺ×Ļ×Ļ":138454,"íķ´ì£¼":138455,"Ġgóc":138456,"айл":138457,"ĠTình":138458,"æļ®ãĤī":138459,"æļ®ãĤīãģĹ":138460,"æĻĤãģ«ãģ¯":138461,"ĠгоÑĢоде":138462,"Ġ׼×IJ×Ļ׾":138463,"Ġ׼×IJ×Ļ׾×ķ":138464,"ĠCá»Ļng":138465,"ãģ©ãģĨãģĹãģ¦ãĤĤ":138466,"×Ĺ×ķ×£":138467,"تØŃرÙĥ":138468,"ĠÑģловам":138469,"à¸Īะà¸Ĭà¹Īวย":138470,"ĠاÙĦÙħستÙĤبÙĦ":138471,"ÙĤض":138472,"ÙĤضÙĬ":138473,"×ijס×ķפ":138474,"×ijס×ķפ×ķ":138475,"iÄĻÄĩ":138476,"ĠYıl":138477,"Ø´ÙĬØ®":138478,"à¸Ħุà¸ĵà¸Īะ":138479,"ש×ŀ×ķת":138480,"Ġتعرض":138481,"Ġanálise":138482,"ĠÑģобиÑĢа":138483,"à¹Ģà¸ŀà¸Ĭ":138484,"à¹Ģà¸ŀà¸Ĭร":138485,"Ġвели":138486,"Ġвелик":138487,"สัà¹īà¸Ļ":138488,"Ġpopulação":138489,"รà¹Īวมà¸ģัà¸Ļ":138490,"×Ĺ×ŀ":138491,"×Ĺ×ŀ×Ļש×Ļ":138492,"ס×Ļס":138493,"åĨħãģ§":138494,"ĠsobÄħ":138495,"ĠYay":138496,"ĠYayın":138497,"ãĥ¡ãĥĭãĥ¥ãĥ¼":138498,"ĠпÑĢедоÑģÑĤавлÑı":138499,"ãģłã썿ĢĿãģĨ":138500,"Ġê³łê°Ŀ":138501,"Ġодним":138502,"à¹ĥà¸Ļà¹Ģรืà¹Īà¸Ńà¸ĩ":138503,"Ġsá»ķ":138504,"ĠÐĹдеÑģÑĮ":138505,"ĠизменениÑı":138506,"ĠìĿ¼ìĿĦ":138507,"ãģªãģ®ãģł":138508,"кладÑĭва":138509,"ÑĢ
ма":138510,"Ġ×ķ×ij׼׾":138511,"تأÙħÙĬÙĨ":138512,"ĠпÑĢиÑıÑĤ":138513,"ĠпÑĢиÑıÑĤн":138514,"ÙħÙħار":138515,"ÙħÙħارسة":138516,"ãģ¨ãģªãģ£ãģ¦":138517,"ĠجÙħÙĬÙĦ":138518,"Ġì§Ī":138519,"Ġì§Ī문":138520,"Ġquestão":138521,"ié":138522,"iéndo":138523,"หà¹īà¸Ńà¸ĩà¸ŀัà¸ģ":138524,"ãĥijãĥ¼ãĥĪ":138525,"ÑĤвеÑĢжда":138526,"нÑģкой":138527,"зал":138528,"มุà¹Īà¸ĩ":138529,"á»Ĭ":138530,"Ġ×Ķ×IJ×Ĺר×ķ׳×Ķ":138531,"ĠThư":138532,"주민":138533,"ĠاÙĦعب":138534,"évén":138535,"événement":138536,"ÙĤÙĪØ§Ø¹Ø¯":138537,"دÙı":138538,"ĠìķĬìĬµëĭĪëĭ¤":138539,"Ġ보기":138540,"Ġyapılması":138541,"à¹Ģราà¸ģ":138542,"à¹Ģราà¸ģà¹ĩ":138543,"ØŃذر":138544,"ÙĤصر":138545,"ãģ¦ãģĹãģ¾ãģĦãģ¾ãģĹãģŁ":138546,"Ġà¹Ģà¸Ľà¹ĩà¸Ļà¸ķà¹īà¸Ļ":138547,"ãģ¨ãģ«":138548,"ãģ¨ãģ«ãģĭ":138549,"ãģ¨ãģ«ãģĭãģı":138550,"нÑĨе":138551,"звÑĥк":138552,"ãģĹãĤĪãģĨãģ¨":138553,"ĠاÙĦصØŃÙĬØ©":138554,"Ġש×Ķ×Ļ×ķ":138555,"ĠDiÄŁer":138556,"ÙĤÙĦÙĤ":138557,"ãĤ¸ãĥ£ãĥ³":138558,"Ġrá»Ŀi":138559,"ĠлеÑĩ":138560,"ĠлеÑĩениÑı":138561,"تباد":138562,"تبادÙĦ":138563,"צפ×Ķ":138564,"à¸Ħวามà¹Ģหà¹ĩà¸Ļ":138565,"Ġشب":138566,"ĠشبÙĥØ©":138567,"ר×Ļ×§":138568,"Ùħعد":138569,"Ùħعدات":138570,"dıģında":138571,"Ġ×ijש׳×Ļ×Ŀ":138572,"Ġ×Ķ×Ļשר×IJ׾":138573,"Ġ×Ķ×Ļשר×IJ׾×Ļת":138574,"Ġsınav":138575,"׳צ×Ļ×Ĵ":138576,"วัà¸ķà¸ĸุ":138577,"ĠاÙĦبرÙĦÙħ":138578,"ĠاÙĦبرÙĦÙħاÙĨ":138579,"tivitÃł":138580,"ãĤĵãģłãĤįãģĨ":138581,"×§×Ļ×Ļ×ŀ":138582,"ÙĦÙĬÙĥ":138583,"ĠÄijò":138584,"ĠÄijòi":138585,"ĠÐĺнÑĤеÑĢ":138586,"ĠÐĺнÑĤеÑĢнеÑĤ":138587,"ãģ«ãģ¨ãģ£ãģ¦ãģ¯":138588,"ãģ£ãģĵ":138589,"×§×ķס":138590,"ستØŃÙĤ":138591,"æķĻãģĪãģ¦":138592,"ãĥĢãĥ¡":138593,"ĠÙħÙĨزÙĦ":138594,"à¹Ģà¸ĭà¹ĩà¸Ļ":138595,"使ãģĪãĤĭ":138596,"è¦ĭç©į":138597,"è¦ĭç©įãĤĤãĤĬ":138598,"Ø£Ùģ":138599,"Ø£ÙģÙĥار":138600,"ĠигÑĢов":138601,"ĠигÑĢовÑĭе":138602,"ĠmÄĻż":138603,"ĠmÄĻżczy":138604,"ĠmÄĻżczyzn":138605,"ĠاÙĦØŃÙĤÙĬÙĤÙĬ":138606,"عبر":138607,"׼×ķ׾׳×ķ":138608,"íĿ¥":138609,"×ŀ×IJ×ķ×Ĺר":138610,"ختص":138611,"ãĥŀãĥŀ":138612,"Ġ×IJ×Ĺ×ķ×ĸ":138613,"íĮĢ":138614,"Ġrá»iji":138615,"ĠвÑĤоÑĢ":138616,"ĠвÑĤоÑĢой":138617,"Ġlẫn":138618,"пÑĢом":138619,"пÑĢомÑĭÑĪ":138620,"пÑĢомÑĭÑĪлен":138621,"пÑĢомÑĭÑĪленн":138622,"ĠоÑĤноÑĪениÑı":138623,"Ġsứ":138624,"ĠмобилÑĮ":138625,"ĠмобилÑĮн":138626,"ĠÑįÑĤомÑĥ":138627,"Ġtạp":138628,"ĠìĤ¬ê±´":138629,"ĠìķĮ볤":138630,"ÙĥÙı":138631,"ÙĥÙıÙħÙĴ":138632,"Ġ×§×ķר×Ķ":138633,"ĠÑĦиÑĢ":138634,"ĠÑĦиÑĢм":138635,"Ġsıkıntı":138636,"׳׼":138637,"׳׼×ķף":138638,"ÙĪÙĦÙĪØ¬ÙĬ":138639,"ØŃاÙĨ":138640,"Ġloạn":138641,"Ġ×IJ×ľ×£":138642,"Ġmắn":138643,"abhäng":138644,"abhängig":138645,"ĠÑĥÑĢовнÑı":138646,"Ġ׾×ij×ĵ×ķ×§":138647,"ÙĬÙħÙĨ":138648,"layın":138649,"Ġhải":138650,"Ġзавод":138651,"ĠìķĦ주":138652,"สà¸ĸา":138653,"สà¸ĸาà¸ļัà¸Ļ":138654,"Ġgüvenlik":138655,"à¹Ģà¸Ķà¹Īà¸Ļ":138656,"×ij×ĵ×§":138657,"ĠëĪ":138658,"ĠëĪĦ":138659,"ĠëĪĦ구":138660,"éĩįè¦ģãģª":138661,"รà¸Ńà¸ĩรัà¸ļ":138662,"schlie":138663,"schlieÃŁen":138664,"Ġìĸ¼":138665,"Ġìĸ¼ë§Ī":138666,"Ġìĸ¼ë§ĪëĤĺ":138667,"ÑĤики":138668,"íķľëĭ¤ê³ł":138669,"ãģłãģ£ãģŁãĤī":138670,"Ġ×Ķ×Ļ×ĺ×ij":138671,"ãģªãģijãĤĮãģ°ãģªãĤīãģªãģĦ":138672,"âÌ":138673,"ậ":138674,"Ġphạt":138675,"akÄ±ÅŁ":138676,"ãģ¦ãģĹãģ¾ãģĦãģ¾ãģĻ":138677,"à¹Ģà¸ĭà¹ĩ":138678,"ĠСегоднÑı":138679,"Ġinsanların":138680,"Ġdéveloppe":138681,"תפר":138682,"תפר×Ļ×ĺ":138683,"اÙĨتشار":138684,"ê°ij":138685,"François":138686,"Ø£ÙĦع":138687,"Ø£ÙĦعاب":138688,"ãĤĴè¶ħ":138689,"ãĤĴè¶ħãģĪ":138690,"Ġê°ĻìĬµëĭĪëĭ¤":138691,"ãĤ³ãĥ¬":138692,"ĠмеÑģÑıÑĨев":138693,"íĮħ":138694,"ĠاÙĦجاÙħعة":138695,"ìĿ¸íĦ°":138696,"ìĿ¸íĦ°ëĦ·":138697,"×ĵר×ķש":138698,"ĠÙĪØ£Ø´Ø§Ø±":138699,"ĠпÑĢавила":138700,"ãģĿãģĵãģ«":138701,"×Ĺ×ŀ×ĵ":138702,"à¹Ģหà¸ķุà¸ģารà¸ĵà¹Į":138703,"Ġê²½íĹĺ":138704,"ãģ¶ãĤĬ":138705,"׾ש":138706,"׾ש×ķף":138707,"à¹Ģà¸ĸ":138708,"ĠDoÄŁu":138709,"ĠиÑģполÑĮзование":
138710,"ĠçocuÄŁu":138711,"магазине":138712,"ĠÄijiá»ĥn":138713,"Ġaslı":138714,"Ġaslında":138715,"Ġdoença":138716,"Ġساع":138717,"Ġساعات":138718,"ĠиÑģполÑĮзованиÑı":138719,"ר×ķצ×Ļ×Ŀ":138720,"ĠзнаÑĩиÑĤ":138721,"ĠÑĢам":138722,"ĠÑĢамкаÑħ":138723,"거리":138724,"ĠпÑĭÑĤа":138725,"ãĥģãĥ³":138726,"ĠпоÑģк":138727,"ĠпоÑģколÑĮ":138728,"ĠпоÑģколÑĮкÑĥ":138729,"إبر":138730,"إبراÙĩ":138731,"إبراÙĩÙĬÙħ":138732,"ĠÑĤÑĢеÑħ":138733,"ĠGenç":138734,"سÙĪÙģ":138735,"ĠveÃŃculo":138736,"ĠNgân":138737,"ĠоÑĩеÑĢедÑĮ":138738,"à¸Ħรึà¹Īà¸ĩ":138739,"×IJ×ij×Ļ":138740,"à¸ķà¹īม":138741,"ãĤĴè¡ĮãģĦ":138742,"ĠاÙĦسابÙĤØ©":138743,"наÑĨи":138744,"наÑĨиона":138745,"наÑĨионалÑĮн":138746,"Ġgestión":138747,"تÙĤد":138748,"ĠاÙĦبÙĬاÙĨ":138749,"ĠاÙĦبÙĬاÙĨات":138750,"ĠاÙĦاÙĨتخاب":138751,"ĠاÙĦاÙĨتخابات":138752,"à¹Ģà¸Ĭà¹Īา":138753,"×ĵ×IJ×Ĵ":138754,"Ġ׾×Ĵ×ŀר×Ļ":138755,"ĠتØŃتاج":138756,"Ġthôn":138757,"à¸ķà¹īà¸Ńà¸Ļ":138758,"à¸ķà¹īà¸Ńà¸Ļรัà¸ļ":138759,"女ãģ®":138760,"女ãģ®åŃIJ":138761,"Ġthợ":138762,"Ø·ØŃÙĨ":138763,"ารà¹Įà¸Ķ":138764,"ת×ŀ×Ļ×ĵ":138765,"ĠÑģамÑĭм":138766,"Ġìĭľíĸī":138767,"إصد":138768,"إصدار":138769,"ĠNghá»ĩ":138770,"ìķķ":138771,"سئ":138772,"سئÙĦ":138773,"à¸Ńาร":138774,"à¸Ńารม":138775,"à¸Ńารมà¸ĵà¹Į":138776,"à¹ģฮ":138777,"׳×ĺ׾":138778,"Ġì¢ĭìķĦ":138779,"×ķ׾׾":138780,"Ġ×ij×Ľ×ª×ij":138781,"ãĤ«ãĥ©":138782,"צע×Ļר×Ļ×Ŀ":138783,"تعبÙĬر":138784,"Ġ×ŀקר×Ķ":138785,"ĠÑĦакÑĤоÑĢ":138786,"ĠتÙħاÙħ":138787,"ĠتÙħاÙħا":138788,"ëįķ":138789,"Ġvưá»Ŀ":138790,"Ġvưá»Ŀn":138791,"ĠdÄ±ÅŁÄ±":138792,"ãģĦãģ¡":138793,"Ġ׾ק׳×ķת":138794,"ĠاÙĦعÙĦاÙĤات":138795,"пÑĥб":138796,"пÑĥбли":138797,"Ø¥ÙĬÙħ":138798,"Ø¥ÙĬÙħاÙĨ":138799,"à¸Ńำà¸Ļา":138800,"à¸Ńำà¸Ļาà¸Ī":138801,"åIJ«ãģ¾ãĤĮ":138802,"ãĤĭãģŁãĤģãģ«":138803,"ס×Ĵ":138804,"ס×Ĵ׳×ķף":138805,"تØŃدÙĬ":138806,"Ġauprès":138807,"ĠاÙĦجÙĩا":138808,"ĠاÙĦجÙĩاز":138809,"Ġ×ŀת×Ĺת":138810,"еннÑĥÑİ":138811,"Ġзим":138812,"à¸ģาà¹ģà¸Ł":138813,"Ġ×ijת×ķר":138814,"Ġnghè":138815,"Ġnghèo":138816,"ĠÐĽÑİ":138817,"ĠÐĽÑİб":138818,"תקצ×Ļ×ij":138819,"×ŀעש×Ķ":138820,"ĠاÙĦبÙĬت":138821,"צ×Ļפ":138822,"ĠобÑıзан":138823,"ĠMá»Ĺi":138824,"ĠТÑĥÑĢ":138825,"ĠÙĪØ¨Ø§ÙĦت":138826,"ĠÙĪØ¨Ø§ÙĦتاÙĦÙĬ":138827,"Ġdécision":138828,"Ġبد":138829,"Ġبدأت":138830,"Ġcục":138831,"Ġbask":138832,"Ġbaskı":138833,"Ġhatırl":138834,"Ġhatırla":138835,"å°ıãģķãģĦ":138836,"Ġgerçekten":138837,"à¸ľà¸±à¸ģ":138838,"åı¯èĥ½ãģª":138839,"×ŀ×IJס":138840,"ĠcrÃŃtica":138841,"ĠìĿĺìĽIJ":138842,"عÙĤÙĪØ¯":138843,"×ĺ׼׳":138844,"×ĺ׼׳×ķ׾×ķ×Ĵ×Ļ×Ķ":138845,"è¨ĢãģĪãģ°":138846,"ĠÙĤÙĨا":138847,"ĠÙĤÙĨاة":138848,"ĠìĿ´ê²ĥìĿĢ":138849,"تصر":138850,"à¸Łà¸±à¸Ļ":138851,"ĠÑĢеÑĨеп":138852,"ĠÑĢеÑĨепÑĤ":138853,"ĠبÙĨÙ쨳":138854,"ÑĢоÑĪ":138855,"ĠмаÑĢÑĤа":138856,"Ġsonras":138857,"Ġsonrası":138858,"×ķ×ijש":138859,"ãĥªãĤ¹ãĤ¯":138860,"ĠFrançais":138861,"á»ļ":138862,"ê°Ķ":138863,"Ġ×Ķ×ijר×Ļת":138864,"פ×Ļצ":138865,"פ×Ļצ×ķ×Ļ":138866,"ĠÙĦÙħاذا":138867,"ĠÐļиев":138868,"ĠÑģмÑĭÑģл":138869,"ê¸Īìľµ":138870,"ãĤ·ãĥ£ãĥ«":138871,"ãĥ©ãĤ¤ãĥĪ":138872,"ìĽĥ":138873,"×ŀ×Ĺר":138874,"ãĨį":138875,"Ġkullanım":138876,"Ġ×IJצ׾׳×ķ":138877,"ĠtÃłn":138878,"ãĥıãĥ¼":138879,"ãģ¨ãģ¨ãĤĤ":138880,"ãģ¨ãģ¨ãĤĤãģ«":138881,"ÑĢег":138882,"ÑĢеги":138883,"ÑĢегион":138884,"ãģªãģıãģªãĤĭ":138885,"Ġchảy":138886,"ĠجÙĩØ©":138887,"ÅĦskiej":138888,"à¸Ńีà¹Ģม":138889,"à¸Ńีà¹Ģมล":138890,"ãģįãģ£ãģ¨":138891,"ĠìĺĪìĤ°":138892,"Ġkitabı":138893,"Ġeducação":138894,"ĠbuluÅŁ":138895,"ологиÑı":138896,"ĠконкÑĢ":138897,"ĠконкÑĢеÑĤ":138898,"×Ĵ×Ļר":138899,"ĠпÑĢедлаг":138900,"ĠпÑĢедлагаеÑĤ":138901,"ĠYên":138902,"Ġíķľë²Ī":138903,"Ġ×ŀר׼×ĸ×Ļ":138904,"à¹Ģà¸Ľà¸´à¸Ķà¹Ģà¸ľà¸¢":138905,"ÑĤвеÑĢд":138906,"ĠHá»ĩ":138907,"ĠÐĵÑĢ":138908,"à¸Ŀà¹īา":138909,"×Ķשק":138910,"×Ķשקע×Ķ":138911,"ĠнаÑĥк":138912,"ìłIJìĿĦ":138913,"ĠнелÑĮ":138914,
"ĠнелÑĮз":138915,"ĠнелÑĮзÑı":138916,"гин":138917,"ĠBöl":138918,"ĠBölge":138919,"Ġвла":138920,"ĠвлаÑģÑĤи":138921,"à¹Ģà¸Ļà¹ĩ":138922,"à¹Ģà¸Ļà¹ĩà¸ķ":138923,"골":138924,"Ġöld":138925,"Ġöldür":138926,"×Ľ×ł×¢":138927,"ĠاÙĦÙĩÙĬئة":138928,"تارÙĬØ®":138929,"ĠÐijÑĢ":138930,"ĠÑģмож":138931,"ĠÑģможеÑĤе":138932,"ĠLúc":138933,"à¹Ħà¸Ľà¸ĸึà¸ĩ":138934,"ĠBakanı":138935,"Ġerklärt":138936,"ĠÐIJна":138937,"Ġscène":138938,"åķıãģĦ":138939,"åķıãģĦåIJĪãĤıãģĽ":138940,"ÙħÙĩÙĨد":138941,"ÙħÙĩÙĨدس":138942,"Ġназвание":138943,"иваниÑı":138944,"ãĤĴå¤īãģĪ":138945,"ä»ĺãģįåIJĪ":138946,"ãĥijãĤ½":138947,"ãĥijãĤ½ãĤ³ãĥ³":138948,"æĺİãĤī":138949,"æĺİãĤīãģĭ":138950,"à¹Ģà¸Ńà¸ģสาร":138951,"à¹Ģà¸ģิà¸Ļà¹Ħà¸Ľ":138952,"леп":138953,"ãģĹãģŁãĤĤãģ®":138954,"ĠCâm":138955,"ĠCâmara":138956,"×§×ķ׾׳×ķ×¢":138957,"Ġ×ij×Ĵ×Ļף":138958,"Ġoczy":138959,"ĠoczywiÅĽcie":138960,"attivitÃł":138961,"ãĥĵãĥ¥ãĥ¼":138962,"Ġeducación":138963,"İYE":138964,"ê¹ĮìļĶ":138965,"ãĤ¨ãĥªãĤ¢":138966,"неÑģÑĤи":138967,"Ġmóg":138968,"ĠmógÅĤ":138969,"Ġ×§×ĺ׳×Ļ×Ŀ":138970,"ĠPrä":138971,"Ġ×ľ×¢×ij×ķר":138972,"بÙĨÙī":138973,"зол":138974,"золоÑĤ":138975,"ĠwnÄĻtr":138976,"ĠwnÄĻtrz":138977,"Ġconstrução":138978,"รัà¸ļรà¸Ńà¸ĩ":138979,"سجÙĨ":138980,"Ġ×§×ķ׳":138981,"ס×Ļפ×ķר":138982,"ĠÙħدÙī":138983,"رضÙī":138984,"плав":138985,"ï¼¥":138986,"Ġila":138987,"Ġilaç":138988,"ãĤĭãģ¹ãģį":138989,"ĠÙħÙĪÙĤÙģ":138990,"à¸ģรุ":138991,"à¸ģรุà¸ĵา":138992,"chodzÄħc":138993,"ĠÑĤÑĭÑģ":138994,"ÐķвÑĢо":138995,"ĠÙĬØŃدث":138996,"ãĥ¡ãĤ¤ãĥ³":138997,"ĠاÙĦصØŃÙĬ":138998,"ĠÐĶан":138999,"دعاء":139000,"ãĤ´ãĥ¼ãĥ«":139001,"×©×ł×ª×Ļ":139002,"×©×ł×ª×Ļ×Ļ×Ŀ":139003,"à¸Ķà¹īวยà¸ģัà¸Ļ":139004,"Ġolacaģı":139005,"Ġ×ij×ŀ×Ĺ×Ļר":139006,"×Ķ×§":139007,"×Ķ×§×ŀת":139008,"ãĥ¢ãĥİ":139009,"ĠçalÄ±ÅŁtı":139010,"Ġjóvenes":139011,"ãģĦãģıãĤī":139012,"ĠÙħعدÙĦ":139013,"ĠCÅ©ng":139014,"ĠSegún":139015,"Ġdönemde":139016,"Ġ׾×Ļ×ĵ×Ļ":139017,"ãģįãģ¡":139018,"ãģįãģ¡ãĤĵ":139019,"ãģįãģ¡ãĤĵãģ¨":139020,"Ù쨱ÙĨس":139021,"Ù쨱ÙĨسا":139022,"åIJijãģį":139023,"Ġcampaña":139024,"ĠÑģамоÑģÑĤоÑı":139025,"ĠÑģамоÑģÑĤоÑıÑĤелÑĮно":139026,"á»Ģ":139027,"ÙĤÙĪØ§":139028,"سÙĦاØŃ":139029,"à¸ģระà¹ģ":139030,"à¸ģระà¹ģส":139031,"ĠполÑĮзÑĥ":139032,"nqu":139033,"nquête":139034,"รà¹Īวมà¸ģัà¸ļ":139035,"ëĬIJëĥIJ":139036,"à¸Ĺีมà¸Ĭาà¸ķิ":139037,"Ġyıllık":139038,"ìĬ¬":139039,"ĠأصØŃاب":139040,"illé":139041,"Ġdóla":139042,"Ġdólares":139043,"Ġкож":139044,"Ġкожи":139045,"ลà¹īà¸Ń":139046,"à¹Ģรียà¸ļร":139047,"à¹Ģรียà¸ļรà¹īà¸Ńย":139048,"à¹Ģà¸ŀิ":139049,"à¹Ģà¸ŀิà¹Īà¸ĩ":139050,"ÑĢиÑĤоÑĢи":139051,"Ġíijľ":139052,"ĠíijľíĺĦ":139053,"ĠпеÑĢев":139054,"ĠпеÑĢевод":139055,"פ×Ĵ×Ļ×¢×Ķ":139056,"ĠdeÄŁerlendirme":139057,"ÙģØ§Ø¦":139058,"ĠвÑĭгод":139059,"ınızı":139060,"×ķ׼×Ļ×Ĺ":139061,"ĠдоÑģÑĤиг":139062,"ĠngÃłn":139063,"æĢĿãģ£ãģŁ":139064,"ĠÐķÑģÑĤÑĮ":139065,"ĠاÙĦرغÙħ":139066,"ĠzwiÄħzane":139067,"ربط":139068,"à¸Ļึà¸ĩ":139069,"Ġ׾×Ĺ×ķ×§":139070,"Ġszczególn":139071,"Ġszczególnie":139072,"ĠباستخداÙħ":139073,"ĠfÃŃsico":139074,"עס":139075,"עס×ķ×§":139076,"سÙĦÙĪÙĥ":139077,"ĠاØŃد":139078,"ÑĩÑijÑĤ":139079,"×ĸ׼×Ķ":139080,"Ġlá»ĩnh":139081,"ĠÙĪØŃØª":139082,"ĠÙĪØŃØªÙī":139083,"à¸Ħวามสามารà¸ĸ":139084,"à¸Ńยูà¹Īà¹ģลà¹īว":139085,"à¸ģารà¹Ģà¸Ķิà¸Ļà¸Ĺาà¸ĩ":139086,"تخذ":139087,"צ×Ļ×ķ×ĵ":139088,"ĠاÙĦأس":139089,"ĠاÙĦأسÙĩÙħ":139090,"Ġtá»ĩ":139091,"ãģ£ãģ¦ãģĦãģ¦":139092,"สรุ":139093,"à¸ªà¸£à¸¸à¸Ľ":139094,"ĠкомÑĦ":139095,"ĠкомÑĦоÑĢÑĤ":139096,"ìĺ¤ëĬĶ":139097,"ĠÑĢазв":139098,"ĠÑĢазвива":139099,"ланд":139100,"hänge":139101,"ĠبÙĨسبة":139102,"à¹Ģà¸Ĥียว":139103,"עצ×Ŀ":139104,"Ġ×ľ×ľ×Ľ×ª":139105,"ÑģоÑĨиалÑĮн":139106,"Ġëĭ¤ìĿĮê³¼":139107,"Ġרש×ķ×ŀ":139108,"×ŀר×Ĺ×ij":139109,"سÙĤØ·":139110,"Ġalanı":139111,"ĠÄijá»ĩ":139112,"é£Łãģ¹ãĤĭ":139113,"à¸Ķึà¸ĩ":139114,"Ġgegen
über":139115,"ĠبÙĩذÙĩ":139116,"à¸ĸืà¸Ńà¹Ģà¸Ľà¹ĩà¸Ļ":139117,"ëķħ":139118,"à¸Ħà¸Ļà¹Ħà¸Ĺย":139119,"ãĤ¢ãĤ¦":139120,"ãĤ¢ãĤ¦ãĥĪ":139121,"ศัà¸ģ":139122,"ศัà¸ģà¸Ķิ":139123,"ศัà¸ģà¸Ķิà¹Į":139124,"ÙĤÙĪØ§ÙĨ":139125,"ÙĤÙĪØ§ÙĨÙĬÙĨ":139126,"Ġhá»Ļp":139127,"ãģªãģıãģªãģ£ãģ¦":139128,"Ġ×IJ×ŀ׳":139129,"Ġ×IJ×ŀ׳×Ŀ":139130,"à¹Ģà¸ķืà¸Ńà¸Ļ":139131,"ĠзавиÑģим":139132,"ĠзавиÑģимоÑģÑĤи":139133,"ת×Ļ×IJ":139134,"ת×Ļ×IJ×ķר":139135,"å§ĭãĤģãģŁ":139136,"Ġngá»į":139137,"Ġngá»įt":139138,"íĴį":139139,"ê³¼ìŀ¥":139140,"Ġbại":139141,"ãģ§ãģįãģ¦":139142,"Ġcomeçar":139143,"à¸Ľà¸£à¸²à¸ģ":139144,"à¸Ľà¸£à¸²à¸ģà¸ı":139145,"ĠгодÑĭ":139146,"меÑģ":139147,"ĠاÙĦÙħستÙĪÙī":139148,"ĠÑģамÑĭе":139149,"ллеÑĢ":139150,"ãģ£ãģ¦ãģĹãģ¾ãģĦãģ¾ãģĻ":139151,"ãģ¨ãģ®ãģĵãģ¨":139152,"bió":139153,"à¸ģลà¹Īà¸Ńà¸ĩ":139154,"ĠاÙĦزÙĪØ¬":139155,"ãģ«è¡Įãģ£ãģŁ":139156,"à¸Ħà¹Īà¸Ńà¸Ļ":139157,"à¸Ħà¹Īà¸Ńà¸Ļà¸Ĥà¹īาà¸ĩ":139158,"ĠbaÄŁl":139159,"ĠbaÄŁlant":139160,"ĠbaÄŁlantı":139161,"確ãģĭ":139162,"確ãģĭãģ«":139163,"ãĥľãĥ¼ãĥ«":139164,"çµĤãĤıãĤĬ":139165,"ש×ŀר":139166,"à¸Ĺีà¹Īสามารà¸ĸ":139167,"ÙĦزÙħ":139168,"даеÑĤÑģÑı":139169,"รัà¸ļà¸Ľà¸£à¸°":139170,"รัà¸ļà¸Ľà¸£à¸°à¸Ĺาà¸Ļ":139171,"å¤īãĤıãĤĬ":139172,"ï¼¢":139173,"ĠìĺĪìĪĺëĭĺ":139174,"ãĤĪãģĨãģ¨":139175,"มัà¸ģà¸Īะ":139176,"ĠHương":139177,"ÙĨÙ쨰":139178,"×ŀ×ĵ×ĵ":139179,"ĠìĿ¸ìłķ":139180,"ÑħодиÑĤÑĮ":139181,"ĠзавиÑģиÑĤ":139182,"×ķ×ĵ×Ļ×¢":139183,"ãģĵãģ¨ãģĮãģĤãĤĬãģ¾ãģĻ":139184,"عراÙĤ":139185,"سطØŃ":139186,"à¸ģำà¹Ħร":139187,"ëĵ¤ëıĦ":139188,"×Ļצ×Ļר×Ķ":139189,"ãģĨãģĵãģ¨":139190,"ÙĦاØŃÙĤ":139191,"ãģĦãĤĮãģ°":139192,"ĠиÑģполÑĮзÑĥÑİÑĤ":139193,"ĠBợi":139194,"Ġשק׾×Ļ×Ŀ":139195,"ÑĨикл":139196,"ÐIJÐŀ":139197,"Ġ×ijש׳×Ķ":139198,"ÙĨشط":139199,"Ġש×Ļ׳×ķ×Ļ":139200,"Ġש×Ļ׳×ķ×Ļ×Ļ×Ŀ":139201,"Ġpoblación":139202,"ĠHưng":139203,"ระว":139204,"ระวัà¸ĩ":139205,"رÙĬاضة":139206,"رصد":139207,"تÙĤÙĦÙĬ":139208,"تÙĤÙĦÙĬد":139209,"Ġülkem":139210,"Ġülkemiz":139211,"à¸Ĭะ":139212,"ãĤ¯ãĥªãĥ¼ãĥł":139213,"èģŀãģĦãģŁ":139214,"Ġważ":139215,"Ġważne":139216,"ê±°ëĵł":139217,"ê±°ëĵłìļĶ":139218,"×ŀ×IJ×ij×§":139219,"×Ĺ×ĵש×ķת":139220,"ĠWroc":139221,"ĠWrocÅĤaw":139222,"ĠKültür":139223,"sist":139224,"sistência":139225,"×¢×ĸר×Ķ":139226,"Ġgương":139227,"รà¹īาà¸Ļà¸Ħà¹īา":139228,"ĠÙĪØ£ÙĪØ¶ØŃ":139229,"ándose":139230,"ãĤ·ãĥ¼ãĥ³":139231,"×IJ׳ר×Ĵ":139232,"×IJ׳ר×Ĵ×Ļ×Ķ":139233,"ãģªãģĦãģ§ãģĻ":139234,"Ġkhá»§ng":139235,"Ġ문ìĦľ":139236,"Ġ×ij×ĵ×ijר":139237,"×ĵ×Ļ×ķ":139238,"×ĵ×Ļ×ķ×ķ×Ĺ":139239,"Ġrégl":139240,"ÙħÙĪØ§Ø¯":139241,"обоÑĢ":139242,"обоÑĢоÑĤ":139243,"Ġ×Ķ×ij׾":139244,"Ġ×Ķ×ij׾×ķ×Ĵ":139245,"ØŃاÙħ":139246,"ĠاÙĦعاص":139247,"ĠاÙĦعاصÙħØ©":139248,"пеÑĢаÑĤоÑĢ":139249,"تخÙĦ":139250,"تخÙĦص":139251,"ãģŁãģłãģĹ":139252,"تسÙħ":139253,"à¹Ĥรà¸ĩà¸ŀ":139254,"à¹Ĥรà¸ĩà¸ŀยา":139255,"à¹Ĥรà¸ĩà¸ŀยาà¸ļาล":139256,"ĠYük":139257,"ĠYüksek":139258,"Ġש׳×Ļת":139259,"Ġש׳×Ļ×ª×Ł":139260,"liÄŁe":139261,"Ġפת":139262,"Ġפת×ķ×Ĺ":139263,"ĠbeÄŁ":139264,"ĠbeÄŁen":139265,"Ġ×ŀ×ķר":139266,"Ġ×ŀ×ķר׼×ij":139267,"ĠرساÙĦØ©":139268,"íĨµìĭł":139269,"Ġavalia":139270,"Ġavaliações":139271,"Ġmanh":139272,"Ġmanhã":139273,"Ġìķŀ":139274,"Ġìķŀìľ¼ë¡ľ":139275,"ÙĤتر":139276,"ÙĤترØŃ":139277,"à¹Ģà¸ģืà¸Ń":139278,"à¹Ģà¸ģืà¸Ńà¸ļ":139279,"Ġproposé":139280,"Ø£Ùħا":139281,"Ø£ÙħاÙĥÙĨ":139282,"ĠÐŀÐŀ":139283,"ĠÐŀÐŀÐŀ":139284,"ÙħÙĤار":139285,"ÙħÙĤارÙĨØ©":139286,"ëĦIJ":139287,"ãģĦãģŁãģłãģı":139288,"ÙĤÙĬÙĦ":139289,"ĠнаÑĪиÑħ":139290,"ãĤ«ãĥĥãĥĹ":139291,"×Ĺ×ľ×ª":139292,"Ġëĭ¤ë§Į":139293,"à¸Ĺัà¹Īวà¹Ĥลà¸ģ":139294,"ãĥįãĤ¿":139295,"ØŃساس":139296,"ãģ«ãģªãĤĮ":139297,"جائ":139298,"جائزة":139299,"échange":139300,"économ":139301,"économie":139302,"ТÐĺ":139303,"×¡×ª×Ľ×ľ":139304,"à¸Ĺัà¹īà¸ĩสà¸Ńà¸ĩ":139305,"ĠاÙĦخاÙħ":139306,"ĠاÙĦخاÙħس":139307,"×§×ĺ×¢":139308,"auważ":139309,"à¸ľà¸¹à¹īà¸Ĭาย":139310,
"à¹ģà¸Ľà¸¥à¸ģ":139311,"åIJĮæĻĤãģ«":139312,"знаниÑı":139313,"ãģĦãģŁãģłãģįãģ¾ãģĹãģŁ":139314,"Ġ×ŀ×ij׾×Ļ":139315,"à¸Ĥà¸Ńà¹ĥหà¹ī":139316,"ĠاÙĦتربÙĬØ©":139317,"Ġdécouvert":139318,"Ġżyciu":139319,"après":139320,"Ġyab":139321,"Ġyabanc":139322,"Ġyabancı":139323,"ĠbaÅŁlayan":139324,"ìĹĪëįĺ":139325,"Ġhesabı":139326,"Ġë§Įìķ½":139327,"ë§Īëĭ¤":139328,"ĠThánh":139329,"ãĥ´ãĤ¡":139330,"à¸Ľà¸£à¸±à¸ļà¸Ľà¸£":139331,"à¸Ľà¸£à¸±à¸ļà¸Ľà¸£à¸¸à¸ĩ":139332,"ĠMặc":139333,"à¹Ģหà¸ķà¸¸à¸ľà¸¥":139334,"ĠÐijез":139335,"ĠcapacitÃł":139336,"ÅĤeÅĽ":139337,"ĠпÑĢеим":139338,"ĠпÑĢеимÑĥÑīеÑģÑĤв":139339,"ĠÅļwiÄĻt":139340,"Ġpublié":139341,"×ŀעצ×ij":139342,"ÙħشارÙĥات":139343,"à¸łà¸²à¸©":139344,"à¸łà¸²à¸©à¸µ":139345,"Ġdeuxième":139346,"ĠÙħØŃاÙ쨏":139347,"ĠÙħØŃاÙģØ¸Ø©":139348,"ĠSchön":139349,"、":139350,"Ġ×Ķ×ij×¢":139351,"Ġ×Ķ×ij×¢×Ļ×Ķ":139352,"ĠÙĪØ§ÙĦÙĦÙĩ":139353,"è¨Ģãģ£ãģŁ":139354,"à¸ķà¹īาà¸Ļ":139355,"วรรà¸ĵ":139356,"à¸Ĺิศ":139357,"ĠbaÅŁÄ±na":139358,"ĠmogÄĻ":139359,"ש×Ļפ×ķר":139360,"ĠÙĪØ¹Ø¯":139361,"ĠÙĪØ¹Ø¯Ùħ":139362,"Ġhistórico":139363,"Ġkısı":139364,"ĠìĿ´ê²Į":139365,"ĠPolÃŃtica":139366,"ĠÑģиÑĤÑĥаÑĨии":139367,"ĠkoÅĦca":139368,"×ij×ĵ×Ļ×§×Ķ":139369,"ĠاÙĦسÙĬارات":139370,"ãģªãĤīãģ°":139371,"ãĤµãĥ©":139372,"ãĤĭãģĵãģ¨ãģĮãģ§ãģįãĤĭ":139373,"Ġdecisão":139374,"×ķ×ķ×ĵ":139375,"läss":139376,"lässig":139377,"Ġ׾×Ļשר×IJ׾":139378,"ĠÙĬأتÙĬ":139379,"ר×ķ×ĸ":139380,"Ã¶ÄŁ":139381,"Ã¶ÄŁret":139382,"Ã¶ÄŁretim":139383,"Ġдек":139384,"Ġдекаб":139385,"ĠдекабÑĢÑı":139386,"Ġש×Ĺ×ķר":139387,"ãģ¦ãģıãĤĮãģŁ":139388,"عبارة":139389,"Ġélectrique":139390,"ĠاÙĦتÙĨÙħÙĬØ©":139391,"جرÙī":139392,"ĠìĪĺíĸī":139393,"à¸Ĺู":139394,"ĠÑĢеалÑĮно":139395,"ÑģпоÑģоб":139396,"à¸Ħลà¹īาย":139397,"ĠسعÙĪØ¯":139398,"önü":139399,"ĠÙģÙħÙĨ":139400,"تÙĥÙĪ":139401,"تÙĥÙĪÙĬÙĨ":139402,"ĠкаÑĩеÑģÑĤво":139403,"ĠконÑĤак":139404,"ĠконÑĤакÑĤ":139405,"ĠsözleÅŁme":139406,"à¸Ńà¹īาà¸ĩ":139407,"ĠتÙĪÙģ":139408,"ĠتÙĪÙģÙĬر":139409,"×Ķ×ĸ×ĵ":139410,"×Ķ×ĸ×ĵ×ŀ׳×ķת":139411,"ĠØ·ÙĪÙĬÙĦØ©":139412,"Ġtérmino":139413,"Ġ×IJ×Ļפ×Ķ":139414,"ãĥĵãĥ«":139415,"สà¹Ĥม":139416,"สà¹Ĥมสร":139417,"ĠاÙĦاث":139418,"ĠاÙĦاثÙĨÙĬÙĨ":139419,"евиÑĩ":139420,"Ġopinión":139421,"à¸Ľà¸§à¸Ķ":139422,"åı¤ãģĦ":139423,"รà¹Īา":139424,"ĠBiaÅĤ":139425,"ĠÑģÑĤал":139426,"ĠÑģÑĤало":139427,"ólogo":139428,"ĠìķĦëĭĪëĭ¤":139429,"Ġ×IJ×Ļת":139430,"Ġ×IJ×Ļת×ķ":139431,"à¹Ģหà¹ĩà¸Ļวà¹Īา":139432,"à¸ļารà¹Į":139433,"çĦ¼":139434,"çĦ¼ãģį":139435,"ĠìĿ´ìļ©ìŀIJ":139436,"ĠнекоÑĤоÑĢÑĭе":139437,"ksz":139438,"ksztaÅĤ":139439,"ksztaÅĤc":139440,"ãĤŃãĥ£ãĥĥãĤ·":139441,"ãĤŃãĥ£ãĥĥãĤ·ãĥ³ãĤ°":139442,"ĠroÅĽ":139443,"ĠroÅĽlin":139444,"ÑĢажа":139445,"×ij׳×Ļ×Ļ×Ķ":139446,"à¸Ľà¸£à¸ªà¸´":139447,"à¸Ľà¸£à¸ªà¸´à¸ķ":139448,"Ġgördü":139449,"×ŀ׳×Ķ×Ļ×Ĵ":139450,"å¤īãĤıãģ£ãģ¦":139451,"Ġ×IJ×Ķ":139452,"Ġ×IJ×Ķ×ijת×Ļ":139453,"à¹Ģรà¹Īà¸ĩ":139454,"Ġönünde":139455,"Ġê·¸ëĥ¥":139456,"полиÑĤ":139457,"полиÑĤиÑĩеÑģк":139458,"ãĥ¡ãĥĩãĤ£":139459,"ãĥ¡ãĥĩãĤ£ãĤ¢":139460,"ĠDetay":139461,"ĠDetaylı":139462,"ĠاÙĦصÙģØŃØ©":139463,"à¸ģารà¹Ģà¸ĩิà¸Ļ":139464,"Ġìµľê·¼":139465,"׼ש׾":139466,"I":139467,"вÑĪего":139468,"íķĺìĭ¤":139469,"ĠÐŃÑĤ":139470,"ĠÐŃÑĤоÑĤ":139471,"สื":139472,"สืà¸ļ":139473,"Ġngừng":139474,"ĠдокÑĥменÑĤов":139475,"даваÑĤÑĮ":139476,"ĠاÙĦشخصÙĬØ©":139477,"Ġצע×Ļר":139478,"درÙĥ":139479,"سØŃب":139480,"à¹Ħมà¹Īà¸Ħà¹Īà¸Ńย":139481,"Ġ×Ķ×ŀ×§×ķ×ŀ×Ļ":139482,"สัà¹Īà¸ĩà¸ĭืà¹īà¸Ń":139483,"Ġê·¸ê²ĥìĿĦ":139484,"ãģĤãĤĭãģĦ":139485,"ãģĤãĤĭãģĦãģ¯":139486,"×IJ×ķ×ĺ×ķ×ij":139487,"×IJ×ķ×ĺ×ķ×ij×ķס":139488,"кÑĨион":139489,"ĠÐľÐ¾Ð¶Ð½Ð¾":139490,"ãģıãģł":139491,"ãģıãģłãģķ":139492,"ĠинÑĦоÑĢмаÑĨиÑı":139493,"ï»Ł":139494,"ĠìŀijìĹħ":139495,"Ġ×Ļ×ķסף":139496,"إدارة":139497,"ĠاÙĦØŃاج":139498,"×ł×¡×Ļ×¢×Ķ":139499,"изаÑĨиÑı":139500,"×IJ׾×ij":139501,"×IJ׾×ij×ķ×Ŀ":13950
2,"пед":139503,"Ġ×§×ĺ׳×Ķ":139504,"ĠÙĨÙ쨳Ùĩا":139505,"ĠMinistério":139506,"Ġпен":139507,"ĠпенÑģи":139508,"ãĥIJãĥ©ãĥ³ãĤ¹":139509,"Ġ×Ķת×ķר×Ķ":139510,"Ġtạm":139511,"ĠìĹŃìĭľ":139512,"。":139513,"Ġthá»±":139514,"Ġısı":139515,"컨":139516,"ãģĹãģ£ãģĭãĤĬãģ¨":139517,"Ġxưa":139518,"Ġcặp":139519,"×Ĺ×Ļ×ij×ķר":139520,"วัà¸Ĵà¸Ļà¸ĺรรม":139521,"stär":139522,"stärke":139523,"ĠÑģамÑĭй":139524,"pisa":139525,"pisaÄĩ":139526,"ĠoluÅŁan":139527,"ĠاÙĦØ¥ÙħاÙħ":139528,"ĠcÄĥng":139529,"Ġgünl":139530,"Ġgünlük":139531,"Ġ׳ש×IJר":139532,"Ġkhiá»ĥn":139533,"ç¶ļãģijãĤĭ":139534,"stitución":139535,"Ġcapacité":139536,"Ġjaki":139537,"ĠjakiÅĽ":139538,"вÑĪиÑģ":139539,"вÑĪиÑģÑĮ":139540,"פע×ķ׾×ķת":139541,"ĠØŃÙĬات":139542,"ĠØŃÙĬاتÙĩ":139543,"Ġникогда":139544,"ÐĽÐ¬":139545,"Ġ×Ķ×¢×ķ×ij":139546,"Ġ×Ķ×¢×ķ×ij×ĵ×Ķ":139547,"ĠchÃło":139548,"หลายà¹Ĩ":139549,"ĠÑıн":139550,"ĠÑıнваÑĢ":139551,"ĠÑıнваÑĢÑı":139552,"à¸Īำà¹Ģà¸Ľà¹ĩà¸Ļà¸ķà¹īà¸Ńà¸ĩ":139553,"Ġhöher":139554,"ãģķãĤĮãģ¦ãģĦãģŁ":139555,"สà¸ĩสั":139556,"สà¸ĩสัย":139557,"ĠاÙĦاس":139558,"ĠاÙĦاسÙĦاÙħ":139559,"ĠاÙĦØ´Ùħس":139560,"สà¸ĸาà¸Ļี":139561,"ãĤ¯ãĥ©ãĤ¹":139562,"à¸ŀรร":139563,"à¸ŀรรà¸Ħ":139564,"põ":139565,"põe":139566,"Ġporém":139567,"à¸Ľà¸£à¸°à¸ªà¸ĩ":139568,"à¸Ľà¸£à¸°à¸ªà¸ĩà¸Ħà¹Į":139569,"powiedzie":139570,"powiedzieÄĩ":139571,"ĠмогÑĥ":139572,"Ġжел":139573,"Ġжелез":139574,"ĠاÙĦØ«ÙĤ":139575,"ĠاÙĦØ«ÙĤاÙģÙĬ":139576,"ĠпÑĢавило":139577,"Ġgdyż":139578,"פש×ķ×ĺ":139579,"ÑĢабоÑĤка":139580,"ĠÙĥرة":139581,"شدد":139582,"ÙħارÙĥ":139583,"ÙħÙĥØ©":139584,"ĠподпиÑģ":139585,"×ĺ×ķ×ķ×Ĺ":139586,"ĠÅĽc":139587,"ĠÅĽcian":139588,"ĠرجاÙĦ":139589,"Ġ×ª×ľ×ķ×Ļ":139590,"иÑĪ":139591,"иÑĪÑĮ":139592,"Ġmédec":139593,"Ġmédecin":139594,"ëįĶëĿ¼ëıĦ":139595,"ĠÑĤебÑı":139596,"Ġ׾×Ķ×ķס×Ļ×£":139597,"ãģĬ話":139598,"Ġà¹ģà¸ķà¹Īà¸ģà¹ĩ":139599,"داÙģ":139600,"داÙ쨹":139601,"ĠCùng":139602,"ãĥ»ãĥ»ãĥ»ãĥ»":139603,"ê¶ģ":139604,"ĠdeberÃŃa":139605,"หà¸Ļà¹Īวยà¸ĩาà¸Ļ":139606,"ĠvaÌĢ":139607,"Ġעצ×ŀ":139608,"Ġעצ×ŀ×Ŀ":139609,"à¹Ģà¸Ĭืà¹Īà¸Ńวà¹Īา":139610,"שקע":139611,"Ġ×Ķ׼×ķ׾":139612,"Ġ×Ķ׼×ķ׾׾":139613,"нибÑĥд":139614,"нибÑĥдÑĮ":139615,"ĠëĦĪíĿ¬":139616,"ĠобÑĢаÑī":139617,"ĠобÑĢаÑīа":139618,"Ġ×¢×ij×ķ×ĵת":139619,"ĠاÙĦÙħÙĨتخب":139620,"ıyord":139621,"ıyordu":139622,"ÙĪØ°":139623,"×Ĺש×Ļ×ij×ķת":139624,"Ġ×Ķ×¢×Ļ×§":139625,"Ġ×Ķ×¢×Ļקר×Ļ":139626,"ì¢Į":139627,"ยุà¹Ĥร":139628,"ยุà¹Ĥà¸£à¸Ľ":139629,"ĠапÑĢ":139630,"ĠапÑĢелÑı":139631,"szed":139632,"szedÅĤ":139633,"дон":139634,"à¹Ģà¸ķิà¸ļ":139635,"à¹Ģà¸ķิà¸ļà¹Ĥà¸ķ":139636,"коло":139637,"Ġkażdej":139638,"帰":139639,"帰ãĤĬ":139640,"Ġмилли":139641,"Ġмиллион":139642,"ç¾İåij³ãģĹãģĦ":139643,"تÙĤار":139644,"تÙĤارÙĬر":139645,"ĠìĿ´ë£¨":139646,"ĠìĿ´ë£¨ìĸ´":139647,"Ġsprzedaż":139648,"×Ķ×ķצ×IJ×ķת":139649,"ãĤ¢ãĤ¯ãĤ»":139650,"ãĤ¢ãĤ¯ãĤ»ãĤ¹":139651,"ר×ķ×¥":139652,"ĠгоÑģÑĥдаÑĢÑģÑĤвенн":139653,"Ø£ØŃÙĥ":139654,"Ø£ØŃÙĥاÙħ":139655,"ĠoluÅŁu":139656,"ĠAç":139657,"ĠAçık":139658,"ãĤ¸ãĥ¼":139659,"ç´łæĻ´":139660,"ç´łæĻ´ãĤīãģĹãģĦ":139661,"Ġ×ijש×ij×ķ×¢":139662,"بذ":139663,"بذÙĦ":139664,"สาà¹Ģหà¸ķุ":139665,"Ġpozosta":139666,"ĠpozostaÅĤ":139667,"ØŃرÙħ":139668,"Ġimportância":139669,"leÅŁtirme":139670,"ĠдÑĢев":139671,"Ġmóvil":139672,"ĠAynı":139673,"Ġналог":139674,"Ġналогов":139675,"Ġ×Ĺ×Ļפ×Ķ":139676,"ĠÑĦоÑĢмÑĥ":139677,"à¸Ĺà¸Ķสà¸Ńà¸ļ":139678,"ĠksiÄħżki":139679,"ĠmaÅĤe":139680,"ÙħسأÙĦ":139681,"ÙħسأÙĦØ©":139682,"^^":139683,"çãeste":139684,"éviter":139685,"ĠконÑģÑĤÑĢÑĥк":139686,"ĠконÑģÑĤÑĢÑĥкÑĨи":139687,"ï¾ŀ":139688,"Ġת×ķ׼׳":139689,"ãĤ¹ãĥĪãĥ¬ãĤ¹":139690,"ĠاÙĦاÙĤتصادÙĬ":139691,"×ŀ×ĵ×Ļ":139692,"ĠwÅĤad":139693,"ĠwÅĤadz":139694,"Ø®ÙĪÙģ":139695,"ĠмаÑĤеÑĢиалов":139696,"ãģ¨ãģ£ãģ¦ãĤĤ":139697,"Ġznajdu":139698,"ĠznajdujÄħ":139699,"ÙģØ¦Ø©":139700,"ãģ©ãģ®ãĤĪãģĨãģª":139701,"æĬijãģĪ":1
39702,"׳×Ĺ׾":139703,"Ġdüny":139704,"Ġdünyan":139705,"Ġdünyanın":139706,"гÑĢани":139707,"гÑĢаниÑĩ":139708,"Ġ×Ķש׾×Ļש×Ļ":139709,"Ġ×Ķ×IJש":139710,"åıĬãģ³":139711,"ìĭŃìĭľ":139712,"ìĭŃìĭľìĺ¤":139713,"Ġдолл":139714,"ĠдоллаÑĢ":139715,"ĠповÑĤоÑĢ":139716,"Ġ×Ĺ×Ļ׳×Ŀ":139717,"תפת×Ĺ":139718,"Ñĥвели":139719,"ÑĥвелиÑĩен":139720,"ãĤ«ãĥª":139721,"rawid":139722,"rawidÅĤow":139723,"×ķ×ķ׾":139724,"ãĥŁãĥ¥":139725,"ì½ĺ":139726,"ĠByÅĤ":139727,"ÐľÐIJ":139728,"عÙIJ":139729,"ĠÑģовеÑĢÑĪ":139730,"ĠÑģовеÑĢÑĪенно":139731,"Ġмой":139732,"Ġ×ķ׾×IJ×Ĺר":139733,"æħ£":139734,"æħ£ãĤĮ":139735,"ØŃاÙ쨏":139736,"Ġ무ë£Į":139737,"à¸Ħà¸ĵะà¸ģรรม":139738,"à¸Ħà¸ĵะà¸ģรรมà¸ģาร":139739,"Ġìĸ´ëĶĶ":139740,"Ġdiferen":139741,"Ġdiferença":139742,"ĠاÙĦأساس":139743,"ĠاÙĦأساسÙĬØ©":139744,"Ġ׾×IJ×Ĺר×ķ׳×Ķ":139745,"ê·ł":139746,"Ġ×Ķש׳×Ļ×Ļ×Ķ":139747,"ìľĦìĽIJìŀ¥":139748,"ลุà¸ģ":139749,"çiler":139750,"Ġ×Ķ×IJ׾×ķ":139751,"èģŀãģı":139752,"Ġ×ķ×IJפ×Ļ׾×ķ":139753,"ĠÑĢеализ":139754,"ĠÑĢеализаÑĨи":139755,"ระยะà¹Ģวลา":139756,"ĠجداÙĭ":139757,"تباع":139758,"ĠvehÃŃculo":139759,"Ġдолг":139760,"à¸Ľà¸£à¸´à¸¡à¸²à¸ĵ":139761,"ì¦IJ":139762,"Ġ׾×ŀ×§×ķ×Ŀ":139763,"ĠìĤ¬ì§Ħ":139764,"à¸Ĭà¹īา":139765,"Ġ×ŀ×¢×ķ׾×Ķ":139766,"Ġgörm":139767,"Ġgörmek":139768,"ĠÙĪÙĩذÙĩ":139769,"пеÑĢв":139770,"пеÑĢвÑĭÑħ":139771,"ê·¸ëŀĺ":139772,"ĠاÙĦبرÙĬØ·":139773,"ĠاÙĦبرÙĬطاÙĨÙĬ":139774,"ĠиÑİнÑı":139775,"ĠÐĵоÑĢ":139776,"Ġ׾ש׾×Ŀ":139777,"ÐIJÐĿ":139778,"ĠназнаÑĩен":139779,"ооÑĢ":139780,"ооÑĢÑĥж":139781,"Ġözelli":139782,"ĠözelliÄŁi":139783,"Ġниже":139784,"ç¶ļãģijãģ¦":139785,"ĠаÑĢенд":139786,"Ġkatılı":139787,"Ġkatılım":139788,"ĠإطÙĦاÙĤ":139789,"ĠÙĪØ¥Ø°Ø§":139790,"ĠокÑĤÑı":139791,"ĠокÑĤÑıбÑĢÑı":139792,"à¹Ĥà¸ķà¹":139793,"à¹Ĥà¸ķà¹Ĭ":139794,"à¹Ĥà¸ķà¹Ĭะ":139795,"Ġoldukları":139796,"ÙħÙĪÙĤع":139797,"ëĤ©":139798,"ã썿ĢĿãģ£ãģ¦ãģĦãĤĭ":139799,"Ġש×Ļ׼×ķ׾":139800,"วาà¸Ķ":139801,"سÙĬÙĦ":139802,"à¸Ĥวั":139803,"à¸Ĥวัà¸į":139804,"تØŃÙĥÙħ":139805,"ìĤŃ":139806,"Ġconnaît":139807,"×ł×¤×ª×Ĺ":139808,"Ġchặ":139809,"Ġchặn":139810,"ĠÙħØŃÙħ":139811,"ĠÙħØŃÙħÙĪØ¯":139812,"ãģ´":139813,"ĠпÑĢодÑĥкÑĨии":139814,"здÑĢав":139815,"ãģĶè¦":139816,"ãģĶ覧":139817,"×IJ×ij×IJ":139818,"Ġvéritable":139819,"ĠØ·ÙģÙĦ":139820,"ãĥĪãĥ©ãĥĸãĥ«":139821,"곡":139822,"Ġת×ŀ×ķ׳×Ķ":139823,"Ġkiên":139824,"ĠÙĤادر":139825,"Ø¥ÙĤÙĦÙĬÙħ":139826,"ĠпÑĢедпÑĢи":139827,"ĠпÑĢедпÑĢиÑıÑĤиÑı":139828,"ĠbÄĥng":139829,"Ġayında":139830,"Ġgấp":139831,"еÑħал":139832,"ĠgiÃłnh":139833,"Ġдав":139834,"Ġдавно":139835,"ìĺĢëĭ¤":139836,"à¸Ļัà¸ģà¹Ģà¸ķ":139837,"à¸Ļัà¸ģà¹Ģà¸ķะ":139838,"Ùħستشار":139839,"ستراتÙĬج":139840,"ستراتÙĬجÙĬ":139841,"رÙħز":139842,"ĠtÄ©nh":139843,"ë¡Ń":139844,"ĠÑĩеÑĤ":139845,"ĠÑĩеÑĤÑĭ":139846,"ĠÑĩеÑĤÑĭÑĢе":139847,"ĠEntão":139848,"Ġصغ":139849,"ĠصغÙĬرة":139850,"×ij×Ļ×ĺ×ķ׾":139851,"خطÙĪØ·":139852,"ĠÑĢазвиÑĤие":139853,"Ġamacıyla":139854,"à¸Ĺีวี":139855,"ĠоÑģÑĤ":139856,"ĠоÑģÑĤалÑĮн":139857,"ש×ķ׾×Ĺף":139858,"Ġ׼׳×Ļס":139859,"Ġ׼׳×Ļס×Ķ":139860,"ĠdáºŃy":139861,"ĠyaÅŁayan":139862,"Ġ×ŀ×Ķ×ķ×ķ×Ķ":139863,"ĠÑĥÑģи":139864,"ĠÑĥÑģили":139865,"×ŀפ×Ļ":139866,"ĠпÑĢоведениÑı":139867,"Ġرب":139868,"ĠربÙħا":139869,"ĠاÙĦØ£ÙĪØ³Ø·":139870,"Ġìľłì§Ģ":139871,"Ġpracownik":139872,"Ġpracowników":139873,"×ŀס×ķרת":139874,"ÙĤارب":139875,"à¸Ħวามรูà¹īสึà¸ģ":139876,"à¹ģหละ":139877,"ĠاÙĦÙĨÙĤد":139878,"Ġ×IJ׾פ×Ļ":139879,"Ùħسئ":139880,"ÙħسئÙĪÙĦ":139881,"евÑĭÑħ":139882,"клÑİÑĩениÑı":139883,"×ij×Ļ׳":139884,"×ij×Ļ׳×Ļ×Ķ×Ŀ":139885,"ש×ķ×IJ×Ķ":139886,"ĠÅŁark":139887,"ĠÅŁarkı":139888,"Ġsürec":139889,"Ġsürecin":139890,"à¹Ģà¸Ħรà¸Ķ":139891,"à¹Ģà¸Ħรà¸Ķิà¸ķ":139892,"ãĥIJãĥ¬":139893,"ĠشأÙĨ":139894,"à¹Ģà¸Ńาà¹Ħวà¹ī":139895,"niÄĻcie":139896,"רצ×Ĺ":139897,"ĠaÅŁama":139898,"׳פ×Ĵ×¢":139899,"Ġthá»Ŀ":139900,"Ġkhuẩn":139901,"diÄŁinde":139902,"ÑıÑīиÑħ":139903
,"ãĥĺãĥ«":139904,"Ġüberh":139905,"Ġüberhaupt":139906,"ĠÑĤÑĢебова":139907,"ĠdÅĤugi":139908,"×ĺ×Ļף":139909,"à¸Ĥà¸Ļาà¸Ķà¹ĥหà¸įà¹Ī":139910,"ĠاÙĦØ£Ùĩ":139911,"ĠاÙĦØ£ÙĩÙĦÙĬ":139912,"ĠMüd":139913,"ĠMüdürü":139914,"Ġ×Ļ×Ķ×ķ×ĵ×Ķ":139915,"ÑĭваеÑĤÑģÑı":139916,"ساط":139917,"×Ķ×ª×ł×Ķ×Ĵ":139918,"×Ķ×ª×ł×Ķ×Ĵ×ķת":139919,"à¸ģà¸²à¸£à¸ľà¸¥à¸´à¸ķ":139920,"íĴĢ":139921,"สà¸ĸาà¸Ļà¸ģารà¸ĵà¹Į":139922,"ĠоÑĦ":139923,"ĠоÑĦиÑģ":139924,"ĠÙĦعبة":139925,"ĠstronÄĻ":139926,"Ġר×IJ×ķ×Ļ":139927,"×Ĺ×ij׾":139928,"ĠÑĢÑĭн":139929,"ĠÑĢÑĭнке":139930,"Ġ׾×ŀ×¢×Ł":139931,"اسÙĦ":139932,"หัà¸Ļ":139933,"Ġ×IJ×Ĺ×Ļ":139934,"ĠпÑĢодол":139935,"ê°Ģìŀħ":139936,"Ġ×ijר×Ĺ":139937,"Ġ×ijר×Ĺ×ij×Ļ":139938,"джеÑĢ":139939,"Ġ׾×Ĺ׾":139940,"Ġ׾×Ĺ׾×ķ×ĺ":139941,"Ġ׾×Ĺ׾×ķ×ĺ×Ļף":139942,"ศาสà¸Ļา":139943,"ãĤ¢ãĤ¤ãĥĨ":139944,"ãĤ¢ãĤ¤ãĥĨãĥł":139945,"Ġפר×ķפ":139946,"جزاء":139947,"ลà¸Ńย":139948,"ĠciaÅĤa":139949,"Ġgiết":139950,"ĠзнаÑĩиÑĤелÑĮно":139951,"Ġolmadıģ":139952,"Ġolmadıģını":139953,"нд":139954,"ндекÑģ":139955,"تأÙĥد":139956,"Ġìĸ¸":139957,"Ġìĸ¸ìłľ":139958,"aydın":139959,"ãĥīãĥ¬ãĤ¹":139960,"Ġsắt":139961,"Ġíĺ¸íħĶ":139962,"Ġë¶ģ":139963,"Ġë¶ģíķľ":139964,"ãĥijãĤ¤":139965,"Ġ×ŀש×Ĺ×§×Ļ":139966,"à¸Ħà¸Ļà¸Ńืà¹Īà¸Ļ":139967,"ĠизгоÑĤов":139968,"ĠизгоÑĤовлен":139969,"à¹Ģà¸ģียร":139970,"à¹Ģà¸ģียรà¸ķิ":139971,"תקשר":139972,"ĠÑĢаÑģÑĩеÑĤ":139973,"สà¹Ģà¸ķ":139974,"Ġlänger":139975,"ĠiÅŁlet":139976,"ĠiÅŁletme":139977,"ĠعÙĦÙĬÙĨ":139978,"ĠعÙĦÙĬÙĨا":139979,"élection":139980,"ĠاÙĦغربÙĬØ©":139981,"íĭĢ":139982,"ãĤĤãĤīãģĪ":139983,"Ġкниги":139984,"أسÙħ":139985,"أسÙħاء":139986,"Ġthá»ı":139987,"Ġthá»ıa":139988,"หà¸Ļู":139989,"Ġ×ł×¢×©×Ķ":139990,"à¸łà¸²à¸¢à¹ĥà¸ķà¹ī":139991,"à¸ŀืà¸Ĭ":139992,"رÙĬØ·":139993,"ÙģÙĪØ¶":139994,"ãģĤãĤĬãģĮãģ¨ãģĨãģĶãģĸãģĦãģ¾ãģĹãģŁ":139995,"ש×ĵ×Ķ":139996,"Ġngá»±c":139997,"ĠÑģеÑĢÑĮ":139998,"ĠÑģеÑĢÑĮезн":139999,"Tôi":140000,"Ġfiyatları":140001,"ĠвÑģÑİ":140002,"ĠCódigo":140003,"Ġ×Ķש×IJ":140004,"Ġ×Ķש×IJ׾×Ķ":140005,"ĠPública":140006,"إخ":140007,"إخÙĪØ§ÙĨ":140008,"ĠзаÑıвил":140009,"ãĥ¦ãĥ¼":140010,"ר×IJ×Ļת":140011,"volución":140012,"Ġszko":140013,"ĠszkoÅĤy":140014,"جرÙĬدة":140015,"Ġpensé":140016,"ìī¬":140017,"ĠBüyükÅŁehir":140018,"ĠØ£ÙħرÙĬ":140019,"ĠØ£ÙħرÙĬÙĥÙĬ":140020,"à¸Ļัà¸ģศึà¸ģษา":140021,"Ġtodav":140022,"ĠtodavÃŃa":140023,"ĠСан":140024,"ĠСанкÑĤ":140025,"íķĺìŀIJ":140026,"ØŃÙĪØ§ÙĦ":140027,"׼×ķשר":140028,"à¹Ģลยà¸Ħรัà¸ļ":140029,"Ġalgu":140030,"Ġalguém":140031,"Ù쨲":140032,"Ġçekil":140033,"Ġ×ĵר׼×Ļ×Ŀ":140034,"ãĥIJãĥ©":140035,"à¸ģà¹ĩสามารà¸ĸ":140036,"สà¹Īวà¸Ļลà¸Ķ":140037,"íı°":140038,"ĠPúb":140039,"ĠPúblico":140040,"à¹ģà¸Ļวà¸Ĺาà¸ĩ":140041,"×IJת×Ĵר":140042,"شاش":140043,"شاشة":140044,"ciÅĽni":140045,"ĠÃľrün":140046,"ÙĦÙĪØŃ":140047,"ĠاÙĦبÙĨ":140048,"ĠاÙĦبÙĨÙĥ":140049,"ì¡°ì¹ĺ":140050,"Ġorganización":140051,"ãģĤãĤĬãģĮãģ¨ãģĨãģĶãģĸãģĦãģ¾ãģĻ":140052,"sätze":140053,"ĠÑģемей":140054,"ÙĤصد":140055,"ÑģÑĤвеннÑĭе":140056,"Ġprécéd":140057,"Ġprécédent":140058,"à¸ģรุà¸ĩà¹Ģà¸Ĺà¸ŀฯ":140059,"ãģ¨è¨ĢãģĦ":140060,"×ij׳×Ļ×Ļף":140061,"ĠØŃÙĪ":140062,"ĠØŃÙĪØ§ÙĦÙĬ":140063,"סקס":140064,"ĠsaÄŁlamak":140065,"Ġ׾צ×Ļ×Ļף":140066,"×§×ĵש":140067,"Ġ×Ķ×ŀ×¢×¨×Ľ×ª":140068,"Ġ׾×Ķ×¢×ij×Ļר":140069,"Ġgünd":140070,"Ġgündem":140071,"ĠнаÑĪего":140072,"à¹ĥà¸Ļà¸ŀืà¹īà¸Ļà¸Ĺีà¹Ī":140073,"à¹Ģà¸Ħรืà¸Ń":140074,"à¹Ģà¸Ħรืà¸Ńà¸Ĥ":140075,"à¹Ģà¸Ħรืà¸Ńà¸Ĥà¹Īาย":140076,"ظاÙĩرة":140077,"ÙħÙĨظÙħ":140078,"ÙħÙĨظÙħات":140079,"Ùħتاز":140080,"追ãģĦ":140081,"dıkt":140082,"dıktan":140083,"ĠëįĶìļ±":140084,"ĠÐĿапÑĢимеÑĢ":140085,"twór":140086,"×ŀ×ķעצ×Ķ":140087,"ÙĥÙĪÙĥ":140088,"Щ":140089,"×ŀ×ĺפ׾":140090,"ólica":140091,"訪ãĤĮ":140092,"ĠëĮĢë¶Ģ":140093,"ĠëĮĢë¶Ģë¶Ħ":140094,"ãĤ¯ãĥªãĥĥãĤ¯":140095,"ãĤĴéģ¸":140096,"ãĤĴéģ¸ãģ¶":140097,"Ġpowsta":140098,"ĠpowstaÅĤ":140099,"Ġraz
ón":140100,"×ij×ķ×Ĺר":140101,"ĠÑģообÑīил":140102,"Ġ×§×ij×ķ×¢":140103,"rêt":140104,"à¸Ķีà¸Ĥึà¹īà¸Ļ":140105,"×ŀסע×ĵ":140106,"×ŀסע×ĵ×ķת":140107,"ĠÃĸsterreich":140108,"Ġ׳×Ĺש×ij":140109,"Ùħبادرة":140110,"ì´ī":140111,"×Ĵ׳×ĺ×Ļ":140112,"ä¿¡ãģĺ":140113,"duÄŁ":140114,"duÄŁunu":140115,"Ġphú":140116,"ĠاÙĦأخÙĬر":140117,"Ġتعتبر":140118,"landırıl":140119,"ãģ¨ãģ¯ãģĦ":140120,"ãģ¨ãģ¯ãģĦãģĪ":140121,"ĠاÙĦØ·ÙĦ":140122,"ĠاÙĦØ·ÙĦاب":140123,"ĠNº":140124,"éģ¿ãģij":140125,"اÙĦÙħع":140126,"اÙĦÙħعرÙĪÙģ":140127,"à¸ªà¸łà¸²":140128,"éĽ¢ãĤĮ":140129,"ĠпомоÑīÑĮ":140130,"ĠзнаеÑĤ":140131,"ãĥĹãĥ¬ãĤ¼":140132,"ãĥĹãĥ¬ãĤ¼ãĥ³ãĥĪ":140133,"Ġsupérieur":140134,"Ġש׾×Ļש×Ļ":140135,"ĠاÙĦÙĨÙĪØ¹":140136,"ãĤĵãģ§ãģĻãģŃ":140137,"à¸Ńà¸ļรม":140138,"Ġgiá»įng":140139,"ĠwzglÄĻd":140140,"ĠاÙĦÙģÙĤر":140141,"èrent":140142,"Ġ×ŀ×IJ×Ĺ":140143,"Ġ×ŀ×IJ×Ĺ×ķר×Ļ":140144,"×Ĵ×Ĵ":140145,"×Ļ×Ļ×ij":140146,"ÙħÙĦاب":140147,"ÙħÙĦابس":140148,"Ġhükü":140149,"Ġhükümet":140150,"Ġ×ŀ×Ĵ×Ļ×ij":140151,"ĠÐŀÑĩ":140152,"ĠÐŀÑĩенÑĮ":140153,"æĹ©ãģĦ":140154,"Ġconstrucción":140155,"Ġthượng":140156,"ï¼ĭ":140157,"Ġcoração":140158,"à¹Ģหลà¹ĩà¸ģ":140159,"ĠBaÅŁb":140160,"ĠBaÅŁbakan":140161,"éĢ£ãĤĮ":140162,"ãģĻãĤĭãģĵãģ¨ãģĮãģ§ãģįãģ¾ãģĻ":140163,"ĠÙĤاÙħت":140164,"ĠاÙĥثر":140165,"ÙģØ§Ø¹ÙĦ":140166,"ĠÑĦоÑĢ":140167,"ĠÑĦоÑĢÑĥм":140168,"غذÙĬ":140169,"ĠiÅŁle":140170,"ĠiÅŁleml":140171,"ĠiÅŁlemleri":140172,"ĠìĤ¬ëŀĮìĿĢ":140173,"ĠìŀijìĦ±":140174,"Ġë§Ī볨":140175,"ÙħجÙĦس":140176,"หมู":140177,"дв":140178,"двиг":140179,"двига":140180,"à¹Ģสียà¸Ĭีวิà¸ķ":140181,"×Ķתפת×Ĺ":140182,"×Ķתפת×Ĺ×ķת":140183,"ĠмеÑĤÑĢо":140184,"ĠÑģенÑĤ":140185,"ĠÑģенÑĤÑı":140186,"ĠÑģенÑĤÑıбÑĢÑı":140187,"ê³§":140188,"Ġ×ľ×¤×¢":140189,"Ġ×ľ×¤×¢×ŀ×Ļ×Ŀ":140190,"à¹Ģà¸ļีย":140191,"詳ãģĹãģı":140192,"çķ°ãģªãĤĭ":140193,"Ġİlçe":140194,"ĠAtat":140195,"ĠAtatür":140196,"ĠAtatürk":140197,"รุà¹Īà¸ĩ":140198,"Ġkaldı":140199,"Ġ주ìŀ¥":140200,"Ġprésence":140201,"Ġнаб":140202,"ĠнаблÑİ":140203,"ĠнаблÑİда":140204,"ĠÑģамого":140205,"×Ĵ×ķש":140206,"×ŀ×ĺ×ķפ":140207,"×ŀ×ĺ×ķפ׾":140208,"ĠвÑĭбиÑĢа":140209,"ĠìŀIJ리":140210,"åĪĨãģĭãĤīãģªãģĦ":140211,"ĠзÑĥб":140212,"Ġש׼×ijר":140213,"Ġدائ":140214,"ĠدائÙħا":140215,"ĠпаÑĢÑĤи":140216,"ï¼²":140217,"ĠاÙĬضا":140218,"ĠÑħоз":140219,"ĠÑħозÑı":140220,"ĠÑħозÑıй":140221,"ĠÑħозÑıйÑģÑĤв":140222,"ĠاÙĦأج":140223,"ĠاÙĦأجÙĨب":140224,"ĠاÙĦأجÙĨبÙĬØ©":140225,"ĠÐĹна":140226,"ĠApós":140227,"ĠÑįнеÑĢ":140228,"ĠÑįнеÑĢги":140229,"Ġyans":140230,"Ġyansı":140231,"ĠJusti":140232,"ĠJustiça":140233,"Ġprévu":140234,"มวล":140235,"ìŀ¥ëĭĺ":140236,"à¸ģระà¸ļ":140237,"à¸ģระà¸ļวà¸Ļ":140238,"à¸ģระà¸ļวà¸Ļà¸ģาร":140239,"×ŀ×ŀ":140240,"×ŀ×ŀ×ķצע":140241,"Ġhẹ":140242,"Ġhẹn":140243,"здание":140244,"ĠakÅŁ":140245,"ĠakÅŁam":140246,"×ĺ×ķפ":140247,"Ġgerekt":140248,"Ġgerekti":140249,"ĠgerektiÄŁini":140250,"Ġnarz":140251,"ĠnarzÄĻdzi":140252,"épo":140253,"époque":140254,"ĠThần":140255,"Ġwysoko":140256,"ĠwysokoÅĽci":140257,"à¸ľà¸¹à¹īà¸Ľ":140258,"à¸ľà¸¹à¹īà¸Ľà¹Īวย":140259,"ĠÙĬبدÙĪ":140260,"ÑĤелÑĮного":140261,"ĠвзглÑıд":140262,"ĠjednÄħ":140263,"ĠìĿĺ견":140264,"Ġà¸Ĥà¸ĵะà¸Ĺีà¹Ī":140265,"פ×Ļ×ĵ":140266,"ìĥģëĭ´":140267,"Ġmỡ":140268,"×Ķ×ŀ׾":140269,"×Ķ×ŀ׾צ×ķת":140270,"ĠÑģоÑģÑĤо":140271,"ĠÑģоÑģÑĤоиÑĤ":140272,"Ġави":140273,"Ġавиа":140274,"ĠLänder":140275,"تصÙĪÙĬر":140276,"×ŀ×ĵ×Ļ×Ķ":140277,"ìłĪì°¨":140278,"ãģ¨ãĤĬ":140279,"ãģ¨ãĤĬãģĤ":140280,"ãģ¨ãĤĬãģĤãģĪ":140281,"ãģ¨ãĤĬãģĤãģĪãģļ":140282,"ĠÑĢÑıд":140283,"ĠÑĢÑıдом":140284,"ĠNhất":140285,"ĠاÙĦÙĥاÙħÙĦ":140286,"×Ĺ׾׾":140287,"ĠGiấy":140288,"צ×ĺר":140289,"צ×ĺרף":140290,"Ġ׾×ij×ĺ׾":140291,"ĠимеÑĤÑĮ":140292,"ס×ŀ×ķ×ļ":140293,"Ġparticipação":140294,"íķľëĭ¤ë©´":140295,"ÙħÙĨتدÙĬ":140296,"ÙħÙĨتدÙĬات":140297,"ĠeÄŁlen":140298,"gänge":140299,"ربØŃ":140300,"ãĤ®ãĥ£":140
301,"ĠاÙĦرÙĤÙħ":140302,"à¸ĭà¹īำ":140303,"ĠHóa":140304,"×ŀר×Ĺ×§":140305,"ØŃÙħاÙħ":140306,"بÙĪÙĥ":140307,"ĠArtÃŃculo":140308,"ãĥĦãĤ¢ãĥ¼":140309,"×Ķפ׼×Ķ":140310,"×Ĺ׾×ķף":140311,"ĠпеÑĢеÑħод":140312,"lenmiÅŁ":140313,"زراعة":140314,"Ġseñor":140315,"ãģ£ãģ¦ãģįãģ¦":140316,"إش":140317,"إشارة":140318,"ĠpodÃŃa":140319,"ĠÃľlke":140320,"нÑģкаÑı":140321,"Ġadapté":140322,"Ġdüzenlen":140323,"Ġdüzenlenen":140324,"ĠÑģÑĤала":140325,"ĠÙĬØŃتاج":140326,"Ġnier":140327,"Ġnieruch":140328,"Ġnieruchomo":140329,"ĠnieruchomoÅĽci":140330,"ãģĵãģ¨ãģĮãģĤãĤĭ":140331,"ยà¸Ńà¸Ķà¹Ģยีà¹Īยม":140332,"ĠÙħج":140333,"ĠÙħجاÙĨÙĬ":140334,"Ġзаб":140335,"Ġзабол":140336,"Ġзаболев":140337,"ĠзаболеваниÑı":140338,"ĠÅĽro":140339,"ĠÅĽrodk":140340,"ĠÅĽrodków":140341,"Ġ×Ķ׾×IJ×ķ×ŀ×Ļ":140342,"ĠdokÅĤad":140343,"ĠdokÅĤadnie":140344,"ãģŁãģıãģªãģĦ":140345,"ãģ¯ãģļãģ§ãģĻ":140346,"ã썿ĢĿãģ£ãģ¦ãģĦãģŁ":140347,"écran":140348,"ìĹħì²´":140349,"trzymaÅĤ":140350,"ÑģÑĤвеннÑĭй":140351,"ĠNotÃŃc":140352,"ĠNotÃŃcias":140353,"ÙħرÙĬ":140354,"ÙħرÙĬض":140355,"æ°Ĺè»":140356,"æ°Ĺ軽":140357,"æ°Ĺ軽ãģ«":140358,"ëĵ£":140359,"Ġ×ĵ×ķ×IJר":140360,"Ġ׾×ŀ׳":140361,"Ġ׾×ŀ׳×ķ×¢":140362,"ĠçalÄ±ÅŁÄ±yor":140363,"ĠÅŁidd":140364,"ĠÅŁiddet":140365,"ĠMặt":140366,"ĠateÅŁ":140367,"ĠполÑĥÑĩениÑı":140368,"à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩมืà¸Ń":140369,"ĠgrÃ¶ÃŁer":140370,"دائ":140371,"دائرة":140372,"Ġbulun":140373,"Ġbulunmaktadır":140374,"à¹Ģหร":140375,"à¹Ģหรีย":140376,"à¹Ģหรียà¸į":140377,"à¸Ļัà¸ģà¸Ĺà¹Īà¸Ńà¸ĩà¹Ģà¸Ĺีà¹Īยว":140378,"Ġalanında":140379,"ĠÑĥзна":140380,"ĠлеÑĩение":140381,"売ãĤĮ":140382,"Ġçevir":140383,"ĠdesteÄŁi":140384,"ĠheiÃŁt":140385,"âĸ²":140386,"ØŃØ·":140387,"à¸Ħำà¸ķà¸Ńà¸ļ":140388,"ãĤªãĥ³ãĥ©ãĤ¤ãĥ³":140389,"Ġ×ij×Ĺ×Ļ×Ļ×Ŀ":140390,"ãĥ¦ãĥĭ":140391,"Ġdüzenleme":140392,"ĠmodalitÃł":140393,"سرط":140394,"سرطاÙĨ":140395,"×ŀ׼×ķף":140396,"ĠданнÑĭй":140397,"ترت":140398,"ترتÙĬب":140399,"à¸ļาà¸ĩà¸Ħà¸Ļ":140400,"ĠÄIJá»ĭnh":140401,"มูล":140402,"มูลà¸Ħà¹Īา":140403,"ÙĨÙĤص":140404,"à¸ģารรัà¸ģษา":140405,"ĠÑĦон":140406,"ĠÑĦонд":140407,"ãĤĪãģĨãģ«ãģªãģ£ãģŁ":140408,"ÙħعاÙĦ":140409,"ÙħعاÙĦجة":140410,"ĠOsman":140411,"ĠOsmanlı":140412,"иÑĩеÑģком":140413,"à¸Ńยาà¸ģà¸Īะ":140414,"ãģķãģ¾ãģĸ":140415,"ãģķãģ¾ãģĸãģ¾":140416,"ãģķãģ¾ãģĸãģ¾ãģª":140417,"Ġת×ķ׼׾":140418,"עצ×ij":140419,"ĠاÙĦعسÙĥ":140420,"ĠاÙĦعسÙĥرÙĬ":140421,"Ġvéhic":140422,"Ġvéhicule":140423,"Ġ×Ļצ×Ĺ×§":140424,"ĠاÙĦÙĪØŃ":140425,"ĠاÙĦÙĪØŃÙĬد":140426,"ĠاÙĦعدÙĪ":140427,"ĠQuản":140428,"Ġê³µëıĻ":140429,"بدÙĦ":140430,"ĠÄijảng":140431,"Ġmá»ĩnh":140432,"Ġniezb":140433,"ĠniezbÄĻ":140434,"ĠniezbÄĻdn":140435,"Ġyayınlan":140436,"обÑīи":140437,"Ġgötür":140438,"צפ":140439,"צפ×ķ×Ļ":140440,"ĠÙĦÙĬبÙĬ":140441,"ĠÙĦÙĬبÙĬا":140442,"ØŃÙĪØ§":140443,"Ġдоб":140444,"ĠдобÑĢо":140445,"иÑĢÑĥем":140446,"ĠاÙĦØŃÙĥÙĪÙħÙĬØ©":140447,"mÃ¤ÃŁig":140448,"Ġedición":140449,"влекаÑĤелÑĮ":140450,"влекаÑĤелÑĮн":140451,"Ġ×ª×©×ľ×ķ×Ŀ":140452,"Ġ×Ķש×ķ׳×Ļ×Ŀ":140453,"มิà¸ĸุ":140454,"มิà¸ĸุà¸Ļ":140455,"มิà¸ĸุà¸Ļายà¸Ļ":140456,"é£Łãģ¹ãģ¦":140457,"ĠìĪĺì§ij":140458,"ס×ij×Ļ":140459,"ĠиÑİлÑı":140460,"Ġà¹Ħà¸Ķà¹īà¹ģà¸ģà¹Ī":140461,"׾×Ĺ×Ŀ":140462,"trä":140463,"trägt":140464,"ãģĿãĤĤãģĿãĤĤ":140465,"ÐĿÐķ":140466,"ĠвнÑĥÑĤ":140467,"ĠвнÑĥÑĤÑĢи":140468,"ãģ¨ä¸Ģç·Ĵãģ«":140469,"ãĤ«ãĥķãĤ§":140470,"Ġ×ij×Ĺ×ĵר":140471,"×Ĺ×ŀש":140472,"ãĤ¨ãĥį":140473,"ãĤ¨ãĥįãĥ«":140474,"ãĤ¨ãĥįãĥ«ãĤ®":140475,"ãĤ¨ãĥįãĥ«ãĤ®ãĥ¼":140476,"à¸Ĥà¸Ńà¸ĩà¸ķัวà¹Ģà¸Ńà¸ĩ":140477,"بÙĤاء":140478,"פס×Ļ׼":140479,"פס×Ļ׼×ķ׾×ķ×Ĵ":140480,"ãĥ¡ãĥĥ":140481,"ãĥ¡ãĥĥãĤ»":140482,"ãĥ¡ãĥĥãĤ»ãĥ¼ãĤ¸":140483,"ÙĦÙĤب":140484,"AÄŀ":140485,"שק×Ļ×¢":140486,"ÙĤساÙħ":140487,"×ĵ×ķ×Ĵ×ŀ×Ķ":140488,"æ·±ãģĦ":140489,"íĸĪëĬĶëį°":140490,"ĠrozwiÄħzanie":140491,"à¸Ļัà¹Īà¸Ļà¹Ģà¸Ńà¸ĩ":140492,"×Ļצ×ij":140493,"Ġtrông":140494,"à¹ĥà¸
Ĭà¹īà¸ļริà¸ģาร":140495,"ĠاÙĦÙħÙĪØ³Ùħ":140496,"ĠдеÑĤи":140497,"ãģĹãģĭãģªãģĦ":140498,"ס×Ļף":140499,"Ġréférence":140500,"à¹ģหà¹īà¸ĩ":140501,"ãĤĤãĤīãģ£ãģŁ":140502,"Ġ׾ר׼":140503,"Ġ׾ר׼×ķש":140504,"شعÙĪØ±":140505,"ĠÐijог":140506,"Ġlazım":140507,"Ġ×Ļש׳×Ŀ":140508,"ĠпаÑĢÑĤ":140509,"ĠпаÑĢÑĤнеÑĢ":140510,"ĠÑĥника":140511,"ĠÑĥникалÑĮн":140512,"Ġmatériel":140513,"×ŀרק":140514,"Ġphưá»Ŀng":140515,"Ġзай":140516,"Ġзайм":140517,"ÙģÙĤد":140518,"UniversitÃł":140519,"×¢×¨×Ľ×Ļ×Ŀ":140520,"Ġbaño":140521,"ĠноÑı":140522,"ĠноÑıбÑĢÑı":140523,"à¸Ľà¹īาย":140524,"Ġtats":140525,"Ġtatsäch":140526,"Ġtatsächlich":140527,"ĠÑĤÑĢеÑĤÑĮ":140528,"Ñįм":140529,"ãĥĻãĥ¼ãĤ¹":140530,"Ġnhá»±a":140531,"ìĬ¤íģ¬":140532,"ĠعبداÙĦÙĦÙĩ":140533,"Ġת×ķר×Ķ":140534,"أشÙĬ":140535,"أشÙĬاء":140536,"ĠÙĦÙĦغا":140537,"ĠÙĦÙĦغاÙĬØ©":140538,"ÙħÙĪØ§ÙĤ":140539,"ÙħÙĪØ§ÙĤÙģ":140540,"ĠgÅĤówna":140541,"ĠartÄ±ÅŁ":140542,"Ġ×ŀ×§×ķ×ŀ×Ļ":140543,"ãĤ¯ãĥ©ãĥĸ":140544,"ĠسÙĪÙī":140545,"ĠìŬìĦ±":140546,"اسر":140547,"اسرائÙĬÙĦ":140548,"Ġ×ł×Ľ×ª×ij":140549,"ยà¹īà¸Ńà¸Ļ":140550,"Ġdeberá":140551,"Ġphẫu":140552,"ÑİÑīем":140553,"ĠÙĦدÙĬÙĨا":140554,"×ŀ×ĺ×Ķ":140555,"Ġ׳×ķ׾×ĵ":140556,"ĠвÑģÑĤÑĢеÑĩа":140557,"ãĤīãĤĮãģ¦ãģĦãģ¾ãģĻ":140558,"ĠcaÅĤej":140559,"ยึ":140560,"ยึà¸Ķ":140561,"поÑĤен":140562,"поÑĤенÑĨи":140563,"ĠлиÑĤ":140564,"ĠлиÑĤеÑĢ":140565,"ĠлиÑĤеÑĢаÑĤÑĥÑĢ":140566,"Ġкаждом":140567,"ĠíĮIJ":140568,"ĠíĮIJëĭ¨":140569,"à¸Īู":140570,"Ġpresença":140571,"ãģªãĤĵãģ§":140572,"ÙħÙĬاÙĩ":140573,"инÑĦоÑĢм":140574,"инÑĦоÑĢмаÑĨион":140575,"инÑĦоÑĢмаÑĨионн":140576,"ĠìŀIJìŰ":140577,"ר׼ש":140578,"Ġödül":140579,"ç¶ļãģı":140580,"ĠпÑģ":140581,"ĠпÑģиÑħ":140582,"ĠпÑģиÑħолог":140583,"تذÙĥر":140584,"Ġìŀħìŀ¥":140585,"ลà¸Ķà¹Į":140586,"ìĦłê±°":140587,"ãģ£ãģ¦ãģĬãĤĬãģ¾ãģĻ":140588,"Ġ×Ļ×¢":140589,"Ġ×Ļ×¢×§×ij":140590,"ĠاÙĦطعاÙħ":140591,"ãĥĨãĤ¹ãĥĪ":140592,"ĠTuấn":140593,"Ġparticipación":140594,"×ŀ×ķ×ŀ×Ĺ×Ķ":140595,"×Ĵרס×Ķ":140596,"ĠاÙĦتÙĨÙģÙĬ":140597,"ĠاÙĦتÙĨÙģÙĬذÙĬ":140598,"ĠбезопаÑģн":140599,"gef":140600,"gefähr":140601,"Ø´ÙĪØ±":140602,"ĠmyÅĽli":140603,"ÙĪØ§Ø´ÙĨ":140604,"ÙĪØ§Ø´ÙĨØ·ÙĨ":140605,"׳×ķסע":140606,"ÙĥÙĩ":140607,"ÙĥÙĩرب":140608,"ÙĥÙĩرباء":140609,"ĠmusiaÅĤ":140610,"ìĭ¸":140611,"ãĥĸãĥ©ãĥĥãĤ¯":140612,"Ġcréé":140613,"ÙĨÙĩار":140614,"owoÅĽÄĩ":140615,"ÙħØŃاÙĥÙħ":140616,"ĠwÅĤaÅĽ":140617,"ĠwÅĤaÅĽc":140618,"ĠwÅĤaÅĽciciel":140619,"ĠÙĬؤ":140620,"ĠÙĬؤدÙĬ":140621,"×ŀ×¢×ķ׳":140622,"×IJ×ij׾":140623,"خطأ":140624,"ĠÑħолод":140625,"×ĸ×ķ׾":140626,"ãģĵãĤĮãĤī":140627,"ãģĵãĤĮãĤīãģ®":140628,"Ġbásica":140629,"ฤà¸Ķ":140630,"ฤà¸Ķูà¸ģ":140631,"ฤà¸Ķูà¸ģา":140632,"ฤà¸Ķูà¸ģาล":140633,"èIJ½ãģ¡çĿĢ":140634,"ãģªãģĦãģĵãģ¨":140635,"صÙĪÙħ":140636,"ÙĨجØŃ":140637,"׳ק×ķ×ĵ":140638,"׳ק×ķ×ĵת":140639,"клаÑģÑģ":140640,"íķĺìĭľëĬĶ":140641,"ëĦĺ":140642,"Ġש×IJ×Ļ׳×ķ":140643,"ĠСейÑĩаÑģ":140644,"mayacaģı":140645,"Ġyapılır":140646,"ĠcategorÃŃa":140647,"عباد":140648,"ĠТеп":140649,"ĠТепеÑĢÑĮ":140650,"×Ķ×Ļס×ĺ×ķר×Ļ":140651,"hế":140652,"ãĤ³ãĥ¼ãĥī":140653,"Ġcabeça":140654,"جÙħا":140655,"جÙħاÙĩ":140656,"جÙħاÙĩÙĬر":140657,"ä½İãģĦ":140658,"ĠÑĤоваÑĢов":140659,"à¸Ĭาวà¸ļà¹īาà¸Ļ":140660,"ĠÑģÑĤанов":140661,"ĠÑģÑĤановиÑĤÑģÑı":140662,"ĠавÑĤомобилÑĮ":140663,"ĠÑģлÑĥÑĩай":140664,"à¸Ńัà¸ŀ":140665,"ĠGiriÅŁ":140666,"ĠìĿ¼ëĭ¨":140667,"ĠпÑĢоÑģ":140668,"ĠпÑĢоÑģмоÑĤÑĢ":140669,"ãģªãģıãģªãģ£ãģŁ":140670,"à¸¡à¸µà¸Ľà¸±à¸įหา":140671,"ïºİ":140672,"écoute":140673,"ĠÙħÙĪØ¬ÙĪØ¯":140674,"ĠسرÙĬع":140675,"ĠÙĪÙĩÙĨا":140676,"ĠÙĪÙĩÙĨاÙĥ":140677,"à¸Ħุà¸ĵสม":140678,"à¸Ħุà¸ĵสมà¸ļัà¸ķิ":140679,"Ġìļ°ìĦł":140680,"à¸ŀระà¸ŀุà¸Ĺà¸ĺ":140681,"好ãģ¿":140682,"ظÙĦÙħ":140683,"ĠмакÑģ":140684,"ĠмакÑģималÑĮ":140685,"ĠмакÑģималÑĮно":140686,"ãĥªãĤ¢ãĥ«":140687,"à¹ģมà¹īวà¹Īา":140688,"ĠاÙĦØŃÙĪØ§Ø±":140689,"ãĥĹãĥ©ãĤ¹":140690,"ĠعÙĦاÙĤ
Ø©":140691,"ĠíĸīëıĻ":140692,"Ġgönderil":140693,"Ġlãi":140694,"ĠsaÄŁlıkl":140695,"ĠsaÄŁlıklı":140696,"ĠÑĪаг":140697,"Ġ×ij×IJר×Ķ":140698,"prowadziÄĩ":140699,"ãģĦãģıãģ¤ãģĭ":140700,"ĠبتارÙĬØ®":140701,"Ġ×ij×IJ×ķת×Ķ":140702,"Ġmóc":140703,"ĠÐľÐ½Ðµ":140704,"ãĥĹãĥ¬ãĥ¼":140705,"×IJ×ĸר×Ĺ":140706,"åł´åIJĪãģ«ãģ¯":140707,"使ãģĪ":140708,"à¹Ģรืà¸Ńà¸Ļ":140709,"ĠÐŁÐµÑĤ":140710,"ĠÐŁÐµÑĤÑĢ":140711,"ãģ«åħ¥ãĤĭ":140712,"Ùħادة":140713,"à¹Ģà¸ĩืà¹Īà¸Ńà¸Ļ":140714,"à¹Ģà¸ĩืà¹Īà¸Ńà¸Ļà¹Ħà¸Ĥ":140715,"ĠÑģоÑģÑĤоÑıние":140716,"ônica":140717,"ĠÑĦев":140718,"ĠÑĦевÑĢа":140719,"ĠÑĦевÑĢалÑı":140720,"Ġ×ķ×ĸ":140721,"Ġ×ķ×ĸ×IJת":140722,"à¸Ħริ":140723,"à¸Ħริส":140724,"ĠÐķÑīе":140725,"ãģ£ãģ¦ãģĹãģ¾ãģĦãģ¾ãģĹãģŁ":140726,"ĠпÑĢавиÑĤелÑĮ":140727,"ĠпÑĢавиÑĤелÑĮÑģÑĤв":140728,"Ġtäglich":140729,"Ġëĭ¹ìĭľ":140730,"×ŀ×ķ×¢×ŀ×ĵ":140731,"ĠдвоÑĢ":140732,"æīķ":140733,"æīķãģĦ":140734,"ĠÑģÑĤанеÑĤ":140735,"ĠвоздейÑģÑĤв":140736,"ĠвоздейÑģÑĤви":140737,"Ġfête":140738,"à¹Ģสา":140739,"תק×ķ×ķ×Ķ":140740,"Ġuyar":140741,"Ġuyarı":140742,"à¸ģลัà¸ļà¹Ħà¸Ľ":140743,"Ġgiưá»Ŀng":140744,"Ġва":140745,"ĠваÑĪи":140746,"ĠÄijáºŃu":140747,"ĠSpaÃŁ":140748,"ĠìķĦë§Ī":140749,"à¹Ħà¸Ķà¹īà¸ĩà¹Īาย":140750,"Ġ×Ķ×ŀ×ijקש":140751,"æĸ°ãģŁ":140752,"æĸ°ãģŁãģª":140753,"ılıyor":140754,"план":140755,"Ġ×Ķ×ijר×Ļ×IJ×ķת":140756,"ĠaÄŁrı":140757,"Ġsaygı":140758,"建ãģ¦":140759,"Ġnajwyż":140760,"Ġnajwyższ":140761,"سÙĬاسات":140762,"ãģĬå¾Ĺ":140763,"ĠاÙĦعÙĦÙĬ":140764,"ĠاÙĦعÙĦÙĬا":140765,"Ġcorazón":140766,"ì¹ĺë£Į":140767,"หัวà¸Ĥà¹īà¸Ń":140768,"ĠبØŃÙĬ":140769,"ĠبØŃÙĬØ«":140770,"звезд":140771,"بÙĪØ§Ø¨Ø©":140772,"ÐĽÐĺ":140773,"ÙĦازÙħ":140774,"Ġrozp":140775,"Ġrozpoc":140776,"ĠrozpoczÄĻ":140777,"触ãĤĮ":140778,"ĠاÙĦجÙħÙĩ":140779,"ĠاÙĦجÙħÙĩÙĪØ±":140780,"ĠspÄĻd":140781,"ĠspÄĻdz":140782,"วิà¸Ĺยาศาสà¸ķรà¹Į":140783,"иваеÑĤÑģÑı":140784,"Ġданной":140785,"Ġreprésente":140786,"ĠÄijá»ĭch":140787,"Ġ×¢×ŀ×ķ×§":140788,"à¸Ńัà¸Ļà¸ķร":140789,"à¸Ńัà¸Ļà¸ķราย":140790,"Ġestratég":140791,"Ġestratégia":140792,"padÅĤ":140793,"Ġвполн":140794,"Ġвполне":140795,"ĠпÑĢедоÑģÑĤавлен":140796,"×Ĺ׾×ķ×§":140797,"×Ĺ׾×ķקת":140798,"ãĤ¢ãĥĬ":140799,"ĠاÙĦغذ":140800,"ĠاÙĦغذائÙĬ":140801,"ĠÑĥзн":140802,"ĠÑĥзнаÑĤÑĮ":140803,"à¸ĭà¹īาย":140804,"å½ĵãģ¦":140805,"ØŃÙĬاء":140806,"Ġbásico":140807,"×§×ķ×ij×¢":140808,"ĠاÙĦÙħباراة":140809,"ĠاÙĦÙĩاتÙģ":140810,"Ġ׼׳×Ĵ×ĵ":140811,"à¸Ľà¸£à¸°à¸«à¸¢":140812,"à¸Ľà¸£à¸°à¸«à¸¢à¸±à¸Ķ":140813,"Ðļак":140814,"à¸Ĺีà¹Īà¸Ļà¹Īา":140815,"à¸Ĺีà¹Īà¸Ļà¹Īาสà¸Ļà¹ĥà¸Ī":140816,"ãģ¾ãģģ":140817,"ï½¢":140818,"Ñģкоп":140819,"Ġsonrasında":140820,"ĠurzÄħd":140821,"ĠurzÄħdzenia":140822,"׼×ķ×ķ׳":140823,"׼×ķ×ķ×ł×ª":140824,"Ġ׾×Ķת×ŀ×ķ×ĵ":140825,"Ġ׾×Ķת×ŀ×ķ×ĵ×ĵ":140826,"ĠÑģли":140827,"ĠÑģлиÑĪ":140828,"ĠÑģлиÑĪком":140829,"ĠÑģÑĤÑĥд":140830,"ĠÑģÑĤÑĥденÑĤ":140831,"Ġ×Ķ×ķ×ĵ":140832,"Ġ×Ķ×ķ×ĵ×¢×Ķ":140833,"ë¹Ħìļ©":140834,"à¸Ńยาà¸ģà¹ĥหà¹ī":140835,"Ġbá»ģ":140836,"ยุà¸Ĺà¸ĺ":140837,"ÐĺÐĿ":140838,"سائر":140839,"أصÙĪÙĦ":140840,"ĠاÙĦغرÙģ":140841,"ãģĵãģ¨ãĤĤãģĤãĤĬãģ¾ãģĻ":140842,"è¾¼ãģ¾ãĤĮ":140843,"ĠاÙĦسابع":140844,"Ġcá»§":140845,"ãģĦãģŁãģłãģĦãģŁ":140846,"ì§ĵ":140847,"ìĤ¬ë¬´":140848,"powiedź":140849,"تÙģÙĥ":140850,"تÙģÙĥÙĬر":140851,"иÑĢовки":140852,"ĠíĨµíķ´ìĦľ":140853,"ãĤ¨ãĤ¹ãĥĨ":140854,"ĠдеÑıÑĤелÑĮноÑģÑĤÑĮ":140855,"ĠданнÑĭм":140856,"Ġ×¢×ķר":140857,"Ġ×¢×ķר׼×Ļ":140858,"×ķ×ĵעת":140859,"Ġhayatını":140860,"ĠbÄħd":140861,"ĠbÄħdź":140862,"obsÅĤug":140863,"à¹Ģà¸ŀียà¸ĩà¹ģà¸Ħà¹Ī":140864,"à¸ĭà¹Īา":140865,"è²łãģij":140866,"ĠÑģÑĤÑĢем":140867,"ĠÄijá»īnh":140868,"ĠÐłÑĥÑģ":140869,"ĠNữ":140870,"Ġ׾×Ķש×Ļ×Ĵ":140871,"Ġjednoc":140872,"Ġjednocze":140873,"ĠjednoczeÅĽnie":140874,"Ġ×Ķ×Ĵ×ij×ķ×Ķ":140875,"أخÙĦاÙĤ":140876,"ĠнаÑģел":140877,"ĠнаÑģелениÑı":140878,"ĠÙĬÙĨب":140879,"ĠÙĬÙĨبغÙĬ":140880,"ãģĮãģĭ":140881,
"ãģĮãģĭãģĭ":140882,"×Ĵעת":140883,"ÐŀÐł":140884,"ĠналиÑĩии":140885,"Ġë§Īì§Ģ":140886,"Ġë§Īì§Ģë§ī":140887,"ĠíĸīìĤ¬":140888,"ĠtreÅĽci":140889,"Ġê°Ģì¹ĺ":140890,"ì¦ĺ":140891,"Ġаналог":140892,"×Ķצעת":140893,"влад":140894,"владе":140895,"ĠÑģделал":140896,"Ġ׳×Ĵ×Ļש":140897,"Ġ׳×Ĵ×Ļש×ķת":140898,"полнение":140899,"à¸Ĩà¹Īา":140900,"ĠDön":140901,"׼׾׼׾×Ķ":140902,"×ŀ×ĸ×Ĵ":140903,"ÙħÙģ":140904,"ÙħÙģÙĩ":140905,"ÙħÙģÙĩÙĪÙħ":140906,"×Ķ×ĵ":140907,"×Ķ×ĵפס":140908,"×Ķ×ĵפס×Ķ":140909,"ãģĻãģİãģ¦":140910,"ĠгÑĢ":140911,"ĠгÑĢн":140912,"×ŀ×ĺ×ķס":140913,"Ġ기ìĸµ":140914,"ï¾Ł":140915,"ĠpÅĤyn":140916,"ĠGründe":140917,"ĠBücher":140918,"ĠwedÅĤug":140919,"ãģ¾ãģłãģ¾ãģł":140920,"Ġ׳×Ķ×ĵר":140921,"ĠÙĬستطÙĬع":140922,"ĠHiá»ĩp":140923,"ãĤŃãĥ£ãĥ³ãĥļ":140924,"ãĤŃãĥ£ãĥ³ãĥļãĥ¼ãĥ³":140925,"Ġthá»ķ":140926,"Ġeuropéenne":140927,"à¸ļัà¸ĩ":140928,"à¸ļัà¸ĩà¸Ħัà¸ļ":140929,"ĠszczegóÅĤowo":140930,"׳שק":140931,"ãĥķãĥ©ãĥ³ãĤ¹":140932,"×ŀ×ķ×ŀ×Ĺ×Ļ":140933,"Ġcomún":140934,"Ġçarp":140935,"ØŃتÙĬا":140936,"ØŃتÙĬاج":140937,"ØŃتÙĬاجات":140938,"ëĭ´ëĭ¹":140939,"ä½ķ度":140940,"ä½ķ度ãĤĤ":140941,"×ĵ×ij×§":140942,"ãģįãĤĮ":140943,"ãģįãĤĮãģĦ":140944,"Ġкам":140945,"ĠкамеÑĢ":140946,"ĠespecÃŃfico":140947,"Ġteléfono":140948,"à¸ķัà¹īà¸ĩà¸Ńยูà¹Ī":140949,"IÅŀ":140950,"ãģ©ãĤĵãģ©":140951,"ãģ©ãĤĵãģ©ãĤĵ":140952,"עצ×ŀ×IJ×Ļ":140953,"à¸Ķัà¸ĩà¸Ļีà¹ī":140954,"ĠÑĦоÑĢмиÑĢов":140955,"ĠÑĦоÑĢмиÑĢова":140956,"×ķ×ŀ×ij":140957,"Ġkullanımı":140958,"ÐľÐŀ":140959,"עש×Ļ":140960,"עש×Ļ×Ļ×Ķ":140961,"Ġönlem":140962,"à¹Ģà¸Ńà¹ĩ":140963,"à¹Ģà¸Ńà¹ĩม":140964,"×ŀשק×Ļ×¢":140965,"ר×Ļ×Ĺ":140966,"à¸Ĥัà¸Ķ":140967,"ĠíĻľ":140968,"ĠíĻľìļ©":140969,"à¸ĭะ":140970,"ãĤĪãģĨãģ«ãģªãĤĬãģ¾ãģĹãģŁ":140971,"ĠÑĢаÑģпÑĢ":140972,"ĠÑĢаÑģпÑĢоÑģÑĤ":140973,"ĠÑĢаÑģпÑĢоÑģÑĤÑĢан":140974,"ĠÑĢаÑģпÑĢоÑģÑĤÑĢанен":140975,"׼×Ļ×ķף":140976,"ÙĤبض":140977,"تصرÙĬØŃ":140978,"تصرÙĬØŃات":140979,"ĠоÑĢи":140980,"ĠоÑĢиг":140981,"ĠоÑĢигина":140982,"ĠоÑĢигинал":140983,"ĠاÙĦعاÙĦÙĬ":140984,"à¹ģหà¹Īà¸ĩà¸Ļีà¹ī":140985,"ãĥķãĤ¡ãĥ¼":140986,"ãģ¦ãģĦãģį":140987,"ãģ¦ãģĦãģįãģŁãģĦ":140988,"פתר":140989,"פתר×ķ׳×ķת":140990,"Ġ×ij×Ļ×Ĺ":140991,"Ġ×ij×Ļ×Ĺ×ĵ":140992,"Ġodby":140993,"ĠodbyÅĤ":140994,"ĠоÑĩеÑĢед":140995,"Ġtrương":140996,"ãĤŃãĥ³":140997,"×ŀ×ķפ":140998,"×ŀ×ķפע":140999,"ëĵľë¦½":141000,"ëĵľë¦½ëĭĪëĭ¤":141001,"à¸ŀืà¹īà¸Ļà¸IJาà¸Ļ":141002,"ìŀIJ격":141003,"ĠViá»ĩn":141004,"ĠDespués":141005,"Ġ×IJ׾×Ļ׳×ķ":141006,"Ġdurée":141007,"íĩ´":141008,"Ġmüzik":141009,"iếu":141010,"ĠÑĢазмеÑīен":141011,"ĠкÑĥд":141012,"ĠкÑĥда":141013,"غض":141014,"غضب":141015,"ĠTambém":141016,"à¸Īัà¸Ķสà¹Īà¸ĩ":141017,"à¸ģารà¹ģสà¸Ķà¸ĩ":141018,"onomÃŃa":141019,"Ġанг":141020,"Ġангли":141021,"Ġанглий":141022,"ĠанглийÑģк":141023,"Ġznal":141024,"Ġznalaz":141025,"ĠznalazÅĤ":141026,"תר×Ĵ":141027,"תר×Ĵ×ķ×Ŀ":141028,"ĠÑģнов":141029,"ĠÑģнова":141030,"ĠÑĩаÑģа":141031,"Ġcommunauté":141032,"ĠespecÃŃfica":141033,"ĠLá»ĭch":141034,"Ġlié":141035,"ÙģØ¬Ø±":141036,"à¹Ģà¸ģà¹Īà¸ĩ":141037,"عاÙĦ":141038,"عاÙĦج":141039,"Ø£ÙĨظ":141040,"Ø£ÙĨظÙħØ©":141041,"ESİ":141042,"ĠاÙĦØŃدÙĬد":141043,"à¸ŀระà¸Ńà¸ĩà¸Ħà¹Į":141044,"Ġפרשת":141045,"Ġдвиж":141046,"ĠдвижениÑı":141047,"ĠاÙĦجارÙĬ":141048,"à¸ĺาà¸Ļี":141049,"неÑģен":141050,"ĠاÙĦÙĨÙĩائÙĬ":141051,"ĠбеÑĢ":141052,"ĠбеÑĢем":141053,"ĠбеÑĢеменн":141054,"Ġdépartement":141055,"à¹Ģà¸Ĺีย":141056,"à¹Ģà¸Ĺียà¸ļ":141057,"ĠÐľÐ°ÑĢи":141058,"ĠнекоÑĤоÑĢÑĭÑħ":141059,"обеÑģп":141060,"обеÑģпеÑĩен":141061,"×Ĺ×ķ×ĸ":141062,"×Ĺ×ķ×ĸ×Ķ":141063,"ÙĨتج":141064,"à¸Īะà¹Ħà¸Ķà¹īรัà¸ļ":141065,"á»°":141066,"Ġéléments":141067,"عط":141068,"عطاء":141069,"Ġtắt":141070,"iá»ĩm":141071,"ÑİÑīиÑħÑģÑı":141072,"ãģĹãģ°":141073,"ãģĹãģ°ãĤīãģı":141074,"ĠпоможеÑĤ":141075,"à¸Ĥà¸ĵะà¸Ļีà¹ī":141076,"Ġעשר×ķת":141077,"éģķãģ£ãģ¦":141078,"ĠпÑĢог":141079,"ĠпÑ
Ģогн":141080,"ĠпÑĢогноз":141081,"ĠtÅĤ":141082,"ĠtÅĤum":141083,"ĠtÅĤumacz":141084,"Tür":141085,"Türkiye":141086,"ãģįãģ£":141087,"ãģįãģ£ãģĭãģij":141088,"Ġ×Ķ׳×ķ׼":141089,"Ġ×Ķ׳×ķ׼×Ĺ×Ļ":141090,"ĠìĥĿìĤ°":141091,"ĠÑĦоÑĢмÑĭ":141092,"ç¾İãģĹãģĦ":141093,"à¸Ľà¸£à¸¶à¸ģ":141094,"à¸Ľà¸£à¸¶à¸ģษา":141095,"Ġlumière":141096,"ãĤªãĥ¼ãĥĹ":141097,"ãĤªãĥ¼ãĥĹãĥ³":141098,"à¸Ľà¸·à¸Ļ":141099,"วัสà¸Ķ":141100,"วัสà¸Ķุ":141101,"еÑĢÑĤв":141102,"ÙĥÙĦÙģ":141103,"ï½£":141104,"à¸ĺรรมà¸Ķา":141105,"׳×ĺר":141106,"ĠпÑĢедÑģÑĤавлÑıеÑĤ":141107,"Ġanálisis":141108,"Ġbãi":141109,"باÙĤÙĬ":141110,"à¸Ľà¸£à¸°à¹Ģà¸Ķ":141111,"à¸Ľà¸£à¸°à¹Ģà¸Ķà¹ĩà¸Ļ":141112,"ĠÑģлÑĥÑĩаÑı":141113,"ĠÑģлÑĥÑĩаÑıÑħ":141114,"ÐĽÐIJ":141115,"สัà¸ĩà¹Ģà¸ģ":141116,"สัà¸ĩà¹Ģà¸ģà¸ķ":141117,"Ġprzec":141118,"Ġprzecież":141119,"ÙħصÙĦ":141120,"ÙħصÙĦØŃØ©":141121,"ש×ķ×§×ķ׾×ĵ":141122,"ĠобоÑĢÑĥдованиÑı":141123,"ĠtrwaÅĤ":141124,"رÙĪÙħ":141125,"ìķĪëĤ´":141126,"ĠNghá»ĭ":141127,"خش":141128,"à¸ļาà¸Ħาร":141129,"à¸ļาà¸Ħารà¹Īา":141130,"ĠопÑĨион":141131,"ĠÑģозданиÑı":141132,"ãĤ³ãĤ¹ãĥĪ":141133,"Ġ×Ķ×¢×ľ×Ļ":141134,"Ġ×Ķ×¢×ľ×Ļ×ķף":141135,"läuft":141136,"ãĥĻãĤ¹ãĥĪ":141137,"Ġrê":141138,"Ġrêve":141139,"×IJ×ij×Ļ×ij":141140,"×Ļ×Ļ×ļ":141141,"ë¶Ļ":141142,"ãĤ¤ãĥ³ãĥī":141143,"ÅĤoży":141144,"ÅĤożyÄĩ":141145,"عائÙĦ":141146,"عائÙĦØ©":141147,"Ø£ÙĪØ±":141148,"Ø£ÙĪØ±Ø§ÙĤ":141149,"à¸Ĺà¹īà¸Ńà¸ĩà¸ĸ":141150,"à¸Ĺà¹īà¸Ńà¸ĩà¸ĸิà¹Īà¸Ļ":141151,"Ġähn":141152,"Ġähnlich":141153,"ãĥŁãĥĭ":141154,"à¸ľà¸¹":141155,"à¸ľà¸¹à¹īà¸Ļ":141156,"à¸ľà¸¹à¹īà¸Ļำ":141157,"ĠмаÑĤеÑĢиалÑĭ":141158,"ĠкапиÑĤ":141159,"ĠкапиÑĤал":141160,"F":141161,"Ġseçil":141162,"Ġhứng":141163,"Ġintéressant":141164,"ãģ£ãģ¦ãģĦãģı":141165,"ĠeÄŁer":141166,"ëIJĺìĹĪìĬµëĭĪëĭ¤":141167,"ĠanlaÅŁma":141168,"ãģĶåĪ©ç͍":141169,"Ġ×ij×ĸ׼":141170,"Ġ×ij×ĸ׼×ķת":141171,"ëĿ¼ë©´":141172,"ĠÙĬÙĪØ³":141173,"ĠÙĬÙĪØ³Ùģ":141174,"أسÙĦØŃØ©":141175,"ĠGefühl":141176,"ĠноÑĢмалÑĮн":141177,"ãĥĻãĥ³":141178,"ãģķãĤĮãĤĭãģĵãģ¨":141179,"ĠÐijеÑģ":141180,"ãģ¨ãģĦãģĪãģ°":141181,"ĠÙħÙĩÙħ":141182,"ĠÙħÙĩÙħØ©":141183,"ãģ§ãģĹãĤĩãģĨãģŃ":141184,"ĠêµŃëĤ´":141185,"à¹Ģมà¹ĩà¸Ķ":141186,"×ŀ×ijקר":141187,"ĠاÙĦدÙĨÙĬ":141188,"ĠاÙĦدÙĨÙĬا":141189,"à¸Ĭู":141190,"кÑĢÑĥÑĤ":141191,"Ġthoáng":141192,"Ġ׳×ĵר":141193,"Ġ׳×ĵרש":141194,"ĠÑĢаÑģÑģказал":141195,"ĠAuÃŁerdem":141196,"פ×IJר":141197,"פ×IJרק":141198,"Ġ×ŀש×Ĺ×§×Ļ×Ŀ":141199,"צר׼×Ļ×Ŀ":141200,"×ŀ×ĵ×ķ":141201,"×ŀ×ĵ×ķ×Ļ×§":141202,"èĭ¦ãģĹ":141203,"ĠÑģиг":141204,"ĠÑģигнал":141205,"ĠMá»įi":141206,"Ġtrữ":141207,"ĠnastÄĻp":141208,"ĠnastÄĻpnie":141209,"Ġì¶Ķì§Ħ":141210,"ĠاÙĦÙģÙĨد":141211,"ĠاÙĦÙģÙĨدÙĤ":141212,"koÅĦczyÅĤ":141213,"สีà¹Ī":141214,"×§×Ļ×ij":141215,"×§×Ļ×ij×ķ×¥":141216,"ĠнÑĥжнÑĭ":141217,"大åĪĩ":141218,"大åĪĩãģª":141219,"æıĽãģĪ":141220,"ת×ķס":141221,"ת×ķספת":141222,"ãģ£ãģ¦ãģĦãģªãģĦ":141223,"ĠмÑı":141224,"ĠмÑıг":141225,"ĠмÑıгк":141226,"Ġjakie":141227,"ĠjakieÅĽ":141228,"à¸ķำà¸ļ":141229,"à¸ķำà¸ļล":141230,"ĠìŀĪì§Ģ":141231,"×ij×ĺ×IJ":141232,"ĠоÑĤлиÑĩно":141233,"ÙĤÙIJ":141234,"ĠавÑĤомоб":141235,"ĠавÑĤомоби":141236,"ĠавÑĤомобилÑı":141237,"دÙĬÙħÙĤراطÙĬ":141238,"ĠاÙĦÙĪØ§":141239,"ĠاÙĦÙĪØ§ØŃد":141240,"ĠسÙĪØ±ÙĬØ©":141241,"أغÙĦ":141242,"أغÙĦب":141243,"ĠÑįкÑĢан":141244,"ãĥĹãĥ©ãĤ¤":141245,"ĠjesteÅĽ":141246,"ãĥIJãĥª":141247,"Ġ×Ķ×IJ×ķ×ķ×Ļר":141248,"ائÙĥ":141249,"à¸Ńยà¹Īาà¸ĩยิà¹Īà¸ĩ":141250,"ÑĢекÑĤ":141251,"Ġumo":141252,"Ġumoż":141253,"Ġumożli":141254,"Ġumożliw":141255,"Ġumożliwia":141256,"Ġnächste":141257,"ĠìŀĪì§Ģë§Į":141258,"ĠпÑĢедн":141259,"ĠпÑĢедназ":141260,"ĠпÑĢедназнаÑĩен":141261,"Ġmaçı":141262,"Ġpomi":141263,"ĠpomiÄĻd":141264,"ĠpomiÄĻdzy":141265,"ĠاÙĦÙĦÙĤاء":141266,"à¹Ģà¸Ķà¸Ńะ":141267,"ĠновоÑģÑĤи":141268,"×ŀ×Ĺ׾×Ķ":141269,"رÙĬاضÙĬ":141270,"à¸Ķà¸Ļ":141271,"à¸Ķà¸Ļà¸ķรี":141272,"بصر":141273,"ìĬ¤íĥ
Ģ":141274,"scripción":141275,"Ġnapisa":141276,"ĠnapisaÅĤ":141277,"Ġ׳ש×ŀ×¢":141278,"ĠاÙĦÙħØŃÙĦÙĬ":141279,"Ġhiá»ĥn":141280,"×IJ×Ĺ":141281,"×IJ×Ĺר×IJ×Ļ":141282,"ĠгÑĢаниÑĨ":141283,"æīĭç¶ļãģį":141284,"Ùĥسب":141285,"Ġà¹ģà¸ķà¹Īà¸ĸà¹īา":141286,"à¸Ķาวà¸Ļà¹Į":141287,"à¸Ķาวà¸Ļà¹Įà¹Ĥหลà¸Ķ":141288,"ãĤĭãģĵãģ¨ãģĮãģ§ãģįãģ¾ãģĻ":141289,"åŁºæľ¬çļĦãģ«":141290,"ÙĪÙĦاد":141291,"räume":141292,"دÙģØ§Ø¹":141293,"×Ļצע":141294,"ĠOczy":141295,"ĠOczywiÅĽcie":141296,"ĠÅģ":141297,"ĠÅģa":141298,"اÙĦÙĬاب":141299,"اÙĦÙĬاباÙĨ":141300,"áºłI":141301,"ĠBirliÄŁi":141302,"×Ķ×ķצ":141303,"×Ķ×ķצ×IJת":141304,"ĠÄijua":141305,"Ġê·¸ëŁ¬ëĭĪê¹Į":141306,"Ġréalité":141307,"عÙĦاÙĤات":141308,"Jeste":141309,"JesteÅĽ":141310,"Ġмнож":141311,"ĠмножеÑģÑĤво":141312,"K":141313,"ãĥĹãĥŃãĤ¸ãĤ§":141314,"ãĥĹãĥŃãĤ¸ãĤ§ãĤ¯ãĥĪ":141315,"ĠÑĦл":141316,"ظÙĨ":141317,"×Ĵ׾×Ĵ׾":141318,"ĠmÅĤodzie":141319,"ĠmÅĤodzież":141320,"à¸Ļà¹īำà¸ķา":141321,"à¸Ļà¹īำà¸ķาล":141322,"ÐĽÐķ":141323,"×ij×ķ×ĺ":141324,"Ġ׾×Ķ×Ĵ×Ļ×ĵ":141325,"ãģĵãģ¨ãĤĤãģĤãĤĭ":141326,"زاد":141327,"×ŀ×Ļ×ĵ×¢":141328,"ĠgÅĤównie":141329,"ãĥıãĤ¦":141330,"ãĥıãĤ¦ãĤ¹":141331,"бел":141332,"Ġétape":141333,"ðŁĺĢ":141334,"ĠмоделÑĮ":141335,"aģını":141336,"ש×Ĺ×§":141337,"ש×Ĺקף":141338,"Ġniño":141339,"à¸Ĭà¹īาà¸ĩ":141340,"à¹Ģลีย":141341,"ĠÑĦоÑĢме":141342,"ĠاÙĦشرÙĬÙģ":141343,"ĠÑĥдаÑĢ":141344,"arriv":141345,"arrivée":141346,"ĠmiesiÄĻ":141347,"ĠmiesiÄĻcy":141348,"ØŃرÙĥ":141349,"ØŃرÙĥات":141350,"ĠDiá»ħn":141351,"ÐĿЫ":141352,"ãģ¾ãģ£ãģŁãģı":141353,"Ġ×Ļר×ķ×§":141354,"еÑģÑĤеÑģÑĤв":141355,"еÑģÑĤеÑģÑĤвенн":141356,"Ġê·¸ëŁ¼":141357,"ĠاÙĦÙħتÙĪ":141358,"ĠاÙĦÙħتÙĪØ³Ø·":141359,"Ġbénéfic":141360,"Ġbénéficie":141361,"Ġwybra":141362,"ĠwybraÄĩ":141363,"ĠاÙĦزÙħÙĨ":141364,"ĠпÑĢинÑı":141365,"ĠпÑĢинÑıл":141366,"Ù쨱ØŃ":141367,"Ġksz":141368,"ĠksztaÅĤ":141369,"ĠksztaÅĤt":141370,"ק׾×ĺ":141371,"×ij×ĵ×Ļקת":141372,"Ġgiấ":141373,"Ġgiấc":141374,"ĠproprietÃł":141375,"деÑĢжан":141376,"ĠKöln":141377,"ĠGüzel":141378,"×Ļפ×ķ×Ļ":141379,"ĠCuá»Ļc":141380,"ÑįÑĤаж":141381,"ترÙĥÙĬ":141382,"ترÙĥÙĬز":141383,"ложений":141384,"ĠпÑĥ":141385,"ĠпÑĥÑĤи":141386,"اختÙĦاÙģ":141387,"åĩºãģ¦ãģıãĤĭ":141388,"à¸ļุà¸ģ":141389,"âĿ¤":141390,"ÑĦан":141391,"פש×ĺ":141392,"à¸ļัà¸Ļà¹Ģà¸Ĺ":141393,"à¸ļัà¸Ļà¹Ģà¸Ĺิà¸ĩ":141394,"ĠاÙĦساد":141395,"ĠاÙĦسادس":141396,"ĠاÙĦÙĤÙĪÙħ":141397,"ĠاÙĦÙĤÙĪÙħÙĬ":141398,"Ġyönetici":141399,"ÙĩÙĪØ§Øª":141400,"ÙĩÙĪØ§ØªÙģ":141401,"Ġresponsável":141402,"ĠподдеÑĢжива":141403,"ĠاÙĦسÙĦØ·":141404,"ĠاÙĦسÙĦطات":141405,"ãģĹãģ¦ãģĬãģı":141406,"ãĥļãĥĥãĥĪ":141407,"à¸Ľà¸¸à¹Īม":141408,"ĠoglÄħda":141409,"ÙĨاÙĤ":141410,"ÙĨاÙĤØ´":141411,"à¸Ħà¸Ńà¸Ļà¹Ĥà¸Ķ":141412,"ĠMüsl":141413,"ĠMüslü":141414,"ĠMüslüman":141415,"ĠMoż":141416,"ĠMożna":141417,"Ġnumérique":141418,"Ġvá»ı":141419,"ĠسÙĬتÙħ":141420,"ĠyerleÅŁ":141421,"монÑĤаж":141422,"Ġgoût":141423,"ãģ¦ãģĬãĤĬãģ¾ãģĻ":141424,"ĠKhánh":141425,"Ġедин":141426,"ĠединÑģÑĤв":141427,"اÙĨØ®Ùģ":141428,"اÙĨØ®ÙģØ§Ø¶":141429,"ìĭľíĹĺ":141430,"Ġlặng":141431,"ĠÑĢолÑĮ":141432,"à¸ķัวà¹ģà¸Ĺà¸Ļ":141433,"à¸Ħà¹Īาà¹ĥà¸Ĭà¹ī":141434,"à¸Ħà¹Īาà¹ĥà¸Ĭà¹īà¸Īà¹Īาย":141435,"Ġverfüg":141436,"Ġverfügbar":141437,"ìĻĶëĭ¤":141438,"ãģĦãģļ":141439,"ãģĦãģļãĤĮ":141440,"ĠиÑģÑģледованиÑı":141441,"меÑīа":141442,"×Ķ×Ĺ":141443,"×Ķ×Ĺ×ĸר":141444,"à¹ģà¸Łà¸Ĭัà¹Īà¸Ļ":141445,"تصرÙģ":141446,"إرÙĩاب":141447,"ĠexercÃŃcio":141448,"Ġélev":141449,"Ġélevé":141450,"สัà¸įà¸įาà¸ĵ":141451,"ÃĸZ":141452,"ãĥĹãĥŃãĤ°":141453,"ãĥĹãĥŃãĤ°ãĥ©":141454,"ãĥĹãĥŃãĤ°ãĥ©ãĥł":141455,"ĠwewnÄĻtrzn":141456,"Ġhenüz":141457,"é£Ľãģ³":141458,"à¹Ģà¸Ķà¸Ńรà¹Į":141459,"ÑģÑĥж":141460,"ÑģÑĥжден":141461,"شعÙĪØ¨":141462,"ãģ²ãģ¨ãĤĬ":141463,"ĠwyÅĤÄħ":141464,"ĠwyÅĤÄħcznie":141465,"ĠплоÑħо":141466,"ÐĶÐķ":141467,"Ầ":141468,"ÙģØ¹Ø§ÙĦÙĬ":141469,"ÙģØ¹
Ø§ÙĦÙĬات":141470,"ĠاÙĦعشر":141471,"ÑģÑĤÑĥпил":141472,"Ġyarg":141473,"Ġyargı":141474,"нÑİÑİ":141475,"×ķ×IJ×ij":141476,"Ġuç":141477,"Ġuçak":141478,"ë²½":141479,"تÙĪÙĤÙĬ":141480,"تÙĪÙĤÙĬع":141481,"Ġì¤ijìĭ¬":141482,"׳×Ļ×ķ×ķ×ĺ":141483,"Ø£ÙĥÙĦ":141484,"ç½®ãģĦãģ¦":141485,"éłĤãģį":141486,"Ġ×Ķת×ij":141487,"Ġ×Ķת×ij×Ļ×¢×Ķ":141488,"Ġdürfen":141489,"ÙħÙĤاÙĦ":141490,"ÙħÙĤاÙĦات":141491,"ĠزÙħÙĨ":141492,"à¸ŀฤศ":141493,"à¸ŀฤศà¸Ī":141494,"à¸ŀฤศà¸Īิà¸ģ":141495,"à¸ŀฤศà¸Īิà¸ģายà¸Ļ":141496,"ĠнеÑģколÑĮ":141497,"ĠнеÑģколÑĮки":141498,"ĠнеÑģколÑĮкиÑħ":141499,"Ġcriança":141500,"มิà¸ķร":141501,"×ŀ׼×Ļר×ķת":141502,"à¸ģารà¸ļริหาร":141503,"Ġtélécharg":141504,"Ġ×IJ×ķ×Ķ×ijת":141505,"ĠBüro":141506,"ä½ľãģ£ãģŁ":141507,"ĠKiÅŁi":141508,"ç¾İåij³ãģĹ":141509,"à¹Ģลยà¸Ħà¹Īะ":141510,"à¸ŀà¸ļà¸ģัà¸ļ":141511,"à¸Īà¹īา":141512,"Ġçer":141513,"Ġçerç":141514,"Ġçerçeve":141515,"ãĤĴä½ľãģ£ãģ¦":141516,"ĠпеÑĢвÑĥÑİ":141517,"×ŀצר×Ļ×Ŀ":141518,"×IJ׾×ķ×Ķ":141519,"×IJ׾×ķ×Ķ×Ļ×Ŀ":141520,"Ġagré":141521,"Ġagréable":141522,"Ġayır":141523,"İLİ":141524,"ãĤ¥":141525,"ĠíĺĦ":141526,"ĠíĺĦìĭ¤":141527,"ثاÙĦØ«":141528,"ת×ĸ":141529,"ת×ĸ×ķ׳×Ķ":141530,"ãģ¨ãģĦãģ£ãģ¦":141531,"ãģ¨ãģĦãģ£ãģ¦ãĤĤ":141532,"ĠابÙĪ":141533,"ĠÑģобак":141534,"é£Łãģ¹ãģŁ":141535,"Ġданном":141536,"à¹Ģลิ":141537,"à¹Ģลิศ":141538,"Ġíļ":141539,"Ġíļ¨":141540,"Ġíļ¨ê³¼":141541,"ãĤĤãĤīãģĪãĤĭ":141542,"׳צ׾":141543,"ÑĦик":141544,"ÑĦикÑģ":141545,"ĠjesteÅĽmy":141546,"ת×Ĺ×ķש×Ķ":141547,"à¹Ħมà¹Īà¸Ħวร":141548,"ĠØŃسÙĬÙĨ":141549,"à¸ģารลà¸ĩà¸Ĺุà¸Ļ":141550,"ë´¤":141551,"ĠÐĺменно":141552,"à¸ļà¸Ńรà¹Į":141553,"à¸ļà¸Ńรà¹Įà¸Ķ":141554,"ĠCảnh":141555,"ìĦľë¹ĦìĬ¤":141556,"Ġполов":141557,"Ġполовин":141558,"ĠзамеÑĩа":141559,"ãģĦãĤįãĤĵãģª":141560,"Ġ×ij×Ļ×§":141561,"Ġ×ij×Ļקש":141562,"лÑĥÑĪ":141563,"ãĤĴè¿İ":141564,"ãĤĴè¿İãģĪ":141565,"جرÙĬÙħØ©":141566,"Ġtây":141567,"ĠاÙĦÙĨÙĪ":141568,"ĠاÙĦÙĨÙĪÙĪÙĬ":141569,"ÃĤN":141570,"ì¿ł":141571,"หà¸Ļาว":141572,"Ġ×ij×Ĺש×ij×ķף":141573,"زار":141574,"à¸Ķาร":141575,"à¸Ķารา":141576,"ĠÅĽl":141577,"ĠÅĽlub":141578,"มีà¸Ħวามสุà¸Ĥ":141579,"Ġnhu":141580,"ĠnhuáºŃn":141581,"ÙħØŃطة":141582,"à¹Ģสืà¹īà¸Ńà¸ľà¹īา":141583,"ĠТолÑĮко":141584,"ĠÙĥس":141585,"ĠÙĥسارة":141586,"ÙħشرÙĪØ¹":141587,"niÄĻcia":141588,"×¢×Ľ×©×Ļ×ķ":141589,"تÙĦÙģ":141590,"تÙĦÙ쨲ÙĬ":141591,"تÙĦÙ쨲ÙĬÙĪÙĨ":141592,"ĠlÆ°á»Ľi":141593,"ĠÐľÐ¾ÑģквÑĭ":141594,"Ġréserve":141595,"ĠanlaÅŁ":141596,"ĠanlaÅŁÄ±l":141597,"ĠedeceÄŁi":141598,"รà¸Ńà¸ĩà¹Ģà¸Ĺà¹īา":141599,"Ġبط":141600,"ĠبطرÙĬ":141601,"ĠبطرÙĬÙĤØ©":141602,"ãģ¦ãģĹãģ¾ãģ£ãģ¦":141603,"ãĤĤãĤīãģ£ãģ¦":141604,"برج":141605,"æ±ļ":141606,"æ±ļãĤĮ":141607,"Ġchoc":141608,"Ġchocia":141609,"Ġchociaż":141610,"Ġzobac":141611,"ĠzobaczyÄĩ":141612,"пÑĢÑı":141613,"пÑĢÑıжен":141614,"ĠÑĨиÑĦ":141615,"ĠÑĨиÑĦÑĢ":141616,"Ġмам":141617,"ĠвзÑıÑĤÑĮ":141618,"Ġchạm":141619,"جسÙħ":141620,"ØŃÙħاس":141621,"à¹Ģลà¹Īม":141622,"à¸ŀิษ":141623,"×Ķפ׼×ķ":141624,"à¸Ĭà¹Īà¸Ńà¸ĩà¸Ĺาà¸ĩ":141625,"Ġвек":141626,"Ġвека":141627,"Æ¡Ìģ":141628,"Æ¡Ìģi":141629,"ĠTiá»ģn":141630,"Ġtrầm":141631,"мÑĭÑĪ":141632,"мÑĭÑĪл":141633,"ĠÑĤÑĥ":141634,"ĠÑĤÑĥÑĢиÑģÑĤ":141635,"Ġchc":141636,"ĠchcÄħ":141637,"Ġавг":141638,"ĠавгÑĥÑģÑĤ":141639,"ĠавгÑĥÑģÑĤа":141640,"ס×IJ×ķת":141641,"Ġר×Ĵ׾":141642,"à¸ľà¸¥à¸ģระà¸Ĺ":141643,"à¸ľà¸¥à¸ģระà¸Ĺà¸ļ":141644,"å¤īãĤıãĤĭ":141645,"Ġ×Ķ×IJ×Ĺר×ķ׳×Ļ×Ŀ":141646,"سÙģÙĬر":141647,"ĠÑĩаÑīе":141648,"ãģĦãĤī":141649,"ãģĦãĤīãģ£":141650,"ãģĦãĤīãģ£ãģĹãĤĥ":141651,"×ķ×ŀ׳×Ļ×Ŀ":141652,"Ġarttır":141653,"ĠChá»ĭ":141654,"Ġì¡°ì§ģ":141655,"ĠÑĥÑģпеÑħ":141656,"Ġ×¢×ķס":141657,"Ġ×¢×ķסק":141658,"ĠìĥĿëªħ":141659,"ÑĨиÑĤ":141660,"Ġregión":141661,"ÐŀÐĿ":141662,"ĠdoÄŁum":141663,"ĠyaÅŁad":141664,"ĠyaÅŁadıģı":141665,"à¸Ĺà¸Ķลà¸Ńà¸ĩ":141666,"Ġgözü":141667,"ש×Ļר×Ķ":141668,"дÑĥмал":141669,"Ġdaģı":141670
,"Ġdaģıt":141671,"à¸Ĺีมà¸ĩาà¸Ļ":141672,"Ġtiá»ģm":141673,"ĠاÙĦÙĥبر":141674,"ĠاÙĦÙĥبرÙī":141675,"ì¹Ń":141676,"ĠGünc":141677,"ĠGüncelle":141678,"ĠGüncelleme":141679,"ê¹Ĭ":141680,"ĠобоÑĢÑĥдование":141681,"ĠÑĢеÑĪа":141682,"Ụ":141683,"ĠпиÑĤ":141684,"ĠпиÑĤаниÑı":141685,"à¹Ģรียà¸ļ":141686,"×Ľ×ª×Ļ×ij×Ķ":141687,"Ġпон":141688,"ĠпонÑĢав":141689,"ĠпонÑĢави":141690,"Ġ×Ķ×ķ׾×ĵ":141691,"Ġ×Ķ×ķ׾×ĵת":141692,"Ġê²ģ":141693,"Ġê²ģëĭĪëĭ¤":141694,"ĠпеÑĢвой":141695,"ãĥ©ãĤ¤ãĥķ":141696,"ĠÅŁiir":141697,"krÄĻ":141698,"krÄĻc":141699,"Ġthiá»ĥu":141700,"à¹Ģลยà¸Ĺี":141701,"à¹Ģลยà¸Ĺีà¹Ģà¸Ķียว":141702,"×ĺ×¢×ł×ķת":141703,"ائÙĩÙħ":141704,"Ġ×IJס×ķר":141705,"ĠплаÑĤеж":141706,"تردد":141707,"Ġmożliwe":141708,"ĠkhỼ":141709,"ĠkhỼp":141710,"تÙģØ§Ø¹ÙĦ":141711,"ĠÑĪколÑĮ":141712,"ĠÑĪколÑĮн":141713,"ĠÙĤصة":141714,"Ġmétier":141715,"nÄĻÅĤa":141716,"หลà¹Īà¸Ń":141717,"Ġá»§ng":141718,"Ġprzegl":141719,"ĠprzeglÄħd":141720,"ĠاÙĦÙħتعÙĦ":141721,"ĠاÙĦÙħتعÙĦÙĤØ©":141722,"ĠÑģÑĭн":141723,"Ġволн":141724,"ãĥĩãĥ¼ãĥĪ":141725,"ĠÐŃÑĤи":141726,"ĠкÑĢоме":141727,"à¸Ħารà¹Į":141728,"׳ק×ķ×ĵ×Ķ":141729,"Ġ׾ש×ŀ×ķ×¢":141730,"Ġ×ĸ×ķ׼ר":141731,"ï¼§":141732,"ÙĬÙİØ§":141733,"Ġgiá»ıi":141734,"åĥįãģı":141735,"ĠÑģни":141736,"ĠÑģнижен":141737,"à¹ģà¸Ķà¸Ķ":141738,"รุà¸Ļ":141739,"รุà¸Ļà¹ģรà¸ĩ":141740,"Ġhiá»ĩp":141741,"ografÃŃa":141742,"à¹Ģà¸Īà¸Ńรà¹Į":141743,"Ġдвиг":141744,"ĠдвигаÑĤ":141745,"ĠдвигаÑĤел":141746,"Ġüy":141747,"Ġüyeler":141748,"Ġüyeleri":141749,"ĠбÑĥк":141750,"ĠбÑĥкв":141751,"ãĤĤå¤ļãģı":141752,"Ġthiá»ĩt":141753,"ĠPaÃŃs":141754,"ĠطبÙĬعÙĬ":141755,"à¹ģà¸Īà¸ģ":141756,"ĠاÙĦصØŃÙĬØŃ":141757,"Ġappré":141758,"Ġappréci":141759,"Ġdecisión":141760,"Ġë°ĺëĵľ":141761,"Ġë°ĺëĵľìĭľ":141762,"ĠÑĤебе":141763,"ãĤ·ãĥ¼ãĤº":141764,"ãĤ·ãĥ¼ãĤºãĥ³":141765,"ĠдалÑĮн":141766,"ĠìĬ¤":141767,"ĠìĬ¤ìĬ¤":141768,"ĠìĬ¤ìĬ¤ë¡ľ":141769,"ĠThá»ĥ":141770,"ĠkarÅŁ":141771,"ĠkarÅŁÄ±s":141772,"ĠkarÅŁÄ±sında":141773,"ĠKön":141774,"ĠKönig":141775,"ивание":141776,"×ij×ķצע":141777,"глаÑģ":141778,"Ġtwó":141779,"Ġtwórc":141780,"à¸Ľà¸ģà¸Ħร":141781,"à¸Ľà¸ģà¸Ħรà¸Ńà¸ĩ":141782,"ĠGÅĤ":141783,"ĠGÅĤówn":141784,"ĠUnterstüt":141785,"ĠUnterstützung":141786,"ĠдÑĥÑħ":141787,"ĠдÑĥÑħов":141788,"Ø£ÙħاÙĨ":141789,"×Ĺשש":141790,"تظ":141791,"تظاÙĩر":141792,"ĠлÑİбом":141793,"à¸ķาร":141794,"à¸ķาราà¸ĩ":141795,"Ġkról":141796,"Ø£ØŃدث":141797,"ì¡Įëĭ¤":141798,"ÐļÑĥÑĢÑģ":141799,"ãĥĥãĥĦ":141800,"×ŀ×§×ķ×ij׾":141801,"ĠÑģимвол":141802,"Ġdésorm":141803,"Ġdésormais":141804,"wüns":141805,"wünsche":141806,"Ñĥни":141807,"ÑĥниÑĨип":141808,"ÑĥниÑĨипалÑĮн":141809,"หลัà¸ģสูà¸ķร":141810,"ÙĨتشر":141811,"Ġал":141812,"Ġалк":141813,"Ġалког":141814,"Ġалкогол":141815,"ĠÑĥÑĩиÑĤÑĭва":141816,"à¸ģำà¸ģัà¸ļ":141817,"Ġ×ľ×¤×¢×ķ׾":141818,"ĠìĹ°ê²°":141819,"sÄħd":141820,"ĠاÙĦØ£ÙĬ":141821,"ĠاÙĦØ£ÙĬاÙħ":141822,"غÙĬاب":141823,"ĠнаÑĢ":141824,"ĠнаÑĢко":141825,"×ŀ×ķ×ĵ×¢":141826,"ĠÑģеÑĢии":141827,"пиÑģÑĭва":141828,"สิว":141829,"ç¶ļãģĦãģ¦":141830,"çͳãģĹè¾¼ãģ¿":141831,"Ġ׾×Ĵר":141832,"Ġ׾×Ĵר×ķ×Ŀ":141833,"Ġдем":141834,"Ġдемо":141835,"Ġë³´ëĤ´":141836,"تÙĩدÙĬد":141837,"ĠÙħØ´ÙĬرا":141838,"Ġduy":141839,"Ġduyá»ĩt":141840,"ĠwiÄĻksze":141841,"ÙħعاÙĬ":141842,"ÙħعاÙĬÙĬر":141843,"ĠGda":141844,"ĠGdaÅĦsk":141845,"Ġrah":141846,"Ġrahats":141847,"Ġrahatsız":141848,"ר×ķצ×Ķ":141849,"lös":141850,"lösung":141851,"ĠТаким":141852,"ÑĪед":141853,"ÑĪедÑĪ":141854,"عزÙĦ":141855,"Ġרש×Ļ×ŀת":141856,"Ġ׾×Ķ×Ļ׼":141857,"Ġ׾×Ķ×Ļ×Ľ×ł×¡":141858,"ĠпÑĥÑĤ":141859,"ĠпÑĥÑĤеÑĪ":141860,"ĠпÑĥÑĤеÑĪеÑģÑĤв":141861,"ĠnotÃŃcia":141862,"ĠalÄ±ÅŁ":141863,"ĠalÄ±ÅŁver":141864,"ĠalÄ±ÅŁveriÅŁ":141865,"ĠwÅĤos":141866,"ĠwÅĤosów":141867,"Ġبغ":141868,"Ġبغداد":141869,"Ġveröffent":141870,"Ġveröffentlicht":141871,"ĠKhá":141872,"Ġtán":141873,"ëIJĺ기":141874,
"Ġ방문":141875,"ÙģÙĬÙĦ":141876,"à¹Ģà¸ģิà¸Ķà¸Īาà¸ģ":141877,"åı¯æĦĽ":141878,"åı¯æĦĽãģĦ":141879,"à¸ĸุà¸ĩ":141880,"ĠzewnÄĻtrzn":141881,"à¸łà¸²à¸©à¸²à¸Ńัà¸ĩà¸ģฤษ":141882,"Ġmáxima":141883,"Ġulus":141884,"Ġuluslararası":141885,"Ġ׳×Ķ׳":141886,"à¸Ĥà¹Īาวสาร":141887,"ĠìĿĺìĤ¬":141888,"à¹Ģหลืà¸Ńà¸ĩ":141889,"ĠدÙĤ":141890,"ĠدÙĤائÙĤ":141891,"สืà¹Īà¸Ńสาร":141892,"먼":141893,"ĠÑģоÑģÑĤоÑıнии":141894,"สมาà¸Ħม":141895,"á»Ĥ":141896,"ĠÐľÐ¾Ñģков":141897,"ĠÐľÐ¾ÑģковÑģк":141898,"×ŀס×ķ×Ĵ׾":141899,"ãģĭãģĭãĤĬ":141900,"ĠTruyá»ģn":141901,"à¹ģà¸Ĥà¹ĩà¸ĩà¹ģรà¸ĩ":141902,"×ŀ×Ĺ×ĸ×Ļ×§":141903,"à¹Ĥà¸ģà¹ī":141904,"ÙĬسر":141905,"ìĶ©":141906,"×IJ×ķ×§":141907,"×IJ×ķ×§×ĺ":141908,"×IJ×ķ×§×ĺ×ķ×ijר":141909,"Ġproximité":141910,"ÙħÙĨÙĩج":141911,"ĠاÙĦجز":141912,"ĠاÙĦجزائ":141913,"ĠاÙĦجزائرÙĬ":141914,"ĠÄIJiá»ĥm":141915,"Ġденеж":141916,"Ġденежн":141917,"ÙģØŃص":141918,"Ù쨦":141919,"ĠÐijÑĥд":141920,"×Ĵ×Ļ×ĵ×ķ׾":141921,"ĠÐĴедÑĮ":141922,"عÙĦاÙħØ©":141923,"Ġ×IJ×Ĺר×ķ׳×ķת":141924,"ãģĦãģŁãģłãģĦãģ¦":141925,"سÙĦØŃ":141926,"ØŃÙĦÙħ":141927,"زÙĪØ§Ø±":141928,"Ùĥسر":141929,"×ĺקס":141930,"Ġбан":141931,"Ġбанков":141932,"ĠпÑĢож":141933,"ĠпÑĢожива":141934,"liwo":141935,"liwoÅĽci":141936,"ĠTiếp":141937,"ĠاÙĦÙħÙĨاسب":141938,"ĠاÙĦØ®ÙĬار":141939,"ãģĬãģĭ":141940,"ãģĬãģĭãģĴ":141941,"à¸Ķà¸Ńà¸ģà¹Ħมà¹ī":141942,"ämp":141943,"ämpfe":141944,"à¸ķัà¹īà¸ĩà¹ĥà¸Ī":141945,"ĠзаÑīиÑĤ":141946,"ĠзаÑīиÑĤÑĭ":141947,"ĠThưá»Ŀng":141948,"ĠصÙģ":141949,"ĠصÙģØŃØ©":141950,"×Ĺ×ķרף":141951,"ãĥIJãĥĥãĤ°":141952,"Ġ×ĵ×Ļ×Ĵ":141953,"Ġ×ĵ×Ļ×Ĵ×Ļ×ĺ":141954,"Ġ×ĵ×Ļ×Ĵ×Ļ×ĺ׾×Ļ":141955,"Ġ×Ķ×Ĺ×ķ׾×Ļ×Ŀ":141956,"веÑī":141957,"веÑīа":141958,"ĠкÑĥлÑĮÑĤ":141959,"ĠкÑĥлÑĮÑĤÑĥ":141960,"ĠкÑĥлÑĮÑĤÑĥÑĢÑĭ":141961,"ĠاÙĦاÙĨترÙĨت":141962,"Ġhöch":141963,"Ġhöchst":141964,"Ġíĺķ":141965,"Ġíĺķíĥľ":141966,"Ġвой":141967,"ĠвойнÑĭ":141968,"ÐĽÐŀ":141969,"ìĭłìļ©":141970,"Ġ×ŀ×ij×ķס":141971,"Ġ×ŀ×ij×ķסס":141972,"×ŀ׳×Ļ×¢":141973,"Ġfiyatı":141974,"ĠÑģлÑĥж":141975,"ĠÑģлÑĥжбÑĭ":141976,"à¸Ĺัศ":141977,"à¸Ĺัศà¸Ļ":141978,"ãģĵãģ¨ãģĮå¤ļãģĦ":141979,"Ġ×Ķ×ŀשת":141980,"Ġ×Ķ×ŀשת×ŀש":141981,"å¯ĦãģĽ":141982,"×ŀש׾×ķ×Ĺ":141983,"æĻĤçĤ¹":141984,"æĻĤçĤ¹ãģ§":141985,"à¸ŀรี":141986,"à¸ŀรีà¹Ģมีย":141987,"à¸ŀรีà¹Ģมียรà¹Į":141988,"à¸ŀรีà¹Ģมียรà¹Įลีà¸ģ":141989,"Ġdifficolt":141990,"ĠdifficoltÃł":141991,"ãĥ¬ãĤ¹ãĥĪ":141992,"ãĥ¬ãĤ¹ãĥĪãĥ©ãĥ³":141993,"สมà¹Ģà¸Ķà¹ĩ":141994,"สมà¹Ģà¸Ķà¹ĩà¸Ī":141995,"Ġжид":141996,"Ġжидк":141997,"ĠzupeÅĤ":141998,"ĠzupeÅĤnie":141999,"ĠÙħجر":142000,"ĠÙħجرد":142001,"ãģĮå§ĭ":142002,"ãģĮå§ĭãģ¾":142003,"ãĤŃãĥ£ãĥ©":142004,"Ġ×IJ×ķ×ķ×Ļר":142005,"ãģĬäºĴ":142006,"ãģĬäºĴãģĦ":142007,"ĠpotrÃł":142008,"ĠPaÅĦst":142009,"ĠPaÅĦstwo":142010,"ĠبÙĬاÙĨ":142011,"ĠبÙĬاÙĨات":142012,"Ġиногда":142013,"ĠÑĢа":142014,"ĠÑĢаÑģÑĤв":142015,"ĠÑĢаÑģÑĤвоÑĢ":142016,"Ġ×ĸ×ŀ׳":142017,"ยิà¹īม":142018,"ÄĨ":142019,"ãģ¾ãģķ":142020,"ãģ¾ãģķãģ«":142021,"ãĥķãĤ¡ãĤ¤ãĥ«":142022,"ĠgördÃ¼ÄŁÃ¼":142023,"สà¸ĩà¸Ħร":142024,"สà¸ĩà¸Ħราม":142025,"ĠArkadaÅŁ":142026,"ĠrozwiÄħzania":142027,"×ŀ×ķ×ĺ":142028,"piÄĻ":142029,"piÄĻt":142030,"صغر":142031,"สย":142032,"สยาม":142033,"ãĤĨãģ£ãģıãĤĬ":142034,"Ġtrần":142035,"ĠeconomÃŃa":142036,"Ġgehören":142037,"ãĤ·ãĥ§ãĥ¼":142038,"ĠsÅĤucha":142039,"à¸ŀà¸Ńà¹ĥà¸Ī":142040,"ĠоÑĤмеÑĤил":142041,"ÙĨتÙĤÙĦ":142042,"Ġpropósito":142043,"ĠваÑĪего":142044,"Ġnhắn":142045,"à¹ģà¸ĸว":142046,"ĠкомиÑģ":142047,"ĠкомиÑģÑģи":142048,"ważnie":142049,"ĠyavaÅŁ":142050,"×ŀ×Ļ×§":142051,"×ŀ×Ļ×§×ķ×Ŀ":142052,"ש×IJ×ľ×ª":142053,"Ġyıllarda":142054,"ĠЮ":142055,"ĠЮÑĢ":142056,"×ł×¡×Ļ×ij×ķת":142057,"תצ":142058,"תצ×ķ×Ĵ":142059,"ĠоднÑĥ":142060,"Ġà¸Ńยà¹Īาà¸ĩà¹Ħร":142061,"Ġà¸Ńยà¹Īาà¸ĩà¹Ħรà¸ģà¹ĩà¸ķาม":142062,"ëģ¼":142063,"à¹Ħลà¹Ī":142064,"تسÙĦÙĬÙħ":142065,"بÙĦاغ":142066,"Ġìī":142067,"Ġìī½":142068,"Ġìī½ê²Į":142069,"ãĥļãĥ³":
142070,"звÑĥÑĩ":142071,"ĠWäh":142072,"ĠWährend":142073,"Ġ×Ļ×Ļת":142074,"Ġ×Ļ×Ļ×ª×Ľ×Ł":142075,"Ġkhuyên":142076,"Ġvẽ":142077,"ĠамеÑĢ":142078,"ĠамеÑĢик":142079,"ĠамеÑĢикан":142080,"ĠамеÑĢиканÑģк":142081,"عجب":142082,"ãĥĽãĥ¼ãĥłãĥļãĥ¼ãĤ¸":142083,"ĠникÑĤо":142084,"ĠÙĤÙİ":142085,"ĠÙĤÙİØ§ÙĦ":142086,"ĠÙĤÙİØ§ÙĦÙİ":142087,"ÐIJÐĹ":142088,"ÙħجÙħÙĪØ¹":142089,"ÙħجÙħÙĪØ¹Ø§Øª":142090,"ĠnecessitÃł":142091,"Ġpobli":142092,"Ġpobliżu":142093,"Ġphấn":142094,"ĠСообÑī":142095,"ÙħÙĤاط":142096,"ÙħÙĤاطع":142097,"Ġ×Ķצ×ķר×ļ":142098,"laÅŁtırma":142099,"วิà¸Ķ":142100,"วิà¸Ķี":142101,"วิà¸Ķีà¹Ĥà¸Ń":142102,"Ġ그리ìĬ¤":142103,"Ġ그리ìĬ¤ëıĦ":142104,"ãĤ¿ãĤ¤ãĥŁ":142105,"ãĤ¿ãĤ¤ãĥŁãĥ³ãĤ°":142106,"×§×ĺ×Ĵ×ķר":142107,"×§×ĺ×Ĵ×ķר×Ļ×Ķ":142108,"Ġ×Ĺ×ķפ":142109,"Ġ×Ĺ×ķפש×Ļ":142110,"أجر":142111,"Ġимени":142112,"ĠÑĢанее":142113,"à¹Ģà¸ŀืà¹Īà¸Ńà¸Ļà¹Ĩ":142114,"ĠJesús":142115,"Ñģоедин":142116,"Ñģоединен":142117,"Ġר×Ĺ×ķ×§":142118,"à¹Ĥà¸ļรา":142119,"à¹Ĥà¸ļราà¸ĵ":142120,"ĠHÆ¡n":142121,"ĠtháºŃp":142122,"تعÙĬÙĬÙĨ":142123,"ĠtartÄ±ÅŁ":142124,"ĠtartÄ±ÅŁma":142125,"ĠGespr":142126,"ĠGespräch":142127,"תר×ķפ":142128,"תר×ķפ×ķת":142129,"Ġcatégorie":142130,"ĠоказÑĭва":142131,"ĠналиÑĩие":142132,"Ġprésenté":142133,"Ġkull":142134,"Ġkulland":142135,"Ġkullandı":142136,"Ġünl":142137,"Ġünlü":142138,"ĠÙģÙĥرة":142139,"изаÑĤоÑĢ":142140,"×IJ×ķ׳":142141,"×IJ×ķ׳×Ļ×ij":142142,"×IJ×ķ׳×Ļ×ijרס":142143,"×IJ×ķ׳×Ļ×ijרס×Ļ×ĺת":142144,"ĠÑĢаÑģÑģмаÑĤ":142145,"ĠÑĢаÑģÑģмаÑĤÑĢ":142146,"ĠÑĢаÑģÑģмаÑĤÑĢива":142147,"تÙĥÙĦÙħ":142148,"ÙĥترÙĪ":142149,"ÙĥترÙĪÙĨÙĬ":142150,"ĠÑģоÑĩеÑĤ":142151,"ĠÑģоÑĩеÑĤа":142152,"ãĤĴè¦ĭãģĽ":142153,"Ġngừa":142154,"ĠÐłÐµÑģп":142155,"ĠÐłÐµÑģпÑĥб":142156,"ĠÐłÐµÑģпÑĥблик":142157,"ãĤ¦ãĤ©":142158,"ãĤ¦ãĤ©ãĥ¼":142159,"ĠÐľÐµÐ¶Ð´Ñĥ":142160,"ĠìŀĪê²Į":142161,"Ġmâ":142162,"ĠìļĶì²Ń":142163,"ضار":142164,"ลุà¹īà¸Ļ":142165,"ëĮĢíķĻêµIJ":142166,"×ĸ×Ļ׼":142167,"×ĸ×Ļ׼ר×ķף":142168,"ãĤ¹ãĥļ":142169,"ãĤ¹ãĥļãĥ¼ãĤ¹":142170,"ĠкÑĢаÑģоÑĤ":142171,"H":142172,"ê¼Ń":142173,"ãĤĴéĽĨ":142174,"ãĤĴéĽĨãĤģ":142175,"ë°Ŀ":142176,"Ġ×Ķ׳×IJ":142177,"Ġ×Ķ׳×IJש×Ŀ":142178,"Ġê°Ģìļ´":142179,"Ġê°Ģìļ´ëį°":142180,"تÙĥÙĦÙ쨩":142181,"ĠØŃÙĤÙĬÙĤÙĬ":142182,"Ġhalk":142183,"Ġhalkın":142184,"ÑİÑīÑĥÑİ":142185,"ĠÑģпин":142186,"סר×ĺף":142187,"ĠпеÑĢвого":142188,"Ġполож":142189,"ĠположиÑĤелÑĮн":142190,"Ġдл":142191,"ĠдлиÑĤелÑĮн":142192,"ĠVÄ©nh":142193,"ê´´":142194,"ĠÑģÑĭÑĢ":142195,"ĠíĨµíķĺìŬ":142196,"ë³ijìĽIJ":142197,"à¹Ĥรà¸ĩà¸ĩาà¸Ļ":142198,"รัà¸ļà¸ľà¸´à¸Ķ":142199,"รัà¸ļà¸ľà¸´à¸Ķà¸Ĭà¸Ńà¸ļ":142200,"تجÙĨب":142201,"sÅĤ":142202,"sÅĤuch":142203,"ãĤ¢ãĥ«ãĥIJ":142204,"ãĤ¢ãĥ«ãĥIJãĥł":142205,"ëī´ìĬ¤":142206,"Ġpatië":142207,"Ġpatiënt":142208,"Ġìĺ¤í":142209,"Ġìĺ¤íŀ":142210,"Ġìĺ¤íŀĪ":142211,"Ġìĺ¤íŀĪ볤":142212,"ĠDerne":142213,"ĠDerneÄŁi":142214,"wróci":142215,"wróciÄĩ":142216,"ĠобÑī":142217,"ĠобÑīеÑģÑĤв":142218,"ĠобÑīеÑģÑĤвенно":142219,"ĠêµIJìĪĺ":142220,"tıģımız":142221,"Ġ×Ķ×ŀש×Ļ×ij":142222,"körper":142223,"Ġпозвол":142224,"ĠпозволиÑĤ":142225,"ĠChiến":142226,"أخÙĪ":142227,"ĠAydın":142228,"à¸Ķà¹īาà¸Ļล":142229,"à¸Ķà¹īาà¸Ļลà¹Īาà¸ĩ":142230,"Ġdru":142231,"Ġdruż":142232,"Ġdrużyn":142233,"Ġë°ľíijľ":142234,"ĠThảo":142235,"جÙĩاد":142236,"à¸ģระà¸Ĺูà¹ī":142237,"ĠкÑĢов":142238,"ĠкÑĢови":142239,"Ġiçerik":142240,"Ġnadzie":142241,"ĠnadziejÄĻ":142242,"ĠСмоÑĤÑĢ":142243,"Ġphức":142244,"جتÙħاع":142245,"جتÙħاعÙĬØ©":142246,"компон":142247,"компоненÑĤ":142248,"Ġбил":142249,"ĠбилеÑĤ":142250,"ãĥIJãĥ³ãĥī":142251,"ĠPolÃŃcia":142252,"اÙĦتÙĩ":142253,"اÙĦتÙĩاب":142254,"ØŃرÙģ":142255,"تخط":142256,"تخطÙĬØ·":142257,"ãĤ³ãĥ¼ãĥ":142258,"ãĤ³ãĥ¼ãĥĴ":142259,"ãĤ³ãĥ¼ãĥĴãĥ¼":142260,"・・・":142261,"à¸ĭà¸Ńย":142262,"Ġcrédit":142263,"è²·ãģ£ãģŁ":142264,"ĠпоÑĢÑıд":142265,"ĠпоÑĢÑıдке":142266,"Ġphó":1422
67,"Ġwida":142268,"ĠwidaÄĩ":142269,"جرائÙħ":142270,"à¸ľà¸µ":142271,"ĠbÄĻdÄĻ":142272,"Ġ×ŀפת×Ĺ":142273,"ãĥijãĥ¼ãĥ":142274,"ãĥijãĥ¼ãĥĨ":142275,"ãĥijãĥ¼ãĥĨãĤ£":142276,"ãĥijãĥ¼ãĥĨãĤ£ãĥ¼":142277,"ĠKaż":142278,"ĠKażdy":142279,"ĠнеобÑħодимоÑģÑĤи":142280,"à¸Łà¸Ńรà¹Į":142281,"à¸Łà¸Ńรà¹Įม":142282,"ĠмалÑĭÑĪ":142283,"ĠплоÑĤ":142284,"ĠÑĥÑģÑĤÑĢой":142285,"ĠÑĥÑģÑĤÑĢойÑģÑĤва":142286,"à¸ĸà¸Ńà¸Ļ":142287,"ĠoluÅŁturul":142288,"ĠÅĽwiad":142289,"ĠÅĽwiadom":142290,"ÙħعÙĩد":142291,"ĠпÑĢоизведен":142292,"Æł":142293,"ר×Ļש":142294,"Ùħستث":142295,"ÙħستثÙħر":142296,"׳×Ļ×Ļר":142297,"pañ":142298,"Ġ;-)":142299,"Ġë°ľê²¬":142300,"Ġgörüyor":142301,"ÙħؤÙĦÙģ":142302,"ĠÄIJá»ģ":142303,"ĠاÙĦÙĨÙĪØ§Ø¨":142304,"×Ĺ×§×Ļר×Ķ":142305,"Ġmá»ıi":142306,"è¿°ãģ¹":142307,"ÐĿик":142308,"ìŀĸìķĦ":142309,"ìŀĸìķĦìļĶ":142310,"prowadziÅĤ":142311,"lóg":142312,"lógica":142313,"פס×ĺ":142314,"פס×ĺ×Ļ×ij׾":142315,"Ġ×ŀ×ĵ×Ķ":142316,"Ġ×ŀ×ĵ×Ķ×Ļ×Ŀ":142317,"ãģĵãģĵãģ¾ãģ§":142318,"×Ķת×Ĺ":142319,"×Ķת×Ĺ׾×Ķ":142320,"Ġפ×ķס":142321,"Ġפ×ķס×ĺ×Ļ×Ŀ":142322,"Ġнев":142323,"Ġневоз":142324,"Ġневозможно":142325,"ĠdostÄĻpny":142326,"ĠغاÙĦ":142327,"ĠغاÙĦب":142328,"ĠbezpieczeÅĦst":142329,"ĠbezpieczeÅĦstwa":142330,"åĪĨãģĭãĤĭ":142331,"ĠFührung":142332,"à¸ģีà¹ī":142333,"gemÃ¤ÃŁ":142334,"à¸Ĭà¹Īวà¸ĩà¹Ģวลา":142335,"Ġìļ°ë¦¬ëĤĺ":142336,"Ġìļ°ë¦¬ëĤĺëĿ¼":142337,"ãģ¥ãģıãĤĬ":142338,"ĠاÙĦÙħسÙĦ":142339,"ĠاÙĦÙħسÙĦØŃØ©":142340,"Ġliberté":142341,"клÑİÑĩение":142342,"Ġzamów":142343,"Ġzamówienia":142344,"รà¸ĸà¹Ħà¸Ł":142345,"Ø£ÙģÙĦ":142346,"Ø£ÙģÙĦاÙħ":142347,"Ùħراج":142348,"Ùħراجعة":142349,"Ġë¹ĦêµIJ":142350,"ĠاÙĦتاب":142351,"ĠاÙĦتابعة":142352,"Ġë§ĮëĤĺ":142353,"ĠбÑĥм":142354,"ĠбÑĥмаг":142355,"Ġgénero":142356,"Ġìŀĺ못":142357,"×ŀפ×ķר×ĺ":142358,"è²·ãģĦçī©":142359,"ĠÙĦدÙĬÙĥ":142360,"Ġ×ľ×¢×Ļת":142361,"Ġ×ľ×¢×Ļת×Ļ×Ŀ":142362,"ĠsÅĤab":142363,"ĠпÑĢедÑģÑĤавлÑı":142364,"ãĤ¿ãĤ¤ãĥĪ":142365,"ãĤ¿ãĤ¤ãĥĪãĥ«":142366,"Ùħص":142367,"ÙħصطÙģ":142368,"ÙħصطÙģÙī":142369,"Ġdifficulté":142370,"ãĥĨãĤ£ãĥĸ":142371,"ĠpewnoÅĽci":142372,"ĠpewnoÅĽciÄħ":142373,"Ġ무ìĬ¨":142374,"إرس":142375,"إرساÙĦ":142376,"ĠдалÑĮ":142377,"ĠдалÑĮÑĪе":142378,"Ġ×ľ×ł×¡":142379,"Ġ×ľ×ł×¡×ķת":142380,"หมูà¹Īà¸ļà¹īาà¸Ļ":142381,"×ŀס×ŀ׼×Ļ":142382,"أسÙĦÙĪØ¨":142383,"ĠzwÅĤ":142384,"ĠzwÅĤas":142385,"ĠzwÅĤaszc":142386,"ĠzwÅĤaszcza":142387,"ĠпÑĢеж":142388,"ĠпÑĢежде":142389,"ĠоÑĢганизаÑĨиÑı":142390,"Ġdönemin":142391,"Ġdöneminde":142392,"ĠỦ":142393,"ĠỦy":142394,"ä¸ĭãģĴ":142395,"ĠпоÑģледние":142396,"Ġgüne":142397,"ĠgüneÅŁ":142398,"Ġ×IJ×ĸר":142399,"Ġ×IJ×ĸר×Ĺ×Ļ":142400,"ãģ§ãģĤãĤįãģĨ":142401,"ĠÙĨÙĤ":142402,"ĠÙĨÙĤاط":142403,"æŃ£ãģĹãģĦ":142404,"ĠÑĢег":142405,"ĠÑĢегиона":142406,"ĠFörder":142407,"ê²½ìĺģ":142408,"dıklar":142409,"dıklarını":142410,"trzymaÄĩ":142411,"أشÙĥ":142412,"أشÙĥاÙĦ":142413,"×Ķת×IJ":142414,"×Ķת×IJ×ŀ×Ķ":142415,"à¸Ĺำà¹ĥหà¹īà¹Ģà¸ģิà¸Ķ":142416,"ĠGebä":142417,"ĠGebäude":142418,"ĠСеÑĢг":142419,"ĠСеÑĢгей":142420,"ĠздоÑĢов":142421,"ĠздоÑĢовÑĮÑı":142422,"Ġrãi":142423,"ĠпÑĢедÑĥÑģ":142424,"ĠпÑĢедÑĥÑģмоÑĤÑĢ":142425,"ĠпÑĢедÑĥÑģмоÑĤÑĢен":142426,"Ġ×Ķצ×Ļ×ij":142427,"Ġ×Ķצ×Ļ×ij×ķר×Ļ":142428,"Ġdésir":142429,"ĠноÑĩ":142430,"ĠноÑĩÑĮ":142431,"möglichkeiten":142432,"Ġ×IJ×Ĺר×ķ׳×Ļ×Ŀ":142433,"Ġsoirée":142434,"ĠNháºŃn":142435,"Ùª":142436,"à¸Ľà¸£à¸°à¸§à¸±à¸ķิศาสà¸ķรà¹Į":142437,"êµIJíĨµ":142438,"ĠأخÙĬ":142439,"Ġdécid":142440,"Ġdécidé":142441,"Ġwyja":142442,"ĠwyjaÅĽni":142443,"Ġสิ":142444,"Ġสิà¸ĩ":142445,"Ġสิà¸ĩหา":142446,"Ġสิà¸ĩหาà¸Ħม":142447,"à¹ģà¸Ńรà¹Į":142448,"หà¸Ļà¹īาà¸Īà¸Ń":142449,"סתר":142450,"Ġê¶":142451,"Ġê¶Į":142452,"Ġê¶Į리":142453,"plätze":142454,"بطÙĦ":142455,"ê±´ìĦ¤":142456,"Ġ×IJ×Ļ×ŀ×Ļ":142457,"Ġ×IJ×Ļ×ŀ×Ļ×Ļ׾":142458,"ãģ½":142459,"تراث":142460,"×IJ׾×Ļ×ŀ×ķת":142461,"ĠdisponÃŃveis":142
462,"Ġzale":142463,"Ġzależy":142464,"à¸Ľà¸£à¸°à¸Ĭาสัมà¸ŀัà¸Ļà¸ĺà¹Į":142465,"ĠÅļwiat":142466,"Ġporówn":142467,"Ġporówna":142468,"Ġ׾×ĺ×ķ×ijת":142469,"×Ķ×ĸ×ŀ׳×Ķ":142470,"Ġ×Ľ×ª×ķצ×IJ×Ķ":142471,"Ġ×ijק׾":142472,"Ġ×ijק׾×ķת":142473,"ĠоÑĤкÑĢ":142474,"ĠоÑĤкÑĢÑĭва":142475,"ãĥijãĥ¯ãĥ¼":142476,"ë¿IJë§Į":142477,"ĠвÑģÑı":142478,"ĠвÑģÑıк":142479,"ãģ¨ãģªãģ£ãģ¦ãģĦãĤĭ":142480,"ĠgiáºŃn":142481,"ĠокÑĢÑĥ":142482,"ĠокÑĢÑĥжа":142483,"ĠокÑĢÑĥжаÑİÑī":142484,"ĠUniversität":142485,"ĠÑĢож":142486,"ĠÑĢожд":142487,"ĠÑĢождениÑı":142488,"Ø®ÙĬÙĦ":142489,"Ġкомпаний":142490,"ĠÑĢазлиÑĩнÑĭе":142491,"ĠЦена":142492,"׳×Ļ×ķ×ĸ":142493,"׳×Ļ×ķ×ĸ׾":142494,"׳×Ļ×ķ×ĸ׾×ĺר":142495,"Ġê³µê°Ħ":142496,"Ġê°ľëħIJ":142497,"landırma":142498,"ĠÑĥдален":142499,"à¸ŀัà¸ģà¸ľ":142500,"à¸ŀัà¸ģà¸ľà¹Īà¸Ńà¸Ļ":142501,"Ġprotección":142502,"ĠbÅĤ":142503,"ĠbÅĤÄĻd":142504,"ÃĪ":142505,"Ġíĸīë³µ":142506,"ĠÅŁÃ¼":142507,"ĠÅŁÃ¼phe":142508,"ĠíĶ":142509,"Ġíͼ":142510,"Ġíͼíķ´":142511,"Ġëĭ¤ë¥´":142512,"à¹Ħมà¹Īà¹Ģà¸ģิà¸Ļ":142513,"ãģ¿ãģª":142514,"ãģ¿ãģªãģķãĤĵ":142515,"ĠпоÑĤÑĢеб":142516,"ĠпоÑĤÑĢебиÑĤел":142517,"ĠاÙĦÙĥÙĦاÙħ":142518,"ìķĦë²Ħ":142519,"ìķĦë²Ħì§Ģ":142520,"ãĤĴ使ãģ£ãģŁ":142521,"Ġbụi":142522,"ĠпоÑĤеÑĢ":142523,"ĠпоÑĤеÑĢÑı":142524,"ĠØ¢ÙĦاÙģ":142525,"ĠнаÑģÑĤоÑıÑīее":142526,"ãģıãģªãĤĬãģ¾ãģĹãģŁ":142527,"clusão":142528,"ãĤ³ãĥĶãĥ¼":142529,"צפ×Ļ":142530,"צפ×Ļ×Ļ×Ķ":142531,"Ø®ÙĦا":142532,"Ø®ÙĦاص":142533,"ลà¹īำ":142534,"ãĥ¯ãĤ¤ãĥ³":142535,"Ġมีà¸Ļา":142536,"Ġมีà¸Ļาà¸Ħม":142537,"شخص":142538,"شخصÙĬات":142539,"Ġ×ĸ×§":142540,"Ġ×ĸ×§×ķ×§":142541,"×Ļ×Ļצ":142542,"×Ļ×Ļצ×Ĵ":142543,"èĢĥãģĪæĸ¹":142544,"Ġürünü":142545,"ĠиÑģпол":142546,"ĠиÑģполни":142547,"Ġcompañero":142548,"קצ×Ķ":142549,"×ŀ×¢×ł×Ļ×§":142550,"ÙħØŃÙħد":142551,"Ġcámara":142552,"Ġпед":142553,"Ġпедаг":142554,"Ġпедагог":142555,"маÑĢ":142556,"маÑĢк":142557,"×Ķ×ª×ł×Ĵ×ĵ":142558,"ĠìĨĮê°ľ":142559,"ĠcomunitÃł":142560,"곤":142561,"ĠNgÃłi":142562,"สà¸ĩà¸ļ":142563,"ĠmieszkaÅĦców":142564,"ĠÙĨÙĩائÙĬ":142565,"ivité":142566,"Ġиде":142567,"ĠидеалÑĮн":142568,"ĠأسبÙĪØ¹":142569,"Ġ×Ļ×¢×ľ":142570,"Ġ׾ר×IJש":142571,"Ġ׾ר×IJש×ķ׳×Ķ":142572,"ĠзапиÑģи":142573,"ĠкоÑĢпÑĥÑģ":142574,"วà¸ĩศ":142575,"วà¸ĩศà¹Į":142576,"ĠÐĶм":142577,"ĠÐĶмиÑĤ":142578,"ĠÐĶмиÑĤÑĢ":142579,"Ġkönnt":142580,"Ġbölges":142581,"Ġbölgesinde":142582,"׼×Ļ׼":142583,"׼×Ļ׼ר":142584,"ĠاÙĦإثÙĨ":142585,"ĠاÙĦإثÙĨÙĬÙĨ":142586,"Ġngá»Ļ":142587,"ì¹ł":142588,"دراج":142589,"Ġuda":142590,"ĠudaÅĤo":142591,"ìºIJ":142592,"برÙĨاÙħج":142593,"ĠÑģÑĥдеб":142594,"ĠÑģÑĥдебн":142595,"Ġzunächst":142596,"ĠEducación":142597,"ãģ¨ãģªãģ£ãģ¦ãģĦãģ¾ãģĻ":142598,"Ġ×Ķ×IJ×ŀ×Ļת×Ļ":142599,"Ġİnt":142600,"Ġİnternet":142601,"ĠcaÅĤego":142602,"ãĥĹãĥªãĥ³":142603,"إبد":142604,"إبداع":142605,"ĠпоÑĢÑĤал":142606,"à¹Ĥà¸ķà¹ī":142607,"Ġ×Ķקש×ķר":142608,"плод":142609,"ĠÙħد":142610,"ĠÙħدرÙĬد":142611,"×ŀסע×ĵ×Ķ":142612,"ĠØ´ÙĬئ":142613,"ĠØ´ÙĬئا":142614,"à¸ģà¹Īà¸Ńสรà¹īาà¸ĩ":142615,"Ġì°¸ê³ł":142616,"à¹Ģà¸Ĺร":142617,"à¹Ģà¸Ĺรà¸Ķ":142618,"Ġ×ij×ŀקר×Ļ×Ŀ":142619,"Ġbât":142620,"Ġbâtiment":142621,"åij¼ãģ³":142622,"ç´łæķµ":142623,"ç´łæķµãģª":142624,"przedsiÄĻbiorst":142625,"przedsiÄĻbiorstw":142626,"Ġ×ł×ª×ķ׳×Ļ×Ŀ":142627,"×Ĺ׾×ķ×Ŀ":142628,"รวย":142629,"ÙħÙĪØ¶ÙĪØ¹":142630,"ĠÑģобÑĢан":142631,"ведÑĥÑī":142632,"ĠÑĤеаÑĤ":142633,"ĠÑĤеаÑĤÑĢ":142634,"meye":142635,"meyeceÄŁi":142636,"ĠpieniÄħ":142637,"ĠpieniÄħd":142638,"ĠpieniÄħdze":142639,"ÑĢезиденÑĤ":142640,"ØŃصر":142641,"ìĺ¥":142642,"à¹Ģยืà¸Ńà¸Ļ":142643,"ĠÑĥни":142644,"ĠÑĥнивеÑĢ":142645,"ĠÑĥнивеÑĢÑģ":142646,"ĠÑĥнивеÑĢÑģиÑĤеÑĤ":142647,"ĠاÙĦرØŃ":142648,"ĠاÙĦرØŃÙħÙĨ":142649,"ĠÑĤеÑħнолог":142650,"ĠÑĤеÑħнологии":142651,"ìĹIJëĦĪ":142652,"ìĹIJëĦĪì§Ģ":142653,"ĠíķŃ":142654,"ĠíķŃìĥģ":142655,"à¸ĺา":142656,"à¸ĺาà¸ķุ":142657,"ĠEspañol":1
42658,"×ĵ×Ĵש":142659,"Ġêµī":142660,"Ġêµīìŀ¥":142661,"Ġêµīìŀ¥íŀĪ":142662,"ĠÅĤat":142663,"ĠÅĤatwo":142664,"Ġká»ĭch":142665,"إز":142666,"إزاÙĦØ©":142667,"ĠдейÑģÑĤвие":142668,"ĠsaÄŁlayan":142669,"สุà¸Ķยà¸Ńà¸Ķ":142670,"ĠzostaÄĩ":142671,"ĠdisponÃŃvel":142672,"ïºį":142673,"verständ":142674,"verständlich":142675,"twor":142676,"tworzyÄĩ":142677,"عجز":142678,"à¹Ģà¸Ĥà¹īม":142679,"ยà¹Īà¸Ńม":142680,"Ġstratég":142681,"Ġstratégie":142682,"à¸ľà¸¥à¹Ħมà¹ī":142683,"Ġê°ģì¢ħ":142684,"ĠÙħÙĪØ§":142685,"ĠÙħÙĪØ§Ø¶":142686,"ĠÙħÙĪØ§Ø¶ÙĬع":142687,"اØŃتج":142688,"اØŃتجاج":142689,"ĠẤ":142690,"ĠẤn":142691,"×ŀ×ŀש׾×Ķ":142692,"ĠÅŁekil":142693,"×ŀ×Ĺ׾":142694,"×ŀ×Ĺ׾×ķת":142695,"Ġà¸ĺ":142696,"Ġà¸ĺัà¸Ļ":142697,"Ġà¸ĺัà¸Ļวา":142698,"Ġà¸ĺัà¸Ļวาà¸Ħม":142699,"Ġìĭ¤ìłľ":142700,"Ġìĭ¤ìłľë¡ľ":142701,"ì¤ijìķĻ":142702,"ëįĶëĿ¼":142703,"ĠÑĪиÑĢ":142704,"ĠÑĪиÑĢоко":142705,"Ġsolución":142706,"วาà¸ĩà¹ģà¸ľà¸Ļ":142707,"×IJ×ķ×ĺ×ķ×ŀ":142708,"×IJ×ķ×ĺ×ķ×ŀ×ĺ×Ļ":142709,"ĠÑĢеÑģÑĤ":142710,"ĠÑĢеÑģÑĤоÑĢ":142711,"ĠÑĢеÑģÑĤоÑĢан":142712,"ëį¸":142713,"ÑĤÑĢад":142714,"ÑĤÑĢади":142715,"ÑĤÑĢадиÑĨион":142716,"ÑĤÑĢадиÑĨионн":142717,"มะà¹Ģรà¹ĩ":142718,"มะà¹Ģรà¹ĩà¸ĩ":142719,"à¹Ĥส":142720,"Ġolmasını":142721,"×ŀ×ķסר":142722,"ĠоÑĤноÑĪении":142723,"Ġê°ĢëĬ¥ìĦ±":142724,"Ġyuk":142725,"Ġyukarı":142726,"ìĨĶ":142727,"ĠÑģÑĦ":142728,"ĠÑģÑĦеÑĢе":142729,"Ġ×§×ķפ":142730,"ãĤ±ãĥ¼ãĤ":142731,"ãĤ±ãĥ¼ãĤŃ":142732,"âĢķâĢķ":142733,"ĠاÙĦØ£ÙĦÙħ":142734,"ĠاÙĦØ£ÙĦÙħاÙĨÙĬ":142735,"ẢN":142736,"ת×ķ׼׳×Ļ×ķת":142737,"ĠÑģÑĥÑīеÑģÑĤвÑĥеÑĤ":142738,"æĪijãĢħ":142739,"ĠاÙĦصادر":142740,"ĠTrá»įng":142741,"Ġад":142742,"ĠадминиÑģÑĤ":142743,"ĠадминиÑģÑĤÑĢа":142744,"ĠадминиÑģÑĤÑĢаÑĨи":142745,"ĠдÑĢÑĥгими":142746,"ÑģпеÑĪ":142747,"عÙĦاÙħات":142748,"Ġаб":142749,"ĠабÑģол":142750,"ĠабÑģолÑİÑĤ":142751,"ĠабÑģолÑİÑĤно":142752,"ฤà¸Ķู":142753,"étr":142754,"étranger":142755,"нÑıÑĤи":142756,"нÑıÑĤие":142757,"×¢×ķ׳":142758,"×¢×ķ׳ש":142759,"ĠÙĤائ":142760,"ĠÙĤائÙĦا":142761,"ĠмаÑģ":142762,"ĠмаÑģло":142763,"ãĥīãĤ¤":142764,"ãĥīãĤ¤ãĥĦ":142765,"å¿ħè¦ģãģĮãģĤãĤĬãģ¾ãģĻ":142766,"×ŀ×ķ×ĸ×Ļ×IJ":142767,"×ŀ×ķ×ĸ×Ļ×IJ×ķף":142768,"ĠNgoại":142769,"Ġkênh":142770,"à¸ģารà¸Ńà¸Ńà¸ģà¹ģà¸ļà¸ļ":142771,"×ŀפק":142772,"×ŀפק×ĵ":142773,"ÙħÙĨاز":142774,"ÙħÙĨازÙĦ":142775,"ë·°":142776,"íŤ":142777,"ÙħÙĩارات":142778,"Ġpropriété":142779,"פ×Ĵ×Ļש×Ķ":142780,"ÑĩÑĢ":142781,"ÑĩÑĢеж":142782,"ÑĩÑĢежден":142783,"×Ķ×ķצ×IJ×Ķ":142784,"ØŃÙĥÙĬÙħ":142785,"ĠíĻĪ":142786,"ĠíĻĪíİĺìĿ´ì§Ģ":142787,"åݳ":142788,"åݳãģĹãģĦ":142789,"×¢×ŀ×ĵ×Ķ":142790,"ĠAuÃŁen":142791,"سÙĪØ¡":142792,"ë¹Ī":142793,"ĠÙĪØ®":142794,"ĠÙĪØ®Ø§ØµØ©":142795,"инÑĤеÑĢ":142796,"инÑĤеÑĢеÑģ":142797,"èĩ´ãģĹãģ¾ãģĻ":142798,"Ġhüküm":142799,"à¹Ħà¸Ĥมัà¸Ļ":142800,"Ġdavran":142801,"ĠdavranÄ±ÅŁ":142802,"à¹Ģà¸ķียà¸ĩ":142803,"вÑĢем":142804,"вÑĢеменно":142805,"à¹Ģà¸Ĺศà¸ģา":142806,"à¹Ģà¸Ĺศà¸ģาล":142807,"å¼ķãģ£":142808,"å¼ķãģ£è¶ĬãģĹ":142809,"×IJר×ķ×Ĺ":142810,"×IJר×ķ×Ĺת":142811,"à¹Ģวิ":142812,"à¹Ģวิรà¹Į":142813,"à¸Ńยà¹Īาà¸ĩรวà¸Ķà¹Ģรà¹ĩว":142814,"ĠìŬíĸī":142815,"ĠÑĢанÑĮ":142816,"ĠÑĢанÑĮÑĪе":142817,"Ġzobow":142818,"ĠzobowiÄħ":142819,"ĠzobowiÄħz":142820,"Ġ×ķ׼×ŀ×ķ×ijף":142821,"ĠاÙĦÙħÙĩ":142822,"ĠاÙĦÙħÙĩÙĨÙĬ":142823,"ãĤ¢ãĤ¸":142824,"ãĤ¢ãĤ¸ãĤ¢":142825,"ë°©ìĨ¡":142826,"à¸Ńà¸Ńà¸ģà¸ģำลัà¸ĩ":142827,"à¸Ńà¸Ńà¸ģà¸ģำลัà¸ĩà¸ģาย":142828,"améli":142829,"améliorer":142830,"å½ĵãģŁãĤĬåīį":142831,"Ġregelm":142832,"ĠregelmÃ¤ÃŁig":142833,"ãģĬåĭ":142834,"ãģĬåĭ§":142835,"ãģĬåĭ§ãĤģ":142836,"Ġmưá»Ŀi":142837,"برÙħج":142838,"ĠNatürlich":142839,"ĠDÅ©ng":142840,"ĠاÙĦرجاÙĦ":142841,"Ġthép":142842,"ĠolmuÅŁtur":142843,"×ŀ×ķס×Ļ×§×Ķ":142844,"fälle":142845,"주íĥĿ":142846,"ĠاÙĦÙģØ±Øµ":142847,"ĠnajwiÄĻks":142848,"ĠnajwiÄĻkszy":142849,"ĠçaÄŁ":142850,"ĠçaÄŁrı":142851,"ì¸ł":142852,"Ġ
vÃŃct":142853,"ĠvÃŃctima":142854,"ĠÑģовеÑĢÑĪен":142855,"×Ķ×Ļ×Ļת×Ļ":142856,"à¹Ģà¸Ķี":142857,"à¹Ģà¸Ķีà¹ĭ":142858,"à¹Ģà¸Ķีà¹ĭยว":142859,"üyü":142860,"Ġдоп":142861,"Ġдополн":142862,"ĠдополниÑĤелÑĮно":142863,"à¹ģà¸ķà¸ģà¸ķà¹Īาà¸ĩà¸ģัà¸Ļ":142864,"Ġál":142865,"Ġálbum":142866,"à¸Ľà¸£à¸°à¸Īà¸³à¸Ľà¸µ":142867,"ĠÑĦедеÑĢ":142868,"ĠÑĦедеÑĢалÑĮн":142869,"ĠobsÅĤ":142870,"ĠobsÅĤugi":142871,"à¹Ģรืà¹Ī":142872,"à¹Ģรืà¹Īà¸Ńย":142873,"à¹Ģรืà¹Īà¸Ńยà¹Ĩ":142874,"ëģĮ":142875,"Ġnghìn":142876,"ĠBaÅŁkanlıģı":142877,"تأسÙĬ":142878,"تأسÙĬس":142879,"Ġ×ij×ij×ķקר":142880,"Ġ×¢×ij×ķ×ĵ×ķת":142881,"ĠبصÙĪØ±Ø©":142882,"ãĤıãģijãģ§ãģ¯ãģªãģĦ":142883,"führer":142884,"ãĤ¹ãĤŃ":142885,"ãĤ¹ãĤŃãĥ«":142886,"ĠاÙĦÙĤض":142887,"ĠاÙĦÙĤضÙĬØ©":142888,"ĠдолжноÑģÑĤ":142889,"ÙģØ§Ø±ÙĤ":142890,"Ġcomeçou":142891,"Ġorganisé":142892,"Ġxuân":142893,"ĠÑģообÑīаеÑĤ":142894,"ĠпÑĢид":142895,"ĠпÑĢидеÑĤÑģÑı":142896,"TÃľRK":142897,"ãĥ¬ãĥ¼ãĤ·ãĥ§ãĥ³":142898,"Không":142899,"استÙģ":142900,"استÙģØ§Ø¯Ø©":142901,"ä¸ĬãģĮãģ£ãģ¦":142902,"Ġumie":142903,"ĠumiejÄĻ":142904,"ĠumiejÄĻtn":142905,"ĠumiejÄĻtnoÅĽci":142906,"ëĤ¸":142907,"à¹Ģà¸Ļà¸Ńรà¹Į":142908,"×ĵ×ķ×ķ×Ĺ":142909,"ÃŃsimo":142910,"IÃĬ":142911,"IÃĬN":142912,"Ġalcanç":142913,"Ġà¸ķุ":142914,"Ġà¸ķุลา":142915,"Ġà¸ķุลาà¸Ħม":142916,"ש׾×ĺ×ķף":142917,"Ġélè":142918,"Ġélèves":142919,"ĠÄiju":142920,"ĠÄijuá»ķi":142921,"ĠØ£Ùģ":142922,"ĠØ£Ù쨱ÙĬ":142923,"ĠØ£Ù쨱ÙĬÙĤÙĬ":142924,"ĠØ£Ù쨱ÙĬÙĤÙĬا":142925,"ãĤĴæİ¢ãģĻ":142926,"ĠпÑĢедложениÑı":142927,"جاد":142928,"ĠÑħоÑĤÑĮ":142929,"Ñģал":142930,"Ñģалон":142931,"à¸Ľà¸£à¸°à¹Ģม":142932,"à¸Ľà¸£à¸°à¹Ģมิà¸Ļ":142933,"ãĤŃãĥĥãĥģ":142934,"ãĤŃãĥĥãĥģãĥ³":142935,"×ij×ĵ×Ļ×§×ķת":142936,"Ġchù":142937,"Ġchùa":142938,"ÐĴиде":142939,"ÐĴидео":142940,"иÑĢовка":142941,"ĠÑħоÑĤиÑĤе":142942,"Ġspécifique":142943,"รสà¸Ĭาà¸ķิ":142944,"è¾¼ãĤĵãģł":142945,"伸ãģ³":142946,"×Ķצ׾×Ĺת":142947,"ãģ©ãģ®ãĤĪãģĨãģ«":142948,"سعادة":142949,"Ġлид":142950,"ĠлидеÑĢ":142951,"มà¸ĩ":142952,"มà¸ĩà¸Ħล":142953,"ØŃاÙħÙĦ":142954,"หลุà¸Ķ":142955,"à¸Ńยà¹Īาà¸ĩà¸ķà¹Īà¸Ń":142956,"à¸Ńยà¹Īาà¸ĩà¸ķà¹Īà¸Ńà¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ":142957,"ãģķãģĽãģ¦éłĤ":142958,"تسÙĪÙĬ":142959,"تسÙĪÙĬÙĤ":142960,"ĠaÅŁaģıd":142961,"ĠaÅŁaģıdaki":142962,"ĠÑĨелÑĮ":142963,"ĠÑĨелÑĮÑİ":142964,"ĠAraÅŁtırma":142965,"à¸Ĥัà¸ļรà¸ĸ":142966,"ÙĩذÙĩ":142967,"ลà¸ĩà¸Ĺะ":142968,"ลà¸ĩà¸Ĺะà¹Ģà¸ļ":142969,"ลà¸ĩà¸Ĺะà¹Ģà¸ļียà¸Ļ":142970,"تÙĥاÙħÙĦ":142971,"Ġcio":142972,"Ġcioè":142973,"ãģ¦ãģĬãģı":142974,"ĠاÙĦصØŃÙģÙĬ":142975,"ĠíĬ¹ìłķ":142976,"полниÑĤÑĮ":142977,"ãĤĵãģĺãĤĥãģªãģĦ":142978,"ãĤĵãģĺãĤĥãģªãģĦãģĭ":142979,"ĠاÙĦجÙĩ":142980,"ĠاÙĦجÙĩات":142981,"ĠÑĥÑģпеÑĪно":142982,"Ġвок":142983,"ĠвокÑĢÑĥг":142984,"ĠÑģиÑĤÑĥаÑĨиÑı":142985,"Ġ×Ķ×IJ×ŀר":142986,"Ġ×Ķ×IJ×ŀר×Ļ×§":142987,"Ġ×Ķ×IJ×ŀר×Ļ×§×IJ×Ļ":142988,"×ŀ×Ĵ×ĸ":142989,"×ŀ×Ĵ×ĸ×Ļף":142990,"ĠакÑĤÑĥ":142991,"ĠакÑĤÑĥалÑĮн":142992,"éta":142993,"étais":142994,"ĠmogÅĤa":142995,"ĠÑĤоÑĩки":142996,"Ġ×ŀ×Ķ×ŀ×¢":142997,"Ġ×ŀ×Ķ×ŀ×¢×¨×Ľ×ª":142998,"à¸¡à¸µà¸Ľà¸£à¸°à¸ªà¸´à¸Ĺà¸ĺà¸´à¸łà¸²à¸ŀ":142999,"×Ļר×Ļ×ĵ×Ķ":143000,"×Ĵר×ŀ׳":143001,"×Ĵר×ŀ׳×Ļ×Ķ":143002,"Ġглав":143003,"Ġглавное":143004,"Ġ미ëŀĺ":143005,"Ġ׳׼×ķ׳×Ķ":143006,"ĠÙĪØ·ÙĨÙĬ":143007,"opport":143008,"opportunitÃł":143009,"Ġhá»§y":143010,"ĠÙĦتØŃ":143011,"ĠÙĦتØŃÙĤÙĬÙĤ":143012,"Ġórg":143013,"Ġórgão":143014,"ãĤ¹ãĥĶ":143015,"ãĤ¹ãĥĶãĥ¼ãĥī":143016,"Ġönü":143017,"Ġönüne":143018,"ÙħعاÙħÙĦ":143019,"ש×ŀ×Ļר×Ķ":143020,"ĠвеÑģÑĮма":143021,"ĠwiÄĻkszo":143022,"ĠwiÄĻkszoÅĽÄĩ":143023,"ĠاستراتÙĬج":143024,"ĠاستراتÙĬجÙĬØ©":143025,"ĠÙ쨥":143026,"ĠÙģØ¥Ø°Ø§":143027,"à¹Ģà¸Ĭืà¹Īà¸Ńม":143028,"à¹Ģà¸Ĭืà¹Īà¸Ńมà¸ķà¹Īà¸Ń":143029,"Ġ׾פר":143030,"Ġ׾פר×ĺ×Ļ×Ŀ":143031,"ÙħضÙĬ":143032,"ĠGerçek":143033,"Ġçocukların":143034,"ÙĪØ«Ø§Ø¦ÙĤ":143035,"ĠÙħساءÙĭ":143036,"Ġunterstützt":143037,"Ġprést":143
038,"Ġpréstamo":143039,"ĠÐłÐ°Ð·Ð¼ÐµÑĢ":143040,"ĠÅŁeker":143041,"Ġséculo":143042,"×ij×Ķ×Ļר":143043,"Ø´ÙĩÙĪØ±":143044,"Ġà¸Ńีà¸ģ":143045,"Ġà¸Ńีà¸ģà¸Ĺัà¹īà¸ĩ":143046,"Ġllegó":143047,"à¸¨à¸´à¸¥à¸Ľà¸°":143048,"æĪijãģĮ":143049,"æĪijãģĮå®¶":143050,"عÙĤÙĪ":143051,"عÙĤÙĪØ¨Ø§Øª":143052,"ĠFälle":143053,"ĠsÅĤuż":143054,"ĠsÅĤużb":143055,"ĠاÙĦØŃÙĤÙĪÙĤ":143056,"ĠплиÑĤ":143057,"ĠиноÑģÑĤ":143058,"ĠиноÑģÑĤÑĢан":143059,"ĠиноÑģÑĤÑĢанн":143060,"à¹ĥà¸Ļà¸Ĥà¸ĵะà¸Ĺีà¹Ī":143061,"ãĤ«ãĥĨ":143062,"ãĤ«ãĥĨãĤ´":143063,"ãĤ«ãĥĨãĤ´ãĥª":143064,"à¸Ńิส":143065,"à¸Ńิสระ":143066,"à¹Ģà¸ľà¸¢à¹ģ":143067,"à¹Ģà¸ľà¸¢à¹ģà¸ŀร":143068,"à¹Ģà¸ľà¸¢à¹ģà¸ŀรà¹Ī":143069,"ãģĬãģĦ":143070,"ãģĬãģĦãģĹãģĦ":143071,"استÙĤÙĦ":143072,"استÙĤÙĦاÙĦ":143073,"تØŃض":143074,"تØŃضÙĬر":143075,"åĬ©ãģij":143076,"ÙħراÙģÙĤ":143077,"Ġ×ĵ×ķר":143078,"Ġ×ĵ×ķרש":143079,"×ŀת×Ļ×Ļ×Ĺס":143080,"ס×Ļ׼":143081,"ס×Ļ׼×ķ×Ŀ":143082,"íĮĮíĬ¸":143083,"ĠwyÅĽ":143084,"ĠwyÅĽw":143085,"ĠwyÅĽwiet":143086,"ĠwyÅĽwietl":143087,"ĠاÙĦاÙĨساÙĨ":143088,"ĠStraÃŁen":143089,"L":143090,"ãģ«åŁº":143091,"ãģ«åŁºãģ¥":143092,"ĠcapÃŃtulo":143093,"ลุย":143094,"Ġ×Ķ×ŀקצ×ķ×¢×Ļ":143095,"ãģĤãĤĭç¨ĭ度":143096,"Ợ":143097,"ĠاÙĦÙĦا":143098,"ĠاÙĦÙĦازÙħØ©":143099,"æķĻãģĪ":143100,"Ġרש×IJ×Ļ":143101,"зав":143102,"завиÑģ":143103,"завиÑģим":143104,"à¸Ľà¸±à¸Īà¸Īัย":143105,"à¹Ģà¸ĭล":143106,"à¹Ģà¸ĭลลà¹Į":143107,"Ġdifférence":143108,"ĠAltın":143109,"ĠкÑĢай":143110,"ĠкÑĢайне":143111,"Ġзло":143112,"Ġgünümüz":143113,"ĠнаÑĤÑĥÑĢ":143114,"ĠнаÑĤÑĥÑĢалÑĮн":143115,"×Ĵ×ķ׾ש×Ļ×Ŀ":143116,"ĠкаÑĤегоÑĢ":143117,"ĠкаÑĤегоÑĢии":143118,"Ġзнак":143119,"à¸ģà¹Īà¸Ńà¸Ļหà¸Ļà¹īา":143120,"à¸ģà¹Īà¸Ńà¸Ļหà¸Ļà¹īาà¸Ļีà¹ī":143121,"ĠÙħÙĨت":143122,"ĠÙħÙĨتخب":143123,"ãĥĽãĥ¼ãĥ«":143124,"ĠевÑĢо":143125,"สว":143126,"สวม":143127,"ĠìľĦìĽIJ":143128,"ĠìľĦìĽIJëĭĺ":143129,"ĠاÙĦØŃÙĪØ«":143130,"ĠاÙĦØŃÙĪØ«ÙĬ":143131,"ĠÑģодеÑĢжиÑĤ":143132,"ãĥķãĤ¡ãĥĥãĤ·ãĥ§ãĥ³":143133,"Ġà¸ģัà¸Ļ":143134,"Ġà¸ģัà¸Ļย":143135,"Ġà¸ģัà¸Ļยายà¸Ļ":143136,"ãĤªãĥª":143137,"ãĤªãĥªãĤ¸":143138,"ãĤªãĥªãĤ¸ãĥĬãĥ«":143139,"ĠбÑĢенд":143140,"ãĤĴæĮģãģ£ãģ¦ãģĦãĤĭ":143141,"Ġinversión":143142,"Ġê°ĸ":143143,"Ġê°ĸê³ł":143144,"ĠnovitÃł":143145,"ê´Ģê´ij":143146,"Ġà¸ŀฤษ":143147,"Ġà¸ŀà¸¤à¸©à¸łà¸²":143148,"Ġà¸ŀà¸¤à¸©à¸łà¸²à¸Ħม":143149,"×ķר×Ĺ×Ļ×Ŀ":143150,"׼׾×ķ׾":143151,"Ġngạc":143152,"×Ļ×Ļש":143153,"×Ļ×Ļש×ķ×ij":143154,"fäll":143155,"fällig":143156,"ĠÑĤÑĢебÑĥеÑĤÑģÑı":143157,"Ġcará":143158,"Ġcarácter":143159,"ĠprincÃŃpio":143160,"ĠÅĤaz":143161,"ĠÅĤazien":143162,"ĠÅĤazienk":143163,"Ġgiãn":143164,"ÑģÑĤÑĢаива":143165,"Ùħساب":143166,"ÙħسابÙĤØ©":143167,"à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩà¸Ķืà¹Īม":143168,"ترÙĥÙĬب":143169,"volução":143170,"ĠÐŁÐ¾Ñĩ":143171,"ĠÐŁÐ¾Ñĩем":143172,"ĠÐŁÐ¾ÑĩемÑĥ":143173,"казалоÑģÑĮ":143174,"ĠпÑĢименениÑı":143175,"à¹Ģà¸Ĺียม":143176,"íĮĶ":143177,"à¸Ĥà¹īà¸Ńà¹Ģสà¸Ļà¸Ń":143178,"à¸Ľà¸±à¸įà¸įา":143179,"ĠобÑĥÑĩ":143180,"ĠобÑĥÑĩениÑı":143181,"ĠÑģеÑĢи":143182,"ĠÑģеÑĢиал":143183,"Ġinglés":143184,"ĠÙĦÙĥرة":143185,"Ġ×ĺ׾":143186,"Ġ×ĺ׾פ×ķף":143187,"Ġìłij":143188,"Ġìłijê·¼":143189,"×IJ×ķ×Ĵ":143190,"×IJ×ķ×Ĵ×ķס":143191,"×IJ×ķ×Ĵ×ķס×ĺ":143192,"ĠболÑĮÑĪое":143193,"ĠÐļонеÑĩно":143194,"×¢×Ļת×ķ׳":143195,"×¢×Ļת×ķ׳×IJ×Ļ":143196,"Ġкнопк":143197,"Ġзн":143198,"ĠзнаÑĤÑĮ":143199,"ĠÄijá»±":143200,"ĠÄijá»±ng":143201,"влаж":143202,"влажн":143203,"×ŀ×Ļ×ĺ×ij":143204,"ãĤ¬ãĤ¤":143205,"ãĤ¬ãĤ¤ãĥī":143206,"..........":143207,"Ġà¸ģุม":143208,"Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀ":143209,"Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļ":143210,"Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļà¸ĺ":143211,"Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļà¸ĺà¹Į":143212,"bez":143213,"bezpieczeÅĦst":143214,"bezpieczeÅĦstw":143215,"ãĥijãĥijæ´»":143216,"عاط":143217,"عاطÙģ":143218,"ĠÄijáºŃm":143219,"ĠзÑĢ":143220,"ĠзÑĢениÑı":143221,"Ġborç":143222,"Ġнедел":143223,"ĠнеделÑİ":143
224,"Ġhá»ı":143225,"Ġhá»ıng":143226,"ìŀ¥ìķł":143227,"ìŀ¥ìķłìĿ¸":143228,"ĠاÙĦعÙĦاÙĤØ©":143229,"Ġíģ¬":143230,"Ġíģ¬ê²Į":143231,"à¹Ħรà¹Ī":143232,"à¸ļาà¸Ķ":143233,"à¸ļาà¸Ķà¹Ģà¸Īà¹ĩà¸ļ":143234,"à¸Ŀรั":143235,"à¸Ŀรัà¹Īà¸ĩ":143236,"à¸Ŀรัà¹Īà¸ĩà¹Ģศ":143237,"à¸Ŀรัà¹Īà¸ĩà¹Ģศส":143238,"רע×Ļ":143239,"רע×Ļ×ķ׳×ķת":143240,"ĠëĮ":143241,"ĠëĮĵ":143242,"ĠëĮĵê¸Ģ":143243,"Ġnajb":143244,"Ġnajbli":143245,"Ġnajbliż":143246,"Ġnajbliższ":143247,"ĠиÑģполÑĮзÑĥеÑĤÑģÑı":143248,"ĠcientÃŃf":143249,"ĠcientÃŃfico":143250,"×¢×ŀ×§":143251,"Ġgợi":143252,"Ø´ØŃÙĨ":143253,"ĠÅĽm":143254,"ĠÅĽmier":143255,"ĠÅĽmierci":143256,"à¸Ħาสิà¹Ĥà¸Ļà¸Ńà¸Ńà¸Ļà¹Ħลà¸Ļà¹Į":143257,"×Ĺש×ijת×Ļ":143258,"Ġningu":143259,"Ġninguém":143260,"è¾¼ãĤģ":143261,"ãģ·":143262,"ĠÑĥг":143263,"ĠÑĥгол":143264,"ï½°":143265,"פת×Ļ×Ĺ":143266,"פת×Ļ×Ĺת":143267,"Ġ×Ķר×IJש×ķ׳×Ļ×Ŀ":143268,"pósito":143269,"ãĤŃãĥ¬ãĤ¤":143270,"ãģ©ãģĵãĤį":143271,"à¹Ģà¸Ĺà¹Īาà¹Ħ":143272,"à¹Ģà¸Ĺà¹Īาà¹Ħหร":143273,"à¹Ģà¸Ĺà¹Īาà¹Ħหรà¹Ī":143274,"ĠинÑĤеÑĢÑĮеÑĢ":143275,"ĠØŃاج":143276,"ĠØŃاجة":143277,"สีà¸Ĥาว":143278,"ìĸ¼":143279,"Ġná»Ļ":143280,"Ġná»Ļp":143281,"ĠÃŃnd":143282,"ĠÃŃndice":143283,"สำรวà¸Ī":143284,"Ġкаждой":143285,"Ġhotéis":143286,"ĠnastÄĻ":143287,"ĠnastÄĻpn":143288,"Ġ×Ķ×§×ķ×ĵ":143289,"Ġ×Ķ×§×ķ×ĵ×Ŀ":143290,"פ×ķפ":143291,"פ×ķפ×ķ׾":143292,"פ×ķפ×ķ׾ר×Ļ":143293,"вÑĪей":143294,"ãĤ·ãĥ³ãĥĹ":143295,"ãĤ·ãĥ³ãĥĹãĥ«":143296,"ĠzdjÄĻÄĩ":143297,"ĠгÑĢÑĥппа":143298,"ĠпомеÑī":143299,"ĠпомеÑīениÑı":143300,"ãģ©ãģĨãģĦãģĨ":143301,"ĠиÑģпÑĭÑĤа":143302,"ĠogÅĤ":143303,"ĠogÅĤos":143304,"ĠogÅĤoszen":143305,"ĠogÅĤoszeni":143306,"สรà¹īาà¸ĩสรร":143307,"สรà¹īาà¸ĩสรรà¸Ħà¹Į":143308,"à¸ŀรรà¸ĵ":143309,"ĠçıkÄ±ÅŁ":143310,"ĠÑĩаÑģÑĤноÑģÑĤи":143311,"Ġ×ķ×Ļ×ķתר":143312,"ç¶ļãģįãĤĴ":143313,"ç¶ļãģįãĤĴèªŃ":143314,"ç¶ļãģįãĤĴèªŃãĤĢ":143315,"à¸ģรั":143316,"à¸ģรัม":143317,"гÑĢаÑĦ":143318,"Ġвлад":143319,"ĠвладелÑĮ":143320,"ĠвладелÑĮÑĨ":143321,"ĠistediÄŁ":143322,"ĠistediÄŁiniz":143323,"×ij×ľ×¢":143324,"×ij×ľ×¢×ĵ×Ļ":143325,"ÙħÙĪØ§Ùģ":143326,"ÙħÙĪØ§ÙģÙĤØ©":143327,"Ġ×Ļ×ķר":143328,"Ġ×Ļ×ķרק":143329,"ãĤ«ãĥ¼ãĥīãĥŃãĥ¼ãĥ³":143330,"ĠاÙĦÙħØ´ÙĥÙĦ":143331,"ĠاÙĦÙħØ´ÙĥÙĦØ©":143332,"ĠêµŃíļĮ":143333,"ספ×ĺ":143334,"ספ×ĺ×ŀ":143335,"ספ×ĺ×ŀ×ijר":143336,"Ġìĸ´ëłµ":143337,"ÙĥاÙħ":143338,"ÙĥاÙħÙĬرا":143339,"schlü":143340,"schlüsse":143341,"ĠØ«ÙĨ":143342,"ĠØ«ÙĨائÙĬ":143343,"ìī½":143344,"ĠÐŀÑģоб":143345,"ĠÐŀÑģобенно":143346,"ĠинвеÑģÑĤи":143347,"ĠинвеÑģÑĤиÑĨи":143348,"اØŃتÙħ":143349,"اØŃتÙħاÙĦ":143350,"EÄŀ":143351,"EÄŀİ":143352,"íķĺê²łëĭ¤":143353,"Ġ×IJ×ijר×Ķ":143354,"Ġ×IJ×ijר×Ķ×Ŀ":143355,"Ġ×ij×Ĺ×Ļ׳×Ŀ":143356,"Ø£ÙĪØ¶":143357,"Ø£ÙĪØ¶Ø§Ø¹":143358,"Ġdél":143359,"Ġdélai":143360,"Ġ×IJ×ķ×Ķ×ij×Ļ×Ŀ":143361,"ĠÑģоÑħ":143362,"ĠÑģоÑħÑĢ":143363,"ĠÑģоÑħÑĢани":143364,"ĠдоÑģÑĤиж":143365,"ĠдоÑģÑĤижени":143366,"สิà¹Īà¸ĩà¹ģ":143367,"สิà¹Īà¸ĩà¹ģวà¸Ķ":143368,"สิà¹Īà¸ĩà¹ģวà¸Ķล":143369,"สิà¹Īà¸ĩà¹ģวà¸Ķลà¹īà¸Ńม":143370,"ĠاÙĦÙħباشر":143371,"ĠÑĦиг":143372,"ĠÑĦигÑĥÑĢ":143373,"можем":143374,"׾×ŀ×Ļ×ĵ×Ķ":143375,"Ġciné":143376,"Ġcinéma":143377,"Ġbada":143378,"ĠbadaÅĦ":143379,"جبÙĩØ©":143380,"Ġдеп":143381,"ĠдепÑĥÑĤ":143382,"ĠдепÑĥÑĤаÑĤ":143383,"Ġdistância":143384,"ĠاÙĦÙħعار":143385,"ĠاÙĦÙħعارضة":143386,"thèse":143387,"ünc":143388,"üncü":143389,"Ġданного":143390,"ĠBelgi":143391,"ĠBelgië":143392,"Ġ×ij×ij×§":143393,"Ġ×ij×ijקש×Ķ":143394,"ยà¹Īาà¸Ļ":143395,"Ġsolução":143396,"Ġ×Ķצ×ĺר":143397,"Ġ×Ķצ×ĺרפ×ķ":143398,"ĠØ£ÙĨØŃ":143399,"ĠØ£ÙĨØŃاء":143400,"ĠدÙħØ´":143401,"ĠدÙħØ´ÙĤ":143402,"มัà¹ī":143403,"มัà¹īย":143404,"Ùħغرب":143405,"استعÙħاÙĦ":143406,"ĠSÅĤow":143407,"ĠëıĻìĭľ":143408,"ĠëıĻìĭľìĹIJ":143409,"ĠÑģоÑģ":143410,"ĠÑģоÑģед":143411,"ì²ŃìĨĮ":143412,"ì²ŃìĨĮëħĦ":143413,"ĠгÑĢаÑĦ":143414,"ĠгÑĢаÑĦик":143415,"ĠìŀijìĿĢ":143416,"
Ġyeti":143417,"ĠyetiÅŁtir":143418,"ĠìĿ´ê²ĥìĿ´":143419,"หà¹Īาà¸ĩ":143420,"Ø¥ÙħÙĥاÙĨ":143421,"Ø¥ÙħÙĥاÙĨÙĬØ©":143422,"استعراض":143423,"Ùħخدر":143424,"ĠÑĩÑĥÑĤÑĮ":143425,"ÙħدÙĬر":143426,"ÙħدÙĬرÙĬØ©":143427,"Ġà¹Ģมษ":143428,"Ġà¹Ģมษายà¸Ļ":143429,"ĠмеÑħ":143430,"ĠмеÑħаниз":143431,"ĠмеÑħанизм":143432,"ĠÑģÑĥм":143433,"ĠÑģÑĥммÑĥ":143434,"Ġvö":143435,"Ġvöll":143436,"Ġvöllig":143437,"ĠдÑĢÑĥз":143438,"ĠдÑĢÑĥзÑĮÑı":143439,"ãĤĴåĪ©ç͍ãģĹãģ¦":143440,"à¸ļรรà¸Īุ":143441,"pożycz":143442,"×ŀש׼":143443,"×ŀ×©×Ľ×ł×ª":143444,"×ŀ×©×Ľ×ł×ª×IJ":143445,"Ġeuropéen":143446,"Ġproprié":143447,"Ġpropriétaire":143448,"Ġkhấu":143449,"ãģĦãģŁãģłãģijãĤĭ":143450,"Ġtecrü":143451,"Ġtecrübe":143452,"×Ķ×ij":143453,"×Ķ×ij׳×Ķ":143454,"ĠcuÌ":143455,"ĠcuÌī":143456,"ĠcuÌīa":143457,"×IJ×ķ×ķ":143458,"×IJ×ķ×ķ×Ļר×Ķ":143459,"Ġ׼×ķ׾×ķ":143460,"Ulus":143461,"Uluslararası":143462,"Ġ׳×ķת":143463,"Ġ׳×ķ×ª×Ł":143464,"ãģ«åIJij":143465,"ãģ«åIJijãģijãģ¦":143466,"ë¹Ľ":143467,"à¸Ĺัà¸ģษ":143468,"à¸Ĺัà¸ģษะ":143469,"سÙĤÙĪ":143470,"سÙĤÙĪØ·":143471,"Ġвн":143472,"ĠвнеÑĪ":143473,"ĠвнеÑĪне":143474,"Ġurz":143475,"ĠurzÄĻd":143476,"Ġámb":143477,"Ġámbito":143478,"à¸Ńà¸ĺิ":143479,"à¸Ńà¸ĺิà¸ļาย":143480,"ĠÅĤad":143481,"ĠÅĤadn":143482,"ê±´ì¶ķ":143483,"wództ":143484,"wództw":143485,"Ġquestões":143486,"Ġשק":143487,"Ġשק×Ļ×ij׾":143488,"ĠmiejscowoÅĽci":143489,"Ġвал":143490,"ĠвалÑİÑĤ":143491,"häuser":143492,"หà¸Ļà¸Ńà¸ĩ":143493,"ãģ¨åħ±":143494,"ãģ¨åħ±ãģ«":143495,"ãĥıãĥ¼ãĥī":143496,"Ġê°ľìµľ":143497,"ĠоÑģновном":143498,"ĠмÑıÑģ":143499,"اعت":143500,"اعتÙĤاÙĦ":143501,"สà¸ĸิ":143502,"สà¸ĸิà¸ķิ":143503,"Ngu":143504,"Nguá»ĵn":143505,"ĠÙħجÙĦ":143506,"ĠÙħجÙĦØ©":143507,"à¹ģà¸Ĥà¸Ļ":143508,"ĠاÙĦÙĦÙĬبÙĬ":143509,"פע×Ļ׾×ķ×Ļ×ķת":143510,"Ġ×Ķרפ×ķ×IJ×Ļ":143511,"פר×ķפ":143512,"פר×ķפ×Ļ׾":143513,"ק׾×IJ":143514,"ק׾×IJס×Ļ":143515,"ÙĥتشÙģ":143516,"ãģ«ãģªãģ£ãģ¦ãģĹãģ¾ãģĨ":143517,"à¹Ģà¸Ħลà¹ĩà¸Ķ":143518,"à¹Ģà¸Ħลà¹ĩà¸Ķลัà¸ļ":143519,"Ġì»´":143520,"Ġì»´íĵ¨":143521,"Ġì»´íĵ¨íĦ°":143522,"Ġ×Ĺ×Ļ×ķ×ij×Ļ":143523,"Ġnäm":143524,"Ġnämlich":143525,"åij¼ãģ°":143526,"åij¼ãģ°ãĤĮ":143527,"ĠÑĢол":143528,"ĠÑĢоли":143529,"Ġspécialisé":143530,"à¸Ļวัà¸ķ":143531,"à¸Ļวัà¸ķà¸ģรรม":143532,"ÙĨصÙĪØµ":143533,"пеÑĢед":143534,"пеÑĢедаÑĩ":143535,"thèque":143536,"Ġר×IJ×Ļת×Ļ":143537,"ãĥĢãĤ¦ãĥ³":143538,"ãĤıãģĭ":143539,"ãĤıãģĭãģ£ãģ¦":143540,"беÑĢеж":143541,"ĠÑģек":143542,"ĠÑģекÑĢ":143543,"ĠÑģекÑĢеÑĤ":143544,"ĠпоÑģÑĤоÑıнн":143545,"à¸Ĥà¸Ļสà¹Īà¸ĩ":143546,"Ġmük":143547,"Ġmükem":143548,"Ġmükemmel":143549,"еÑĤеÑģÑĮ":143550,"ĠاÙĦسÙĨÙĪØ§Øª":143551,"ĠìłĦíĺĢ":143552,"Ġ×Ķ×ŀ×§×ķר×Ļ":143553,"Ġmüd":143554,"Ġmüdah":143555,"Ġmüdahale":143556,"Ġwyb":143557,"Ġwybór":143558,"Ġtendência":143559,"إدار":143560,"إدارÙĬØ©":143561,"Ġunterstützen":143562,"ת×ijר":143563,"ת×ijרר":143564,"Ġdiá":143565,"Ġdiálogo":143566,"ĠÃĸnce":143567,"ĠÃĸnceki":143568,"ãĤ¹ãĥĿãĥĥãĥĪ":143569,"ëĦ£":143570,"ĠGeli":143571,"ĠGeliÅŁ":143572,"ãĤĴéĢļ":143573,"ãĤĴéĢļãģĹãģ¦":143574,"ĠFuÃŁball":143575,"Ġsalari":143576,"Ġsalarié":143577,"ĠпÑĢодÑĥкÑĤов":143578,"صÙģÙĤØ©":143579,"รวà¸ļ":143580,"รวà¸ļรวม":143581,"à¹ĥà¸Ļà¸IJาà¸Ļ":143582,"à¹ĥà¸Ļà¸IJาà¸Ļะ":143583,"Ġkayna":143584,"Ġkaynaģı":143585,"ĠìŀijíĴĪ":143586,"ĠвÑĭÑĢаж":143587,"ĠвÑĭÑĢажен":143588,"ĠÑģÑĤеп":143589,"ĠÑģÑĤепени":143590,"ĠاÙĦÙħÙĪØ¬ÙĪØ¯":143591,"ĠاÙĦÙħÙĪØ¬ÙĪØ¯Ø©":143592,"ลà¹īม":143593,"ĠnajczÄĻ":143594,"ĠnajczÄĻÅĽcie":143595,"ĠnajczÄĻÅĽciej":143596,"Ġzwy":143597,"Ġzwyk":143598,"ĠzwykÅĤ":143599,"Ġê·¸ëłĩì§Ģ":143600,"à¸ģระà¸Ī":143601,"à¸ģระà¸Īาย":143602,"Ġëĭµ":143603,"Ġëĭµë³Ģ":143604,"ĠÑĢеак":143605,"ĠÑĢеакÑĨи":143606,"ĠÅĽwież":143607,"ĠÑģÑĤоимоÑģÑĤи":143608,"ÙħÙĨاÙĤ":143609,"ÙħÙĨاÙĤØ´":143610,"ÙħÙĨاÙĤشة":143611,"ĠÑħоÑĩÑĥ":143612,"ãĥľãĥ¼ãĥī":14361
3,"Ġróżnic":143614,"ĠкÑĢÑĭ":143615,"ĠкÑĢÑĭÑĪ":143616,"âľĵ":143617,"ãĤ³ãĥ³ãĥĨãĥ³":143618,"ãĤ³ãĥ³ãĥĨãĥ³ãĥĦ":143619,"ĠпÑĢедпоÑĩ":143620,"×ŀר×ij×Ļת":143621,"ĠØ´Ùĥ":143622,"ĠØ´Ùĥرا":143623,"Ġдал":143624,"Ġдалек":143625,"Ġдалеко":143626,"برÙĬØ·":143627,"برÙĬطاÙĨÙĬا":143628,"عÙĨا":143629,"عÙĨاÙĬØ©":143630,"ĠÑĢаÑģÑģказ":143631,"ĠÑĢаÑģÑģказÑĭва":143632,"Ø£ÙĦÙĪ":143633,"Ø£ÙĦÙĪØ§ÙĨ":143634,"æĮģãģ£ãģ¦":143635,"æĮģãģ£ãģ¦ãģĦ":143636,"Ùħبادئ":143637,"×Ķ×¢×ijר":143638,"×Ķ×¢×ijרת":143639,"Ġyayı":143640,"Ġyayıml":143641,"Ġyayımla":143642,"mát":143643,"máticos":143644,"à¸ģัà¸ĩ":143645,"à¸ģัà¸ĩวล":143646,"Ġ×ľ×¤×ª":143647,"Ġ×ľ×¤×ª×ķ×Ĺ":143648,"à¸ŀฤà¸ķิ":143649,"à¸ŀฤà¸ķิà¸ģรรม":143650,"íĤ¬":143651,"ĠокÑĢÑĥг":143652,"Ġ×ŀצ×ķ×ķ×Ķ":143653,"ÐĽÐµÐ½Ð¸":143654,"ÐĽÐµÐ½Ð¸Ð½":143655,"ĠTriá»ģu":143656,"ãĤ³ãĥŁãĥ¥":143657,"ãĤ³ãĥŁãĥ¥ãĥĭ":143658,"ãĤ³ãĥŁãĥ¥ãĥĭãĤ±":143659,"ãĤ³ãĥŁãĥ¥ãĥĭãĤ±ãĥ¼ãĤ·ãĥ§ãĥ³":143660,"ÙĥÙĨÙĬ":143661,"ÙĥÙĨÙĬسة":143662,"ãĤĴä¸Ńå¿ĥ":143663,"ãĤĴä¸Ńå¿ĥãģ«":143664,"ĠmiÄĻdz":143665,"ĠmiÄĻdzyn":143666,"ĠmiÄĻdzynar":143667,"ĠmiÄĻdzynarod":143668,"ĠmiÄĻdzynarodow":143669,"ÙĦÙĨ":143670,"ÙĦÙĨدا":143671,"برش":143672,"برشÙĦÙĪÙĨ":143673,"برشÙĦÙĪÙĨØ©":143674,"à¸ģระà¸ķุ":143675,"à¸ģระà¸ķุà¹īà¸Ļ":143676,"Ġgı":143677,"Ġgıda":143678,"à¸Ľà¸£à¸°à¸Ĺัà¸ļ":143679,"à¸Ľà¸£à¸°à¸Ĺัà¸ļà¹ĥà¸Ī":143680,"Ġë¶Ī구":143681,"Ġë¶Ī구íķĺê³ł":143682,"ĠÙĨØ·":143683,"ĠÙĨطاÙĤ":143684,"ĠÐľÐ¾Ð¶ÐµÑĤ":143685,"Präs":143686,"Präsident":143687,"ĠÑģкоÑĢ":143688,"ĠÑģкоÑĢоÑģÑĤÑĮ":143689,"Ġ×Ķ×ij×ķקר":143690,"еÑħаÑĤÑĮ":143691,"Ġgạo":143692,"Ġש×IJ×Ļ׳×Ŀ":143693,"Ġ×ij׳×ķ×Ĵ":143694,"Ġ×ij׳×ķ×Ĵ×¢":143695,"ĠопиÑģание":143696,"Ġuczni":143697,"Ġuczniów":143698,"à¹Ģà¸Ńà¹ĩà¸Ļ":143699,"Ġتش":143700,"ĠتشرÙĬÙĨ":143701,"Ġnhãn":143702,"빨":143703,"Ġcaractère":143704,"×¢×ľ×Ļ":143705,"×¢×ľ×Ļ×Ļ×Ķ":143706,"楽ãģĹãĤģãĤĭ":143707,"ĠÑģаÑħ":143708,"ĠÑģаÑħаÑĢ":143709,"дÑĥмаÑĤÑĮ":143710,"ĠÐĴозможно":143711,"صÙĬاÙĨ":143712,"صÙĬاÙĨØ©":143713,"ömür":143714,"สล":143715,"สลà¹ĩ":143716,"สลà¹ĩà¸Ń":143717,"สลà¹ĩà¸Ńà¸ķ":143718,"롯":143719,"Ġthói":143720,"grÃ¶ÃŁe":143721,"ĠksiÄĻ":143722,"ĠksiÄĻg":143723,"ĠÑĢом":143724,"ĠÑĢоман":143725,"ÙĤاسÙħ":143726,"×ŀ×ij×ķ×Ĵ":143727,"×ŀ×ij×ķ×Ĵר×Ļ×Ŀ":143728,"besch":143729,"beschäft":143730,"beschäftig":143731,"×Ķצע×Ķ":143732,"ĠÃģrea":143733,"ĠзаÑıвк":143734,"Ĺ":143735,"ĠлÑİбого":143736,"Ġม":143737,"Ġมà¸ģร":143738,"Ġมà¸ģราà¸Ħม":143739,"ÑĦиз":143740,"ÑĦизиÑĩеÑģк":143741,"инÑĦ":143742,"инÑĦек":143743,"инÑĦекÑĨи":143744,"اÙĦØ·":143745,"اÙĦطائÙģ":143746,"Ġколл":143747,"ĠколлекÑĤив":143748,"езжа":143749,"ĠسبØŃ":143750,"ĠسبØŃاÙĨ":143751,"ĠسبØŃاÙĨÙĩ":143752,"schlä":143753,"schläge":143754,"Ġди":143755,"Ġдиаг":143756,"ĠдиагноÑģÑĤ":143757,"ĠоÑĤмеÑĤиÑĤÑĮ":143758,"ТЬ":143759,"ĠاÙĦدر":143760,"ĠاÙĦدراسÙĬ":143761,"עצ×ŀ":143762,"עצ×ŀ×IJ×ķת":143763,"Ġdémarch":143764,"Ġdémarche":143765,"Ġ×ĺ×ķ×¢":143766,"Ġ×ĺ×ķ×¢×Ł":143767,"Ġfuncionários":143768,"ỵ":143769,"׾׼×IJ":143770,"׾׼×IJ×ķר×Ķ":143771,"à¸ĭà¹Ī":143772,"à¸ĭà¹Īà¸Ńม":143773,"ĠÑĩÑĥв":143774,"ĠÑĩÑĥвÑģÑĤво":143775,"âĸ¼":143776,"пÑĥÑī":143777,"пÑĥÑīен":143778,"ĠмеÑĢ":143779,"ĠмеÑĢоп":143780,"ĠмеÑĢопÑĢи":143781,"ĠмеÑĢопÑĢиÑıÑĤиÑı":143782,"Ġuçu":143783,"ĠuçuÅŁ":143784,"ãĤĴåĪ©ç͍ãģĻãĤĭ":143785,"aÄŁ":143786,"aÄŁlı":143787,"ìĺĪìĪł":143788,"à¹ģยà¹Ī":143789,"ĠاÙĦÙĥÙħ":143790,"ĠاÙĦÙĥÙħبÙĬ":143791,"ĠاÙĦÙĥÙħبÙĬÙĪØªØ±":143792,"تÙĪÙĬ":143793,"تÙĪÙĬتر":143794,"à¹Ģà¸Ĭีà¹Īยว":143795,"à¹Ģà¸Ĭีà¹Īยวà¸Ĭา":143796,"à¹Ģà¸Ĭีà¹Īยวà¸Ĭาà¸į":143797,"á»Ķ":143798,"Ġhiếm":143799,"ذاÙĥرة":143800,"Ġ×Ķ×ŀ×Ļ×ķ×Ĺ×ĵ":143801,"ĠìĪľ":143802,"ĠìĪľê°Ħ":143803,"ĠKı":143804,"ĠKısa":143805,"ĠgeleceÄŁi":143806,"пÑĢоÑĦеÑģÑģиона":143807,"пÑĢоÑĦеÑģÑģионал":143808,"Ġogó":143809,"Ġogóle":14381
0,"ĠgÅĤów":143811,"ĠgÅĤówne":143812,"ĠÑģÑĤилÑĮ":143813,"×IJפ׾":143814,"×IJפ׾×Ļ×§":143815,"×IJפ׾×Ļקצ×Ļ×Ķ":143816,"สมารà¹Į":143817,"สมารà¹Įà¸Ĺ":143818,"สมารà¹Įà¸Ĺà¹Ĥà¸Ł":143819,"สมารà¹Įà¸Ĺà¹Ĥà¸Łà¸Ļ":143820,"Ġthánh":143821,"ÐŁÐ¾Ð´":143822,"ÐŁÐ¾Ð´ÑĢоб":143823,"ÐŁÐ¾Ð´ÑĢобнее":143824,"ĠاÙĦتÙĪÙĨ":143825,"ĠاÙĦتÙĪÙĨسÙĬ":143826,"Ġbahçe":143827,"à¹ģà¸ģà¹īà¸Ľà¸±à¸įหา":143828,"éducation":143829,"europ":143830,"europä":143831,"europäische":143832,"ĠKsi":143833,"ĠKsiÄĻ":143834,"ĠëĦĺ":143835,"ĠëĦĺìĸ´":143836,"Ġvüc":143837,"Ġvücud":143838,"Ġyayg":143839,"Ġyaygın":143840,"Ġniekt":143841,"Ġniektóry":143842,"Ġniektórych":143843,"ãģŃãģĩ":143844,"Ġкаж":143845,"ĠкажеÑĤÑģÑı":143846,"каж":143847,"кажеÑĤ":143848,"ĠاÙĦدÙĬÙħÙĤرا":143849,"ĠاÙĦدÙĬÙħÙĤراط":143850,"ĠاÙĦدÙĬÙħÙĤراطÙĬØ©":143851,"æŃ©":143852,"æŃ©ãģĦãģ¦":143853,"Ġvaz":143854,"Ġvazge":143855,"Ġvazgeç":143856,"ĠминималÑĮ":143857,"ĠминималÑĮн":143858,"ãĥijãĤ¿":143859,"ãĥijãĤ¿ãĥ¼ãĥ³":143860,"ĠëĬ":143861,"ĠëĬIJ":143862,"ĠëĬIJëĤĮ":143863,"ãģ¡ãĤĩãģĨ":143864,"ãģ¡ãĤĩãģĨãģ©":143865,"Ġà¸ģร":143866,"Ġà¸ģรà¸ģà¸İ":143867,"Ġà¸ģรà¸ģà¸İาà¸Ħม":143868,"تجدÙĬد":143869,"ĠشاÙħÙĦ":143870,"หลัà¸ģà¸IJาà¸Ļ":143871,"ĠмаÑĢÑĪ":143872,"ĠмаÑĢÑĪÑĢÑĥÑĤ":143873,"ĠvÃŃt":143874,"ĠvÃŃtima":143875,"Ġquizá":143876,"aygı":143877,"×ĵ×ijר×Ļ×ķ":143878,"Ġизд":143879,"Ġиздели":143880,"ĠизделиÑı":143881,"пла":143882,"плаÑĩ":143883,"плаÑĩива":143884,"ä»»ãģĽ":143885,"Ġéquipé":143886,"ä¹ħãģĹãģ":143887,"ä¹ħãģĹãģ¶":143888,"ä¹ħãģĹãģ¶ãĤĬ":143889,"ĠкаÑĤ":143890,"ĠкаÑĤал":143891,"ĠкаÑĤалог":143892,"สà¹īม":143893,"ĠÑĢей":143894,"ĠÑĢейÑĤ":143895,"ĠÑĢейÑĤинг":143896,"Ġthuyá»ģn":143897,"ĠاÙĦÙħÙĤدس":143898,"espère":143899,"ãģ«åħ¥ãģ£ãģŁ":143900,"หมายà¹Ģลà¸Ĥ":143901,"ת×Ĺ×ķשת":143902,"à¸Ļà¹Īะ":143903,"ĠpeÅĤ":143904,"ĠpeÅĤne":143905,"Ġpérd":143906,"Ġpérdida":143907,"หมวà¸Ķ":143908,"หมวà¸Ķหมูà¹Ī":143909,"иÑĩеÑģкÑĥÑİ":143910,"çµĤãĤı":143911,"çµĤãĤıãģ£ãģŁ":143912,"Ġ×Ĵ×ķ×Ĵ׾":143913,"à¸Ĺำà¸Ħวาม":143914,"à¸Ĺำà¸Ħวามสะà¸Ńาà¸Ķ":143915,"Hotéis":143916,"ĠзаÑĢ":143917,"ĠзаÑĢегиÑģÑĤ":143918,"ĠзаÑĢегиÑģÑĤÑĢи":143919,"ĠзаÑĢегиÑģÑĤÑĢиÑĢова":143920,"ĠÑģобÑĭÑĤи":143921,"ĠÑģобÑĭÑĤиÑı":143922,"Ġ×ĸ׼×IJ":143923,"ÙħÙĨظÙĪÙħØ©":143924,"Ġ×Ķ×ŀצ":143925,"Ġ×Ķ×ŀצ×Ļ×IJ×ķת":143926,"ÙħÙĥÙĪÙĨ":143927,"ÙħÙĥÙĪÙĨات":143928,"ä¸ĬãģĮãĤĭ":143929,"ĠmÄĻ":143930,"ĠmÄĻsk":143931,"หรืà¸Ńà¹Ģà¸Ľà¸¥à¹Īา":143932,"ëĤ®":143933,"Ġnoktas":143934,"Ġnoktası":143935,"ĠболÑĮÑĪим":143936,"ĠлÑĥÑĩÑĪиÑħ":143937,"Ø´ÙĩÙĬد":143938,"à¸Ńำà¸Ļ":143939,"à¸Ńำà¸Ļวย":143940,"à¸Ńำà¸Ļวยà¸Ħวาม":143941,"à¸Ńำà¸Ļวยà¸Ħวามสะà¸Ķวà¸ģ":143942,"Ġев":143943,"ĠевÑĢ":143944,"ĠевÑĢоп":143945,"ĠевÑĢопей":143946,"à¸īาย":143947,"ìĦŃ":143948,"ÙħÙ쨧":143949,"ÙħÙ쨧ÙĪØ¶":143950,"ÙħÙ쨧ÙĪØ¶Ø§Øª":143951,"ë¹Į":143952,"赤ãģ¡ãĤĥãĤĵ":143953,"ĠÑĥдалоÑģÑĮ":143954,"ĠХоÑĤ":143955,"ĠХоÑĤÑı":143956,"przedsiÄĻbiorc":143957,"ĠHôm":143958,"íķĺìĺĢìĬµëĭĪëĭ¤":143959,"Ġнаг":143960,"ĠнагÑĢÑĥз":143961,"ĠнагÑĢÑĥзк":143962,"Ġ×ij×Ļ׳׾×IJ×ķ×ŀ×Ļ":143963,"Ġê°ĢëĬ¥íķľ":143964,"ĠHữu":143965,"à¸Ńุà¸Ķ":143966,"à¸Ńุà¸Ķม":143967,"ת×ķפ":143968,"ת×ķפע×Ķ":143969,"ĠmiÅĤo":143970,"ĠmiÅĤoÅĽci":143971,"ksiÄħż":143972,"ksiÄħżka":143973,"ĠاÙĦÙĦعبة":143974,"à¸īาà¸ģ":143975,"สะสม":143976,"×ŀתר":143977,"×ŀתר×Ĺש":143978,"Ġlégère":143979,"Ġ׾צפ":143980,"Ġ׾צפ×Ļ×Ķ":143981,"ĠиÑģÑĤоÑĢиÑı":143982,"ĠãĥĪãĥ©":143983,"ĠãĥĪãĥ©ãĥĥãĤ¯":143984,"ĠãĥĪãĥ©ãĥĥãĤ¯ãĥIJãĥĥãĤ¯":143985,"Ġка":143986,"ĠкаÑĦе":143987,"×ŀס×ŀ×ļ":143988,"Ġcüm":143989,"Ġcümle":143990,"à¹Ģà¸Ħลืà¹Īà¸Ńà¸Ļà¹Ħหว":143991,"ãģĬãģĿ":143992,"ãģĬãģĿãĤīãģı":143993,"ìŀIJëıĻ":143994,"ìŀIJëıĻì°¨":143995,"à¸Ńัà¸ķ":143996,"à¸Ńัà¸ķà¹Ĥà¸Ļ":143997,"à¸Ńัà¸ķà¹Ĥà¸Ļมั":143998,"à¸Ńัà¸ķà¹Ĥà¸Ļมัà¸ķิ":143999,"ĠÅŁik":144000,"ĠÅŁikay":144001,"ĠÅŁ
ikayet":144002,"extrême":144003,"krä":144004,"kräfte":144005,"ëĤĻ":144006,"íķij":144007,"ì²Ļ":144008,"íĺĪ":144009,"ì°į":144010,"âĻ¡":144011,"ìŀĶ":144012,"뢰":144013,"íĿĶ":144014,"íĿIJ":144015,"âĩĴ":144016,"ë§Ľ":144017,"ìĬĪ":144018,"á»Ĵ":144019,"ìĺµ":144020,"âĹİ":144021,"íĤ¨":144022,"ê¿Ī":144023,"ì΍":144024,"ìĽ¨":144025,"ë§¥":144026,"ï½Ģ":144027,"J":144028,"Ẩ":144029,"ãħİ":144030,"ÑĹ":144031,"ìĦ¬":144032,"ì¹¼":144033,"ï¼¶":144034,"ìĽł":144035,"룴":144036,"Åĥ":144037,"ëĤ¼":144038,"ëĭIJ":144039,"â̹":144040,"ë¦Ń":144041,"ì§IJ":144042,"â̤":144043,"Ãħ":144044,"뾨":144045,"íĦ¸":144046,"íľĺ":144047,"ê²ģ":144048,"ë´ħ":144049,"Ãĺ":144050,"ëŃĶ":144051,"ëĺij":144052,"âĹĩ":144053,"ìĹĺ":144054,"ï»´":144055,"ë§¹":144056,"ï¾Ŀ":144057,"ìĬ·":144058,"íĥķ":144059,"ï¼ł":144060,"ì»´":144061,"ëłĮ":144062,"ì½ľ":144063,"ﻹ":144064,"ãħł":144065,"졸":144066,"ëħ¹":144067,"âĤº":144068,"âĸ¶":144069,"íĥIJ":144070,"êµ´":144071,"íij¸":144072,"ÑĶ":144073,"íͽ":144074,"Ðħ":144075,"ë°¤":144076,"Ôģ":144077,"첨":144078,"ì¶ĺ":144079,"ë²Ĺ":144080,"멸":144081,"ï¼»":144082,"ï¼½":144083,"ï¼·":144084,"ì°Į":144085,"ÃĴ":144086,"íı´":144087,"ìĵ¸":144088,"ì´Į":144089,"ëģĶ":144090,"ëĶ©":144091,"ëĩĮ":144092,"ë©Ģ":144093,"벨":144094,"ï¼µ":144095,"ë§¡":144096,"ëĭ«":144097,"฿":144098,"ãģ±":144099,"ìĩ¼":144100,"ìºł":144101,"뮤":144102,"ê±±":144103,"컬":144104,"âĦĥ":144105,"ëͱ":144106,"ëĥĪ":144107,"ìĭ±":144108,"íĻĪ":144109,"ëŀIJ":144110,"ìħĢ":144111,"ìłł":144112,"ÐĨ":144113,"ëłī":144114,"ï½ħ":144115,"ï½ı":144116,"íĻĢ":144117,"뼰":144118,"á»®":144119,"íĤ¹":144120,"ê½ĥ":144121,"ﻤ":144122,"ïºĶ":144123,"꺼":144124,"ìķī":144125,"âϦ":144126,"ï½ģ":144127,"ìĵ´":144128,"ãĢī":144129,"ì°®":144130,"ì¤ĺ":144131,"Ừ":144132,"ëģĦ":144133,"ëIJ¨":144134,"ìķĮ":144135,"íĿĺ":144136,"íħIJ":144137,"ãĢĪ":144138,"겪":144139,"ëĭ¥":144140,"ê²¼":144141,"á»Į":144142,"맨":144143,"ëģĬ":144144,"벤":144145,"ëijĶ":144146,"íĿ¡":144147,"Ử":144148,"ë¬ĺ":144149,"ãģī":144150,"ëŀ«":144151,"íĶĪ":144152,"íħį":144153,"ìŀĥ":144154,"ï½ī":144155,"ìģľ":144156,"âĸ½":144157,"묻":144158,"âĸ³":144159,"X":144160,"ìģĺ":144161,"ì¶°":144162,"ìĬ´":144163,"ìķ±":144164,"ìĩĦ":144165,"Ắ":144166,"ï´¿":144167,"ï´¾":144168,"âĤ½":144169,"ëĦĵ":144170,"룩":144171,"쳤":144172,"ê´ľ":144173,"ÃĻ":144174,"Ỿ":144175,"ï¿£":144176,"ëĵŃ":144177,"ë©ĺ":144178,"ê»´":144179,"ëł´":144180,"Ðĥ":144181,"묵":144182,"ì§Ŀ":144183,"ãģº":144184,"ðŁĺĤ":144185,"ëŀ¬":144186,"ìłĬ":144187,"ê´Ħ":144188,"ìŀĬ":144189,"íŀĮ":144190,"ìĦ¯":144191,"âĪĢ":144192,"âĸ¡":144193,"ëĢĮ":144194,"ëŀĻ":144195,"ï½ĥ":144196,"Ặ":144197,"ï¾Ħ":144198,"ïºĺ":144199,"ë¹¼":144200,"ÃĮ":144201,"âĸ·":144202,"ê¸į":144203,"ë©ĭ":144204,"ãģĥ":144205,"ìĺĨ":144206,"ìĺ®":144207,"몬":144208,"롤":144209,"볬":144210,"ëĬ¦":144211,"âĸª":144212,"ì¼ĵ":144213,"ìľĪ":144214,"ì§§":144215,"ï½½":144216,"ëĥī":144217,"ï¾Į":144218,"ëĺIJ":144219,"ï¼ĥ":144220,"á»Ħ":144221,"ì´¬":144222,"춤":144223,"ï¼¹":144224,"ï»Ń":144225,"âĤ«":144226,"ï½ĩ":144227,"ìĺ·":144228,"ëĸ¨":144229,"âī«":144230,"릿":144231,"⾨":144232,"Ù±":144233,"쯤":144234,"ê¹Ķ":144235,"ðŁĺĬ":144236,"ìĪ«":144237,"ê³±":144238,"êµ³":144239,"ï½ĭ":144240,"à¸Į":144241,"Äł":144242,"ë͏":144243,"ë°ij":144244,"ìħĭ":144245,"íİ´":144246,"âľħ":144247,"íĥij":144248,"ëĪĩ":144249,"íı¼":144250,"ðŁĺį":144251,"ìĺĽ":144252,"ﻣ":144253,"Ñĺ":144254,"ì©Į":144255,"ë¦ħ":144256,"ìĿį":144257,"ク":144258,"ëįľ":144259,"ãģħ":144260,"íݼ":144261,"ëĭĿ":144262,"ë¿Į":144263,"ì¼°":144264,"ìĭ«":144265,"ë°¥":144266,"íĽĮ":144267,"ì¨Į":144268,"ë¹Ļ":144269,"ï½İ":144270,"ë´Ħ":144271,"ìĦ¹":144272,"ï½²":144273,"ìĮĵ":144274,"Òij":144275,"ë°į":144276,"ëłĢ":144277,"íĨ¤":144278,"ッ":144279,"ë¤Ħ":144280,"꽤"
:144281,"ï½Ĵ":144282,"ìķ¨":144283,"ï½¼":144284,"ê¹IJ":144285,"íģIJ":144286,"âĦĸ":144287,"맺":144288,"ﺮ":144289,"ëħģ":144290,"겸":144291,"ï»ł":144292,"íĬľ":144293,"Ź":144294,"ë¥Ń":144295,"ëĪī":144296,"ï½Ķ":144297,"íĮ¬":144298,"ìŀĩ":144299,"ï¬ģ":144300,"ﻨ":144301,"ëij¥":144302,"ëŀĦ":144303,"Ù¬":144304,"íĭ´":144305,"ìŀī":144306,"Ú¾":144307,"ìĽħ":144308,"ï»®":144309,"ëĭī":144310,"âīª":144311,"âĹĦ":144312,"ëĪĮ":144313,"íĽ¼":144314,"ì¤į":144315,"Ÿ":144316,"줬":144317,"ì¾Į":144318,"ï½ĵ":144319,"ï¾Ĭ":144320,"ðŁı»":144321,"ï¾ī":144322,"Ðģ":144323,"íĺIJ":144324,"ï¾Ļ":144325,"꼬":144326,"íŀIJ":144327,"âĢ¥":144328,"ëŁŃ":144329,"ë§ŀ":144330,"ìĥ¤":144331,"ïºĴ":144332,"íĭ±":144333,"ë½ij":144334,"Ãķ":144335,"âĪļ":144336,"ëĤĦ":144337,"ê¹Ŀ":144338,"ëĨĪ":144339,"Ẻ":144340,"ìħĪ":144341,"ìĮį":144342,"âĢ¡":144343,"ï¼±":144344,"ìģ¨":144345,"âĺº":144346,"ëĴ·":144347,"ìĺ³":144348,"ðŁijį":144349,"몽":144350,"ëĤŃ":144351,"ïºŃ":144352,"ë©Ī":144353,"á»Ī":144354,"íķĢ":144355,"ëĭĻ":144356,"ë¦ĩ":144357,"ìķ¤":144358,"ìį¼":144359,"ãĥµ":144360,"Ñ£":144361,"ìľĹ":144362,"âŃIJ":144363,"ï¾ĺ":144364,"íŬ":144365,"ê¾¼":144366,"ìķĹ":144367,"ï»Į":144368,"ê±·":144369,"ëħķ":144370,"롱":144371,"ìķĬ":144372,"ï¾Ģ":144373,"ìĩł":144374,"íĮ©":144375,"ﺪ":144376,"ë§Ļ":144377,"_":144378,"ê¿Ķ":144379,"íİľ":144380,"룸":144381,"íĶĶ":144382,"ﻳ":144383,"ëıķ":144384,"ìĭ¼":144385,"á»İ":144386,"ë§ĺ":144387,"ì¢ĭ":144388,"íĨ¡":144389,"ï½±":144390,"íĿij":144391,"Ỹ":144392,"ì¦Į":144393,"칸":144394,"ëŃĺ":144395,"ï¾Ĺ":144396,"ï»ĭ":144397,"íĬĢ":144398,"ë¥Ļ":144399,"콩":144400,"ëģĹ":144401,"ëį´":144402,"ìħľ":144403,"¸":144404,"ë»IJ":144405,"ìĥµ":144406,"ê²IJ":144407,"ëĵ¬":144408,"룰":144409,"ãħĭ":144410,"ìĹī":144411,"á»ĸ":144412,"ëĦĮ":144413,"ï½¶":144414,"ë´ĩ":144415,"ëĤ³":144416,"ãĤľ":144417,"ëĸ»":144418,"íİĢ":144419,"ëį©":144420,"íķ¸":144421,"÷":144422,"ê¼¼":144423,"ëĶľ":144424,"ë°´":144425,"ë©į":144426,"âĹ¯":144427,"ìĹij":144428,"ìϼ":144429,"ïºij":144430,"ë¶ķ":144431,"롬":144432,"ï½Į":144433,"íĨ¨":144434,"ﺴ":144435,"ëłĺ":144436,"ê°¤":144437,"ìβ":144438,"Ñĵ":144439,"ìħī":144440,"ï»ĵ":144441,"ëĪĶ":144442,"ëį§":144443,"â̼":144444,"ﻲ":144445,"ê°±":144446,"ê¿Ģ":144447,"ëĭ·":144448,"Ẹ":144449,"Ẫ":144450,"ÆĴ":144451,"ëį¤":144452,"ìĪŃ":144453,"ï½Ĥ":144454,"ï½Ī":144455,"Åł":144456,"룬":144457,"ѵ":144458,"ëĸ¡":144459,"ëĥĦ":144460,"ìĦ°":144461,"ëĵĪ":144462,"ï¾ĥ":144463,"ëĩ¨":144464,"ï½IJ":144465,"êµ½":144466,"ìĹ½":144467,"ëĤĢ":144468,"묶":144469,"ï½·":144470,"ìıŁ":144471,"íĺĶ":144472,"ê¼Ī":144473,"ëģĪ":144474,"ì¥IJ":144475,"ïºĹ":144476,"ÄĮ":144477,"ëĪł":144478,"ëĸ¼":144479,"íĢ´":144480,"âī¥":144481,"ëĭŃ":144482,"ì±Ļ":144483,"ê»ı":144484,"멤":144485,"ìĥĺ":144486,"ëį®":144487,"룡":144488,"ìĤ½":144489,"ãĪľ":144490,"Ĩ":144491,"â̧":144492,"コ":144493,"Ä£":144494,"ì¦ī":144495,"ï¼¼":144496,"Û©":144497,"âĪĻ":144498,"ë°ı":144499,"ë¹ħ":144500,"ðŁĺĽ":144501,"íĪ´":144502,"ðŁĴķ":144503,"ãĢĴ":144504,"ìŀĺ":144505,"ﺤ":144506,"ï½ĸ":144507,"멾":144508,"ë²¼":144509,"ëĿĦ":144510,"ëļľ":144511,"ï»ĺ":144512,"ìĥĮ":144513,"ï½Ħ":144514,"ì©Ķ":144515,"ï½Ļ":144516,"ﺩ":144517,"Ûŀ":144518,"âĺİ":144519,"ìł¤":144520,"ëIJ©":144521,"ÅĿ":144522,"âŀ¡":144523,"ï»§":144524,"Ðı":144525,"ì«ĵ":144526,"ê³½":144527,"Éij":144528,"ãĥ²":144529,"ëĤ«":144530,"ë¦ī":144531,"ì¢ģ":144532,"ë°Ń":144533,"ðŁĺģ":144534,"ë¹µ":144535,"첩":144536,"컵":144537,"ðŁĺĺ":144538,"ë±ħ":144539,"âīĪ":144540,"ë¹ļ":144541,"ï»ľ":144542,"ðŁĻı":144543,"íģ°":144544,"ìĦŀ":144545,"ï¾ļ":144546,"ìĺ¹":144547,"ë¼Ī":144548,"ëĤ¯":144549,"ëŀ©":144550,"íļ¡":144551,"ï½ķ":144552,"íĥĵ":144553,"ëĿł":144554,"ê³ģ":144555,"ëĵĢ":144556,"ìĹł":144557,"Z":144558,"ë§ij":144559,"ëĭ¿":
144560,"쿨":144561,"ãİ¡":144562,"ÐĬ":144563,"íĦ±":144564,"Ũ":144565,"ﺳ":144566,"ï¾ı":144567,"âĭħ":144568,"ê¼´":144569,"âī¤":144570,"íĮģ":144571,"Ω":144572,"궤":144573,"ìĪį":144574,"âľ¿":144575,"콤":144576,"ëĪħ":144577,"íĨ±":144578,"ãħľ":144579,"áIJħ":144580,"ÅĴ":144581,"ðŁijī":144582,"ﻦ":144583,"Ъ":144584,"ë¥ľ":144585,"íķ«":144586,"ï¾ĭ":144587,"âĻ«":144588,"ê¹ľ":144589,"ë°¸":144590,"ëĶĺ":144591,"íĿī":144592,"ï¾ģ":144593,"ï¾Ľ":144594,"볼":144595,"ê²¹":144596,"쿼":144597,"ﻬ":144598,"âŀ¤":144599,"ðŁĻģ":144600,"ïºł":144601,"ëĨ¨":144602,"믹":144603,"ê¸ĭ":144604,"ë»Ķ":144605,"ê¹ĥ":144606,"ëijij":144607,"íĭ¸":144608,"íİĻ":144609,"âŀĸ":144610,"ãĥ½":144611,"ì§ļ":144612,"ャ":144613,"ﻥ":144614,"íĮ½":144615,"âĢĴ":144616,"ìĮĢ":144617,"ìŃī":144618,"ëļ±":144619,"ãĤŀ":144620,"íĭĪ":144621,"ãĤIJ":144622,"ëīĺ":144623,"Σ":144624,"ê³°":144625,"ë¹Ĺ":144626,"ï¾İ":144627,"ðŁĺŃ":144628,"íĿł":144629,"ìĹ¿":144630,"ê°ļ":144631,"ì¤Į":144632,"ë§µ":144633,"ï½³":144634,"ãģ¢":144635,"ï»Ĺ":144636,"âī¦":144637,"Ú¤":144638,"ëłģ":144639,"ê¼½":144640,"ﻫ":144641,"âī§":144642,"ì´Ľ":144643,"ìłĿ":144644,"Ằ":144645,"âĻ£":144646,"ìºĺ":144647,"âĪĩ":144648,"ê²ī":144649,"ë°Ł":144650,"ï»Ķ":144651,"íĸĩ":144652,"âĸĴ":144653,"ðŁijı":144654,"Ãŀ":144655,"ðŁĺĨ":144656,"ﺼ":144657,"âĿĹ":144658,"ìºĶ":144659,"칩":144660,"ëĸ¤":144661,"ëĥħ":144662,"âĶľ":144663,"ï½»":144664,"ÎĶ":144665,"áĥ¦":144666,"ìŀİ":144667,"âĺĢ":144668,"âμ":144669,"ðŁĶ¥":144670,"ë°Į":144671,"ìłĸ":144672,"íĹĽ":144673,"Îķ":144674,"ïºĥ":144675,"ë¶ī":144676,"âĪŀ":144677,"íĥŃ":144678,"Ãĭ":144679,"âģĦ":144680,"ãħĩ":144681,"ëĦ¥":144682,"ëĭ®":144683,"ëł·":144684,"íĮĿ":144685,"캡":144686,"ë·Ķ":144687,"ì©į":144688,"íĤ´":144689,"ëļ«":144690,"âĵĴ":144691,"íķį":144692,"âĻĤ":144693,"ï¾Ĩ":144694,"âĨ©":144695,"ìį©":144696,"ïºķ":144697,"íĿĻ":144698,"Ñľ":144699,"íĤ·":144700,"íĿ°":144701,"íĥ±":144702,"ëķIJ":144703,"ï¾Ĵ":144704,"×ĥ":144705,"ëĮĦ":144706,"ìĺ´":144707,"ìķµ":144708,"ê¹¥":144709,"ëŀŃ":144710,"쪼":144711,"ãİĿ":144712,"ðŁĺħ":144713,"ëıĭ":144714,"몫":144715,"ﺸ":144716,"뮬":144717,"ë²ħ":144718,"ëijł":144719,"ìħ°":144720,"ì»·":144721,"ëĶª":144722,"ëħĶ":144723,"ãħ¡":144724,"ìĶ»":144725,"íķı":144726,"ëį±":144727,"ﺨ":144728,"ï¾į":144729,"ï½µ":144730,"ì¢Ģ":144731,"íİĮ":144732,"ï»°":144733,"ﺣ":144734,"Æ£":144735,"ðŁ¤£":144736,"ï·º":144737,"ëĤļ":144738,"âĭĨ":144739,"ë³į":144740,"ðŁĺĦ":144741,"ìĸĢ":144742,"ìĻł":144743,"ëĨĶ":144744,"íŨ":144745,"ï»Ľ":144746,"ï»Ŀ":144747,"á»¶":144748,"ìĸĺ":144749,"ìİĦ":144750,"ÚĨ":144751,"ï»ŀ":144752,"ëĢIJ":144753,"ê²Ķ":144754,"ﻵ":144755,"âŦ":144756,"íļŁ":144757,"ê¹ģ":144758,"ê°ĵ":144759,"ëĶ´":144760,"ìıĺ":144761,"ëļĿ":144762,"ỳ":144763,"ëŀ´":144764,"ëĦī":144765,"âĺŀ":144766,"ï½ĺ":144767,"Ž":144768,"ë¦İ":144769,"âĸ¬":144770,"ëŃī":144771,"âĩĽ":144772,"ìį¬":144773,"ïºŁ":144774,"Ëľ":144775,"ë¶ĵ":144776,"ìĽ°":144777,"Åľ":144778,"ëŃĩ":144779,"Ỳ":144780,"Ëļ":144781,"ëķĢ":144782,"âĺij":144783,"ðŁı¼":144784,"ìĸ½":144785,"âĮĴ":144786,"Ðİ":144787,"ɾ":144788,"íĮ¡":144789,"ï¾ħ":144790,"ìŀŃ":144791,"ィ":144792,"칫":144793,"ìľĮ":144794,"ÒĽ":144795,"굿":144796,"ëĭ¦":144797,"âĶĶ":144798,"ï¾ij":144799,"ì§ĸ":144800,"ìºĦ":144801,"ãĢĥ":144802,"ʼ":144803,"ê²Ł":144804,"ï½§":144805,"Ä¢":144806,"íİł":144807,"ë§·":144808,"ê°ĩ":144809,"ìĭ¹":144810,"ðŁĴ¦":144811,"ï¾ľ":144812,"ëĬĻ":144813,"벡":144814,"Å¿":144815,"ðŁĺĭ":144816,"ðŁĴª":144817,"ì¿Ħ":144818,"ë©ķ":144819,"ìѤ":144820,"ëĬĦ":144821,"ðŁĮ¸":144822,"ãĤĿ":144823,"Çİ":144824,"ï½ļ":144825,"ÄĹ":144826,"ëģĵ":144827,"ê¶IJ":144828,"áµī":144829,"ãĥĤ":144830,"ê»į":144831,"ðŁĺ¦":144832,"ãĢĿ":144833,"ð٤Ĺ":144834,"ÑŁ":144835,"ìĹİ":144836,"âľĮ":144837,"ìīIJ":144838,"ÃĨ
":144839,"íĹIJ":144840,"ðŁİī":144841,"Îij":144842,"ï½Ń":144843,"ðŁĴĻ":144844,"ìĽ¬":144845,"íĢĺ":144846,"ﻢ":144847,"ðŁĺİ":144848,"íij¼":144849,"íĿ©":144850,"ï»Ħ":144851,"íħĢ":144852,"ëłIJ":144853,"쥬":144854,"Ðĭ":144855,"ìĥ·":144856,"뾬":144857,"ðŁĺĥ":144858,"ëĦ¬":144859,"륨":144860,"ìĽį":144861,"ï½Ĩ":144862,"ï½´":144863,"ãĥħ":144864,"Ãı":144865,"ﻪ":144866,"âĻł":144867,"ëĬ¬":144868,"ë±Ģ":144869,"ë°ĭ":144870,"ìĥĢ":144871,"ï½¾":144872,"ëĤ±":144873,"컸":144874,"ðŁĴĸ":144875,"ðŁijĮ":144876,"Ñŀ":144877,"ì§±":144878,"ËĨ":144879,"ðŁĵļ":144880,"âŃķ":144881,"ï¬Ĥ":144882,"ﻡ":144883,"ëij¬":144884,"íμ":144885,"âĸ¸":144886,"ê°¯":144887,"ê¹ħ":144888,"ï½®":144889,"ëĺ¥":144890,"Ä¡":144891,"íĮŁ":144892,"ÐĮ":144893,"ìĨŁ":144894,"ïºĵ":144895,"ﻼ":144896,"ÃĽ":144897,"ãĥ¾":144898,"ëĮĵ":144899,"íĴĭ":144900,"ìķĵ":144901,"ï½¹":144902,"ëĤ¡":144903,"ðŁijĩ":144904,"Ẽ":144905,"ãĢŁ":144906,"ðŁĮŁ":144907,"íĥł":144908,"ãĢĨ":144909,"âĢŁ":144910,"ë¸IJ":144911,"ðŁĮ¹":144912,"ìł¼":144913,"ðŁĵĮ":144914,"ìͬ":144915,"âĹĢ":144916,"ðŁĴĵ":144917,"ê¹İ":144918,"ìĤIJ":144919,"ìĶĮ":144920,"ÑĽ":144921,"âĶĪ":144922,"ë²³":144923,"ãİŀ":144924,"Õ¡":144925,"íĤµ":144926,"ð٤Ķ":144927,"ëĢĶ":144928,"ìĬIJ":144929,"íĻī":144930,"⾦":144931,"ëľ¯":144932,"ìł¯":144933,"ëͧ":144934,"Φ":144935,"ËĪ":144936,"ìī¼":144937,"âĹĬ":144938,"ëľ©":144939,"ëľ°":144940,"ï¾IJ":144941,"ë¿Ķ":144942,"ìĹ®":144943,"ì·Į":144944,"ﺧ":144945,"ÎĴ":144946,"ëµĻ":144947,"ï»Ĭ":144948,"ì°Ķ":144949,"íİĦ":144950,"ðŁĴĹ":144951,"Ẵ":144952,"ì°¢":144953,"íľ¼":144954,"ê½Ĥ":144955,"ì±Ķ":144956,"ìī´":144957,"âĸ¾":144958,"íΰ":144959,"ëĭĽ":144960,"âĿ£":144961,"ェ":144962,"ðŁĴľ":144963,"Ëĺ":144964,"ãħ¤":144965,"âĨĹ":144966,"íĸĦ":144967,"âϬ":144968,"ìķ°":144969,"ïºľ":144970,"âī¡":144971,"ãĢĵ":144972,"ìij¥":144973,"íĮį":144974,"íīģ":144975,"ë»Ĺ":144976,"íľł":144977,"íľ©":144978,"âľĪ":144979,"íĢĦ":144980,"ìĸĩ":144981,"ì¢ĩ":144982,"íŀĻ":144983,"몹":144984,"ãĤĽ":144985,"ðŁĺ±":144986,"ëįŁ":144987,"à¹ħ":144988,"êµ¶":144989,"Ù«":144990,"ìĶģ":144991,"âľª":144992,"ï¾Ī":144993,"ðŁĻĮ":144994,"âļ¡":144995,"Îļ":144996,"ì¼Ī":144997,"ï¾Ķ":144998,"ï¾Ĥ":144999,"êµī":145000,"ﺻ":145001,"ðŁĴĭ":145002,"á¹£":145003,"ÓĻ":145004,"ìĨľ":145005,"ìĹ£":145006,"âľ©":145007,"ìľĻ":145008,"ﺰ":145009,"Ẳ":145010,"ìŀ£":145011,"âĿĮ":145012,"âĺģ":145013,"ìķİ":145014,"Ľ":145015,"Ûģ":145016,"ãĦ±":145017,"ëŁ¿":145018,"íĮ¸":145019,"ê½ī":145020,"ìıł":145021,"ðŁįĢ":145022,"âĨĶ":145023,"ëŃ¡":145024,"ï»ģ":145025,"ï¼Ħ":145026,"ðŁĴ¥":145027,"âĺĽ":145028,"íĹ·":145029,"ëij¡":145030,"Îł":145031,"Τ":145032,"âĦĵ":145033,"ﺷ":145034,"ÎĻ":145035,"ëıĶ":145036,"짤":145037,"âĶĥ":145038,"ãĦ·":145039,"ÇĴ":145040,"ðŁ¥°":145041,"ëĶķ":145042,"ìļ¥":145043,"ì¸Ħ":145044,"íĽĶ":145045,"ïºĩ":145046,"ﺬ":145047,"ðŁĺ¢":145048,"빡":145049,"ì͹":145050,"ų":145051,"ËĿ":145052,"íİij":145053,"ï¾ĵ":145054,"ðŁĴļ":145055,"ëĬij":145056,"꺾":145057,"íĨ°":145058,"ÿ":145059,"ÐĦ":145060,"ëĮIJ":145061,"ë½Ģ":145062,"ì·Ħ":145063,"ðŁĵį":145064,"ðŁĻĪ":145065,"âĹĪ":145066,"ê¿ĩ":145067,"ì¼Ħ":145068,"íİ«":145069,"ðŁĩ·":145070,"âĶĭ":145071,"âļł":145072,"ë±ī":145073,"ìį°":145074,"ìĻĪ":145075,"ɪ":145076,"ïºĭ":145077,"ðŁĺľ":145078,"ÎŁ":145079,"ðŁĻĤ":145080,"âļ½":145081,"ÅĪ":145082,"ë¹Ķ":145083,"íĮľ":145084,"à¹ı":145085,"ìĸ¹":145086,"íĪŃ":145087,"ðŁ¥ĩ":145088,"ãĦ´":145089,"ëĶ¥":145090,"ìŃĪ":145091,"âĪĨ":145092,"ëĸ³":145093,"ë±ĥ":145094,"ìŀ¦":145095,"ï»IJ":145096,"Îľ":145097,"âľ§":145098,"Ïį":145099,"ìłĵ":145100,"âĹķ":145101,"ëĴĢ":145102,"ï»Ģ":145103,"ðŁĶ´":145104,"ê½ģ":145105,"ëĮĪ":145106,"ëİĮ":145107,"ãĤİ":145108,"â¦ģ":145109,"ì½§":145110,"ﯾ":145111,"âĿ¯":145112,"à¸ħ":145113,"ðŁĻĦ":145114,"âĿĢ":145115,"
ðŁĶ¹":145116,"âĩIJ":145117,"êµµ":145118,"âĩĶ":145119,"ë¶IJ":145120,"ðŁĴĽ":145121,"ξ":145122,"íĥ¬":145123,"âĿĦ":145124,"Ò£":145125,"ã̰":145126,"âĪij":145127,"âĺ¼":145128,"âīł":145129,"Ò¯":145130,"ﺯ":145131,"꿨":145132,"âľĸ":145133,"Êĸ":145134,"íĢĢ":145135,"ê¾Ģ":145136,"íĹĿ":145137,"âĶ£":145138,"ãİľ":145139,"ëĶĽ":145140,"뾸":145141,"ﺫ":145142,"ê¿°":145143,"ðŁĩ¹":145144,"ÇIJ":145145,"ÛĴ":145146,"룻":145147,"ïºĸ":145148,"Ñļ":145149,"ëĬł":145150,"Ûķ":145151,"깡":145152,"ë¿ľ":145153,"ì²¼":145154,"ï¨ij":145155,"륵":145156,"ìį¸":145157,"íħħ":145158,"íij¹":145159,"ÖĢ":145160,"ï³Į":145161,"ãħ£":145162,"ìij¤":145163,"ì½ķ":145164,"ëķł":145165,"ðŁĮ¿":145166,"íĥĶ":145167,"ìĽģ":145168,"ζ":145169,"âŀľ":145170,"ìĬĺ":145171,"íĽĹ":145172,"ë©§":145173,"ìīĺ":145174,"Õ¶":145175,"á¹ĩ":145176,"ðŁİģ":145177,"ソ":145178,"ï¼Ĥ":145179,"á¼IJ":145180,"âľķ":145181,"âŀ¢":145182,"ëĦ¨":145183,"컫":145184,"ì¯Ķ":145185,"ì°ľ":145186,"ðŁĴ°":145187,"íħĿ":145188,"ãİı":145189,"ë³¶":145190,"Òĵ":145191,"âĨ³":145192,"ìĥ´":145193,"íģĺ":145194,"âĸĢ":145195,"ë²Ļ":145196,"à¸ĥ":145197,"á½¶":145198,"Äķ":145199,"â¬ĩ":145200,"ë¤ĺ":145201,"ðŁİµ":145202,"âľļ":145203,"ïºı":145204,"Ρ":145205,"âĹī":145206,"ðŁĴ«":145207,"ÐĪ":145208,"ìĸĦ":145209,"ì§Ļ":145210,"ï»ĥ":145211,"ðĿijĴ":145212,"ëŃĦ":145213,"âĿ¥":145214,"âĿĸ":145215,"âĺĿ":145216,"ʹ":145217,"ḥ":145218,"âĢ¿":145219,"ãħħ":145220,"ê¸ģ":145221,"ëķ¡":145222,"ëį¥":145223,"âĪ©":145224,"ê»Ħ":145225,"ë®Į":145226,"Ò±":145227,"âĪĹ":145228,"ëłĻ":145229,"ïºĮ":145230,"ËIJ":145231,"ðŁĺ³":145232,"ðŁij©":145233,"ðŁİ¶":145234,"쿵":145235,"ðŁ¤©":145236,"ê·¤":145237,"ëĮĶ":145238,"ïºIJ":145239,"Ïİ":145240,"ì¶¥":145241,"ï½Ĭ":145242,"á¹Ń":145243,"뤼":145244,"âĸ«":145245,"ì§ł":145246,"á¼Ģ":145247,"ê»ij":145248,"ëĮģ":145249,"í̏":145250,"âĻĽ":145251,"ðŁĴŀ":145252,"âĸ°":145253,"ðĿijĸ":145254,"ëĿ¤":145255,"द":145256,"ì´ĺ":145257,"ðŁĺĩ":145258,"ëͤ":145259,"ÎĹ":145260,"ðŁĻĩ":145261,"ËĽ":145262,"ì©¡":145263,"âΧ":145264,"Õ¥":145265,"ÑĻ":145266,"ëIJ¬":145267,"ëĸĦ":145268,"ðŁĮ·":145269,"ìĹĮ":145270,"ðŁĺ¥":145271,"ëĪ´":145272,"ï»ļ":145273,"ÉĽ":145274,"ïºĦ":145275,"ï»ı":145276,"ÅĮ":145277,"ë²ļ":145278,"ìĭ£":145279,"ïºĢ":145280,"Îĵ":145281,"ðŁĺĮ":145282,"ËĻ":145283,"ëŀı":145284,"ðŁĶ¸":145285,"ðŁĵ·":145286,"ëģ½":145287,"íģ½":145288,"ðŁĴ¡":145289,"ðŁĮ±":145290,"ëºı":145291,"ìģł":145292,"ìĥIJ":145293,"ëıĹ":145294,"츰":145295,"ëĪķ":145296,"ÎĿ":145297,"âģī":145298,"ðŁĮ¼":145299,"íĮł":145300,"âĭ¯":145301,"áĥĺ":145302,"⾤":145303,"ê±Ķ":145304,"íĮİ":145305,"ðŁĴ¯":145306,"ìıĻ":145307,"íĹī":145308,"ÙŃ":145309,"ì½°":145310,"ﺿ":145311,"ï»±":145312,"ì±Į":145313,"âĺķ":145314,"ðŁİĢ":145315,"ÄĿ":145316,"ë°§":145317,"ìĤ¿":145318,"áijķ":145319,"ðŁįĥ":145320,"âĩ¨":145321,"ÎĽ":145322,"ë§´":145323,"ë³ķ":145324,"áijIJ":145325,"âĸĵ":145326,"ðĿijľ":145327,"âĻ»":145328,"íĤ¥":145329,"Õ¸":145330,"ãα":145331,"ëºĢ":145332,"첸":145333,"ïºĽ":145334,"ðŁıĨ":145335,"ðŁĩª":145336,"âĿĵ":145337,"ÄĢ":145338,"ì½¥":145339,"ðŁĩ§":145340,"á½·":145341,"âľĤ":145342,"ìŀ¼":145343,"ï§¡":145344,"ðŁĵ¸":145345,"âϝ":145346,"ÉĶ":145347,"ὸ":145348,"âĮª":145349,"ï»ĸ":145350,"不":145351,"âļ«":145352,"âĶĹ":145353,"ðŁĮĪ":145354,"ﻩ":145355,"ðŁĵ²":145356,"ÏĪ":145357,"ðŁĺ¡":145358,"ðĿijİ":145359,"ìľ½":145360,"짬":145361,"ì§Ĭ":145362,"á½³":145363,"ìĮ¤":145364,"ëĤį":145365,"âīĴ":145366,"ðŁij¨":145367,"âĺĺ":145368,"Ó©":145369,"âĤĵ":145370,"âĪĤ":145371,"ï¹ģ":145372,"ðŁĴIJ":145373,"íħĥ":145374,"ðŁı½":145375,"ê·Ħ":145376,"ðŁĺı":145377,"ðŁĮº":145378,"ðŁĺĶ":145379,"ォ":145380,"âľİ":145381,"ëµĪ":145382,"ðŁĩ¸":145383,"âĢ£":145384,"âŀĶ":145385,"ëĺĺ":145386,"ìĥ¬":145387,"Êĥ":145388,"â¬ħ":145389,"ì©IJ":145390,"ðŁĻ
Ĩ":145391,"ðŁİĦ":145392,"ľ":145393,"⣶":145394,"áĥIJ":145395,"âĺ»":145396,"ì±ķ":145397,"ìģ©":145398,"ë½ķ":145399,"캣":145400,"ðŁijĪ":145401,"ðŁĻĭ":145402,"ï¾ĸ":145403,"Òļ":145404,"Õ«":145405,"ìĮĪ":145406,"ë²§":145407,"ðŁĩ®":145408,"ï½Ŀ":145409,"ðŁįģ":145410,"ìĹ¥":145411,"ij":145412,"ë½IJ":145413,"íį½":145414,"íĽij":145415,"âĤ¹":145416,"ãħģ":145417,"ìͽ":145418,"ðŁĶģ":145419,"य":145420,"ê¾¹":145421,"ëīľ":145422,"âĹ¡":145423,"íķĮ":145424,"Îĺ":145425,"룹":145426,"ìĻĵ":145427,"ðŁĩ¦":145428,"ðŁijĢ":145429,"âĶĮ":145430,"ῦ":145431,"ëĦĽ":145432,"ìĦ£":145433,"ìŃĻ":145434,"ï±ł":145435,"Îŀ":145436,"Ê»":145437,"á¿¶":145438,"âĿĿ":145439,"ê±Ģ":145440,"ëĸ´":145441,"ãĦ¹":145442,"ðŁĴİ":145443,"Ϲ":145444,"âĽħ":145445,"ï»ķ":145446,"ãĥ±":145447,"ï½Ľ":145448,"ëĮķ":145449,"ë¹½":145450,"ì¥Ķ":145451,"쿤":145452,"ðŁĸ¤":145453,"ÑĴ":145454,"ê¹į":145455,"ëİĢ":145456,"ìĭ¯":145457,"뻤":145458,"ðŁĵŀ":145459,"ðŁĵ£":145460,"ðŁĺĿ":145461,"ìį¹":145462,"ìĹ¡":145463,"ì°IJ":145464,"á½IJ":145465,"ï»Ī":145466,"âľį":145467,"Äı":145468,"ðŁĮŀ":145469,"âĦ¦":145470,"ê½Ŀ":145471,"ë»ĺ":145472,"ìα":145473,"âĶĺ":145474,"ðŁĮ»":145475,"âĤ´":145476,"âŀ¨":145477,"íIJģ":145478,"ê¶Ī":145479,"âĺ¢":145480,"ðŁĺĪ":145481,"ゥ":145482,"âĦĹ":145483,"ê°Ń":145484,"ê°¸":145485,"ë»ij":145486,"쥴":145487,"컥":145488,"ï¤Ĭ":145489,"ï»Ĵ":145490,"ðŁĺķ":145491,"âĺĶ":145492,"ìĺIJ":145493,"ðŁļĹ":145494,"ëĹĦ":145495,"ë§ı":145496,"Õ½":145497,"âĸ»":145498,"⣵":145499,"ìī°":145500,"ï»ij":145501,"âĻ©":145502,"Î¥":145503,"ðŁĺ£":145504,"âĬĤ":145505,"ãħĤ":145506,"ìħ¸":145507,"íıĦ":145508,"âľ½":145509,"ì¦Ļ":145510,"âĸ£":145511,"ê±į":145512,"ê¿ĭ":145513,"ì«Ħ":145514,"ìºĩ":145515,"ðŁĩµ":145516,"ðŁijij":145517,"âľĺ":145518,"ðĿijĽ":145519,"ìį½":145520,"ìºī":145521,"וּ":145522,"ðŁĶº":145523,"âĦ®":145524,"íĥ¤":145525,"ðŁĩº":145526,"ðŁĴµ":145527,"íħ¨":145528,"ï½ij":145529,"Ψ":145530,"ìĥ¹":145531,"ìĸķ":145532,"ì¹µ":145533,"ðŁĵ±":145534,"व":145535,"ðŁijĬ":145536,"ðŁĴĦ":145537,"ðŁĴĿ":145538,"ãĮĶ":145539,"ìĻģ":145540,"Ðĩ":145541,"à®IJ":145542,"âĸ¹":145543,"á´Ľ":145544,"âĹĺ":145545,"뺨":145546,"íĥī":145547,"ìĸĮ":145548,"ðŁIJ¶":145549,"ãĤij":145550,"Ëĩ":145551,"Åı":145552,"á½¹":145553,"ìħ§":145554,"ï¹°":145555,"ðĿij¡":145556,"ðŁĶĿ":145557,"ðŁĺ»":145558,"ðŁĴĥ":145559,"ðŁ¤¦":145560,"ðŁįĴ":145561,"í̵":145562,"âľĨ":145563,"ë¹´":145564,"理":145565,"ï»Ļ":145566,"á´Ĺ":145567,"ðŁĮ´":145568,";":145569,"ëĮij":145570,"ì¨ĭ":145571,"쵸":145572,"ðŁİĪ":145573,"ðŁıł":145574,"á½±":145575,"ÛĨ":145576,"á¿ĸ":145577,"âĢĽ":145578,"ì°¼":145579,"íķ¥":145580,"íĹ´":145581,"ðŁĩ¬":145582,"ì°Ŀ":145583,"âĪł":145584,"ï¼ĩ":145585,"âĬĻ":145586,"âĿij":145587,"ëĦĭ":145588,"ëŀĹ":145589,"ë°ī":145590,"ìĹĬ":145591,"ì¢Ĩ":145592,"íĮ¥":145593,"ï°²":145594,"ðŁĵĸ":145595,"ðŁĺ®":145596,"âļª":145597,"ðŁĺļ":145598,"âĿŀ":145599,"ðĿijŁ":145600,"ðŁİĤ":145601,"Åķ":145602,"áIJĪ":145603,"꺽":145604,"ì±ł":145605,"ïºĿ":145606,"ê¿ī":145607,"áĥł":145608,"ðŁıĥ":145609,"ðŁĴ¸":145610,"âĿģ":145611,"âĹ¾":145612,"Úª":145613,"á¹ĥ":145614,"íĬ¬":145615,"ðŁĩ±":145616,"íİŃ":145617,"ðŁĺŀ":145618,"ë¾°":145619,"á¹Ľ":145620,"뼸":145621,"âĿĤ":145622,"êĴ³":145623,"âĶIJ":145624,"íĵ°":145625,"âŀł":145626,"ê´ĺ":145627,"ëħĺ":145628,"뻥":145629,"ì¾ħ":145630,"ðŁĺIJ":145631,"âĪª":145632,"ðŁijģ":145633,"âĪ´":145634,"âĹģ":145635,"ëºIJ":145636,"ìŀ¤":145637,"ì±Ĺ":145638,"ðŁı¾":145639,"Χ":145640,"á½»":145641,"âŀ¥":145642,"ìŁĪ":145643,"ï»ī":145644,"âĸĮ":145645,"ãĥ®":145646,"ðŁ¤¤":145647,"âĩĵ":145648,"ì¼ł":145649,"á´ı":145650,"맬":145651,"뻣":145652,"ðŁĴ¬":145653,"ðŁįĵ":145654,"ĸ":145655,"Ù¹":145656,"Ê¿":145657,"á½°":145658,"ëķľ":145659,"ì°¡":145660,"ì°»":145661,"íİį":145662,"ðŁİ¯":145663
,"ðŁįĤ":145664,"ðŁij§":145665,"âĻ¢":145666,"áĨŀ":145667,"âϧ":145668,"âļľ":145669,"âľī":145670,"ëĵ¦":145671,"ëŃ£":145672,"ìĪı":145673,"ìĵ±":145674,"ÅŃ":145675,"ÊĬ":145676,"âĴ¸":145677,"âĩ©":145678,"ðŁĴĶ":145679,"Õµ":145680,"Ðī":145681,"Ò»":145682,"ë§£":145683,"ìĽľ":145684,"ì¿¡":145685,"íĽħ":145686,"íĽ¤":145687,"ﺢ":145688,"âľĭ":145689,"âĪĪ":145690,"ðŁĮį":145691,"Êľ":145692,"ëĬª":145693,"ëĴ¹":145694,"ﺲ":145695,"âĸĦ":145696,"ãħĪ":145697,"ëļ¤":145698,"íİ©":145699,"â΍":145700,"ðŁ¤ª":145701,"áĥļ":145702,"ê³¶":145703,"íĬķ":145704,"ðŁĺ¬":145705,"âĪ«":145706,"ðŁijĭ":145707,"ÒIJ":145708,"íĬ¿":145709,"ðŁĶµ":145710,"ðŁĴ¨":145711,"ðŁĮĻ":145712,"ëĩ©":145713,"âľ³":145714,"ë¨ģ":145715,"ëºĦ":145716,"ìĻij":145717,"ìºħ":145718,"íıĪ":145719,"ðĿijĻ":145720,"ðŁĴĺ":145721,"ãİ¥":145722,"âĿı":145723,"âľ°":145724,"ﯿ":145725,"ëµIJ":145726,"ì¼IJ":145727,"ﺱ":145728,"Õ´":145729,"ï¬Ģ":145730,"âľ´":145731,"ð٤Ń":145732,"ðŁijĨ":145733,"âĽĶ":145734,"ê·ĵ":145735,"ìĮĮ":145736,"ðŁ¤·":145737,"ÛĶ":145738,"ðŁ§¡":145739,"ðŁĺĵ":145740,"Îĸ":145741,"âı°":145742,"ê²ľ":145743,"ëĭ³":145744,"ëİħ":145745,"ë°Ī":145746,"ï®IJ":145747,"ðŁı¡":145748,"âĨª":145749,"âĵĶ":145750,"âľĬ":145751,"ϲ":145752,"ÜIJ":145753,"ðŁĩ³":145754,"ÖĤ":145755,"âľı":145756,"ìĸĹ":145757,"ì«Ļ":145758,"ðŁĺ²":145759,"ÄŃ":145760,"âĻŃ":145761,"âĶı":145762,"âĹĮ":145763,"ðŁĺ¯":145764,"áµĴ":145765,"íĬł":145766,"Ä·":145767,"Êģ":145768,"à¤Ł":145769,"á¹ģ":145770,"á¼°":145771,"á¿Ĩ":145772,"â«":145773,"⫸":145774,"ëį«":145775,"ì³ĩ":145776,"켤":145777,"íĽ¨":145778,"ðŁĴŁ":145779,"ÊĢ":145780,"ʳ":145781,"ëĵIJ":145782,"âķ°":145783,"âĿĩ":145784,"ÇĢ":145785,"ÇĶ":145786,"É´":145787,"âĺļ":145788,"âĺľ":145789,"ê¶Ĥ":145790,"ì«Ĵ":145791,"ì±Ī":145792,"ðŁĩ¨":145793,"ðŁİ¥":145794,"ðŁĵĿ":145795,"ħ":145796,"ðĿijIJ":145797,"ÛĪ":145798,"ब":145799,"ì¬IJ":145800,"íĹ¥":145801,"âύ":145802,"ðŁį´":145803,"ï¹ı":145804,"Ëĭ":145805,"ðŁ¥º":145806,"âĸ¨":145807,"íĻĭ":145808,"âĪħ":145809,"ëģĻ":145810,"ëŀł":145811,"ìĨ¥":145812,"âĢĸ":145813,"ð٤ĺ":145814,"ðŁIJ»":145815,"áµķ":145816,"ÇĿ":145817,"âĺı":145818,"ïºļ":145819,"ï»Ĥ":145820,"ðŁļ©":145821,"ìĪŁ":145822,"ËĬ":145823,"⤵":145824,"ðŁĴ§":145825,"ãħį":145826,"ë©©":145827,"Ƭ":145828,"Îĩ":145829,"âĩ§":145830,"âĵļ":145831,"ìĤ¯":145832,"ìΝ":145833,"ëĨĭ":145834,"âľ¯":145835,"ðŁļĢ":145836,"Úĺ":145837,"Ú¨":145838,"âľŃ":145839,"ê²ħ":145840,"íĮ°":145841,"íľĻ":145842,"ðŁĮĬ":145843,"ðŁİĵ":145844,"ðŁĺĻ":145845,"Ëĥ":145846,"ðŁĴģ":145847,"ðŁijİ":145848,"âĺ¹":145849,"ðŁĺ«":145850,"ðŁĴ»":145851,"ëĤµ":145852,"ìĿĬ":145853,"íĮ»":145854,"Ò³":145855,"á½²":145856,"âŀŀ":145857,"ëĤij":145858,"ëĿĪ":145859,"죤":145860,"ﻯ":145861,"ðŁĩ©":145862,"ðŁ¥³":145863,"âĴ¼":145864,"ð٦ĭ":145865,"âĺĤ":145866,"ðŁĺ°":145867,"ðŁĻĥ":145868,"ðŁĺĴ":145869,"Ûİ":145870,"Ïķ":145871,"Ḥ":145872,"룽":145873,"ìĬ¥":145874,"ðĿijī":145875,"ÉIJ":145876,"ðŁįİ":145877,"âķ¯":145878,"âķ¹":145879,"າ":145880,"ï¾ł":145881,"ë¹ķ":145882,"ïºĨ":145883,"ʺ":145884,"Ó§":145885,"âĨł":145886,"ëĥĩ":145887,"ìİĪ":145888,"ìŁ¤":145889,"ï±¢":145890,"âķ¬":145891,"âĺł":145892,"ðŁİĬ":145893,"ãįį":145894,"ãİİ":145895,"âĺ°":145896,"âľĥ":145897,"ãħī":145898,"ë¯Ī":145899,"빤":145900,"ìıŃ":145901,"ðĿij¢":145902,"ðŁIJ¾":145903,"Åĭ":145904,"ðŁij¶":145905,"âĶĽ":145906,"ï¿¢":145907,"áĥ¡":145908,"ļ":145909,"ÅĨ":145910,"ÑIJ":145911,"ìĥĽ":145912,"ìĺĮ":145913,"챤":145914,"íħģ":145915,"íļĥ":145916,"ï³Ĭ":145917,"ðĿijĶ":145918,"ðŁĩ«":145919,"âĭ°":145920,"ðŁĺ¨":145921,"âĤ©":145922,"Õ¬":145923,"á¸į":145924,"á»´":145925,"âĨĺ":145926,"âĺ¯":145927,"ãħı":145928,"ìł¬":145929,"âĻĶ":145930,"ðŁĶĶ":145931,"ðŁĺł":145932,"ðŁĻĬ":145933,"à®ľ":145934,"á¹ħ":145935,"âĹIJ":145936,"âĿĪ":14
5937,"âŀ½":145938,"ìĥħ":145939,"ðĿijł":145940,"Æ¢":145941,"âĭĻ":145942,"ê°Ľ":145943,"ëĿµ":145944,"ë£Ł":145945,"ìıľ":145946,"ïºģ":145947,"ðŁĴŃ":145948,"âĬĥ":145949,"ðŁIJ°":145950,"ãħĮ":145951,"Üĵ":145952,"âŀķ":145953,"á½ģ":145954,"ìķ³":145955,"ðĿijĿ":145956,"ðŁİ¬":145957,"É¡":145958,"à¤Ĺ":145959,"áIJī":145960,"ì©ľ":145961,"ì¶§":145962,"ï³ī":145963,"ï»ħ":145964,"ðĿIJŀ":145965,"श":145966,"ðŁĵ¢":145967,"ðŁįĭ":145968,"ðŁĴħ":145969,"ï¾ķ":145970,"â¬Ĩ":145971,"âε":145972,"ð٤ij":145973,"áĥ£":145974,"ÆĦ":145975,"ѹ":145976,"á¼Ķ":145977,"ê°ł":145978,"ê´Į":145979,"ê·IJ":145980,"뼴":145981,"ì±ĺ":145982,"ï®Ń":145983,"ﺹ":145984,"ﺾ":145985,"âľĹ":145986,"âĿ¦":145987,"ðŁij¦":145988,"áĥĹ":145989,"Ù²":145990,"á½´":145991,"âĪı":145992,"âľ®":145993,"ê¹°":145994,"ë²µ":145995,"ìĦĢ":145996,"ì©Ŀ":145997,"ïºŀ":145998,"ﺽ":145999,"ðŁĩŃ":146000,"ËĤ":146001,"ðŁįij":146002,"ðŁįĮ":146003,"ðŁĶ»":146004,"깬":146005,"ìĬŃ":146006,"ìľ·":146007,"ðŁĽij":146008,"ǧ":146009,"ë¼Ľ":146010,"ﺡ":146011,"ﺺ":146012,"ðĿijļ":146013,"ðŁĵ¦":146014,"ðŁĶİ":146015,"ðŁĹĵ":146016,"áĥĶ":146017,"âľĴ":146018,"âľ¡":146019,"ðŁĮµ":146020,"âĶķ":146021,"ëĢĿ":146022,"ðŁįĬ":146023,"âĺĥ":146024,"ìĺħ":146025,"ব":146026,"ð٦ģ":146027,"âݯ":146028,"ðŁIJķ":146029,"Ñ¿":146030,"।":146031,"à¼ĭ":146032,"ê·Ī":146033,"ì«Į":146034,"ðŁĩ°":146035,"âĿī":146036,"ì«Ģ":146037,"íĿĦ":146038,"ðĿIJ¢":146039,"ðŁļ¨":146040,"âϤ":146041,"ðŁĺ©":146042,"ðŁįį":146043,"ðŁĺij":146044,"ðŁļļ":146045,"ÖĦ":146046,"ë«":146047,"뫼":146048,"à¤ı":146049,"á¿·":146050,"âĮ©":146051,"âĺIJ":146052,"âŀ£":146053,"긱":146054,"꼿":146055,"ëĦĿ":146056,"ìı´":146057,"ìļ¤":146058,"쿱":146059,"íİIJ":146060,"ðŁĴ¢":146061,"ì´IJ":146062,"âĩij":146063,"âĶĵ":146064,"âģ¾":146065,"ÜĿ":146066,"ðŁį°":146067,"â´°":146068,"Æı":146069,"ÏŁ":146070,"Úº":146071,"Ûĥ":146072,"áĦĴ":146073,"âĪŁ":146074,"âĿį":146075,"ãĦ²":146076,"ìľħ":146077,"ì¤ı":146078,"ðŁĩ²":146079,"êºĦ":146080,"ðŁİ¤":146081,"âľ£":146082,"â¸Ŀ":146083,"︵":146084,"ວ":146085,"áĢĻ":146086,"âķł":146087,"Õ¯":146088,"âı©":146089,"ðĿij£":146090,"ðŁĴ£":146091,"Åĺ":146092,"à¥IJ":146093,"âģĥ":146094,"âĮĺ":146095,"ê»Į":146096,"ìĮĶ":146097,"ðĿijĺ":146098,"ð٤ĵ":146099,"Õ¿":146100,"à¤Ń":146101,"âĮļ":146102,"âľĿ":146103,"ðŁIJ¼":146104,"ËĮ":146105,"âķļ":146106,"ï¦Ĺ":146107,"âĿķ":146108,"âķ£":146109,"ðŁIJ±":146110,"த":146111,"Ѿ":146112,"à¤ļ":146113,"à¤ľ":146114,"ìĪĦ":146115,"ìļľ":146116,"ðŁİ®":146117,"ÉĴ":146118,"Ú·":146119,"àºį":146120,"âĨµ":146121,"âĪĺ":146122,"âĿĬ":146123,"ë¿į":146124,"ìIJĪ":146125,"ìļĺ":146126,"쯧":146127,"íĥ¯":146128,"ìĸı":146129,"︰":146130,"ðŁĩ¯":146131,"ð٧ļ":146132,"ðŁĺµ":146133,"ðŁĺ·":146134,"ðŁĮ³":146135,"ລ":146136,"Äī":146137,"Ä¥":146138,"âľ¶":146139,"῾":146140,"âĬ±":146141,"âĺ¾":146142,"ê°ī":146143,"ê¼°":146144,"ëºij":146145,"ðŁĶĬ":146146,"ðŁĸIJ":146147,"Ť":146148,"Ò«":146149,"à®®":146150,"âĮĪ":146151,"âĹĹ":146152,"ëĦµ":146153,"ëħľ":146154,"ëľ¹":146155,"ðĿij¥":146156,"ðŁĴ¿":146157,"ðŁĽĴ":146158,"ÊĴ":146159,"áŀĵ":146160,"ðŁIJĿ":146161,"ð٦Ħ":146162,"ðŁį·":146163,"âĺŁ":146164,"︶":146165,"ðŁ¤Ł":146166,"Ô±":146167,"âĨ²":146168,"âĪİ":146169,"âľ«":146170,"ëĩ½":146171,"ëıIJ":146172,"ëķĦ":146173,"靈":146174,"ï§Ŀ":146175,"ïºĻ":146176,"ðŁij»":146177,"ðŁĵº":146178,"êµ¼":146179,"ìĮ©":146180,"ðŁĮ²":146181,"ȱ":146182,"íĶķ":146183,"ðŁĺ¤":146184,"ãĮ¢":146185,"ÊĶ":146186,"ड":146187,"á¼Ī":146188,"ëİĥ":146189,"멱":146190,"ë®Ī":146191,"ðĿIJ«":146192,"âĬķ":146193,"ëĥł":146194,"뻬":146195,"íĭĶ":146196,"Õ¤":146197,"á¼±":146198,"âľ¥":146199,"âĺĦ":146200,"âĪ¥":146201,"âļķ":146202,"ðŁijĦ":146203,"ðŁİħ":146204,"àºĻ":146205,"âͬ":146206,"á½µ":146207,"Õ¾":146208,"Öģ":146209,"âĹĶ":146210,"ê¿į":
146211,"ëĸµ":146212,"ë©İ":146213,"ë®´":146214,"ìķ´":146215,"áĥľ":146216,"ἡ":146217,"âĶĬ":146218,"âķ®":146219,"âĹ¼":146220,"ðŁį¾":146221,"ðŁĽį":146222,"ðŁijĹ":146223,"ð٤ŀ":146224,"âľĦ":146225,"ÕĢ":146226,"ল":146227,"Ëī":146228,"⣨":146229,"į":146230,"ÏĬ":146231,"á´ľ":146232,"ë¹³":146233,"ï³ĭ":146234,"ï¿ł":146235,"Ī":146236,"âĤ¸":146237,"âľ±":146238,"ê»IJ":146239,"ëĭ»":146240,"맸":146241,"ìŀ¿":146242,"쩨":146243,"ìŃIJ":146244,"ì°¿":146245,"íħŁ":146246,"ðĿIJ§":146247,"ðĿijij":146248,"ðŁĮİ":146249,"ðŁĵ®":146250,"ðŁķĶ":146251,"âĹĻ":146252,"âĹ»":146253,"âŀ§":146254,"ìŁĿ":146255,"⾬":146256,"ãĥ°":146257,"âģĪ":146258,"âĵĺ":146259,"ðŁĴĮ":146260,"ï¬ĥ":146261,"àºĶ":146262,"ìͰ":146263,"ðŁĺª":146264,"×Ģ":146265,"ìĥ¨":146266,"ïŃĭ":146267,"ðŁįķ":146268,"ðŁĺ´":146269,"ϳ":146270,"á¼Ħ":146271,"á½ħ":146272,"âĩ¢":146273,"âķŃ":146274,"ìĺ»":146275,"íĬ¤":146276,"Üĺ":146277,"⤴":146278,"âĹį":146279,"áŀŁ":146280,"ðŁįº":146281,"áŀļ":146282,"ðŁıĬ":146283,"ðŁIJ·":146284,"ÊĮ":146285,"ὺ":146286,"âģ»":146287,"ê½Į":146288,"ëĪĹ":146289,"ëĹı":146290,"ì¿°":146291,"í̼":146292,"íįħ":146293,"ï·²":146294,"ðŁĮı":146295,"ðŁį«":146296,"ðŁį³":146297,"ðŁİ°":146298,"ðŁij°":146299,"ðŁĴ²":146300,"á¥Ļ":146301,"ðŁIJŁ":146302,"ï¿¡":146303,"ðŁĹ£":146304,"ðŁįľ":146305,"âľ²":146306,"ãİ¢":146307,"ðŁĶ°":146308,"Ἰ":146309,"á½ij":146310,"Äİ":146311,"áĦĢ":146312,"âĻķ":146313,"ëłĿ":146314,"ìĪ´":146315,"ïŃŃ":146316,"Óľ":146317,"ÔĢ":146318,"ëĢľ":146319,"ëĥĶ":146320,"ìĬĽ":146321,"ì«ij":146322,"캥":146323,"캬":146324,"ðĿij¦":146325,"ðŁĶ¶":146326,"쾨":146327,"ðĿIJļ":146328,"ðŁį»":146329,"ðŁĴį":146330,"ðŁ¤¡":146331,"ðŁķĬ":146332,"â½ĩ":146333,"âĵIJ":146334,"ðŁįŃ":146335,"ðŁįª":146336,"ðŁĶĨ":146337,"Ò¡":146338,"á´ĩ":146339,"ÉĹ":146340,"ÜĶ":146341,"âĦİ":146342,"âĿĥ":146343,"ëĹĢ":146344,"ï²Ķ":146345,"ïºĪ":146346,"ðĿIJ»":146347,"ðŁĴĬ":146348,"ðŁļ«":146349,"Ѱ":146350,"ѳ":146351,"ष":146352,"âĹł":146353,"ðŁij¤":146354,"ï¾ĩ":146355,"âĺĵ":146356,"ðŁįµ":146357,"ðŁ¤¨":146358,"âĸŃ":146359,"à®´":146360,"Ü¢":146361,"ܬ":146362,"à´®":146363,"ðŁķº":146364,"Ô¹":146365,"Õ£":146366,"à´¯":146367,"á´Ģ":146368,"âĮī":146369,"âľIJ":146370,"âŀ¦":146371,"ê¹½":146372,"ëĮľ":146373,"ðŁı¥":146374,"ðŁĵ©":146375,"Ò¹":146376,"Óĺ":146377,"à¤ħ":146378,"âĿ§":146379,"ÆĹ":146380,"âĹ½":146381,"ðŁij«":146382,"ðŁİ§":146383,"ðŁij£":146384,"âľ»":146385,"ðŁĻħ":146386,"ðŁĺĸ":146387,"ðŁĴ®":146388,"ະ":146389,"ðŁĶľ":146390,"ðŁįĦ":146391,"ð٤Ŀ":146392,"áĥĿ":146393,"áŀĢ":146394,"âĩ¦":146395,"ʾ":146396,"Ò®":146397,"Õ¼":146398,"à¤Ĩ":146399,"âĹħ":146400,"âļĵ":146401,"âļĸ":146402,"ê¿©":146403,"ë¯Ħ":146404,"ìIJIJ":146405,"ìŀ°":146406,"ì§Ń":146407,"íĭĭ":146408,"íݨ":146409,"íϧ":146410,"ï²ij":146411,"ðŁİĹ":146412,"Ù³":146413,"ðŁij¸":146414,"ম":146415,"ðŁijķ":146416,"Úµ":146417,"â̾":146418,"âŀ°":146419,"ðŁij¯":146420,"ðŁİ¼":146421,"ðŁıģ":146422,"ĺ":146423,"Êı":146424,"Ú³":146425,"âı±":146426,"ê½Ī":146427,"ëĿĮ":146428,"ìĮī":146429,"ìĹ·":146430,"ìŀ´":146431,"íĹ¹":146432,"íľ¨":146433,"ðĿĹ²":146434,"ðŁĮIJ":146435,"ðŁİĻ":146436,"ðŁıµ":146437,"íĽĻ":146438,"ðĿijħ":146439,"ðŁĺ¶":146440,"âĵħ":146441,"âķ¥":146442,"ðŁįı":146443,"ï¦İ":146444,"Õ©":146445,"ðĿIJĦ":146446,"Ó£":146447,"Ú¿":146448,"âĻļ":146449,"ðŁĶĹ":146450,"ḫ":146451,"âĭ®":146452,"âĸ¦":146453,"âĽ½":146454,"âľµ":146455,"ãħĨ":146456,"ãħĬ":146457,"ëĦĻ":146458,"ëĿ¨":146459,"ë¥Ħ":146460,"ìĦ¦":146461,"ì§°":146462,"ì§¹":146463,"íīĪ":146464,"ï§ij":146465,"ï»ĩ":146466,"ðŁĮ¾":146467,"ðŁıĸ":146468,"ðŁIJij":146469,"ðŁĴ³":146470,"ðŁĵĨ":146471,"Ûĩ":146472,"Üķ":146473,"á½½":146474,"ëĦľ":146475,"à´²":146476,"à´³":146477,"àºŃ":146478,"áĥĽ":146479,"âĿĶ":146480,"âijħ":146481,"áĥ¥":146482,"ðŁ
ĵħ":146483,"âŀ³":146484,"á´µ":146485,"﹡":146486,"ï¹¶":146487,"ÎĨ":146488,"थ":146489,"áīµ":146490,"âĿĻ":146491,"âĿ±":146492,"ëīł":146493,"ëİł":146494,"ëıĽ":146495,"ë¿ħ":146496,"ì͏":146497,"íij¯":146498,"íŀī":146499,"íŀĽ":146500,"ï§Ħ":146501,"ïŃĺ":146502,"ﺦ":146503,"ﻸ":146504,"ðĿijĤ":146505,"ðĿijı":146506,"Ïij":146507,"Úł":146508,"áĢĶ":146509,"áŀĶ":146510,"á¹¢":146511,"ëĦ¸":146512,"ðĿIJ¨":146513,"ðŁĩ´":146514,"Õ°":146515,"ðŁijł":146516,"ðŁįĨ":146517,"ðŁıĢ":146518,"ðŁijIJ":146519,"ðŁįĩ":146520,"ðŁIJ£":146521,"áĪŃ":146522,"ܪ":146523,"ðŁĮĢ":146524,"áŀĺ":146525,"âĩĦ":146526,"ðĿIJĢ":146527,"ÊĻ":146528,"âͼ":146529,"ðŁı¿":146530,"Æ·":146531,"Èł":146532,"ѽ":146533,"âĤ¨":146534,"ê´Ń":146535,"ê¹»":146536,"ë͍":146537,"ìĪĢ":146538,"ì¾°":146539,"íĨĪ":146540,"ï®§":146541,"ﯽ":146542,"ðŁĶħ":146543,"ðŁĶ®":146544,"Å¢":146545,"ʰ":146546,"Ѹ":146547,"ण":146548,"âĬĹ":146549,"ëªĦ":146550,"ï¹·":146551,"ïºħ":146552,"ðĿIJµ":146553,"ðŁĮ¶":146554,"ðŁĵ°":146555,"ðŁĶ·":146556,"ðŁĸĴ":146557,"ðŁ¤²":146558,"ëī©":146559,"ðŁİĨ":146560,"ð٧IJ":146561,"ðŁį®":146562,"âĨº":146563,"âĿ¢":146564,"ðŁijª":146565,"ðŁij±":146566,"âĨ¡":146567,"áŀı":146568,"Úķ":146569,"ðŁį¹":146570,"ðŁĴĢ":146571,"Ë®":146572,"Ó¨":146573,"Öħ":146574,"à¤ĩ":146575,"âĤ¡":146576,"âĪķ":146577,"âĺī":146578,"ê¹¼":146579,"ê¼IJ":146580,"콸":146581,"ðĿIJ¬":146582,"ðŁıħ":146583,"ðŁijĻ":146584,"ðŁĴī":146585,"ð٤Ļ":146586,"Èĺ":146587,"ɳ":146588,"ɹ":146589,"Ùº":146590,"áĢĦ":146591,"ῳ":146592,"âļĺ":146593,"âĿĨ":146594,"ëĨī":146595,"ìĸį":146596,"ìĺĩ":146597,"ì¥ĺ":146598,"íĸħ":146599,"íĻij":146600,"ï®Ĭ":146601,"ï¿Ń":146602,"ðĿĴIJ":146603,"ðĿĹ¢":146604,"ðŁĶĸ":146605,"ðŁĶ¨":146606,"ðŁļij":146607,"ðŁļ²":146608,"Ƹ":146609,"âĹ¥":146610,"ðĿIJŃ":146611,"ðŁį½":146612,"âĹij":146613,"âĵĩ":146614,"ðŁĶ±":146615,"âľ¼":146616,"ï¹ĥ":146617,"âķ±":146618,"ãĢĹ":146619,"ðŁıĭ":146620,"ðŁļ´":146621,"ðĿIJ®":146622,"Äļ":146623,"Õı":146624,"Ķ":146625,"áĥij":146626,"Ṭ":146627,"ÄĪ":146628,"ÄĴ":146629,"Ò°":146630,"Óķ":146631,"âIJ":146632,"âIJ£":146633,"âĹ¢":146634,"âļĻ":146635,"ãħĹ":146636,"ê°¬":146637,"곪":146638,"ê»Ģ":146639,"ëĦ´":146640,"ëİģ":146641,"ëĿĶ":146642,"묽":146643,"ëŃį":146644,"ìĩ³":146645,"ì°¹":146646,"íĮ¹":146647,"íŀĿ":146648,"ï®ĭ":146649,"ï¶Ī":146650,"ðĿĴĤ":146651,"ðŁ¥Ģ":146652,"ð٦ħ":146653,"Êĺ":146654,"á¼ij":146655,"âģİ":146656,"ðŁįŀ":146657,"âĨĸ":146658,"âĨĻ":146659,"ðŁİĥ":146660,"âĦ¡":146661,"âĭ±":146662,"ðŁĶį":146663,"ನ":146664,"áµĥ":146665,"âĶ«":146666,"⦿":146667,"ðŁĩ»":146668,"Ƥ":146669,"Òı":146670,"Ò·":146671,"Ûī":146672,"à®ķ":146673,"ḳ":146674,"בּ":146675,"ðŁĨĶ":146676,"ÚŃ":146677,"Û¦":146678,"áħ¡":146679,"âĦ¹":146680,"ê¿İ":146681,"ëķĶ":146682,"ë¼ī":146683,"ìļ§":146684,"ì²µ":146685,"ì´¨":146686,"íĬĪ":146687,"íĸIJ":146688,"ðĿĹĺ":146689,"ðŁĩ¿":146690,"ðŁİĸ":146691,"ðŁijħ":146692,"ðŁĵĺ":146693,"ðŁļĻ":146694,"ðŁĽµ":146695,"à¶½":146696,"⼵":146697,"ðĿIJ³":146698,"ðĿIJ¸":146699,"âļĶ":146700,"ðŁijŃ":146701,"Óij":146702,"â͝":146703,"ðŁħ¿":146704,"ðŁĺ¹":146705,"ï¿«":146706,"⼤":146707,"ðŁĴĩ":146708,"ðŁĵİ":146709,"ðŁĸĭ":146710,"স":146711,"ðĿIJį":146712,"IJ":146713,"Ïĭ":146714,"Ѭ":146715,"Ú¬":146716,"ÜĴ":146717,"á´¬":146718,"ï¨Ħ":146719,"É£":146720,"Ëij":146721,"ϵ":146722,"ÒĿ":146723,"Û¥":146724,"Üł":146725,"à¹Ľ":146726,"áĥķ":146727,"áĬķ":146728,"á¾¶":146729,"âĤ·":146730,"âĩ¾":146731,"âķ©":146732,"âĸIJ":146733,"âĺª":146734,"âĺ®":146735,"âĿļ":146736,"âĿŃ":146737,"âŀ±":146738,"âµİ":146739,"ãıĬ":146740,"ë©ĵ":146741,"ìĹ¾":146742,"ìªĦ":146743,"íĵĮ":146744,"íķ¼":146745,"ïѬ":146746,"ðĿijĨ":146747,"ðĿijŀ":146748,"ðĿĸĬ":146749,"ðŁİ¸":146750,"ðŁıĦ":146751,"ðŁijµ":146752,"ðŁĴł":146753,"ðŁĶĺ":146754,"ðŁ¥Ĥ":
146755,"Ū":146756,"à·ĥ":146757,"á´¼":146758,"âĬ°":146759,"ë³ı":146760,"ë´£":146761,"ï¥ľ":146762,"ðŁĵĪ":146763,"ðŁķ¯":146764,"ð٧Ģ":146765,"âĻIJ":146766,"ðŁĨĹ":146767,"ðŁĵķ":146768,"ð٧ģ":146769,"Ü«":146770,"âĿIJ":146771,"Õķ":146772,"à½ķ":146773,"âŀĿ":146774,"à¦ķ":146775,"ðĿIJ¶":146776,"É¢":146777,"ÎĦ":146778,"áĨ¢":146779,"âĤ±":146780,"Õį":146781,"à¡ķ":146782,"á´°":146783,"ḩ":146784,"⼷":146785,"âĿ®":146786,"ê¡ĵ":146787,"ëı¤":146788,"ëĹIJ":146789,"ëµĮ":146790,"ìijĪ":146791,"íı¿":146792,"íŵ":146793,"ðĿIJİ":146794,"ðŁĨĺ":146795,"ðŁıŁ":146796,"É¥":146797,"Õ»":146798,"à¡Ķ":146799,"à¤ĸ":146800,"á´¸":146801,"âİĻ":146802,"âİ¥":146803,"âı³":146804,"ëģķ":146805,"ëĬī":146806,"ì¡į":146807,"칡":146808,"禮":146809,"ï¬Ł":146810,"ﮫ":146811,"ﮯ":146812,"ï±ĥ":146813,"ï·»":146814,"ﺵ":146815,"ðĿĹĶ":146816,"ðĿĹ¡":146817,"ðŁİ¨":146818,"ðŁĶĴ":146819,"ÚĽ":146820,"ध":146821,"âŀ¹":146822,"áĢĢ":146823,"ðŁįħ":146824,"âŤ":146825,"à¤ł":146826,"ðŁIJ¥":146827,"áĥĴ":146828,"ðŁıĿ":146829,"ðŁį¼":146830,"ãĮ§":146831,"âĿĽ":146832,"ðŁIJĪ":146833,"য":146834,"áĢŀ":146835,"ãĢĸ":146836,"áŀĻ":146837,"প":146838,"ÕĨ":146839,"âĬĨ":146840,"âľ¾":146841,"ðŁIJĹ":146842,"ﹿ":146843,"Ħ":146844,"ÜŁ":146845,"à²ł":146846,"ಥ":146847,"áŀī":146848,"á´¥":146849,"á´©":146850,"á½Ģ":146851,"ὡ":146852,"âĨķ":146853,"âŀ¯":146854,"ê¡ij":146855,"ëij£":146856,"ë±Į":146857,"ìĪij":146858,"ìľĶ":146859,"ìŀ½":146860,"ì¨į":146861,"ðĿijĢ":146862,"ðŁĮĮ":146863,"ðŁį¦":146864,"ðŁį©":146865,"ðŁIJļ":146866,"ðŁĵĴ":146867,"ðŁĵ¹":146868,"ðŁ¥ij":146869,"Äĭ":146870,"ËĹ":146871,"Ñ«":146872,"Õ¢":146873,"Ú°":146874,"âĮĢ":146875,"âĹĤ":146876,"âĹ£":146877,"⾼":146878,"âĿĴ":146879,"âĿĺ":146880,"âŀĻ":146881,"âŀ²":146882,"ãİį":146883,"ê¡IJ":146884,"ëŀĸ":146885,"ìĬĿ":146886,"ìĽ¤":146887,"ì¡ĭ":146888,"쨰":146889,"íĹĻ":146890,"兩":146891,"ï³į":146892,"ï»İ":146893,"ðĿijĵ":146894,"ðŁĵĬ":146895,"ðŁļ¼":146896,"ï¦ģ":146897,"ðĿķĴ":146898,"ðŁijľ":146899,"ðŁij¿":146900,"ðŁĩ½":146901,"à·Ħ":146902,"âĸ´":146903,"ãįī":146904,"âĬĩ":146905,"ðŁ§¸":146906,"Ú¡":146907,"â¾ĥ":146908,"ðŁĹ»":146909,"âĵij":146910,"ðŁ¤¸":146911,"ðŁ¤¯":146912,"êĴ°":146913,"ðĿIJĵ":146914,"âĶ´":146915,"êĴ±":146916,"áĢĺ":146917,"âĽĦ":146918,"ï¹¹":146919,"ÓĶ":146920,"áĥ±":146921,"Ü¡":146922,"ßŀ":146923,"âĻı":146924,"⾸":146925,"ìij¨":146926,"ðĿIJĿ":146927,"ðĿIJ¥":146928,"ðŁįī":146929,"ðŁij¼":146930,"ðŁ¥Ŀ":146931,"ÆĶ":146932,"ݬ":146933,"फ":146934,"àºļ":146935,"á´´":146936,"á½ĸ":146937,"âĤ¶":146938,"âİ¢":146939,"âĿħ":146940,"⣫":146941,"ãİĽ":146942,"뮨":146943,"ëºĮ":146944,"ë¼ĺ":146945,"ìĨĿ":146946,"ìľ³":146947,"ìŀĮ":146948,"ì£Ĺ":146949,"ìªĺ":146950,"컹":146951,"ï·¼":146952,"ïºĤ":146953,"ðĿIJ´":146954,"ðĿIJ¼":146955,"ðŁĮļ":146956,"ðŁı«":146957,"ðŁĴ¤":146958,"ðŁĴ¶":146959,"ðŁĴ¼":146960,"Êķ":146961,"ʽ":146962,"â²Ł":146963,"ãīł":146964,"ê¡Ĵ":146965,"ëľĢ":146966,"ìĥ¾":146967,"츤":146968,"ï¥ģ":146969,"ðĿļĬ":146970,"ðŁļĥ":146971,"âŀĽ":146972,"ìħ´":146973,"áĦĭ":146974,"âĩĹ":146975,"ï§·":146976,"âĺĸ":146977,"ðŁIJ¦":146978,"⸾":146979,"ðŁĴ´":146980,"ð٤ļ":146981,"ãĬĹ":146982,"âĮĽ":146983,"áĪĽ":146984,"༺":146985,"â½ī":146986,"ðŁı¢":146987,"âĵŀ":146988,"âĺ½":146989,"ãĢĻ":146990,"ðŁ¤®":146991,"ÅIJ":146992,"áĥ¬":146993,"ðĿĹ»":146994,"ðŁįĸ":146995,"ÆĬ":146996,"ÊŁ":146997,"ßĭ":146998,"à¤ĭ":146999,"áµĶ":147000,"á¿ĥ":147001,"âĦī":147002,"âĮĭ":147003,"âı²":147004,"âĵĪ":147005,"âĵ¢":147006,"âķĶ":147007,"âļij":147008,"âĿĭ":147009,"âĿİ":147010,"⵾":147011,"âµ£":147012,"ëĴĪ":147013,"ëľģ":147014,"ë¶ĩ":147015,"ìį»":147016,"ìĺŃ":147017,"ì§¢":147018,"íĹĢ":147019,"ï§Ĭ":147020,"טּ":147021,"ﱡ":147022,"ðĿIJº":147023,"ðĿij§":147024,"ðĿĺ¦":147025,"ðŁĵ¥":147026,"ðŁĺŁ":147027,"ðŁ¥I
J":147028,"Äĸ":147029,"ɨ":147030,"áĢIJ":147031,"áĥĵ":147032,"áºĵ":147033,"á¼¶":147034,"á½Ħ":147035,"âĤ¤":147036,"âĮľ":147037,"âĮŁ":147038,"âİł":147039,"⼸":147040,"âµį":147041,"âµı":147042,"âµĵ":147043,"ãĢĺ":147044,"ë·¸":147045,"íħ¼":147046,"ï¦Į":147047,"ïŃĦ":147048,"ïŃİ":147049,"ðĿĻļ":147050,"ðĿļĺ":147051,"à¼ĵ":147052,"ëŃħ":147053,"áIJĽ":147054,"ãݾ":147055,"ï¨Ģ":147056,"ðŁĹ½":147057,"âĻŀ":147058,"Ëĸ":147059,"âĹŀ":147060,"ðŁ¤«":147061,"ðŁĺĹ":147062,"ヲ":147063,"ðŁ¤¢":147064,"âģĩ":147065,"ã̵":147066,"ðŁįĶ":147067,"áĬł":147068,"ðŁĺ¼":147069,"ðĿĹ®":147070,"ðŁIJ³":147071,"ðĿIJĭ":147072,"ðŁĨļ":147073,"ðŁĶĽ":147074,"Ñ»":147075,"ܨ":147076,"ல":147077,"âľŀ":147078,"âµĻ":147079,"êµ£":147080,"츨":147081,"ðĿIJľ":147082,"ðĿĺ°":147083,"ðŁĶ½":147084,"Ç»":147085,"Ç¿":147086,"Êĩ":147087,"ÎIJ":147088,"ÐĢ":147089,"Ñ¡":147090,"Ѳ":147091,"ÒĴ":147092,"Ù¶":147093,"ßķ":147094,"à¶±":147095,"áIJģ":147096,"âģŀ":147097,"âĸ§":147098,"âĽĪ":147099,"âľľ":147100,"âľ¹":147101,"âŁ¹":147102,"â¤ĩ":147103,"ê²Ĭ":147104,"ê¾ľ":147105,"ë¯IJ":147106,"ë³IJ":147107,"ìħ©":147108,"ìIJ¬":147109,"ìij¹":147110,"ï¤Ķ":147111,"ï¦ļ":147112,"ï¬ł":147113,"ïŃĶ":147114,"ﺶ":147115,"ðĿĴı":147116,"ðĿĸĨ":147117,"ðĿŶ":147118,"ðŁıĤ":147119,"ðŁIJ½":147120,"ðŁĴ©":147121,"ðŁĵ½":147122,"ðŁĹ¨":147123,"ðŁĹº":147124,"ðŁĺ¸":147125,"ðŁ¥§":147126,"ÅĹ":147127,"Êİ":147128,"ÒĻ":147129,"ײ":147130,"à¤Ī":147131,"á¼´":147132,"á¿ij":147133,"âµī":147134,"ãħĵ":147135,"ì½´":147136,"ðĿĸĵ":147137,"ðŁĵĹ":147138,"ðŁĶª":147139,"ðŁĸį":147140,"ÏĴ":147141,"ðŁij¬":147142,"áĥĻ":147143,"âĨ¬":147144,"âͤ":147145,"âĽ¹":147146,"âĻŁ":147147,"ðŁļ¶":147148,"ðŁij¾":147149,"âĪĭ":147150,"ðŁIJ¯":147151,"à¼İ":147152,"âľ·":147153,"ï¨Ļ":147154,"âĶ»":147155,"ðŁij¹":147156,"áĦī":147157,"ສ":147158,"â¾ı":147159,"â½ħ":147160,"ãİĸ":147161,"Ñ´":147162,"Õ®":147163,"Ú¼":147164,"áĢķ":147165,"áĨ¼":147166,"ëŃı":147167,"ðŁIJ¸":147168,"ðŁļ£":147169,"ÆĿ":147170,"Ô»":147171,"áĥ¢":147172,"ðŁį¯":147173,"ɦ":147174,"Õ¦":147175,"âĻĭ":147176,"שׂ":147177,"ðĿŦ":147178,"Çļ":147179,"ɱ":147180,"à¤ī":147181,"á´Ħ":147182,"âĻĵ":147183,"⼰":147184,"âŁª":147185,"ëĥĺ":147186,"뢸":147187,"ìĤij":147188,"ï®Ķ":147189,"ðĿķĸ":147190,"ðĿŧ":147191,"ðŁĩ¼":147192,"ðŁĵĭ":147193,"ðŁļľ":147194,"ðŁ¥¤":147195,"Ä®":147196,"Å·":147197,"ßĬ":147198,"॥":147199,"ப":147200,"áŀĦ":147201,"áµĢ":147202,"á¸ħ":147203,"á¼¢":147204,"âĪĿ":147205,"âĬ¹":147206,"âĴ¶":147207,"âķ´":147208,"⼱":147209,"âĽ³":147210,"âĽº":147211,"âŀŁ":147212,"ãıĦ":147213,"ê¸Ķ":147214,"ê¹Ł":147215,"ëĩ°":147216,"ë¹»":147217,"ìĤ¥":147218,"ìĽ»":147219,"ì°Ł":147220,"íĥ°":147221,"íĨº":147222,"íļ½":147223,"老":147224,"量":147225,"ï³Ŀ":147226,"ðĿIJ¦":147227,"ðĿĴľ":147228,"ðĿĴŁ":147229,"ðĿļĹ":147230,"ðŁİŃ":147231,"ðŁıĵ":147232,"ðŁı³":147233,"ðŁıº":147234,"ðŁIJį":147235,"ðŁijĥ":147236,"ðŁĴı":147237,"ð٤ĸ":147238,"ðŁ¤µ":147239,"Õ²":147240,"âµĶ":147241,"ëĺ¬":147242,"念":147243,"ÊĤ":147244,"áĨ«":147245,"áŀij":147246,"ðĿĸİ":147247,"ðĿĹĸ":147248,"áĦĥ":147249,"âĩł":147250,"áĢ¡":147251,"à½Ħ":147252,"âŀ¸":147253,"ï¦Ļ":147254,"âĩļ":147255,"ðŁIJ¬":147256,"ðŁIJ¢":147257,"â¾Ĵ":147258,"ðŁIJ¤":147259,"ðŁĶ«":147260,"ãĢŀ":147261,"︺":147262,"ðŁĺº":147263,"â½´":147264,"ðŁĨķ":147265,"âģ¿":147266,"ðŁį¨":147267,"à²ķ":147268,"ðŁļĺ":147269,"áŀħ":147270,"à¦ħ":147271,"áŀ¢":147272,"à¨ľ":147273,"âļĮ":147274,"ã̽":147275,"à·´":147276,"âĵĽ":147277,"áĢľ":147278,"ìĨ¨":147279,"Ë©":147280,"ÜĹ":147281,"âĭ¼":147282,"ðŁĻī":147283,"ÅĬ":147284,"Éĵ":147285,"ʲ":147286,"ΰ":147287,"Ѽ":147288,"Ô¿":147289,"à¡IJ":147290,"à¼ľ":147291,"ས":147292,"á¶ľ":147293,"âĤ²":147294,"âĨ¨":147295,"âĬ¥":147296,"âķ§":147297,"âĻľ":147298,"ãĭ¡":147299,"ë´¬":147300,"ë¶i
j":147301,"ìī¿":147302,"ìİħ":147303,"ìł±":147304,"ì°§":147305,"ﲡ":147306,"ðĿĴĽ":147307,"ðĿķ£":147308,"ðĿĹľ":147309,"ðŁį²":147310,"ðŁİ©":147311,"ðŁIJIJ":147312,"ðŁIJł":147313,"ðŁij½":147314,"ðŁĴij":147315,"ðŁĵľ":147316,"ðŁķµ":147317,"ðŁļĮ":147318,"ðŁĽ£":147319,"Êĭ":147320,"Ó¯":147321,"Ù¸":147322,"ßĶ":147323,"ßĻ":147324,"à¡ĵ":147325,"á´į":147326,"ḿ":147327,"âıº":147328,"âĸ¥":147329,"뤽":147330,"íľij":147331,"ðĿIJ¹":147332,"ðĿĸĶ":147333,"ðĿļİ":147334,"ðŁĵĦ":147335,"ðŁ¦·":147336,"Æĥ":147337,"à¦Ł":147338,"âĮĤ":147339,"âĺŃ":147340,"â²ļ":147341,"ëĿķ":147342,"ðŁİ£":147343,"à®ĩ":147344,"à½Ĩ":147345,"áħµ":147346,"áĹľ":147347,"â̽":147348,"âĮ£":147349,"âģ½":147350,"ðŁĵ¬":147351,"ðŁ¤§":147352,"âĩª":147353,"â½£":147354,"âĹŁ":147355,"ï¨Ĺ":147356,"êĴª":147357,"ðŁĽĢ":147358,"ÇĤ":147359,"ðŁ¥¶":147360,"ðŁİį":147361,"ï¿©":147362,"ðŁijĴ":147363,"áµĪ":147364,"︿":147365,"áħ©":147366,"⾦":147367,"à°¤":147368,"á´ĸ":147369,"ਬ":147370,"àºĹ":147371,"༻":147372,"Ѻ":147373,"ਪ":147374,"á´³":147375,"ðĿIJĪ":147376,"à»Ģ":147377,"á´¿":147378,"âĤį":147379,"âĩ¡":147380,"âĽª":147381,"ðĿIJĤ":147382,"ðĿĴķ":147383,"ðŁIJľ":147384,"Êį":147385,"ѱ":147386,"à½ĥ":147387,"ë®IJ":147388,"ìĽ¡":147389,"ìľģ":147390,"ðĿIJ¿":147391,"ðĿķł":147392,"ðŁijĽ":147393,"ƪ":147394,"Ϻ":147395,"Ó¬":147396,"Ù¿":147397,"Ý£":147398,"àªī":147399,"ஹ":147400,"à½ij":147401,"áĨ¯":147402,"áµĩ":147403,"âĩ¥":147404,"âıª":147405,"âϰ":147406,"âļŃ":147407,"âļ¾":147408,"ãħĦ":147409,"ḛ̂":147410,"ê°Ĺ":147411,"ê²ĭ":147412,"ê²»":147413,"ê¶ľ":147414,"ê¼ĩ":147415,"ê½¹":147416,"ëĤŁ":147417,"ëħĪ":147418,"ëĭ¢":147419,"ë§Ł":147420,"ëªĨ":147421,"ëµĢ":147422,"ì½±":147423,"íĩĺ":147424,"íľľ":147425,"ï§¾":147426,"ï±µ":147427,"ï²¢":147428,"ﲤ":147429,"ðĿĴĬ":147430,"ðĿĺ¯":147431,"ðŁįĹ":147432,"ðŁıį":147433,"ðŁIJĺ":147434,"ðŁĵ¡":147435,"ðŁĶŀ":147436,"ðŁ¤³":147437,"ðŁ¥ģ":147438,"ðŁ¥Ĺ":147439,"ð٦Ĭ":147440,"ĵ":147441,"Ʀ":147442,"ǵ":147443,"ɯ":147444,"Îı":147445,"ÕĦ":147446,"Ü¥":147447,"à½ģ":147448,"ᨳ":147449,"âķ«":147450,"ãİī":147451,"ë·´":147452,"ìĨİ":147453,"ìİĮ":147454,"죵":147455,"íĽł":147456,"離":147457,"ï³ı":147458,"ﻺ":147459,"ðĿijģ":147460,"ðĿijĩ":147461,"ðĿĴĨ":147462,"ðŁİł":147463,"ðŁIJĶ":147464,"ðŁijŁ":147465,"Åĸ":147466,"à¤Į":147467,"á¾½":147468,"ê¦Ĵ":147469,"à®Ł":147470,"á´±":147471,"ðŁı°":147472,"ðŁIJŀ":147473,"à½Ģ":147474,"áĢħ":147475,"âĬ¿":147476,"ðŁIJ§":147477,"áĽģ":147478,"â¼Ī":147479,"âĶ¿":147480,"ðŁ¥´":147481,"⼿":147482,"ðŁ§ľ":147483,"ãħ¿":147484,"âĦ«":147485,"ã̳":147486,"ãĬĻ":147487,"â¼Ģ":147488,"怜":147489,"ðŁı¬":147490,"ðŁĵ»":147491,"áĬĽ":147492,"áĦħ":147493,"àºĬ":147494,"àºĽ":147495,"áħ³":147496,"ðŁij®":147497,"à®±":147498,"âĺĩ":147499,"ðĿIJı":147500,"à´µ":147501,"à»ģ":147502,"à½ı":147503,"ར":147504,"ᥱ":147505,"âĤ£":147506,"復":147507,"ïŃĻ":147508,"ï´©":147509,"ï¹Ĥ":147510,"ðŁį£":147511,"ðŁķ¹":147512,"Ïĸ":147513,"ම":147514,"ຢ":147515,"áĭŃ":147516,"âİĿ":147517,"âĹĿ":147518,"âĻĪ":147519,"âĻİ":147520,"ê½¥":147521,"ì³Ķ":147522,"ì¼ij":147523,"ï±°":147524,"ðĿijĥ":147525,"ðŁĮª":147526,"ðŁį¡":147527,"Åİ":147528,"ʦ":147529,"ѧ":147530,"Óİ":147531,"Ô´":147532,"ÚĪ":147533,"ßĵ":147534,"ß§":147535,"à¤Ķ":147536,"áĪ«":147537,"áε":147538,"áĹ©":147539,"á´ł":147540,"á¼ł":147541,"âĢĹ":147542,"âģij":147543,"âĦı":147544,"âĸĩ":147545,"â²£":147546,"ãĦ³":147547,"ãī®":147548,"ê³Ĺ":147549,"ëĦĴ":147550,"ëĸ«":147551,"ë¡Ħ":147552,"ë¹°":147553,"ë½ģ":147554,"ìĦģ":147555,"ìĮĺ":147556,"ìŁĮ":147557,"ì³ī":147558,"ì¼ķ":147559,"כּ":147560,"ï³İ":147561,"ﹸ":147562,"ï¹¾":147563,"ðĿIJĨ":147564,"ðĿij·":147565,"ðĿĽ¼":147566,"ðŁİı":147567,"ðŁİŀ":147568,"ðŁIJĻ":147569,"ðŁijĤ":147570,"ðŁĵģ":147571,"ðŁĸ±":147572,"ðŁļį":14757
3,"ðŁļ§":147574,"ðŁĽ¡":147575,"ð٤Ĵ":147576,"ðŁ¥ŀ":147577,"ðŁ¥©":147578,"ð٦Ģ":147579,"ð٦ĸ":147580,"Ë¢":147581,"Üļ":147582,"வ":147583,"áĢģ":147584,"áī°":147585,"âıŃ":147586,"âĻ¿":147587,"ê³ĺ":147588,"ëıĿ":147589,"ëķĥ":147590,"ìħĮ":147591,"ìĴ¸":147592,"ìĽŁ":147593,"íħĦ":147594,"íľ«":147595,"ï§ĺ":147596,"↓":147597,"ðŁı·":147598,"ðŁĶ§":147599,"ðŁ¥Ī":147600,"Æĸ":147601,"áŀĩ":147602,"áŀĸ":147603,"âģº":147604,"âĹľ":147605,"âŀ©":147606,"ê¦Ń":147607,"ëϤ":147608,"ïѼ":147609,"ðĿĻĸ":147610,"ðĿĻ£":147611,"ðĿϤ":147612,"ðŁĮĿ":147613,"ðŁĶij":147614,"ðŁĽł":147615,"àºĩ":147616,"âĺ£":147617,"ãĦ¨":147618,"ðĿĸĹ":147619,"Óĵ":147620,"âĨ£":147621,"ðŁ¥ī":147622,"ðŁĮł":147623,"ðŁĺ½":147624,"ãİł":147625,"ŧ":147626,"ðŁIJĴ":147627,"ï§IJ":147628,"ðŁĺ¿":147629,"âά":147630,"ðŁIJ®":147631,"⣱":147632,"ಡ":147633,"â¾¼":147634,"à°²":147635,"˶":147636,"âĸ¿":147637,"ÕĪ":147638,"áŀİ":147639,"áħ¥":147640,"áŀĹ":147641,"Õ§":147642,"ð٤IJ":147643,"ðŁįł":147644,"ত":147645,"ය":147646,"âĻį":147647,"ìĺĻ":147648,"íĺĵ":147649,"ﹺ":147650,"ðŁĽ³":147651,"Åī":147652,"á´İ":147653,"âıľ":147654,"âͳ":147655,"긷":147656,"ì¡Ķ":147657,"ðĿĴĪ":147658,"ðĿĴį":147659,"ðĿĴ¹":147660,"ðĿĵĩ":147661,"ðĿķŁ":147662,"ðĿĹ¹":147663,"ðŁĮħ":147664,"ðŁı´":147665,"ÄĶ":147666,"Ĥ":147667,"ŵ":147668,"Ǿ":147669,"Ïŀ":147670,"϶":147671,"Ô³":147672,"ÜĨ":147673,"ß©":147674,"à¡Ĵ":147675,"à¤ĺ":147676,"à¶ļ":147677,"à½ĸ":147678,"áģĬ":147679,"áĥŀ":147680,"áĦĤ":147681,"áĭ«":147682,"á´º":147683,"ḣ":147684,"Ḫ":147685,"á¹Ĥ":147686,"á¼·":147687,"á¿ĩ":147688,"âĩĮ":147689,"âı¬":147690,"âĻĮ":147691,"⮣":147692,"â´»":147693,"ⵣ":147694,"ê¦ķ":147695,"ꦪ":147696,"ꦮ":147697,"ê²Ħ":147698,"ê¾IJ":147699,"ëĥij":147700,"ëķĭ":147701,"롸":147702,"ë¬Ģ":147703,"ìĩ¤":147704,"ìĪ©":147705,"ìľķ":147706,"ìŃĺ":147707,"ì·°":147708,"ì·¸":147709,"íľĢ":147710,"藍":147711,"ï§į":147712,"ï±Ħ":147713,"ï³ij":147714,"ðĿIJ¤":147715,"ðĿĴĵ":147716,"ðĿĴ¶":147717,"ðĿĹ¼":147718,"ðĿĻĬ":147719,"ðŁĩ¾":147720,"ðŁĮĽ":147721,"ðŁĮ®":147722,"ðŁİĩ":147723,"ðŁİ²":147724,"ðŁıĽ":147725,"ðŁij¥":147726,"ðŁij´":147727,"ðŁĴĨ":147728,"ðŁĵĤ":147729,"ðŁĵ§":147730,"ðŁķIJ":147731,"ðŁĸķ":147732,"ðŁĺ§":147733,"ðŁĻĢ":147734,"ðŁļĴ":147735,"ðŁĽ«":147736,"ðŁ¤ł":147737,"ðŁ¥ļ":147738,"ðŁ¥Ľ":147739,"ðŁ¥£":147740,"ǯ":147741,"ȧ":147742,"ÎĬ":147743,"Ò²":147744,"×°":147745,"Ûij":147746,"áĥ©":147747,"áĦĮ":147748,"áĪį":147749,"áī¥":147750,"áıĤ":147751,"âģ±":147752,"âĬ¢":147753,"âĹĵ":147754,"âĿ°":147755,"ë¿¡":147756,"ìĽ©":147757,"íģŃ":147758,"íĨ³":147759,"íĬĦ":147760,"íĵ¸":147761,"北":147762,"若":147763,"ï±IJ":147764,"ﱯ":147765,"ï³ļ":147766,"ðĿĸĺ":147767,"ðĿĺĢ":147768,"ðŁIJĬ":147769,"ðŁIJĮ":147770,"ðŁijļ":147771,"ðŁĵĥ":147772,"ðŁļĽ":147773,"ðŁļª":147774,"ðŁ¤°":147775,"Ä´":147776,"áĥ®":147777,"áŨ":147778,"âĻ®":147779,"â²ŀ":147780,"ãĪĶ":147781,"ìħį":147782,"ãħĥ":147783,"率":147784,"ມ":147785,"Õİ":147786,"Õº":147787,"⬼":147788,"⽤":147789,"ðĿIJ²":147790,"âŀµ":147791,"áĢĽ":147792,"âĶħ":147793,"âĨŁ":147794,"â¼Ĭ":147795,"ðŁĮ½":147796,"ðŁļ¿":147797,"ï¦Ĭ":147798,"ãĦ£":147799,"⼩":147800,"ï©Ľ":147801,"ðŁį±":147802,"⾨":147803,"à´¤":147804,"áŀģ":147805,"àºŀ":147806,"Êļ":147807,"ðĿIJĴ":147808,"à´±":147809,"áŀľ":147810,"ன":147811,"à°Ĺ":147812,"à´ļ":147813,"âĩ£":147814,"ï¦ķ":147815,"Õħ":147816,"Æĺ":147817,"âĤ¦":147818,"âĶĦ":147819,"ï¦Ł":147820,"嶺":147821,"ðĿIJģ":147822,"ðĿIJĥ":147823,"ðŁį¸":147824,"ðŁIJ²":147825,"Ŷ":147826,"Éĸ":147827,"ßĺ":147828,"ฦ":147829,"à½Ķ":147830,"áĨ·":147831,"âģķ":147832,"âĵĤ":147833,"âĿľ":147834,"便":147835,"אַ":147836,"ðĿĹĿ":147837,"ðĿĹ¿":147838,"ðŁİ¾":147839,"ðŁĹĿ":147840,"ð٦Į":147841,"Æħ":147842,"Ǫ":147843,"ÒĹ":147844,"ÜĽ":147845,"ßł":147846,"à¡ij":147847,"áī
£":147848,"áĬŃ":147849,"ṡ":147850,"âŀ¼":147851,"âŀ¾":147852,"â´±":147853,"ãī¡":147854,"곯":147855,"ë½Ī":147856,"ìĤĺ":147857,"ìīij":147858,"ì«ĺ":147859,"íĮĥ":147860,"íϰ":147861,"ï¤Ĺ":147862,"ðŁĮ¬":147863,"ðŁĮ°":147864,"ðŁį¤":147865,"Ä»":147866,"Åĩ":147867,"ƨ":147868,"Éķ":147869,"Ò¢":147870,"Òº":147871,"Öį":147872,"×±":147873,"Ú±":147874,"Ú½":147875,"ÛIJ":147876,"à¤Ľ":147877,"à·Ģ":147878,"à¹ļ":147879,"ຫ":147880,"á´¹":147881,"á½Ķ":147882,"á¾³":147883,"âĤĴ":147884,"âĨ´":147885,"âĩĿ":147886,"âīħ":147887,"âĮ¨":147888,"âĵĵ":147889,"âĸ¢":147890,"âļ¬":147891,"âŀŃ":147892,"â²Ĵ":147893,"ãİ¿":147894,"ê¿´":147895,"ëα":147896,"ëį¬":147897,"ëİIJ":147898,"ëIJ«":147899,"ëĶ«":147900,"ë±ģ":147901,"ìĥ¥":147902,"íĮ¼":147903,"ïŃĵ":147904,"ﮥ":147905,"ï²°":147906,"ðĿIJĩ":147907,"ðĿIJij":147908,"ðĿijĮ":147909,"ðĿĵª":147910,"ðĿķļ":147911,"ðĿĺª":147912,"ðĿĺ¼":147913,"ðĿļĽ":147914,"ðŁĩ¶":147915,"ðŁĮĦ":147916,"ðŁĮķ":147917,"ðŁĮ¤":147918,"ðŁĮ§":147919,"ðŁį¬":147920,"ðŁİĭ":147921,"ðŁİ»":147922,"ðŁı¨":147923,"ðŁIJĩ":147924,"ðŁijĵ":147925,"ðŁĵIJ":147926,"ðŁĵĻ":147927,"ðŁĶ¼":147928,"ðŁķĴ":147929,"ðŁĸı":147930,"ðŁĸ¥":147931,"ðŁ¤¬":147932,"ðŁ¥Ĭ":147933,"ðŁ¥Ĵ":147934,"ßĮ":147935,"àºĦ":147936,"á¼µ":147937,"âķ¡":147938,"Ⲥ":147939,"â´¼":147940,"âµ¢":147941,"ãΝ":147942,"ëĵ¸":147943,"ëŁĩ":147944,"ëºį":147945,"ðĿϧ":147946,"ðŁįĪ":147947,"ðŁĶ¬":147948,"ðŁĸĬ":147949,"ðŁ¤¾":147950,"Ë¡":147951,"Ü©":147952,"âĮ¡":147953,"âŃij":147954,"Ⲧ":147955,"ë©ī":147956,"ì¼Ń":147957,"¦":147958,"ðĿĴİ":147959,"ðĿĹ¥":147960,"ðŁIJµ":147961,"ðŁķ¶":147962,"ðŁķ¸":147963,"ðŁ¤ľ":147964,"Õª":147965,"áĪĭ":147966,"ðŁ¥µ":147967,"ï°ģ":147968,"áµIJ":147969,"âķĵ":147970,"áĢĸ":147971,"âĭĪ":147972,"Éŀ":147973,"âŀ®":147974,"॰":147975,"ãĨģ":147976,"ðŁĴ±":147977,"ðŁıŃ":147978,"áĨ¨":147979,"ðŁįļ":147980,"ð٦IJ":147981,"á´»":147982,"âĺĮ":147983,"à´ķ":147984,"Õ±":147985,"áħ®":147986,"ðĿIJĮ":147987,"Ŧ":147988,"àºķ":147989,"âľĻ":147990,"˳":147991,"Ôµ":147992,"âķĴ":147993,"ðĿĹĹ":147994,"ðĿĹł":147995,"Úļ":147996,"ধ":147997,"âĨĿ":147998,"âĻī":147999,"ãĮ»":148000,"ì¹Ĭ":148001,"ðĿĹº":148002,"ð٧ĺ":148003,"ì³£":148004,"ï¬Ŀ":148005,"ðŁijº":148006,"ÇŁ":148007,"ÎĪ":148008,"Ϋ":148009,"Ñ¥":148010,"Ô²":148011,"Õ¨":148012,"ܦ":148013,"à¦Ĩ":148014,"থ":148015,"áIJ¢":148016,"á¼ģ":148017,"á¼ĺ":148018,"ἦ":148019,"âĵĿ":148020,"ãΰ":148021,"ãİĹ":148022,"겡":148023,"ë¨Ģ":148024,"ì£Ķ":148025,"ì´¤":148026,"ìµĿ":148027,"ï§´":148028,"ïŃĬ":148029,"ï²Ł":148030,"ðĿIJ·":148031,"ðĿijĭ":148032,"ðĿĵī":148033,"ðĿĺµ":148034,"ðŁĴ·":148035,"ðŁĽ©":148036,"ðŁ§¹":148037,"ÅĶ":148038,"Êŀ":148039,"Ë¥":148040,"ÎĮ":148041,"Ñ©":148042,"ÓIJ":148043,"Ół":148044,"Úij":148045,"ÚĴ":148046,"ߨ":148047,"àªĪ":148048,"áIJĥ":148049,"ṯ":148050,"âĤĭ":148051,"âĤµ":148052,"âĦħ":148053,"âĦł":148054,"âĪ£":148055,"âīº":148056,"âī»":148057,"âĬĽ":148058,"âĮIJ":148059,"âİĵ":148060,"âĺ¸":148061,"âĻĴ":148062,"âļĴ":148063,"âľĩ":148064,"âľł":148065,"â´·":148066,"âµĸ":148067,"ãĦ¸":148068,"ãī¢":148069,"ãī°":148070,"êĩ´":148071,"ê´¸":148072,"êºł":148073,"ëĤı":148074,"ëĤ¢":148075,"ëIJĢ":148076,"뺴":148077,"ìĥľ":148078,"ìįħ":148079,"줫":148080,"챦":148081,"ìºij":148082,"ì¼ģ":148083,"쿳":148084,"íĤģ":148085,"íħ¡":148086,"íĴĤ":148087,"íĴī":148088,"íľĦ":148089,"ïŃª":148090,"ﮬ":148091,"ﯦ":148092,"ﱪ":148093,"ï²ı":148094,"ï´Ģ":148095,"ï»Ĩ":148096,"₩":148097,"ðĿijĹ":148098,"ðĿĸĻ":148099,"ðŁĮ¡":148100,"ðŁįĿ":148101,"ðŁį§":148102,"ðŁİ«":148103,"ðŁıĺ":148104,"ðŁıª":148105,"ðŁIJĭ":148106,"ðŁIJĽ":148107,"ðŁIJº":148108,"ðŁijĸ":148109,"ðŁijŀ":148110,"ðŁij·":148111,"ðŁĵĢ":148112,"ðŁĶĦ":148113,"ðŁĶĮ":148114,"ðŁķĻ":148115,"ðŁĻį":148116,"ðŁĻİ":148117,"ð٦į":148118,"ǰ":148119,"ÉŁ"
:148120,"ÊĨ":148121,"Ô¼":148122,"Úľ":148123,"ড":148124,"শ":148125,"áĴĥ":148126,"Ἡ":148127,"âĵķ":148128,"â²Ī":148129,"ê°°":148130,"ê¹ł":148131,"êºħ":148132,"ëĦ¹":148133,"ë¯ĵ":148134,"íIJĪ":148135,"ï§¶":148136,"ï®ij":148137,"ﲨ":148138,"ðĿĴī":148139,"ðĿĴĶ":148140,"ðĿŨ":148141,"ðĿĻŀ":148142,"ðĿļĴ":148143,"ðĿļķ":148144,"ðŁIJİ":148145,"ð٤ķ":148146,"ð٧Ķ":148147,"ϰ":148148,"ÔĿ":148149,"âĮĬ":148150,"âĴ¾":148151,"ãī£":148152,"ïŃ©":148153,"ðĿļŀ":148154,"Êij":148155,"দ":148156,"áĦĩ":148157,"âīĥ":148158,"â²Ģ":148159,"ìŁİ":148160,"ðĿij¶":148161,"ðĿĵ²":148162,"ðŁİ·":148163,"ðŁļ¹":148164,"àºģ":148165,"áłł":148166,"ãĦļ":148167,"ðŁIJ¿":148168,"áĽļ":148169,"âķ³":148170,"ðŁIJŃ":148171,"âĴ¹":148172,"ðĿĸļ":148173,"âĻĸ":148174,"ãβ":148175,"âĨ¾":148176,"áĦĨ":148177,"âķĽ":148178,"ð٤į":148179,"â½¥":148180,"ðŁĮ¨":148181,"âĪ®":148182,"ãĮĺ":148183,"ãįij":148184,"ï¹Ģ":148185,"âĵĹ":148186,"âĬĦ":148187,"ðŁı¹":148188,"ËĴ":148189,"ðŁ¤±":148190,"ãıľ":148191,"ðŁİĮ":148192,"ï¥Ń":148193,"ণ":148194,"ðŁİ¹":148195,"ãĬŁ":148196,"à´°":148197,"ðĿIJĶ":148198,"à´¨":148199,"à½ļ":148200,"âľº":148201,"Õ·":148202,"ðŁij³":148203,"à¦ľ":148204,"âĺĭ":148205,"âĻĬ":148206,"ãĢĽ":148207,"Èĭ":148208,"à®°":148209,"áĥ¨":148210,"âĦķ":148211,"íijĢ":148212,"ðĿĵĥ":148213,"ð٦Ķ":148214,"Ä¿":148215,"ÅĢ":148216,"Ƴ":148217,"Éļ":148218,"Öĥ":148219,"Ü£":148220,"ߣ":148221,"à¦Ń":148222,"à§¡":148223,"à¶»":148224,"ຣ":148225,"à½ĩ":148226,"Ḩ":148227,"á½Ī":148228,"⽬":148229,"ê¡Ķ":148230,"ì³Ħ":148231,"ï¨ī":148232,"ðĿIJ¡":148233,"ðĿĺ¢":148234,"ðŁį¿":148235,"ðŁİŁ":148236,"ðŁıī":148237,"ðŁĶIJ":148238,"ðŁļħ":148239,"ðŁ¤½":148240,"Æį":148241,"Ç«":148242,"ǽ":148243,"Èļ":148244,"Îī":148245,"Ó¤":148246,"Óª":148247,"ÕĬ":148248,"Ù¼":148249,"Ú´":148250,"ßĿ":148251,"à¶ľ":148252,"á¼ķ":148253,"á¿¥":148254,"âİŀ":148255,"ãĢļ":148256,"ãī¤":148257,"곸":148258,"ê·ģ":148259,"ëĵĦ":148260,"ëĵķ":148261,"ì¨Ķ":148262,"챨":148263,"ðĿIJ¾":148264,"ðĿij»":148265,"ðĿͼ":148266,"ðĿķĿ":148267,"ðĿĺŃ":148268,"ðŁĨĻ":148269,"ðŁĵ¤":148270,"ðŁĶŁ":148271,"ðŁĹ¼":148272,"Äľ":148273,"Æģ":148274,"Æ¿":148275,"dz":148276,"Ç·":148277,"Éĥ":148278,"Éł":148279,"Êī":148280,"ʧ":148281,"˲":148282,"Ï´":148283,"Õģ":148284,"Õŀ":148285,"Öĩ":148286,"ÛĤ":148287,"Ûĵ":148288,"ßĹ":148289,"ߦ":148290,"হ":148291,"ள":148292,"à´¸":148293,"à»Ĥ":148294,"áĪĿ":148295,"áĪª":148296,"áĭµ":148297,"áIJĬ":148298,"áĴª":148299,"áļĸ":148300,"áŀĽ":148301,"á´¢":148302,"áµı":148303,"áµŃ":148304,"á¶«":148305,"á¸ı":148306,"áºĴ":148307,"á¼¥":148308,"á½ķ":148309,"á½¼":148310,"âĤĬ":148311,"âĦĤ":148312,"âĦ©":148313,"âĩī":148314,"âī£":148315,"âĮł":148316,"âİŁ":148317,"âı®":148318,"âķĺ":148319,"âĹĸ":148320,"âĺ©":148321,"âĻij":148322,"âϲ":148323,"âļĽ":148324,"ãĦŁ":148325,"ãī±":148326,"ãİļ":148327,"ê¡ķ":148328,"êªĸ":148329,"ê°¹":148330,"ê²Ĩ":148331,"êµĦ":148332,"ëĩ¬":148333,"ëĭ¯":148334,"ëıł":148335,"ëĴ¬":148336,"ëĸĪ":148337,"ëĸ½":148338,"ëĺĶ":148339,"ëŀ¸":148340,"ë¸ħ":148341,"뻳":148342,"ë¿Ł":148343,"ìĤµ":148344,"ìĬī":148345,"ìľ°":148346,"ìłĭ":148347,"ìłĶ":148348,"쥡":148349,"ìŃĿ":148350,"켬":148351,"íĪĩ":148352,"íīľ":148353,"íįĦ":148354,"íĽ¾":148355,"íĿ£":148356,"朗":148357,"勞":148358,"ï¦ľ":148359,"獵":148360,"ï§ľ":148361,"ï¨Ī":148362,"שׁ":148363,"הּ":148364,"ïѽ":148365,"ï®ī":148366,"ï¯ŀ":148367,"ï°Ĵ":148368,"ï±ĩ":148369,"ï¿Ħ":148370,"ðĿIJħ":148371,"ðĿijĦ":148372,"ðĿijº":148373,"ðĿĴĹ":148374,"ðĿĵ®":148375,"ðĿķĽ":148376,"ðĿķŀ":148377,"ðĿĸij":148378,"ðĿĺģ":148379,"ðĿĺĨ":148380,"ðĿĺ¶":148381,"ðĿĻ¢":148382,"ðĿļľ":148383,"ðŁĮĥ":148384,"ðŁĮ¦":148385,"ðŁįŁ":148386,"ðŁİİ":148387,"ðŁıĻ":148388,"ðŁIJ©":148389,"ðŁIJ«":148390,"ðŁIJ´":148391,"ðŁijĶ":148392,"ðŁĵī":148393,"ðŁĵĽ":
148394,"ðŁĶī":148395,"ðŁĸ¼":148396,"ðŁĹĥ":148397,"ðŁĹ¯":148398,"ðŁļĩ":148399,"ðŁļIJ":148400,"ðŁļµ":148401,"ðŁ¤¶":148402,"ðŁ¥ĭ":148403,"ðŁ¥ĵ":148404,"ðŁ¥®":148405,"ð٦İ":148406,"ðŁ¦ł":148407,"ð٧Ĵ":148408,"ðŁ§¨":148409,"ÆIJ":148410,"Çį":148411,"ÓĢ":148412,"ÔĽ":148413,"ರ":148414,"à´Ļ":148415,"áĢĴ":148416,"ê²Ŀ":148417,"ê¹¹":148418,"ë©¥":148419,"ìĸĶ":148420,"ï¤ģ":148421,"ï¤ı":148422,"ï¦ī":148423,"ï¦ĵ":148424,"ï§ī":148425,"ï²Ŀ":148426,"ðĿĹŀ":148427,"ðĿű":148428,"ðŁĮĭ":148429,"ðŁį¶":148430,"à¦ļ":148431,"ìķľ":148432,"ðĿIJ¯":148433,"ðĿļĿ":148434,"à°¨":148435,"à½ĺ":148436,"à½ł":148437,"á¡¥":148438,"á¾°":148439,"âģį":148440,"âͰ":148441,"⬾":148442,"ðĿIJł":148443,"ðĿij¯":148444,"ðĿĹĽ":148445,"ðĿĵ»":148446,"ðĿĸĪ":148447,"âŀ»":148448,"áŀł":148449,"⡱":148450,"â»ij":148451,"ðŁ§µ":148452,"廉":148453,"ðŁijĺ":148454,"ãĤĶ":148455,"â¼Ł":148456,"ãĬ¤":148457,"ï¦Ŀ":148458,"ãĮ¦":148459,"â̏":148460,"ðŁĶĻ":148461,"ã¹":148462,"㹦":148463,"ï¹ħ":148464,"ï©Į":148465,"ãī¨":148466,"︽":148467,"âį¥":148468,"ðŁļī":148469,"ðŁ¥ľ":148470,"âĵľ":148471,"â»Ŀ":148472,"ï¨ľ":148473,"ðŁĴĴ":148474,"áĦij":148475,"â¾ŀ":148476,"ï¨ģ":148477,"à´ª":148478,"áĦİ":148479,"âŀ´":148480,"ষ":148481,"áħ¬":148482,"áŀ§":148483,"âĨ¢":148484,"âķ¦":148485,"âľij":148486,"ˬ":148487,"ÕIJ":148488,"à¼Ķ":148489,"ʤ":148490,"˨":148491,"à¤ŀ":148492,"à»ĥ":148493,"à¼ļ":148494,"âĵ¥":148495,"âķľ":148496,"ðŁIJĸ":148497,"á¼Ļ":148498,"ἤ":148499,"ìĨ°":148500,"ÈĤ":148501,"ʱ":148502,"à®ļ":148503,"áĥ§":148504,"á´ĭ":148505,"á´®":148506,"âĿ¡":148507,"âŀ·":148508,"ëĿ¡":148509,"ï§¢":148510,"ﯡ":148511,"ðĿķķ":148512,"ðŁħ°":148513,"ðŁ¦¸":148514,"Ǹ":148515,"Óŀ":148516,"Ô¶":148517,"ÖĨ":148518,"Úģ":148519,"Ûĭ":148520,"áİ¥":148521,"᾿":148522,"âĶŃ":148523,"âĶ®":148524,"êĢĢ":148525,"ê±ĺ":148526,"ëIJŃ":148527,"ë½Ħ":148528,"ìĶIJ":148529,"ì¸Į":148530,"íģł":148531,"íϱ":148532,"ï¥ī":148533,"ï¨ĸ":148534,"ðĿij´":148535,"ðĿĸĴ":148536,"ðĿĺ¨":148537,"ðĿļĮ":148538,"ðŁIJ¡":148539,"ðŁij¢":148540,"ðŁĵĶ":148541,"Åħ":148542,"Æİ":148543,"È©":148544,"Òª":148545,"Ôĥ":148546,"áĥ«":148547,"á¸ĩ":148548,"⼣":148549,"ê»Ń":148550,"ë¨Ħ":148551,"ìŁĢ":148552,"줴":148553,"íļIJ":148554,"盧":148555,"ðŁŁ¢":148556,"Ƨ":148557,"ȼ":148558,"ÊĿ":148559,"ËĦ":148560,"Ëħ":148561,"Ëį":148562,"˧":148563,"Ò¥":148564,"ÕĶ":148565,"Øı":148566,"ؼ":148567,"ßIJ":148568,"ßľ":148569,"à¤ĵ":148570,"à¦Ļ":148571,"à®ĵ":148572,"à¶´":148573,"à¼į":148574,"à¼Ĵ":148575,"ལ":148576,"áĢĤ":148577,"áĢĬ":148578,"áĦĦ":148579,"áĪĺ":148580,"áĭĬ":148581,"áĮį":148582,"áijĭ":148583,"áŀĤ":148584,"áł¢":148585,"á¡Ŀ":148586,"á´¦":148587,"áµį":148588,"ᵨ":148589,"ḡ":148590,"ḯ":148591,"á¼£":148592,"âģĤ":148593,"âĦĺ":148594,"âĦľ":148595,"âĦ³":148596,"âĦµ":148597,"âĨ¦":148598,"âĩĨ":148599,"âĪ·":148600,"âĬļ":148601,"âĮ«":148602,"âĮ¯":148603,"âİĽ":148604,"âİľ":148605,"âݤ":148606,"âݦ":148607,"âİ®":148608,"âijī":148609,"âĶī":148610,"âķĻ":148611,"âĸĤ":148612,"âĹŃ":148613,"âĺĬ":148614,"âĺį":148615,"âĺĴ":148616,"âļĨ":148617,"⼧":148618,"âĽ²":148619,"âŀĺ":148620,"â¥Ħ":148621,"â´³":148622,"â´½":148623,"âµĪ":148624,"ãī¯":148625,"ãİij":148626,"㧬":148627,"êϬ":148628,"ê§ģ":148629,"곬":148630,"ê´ŀ":148631,"ê»ľ":148632,"ëħĵ":148633,"ëĭ¼":148634,"ëįĸ":148635,"ëĸ±":148636,"ëĿ°":148637,"롹":148638,"뢴":148639,"ë£Ģ":148640,"뤳":148641,"ë¨ķ":148642,"ëŃ¥":148643,"ìĦ¶":148644,"ìħ¤":148645,"ìĮķ":148646,"ìįª":148647,"ìı©":148648,"ìĴĢ":148649,"ì͝":148650,"ìĿĶ":148651,"ìĿľ":148652,"ìłŃ":148653,"짦":148654,"쨩":148655,"첬":148656,"ì³¥":148657,"켯":148658,"íĢ«":148659,"íĢŃ":148660,"íĥ¸":148661,"íĵģ":148662,"íķ¬":148663,"íŸ":148664,"íĽķ":148665,"íľŃ":148666,"íĿĹ":148667,"ï¤Į":148668,"浪":148669,"ï§¿":148670,"ï¬Ħ":
148671,"ï¬ħ":148672,"ïŃij":148673,"ïŃ«":148674,"ïŃº":148675,"ï®Ĥ":148676,"ﮢ":148677,"ﮨ":148678,"ï°İ":148679,"ï°ł":148680,"ï²£":148681,"ï³IJ":148682,"ï³Ĵ":148683,"ï³ĺ":148684,"ï³ľ":148685,"ï¹¼":148686,"│":148687,"ðĿIJ©":148688,"ðĿĴļ":148689,"ðĿķĶ":148690,"ðĿķ¤":148691,"ðĿĸĮ":148692,"ðĿĹ£":148693,"ðĿŰ":148694,"ðĿĹ´":148695,"ðĿĺĤ":148696,"ðĿĺ¥":148697,"ðĿĺ®":148698,"ðĿĺ¸":148699,"ðĿĻĢ":148700,"ðĿĽ¾":148701,"ðĿľı":148702,"ðŁĮģ":148703,"ðŁĮľ":148704,"ðŁĮ¥":148705,"ðŁĮ¯":148706,"ðŁįIJ":148707,"ðŁİĴ":148708,"ðŁıĶ":148709,"ðŁıķ":148710,"ðŁı®":148711,"ðŁIJĤ":148712,"ðŁIJī":148713,"ðŁIJ¹":148714,"ðŁĶķ":148715,"ðŁĶļ":148716,"ðŁķij":148717,"ðŁķ£":148718,"ðŁĹŀ":148719,"ðŁĹ¡":148720,"ðŁĹ¿":148721,"ðŁļĨ":148722,"ðŁļĬ":148723,"ðŁļĵ":148724,"ðŁļķ":148725,"ðŁļ¾":148726,"ðŁĽģ":148727,"ðŁĽİ":148728,"ðŁĽı":148729,"ðŁ¤´":148730,"ðŁ¥ķ":148731,"ðŁ¥ĸ":148732,"ðŁ¥ł":148733,"ðŁ¥¥":148734,"ð٦Ĩ":148735,"ð٦ī":148736,"ð٦ļ":148737,"ð٧ij":148738,"ðŁ§¥":148739,"ðŁ§¿":148740,"Ű":148741,"ƺ":148742,"ɧ":148743,"àªĩ":148744,"ண":148745,"áĪĪ":148746,"áĬ¤":148747,"áĭ®":148748,"áĮĪ":148749,"áĮµ":148750,"ᥲ":148751,"âĵŁ":148752,"êϳ":148753,"ê°Ĭ":148754,"ëķģ":148755,"ëķ¨":148756,"ìĬģ":148757,"例":148758,"גּ":148759,"ðĿĸį":148760,"ðĿĺĮ":148761,"ðĿĺ³":148762,"ðĿĻ©":148763,"ðŁįĻ":148764,"ðŁĸĸ":148765,"áī³":148766,"áĭ¨":148767,"áĸĩ":148768,"áŀĮ":148769,"á¹§":148770,"âķª":148771,"âŀļ":148772,"â²ĺ":148773,"êķ":148774,"êķ¥":148775,"路":148776,"ﮣ":148777,"ï¯ł":148778,"ðĿĴĸ":148779,"ðĿķĺ":148780,"ðĿĸĩ":148781,"ðĿĹŁ":148782,"ðĿĹª":148783,"ðĿĹ¯":148784,"ðĿĻł":148785,"ðŁĵı":148786,"à¦Ĺ":148787,"âĴ»":148788,"â²ł":148789,"ðĿĵµ":148790,"Ê£":148791,"à°ľ":148792,"áĬ¢":148793,"áŀIJ":148794,"ḷ":148795,"âĦĽ":148796,"âĩĢ":148797,"âĩĬ":148798,"êĴ¦":148799,"ê¦ł":148800,"ﮤ":148801,"ðŁįĽ":148802,"ðŁ¤Ľ":148803,"ᨾ":148804,"âŀº":148805,"áķ¯":148806,"áĽı":148807,"âĩĤ":148808,"â͹":148809,"âĻĹ":148810,"ðŁĸ¨":148811,"ê¦ı":148812,"ર":148813,"áļ¨":148814,"ðŁ¤¥":148815,"ðŁ§¢":148816,"ãIJĤ":148817,"ãĦ¥":148818,"ðŁĸĮ":148819,"â¼Ĵ":148820,"ãĬ§":148821,"âį©":148822,"ð٦ij":148823,"âĶ·":148824,"ï©IJ":148825,"ï©¡":148826,"ðĵĪ":148827,"ðĵĪĴ":148828,"â»Ħ":148829,"ï¨Ĵ":148830,"âĦª":148831,"Ò§":148832,"ÚĮ":148833,"â̶":148834,"âºł":148835,"â»ģ":148836,"âĨ¸":148837,"áĦIJ":148838,"ãħIJ":148839,"à»Ħ":148840,"áĹª":148841,"âĨ¼":148842,"âĩĭ":148843,"âĩĺ":148844,"âĮij":148845,"âĸ©":148846,"ðĿIJĹ":148847,"ÄĬ":148848,"à¦ī":148849,"ìīł":148850,"ɤ":148851,"ßį":148852,"ßı":148853,"áµĹ":148854,"âĤ¥":148855,"âĵī":148856,"âĶł":148857,"â͍":148858,"âķĦ":148859,"ä¤":148860,"ä¤Ģ":148861,"껸":148862,"ï®ģ":148863,"ðĵĤ":148864,"ðĵĤĥ":148865,"ð٦ķ":148866,"ÆĽ":148867,"à¦ĩ":148868,"ãıĺ":148869,"﮼":148870,"Úĵ":148871,"ÚĿ":148872,"à¦ĵ":148873,"ද":148874,"á´ħ":148875,"á½Ļ":148876,"âģ¼":148877,"âĸİ":148878,"⼩":148879,"äĶ":148880,"äĶĢ":148881,"뻡":148882,"ìĽ½":148883,"íģĦ":148884,"良":148885,"ï±ī":148886,"ï¹»":148887,"ðĿĸĭ":148888,"ðĿĻĪ":148889,"ðĿĻª":148890,"ðĿ϶":148891,"ðŁIJĦ":148892,"ðŁIJĨ":148893,"áİ¢":148894,"á¸Į":148895,"âĿ´":148896,"ðŁı¸":148897,"ÈĿ":148898,"ɸ":148899,"Îħ":148900,"Ïľ":148901,"Ó¢":148902,"Õ¹":148903,"à´ħ":148904,"àºĪ":148905,"áĭ°":148906,"áijİ":148907,"áłµ":148908,"á¡ł":148909,"á´ī":148910,"ḵ":148911,"á¿´":148912,"âĵ£":148913,"âͶ":148914,"⽯":148915,"ê²¥":148916,"ê¿ĺ":148917,"ëģİ":148918,"ëİĪ":148919,"ë͝":148920,"ë²°":148921,"ìĺ¯":148922,"ìĽ¸":148923,"ìŀĹ":148924,"ì§ĺ":148925,"쬬":148926,"ì·¬":148927,"íģħ":148928,"íĵĶ":148929,"íĽĿ":148930,"冷":148931,"魯":148932,"沈":148933,"ï¯ĸ":148934,"ðĿĵħ":148935,"ðĿĻĦ":148936,"ðŁĵ¶":148937,"ðŁĹĴ":148938,"ðŁ¥Ķ":148939,"ðŁ¥Ń":148940,"Å®":148941,"Å´":148942,"Æī":
148943,"Æ«":148944,"Çģ":148945,"Ç£":148946,"Ǻ":148947,"Ǽ":148948,"Èį":148949,"ȯ":148950,"Éľ":148951,"ʬ":148952,"Ëģ":148953,"ˤ":148954,"˵":148955,"ÏĽ":148956,"Ò¤":148957,"Ò¬":148958,"Óı":148959,"ÓĽ":148960,"Ó¡":148961,"Ó³":148962,"ÔĮ":148963,"Ô¬":148964,"Õ³":148965,"Ù»":148966,"Úī":148967,"Ú§":148968,"Üľ":148969,"ߪ":148970,"à¤Ŀ":148971,"à¦Ľ":148972,"à¨Ĩ":148973,"àªķ":148974,"ડ":148975,"à®İ":148976,"à°¬":148977,"ൻ":148978,"ർ":148979,"à¶ł":148980,"à¶Ń":148981,"à¶¶":148982,"à·Ĩ":148983,"༽":148984,"áĢļ":148985,"áħ¢":148986,"áĨ¸":148987,"áĪĢ":148988,"áĪķ":148989,"áΰ":148990,"áī¡":148991,"áī¤":148992,"áĬ¦":148993,"áĬ«":148994,"áĭĭ":148995,"áĭį":148996,"áݯ":148997,"áijŃ":148998,"áķĹ":148999,"᣼":149000,"á¥Ĵ":149001,"á©ī":149002,"áŃº":149003,"á´¡":149004,"áµĺ":149005,"ᵼ":149006,"á¶ł":149007,"á¸ģ":149008,"á¸ĭ":149009,"á¹Ļ":149010,"á¹Ŀ":149011,"Ṧ":149012,"áºħ":149013,"á¼Ĥ":149014,"á½ĥ":149015,"á½į":149016,"á½§":149017,"á¾·":149018,"â̵":149019,"âĤİ":149020,"âĦĿ":149021,"âħĢ":149022,"âĨŀ":149023,"âĨ§":149024,"âĩħ":149025,"âĪĥ":149026,"âīı":149027,"âī½":149028,"âĬŀ":149029,"âĬ¡":149030,"âĬ§":149031,"âĬ¶":149032,"âĭĦ":149033,"âİĴ":149034,"âİ¡":149035,"âİ£":149036,"âݪ":149037,"âıİ":149038,"âĵĥ":149039,"âĵĸ":149040,"âĵ¨":149041,"âķĭ":149042,"âķĸ":149043,"âķ¢":149044,"âķ²":149045,"âĸĨ":149046,"âĸĬ":149047,"âĸį":149048,"âĸ®":149049,"âĺ¡":149050,"âĺ¦":149051,"âĺ±":149052,"âĺ¿":149053,"âĻĺ":149054,"âĻĿ":149055,"âļ°":149056,"âĽij":149057,"âŀª":149058,"â¤Ŀ":149059,"⤢":149060,"⤷":149061,"â§«":149062,"â¨Ń":149063,"⨯":149064,"â±£":149065,"â²İ":149066,"⵼":149067,"ãħĶ":149068,"ãĪı":149069,"ãī²":149070,"ãī³":149071,"ãĬij":149072,"ãĭĽ":149073,"ãİIJ":149074,"겤":149075,"ê·¿":149076,"ê¹ŀ":149077,"껨":149078,"ê¼į":149079,"꿸":149080,"ëĥ¬":149081,"ëĩIJ":149082,"ëĭł":149083,"ëį¯":149084,"ëĹĮ":149085,"ëĹij":149086,"ë¥Ģ":149087,"ëªĥ":149088,"몯":149089,"뱡":149090,"ë³ĵ":149091,"ë³½":149092,"뵾":149093,"ìĤ³":149094,"ìħ¥":149095,"ìĩ½":149096,"ìı¨":149097,"ìı¸":149098,"ìķį":149099,"ìĸĸ":149100,"ìŁ¨":149101,"ì¢ĥ":149102,"ì¢į":149103,"ì¥ij":149104,"ì§¼":149105,"ì©ĥ":149106,"ì®ľ":149107,"쮸":149108,"ì³ij":149109,"ì´¥":149110,"ì¾ĥ":149111,"íħ¦":149112,"íĪ¿":149113,"íĵ½":149114,"íķ³":149115,"íĸı":149116,"íĹł":149117,"íĿ«":149118,"ï¤ĵ":149119,"ï¤ĺ":149120,"ï¥İ":149121,"略":149122,"ï¦ħ":149123,"尿":149124,"ï§ĩ":149125,"ï¬Ĩ":149126,"דּ":149127,"ï®ĩ":149128,"ï®Ī":149129,"ï®Ŀ":149130,"ﮩ":149131,"ï®±":149132,"ï¯ĺ":149133,"ï¯Ļ":149134,"ﯢ":149135,"ﯣ":149136,"ﯤ":149137,"ﯥ":149138,"ï±Ĥ":149139,"ï²Ĩ":149140,"ﲪ":149141,"ï´¼":149142,"ïºī":149143,"ïºĬ":149144,"ﺥ":149145,"ðĿij¨":149146,"ðĿij©":149147,"ðĿij²":149148,"ðĿĴĮ":149149,"ðĿĴª":149150,"ðĿĴ®":149151,"ðĿĵĤ":149152,"ðĿĵĪ":149153,"ðĿĵ¯":149154,"ðĿ͍":149155,"ðĿķĢ":149156,"ðĿķĨ":149157,"ðĿķ¦":149158,"ðĿķ§":149159,"ðĿķ«":149160,"ðĿķ·":149161,"ðĿŵ":149162,"ðĿŸ":149163,"ðĿĺĦ":149164,"ðĿĺĻ":149165,"ðĿĺł":149166,"ðĿĺ¬":149167,"ðĿĻį":149168,"ðĿĻij":149169,"ðĿĻ¡":149170,"ðĿύ":149171,"ðĿĻ·":149172,"ðĿļį":149173,"ðĿĽ¿":149174,"ðŁĥ":149175,"ðŁĥı":149176,"ðŁħĺ":149177,"ðŁī":149178,"ðŁīij":149179,"ðŁİ¡":149180,"ðŁİª":149181,"ðŁİ±":149182,"ðŁİ³":149183,"ðŁİº":149184,"ðŁıİ":149185,"ðŁıĹ":149186,"ðŁıļ":149187,"ðŁıŀ":149188,"ðŁı¦":149189,"ðŁı§":149190,"ðŁIJģ":149191,"ðŁIJħ":149192,"ðŁIJĵ":149193,"ðŁĴĤ":149194,"ðŁĵij":149195,"ðŁĵĵ":149196,"ðŁĵ¨":149197,"ðŁĵ«":149198,"ðŁĶĭ":149199,"ðŁĶŃ":149200,"ðŁĶ¯":149201,"ðŁķĹ":149202,"ðŁļĤ":149203,"ðŁļ¢":149204,"ðŁļ¦":149205,"ðŁļ¬":149206,"ðŁĽĭ":149207,"ðŁĽĮ":149208,"ðŁĽ¬":149209,"ðŁĽ¶":149210,"ðŁŁ¡":149211,"ðŁ¥ĺ":149212,"ðŁ¥Ł":149213,"ðŁ¥¦":149214,"ð٦ĩ":149215,"ð٦Ī":149216,"ð٧Ĭ":149217
,"ð٧Ĺ":149218,"ðŁ§¤":149219,"Ê·":149220,"˹":149221,"á¹ļ":149222,"á½¥":149223,"âĦŁ":149224,"겯":149225,"껫":149226,"ë°·":149227,"ìĥĨ":149228,"ìĽĿ":149229,"ì¨ī":149230,"ì«ı":149231,"ï¯ķ":149232,"ðĿľĭ":149233,"ɲ":149234,"ÒŃ":149235,"ÓĪ":149236,"à½Ľ":149237,"áĭĵ":149238,"áĻŃ":149239,"áł©":149240,"á¹®":149241,"âĦĴ":149242,"âĨ»":149243,"âµĥ":149244,"ë̍":149245,"ëł§":149246,"ìī¥":149247,"ìĮľ":149248,"ìŶ":149249,"ì¨Ī":149250,"쪾":149251,"íı½":149252,"íļĶ":149253,"íĽµ":149254,"露":149255,"ï¦IJ":149256,"ï§Ĺ":149257,"ï§ļ":149258,"אָ":149259,"ðĿIJĬ":149260,"ðĿķĹ":149261,"ðĿĹļ":149262,"ðĿļĸ":149263,"ðŁħ´":149264,"Èĥ":149265,"ÉĿ":149266,"ϱ":149267,"ÓĹ":149268,"ढ":149269,"áħł":149270,"áī¦":149271,"áijĮ":149272,"áĴ¼":149273,"áŀ¡":149274,"᳨":149275,"áłŃ":149276,"á¨ħ":149277,"á¨Ķ":149278,"á´ĺ":149279,"ᶦ":149280,"á¸İ":149281,"á¼ħ":149282,"á¼¹":149283,"âĨ¯":149284,"âĵİ":149285,"ãıĮ":149286,"êī":149287,"êīĤ":149288,"ëĨ§":149289,"ëĿ±":149290,"좡":149291,"íν":149292,"ï¤ĩ":149293,"ï¤Ľ":149294,"ðĿIJķ":149295,"ðĿĵ¸":149296,"ðĿĵ¼":149297,"ðĿĹķ":149298,"ðĿĺĪ":149299,"ðŁı£":149300,"ðŁı¤":149301,"ðŁĹĦ":149302,"Ñ·":149303,"Òł":149304,"áµĸ":149305,"Ἠ":149306,"ë¬Ħ":149307,"ï°´":149308,"âν":149309,"ÕŃ":149310,"Ú¹":149311,"à¥Ł":149312,"áĢĨ":149313,"áŀĴ":149314,"ã̶":149315,"ꦫ":149316,"ï¸ĵ":149317,"ðĿIJĽ":149318,"ðĿĺĹ":149319,"ðŁıľ":149320,"ì«Ń":149321,"ð٧ŀ":149322,"à½Ĥ":149323,"âĨ¿":149324,"âĩı":149325,"âĵģ":149326,"âͧ":149327,"âķģ":149328,"âķ¤":149329,"ê¦Ĺ":149330,"ꦤ":149331,"ðŁıĪ":149332,"áŀķ":149333,"Ô½":149334,"àªĹ":149335,"à¬Ĩ":149336,"âķķ":149337,"ï½ł":149338,"⼦":149339,"⼯":149340,"â¾·":149341,"âĶĸ":149342,"à¬ĵ":149343,"âĺĹ":149344,"âįĭ":149345,"ï¨Ŀ":149346,"â¼¥":149347,"寧":149348,"âĦĬ":149349,"ãĢ´":149350,"âį¢":149351,"ð¡Ī":149352,"ð¡Ī½":149353,"難":149354,"ãĢ»":149355,"ãıĥ":149356,"說":149357,"ï¨ĺ":149358,"ðŁIJĥ":149359,"ðŁĨĸ":149360,"ðŁĹ¾":149361,"ãĦĩ":149362,"Þĭ":149363,"â¼¼":149364,"ï¨Ń":149365,"ÞĢ":149366,"ÞĦ":149367,"ÞĪ":149368,"ÞIJ":149369,"âĮĦ":149370,"â»ĺ":149371,"ãŁ¢":149372,"áħ§":149373,"ðIJĮ¿":149374,"Ë»":149375,"à²Ĺ":149376,"áĢĩ":149377,"áŀĬ":149378,"âķĩ":149379,"ãĩ¼":149380,"ãݰ":149381,"ÕĴ":149382,"ÜĪ":149383,"ߥ":149384,"à¿IJ":149385,"áĢŁ":149386,"âĨ¥":149387,"âķĮ":149388,"â½Ģ":149389,"â½°":149390,"â¾Ĭ":149391,"äĦ":149392,"äĦĢ":149393,"ðĵIJ":149394,"ðĵIJį":149395,"ðŁİ¦":149396,"âĤ¯":149397,"âĬĺ":149398,"âĦį":149399,"ʵ":149400,"Ѷ":149401,"Úĥ":149402,"à¦Ķ":149403,"à´¦":149404,"áݶ":149405,"áĵķ":149406,"Ṩ":149407,"âĤł":149408,"âĩ°":149409,"âĹĴ":149410,"â¿Ĭ":149411,"ê·±":149412,"ì¹ķ":149413,"íĪ©":149414,"ïŃĢ":149415,"ðĿĴ¸":149416,"ðĿĵĬ":149417,"ðĿĺ©":149418,"Ǧ":149419,"É«":149420,"áĬ¨":149421,"ȹ":149422,"ʯ":149423,"Ϊ":149424,"ÚĢ":149425,"áĮ¸":149426,"áİ»":149427,"áıķ":149428,"áı´":149429,"á²Ĥ":149430,"Ὠ":149431,"âıĿ":149432,"âĺĻ":149433,"ëĥ¨":149434,"ëĦ¼":149435,"ëĪĻ":149436,"ë£ħ":149437,"ìͼ":149438,"ìķĿ":149439,"ìļ¬":149440,"ìľ±":149441,"ï¥Ĥ":149442,"惡":149443,"יּ":149444,"ïŃģ":149445,"ï³Ī":149446,"ðĿĶħ":149447,"ðĿĺ¤":149448,"ðĿĻı":149449,"ðĿĻĻ":149450,"ðŁķī":149451,"ð٧Ļ":149452,"á¸ij":149453,"ê´¼":149454,"ëģį":149455,"ëĹ´":149456,"ëĿ³":149457,"ë°ŀ":149458,"ë°¢":149459,"ëµĺ":149460,"ìĤĶ":149461,"ìĦĦ":149462,"ì¼ļ":149463,"íĢł":149464,"íĬ±":149465,"íĮĸ":149466,"ï¤ij":149467,"領":149468,"隸":149469,"ï´į":149470,"ðĿĺ·":149471,"Ĭ":149472,"Ŭ":149473,"ÆĢ":149474,"Æĭ":149475,"Æľ":149476,"Çij":149477,"Çĺ":149478,"Çŀ":149479,"Ç¥":149480,"Ç®":149481,"ɰ":149482,"ɶ":149483,"É·":149484,"ɽ":149485,"ÊĪ":149486,"ÊIJ":149487,"Ëİ":149488,"ËŁ":149489,"˦":149490,"˯":149491,"ÏIJ":149492,"Ïĵ":149493,"Ï¢":149494,"Ϥ":149495,"Ϫ":149496,"ÏŃ":14
9497,"Ï®":149498,"Ï»":149499,"Ñł":149500,"ÑŃ":149501,"Ò¨":149502,"ÓĿ":149503,"Ô¡":149504,"Ô·":149505,"Õī":149506,"Õĵ":149507,"Õĸ":149508,"Õļ":149509,"ÕĿ":149510,"Öİ":149511,"Ø¿":149512,"Úħ":149513,"Úį":149514,"ÚĶ":149515,"ÛĬ":149516,"Û¾":149517,"ÜĻ":149518,"ÝĴ":149519,"Ýĺ":149520,"ßĴ":149521,"ßĸ":149522,"à¤Ĭ":149523,"à¤IJ":149524,"à¦ı":149525,"à¦ĸ":149526,"à§Ł":149527,"મ":149528,"હ":149529,"à®ħ":149530,"à®Ĩ":149531,"à°¡":149532,"à°°":149533,"à²ļ":149534,"ಮ":149535,"ಯ":149536,"à´Ł":149537,"à´·":149538,"ൾ":149539,"à¶ij":149540,"à¶ŀ":149541,"༼":149542,"à½ĵ":149543,"áĢĵ":149544,"áĤ¦":149545,"áĥĸ":149546,"áĥŃ":149547,"áĥ¯":149548,"áħ¨":149549,"áħª":149550,"áĨ°":149551,"áĪģ":149552,"áĪİ":149553,"áĪĵ":149554,"áĪ¥":149555,"áβ":149556,"áĪ´":149557,"áĪ»":149558,"áīł":149559,"áī²":149560,"áī¶":149561,"áĬ£":149562,"áĬ¥":149563,"áĬª":149564,"áĭĺ":149565,"áĭ²":149566,"áĭ¶":149567,"áĮ£":149568,"áį¡":149569,"áį£":149570,"áݬ":149571,"áݾ":149572,"áIJ¡":149573,"áķķ":149574,"áĸ±":149575,"áĹIJ":149576,"áĹŃ":149577,"áĺī":149578,"áļ±":149579,"ἣ":149580,"áŀ¥":149581,"áŁĶ":149582,"áł£":149583,"áłª":149584,"áł°":149585,"áł´":149586,"á¤ĸ":149587,"ᥣ":149588,"á®":149589,"᮳":149590,"á¯":149591,"á¯Ļ":149592,"á°":149593,"á°į":149594,"á´Ĭ":149595,"á´¾":149596,"áµģ":149597,"áµİ":149598,"áµŀ":149599,"ᵤ":149600,"á¶ħ":149601,"á¶ĺ":149602,"á¶Ł":149603,"á¶¢":149604,"ᶤ":149605,"á¶±":149606,"á¶»":149607,"á¸ī":149608,"á¸ŀ":149609,"Ḻ":149610,"á¹ĵ":149611,"á¹Ĺ":149612,"Ṫ":149613,"áºĬ":149614,"áºı":149615,"áºĽ":149616,"á¼ĥ":149617,"á¼Į":149618,"Ἷ":149619,"á½Ĥ":149620,"á½ĵ":149621,"á½Ĺ":149622,"ὦ":149623,"á¾±":149624,"á¾´":149625,"á¿ĺ":149626,"á¿Ł":149627,"Ὸ":149628,"âģĺ":149629,"âĤij":149630,"âĤĽ":149631,"âĤ¿":149632,"âĦĩ":149633,"âĦŀ":149634,"âĦ±":149635,"âĩŁ":149636,"âĩ²":149637,"âΤ":149638,"âζ":149639,"âīĤ":149640,"âī¾":149641,"âĬ¨":149642,"âĬ³":149643,"âĬ·":149644,"âĭĮ":149645,"âĭĺ":149646,"âĮķ":149647,"âĮ¥":149648,"âĮµ":149649,"âĮº":149650,"âį£":149651,"âį²":149652,"âįµ":149653,"âİĩ":149654,"âıĥ":149655,"âıIJ":149656,"âıł":149657,"âı¤":149658,"âı¶":149659,"âı¸":149660,"âı¹":149661,"âijĤ":149662,"âĴ·":149663,"âĴº":149664,"âĵ¡":149665,"âĵ¤":149666,"â;":149667,"âĸĺ":149668,"âĸµ":149669,"âĹª":149670,"âĹ·":149671,"âĺ¨":149672,"âĺ«":149673,"âĺ²":149674,"âĺ³":149675,"âĻĨ":149676,"âļ¤":149677,"âļ¥":149678,"âĽĵ":149679,"⼴":149680,"âĽ¾":149681,"âŀ«":149682,"âŀ¿":149683,"⣷":149684,"â¤ij":149685,"⤫":149686,"⤶":149687,"⤽":149688,"⧪":149689,"â¨Ģ":149690,"⩽":149691,"⬡":149692,"⬢":149693,"⬤":149694,"â²ĸ":149695,"Ⲫ":149696,"âµĢ":149697,"⸮":149698,"⸽":149699,"ãĢł":149700,"ãĢ·":149701,"ãĦĮ":149702,"ãĦĺ":149703,"ãħij":149704,"ãĪİ":149705,"ãĪIJ":149706,"ãĬľ":149707,"ãĮĵ":149708,"ãĮł":149709,"ãİŁ":149710,"ãݤ":149711,"ãݧ":149712,"㬮":149713,"äĪ":149714,"äĪĢ":149715,"ä°":149716,"ä°Ģ":149717,"êħ":149718,"êħī":149719,"êĩĹ":149720,"êĪ":149721,"êĪį":149722,"ê§Ĥ":149723,"ê§Ĭ":149724,"êªĢ":149725,"ê²Ī":149726,"ê²į":149727,"ê³Ģ":149728,"êµł":149729,"ê½IJ":149730,"ê¾Ī":149731,"꿱":149732,"ëĥı":149733,"ëĦij":149734,"ëħ¤":149735,"ëĩ¸":149736,"ëμ":149737,"ëīħ":149738,"ëĬ£":149739,"ëĭº":149740,"ëįŀ":149741,"ëIJĮ":149742,"ëķ¸":149743,"ëĺł":149744,"ëĻĩ":149745,"ëĻĪ":149746,"ëľ½":149747,"ëŀĶ":149748,"ëłľ":149749,"ë£IJ":149750,"ë§Ģ":149751,"ë§Ĭ":149752,"ëªĢ":149753,"ë¬Ń":149754,"믾":149755,"ë³ľ":149756,"ë´Ĭ":149757,"ëµī":149758,"ë·ľ":149759,"ë¸Ģ":149760,"ë¹ĭ":149761,"ìģĦ":149762,"ìĤ£":149763,"ìĤ»":149764,"ìĦµ":149765,"ìħĴ":149766,"ìīĪ":149767,"ìīĶ":149768,"ìĬĮ":149769,"ìĬĻ":149770,"ìIJ´":149771,"ìĵº":149772,"ìķļ":149773,"ìķº":149774,"ìĸľ":149775,"ìĹª":149776,"ìĺľ":149777,
"ìϤ":149778,"ìļĽ":149779,"ìļº":149780,"ìĿħ":149781,"ìĿı":149782,"ìĿŃ":149783,"ìĿ¶":149784,"ìłĽ":149785,"ì¡Ī":149786,"ì¢ī":149787,"ì¢Ķ":149788,"ì©ł":149789,"ìŃĮ":149790,"쯩":149791,"ì´£":149792,"ì¸ķ":149793,"ì¹Ł":149794,"쾡":149795,"ì¿Ļ":149796,"íģĩ":149797,"íģī":149798,"íĩĢ":149799,"íζ":149800,"íĸij":149801,"íĸ¤":149802,"íĹħ":149803,"íľı":149804,"íĿĿ":149805,"ï¤Ĵ":149806,"ï¤ķ":149807,"郎":149808,"ï¥ħ":149809,"ï¥ĩ":149810,"ï¥ı":149811,"ï¥ļ":149812,"ï¥Ł":149813,"ï¦Ħ":149814,"ï¦Ī":149815,"令":149816,"囹":149817,"零":149818,"ï§ģ":149819,"ï§ĥ":149820,"ï§Ķ":149821,"ï§ł":149822,"ï§£":149823,"ï§®":149824,"ïŃIJ":149825,"ïŃĸ":149826,"ïѦ":149827,"ïŃ´":149828,"ïѵ":149829,"ïѶ":149830,"ïѸ":149831,"ï®Į":149832,"ï®İ":149833,"ï®ŀ":149834,"ï®Ł":149835,"ﮡ":149836,"ﮪ":149837,"ï¯Ķ":149838,"ï¯Ĺ":149839,"ï¯ļ":149840,"ï¯Ľ":149841,"ï¯Ŀ":149842,"ï¯Ł":149843,"ﯧ":149844,"ﯨ":149845,"ﯫ":149846,"ﯯ":149847,"ﯰ":149848,"ﯱ":149849,"ﯲ":149850,"ﯳ":149851,"ﯴ":149852,"ﯵ":149853,"ﯶ":149854,"ï°Ģ":149855,"ï±ħ":149856,"ï±Ķ":149857,"ï±´":149858,"ï²ģ":149859,"ï³ķ":149860,"ï·½":149861,"ï¸ķ":149862,"︱":149863,"ï¹£":149864,"ï¹½":149865,"ï»į":149866,"ï¾±":149867,"ðĿIJĻ":149868,"ðĿIJ½":149869,"ðĿij¤":149870,"ðĿij®":149871,"ðĿijµ":149872,"ðĿĴĥ":149873,"ðĿĴĦ":149874,"ðĿĵŃ":149875,"ðĿĵ·":149876,"ðĿĶĸ":149877,"ðĿĶŀ":149878,"ðĿĶ¢":149879,"ðĿͦ":149880,"ðĿͬ":149881,"ðĿķĦ":149882,"ðĿķĬ":149883,"ðĿķİ":149884,"ðĿķĻ":149885,"ðĿķľ":149886,"ðĿķŃ":149887,"ðĿķ³":149888,"ðĿķ¸":149889,"ðĿķ¾":149890,"ðĿĸī":149891,"ðĿĸı":149892,"ðĿĺĩ":149893,"ðĿĺī":149894,"ðĿĺĸ":149895,"ðĿĺĽ":149896,"ðĿĺŀ":149897,"ðĿĺ«":149898,"ðĿĺ¾":149899,"ðĿĻĩ":149900,"ðĿĻī":149901,"ðĿĻĭ":149902,"ðĿĻİ":149903,"ðĿĻĺ":149904,"ðĿĻ¥":149905,"ðĿļĥ":149906,"ðĿļIJ":149907,"ðĿļĶ":149908,"ðĿľĥ":149909,"ðŁĦ·":149910,"ðŁħĿ":149911,"ðŁħ¾":149912,"ðŁĨĤ":149913,"ðŁĨĵ":149914,"ðŁĮĤ":149915,"ðŁĮĨ":149916,"ðŁĮī":149917,"ðŁĮij":149918,"ðŁĮĺ":149919,"ðŁĮ©":149920,"ðŁĮ«":149921,"ðŁį¢":149922,"ðŁį¥":149923,"ðŁİĽ":149924,"ðŁİ¢":149925,"ðŁİ´":149926,"ðŁij¡":149927,"ðŁĴ¾":149928,"ðŁĵŃ":149929,"ðŁĶĪ":149930,"ðŁĶ¦":149931,"ðŁĶ²":149932,"ðŁĶ³":149933,"ðŁķĵ":149934,"ðŁķķ":149935,"ðŁķĺ":149936,"ðŁķŁ":149937,"ðŁķ·":149938,"ðŁĹ³":149939,"ðŁļĦ":149940,"ðŁļĶ":149941,"ðŁļĸ":149942,"ðŁĽIJ":149943,"ðŁĽ¤":149944,"ðŁĽ¸":149945,"ðŁł":149946,"ðŁł³":149947,"ðŁ¤¹":149948,"ðŁ¥ĥ":149949,"ðŁ¥¨":149950,"ðŁ¥ª":149951,"ðŁ¥¾":149952,"ð٦ĥ":149953,"ð٦Ĵ":149954,"ð٦Ļ":149955,"ðŁ¦¶":149956,"ðŁ§ł":149957,"ðŁ§ª":149958,"ð٧Ń":149959,"ðŁ§²":149960,"ð£·":149961,"ð£·Ń":149962,"ð¦ĺ":149963,"ð¦ĺĴ":149964,"Æij":149965,"ÇĻ":149966,"È®":149967,"Øł":149968,"ÚĦ":149969,"ÜĢ":149970,"ߢ":149971,"áīĢ":149972,"áĬIJ":149973,"áİł":149974,"áºŀ":149975,"ëĪŀ":149976,"ëķŁ":149977,"ë£ģ":149978,"ë¤Ĺ":149979,"ìĦ¥":149980,"ìħij":149981,"ìĸIJ":149982,"ìĽĽ":149983,"ì£ķ":149984,"íİı":149985,"íĽĵ":149986,"梁":149987,"ï³Ľ":149988,"ï´«":149989,"ðĸ§":149990,"ðĸ§·":149991,"ðĿķģ":149992,"ðŁIJª":149993,"ðŁĴĪ":149994,"ðŁĵł":149995,"ðŁķĽ":149996,"ðŁķ´":149997,"ÑĿ":149998,"ÓĬ":149999,"ॲ":150000,"પ":150001,"áĥ¤":150002,"áįIJ":150003,"á¶°":150004,"á¼Ŀ":150005,"Ὡ":150006,"âĭĭ":150007,"âĴ½":150008,"âϾ":150009,"â½Ķ":150010,"⾯":150011,"ãĦĴ":150012,"ãħļ":150013,"ëIJį":150014,"ë·ģ":150015,"ìĭĢ":150016,"ìļĿ":150017,"쥰":150018,"캴":150019,"íĭī":150020,"íĿ½":150021,"ï¦Ģ":150022,"樂":150023,"ï§ħ":150024,"ï§ĵ":150025,"ïѝ":150026,"ï®Ĩ":150027,"ðIJ¤ķ":150028,"ðĿIJŁ":150029,"ðĿĴħ":150030,"ðĿĵľ":150031,"ðĿͰ":150032,"ðĿĶ»":150033,"ðĿĺį":150034,"ðĿϝ":150035,"ðŁĦ½":150036,"ðŁħĤ":150037,"ðŁħĶ":150038,"ðŁħ½":150039,"ðŁĵ´":150040,"ð٧ĸ":150041,"ÓĴ":150042,"Ḳ":150043,"ëī¼":150044,"Çı":150045,"Èĵ":150046,"ʸ":150047,
"ÕĤ":150048,"Ûħ":150049,"ß¡":150050,"ߣ":150051,"ய":150052,"à°Ī":150053,"ಸ":150054,"ຮ":150055,"à¼ķ":150056,"áĢİ":150057,"áĨ¡":150058,"áIJĭ":150059,"áIJķ":150060,"áij¯":150061,"áŀĨ":150062,"á¨ķ":150063,"á©Ī":150064,"âģħ":150065,"âĨļ":150066,"âĶİ":150067,"âł©":150068,"â²Ĥ":150069,"â²Ķ":150070,"Ⲩ":150071,"ãĬļ":150072,"íĵ²":150073,"ðĿijĪ":150074,"ðĿij¬":150075,"ðĿij¹":150076,"ðĿĴ¾":150077,"ðĿĵ±":150078,"ðĿĵ½":150079,"ðĿķ¯":150080,"ðĿķ»":150081,"ðĿĺ½":150082,"ðĿļĨ":150083,"ðŁĦ°":150084,"ðŁIJ¨":150085,"Òķ":150086,"à²ħ":150087,"ï¨Ĩ":150088,"ðĿij°":150089,"ðŁĦ¸":150090,"Ôİ":150091,"Øį":150092,"Ùµ":150093,"ಶ":150094,"áĢĪ":150095,"áĺĹ":150096,"᳸":150097,"á¡¡":150098,"ᨲ":150099,"á©ģ":150100,"á´·":150101,"áµ§":150102,"âķ¨":150103,"âļģ":150104,"â¾Ŀ":150105,"ã̼":150106,"ãĦı":150107,"êĴ«":150108,"ꦥ":150109,"ꦩ":150110,"ꦲ":150111,"ìĺ¼":150112,"íĵIJ":150113,"ðĵĩ":150114,"ðĵĩ¼":150115,"ðĿķ¿":150116,"ðŁĽ´":150117,"먾":150118,"ವ":150119,"à´İ":150120,"à¼Ģ":150121,"âĩĸ":150122,"ãĪ«":150123,"âĵĢ":150124,"áħ´":150125,"áļ¾":150126,"áĽŀ":150127,"Ἣ":150128,"ᥴ":150129,"âĨĽ":150130,"âĨ¶":150131,"âĩ¤":150132,"âķŁ":150133,"âĺ·":150134,"âļIJ":150135,"ðŁ§´":150136,"á¹³":150137,"âĶį":150138,"âĶĴ":150139,"âĶ©":150140,"âͦ":150141,"â¾µ":150142,"àªľ":150143,"ત":150144,"âĩĻ":150145,"âͱ":150146,"âķĢ":150147,"â½Ĭ":150148,"ï½Ł":150149,"ଡ":150150,"ðł®":150151,"ðł®·":150152,"âķĥ":150153,"â°Ķ":150154,"ãĬ¦":150155,"ðŁİIJ":150156,"ãĩ°":150157,"â¼Ŀ":150158,"â¾Ķ":150159,"â½Ĵ":150160,"âłĴ":150161,"都":150162,"ï©Ĵ":150163,"免":150164,"ï©ĸ":150165,"ðĵı¸":150166,"ãĮĥ":150167,"ðĸ¤":150168,"ðĸ¤IJ":150169,"ï¦Ń":150170,"âĬħ":150171,"â¾³":150172,"ä´¥":150173,"ï©ķ":150174,"ðŁĮĶ":150175,"áŀĭ":150176,"âļį":150177,"â¼ĭ":150178,"ãİĺ":150179,"ðIJĮ²":150180,"É©":150181,"áİij":150182,"âĨ®":150183,"âĩĥ":150184,"âļİ":150185,"ãĩ±":150186,"ãĭ©":150187,"ãĮ¶":150188,"êĻª":150189,"ëݬ":150190,"ï¨IJ":150191,"ï¨Ľ":150192,"ï©Ĭ":150193,"ï©į":150194,"ðĵħ":150195,"ðĵħº":150196,"Ï¡":150197,"Èij":150198,"ÉĤ":150199,"Ôĵ":150200,"ßİ":150201,"à´§":150202,"áĢī":150203,"áĢĭ":150204,"áĢij":150205,"áĢł":150206,"áļĻ":150207,"á¨Ħ":150208,"ᨩ":150209,"ᨹ":150210,"á©ĵ":150211,"ᬾ":150212,"á´Ļ":150213,"áµij":150214,"âĤŃ":150215,"âĨ°":150216,"âľģ":150217,"â½IJ":150218,"ãĭ¯":150219,"ãĮ½":150220,"íĨ¢":150221,"錄":150222,"ðŁĤ":150223,"ðŁĤ»":150224,"ÈĴ":150225,"ͺ":150226,"Ô¥":150227,"Õij":150228,"Ú¶":150229,"à§İ":150230,"à¶®":150231,"àºĸ":150232,"àºľ":150233,"ຽ":150234,"áĥ»":150235,"áħ¯":150236,"áĭŀ":150237,"áĸķ":150238,"á´Ī":150239,"á¶Ĩ":150240,"Ḿ":150241,"á¹¼":150242,"Ῠ":150243,"âĦĭ":150244,"âĦŃ":150245,"âα":150246,"âĮĵ":150247,"âĶĩ":150248,"âĶ¢":150249,"â±®":150250,"â²Ħ":150251,"ãĩ¾":150252,"ãά":150253,"븡":150254,"ìIJī":150255,"íĻĽ":150256,"ðĿķª":150257,"ƹ":150258,"Ͳ":150259,"Óģ":150260,"Û¼":150261,"ফ":150262,"áħŁ":150263,"áīĨ":150264,"áįĪ":150265,"áºĸ":150266,"á½ī":150267,"â͏":150268,"⽩":150269,"êľ":150270,"êľ¥":150271,"êµħ":150272,"ëĤĶ":150273,"ëĦł":150274,"ëĩĹ":150275,"ëĻĿ":150276,"ìļ¯":150277,"ìļ·":150278,"ìŁĽ":150279,"ì·IJ":150280,"íŁ¬":150281,"íŁ®":150282,"íŁ°":150283,"ï¦Ĩ":150284,"鈴":150285,"ï²ŀ":150286,"ﳤ":150287,"ï³¥":150288,"ðIJĮ¸":150289,"ðĿĶı":150290,"ðĿķ®":150291,"ðĿĺ£":150292,"à¦Ī":150293,"âıı":150294,"ãĦĸ":150295,"ê²ĩ":150296,"ëĸĺ":150297,"ëľ·":150298,"ëŀĴ":150299,"ë¡ĵ":150300,"ë¢ī":150301,"ë£ĥ":150302,"ë§ĭ":150303,"ë²ĭ":150304,"ìĤ·":150305,"ìĪķ":150306,"ìĮ¨":150307,"ìĵ»":150308,"ìĸĬ":150309,"ìϬ":150310,"ìĿ»":150311,"ì¦ģ":150312,"쵤":150313,"ì·ĥ":150314,"íĢľ":150315,"íħī":150316,"íįł":150317,"íıħ":150318,"íij±":150319,"íķķ":150320,"íĸł":150321,"íĿķ":150322,"ÆĻ":150323,"Æļ":150324
,"Æŀ":150325,"Çĥ":150326,"ÇĬ":150327,"Çľ":150328,"Ǥ":150329,"ÇŃ":150330,"ǹ":150331,"ÈĢ":150332,"Èģ":150333,"Èħ":150334,"Èī":150335,"ÈĹ":150336,"ÈŁ":150337,"Ȥ":150338,"È¥":150339,"Ȩ":150340,"ȵ":150341,"Ⱥ":150342,"È»":150343,"ÉĮ":150344,"É®":150345,"Êħ":150346,"Ê¥":150347,"ʨ":150348,"Ëĵ":150349,"ËĶ":150350,"Ëł":150351,"Ë£":150352,"˸":150353,"Í´":150354,"ÏĹ":150355,"Ïĺ":150356,"ÏĻ":150357,"Ïļ":150358,"ÏĿ":150359,"Ϩ":150360,"Ϭ":150361,"Ͼ":150362,"Ï¿":150363,"Ѫ":150364,"ÒĢ":150365,"Òľ":150366,"Ò¼":150367,"Ò½":150368,"ÓĤ":150369,"Óħ":150370,"Óĩ":150371,"Óį":150372,"Óĸ":150373,"ÓŁ":150374,"Ó«":150375,"Ó±":150376,"ÔĨ":150377,"Ôĩ":150378,"Ôº":150379,"Õĭ":150380,"Öī":150381,"ØĪ":150382,"ØĬ":150383,"ؽ":150384,"ؾ":150385,"Ù·":150386,"ÚĤ":150387,"ÚĬ":150388,"Úĸ":150389,"ÚĹ":150390,"Ú£":150391,"Ú«":150392,"Ú¸":150393,"ÛĢ":150394,"Ûį":150395,"Û½":150396,"Üī":150397,"ܤ":150398,"ݧ":150399,"Ý´":150400,"Þĥ":150401,"Þ¤":150402,"Þ¥":150403,"ßļ":150404,"߼":150405,"ߤ":150406,"àłį":150407,"àłĵ":150408,"àł³":150409,"à¡¢":150410,"à¥ł":150411,"à§ł":150412,"৺":150413,"à¨Ĭ":150414,"à¨IJ":150415,"ਮ":150416,"ਯ":150417,"ਰ":150418,"ਸ":150419,"àªĨ":150420,"ળ":150421,"વ":150422,"ઽ":150423,"à¬Į":150424,"à¬ĺ":150425,"ଽ":150426,"à®ĥ":150427,"ஸ":150428,"à°Ĩ":150429,"à°ķ":150430,"à°¦":150431,"à²Ĩ":150432,"à²Ĭ":150433,"à²Į":150434,"à²IJ":150435,"à²Ľ":150436,"ತ":150437,"ದ":150438,"ಪ":150439,"ಲ":150440,"ಹ":150441,"à´Ĩ":150442,"à´ı":150443,"à´Ĺ":150444,"à´«":150445,"à´¹":150446,"ൺ":150447,"ൽ":150448,"à¶ħ":150449,"à¶Ĭ":150450,"à¶Ķ":150451,"à¶§":150452,"à¶«":150453,"à¶°":150454,"à¼Ħ":150455,"à¼ħ":150456,"à¼Ĭ":150457,"à½Ļ":150458,"ཡ":150459,"ཧ":150460,"à¿Ģ":150461,"à¿Ļ":150462,"áĢĿ":150463,"á̧":150464,"áĢ©":150465,"áĢ¿":150466,"áģµ":150467,"áĤģ":150468,"áĤ½":150469,"áĥĤ":150470,"áĥª":150471,"áĦĬ":150472,"áĦ¢":150473,"áħ¦":150474,"áħŃ":150475,"áĨ®":150476,"áĨ±":150477,"áĨ»":150478,"áĩ":150479,"áĩĤ":150480,"áĪħ":150481,"áĪī":150482,"áĪĮ":150483,"áĪIJ":150484,"áĪĴ":150485,"áĪĻ":150486,"áĪļ":150487,"áĪľ":150488,"áĪŀ":150489,"áĪ©":150490,"áγ":150491,"áĪº":150492,"áν":150493,"áīħ":150494,"áī¢":150495,"áī±":150496,"áī´":150497,"áĬĥ":150498,"áĬį":150499,"áĬĸ":150500,"áĬ®":150501,"áĬ¸":150502,"áĭĽ":150503,"áĭĿ":150504,"áĭ³":150505,"áĮģ":150506,"áĮħ":150507,"áĮ¥":150508,"áĮ¦":150509,"áĮ¨":150510,"áįĬ":150511,"áįį":150512,"áįķ":150513,"áįĸ":150514,"áį¢":150515,"áį¤":150516,"áİĴ":150517,"áݪ":150518,"áıģ":150519,"áıIJ":150520,"áıŁ":150521,"áIJĤ":150522,"áIJĸ":150523,"áIJĿ":150524,"áIJŀ":150525,"áIJŁ":150526,"áIJł":150527,"áijĸ":150528,"áĴĭ":150529,"áĴį":150530,"áĴ¡":150531,"áĵ«":150532,"áĶķ":150533,"áķĭ":150534,"áķij":150535,"áķĻ":150536,"áķļ":150537,"áķĽ":150538,"áķ¤":150539,"áķ¦":150540,"áķ®":150541,"áķ¼":150542,"áĸĵ":150543,"áĹĹ":150544,"áĹ¢":150545,"áĹ¯":150546,"áĹ·":150547,"áĺĦ":150548,"áĺij":150549,"áĽĤ":150550,"áĽĻ":150551,"áŀį":150552,"áłĨ":150553,"áł¡":150554,"᳦":150555,"áł®":150556,"áł¯":150557,"áł²":150558,"áł·":150559,"á¡į":150560,"á¡ŀ":150561,"ᡤ":150562,"á¡´":150563,"ᡵ":150564,"á¤ĵ":150565,"á¥ĸ":150566,"ᥰ":150567,"ᨦ":150568,"ᨧ":150569,"ᨨ":150570,"ᨪ":150571,"ᨬ":150572,"ᨯ":150573,"ᨳ":150574,"ᨵ":150575,"á©ĥ":150576,"á¬ķ":150577,"áŃ£":150578,"á±":150579,"á±ļ":150580,"á²ł":150581,"á´ĵ":150582,"á´¶":150583,"áµĤ":150584,"áµĮ":150585,"áµ¥":150586,"áµ´":150587,"á¶ĩ":150588,"á¸Ī":150589,"ḳ":150590,"ḧ":150591,"Ḵ":150592,"Ḿ":150593,"á¹Ģ":150594,"á¹ĸ":150595,"á¹Ł":150596,"á¹ł":150597,"ṫ":150598,"á¹±":150599,"á¹·":150600,"ṿ":150601,"áºĦ":150602,"áºį":150603,"áºij":150604,"áºĹ":150605,"á¼ī":150606,"á¼ĵ":150607,"á¼Ń":150608,"á½ĭ":150609,"á½Ĵ":150610,"á
½ł":150611,"á½£":150612,"á¾Ħ":150613,"á¾ı":150614,"á¾ij":150615,"á¾Ĺ":150616,"ᾦ":150617,"á¾§":150618,"á¾¾":150619,"á¿Ħ":150620,"á¿ĵ":150621,"á¿¡":150622,"Ῥ":150623,"âģļ":150624,"âĤĮ":150625,"âĦģ":150626,"âĦĶ":150627,"âĦ£":150628,"âĦ§":150629,"âĦ¯":150630,"âĦ°":150631,"âĦ´":150632,"âħħ":150633,"âĨľ":150634,"âĨ«":150635,"âĨŃ":150636,"âĨ±":150637,"âĨ¹":150638,"âĨ½":150639,"âĩĩ":150640,"âĩľ":150641,"âĩµ":150642,"âĪī":150643,"âĪĬ":150644,"âĪĸ":150645,"âĪľ":150646,"âξ":150647,"âīĢ":150648,"âīĭ":150649,"âīĮ":150650,"âīĵ":150651,"âīľ":150652,"âī´":150653,"âī¿":150654,"âĬĬ":150655,"âĬĭ":150656,"âĬĶ":150657,"âĬĸ":150658,"âĬ£":150659,"âĬ¦":150660,"âĭİ":150661,"âĭª":150662,"âĭ²":150663,"âĮ¦":150664,"âĮ§":150665,"âįº":150666,"âİĪ":150667,"âݨ":150668,"âݬ":150669,"âݳ":150670,"âݼ":150671,"âݾ":150672,"âıĮ":150673,"âıļ":150674,"âı«":150675,"âı¯":150676,"âıµ":150677,"âĴľ":150678,"âĴĿ":150679,"âĴ«":150680,"âĵĦ":150681,"âĵĬ":150682,"âĵĻ":150683,"âĵ©":150684,"âĶij":150685,"âĶĻ":150686,"âĶļ":150687,"âĶ¥":150688,"âķħ":150689,"âķī":150690,"âķį":150691,"âķı":150692,"âķŀ":150693,"âĸļ":150694,"âĸ¯":150695,"âĹĥ":150696,"âĹļ":150697,"âŬ":150698,"âĹ´":150699,"âĺĪ":150700,"âĺ¤":150701,"âĺ¥":150702,"âĺ§":150703,"âĺ¬":150704,"âĻģ":150705,"âϱ":150706,"âļĥ":150707,"âļĦ":150708,"âļħ":150709,"âļı":150710,"âļļ":150711,"âļŀ":150712,"âļŁ":150713,"âļ±":150714,"âļ²":150715,"âľĢ":150716,"⾣":150717,"âľ¢":150718,"âĿµ":150719,"âŁ¡":150720,"⣦":150721,"⣧":150722,"âŁ³":150723,"âŁ¾":150724,"âŁ¿":150725,"âłĩ":150726,"â¤Ħ":150727,"⤺":150728,"â¥Ĥ":150729,"⥹":150730,"â§ī":150731,"â§¼":150732,"â§½":150733,"â¨į":150734,"â¬Ĭ":150735,"⬣":150736,"âŃŀ":150737,"â®ŀ":150738,"⮳":150739,"â¯Ī":150740,"â¯ij":150741,"ⱳ":150742,"â±±":150743,"â²Ń":150744,"â´¹":150745,"âµķ":150746,"⸾":150747,"⺫":150748,"â¼Ĩ":150749,"â¼ł":150750,"â½Ł":150751,"â½¼":150752,"â¾Ľ":150753,"â¾§":150754,"â¿ĥ":150755,"â¿»":150756,"ãĤķ":150757,"ãĤŁ":150758,"ãĦĽ":150759,"ãĦ¡":150760,"ãĦ¶":150761,"ãĦº":150762,"ãħĴ":150763,"ãħŁ":150764,"ãĨĢ":150765,"ãĩ»":150766,"ãĪij":150767,"ãĪŃ":150768,"ãĪ®":150769,"ãγ":150770,"ãι":150771,"ãī¥":150772,"ãī¦":150773,"ãī¹":150774,"ãī¿":150775,"ãĬŀ":150776,"ãĬ¨":150777,"ãĭij":150778,"ãĭ¥":150779,"ãĭ´":150780,"ãĭº":150781,"ãİĦ":150782,"ãİķ":150783,"ãݯ":150784,"ãıĤ":150785,"ãıĪ":150786,"ãıĵ":150787,"ãıĸ":150788,"ãı±":150789,"ãIJ±":150790,"ãŁģ":150791,"ã¢":150792,"㢨":150793,"ã¨":150794,"㨳":150795,"㫪":150796,"ã«´":150797,"ã¶³":150798,"㺾":150799,"äĢ":150800,"äĢĢ":150801,"äĭ":150802,"äĭĮ":150803,"äĮĢ":150804,"äIJĢ":150805,"äłĢ":150806,"äł":150807,"äł¼":150808,"ä§":150809,"ä§ŀ":150810,"䨰":150811,"䨺":150812,"ä´Ģ":150813,"ä·":150814,"ä·ħ":150815,"ä·¸":150816,"êĤ":150817,"êĤ«":150818,"êĮ":150819,"êĮ¼":150820,"êį":150821,"êį²":150822,"êĴµ":150823,"êĵ":150824,"êĵ½":150825,"êĻŃ":150826,"êĿĽ":150827,"êĿ¥":150828,"êŀ":150829,"êŀĬ":150830,"ê¦Ĩ":150831,"ê¦ĩ":150832,"ê¦Ł":150833,"ꦨ":150834,"ê§Ī":150835,"ê©":150836,"ê©Ł":150837,"êªĭ":150838,"êªij":150839,"êªķ":150840,"êªĹ":150841,"êªľ":150842,"ꪮ":150843,"ꪱ":150844,"ꪻ":150845,"ꪼ":150846,"ê«Ģ":150847,"ê«Ŀ":150848,"ê°ĥ":150849,"ê°ĺ":150850,"ê±ľ":150851,"ê²ĵ":150852,"ê²ļ":150853,"ê³Ļ":150854,"ê³¾":150855,"ê´Ĺ":150856,"ê´Ļ":150857,"êµĽ":150858,"ê¶ĥ":150859,"ê¶ķ":150860,"궨":150861,"긩":150862,"긿":150863,"ê¹Ħ":150864,"ê¹Ĩ":150865,"ê¹ī":150866,"ê¹ĵ":150867,"ê¹¢":150868,"ê¹£":150869,"깸":150870,"꺳":150871,"ê¿ı":150872,"ê¿ķ":150873,"ê¿§":150874,"ëĢ©":150875,"ëģħ":150876,"ëĥµ":150877,"ëĦĸ":150878,"ëĦĹ":150879,"ëĦ¢":150880,"ëħĤ":150881,"ëĨIJ":150882,"ëĩľ":150883,"ëĪĭ":150884,"ëĪļ":150885,"ëīį":150886,"ëī¨":150887,"ëĬļ":150888,"ëĬ¡":150889,"
ëĭľ":150890,"ëĭª":150891,"ëĮĺ":150892,"ëĮ¤":150893,"ëĮ¸":150894,"ëİŁ":150895,"ëı¨":150896,"ëIJĦ":150897,"ëIJı":150898,"ëIJ´":150899,"ëIJ¸":150900,"ëijģ":150901,"ëij¿":150902,"ëĴ¨":150903,"ëĵ·":150904,"ëĶ®":150905,"ëͲ":150906,"ëķ§":150907,"ëĸĶ":150908,"ëĸª":150909,"ëĺŃ":150910,"ëļĢ":150911,"ëļł":150912,"ëĽĶ":150913,"뼩":150914,"ëľħ":150915,"ëŀķ":150916,"ëŀ°":150917,"ëŁIJ":150918,"ëł¡":150919,"ë¡ŀ":150920,"ë¡£":150921,"롵":150922,"ë£Ħ":150923,"ë£į":150924,"뤳":150925,"ë¦į":150926,"ë¦ı":150927,"릳":150928,"ë§Ħ":150929,"ë§Ĩ":150930,"ë§į":150931,"ë§ľ":150932,"ë§«":150933,"ë§»":150934,"먮":150935,"ë©Ĥ":150936,"ë©Ń":150937,"몴":150938,"묾":150939,"묳":150940,"묫":150941,"묾":150942,"ëѬ":150943,"ë®ĺ":150944,"뮹":150945,"ë¯ķ":150946,"ë¯ľ":150947,"ë°¨":150948,"ë°ª":150949,"ë±Ķ":150950,"ë²ĺ":150951,"ë²Ľ":150952,"ë²±":150953,"ë²´":150954,"ë´½":150955,"뵤":150956,"뵨":150957,"ë·Ĺ":150958,"ë·ĺ":150959,"ë¸ĵ":150960,"븾":150961,"빪":150962,"ëºĥ":150963,"ëºĺ":150964,"뺵":150965,"ë»´":150966,"ë¼IJ":150967,"ë¾Ķ":150968,"ìģŃ":150969,"ìĤł":150970,"ìĤ®":150971,"ìĥı":150972,"ìĥĻ":150973,"ìĦº":150974,"ìħ¢":150975,"ìĨĢ":150976,"ìĨħ":150977,"ìĨ¤":150978,"ìĨ¦":150979,"ìĨ¬":150980,"ìĩ±":150981,"ìε":150982,"ìĭ¨":150983,"ìĭ´":150984,"ìĮ°":150985,"ìįľ":150986,"ìİĹ":150987,"ìİĺ":150988,"ìݼ":150989,"ìijī":150990,"ìijĿ":150991,"ìij»":150992,"ìĴĶ":150993,"ìĴ¯":150994,"ìĵ©":150995,"ìķIJ":150996,"ìķĸ":150997,"ìĸł":150998,"ìĸ¾":150999,"ìĹĥ":151000,"ìĹĹ":151001,"ìĹľ":151002,"ìŨ":151003,"ìĺĤ":151004,"ìĺĦ":151005,"ìĺı":151006,"ìĺ¾":151007,"ìĺ¿":151008,"ìľ§":151009,"ìĿIJ":151010,"ìĿĸ":151011,"ìĿ·":151012,"ìŀį":151013,"ìŀı":151014,"ìŀ¨":151015,"ìŀª":151016,"ìŀ³":151017,"ìł¡":151018,"ìł´":151019,"ìł¹":151020,"ì¡Ģ":151021,"졪":151022,"졵":151023,"ì¢IJ":151024,"좨":151025,"ì£Į":151026,"ì£Ļ":151027,"죳":151028,"ì¦ij":151029,"ì§¥":151030,"ì§´":151031,"ì§¾":151032,"ì¨ĵ":151033,"ì¨ķ":151034,"ì©°":151035,"ì©»":151036,"쩼":151037,"ìªĹ":151038,"ì¬Ķ":151039,"ì¬ĺ":151040,"ì®®":151041,"ì¯ķ":151042,"ì¯ĺ":151043,"ì°İ":151044,"ì°¯":151045,"ì±ĥ":151046,"ì±µ":151047,"ì²§":151048,"ì²®":151049,"첯":151050,"쳬":151051,"ì´ĭ":151052,"ì´¢":151053,"ìµ¥":151054,"ì¶£":151055,"ì¸Ī":151056,"ì¸Ļ":151057,"캤":151058,"ìºŃ":151059,"컽":151060,"ì¼Ļ":151061,"콬":151062,"ì¾Ģ":151063,"ì¿ħ":151064,"쿽":151065,"íĢħ":151066,"íģ¦":151067,"íĤħ":151068,"íĥ¶":151069,"íĥ¹":151070,"íĦĶ":151071,"íħ£":151072,"íĨĦ":151073,"íĨ§":151074,"íĨ¹":151075,"íĩ¼":151076,"íī¤":151077,"íĬ½":151078,"íĭĤ":151079,"íĭij":151080,"íįĪ":151081,"íįĻ":151082,"íį¿":151083,"íݶ":151084,"íIJĿ":151085,"íĴľ":151086,"íĵĿ":151087,"íĵª":151088,"íĵ±":151089,"íĵ·":151090,"íĵ¼":151091,"íĶĻ":151092,"íĶł":151093,"íķļ":151094,"íķĽ":151095,"íķŀ":151096,"íķŁ":151097,"íķ§":151098,"íķ¶":151099,"íĸĬ":151100,"íĸĭ":151101,"íĸį":151102,"íĸĶ":151103,"íĸĺ":151104,"íĸ¡":151105,"íĸ¬":151106,"íĹ£":151107,"íĹ¿":151108,"íĺĸ":151109,"íĺŃ":151110,"íļ°":151111,"íĽį":151112,"íĽ½":151113,"íĿŁ":151114,"íĿŃ":151115,"íĿ´":151116,"íŀľ":151117,"ï¤ī":151118,"ï¤Ń":151119,"爐":151120,"蘆":151121,"祿":151122,"ï¥Ģ":151123,"ï¥ij":151124,"ï¥Ĵ":151125,"ï¥ķ":151126,"ï¥ĺ":151127,"ï¥Ļ":151128,"參":151129,"塞":151130,"殺":151131,"勵":151132,"ï¦ĭ":151133,"ï¦ı":151134,"ï¦Ķ":151135,"ï¦ĸ":151136,"ï¦ĺ":151137,"ï¦Ľ":151138,"ï¦ł":151139,"瑩":151140,"羚":151141,"了":151142,"僚":151143,"料":151144,"ï§Ĩ":151145,"ï§ĸ":151146,"ï§Ľ":151147,"ï§ŀ":151148,"ï§Ł":151149,"ï§§":151150,"ï§³":151151,"狀":151152,"ï§½":151153,"ï¨ĥ":151154,"ï¨ļ":151155,"諸":151156,"ï©Ł":151157,"ﬤ":151158,"שּׁ":151159,"לּ":151160,"ïŃĴ":151161,"ïŃķ":151162,"ïŃĽ":151163,"ïŃĿ":151164,"ïŃŀ":151165,"ïŃŁ":151166,"ïѤ":151167,"ïѧ":151168,"ïѨ"
:151169,"ïŃ®":151170,"ïѰ":151171,"ïѱ":151172,"ïŃ·":151173,"ïѹ":151174,"ïŃ»":151175,"ï®Ģ":151176,"ï®ĥ":151177,"ï®Ħ":151178,"ï®ħ":151179,"ï®į":151180,"ï®Ĵ":151181,"ï®ĵ":151182,"ï®ķ":151183,"ﮦ":151184,"ï®®":151185,"ï®°":151186,"ï¯ĵ":151187,"ï¯ľ":151188,"ﯩ":151189,"ﯪ":151190,"ﯬ":151191,"ï¯Ń":151192,"ﯮ":151193,"ﯷ":151194,"ﯹ":151195,"ﯻ":151196,"ﯼ":151197,"ï°ĥ":151198,"ï°Į":151199,"ï°IJ":151200,"ï°ĺ":151201,"ï°Ļ":151202,"ï°ľ":151203,"ï°ŀ":151204,"ï°¢":151205,"ï°®":151206,"ï°°":151207,"ï°¼":151208,"ï°¿":151209,"ï±Ģ":151210,"ï±ģ":151211,"ï±Ī":151212,"ï±ĭ":151213,"ï±ı":151214,"ï±Ń":151215,"ï²Ģ":151216,"ï²ĩ":151217,"ï²Ī":151218,"ï²ĭ":151219,"ï²İ":151220,"ï²Ĵ":151221,"ï²ľ":151222,"ï²ł":151223,"ﲬ":151224,"ï²»":151225,"ï³ĩ":151226,"ï³Ķ":151227,"ï³£":151228,"ﳫ":151229,"ï´ĺ":151230,"ï´°":151231,"ï´½":151232,"ï¶":151233,"ï¶°":151234,"ï¸ĸ":151235,"︴":151236,"︹":151237,"ï¹į":151238,"ï¹Ĺ":151239,"ï¹¢":151240,"﹤":151241,"﹩":151242,"ï¹±":151243,"ï¾°":151244,"ï¿Ĥ":151245,"ï¿®":151246,"ðIJĮ°":151247,"ðIJĮ¹":151248,"ðIJĮº":151249,"ðIJĮ½":151250,"ðIJįĤ":151251,"ðIJįĥ":151252,"ðIJįĦ":151253,"ðIJİ":151254,"ðIJݹ":151255,"ðIJ¤Ĥ":151256,"ðIJ¤į":151257,"ðIJ¤ı":151258,"ðIJ¤ĵ":151259,"ðIJŃī":151260,"ðIJŃį":151261,"ðIJ°ĩ":151262,"ðIJ°°":151263,"ðijĤ":151264,"ðijĤĦ":151265,"ðijĺ":151266,"ðijĺģ":151267,"ðĴĢ":151268,"ðĴ̏":151269,"ðĴģ":151270,"ðĴģº":151271,"ðĴĦ":151272,"ðĴĦ·":151273,"ðĴĬ":151274,"ðĴĬij":151275,"ðĴĭ":151276,"ðĴĭĹ":151277,"ðĴĮ":151278,"ðĴĮ¨":151279,"ðĵĥ¢":151280,"ðĵĥ°":151281,"ðĸł":151282,"ðĸłļ":151283,"ðĿĦĥ":151284,"ðĿĦħ":151285,"ðĿĦķ":151286,"ðĿĦĻ":151287,"ðĿĦ±":151288,"ðĿĦ´":151289,"ðĿĦ¹":151290,"ðĿħİ":151291,"ðĿħª":151292,"ðĿĨ£":151293,"ðĿĨ³":151294,"ðĿĨ¹":151295,"ðĿĩĬ":151296,"ðĿĩĹ":151297,"ðĿĩļ":151298,"ðĿĩľ":151299,"ðĿĩł":151300,"ðĿIJī":151301,"ðĿIJĸ":151302,"ðĿIJĺ":151303,"ðĿIJ£":151304,"ðĿIJ±":151305,"ðĿijĬ":151306,"ðĿijŃ":151307,"ðĿij¼":151308,"ðĿij½":151309,"ðĿĴ°":151310,"ðĿĴ·":151311,"ðĿĴ¿":151312,"ðĿĵģ":151313,"ðĿĵĭ":151314,"ðĿĵİ":151315,"ðĿĵĴ":151316,"ðĿĵĺ":151317,"ðĿĵ¢":151318,"ðĿĵ¦":151319,"ðĿĵ«":151320,"ðĿĵ¿":151321,"ðĿĶİ":151322,"ðĿͱ":151323,"ðĿĶ´":151324,"ðĿĶ·":151325,"ðĿ͏":151326,"ðĿͽ":151327,"ðĿķĤ":151328,"ðĿķĥ":151329,"ðĿķĭ":151330,"ðĿķı":151331,"ðĿķIJ":151332,"ðĿķ¥":151333,"ðĿķ´":151334,"ðĿķº":151335,"ðĿĸIJ":151336,"ðĿĸĽ":151337,"ðĿĸĿ":151338,"ðĿĸŀ":151339,"ðĿĹ©":151340,"ðĿĹ³":151341,"ðĿĹ½":151342,"ðĿĺĬ":151343,"ðĿĺĭ":151344,"ðĿĺĶ":151345,"ðĿĺ±":151346,"ðĿĺ´":151347,"ðĿĺ¿":151348,"ðĿĻĴ":151349,"ðĿĻĿ":151350,"ðĿĻŁ":151351,"ðĿϬ":151352,"ðĿĻŃ":151353,"ðĿĻ»":151354,"ðĿϾ":151355,"ðĿļĪ":151356,"ðĿļĭ":151357,"ðĿļij":151358,"ðĿļŁ":151359,"ðĿļł":151360,"ðĿļ£":151361,"ðĿĽ½":151362,"ðĿľĤ":151363,"ðĿľĶ":151364,"ðĿľĻ":151365,"ðŁĢ":151366,"ðŁĢĦ":151367,"ðŁĦ²":151368,"ðŁĦ¶":151369,"ðŁħIJ":151370,"ðŁħĸ":151371,"ðŁħļ":151372,"ðŁħĽ":151373,"ðŁħ¦":151374,"ðŁħ¶":151375,"ðŁħ»":151376,"ðŁħ¼":151377,"ðŁĨĥ":151378,"ðŁĨĨ":151379,"ðŁĨİ":151380,"ðŁĪ¯":151381,"ðŁĪ²":151382,"ðŁĪ¹":151383,"ðŁĮĩ":151384,"ðŁĮĵ":151385,"ðŁįĺ":151386,"ðŁİij":151387,"ðŁİ¿":151388,"ðŁıı":151389,"ðŁıĴ":151390,"ðŁı©":151391,"ðŁı¯":151392,"ðŁIJĢ":151393,"ðŁijĿ":151394,"ðŁĴ¹":151395,"ðŁĴº":151396,"ðŁĵŁ":151397,"ðŁĵª":151398,"ðŁĵ¼":151399,"ðŁĶĢ":151400,"ðŁĶĤ":151401,"ðŁĶĥ":151402,"ðŁĶĩ":151403,"ðŁĶĵ":151404,"ðŁĶ¢":151405,"ðŁĶ¤":151406,"ðŁĶ©":151407,"ðŁķĸ":151408,"ðŁķļ":151409,"ðŁķľ":151410,"ðŁķĿ":151411,"ðŁķŀ":151412,"ðŁķł":151413,"ðŁķ¢":151414,"ðŁķ³":151415,"ðŁĸĩ":151416,"ðŁĸij":151417,"ðŁĸ¶":151418,"ðŁĹģ":151419,"Ѩ":151420,"Úİ":151421,"á¡Į":151422,"Ḱ":151423,"áºĢ":151424,"á¼®":151425,"á½Ŀ":151426,"âĦ¬":151427,"âļ§":151428,"⼤":151429,"㳬":151430,"ê
Ļĭ":151431,"ê¸ij":151432,"ëĶī":151433,"ëĹį":151434,"ë¡ij":151435,"ë¯ij":151436,"ë»ħ":151437,"ë¼Ŀ":151438,"ìĦIJ":151439,"ìī¡":151440,"ìĭ²":151441,"ìı±":151442,"ìŤ":151443,"ìĿ©":151444,"ìĿ¿":151445,"ìŁĻ":151446,"ìł°":151447,"ì¥ī":151448,"íĬŃ":151449,"íķ®":151450,"ï®ı":151451,"ðŁħ±":151452,"ðŁĨĴ":151453,"ðŁķĭ":151454,"Éĺ":151455,"Êĵ":151456,"Õĥ":151457,"à´´":151458,"à½ħ":151459,"áĨº":151460,"áĪĬ":151461,"á΍":151462,"áξ":151463,"áīIJ":151464,"áĮĥ":151465,"áĮ½":151466,"áĶŃ":151467,"áłĤ":151468,"ᳬ":151469,"ᨸ":151470,"á©ĭ":151471,"á¶ı":151472,"á¾Ķ":151473,"á¿IJ":151474,"á¿ļ":151475,"âĻĻ":151476,"âļĤ":151477,"âļĹ":151478,"â¡¢":151479,"⤦":151480,"ëĸ°":151481,"ë¤Ĥ":151482,"ë§ł":151483,"ë±ĭ":151484,"ë±IJ":151485,"ìĽ¢":151486,"ìľ¾":151487,"ì³ħ":151488,"ì»ģ":151489,"íģ»":151490,"íĥĻ":151491,"íĵĸ":151492,"íĵŃ":151493,"íķ±":151494,"íĽľ":151495,"ï¤ħ":151496,"ï¤Ĩ":151497,"ï¦ĥ":151498,"ï§©":151499,"ï¨Ĥ":151500,"ðIJ¤Ķ":151501,"ðIJŃĵ":151502,"ðIJ°¼":151503,"ðĿĵŀ":151504,"ðĿĵ°":151505,"ðĿĻľ":151506,"ðĿļģ":151507,"ðŁħ¢":151508,"ðŁıĩ":151509,"Ȳ":151510,"ʶ":151511,"ÔĪ":151512,"Ôij":151513,"Ýĵ":151514,"Ý¥":151515,"à¤ij":151516,"ॱ":151517,"à¬ī":151518,"à°³":151519,"à°µ":151520,"à²Ł":151521,"áĢı":151522,"áģ¼":151523,"áī¨":151524,"áĬĴ":151525,"áĭ©":151526,"áĮĦ":151527,"áĮĶ":151528,"áIJ§":151529,"áĴĮ":151530,"áĶħ":151531,"áĶĬ":151532,"áłĦ":151533,"á¨ģ":151534,"á¸ĥ":151535,"ḻ":151536,"âĶŀ":151537,"âĺµ":151538,"âļ£":151539,"â²¢":151540,"ãĪª":151541,"ä¶µ":151542,"ê²Ļ":151543,"ê²´":151544,"ê³Ĥ":151545,"롼":151546,"ìĨĬ":151547,"ì¼ĩ":151548,"íĭį":151549,"íĵ¬":151550,"íĵ®":151551,"íĵ¶":151552,"íĵ»":151553,"臘":151554,"ï¥ł":151555,"辰":151556,"ïѲ":151557,"ðIJŃĬ":151558,"ðIJ±ħ":151559,"ðĸ¥":151560,"ðĸ¥¨":151561,"ðĿij³":151562,"ðĿĵķ":151563,"ðĿĵ¬":151564,"ðĿĵ¹":151565,"ðĿĵ¾":151566,"ðĿĶĵ":151567,"ðĿķį":151568,"ðĿķ¡":151569,"ðĿķ±":151570,"ðĿĸĸ":151571,"ðĿĺı":151572,"ðĿĺIJ":151573,"ðĿĺļ":151574,"ðĿĻ®":151575,"ðĿϰ":151576,"ðĿϏ":151577,"ðĿĻº":151578,"ðĿϼ":151579,"ðĿϽ":151580,"ðĿĻ¿":151581,"ðĿļĦ":151582,"ðĿļı":151583,"ðŁħħ":151584,"ðŁħĵ":151585,"ÆĪ":151586,"àłĮ":151587,"áϳ":151588,"áļĮ":151589,"áĽħ":151590,"áĽIJ":151591,"á¤Ĭ":151592,"á¸Ĭ":151593,"âͽ":151594,"âķĬ":151595,"âĽĩ":151596,"âĽı":151597,"âĿª":151598,"âĿ«":151599,"⣰":151600,"ãĦį":151601,"ãĦĵ":151602,"ãĦ§":151603,"ãħĸ":151604,"ãī«":151605,"ê¦Ķ":151606,"ï±Ĭ":151607,"àºĤ":151608,"áħ£":151609,"á¥Ķ":151610,"ᥤ":151611,"âĨ¤":151612,"âĨ·":151613,"âĩŀ":151614,"âĸ¤":151615,"âŀ¶":151616,"ãμ":151617,"嘆":151618,"ðĵı§":151619,"âͲ":151620,"âĢ´":151621,"âĴŁ":151622,"âĴ¡":151623,"â°Ĥ":151624,"â°į":151625,"â°İ":151626,"â°IJ":151627,"â°ij":151628,"â°Ł":151629,"â°ł":151630,"â°¡":151631,"â¼Ń":151632,"ãĬ¥":151633,"âĴł":151634,"⽺":151635,"ãĩº":151636,"ãĩ½":151637,"ï¨Ĭ":151638,"áķ·":151639,"âį¨":151640,"âºŁ":151641,"â½Ĺ":151642} \ No newline at end of file diff --git a/comfy_extras/nodes_edit_model.py b/comfy_extras/nodes_edit_model.py new file mode 100644 index 000000000..b69f79715 --- /dev/null +++ b/comfy_extras/nodes_edit_model.py @@ -0,0 +1,26 @@ +import node_helpers + + +class ReferenceLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning": ("CONDITIONING", ), + }, + "optional": {"latent": ("LATENT", ),} + } + + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "append" + + CATEGORY = "advanced/conditioning/edit_models" + DESCRIPTION = "This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images." 
+
+    def append(self, conditioning, latent=None):
+        if latent is not None:
+            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [latent["samples"]]}, append=True)
+        return (conditioning, )
+
+
+NODE_CLASS_MAPPINGS = {
+    "ReferenceLatent": ReferenceLatent,
+}
diff --git a/nodes.py b/nodes.py
index bfc342275..11aa50fce 100644
--- a/nodes.py
+++ b/nodes.py
@@ -920,7 +920,7 @@ class CLIPLoader:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
-                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace"], ),
+                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2"], ),
                               },
                 "optional": {
                               "device": (["default", "cpu"], {"advanced": True}),
@@ -930,7 +930,7 @@ class CLIPLoader:
 
     CATEGORY = "advanced/loaders"
 
-    DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5"
+    DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B"
 
     def load_clip(self, clip_name, type="stable_diffusion", device="default"):
         clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION)
@@ -2279,6 +2279,7 @@ def init_builtin_extra_nodes():
         "nodes_ace.py",
         "nodes_string.py",
         "nodes_camera_trajectory.py",
+        "nodes_edit_model.py",
     ]
 
     import_failed = []
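For context on the ReferenceLatent patch above: ComfyUI conditioning is a list of [tensor, options_dict] pairs, and calling conditioning_set_values with append=True extends a list-valued key instead of overwriting it, which is what lets several ReferenceLatent nodes be chained to supply multiple reference images. A minimal standalone sketch of that append behavior (illustrative helper only, not the node_helpers implementation):

    import torch

    def conditioning_append_values(conditioning, values):
        # For each [tensor, options] pair, extend list-valued keys instead of
        # overwriting them, mirroring conditioning_set_values(..., append=True).
        out = []
        for tensor, options in conditioning:
            options = options.copy()
            for k, v in values.items():
                options[k] = options.get(k, []) + v
            out.append([tensor, options])
        return out

    cond = [[torch.zeros(1, 77, 768), {}]]
    cond = conditioning_append_values(cond, {"reference_latents": [torch.zeros(1, 16, 32, 32)]})
    cond = conditioning_append_values(cond, {"reference_latents": [torch.zeros(1, 16, 64, 64)]})
    assert len(cond[0][1]["reference_latents"]) == 2  # two chained references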
From 93a49a45de2efc4f99e20693d6de0927423ba273 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 25 Jun 2025 23:33:02 -0700
Subject: [PATCH 0272/1073] Bump minimum transformers version. (#8671)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 15fde2849..9a1ed2072 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,7 @@ torchvision
 torchaudio
 numpy>=1.25.0
 einops
-transformers>=4.28.1
+transformers>=4.37.2
 tokenizers>=0.13.3
 sentencepiece
 safetensors>=0.4.2

From a96e65df18980b856a764a40bda524042434f8b2 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 26 Jun 2025 00:39:09 -0700
Subject: [PATCH 0273/1073] Disable omnigen2 fp16 on older pytorch versions.
 (#8672)

---
 comfy/model_management.py | 7 +++++++
 comfy/supported_models.py | 7 ++++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 054291432..816caf18f 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1290,6 +1290,13 @@ def supports_fp8_compute(device=None):
 
     return True
 
+def extended_fp16_support():
+    # TODO: check why some models work with fp16 on newer torch versions but not on older
+    if torch_version_numeric < (2, 7):
+        return False
+
+    return True
+
 def soft_empty_cache(force=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index f4413d647..2669ca01e 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -1197,11 +1197,16 @@ class Omnigen2(supported_models_base.BASE):
     unet_extra_config = {}
     latent_format = latent_formats.Flux
 
-    supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32]
+    supported_inference_dtypes = [torch.bfloat16, torch.float32]
 
     vae_key_prefix = ["vae."]
     text_encoder_key_prefix = ["text_encoders."]
 
+    def __init__(self, unet_config):
+        super().__init__(unet_config)
+        if comfy.model_management.extended_fp16_support():
+            self.supported_inference_dtypes = [torch.float16] + self.supported_inference_dtypes
+
     def get_model(self, state_dict, prefix="", device=None):
         out = model_base.Omnigen2(self, device=device)
         return out
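The gating pattern in the patch above (compare a parsed torch version tuple, then only prepend torch.float16 when the runtime is new enough) is easy to reuse. A hedged sketch with an illustrative version parser; ComfyUI's own torch_version_numeric is computed elsewhere in model_management:

    import torch

    def torch_version_tuple():
        # "2.7.1+cu126" -> (2, 7); ignore any local build suffix.
        return tuple(int(p) for p in torch.__version__.split("+")[0].split(".")[:2])

    def pick_inference_dtypes():
        dtypes = [torch.bfloat16, torch.float32]
        if torch_version_tuple() >= (2, 7):  # mirrors extended_fp16_support()
            dtypes.insert(0, torch.float16)  # fp16 first, so it is preferred
        return dtypes

    print(pick_inference_dtypes())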
From ef5266b1c1ffabcfec147416f108da56abb565ad Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 26 Jun 2025 08:28:41 -0700
Subject: [PATCH 0274/1073] Support Flux Kontext Dev model. (#8679)

---
 comfy/ldm/flux/model.py    | 42 ++++++++++++++++++++++++++++++-----
 comfy/model_base.py        | 16 ++++++++++++++
 comfy_extras/nodes_flux.py | 45 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 97 insertions(+), 6 deletions(-)

diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py
index 846703d52..8f4d99f54 100644
--- a/comfy/ldm/flux/model.py
+++ b/comfy/ldm/flux/model.py
@@ -195,20 +195,50 @@ class Flux(nn.Module):
         img = self.final_layer(img, vec)  # (N, T, patch_size ** 2 * out_channels)
         return img
 
-    def forward(self, x, timestep, context, y=None, guidance=None, control=None, transformer_options={}, **kwargs):
+    def process_img(self, x, index=0, h_offset=0, w_offset=0):
         bs, c, h, w = x.shape
         patch_size = self.patch_size
         x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size))
-
         img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
-
         h_len = ((h + (patch_size // 2)) // patch_size)
         w_len = ((w + (patch_size // 2)) // patch_size)
+
+        h_offset = ((h_offset + (patch_size // 2)) // patch_size)
+        w_offset = ((w_offset + (patch_size // 2)) // patch_size)
+
         img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
-        img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1)
-        img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0)
-        img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
+        img_ids[:, :, 0] = img_ids[:, :, 1] + index
+        img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1)
+        img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0)
+        return img, repeat(img_ids, "h w c -> b (h w) c", b=bs)
+
+    def forward(self, x, timestep, context, y=None, guidance=None, ref_latents=None, control=None, transformer_options={}, **kwargs):
+        bs, c, h_orig, w_orig = x.shape
+        patch_size = self.patch_size
+
+        h_len = ((h_orig + (patch_size // 2)) // patch_size)
+        w_len = ((w_orig + (patch_size // 2)) // patch_size)
+        img, img_ids = self.process_img(x)
+        img_tokens = img.shape[1]
+        if ref_latents is not None:
+            h = 0
+            w = 0
+            for ref in ref_latents:
+                h_offset = 0
+                w_offset = 0
+                if ref.shape[-2] + h > ref.shape[-1] + w:
+                    w_offset = w
+                else:
+                    h_offset = h
+
+                kontext, kontext_ids = self.process_img(ref, index=1, h_offset=h_offset, w_offset=w_offset)
+                img = torch.cat([img, kontext], dim=1)
+                img_ids = torch.cat([img_ids, kontext_ids], dim=1)
+                h = max(h, ref.shape[-2] + h_offset)
+                w = max(w, ref.shape[-1] + w_offset)
 
         txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
         out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None))
-        return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h,:w]
+        out = out[:, :img_tokens]
+        return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h_orig,:w_orig]
diff --git a/comfy/model_base.py b/comfy/model_base.py
index 12b0f3dc9..fcdfde378 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -816,6 +816,7 @@ class PixArt(BaseModel):
 class Flux(BaseModel):
     def __init__(self, model_config, model_type=ModelType.FLUX, device=None, unet_model=comfy.ldm.flux.model.Flux):
         super().__init__(model_config, model_type, device=device, unet_model=unet_model)
+        self.memory_usage_factor_conds = ("kontext",)
 
     def concat_cond(self, **kwargs):
         try:
@@ -876,8 +877,23 @@ class Flux(BaseModel):
         guidance = kwargs.get("guidance", 3.5)
         if guidance is not None:
             out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance]))
+
+        ref_latents = kwargs.get("reference_latents", None)
+        if ref_latents is not None:
+            latents = []
+            for lat in ref_latents:
+                latents.append(self.process_latent_in(lat))
+            out['ref_latents'] = comfy.conds.CONDList(latents)
         return out
 
+    def extra_conds_shapes(self, **kwargs):
+        out = {}
+        ref_latents = kwargs.get("reference_latents", None)
+        if ref_latents is not None:
+            out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16])
+        return out
+
+
 class GenmoMochi(BaseModel):
     def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
         super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.genmo.joint_model.asymm_models_joint.AsymmDiTJoint)
diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py
index ad6c15f37..8a8a17698 100644
--- a/comfy_extras/nodes_flux.py
+++ b/comfy_extras/nodes_flux.py
@@ -1,4 +1,5 @@
 import node_helpers
+import comfy.utils
 
 class CLIPTextEncodeFlux:
     @classmethod
@@ -56,8 +57,52 @@ class FluxDisableGuidance:
         return (c, )
 
 
+PREFERED_KONTEXT_RESOLUTIONS = [
+    (672, 1568),
+    (688, 1504),
+    (720, 1456),
+    (752, 1392),
+    (800, 1328),
+    (832, 1248),
+    (880, 1184),
+    (944, 1104),
+    (1024, 1024),
+    (1104, 944),
+    (1184, 880),
+    (1248, 832),
+    (1328, 800),
+    (1392, 752),
+    (1456, 720),
+    (1504, 688),
+    (1568, 672),
+]
+
+
+class FluxKontextImageScale:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"image": ("IMAGE", ),
+                             },
+                }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "scale"
+
+    CATEGORY = "advanced/conditioning/flux"
+    DESCRIPTION = "This node resizes the image to one that is more optimal for flux kontext."
+
+    def scale(self, image):
+        width = image.shape[2]
+        height = image.shape[1]
+        aspect_ratio = width / height
+        _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)
+        image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1)
+        return (image, )
+
+
 NODE_CLASS_MAPPINGS = {
     "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
     "FluxGuidance": FluxGuidance,
     "FluxDisableGuidance": FluxDisableGuidance,
+    "FluxKontextImageScale": FluxKontextImageScale,
 }
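A note on FluxKontextImageScale above: the min() one-liner works because Python compares tuples lexicographically, so the candidate with the smallest aspect-ratio error wins and its width and height ride along in the tuple. The same selection isolated as a plain function, with the resolution list abridged for brevity:

    PREFERED_KONTEXT_RESOLUTIONS = [(672, 1568), (1024, 1024), (1568, 672)]  # abridged

    def closest_kontext_resolution(width, height):
        aspect_ratio = width / height
        # Tuple comparison: the first element (aspect error) decides, the rest is payload.
        _, w, h = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)
        return w, h

    print(closest_kontext_resolution(1920, 1080))  # -> (1568, 672) with this abridged list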
("IMAGE",) + FUNCTION = "scale" + + CATEGORY = "advanced/conditioning/flux" + DESCRIPTION = "This node resizes the image to one that is more optimal for flux kontext." + + def scale(self, image): + width = image.shape[2] + height = image.shape[1] + aspect_ratio = width / height + _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS) + image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1) + return (image, ) + + NODE_CLASS_MAPPINGS = { "CLIPTextEncodeFlux": CLIPTextEncodeFlux, "FluxGuidance": FluxGuidance, "FluxDisableGuidance": FluxDisableGuidance, + "FluxKontextImageScale": FluxKontextImageScale, } From 68f4496b8ea51934883df46fce946da74f7b78eb Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Thu, 26 Jun 2025 08:29:03 -0700 Subject: [PATCH 0275/1073] Update frontend to 1.23.3 (#8678) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9a1ed2072..68b9abd4f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.22.2 +comfyui-frontend-package==1.23.3 comfyui-workflow-templates==0.1.29 comfyui-embedded-docs==0.2.2 torch From 7d8cf4cacc45e0ab58f1446a51287de17f6de6f5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 26 Jun 2025 08:39:40 -0700 Subject: [PATCH 0276/1073] Update requirements.txt (#8680) --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 68b9abd4f..2006d48d9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ comfyui-frontend-package==1.23.3 -comfyui-workflow-templates==0.1.29 -comfyui-embedded-docs==0.2.2 +comfyui-workflow-templates==0.1.30 +comfyui-embedded-docs==0.2.3 torch torchsde torchvision From b976f934ae112ff515d2c7fe362a1a118ddd7072 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Thu, 26 Jun 2025 08:44:12 -0700 Subject: [PATCH 0277/1073] Update frontend to 1.23.4 (#8681) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2006d48d9..82e168b52 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.23.3 +comfyui-frontend-package==1.23.4 comfyui-workflow-templates==0.1.30 comfyui-embedded-docs==0.2.3 torch From 6493709d6aa3db3fa0179b4d8da003145a750ded Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 26 Jun 2025 11:47:07 -0400 Subject: [PATCH 0278/1073] ComfyUI version 0.3.42 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index fedd3466f..26cada11a 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.41" +__version__ = "0.3.42" diff --git a/pyproject.toml b/pyproject.toml index c572ad4c6..2c6894c6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.41" +version = "0.3.42" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From bd951a714f8c736680fe13e735eee71acf73dd4c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 26 Jun 2025 09:26:29 -0700 Subject: [PATCH 0279/1073] Add Flux Kontext and Omnigen 2 models to readme. (#8682) --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 6366280e7..7e6a3a0b1 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,9 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) - [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/) +- Image Editing Models + - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) + - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model) - Video Models - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) From 9093301a49a90b654a7f37f6f621784b78579e2d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 27 Jun 2025 11:14:56 -0700 Subject: [PATCH 0280/1073] Don't add tiny bit of random noise when VAE encoding. (#8705) Shouldn't change outputs but might make things a tiny bit more deterministic. --- comfy/ldm/models/autoencoder.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py index e6493155e..13bd6e16b 100644 --- a/comfy/ldm/models/autoencoder.py +++ b/comfy/ldm/models/autoencoder.py @@ -11,7 +11,7 @@ from comfy.ldm.modules.ema import LitEma import comfy.ops class DiagonalGaussianRegularizer(torch.nn.Module): - def __init__(self, sample: bool = True): + def __init__(self, sample: bool = False): super().__init__() self.sample = sample @@ -19,16 +19,12 @@ class DiagonalGaussianRegularizer(torch.nn.Module): yield from () def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]: - log = dict() posterior = DiagonalGaussianDistribution(z) if self.sample: z = posterior.sample() else: z = posterior.mode() - kl_loss = posterior.kl() - kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] - log["kl_loss"] = kl_loss - return z, log + return z, None class AbstractAutoencoder(torch.nn.Module): From c36be0ea09ddd89b69dd786dbb525402c7041408 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 27 Jun 2025 14:21:12 -0700 Subject: [PATCH 0281/1073] Fix memory estimation bug with kontext. 
From c36be0ea09ddd89b69dd786dbb525402c7041408 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 27 Jun 2025 14:21:12 -0700
Subject: [PATCH 0281/1073] Fix memory estimation bug with kontext. (#8709)

---
 comfy/model_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_base.py b/comfy/model_base.py
index fcdfde378..4392355ea 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -816,7 +816,7 @@ class PixArt(BaseModel):
 class Flux(BaseModel):
     def __init__(self, model_config, model_type=ModelType.FLUX, device=None, unet_model=comfy.ldm.flux.model.Flux):
         super().__init__(model_config, model_type, device=device, unet_model=unet_model)
-        self.memory_usage_factor_conds = ("kontext",)
+        self.memory_usage_factor_conds = ("ref_latents",)
 
     def concat_cond(self, **kwargs):
         try:

From e18f53cca9062cc6b165e16712772437b80333f2 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 27 Jun 2025 17:22:02 -0400
Subject: [PATCH 0282/1073] ComfyUI version 0.3.43

---
 comfyui_version.py | 2 +-
 pyproject.toml     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index 26cada11a..c98c90499 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.42"
+__version__ = "0.3.43"
diff --git a/pyproject.toml b/pyproject.toml
index 2c6894c6e..9d0f90032 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.42"
+version = "0.3.43"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
From ba9548f75637fbc091c87083b9fc361264ccc4e6 Mon Sep 17 00:00:00 2001
From: xufeng <57523724+lgldlk@users.noreply.github.com>
Date: Sun, 29 Jun 2025 03:24:02 +0800
Subject: =?UTF-8?q?=E2=80=9C--whitelist-custom-nodes?=
 =?UTF-8?q?=E2=80=9D=20args=20for=20comfy=20core=20to=20go=20with=20?=
 =?UTF-8?q?=E2=80=9C--disable-all-custom-nodes=E2=80=9D=20for=20developmen?=
 =?UTF-8?q?t=20purposes=20=20(#8592)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* feat: “--whitelist-custom-nodes” args for comfy core to go with
  “--disable-all-custom-nodes” for development purposes

* feat: Simplify custom nodes whitelist logic to use consistent code paths
---
 comfy/cli_args.py |  1 +
 main.py           | 11 +++++++----
 nodes.py          |  3 +++
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 741ecac3f..7234a7ba0 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -151,6 +151,7 @@ parser.add_argument("--windows-standalone-build", action="store_true", help="Win
 parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.")
 parser.add_argument("--disable-all-custom-nodes", action="store_true", help="Disable loading all custom nodes.")
+parser.add_argument("--whitelist-custom-nodes", type=str, nargs='+', default=[], help="Specify custom node folders to load even when --disable-all-custom-nodes is enabled.")
 parser.add_argument("--disable-api-nodes", action="store_true", help="Disable loading all api nodes.")
 
 parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage.")
diff --git a/main.py b/main.py
index 0d7c97dcb..5dd3c92d2 100644
--- a/main.py
+++ b/main.py
@@ -66,9 +66,6 @@ def execute_prestartup_script():
             logging.error(f"Failed to execute startup-script: {script_path} / {e}")
         return False
 
-    if args.disable_all_custom_nodes:
-        return
-
     node_paths = folder_paths.get_folder_paths("custom_nodes")
     for custom_node_path in node_paths:
         possible_modules = os.listdir(custom_node_path)
@@ -81,6 +78,9 @@ def execute_prestartup_script():
 
             script_path = os.path.join(module_path, "prestartup_script.py")
             if os.path.exists(script_path):
+                if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
+                    logging.info(f"Prestartup Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
+                    continue
                 time_before = time.perf_counter()
                 success = execute_script(script_path)
                 node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
@@ -276,7 +276,10 @@ def start_comfyui(asyncio_loop=None):
     prompt_server = server.PromptServer(asyncio_loop)
 
     hook_breaker_ac10a0.save_functions()
-    nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes, init_api_nodes=not args.disable_api_nodes)
+    nodes.init_extra_nodes(
+        init_custom_nodes=True,
+        init_api_nodes=not args.disable_api_nodes
+    )
     hook_breaker_ac10a0.restore_functions()
 
     cuda_malloc_warning()
diff --git a/nodes.py b/nodes.py
index 11aa50fce..99411a1fe 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2187,6 +2187,9 @@ def init_external_custom_nodes():
             module_path = os.path.join(custom_node_path, possible_module)
             if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
             if module_path.endswith(".disabled"): continue
+            if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
+                logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
+                continue
             time_before = time.perf_counter()
             success = load_custom_node(module_path, base_node_names, module_parent="custom_nodes")
             node_import_times.append((time.perf_counter() - time_before, module_path, success))

From a3cf272522f9820c3f379aa821729404cb4cf821 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 28 Jun 2025 12:53:40 -0700
Subject: [PATCH 0284/1073] Skip custom node logic completely if disabled and
 no whitelisted nodes. (#8719)

---
 main.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/main.py b/main.py
index 5dd3c92d2..d488c0f4c 100644
--- a/main.py
+++ b/main.py
@@ -55,6 +55,9 @@ def apply_custom_paths():
 
 def execute_prestartup_script():
+    if args.disable_all_custom_nodes and len(args.whitelist_custom_nodes) == 0:
+        return
+
     def execute_script(script_path):
         module_name = os.path.splitext(script_path)[0]
         try:
@@ -277,7 +280,7 @@ def start_comfyui(asyncio_loop=None):
 
     hook_breaker_ac10a0.save_functions()
     nodes.init_extra_nodes(
-        init_custom_nodes=True,
+        init_custom_nodes=(not args.disable_all_custom_nodes) or len(args.whitelist_custom_nodes) > 0,
         init_api_nodes=not args.disable_api_nodes
     )
     hook_breaker_ac10a0.restore_functions()
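The two patches above converge on a single predicate: a custom node module loads unless --disable-all-custom-nodes is set and the module is absent from --whitelist-custom-nodes. A minimal sketch of that decision logic as a standalone argparse program (not ComfyUI's cli_args module):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--disable-all-custom-nodes", action="store_true")
    parser.add_argument("--whitelist-custom-nodes", type=str, nargs='+', default=[])
    args = parser.parse_args(["--disable-all-custom-nodes", "--whitelist-custom-nodes", "my_node_pack"])

    def should_load(module_name):
        # Whitelisted folders survive the global disable switch.
        return not args.disable_all_custom_nodes or module_name in args.whitelist_custom_nodes

    print(should_load("my_node_pack"), should_load("other_pack"))  # True False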
From 396454fa410781008015c73f7e0a5014dac0609e Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 28 Jun 2025 15:12:56 -0700
Subject: [PATCH 0285/1073] Reorder the schedulers so simple is the default
 one. (#8722)

---
 comfy/samplers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index efe9bf867..078a675f4 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -1039,13 +1039,13 @@ class SchedulerHandler(NamedTuple):
     use_ms: bool = True
 
 SCHEDULER_HANDLERS = {
-    "normal": SchedulerHandler(normal_scheduler),
+    "simple": SchedulerHandler(simple_scheduler),
+    "sgm_uniform": SchedulerHandler(partial(normal_scheduler, sgm=True)),
     "karras": SchedulerHandler(k_diffusion_sampling.get_sigmas_karras, use_ms=False),
     "exponential": SchedulerHandler(k_diffusion_sampling.get_sigmas_exponential, use_ms=False),
-    "sgm_uniform": SchedulerHandler(partial(normal_scheduler, sgm=True)),
-    "simple": SchedulerHandler(simple_scheduler),
     "ddim_uniform": SchedulerHandler(ddim_scheduler),
     "beta": SchedulerHandler(beta_scheduler),
+    "normal": SchedulerHandler(normal_scheduler),
     "linear_quadratic": SchedulerHandler(linear_quadratic_schedule),
     "kl_optimal": SchedulerHandler(kl_optimal_scheduler, use_ms=False),
 }

From 5b4eb021cb392680c62ef5c2bc1afe560bde37b3 Mon Sep 17 00:00:00 2001
From: chaObserv <154517000+chaObserv@users.noreply.github.com>
Date: Sun, 29 Jun 2025 06:13:13 +0800
Subject: [PATCH 0286/1073] Perpneg guider with updated pre and post-cfg (#8698)

---
 comfy_extras/nodes_perpneg.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py
index 6c6f71767..f051cbf9a 100644
--- a/comfy_extras/nodes_perpneg.py
+++ b/comfy_extras/nodes_perpneg.py
@@ -69,8 +69,17 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
         negative_cond = self.conds.get("negative", None)
         empty_cond = self.conds.get("empty_negative_prompt", None)
 
-        (noise_pred_pos, noise_pred_neg, noise_pred_empty) = \
-            comfy.samplers.calc_cond_batch(self.inner_model, [positive_cond, negative_cond, empty_cond], x, timestep, model_options)
+        conds = [positive_cond, negative_cond, empty_cond]
+
+        out = comfy.samplers.calc_cond_batch(self.inner_model, conds, x, timestep, model_options)
+
+        # Apply pre_cfg_functions since sampling_function() is skipped
+        for fn in model_options.get("sampler_pre_cfg_function", []):
+            args = {"conds":conds, "conds_out": out, "cond_scale": self.cfg, "timestep": timestep,
+                    "input": x, "sigma": timestep, "model": self.inner_model, "model_options": model_options}
+            out = fn(args)
+
+        noise_pred_pos, noise_pred_neg, noise_pred_empty = out
 
         cfg_result = perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_empty, self.neg_scale, self.cfg)
 
         # normally this would be done in cfg_function, but we skipped
@@ -82,6 +91,7 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
                 "denoised": cfg_result,
                 "cond": positive_cond,
                 "uncond": negative_cond,
+                "cond_scale": self.cfg,
                 "model": self.inner_model,
                 "uncond_denoised": noise_pred_neg,
                 "cond_denoised": noise_pred_pos,
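For readers unfamiliar with the hook the perp-neg patch above replays by hand: each sampler_pre_cfg_function receives a dict of sampling state and returns the (possibly modified) list of model outputs before CFG mixing. A minimal sketch of the calling convention, with toy tensors standing in for real predictions:

    import torch

    def damp_uncond_pre_cfg(args):
        # conds_out is [cond_pred, uncond_pred, ...]; a hook may rewrite any entry.
        conds_out = args["conds_out"]
        conds_out[1] = conds_out[1] * 0.9
        return conds_out

    out = [torch.ones(1, 4), torch.ones(1, 4)]
    for fn in [damp_uncond_pre_cfg]:  # i.e. model_options.get("sampler_pre_cfg_function", [])
        out = fn({"conds_out": out, "cond_scale": 7.0, "input": None, "sigma": None})
    print(out[1])  # uncond prediction damped to 0.9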
From e195c1b13ff96a9ee2c2de503cbf9a7a99babec5 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 28 Jun 2025 16:11:16 -0700
Subject: [PATCH 0287/1073] Make stable release workflow publish drafts.
 (#8723)

---
 .github/workflows/stable-release.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml
index a046ff9ea..61105abe4 100644
--- a/.github/workflows/stable-release.yml
+++ b/.github/workflows/stable-release.yml
@@ -102,5 +102,4 @@ jobs:
           file: ComfyUI_windows_portable_nvidia.7z
           tag: ${{ inputs.git_tag }}
           overwrite: true
-          prerelease: true
-          make_latest: false
+          draft: true

From 2a0b138feb7e8ed36cbf195dd1c2d49b469f1490 Mon Sep 17 00:00:00 2001
From: bmcomfy
Date: Sat, 28 Jun 2025 16:11:40 -0700
Subject: [PATCH 0288/1073] build: add gh action to process releases (#8652)

---
 .github/workflows/release-webhook.yml | 108 ++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)
 create mode 100644 .github/workflows/release-webhook.yml

diff --git a/.github/workflows/release-webhook.yml b/.github/workflows/release-webhook.yml
new file mode 100644
index 000000000..6fceb7560
--- /dev/null
+++ b/.github/workflows/release-webhook.yml
@@ -0,0 +1,108 @@
+name: Release Webhook
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  send-webhook:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Send release webhook
+        env:
+          WEBHOOK_URL: ${{ secrets.RELEASE_GITHUB_WEBHOOK_URL }}
+          WEBHOOK_SECRET: ${{ secrets.RELEASE_GITHUB_WEBHOOK_SECRET }}
+        run: |
+          # Generate UUID for delivery ID
+          DELIVERY_ID=$(uuidgen)
+          HOOK_ID="release-webhook-$(date +%s)"
+
+          # Create webhook payload matching GitHub release webhook format
+          PAYLOAD=$(cat <
Date: Sun, 29 Jun 2025 03:38:40 -0700
Subject: [PATCH 0289/1073] Fix contiguous issue with pytorch nightly. (#8729)

---
 comfy/text_encoders/t5.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/text_encoders/t5.py b/comfy/text_encoders/t5.py
index 49f0ba4fe..36bf35309 100644
--- a/comfy/text_encoders/t5.py
+++ b/comfy/text_encoders/t5.py
@@ -146,7 +146,7 @@ class T5Attention(torch.nn.Module):
         )
         values = self.relative_attention_bias(relative_position_bucket, out_dtype=dtype)  # shape (query_length, key_length, num_heads)
         values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, query_length, key_length)
-        return values
+        return values.contiguous()
 
     def forward(self, x, mask=None, past_bias=None, optimized_attention=None):
         q = self.q(x)
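On the one-line t5.py fix above: permute() returns a view with rearranged strides rather than moved data, and some kernels (notably in newer torch nightlies) expect a densely laid-out tensor. A short illustration:

    import torch

    values = torch.arange(24).reshape(2, 3, 4).permute([2, 0, 1]).unsqueeze(0)
    print(values.is_contiguous())               # False: only the strides changed
    print(values.contiguous().is_contiguous())  # True: data copied into dense layout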
From cf49a2c5b575d59574e17ba3268f1938107d635a Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 30 Jun 2025 11:18:25 -0700
Subject: [PATCH 0290/1073] Dual cfg node optimizations when cfg is 1.0 (#8747)

---
 comfy_extras/nodes_custom_sampler.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py
index 3e5be3d3c..fc506a0cc 100644
--- a/comfy_extras/nodes_custom_sampler.py
+++ b/comfy_extras/nodes_custom_sampler.py
@@ -609,8 +609,14 @@ class Guider_DualCFG(comfy.samplers.CFGGuider):
     def predict_noise(self, x, timestep, model_options={}, seed=None):
         negative_cond = self.conds.get("negative", None)
         middle_cond = self.conds.get("middle", None)
+        positive_cond = self.conds.get("positive", None)
+        if model_options.get("disable_cfg1_optimization", False) == False:
+            if math.isclose(self.cfg2, 1.0):
+                negative_cond = None
+            if math.isclose(self.cfg1, 1.0):
+                middle_cond = None
 
-        out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, self.conds.get("positive", None)], x, timestep, model_options)
+        out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options)
         return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1
 
 class DualCFGGuider:
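The dual-CFG combination in the patch above is two nested guidance steps: standard CFG between middle and negative at cfg2, then a second push from middle toward positive at cfg1. When a scale is 1.0 its term becomes a no-op, which is exactly why the matching cond can be dropped and one model evaluation saved. A sketch of the arithmetic with plain tensors, no model involved:

    import torch

    def dual_cfg(positive, middle, negative, cfg1, cfg2):
        first = negative + (middle - negative) * cfg2  # plain CFG on (middle, negative)
        return first + (positive - middle) * cfg1      # second guidance stage

    pos, mid, neg = torch.randn(3, 1, 4)
    # cfg2 == 1.0 reduces the first stage to `middle`, so `negative` is never needed.
    assert torch.allclose(dual_cfg(pos, mid, neg, 2.0, 1.0), mid + (pos - mid) * 2.0)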
From c46268bf60454ce0634b56d1b29f50f04fbc162b Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Tue, 1 Jul 2025 02:18:43 +0800
Subject: [PATCH 0291/1073] Update requirements.txt (#8741)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 82e168b52..479a29eec 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.23.4
-comfyui-workflow-templates==0.1.30
+comfyui-workflow-templates==0.1.31
 comfyui-embedded-docs==0.2.3
 torch
 torchsde

From f02de13316b24436eb69222d7bc8181b73eeccb2 Mon Sep 17 00:00:00 2001
From: chaObserv <154517000+chaObserv@users.noreply.github.com>
Date: Tue, 1 Jul 2025 14:33:07 +0800
Subject: [PATCH 0292/1073] Add TCFG node (#8730)

---
 comfy_extras/nodes_tcfg.py | 71 ++++++++++++++++++++++++++++++++++++++
 nodes.py                   |  1 +
 2 files changed, 72 insertions(+)
 create mode 100644 comfy_extras/nodes_tcfg.py

diff --git a/comfy_extras/nodes_tcfg.py b/comfy_extras/nodes_tcfg.py
new file mode 100644
index 000000000..35b89a73f
--- /dev/null
+++ b/comfy_extras/nodes_tcfg.py
@@ -0,0 +1,71 @@
+# TCFG: Tangential Damping Classifier-free Guidance - (arXiv: https://arxiv.org/abs/2503.18137)
+
+import torch
+
+from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
+
+
+def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor:
+    """Drop tangential components from uncond score to align with cond score."""
+    # (B, 1, ...)
+    batch_num = cond_score.shape[0]
+    cond_score_flat = cond_score.reshape(batch_num, 1, -1).float()
+    uncond_score_flat = uncond_score.reshape(batch_num, 1, -1).float()
+
+    # Score matrix A (B, 2, ...)
+    score_matrix = torch.cat((uncond_score_flat, cond_score_flat), dim=1)
+    try:
+        _, _, Vh = torch.linalg.svd(score_matrix, full_matrices=False)
+    except RuntimeError:
+        # Fallback to CPU
+        _, _, Vh = torch.linalg.svd(score_matrix.cpu(), full_matrices=False)
+
+    # Drop the tangential components
+    v1 = Vh[:, 0:1, :].to(uncond_score_flat.device)  # (B, 1, ...)
+    uncond_score_td = (uncond_score_flat @ v1.transpose(-2, -1)) * v1
+    return uncond_score_td.reshape_as(uncond_score).to(uncond_score.dtype)
+
+
+class TCFG(ComfyNodeABC):
+    @classmethod
+    def INPUT_TYPES(cls) -> InputTypeDict:
+        return {
+            "required": {
+                "model": (IO.MODEL, {}),
+            }
+        }
+
+    RETURN_TYPES = (IO.MODEL,)
+    RETURN_NAMES = ("patched_model",)
+    FUNCTION = "patch"
+
+    CATEGORY = "advanced/guidance"
+    DESCRIPTION = "TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality."
+
+    def patch(self, model):
+        m = model.clone()
+
+        def tangential_damping_cfg(args):
+            # Assume [cond, uncond, ...]
+            x = args["input"]
+            conds_out = args["conds_out"]
+            if len(conds_out) <= 1 or None in args["conds"][:2]:
+                # Skip when either cond or uncond is None
+                return conds_out
+            cond_pred = conds_out[0]
+            uncond_pred = conds_out[1]
+            uncond_td = score_tangential_damping(x - cond_pred, x - uncond_pred)
+            uncond_pred_td = x - uncond_td
+            return [cond_pred, uncond_pred_td] + conds_out[2:]
+
+        m.set_model_sampler_pre_cfg_function(tangential_damping_cfg)
+        return (m,)
+
+
+NODE_CLASS_MAPPINGS = {
+    "TCFG": TCFG,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "TCFG": "Tangential Damping CFG",
+}
diff --git a/nodes.py b/nodes.py
index 99411a1fe..1b465b9e6 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2283,6 +2283,7 @@ def init_builtin_extra_nodes():
         "nodes_string.py",
         "nodes_camera_trajectory.py",
         "nodes_edit_model.py",
+        "nodes_tcfg.py"
     ]
 
     import_failed = []
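A quick numeric view of what score_tangential_damping in the TCFG patch above computes: stack the two flattened scores, take an SVD, and keep only the uncond score's projection onto the leading right-singular direction, discarding the part tangential to the cond score. A toy-sized sketch:

    import torch

    cond = torch.tensor([[1.0, 0.0]])    # flattened cond score, batch of 1
    uncond = torch.tensor([[0.6, 0.8]])  # has a component orthogonal to cond

    score_matrix = torch.stack((uncond, cond), dim=1)  # (B, 2, D)
    _, _, Vh = torch.linalg.svd(score_matrix, full_matrices=False)
    v1 = Vh[:, 0:1, :]  # leading right-singular direction
    uncond_td = (uncond.unsqueeze(1) @ v1.transpose(-2, -1)) * v1
    print(uncond_td.squeeze(1))  # uncond projected onto v1, tangential part dropped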
""" extra_args = {} if extra_args is None else extra_args @@ -1462,12 +1463,18 @@ def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler s_in = x.new_ones([x.shape[0]]) - def default_noise_scaler(sigma): - return sigma * ((sigma ** 0.3).exp() + 10.0) - noise_scaler = default_noise_scaler if noise_scaler is None else noise_scaler + def default_er_sde_noise_scaler(x): + return x * ((x ** 0.3).exp() + 10.0) + + noise_scaler = default_er_sde_noise_scaler if noise_scaler is None else noise_scaler num_integration_points = 200.0 point_indice = torch.arange(0, num_integration_points, dtype=torch.float32, device=x.device) + model_sampling = model.inner_model.model_patcher.get_model_object("model_sampling") + sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) + half_log_snrs = sigma_to_half_log_snr(sigmas, model_sampling) + er_lambdas = half_log_snrs.neg().exp() # er_lambda_t = sigma_t / alpha_t + old_denoised = None old_denoised_d = None @@ -1478,32 +1485,36 @@ def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None stage_used = min(max_stage, i + 1) if sigmas[i + 1] == 0: x = denoised - elif stage_used == 1: - r = noise_scaler(sigmas[i + 1]) / noise_scaler(sigmas[i]) - x = r * x + (1 - r) * denoised else: - r = noise_scaler(sigmas[i + 1]) / noise_scaler(sigmas[i]) - x = r * x + (1 - r) * denoised + er_lambda_s, er_lambda_t = er_lambdas[i], er_lambdas[i + 1] + alpha_s = sigmas[i] / er_lambda_s + alpha_t = sigmas[i + 1] / er_lambda_t + r_alpha = alpha_t / alpha_s + r = noise_scaler(er_lambda_t) / noise_scaler(er_lambda_s) - dt = sigmas[i + 1] - sigmas[i] - sigma_step_size = -dt / num_integration_points - sigma_pos = sigmas[i + 1] + point_indice * sigma_step_size - scaled_pos = noise_scaler(sigma_pos) + # Stage 1 Euler + x = r_alpha * r * x + alpha_t * (1 - r) * denoised - # Stage 2 - s = torch.sum(1 / scaled_pos) * sigma_step_size - denoised_d = (denoised - old_denoised) / (sigmas[i] - sigmas[i - 1]) - x = x + (dt + s * noise_scaler(sigmas[i + 1])) * denoised_d + if stage_used >= 2: + dt = er_lambda_t - er_lambda_s + lambda_step_size = -dt / num_integration_points + lambda_pos = er_lambda_t + point_indice * lambda_step_size + scaled_pos = noise_scaler(lambda_pos) - if stage_used >= 3: - # Stage 3 - s_u = torch.sum((sigma_pos - sigmas[i]) / scaled_pos) * sigma_step_size - denoised_u = (denoised_d - old_denoised_d) / ((sigmas[i] - sigmas[i - 2]) / 2) - x = x + ((dt ** 2) / 2 + s_u * noise_scaler(sigmas[i + 1])) * denoised_u - old_denoised_d = denoised_d + # Stage 2 + s = torch.sum(1 / scaled_pos) * lambda_step_size + denoised_d = (denoised - old_denoised) / (er_lambda_s - er_lambdas[i - 1]) + x = x + alpha_t * (dt + s * noise_scaler(er_lambda_t)) * denoised_d - if s_noise != 0 and sigmas[i + 1] > 0: - x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * (sigmas[i + 1] ** 2 - sigmas[i] ** 2 * r ** 2).sqrt().nan_to_num(nan=0.0) + if stage_used >= 3: + # Stage 3 + s_u = torch.sum((lambda_pos - er_lambda_s) / scaled_pos) * lambda_step_size + denoised_u = (denoised_d - old_denoised_d) / ((er_lambda_s - er_lambdas[i - 2]) / 2) + x = x + alpha_t * ((dt ** 2) / 2 + s_u * noise_scaler(er_lambda_t)) * denoised_u + old_denoised_d = denoised_d + + if s_noise > 0: + x = x + alpha_t * noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * (er_lambda_t ** 2 - er_lambda_s ** 2 * r ** 2).sqrt().nan_to_num(nan=0.0) old_denoised = denoised return x diff --git 
From 772de7c00653fc3a825762f555e836d071a4dc80 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 1 Jul 2025 00:09:07 -0700
Subject: [PATCH 0294/1073] PerpNeg Guider optimizations. (#8753)

---
 comfy_extras/nodes_perpneg.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py
index f051cbf9a..89e5eef90 100644
--- a/comfy_extras/nodes_perpneg.py
+++ b/comfy_extras/nodes_perpneg.py
@@ -4,6 +4,7 @@ import comfy.sampler_helpers
 import comfy.samplers
 import comfy.utils
 import node_helpers
+import math
 
 def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale):
     pos = noise_pred_pos - noise_pred_nocond
@@ -69,6 +70,12 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
         negative_cond = self.conds.get("negative", None)
         empty_cond = self.conds.get("empty_negative_prompt", None)
 
+        if model_options.get("disable_cfg1_optimization", False) == False:
+            if math.isclose(self.neg_scale, 0.0):
+                negative_cond = None
+            if math.isclose(self.cfg, 1.0):
+                empty_cond = None
+
         conds = [positive_cond, negative_cond, empty_cond]
 
         out = comfy.samplers.calc_cond_batch(self.inner_model, conds, x, timestep, model_options)
From 79ed75274874590967ff13ac73c5d84262d489d0 Mon Sep 17 00:00:00 2001
From: Terry Jia
Date: Tue, 1 Jul 2025 20:43:48 -0400
Subject: [PATCH 0295/1073] support upload 3d model to custom subfolder (#8597)

---
 comfy_extras/nodes_load_3d.py | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/comfy_extras/nodes_load_3d.py b/comfy_extras/nodes_load_3d.py
index 40d03e18a..899608149 100644
--- a/comfy_extras/nodes_load_3d.py
+++ b/comfy_extras/nodes_load_3d.py
@@ -5,6 +5,8 @@ import os
 from comfy.comfy_types import IO
 from comfy_api.input_impl import VideoFromFile
 
+from pathlib import Path
+
 def normalize_path(path):
     return path.replace('\\', '/')
 
@@ -16,7 +18,14 @@ class Load3D():
 
         os.makedirs(input_dir, exist_ok=True)
 
-        files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.obj', '.fbx', '.stl'))]
+        input_path = Path(input_dir)
+        base_path = Path(folder_paths.get_input_directory())
+
+        files = [
+            normalize_path(str(file_path.relative_to(base_path)))
+            for file_path in input_path.rglob("*")
+            if file_path.suffix.lower() in {'.gltf', '.glb', '.obj', '.fbx', '.stl'}
+        ]
 
         return {"required": {
             "model_file": (sorted(files), {"file_upload": True}),
@@ -61,7 +70,14 @@ class Load3DAnimation():
 
         os.makedirs(input_dir, exist_ok=True)
 
-        files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.fbx'))]
+        input_path = Path(input_dir)
+        base_path = Path(folder_paths.get_input_directory())
+
+        files = [
+            normalize_path(str(file_path.relative_to(base_path)))
+            for file_path in input_path.rglob("*")
+            if file_path.suffix.lower() in {'.gltf', '.glb', '.fbx'}
+        ]
 
         return {"required": {
             "model_file": (sorted(files), {"file_upload": True}),
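The pathlib pattern above (rglob plus relative_to) is what turns the previous flat listdir into a recursive scan that still yields upload-friendly relative paths. A self-contained illustration with a throwaway directory, analogous in spirit to the Load3D change:

    from pathlib import Path
    import tempfile

    base = Path(tempfile.mkdtemp())
    (base / "3d" / "sub").mkdir(parents=True)
    (base / "3d" / "sub" / "model.glb").touch()

    files = [
        str(p.relative_to(base)).replace("\\", "/")
        for p in (base / "3d").rglob("*")
        if p.suffix.lower() in {".gltf", ".glb", ".obj", ".fbx", ".stl"}
    ]
    print(files)  # ['3d/sub/model.glb']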
From 111f583e00cbd7b08149856f2b6de7a58ea65c0b Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 1 Jul 2025 21:57:13 -0700
Subject: [PATCH 0296/1073] Fallback to regular op when fp8 op throws
 exception. (#8761)

---
 comfy/ops.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/comfy/ops.py b/comfy/ops.py
index 431c8f89d..2cc9bbc27 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -336,9 +336,12 @@ class fp8_ops(manual_cast):
             return None
 
         def forward_comfy_cast_weights(self, input):
-            out = fp8_linear(self, input)
-            if out is not None:
-                return out
+            try:
+                out = fp8_linear(self, input)
+                if out is not None:
+                    return out
+            except Exception as e:
+                logging.info("Exception during fp8 op: {}".format(e))
 
             weight, bias = cast_bias_weight(self, input)
             return torch.nn.functional.linear(input, weight, bias)

From 9f1069290c53c738998204eb87e82e595808871f Mon Sep 17 00:00:00 2001
From: Harel Cain
Date: Wed, 2 Jul 2025 21:34:51 +0200
Subject: [PATCH 0297/1073] nodes_lt: fixes to latent conditioning at index > 0
 (#8769)

---
 comfy_extras/nodes_lt.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py
index e6dc122ca..b5058667a 100644
--- a/comfy_extras/nodes_lt.py
+++ b/comfy_extras/nodes_lt.py
@@ -134,8 +134,8 @@ class LTXVAddGuide:
         _, num_keyframes = get_keyframe_idxs(cond)
         latent_count = latent_length - num_keyframes
         frame_idx = frame_idx if frame_idx >= 0 else max((latent_count - 1) * time_scale_factor + 1 + frame_idx, 0)
-        if guide_length > 1:
-            frame_idx = frame_idx // time_scale_factor * time_scale_factor  # frame index must be divisible by 8
+        if guide_length > 1 and frame_idx != 0:
+            frame_idx = (frame_idx - 1) // time_scale_factor * time_scale_factor + 1  # frame index - 1 must be divisible by 8 or frame_idx == 0
 
         latent_idx = (frame_idx + time_scale_factor - 1) // time_scale_factor
 
@@ -144,7 +144,7 @@ class LTXVAddGuide:
     def add_keyframe_index(self, cond, frame_idx, guiding_latent, scale_factors):
         keyframe_idxs, _ = get_keyframe_idxs(cond)
         _, latent_coords = self._patchifier.patchify(guiding_latent)
-        pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, True)
+        pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0)  # we need the causal fix only if we're placing the new latents at index 0
         pixel_coords[:, 0] += frame_idx
         if keyframe_idxs is None:
             keyframe_idxs = pixel_coords

From 34c8eeec065856e835e5ccebfceb2ea3b76110a7 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 2 Jul 2025 12:35:11 -0700
Subject: [PATCH 0298/1073] Fix ImageColorToMask not returning right mask
 values. (#8771)

---
 comfy_extras/nodes_mask.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py
index 99b264a32..ab387a2fc 100644
--- a/comfy_extras/nodes_mask.py
+++ b/comfy_extras/nodes_mask.py
@@ -152,7 +152,7 @@ class ImageColorToMask:
     def image_to_mask(self, image, color):
         temp = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int)
         temp = torch.bitwise_left_shift(temp[:,:,:,0], 16) + torch.bitwise_left_shift(temp[:,:,:,1], 8) + temp[:,:,:,2]
-        mask = torch.where(temp == color, 255, 0).float()
+        mask = torch.where(temp == color, 1.0, 0).float()
         return (mask,)
 
 class SolidMask:
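Context for the ImageColorToMask fix above: the node packs each pixel's 8-bit RGB channels into one 24-bit integer before comparing against the target color, and ComfyUI masks live in the 0..1 range, hence the 255 -> 1.0 change. A tiny sketch of the packing and the corrected mask value:

    import torch

    rgb = torch.tensor([[[[1.0, 0.0, 0.0]]]])  # a single red pixel, BHWC layout
    t = (torch.clamp(rgb, 0, 1.0) * 255.0).round().to(torch.int)
    packed = (t[..., 0] << 16) + (t[..., 1] << 8) + t[..., 2]  # 0xFF0000 == 16711680
    mask = torch.where(packed == 16711680, 1.0, 0).float()  # 1.0, not 255
    print(packed.item(), mask.item())  # 16711680 1.0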
(#8771) --- comfy_extras/nodes_mask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 99b264a32..ab387a2fc 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -152,7 +152,7 @@ class ImageColorToMask: def image_to_mask(self, image, color): temp = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int) temp = torch.bitwise_left_shift(temp[:,:,:,0], 16) + torch.bitwise_left_shift(temp[:,:,:,1], 8) + temp[:,:,:,2] - mask = torch.where(temp == color, 255, 0).float() + mask = torch.where(temp == color, 1.0, 0).float() return (mask,) class SolidMask: From d9277301d28e732e82d0de1d5948aa00acbf6b65 Mon Sep 17 00:00:00 2001 From: City <125218114+city96@users.noreply.github.com> Date: Thu, 3 Jul 2025 02:13:43 +0200 Subject: [PATCH 0299/1073] Initial code for new SLG node (#8759) --- comfy/model_patcher.py | 3 ++ comfy/samplers.py | 6 +++- comfy_extras/nodes_slg.py | 68 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 1 deletion(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index b1d6d4395..52e76b5f3 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -379,6 +379,9 @@ class ModelPatcher: def set_model_sampler_pre_cfg_function(self, pre_cfg_function, disable_cfg1_optimization=False): self.model_options = set_model_options_pre_cfg_function(self.model_options, pre_cfg_function, disable_cfg1_optimization) + def set_model_sampler_calc_cond_batch_function(self, sampler_calc_cond_batch_function): + self.model_options["sampler_calc_cond_batch_function"] = sampler_calc_cond_batch_function + def set_model_unet_function_wrapper(self, unet_wrapper_function: UnetWrapperFunction): self.model_options["model_function_wrapper"] = unet_wrapper_function diff --git a/comfy/samplers.py b/comfy/samplers.py index 078a675f4..25ccaf39f 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -373,7 +373,11 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option uncond_ = uncond conds = [cond, uncond_] - out = calc_cond_batch(model, conds, x, timestep, model_options) + if "sampler_calc_cond_batch_function" in model_options: + args = {"conds": conds, "input": x, "sigma": timestep, "model": model, "model_options": model_options} + out = model_options["sampler_calc_cond_batch_function"](args) + else: + out = calc_cond_batch(model, conds, x, timestep, model_options) for fn in model_options.get("sampler_pre_cfg_function", []): args = {"conds":conds, "conds_out": out, "cond_scale": cond_scale, "timestep": timestep, diff --git a/comfy_extras/nodes_slg.py b/comfy_extras/nodes_slg.py index 2fa09e250..7adff202e 100644 --- a/comfy_extras/nodes_slg.py +++ b/comfy_extras/nodes_slg.py @@ -78,7 +78,75 @@ class SkipLayerGuidanceDiT: return (m, ) +class SkipLayerGuidanceDiTSimple: + ''' + Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass. 
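+    Within the configured sigma range, the cond pass runs with the original
+    model_options while the uncond pass runs with the selected DiT blocks
+    patched to an identity skip, so only the negative prediction is modified.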
+ ''' + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL", ), + "double_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), + "single_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "skip_guidance" + EXPERIMENTAL = True + + DESCRIPTION = "Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass." + + CATEGORY = "advanced/guidance" + + def skip_guidance(self, model, start_percent, end_percent, double_layers="", single_layers=""): + def skip(args, extra_args): + return args + + model_sampling = model.get_model_object("model_sampling") + sigma_start = model_sampling.percent_to_sigma(start_percent) + sigma_end = model_sampling.percent_to_sigma(end_percent) + + double_layers = re.findall(r'\d+', double_layers) + double_layers = [int(i) for i in double_layers] + + single_layers = re.findall(r'\d+', single_layers) + single_layers = [int(i) for i in single_layers] + + if len(double_layers) == 0 and len(single_layers) == 0: + return (model, ) + + def calc_cond_batch_function(args): + x = args["input"] + model = args["model"] + conds = args["conds"] + sigma = args["sigma"] + + model_options = args["model_options"] + slg_model_options = model_options.copy() + + for layer in double_layers: + slg_model_options = comfy.model_patcher.set_model_options_patch_replace(slg_model_options, skip, "dit", "double_block", layer) + + for layer in single_layers: + slg_model_options = comfy.model_patcher.set_model_options_patch_replace(slg_model_options, skip, "dit", "single_block", layer) + + cond, uncond = conds + sigma_ = sigma[0].item() + if sigma_ >= sigma_end and sigma_ <= sigma_start and uncond is not None: + cond_out, _ = comfy.samplers.calc_cond_batch(model, [cond, None], x, sigma, model_options) + _, uncond_out = comfy.samplers.calc_cond_batch(model, [None, uncond], x, sigma, slg_model_options) + out = [cond_out, uncond_out] + else: + out = comfy.samplers.calc_cond_batch(model, conds, x, sigma, model_options) + + return out + + m = model.clone() + m.set_model_sampler_calc_cond_batch_function(calc_cond_batch_function) + + return (m, ) NODE_CLASS_MAPPINGS = { "SkipLayerGuidanceDiT": SkipLayerGuidanceDiT, + "SkipLayerGuidanceDiTSimple": SkipLayerGuidanceDiTSimple, } From e9af97ba1aeb79316713d7e6ce218ecb5145614f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 3 Jul 2025 11:39:11 -0700 Subject: [PATCH 0300/1073] Use torch cu129 for nvidia pytorch nightly. (#8786) * update nightly workflow with cu129 * Remove unused file to lower standalone size. 
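Stepping back to the SLG patch above: the new set_model_sampler_calc_cond_batch_function hook lets a node take over the entire cond-batch evaluation for a sampling step. A rough sketch of a custom hook in the same style (tweak_options is a placeholder for any per-step rewrite of model_options):

import comfy.samplers

def make_calc_cond_batch_hook(tweak_options):
    def hook(args):
        # args carries "model", "conds", "input", "sigma" and "model_options",
        # as passed by sampling_function above.
        model_options = tweak_options(args["model_options"].copy())
        return comfy.samplers.calc_cond_batch(
            args["model"], args["conds"], args["input"], args["sigma"], model_options)
    return hook

# Wiring, as in SkipLayerGuidanceDiTSimple:
#   m = model.clone()
#   m.set_model_sampler_calc_cond_batch_function(make_calc_cond_batch_hook(lambda o: o))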
--- .github/workflows/windows_release_nightly_pytorch.yml | 6 ++++-- README.md | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index eb5ed9c91..5bdc940de 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -7,7 +7,7 @@ on: description: 'cuda version' required: true type: string - default: "128" + default: "129" python_minor: description: 'python minor version' @@ -19,7 +19,7 @@ on: description: 'python patch version' required: true type: string - default: "2" + default: "5" # push: # branches: # - master @@ -53,6 +53,8 @@ jobs: ls ../temp_wheel_dir ./python.exe -s -m pip install --pre ../temp_wheel_dir/* sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth + + rm ./Lib/site-packages/torch/lib/dnnl.lib #I don't think this is actually used and I need the space cd .. git clone --depth 1 https://github.com/comfyanonymous/taesd diff --git a/README.md b/README.md index 7e6a3a0b1..55d745a10 100644 --- a/README.md +++ b/README.md @@ -243,7 +243,7 @@ Nvidia users should install stable pytorch using this command: This is the command to install pytorch nightly instead which might have performance improvements. -```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128``` +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu129``` #### Troubleshooting From ae26cd99b567f95fd4dfc243358c9f6d82e076c8 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Fri, 4 Jul 2025 02:41:16 +0800 Subject: [PATCH 0301/1073] Update template to 0.1.32 (#8782) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 479a29eec..27d385389 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.31 +comfyui-workflow-templates==0.1.32 comfyui-embedded-docs==0.2.3 torch torchsde From f74fc4d9279bf0519e25f9b670cc039d089aba09 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 3 Jul 2025 16:16:30 -0700 Subject: [PATCH 0302/1073] Add ImageRotate and ImageFlip nodes. 
(#8789) --- comfy_extras/nodes_images.py | 45 ++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index ed54ccc57..fba80e2ae 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -583,6 +583,49 @@ class GetImageSize: return width, height, batch_size +class ImageRotate: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": (IO.IMAGE,), + "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],), + }} + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "rotate" + + CATEGORY = "image/transform" + + def rotate(self, image, rotation): + rotate_by = 0 + if rotation.startswith("90"): + rotate_by = 1 + elif rotation.startswith("180"): + rotate_by = 2 + elif rotation.startswith("270"): + rotate_by = 3 + + image = torch.rot90(image, k=rotate_by, dims=[2, 1]) + return (image,) + +class ImageFlip: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": (IO.IMAGE,), + "flip_method": (["x-axis: vertically", "y-axis: horizontally"],), + }} + RETURN_TYPES = (IO.IMAGE,) + FUNCTION = "flip" + + CATEGORY = "image/transform" + + def flip(self, image, flip_method): + if flip_method.startswith("x"): + image = torch.flip(image, dims=[1]) + elif flip_method.startswith("y"): + image = torch.flip(image, dims=[2]) + + return (image,) + + NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, @@ -594,4 +637,6 @@ NODE_CLASS_MAPPINGS = { "ImageStitch": ImageStitch, "ResizeAndPadImage": ResizeAndPadImage, "GetImageSize": GetImageSize, + "ImageRotate": ImageRotate, + "ImageFlip": ImageFlip, } From f41f323c52e34dffac7142732bdbdc4e54daa421 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Fri, 4 Jul 2025 07:20:53 +0800 Subject: [PATCH 0303/1073] Add the denoising step to several samplers (#8780) --- comfy/k_diffusion/sampling.py | 36 +++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index e231d6a3d..34218337a 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -412,9 +412,13 @@ def sample_lms(model, x, sigmas, extra_args=None, callback=None, disable=None, o ds.pop(0) if callback is not None: callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) - cur_order = min(i + 1, order) - coeffs = [linear_multistep_coeff(cur_order, sigmas_cpu, i, j) for j in range(cur_order)] - x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds))) + if sigmas[i + 1] == 0: + # Denoising step + x = denoised + else: + cur_order = min(i + 1, order) + coeffs = [linear_multistep_coeff(cur_order, sigmas_cpu, i, j) for j in range(cur_order)] + x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds))) return x @@ -1067,7 +1071,9 @@ def sample_ipndm(model, x, sigmas, extra_args=None, callback=None, disable=None, d_cur = (x_cur - denoised) / t_cur order = min(max_order, i+1) - if order == 1: # First Euler step. + if t_next == 0: # Denoising step + x_next = denoised + elif order == 1: # First Euler step. x_next = x_cur + (t_next - t_cur) * d_cur elif order == 2: # Use one history point. 
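            # Adams-Bashforth-style multistep update: with the previous
            # derivative d_{n-1} kept in buffer_model, the order-2 step is
            #   x_{n+1} = x_n + h * (3*d_n - d_{n-1}) / 2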
x_next = x_cur + (t_next - t_cur) * (3 * d_cur - buffer_model[-1]) / 2 @@ -1085,6 +1091,7 @@ def sample_ipndm(model, x, sigmas, extra_args=None, callback=None, disable=None, return x_next + #From https://github.com/zju-pi/diff-sampler/blob/main/diff-solvers-main/solvers.py #under Apache 2 license def sample_ipndm_v(model, x, sigmas, extra_args=None, callback=None, disable=None, max_order=4): @@ -1108,7 +1115,9 @@ def sample_ipndm_v(model, x, sigmas, extra_args=None, callback=None, disable=Non d_cur = (x_cur - denoised) / t_cur order = min(max_order, i+1) - if order == 1: # First Euler step. + if t_next == 0: # Denoising step + x_next = denoised + elif order == 1: # First Euler step. x_next = x_cur + (t_next - t_cur) * d_cur elif order == 2: # Use one history point. h_n = (t_next - t_cur) @@ -1148,6 +1157,7 @@ def sample_ipndm_v(model, x, sigmas, extra_args=None, callback=None, disable=Non return x_next + #From https://github.com/zju-pi/diff-sampler/blob/main/diff-solvers-main/solvers.py #under Apache 2 license @torch.no_grad() @@ -1198,6 +1208,7 @@ def sample_deis(model, x, sigmas, extra_args=None, callback=None, disable=None, return x_next + @torch.no_grad() def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None): extra_args = {} if extra_args is None else extra_args @@ -1404,6 +1415,7 @@ def sample_res_multistep_ancestral(model, x, sigmas, extra_args=None, callback=N def sample_res_multistep_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None): return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=True) + @torch.no_grad() def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2., cfg_pp=False): """Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK""" @@ -1430,19 +1442,19 @@ def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None, if callback is not None: callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) dt = sigmas[i + 1] - sigmas[i] - if i == 0: + if sigmas[i + 1] == 0: + # Denoising step + x = denoised + else: # Euler method if cfg_pp: x = denoised + d * sigmas[i + 1] else: x = x + d * dt - else: - # Gradient estimation - if cfg_pp: + + if i >= 1: + # Gradient estimation d_bar = (ge_gamma - 1) * (d - old_d) - x = denoised + d * sigmas[i + 1] + d_bar * dt - else: - d_bar = ge_gamma * d + (1 - ge_gamma) * old_d x = x + d_bar * dt old_d = d return x From 27870ec3c30e56be9707d89a120eb7f0e2836be1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 4 Jul 2025 01:49:11 -0700 Subject: [PATCH 0304/1073] Add that ckpt files are loaded safely to README. (#8791) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 55d745a10..ba8892b17 100644 --- a/README.md +++ b/README.md @@ -86,6 +86,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - Smart memory management: can automatically run models on GPUs with as low as 1GB vram. - Works even if you don't have a GPU with: ```--cpu``` (slow) - Can load ckpt, safetensors and diffusers models/checkpoints. Standalone VAEs and CLIP models. +- Safe loading of ckpt, pt, pth, etc.. files. 
- Embeddings/Textual inversion - [Loras (regular, locon and loha)](https://comfyanonymous.github.io/ComfyUI_examples/lora/) - [Hypernetworks](https://comfyanonymous.github.io/ComfyUI_examples/hypernetworks/) @@ -101,7 +102,6 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Model Merging](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/) - [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) - Latent previews with [TAESD](#how-to-show-high-quality-previews) -- Starts up very fast. - Works fully offline: core will never download anything unless you want to. - Optional API nodes to use paid models from external providers through the online [Comfy API](https://docs.comfy.org/tutorials/api-nodes/overview). - [Config file](extra_model_paths.yaml.example) to set the search paths for models. From ee615ac26916f15290b63a20d4feb07ea702d5da Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 5 Jul 2025 11:34:57 -0700 Subject: [PATCH 0305/1073] Add warning when loading file unsafely. (#8800) --- comfy/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/utils.py b/comfy/utils.py index 1f8d71292..47981d8f6 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -77,6 +77,7 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False): if safe_load or ALWAYS_SAFE_LOAD: pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args) else: + logging.warning("WARNING: loading {} unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.".format(ckpt)) pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle) if "state_dict" in pl_sd: sd = pl_sd["state_dict"] From 75d327abd5ee8fb6a1ec992ffdb8e43926574a63 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 6 Jul 2025 04:07:39 -0700 Subject: [PATCH 0306/1073] Remove some useless code. 
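As background for the load_torch_file warning above, a standalone sketch of the safe/unsafe split (weights_only=True refuses to unpickle arbitrary Python objects and is reliable on PyTorch 2.4 or newer; the legacy path can execute code embedded in the pickle):

import logging
import torch

def load_checkpoint(path, device="cpu", safe_load=True):
    if safe_load:
        # Only tensors and basic container types are allowed through.
        return torch.load(path, map_location=device, weights_only=True)
    logging.warning("WARNING: loading {} unsafely".format(path))
    return torch.load(path, map_location=device, weights_only=False)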
(#8812) --- comfy/gligen.py | 47 +---------------------------------------------- 1 file changed, 1 insertion(+), 46 deletions(-) diff --git a/comfy/gligen.py b/comfy/gligen.py index 161d8a5e5..1d7b6c2f4 100644 --- a/comfy/gligen.py +++ b/comfy/gligen.py @@ -1,55 +1,10 @@ import math import torch from torch import nn -from .ldm.modules.attention import CrossAttention -from inspect import isfunction +from .ldm.modules.attention import CrossAttention, FeedForward import comfy.ops ops = comfy.ops.manual_cast -def exists(val): - return val is not None - - -def uniq(arr): - return{el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = ops.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * torch.nn.functional.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - ops.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - ops.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - class GatedCrossAttentionDense(nn.Module): def __init__(self, query_dim, context_dim, n_heads, d_head): From 7eab7d29447429645c4c27a7e6c85d2be8ed79ff Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:01:32 -0700 Subject: [PATCH 0307/1073] Remove dependency on deprecated torchaudio.save function (#8815) --- comfy_extras/nodes_audio.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index 49af1eae4..aca09e4cc 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -133,14 +133,6 @@ def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=Non if sample_rate != audio["sample_rate"]: waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate) - # Create in-memory WAV buffer - wav_buffer = io.BytesIO() - torchaudio.save(wav_buffer, waveform, sample_rate, format="WAV") - wav_buffer.seek(0) # Rewind for reading - - # Use PyAV to convert and add metadata - input_container = av.open(wav_buffer) - # Create output with specified format output_buffer = io.BytesIO() output_container = av.open(output_buffer, mode='w', format=format) @@ -150,7 +142,6 @@ def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=Non output_container.metadata[key] = value # Set up the output stream with appropriate properties - input_container.streams.audio[0] if format == "opus": out_stream = output_container.add_stream("libopus", rate=sample_rate) if quality == "64k": @@ -175,18 +166,15 @@ def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=Non else: #format == "flac": out_stream = output_container.add_stream("flac", rate=sample_rate) - - # Copy frames from input to output - for frame in input_container.decode(audio=0): - frame.pts = None # Let PyAV handle timestamps - output_container.mux(out_stream.encode(frame)) + frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout='mono' if 
waveform.shape[0] == 1 else 'stereo') + frame.sample_rate = sample_rate + output_container.mux(out_stream.encode(frame)) # Flush encoder output_container.mux(out_stream.encode(None)) # Close containers output_container.close() - input_container.close() # Write the output to file output_buffer.seek(0) From e740dfd8061e696c9701c917e05649eca9b4f631 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 7 Jul 2025 00:16:00 -0700 Subject: [PATCH 0308/1073] Fix warning in audio save nodes. (#8818) --- comfy_extras/nodes_audio.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index aca09e4cc..8cd647846 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -168,6 +168,7 @@ def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=Non frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout='mono' if waveform.shape[0] == 1 else 'stereo') frame.sample_rate = sample_rate + frame.pts = 0 output_container.mux(out_stream.encode(frame)) # Flush encoder From 059cd38aa21aa6b91ca117a92218532fcd6e9c75 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 8 Jul 2025 08:43:56 +0800 Subject: [PATCH 0309/1073] Update template and node docs package version (#8825) --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 27d385389..03e29ab44 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.32 -comfyui-embedded-docs==0.2.3 +comfyui-workflow-templates==0.1.33 +comfyui-embedded-docs==0.2.4 torch torchsde torchvision From 1359c969e4962c2373eaccdf6fdcc9214c6957c2 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 7 Jul 2025 20:35:41 -0700 Subject: [PATCH 0310/1073] Update template to 0.1.34 (#8829) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 03e29ab44..eb51479f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.33 +comfyui-workflow-templates==0.1.34 comfyui-embedded-docs==0.2.4 torch torchsde From b5e97db9ac69637dc0e29d3e6259bb2a333a0b27 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 8 Jul 2025 20:52:02 +0800 Subject: [PATCH 0311/1073] Update template to 0.1.35 (#8831) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index eb51479f6..19a40ca0e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.34 +comfyui-workflow-templates==0.1.35 comfyui-embedded-docs==0.2.4 torch torchsde From 9fd0cd7cf7534d44624457e37b0902b2cd6cb04f Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Tue, 8 Jul 2025 05:54:30 -0700 Subject: [PATCH 0312/1073] Add Moonvalley nodes (#8832) --- comfy_api/input/video_types.py | 19 +- comfy_api/input_impl/video_types.py | 9 + comfy_api_nodes/apis/__init__.py | 171 +++++++- comfy_api_nodes/nodes_moonvalley.py | 639 ++++++++++++++++++++++++++++ nodes.py | 1 + 5 files changed, 837 insertions(+), 2 deletions(-) create mode 100644 comfy_api_nodes/nodes_moonvalley.py diff --git a/comfy_api/input/video_types.py b/comfy_api/input/video_types.py index dc22d34ff..bb936e0a4 100644 --- 
a/comfy_api/input/video_types.py +++ b/comfy_api/input/video_types.py @@ -1,6 +1,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Optional +from typing import Optional, Union +import io from comfy_api.util import VideoContainer, VideoCodec, VideoComponents class VideoInput(ABC): @@ -31,6 +32,22 @@ class VideoInput(ABC): """ pass + def get_stream_source(self) -> Union[str, io.BytesIO]: + """ + Get a streamable source for the video. This allows processing without + loading the entire video into memory. + + Returns: + Either a file path (str) or a BytesIO object that can be opened with av. + + Default implementation creates a BytesIO buffer, but subclasses should + override this for better performance when possible. + """ + buffer = io.BytesIO() + self.save_to(buffer) + buffer.seek(0) + return buffer + # Provide a default implementation, but subclasses can provide optimized versions # if possible. def get_dimensions(self) -> tuple[int, int]: diff --git a/comfy_api/input_impl/video_types.py b/comfy_api/input_impl/video_types.py index 197f6558c..9ae818f4e 100644 --- a/comfy_api/input_impl/video_types.py +++ b/comfy_api/input_impl/video_types.py @@ -64,6 +64,15 @@ class VideoFromFile(VideoInput): """ self.__file = file + def get_stream_source(self) -> str | io.BytesIO: + """ + Return the underlying file source for efficient streaming. + This avoids unnecessary memory copies when the source is already a file path. + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + return self.__file + def get_dimensions(self) -> tuple[int, int]: """ Returns the dimensions of the video input. diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index e38d38cc9..086028abe 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: filtered-openapi.yaml -# timestamp: 2025-05-19T21:38:55+00:00 +# timestamp: 2025-07-06T09:47:31+00:00 from __future__ import annotations @@ -1355,6 +1355,158 @@ class ModelResponseProperties(BaseModel): ) +class Keyframes(BaseModel): + image_url: Optional[str] = None + + +class MoonvalleyPromptResponse(BaseModel): + error: Optional[Dict[str, Any]] = None + frame_conditioning: Optional[Dict[str, Any]] = None + id: Optional[str] = None + inference_params: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + model_params: Optional[Dict[str, Any]] = None + output_url: Optional[str] = None + prompt_text: Optional[str] = None + status: Optional[str] = None + + +class MoonvalleyTextToVideoInferenceParams(BaseModel): + add_quality_guidance: Optional[bool] = Field( + True, description='Whether to add quality guidance' + ) + caching_coefficient: Optional[float] = Field( + 0.3, description='Caching coefficient for optimization' + ) + caching_cooldown: Optional[int] = Field( + 3, description='Number of caching cooldown steps' + ) + caching_warmup: Optional[int] = Field( + 3, description='Number of caching warmup steps' + ) + clip_value: Optional[float] = Field( + 3, description='CLIP value for generation control' + ) + conditioning_frame_index: Optional[int] = Field( + 0, description='Index of the conditioning frame' + ) + cooldown_steps: Optional[int] = Field( + None, description='Number of cooldown steps (calculated based on num_frames)' + ) + fps: Optional[int] = Field( + 24, description='Frames per second of the generated video' + ) + guidance_scale: Optional[float] = Field( + 12.5, 
description='Guidance scale for generation control' + ) + height: Optional[int] = Field( + 1080, description='Height of the generated video in pixels' + ) + negative_prompt: Optional[str] = Field(None, description='Negative prompt text') + num_frames: Optional[int] = Field(64, description='Number of frames to generate') + seed: Optional[int] = Field( + None, description='Random seed for generation (default: random)' + ) + shift_value: Optional[float] = Field( + 3, description='Shift value for generation control' + ) + steps: Optional[int] = Field(80, description='Number of denoising steps') + use_guidance_schedule: Optional[bool] = Field( + True, description='Whether to use guidance scheduling' + ) + use_negative_prompts: Optional[bool] = Field( + False, description='Whether to use negative prompts' + ) + use_timestep_transform: Optional[bool] = Field( + True, description='Whether to use timestep transformation' + ) + warmup_steps: Optional[int] = Field( + None, description='Number of warmup steps (calculated based on num_frames)' + ) + width: Optional[int] = Field( + 1920, description='Width of the generated video in pixels' + ) + + +class MoonvalleyTextToVideoRequest(BaseModel): + image_url: Optional[str] = None + inference_params: Optional[MoonvalleyTextToVideoInferenceParams] = None + prompt_text: Optional[str] = None + webhook_url: Optional[str] = None + + +class MoonvalleyUploadFileRequest(BaseModel): + file: Optional[StrictBytes] = None + + +class MoonvalleyUploadFileResponse(BaseModel): + access_url: Optional[str] = None + + +class MoonvalleyVideoToVideoInferenceParams(BaseModel): + add_quality_guidance: Optional[bool] = Field( + True, description='Whether to add quality guidance' + ) + caching_coefficient: Optional[float] = Field( + 0.3, description='Caching coefficient for optimization' + ) + caching_cooldown: Optional[int] = Field( + 3, description='Number of caching cooldown steps' + ) + caching_warmup: Optional[int] = Field( + 3, description='Number of caching warmup steps' + ) + clip_value: Optional[float] = Field( + 3, description='CLIP value for generation control' + ) + conditioning_frame_index: Optional[int] = Field( + 0, description='Index of the conditioning frame' + ) + cooldown_steps: Optional[int] = Field( + None, description='Number of cooldown steps (calculated based on num_frames)' + ) + guidance_scale: Optional[float] = Field( + 12.5, description='Guidance scale for generation control' + ) + negative_prompt: Optional[str] = Field(None, description='Negative prompt text') + seed: Optional[int] = Field( + None, description='Random seed for generation (default: random)' + ) + shift_value: Optional[float] = Field( + 3, description='Shift value for generation control' + ) + steps: Optional[int] = Field(80, description='Number of denoising steps') + use_guidance_schedule: Optional[bool] = Field( + True, description='Whether to use guidance scheduling' + ) + use_negative_prompts: Optional[bool] = Field( + False, description='Whether to use negative prompts' + ) + use_timestep_transform: Optional[bool] = Field( + True, description='Whether to use timestep transformation' + ) + warmup_steps: Optional[int] = Field( + None, description='Number of warmup steps (calculated based on num_frames)' + ) + + +class ControlType(str, Enum): + motion_control = 'motion_control' + pose_control = 'pose_control' + + +class MoonvalleyVideoToVideoRequest(BaseModel): + control_type: ControlType = Field( + ..., description='Supported types for video control' + ) + inference_params: 
Optional[MoonvalleyVideoToVideoInferenceParams] = None + prompt_text: str = Field(..., description='Describes the video to generate') + video_url: str = Field(..., description='Url to control video') + webhook_url: Optional[str] = Field( + None, description='Optional webhook URL for notifications' + ) + + class Moderation(str, Enum): low = 'low' auto = 'auto' @@ -3107,6 +3259,23 @@ class LumaUpscaleVideoGenerationRequest(BaseModel): resolution: Optional[LumaVideoModelOutputResolution] = None +class MoonvalleyImageToVideoRequest(MoonvalleyTextToVideoRequest): + keyframes: Optional[Dict[str, Keyframes]] = None + + +class MoonvalleyResizeVideoRequest(MoonvalleyVideoToVideoRequest): + frame_position: Optional[List[int]] = Field(None, max_length=2, min_length=2) + frame_resolution: Optional[List[int]] = Field(None, max_length=2, min_length=2) + scale: Optional[List[int]] = Field(None, max_length=2, min_length=2) + + +class MoonvalleyTextToImageRequest(BaseModel): + image_url: Optional[str] = None + inference_params: Optional[MoonvalleyTextToVideoInferenceParams] = None + prompt_text: Optional[str] = None + webhook_url: Optional[str] = None + + class OutputContent(RootModel[Union[OutputTextContent, OutputAudioContent]]): root: Union[OutputTextContent, OutputAudioContent] diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py new file mode 100644 index 000000000..6e937411c --- /dev/null +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -0,0 +1,639 @@ +import logging +from typing import Any, Callable, Optional, TypeVar +import random +import torch +from comfy_api_nodes.util.validation_utils import get_image_dimensions, validate_image_dimensions, validate_video_dimensions + + +from comfy_api_nodes.apis import ( + MoonvalleyTextToVideoRequest, + MoonvalleyTextToVideoInferenceParams, + MoonvalleyVideoToVideoInferenceParams, + MoonvalleyVideoToVideoRequest, + MoonvalleyPromptResponse +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import ( + download_url_to_video_output, + upload_images_to_comfyapi, + upload_video_to_comfyapi, +) +from comfy_api_nodes.mapper_utils import model_field_to_node_input + +from comfy_api.input.video_types import VideoInput +from comfy.comfy_types.node_typing import IO +from comfy_api.input_impl import VideoFromFile +import av +import io + +API_UPLOADS_ENDPOINT = "/proxy/moonvalley/uploads" +API_PROMPTS_ENDPOINT = "/proxy/moonvalley/prompts" +API_VIDEO2VIDEO_ENDPOINT = "/proxy/moonvalley/prompts/video-to-video" +API_TXT2VIDEO_ENDPOINT = "/proxy/moonvalley/prompts/text-to-video" +API_IMG2VIDEO_ENDPOINT = "/proxy/moonvalley/prompts/image-to-video" + +MIN_WIDTH = 300 +MIN_HEIGHT = 300 + +MAX_WIDTH = 10000 +MAX_HEIGHT = 10000 + +MIN_VID_WIDTH = 300 +MIN_VID_HEIGHT = 300 + +MAX_VID_WIDTH = 10000 +MAX_VID_HEIGHT = 10000 + +MAX_VIDEO_SIZE = 1024 * 1024 * 1024 # 1 GB max for in-memory video processing + +MOONVALLEY_MAREY_MAX_PROMPT_LENGTH = 5000 +R = TypeVar("R") +class MoonvalleyApiError(Exception): + """Base exception for Moonvalley API errors.""" + pass + +def is_valid_task_creation_response(response: MoonvalleyPromptResponse) -> bool: + """Verifies that the initial response contains a task ID.""" + return bool(response.id) + +def validate_task_creation_response(response) -> None: + if not is_valid_task_creation_response(response): + error_msg = f"Moonvalley Marey API: Initial request failed. 
Code: {response.code}, Message: {response.message}, Data: {response}" + logging.error(error_msg) + raise MoonvalleyApiError(error_msg) + +def get_video_from_response(response): + video = response.output_url + logging.info( + "Moonvalley Marey API: Task %s succeeded. Video URL: %s", response.id, video + ) + return video + + +def get_video_url_from_response(response) -> Optional[str]: + """Returns the first video url from the Moonvalley video generation task result. + Will not raise an error if the response is not valid. + """ + if response: + return str(get_video_from_response(response)) + else: + return None + + +def poll_until_finished( + auth_kwargs: dict[str, str], + api_endpoint: ApiEndpoint[Any, R], + result_url_extractor: Optional[Callable[[R], str]] = None, + node_id: Optional[str] = None, +) -> R: + """Polls the Moonvalley API endpoint until the task reaches a terminal state, then returns the response.""" + return PollingOperation( + poll_endpoint=api_endpoint, + completed_statuses=[ + "completed", + ], + max_poll_attempts=240, # 64 minutes with 16s interval + poll_interval=16.0, + failed_statuses=["error"], + status_extractor=lambda response: ( + response.status + if response and response.status + else None + ), + auth_kwargs=auth_kwargs, + result_url_extractor=result_url_extractor, + node_id=node_id, + ).execute() + +def validate_prompts(prompt:str, negative_prompt: str, max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH): + """Verifies that the prompt isn't empty and that neither prompt is too long.""" + if not prompt: + raise ValueError("Positive prompt is empty") + if len(prompt) > max_length: + raise ValueError(f"Positive prompt is too long: {len(prompt)} characters") + if negative_prompt and len(negative_prompt) > max_length: + raise ValueError( + f"Negative prompt is too long: {len(negative_prompt)} characters" + ) + return True + +def validate_input_media(width, height, with_frame_conditioning, num_frames_in=None): + # inference validation + # T = num_frames + # in all cases, the following must be true: T divisible by 16 and H,W by 8. in addition... + # with image conditioning: H*W must be divisible by 8192 + # without image conditioning: T divisible by 32 + if num_frames_in and not num_frames_in % 16 == 0 : + return False, ( + "The input video total frame count must be divisible by 16!" + ) + + if height % 8 != 0 or width % 8 != 0: + return False, ( + f"Height ({height}) and width ({width}) must be " "divisible by 8" + ) + + if with_frame_conditioning: + if (height * width) % 8192 != 0: + return False, ( + f"Height * width ({height * width}) must be " + "divisible by 8192 for frame conditioning" + ) + else: + if num_frames_in and not num_frames_in % 32 == 0 : + return False, ( + "The input video total frame count must be divisible by 32!" 
+            )
+
+
+def validate_input_image(image: torch.Tensor, with_frame_conditioning: bool=False) -> None:
+    """
+    Validates that the input image adheres to the expectations of the API:
+    - The image resolution should not be less than 300*300px
+    - The aspect ratio of the image should be between 1:2.5 ~ 2.5:1
+
+    """
+    height, width = get_image_dimensions(image)
+    validate_input_media(width, height, with_frame_conditioning)
+    validate_image_dimensions(image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH)
+
+def validate_input_video(video: VideoInput, num_frames_out: int, with_frame_conditioning: bool=False):
+    try:
+        width, height = video.get_dimensions()
+    except Exception as e:
+        logging.error("Error getting dimensions of video: %s", e)
+        raise ValueError(f"Cannot get video dimensions: {e}") from e
+
+    validate_input_media(width, height, with_frame_conditioning)
+    validate_video_dimensions(video, min_width=MIN_VID_WIDTH, min_height=MIN_VID_HEIGHT, max_width=MAX_VID_WIDTH, max_height=MAX_VID_HEIGHT)
+
+    trimmed_video = validate_input_video_length(video, num_frames_out)
+    return trimmed_video
+
+
+def validate_input_video_length(video: VideoInput, num_frames: int):
+
+    if video.get_duration() > 60:
+        raise MoonvalleyApiError("Input video length should be less than 1 min. Please trim.")
+
+    if num_frames == 128:
+        if video.get_duration() < 5:
+            raise MoonvalleyApiError("Input video length is less than 5s. Please use a video longer than or equal to 5s.")
+        if video.get_duration() > 5:
+            # trim video to 5s
+            video = trim_video(video, 5)
+    if num_frames == 256:
+        if video.get_duration() < 10:
+            raise MoonvalleyApiError("Input video length is less than 10s. Please use a video longer than or equal to 10s.")
+        if video.get_duration() > 10:
+            # trim video to 10s
+            video = trim_video(video, 10)
+    return video
+
+def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
+    """
+    Returns a new VideoInput object trimmed from the beginning to the specified duration,
+    using av to avoid loading the entire video into memory.
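+    The kept frame count is rounded down to a multiple of 32, matching the
+    divisibility constraint that validate_input_media enforces for inputs
+    without frame conditioning.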
+ + Args: + video: Input video to trim + duration_sec: Duration in seconds to keep from the beginning + + Returns: + VideoFromFile object that owns the output buffer + """ + output_buffer = io.BytesIO() + + input_container = None + output_container = None + + try: + # Get the stream source - this avoids loading entire video into memory + # when the source is already a file path + input_source = video.get_stream_source() + + # Open containers + input_container = av.open(input_source, mode='r') + output_container = av.open(output_buffer, mode='w', format='mp4') + + # Set up output streams for re-encoding + video_stream = None + audio_stream = None + + for stream in input_container.streams: + logging.info(f"Found stream: type={stream.type}, class={type(stream)}") + if isinstance(stream, av.VideoStream): + # Create output video stream with same parameters + video_stream = output_container.add_stream('h264', rate=stream.average_rate) + video_stream.width = stream.width + video_stream.height = stream.height + video_stream.pix_fmt = 'yuv420p' + logging.info(f"Added video stream: {stream.width}x{stream.height} @ {stream.average_rate}fps") + elif isinstance(stream, av.AudioStream): + # Create output audio stream with same parameters + audio_stream = output_container.add_stream('aac', rate=stream.sample_rate) + audio_stream.sample_rate = stream.sample_rate + audio_stream.layout = stream.layout + logging.info(f"Added audio stream: {stream.sample_rate}Hz, {stream.channels} channels") + + # Calculate target frame count that's divisible by 32 + fps = input_container.streams.video[0].average_rate + estimated_frames = int(duration_sec * fps) + target_frames = (estimated_frames // 32) * 32 # Round down to nearest multiple of 32 + + if target_frames == 0: + raise ValueError("Video too short: need at least 32 frames for Moonvalley") + + frame_count = 0 + audio_frame_count = 0 + + # Decode and re-encode video frames + if video_stream: + for frame in input_container.decode(video=0): + if frame_count >= target_frames: + break + + # Re-encode frame + for packet in video_stream.encode(frame): + output_container.mux(packet) + frame_count += 1 + + # Flush encoder + for packet in video_stream.encode(): + output_container.mux(packet) + + logging.info(f"Encoded {frame_count} video frames (target: {target_frames})") + + # Decode and re-encode audio frames + if audio_stream: + input_container.seek(0) # Reset to beginning for audio + for frame in input_container.decode(audio=0): + if frame.time >= duration_sec: + break + + # Re-encode frame + for packet in audio_stream.encode(frame): + output_container.mux(packet) + audio_frame_count += 1 + + # Flush encoder + for packet in audio_stream.encode(): + output_container.mux(packet) + + logging.info(f"Encoded {audio_frame_count} audio frames") + + # Close containers + output_container.close() + input_container.close() + + + # Return as VideoFromFile using the buffer + output_buffer.seek(0) + return VideoFromFile(output_buffer) + + except Exception as e: + # Clean up on error + if input_container is not None: + input_container.close() + if output_container is not None: + output_container.close() + raise RuntimeError(f"Failed to trim video: {str(e)}") from e + +# --- BaseMoonvalleyVideoNode --- +class BaseMoonvalleyVideoNode: + def parseWidthHeightFromRes(self, resolution: str): + # Accepts a string like "16:9 (1920 x 1080)" and returns width, height as a dict + res_map = { + "16:9 (1920 x 1080)": {"width": 1920, "height": 1080}, + "9:16 (1080 x 1920)": {"width": 1080, "height": 
1920}, + "1:1 (1152 x 1152)": {"width": 1152, "height": 1152}, + "4:3 (1440 x 1080)": {"width": 1440, "height": 1080}, + "3:4 (1080 x 1440)": {"width": 1080, "height": 1440}, + "21:9 (2560 x 1080)": {"width": 2560, "height": 1080}, + } + if resolution in res_map: + return res_map[resolution] + else: + # Default to 1920x1080 if unknown + return {"width": 1920, "height": 1080} + + def parseControlParameter(self, value): + control_map = { + "Motion Transfer": "motion_control", + "Canny": "canny_control", + "Pose Transfer": "pose_control", + "Depth": "depth_control" + } + if value in control_map: + return control_map[value] + else: + return control_map["Motion Transfer"] + + def get_response( + self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None + ) -> MoonvalleyPromptResponse: + return poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{API_PROMPTS_ENDPOINT}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MoonvalleyPromptResponse, + ), + result_url_extractor=get_video_url_from_response, + node_id=node_id, + ) + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, MoonvalleyTextToVideoRequest, "prompt_text", + multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + MoonvalleyTextToVideoInferenceParams, + "negative_prompt", + multiline=True, + default="gopro, bright, contrast, static, overexposed, bright, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, contrast, saturated, vibrant, glowing, cross dissolve, texture, videogame, saturation, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, transition, dissolve, cross-dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring, static", + ), + + "resolution": (IO.COMBO, { + "options": ["16:9 (1920 x 1080)", + "9:16 (1080 x 1920)", + "1:1 (1152 x 1152)", + "4:3 (1440 x 1080)", + "3:4 (1080 x 1440)", + "21:9 (2560 x 1080)"], + "default": "16:9 (1920 x 1080)", + "tooltip": "Resolution of the output video", + }), + # "length": (IO.COMBO,{"options":['5s','10s'], "default": '5s'}), + "prompt_adherence": model_field_to_node_input(IO.FLOAT,MoonvalleyTextToVideoInferenceParams,"guidance_scale",default=7.0, step=1, min=1, max=20), + "seed": model_field_to_node_input(IO.INT,MoonvalleyTextToVideoInferenceParams, "seed", default=random.randint(0, 2**32 - 1), min=0, max=4294967295, step=1, display="number", tooltip="Random seed value", control_after_generate=True), + "steps": model_field_to_node_input(IO.INT, MoonvalleyTextToVideoInferenceParams, "steps", default=100, min=1, max=100), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + "optional": { + "image": model_field_to_node_input( + IO.IMAGE, + MoonvalleyTextToVideoRequest, + "image_url", + tooltip="The reference image used to generate the video", + ), + } + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "generate" + CATEGORY = "api node/video/Moonvalley Marey" + API_NODE = True + + def generate(self, **kwargs): + return None + +# --- MoonvalleyImg2VideoNode --- +class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): + + @classmethod + def INPUT_TYPES(cls): + return super().INPUT_TYPES() + + RETURN_TYPES = ("VIDEO",) + RETURN_NAMES = ("video",) + 
DESCRIPTION = "Moonvalley Marey Image to Video Node" + + def generate(self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs): + image = kwargs.get("image", None) + if (image is None): + raise MoonvalleyApiError("image is required") + total_frames = get_total_frames_from_length() + + validate_input_image(image,True) + validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) + width_height = self.parseWidthHeightFromRes(kwargs.get("resolution")) + + inference_params=MoonvalleyTextToVideoInferenceParams( + negative_prompt=negative_prompt, + steps=kwargs.get("steps"), + seed=kwargs.get("seed"), + guidance_scale=kwargs.get("prompt_adherence"), + num_frames=total_frames, + width=width_height.get("width"), + height=width_height.get("height"), + use_negative_prompts=True + ) + """Upload image to comfy backend to have a URL available for further processing""" + # Get MIME type from tensor - assuming PNG format for image tensors + mime_type = "image/png" + + image_url = upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs, mime_type=mime_type)[0] + + request = MoonvalleyTextToVideoRequest( + image_url=image_url, + prompt_text=prompt, + inference_params=inference_params + ) + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint(path=API_IMG2VIDEO_ENDPOINT, + method=HttpMethod.POST, + request_model=MoonvalleyTextToVideoRequest, + response_model=MoonvalleyPromptResponse + ), + request=request, + auth_kwargs=kwargs, + ) + task_creation_response = initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.id + + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) + video = download_url_to_video_output(final_response.output_url) + return (video, ) + +# --- MoonvalleyVid2VidNode --- +class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): + def __init__(self): + super().__init__() + + @classmethod + def INPUT_TYPES(cls): + input_types = super().INPUT_TYPES() + for param in ["resolution", "image"]: + if param in input_types["required"]: + del input_types["required"][param] + if param in input_types["optional"]: + del input_types["optional"][param] + input_types["optional"] = { + "video": (IO.VIDEO, {"default": "", "multiline": False, "tooltip": "The reference video used to generate the output video. Input a 5s video for 128 frames and a 10s video for 256 frames. 
Longer videos will be trimmed automatically."}), + "control_type": ( + ["Motion Transfer", "Pose Transfer"], + {"default": "Motion Transfer"}, + ), + "motion_intensity": ( + "INT", + { + "default": 100, + "step": 1, + "min": 0, + "max": 100, + "tooltip": "Only used if control_type is 'Motion Transfer'", + }, + ) + } + + return input_types + + RETURN_TYPES = ("VIDEO",) + RETURN_NAMES = ("video",) + + def generate(self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs): + video = kwargs.get("video") + num_frames = get_total_frames_from_length() + + if not video : + raise MoonvalleyApiError("video is required") + + + """Validate video input""" + video_url="" + if video: + validated_video = validate_input_video(video, num_frames, False) + video_url = upload_video_to_comfyapi(validated_video, auth_kwargs=kwargs) + + control_type = kwargs.get("control_type") + motion_intensity = kwargs.get("motion_intensity") + + """Validate prompts and inference input""" + validate_prompts(prompt, negative_prompt) + inference_params=MoonvalleyVideoToVideoInferenceParams( + negative_prompt=negative_prompt, + steps=kwargs.get("steps"), + seed=kwargs.get("seed"), + guidance_scale=kwargs.get("prompt_adherence"), + control_params={'motion_intensity': motion_intensity} + ) + + control = self.parseControlParameter(control_type) + + request = MoonvalleyVideoToVideoRequest( + control_type=control, + video_url=video_url, + prompt_text=prompt, + inference_params=inference_params + ) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint(path=API_VIDEO2VIDEO_ENDPOINT, + method=HttpMethod.POST, + request_model=MoonvalleyVideoToVideoRequest, + response_model=MoonvalleyPromptResponse + ), + request=request, + auth_kwargs=kwargs, + ) + task_creation_response = initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.id + + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) + + video = download_url_to_video_output(final_response.output_url) + + return (video, ) + +# --- MoonvalleyTxt2VideoNode --- +class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): + def __init__(self): + super().__init__() + + RETURN_TYPES = ("VIDEO",) + RETURN_NAMES = ("video",) + + @classmethod + def INPUT_TYPES(cls): + input_types = super().INPUT_TYPES() + # Remove image-specific parameters + for param in ["image"]: + if param in input_types["optional"]: + del input_types["optional"][param] + return input_types + + def generate(self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs): + validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) + width_height = self.parseWidthHeightFromRes(kwargs.get("resolution")) + num_frames = get_total_frames_from_length() + + inference_params=MoonvalleyTextToVideoInferenceParams( + negative_prompt=negative_prompt, + steps=kwargs.get("steps"), + seed=kwargs.get("seed"), + guidance_scale=kwargs.get("prompt_adherence"), + num_frames=num_frames, + width=width_height.get("width"), + height=width_height.get("height"), + ) + request = MoonvalleyTextToVideoRequest( + prompt_text=prompt, + inference_params=inference_params + ) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint(path=API_TXT2VIDEO_ENDPOINT, + method=HttpMethod.POST, + request_model=MoonvalleyTextToVideoRequest, + response_model=MoonvalleyPromptResponse + ), + request=request, + auth_kwargs=kwargs, + ) + task_creation_response = initial_operation.execute() + 
validate_task_creation_response(task_creation_response) + task_id = task_creation_response.id + + final_response = self.get_response( + task_id, auth_kwargs=kwargs, node_id=unique_id + ) + + video = download_url_to_video_output(final_response.output_url) + return (video, ) + + + +NODE_CLASS_MAPPINGS = { + "MoonvalleyImg2VideoNode": MoonvalleyImg2VideoNode, + "MoonvalleyTxt2VideoNode": MoonvalleyTxt2VideoNode, + # "MoonvalleyVideo2VideoNode": MoonvalleyVideo2VideoNode, +} + + +NODE_DISPLAY_NAME_MAPPINGS = { + "MoonvalleyImg2VideoNode": "Moonvalley Marey Image to Video", + "MoonvalleyTxt2VideoNode": "Moonvalley Marey Text to Video", + # "MoonvalleyVideo2VideoNode": "Moonvalley Marey Video to Video", +} + +def get_total_frames_from_length(length="5s"): + # if length == '5s': + # return 128 + # elif length == '10s': + # return 256 + return 128 + # else: + # raise MoonvalleyApiError("length is required") diff --git a/nodes.py b/nodes.py index 1b465b9e6..231d4d4de 100644 --- a/nodes.py +++ b/nodes.py @@ -2310,6 +2310,7 @@ def init_builtin_api_nodes(): "nodes_pika.py", "nodes_runway.py", "nodes_tripo.py", + "nodes_moonvalley.py", "nodes_rodin.py", "nodes_gemini.py", ] From c5de4955bb91a2b136027a698aaecb8d19e3d892 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 8 Jul 2025 08:56:38 -0400 Subject: [PATCH 0313/1073] ComfyUI version 0.3.44 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index c98c90499..7981fbaca 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.43" +__version__ = "0.3.44" diff --git a/pyproject.toml b/pyproject.toml index 9d0f90032..96ead2157 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.43" +version = "0.3.44" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 974254218ab873e8b9642b6a467d56842cd228c4 Mon Sep 17 00:00:00 2001 From: josephrocca <1167575+josephrocca@users.noreply.github.com> Date: Wed, 9 Jul 2025 03:56:59 +0800 Subject: [PATCH 0314/1073] Un-hardcode chroma patch_size (#8840) --- comfy/ldm/chroma/model.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index c75023a31..06021d4f2 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -254,13 +254,12 @@ class Chroma(nn.Module): def forward(self, x, timestep, context, guidance, control=None, transformer_options={}, **kwargs): bs, c, h, w = x.shape - patch_size = 2 - x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size)) + x = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size)) - img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size) + img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=self.patch_size, pw=self.patch_size) - h_len = ((h + (patch_size // 2)) // patch_size) - w_len = ((w + (patch_size // 2)) // patch_size) + h_len = ((h + (self.patch_size // 2)) // self.patch_size) + w_len = ((w + (self.patch_size // 2)) // self.patch_size) img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) img_ids[:, :, 2] = img_ids[:, :, 2] + 
torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) @@ -268,4 +267,4 @@ class Chroma(nn.Module): txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype) out = self.forward_orig(img, img_ids, context, txt_ids, timestep, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None)) - return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h,:w] + return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=self.patch_size, pw=self.patch_size)[:,:,:h,:w] From aac10ad23a8f65243fcfca7afa7a72c1740312d6 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Wed, 9 Jul 2025 04:17:06 +0800 Subject: [PATCH 0315/1073] Add SA-Solver sampler (#8834) --- comfy/k_diffusion/sa_solver.py | 121 +++++++++++++++++++++++++++ comfy/k_diffusion/sampling.py | 111 ++++++++++++++++++++++++ comfy/samplers.py | 2 +- comfy_extras/nodes_custom_sampler.py | 45 ++++++++++ 4 files changed, 278 insertions(+), 1 deletion(-) create mode 100644 comfy/k_diffusion/sa_solver.py diff --git a/comfy/k_diffusion/sa_solver.py b/comfy/k_diffusion/sa_solver.py new file mode 100644 index 000000000..0c6821b60 --- /dev/null +++ b/comfy/k_diffusion/sa_solver.py @@ -0,0 +1,121 @@ +# SA-Solver: Stochastic Adams Solver (NeurIPS 2023, arXiv:2309.05019) +# Conference: https://proceedings.neurips.cc/paper_files/paper/2023/file/f4a6806490d31216a3ba667eb240c897-Paper-Conference.pdf +# Codebase ref: https://github.com/scxue/SA-Solver + +import math +from typing import Union, Callable +import torch + + +def compute_exponential_coeffs(s: torch.Tensor, t: torch.Tensor, solver_order: int, tau_t: float) -> torch.Tensor: + """Compute (1 + tau^2) * integral of exp((1 + tau^2) * x) * x^p dx from s to t with exp((1 + tau^2) * t) factored out, using integration by parts. + + Integral of exp((1 + tau^2) * x) * x^p dx + = product_terms[p] - (p / (1 + tau^2)) * integral of exp((1 + tau^2) * x) * x^(p-1) dx, + with base case p=0 where integral equals product_terms[0]. + + where + product_terms[p] = x^p * exp((1 + tau^2) * x) / (1 + tau^2). + + Construct a recursive coefficient matrix following the above recursive relation to compute all integral terms up to p = (solver_order - 1). + Return coefficients used by the SA-Solver in data prediction mode. + + Args: + s: Start time s. + t: End time t. + solver_order: Current order of the solver. + tau_t: Stochastic strength parameter in the SDE. + + Returns: + Exponential coefficients used in data prediction, with exp((1 + tau^2) * t) factored out, ordered from p=0 to p=solver_order−1, shape (solver_order,). 
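+
+    Example (illustrative): for solver_order=1 the result is the single
+    coefficient 1 - exp(-(1 + tau_t**2) * (t - s)).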
+ """ + tau_mul = 1 + tau_t ** 2 + h = t - s + p = torch.arange(solver_order, dtype=s.dtype, device=s.device) + + # product_terms after factoring out exp((1 + tau^2) * t) + # Includes (1 + tau^2) factor from outside the integral + product_terms_factored = (t ** p - s ** p * (-tau_mul * h).exp()) + + # Lower triangular recursive coefficient matrix + # Accumulates recursive coefficients based on p / (1 + tau^2) + recursive_depth_mat = p.unsqueeze(1) - p.unsqueeze(0) + log_factorial = (p + 1).lgamma() + recursive_coeff_mat = log_factorial.unsqueeze(1) - log_factorial.unsqueeze(0) + if tau_t > 0: + recursive_coeff_mat = recursive_coeff_mat - (recursive_depth_mat * math.log(tau_mul)) + signs = torch.where(recursive_depth_mat % 2 == 0, 1.0, -1.0) + recursive_coeff_mat = (recursive_coeff_mat.exp() * signs).tril() + + return recursive_coeff_mat @ product_terms_factored + + +def compute_simple_stochastic_adams_b_coeffs(sigma_next: torch.Tensor, curr_lambdas: torch.Tensor, lambda_s: torch.Tensor, lambda_t: torch.Tensor, tau_t: float, is_corrector_step: bool = False) -> torch.Tensor: + """Compute simple order-2 b coefficients from SA-Solver paper (Appendix D. Implementation Details).""" + tau_mul = 1 + tau_t ** 2 + h = lambda_t - lambda_s + alpha_t = sigma_next * lambda_t.exp() + if is_corrector_step: + # Simplified 1-step (order-2) corrector + b_1 = alpha_t * (0.5 * tau_mul * h) + b_2 = alpha_t * (-h * tau_mul).expm1().neg() - b_1 + else: + # Simplified 2-step predictor + b_2 = alpha_t * (0.5 * tau_mul * h ** 2) / (curr_lambdas[-2] - lambda_s) + b_1 = alpha_t * (-h * tau_mul).expm1().neg() - b_2 + return torch.stack([b_2, b_1]) + + +def compute_stochastic_adams_b_coeffs(sigma_next: torch.Tensor, curr_lambdas: torch.Tensor, lambda_s: torch.Tensor, lambda_t: torch.Tensor, tau_t: float, simple_order_2: bool = False, is_corrector_step: bool = False) -> torch.Tensor: + """Compute b_i coefficients for the SA-Solver (see eqs. 15 and 18). + + The solver order corresponds to the number of input lambdas (half-logSNR points). + + Args: + sigma_next: Sigma at end time t. + curr_lambdas: Lambda time points used to construct the Lagrange basis, shape (N,). + lambda_s: Lambda at start time s. + lambda_t: Lambda at end time t. + tau_t: Stochastic strength parameter in the SDE. + simple_order_2: Whether to enable the simple order-2 scheme. + is_corrector_step: Flag for corrector step in simple order-2 mode. + + Returns: + b_i coefficients for the SA-Solver, shape (N,), where N is the solver order. + """ + num_timesteps = curr_lambdas.shape[0] + + if simple_order_2 and num_timesteps == 2: + return compute_simple_stochastic_adams_b_coeffs(sigma_next, curr_lambdas, lambda_s, lambda_t, tau_t, is_corrector_step) + + # Compute coefficients by solving a linear system from Lagrange basis interpolation + exp_integral_coeffs = compute_exponential_coeffs(lambda_s, lambda_t, num_timesteps, tau_t) + vandermonde_matrix_T = torch.vander(curr_lambdas, num_timesteps, increasing=True).T + lagrange_integrals = torch.linalg.solve(vandermonde_matrix_T, exp_integral_coeffs) + + # (sigma_t * exp(-tau^2 * lambda_t)) * exp((1 + tau^2) * lambda_t) + # = sigma_t * exp(lambda_t) = alpha_t + # exp((1 + tau^2) * lambda_t) is extracted from the integral + alpha_t = sigma_next * lambda_t.exp() + return alpha_t * lagrange_integrals + + +def get_tau_interval_func(start_sigma: float, end_sigma: float, eta: float = 1.0) -> Callable[[Union[torch.Tensor, float]], float]: + """Return a function that controls the stochasticity of SA-Solver. 
+ + When eta = 0, SA-Solver runs as ODE. The official approach uses + time t to determine the SDE interval, while here we use sigma instead. + + See: + https://github.com/scxue/SA-Solver/blob/main/README.md + """ + + def tau_func(sigma: Union[torch.Tensor, float]) -> float: + if eta <= 0: + return 0.0 # ODE + + if isinstance(sigma, torch.Tensor): + sigma = sigma.item() + return eta if start_sigma >= sigma >= end_sigma else 0.0 + + return tau_func diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 34218337a..2ed415b1f 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -9,6 +9,7 @@ from tqdm.auto import trange, tqdm from . import utils from . import deis +from . import sa_solver import comfy.model_patcher import comfy.model_sampling @@ -1648,3 +1649,113 @@ def sample_seeds_3(model, x, sigmas, extra_args=None, callback=None, disable=Non if inject_noise: x = x + sigmas[i + 1] * (noise_coeff_3 * noise_1 + noise_coeff_2 * noise_2 + noise_coeff_1 * noise_3) * s_noise return x + + +@torch.no_grad() +def sample_sa_solver(model, x, sigmas, extra_args=None, callback=None, disable=False, tau_func=None, s_noise=1.0, noise_sampler=None, predictor_order=3, corrector_order=4, use_pece=False, simple_order_2=False): + """Stochastic Adams Solver with predictor-corrector method (NeurIPS 2023).""" + if len(sigmas) <= 1: + return x + extra_args = {} if extra_args is None else extra_args + seed = extra_args.get("seed", None) + noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler + s_in = x.new_ones([x.shape[0]]) + + model_sampling = model.inner_model.model_patcher.get_model_object("model_sampling") + sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) + lambdas = sigma_to_half_log_snr(sigmas, model_sampling=model_sampling) + + if tau_func is None: + # Use default interval for stochastic sampling + start_sigma = model_sampling.percent_to_sigma(0.2) + end_sigma = model_sampling.percent_to_sigma(0.8) + tau_func = sa_solver.get_tau_interval_func(start_sigma, end_sigma, eta=1.0) + + max_used_order = max(predictor_order, corrector_order) + x_pred = x # x: current state, x_pred: predicted next state + + h = 0.0 + tau_t = 0.0 + noise = 0.0 + pred_list = [] + + # Lower order near the end to improve stability + lower_order_to_end = sigmas[-1].item() == 0 + + for i in trange(len(sigmas) - 1, disable=disable): + # Evaluation + denoised = model(x_pred, sigmas[i] * s_in, **extra_args) + if callback is not None: + callback({"x": x_pred, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised}) + pred_list.append(denoised) + pred_list = pred_list[-max_used_order:] + + predictor_order_used = min(predictor_order, len(pred_list)) + if i == 0 or (sigmas[i + 1] == 0 and not use_pece): + corrector_order_used = 0 + else: + corrector_order_used = min(corrector_order, len(pred_list)) + + if lower_order_to_end: + predictor_order_used = min(predictor_order_used, len(sigmas) - 2 - i) + corrector_order_used = min(corrector_order_used, len(sigmas) - 1 - i) + + # Corrector + if corrector_order_used == 0: + # Update by the predicted state + x = x_pred + else: + curr_lambdas = lambdas[i - corrector_order_used + 1:i + 1] + b_coeffs = sa_solver.compute_stochastic_adams_b_coeffs( + sigmas[i], + curr_lambdas, + lambdas[i - 1], + lambdas[i], + tau_t, + simple_order_2, + is_corrector_step=True, + ) + pred_mat = torch.stack(pred_list[-corrector_order_used:], dim=1) # (B, K, ...) 
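+            # Corrector update: weight the stacked x0 predictions by the b_i
+            # coefficients, then add them to the previous state scaled by
+            # (sigmas[i] / sigmas[i - 1]) * exp(-tau_t^2 * h).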
+ corr_res = torch.tensordot(pred_mat, b_coeffs, dims=([1], [0])) # (B, ...) + x = sigmas[i] / sigmas[i - 1] * (-(tau_t ** 2) * h).exp() * x + corr_res + + if tau_t > 0 and s_noise > 0: + # The noise from the previous predictor step + x = x + noise + + if use_pece: + # Evaluate the corrected state + denoised = model(x, sigmas[i] * s_in, **extra_args) + pred_list[-1] = denoised + + # Predictor + if sigmas[i + 1] == 0: + # Denoising step + x = denoised + else: + tau_t = tau_func(sigmas[i + 1]) + curr_lambdas = lambdas[i - predictor_order_used + 1:i + 1] + b_coeffs = sa_solver.compute_stochastic_adams_b_coeffs( + sigmas[i + 1], + curr_lambdas, + lambdas[i], + lambdas[i + 1], + tau_t, + simple_order_2, + is_corrector_step=False, + ) + pred_mat = torch.stack(pred_list[-predictor_order_used:], dim=1) # (B, K, ...) + pred_res = torch.tensordot(pred_mat, b_coeffs, dims=([1], [0])) # (B, ...) + h = lambdas[i + 1] - lambdas[i] + x_pred = sigmas[i + 1] / sigmas[i] * (-(tau_t ** 2) * h).exp() * x + pred_res + + if tau_t > 0 and s_noise > 0: + noise = noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * tau_t ** 2 * h).expm1().neg().sqrt() * s_noise + x_pred = x_pred + noise + return x + + +@torch.no_grad() +def sample_sa_solver_pece(model, x, sigmas, extra_args=None, callback=None, disable=False, tau_func=None, s_noise=1.0, noise_sampler=None, predictor_order=3, corrector_order=4, simple_order_2=False): + """Stochastic Adams Solver with PECE (Predict–Evaluate–Correct–Evaluate) mode (NeurIPS 2023).""" + return sample_sa_solver(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, tau_func=tau_func, s_noise=s_noise, noise_sampler=noise_sampler, predictor_order=predictor_order, corrector_order=corrector_order, use_pece=True, simple_order_2=simple_order_2) diff --git a/comfy/samplers.py b/comfy/samplers.py index 25ccaf39f..e93d2a315 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -720,7 +720,7 @@ KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_c "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", - "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3"] + "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece"] class KSAMPLER(Sampler): def __init__(self, sampler_function, extra_options={}, inpaint_options={}): diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index b3a772714..33bc41842 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -2,6 +2,7 @@ import math import comfy.samplers import comfy.sample from comfy.k_diffusion import sampling as k_diffusion_sampling +from comfy.k_diffusion import sa_solver from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict import latent_preview import torch @@ -521,6 +522,49 @@ class SamplerER_SDE(ComfyNodeABC): return (sampler,) +class SamplerSASolver(ComfyNodeABC): + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "model": (IO.MODEL, {}), + "eta": (IO.FLOAT, {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False},), + "sde_start_percent": (IO.FLOAT, {"default": 
0.2, "min": 0.0, "max": 1.0, "step": 0.001},), + "sde_end_percent": (IO.FLOAT, {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.001},), + "s_noise": (IO.FLOAT, {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": False},), + "predictor_order": (IO.INT, {"default": 3, "min": 1, "max": 6}), + "corrector_order": (IO.INT, {"default": 4, "min": 0, "max": 6}), + "use_pece": (IO.BOOLEAN, {}), + "simple_order_2": (IO.BOOLEAN, {}), + } + } + + RETURN_TYPES = (IO.SAMPLER,) + CATEGORY = "sampling/custom_sampling/samplers" + + FUNCTION = "get_sampler" + + def get_sampler(self, model, eta, sde_start_percent, sde_end_percent, s_noise, predictor_order, corrector_order, use_pece, simple_order_2): + model_sampling = model.get_model_object("model_sampling") + start_sigma = model_sampling.percent_to_sigma(sde_start_percent) + end_sigma = model_sampling.percent_to_sigma(sde_end_percent) + tau_func = sa_solver.get_tau_interval_func(start_sigma, end_sigma, eta=eta) + + sampler_name = "sa_solver" + sampler = comfy.samplers.ksampler( + sampler_name, + { + "tau_func": tau_func, + "s_noise": s_noise, + "predictor_order": predictor_order, + "corrector_order": corrector_order, + "use_pece": use_pece, + "simple_order_2": simple_order_2, + }, + ) + return (sampler,) + + class Noise_EmptyNoise: def __init__(self): self.seed = 0 @@ -829,6 +873,7 @@ NODE_CLASS_MAPPINGS = { "SamplerDPMPP_2S_Ancestral": SamplerDPMPP_2S_Ancestral, "SamplerDPMAdaptative": SamplerDPMAdaptative, "SamplerER_SDE": SamplerER_SDE, + "SamplerSASolver": SamplerSASolver, "SplitSigmas": SplitSigmas, "SplitSigmasDenoise": SplitSigmasDenoise, "FlipSigmas": FlipSigmas, From 181a9bf26d4445e160645f6c81dc2ee29e7b6a08 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Wed, 9 Jul 2025 08:18:04 +0800 Subject: [PATCH 0316/1073] Support Multi Image-Caption dataset in lora training node (#8819) * initial impl of multi img/text dataset * Update nodes_train.py * Support Kohya-ss structure --- comfy_extras/nodes_train.py | 125 +++++++++++++++++++++++++++++++++--- 1 file changed, 115 insertions(+), 10 deletions(-) diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index fbff01010..17caf5ad5 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -75,7 +75,7 @@ class BiasDiff(torch.nn.Module): return self.passive_memory_usage() -def load_and_process_images(image_files, input_dir, resize_method="None"): +def load_and_process_images(image_files, input_dir, resize_method="None", w=None, h=None): """Utility function to load and process a list of images. Args: @@ -90,7 +90,6 @@ def load_and_process_images(image_files, input_dir, resize_method="None"): raise ValueError("No valid images found in input") output_images = [] - w, h = None, None for file in image_files: image_path = os.path.join(input_dir, file) @@ -206,6 +205,103 @@ class LoadImageSetFromFolderNode: return (output_tensor,) +class LoadImageTextSetFromFolderNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "folder": (folder_paths.get_input_subfolders(), {"tooltip": "The folder to load images from."}), + "clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."}), + }, + "optional": { + "resize_method": ( + ["None", "Stretch", "Crop", "Pad"], + {"default": "None"}, + ), + "width": ( + IO.INT, + { + "default": -1, + "min": -1, + "max": 10000, + "step": 1, + "tooltip": "The width to resize the images to. 
-1 means use the original width.",
+                    },
+                ),
+                "height": (
+                    IO.INT,
+                    {
+                        "default": -1,
+                        "min": -1,
+                        "max": 10000,
+                        "step": 1,
+                        "tooltip": "The height to resize the images to. -1 means use the original height.",
+                    },
+                )
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE", IO.CONDITIONING,)
+    FUNCTION = "load_images"
+    CATEGORY = "loaders"
+    EXPERIMENTAL = True
+    DESCRIPTION = "Loads a batch of images and captions from a directory for training."
+
+    def load_images(self, folder, clip, resize_method, width=None, height=None):
+        if clip is None:
+            raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
+
+        logging.info(f"Loading images from folder: {folder}")
+
+        sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder)
+        valid_extensions = [".png", ".jpg", ".jpeg", ".webp"]
+
+        image_files = []
+        for item in os.listdir(sub_input_dir):
+            path = os.path.join(sub_input_dir, item)
+            if any(item.lower().endswith(ext) for ext in valid_extensions):
+                image_files.append(path)
+            elif os.path.isdir(path):
+                # Support kohya-ss/sd-scripts folder structure
+                repeat = 1
+                if item.split("_")[0].isdigit():
+                    repeat = int(item.split("_")[0])
+                image_files.extend([
+                    os.path.join(path, f) for f in os.listdir(path) if any(f.lower().endswith(ext) for ext in valid_extensions)
+                ] * repeat)
+
+        caption_file_path = [
+            f.replace(os.path.splitext(f)[1], ".txt")
+            for f in image_files
+        ]
+        captions = []
+        for caption_file in caption_file_path:
+            caption_path = os.path.join(sub_input_dir, caption_file)
+            if os.path.exists(caption_path):
+                with open(caption_path, "r", encoding="utf-8") as f:
+                    caption = f.read().strip()
+                    captions.append(caption)
+            else:
+                captions.append("")
+
+        width = width if width != -1 else None
+        height = height if height != -1 else None
+        output_tensor = load_and_process_images(image_files, sub_input_dir, resize_method, width, height)
+
+        logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.")
+
+        logging.info(f"Encoding captions from {sub_input_dir}.")
+        conditions = []
+        empty_cond = clip.encode_from_tokens_scheduled(clip.tokenize(""))
+        for text in captions:
+            if text == "":
+                # Reuse the cached empty conditioning; skip re-encoding the empty string.
+                conditions.extend(empty_cond)
+                continue
+            tokens = clip.tokenize(text)
+            conditions.extend(clip.encode_from_tokens_scheduled(tokens))
+        logging.info(f"Encoded {len(conditions)} captions from {sub_input_dir}.")
+        return (output_tensor, conditions)
+
+
 def draw_loss_graph(loss_map, steps):
     width, height = 500, 300
     img = Image.new("RGB", (width, height), "white")
@@ -381,6 +477,13 @@ class TrainLoraNode:
             latents = latents["samples"].to(dtype)
         num_images = latents.shape[0]
 
+        logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}")
+        if len(positive) == 1 and num_images > 1:
+            positive = positive * num_images
+        elif len(positive) != num_images:
+            raise ValueError(
+                f"Number of positive conditions ({len(positive)}) does not match number of images ({num_images})."
+ ) with torch.inference_mode(False): lora_sd = {} @@ -474,6 +577,7 @@ class TrainLoraNode: # setup models for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model): patch(m) + mp.model.requires_grad_(False) comfy.model_management.load_models_gpu([mp], memory_required=1e20, force_full_load=True) # Setup sampler and guider like in test script @@ -486,7 +590,6 @@ class TrainLoraNode: ) guider = comfy_extras.nodes_custom_sampler.Guider_Basic(mp) guider.set_conds(positive) # Set conditioning from input - ss = comfy_extras.nodes_custom_sampler.SamplerCustomAdvanced() # yoland: this currently resize to the first image in the dataset @@ -495,21 +598,21 @@ class TrainLoraNode: try: for step in (pbar:=tqdm.trange(steps, desc="Training LoRA", smoothing=0.01, disable=not comfy.utils.PROGRESS_BAR_ENABLED)): # Generate random sigma - sigma = mp.model.model_sampling.percent_to_sigma( + sigmas = [mp.model.model_sampling.percent_to_sigma( torch.rand((1,)).item() - ) - sigma = torch.tensor([sigma]) + ) for _ in range(min(batch_size, num_images))] + sigmas = torch.tensor(sigmas) noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(step * 1000 + seed) indices = torch.randperm(num_images)[:batch_size] - ss.sample( - noise, guider, train_sampler, sigma, {"samples": latents[indices].clone()} - ) + batch_latent = latents[indices].clone() + guider.set_conds([positive[i] for i in indices]) # Set conditioning from input + guider.sample(noise.generate_noise({"samples": batch_latent}), batch_latent, train_sampler, sigmas, seed=noise.seed) finally: for m in mp.model.modules(): unpatch(m) - del ss, train_sampler, optimizer + del train_sampler, optimizer torch.cuda.empty_cache() for adapter in all_weight_adapters: @@ -697,6 +800,7 @@ NODE_CLASS_MAPPINGS = { "SaveLoRANode": SaveLoRA, "LoraModelLoader": LoraModelLoader, "LoadImageSetFromFolderNode": LoadImageSetFromFolderNode, + "LoadImageTextSetFromFolderNode": LoadImageTextSetFromFolderNode, "LossGraphNode": LossGraphNode, } @@ -705,5 +809,6 @@ NODE_DISPLAY_NAME_MAPPINGS = { "SaveLoRANode": "Save LoRA Weights", "LoraModelLoader": "Load LoRA Model", "LoadImageSetFromFolderNode": "Load Image Dataset from Folder", + "LoadImageTextSetFromFolderNode": "Load Image and Text Dataset from Folder", "LossGraphNode": "Plot Loss Graph", } From 5612670ee48ce500aab98e362b3372ab06d1d659 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 9 Jul 2025 00:45:48 -0700 Subject: [PATCH 0317/1073] Remove unmaintained notebook. (#8845) --- README.md | 4 - notebooks/comfyui_colab.ipynb | 322 ---------------------------------- 2 files changed, 326 deletions(-) delete mode 100644 notebooks/comfyui_colab.ipynb diff --git a/README.md b/README.md index ba8892b17..0e021a687 100644 --- a/README.md +++ b/README.md @@ -178,10 +178,6 @@ If you have trouble extracting it, right click the file -> properties -> unblock See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor. 
-## Jupyter Notebook - -To run it on services like paperspace, kaggle or colab you can use my [Jupyter Notebook](notebooks/comfyui_colab.ipynb) - ## [comfy-cli](https://docs.comfy.org/comfy-cli/getting-started) diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb deleted file mode 100644 index 5560b5ff9..000000000 --- a/notebooks/comfyui_colab.ipynb +++ /dev/null @@ -1,322 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "aaaaaaaaaa" - }, - "source": [ - "Git clone the repo and install the requirements. (ignore the pip errors about protobuf)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "bbbbbbbbbb" - }, - "outputs": [], - "source": [ - "#@title Environment Setup\n", - "\n", - "\n", - "OPTIONS = {}\n", - "\n", - "USE_GOOGLE_DRIVE = False #@param {type:\"boolean\"}\n", - "UPDATE_COMFY_UI = True #@param {type:\"boolean\"}\n", - "WORKSPACE = 'ComfyUI'\n", - "OPTIONS['USE_GOOGLE_DRIVE'] = USE_GOOGLE_DRIVE\n", - "OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI\n", - "\n", - "if OPTIONS['USE_GOOGLE_DRIVE']:\n", - " !echo \"Mounting Google Drive...\"\n", - " %cd /\n", - " \n", - " from google.colab import drive\n", - " drive.mount('/content/drive')\n", - "\n", - " WORKSPACE = \"/content/drive/MyDrive/ComfyUI\"\n", - " %cd /content/drive/MyDrive\n", - "\n", - "![ ! -d $WORKSPACE ] && echo -= Initial setup ComfyUI =- && git clone https://github.com/comfyanonymous/ComfyUI\n", - "%cd $WORKSPACE\n", - "\n", - "if OPTIONS['UPDATE_COMFY_UI']:\n", - " !echo -= Updating ComfyUI =-\n", - " !git pull\n", - "\n", - "!echo -= Install dependencies =-\n", - "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121 --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "cccccccccc" - }, - "source": [ - "Download some models/checkpoints/vae or custom comfyui nodes (uncomment the commands for the ones you want)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dddddddddd" - }, - "outputs": [], - "source": [ - "# Checkpoints\n", - "\n", - "### SDXL\n", - "### I recommend these workflow examples: https://comfyanonymous.github.io/ComfyUI_examples/sdxl/\n", - "\n", - "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P ./models/checkpoints/\n", - "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -P ./models/checkpoints/\n", - "\n", - "# SDXL ReVision\n", - "#!wget -c https://huggingface.co/comfyanonymous/clip_vision_g/resolve/main/clip_vision_g.safetensors -P ./models/clip_vision/\n", - "\n", - "# SD1.5\n", - "!wget -c https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/resolve/main/v1-5-pruned-emaonly-fp16.safetensors -P ./models/checkpoints/\n", - "\n", - "# SD2\n", - "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.safetensors -P ./models/checkpoints/\n", - "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors -P ./models/checkpoints/\n", - "\n", - "# Some SD1.5 anime style\n", - "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors -P ./models/checkpoints/\n", - "#!wget -c 
https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1_orangemixs.safetensors -P ./models/checkpoints/\n", - "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A3_orangemixs.safetensors -P ./models/checkpoints/\n", - "#!wget -c https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-fp16-pruned.safetensors -P ./models/checkpoints/\n", - "\n", - "# Waifu Diffusion 1.5 (anime style SD2.x 768-v)\n", - "#!wget -c https://huggingface.co/waifu-diffusion/wd-1-5-beta3/resolve/main/wd-illusion-fp16.safetensors -P ./models/checkpoints/\n", - "\n", - "\n", - "# unCLIP models\n", - "#!wget -c https://huggingface.co/comfyanonymous/illuminatiDiffusionV1_v11_unCLIP/resolve/main/illuminatiDiffusionV1_v11-unclip-h-fp16.safetensors -P ./models/checkpoints/\n", - "#!wget -c https://huggingface.co/comfyanonymous/wd-1.5-beta2_unCLIP/resolve/main/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors -P ./models/checkpoints/\n", - "\n", - "\n", - "# VAE\n", - "!wget -c https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors -P ./models/vae/\n", - "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt -P ./models/vae/\n", - "#!wget -c https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/vae/kl-f8-anime2.ckpt -P ./models/vae/\n", - "\n", - "\n", - "# Loras\n", - "#!wget -c https://civitai.com/api/download/models/10350 -O ./models/loras/theovercomer8sContrastFix_sd21768.safetensors #theovercomer8sContrastFix SD2.x 768-v\n", - "#!wget -c https://civitai.com/api/download/models/10638 -O ./models/loras/theovercomer8sContrastFix_sd15.safetensors #theovercomer8sContrastFix SD1.x\n", - "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors -P ./models/loras/ #SDXL offset noise lora\n", - "\n", - "\n", - "# T2I-Adapter\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth -P ./models/controlnet/\n", - "\n", - "# T2I Styles Model\n", - "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_style_sd14v1.pth -P ./models/style_models/\n", - "\n", - "# CLIPVision model (needed for styles model)\n", - "#!wget -c https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin -O ./models/clip_vision/clip_vit14.bin\n", - "\n", - "\n", - "# ControlNet\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c 
https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_canny_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_lineart_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_openpose_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_scribble_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_seg_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_softedge_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors -P ./models/controlnet/\n", - "\n", - "# ControlNet SDXL\n", - "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-canny-rank256.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-depth-rank256.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors -P ./models/controlnet/\n", - "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors -P ./models/controlnet/\n", - "\n", - "# Controlnet Preprocessor nodes by Fannovel16\n", - "#!cd custom_nodes && git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors; cd comfy_controlnet_preprocessors && python install.py\n", - "\n", - "\n", - "# GLIGEN\n", - "#!wget -c https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/resolve/main/gligen_sd14_textbox_pruned_fp16.safetensors -P ./models/gligen/\n", - "\n", - "\n", - "# ESRGAN upscale model\n", - "#!wget -c https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./models/upscale_models/\n", - "#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth -P 
./models/upscale_models/\n", - "#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth -P ./models/upscale_models/\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "kkkkkkkkkkkkkkk" - }, - "source": [ - "### Run ComfyUI with cloudflared (Recommended Way)\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "jjjjjjjjjjjjjj" - }, - "outputs": [], - "source": [ - "!wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb\n", - "!dpkg -i cloudflared-linux-amd64.deb\n", - "\n", - "import subprocess\n", - "import threading\n", - "import time\n", - "import socket\n", - "import urllib.request\n", - "\n", - "def iframe_thread(port):\n", - " while True:\n", - " time.sleep(0.5)\n", - " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", - " result = sock.connect_ex(('127.0.0.1', port))\n", - " if result == 0:\n", - " break\n", - " sock.close()\n", - " print(\"\\nComfyUI finished loading, trying to launch cloudflared (if it gets stuck here cloudflared is having issues)\\n\")\n", - "\n", - " p = subprocess.Popen([\"cloudflared\", \"tunnel\", \"--url\", \"http://127.0.0.1:{}\".format(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", - " for line in p.stderr:\n", - " l = line.decode()\n", - " if \"trycloudflare.com \" in l:\n", - " print(\"This is the URL to access ComfyUI:\", l[l.find(\"http\"):], end='')\n", - " #print(l, end='')\n", - "\n", - "\n", - "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", - "\n", - "!python main.py --dont-print-server" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "kkkkkkkkkkkkkk" - }, - "source": [ - "### Run ComfyUI with localtunnel\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "jjjjjjjjjjjjj" - }, - "outputs": [], - "source": [ - "!npm install -g localtunnel\n", - "\n", - "import threading\n", - "\n", - "def iframe_thread(port):\n", - " while True:\n", - " time.sleep(0.5)\n", - " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", - " result = sock.connect_ex(('127.0.0.1', port))\n", - " if result == 0:\n", - " break\n", - " sock.close()\n", - " print(\"\\nComfyUI finished loading, trying to launch localtunnel (if it gets stuck here localtunnel is having issues)\\n\")\n", - "\n", - " print(\"The password/enpoint ip for localtunnel is:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n", - " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n", - " for line in p.stdout:\n", - " print(line.decode(), end='')\n", - "\n", - "\n", - "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", - "\n", - "!python main.py --dont-print-server" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gggggggggg" - }, - "source": [ - "### Run ComfyUI with colab iframe (use only in case the previous way with localtunnel doesn't work)\n", - "\n", - "You should see the ui appear in an iframe. If you get a 403 error, it's your firefox settings or an extension that's messing things up.\n", - "\n", - "If you want to open it in another window use the link.\n", - "\n", - "Note that some UI features like live image previews won't work because the colab iframe blocks websockets." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "hhhhhhhhhh" - }, - "outputs": [], - "source": [ - "import threading\n", - "def iframe_thread(port):\n", - " while True:\n", - " time.sleep(0.5)\n", - " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", - " result = sock.connect_ex(('127.0.0.1', port))\n", - " if result == 0:\n", - " break\n", - " sock.close()\n", - " from google.colab import output\n", - " output.serve_kernel_port_as_iframe(port, height=1024)\n", - " print(\"to open it in a window you can open this link here:\")\n", - " output.serve_kernel_port_as_window(port)\n", - "\n", - "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", - "\n", - "!python main.py --dont-print-server" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "provenance": [] - }, - "gpuClass": "standard", - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} From 1205afc708d963d160f38c1d6613a384ddf6c564 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Wed, 9 Jul 2025 23:41:22 +0800 Subject: [PATCH 0318/1073] Better training loop implementation (#8820) --- comfy_extras/nodes_train.py | 122 +++++++++++++++++++++++------------- 1 file changed, 80 insertions(+), 42 deletions(-) diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index 17caf5ad5..3d05fdab5 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -23,38 +23,78 @@ from comfy.comfy_types.node_typing import IO from comfy.weight_adapter import adapters +def make_batch_extra_option_dict(d, indicies, full_size=None): + new_dict = {} + for k, v in d.items(): + newv = v + if isinstance(v, dict): + newv = make_batch_extra_option_dict(v, indicies, full_size=full_size) + elif isinstance(v, torch.Tensor): + if full_size is None or v.size(0) == full_size: + newv = v[indicies] + elif isinstance(v, (list, tuple)) and len(v) == full_size: + newv = [v[i] for i in indicies] + new_dict[k] = newv + return new_dict + + class TrainSampler(comfy.samplers.Sampler): - def __init__(self, loss_fn, optimizer, loss_callback=None): + def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): self.loss_fn = loss_fn self.optimizer = optimizer self.loss_callback = loss_callback + self.batch_size = batch_size + self.total_steps = total_steps + self.seed = seed + self.training_dtype = training_dtype def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - self.optimizer.zero_grad() - noise = model_wrap.inner_model.model_sampling.noise_scaling(sigmas, noise, latent_image, False) - latent = model_wrap.inner_model.model_sampling.noise_scaling( - torch.zeros_like(sigmas), - torch.zeros_like(noise, requires_grad=True), - latent_image, - False - ) + cond = model_wrap.conds["positive"] + dataset_size = sigmas.size(0) + torch.cuda.empty_cache() + for i in (pbar:=tqdm.trange(self.total_steps, desc="Training LoRA", smoothing=0.01, disable=not comfy.utils.PROGRESS_BAR_ENABLED)): + noisegen = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(self.seed + i * 1000) + indicies = torch.randperm(dataset_size)[:self.batch_size].tolist() - # Ensure model is in training mode and computing gradients - # x0 pred - denoised = model_wrap(noise, sigmas, **extra_args) - try: - 
loss = self.loss_fn(denoised, latent.clone()) - except RuntimeError as e: - if "does not require grad and does not have a grad_fn" in str(e): - logging.info("WARNING: This is likely due to the model is loaded in inference mode.") - loss.backward() - if self.loss_callback: - self.loss_callback(loss.item()) + batch_latent = torch.stack([latent_image[i] for i in indicies]) + batch_noise = noisegen.generate_noise({"samples": batch_latent}).to(batch_latent.device) + batch_sigmas = [ + model_wrap.inner_model.model_sampling.percent_to_sigma( + torch.rand((1,)).item() + ) for _ in range(min(self.batch_size, dataset_size)) + ] + batch_sigmas = torch.tensor(batch_sigmas).to(batch_latent.device) - self.optimizer.step() - # torch.cuda.memory._dump_snapshot("trainn.pickle") - # torch.cuda.memory._record_memory_history(enabled=None) + xt = model_wrap.inner_model.model_sampling.noise_scaling( + batch_sigmas, + batch_noise, + batch_latent, + False + ) + x0 = model_wrap.inner_model.model_sampling.noise_scaling( + torch.zeros_like(batch_sigmas), + torch.zeros_like(batch_noise), + batch_latent, + False + ) + + model_wrap.conds["positive"] = [ + cond[i] for i in indicies + ] + batch_extra_args = make_batch_extra_option_dict(extra_args, indicies, full_size=dataset_size) + + with torch.autocast(xt.device.type, dtype=self.training_dtype): + x0_pred = model_wrap(xt, batch_sigmas, **batch_extra_args) + loss = self.loss_fn(x0_pred, x0) + loss.backward() + if self.loss_callback: + self.loss_callback(loss.item()) + pbar.set_postfix({"loss": f"{loss.item():.4f}"}) + + self.optimizer.step() + self.optimizer.zero_grad() + torch.cuda.empty_cache() return torch.zeros_like(latent_image) @@ -584,36 +624,34 @@ class TrainLoraNode: loss_map = {"loss": []} def loss_callback(loss): loss_map["loss"].append(loss) - pbar.set_postfix({"loss": f"{loss:.4f}"}) train_sampler = TrainSampler( - criterion, optimizer, loss_callback=loss_callback + criterion, + optimizer, + loss_callback=loss_callback, + batch_size=batch_size, + total_steps=steps, + seed=seed, + training_dtype=dtype ) guider = comfy_extras.nodes_custom_sampler.Guider_Basic(mp) guider.set_conds(positive) # Set conditioning from input - # yoland: this currently resize to the first image in the dataset - # Training loop - torch.cuda.empty_cache() try: - for step in (pbar:=tqdm.trange(steps, desc="Training LoRA", smoothing=0.01, disable=not comfy.utils.PROGRESS_BAR_ENABLED)): - # Generate random sigma - sigmas = [mp.model.model_sampling.percent_to_sigma( - torch.rand((1,)).item() - ) for _ in range(min(batch_size, num_images))] - sigmas = torch.tensor(sigmas) - - noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(step * 1000 + seed) - - indices = torch.randperm(num_images)[:batch_size] - batch_latent = latents[indices].clone() - guider.set_conds([positive[i] for i in indices]) # Set conditioning from input - guider.sample(noise.generate_noise({"samples": batch_latent}), batch_latent, train_sampler, sigmas, seed=noise.seed) + # Generate dummy sigmas and noise + sigmas = torch.tensor(range(num_images)) + noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(seed) + guider.sample( + noise.generate_noise({"samples": latents}), + latents, + train_sampler, + sigmas, + seed=noise.seed + ) finally: for m in mp.model.modules(): unpatch(m) del train_sampler, optimizer - torch.cuda.empty_cache() for adapter in all_weight_adapters: adapter.requires_grad_(False) From 1fd306824d35bf2669f6be46fadab37efd7081c4 Mon Sep 17 00:00:00 2001 From: comfyanonymous 
<121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 9 Jul 2025 22:03:27 -0700 Subject: [PATCH 0319/1073] Add warning to catch torch import mistakes. (#8852) --- main.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/main.py b/main.py index d488c0f4c..0b1987ef4 100644 --- a/main.py +++ b/main.py @@ -127,6 +127,9 @@ if __name__ == "__main__": import cuda_malloc +if 'torch' in sys.modules: + logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.") + import comfy.utils import execution From 2b653e8c18f18792e7f080df611c8a35f1d0fdf4 Mon Sep 17 00:00:00 2001 From: guill Date: Thu, 10 Jul 2025 11:46:19 -0700 Subject: [PATCH 0320/1073] Support for async node functions (#8830) * Support for async execution functions This commit adds support for node execution functions defined as async. When a node's execution function is defined as async, we can continue executing other nodes while it is processing. Standard uses of `await` should "just work", but people will still have to be careful if they spawn actual threads. Because torch doesn't really have async/await versions of functions, this won't particularly help with most locally-executing nodes, but it does work for e.g. web requests to other machines. In addition to the execute function, the `VALIDATE_INPUTS` and `check_lazy_status` functions can also be defined as async, though we'll only resolve one node at a time right now for those. * Add the execution model tests to CI * Add a missing file It looks like this got caught by .gitignore? There's probably a better place to put it, but I'm not sure what that is. * Add the websocket library for automated tests * Add additional tests for async error cases Also fixes one bug that was found when an async function throws an error after being scheduled on a task. * Add a feature flags message to reduce bandwidth We now only send 1 preview message of the latest type the client can support. We'll add a console warning when the client fails to send a feature flags message at some point in the future. * Add async tests to CI * Don't actually add new tests in this PR Will do it in a separate PR * Resolve unit test in GPU-less runner * Just remove the tests that GHA can't handle * Change line endings to UNIX-style * Avoid loading model_management.py so early Because model_management.py has a top-level `logging.info`, we have to be careful not to import that file before we call `setup_logging`. If we do, we end up having the default logging handler registered in addition to our custom one. 
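For illustration only (this hypothetical node is not part of the patch; the
class name, URL, and use of aiohttp are placeholders), an async execution
function can await network I/O while the executor keeps running other nodes:

    import aiohttp

    class RemoteCaptionExample:
        @classmethod
        def INPUT_TYPES(cls):
            return {"required": {"url": ("STRING", {"default": "http://localhost:9000/caption"})}}

        RETURN_TYPES = ("STRING",)
        FUNCTION = "fetch"
        CATEGORY = "api/example"

        async def fetch(self, url):
            # While this request is in flight, the executor can run other nodes.
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    return (await resp.text(),)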
--- comfy/utils.py | 5 +- comfy_api/feature_flags.py | 69 +++ comfy_execution/caching.py | 53 +-- comfy_execution/graph.py | 20 +- comfy_execution/progress.py | 347 +++++++++++++++ comfy_execution/utils.py | 46 ++ execution.py | 135 ++++-- main.py | 32 +- protocol.py | 7 + server.py | 98 ++++- tests-unit/feature_flags_test.py | 98 +++++ tests-unit/requirements.txt | 1 + tests-unit/websocket_feature_flags_test.py | 77 ++++ tests/inference/extra_model_paths.yaml | 2 +- tests/inference/test_async_nodes.py | 410 ++++++++++++++++++ tests/inference/test_execution.py | 65 ++- .../testing_nodes/testing-pack/__init__.py | 49 ++- .../testing-pack/async_test_nodes.py | 343 +++++++++++++++ .../testing-pack/specific_tests.py | 136 ++++++ 19 files changed, 1898 insertions(+), 95 deletions(-) create mode 100644 comfy_api/feature_flags.py create mode 100644 comfy_execution/progress.py create mode 100644 comfy_execution/utils.py create mode 100644 protocol.py create mode 100644 tests-unit/feature_flags_test.py create mode 100644 tests-unit/websocket_feature_flags_test.py create mode 100644 tests/inference/test_async_nodes.py create mode 100644 tests/inference/testing_nodes/testing-pack/async_test_nodes.py diff --git a/comfy/utils.py b/comfy/utils.py index 47981d8f6..f8e01f713 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -998,11 +998,12 @@ def set_progress_bar_global_hook(function): PROGRESS_BAR_HOOK = function class ProgressBar: - def __init__(self, total): + def __init__(self, total, node_id=None): global PROGRESS_BAR_HOOK self.total = total self.current = 0 self.hook = PROGRESS_BAR_HOOK + self.node_id = node_id def update_absolute(self, value, total=None, preview=None): if total is not None: @@ -1011,7 +1012,7 @@ class ProgressBar: value = self.total self.current = value if self.hook is not None: - self.hook(self.current, self.total, preview) + self.hook(self.current, self.total, preview, node_id=self.node_id) def update(self, value): self.update_absolute(self.current + value) diff --git a/comfy_api/feature_flags.py b/comfy_api/feature_flags.py new file mode 100644 index 000000000..0d4389a6e --- /dev/null +++ b/comfy_api/feature_flags.py @@ -0,0 +1,69 @@ +""" +Feature flags module for ComfyUI WebSocket protocol negotiation. + +This module handles capability negotiation between frontend and backend, +allowing graceful protocol evolution while maintaining backward compatibility. +""" + +from typing import Any, Dict + +from comfy.cli_args import args + +# Default server capabilities +SERVER_FEATURE_FLAGS: Dict[str, Any] = { + "supports_preview_metadata": True, + "max_upload_size": args.max_upload_size * 1024 * 1024, # Convert MB to bytes +} + + +def get_connection_feature( + sockets_metadata: Dict[str, Dict[str, Any]], + sid: str, + feature_name: str, + default: Any = False +) -> Any: + """ + Get a feature flag value for a specific connection. + + Args: + sockets_metadata: Dictionary of socket metadata + sid: Session ID of the connection + feature_name: Name of the feature to check + default: Default value if feature not found + + Returns: + Feature value or default if not found + """ + if sid not in sockets_metadata: + return default + + return sockets_metadata[sid].get("feature_flags", {}).get(feature_name, default) + + +def supports_feature( + sockets_metadata: Dict[str, Dict[str, Any]], + sid: str, + feature_name: str +) -> bool: + """ + Check if a connection supports a specific feature. 
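+
+    Handlers can use this to gate newer protocol features per connection,
+    e.g. only sending a newer binary message format to clients that have
+    advertised support for it.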
+ + Args: + sockets_metadata: Dictionary of socket metadata + sid: Session ID of the connection + feature_name: Name of the feature to check + + Returns: + Boolean indicating if feature is supported + """ + return get_connection_feature(sockets_metadata, sid, feature_name, False) is True + + +def get_server_features() -> Dict[str, Any]: + """ + Get the server's feature flags. + + Returns: + Dictionary of server feature flags + """ + return SERVER_FEATURE_FLAGS.copy() diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py index dbb37b89f..41224ce3b 100644 --- a/comfy_execution/caching.py +++ b/comfy_execution/caching.py @@ -1,6 +1,7 @@ import itertools from typing import Sequence, Mapping, Dict from comfy_execution.graph import DynamicPrompt +from abc import ABC, abstractmethod import nodes @@ -16,12 +17,13 @@ def include_unique_id_in_input(class_type: str) -> bool: NODE_CLASS_CONTAINS_UNIQUE_ID[class_type] = "UNIQUE_ID" in class_def.INPUT_TYPES().get("hidden", {}).values() return NODE_CLASS_CONTAINS_UNIQUE_ID[class_type] -class CacheKeySet: +class CacheKeySet(ABC): def __init__(self, dynprompt, node_ids, is_changed_cache): self.keys = {} self.subcache_keys = {} - def add_keys(self, node_ids): + @abstractmethod + async def add_keys(self, node_ids): raise NotImplementedError() def all_node_ids(self): @@ -60,9 +62,8 @@ class CacheKeySetID(CacheKeySet): def __init__(self, dynprompt, node_ids, is_changed_cache): super().__init__(dynprompt, node_ids, is_changed_cache) self.dynprompt = dynprompt - self.add_keys(node_ids) - def add_keys(self, node_ids): + async def add_keys(self, node_ids): for node_id in node_ids: if node_id in self.keys: continue @@ -77,37 +78,36 @@ class CacheKeySetInputSignature(CacheKeySet): super().__init__(dynprompt, node_ids, is_changed_cache) self.dynprompt = dynprompt self.is_changed_cache = is_changed_cache - self.add_keys(node_ids) def include_node_id_in_input(self) -> bool: return False - def add_keys(self, node_ids): + async def add_keys(self, node_ids): for node_id in node_ids: if node_id in self.keys: continue if not self.dynprompt.has_node(node_id): continue node = self.dynprompt.get_node(node_id) - self.keys[node_id] = self.get_node_signature(self.dynprompt, node_id) + self.keys[node_id] = await self.get_node_signature(self.dynprompt, node_id) self.subcache_keys[node_id] = (node_id, node["class_type"]) - def get_node_signature(self, dynprompt, node_id): + async def get_node_signature(self, dynprompt, node_id): signature = [] ancestors, order_mapping = self.get_ordered_ancestry(dynprompt, node_id) - signature.append(self.get_immediate_node_signature(dynprompt, node_id, order_mapping)) + signature.append(await self.get_immediate_node_signature(dynprompt, node_id, order_mapping)) for ancestor_id in ancestors: - signature.append(self.get_immediate_node_signature(dynprompt, ancestor_id, order_mapping)) + signature.append(await self.get_immediate_node_signature(dynprompt, ancestor_id, order_mapping)) return to_hashable(signature) - def get_immediate_node_signature(self, dynprompt, node_id, ancestor_order_mapping): + async def get_immediate_node_signature(self, dynprompt, node_id, ancestor_order_mapping): if not dynprompt.has_node(node_id): # This node doesn't exist -- we can't cache it. 
return [float("NaN")] node = dynprompt.get_node(node_id) class_type = node["class_type"] class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - signature = [class_type, self.is_changed_cache.get(node_id)] + signature = [class_type, await self.is_changed_cache.get(node_id)] if self.include_node_id_in_input() or (hasattr(class_def, "NOT_IDEMPOTENT") and class_def.NOT_IDEMPOTENT) or include_unique_id_in_input(class_type): signature.append(node_id) inputs = node["inputs"] @@ -150,9 +150,10 @@ class BasicCache: self.cache = {} self.subcaches = {} - def set_prompt(self, dynprompt, node_ids, is_changed_cache): + async def set_prompt(self, dynprompt, node_ids, is_changed_cache): self.dynprompt = dynprompt self.cache_key_set = self.key_class(dynprompt, node_ids, is_changed_cache) + await self.cache_key_set.add_keys(node_ids) self.is_changed_cache = is_changed_cache self.initialized = True @@ -201,13 +202,13 @@ class BasicCache: else: return None - def _ensure_subcache(self, node_id, children_ids): + async def _ensure_subcache(self, node_id, children_ids): subcache_key = self.cache_key_set.get_subcache_key(node_id) subcache = self.subcaches.get(subcache_key, None) if subcache is None: subcache = BasicCache(self.key_class) self.subcaches[subcache_key] = subcache - subcache.set_prompt(self.dynprompt, children_ids, self.is_changed_cache) + await subcache.set_prompt(self.dynprompt, children_ids, self.is_changed_cache) return subcache def _get_subcache(self, node_id): @@ -259,10 +260,10 @@ class HierarchicalCache(BasicCache): assert cache is not None cache._set_immediate(node_id, value) - def ensure_subcache_for(self, node_id, children_ids): + async def ensure_subcache_for(self, node_id, children_ids): cache = self._get_cache_for(node_id) assert cache is not None - return cache._ensure_subcache(node_id, children_ids) + return await cache._ensure_subcache(node_id, children_ids) class LRUCache(BasicCache): def __init__(self, key_class, max_size=100): @@ -273,8 +274,8 @@ class LRUCache(BasicCache): self.used_generation = {} self.children = {} - def set_prompt(self, dynprompt, node_ids, is_changed_cache): - super().set_prompt(dynprompt, node_ids, is_changed_cache) + async def set_prompt(self, dynprompt, node_ids, is_changed_cache): + await super().set_prompt(dynprompt, node_ids, is_changed_cache) self.generation += 1 for node_id in node_ids: self._mark_used(node_id) @@ -303,11 +304,11 @@ class LRUCache(BasicCache): self._mark_used(node_id) return self._set_immediate(node_id, value) - def ensure_subcache_for(self, node_id, children_ids): + async def ensure_subcache_for(self, node_id, children_ids): # Just uses subcaches for tracking 'live' nodes - super()._ensure_subcache(node_id, children_ids) + await super()._ensure_subcache(node_id, children_ids) - self.cache_key_set.add_keys(children_ids) + await self.cache_key_set.add_keys(children_ids) self._mark_used(node_id) cache_key = self.cache_key_set.get_data_key(node_id) self.children[cache_key] = [] @@ -337,7 +338,7 @@ class DependencyAwareCache(BasicCache): self.ancestors = {} # Maps node_id -> set of ancestor node_ids self.executed_nodes = set() # Tracks nodes that have been executed - def set_prompt(self, dynprompt, node_ids, is_changed_cache): + async def set_prompt(self, dynprompt, node_ids, is_changed_cache): """ Clear the entire cache and rebuild the dependency graph. 
@@ -354,7 +355,7 @@ class DependencyAwareCache(BasicCache):
         self.executed_nodes.clear()
 
         # Call the parent method to initialize the cache with the new prompt
-        super().set_prompt(dynprompt, node_ids, is_changed_cache)
+        await super().set_prompt(dynprompt, node_ids, is_changed_cache)
 
         # Rebuild the dependency graph
         self._build_dependency_graph(dynprompt, node_ids)
@@ -405,7 +406,7 @@ class DependencyAwareCache(BasicCache):
         """
         return self._get_immediate(node_id)
 
-    def ensure_subcache_for(self, node_id, children_ids):
+    async def ensure_subcache_for(self, node_id, children_ids):
        """
        Ensure a subcache exists for a node and update dependencies.

@@ -416,7 +417,7 @@ class DependencyAwareCache(BasicCache):
        Returns:
            The subcache object for the node.
        """
-        subcache = super()._ensure_subcache(node_id, children_ids)
+        subcache = await super()._ensure_subcache(node_id, children_ids)
         for child_id in children_ids:
             self.descendants[node_id].add(child_id)
             self.ancestors[child_id].add(node_id)
diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py
index a2799b52e..c79243e1e 100644
--- a/comfy_execution/graph.py
+++ b/comfy_execution/graph.py
@@ -2,6 +2,7 @@ from __future__ import annotations
 from typing import Type, Literal
 
 import nodes
+import asyncio
 from comfy_execution.graph_utils import is_link
 from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, InputTypeOptions
 
@@ -100,6 +101,8 @@ class TopologicalSort:
         self.pendingNodes = {}
         self.blockCount = {} # Number of nodes this node is directly blocked by
         self.blocking = {} # Which nodes are blocked by this node
+        self.externalBlocks = 0
+        self.unblockedEvent = asyncio.Event()
 
     def get_input_info(self, unique_id, input_name):
         class_type = self.dynprompt.get_node(unique_id)["class_type"]
@@ -153,6 +156,16 @@ class TopologicalSort:
         for link in links:
             self.add_strong_link(*link)
 
+    def add_external_block(self, node_id):
+        assert node_id in self.blockCount, "Can't add external block to a node that isn't pending"
+        self.externalBlocks += 1
+        self.blockCount[node_id] += 1
+        def unblock():
+            self.externalBlocks -= 1
+            self.blockCount[node_id] -= 1
+            self.unblockedEvent.set()
+        return unblock
+
     def is_cached(self, node_id):
         return False
 
@@ -181,11 +194,16 @@ class ExecutionList(TopologicalSort):
     def is_cached(self, node_id):
         return self.output_cache.get(node_id) is not None
 
-    def stage_node_execution(self):
+    async def stage_node_execution(self):
         assert self.staged_node_id is None
         if self.is_empty():
             return None, None, None
         available = self.get_ready_nodes()
+        while len(available) == 0 and self.externalBlocks > 0:
+            # Wait for an external block to be released
+            await self.unblockedEvent.wait()
+            self.unblockedEvent.clear()
+            available = self.get_ready_nodes()
         if len(available) == 0:
             cycled_nodes = self.get_nodes_in_cycle()
             # Because cycles composed entirely of static nodes are caught during initial validation,
diff --git a/comfy_execution/progress.py b/comfy_execution/progress.py
new file mode 100644
index 000000000..5645b3e3c
--- /dev/null
+++ b/comfy_execution/progress.py
@@ -0,0 +1,347 @@
+from typing import TypedDict, Dict, Optional
+from typing_extensions import override
+from PIL import Image
+from enum import Enum
+from abc import ABC
+from tqdm import tqdm
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from comfy_execution.graph import DynamicPrompt
+from protocol import BinaryEventTypes
+from comfy_api import feature_flags
+
+
+class NodeState(Enum):
+    Pending = "pending"
+    Running = "running"
+    Finished = "finished"
+    Error = "error"
+
+
+class NodeProgressState(TypedDict):
+    """
+    A class to represent the state of a node's progress.
+    """
+
+    state: NodeState
+    value: float
+    max: float
+
+
+class ProgressHandler(ABC):
+    """
+    Abstract base class for progress handlers.
+    Progress handlers receive progress updates and display them in various ways.
+    """
+
+    def __init__(self, name: str):
+        self.name = name
+        self.enabled = True
+
+    def set_registry(self, registry: "ProgressRegistry"):
+        pass
+
+    def start_handler(self, node_id: str, state: NodeProgressState, prompt_id: str):
+        """Called when a node starts processing"""
+        pass
+
+    def update_handler(
+        self,
+        node_id: str,
+        value: float,
+        max_value: float,
+        state: NodeProgressState,
+        prompt_id: str,
+        image: Optional[Image.Image] = None,
+    ):
+        """Called when a node's progress is updated"""
+        pass
+
+    def finish_handler(self, node_id: str, state: NodeProgressState, prompt_id: str):
+        """Called when a node finishes processing"""
+        pass
+
+    def reset(self):
+        """Called when the progress registry is reset"""
+        pass
+
+    def enable(self):
+        """Enable this handler"""
+        self.enabled = True
+
+    def disable(self):
+        """Disable this handler"""
+        self.enabled = False
+
+
+class CLIProgressHandler(ProgressHandler):
+    """
+    Handler that displays progress using tqdm progress bars in the CLI.
+    """
+
+    def __init__(self):
+        super().__init__("cli")
+        self.progress_bars: Dict[str, tqdm] = {}
+
+    @override
+    def start_handler(self, node_id: str, state: NodeProgressState, prompt_id: str):
+        # Create a new tqdm progress bar
+        if node_id not in self.progress_bars:
+            self.progress_bars[node_id] = tqdm(
+                total=state["max"],
+                desc=f"Node {node_id}",
+                unit="steps",
+                leave=True,
+                position=len(self.progress_bars),
+            )
+
+    @override
+    def update_handler(
+        self,
+        node_id: str,
+        value: float,
+        max_value: float,
+        state: NodeProgressState,
+        prompt_id: str,
+        image: Optional[Image.Image] = None,
+    ):
+        # Handle case where start_handler wasn't called
+        if node_id not in self.progress_bars:
+            self.progress_bars[node_id] = tqdm(
+                total=max_value,
+                desc=f"Node {node_id}",
+                unit="steps",
+                leave=True,
+                position=len(self.progress_bars),
+            )
+            self.progress_bars[node_id].update(value)
+        else:
+            # Update existing progress bar
+            if max_value != self.progress_bars[node_id].total:
+                self.progress_bars[node_id].total = max_value
+            # Calculate the update amount (difference from current position)
+            current_position = self.progress_bars[node_id].n
+            update_amount = value - current_position
+            if update_amount > 0:
+                self.progress_bars[node_id].update(update_amount)
+
+    @override
+    def finish_handler(self, node_id: str, state: NodeProgressState, prompt_id: str):
+        # Complete and close the progress bar if it exists
+        if node_id in self.progress_bars:
+            # Ensure the bar shows 100% completion
+            remaining = state["max"] - self.progress_bars[node_id].n
+            if remaining > 0:
+                self.progress_bars[node_id].update(remaining)
+            self.progress_bars[node_id].close()
+            del self.progress_bars[node_id]
+
+    @override
+    def reset(self):
+        # Close all progress bars
+        for bar in self.progress_bars.values():
+            bar.close()
+        self.progress_bars.clear()
+
+
+class WebUIProgressHandler(ProgressHandler):
+    """
+    Handler that sends progress updates to the WebUI via WebSockets.
+    """
+
+    def __init__(self, server_instance):
+        super().__init__("webui")
+        self.server_instance = server_instance
+
+    def set_registry(self, registry: "ProgressRegistry"):
+        self.registry = registry
+
+    def _send_progress_state(self, prompt_id: str, nodes: Dict[str, NodeProgressState]):
+        """Send the current progress state to the client"""
+        if self.server_instance is None:
+            return
+
+        # Only send info for non-pending nodes
+        active_nodes = {
+            node_id: {
+                "value": state["value"],
+                "max": state["max"],
+                "state": state["state"].value,
+                "node_id": node_id,
+                "prompt_id": prompt_id,
+                "display_node_id": self.registry.dynprompt.get_display_node_id(node_id),
+                "parent_node_id": self.registry.dynprompt.get_parent_node_id(node_id),
+                "real_node_id": self.registry.dynprompt.get_real_node_id(node_id),
+            }
+            for node_id, state in nodes.items()
+            if state["state"] != NodeState.Pending
+        }
+
+        # Send a combined progress_state message with all node states
+        self.server_instance.send_sync(
+            "progress_state", {"prompt_id": prompt_id, "nodes": active_nodes}
+        )
+
+    @override
+    def start_handler(self, node_id: str, state: NodeProgressState, prompt_id: str):
+        # Send progress state of all nodes
+        if self.registry:
+            self._send_progress_state(prompt_id, self.registry.nodes)
+
+    @override
+    def update_handler(
+        self,
+        node_id: str,
+        value: float,
+        max_value: float,
+        state: NodeProgressState,
+        prompt_id: str,
+        image: Optional[Image.Image] = None,
+    ):
+        # Send progress state of all nodes
+        if self.registry:
+            self._send_progress_state(prompt_id, self.registry.nodes)
+        if image:
+            # Only send new format if client supports it
+            if feature_flags.supports_feature(
+                self.server_instance.sockets_metadata,
+                self.server_instance.client_id,
+                "supports_preview_metadata",
+            ):
+                metadata = {
+                    "node_id": node_id,
+                    "prompt_id": prompt_id,
+                    "display_node_id": self.registry.dynprompt.get_display_node_id(
+                        node_id
+                    ),
+                    "parent_node_id": self.registry.dynprompt.get_parent_node_id(
+                        node_id
+                    ),
+                    "real_node_id": self.registry.dynprompt.get_real_node_id(node_id),
+                }
+                self.server_instance.send_sync(
+                    BinaryEventTypes.PREVIEW_IMAGE_WITH_METADATA,
+                    (image, metadata),
+                    self.server_instance.client_id,
+                )
+
+    @override
+    def finish_handler(self, node_id: str, state: NodeProgressState, prompt_id: str):
+        # Send progress state of all nodes
+        if self.registry:
+            self._send_progress_state(prompt_id, self.registry.nodes)
+
+
+class ProgressRegistry:
+    """
+    Registry that maintains node progress state and notifies registered handlers.
+    """
+
+    def __init__(self, prompt_id: str, dynprompt: "DynamicPrompt"):
+        self.prompt_id = prompt_id
+        self.dynprompt = dynprompt
+        self.nodes: Dict[str, NodeProgressState] = {}
+        self.handlers: Dict[str, ProgressHandler] = {}
+
+    def register_handler(self, handler: ProgressHandler) -> None:
+        """Register a progress handler"""
+        self.handlers[handler.name] = handler
+
+    def unregister_handler(self, handler_name: str) -> None:
+        """Unregister a progress handler"""
+        if handler_name in self.handlers:
+            # Allow handler to clean up resources
+            self.handlers[handler_name].reset()
+            del self.handlers[handler_name]
+
+    def enable_handler(self, handler_name: str) -> None:
+        """Enable a progress handler"""
+        if handler_name in self.handlers:
+            self.handlers[handler_name].enable()
+
+    def disable_handler(self, handler_name: str) -> None:
+        """Disable a progress handler"""
+        if handler_name in self.handlers:
+            self.handlers[handler_name].disable()
+
+    def ensure_entry(self, node_id: str) -> NodeProgressState:
+        """Ensure a node entry exists"""
+        if node_id not in self.nodes:
+            self.nodes[node_id] = NodeProgressState(
+                state=NodeState.Pending, value=0, max=1
+            )
+        return self.nodes[node_id]
+
+    def start_progress(self, node_id: str) -> None:
+        """Start progress tracking for a node"""
+        entry = self.ensure_entry(node_id)
+        entry["state"] = NodeState.Running
+        entry["value"] = 0.0
+        entry["max"] = 1.0
+
+        # Notify all enabled handlers
+        for handler in self.handlers.values():
+            if handler.enabled:
+                handler.start_handler(node_id, entry, self.prompt_id)
+
+    def update_progress(
+        self, node_id: str, value: float, max_value: float, image: Optional[Image.Image]
+    ) -> None:
+        """Update progress for a node"""
+        entry = self.ensure_entry(node_id)
+        entry["state"] = NodeState.Running
+        entry["value"] = value
+        entry["max"] = max_value
+
+        # Notify all enabled handlers
+        for handler in self.handlers.values():
+            if handler.enabled:
+                handler.update_handler(
+                    node_id, value, max_value, entry, self.prompt_id, image
+                )
+
+    def finish_progress(self, node_id: str) -> None:
+        """Finish progress tracking for a node"""
+        entry = self.ensure_entry(node_id)
+        entry["state"] = NodeState.Finished
+        entry["value"] = entry["max"]
+
+        # Notify all enabled handlers
+        for handler in self.handlers.values():
+            if handler.enabled:
+                handler.finish_handler(node_id, entry, self.prompt_id)
+
+    def reset_handlers(self) -> None:
+        """Reset all handlers"""
+        for handler in self.handlers.values():
+            handler.reset()
+
+# Global registry instance
+global_progress_registry: ProgressRegistry | None = None
+
+def reset_progress_state(prompt_id: str, dynprompt: "DynamicPrompt") -> None:
+    global global_progress_registry
+
+    # Reset existing handlers if registry exists
+    if global_progress_registry is not None:
+        global_progress_registry.reset_handlers()
+
+    # Create new registry
+    global_progress_registry = ProgressRegistry(prompt_id, dynprompt)
+
+
+def add_progress_handler(handler: ProgressHandler) -> None:
+    registry = get_progress_state()
+    handler.set_registry(registry)
+    registry.register_handler(handler)
+
+
+def get_progress_state() -> ProgressRegistry:
+    global global_progress_registry
+    if global_progress_registry is None:
+        from comfy_execution.graph import DynamicPrompt
+
+        global_progress_registry = ProgressRegistry(
+            prompt_id="", dynprompt=DynamicPrompt({})
+        )
+    return global_progress_registry
diff --git a/comfy_execution/utils.py b/comfy_execution/utils.py
new file mode 100644
index 000000000..62d32f101
--- /dev/null
+++ b/comfy_execution/utils.py
@@ -0,0 +1,47 @@
+import contextvars
+from typing import Optional, NamedTuple
+
+class ExecutionContext(NamedTuple):
+    """
+    Context information about the currently executing node.
+
+    Attributes:
+        prompt_id: The ID of the prompt being executed
+        node_id: The ID of the currently executing node
+        list_index: The index in a list being processed (for operations on batches/lists)
+    """
+    prompt_id: str
+    node_id: str
+    list_index: Optional[int]
+
+current_executing_context: contextvars.ContextVar[Optional[ExecutionContext]] = contextvars.ContextVar("current_executing_context", default=None)
+
+def get_executing_context() -> Optional[ExecutionContext]:
+    return current_executing_context.get(None)
+
+class CurrentNodeContext:
+    """
+    Context manager for setting the current executing node context.
+
+    Sets the current_executing_context on enter and resets it on exit.
+
+    Example:
+        with CurrentNodeContext(prompt_id="abc", node_id="123", list_index=0):
+            # Code that should run with the current node context set
+            process_image()
+    """
+    def __init__(self, prompt_id: str, node_id: str, list_index: Optional[int] = None):
+        self.context = ExecutionContext(
+            prompt_id=prompt_id,
+            node_id=node_id,
+            list_index=list_index
+        )
+        self.token = None
+
+    def __enter__(self):
+        self.token = current_executing_context.set(self.context)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.token is not None:
+            current_executing_context.reset(self.token)
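Because the executor wraps every node invocation in CurrentNodeContext, helper code deep inside a node can discover which node is running without threading IDs through every call. A small usage sketch (the report_step helper is illustrative):

    from comfy_execution.utils import get_executing_context

    def report_step(step: int, total: int) -> None:
        # Returns None outside of node execution, so guard before using it.
        ctx = get_executing_context()
        if ctx is not None:
            print(f"prompt {ctx.prompt_id} node {ctx.node_id}: {step}/{total}")
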
diff --git a/execution.py b/execution.py
index f6006fa12..90cefc023 100644
--- a/execution.py
+++ b/execution.py
@@ -8,12 +8,14 @@ import time
 import traceback
 from enum import Enum
 from typing import List, Literal, NamedTuple, Optional
+import asyncio
 
 import torch
 
 import comfy.model_management
 import nodes
 from comfy_execution.caching import (
+    BasicCache,
     CacheKeySetID,
     CacheKeySetInputSignature,
     DependencyAwareCache,
@@ -28,6 +30,8 @@ from comfy_execution.graph import (
 )
 from comfy_execution.graph_utils import GraphBuilder, is_link
 from comfy_execution.validation import validate_node_input
+from comfy_execution.progress import get_progress_state, reset_progress_state, add_progress_handler, WebUIProgressHandler
+from comfy_execution.utils import CurrentNodeContext
 
 
 class ExecutionResult(Enum):
@@ -39,12 +43,13 @@ class DuplicateNodeError(Exception):
     pass
 
 class IsChangedCache:
-    def __init__(self, dynprompt, outputs_cache):
+    def __init__(self, prompt_id: str, dynprompt: DynamicPrompt, outputs_cache: BasicCache):
+        self.prompt_id = prompt_id
         self.dynprompt = dynprompt
         self.outputs_cache = outputs_cache
         self.is_changed = {}
 
-    def get(self, node_id):
+    async def get(self, node_id):
         if node_id in self.is_changed:
             return self.is_changed[node_id]
 
@@ -62,7 +67,8 @@ class IsChangedCache:
         # Intentionally do not use cached outputs here. We only want constants in IS_CHANGED
         input_data_all, _ = get_input_data(node["inputs"], class_def, node_id, None)
         try:
-            is_changed = _map_node_over_list(class_def, input_data_all, "IS_CHANGED")
+            is_changed = await _async_map_node_over_list(self.prompt_id, node_id, class_def, input_data_all, "IS_CHANGED")
+            is_changed = await resolve_map_node_over_list_results(is_changed)
             node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed]
         except Exception as e:
             logging.warning("WARNING: {}".format(e))
@@ -164,7 +170,19 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}):
 
 map_node_over_list = None #Don't hook this please
 
-def _map_node_over_list(obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
+async def resolve_map_node_over_list_results(results):
+    remaining = [x for x in results if isinstance(x, asyncio.Task) and not x.done()]
+    if len(remaining) == 0:
+        return [x.result() if isinstance(x, asyncio.Task) else x for x in results]
+    else:
+        done, pending = await asyncio.wait(remaining)
+        for task in done:
+            exc = task.exception()
+            if exc is not None:
+                raise exc
+        return [x.result() if isinstance(x, asyncio.Task) else x for x in results]
+
+async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
     # check if node wants the lists
     input_is_list = getattr(obj, "INPUT_IS_LIST", False)
 
@@ -178,7 +196,7 @@ def _map_node_over_list(obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
         return {k: v[i if len(v) > i else -1] for k, v in d.items()}
 
     results = []
-    def process_inputs(inputs, index=None, input_is_list=False):
+    async def process_inputs(inputs, index=None, input_is_list=False):
         if allow_interrupt:
             nodes.before_node_execution()
         execution_block = None
@@ -194,20 +212,37 @@ def _map_node_over_list(obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
         if execution_block is None:
             if pre_execute_cb is not None and index is not None:
                 pre_execute_cb(index)
-            results.append(getattr(obj, func)(**inputs))
+            f = getattr(obj, func)
+            if inspect.iscoroutinefunction(f):
+                async def async_wrapper(f, prompt_id, unique_id, list_index, args):
+                    with CurrentNodeContext(prompt_id, unique_id, list_index):
+                        return await f(**args)
+                task = asyncio.create_task(async_wrapper(f, prompt_id, unique_id, index, args=inputs))
+                # Give the task a chance to execute without yielding
+                await asyncio.sleep(0)
+                if task.done():
+                    result = task.result()
+                    results.append(result)
+                else:
+                    results.append(task)
+            else:
+                with CurrentNodeContext(prompt_id, unique_id, index):
+                    result = f(**inputs)
+                results.append(result)
         else:
             results.append(execution_block)
 
     if input_is_list:
-        process_inputs(input_data_all, 0, input_is_list=input_is_list)
+        await process_inputs(input_data_all, 0, input_is_list=input_is_list)
     elif max_len_input == 0:
-        process_inputs({})
+        await process_inputs({})
     else:
         for i in range(max_len_input):
             input_dict = slice_dict(input_data_all, i)
-            process_inputs(input_dict, i)
+            await process_inputs(input_dict, i)
     return results
 
+
 def merge_result_data(results, obj):
     # check which outputs need concatenating
     output = []
@@ -229,11 +264,18 @@ def merge_result_data(results, obj):
             output.append([o[i] for o in results])
     return output
 
-def get_output_data(obj, input_data_all, execution_block_cb=None, pre_execute_cb=None):
+async def get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=None, pre_execute_cb=None):
+    return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
+    has_pending_task = any(isinstance(r, asyncio.Task) and not r.done() for r in return_values)
+    if has_pending_task:
+        return return_values, {}, False, has_pending_task
+    output, ui, has_subgraph = get_output_from_returns(return_values, obj)
+    return output, ui, has_subgraph, False
+
+def get_output_from_returns(return_values, obj):
     results = []
     uis = []
     subgraph_results = []
-    return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
     has_subgraph = False
     for i in range(len(return_values)):
         r = return_values[i]
@@ -267,6 +309,10 @@ def get_output_data(obj, input_data_all, execution_block_cb=None, pre_execute_cb=None):
     else:
         output = []
         ui = dict()
+    # TODO: Think there's an existing bug here
+    # If we're performing a subgraph expansion, we probably shouldn't be returning UI values yet.
+    # They'll get cached without the completed subgraphs. It's an edge case and I'm not aware of
+    # any nodes that use both subgraph expansion and custom UI outputs, but might be a problem in the future.
     if len(uis) > 0:
         ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()}
     return output, ui, has_subgraph
@@ -279,7 +325,7 @@ def format_value(x):
     else:
         return str(x)
 
-def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results):
+async def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results, pending_async_nodes):
     unique_id = current_item
     real_node_id = dynprompt.get_real_node_id(unique_id)
     display_node_id = dynprompt.get_display_node_id(unique_id)
@@ -291,11 +337,26 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results):
         if server.client_id is not None:
             cached_output = caches.ui.get(unique_id) or {}
             server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": cached_output.get("output",None), "prompt_id": prompt_id }, server.client_id)
+        get_progress_state().finish_progress(unique_id)
         return (ExecutionResult.SUCCESS, None, None)
 
     input_data_all = None
     try:
-        if unique_id in pending_subgraph_results:
+        if unique_id in pending_async_nodes:
+            results = []
+            for r in pending_async_nodes[unique_id]:
+                if isinstance(r, asyncio.Task):
+                    try:
+                        results.append(r.result())
+                    except Exception as ex:
+                        # An async task failed - propagate the exception up
+                        del pending_async_nodes[unique_id]
+                        raise ex
+                else:
+                    results.append(r)
+            del pending_async_nodes[unique_id]
+            output_data, output_ui, has_subgraph = get_output_from_returns(results, class_def)
+        elif unique_id in pending_subgraph_results:
             cached_results = pending_subgraph_results[unique_id]
             resolved_outputs = []
             for is_subgraph, result in cached_results:
@@ -317,6 +378,7 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results):
             output_ui = []
             has_subgraph = False
         else:
+            get_progress_state().start_progress(unique_id)
             input_data_all, missing_keys = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data)
             if server.client_id is not None:
                 server.last_node_id = display_node_id
@@ -328,7 +390,8 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results):
                 caches.objects.set(unique_id, obj)
 
             if hasattr(obj, "check_lazy_status"):
-                required_inputs = _map_node_over_list(obj, input_data_all, "check_lazy_status", allow_interrupt=True)
+                required_inputs = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, "check_lazy_status", allow_interrupt=True)
+                required_inputs = await resolve_map_node_over_list_results(required_inputs)
                 required_inputs = set(sum([r for r in required_inputs if isinstance(r,list)], []))
                 required_inputs = [x for x in required_inputs if isinstance(x,str) and (
                     x not in input_data_all or x in missing_keys
@@ -357,8 +420,18 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results):
                 else:
                     return block
             def pre_execute_cb(call_index):
+                # TODO - How to handle this with async functions without contextvars (which requires Python 3.12)?
                 GraphBuilder.set_default_prefix(unique_id, call_index, 0)
-            output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
+            output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
+            if has_pending_tasks:
+                pending_async_nodes[unique_id] = output_data
+                unblock = execution_list.add_external_block(unique_id)
+                async def await_completion():
+                    tasks = [x for x in output_data if isinstance(x, asyncio.Task)]
+                    await asyncio.gather(*tasks, return_exceptions=True)
+                    unblock()
+                asyncio.create_task(await_completion())
+                return (ExecutionResult.PENDING, None, None)
         if len(output_ui) > 0:
             caches.ui.set(unique_id, {
                 "meta": {
@@ -401,7 +474,8 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results):
                     cached_outputs.append((True, node_outputs))
             new_node_ids = set(new_node_ids)
             for cache in caches.all:
-                cache.ensure_subcache_for(unique_id, new_node_ids).clean_unused()
+                subcache = await cache.ensure_subcache_for(unique_id, new_node_ids)
+                subcache.clean_unused()
             for node_id in new_output_ids:
                 execution_list.add_node(node_id)
             for link in new_output_links:
@@ -446,6 +520,7 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results):
 
         return (ExecutionResult.FAILURE, error_details, ex)
 
+    get_progress_state().finish_progress(unique_id)
     executed.add(unique_id)
     return (ExecutionResult.SUCCESS, None, None)
 
@@ -500,6 +575,11 @@ class PromptExecutor:
         self.add_message("execution_error", mes, broadcast=False)
 
     def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
+        asyncio_loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(asyncio_loop)
+        asyncio.run(self.execute_async(prompt, prompt_id, extra_data, execute_outputs))
+
+    async def execute_async(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
         nodes.interrupt_processing(False)
 
         if "client_id" in extra_data:
@@ -512,9 +592,11 @@ class PromptExecutor:
 
         with torch.inference_mode():
             dynamic_prompt = DynamicPrompt(prompt)
-            is_changed_cache = IsChangedCache(dynamic_prompt, self.caches.outputs)
+            reset_progress_state(prompt_id, dynamic_prompt)
+            add_progress_handler(WebUIProgressHandler(self.server))
+            is_changed_cache = IsChangedCache(prompt_id, dynamic_prompt, self.caches.outputs)
             for cache in self.caches.all:
-                cache.set_prompt(dynamic_prompt, prompt.keys(), is_changed_cache)
+                await cache.set_prompt(dynamic_prompt, prompt.keys(), is_changed_cache)
                 cache.clean_unused()
 
             cached_nodes = []
@@ -527,6 +609,7 @@ class PromptExecutor:
                              { "nodes": cached_nodes, "prompt_id": prompt_id}, broadcast=False)
 
             pending_subgraph_results = {}
+            pending_async_nodes = {} # TODO - Unify this with pending_subgraph_results
            executed = set()
             execution_list = ExecutionList(dynamic_prompt, self.caches.outputs)
             current_outputs = self.caches.outputs.all_node_ids()
@@ -534,12 +617,13 @@ class PromptExecutor:
                 execution_list.add_node(node_id)
 
             while not execution_list.is_empty():
-                node_id, error, ex = execution_list.stage_node_execution()
+                node_id, error, ex = await execution_list.stage_node_execution()
                 if error is not None:
                     self.handle_execution_error(prompt_id, dynamic_prompt.original_prompt, current_outputs, executed, error, ex)
                     break
 
-                result, error, ex = execute(self.server, dynamic_prompt, self.caches, node_id, extra_data, executed, prompt_id, execution_list, pending_subgraph_results)
+                assert node_id is not None, "Node ID should not be None at this point"
+                result, error, ex = await execute(self.server, dynamic_prompt, self.caches, node_id, extra_data, executed, prompt_id, execution_list, pending_subgraph_results, pending_async_nodes)
                 self.success = result != ExecutionResult.FAILURE
                 if result == ExecutionResult.FAILURE:
                     self.handle_execution_error(prompt_id, dynamic_prompt.original_prompt, current_outputs, executed, error, ex)
@@ -569,7 +653,7 @@ class PromptExecutor:
             comfy.model_management.unload_all_models()
 
 
-def validate_inputs(prompt, item, validated):
+async def validate_inputs(prompt_id, prompt, item, validated):
     unique_id = item
     if unique_id in validated:
         return validated[unique_id]
@@ -646,7 +730,7 @@ def validate_inputs(prompt, item, validated):
                     errors.append(error)
                     continue
                 try:
-                    r = validate_inputs(prompt, o_id, validated)
+                    r = await validate_inputs(prompt_id, prompt, o_id, validated)
                     if r[0] is False:
                         # `r` will be set in `validated[o_id]` already
                         valid = False
@@ -771,7 +855,8 @@ def validate_inputs(prompt, item, validated):
                 input_filtered['input_types'] = [received_types]
 
             #ret = obj_class.VALIDATE_INPUTS(**input_filtered)
-            ret = _map_node_over_list(obj_class, input_filtered, "VALIDATE_INPUTS")
+            ret = await _async_map_node_over_list(prompt_id, unique_id, obj_class, input_filtered, "VALIDATE_INPUTS")
+            ret = await resolve_map_node_over_list_results(ret)
             for x in input_filtered:
                 for i, r in enumerate(ret):
                     if r is not True and not isinstance(r, ExecutionBlocker):
@@ -804,7 +889,7 @@ def full_type_name(klass):
         return klass.__qualname__
     return module + '.' + klass.__qualname__
 
-def validate_prompt(prompt):
+async def validate_prompt(prompt_id, prompt):
     outputs = set()
     for x in prompt:
         if 'class_type' not in prompt[x]:
@@ -847,7 +932,7 @@ def validate_prompt(prompt):
             valid = False
             reasons = []
             try:
-                m = validate_inputs(prompt, o, validated)
+                m = await validate_inputs(prompt_id, prompt, o, validated)
                 valid = m[0]
                 reasons = m[1]
             except Exception as ex:
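From a custom-node author's point of view, the change above means a node's FUNCTION may now be declared async def; _async_map_node_over_list detects the coroutine function and schedules it as a task so other ready nodes keep running. A minimal sketch (the class name and category are illustrative, not a node shipped by this patch):

    import asyncio

    class WaitThenPassthrough:
        @classmethod
        def INPUT_TYPES(cls):
            return {"required": {"image": ("IMAGE",), "seconds": ("FLOAT", {"default": 1.0})}}

        RETURN_TYPES = ("IMAGE",)
        FUNCTION = "run"
        CATEGORY = "example/async"

        async def run(self, image, seconds):
            # Awaiting yields to the event loop instead of blocking the executor
            await asyncio.sleep(seconds)
            return (image,)
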
diff --git a/main.py b/main.py
index 0b1987ef4..2b4ffafd4 100644
--- a/main.py
+++ b/main.py
@@ -11,6 +11,9 @@ import itertools
 import utils.extra_config
 import logging
 import sys
+from comfy_execution.progress import get_progress_state
+from comfy_execution.utils import get_executing_context
+from comfy_api import feature_flags
 
 if __name__ == "__main__":
     #NOTE: These do not do anything on core ComfyUI, they are for custom nodes.
@@ -134,7 +137,7 @@ import comfy.utils
 
 import execution
 import server
-from server import BinaryEventTypes
+from protocol import BinaryEventTypes
 import nodes
 import comfy.model_management
 import comfyui_version
@@ -230,15 +233,34 @@ async def run(server_instance, address='', port=8188, verbose=True, call_on_start=None):
         server_instance.start_multi_address(addresses, call_on_start, verbose), server_instance.publish_loop()
     )
 
-
 def hijack_progress(server_instance):
-    def hook(value, total, preview_image):
+    def hook(value, total, preview_image, prompt_id=None, node_id=None):
+        executing_context = get_executing_context()
+        if prompt_id is None and executing_context is not None:
+            prompt_id = executing_context.prompt_id
+        if node_id is None and executing_context is not None:
+            node_id = executing_context.node_id
         comfy.model_management.throw_exception_if_processing_interrupted()
-        progress = {"value": value, "max": total, "prompt_id": server_instance.last_prompt_id, "node": server_instance.last_node_id}
+        if prompt_id is None:
+            prompt_id = server_instance.last_prompt_id
+        if node_id is None:
+            node_id = server_instance.last_node_id
+        progress = {"value": value, "max": total, "prompt_id": prompt_id, "node": node_id}
+        get_progress_state().update_progress(node_id, value, total, preview_image)
 
         server_instance.send_sync("progress", progress, server_instance.client_id)
         if preview_image is not None:
-            server_instance.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server_instance.client_id)
+            # Only send old method if client doesn't support preview metadata
+            if not feature_flags.supports_feature(
+                server_instance.sockets_metadata,
+                server_instance.client_id,
+                "supports_preview_metadata",
+            ):
+                server_instance.send_sync(
+                    BinaryEventTypes.UNENCODED_PREVIEW_IMAGE,
+                    preview_image,
+                    server_instance.client_id,
+                )
 
     comfy.utils.set_progress_bar_global_hook(hook)
diff --git a/protocol.py b/protocol.py
new file mode 100644
index 000000000..038a0a840
--- /dev/null
+++ b/protocol.py
@@ -0,0 +1,7 @@
+
+class BinaryEventTypes:
+    PREVIEW_IMAGE = 1
+    UNENCODED_PREVIEW_IMAGE = 2
+    TEXT = 3
+    PREVIEW_IMAGE_WITH_METADATA = 4
+
diff --git a/server.py b/server.py
index 878b5eeb1..e8bad9f4e 100644
--- a/server.py
+++ b/server.py
@@ -26,6 +26,7 @@ import mimetypes
 from comfy.cli_args import args
 import comfy.utils
 import comfy.model_management
+from comfy_api import feature_flags
 
 import node_helpers
 from comfyui_version import __version__
 from app.frontend_management import FrontendManager
@@ -35,11 +36,7 @@ from app.model_manager import ModelFileManager
 from app.custom_node_manager import CustomNodeManager
 from typing import Optional, Union
 from api_server.routes.internal.internal_routes import InternalRoutes
-
-class BinaryEventTypes:
-    PREVIEW_IMAGE = 1
-    UNENCODED_PREVIEW_IMAGE = 2
-    TEXT = 3
+from protocol import BinaryEventTypes
 
 async def send_socket_catch_exception(function, message):
     try:
@@ -178,6 +175,7 @@ class PromptServer():
         max_upload_size = round(args.max_upload_size * 1024 * 1024)
         self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares)
         self.sockets = dict()
+        self.sockets_metadata = dict()
         self.web_root = (
             FrontendManager.init_frontend(args.front_end_version)
             if args.front_end_root is None
@@ -202,20 +200,53 @@ class PromptServer():
             else:
                 sid = uuid.uuid4().hex
 
+            # Store WebSocket for backward compatibility
             self.sockets[sid] = ws
+            # Store metadata separately
+            self.sockets_metadata[sid] = {"feature_flags": {}}
 
             try:
                 # Send initial state to the new client
-                await self.send("status", { "status": self.get_queue_info(), 'sid': sid }, sid)
+                await self.send("status", {"status": self.get_queue_info(), "sid": sid}, sid)
                 # On reconnect if we are the currently executing client send the current node
                 if self.client_id == sid and self.last_node_id is not None:
                     await self.send("executing", { "node": self.last_node_id }, sid)
 
+                # Flag to track if we've received the first message
+                first_message = True
+
                 async for msg in ws:
                     if msg.type == aiohttp.WSMsgType.ERROR:
                         logging.warning('ws connection closed with exception %s' % ws.exception())
+                    elif msg.type == aiohttp.WSMsgType.TEXT:
+                        try:
+                            data = json.loads(msg.data)
+                            # Check if first message is feature flags
+                            if first_message and data.get("type") == "feature_flags":
+                                # Store client feature flags
+                                client_flags = data.get("data", {})
+                                self.sockets_metadata[sid]["feature_flags"] = client_flags
+
+                                # Send server feature flags in response
+                                await self.send(
+                                    "feature_flags",
+                                    feature_flags.get_server_features(),
+                                    sid,
+                                )
+
+                                logging.info(
+                                    f"Feature flags negotiated for client {sid}: {client_flags}"
+                                )
+                            first_message = False
+                        except json.JSONDecodeError:
+                            logging.warning(
+                                f"Invalid JSON received from client {sid}: {msg.data}"
+                            )
+                        except Exception as e:
+                            logging.error(f"Error processing WebSocket message: {e}")
             finally:
                 self.sockets.pop(sid, None)
+                self.sockets_metadata.pop(sid, None)
             return ws
 
         @routes.get("/")
@@ -548,6 +579,10 @@ class PromptServer():
             }
             return web.json_response(system_stats)
 
+        @routes.get("/features")
+        async def get_features(request):
+            return web.json_response(feature_flags.get_server_features())
+
         @routes.get("/prompt")
         async def get_prompt(request):
             return web.json_response(self.get_queue_info())
@@ -643,7 +678,8 @@ class PromptServer():
 
             if "prompt" in json_data:
                 prompt = json_data["prompt"]
-                valid = execution.validate_prompt(prompt)
+                prompt_id = str(uuid.uuid4())
+                valid = await execution.validate_prompt(prompt_id, prompt)
                 extra_data = {}
                 if "extra_data" in json_data:
                     extra_data = json_data["extra_data"]
@@ -651,7 +687,6 @@ class PromptServer():
                 if "client_id" in json_data:
                     extra_data["client_id"] = json_data["client_id"]
                 if valid[0]:
-                    prompt_id = str(uuid.uuid4())
                     outputs_to_execute = valid[2]
                     self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute))
                     response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
@@ -766,6 +801,10 @@ class PromptServer():
     async def send(self, event, data, sid=None):
         if event == BinaryEventTypes.UNENCODED_PREVIEW_IMAGE:
             await self.send_image(data, sid=sid)
+        elif event == BinaryEventTypes.PREVIEW_IMAGE_WITH_METADATA:
+            # data is (preview_image, metadata)
+            preview_image, metadata = data
+            await self.send_image_with_metadata(preview_image, metadata, sid=sid)
         elif isinstance(data, (bytes, bytearray)):
             await self.send_bytes(event, data, sid)
         else:
@@ -804,6 +843,43 @@ class PromptServer():
             preview_bytes = bytesIO.getvalue()
         await self.send_bytes(BinaryEventTypes.PREVIEW_IMAGE, preview_bytes, sid=sid)
 
+    async def send_image_with_metadata(self, image_data, metadata=None, sid=None):
+        image_type = image_data[0]
+        image = image_data[1]
+        max_size = image_data[2]
+        if max_size is not None:
+            if hasattr(Image, 'Resampling'):
+                resampling = Image.Resampling.BILINEAR
+            else:
+                resampling = Image.ANTIALIAS
+
+            image = ImageOps.contain(image, (max_size, max_size), resampling)
+
+        mimetype = "image/png" if image_type == "PNG" else "image/jpeg"
+
+        # Prepare metadata
+        if metadata is None:
+            metadata = {}
+        metadata["image_type"] = mimetype
+
+        # Serialize metadata as JSON
+        import json
+        metadata_json = json.dumps(metadata).encode('utf-8')
+        metadata_length = len(metadata_json)
+
+        # Prepare image data
+        bytesIO = BytesIO()
+        image.save(bytesIO, format=image_type, quality=95, compress_level=1)
+        image_bytes = bytesIO.getvalue()
+
+        # Combine metadata and image
+        combined_data = bytearray()
+        combined_data.extend(struct.pack(">I", metadata_length))
+        combined_data.extend(metadata_json)
+        combined_data.extend(image_bytes)
+
+        await self.send_bytes(BinaryEventTypes.PREVIEW_IMAGE_WITH_METADATA, combined_data, sid=sid)
+
     async def send_bytes(self, event, data, sid=None):
         message = self.encode_bytes(event, data)
 
@@ -845,10 +921,10 @@ class PromptServer():
         ssl_ctx = None
         scheme = "http"
         if args.tls_keyfile and args.tls_certfile:
-                ssl_ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_SERVER, verify_mode=ssl.CERT_NONE)
-                ssl_ctx.load_cert_chain(certfile=args.tls_certfile,
+            ssl_ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_SERVER, verify_mode=ssl.CERT_NONE)
+            ssl_ctx.load_cert_chain(certfile=args.tls_certfile,
                                 keyfile=args.tls_keyfile)
-                scheme = "https"
+            scheme = "https"
 
         if verbose:
             logging.info("Starting server\n")
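On the wire, a PREVIEW_IMAGE_WITH_METADATA payload is a 4-byte big-endian metadata length, the JSON metadata, then the encoded image, exactly as assembled in send_image_with_metadata above. A client-side decoding sketch (the function name is illustrative; it assumes the outer binary event header has already been stripped):

    import json
    import struct

    def decode_preview_with_metadata(payload: bytes):
        # First 4 bytes: length of the JSON metadata block
        (metadata_length,) = struct.unpack_from(">I", payload, 0)
        metadata = json.loads(payload[4:4 + metadata_length])
        image_bytes = payload[4 + metadata_length:]  # PNG or JPEG, per metadata["image_type"]
        return metadata, image_bytes
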
diff --git a/tests-unit/feature_flags_test.py b/tests-unit/feature_flags_test.py
new file mode 100644
index 000000000..f2702cfc8
--- /dev/null
+++ b/tests-unit/feature_flags_test.py
@@ -0,0 +1,98 @@
+"""Tests for feature flags functionality."""
+
+from comfy_api.feature_flags import (
+    get_connection_feature,
+    supports_feature,
+    get_server_features,
+    SERVER_FEATURE_FLAGS,
+)
+
+
+class TestFeatureFlags:
+    """Test suite for feature flags functions."""
+
+    def test_get_server_features_returns_copy(self):
+        """Test that get_server_features returns a copy of the server flags."""
+        features = get_server_features()
+        # Verify it's a copy by modifying it
+        features["test_flag"] = True
+        # Original should be unchanged
+        assert "test_flag" not in SERVER_FEATURE_FLAGS
+
+    def test_get_server_features_contains_expected_flags(self):
+        """Test that server features contain expected flags."""
+        features = get_server_features()
+        assert "supports_preview_metadata" in features
+        assert features["supports_preview_metadata"] is True
+        assert "max_upload_size" in features
+        assert isinstance(features["max_upload_size"], (int, float))
+
+    def test_get_connection_feature_with_missing_sid(self):
+        """Test getting feature for non-existent session ID."""
+        sockets_metadata = {}
+        result = get_connection_feature(sockets_metadata, "missing_sid", "some_feature")
+        assert result is False  # Default value
+
+    def test_get_connection_feature_with_custom_default(self):
+        """Test getting feature with custom default value."""
+        sockets_metadata = {}
+        result = get_connection_feature(
+            sockets_metadata, "missing_sid", "some_feature", default="custom_default"
+        )
+        assert result == "custom_default"
+
+    def test_get_connection_feature_with_feature_flags(self):
+        """Test getting feature from connection with feature flags."""
+        sockets_metadata = {
+            "sid1": {
+                "feature_flags": {
+                    "supports_preview_metadata": True,
+                    "custom_feature": "value",
+                },
+            }
+        }
+        result = get_connection_feature(sockets_metadata, "sid1", "supports_preview_metadata")
+        assert result is True
+
+        result = get_connection_feature(sockets_metadata, "sid1", "custom_feature")
+        assert result == "value"
+
+    def test_get_connection_feature_missing_feature(self):
+        """Test getting non-existent feature from connection."""
+        sockets_metadata = {
+            "sid1": {"feature_flags": {"existing_feature": True}}
+        }
+        result = get_connection_feature(sockets_metadata, "sid1", "missing_feature")
+        assert result is False
+
+    def test_supports_feature_returns_boolean(self):
+        """Test that supports_feature always returns boolean."""
+        sockets_metadata = {
+            "sid1": {
+                "feature_flags": {
+                    "bool_feature": True,
+                    "string_feature": "value",
+                    "none_feature": None,
+                },
+            }
+        }
+
+        # True boolean feature
+        assert supports_feature(sockets_metadata, "sid1", "bool_feature") is True
+
+        # Non-boolean values should return False
+        assert supports_feature(sockets_metadata, "sid1", "string_feature") is False
+        assert supports_feature(sockets_metadata, "sid1", "none_feature") is False
+        assert supports_feature(sockets_metadata, "sid1", "missing_feature") is False
+
+    def test_supports_feature_with_missing_connection(self):
+        """Test supports_feature with missing connection."""
+        sockets_metadata = {}
+        assert supports_feature(sockets_metadata, "missing_sid", "any_feature") is False
+
+    def test_empty_feature_flags_dict(self):
+        """Test connection with empty feature flags dictionary."""
+        sockets_metadata = {"sid1": {"feature_flags": {}}}
+        result = get_connection_feature(sockets_metadata, "sid1", "any_feature")
+        assert result is False
+        assert supports_feature(sockets_metadata, "sid1", "any_feature") is False
diff --git a/tests-unit/requirements.txt b/tests-unit/requirements.txt
index d70d00f4b..3a6790ee0 100644
--- a/tests-unit/requirements.txt
+++ b/tests-unit/requirements.txt
@@ -1,3 +1,4 @@
 pytest>=7.8.0
 pytest-aiohttp
 pytest-asyncio
+websocket-client
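From a client's perspective, negotiation works like this: open the socket, send a feature_flags message as the very first frame, then read messages until the server's feature_flags reply arrives (a status message is typically sent first on connect). A sketch using the websocket-client package added above (the address and flag set are illustrative):

    import json
    import websocket  # websocket-client

    ws = websocket.WebSocket()
    ws.connect("ws://127.0.0.1:8188/ws?clientId=demo")
    # First message must be the client's feature flags
    ws.send(json.dumps({"type": "feature_flags",
                        "data": {"supports_preview_metadata": True}}))
    while True:
        msg = json.loads(ws.recv())
        if msg.get("type") == "feature_flags":
            print("server flags:", msg["data"])
            break
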
diff --git a/tests-unit/websocket_feature_flags_test.py b/tests-unit/websocket_feature_flags_test.py
new file mode 100644
index 000000000..e93b2e1dd
--- /dev/null
+++ b/tests-unit/websocket_feature_flags_test.py
@@ -0,0 +1,77 @@
+"""Simplified tests for WebSocket feature flags functionality."""
+from comfy_api import feature_flags
+
+
+class TestWebSocketFeatureFlags:
+    """Test suite for WebSocket feature flags integration."""
+
+    def test_server_feature_flags_response(self):
+        """Test server feature flags are properly formatted."""
+        features = feature_flags.get_server_features()
+
+        # Check expected server features
+        assert "supports_preview_metadata" in features
+        assert features["supports_preview_metadata"] is True
+        assert "max_upload_size" in features
+        assert isinstance(features["max_upload_size"], (int, float))
+
+    def test_progress_py_checks_feature_flags(self):
+        """Test that progress.py checks feature flags before sending metadata."""
+        # This simulates the check in progress.py
+        client_id = "test_client"
+        sockets_metadata = {"test_client": {"feature_flags": {}}}
+
+        # The actual check would be in progress.py
+        supports_metadata = feature_flags.supports_feature(
+            sockets_metadata, client_id, "supports_preview_metadata"
+        )
+
+        assert supports_metadata is False
+
+    def test_multiple_clients_different_features(self):
+        """Test handling multiple clients with different feature support."""
+        sockets_metadata = {
+            "modern_client": {
+                "feature_flags": {"supports_preview_metadata": True}
+            },
+            "legacy_client": {
+                "feature_flags": {}
+            }
+        }
+
+        # Check modern client
+        assert feature_flags.supports_feature(
+            sockets_metadata, "modern_client", "supports_preview_metadata"
+        ) is True
+
+        # Check legacy client
+        assert feature_flags.supports_feature(
+            sockets_metadata, "legacy_client", "supports_preview_metadata"
+        ) is False
+
+    def test_feature_negotiation_message_format(self):
+        """Test the format of feature negotiation messages."""
+        # Client message format
+        client_message = {
+            "type": "feature_flags",
+            "data": {
+                "supports_preview_metadata": True,
+                "api_version": "1.0.0"
+            }
+        }
+
+        # Verify structure
+        assert client_message["type"] == "feature_flags"
+        assert "supports_preview_metadata" in client_message["data"]
+
+        # Server response format (what would be sent)
+        server_features = feature_flags.get_server_features()
+        server_message = {
+            "type": "feature_flags",
+            "data": server_features
+        }
+
+        # Verify structure
+        assert server_message["type"] == "feature_flags"
+        assert "supports_preview_metadata" in server_message["data"]
+        assert server_message["data"]["supports_preview_metadata"] is True
diff --git a/tests/inference/extra_model_paths.yaml b/tests/inference/extra_model_paths.yaml
index 75b2e1ae4..68e056564 100644
--- a/tests/inference/extra_model_paths.yaml
+++ b/tests/inference/extra_model_paths.yaml
@@ -1,4 +1,4 @@
 # Config for testing nodes
 testing:
-  custom_nodes: tests/inference/testing_nodes
+  custom_nodes: testing_nodes
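The suite below drives nodes such as TestSleep, TestAsyncValidation, and TestAsyncError from the testing pack's async_test_nodes module, which is registered in the pack's __init__.py at the end of this patch but not reproduced in this excerpt. For orientation, a TestSleep-style node plausibly reduces to something like this sketch (hypothetical, not the actual test node source):

    import asyncio

    class TestSleep:
        @classmethod
        def INPUT_TYPES(cls):
            return {"required": {"value": ("IMAGE",),
                                 "seconds": ("FLOAT", {"default": 0.5, "min": 0.0})}}

        RETURN_TYPES = ("IMAGE",)
        FUNCTION = "sleep"
        CATEGORY = "_for_testing/async"

        async def sleep(self, value, seconds):
            # Sleeping on the event loop is what lets sibling nodes overlap,
            # which the timing assertions below depend on.
            await asyncio.sleep(seconds)
            return (value,)
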
+ pass + else: + break + yield client + del client + torch.cuda.empty_cache() + + @fixture + def client(self, shared_client, request): + shared_client.set_test_name(f"async_nodes[{request.node.name}]") + yield shared_client + + @fixture + def builder(self, request): + yield GraphBuilder(prefix=request.node.name) + + # Happy Path Tests + + def test_basic_async_execution(self, client: ComfyClient, builder: GraphBuilder): + """Test that a basic async node executes correctly.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + sleep_node = g.node("TestSleep", value=image.out(0), seconds=0.1) + output = g.node("SaveImage", images=sleep_node.out(0)) + + result = client.run(g) + + # Verify execution completed + assert result.did_run(sleep_node), "Async sleep node should have executed" + assert result.did_run(output), "Output node should have executed" + + # Verify the image passed through correctly + result_images = result.get_images(output) + assert len(result_images) == 1, "Should have 1 image" + assert np.array(result_images[0]).min() == 0 and np.array(result_images[0]).max() == 0, "Image should be black" + + def test_multiple_async_parallel_execution(self, client: ComfyClient, builder: GraphBuilder): + """Test that multiple async nodes execute in parallel.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + + # Create multiple async sleep nodes with different durations + sleep1 = g.node("TestSleep", value=image.out(0), seconds=0.3) + sleep2 = g.node("TestSleep", value=image.out(0), seconds=0.4) + sleep3 = g.node("TestSleep", value=image.out(0), seconds=0.5) + + # Add outputs for each + _output1 = g.node("PreviewImage", images=sleep1.out(0)) + _output2 = g.node("PreviewImage", images=sleep2.out(0)) + _output3 = g.node("PreviewImage", images=sleep3.out(0)) + + start_time = time.time() + result = client.run(g) + elapsed_time = time.time() - start_time + + # Should take ~0.5s (max duration) not 1.2s (sum of durations) + assert elapsed_time < 0.8, f"Parallel execution took {elapsed_time}s, expected < 0.8s" + + # Verify all nodes executed + assert result.did_run(sleep1) and result.did_run(sleep2) and result.did_run(sleep3) + + def test_async_with_dependencies(self, client: ComfyClient, builder: GraphBuilder): + """Test async nodes with proper dependency handling.""" + g = builder + image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + + # Chain of async operations + sleep1 = g.node("TestSleep", value=image1.out(0), seconds=0.2) + sleep2 = g.node("TestSleep", value=image2.out(0), seconds=0.2) + + # Average depends on both async results + average = g.node("TestVariadicAverage", input1=sleep1.out(0), input2=sleep2.out(0)) + output = g.node("SaveImage", images=average.out(0)) + + result = client.run(g) + + # Verify execution order + assert result.did_run(sleep1) and result.did_run(sleep2) + assert result.did_run(average) and result.did_run(output) + + # Verify averaged result + result_images = result.get_images(output) + avg_value = np.array(result_images[0]).mean() + assert abs(avg_value - 127.5) < 1, f"Average value {avg_value} should be ~127.5" + + def test_async_validate_inputs(self, client: ComfyClient, builder: GraphBuilder): + """Test async VALIDATE_INPUTS function.""" + g = builder + # Create a test node with async validation + validation_node = g.node("TestAsyncValidation", 
value=5.0, threshold=10.0) + g.node("SaveImage", images=validation_node.out(0)) + + # Should pass validation + result = client.run(g) + assert result.did_run(validation_node) + + # Test validation failure + validation_node.inputs['threshold'] = 3.0 # Will fail since value > threshold + with pytest.raises(urllib.error.HTTPError): + client.run(g) + + def test_async_lazy_evaluation(self, client: ComfyClient, builder: GraphBuilder): + """Test async nodes with lazy evaluation.""" + g = builder + input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + mask = g.node("StubMask", value=0.0, height=512, width=512, batch_size=1) + + # Create async nodes that will be evaluated lazily + sleep1 = g.node("TestSleep", value=input1.out(0), seconds=0.3) + sleep2 = g.node("TestSleep", value=input2.out(0), seconds=0.3) + + # Use lazy mix that only needs sleep1 (mask=0.0) + lazy_mix = g.node("TestLazyMixImages", image1=sleep1.out(0), image2=sleep2.out(0), mask=mask.out(0)) + g.node("SaveImage", images=lazy_mix.out(0)) + + start_time = time.time() + result = client.run(g) + elapsed_time = time.time() - start_time + + # Should only execute sleep1, not sleep2 + assert elapsed_time < 0.5, f"Should skip sleep2, took {elapsed_time}s" + assert result.did_run(sleep1), "Sleep1 should have executed" + assert not result.did_run(sleep2), "Sleep2 should have been skipped" + + def test_async_check_lazy_status(self, client: ComfyClient, builder: GraphBuilder): + """Test async check_lazy_status function.""" + g = builder + # Create a node with async check_lazy_status + lazy_node = g.node("TestAsyncLazyCheck", + input1="value1", + input2="value2", + condition=True) + g.node("SaveImage", images=lazy_node.out(0)) + + result = client.run(g) + assert result.did_run(lazy_node) + + # Error Handling Tests + + def test_async_execution_error(self, client: ComfyClient, builder: GraphBuilder): + """Test that async execution errors are properly handled.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + # Create an async node that will error + error_node = g.node("TestAsyncError", value=image.out(0), error_after=0.1) + g.node("SaveImage", images=error_node.out(0)) + + try: + client.run(g) + assert False, "Should have raised an error" + except Exception as e: + assert 'prompt_id' in e.args[0], f"Did not get proper error message: {e}" + assert e.args[0]['node_id'] == error_node.id, "Error should be from async error node" + + def test_async_validation_error(self, client: ComfyClient, builder: GraphBuilder): + """Test async validation error handling.""" + g = builder + # Node with async validation that will fail + validation_node = g.node("TestAsyncValidationError", value=15.0, max_value=10.0) + g.node("SaveImage", images=validation_node.out(0)) + + with pytest.raises(urllib.error.HTTPError) as exc_info: + client.run(g) + # Verify it's a validation error + assert exc_info.value.code == 400 + + def test_async_timeout_handling(self, client: ComfyClient, builder: GraphBuilder): + """Test handling of async operations that timeout.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + # Very long sleep that would timeout + timeout_node = g.node("TestAsyncTimeout", value=image.out(0), timeout=0.5, operation_time=2.0) + g.node("SaveImage", images=timeout_node.out(0)) + + try: + client.run(g) + assert False, "Should have raised a timeout 
error" + except Exception as e: + assert 'timeout' in str(e).lower(), f"Expected timeout error, got: {e}" + + def test_concurrent_async_error_recovery(self, client: ComfyClient, builder: GraphBuilder): + """Test that workflow can recover after async errors.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + + # First run with error + error_node = g.node("TestAsyncError", value=image.out(0), error_after=0.1) + g.node("SaveImage", images=error_node.out(0)) + + try: + client.run(g) + except Exception: + pass # Expected + + # Second run should succeed + g2 = GraphBuilder(prefix="recovery_test") + image2 = g2.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + sleep_node = g2.node("TestSleep", value=image2.out(0), seconds=0.1) + g2.node("SaveImage", images=sleep_node.out(0)) + + result = client.run(g2) + assert result.did_run(sleep_node), "Should be able to run after error" + + def test_sync_error_during_async_execution(self, client: ComfyClient, builder: GraphBuilder): + """Test handling when sync node errors while async node is executing.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + + # Async node that takes time + sleep_node = g.node("TestSleep", value=image.out(0), seconds=0.5) + + # Sync node that will error immediately + error_node = g.node("TestSyncError", value=image.out(0)) + + # Both feed into output + g.node("PreviewImage", images=sleep_node.out(0)) + g.node("PreviewImage", images=error_node.out(0)) + + try: + client.run(g) + assert False, "Should have raised an error" + except Exception as e: + # Verify the sync error was caught even though async was running + assert 'prompt_id' in e.args[0] + + # Edge Cases + + def test_async_with_execution_blocker(self, client: ComfyClient, builder: GraphBuilder): + """Test async nodes with execution blockers.""" + g = builder + image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + + # Async sleep nodes + sleep1 = g.node("TestSleep", value=image1.out(0), seconds=0.2) + sleep2 = g.node("TestSleep", value=image2.out(0), seconds=0.2) + + # Create list of images + image_list = g.node("TestMakeListNode", value1=sleep1.out(0), value2=sleep2.out(0)) + + # Create list of blocking conditions - [False, True] to block only the second item + int1 = g.node("StubInt", value=1) + int2 = g.node("StubInt", value=2) + block_list = g.node("TestMakeListNode", value1=int1.out(0), value2=int2.out(0)) + + # Compare each value against 2, so first is False (1 != 2) and second is True (2 == 2) + compare = g.node("TestIntConditions", a=block_list.out(0), b=2, operation="==") + + # Block based on the comparison results + blocker = g.node("TestExecutionBlocker", input=image_list.out(0), block=compare.out(0), verbose=False) + + output = g.node("PreviewImage", images=blocker.out(0)) + + result = client.run(g) + images = result.get_images(output) + assert len(images) == 1, "Should have blocked second image" + + def test_async_caching_behavior(self, client: ComfyClient, builder: GraphBuilder): + """Test that async nodes are properly cached.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + sleep_node = g.node("TestSleep", value=image.out(0), seconds=0.2) + g.node("SaveImage", images=sleep_node.out(0)) + + # First run + result1 = client.run(g) + assert result1.did_run(sleep_node), 
"Should run first time" + + # Second run - should be cached + start_time = time.time() + result2 = client.run(g) + elapsed_time = time.time() - start_time + + assert not result2.did_run(sleep_node), "Should be cached" + assert elapsed_time < 0.1, f"Cached run took {elapsed_time}s, should be instant" + + def test_async_with_dynamic_prompts(self, client: ComfyClient, builder: GraphBuilder): + """Test async nodes within dynamically generated prompts.""" + g = builder + image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + + # Node that generates async nodes dynamically + dynamic_async = g.node("TestDynamicAsyncGeneration", + image1=image1.out(0), + image2=image2.out(0), + num_async_nodes=3, + sleep_duration=0.2) + g.node("SaveImage", images=dynamic_async.out(0)) + + start_time = time.time() + result = client.run(g) + elapsed_time = time.time() - start_time + + # Should execute async nodes in parallel within dynamic prompt + assert elapsed_time < 0.5, f"Dynamic async execution took {elapsed_time}s" + assert result.did_run(dynamic_async) + + def test_async_resource_cleanup(self, client: ComfyClient, builder: GraphBuilder): + """Test that async resources are properly cleaned up.""" + g = builder + image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + + # Create multiple async nodes that use resources + resource_nodes = [] + for i in range(5): + node = g.node("TestAsyncResourceUser", + value=image.out(0), + resource_id=f"resource_{i}", + duration=0.1) + resource_nodes.append(node) + g.node("PreviewImage", images=node.out(0)) + + result = client.run(g) + + # Verify all nodes executed + for node in resource_nodes: + assert result.did_run(node) + + # Run again to ensure resources were cleaned up + result2 = client.run(g) + # Should be cached but not error due to resource conflicts + for node in resource_nodes: + assert not result2.did_run(node), "Should be cached" + + def test_async_cancellation(self, client: ComfyClient, builder: GraphBuilder): + """Test cancellation of async operations.""" + # This would require implementing cancellation in the client + # For now, we'll test that long-running async operations can be interrupted + pass # TODO: Implement when cancellation API is available + + def test_mixed_sync_async_execution(self, client: ComfyClient, builder: GraphBuilder): + """Test workflows with both sync and async nodes.""" + g = builder + image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + mask = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1) + + # Mix of sync and async operations + # Sync: lazy mix images + sync_op1 = g.node("TestLazyMixImages", image1=image1.out(0), image2=image2.out(0), mask=mask.out(0)) + # Async: sleep + async_op1 = g.node("TestSleep", value=sync_op1.out(0), seconds=0.2) + # Sync: custom validation + sync_op2 = g.node("TestCustomValidation1", input1=async_op1.out(0), input2=0.5) + # Async: sleep again + async_op2 = g.node("TestSleep", value=sync_op2.out(0), seconds=0.2) + + output = g.node("SaveImage", images=async_op2.out(0)) + + result = client.run(g) + + # Verify all nodes executed in correct order + assert result.did_run(sync_op1) + assert result.did_run(async_op1) + assert result.did_run(sync_op2) + assert result.did_run(async_op2) + + # Image should be a mix of black and white (gray) 
+        result_images = result.get_images(output)
+        avg_value = np.array(result_images[0]).mean()
+        assert abs(avg_value - 63.75) < 5, f"Average value {avg_value} should be ~63.75"
diff --git a/tests/inference/test_execution.py b/tests/inference/test_execution.py
index 5cda5c1ae..9d3d685cc 100644
--- a/tests/inference/test_execution.py
+++ b/tests/inference/test_execution.py
@@ -252,7 +252,7 @@ class TestExecution:
 
     @pytest.mark.parametrize("test_type, test_value", [
         ("StubInt", 5),
-        ("StubFloat", 5.0)
+        ("StubMask", 5.0)
     ])
     def test_validation_error_edge1(self, test_type, test_value, client: ComfyClient, builder: GraphBuilder):
         g = builder
@@ -497,6 +497,69 @@ class TestExecution:
         assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25"
         assert not result.did_run(test_node), "The execution should have been cached"
 
+    def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder):
+        g = builder
+        image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
+
+        # Create sleep nodes for each duration
+        sleep_node1 = g.node("TestSleep", value=image.out(0), seconds=2.8)
+        sleep_node2 = g.node("TestSleep", value=image.out(0), seconds=2.9)
+        sleep_node3 = g.node("TestSleep", value=image.out(0), seconds=3.0)
+
+        # Add outputs to verify the execution
+        _output1 = g.node("PreviewImage", images=sleep_node1.out(0))
+        _output2 = g.node("PreviewImage", images=sleep_node2.out(0))
+        _output3 = g.node("PreviewImage", images=sleep_node3.out(0))
+
+        start_time = time.time()
+        result = client.run(g)
+        elapsed_time = time.time() - start_time
+
+        # The test should take around 3.0 seconds (the longest sleep duration)
+        # plus some overhead, but definitely less than the sum of all sleeps (8.7s)
+        # We'll allow for up to 4.0s total to account for overhead
+        assert elapsed_time < 4.0, f"Parallel execution took {elapsed_time}s, expected less than 4.0s"
+
+        # Verify that all nodes executed
+        assert result.did_run(sleep_node1), "Sleep node 1 should have run"
+        assert result.did_run(sleep_node2), "Sleep node 2 should have run"
+        assert result.did_run(sleep_node3), "Sleep node 3 should have run"
+
+    def test_parallel_sleep_expansion(self, client: ComfyClient, builder: GraphBuilder):
+        g = builder
+        # Create input images with different values
+        image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
+        image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
+        image3 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
+
+        # Create a TestParallelSleep node that expands into multiple TestSleep nodes
+        parallel_sleep = g.node("TestParallelSleep",
+                               image1=image1.out(0),
+                               image2=image2.out(0),
+                               image3=image3.out(0),
+                               sleep1=0.4,
+                               sleep2=0.5,
+                               sleep3=0.6)
+        output = g.node("SaveImage", images=parallel_sleep.out(0))
+
+        start_time = time.time()
+        result = client.run(g)
+        elapsed_time = time.time() - start_time
+
+        # Similar to the previous test, expect parallel execution of the sleep nodes
+        # which should complete in less than the sum of all sleeps
+        assert elapsed_time < 0.8, f"Expansion execution took {elapsed_time}s, expected less than 0.8s"
+
+        # Verify the parallel sleep node executed
+        assert result.did_run(parallel_sleep), "ParallelSleep node should have run"
+
+        # Verify we get an image as output (blend of the three input images)
+        result_images = result.get_images(output)
+        assert len(result_images) == 1, "Should have 1 image"
+        # Average pixel value
should be around 170 (255 * 2 // 3) + avg_value = numpy.array(result_images[0]).mean() + assert avg_value == 170, f"Image average value {avg_value} should be 170" + # This tests that nodes with OUTPUT_IS_LIST function correctly when they receive an ExecutionBlocker # as input. We also test that when that list (containing an ExecutionBlocker) is passed to a node, # only that one entry in the list is blocked. diff --git a/tests/inference/testing_nodes/testing-pack/__init__.py b/tests/inference/testing_nodes/testing-pack/__init__.py index dcc71659a..20f9533c7 100644 --- a/tests/inference/testing_nodes/testing-pack/__init__.py +++ b/tests/inference/testing_nodes/testing-pack/__init__.py @@ -1,23 +1,26 @@ -from .specific_tests import TEST_NODE_CLASS_MAPPINGS, TEST_NODE_DISPLAY_NAME_MAPPINGS -from .flow_control import FLOW_CONTROL_NODE_CLASS_MAPPINGS, FLOW_CONTROL_NODE_DISPLAY_NAME_MAPPINGS -from .util import UTILITY_NODE_CLASS_MAPPINGS, UTILITY_NODE_DISPLAY_NAME_MAPPINGS -from .conditions import CONDITION_NODE_CLASS_MAPPINGS, CONDITION_NODE_DISPLAY_NAME_MAPPINGS -from .stubs import TEST_STUB_NODE_CLASS_MAPPINGS, TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS - -# NODE_CLASS_MAPPINGS = GENERAL_NODE_CLASS_MAPPINGS.update(COMPONENT_NODE_CLASS_MAPPINGS) -# NODE_DISPLAY_NAME_MAPPINGS = GENERAL_NODE_DISPLAY_NAME_MAPPINGS.update(COMPONENT_NODE_DISPLAY_NAME_MAPPINGS) - -NODE_CLASS_MAPPINGS = {} -NODE_CLASS_MAPPINGS.update(TEST_NODE_CLASS_MAPPINGS) -NODE_CLASS_MAPPINGS.update(FLOW_CONTROL_NODE_CLASS_MAPPINGS) -NODE_CLASS_MAPPINGS.update(UTILITY_NODE_CLASS_MAPPINGS) -NODE_CLASS_MAPPINGS.update(CONDITION_NODE_CLASS_MAPPINGS) -NODE_CLASS_MAPPINGS.update(TEST_STUB_NODE_CLASS_MAPPINGS) - -NODE_DISPLAY_NAME_MAPPINGS = {} -NODE_DISPLAY_NAME_MAPPINGS.update(TEST_NODE_DISPLAY_NAME_MAPPINGS) -NODE_DISPLAY_NAME_MAPPINGS.update(FLOW_CONTROL_NODE_DISPLAY_NAME_MAPPINGS) -NODE_DISPLAY_NAME_MAPPINGS.update(UTILITY_NODE_DISPLAY_NAME_MAPPINGS) -NODE_DISPLAY_NAME_MAPPINGS.update(CONDITION_NODE_DISPLAY_NAME_MAPPINGS) -NODE_DISPLAY_NAME_MAPPINGS.update(TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS) - +from .specific_tests import TEST_NODE_CLASS_MAPPINGS, TEST_NODE_DISPLAY_NAME_MAPPINGS +from .flow_control import FLOW_CONTROL_NODE_CLASS_MAPPINGS, FLOW_CONTROL_NODE_DISPLAY_NAME_MAPPINGS +from .util import UTILITY_NODE_CLASS_MAPPINGS, UTILITY_NODE_DISPLAY_NAME_MAPPINGS +from .conditions import CONDITION_NODE_CLASS_MAPPINGS, CONDITION_NODE_DISPLAY_NAME_MAPPINGS +from .stubs import TEST_STUB_NODE_CLASS_MAPPINGS, TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS +from .async_test_nodes import ASYNC_TEST_NODE_CLASS_MAPPINGS, ASYNC_TEST_NODE_DISPLAY_NAME_MAPPINGS + +# NODE_CLASS_MAPPINGS = GENERAL_NODE_CLASS_MAPPINGS.update(COMPONENT_NODE_CLASS_MAPPINGS) +# NODE_DISPLAY_NAME_MAPPINGS = GENERAL_NODE_DISPLAY_NAME_MAPPINGS.update(COMPONENT_NODE_DISPLAY_NAME_MAPPINGS) + +NODE_CLASS_MAPPINGS = {} +NODE_CLASS_MAPPINGS.update(TEST_NODE_CLASS_MAPPINGS) +NODE_CLASS_MAPPINGS.update(FLOW_CONTROL_NODE_CLASS_MAPPINGS) +NODE_CLASS_MAPPINGS.update(UTILITY_NODE_CLASS_MAPPINGS) +NODE_CLASS_MAPPINGS.update(CONDITION_NODE_CLASS_MAPPINGS) +NODE_CLASS_MAPPINGS.update(TEST_STUB_NODE_CLASS_MAPPINGS) +NODE_CLASS_MAPPINGS.update(ASYNC_TEST_NODE_CLASS_MAPPINGS) + +NODE_DISPLAY_NAME_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS.update(TEST_NODE_DISPLAY_NAME_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(FLOW_CONTROL_NODE_DISPLAY_NAME_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(UTILITY_NODE_DISPLAY_NAME_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(CONDITION_NODE_DISPLAY_NAME_MAPPINGS) 
+NODE_DISPLAY_NAME_MAPPINGS.update(TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(ASYNC_TEST_NODE_DISPLAY_NAME_MAPPINGS) + diff --git a/tests/inference/testing_nodes/testing-pack/async_test_nodes.py b/tests/inference/testing_nodes/testing-pack/async_test_nodes.py new file mode 100644 index 000000000..547eea6f4 --- /dev/null +++ b/tests/inference/testing_nodes/testing-pack/async_test_nodes.py @@ -0,0 +1,343 @@ +import torch +import asyncio +from typing import Dict +from comfy.utils import ProgressBar +from comfy_execution.graph_utils import GraphBuilder +from comfy.comfy_types.node_typing import ComfyNodeABC +from comfy.comfy_types import IO + + +class TestAsyncValidation(ComfyNodeABC): + """Test node with async VALIDATE_INPUTS.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 5.0}), + "threshold": ("FLOAT", {"default": 10.0}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "process" + CATEGORY = "_for_testing/async" + + @classmethod + async def VALIDATE_INPUTS(cls, value, threshold): + # Simulate async validation (e.g., checking remote service) + await asyncio.sleep(0.05) + + if value > threshold: + return f"Value {value} exceeds threshold {threshold}" + return True + + def process(self, value, threshold): + # Create image based on value + intensity = value / 10.0 + image = torch.ones([1, 512, 512, 3]) * intensity + return (image,) + + +class TestAsyncError(ComfyNodeABC): + """Test node that errors during async execution.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": (IO.ANY, {}), + "error_after": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 10.0}), + }, + } + + RETURN_TYPES = (IO.ANY,) + FUNCTION = "error_execution" + CATEGORY = "_for_testing/async" + + async def error_execution(self, value, error_after): + await asyncio.sleep(error_after) + raise RuntimeError("Intentional async execution error for testing") + + +class TestAsyncValidationError(ComfyNodeABC): + """Test node with async validation that always fails.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 5.0}), + "max_value": ("FLOAT", {"default": 10.0}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "process" + CATEGORY = "_for_testing/async" + + @classmethod + async def VALIDATE_INPUTS(cls, value, max_value): + await asyncio.sleep(0.05) + # Always fail validation for values > max_value + if value > max_value: + return f"Async validation failed: {value} > {max_value}" + return True + + def process(self, value, max_value): + # This won't be reached if validation fails + image = torch.ones([1, 512, 512, 3]) * (value / max_value) + return (image,) + + +class TestAsyncTimeout(ComfyNodeABC): + """Test node that simulates timeout scenarios.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": (IO.ANY, {}), + "timeout": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0}), + "operation_time": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 10.0}), + }, + } + + RETURN_TYPES = (IO.ANY,) + FUNCTION = "timeout_execution" + CATEGORY = "_for_testing/async" + + async def timeout_execution(self, value, timeout, operation_time): + try: + # This will timeout if operation_time > timeout + await asyncio.wait_for(asyncio.sleep(operation_time), timeout=timeout) + return (value,) + except asyncio.TimeoutError: + raise RuntimeError(f"Operation timed out after {timeout} seconds") + + +class TestSyncError(ComfyNodeABC): + """Test node that errors 
synchronously (for mixed sync/async testing).""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": (IO.ANY, {}), + }, + } + + RETURN_TYPES = (IO.ANY,) + FUNCTION = "sync_error" + CATEGORY = "_for_testing/async" + + def sync_error(self, value): + raise RuntimeError("Intentional sync execution error for testing") + + +class TestAsyncLazyCheck(ComfyNodeABC): + """Test node with async check_lazy_status.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "input1": (IO.ANY, {"lazy": True}), + "input2": (IO.ANY, {"lazy": True}), + "condition": ("BOOLEAN", {"default": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "process" + CATEGORY = "_for_testing/async" + + async def check_lazy_status(self, condition, input1, input2): + # Simulate async checking (e.g., querying remote service) + await asyncio.sleep(0.05) + + needed = [] + if condition and input1 is None: + needed.append("input1") + if not condition and input2 is None: + needed.append("input2") + return needed + + def process(self, input1, input2, condition): + # Return a simple image + return (torch.ones([1, 512, 512, 3]),) + + +class TestDynamicAsyncGeneration(ComfyNodeABC): + """Test node that dynamically generates async nodes.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "num_async_nodes": ("INT", {"default": 3, "min": 1, "max": 10}), + "sleep_duration": ("FLOAT", {"default": 0.2, "min": 0.1, "max": 1.0}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "generate_async_workflow" + CATEGORY = "_for_testing/async" + + def generate_async_workflow(self, image1, image2, num_async_nodes, sleep_duration): + g = GraphBuilder() + + # Create multiple async sleep nodes + sleep_nodes = [] + for i in range(num_async_nodes): + image = image1 if i % 2 == 0 else image2 + sleep_node = g.node("TestSleep", value=image, seconds=sleep_duration) + sleep_nodes.append(sleep_node) + + # Average all results + if len(sleep_nodes) == 1: + final_node = sleep_nodes[0] + else: + avg_inputs = {"input1": sleep_nodes[0].out(0)} + for i, node in enumerate(sleep_nodes[1:], 2): + avg_inputs[f"input{i}"] = node.out(0) + final_node = g.node("TestVariadicAverage", **avg_inputs) + + return { + "result": (final_node.out(0),), + "expand": g.finalize(), + } + + +class TestAsyncResourceUser(ComfyNodeABC): + """Test node that uses resources during async execution.""" + + # Class-level resource tracking for testing + _active_resources: Dict[str, bool] = {} + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": (IO.ANY, {}), + "resource_id": ("STRING", {"default": "resource_0"}), + "duration": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0}), + }, + } + + RETURN_TYPES = (IO.ANY,) + FUNCTION = "use_resource" + CATEGORY = "_for_testing/async" + + async def use_resource(self, value, resource_id, duration): + # Check if resource is already in use + if self._active_resources.get(resource_id, False): + raise RuntimeError(f"Resource {resource_id} is already in use!") + + # Mark resource as in use + self._active_resources[resource_id] = True + + try: + # Simulate resource usage + await asyncio.sleep(duration) + return (value,) + finally: + # Always clean up resource + self._active_resources[resource_id] = False + + +class TestAsyncBatchProcessing(ComfyNodeABC): + """Test async processing of batched inputs.""" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "process_time_per_item": 
("FLOAT", {"default": 0.1, "min": 0.01, "max": 1.0}), + }, + "hidden": { + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "process_batch" + CATEGORY = "_for_testing/async" + + async def process_batch(self, images, process_time_per_item, unique_id): + batch_size = images.shape[0] + pbar = ProgressBar(batch_size, node_id=unique_id) + + # Process each image in the batch + processed = [] + for i in range(batch_size): + # Simulate async processing + await asyncio.sleep(process_time_per_item) + + # Simple processing: invert the image + processed_image = 1.0 - images[i:i+1] + processed.append(processed_image) + + pbar.update(1) + + # Stack processed images + result = torch.cat(processed, dim=0) + return (result,) + + +class TestAsyncConcurrentLimit(ComfyNodeABC): + """Test concurrent execution limits for async nodes.""" + + _semaphore = asyncio.Semaphore(2) # Only allow 2 concurrent executions + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": (IO.ANY, {}), + "duration": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 2.0}), + "node_id": ("INT", {"default": 0}), + }, + } + + RETURN_TYPES = (IO.ANY,) + FUNCTION = "limited_execution" + CATEGORY = "_for_testing/async" + + async def limited_execution(self, value, duration, node_id): + async with self._semaphore: + # Node {node_id} acquired semaphore + await asyncio.sleep(duration) + # Node {node_id} releasing semaphore + return (value,) + + +# Add node mappings +ASYNC_TEST_NODE_CLASS_MAPPINGS = { + "TestAsyncValidation": TestAsyncValidation, + "TestAsyncError": TestAsyncError, + "TestAsyncValidationError": TestAsyncValidationError, + "TestAsyncTimeout": TestAsyncTimeout, + "TestSyncError": TestSyncError, + "TestAsyncLazyCheck": TestAsyncLazyCheck, + "TestDynamicAsyncGeneration": TestDynamicAsyncGeneration, + "TestAsyncResourceUser": TestAsyncResourceUser, + "TestAsyncBatchProcessing": TestAsyncBatchProcessing, + "TestAsyncConcurrentLimit": TestAsyncConcurrentLimit, +} + +ASYNC_TEST_NODE_DISPLAY_NAME_MAPPINGS = { + "TestAsyncValidation": "Test Async Validation", + "TestAsyncError": "Test Async Error", + "TestAsyncValidationError": "Test Async Validation Error", + "TestAsyncTimeout": "Test Async Timeout", + "TestSyncError": "Test Sync Error", + "TestAsyncLazyCheck": "Test Async Lazy Check", + "TestDynamicAsyncGeneration": "Test Dynamic Async Generation", + "TestAsyncResourceUser": "Test Async Resource User", + "TestAsyncBatchProcessing": "Test Async Batch Processing", + "TestAsyncConcurrentLimit": "Test Async Concurrent Limit", +} diff --git a/tests/inference/testing_nodes/testing-pack/specific_tests.py b/tests/inference/testing_nodes/testing-pack/specific_tests.py index 9d05ab14f..657d49f2f 100644 --- a/tests/inference/testing_nodes/testing-pack/specific_tests.py +++ b/tests/inference/testing_nodes/testing-pack/specific_tests.py @@ -1,6 +1,11 @@ import torch +import time +import asyncio +from comfy.utils import ProgressBar from .tools import VariantSupport from comfy_execution.graph_utils import GraphBuilder +from comfy.comfy_types.node_typing import ComfyNodeABC +from comfy.comfy_types import IO class TestLazyMixImages: @classmethod @@ -333,6 +338,131 @@ class TestMixedExpansionReturns: "expand": g.finalize(), } +class TestSamplingInExpansion: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 
100}), + "cfg": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 30.0}), + "prompt": ("STRING", {"multiline": True, "default": "a beautiful landscape with mountains and trees"}), + "negative_prompt": ("STRING", {"multiline": True, "default": "blurry, bad quality, worst quality"}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "sampling_in_expansion" + + CATEGORY = "Testing/Nodes" + + def sampling_in_expansion(self, model, clip, vae, seed, steps, cfg, prompt, negative_prompt): + g = GraphBuilder() + + # Create a basic image generation workflow using the input model, clip and vae + # 1. Setup text prompts using the provided CLIP model + positive_prompt = g.node("CLIPTextEncode", + text=prompt, + clip=clip) + negative_prompt = g.node("CLIPTextEncode", + text=negative_prompt, + clip=clip) + + # 2. Create empty latent with specified size + empty_latent = g.node("EmptyLatentImage", width=512, height=512, batch_size=1) + + # 3. Setup sampler and generate image latent + sampler = g.node("KSampler", + model=model, + positive=positive_prompt.out(0), + negative=negative_prompt.out(0), + latent_image=empty_latent.out(0), + seed=seed, + steps=steps, + cfg=cfg, + sampler_name="euler_ancestral", + scheduler="normal") + + # 4. Decode latent to image using VAE + output = g.node("VAEDecode", samples=sampler.out(0), vae=vae) + + return { + "result": (output.out(0),), + "expand": g.finalize(), + } + +class TestSleep(ComfyNodeABC): + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": (IO.ANY, {}), + "seconds": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01, "tooltip": "The amount of seconds to sleep."}), + }, + "hidden": { + "unique_id": "UNIQUE_ID", + }, + } + RETURN_TYPES = (IO.ANY,) + FUNCTION = "sleep" + + CATEGORY = "_for_testing" + + async def sleep(self, value, seconds, unique_id): + pbar = ProgressBar(seconds, node_id=unique_id) + start = time.time() + expiration = start + seconds + now = start + while now < expiration: + now = time.time() + pbar.update_absolute(now - start) + await asyncio.sleep(0.01) + return (value,) + +class TestParallelSleep(ComfyNodeABC): + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image1": ("IMAGE", ), + "image2": ("IMAGE", ), + "image3": ("IMAGE", ), + "sleep1": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.01}), + "sleep2": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.01}), + "sleep3": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.01}), + }, + "hidden": { + "unique_id": "UNIQUE_ID", + }, + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "parallel_sleep" + CATEGORY = "_for_testing" + OUTPUT_NODE = True + + def parallel_sleep(self, image1, image2, image3, sleep1, sleep2, sleep3, unique_id): + # Create a graph dynamically with three TestSleep nodes + g = GraphBuilder() + + # Create sleep nodes for each duration and image + sleep_node1 = g.node("TestSleep", value=image1, seconds=sleep1) + sleep_node2 = g.node("TestSleep", value=image2, seconds=sleep2) + sleep_node3 = g.node("TestSleep", value=image3, seconds=sleep3) + + # Blend the results using TestVariadicAverage + blend = g.node("TestVariadicAverage", + input1=sleep_node1.out(0), + input2=sleep_node2.out(0), + input3=sleep_node3.out(0)) + + return { + "result": (blend.out(0),), + "expand": g.finalize(), + } + TEST_NODE_CLASS_MAPPINGS = { "TestLazyMixImages": TestLazyMixImages, "TestVariadicAverage": TestVariadicAverage, @@ -345,6 +475,9 @@ TEST_NODE_CLASS_MAPPINGS = { "TestCustomValidation5": 
TestCustomValidation5, "TestDynamicDependencyCycle": TestDynamicDependencyCycle, "TestMixedExpansionReturns": TestMixedExpansionReturns, + "TestSamplingInExpansion": TestSamplingInExpansion, + "TestSleep": TestSleep, + "TestParallelSleep": TestParallelSleep, } TEST_NODE_DISPLAY_NAME_MAPPINGS = { @@ -359,4 +492,7 @@ TEST_NODE_DISPLAY_NAME_MAPPINGS = { "TestCustomValidation5": "Custom Validation 5", "TestDynamicDependencyCycle": "Dynamic Dependency Cycle", "TestMixedExpansionReturns": "Mixed Expansion Returns", + "TestSamplingInExpansion": "Sampling In Expansion", + "TestSleep": "Test Sleep", + "TestParallelSleep": "Test Parallel Sleep", } From b7ff5bd14def548519cc09770b794ad176868761 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 10 Jul 2025 12:21:18 -0700 Subject: [PATCH 0321/1073] Fix python3.9 (#8858) --- comfy_execution/progress.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_execution/progress.py b/comfy_execution/progress.py index 5645b3e3c..73dba3f75 100644 --- a/comfy_execution/progress.py +++ b/comfy_execution/progress.py @@ -317,7 +317,7 @@ class ProgressRegistry: handler.reset() # Global registry instance -global_progress_registry: ProgressRegistry | None = None +global_progress_registry: ProgressRegistry = None def reset_progress_state(prompt_id: str, dynprompt: "DynamicPrompt") -> None: global global_progress_registry From 8f05fb48eaab6b5d6fd0747c5be358a60d93e186 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Thu, 10 Jul 2025 15:00:29 -0700 Subject: [PATCH 0322/1073] [fix] increase Kling API polling timeout to prevent user timeouts (#8860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extends polling duration from 10 minutes to ~68 minutes (256 attempts × 16 seconds) to accommodate longer Kling API operations that were frequently timing out for users. --- comfy_api_nodes/nodes_kling.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 641cd6353..69e9e5cf0 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -132,6 +132,8 @@ def poll_until_finished( result_url_extractor=result_url_extractor, estimated_duration=estimated_duration, node_id=node_id, + poll_interval=16.0, + max_poll_attempts=256, ).execute() From 938d3e8216b9936913f105593b6127428feef473 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 10 Jul 2025 23:37:51 -0700 Subject: [PATCH 0323/1073] Remove windows line endings. 
(#8866) --- comfy/ldm/pixart/pixartms.py | 512 +++++++++++++++---------------- comfy/text_encoders/pixart_t5.py | 84 ++--- 2 files changed, 298 insertions(+), 298 deletions(-) diff --git a/comfy/ldm/pixart/pixartms.py b/comfy/ldm/pixart/pixartms.py index 7d4eebdce..d1ac49d84 100644 --- a/comfy/ldm/pixart/pixartms.py +++ b/comfy/ldm/pixart/pixartms.py @@ -1,256 +1,256 @@ -# Based on: -# https://github.com/PixArt-alpha/PixArt-alpha [Apache 2.0 license] -# https://github.com/PixArt-alpha/PixArt-sigma [Apache 2.0 license] -import torch -import torch.nn as nn - -from .blocks import ( - t2i_modulate, - CaptionEmbedder, - AttentionKVCompress, - MultiHeadCrossAttention, - T2IFinalLayer, - SizeEmbedder, -) -from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, PatchEmbed, Mlp, get_1d_sincos_pos_embed_from_grid_torch - - -def get_2d_sincos_pos_embed_torch(embed_dim, w, h, pe_interpolation=1.0, base_size=16, device=None, dtype=torch.float32): - grid_h, grid_w = torch.meshgrid( - torch.arange(h, device=device, dtype=dtype) / (h/base_size) / pe_interpolation, - torch.arange(w, device=device, dtype=dtype) / (w/base_size) / pe_interpolation, - indexing='ij' - ) - emb_h = get_1d_sincos_pos_embed_from_grid_torch(embed_dim // 2, grid_h, device=device, dtype=dtype) - emb_w = get_1d_sincos_pos_embed_from_grid_torch(embed_dim // 2, grid_w, device=device, dtype=dtype) - emb = torch.cat([emb_w, emb_h], dim=1) # (H*W, D) - return emb - -class PixArtMSBlock(nn.Module): - """ - A PixArt block with adaptive layer norm zero (adaLN-Zero) conditioning. - """ - def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., input_size=None, - sampling=None, sr_ratio=1, qk_norm=False, dtype=None, device=None, operations=None, **block_kwargs): - super().__init__() - self.hidden_size = hidden_size - self.norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.attn = AttentionKVCompress( - hidden_size, num_heads=num_heads, qkv_bias=True, sampling=sampling, sr_ratio=sr_ratio, - qk_norm=qk_norm, dtype=dtype, device=device, operations=operations, **block_kwargs - ) - self.cross_attn = MultiHeadCrossAttention( - hidden_size, num_heads, dtype=dtype, device=device, operations=operations, **block_kwargs - ) - self.norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - # to be compatible with lower version pytorch - approx_gelu = lambda: nn.GELU(approximate="tanh") - self.mlp = Mlp( - in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, - dtype=dtype, device=device, operations=operations - ) - self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5) - - def forward(self, x, y, t, mask=None, HW=None, **kwargs): - B, N, C = x.shape - - shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None].to(dtype=x.dtype, device=x.device) + t.reshape(B, 6, -1)).chunk(6, dim=1) - x = x + (gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa), HW=HW)) - x = x + self.cross_attn(x, y, mask) - x = x + (gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))) - - return x - - -### Core PixArt Model ### -class PixArtMS(nn.Module): - """ - Diffusion model with a Transformer backbone. 
- """ - def __init__( - self, - input_size=32, - patch_size=2, - in_channels=4, - hidden_size=1152, - depth=28, - num_heads=16, - mlp_ratio=4.0, - class_dropout_prob=0.1, - learn_sigma=True, - pred_sigma=True, - drop_path: float = 0., - caption_channels=4096, - pe_interpolation=None, - pe_precision=None, - config=None, - model_max_length=120, - micro_condition=True, - qk_norm=False, - kv_compress_config=None, - dtype=None, - device=None, - operations=None, - **kwargs, - ): - nn.Module.__init__(self) - self.dtype = dtype - self.pred_sigma = pred_sigma - self.in_channels = in_channels - self.out_channels = in_channels * 2 if pred_sigma else in_channels - self.patch_size = patch_size - self.num_heads = num_heads - self.pe_interpolation = pe_interpolation - self.pe_precision = pe_precision - self.hidden_size = hidden_size - self.depth = depth - - approx_gelu = lambda: nn.GELU(approximate="tanh") - self.t_block = nn.Sequential( - nn.SiLU(), - operations.Linear(hidden_size, 6 * hidden_size, bias=True, dtype=dtype, device=device) - ) - self.x_embedder = PatchEmbed( - patch_size=patch_size, - in_chans=in_channels, - embed_dim=hidden_size, - bias=True, - dtype=dtype, - device=device, - operations=operations - ) - self.t_embedder = TimestepEmbedder( - hidden_size, dtype=dtype, device=device, operations=operations, - ) - self.y_embedder = CaptionEmbedder( - in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, - act_layer=approx_gelu, token_num=model_max_length, - dtype=dtype, device=device, operations=operations, - ) - - self.micro_conditioning = micro_condition - if self.micro_conditioning: - self.csize_embedder = SizeEmbedder(hidden_size//3, dtype=dtype, device=device, operations=operations) - self.ar_embedder = SizeEmbedder(hidden_size//3, dtype=dtype, device=device, operations=operations) - - # For fixed sin-cos embedding: - # num_patches = (input_size // patch_size) * (input_size // patch_size) - # self.base_size = input_size // self.patch_size - # self.register_buffer("pos_embed", torch.zeros(1, num_patches, hidden_size)) - - drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule - if kv_compress_config is None: - kv_compress_config = { - 'sampling': None, - 'scale_factor': 1, - 'kv_compress_layer': [], - } - self.blocks = nn.ModuleList([ - PixArtMSBlock( - hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i], - sampling=kv_compress_config['sampling'], - sr_ratio=int(kv_compress_config['scale_factor']) if i in kv_compress_config['kv_compress_layer'] else 1, - qk_norm=qk_norm, - dtype=dtype, - device=device, - operations=operations, - ) - for i in range(depth) - ]) - self.final_layer = T2IFinalLayer( - hidden_size, patch_size, self.out_channels, dtype=dtype, device=device, operations=operations - ) - - def forward_orig(self, x, timestep, y, mask=None, c_size=None, c_ar=None, **kwargs): - """ - Original forward pass of PixArt. 
- x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) - t: (N,) tensor of diffusion timesteps - y: (N, 1, 120, C) conditioning - ar: (N, 1): aspect ratio - cs: (N ,2) size conditioning for height/width - """ - B, C, H, W = x.shape - c_res = (H + W) // 2 - pe_interpolation = self.pe_interpolation - if pe_interpolation is None or self.pe_precision is not None: - # calculate pe_interpolation on-the-fly - pe_interpolation = round(c_res / (512/8.0), self.pe_precision or 0) - - pos_embed = get_2d_sincos_pos_embed_torch( - self.hidden_size, - h=(H // self.patch_size), - w=(W // self.patch_size), - pe_interpolation=pe_interpolation, - base_size=((round(c_res / 64) * 64) // self.patch_size), - device=x.device, - dtype=x.dtype, - ).unsqueeze(0) - - x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2 - t = self.t_embedder(timestep, x.dtype) # (N, D) - - if self.micro_conditioning and (c_size is not None and c_ar is not None): - bs = x.shape[0] - c_size = self.csize_embedder(c_size, bs) # (N, D) - c_ar = self.ar_embedder(c_ar, bs) # (N, D) - t = t + torch.cat([c_size, c_ar], dim=1) - - t0 = self.t_block(t) - y = self.y_embedder(y, self.training) # (N, D) - - if mask is not None: - if mask.shape[0] != y.shape[0]: - mask = mask.repeat(y.shape[0] // mask.shape[0], 1) - mask = mask.squeeze(1).squeeze(1) - y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1]) - y_lens = mask.sum(dim=1).tolist() - else: - y_lens = None - y = y.squeeze(1).view(1, -1, x.shape[-1]) - for block in self.blocks: - x = block(x, y, t0, y_lens, (H, W), **kwargs) # (N, T, D) - - x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels) - x = self.unpatchify(x, H, W) # (N, out_channels, H, W) - - return x - - def forward(self, x, timesteps, context, c_size=None, c_ar=None, **kwargs): - B, C, H, W = x.shape - - # Fallback for missing microconds - if self.micro_conditioning: - if c_size is None: - c_size = torch.tensor([H*8, W*8], dtype=x.dtype, device=x.device).repeat(B, 1) - - if c_ar is None: - c_ar = torch.tensor([H/W], dtype=x.dtype, device=x.device).repeat(B, 1) - - ## Still accepts the input w/o that dim but returns garbage - if len(context.shape) == 3: - context = context.unsqueeze(1) - - ## run original forward pass - out = self.forward_orig(x, timesteps, context, c_size=c_size, c_ar=c_ar) - - ## only return EPS - if self.pred_sigma: - return out[:, :self.in_channels] - return out - - def unpatchify(self, x, h, w): - """ - x: (N, T, patch_size**2 * C) - imgs: (N, H, W, C) - """ - c = self.out_channels - p = self.x_embedder.patch_size[0] - h = h // self.patch_size - w = w // self.patch_size - assert h * w == x.shape[1] - - x = x.reshape(shape=(x.shape[0], h, w, p, p, c)) - x = torch.einsum('nhwpqc->nchpwq', x) - imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p)) - return imgs +# Based on: +# https://github.com/PixArt-alpha/PixArt-alpha [Apache 2.0 license] +# https://github.com/PixArt-alpha/PixArt-sigma [Apache 2.0 license] +import torch +import torch.nn as nn + +from .blocks import ( + t2i_modulate, + CaptionEmbedder, + AttentionKVCompress, + MultiHeadCrossAttention, + T2IFinalLayer, + SizeEmbedder, +) +from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, PatchEmbed, Mlp, get_1d_sincos_pos_embed_from_grid_torch + + +def get_2d_sincos_pos_embed_torch(embed_dim, w, h, pe_interpolation=1.0, base_size=16, device=None, dtype=torch.float32): + grid_h, grid_w = torch.meshgrid( + torch.arange(h, device=device, 
dtype=dtype) / (h/base_size) / pe_interpolation, + torch.arange(w, device=device, dtype=dtype) / (w/base_size) / pe_interpolation, + indexing='ij' + ) + emb_h = get_1d_sincos_pos_embed_from_grid_torch(embed_dim // 2, grid_h, device=device, dtype=dtype) + emb_w = get_1d_sincos_pos_embed_from_grid_torch(embed_dim // 2, grid_w, device=device, dtype=dtype) + emb = torch.cat([emb_w, emb_h], dim=1) # (H*W, D) + return emb + +class PixArtMSBlock(nn.Module): + """ + A PixArt block with adaptive layer norm zero (adaLN-Zero) conditioning. + """ + def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., input_size=None, + sampling=None, sr_ratio=1, qk_norm=False, dtype=None, device=None, operations=None, **block_kwargs): + super().__init__() + self.hidden_size = hidden_size + self.norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + self.attn = AttentionKVCompress( + hidden_size, num_heads=num_heads, qkv_bias=True, sampling=sampling, sr_ratio=sr_ratio, + qk_norm=qk_norm, dtype=dtype, device=device, operations=operations, **block_kwargs + ) + self.cross_attn = MultiHeadCrossAttention( + hidden_size, num_heads, dtype=dtype, device=device, operations=operations, **block_kwargs + ) + self.norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) + # to be compatible with lower version pytorch + approx_gelu = lambda: nn.GELU(approximate="tanh") + self.mlp = Mlp( + in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, + dtype=dtype, device=device, operations=operations + ) + self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5) + + def forward(self, x, y, t, mask=None, HW=None, **kwargs): + B, N, C = x.shape + + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None].to(dtype=x.dtype, device=x.device) + t.reshape(B, 6, -1)).chunk(6, dim=1) + x = x + (gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa), HW=HW)) + x = x + self.cross_attn(x, y, mask) + x = x + (gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))) + + return x + + +### Core PixArt Model ### +class PixArtMS(nn.Module): + """ + Diffusion model with a Transformer backbone. 
+ """ + def __init__( + self, + input_size=32, + patch_size=2, + in_channels=4, + hidden_size=1152, + depth=28, + num_heads=16, + mlp_ratio=4.0, + class_dropout_prob=0.1, + learn_sigma=True, + pred_sigma=True, + drop_path: float = 0., + caption_channels=4096, + pe_interpolation=None, + pe_precision=None, + config=None, + model_max_length=120, + micro_condition=True, + qk_norm=False, + kv_compress_config=None, + dtype=None, + device=None, + operations=None, + **kwargs, + ): + nn.Module.__init__(self) + self.dtype = dtype + self.pred_sigma = pred_sigma + self.in_channels = in_channels + self.out_channels = in_channels * 2 if pred_sigma else in_channels + self.patch_size = patch_size + self.num_heads = num_heads + self.pe_interpolation = pe_interpolation + self.pe_precision = pe_precision + self.hidden_size = hidden_size + self.depth = depth + + approx_gelu = lambda: nn.GELU(approximate="tanh") + self.t_block = nn.Sequential( + nn.SiLU(), + operations.Linear(hidden_size, 6 * hidden_size, bias=True, dtype=dtype, device=device) + ) + self.x_embedder = PatchEmbed( + patch_size=patch_size, + in_chans=in_channels, + embed_dim=hidden_size, + bias=True, + dtype=dtype, + device=device, + operations=operations + ) + self.t_embedder = TimestepEmbedder( + hidden_size, dtype=dtype, device=device, operations=operations, + ) + self.y_embedder = CaptionEmbedder( + in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, + act_layer=approx_gelu, token_num=model_max_length, + dtype=dtype, device=device, operations=operations, + ) + + self.micro_conditioning = micro_condition + if self.micro_conditioning: + self.csize_embedder = SizeEmbedder(hidden_size//3, dtype=dtype, device=device, operations=operations) + self.ar_embedder = SizeEmbedder(hidden_size//3, dtype=dtype, device=device, operations=operations) + + # For fixed sin-cos embedding: + # num_patches = (input_size // patch_size) * (input_size // patch_size) + # self.base_size = input_size // self.patch_size + # self.register_buffer("pos_embed", torch.zeros(1, num_patches, hidden_size)) + + drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule + if kv_compress_config is None: + kv_compress_config = { + 'sampling': None, + 'scale_factor': 1, + 'kv_compress_layer': [], + } + self.blocks = nn.ModuleList([ + PixArtMSBlock( + hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i], + sampling=kv_compress_config['sampling'], + sr_ratio=int(kv_compress_config['scale_factor']) if i in kv_compress_config['kv_compress_layer'] else 1, + qk_norm=qk_norm, + dtype=dtype, + device=device, + operations=operations, + ) + for i in range(depth) + ]) + self.final_layer = T2IFinalLayer( + hidden_size, patch_size, self.out_channels, dtype=dtype, device=device, operations=operations + ) + + def forward_orig(self, x, timestep, y, mask=None, c_size=None, c_ar=None, **kwargs): + """ + Original forward pass of PixArt. 
+ x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) + t: (N,) tensor of diffusion timesteps + y: (N, 1, 120, C) conditioning + ar: (N, 1): aspect ratio + cs: (N ,2) size conditioning for height/width + """ + B, C, H, W = x.shape + c_res = (H + W) // 2 + pe_interpolation = self.pe_interpolation + if pe_interpolation is None or self.pe_precision is not None: + # calculate pe_interpolation on-the-fly + pe_interpolation = round(c_res / (512/8.0), self.pe_precision or 0) + + pos_embed = get_2d_sincos_pos_embed_torch( + self.hidden_size, + h=(H // self.patch_size), + w=(W // self.patch_size), + pe_interpolation=pe_interpolation, + base_size=((round(c_res / 64) * 64) // self.patch_size), + device=x.device, + dtype=x.dtype, + ).unsqueeze(0) + + x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2 + t = self.t_embedder(timestep, x.dtype) # (N, D) + + if self.micro_conditioning and (c_size is not None and c_ar is not None): + bs = x.shape[0] + c_size = self.csize_embedder(c_size, bs) # (N, D) + c_ar = self.ar_embedder(c_ar, bs) # (N, D) + t = t + torch.cat([c_size, c_ar], dim=1) + + t0 = self.t_block(t) + y = self.y_embedder(y, self.training) # (N, D) + + if mask is not None: + if mask.shape[0] != y.shape[0]: + mask = mask.repeat(y.shape[0] // mask.shape[0], 1) + mask = mask.squeeze(1).squeeze(1) + y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1]) + y_lens = mask.sum(dim=1).tolist() + else: + y_lens = None + y = y.squeeze(1).view(1, -1, x.shape[-1]) + for block in self.blocks: + x = block(x, y, t0, y_lens, (H, W), **kwargs) # (N, T, D) + + x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels) + x = self.unpatchify(x, H, W) # (N, out_channels, H, W) + + return x + + def forward(self, x, timesteps, context, c_size=None, c_ar=None, **kwargs): + B, C, H, W = x.shape + + # Fallback for missing microconds + if self.micro_conditioning: + if c_size is None: + c_size = torch.tensor([H*8, W*8], dtype=x.dtype, device=x.device).repeat(B, 1) + + if c_ar is None: + c_ar = torch.tensor([H/W], dtype=x.dtype, device=x.device).repeat(B, 1) + + ## Still accepts the input w/o that dim but returns garbage + if len(context.shape) == 3: + context = context.unsqueeze(1) + + ## run original forward pass + out = self.forward_orig(x, timesteps, context, c_size=c_size, c_ar=c_ar) + + ## only return EPS + if self.pred_sigma: + return out[:, :self.in_channels] + return out + + def unpatchify(self, x, h, w): + """ + x: (N, T, patch_size**2 * C) + imgs: (N, H, W, C) + """ + c = self.out_channels + p = self.x_embedder.patch_size[0] + h = h // self.patch_size + w = w // self.patch_size + assert h * w == x.shape[1] + + x = x.reshape(shape=(x.shape[0], h, w, p, p, c)) + x = torch.einsum('nhwpqc->nchpwq', x) + imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p)) + return imgs diff --git a/comfy/text_encoders/pixart_t5.py b/comfy/text_encoders/pixart_t5.py index b8de6bc4e..5f383de07 100644 --- a/comfy/text_encoders/pixart_t5.py +++ b/comfy/text_encoders/pixart_t5.py @@ -1,42 +1,42 @@ -import os - -from comfy import sd1_clip -import comfy.text_encoders.t5 -import comfy.text_encoders.sd3_clip -from comfy.sd1_clip import gen_empty_tokens - -from transformers import T5TokenizerFast - -class T5XXLModel(comfy.text_encoders.sd3_clip.T5XXLModel): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def gen_empty_tokens(self, special_tokens, *args, **kwargs): - # PixArt expects the negative to be all pad tokens - 
special_tokens = special_tokens.copy() - special_tokens.pop("end") - return gen_empty_tokens(special_tokens, *args, **kwargs) - -class PixArtT5XXL(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", dtype=None, model_options={}): - super().__init__(device=device, dtype=dtype, name="t5xxl", clip_model=T5XXLModel, model_options=model_options) - -class T5XXLTokenizer(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_data=tokenizer_data) # no padding - -class PixArtTokenizer(sd1_clip.SD1Tokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer) - -def pixart_te(dtype_t5=None, t5xxl_scaled_fp8=None): - class PixArtTEModel_(PixArtT5XXL): - def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: - model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 - if dtype is None: - dtype = dtype_t5 - super().__init__(device=device, dtype=dtype, model_options=model_options) - return PixArtTEModel_ +import os + +from comfy import sd1_clip +import comfy.text_encoders.t5 +import comfy.text_encoders.sd3_clip +from comfy.sd1_clip import gen_empty_tokens + +from transformers import T5TokenizerFast + +class T5XXLModel(comfy.text_encoders.sd3_clip.T5XXLModel): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def gen_empty_tokens(self, special_tokens, *args, **kwargs): + # PixArt expects the negative to be all pad tokens + special_tokens = special_tokens.copy() + special_tokens.pop("end") + return gen_empty_tokens(special_tokens, *args, **kwargs) + +class PixArtT5XXL(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, name="t5xxl", clip_model=T5XXLModel, model_options=model_options) + +class T5XXLTokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") + super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_data=tokenizer_data) # no padding + +class PixArtTokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer) + +def pixart_te(dtype_t5=None, t5xxl_scaled_fp8=None): + class PixArtTEModel_(PixArtT5XXL): + def __init__(self, device="cpu", dtype=None, model_options={}): + if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + model_options = model_options.copy() + model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 + if dtype is None: + dtype = dtype_t5 + super().__init__(device=device, dtype=dtype, 
model_options=model_options) + return PixArtTEModel_ From 7bc7dd2aa2030a87135a3c8d20f636d52627ca9d Mon Sep 17 00:00:00 2001 From: JettHu <35261585+JettHu@users.noreply.github.com> Date: Sat, 12 Jul 2025 00:51:06 +0800 Subject: [PATCH 0324/1073] Execute async node earlier (#8865) --- comfy_execution/graph.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py index c79243e1e..60e2ab91e 100644 --- a/comfy_execution/graph.py +++ b/comfy_execution/graph.py @@ -3,6 +3,7 @@ from typing import Type, Literal import nodes import asyncio +import inspect from comfy_execution.graph_utils import is_link from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, InputTypeOptions @@ -239,8 +240,15 @@ class ExecutionList(TopologicalSort): return True return False + # If an available node is async, do that first. + # This will execute the asynchronous function earlier, reducing the overall time. + def is_async(node_id): + class_type = self.dynprompt.get_node(node_id)["class_type"] + class_def = nodes.NODE_CLASS_MAPPINGS[class_type] + return inspect.iscoroutinefunction(getattr(class_def, class_def.FUNCTION)) + for node_id in node_list: - if is_output(node_id): + if is_output(node_id) or is_async(node_id): return node_id #This should handle the VAEDecode -> preview case From b43916a134d503bd5cab15dfa281f429527e448b Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Sat, 12 Jul 2025 00:52:58 +0800 Subject: [PATCH 0325/1073] Fix fresca's input and output (#8871) --- comfy_extras/nodes_fresca.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_fresca.py b/comfy_extras/nodes_fresca.py index ee310c874..65c2d0d0e 100644 --- a/comfy_extras/nodes_fresca.py +++ b/comfy_extras/nodes_fresca.py @@ -71,8 +71,11 @@ class FreSca: DESCRIPTION = "Applies frequency-dependent scaling to the guidance" def patch(self, model, scale_low, scale_high, freq_cutoff): def custom_cfg_function(args): - cond = args["conds_out"][0] - uncond = args["conds_out"][1] + conds_out = args["conds_out"] + if len(conds_out) <= 1 or None in args["conds"][:2]: + return conds_out + cond = conds_out[0] + uncond = conds_out[1] guidance = cond - uncond filtered_guidance = Fourier_filter( @@ -83,7 +86,7 @@ class FreSca: ) filtered_cond = filtered_guidance + uncond - return [filtered_cond, uncond] + return [filtered_cond, uncond] + conds_out[2:] m = model.clone() m.set_model_sampler_pre_cfg_function(custom_cfg_function) From b40143984c1bed9cd1bc73a373a80a90c625eb0f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 12 Jul 2025 00:49:26 -0700 Subject: [PATCH 0326/1073] Add model detection error hint for lora. 
(#8880)

---
 comfy/sd.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/comfy/sd.py b/comfy/sd.py
index 5b95cf75a..8081b167c 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -18,6 +18,7 @@ import comfy.ldm.hunyuan3d.vae
 import comfy.ldm.ace.vae.music_dcae_pipeline
 import yaml
 import math
+import os
 
 import comfy.utils
 
@@ -977,6 +978,12 @@ def load_gligen(ckpt_path):
         model = model.half()
     return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())
 
+def model_detection_error_hint(path, state_dict):
+    filename = os.path.basename(path)
+    if 'lora' in filename.lower():
+        return "\nHINT: This seems to be a Lora file and Lora files should be put in the lora folder and loaded with a lora loader node."
+    return ""
+
 def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
     logging.warning("Warning: The load checkpoint with config function is deprecated and will eventually be removed, please use the other one.")
     model, clip, vae, _ = load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=False, embedding_directory=embedding_directory, output_model=True)
@@ -1005,7 +1012,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
     sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True)
     out = load_state_dict_guess_config(sd, output_vae, output_clip, output_clipvision, embedding_directory, output_model, model_options, te_model_options=te_model_options, metadata=metadata)
     if out is None:
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(ckpt_path, model_detection_error_hint(ckpt_path, sd)))
     return out
 
 def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, model_options={}, te_model_options={}, metadata=None):
@@ -1177,7 +1184,7 @@ def load_diffusion_model(unet_path, model_options={}):
     model = load_diffusion_model_state_dict(sd, model_options=model_options)
     if model is None:
         logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path))
-        raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
+        raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd)))
     return model
 
 def load_unet(unet_path, dtype=None):

From 480375f3495e9e1437faf47eb2a11222c9acf3f0 Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Sun, 13 Jul 2025 01:46:27 -0700
Subject: [PATCH 0327/1073] Remove auth tokens from history storage (#8889)

Remove auth_token_comfy_org and api_key_comfy_org from extra_data before
storing prompt history to prevent sensitive authentication tokens from
being persisted in the history endpoint response.
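In practice this is a small scrub step applied to the request's extra_data before the
prompt entry is written into the history map. A minimal standalone sketch of the idea,
assuming a plain dict for extra_data (the scrub_extra_data helper and the copy-based
approach are illustrative; the actual diff below mutates the stored prompt tuple in place):

    # Hypothetical helper sketching the scrub step; the key names match this
    # commit and the follow-up refactor in PATCH 0328.
    SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")

    def scrub_extra_data(extra_data: dict) -> dict:
        # Return a copy with sensitive keys removed so tokens never reach
        # the persisted history (or the /history endpoint response).
        return {k: v for k, v in extra_data.items() if k not in SENSITIVE_EXTRA_DATA_KEYS}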
--- execution.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/execution.py b/execution.py index 90cefc023..bd638afba 100644 --- a/execution.py +++ b/execution.py @@ -1045,6 +1045,12 @@ class PromptQueue: if status is not None: status_dict = copy.deepcopy(status._asdict()) + # Remove auth tokens from extra_data before storing in history + if "auth_token_comfy_org" in prompt[3]: + del prompt[3]["auth_token_comfy_org"] + if "api_key_comfy_org" in prompt[3]: + del prompt[3]["api_key_comfy_org"] + self.history[prompt[1]] = { "prompt": prompt, "outputs": {}, From 4831e9c2c47b97f85fd771521f247a017d1f43e1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 13 Jul 2025 01:59:17 -0700 Subject: [PATCH 0328/1073] Refactor previous pr. (#8893) --- execution.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/execution.py b/execution.py index bd638afba..c3a62f1cb 100644 --- a/execution.py +++ b/execution.py @@ -123,6 +123,8 @@ class CacheSet: } return result +SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org") + def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}): valid_inputs = class_def.INPUT_TYPES() input_data_all = {} @@ -1045,11 +1047,10 @@ class PromptQueue: if status is not None: status_dict = copy.deepcopy(status._asdict()) - # Remove auth tokens from extra_data before storing in history - if "auth_token_comfy_org" in prompt[3]: - del prompt[3]["auth_token_comfy_org"] - if "api_key_comfy_org" in prompt[3]: - del prompt[3]["api_key_comfy_org"] + # Remove sensitive data from extra_data before storing in history + for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS: + if sensitive_val in prompt[3]: + prompt[3].pop(sensitive_val) self.history[prompt[1]] = { "prompt": prompt, From 9ca581c9416d799db0d7c55ac957a5fa486798c8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 14 Jul 2025 10:10:20 -0700 Subject: [PATCH 0329/1073] Remove windows line endings. (#8902) --- comfy_extras/nodes_pixart.py | 48 ++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/comfy_extras/nodes_pixart.py b/comfy_extras/nodes_pixart.py index c7209c468..8d9276afe 100644 --- a/comfy_extras/nodes_pixart.py +++ b/comfy_extras/nodes_pixart.py @@ -1,24 +1,24 @@ -from nodes import MAX_RESOLUTION - -class CLIPTextEncodePixArtAlpha: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ), - }} - - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" - CATEGORY = "advanced/conditioning" - DESCRIPTION = "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma." 
-
-    def encode(self, clip, width, height, text):
-        tokens = clip.tokenize(text)
-        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}),)
-
-NODE_CLASS_MAPPINGS = {
-    "CLIPTextEncodePixArtAlpha": CLIPTextEncodePixArtAlpha,
-}
+from nodes import MAX_RESOLUTION
+
+class CLIPTextEncodePixArtAlpha:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+            # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+            "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
+            }}
+
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "encode"
+    CATEGORY = "advanced/conditioning"
+    DESCRIPTION = "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma."
+
+    def encode(self, clip, width, height, text):
+        tokens = clip.tokenize(text)
+        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}),)
+
+NODE_CLASS_MAPPINGS = {
+    "CLIPTextEncodePixArtAlpha": CLIPTextEncodePixArtAlpha,
+}

From 861c3bbb3d2330dc7dff7567ffcf07946ace23b8 Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Tue, 15 Jul 2025 01:27:57 +0800
Subject: [PATCH 0330/1073] Update template to 0.1.36 (#8904)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 19a40ca0e..7705918a8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.23.4
-comfyui-workflow-templates==0.1.35
+comfyui-workflow-templates==0.1.36
 comfyui-embedded-docs==0.2.4
 torch
 torchsde

From 260a5ca5d9997bfa1cec5a4922cb066187e6daf0 Mon Sep 17 00:00:00 2001
From: FeepingCreature <540727+FeepingCreature@users.noreply.github.com>
Date: Mon, 14 Jul 2025 20:48:31 +0200
Subject: [PATCH 0331/1073] Allow the prompt request to specify the prompt ID. (#8189)

This makes it easier to write asynchronous clients that submit requests,
because they can store the task immediately. Duplicate prompt IDs are
rejected by the job queue.
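Concretely, a client can mint its own UUID, record the task under that ID, and only
then POST to /prompt. A minimal sketch using the request shape from the example script
updated below (the pending_tasks registry and the submit_prompt helper are illustrative
bookkeeping, not part of the server API):

    import json
    import urllib.request
    import uuid

    pending_tasks = {}  # prompt_id -> local task record (illustrative)

    def submit_prompt(server_address, prompt, client_id):
        prompt_id = str(uuid.uuid4())
        pending_tasks[prompt_id] = {"status": "queued"}  # stored before the request is sent
        payload = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
        req = urllib.request.Request("http://{}/prompt".format(server_address),
                                     data=json.dumps(payload).encode('utf-8'))
        urllib.request.urlopen(req).read()
        return prompt_id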
--- script_examples/websockets_api_example.py | 11 ++++++----- server.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/script_examples/websockets_api_example.py b/script_examples/websockets_api_example.py index d696d2bba..58f26cfb6 100644 --- a/script_examples/websockets_api_example.py +++ b/script_examples/websockets_api_example.py @@ -10,11 +10,11 @@ import urllib.parse server_address = "127.0.0.1:8188" client_id = str(uuid.uuid4()) -def queue_prompt(prompt): - p = {"prompt": prompt, "client_id": client_id} +def queue_prompt(prompt, prompt_id): + p = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id} data = json.dumps(p).encode('utf-8') - req = urllib.request.Request("http://{}/prompt".format(server_address), data=data) - return json.loads(urllib.request.urlopen(req).read()) + req = urllib.request.Request("http://{}/prompt".format(server_address), data=data) + urllib.request.urlopen(req).read() def get_image(filename, subfolder, folder_type): data = {"filename": filename, "subfolder": subfolder, "type": folder_type} @@ -27,7 +27,8 @@ def get_history(prompt_id): return json.loads(response.read()) def get_images(ws, prompt): - prompt_id = queue_prompt(prompt)['prompt_id'] + prompt_id = str(uuid.uuid4()) + queue_prompt(prompt, prompt_id) output_images = {} while True: out = ws.recv() diff --git a/server.py b/server.py index e8bad9f4e..71a58f0fa 100644 --- a/server.py +++ b/server.py @@ -678,7 +678,7 @@ class PromptServer(): if "prompt" in json_data: prompt = json_data["prompt"] - prompt_id = str(uuid.uuid4()) + prompt_id = str(json_data.get("prompt_id", uuid.uuid4())) valid = await execution.validate_prompt(prompt_id, prompt) extra_data = {} if "extra_data" in json_data: From 543c24108c565a7dc21920d2c509b7596020eac1 Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Mon, 14 Jul 2025 17:45:55 -0700 Subject: [PATCH 0332/1073] Fix wrong reference bug (#8910) --- comfy/supported_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 2669ca01e..2ca3857f7 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1214,7 +1214,7 @@ class Omnigen2(supported_models_base.BASE): def clip_target(self, state_dict={}): pref = self.text_encoder_key_prefix[0] hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_3b.transformer.".format(pref)) - return supported_models_base.ClipTarget(comfy.text_encoders.omnigen2.LuminaTokenizer, comfy.text_encoders.omnigen2.te(**hunyuan_detect)) + return supported_models_base.ClipTarget(comfy.text_encoders.omnigen2.Omnigen2Tokenizer, comfy.text_encoders.omnigen2.te(**hunyuan_detect)) models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2] From 9dabda19f046796f1169b593188b9b1b9263705e Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Mon, 14 Jul 2025 17:59:35 
-0700 Subject: [PATCH 0333/1073] Update nodes_gemini.py (#8912) --- comfy_api_nodes/nodes_gemini.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index ae7b04846..5935ab2bb 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -406,7 +406,7 @@ class GeminiInputFiles(ComfyNodeABC): def create_file_part(self, file_path: str) -> GeminiPart: mime_type = ( - GeminiMimeType.pdf + GeminiMimeType.application_pdf if file_path.endswith(".pdf") else GeminiMimeType.text_plain ) From b1ae4126c39dd2a2831d4b88f91c7d18bc307ef7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 14 Jul 2025 23:27:18 -0700 Subject: [PATCH 0334/1073] Add action to detect windows line endings. (#8917) --- .github/workflows/check-line-endings.yml | 40 ++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 .github/workflows/check-line-endings.yml diff --git a/.github/workflows/check-line-endings.yml b/.github/workflows/check-line-endings.yml new file mode 100644 index 000000000..f20dca565 --- /dev/null +++ b/.github/workflows/check-line-endings.yml @@ -0,0 +1,40 @@ +name: Check for Windows Line Endings + +on: + pull_request: + branches: ['*'] # Trigger on all pull requests to any branch + +jobs: + check-line-endings: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch all history to compare changes + + - name: Check for Windows line endings (CRLF) + run: | + # Get the list of changed files in the PR + CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}..HEAD) + + # Flag to track if CRLF is found + CRLF_FOUND=false + + # Loop through each changed file + for FILE in $CHANGED_FILES; do + # Check if the file exists and is a text file + if [ -f "$FILE" ] && file "$FILE" | grep -q "text"; then + # Check for CRLF line endings + if grep -UP '\r$' "$FILE"; then + echo "Error: Windows line endings (CRLF) detected in $FILE" + CRLF_FOUND=true + fi + fi + done + + # Exit with error if CRLF was found + if [ "$CRLF_FOUND" = true ]; then + exit 1 + fi From 6b8062f4141f6655c4cea5422c29aa0a8b532cdf Mon Sep 17 00:00:00 2001 From: Brandon Wallace Date: Tue, 15 Jul 2025 20:08:27 -0500 Subject: [PATCH 0335/1073] Fix MaskComposite error when destination has 2 dimensions (#8915) Fix code that is using the original `destination` input instead of the reshaped value. --- comfy_extras/nodes_mask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index ab387a2fc..2b0f8dd5d 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -247,7 +247,7 @@ class MaskComposite: visible_width, visible_height = (right - left, bottom - top,) source_portion = source[:, :visible_height, :visible_width] - destination_portion = destination[:, top:bottom, left:right] + destination_portion = output[:, top:bottom, left:right] if operation == "multiply": output[:, top:bottom, left:right] = destination_portion * source_portion From 50afba747cd3413a6f6eb6703c627a24d2e6f165 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 16 Jul 2025 00:42:17 -0700 Subject: [PATCH 0336/1073] Add attempt to work around the safetensors mmap issue. 
(#8928) --- comfy/cli_args.py | 1 + comfy/utils.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 7234a7ba0..ef0d4337e 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -144,6 +144,7 @@ class PerformanceFeature(enum.Enum): parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops") parser.add_argument("--mmap-torch-files", action="store_true", help="Use mmap when loading ckpt/pt files.") +parser.add_argument("--disable-mmap", action="store_true", help="Don't use mmap when loading safetensors.") parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") diff --git a/comfy/utils.py b/comfy/utils.py index f8e01f713..9c076a0e0 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -31,6 +31,7 @@ from einops import rearrange from comfy.cli_args import args MMAP_TORCH_FILES = args.mmap_torch_files +DISABLE_MMAP = args.disable_mmap ALWAYS_SAFE_LOAD = False if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in pytorch 2.4, the unsafe path should be removed once earlier versions are deprecated @@ -58,7 +59,10 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False): with safetensors.safe_open(ckpt, framework="pt", device=device.type) as f: sd = {} for k in f.keys(): - sd[k] = f.get_tensor(k) + tensor = f.get_tensor(k) + if DISABLE_MMAP: # TODO: Not sure if this is the best way to bypass the mmap issues + tensor = tensor.to(device=device, copy=True) + sd[k] = tensor if return_metadata: metadata = f.metadata() except Exception as e: From 9bc2798f72947d6b76b3650c88a7b2f0afc1a8d0 Mon Sep 17 00:00:00 2001 From: Harel Cain Date: Wed, 16 Jul 2025 19:54:38 +0200 Subject: [PATCH 0337/1073] LTXV VAE decoder: switch default padding mode (#8930) --- comfy/ldm/lightricks/vae/causal_video_autoencoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/lightricks/vae/causal_video_autoencoder.py b/comfy/ldm/lightricks/vae/causal_video_autoencoder.py index f91870d71..75ed069ad 100644 --- a/comfy/ldm/lightricks/vae/causal_video_autoencoder.py +++ b/comfy/ldm/lightricks/vae/causal_video_autoencoder.py @@ -973,7 +973,7 @@ class VideoVAE(nn.Module): norm_layer=config.get("norm_layer", "group_norm"), causal=config.get("causal_decoder", False), timestep_conditioning=self.timestep_conditioning, - spatial_padding_mode=config.get("spatial_padding_mode", "zeros"), + spatial_padding_mode=config.get("spatial_padding_mode", "reflect"), ) self.per_channel_statistics = processor() From 491fafbd6428e59c52093d76fde0bc10dfa723bb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 16 Jul 2025 11:42:07 -0700 Subject: [PATCH 0338/1073] Silence clip tokenizer warning. 
(#8934)

---
 comfy/sd1_tokenizer/tokenizer_config.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/sd1_tokenizer/tokenizer_config.json b/comfy/sd1_tokenizer/tokenizer_config.json
index 5ba7bf706..8f7b3151d 100644
--- a/comfy/sd1_tokenizer/tokenizer_config.json
+++ b/comfy/sd1_tokenizer/tokenizer_config.json
@@ -18,7 +18,7 @@
     "single_word": false
   },
   "errors": "replace",
-  "model_max_length": 77,
+  "model_max_length": 8192,
   "name_or_path": "openai/clip-vit-large-patch14",
   "pad_token": "<|endoftext|>",
   "special_tokens_map_file": "./special_tokens_map.json",

From 650838fd6fcb24e66bf82b3c75383f35433d7b8a Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 17 Jul 2025 01:11:07 -0700
Subject: [PATCH 0339/1073] Experimental CFGNorm node. (#8942)

This is from the new hidream e1.1 model code. Figured it might be useful as a generic cfg trick.

---
 comfy_extras/nodes_cfg.py | 28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/comfy_extras/nodes_cfg.py b/comfy_extras/nodes_cfg.py
index 1fb686644..1acaf15bc 100644
--- a/comfy_extras/nodes_cfg.py
+++ b/comfy_extras/nodes_cfg.py
@@ -40,6 +40,32 @@ class CFGZeroStar:
         m.set_model_sampler_post_cfg_function(cfg_zero_star)
         return (m, )

+class CFGNorm:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"model": ("MODEL",),
+                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
+                             }}
+    RETURN_TYPES = ("MODEL",)
+    RETURN_NAMES = ("patched_model",)
+    FUNCTION = "patch"
+    CATEGORY = "advanced/guidance"
+
+    def patch(self, model, strength):
+        m = model.clone()
+        def cfg_norm(args):
+            cond_p = args['cond_denoised']
+            pred_text_ = args["denoised"]
+
+            norm_full_cond = torch.norm(cond_p, dim=1, keepdim=True)
+            norm_pred_text = torch.norm(pred_text_, dim=1, keepdim=True)
+            scale = (norm_full_cond / (norm_pred_text + 1e-8)).clamp(min=0.0, max=1.0)
+            return pred_text_ * scale * strength
+
+        m.set_model_sampler_post_cfg_function(cfg_norm)
+        return (m, )
+
 NODE_CLASS_MAPPINGS = {
-    "CFGZeroStar": CFGZeroStar
+    "CFGZeroStar": CFGZeroStar,
+    "CFGNorm": CFGNorm,
 }

From 7f492522b6dcb142ff2c4d3438310773d9a80551 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 18 Jul 2025 02:43:02 -0700
Subject: [PATCH 0340/1073] Forgot this (#8957)

---
 comfy_extras/nodes_cfg.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy_extras/nodes_cfg.py b/comfy_extras/nodes_cfg.py
index 1acaf15bc..5abdc115a 100644
--- a/comfy_extras/nodes_cfg.py
+++ b/comfy_extras/nodes_cfg.py
@@ -50,6 +50,7 @@ class CFGNorm:
     RETURN_NAMES = ("patched_model",)
     FUNCTION = "patch"
     CATEGORY = "advanced/guidance"
+    EXPERIMENTAL = True

     def patch(self, model, strength):
         m = model.clone()

From 1b96fae1d4a8425c44d4d3bd60acd818d05bf4f6 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 19 Jul 2025 01:55:23 -0700
Subject: [PATCH 0341/1073] Add nested style of dual cfg to DualCFGGuider node.
(#8965) --- comfy_extras/nodes_custom_sampler.py | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 33bc41842..d17737e1a 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -683,9 +683,10 @@ class CFGGuider: return (guider,) class Guider_DualCFG(comfy.samplers.CFGGuider): - def set_cfg(self, cfg1, cfg2): + def set_cfg(self, cfg1, cfg2, nested=False): self.cfg1 = cfg1 self.cfg2 = cfg2 + self.nested = nested def set_conds(self, positive, middle, negative): middle = node_helpers.conditioning_set_values(middle, {"prompt_type": "negative"}) @@ -695,14 +696,20 @@ class Guider_DualCFG(comfy.samplers.CFGGuider): negative_cond = self.conds.get("negative", None) middle_cond = self.conds.get("middle", None) positive_cond = self.conds.get("positive", None) - if model_options.get("disable_cfg1_optimization", False) == False: - if math.isclose(self.cfg2, 1.0): - negative_cond = None - if math.isclose(self.cfg1, 1.0): - middle_cond = None - out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options) - return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1 + if self.nested: + out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options) + pred_text = comfy.samplers.cfg_function(self.inner_model, out[2], out[1], self.cfg1, x, timestep, model_options=model_options, cond=positive_cond, uncond=middle_cond) + return out[0] + self.cfg2 * (pred_text - out[0]) + else: + if model_options.get("disable_cfg1_optimization", False) == False: + if math.isclose(self.cfg2, 1.0): + negative_cond = None + if math.isclose(self.cfg1, 1.0): + middle_cond = None + + out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options) + return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1 class DualCFGGuider: @classmethod @@ -714,6 +721,7 @@ class DualCFGGuider: "negative": ("CONDITIONING", ), "cfg_conds": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "cfg_cond2_negative": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), + "style": (["regular", "nested"],), } } @@ -722,10 +730,10 @@ class DualCFGGuider: FUNCTION = "get_guider" CATEGORY = "sampling/custom_sampling/guiders" - def get_guider(self, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative): + def get_guider(self, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative, style): guider = Guider_DualCFG(model) guider.set_conds(cond1, cond2, negative) - guider.set_cfg(cfg_conds, cfg_cond2_negative) + guider.set_cfg(cfg_conds, cfg_cond2_negative, nested=(style == "nested")) return (guider,) class DisableNoise: From 1da5639e865a50f921d870a92c7c87110ce20c48 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sat, 19 Jul 2025 18:08:00 +0800 Subject: [PATCH 0342/1073] Update template to 0.1.37 (#8967) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7705918a8..a7e44095f 100644 --- 
a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.36 +comfyui-workflow-templates==0.1.37 comfyui-embedded-docs==0.2.4 torch torchsde From 100c2478eaba71ab735539fdc00c9d0de49bc224 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Sun, 20 Jul 2025 11:09:11 +0800 Subject: [PATCH 0343/1073] Add SamplingPercentToSigma node (#8963) It's helpful to adjust start_percent or end_percent based on the corresponding sigma. --- comfy_extras/nodes_custom_sampler.py | 30 ++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index d17737e1a..d011f433b 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -301,6 +301,35 @@ class ExtendIntermediateSigmas: return (extended_sigmas,) + +class SamplingPercentToSigma: + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "model": (IO.MODEL, {}), + "sampling_percent": (IO.FLOAT, {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.0001}), + "return_actual_sigma": (IO.BOOLEAN, {"default": False, "tooltip": "Return the actual sigma value instead of the value used for interval checks.\nThis only affects results at 0.0 and 1.0."}), + } + } + + RETURN_TYPES = (IO.FLOAT,) + RETURN_NAMES = ("sigma_value",) + CATEGORY = "sampling/custom_sampling/sigmas" + + FUNCTION = "get_sigma" + + def get_sigma(self, model, sampling_percent, return_actual_sigma): + model_sampling = model.get_model_object("model_sampling") + sigma_val = model_sampling.percent_to_sigma(sampling_percent) + if return_actual_sigma: + if sampling_percent == 0.0: + sigma_val = model_sampling.sigma_max.item() + elif sampling_percent == 1.0: + sigma_val = model_sampling.sigma_min.item() + return (sigma_val,) + + class KSamplerSelect: @classmethod def INPUT_TYPES(s): @@ -887,6 +916,7 @@ NODE_CLASS_MAPPINGS = { "FlipSigmas": FlipSigmas, "SetFirstSigma": SetFirstSigma, "ExtendIntermediateSigmas": ExtendIntermediateSigmas, + "SamplingPercentToSigma": SamplingPercentToSigma, "CFGGuider": CFGGuider, "DualCFGGuider": DualCFGGuider, From a0c0785635a9f4d2da64b58fef063825f386d8da Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 19 Jul 2025 22:24:09 -0700 Subject: [PATCH 0344/1073] Document what the fast_fp16_accumulation is in the portable. 
(#8973) --- .ci/windows_base_files/README_VERY_IMPORTANT.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index d46acbcbf..8ab70c890 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -4,6 +4,9 @@ if you have a NVIDIA gpu: run_nvidia_gpu.bat +if you want to enable the fast fp16 accumulation (faster for fp16 models with slightly less quality): + +run_nvidia_gpu_fast_fp16_accumulation.bat To run it in slow CPU mode: From 7d627f764c2137d816a39adbc358cb28c1718a47 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Mon, 21 Jul 2025 03:58:35 +0800 Subject: [PATCH 0345/1073] Update template to 0.1.39 (#8981) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a7e44095f..8f6a6d112 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.37 +comfyui-workflow-templates==0.1.39 comfyui-embedded-docs==0.2.4 torch torchsde From 9a470e073e2742d4edd6e7ea1ce28d861a77d9c4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 21 Jul 2025 14:05:43 -0400 Subject: [PATCH 0346/1073] ComfyUI version 0.3.45 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 7981fbaca..180ecaf8a 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.44" +__version__ = "0.3.45" diff --git a/pyproject.toml b/pyproject.toml index 96ead2157..b1d6d9df6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.44" +version = "0.3.45" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 54a45b996729b361ea12f473de760e481dcf1f0a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 21 Jul 2025 11:19:14 -0700 Subject: [PATCH 0347/1073] Replace torchaudio.load with pyav. 
(#8989) --- comfy_extras/nodes_audio.py | 58 ++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index 8cd647846..38697240e 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -278,6 +278,62 @@ class PreviewAudio(SaveAudio): "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } +def f32_pcm(wav: torch.Tensor) -> torch.Tensor: + """Convert audio to float 32 bits PCM format.""" + if wav.dtype.is_floating_point: + return wav + elif wav.dtype == torch.int16: + return wav.float() / (2 ** 15) + elif wav.dtype == torch.int32: + return wav.float() / (2 ** 31) + raise ValueError(f"Unsupported wav dtype: {wav.dtype}") + +def load(filepath: str, frame_offset: int = 0, num_frames: int = -1) -> tuple[torch.Tensor, int]: + with av.open(filepath) as af: + if not af.streams.audio: + raise ValueError("No audio stream found in the file.") + + stream = af.streams.audio[0] + sr = stream.codec_context.sample_rate + n_channels = stream.channels + + seek_time = frame_offset / sr if frame_offset > 0 else 0.0 + duration = num_frames / sr if num_frames > 0 else -1.0 + + sample_offset = int(sr * seek_time) + num_samples = int(sr * duration) if duration >= 0 else -1 + + # Small negative offset for MP3 artifacts, NOTE: this is LLM code so idk if it's actually necessary' + seek_sec = max(0, seek_time - 0.1) if filepath.lower().endswith('.mp3') else seek_time + af.seek(int(seek_sec / stream.time_base), stream=stream) + + frames = [] + length = 0 + for frame in af.decode(streams=stream.index): + current_offset = int(frame.rate * frame.pts * frame.time_base) + strip = max(0, sample_offset - current_offset) + + buf = torch.from_numpy(frame.to_ndarray()) + if buf.shape[0] != n_channels: + buf = buf.view(-1, n_channels).t() + + buf = buf[:, strip:] + frames.append(buf) + length += buf.shape[1] + + if num_samples > 0 and length >= num_samples: + break + + if not frames: + raise ValueError("No audio frames decoded.") + + wav = torch.cat(frames, dim=1) + if num_samples > 0: + wav = wav[:, :num_samples] + + wav = f32_pcm(wav) + return wav, sr + class LoadAudio: @classmethod def INPUT_TYPES(s): @@ -292,7 +348,7 @@ class LoadAudio: def load(self, audio): audio_path = folder_paths.get_annotated_filepath(audio) - waveform, sample_rate = torchaudio.load(audio_path) + waveform, sample_rate = load(audio_path) audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate} return (audio, ) From 5249e45a1c7d91656ebefdebe3815005ec3d39d7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 21 Jul 2025 12:23:41 -0700 Subject: [PATCH 0348/1073] Add hidream e1.1 example to readme. 
(#8990) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 0e021a687..d004364ee 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - Image Editing Models - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model) + - [HiDream E1.1](https://comfyanonymous.github.io/ComfyUI_examples/hidream/#hidream-e11) - Video Models - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) From 0aa1c58b04b27311c6ba38b1d9949e7e20037d00 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 21 Jul 2025 13:48:25 -0700 Subject: [PATCH 0349/1073] This is not needed. (#8991) --- comfy_extras/nodes_audio.py | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index 38697240e..a90b31779 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -288,7 +288,7 @@ def f32_pcm(wav: torch.Tensor) -> torch.Tensor: return wav.float() / (2 ** 31) raise ValueError(f"Unsupported wav dtype: {wav.dtype}") -def load(filepath: str, frame_offset: int = 0, num_frames: int = -1) -> tuple[torch.Tensor, int]: +def load(filepath: str) -> tuple[torch.Tensor, int]: with av.open(filepath) as af: if not af.streams.audio: raise ValueError("No audio stream found in the file.") @@ -297,40 +297,20 @@ def load(filepath: str, frame_offset: int = 0, num_frames: int = -1) -> tuple[to sr = stream.codec_context.sample_rate n_channels = stream.channels - seek_time = frame_offset / sr if frame_offset > 0 else 0.0 - duration = num_frames / sr if num_frames > 0 else -1.0 - - sample_offset = int(sr * seek_time) - num_samples = int(sr * duration) if duration >= 0 else -1 - - # Small negative offset for MP3 artifacts, NOTE: this is LLM code so idk if it's actually necessary' - seek_sec = max(0, seek_time - 0.1) if filepath.lower().endswith('.mp3') else seek_time - af.seek(int(seek_sec / stream.time_base), stream=stream) - frames = [] length = 0 for frame in af.decode(streams=stream.index): - current_offset = int(frame.rate * frame.pts * frame.time_base) - strip = max(0, sample_offset - current_offset) - buf = torch.from_numpy(frame.to_ndarray()) if buf.shape[0] != n_channels: buf = buf.view(-1, n_channels).t() - buf = buf[:, strip:] frames.append(buf) length += buf.shape[1] - if num_samples > 0 and length >= num_samples: - break - if not frames: raise ValueError("No audio frames decoded.") wav = torch.cat(frames, dim=1) - if num_samples > 0: - wav = wav[:, :num_samples] - wav = f32_pcm(wav) return wav, sr From 5ac9ec214ba3ef1632701416f27948a57ec60919 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 22 Jul 2025 01:07:51 -0700 Subject: [PATCH 0350/1073] Try to fix line endings workflow. 
(#9001) --- .github/workflows/check-line-endings.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/check-line-endings.yml b/.github/workflows/check-line-endings.yml index f20dca565..03b3e3ced 100644 --- a/.github/workflows/check-line-endings.yml +++ b/.github/workflows/check-line-endings.yml @@ -17,6 +17,7 @@ jobs: - name: Check for Windows line endings (CRLF) run: | # Get the list of changed files in the PR + git merge origin/${{ github.base_ref }} --no-edit CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}..HEAD) # Flag to track if CRLF is found From 255f1398638b265a47d0e74fb4759fe6cfc3b3d4 Mon Sep 17 00:00:00 2001 From: Simon Lui <502929+simonlui@users.noreply.github.com> Date: Tue, 22 Jul 2025 12:20:09 -0700 Subject: [PATCH 0351/1073] Add xpu version for async offload and some other things. (#9004) --- comfy/model_management.py | 41 +++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 816caf18f..ab1e9bf3a 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -101,7 +101,7 @@ if args.directml is not None: lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default. try: - import intel_extension_for_pytorch as ipex + import intel_extension_for_pytorch as ipex # noqa: F401 _ = torch.xpu.device_count() xpu_available = xpu_available or torch.xpu.is_available() except: @@ -186,8 +186,12 @@ def get_total_memory(dev=None, torch_total_too=False): elif is_intel_xpu(): stats = torch.xpu.memory_stats(dev) mem_reserved = stats['reserved_bytes.all.current'] + if torch_version_numeric < (2, 6): + mem_total_xpu = torch.xpu.get_device_properties(dev).total_memory + else: + _, mem_total_xpu = torch.xpu.mem_get_info(dev) mem_total_torch = mem_reserved - mem_total = torch.xpu.get_device_properties(dev).total_memory + mem_total = mem_total_xpu elif is_ascend_npu(): stats = torch.npu.memory_stats(dev) mem_reserved = stats['reserved_bytes.all.current'] @@ -929,7 +933,7 @@ def device_supports_non_blocking(device): if is_device_mps(device): return False #pytorch bug? 
mps doesn't support non blocking if is_intel_xpu(): - return False + return True if args.deterministic: #TODO: figure out why deterministic breaks non blocking from gpu to cpu (previews) return False if directml_enabled: @@ -968,6 +972,8 @@ def get_offload_stream(device): stream_counter = (stream_counter + 1) % len(ss) if is_device_cuda(device): ss[stream_counter].wait_stream(torch.cuda.current_stream()) + elif is_device_xpu(device): + ss[stream_counter].wait_stream(torch.xpu.current_stream()) stream_counters[device] = stream_counter return s elif is_device_cuda(device): @@ -979,6 +985,15 @@ def get_offload_stream(device): stream_counter = (stream_counter + 1) % len(ss) stream_counters[device] = stream_counter return s + elif is_device_xpu(device): + ss = [] + for k in range(NUM_STREAMS): + ss.append(torch.xpu.Stream(device=device, priority=0)) + STREAMS[device] = ss + s = ss[stream_counter] + stream_counter = (stream_counter + 1) % len(ss) + stream_counters[device] = stream_counter + return s return None def sync_stream(device, stream): @@ -986,6 +1001,8 @@ def sync_stream(device, stream): return if is_device_cuda(device): torch.cuda.current_stream().wait_stream(stream) + elif is_device_xpu(device): + torch.xpu.current_stream().wait_stream(stream) def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, stream=None): if device is None or weight.device == device: @@ -1092,8 +1109,11 @@ def get_free_memory(dev=None, torch_free_too=False): stats = torch.xpu.memory_stats(dev) mem_active = stats['active_bytes.all.current'] mem_reserved = stats['reserved_bytes.all.current'] + if torch_version_numeric < (2, 6): + mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved + else: + mem_free_xpu, _ = torch.xpu.mem_get_info(dev) mem_free_torch = mem_reserved - mem_active - mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved mem_free_total = mem_free_xpu + mem_free_torch elif is_ascend_npu(): stats = torch.npu.memory_stats(dev) @@ -1142,6 +1162,9 @@ def is_device_cpu(device): def is_device_mps(device): return is_device_type(device, 'mps') +def is_device_xpu(device): + return is_device_type(device, 'xpu') + def is_device_cuda(device): return is_device_type(device, 'cuda') @@ -1173,7 +1196,10 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma return False if is_intel_xpu(): - return True + if torch_version_numeric < (2, 3): + return True + else: + return torch.xpu.get_device_properties(device).has_fp16 if is_ascend_npu(): return True @@ -1236,7 +1262,10 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma return False if is_intel_xpu(): - return True + if torch_version_numeric < (2, 6): + return True + else: + return torch.xpu.get_device_capability(device)['has_bfloat16_conversions'] if is_ascend_npu(): return True From 5ad33787dee43d36f8d054c590818b3153b55370 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Jul 2025 11:20:49 -0700 Subject: [PATCH 0352/1073] Add default device argument. 
(#9023) --- comfy/cli_args.py | 3 ++- comfy/model_management.py | 1 + main.py | 9 +++++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index ef0d4337e..0d760d524 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -49,7 +49,8 @@ parser.add_argument("--temp-directory", type=str, default=None, help="Set the Co parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory. Overrides --base-directory.") parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.") parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.") -parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") +parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use. All other devices will not be visible.") +parser.add_argument("--default-device", type=int, default=None, metavar="DEFAULT_DEVICE_ID", help="Set the id of the default device, all other devices will stay visible.") cm_group = parser.add_mutually_exclusive_group() cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).") cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.") diff --git a/comfy/model_management.py b/comfy/model_management.py index ab1e9bf3a..346673895 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -880,6 +880,7 @@ def vae_dtype(device=None, allowed_dtypes=[]): return d # NOTE: bfloat16 seems to work on AMD for the VAE but is extremely slow in some cases compared to fp32 + # slowness still a problem on pytorch nightly 2.9.0.dev20250720+rocm6.4 tested on RDNA3 if d == torch.bfloat16 and (not is_amd()) and should_use_bf16(device): return d diff --git a/main.py b/main.py index 2b4ffafd4..e8ca8152a 100644 --- a/main.py +++ b/main.py @@ -115,6 +115,15 @@ if os.name == "nt": logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": + if args.default_device is not None: + default_dev = args.default_device + devices = list(range(32)) + devices.remove(default_dev) + devices.insert(0, default_dev) + devices = ','.join(map(str, devices)) + os.environ['CUDA_VISIBLE_DEVICES'] = str(devices) + os.environ['HIP_VISIBLE_DEVICES'] = str(devices) + if args.cuda_device is not None: os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) From 39dda1d40d1f2f18ccda8ade860932d0b8a07af4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Jul 2025 15:10:59 -0700 Subject: [PATCH 0353/1073] Fix xpu function not implemented. 
(#9026) --- comfy/model_management.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 346673895..746b063ed 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -186,10 +186,7 @@ def get_total_memory(dev=None, torch_total_too=False): elif is_intel_xpu(): stats = torch.xpu.memory_stats(dev) mem_reserved = stats['reserved_bytes.all.current'] - if torch_version_numeric < (2, 6): - mem_total_xpu = torch.xpu.get_device_properties(dev).total_memory - else: - _, mem_total_xpu = torch.xpu.mem_get_info(dev) + mem_total_xpu = torch.xpu.get_device_properties(dev).total_memory mem_total_torch = mem_reserved mem_total = mem_total_xpu elif is_ascend_npu(): From a86a58c308c2423e86054462a8c9f1125536a034 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Jul 2025 15:18:20 -0700 Subject: [PATCH 0354/1073] Fix xpu function not implemented p2. (#9027) --- comfy/model_management.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 746b063ed..42873d09b 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1107,10 +1107,7 @@ def get_free_memory(dev=None, torch_free_too=False): stats = torch.xpu.memory_stats(dev) mem_active = stats['active_bytes.all.current'] mem_reserved = stats['reserved_bytes.all.current'] - if torch_version_numeric < (2, 6): - mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved - else: - mem_free_xpu, _ = torch.xpu.mem_get_info(dev) + mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved mem_free_torch = mem_reserved - mem_active mem_free_total = mem_free_xpu + mem_free_torch elif is_ascend_npu(): From d3504e1778c0cc8992b04fe30dc0fae239c13713 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Jul 2025 16:21:29 -0700 Subject: [PATCH 0355/1073] Enable pytorch attention by default for gfx1201 on torch 2.8 (#9029) --- comfy/model_management.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 42873d09b..e8b9b5c81 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -308,7 +308,10 @@ try: logging.info("ROCm version: {}".format(rocm_version)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much - if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx1201 and gfx950 + if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950 + ENABLE_PYTORCH_ATTENTION = True + if torch_version_numeric >= (2, 8): + if any((a in arch) for a in ["gfx1201"]): ENABLE_PYTORCH_ATTENTION = True if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches From e78d2304966b6265fa2320b4d87dca534ea15642 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 23 Jul 2025 16:37:43 -0700 Subject: [PATCH 0356/1073] Only enable cuda malloc on cuda torch. 
(#9031) --- cuda_malloc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cuda_malloc.py b/cuda_malloc.py index eb2857c5f..c1d9ae3ca 100644 --- a/cuda_malloc.py +++ b/cuda_malloc.py @@ -74,7 +74,8 @@ if not args.cuda_malloc: module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) version = module.__version__ - if int(version[0]) >= 2: #enable by default for torch version 2.0 and up + + if int(version[0]) >= 2 and "+cu" in version: #enable by default for torch version 2.0 and up only on cuda torch args.cuda_malloc = cuda_malloc_supported() except: pass From e729a5cc1157bc0ece7daae9583c3a5a3ba95fbb Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Thu, 24 Jul 2025 07:47:05 +0800 Subject: [PATCH 0357/1073] Separate denoised and noise estimation in Euler CFG++ (#9008) This will change their behavior with the sampling CONST type. It also combines euler_cfg_pp and euler_ancestral_cfg_pp into one main function. --- comfy/k_diffusion/sampling.py | 64 +++++++++++++++++------------------ 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 2ed415b1f..a2bc492fd 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1210,39 +1210,21 @@ def sample_deis(model, x, sigmas, extra_args=None, callback=None, disable=None, return x_next -@torch.no_grad() -def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None): - extra_args = {} if extra_args is None else extra_args - - temp = [0] - def post_cfg_function(args): - temp[0] = args["uncond_denoised"] - return args["denoised"] - - model_options = extra_args.get("model_options", {}).copy() - extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True) - - s_in = x.new_ones([x.shape[0]]) - for i in trange(len(sigmas) - 1, disable=disable): - sigma_hat = sigmas[i] - denoised = model(x, sigma_hat * s_in, **extra_args) - d = to_d(x, sigma_hat, temp[0]) - if callback is not None: - callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised}) - # Euler method - x = denoised + d * sigmas[i + 1] - return x - @torch.no_grad() def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None): - """Ancestral sampling with Euler method steps.""" + """Ancestral sampling with Euler method steps (CFG++).""" extra_args = {} if extra_args is None else extra_args seed = extra_args.get("seed", None) noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler - temp = [0] + model_sampling = model.inner_model.model_patcher.get_model_object("model_sampling") + lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling) + + uncond_denoised = None + def post_cfg_function(args): - temp[0] = args["uncond_denoised"] + nonlocal uncond_denoised + uncond_denoised = args["uncond_denoised"] return args["denoised"] model_options = extra_args.get("model_options", {}).copy() @@ -1251,15 +1233,33 @@ def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=No s_in = x.new_ones([x.shape[0]]) for i in trange(len(sigmas) - 1, disable=disable): denoised = model(x, sigmas[i] * s_in, **extra_args) - sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta) if callback is not None: callback({'x': x, 'i': i, 
'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) - d = to_d(x, sigmas[i], temp[0]) - # Euler method - x = denoised + d * sigma_down - if sigmas[i + 1] > 0: - x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up + if sigmas[i + 1] == 0: + # Denoising step + x = denoised + else: + alpha_s = sigmas[i] * lambda_fn(sigmas[i]).exp() + alpha_t = sigmas[i + 1] * lambda_fn(sigmas[i + 1]).exp() + d = to_d(x, sigmas[i], alpha_s * uncond_denoised) # to noise + + # DDIM stochastic sampling + sigma_down, sigma_up = get_ancestral_step(sigmas[i] / alpha_s, sigmas[i + 1] / alpha_t, eta=eta) + sigma_down = alpha_t * sigma_down + + # Euler method + x = alpha_t * denoised + sigma_down * d + if eta > 0 and s_noise > 0: + x = x + alpha_t * noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up return x + + +@torch.no_grad() +def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None): + """Euler method steps (CFG++).""" + return sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=0.0, s_noise=0.0, noise_sampler=None) + + @torch.no_grad() def sample_dpmpp_2s_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None): """Ancestral sampling with DPM-Solver++(2S) second-order steps.""" From eb2f78b4e09b1970e2fc51fc5d2e062f1a826399 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Thu, 24 Jul 2025 08:57:27 +0800 Subject: [PATCH 0358/1073] [Training Node] algo support, grad acc, optional grad ckpt (#9015) * Add factorization utils for lokr * Add lokr train impl * Add loha train impl * Add adapter map for algo selection * Add optional grad ckpt and algo selection * Update __init__.py * correct key name for loha * Use custom fwd/bwd func and better init for loha * Support gradient accumulation * Fix bugs of loha * use more stable init * Add OFT training * linting --- comfy/weight_adapter/__init__.py | 13 ++- comfy/weight_adapter/base.py | 40 +++++++++ comfy/weight_adapter/loha.py | 134 ++++++++++++++++++++++++++++++- comfy/weight_adapter/lokr.py | 86 +++++++++++++++++++- comfy/weight_adapter/oft.py | 67 +++++++++++++++- comfy_extras/nodes_train.py | 47 ++++++++--- 6 files changed, 372 insertions(+), 15 deletions(-) diff --git a/comfy/weight_adapter/__init__.py b/comfy/weight_adapter/__init__.py index 560b82be3..b40f920e4 100644 --- a/comfy/weight_adapter/__init__.py +++ b/comfy/weight_adapter/__init__.py @@ -15,9 +15,20 @@ adapters: list[type[WeightAdapterBase]] = [ OFTAdapter, BOFTAdapter, ] +adapter_maps: dict[str, type[WeightAdapterBase]] = { + "LoRA": LoRAAdapter, + "LoHa": LoHaAdapter, + "LoKr": LoKrAdapter, + "OFT": OFTAdapter, + ## We disable not implemented algo for now + # "GLoRA": GLoRAAdapter, + # "BOFT": BOFTAdapter, +} + __all__ = [ "WeightAdapterBase", "WeightAdapterTrainBase", - "adapters" + "adapters", + "adapter_maps", ] + [a.__name__ for a in adapters] diff --git a/comfy/weight_adapter/base.py b/comfy/weight_adapter/base.py index b5c7db423..43644b106 100644 --- a/comfy/weight_adapter/base.py +++ b/comfy/weight_adapter/base.py @@ -133,3 +133,43 @@ def tucker_weight_from_conv(up, down, mid): def tucker_weight(wa, wb, t): temp = torch.einsum("i j ..., j r -> i r ...", t, wb) return torch.einsum("i j ..., i r -> r j ...", temp, wa) + + +def factorization(dimension: int, factor: int = -1) -> tuple[int, int]: + """ + return a tuple of two value of input dimension 
decomposed by the number closest to factor + second value is higher or equal than first value. + + examples) + factor + -1 2 4 8 16 ... + 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 + 128 -> 8, 16 128 -> 2, 64 128 -> 4, 32 128 -> 8, 16 128 -> 8, 16 + 250 -> 10, 25 250 -> 2, 125 250 -> 2, 125 250 -> 5, 50 250 -> 10, 25 + 360 -> 8, 45 360 -> 2, 180 360 -> 4, 90 360 -> 8, 45 360 -> 12, 30 + 512 -> 16, 32 512 -> 2, 256 512 -> 4, 128 512 -> 8, 64 512 -> 16, 32 + 1024 -> 32, 32 1024 -> 2, 512 1024 -> 4, 256 1024 -> 8, 128 1024 -> 16, 64 + """ + + if factor > 0 and (dimension % factor) == 0 and dimension >= factor**2: + m = factor + n = dimension // factor + if m > n: + n, m = m, n + return m, n + if factor < 0: + factor = dimension + m, n = 1, dimension + length = m + n + while m < n: + new_m = m + 1 + while dimension % new_m != 0: + new_m += 1 + new_n = dimension // new_m + if new_m + new_n > length or new_m > factor: + break + else: + m, n = new_m, new_n + if m > n: + n, m = m, n + return m, n diff --git a/comfy/weight_adapter/loha.py b/comfy/weight_adapter/loha.py index ce79abad5..55c97a3af 100644 --- a/comfy/weight_adapter/loha.py +++ b/comfy/weight_adapter/loha.py @@ -3,7 +3,120 @@ from typing import Optional import torch import comfy.model_management -from .base import WeightAdapterBase, weight_decompose +from .base import WeightAdapterBase, WeightAdapterTrainBase, weight_decompose + + +class HadaWeight(torch.autograd.Function): + @staticmethod + def forward(ctx, w1u, w1d, w2u, w2d, scale=torch.tensor(1)): + ctx.save_for_backward(w1d, w1u, w2d, w2u, scale) + diff_weight = ((w1u @ w1d) * (w2u @ w2d)) * scale + return diff_weight + + @staticmethod + def backward(ctx, grad_out): + (w1d, w1u, w2d, w2u, scale) = ctx.saved_tensors + grad_out = grad_out * scale + temp = grad_out * (w2u @ w2d) + grad_w1u = temp @ w1d.T + grad_w1d = w1u.T @ temp + + temp = grad_out * (w1u @ w1d) + grad_w2u = temp @ w2d.T + grad_w2d = w2u.T @ temp + + del temp + return grad_w1u, grad_w1d, grad_w2u, grad_w2d, None + + +class HadaWeightTucker(torch.autograd.Function): + @staticmethod + def forward(ctx, t1, w1u, w1d, t2, w2u, w2d, scale=torch.tensor(1)): + ctx.save_for_backward(t1, w1d, w1u, t2, w2d, w2u, scale) + + rebuild1 = torch.einsum("i j ..., j r, i p -> p r ...", t1, w1d, w1u) + rebuild2 = torch.einsum("i j ..., j r, i p -> p r ...", t2, w2d, w2u) + + return rebuild1 * rebuild2 * scale + + @staticmethod + def backward(ctx, grad_out): + (t1, w1d, w1u, t2, w2d, w2u, scale) = ctx.saved_tensors + grad_out = grad_out * scale + + temp = torch.einsum("i j ..., j r -> i r ...", t2, w2d) + rebuild = torch.einsum("i j ..., i r -> r j ...", temp, w2u) + + grad_w = rebuild * grad_out + del rebuild + + grad_w1u = torch.einsum("r j ..., i j ... -> r i", temp, grad_w) + grad_temp = torch.einsum("i j ..., i r -> r j ...", grad_w, w1u.T) + del grad_w, temp + + grad_w1d = torch.einsum("i r ..., i j ... -> r j", t1, grad_temp) + grad_t1 = torch.einsum("i j ..., j r -> i r ...", grad_temp, w1d.T) + del grad_temp + + temp = torch.einsum("i j ..., j r -> i r ...", t1, w1d) + rebuild = torch.einsum("i j ..., i r -> r j ...", temp, w1u) + + grad_w = rebuild * grad_out + del rebuild + + grad_w2u = torch.einsum("r j ..., i j ... -> r i", temp, grad_w) + grad_temp = torch.einsum("i j ..., i r -> r j ...", grad_w, w2u.T) + del grad_w, temp + + grad_w2d = torch.einsum("i r ..., i j ... 
-> r j", t2, grad_temp) + grad_t2 = torch.einsum("i j ..., j r -> i r ...", grad_temp, w2d.T) + del grad_temp + return grad_t1, grad_w1u, grad_w1d, grad_t2, grad_w2u, grad_w2d, None + + +class LohaDiff(WeightAdapterTrainBase): + def __init__(self, weights): + super().__init__() + # Unpack weights tuple from LoHaAdapter + w1a, w1b, alpha, w2a, w2b, t1, t2, _ = weights + + # Create trainable parameters + self.hada_w1_a = torch.nn.Parameter(w1a) + self.hada_w1_b = torch.nn.Parameter(w1b) + self.hada_w2_a = torch.nn.Parameter(w2a) + self.hada_w2_b = torch.nn.Parameter(w2b) + + self.use_tucker = False + if t1 is not None and t2 is not None: + self.use_tucker = True + self.hada_t1 = torch.nn.Parameter(t1) + self.hada_t2 = torch.nn.Parameter(t2) + else: + # Keep the attributes for consistent access + self.hada_t1 = None + self.hada_t2 = None + + # Store rank and non-trainable alpha + self.rank = w1b.shape[0] + self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False) + + def __call__(self, w): + org_dtype = w.dtype + + scale = self.alpha / self.rank + if self.use_tucker: + diff_weight = HadaWeightTucker.apply(self.hada_t1, self.hada_w1_a, self.hada_w1_b, self.hada_t2, self.hada_w2_a, self.hada_w2_b, scale) + else: + diff_weight = HadaWeight.apply(self.hada_w1_a, self.hada_w1_b, self.hada_w2_a, self.hada_w2_b, scale) + + # Add the scaled difference to the original weight + weight = w.to(diff_weight) + diff_weight.reshape(w.shape) + + return weight.to(org_dtype) + + def passive_memory_usage(self): + """Calculates memory usage of the trainable parameters.""" + return sum(param.numel() * param.element_size() for param in self.parameters()) class LoHaAdapter(WeightAdapterBase): @@ -13,6 +126,25 @@ class LoHaAdapter(WeightAdapterBase): self.loaded_keys = loaded_keys self.weights = weights + @classmethod + def create_train(cls, weight, rank=1, alpha=1.0): + out_dim = weight.shape[0] + in_dim = weight.shape[1:].numel() + mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=weight.dtype) + mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=weight.dtype) + torch.nn.init.normal_(mat1, 0.1) + torch.nn.init.constant_(mat2, 0.0) + mat3 = torch.empty(out_dim, rank, device=weight.device, dtype=weight.dtype) + mat4 = torch.empty(rank, in_dim, device=weight.device, dtype=weight.dtype) + torch.nn.init.normal_(mat3, 0.1) + torch.nn.init.normal_(mat4, 0.01) + return LohaDiff( + (mat1, mat2, alpha, mat3, mat4, None, None, None) + ) + + def to_train(self): + return LohaDiff(self.weights) + @classmethod def load( cls, diff --git a/comfy/weight_adapter/lokr.py b/comfy/weight_adapter/lokr.py index 51233db2d..49b0be55f 100644 --- a/comfy/weight_adapter/lokr.py +++ b/comfy/weight_adapter/lokr.py @@ -3,7 +3,77 @@ from typing import Optional import torch import comfy.model_management -from .base import WeightAdapterBase, weight_decompose +from .base import ( + WeightAdapterBase, + WeightAdapterTrainBase, + weight_decompose, + factorization, +) + + +class LokrDiff(WeightAdapterTrainBase): + def __init__(self, weights): + super().__init__() + (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2, dora_scale) = weights + self.use_tucker = False + if lokr_w1_a is not None: + _, rank_a = lokr_w1_a.shape[0], lokr_w1_a.shape[1] + rank_a, _ = lokr_w1_b.shape[0], lokr_w1_b.shape[1] + self.lokr_w1_a = torch.nn.Parameter(lokr_w1_a) + self.lokr_w1_b = torch.nn.Parameter(lokr_w1_b) + self.w1_rebuild = True + self.ranka = rank_a + + if lokr_w2_a is not None: + _, rank_b = 
lokr_w2_a.shape[0], lokr_w2_a.shape[1] + rank_b, _ = lokr_w2_b.shape[0], lokr_w2_b.shape[1] + self.lokr_w2_a = torch.nn.Parameter(lokr_w2_a) + self.lokr_w2_b = torch.nn.Parameter(lokr_w2_b) + if lokr_t2 is not None: + self.use_tucker = True + self.lokr_t2 = torch.nn.Parameter(lokr_t2) + self.w2_rebuild = True + self.rankb = rank_b + + if lokr_w1 is not None: + self.lokr_w1 = torch.nn.Parameter(lokr_w1) + self.w1_rebuild = False + + if lokr_w2 is not None: + self.lokr_w2 = torch.nn.Parameter(lokr_w2) + self.w2_rebuild = False + + self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False) + + @property + def w1(self): + if self.w1_rebuild: + return (self.lokr_w1_a @ self.lokr_w1_b) * (self.alpha / self.ranka) + else: + return self.lokr_w1 + + @property + def w2(self): + if self.w2_rebuild: + if self.use_tucker: + w2 = torch.einsum( + 'i j k l, j r, i p -> p r k l', + self.lokr_t2, + self.lokr_w2_b, + self.lokr_w2_a + ) + else: + w2 = self.lokr_w2_a @ self.lokr_w2_b + return w2 * (self.alpha / self.rankb) + else: + return self.lokr_w2 + + def __call__(self, w): + diff = torch.kron(self.w1, self.w2) + return w + diff.reshape(w.shape).to(w) + + def passive_memory_usage(self): + return sum(param.numel() * param.element_size() for param in self.parameters()) class LoKrAdapter(WeightAdapterBase): @@ -13,6 +83,20 @@ class LoKrAdapter(WeightAdapterBase): self.loaded_keys = loaded_keys self.weights = weights + @classmethod + def create_train(cls, weight, rank=1, alpha=1.0): + out_dim = weight.shape[0] + in_dim = weight.shape[1:].numel() + out1, out2 = factorization(out_dim, rank) + in1, in2 = factorization(in_dim, rank) + mat1 = torch.empty(out1, in1, device=weight.device, dtype=weight.dtype) + mat2 = torch.empty(out2, in2, device=weight.device, dtype=weight.dtype) + torch.nn.init.kaiming_uniform_(mat2, a=5**0.5) + torch.nn.init.constant_(mat1, 0.0) + return LokrDiff( + (mat1, mat2, alpha, None, None, None, None, None, None) + ) + @classmethod def load( cls, diff --git a/comfy/weight_adapter/oft.py b/comfy/weight_adapter/oft.py index 25009eca3..9d4982083 100644 --- a/comfy/weight_adapter/oft.py +++ b/comfy/weight_adapter/oft.py @@ -3,7 +3,58 @@ from typing import Optional import torch import comfy.model_management -from .base import WeightAdapterBase, weight_decompose +from .base import WeightAdapterBase, WeightAdapterTrainBase, weight_decompose, factorization + + +class OFTDiff(WeightAdapterTrainBase): + def __init__(self, weights): + super().__init__() + # Unpack weights tuple from LoHaAdapter + blocks, rescale, alpha, _ = weights + + # Create trainable parameters + self.oft_blocks = torch.nn.Parameter(blocks) + if rescale is not None: + self.rescale = torch.nn.Parameter(rescale) + self.rescaled = True + else: + self.rescaled = False + self.block_num, self.block_size, _ = blocks.shape + self.constraint = float(alpha) + self.alpha = torch.nn.Parameter(torch.tensor(alpha), requires_grad=False) + + def __call__(self, w): + org_dtype = w.dtype + I = torch.eye(self.block_size, device=self.oft_blocks.device) + + ## generate r + # for Q = -Q^T + q = self.oft_blocks - self.oft_blocks.transpose(1, 2) + normed_q = q + if self.constraint: + q_norm = torch.norm(q) + 1e-8 + if q_norm > self.constraint: + normed_q = q * self.constraint / q_norm + # use float() to prevent unsupported type + r = (I + normed_q) @ (I - normed_q).float().inverse() + + ## Apply chunked matmul on weight + _, *shape = w.shape + org_weight = w.to(dtype=r.dtype) + org_weight = org_weight.unflatten(0, (self.block_num, 
self.block_size)) + # Init R=0, so add I on it to ensure the output of step0 is original model output + weight = torch.einsum( + "k n m, k n ... -> k m ...", + r, + org_weight, + ).flatten(0, 1) + if self.rescaled: + weight = self.rescale * weight + return weight.to(org_dtype) + + def passive_memory_usage(self): + """Calculates memory usage of the trainable parameters.""" + return sum(param.numel() * param.element_size() for param in self.parameters()) class OFTAdapter(WeightAdapterBase): @@ -13,6 +64,18 @@ class OFTAdapter(WeightAdapterBase): self.loaded_keys = loaded_keys self.weights = weights + @classmethod + def create_train(cls, weight, rank=1, alpha=1.0): + out_dim = weight.shape[0] + block_size, block_num = factorization(out_dim, rank) + block = torch.zeros(block_num, block_size, block_size, device=weight.device, dtype=weight.dtype) + return OFTDiff( + (block, None, alpha, None) + ) + + def to_train(self): + return OFTDiff(self.weights) + @classmethod def load( cls, @@ -60,6 +123,8 @@ class OFTAdapter(WeightAdapterBase): blocks = v[0] rescale = v[1] alpha = v[2] + if alpha is None: + alpha = 0 dora_scale = v[3] blocks = comfy.model_management.cast_to_device(blocks, weight.device, intermediate_dtype) diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index 3d05fdab5..c3aaaee9b 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -20,7 +20,7 @@ import folder_paths import node_helpers from comfy.cli_args import args from comfy.comfy_types.node_typing import IO -from comfy.weight_adapter import adapters +from comfy.weight_adapter import adapters, adapter_maps def make_batch_extra_option_dict(d, indicies, full_size=None): @@ -39,13 +39,13 @@ def make_batch_extra_option_dict(d, indicies, full_size=None): class TrainSampler(comfy.samplers.Sampler): - - def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): + def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, grad_acc=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): self.loss_fn = loss_fn self.optimizer = optimizer self.loss_callback = loss_callback self.batch_size = batch_size self.total_steps = total_steps + self.grad_acc = grad_acc self.seed = seed self.training_dtype = training_dtype @@ -92,8 +92,9 @@ class TrainSampler(comfy.samplers.Sampler): self.loss_callback(loss.item()) pbar.set_postfix({"loss": f"{loss.item():.4f}"}) - self.optimizer.step() - self.optimizer.zero_grad() + if (i+1) % self.grad_acc == 0: + self.optimizer.step() + self.optimizer.zero_grad() torch.cuda.empty_cache() return torch.zeros_like(latent_image) @@ -419,6 +420,16 @@ class TrainLoraNode: "tooltip": "The batch size to use for training.", }, ), + "grad_accumulation_steps": ( + IO.INT, + { + "default": 1, + "min": 1, + "max": 1024, + "step": 1, + "tooltip": "The number of gradient accumulation steps to use for training.", + } + ), "steps": ( IO.INT, { @@ -478,6 +489,17 @@ class TrainLoraNode: ["bf16", "fp32"], {"default": "bf16", "tooltip": "The dtype to use for lora."}, ), + "algorithm": ( + list(adapter_maps.keys()), + {"default": list(adapter_maps.keys())[0], "tooltip": "The algorithm to use for training."}, + ), + "gradient_checkpointing": ( + IO.BOOLEAN, + { + "default": True, + "tooltip": "Use gradient checkpointing for training.", + } + ), "existing_lora": ( folder_paths.get_filename_list("loras") + ["[None]"], { @@ -501,6 +523,7 @@ class TrainLoraNode: positive, batch_size, steps, + grad_accumulation_steps, 
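The `grad_acc` change to `TrainSampler` above is standard gradient accumulation: `backward()` sums gradients across micro-batches and the optimizer steps only once per `grad_acc` iterations, giving an effective batch of `batch_size * grad_acc` (hence `total_steps=steps*grad_accumulation_steps` when the sampler is constructed). A minimal standalone sketch; note the node accumulates unscaled gradients, while a common variant divides each loss by `grad_acc` to average them instead:

```python
import torch

def train_with_accumulation(model, batches, optimizer, loss_fn, grad_acc=4):
    optimizer.zero_grad()
    for i, (x, target) in enumerate(batches):
        loss = loss_fn(model(x), target)
        loss.backward()                  # gradients sum into .grad
        if (i + 1) % grad_acc == 0:      # step once per grad_acc micro-batches
            optimizer.step()
            optimizer.zero_grad()
```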
learning_rate, rank, optimizer, @@ -508,6 +531,8 @@ class TrainLoraNode: seed, training_dtype, lora_dtype, + algorithm, + gradient_checkpointing, existing_lora, ): mp = model.clone() @@ -558,10 +583,8 @@ class TrainLoraNode: if existing_adapter is not None: break else: - # If no existing adapter found, use LoRA - # We will add algo option in the future existing_adapter = None - adapter_cls = adapters[0] + adapter_cls = adapter_maps[algorithm] if existing_adapter is not None: train_adapter = existing_adapter.to_train().to(lora_dtype) @@ -615,8 +638,9 @@ class TrainLoraNode: criterion = torch.nn.SmoothL1Loss() # setup models - for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model): - patch(m) + if gradient_checkpointing: + for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model): + patch(m) mp.model.requires_grad_(False) comfy.model_management.load_models_gpu([mp], memory_required=1e20, force_full_load=True) @@ -629,7 +653,8 @@ class TrainLoraNode: optimizer, loss_callback=loss_callback, batch_size=batch_size, - total_steps=steps, + grad_acc=grad_accumulation_steps, + total_steps=steps*grad_accumulation_steps, seed=seed, training_dtype=dtype ) From 0ccc88b03fbe190135e24ac04612565f8f0756b4 Mon Sep 17 00:00:00 2001 From: honglyua Date: Fri, 25 Jul 2025 01:57:36 +0800 Subject: [PATCH 0359/1073] Support Iluvatar CoreX (#8585) * Support Iluvatar CoreX Co-authored-by: mingjiang.li --- README.md | 7 +++++++ comfy/model_management.py | 23 ++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d004364ee..a148623cd 100644 --- a/README.md +++ b/README.md @@ -294,6 +294,13 @@ For models compatible with Cambricon Extension for PyTorch (torch_mlu). Here's a 2. Next, install the PyTorch(torch_mlu) following the instructions on the [Installation](https://www.cambricon.com/docs/sdk_1.15.0/cambricon_pytorch_1.17.0/user_guide_1.9/index.html) 3. Launch ComfyUI by running `python main.py` +#### Iluvatar Corex + +For models compatible with Iluvatar Extension for PyTorch. Here's a step-by-step guide tailored to your platform and installation method: + +1. Install the Iluvatar Corex Toolkit by adhering to the platform-specific instructions on the [Installation](https://support.iluvatar.com/#/DocumentCentre?id=1&nameCenter=2&productId=520117912052801536) +2. 
Launch ComfyUI by running `python main.py` + # Running ```python main.py``` diff --git a/comfy/model_management.py b/comfy/model_management.py index e8b9b5c81..9add54ceb 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -128,6 +128,11 @@ try: except: mlu_available = False +try: + ixuca_available = hasattr(torch, "corex") +except: + ixuca_available = False + if args.cpu: cpu_state = CPUState.CPU @@ -151,6 +156,12 @@ def is_mlu(): return True return False +def is_ixuca(): + global ixuca_available + if ixuca_available: + return True + return False + def get_torch_device(): global directml_enabled global cpu_state @@ -289,7 +300,7 @@ try: if torch_version_numeric[0] >= 2: if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False: ENABLE_PYTORCH_ATTENTION = True - if is_intel_xpu() or is_ascend_npu() or is_mlu(): + if is_intel_xpu() or is_ascend_npu() or is_mlu() or is_ixuca(): if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: ENABLE_PYTORCH_ATTENTION = True except: @@ -1045,6 +1056,8 @@ def xformers_enabled(): return False if is_mlu(): return False + if is_ixuca(): + return False if directml_enabled: return False return XFORMERS_IS_AVAILABLE @@ -1080,6 +1093,8 @@ def pytorch_attention_flash_attention(): return True if is_amd(): return True #if you have pytorch attention enabled on AMD it probably supports at least mem efficient attention + if is_ixuca(): + return True return False def force_upcast_attention_dtype(): @@ -1205,6 +1220,9 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma if is_mlu(): return True + if is_ixuca(): + return True + if torch.version.hip: return True @@ -1268,6 +1286,9 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma if is_ascend_npu(): return True + if is_ixuca(): + return True + if is_amd(): arch = torch.cuda.get_device_properties(device).gcnArchName if any((a in arch) for a in ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012", "gfx906", "gfx900", "gfx803"]): # RDNA2 and older don't support bf16 From d03ae077b4330f58e7caba53ff94e7fd58d0dc7d Mon Sep 17 00:00:00 2001 From: SHIVANSH GUPTA <121501003+shivansh-gupta4@users.noreply.github.com> Date: Thu, 24 Jul 2025 23:35:54 +0530 Subject: [PATCH 0360/1073] Added parameter required_frontend_version in the /system_stats API response (#8875) * Added the parameter required_frontend_version in the /system_stats api response * Update server.py * Created a function get_required_frontend_version and wrote tests for it * Refactored the function to return currently installed frontend pacakage version * Moved required_frontend to a new function and imported that in server.py * Corrected test cases using mocking techniques * Corrected files to comply with ruff formatting --- app/frontend_management.py | 47 +++++++++++++++++--- server.py | 2 + tests-unit/app_test/frontend_manager_test.py | 35 ++++++++++++++- 3 files changed, 77 insertions(+), 7 deletions(-) diff --git a/app/frontend_management.py b/app/frontend_management.py index 001ebbecb..0bee73685 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -29,18 +29,48 @@ def frontend_install_warning_message(): This error is happening because the ComfyUI frontend is no longer shipped as part of the main repo but as a pip package instead. 
""".strip() +def parse_version(version: str) -> tuple[int, int, int]: + return tuple(map(int, version.split("."))) + +def is_valid_version(version: str) -> bool: + """Validate if a string is a valid semantic version (X.Y.Z format).""" + pattern = r"^(\d+)\.(\d+)\.(\d+)$" + return bool(re.match(pattern, version)) + +def get_installed_frontend_version(): + """Get the currently installed frontend package version.""" + frontend_version_str = version("comfyui-frontend-package") + return frontend_version_str + +def get_required_frontend_version(): + """Get the required frontend version from requirements.txt.""" + try: + with open(requirements_path, "r", encoding="utf-8") as f: + for line in f: + line = line.strip() + if line.startswith("comfyui-frontend-package=="): + version_str = line.split("==")[-1] + if not is_valid_version(version_str): + logging.error(f"Invalid version format in requirements.txt: {version_str}") + return None + return version_str + logging.error("comfyui-frontend-package not found in requirements.txt") + return None + except FileNotFoundError: + logging.error("requirements.txt not found. Cannot determine required frontend version.") + return None + except Exception as e: + logging.error(f"Error reading requirements.txt: {e}") + return None def check_frontend_version(): """Check if the frontend version is up to date.""" - def parse_version(version: str) -> tuple[int, int, int]: - return tuple(map(int, version.split("."))) - try: - frontend_version_str = version("comfyui-frontend-package") + frontend_version_str = get_installed_frontend_version() frontend_version = parse_version(frontend_version_str) - with open(requirements_path, "r", encoding="utf-8") as f: - required_frontend = parse_version(f.readline().split("=")[-1]) + required_frontend_str = get_required_frontend_version() + required_frontend = parse_version(required_frontend_str) if frontend_version < required_frontend: app.logger.log_startup_warning( f""" @@ -168,6 +198,11 @@ def download_release_asset_zip(release: Release, destination_path: str) -> None: class FrontendManager: CUSTOM_FRONTENDS_ROOT = str(Path(__file__).parents[1] / "web_custom_versions") + @classmethod + def get_required_frontend_version(cls) -> str: + """Get the required frontend package version.""" + return get_required_frontend_version() + @classmethod def default_frontend_path(cls) -> str: try: diff --git a/server.py b/server.py index 71a58f0fa..f4de0079b 100644 --- a/server.py +++ b/server.py @@ -553,6 +553,7 @@ class PromptServer(): ram_free = comfy.model_management.get_free_memory(cpu_device) vram_total, torch_vram_total = comfy.model_management.get_total_memory(device, torch_total_too=True) vram_free, torch_vram_free = comfy.model_management.get_free_memory(device, torch_free_too=True) + required_frontend_version = FrontendManager.get_required_frontend_version() system_stats = { "system": { @@ -560,6 +561,7 @@ class PromptServer(): "ram_total": ram_total, "ram_free": ram_free, "comfyui_version": __version__, + "required_frontend_version": required_frontend_version, "python_version": sys.version, "pytorch_version": comfy.model_management.torch_version, "embedded_python": os.path.split(os.path.split(sys.executable)[0])[1] == "python_embeded", diff --git a/tests-unit/app_test/frontend_manager_test.py b/tests-unit/app_test/frontend_manager_test.py index ce67df6c6..ce43ac564 100644 --- a/tests-unit/app_test/frontend_manager_test.py +++ b/tests-unit/app_test/frontend_manager_test.py @@ -1,7 +1,7 @@ import argparse import pytest from 
requests.exceptions import HTTPError -from unittest.mock import patch +from unittest.mock import patch, mock_open from app.frontend_management import ( FrontendManager, @@ -172,3 +172,36 @@ def test_init_frontend_fallback_on_error(): # Assert assert frontend_path == "/default/path" mock_check.assert_called_once() + + +def test_get_frontend_version(): + # Arrange + expected_version = "1.25.0" + mock_requirements_content = """torch +torchsde +comfyui-frontend-package==1.25.0 +other-package==1.0.0 +numpy""" + + # Act + with patch("builtins.open", mock_open(read_data=mock_requirements_content)): + version = FrontendManager.get_required_frontend_version() + + # Assert + assert version == expected_version + + +def test_get_frontend_version_invalid_semver(): + # Arrange + mock_requirements_content = """torch +torchsde +comfyui-frontend-package==1.29.3.75 +other-package==1.0.0 +numpy""" + + # Act + with patch("builtins.open", mock_open(read_data=mock_requirements_content)): + version = FrontendManager.get_required_frontend_version() + + # Assert + assert version is None From 69cb57b3426b08a82e7fb713b0b48c23725f3da7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 24 Jul 2025 12:06:25 -0700 Subject: [PATCH 0361/1073] Print xpu device name. (#9035) --- comfy/model_management.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 9add54ceb..232d363aa 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -392,6 +392,8 @@ def get_torch_device_name(device): except: allocator_backend = "" return "{} {} : {}".format(device, torch.cuda.get_device_name(device), allocator_backend) + elif device.type == "xpu": + return "{} {}".format(device, torch.xpu.get_device_name(device)) else: return "{}".format(device.type) elif is_intel_xpu(): From 4293e4da214f77a3fde97c15f0691307e61bc18d Mon Sep 17 00:00:00 2001 From: Eugene Fairley Date: Thu, 24 Jul 2025 17:59:19 -0700 Subject: [PATCH 0362/1073] Add WAN ATI support (#8874) * Add WAN ATI support * Fixes * Fix length * Remove extra functions * Fix * Fix * Ruff fix * Remove torch.no_grad * Add batch trajectory logic * Scale inputs before and after motion patch * Batch image/trajectory * Ruff fix * Clean up --- comfy/utils.py | 20 +++ comfy_extras/nodes_wan.py | 305 +++++++++++++++++++++++++++++++++++++- 2 files changed, 324 insertions(+), 1 deletion(-) diff --git a/comfy/utils.py b/comfy/utils.py index 9c076a0e0..fab28cf08 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -698,6 +698,26 @@ def resize_to_batch_size(tensor, batch_size): return output +def resize_list_to_batch_size(l, batch_size): + in_batch_size = len(l) + if in_batch_size == batch_size or in_batch_size == 0: + return l + + if batch_size <= 1: + return l[:batch_size] + + output = [] + if batch_size < in_batch_size: + scale = (in_batch_size - 1) / (batch_size - 1) + for i in range(batch_size): + output.append(l[min(round(i * scale), in_batch_size - 1)]) + else: + scale = in_batch_size / batch_size + for i in range(batch_size): + output.append(l[min(math.floor((i + 0.5) * scale), in_batch_size - 1)]) + + return output + def convert_sd_to(state_dict, dtype): keys = list(state_dict.keys()) for k in keys: diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index d6097a104..d71908f31 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -1,3 +1,4 @@ +import math import nodes import node_helpers import torch @@ -5,7 +6,9 @@ import 
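`resize_list_to_batch_size` above is the list counterpart of the existing tensor helper `resize_to_batch_size`: it duplicates or drops entries at evenly spaced indices so per-item data (here, one trajectory list per video) matches the requested batch size. Its behavior on small inputs:

```python
from comfy.utils import resize_list_to_batch_size

# Upscaling 3 -> 5 repeats edge entries evenly; downscaling 3 -> 2 keeps the ends.
assert resize_list_to_batch_size(["a", "b", "c"], 5) == ["a", "a", "b", "c", "c"]
assert resize_list_to_batch_size(["a", "b", "c"], 2) == ["a", "c"]
```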
comfy.model_management import comfy.utils import comfy.latent_formats import comfy.clip_vision - +import json +import numpy as np +from typing import Tuple class WanImageToVideo: @classmethod @@ -383,7 +386,307 @@ class WanPhantomSubjectToVideo: out_latent["samples"] = latent return (positive, cond2, negative, out_latent) +def parse_json_tracks(tracks): + """Parse JSON track data into a standardized format""" + tracks_data = [] + try: + # If tracks is a string, try to parse it as JSON + if isinstance(tracks, str): + parsed = json.loads(tracks.replace("'", '"')) + tracks_data.extend(parsed) + else: + # If tracks is a list of strings, parse each one + for track_str in tracks: + parsed = json.loads(track_str.replace("'", '"')) + tracks_data.append(parsed) + + # Check if we have a single track (dict with x,y) or a list of tracks + if tracks_data and isinstance(tracks_data[0], dict) and 'x' in tracks_data[0]: + # Single track detected, wrap it in a list + tracks_data = [tracks_data] + elif tracks_data and isinstance(tracks_data[0], list) and tracks_data[0] and isinstance(tracks_data[0][0], dict) and 'x' in tracks_data[0][0]: + # Already a list of tracks, nothing to do + pass + else: + # Unexpected format + pass + + except json.JSONDecodeError: + tracks_data = [] + return tracks_data + +def process_tracks(tracks_np: np.ndarray, frame_size: Tuple[int, int], num_frames, quant_multi: int = 8, **kwargs): + # tracks: shape [t, h, w, 3] => samples align with 24 fps, model trained with 16 fps. + # frame_size: tuple (W, H) + tracks = torch.from_numpy(tracks_np).float() + + if tracks.shape[1] == 121: + tracks = torch.permute(tracks, (1, 0, 2, 3)) + + tracks, visibles = tracks[..., :2], tracks[..., 2:3] + + short_edge = min(*frame_size) + + frame_center = torch.tensor([*frame_size]).type_as(tracks) / 2 + tracks = tracks - frame_center + + tracks = tracks / short_edge * 2 + + visibles = visibles * 2 - 1 + + trange = torch.linspace(-1, 1, tracks.shape[0]).view(-1, 1, 1, 1).expand(*visibles.shape) + + out_ = torch.cat([trange, tracks, visibles], dim=-1).view(121, -1, 4) + + out_0 = out_[:1] + + out_l = out_[1:] # 121 => 120 | 1 + a = 120 // math.gcd(120, num_frames) + b = num_frames // math.gcd(120, num_frames) + out_l = torch.repeat_interleave(out_l, b, dim=0)[1::a] # 120 => 120 * b => 120 * b / a == F + + final_result = torch.cat([out_0, out_l], dim=0) + + return final_result + +FIXED_LENGTH = 121 +def pad_pts(tr): + """Convert list of {x,y} to (FIXED_LENGTH,1,3) array, padding/truncating.""" + pts = np.array([[p['x'], p['y'], 1] for p in tr], dtype=np.float32) + n = pts.shape[0] + if n < FIXED_LENGTH: + pad = np.zeros((FIXED_LENGTH - n, 3), dtype=np.float32) + pts = np.vstack((pts, pad)) + else: + pts = pts[:FIXED_LENGTH] + return pts.reshape(FIXED_LENGTH, 1, 3) + +def ind_sel(target: torch.Tensor, ind: torch.Tensor, dim: int = 1): + """Index selection utility function""" + assert ( + len(ind.shape) > dim + ), "Index must have the target dim, but get dim: %d, ind shape: %s" % (dim, str(ind.shape)) + + target = target.expand( + *tuple( + [ind.shape[k] if target.shape[k] == 1 else -1 for k in range(dim)] + + [ + -1, + ] + * (len(target.shape) - dim) + ) + ) + + ind_pad = ind + + if len(target.shape) > dim + 1: + for _ in range(len(target.shape) - (dim + 1)): + ind_pad = ind_pad.unsqueeze(-1) + ind_pad = ind_pad.expand(*(-1,) * (dim + 1), *target.shape[(dim + 1) : :]) + + return torch.gather(target, dim=dim, index=ind_pad) + +def merge_final(vert_attr: torch.Tensor, weight: torch.Tensor, vert_assign: 
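Each JSON track is a variable-length list of `{x, y}` points; `pad_pts` above normalizes it to a fixed `(121, 1, 3)` array whose third channel is a visibility flag — 1 for real points, 0 for the zero-padding past the track's end. Equivalently:

```python
import numpy as np

track = [{"x": 10.0, "y": 20.0}, {"x": 11.0, "y": 21.0}]   # a 2-point track
pts = np.array([[p["x"], p["y"], 1] for p in track], dtype=np.float32)
pts = np.vstack((pts, np.zeros((121 - len(pts), 3), dtype=np.float32)))
pts = pts.reshape(121, 1, 3)
assert pts.shape == (121, 1, 3)
assert pts[2:].sum() == 0   # padded tail: position zero, visibility zero
```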
torch.Tensor): + """Merge vertex attributes with weights""" + target_dim = len(vert_assign.shape) - 1 + if len(vert_attr.shape) == 2: + assert vert_attr.shape[0] > vert_assign.max() + new_shape = [1] * target_dim + list(vert_attr.shape) + tensor = vert_attr.reshape(new_shape) + sel_attr = ind_sel(tensor, vert_assign.type(torch.long), dim=target_dim) + else: + assert vert_attr.shape[1] > vert_assign.max() + new_shape = [vert_attr.shape[0]] + [1] * (target_dim - 1) + list(vert_attr.shape[1:]) + tensor = vert_attr.reshape(new_shape) + sel_attr = ind_sel(tensor, vert_assign.type(torch.long), dim=target_dim) + + final_attr = torch.sum(sel_attr * weight.unsqueeze(-1), dim=-2) + return final_attr + + +def _patch_motion_single( + tracks: torch.FloatTensor, # (B, T, N, 4) + vid: torch.FloatTensor, # (C, T, H, W) + temperature: float, + vae_divide: tuple, + topk: int, +): + """Apply motion patching based on tracks""" + _, T, H, W = vid.shape + N = tracks.shape[2] + _, tracks_xy, visible = torch.split( + tracks, [1, 2, 1], dim=-1 + ) # (B, T, N, 2) | (B, T, N, 1) + tracks_n = tracks_xy / torch.tensor([W / min(H, W), H / min(H, W)], device=tracks_xy.device) + tracks_n = tracks_n.clamp(-1, 1) + visible = visible.clamp(0, 1) + + xx = torch.linspace(-W / min(H, W), W / min(H, W), W) + yy = torch.linspace(-H / min(H, W), H / min(H, W), H) + + grid = torch.stack(torch.meshgrid(yy, xx, indexing="ij")[::-1], dim=-1).to( + tracks_xy.device + ) + + tracks_pad = tracks_xy[:, 1:] + visible_pad = visible[:, 1:] + + visible_align = visible_pad.view(T - 1, 4, *visible_pad.shape[2:]).sum(1) + tracks_align = (tracks_pad * visible_pad).view(T - 1, 4, *tracks_pad.shape[2:]).sum( + 1 + ) / (visible_align + 1e-5) + dist_ = ( + (tracks_align[:, None, None] - grid[None, :, :, None]).pow(2).sum(-1) + ) # T, H, W, N + weight = torch.exp(-dist_ * temperature) * visible_align.clamp(0, 1).view( + T - 1, 1, 1, N + ) + vert_weight, vert_index = torch.topk( + weight, k=min(topk, weight.shape[-1]), dim=-1 + ) + + grid_mode = "bilinear" + point_feature = torch.nn.functional.grid_sample( + vid.permute(1, 0, 2, 3)[:1], + tracks_n[:, :1].type(vid.dtype), + mode=grid_mode, + padding_mode="zeros", + align_corners=False, + ) + point_feature = point_feature.squeeze(0).squeeze(1).permute(1, 0) # N, C=16 + + out_feature = merge_final(point_feature, vert_weight, vert_index).permute(3, 0, 1, 2) # T - 1, H, W, C => C, T - 1, H, W + out_weight = vert_weight.sum(-1) # T - 1, H, W + + # out feature -> already soft weighted + mix_feature = out_feature + vid[:, 1:] * (1 - out_weight.clamp(0, 1)) + + out_feature_full = torch.cat([vid[:, :1], mix_feature], dim=1) # C, T, H, W + out_mask_full = torch.cat([torch.ones_like(out_weight[:1]), out_weight], dim=0) # T, H, W + + return out_mask_full[None].expand(vae_divide[0], -1, -1, -1), out_feature_full + + +def patch_motion( + tracks: torch.FloatTensor, # (B, TB, T, N, 4) + vid: torch.FloatTensor, # (C, T, H, W) + temperature: float = 220.0, + vae_divide: tuple = (4, 16), + topk: int = 2, +): + B = len(tracks) + + # Process each batch separately + out_masks = [] + out_features = [] + + for b in range(B): + mask, feature = _patch_motion_single( + tracks[b], # (T, N, 4) + vid[b], # (C, T, H, W) + temperature, + vae_divide, + topk + ) + out_masks.append(mask) + out_features.append(feature) + + # Stack results: (B, C, T, H, W) + out_mask_full = torch.stack(out_masks, dim=0) + out_feature_full = torch.stack(out_features, dim=0) + + return out_mask_full, out_feature_full + +class WanTrackToVideo: + @classmethod 
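Inside `_patch_motion_single` above, every latent pixel picks its `topk` nearest tracks and blends the features sampled at those tracks; `merge_final` implements the gather with broadcasting, but in the unbatched case it reduces to plain fancy indexing. A sketch with illustrative shapes:

```python
import torch

N, C, T, H, W, k = 10, 16, 4, 8, 8, 2
point_feature = torch.randn(N, C)                 # one feature row per track
vert_weight = torch.rand(T, H, W, k)              # per-pixel top-k weights
vert_index = torch.randint(0, N, (T, H, W, k))    # per-pixel top-k track ids

sel = point_feature[vert_index]                   # (T, H, W, k, C) gather
blended = (sel * vert_weight.unsqueeze(-1)).sum(dim=-2)   # (T, H, W, C)
assert blended.shape == (T, H, W, C)
```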
+ def INPUT_TYPES(s): + return {"required": { + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "vae": ("VAE", ), + "tracks": ("STRING", {"multiline": True, "default": "[]"}), + "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "temperature": ("FLOAT", {"default": 220.0, "min": 1.0, "max": 1000.0, "step": 0.1}), + "topk": ("INT", {"default": 2, "min": 1, "max": 10}), + "start_image": ("IMAGE", ), + }, + "optional": { + "clip_vision_output": ("CLIP_VISION_OUTPUT", ), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + + CATEGORY = "conditioning/video_models" + + def encode(self, positive, negative, vae, tracks, width, height, length, batch_size, + temperature, topk, start_image=None, clip_vision_output=None): + + tracks_data = parse_json_tracks(tracks) + + if not tracks_data: + return WanImageToVideo().encode(positive, negative, vae, width, height, length, batch_size, start_image=start_image, clip_vision_output=clip_vision_output) + + latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], + device=comfy.model_management.intermediate_device()) + + if isinstance(tracks_data[0][0], dict): + tracks_data = [tracks_data] + + processed_tracks = [] + for batch in tracks_data: + arrs = [] + for track in batch: + pts = pad_pts(track) + arrs.append(pts) + + tracks_np = np.stack(arrs, axis=0) + processed_tracks.append(process_tracks(tracks_np, (width, height), length - 1).unsqueeze(0)) + + if start_image is not None: + start_image = comfy.utils.common_upscale(start_image[:batch_size].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + videos = torch.ones((start_image.shape[0], length, height, width, start_image.shape[-1]), device=start_image.device, dtype=start_image.dtype) * 0.5 + for i in range(start_image.shape[0]): + videos[i, 0] = start_image[i] + + latent_videos = [] + videos = comfy.utils.resize_to_batch_size(videos, batch_size) + for i in range(batch_size): + latent_videos += [vae.encode(videos[i, :, :, :, :3])] + y = torch.cat(latent_videos, dim=0) + + # Scale latent since patch_motion is non-linear + y = comfy.latent_formats.Wan21().process_in(y) + + processed_tracks = comfy.utils.resize_list_to_batch_size(processed_tracks, batch_size) + res = patch_motion( + processed_tracks, y, temperature=temperature, topk=topk, vae_divide=(4, 16) + ) + + mask, concat_latent_image = res + concat_latent_image = comfy.latent_formats.Wan21().process_out(concat_latent_image) + mask = -mask + 1.0 # Invert mask to match expected format + positive = node_helpers.conditioning_set_values(positive, + {"concat_mask": mask, + "concat_latent_image": concat_latent_image}) + negative = node_helpers.conditioning_set_values(negative, + {"concat_mask": mask, + "concat_latent_image": concat_latent_image}) + + if clip_vision_output is not None: + positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) + negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) + + out_latent = {} + out_latent["samples"] = latent + return (positive, negative, out_latent) + NODE_CLASS_MAPPINGS = { + "WanTrackToVideo": 
WanTrackToVideo, "WanImageToVideo": WanImageToVideo, "WanFunControlToVideo": WanFunControlToVideo, "WanFunInpaintToVideo": WanFunInpaintToVideo, From e6e5d33b351fc5ed8334d74dac77b283ecea8708 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 25 Jul 2025 01:58:28 -0700 Subject: [PATCH 0363/1073] Remove useless code. (#9041) This is only needed on old pytorch 2.0 and older. --- comfy/ldm/wan/vae.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/comfy/ldm/wan/vae.py b/comfy/ldm/wan/vae.py index a8ebc5ec6..a83c6edfd 100644 --- a/comfy/ldm/wan/vae.py +++ b/comfy/ldm/wan/vae.py @@ -52,15 +52,6 @@ class RMS_norm(nn.Module): x, dim=(1 if self.channel_first else -1)) * self.scale * self.gamma.to(x) + (self.bias.to(x) if self.bias is not None else 0) -class Upsample(nn.Upsample): - - def forward(self, x): - """ - Fix bfloat16 support for nearest neighbor interpolation. - """ - return super().forward(x.float()).type_as(x) - - class Resample(nn.Module): def __init__(self, dim, mode): @@ -73,11 +64,11 @@ class Resample(nn.Module): # layers if mode == 'upsample2d': self.resample = nn.Sequential( - Upsample(scale_factor=(2., 2.), mode='nearest-exact'), + nn.Upsample(scale_factor=(2., 2.), mode='nearest-exact'), ops.Conv2d(dim, dim // 2, 3, padding=1)) elif mode == 'upsample3d': self.resample = nn.Sequential( - Upsample(scale_factor=(2., 2.), mode='nearest-exact'), + nn.Upsample(scale_factor=(2., 2.), mode='nearest-exact'), ops.Conv2d(dim, dim // 2, 3, padding=1)) self.time_conv = CausalConv3d( dim, dim * 2, (3, 1, 1), padding=(1, 0, 0)) From 93bc2f8e4d5dace2328b861579df24f91684e27e Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sat, 26 Jul 2025 01:24:23 +0800 Subject: [PATCH 0364/1073] Update template to 0.1.40 (#9048) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8f6a6d112..33a59b4be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.39 +comfyui-workflow-templates==0.1.40 comfyui-embedded-docs==0.2.4 torch torchsde From c0207b473fa9ad413fad6d5658449356e39758cc Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 25 Jul 2025 14:25:08 -0700 Subject: [PATCH 0365/1073] Fix issue with line endings github workflow. 
(#9053) --- .github/workflows/check-line-endings.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/check-line-endings.yml b/.github/workflows/check-line-endings.yml index 03b3e3ced..eeb594d6c 100644 --- a/.github/workflows/check-line-endings.yml +++ b/.github/workflows/check-line-endings.yml @@ -17,8 +17,7 @@ jobs: - name: Check for Windows line endings (CRLF) run: | # Get the list of changed files in the PR - git merge origin/${{ github.base_ref }} --no-edit - CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}..HEAD) + CHANGED_FILES=$(git diff --name-only ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}) # Flag to track if CRLF is found CRLF_FOUND=false From c60467a148c2b6f7b9fe47725362361f2de9ae50 Mon Sep 17 00:00:00 2001 From: Thor-ATX Date: Sat, 26 Jul 2025 09:27:03 +1200 Subject: [PATCH 0366/1073] Update negative prompt for Moonvalley nodes (#9038) Co-authored-by: thorsten --- comfy_api_nodes/nodes_moonvalley.py | 359 +++++++++++++++++----------- 1 file changed, 224 insertions(+), 135 deletions(-) diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 6e937411c..057021efa 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -2,7 +2,11 @@ import logging from typing import Any, Callable, Optional, TypeVar import random import torch -from comfy_api_nodes.util.validation_utils import get_image_dimensions, validate_image_dimensions, validate_video_dimensions +from comfy_api_nodes.util.validation_utils import ( + get_image_dimensions, + validate_image_dimensions, + validate_video_dimensions, +) from comfy_api_nodes.apis import ( @@ -10,7 +14,7 @@ from comfy_api_nodes.apis import ( MoonvalleyTextToVideoInferenceParams, MoonvalleyVideoToVideoInferenceParams, MoonvalleyVideoToVideoRequest, - MoonvalleyPromptResponse + MoonvalleyPromptResponse, ) from comfy_api_nodes.apis.client import ( ApiEndpoint, @@ -54,20 +58,26 @@ MAX_VIDEO_SIZE = 1024 * 1024 * 1024 # 1 GB max for in-memory video processing MOONVALLEY_MAREY_MAX_PROMPT_LENGTH = 5000 R = TypeVar("R") + + class MoonvalleyApiError(Exception): """Base exception for Moonvalley API errors.""" + pass + def is_valid_task_creation_response(response: MoonvalleyPromptResponse) -> bool: """Verifies that the initial response contains a task ID.""" return bool(response.id) + def validate_task_creation_response(response) -> None: if not is_valid_task_creation_response(response): error_msg = f"Moonvalley Marey API: Initial request failed. 
Code: {response.code}, Message: {response.message}, Data: {response}" logging.error(error_msg) raise MoonvalleyApiError(error_msg) + def get_video_from_response(response): video = response.output_url logging.info( @@ -102,16 +112,17 @@ def poll_until_finished( poll_interval=16.0, failed_statuses=["error"], status_extractor=lambda response: ( - response.status - if response and response.status - else None + response.status if response and response.status else None ), auth_kwargs=auth_kwargs, result_url_extractor=result_url_extractor, node_id=node_id, ).execute() -def validate_prompts(prompt:str, negative_prompt: str, max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH): + +def validate_prompts( + prompt: str, negative_prompt: str, max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH +): """Verifies that the prompt isn't empty and that neither prompt is too long.""" if not prompt: raise ValueError("Positive prompt is empty") @@ -123,16 +134,15 @@ def validate_prompts(prompt:str, negative_prompt: str, max_length=MOONVALLEY_MAR ) return True + def validate_input_media(width, height, with_frame_conditioning, num_frames_in=None): - # inference validation - # T = num_frames - # in all cases, the following must be true: T divisible by 16 and H,W by 8. in addition... - # with image conditioning: H*W must be divisible by 8192 - # without image conditioning: T divisible by 32 - if num_frames_in and not num_frames_in % 16 == 0 : - return False, ( - "The input video total frame count must be divisible by 16!" - ) + # inference validation + # T = num_frames + # in all cases, the following must be true: T divisible by 16 and H,W by 8. in addition... + # with image conditioning: H*W must be divisible by 8192 + # without image conditioning: T divisible by 32 + if num_frames_in and not num_frames_in % 16 == 0: + return False, ("The input video total frame count must be divisible by 16!") if height % 8 != 0 or width % 8 != 0: return False, ( @@ -146,13 +156,13 @@ def validate_input_media(width, height, with_frame_conditioning, num_frames_in=N "divisible by 8192 for frame conditioning" ) else: - if num_frames_in and not num_frames_in % 32 == 0 : - return False, ( - "The input video total frame count must be divisible by 32!" 
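Collecting the rules enforced by `validate_input_media` above in one place: the frame count must be divisible by 16 (and by 32 when there is no frame conditioning), height and width by 8, and with frame conditioning `H*W` must additionally be divisible by 8192. As a single predicate:

```python
def media_ok(width: int, height: int, frames: int, frame_conditioning: bool) -> bool:
    if frames % 16 or height % 8 or width % 8:
        return False
    if frame_conditioning:
        return (height * width) % 8192 == 0
    return frames % 32 == 0
```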
- ) + if num_frames_in and not num_frames_in % 32 == 0: + return False, ("The input video total frame count must be divisible by 32!") -def validate_input_image(image: torch.Tensor, with_frame_conditioning: bool=False) -> None: +def validate_input_image( + image: torch.Tensor, with_frame_conditioning: bool = False +) -> None: """ Validates the input image adheres to the expectations of the API: - The image resolution should not be less than 300*300px @@ -160,10 +170,15 @@ def validate_input_image(image: torch.Tensor, with_frame_conditioning: bool=Fals """ height, width = get_image_dimensions(image) - validate_input_media(width, height, with_frame_conditioning ) - validate_image_dimensions(image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH) + validate_input_media(width, height, with_frame_conditioning) + validate_image_dimensions( + image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH + ) -def validate_input_video(video: VideoInput, num_frames_out: int, with_frame_conditioning: bool=False): +def validate_input_video( + video: VideoInput, num_frames_out: int, with_frame_conditioning: bool = False +): try: width, height = video.get_dimensions() except Exception as e: logging.error("Error getting dimensions of video: %s", e) raise ValueError(f"Cannot get video dimensions: {e}") from e validate_input_media(width, height, with_frame_conditioning) - validate_video_dimensions(video, min_width=MIN_VID_WIDTH, min_height=MIN_VID_HEIGHT, max_width=MAX_VID_WIDTH, max_height=MAX_VID_HEIGHT) + validate_video_dimensions( + video, + min_width=MIN_VID_WIDTH, + min_height=MIN_VID_HEIGHT, + max_width=MAX_VID_WIDTH, + max_height=MAX_VID_HEIGHT, + ) trimmed_video = validate_input_video_length(video, num_frames_out) return trimmed_video @@ -180,22 +201,29 @@ def validate_input_video(video: VideoInput, num_frames_out: int, with_frame_cond def validate_input_video_length(video: VideoInput, num_frames: int): if video.get_duration() > 60: - raise MoonvalleyApiError("Input Video lenth should be less than 1min. Please trim.") + raise MoonvalleyApiError( + "Input Video length should be less than 1min. Please trim." + ) if num_frames == 128: - if video.get_duration() < 5: - raise MoonvalleyApiError("Input Video length is less than 5s. Please use a video longer than or equal to 5s.") - if video.get_duration() > 5: - # trim video to 5s - video = trim_video(video, 5) + if video.get_duration() < 5: + raise MoonvalleyApiError( + "Input Video length is less than 5s. Please use a video longer than or equal to 5s." + ) + if video.get_duration() > 5: + # trim video to 5s + video = trim_video(video, 5) if num_frames == 256: if video.get_duration() < 10: - raise MoonvalleyApiError("Input Video length is less than 10s. Please use a video longer than or equal to 10s.") + raise MoonvalleyApiError( + "Input Video length is less than 10s. Please use a video longer than or equal to 10s."
+ ) if video.get_duration() > 10: # trim video to 10s video = trim_video(video, 10) return video + def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: """ Returns a new VideoInput object trimmed from the beginning to the specified duration, @@ -219,8 +247,8 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: input_source = video.get_stream_source() # Open containers - input_container = av.open(input_source, mode='r') - output_container = av.open(output_buffer, mode='w', format='mp4') + input_container = av.open(input_source, mode="r") + output_container = av.open(output_buffer, mode="w", format="mp4") # Set up output streams for re-encoding video_stream = None @@ -230,22 +258,32 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: logging.info(f"Found stream: type={stream.type}, class={type(stream)}") if isinstance(stream, av.VideoStream): # Create output video stream with same parameters - video_stream = output_container.add_stream('h264', rate=stream.average_rate) + video_stream = output_container.add_stream( + "h264", rate=stream.average_rate + ) video_stream.width = stream.width video_stream.height = stream.height - video_stream.pix_fmt = 'yuv420p' - logging.info(f"Added video stream: {stream.width}x{stream.height} @ {stream.average_rate}fps") + video_stream.pix_fmt = "yuv420p" + logging.info( + f"Added video stream: {stream.width}x{stream.height} @ {stream.average_rate}fps" + ) elif isinstance(stream, av.AudioStream): # Create output audio stream with same parameters - audio_stream = output_container.add_stream('aac', rate=stream.sample_rate) + audio_stream = output_container.add_stream( + "aac", rate=stream.sample_rate + ) audio_stream.sample_rate = stream.sample_rate audio_stream.layout = stream.layout - logging.info(f"Added audio stream: {stream.sample_rate}Hz, {stream.channels} channels") + logging.info( + f"Added audio stream: {stream.sample_rate}Hz, {stream.channels} channels" + ) # Calculate target frame count that's divisible by 32 fps = input_container.streams.video[0].average_rate estimated_frames = int(duration_sec * fps) - target_frames = (estimated_frames // 32) * 32 # Round down to nearest multiple of 32 + target_frames = ( + estimated_frames // 32 + ) * 32 # Round down to nearest multiple of 32 if target_frames == 0: raise ValueError("Video too short: need at least 32 frames for Moonvalley") @@ -268,7 +306,9 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: for packet in video_stream.encode(): output_container.mux(packet) - logging.info(f"Encoded {frame_count} video frames (target: {target_frames})") + logging.info( + f"Encoded {frame_count} video frames (target: {target_frames})" + ) # Decode and re-encode audio frames if audio_stream: @@ -292,7 +332,6 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: output_container.close() input_container.close() - # Return as VideoFromFile using the buffer output_buffer.seek(0) return VideoFromFile(output_buffer) @@ -305,6 +344,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: output_container.close() raise RuntimeError(f"Failed to trim video: {str(e)}") from e + # --- BaseMoonvalleyVideoNode --- class BaseMoonvalleyVideoNode: def parseWidthHeightFromRes(self, resolution: str): @@ -328,7 +368,7 @@ class BaseMoonvalleyVideoNode: "Motion Transfer": "motion_control", "Canny": "canny_control", "Pose Transfer": "pose_control", - "Depth": "depth_control" + "Depth": "depth_control", } if value in control_map: 
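`trim_video` above re-encodes instead of stream-copying so the output can be cut at an exact frame count (rounded down to a multiple of 32). Stripped of audio handling and error paths, the PyAV pattern is roughly this (an illustrative sketch, not the node's implementation):

```python
import io
import av

def trim_to_frames(src: str, target_frames: int) -> io.BytesIO:
    buf = io.BytesIO()
    with av.open(src) as inp, av.open(buf, mode="w", format="mp4") as out:
        vin = inp.streams.video[0]
        vout = out.add_stream("h264", rate=vin.average_rate)
        vout.width, vout.height, vout.pix_fmt = vin.width, vin.height, "yuv420p"
        for i, frame in enumerate(inp.decode(video=0)):
            if i >= target_frames:
                break
            for packet in vout.encode(frame):
                out.mux(packet)
        for packet in vout.encode():   # flush buffered packets
            out.mux(packet)
    buf.seek(0)
    return buf
```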
return control_map[value] @@ -355,31 +395,63 @@ class BaseMoonvalleyVideoNode: return { "required": { "prompt": model_field_to_node_input( - IO.STRING, MoonvalleyTextToVideoRequest, "prompt_text", - multiline=True + IO.STRING, + MoonvalleyTextToVideoRequest, + "prompt_text", + multiline=True, ), "negative_prompt": model_field_to_node_input( IO.STRING, MoonvalleyTextToVideoInferenceParams, "negative_prompt", multiline=True, - default="gopro, bright, contrast, static, overexposed, bright, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, contrast, saturated, vibrant, glowing, cross dissolve, texture, videogame, saturation, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, transition, dissolve, cross-dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring, static", + default="low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly specular, AI artifacts", ), - - "resolution": (IO.COMBO, { - "options": ["16:9 (1920 x 1080)", - "9:16 (1080 x 1920)", - "1:1 (1152 x 1152)", - "4:3 (1440 x 1080)", - "3:4 (1080 x 1440)", - "21:9 (2560 x 1080)"], + "resolution": ( + IO.COMBO, + { + "options": [ + "16:9 (1920 x 1080)", + "9:16 (1080 x 1920)", + "1:1 (1152 x 1152)", + "4:3 (1440 x 1080)", + "3:4 (1080 x 1440)", + "21:9 (2560 x 1080)", + ], "default": "16:9 (1920 x 1080)", "tooltip": "Resolution of the output video", - }), + }, + ), # "length": (IO.COMBO,{"options":['5s','10s'], "default": '5s'}), - "prompt_adherence": model_field_to_node_input(IO.FLOAT,MoonvalleyTextToVideoInferenceParams,"guidance_scale",default=7.0, step=1, min=1, max=20), - "seed": model_field_to_node_input(IO.INT,MoonvalleyTextToVideoInferenceParams, "seed", default=random.randint(0, 2**32 - 1), min=0, max=4294967295, step=1, display="number", tooltip="Random seed value", control_after_generate=True), - "steps": model_field_to_node_input(IO.INT, MoonvalleyTextToVideoInferenceParams, "steps", default=100, min=1, max=100), + "prompt_adherence": model_field_to_node_input( + IO.FLOAT, + MoonvalleyTextToVideoInferenceParams, + "guidance_scale", + default=7.0, + step=1, + min=1, + max=20, + ), + "seed": model_field_to_node_input( + IO.INT, + MoonvalleyTextToVideoInferenceParams, + "seed", + default=random.randint(0, 2**32 - 1), + min=0, + max=4294967295, + step=1, + display="number", + tooltip="Random seed value", + control_after_generate=True, + ), + "steps": model_field_to_node_input( + IO.INT, + MoonvalleyTextToVideoInferenceParams, + "steps", + default=100, + min=1, + max=100, + ), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", @@ -393,7 +465,7 @@ class BaseMoonvalleyVideoNode: "image_url", tooltip="The reference image used to generate the video", ), - } + }, } RETURN_TYPES = ("STRING",) @@ -404,6 +476,7 @@ class BaseMoonvalleyVideoNode: def generate(self, **kwargs): return None + # --- MoonvalleyImg2VideoNode --- class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): @@ -415,43 +488,46 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): RETURN_NAMES = ("video",) DESCRIPTION = "Moonvalley Marey Image to Video Node" - def generate(self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs): + def generate( + self, prompt, negative_prompt, unique_id: Optional[str] = 
None, **kwargs + ): image = kwargs.get("image", None) - if (image is None): + if image is None: raise MoonvalleyApiError("image is required") total_frames = get_total_frames_from_length() - validate_input_image(image,True) + validate_input_image(image, True) validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) width_height = self.parseWidthHeightFromRes(kwargs.get("resolution")) - inference_params=MoonvalleyTextToVideoInferenceParams( - negative_prompt=negative_prompt, - steps=kwargs.get("steps"), - seed=kwargs.get("seed"), - guidance_scale=kwargs.get("prompt_adherence"), - num_frames=total_frames, - width=width_height.get("width"), - height=width_height.get("height"), - use_negative_prompts=True - ) + inference_params = MoonvalleyTextToVideoInferenceParams( + negative_prompt=negative_prompt, + steps=kwargs.get("steps"), + seed=kwargs.get("seed"), + guidance_scale=kwargs.get("prompt_adherence"), + num_frames=total_frames, + width=width_height.get("width"), + height=width_height.get("height"), + use_negative_prompts=True, + ) """Upload image to comfy backend to have a URL available for further processing""" # Get MIME type from tensor - assuming PNG format for image tensors mime_type = "image/png" - image_url = upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs, mime_type=mime_type)[0] + image_url = upload_images_to_comfyapi( + image, max_images=1, auth_kwargs=kwargs, mime_type=mime_type + )[0] request = MoonvalleyTextToVideoRequest( - image_url=image_url, - prompt_text=prompt, - inference_params=inference_params - ) + image_url=image_url, prompt_text=prompt, inference_params=inference_params + ) initial_operation = SynchronousOperation( - endpoint=ApiEndpoint(path=API_IMG2VIDEO_ENDPOINT, - method=HttpMethod.POST, - request_model=MoonvalleyTextToVideoRequest, - response_model=MoonvalleyPromptResponse - ), + endpoint=ApiEndpoint( + path=API_IMG2VIDEO_ENDPOINT, + method=HttpMethod.POST, + request_model=MoonvalleyTextToVideoRequest, + response_model=MoonvalleyPromptResponse, + ), request=request, auth_kwargs=kwargs, ) @@ -463,7 +539,8 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): task_id, auth_kwargs=kwargs, node_id=unique_id ) video = download_url_to_video_output(final_response.output_url) - return (video, ) + return (video,) + # --- MoonvalleyVid2VidNode --- class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): @@ -479,38 +556,46 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): if param in input_types["optional"]: del input_types["optional"][param] input_types["optional"] = { - "video": (IO.VIDEO, {"default": "", "multiline": False, "tooltip": "The reference video used to generate the output video. Input a 5s video for 128 frames and a 10s video for 256 frames. Longer videos will be trimmed automatically."}), - "control_type": ( - ["Motion Transfer", "Pose Transfer"], - {"default": "Motion Transfer"}, - ), - "motion_intensity": ( - "INT", - { - "default": 100, - "step": 1, - "min": 0, - "max": 100, - "tooltip": "Only used if control_type is 'Motion Transfer'", - }, - ) - } + "video": ( + IO.VIDEO, + { + "default": "", + "multiline": False, + "tooltip": "The reference video used to generate the output video. Input a 5s video for 128 frames and a 10s video for 256 frames. 
Longer videos will be trimmed automatically.", + }, + ), + "control_type": ( + ["Motion Transfer", "Pose Transfer"], + {"default": "Motion Transfer"}, + ), + "motion_intensity": ( + "INT", + { + "default": 100, + "step": 1, + "min": 0, + "max": 100, + "tooltip": "Only used if control_type is 'Motion Transfer'", + }, + ), + } return input_types RETURN_TYPES = ("VIDEO",) RETURN_NAMES = ("video",) - def generate(self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs): + def generate( + self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs + ): video = kwargs.get("video") num_frames = get_total_frames_from_length() - if not video : + if not video: raise MoonvalleyApiError("video is required") - """Validate video input""" - video_url="" + video_url = "" if video: validated_video = validate_input_video(video, num_frames, False) video_url = upload_video_to_comfyapi(validated_video, auth_kwargs=kwargs) @@ -520,29 +605,30 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): """Validate prompts and inference input""" validate_prompts(prompt, negative_prompt) - inference_params=MoonvalleyVideoToVideoInferenceParams( + inference_params = MoonvalleyVideoToVideoInferenceParams( negative_prompt=negative_prompt, steps=kwargs.get("steps"), seed=kwargs.get("seed"), guidance_scale=kwargs.get("prompt_adherence"), - control_params={'motion_intensity': motion_intensity} + control_params={"motion_intensity": motion_intensity}, ) control = self.parseControlParameter(control_type) request = MoonvalleyVideoToVideoRequest( - control_type=control, - video_url=video_url, - prompt_text=prompt, - inference_params=inference_params - ) + control_type=control, + video_url=video_url, + prompt_text=prompt, + inference_params=inference_params, + ) initial_operation = SynchronousOperation( - endpoint=ApiEndpoint(path=API_VIDEO2VIDEO_ENDPOINT, - method=HttpMethod.POST, - request_model=MoonvalleyVideoToVideoRequest, - response_model=MoonvalleyPromptResponse - ), + endpoint=ApiEndpoint( + path=API_VIDEO2VIDEO_ENDPOINT, + method=HttpMethod.POST, + request_model=MoonvalleyVideoToVideoRequest, + response_model=MoonvalleyPromptResponse, + ), request=request, auth_kwargs=kwargs, ) @@ -556,7 +642,8 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): video = download_url_to_video_output(final_response.output_url) - return (video, ) + return (video,) + # --- MoonvalleyTxt2VideoNode --- class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): @@ -575,31 +662,33 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): del input_types["optional"][param] return input_types - def generate(self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs): + def generate( + self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs + ): validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) width_height = self.parseWidthHeightFromRes(kwargs.get("resolution")) num_frames = get_total_frames_from_length() - inference_params=MoonvalleyTextToVideoInferenceParams( - negative_prompt=negative_prompt, - steps=kwargs.get("steps"), - seed=kwargs.get("seed"), - guidance_scale=kwargs.get("prompt_adherence"), - num_frames=num_frames, - width=width_height.get("width"), - height=width_height.get("height"), - ) + inference_params = MoonvalleyTextToVideoInferenceParams( + negative_prompt=negative_prompt, + steps=kwargs.get("steps"), + seed=kwargs.get("seed"), + guidance_scale=kwargs.get("prompt_adherence"), + num_frames=num_frames, + 
width=width_height.get("width"), + height=width_height.get("height"), + ) request = MoonvalleyTextToVideoRequest( - prompt_text=prompt, - inference_params=inference_params - ) + prompt_text=prompt, inference_params=inference_params + ) initial_operation = SynchronousOperation( - endpoint=ApiEndpoint(path=API_TXT2VIDEO_ENDPOINT, - method=HttpMethod.POST, - request_model=MoonvalleyTextToVideoRequest, - response_model=MoonvalleyPromptResponse - ), + endpoint=ApiEndpoint( + path=API_TXT2VIDEO_ENDPOINT, + method=HttpMethod.POST, + request_model=MoonvalleyTextToVideoRequest, + response_model=MoonvalleyPromptResponse, + ), request=request, auth_kwargs=kwargs, ) @@ -612,8 +701,7 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): ) video = download_url_to_video_output(final_response.output_url) - return (video, ) - + return (video,) NODE_CLASS_MAPPINGS = { @@ -629,6 +717,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { # "MoonvalleyVideo2VideoNode": "Moonvalley Marey Video to Video", } + def get_total_frames_from_length(length="5s"): # if length == '5s': # return 128 From b850d9a8bb2c99989fe79d1fded26ab5c103c7b2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 25 Jul 2025 18:25:45 -0700 Subject: [PATCH 0367/1073] Add map_function to get_history. (#9056) --- execution.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/execution.py b/execution.py index c3a62f1cb..8a9663a7d 100644 --- a/execution.py +++ b/execution.py @@ -1097,7 +1097,7 @@ class PromptQueue: return True return False - def get_history(self, prompt_id=None, max_items=None, offset=-1): + def get_history(self, prompt_id=None, max_items=None, offset=-1, map_function=None): with self.mutex: if prompt_id is None: out = {} @@ -1106,13 +1106,21 @@ class PromptQueue: offset = len(self.history) - max_items for k in self.history: if i >= offset: - out[k] = self.history[k] + p = self.history[k] + if map_function is not None: + p = map_function(p) + out[k] = p if max_items is not None and len(out) >= max_items: break i += 1 return out elif prompt_id in self.history: - return {prompt_id: copy.deepcopy(self.history[prompt_id])} + p = self.history[prompt_id] + if map_function is None: + p = copy.deepcopy(p) + else: + p = map_function(p) + return {prompt_id: p} else: return {} From 0621d73a9c56fdc9e79aad87ed260135639bca50 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 26 Jul 2025 01:44:19 -0700 Subject: [PATCH 0368/1073] Remove useless code. (#9059) --- comfy/ldm/wan/vae.py | 41 ----------------------------------------- 1 file changed, 41 deletions(-) diff --git a/comfy/ldm/wan/vae.py b/comfy/ldm/wan/vae.py index a83c6edfd..6b07840fc 100644 --- a/comfy/ldm/wan/vae.py +++ b/comfy/ldm/wan/vae.py @@ -148,29 +148,6 @@ class Resample(nn.Module): feat_idx[0] += 1 return x - def init_weight(self, conv): - conv_weight = conv.weight - nn.init.zeros_(conv_weight) - c1, c2, t, h, w = conv_weight.size() - one_matrix = torch.eye(c1, c2) - init_matrix = one_matrix - nn.init.zeros_(conv_weight) - #conv_weight.data[:,:,-1,1,1] = init_matrix * 0.5 - conv_weight.data[:, :, 1, 0, 0] = init_matrix #* 0.5 - conv.weight.data.copy_(conv_weight) - nn.init.zeros_(conv.bias.data) - - def init_weight2(self, conv): - conv_weight = conv.weight.data - nn.init.zeros_(conv_weight) - c1, c2, t, h, w = conv_weight.size() - init_matrix = torch.eye(c1 // 2, c2) - #init_matrix = repeat(init_matrix, 'o ... 
-> (o 2) ...').permute(1,0,2).contiguous().reshape(c1,c2) - conv_weight[:c1 // 2, :, -1, 0, 0] = init_matrix - conv_weight[c1 // 2:, :, -1, 0, 0] = init_matrix - conv.weight.data.copy_(conv_weight) - nn.init.zeros_(conv.bias.data) - class ResidualBlock(nn.Module): @@ -485,12 +462,6 @@ class WanVAE(nn.Module): self.decoder = Decoder3d(dim, z_dim, dim_mult, num_res_blocks, attn_scales, self.temperal_upsample, dropout) - def forward(self, x): - mu, log_var = self.encode(x) - z = self.reparameterize(mu, log_var) - x_recon = self.decode(z) - return x_recon, mu, log_var - def encode(self, x): self.clear_cache() ## cache @@ -536,18 +507,6 @@ class WanVAE(nn.Module): self.clear_cache() return out - def reparameterize(self, mu, log_var): - std = torch.exp(0.5 * log_var) - eps = torch.randn_like(std) - return eps * std + mu - - def sample(self, imgs, deterministic=False): - mu, log_var = self.encode(imgs) - if deterministic: - return mu - std = torch.exp(0.5 * log_var.clamp(-30.0, 20.0)) - return mu + std * torch.randn_like(std) - def clear_cache(self): self._conv_num = count_conv3d(self.decoder) self._conv_idx = [0] From 1ef70fcde4b84e8cd743c4f1fd9cdce24bbadbad Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sun, 27 Jul 2025 05:25:33 +0800 Subject: [PATCH 0369/1073] Fix the broken link (#9060) --- comfy_api_nodes/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/README.md b/comfy_api_nodes/README.md index 64a389cc1..f56d6c860 100644 --- a/comfy_api_nodes/README.md +++ b/comfy_api_nodes/README.md @@ -2,7 +2,7 @@ ## Introduction -Below are a collection of nodes that work by calling external APIs. More information available in our [docs](https://docs.comfy.org/tutorials/api-nodes/overview#api-nodes). +Below are a collection of nodes that work by calling external APIs. More information available in our [docs](https://docs.comfy.org/tutorials/api-nodes/overview). ## Development From 78672d0ee6d20d8269f324474643e5cc00f1c348 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 27 Jul 2025 04:42:58 -0700 Subject: [PATCH 0370/1073] Small readme update. (#9071) --- README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index a148623cd..8a15136aa 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith ## Features - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything. - Image Models - - SD1.x, SD2.x, + - SD1.x, SD2.x ([unCLIP](https://comfyanonymous.github.io/ComfyUI_examples/unclip/)) - [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/), [SDXL Turbo](https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/) - [Stable Cascade](https://comfyanonymous.github.io/ComfyUI_examples/stable_cascade/) - [SD3 and SD3.5](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) @@ -84,9 +84,9 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Hunyuan3D 2.0](https://docs.comfy.org/tutorials/3d/hunyuan3D-2) - Asynchronous Queue system - Many optimizations: Only re-executes the parts of the workflow that changes between executions. -- Smart memory management: can automatically run models on GPUs with as low as 1GB vram. +- Smart memory management: can automatically run large models on GPUs with as low as 1GB vram with smart offloading. 
- Works even if you don't have a GPU with: ```--cpu``` (slow) -- Can load ckpt, safetensors and diffusers models/checkpoints. Standalone VAEs and CLIP models. +- Can load ckpt and safetensors: All in one checkpoints or standalone diffusion models, VAEs and CLIP models. - Safe loading of ckpt, pt, pth, etc.. files. - Embeddings/Textual inversion - [Loras (regular, locon and loha)](https://comfyanonymous.github.io/ComfyUI_examples/lora/) @@ -98,7 +98,6 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Inpainting](https://comfyanonymous.github.io/ComfyUI_examples/inpaint/) with both regular and inpainting models. - [ControlNet and T2I-Adapter](https://comfyanonymous.github.io/ComfyUI_examples/controlnet/) - [Upscale Models (ESRGAN, ESRGAN variants, SwinIR, Swin2SR, etc...)](https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/) -- [unCLIP Models](https://comfyanonymous.github.io/ComfyUI_examples/unclip/) - [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/) - [Model Merging](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/) - [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) From e6d9f6274494c5ac96295deb1bea54de50189059 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 27 Jul 2025 16:51:36 -0700 Subject: [PATCH 0371/1073] Add Moonvalley Marey V2V node with updated input validation (#9069) * [moonvalley] Update V2V node to match API specification - Add exact resolution validation for supported resolutions (1920x1080, 1080x1920, 1152x1152, 1536x1152, 1152x1536) - Change frame count validation from divisible by 32 to 16 - Add MP4 container format validation - Remove internal parameters (steps, guidance_scale) from V2V inference params - Update video duration handling to support only 5 seconds (auto-trim if longer) - Add motion_intensity parameter (0-100) for Motion Transfer control type - Add get_container_format() method to VideoInput classes * update negative prompt --- comfy_api/input/video_types.py | 13 ++ comfy_api/input_impl/video_types.py | 12 ++ comfy_api_nodes/nodes_moonvalley.py | 225 +++++++++++++++------------- 3 files changed, 145 insertions(+), 105 deletions(-) diff --git a/comfy_api/input/video_types.py b/comfy_api/input/video_types.py index bb936e0a4..5d95dc507 100644 --- a/comfy_api/input/video_types.py +++ b/comfy_api/input/video_types.py @@ -2,6 +2,7 @@ from __future__ import annotations from abc import ABC, abstractmethod from typing import Optional, Union import io +import av from comfy_api.util import VideoContainer, VideoCodec, VideoComponents class VideoInput(ABC): @@ -70,3 +71,15 @@ class VideoInput(ABC): components = self.get_components() frame_count = components.images.shape[0] return float(frame_count / components.frame_rate) + + def get_container_format(self) -> str: + """ + Returns the container format of the video (e.g., 'mp4', 'mov', 'avi'). 
+ + Returns: + Container format as string + """ + # Default implementation - subclasses should override for better performance + source = self.get_stream_source() + with av.open(source, mode="r") as container: + return container.format.name diff --git a/comfy_api/input_impl/video_types.py b/comfy_api/input_impl/video_types.py index 9ae818f4e..91e7c1bfa 100644 --- a/comfy_api/input_impl/video_types.py +++ b/comfy_api/input_impl/video_types.py @@ -121,6 +121,18 @@ class VideoFromFile(VideoInput): raise ValueError(f"Could not determine duration for file '{self.__file}'") + def get_container_format(self) -> str: + """ + Returns the container format of the video (e.g., 'mp4', 'mov', 'avi'). + + Returns: + Container format as string + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + with av.open(self.__file, mode='r') as container: + return container.format.name + def get_components_internal(self, container: InputContainer) -> VideoComponents: # Get video frames frames = [] diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 057021efa..789fcef02 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -5,7 +5,6 @@ import torch from comfy_api_nodes.util.validation_utils import ( get_image_dimensions, validate_image_dimensions, - validate_video_dimensions, ) @@ -176,54 +175,76 @@ def validate_input_image( ) -def validate_input_video( - video: VideoInput, num_frames_out: int, with_frame_conditioning: bool = False -): +def validate_video_to_video_input(video: VideoInput) -> VideoInput: + """ + Validates and processes video input for Moonvalley Video-to-Video generation. + + Args: + video: Input video to validate + + Returns: + Validated and potentially trimmed video + + Raises: + ValueError: If video doesn't meet requirements + MoonvalleyApiError: If video duration is too short + """ + width, height = _get_video_dimensions(video) + _validate_video_dimensions(width, height) + _validate_container_format(video) + + return _validate_and_trim_duration(video) + + +def _get_video_dimensions(video: VideoInput) -> tuple[int, int]: + """Extracts video dimensions with error handling.""" try: - width, height = video.get_dimensions() + return video.get_dimensions() except Exception as e: logging.error("Error getting dimensions of video: %s", e) raise ValueError(f"Cannot get video dimensions: {e}") from e - validate_input_media(width, height, with_frame_conditioning) - validate_video_dimensions( - video, - min_width=MIN_VID_WIDTH, - min_height=MIN_VID_HEIGHT, - max_width=MAX_VID_WIDTH, - max_height=MAX_VID_HEIGHT, - ) - trimmed_video = validate_input_video_length(video, num_frames_out) - return trimmed_video +def _validate_video_dimensions(width: int, height: int) -> None: + """Validates video dimensions meet Moonvalley V2V requirements.""" + supported_resolutions = { + (1920, 1080), (1080, 1920), (1152, 1152), + (1536, 1152), (1152, 1536) + } + + if (width, height) not in supported_resolutions: + supported_list = ', '.join([f'{w}x{h}' for w, h in sorted(supported_resolutions)]) + raise ValueError(f"Resolution {width}x{height} not supported. Supported: {supported_list}") -def validate_input_video_length(video: VideoInput, num_frames: int): +def _validate_container_format(video: VideoInput) -> None: + """Validates video container format is MP4.""" + container_format = video.get_container_format() + if container_format not in ['mp4', 'mov,mp4,m4a,3gp,3g2,mj2']: + raise ValueError(f"Only MP4 container format supported. 
Got: {container_format}") - if video.get_duration() > 60: - raise MoonvalleyApiError( - "Input Video lenth should be less than 1min. Please trim." - ) - if num_frames == 128: - if video.get_duration() < 5: - raise MoonvalleyApiError( - "Input Video length is less than 5s. Please use a video longer than or equal to 5s." - ) - if video.get_duration() > 5: - # trim video to 5s - video = trim_video(video, 5) - if num_frames == 256: - if video.get_duration() < 10: - raise MoonvalleyApiError( - "Input Video length is less than 10s. Please use a video longer than or equal to 10s." - ) - if video.get_duration() > 10: - # trim video to 10s - video = trim_video(video, 10) +def _validate_and_trim_duration(video: VideoInput) -> VideoInput: + """Validates video duration and trims to 5 seconds if needed.""" + duration = video.get_duration() + _validate_minimum_duration(duration) + return _trim_if_too_long(video, duration) + + +def _validate_minimum_duration(duration: float) -> None: + """Ensures video is at least 5 seconds long.""" + if duration < 5: + raise MoonvalleyApiError("Input video must be at least 5 seconds long.") + + +def _trim_if_too_long(video: VideoInput, duration: float) -> VideoInput: + """Trims video to 5 seconds if longer.""" + if duration > 5: + return trim_video(video, 5) return video + def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: """ Returns a new VideoInput object trimmed from the beginning to the specified duration, @@ -278,15 +299,13 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: f"Added audio stream: {stream.sample_rate}Hz, {stream.channels} channels" ) - # Calculate target frame count that's divisible by 32 + # Calculate target frame count that's divisible by 16 fps = input_container.streams.video[0].average_rate estimated_frames = int(duration_sec * fps) - target_frames = ( - estimated_frames // 32 - ) * 32 # Round down to nearest multiple of 32 + target_frames = (estimated_frames // 16) * 16 # Round down to nearest multiple of 16 if target_frames == 0: - raise ValueError("Video too short: need at least 32 frames for Moonvalley") + raise ValueError("Video too short: need at least 16 frames for Moonvalley") frame_count = 0 audio_frame_count = 0 @@ -353,8 +372,8 @@ class BaseMoonvalleyVideoNode: "16:9 (1920 x 1080)": {"width": 1920, "height": 1080}, "9:16 (1080 x 1920)": {"width": 1080, "height": 1920}, "1:1 (1152 x 1152)": {"width": 1152, "height": 1152}, - "4:3 (1440 x 1080)": {"width": 1440, "height": 1080}, - "3:4 (1080 x 1440)": {"width": 1080, "height": 1440}, + "4:3 (1536 x 1152)": {"width": 1536, "height": 1152}, + "3:4 (1152 x 1536)": {"width": 1152, "height": 1536}, "21:9 (2560 x 1080)": {"width": 2560, "height": 1080}, } if resolution in res_map: @@ -494,7 +513,6 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): image = kwargs.get("image", None) if image is None: raise MoonvalleyApiError("image is required") - total_frames = get_total_frames_from_length() validate_input_image(image, True) validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) @@ -505,7 +523,7 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): steps=kwargs.get("steps"), seed=kwargs.get("seed"), guidance_scale=kwargs.get("prompt_adherence"), - num_frames=total_frames, + num_frames=128, width=width_height.get("width"), height=width_height.get("height"), use_negative_prompts=True, @@ -549,39 +567,45 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): @classmethod def INPUT_TYPES(cls): - input_types = 
super().INPUT_TYPES() - for param in ["resolution", "image"]: - if param in input_types["required"]: - del input_types["required"][param] - if param in input_types["optional"]: - del input_types["optional"][param] - input_types["optional"] = { - "video": ( - IO.VIDEO, - { - "default": "", - "multiline": False, - "tooltip": "The reference video used to generate the output video. Input a 5s video for 128 frames and a 10s video for 256 frames. Longer videos will be trimmed automatically.", - }, - ), - "control_type": ( - ["Motion Transfer", "Pose Transfer"], - {"default": "Motion Transfer"}, - ), - "motion_intensity": ( - "INT", - { - "default": 100, - "step": 1, - "min": 0, - "max": 100, - "tooltip": "Only used if control_type is 'Motion Transfer'", - }, - ), + return { + "required": { + "prompt": model_field_to_node_input( + IO.STRING, MoonvalleyVideoToVideoRequest, "prompt_text", + multiline=True + ), + "negative_prompt": model_field_to_node_input( + IO.STRING, + MoonvalleyVideoToVideoInferenceParams, + "negative_prompt", + multiline=True, + default="low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly specular, AI artifacts" + ), + "seed": model_field_to_node_input(IO.INT,MoonvalleyVideoToVideoInferenceParams, "seed", default=random.randint(0, 2**32 - 1), min=0, max=4294967295, step=1, display="number", tooltip="Random seed value", control_after_generate=True), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + "optional": { + "video": (IO.VIDEO, {"default": "", "multiline": False, "tooltip": "The reference video used to generate the output video. Must be at least 5 seconds long. Videos longer than 5s will be automatically trimmed. 
Only MP4 format supported."}), + "control_type": ( + ["Motion Transfer", "Pose Transfer"], + {"default": "Motion Transfer"}, + ), + "motion_intensity": ( + "INT", + { + "default": 100, + "step": 1, + "min": 0, + "max": 100, + "tooltip": "Only used if control_type is 'Motion Transfer'", + }, + ) + } } - return input_types - RETURN_TYPES = ("VIDEO",) RETURN_NAMES = ("video",) @@ -589,15 +613,13 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs ): video = kwargs.get("video") - num_frames = get_total_frames_from_length() if not video: raise MoonvalleyApiError("video is required") - """Validate video input""" video_url = "" if video: - validated_video = validate_input_video(video, num_frames, False) + validated_video = validate_video_to_video_input(video) video_url = upload_video_to_comfyapi(validated_video, auth_kwargs=kwargs) control_type = kwargs.get("control_type") @@ -605,12 +627,16 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): """Validate prompts and inference input""" validate_prompts(prompt, negative_prompt) - inference_params = MoonvalleyVideoToVideoInferenceParams( + + # Only include motion_intensity for Motion Transfer + control_params = {} + if control_type == "Motion Transfer" and motion_intensity is not None: + control_params['motion_intensity'] = motion_intensity + + inference_params=MoonvalleyVideoToVideoInferenceParams( negative_prompt=negative_prompt, - steps=kwargs.get("steps"), seed=kwargs.get("seed"), - guidance_scale=kwargs.get("prompt_adherence"), - control_params={"motion_intensity": motion_intensity}, + control_params=control_params ) control = self.parseControlParameter(control_type) @@ -667,17 +693,16 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): ): validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) width_height = self.parseWidthHeightFromRes(kwargs.get("resolution")) - num_frames = get_total_frames_from_length() - inference_params = MoonvalleyTextToVideoInferenceParams( - negative_prompt=negative_prompt, - steps=kwargs.get("steps"), - seed=kwargs.get("seed"), - guidance_scale=kwargs.get("prompt_adherence"), - num_frames=num_frames, - width=width_height.get("width"), - height=width_height.get("height"), - ) + inference_params=MoonvalleyTextToVideoInferenceParams( + negative_prompt=negative_prompt, + steps=kwargs.get("steps"), + seed=kwargs.get("seed"), + guidance_scale=kwargs.get("prompt_adherence"), + num_frames=128, + width=width_height.get("width"), + height=width_height.get("height"), + ) request = MoonvalleyTextToVideoRequest( prompt_text=prompt, inference_params=inference_params ) @@ -707,22 +732,12 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): NODE_CLASS_MAPPINGS = { "MoonvalleyImg2VideoNode": MoonvalleyImg2VideoNode, "MoonvalleyTxt2VideoNode": MoonvalleyTxt2VideoNode, - # "MoonvalleyVideo2VideoNode": MoonvalleyVideo2VideoNode, + "MoonvalleyVideo2VideoNode": MoonvalleyVideo2VideoNode, } NODE_DISPLAY_NAME_MAPPINGS = { "MoonvalleyImg2VideoNode": "Moonvalley Marey Image to Video", "MoonvalleyTxt2VideoNode": "Moonvalley Marey Text to Video", - # "MoonvalleyVideo2VideoNode": "Moonvalley Marey Video to Video", + "MoonvalleyVideo2VideoNode": "Moonvalley Marey Video to Video", } - - -def get_total_frames_from_length(length="5s"): - # if length == '5s': - # return 128 - # elif length == '10s': - # return 256 - return 128 - # else: - # raise MoonvalleyApiError("length is required") From 
d0210fe2e5df25b329926e20e3be32451fd5b841 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Mon, 28 Jul 2025 19:55:02 +0800 Subject: [PATCH 0372/1073] Update template to 0.1.41 (#9079) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 33a59b4be..14a085a2c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.40 +comfyui-workflow-templates==0.1.41 comfyui-embedded-docs==0.2.4 torch torchsde From a88788dce6b0d7b5e2876c7cd0121b45e80f4ad8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 28 Jul 2025 05:00:23 -0700 Subject: [PATCH 0373/1073] Wan 2.2 support. (#9080) --- comfy/latent_formats.py | 76 ++++ comfy/ldm/wan/model.py | 16 +- comfy/ldm/wan/vae2_2.py | 726 ++++++++++++++++++++++++++++++++++++++ comfy/model_base.py | 30 +- comfy/model_detection.py | 2 + comfy/sd.py | 36 +- comfy/supported_models.py | 15 +- comfy_extras/nodes_wan.py | 44 +++ 8 files changed, 926 insertions(+), 19 deletions(-) create mode 100644 comfy/ldm/wan/vae2_2.py diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 82d9f9bb8..caf4991fc 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -457,6 +457,82 @@ class Wan21(LatentFormat): latents_std = self.latents_std.to(latent.device, latent.dtype) return latent * latents_std / self.scale_factor + latents_mean +class Wan22(Wan21): + latent_channels = 48 + latent_dimensions = 3 + + latent_rgb_factors = [ + [ 0.0119, 0.0103, 0.0046], + [-0.1062, -0.0504, 0.0165], + [ 0.0140, 0.0409, 0.0491], + [-0.0813, -0.0677, 0.0607], + [ 0.0656, 0.0851, 0.0808], + [ 0.0264, 0.0463, 0.0912], + [ 0.0295, 0.0326, 0.0590], + [-0.0244, -0.0270, 0.0025], + [ 0.0443, -0.0102, 0.0288], + [-0.0465, -0.0090, -0.0205], + [ 0.0359, 0.0236, 0.0082], + [-0.0776, 0.0854, 0.1048], + [ 0.0564, 0.0264, 0.0561], + [ 0.0006, 0.0594, 0.0418], + [-0.0319, -0.0542, -0.0637], + [-0.0268, 0.0024, 0.0260], + [ 0.0539, 0.0265, 0.0358], + [-0.0359, -0.0312, -0.0287], + [-0.0285, -0.1032, -0.1237], + [ 0.1041, 0.0537, 0.0622], + [-0.0086, -0.0374, -0.0051], + [ 0.0390, 0.0670, 0.2863], + [ 0.0069, 0.0144, 0.0082], + [ 0.0006, -0.0167, 0.0079], + [ 0.0313, -0.0574, -0.0232], + [-0.1454, -0.0902, -0.0481], + [ 0.0714, 0.0827, 0.0447], + [-0.0304, -0.0574, -0.0196], + [ 0.0401, 0.0384, 0.0204], + [-0.0758, -0.0297, -0.0014], + [ 0.0568, 0.1307, 0.1372], + [-0.0055, -0.0310, -0.0380], + [ 0.0239, -0.0305, 0.0325], + [-0.0663, -0.0673, -0.0140], + [-0.0416, -0.0047, -0.0023], + [ 0.0166, 0.0112, -0.0093], + [-0.0211, 0.0011, 0.0331], + [ 0.1833, 0.1466, 0.2250], + [-0.0368, 0.0370, 0.0295], + [-0.3441, -0.3543, -0.2008], + [-0.0479, -0.0489, -0.0420], + [-0.0660, -0.0153, 0.0800], + [-0.0101, 0.0068, 0.0156], + [-0.0690, -0.0452, -0.0927], + [-0.0145, 0.0041, 0.0015], + [ 0.0421, 0.0451, 0.0373], + [ 0.0504, -0.0483, -0.0356], + [-0.0837, 0.0168, 0.0055] + ] + + latent_rgb_factors_bias = [0.0317, -0.0878, -0.1388] + + def __init__(self): + self.scale_factor = 1.0 + self.latents_mean = torch.tensor([ + -0.2289, -0.0052, -0.1323, -0.2339, -0.2799, 0.0174, 0.1838, 0.1557, + -0.1382, 0.0542, 0.2813, 0.0891, 0.1570, -0.0098, 0.0375, -0.1825, + -0.2246, -0.1207, -0.0698, 0.5109, 0.2665, -0.2108, -0.2158, 0.2502, + -0.2055, -0.0322, 0.1109, 0.1567, -0.0729, 0.0899, -0.2799, -0.1230, + -0.0313, -0.1649, 0.0117, 0.0723, -0.2839, -0.2083, -0.0520, 0.3748, + 0.0152, 0.1957, 0.1433, 
-0.2944, 0.3573, -0.0548, -0.1681, -0.0667, + ]).view(1, self.latent_channels, 1, 1, 1) + self.latents_std = torch.tensor([ + 0.4765, 1.0364, 0.4514, 1.1677, 0.5313, 0.4990, 0.4818, 0.5013, + 0.8158, 1.0344, 0.5894, 1.0901, 0.6885, 0.6165, 0.8454, 0.4978, + 0.5759, 0.3523, 0.7135, 0.6804, 0.5833, 1.4146, 0.8986, 0.5659, + 0.7069, 0.5338, 0.4889, 0.4917, 0.4069, 0.4999, 0.6866, 0.4093, + 0.5709, 0.6065, 0.6415, 0.4944, 0.5726, 1.2042, 0.5458, 1.6887, + 0.3971, 1.0600, 0.3943, 0.5537, 0.5444, 0.4089, 0.7468, 0.7744 + ]).view(1, self.latent_channels, 1, 1, 1) + class Hunyuan3Dv2(LatentFormat): latent_channels = 64 latent_dimensions = 1 diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 1d6edb354..b9e47e9f7 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -201,8 +201,10 @@ class WanAttentionBlock(nn.Module): freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2] """ # assert e.dtype == torch.float32 - - e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device) + e).chunk(6, dim=1) + if e.ndim < 4: + e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device) + e).chunk(6, dim=1) + else: + e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device).unsqueeze(0) + e).unbind(2) # assert e[0].dtype == torch.float32 # self-attention @@ -325,7 +327,10 @@ class Head(nn.Module): e(Tensor): Shape [B, C] """ # assert e.dtype == torch.float32 - e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device) + e.unsqueeze(1)).chunk(2, dim=1) + if e.ndim < 3: + e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device) + e.unsqueeze(1)).chunk(2, dim=1) + else: + e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device).unsqueeze(0) + e.unsqueeze(2)).unbind(2) x = (self.head(self.norm(x) * (1 + e[1]) + e[0])) return x @@ -506,8 +511,9 @@ class WanModel(torch.nn.Module): # time embeddings e = self.time_embedding( - sinusoidal_embedding_1d(self.freq_dim, t).to(dtype=x[0].dtype)) - e0 = self.time_projection(e).unflatten(1, (6, self.dim)) + sinusoidal_embedding_1d(self.freq_dim, t.flatten()).to(dtype=x[0].dtype)) + e = e.reshape(t.shape[0], -1, e.shape[-1]) + e0 = self.time_projection(e).unflatten(2, (6, self.dim)) # context context = self.text_embedding(context) diff --git a/comfy/ldm/wan/vae2_2.py b/comfy/ldm/wan/vae2_2.py new file mode 100644 index 000000000..c2c150e10 --- /dev/null +++ b/comfy/ldm/wan/vae2_2.py @@ -0,0 +1,726 @@ +# original version: https://github.com/Wan-Video/Wan2.2/blob/main/wan/modules/vae2_2.py +# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. 
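+# Wan 2.2 VAE: a causal 3D video autoencoder. Unlike the 2.1 VAE, inputs are
+# patchified with patch_size=2 before encoding (3 RGB channels -> 12), giving
+# 16x spatial and 4x temporal compression overall; ComfyUI instantiates it
+# with a 48-channel latent space (see the comfy/sd.py hunk below).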
+ +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from .vae import AttentionBlock, CausalConv3d, RMS_norm + +import comfy.ops +ops = comfy.ops.disable_weight_init + +CACHE_T = 2 + + +class Resample(nn.Module): + + def __init__(self, dim, mode): + assert mode in ( + "none", + "upsample2d", + "upsample3d", + "downsample2d", + "downsample3d", + ) + super().__init__() + self.dim = dim + self.mode = mode + + # layers + if mode == "upsample2d": + self.resample = nn.Sequential( + nn.Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), + ops.Conv2d(dim, dim, 3, padding=1), + ) + elif mode == "upsample3d": + self.resample = nn.Sequential( + nn.Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), + ops.Conv2d(dim, dim, 3, padding=1), + # ops.Conv2d(dim, dim//2, 3, padding=1) + ) + self.time_conv = CausalConv3d( + dim, dim * 2, (3, 1, 1), padding=(1, 0, 0)) + elif mode == "downsample2d": + self.resample = nn.Sequential( + nn.ZeroPad2d((0, 1, 0, 1)), + ops.Conv2d(dim, dim, 3, stride=(2, 2))) + elif mode == "downsample3d": + self.resample = nn.Sequential( + nn.ZeroPad2d((0, 1, 0, 1)), + ops.Conv2d(dim, dim, 3, stride=(2, 2))) + self.time_conv = CausalConv3d( + dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0)) + else: + self.resample = nn.Identity() + + def forward(self, x, feat_cache=None, feat_idx=[0]): + b, c, t, h, w = x.size() + if self.mode == "upsample3d": + if feat_cache is not None: + idx = feat_idx[0] + if feat_cache[idx] is None: + feat_cache[idx] = "Rep" + feat_idx[0] += 1 + else: + cache_x = x[:, :, -CACHE_T:, :, :].clone() + if (cache_x.shape[2] < 2 and feat_cache[idx] is not None and + feat_cache[idx] != "Rep"): + # cache last frame of last two chunk + cache_x = torch.cat( + [ + feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to( + cache_x.device), + cache_x, + ], + dim=2, + ) + if (cache_x.shape[2] < 2 and feat_cache[idx] is not None and + feat_cache[idx] == "Rep"): + cache_x = torch.cat( + [ + torch.zeros_like(cache_x).to(cache_x.device), + cache_x + ], + dim=2, + ) + if feat_cache[idx] == "Rep": + x = self.time_conv(x) + else: + x = self.time_conv(x, feat_cache[idx]) + feat_cache[idx] = cache_x + feat_idx[0] += 1 + x = x.reshape(b, 2, c, t, h, w) + x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]), + 3) + x = x.reshape(b, c, t * 2, h, w) + t = x.shape[2] + x = rearrange(x, "b c t h w -> (b t) c h w") + x = self.resample(x) + x = rearrange(x, "(b t) c h w -> b c t h w", t=t) + + if self.mode == "downsample3d": + if feat_cache is not None: + idx = feat_idx[0] + if feat_cache[idx] is None: + feat_cache[idx] = x.clone() + feat_idx[0] += 1 + else: + cache_x = x[:, :, -1:, :, :].clone() + x = self.time_conv( + torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2)) + feat_cache[idx] = cache_x + feat_idx[0] += 1 + return x + + +class ResidualBlock(nn.Module): + + def __init__(self, in_dim, out_dim, dropout=0.0): + super().__init__() + self.in_dim = in_dim + self.out_dim = out_dim + + # layers + self.residual = nn.Sequential( + RMS_norm(in_dim, images=False), + nn.SiLU(), + CausalConv3d(in_dim, out_dim, 3, padding=1), + RMS_norm(out_dim, images=False), + nn.SiLU(), + nn.Dropout(dropout), + CausalConv3d(out_dim, out_dim, 3, padding=1), + ) + self.shortcut = ( + CausalConv3d(in_dim, out_dim, 1) + if in_dim != out_dim else nn.Identity()) + + def forward(self, x, feat_cache=None, feat_idx=[0]): + h = self.shortcut(x) + for layer in self.residual: + if isinstance(layer, CausalConv3d) and feat_cache is not None: + idx = 
feat_idx[0] + cache_x = x[:, :, -CACHE_T:, :, :].clone() + if cache_x.shape[2] < 2 and feat_cache[idx] is not None: + # cache last frame of last two chunk + cache_x = torch.cat( + [ + feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to( + cache_x.device), + cache_x, + ], + dim=2, + ) + x = layer(x, feat_cache[idx]) + feat_cache[idx] = cache_x + feat_idx[0] += 1 + else: + x = layer(x) + return x + h + + +def patchify(x, patch_size): + if patch_size == 1: + return x + if x.dim() == 4: + x = rearrange( + x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size, r=patch_size) + elif x.dim() == 5: + x = rearrange( + x, + "b c f (h q) (w r) -> b (c r q) f h w", + q=patch_size, + r=patch_size, + ) + else: + raise ValueError(f"Invalid input shape: {x.shape}") + + return x + + +def unpatchify(x, patch_size): + if patch_size == 1: + return x + + if x.dim() == 4: + x = rearrange( + x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size, r=patch_size) + elif x.dim() == 5: + x = rearrange( + x, + "b (c r q) f h w -> b c f (h q) (w r)", + q=patch_size, + r=patch_size, + ) + return x + + +class AvgDown3D(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + factor_t, + factor_s=1, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.factor_t = factor_t + self.factor_s = factor_s + self.factor = self.factor_t * self.factor_s * self.factor_s + + assert in_channels * self.factor % out_channels == 0 + self.group_size = in_channels * self.factor // out_channels + + def forward(self, x: torch.Tensor) -> torch.Tensor: + pad_t = (self.factor_t - x.shape[2] % self.factor_t) % self.factor_t + pad = (0, 0, 0, 0, pad_t, 0) + x = F.pad(x, pad) + B, C, T, H, W = x.shape + x = x.view( + B, + C, + T // self.factor_t, + self.factor_t, + H // self.factor_s, + self.factor_s, + W // self.factor_s, + self.factor_s, + ) + x = x.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous() + x = x.view( + B, + C * self.factor, + T // self.factor_t, + H // self.factor_s, + W // self.factor_s, + ) + x = x.view( + B, + self.out_channels, + self.group_size, + T // self.factor_t, + H // self.factor_s, + W // self.factor_s, + ) + x = x.mean(dim=2) + return x + + +class DupUp3D(nn.Module): + + def __init__( + self, + in_channels: int, + out_channels: int, + factor_t, + factor_s=1, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + + self.factor_t = factor_t + self.factor_s = factor_s + self.factor = self.factor_t * self.factor_s * self.factor_s + + assert out_channels * self.factor % in_channels == 0 + self.repeats = out_channels * self.factor // in_channels + + def forward(self, x: torch.Tensor, first_chunk=False) -> torch.Tensor: + x = x.repeat_interleave(self.repeats, dim=1) + x = x.view( + x.size(0), + self.out_channels, + self.factor_t, + self.factor_s, + self.factor_s, + x.size(2), + x.size(3), + x.size(4), + ) + x = x.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous() + x = x.view( + x.size(0), + self.out_channels, + x.size(2) * self.factor_t, + x.size(4) * self.factor_s, + x.size(6) * self.factor_s, + ) + if first_chunk: + x = x[:, :, self.factor_t - 1:, :, :] + return x + + +class Down_ResidualBlock(nn.Module): + + def __init__(self, + in_dim, + out_dim, + dropout, + mult, + temperal_downsample=False, + down_flag=False): + super().__init__() + + # Shortcut path with downsample + self.avg_shortcut = AvgDown3D( + in_dim, + out_dim, + factor_t=2 if temperal_downsample else 1, + factor_s=2 if down_flag else 1, + ) + + # Main path with residual blocks and 
downsample + downsamples = [] + for _ in range(mult): + downsamples.append(ResidualBlock(in_dim, out_dim, dropout)) + in_dim = out_dim + + # Add the final downsample block + if down_flag: + mode = "downsample3d" if temperal_downsample else "downsample2d" + downsamples.append(Resample(out_dim, mode=mode)) + + self.downsamples = nn.Sequential(*downsamples) + + def forward(self, x, feat_cache=None, feat_idx=[0]): + x_copy = x.clone() + for module in self.downsamples: + x = module(x, feat_cache, feat_idx) + + return x + self.avg_shortcut(x_copy) + + +class Up_ResidualBlock(nn.Module): + + def __init__(self, + in_dim, + out_dim, + dropout, + mult, + temperal_upsample=False, + up_flag=False): + super().__init__() + # Shortcut path with upsample + if up_flag: + self.avg_shortcut = DupUp3D( + in_dim, + out_dim, + factor_t=2 if temperal_upsample else 1, + factor_s=2 if up_flag else 1, + ) + else: + self.avg_shortcut = None + + # Main path with residual blocks and upsample + upsamples = [] + for _ in range(mult): + upsamples.append(ResidualBlock(in_dim, out_dim, dropout)) + in_dim = out_dim + + # Add the final upsample block + if up_flag: + mode = "upsample3d" if temperal_upsample else "upsample2d" + upsamples.append(Resample(out_dim, mode=mode)) + + self.upsamples = nn.Sequential(*upsamples) + + def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False): + x_main = x.clone() + for module in self.upsamples: + x_main = module(x_main, feat_cache, feat_idx) + if self.avg_shortcut is not None: + x_shortcut = self.avg_shortcut(x, first_chunk) + return x_main + x_shortcut + else: + return x_main + + +class Encoder3d(nn.Module): + + def __init__( + self, + dim=128, + z_dim=4, + dim_mult=[1, 2, 4, 4], + num_res_blocks=2, + attn_scales=[], + temperal_downsample=[True, True, False], + dropout=0.0, + ): + super().__init__() + self.dim = dim + self.z_dim = z_dim + self.dim_mult = dim_mult + self.num_res_blocks = num_res_blocks + self.attn_scales = attn_scales + self.temperal_downsample = temperal_downsample + + # dimensions + dims = [dim * u for u in [1] + dim_mult] + scale = 1.0 + + # init block + self.conv1 = CausalConv3d(12, dims[0], 3, padding=1) + + # downsample blocks + downsamples = [] + for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])): + t_down_flag = ( + temperal_downsample[i] + if i < len(temperal_downsample) else False) + downsamples.append( + Down_ResidualBlock( + in_dim=in_dim, + out_dim=out_dim, + dropout=dropout, + mult=num_res_blocks, + temperal_downsample=t_down_flag, + down_flag=i != len(dim_mult) - 1, + )) + scale /= 2.0 + self.downsamples = nn.Sequential(*downsamples) + + # middle blocks + self.middle = nn.Sequential( + ResidualBlock(out_dim, out_dim, dropout), + AttentionBlock(out_dim), + ResidualBlock(out_dim, out_dim, dropout), + ) + + # # output blocks + self.head = nn.Sequential( + RMS_norm(out_dim, images=False), + nn.SiLU(), + CausalConv3d(out_dim, z_dim, 3, padding=1), + ) + + def forward(self, x, feat_cache=None, feat_idx=[0]): + + if feat_cache is not None: + idx = feat_idx[0] + cache_x = x[:, :, -CACHE_T:, :, :].clone() + if cache_x.shape[2] < 2 and feat_cache[idx] is not None: + cache_x = torch.cat( + [ + feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to( + cache_x.device), + cache_x, + ], + dim=2, + ) + x = self.conv1(x, feat_cache[idx]) + feat_cache[idx] = cache_x + feat_idx[0] += 1 + else: + x = self.conv1(x) + + ## downsamples + for layer in self.downsamples: + if feat_cache is not None: + x = layer(x, feat_cache, feat_idx) + else: + x = layer(x) + + 
## middle + for layer in self.middle: + if isinstance(layer, ResidualBlock) and feat_cache is not None: + x = layer(x, feat_cache, feat_idx) + else: + x = layer(x) + + ## head + for layer in self.head: + if isinstance(layer, CausalConv3d) and feat_cache is not None: + idx = feat_idx[0] + cache_x = x[:, :, -CACHE_T:, :, :].clone() + if cache_x.shape[2] < 2 and feat_cache[idx] is not None: + cache_x = torch.cat( + [ + feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to( + cache_x.device), + cache_x, + ], + dim=2, + ) + x = layer(x, feat_cache[idx]) + feat_cache[idx] = cache_x + feat_idx[0] += 1 + else: + x = layer(x) + + return x + + +class Decoder3d(nn.Module): + + def __init__( + self, + dim=128, + z_dim=4, + dim_mult=[1, 2, 4, 4], + num_res_blocks=2, + attn_scales=[], + temperal_upsample=[False, True, True], + dropout=0.0, + ): + super().__init__() + self.dim = dim + self.z_dim = z_dim + self.dim_mult = dim_mult + self.num_res_blocks = num_res_blocks + self.attn_scales = attn_scales + self.temperal_upsample = temperal_upsample + + # dimensions + dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]] + # init block + self.conv1 = CausalConv3d(z_dim, dims[0], 3, padding=1) + + # middle blocks + self.middle = nn.Sequential( + ResidualBlock(dims[0], dims[0], dropout), + AttentionBlock(dims[0]), + ResidualBlock(dims[0], dims[0], dropout), + ) + + # upsample blocks + upsamples = [] + for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])): + t_up_flag = temperal_upsample[i] if i < len( + temperal_upsample) else False + upsamples.append( + Up_ResidualBlock( + in_dim=in_dim, + out_dim=out_dim, + dropout=dropout, + mult=num_res_blocks + 1, + temperal_upsample=t_up_flag, + up_flag=i != len(dim_mult) - 1, + )) + self.upsamples = nn.Sequential(*upsamples) + + # output blocks + self.head = nn.Sequential( + RMS_norm(out_dim, images=False), + nn.SiLU(), + CausalConv3d(out_dim, 12, 3, padding=1), + ) + + def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False): + if feat_cache is not None: + idx = feat_idx[0] + cache_x = x[:, :, -CACHE_T:, :, :].clone() + if cache_x.shape[2] < 2 and feat_cache[idx] is not None: + cache_x = torch.cat( + [ + feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to( + cache_x.device), + cache_x, + ], + dim=2, + ) + x = self.conv1(x, feat_cache[idx]) + feat_cache[idx] = cache_x + feat_idx[0] += 1 + else: + x = self.conv1(x) + + for layer in self.middle: + if isinstance(layer, ResidualBlock) and feat_cache is not None: + x = layer(x, feat_cache, feat_idx) + else: + x = layer(x) + + ## upsamples + for layer in self.upsamples: + if feat_cache is not None: + x = layer(x, feat_cache, feat_idx, first_chunk) + else: + x = layer(x) + + ## head + for layer in self.head: + if isinstance(layer, CausalConv3d) and feat_cache is not None: + idx = feat_idx[0] + cache_x = x[:, :, -CACHE_T:, :, :].clone() + if cache_x.shape[2] < 2 and feat_cache[idx] is not None: + cache_x = torch.cat( + [ + feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to( + cache_x.device), + cache_x, + ], + dim=2, + ) + x = layer(x, feat_cache[idx]) + feat_cache[idx] = cache_x + feat_idx[0] += 1 + else: + x = layer(x) + return x + + +def count_conv3d(model): + count = 0 + for m in model.modules(): + if isinstance(m, CausalConv3d): + count += 1 + return count + + +class WanVAE(nn.Module): + + def __init__( + self, + dim=160, + dec_dim=256, + z_dim=16, + dim_mult=[1, 2, 4, 4], + num_res_blocks=2, + attn_scales=[], + temperal_downsample=[True, True, False], + dropout=0.0, + ): + super().__init__() + self.dim = dim 
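+        # encoder width (dim, 160 by default) and decoder width (dec_dim,
+        # 256 by default) differ here, unlike the single-width 2.1 VAE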
+ self.z_dim = z_dim + self.dim_mult = dim_mult + self.num_res_blocks = num_res_blocks + self.attn_scales = attn_scales + self.temperal_downsample = temperal_downsample + self.temperal_upsample = temperal_downsample[::-1] + + # modules + self.encoder = Encoder3d( + dim, + z_dim * 2, + dim_mult, + num_res_blocks, + attn_scales, + self.temperal_downsample, + dropout, + ) + self.conv1 = CausalConv3d(z_dim * 2, z_dim * 2, 1) + self.conv2 = CausalConv3d(z_dim, z_dim, 1) + self.decoder = Decoder3d( + dec_dim, + z_dim, + dim_mult, + num_res_blocks, + attn_scales, + self.temperal_upsample, + dropout, + ) + + def encode(self, x): + self.clear_cache() + x = patchify(x, patch_size=2) + t = x.shape[2] + iter_ = 1 + (t - 1) // 4 + for i in range(iter_): + self._enc_conv_idx = [0] + if i == 0: + out = self.encoder( + x[:, :, :1, :, :], + feat_cache=self._enc_feat_map, + feat_idx=self._enc_conv_idx, + ) + else: + out_ = self.encoder( + x[:, :, 1 + 4 * (i - 1):1 + 4 * i, :, :], + feat_cache=self._enc_feat_map, + feat_idx=self._enc_conv_idx, + ) + out = torch.cat([out, out_], 2) + mu, log_var = self.conv1(out).chunk(2, dim=1) + self.clear_cache() + return mu + + def decode(self, z): + self.clear_cache() + iter_ = z.shape[2] + x = self.conv2(z) + for i in range(iter_): + self._conv_idx = [0] + if i == 0: + out = self.decoder( + x[:, :, i:i + 1, :, :], + feat_cache=self._feat_map, + feat_idx=self._conv_idx, + first_chunk=True, + ) + else: + out_ = self.decoder( + x[:, :, i:i + 1, :, :], + feat_cache=self._feat_map, + feat_idx=self._conv_idx, + ) + out = torch.cat([out, out_], 2) + out = unpatchify(out, patch_size=2) + self.clear_cache() + return out + + def reparameterize(self, mu, log_var): + std = torch.exp(0.5 * log_var) + eps = torch.randn_like(std) + return eps * std + mu + + def sample(self, imgs, deterministic=False): + mu, log_var = self.encode(imgs) + if deterministic: + return mu + std = torch.exp(0.5 * log_var.clamp(-30.0, 20.0)) + return mu + std * torch.randn_like(std) + + def clear_cache(self): + self._conv_num = count_conv3d(self.decoder) + self._conv_idx = [0] + self._feat_map = [None] * self._conv_num + # cache encode + self._enc_conv_num = count_conv3d(self.encoder) + self._enc_conv_idx = [0] + self._enc_feat_map = [None] * self._enc_conv_num diff --git a/comfy/model_base.py b/comfy/model_base.py index 4392355ea..d019b991a 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1097,8 +1097,9 @@ class WAN21(BaseModel): image[:, i: i + 16] = self.process_latent_in(image[:, i: i + 16]) image = utils.resize_to_batch_size(image, noise.shape[0]) - if not self.image_to_video or extra_channels == image.shape[1]: - return image + if extra_channels != image.shape[1] + 4: + if not self.image_to_video or extra_channels == image.shape[1]: + return image if image.shape[1] > (extra_channels - 4): image = image[:, :(extra_channels - 4)] @@ -1182,6 +1183,31 @@ class WAN21_Camera(WAN21): out['camera_conditions'] = comfy.conds.CONDRegular(camera_conditions) return out +class WAN22(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel) + self.image_to_video = image_to_video + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + + denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", 
None)) + if denoise_mask is not None: + out["denoise_mask"] = comfy.conds.CONDRegular(denoise_mask) + return out + + def process_timestep(self, timestep, x, denoise_mask=None, **kwargs): + if denoise_mask is None: + return timestep + temp_ts = (torch.mean(denoise_mask[:, :, :, ::2, ::2], dim=1, keepdim=True) * timestep.view([timestep.shape[0]] + [1] * (denoise_mask.ndim - 1))).reshape(timestep.shape[0], -1) + return temp_ts + + def scale_latent_inpaint(self, sigma, noise, latent_image, **kwargs): + return latent_image + class Hunyuan3Dv2(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hunyuan3d.model.Hunyuan3Dv2) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 18232ade3..9fc1f42de 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -346,7 +346,9 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config = {} dit_config["image_model"] = "wan2.1" dim = state_dict['{}head.modulation'.format(key_prefix)].shape[-1] + out_dim = state_dict['{}head.head.weight'.format(key_prefix)].shape[0] // 4 dit_config["dim"] = dim + dit_config["out_dim"] = out_dim dit_config["num_heads"] = dim // 128 dit_config["ffn_dim"] = state_dict['{}blocks.0.ffn.0.weight'.format(key_prefix)].shape[0] dit_config["num_layers"] = count_blocks(state_dict_keys, '{}blocks.'.format(key_prefix) + '{}.') diff --git a/comfy/sd.py b/comfy/sd.py index 8081b167c..e0498e585 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -14,6 +14,7 @@ import comfy.ldm.genmo.vae.model import comfy.ldm.lightricks.vae.causal_video_autoencoder import comfy.ldm.cosmos.vae import comfy.ldm.wan.vae +import comfy.ldm.wan.vae2_2 import comfy.ldm.hunyuan3d.vae import comfy.ldm.ace.vae.music_dcae_pipeline import yaml @@ -420,17 +421,30 @@ class VAE: self.memory_used_encode = lambda shape, dtype: (50 * (round((shape[2] + 7) / 8) * 8) * shape[3] * shape[4]) * model_management.dtype_size(dtype) self.working_dtypes = [torch.bfloat16, torch.float32] elif "decoder.middle.0.residual.0.gamma" in sd: - self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 8, 8) - self.upscale_index_formula = (4, 8, 8) - self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 8, 8) - self.downscale_index_formula = (4, 8, 8) - self.latent_dim = 3 - self.latent_channels = 16 - ddconfig = {"dim": 96, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "dropout": 0.0} - self.first_stage_model = comfy.ldm.wan.vae.WanVAE(**ddconfig) - self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] - self.memory_used_encode = lambda shape, dtype: 6000 * shape[3] * shape[4] * model_management.dtype_size(dtype) - self.memory_used_decode = lambda shape, dtype: 7000 * shape[3] * shape[4] * (8 * 8) * model_management.dtype_size(dtype) + if "decoder.upsamples.0.upsamples.0.residual.2.weight" in sd: # Wan 2.2 VAE + self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 16, 16) + self.upscale_index_formula = (4, 16, 16) + self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 16, 16) + self.downscale_index_formula = (4, 16, 16) + self.latent_dim = 3 + self.latent_channels = 48 + ddconfig = {"dim": 160, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "dropout": 0.0} + self.first_stage_model = comfy.ldm.wan.vae2_2.WanVAE(**ddconfig) 
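+                # supported compute dtypes and rough per-pixel estimates of
+                # encode/decode memory use (decode includes the 16x16 spatial upscale)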
+ self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] + self.memory_used_encode = lambda shape, dtype: 3300 * shape[3] * shape[4] * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: 8000 * shape[3] * shape[4] * (16 * 16) * model_management.dtype_size(dtype) + else: # Wan 2.1 VAE + self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 8, 8) + self.upscale_index_formula = (4, 8, 8) + self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 8, 8) + self.downscale_index_formula = (4, 8, 8) + self.latent_dim = 3 + self.latent_channels = 16 + ddconfig = {"dim": 96, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "dropout": 0.0} + self.first_stage_model = comfy.ldm.wan.vae.WanVAE(**ddconfig) + self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] + self.memory_used_encode = lambda shape, dtype: 6000 * shape[3] * shape[4] * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: 7000 * shape[3] * shape[4] * (8 * 8) * model_management.dtype_size(dtype) elif "geo_decoder.cross_attn_decoder.ln_1.bias" in sd: self.latent_dim = 1 ln_post = "geo_decoder.ln_post.weight" in sd diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 2ca3857f7..8f3f4652d 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1059,6 +1059,19 @@ class WAN21_Vace(WAN21_T2V): out = model_base.WAN21_Vace(self, image_to_video=False, device=device) return out +class WAN22_T2V(WAN21_T2V): + unet_config = { + "image_model": "wan2.1", + "model_type": "t2v", + "out_dim": 48, + } + + latent_format = latent_formats.Wan22 + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.WAN22(self, image_to_video=True, device=device) + return out + class Hunyuan3Dv2(supported_models_base.BASE): unet_config = { "image_model": "hunyuan3d2", @@ -1217,6 +1230,6 @@ class Omnigen2(supported_models_base.BASE): return supported_models_base.ClipTarget(comfy.text_encoders.omnigen2.Omnigen2Tokenizer, comfy.text_encoders.omnigen2.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 
d71908f31..0b92c68ac 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -685,6 +685,49 @@ class WanTrackToVideo: out_latent["samples"] = latent return (positive, negative, out_latent) + +class Wan22ImageToVideoLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": {"vae": ("VAE", ), + "width": ("INT", {"default": 1280, "min": 32, "max": nodes.MAX_RESOLUTION, "step": 32}), + "height": ("INT", {"default": 704, "min": 32, "max": nodes.MAX_RESOLUTION, "step": 32}), + "length": ("INT", {"default": 49, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + "optional": {"start_image": ("IMAGE", ), + }} + + + RETURN_TYPES = ("LATENT",) + FUNCTION = "encode" + + CATEGORY = "conditioning/inpaint" + + def encode(self, vae, width, height, length, batch_size, start_image=None): + latent = torch.zeros([1, 48, ((length - 1) // 4) + 1, height // 16, width // 16], device=comfy.model_management.intermediate_device()) + + if start_image is None: + out_latent = {} + out_latent["samples"] = latent + return (out_latent,) + + mask = torch.ones([latent.shape[0], 1, ((length - 1) // 4) + 1, latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device()) + + if start_image is not None: + start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + latent_temp = vae.encode(start_image) + latent[:, :, :latent_temp.shape[-3]] = latent_temp + mask[:, :, :latent_temp.shape[-3]] *= 0.0 + + out_latent = {} + latent_format = comfy.latent_formats.Wan22() + latent = latent_format.process_out(latent) * mask + latent * (1.0 - mask) + out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1)) + out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1)) + return (out_latent,) + + NODE_CLASS_MAPPINGS = { "WanTrackToVideo": WanTrackToVideo, "WanImageToVideo": WanImageToVideo, @@ -695,4 +738,5 @@ NODE_CLASS_MAPPINGS = { "TrimVideoLatent": TrimVideoLatent, "WanCameraImageToVideo": WanCameraImageToVideo, "WanPhantomSubjectToVideo": WanPhantomSubjectToVideo, + "Wan22ImageToVideoLatent": Wan22ImageToVideoLatent, } From 9f1388c0a38b9b6ebde0cdde904d94d709d3ca82 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 28 Jul 2025 05:01:53 -0700 Subject: [PATCH 0374/1073] Add wan2.2 to readme. 
(#9081)
---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 8a15136aa..befc4c006 100644
--- a/README.md
+++ b/README.md
@@ -77,6 +77,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith
    - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/)
    - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) and [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/)
    - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/)
+   - [Wan 2.2](https://comfyanonymous.github.io/ComfyUI_examples/wan22/)
 - Audio Models
    - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
    - [ACE Step](https://comfyanonymous.github.io/ComfyUI_examples/audio/)

From 5d4cc3ba1b412b9acacd37fd23d59e0e1654f83c Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 28 Jul 2025 08:04:04 -0400
Subject: [PATCH 0375/1073] ComfyUI 0.3.46

---
 comfyui_version.py | 2 +-
 pyproject.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index 180ecaf8a..315710dd2 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.45"
+__version__ = "0.3.46"
diff --git a/pyproject.toml b/pyproject.toml
index b1d6d9df6..59c4c70fb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.45"
+version = "0.3.46"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"

From c60dc4177c16d50da025bda4ac7fd513bb86e699 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 28 Jul 2025 11:48:19 -0700
Subject: [PATCH 0376/1073] Remove unnecessary clones in the wan2.2 VAE. (#9083)

---
 comfy/ldm/wan/vae2_2.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/comfy/ldm/wan/vae2_2.py b/comfy/ldm/wan/vae2_2.py
index c2c150e10..b9c2d1a26 100644
--- a/comfy/ldm/wan/vae2_2.py
+++ b/comfy/ldm/wan/vae2_2.py
@@ -136,7 +136,7 @@ class ResidualBlock(nn.Module):
             if in_dim != out_dim else nn.Identity())

     def forward(self, x, feat_cache=None, feat_idx=[0]):
-        h = self.shortcut(x)
+        old_x = x
         for layer in self.residual:
             if isinstance(layer, CausalConv3d) and feat_cache is not None:
                 idx = feat_idx[0]
@@ -156,7 +156,7 @@ class ResidualBlock(nn.Module):
                 feat_idx[0] += 1
             else:
                 x = layer(x)
-        return x + h
+        return x + self.shortcut(old_x)


 def patchify(x, patch_size):
@@ -327,7 +327,7 @@ class Down_ResidualBlock(nn.Module):
         self.downsamples = nn.Sequential(*downsamples)

     def forward(self, x, feat_cache=None, feat_idx=[0]):
-        x_copy = x.clone()
+        x_copy = x
         for module in self.downsamples:
             x = module(x, feat_cache, feat_idx)

@@ -369,7 +369,7 @@ class Up_ResidualBlock(nn.Module):
         self.upsamples = nn.Sequential(*upsamples)

     def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False):
-        x_main = x.clone()
+        x_main = x
         for module in self.upsamples:
             x_main = module(x_main, feat_cache, feat_idx)
         if self.avg_shortcut is not None:

From 7d593baf919f468670425c0d9068ead8a3e9b05f Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 29 Jul 2025 01:07:45 -0700
Subject: [PATCH 0377/1073] Extra reserved vram on large cards on windows. 
(#9093) --- comfy/model_management.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 232d363aa..9e6149d60 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -529,6 +529,8 @@ WINDOWS = any(platform.win32_ver()) EXTRA_RESERVED_VRAM = 400 * 1024 * 1024 if WINDOWS: EXTRA_RESERVED_VRAM = 600 * 1024 * 1024 #Windows is higher because of the shared vram issue + if total_vram > (15 * 1024): # more extra reserved vram on 16GB+ cards + EXTRA_RESERVED_VRAM += 100 * 1024 * 1024 if args.reserve_vram is not None: EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024 From dca6bdd4fa70770f910ea80e61c72e1f450530d9 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 29 Jul 2025 16:44:18 -0700 Subject: [PATCH 0378/1073] Make wan2.2 5B i2v take a lot less memory. (#9102) --- comfy/ldm/wan/model.py | 21 ++++++++++++++++----- comfy/model_base.py | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index b9e47e9f7..a93a13c86 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -146,6 +146,15 @@ WAN_CROSSATTENTION_CLASSES = { } +def repeat_e(e, x): + repeats = 1 + if e.shape[1] > 1: + repeats = x.shape[1] // e.shape[1] + if repeats == 1: + return e + return torch.repeat_interleave(e, repeats, dim=1) + + class WanAttentionBlock(nn.Module): def __init__(self, @@ -201,6 +210,7 @@ class WanAttentionBlock(nn.Module): freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2] """ # assert e.dtype == torch.float32 + if e.ndim < 4: e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device) + e).chunk(6, dim=1) else: @@ -209,15 +219,15 @@ class WanAttentionBlock(nn.Module): # self-attention y = self.self_attn( - self.norm1(x) * (1 + e[1]) + e[0], + self.norm1(x) * (1 + repeat_e(e[1], x)) + repeat_e(e[0], x), freqs) - x = x + y * e[2] + x = x + y * repeat_e(e[2], x) # cross-attention & ffn x = x + self.cross_attn(self.norm3(x), context, context_img_len=context_img_len) - y = self.ffn(self.norm2(x) * (1 + e[4]) + e[3]) - x = x + y * e[5] + y = self.ffn(self.norm2(x) * (1 + repeat_e(e[4], x)) + repeat_e(e[3], x)) + x = x + y * repeat_e(e[5], x) return x @@ -331,7 +341,8 @@ class Head(nn.Module): e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device) + e.unsqueeze(1)).chunk(2, dim=1) else: e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device).unsqueeze(0) + e.unsqueeze(2)).unbind(2) - x = (self.head(self.norm(x) * (1 + e[1]) + e[0])) + + x = (self.head(self.norm(x) * (1 + repeat_e(e[1], x)) + repeat_e(e[0], x))) return x diff --git a/comfy/model_base.py b/comfy/model_base.py index d019b991a..6b7978949 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1202,7 +1202,7 @@ class WAN22(BaseModel): def process_timestep(self, timestep, x, denoise_mask=None, **kwargs): if denoise_mask is None: return timestep - temp_ts = (torch.mean(denoise_mask[:, :, :, ::2, ::2], dim=1, keepdim=True) * timestep.view([timestep.shape[0]] + [1] * (denoise_mask.ndim - 1))).reshape(timestep.shape[0], -1) + temp_ts = (torch.mean(denoise_mask[:, :, :, :, :], dim=(1, 3, 4), keepdim=True) * timestep.view([timestep.shape[0]] + [1] * (denoise_mask.ndim - 1))).reshape(timestep.shape[0], -1) return temp_ts def scale_latent_inpaint(self, sigma, noise, latent_image, **kwargs): From 2f74e17975696d829af455845c584574bbc85774 Mon Sep 17 
00:00:00 2001 From: comfyanonymous Date: Tue, 29 Jul 2025 20:08:25 -0400 Subject: [PATCH 0379/1073] ComfyUI version 0.3.47 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 315710dd2..20a2e892a 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.46" +__version__ = "0.3.47" diff --git a/pyproject.toml b/pyproject.toml index 59c4c70fb..723c93069 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.46" +version = "0.3.47" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 0a3d062e0660741146d50f6601e3eeca211d92d5 Mon Sep 17 00:00:00 2001 From: guill Date: Tue, 29 Jul 2025 19:17:22 -0700 Subject: [PATCH 0380/1073] ComfyAPI Core v0.0.2 (#8962) * ComfyAPI Core v0.0.2 * Respond to PR feedback * Fix Python 3.9 errors * Fix missing backward compatibility proxy * Reorganize types a bit The input types, input impls, and utility types are now all available in the versioned API. See the change in `comfy_extras/nodes_video.py` for an example of their usage. * Remove the need for `--generate-api-stubs` * Fix generated stubs differing by Python version * Fix ruff formatting issues --- comfy_api/generate_api_stubs.py | 86 ++ comfy_api/input/__init__.py | 12 +- comfy_api/input/basic_types.py | 32 +- comfy_api/input/video_types.py | 89 +- comfy_api/input_impl/__init__.py | 4 +- comfy_api/input_impl/video_types.py | 326 +----- comfy_api/internal/__init__.py | 7 + comfy_api/internal/api_registry.py | 39 + comfy_api/internal/async_to_sync.py | 987 ++++++++++++++++++ comfy_api/internal/singleton.py | 33 + comfy_api/latest/__init__.py | 106 ++ comfy_api/latest/_input/__init__.py | 10 + comfy_api/latest/_input/basic_types.py | 42 + comfy_api/latest/_input/video_types.py | 85 ++ comfy_api/latest/_input_impl/__init__.py | 7 + comfy_api/latest/_input_impl/video_types.py | 324 ++++++ comfy_api/latest/_util/__init__.py | 8 + comfy_api/latest/_util/video_types.py | 52 + .../latest/generated/ComfyAPISyncStub.pyi | 20 + comfy_api/util.py | 8 + comfy_api/util/__init__.py | 4 +- comfy_api/util/video_types.py | 61 +- comfy_api/v0_0_1/__init__.py | 42 + .../v0_0_1/generated/ComfyAPISyncStub.pyi | 20 + comfy_api/v0_0_2/__init__.py | 43 + .../v0_0_2/generated/ComfyAPISyncStub.pyi | 20 + comfy_api/version_list.py | 12 + comfy_api_nodes/apis/request_logger.py | 2 + comfy_api_nodes/nodes_gemini.py | 2 + comfy_execution/progress.py | 16 +- comfy_extras/nodes_video.py | 23 +- main.py | 4 +- nodes.py | 36 +- pyproject.toml | 2 +- .../testing_nodes/testing-pack/__init__.py | 4 +- .../testing-pack/api_test_nodes.py | 78 ++ 36 files changed, 2128 insertions(+), 518 deletions(-) create mode 100644 comfy_api/generate_api_stubs.py create mode 100644 comfy_api/internal/__init__.py create mode 100644 comfy_api/internal/api_registry.py create mode 100644 comfy_api/internal/async_to_sync.py create mode 100644 comfy_api/internal/singleton.py create mode 100644 comfy_api/latest/__init__.py create mode 100644 comfy_api/latest/_input/__init__.py create mode 100644 comfy_api/latest/_input/basic_types.py create mode 100644 comfy_api/latest/_input/video_types.py create mode 100644 comfy_api/latest/_input_impl/__init__.py create mode 100644 comfy_api/latest/_input_impl/video_types.py create mode 100644 
comfy_api/latest/_util/__init__.py create mode 100644 comfy_api/latest/_util/video_types.py create mode 100644 comfy_api/latest/generated/ComfyAPISyncStub.pyi create mode 100644 comfy_api/util.py create mode 100644 comfy_api/v0_0_1/__init__.py create mode 100644 comfy_api/v0_0_1/generated/ComfyAPISyncStub.pyi create mode 100644 comfy_api/v0_0_2/__init__.py create mode 100644 comfy_api/v0_0_2/generated/ComfyAPISyncStub.pyi create mode 100644 comfy_api/version_list.py create mode 100644 tests/inference/testing_nodes/testing-pack/api_test_nodes.py diff --git a/comfy_api/generate_api_stubs.py b/comfy_api/generate_api_stubs.py new file mode 100644 index 000000000..604a7eced --- /dev/null +++ b/comfy_api/generate_api_stubs.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +""" +Script to generate .pyi stub files for the synchronous API wrappers. +This allows generating stubs without running the full ComfyUI application. +""" + +import os +import sys +import logging +import importlib + +# Add ComfyUI to path so we can import modules +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from comfy_api.internal.async_to_sync import AsyncToSyncConverter +from comfy_api.version_list import supported_versions + + +def generate_stubs_for_module(module_name: str) -> None: + """Generate stub files for a specific module that exports ComfyAPI and ComfyAPISync.""" + try: + # Import the module + module = importlib.import_module(module_name) + + # Check if module has ComfyAPISync (the sync wrapper) + if hasattr(module, "ComfyAPISync"): + # Module already has a sync class + api_class = getattr(module, "ComfyAPI", None) + sync_class = getattr(module, "ComfyAPISync") + + if api_class: + # Generate the stub file + AsyncToSyncConverter.generate_stub_file(api_class, sync_class) + logging.info(f"Generated stub file for {module_name}") + else: + logging.warning( + f"Module {module_name} has ComfyAPISync but no ComfyAPI" + ) + + elif hasattr(module, "ComfyAPI"): + # Module only has async API, need to create sync wrapper first + from comfy_api.internal.async_to_sync import create_sync_class + + api_class = getattr(module, "ComfyAPI") + sync_class = create_sync_class(api_class) + + # Generate the stub file + AsyncToSyncConverter.generate_stub_file(api_class, sync_class) + logging.info(f"Generated stub file for {module_name}") + else: + logging.warning( + f"Module {module_name} does not export ComfyAPI or ComfyAPISync" + ) + + except Exception as e: + logging.error(f"Failed to generate stub for {module_name}: {e}") + import traceback + + traceback.print_exc() + + +def main(): + """Main function to generate all API stub files.""" + logging.basicConfig(level=logging.INFO) + + logging.info("Starting stub generation...") + + # Dynamically get module names from supported_versions + api_modules = [] + for api_class in supported_versions: + # Extract module name from the class + module_name = api_class.__module__ + if module_name not in api_modules: + api_modules.append(module_name) + + logging.info(f"Found {len(api_modules)} API modules: {api_modules}") + + # Generate stubs for each module + for module_name in api_modules: + generate_stubs_for_module(module_name) + + logging.info("Stub generation complete!") + + +if __name__ == "__main__": + main() diff --git a/comfy_api/input/__init__.py b/comfy_api/input/__init__.py index 66667946f..68ff78270 100644 --- a/comfy_api/input/__init__.py +++ b/comfy_api/input/__init__.py @@ -1,8 +1,16 @@ -from .basic_types import ImageInput, AudioInput -from .video_types 
import VideoInput +# This file only exists for backwards compatibility. +from comfy_api.latest._input import ( + ImageInput, + AudioInput, + MaskInput, + LatentInput, + VideoInput, +) __all__ = [ "ImageInput", "AudioInput", + "MaskInput", + "LatentInput", "VideoInput", ] diff --git a/comfy_api/input/basic_types.py b/comfy_api/input/basic_types.py index 033fb7e27..5eadce86a 100644 --- a/comfy_api/input/basic_types.py +++ b/comfy_api/input/basic_types.py @@ -1,20 +1,14 @@ -import torch -from typing import TypedDict - -ImageInput = torch.Tensor -""" -An image in format [B, H, W, C] where B is the batch size, C is the number of channels, -""" - -class AudioInput(TypedDict): - """ - TypedDict representing audio input. - """ - - waveform: torch.Tensor - """ - Tensor in the format [B, C, T] where B is the batch size, C is the number of channels, - """ - - sample_rate: int +# This file only exists for backwards compatibility. +from comfy_api.latest._input.basic_types import ( + ImageInput, + AudioInput, + MaskInput, + LatentInput, +) +__all__ = [ + "ImageInput", + "AudioInput", + "MaskInput", + "LatentInput", +] diff --git a/comfy_api/input/video_types.py b/comfy_api/input/video_types.py index 5d95dc507..9ace78cbc 100644 --- a/comfy_api/input/video_types.py +++ b/comfy_api/input/video_types.py @@ -1,85 +1,6 @@ -from __future__ import annotations -from abc import ABC, abstractmethod -from typing import Optional, Union -import io -import av -from comfy_api.util import VideoContainer, VideoCodec, VideoComponents +# This file only exists for backwards compatibility. +from comfy_api.latest._input.video_types import VideoInput -class VideoInput(ABC): - """ - Abstract base class for video input types. - """ - - @abstractmethod - def get_components(self) -> VideoComponents: - """ - Abstract method to get the video components (images, audio, and frame rate). - - Returns: - VideoComponents containing images, audio, and frame rate - """ - pass - - @abstractmethod - def save_to( - self, - path: str, - format: VideoContainer = VideoContainer.AUTO, - codec: VideoCodec = VideoCodec.AUTO, - metadata: Optional[dict] = None - ): - """ - Abstract method to save the video input to a file. - """ - pass - - def get_stream_source(self) -> Union[str, io.BytesIO]: - """ - Get a streamable source for the video. This allows processing without - loading the entire video into memory. - - Returns: - Either a file path (str) or a BytesIO object that can be opened with av. - - Default implementation creates a BytesIO buffer, but subclasses should - override this for better performance when possible. - """ - buffer = io.BytesIO() - self.save_to(buffer) - buffer.seek(0) - return buffer - - # Provide a default implementation, but subclasses can provide optimized versions - # if possible. - def get_dimensions(self) -> tuple[int, int]: - """ - Returns the dimensions of the video input. - - Returns: - Tuple of (width, height) - """ - components = self.get_components() - return components.images.shape[2], components.images.shape[1] - - def get_duration(self) -> float: - """ - Returns the duration of the video in seconds. - - Returns: - Duration in seconds - """ - components = self.get_components() - frame_count = components.images.shape[0] - return float(frame_count / components.frame_rate) - - def get_container_format(self) -> str: - """ - Returns the container format of the video (e.g., 'mp4', 'mov', 'avi'). 
- - Returns: - Container format as string - """ - # Default implementation - subclasses should override for better performance - source = self.get_stream_source() - with av.open(source, mode="r") as container: - return container.format.name +__all__ = [ + "VideoInput", +] diff --git a/comfy_api/input_impl/__init__.py b/comfy_api/input_impl/__init__.py index 02901b8b9..b78ff0c08 100644 --- a/comfy_api/input_impl/__init__.py +++ b/comfy_api/input_impl/__init__.py @@ -1,7 +1,7 @@ -from .video_types import VideoFromFile, VideoFromComponents +# This file only exists for backwards compatibility. +from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents __all__ = [ - # Implementations "VideoFromFile", "VideoFromComponents", ] diff --git a/comfy_api/input_impl/video_types.py b/comfy_api/input_impl/video_types.py index 91e7c1bfa..bd2e56ad5 100644 --- a/comfy_api/input_impl/video_types.py +++ b/comfy_api/input_impl/video_types.py @@ -1,324 +1,2 @@ -from __future__ import annotations -from av.container import InputContainer -from av.subtitles.stream import SubtitleStream -from fractions import Fraction -from typing import Optional -from comfy_api.input import AudioInput -import av -import io -import json -import numpy as np -import torch -from comfy_api.input import VideoInput -from comfy_api.util import VideoContainer, VideoCodec, VideoComponents - - -def container_to_output_format(container_format: str | None) -> str | None: - """ - A container's `format` may be a comma-separated list of formats. - E.g., iso container's `format` may be `mov,mp4,m4a,3gp,3g2,mj2`. - However, writing to a file/stream with `av.open` requires a single format, - or `None` to auto-detect. - """ - if not container_format: - return None # Auto-detect - - if "," not in container_format: - return container_format - - formats = container_format.split(",") - return formats[0] - - -def get_open_write_kwargs( - dest: str | io.BytesIO, container_format: str, to_format: str | None -) -> dict: - """Get kwargs for writing a `VideoFromFile` to a file/stream with `av.open`""" - open_kwargs = { - "mode": "w", - # If isobmff, preserve custom metadata tags (workflow, prompt, extra_pnginfo) - "options": {"movflags": "use_metadata_tags"}, - } - - is_write_to_buffer = isinstance(dest, io.BytesIO) - if is_write_to_buffer: - # Set output format explicitly, since it cannot be inferred from file extension - if to_format == VideoContainer.AUTO: - to_format = container_format.lower() - elif isinstance(to_format, str): - to_format = to_format.lower() - open_kwargs["format"] = container_to_output_format(to_format) - - return open_kwargs - - -class VideoFromFile(VideoInput): - """ - Class representing video input from a file. - """ - - def __init__(self, file: str | io.BytesIO): - """ - Initialize the VideoFromFile object based off of either a path on disk or a BytesIO object - containing the file contents. - """ - self.__file = file - - def get_stream_source(self) -> str | io.BytesIO: - """ - Return the underlying file source for efficient streaming. - This avoids unnecessary memory copies when the source is already a file path. - """ - if isinstance(self.__file, io.BytesIO): - self.__file.seek(0) - return self.__file - - def get_dimensions(self) -> tuple[int, int]: - """ - Returns the dimensions of the video input. 
- - Returns: - Tuple of (width, height) - """ - if isinstance(self.__file, io.BytesIO): - self.__file.seek(0) # Reset the BytesIO object to the beginning - with av.open(self.__file, mode='r') as container: - for stream in container.streams: - if stream.type == 'video': - assert isinstance(stream, av.VideoStream) - return stream.width, stream.height - raise ValueError(f"No video stream found in file '{self.__file}'") - - def get_duration(self) -> float: - """ - Returns the duration of the video in seconds. - - Returns: - Duration in seconds - """ - if isinstance(self.__file, io.BytesIO): - self.__file.seek(0) - with av.open(self.__file, mode="r") as container: - if container.duration is not None: - return float(container.duration / av.time_base) - - # Fallback: calculate from frame count and frame rate - video_stream = next( - (s for s in container.streams if s.type == "video"), None - ) - if video_stream and video_stream.frames and video_stream.average_rate: - return float(video_stream.frames / video_stream.average_rate) - - # Last resort: decode frames to count them - if video_stream and video_stream.average_rate: - frame_count = 0 - container.seek(0) - for packet in container.demux(video_stream): - for _ in packet.decode(): - frame_count += 1 - if frame_count > 0: - return float(frame_count / video_stream.average_rate) - - raise ValueError(f"Could not determine duration for file '{self.__file}'") - - def get_container_format(self) -> str: - """ - Returns the container format of the video (e.g., 'mp4', 'mov', 'avi'). - - Returns: - Container format as string - """ - if isinstance(self.__file, io.BytesIO): - self.__file.seek(0) - with av.open(self.__file, mode='r') as container: - return container.format.name - - def get_components_internal(self, container: InputContainer) -> VideoComponents: - # Get video frames - frames = [] - for frame in container.decode(video=0): - img = frame.to_ndarray(format='rgb24') # shape: (H, W, 3) - img = torch.from_numpy(img) / 255.0 # shape: (H, W, 3) - frames.append(img) - - images = torch.stack(frames) if len(frames) > 0 else torch.zeros(0, 3, 0, 0) - - # Get frame rate - video_stream = next(s for s in container.streams if s.type == 'video') - frame_rate = Fraction(video_stream.average_rate) if video_stream and video_stream.average_rate else Fraction(1) - - # Get audio if available - audio = None - try: - container.seek(0) # Reset the container to the beginning - for stream in container.streams: - if stream.type != 'audio': - continue - assert isinstance(stream, av.AudioStream) - audio_frames = [] - for packet in container.demux(stream): - for frame in packet.decode(): - assert isinstance(frame, av.AudioFrame) - audio_frames.append(frame.to_ndarray()) # shape: (channels, samples) - if len(audio_frames) > 0: - audio_data = np.concatenate(audio_frames, axis=1) # shape: (channels, total_samples) - audio_tensor = torch.from_numpy(audio_data).unsqueeze(0) # shape: (1, channels, total_samples) - audio = AudioInput({ - "waveform": audio_tensor, - "sample_rate": int(stream.sample_rate) if stream.sample_rate else 1, - }) - except StopIteration: - pass # No audio stream - - metadata = container.metadata - return VideoComponents(images=images, audio=audio, frame_rate=frame_rate, metadata=metadata) - - def get_components(self) -> VideoComponents: - if isinstance(self.__file, io.BytesIO): - self.__file.seek(0) # Reset the BytesIO object to the beginning - with av.open(self.__file, mode='r') as container: - return self.get_components_internal(container) - raise 
ValueError(f"No video stream found in file '{self.__file}'") - - def save_to( - self, - path: str | io.BytesIO, - format: VideoContainer = VideoContainer.AUTO, - codec: VideoCodec = VideoCodec.AUTO, - metadata: Optional[dict] = None - ): - if isinstance(self.__file, io.BytesIO): - self.__file.seek(0) # Reset the BytesIO object to the beginning - with av.open(self.__file, mode='r') as container: - container_format = container.format.name - video_encoding = container.streams.video[0].codec.name if len(container.streams.video) > 0 else None - reuse_streams = True - if format != VideoContainer.AUTO and format not in container_format.split(","): - reuse_streams = False - if codec != VideoCodec.AUTO and codec != video_encoding and video_encoding is not None: - reuse_streams = False - - if not reuse_streams: - components = self.get_components_internal(container) - video = VideoFromComponents(components) - return video.save_to( - path, - format=format, - codec=codec, - metadata=metadata - ) - - streams = container.streams - - open_kwargs = get_open_write_kwargs(path, container_format, format) - with av.open(path, **open_kwargs) as output_container: - # Copy over the original metadata - for key, value in container.metadata.items(): - if metadata is None or key not in metadata: - output_container.metadata[key] = value - - # Add our new metadata - if metadata is not None: - for key, value in metadata.items(): - if isinstance(value, str): - output_container.metadata[key] = value - else: - output_container.metadata[key] = json.dumps(value) - - # Add streams to the new container - stream_map = {} - for stream in streams: - if isinstance(stream, (av.VideoStream, av.AudioStream, SubtitleStream)): - out_stream = output_container.add_stream_from_template(template=stream, opaque=True) - stream_map[stream] = out_stream - - # Write packets to the new container - for packet in container.demux(): - if packet.stream in stream_map and packet.dts is not None: - packet.stream = stream_map[packet.stream] - output_container.mux(packet) - -class VideoFromComponents(VideoInput): - """ - Class representing video input from tensors. 
- """ - - def __init__(self, components: VideoComponents): - self.__components = components - - def get_components(self) -> VideoComponents: - return VideoComponents( - images=self.__components.images, - audio=self.__components.audio, - frame_rate=self.__components.frame_rate - ) - - def save_to( - self, - path: str, - format: VideoContainer = VideoContainer.AUTO, - codec: VideoCodec = VideoCodec.AUTO, - metadata: Optional[dict] = None - ): - if format != VideoContainer.AUTO and format != VideoContainer.MP4: - raise ValueError("Only MP4 format is supported for now") - if codec != VideoCodec.AUTO and codec != VideoCodec.H264: - raise ValueError("Only H264 codec is supported for now") - with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}) as output: - # Add metadata before writing any streams - if metadata is not None: - for key, value in metadata.items(): - output.metadata[key] = json.dumps(value) - - frame_rate = Fraction(round(self.__components.frame_rate * 1000), 1000) - # Create a video stream - video_stream = output.add_stream('h264', rate=frame_rate) - video_stream.width = self.__components.images.shape[2] - video_stream.height = self.__components.images.shape[1] - video_stream.pix_fmt = 'yuv420p' - - # Create an audio stream - audio_sample_rate = 1 - audio_stream: Optional[av.AudioStream] = None - if self.__components.audio: - audio_sample_rate = int(self.__components.audio['sample_rate']) - audio_stream = output.add_stream('aac', rate=audio_sample_rate) - audio_stream.sample_rate = audio_sample_rate - audio_stream.format = 'fltp' - - # Encode video - for i, frame in enumerate(self.__components.images): - img = (frame * 255).clamp(0, 255).byte().cpu().numpy() # shape: (H, W, 3) - frame = av.VideoFrame.from_ndarray(img, format='rgb24') - frame = frame.reformat(format='yuv420p') # Convert to YUV420P as required by h264 - packet = video_stream.encode(frame) - output.mux(packet) - - # Flush video - packet = video_stream.encode(None) - output.mux(packet) - - if audio_stream and self.__components.audio: - # Encode audio - samples_per_frame = int(audio_sample_rate / frame_rate) - num_frames = self.__components.audio['waveform'].shape[2] // samples_per_frame - for i in range(num_frames): - start = i * samples_per_frame - end = start + samples_per_frame - # TODO(Feature) - Add support for stereo audio - chunk = ( - self.__components.audio["waveform"][0, 0, start:end] - .unsqueeze(0) - .contiguous() - .numpy() - ) - audio_frame = av.AudioFrame.from_ndarray(chunk, format='fltp', layout='mono') - audio_frame.sample_rate = audio_sample_rate - audio_frame.pts = i * samples_per_frame - for packet in audio_stream.encode(audio_frame): - output.mux(packet) - - # Flush audio - for packet in audio_stream.encode(None): - output.mux(packet) - +# This file only exists for backwards compatibility. 
+from comfy_api.latest._input_impl.video_types import * # noqa: F403 diff --git a/comfy_api/internal/__init__.py b/comfy_api/internal/__init__.py new file mode 100644 index 000000000..c00b1fdbb --- /dev/null +++ b/comfy_api/internal/__init__.py @@ -0,0 +1,7 @@ +# Internal infrastructure for ComfyAPI +from .api_registry import ( + ComfyAPIBase as ComfyAPIBase, + ComfyAPIWithVersion as ComfyAPIWithVersion, + register_versions as register_versions, + get_all_versions as get_all_versions, +) diff --git a/comfy_api/internal/api_registry.py b/comfy_api/internal/api_registry.py new file mode 100644 index 000000000..7e3375cf6 --- /dev/null +++ b/comfy_api/internal/api_registry.py @@ -0,0 +1,39 @@ +from typing import Type, List, NamedTuple +from comfy_api.internal.singleton import ProxiedSingleton +from packaging import version as packaging_version + + +class ComfyAPIBase(ProxiedSingleton): + def __init__(self): + pass + + +class ComfyAPIWithVersion(NamedTuple): + version: str + api_class: Type[ComfyAPIBase] + + +def parse_version(version_str: str) -> packaging_version.Version: + """ + Parses a version string into a packaging_version.Version object. + Raises ValueError if the version string is invalid. + """ + if version_str == "latest": + return packaging_version.parse("9999999.9999999.9999999") + return packaging_version.parse(version_str) + + +registered_versions: List[ComfyAPIWithVersion] = [] + + +def register_versions(versions: List[ComfyAPIWithVersion]): + versions.sort(key=lambda x: parse_version(x.version)) + global registered_versions + registered_versions = versions + + +def get_all_versions() -> List[ComfyAPIWithVersion]: + """ + Returns a list of all registered ComfyAPI versions. + """ + return registered_versions diff --git a/comfy_api/internal/async_to_sync.py b/comfy_api/internal/async_to_sync.py new file mode 100644 index 000000000..f5f805a62 --- /dev/null +++ b/comfy_api/internal/async_to_sync.py @@ -0,0 +1,987 @@ +import asyncio +import concurrent.futures +import contextvars +import functools +import inspect +import logging +import os +import textwrap +import threading +from enum import Enum +from typing import Optional, Type, get_origin, get_args + + +class TypeTracker: + """Tracks types discovered during stub generation for automatic import generation.""" + + def __init__(self): + self.discovered_types = {} # type_name -> (module, qualname) + self.builtin_types = { + "Any", + "Dict", + "List", + "Optional", + "Tuple", + "Union", + "Set", + "Sequence", + "cast", + "NamedTuple", + "str", + "int", + "float", + "bool", + "None", + "bytes", + "object", + "type", + "dict", + "list", + "tuple", + "set", + } + self.already_imported = ( + set() + ) # Track types already imported to avoid duplicates + + def track_type(self, annotation): + """Track a type annotation and record its module/import info.""" + if annotation is None or annotation is type(None): + return + + # Skip builtins and typing module types we already import + type_name = getattr(annotation, "__name__", None) + if type_name and ( + type_name in self.builtin_types or type_name in self.already_imported + ): + return + + # Get module and qualname + module = getattr(annotation, "__module__", None) + qualname = getattr(annotation, "__qualname__", type_name or "") + + # Skip types from typing module (they're already imported) + if module == "typing": + return + + # Skip UnionType and GenericAlias from types module as they're handled specially + if module == "types" and type_name in ("UnionType", "GenericAlias"): + return + + if 
module and module not in ["builtins", "__main__"]: + # Store the type info + if type_name: + self.discovered_types[type_name] = (module, qualname) + + def get_imports(self, main_module_name: str) -> list[str]: + """Generate import statements for all discovered types.""" + imports = [] + imports_by_module = {} + + for type_name, (module, qualname) in sorted(self.discovered_types.items()): + # Skip types from the main module (they're already imported) + if main_module_name and module == main_module_name: + continue + + if module not in imports_by_module: + imports_by_module[module] = [] + if type_name not in imports_by_module[module]: # Avoid duplicates + imports_by_module[module].append(type_name) + + # Generate import statements + for module, types in sorted(imports_by_module.items()): + if len(types) == 1: + imports.append(f"from {module} import {types[0]}") + else: + imports.append(f"from {module} import {', '.join(sorted(set(types)))}") + + return imports + + +class AsyncToSyncConverter: + """ + Provides utilities to convert async classes to sync classes with proper type hints. + """ + + _thread_pool: Optional[concurrent.futures.ThreadPoolExecutor] = None + _thread_pool_lock = threading.Lock() + _thread_pool_initialized = False + + @classmethod + def get_thread_pool(cls, max_workers=None) -> concurrent.futures.ThreadPoolExecutor: + """Get or create the shared thread pool with proper thread-safe initialization.""" + # Fast path - check if already initialized without acquiring lock + if cls._thread_pool_initialized: + assert cls._thread_pool is not None, "Thread pool should be initialized" + return cls._thread_pool + + # Slow path - acquire lock and create pool if needed + with cls._thread_pool_lock: + if not cls._thread_pool_initialized: + cls._thread_pool = concurrent.futures.ThreadPoolExecutor( + max_workers=max_workers, thread_name_prefix="async_to_sync_" + ) + cls._thread_pool_initialized = True + + # This should never be None at this point, but add assertion for type checker + assert cls._thread_pool is not None + return cls._thread_pool + + @classmethod + def run_async_in_thread(cls, coro_func, *args, **kwargs): + """ + Run an async function in a separate thread from the thread pool. + Blocks until the async function completes. + Properly propagates contextvars between threads and manages event loops. 
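+
+        A minimal usage sketch (hypothetical caller code; `fetch_data` is an
+        assumed example coroutine, not part of this module):
+
+            async def fetch_data(x):
+                await asyncio.sleep(0.1)
+                return x * 2
+
+            result = AsyncToSyncConverter.run_async_in_thread(fetch_data, 21)
+            # result == 42; the calling thread blocks while a pool thread
+            # drives the coroutine on its own event loop.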
+ """ + # Capture current context - this includes all context variables + context = contextvars.copy_context() + + # Store the result and any exception that occurs + result_container: dict = {"result": None, "exception": None} + + # Function that runs in the thread pool + def run_in_thread(): + # Create new event loop for this thread + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + try: + # Create the coroutine within the context + async def run_with_context(): + # The coroutine function might access context variables + return await coro_func(*args, **kwargs) + + # Run the coroutine with the captured context + # This ensures all context variables are available in the async function + result = context.run(loop.run_until_complete, run_with_context()) + result_container["result"] = result + except Exception as e: + # Store the exception to re-raise in the calling thread + result_container["exception"] = e + finally: + # Ensure event loop is properly closed to prevent warnings + try: + # Cancel any remaining tasks + pending = asyncio.all_tasks(loop) + for task in pending: + task.cancel() + + # Run the loop briefly to handle cancellations + if pending: + loop.run_until_complete( + asyncio.gather(*pending, return_exceptions=True) + ) + except Exception: + pass # Ignore errors during cleanup + + # Close the event loop + loop.close() + + # Clear the event loop from the thread + asyncio.set_event_loop(None) + + # Submit to thread pool and wait for result + thread_pool = cls.get_thread_pool() + future = thread_pool.submit(run_in_thread) + future.result() # Wait for completion + + # Re-raise any exception that occurred in the thread + if result_container["exception"] is not None: + raise result_container["exception"] + + return result_container["result"] + + @classmethod + def create_sync_class(cls, async_class: Type, thread_pool_size=10) -> Type: + """ + Creates a new class with synchronous versions of all async methods. 
+ + Args: + async_class: The async class to convert + thread_pool_size: Size of thread pool to use + + Returns: + A new class with sync versions of all async methods + """ + sync_class_name = "ComfyAPISyncStub" + cls.get_thread_pool(thread_pool_size) + + # Create a proper class with docstrings and proper base classes + sync_class_dict = { + "__doc__": async_class.__doc__, + "__module__": async_class.__module__, + "__qualname__": sync_class_name, + "__orig_class__": async_class, # Store original class for typing references + } + + # Create __init__ method + def __init__(self, *args, **kwargs): + self._async_instance = async_class(*args, **kwargs) + + # Handle annotated class attributes (like execution: Execution) + # Get all annotations from the class hierarchy + all_annotations = {} + for base_class in reversed(inspect.getmro(async_class)): + if hasattr(base_class, "__annotations__"): + all_annotations.update(base_class.__annotations__) + + # For each annotated attribute, check if it needs to be created or wrapped + for attr_name, attr_type in all_annotations.items(): + if hasattr(self._async_instance, attr_name): + # Attribute exists on the instance + attr = getattr(self._async_instance, attr_name) + # Check if this attribute needs a sync wrapper + if hasattr(attr, "__class__"): + from comfy_api.internal.singleton import ProxiedSingleton + + if isinstance(attr, ProxiedSingleton): + # Create a sync version of this attribute + try: + sync_attr_class = cls.create_sync_class(attr.__class__) + # Create instance of the sync wrapper with the async instance + sync_attr = object.__new__(sync_attr_class) # type: ignore + sync_attr._async_instance = attr + setattr(self, attr_name, sync_attr) + except Exception: + # If we can't create a sync version, keep the original + setattr(self, attr_name, attr) + else: + # Not async, just copy the reference + setattr(self, attr_name, attr) + else: + # Attribute doesn't exist, but is annotated - create it + # This handles cases like execution: Execution + if isinstance(attr_type, type): + # Check if the type is defined as an inner class + if hasattr(async_class, attr_type.__name__): + inner_class = getattr(async_class, attr_type.__name__) + from comfy_api.internal.singleton import ProxiedSingleton + + # Create an instance of the inner class + try: + # For ProxiedSingleton classes, get or create the singleton instance + if issubclass(inner_class, ProxiedSingleton): + async_instance = inner_class.get_instance() + else: + async_instance = inner_class() + + # Create sync wrapper + sync_attr_class = cls.create_sync_class(inner_class) + sync_attr = object.__new__(sync_attr_class) # type: ignore + sync_attr._async_instance = async_instance + setattr(self, attr_name, sync_attr) + # Also set on the async instance for consistency + setattr(self._async_instance, attr_name, async_instance) + except Exception as e: + logging.warning( + f"Failed to create instance for {attr_name}: {e}" + ) + + # Handle other instance attributes that might not be annotated + for name, attr in inspect.getmembers(self._async_instance): + if name.startswith("_") or hasattr(self, name): + continue + + # If attribute is an instance of a class, and that class is defined in the original class + # we need to check if it needs a sync wrapper + if isinstance(attr, object) and not isinstance( + attr, (str, int, float, bool, list, dict, tuple) + ): + from comfy_api.internal.singleton import ProxiedSingleton + + if isinstance(attr, ProxiedSingleton): + # Create a sync version of this nested class + try: + 
sync_attr_class = cls.create_sync_class(attr.__class__) + # Create instance of the sync wrapper with the async instance + sync_attr = object.__new__(sync_attr_class) # type: ignore + sync_attr._async_instance = attr + setattr(self, name, sync_attr) + except Exception: + # If we can't create a sync version, keep the original + setattr(self, name, attr) + + sync_class_dict["__init__"] = __init__ + + # Process methods from the async class + for name, method in inspect.getmembers( + async_class, predicate=inspect.isfunction + ): + if name.startswith("_"): + continue + + # Extract the actual return type from a coroutine + if inspect.iscoroutinefunction(method): + # Create sync version of async method with proper signature + @functools.wraps(method) + def sync_method(self, *args, _method_name=name, **kwargs): + async_method = getattr(self._async_instance, _method_name) + return AsyncToSyncConverter.run_async_in_thread( + async_method, *args, **kwargs + ) + + # Add to the class dict + sync_class_dict[name] = sync_method + else: + # For regular methods, create a proxy method + @functools.wraps(method) + def proxy_method(self, *args, _method_name=name, **kwargs): + method = getattr(self._async_instance, _method_name) + return method(*args, **kwargs) + + # Add to the class dict + sync_class_dict[name] = proxy_method + + # Handle property access + for name, prop in inspect.getmembers( + async_class, lambda x: isinstance(x, property) + ): + + def make_property(name, prop_obj): + def getter(self): + value = getattr(self._async_instance, name) + if inspect.iscoroutinefunction(value): + + def sync_fn(*args, **kwargs): + return AsyncToSyncConverter.run_async_in_thread( + value, *args, **kwargs + ) + + return sync_fn + return value + + def setter(self, value): + setattr(self._async_instance, name, value) + + return property(getter, setter if prop_obj.fset else None) + + sync_class_dict[name] = make_property(name, prop) + + # Create the class + sync_class = type(sync_class_name, (object,), sync_class_dict) + + return sync_class + + @classmethod + def _format_type_annotation( + cls, annotation, type_tracker: Optional[TypeTracker] = None + ) -> str: + """Convert a type annotation to its string representation for stub files.""" + if ( + annotation is inspect.Parameter.empty + or annotation is inspect.Signature.empty + ): + return "Any" + + # Handle None type + if annotation is type(None): + return "None" + + # Track the type if we have a tracker + if type_tracker: + type_tracker.track_type(annotation) + + # Try using typing.get_origin/get_args for Python 3.8+ + try: + origin = get_origin(annotation) + args = get_args(annotation) + + if origin is not None: + # Track the origin type + if type_tracker: + type_tracker.track_type(origin) + + # Get the origin name + origin_name = getattr(origin, "__name__", str(origin)) + if "." 
in origin_name: + origin_name = origin_name.split(".")[-1] + + # Special handling for types.UnionType (Python 3.10+ pipe operator) + # Convert to old-style Union for compatibility + if str(origin) == "<class 'types.UnionType'>" or origin_name == "UnionType": + origin_name = "Union" + + # Format arguments recursively + if args: + formatted_args = [] + for arg in args: + # Track each type in the union + if type_tracker: + type_tracker.track_type(arg) + formatted_args.append(cls._format_type_annotation(arg, type_tracker)) + return f"{origin_name}[{', '.join(formatted_args)}]" + else: + return origin_name + except (AttributeError, TypeError): + # Fallback for older Python versions or non-generic types + pass + + # Handle generic types the old way for compatibility + if hasattr(annotation, "__origin__") and hasattr(annotation, "__args__"): + origin = annotation.__origin__ + origin_name = ( + origin.__name__ + if hasattr(origin, "__name__") + else str(origin).split("'")[1] + ) + + # Format each type argument + args = [] + for arg in annotation.__args__: + args.append(cls._format_type_annotation(arg, type_tracker)) + + return f"{origin_name}[{', '.join(args)}]" + + # Handle regular types with __name__ + if hasattr(annotation, "__name__"): + return annotation.__name__ + + # Handle special module types (like types from typing module) + if hasattr(annotation, "__module__") and hasattr(annotation, "__qualname__"): + # For types like typing.Literal, typing.TypedDict, etc. + return annotation.__qualname__ + + # Last resort: string conversion with cleanup + type_str = str(annotation) + + # Clean up common patterns more robustly + if type_str.startswith("<class '"): + type_str = type_str[8:-2] # Remove "<class '" prefix and "'>" suffix + + # Remove module prefixes for common modules + for prefix in ["typing.", "builtins.", "types."]: + if type_str.startswith(prefix): + type_str = type_str[len(prefix) :] + + # Handle special cases + if type_str in ("_empty", "inspect._empty"): + return "None" + + # Fix NoneType (this should rarely be needed now) + if type_str == "NoneType": + return "None" + + return type_str + + @classmethod + def _extract_coroutine_return_type(cls, annotation): + """Extract the actual return type from a Coroutine annotation.""" + if hasattr(annotation, "__args__") and len(annotation.__args__) > 2: + # Coroutine[Any, Any, ReturnType] -> extract ReturnType + return annotation.__args__[2] + return annotation + + @classmethod + def _format_parameter_default(cls, default_value) -> str: + """Format a parameter's default value for stub files.""" + if default_value is inspect.Parameter.empty: + return "" + elif default_value is None: + return " = None" + elif isinstance(default_value, bool): + return f" = {default_value}" + elif default_value == {}: + return " = {}" + elif default_value == []: + return " = []" + else: + return f" = {default_value}" + + @classmethod + def _format_method_parameters( + cls, + sig: inspect.Signature, + skip_self: bool = True, + type_hints: Optional[dict] = None, + type_tracker: Optional[TypeTracker] = None, + ) -> str: + """Format method parameters for stub files.""" + params = [] + if type_hints is None: + type_hints = {} + + for i, (param_name, param) in enumerate(sig.parameters.items()): + if i == 0 and param_name == "self" and skip_self: + params.append("self") + else: + # Get type annotation from type hints if available, otherwise from signature + annotation = type_hints.get(param_name, param.annotation) + type_str = cls._format_type_annotation(annotation, type_tracker) + + # Get default value + default_str =
cls._format_parameter_default(param.default) + + # Combine parameter parts + if annotation is inspect.Parameter.empty: + params.append(f"{param_name}: Any{default_str}") + else: + params.append(f"{param_name}: {type_str}{default_str}") + + return ", ".join(params) + + @classmethod + def _generate_method_signature( + cls, + method_name: str, + method, + is_async: bool = False, + type_tracker: Optional[TypeTracker] = None, + ) -> str: + """Generate a complete method signature for stub files.""" + sig = inspect.signature(method) + + # Try to get evaluated type hints to resolve string annotations + try: + from typing import get_type_hints + type_hints = get_type_hints(method) + except Exception: + # Fallback to empty dict if we can't get type hints + type_hints = {} + + # For async methods, extract the actual return type + return_annotation = type_hints.get('return', sig.return_annotation) + if is_async and inspect.iscoroutinefunction(method): + return_annotation = cls._extract_coroutine_return_type(return_annotation) + + # Format parameters with type hints + params_str = cls._format_method_parameters(sig, type_hints=type_hints, type_tracker=type_tracker) + + # Format return type + return_type = cls._format_type_annotation(return_annotation, type_tracker) + if return_annotation is inspect.Signature.empty: + return_type = "None" + + return f"def {method_name}({params_str}) -> {return_type}: ..." + + @classmethod + def _generate_imports( + cls, async_class: Type, type_tracker: TypeTracker + ) -> list[str]: + """Generate import statements for the stub file.""" + imports = [] + + # Add standard typing imports + imports.append( + "from typing import Any, Dict, List, Optional, Tuple, Union, Set, Sequence, cast, NamedTuple" + ) + + # Add imports from the original module + if async_class.__module__ != "builtins": + module = inspect.getmodule(async_class) + additional_types = [] + + if module: + # Check if module has __all__ defined + module_all = getattr(module, "__all__", None) + + for name, obj in sorted(inspect.getmembers(module)): + if isinstance(obj, type): + # Skip if __all__ is defined and this name isn't in it + # unless it's already been tracked as used in type annotations + if module_all is not None and name not in module_all: + # Check if this type was actually used in annotations + if name not in type_tracker.discovered_types: + continue + + # Check for NamedTuple + if issubclass(obj, tuple) and hasattr(obj, "_fields"): + additional_types.append(name) + # Mark as already imported + type_tracker.already_imported.add(name) + # Check for Enum + elif issubclass(obj, Enum) and name != "Enum": + additional_types.append(name) + # Mark as already imported + type_tracker.already_imported.add(name) + + if additional_types: + type_imports = ", ".join([async_class.__name__] + additional_types) + imports.append(f"from {async_class.__module__} import {type_imports}") + else: + imports.append( + f"from {async_class.__module__} import {async_class.__name__}" + ) + + # Add imports for all discovered types + # Pass the main module name to avoid duplicate imports + imports.extend( + type_tracker.get_imports(main_module_name=async_class.__module__) + ) + + # Add base module import if needed + if hasattr(inspect.getmodule(async_class), "__name__"): + module_name = inspect.getmodule(async_class).__name__ + if "." 
in module_name: + base_module = module_name.split(".")[0] + # Only add if not already importing from it + if not any(imp.startswith(f"from {base_module}") for imp in imports): + imports.append(f"import {base_module}") + + return imports + + @classmethod + def _get_class_attributes(cls, async_class: Type) -> list[tuple[str, Type]]: + """Extract class attributes that are classes themselves.""" + class_attributes = [] + + # Look for class attributes that are classes + for name, attr in sorted(inspect.getmembers(async_class)): + if isinstance(attr, type) and not name.startswith("_"): + class_attributes.append((name, attr)) + elif ( + hasattr(async_class, "__annotations__") + and name in async_class.__annotations__ + ): + annotation = async_class.__annotations__[name] + if isinstance(annotation, type): + class_attributes.append((name, annotation)) + + return class_attributes + + @classmethod + def _generate_inner_class_stub( + cls, + name: str, + attr: Type, + indent: str = " ", + type_tracker: Optional[TypeTracker] = None, + ) -> list[str]: + """Generate stub for an inner class.""" + stub_lines = [] + stub_lines.append(f"{indent}class {name}Sync:") + + # Add docstring if available + if hasattr(attr, "__doc__") and attr.__doc__: + stub_lines.extend( + cls._format_docstring_for_stub(attr.__doc__, f"{indent} ") + ) + + # Add __init__ if it exists + if hasattr(attr, "__init__"): + try: + init_method = getattr(attr, "__init__") + init_sig = inspect.signature(init_method) + + # Try to get type hints + try: + from typing import get_type_hints + init_hints = get_type_hints(init_method) + except Exception: + init_hints = {} + + # Format parameters + params_str = cls._format_method_parameters( + init_sig, type_hints=init_hints, type_tracker=type_tracker + ) + # Add __init__ docstring if available (before the method) + if hasattr(init_method, "__doc__") and init_method.__doc__: + stub_lines.extend( + cls._format_docstring_for_stub( + init_method.__doc__, f"{indent} " + ) + ) + stub_lines.append( + f"{indent} def __init__({params_str}) -> None: ..." + ) + except (ValueError, TypeError): + stub_lines.append( + f"{indent} def __init__(self, *args, **kwargs) -> None: ..." + ) + + # Add methods to the inner class + has_methods = False + for method_name, method in sorted( + inspect.getmembers(attr, predicate=inspect.isfunction) + ): + if method_name.startswith("_"): + continue + + has_methods = True + try: + # Add method docstring if available (before the method signature) + if method.__doc__: + stub_lines.extend( + cls._format_docstring_for_stub(method.__doc__, f"{indent} ") + ) + + method_sig = cls._generate_method_signature( + method_name, method, is_async=True, type_tracker=type_tracker + ) + stub_lines.append(f"{indent} {method_sig}") + except (ValueError, TypeError): + stub_lines.append( + f"{indent} def {method_name}(self, *args, **kwargs): ..." 
+ ) + + if not has_methods: + stub_lines.append(f"{indent} pass") + + return stub_lines + + @classmethod + def _format_docstring_for_stub( + cls, docstring: str, indent: str = " " + ) -> list[str]: + """Format a docstring for inclusion in a stub file with proper indentation.""" + if not docstring: + return [] + + # First, dedent the docstring to remove any existing indentation + dedented = textwrap.dedent(docstring).strip() + + # Split into lines + lines = dedented.split("\n") + + # Build the properly indented docstring + result = [] + result.append(f'{indent}"""') + + for line in lines: + if line.strip(): # Non-empty line + result.append(f"{indent}{line}") + else: # Empty line + result.append("") + + result.append(f'{indent}"""') + return result + + @classmethod + def _post_process_stub_content(cls, stub_content: list[str]) -> list[str]: + """Post-process stub content to fix any remaining issues.""" + processed = [] + + for line in stub_content: + # Skip processing imports + if line.startswith(("from ", "import ")): + processed.append(line) + continue + + # Fix method signatures missing return types + if ( + line.strip().startswith("def ") + and line.strip().endswith(": ...") + and ") -> " not in line + ): + # Add -> None for methods without return annotation + line = line.replace(": ...", " -> None: ...") + + processed.append(line) + + return processed + + @classmethod + def generate_stub_file(cls, async_class: Type, sync_class: Type) -> None: + """ + Generate a .pyi stub file for the sync class to help IDEs with type checking. + """ + try: + # Only generate stub if we can determine module path + if async_class.__module__ == "__main__": + return + + module = inspect.getmodule(async_class) + if not module: + return + + module_path = module.__file__ + if not module_path: + return + + # Create stub file path in a 'generated' subdirectory + module_dir = os.path.dirname(module_path) + stub_dir = os.path.join(module_dir, "generated") + + # Ensure the generated directory exists + os.makedirs(stub_dir, exist_ok=True) + + module_name = os.path.basename(module_path) + if module_name.endswith(".py"): + module_name = module_name[:-3] + + sync_stub_path = os.path.join(stub_dir, f"{sync_class.__name__}.pyi") + + # Create a type tracker for this stub generation + type_tracker = TypeTracker() + + stub_content = [] + + # We'll generate imports after processing all methods to capture all types + # Leave a placeholder for imports + imports_placeholder_index = len(stub_content) + stub_content.append("") # Will be replaced with imports later + + # Class definition + stub_content.append(f"class {sync_class.__name__}:") + + # Docstring + if async_class.__doc__: + stub_content.extend( + cls._format_docstring_for_stub(async_class.__doc__, " ") + ) + + # Generate __init__ + try: + init_method = async_class.__init__ + init_signature = inspect.signature(init_method) + + # Try to get type hints for __init__ + try: + from typing import get_type_hints + init_hints = get_type_hints(init_method) + except Exception: + init_hints = {} + + # Format parameters + params_str = cls._format_method_parameters( + init_signature, type_hints=init_hints, type_tracker=type_tracker + ) + # Add __init__ docstring if available (before the method) + if hasattr(init_method, "__doc__") and init_method.__doc__: + stub_content.extend( + cls._format_docstring_for_stub(init_method.__doc__, " ") + ) + stub_content.append(f" def __init__({params_str}) -> None: ...") + except (ValueError, TypeError): + stub_content.append( + " def __init__(self, 
*args, **kwargs) -> None: ..." + ) + + stub_content.append("") # Add newline after __init__ + + # Get class attributes + class_attributes = cls._get_class_attributes(async_class) + + # Generate inner classes + for name, attr in class_attributes: + inner_class_stub = cls._generate_inner_class_stub( + name, attr, type_tracker=type_tracker + ) + stub_content.extend(inner_class_stub) + stub_content.append("") # Add newline after the inner class + + # Add methods to the main class + processed_methods = set() # Keep track of methods we've processed + for name, method in sorted( + inspect.getmembers(async_class, predicate=inspect.isfunction) + ): + if name.startswith("_") or name in processed_methods: + continue + + processed_methods.add(name) + + try: + method_sig = cls._generate_method_signature( + name, method, is_async=True, type_tracker=type_tracker + ) + + # Add docstring if available (before the method signature for proper formatting) + if method.__doc__: + stub_content.extend( + cls._format_docstring_for_stub(method.__doc__, " ") + ) + + stub_content.append(f" {method_sig}") + + stub_content.append("") # Add newline after each method + + except (ValueError, TypeError): + # If we can't get the signature, just add a simple stub + stub_content.append(f" def {name}(self, *args, **kwargs): ...") + stub_content.append("") # Add newline + + # Add properties + for name, prop in sorted( + inspect.getmembers(async_class, lambda x: isinstance(x, property)) + ): + stub_content.append(" @property") + stub_content.append(f" def {name}(self) -> Any: ...") + if prop.fset: + stub_content.append(f" @{name}.setter") + stub_content.append( + f" def {name}(self, value: Any) -> None: ..." + ) + stub_content.append("") # Add newline after each property + + # Add placeholders for the nested class instances + # Check the actual attribute names from class annotations and attributes + attribute_mappings = {} + + # First check annotations for typed attributes (including from parent classes) + # Collect all annotations from the class hierarchy + all_annotations = {} + for base_class in reversed(inspect.getmro(async_class)): + if hasattr(base_class, "__annotations__"): + all_annotations.update(base_class.__annotations__) + + for attr_name, attr_type in sorted(all_annotations.items()): + for class_name, class_type in class_attributes: + # If the class type matches the annotated type + if ( + attr_type == class_type + or (hasattr(attr_type, "__name__") and attr_type.__name__ == class_name) + or (isinstance(attr_type, str) and attr_type == class_name) + ): + attribute_mappings[class_name] = attr_name + + # Remove the extra checking - annotations should be sufficient + + # Add the attribute declarations with proper names + for class_name, class_type in class_attributes: + # Check if there's a mapping from annotation + attr_name = attribute_mappings.get(class_name, class_name) + # Use the annotation name if it exists, even if the attribute doesn't exist yet + # This is because the attribute might be created at runtime + stub_content.append(f" {attr_name}: {class_name}Sync") + + stub_content.append("") # Add a final newline + + # Now generate imports with all discovered types + imports = cls._generate_imports(async_class, type_tracker) + + # Deduplicate imports while preserving order + seen = set() + unique_imports = [] + for imp in imports: + if imp not in seen: + seen.add(imp) + unique_imports.append(imp) + else: + logging.warning(f"Duplicate import detected: {imp}") + + # Replace the placeholder with actual imports + 
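+            # A single placeholder entry is spliced out and the full import list
+            # is inserted in its place, so imports land at the top of the stub
+            # without rebuilding the whole content list.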
stub_content[imports_placeholder_index : imports_placeholder_index + 1] = ( + unique_imports + ) + + # Post-process stub content + stub_content = cls._post_process_stub_content(stub_content) + + # Write stub file + with open(sync_stub_path, "w") as f: + f.write("\n".join(stub_content)) + + logging.info(f"Generated stub file: {sync_stub_path}") + + except Exception as e: + # If stub generation fails, log the error but don't break the main functionality + logging.error( + f"Error generating stub file for {sync_class.__name__}: {str(e)}" + ) + import traceback + + logging.error(traceback.format_exc()) + + +def create_sync_class(async_class: Type, thread_pool_size=10) -> Type: + """ + Creates a sync version of an async class + + Args: + async_class: The async class to convert + thread_pool_size: Size of thread pool to use + + Returns: + A new class with sync versions of all async methods + """ + return AsyncToSyncConverter.create_sync_class(async_class, thread_pool_size) diff --git a/comfy_api/internal/singleton.py b/comfy_api/internal/singleton.py new file mode 100644 index 000000000..75f16f98e --- /dev/null +++ b/comfy_api/internal/singleton.py @@ -0,0 +1,33 @@ +from typing import Type, TypeVar + +class SingletonMetaclass(type): + T = TypeVar("T", bound="SingletonMetaclass") + _instances = {} + + def __call__(cls, *args, **kwargs): + if cls not in cls._instances: + cls._instances[cls] = super(SingletonMetaclass, cls).__call__( + *args, **kwargs + ) + return cls._instances[cls] + + def inject_instance(cls: Type[T], instance: T) -> None: + assert cls not in SingletonMetaclass._instances, ( + "Cannot inject instance after first instantiation" + ) + SingletonMetaclass._instances[cls] = instance + + def get_instance(cls: Type[T], *args, **kwargs) -> T: + """ + Gets the singleton instance of the class, creating it if it doesn't exist. + """ + if cls not in SingletonMetaclass._instances: + SingletonMetaclass._instances[cls] = super( + SingletonMetaclass, cls + ).__call__(*args, **kwargs) + return cls._instances[cls] + + +class ProxiedSingleton(object, metaclass=SingletonMetaclass): + def __init__(self): + super().__init__() diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py new file mode 100644 index 000000000..e1f3a3655 --- /dev/null +++ b/comfy_api/latest/__init__.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +from typing import Type, TYPE_CHECKING +from comfy_api.internal import ComfyAPIBase +from comfy_api.internal.singleton import ProxiedSingleton +from comfy_api.internal.async_to_sync import create_sync_class +from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput +from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents +from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents +from comfy_execution.utils import get_executing_context +from comfy_execution.progress import get_progress_state, PreviewImageTuple +from PIL import Image +from comfy.cli_args import args +import numpy as np + + +class ComfyAPI_latest(ComfyAPIBase): + VERSION = "latest" + STABLE = False + + class Execution(ProxiedSingleton): + async def set_progress( + self, + value: float, + max_value: float, + node_id: str | None = None, + preview_image: Image.Image | ImageInput | None = None, + ignore_size_limit: bool = False, + ) -> None: + """ + Update the progress bar displayed in the ComfyUI interface. 
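+
+        A hypothetical call from a custom node (the node code, `step`, and
+        `total_steps` are assumed for illustration, not part of this patch):
+
+            api = ComfyAPI()  # ProxiedSingleton: every call returns the shared instance
+            await api.execution.set_progress(value=step, max_value=total_steps)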
+ + This function allows custom nodes and API calls to report their progress + back to the user interface, providing visual feedback during long operations. + + Migration from previous API: comfy.utils.PROGRESS_BAR_HOOK + """ + executing_context = get_executing_context() + if node_id is None and executing_context is not None: + node_id = executing_context.node_id + if node_id is None: + raise ValueError("node_id must be provided if not in executing context") + + # Convert preview_image to PreviewImageTuple if needed + to_display: PreviewImageTuple | Image.Image | ImageInput | None = preview_image + if to_display is not None: + # First convert to PIL Image if needed + if isinstance(to_display, ImageInput): + # Convert ImageInput (torch.Tensor) to PIL Image + # Handle tensor shape [B, H, W, C] -> get first image if batch + tensor = to_display + if len(tensor.shape) == 4: + tensor = tensor[0] + + # Convert to numpy array and scale to 0-255 + image_np = (tensor.cpu().numpy() * 255).astype(np.uint8) + to_display = Image.fromarray(image_np) + + if isinstance(to_display, Image.Image): + # Detect image format from PIL Image + image_format = to_display.format if to_display.format else "JPEG" + # Use None for preview_size if ignore_size_limit is True + preview_size = None if ignore_size_limit else args.preview_size + to_display = (image_format, to_display, preview_size) + + get_progress_state().update_progress( + node_id=node_id, + value=value, + max_value=max_value, + image=to_display, + ) + + execution: Execution + +class Input: + Image = ImageInput + Audio = AudioInput + Mask = MaskInput + Latent = LatentInput + Video = VideoInput + +class InputImpl: + VideoFromFile = VideoFromFile + VideoFromComponents = VideoFromComponents + +class Types: + VideoCodec = VideoCodec + VideoContainer = VideoContainer + VideoComponents = VideoComponents + +ComfyAPI = ComfyAPI_latest + +# Create a synchronous version of the API +if TYPE_CHECKING: + import comfy_api.latest.generated.ComfyAPISyncStub # type: ignore + + ComfyAPISync: Type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub] +ComfyAPISync = create_sync_class(ComfyAPI_latest) + +__all__ = [ + "ComfyAPI", + "ComfyAPISync", + "Input", + "InputImpl", + "Types", +] diff --git a/comfy_api/latest/_input/__init__.py b/comfy_api/latest/_input/__init__.py new file mode 100644 index 000000000..14f0e72f4 --- /dev/null +++ b/comfy_api/latest/_input/__init__.py @@ -0,0 +1,10 @@ +from .basic_types import ImageInput, AudioInput, MaskInput, LatentInput +from .video_types import VideoInput + +__all__ = [ + "ImageInput", + "AudioInput", + "VideoInput", + "MaskInput", + "LatentInput", +] diff --git a/comfy_api/latest/_input/basic_types.py b/comfy_api/latest/_input/basic_types.py new file mode 100644 index 000000000..245c6cbb1 --- /dev/null +++ b/comfy_api/latest/_input/basic_types.py @@ -0,0 +1,42 @@ +import torch +from typing import TypedDict, List, Optional + +ImageInput = torch.Tensor +""" +An image in format [B, H, W, C] where B is the batch size, C is the number of channels, +""" + +MaskInput = torch.Tensor +""" +A mask in format [B, H, W] where B is the batch size +""" + +class AudioInput(TypedDict): + """ + TypedDict representing audio input. + """ + + waveform: torch.Tensor + """ + Tensor in the format [B, C, T] where B is the batch size, C is the number of channels, + """ + + sample_rate: int + +class LatentInput(TypedDict): + """ + TypedDict representing latent input. 
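+
+    For illustration, a latent for one 512x512 image in a typical 4-channel,
+    8x-downscaled latent space might look like (hypothetical values):
+
+        latent: LatentInput = {
+            "samples": torch.zeros(1, 4, 64, 64),
+            "noise_mask": None,
+            "batch_index": None,
+        }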
+ """ + + samples: torch.Tensor + """ + Tensor in the format [B, C, H, W] where B is the batch size, C is the number of channels, + H is the height, and W is the width. + """ + + noise_mask: Optional[MaskInput] + """ + Optional noise mask tensor in the same format as samples. + """ + + batch_index: Optional[List[int]] diff --git a/comfy_api/latest/_input/video_types.py b/comfy_api/latest/_input/video_types.py new file mode 100644 index 000000000..5d95dc507 --- /dev/null +++ b/comfy_api/latest/_input/video_types.py @@ -0,0 +1,85 @@ +from __future__ import annotations +from abc import ABC, abstractmethod +from typing import Optional, Union +import io +import av +from comfy_api.util import VideoContainer, VideoCodec, VideoComponents + +class VideoInput(ABC): + """ + Abstract base class for video input types. + """ + + @abstractmethod + def get_components(self) -> VideoComponents: + """ + Abstract method to get the video components (images, audio, and frame rate). + + Returns: + VideoComponents containing images, audio, and frame rate + """ + pass + + @abstractmethod + def save_to( + self, + path: str, + format: VideoContainer = VideoContainer.AUTO, + codec: VideoCodec = VideoCodec.AUTO, + metadata: Optional[dict] = None + ): + """ + Abstract method to save the video input to a file. + """ + pass + + def get_stream_source(self) -> Union[str, io.BytesIO]: + """ + Get a streamable source for the video. This allows processing without + loading the entire video into memory. + + Returns: + Either a file path (str) or a BytesIO object that can be opened with av. + + Default implementation creates a BytesIO buffer, but subclasses should + override this for better performance when possible. + """ + buffer = io.BytesIO() + self.save_to(buffer) + buffer.seek(0) + return buffer + + # Provide a default implementation, but subclasses can provide optimized versions + # if possible. + def get_dimensions(self) -> tuple[int, int]: + """ + Returns the dimensions of the video input. + + Returns: + Tuple of (width, height) + """ + components = self.get_components() + return components.images.shape[2], components.images.shape[1] + + def get_duration(self) -> float: + """ + Returns the duration of the video in seconds. + + Returns: + Duration in seconds + """ + components = self.get_components() + frame_count = components.images.shape[0] + return float(frame_count / components.frame_rate) + + def get_container_format(self) -> str: + """ + Returns the container format of the video (e.g., 'mp4', 'mov', 'avi'). 
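A hedged sketch of a concrete VideoInput: only get_components() and save_to() are abstract, so the dimension and duration helpers come for free from the defaults above. SolidBlackVideo is hypothetical:

```python
from fractions import Fraction
import torch
from comfy_api.latest._input import VideoInput
from comfy_api.latest._util import VideoCodec, VideoComponents, VideoContainer

class SolidBlackVideo(VideoInput):
    """Hypothetical 24-frame, 64x48 clip exercising the base-class helpers."""

    def get_components(self) -> VideoComponents:
        frames = torch.zeros(24, 48, 64, 3)  # [T, H, W, C]
        return VideoComponents(images=frames, audio=None, frame_rate=Fraction(24))

    def save_to(self, path, format=VideoContainer.AUTO, codec=VideoCodec.AUTO, metadata=None):
        raise NotImplementedError("not needed for this sketch")

clip = SolidBlackVideo()
assert clip.get_dimensions() == (64, 48)  # (width, height) = (shape[2], shape[1])
assert clip.get_duration() == 1.0         # 24 frames / 24 fps
```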
+ + Returns: + Container format as string + """ + # Default implementation - subclasses should override for better performance + source = self.get_stream_source() + with av.open(source, mode="r") as container: + return container.format.name diff --git a/comfy_api/latest/_input_impl/__init__.py b/comfy_api/latest/_input_impl/__init__.py new file mode 100644 index 000000000..02901b8b9 --- /dev/null +++ b/comfy_api/latest/_input_impl/__init__.py @@ -0,0 +1,7 @@ +from .video_types import VideoFromFile, VideoFromComponents + +__all__ = [ + # Implementations + "VideoFromFile", + "VideoFromComponents", +] diff --git a/comfy_api/latest/_input_impl/video_types.py b/comfy_api/latest/_input_impl/video_types.py new file mode 100644 index 000000000..28de9651d --- /dev/null +++ b/comfy_api/latest/_input_impl/video_types.py @@ -0,0 +1,324 @@ +from __future__ import annotations +from av.container import InputContainer +from av.subtitles.stream import SubtitleStream +from fractions import Fraction +from typing import Optional +from comfy_api.latest._input import AudioInput, VideoInput +import av +import io +import json +import numpy as np +import torch +from comfy_api.latest._util import VideoContainer, VideoCodec, VideoComponents + + +def container_to_output_format(container_format: str | None) -> str | None: + """ + A container's `format` may be a comma-separated list of formats. + E.g., iso container's `format` may be `mov,mp4,m4a,3gp,3g2,mj2`. + However, writing to a file/stream with `av.open` requires a single format, + or `None` to auto-detect. + """ + if not container_format: + return None # Auto-detect + + if "," not in container_format: + return container_format + + formats = container_format.split(",") + return formats[0] + + +def get_open_write_kwargs( + dest: str | io.BytesIO, container_format: str, to_format: str | None +) -> dict: + """Get kwargs for writing a `VideoFromFile` to a file/stream with `av.open`""" + open_kwargs = { + "mode": "w", + # If isobmff, preserve custom metadata tags (workflow, prompt, extra_pnginfo) + "options": {"movflags": "use_metadata_tags"}, + } + + is_write_to_buffer = isinstance(dest, io.BytesIO) + if is_write_to_buffer: + # Set output format explicitly, since it cannot be inferred from file extension + if to_format == VideoContainer.AUTO: + to_format = container_format.lower() + elif isinstance(to_format, str): + to_format = to_format.lower() + open_kwargs["format"] = container_to_output_format(to_format) + + return open_kwargs + + +class VideoFromFile(VideoInput): + """ + Class representing video input from a file. + """ + + def __init__(self, file: str | io.BytesIO): + """ + Initialize the VideoFromFile object based off of either a path on disk or a BytesIO object + containing the file contents. + """ + self.__file = file + + def get_stream_source(self) -> str | io.BytesIO: + """ + Return the underlying file source for efficient streaming. + This avoids unnecessary memory copies when the source is already a file path. + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + return self.__file + + def get_dimensions(self) -> tuple[int, int]: + """ + Returns the dimensions of the video input. 
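The comma-separated format helper above can be exercised directly; the expected values below follow from its docstring and body:

```python
from comfy_api.latest._input_impl.video_types import container_to_output_format

# Isobmff sources report a comma-separated alias list; the first entry is a
# valid single format name for writing.
assert container_to_output_format("mov,mp4,m4a,3gp,3g2,mj2") == "mov"
assert container_to_output_format("mp4") == "mp4"
assert container_to_output_format(None) is None  # fall through to auto-detect
```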
+ + Returns: + Tuple of (width, height) + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) # Reset the BytesIO object to the beginning + with av.open(self.__file, mode='r') as container: + for stream in container.streams: + if stream.type == 'video': + assert isinstance(stream, av.VideoStream) + return stream.width, stream.height + raise ValueError(f"No video stream found in file '{self.__file}'") + + def get_duration(self) -> float: + """ + Returns the duration of the video in seconds. + + Returns: + Duration in seconds + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + with av.open(self.__file, mode="r") as container: + if container.duration is not None: + return float(container.duration / av.time_base) + + # Fallback: calculate from frame count and frame rate + video_stream = next( + (s for s in container.streams if s.type == "video"), None + ) + if video_stream and video_stream.frames and video_stream.average_rate: + return float(video_stream.frames / video_stream.average_rate) + + # Last resort: decode frames to count them + if video_stream and video_stream.average_rate: + frame_count = 0 + container.seek(0) + for packet in container.demux(video_stream): + for _ in packet.decode(): + frame_count += 1 + if frame_count > 0: + return float(frame_count / video_stream.average_rate) + + raise ValueError(f"Could not determine duration for file '{self.__file}'") + + def get_container_format(self) -> str: + """ + Returns the container format of the video (e.g., 'mp4', 'mov', 'avi'). + + Returns: + Container format as string + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + with av.open(self.__file, mode='r') as container: + return container.format.name + + def get_components_internal(self, container: InputContainer) -> VideoComponents: + # Get video frames + frames = [] + for frame in container.decode(video=0): + img = frame.to_ndarray(format='rgb24') # shape: (H, W, 3) + img = torch.from_numpy(img) / 255.0 # shape: (H, W, 3) + frames.append(img) + + images = torch.stack(frames) if len(frames) > 0 else torch.zeros(0, 3, 0, 0) + + # Get frame rate + video_stream = next(s for s in container.streams if s.type == 'video') + frame_rate = Fraction(video_stream.average_rate) if video_stream and video_stream.average_rate else Fraction(1) + + # Get audio if available + audio = None + try: + container.seek(0) # Reset the container to the beginning + for stream in container.streams: + if stream.type != 'audio': + continue + assert isinstance(stream, av.AudioStream) + audio_frames = [] + for packet in container.demux(stream): + for frame in packet.decode(): + assert isinstance(frame, av.AudioFrame) + audio_frames.append(frame.to_ndarray()) # shape: (channels, samples) + if len(audio_frames) > 0: + audio_data = np.concatenate(audio_frames, axis=1) # shape: (channels, total_samples) + audio_tensor = torch.from_numpy(audio_data).unsqueeze(0) # shape: (1, channels, total_samples) + audio = AudioInput({ + "waveform": audio_tensor, + "sample_rate": int(stream.sample_rate) if stream.sample_rate else 1, + }) + except StopIteration: + pass # No audio stream + + metadata = container.metadata + return VideoComponents(images=images, audio=audio, frame_rate=frame_rate, metadata=metadata) + + def get_components(self) -> VideoComponents: + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) # Reset the BytesIO object to the beginning + with av.open(self.__file, mode='r') as container: + return self.get_components_internal(container) + raise 
ValueError(f"No video stream found in file '{self.__file}'") + + def save_to( + self, + path: str | io.BytesIO, + format: VideoContainer = VideoContainer.AUTO, + codec: VideoCodec = VideoCodec.AUTO, + metadata: Optional[dict] = None + ): + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) # Reset the BytesIO object to the beginning + with av.open(self.__file, mode='r') as container: + container_format = container.format.name + video_encoding = container.streams.video[0].codec.name if len(container.streams.video) > 0 else None + reuse_streams = True + if format != VideoContainer.AUTO and format not in container_format.split(","): + reuse_streams = False + if codec != VideoCodec.AUTO and codec != video_encoding and video_encoding is not None: + reuse_streams = False + + if not reuse_streams: + components = self.get_components_internal(container) + video = VideoFromComponents(components) + return video.save_to( + path, + format=format, + codec=codec, + metadata=metadata + ) + + streams = container.streams + + open_kwargs = get_open_write_kwargs(path, container_format, format) + with av.open(path, **open_kwargs) as output_container: + # Copy over the original metadata + for key, value in container.metadata.items(): + if metadata is None or key not in metadata: + output_container.metadata[key] = value + + # Add our new metadata + if metadata is not None: + for key, value in metadata.items(): + if isinstance(value, str): + output_container.metadata[key] = value + else: + output_container.metadata[key] = json.dumps(value) + + # Add streams to the new container + stream_map = {} + for stream in streams: + if isinstance(stream, (av.VideoStream, av.AudioStream, SubtitleStream)): + out_stream = output_container.add_stream_from_template(template=stream, opaque=True) + stream_map[stream] = out_stream + + # Write packets to the new container + for packet in container.demux(): + if packet.stream in stream_map and packet.dts is not None: + packet.stream = stream_map[packet.stream] + output_container.mux(packet) + +class VideoFromComponents(VideoInput): + """ + Class representing video input from tensors. 
+ """ + + def __init__(self, components: VideoComponents): + self.__components = components + + def get_components(self) -> VideoComponents: + return VideoComponents( + images=self.__components.images, + audio=self.__components.audio, + frame_rate=self.__components.frame_rate + ) + + def save_to( + self, + path: str, + format: VideoContainer = VideoContainer.AUTO, + codec: VideoCodec = VideoCodec.AUTO, + metadata: Optional[dict] = None + ): + if format != VideoContainer.AUTO and format != VideoContainer.MP4: + raise ValueError("Only MP4 format is supported for now") + if codec != VideoCodec.AUTO and codec != VideoCodec.H264: + raise ValueError("Only H264 codec is supported for now") + with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}) as output: + # Add metadata before writing any streams + if metadata is not None: + for key, value in metadata.items(): + output.metadata[key] = json.dumps(value) + + frame_rate = Fraction(round(self.__components.frame_rate * 1000), 1000) + # Create a video stream + video_stream = output.add_stream('h264', rate=frame_rate) + video_stream.width = self.__components.images.shape[2] + video_stream.height = self.__components.images.shape[1] + video_stream.pix_fmt = 'yuv420p' + + # Create an audio stream + audio_sample_rate = 1 + audio_stream: Optional[av.AudioStream] = None + if self.__components.audio: + audio_sample_rate = int(self.__components.audio['sample_rate']) + audio_stream = output.add_stream('aac', rate=audio_sample_rate) + audio_stream.sample_rate = audio_sample_rate + audio_stream.format = 'fltp' + + # Encode video + for i, frame in enumerate(self.__components.images): + img = (frame * 255).clamp(0, 255).byte().cpu().numpy() # shape: (H, W, 3) + frame = av.VideoFrame.from_ndarray(img, format='rgb24') + frame = frame.reformat(format='yuv420p') # Convert to YUV420P as required by h264 + packet = video_stream.encode(frame) + output.mux(packet) + + # Flush video + packet = video_stream.encode(None) + output.mux(packet) + + if audio_stream and self.__components.audio: + # Encode audio + samples_per_frame = int(audio_sample_rate / frame_rate) + num_frames = self.__components.audio['waveform'].shape[2] // samples_per_frame + for i in range(num_frames): + start = i * samples_per_frame + end = start + samples_per_frame + # TODO(Feature) - Add support for stereo audio + chunk = ( + self.__components.audio["waveform"][0, 0, start:end] + .unsqueeze(0) + .contiguous() + .numpy() + ) + audio_frame = av.AudioFrame.from_ndarray(chunk, format='fltp', layout='mono') + audio_frame.sample_rate = audio_sample_rate + audio_frame.pts = i * samples_per_frame + for packet in audio_stream.encode(audio_frame): + output.mux(packet) + + # Flush audio + for packet in audio_stream.encode(None): + output.mux(packet) + + diff --git a/comfy_api/latest/_util/__init__.py b/comfy_api/latest/_util/__init__.py new file mode 100644 index 000000000..9019c46db --- /dev/null +++ b/comfy_api/latest/_util/__init__.py @@ -0,0 +1,8 @@ +from .video_types import VideoContainer, VideoCodec, VideoComponents + +__all__ = [ + # Utility Types + "VideoContainer", + "VideoCodec", + "VideoComponents", +] diff --git a/comfy_api/latest/_util/video_types.py b/comfy_api/latest/_util/video_types.py new file mode 100644 index 000000000..c3e3d8e3a --- /dev/null +++ b/comfy_api/latest/_util/video_types.py @@ -0,0 +1,52 @@ +from __future__ import annotations +from dataclasses import dataclass +from enum import Enum +from fractions import Fraction +from typing import Optional +from 
comfy_api.latest._input import ImageInput, AudioInput + +class VideoCodec(str, Enum): + AUTO = "auto" + H264 = "h264" + + @classmethod + def as_input(cls) -> list[str]: + """ + Returns a list of codec names that can be used as node input. + """ + return [member.value for member in cls] + +class VideoContainer(str, Enum): + AUTO = "auto" + MP4 = "mp4" + + @classmethod + def as_input(cls) -> list[str]: + """ + Returns a list of container names that can be used as node input. + """ + return [member.value for member in cls] + + @classmethod + def get_extension(cls, value) -> str: + """ + Returns the file extension for the container. + """ + if isinstance(value, str): + value = cls(value) + if value == VideoContainer.MP4 or value == VideoContainer.AUTO: + return "mp4" + return "" + +@dataclass +class VideoComponents: + """ + Dataclass representing the components of a video. + """ + + images: ImageInput + frame_rate: Fraction + audio: Optional[AudioInput] = None + metadata: Optional[dict] = None + + diff --git a/comfy_api/latest/generated/ComfyAPISyncStub.pyi b/comfy_api/latest/generated/ComfyAPISyncStub.pyi new file mode 100644 index 000000000..525c074dd --- /dev/null +++ b/comfy_api/latest/generated/ComfyAPISyncStub.pyi @@ -0,0 +1,20 @@ +from typing import Any, Dict, List, Optional, Tuple, Union, Set, Sequence, cast, NamedTuple +from comfy_api.latest import ComfyAPI_latest +from PIL.Image import Image +from torch import Tensor +class ComfyAPISyncStub: + def __init__(self) -> None: ... + + class ExecutionSync: + def __init__(self) -> None: ... + """ + Update the progress bar displayed in the ComfyUI interface. + + This function allows custom nodes and API calls to report their progress + back to the user interface, providing visual feedback during long operations. + + Migration from previous API: comfy.utils.PROGRESS_BAR_HOOK + """ + def set_progress(self, value: float, max_value: float, node_id: Union[str, None] = None, preview_image: Union[Image, Tensor, None] = None, ignore_size_limit: bool = False) -> None: ... + + execution: ExecutionSync diff --git a/comfy_api/util.py b/comfy_api/util.py new file mode 100644 index 000000000..1aa9606d2 --- /dev/null +++ b/comfy_api/util.py @@ -0,0 +1,8 @@ +# This file only exists for backwards compatibility. +from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents + +__all__ = [ + "VideoCodec", + "VideoContainer", + "VideoComponents", +] diff --git a/comfy_api/util/__init__.py b/comfy_api/util/__init__.py index 9019c46db..4c8a89d1e 100644 --- a/comfy_api/util/__init__.py +++ b/comfy_api/util/__init__.py @@ -1,7 +1,7 @@ -from .video_types import VideoContainer, VideoCodec, VideoComponents +# This file only exists for backwards compatibility. +from comfy_api.latest._util import VideoContainer, VideoCodec, VideoComponents __all__ = [ - # Utility Types "VideoContainer", "VideoCodec", "VideoComponents", diff --git a/comfy_api/util/video_types.py b/comfy_api/util/video_types.py index d09663db9..68c780d64 100644 --- a/comfy_api/util/video_types.py +++ b/comfy_api/util/video_types.py @@ -1,51 +1,12 @@ -from __future__ import annotations -from dataclasses import dataclass -from enum import Enum -from fractions import Fraction -from typing import Optional -from comfy_api.input import ImageInput, AudioInput - -class VideoCodec(str, Enum): - AUTO = "auto" - H264 = "h264" - - @classmethod - def as_input(cls) -> list[str]: - """ - Returns a list of codec names that can be used as node input. 
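A quick illustration of the enum helpers defined above; the expected lists follow from the members currently declared:

```python
from comfy_api.latest._util import VideoCodec, VideoContainer

assert VideoCodec.as_input() == ["auto", "h264"]
assert VideoContainer.as_input() == ["auto", "mp4"]
assert VideoContainer.get_extension("auto") == "mp4"  # AUTO currently maps to mp4
assert VideoContainer.get_extension(VideoContainer.MP4) == "mp4"
```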
- """ - return [member.value for member in cls] - -class VideoContainer(str, Enum): - AUTO = "auto" - MP4 = "mp4" - - @classmethod - def as_input(cls) -> list[str]: - """ - Returns a list of container names that can be used as node input. - """ - return [member.value for member in cls] - - @classmethod - def get_extension(cls, value) -> str: - """ - Returns the file extension for the container. - """ - if isinstance(value, str): - value = cls(value) - if value == VideoContainer.MP4 or value == VideoContainer.AUTO: - return "mp4" - return "" - -@dataclass -class VideoComponents: - """ - Dataclass representing the components of a video. - """ - - images: ImageInput - frame_rate: Fraction - audio: Optional[AudioInput] = None - metadata: Optional[dict] = None +# This file only exists for backwards compatibility. +from comfy_api.latest._util.video_types import ( + VideoContainer, + VideoCodec, + VideoComponents, +) +__all__ = [ + "VideoContainer", + "VideoCodec", + "VideoComponents", +] diff --git a/comfy_api/v0_0_1/__init__.py b/comfy_api/v0_0_1/__init__.py new file mode 100644 index 000000000..93608771d --- /dev/null +++ b/comfy_api/v0_0_1/__init__.py @@ -0,0 +1,42 @@ +from comfy_api.v0_0_2 import ( + ComfyAPIAdapter_v0_0_2, + Input as Input_v0_0_2, + InputImpl as InputImpl_v0_0_2, + Types as Types_v0_0_2, +) +from typing import Type, TYPE_CHECKING +from comfy_api.internal.async_to_sync import create_sync_class + + +# This version only exists to serve as a template for future version adapters. +# There is no reason anyone should ever use it. +class ComfyAPIAdapter_v0_0_1(ComfyAPIAdapter_v0_0_2): + VERSION = "0.0.1" + STABLE = True + +class Input(Input_v0_0_2): + pass + +class InputImpl(InputImpl_v0_0_2): + pass + +class Types(Types_v0_0_2): + pass + +ComfyAPI = ComfyAPIAdapter_v0_0_1 + +# Create a synchronous version of the API +if TYPE_CHECKING: + from comfy_api.v0_0_1.generated.ComfyAPISyncStub import ComfyAPISyncStub # type: ignore + + ComfyAPISync: Type[ComfyAPISyncStub] + +ComfyAPISync = create_sync_class(ComfyAPIAdapter_v0_0_1) + +__all__ = [ + "ComfyAPI", + "ComfyAPISync", + "Input", + "InputImpl", + "Types", +] diff --git a/comfy_api/v0_0_1/generated/ComfyAPISyncStub.pyi b/comfy_api/v0_0_1/generated/ComfyAPISyncStub.pyi new file mode 100644 index 000000000..270030324 --- /dev/null +++ b/comfy_api/v0_0_1/generated/ComfyAPISyncStub.pyi @@ -0,0 +1,20 @@ +from typing import Any, Dict, List, Optional, Tuple, Union, Set, Sequence, cast, NamedTuple +from comfy_api.v0_0_1 import ComfyAPIAdapter_v0_0_1 +from PIL.Image import Image +from torch import Tensor +class ComfyAPISyncStub: + def __init__(self) -> None: ... + + class ExecutionSync: + def __init__(self) -> None: ... + """ + Update the progress bar displayed in the ComfyUI interface. + + This function allows custom nodes and API calls to report their progress + back to the user interface, providing visual feedback during long operations. + + Migration from previous API: comfy.utils.PROGRESS_BAR_HOOK + """ + def set_progress(self, value: float, max_value: float, node_id: Union[str, None] = None, preview_image: Union[Image, Tensor, None] = None, ignore_size_limit: bool = False) -> None: ... 
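A small sketch of how the version adapter chain resolves, given the inheritance declared in these files (v0.0.1 subclasses v0.0.2, which subclasses latest):

```python
from comfy_api.latest import ComfyAPI_latest
from comfy_api.v0_0_1 import ComfyAPIAdapter_v0_0_1
from comfy_api.v0_0_2 import ComfyAPIAdapter_v0_0_2

api = ComfyAPIAdapter_v0_0_1()
assert isinstance(api, ComfyAPIAdapter_v0_0_2)  # 0.0.1 adapts 0.0.2
assert isinstance(api, ComfyAPI_latest)         # which in turn adapts latest
assert api.VERSION == "0.0.1" and api.STABLE is True
```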
+ + execution: ExecutionSync diff --git a/comfy_api/v0_0_2/__init__.py b/comfy_api/v0_0_2/__init__.py new file mode 100644 index 000000000..ea83833fb --- /dev/null +++ b/comfy_api/v0_0_2/__init__.py @@ -0,0 +1,43 @@ +from comfy_api.latest import ( + ComfyAPI_latest, + Input as Input_latest, + InputImpl as InputImpl_latest, + Types as Types_latest, +) +from typing import Type, TYPE_CHECKING +from comfy_api.internal.async_to_sync import create_sync_class + + +class ComfyAPIAdapter_v0_0_2(ComfyAPI_latest): + VERSION = "0.0.2" + STABLE = False + + +class Input(Input_latest): + pass + + +class InputImpl(InputImpl_latest): + pass + + +class Types(Types_latest): + pass + + +ComfyAPI = ComfyAPIAdapter_v0_0_2 + +# Create a synchronous version of the API +if TYPE_CHECKING: + from comfy_api.v0_0_2.generated.ComfyAPISyncStub import ComfyAPISyncStub # type: ignore + + ComfyAPISync: Type[ComfyAPISyncStub] +ComfyAPISync = create_sync_class(ComfyAPIAdapter_v0_0_2) + +__all__ = [ + "ComfyAPI", + "ComfyAPISync", + "Input", + "InputImpl", + "Types", +] diff --git a/comfy_api/v0_0_2/generated/ComfyAPISyncStub.pyi b/comfy_api/v0_0_2/generated/ComfyAPISyncStub.pyi new file mode 100644 index 000000000..7fcec685e --- /dev/null +++ b/comfy_api/v0_0_2/generated/ComfyAPISyncStub.pyi @@ -0,0 +1,20 @@ +from typing import Any, Dict, List, Optional, Tuple, Union, Set, Sequence, cast, NamedTuple +from comfy_api.v0_0_2 import ComfyAPIAdapter_v0_0_2 +from PIL.Image import Image +from torch import Tensor +class ComfyAPISyncStub: + def __init__(self) -> None: ... + + class ExecutionSync: + def __init__(self) -> None: ... + """ + Update the progress bar displayed in the ComfyUI interface. + + This function allows custom nodes and API calls to report their progress + back to the user interface, providing visual feedback during long operations. + + Migration from previous API: comfy.utils.PROGRESS_BAR_HOOK + """ + def set_progress(self, value: float, max_value: float, node_id: Union[str, None] = None, preview_image: Union[Image, Tensor, None] = None, ignore_size_limit: bool = False) -> None: ... 
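For reference, a hedged sketch of the async/sync pairing this module exports; the generated .pyi stub only types what create_sync_class() builds at runtime:

```python
from comfy_api.v0_0_2 import ComfyAPI, ComfyAPISync

api = ComfyAPI()            # async surface: await api.execution.set_progress(...)
api_sync = ComfyAPISync()   # blocking surface: api_sync.execution.set_progress(...)
```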
+ + execution: ExecutionSync diff --git a/comfy_api/version_list.py b/comfy_api/version_list.py new file mode 100644 index 000000000..7cb1871d5 --- /dev/null +++ b/comfy_api/version_list.py @@ -0,0 +1,12 @@ +from comfy_api.latest import ComfyAPI_latest +from comfy_api.v0_0_2 import ComfyAPIAdapter_v0_0_2 +from comfy_api.v0_0_1 import ComfyAPIAdapter_v0_0_1 +from comfy_api.internal import ComfyAPIBase +from typing import List, Type + +supported_versions: List[Type[ComfyAPIBase]] = [ + ComfyAPI_latest, + ComfyAPIAdapter_v0_0_2, + ComfyAPIAdapter_v0_0_1, +] + diff --git a/comfy_api_nodes/apis/request_logger.py b/comfy_api_nodes/apis/request_logger.py index 93517ede9..42901e141 100644 --- a/comfy_api_nodes/apis/request_logger.py +++ b/comfy_api_nodes/apis/request_logger.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import datetime import json diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 5935ab2bb..af33279d5 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -2,6 +2,8 @@ API Nodes for Gemini Multimodal LLM Usage via Remote API See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference """ +from __future__ import annotations + import os from enum import Enum diff --git a/comfy_execution/progress.py b/comfy_execution/progress.py index 73dba3f75..e8f5ede1e 100644 --- a/comfy_execution/progress.py +++ b/comfy_execution/progress.py @@ -1,4 +1,6 @@ -from typing import TypedDict, Dict, Optional +from __future__ import annotations + +from typing import TypedDict, Dict, Optional, Tuple from typing_extensions import override from PIL import Image from enum import Enum @@ -10,6 +12,7 @@ if TYPE_CHECKING: from protocol import BinaryEventTypes from comfy_api import feature_flags +PreviewImageTuple = Tuple[str, Image.Image, Optional[int]] class NodeState(Enum): Pending = "pending" @@ -52,7 +55,7 @@ class ProgressHandler(ABC): max_value: float, state: NodeProgressState, prompt_id: str, - image: Optional[Image.Image] = None, + image: PreviewImageTuple | None = None, ): """Called when a node's progress is updated""" pass @@ -103,7 +106,7 @@ class CLIProgressHandler(ProgressHandler): max_value: float, state: NodeProgressState, prompt_id: str, - image: Optional[Image.Image] = None, + image: PreviewImageTuple | None = None, ): # Handle case where start_handler wasn't called if node_id not in self.progress_bars: @@ -196,7 +199,7 @@ class WebUIProgressHandler(ProgressHandler): max_value: float, state: NodeProgressState, prompt_id: str, - image: Optional[Image.Image] = None, + image: PreviewImageTuple | None = None, ): # Send progress state of all nodes if self.registry: @@ -231,7 +234,6 @@ class WebUIProgressHandler(ProgressHandler): if self.registry: self._send_progress_state(prompt_id, self.registry.nodes) - class ProgressRegistry: """ Registry that maintains node progress state and notifies registered handlers. 
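A short illustration of the new PreviewImageTuple payload (sizes arbitrary): a format string, a PIL image, and an optional maximum preview size, where None disables the size clamp:

```python
from PIL import Image
from comfy_execution.progress import PreviewImageTuple

# (image format, PIL image, optional max preview size)
clamped: PreviewImageTuple = ("JPEG", Image.new("RGB", (64, 64)), 512)
full_res: PreviewImageTuple = ("PNG", Image.new("RGB", (1024, 1024)), None)
```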
@@ -285,7 +287,7 @@ class ProgressRegistry: handler.start_handler(node_id, entry, self.prompt_id) def update_progress( - self, node_id: str, value: float, max_value: float, image: Optional[Image.Image] + self, node_id: str, value: float, max_value: float, image: PreviewImageTuple | None = None ) -> None: """Update progress for a node""" entry = self.ensure_entry(node_id) @@ -317,7 +319,7 @@ class ProgressRegistry: handler.reset() # Global registry instance -global_progress_registry: ProgressRegistry = None +global_progress_registry: ProgressRegistry | None = None def reset_progress_state(prompt_id: str, dynprompt: "DynamicPrompt") -> None: global global_progress_registry diff --git a/comfy_extras/nodes_video.py b/comfy_extras/nodes_video.py index 61f7171b2..969f888b9 100644 --- a/comfy_extras/nodes_video.py +++ b/comfy_extras/nodes_video.py @@ -8,9 +8,7 @@ import json from typing import Optional, Literal from fractions import Fraction from comfy.comfy_types import IO, FileLocator, ComfyNodeABC -from comfy_api.input import ImageInput, AudioInput, VideoInput -from comfy_api.util import VideoContainer, VideoCodec, VideoComponents -from comfy_api.input_impl import VideoFromFile, VideoFromComponents +from comfy_api.latest import Input, InputImpl, Types from comfy.cli_args import args class SaveWEBM: @@ -91,8 +89,8 @@ class SaveVideo(ComfyNodeABC): "required": { "video": (IO.VIDEO, {"tooltip": "The video to save."}), "filename_prefix": ("STRING", {"default": "video/ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}), - "format": (VideoContainer.as_input(), {"default": "auto", "tooltip": "The format to save the video as."}), - "codec": (VideoCodec.as_input(), {"default": "auto", "tooltip": "The codec to use for the video."}), + "format": (Types.VideoContainer.as_input(), {"default": "auto", "tooltip": "The format to save the video as."}), + "codec": (Types.VideoCodec.as_input(), {"default": "auto", "tooltip": "The codec to use for the video."}), }, "hidden": { "prompt": "PROMPT", @@ -108,7 +106,7 @@ class SaveVideo(ComfyNodeABC): CATEGORY = "image/video" DESCRIPTION = "Saves the input images to your ComfyUI output directory." - def save_video(self, video: VideoInput, filename_prefix, format, codec, prompt=None, extra_pnginfo=None): + def save_video(self, video: Input.Video, filename_prefix, format, codec, prompt=None, extra_pnginfo=None): filename_prefix += self.prefix_append width, height = video.get_dimensions() full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( @@ -127,7 +125,7 @@ class SaveVideo(ComfyNodeABC): metadata["prompt"] = prompt if len(metadata) > 0: saved_metadata = metadata - file = f"{filename}_{counter:05}_.{VideoContainer.get_extension(format)}" + file = f"{filename}_{counter:05}_.{Types.VideoContainer.get_extension(format)}" video.save_to( os.path.join(full_output_folder, file), format=format, @@ -163,9 +161,9 @@ class CreateVideo(ComfyNodeABC): CATEGORY = "image/video" DESCRIPTION = "Create a video from images." 
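A hedged sketch of the namespace migration this hunk performs in node code: video types are now reached through the versioned facade instead of comfy_api.input and comfy_api.util. The wrap_frames helper is hypothetical:

```python
from fractions import Fraction
from comfy_api.latest import Input, InputImpl, Types

def wrap_frames(images: Input.Image) -> Input.Video:
    """Build a Video object from an image batch, mirroring CreateVideo below."""
    return InputImpl.VideoFromComponents(
        Types.VideoComponents(images=images, audio=None, frame_rate=Fraction(24))
    )
```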
- def create_video(self, images: ImageInput, fps: float, audio: Optional[AudioInput] = None): - return (VideoFromComponents( - VideoComponents( + def create_video(self, images: Input.Image, fps: float, audio: Optional[Input.Audio] = None): + return (InputImpl.VideoFromComponents( + Types.VideoComponents( images=images, audio=audio, frame_rate=Fraction(fps), @@ -187,7 +185,7 @@ class GetVideoComponents(ComfyNodeABC): CATEGORY = "image/video" DESCRIPTION = "Extracts all components from a video: frames, audio, and framerate." - def get_components(self, video: VideoInput): + def get_components(self, video: Input.Video): components = video.get_components() return (components.images, components.audio, float(components.frame_rate)) @@ -208,7 +206,7 @@ class LoadVideo(ComfyNodeABC): FUNCTION = "load_video" def load_video(self, file): video_path = folder_paths.get_annotated_filepath(file) - return (VideoFromFile(video_path),) + return (InputImpl.VideoFromFile(video_path),) @classmethod def IS_CHANGED(cls, file): @@ -239,3 +237,4 @@ NODE_DISPLAY_NAME_MAPPINGS = { "GetVideoComponents": "Get Video Components", "LoadVideo": "Load Video", } + diff --git a/main.py b/main.py index e8ca8152a..9b2a33011 100644 --- a/main.py +++ b/main.py @@ -313,10 +313,10 @@ def start_comfyui(asyncio_loop=None): prompt_server = server.PromptServer(asyncio_loop) hook_breaker_ac10a0.save_functions() - nodes.init_extra_nodes( + asyncio_loop.run_until_complete(nodes.init_extra_nodes( init_custom_nodes=(not args.disable_all_custom_nodes) or len(args.whitelist_custom_nodes) > 0, init_api_nodes=not args.disable_api_nodes - ) + )) hook_breaker_ac10a0.restore_functions() cuda_malloc_warning() diff --git a/nodes.py b/nodes.py index 231d4d4de..54e530388 100644 --- a/nodes.py +++ b/nodes.py @@ -1,6 +1,7 @@ from __future__ import annotations import torch + import os import sys import json @@ -26,6 +27,8 @@ import comfy.sd import comfy.utils import comfy.controlnet from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict, FileLocator +from comfy_api.internal import register_versions, ComfyAPIWithVersion +from comfy_api.version_list import supported_versions import comfy.clip_vision @@ -2101,7 +2104,7 @@ def get_module_name(module_path: str) -> str: return base_path -def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes") -> bool: +async def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes") -> bool: module_name = get_module_name(module_path) if os.path.isfile(module_path): sp = os.path.splitext(module_path) @@ -2165,7 +2168,7 @@ def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes logging.warning(f"Cannot import {module_path} module for custom nodes: {e}") return False -def init_external_custom_nodes(): +async def init_external_custom_nodes(): """ Initializes the external custom nodes. 
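A minimal standalone sketch of the new async initialization flow shown above; loop creation is simplified relative to what main.py actually does:

```python
import asyncio
import nodes

# Node initialization is now a coroutine, driven to completion before startup.
loop = asyncio.new_event_loop()
loop.run_until_complete(
    nodes.init_extra_nodes(init_custom_nodes=True, init_api_nodes=True)
)
```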
@@ -2191,7 +2194,7 @@ def init_external_custom_nodes(): logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes") continue time_before = time.perf_counter() - success = load_custom_node(module_path, base_node_names, module_parent="custom_nodes") + success = await load_custom_node(module_path, base_node_names, module_parent="custom_nodes") node_import_times.append((time.perf_counter() - time_before, module_path, success)) if len(node_import_times) > 0: @@ -2204,7 +2207,7 @@ def init_external_custom_nodes(): logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) logging.info("") -def init_builtin_extra_nodes(): +async def init_builtin_extra_nodes(): """ Initializes the built-in extra nodes in ComfyUI. @@ -2288,13 +2291,13 @@ def init_builtin_extra_nodes(): import_failed = [] for node_file in extras_files: - if not load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras"): + if not await load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras"): import_failed.append(node_file) return import_failed -def init_builtin_api_nodes(): +async def init_builtin_api_nodes(): api_nodes_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_api_nodes") api_nodes_files = [ "nodes_ideogram.py", @@ -2315,26 +2318,35 @@ def init_builtin_api_nodes(): "nodes_gemini.py", ] - if not load_custom_node(os.path.join(api_nodes_dir, "canary.py"), module_parent="comfy_api_nodes"): + if not await load_custom_node(os.path.join(api_nodes_dir, "canary.py"), module_parent="comfy_api_nodes"): return api_nodes_files import_failed = [] for node_file in api_nodes_files: - if not load_custom_node(os.path.join(api_nodes_dir, node_file), module_parent="comfy_api_nodes"): + if not await load_custom_node(os.path.join(api_nodes_dir, node_file), module_parent="comfy_api_nodes"): import_failed.append(node_file) return import_failed +async def init_public_apis(): + register_versions([ + ComfyAPIWithVersion( + version=getattr(v, "VERSION"), + api_class=v + ) for v in supported_versions + ]) -def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True): - import_failed = init_builtin_extra_nodes() +async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True): + await init_public_apis() + + import_failed = await init_builtin_extra_nodes() import_failed_api = [] if init_api_nodes: - import_failed_api = init_builtin_api_nodes() + import_failed_api = await init_builtin_api_nodes() if init_custom_nodes: - init_external_custom_nodes() + await init_external_custom_nodes() else: logging.info("Skipping loading of custom nodes") diff --git a/pyproject.toml b/pyproject.toml index 723c93069..244fdd232 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,4 +21,4 @@ lint.select = [ # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f "F", ] -exclude = ["*.ipynb"] +exclude = ["*.ipynb", "**/generated/*.pyi"] diff --git a/tests/inference/testing_nodes/testing-pack/__init__.py b/tests/inference/testing_nodes/testing-pack/__init__.py index 20f9533c7..3d5ac8a94 100644 --- a/tests/inference/testing_nodes/testing-pack/__init__.py +++ b/tests/inference/testing_nodes/testing-pack/__init__.py @@ -4,6 +4,7 @@ from .util import UTILITY_NODE_CLASS_MAPPINGS, UTILITY_NODE_DISPLAY_NAME_MAPPING from .conditions import CONDITION_NODE_CLASS_MAPPINGS, CONDITION_NODE_DISPLAY_NAME_MAPPINGS from .stubs import TEST_STUB_NODE_CLASS_MAPPINGS, TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS from .async_test_nodes import 
ASYNC_TEST_NODE_CLASS_MAPPINGS, ASYNC_TEST_NODE_DISPLAY_NAME_MAPPINGS
+from .api_test_nodes import API_TEST_NODE_CLASS_MAPPINGS, API_TEST_NODE_DISPLAY_NAME_MAPPINGS

 # NODE_CLASS_MAPPINGS = GENERAL_NODE_CLASS_MAPPINGS.update(COMPONENT_NODE_CLASS_MAPPINGS)
 # NODE_DISPLAY_NAME_MAPPINGS = GENERAL_NODE_DISPLAY_NAME_MAPPINGS.update(COMPONENT_NODE_DISPLAY_NAME_MAPPINGS)
@@ -15,6 +16,7 @@ NODE_CLASS_MAPPINGS.update(UTILITY_NODE_CLASS_MAPPINGS)
 NODE_CLASS_MAPPINGS.update(CONDITION_NODE_CLASS_MAPPINGS)
 NODE_CLASS_MAPPINGS.update(TEST_STUB_NODE_CLASS_MAPPINGS)
 NODE_CLASS_MAPPINGS.update(ASYNC_TEST_NODE_CLASS_MAPPINGS)
+NODE_CLASS_MAPPINGS.update(API_TEST_NODE_CLASS_MAPPINGS)

 NODE_DISPLAY_NAME_MAPPINGS = {}
 NODE_DISPLAY_NAME_MAPPINGS.update(TEST_NODE_DISPLAY_NAME_MAPPINGS)
@@ -23,4 +25,4 @@ NODE_DISPLAY_NAME_MAPPINGS.update(UTILITY_NODE_DISPLAY_NAME_MAPPINGS)
 NODE_DISPLAY_NAME_MAPPINGS.update(CONDITION_NODE_DISPLAY_NAME_MAPPINGS)
 NODE_DISPLAY_NAME_MAPPINGS.update(TEST_STUB_NODE_DISPLAY_NAME_MAPPINGS)
 NODE_DISPLAY_NAME_MAPPINGS.update(ASYNC_TEST_NODE_DISPLAY_NAME_MAPPINGS)
-
+NODE_DISPLAY_NAME_MAPPINGS.update(API_TEST_NODE_DISPLAY_NAME_MAPPINGS)
diff --git a/tests/inference/testing_nodes/testing-pack/api_test_nodes.py b/tests/inference/testing_nodes/testing-pack/api_test_nodes.py
new file mode 100644
index 000000000..b2eaae05e
--- /dev/null
+++ b/tests/inference/testing_nodes/testing-pack/api_test_nodes.py
@@ -0,0 +1,78 @@
+import asyncio
+import time
+from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
+from comfy_api.v0_0_2 import ComfyAPI, ComfyAPISync

+api = ComfyAPI()
+api_sync = ComfyAPISync()
+
+
+class TestAsyncProgressUpdate(ComfyNodeABC):
+    """Test node that reports progress through the async API while it runs."""
+
+    @classmethod
+    def INPUT_TYPES(cls) -> InputTypeDict:
+        return {
+            "required": {
+                "value": (IO.ANY, {}),
+                "sleep_seconds": (IO.FLOAT, {"default": 1.0}),
+            },
+        }
+
+    RETURN_TYPES = (IO.ANY,)
+    FUNCTION = "execute"
+    CATEGORY = "_for_testing/async"
+
+    async def execute(self, value, sleep_seconds):
+        start = time.time()
+        expiration = start + sleep_seconds
+        now = start
+        while now < expiration:
+            now = time.time()
+            await api.execution.set_progress(
+                value=(now - start) / sleep_seconds,
+                max_value=1.0,
+            )
+            await asyncio.sleep(0.01)
+        return (value,)
+
+
+class TestSyncProgressUpdate(ComfyNodeABC):
+    """Test node that reports progress through the synchronous API wrapper."""
+
+    @classmethod
+    def INPUT_TYPES(cls) -> InputTypeDict:
+        return {
+            "required": {
+                "value": (IO.ANY, {}),
+                "sleep_seconds": (IO.FLOAT, {"default": 1.0}),
+            },
+        }
+
+    RETURN_TYPES = (IO.ANY,)
+    FUNCTION = "execute"
+    CATEGORY = "_for_testing/async"
+
+    def execute(self, value, sleep_seconds):
+        start = time.time()
+        expiration = start + sleep_seconds
+        now = start
+        while now < expiration:
+            now = time.time()
+            api_sync.execution.set_progress(
+                value=(now - start) / sleep_seconds,
+                max_value=1.0,
+            )
+            time.sleep(0.01)
+        return (value,)
+
+
+API_TEST_NODE_CLASS_MAPPINGS = {
+    "TestAsyncProgressUpdate": TestAsyncProgressUpdate,
+    "TestSyncProgressUpdate": TestSyncProgressUpdate,
+}
+
+API_TEST_NODE_DISPLAY_NAME_MAPPINGS = {
+    "TestAsyncProgressUpdate": "Async Progress Update Test Node",
+    "TestSyncProgressUpdate": "Sync Progress Update Test Node",
+}

From d2aaef029cfb60611f2c9aad1b5dfb7070f9c162 Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Wed, 30 Jul 2025 10:50:49 +0800
Subject: [PATCH 0381/1073] Update template to 0.1.44 (#9104)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt
b/requirements.txt index 14a085a2c..8f2f6a56c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.41 +comfyui-workflow-templates==0.1.44 comfyui-embedded-docs==0.2.4 torch torchsde From da9dab7edd36fa75e010d2c8498e1ba23b8b97dd Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 30 Jul 2025 02:55:26 -0700 Subject: [PATCH 0382/1073] Small wan camera memory optimization. (#9111) --- comfy/ldm/wan/model.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index a93a13c86..86d0795e9 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -769,8 +769,7 @@ class CameraWanModel(WanModel): # embeddings x = self.patch_embedding(x.float()).to(x.dtype) if self.control_adapter is not None and camera_conditions is not None: - x_camera = self.control_adapter(camera_conditions).to(x.dtype) - x = x + x_camera + x = x + self.control_adapter(camera_conditions).to(x.dtype) grid_sizes = x.shape[2:] x = x.flatten(2).transpose(1, 2) From 61b08d4ba65fec37070376bf50da3ec3c534e859 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Thu, 31 Jul 2025 07:25:56 +0800 Subject: [PATCH 0383/1073] Replace manual x * sigmoid(x) with torch silu in VAE nonlinearity (#9057) --- comfy/ldm/cosmos/cosmos_tokenizer/utils.py | 3 ++- comfy/ldm/modules/diffusionmodules/model.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/cosmos/cosmos_tokenizer/utils.py b/comfy/ldm/cosmos/cosmos_tokenizer/utils.py index 3af8d0d05..ca993006f 100644 --- a/comfy/ldm/cosmos/cosmos_tokenizer/utils.py +++ b/comfy/ldm/cosmos/cosmos_tokenizer/utils.py @@ -58,7 +58,8 @@ def is_odd(n: int) -> bool: def nonlinearity(x): - return x * torch.sigmoid(x) + # x * sigmoid(x) + return torch.nn.functional.silu(x) def Normalize(in_channels, num_groups=32): diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 8162742cf..5c0373b74 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -36,7 +36,7 @@ def get_timestep_embedding(timesteps, embedding_dim): def nonlinearity(x): # swish - return x*torch.sigmoid(x) + return torch.nn.functional.silu(x) def Normalize(in_channels, num_groups=32): From 97eb256a355b434bbc96ec27bbce33dd10273857 Mon Sep 17 00:00:00 2001 From: guill Date: Wed, 30 Jul 2025 19:55:28 -0700 Subject: [PATCH 0384/1073] Add support for partial execution in backend (#9123) When a prompt is submitted, it can optionally include `partial_execution_targets` as a list of ids. If it does, rather than adding all outputs to the execution list, we add only those in the list. --- execution.py | 7 +- server.py | 7 +- tests/inference/test_async_nodes.py | 15 +- tests/inference/test_execution.py | 202 ++++++++++++++++-- .../testing-pack/specific_tests.py | 21 ++ 5 files changed, 233 insertions(+), 19 deletions(-) diff --git a/execution.py b/execution.py index 8a9663a7d..cde14c52f 100644 --- a/execution.py +++ b/execution.py @@ -7,7 +7,7 @@ import threading import time import traceback from enum import Enum -from typing import List, Literal, NamedTuple, Optional +from typing import List, Literal, NamedTuple, Optional, Union import asyncio import torch @@ -891,7 +891,7 @@ def full_type_name(klass): return klass.__qualname__ return module + '.' 
+ klass.__qualname__ -async def validate_prompt(prompt_id, prompt): +async def validate_prompt(prompt_id, prompt, partial_execution_list: Union[list[str], None]): outputs = set() for x in prompt: if 'class_type' not in prompt[x]: @@ -915,7 +915,8 @@ async def validate_prompt(prompt_id, prompt): return (False, error, [], {}) if hasattr(class_, 'OUTPUT_NODE') and class_.OUTPUT_NODE is True: - outputs.add(x) + if partial_execution_list is None or x in partial_execution_list: + outputs.add(x) if len(outputs) == 0: error = { diff --git a/server.py b/server.py index f4de0079b..3e06d2fbb 100644 --- a/server.py +++ b/server.py @@ -681,7 +681,12 @@ class PromptServer(): if "prompt" in json_data: prompt = json_data["prompt"] prompt_id = str(json_data.get("prompt_id", uuid.uuid4())) - valid = await execution.validate_prompt(prompt_id, prompt) + + partial_execution_targets = None + if "partial_execution_targets" in json_data: + partial_execution_targets = json_data["partial_execution_targets"] + + valid = await execution.validate_prompt(prompt_id, prompt, partial_execution_targets) extra_data = {} if "extra_data" in json_data: extra_data = json_data["extra_data"] diff --git a/tests/inference/test_async_nodes.py b/tests/inference/test_async_nodes.py index b243bbca9..f029953dd 100644 --- a/tests/inference/test_async_nodes.py +++ b/tests/inference/test_async_nodes.py @@ -7,7 +7,7 @@ import subprocess from pytest import fixture from comfy_execution.graph_utils import GraphBuilder -from tests.inference.test_execution import ComfyClient +from tests.inference.test_execution import ComfyClient, run_warmup @pytest.mark.execution @@ -24,6 +24,7 @@ class TestAsyncNodes: '--listen', args_pytest["listen"], '--port', str(args_pytest["port"]), '--extra-model-paths-config', 'tests/inference/extra_model_paths.yaml', + '--cpu', ] use_lru, lru_size = request.param if use_lru: @@ -82,6 +83,9 @@ class TestAsyncNodes: def test_multiple_async_parallel_execution(self, client: ComfyClient, builder: GraphBuilder): """Test that multiple async nodes execute in parallel.""" + # Warmup execution to ensure server is fully initialized + run_warmup(client) + g = builder image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) @@ -148,6 +152,9 @@ class TestAsyncNodes: def test_async_lazy_evaluation(self, client: ComfyClient, builder: GraphBuilder): """Test async nodes with lazy evaluation.""" + # Warmup execution to ensure server is fully initialized + run_warmup(client, prefix="warmup_lazy") + g = builder input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) @@ -305,6 +312,9 @@ class TestAsyncNodes: def test_async_caching_behavior(self, client: ComfyClient, builder: GraphBuilder): """Test that async nodes are properly cached.""" + # Warmup execution to ensure server is fully initialized + run_warmup(client, prefix="warmup_cache") + g = builder image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) sleep_node = g.node("TestSleep", value=image.out(0), seconds=0.2) @@ -324,6 +334,9 @@ class TestAsyncNodes: def test_async_with_dynamic_prompts(self, client: ComfyClient, builder: GraphBuilder): """Test async nodes within dynamically generated prompts.""" + # Warmup execution to ensure server is fully initialized + run_warmup(client, prefix="warmup_dynamic") + g = builder image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) image2 = 
g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) diff --git a/tests/inference/test_execution.py b/tests/inference/test_execution.py index 9d3d685cc..e7b29302e 100644 --- a/tests/inference/test_execution.py +++ b/tests/inference/test_execution.py @@ -15,10 +15,18 @@ import urllib.parse import urllib.error from comfy_execution.graph_utils import GraphBuilder, Node +def run_warmup(client, prefix="warmup"): + """Run a simple workflow to warm up the server.""" + warmup_g = GraphBuilder(prefix=prefix) + warmup_image = warmup_g.node("StubImage", content="BLACK", height=32, width=32, batch_size=1) + warmup_g.node("PreviewImage", images=warmup_image.out(0)) + client.run(warmup_g) + class RunResult: def __init__(self, prompt_id: str): self.outputs: Dict[str,Dict] = {} self.runs: Dict[str,bool] = {} + self.cached: Dict[str,bool] = {} self.prompt_id: str = prompt_id def get_output(self, node: Node): @@ -27,6 +35,13 @@ class RunResult: def did_run(self, node: Node): return self.runs.get(node.id, False) + def was_cached(self, node: Node): + return self.cached.get(node.id, False) + + def was_executed(self, node: Node): + """Returns True if node was either run or cached""" + return self.did_run(node) or self.was_cached(node) + def get_images(self, node: Node): output = self.get_output(node) if output is None: @@ -51,8 +66,10 @@ class ComfyClient: ws.connect("ws://{}/ws?clientId={}".format(self.server_address, self.client_id)) self.ws = ws - def queue_prompt(self, prompt): + def queue_prompt(self, prompt, partial_execution_targets=None): p = {"prompt": prompt, "client_id": self.client_id} + if partial_execution_targets is not None: + p["partial_execution_targets"] = partial_execution_targets data = json.dumps(p).encode('utf-8') req = urllib.request.Request("http://{}/prompt".format(self.server_address), data=data) return json.loads(urllib.request.urlopen(req).read()) @@ -70,13 +87,13 @@ class ComfyClient: def set_test_name(self, name): self.test_name = name - def run(self, graph): + def run(self, graph, partial_execution_targets=None): prompt = graph.finalize() for node in graph.nodes.values(): if node.class_type == 'SaveImage': node.inputs['filename_prefix'] = self.test_name - prompt_id = self.queue_prompt(prompt)['prompt_id'] + prompt_id = self.queue_prompt(prompt, partial_execution_targets)['prompt_id'] result = RunResult(prompt_id) while True: out = self.ws.recv() @@ -92,7 +109,10 @@ class ComfyClient: elif message['type'] == 'execution_error': raise Exception(message['data']) elif message['type'] == 'execution_cached': - pass # Probably want to store this off for testing + if message['data']['prompt_id'] == prompt_id: + cached_nodes = message['data'].get('nodes', []) + for node_id in cached_nodes: + result.cached[node_id] = True history = self.get_history(prompt_id)[prompt_id] for node_id in history['outputs']: @@ -130,6 +150,7 @@ class TestExecution: '--listen', args_pytest["listen"], '--port', str(args_pytest["port"]), '--extra-model-paths-config', 'tests/inference/extra_model_paths.yaml', + '--cpu', ] use_lru, lru_size = request.param if use_lru: @@ -498,12 +519,15 @@ class TestExecution: assert not result.did_run(test_node), "The execution should have been cached" def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder): + # Warmup execution to ensure server is fully initialized + run_warmup(client) + g = builder image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) # Create sleep nodes for each duration - sleep_node1 = 
g.node("TestSleep", value=image.out(0), seconds=2.8) - sleep_node2 = g.node("TestSleep", value=image.out(0), seconds=2.9) + sleep_node1 = g.node("TestSleep", value=image.out(0), seconds=2.9) + sleep_node2 = g.node("TestSleep", value=image.out(0), seconds=3.1) sleep_node3 = g.node("TestSleep", value=image.out(0), seconds=3.0) # Add outputs to verify the execution @@ -515,10 +539,9 @@ class TestExecution: result = client.run(g) elapsed_time = time.time() - start_time - # The test should take around 0.4 seconds (the longest sleep duration) - # plus some overhead, but definitely less than the sum of all sleeps (0.9s) - # We'll allow for up to 0.8s total to account for overhead - assert elapsed_time < 4.0, f"Parallel execution took {elapsed_time}s, expected less than 0.8s" + # The test should take around 3.0 seconds (the longest sleep duration) + # plus some overhead, but definitely less than the sum of all sleeps (9.0s) + assert elapsed_time < 8.9, f"Parallel execution took {elapsed_time}s, expected less than 8.9s" # Verify that all nodes executed assert result.did_run(sleep_node1), "Sleep node 1 should have run" @@ -526,6 +549,9 @@ class TestExecution: assert result.did_run(sleep_node3), "Sleep node 3 should have run" def test_parallel_sleep_expansion(self, client: ComfyClient, builder: GraphBuilder): + # Warmup execution to ensure server is fully initialized + run_warmup(client) + g = builder # Create input images with different values image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) @@ -537,9 +563,9 @@ class TestExecution: image1=image1.out(0), image2=image2.out(0), image3=image3.out(0), - sleep1=0.4, - sleep2=0.5, - sleep3=0.6) + sleep1=4.8, + sleep2=4.9, + sleep3=5.0) output = g.node("SaveImage", images=parallel_sleep.out(0)) start_time = time.time() @@ -548,7 +574,7 @@ class TestExecution: # Similar to the previous test, expect parallel execution of the sleep nodes # which should complete in less than the sum of all sleeps - assert elapsed_time < 0.8, f"Expansion execution took {elapsed_time}s, expected less than 0.8s" + assert elapsed_time < 10.0, f"Expansion execution took {elapsed_time}s, expected less than 5.5s" # Verify the parallel sleep node executed assert result.did_run(parallel_sleep), "ParallelSleep node should have run" @@ -585,3 +611,151 @@ class TestExecution: assert len(images) == 2, "Should have 2 images" assert numpy.array(images[0]).min() == 0 and numpy.array(images[0]).max() == 0, "First image should be black" assert numpy.array(images[1]).min() == 0 and numpy.array(images[1]).max() == 0, "Second image should also be black" + + # Output nodes included in the partial execution list are executed + def test_partial_execution_included_outputs(self, client: ComfyClient, builder: GraphBuilder): + g = builder + input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + + # Create two separate output nodes + output1 = g.node("SaveImage", images=input1.out(0)) + output2 = g.node("SaveImage", images=input2.out(0)) + + # Run with partial execution targeting only output1 + result = client.run(g, partial_execution_targets=[output1.id]) + + assert result.was_executed(input1), "Input1 should have been executed (run or cached)" + assert result.was_executed(output1), "Output1 should have been executed (run or cached)" + assert not result.did_run(input2), "Input2 should not have run" + assert not result.did_run(output2), "Output2 should not 
have run" + + # Verify only output1 produced results + assert len(result.get_images(output1)) == 1, "Output1 should have produced an image" + assert len(result.get_images(output2)) == 0, "Output2 should not have produced an image" + + # Output nodes NOT included in the partial execution list are NOT executed + def test_partial_execution_excluded_outputs(self, client: ComfyClient, builder: GraphBuilder): + g = builder + input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + input3 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) + + # Create three output nodes + output1 = g.node("SaveImage", images=input1.out(0)) + output2 = g.node("SaveImage", images=input2.out(0)) + output3 = g.node("SaveImage", images=input3.out(0)) + + # Run with partial execution targeting only output1 and output3 + result = client.run(g, partial_execution_targets=[output1.id, output3.id]) + + assert result.was_executed(input1), "Input1 should have been executed" + assert result.was_executed(input3), "Input3 should have been executed" + assert result.was_executed(output1), "Output1 should have been executed" + assert result.was_executed(output3), "Output3 should have been executed" + assert not result.did_run(input2), "Input2 should not have run" + assert not result.did_run(output2), "Output2 should not have run" + + # Output nodes NOT in list ARE executed if necessary for nodes that are in the list + def test_partial_execution_dependencies(self, client: ComfyClient, builder: GraphBuilder): + g = builder + input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + + # Create a processing chain with an OUTPUT_NODE that has socket outputs + output_with_socket = g.node("TestOutputNodeWithSocketOutput", image=input1.out(0), value=2.0) + + # Create another node that depends on the output_with_socket + dependent_node = g.node("TestLazyMixImages", + image1=output_with_socket.out(0), + image2=input1.out(0), + mask=g.node("StubMask", value=0.5, height=512, width=512, batch_size=1).out(0)) + + # Create the final output + final_output = g.node("SaveImage", images=dependent_node.out(0)) + + # Run with partial execution targeting only the final output + result = client.run(g, partial_execution_targets=[final_output.id]) + + # All nodes should have been executed because they're dependencies + assert result.was_executed(input1), "Input1 should have been executed" + assert result.was_executed(output_with_socket), "Output with socket should have been executed (dependency)" + assert result.was_executed(dependent_node), "Dependent node should have been executed" + assert result.was_executed(final_output), "Final output should have been executed" + + # Lazy execution works with partial execution + def test_partial_execution_with_lazy_nodes(self, client: ComfyClient, builder: GraphBuilder): + g = builder + input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + input3 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) + + # Create masks that will trigger different lazy execution paths + mask1 = g.node("StubMask", value=0.0, height=512, width=512, batch_size=1) # Will only need image1 + mask2 = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1) # Will need both images + + # Create two lazy mix nodes + lazy_mix1 = 
g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask1.out(0)) + lazy_mix2 = g.node("TestLazyMixImages", image1=input2.out(0), image2=input3.out(0), mask=mask2.out(0)) + + output1 = g.node("SaveImage", images=lazy_mix1.out(0)) + output2 = g.node("SaveImage", images=lazy_mix2.out(0)) + + # Run with partial execution targeting only output1 + result = client.run(g, partial_execution_targets=[output1.id]) + + # For output1 path - only input1 should run due to lazy evaluation (mask=0.0) + assert result.was_executed(input1), "Input1 should have been executed" + assert not result.did_run(input2), "Input2 should not have run (lazy evaluation)" + assert result.was_executed(mask1), "Mask1 should have been executed" + assert result.was_executed(lazy_mix1), "Lazy mix1 should have been executed" + assert result.was_executed(output1), "Output1 should have been executed" + + # Nothing from output2 path should run + assert not result.did_run(input3), "Input3 should not have run" + assert not result.did_run(mask2), "Mask2 should not have run" + assert not result.did_run(lazy_mix2), "Lazy mix2 should not have run" + assert not result.did_run(output2), "Output2 should not have run" + + # Multiple OUTPUT_NODEs with dependencies + def test_partial_execution_multiple_output_nodes(self, client: ComfyClient, builder: GraphBuilder): + g = builder + input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1) + + # Create a chain of OUTPUT_NODEs + output_node1 = g.node("TestOutputNodeWithSocketOutput", image=input1.out(0), value=1.5) + output_node2 = g.node("TestOutputNodeWithSocketOutput", image=output_node1.out(0), value=2.0) + + # Create regular output nodes + save1 = g.node("SaveImage", images=output_node1.out(0)) + save2 = g.node("SaveImage", images=output_node2.out(0)) + save3 = g.node("SaveImage", images=input2.out(0)) + + # Run targeting only save2 + result = client.run(g, partial_execution_targets=[save2.id]) + + # Should run: input1, output_node1, output_node2, save2 + assert result.was_executed(input1), "Input1 should have been executed" + assert result.was_executed(output_node1), "Output node 1 should have been executed (dependency)" + assert result.was_executed(output_node2), "Output node 2 should have been executed (dependency)" + assert result.was_executed(save2), "Save2 should have been executed" + + # Should NOT run: input2, save1, save3 + assert not result.did_run(input2), "Input2 should not have run" + assert not result.did_run(save1), "Save1 should not have run" + assert not result.did_run(save3), "Save3 should not have run" + + # Empty partial execution list (should execute nothing) + def test_partial_execution_empty_list(self, client: ComfyClient, builder: GraphBuilder): + g = builder + input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) + _output1 = g.node("SaveImage", images=input1.out(0)) + + # Run with empty partial execution list + try: + _result = client.run(g, partial_execution_targets=[]) + # Should get an error because no outputs are selected + assert False, "Should have raised an error for empty partial execution list" + except urllib.error.HTTPError: + pass # Expected behavior + diff --git a/tests/inference/testing_nodes/testing-pack/specific_tests.py b/tests/inference/testing_nodes/testing-pack/specific_tests.py index 657d49f2f..4f8f01ae4 100644 --- a/tests/inference/testing_nodes/testing-pack/specific_tests.py +++ 
b/tests/inference/testing_nodes/testing-pack/specific_tests.py @@ -463,6 +463,25 @@ class TestParallelSleep(ComfyNodeABC): "expand": g.finalize(), } +class TestOutputNodeWithSocketOutput: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}), + }, + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "process" + CATEGORY = "_for_testing" + OUTPUT_NODE = True + + def process(self, image, value): + # Apply value scaling; this OUTPUT_NODE also returns the result on a socket + result = image * value + return (result,) + TEST_NODE_CLASS_MAPPINGS = { "TestLazyMixImages": TestLazyMixImages, "TestVariadicAverage": TestVariadicAverage, @@ -478,6 +497,7 @@ TEST_NODE_CLASS_MAPPINGS = { "TestSamplingInExpansion": TestSamplingInExpansion, "TestSleep": TestSleep, "TestParallelSleep": TestParallelSleep, + "TestOutputNodeWithSocketOutput": TestOutputNodeWithSocketOutput, } TEST_NODE_DISPLAY_NAME_MAPPINGS = { @@ -495,4 +515,5 @@ TEST_NODE_DISPLAY_NAME_MAPPINGS = { "TestSamplingInExpansion": "Sampling In Expansion", "TestSleep": "Test Sleep", "TestParallelSleep": "Test Parallel Sleep", + "TestOutputNodeWithSocketOutput": "Test Output Node With Socket Output", } From 97b8a2c26a335fe70ac6cfb44bf225454f51d700 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 31 Jul 2025 02:46:23 -0700 Subject: [PATCH 0385/1073] More accurate explanation of release process. (#9126) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index befc4c006..2abd8e600 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git ## Release Process -ComfyUI follows a weekly release cycle every Friday, with three interconnected repositories: +ComfyUI follows a weekly release cycle targeting Friday, but this regularly changes because of model releases or large changes to the codebase. There are three interconnected repositories: 1. 
**[ComfyUI Core](https://github.com/comfyanonymous/ComfyUI)** - Releases a new stable version (e.g., v0.7.0) From 4887743a2aef67e05909aeea61f6cdc93e269de3 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Thu, 31 Jul 2025 15:02:12 -0700 Subject: [PATCH 0386/1073] V3 Node Schema Definition - initial (#8656) --- comfy_api/internal/__init__.py | 143 +++ comfy_api/latest/__init__.py | 18 + comfy_api/latest/_io.py | 1618 ++++++++++++++++++++++++++++++++ comfy_api/latest/_resources.py | 72 ++ comfy_api/latest/_ui.py | 457 +++++++++ comfy_api/v0_0_2/__init__.py | 2 + comfy_execution/graph.py | 23 +- comfy_execution/graph_utils.py | 16 + execution.py | 140 ++- nodes.py | 37 +- server.py | 3 + 11 files changed, 2475 insertions(+), 54 deletions(-) create mode 100644 comfy_api/latest/_io.py create mode 100644 comfy_api/latest/_resources.py create mode 100644 comfy_api/latest/_ui.py diff --git a/comfy_api/internal/__init__.py b/comfy_api/internal/__init__.py index c00b1fdbb..4ca02e320 100644 --- a/comfy_api/internal/__init__.py +++ b/comfy_api/internal/__init__.py @@ -5,3 +5,146 @@ from .api_registry import ( register_versions as register_versions, get_all_versions as get_all_versions, ) + +import asyncio +from dataclasses import asdict +from typing import Callable, Optional + + +def first_real_override(cls: type, name: str, *, base: type=None) -> Optional[Callable]: + """Return the *callable* override of `name` visible on `cls`, or None if every + implementation up to (and including) `base` is the placeholder defined on `base`. + + If base is not provided, it will assume cls has a GET_BASE_CLASS + """ + if base is None: + if not hasattr(cls, "GET_BASE_CLASS"): + raise ValueError("base is required if cls does not have a GET_BASE_CLASS; is this a valid ComfyNode subclass?") + base = cls.GET_BASE_CLASS() + base_attr = getattr(base, name, None) + if base_attr is None: + return None + base_func = base_attr.__func__ + for c in cls.mro(): # NodeB, NodeA, ComfyNode, object … + if c is base: # reached the placeholder – we're done + break + if name in c.__dict__: # first class that *defines* the attr + func = getattr(c, name).__func__ + if func is not base_func: # real override + return getattr(cls, name) # bound to *cls* + return None + + +class _ComfyNodeInternal: + """Class that all V3-based APIs inherit from for ComfyNode. + + This is intended to only be referenced within execution.py, as it has to handle all V3 APIs going forward.""" + @classmethod + def GET_NODE_INFO_V1(cls): + ... + + +class _NodeOutputInternal: + """Class that all V3-based APIs inherit from for NodeOutput. + + This is intended to only be referenced within execution.py, as it has to handle all V3 APIs going forward.""" + ... + + +def as_pruned_dict(dataclass_obj): + '''Return dict of dataclass object with pruned None values.''' + return prune_dict(asdict(dataclass_obj)) + +def prune_dict(d: dict): + return {k: v for k,v in d.items() if v is not None} + + +def is_class(obj): + ''' + Returns True if is a class type. + Returns False if is a class instance. + ''' + return isinstance(obj, type) + + +def copy_class(cls: type) -> type: + ''' + Copy a class and its attributes. 
+ ''' + if cls is None: + return None + cls_dict = { + k: v for k, v in cls.__dict__.items() + if k not in ('__dict__', '__weakref__', '__module__', '__doc__') + } + # new class + new_cls = type( + cls.__name__, + (cls,), + cls_dict + ) + # metadata preservation + new_cls.__module__ = cls.__module__ + new_cls.__doc__ = cls.__doc__ + return new_cls + + +class classproperty(object): + def __init__(self, f): + self.f = f + def __get__(self, obj, owner): + return self.f(owner) + + +# NOTE: this was ai generated and validated by hand +def shallow_clone_class(cls, new_name=None): + ''' + Shallow clone a class while preserving super() functionality. + ''' + new_name = new_name or f"{cls.__name__}Clone" + # Include the original class in the bases to maintain proper inheritance + new_bases = (cls,) + cls.__bases__ + return type(new_name, new_bases, dict(cls.__dict__)) + +# NOTE: this was ai generated and validated by hand +def lock_class(cls): + ''' + Lock a class so that its top-level attributes cannot be modified. + ''' + # Locked instance __setattr__ + def locked_instance_setattr(self, name, value): + raise AttributeError( + f"Cannot set attribute '{name}' on immutable instance of {type(self).__name__}" + ) + # Locked metaclass + class LockedMeta(type(cls)): + def __setattr__(cls_, name, value): + raise AttributeError( + f"Cannot modify class attribute '{name}' on locked class '{cls_.__name__}'" + ) + # Rebuild class with locked behavior + locked_dict = dict(cls.__dict__) + locked_dict['__setattr__'] = locked_instance_setattr + + return LockedMeta(cls.__name__, cls.__bases__, locked_dict) + + +def make_locked_method_func(type_obj, func, class_clone): + """ + Returns a function that, when called with **inputs, will execute: + getattr(type_obj, func).__func__(lock_class(class_clone), **inputs) + + Supports both synchronous and asynchronous methods. + """ + locked_class = lock_class(class_clone) + method = getattr(type_obj, func).__func__ + + # Check if the original method is async + if asyncio.iscoroutinefunction(method): + async def wrapped_async_func(**inputs): + return await method(locked_class, **inputs) + return wrapped_async_func + else: + def wrapped_func(**inputs): + return method(locked_class, **inputs) + return wrapped_func diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py index e1f3a3655..2cee65aa9 100644 --- a/comfy_api/latest/__init__.py +++ b/comfy_api/latest/__init__.py @@ -1,5 +1,6 @@ from __future__ import annotations +from abc import ABC, abstractmethod from typing import Type, TYPE_CHECKING from comfy_api.internal import ComfyAPIBase from comfy_api.internal.singleton import ProxiedSingleton @@ -7,6 +8,9 @@ from comfy_api.internal.async_to_sync import create_sync_class from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents +from comfy_api.latest._io import _IO as io #noqa: F401 +from comfy_api.latest._ui import _UI as ui #noqa: F401 +# from comfy_api.latest._resources import _RESOURCES as resources #noqa: F401 from comfy_execution.utils import get_executing_context from comfy_execution.progress import get_progress_state, PreviewImageTuple from PIL import Image @@ -72,6 +76,19 @@ class ComfyAPI_latest(ComfyAPIBase): execution: Execution +class ComfyExtension(ABC): + async def on_load(self) -> None: + """ + Called when an extension is loaded. 
+ This should be used to initialize any global resources needed by the extension. + """ + + @abstractmethod + async def get_node_list(self) -> list[type[io.ComfyNode]]: + """ + Returns a list of nodes that this extension provides. + """ + class Input: Image = ImageInput Audio = AudioInput @@ -103,4 +120,5 @@ __all__ = [ "Input", "InputImpl", "Types", + "ComfyExtension", ] diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py new file mode 100644 index 000000000..ec1efb51d --- /dev/null +++ b/comfy_api/latest/_io.py @@ -0,0 +1,1618 @@ +from __future__ import annotations + +import copy +import inspect +from abc import ABC, abstractmethod +from collections import Counter +from dataclasses import asdict, dataclass +from enum import Enum +from typing import Any, Callable, Literal, TypedDict, TypeVar, TYPE_CHECKING +from typing_extensions import NotRequired, final + +# used for type hinting +import torch + +if TYPE_CHECKING: + from spandrel import ImageModelDescriptor + from comfy.clip_vision import ClipVisionModel + from comfy.clip_vision import Output as ClipVisionOutput_ + from comfy.controlnet import ControlNet + from comfy.hooks import HookGroup, HookKeyframeGroup + from comfy.model_patcher import ModelPatcher + from comfy.samplers import CFGGuider, Sampler + from comfy.sd import CLIP, VAE + from comfy.sd import StyleModel as StyleModel_ + from comfy_api.input import VideoInput +from comfy_api.internal import (_ComfyNodeInternal, _NodeOutputInternal, classproperty, copy_class, first_real_override, is_class, + prune_dict, shallow_clone_class) +from comfy_api.latest._resources import Resources, ResourcesLocal +from comfy_execution.graph_utils import ExecutionBlocker + +# from comfy_extras.nodes_images import SVG as SVG_ # NOTE: needs to be moved before can be imported due to circular reference + +class FolderType(str, Enum): + input = "input" + output = "output" + temp = "temp" + + +class UploadType(str, Enum): + image = "image_upload" + audio = "audio_upload" + video = "video_upload" + model = "file_upload" + + +class RemoteOptions: + def __init__(self, route: str, refresh_button: bool, control_after_refresh: Literal["first", "last"]="first", + timeout: int=None, max_retries: int=None, refresh: int=None): + self.route = route + """The route to the remote source.""" + self.refresh_button = refresh_button + """Specifies whether to show a refresh button in the UI below the widget.""" + self.control_after_refresh = control_after_refresh + """Specifies the control after the refresh button is clicked. If "first", the first item will be automatically selected, and so on.""" + self.timeout = timeout + """The maximum amount of time to wait for a response from the remote source in milliseconds.""" + self.max_retries = max_retries + """The maximum number of retries before aborting the request.""" + self.refresh = refresh + """The TTL of the remote input's value in milliseconds. 
Specifies the interval at which the remote input's value is refreshed.""" + + def as_dict(self): + return prune_dict({ + "route": self.route, + "refresh_button": self.refresh_button, + "control_after_refresh": self.control_after_refresh, + "timeout": self.timeout, + "max_retries": self.max_retries, + "refresh": self.refresh, + }) + + +class NumberDisplay(str, Enum): + number = "number" + slider = "slider" + + +class _StringIOType(str): + def __ne__(self, value: object) -> bool: + if self == "*" or value == "*": + return False + if not isinstance(value, str): + return True + a = frozenset(self.split(",")) + b = frozenset(value.split(",")) + return not (b.issubset(a) or a.issubset(b)) + +class _ComfyType(ABC): + Type = Any + io_type: str = None + +# NOTE: this is a workaround to make the decorator return the correct type +T = TypeVar("T", bound=type) +def comfytype(io_type: str, **kwargs): + ''' + Decorator to mark nested classes as ComfyType; io_type will be bound to the class. + + A ComfyType may have the following attributes: + - Type = + - class Input(Input): ... + - class Output(Output): ... + ''' + def decorator(cls: T) -> T: + if isinstance(cls, _ComfyType) or issubclass(cls, _ComfyType): + # clone Input and Output classes to avoid modifying the original class + new_cls = cls + if hasattr(new_cls, "Input"): + new_cls.Input = copy_class(new_cls.Input) + if hasattr(new_cls, "Output"): + new_cls.Output = copy_class(new_cls.Output) + else: + # copy class attributes except for special ones that shouldn't be in type() + cls_dict = { + k: v for k, v in cls.__dict__.items() + if k not in ('__dict__', '__weakref__', '__module__', '__doc__') + } + # new class + new_cls: ComfyTypeIO = type( + cls.__name__, + (cls, ComfyTypeIO), + cls_dict + ) + # metadata preservation + new_cls.__module__ = cls.__module__ + new_cls.__doc__ = cls.__doc__ + # assign ComfyType attributes, if needed + # NOTE: use __ne__ trick for io_type (see node_typing.IO.__ne__ for details) + new_cls.io_type = _StringIOType(io_type) + if hasattr(new_cls, "Input") and new_cls.Input is not None: + new_cls.Input.Parent = new_cls + if hasattr(new_cls, "Output") and new_cls.Output is not None: + new_cls.Output.Parent = new_cls + return new_cls + return decorator + +def Custom(io_type: str) -> type[ComfyTypeIO]: + '''Create a ComfyType for a custom io_type.''' + @comfytype(io_type=io_type) + class CustomComfyType(ComfyTypeIO): + ... + return CustomComfyType + +class _IO_V3: + ''' + Base class for V3 Inputs and Outputs. + ''' + Parent: _ComfyType = None + + def __init__(self): + pass + + @property + def io_type(self): + return self.Parent.io_type + + @property + def Type(self): + return self.Parent.Type + +class Input(_IO_V3): + ''' + Base class for a V3 Input. + ''' + def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): + super().__init__() + self.id = id + self.display_name = display_name + self.optional = optional + self.tooltip = tooltip + self.lazy = lazy + self.extra_dict = extra_dict if extra_dict is not None else {} + + def as_dict(self): + return prune_dict({ + "display_name": self.display_name, + "optional": self.optional, + "tooltip": self.tooltip, + "lazy": self.lazy, + }) | prune_dict(self.extra_dict) + + def get_io_type(self): + return _StringIOType(self.io_type) + +class WidgetInput(Input): + ''' + Base class for a V3 Input with widget. 
+ ''' + def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, + default: Any=None, + socketless: bool=None, widget_type: str=None, force_input: bool=None, extra_dict=None): + super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) + self.default = default + self.socketless = socketless + self.widget_type = widget_type + self.force_input = force_input + + def as_dict(self): + return super().as_dict() | prune_dict({ + "default": self.default, + "socketless": self.socketless, + "widgetType": self.widget_type, + "forceInput": self.force_input, + }) + + def get_io_type(self): + return self.widget_type if self.widget_type is not None else super().get_io_type() + + +class Output(_IO_V3): + def __init__(self, id: str=None, display_name: str=None, tooltip: str=None, + is_output_list=False): + self.id = id + self.display_name = display_name + self.tooltip = tooltip + self.is_output_list = is_output_list + + def as_dict(self): + return prune_dict({ + "display_name": self.display_name, + "tooltip": self.tooltip, + "is_output_list": self.is_output_list, + }) + + def get_io_type(self): + return self.io_type + + +class ComfyTypeI(_ComfyType): + '''ComfyType subclass that only has a default Input class - intended for types that only have Inputs.''' + class Input(Input): + ... + +class ComfyTypeIO(ComfyTypeI): + '''ComfyType subclass that has default Input and Output classes; useful for types with both Inputs and Outputs.''' + class Output(Output): + ... + + +@comfytype(io_type="BOOLEAN") +class Boolean(ComfyTypeIO): + Type = bool + + class Input(WidgetInput): + '''Boolean input.''' + def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, + default: bool=None, label_on: str=None, label_off: str=None, + socketless: bool=None, force_input: bool=None): + super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input) + self.label_on = label_on + self.label_off = label_off + self.default: bool + + def as_dict(self): + return super().as_dict() | prune_dict({ + "label_on": self.label_on, + "label_off": self.label_off, + }) + +@comfytype(io_type="INT") +class Int(ComfyTypeIO): + Type = int + + class Input(WidgetInput): + '''Integer input.''' + def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, + default: int=None, min: int=None, max: int=None, step: int=None, control_after_generate: bool=None, + display_mode: NumberDisplay=None, socketless: bool=None, force_input: bool=None): + super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input) + self.min = min + self.max = max + self.step = step + self.control_after_generate = control_after_generate + self.display_mode = display_mode + self.default: int + + def as_dict(self): + return super().as_dict() | prune_dict({ + "min": self.min, + "max": self.max, + "step": self.step, + "control_after_generate": self.control_after_generate, + "display": self.display_mode.value if self.display_mode else None, + }) + +@comfytype(io_type="FLOAT") +class Float(ComfyTypeIO): + Type = float + + class Input(WidgetInput): + '''Float input.''' + def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, + default: float=None, min: float=None, max: float=None, step: float=None, round: float=None, + display_mode: NumberDisplay=None, socketless: bool=None, force_input: bool=None): + super().__init__(id, 
display_name, optional, tooltip, lazy, default, socketless, None, force_input) + self.min = min + self.max = max + self.step = step + self.round = round + self.display_mode = display_mode + self.default: float + + def as_dict(self): + return super().as_dict() | prune_dict({ + "min": self.min, + "max": self.max, + "step": self.step, + "round": self.round, + "display": self.display_mode, + }) + +@comfytype(io_type="STRING") +class String(ComfyTypeIO): + Type = str + + class Input(WidgetInput): + '''String input.''' + def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, + multiline=False, placeholder: str=None, default: str=None, dynamic_prompts: bool=None, + socketless: bool=None, force_input: bool=None): + super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input) + self.multiline = multiline + self.placeholder = placeholder + self.dynamic_prompts = dynamic_prompts + self.default: str + + def as_dict(self): + return super().as_dict() | prune_dict({ + "multiline": self.multiline, + "placeholder": self.placeholder, + "dynamicPrompts": self.dynamic_prompts, + }) + +@comfytype(io_type="COMBO") +class Combo(ComfyTypeI): + Type = str + class Input(WidgetInput): + """Combo input (dropdown).""" + Type = str + def __init__(self, id: str, options: list[str]=None, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, + default: str=None, control_after_generate: bool=None, + upload: UploadType=None, image_folder: FolderType=None, + remote: RemoteOptions=None, + socketless: bool=None): + super().__init__(id, display_name, optional, tooltip, lazy, default, socketless) + self.multiselect = False + self.options = options + self.control_after_generate = control_after_generate + self.upload = upload + self.image_folder = image_folder + self.remote = remote + self.default: str + + def as_dict(self): + return super().as_dict() | prune_dict({ + "multiselect": self.multiselect, + "options": self.options, + "control_after_generate": self.control_after_generate, + **({self.upload.value: True} if self.upload is not None else {}), + "image_folder": self.image_folder.value if self.image_folder else None, + "remote": self.remote.as_dict() if self.remote else None, + }) + + +@comfytype(io_type="COMBO") +class MultiCombo(ComfyTypeI): + '''Multiselect Combo input (dropdown for selecting potentially more than one value).''' + # TODO: something is wrong with the serialization, frontend does not recognize it as multiselect + Type = list[str] + class Input(Combo.Input): + def __init__(self, id: str, options: list[str], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, + default: list[str]=None, placeholder: str=None, chip: bool=None, control_after_generate: bool=None, + socketless: bool=None): + super().__init__(id, options, display_name, optional, tooltip, lazy, default, control_after_generate, socketless=socketless) + self.multiselect = True + self.placeholder = placeholder + self.chip = chip + self.default: list[str] + + def as_dict(self): + to_return = super().as_dict() | prune_dict({ + "multi_select": self.multiselect, + "placeholder": self.placeholder, + "chip": self.chip, + }) + return to_return + +@comfytype(io_type="IMAGE") +class Image(ComfyTypeIO): + Type = torch.Tensor + + +@comfytype(io_type="WAN_CAMERA_EMBEDDING") +class WanCameraEmbedding(ComfyTypeIO): + Type = torch.Tensor + + +@comfytype(io_type="WEBCAM") +class Webcam(ComfyTypeIO): + Type = str + + class 
Input(WidgetInput): + """Webcam input.""" + Type = str + def __init__( + self, id: str, display_name: str=None, optional=False, + tooltip: str=None, lazy: bool=None, default: str=None, socketless: bool=None + ): + super().__init__(id, display_name, optional, tooltip, lazy, default, socketless) + + +@comfytype(io_type="MASK") +class Mask(ComfyTypeIO): + Type = torch.Tensor + +@comfytype(io_type="LATENT") +class Latent(ComfyTypeIO): + '''Latents are stored as a dictionary.''' + class LatentDict(TypedDict): + samples: torch.Tensor + '''Latent tensors.''' + noise_mask: NotRequired[torch.Tensor] + batch_index: NotRequired[list[int]] + type: NotRequired[str] + '''Only needed if dealing with these types: audio, hunyuan3dv2''' + Type = LatentDict + +@comfytype(io_type="CONDITIONING") +class Conditioning(ComfyTypeIO): + class PooledDict(TypedDict): + pooled_output: torch.Tensor + '''Pooled output from CLIP.''' + control: NotRequired[ControlNet] + '''ControlNet to apply to conditioning.''' + control_apply_to_uncond: NotRequired[bool] + '''Whether to apply ControlNet to matching negative conditioning at sample time, if applicable.''' + cross_attn_controlnet: NotRequired[torch.Tensor] + '''CrossAttn from CLIP to use for controlnet only.''' + pooled_output_controlnet: NotRequired[torch.Tensor] + '''Pooled output from CLIP to use for controlnet only.''' + gligen: NotRequired[tuple[str, Gligen, list[tuple[torch.Tensor, int, ...]]]] + '''GLIGEN to apply to conditioning.''' + area: NotRequired[tuple[int, ...] | tuple[str, float, ...]] + '''Set area of conditioning. First half of values apply to dimensions, the second half apply to coordinates. + By default, the dimensions are based on total pixel amount, but the first value can be set to "percentage" to use a percentage of the image size instead. + + (1024, 1024, 0, 0) would apply conditioning to the top-left 1024x1024 pixels. + + ("percentage", 0.5, 0.5, 0, 0) would apply conditioning to the top-left 50% of the image.''' # TODO: verify its actually top-left + strength: NotRequired[float] + '''Strength of conditioning. Default strength is 1.0.''' + mask: NotRequired[torch.Tensor] + '''Mask to apply conditioning to.''' + mask_strength: NotRequired[float] + '''Strength of conditioning mask. 
Default strength is 1.0.''' + set_area_to_bounds: NotRequired[bool] + '''Whether conditioning mask should determine bounds of area - if set to false, latents are sampled at full resolution and result is applied in mask.''' + concat_latent_image: NotRequired[torch.Tensor] + '''Used for inpainting and specific models.''' + concat_mask: NotRequired[torch.Tensor] + '''Used for inpainting and specific models.''' + concat_image: NotRequired[torch.Tensor] + '''Used by SD_4XUpscale_Conditioning.''' + noise_augmentation: NotRequired[float] + '''Used by SD_4XUpscale_Conditioning.''' + hooks: NotRequired[HookGroup] + '''Applies hooks to conditioning.''' + default: NotRequired[bool] + '''Whether this conditioning is 'default'; default conditioning gets applied to any areas of the image that have no masks/areas applied, assuming at least one area/mask is present during sampling.''' + start_percent: NotRequired[float] + '''Determines relative step to begin applying conditioning, expressed as a float between 0.0 and 1.0.''' + end_percent: NotRequired[float] + '''Determines relative step to end applying conditioning, expressed as a float between 0.0 and 1.0.''' + clip_start_percent: NotRequired[float] + '''Internal variable for conditioning scheduling - start of application, expressed as a float between 0.0 and 1.0.''' + clip_end_percent: NotRequired[float] + '''Internal variable for conditioning scheduling - end of application, expressed as a float between 0.0 and 1.0.''' + attention_mask: NotRequired[torch.Tensor] + '''Masks text conditioning; used by StyleModel among others.''' + attention_mask_img_shape: NotRequired[tuple[int, ...]] + '''Masks text conditioning; used by StyleModel among others.''' + unclip_conditioning: NotRequired[list[dict]] + '''Used by unCLIP.''' + conditioning_lyrics: NotRequired[torch.Tensor] + '''Used by AceT5Model.''' + seconds_start: NotRequired[float] + '''Used by StableAudio.''' + seconds_total: NotRequired[float] + '''Used by StableAudio.''' + lyrics_strength: NotRequired[float] + '''Used by AceStepAudio.''' + width: NotRequired[int] + '''Used by certain models (e.g. CLIPTextEncodeSDXL/Refiner, PixArtAlpha).''' + height: NotRequired[int] + '''Used by certain models (e.g. 
CLIPTextEncodeSDXL/Refiner, PixArtAlpha).''' + aesthetic_score: NotRequired[float] + '''Used by CLIPTextEncodeSDXL/Refiner.''' + crop_w: NotRequired[int] + '''Used by CLIPTextEncodeSDXL.''' + crop_h: NotRequired[int] + '''Used by CLIPTextEncodeSDXL.''' + target_width: NotRequired[int] + '''Used by CLIPTextEncodeSDXL.''' + target_height: NotRequired[int] + '''Used by CLIPTextEncodeSDXL.''' + reference_latents: NotRequired[list[torch.Tensor]] + '''Used by ReferenceLatent.''' + guidance: NotRequired[float] + '''Used by Flux-like models with guidance embed.''' + guiding_frame_index: NotRequired[int] + '''Used by Hunyuan ImageToVideo.''' + ref_latent: NotRequired[torch.Tensor] + '''Used by Hunyuan ImageToVideo.''' + keyframe_idxs: NotRequired[list[int]] + '''Used by LTXV.''' + frame_rate: NotRequired[float] + '''Used by LTXV.''' + stable_cascade_prior: NotRequired[torch.Tensor] + '''Used by StableCascade.''' + elevation: NotRequired[list[float]] + '''Used by SV3D.''' + azimuth: NotRequired[list[float]] + '''Used by SV3D.''' + motion_bucket_id: NotRequired[int] + '''Used by SVD-like models.''' + fps: NotRequired[int] + '''Used by SVD-like models.''' + augmentation_level: NotRequired[float] + '''Used by SVD-like models.''' + clip_vision_output: NotRequired[ClipVisionOutput_] + '''Used by WAN-like models.''' + vace_frames: NotRequired[torch.Tensor] + '''Used by WAN VACE.''' + vace_mask: NotRequired[torch.Tensor] + '''Used by WAN VACE.''' + vace_strength: NotRequired[float] + '''Used by WAN VACE.''' + camera_conditions: NotRequired[Any] # TODO: assign proper type once defined + '''Used by WAN Camera.''' + time_dim_concat: NotRequired[torch.Tensor] + '''Used by WAN Phantom Subject.''' + + CondList = list[tuple[torch.Tensor, PooledDict]] + Type = CondList + +@comfytype(io_type="SAMPLER") +class Sampler(ComfyTypeIO): + if TYPE_CHECKING: + Type = Sampler + +@comfytype(io_type="SIGMAS") +class Sigmas(ComfyTypeIO): + Type = torch.Tensor + +@comfytype(io_type="NOISE") +class Noise(ComfyTypeIO): + Type = torch.Tensor + +@comfytype(io_type="GUIDER") +class Guider(ComfyTypeIO): + if TYPE_CHECKING: + Type = CFGGuider + +@comfytype(io_type="CLIP") +class Clip(ComfyTypeIO): + if TYPE_CHECKING: + Type = CLIP + +@comfytype(io_type="CONTROL_NET") +class ControlNet(ComfyTypeIO): + if TYPE_CHECKING: + Type = ControlNet + +@comfytype(io_type="VAE") +class Vae(ComfyTypeIO): + if TYPE_CHECKING: + Type = VAE + +@comfytype(io_type="MODEL") +class Model(ComfyTypeIO): + if TYPE_CHECKING: + Type = ModelPatcher + +@comfytype(io_type="CLIP_VISION") +class ClipVision(ComfyTypeIO): + if TYPE_CHECKING: + Type = ClipVisionModel + +@comfytype(io_type="CLIP_VISION_OUTPUT") +class ClipVisionOutput(ComfyTypeIO): + if TYPE_CHECKING: + Type = ClipVisionOutput_ + +@comfytype(io_type="STYLE_MODEL") +class StyleModel(ComfyTypeIO): + if TYPE_CHECKING: + Type = StyleModel_ + +@comfytype(io_type="GLIGEN") +class Gligen(ComfyTypeIO): + '''ModelPatcher that wraps around a 'Gligen' model.''' + if TYPE_CHECKING: + Type = ModelPatcher + +@comfytype(io_type="UPSCALE_MODEL") +class UpscaleModel(ComfyTypeIO): + if TYPE_CHECKING: + Type = ImageModelDescriptor + +@comfytype(io_type="AUDIO") +class Audio(ComfyTypeIO): + class AudioDict(TypedDict): + waveform: torch.Tensor + sample_rate: int + Type = AudioDict + +@comfytype(io_type="VIDEO") +class Video(ComfyTypeIO): + if TYPE_CHECKING: + Type = VideoInput + +@comfytype(io_type="SVG") +class SVG(ComfyTypeIO): + Type = Any # TODO: SVG class is defined in comfy_extras/nodes_images.py, causing circular 
reference; should be moved to somewhere else before referenced directly in v3 + +@comfytype(io_type="LORA_MODEL") +class LoraModel(ComfyTypeIO): + Type = dict[str, torch.Tensor] + +@comfytype(io_type="LOSS_MAP") +class LossMap(ComfyTypeIO): + class LossMapDict(TypedDict): + loss: list[torch.Tensor] + Type = LossMapDict + +@comfytype(io_type="VOXEL") +class Voxel(ComfyTypeIO): + Type = Any # TODO: VOXEL class is defined in comfy_extras/nodes_hunyuan3d.py; should be moved to somewhere else before referenced directly in v3 + +@comfytype(io_type="MESH") +class Mesh(ComfyTypeIO): + Type = Any # TODO: MESH class is defined in comfy_extras/nodes_hunyuan3d.py; should be moved to somewhere else before referenced directly in v3 + +@comfytype(io_type="HOOKS") +class Hooks(ComfyTypeIO): + if TYPE_CHECKING: + Type = HookGroup + +@comfytype(io_type="HOOK_KEYFRAMES") +class HookKeyframes(ComfyTypeIO): + if TYPE_CHECKING: + Type = HookKeyframeGroup + +@comfytype(io_type="TIMESTEPS_RANGE") +class TimestepsRange(ComfyTypeIO): + '''Range defined by start and endpoint, between 0.0 and 1.0.''' + Type = tuple[int, int] + +@comfytype(io_type="LATENT_OPERATION") +class LatentOperation(ComfyTypeIO): + Type = Callable[[torch.Tensor], torch.Tensor] + +@comfytype(io_type="FLOW_CONTROL") +class FlowControl(ComfyTypeIO): + # NOTE: only used in testing_nodes right now + Type = tuple[str, Any] + +@comfytype(io_type="ACCUMULATION") +class Accumulation(ComfyTypeIO): + # NOTE: only used in testing_nodes right now + class AccumulationDict(TypedDict): + accum: list[Any] + Type = AccumulationDict + + +@comfytype(io_type="LOAD3D_CAMERA") +class Load3DCamera(ComfyTypeIO): + class CameraInfo(TypedDict): + position: dict[str, float | int] + target: dict[str, float | int] + zoom: int + cameraType: str + + Type = CameraInfo + + +@comfytype(io_type="LOAD_3D") +class Load3D(ComfyTypeIO): + """3D models are stored as a dictionary.""" + class Model3DDict(TypedDict): + image: str + mask: str + normal: str + camera_info: Load3DCamera.CameraInfo + recording: NotRequired[str] + + Type = Model3DDict + + +@comfytype(io_type="LOAD_3D_ANIMATION") +class Load3DAnimation(Load3D): + ... + + +@comfytype(io_type="PHOTOMAKER") +class Photomaker(ComfyTypeIO): + Type = Any + + +@comfytype(io_type="POINT") +class Point(ComfyTypeIO): + Type = Any # NOTE: I couldn't find any references in core code to POINT io_type. Does this exist? + +@comfytype(io_type="FACE_ANALYSIS") +class FaceAnalysis(ComfyTypeIO): + Type = Any # NOTE: I couldn't find any references in core code to POINT io_type. Does this exist? + +@comfytype(io_type="BBOX") +class BBOX(ComfyTypeIO): + Type = Any # NOTE: I couldn't find any references in core code to POINT io_type. Does this exist? + +@comfytype(io_type="SEGS") +class SEGS(ComfyTypeIO): + Type = Any # NOTE: I couldn't find any references in core code to POINT io_type. Does this exist? + +@comfytype(io_type="*") +class AnyType(ComfyTypeIO): + Type = Any + +@comfytype(io_type="COMFY_MULTITYPED_V3") +class MultiType: + Type = Any + class Input(Input): + ''' + Input that permits more than one input type; if `id` is an instance of `ComfyType.Input`, then that input will be used to create a widget (if applicable) with overridden values. 
+ ''' + def __init__(self, id: str | Input, types: list[type[_ComfyType] | _ComfyType], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): + # if id is an Input, then use that Input with overridden values + self.input_override = None + if isinstance(id, Input): + self.input_override = copy.copy(id) + optional = id.optional if id.optional is True else optional + tooltip = id.tooltip if id.tooltip is not None else tooltip + display_name = id.display_name if id.display_name is not None else display_name + lazy = id.lazy if id.lazy is not None else lazy + id = id.id + # if is a widget input, make sure widget_type is set appropriately + if isinstance(self.input_override, WidgetInput): + self.input_override.widget_type = self.input_override.get_io_type() + super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) + self._io_types = types + + @property + def io_types(self) -> list[type[Input]]: + ''' + Returns list of Input class types permitted. + ''' + io_types = [] + for x in self._io_types: + if not is_class(x): + io_types.append(type(x)) + else: + io_types.append(x) + return io_types + + def get_io_type(self): + # ensure types are unique and order is preserved + str_types = [x.io_type for x in self.io_types] + if self.input_override is not None: + str_types.insert(0, self.input_override.get_io_type()) + return ",".join(list(dict.fromkeys(str_types))) + + def as_dict(self): + if self.input_override is not None: + return self.input_override.as_dict() | super().as_dict() + else: + return super().as_dict() + +class DynamicInput(Input, ABC): + ''' + Abstract class for dynamic input registration. + ''' + @abstractmethod + def get_dynamic(self) -> list[Input]: + ... + +class DynamicOutput(Output, ABC): + ''' + Abstract class for dynamic output registration. + ''' + def __init__(self, id: str=None, display_name: str=None, tooltip: str=None, + is_output_list=False): + super().__init__(id, display_name, tooltip, is_output_list) + + @abstractmethod + def get_dynamic(self) -> list[Output]: + ... 
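A quick illustration of how the MultiType input defined above is meant to be consumed (a minimal sketch, assuming only the definitions in this file; the "blend_source" id is illustrative and not part of this patch):

    # One socket that accepts either an IMAGE or a MASK connection.
    blend_source = MultiType.Input("blend_source", [Image, Mask], tooltip="IMAGE or MASK")
    # io types are comma-joined, deduplicated, and order-preserving:
    assert blend_source.get_io_type() == "IMAGE,MASK"

The comma-joined form is what _StringIOType's subset-based __ne__ is designed to match against, so a plain "IMAGE" or "MASK" link validates as compatible with this input.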
+ + +@comfytype(io_type="COMFY_AUTOGROW_V3") +class AutogrowDynamic(ComfyTypeI): + Type = list[Any] + class Input(DynamicInput): + def __init__(self, id: str, template_input: Input, min: int=1, max: int=None, + display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): + super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) + self.template_input = template_input + if min is not None: + assert(min >= 1) + if max is not None: + assert(max >= 1) + self.min = min + self.max = max + + def get_dynamic(self) -> list[Input]: + curr_count = 1 + new_inputs = [] + for i in range(self.min): + new_input = copy.copy(self.template_input) + new_input.id = f"{new_input.id}{curr_count}_${self.id}_ag$" + if new_input.display_name is not None: + new_input.display_name = f"{new_input.display_name}{curr_count}" + new_input.optional = self.optional or new_input.optional + if isinstance(self.template_input, WidgetInput): + new_input.force_input = True + new_inputs.append(new_input) + curr_count += 1 + # pretend to expand up to max + for i in range(curr_count-1, self.max): + new_input = copy.copy(self.template_input) + new_input.id = f"{new_input.id}{curr_count}_${self.id}_ag$" + if new_input.display_name is not None: + new_input.display_name = f"{new_input.display_name}{curr_count}" + new_input.optional = True + if isinstance(self.template_input, WidgetInput): + new_input.force_input = True + new_inputs.append(new_input) + curr_count += 1 + return new_inputs + +@comfytype(io_type="COMFY_COMBODYNAMIC_V3") +class ComboDynamic(ComfyTypeI): + class Input(DynamicInput): + def __init__(self, id: str): + pass + +@comfytype(io_type="COMFY_MATCHTYPE_V3") +class MatchType(ComfyTypeIO): + class Template: + def __init__(self, template_id: str, allowed_types: _ComfyType | list[_ComfyType]): + self.template_id = template_id + self.allowed_types = [allowed_types] if isinstance(allowed_types, _ComfyType) else allowed_types + + def as_dict(self): + return { + "template_id": self.template_id, + "allowed_types": "".join(t.io_type for t in self.allowed_types), + } + + class Input(DynamicInput): + def __init__(self, id: str, template: MatchType.Template, + display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): + super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) + self.template = template + + def get_dynamic(self) -> list[Input]: + return [self] + + def as_dict(self): + return super().as_dict() | prune_dict({ + "template": self.template.as_dict(), + }) + + class Output(DynamicOutput): + def __init__(self, id: str, template: MatchType.Template, display_name: str=None, tooltip: str=None, + is_output_list=False): + super().__init__(id, display_name, tooltip, is_output_list) + self.template = template + + def get_dynamic(self) -> list[Output]: + return [self] + + def as_dict(self): + return super().as_dict() | prune_dict({ + "template": self.template.as_dict(), + }) + + +class HiddenHolder: + def __init__(self, unique_id: str, prompt: Any, + extra_pnginfo: Any, dynprompt: Any, + auth_token_comfy_org: str, api_key_comfy_org: str, **kwargs): + self.unique_id = unique_id + """UNIQUE_ID is the unique identifier of the node, and matches the id property of the node on the client side. It is commonly used in client-server communications (see messages).""" + self.prompt = prompt + """PROMPT is the complete prompt sent by the client to the server. 
See the prompt object for a full description.""" + self.extra_pnginfo = extra_pnginfo + """EXTRA_PNGINFO is a dictionary that will be copied into the metadata of any .png files saved. Custom nodes can store additional information in this dictionary for saving (or as a way to communicate with a downstream node).""" + self.dynprompt = dynprompt + """DYNPROMPT is an instance of comfy_execution.graph.DynamicPrompt. It differs from PROMPT in that it may mutate during the course of execution in response to Node Expansion.""" + self.auth_token_comfy_org = auth_token_comfy_org + """AUTH_TOKEN_COMFY_ORG is a token acquired from signing into a ComfyOrg account on frontend.""" + self.api_key_comfy_org = api_key_comfy_org + """API_KEY_COMFY_ORG is an API Key generated by ComfyOrg that allows skipping signing into a ComfyOrg account on frontend.""" + + def __getattr__(self, key: str): + '''If hidden variable not found, return None.''' + return None + + @classmethod + def from_dict(cls, d: dict | None): + if d is None: + d = {} + return cls( + unique_id=d.get(Hidden.unique_id, None), + prompt=d.get(Hidden.prompt, None), + extra_pnginfo=d.get(Hidden.extra_pnginfo, None), + dynprompt=d.get(Hidden.dynprompt, None), + auth_token_comfy_org=d.get(Hidden.auth_token_comfy_org, None), + api_key_comfy_org=d.get(Hidden.api_key_comfy_org, None), + ) + +class Hidden(str, Enum): + ''' + Enumerator for requesting hidden variables in nodes. + ''' + unique_id = "UNIQUE_ID" + """UNIQUE_ID is the unique identifier of the node, and matches the id property of the node on the client side. It is commonly used in client-server communications (see messages).""" + prompt = "PROMPT" + """PROMPT is the complete prompt sent by the client to the server. See the prompt object for a full description.""" + extra_pnginfo = "EXTRA_PNGINFO" + """EXTRA_PNGINFO is a dictionary that will be copied into the metadata of any .png files saved. Custom nodes can store additional information in this dictionary for saving (or as a way to communicate with a downstream node).""" + dynprompt = "DYNPROMPT" + """DYNPROMPT is an instance of comfy_execution.graph.DynamicPrompt. It differs from PROMPT in that it may mutate during the course of execution in response to Node Expansion.""" + auth_token_comfy_org = "AUTH_TOKEN_COMFY_ORG" + """AUTH_TOKEN_COMFY_ORG is a token acquired from signing into a ComfyOrg account on frontend.""" + api_key_comfy_org = "API_KEY_COMFY_ORG" + """API_KEY_COMFY_ORG is an API Key generated by ComfyOrg that allows skipping signing into a ComfyOrg account on frontend.""" + + +@dataclass +class NodeInfoV1: + input: dict=None + input_order: dict[str, list[str]]=None + output: list[str]=None + output_is_list: list[bool]=None + output_name: list[str]=None + output_tooltips: list[str]=None + name: str=None + display_name: str=None + description: str=None + python_module: Any=None + category: str=None + output_node: bool=None + deprecated: bool=None + experimental: bool=None + api_node: bool=None + +@dataclass +class NodeInfoV3: + input: dict=None + output: dict=None + hidden: list[str]=None + name: str=None + display_name: str=None + description: str=None + category: str=None + output_node: bool=None + deprecated: bool=None + experimental: bool=None + api_node: bool=None + + +@dataclass +class Schema: + """Definition of V3 node properties.""" + + node_id: str + """ID of node - should be globally unique. 
If this is a custom node, add a prefix or postfix to avoid name clashes.""" + display_name: str = None + """Display name of node.""" + category: str = "sd" + """The category of the node, as per the "Add Node" menu.""" + inputs: list[Input]=None + outputs: list[Output]=None + hidden: list[Hidden]=None + description: str="" + """Node description, shown as a tooltip when hovering over the node.""" + is_input_list: bool = False + """A flag indicating if this node implements the additional code necessary to deal with OUTPUT_IS_LIST nodes. + + All inputs of ``type`` will become ``list[type]``, regardless of how many items are passed in. This also affects ``check_lazy_status``. + + From the docs: + + A node can also override the default input behaviour and receive the whole list in a single call. This is done by setting a class attribute `INPUT_IS_LIST` to ``True``. + + Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lists#list-processing + """ + is_output_node: bool=False + """Flags this node as an output node, causing any inputs it requires to be executed. + + If a node is not connected to any output nodes, that node will not be executed. Usage:: + + From the docs: + + By default, a node is not considered an output. Set ``OUTPUT_NODE = True`` to specify that it is. + + Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#output-node + """ + is_deprecated: bool=False + """Flags a node as deprecated, indicating to users that they should find alternatives to this node.""" + is_experimental: bool=False + """Flags a node as experimental, informing users that it may change or not work as expected.""" + is_api_node: bool=False + """Flags a node as an API node. See: https://docs.comfy.org/tutorials/api-nodes/overview.""" + not_idempotent: bool=False + """Flags a node as not idempotent; when True, the node will run and not reuse the cached outputs when identical inputs are provided on a different node in the graph.""" + enable_expand: bool=False + """Flags a node as expandable, allowing NodeOutput to include 'expand' property.""" + + def validate(self): + '''Validate the schema: + - verify ids on inputs and outputs are unique - both internally and in relation to each other + ''' + input_ids = [i.id for i in self.inputs] if self.inputs is not None else [] + output_ids = [o.id for o in self.outputs] if self.outputs is not None else [] + input_set = set(input_ids) + output_set = set(output_ids) + issues = [] + # verify ids are unique per list + if len(input_set) != len(input_ids): + issues.append(f"Input ids must be unique, but {[item for item, count in Counter(input_ids).items() if count > 1]} are not.") + if len(output_set) != len(output_ids): + issues.append(f"Output ids must be unique, but {[item for item, count in Counter(output_ids).items() if count > 1]} are not.") + # verify ids are unique between lists + intersection = input_set & output_set + if len(intersection) > 0: + issues.append(f"Ids must be unique between inputs and outputs, but {intersection} are not.") + if len(issues) > 0: + raise ValueError("\n".join(issues)) + + def finalize(self): + """Add hidden based on selected schema options, and give outputs without ids default ids.""" + # if is an api_node, will need key-related hidden + if self.is_api_node: + if self.hidden is None: + self.hidden = [] + if Hidden.auth_token_comfy_org not in self.hidden: + self.hidden.append(Hidden.auth_token_comfy_org) + if Hidden.api_key_comfy_org not in self.hidden: + self.hidden.append(Hidden.api_key_comfy_org) + # if is an 
output_node, will need prompt and extra_pnginfo + if self.is_output_node: + if self.hidden is None: + self.hidden = [] + if Hidden.prompt not in self.hidden: + self.hidden.append(Hidden.prompt) + if Hidden.extra_pnginfo not in self.hidden: + self.hidden.append(Hidden.extra_pnginfo) + # give outputs without ids default ids + if self.outputs is not None: + for i, output in enumerate(self.outputs): + if output.id is None: + output.id = f"_{i}_{output.io_type}_" + + def get_v1_info(self, cls) -> NodeInfoV1: + # get V1 inputs + input = { + "required": {} + } + if self.inputs: + for i in self.inputs: + if isinstance(i, DynamicInput): + dynamic_inputs = i.get_dynamic() + for d in dynamic_inputs: + add_to_dict_v1(d, input) + else: + add_to_dict_v1(i, input) + if self.hidden: + for hidden in self.hidden: + input.setdefault("hidden", {})[hidden.name] = (hidden.value,) + # create separate lists from output fields + output = [] + output_is_list = [] + output_name = [] + output_tooltips = [] + if self.outputs: + for o in self.outputs: + output.append(o.io_type) + output_is_list.append(o.is_output_list) + output_name.append(o.display_name if o.display_name else o.io_type) + output_tooltips.append(o.tooltip if o.tooltip else None) + + info = NodeInfoV1( + input=input, + input_order={key: list(value.keys()) for (key, value) in input.items()}, + output=output, + output_is_list=output_is_list, + output_name=output_name, + output_tooltips=output_tooltips, + name=self.node_id, + display_name=self.display_name, + category=self.category, + description=self.description, + output_node=self.is_output_node, + deprecated=self.is_deprecated, + experimental=self.is_experimental, + api_node=self.is_api_node, + python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes") + ) + return info + + + def get_v3_info(self, cls) -> NodeInfoV3: + input_dict = {} + output_dict = {} + hidden_list = [] + # TODO: make sure dynamic types will be handled correctly + if self.inputs: + for input in self.inputs: + add_to_dict_v3(input, input_dict) + if self.outputs: + for output in self.outputs: + add_to_dict_v3(output, output_dict) + if self.hidden: + for hidden in self.hidden: + hidden_list.append(hidden.value) + + info = NodeInfoV3( + input=input_dict, + output=output_dict, + hidden=hidden_list, + name=self.node_id, + display_name=self.display_name, + description=self.description, + category=self.category, + output_node=self.is_output_node, + deprecated=self.is_deprecated, + experimental=self.is_experimental, + api_node=self.is_api_node, + python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes") + ) + return info + + +def add_to_dict_v1(i: Input, input: dict): + key = "optional" if i.optional else "required" + as_dict = i.as_dict() + # for v1, we don't want to include the optional key + as_dict.pop("optional", None) + input.setdefault(key, {})[i.id] = (i.get_io_type(), as_dict) + +def add_to_dict_v3(io: Input | Output, d: dict): + d[io.id] = (io.get_io_type(), io.as_dict()) + + + +class _ComfyNodeBaseInternal(_ComfyNodeInternal): + """Common base class for storing internal methods and properties; DO NOT USE for defining nodes.""" + + RELATIVE_PYTHON_MODULE = None + SCHEMA = None + + # filled in during execution + resources: Resources = None + hidden: HiddenHolder = None + + @classmethod + @abstractmethod + def define_schema(cls) -> Schema: + """Override this function with one that returns a Schema instance.""" + raise NotImplementedError + + @classmethod + @abstractmethod + def execute(cls, **kwargs) -> NodeOutput: + """Override 
this function with one that performs node's actions.""" + raise NotImplementedError + + @classmethod + def validate_inputs(cls, **kwargs) -> bool: + """Optionally, define this function to validate inputs; equivalent to V1's VALIDATE_INPUTS.""" + raise NotImplementedError + + @classmethod + def fingerprint_inputs(cls, **kwargs) -> Any: + """Optionally, define this function to fingerprint inputs; equivalent to V1's IS_CHANGED.""" + raise NotImplementedError + + @classmethod + def check_lazy_status(cls, **kwargs) -> list[str]: + """Optionally, define this function to return a list of input names that should be evaluated. + + This basic mixin impl. requires all inputs. + + :kwargs: All node inputs will be included here. If the input is ``None``, it should be assumed that it has not yet been evaluated. \ + When using ``INPUT_IS_LIST = True``, unevaluated will instead be ``(None,)``. + + Params should match the nodes execution ``FUNCTION`` (self, and all inputs by name). + Will be executed repeatedly until it returns an empty list, or all requested items were already evaluated (and sent as params). + + Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lazy_evaluation#defining-check-lazy-status + """ + return [name for name in kwargs if kwargs[name] is None] + + def __init__(self): + self.local_resources: ResourcesLocal = None + self.__class__.VALIDATE_CLASS() + + @classmethod + def GET_BASE_CLASS(cls): + return _ComfyNodeBaseInternal + + @final + @classmethod + def VALIDATE_CLASS(cls): + if first_real_override(cls, "define_schema") is None: + raise Exception(f"No define_schema function was defined for node class {cls.__name__}.") + if first_real_override(cls, "execute") is None: + raise Exception(f"No execute function was defined for node class {cls.__name__}.") + + @classproperty + def FUNCTION(cls): # noqa + if inspect.iscoroutinefunction(cls.execute): + return "EXECUTE_NORMALIZED_ASYNC" + return "EXECUTE_NORMALIZED" + + @final + @classmethod + def EXECUTE_NORMALIZED(cls, *args, **kwargs) -> NodeOutput: + to_return = cls.execute(*args, **kwargs) + if to_return is None: + to_return = NodeOutput() + elif isinstance(to_return, NodeOutput): + pass + elif isinstance(to_return, tuple): + to_return = NodeOutput(*to_return) + elif isinstance(to_return, dict): + to_return = NodeOutput.from_dict(to_return) + elif isinstance(to_return, ExecutionBlocker): + to_return = NodeOutput(block_execution=to_return.message) + else: + raise Exception(f"Invalid return type from node: {type(to_return)}") + if to_return.expand is not None and not cls.SCHEMA.enable_expand: + raise Exception(f"Node {cls.__name__} is not expandable, but expand included in NodeOutput; developer should set enable_expand=True on node's Schema to allow this.") + return to_return + + @final + @classmethod + async def EXECUTE_NORMALIZED_ASYNC(cls, *args, **kwargs) -> NodeOutput: + to_return = await cls.execute(*args, **kwargs) + if to_return is None: + to_return = NodeOutput() + elif isinstance(to_return, NodeOutput): + pass + elif isinstance(to_return, tuple): + to_return = NodeOutput(*to_return) + elif isinstance(to_return, dict): + to_return = NodeOutput.from_dict(to_return) + elif isinstance(to_return, ExecutionBlocker): + to_return = NodeOutput(block_execution=to_return.message) + else: + raise Exception(f"Invalid return type from node: {type(to_return)}") + if to_return.expand is not None and not cls.SCHEMA.enable_expand: + raise Exception(f"Node {cls.__name__} is not expandable, but expand included in NodeOutput; developer should 
set enable_expand=True on node's Schema to allow this.") + return to_return + + @final + @classmethod + def PREPARE_CLASS_CLONE(cls, hidden_inputs: dict) -> type[ComfyNode]: + """Creates clone of real node class to prevent monkey-patching.""" + c_type: type[ComfyNode] = cls if is_class(cls) else type(cls) + type_clone: type[ComfyNode] = shallow_clone_class(c_type) + # set hidden + type_clone.hidden = HiddenHolder.from_dict(hidden_inputs) + return type_clone + + @final + @classmethod + def GET_NODE_INFO_V3(cls) -> dict[str, Any]: + schema = cls.GET_SCHEMA() + info = schema.get_v3_info(cls) + return asdict(info) + ############################################# + # V1 Backwards Compatibility code + #-------------------------------------------- + @final + @classmethod + def GET_NODE_INFO_V1(cls) -> dict[str, Any]: + schema = cls.GET_SCHEMA() + info = schema.get_v1_info(cls) + return asdict(info) + + _DESCRIPTION = None + @final + @classproperty + def DESCRIPTION(cls): # noqa + if cls._DESCRIPTION is None: + cls.GET_SCHEMA() + return cls._DESCRIPTION + + _CATEGORY = None + @final + @classproperty + def CATEGORY(cls): # noqa + if cls._CATEGORY is None: + cls.GET_SCHEMA() + return cls._CATEGORY + + _EXPERIMENTAL = None + @final + @classproperty + def EXPERIMENTAL(cls): # noqa + if cls._EXPERIMENTAL is None: + cls.GET_SCHEMA() + return cls._EXPERIMENTAL + + _DEPRECATED = None + @final + @classproperty + def DEPRECATED(cls): # noqa + if cls._DEPRECATED is None: + cls.GET_SCHEMA() + return cls._DEPRECATED + + _API_NODE = None + @final + @classproperty + def API_NODE(cls): # noqa + if cls._API_NODE is None: + cls.GET_SCHEMA() + return cls._API_NODE + + _OUTPUT_NODE = None + @final + @classproperty + def OUTPUT_NODE(cls): # noqa + if cls._OUTPUT_NODE is None: + cls.GET_SCHEMA() + return cls._OUTPUT_NODE + + _INPUT_IS_LIST = None + @final + @classproperty + def INPUT_IS_LIST(cls): # noqa + if cls._INPUT_IS_LIST is None: + cls.GET_SCHEMA() + return cls._INPUT_IS_LIST + _OUTPUT_IS_LIST = None + + @final + @classproperty + def OUTPUT_IS_LIST(cls): # noqa + if cls._OUTPUT_IS_LIST is None: + cls.GET_SCHEMA() + return cls._OUTPUT_IS_LIST + + _RETURN_TYPES = None + @final + @classproperty + def RETURN_TYPES(cls): # noqa + if cls._RETURN_TYPES is None: + cls.GET_SCHEMA() + return cls._RETURN_TYPES + + _RETURN_NAMES = None + @final + @classproperty + def RETURN_NAMES(cls): # noqa + if cls._RETURN_NAMES is None: + cls.GET_SCHEMA() + return cls._RETURN_NAMES + + _OUTPUT_TOOLTIPS = None + @final + @classproperty + def OUTPUT_TOOLTIPS(cls): # noqa + if cls._OUTPUT_TOOLTIPS is None: + cls.GET_SCHEMA() + return cls._OUTPUT_TOOLTIPS + + _NOT_IDEMPOTENT = None + @final + @classproperty + def NOT_IDEMPOTENT(cls): # noqa + if cls._NOT_IDEMPOTENT is None: + cls.GET_SCHEMA() + return cls._NOT_IDEMPOTENT + + @final + @classmethod + def INPUT_TYPES(cls, include_hidden=True, return_schema=False) -> dict[str, dict] | tuple[dict[str, dict], Schema]: + schema = cls.FINALIZE_SCHEMA() + info = schema.get_v1_info(cls) + input = info.input + if not include_hidden: + input.pop("hidden", None) + if return_schema: + return input, schema + return input + + @final + @classmethod + def FINALIZE_SCHEMA(cls): + """Call define_schema and finalize it.""" + schema = cls.define_schema() + schema.finalize() + return schema + + @final + @classmethod + def GET_SCHEMA(cls) -> Schema: + """Validate node class, finalize schema, validate schema, and set expected class properties.""" + cls.VALIDATE_CLASS() + schema = cls.FINALIZE_SCHEMA() + 
schema.validate() + if cls._DESCRIPTION is None: + cls._DESCRIPTION = schema.description + if cls._CATEGORY is None: + cls._CATEGORY = schema.category + if cls._EXPERIMENTAL is None: + cls._EXPERIMENTAL = schema.is_experimental + if cls._DEPRECATED is None: + cls._DEPRECATED = schema.is_deprecated + if cls._API_NODE is None: + cls._API_NODE = schema.is_api_node + if cls._OUTPUT_NODE is None: + cls._OUTPUT_NODE = schema.is_output_node + if cls._INPUT_IS_LIST is None: + cls._INPUT_IS_LIST = schema.is_input_list + if cls._NOT_IDEMPOTENT is None: + cls._NOT_IDEMPOTENT = schema.not_idempotent + + if cls._RETURN_TYPES is None: + output = [] + output_name = [] + output_is_list = [] + output_tooltips = [] + if schema.outputs: + for o in schema.outputs: + output.append(o.io_type) + output_name.append(o.display_name if o.display_name else o.io_type) + output_is_list.append(o.is_output_list) + output_tooltips.append(o.tooltip if o.tooltip else None) + + cls._RETURN_TYPES = output + cls._RETURN_NAMES = output_name + cls._OUTPUT_IS_LIST = output_is_list + cls._OUTPUT_TOOLTIPS = output_tooltips + cls.SCHEMA = schema + return schema + #-------------------------------------------- + ############################################# + + +class ComfyNode(_ComfyNodeBaseInternal): + """Common base class for all V3 nodes.""" + + @classmethod + @abstractmethod + def define_schema(cls) -> Schema: + """Override this function with one that returns a Schema instance.""" + raise NotImplementedError + + @classmethod + @abstractmethod + def execute(cls, **kwargs) -> NodeOutput: + """Override this function with one that performs the node's actions.""" + raise NotImplementedError + + @classmethod + def validate_inputs(cls, **kwargs) -> bool: + """Optionally, define this function to validate inputs; equivalent to V1's VALIDATE_INPUTS.""" + raise NotImplementedError + + @classmethod + def fingerprint_inputs(cls, **kwargs) -> Any: + """Optionally, define this function to fingerprint inputs; equivalent to V1's IS_CHANGED.""" + raise NotImplementedError + + @classmethod + def check_lazy_status(cls, **kwargs) -> list[str]: + """Optionally, define this function to return a list of input names that should be evaluated. + + This basic mixin implementation requires all inputs. + + :kwargs: All node inputs will be included here. If the input is ``None``, it should be assumed that it has not yet been evaluated. \ + When using ``INPUT_IS_LIST = True``, unevaluated inputs will instead be ``(None,)``. + + Params should match the node's execution ``FUNCTION`` (self, and all inputs by name). + This function will be executed repeatedly until it returns an empty list, or until all requested items were already evaluated (and sent as params). + + Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lazy_evaluation#defining-check-lazy-status + """ + return [name for name in kwargs if kwargs[name] is None] + + @final + @classmethod + def GET_BASE_CLASS(cls): + """DO NOT override this function. Will break things in execution.py.""" + return ComfyNode
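The two abstract methods above are the entire authoring surface of a V3 node. A minimal sketch of a concrete node follows, assuming the Schema, Input and Output conventions exposed through comfy_api.latest.io; the exact keyword names are illustrative rather than confirmed by this diff:

from comfy_api.latest import io

class IntAddExample(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="IntAddExample",  # hypothetical id for this sketch
            display_name="Int Add (example)",
            category="example",
            inputs=[io.Int.Input("a"), io.Int.Input("b")],
            outputs=[io.Int.Output()],
        )

    @classmethod
    def execute(cls, a: int, b: int) -> io.NodeOutput:
        # Returning a bare tuple, dict, None or ExecutionBlocker would also
        # work: EXECUTE_NORMALIZED above wraps each of those into a NodeOutput.
        return io.NodeOutput(a + b)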
+ + +class NodeOutput(_NodeOutputInternal): + ''' + Standardized output of a node; accepts any number of positional args and/or a _UIOutput (or dict) passed to the 'ui' kwarg. + ''' + def __init__(self, *args: Any, ui: _UIOutput | dict=None, expand: dict=None, block_execution: str=None): + self.args = args + self.ui = ui + self.expand = expand + self.block_execution = block_execution + + @property + def result(self): + return self.args if len(self.args) > 0 else None + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "NodeOutput": + args = () + ui = None + expand = None + if "result" in data: + result = data["result"] + if isinstance(result, ExecutionBlocker): + return cls(block_execution=result.message) + args = result + if "ui" in data: + ui = data["ui"] + if "expand" in data: + expand = data["expand"] + return cls(*args, ui=ui, expand=expand) + + def __getitem__(self, index) -> Any: + return self.args[index] + +class _UIOutput(ABC): + def __init__(self): + pass + + @abstractmethod + def as_dict(self) -> dict: + ... + + +class _IO: + FolderType = FolderType + UploadType = UploadType + RemoteOptions = RemoteOptions + NumberDisplay = NumberDisplay + + comfytype = staticmethod(comfytype) + Custom = staticmethod(Custom) + Input = Input + WidgetInput = WidgetInput + Output = Output + ComfyTypeI = ComfyTypeI + ComfyTypeIO = ComfyTypeIO + #--------------------------------- + # Supported Types + Boolean = Boolean + Int = Int + Float = Float + String = String + Combo = Combo + MultiCombo = MultiCombo + Image = Image + WanCameraEmbedding = WanCameraEmbedding + Webcam = Webcam + Mask = Mask + Latent = Latent + Conditioning = Conditioning + Sampler = Sampler + Sigmas = Sigmas + Noise = Noise + Guider = Guider + Clip = Clip + ControlNet = ControlNet + Vae = Vae + Model = Model + ClipVision = ClipVision + ClipVisionOutput = ClipVisionOutput + StyleModel = StyleModel + Gligen = Gligen + UpscaleModel = UpscaleModel + Audio = Audio + Video = Video + SVG = SVG + LoraModel = LoraModel + LossMap = LossMap + Voxel = Voxel + Mesh = Mesh + Hooks = Hooks + HookKeyframes = HookKeyframes + TimestepsRange = TimestepsRange + LatentOperation = LatentOperation + FlowControl = FlowControl + Accumulation = Accumulation + Load3DCamera = Load3DCamera + Load3D = Load3D + Load3DAnimation = Load3DAnimation + Photomaker = Photomaker + Point = Point + FaceAnalysis = FaceAnalysis + BBOX = BBOX + SEGS = SEGS + AnyType = AnyType + MultiType = MultiType + #--------------------------------- + HiddenHolder = HiddenHolder + Hidden = Hidden + NodeInfoV1 = NodeInfoV1 + NodeInfoV3 = NodeInfoV3 + Schema = Schema + ComfyNode = ComfyNode + NodeOutput = NodeOutput + add_to_dict_v1 = staticmethod(add_to_dict_v1) + add_to_dict_v3 = staticmethod(add_to_dict_v3) diff --git a/comfy_api/latest/_resources.py b/comfy_api/latest/_resources.py new file mode 100644 index 000000000..a6bdda972 --- /dev/null +++ b/comfy_api/latest/_resources.py @@ -0,0 +1,72 @@ +from __future__ import annotations +import comfy.utils +import folder_paths +import logging +from abc import ABC, abstractmethod +from typing import Any +import torch + +class ResourceKey(ABC): + Type = Any + def __init__(self): + ...
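The hashable key class and the ResourcesLocal cache defined just below give nodes a uniform load-once, reuse-later path for torch state dicts. A rough usage sketch, with placeholder folder and file names:

res = ResourcesLocal()
key = TorchDictFolderFilename("checkpoints", "example.safetensors")  # placeholder names

sd = res.get(key)   # no default: a missing file raises via get_full_path_or_raise
sd2 = res.get(key)  # equal keys hash the same, so this is served from the cache

# With an explicit default, a missing file yields the default instead of raising.
maybe_sd = res.get(TorchDictFolderFilename("loras", "not_there.safetensors"), default=None)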
+ +class TorchDictFolderFilename(ResourceKey): + '''Key for requesting a torch file via file_name from a folder category.''' + Type = dict[str, torch.Tensor] + def __init__(self, folder_name: str, file_name: str): + self.folder_name = folder_name + self.file_name = file_name + + def __hash__(self): + return hash((self.folder_name, self.file_name)) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, TorchDictFolderFilename): + return False + return self.folder_name == other.folder_name and self.file_name == other.file_name + + def __str__(self): + return f"{self.folder_name} -> {self.file_name}" + +class Resources(ABC): + def __init__(self): + ... + + @abstractmethod + def get(self, key: ResourceKey, default: Any=...) -> Any: + pass + +class ResourcesLocal(Resources): + def __init__(self): + super().__init__() + self.local_resources: dict[ResourceKey, Any] = {} + + def get(self, key: ResourceKey, default: Any=...) -> Any: + cached = self.local_resources.get(key, None) + if cached is not None: + logging.info(f"Using cached resource '{key}'") + return cached + logging.info(f"Loading resource '{key}'") + to_return = None + if isinstance(key, TorchDictFolderFilename): + if default is ...: + to_return = comfy.utils.load_torch_file(folder_paths.get_full_path_or_raise(key.folder_name, key.file_name), safe_load=True) + else: + full_path = folder_paths.get_full_path(key.folder_name, key.file_name) + if full_path is not None: + to_return = comfy.utils.load_torch_file(full_path, safe_load=True) + + if to_return is not None: + self.local_resources[key] = to_return + return to_return + if default is not ...: + return default + raise Exception(f"Unsupported resource key type: {type(key)}") + + +class _RESOURCES: + ResourceKey = ResourceKey + TorchDictFolderFilename = TorchDictFolderFilename + Resources = Resources + ResourcesLocal = ResourcesLocal diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py new file mode 100644 index 000000000..6b8a39d58 --- /dev/null +++ b/comfy_api/latest/_ui.py @@ -0,0 +1,457 @@ +from __future__ import annotations + +import json +import os +import random +from io import BytesIO +from typing import Type + +import av +import numpy as np +import torch +import torchaudio +from PIL import Image as PILImage +from PIL.PngImagePlugin import PngInfo + +import folder_paths + +# used for image preview +from comfy.cli_args import args +from comfy_api.latest._io import ComfyNode, FolderType, Image, _UIOutput + + +class SavedResult(dict): + def __init__(self, filename: str, subfolder: str, type: FolderType): + super().__init__(filename=filename, subfolder=subfolder,type=type.value) + + @property + def filename(self) -> str: + return self["filename"] + + @property + def subfolder(self) -> str: + return self["subfolder"] + + @property + def type(self) -> FolderType: + return FolderType(self["type"]) + + +class SavedImages(_UIOutput): + """A UI output class to represent one or more saved images, potentially animated.""" + def __init__(self, results: list[SavedResult], is_animated: bool = False): + super().__init__() + self.results = results + self.is_animated = is_animated + + def as_dict(self) -> dict: + data = {"images": self.results} + if self.is_animated: + data["animated"] = (True,) + return data + + +class SavedAudios(_UIOutput): + """UI wrapper around one or more audio files on disk (FLAC / MP3 / Opus).""" + def __init__(self, results: list[SavedResult]): + super().__init__() + self.results = results + + def as_dict(self) -> dict: + return {"audio": 
self.results} + + def _get_directory_by_folder_type(folder_type: FolderType) -> str: + if folder_type == FolderType.input: + return folder_paths.get_input_directory() + if folder_type == FolderType.output: + return folder_paths.get_output_directory() + return folder_paths.get_temp_directory() + + +class ImageSaveHelper: + """A helper class with static methods to handle image saving and metadata.""" + + @staticmethod + def _convert_tensor_to_pil(image_tensor: torch.Tensor) -> PILImage.Image: + """Converts a single torch tensor to a PIL Image.""" + return PILImage.fromarray(np.clip(255.0 * image_tensor.cpu().numpy(), 0, 255).astype(np.uint8)) + + @staticmethod + def _create_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None: + """Creates a PngInfo object with prompt and extra_pnginfo.""" + if args.disable_metadata or cls is None or not cls.hidden: + return None + metadata = PngInfo() + if cls.hidden.prompt: + metadata.add_text("prompt", json.dumps(cls.hidden.prompt)) + if cls.hidden.extra_pnginfo: + for x in cls.hidden.extra_pnginfo: + metadata.add_text(x, json.dumps(cls.hidden.extra_pnginfo[x])) + return metadata + + @staticmethod + def _create_animated_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None: + """Creates a PngInfo object with prompt and extra_pnginfo for animated PNGs (APNG).""" + if args.disable_metadata or cls is None or not cls.hidden: + return None + metadata = PngInfo() + if cls.hidden.prompt: + metadata.add( + b"comf", + "prompt".encode("latin-1", "strict") + + b"\0" + + json.dumps(cls.hidden.prompt).encode("latin-1", "strict"), + after_idat=True, + ) + if cls.hidden.extra_pnginfo: + for x in cls.hidden.extra_pnginfo: + metadata.add( + b"comf", + x.encode("latin-1", "strict") + + b"\0" + + json.dumps(cls.hidden.extra_pnginfo[x]).encode("latin-1", "strict"), + after_idat=True, + ) + return metadata + + @staticmethod + def _create_webp_metadata(pil_image: PILImage.Image, cls: Type[ComfyNode] | None) -> PILImage.Exif: + """Creates EXIF metadata for WebP images.""" + exif_data = pil_image.getexif() + if args.disable_metadata or cls is None or cls.hidden is None: + return exif_data + if cls.hidden.prompt is not None: + exif_data[0x0110] = "prompt:{}".format(json.dumps(cls.hidden.prompt)) # EXIF 0x0110 = Model + if cls.hidden.extra_pnginfo is not None: + initial_exif_tag = 0x010F # EXIF 0x010f = Make + for key, value in cls.hidden.extra_pnginfo.items(): + exif_data[initial_exif_tag] = "{}:{}".format(key, json.dumps(value)) + initial_exif_tag -= 1 + return exif_data + + @staticmethod + def save_images( + images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, compress_level = 4, + ) -> list[SavedResult]: + """Saves a batch of images as individual PNG files.""" + full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path( + filename_prefix, _get_directory_by_folder_type(folder_type), images[0].shape[1], images[0].shape[0] + ) + results = [] + metadata = ImageSaveHelper._create_png_metadata(cls) + for batch_number, image_tensor in enumerate(images): + img = ImageSaveHelper._convert_tensor_to_pil(image_tensor) + filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) + file = f"{filename_with_batch_num}_{counter:05}_.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level) + results.append(SavedResult(file, subfolder, folder_type)) + counter += 1 + return results + + @staticmethod + def get_save_images_ui(images, filename_prefix: str,
cls: Type[ComfyNode] | None, compress_level=4) -> SavedImages: + """Saves a batch of images and returns a UI object for the node output.""" + return SavedImages( + ImageSaveHelper.save_images( + images, + filename_prefix=filename_prefix, + folder_type=FolderType.output, + cls=cls, + compress_level=compress_level, + ) + ) + + @staticmethod + def save_animated_png( + images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, fps: float, compress_level: int + ) -> SavedResult: + """Saves a batch of images as a single animated PNG.""" + full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path( + filename_prefix, _get_directory_by_folder_type(folder_type), images[0].shape[1], images[0].shape[0] + ) + pil_images = [ImageSaveHelper._convert_tensor_to_pil(img) for img in images] + metadata = ImageSaveHelper._create_animated_png_metadata(cls) + file = f"{filename}_{counter:05}_.png" + save_path = os.path.join(full_output_folder, file) + pil_images[0].save( + save_path, + pnginfo=metadata, + compress_level=compress_level, + save_all=True, + duration=int(1000.0 / fps), + append_images=pil_images[1:], + ) + return SavedResult(file, subfolder, folder_type) + + @staticmethod + def get_save_animated_png_ui( + images, filename_prefix: str, cls: Type[ComfyNode] | None, fps: float, compress_level: int + ) -> SavedImages: + """Saves an animated PNG and returns a UI object for the node output.""" + result = ImageSaveHelper.save_animated_png( + images, + filename_prefix=filename_prefix, + folder_type=FolderType.output, + cls=cls, + fps=fps, + compress_level=compress_level, + ) + return SavedImages([result], is_animated=len(images) > 1) + + @staticmethod + def save_animated_webp( + images, + filename_prefix: str, + folder_type: FolderType, + cls: Type[ComfyNode] | None, + fps: float, + lossless: bool, + quality: int, + method: int, + ) -> SavedResult: + """Saves a batch of images as a single animated WebP.""" + full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path( + filename_prefix, _get_directory_by_folder_type(folder_type), images[0].shape[1], images[0].shape[0] + ) + pil_images = [ImageSaveHelper._convert_tensor_to_pil(img) for img in images] + pil_exif = ImageSaveHelper._create_webp_metadata(pil_images[0], cls) + file = f"{filename}_{counter:05}_.webp" + pil_images[0].save( + os.path.join(full_output_folder, file), + save_all=True, + duration=int(1000.0 / fps), + append_images=pil_images[1:], + exif=pil_exif, + lossless=lossless, + quality=quality, + method=method, + ) + return SavedResult(file, subfolder, folder_type) + + @staticmethod + def get_save_animated_webp_ui( + images, + filename_prefix: str, + cls: Type[ComfyNode] | None, + fps: float, + lossless: bool, + quality: int, + method: int, + ) -> SavedImages: + """Saves an animated WebP and returns a UI object for the node output.""" + result = ImageSaveHelper.save_animated_webp( + images, + filename_prefix=filename_prefix, + folder_type=FolderType.output, + cls=cls, + fps=fps, + lossless=lossless, + quality=quality, + method=method, + ) + return SavedImages([result], is_animated=len(images) > 1) + + +class AudioSaveHelper: + """A helper class with static methods to handle audio saving and metadata.""" + _OPUS_RATES = [8000, 12000, 16000, 24000, 48000] + + @staticmethod + def save_audio( + audio: dict, + filename_prefix: str, + folder_type: FolderType, + cls: Type[ComfyNode] | None, + format: str = "flac", + quality: str = "128k", + ) -> list[SavedResult]: + 
full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path( + filename_prefix, _get_directory_by_folder_type(folder_type) + ) + + metadata = {} + if not args.disable_metadata and cls is not None: + if cls.hidden.prompt is not None: + metadata["prompt"] = json.dumps(cls.hidden.prompt) + if cls.hidden.extra_pnginfo is not None: + for x in cls.hidden.extra_pnginfo: + metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x]) + + results = [] + for batch_number, waveform in enumerate(audio["waveform"].cpu()): + filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) + file = f"{filename_with_batch_num}_{counter:05}_.{format}" + output_path = os.path.join(full_output_folder, file) + + # Use original sample rate initially + sample_rate = audio["sample_rate"] + + # Handle Opus sample rate requirements + if format == "opus": + if sample_rate > 48000: + sample_rate = 48000 + elif sample_rate not in AudioSaveHelper._OPUS_RATES: + # Find the next highest supported rate + for rate in sorted(AudioSaveHelper._OPUS_RATES): + if rate > sample_rate: + sample_rate = rate + break + if sample_rate not in AudioSaveHelper._OPUS_RATES: # Fallback if still not supported + sample_rate = 48000 + + # Resample if necessary + if sample_rate != audio["sample_rate"]: + waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate) + + # Create output with specified format + output_buffer = BytesIO() + output_container = av.open(output_buffer, mode="w", format=format) + + # Set metadata on the container + for key, value in metadata.items(): + output_container.metadata[key] = value + + # Set up the output stream with appropriate properties + if format == "opus": + out_stream = output_container.add_stream("libopus", rate=sample_rate) + if quality == "64k": + out_stream.bit_rate = 64000 + elif quality == "96k": + out_stream.bit_rate = 96000 + elif quality == "128k": + out_stream.bit_rate = 128000 + elif quality == "192k": + out_stream.bit_rate = 192000 + elif quality == "320k": + out_stream.bit_rate = 320000 + elif format == "mp3": + out_stream = output_container.add_stream("libmp3lame", rate=sample_rate) + if quality == "V0": + # TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool + out_stream.codec_context.qscale = 1 + elif quality == "128k": + out_stream.bit_rate = 128000 + elif quality == "320k": + out_stream.bit_rate = 320000 + else: # format == "flac": + out_stream = output_container.add_stream("flac", rate=sample_rate) + + frame = av.AudioFrame.from_ndarray( + waveform.movedim(0, 1).reshape(1, -1).float().numpy(), + format="flt", + layout="mono" if waveform.shape[0] == 1 else "stereo", + ) + frame.sample_rate = sample_rate + frame.pts = 0 + output_container.mux(out_stream.encode(frame)) + + # Flush encoder + output_container.mux(out_stream.encode(None)) + + # Close containers + output_container.close() + + # Write the output to file + output_buffer.seek(0) + with open(output_path, "wb") as f: + f.write(output_buffer.getbuffer()) + + results.append(SavedResult(file, subfolder, folder_type)) + counter += 1 + + return results + + @staticmethod + def get_save_audio_ui( + audio, filename_prefix: str, cls: Type[ComfyNode] | None, format: str = "flac", quality: str = "128k", + ) -> SavedAudios: + """Save and instantly wrap for UI.""" + return SavedAudios( + AudioSaveHelper.save_audio( + audio, + filename_prefix=filename_prefix, + folder_type=FolderType.output, + cls=cls, + 
format=format, + quality=quality, + ) + ) + + +class PreviewImage(_UIOutput): + def __init__(self, image: Image.Type, animated: bool = False, cls: Type[ComfyNode] = None, **kwargs): + self.values = ImageSaveHelper.save_images( + image, + filename_prefix="ComfyUI_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5)), + folder_type=FolderType.temp, + cls=cls, + compress_level=1, + ) + self.animated = animated + + def as_dict(self): + return { + "images": self.values, + "animated": (self.animated,) + } + + +class PreviewMask(PreviewImage): + def __init__(self, mask: PreviewMask.Type, animated: bool = False, cls: Type[ComfyNode] = None, **kwargs): + preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + super().__init__(preview, animated, cls, **kwargs) + + +class PreviewAudio(_UIOutput): + def __init__(self, audio: dict, cls: Type[ComfyNode] = None, **kwargs): + self.values = AudioSaveHelper.save_audio( + audio, + filename_prefix="ComfyUI_temp_" + "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5)), + folder_type=FolderType.temp, + cls=cls, + format="flac", + quality="128k", + ) + + def as_dict(self) -> dict: + return {"audio": self.values} + + +class PreviewVideo(_UIOutput): + def __init__(self, values: list[SavedResult | dict], **kwargs): + self.values = values + + def as_dict(self): + return {"images": self.values, "animated": (True,)} + + +class PreviewUI3D(_UIOutput): + def __init__(self, model_file, camera_info, **kwargs): + self.model_file = model_file + self.camera_info = camera_info + + def as_dict(self): + return {"result": [self.model_file, self.camera_info]} + + +class PreviewText(_UIOutput): + def __init__(self, value: str, **kwargs): + self.value = value + + def as_dict(self): + return {"text": (self.value,)} + + +class _UI: + SavedResult = SavedResult + SavedImages = SavedImages + SavedAudios = SavedAudios + ImageSaveHelper = ImageSaveHelper + AudioSaveHelper = AudioSaveHelper + PreviewImage = PreviewImage + PreviewMask = PreviewMask + PreviewAudio = PreviewAudio + PreviewVideo = PreviewVideo + PreviewUI3D = PreviewUI3D + PreviewText = PreviewText
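Together, these helpers and _UIOutput wrappers replace the ad-hoc {"ui": ...} dicts of V1 nodes. A sketch of a V3 output node using them; the Schema keyword names are assumed, and ui is taken to be the comfy_api.latest module that re-exports these classes:

from comfy_api.latest import io, ui

class SaveImagesSketch(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="SaveImagesSketch",  # hypothetical example node
            category="example",
            inputs=[io.Image.Input("images")],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],  # feeds the PNG metadata helpers
            is_output_node=True,
        )

    @classmethod
    def execute(cls, images) -> io.NodeOutput:
        # Writes one PNG per batch item into the output folder and returns the
        # SavedImages payload the frontend expects; for a throwaway preview,
        # io.NodeOutput(ui=ui.PreviewImage(images, cls=cls)) saves to temp instead.
        return io.NodeOutput(ui=ui.ImageSaveHelper.get_save_images_ui(
            images, filename_prefix="ComfyUI", cls=cls, compress_level=4))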
diff --git a/comfy_api/v0_0_2/__init__.py b/comfy_api/v0_0_2/__init__.py index ea83833fb..de0f95001 100644 --- a/comfy_api/v0_0_2/__init__.py +++ b/comfy_api/v0_0_2/__init__.py @@ -6,6 +6,7 @@ from comfy_api.latest import ( ) from typing import Type, TYPE_CHECKING from comfy_api.internal.async_to_sync import create_sync_class +from comfy_api.latest import io, ui, ComfyExtension #noqa: F401 class ComfyAPIAdapter_v0_0_2(ComfyAPI_latest): @@ -40,4 +41,5 @@ __all__ = [ "Input", "InputImpl", "Types", + "ComfyExtension", ] diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py index 60e2ab91e..f4b427265 100644 --- a/comfy_execution/graph.py +++ b/comfy_execution/graph.py @@ -4,9 +4,12 @@ from typing import Type, Literal import nodes import asyncio import inspect -from comfy_execution.graph_utils import is_link +from comfy_execution.graph_utils import is_link, ExecutionBlocker from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, InputTypeOptions +# NOTE: ExecutionBlocker code got moved to graph_utils.py to prevent torch being imported too soon during unit tests +ExecutionBlocker = ExecutionBlocker + class DependencyCycleError(Exception): pass @@ -294,21 +297,3 @@ class ExecutionList(TopologicalSort): del blocked_by[node_id] to_remove = [node_id for node_id in blocked_by if len(blocked_by[node_id]) == 0] return list(blocked_by.keys()) - -class ExecutionBlocker: - """ - Return this from a node and any users will be blocked with the given error message. - If the message is None, execution will be blocked silently instead. - Generally, you should avoid using this functionality unless absolutely necessary. Whenever it's - possible, a lazy input will be more efficient and have a better user experience. - This functionality is useful in two cases: - 1. You want to conditionally prevent an output node from executing. (Particularly a built-in node - like SaveImage. For your own output nodes, I would recommend just adding a BOOL input and using - lazy evaluation to let it conditionally disable itself.) - 2. You have a node with multiple possible outputs, some of which are invalid and should not be used. - (I would recommend not making nodes like this in the future -- instead, make multiple nodes with - different outputs. Unfortunately, there are several popular existing nodes using this pattern.) - """ - def __init__(self, message): - self.message = message - diff --git a/comfy_execution/graph_utils.py b/comfy_execution/graph_utils.py index 8595e942d..496d2c634 100644 --- a/comfy_execution/graph_utils.py +++ b/comfy_execution/graph_utils.py @@ -137,3 +137,19 @@ def add_graph_prefix(graph, outputs, prefix): return new_graph, tuple(new_outputs) +class ExecutionBlocker: + """ + Return this from a node and any users will be blocked with the given error message. + If the message is None, execution will be blocked silently instead. + Generally, you should avoid using this functionality unless absolutely necessary. Whenever it's + possible, a lazy input will be more efficient and have a better user experience. + This functionality is useful in two cases: + 1. You want to conditionally prevent an output node from executing. (Particularly a built-in node + like SaveImage. For your own output nodes, I would recommend just adding a BOOL input and using + lazy evaluation to let it conditionally disable itself.) + 2. You have a node with multiple possible outputs, some of which are invalid and should not be used. + (I would recommend not making nodes like this in the future -- instead, make multiple nodes with + different outputs. Unfortunately, there are several popular existing nodes using this pattern.)
+ """ + def __init__(self, message): + self.message = message diff --git a/execution.py b/execution.py index cde14c52f..952f0cc5c 100644 --- a/execution.py +++ b/execution.py @@ -32,6 +32,8 @@ from comfy_execution.graph_utils import GraphBuilder, is_link from comfy_execution.validation import validate_node_input from comfy_execution.progress import get_progress_state, reset_progress_state, add_progress_handler, WebUIProgressHandler from comfy_execution.utils import CurrentNodeContext +from comfy_api.internal import _ComfyNodeInternal, _NodeOutputInternal, first_real_override, is_class, make_locked_method_func +from comfy_api.latest import io class ExecutionResult(Enum): @@ -56,7 +58,15 @@ class IsChangedCache: node = self.dynprompt.get_node(node_id) class_type = node["class_type"] class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - if not hasattr(class_def, "IS_CHANGED"): + has_is_changed = False + is_changed_name = None + if issubclass(class_def, _ComfyNodeInternal) and first_real_override(class_def, "fingerprint_inputs") is not None: + has_is_changed = True + is_changed_name = "fingerprint_inputs" + elif hasattr(class_def, "IS_CHANGED"): + has_is_changed = True + is_changed_name = "IS_CHANGED" + if not has_is_changed: self.is_changed[node_id] = False return self.is_changed[node_id] @@ -65,9 +75,9 @@ class IsChangedCache: return self.is_changed[node_id] # Intentionally do not use cached outputs here. We only want constants in IS_CHANGED - input_data_all, _ = get_input_data(node["inputs"], class_def, node_id, None) + input_data_all, _, hidden_inputs = get_input_data(node["inputs"], class_def, node_id, None) try: - is_changed = await _async_map_node_over_list(self.prompt_id, node_id, class_def, input_data_all, "IS_CHANGED") + is_changed = await _async_map_node_over_list(self.prompt_id, node_id, class_def, input_data_all, is_changed_name) is_changed = await resolve_map_node_over_list_results(is_changed) node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed] except Exception as e: @@ -126,9 +136,14 @@ class CacheSet: SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org") def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}): - valid_inputs = class_def.INPUT_TYPES() + is_v3 = issubclass(class_def, _ComfyNodeInternal) + if is_v3: + valid_inputs, schema = class_def.INPUT_TYPES(include_hidden=False, return_schema=True) + else: + valid_inputs = class_def.INPUT_TYPES() input_data_all = {} missing_keys = {} + hidden_inputs_v3 = {} for x in inputs: input_data = inputs[x] _, input_category, input_info = get_input_info(class_def, x, valid_inputs) @@ -153,22 +168,37 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, e elif input_category is not None: input_data_all[x] = [input_data] - if "hidden" in valid_inputs: - h = valid_inputs["hidden"] - for x in h: - if h[x] == "PROMPT": - input_data_all[x] = [dynprompt.get_original_prompt() if dynprompt is not None else {}] - if h[x] == "DYNPROMPT": - input_data_all[x] = [dynprompt] - if h[x] == "EXTRA_PNGINFO": - input_data_all[x] = [extra_data.get('extra_pnginfo', None)] - if h[x] == "UNIQUE_ID": - input_data_all[x] = [unique_id] - if h[x] == "AUTH_TOKEN_COMFY_ORG": - input_data_all[x] = [extra_data.get("auth_token_comfy_org", None)] - if h[x] == "API_KEY_COMFY_ORG": - input_data_all[x] = [extra_data.get("api_key_comfy_org", None)] - return input_data_all, missing_keys + if is_v3: + if schema.hidden: + if io.Hidden.prompt in 
schema.hidden: + hidden_inputs_v3[io.Hidden.prompt] = dynprompt.get_original_prompt() if dynprompt is not None else {} + if io.Hidden.dynprompt in schema.hidden: + hidden_inputs_v3[io.Hidden.dynprompt] = dynprompt + if io.Hidden.extra_pnginfo in schema.hidden: + hidden_inputs_v3[io.Hidden.extra_pnginfo] = extra_data.get('extra_pnginfo', None) + if io.Hidden.unique_id in schema.hidden: + hidden_inputs_v3[io.Hidden.unique_id] = unique_id + if io.Hidden.auth_token_comfy_org in schema.hidden: + hidden_inputs_v3[io.Hidden.auth_token_comfy_org] = extra_data.get("auth_token_comfy_org", None) + if io.Hidden.api_key_comfy_org in schema.hidden: + hidden_inputs_v3[io.Hidden.api_key_comfy_org] = extra_data.get("api_key_comfy_org", None) + else: + if "hidden" in valid_inputs: + h = valid_inputs["hidden"] + for x in h: + if h[x] == "PROMPT": + input_data_all[x] = [dynprompt.get_original_prompt() if dynprompt is not None else {}] + if h[x] == "DYNPROMPT": + input_data_all[x] = [dynprompt] + if h[x] == "EXTRA_PNGINFO": + input_data_all[x] = [extra_data.get('extra_pnginfo', None)] + if h[x] == "UNIQUE_ID": + input_data_all[x] = [unique_id] + if h[x] == "AUTH_TOKEN_COMFY_ORG": + input_data_all[x] = [extra_data.get("auth_token_comfy_org", None)] + if h[x] == "API_KEY_COMFY_ORG": + input_data_all[x] = [extra_data.get("api_key_comfy_org", None)] + return input_data_all, missing_keys, hidden_inputs_v3 map_node_over_list = None #Don't hook this please @@ -184,7 +214,7 @@ async def resolve_map_node_over_list_results(results): raise exc return [x.result() if isinstance(x, asyncio.Task) else x for x in results] -async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None): +async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None, hidden_inputs=None): # check if node wants the lists input_is_list = getattr(obj, "INPUT_IS_LIST", False) @@ -214,7 +244,22 @@ async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, f if execution_block is None: if pre_execute_cb is not None and index is not None: pre_execute_cb(index) - f = getattr(obj, func) + # V3 + if isinstance(obj, _ComfyNodeInternal) or (is_class(obj) and issubclass(obj, _ComfyNodeInternal)): + # if it is just a class, assign no resources or state, just create a clone + if is_class(obj): + type_obj = obj + obj.VALIDATE_CLASS() + class_clone = obj.PREPARE_CLASS_CLONE(hidden_inputs) + # otherwise, use class instance to populate/reuse some fields + else: + type_obj = type(obj) + type_obj.VALIDATE_CLASS() + class_clone = type_obj.PREPARE_CLASS_CLONE(hidden_inputs) + f = make_locked_method_func(type_obj, func, class_clone) + # V1 + else: + f = getattr(obj, func) if inspect.iscoroutinefunction(f): async def async_wrapper(f, prompt_id, unique_id, list_index, args): with CurrentNodeContext(prompt_id, unique_id, list_index): @@ -266,8 +311,8 @@ def merge_result_data(results, obj): output.append([o[i] for o in results]) return output -async def get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=None, pre_execute_cb=None): - return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) +async def get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=None, pre_execute_cb=None,
hidden_inputs=None): + return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs) has_pending_task = any(isinstance(r, asyncio.Task) and not r.done() for r in return_values) if has_pending_task: return return_values, {}, False, has_pending_task @@ -298,6 +343,26 @@ def get_output_from_returns(return_values, obj): result = tuple([result] * len(obj.RETURN_TYPES)) results.append(result) subgraph_results.append((None, result)) + elif isinstance(r, _NodeOutputInternal): + # V3 + if r.ui is not None: + if isinstance(r.ui, dict): + uis.append(r.ui) + else: + uis.append(r.ui.as_dict()) + if r.expand is not None: + has_subgraph = True + new_graph = r.expand + result = r.result + if r.block_execution is not None: + result = tuple([ExecutionBlocker(r.block_execution)] * len(obj.RETURN_TYPES)) + subgraph_results.append((new_graph, result)) + elif r.result is not None: + result = r.result + if r.block_execution is not None: + result = tuple([ExecutionBlocker(r.block_execution)] * len(obj.RETURN_TYPES)) + results.append(result) + subgraph_results.append((None, result)) else: if isinstance(r, ExecutionBlocker): r = tuple([r] * len(obj.RETURN_TYPES)) @@ -381,7 +446,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, has_subgraph = False else: get_progress_state().start_progress(unique_id) - input_data_all, missing_keys = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data) + input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data) if server.client_id is not None: server.last_node_id = display_node_id server.send_sync("executing", { "node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id }, server.client_id) @@ -391,8 +456,12 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, obj = class_def() caches.objects.set(unique_id, obj) - if hasattr(obj, "check_lazy_status"): - required_inputs = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, "check_lazy_status", allow_interrupt=True) + if issubclass(class_def, _ComfyNodeInternal): + lazy_status_present = first_real_override(class_def, "check_lazy_status") is not None + else: + lazy_status_present = getattr(obj, "check_lazy_status", None) is not None + if lazy_status_present: + required_inputs = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, "check_lazy_status", allow_interrupt=True, hidden_inputs=hidden_inputs) required_inputs = await resolve_map_node_over_list_results(required_inputs) required_inputs = set(sum([r for r in required_inputs if isinstance(r,list)], [])) required_inputs = [x for x in required_inputs if isinstance(x,str) and ( @@ -424,7 +493,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, def pre_execute_cb(call_index): # TODO - How to handle this with async functions without contextvars (which requires Python 3.12)? 
GraphBuilder.set_default_prefix(unique_id, call_index, 0) - output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) + output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs) if has_pending_tasks: pending_async_nodes[unique_id] = output_data unblock = execution_list.add_external_block(unique_id) @@ -672,8 +741,14 @@ async def validate_inputs(prompt_id, prompt, item, validated): validate_function_inputs = [] validate_has_kwargs = False - if hasattr(obj_class, "VALIDATE_INPUTS"): - argspec = inspect.getfullargspec(obj_class.VALIDATE_INPUTS) + if issubclass(obj_class, _ComfyNodeInternal): + validate_function_name = "validate_inputs" + validate_function = first_real_override(obj_class, validate_function_name) + else: + validate_function_name = "VALIDATE_INPUTS" + validate_function = getattr(obj_class, validate_function_name, None) + if validate_function is not None: + argspec = inspect.getfullargspec(validate_function) validate_function_inputs = argspec.args validate_has_kwargs = argspec.varkw is not None received_types = {} @@ -848,7 +923,7 @@ async def validate_inputs(prompt_id, prompt, item, validated): continue if len(validate_function_inputs) > 0 or validate_has_kwargs: - input_data_all, _ = get_input_data(inputs, obj_class, unique_id) + input_data_all, _, hidden_inputs = get_input_data(inputs, obj_class, unique_id) input_filtered = {} for x in input_data_all: if x in validate_function_inputs or validate_has_kwargs: @@ -856,8 +931,7 @@ async def validate_inputs(prompt_id, prompt, item, validated): if 'input_types' in validate_function_inputs: input_filtered['input_types'] = [received_types] - #ret = obj_class.VALIDATE_INPUTS(**input_filtered) - ret = await _async_map_node_over_list(prompt_id, unique_id, obj_class, input_filtered, "VALIDATE_INPUTS") + ret = await _async_map_node_over_list(prompt_id, unique_id, obj_class, input_filtered, validate_function_name, hidden_inputs=hidden_inputs) ret = await resolve_map_node_over_list_results(ret) for x in input_filtered: for i, r in enumerate(ret):
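The nodes.py hunk that follows adds a second discovery path for custom node packs: instead of exporting NODE_CLASS_MAPPINGS, a module may expose a comfy_entrypoint callable (sync or async) that returns a ComfyExtension. A minimal module adopting it might look like this sketch, where IntAddExample is the illustrative node from earlier:

from comfy_api.latest import ComfyExtension, io

class ExampleExtension(ComfyExtension):
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        # the loader awaits this and registers each class under its schema.node_id
        return [IntAddExample]

async def comfy_entrypoint() -> ComfyExtension:
    # a plain (non-async) function also works: the loader checks
    # inspect.iscoroutinefunction() and only awaits when needed
    return ExampleExtension()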
diff --git a/nodes.py b/nodes.py index 54e530388..da4a46366 100644 --- a/nodes.py +++ b/nodes.py @@ -6,6 +6,7 @@ import os import sys import json import hashlib +import inspect import traceback import math import time @@ -29,6 +30,7 @@ import comfy.controlnet from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict, FileLocator from comfy_api.internal import register_versions, ComfyAPIWithVersion from comfy_api.version_list import supported_versions +from comfy_api.latest import io, ComfyExtension import comfy.clip_vision @@ -2152,6 +2154,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom if os.path.isdir(web_dir): EXTENSION_WEB_DIRS[module_name] = web_dir + # V1 node definition if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None: for name, node_cls in module.NODE_CLASS_MAPPINGS.items(): if name not in ignore: @@ -2160,8 +2163,38 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None: NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS) return True + # V3 Extension Definition + elif hasattr(module, "comfy_entrypoint"): + entrypoint = getattr(module, "comfy_entrypoint") + if not callable(entrypoint): + logging.warning(f"comfy_entrypoint in {module_path} is not callable, skipping.") + return False + try: + if inspect.iscoroutinefunction(entrypoint): + extension = await entrypoint() + else: + extension = entrypoint() + if not isinstance(extension, ComfyExtension): + logging.warning(f"comfy_entrypoint in {module_path} did not return a ComfyExtension, skipping.") + return False + node_list = await extension.get_node_list() + if not isinstance(node_list, list): + logging.warning(f"comfy_entrypoint in {module_path} did not return a list of nodes, skipping.") + return False + for node_cls in node_list: + node_cls: io.ComfyNode + schema = node_cls.GET_SCHEMA() + if schema.node_id not in ignore: + NODE_CLASS_MAPPINGS[schema.node_id] = node_cls + node_cls.RELATIVE_PYTHON_MODULE = "{}.{}".format(module_parent, get_module_name(module_path)) + if schema.display_name is not None: + NODE_DISPLAY_NAME_MAPPINGS[schema.node_id] = schema.display_name + return True + except Exception as e: + logging.warning(f"Error while calling comfy_entrypoint in {module_path}: {e}") + return False else: - logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.") + logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or comfy_entrypoint (need one).") return False except Exception as e: logging.warning(traceback.format_exc()) @@ -2286,7 +2319,7 @@ async def init_builtin_extra_nodes(): "nodes_string.py", "nodes_camera_trajectory.py", "nodes_edit_model.py", - "nodes_tcfg.py" + "nodes_tcfg.py", ] import_failed = [] diff --git a/server.py b/server.py index 3e06d2fbb..0553a0dd7 100644 --- a/server.py +++ b/server.py @@ -30,6 +30,7 @@ from comfy_api import feature_flags import node_helpers from comfyui_version import __version__ from app.frontend_management import FrontendManager +from comfy_api.internal import _ComfyNodeInternal from app.user_manager import UserManager from app.model_manager import ModelFileManager @@ -591,6 +592,8 @@ class PromptServer(): def node_info(node_class): obj_class = nodes.NODE_CLASS_MAPPINGS[node_class] + if issubclass(obj_class, _ComfyNodeInternal): + return obj_class.GET_NODE_INFO_V1() info = {} info['input'] = obj_class.INPUT_TYPES() info['input_order'] = {key: list(value.keys()) for (key, value) in obj_class.INPUT_TYPES().items()} From 5ee381c058d606209dcafb568af20196e7884fc8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 31 Jul 2025 20:33:27 -0700 Subject: [PATCH 0387/1073] Fix WanFirstLastFrameToVideo node when no clip vision.
(#9134) --- comfy_extras/nodes_wan.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 0b92c68ac..0067d054d 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -149,6 +149,7 @@ class WanFirstLastFrameToVideo: positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) + clip_vision_output = None if clip_vision_start_image is not None: clip_vision_output = clip_vision_start_image From 4696d74305e98a96bda5685b7f11d6ba167c2ed3 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Fri, 1 Aug 2025 15:06:18 +0800 Subject: [PATCH 0388/1073] update template to 0.1.45 (#9135) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8f2f6a56c..3828c5b91 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.44 +comfyui-workflow-templates==0.1.45 comfyui-embedded-docs==0.2.4 torch torchsde From 1e638a140b2f459595fafc73ade5ea5b4024d4b4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 1 Aug 2025 02:25:38 -0700 Subject: [PATCH 0389/1073] Tiny wan vae optimizations. (#9136) --- comfy/ldm/wan/vae.py | 13 +++++++++---- comfy/ldm/wan/vae2_2.py | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/comfy/ldm/wan/vae.py b/comfy/ldm/wan/vae.py index 6b07840fc..791596938 100644 --- a/comfy/ldm/wan/vae.py +++ b/comfy/ldm/wan/vae.py @@ -24,12 +24,17 @@ class CausalConv3d(ops.Conv3d): self.padding[1], 2 * self.padding[0], 0) self.padding = (0, 0, 0) - def forward(self, x, cache_x=None): + def forward(self, x, cache_x=None, cache_list=None, cache_idx=None): + if cache_list is not None: + cache_x = cache_list[cache_idx] + cache_list[cache_idx] = None + padding = list(self._padding) if cache_x is not None and self._padding[4] > 0: cache_x = cache_x.to(x.device) x = torch.cat([cache_x, x], dim=2) padding[4] -= cache_x.shape[2] + del cache_x x = F.pad(x, padding) return super().forward(x) @@ -166,7 +171,7 @@ class ResidualBlock(nn.Module): if in_dim != out_dim else nn.Identity() def forward(self, x, feat_cache=None, feat_idx=[0]): - h = self.shortcut(x) + old_x = x for layer in self.residual: if isinstance(layer, CausalConv3d) and feat_cache is not None: idx = feat_idx[0] @@ -178,12 +183,12 @@ class ResidualBlock(nn.Module): cache_x.device), cache_x ], dim=2) - x = layer(x, feat_cache[idx]) + x = layer(x, cache_list=feat_cache, cache_idx=idx) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = layer(x) - return x + h + return x + self.shortcut(old_x) class AttentionBlock(nn.Module): diff --git a/comfy/ldm/wan/vae2_2.py b/comfy/ldm/wan/vae2_2.py index b9c2d1a26..1f6d584a2 100644 --- a/comfy/ldm/wan/vae2_2.py +++ b/comfy/ldm/wan/vae2_2.py @@ -151,7 +151,7 @@ class ResidualBlock(nn.Module): ], dim=2, ) - x = layer(x, feat_cache[idx]) + x = layer(x, cache_list=feat_cache, cache_idx=idx) feat_cache[idx] = cache_x feat_idx[0] += 1 else: From bff60b5cfc10d1b037a95746226ac6698dc3e373 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 1 Aug 2025 20:03:22 -0400 Subject: [PATCH 0390/1073] ComfyUI version 0.3.48 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/comfyui_version.py b/comfyui_version.py index 20a2e892a..7b29e338d 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.47" +__version__ = "0.3.48" diff --git a/pyproject.toml b/pyproject.toml index 244fdd232..256677fad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.47" +version = "0.3.48" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 3dfefc88d00bde744b729b073058a57e149cddc1 Mon Sep 17 00:00:00 2001 From: Johnpaul Chiwetelu <49923152+Myestery@users.noreply.github.com> Date: Sat, 2 Aug 2025 03:02:06 +0100 Subject: [PATCH 0391/1073] API for Recently Used Items (#8792) * feat: add file creation time to model file metadata and user file info * fix linting --- app/model_manager.py | 21 ++++++++++++++++----- app/user_manager.py | 4 +++- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/app/model_manager.py b/app/model_manager.py index 74d942fb8..ab36bca74 100644 --- a/app/model_manager.py +++ b/app/model_manager.py @@ -130,10 +130,21 @@ class ModelFileManager: for file_name in filenames: try: - relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory) - result.append(relative_path) - except: - logging.warning(f"Warning: Unable to access {file_name}. Skipping this file.") + full_path = os.path.join(dirpath, file_name) + relative_path = os.path.relpath(full_path, directory) + + # Get file metadata + file_info = { + "name": relative_path, + "pathIndex": pathIndex, + "modified": os.path.getmtime(full_path), # Add modification time + "created": os.path.getctime(full_path), # Add creation time + "size": os.path.getsize(full_path) # Add file size + } + result.append(file_info) + + except Exception as e: + logging.warning(f"Warning: Unable to access {file_name}. Error: {e}. Skipping this file.") continue for d in subdirs: @@ -144,7 +155,7 @@ class ModelFileManager: logging.warning(f"Warning: Unable to access {path}. 
Skipping this path.") continue - return [{"name": f, "pathIndex": pathIndex} for f in result], dirs, time.perf_counter() + return result, dirs, time.perf_counter() def get_model_previews(self, filepath: str) -> list[str | BytesIO]: dirname = os.path.dirname(filepath) diff --git a/app/user_manager.py b/app/user_manager.py index d31da5b9b..0ec3e46ea 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -20,13 +20,15 @@ class FileInfo(TypedDict): path: str size: int modified: int + created: int def get_file_info(path: str, relative_to: str) -> FileInfo: return { "path": os.path.relpath(path, relative_to).replace(os.sep, '/'), "size": os.path.getsize(path), - "modified": os.path.getmtime(path) + "modified": os.path.getmtime(path), + "created": os.path.getctime(path) } From fbcc23945dc377c8623bbee6132f15a93ac0c84a Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sun, 3 Aug 2025 02:15:29 +0800 Subject: [PATCH 0392/1073] Update template to 0.1.47 (#9153) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3828c5b91..ffa7dce65 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.45 +comfyui-workflow-templates==0.1.47 comfyui-embedded-docs==0.2.4 torch torchsde From 5f582a97572e87ebfa655d379e8c8f7611c0249f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 2 Aug 2025 12:00:13 -0700 Subject: [PATCH 0393/1073] Make sure all the conds are on the right device. (#9151) --- comfy/model_base.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 6b7978949..3ff8106d7 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -106,10 +106,12 @@ def model_sampling(model_config, model_type): return ModelSampling(model_config) -def convert_tensor(extra, dtype): +def convert_tensor(extra, dtype, device): if hasattr(extra, "dtype"): if extra.dtype != torch.int and extra.dtype != torch.long: - extra = extra.to(dtype) + extra = extra.to(dtype=dtype, device=device) + else: + extra = extra.to(device=device) return extra @@ -174,15 +176,16 @@ class BaseModel(torch.nn.Module): context = context.to(dtype) extra_conds = {} + device = xc.device for o in kwargs: extra = kwargs[o] if hasattr(extra, "dtype"): - extra = convert_tensor(extra, dtype) + extra = convert_tensor(extra, dtype, device) elif isinstance(extra, list): ex = [] for ext in extra: - ex.append(convert_tensor(ext, dtype)) + ex.append(convert_tensor(ext, dtype, device)) extra = ex extra_conds[o] = extra From 13aaa66ec21c397240a9b972d818430b39112588 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 2 Aug 2025 12:09:23 -0700 Subject: [PATCH 0394/1073] Make sure context is on the right device. 
(#9154) --- comfy/model_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 3ff8106d7..4556ee138 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -171,12 +171,12 @@ class BaseModel(torch.nn.Module): dtype = self.manual_cast_dtype xc = xc.to(dtype) + device = xc.device t = self.model_sampling.timestep(t).float() if context is not None: - context = context.to(dtype) + context = context.to(dtype=dtype, device=device) extra_conds = {} - device = xc.device for o in kwargs: extra = kwargs[o] From aebac221937b511d46fe601656acdc753435b849 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 3 Aug 2025 04:08:11 -0700 Subject: [PATCH 0395/1073] Cleanup. (#9160) --- comfy/controlnet.py | 1 - 1 file changed, 1 deletion(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 9a47b86f2..6ed8bd756 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -43,7 +43,6 @@ if TYPE_CHECKING: def broadcast_image_to(tensor, target_batch_size, batched_number): current_batch_size = tensor.shape[0] - #print(current_batch_size, target_batch_size) if current_batch_size == 1: return tensor From 182f90b5eca2baa25474223759039925b286d562 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 4 Aug 2025 00:11:53 -0700 Subject: [PATCH 0396/1073] Lower cond vram use by casting at the same time as device transfer. (#9159) --- comfy/conds.py | 14 +++++++------- comfy/model_base.py | 6 +++--- comfy/samplers.py | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/comfy/conds.py b/comfy/conds.py index 2af2a43a3..f2564e7ef 100644 --- a/comfy/conds.py +++ b/comfy/conds.py @@ -10,8 +10,8 @@ class CONDRegular: def _copy_with(self, cond): return self.__class__(cond) - def process_cond(self, batch_size, device, **kwargs): - return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size).to(device)) + def process_cond(self, batch_size, **kwargs): + return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size)) def can_concat(self, other): if self.cond.shape != other.cond.shape: @@ -29,14 +29,14 @@ class CONDRegular: class CONDNoiseShape(CONDRegular): - def process_cond(self, batch_size, device, area, **kwargs): + def process_cond(self, batch_size, area, **kwargs): data = self.cond if area is not None: dims = len(area) // 2 for i in range(dims): data = data.narrow(i + 2, area[i + dims], area[i]) - return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size).to(device)) + return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size)) class CONDCrossAttn(CONDRegular): @@ -73,7 +73,7 @@ class CONDConstant(CONDRegular): def __init__(self, cond): self.cond = cond - def process_cond(self, batch_size, device, **kwargs): + def process_cond(self, batch_size, **kwargs): return self._copy_with(self.cond) def can_concat(self, other): @@ -92,10 +92,10 @@ class CONDList(CONDRegular): def __init__(self, cond): self.cond = cond - def process_cond(self, batch_size, device, **kwargs): + def process_cond(self, batch_size, **kwargs): out = [] for c in self.cond: - out.append(comfy.utils.repeat_to_batch_size(c, batch_size).to(device)) + out.append(comfy.utils.repeat_to_batch_size(c, batch_size)) return self._copy_with(out) diff --git a/comfy/model_base.py b/comfy/model_base.py index 4556ee138..3a9c031ea 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -109,9 +109,9 
@@ def model_sampling(model_config, model_type): def convert_tensor(extra, dtype, device): if hasattr(extra, "dtype"): if extra.dtype != torch.int and extra.dtype != torch.long: - extra = extra.to(dtype=dtype, device=device) + extra = comfy.model_management.cast_to_device(extra, device, dtype) else: - extra = extra.to(device=device) + extra = comfy.model_management.cast_to_device(extra, device, None) return extra @@ -174,7 +174,7 @@ class BaseModel(torch.nn.Module): device = xc.device t = self.model_sampling.timestep(t).float() if context is not None: - context = context.to(dtype=dtype, device=device) + context = comfy.model_management.cast_to_device(context, device, dtype) extra_conds = {} for o in kwargs: diff --git a/comfy/samplers.py b/comfy/samplers.py index e93d2a315..ad2f40cdc 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -89,7 +89,7 @@ def get_area_and_mult(conds, x_in, timestep_in): conditioning = {} model_conds = conds["model_conds"] for c in model_conds: - conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) + conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], area=area) hooks = conds.get('hooks', None) control = conds.get('control', None) From 140ffc7fdc53e810030f060e421c1f528c2d2ab9 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 4 Aug 2025 00:28:12 -0700 Subject: [PATCH 0397/1073] Fix broken controlnet from last PR. (#9167) --- comfy/controlnet.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 6ed8bd756..988acdb57 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -28,6 +28,7 @@ import comfy.model_detection import comfy.model_patcher import comfy.ops import comfy.latent_formats +import comfy.model_base import comfy.cldm.cldm import comfy.t2i_adapter.adapter @@ -264,12 +265,12 @@ class ControlNet(ControlBase): for c in self.extra_conds: temp = cond.get(c, None) if temp is not None: - extra[c] = temp.to(dtype) + extra[c] = comfy.model_base.convert_tensor(temp, dtype, x_noisy.device) timestep = self.model_sampling_current.timestep(t) x_noisy = self.model_sampling_current.calculate_input(t, x_noisy) - control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.to(dtype), context=context.to(dtype), **extra) + control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.to(dtype), context=comfy.model_management.cast_to_device(context, x_noisy.device, dtype), **extra) return self.control_merge(control, control_prev, output_dtype=None) def copy(self): From 7991341e89cab521441641505ac4b0eea292a829 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 4 Aug 2025 01:02:40 -0700 Subject: [PATCH 0398/1073] Various fixes for broken things from earlier PR. 
(#9168) --- comfy/model_base.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 3a9c031ea..f9591f292 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -401,7 +401,7 @@ class SD21UNCLIP(BaseModel): unclip_conditioning = kwargs.get("unclip_conditioning", None) device = kwargs["device"] if unclip_conditioning is None: - return torch.zeros((1, self.adm_channels)) + return torch.zeros((1, self.adm_channels), device=device) else: return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05), kwargs.get("seed", 0) - 10) @@ -409,7 +409,7 @@ def sdxl_pooled(args, noise_augmentor): if "unclip_conditioning" in args: return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor, seed=args.get("seed", 0) - 10)[:,:1280] else: - return args["pooled_output"] + return args["pooled_output"].to(device=args["device"]) class SDXLRefiner(BaseModel): def __init__(self, model_config, model_type=ModelType.EPS, device=None): @@ -615,9 +615,11 @@ class IP2P: if image is None: image = torch.zeros_like(noise) + else: + image = image.to(device=device) if image.shape[1:] != noise.shape[1:]: - image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + image = utils.common_upscale(image, noise.shape[-1], noise.shape[-2], "bilinear", "center") image = utils.resize_to_batch_size(image, noise.shape[0]) return self.process_ip2p_image_in(image) @@ -696,7 +698,7 @@ class StableCascade_B(BaseModel): #size of prior doesn't really matter if zeros because it gets resized but I still want it to get batched prior = kwargs.get("stable_cascade_prior", torch.zeros((1, 16, (noise.shape[2] * 4) // 42, (noise.shape[3] * 4) // 42), dtype=noise.dtype, layout=noise.layout, device=noise.device)) - out["effnet"] = comfy.conds.CONDRegular(prior) + out["effnet"] = comfy.conds.CONDRegular(prior.to(device=noise.device)) out["sca"] = comfy.conds.CONDRegular(torch.zeros((1,))) return out @@ -1161,10 +1163,10 @@ class WAN21_Vace(WAN21): vace_frames_out = [] for j in range(len(vace_frames)): - vf = vace_frames[j].clone() + vf = vace_frames[j].to(device=noise.device, dtype=noise.dtype, copy=True) for i in range(0, vf.shape[1], 16): vf[:, i:i + 16] = self.process_latent_in(vf[:, i:i + 16]) - vf = torch.cat([vf, mask[j]], dim=1) + vf = torch.cat([vf, mask[j].to(device=noise.device, dtype=noise.dtype)], dim=1) vace_frames_out.append(vf) vace_frames = torch.stack(vace_frames_out, dim=1) From 84f9759424ccbd8de710960c79f0f1d28eef2776 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 4 Aug 2025 01:20:12 -0700 Subject: [PATCH 0399/1073] Add some warnings and prevent crash when cond devices don't match. 
(#9169) --- comfy/conds.py | 7 +++++++ comfy/model_base.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/comfy/conds.py b/comfy/conds.py index f2564e7ef..5af3e93ea 100644 --- a/comfy/conds.py +++ b/comfy/conds.py @@ -1,6 +1,7 @@ import torch import math import comfy.utils +import logging class CONDRegular: @@ -16,6 +17,9 @@ class CONDRegular: def can_concat(self, other): if self.cond.shape != other.cond.shape: return False + if self.cond.device != other.cond.device: + logging.warning("WARNING: conds not on same device, skipping concat.") + return False return True def concat(self, others): @@ -51,6 +55,9 @@ class CONDCrossAttn(CONDRegular): diff = mult_min // min(s1[1], s2[1]) if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much return False + if self.cond.device != other.cond.device: + logging.warning("WARNING: conds not on same device: skipping concat.") + return False return True def concat(self, others): diff --git a/comfy/model_base.py b/comfy/model_base.py index f9591f292..2db81e244 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -409,7 +409,7 @@ def sdxl_pooled(args, noise_augmentor): if "unclip_conditioning" in args: return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor, seed=args.get("seed", 0) - 10)[:,:1280] else: - return args["pooled_output"].to(device=args["device"]) + return args["pooled_output"] class SDXLRefiner(BaseModel): def __init__(self, model_config, model_type=ModelType.EPS, device=None): From 03895dea7c4a6cc93fa362cd11ca450217d74b13 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 4 Aug 2025 01:33:04 -0700 Subject: [PATCH 0400/1073] Fix another issue with the PR. (#9170) --- comfy/model_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 2db81e244..a06686436 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -162,7 +162,7 @@ class BaseModel(torch.nn.Module): xc = self.model_sampling.calculate_input(sigma, x) if c_concat is not None: - xc = torch.cat([xc] + [c_concat], dim=1) + xc = torch.cat([xc] + [comfy.model_management.cast_to_device(c_concat, xc.device, xc.dtype)], dim=1) context = c_crossattn dtype = self.get_dtype() From c012400240d4867cd63a45220eb791b91ad47617 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 4 Aug 2025 19:53:25 -0700 Subject: [PATCH 0401/1073] Initial support for qwen image model. 
(#9179) --- comfy/ldm/qwen_image/model.py | 399 ++++++++++++++++++++++++++++++ comfy/model_base.py | 12 + comfy/model_detection.py | 7 +- comfy/sd.py | 12 +- comfy/supported_models.py | 32 ++- comfy/text_encoders/llama.py | 26 ++ comfy/text_encoders/qwen_image.py | 71 ++++++ nodes.py | 2 +- 8 files changed, 557 insertions(+), 4 deletions(-) create mode 100644 comfy/ldm/qwen_image/model.py create mode 100644 comfy/text_encoders/qwen_image.py diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py new file mode 100644 index 000000000..ff631a60f --- /dev/null +++ b/comfy/ldm/qwen_image/model.py @@ -0,0 +1,399 @@ +# https://github.com/QwenLM/Qwen-Image (Apache 2.0) +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Optional, Tuple +from einops import repeat + +from comfy.ldm.lightricks.model import TimestepEmbedding, Timesteps +from comfy.ldm.modules.attention import optimized_attention_masked +from comfy.ldm.flux.layers import EmbedND + + +class GELU(nn.Module): + def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True, dtype=None, device=None, operations=None): + super().__init__() + self.proj = operations.Linear(dim_in, dim_out, bias=bias, dtype=dtype, device=device) + self.approximate = approximate + + def forward(self, hidden_states): + hidden_states = self.proj(hidden_states) + hidden_states = F.gelu(hidden_states, approximate=self.approximate) + return hidden_states + + +class FeedForward(nn.Module): + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + mult: int = 4, + dropout: float = 0.0, + inner_dim=None, + bias: bool = True, + dtype=None, device=None, operations=None + ): + super().__init__() + if inner_dim is None: + inner_dim = int(dim * mult) + dim_out = dim_out if dim_out is not None else dim + + self.net = nn.ModuleList([]) + self.net.append(GELU(dim, inner_dim, approximate="tanh", bias=bias, dtype=dtype, device=device, operations=operations)) + self.net.append(nn.Dropout(dropout)) + self.net.append(operations.Linear(inner_dim, dim_out, bias=bias, dtype=dtype, device=device)) + + def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: + for module in self.net: + hidden_states = module(hidden_states) + return hidden_states + + +def apply_rotary_emb(x, freqs_cis): + if x.shape[1] == 0: + return x + + t_ = x.reshape(*x.shape[:-1], -1, 1, 2) + t_out = freqs_cis[..., 0] * t_[..., 0] + freqs_cis[..., 1] * t_[..., 1] + return t_out.reshape(*x.shape) + + +class QwenTimestepProjEmbeddings(nn.Module): + def __init__(self, embedding_dim, pooled_projection_dim, dtype=None, device=None, operations=None): + super().__init__() + self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0, scale=1000) + self.timestep_embedder = TimestepEmbedding( + in_channels=256, + time_embed_dim=embedding_dim, + dtype=dtype, + device=device, + operations=operations + ) + + def forward(self, timestep, hidden_states): + timesteps_proj = self.time_proj(timestep) + timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_states.dtype)) + return timesteps_emb + + +class Attention(nn.Module): + def __init__( + self, + query_dim: int, + dim_head: int = 64, + heads: int = 8, + dropout: float = 0.0, + bias: bool = False, + eps: float = 1e-5, + out_bias: bool = True, + out_dim: int = None, + out_context_dim: int = None, + dtype=None, + device=None, + operations=None + ): + super().__init__() + self.inner_dim = out_dim if out_dim is not None 
else dim_head * heads + self.inner_kv_dim = self.inner_dim + self.heads = heads + self.dim_head = dim_head + self.out_dim = out_dim if out_dim is not None else query_dim + self.out_context_dim = out_context_dim if out_context_dim is not None else query_dim + self.dropout = dropout + + # Q/K normalization + self.norm_q = operations.RMSNorm(dim_head, eps=eps, elementwise_affine=True, dtype=dtype, device=device) + self.norm_k = operations.RMSNorm(dim_head, eps=eps, elementwise_affine=True, dtype=dtype, device=device) + self.norm_added_q = operations.RMSNorm(dim_head, eps=eps, dtype=dtype, device=device) + self.norm_added_k = operations.RMSNorm(dim_head, eps=eps, dtype=dtype, device=device) + + # Image stream projections + self.to_q = operations.Linear(query_dim, self.inner_dim, bias=bias, dtype=dtype, device=device) + self.to_k = operations.Linear(query_dim, self.inner_kv_dim, bias=bias, dtype=dtype, device=device) + self.to_v = operations.Linear(query_dim, self.inner_kv_dim, bias=bias, dtype=dtype, device=device) + + # Text stream projections + self.add_q_proj = operations.Linear(query_dim, self.inner_dim, bias=bias, dtype=dtype, device=device) + self.add_k_proj = operations.Linear(query_dim, self.inner_kv_dim, bias=bias, dtype=dtype, device=device) + self.add_v_proj = operations.Linear(query_dim, self.inner_kv_dim, bias=bias, dtype=dtype, device=device) + + # Output projections + self.to_out = nn.ModuleList([ + operations.Linear(self.inner_dim, self.out_dim, bias=out_bias, dtype=dtype, device=device), + nn.Dropout(dropout) + ]) + self.to_add_out = operations.Linear(self.inner_dim, self.out_context_dim, bias=out_bias, dtype=dtype, device=device) + + def forward( + self, + hidden_states: torch.FloatTensor, # Image stream + encoder_hidden_states: torch.FloatTensor = None, # Text stream + encoder_hidden_states_mask: torch.FloatTensor = None, + attention_mask: Optional[torch.FloatTensor] = None, + image_rotary_emb: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + seq_txt = encoder_hidden_states.shape[1] + + img_query = self.to_q(hidden_states).unflatten(-1, (self.heads, -1)) + img_key = self.to_k(hidden_states).unflatten(-1, (self.heads, -1)) + img_value = self.to_v(hidden_states).unflatten(-1, (self.heads, -1)) + + txt_query = self.add_q_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) + txt_key = self.add_k_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) + txt_value = self.add_v_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) + + img_query = self.norm_q(img_query) + img_key = self.norm_k(img_key) + txt_query = self.norm_added_q(txt_query) + txt_key = self.norm_added_k(txt_key) + + joint_query = torch.cat([txt_query, img_query], dim=1) + joint_key = torch.cat([txt_key, img_key], dim=1) + joint_value = torch.cat([txt_value, img_value], dim=1) + + joint_query = apply_rotary_emb(joint_query, image_rotary_emb) + joint_key = apply_rotary_emb(joint_key, image_rotary_emb) + + joint_query = joint_query.flatten(start_dim=2) + joint_key = joint_key.flatten(start_dim=2) + joint_value = joint_value.flatten(start_dim=2) + + joint_hidden_states = optimized_attention_masked(joint_query, joint_key, joint_value, self.heads, attention_mask) + + txt_attn_output = joint_hidden_states[:, :seq_txt, :] + img_attn_output = joint_hidden_states[:, seq_txt:, :] + + img_attn_output = self.to_out[0](img_attn_output) + img_attn_output = self.to_out[1](img_attn_output) + txt_attn_output = self.to_add_out(txt_attn_output) + + return img_attn_output, txt_attn_output 
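The Attention module above follows the joint dual-stream pattern used by MMDiT-style diffusion transformers: text and image tokens are projected by separate linear layers, concatenated along the sequence axis (text first), attended to in one fused call, and then split back into their two streams. Below is a minimal standalone sketch of that flow; it substitutes torch.nn.functional.scaled_dot_product_attention for ComfyUI's optimized_attention_masked and drops the per-stream projections, Q/K RMSNorm, and rotary embeddings for brevity, so it illustrates the pattern rather than reproducing the patch's code.

    import torch
    import torch.nn.functional as F

    def joint_attention(img, txt, heads):
        # img: (B, N_img, D), txt: (B, N_txt, D); D must divide evenly by heads.
        b, n_txt, d = txt.shape
        joint = torch.cat([txt, img], dim=1)  # text tokens first, image tokens second
        # Reuse the joint sequence as Q, K and V; the real module computes
        # separate q/k/v projections per stream before concatenating.
        qkv = joint.view(b, -1, heads, d // heads).transpose(1, 2)  # (B, H, N, D/H)
        out = F.scaled_dot_product_attention(qkv, qkv, qkv)
        out = out.transpose(1, 2).reshape(b, -1, d)
        return out[:, n_txt:], out[:, :n_txt]  # (img_attn_output, txt_attn_output)

    img_out, txt_out = joint_attention(torch.randn(1, 16, 64), torch.randn(1, 4, 64), heads=8)
    print(img_out.shape, txt_out.shape)  # torch.Size([1, 16, 64]) torch.Size([1, 4, 64])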
+ + +class QwenImageTransformerBlock(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + eps: float = 1e-6, + dtype=None, + device=None, + operations=None + ): + super().__init__() + self.dim = dim + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + + self.img_mod = nn.Sequential( + nn.SiLU(), + operations.Linear(dim, 6 * dim, bias=True, dtype=dtype, device=device), + ) + self.img_norm1 = operations.LayerNorm(dim, elementwise_affine=False, eps=eps, dtype=dtype, device=device) + self.img_norm2 = operations.LayerNorm(dim, elementwise_affine=False, eps=eps, dtype=dtype, device=device) + self.img_mlp = FeedForward(dim=dim, dim_out=dim, dtype=dtype, device=device, operations=operations) + + self.txt_mod = nn.Sequential( + nn.SiLU(), + operations.Linear(dim, 6 * dim, bias=True, dtype=dtype, device=device), + ) + self.txt_norm1 = operations.LayerNorm(dim, elementwise_affine=False, eps=eps, dtype=dtype, device=device) + self.txt_norm2 = operations.LayerNorm(dim, elementwise_affine=False, eps=eps, dtype=dtype, device=device) + self.txt_mlp = FeedForward(dim=dim, dim_out=dim, dtype=dtype, device=device, operations=operations) + + self.attn = Attention( + query_dim=dim, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=dim, + bias=True, + eps=eps, + dtype=dtype, + device=device, + operations=operations, + ) + + def _modulate(self, x, mod_params): + shift, scale, gate = mod_params.chunk(3, dim=-1) + return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1), gate.unsqueeze(1) + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + encoder_hidden_states_mask: torch.Tensor, + temb: torch.Tensor, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + img_mod_params = self.img_mod(temb) + txt_mod_params = self.txt_mod(temb) + img_mod1, img_mod2 = img_mod_params.chunk(2, dim=-1) + txt_mod1, txt_mod2 = txt_mod_params.chunk(2, dim=-1) + + img_normed = self.img_norm1(hidden_states) + img_modulated, img_gate1 = self._modulate(img_normed, img_mod1) + txt_normed = self.txt_norm1(encoder_hidden_states) + txt_modulated, txt_gate1 = self._modulate(txt_normed, txt_mod1) + + img_attn_output, txt_attn_output = self.attn( + hidden_states=img_modulated, + encoder_hidden_states=txt_modulated, + encoder_hidden_states_mask=encoder_hidden_states_mask, + image_rotary_emb=image_rotary_emb, + ) + + hidden_states = hidden_states + img_gate1 * img_attn_output + encoder_hidden_states = encoder_hidden_states + txt_gate1 * txt_attn_output + + img_normed2 = self.img_norm2(hidden_states) + img_modulated2, img_gate2 = self._modulate(img_normed2, img_mod2) + hidden_states = hidden_states + img_gate2 * self.img_mlp(img_modulated2) + + txt_normed2 = self.txt_norm2(encoder_hidden_states) + txt_modulated2, txt_gate2 = self._modulate(txt_normed2, txt_mod2) + encoder_hidden_states = encoder_hidden_states + txt_gate2 * self.txt_mlp(txt_modulated2) + + return encoder_hidden_states, hidden_states + + +class LastLayer(nn.Module): + def __init__( + self, + embedding_dim: int, + conditioning_embedding_dim: int, + elementwise_affine=False, + eps=1e-6, + bias=True, + dtype=None, device=None, operations=None + ): + super().__init__() + self.silu = nn.SiLU() + self.linear = operations.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias, dtype=dtype, device=device) + self.norm = operations.LayerNorm(embedding_dim, eps, 
elementwise_affine=False, bias=bias, dtype=dtype, device=device) + + def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: + emb = self.linear(self.silu(conditioning_embedding)) + scale, shift = torch.chunk(emb, 2, dim=1) + x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] + return x + + +class QwenImageTransformer2DModel(nn.Module): + def __init__( + self, + patch_size: int = 2, + in_channels: int = 64, + out_channels: Optional[int] = 16, + num_layers: int = 60, + attention_head_dim: int = 128, + num_attention_heads: int = 24, + joint_attention_dim: int = 3584, + pooled_projection_dim: int = 768, + guidance_embeds: bool = False, + axes_dims_rope: Tuple[int, int, int] = (16, 56, 56), + image_model=None, + dtype=None, + device=None, + operations=None, + ): + super().__init__() + self.dtype = dtype + self.patch_size = patch_size + self.out_channels = out_channels or in_channels + self.inner_dim = num_attention_heads * attention_head_dim + + self.pe_embedder = EmbedND(dim=attention_head_dim, theta=10000, axes_dim=list(axes_dims_rope)) + + self.time_text_embed = QwenTimestepProjEmbeddings( + embedding_dim=self.inner_dim, + pooled_projection_dim=pooled_projection_dim, + dtype=dtype, + device=device, + operations=operations + ) + + self.txt_norm = operations.RMSNorm(joint_attention_dim, eps=1e-6, dtype=dtype, device=device) + self.img_in = operations.Linear(in_channels, self.inner_dim, dtype=dtype, device=device) + self.txt_in = operations.Linear(joint_attention_dim, self.inner_dim, dtype=dtype, device=device) + + self.transformer_blocks = nn.ModuleList([ + QwenImageTransformerBlock( + dim=self.inner_dim, + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + dtype=dtype, + device=device, + operations=operations + ) + for _ in range(num_layers) + ]) + + self.norm_out = LastLayer(self.inner_dim, self.inner_dim, dtype=dtype, device=device, operations=operations) + self.proj_out = operations.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True, dtype=dtype, device=device) + self.gradient_checkpointing = False + + def pos_embeds(self, x, context): + bs, c, t, h, w = x.shape + patch_size = self.patch_size + h_len = ((h + (patch_size // 2)) // patch_size) + w_len = ((w + (patch_size // 2)) // patch_size) + + img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) + img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) + img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) + img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs) + + txt_start = round(max(h_len, w_len)) + txt_ids = torch.linspace(txt_start, txt_start + context.shape[1], steps=context.shape[1], device=x.device, dtype=x.dtype).reshape(1, -1, 1).repeat(bs, 1, 3) + ids = torch.cat((txt_ids, img_ids), dim=1) + return self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype) + + def forward( + self, + x, + timesteps, + context, + attention_mask=None, + guidance: torch.Tensor = None, + **kwargs + ): + timestep = timesteps + encoder_hidden_states = context + encoder_hidden_states_mask = attention_mask + + image_rotary_emb = self.pos_embeds(x, context) + + orig_shape = x.shape + hidden_states = x.view(orig_shape[0], orig_shape[1], orig_shape[-2] // 2, 2, orig_shape[-1] // 2, 2) + hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5) + hidden_states = 
hidden_states.reshape(orig_shape[0], (orig_shape[-2] // 2) * (orig_shape[-1] // 2), orig_shape[1] * 4) + + hidden_states = self.img_in(hidden_states) + encoder_hidden_states = self.txt_norm(encoder_hidden_states) + encoder_hidden_states = self.txt_in(encoder_hidden_states) + + if guidance is not None: + guidance = guidance * 1000 + + temb = ( + self.time_text_embed(timestep, hidden_states) + if guidance is None + else self.time_text_embed(timestep, guidance, hidden_states) + ) + + for block in self.transformer_blocks: + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_states_mask=encoder_hidden_states_mask, + temb=temb, + image_rotary_emb=image_rotary_emb, + ) + + hidden_states = self.norm_out(hidden_states, temb) + hidden_states = self.proj_out(hidden_states) + + hidden_states = hidden_states.view(orig_shape[0], orig_shape[-2] // 2, orig_shape[-1] // 2, orig_shape[1], 2, 2) + hidden_states = hidden_states.permute(0, 3, 1, 4, 2, 5) + return hidden_states.reshape(orig_shape) diff --git a/comfy/model_base.py b/comfy/model_base.py index a06686436..8a2d9cbe6 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -42,6 +42,7 @@ import comfy.ldm.hidream.model import comfy.ldm.chroma.model import comfy.ldm.ace.model import comfy.ldm.omnigen.omnigen2 +import comfy.ldm.qwen_image.model import comfy.model_management import comfy.patcher_extension @@ -1308,3 +1309,14 @@ class Omnigen2(BaseModel): if ref_latents is not None: out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16]) return out + +class QwenImage(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLUX, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.qwen_image.model.QwenImageTransformer2DModel) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 9fc1f42de..8b57ebd2f 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -481,6 +481,11 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["timestep_scale"] = 1000.0 return dit_config + if '{}txt_norm.weight'.format(key_prefix) in state_dict_keys: # Qwen Image + dit_config = {} + dit_config["image_model"] = "qwen_image" + return dit_config + if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys: return None @@ -867,7 +872,7 @@ def convert_diffusers_mmdit(state_dict, output_prefix=""): depth_single_blocks = count_blocks(state_dict, 'single_transformer_blocks.{}.') hidden_size = state_dict["x_embedder.bias"].shape[0] sd_map = comfy.utils.flux_to_diffusers({"depth": depth, "depth_single_blocks": depth_single_blocks, "hidden_size": hidden_size}, output_prefix=output_prefix) - elif 'transformer_blocks.0.attn.add_q_proj.weight' in state_dict: #SD3 + elif 'transformer_blocks.0.attn.add_q_proj.weight' in state_dict and 'pos_embed.proj.weight' in state_dict: #SD3 num_blocks = count_blocks(state_dict, 'transformer_blocks.{}.') depth = state_dict["pos_embed.proj.weight"].shape[0] // 64 sd_map = comfy.utils.mmdit_to_diffusers({"depth": depth, "num_blocks": num_blocks}, output_prefix=output_prefix) diff --git a/comfy/sd.py b/comfy/sd.py index e0498e585..bb5d61fb3 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ 
-47,6 +47,7 @@ import comfy.text_encoders.wan import comfy.text_encoders.hidream import comfy.text_encoders.ace import comfy.text_encoders.omnigen2 +import comfy.text_encoders.qwen_image import comfy.model_patcher import comfy.lora @@ -771,6 +772,7 @@ class CLIPType(Enum): CHROMA = 15 ACE = 16 OMNIGEN2 = 17 + QWEN_IMAGE = 18 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -791,6 +793,7 @@ class TEModel(Enum): T5_XXL_OLD = 8 GEMMA_2_2B = 9 QWEN25_3B = 10 + QWEN25_7B = 11 def detect_te_model(sd): if "text_model.encoder.layers.30.mlp.fc1.weight" in sd: @@ -812,7 +815,11 @@ def detect_te_model(sd): if 'model.layers.0.post_feedforward_layernorm.weight' in sd: return TEModel.GEMMA_2_2B if 'model.layers.0.self_attn.k_proj.bias' in sd: - return TEModel.QWEN25_3B + weight = sd['model.layers.0.self_attn.k_proj.bias'] + if weight.shape[0] == 256: + return TEModel.QWEN25_3B + if weight.shape[0] == 512: + return TEModel.QWEN25_7B if "model.layers.0.post_attention_layernorm.weight" in sd: return TEModel.LLAMA3_8 return None @@ -917,6 +924,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip elif te_model == TEModel.QWEN25_3B: clip_target.clip = comfy.text_encoders.omnigen2.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.omnigen2.Omnigen2Tokenizer + elif te_model == TEModel.QWEN25_7B: + clip_target.clip = comfy.text_encoders.qwen_image.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.qwen_image.QwenImageTokenizer else: # clip_l if clip_type == CLIPType.SD3: diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 8f3f4652d..880055bd3 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -19,6 +19,7 @@ import comfy.text_encoders.lumina2 import comfy.text_encoders.wan import comfy.text_encoders.ace import comfy.text_encoders.omnigen2 +import comfy.text_encoders.qwen_image from . import supported_models_base from . 
import latent_formats @@ -1229,7 +1230,36 @@ class Omnigen2(supported_models_base.BASE): hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_3b.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.omnigen2.Omnigen2Tokenizer, comfy.text_encoders.omnigen2.te(**hunyuan_detect)) +class QwenImage(supported_models_base.BASE): + unet_config = { + "image_model": "qwen_image", + } -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2] + sampling_settings = { + "multiplier": 1.0, + "shift": 2.6, + } + + memory_usage_factor = 1.8 #TODO + + unet_extra_config = {} + latent_format = latent_formats.Wan21 + + supported_inference_dtypes = [torch.bfloat16, torch.float32] + + vae_key_prefix = ["vae."] + text_encoder_key_prefix = ["text_encoders."] + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.QwenImage(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.qwen_image.QwenImageTokenizer, comfy.text_encoders.qwen_image.te(**hunyuan_detect)) + + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 7fbd0f604..1da6a0c94 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -43,6 +43,23 @@ class Qwen25_3BConfig: mlp_activation = "silu" qkv_bias = True +@dataclass +class Qwen25_7BVLI_Config: + vocab_size: int = 152064 + hidden_size: int = 3584 + intermediate_size: int = 18944 + num_hidden_layers: int = 28 + num_attention_heads: int = 28 + num_key_value_heads: int = 4 + max_position_embeddings: int = 128000 + rms_norm_eps: float = 1e-6 + rope_theta: float = 1000000.0 + transformer_type: str = "llama" + head_dim = 128 + rms_norm_add = False + mlp_activation = "silu" + qkv_bias = True + @dataclass class Gemma2_2B_Config: vocab_size: int = 256000 @@ -348,6 +365,15 @@ class Qwen25_3B(BaseLlama, torch.nn.Module): self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) self.dtype = dtype +class Qwen25_7BVLI(BaseLlama, torch.nn.Module): + def __init__(self, config_dict, 
dtype, device, operations): + super().__init__() + config = Qwen25_7BVLI_Config(**config_dict) + self.num_layers = config.num_hidden_layers + + self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) + self.dtype = dtype + class Gemma2_2B(BaseLlama, torch.nn.Module): def __init__(self, config_dict, dtype, device, operations): super().__init__() diff --git a/comfy/text_encoders/qwen_image.py b/comfy/text_encoders/qwen_image.py new file mode 100644 index 000000000..ce5c98097 --- /dev/null +++ b/comfy/text_encoders/qwen_image.py @@ -0,0 +1,71 @@ +from transformers import Qwen2Tokenizer +from comfy import sd1_clip +import comfy.text_encoders.llama +import os +import torch +import numbers + +class Qwen25_7BVLITokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") + super().__init__(tokenizer_path, pad_with_end=False, embedding_size=3584, embedding_key='qwen25_7b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=151643, tokenizer_data=tokenizer_data) + + +class QwenImageTokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen25_7b", tokenizer=Qwen25_7BVLITokenizer) + self.llama_template = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + + def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None,**kwargs): + if llama_template is None: + llama_text = self.llama_template.format(text) + else: + llama_text = llama_template.format(text) + return super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, **kwargs) + + +class Qwen25_7BVLIModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen25_7BVLI, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) + + +class QwenImageTEModel(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, name="qwen25_7b", clip_model=Qwen25_7BVLIModel, model_options=model_options) + + def encode_token_weights(self, token_weight_pairs): + out, pooled, extra = super().encode_token_weights(token_weight_pairs) + tok_pairs = token_weight_pairs["qwen25_7b"][0] + count_im_start = 0 + for i, v in enumerate(tok_pairs): + elem = v[0] + if not torch.is_tensor(elem): + if isinstance(elem, numbers.Integral): + if elem == 151644 and count_im_start < 2: + template_end = i + count_im_start += 1 + + if out.shape[1] > (template_end + 3): + if tok_pairs[template_end + 1][0] == 872: + if tok_pairs[template_end + 2][0] == 198: + template_end += 3 + + out = out[:, template_end:] + + extra["attention_mask"] = extra["attention_mask"][:, template_end:] + if extra["attention_mask"].sum() == torch.numel(extra["attention_mask"]): + extra.pop("attention_mask") # 
attention mask is useless if no masked elements + + return out, pooled, extra + + +def te(dtype_llama=None, llama_scaled_fp8=None): + class QwenImageTEModel_(QwenImageTEModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + model_options = model_options.copy() + model_options["scaled_fp8"] = llama_scaled_fp8 + if dtype_llama is not None: + dtype = dtype_llama + super().__init__(device=device, dtype=dtype, model_options=model_options) + return QwenImageTEModel_ diff --git a/nodes.py b/nodes.py index da4a46366..9bedbcaca 100644 --- a/nodes.py +++ b/nodes.py @@ -925,7 +925,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), From f69609bbd6c20f4814e313f8974656b187a9bee2 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 4 Aug 2025 22:52:25 -0700 Subject: [PATCH 0402/1073] Add Veo3 video generation node with audio support (#9110) - Create new Veo3VideoGenerationNode that extends VeoVideoGenerationNode - Add support for generateAudio parameter (only for Veo3 models) - Support new Veo3 models: veo-3.0-generate-001, veo-3.0-fast-generate-001 - Fix Veo3 duration constraint to 8 seconds only - Update original node to be clearly Veo 2 only - Update API paths to use model parameter: /proxy/veo/{model}/generate - Regenerate API types from staging to include generateAudio parameter - Fix TripoModelVersion enum reference after regeneration - Mark generated API types file in .gitattributes --- .gitattributes | 1 + comfy_api_nodes/apis/__init__.py | 2656 ++++++++++++++++++++++++++++- comfy_api_nodes/apis/tripo_api.py | 2 +- comfy_api_nodes/nodes_veo2.py | 98 +- 4 files changed, 2664 insertions(+), 93 deletions(-) diff --git a/.gitattributes b/.gitattributes index 4391de678..5b3c15bb4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,3 @@ /web/assets/** linguist-generated /web/** linguist-vendored +comfy_api_nodes/apis/__init__.py linguist-generated diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 086028abe..54298e8a9 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: filtered-openapi.yaml -# timestamp: 2025-07-06T09:47:31+00:00 +# timestamp: 2025-07-30T08:54:00+00:00 from __future__ import annotations @@ -37,6 +37,99 @@ class AuditLog(BaseModel): ) +class BFLAsyncResponse(BaseModel): + id: str = Field(..., title='Id') + polling_url: str = Field(..., title='Polling Url') + + +class BFLAsyncWebhookResponse(BaseModel): + id: str = Field(..., title='Id') + status: str = Field(..., title='Status') + webhook_url: str = Field(..., title='Webhook Url') + + +class CannyHighThreshold(RootModel[int]): + root: int = Field( + ..., + description='High threshold for Canny edge detection', + ge=0, + le=500, + title='Canny High Threshold', + ) + + +class CannyLowThreshold(RootModel[int]): + root: int = Field( + ..., + description='Low threshold for Canny edge detection', + 
ge=0, + le=500, + title='Canny Low Threshold', + ) + + +class Guidance(RootModel[float]): + root: float = Field( + ..., + description='Guidance strength for the image generation process', + ge=1.0, + le=100.0, + title='Guidance', + ) + + +class Steps(RootModel[int]): + root: int = Field( + ..., + description='Number of steps for the image generation process', + ge=15, + le=50, + title='Steps', + ) + + +class WebhookUrl(RootModel[AnyUrl]): + root: AnyUrl = Field( + ..., description='URL to receive webhook notifications', title='Webhook Url' + ) + + +class BFLFluxKontextMaxGenerateRequest(BaseModel): + guidance: Optional[float] = Field( + 3, description='The guidance scale for generation', ge=1.0, le=20.0 + ) + input_image: str = Field(..., description='Base64 encoded image to be edited') + prompt: str = Field( + ..., description='The text prompt describing what to edit on the image' + ) + steps: Optional[int] = Field( + 50, description='Number of inference steps', ge=1, le=50 + ) + + +class BFLFluxKontextMaxGenerateResponse(BaseModel): + id: str = Field(..., description='Job ID for tracking') + polling_url: str = Field(..., description='URL to poll for results') + + +class BFLFluxKontextProGenerateRequest(BaseModel): + guidance: Optional[float] = Field( + 3, description='The guidance scale for generation', ge=1.0, le=20.0 + ) + input_image: str = Field(..., description='Base64 encoded image to be edited') + prompt: str = Field( + ..., description='The text prompt describing what to edit on the image' + ) + steps: Optional[int] = Field( + 50, description='Number of inference steps', ge=1, le=50 + ) + + +class BFLFluxKontextProGenerateResponse(BaseModel): + id: str = Field(..., description='Job ID for tracking') + polling_url: str = Field(..., description='URL to poll for results') + + class OutputFormat(str, Enum): jpeg = 'jpeg' png = 'png' @@ -68,6 +161,67 @@ class BFLFluxPro11GenerateResponse(BaseModel): polling_url: str = Field(..., description='URL to poll for results') +class Bottom(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand at the bottom of the image', + ge=0, + le=2048, + title='Bottom', + ) + + +class Guidance2(RootModel[float]): + root: float = Field( + ..., + description='Guidance strength for the image generation process', + ge=1.5, + le=100.0, + title='Guidance', + ) + + +class Left(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand on the left side of the image', + ge=0, + le=2048, + title='Left', + ) + + +class Right(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand on the right side of the image', + ge=0, + le=2048, + title='Right', + ) + + +class Steps2(RootModel[int]): + root: int = Field( + ..., + description='Number of steps for the image generation process', + examples=[50], + ge=15, + le=50, + title='Steps', + ) + + +class Top(RootModel[int]): + root: int = Field( + ..., + description='Number of pixels to expand at the top of the image', + ge=0, + le=2048, + title='Top', + ) + + class BFLFluxProGenerateRequest(BaseModel): guidance_scale: Optional[float] = Field( None, description='The guidance scale for generation.', ge=1.0, le=20.0 @@ -96,7 +250,71 @@ class BFLFluxProGenerateResponse(BaseModel): polling_url: str = Field(..., description='URL to poll for the generation result.') +class BFLOutputFormat(str, Enum): + jpeg = 'jpeg' + png = 'png' + + +class BFLValidationError(BaseModel): + loc: List[Union[str, int]] = Field(..., title='Location') + msg: str = 
Field(..., title='Message') + type: str = Field(..., title='Error Type') + + class Status(str, Enum): + success = 'success' + not_found = 'not_found' + error = 'error' + + +class ClaimMyNodeRequest(BaseModel): + GH_TOKEN: str = Field( + ..., description='GitHub token to verify if the user owns the repo of the node' + ) + + +class ComfyNode(BaseModel): + category: Optional[str] = Field( + None, + description='UI category where the node is listed, used for grouping nodes.', + ) + comfy_node_name: Optional[str] = Field( + None, description='Unique identifier for the node' + ) + deprecated: Optional[bool] = Field( + None, + description='Indicates if the node is deprecated. Deprecated nodes are hidden in the UI.', + ) + description: Optional[str] = Field( + None, description="Brief description of the node's functionality or purpose." + ) + experimental: Optional[bool] = Field( + None, + description='Indicates if the node is experimental, subject to changes or removal.', + ) + function: Optional[str] = Field( + None, description='Name of the entry-point function to execute the node.' + ) + input_types: Optional[str] = Field(None, description='Defines input parameters') + output_is_list: Optional[List[bool]] = Field( + None, description='Boolean values indicating if each output is a list.' + ) + return_names: Optional[str] = Field( + None, description='Names of the outputs for clarity in workflows.' + ) + return_types: Optional[str] = Field( + None, description='Specifies the types of outputs produced by the node.' + ) + + +class ComfyNodeCloudBuildInfo(BaseModel): + build_id: Optional[str] = None + location: Optional[str] = None + project_id: Optional[str] = None + project_number: Optional[str] = None + + +class Status1(str, Enum): in_progress = 'in_progress' completed = 'completed' incomplete = 'incomplete' @@ -113,7 +331,7 @@ class ComputerToolCall(BaseModel): description='An identifier used when responding to the tool call with output.\n', ) id: str = Field(..., description='The unique ID of the computer call.') - status: Status = Field( + status: Status1 = Field( ..., description='The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API.\n', ) @@ -156,6 +374,7 @@ class Customer(BaseModel): None, description='The date and time the user was created' ) email: Optional[str] = Field(None, description='The email address for this user') + has_fund: Optional[bool] = Field(None, description='Whether the user has funds') id: str = Field(..., description='The firebase UID of the user') is_admin: Optional[bool] = Field(None, description='Whether the user is an admin') metronome_id: Optional[str] = Field(None, description='The Metronome customer ID') @@ -194,6 +413,16 @@ class Type2(str, Enum): message = 'message' +class Error(BaseModel): + details: Optional[List[str]] = Field( + None, + description='Optional detailed information about the error or hints for resolving it.', + ) + message: Optional[str] = Field( + None, description='A clear and concise description of the error.' 
+ ) + + class ErrorResponse(BaseModel): error: str message: str @@ -221,7 +450,7 @@ class Result(BaseModel): ) -class Status1(str, Enum): +class Status2(str, Enum): in_progress = 'in_progress' searching = 'searching' completed = 'completed' @@ -241,7 +470,7 @@ class FileSearchToolCall(BaseModel): results: Optional[List[Result]] = Field( None, description='The results of the file search tool call.\n' ) - status: Status1 = Field( + status: Status2 = Field( ..., description='The status of the file search tool call. One of `in_progress`, \n`searching`, `incomplete` or `failed`,\n', ) @@ -266,7 +495,7 @@ class FunctionTool(BaseModel): type: Literal['FunctionTool'] = Field(..., description='The type of tool') -class Status2(str, Enum): +class Status3(str, Enum): in_progress = 'in_progress' completed = 'completed' incomplete = 'incomplete' @@ -288,7 +517,7 @@ class FunctionToolCall(BaseModel): None, description='The unique ID of the function tool call.\n' ) name: str = Field(..., description='The name of the function to run.\n') - status: Optional[Status2] = Field( + status: Optional[Status3] = Field( None, description='The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API.\n', ) @@ -442,6 +671,95 @@ class GeminiVideoMetadata(BaseModel): startOffset: Optional[GeminiOffset] = None +class GitCommitSummary(BaseModel): + author: Optional[str] = Field(None, description='The author of the commit') + branch_name: Optional[str] = Field( + None, description='The branch where the commit was made' + ) + commit_hash: Optional[str] = Field(None, description='The hash of the commit') + commit_name: Optional[str] = Field(None, description='The name of the commit') + status_summary: Optional[Dict[str, str]] = Field( + None, description='A map of operating system to status pairs' + ) + timestamp: Optional[datetime] = Field( + None, description='The timestamp when the commit was made' + ) + + +class GithubEnterprise(BaseModel): + avatar_url: str = Field(..., description='URL to the enterprise avatar') + created_at: datetime = Field(..., description='When the enterprise was created') + description: Optional[str] = Field(None, description='The enterprise description') + html_url: str = Field(..., description='The HTML URL of the enterprise') + id: int = Field(..., description='The enterprise ID') + name: str = Field(..., description='The enterprise name') + node_id: str = Field(..., description='The enterprise node ID') + slug: str = Field(..., description='The enterprise slug') + updated_at: datetime = Field( + ..., description='When the enterprise was last updated' + ) + website_url: Optional[str] = Field(None, description='The enterprise website URL') + + +class RepositorySelection(str, Enum): + selected = 'selected' + all = 'all' + + +class GithubOrganization(BaseModel): + avatar_url: str = Field(..., description="URL to the organization's avatar") + description: Optional[str] = Field(None, description='The organization description') + events_url: str = Field(..., description="The API URL of the organization's events") + hooks_url: str = Field(..., description="The API URL of the organization's hooks") + id: int = Field(..., description='The organization ID') + issues_url: str = Field(..., description="The API URL of the organization's issues") + login: str = Field(..., description="The organization's login name") + members_url: str = Field( + ..., description="The API URL of the organization's members" + ) + node_id: str = Field(..., 
description='The organization node ID') + public_members_url: str = Field( + ..., description="The API URL of the organization's public members" + ) + repos_url: str = Field( + ..., description="The API URL of the organization's repositories" + ) + url: str = Field(..., description='The API URL of the organization') + + +class State(str, Enum): + uploaded = 'uploaded' + open = 'open' + + +class Action(str, Enum): + published = 'published' + unpublished = 'unpublished' + created = 'created' + edited = 'edited' + deleted = 'deleted' + prereleased = 'prereleased' + released = 'released' + + +class Type7(str, Enum): + Bot = 'Bot' + User = 'User' + Organization = 'Organization' + + +class GithubUser(BaseModel): + avatar_url: str = Field(..., description="URL to the user's avatar") + gravatar_id: Optional[str] = Field(None, description="The user's gravatar ID") + html_url: str = Field(..., description='The HTML URL of the user') + id: int = Field(..., description="The user's ID") + login: str = Field(..., description="The user's login name") + node_id: str = Field(..., description="The user's node ID") + site_admin: bool = Field(..., description='Whether the user is a site admin') + type: Type7 = Field(..., description='The type of user') + url: str = Field(..., description='The API URL of the user') + + class IdeogramColorPalette1(BaseModel): name: str = Field(..., description='Name of the preset color palette') @@ -689,7 +1007,7 @@ class Includable(str, Enum): computer_call_output_output_image_url = 'computer_call_output.output.image_url' -class Type7(str, Enum): +class Type8(str, Enum): input_file = 'input_file' @@ -703,7 +1021,7 @@ class InputFileContent(BaseModel): filename: Optional[str] = Field( None, description='The name of the file to be sent to the model.' ) - type: Type7 = Field( + type: Type8 = Field( ..., description='The type of the input item. Always `input_file`.' ) @@ -714,7 +1032,7 @@ class Detail(str, Enum): auto = 'auto' -class Type8(str, Enum): +class Type9(str, Enum): input_image = 'input_image' @@ -730,7 +1048,7 @@ class InputImageContent(BaseModel): None, description='The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL.', ) - type: Type8 = Field( + type: Type9 = Field( ..., description='The type of the input item. Always `input_image`.' ) @@ -741,17 +1059,17 @@ class Role3(str, Enum): developer = 'developer' -class Type9(str, Enum): +class Type10(str, Enum): message = 'message' -class Type10(str, Enum): +class Type11(str, Enum): input_text = 'input_text' class InputTextContent(BaseModel): text: str = Field(..., description='The text input to the model.') - type: Type10 = Field( + type: Type11 = Field( ..., description='The type of the input item. Always `input_text`.' 
) @@ -923,7 +1241,7 @@ class ResourcePackType(str, Enum): constant_period = 'constant_period' -class Status4(str, Enum): +class Status5(str, Enum): toBeOnline = 'toBeOnline' online = 'online' expired = 'expired' @@ -949,7 +1267,7 @@ class ResourcePackSubscribeInfo(BaseModel): None, description='Resource package type (decreasing_total=decreasing total, constant_period=constant periodicity)', ) - status: Optional[Status4] = Field(None, description='Resource Package Status') + status: Optional[Status5] = Field(None, description='Resource Package Status') total_quantity: Optional[float] = Field(None, description='Total quantity') @@ -1113,7 +1431,7 @@ class LumaError(BaseModel): detail: Optional[str] = Field(None, description='The error message') -class Type11(str, Enum): +class Type12(str, Enum): generation = 'generation' @@ -1153,7 +1471,7 @@ class LumaImageRef(BaseModel): ) -class Type12(str, Enum): +class Type13(str, Enum): image = 'image' @@ -1223,6 +1541,36 @@ class LumaVideoModelOutputResolution( root: Union[LumaVideoModelOutputResolution1, str] +class MachineStats(BaseModel): + cpu_capacity: Optional[str] = Field(None, description='Total CPU on the machine.') + disk_capacity: Optional[str] = Field( + None, description='Total disk capacity on the machine.' + ) + gpu_type: Optional[str] = Field( + None, description='The GPU type. eg. NVIDIA Tesla K80' + ) + initial_cpu: Optional[str] = Field( + None, description='Initial CPU available before the job starts.' + ) + initial_disk: Optional[str] = Field( + None, description='Initial disk available before the job starts.' + ) + initial_ram: Optional[str] = Field( + None, description='Initial RAM available before the job starts.' + ) + machine_name: Optional[str] = Field(None, description='Name of the machine.') + memory_capacity: Optional[str] = Field( + None, description='Total memory on the machine.' + ) + os_version: Optional[str] = Field( + None, description='The operating system version. eg. Ubuntu Linux 20.04' + ) + pip_freeze: Optional[str] = Field(None, description='The pip freeze output') + vram_time_series: Optional[Dict[str, Any]] = Field( + None, description='Time series of VRAM usage.' + ) + + class MinimaxBaseResponse(BaseModel): status_code: int = Field( ..., @@ -1251,7 +1599,7 @@ class MinimaxFileRetrieveResponse(BaseModel): file: File -class Status5(str, Enum): +class Status6(str, Enum): Queueing = 'Queueing' Preparing = 'Preparing' Processing = 'Processing' @@ -1265,7 +1613,7 @@ class MinimaxTaskResultResponse(BaseModel): None, description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', ) - status: Status5 = Field( + status: Status6 = Field( ..., description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", ) @@ -1326,6 +1674,22 @@ class MinimaxVideoGenerationResponse(BaseModel): ) +class Modality(str, Enum): + MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED' + TEXT = 'TEXT' + IMAGE = 'IMAGE' + VIDEO = 'VIDEO' + AUDIO = 'AUDIO' + DOCUMENT = 'DOCUMENT' + + +class ModalityTokenCount(BaseModel): + modality: Optional[Modality] = None + tokenCount: Optional[int] = Field( + None, description='Number of tokens for the given modality.' 
+ ) + + class Truncation(str, Enum): disabled = 'disabled' auto = 'auto' @@ -1391,13 +1755,13 @@ class MoonvalleyTextToVideoInferenceParams(BaseModel): 0, description='Index of the conditioning frame' ) cooldown_steps: Optional[int] = Field( - None, description='Number of cooldown steps (calculated based on num_frames)' + 75, description='Number of cooldown steps (calculated based on num_frames)' ) fps: Optional[int] = Field( 24, description='Frames per second of the generated video' ) guidance_scale: Optional[float] = Field( - 12.5, description='Guidance scale for generation control' + 10, description='Guidance scale for generation control' ) height: Optional[int] = Field( 1080, description='Height of the generated video in pixels' @@ -1421,7 +1785,7 @@ class MoonvalleyTextToVideoInferenceParams(BaseModel): True, description='Whether to use timestep transformation' ) warmup_steps: Optional[int] = Field( - None, description='Number of warmup steps (calculated based on num_frames)' + 0, description='Number of warmup steps (calculated based on num_frames)' ) width: Optional[int] = Field( 1920, description='Width of the generated video in pixels' @@ -1463,10 +1827,10 @@ class MoonvalleyVideoToVideoInferenceParams(BaseModel): 0, description='Index of the conditioning frame' ) cooldown_steps: Optional[int] = Field( - None, description='Number of cooldown steps (calculated based on num_frames)' + 36, description='Number of cooldown steps (calculated based on num_frames)' ) guidance_scale: Optional[float] = Field( - 12.5, description='Guidance scale for generation control' + 15, description='Guidance scale for generation control' ) negative_prompt: Optional[str] = Field(None, description='Negative prompt text') seed: Optional[int] = Field( @@ -1486,7 +1850,7 @@ class MoonvalleyVideoToVideoInferenceParams(BaseModel): True, description='Whether to use timestep transformation' ) warmup_steps: Optional[int] = Field( - None, description='Number of warmup steps (calculated based on num_frames)' + 24, description='Number of warmup steps (calculated based on num_frames)' ) @@ -1507,6 +1871,34 @@ class MoonvalleyVideoToVideoRequest(BaseModel): ) +class NodeStatus(str, Enum): + NodeStatusActive = 'NodeStatusActive' + NodeStatusDeleted = 'NodeStatusDeleted' + NodeStatusBanned = 'NodeStatusBanned' + + +class NodeVersionIdentifier(BaseModel): + node_id: str = Field(..., description='The unique identifier of the node') + version: str = Field(..., description='The version of the node') + + +class NodeVersionStatus(str, Enum): + NodeVersionStatusActive = 'NodeVersionStatusActive' + NodeVersionStatusDeleted = 'NodeVersionStatusDeleted' + NodeVersionStatusBanned = 'NodeVersionStatusBanned' + NodeVersionStatusPending = 'NodeVersionStatusPending' + NodeVersionStatusFlagged = 'NodeVersionStatusFlagged' + + +class NodeVersionUpdateRequest(BaseModel): + changelog: Optional[str] = Field( + None, description='The changelog describing the version changes.' + ) + deprecated: Optional[bool] = Field( + None, description='Whether the version is deprecated.' 
+ ) + + class Moderation(str, Enum): low = 'low' auto = 'auto' @@ -1723,38 +2115,57 @@ class Object(str, Enum): response = 'response' -class Status6(str, Enum): +class Status7(str, Enum): completed = 'completed' failed = 'failed' in_progress = 'in_progress' incomplete = 'incomplete' -class Type13(str, Enum): +class Type14(str, Enum): output_audio = 'output_audio' class OutputAudioContent(BaseModel): data: str = Field(..., description='Base64-encoded audio data') transcript: str = Field(..., description='Transcript of the audio') - type: Type13 = Field(..., description='The type of output content') + type: Type14 = Field(..., description='The type of output content') class Role4(str, Enum): assistant = 'assistant' -class Type14(str, Enum): +class Type15(str, Enum): message = 'message' -class Type15(str, Enum): +class Type16(str, Enum): output_text = 'output_text' class OutputTextContent(BaseModel): text: str = Field(..., description='The text content') - type: Type15 = Field(..., description='The type of output content') + type: Type16 = Field(..., description='The type of output content') + + +class PersonalAccessToken(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='[Output Only]The date and time the token was created.' + ) + description: Optional[str] = Field( + None, + description="Optional. A more detailed description of the token's intended use.", + ) + id: Optional[UUID] = Field(None, description='Unique identifier for the GitCommit') + name: Optional[str] = Field( + None, + description='Required. The name of the token. Can be a simple description.', + ) + token: Optional[str] = Field( + None, + description='[Output Only]. The personal access token. Only returned during creation.', + ) class AspectRatio1(RootModel[float]): @@ -1961,7 +2372,7 @@ class PixverseVideoResponse(BaseModel): Resp: Optional[Resp1] = None -class Status7(int, Enum): +class Status8(int, Enum): integer_1 = 1 integer_5 = 5 integer_6 = 6 @@ -1980,7 +2391,7 @@ class Resp2(BaseModel): resolution_ratio: Optional[int] = None seed: Optional[int] = None size: Optional[int] = None - status: Optional[Status7] = Field( + status: Optional[Status8] = Field( None, description='Video generation status codes:\n* 1 - Generation successful\n* 5 - Generating\n* 6 - Deleted\n* 7 - Contents moderation failed\n* 8 - Generation failed\n', ) @@ -1994,6 +2405,17 @@ class PixverseVideoResultResponse(BaseModel): Resp: Optional[Resp2] = None +class PublisherStatus(str, Enum): + PublisherStatusActive = 'PublisherStatusActive' + PublisherStatusBanned = 'PublisherStatusBanned' + + +class PublisherUser(BaseModel): + email: Optional[str] = Field(None, description='The email address for this user.') + id: Optional[str] = Field(None, description='The unique id for this user.') + name: Optional[str] = Field(None, description='The name for this user.') + + class RgbItem(RootModel[int]): root: int = Field(..., ge=0, le=255) @@ -2020,13 +2442,13 @@ class ReasoningEffort(str, Enum): high = 'high' -class Status8(str, Enum): +class Status9(str, Enum): in_progress = 'in_progress' completed = 'completed' incomplete = 'incomplete' -class Type16(str, Enum): +class Type17(str, Enum): summary_text = 'summary_text' @@ -2035,12 +2457,12 @@ class SummaryItem(BaseModel): ..., description='A short summary of the reasoning used by the model when generating\nthe response.\n', ) - type: Type16 = Field( + type: Type17 = Field( ..., description='The type of the object. 
Always `summary_text`.\n' ) -class Type17(str, Enum): +class Type18(str, Enum): reasoning = 'reasoning' @@ -2048,16 +2470,31 @@ class ReasoningItem(BaseModel): id: str = Field( ..., description='The unique identifier of the reasoning content.\n' ) - status: Optional[Status8] = Field( + status: Optional[Status9] = Field( None, description='The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API.\n', ) summary: List[SummaryItem] = Field(..., description='Reasoning text contents.\n') - type: Type17 = Field( + type: Type18 = Field( ..., description='The type of the object. Always `reasoning`.\n' ) +class RecraftImageColor(BaseModel): + rgb: Optional[List[int]] = None + std: Optional[List[float]] = None + weight: Optional[float] = None + + +class RecraftImageFeatures(BaseModel): + nsfw_score: Optional[float] = None + + +class RecraftImageFormat(str, Enum): + webp = 'webp' + png = 'png' + + class Controls(BaseModel): artistic_level: Optional[int] = Field( None, @@ -2111,12 +2548,143 @@ class RecraftImageGenerationResponse(BaseModel): data: List[Datum3] = Field(..., description='Array of generated image information') +class RecraftImageStyle(str, Enum): + digital_illustration = 'digital_illustration' + icon = 'icon' + realistic_image = 'realistic_image' + vector_illustration = 'vector_illustration' + + +class RecraftImageSubStyle(str, Enum): + field_2d_art_poster = '2d_art_poster' + field_3d = '3d' + field_80s = '80s' + glow = 'glow' + grain = 'grain' + hand_drawn = 'hand_drawn' + infantile_sketch = 'infantile_sketch' + kawaii = 'kawaii' + pixel_art = 'pixel_art' + psychedelic = 'psychedelic' + seamless = 'seamless' + voxel = 'voxel' + watercolor = 'watercolor' + broken_line = 'broken_line' + colored_outline = 'colored_outline' + colored_shapes = 'colored_shapes' + colored_shapes_gradient = 'colored_shapes_gradient' + doodle_fill = 'doodle_fill' + doodle_offset_fill = 'doodle_offset_fill' + offset_fill = 'offset_fill' + outline = 'outline' + outline_gradient = 'outline_gradient' + uneven_fill = 'uneven_fill' + field_70s = '70s' + cartoon = 'cartoon' + doodle_line_art = 'doodle_line_art' + engraving = 'engraving' + flat_2 = 'flat_2' + kawaii_1 = 'kawaii' + line_art = 'line_art' + linocut = 'linocut' + seamless_1 = 'seamless' + b_and_w = 'b_and_w' + enterprise = 'enterprise' + hard_flash = 'hard_flash' + hdr = 'hdr' + motion_blur = 'motion_blur' + natural_light = 'natural_light' + studio_portrait = 'studio_portrait' + line_circuit = 'line_circuit' + field_2d_art_poster_2 = '2d_art_poster_2' + engraving_color = 'engraving_color' + flat_air_art = 'flat_air_art' + hand_drawn_outline = 'hand_drawn_outline' + handmade_3d = 'handmade_3d' + stickers_drawings = 'stickers_drawings' + plastic = 'plastic' + pictogram = 'pictogram' + + +class RecraftResponseFormat(str, Enum): + url = 'url' + b64_json = 'b64_json' + + +class RecraftTextLayoutItem(BaseModel): + bbox: List[List[float]] + text: str + + +class RecraftTransformModel(str, Enum): + refm1 = 'refm1' + recraft20b = 'recraft20b' + recraftv2 = 'recraftv2' + recraftv3 = 'recraftv3' + flux1_1pro = 'flux1_1pro' + flux1dev = 'flux1dev' + imagen3 = 'imagen3' + hidream_i1_dev = 'hidream_i1_dev' + + +class RecraftUserControls(BaseModel): + artistic_level: Optional[int] = None + background_color: Optional[RecraftImageColor] = None + colors: Optional[List[RecraftImageColor]] = None + no_text: Optional[bool] = None + + +class Attention(str, Enum): + low = 'low' + medium = 'medium' + high = 'high' + + 
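For readers scanning the generated Recraft models above, a minimal usage sketch of how they compose (assuming Pydantic v2 semantics, and assuming the generated module is importable as comfy_api_nodes.apis, a path not shown in this hunk):

    from comfy_api_nodes.apis import RecraftImageColor, RecraftUserControls

    # Nested construction: Pydantic validates field types when the models
    # are instantiated, so malformed inputs fail before any request is sent.
    controls = RecraftUserControls(
        artistic_level=2,
        background_color=RecraftImageColor(rgb=[255, 255, 255]),
        colors=[RecraftImageColor(rgb=[30, 144, 255], weight=0.7)],
        no_text=True,
    )

    # Serialize for an HTTP request body, dropping unset optional fields.
    payload = controls.model_dump(exclude_none=True)

Since every field on these control models is optional, partially specified payloads validate cleanly and exclude_none keeps the wire format minimal.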
+class Project(str, Enum): + comfyui = 'comfyui' + comfyui_frontend = 'comfyui_frontend' + desktop = 'desktop' + + +class ReleaseNote(BaseModel): + attention: Attention = Field( + ..., description='The attention level for this release' + ) + content: str = Field( + ..., description='The content of the release note in markdown format' + ) + id: int = Field(..., description='Unique identifier for the release note') + project: Project = Field( + ..., description='The project this release note belongs to' + ) + published_at: datetime = Field( + ..., description='When the release note was published' + ) + version: str = Field(..., description='The version of the release') + + class RenderingSpeed(str, Enum): BALANCED = 'BALANCED' TURBO = 'TURBO' QUALITY = 'QUALITY' +class Type19(str, Enum): + response_completed = 'response.completed' + + +class Type20(str, Enum): + response_content_part_added = 'response.content_part.added' + + +class Type21(str, Enum): + response_content_part_done = 'response.content_part.done' + + +class Type22(str, Enum): + response_created = 'response.created' + + class ResponseErrorCode(str, Enum): server_error = 'server_error' rate_limit_exceeded = 'rate_limit_exceeded' @@ -2138,12 +2706,27 @@ class ResponseErrorCode(str, Enum): image_file_not_found = 'image_file_not_found' -class Type18(str, Enum): +class Type23(str, Enum): + error = 'error' + + +class ResponseErrorEvent(BaseModel): + code: str = Field(..., description='The error code.\n') + message: str = Field(..., description='The error message.\n') + param: str = Field(..., description='The error parameter.\n') + type: Type23 = Field(..., description='The type of the event. Always `error`.\n') + + +class Type24(str, Enum): + response_failed = 'response.failed' + + +class Type25(str, Enum): json_object = 'json_object' class ResponseFormatJsonObject(BaseModel): - type: Type18 = Field( + type: Type25 = Field( ..., description='The type of response format being defined. Always `json_object`.', ) @@ -2156,16 +2739,32 @@ class ResponseFormatJsonSchemaSchema(BaseModel): ) -class Type19(str, Enum): +class Type26(str, Enum): text = 'text' class ResponseFormatText(BaseModel): - type: Type19 = Field( + type: Type26 = Field( ..., description='The type of response format being defined. Always `text`.' 
) +class Type27(str, Enum): + response_in_progress = 'response.in_progress' + + +class Type28(str, Enum): + response_incomplete = 'response.incomplete' + + +class Type29(str, Enum): + response_output_item_added = 'response.output_item.added' + + +class Type30(str, Enum): + response_output_item_done = 'response.output_item.done' + + class Truncation1(str, Enum): auto = 'auto' disabled = 'disabled' @@ -2200,10 +2799,6 @@ class Rodin3DCheckStatusRequest(BaseModel): ) -class Rodin3DCheckStatusResponse(BaseModel): - pass - - class Rodin3DDownloadRequest(BaseModel): task_uuid: str = Field(..., description='Task UUID') @@ -2235,6 +2830,13 @@ class RodinResourceItem(BaseModel): url: Optional[str] = Field(None, description='Download url') +class RodinStatusOptions(str, Enum): + Done = 'Done' + Failed = 'Failed' + Generating = 'Generating' + Waiting = 'Waiting' + + class RodinTierType(str, Enum): Regular = 'Regular' Sketch = 'Sketch' @@ -2325,6 +2927,7 @@ class RunwayTextToImageAspectRatioEnum(str, Enum): field_1808_768 = '1808:768' field_2112_912 = '2112:912' + class Model4(str, Enum): gen4_image = 'gen4_image' @@ -2350,6 +2953,38 @@ class RunwayTextToImageResponse(BaseModel): id: Optional[str] = Field(None, description='Task ID') +class Name(str, Enum): + content_moderation = 'content_moderation' + + +class StabilityContentModerationResponse(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new) you file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: Name = Field( + ..., + description='Our content moderation system has flagged some part of your request and subsequently denied it. You were not charged for this request. While this may at times be frustrating, it is necessary to maintain the integrity of our platform and ensure a safe experience for all users. 
If you would like to provide feedback, please use the [Support Form](https://kb.stability.ai/knowledge-base/kb-tickets/new).', + ) + + +class StabilityCreativity(RootModel[float]): + root: float = Field( + ..., + description='Controls the likelihood of creating additional details not heavily conditioned by the init image.', + ge=0.2, + le=0.5, + ) + + class StabilityError(BaseModel): errors: List[str] = Field( ..., @@ -2371,7 +3006,17 @@ class StabilityError(BaseModel): ) -class Status9(str, Enum): +class StabilityGenerationID(RootModel[str]): + root: str = Field( + ..., + description='The `id` of a generation, typically used for async generations, that can be used to check the status of the generation or retrieve the result.', + examples=['a6dc6c6e20acda010fe14d71f180658f2896ed9b4ec25aa99a6ff06c796987c4'], + max_length=64, + min_length=64, + ) + + +class Status10(str, Enum): in_progress = 'in-progress' @@ -2379,10 +3024,860 @@ class StabilityGetResultResponse202(BaseModel): id: Optional[str] = Field( None, description='The ID of the generation result.', examples=[1234567890] ) - status: Optional[Status9] = None + status: Optional[Status10] = None -class Type20(str, Enum): +class AspectRatio3(str, Enum): + field_21_9 = '21:9' + field_16_9 = '16:9' + field_3_2 = '3:2' + field_5_4 = '5:4' + field_1_1 = '1:1' + field_4_5 = '4:5' + field_2_3 = '2:3' + field_9_16 = '9:16' + field_9_21 = '9:21' + + +class Mode(str, Enum): + text_to_image = 'text-to-image' + image_to_image = 'image-to-image' + + +class Model5(str, Enum): + sd3_5_large = 'sd3.5-large' + sd3_5_large_turbo = 'sd3.5-large-turbo' + sd3_5_medium = 'sd3.5-medium' + + +class OutputFormat3(str, Enum): + png = 'png' + jpeg = 'jpeg' + + +class StylePreset(str, Enum): + enhance = 'enhance' + anime = 'anime' + photographic = 'photographic' + digital_art = 'digital-art' + comic_book = 'comic-book' + fantasy_art = 'fantasy-art' + line_art = 'line-art' + analog_film = 'analog-film' + neon_punk = 'neon-punk' + isometric = 'isometric' + low_poly = 'low-poly' + origami = 'origami' + modeling_compound = 'modeling-compound' + cinematic = 'cinematic' + field_3d_model = '3d-model' + pixel_art = 'pixel-art' + tile_texture = 'tile-texture' + + +class StabilityImageGenerationSD3Request(BaseModel): + aspect_ratio: Optional[AspectRatio3] = Field( + '1:1', + description='Controls the aspect ratio of the generated image. Defaults to 1:1.\n\n> **Important:** This parameter is only valid for **text-to-image** requests.', + ) + cfg_scale: Optional[float] = Field( + None, + description='How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt). The _Large_ and _Medium_ models use a default of `4`. 
The _Turbo_ model uses a default of `1`.', + ge=1.0, + le=10.0, + ) + image: Optional[StrictBytes] = Field( + None, + description='The image to use as the starting point for the generation.\n\nSupported formats:\n\n\n\n - jpeg\n - png\n - webp\n\nSupported dimensions:\n\n\n\n - Every side must be at least 64 pixels\n\n> **Important:** This parameter is only valid for **image-to-image** requests.', + ) + mode: Optional[Mode] = Field( + 'text-to-image', + description='Controls whether this is a text-to-image or image-to-image generation, which affects which parameters are required:\n- **text-to-image** requires only the `prompt` parameter\n- **image-to-image** requires the `prompt`, `image`, and `strength` parameters', + title='GenerationMode', + ) + model: Optional[Model5] = Field( + 'sd3.5-large', + description='The model to use for generation.\n\n- `sd3.5-large` requires 6.5 credits per generation\n- `sd3.5-large-turbo` requires 4 credits per generation\n- `sd3.5-medium` requires 3.5 credits per generation\n- As of April 17, 2025, `sd3-large`, `sd3-large-turbo` and `sd3-medium`\n\n\n\n are re-routed to their `sd3.5-[model version]` equivalent, at the same price.', + ) + negative_prompt: Optional[str] = Field( + None, + description='Keywords of what you **do not** wish to see in the output image.\nThis is an advanced feature.', + max_length=10000, + ) + output_format: Optional[OutputFormat3] = Field( + 'png', description='Dictates the `content-type` of the generated image.' + ) + prompt: str = Field( + ..., + description='What you wish to see in the output image. A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.', + max_length=10000, + min_length=1, + ) + seed: Optional[float] = Field( + 0, + description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)", + ge=0.0, + le=4294967294.0, + ) + strength: Optional[float] = Field( + None, + description='Sometimes referred to as _denoising_, this parameter controls how much influence the\n`image` parameter has on the generated image. A value of 0 would yield an image that\nis identical to the input. A value of 1 would be as if you passed in no image at all.\n\n> **Important:** This parameter is only valid for **image-to-image** requests.', + ge=0.0, + le=1.0, + ) + style_preset: Optional[StylePreset] = Field( + None, description='Guides the image model towards a particular style.'
+ ) + + +class FinishReason(str, Enum): + SUCCESS = 'SUCCESS' + CONTENT_FILTERED = 'CONTENT_FILTERED' + + +class StabilityImageGenrationSD3Response200(BaseModel): + finish_reason: FinishReason = Field( + ..., + description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', + examples=['SUCCESS'], + ) + image: str = Field( + ..., + description='The generated image, encoded to base64.', + examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], + ) + seed: Optional[float] = Field( + 0, + description='The seed used as random noise for this generation.', + examples=[343940597], + ge=0.0, + le=4294967294.0, + ) + + +class StabilityImageGenrationSD3Response400(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationSD3Response413(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationSD3Response422(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationSD3Response429(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationSD3Response500(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class OutputFormat4(str, Enum): + jpeg = 'jpeg' + png = 'png' + webp = 'webp' + + +class StabilityImageGenrationUpscaleConservativeRequest(BaseModel): + creativity: Optional[StabilityCreativity] = Field( + default_factory=lambda: StabilityCreativity.model_validate(0.35) + ) + image: StrictBytes = Field( + ..., + description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Every side must be at least 64 pixels\n- Total pixel count must be between 4,096 and 9,437,184 pixels\n- The aspect ratio must be between 1:2.5 and 2.5:1', + examples=['./some/image.png'], + ) + negative_prompt: Optional[str] = Field( + None, + description='A blurb of text describing what you **do not** wish to see in the output image.\nThis is an advanced feature.', + max_length=10000, + ) + output_format: Optional[OutputFormat4] = Field( + 'png', description='Dictates the `content-type` of the generated image.' + ) + prompt: str = Field( + ..., + description="What you wish to see in the output image. A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.\n\nTo control the weight of a given word use the format `(word:weight)`,\nwhere `word` is the word you'd like to control the weight of and `weight`\nis a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`\nwould convey a sky that was blue and green, but more green than blue.", + max_length=10000, + min_length=1, + ) + seed: Optional[float] = Field( + 0, + description="A specific value that is used to guide the 'randomness' of the generation. 
(Omit this parameter or pass `0` to use a random seed.)", + ge=0.0, + le=4294967294.0, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse200(BaseModel): + finish_reason: FinishReason = Field( + ..., + description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', + examples=['SUCCESS'], + ) + image: str = Field( + ..., + description='The generated image, encoded to base64.', + examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], + ) + seed: Optional[float] = Field( + 0, + description='The seed used as random noise for this generation.', + examples=[343940597], + ge=0.0, + le=4294967294.0, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse400(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse413(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse422(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse429(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleConservativeResponse500(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeRequest(BaseModel): + creativity: Optional[float] = Field( + 0.3, + description='Indicates how creative the model should be when upscaling an image.\nHigher values will result in more details being added to the image during upscaling.', + ge=0.1, + le=0.5, + ) + image: StrictBytes = Field( + ..., + description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Every side must be at least 64 pixels\n- Total pixel count must be between 4,096 and 1,048,576 pixels', + examples=['./some/image.png'], + ) + negative_prompt: Optional[str] = Field( + None, + description='A blurb of text describing what you **do not** wish to see in the output image.\nThis is an advanced feature.', + max_length=10000, + ) + output_format: Optional[OutputFormat4] = Field( + 'png', description='Dictates the `content-type` of the generated image.' + ) + prompt: str = Field( + ..., + description="What you wish to see in the output image. A strong, descriptive prompt that clearly defines\nelements, colors, and subjects will lead to better results.\n\nTo control the weight of a given word use the format `(word:weight)`,\nwhere `word` is the word you'd like to control the weight of and `weight`\nis a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`\nwould convey a sky that was blue and green, but more green than blue.", + max_length=10000, + min_length=1, + ) + seed: Optional[float] = Field( + 0, + description="A specific value that is used to guide the 'randomness' of the generation. (Omit this parameter or pass `0` to use a random seed.)", + ge=0.0, + le=4294967294.0, + ) + style_preset: Optional[StylePreset] = Field( + None, description='Guides the image model towards a particular style.' + ) + + +class StabilityImageGenrationUpscaleCreativeResponse200(BaseModel): + id: StabilityGenerationID + + +class StabilityImageGenrationUpscaleCreativeResponse400(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse413(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse422(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse429(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleCreativeResponse500(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastRequest(BaseModel): + image: StrictBytes = Field( + ..., + description='The image you wish to upscale.\n\nSupported Formats:\n- jpeg\n- png\n- webp\n\nValidation Rules:\n- Width must be between 32 and 1,536 pixels\n- Height must be between 32 and 1,536 pixels\n- Total pixel count must be between 1,024 and 1,048,576 pixels', + examples=['./some/image.png'], + ) + output_format: Optional[OutputFormat4] = Field( + 'png', description='Dictates the `content-type` of the generated image.' + ) + + +class StabilityImageGenrationUpscaleFastResponse200(BaseModel): + finish_reason: FinishReason = Field( + ..., + description='The reason the generation finished.\n\n- `SUCCESS` = successful generation.\n- `CONTENT_FILTERED` = successful generation, however the output violated our content moderation\npolicy and has been blurred as a result.', + examples=['SUCCESS'], + ) + image: str = Field( + ..., + description='The generated image, encoded to base64.', + examples=['AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1...'], + ) + seed: Optional[float] = Field( + 0, + description='The seed used as random noise for this generation.', + examples=[343940597], + ge=0.0, + le=4294967294.0, + ) + + +class StabilityImageGenrationUpscaleFastResponse400(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastResponse413(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastResponse422(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. 
Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastResponse429(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityImageGenrationUpscaleFastResponse500(BaseModel): + errors: List[str] = Field( + ..., + description='One or more error messages indicating what went wrong.', + examples=[['some-field: is required']], + min_length=1, + ) + id: str = Field( + ..., + description='A unique identifier associated with this error. Please include this in any [support tickets](https://kb.stability.ai/knowledge-base/kb-tickets/new)\nyou file, as it will greatly assist us in diagnosing the root cause of the problem.', + examples=['a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4'], + min_length=1, + ) + name: str = Field( + ..., + description='Short-hand name for an error, useful for discriminating between errors with the same status code.', + examples=['bad_request'], + min_length=1, + ) + + +class StabilityStabilityClientID(RootModel[str]): + root: str = Field( + ..., + description='The name of your application, used to help us communicate app-specific debugging or moderation issues to you.', + examples=['my-awesome-app'], + max_length=256, + ) + + +class StabilityStabilityClientUserID(RootModel[str]): + root: str = Field( + ..., + description='A unique identifier for your end user. Used to help us communicate user-specific debugging or moderation issues to you. 
Feel free to obfuscate this value to protect user privacy.', + examples=['DiscordUser#9999'], + max_length=256, + ) + + +class StabilityStabilityClientVersion(RootModel[str]): + root: str = Field( + ..., + description='The version of your application, used to help us communicate version-specific debugging or moderation issues to you.', + examples=['1.2.1'], + max_length=256, + ) + + +class StorageFile(BaseModel): + file_path: Optional[str] = Field(None, description='Path to the file in storage') + id: Optional[UUID] = Field( + None, description='Unique identifier for the storage file' + ) + public_url: Optional[str] = Field(None, description='Public URL') + + +class StripeAddress(BaseModel): + city: Optional[str] = None + country: Optional[str] = None + line1: Optional[str] = None + line2: Optional[str] = None + postal_code: Optional[str] = None + state: Optional[str] = None + + +class StripeAmountDetails(BaseModel): + tip: Optional[Dict[str, Any]] = None + + +class StripeBillingDetails(BaseModel): + address: Optional[StripeAddress] = None + email: Optional[str] = None + name: Optional[str] = None + phone: Optional[str] = None + tax_id: Optional[Any] = None + + +class Checks(BaseModel): + address_line1_check: Optional[Any] = None + address_postal_code_check: Optional[Any] = None + cvc_check: Optional[str] = None + + +class ExtendedAuthorization(BaseModel): + status: Optional[str] = None + + +class IncrementalAuthorization(BaseModel): + status: Optional[str] = None + + +class Multicapture(BaseModel): + status: Optional[str] = None + + +class NetworkToken(BaseModel): + used: Optional[bool] = None + + +class Overcapture(BaseModel): + maximum_amount_capturable: Optional[int] = None + status: Optional[str] = None + + +class StripeCardDetails(BaseModel): + amount_authorized: Optional[int] = None + authorization_code: Optional[Any] = None + brand: Optional[str] = None + checks: Optional[Checks] = None + country: Optional[str] = None + exp_month: Optional[int] = None + exp_year: Optional[int] = None + extended_authorization: Optional[ExtendedAuthorization] = None + fingerprint: Optional[str] = None + funding: Optional[str] = None + incremental_authorization: Optional[IncrementalAuthorization] = None + installments: Optional[Any] = None + last4: Optional[str] = None + mandate: Optional[Any] = None + multicapture: Optional[Multicapture] = None + network: Optional[str] = None + network_token: Optional[NetworkToken] = None + network_transaction_id: Optional[str] = None + overcapture: Optional[Overcapture] = None + regulated_status: Optional[str] = None + three_d_secure: Optional[Any] = None + wallet: Optional[Any] = None + + +class Object1(str, Enum): + charge = 'charge' + + +class Object2(str, Enum): + event = 'event' + + +class Type31(str, Enum): + payment_intent_succeeded = 'payment_intent.succeeded' + + +class StripeOutcome(BaseModel): + advice_code: Optional[Any] = None + network_advice_code: Optional[Any] = None + network_decline_code: Optional[Any] = None + network_status: Optional[str] = None + reason: Optional[Any] = None + risk_level: Optional[str] = None + risk_score: Optional[int] = None + seller_message: Optional[str] = None + type: Optional[str] = None + + +class Object3(str, Enum): + payment_intent = 'payment_intent' + + +class StripePaymentMethodDetails(BaseModel): + card: Optional[StripeCardDetails] = None + type: Optional[str] = None + + +class Card(BaseModel): + installments: Optional[Any] = None + mandate_options: Optional[Any] = None + network: Optional[Any] = None + 
+ request_three_d_secure: Optional[str] = None + + +class StripePaymentMethodOptions(BaseModel): + card: Optional[Card] = None + + +class StripeRefundList(BaseModel): + data: Optional[List[Dict[str, Any]]] = None + has_more: Optional[bool] = None + object: Optional[str] = None + total_count: Optional[int] = None + url: Optional[str] = None + + +class StripeRequestInfo(BaseModel): + id: Optional[str] = None + idempotency_key: Optional[str] = None + + +class StripeShipping(BaseModel): + address: Optional[StripeAddress] = None + carrier: Optional[str] = None + name: Optional[str] = None + phone: Optional[str] = None + tracking_number: Optional[str] = None + + +class Type32(str, Enum): json_schema = 'json_schema' @@ -2400,19 +3895,19 @@ class TextResponseFormatJsonSchema(BaseModel): False, description='Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n', ) - type: Type20 = Field( + type: Type32 = Field( ..., description='The type of response format being defined. Always `json_schema`.', ) -class Type21(str, Enum): +class Type33(str, Enum): function = 'function' class ToolChoiceFunction(BaseModel): name: str = Field(..., description='The name of the function to call.') - type: Type21 = Field( + type: Type33 = Field( ..., description='For function calling, the type is always `function`.' ) @@ -2423,7 +3918,7 @@ class ToolChoiceOptions(str, Enum): required = 'required' -class Type22(str, Enum): +class Type34(str, Enum): file_search = 'file_search' web_search_preview = 'web_search_preview' computer_use_preview = 'computer_use_preview' @@ -2431,7 +3926,7 @@ class ToolChoiceTypes(BaseModel): - type: Type22 = Field( + type: Type34 = Field( ..., description='The type of hosted tool the model should use. Learn more about\n[built-in tools](/docs/guides/tools).\n\nAllowed values are:\n- `file_search`\n- `web_search_preview`\n- `computer_use_preview`\n', ) @@ -2499,9 +3994,9 @@ class TripoModelStyle(str, Enum): class TripoModelVersion(str, Enum): - V2_5 = 'v2.5-20250123' - V2_0 = 'v2.0-20240919' - V1_4 = 'v1.4-20240625' + v2_5_20250123 = 'v2.5-20250123' + v2_0_20240919 = 'v2.0-20240919' + v1_4_20240625 = 'v1.4-20240625' class TripoMultiviewMode(str, Enum): @@ -2547,13 +4042,13 @@ class Code1(int, Enum): integer_0 = 0 -class Data8(BaseModel): +class Data9(BaseModel): task_id: str = Field(..., description='used for getTask') class TripoSuccessTask(BaseModel): code: Code1 - data: Data8 + data: Data9 class Topology(str, Enum): @@ -2570,7 +4065,7 @@ class Output(BaseModel): topology: Optional[Topology] = None -class Status10(str, Enum): +class Status11(str, Enum): queued = 'queued' running = 'running' success = 'success' @@ -2586,7 +4081,7 @@ class TripoTask(BaseModel): input: Dict[str, Any] output: Output progress: int = Field(..., ge=0, le=100) - status: Status10 + status: Status11 task_id: str type: str @@ -2650,6 +4145,18 @@ class TripoTypeTextureModel(str, Enum): texture_model = 'texture_model' +class User(BaseModel): + email: Optional[str] = Field(None, description='The email address for this user.') + id: Optional[str] = Field(None, description='The unique id for this user.') + isAdmin: Optional[bool] = Field( + None, description='Indicates if the user has admin privileges.'
+ ) + isApproved: Optional[bool] = Field( + None, description='Indicates if the user is approved.' + ) + name: Optional[str] = Field(None, description='The name for this user.') + + class Veo2GenVidPollRequest(BaseModel): operationName: str = Field( ..., @@ -2660,7 +4167,7 @@ class Veo2GenVidPollRequest(BaseModel): ) -class Error(BaseModel): +class Error1(BaseModel): code: Optional[int] = Field(None, description='Error code') message: Optional[str] = Field(None, description='Error message') @@ -2692,7 +4199,7 @@ class Response(BaseModel): class Veo2GenVidPollResponse(BaseModel): done: Optional[bool] = None - error: Optional[Error] = Field( + error: Optional[Error1] = Field( None, description='Error details if operation failed' ) name: Optional[str] = None @@ -2753,13 +4260,102 @@ class Veo2GenVidResponse(BaseModel): ) +class VeoGenVidPollRequest(BaseModel): + operationName: str = Field( + ..., + description='Full operation name (from predict response)', + examples=[ + 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/OPERATION_ID' + ], + ) + + +class Response1(BaseModel): + field_type: Optional[str] = Field( + None, + alias='@type', + examples=[ + 'type.googleapis.com/cloud.ai.large_models.vision.GenerateVideoResponse' + ], + ) + raiMediaFilteredCount: Optional[int] = Field( + None, description='Count of media filtered by responsible AI policies' + ) + raiMediaFilteredReasons: Optional[List[str]] = Field( + None, description='Reasons why media was filtered by responsible AI policies' + ) + videos: Optional[List[Video]] = None + + +class VeoGenVidPollResponse(BaseModel): + done: Optional[bool] = None + error: Optional[Error1] = Field( + None, description='Error details if operation failed' + ) + name: Optional[str] = None + response: Optional[Response1] = Field( + None, description='The actual prediction response if done is true' + ) + + +class Image2(BaseModel): + bytesBase64Encoded: str + gcsUri: Optional[str] = None + mimeType: Optional[str] = None + + +class Image3(BaseModel): + bytesBase64Encoded: Optional[str] = None + gcsUri: str + mimeType: Optional[str] = None + + +class Instance1(BaseModel): + image: Optional[Union[Image2, Image3]] = Field( + None, description='Optional image to guide video generation' + ) + prompt: str = Field(..., description='Text description of the video') + + +class Parameters1(BaseModel): + aspectRatio: Optional[str] = Field(None, examples=['16:9']) + durationSeconds: Optional[int] = None + enhancePrompt: Optional[bool] = None + generateAudio: Optional[bool] = Field( + None, + description='Generate audio for the video. 
Only supported by veo 3 models.', + ) + negativePrompt: Optional[str] = None + personGeneration: Optional[PersonGeneration1] = None + sampleCount: Optional[int] = None + seed: Optional[int] = None + storageUri: Optional[str] = Field( + None, description='Optional Cloud Storage URI to upload the video' + ) + + +class VeoGenVidRequest(BaseModel): + instances: Optional[List[Instance1]] = None + parameters: Optional[Parameters1] = None + + +class VeoGenVidResponse(BaseModel): + name: str = Field( + ..., + description='Operation resource name', + examples=[ + 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/a1b07c8e-7b5a-4aba-bb34-3e1ccb8afcc8' + ], + ) + + class SearchContextSize(str, Enum): low = 'low' medium = 'medium' high = 'high' -class Type23(str, Enum): +class Type35(str, Enum): web_search_preview = 'web_search_preview' web_search_preview_2025_03_11 = 'web_search_preview_2025_03_11' @@ -2775,30 +4371,348 @@ class WebSearchPreviewTool(BaseModel): ) -class Status11(str, Enum): +class Status12(str, Enum): in_progress = 'in_progress' searching = 'searching' completed = 'completed' failed = 'failed' -class Type24(str, Enum): +class Type36(str, Enum): web_search_call = 'web_search_call' class WebSearchToolCall(BaseModel): id: str = Field(..., description='The unique ID of the web search tool call.\n') - status: Status11 = Field( + status: Status12 = Field( ..., description='The status of the web search tool call.\n' ) - type: Type24 = Field( + type: Type36 = Field( ..., description='The type of the web search tool call. Always `web_search_call`.\n', ) -class CreateModelResponseProperties(ModelResponseProperties): - pass +class WorkflowRunStatus(str, Enum): + WorkflowRunStatusStarted = 'WorkflowRunStatusStarted' + WorkflowRunStatusFailed = 'WorkflowRunStatusFailed' + WorkflowRunStatusCompleted = 'WorkflowRunStatusCompleted' + + +class ActionJobResult(BaseModel): + action_job_id: Optional[str] = Field( + None, description='Identifier of the job this result belongs to' + ) + action_run_id: Optional[str] = Field( + None, description='Identifier of the run this result belongs to' + ) + author: Optional[str] = Field(None, description='The author of the commit') + avg_vram: Optional[int] = Field( + None, description='The average VRAM used by the job' + ) + branch_name: Optional[str] = Field( + None, description='Name of the relevant git branch' + ) + comfy_run_flags: Optional[str] = Field( + None, description='The comfy run flags. E.g. `--low-vram`' + ) + commit_hash: Optional[str] = Field(None, description='The hash of the commit') + commit_id: Optional[str] = Field(None, description='The ID of the commit') + commit_message: Optional[str] = Field(None, description='The message of the commit') + commit_time: Optional[int] = Field( + None, description='The Unix timestamp when the commit was made' + ) + cuda_version: Optional[str] = Field(None, description='CUDA version used') + end_time: Optional[int] = Field( + None, description='The end time of the job as a Unix timestamp.' + ) + git_repo: Optional[str] = Field(None, description='The repository name') + id: Optional[UUID] = Field(None, description='Unique identifier for the job result') + job_trigger_user: Optional[str] = Field( + None, description='The user who triggered the job.' 
+ ) + machine_stats: Optional[MachineStats] = None + operating_system: Optional[str] = Field(None, description='Operating system used') + peak_vram: Optional[int] = Field(None, description='The peak VRAM used by the job') + pr_number: Optional[str] = Field(None, description='The pull request number') + python_version: Optional[str] = Field(None, description='Python version used') + pytorch_version: Optional[str] = Field(None, description='PyTorch version used') + start_time: Optional[int] = Field( + None, description='The start time of the job as a Unix timestamp.' + ) + status: Optional[WorkflowRunStatus] = None + storage_file: Optional[StorageFile] = None + workflow_name: Optional[str] = Field(None, description='Name of the workflow') + + +class BFLCannyInputs(BaseModel): + canny_high_threshold: Optional[CannyHighThreshold] = Field( + default_factory=lambda: CannyHighThreshold.model_validate(200), + description='High threshold for Canny edge detection', + title='Canny High Threshold', + ) + canny_low_threshold: Optional[CannyLowThreshold] = Field( + default_factory=lambda: CannyLowThreshold.model_validate(50), + description='Low threshold for Canny edge detection', + title='Canny Low Threshold', + ) + control_image: Optional[str] = Field( + None, + description='Base64 encoded image to use as control input if no preprocessed image is provided', + title='Control Image', + ) + guidance: Optional[Guidance] = Field( + default_factory=lambda: Guidance.model_validate(30), + description='Guidance strength for the image generation process', + title='Guidance', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image. Can be 'jpeg' or 'png'.", + ) + preprocessed_image: Optional[str] = Field( + None, + description='Optional pre-processed image that will bypass the control preprocessing step', + title='Preprocessed Image', + ) + prompt: str = Field( + ..., + description='Text prompt for image generation', + examples=['ein fantastisches bild'], + title='Prompt', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt', + title='Prompt Upsampling', + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.', + ge=0, + le=6, + title='Safety Tolerance', + ) + seed: Optional[int] = Field( + None, + description='Optional seed for reproducibility', + examples=[42], + title='Seed', + ) + steps: Optional[Steps] = Field( + default_factory=lambda: Steps.model_validate(50), + description='Number of steps for the image generation process', + title='Steps', + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + + +class BFLDepthInputs(BaseModel): + control_image: Optional[str] = Field( + None, + description='Base64 encoded image to use as control input', + title='Control Image', + ) + guidance: Optional[Guidance] = Field( + default_factory=lambda: Guidance.model_validate(15), + description='Guidance strength for the image generation process', + title='Guidance', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image.
Can be 'jpeg' or 'png'.", + ) + preprocessed_image: Optional[str] = Field( + None, + description='Optional pre-processed image that will bypass the control preprocessing step', + title='Preprocessed Image', + ) + prompt: str = Field( + ..., + description='Text prompt for image generation', + examples=['ein fantastisches bild'], + title='Prompt', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt', + title='Prompt Upsampling', + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.', + ge=0, + le=6, + title='Safety Tolerance', + ) + seed: Optional[int] = Field( + None, + description='Optional seed for reproducibility', + examples=[42], + title='Seed', + ) + steps: Optional[Steps] = Field( + default_factory=lambda: Steps.model_validate(50), + description='Number of steps for the image generation process', + title='Steps', + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + + +class BFLFluxProExpandInputs(BaseModel): + bottom: Optional[Bottom] = Field( + 0, + description='Number of pixels to expand at the bottom of the image', + title='Bottom', + ) + guidance: Optional[Guidance2] = Field( + default_factory=lambda: Guidance2.model_validate(60), + description='Guidance strength for the image generation process', + title='Guidance', + ) + image: str = Field( + ..., + description='A Base64-encoded string representing the image you wish to expand.', + title='Image', + ) + left: Optional[Left] = Field( + 0, + description='Number of pixels to expand on the left side of the image', + title='Left', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image. Can be 'jpeg' or 'png'.", + ) + prompt: Optional[str] = Field( + '', + description='The description of the changes you want to make. This text guides the expansion process, allowing you to specify features, styles, or modifications for the expanded areas.', + examples=['ein fantastisches bild'], + title='Prompt', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation', + title='Prompt Upsampling', + ) + right: Optional[Right] = Field( + 0, + description='Number of pixels to expand on the right side of the image', + title='Right', + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. 
Between 0 and 6, 0 being most strict, 6 being least strict.', + examples=[2], + ge=0, + le=6, + title='Safety Tolerance', + ) + seed: Optional[int] = Field( + None, description='Optional seed for reproducibility', title='Seed' + ) + steps: Optional[Steps2] = Field( + default_factory=lambda: Steps2.model_validate(50), + description='Number of steps for the image generation process', + examples=[50], + title='Steps', + ) + top: Optional[Top] = Field( + 0, description='Number of pixels to expand at the top of the image', title='Top' + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + + +class BFLFluxProFillInputs(BaseModel): + guidance: Optional[Guidance2] = Field( + default_factory=lambda: Guidance2.model_validate(60), + description='Guidance strength for the image generation process', + title='Guidance', + ) + image: str = Field( + ..., + description='A Base64-encoded string representing the image you wish to modify. Can contain alpha mask if desired.', + title='Image', + ) + mask: Optional[str] = Field( + None, + description='A Base64-encoded string representing a mask for the areas you want to modify in the image. The mask should be the same dimensions as the image and in black and white. Black areas (0%) indicate no modification, while white areas (100%) specify areas for inpainting. Optional if you provide an alpha mask in the original image. Validation: The endpoint verifies that the dimensions of the mask match the original image.', + title='Mask', + ) + output_format: Optional[BFLOutputFormat] = Field( + 'jpeg', + description="Output format for the generated image. Can be 'jpeg' or 'png'.", + ) + prompt: Optional[str] = Field( + '', + description='The description of the changes you want to make. This text guides the inpainting process, allowing you to specify features, styles, or modifications for the masked area.', + examples=['ein fantastisches bild'], + title='Prompt', + ) + prompt_upsampling: Optional[bool] = Field( + False, + description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation', + title='Prompt Upsampling', + ) + safety_tolerance: Optional[int] = Field( + 2, + description='Tolerance level for input and output moderation. 
Between 0 and 6, 0 being most strict, 6 being least strict.', + examples=[2], + ge=0, + le=6, + title='Safety Tolerance', + ) + seed: Optional[int] = Field( + None, description='Optional seed for reproducibility', title='Seed' + ) + steps: Optional[Steps2] = Field( + default_factory=lambda: Steps2.model_validate(50), + description='Number of steps for the image generation process', + examples=[50], + title='Steps', + ) + webhook_secret: Optional[str] = Field( + None, + description='Optional secret for webhook signature verification', + title='Webhook Secret', + ) + webhook_url: Optional[WebhookUrl] = Field( + None, description='URL to receive webhook notifications', title='Webhook Url' + ) + + +class BFLHTTPValidationError(BaseModel): + detail: Optional[List[BFLValidationError]] = Field(None, title='Detail') + + +class BulkNodeVersionsRequest(BaseModel): + node_versions: List[NodeVersionIdentifier] = Field( + ..., description='List of node ID and version pairs to retrieve' + ) + + +CreateModelResponseProperties = ModelResponseProperties class GeminiInlineData(BaseModel): @@ -2841,6 +4755,125 @@ class GeminiSystemInstructionContent(BaseModel): ) +class GeminiUsageMetadata(BaseModel): + cachedContentTokenCount: Optional[int] = Field( + None, + description='Output only. Number of tokens in the cached part in the input (the cached content).', + ) + candidatesTokenCount: Optional[int] = Field( + None, description='Number of tokens in the response(s).' + ) + candidatesTokensDetails: Optional[List[ModalityTokenCount]] = Field( + None, description='Breakdown of candidate tokens by modality.' + ) + promptTokenCount: Optional[int] = Field( + None, + description='Number of tokens in the request. When cachedContent is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content.', + ) + promptTokensDetails: Optional[List[ModalityTokenCount]] = Field( + None, description='Breakdown of prompt tokens by modality.' + ) + thoughtsTokenCount: Optional[int] = Field( + None, description='Number of tokens present in thoughts output.' + ) + toolUsePromptTokenCount: Optional[int] = Field( + None, description='Number of tokens present in tool-use prompt(s).' 
+ ) + + +class GithubInstallation(BaseModel): + access_tokens_url: str = Field(..., description='The API URL for access tokens') + account: GithubUser + app_id: int = Field(..., description='The GitHub App ID') + created_at: datetime = Field(..., description='When the installation was created') + events: List[str] = Field( + ..., description='The events the installation subscribes to' + ) + html_url: str = Field(..., description='The HTML URL of the installation') + id: int = Field(..., description='The installation ID') + permissions: Dict[str, Any] = Field(..., description='The installation permissions') + repositories_url: str = Field(..., description='The API URL for repositories') + repository_selection: RepositorySelection = Field( + ..., description='Repository selection for the installation' + ) + single_file_name: Optional[str] = Field( + None, description='The single file name if applicable' + ) + target_id: int = Field(..., description='The target ID') + target_type: str = Field(..., description='The target type') + updated_at: datetime = Field( + ..., description='When the installation was last updated' + ) + + +class GithubReleaseAsset(BaseModel): + browser_download_url: str = Field(..., description='The browser download URL') + content_type: str = Field(..., description='The content type of the asset') + created_at: datetime = Field(..., description='When the asset was created') + download_count: int = Field(..., description='The number of downloads') + id: int = Field(..., description='The asset ID') + label: Optional[str] = Field(None, description='The label of the asset') + name: str = Field(..., description='The name of the asset') + node_id: str = Field(..., description='The asset node ID') + size: int = Field(..., description='The size of the asset in bytes') + state: State = Field(..., description='The state of the asset') + updated_at: datetime = Field(..., description='When the asset was last updated') + uploader: GithubUser + + +class Release(BaseModel): + assets: List[GithubReleaseAsset] = Field(..., description='Array of release assets') + assets_url: Optional[str] = Field(None, description='The URL to the release assets') + author: GithubUser + body: Optional[str] = Field(None, description='The release notes/body') + created_at: datetime = Field(..., description='When the release was created') + draft: bool = Field(..., description='Whether the release is a draft') + html_url: str = Field(..., description='The HTML URL of the release') + id: int = Field(..., description='The ID of the release') + name: Optional[str] = Field(None, description='The name of the release') + node_id: str = Field(..., description='The node ID of the release') + prerelease: bool = Field(..., description='Whether the release is a prerelease') + published_at: Optional[datetime] = Field( + None, description='When the release was published' + ) + tag_name: str = Field(..., description='The tag name of the release') + tarball_url: str = Field(..., description='URL to the tarball') + target_commitish: str = Field( + ..., description='The branch or commit the release was created from' + ) + upload_url: Optional[str] = Field( + None, description='The URL to upload release assets' + ) + url: str = Field(..., description='The API URL of the release') + zipball_url: str = Field(..., description='URL to the zipball') + + +class GithubRepository(BaseModel): + clone_url: str = Field(..., description='The clone URL of the repository') + created_at: datetime = Field(..., description='When the 
repository was created') + default_branch: str = Field(..., description='The default branch of the repository') + description: Optional[str] = Field(None, description='The repository description') + fork: bool = Field(..., description='Whether the repository is a fork') + full_name: str = Field( + ..., description='The full name of the repository (owner/repo)' + ) + git_url: str = Field(..., description='The git URL of the repository') + html_url: str = Field(..., description='The HTML URL of the repository') + id: int = Field(..., description='The repository ID') + name: str = Field(..., description='The name of the repository') + node_id: str = Field(..., description='The repository node ID') + owner: GithubUser + private: bool = Field(..., description='Whether the repository is private') + pushed_at: datetime = Field( + ..., description='When the repository was last pushed to' + ) + ssh_url: str = Field(..., description='The SSH URL of the repository') + updated_at: datetime = Field( + ..., description='When the repository was last updated' + ) + url: str = Field(..., description='The API URL of the repository') + + class IdeogramV3EditRequest(BaseModel): color_palette: Optional[IdeogramColorPalette] = None image: Optional[StrictBytes] = Field( @@ -3276,6 +5309,52 @@ class MoonvalleyTextToImageRequest(BaseModel): webhook_url: Optional[str] = None +class NodeVersion(BaseModel): + changelog: Optional[str] = Field( + None, description='Summary of changes made in this version' + ) + comfy_node_extract_status: Optional[str] = Field( + None, description='The status of comfy node extraction process.' + ) + createdAt: Optional[datetime] = Field( + None, description='The date and time the version was created.' + ) + dependencies: Optional[List[str]] = Field( + None, description='A list of pip dependencies required by the node.' + ) + deprecated: Optional[bool] = Field( + None, description='Indicates if this version is deprecated.' + ) + downloadUrl: Optional[str] = Field( + None, description='[Output Only] URL to download this version of the node' + ) + id: Optional[str] = None + node_id: Optional[str] = Field( + None, description='The unique identifier of the node.' + ) + status: Optional[NodeVersionStatus] = None + status_reason: Optional[str] = Field( + None, description='The reason for the status change.' + ) + supported_accelerators: Optional[List[str]] = Field( + None, + description='List of accelerators (e.g. CUDA, DirectML, ROCm) that this node supports', + ) + supported_comfyui_frontend_version: Optional[str] = Field( + None, description='Supported versions of ComfyUI frontend' + ) + supported_comfyui_version: Optional[str] = Field( + None, description='Supported versions of ComfyUI' + ) + supported_os: Optional[List[str]] = Field( + None, description='List of operating systems that this node supports' + ) + version: Optional[str] = Field( + None, + description='The version identifier, following semantic versioning. 
Must be unique for the node.', + ) + + class OutputContent(RootModel[Union[OutputTextContent, OutputAudioContent]]): root: Union[OutputTextContent, OutputAudioContent] @@ -3283,7 +5362,7 @@ class OutputContent(RootModel[Union[OutputTextContent, OutputAudioContent]]): class OutputMessage(BaseModel): content: List[OutputContent] = Field(..., description='The content of the message') role: Role4 = Field(..., description='The role of the message') - type: Type14 = Field(..., description='The type of output item') + type: Type15 = Field(..., description='The type of output item') class PikaBodyGenerate22I2vGenerate22I2vPost(BaseModel): @@ -3333,6 +5412,16 @@ class PikaHTTPValidationError(BaseModel): detail: Optional[List[PikaValidationError]] = Field(None, title='Detail') +class PublisherMember(BaseModel): + id: Optional[str] = Field( + None, description='The unique identifier for the publisher member.' + ) + role: Optional[str] = Field( + None, description='The role of the user in the publisher.' + ) + user: Optional[PublisherUser] = None + + class Reasoning(BaseModel): effort: Optional[ReasoningEffort] = 'medium' generate_summary: Optional[GenerateSummary] = Field( @@ -3345,13 +5434,88 @@ class Reasoning(BaseModel): ) +class RecraftImage(BaseModel): + b64_json: Optional[str] = None + features: Optional[RecraftImageFeatures] = None + image_id: UUID + revised_prompt: Optional[str] = None + url: Optional[str] = None + + +class RecraftProcessImageRequest(BaseModel): + image: StrictBytes + image_format: Optional[RecraftImageFormat] = None + response_format: Optional[RecraftResponseFormat] = None + + +class RecraftProcessImageResponse(BaseModel): + created: int + credits: int + image: RecraftImage + + +class RecraftTextLayout(RootModel[List[RecraftTextLayoutItem]]): + root: List[RecraftTextLayoutItem] + + +class RecraftTransformImageWithMaskRequest(BaseModel): + block_nsfw: Optional[bool] = None + calculate_features: Optional[bool] = None + image: StrictBytes + image_format: Optional[RecraftImageFormat] = None + mask: StrictBytes + model: Optional[RecraftTransformModel] = None + n: Optional[int] = None + negative_prompt: Optional[str] = None + prompt: str + response_format: Optional[RecraftResponseFormat] = None + style: Optional[RecraftImageStyle] = None + style_id: Optional[UUID] = None + substyle: Optional[RecraftImageSubStyle] = None + text_layout: Optional[RecraftTextLayout] = None + + +class ResponseContentPartAddedEvent(BaseModel): + content_index: int = Field( + ..., description='The index of the content part that was added.' + ) + item_id: str = Field( + ..., description='The ID of the output item that the content part was added to.' + ) + output_index: int = Field( + ..., + description='The index of the output item that the content part was added to.', + ) + part: OutputContent + type: Type20 = Field( + ..., description='The type of the event. Always `response.content_part.added`.' + ) + + +class ResponseContentPartDoneEvent(BaseModel): + content_index: int = Field( + ..., description='The index of the content part that is done.' + ) + item_id: str = Field( + ..., description='The ID of the output item that the content part was added to.' + ) + output_index: int = Field( + ..., + description='The index of the output item that the content part was added to.', + ) + part: OutputContent + type: Type21 = Field( + ..., description='The type of the event. Always `response.content_part.done`.' 
+ ) + + class ResponseError(BaseModel): code: ResponseErrorCode message: str = Field(..., description='A human-readable description of the error.') class Rodin3DDownloadResponse(BaseModel): - list: Optional[RodinResourceItem] = None + list: Optional[List[RodinResourceItem]] = None class Rodin3DGenerateRequest(BaseModel): @@ -3371,6 +5535,11 @@ class Rodin3DGenerateResponse(BaseModel): uuid: Optional[str] = Field(None, description='Task UUID') +class RodinCheckStatusJobItem(BaseModel): + status: Optional[RodinStatusOptions] = None + uuid: Optional[str] = Field(None, description='sub uuid') + + class RunwayImageToVideoRequest(BaseModel): duration: RunwayDurationEnum model: RunwayModelEnum @@ -3384,6 +5553,109 @@ class RunwayImageToVideoRequest(BaseModel): ) +class StripeCharge(BaseModel): + amount: Optional[int] = None + amount_captured: Optional[int] = None + amount_refunded: Optional[int] = None + application: Optional[str] = None + application_fee: Optional[str] = None + application_fee_amount: Optional[int] = None + balance_transaction: Optional[str] = None + billing_details: Optional[StripeBillingDetails] = None + calculated_statement_descriptor: Optional[str] = None + captured: Optional[bool] = None + created: Optional[int] = None + currency: Optional[str] = None + customer: Optional[str] = None + description: Optional[str] = None + destination: Optional[Any] = None + dispute: Optional[Any] = None + disputed: Optional[bool] = None + failure_balance_transaction: Optional[Any] = None + failure_code: Optional[Any] = None + failure_message: Optional[Any] = None + fraud_details: Optional[Dict[str, Any]] = None + id: Optional[str] = None + invoice: Optional[Any] = None + livemode: Optional[bool] = None + metadata: Optional[Dict[str, Any]] = None + object: Optional[Object1] = None + on_behalf_of: Optional[Any] = None + order: Optional[Any] = None + outcome: Optional[StripeOutcome] = None + paid: Optional[bool] = None + payment_intent: Optional[str] = None + payment_method: Optional[str] = None + payment_method_details: Optional[StripePaymentMethodDetails] = None + radar_options: Optional[Dict[str, Any]] = None + receipt_email: Optional[str] = None + receipt_number: Optional[str] = None + receipt_url: Optional[str] = None + refunded: Optional[bool] = None + refunds: Optional[StripeRefundList] = None + review: Optional[Any] = None + shipping: Optional[StripeShipping] = None + source: Optional[Any] = None + source_transfer: Optional[Any] = None + statement_descriptor: Optional[Any] = None + statement_descriptor_suffix: Optional[Any] = None + status: Optional[str] = None + transfer_data: Optional[Any] = None + transfer_group: Optional[Any] = None + + +class StripeChargeList(BaseModel): + data: Optional[List[StripeCharge]] = None + has_more: Optional[bool] = None + object: Optional[str] = None + total_count: Optional[int] = None + url: Optional[str] = None + + +class StripePaymentIntent(BaseModel): + amount: Optional[int] = None + amount_capturable: Optional[int] = None + amount_details: Optional[StripeAmountDetails] = None + amount_received: Optional[int] = None + application: Optional[str] = None + application_fee_amount: Optional[int] = None + automatic_payment_methods: Optional[Any] = None + canceled_at: Optional[int] = None + cancellation_reason: Optional[str] = None + capture_method: Optional[str] = None + charges: Optional[StripeChargeList] = None + client_secret: Optional[str] = None + confirmation_method: Optional[str] = None + created: Optional[int] = None + currency: Optional[str] = 
None + customer: Optional[str] = None + description: Optional[str] = None + id: Optional[str] = None + invoice: Optional[str] = None + last_payment_error: Optional[Any] = None + latest_charge: Optional[str] = None + livemode: Optional[bool] = None + metadata: Optional[Dict[str, Any]] = None + next_action: Optional[Any] = None + object: Optional[Object3] = None + on_behalf_of: Optional[Any] = None + payment_method: Optional[str] = None + payment_method_configuration_details: Optional[Any] = None + payment_method_options: Optional[StripePaymentMethodOptions] = None + payment_method_types: Optional[List[str]] = None + processing: Optional[Any] = None + receipt_email: Optional[str] = None + review: Optional[Any] = None + setup_future_usage: Optional[Any] = None + shipping: Optional[StripeShipping] = None + source: Optional[Any] = None + statement_descriptor: Optional[Any] = None + statement_descriptor_suffix: Optional[Any] = None + status: Optional[str] = None + transfer_data: Optional[Any] = None + transfer_group: Optional[Any] = None + + class TextResponseFormatConfiguration( RootModel[ Union[ @@ -3411,6 +5683,22 @@ class Tool( ] = Field(..., discriminator='type') +class BulkNodeVersionResult(BaseModel): + error_message: Optional[str] = Field( + None, + description='Error message if retrieval failed (only present if status is error)', + ) + identifier: NodeVersionIdentifier + node_version: Optional[NodeVersion] = None + status: Status = Field(..., description='Status of the retrieval operation') + + +class BulkNodeVersionsResponse(BaseModel): + node_versions: List[BulkNodeVersionResult] = Field( + ..., description='List of retrieved node versions with their status' + ) + + class EasyInputMessage(BaseModel): content: Union[str, InputMessageContentList] = Field( ..., @@ -3439,6 +5727,16 @@ class GeminiGenerateContentRequest(BaseModel): videoMetadata: Optional[GeminiVideoMetadata] = None +class GithubReleaseWebhook(BaseModel): + action: Action = Field(..., description='The action performed on the release') + enterprise: Optional[GithubEnterprise] = None + installation: Optional[GithubInstallation] = None + organization: Optional[GithubOrganization] = None + release: Release = Field(..., description='The release object') + repository: GithubRepository + sender: GithubUser + + class ImagenGenerateImageRequest(BaseModel): instances: List[ImagenImageGenerationInstance] parameters: ImagenImageGenerationParameters @@ -3447,8 +5745,8 @@ class ImagenGenerateImageRequest(BaseModel): class InputMessage(BaseModel): content: Optional[InputMessageContentList] = None role: Optional[Role3] = None - status: Optional[Status2] = None - type: Optional[Type9] = None + status: Optional[Status3] = None + type: Optional[Type10] = None class Item( @@ -3519,6 +5817,70 @@ class OutputItem( ] +class Publisher(BaseModel): + createdAt: Optional[datetime] = Field( + None, description='The date and time the publisher was created.' + ) + description: Optional[str] = None + id: Optional[str] = Field( + None, + description="The unique identifier for the publisher. It's akin to a username. Should be lowercase.", + ) + logo: Optional[str] = Field(None, description="URL to the publisher's logo.") + members: Optional[List[PublisherMember]] = Field( + None, description='A list of members in the publisher.' 
+ ) + name: Optional[str] = None + source_code_repo: Optional[str] = None + status: Optional[PublisherStatus] = None + support: Optional[str] = None + website: Optional[str] = None + + +class RecraftGenerateImageResponse(BaseModel): + created: int + credits: int + data: List[RecraftImage] + + +class RecraftImageToImageRequest(BaseModel): + block_nsfw: Optional[bool] = None + calculate_features: Optional[bool] = None + controls: Optional[RecraftUserControls] = None + image: StrictBytes + image_format: Optional[RecraftImageFormat] = None + model: Optional[RecraftTransformModel] = None + n: Optional[int] = None + negative_prompt: Optional[str] = None + prompt: str + response_format: Optional[RecraftResponseFormat] = None + strength: float + style: Optional[RecraftImageStyle] = None + style_id: Optional[UUID] = None + substyle: Optional[RecraftImageSubStyle] = None + text_layout: Optional[RecraftTextLayout] = None + + +class ResponseOutputItemAddedEvent(BaseModel): + item: OutputItem + output_index: int = Field( + ..., description='The index of the output item that was added.\n' + ) + type: Type29 = Field( + ..., description='The type of the event. Always `response.output_item.added`.\n' + ) + + +class ResponseOutputItemDoneEvent(BaseModel): + item: OutputItem + output_index: int = Field( + ..., description='The index of the output item that was marked done.\n' + ) + type: Type30 = Field( + ..., description='The type of the event. Always `response.output_item.done`.\n' + ) + + class Text(BaseModel): format: Optional[TextResponseFormatConfiguration] = None @@ -3552,6 +5914,28 @@ class ResponseProperties(BaseModel): ) +class Rodin3DCheckStatusResponse(BaseModel): + jobs: Optional[List[RodinCheckStatusJobItem]] = Field( + None, description='Details for the generation status.' + ) + + +class Data8(BaseModel): + object: Optional[StripePaymentIntent] = None + + +class StripeEvent(BaseModel): + api_version: Optional[str] = None + created: Optional[int] = None + data: Data8 + id: str + livemode: Optional[bool] = None + object: Object2 + pending_webhooks: Optional[int] = None + request: Optional[StripeRequestInfo] = None + type: Type31 + + class GeminiCandidate(BaseModel): citationMetadata: Optional[GeminiCitationMetadata] = None content: Optional[GeminiContent] = None @@ -3562,12 +5946,67 @@ class GeminiCandidate(BaseModel): class GeminiGenerateContentResponse(BaseModel): candidates: Optional[List[GeminiCandidate]] = None promptFeedback: Optional[GeminiPromptFeedback] = None + usageMetadata: Optional[GeminiUsageMetadata] = None class InputItem(RootModel[Union[EasyInputMessage, Item]]): root: Union[EasyInputMessage, Item] +class Node(BaseModel): + author: Optional[str] = None + banner_url: Optional[str] = Field(None, description="URL to the node's banner.") + category: Optional[str] = Field(None, description='The category of the node.') + created_at: Optional[datetime] = Field( + None, description='The date and time when the node was created' + ) + description: Optional[str] = None + downloads: Optional[int] = Field( + None, description='The number of downloads of the node.' + ) + github_stars: Optional[int] = Field( + None, description='Number of stars on the GitHub repository.' + ) + icon: Optional[str] = Field(None, description="URL to the node's icon.") + id: Optional[str] = Field(None, description='The unique identifier of the node.') + latest_version: Optional[NodeVersion] = None + license: Optional[str] = Field( + None, description="The path to the LICENSE file in the node's repository." 
+ ) + name: Optional[str] = Field(None, description='The display name of the node.') + preempted_comfy_node_names: Optional[List[str]] = Field( + None, description='A list of Comfy node names that are preempted by this node.' + ) + publisher: Optional[Publisher] = None + rating: Optional[float] = Field(None, description='The average rating of the node.') + repository: Optional[str] = Field(None, description="URL to the node's repository.") + search_ranking: Optional[int] = Field( + None, + description="A numerical value representing the node's search ranking, used for sorting search results.", + ) + status: Optional[NodeStatus] = None + status_detail: Optional[str] = Field( + None, description='The status detail of the node.' + ) + supported_accelerators: Optional[List[str]] = Field( + None, + description='List of accelerators (e.g. CUDA, DirectML, ROCm) that this node supports', + ) + supported_comfyui_frontend_version: Optional[str] = Field( + None, description='Supported versions of ComfyUI frontend' + ) + supported_comfyui_version: Optional[str] = Field( + None, description='Supported versions of ComfyUI' + ) + supported_os: Optional[List[str]] = Field( + None, description='List of operating systems that this node supports' + ) + tags: Optional[List[str]] = None + translations: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description='Translations of node metadata in different languages.' + ) + + class OpenAICreateResponse(CreateModelResponseProperties, ResponseProperties): include: Optional[List[Includable]] = Field( None, @@ -3615,8 +6054,73 @@ class OpenAIResponse(ModelResponseProperties, ResponseProperties): parallel_tool_calls: Optional[bool] = Field( True, description='Whether to allow the model to run tool calls in parallel.\n' ) - status: Optional[Status6] = Field( + status: Optional[Status7] = Field( None, description='The status of the response generation. One of `completed`, `failed`, `in_progress`, or `incomplete`.', ) usage: Optional[ResponseUsage] = None + + +class ResponseCompletedEvent(BaseModel): + response: OpenAIResponse + type: Type19 = Field( + ..., description='The type of the event. Always `response.completed`.' + ) + + +class ResponseCreatedEvent(BaseModel): + response: OpenAIResponse + type: Type22 = Field( + ..., description='The type of the event. Always `response.created`.' + ) + + +class ResponseFailedEvent(BaseModel): + response: OpenAIResponse + type: Type24 = Field( + ..., description='The type of the event. Always `response.failed`.\n' + ) + + +class ResponseInProgressEvent(BaseModel): + response: OpenAIResponse + type: Type27 = Field( + ..., description='The type of the event. Always `response.in_progress`.\n' + ) + + +class ResponseIncompleteEvent(BaseModel): + response: OpenAIResponse + type: Type28 = Field( + ..., description='The type of the event. 
Always `response.incomplete`.\n' + ) + + +class OpenAIResponseStreamEvent( + RootModel[ + Union[ + ResponseCreatedEvent, + ResponseInProgressEvent, + ResponseCompletedEvent, + ResponseFailedEvent, + ResponseIncompleteEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseErrorEvent, + ] + ] +): + root: Union[ + ResponseCreatedEvent, + ResponseInProgressEvent, + ResponseCompletedEvent, + ResponseFailedEvent, + ResponseIncompleteEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseErrorEvent, + ] = Field(..., description='Events that can be emitted during response streaming') diff --git a/comfy_api_nodes/apis/tripo_api.py b/comfy_api_nodes/apis/tripo_api.py index 626e8d277..9f43d4d09 100644 --- a/comfy_api_nodes/apis/tripo_api.py +++ b/comfy_api_nodes/apis/tripo_api.py @@ -127,7 +127,7 @@ class TripoTextToModelRequest(BaseModel): type: TripoTaskType = Field(TripoTaskType.TEXT_TO_MODEL, description='Type of task') prompt: str = Field(..., description='The text prompt describing the model to generate', max_length=1024) negative_prompt: Optional[str] = Field(None, description='The negative text prompt', max_length=1024) - model_version: Optional[TripoModelVersion] = TripoModelVersion.V2_5 + model_version: Optional[TripoModelVersion] = TripoModelVersion.v2_5_20250123 face_limit: Optional[int] = Field(None, description='The number of faces to limit the generation to') texture: Optional[bool] = Field(True, description='Whether to apply texture to the generated model') pbr: Optional[bool] = Field(True, description='Whether to apply PBR to the generated model') diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index df846d5dd..97bfe20e6 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -8,10 +8,10 @@ from typing import Optional from comfy.comfy_types.node_typing import IO, ComfyNodeABC from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis import ( - Veo2GenVidRequest, - Veo2GenVidResponse, - Veo2GenVidPollRequest, - Veo2GenVidPollResponse + VeoGenVidRequest, + VeoGenVidResponse, + VeoGenVidPollRequest, + VeoGenVidPollResponse ) from comfy_api_nodes.apis.client import ( ApiEndpoint, @@ -35,7 +35,7 @@ def convert_image_to_base64(image: torch.Tensor): return tensor_to_base64_string(scaled_image) -def get_video_url_from_response(poll_response: Veo2GenVidPollResponse) -> Optional[str]: +def get_video_url_from_response(poll_response: VeoGenVidPollResponse) -> Optional[str]: if ( poll_response.response and hasattr(poll_response.response, "videos") @@ -130,6 +130,14 @@ class VeoVideoGenerationNode(ComfyNodeABC): "default": None, "tooltip": "Optional reference image to guide video generation", }), + "model": ( + IO.COMBO, + { + "options": ["veo-2.0-generate-001"], + "default": "veo-2.0-generate-001", + "tooltip": "Veo 2 model to use for video generation", + }, + ), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", @@ -141,7 +149,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): RETURN_TYPES = (IO.VIDEO,) FUNCTION = "generate_video" CATEGORY = "api node/video/Veo" - DESCRIPTION = "Generates videos from text prompts using Google's Veo API" + DESCRIPTION = "Generates videos from text prompts using Google's Veo 2 API" API_NODE = True def generate_video( @@ -154,6 +162,8 @@ class VeoVideoGenerationNode(ComfyNodeABC): person_generation="ALLOW", 
seed=0, image=None, + model="veo-2.0-generate-001", + generate_audio=False, unique_id: Optional[str] = None, **kwargs, ): @@ -188,16 +198,19 @@ class VeoVideoGenerationNode(ComfyNodeABC): parameters["negativePrompt"] = negative_prompt if seed > 0: parameters["seed"] = seed + # Only add generateAudio for Veo 3 models + if "veo-3.0" in model: + parameters["generateAudio"] = generate_audio # Initial request to start video generation initial_operation = SynchronousOperation( endpoint=ApiEndpoint( - path="/proxy/veo/generate", + path=f"/proxy/veo/{model}/generate", method=HttpMethod.POST, - request_model=Veo2GenVidRequest, - response_model=Veo2GenVidResponse + request_model=VeoGenVidRequest, + response_model=VeoGenVidResponse ), - request=Veo2GenVidRequest( + request=VeoGenVidRequest( instances=instances, parameters=parameters ), @@ -223,16 +236,16 @@ class VeoVideoGenerationNode(ComfyNodeABC): # Define the polling operation poll_operation = PollingOperation( poll_endpoint=ApiEndpoint( - path="/proxy/veo/poll", + path=f"/proxy/veo/{model}/poll", method=HttpMethod.POST, - request_model=Veo2GenVidPollRequest, - response_model=Veo2GenVidPollResponse + request_model=VeoGenVidPollRequest, + response_model=VeoGenVidPollResponse ), completed_statuses=["completed"], failed_statuses=[], # No failed statuses, we'll handle errors after polling status_extractor=status_extractor, progress_extractor=progress_extractor, - request=Veo2GenVidPollRequest( + request=VeoGenVidPollRequest( operationName=operation_name ), auth_kwargs=kwargs, @@ -298,11 +311,64 @@ class VeoVideoGenerationNode(ComfyNodeABC): return (VideoFromFile(video_io),) -# Register the node +class Veo3VideoGenerationNode(VeoVideoGenerationNode): + """ + Generates videos from text prompts using Google's Veo 3 API. + + Supported models: + - veo-3.0-generate-001 + - veo-3.0-fast-generate-001 + + This node extends the base Veo node with Veo 3 specific features including + audio generation and fixed 8-second duration. + """ + + @classmethod + def INPUT_TYPES(s): + parent_input = super().INPUT_TYPES() + + # Update model options for Veo 3 + parent_input["optional"]["model"] = ( + IO.COMBO, + { + "options": ["veo-3.0-generate-001", "veo-3.0-fast-generate-001"], + "default": "veo-3.0-generate-001", + "tooltip": "Veo 3 model to use for video generation", + }, + ) + + # Add generateAudio parameter + parent_input["optional"]["generate_audio"] = ( + IO.BOOLEAN, + { + "default": False, + "tooltip": "Generate audio for the video. 
Supported by all Veo 3 models.", + } + ) + + # Update duration constraints for Veo 3 (only 8 seconds supported) + parent_input["optional"]["duration_seconds"] = ( + IO.INT, + { + "default": 8, + "min": 8, + "max": 8, + "step": 1, + "display": "number", + "tooltip": "Duration of the output video in seconds (Veo 3 only supports 8 seconds)", + }, + ) + + return parent_input + + +# Register the nodes NODE_CLASS_MAPPINGS = { "VeoVideoGenerationNode": VeoVideoGenerationNode, + "Veo3VideoGenerationNode": Veo3VideoGenerationNode, } NODE_DISPLAY_NAME_MAPPINGS = { - "VeoVideoGenerationNode": "Google Veo2 Video Generation", + "VeoVideoGenerationNode": "Google Veo 2 Video Generation", + "Veo3VideoGenerationNode": "Google Veo 3 Video Generation", } From 5be6fd09ffb46cfdff240fb5b96dd8c06b2a0344 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 5 Aug 2025 15:48:56 +0800 Subject: [PATCH 0403/1073] Update template to 0.1.48 (#9182) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ffa7dce65..470060ab4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.47 +comfyui-workflow-templates==0.1.48 comfyui-embedded-docs==0.2.4 torch torchsde From d044a243986700aae552acdebf7e767ae8282e37 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 5 Aug 2025 03:12:27 -0700 Subject: [PATCH 0404/1073] Fix default shift and any latent size for qwen image model. (#9186) --- comfy/ldm/qwen_image/model.py | 9 +++++---- comfy/supported_models.py | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index ff631a60f..c15ab8e40 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -8,7 +8,7 @@ from einops import repeat from comfy.ldm.lightricks.model import TimestepEmbedding, Timesteps from comfy.ldm.modules.attention import optimized_attention_masked from comfy.ldm.flux.layers import EmbedND - +import comfy.ldm.common_dit class GELU(nn.Module): def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True, dtype=None, device=None, operations=None): @@ -364,8 +364,9 @@ class QwenImageTransformer2DModel(nn.Module): image_rotary_emb = self.pos_embeds(x, context) - orig_shape = x.shape - hidden_states = x.view(orig_shape[0], orig_shape[1], orig_shape[-2] // 2, 2, orig_shape[-1] // 2, 2) + hidden_states = comfy.ldm.common_dit.pad_to_patch_size(x, (1, self.patch_size, self.patch_size)) + orig_shape = hidden_states.shape + hidden_states = hidden_states.view(orig_shape[0], orig_shape[1], orig_shape[-2] // 2, 2, orig_shape[-1] // 2, 2) hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5) hidden_states = hidden_states.reshape(orig_shape[0], (orig_shape[-2] // 2) * (orig_shape[-1] // 2), orig_shape[1] * 4) @@ -396,4 +397,4 @@ class QwenImageTransformer2DModel(nn.Module): hidden_states = hidden_states.view(orig_shape[0], orig_shape[-2] // 2, orig_shape[-1] // 2, orig_shape[1], 2, 2) hidden_states = hidden_states.permute(0, 3, 1, 4, 2, 5) - return hidden_states.reshape(orig_shape) + return hidden_states.reshape(orig_shape)[:, :, :, :x.shape[-2], :x.shape[-1]] diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 880055bd3..156ff9e26 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1237,7 +1237,7 @@ class QwenImage(supported_models_base.BASE): 
sampling_settings = { "multiplier": 1.0, - "shift": 2.6, + "shift": 1.15, } memory_usage_factor = 1.8 #TODO From da1ad9b5163fb848f3ec87ccc4fd0c8069f6eff0 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 5 Aug 2025 19:24:12 +0800 Subject: [PATCH 0405/1073] Update template to 0.1.51 (#9187) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 470060ab4..9a6b04cf2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.48 +comfyui-workflow-templates==0.1.51 comfyui-embedded-docs==0.2.4 torch torchsde From 32a95bba8ac91e8a610c35ce4d9963d2453118c1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 5 Aug 2025 07:33:02 -0400 Subject: [PATCH 0406/1073] ComfyUI version 0.3.49 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 7b29e338d..5e2d09c81 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.48" +__version__ = "0.3.49" diff --git a/pyproject.toml b/pyproject.toml index 256677fad..3c530ba85 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.48" +version = "0.3.49" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From d8c51ba15aef6b0df86a7ea0203881be55d7579b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 5 Aug 2025 04:41:18 -0700 Subject: [PATCH 0407/1073] Add Qwen Image model to readme. (#9191) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2abd8e600..119098f5c 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) - [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/) + - [Qwen Image](https://comfyanonymous.github.io/ComfyUI_examples/qwen_image/) - Image Editing Models - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model) From 9126c0cfe49508a64c429f97b45664b241aab3f2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 6 Aug 2025 01:07:04 -0700 Subject: [PATCH 0408/1073] Qwen Image model merging node. 
(#9202) --- .../nodes_model_merging_model_specific.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/comfy_extras/nodes_model_merging_model_specific.py b/comfy_extras/nodes_model_merging_model_specific.py index 2c93cd84f..55eb3ccfe 100644 --- a/comfy_extras/nodes_model_merging_model_specific.py +++ b/comfy_extras/nodes_model_merging_model_specific.py @@ -314,6 +314,29 @@ class ModelMergeCosmosPredict2_14B(comfy_extras.nodes_model_merging.ModelMergeBl return {"required": arg_dict} +class ModelMergeQwenImage(comfy_extras.nodes_model_merging.ModelMergeBlocks): + CATEGORY = "advanced/model_merging/model_specific" + + @classmethod + def INPUT_TYPES(s): + arg_dict = { "model1": ("MODEL",), + "model2": ("MODEL",)} + + argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) + + arg_dict["pos_embeds."] = argument + arg_dict["img_in."] = argument + arg_dict["txt_norm."] = argument + arg_dict["txt_in."] = argument + arg_dict["time_text_embed."] = argument + + for i in range(60): + arg_dict["transformer_blocks.{}.".format(i)] = argument + + arg_dict["proj_out."] = argument + + return {"required": arg_dict} + NODE_CLASS_MAPPINGS = { "ModelMergeSD1": ModelMergeSD1, "ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks @@ -329,4 +352,5 @@ NODE_CLASS_MAPPINGS = { "ModelMergeWAN2_1": ModelMergeWAN2_1, "ModelMergeCosmosPredict2_2B": ModelMergeCosmosPredict2_2B, "ModelMergeCosmosPredict2_14B": ModelMergeCosmosPredict2_14B, + "ModelMergeQwenImage": ModelMergeQwenImage, } From 4c3e57b0ae9fb7ff1322977915efe7e98544d15d Mon Sep 17 00:00:00 2001 From: flybirdxx <1119577418@qq.com> Date: Thu, 7 Aug 2025 01:23:11 +0800 Subject: [PATCH 0409/1073] Fixed an issue where qwenLora could not be loaded properly. (#9208) --- comfy/lora.py | 9 +++++++++ comfy/weight_adapter/lora.py | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index 387d5c52a..6686b7229 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -293,6 +293,15 @@ def model_lora_keys_unet(model, key_map={}): key_lora = k[len("diffusion_model."):-len(".weight")] key_map["{}".format(key_lora)] = k + if isinstance(model, comfy.model_base.QwenImage): + for k in sdk: + if k.startswith("diffusion_model.") and k.endswith(".weight"): #QwenImage lora format + key_lora = k[len("diffusion_model."):-len(".weight")] + # Direct mapping for transformer_blocks format (QwenImage LoRA format) + key_map["{}".format(key_lora)] = k + # Support transformer prefix format + key_map["transformer.{}".format(key_lora)] = k + return key_map diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py index 729dbd9e6..47aa17d13 100644 --- a/comfy/weight_adapter/lora.py +++ b/comfy/weight_adapter/lora.py @@ -96,6 +96,7 @@ class LoRAAdapter(WeightAdapterBase): diffusers3_lora = "{}.lora.up.weight".format(x) mochi_lora = "{}.lora_B".format(x) transformers_lora = "{}.lora_linear_layer.up.weight".format(x) + qwen_default_lora = "{}.lora_B.default.weight".format(x) A_name = None if regular_lora in lora.keys(): @@ -122,6 +123,10 @@ class LoRAAdapter(WeightAdapterBase): A_name = transformers_lora B_name = "{}.lora_linear_layer.down.weight".format(x) mid_name = None + elif qwen_default_lora in lora.keys(): + A_name = qwen_default_lora + B_name = "{}.lora_A.default.weight".format(x) + mid_name = None if A_name is not None: mid = None From 32691b16f4e1a897e461e77f9d6dceba2d6f0cd1 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Thu, 7 Aug 2025 01:26:29 +0800 Subject: [PATCH 0410/1073] Update template 
to 0.1.52 (#9206) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9a6b04cf2..d6926d610 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.51 +comfyui-workflow-templates==0.1.52 comfyui-embedded-docs==0.2.4 torch torchsde From 37d620a6b85f61b824363ed8170db373726ca45a Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Wed, 6 Aug 2025 16:52:39 -0700 Subject: [PATCH 0411/1073] Update frontend to v1.24.3 (#9175) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d6926d610..2f4692b03 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.23.4 +comfyui-frontend-package==1.24.4 comfyui-workflow-templates==0.1.52 comfyui-embedded-docs==0.2.4 torch From 05df2df489f6b237f63c5f7d42a943ae2be417e9 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 7 Aug 2025 08:20:40 -0700 Subject: [PATCH 0412/1073] Fix RepeatLatentBatch not working on multi dim latents. (#9227) --- nodes.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nodes.py b/nodes.py index 9bedbcaca..9448f9c1b 100644 --- a/nodes.py +++ b/nodes.py @@ -1229,12 +1229,12 @@ class RepeatLatentBatch: s = samples.copy() s_in = samples["samples"] - s["samples"] = s_in.repeat((amount, 1,1,1)) + s["samples"] = s_in.repeat((amount,) + ((1,) * (s_in.ndim - 1))) if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1: masks = samples["noise_mask"] if masks.shape[0] < s_in.shape[0]: - masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]] - s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1)) + masks = masks.repeat((math.ceil(s_in.shape[0] / masks.shape[0]),) + ((1,) * (masks.ndim - 1)))[:s_in.shape[0]] + s["noise_mask"] = samples["noise_mask"].repeat((amount,) + ((1,) * (samples["noise_mask"].ndim - 1))) if "batch_index" in s: offset = max(s["batch_index"]) - min(s["batch_index"]) + 1 s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]] From 42974a448c39af50c5f72d8c70267f9fe2971cd2 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Thu, 7 Aug 2025 14:54:09 -0700 Subject: [PATCH 0413/1073] _ui.py import torchaudio safety check (#9234) * Added safety around torchaudio import in _ui.py * Trusted cursor too much, fixed torchaudio bool --- comfy_api/latest/_ui.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py index 6b8a39d58..61597038f 100644 --- a/comfy_api/latest/_ui.py +++ b/comfy_api/latest/_ui.py @@ -9,7 +9,11 @@ from typing import Type import av import numpy as np import torch -import torchaudio +try: + import torchaudio + TORCH_AUDIO_AVAILABLE = True +except ImportError: + TORCH_AUDIO_AVAILABLE = False from PIL import Image as PILImage from PIL.PngImagePlugin import PngInfo @@ -302,6 +306,8 @@ class AudioSaveHelper: # Resample if necessary if sample_rate != audio["sample_rate"]: + if not TORCH_AUDIO_AVAILABLE: + raise Exception("torchaudio is not available; cannot resample audio.") waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate) # Create output with specified format From bf2a1b5b1ef72b240454f3ac44f5209af45efe00 Mon Sep 17 00:00:00 2001 From: Alexander Piskun 
<13381981+bigcat88@users.noreply.github.com> Date: Fri, 8 Aug 2025 06:37:50 +0300 Subject: [PATCH 0414/1073] async API nodes (#9129) * converted API nodes to async * converted BFL API nodes to async * fixed client bug; converted gemini, ideogram, minimax * fixed client bug; converted openai nodes * fixed client bug; converted moonvalley, pika nodes * fixed client bug; converted kling, luma nodes * converted pixverse, rodin nodes * converted tripo, veo2 * converted recraft nodes * add lost log_request_response call --- comfy_api_nodes/apinode_utils.py | 152 ++--- comfy_api_nodes/apis/client.py | 901 +++++++++++----------------- comfy_api_nodes/nodes_bfl.py | 134 +++-- comfy_api_nodes/nodes_gemini.py | 4 +- comfy_api_nodes/nodes_ideogram.py | 22 +- comfy_api_nodes/nodes_kling.py | 130 ++-- comfy_api_nodes/nodes_luma.py | 70 ++- comfy_api_nodes/nodes_minimax.py | 14 +- comfy_api_nodes/nodes_moonvalley.py | 38 +- comfy_api_nodes/nodes_openai.py | 28 +- comfy_api_nodes/nodes_pika.py | 63 +- comfy_api_nodes/nodes_pixverse.py | 46 +- comfy_api_nodes/nodes_recraft.py | 44 +- comfy_api_nodes/nodes_rodin.py | 147 ++--- comfy_api_nodes/nodes_runway.py | 54 +- comfy_api_nodes/nodes_stability.py | 23 +- comfy_api_nodes/nodes_tripo.py | 69 ++- comfy_api_nodes/nodes_veo2.py | 15 +- 18 files changed, 878 insertions(+), 1076 deletions(-) diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index 788e2803f..f953f86df 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -1,4 +1,5 @@ from __future__ import annotations +import aiohttp import io import logging import mimetypes @@ -21,7 +22,6 @@ from server import PromptServer import numpy as np from PIL import Image -import requests import torch import math import base64 @@ -30,7 +30,7 @@ from io import BytesIO import av -def download_url_to_video_output(video_url: str, timeout: int = None) -> VideoFromFile: +async def download_url_to_video_output(video_url: str, timeout: int = None) -> VideoFromFile: """Downloads a video from a URL and returns a `VIDEO` output. Args: @@ -39,7 +39,7 @@ def download_url_to_video_output(video_url: str, timeout: int = None) -> VideoFr Returns: A Comfy node `VIDEO` output. """ - video_io = download_url_to_bytesio(video_url, timeout) + video_io = await download_url_to_bytesio(video_url, timeout) if video_io is None: error_msg = f"Failed to download video from {video_url}" logging.error(error_msg) @@ -62,7 +62,7 @@ def downscale_image_tensor(image, total_pixels=1536 * 1024) -> torch.Tensor: return s -def validate_and_cast_response( +async def validate_and_cast_response( response, timeout: int = None, node_id: Union[str, None] = None ) -> torch.Tensor: """Validates and casts a response to a torch.Tensor. 
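
Every helper converted in this hunk becomes a coroutine, so call sites must now await them from an async context. A minimal caller-side sketch, assuming the async helper above; `fetch_result` and its argument are hypothetical and not part of this patch:

from comfy_api_nodes.apinode_utils import download_url_to_video_output

async def fetch_result(video_url: str):
    # download_url_to_video_output is now a coroutine: calling it without
    # `await` would return an unawaited coroutine object, not a VIDEO output.
    video = await download_url_to_video_output(video_url, timeout=300)
    return (video,)
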
@@ -86,35 +86,24 @@ def validate_and_cast_response( image_tensors: list[torch.Tensor] = [] # Process each image in the data array - for image_data in data: - image_url = image_data.url - b64_data = image_data.b64_json + async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session: + for img_data in data: + img_bytes: bytes + if img_data.b64_json: + img_bytes = base64.b64decode(img_data.b64_json) + elif img_data.url: + if node_id: + PromptServer.instance.send_progress_text(f"Result URL: {img_data.url}", node_id) + async with session.get(img_data.url) as resp: + if resp.status != 200: + raise ValueError("Failed to download generated image") + img_bytes = await resp.read() + else: + raise ValueError("Invalid image payload – neither URL nor base64 data present.") - if not image_url and not b64_data: - raise ValueError("No image was generated in the response") - - if b64_data: - img_data = base64.b64decode(b64_data) - img = Image.open(io.BytesIO(img_data)) - - elif image_url: - if node_id: - PromptServer.instance.send_progress_text( - f"Result URL: {image_url}", node_id - ) - img_response = requests.get(image_url, timeout=timeout) - if img_response.status_code != 200: - raise ValueError("Failed to download the image") - img = Image.open(io.BytesIO(img_response.content)) - - img = img.convert("RGBA") - - # Convert to numpy array, normalize to float32 between 0 and 1 - img_array = np.array(img).astype(np.float32) / 255.0 - img_tensor = torch.from_numpy(img_array) - - # Add to list of tensors - image_tensors.append(img_tensor) + pil_img = Image.open(BytesIO(img_bytes)).convert("RGBA") + arr = np.asarray(pil_img).astype(np.float32) / 255.0 + image_tensors.append(torch.from_numpy(arr)) return torch.stack(image_tensors, dim=0) @@ -175,7 +164,7 @@ def mimetype_to_extension(mime_type: str) -> str: return mime_type.split("/")[-1].lower() -def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO: +async def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO: """Downloads content from a URL using requests and returns it as BytesIO. Args: @@ -185,9 +174,11 @@ def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO: Returns: BytesIO object containing the downloaded content. 
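    Raises:
        aiohttp.ClientResponseError: If the server responds with a 4XX or 5XX
            status (raised by resp.raise_for_status() in the body below).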
""" - response = requests.get(url, stream=True, timeout=timeout) - response.raise_for_status() # Raises HTTPError for bad responses (4XX or 5XX) - return BytesIO(response.content) + timeout_cfg = aiohttp.ClientTimeout(total=timeout) if timeout else None + async with aiohttp.ClientSession(timeout=timeout_cfg) as session: + async with session.get(url) as resp: + resp.raise_for_status() # Raises HTTPError for bad responses (4XX or 5XX) + return BytesIO(await resp.read()) def bytesio_to_image_tensor(image_bytesio: BytesIO, mode: str = "RGBA") -> torch.Tensor: @@ -210,15 +201,15 @@ def bytesio_to_image_tensor(image_bytesio: BytesIO, mode: str = "RGBA") -> torch return torch.from_numpy(image_array).unsqueeze(0) -def download_url_to_image_tensor(url: str, timeout: int = None) -> torch.Tensor: +async def download_url_to_image_tensor(url: str, timeout: int = None) -> torch.Tensor: """Downloads an image from a URL and returns a [B, H, W, C] tensor.""" - image_bytesio = download_url_to_bytesio(url, timeout) + image_bytesio = await download_url_to_bytesio(url, timeout) return bytesio_to_image_tensor(image_bytesio) -def process_image_response(response: requests.Response) -> torch.Tensor: +def process_image_response(response_content: bytes | str) -> torch.Tensor: """Uses content from a Response object and converts it to a torch.Tensor""" - return bytesio_to_image_tensor(BytesIO(response.content)) + return bytesio_to_image_tensor(BytesIO(response_content)) def _tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image: @@ -336,10 +327,10 @@ def text_filepath_to_data_uri(filepath: str) -> str: return f"data:{mime_type};base64,{base64_string}" -def upload_file_to_comfyapi( +async def upload_file_to_comfyapi( file_bytes_io: BytesIO, filename: str, - upload_mime_type: str, + upload_mime_type: Optional[str], auth_kwargs: Optional[dict[str, str]] = None, ) -> str: """ @@ -354,7 +345,10 @@ def upload_file_to_comfyapi( Returns: The download URL for the uploaded file. 
""" - request_object = UploadRequest(file_name=filename, content_type=upload_mime_type) + if upload_mime_type is None: + request_object = UploadRequest(file_name=filename) + else: + request_object = UploadRequest(file_name=filename, content_type=upload_mime_type) operation = SynchronousOperation( endpoint=ApiEndpoint( path="/customers/storage", @@ -366,12 +360,8 @@ def upload_file_to_comfyapi( auth_kwargs=auth_kwargs, ) - response: UploadResponse = operation.execute() - upload_response = ApiClient.upload_file( - response.upload_url, file_bytes_io, content_type=upload_mime_type - ) - upload_response.raise_for_status() - + response: UploadResponse = await operation.execute() + await ApiClient.upload_file(response.upload_url, file_bytes_io, content_type=upload_mime_type) return response.download_url @@ -399,7 +389,7 @@ def video_to_base64_string( return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8") -def upload_video_to_comfyapi( +async def upload_video_to_comfyapi( video: VideoInput, auth_kwargs: Optional[dict[str, str]] = None, container: VideoContainer = VideoContainer.MP4, @@ -439,9 +429,7 @@ def upload_video_to_comfyapi( video.save_to(video_bytes_io, format=container, codec=codec) video_bytes_io.seek(0) - return upload_file_to_comfyapi( - video_bytes_io, filename, upload_mime_type, auth_kwargs - ) + return await upload_file_to_comfyapi(video_bytes_io, filename, upload_mime_type, auth_kwargs) def audio_tensor_to_contiguous_ndarray(waveform: torch.Tensor) -> np.ndarray: @@ -501,7 +489,7 @@ def audio_ndarray_to_bytesio( return audio_bytes_io -def upload_audio_to_comfyapi( +async def upload_audio_to_comfyapi( audio: AudioInput, auth_kwargs: Optional[dict[str, str]] = None, container_format: str = "mp4", @@ -527,7 +515,7 @@ def upload_audio_to_comfyapi( audio_data_np, sample_rate, container_format, codec_name ) - return upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_kwargs) + return await upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_kwargs) def audio_to_base64_string( @@ -544,7 +532,7 @@ def audio_to_base64_string( return base64.b64encode(audio_bytes).decode("utf-8") -def upload_images_to_comfyapi( +async def upload_images_to_comfyapi( image: torch.Tensor, max_images=8, auth_kwargs: Optional[dict[str, str]] = None, @@ -561,55 +549,15 @@ def upload_images_to_comfyapi( mime_type: Optional MIME type for the image. 
""" # if batch, try to upload each file if max_images is greater than 0 - idx_image = 0 download_urls: list[str] = [] is_batch = len(image.shape) > 3 - batch_length = 1 - if is_batch: - batch_length = image.shape[0] - while True: - curr_image = image - if len(image.shape) > 3: - curr_image = image[idx_image] - # get BytesIO version of image - img_binary = tensor_to_bytesio(curr_image, mime_type=mime_type) - # first, request upload/download urls from comfy API - if not mime_type: - request_object = UploadRequest(file_name=img_binary.name) - else: - request_object = UploadRequest( - file_name=img_binary.name, content_type=mime_type - ) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/customers/storage", - method=HttpMethod.POST, - request_model=UploadRequest, - response_model=UploadResponse, - ), - request=request_object, - auth_kwargs=auth_kwargs, - ) - response = operation.execute() + batch_len = image.shape[0] if is_batch else 1 - upload_response = ApiClient.upload_file( - response.upload_url, img_binary, content_type=mime_type - ) - # verify success - try: - upload_response.raise_for_status() - except requests.exceptions.HTTPError as e: - raise ValueError(f"Could not upload one or more images: {e}") from e - # add download_url to list - download_urls.append(response.download_url) - - idx_image += 1 - # stop uploading additional files if done - if is_batch and max_images > 0: - if idx_image >= max_images: - break - if idx_image >= batch_length: - break + for idx in range(min(batch_len, max_images)): + tensor = image[idx] if is_batch else image + img_io = tensor_to_bytesio(tensor, mime_type=mime_type) + url = await upload_file_to_comfyapi(img_io, img_io.name, mime_type, auth_kwargs) + download_urls.append(url) return download_urls diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 2a4bac88b..4ad0b783b 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -43,7 +43,7 @@ operation = ApiOperation( endpoint=user_info_endpoint, request=request ) -user_profile = operation.execute(client=api_client) # Returns immediately with the result +user_profile = await operation.execute(client=api_client) # Returns immediately with the result # Example 2: Asynchronous API Operation with Polling @@ -87,18 +87,19 @@ operation = PollingOperation( ) # This will make the initial request and then poll until completion -result = operation.execute(client=api_client) # Returns the final ImageGenerationResult when done +result = await operation.execute(client=api_client) # Returns the final ImageGenerationResult when done """ from __future__ import annotations +import aiohttp +import asyncio import logging -import time import io import socket +from aiohttp.client_exceptions import ClientError, ClientResponseError from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable, Tuple from enum import Enum import json -import requests from urllib.parse import urljoin, urlparse from pydantic import BaseModel, Field import uuid # For generating unique operation IDs @@ -174,6 +175,7 @@ class ApiClient: retry_delay: float = 1.0, retry_backoff_factor: float = 2.0, retry_status_codes: Optional[Tuple[int, ...]] = None, + session: Optional[aiohttp.ClientSession] = None, ): self.base_url = base_url self.auth_token = auth_token @@ -186,13 +188,16 @@ class ApiClient: # Default retry status codes: 408 (Request Timeout), 429 (Too Many Requests), # 500, 502, 503, 504 (Server Errors) self.retry_status_codes = retry_status_codes or (408, 429, 
500, 502, 503, 504) + self._session: Optional[aiohttp.ClientSession] = session + self._owns_session = session is None # Track if we have to close it - def _generate_operation_id(self, path: str) -> str: + @staticmethod + def _generate_operation_id(path: str) -> str: """Generates a unique operation ID for logging.""" return f"{path.strip('/').replace('/', '_')}_{uuid.uuid4().hex[:8]}" + @staticmethod def _create_json_payload_args( - self, data: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, ) -> Dict[str, Any]: @@ -203,31 +208,53 @@ class ApiClient: def _create_form_data_args( self, - data: Dict[str, Any], - files: Dict[str, Any], + data: Dict[str, Any] | None, + files: Dict[str, Any] | None, headers: Optional[Dict[str, str]] = None, - multipart_parser = None, + multipart_parser: Callable | None = None, ) -> Dict[str, Any]: if headers and "Content-Type" in headers: del headers["Content-Type"] - if multipart_parser: + if multipart_parser and data: data = multipart_parser(data) - return { - "data": data, - "files": files, - "headers": headers, - } + form = aiohttp.FormData(default_to_multipart=True) + if data: # regular text fields + for k, v in data.items(): + if v is None: + continue # aiohttp fails to serialize "None" values + # aiohttp expects strings or bytes; convert enums etc. + form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v) + if files: + file_iter = files if isinstance(files, list) else files.items() + for field_name, file_obj in file_iter: + if file_obj is None: + continue # aiohttp fails to serialize "None" values + # file_obj can be (filename, bytes/io.BytesIO, content_type) tuple + if isinstance(file_obj, tuple): + filename, file_value, content_type = self._unpack_tuple(file_obj) + else: + file_value = file_obj + filename = getattr(file_obj, "name", field_name) + content_type = "application/octet-stream" + + form.add_field( + name=field_name, + value=file_value, + filename=filename, + content_type=content_type, + ) + return {"data": form, "headers": headers or {}} + + @staticmethod def _create_urlencoded_form_data_args( - self, data: Dict[str, Any], headers: Optional[Dict[str, str]] = None, ) -> Dict[str, Any]: headers = headers or {} headers["Content-Type"] = "application/x-www-form-urlencoded" - return { "data": data, "headers": headers, @@ -244,7 +271,7 @@ class ApiClient: return headers - def _check_connectivity(self, target_url: str) -> Dict[str, bool]: + async def _check_connectivity(self, target_url: str) -> Dict[str, bool]: """ Check connectivity to determine if network issues are local or server-related. 
@@ -258,52 +285,39 @@ class ApiClient: "internet_accessible": False, "api_accessible": False, "is_local_issue": False, - "is_api_issue": False + "is_api_issue": False, } + timeout = aiohttp.ClientTimeout(total=5.0) + async with aiohttp.ClientSession(timeout=timeout) as session: + try: + async with session.get("https://www.google.com", ssl=self.verify_ssl) as resp: + results["internet_accessible"] = resp.status < 500 + except (ClientError, asyncio.TimeoutError, socket.gaierror): + results["is_local_issue"] = True + return results # cannot reach the internet – early exit - # First check basic internet connectivity using a reliable external site - try: - # Use a reliable external domain for checking basic connectivity - check_response = requests.get("https://www.google.com", - timeout=5.0, - verify=self.verify_ssl) - if check_response.status_code < 500: - results["internet_accessible"] = True - except (requests.RequestException, socket.error): - results["internet_accessible"] = False - results["is_local_issue"] = True - return results - - # Now check API server connectivity - try: - # Extract domain from the target URL to do a simpler health check - parsed_url = urlparse(target_url) - api_base = f"{parsed_url.scheme}://{parsed_url.netloc}" - - # Try to reach the API domain - api_response = requests.get(f"{api_base}/health", timeout=5.0, verify=self.verify_ssl) - if api_response.status_code < 500: - results["api_accessible"] = True - else: - results["api_accessible"] = False - results["is_api_issue"] = True - except requests.RequestException: - results["api_accessible"] = False - # If we can reach the internet but not the API, it's an API issue - results["is_api_issue"] = True + # Now check API health endpoint + parsed = urlparse(target_url) + health_url = f"{parsed.scheme}://{parsed.netloc}/health" + try: + async with session.get(health_url, ssl=self.verify_ssl) as resp: + results["api_accessible"] = resp.status < 500 + except ClientError: + pass # leave as False + results["is_api_issue"] = results["internet_accessible"] and not results["api_accessible"] return results - def request( + async def request( self, method: str, path: str, params: Optional[Dict[str, Any]] = None, data: Optional[Dict[str, Any]] = None, - files: Optional[Dict[str, Any]] = None, + files: Optional[Dict[str, Any] | list[tuple[str, Any]]] = None, headers: Optional[Dict[str, str]] = None, content_type: str = "application/json", - multipart_parser: Callable = None, + multipart_parser: Callable | None = None, retry_count: int = 0, # Used internally for tracking retries ) -> Dict[str, Any]: """ @@ -327,18 +341,19 @@ class ApiClient: ApiServerError: If the API server is unreachable but internet is working Exception: For other request failures """ - # Use urljoin but ensure path is relative to avoid absolute path behavior - relative_path = path.lstrip('/') + + # Build full URL and merge headers + relative_path = path.lstrip("/") url = urljoin(self.base_url, relative_path) - self.check_auth(self.auth_token, self.comfy_api_key) - # Combine default headers with any provided headers + self._check_auth(self.auth_token, self.comfy_api_key) + request_headers = self.get_headers() if headers: request_headers.update(headers) - - # Let requests handle the content type when files are present. 
if files: - del request_headers["Content-Type"] + request_headers.pop("Content-Type", None) + if params: + params = {k: v for k, v in params.items() if v is not None} # aiohttp fails to serialize None values logging.debug(f"[DEBUG] Request Headers: {request_headers}") logging.debug(f"[DEBUG] Files: {files}") @@ -346,11 +361,9 @@ class ApiClient: logging.debug(f"[DEBUG] Data: {data}") if content_type == "application/x-www-form-urlencoded": - payload_args = self._create_urlencoded_form_data_args(data, request_headers) + payload_args = self._create_urlencoded_form_data_args(data or {}, request_headers) elif content_type == "multipart/form-data": - payload_args = self._create_form_data_args( - data, files, request_headers, multipart_parser - ) + payload_args = self._create_form_data_args(data, files, request_headers, multipart_parser) else: payload_args = self._create_json_payload_args(data, request_headers) @@ -361,220 +374,67 @@ class ApiClient: request_url=url, request_headers=request_headers, request_params=params, - request_data=data if content_type == "application/json" else "[form-data or other]" + request_data=data if content_type == "application/json" else "[form-data or other]", ) + session = await self._get_session() try: - response = requests.request( - method=method, - url=url, + async with session.request( + method, + url, params=params, - timeout=self.timeout, - verify=self.verify_ssl, + ssl=self.verify_ssl, **payload_args, - ) + ) as resp: + if resp.status >= 400: + try: + error_data = await resp.json() + except (aiohttp.ContentTypeError, json.JSONDecodeError): + error_data = await resp.text() - # Check if we should retry based on status code - if (response.status_code in self.retry_status_codes and - retry_count < self.max_retries): + return await self._handle_http_error( + ClientResponseError(resp.request_info, resp.history, status=resp.status, message=error_data), + operation_id, + method, + url, + params, + data, + files, + headers, + content_type, + multipart_parser, + retry_count=retry_count, + response_content=error_data, + ) - # Calculate delay with exponential backoff - delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) - - logging.warning( - f"Request failed with status {response.status_code}. 
" - f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})" - ) - - time.sleep(delay) - return self.request( - method=method, - path=path, - params=params, - data=data, - files=files, - headers=headers, - content_type=content_type, - multipart_parser=multipart_parser, - retry_count=retry_count + 1, - ) - - # Raise exception for error status codes - response.raise_for_status() - - # Log successful response - response_content_to_log = response.content - try: - # Attempt to parse JSON for prettier logging, fallback to raw content - response_content_to_log = response.json() - except json.JSONDecodeError: - pass # Keep as bytes/str if not JSON - - request_logger.log_request_response( - operation_id=operation_id, - request_method=method, # Pass request details again for context in log - request_url=url, - response_status_code=response.status_code, - response_headers=dict(response.headers), - response_content=response_content_to_log - ) - - except requests.ConnectionError as e: - error_message = f"ConnectionError: {str(e)}" - request_logger.log_request_response( - operation_id=operation_id, - request_method=method, - request_url=url, - error_message=error_message - ) - # Only perform connectivity check if we've exhausted all retries - if retry_count >= self.max_retries: - # Check connectivity to determine if it's a local or API issue - connectivity = self._check_connectivity(self.base_url) - - if connectivity["is_local_issue"]: - raise LocalNetworkError( - "Unable to connect to the API server due to local network issues. " - "Please check your internet connection and try again." - ) from e - elif connectivity["is_api_issue"]: - raise ApiServerError( - f"The API server at {self.base_url} is currently unreachable. " - f"The service may be experiencing issues. Please try again later." - ) from e - - # If we haven't exhausted retries yet, retry the request - if retry_count < self.max_retries: - delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) - logging.warning( - f"Connection error: {str(e)}. " - f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})" - ) - time.sleep(delay) - return self.request( - method=method, - path=path, - params=params, - data=data, - files=files, - headers=headers, - content_type=content_type, - multipart_parser=multipart_parser, - retry_count=retry_count + 1, - ) - - # If we've exhausted retries and didn't identify the specific issue, - # raise a generic exception - final_error_message = ( - f"Unable to connect to the API server after {self.max_retries} attempts. " - f"Please check your internet connection or try again later." - ) - request_logger.log_request_response( # Log final failure - operation_id=operation_id, - request_method=method, request_url=url, - error_message=final_error_message - ) - raise Exception(final_error_message) from e - - except requests.Timeout as e: - error_message = f"Timeout: {str(e)}" - request_logger.log_request_response( - operation_id=operation_id, - request_method=method, request_url=url, - error_message=error_message - ) - # Retry timeouts if we haven't exhausted retries - if retry_count < self.max_retries: - delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) - logging.warning( - f"Request timed out. 
" - f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})" - ) - time.sleep(delay) - return self.request( - method=method, - path=path, - params=params, - data=data, - files=files, - headers=headers, - content_type=content_type, - multipart_parser=multipart_parser, - retry_count=retry_count + 1, - ) - final_error_message = ( - f"Request timed out after {self.timeout} seconds and {self.max_retries} retry attempts. " - f"The server might be experiencing high load or the operation is taking longer than expected." - ) - request_logger.log_request_response( # Log final failure - operation_id=operation_id, - request_method=method, request_url=url, - error_message=final_error_message - ) - raise Exception(final_error_message) from e - - except requests.HTTPError as e: - status_code = e.response.status_code if hasattr(e, "response") else None - original_error_message = f"HTTP Error: {str(e)}" - error_content_for_log = None - if hasattr(e, "response") and e.response is not None: - error_content_for_log = e.response.content + # Success – parse JSON (safely) and log try: - error_content_for_log = e.response.json() - except json.JSONDecodeError: - pass + payload = await resp.json() + response_content_to_log = payload + except (aiohttp.ContentTypeError, json.JSONDecodeError): + payload = {} + response_content_to_log = await resp.text() - - # Try to extract detailed error message from JSON response for user display - # but log the full error content. - user_display_error_message = original_error_message - - try: - if hasattr(e, "response") and e.response is not None and e.response.content: - error_json = e.response.json() - if "error" in error_json and "message" in error_json["error"]: - user_display_error_message = f"API Error: {error_json['error']['message']}" - if "type" in error_json["error"]: - user_display_error_message += f" (Type: {error_json['error']['type']})" - elif isinstance(error_json, dict): # Handle cases where error is just a JSON dict - user_display_error_message = f"API Error: {json.dumps(error_json)}" - else: # Non-dict JSON error - user_display_error_message = f"API Error: {str(error_json)}" - except json.JSONDecodeError: - # If not JSON, use the raw content if it's not too long, or a summary - if hasattr(e, "response") and e.response is not None and e.response.content: - raw_content = e.response.content.decode(errors='ignore') - if len(raw_content) < 200: # Arbitrary limit for display - user_display_error_message = f"API Error (raw): {raw_content}" - else: - user_display_error_message = f"API Error (raw, status {status_code})" - - request_logger.log_request_response( - operation_id=operation_id, - request_method=method, request_url=url, - response_status_code=status_code, - response_headers=dict(e.response.headers) if hasattr(e, "response") and e.response is not None else None, - response_content=error_content_for_log, - error_message=original_error_message # Log the original exception string as error - ) - - logging.debug(f"[DEBUG] API Error: {user_display_error_message} (Status: {status_code})") - if hasattr(e, "response") and e.response is not None and e.response.content: - logging.debug(f"[DEBUG] Response content: {e.response.content}") - - # Retry if the status code is in our retry list and we haven't exhausted retries - if (status_code in self.retry_status_codes and - retry_count < self.max_retries): - - delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) - logging.warning( - f"HTTP error {status_code}. 
" - f"Retrying in {delay:.2f}s ({retry_count + 1}/{self.max_retries})" + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=response_content_to_log, ) - time.sleep(delay) - return self.request( - method=method, - path=path, + return payload + + except (ClientError, asyncio.TimeoutError, socket.gaierror) as e: + # Treat as *connection* problem – optionally retry, else escalate + if retry_count < self.max_retries: + delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) + logging.warning("Connection error. Retrying in %.2fs (%s/%s): %s", delay, retry_count + 1, + self.max_retries, str(e)) + await asyncio.sleep(delay) + return await self.request( + method, + path, params=params, data=data, files=files, @@ -583,40 +443,34 @@ class ApiClient: multipart_parser=multipart_parser, retry_count=retry_count + 1, ) + # One final connectivity check for diagnostics + connectivity = await self._check_connectivity(self.base_url) + if connectivity["is_local_issue"]: + raise LocalNetworkError( + "Unable to connect to the API server due to local network issues. " + "Please check your internet connection and try again." + ) from e + raise ApiServerError( + f"The API server at {self.base_url} is currently unreachable. " + f"The service may be experiencing issues. Please try again later." + ) from e - # Specific error messages for common status codes for user display - if status_code == 401: - user_display_error_message = "Unauthorized: Please login first to use this node." - elif status_code == 402: - user_display_error_message = "Payment Required: Please add credits to your account to use this node." - elif status_code == 409: - user_display_error_message = "There is a problem with your account. Please contact support@comfy.org." - elif status_code == 429: - user_display_error_message = "Rate Limit Exceeded: Please try again later." - # else, user_display_error_message remains as parsed from response or original HTTPError string - - raise Exception(user_display_error_message) # Raise with the user-friendly message - - # Parse and return JSON response - if response.content: - return response.json() - return {} - - def check_auth(self, auth_token, comfy_api_key): + @staticmethod + def _check_auth(auth_token, comfy_api_key): """Verify that an auth token is present or comfy_api_key is present""" if auth_token is None and comfy_api_key is None: raise Exception("Unauthorized: Please login first to use this node.") return auth_token or comfy_api_key @staticmethod - def upload_file( + async def upload_file( upload_url: str, file: io.BytesIO | str, content_type: str | None = None, max_retries: int = 3, retry_delay: float = 1.0, retry_backoff_factor: float = 2.0, - ): + ) -> aiohttp.ClientResponse: """Upload a file to the API with retry logic. Args: @@ -627,112 +481,167 @@ class ApiClient: retry_delay: Initial delay between retries in seconds retry_backoff_factor: Multiplier for the delay after each retry """ - headers = {} + headers: Dict[str, str] = {} + skip_auto_headers: set[str] = set() if content_type: headers["Content-Type"] = content_type + else: + # tell aiohttp not to add Content-Type that will break the request signature and result in a 403 status. 
+            skip_auto_headers.add("Content-Type")
 
-        # Prepare the file data
+        # Extract file bytes
         if isinstance(file, io.BytesIO):
-            file.seek(0)  # Ensure we're at the start of the file
+            file.seek(0)
             data = file.read()
         elif isinstance(file, str):
             with open(file, "rb") as f:
                 data = f.read()
         else:
-            raise ValueError("File must be either a BytesIO object or a file path string")
+            raise ValueError("File must be BytesIO or str path")
 
-        # Try the upload with retries
-        last_exception = None
-        operation_id = f"upload_{upload_url.split('/')[-1]}_{uuid.uuid4().hex[:8]}" # Simplified ID for uploads
-
-        # Log initial attempt (without full file data for brevity)
+        operation_id = f"upload_{upload_url.split('/')[-1]}_{uuid.uuid4().hex[:8]}"
         request_logger.log_request_response(
             operation_id=operation_id,
             request_method="PUT",
             request_url=upload_url,
             request_headers=headers,
-            request_data=f"[File data of type {content_type or 'unknown'}, size {len(data)} bytes]"
+            request_data=f"[File data {len(data)} bytes]",
         )
 
-        for retry_attempt in range(max_retries + 1):
+        delay = retry_delay
+        for attempt in range(max_retries + 1):
             try:
-                response = requests.put(upload_url, data=data, headers=headers)
-                response.raise_for_status()
+                timeout = aiohttp.ClientTimeout(total=None)  # no client-side total timeout; honour server-side limits
+                async with aiohttp.ClientSession(timeout=timeout) as session:
+                    async with session.put(
+                        upload_url, data=data, headers=headers, skip_auto_headers=skip_auto_headers,
+                    ) as resp:
+                        resp.raise_for_status()
+                        request_logger.log_request_response(
+                            operation_id=operation_id,
+                            request_method="PUT",
+                            request_url=upload_url,
+                            response_status_code=resp.status,
+                            response_headers=dict(resp.headers),
+                            response_content="File uploaded successfully.",
+                        )
+                        return resp
+            except (ClientError, asyncio.TimeoutError) as e:
                 request_logger.log_request_response(
                     operation_id=operation_id,
-                    request_method="PUT", request_url=upload_url, # For context
-                    response_status_code=response.status_code,
-                    response_headers=dict(response.headers),
-                    response_content="File uploaded successfully." # Or response.text if available
+                    request_method="PUT",
+                    request_url=upload_url,
+                    response_status_code=e.status if hasattr(e, "status") else None,
+                    response_headers=dict(e.headers) if getattr(e, "headers", None) else None,
+                    response_content=None,
+                    error_message=f"{type(e).__name__}: {str(e)}",
                 )
-                return response
-
-            except (requests.ConnectionError, requests.Timeout, requests.HTTPError) as e:
-                last_exception = e
-                error_message_for_log = f"{type(e).__name__}: {str(e)}"
-                response_content_for_log = None
-                status_code_for_log = None
-                headers_for_log = None
-
-                if hasattr(e, 'response') and e.response is not None:
-                    status_code_for_log = e.response.status_code
-                    headers_for_log = dict(e.response.headers)
-                    try:
-                        response_content_for_log = e.response.json()
-                    except json.JSONDecodeError:
-                        response_content_for_log = e.response.content
-
-
-                request_logger.log_request_response(
-                    operation_id=operation_id,
-                    request_method="PUT", request_url=upload_url,
-                    response_status_code=status_code_for_log,
-                    response_headers=headers_for_log,
-                    response_content=response_content_for_log,
-                    error_message=error_message_for_log
-                )
-
-                if retry_attempt < max_retries:
-                    delay = retry_delay * (retry_backoff_factor ** retry_attempt)
+                if attempt < max_retries:
                     logging.warning(
-                        f"File upload failed: {str(e)}. "
-                        f"Retrying in {delay:.2f}s ({retry_attempt + 1}/{max_retries})"
+                        "Upload failed (%s/%s). Retrying in %.2fs. %s", attempt + 1, max_retries, delay, str(e)
                     )
-                    time.sleep(delay)
+                    await asyncio.sleep(delay)
+                    delay *= retry_backoff_factor
                 else:
-                    break # Max retries reached
+                    raise NetworkError(f"Failed to upload file after {max_retries + 1} attempts: {e}") from e
 
-        # If we've exhausted all retries, determine the final error type and raise
-        final_error_message = f"Failed to upload file after {max_retries + 1} attempts. Error: {str(last_exception)}"
-        try:
-            # Check basic internet connectivity
-            check_response = requests.get("https://www.google.com", timeout=5.0, verify=True) # Assuming verify=True is desired
-            if check_response.status_code >= 500: # Google itself has an issue (rare)
-                final_error_message = (f"Failed to upload file. Internet connectivity check to Google failed "
-                                       f"(status {check_response.status_code}). Original error: {str(last_exception)}")
-                # Not raising LocalNetworkError here as Google itself might be down.
-            # If Google is reachable, the issue is likely with the upload server or a more specific local problem
-            # not caught by a simple Google ping (e.g., DNS for the specific upload URL, firewall).
-            # The original last_exception is probably most relevant.
+    async def _handle_http_error(
+        self,
+        exc: ClientResponseError,
+        operation_id: str,
+        *req_meta,
+        retry_count: int,
+        response_content: dict | str = "",
+    ) -> Dict[str, Any]:
+        status_code = exc.status
+        if status_code == 401:
+            user_friendly = "Unauthorized: Please login first to use this node."
+        elif status_code == 402:
+            user_friendly = "Payment Required: Please add credits to your account to use this node."
+        elif status_code == 409:
+            user_friendly = "There is a problem with your account. Please contact support@comfy.org."
+        elif status_code == 429:
+            user_friendly = "Rate Limit Exceeded: Please try again later."
+        else:
+            if isinstance(response_content, dict):
+                if "error" in response_content and "message" in response_content["error"]:
+                    user_friendly = f"API Error: {response_content['error']['message']}"
+                    if "type" in response_content["error"]:
+                        user_friendly += f" (Type: {response_content['error']['type']})"
+                else:  # Handle cases where error is just a JSON dict with unknown format
+                    user_friendly = f"API Error: {json.dumps(response_content)}"
+            else:
+                if len(response_content) < 200:  # Arbitrary limit for display
+                    user_friendly = f"API Error (raw): {response_content}"
+                else:
+                    user_friendly = f"API Error (raw, status {status_code})"
 
-        except (requests.RequestException, socket.error) as conn_check_exc:
-            # Could not reach Google, likely a local network issue
-            final_error_message = (f"Failed to upload file due to network connectivity issues "
-                                   f"(cannot reach Google: {str(conn_check_exc)}). "
-                                   f"Original upload error: {str(last_exception)}")
-            request_logger.log_request_response( # Log final failure reason
-                operation_id=operation_id,
-                request_method="PUT", request_url=upload_url,
-                error_message=final_error_message
-            )
-            raise LocalNetworkError(final_error_message) from last_exception
-
-        request_logger.log_request_response( # Log final failure reason if not LocalNetworkError
+
+        request_logger.log_request_response(
             operation_id=operation_id,
-            request_method="PUT", request_url=upload_url,
-            error_message=final_error_message
+            request_method=req_meta[0],
+            request_url=req_meta[1],
+            response_status_code=exc.status,
+            response_headers=dict(req_meta[5]) if req_meta[5] else None,  # request headers; response headers are unavailable here
+            response_content=response_content,
+            error_message=f"HTTP Error {exc.status}",
         )
-        raise Exception(final_error_message) from last_exception
+
+        logging.debug(f"[DEBUG] API Error: {user_friendly} (Status: {status_code})")
+        if response_content:
+            logging.debug(f"[DEBUG] Response content: {response_content}")
+
+        # Retry if eligible
+        if status_code in self.retry_status_codes and retry_count < self.max_retries:
+            delay = self.retry_delay * (self.retry_backoff_factor ** retry_count)
+            logging.warning(
+                "HTTP error %s. Retrying in %.2fs (%s/%s)",
+                status_code,
+                delay,
+                retry_count + 1,
+                self.max_retries,
+            )
+            await asyncio.sleep(delay)
+            return await self.request(
+                req_meta[0],  # method
+                req_meta[1].replace(self.base_url, ""),  # path
+                params=req_meta[2],
+                data=req_meta[3],
+                files=req_meta[4],
+                headers=req_meta[5],
+                content_type=req_meta[6],
+                multipart_parser=req_meta[7],
+                retry_count=retry_count + 1,
+            )
+
+        raise Exception(user_friendly) from exc
+
+    @staticmethod
+    def _unpack_tuple(t):
+        """Helper to normalise (filename, file, content_type) tuples."""
+        if len(t) == 3:
+            return t
+        elif len(t) == 2:
+            return t[0], t[1], "application/octet-stream"
+        else:
+            raise ValueError("files tuple must be (filename, file[, content_type])")
+
+    async def _get_session(self) -> aiohttp.ClientSession:
+        if self._session is None or self._session.closed:
+            timeout = aiohttp.ClientTimeout(total=self.timeout)
+            self._session = aiohttp.ClientSession(timeout=timeout)
+            self._owns_session = True
+        return self._session
+
+    async def close(self) -> None:
+        if self._owns_session and self._session and not self._session.closed:
+            await self._session.close()
+
+    async def __aenter__(self) -> "ApiClient":
+        """Allow usage as an async context manager; ensures clean teardown."""
+        return self
+
+    async def __aexit__(self, exc_type, exc, tb):
+        await self.close()
 
 
 class ApiEndpoint(Generic[T, R]):
@@ -763,31 +672,28 @@ class ApiEndpoint(Generic[T, R]):
 
 
 class SynchronousOperation(Generic[T, R]):
-    """
-    Represents a single synchronous API operation.
- """ + """Represents a single synchronous API operation.""" def __init__( self, endpoint: ApiEndpoint[T, R], request: T, - files: Optional[Dict[str, Any]] = None, + files: Optional[Dict[str, Any] | list[tuple[str, Any]]] = None, api_base: str | None = None, auth_token: Optional[str] = None, comfy_api_key: Optional[str] = None, - auth_kwargs: Optional[Dict[str,str]] = None, + auth_kwargs: Optional[Dict[str, str]] = None, timeout: float = 604800.0, verify_ssl: bool = True, content_type: str = "application/json", - multipart_parser: Callable = None, + multipart_parser: Callable | None = None, max_retries: int = 3, retry_delay: float = 1.0, retry_backoff_factor: float = 2.0, - ): + ) -> None: self.endpoint = endpoint self.request = request - self.response = None - self.error = None + self.files = files self.api_base: str = api_base or args.comfy_api_base self.auth_token = auth_token self.comfy_api_key = comfy_api_key @@ -796,91 +702,64 @@ class SynchronousOperation(Generic[T, R]): self.comfy_api_key = auth_kwargs.get("comfy_api_key", self.comfy_api_key) self.timeout = timeout self.verify_ssl = verify_ssl - self.files = files self.content_type = content_type self.multipart_parser = multipart_parser self.max_retries = max_retries self.retry_delay = retry_delay self.retry_backoff_factor = retry_backoff_factor - def execute(self, client: Optional[ApiClient] = None) -> R: - """Execute the API operation using the provided client or create one with retry support""" - try: - # Create client if not provided - if client is None: - client = ApiClient( - base_url=self.api_base, - auth_token=self.auth_token, - comfy_api_key=self.comfy_api_key, - timeout=self.timeout, - verify_ssl=self.verify_ssl, - max_retries=self.max_retries, - retry_delay=self.retry_delay, - retry_backoff_factor=self.retry_backoff_factor, - ) - - # Convert request model to dict, but use None for EmptyRequest - request_dict = ( - None - if isinstance(self.request, EmptyRequest) - else self.request.model_dump(exclude_none=True) + async def execute(self, client: Optional[ApiClient] = None) -> R: + owns_client = client is None + if owns_client: + client = ApiClient( + base_url=self.api_base, + auth_token=self.auth_token, + comfy_api_key=self.comfy_api_key, + timeout=self.timeout, + verify_ssl=self.verify_ssl, + max_retries=self.max_retries, + retry_delay=self.retry_delay, + retry_backoff_factor=self.retry_backoff_factor, ) - if request_dict: - for key, value in request_dict.items(): - if isinstance(value, Enum): - request_dict[key] = value.value - # Debug log for request + try: + request_dict: Optional[Dict[str, Any]] + if isinstance(self.request, EmptyRequest): + request_dict = None + else: + request_dict = self.request.model_dump(exclude_none=True) + for k, v in list(request_dict.items()): + if isinstance(v, Enum): + request_dict[k] = v.value + logging.debug( f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}" ) logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}") logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}") - # Make the request with built-in retry - resp = client.request( - method=self.endpoint.method.value, - path=self.endpoint.path, - data=request_dict, + response_json = await client.request( + self.endpoint.method.value, + self.endpoint.path, params=self.endpoint.query_params, + data=request_dict, files=self.files, content_type=self.content_type, - multipart_parser=self.multipart_parser + multipart_parser=self.multipart_parser, ) - # Debug log for response 
logging.debug("=" * 50) logging.debug("[DEBUG] RESPONSE DETAILS:") logging.debug("[DEBUG] Status Code: 200 (Success)") - logging.debug(f"[DEBUG] Response Body: {json.dumps(resp, indent=2)}") + logging.debug(f"[DEBUG] Response Body: {json.dumps(response_json, indent=2)}") logging.debug("=" * 50) - # Parse and return the response - return self._parse_response(resp) - - except LocalNetworkError as e: - # Propagate specific network error types - logging.error(f"[ERROR] Local network error: {str(e)}") - raise - - except ApiServerError as e: - # Propagate API server errors - logging.error(f"[ERROR] API server error: {str(e)}") - raise - - except Exception as e: - logging.error(f"[ERROR] API Exception: {str(e)}") - raise Exception(str(e)) - - def _parse_response(self, resp): - """Parse response data - can be overridden by subclasses""" - # The response is already the complete object, don't extract just the "data" field - # as that would lose the outer structure (created timestamp, etc.) - - # Parse response using the provided model - self.response = self.endpoint.response_model.model_validate(resp) - logging.debug(f"[DEBUG] Parsed Response: {self.response}") - return self.response + parsed_response = self.endpoint.response_model.model_validate(response_json) + logging.debug(f"[DEBUG] Parsed Response: {parsed_response}") + return parsed_response + finally: + if owns_client: + await client.close() class TaskStatus(str, Enum): @@ -892,23 +771,21 @@ class TaskStatus(str, Enum): class PollingOperation(Generic[T, R]): - """ - Represents an asynchronous API operation that requires polling for completion. - """ + """Represents an asynchronous API operation that requires polling for completion.""" def __init__( self, poll_endpoint: ApiEndpoint[EmptyRequest, R], - completed_statuses: list, - failed_statuses: list, + completed_statuses: list[str], + failed_statuses: list[str], status_extractor: Callable[[R], str], - progress_extractor: Callable[[R], float] = None, - result_url_extractor: Callable[[R], str] = None, + progress_extractor: Callable[[R], float] | None = None, + result_url_extractor: Callable[[R], str] | None = None, request: Optional[T] = None, api_base: str | None = None, auth_token: Optional[str] = None, comfy_api_key: Optional[str] = None, - auth_kwargs: Optional[Dict[str,str]] = None, + auth_kwargs: Optional[Dict[str, str]] = None, poll_interval: float = 5.0, max_poll_attempts: int = 120, # Default max polling attempts (10 minutes with 5s interval) max_retries: int = 3, # Max retries per individual API call @@ -916,7 +793,7 @@ class PollingOperation(Generic[T, R]): retry_backoff_factor: float = 2.0, estimated_duration: Optional[float] = None, node_id: Optional[str] = None, - ): + ) -> None: self.poll_endpoint = poll_endpoint self.request = request self.api_base: str = api_base or args.comfy_api_base @@ -931,100 +808,73 @@ class PollingOperation(Generic[T, R]): self.retry_delay = retry_delay self.retry_backoff_factor = retry_backoff_factor self.estimated_duration = estimated_duration - - # Polling configuration - self.status_extractor = status_extractor or ( - lambda x: getattr(x, "status", None) - ) + self.status_extractor = status_extractor or (lambda x: getattr(x, "status", None)) self.progress_extractor = progress_extractor self.result_url_extractor = result_url_extractor self.node_id = node_id self.completed_statuses = completed_statuses self.failed_statuses = failed_statuses + self.final_response: Optional[R] = None - # For storing response data - self.final_response = None - self.error 
= None
-
-    def execute(self, client: Optional[ApiClient] = None) -> R:
-        """Execute the polling operation using the provided client. If failed, raise an exception."""
+    async def execute(self, client: Optional[ApiClient] = None) -> R:
+        owns_client = client is None
+        if owns_client:
+            client = ApiClient(
+                base_url=self.api_base,
+                auth_token=self.auth_token,
+                comfy_api_key=self.comfy_api_key,
+                max_retries=self.max_retries,
+                retry_delay=self.retry_delay,
+                retry_backoff_factor=self.retry_backoff_factor,
+            )
         try:
-            if client is None:
-                client = ApiClient(
-                    base_url=self.api_base,
-                    auth_token=self.auth_token,
-                    comfy_api_key=self.comfy_api_key,
-                    max_retries=self.max_retries,
-                    retry_delay=self.retry_delay,
-                    retry_backoff_factor=self.retry_backoff_factor,
-                )
-            return self._poll_until_complete(client)
-        except LocalNetworkError as e:
-            # Provide clear message for local network issues
-            raise Exception(
-                f"Polling failed due to local network issues. Please check your internet connection. "
-                f"Details: {str(e)}"
-            ) from e
-        except ApiServerError as e:
-            # Provide clear message for API server issues
-            raise Exception(
-                f"Polling failed due to API server issues. The service may be experiencing problems. "
-                f"Please try again later. Details: {str(e)}"
-            ) from e
-        except Exception as e:
-            raise Exception(f"Error during polling: {str(e)}")
+            return await self._poll_until_complete(client)
+        finally:
+            if owns_client:
+                await client.close()
 
     def _display_text_on_node(self, text: str):
-        """Sends text to the client which will be displayed on the node in the UI"""
         if not self.node_id:
             return
-
         PromptServer.instance.send_progress_text(text, self.node_id)
 
-    def _display_time_progress_on_node(self, time_completed: int):
+    def _display_time_progress_on_node(self, time_completed: int | float):
         if not self.node_id:
             return
-
         if self.estimated_duration is not None:
-            estimated_time_remaining = max(
-                0, int(self.estimated_duration) - int(time_completed)
-            )
-            message = f"Task in progress: {time_completed:.0f}s (~{estimated_time_remaining:.0f}s remaining)"
+            remaining = max(0, int(self.estimated_duration) - time_completed)
+            message = f"Task in progress: {time_completed:.0f}s (~{remaining:.0f}s remaining)"
         else:
             message = f"Task in progress: {time_completed:.0f}s"
         self._display_text_on_node(message)
 
     def _check_task_status(self, response: R) -> TaskStatus:
-        """Check task status using the status extractor function"""
         try:
             status = self.status_extractor(response)
             if status in self.completed_statuses:
                 return TaskStatus.COMPLETED
-            elif status in self.failed_statuses:
+            if status in self.failed_statuses:
                 return TaskStatus.FAILED
             return TaskStatus.PENDING
         except Exception as e:
-            logging.error(f"Error extracting status: {e}")
+            logging.error("Error extracting status: %s", e)
             return TaskStatus.PENDING
 
-    def _poll_until_complete(self, client: ApiClient) -> R:
+    async def _poll_until_complete(self, client: ApiClient) -> R:
         """Poll until the task is complete"""
-        poll_count = 0
         consecutive_errors = 0
         max_consecutive_errors = min(5, self.max_retries * 2)  # Limit consecutive errors
 
         if self.progress_extractor:
             progress = utils.ProgressBar(PROGRESS_BAR_MAX)
 
-        while poll_count < self.max_poll_attempts:
+        status = TaskStatus.PENDING
+        for poll_count in range(1, self.max_poll_attempts + 1):
             try:
-                poll_count += 1
                 logging.debug(f"[DEBUG] Polling attempt #{poll_count}")
 
                 request_dict = (
-                    self.request.model_dump(exclude_none=True)
-                    if self.request is not None
-                    else None
+                    
None if self.request is None else self.request.model_dump(exclude_none=True) ) if poll_count == 1: @@ -1036,18 +886,14 @@ class PollingOperation(Generic[T, R]): ) # Query task status - resp = client.request( - method=self.poll_endpoint.method.value, - path=self.poll_endpoint.path, + resp = await client.request( + self.poll_endpoint.method.value, + self.poll_endpoint.path, params=self.poll_endpoint.query_params, data=request_dict, ) - - # Successfully got a response, reset consecutive error count - consecutive_errors = 0 - - # Parse response - response_obj = self.poll_endpoint.response_model.model_validate(resp) + consecutive_errors = 0 # reset on success + response_obj: R = self.poll_endpoint.response_model.model_validate(resp) # Check if task is complete status = self._check_task_status(response_obj) @@ -1065,45 +911,30 @@ class PollingOperation(Generic[T, R]): result_url = self.result_url_extractor(response_obj) if result_url: message = f"Result URL: {result_url}" - else: - message = "Task completed successfully!" logging.debug(f"[DEBUG] {message}") self._display_text_on_node(message) self.final_response = response_obj if self.progress_extractor: progress.update(100) return self.final_response - elif status == TaskStatus.FAILED: + if status == TaskStatus.FAILED: message = f"Task failed: {json.dumps(resp)}" logging.error(f"[DEBUG] {message}") raise Exception(message) - else: - logging.debug("[DEBUG] Task still pending, continuing to poll...") - - # Wait before polling again - logging.debug( - f"[DEBUG] Waiting {self.poll_interval} seconds before next poll" - ) + logging.debug("[DEBUG] Task still pending, continuing to poll...") + # Task pending – wait for i in range(int(self.poll_interval)): - time_completed = (poll_count * self.poll_interval) + i - self._display_time_progress_on_node(time_completed) - time.sleep(1) + self._display_time_progress_on_node((poll_count - 1) * self.poll_interval + i) + await asyncio.sleep(1) - except (LocalNetworkError, ApiServerError) as e: - # For network-related errors, increment error count and potentially abort + except (LocalNetworkError, ApiServerError, NetworkError) as e: consecutive_errors += 1 if consecutive_errors >= max_consecutive_errors: raise Exception( - f"Polling aborted after {consecutive_errors} consecutive network errors: {str(e)}" + f"Polling aborted after {consecutive_errors} network errors: {str(e)}" ) from e - - # Log the error but continue polling - logging.warning( - f"Network error during polling (attempt {poll_count}/{self.max_poll_attempts}): {str(e)}. " - f"Will retry in {self.poll_interval} seconds." - ) - time.sleep(self.poll_interval) - + logging.warning("Network error (%s/%s): %s", consecutive_errors, max_consecutive_errors, str(e)) + await asyncio.sleep(self.poll_interval) except Exception as e: # For other errors, increment count and potentially abort consecutive_errors += 1 @@ -1117,10 +948,10 @@ class PollingOperation(Generic[T, R]): f"Error during polling (attempt {poll_count}/{self.max_poll_attempts}): {str(e)}. " f"Will retry in {self.poll_interval} seconds." ) - time.sleep(self.poll_interval) + await asyncio.sleep(self.poll_interval) # If we've exhausted all polling attempts raise Exception( - f"Polling timed out after {poll_count} attempts ({poll_count * self.poll_interval} seconds). " - f"The operation may still be running on the server but is taking longer than expected." + f"Polling timed out after {self.max_poll_attempts} attempts (" f"{self.max_poll_attempts * self.poll_interval} seconds). 
" + "The operation may still be running on the server but is taking longer than expected." ) diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index d93fbd778..c09be8d5b 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -1,3 +1,4 @@ +import asyncio import io from inspect import cleandoc from typing import Union, Optional @@ -28,7 +29,7 @@ from comfy_api_nodes.apinode_utils import ( import numpy as np from PIL import Image -import requests +import aiohttp import torch import base64 import time @@ -44,18 +45,18 @@ def convert_mask_to_image(mask: torch.Tensor): return mask -def handle_bfl_synchronous_operation( +async def handle_bfl_synchronous_operation( operation: SynchronousOperation, timeout_bfl_calls=360, node_id: Union[str, None] = None, ): - response_api: BFLFluxProGenerateResponse = operation.execute() - return _poll_until_generated( + response_api: BFLFluxProGenerateResponse = await operation.execute() + return await _poll_until_generated( response_api.polling_url, timeout=timeout_bfl_calls, node_id=node_id ) -def _poll_until_generated( +async def _poll_until_generated( polling_url: str, timeout=360, node_id: Union[str, None] = None ): # used bfl-comfy-nodes to verify code implementation: @@ -66,55 +67,56 @@ def _poll_until_generated( retry_404_seconds = 2 retry_202_seconds = 2 retry_pending_seconds = 1 - request = requests.Request(method=HttpMethod.GET, url=polling_url) - # NOTE: should True loop be replaced with checking if workflow has been interrupted? - while True: - if node_id: - time_elapsed = time.time() - start_time - PromptServer.instance.send_progress_text( - f"Generating ({time_elapsed:.0f}s)", node_id - ) - response = requests.Session().send(request.prepare()) - if response.status_code == 200: - result = response.json() - if result["status"] == BFLStatus.ready: - img_url = result["result"]["sample"] - if node_id: - PromptServer.instance.send_progress_text( - f"Result URL: {img_url}", node_id - ) - img_response = requests.get(img_url) - return process_image_response(img_response) - elif result["status"] in [ - BFLStatus.request_moderated, - BFLStatus.content_moderated, - ]: - status = result["status"] - raise Exception( - f"BFL API did not return an image due to: {status}." + async with aiohttp.ClientSession() as session: + # NOTE: should True loop be replaced with checking if workflow has been interrupted? + while True: + if node_id: + time_elapsed = time.time() - start_time + PromptServer.instance.send_progress_text( + f"Generating ({time_elapsed:.0f}s)", node_id ) - elif result["status"] == BFLStatus.error: - raise Exception(f"BFL API encountered an error: {result}.") - elif result["status"] == BFLStatus.pending: - time.sleep(retry_pending_seconds) - continue - elif response.status_code == 404: - if retries_404 < max_retries_404: - retries_404 += 1 - time.sleep(retry_404_seconds) - continue - raise Exception( - f"BFL API could not find task after {max_retries_404} tries." - ) - elif response.status_code == 202: - time.sleep(retry_202_seconds) - elif time.time() - start_time > timeout: - raise Exception( - f"BFL API experienced a timeout; could not return request under {timeout} seconds." 
-            )
-        else:
-            raise Exception(f"BFL API encountered an error: {response.json()}")
+
+            async with session.get(polling_url) as response:
+                if response.status == 200:
+                    result = await response.json()
+                    if result["status"] == BFLStatus.ready:
+                        img_url = result["result"]["sample"]
+                        if node_id:
+                            PromptServer.instance.send_progress_text(
+                                f"Result URL: {img_url}", node_id
+                            )
+                        async with session.get(img_url) as img_resp:
+                            return process_image_response(await img_resp.content.read())
+                    elif result["status"] in [
+                        BFLStatus.request_moderated,
+                        BFLStatus.content_moderated,
+                    ]:
+                        status = result["status"]
+                        raise Exception(
+                            f"BFL API did not return an image due to: {status}."
+                        )
+                    elif result["status"] == BFLStatus.error:
+                        raise Exception(f"BFL API encountered an error: {result}.")
+                    elif result["status"] == BFLStatus.pending:
+                        await asyncio.sleep(retry_pending_seconds)
+                        continue
+                elif response.status == 404:
+                    if retries_404 < max_retries_404:
+                        retries_404 += 1
+                        await asyncio.sleep(retry_404_seconds)
+                        continue
+                    raise Exception(
+                        f"BFL API could not find task after {max_retries_404} tries."
+                    )
+                elif response.status == 202:
+                    await asyncio.sleep(retry_202_seconds)
+                elif time.time() - start_time > timeout:
+                    raise Exception(
+                        f"BFL API experienced a timeout; could not return request under {timeout} seconds."
+                    )
+                else:
+                    raise Exception(f"BFL API encountered an error: {await response.json()}")
 
 
 def convert_image_to_base64(image: torch.Tensor):
     scaled_image = downscale_image_tensor(image, total_pixels=2048 * 2048)
@@ -222,7 +224,7 @@ class FluxProUltraImageNode(ComfyNodeABC):
     API_NODE = True
     CATEGORY = "api node/image/BFL"
 
-    def api_call(
+    async def api_call(
         self,
         prompt: str,
         aspect_ratio: str,
@@ -266,7 +268,7 @@ class FluxProUltraImageNode(ComfyNodeABC):
             ),
             auth_kwargs=kwargs,
         )
-        output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id)
+        output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id)
         return (output_image,)
 
 
@@ -354,7 +356,7 @@ class FluxKontextProImageNode(ComfyNodeABC):
 
     BFL_PATH = "/proxy/bfl/flux-kontext-pro/generate"
 
-    def api_call(
+    async def api_call(
         self,
         prompt: str,
         aspect_ratio: str,
@@ -397,7 +399,7 @@ class FluxKontextProImageNode(ComfyNodeABC):
             ),
             auth_kwargs=kwargs,
         )
-        output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id)
+        output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id)
         return (output_image,)
 
 
@@ -489,7 +491,7 @@ class FluxProImageNode(ComfyNodeABC):
     API_NODE = True
     CATEGORY = "api node/image/BFL"
 
-    def api_call(
+    async def api_call(
        self,
        prompt: str,
        prompt_upsampling,
@@ -524,7 +526,7 @@ class FluxProImageNode(ComfyNodeABC):
             ),
             auth_kwargs=kwargs,
         )
-        output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id)
+        output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id)
         return (output_image,)
 
 
@@ -632,7 +634,7 @@ class FluxProExpandNode(ComfyNodeABC):
     API_NODE = True
     CATEGORY = "api node/image/BFL"
 
-    def api_call(
+    async def api_call(
         self,
         image: torch.Tensor,
         prompt: str,
@@ -670,7 +672,7 @@ class FluxProExpandNode(ComfyNodeABC):
             ),
             auth_kwargs=kwargs,
        )
-        output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id)
+        output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id)
         return (output_image,)
 
 
@@ -744,7 +746,7 @@ class FluxProFillNode(ComfyNodeABC):
     API_NODE = True
    CATEGORY = "api node/image/BFL"
 
-    def api_call(
+    async def api_call(
        self,
        image: 
torch.Tensor, mask: torch.Tensor, @@ -780,7 +782,7 @@ class FluxProFillNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) + output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) @@ -879,7 +881,7 @@ class FluxProCannyNode(ComfyNodeABC): API_NODE = True CATEGORY = "api node/image/BFL" - def api_call( + async def api_call( self, control_image: torch.Tensor, prompt: str, @@ -929,7 +931,7 @@ class FluxProCannyNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) + output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) @@ -1008,7 +1010,7 @@ class FluxProDepthNode(ComfyNodeABC): API_NODE = True CATEGORY = "api node/image/BFL" - def api_call( + async def api_call( self, control_image: torch.Tensor, prompt: str, @@ -1045,7 +1047,7 @@ class FluxProDepthNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - output_image = handle_bfl_synchronous_operation(operation, node_id=unique_id) + output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) return (output_image,) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index af33279d5..3751fb2a1 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -303,7 +303,7 @@ class GeminiNode(ComfyNodeABC): """ return GeminiPart(text=text) - def api_call( + async def api_call( self, prompt: str, model: GeminiModel, @@ -332,7 +332,7 @@ class GeminiNode(ComfyNodeABC): parts.extend(files) # Create response - response = SynchronousOperation( + response = await SynchronousOperation( endpoint=get_gemini_endpoint(model), request=GeminiGenerateContentRequest( contents=[ diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index b8487355f..db24e6da4 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -212,7 +212,7 @@ V3_RESOLUTIONS= [ "1536x640" ] -def download_and_process_images(image_urls): +async def download_and_process_images(image_urls): """Helper function to download and process multiple images from URLs""" # Initialize list to store image tensors @@ -220,7 +220,7 @@ def download_and_process_images(image_urls): for image_url in image_urls: # Using functions from apinode_utils.py to handle downloading and processing - image_bytesio = download_url_to_bytesio(image_url) # Download image content to BytesIO + image_bytesio = await download_url_to_bytesio(image_url) # Download image content to BytesIO img_tensor = bytesio_to_image_tensor(image_bytesio, mode="RGB") # Convert to torch.Tensor with RGB mode image_tensors.append(img_tensor) @@ -328,7 +328,7 @@ class IdeogramV1(ComfyNodeABC): DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call( + async def api_call( self, prompt, turbo=False, @@ -367,7 +367,7 @@ class IdeogramV1(ComfyNodeABC): auth_kwargs=kwargs, ) - response = operation.execute() + response = await operation.execute() if not response.data or len(response.data) == 0: raise Exception("No images were generated in the response") @@ -378,7 +378,7 @@ class IdeogramV1(ComfyNodeABC): raise Exception("No image URLs were generated in the response") display_image_urls_on_node(image_urls, unique_id) - return (download_and_process_images(image_urls),) + return (await download_and_process_images(image_urls),) class IdeogramV2(ComfyNodeABC): @@ -487,7 +487,7 @@ class 
IdeogramV2(ComfyNodeABC): DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call( + async def api_call( self, prompt, turbo=False, @@ -543,7 +543,7 @@ class IdeogramV2(ComfyNodeABC): auth_kwargs=kwargs, ) - response = operation.execute() + response = await operation.execute() if not response.data or len(response.data) == 0: raise Exception("No images were generated in the response") @@ -554,7 +554,7 @@ class IdeogramV2(ComfyNodeABC): raise Exception("No image URLs were generated in the response") display_image_urls_on_node(image_urls, unique_id) - return (download_and_process_images(image_urls),) + return (await download_and_process_images(image_urls),) class IdeogramV3(ComfyNodeABC): """ @@ -653,7 +653,7 @@ class IdeogramV3(ComfyNodeABC): DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call( + async def api_call( self, prompt, image=None, @@ -774,7 +774,7 @@ class IdeogramV3(ComfyNodeABC): ) # Execute the operation and process response - response = operation.execute() + response = await operation.execute() if not response.data or len(response.data) == 0: raise Exception("No images were generated in the response") @@ -785,7 +785,7 @@ class IdeogramV3(ComfyNodeABC): raise Exception("No image URLs were generated in the response") display_image_urls_on_node(image_urls, unique_id) - return (download_and_process_images(image_urls),) + return (await download_and_process_images(image_urls),) NODE_CLASS_MAPPINGS = { diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 69e9e5cf0..9d9eb5628 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -109,7 +109,7 @@ class KlingApiError(Exception): pass -def poll_until_finished( +async def poll_until_finished( auth_kwargs: dict[str, str], api_endpoint: ApiEndpoint[Any, R], result_url_extractor: Optional[Callable[[R], str]] = None, @@ -117,7 +117,7 @@ def poll_until_finished( node_id: Optional[str] = None, ) -> R: """Polls the Kling API endpoint until the task reaches a terminal state, then returns the response.""" - return PollingOperation( + return await PollingOperation( poll_endpoint=api_endpoint, completed_statuses=[ KlingTaskStatus.succeed.value, @@ -278,18 +278,18 @@ def get_images_urls_from_response(response) -> Optional[str]: return None -def video_result_to_node_output( +async def video_result_to_node_output( video: KlingVideoResult, ) -> tuple[VideoFromFile, str, str]: """Converts a KlingVideoResult to a tuple of (VideoFromFile, str, str) to be used as a ComfyUI node output.""" return ( - download_url_to_video_output(video.url), + await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration), ) -def image_result_to_node_output( +async def image_result_to_node_output( images: list[KlingImageResult], ) -> torch.Tensor: """ @@ -297,9 +297,9 @@ def image_result_to_node_output( If multiple images are returned, they will be stacked along the batch dimension. 
""" if len(images) == 1: - return download_url_to_image_tensor(images[0].url) + return await download_url_to_image_tensor(str(images[0].url)) else: - return torch.cat([download_url_to_image_tensor(image.url) for image in images]) + return torch.cat([await download_url_to_image_tensor(str(image.url)) for image in images]) class KlingNodeBase(ComfyNodeABC): @@ -467,10 +467,10 @@ class KlingTextToVideoNode(KlingNodeBase): RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Text to Video Node" - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> KlingText2VideoResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_TEXT_TO_VIDEO}/{task_id}", @@ -483,7 +483,7 @@ class KlingTextToVideoNode(KlingNodeBase): node_id=node_id, ) - def api_call( + async def api_call( self, prompt: str, negative_prompt: str, @@ -519,17 +519,17 @@ class KlingTextToVideoNode(KlingNodeBase): auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) validate_video_result_response(final_response) video = get_video_from_response(final_response) - return video_result_to_node_output(video) + return await video_result_to_node_output(video) class KlingCameraControlT2VNode(KlingTextToVideoNode): @@ -581,7 +581,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode): DESCRIPTION = "Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text." 
- def api_call( + async def api_call( self, prompt: str, negative_prompt: str, @@ -591,7 +591,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode): unique_id: Optional[str] = None, **kwargs, ): - return super().api_call( + return await super().api_call( model_name=KlingVideoGenModelName.kling_v1, cfg_scale=cfg_scale, mode=KlingVideoGenMode.std, @@ -670,10 +670,10 @@ class KlingImage2VideoNode(KlingNodeBase): RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Image to Video Node" - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> KlingImage2VideoResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}", @@ -686,7 +686,7 @@ class KlingImage2VideoNode(KlingNodeBase): node_id=node_id, ) - def api_call( + async def api_call( self, start_frame: torch.Tensor, prompt: str, @@ -733,17 +733,17 @@ class KlingImage2VideoNode(KlingNodeBase): auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) validate_video_result_response(final_response) video = get_video_from_response(final_response) - return video_result_to_node_output(video) + return await video_result_to_node_output(video) class KlingCameraControlI2VNode(KlingImage2VideoNode): @@ -798,7 +798,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): DESCRIPTION = "Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image." - def api_call( + async def api_call( self, start_frame: torch.Tensor, prompt: str, @@ -809,7 +809,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode): unique_id: Optional[str] = None, **kwargs, ): - return super().api_call( + return await super().api_call( model_name=KlingVideoGenModelName.kling_v1_5, start_frame=start_frame, cfg_scale=cfg_scale, @@ -897,7 +897,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): DESCRIPTION = "Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last." - def api_call( + async def api_call( self, start_frame: torch.Tensor, end_frame: torch.Tensor, @@ -912,7 +912,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): mode, duration, model_name = KlingStartEndFrameNode.get_mode_string_mapping()[ mode ] - return super().api_call( + return await super().api_call( prompt=prompt, negative_prompt=negative_prompt, model_name=model_name, @@ -964,10 +964,10 @@ class KlingVideoExtendNode(KlingNodeBase): RETURN_NAMES = ("VIDEO", "video_id", "duration") DESCRIPTION = "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes." 
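
The camera-control and start/end-frame overrides above all pin a few parameters and
then await the parent coroutine. A minimal, self-contained sketch of that pattern
(BaseNode and CameraNode are illustrative names, not part of this patch):

    import asyncio

    class BaseNode:
        async def api_call(self, prompt: str, mode: str = "std") -> str:
            return f"{mode}:{prompt}"

    class CameraNode(BaseNode):
        async def api_call(self, prompt: str) -> str:
            # pin one parameter, forward the rest to the parent implementation
            return await super().api_call(prompt, mode="camera")

    print(asyncio.run(CameraNode().api_call("a red fox")))  # camera:a red fox
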
- def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> KlingVideoExtendResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_VIDEO_EXTEND}/{task_id}", @@ -980,7 +980,7 @@ class KlingVideoExtendNode(KlingNodeBase): node_id=node_id, ) - def api_call( + async def api_call( self, prompt: str, negative_prompt: str, @@ -1006,17 +1006,17 @@ class KlingVideoExtendNode(KlingNodeBase): auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) validate_video_result_response(final_response) video = get_video_from_response(final_response) - return video_result_to_node_output(video) + return await video_result_to_node_output(video) class KlingVideoEffectsBase(KlingNodeBase): @@ -1025,10 +1025,10 @@ class KlingVideoEffectsBase(KlingNodeBase): RETURN_TYPES = ("VIDEO", "STRING", "STRING") RETURN_NAMES = ("VIDEO", "video_id", "duration") - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> KlingVideoEffectsResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_VIDEO_EFFECTS}/{task_id}", @@ -1041,7 +1041,7 @@ class KlingVideoEffectsBase(KlingNodeBase): node_id=node_id, ) - def api_call( + async def api_call( self, dual_character: bool, effect_scene: KlingDualCharacterEffectsScene | KlingSingleImageEffectsScene, @@ -1084,17 +1084,17 @@ class KlingVideoEffectsBase(KlingNodeBase): auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) validate_video_result_response(final_response) video = get_video_from_response(final_response) - return video_result_to_node_output(video) + return await video_result_to_node_output(video) class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): @@ -1142,7 +1142,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): RETURN_TYPES = ("VIDEO", "STRING") RETURN_NAMES = ("VIDEO", "duration") - def api_call( + async def api_call( self, image_left: torch.Tensor, image_right: torch.Tensor, @@ -1153,7 +1153,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): unique_id: Optional[str] = None, **kwargs, ): - video, _, duration = super().api_call( + video, _, duration = await super().api_call( dual_character=True, effect_scene=effect_scene, model_name=model_name, @@ -1208,7 +1208,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): DESCRIPTION = "Achieve different special effects when generating a video based on the effect_scene." 
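The repeated await poll_until_finished(...) calls wrap a PollingOperation that re-queries a status endpoint until the task settles. A hypothetical reduction of that loop shows why the conversion matters: asyncio.sleep yields the event loop where time.sleep would block it.

import asyncio

async def poll_until_finished(fetch_status, interval=5.0,
                              terminal=("succeed", "failed")):
    # fetch_status is a placeholder for one authenticated GET against the
    # task-status endpoint; the real PollingOperation also reports progress
    # and parses per-endpoint response models.
    while True:
        response = await fetch_status()
        if response.status in terminal:
            return response
        await asyncio.sleep(interval)  # non-blocking, unlike time.sleep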
- def api_call( + async def api_call( self, image: torch.Tensor, effect_scene: KlingSingleImageEffectsScene, @@ -1217,7 +1217,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): unique_id: Optional[str] = None, **kwargs, ): - return super().api_call( + return await super().api_call( dual_character=False, effect_scene=effect_scene, model_name=model_name, @@ -1253,11 +1253,11 @@ class KlingLipSyncBase(KlingNodeBase): f"Text is too long. Maximum length is {MAX_PROMPT_LENGTH_LIP_SYNC} characters." ) - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> KlingLipSyncResponse: """Polls the Kling API endpoint until the task reaches a terminal state.""" - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_LIP_SYNC}/{task_id}", @@ -1270,7 +1270,7 @@ class KlingLipSyncBase(KlingNodeBase): node_id=node_id, ) - def api_call( + async def api_call( self, video: VideoInput, audio: Optional[AudioInput] = None, @@ -1287,12 +1287,12 @@ class KlingLipSyncBase(KlingNodeBase): self.validate_lip_sync_video(video) # Upload video to Comfy API and get download URL - video_url = upload_video_to_comfyapi(video, auth_kwargs=kwargs) + video_url = await upload_video_to_comfyapi(video, auth_kwargs=kwargs) logging.info("Uploaded video to Comfy API. URL: %s", video_url) # Upload the audio file to Comfy API and get download URL if audio: - audio_url = upload_audio_to_comfyapi(audio, auth_kwargs=kwargs) + audio_url = await upload_audio_to_comfyapi(audio, auth_kwargs=kwargs) logging.info("Uploaded audio to Comfy API. URL: %s", audio_url) else: audio_url = None @@ -1319,17 +1319,17 @@ class KlingLipSyncBase(KlingNodeBase): auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) validate_video_result_response(final_response) video = get_video_from_response(final_response) - return video_result_to_node_output(video) + return await video_result_to_node_output(video) class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): @@ -1357,7 +1357,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length." - def api_call( + async def api_call( self, video: VideoInput, audio: AudioInput, @@ -1365,7 +1365,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): unique_id: Optional[str] = None, **kwargs, ): - return super().api_call( + return await super().api_call( video=video, audio=audio, voice_language=voice_language, @@ -1469,7 +1469,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length." 
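In KlingLipSyncBase the video upload is awaited before the optional audio upload, so the two requests run back to back. Since they are independent, they could also be scheduled concurrently; a sketch of that variant, with upload_video and upload_audio standing in for upload_video_to_comfyapi and upload_audio_to_comfyapi:

import asyncio

async def upload_media(video, audio, upload_video, upload_audio):
    if audio is not None:
        # gather() runs both uploads concurrently and preserves order.
        video_url, audio_url = await asyncio.gather(
            upload_video(video), upload_audio(audio)
        )
        return video_url, audio_url
    return await upload_video(video), None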
- def api_call( + async def api_call( self, video: VideoInput, text: str, @@ -1479,7 +1479,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase): **kwargs, ): voice_id, voice_language = KlingLipSyncTextToVideoNode.get_voice_config()[voice] - return super().api_call( + return await super().api_call( video=video, text=text, voice_language=voice_language, @@ -1533,10 +1533,10 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background." - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> KlingVirtualTryOnResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_VIRTUAL_TRY_ON}/{task_id}", @@ -1549,7 +1549,7 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): node_id=node_id, ) - def api_call( + async def api_call( self, human_image: torch.Tensor, cloth_image: torch.Tensor, @@ -1572,17 +1572,17 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) validate_image_result_response(final_response) images = get_images_from_response(final_response) - return (image_result_to_node_output(images),) + return (await image_result_to_node_output(images),) class KlingImageGenerationNode(KlingImageGenerationBase): @@ -1655,13 +1655,13 @@ class KlingImageGenerationNode(KlingImageGenerationBase): DESCRIPTION = "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image." 
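The batch-image path at the top of this file awaits download_url_to_image_tensor once per URL inside a list comprehension, which downloads sequentially. An order-preserving concurrent variant is possible with asyncio.gather; download_one below is a stand-in for that helper:

import asyncio
import torch

async def download_image_batch(urls, download_one):
    tensors = await asyncio.gather(*(download_one(str(u)) for u in urls))
    # Mirrors the diff: a single image passes through, batches are concatenated.
    return tensors[0] if len(tensors) == 1 else torch.cat(tensors)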
- def get_response( + async def get_response( self, task_id: str, auth_kwargs: Optional[dict[str, str]], node_id: Optional[str] = None, ) -> KlingImageGenerationsResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_IMAGE_GENERATIONS}/{task_id}", @@ -1674,7 +1674,7 @@ class KlingImageGenerationNode(KlingImageGenerationBase): node_id=node_id, ) - def api_call( + async def api_call( self, model_name: KlingImageGenModelName, prompt: str, @@ -1714,17 +1714,17 @@ class KlingImageGenerationNode(KlingImageGenerationBase): auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) validate_image_result_response(final_response) images = get_images_from_response(final_response) - return (image_result_to_node_output(images),) + return (await image_result_to_node_output(images),) NODE_CLASS_MAPPINGS = { diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index 525dc38e6..b3c32bed5 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -38,7 +38,7 @@ from comfy_api_nodes.apinode_utils import ( ) from server import PromptServer -import requests +import aiohttp import torch from io import BytesIO @@ -217,7 +217,7 @@ class LumaImageGenerationNode(ComfyNodeABC): }, } - def api_call( + async def api_call( self, prompt: str, model: str, @@ -234,19 +234,19 @@ class LumaImageGenerationNode(ComfyNodeABC): # handle image_luma_ref api_image_ref = None if image_luma_ref is not None: - api_image_ref = self._convert_luma_refs( + api_image_ref = await self._convert_luma_refs( image_luma_ref, max_refs=4, auth_kwargs=kwargs, ) # handle style_luma_ref api_style_ref = None if style_image is not None: - api_style_ref = self._convert_style_image( + api_style_ref = await self._convert_style_image( style_image, weight=style_image_weight, auth_kwargs=kwargs, ) # handle character_ref images character_ref = None if character_image is not None: - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( character_image, max_images=4, auth_kwargs=kwargs, ) character_ref = LumaCharacterRef( @@ -270,7 +270,7 @@ class LumaImageGenerationNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - response_api: LumaGeneration = operation.execute() + response_api: LumaGeneration = await operation.execute() operation = PollingOperation( poll_endpoint=ApiEndpoint( @@ -286,19 +286,20 @@ class LumaImageGenerationNode(ComfyNodeABC): node_id=unique_id, auth_kwargs=kwargs, ) - response_poll = operation.execute() + response_poll = await operation.execute() - img_response = requests.get(response_poll.assets.image) - img = process_image_response(img_response) + async with aiohttp.ClientSession() as session: + async with session.get(response_poll.assets.image) as img_response: + img = process_image_response(await img_response.content.read()) return (img,) - def _convert_luma_refs( + async def _convert_luma_refs( self, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None ): luma_urls = [] ref_count = 0 for ref in luma_ref.refs: - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( ref.image, max_images=1, auth_kwargs=auth_kwargs ) 
luma_urls.append(download_urls[0]) @@ -307,13 +308,13 @@ class LumaImageGenerationNode(ComfyNodeABC): break return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs) - def _convert_style_image( + async def _convert_style_image( self, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None ): chain = LumaReferenceChain( first_ref=LumaReference(image=style_image, weight=weight) ) - return self._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) + return await self._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) class LumaImageModifyNode(ComfyNodeABC): @@ -370,7 +371,7 @@ class LumaImageModifyNode(ComfyNodeABC): }, } - def api_call( + async def api_call( self, prompt: str, model: str, @@ -381,7 +382,7 @@ class LumaImageModifyNode(ComfyNodeABC): **kwargs, ): # first, upload image - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( image, max_images=1, auth_kwargs=kwargs, ) image_url = download_urls[0] @@ -402,7 +403,7 @@ class LumaImageModifyNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - response_api: LumaGeneration = operation.execute() + response_api: LumaGeneration = await operation.execute() operation = PollingOperation( poll_endpoint=ApiEndpoint( @@ -418,10 +419,11 @@ class LumaImageModifyNode(ComfyNodeABC): node_id=unique_id, auth_kwargs=kwargs, ) - response_poll = operation.execute() + response_poll = await operation.execute() - img_response = requests.get(response_poll.assets.image) - img = process_image_response(img_response) + async with aiohttp.ClientSession() as session: + async with session.get(response_poll.assets.image) as img_response: + img = process_image_response(await img_response.content.read()) return (img,) @@ -494,7 +496,7 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): }, } - def api_call( + async def api_call( self, prompt: str, model: str, @@ -529,7 +531,7 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - response_api: LumaGeneration = operation.execute() + response_api: LumaGeneration = await operation.execute() if unique_id: PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id) @@ -549,10 +551,11 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): estimated_duration=LUMA_T2V_AVERAGE_DURATION, auth_kwargs=kwargs, ) - response_poll = operation.execute() + response_poll = await operation.execute() - vid_response = requests.get(response_poll.assets.video) - return (VideoFromFile(BytesIO(vid_response.content)),) + async with aiohttp.ClientSession() as session: + async with session.get(response_poll.assets.video) as vid_response: + return (VideoFromFile(BytesIO(await vid_response.content.read())),) class LumaImageToVideoGenerationNode(ComfyNodeABC): @@ -626,7 +629,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): }, } - def api_call( + async def api_call( self, prompt: str, model: str, @@ -644,7 +647,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): raise Exception( "At least one of first_image and last_image requires an input." 
) - keyframes = self._convert_to_keyframes(first_image, last_image, auth_kwargs=kwargs) + keyframes = await self._convert_to_keyframes(first_image, last_image, auth_kwargs=kwargs) duration = duration if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None @@ -667,7 +670,7 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - response_api: LumaGeneration = operation.execute() + response_api: LumaGeneration = await operation.execute() if unique_id: PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id) @@ -687,12 +690,13 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): estimated_duration=LUMA_I2V_AVERAGE_DURATION, auth_kwargs=kwargs, ) - response_poll = operation.execute() + response_poll = await operation.execute() - vid_response = requests.get(response_poll.assets.video) - return (VideoFromFile(BytesIO(vid_response.content)),) + async with aiohttp.ClientSession() as session: + async with session.get(response_poll.assets.video) as vid_response: + return (VideoFromFile(BytesIO(await vid_response.content.read())),) - def _convert_to_keyframes( + async def _convert_to_keyframes( self, first_image: torch.Tensor = None, last_image: torch.Tensor = None, @@ -703,12 +707,12 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): frame0 = None frame1 = None if first_image is not None: - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( first_image, max_images=1, auth_kwargs=auth_kwargs, ) frame0 = LumaImageReference(type="image", url=download_urls[0]) if last_image is not None: - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( last_image, max_images=1, auth_kwargs=auth_kwargs, ) frame1 = LumaImageReference(type="image", url=download_urls[0]) diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index 9b46636db..58d2ed90c 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -86,7 +86,7 @@ class MinimaxTextToVideoNode: API_NODE = True OUTPUT_NODE = True - def generate_video( + async def generate_video( self, prompt_text, seed=0, @@ -104,12 +104,12 @@ class MinimaxTextToVideoNode: # upload image, if passed in image_url = None if image is not None: - image_url = upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs)[0] + image_url = (await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs))[0] # TODO: figure out how to deal with subject properly, API returns invalid params when using S2V-01 model subject_reference = None if subject is not None: - subject_url = upload_images_to_comfyapi(subject, max_images=1, auth_kwargs=kwargs)[0] + subject_url = (await upload_images_to_comfyapi(subject, max_images=1, auth_kwargs=kwargs))[0] subject_reference = [SubjectReferenceItem(image=subject_url)] @@ -130,7 +130,7 @@ class MinimaxTextToVideoNode: ), auth_kwargs=kwargs, ) - response = video_generate_operation.execute() + response = await video_generate_operation.execute() task_id = response.task_id if not task_id: @@ -151,7 +151,7 @@ class MinimaxTextToVideoNode: node_id=unique_id, auth_kwargs=kwargs, ) - task_result = video_generate_operation.execute() + task_result = await video_generate_operation.execute() file_id = task_result.file_id if file_id is None: @@ -167,7 +167,7 @@ class MinimaxTextToVideoNode: request=EmptyRequest(), auth_kwargs=kwargs, ) - file_result = file_retrieve_operation.execute() 
+ file_result = await file_retrieve_operation.execute() file_url = file_result.file.download_url if file_url is None: @@ -182,7 +182,7 @@ class MinimaxTextToVideoNode: message = f"Result URL: {file_url}" PromptServer.instance.send_progress_text(message, unique_id) - video_io = download_url_to_bytesio(file_url) + video_io = await download_url_to_bytesio(file_url) if video_io is None: error_msg = f"Failed to download video from {file_url}" logging.error(error_msg) diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 789fcef02..164ca3ea5 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -95,14 +95,14 @@ def get_video_url_from_response(response) -> Optional[str]: return None -def poll_until_finished( +async def poll_until_finished( auth_kwargs: dict[str, str], api_endpoint: ApiEndpoint[Any, R], result_url_extractor: Optional[Callable[[R], str]] = None, node_id: Optional[str] = None, ) -> R: """Polls the Moonvalley API endpoint until the task reaches a terminal state, then returns the response.""" - return PollingOperation( + return await PollingOperation( poll_endpoint=api_endpoint, completed_statuses=[ "completed", @@ -394,10 +394,10 @@ class BaseMoonvalleyVideoNode: else: return control_map["Motion Transfer"] - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> MoonvalleyPromptResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{API_PROMPTS_ENDPOINT}/{task_id}", @@ -507,7 +507,7 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): RETURN_NAMES = ("video",) DESCRIPTION = "Moonvalley Marey Image to Video Node" - def generate( + async def generate( self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs ): image = kwargs.get("image", None) @@ -532,9 +532,9 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): # Get MIME type from tensor - assuming PNG format for image tensors mime_type = "image/png" - image_url = upload_images_to_comfyapi( + image_url = (await upload_images_to_comfyapi( image, max_images=1, auth_kwargs=kwargs, mime_type=mime_type - )[0] + ))[0] request = MoonvalleyTextToVideoRequest( image_url=image_url, prompt_text=prompt, inference_params=inference_params @@ -549,14 +549,14 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): request=request, auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) - video = download_url_to_video_output(final_response.output_url) + video = await download_url_to_video_output(final_response.output_url) return (video,) @@ -609,7 +609,7 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): RETURN_TYPES = ("VIDEO",) RETURN_NAMES = ("video",) - def generate( + async def generate( self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs ): video = kwargs.get("video") @@ -620,7 +620,7 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): video_url = "" if video: validated_video = validate_video_to_video_input(video) - video_url = upload_video_to_comfyapi(validated_video, auth_kwargs=kwargs) + video_url = await upload_video_to_comfyapi(validated_video, auth_kwargs=kwargs) 
control_type = kwargs.get("control_type") motion_intensity = kwargs.get("motion_intensity") @@ -658,15 +658,15 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): request=request, auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) - video = download_url_to_video_output(final_response.output_url) + video = await download_url_to_video_output(final_response.output_url) return (video,) @@ -688,7 +688,7 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): del input_types["optional"][param] return input_types - def generate( + async def generate( self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs ): validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) @@ -717,15 +717,15 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): request=request, auth_kwargs=kwargs, ) - task_creation_response = initial_operation.execute() + task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.id - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) - video = download_url_to_video_output(final_response.output_url) + video = await download_url_to_video_output(final_response.output_url) return (video,) diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index be1d2de4a..ab3c5363b 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -163,7 +163,7 @@ class OpenAIDalle2(ComfyNodeABC): DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call( + async def api_call( self, prompt, seed=0, @@ -233,9 +233,9 @@ class OpenAIDalle2(ComfyNodeABC): auth_kwargs=kwargs, ) - response = operation.execute() + response = await operation.execute() - img_tensor = validate_and_cast_response(response, node_id=unique_id) + img_tensor = await validate_and_cast_response(response, node_id=unique_id) return (img_tensor,) @@ -311,7 +311,7 @@ class OpenAIDalle3(ComfyNodeABC): DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call( + async def api_call( self, prompt, seed=0, @@ -343,9 +343,9 @@ class OpenAIDalle3(ComfyNodeABC): auth_kwargs=kwargs, ) - response = operation.execute() + response = await operation.execute() - img_tensor = validate_and_cast_response(response, node_id=unique_id) + img_tensor = await validate_and_cast_response(response, node_id=unique_id) return (img_tensor,) @@ -446,7 +446,7 @@ class OpenAIGPTImage1(ComfyNodeABC): DESCRIPTION = cleandoc(__doc__ or "") API_NODE = True - def api_call( + async def api_call( self, prompt, seed=0, @@ -537,9 +537,9 @@ class OpenAIGPTImage1(ComfyNodeABC): auth_kwargs=kwargs, ) - response = operation.execute() + response = await operation.execute() - img_tensor = validate_and_cast_response(response, node_id=unique_id) + img_tensor = await validate_and_cast_response(response, node_id=unique_id) return (img_tensor,) @@ -623,7 +623,7 @@ class OpenAIChatNode(OpenAITextNode): DESCRIPTION = "Generate text responses from an OpenAI model." 
- def get_result_response( + async def get_result_response( self, response_id: str, include: Optional[list[Includable]] = None, @@ -639,7 +639,7 @@ class OpenAIChatNode(OpenAITextNode): creation above for more information. """ - return PollingOperation( + return await PollingOperation( poll_endpoint=ApiEndpoint( path=f"{RESPONSES_ENDPOINT}/{response_id}", method=HttpMethod.GET, @@ -784,7 +784,7 @@ class OpenAIChatNode(OpenAITextNode): self.history[session_id] = new_history - def api_call( + async def api_call( self, prompt: str, persist_context: bool, @@ -815,7 +815,7 @@ class OpenAIChatNode(OpenAITextNode): previous_response_id = None # Create response - create_response = SynchronousOperation( + create_response = await SynchronousOperation( endpoint=ApiEndpoint( path=RESPONSES_ENDPOINT, method=HttpMethod.POST, @@ -848,7 +848,7 @@ class OpenAIChatNode(OpenAITextNode): response_id = create_response.id # Get result output - result_response = self.get_result_response(response_id, auth_kwargs=kwargs) + result_response = await self.get_result_response(response_id, auth_kwargs=kwargs) output_text = self.parse_output_text_from_response(result_response) # Update history diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index 1cc708564..a8dc43cb3 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -122,7 +122,7 @@ class PikaNodeBase(ComfyNodeABC): FUNCTION = "api_call" RETURN_TYPES = ("VIDEO",) - def poll_for_task_status( + async def poll_for_task_status( self, task_id: str, auth_kwargs: Optional[dict[str, str]] = None, @@ -152,9 +152,9 @@ class PikaNodeBase(ComfyNodeABC): node_id=node_id, estimated_duration=60 ) - return polling_operation.execute() + return await polling_operation.execute() - def execute_task( + async def execute_task( self, initial_operation: SynchronousOperation[R, PikaGenerateResponse], auth_kwargs: Optional[dict[str, str]] = None, @@ -169,14 +169,14 @@ class PikaNodeBase(ComfyNodeABC): Returns: A tuple containing the video file as a VIDEO output. """ - initial_response = initial_operation.execute() + initial_response = await initial_operation.execute() if not is_valid_initial_response(initial_response): error_msg = f"Pika initial request failed. Code: {initial_response.code}, Message: {initial_response.message}, Data: {initial_response.data}" logging.error(error_msg) raise PikaApiError(error_msg) task_id = initial_response.video_id - final_response = self.poll_for_task_status(task_id, auth_kwargs) + final_response = await self.poll_for_task_status(task_id, auth_kwargs) if not is_valid_video_response(final_response): error_msg = ( f"Pika task {task_id} succeeded but no video data found in response." @@ -187,7 +187,7 @@ class PikaNodeBase(ComfyNodeABC): video_url = str(final_response.url) logging.info("Pika task %s succeeded. Video URL: %s", task_id, video_url) - return (download_url_to_video_output(video_url),) + return (await download_url_to_video_output(video_url),) class PikaImageToVideoV2_2(PikaNodeBase): @@ -212,7 +212,7 @@ class PikaImageToVideoV2_2(PikaNodeBase): DESCRIPTION = "Sends an image and prompt to the Pika API v2.2 to generate a video." 
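Across these files the pattern requests.get(url) followed by reading .content becomes an aiohttp session, an awaited GET, and an awaited body read. A minimal helper in that style, offered as a sketch rather than the actual download_url_to_bytesio from apinode_utils:

import aiohttp
from io import BytesIO

async def fetch_bytesio(url: str, timeout: float = 300.0) -> BytesIO:
    async with aiohttp.ClientSession(
        timeout=aiohttp.ClientTimeout(total=timeout)
    ) as session:
        async with session.get(url) as resp:
            resp.raise_for_status()            # surface HTTP errors early
            return BytesIO(await resp.read())  # buffer body for VideoFromFile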
- def api_call( + async def api_call( self, image: torch.Tensor, prompt_text: str, @@ -251,7 +251,7 @@ class PikaImageToVideoV2_2(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaTextToVideoNodeV2_2(PikaNodeBase): @@ -281,7 +281,7 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): DESCRIPTION = "Sends a text prompt to the Pika API v2.2 to generate a video." - def api_call( + async def api_call( self, prompt_text: str, negative_prompt: str, @@ -311,7 +311,7 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): content_type="application/x-www-form-urlencoded", ) - return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaScenesV2_2(PikaNodeBase): @@ -361,7 +361,7 @@ class PikaScenesV2_2(PikaNodeBase): DESCRIPTION = "Combine your images to create a video with the objects in them. Upload multiple images as ingredients and generate a high-quality video that incorporates all of them." - def api_call( + async def api_call( self, prompt_text: str, negative_prompt: str, @@ -420,7 +420,7 @@ class PikaScenesV2_2(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikAdditionsNode(PikaNodeBase): @@ -462,7 +462,7 @@ class PikAdditionsNode(PikaNodeBase): DESCRIPTION = "Add any object or image into your video. Upload a video and specify what you'd like to add to create a seamlessly integrated result." - def api_call( + async def api_call( self, video: VideoInput, image: torch.Tensor, @@ -481,10 +481,10 @@ class PikAdditionsNode(PikaNodeBase): image_bytes_io = tensor_to_bytesio(image) image_bytes_io.seek(0) - pika_files = [ - ("video", ("video.mp4", video_bytes_io, "video/mp4")), - ("image", ("image.png", image_bytes_io, "image/png")), - ] + pika_files = { + "video": ("video.mp4", video_bytes_io, "video/mp4"), + "image": ("image.png", image_bytes_io, "image/png"), + } # Prepare non-file data pika_request_data = PikaBodyGeneratePikadditionsGeneratePikadditionsPost( @@ -506,7 +506,7 @@ class PikAdditionsNode(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaSwapsNode(PikaNodeBase): @@ -558,7 +558,7 @@ class PikaSwapsNode(PikaNodeBase): DESCRIPTION = "Swap out any object or region of your video with a new image or object. Define areas to replace either with a mask or coordinates." 
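PikAdditions and PikaSwaps switch pika_files from a list of (field, spec) tuples to a dict, which suits multipart bodies where each field name appears once. If the request layer builds aiohttp.FormData, folding that mapping in might look like the following; this adapter is hypothetical, not the actual SynchronousOperation internals:

import aiohttp

def build_form(data: dict, files: dict) -> aiohttp.FormData:
    form = aiohttp.FormData()
    for name, value in data.items():
        form.add_field(name, str(value))
    for name, (filename, fileobj, mime) in files.items():
        # (filename, file object, content type), as in the diff's dicts.
        form.add_field(name, fileobj, filename=filename, content_type=mime)
    return form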
RETURN_TYPES = ("VIDEO",) - def api_call( + async def api_call( self, video: VideoInput, image: torch.Tensor, @@ -587,11 +587,11 @@ class PikaSwapsNode(PikaNodeBase): image_bytes_io = tensor_to_bytesio(image) image_bytes_io.seek(0) - pika_files = [ - ("video", ("video.mp4", video_bytes_io, "video/mp4")), - ("image", ("image.png", image_bytes_io, "image/png")), - ("modifyRegionMask", ("mask.png", mask_bytes_io, "image/png")), - ] + pika_files = { + "video": ("video.mp4", video_bytes_io, "video/mp4"), + "image": ("image.png", image_bytes_io, "image/png"), + "modifyRegionMask": ("mask.png", mask_bytes_io, "image/png"), + } # Prepare non-file data pika_request_data = PikaBodyGeneratePikaswapsGeneratePikaswapsPost( @@ -613,7 +613,7 @@ class PikaSwapsNode(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaffectsNode(PikaNodeBase): @@ -664,7 +664,7 @@ class PikaffectsNode(PikaNodeBase): DESCRIPTION = "Generate a video with a specific Pikaffect. Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear" - def api_call( + async def api_call( self, image: torch.Tensor, pikaffect: str, @@ -693,7 +693,7 @@ class PikaffectsNode(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) class PikaStartEndFrameNode2_2(PikaNodeBase): @@ -718,7 +718,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): DESCRIPTION = "Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them." 
- def api_call( + async def api_call( self, image_start: torch.Tensor, image_end: torch.Tensor, @@ -732,10 +732,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): ) -> tuple[VideoFromFile]: pika_files = [ - ( - "keyFrames", - ("image_start.png", tensor_to_bytesio(image_start), "image/png"), - ), + ("keyFrames", ("image_start.png", tensor_to_bytesio(image_start), "image/png")), ("keyFrames", ("image_end.png", tensor_to_bytesio(image_end), "image/png")), ] @@ -758,7 +755,7 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): auth_kwargs=kwargs, ) - return self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) NODE_CLASS_MAPPINGS = { diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index ef4a9a802..7c5a52feb 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -30,7 +30,7 @@ from comfy.comfy_types.node_typing import IO, ComfyNodeABC from comfy_api.input_impl import VideoFromFile import torch -import requests +import aiohttp from io import BytesIO @@ -47,7 +47,7 @@ def get_video_url_from_response( return str(response.Resp.url) -def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): +async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): # first, upload image to Pixverse and get image id to use in actual generation call files = {"image": tensor_to_bytesio(image)} operation = SynchronousOperation( @@ -62,7 +62,7 @@ def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): content_type="multipart/form-data", auth_kwargs=auth_kwargs, ) - response_upload: PixverseImageUploadResponse = operation.execute() + response_upload: PixverseImageUploadResponse = await operation.execute() if response_upload.Resp is None: raise Exception( @@ -164,7 +164,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): }, } - def api_call( + async def api_call( self, prompt: str, aspect_ratio: str, @@ -205,7 +205,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - response_api = operation.execute() + response_api = await operation.execute() if response_api.Resp is None: raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") @@ -229,11 +229,11 @@ class PixverseTextToVideoNode(ComfyNodeABC): result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_T2V, ) - response_poll = operation.execute() + response_poll = await operation.execute() - vid_response = requests.get(response_poll.Resp.url) - - return (VideoFromFile(BytesIO(vid_response.content)),) + async with aiohttp.ClientSession() as session: + async with session.get(response_poll.Resp.url) as vid_response: + return (VideoFromFile(BytesIO(await vid_response.content.read())),) class PixverseImageToVideoNode(ComfyNodeABC): @@ -302,7 +302,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): }, } - def api_call( + async def api_call( self, image: torch.Tensor, prompt: str, @@ -316,7 +316,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): **kwargs, ): validate_string(prompt, strip_whitespace=False) - img_id = upload_image_to_pixverse(image, auth_kwargs=kwargs) + img_id = await upload_image_to_pixverse(image, auth_kwargs=kwargs) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -345,7 +345,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - response_api = operation.execute() + response_api = await 
operation.execute() if response_api.Resp is None: raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") @@ -369,10 +369,11 @@ class PixverseImageToVideoNode(ComfyNodeABC): result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_I2V, ) - response_poll = operation.execute() + response_poll = await operation.execute() - vid_response = requests.get(response_poll.Resp.url) - return (VideoFromFile(BytesIO(vid_response.content)),) + async with aiohttp.ClientSession() as session: + async with session.get(response_poll.Resp.url) as vid_response: + return (VideoFromFile(BytesIO(await vid_response.content.read())),) class PixverseTransitionVideoNode(ComfyNodeABC): @@ -436,7 +437,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC): }, } - def api_call( + async def api_call( self, first_frame: torch.Tensor, last_frame: torch.Tensor, @@ -450,8 +451,8 @@ class PixverseTransitionVideoNode(ComfyNodeABC): **kwargs, ): validate_string(prompt, strip_whitespace=False) - first_frame_id = upload_image_to_pixverse(first_frame, auth_kwargs=kwargs) - last_frame_id = upload_image_to_pixverse(last_frame, auth_kwargs=kwargs) + first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=kwargs) + last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=kwargs) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -480,7 +481,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC): ), auth_kwargs=kwargs, ) - response_api = operation.execute() + response_api = await operation.execute() if response_api.Resp is None: raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") @@ -504,10 +505,11 @@ class PixverseTransitionVideoNode(ComfyNodeABC): result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_T2V, ) - response_poll = operation.execute() + response_poll = await operation.execute() - vid_response = requests.get(response_poll.Resp.url) - return (VideoFromFile(BytesIO(vid_response.content)),) + async with aiohttp.ClientSession() as session: + async with session.get(response_poll.Resp.url) as vid_response: + return (VideoFromFile(BytesIO(await vid_response.content.read())),) NODE_CLASS_MAPPINGS = { diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index e369c4b7e..c8516b368 100644 --- a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -37,7 +37,7 @@ from io import BytesIO from PIL import UnidentifiedImageError -def handle_recraft_file_request( +async def handle_recraft_file_request( image: torch.Tensor, path: str, mask: torch.Tensor=None, @@ -71,13 +71,13 @@ def handle_recraft_file_request( auth_kwargs=auth_kwargs, multipart_parser=recraft_multipart_parser, ) - response: RecraftImageGenerationResponse = operation.execute() + response: RecraftImageGenerationResponse = await operation.execute() all_bytesio = [] if response.image is not None: - all_bytesio.append(download_url_to_bytesio(response.image.url, timeout=timeout)) + all_bytesio.append(await download_url_to_bytesio(response.image.url, timeout=timeout)) else: for data in response.data: - all_bytesio.append(download_url_to_bytesio(data.url, timeout=timeout)) + all_bytesio.append(await download_url_to_bytesio(data.url, timeout=timeout)) return all_bytesio @@ -395,7 +395,7 @@ class RecraftTextToImageNode: }, } - def api_call( + async def api_call( self, prompt: str, size: str, @@ -439,7 +439,7 @@ class RecraftTextToImageNode: ), 
auth_kwargs=kwargs, ) - response: RecraftImageGenerationResponse = operation.execute() + response: RecraftImageGenerationResponse = await operation.execute() images = [] urls = [] for data in response.data: @@ -451,7 +451,7 @@ class RecraftTextToImageNode: f"Result URL: {urls_string}", unique_id ) image = bytesio_to_image_tensor( - download_url_to_bytesio(data.url, timeout=1024) + await download_url_to_bytesio(data.url, timeout=1024) ) if len(image.shape) < 4: image = image.unsqueeze(0) @@ -538,7 +538,7 @@ class RecraftImageToImageNode: }, } - def api_call( + async def api_call( self, image: torch.Tensor, prompt: str, @@ -578,7 +578,7 @@ class RecraftImageToImageNode: total = image.shape[0] pbar = ProgressBar(total) for i in range(total): - sub_bytes = handle_recraft_file_request( + sub_bytes = await handle_recraft_file_request( image=image[i], path="/proxy/recraft/images/imageToImage", request=request, @@ -654,7 +654,7 @@ class RecraftImageInpaintingNode: }, } - def api_call( + async def api_call( self, image: torch.Tensor, mask: torch.Tensor, @@ -690,7 +690,7 @@ class RecraftImageInpaintingNode: total = image.shape[0] pbar = ProgressBar(total) for i in range(total): - sub_bytes = handle_recraft_file_request( + sub_bytes = await handle_recraft_file_request( image=image[i], mask=mask[i:i+1], path="/proxy/recraft/images/inpaint", @@ -779,7 +779,7 @@ class RecraftTextToVectorNode: }, } - def api_call( + async def api_call( self, prompt: str, substyle: str, @@ -821,7 +821,7 @@ class RecraftTextToVectorNode: ), auth_kwargs=kwargs, ) - response: RecraftImageGenerationResponse = operation.execute() + response: RecraftImageGenerationResponse = await operation.execute() svg_data = [] urls = [] for data in response.data: @@ -831,7 +831,7 @@ class RecraftTextToVectorNode: PromptServer.instance.send_progress_text( f"Result URL: {' '.join(urls)}", unique_id ) - svg_data.append(download_url_to_bytesio(data.url, timeout=1024)) + svg_data.append(await download_url_to_bytesio(data.url, timeout=1024)) return (SVG(svg_data),) @@ -861,7 +861,7 @@ class RecraftVectorizeImageNode: }, } - def api_call( + async def api_call( self, image: torch.Tensor, **kwargs, @@ -870,7 +870,7 @@ class RecraftVectorizeImageNode: total = image.shape[0] pbar = ProgressBar(total) for i in range(total): - sub_bytes = handle_recraft_file_request( + sub_bytes = await handle_recraft_file_request( image=image[i], path="/proxy/recraft/images/vectorize", auth_kwargs=kwargs, @@ -942,7 +942,7 @@ class RecraftReplaceBackgroundNode: }, } - def api_call( + async def api_call( self, image: torch.Tensor, prompt: str, @@ -973,7 +973,7 @@ class RecraftReplaceBackgroundNode: total = image.shape[0] pbar = ProgressBar(total) for i in range(total): - sub_bytes = handle_recraft_file_request( + sub_bytes = await handle_recraft_file_request( image=image[i], path="/proxy/recraft/images/replaceBackground", request=request, @@ -1011,7 +1011,7 @@ class RecraftRemoveBackgroundNode: }, } - def api_call( + async def api_call( self, image: torch.Tensor, **kwargs, @@ -1020,7 +1020,7 @@ class RecraftRemoveBackgroundNode: total = image.shape[0] pbar = ProgressBar(total) for i in range(total): - sub_bytes = handle_recraft_file_request( + sub_bytes = await handle_recraft_file_request( image=image[i], path="/proxy/recraft/images/removeBackground", auth_kwargs=kwargs, @@ -1062,7 +1062,7 @@ class RecraftCrispUpscaleNode: }, } - def api_call( + async def api_call( self, image: torch.Tensor, **kwargs, @@ -1071,7 +1071,7 @@ class RecraftCrispUpscaleNode: total = 
image.shape[0] pbar = ProgressBar(total) for i in range(total): - sub_bytes = handle_recraft_file_request( + sub_bytes = await handle_recraft_file_request( image=image[i], path=self.RECRAFT_PATH, auth_kwargs=kwargs, diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index 67f90478c..c89d087e5 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -9,11 +9,10 @@ from __future__ import annotations from inspect import cleandoc from comfy.comfy_types.node_typing import IO import folder_paths as comfy_paths -import requests +import aiohttp import os import datetime -import shutil -import time +import asyncio import io import logging import math @@ -66,7 +65,6 @@ def create_task_error(response: Rodin3DGenerateResponse): return hasattr(response, "error") - class Rodin3DAPI: """ Generate 3D Assets using Rodin API @@ -123,8 +121,8 @@ class Rodin3DAPI: else: return "Generating" - def CreateGenerateTask(self, images=None, seed=1, material="PBR", quality="medium", tier="Regular", mesh_mode="Quad", **kwargs): - if images == None: + async def create_generate_task(self, images=None, seed=1, material="PBR", quality="medium", tier="Regular", mesh_mode="Quad", **kwargs): + if images is None: raise Exception("Rodin 3D generate requires at least 1 image.") if len(images) >= 5: raise Exception("Rodin 3D generate requires up to 5 image.") @@ -155,7 +153,7 @@ class Rodin3DAPI: auth_kwargs=kwargs, ) - response = operation.execute() + response = await operation.execute() if create_task_error(response): error_message = f"Rodin3D Create 3D generate Task Failed. Message: {response.message}, error: {response.error}" @@ -168,7 +166,7 @@ class Rodin3DAPI: logging.info(f"[ Rodin3D API - Submit Jobs ] UUID: {task_uuid}") return task_uuid, subscription_key - def poll_for_task_status(self, subscription_key, **kwargs) -> Rodin3DCheckStatusResponse: + async def poll_for_task_status(self, subscription_key, **kwargs) -> Rodin3DCheckStatusResponse: path = "/proxy/rodin/api/v2/status" @@ -191,11 +189,9 @@ class Rodin3DAPI: logging.info("[ Rodin3D API - CheckStatus ] Generate Start!") - return poll_operation.execute() + return await poll_operation.execute() - - - def GetRodinDownloadList(self, uuid, **kwargs) -> Rodin3DDownloadResponse: + async def get_rodin_download_list(self, uuid, **kwargs) -> Rodin3DDownloadResponse: logging.info("[ Rodin3D API - Downloading ] Generate Successfully!") path = "/proxy/rodin/api/v2/download" @@ -212,53 +208,59 @@ class Rodin3DAPI: auth_kwargs=kwargs ) - return operation.execute() + return await operation.execute() - def GetQualityAndMode(self, PolyCount): - if PolyCount == "200K-Triangle": + def get_quality_mode(self, poly_count): + if poly_count == "200K-Triangle": mesh_mode = "Raw" quality = "medium" else: mesh_mode = "Quad" - if PolyCount == "4K-Quad": + if poly_count == "4K-Quad": quality = "extra-low" - elif PolyCount == "8K-Quad": + elif poly_count == "8K-Quad": quality = "low" - elif PolyCount == "18K-Quad": + elif poly_count == "18K-Quad": quality = "medium" - elif PolyCount == "50K-Quad": + elif poly_count == "50K-Quad": quality = "high" else: quality = "medium" return mesh_mode, quality - def DownLoadFiles(self, Url_List): - Save_path = os.path.join(comfy_paths.get_output_directory(), "Rodin3D", datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) - os.makedirs(Save_path, exist_ok=True) + async def download_files(self, url_list): + save_path = os.path.join(comfy_paths.get_output_directory(), "Rodin3D", 
datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) + os.makedirs(save_path, exist_ok=True) model_file_path = None - for Item in Url_List.list: - url = Item.url - file_name = Item.name - file_path = os.path.join(Save_path, file_name) - if file_path.endswith(".glb"): - model_file_path = file_path - logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}") - max_retries = 5 - for attempt in range(max_retries): - try: - with requests.get(url, stream=True) as r: - r.raise_for_status() - with open(file_path, "wb") as f: - shutil.copyfileobj(r.raw, f) - break - except Exception as e: - logging.info(f"[ Rodin3D API - download_files ] Error downloading {file_path}:{e}") - if attempt < max_retries - 1: - logging.info("Retrying...") - time.sleep(2) - else: - logging.info(f"[ Rodin3D API - download_files ] Failed to download {file_path} after {max_retries} attempts.") + async with aiohttp.ClientSession() as session: + for i in url_list.list: + url = i.url + file_name = i.name + file_path = os.path.join(save_path, file_name) + if file_path.endswith(".glb"): + model_file_path = file_path + logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}") + max_retries = 5 + for attempt in range(max_retries): + try: + async with session.get(url) as resp: + resp.raise_for_status() + with open(file_path, "wb") as f: + async for chunk in resp.content.iter_chunked(32 * 1024): + f.write(chunk) + break + except Exception as e: + logging.info(f"[ Rodin3D API - download_files ] Error downloading {file_path}:{e}") + if attempt < max_retries - 1: + logging.info("Retrying...") + await asyncio.sleep(2) + else: + logging.info( + "[ Rodin3D API - download_files ] Failed to download %s after %s attempts.", + file_path, + max_retries, + ) return model_file_path @@ -285,7 +287,7 @@ class Rodin3D_Regular(Rodin3DAPI): }, } - def api_call( + async def api_call( self, Images, Seed, @@ -298,14 +300,17 @@ class Rodin3D_Regular(Rodin3DAPI): m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality = self.GetQualityAndMode(Polygon_count) - task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=Material_Type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) - self.poll_for_task_status(subscription_key, **kwargs) - Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) - model = self.DownLoadFiles(Download_List) + mesh_mode, quality = self.get_quality_mode(Polygon_count) + task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, + quality=quality, tier=tier, mesh_mode=mesh_mode, + **kwargs) + await self.poll_for_task_status(subscription_key, **kwargs) + download_list = await self.get_rodin_download_list(task_uuid, **kwargs) + model = await self.download_files(download_list) return (model,) + class Rodin3D_Detail(Rodin3DAPI): @classmethod def INPUT_TYPES(s): @@ -328,7 +333,7 @@ class Rodin3D_Detail(Rodin3DAPI): }, } - def api_call( + async def api_call( self, Images, Seed, @@ -341,14 +346,17 @@ class Rodin3D_Detail(Rodin3DAPI): m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality = self.GetQualityAndMode(Polygon_count) - task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=Material_Type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) - self.poll_for_task_status(subscription_key, **kwargs) - Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) - model = 
self.DownLoadFiles(Download_List) + mesh_mode, quality = self.get_quality_mode(Polygon_count) + task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, + quality=quality, tier=tier, mesh_mode=mesh_mode, + **kwargs) + await self.poll_for_task_status(subscription_key, **kwargs) + download_list = await self.get_rodin_download_list(task_uuid, **kwargs) + model = await self.download_files(download_list) return (model,) + class Rodin3D_Smooth(Rodin3DAPI): @classmethod def INPUT_TYPES(s): @@ -371,7 +379,7 @@ class Rodin3D_Smooth(Rodin3DAPI): }, } - def api_call( + async def api_call( self, Images, Seed, @@ -384,14 +392,17 @@ class Rodin3D_Smooth(Rodin3DAPI): m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality = self.GetQualityAndMode(Polygon_count) - task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=Material_Type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) - self.poll_for_task_status(subscription_key, **kwargs) - Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) - model = self.DownLoadFiles(Download_List) + mesh_mode, quality = self.get_quality_mode(Polygon_count) + task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, + quality=quality, tier=tier, mesh_mode=mesh_mode, + **kwargs) + await self.poll_for_task_status(subscription_key, **kwargs) + download_list = await self.get_rodin_download_list(task_uuid, **kwargs) + model = await self.download_files(download_list) return (model,) + class Rodin3D_Sketch(Rodin3DAPI): @classmethod def INPUT_TYPES(s): @@ -423,7 +434,7 @@ class Rodin3D_Sketch(Rodin3DAPI): }, } - def api_call( + async def api_call( self, Images, Seed, @@ -437,10 +448,12 @@ class Rodin3D_Sketch(Rodin3DAPI): material_type = "PBR" quality = "medium" mesh_mode = "Quad" - task_uuid, subscription_key = self.CreateGenerateTask(images=m_images, seed=Seed, material=material_type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs) - self.poll_for_task_status(subscription_key, **kwargs) - Download_List = self.GetRodinDownloadList(task_uuid, **kwargs) - model = self.DownLoadFiles(Download_List) + task_uuid, subscription_key = await self.create_generate_task( + images=m_images, seed=Seed, material=material_type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs + ) + await self.poll_for_task_status(subscription_key, **kwargs) + download_list = await self.get_rodin_download_list(task_uuid, **kwargs) + model = await self.download_files(download_list) return (model,) diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py index af4b321f9..98024a9fa 100644 --- a/comfy_api_nodes/nodes_runway.py +++ b/comfy_api_nodes/nodes_runway.py @@ -99,14 +99,14 @@ def validate_input_image(image: torch.Tensor) -> bool: return image.shape[2] < 8000 and image.shape[1] < 8000 -def poll_until_finished( +async def poll_until_finished( auth_kwargs: dict[str, str], api_endpoint: ApiEndpoint[Any, TaskStatusResponse], estimated_duration: Optional[int] = None, node_id: Optional[str] = None, ) -> TaskStatusResponse: """Polls the Runway API endpoint until the task reaches a terminal state, then returns the response.""" - return PollingOperation( + return await PollingOperation( poll_endpoint=api_endpoint, completed_statuses=[ TaskStatus.SUCCEEDED.value, @@ -115,7 +115,7 @@ def poll_until_finished( TaskStatus.FAILED.value, TaskStatus.CANCELLED.value, ], - 
status_extractor=lambda response: (response.status.value), + status_extractor=lambda response: response.status.value, auth_kwargs=auth_kwargs, result_url_extractor=get_video_url_from_task_status, estimated_duration=estimated_duration, @@ -167,11 +167,11 @@ class RunwayVideoGenNode(ComfyNodeABC): ) return True - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> RunwayImageToVideoResponse: """Poll the task status until it is finished then get the response.""" - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_GET_TASK_STATUS}/{task_id}", @@ -183,7 +183,7 @@ class RunwayVideoGenNode(ComfyNodeABC): node_id=node_id, ) - def generate_video( + async def generate_video( self, request: RunwayImageToVideoRequest, auth_kwargs: dict[str, str], @@ -200,15 +200,15 @@ class RunwayVideoGenNode(ComfyNodeABC): auth_kwargs=auth_kwargs, ) - initial_response = initial_operation.execute() + initial_response = await initial_operation.execute() self.validate_task_created(initial_response) task_id = initial_response.id - final_response = self.get_response(task_id, auth_kwargs, node_id) + final_response = await self.get_response(task_id, auth_kwargs, node_id) self.validate_response(final_response) video_url = get_video_url_from_task_status(final_response) - return (download_url_to_video_output(video_url),) + return (await download_url_to_video_output(video_url),) class RunwayImageToVideoNodeGen3a(RunwayVideoGenNode): @@ -250,7 +250,7 @@ class RunwayImageToVideoNodeGen3a(RunwayVideoGenNode): }, } - def api_call( + async def api_call( self, prompt: str, start_frame: torch.Tensor, @@ -265,7 +265,7 @@ class RunwayImageToVideoNodeGen3a(RunwayVideoGenNode): validate_input_image(start_frame) # Upload image - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( start_frame, max_images=1, mime_type="image/png", @@ -274,7 +274,7 @@ class RunwayImageToVideoNodeGen3a(RunwayVideoGenNode): if len(download_urls) != 1: raise RunwayApiError("Failed to upload one or more images to comfy api.") - return self.generate_video( + return await self.generate_video( RunwayImageToVideoRequest( promptText=prompt, seed=seed, @@ -333,7 +333,7 @@ class RunwayImageToVideoNodeGen4(RunwayVideoGenNode): }, } - def api_call( + async def api_call( self, prompt: str, start_frame: torch.Tensor, @@ -348,7 +348,7 @@ class RunwayImageToVideoNodeGen4(RunwayVideoGenNode): validate_input_image(start_frame) # Upload image - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( start_frame, max_images=1, mime_type="image/png", @@ -357,7 +357,7 @@ class RunwayImageToVideoNodeGen4(RunwayVideoGenNode): if len(download_urls) != 1: raise RunwayApiError("Failed to upload one or more images to comfy api.") - return self.generate_video( + return await self.generate_video( RunwayImageToVideoRequest( promptText=prompt, seed=seed, @@ -382,10 +382,10 @@ class RunwayFirstLastFrameNode(RunwayVideoGenNode): DESCRIPTION = "Upload first and last keyframes, draft a prompt, and generate a video. More complex transitions, such as cases where the Last frame is completely different from the First frame, may benefit from the longer 10s duration. This would give the generation more time to smoothly transition between the two inputs. 
Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3." - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> RunwayImageToVideoResponse: - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_GET_TASK_STATUS}/{task_id}", @@ -437,7 +437,7 @@ class RunwayFirstLastFrameNode(RunwayVideoGenNode): }, } - def api_call( + async def api_call( self, prompt: str, start_frame: torch.Tensor, @@ -455,7 +455,7 @@ class RunwayFirstLastFrameNode(RunwayVideoGenNode): # Upload images stacked_input_images = image_tensor_pair_to_batch(start_frame, end_frame) - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( stacked_input_images, max_images=2, mime_type="image/png", @@ -464,7 +464,7 @@ class RunwayFirstLastFrameNode(RunwayVideoGenNode): if len(download_urls) != 2: raise RunwayApiError("Failed to upload one or more images to comfy api.") - return self.generate_video( + return await self.generate_video( RunwayImageToVideoRequest( promptText=prompt, seed=seed, @@ -543,11 +543,11 @@ class RunwayTextToImageNode(ComfyNodeABC): ) return True - def get_response( + async def get_response( self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None ) -> TaskStatusResponse: """Poll the task status until it is finished then get the response.""" - return poll_until_finished( + return await poll_until_finished( auth_kwargs, ApiEndpoint( path=f"{PATH_GET_TASK_STATUS}/{task_id}", @@ -559,7 +559,7 @@ class RunwayTextToImageNode(ComfyNodeABC): node_id=node_id, ) - def api_call( + async def api_call( self, prompt: str, ratio: str, @@ -574,7 +574,7 @@ class RunwayTextToImageNode(ComfyNodeABC): reference_images = None if reference_image is not None: validate_input_image(reference_image) - download_urls = upload_images_to_comfyapi( + download_urls = await upload_images_to_comfyapi( reference_image, max_images=1, mime_type="image/png", @@ -605,19 +605,19 @@ class RunwayTextToImageNode(ComfyNodeABC): auth_kwargs=kwargs, ) - initial_response = initial_operation.execute() + initial_response = await initial_operation.execute() self.validate_task_created(initial_response) task_id = initial_response.id # Poll for completion - final_response = self.get_response( + final_response = await self.get_response( task_id, auth_kwargs=kwargs, node_id=unique_id ) self.validate_response(final_response) # Download and return image image_url = get_image_url_from_task_status(final_response) - return (download_url_to_image_tensor(image_url),) + return (await download_url_to_image_tensor(image_url),) NODE_CLASS_MAPPINGS = { diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py index 02e421678..31309d831 100644 --- a/comfy_api_nodes/nodes_stability.py +++ b/comfy_api_nodes/nodes_stability.py @@ -124,7 +124,7 @@ class StabilityStableImageUltraNode: }, } - def api_call(self, prompt: str, aspect_ratio: str, style_preset: str, seed: int, + async def api_call(self, prompt: str, aspect_ratio: str, style_preset: str, seed: int, negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None, **kwargs): validate_string(prompt, strip_whitespace=False) @@ -163,7 +163,7 @@ class StabilityStableImageUltraNode: content_type="multipart/form-data", auth_kwargs=kwargs, ) - 
response_api = operation.execute() + response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stable Image Ultra generation failed: {response_api.finish_reason}.") @@ -257,7 +257,7 @@ class StabilityStableImageSD_3_5Node: }, } - def api_call(self, model: str, prompt: str, aspect_ratio: str, style_preset: str, seed: int, cfg_scale: float, + async def api_call(self, model: str, prompt: str, aspect_ratio: str, style_preset: str, seed: int, cfg_scale: float, negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None, **kwargs): validate_string(prompt, strip_whitespace=False) @@ -302,7 +302,7 @@ class StabilityStableImageSD_3_5Node: content_type="multipart/form-data", auth_kwargs=kwargs, ) - response_api = operation.execute() + response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stable Diffusion 3.5 Image generation failed: {response_api.finish_reason}.") @@ -374,7 +374,7 @@ class StabilityUpscaleConservativeNode: }, } - def api_call(self, image: torch.Tensor, prompt: str, creativity: float, seed: int, negative_prompt: str=None, + async def api_call(self, image: torch.Tensor, prompt: str, creativity: float, seed: int, negative_prompt: str=None, **kwargs): validate_string(prompt, strip_whitespace=False) image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read() @@ -403,7 +403,7 @@ class StabilityUpscaleConservativeNode: content_type="multipart/form-data", auth_kwargs=kwargs, ) - response_api = operation.execute() + response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stability Upscale Conservative generation failed: {response_api.finish_reason}.") @@ -480,7 +480,7 @@ class StabilityUpscaleCreativeNode: }, } - def api_call(self, image: torch.Tensor, prompt: str, creativity: float, style_preset: str, seed: int, negative_prompt: str=None, + async def api_call(self, image: torch.Tensor, prompt: str, creativity: float, style_preset: str, seed: int, negative_prompt: str=None, **kwargs): validate_string(prompt, strip_whitespace=False) image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read() @@ -512,7 +512,7 @@ class StabilityUpscaleCreativeNode: content_type="multipart/form-data", auth_kwargs=kwargs, ) - response_api = operation.execute() + response_api = await operation.execute() operation = PollingOperation( poll_endpoint=ApiEndpoint( @@ -527,7 +527,7 @@ class StabilityUpscaleCreativeNode: status_extractor=lambda x: get_async_dummy_status(x), auth_kwargs=kwargs, ) - response_poll: StabilityResultsGetResponse = operation.execute() + response_poll: StabilityResultsGetResponse = await operation.execute() if response_poll.finish_reason != "SUCCESS": raise Exception(f"Stability Upscale Creative generation failed: {response_poll.finish_reason}.") @@ -563,8 +563,7 @@ class StabilityUpscaleFastNode: }, } - def api_call(self, image: torch.Tensor, - **kwargs): + async def api_call(self, image: torch.Tensor, **kwargs): image_binary = tensor_to_bytesio(image, total_pixels=4096*4096).read() files = { @@ -583,7 +582,7 @@ class StabilityUpscaleFastNode: content_type="multipart/form-data", auth_kwargs=kwargs, ) - response_api = operation.execute() + response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stability Upscale Fast failed: {response_api.finish_reason}.") diff --git a/comfy_api_nodes/nodes_tripo.py b/comfy_api_nodes/nodes_tripo.py index 65f3b21f5..d08cf9007 100644 --- 
a/comfy_api_nodes/nodes_tripo.py +++ b/comfy_api_nodes/nodes_tripo.py @@ -37,8 +37,8 @@ from comfy_api_nodes.apinode_utils import ( ) -def upload_image_to_tripo(image, **kwargs): - urls = upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs) +async def upload_image_to_tripo(image, **kwargs): + urls = await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs) return TripoFileReference(TripoUrlReference(url=urls[0], type="jpeg")) def get_model_url_from_response(response: TripoTaskResponse) -> str: @@ -49,7 +49,7 @@ def get_model_url_from_response(response: TripoTaskResponse) -> str: raise RuntimeError(f"Failed to get model url from response: {response}") -def poll_until_finished( +async def poll_until_finished( kwargs: dict[str, str], response: TripoTaskResponse, ) -> tuple[str, str]: @@ -57,7 +57,7 @@ def poll_until_finished( if response.code != 0: raise RuntimeError(f"Failed to generate mesh: {response.error}") task_id = response.data.task_id - response_poll = PollingOperation( + response_poll = await PollingOperation( poll_endpoint=ApiEndpoint( path=f"/proxy/tripo/v2/openapi/task/{task_id}", method=HttpMethod.GET, @@ -80,7 +80,7 @@ def poll_until_finished( ).execute() if response_poll.data.status == TripoTaskStatus.SUCCESS: url = get_model_url_from_response(response_poll) - bytesio = download_url_to_bytesio(url) + bytesio = await download_url_to_bytesio(url) # Save the downloaded model file model_file = f"tripo_model_{task_id}.glb" with open(os.path.join(get_output_directory(), model_file), "wb") as f: @@ -88,6 +88,7 @@ def poll_until_finished( return model_file, task_id raise RuntimeError(f"Failed to generate mesh: {response_poll}") + class TripoTextToModelNode: """ Generates 3D models synchronously based on a text prompt using Tripo's API. 
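All of the Tripo conversions in this patch reduce to the same submit-then-poll shape: await the task-creation call, then await a polling loop until a terminal status. A minimal runnable sketch of that pattern follows; submit_task and fetch_status are hypothetical stand-ins for the real SynchronousOperation/PollingOperation plumbing, not part of the codebase:

import asyncio

async def submit_task(payload: dict) -> dict:
    # stand-in for SynchronousOperation(...).execute(); returns a task handle
    return {"task_id": "demo"}

async def fetch_status(task_id: str) -> dict:
    # stand-in for one PollingOperation round trip against the status endpoint
    return {"state": "success", "task_id": task_id}

async def submit_and_poll(payload: dict, poll_interval: float = 2.0) -> dict:
    task = await submit_task(payload)
    while True:
        status = await fetch_status(task["task_id"])
        if status["state"] in ("success", "failed", "cancelled"):
            return status
        await asyncio.sleep(poll_interval)  # yields the event loop instead of blocking it

print(asyncio.run(submit_and_poll({})))

This is why the `await` keywords propagate all the way up through `generate_mesh`: once the polling sleep is asynchronous, every caller in the chain has to be a coroutine too.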
@@ -126,11 +127,11 @@ class TripoTextToModelNode: API_NODE = True OUTPUT_NODE = True - def generate_mesh(self, prompt, negative_prompt=None, model_version=None, style=None, texture=None, pbr=None, image_seed=None, model_seed=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): + async def generate_mesh(self, prompt, negative_prompt=None, model_version=None, style=None, texture=None, pbr=None, image_seed=None, model_seed=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): style_enum = None if style == "None" else style if not prompt: raise RuntimeError("Prompt is required") - response = SynchronousOperation( + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -155,7 +156,8 @@ class TripoTextToModelNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, response) + return await poll_until_finished(kwargs, response) + class TripoImageToModelNode: """ @@ -195,12 +197,12 @@ class TripoImageToModelNode: API_NODE = True OUTPUT_NODE = True - def generate_mesh(self, image, model_version=None, style=None, texture=None, pbr=None, model_seed=None, orientation=None, texture_alignment=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): + async def generate_mesh(self, image, model_version=None, style=None, texture=None, pbr=None, model_seed=None, orientation=None, texture_alignment=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): style_enum = None if style == "None" else style if image is None: raise RuntimeError("Image is required") - tripo_file = upload_image_to_tripo(image, **kwargs) - response = SynchronousOperation( + tripo_file = await upload_image_to_tripo(image, **kwargs) + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -225,7 +227,8 @@ class TripoImageToModelNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, response) + return await poll_until_finished(kwargs, response) + class TripoMultiviewToModelNode: """ @@ -267,7 +270,7 @@ class TripoMultiviewToModelNode: API_NODE = True OUTPUT_NODE = True - def generate_mesh(self, image, image_left=None, image_back=None, image_right=None, model_version=None, orientation=None, texture=None, pbr=None, model_seed=None, texture_seed=None, texture_quality=None, texture_alignment=None, face_limit=None, quad=None, **kwargs): + async def generate_mesh(self, image, image_left=None, image_back=None, image_right=None, model_version=None, orientation=None, texture=None, pbr=None, model_seed=None, texture_seed=None, texture_quality=None, texture_alignment=None, face_limit=None, quad=None, **kwargs): if image is None: raise RuntimeError("front image for multiview is required") images = [] @@ -282,11 +285,11 @@ class TripoMultiviewToModelNode: for image_name in ["image", "image_left", "image_back", "image_right"]: image_ = image_dict[image_name] if image_ is not None: - tripo_file = upload_image_to_tripo(image_, **kwargs) + tripo_file = await upload_image_to_tripo(image_, **kwargs) images.append(tripo_file) else: images.append(TripoFileEmptyReference()) - response = SynchronousOperation( + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -309,7 +312,8 @@ class TripoMultiviewToModelNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, 
response) + return await poll_until_finished(kwargs, response) + class TripoTextureNode: @classmethod @@ -340,8 +344,8 @@ class TripoTextureNode: OUTPUT_NODE = True AVERAGE_DURATION = 80 - def generate_mesh(self, model_task_id, texture=None, pbr=None, texture_seed=None, texture_quality=None, texture_alignment=None, **kwargs): - response = SynchronousOperation( + async def generate_mesh(self, model_task_id, texture=None, pbr=None, texture_seed=None, texture_quality=None, texture_alignment=None, **kwargs): + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -358,7 +362,7 @@ class TripoTextureNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, response) + return await poll_until_finished(kwargs, response) class TripoRefineNode: @@ -387,8 +391,8 @@ class TripoRefineNode: OUTPUT_NODE = True AVERAGE_DURATION = 240 - def generate_mesh(self, model_task_id, **kwargs): - response = SynchronousOperation( + async def generate_mesh(self, model_task_id, **kwargs): + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -400,7 +404,7 @@ class TripoRefineNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, response) + return await poll_until_finished(kwargs, response) class TripoRigNode: @@ -425,8 +429,8 @@ class TripoRigNode: OUTPUT_NODE = True AVERAGE_DURATION = 180 - def generate_mesh(self, original_model_task_id, **kwargs): - response = SynchronousOperation( + async def generate_mesh(self, original_model_task_id, **kwargs): + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -440,7 +444,8 @@ class TripoRigNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, response) + return await poll_until_finished(kwargs, response) + class TripoRetargetNode: @classmethod @@ -475,8 +480,8 @@ class TripoRetargetNode: OUTPUT_NODE = True AVERAGE_DURATION = 30 - def generate_mesh(self, animation, original_model_task_id, **kwargs): - response = SynchronousOperation( + async def generate_mesh(self, animation, original_model_task_id, **kwargs): + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -491,7 +496,8 @@ class TripoRetargetNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, response) + return await poll_until_finished(kwargs, response) + class TripoConversionNode: @classmethod @@ -529,10 +535,10 @@ class TripoConversionNode: OUTPUT_NODE = True AVERAGE_DURATION = 30 - def generate_mesh(self, original_model_task_id, format, quad, face_limit, texture_size, texture_format, **kwargs): + async def generate_mesh(self, original_model_task_id, format, quad, face_limit, texture_size, texture_format, **kwargs): if not original_model_task_id: raise RuntimeError("original_model_task_id is required") - response = SynchronousOperation( + response = await SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/tripo/v2/openapi/task", method=HttpMethod.POST, @@ -549,7 +555,8 @@ class TripoConversionNode: ), auth_kwargs=kwargs, ).execute() - return poll_until_finished(kwargs, response) + return await poll_until_finished(kwargs, response) + NODE_CLASS_MAPPINGS = { "TripoTextToModelNode": TripoTextToModelNode, diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index 97bfe20e6..e25dab2f5 100644 --- 
a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -1,7 +1,7 @@ import io import logging import base64 -import requests +import aiohttp import torch from typing import Optional @@ -152,7 +152,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): DESCRIPTION = "Generates videos from text prompts using Google's Veo 2 API" API_NODE = True - def generate_video( + async def generate_video( self, prompt, aspect_ratio="16:9", @@ -217,7 +217,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): auth_kwargs=kwargs, ) - initial_response = initial_operation.execute() + initial_response = await initial_operation.execute() operation_name = initial_response.name logging.info(f"Veo generation started with operation name: {operation_name}") @@ -256,7 +256,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): ) # Execute the polling operation - poll_response = poll_operation.execute() + poll_response = await poll_operation.execute() # Now check for errors in the final response # Check for error in poll response @@ -281,7 +281,6 @@ class VeoVideoGenerationNode(ComfyNodeABC): raise Exception(error_message) # Extract video data - video_data = None if poll_response.response and hasattr(poll_response.response, 'videos') and poll_response.response.videos and len(poll_response.response.videos) > 0: video = poll_response.response.videos[0] @@ -291,9 +290,9 @@ class VeoVideoGenerationNode(ComfyNodeABC): video_data = base64.b64decode(video.bytesBase64Encoded) elif hasattr(video, 'gcsUri') and video.gcsUri: # Download from URL - video_url = video.gcsUri - video_response = requests.get(video_url) - video_data = video_response.content + async with aiohttp.ClientSession() as session: + async with session.get(video.gcsUri) as video_response: + video_data = await video_response.content.read() else: raise Exception("Video returned but no data or URL was provided") else: From 735bb4bdb186bd4f39b9c924c24b8b39a7ef8b0d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 8 Aug 2025 01:21:00 -0700 Subject: [PATCH 0415/1073] Users report gfx1201 is buggy on flux with pytorch attention. (#9244) --- comfy/model_management.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 9e6149d60..dc5b4711d 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -321,9 +321,9 @@ try: if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950 ENABLE_PYTORCH_ATTENTION = True - if torch_version_numeric >= (2, 8): - if any((a in arch) for a in ["gfx1201"]): - ENABLE_PYTORCH_ATTENTION = True +# if torch_version_numeric >= (2, 8): +# if any((a in arch) for a in ["gfx1201"]): +# ENABLE_PYTORCH_ATTENTION = True if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches SUPPORT_FP8_OPS = True From 5828607ccfef82a82931d8b66f3fd1176e04588f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 9 Aug 2025 09:49:25 -0700 Subject: [PATCH 0416/1073] Not sure if AMD actually support fp16 acc but it doesn't crash. 
(#9258) --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index dc5b4711d..c08f759e5 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -340,7 +340,7 @@ if ENABLE_PYTORCH_ATTENTION: PRIORITIZE_FP16 = False # TODO: remove and replace with something that shows exactly which dtype is faster than the other try: - if is_nvidia() and PerformanceFeature.Fp16Accumulation in args.fast: + if (is_nvidia() or is_amd()) and PerformanceFeature.Fp16Accumulation in args.fast: torch.backends.cuda.matmul.allow_fp16_accumulation = True PRIORITIZE_FP16 = True # TODO: limit to cards where it actually boosts performance logging.info("Enabled fp16 accumulation.") From 0552de7c7d6bcdd515da115d6756fd30494c7ff4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 10 Aug 2025 02:03:47 -0700 Subject: [PATCH 0417/1073] Bump pytorch cuda and rocm versions in readme instructions. (#9273) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 119098f5c..e4cff01a9 100644 --- a/README.md +++ b/README.md @@ -203,7 +203,7 @@ Put your VAE in: models/vae ### AMD GPUs (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: -```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.3``` +```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.4``` This is the command to install the nightly with ROCm 6.4 which might have some performance improvements: @@ -237,7 +237,7 @@ Additional discussion and help can be found [here](https://github.com/comfyanony Nvidia users should install stable pytorch using this command: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu128``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu129``` This is the command to install pytorch nightly instead which might have performance improvements. From 966f3a52061b5e300f36c6de0d07c47d6ad12f76 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 11 Aug 2025 02:53:01 -0700 Subject: [PATCH 0418/1073] Only show feature flags log when verbose. 
(#9281) --- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index 0553a0dd7..8f9c88ebf 100644 --- a/server.py +++ b/server.py @@ -235,7 +235,7 @@ class PromptServer(): sid, ) - logging.info( + logging.debug( f"Feature flags negotiated for client {sid}: {client_flags}" ) first_message = False From fa340add552497a264071fd7f6c407ff4aa10449 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 11 Aug 2025 23:48:17 +0300 Subject: [PATCH 0419/1073] remove creation of non-used asyncio_loop (#9284) --- execution.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/execution.py b/execution.py index 952f0cc5c..1dc35738b 100644 --- a/execution.py +++ b/execution.py @@ -646,8 +646,6 @@ class PromptExecutor: self.add_message("execution_error", mes, broadcast=False) def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): - asyncio_loop = asyncio.new_event_loop() - asyncio.set_event_loop(asyncio_loop) asyncio.run(self.execute_async(prompt, prompt_id, extra_data, execute_outputs)) async def execute_async(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): From 629b17383718e1f46dbba101ea83ec897fbe3082 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 12 Aug 2025 04:52:12 +0800 Subject: [PATCH 0420/1073] Update template & embedded docs (#9283) * Update template & embedded docs * Update embedded docs to 0.2.6 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 2f4692b03..2fb38ef27 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ comfyui-frontend-package==1.24.4 -comfyui-workflow-templates==0.1.52 -comfyui-embedded-docs==0.2.4 +comfyui-workflow-templates==0.1.53 +comfyui-embedded-docs==0.2.6 torch torchsde torchvision From 2208aa616d3ad193cd37ef57076d4f5243cecdd3 Mon Sep 17 00:00:00 2001 From: PsychoLogicAu Date: Tue, 12 Aug 2025 06:56:16 +1000 Subject: [PATCH 0421/1073] Support SimpleTuner lycoris lora for Qwen-Image (#9280) --- comfy/lora.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/lora.py b/comfy/lora.py index 6686b7229..00358884b 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -301,6 +301,7 @@ def model_lora_keys_unet(model, key_map={}): key_map["{}".format(key_lora)] = k # Support transformer prefix format key_map["transformer.{}".format(key_lora)] = k + key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = k #SimpleTuner lycoris format return key_map From f4231a80b1b904b45ade0def9b37320c4adfe71b Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 12 Aug 2025 00:15:14 +0300 Subject: [PATCH 0422/1073] fix(Kling Image API Node): do not pass "image_type" when no image (#9271) * fix(Kling Image API Node): do not pass "image_type" when no image * fix(Kling Image API Node): raise client-side error when kling_v1 is used with reference image --- comfy_api_nodes/nodes_kling.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 9d9eb5628..9d483bb0e 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -1690,7 +1690,11 @@ class KlingImageGenerationNode(KlingImageGenerationBase): ): self.validate_prompt(prompt, negative_prompt) - if image is not None: + if image is None: + image_type = None + elif model_name == KlingImageGenModelName.kling_v1: + raise ValueError(f"The model 
{KlingImageGenModelName.kling_v1.value} does not support reference images.") + else: image = tensor_to_base64_string(image) initial_operation = SynchronousOperation( From 1e3ae1eed8b925430e3b114ea6b7d08ea698e305 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 13 Aug 2025 05:14:27 +0800 Subject: [PATCH 0423/1073] Update template to 0.1.58 (#9302) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2fb38ef27..82af5690b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.24.4 -comfyui-workflow-templates==0.1.53 +comfyui-workflow-templates==0.1.58 comfyui-embedded-docs==0.2.6 torch torchsde From e1d4f36d8df7446ebb1a5f2bf9c708c38a159f22 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 12 Aug 2025 17:13:04 -0700 Subject: [PATCH 0424/1073] Update test release package workflow with python 3.13 cu129. (#9306) --- .github/workflows/windows_release_package.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index 3926a65f3..b51746285 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -7,19 +7,19 @@ on: description: 'cuda version' required: true type: string - default: "128" + default: "129" python_minor: description: 'python minor version' required: true type: string - default: "12" + default: "13" python_patch: description: 'python patch version' required: true type: string - default: "10" + default: "6" # push: # branches: # - master @@ -64,6 +64,8 @@ jobs: ./python.exe get-pip.py ./python.exe -s -m pip install ../cu${{ inputs.cu }}_python_deps/* sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth + + rm ./Lib/site-packages/torch/lib/dnnl.lib #I don't think this is actually used and I need the space cd .. git clone --depth 1 https://github.com/comfyanonymous/taesd From 560d38f34c5bd532f89f2178f01ee819cf145820 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 12 Aug 2025 20:26:33 -0700 Subject: [PATCH 0425/1073] Wan2.2 fun control support. 
(#9292) --- comfy/ldm/wan/model.py | 19 +++++++++++++ comfy/model_base.py | 10 ++++++- comfy/model_detection.py | 5 ++++ comfy_extras/nodes_wan.py | 58 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 91 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 86d0795e9..4e2d99566 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -391,6 +391,7 @@ class WanModel(torch.nn.Module): cross_attn_norm=True, eps=1e-6, flf_pos_embed_token_number=None, + in_dim_ref_conv=None, image_model=None, device=None, dtype=None, @@ -484,6 +485,11 @@ class WanModel(torch.nn.Module): else: self.img_emb = None + if in_dim_ref_conv is not None: + self.ref_conv = operations.Conv2d(in_dim_ref_conv, dim, kernel_size=patch_size[1:], stride=patch_size[1:], device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + else: + self.ref_conv = None + def forward_orig( self, x, @@ -526,6 +532,13 @@ class WanModel(torch.nn.Module): e = e.reshape(t.shape[0], -1, e.shape[-1]) e0 = self.time_projection(e).unflatten(2, (6, self.dim)) + full_ref = None + if self.ref_conv is not None: + full_ref = kwargs.get("reference_latent", None) + if full_ref is not None: + full_ref = self.ref_conv(full_ref).flatten(2).transpose(1, 2) + x = torch.concat((full_ref, x), dim=1) + # context context = self.text_embedding(context) @@ -552,6 +565,9 @@ class WanModel(torch.nn.Module): # head x = self.head(x, e) + if full_ref is not None: + x = x[:, full_ref.shape[1]:] + # unpatchify x = self.unpatchify(x, grid_sizes) return x @@ -570,6 +586,9 @@ class WanModel(torch.nn.Module): x = torch.cat([x, time_dim_concat], dim=2) t_len = ((x.shape[2] + (patch_size[0] // 2)) // patch_size[0]) + if self.ref_conv is not None and "reference_latent" in kwargs: + t_len += 1 + img_ids = torch.zeros((t_len, h_len, w_len, 3), device=x.device, dtype=x.dtype) img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(0, t_len - 1, steps=t_len, device=x.device, dtype=x.dtype).reshape(-1, 1, 1) img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).reshape(1, -1, 1) diff --git a/comfy/model_base.py b/comfy/model_base.py index 8a2d9cbe6..cde61df7c 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1124,7 +1124,11 @@ class WAN21(BaseModel): mask = mask.repeat(1, 4, 1, 1, 1) mask = utils.resize_to_batch_size(mask, noise.shape[0]) - return torch.cat((mask, image), dim=1) + concat_mask_index = kwargs.get("concat_mask_index", 0) + if concat_mask_index != 0: + return torch.cat((image[:, :concat_mask_index], mask, image[:, concat_mask_index:]), dim=1) + else: + return torch.cat((mask, image), dim=1) def extra_conds(self, **kwargs): out = super().extra_conds(**kwargs) @@ -1140,6 +1144,10 @@ class WAN21(BaseModel): if time_dim_concat is not None: out['time_dim_concat'] = comfy.conds.CONDRegular(self.process_latent_in(time_dim_concat)) + reference_latents = kwargs.get("reference_latents", None) + if reference_latents is not None: + out['reference_latent'] = comfy.conds.CONDRegular(self.process_latent_in(reference_latents[-1])[:, :, 0]) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 8b57ebd2f..8acc51e20 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -373,6 +373,11 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): flf_weight = state_dict.get('{}img_emb.emb_pos'.format(key_prefix)) if flf_weight is not None: dit_config["flf_pos_embed_token_number"] = 
flf_weight.shape[1] + + ref_conv_weight = state_dict.get('{}ref_conv.weight'.format(key_prefix)) + if ref_conv_weight is not None: + dit_config["in_dim_ref_conv"] = ref_conv_weight.shape[1] + return dit_config if '{}latent_in.weight'.format(key_prefix) in state_dict_keys: # Hunyuan 3D diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 0067d054d..f80c83ba6 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -103,6 +103,63 @@ class WanFunControlToVideo: out_latent["samples"] = latent return (positive, negative, out_latent) +class Wan22FunControlToVideo: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "vae": ("VAE", ), + "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), + "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + "optional": {"ref_image": ("IMAGE", ), + "control_video": ("IMAGE", ), + # "start_image": ("IMAGE", ), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + + CATEGORY = "conditioning/video_models" + + def encode(self, positive, negative, vae, width, height, length, batch_size, ref_image=None, start_image=None, control_video=None): + latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent) + concat_latent = concat_latent.repeat(1, 2, 1, 1, 1) + mask = torch.ones((1, 1, latent.shape[2] * 4, latent.shape[-2], latent.shape[-1])) + + if start_image is not None: + start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + concat_latent_image = vae.encode(start_image[:, :, :, :3]) + concat_latent[:,16:,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]] + mask[:, :, :start_image.shape[0] + 3] = 0.0 + + ref_latent = None + if ref_image is not None: + ref_image = comfy.utils.common_upscale(ref_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + ref_latent = vae.encode(ref_image[:, :, :, :3]) + + if control_video is not None: + control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + concat_latent_image = vae.encode(control_video[:, :, :, :3]) + concat_latent[:,:16,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]] + + mask = mask.view(1, mask.shape[2] // 4, 4, mask.shape[3], mask.shape[4]).transpose(1, 2) + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent, "concat_mask": mask, "concat_mask_index": 16}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent, "concat_mask": mask, "concat_mask_index": 16}) + + if ref_latent is not None: + positive = node_helpers.conditioning_set_values(positive, {"reference_latents": [ref_latent]}, append=True) + negative = node_helpers.conditioning_set_values(negative, 
{"reference_latents": [ref_latent]}, append=True) + + out_latent = {} + out_latent["samples"] = latent + return (positive, negative, out_latent) + class WanFirstLastFrameToVideo: @classmethod def INPUT_TYPES(s): @@ -733,6 +790,7 @@ NODE_CLASS_MAPPINGS = { "WanTrackToVideo": WanTrackToVideo, "WanImageToVideo": WanImageToVideo, "WanFunControlToVideo": WanFunControlToVideo, + "Wan22FunControlToVideo": Wan22FunControlToVideo, "WanFunInpaintToVideo": WanFunInpaintToVideo, "WanFirstLastFrameToVideo": WanFirstLastFrameToVideo, "WanVaceToVideo": WanVaceToVideo, From 898d88e10e45f38500ca6044280bab4ca2f2d273 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 12 Aug 2025 20:34:58 -0700 Subject: [PATCH 0426/1073] Make torchaudio exception catching less specific (#9309) --- comfy_api/latest/_ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py index 61597038f..26a55615f 100644 --- a/comfy_api/latest/_ui.py +++ b/comfy_api/latest/_ui.py @@ -12,7 +12,7 @@ import torch try: import torchaudio TORCH_AUDIO_AVAILABLE = True -except ImportError: +except: TORCH_AUDIO_AVAILABLE = False from PIL import Image as PILImage from PIL.PngImagePlugin import PngInfo From 3294782d19c3af0c6166aafe0465fe6b59571d17 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 13 Aug 2025 14:50:50 +0800 Subject: [PATCH 0427/1073] Update template to 0.1.59 (#9313) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 82af5690b..bfb31a73f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.24.4 -comfyui-workflow-templates==0.1.58 +comfyui-workflow-templates==0.1.59 comfyui-embedded-docs==0.2.6 torch torchsde From 5ca8e2fac3b6826261c5991b0663b69eff60b3a1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 13 Aug 2025 00:01:12 -0700 Subject: [PATCH 0428/1073] Update release workflow to python3.13 pytorch cu129 (#9315) * Try to reduce size of portable even more. * Update stable release workflow to python 3.13 cu129 * Update dependencies workflow to python3.13 cu129 --- .github/workflows/stable-release.yml | 15 ++++++++++----- .../workflows/windows_release_dependencies.yml | 6 +++--- .github/workflows/windows_release_package.yml | 2 ++ 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 61105abe4..a5a1ed2d0 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -12,17 +12,17 @@ on: description: 'CUDA version' required: true type: string - default: "128" + default: "129" python_minor: description: 'Python minor version' required: true type: string - default: "12" + default: "13" python_patch: description: 'Python patch version' required: true type: string - default: "10" + default: "6" jobs: @@ -66,8 +66,13 @@ jobs: curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py ./python.exe -s -m pip install ../cu${{ inputs.cu }}_python_deps/* - sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth - cd .. + sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth + + rm ./Lib/site-packages/torch/lib/dnnl.lib #I don't think this is actually used and I need the space + rm ./Lib/site-packages/torch/lib/libprotoc.lib + rm ./Lib/site-packages/torch/lib/libprotobuf.lib + + cd .. 
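A note on the `sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth` line above: the Windows embeddable Python resolves its import path from that `._pth` file, so inserting `../ComfyUI` as the first entry lets the portable interpreter import the repo directly without installing it. The post-edit file looks roughly like this (illustrative, based on the stock embeddable layout; `import site` has to be active for packages pip-installed into Lib/site-packages to resolve):

../ComfyUI
python313.zip
.
import site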
git clone --depth 1 https://github.com/comfyanonymous/taesd cp taesd/*.safetensors ./ComfyUI_copy/models/vae_approx/ diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml index dfdb96d50..7761cc1ed 100644 --- a/.github/workflows/windows_release_dependencies.yml +++ b/.github/workflows/windows_release_dependencies.yml @@ -17,19 +17,19 @@ on: description: 'cuda version' required: true type: string - default: "128" + default: "129" python_minor: description: 'python minor version' required: true type: string - default: "12" + default: "13" python_patch: description: 'python patch version' required: true type: string - default: "10" + default: "6" # push: # branches: # - master diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index b51746285..3334e6839 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -66,6 +66,8 @@ jobs: sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth rm ./Lib/site-packages/torch/lib/dnnl.lib #I don't think this is actually used and I need the space + rm ./Lib/site-packages/torch/lib/libprotoc.lib + rm ./Lib/site-packages/torch/lib/libprotobuf.lib cd .. git clone --depth 1 https://github.com/comfyanonymous/taesd From e400f26c8fc9867248394616a4b58ecc4c53fbfd Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 13 Aug 2025 00:44:54 -0700 Subject: [PATCH 0429/1073] Downgrade frontend for release. (#9316) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bfb31a73f..56ed85e01 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.24.4 +comfyui-frontend-package==1.23.4 comfyui-workflow-templates==0.1.59 comfyui-embedded-docs==0.2.6 torch From d5c1954d5cd4a789bbf84d2b75a955a5a3f93de8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 13 Aug 2025 03:46:38 -0400 Subject: [PATCH 0430/1073] ComfyUI version 0.3.50 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 5e2d09c81..29ec07ca6 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.49" +__version__ = "0.3.50" diff --git a/pyproject.toml b/pyproject.toml index 3c530ba85..659b5730a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.49" +version = "0.3.50" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 615eb52049df98cebdd67bc672b66dc059171d7c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 13 Aug 2025 00:48:06 -0700 Subject: [PATCH 0431/1073] Put back frontend version. 
(#9317) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 56ed85e01..bfb31a73f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.23.4 +comfyui-frontend-package==1.24.4 comfyui-workflow-templates==0.1.59 comfyui-embedded-docs==0.2.6 torch From afa0a45206832b0e64e38454b7841d1da7ca56e4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 13 Aug 2025 11:42:08 -0700 Subject: [PATCH 0432/1073] Reduce portable size again. (#9323) * compress more * test * not needed --- .github/workflows/stable-release.yml | 2 +- .github/workflows/windows_release_package.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index a5a1ed2d0..2bc8e5905 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -90,7 +90,7 @@ jobs: cd .. - "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=512m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia.7z cd ComfyUI_windows_portable diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index 3334e6839..46375698e 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -86,7 +86,7 @@ jobs: cd .. - "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=512m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z cd ComfyUI_windows_portable From 3da5a07510794c37d437cbea1d94065bb0aa8ebc Mon Sep 17 00:00:00 2001 From: contentis Date: Wed, 13 Aug 2025 20:53:27 +0200 Subject: [PATCH 0433/1073] SDPA backend priority (#9299) --- comfy/ldm/hunyuan3d/vae.py | 2 +- comfy/ldm/modules/attention.py | 4 ++-- comfy/ldm/modules/diffusionmodules/model.py | 2 +- comfy/ops.py | 13 +++++++++++++ 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/hunyuan3d/vae.py b/comfy/ldm/hunyuan3d/vae.py index 5eb2c6548..bea6090a2 100644 --- a/comfy/ldm/hunyuan3d/vae.py +++ b/comfy/ldm/hunyuan3d/vae.py @@ -178,7 +178,7 @@ class FourierEmbedder(nn.Module): class CrossAttentionProcessor: def __call__(self, attn, q, k, v): - out = F.scaled_dot_product_attention(q, k, v) + out = ops.scaled_dot_product_attention(q, k, v) return out diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 35d2270ee..19c3c7af1 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -448,7 +448,7 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha mask = mask.unsqueeze(1) if SDP_BATCH_LIMIT >= b: - out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) + out = ops.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) if not skip_output_reshape: out = ( out.transpose(1, 2).reshape(b, -1, heads * dim_head) @@ 
-461,7 +461,7 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha if mask.shape[0] > 1: m = mask[i : i + SDP_BATCH_LIMIT] - out[i : i + SDP_BATCH_LIMIT] = torch.nn.functional.scaled_dot_product_attention( + out[i : i + SDP_BATCH_LIMIT] = ops.scaled_dot_product_attention( q[i : i + SDP_BATCH_LIMIT], k[i : i + SDP_BATCH_LIMIT], v[i : i + SDP_BATCH_LIMIT], diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 5c0373b74..79160412f 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -285,7 +285,7 @@ def pytorch_attention(q, k, v): ) try: - out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) + out = ops.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) out = out.transpose(2, 3).reshape(orig_shape) except model_management.OOM_EXCEPTION: logging.warning("scaled_dot_product_attention OOMed: switched to slice attention") diff --git a/comfy/ops.py b/comfy/ops.py index 2cc9bbc27..8b7b662b6 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -23,9 +23,18 @@ from comfy.cli_args import args, PerformanceFeature import comfy.float import comfy.rmsnorm import contextlib +from torch.nn.attention import SDPBackend, sdpa_kernel cast_to = comfy.model_management.cast_to #TODO: remove once no more references +SDPA_BACKEND_PRIORITY = [ + SDPBackend.FLASH_ATTENTION, + SDPBackend.EFFICIENT_ATTENTION, + SDPBackend.MATH, +] +if torch.cuda.is_available(): + SDPA_BACKEND_PRIORITY.insert(0, SDPBackend.CUDNN_ATTENTION) + def cast_to_input(weight, input, non_blocking=False, copy=True): return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy) @@ -249,6 +258,10 @@ class disable_weight_init: else: raise ValueError(f"unsupported dimensions: {dims}") + @staticmethod + @sdpa_kernel(backends=SDPA_BACKEND_PRIORITY, set_priority=True) + def scaled_dot_product_attention(q, k, v, *args, **kwargs): + return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs) class manual_cast(disable_weight_init): class Linear(disable_weight_init.Linear): From 9df8792d4b894a8ea8034414ef63f70deee4b1af Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 13 Aug 2025 12:12:41 -0700 Subject: [PATCH 0434/1073] Make last PR not crash comfy on old pytorch. 
(#9324) --- comfy/ldm/hunyuan3d/vae.py | 2 +- comfy/ldm/modules/attention.py | 4 +-- comfy/ldm/modules/diffusionmodules/model.py | 2 +- comfy/ops.py | 36 +++++++++++++-------- 4 files changed, 27 insertions(+), 17 deletions(-) diff --git a/comfy/ldm/hunyuan3d/vae.py b/comfy/ldm/hunyuan3d/vae.py index bea6090a2..6e8cbf1d9 100644 --- a/comfy/ldm/hunyuan3d/vae.py +++ b/comfy/ldm/hunyuan3d/vae.py @@ -178,7 +178,7 @@ class FourierEmbedder(nn.Module): class CrossAttentionProcessor: def __call__(self, attn, q, k, v): - out = ops.scaled_dot_product_attention(q, k, v) + out = comfy.ops.scaled_dot_product_attention(q, k, v) return out diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 19c3c7af1..043df28df 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -448,7 +448,7 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha mask = mask.unsqueeze(1) if SDP_BATCH_LIMIT >= b: - out = ops.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) + out = comfy.ops.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) if not skip_output_reshape: out = ( out.transpose(1, 2).reshape(b, -1, heads * dim_head) @@ -461,7 +461,7 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha if mask.shape[0] > 1: m = mask[i : i + SDP_BATCH_LIMIT] - out[i : i + SDP_BATCH_LIMIT] = ops.scaled_dot_product_attention( + out[i : i + SDP_BATCH_LIMIT] = comfy.ops.scaled_dot_product_attention( q[i : i + SDP_BATCH_LIMIT], k[i : i + SDP_BATCH_LIMIT], v[i : i + SDP_BATCH_LIMIT], diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 79160412f..1fd12b35a 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -285,7 +285,7 @@ def pytorch_attention(q, k, v): ) try: - out = ops.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) + out = comfy.ops.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) out = out.transpose(2, 3).reshape(orig_shape) except model_management.OOM_EXCEPTION: logging.warning("scaled_dot_product_attention OOMed: switched to slice attention") diff --git a/comfy/ops.py b/comfy/ops.py index 8b7b662b6..be312d714 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -23,18 +23,32 @@ from comfy.cli_args import args, PerformanceFeature import comfy.float import comfy.rmsnorm import contextlib -from torch.nn.attention import SDPBackend, sdpa_kernel + + +def scaled_dot_product_attention(q, k, v, *args, **kwargs): + return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs) + + +try: + if torch.cuda.is_available(): + from torch.nn.attention import SDPBackend, sdpa_kernel + + SDPA_BACKEND_PRIORITY = [ + SDPBackend.FLASH_ATTENTION, + SDPBackend.EFFICIENT_ATTENTION, + SDPBackend.MATH, + ] + + SDPA_BACKEND_PRIORITY.insert(0, SDPBackend.CUDNN_ATTENTION) + + @sdpa_kernel(backends=SDPA_BACKEND_PRIORITY, set_priority=True) + def scaled_dot_product_attention(q, k, v, *args, **kwargs): + return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs) +except (ModuleNotFoundError, TypeError): + logging.warning("Could not set sdpa backend priority.") cast_to = comfy.model_management.cast_to #TODO: remove once no more references -SDPA_BACKEND_PRIORITY = [ - SDPBackend.FLASH_ATTENTION, - SDPBackend.EFFICIENT_ATTENTION, - SDPBackend.MATH, -] -if 
torch.cuda.is_available(): - SDPA_BACKEND_PRIORITY.insert(0, SDPBackend.CUDNN_ATTENTION) - def cast_to_input(weight, input, non_blocking=False, copy=True): return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy) @@ -258,10 +272,6 @@ class disable_weight_init: else: raise ValueError(f"unsupported dimensions: {dims}") - @staticmethod - @sdpa_kernel(backends=SDPA_BACKEND_PRIORITY, set_priority=True) - def scaled_dot_product_attention(q, k, v, *args, **kwargs): - return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs) class manual_cast(disable_weight_init): class Linear(disable_weight_init.Linear): From c991a5da658667cf29f2916bef096fa7b18afd47 Mon Sep 17 00:00:00 2001 From: Simon Lui <502929+simonlui@users.noreply.github.com> Date: Wed, 13 Aug 2025 16:13:35 -0700 Subject: [PATCH 0435/1073] Fix XPU iGPU regressions (#9322) * Change bf16 check and switch non-blocking to off default with option to force to regain speed on certain classes of iGPUs and refactor xpu check. * Turn non_blocking off by default for xpu. * Update README.md for Intel GPUs. --- README.md | 28 ++++++++++------------------ comfy/cli_args.py | 2 ++ comfy/model_management.py | 21 +++++++++++++-------- 3 files changed, 25 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index e4cff01a9..fa99a8cbe 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ ComfyUI lets you design and execute advanced stable diffusion pipelines using a ## Get Started #### [Desktop Application](https://www.comfy.org/download) -- The easiest way to get started. +- The easiest way to get started. - Available on Windows & macOS. #### [Windows Portable Package](#installing) @@ -211,27 +211,19 @@ This is the command to install the nightly with ROCm 6.4 which might have some p ### Intel GPUs (Windows and Linux) -(Option 1) Intel Arc GPU users can install native PyTorch with torch.xpu support using pip (currently available in PyTorch nightly builds). More information can be found [here](https://pytorch.org/docs/main/notes/get_start_xpu.html) - -1. To install PyTorch nightly, use the following command: +(Option 1) Intel Arc GPU users can install native PyTorch with torch.xpu support using pip. More information can be found [here](https://pytorch.org/docs/main/notes/get_start_xpu.html) + +1. To install PyTorch xpu, use the following command: + +```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/xpu``` + +This is the command to install the Pytorch xpu nightly which might have some performance improvements: ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/xpu``` -2. Launch ComfyUI by running `python main.py` - - (Option 2) Alternatively, Intel GPUs supported by Intel Extension for PyTorch (IPEX) can leverage IPEX for improved performance. -1. For Intel® Arc™ A-Series Graphics utilizing IPEX, create a conda environment and use the commands below: - -``` -conda install libuv -pip install torch==2.3.1.post0+cxx11.abi torchvision==0.18.1.post0+cxx11.abi torchaudio==2.3.1.post0+cxx11.abi intel-extension-for-pytorch==2.3.110.post0+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ -``` - -For other supported Intel GPUs with IPEX, visit [Installation](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu) for more information. 
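(The IPEX-specific steps are being dropped here in favor of the native xpu backend.) A quick sanity check that an installed wheel actually exposes that backend, assuming a recent PyTorch (2.4+) where the torch.xpu module is available:

import torch
print(torch.xpu.is_available(), torch.xpu.device_count())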
- -Additional discussion and help can be found [here](https://github.com/comfyanonymous/ComfyUI/discussions/476). +1. visit [Installation](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu) for more information. ### NVIDIA @@ -352,7 +344,7 @@ Generate a self-signed certificate (not appropriate for shared/production use) a Use `--tls-keyfile key.pem --tls-certfile cert.pem` to enable TLS/SSL, the app will now be accessible with `https://...` instead of `http://...`. -> Note: Windows users can use [alexisrolland/docker-openssl](https://github.com/alexisrolland/docker-openssl) or one of the [3rd party binary distributions](https://wiki.openssl.org/index.php/Binaries) to run the command example above. +> Note: Windows users can use [alexisrolland/docker-openssl](https://github.com/alexisrolland/docker-openssl) or one of the [3rd party binary distributions](https://wiki.openssl.org/index.php/Binaries) to run the command example above.

If you use a container, note that the volume mount `-v` can be a relative path so `... -v ".\:/openssl-certs" ...` would create the key & cert files in the current directory of your command prompt or powershell terminal. ## Support and dev channel diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 0d760d524..de3e85c08 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -132,6 +132,8 @@ parser.add_argument("--reserve-vram", type=float, default=None, help="Set the am parser.add_argument("--async-offload", action="store_true", help="Use async weight offloading.") +parser.add_argument("--force-non-blocking", action="store_true", help="Force ComfyUI to use non-blocking operations for all applicable tensors. This may improve performance on some non-Nvidia systems but can cause issues with some workflows.") + parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.") parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.") diff --git a/comfy/model_management.py b/comfy/model_management.py index c08f759e5..2a9f18068 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -78,7 +78,6 @@ try: torch_version = torch.version.__version__ temp = torch_version.split(".") torch_version_numeric = (int(temp[0]), int(temp[1])) - xpu_available = (torch_version_numeric[0] < 2 or (torch_version_numeric[0] == 2 and torch_version_numeric[1] <= 4)) and torch.xpu.is_available() except: pass @@ -102,10 +101,14 @@ if args.directml is not None: try: import intel_extension_for_pytorch as ipex # noqa: F401 - _ = torch.xpu.device_count() - xpu_available = xpu_available or torch.xpu.is_available() except: - xpu_available = xpu_available or (hasattr(torch, "xpu") and torch.xpu.is_available()) + pass + +try: + _ = torch.xpu.device_count() + xpu_available = torch.xpu.is_available() +except: + xpu_available = False try: if torch.backends.mps.is_available(): @@ -946,10 +949,12 @@ def pick_weight_dtype(dtype, fallback_dtype, device=None): return dtype def device_supports_non_blocking(device): + if args.force_non_blocking: + return True if is_device_mps(device): return False #pytorch bug? 
mps doesn't support non blocking - if is_intel_xpu(): - return True + if is_intel_xpu(): #xpu does support non blocking but it is slower on iGPUs for some reason so disable by default until situation changes + return False if args.deterministic: #TODO: figure out why deterministic breaks non blocking from gpu to cpu (previews) return False if directml_enabled: @@ -1282,10 +1287,10 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma return False if is_intel_xpu(): - if torch_version_numeric < (2, 6): + if torch_version_numeric < (2, 3): return True else: - return torch.xpu.get_device_capability(device)['has_bfloat16_conversions'] + return torch.xpu.is_bf16_supported() if is_ascend_npu(): return True From e4f7ea105f4b3034593f316560d952b80453e344 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Wed, 13 Aug 2025 18:33:05 -0700 Subject: [PATCH 0436/1073] Added context window support to core sampling code (#9238) * Added initial support for basic context windows - in progress * Add prepare_sampling wrapper for context window to more accurately estimate latent memory requirements, fixed merging wrappers/callbacks dicts in prepare_model_patcher * Made context windows compatible with different dimensions; works for WAN, but results are bad * Fix comfy.patcher_extension.merge_nested_dicts calls in prepare_model_patcher in sampler_helpers.py * Considering adding some callbacks to context window code to allow extensions of behavior without the need to rewrite code * Made dim slicing cleaner * Add Wan Context Windows node for testing * Made context schedule and fuse method functions be stored on the handler instead of needing to be registered in core code to be found * Moved some code around between nodes_context_windows.py and context_windows.py * Change manual context window nodes names/ids * Added callbacks to IndexListContextHandler * Adjusted default values for context_length and context_overlap, made schema.inputs definition for WAN Context Windows less annoying * Make get_resized_cond more robust for various dim sizes * Fix typo * Another small fix --- comfy/context_windows.py | 537 ++++++++++++++++++++++++++ comfy/sampler_helpers.py | 6 +- comfy/samplers.py | 11 +- comfy_extras/nodes_context_windows.py | 89 +++++ nodes.py | 1 + 5 files changed, 639 insertions(+), 5 deletions(-) create mode 100644 comfy/context_windows.py create mode 100644 comfy_extras/nodes_context_windows.py diff --git a/comfy/context_windows.py b/comfy/context_windows.py new file mode 100644 index 000000000..928b111df --- /dev/null +++ b/comfy/context_windows.py @@ -0,0 +1,537 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Callable +import torch +import numpy as np +import collections +from dataclasses import dataclass +from abc import ABC, abstractmethod +import logging +import comfy.model_management +import comfy.patcher_extension +if TYPE_CHECKING: + from comfy.model_base import BaseModel + from comfy.model_patcher import ModelPatcher + from comfy.controlnet import ControlBase + + +class ContextWindowABC(ABC): + def __init__(self): + ... + + @abstractmethod + def get_tensor(self, full: torch.Tensor) -> torch.Tensor: + """ + Get torch.Tensor applicable to current window. + """ + raise NotImplementedError("Not implemented.") + + @abstractmethod + def add_window(self, full: torch.Tensor, to_add: torch.Tensor) -> torch.Tensor: + """ + Apply torch.Tensor of window to the full tensor, in place. Returns reference to updated full tensor, not a copy.
+ """ + raise NotImplementedError("Not implemented.") + +class ContextHandlerABC(ABC): + def __init__(self): + ... + + @abstractmethod + def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool: + raise NotImplementedError("Not implemented.") + + @abstractmethod + def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: ContextWindowABC, device=None) -> list: + raise NotImplementedError("Not implemented.") + + @abstractmethod + def execute(self, calc_cond_batch: Callable, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]): + raise NotImplementedError("Not implemented.") + + + +class IndexListContextWindow(ContextWindowABC): + def __init__(self, index_list: list[int], dim: int=0): + self.index_list = index_list + self.context_length = len(index_list) + self.dim = dim + + def get_tensor(self, full: torch.Tensor, device=None, dim=None) -> torch.Tensor: + if dim is None: + dim = self.dim + if dim == 0 and full.shape[dim] == 1: + return full + idx = [slice(None)] * dim + [self.index_list] + return full[idx].to(device) + + def add_window(self, full: torch.Tensor, to_add: torch.Tensor, dim=None) -> torch.Tensor: + if dim is None: + dim = self.dim + idx = [slice(None)] * dim + [self.index_list] + full[idx] += to_add + return full + + +class IndexListCallbacks: + EVALUATE_CONTEXT_WINDOWS = "evaluate_context_windows" + COMBINE_CONTEXT_WINDOW_RESULTS = "combine_context_window_results" + EXECUTE_START = "execute_start" + EXECUTE_CLEANUP = "execute_cleanup" + + def init_callbacks(self): + return {} + + +@dataclass +class ContextSchedule: + name: str + func: Callable + +@dataclass +class ContextFuseMethod: + name: str + func: Callable + +ContextResults = collections.namedtuple("ContextResults", ['window_idx', 'sub_conds_out', 'sub_conds', 'window']) +class IndexListContextHandler(ContextHandlerABC): + def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMethod, context_length: int=1, context_overlap: int=0, context_stride: int=1, closed_loop=False, dim=0): + self.context_schedule = context_schedule + self.fuse_method = fuse_method + self.context_length = context_length + self.context_overlap = context_overlap + self.context_stride = context_stride + self.closed_loop = closed_loop + self.dim = dim + self._step = 0 + + self.callbacks = {} + + def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool: + # for now, assume first dim is batch - should have stored on BaseModel in actual implementation + if x_in.size(self.dim) > self.context_length: + logging.info(f"Using context windows {self.context_length} for {x_in.size(self.dim)} frames.") + return True + return False + + def prepare_control_objects(self, control: ControlBase, device=None) -> ControlBase: + if control.previous_controlnet is not None: + self.prepare_control_objects(control.previous_controlnet, device) + return control + + def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: IndexListContextWindow, device=None) -> list: + if cond_in is None: + return None + # reuse or resize cond items to match context requirements + resized_cond = [] + # cond object is a list containing a dict - outer list is irrelevant, so just loop through it + for actual_cond in cond_in: + resized_actual_cond = actual_cond.copy() + # now we are in the inner dict 
- "pooled_output" is a tensor, "control" is a ControlBase object, "model_conds" is dictionary + for key in actual_cond: + try: + cond_item = actual_cond[key] + if isinstance(cond_item, torch.Tensor): + # check that tensor is the expected length - x.size(0) + if self.dim < cond_item.ndim and cond_item.size(self.dim) == x_in.size(self.dim): + # if so, it's subsetting time - tell controls the expected indexes so they can handle them + actual_cond_item = window.get_tensor(cond_item) + resized_actual_cond[key] = actual_cond_item.to(device) + else: + resized_actual_cond[key] = cond_item.to(device) + # look for control + elif key == "control": + resized_actual_cond[key] = self.prepare_control_objects(cond_item, device) + elif isinstance(cond_item, dict): + new_cond_item = cond_item.copy() + # when in dictionary, look for tensors and CONDCrossAttn [comfy/conds.py] (has cond attr that is a tensor) + for cond_key, cond_value in new_cond_item.items(): + if isinstance(cond_value, torch.Tensor): + if cond_value.ndim < self.dim and cond_value.size(0) == x_in.size(self.dim): + new_cond_item[cond_key] = window.get_tensor(cond_value, device) + # if has cond that is a Tensor, check if needs to be subset + elif hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor): + if cond_value.cond.ndim < self.dim and cond_value.cond.size(0) == x_in.size(self.dim): + new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(cond_value.cond, device)) + elif cond_key == "num_video_frames": # for SVD + new_cond_item[cond_key] = cond_value._copy_with(cond_value.cond) + new_cond_item[cond_key].cond = window.context_length + resized_actual_cond[key] = new_cond_item + else: + resized_actual_cond[key] = cond_item + finally: + del cond_item # just in case to prevent VRAM issues + resized_cond.append(resized_actual_cond) + return resized_cond + + def set_step(self, timestep: torch.Tensor, model_options: dict[str]): + indexes = torch.where(model_options["transformer_options"]["sample_sigmas"] == timestep[0]) + self._step = int(indexes[0]) + + def get_context_windows(self, model: BaseModel, x_in: torch.Tensor, model_options: dict[str]) -> list[IndexListContextWindow]: + full_length = x_in.size(self.dim) # TODO: choose dim based on model + context_windows = self.context_schedule.func(full_length, self, model_options) + context_windows = [IndexListContextWindow(window, dim=self.dim) for window in context_windows] + return context_windows + + def execute(self, calc_cond_batch: Callable, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]): + self.set_step(timestep, model_options) + context_windows = self.get_context_windows(model, x_in, model_options) + enumerated_context_windows = list(enumerate(context_windows)) + + conds_final = [torch.zeros_like(x_in) for _ in conds] + if self.fuse_method.name == ContextFuseMethods.RELATIVE: + counts_final = [torch.ones(get_shape_for_dim(x_in, self.dim), device=x_in.device) for _ in conds] + else: + counts_final = [torch.zeros(get_shape_for_dim(x_in, self.dim), device=x_in.device) for _ in conds] + biases_final = [([0.0] * x_in.shape[self.dim]) for _ in conds] + + for callback in comfy.patcher_extension.get_all_callbacks(IndexListCallbacks.EXECUTE_START, self.callbacks): + callback(self, model, x_in, conds, timestep, model_options) + + for enum_window in enumerated_context_windows: + results = self.evaluate_context_windows(calc_cond_batch, model, x_in, conds, timestep, [enum_window], model_options) + for result
in results: + self.combine_context_window_results(x_in, result.sub_conds_out, result.sub_conds, result.window, result.window_idx, len(enumerated_context_windows), timestep, + conds_final, counts_final, biases_final) + try: + # finalize conds + if self.fuse_method.name == ContextFuseMethods.RELATIVE: + # relative is already normalized, so return as is + del counts_final + return conds_final + else: + # normalize conds via division by context usage counts + for i in range(len(conds_final)): + conds_final[i] /= counts_final[i] + del counts_final + return conds_final + finally: + for callback in comfy.patcher_extension.get_all_callbacks(IndexListCallbacks.EXECUTE_CLEANUP, self.callbacks): + callback(self, model, x_in, conds, timestep, model_options) + + def evaluate_context_windows(self, calc_cond_batch: Callable, model: BaseModel, x_in: torch.Tensor, conds, timestep: torch.Tensor, enumerated_context_windows: list[tuple[int, IndexListContextWindow]], + model_options, device=None, first_device=None): + results: list[ContextResults] = [] + for window_idx, window in enumerated_context_windows: + # allow processing to end between context window executions for faster Cancel + comfy.model_management.throw_exception_if_processing_interrupted() + + for callback in comfy.patcher_extension.get_all_callbacks(IndexListCallbacks.EVALUATE_CONTEXT_WINDOWS, self.callbacks): + callback(self, model, x_in, conds, timestep, model_options, window_idx, window, model_options, device, first_device) + + # update exposed params + model_options["transformer_options"]["context_window"] = window + # get subsections of x, timestep, conds + sub_x = window.get_tensor(x_in, device) + sub_timestep = window.get_tensor(timestep, device, dim=0) + sub_conds = [self.get_resized_cond(cond, x_in, window, device) for cond in conds] + + sub_conds_out = calc_cond_batch(model, sub_conds, sub_x, sub_timestep, model_options) + if device is not None: + for i in range(len(sub_conds_out)): + sub_conds_out[i] = sub_conds_out[i].to(x_in.device) + results.append(ContextResults(window_idx, sub_conds_out, sub_conds, window)) + return results + + + def combine_context_window_results(self, x_in: torch.Tensor, sub_conds_out, sub_conds, window: IndexListContextWindow, window_idx: int, total_windows: int, timestep: torch.Tensor, + conds_final: list[torch.Tensor], counts_final: list[torch.Tensor], biases_final: list[torch.Tensor]): + if self.fuse_method.name == ContextFuseMethods.RELATIVE: + for pos, idx in enumerate(window.index_list): + # bias is the influence of a specific index in relation to the whole context window + bias = 1 - abs(idx - (window.index_list[0] + window.index_list[-1]) / 2) / ((window.index_list[-1] - window.index_list[0] + 1e-2) / 2) + bias = max(1e-2, bias) + # take weighted average relative to total bias of current idx + for i in range(len(sub_conds_out)): + bias_total = biases_final[i][idx] + prev_weight = (bias_total / (bias_total + bias)) + new_weight = (bias / (bias_total + bias)) + # account for dims of tensors + idx_window = [slice(None)] * self.dim + [idx] + pos_window = [slice(None)] * self.dim + [pos] + # apply new values + conds_final[i][idx_window] = conds_final[i][idx_window] * prev_weight + sub_conds_out[i][pos_window] * new_weight + biases_final[i][idx] = bias_total + bias + else: + # add conds and counts based on weights of fuse method + weights = get_context_weights(window.context_length, x_in.shape[self.dim], window.index_list, self, sigma=timestep) + weights_tensor = match_weights_to_dim(weights, x_in, self.dim, 
device=x_in.device) + for i in range(len(sub_conds_out)): + window.add_window(conds_final[i], sub_conds_out[i] * weights_tensor) + window.add_window(counts_final[i], weights_tensor) + + for callback in comfy.patcher_extension.get_all_callbacks(IndexListCallbacks.COMBINE_CONTEXT_WINDOW_RESULTS, self.callbacks): + callback(self, x_in, sub_conds_out, sub_conds, window, window_idx, total_windows, timestep, conds_final, counts_final, biases_final) + + +def _prepare_sampling_wrapper(executor, model, noise_shape: torch.Tensor, *args, **kwargs): + # limit noise_shape length to context_length for more accurate vram use estimation + model_options = kwargs.get("model_options", None) + if model_options is None: + raise Exception("model_options not found in prepare_sampling_wrapper; this should never happen, something went wrong.") + handler: IndexListContextHandler = model_options.get("context_handler", None) + if handler is not None: + noise_shape = list(noise_shape) + noise_shape[handler.dim] = min(noise_shape[handler.dim], handler.context_length) + return executor(model, noise_shape, *args, **kwargs) + + +def create_prepare_sampling_wrapper(model: ModelPatcher): + model.add_wrapper_with_key( + comfy.patcher_extension.WrappersMP.PREPARE_SAMPLING, + "ContextWindows_prepare_sampling", + _prepare_sampling_wrapper + ) + + +def match_weights_to_dim(weights: list[float], x_in: torch.Tensor, dim: int, device=None) -> torch.Tensor: + total_dims = len(x_in.shape) + weights_tensor = torch.Tensor(weights).to(device=device) + for _ in range(dim): + weights_tensor = weights_tensor.unsqueeze(0) + for _ in range(total_dims - dim - 1): + weights_tensor = weights_tensor.unsqueeze(-1) + return weights_tensor + +def get_shape_for_dim(x_in: torch.Tensor, dim: int) -> list[int]: + total_dims = len(x_in.shape) + shape = [] + for _ in range(dim): + shape.append(1) + shape.append(x_in.shape[dim]) + for _ in range(total_dims - dim - 1): + shape.append(1) + return shape + +class ContextSchedules: + UNIFORM_LOOPED = "looped_uniform" + UNIFORM_STANDARD = "standard_uniform" + STATIC_STANDARD = "standard_static" + BATCHED = "batched" + + +# from https://github.com/neggles/animatediff-cli/blob/main/src/animatediff/pipelines/context.py +def create_windows_uniform_looped(num_frames: int, handler: IndexListContextHandler, model_options: dict[str]): + windows = [] + if num_frames < handler.context_length: + windows.append(list(range(num_frames))) + return windows + + context_stride = min(handler.context_stride, int(np.ceil(np.log2(num_frames / handler.context_length))) + 1) + # obtain uniform windows as normal, looping and all + for context_step in 1 << np.arange(context_stride): + pad = int(round(num_frames * ordered_halving(handler._step))) + for j in range( + int(ordered_halving(handler._step) * context_step) + pad, + num_frames + pad + (0 if handler.closed_loop else -handler.context_overlap), + (handler.context_length * context_step - handler.context_overlap), + ): + windows.append([e % num_frames for e in range(j, j + handler.context_length * context_step, context_step)]) + + return windows + +def create_windows_uniform_standard(num_frames: int, handler: IndexListContextHandler, model_options: dict[str]): + # unlike looped, uniform_straight does NOT allow windows that loop back to the beginning; + # instead, they get shifted to the corresponding end of the frames. + # in the case that a window (shifted or not) is identical to the previous one, it gets skipped. 
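+    # e.g. with num_frames=8, context_length=4, context_overlap=2 the uniform pass below can produce a wrapped window like [6, 7, 0, 1]; shift_window_to_end turns it into [4, 5, 6, 7], and any duplicate window that results is then deleted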
+ windows = [] + if num_frames <= handler.context_length: + windows.append(list(range(num_frames))) + return windows + + context_stride = min(handler.context_stride, int(np.ceil(np.log2(num_frames / handler.context_length))) + 1) + # first, obtain uniform windows as normal, looping and all + for context_step in 1 << np.arange(context_stride): + pad = int(round(num_frames * ordered_halving(handler._step))) + for j in range( + int(ordered_halving(handler._step) * context_step) + pad, + num_frames + pad + (-handler.context_overlap), + (handler.context_length * context_step - handler.context_overlap), + ): + windows.append([e % num_frames for e in range(j, j + handler.context_length * context_step, context_step)]) + + # now that windows are created, shift any windows that loop, and delete duplicate windows + delete_idxs = [] + win_i = 0 + while win_i < len(windows): + # if window rolls over itself, need to shift it + is_roll, roll_idx = does_window_roll_over(windows[win_i], num_frames) + if is_roll: + roll_val = windows[win_i][roll_idx] # roll_val might not be 0 for windows of higher strides + shift_window_to_end(windows[win_i], num_frames=num_frames) + # check if next window (cyclical) is missing roll_val + if roll_val not in windows[(win_i+1) % len(windows)]: + # need to insert new window here - just insert window starting at roll_val + windows.insert(win_i+1, list(range(roll_val, roll_val + handler.context_length))) + # delete window if it's not unique + for pre_i in range(0, win_i): + if windows[win_i] == windows[pre_i]: + delete_idxs.append(win_i) + break + win_i += 1 + + # reverse delete_idxs so that they will be deleted in an order that doesn't break idx correlation + delete_idxs.reverse() + for i in delete_idxs: + windows.pop(i) + + return windows + + +def create_windows_static_standard(num_frames: int, handler: IndexListContextHandler, model_options: dict[str]): + windows = [] + if num_frames <= handler.context_length: + windows.append(list(range(num_frames))) + return windows + # always return the same set of windows + delta = handler.context_length - handler.context_overlap + for start_idx in range(0, num_frames, delta): + # if past the end of frames, move start_idx back to allow same context_length + ending = start_idx + handler.context_length + if ending >= num_frames: + final_delta = ending - num_frames + final_start_idx = start_idx - final_delta + windows.append(list(range(final_start_idx, final_start_idx + handler.context_length))) + break + windows.append(list(range(start_idx, start_idx + handler.context_length))) + return windows + + +def create_windows_batched(num_frames: int, handler: IndexListContextHandler, model_options: dict[str]): + windows = [] + if num_frames <= handler.context_length: + windows.append(list(range(num_frames))) + return windows + # always return the same set of windows; + # no overlap, just cut up based on context_length; + # last window size will be different if num_frames % opts.context_length != 0 + for start_idx in range(0, num_frames, handler.context_length): + windows.append(list(range(start_idx, min(start_idx + handler.context_length, num_frames)))) + return windows + + +def create_windows_default(num_frames: int, handler: IndexListContextHandler): + return [list(range(num_frames))] + + +CONTEXT_MAPPING = { + ContextSchedules.UNIFORM_LOOPED: create_windows_uniform_looped, + ContextSchedules.UNIFORM_STANDARD: create_windows_uniform_standard, + ContextSchedules.STATIC_STANDARD: create_windows_static_standard, + ContextSchedules.BATCHED:
create_windows_batched, +} + + +def get_matching_context_schedule(context_schedule: str) -> ContextSchedule: + func = CONTEXT_MAPPING.get(context_schedule, None) + if func is None: + raise ValueError(f"Unknown context_schedule '{context_schedule}'.") + return ContextSchedule(context_schedule, func) + + +def get_context_weights(length: int, full_length: int, idxs: list[int], handler: IndexListContextHandler, sigma: torch.Tensor=None): + return handler.fuse_method.func(length, sigma=sigma, handler=handler, full_length=full_length, idxs=idxs) + + +def create_weights_flat(length: int, **kwargs) -> list[float]: + # weight is the same for all + return [1.0] * length + +def create_weights_pyramid(length: int, **kwargs) -> list[float]: + # weight is based on the distance away from the edge of the context window; + # based on weighted average concept in FreeNoise paper + if length % 2 == 0: + max_weight = length // 2 + weight_sequence = list(range(1, max_weight + 1, 1)) + list(range(max_weight, 0, -1)) + else: + max_weight = (length + 1) // 2 + weight_sequence = list(range(1, max_weight, 1)) + [max_weight] + list(range(max_weight - 1, 0, -1)) + return weight_sequence + +def create_weights_overlap_linear(length: int, full_length: int, idxs: list[int], handler: IndexListContextHandler, **kwargs): + # based on code in Kijai's WanVideoWrapper: https://github.com/kijai/ComfyUI-WanVideoWrapper/blob/dbb2523b37e4ccdf45127e5ae33e31362f755c8e/nodes.py#L1302 + # only expected overlap is given different weights + weights_torch = torch.ones((length)) + # blend left-side on all except first window + if min(idxs) > 0: + ramp_up = torch.linspace(1e-37, 1, handler.context_overlap) + weights_torch[:handler.context_overlap] = ramp_up + # blend right-side on all except last window + if max(idxs) < full_length-1: + ramp_down = torch.linspace(1, 1e-37, handler.context_overlap) + weights_torch[-handler.context_overlap:] = ramp_down + return weights_torch + +class ContextFuseMethods: + FLAT = "flat" + PYRAMID = "pyramid" + RELATIVE = "relative" + OVERLAP_LINEAR = "overlap-linear" + + LIST = [PYRAMID, FLAT, OVERLAP_LINEAR] + LIST_STATIC = [PYRAMID, RELATIVE, FLAT, OVERLAP_LINEAR] + + +FUSE_MAPPING = { + ContextFuseMethods.FLAT: create_weights_flat, + ContextFuseMethods.PYRAMID: create_weights_pyramid, + ContextFuseMethods.RELATIVE: create_weights_pyramid, + ContextFuseMethods.OVERLAP_LINEAR: create_weights_overlap_linear, +} + +def get_matching_fuse_method(fuse_method: str) -> ContextFuseMethod: + func = FUSE_MAPPING.get(fuse_method, None) + if func is None: + raise ValueError(f"Unknown fuse_method '{fuse_method}'.") + return ContextFuseMethod(fuse_method, func) + +# Returns fraction that has denominator that is a power of 2 +def ordered_halving(val): + # get binary value, padded with 0s for 64 bits + bin_str = f"{val:064b}" + # flip binary value, padding included + bin_flip = bin_str[::-1] + # convert binary to int + as_int = int(bin_flip, 2) + # divide by 1 << 64, equivalent to 2**64, or 18446744073709551616, + # or 0b10000000000000000000000000000000000000000000000000000000000000000 (1 with 64 zeros) + return as_int / (1 << 64) + + +def get_missing_indexes(windows: list[list[int]], num_frames: int) -> list[int]: + all_indexes = list(range(num_frames)) + for w in windows: + for val in w: + try: + all_indexes.remove(val) + except ValueError: + pass + return all_indexes + + +def does_window_roll_over(window: list[int], num_frames: int) -> tuple[bool, int]: + prev_val = -1 + for i, val in enumerate(window): + val = val %
num_frames + if val < prev_val: + return True, i + prev_val = val + return False, -1 + + +def shift_window_to_start(window: list[int], num_frames: int): + start_val = window[0] + for i in range(len(window)): + # 1) subtract each element by start_val to move vals relative to the start of all frames + # 2) add num_frames and take modulus to get adjusted vals + window[i] = ((window[i] - start_val) + num_frames) % num_frames + + +def shift_window_to_end(window: list[int], num_frames: int): + # 1) shift window to start + shift_window_to_start(window, num_frames) + end_val = window[-1] + end_delta = num_frames - end_val - 1 + for i in range(len(window)): + # 2) add end_delta to each val to slide windows to end + window[i] = window[i] + end_delta diff --git a/comfy/sampler_helpers.py b/comfy/sampler_helpers.py index 8dbc41455..e46971afb 100644 --- a/comfy/sampler_helpers.py +++ b/comfy/sampler_helpers.py @@ -149,7 +149,7 @@ def cleanup_models(conds, models): cleanup_additional_models(set(control_cleanup)) -def prepare_model_patcher(model: 'ModelPatcher', conds, model_options: dict): +def prepare_model_patcher(model: ModelPatcher, conds, model_options: dict): ''' Registers hooks from conds. ''' @@ -158,8 +158,8 @@ def prepare_model_patcher(model: 'ModelPatcher', conds, model_options: dict): for k in conds: get_hooks_from_cond(conds[k], hooks) # add wrappers and callbacks from ModelPatcher to transformer_options - model_options["transformer_options"]["wrappers"] = comfy.patcher_extension.copy_nested_dicts(model.wrappers) - model_options["transformer_options"]["callbacks"] = comfy.patcher_extension.copy_nested_dicts(model.callbacks) + comfy.patcher_extension.merge_nested_dicts(model_options["transformer_options"].setdefault("wrappers", {}), model.wrappers, copy_dict1=False) + comfy.patcher_extension.merge_nested_dicts(model_options["transformer_options"].setdefault("callbacks", {}), model.callbacks, copy_dict1=False) # begin registering hooks registered = comfy.hooks.HookGroup() target_dict = comfy.hooks.create_target_dict(comfy.hooks.EnumWeightTarget.Model) diff --git a/comfy/samplers.py b/comfy/samplers.py index ad2f40cdc..d5390d64e 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -16,6 +16,7 @@ import comfy.sampler_helpers import comfy.model_patcher import comfy.patcher_extension import comfy.hooks +import comfy.context_windows import scipy.stats import numpy @@ -198,14 +199,20 @@ def finalize_default_conds(model: 'BaseModel', hooked_to_run: dict[comfy.hooks.H hooked_to_run.setdefault(p.hooks, list()) hooked_to_run[p.hooks] += [(p, i)] -def calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Tensor, timestep, model_options): +def calc_cond_batch(model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep, model_options: dict[str]): + handler: comfy.context_windows.ContextHandlerABC = model_options.get("context_handler", None) + if handler is None or not handler.should_use_context(model, conds, x_in, timestep, model_options): + return _calc_cond_batch_outer(model, conds, x_in, timestep, model_options) + return handler.execute(_calc_cond_batch_outer, model, conds, x_in, timestep, model_options) + +def _calc_cond_batch_outer(model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep, model_options): executor = comfy.patcher_extension.WrapperExecutor.new_executor( _calc_cond_batch, comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.CALC_COND_BATCH, model_options, is_model_options=True) ) return executor.execute(model, 
conds, x_in, timestep, model_options) -def _calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Tensor, timestep, model_options): +def _calc_cond_batch(model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep, model_options): out_conds = [] out_counts = [] # separate conds by matching hooks diff --git a/comfy_extras/nodes_context_windows.py b/comfy_extras/nodes_context_windows.py new file mode 100644 index 000000000..1c3d9e697 --- /dev/null +++ b/comfy_extras/nodes_context_windows.py @@ -0,0 +1,89 @@ +from __future__ import annotations +from comfy_api.latest import ComfyExtension, io +import comfy.context_windows +import nodes + + +class ContextWindowsManualNode(io.ComfyNode): + @classmethod + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="ContextWindowsManual", + display_name="Context Windows (Manual)", + category="context", + description="Manually set context windows.", + inputs=[ + io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), + io.Int.Input("context_length", min=1, default=16, tooltip="The length of the context window."), + io.Int.Input("context_overlap", min=0, default=4, tooltip="The overlap of the context window."), + io.Combo.Input("context_schedule", options=[ + comfy.context_windows.ContextSchedules.STATIC_STANDARD, + comfy.context_windows.ContextSchedules.UNIFORM_STANDARD, + comfy.context_windows.ContextSchedules.UNIFORM_LOOPED, + comfy.context_windows.ContextSchedules.BATCHED, + ], tooltip="The schedule used to generate the context windows."), + io.Int.Input("context_stride", min=1, default=1, tooltip="The stride of the context window; only applicable to uniform schedules."), + io.Boolean.Input("closed_loop", default=False, tooltip="Whether to close the context window loop; only applicable to looped schedules."), + io.Combo.Input("fuse_method", options=comfy.context_windows.ContextFuseMethods.LIST_STATIC, default=comfy.context_windows.ContextFuseMethods.PYRAMID, tooltip="The method to use to fuse the context windows."), + io.Int.Input("dim", min=0, max=5, default=0, tooltip="The dimension to apply the context windows to."), + ], + outputs=[ + io.Model.Output(tooltip="The model with context windows applied during sampling."), + ], + is_experimental=True, + ) + + @classmethod + def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str, dim: int) -> io.Model: + model = model.clone() + model.model_options["context_handler"] = comfy.context_windows.IndexListContextHandler( + context_schedule=comfy.context_windows.get_matching_context_schedule(context_schedule), + fuse_method=comfy.context_windows.get_matching_fuse_method(fuse_method), + context_length=context_length, + context_overlap=context_overlap, + context_stride=context_stride, + closed_loop=closed_loop, + dim=dim) + # make memory usage calculation only take into account the context window latents + comfy.context_windows.create_prepare_sampling_wrapper(model) + return io.NodeOutput(model) + +class WanContextWindowsManualNode(ContextWindowsManualNode): + @classmethod + def define_schema(cls) -> io.Schema: + schema = super().define_schema() + schema.node_id = "WanContextWindowsManual" + schema.display_name = "WAN Context Windows (Manual)" + schema.description = "Manually set context windows for WAN-like models (dim=2)."
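+        # WAN latents are laid out [batch, channels, frames, height, width], so the temporal axis these windows slice over is dim=2; the pixel-frame counts entered below are converted to latent frame counts in execute()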
+ schema.inputs = [ + io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), + io.Int.Input("context_length", min=1, max=nodes.MAX_RESOLUTION, step=4, default=81, tooltip="The length of the context window."), + io.Int.Input("context_overlap", min=0, default=30, tooltip="The overlap of the context window."), + io.Combo.Input("context_schedule", options=[ + comfy.context_windows.ContextSchedules.STATIC_STANDARD, + comfy.context_windows.ContextSchedules.UNIFORM_STANDARD, + comfy.context_windows.ContextSchedules.UNIFORM_LOOPED, + comfy.context_windows.ContextSchedules.BATCHED, + ], tooltip="The schedule used to generate the context windows."), + io.Int.Input("context_stride", min=1, default=1, tooltip="The stride of the context window; only applicable to uniform schedules."), + io.Boolean.Input("closed_loop", default=False, tooltip="Whether to close the context window loop; only applicable to looped schedules."), + io.Combo.Input("fuse_method", options=comfy.context_windows.ContextFuseMethods.LIST_STATIC, default=comfy.context_windows.ContextFuseMethods.PYRAMID, tooltip="The method to use to fuse the context windows."), + ] + return schema + + @classmethod + def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str) -> io.Model: + context_length = max(((context_length - 1) // 4) + 1, 1) # at least length 1 + context_overlap = max(((context_overlap - 1) // 4) + 1, 0) # at least overlap 0 + return super().execute(model, context_length, context_overlap, context_schedule, context_stride, closed_loop, fuse_method, dim=2) + + +class ContextWindowsExtension(ComfyExtension): + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + ContextWindowsManualNode, + WanContextWindowsManualNode, + ] + +def comfy_entrypoint(): + return ContextWindowsExtension() diff --git a/nodes.py b/nodes.py index 9448f9c1b..860a236aa 100644 --- a/nodes.py +++ b/nodes.py @@ -2320,6 +2320,7 @@ async def init_builtin_extra_nodes(): "nodes_camera_trajectory.py", "nodes_edit_model.py", "nodes_tcfg.py", + "nodes_context_windows.py", ] import_failed = [] From 72fd4d22b6a4fa11a3f737c9a633e7d635a42181 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 14 Aug 2025 13:03:21 -0700 Subject: [PATCH 0437/1073] av is an essential dependency. (#9341) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bfb31a73f..551002b5b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,11 +20,11 @@ tqdm psutil alembic SQLAlchemy +av>=14.2.0 #non essential dependencies: kornia>=0.7.1 spandrel soundfile -av>=14.2.0 pydantic~=2.0 pydantic-settings~=2.0 From 644b23ac0b92442b475e44397c62aa8de929d546 Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Fri, 15 Aug 2025 07:36:53 +1000 Subject: [PATCH 0438/1073] Make custom node testing checkbox optional in issue templates (#9342) The checkbox for confirming custom node testing is now optional in both bug report and user support templates. This allows users to submit issues even if they haven't been able to test with custom nodes disabled, making the reporting process more accessible.
--- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- .github/ISSUE_TEMPLATE/user-support.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 69ce998eb..3cf2717b7 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -22,7 +22,7 @@ body: description: Please confirm you have tried to reproduce the issue with all custom nodes disabled. options: - label: I have tried disabling custom nodes and the issue persists (see [how to disable custom nodes](https://docs.comfy.org/troubleshooting/custom-node-issues#step-1%3A-test-with-all-custom-nodes-disabled) if you need help) - required: true + required: false - type: textarea attributes: label: Expected Behavior diff --git a/.github/ISSUE_TEMPLATE/user-support.yml b/.github/ISSUE_TEMPLATE/user-support.yml index 50657d493..281661f92 100644 --- a/.github/ISSUE_TEMPLATE/user-support.yml +++ b/.github/ISSUE_TEMPLATE/user-support.yml @@ -18,7 +18,7 @@ body: description: Please confirm you have tried to reproduce the issue with all custom nodes disabled. options: - label: I have tried disabling custom nodes and the issue persists (see [how to disable custom nodes](https://docs.comfy.org/troubleshooting/custom-node-issues#step-1%3A-test-with-all-custom-nodes-disabled) if you need help) - required: true + required: false - type: textarea attributes: label: Your question From fa570cbf599657e73c636872616c0b1f8e74f692 Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Thu, 14 Aug 2025 16:44:22 -0700 Subject: [PATCH 0439/1073] Update CODEOWNERS (#9343) --- CODEOWNERS | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index c4acbf06e..c8acd66d5 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -5,20 +5,21 @@ # Inlined the team members for now. 
# Maintainers -*.md @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/tests/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/tests-unit/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/notebooks/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/script_examples/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/.github/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/requirements.txt @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne -/pyproject.toml @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne +*.md @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill +/tests/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill +/tests-unit/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill +/notebooks/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill +/script_examples/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill +/.github/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill +/requirements.txt @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill +/pyproject.toml @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill # Python web server -/api_server/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne -/app/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne -/utils/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne +/api_server/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill +/app/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill +/utils/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill # Node developers -/comfy_extras/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne -/comfy/comfy_types/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne +/comfy_extras/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill +/comfy/comfy_types/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill +/comfy_api_nodes/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill From deebee4ff6fd1b2713683d22e5e2e07170daa867 Mon Sep 17 00:00:00 2001 From: guill Date: Thu, 14 Aug 2025 18:46:55 -0700 Subject: [PATCH 0440/1073] Update default parameters for Moonvalley video nodes (#9290) * Update default parameters for Moonvalley video nodes - Changed default negative prompts to a more extensive list for both BaseMoonvalleyVideoNode and MoonvalleyVideo2VideoNode. - Updated default guidance scale values for both nodes to enhance prompt adherence. - Set a fixed default seed value for consistency in video generation. 
* no message * ruff fix --------- Co-authored-by: thorsten --- comfy_api_nodes/nodes_moonvalley.py | 128 ++++++++++++++++++++-------- 1 file changed, 91 insertions(+), 37 deletions(-) diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 164ca3ea5..806a70e06 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -1,6 +1,5 @@ import logging from typing import Any, Callable, Optional, TypeVar -import random import torch from comfy_api_nodes.util.validation_utils import ( get_image_dimensions, @@ -208,20 +207,29 @@ def _get_video_dimensions(video: VideoInput) -> tuple[int, int]: def _validate_video_dimensions(width: int, height: int) -> None: """Validates video dimensions meet Moonvalley V2V requirements.""" supported_resolutions = { - (1920, 1080), (1080, 1920), (1152, 1152), - (1536, 1152), (1152, 1536) + (1920, 1080), + (1080, 1920), + (1152, 1152), + (1536, 1152), + (1152, 1536), } if (width, height) not in supported_resolutions: - supported_list = ', '.join([f'{w}x{h}' for w, h in sorted(supported_resolutions)]) - raise ValueError(f"Resolution {width}x{height} not supported. Supported: {supported_list}") + supported_list = ", ".join( + [f"{w}x{h}" for w, h in sorted(supported_resolutions)] + ) + raise ValueError( + f"Resolution {width}x{height} not supported. Supported: {supported_list}" + ) def _validate_container_format(video: VideoInput) -> None: """Validates video container format is MP4.""" container_format = video.get_container_format() - if container_format not in ['mp4', 'mov,mp4,m4a,3gp,3g2,mj2']: - raise ValueError(f"Only MP4 container format supported. Got: {container_format}") + if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]: + raise ValueError( + f"Only MP4 container format supported. 
Got: {container_format}" + ) def _validate_and_trim_duration(video: VideoInput) -> VideoInput: @@ -244,7 +252,6 @@ def _trim_if_too_long(video: VideoInput, duration: float) -> VideoInput: return video - def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: """ Returns a new VideoInput object trimmed from the beginning to the specified duration, @@ -302,7 +309,9 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput: # Calculate target frame count that's divisible by 16 fps = input_container.streams.video[0].average_rate estimated_frames = int(duration_sec * fps) - target_frames = (estimated_frames // 16) * 16 # Round down to nearest multiple of 16 + target_frames = ( + estimated_frames // 16 + ) * 16 # Round down to nearest multiple of 16 if target_frames == 0: raise ValueError("Video too short: need at least 16 frames for Moonvalley") @@ -424,7 +433,7 @@ class BaseMoonvalleyVideoNode: MoonvalleyTextToVideoInferenceParams, "negative_prompt", multiline=True, - default="low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly specular, AI artifacts", + default=" gopro, bright, contrast, static, overexposed, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring", ), "resolution": ( IO.COMBO, @@ -441,12 +450,11 @@ class BaseMoonvalleyVideoNode: "tooltip": "Resolution of the output video", }, ), - # "length": (IO.COMBO,{"options":['5s','10s'], "default": '5s'}), "prompt_adherence": model_field_to_node_input( IO.FLOAT, MoonvalleyTextToVideoInferenceParams, "guidance_scale", - default=7.0, + default=10.0, step=1, min=1, max=20, @@ -455,13 +463,12 @@ class BaseMoonvalleyVideoNode: IO.INT, MoonvalleyTextToVideoInferenceParams, "seed", - default=random.randint(0, 2**32 - 1), + default=9, min=0, max=4294967295, step=1, display="number", tooltip="Random seed value", - control_after_generate=True, ), "steps": model_field_to_node_input( IO.INT, @@ -532,9 +539,11 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode): # Get MIME type from tensor - assuming PNG format for image tensors mime_type = "image/png" - image_url = (await upload_images_to_comfyapi( - image, max_images=1, auth_kwargs=kwargs, mime_type=mime_type - ))[0] + image_url = ( + await upload_images_to_comfyapi( + image, max_images=1, auth_kwargs=kwargs, mime_type=mime_type + ) + )[0] request = MoonvalleyTextToVideoRequest( image_url=image_url, prompt_text=prompt, inference_params=inference_params @@ -570,17 +579,39 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): return { "required": { "prompt": model_field_to_node_input( - IO.STRING, MoonvalleyVideoToVideoRequest, "prompt_text", - multiline=True + IO.STRING, + MoonvalleyVideoToVideoRequest, + "prompt_text", + multiline=True, ), "negative_prompt": model_field_to_node_input( IO.STRING, MoonvalleyVideoToVideoInferenceParams, "negative_prompt", multiline=True, - default="low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly 
specular, AI artifacts" + default=" gopro, bright, contrast, static, overexposed, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring", + ), + "seed": model_field_to_node_input( + IO.INT, + MoonvalleyVideoToVideoInferenceParams, + "seed", + default=9, + min=0, + max=4294967295, + step=1, + display="number", + tooltip="Random seed value", + control_after_generate=False, + ), + "prompt_adherence": model_field_to_node_input( + IO.FLOAT, + MoonvalleyVideoToVideoInferenceParams, + "guidance_scale", + default=10.0, + step=1, + min=1, + max=20, ), - "seed": model_field_to_node_input(IO.INT,MoonvalleyVideoToVideoInferenceParams, "seed", default=random.randint(0, 2**32 - 1), min=0, max=4294967295, step=1, display="number", tooltip="Random seed value", control_after_generate=True), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", @@ -588,7 +619,14 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): "unique_id": "UNIQUE_ID", }, "optional": { - "video": (IO.VIDEO, {"default": "", "multiline": False, "tooltip": "The reference video used to generate the output video. Must be at least 5 seconds long. Videos longer than 5s will be automatically trimmed. Only MP4 format supported."}), + "video": ( + IO.VIDEO, + { + "default": "", + "multiline": False, + "tooltip": "The reference video used to generate the output video. Must be at least 5 seconds long. Videos longer than 5s will be automatically trimmed. 
Only MP4 format supported.", + }, + ), + "control_type": ( + ["Motion Transfer", "Pose Transfer"], + {"default": "Motion Transfer"}, + ), + "motion_intensity": ( + "INT", + { + "default": 100, + "min": 0, + "max": 100, + "tooltip": "Only used if control_type is 'Motion Transfer'", + }, - ) - } + ), + "image": model_field_to_node_input( + IO.IMAGE, + MoonvalleyTextToVideoRequest, + "image_url", + tooltip="The reference image used to generate the video", + ), + }, } RETURN_TYPES = ("VIDEO",) @@ -613,6 +657,7 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs ): video = kwargs.get("video") + image = kwargs.get("image", None) if not video: raise MoonvalleyApiError("video is required") @@ -620,8 +665,16 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): video_url = "" if video: validated_video = validate_video_to_video_input(video) - video_url = await upload_video_to_comfyapi(validated_video, auth_kwargs=kwargs) + video_url = await upload_video_to_comfyapi( + validated_video, auth_kwargs=kwargs + ) + mime_type = "image/png" + if image is not None: + validate_input_image(image, with_frame_conditioning=True) + image_url = await upload_images_to_comfyapi( + image=image, auth_kwargs=kwargs, max_images=1, mime_type=mime_type + ) control_type = kwargs.get("control_type") motion_intensity = kwargs.get("motion_intensity") @@ -631,12 +684,12 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): # Only include motion_intensity for Motion Transfer control_params = {} if control_type == "Motion Transfer" and motion_intensity is not None: - control_params['motion_intensity'] = motion_intensity + control_params["motion_intensity"] = motion_intensity - inference_params=MoonvalleyVideoToVideoInferenceParams( + inference_params = MoonvalleyVideoToVideoInferenceParams( negative_prompt=negative_prompt, seed=kwargs.get("seed"), - control_params=control_params + control_params=control_params, ) control = self.parseControlParameter(control_type) @@ -647,6 +700,7 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode): prompt_text=prompt, inference_params=inference_params, ) + request.image_url = image_url if image is not None else None initial_operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -694,15 +748,15 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode): validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) width_height = self.parseWidthHeightFromRes(kwargs.get("resolution")) - inference_params=MoonvalleyTextToVideoInferenceParams( - negative_prompt=negative_prompt, - steps=kwargs.get("steps"), - seed=kwargs.get("seed"), - guidance_scale=kwargs.get("prompt_adherence"), - num_frames=128, - width=width_height.get("width"), - height=width_height.get("height"), - ) + inference_params = MoonvalleyTextToVideoInferenceParams( + negative_prompt=negative_prompt, + steps=kwargs.get("steps"), + seed=kwargs.get("seed"), + guidance_scale=kwargs.get("prompt_adherence"), + num_frames=128, + width=width_height.get("width"), + height=width_height.get("height"), + ) request = MoonvalleyTextToVideoRequest( prompt_text=prompt, inference_params=inference_params ) From 5d65d6753b195d674ce16522d6c34f9a33f36269 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 15 Aug 2025 04:48:41 +0300 Subject: [PATCH 0441/1073] convert WAN nodes to V3 schema (#9201) --- comfy_extras/nodes_wan.py | 549 +++++++++++++++++++++----------------- 1 file changed, 298
insertions(+), 251 deletions(-)

diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py
index f80c83ba6..694a183f6 100644
--- a/comfy_extras/nodes_wan.py
+++ b/comfy_extras/nodes_wan.py
@@ -9,29 +9,35 @@ import comfy.clip_vision
 import json
 import numpy as np
 from typing import Tuple
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
 
-class WanImageToVideo:
+class WanImageToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
-                             "start_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanImageToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
+                io.Image.Input("start_image", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         if start_image is not None:
             start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
@@ -51,32 +57,36 @@ class WanImageToVideo:
 
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, negative, out_latent)
+        return io.NodeOutput(positive, negative, out_latent)
 
-class WanFunControlToVideo:
+class WanFunControlToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
-                             "start_image": ("IMAGE", ),
-                             "control_video": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanFunControlToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
+                io.Image.Input("start_image", optional=True),
+                io.Image.Input("control_video", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None, control_video=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None, control_video=None) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent)
@@ -101,31 +111,34 @@ class WanFunControlToVideo:
 
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, negative, out_latent)
+        return io.NodeOutput(positive, negative, out_latent)
 
-class Wan22FunControlToVideo:
+class Wan22FunControlToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"ref_image": ("IMAGE", ),
-                             "control_video": ("IMAGE", ),
-                             # "start_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="Wan22FunControlToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.Image.Input("ref_image", optional=True),
+                io.Image.Input("control_video", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, ref_image=None, start_image=None, control_video=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, ref_image=None, start_image=None, control_video=None) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent)
@@ -158,32 +171,36 @@ class Wan22FunControlToVideo:
 
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, negative, out_latent)
+        return io.NodeOutput(positive, negative, out_latent)
 
-class WanFirstLastFrameToVideo:
+class WanFirstLastFrameToVideo(io.ComfyNode):
    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"clip_vision_start_image": ("CLIP_VISION_OUTPUT", ),
-                             "clip_vision_end_image": ("CLIP_VISION_OUTPUT", ),
-                             "start_image": ("IMAGE", ),
-                             "end_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanFirstLastFrameToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.ClipVisionOutput.Input("clip_vision_start_image", optional=True),
+                io.ClipVisionOutput.Input("clip_vision_end_image", optional=True),
+                io.Image.Input("start_image", optional=True),
+                io.Image.Input("end_image", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_start_image=None, clip_vision_end_image=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_start_image=None, clip_vision_end_image=None) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         if start_image is not None:
             start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
@@ -224,62 +241,70 @@
 
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, negative, out_latent)
+        return io.NodeOutput(positive, negative, out_latent)
 
-class WanFunInpaintToVideo:
+class WanFunInpaintToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
-                             "start_image": ("IMAGE", ),
-                             "end_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanFunInpaintToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
+                io.Image.Input("start_image", optional=True),
+                io.Image.Input("end_image", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_output=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_output=None) -> io.NodeOutput:
         flfv = WanFirstLastFrameToVideo()
-        return flfv.encode(positive, negative, vae, width, height, length, batch_size, start_image=start_image, end_image=end_image, clip_vision_start_image=clip_vision_output)
+        return flfv.execute(positive, negative, vae, width, height, length, batch_size, start_image=start_image, end_image=end_image, clip_vision_start_image=clip_vision_output)
 
-class WanVaceToVideo:
+class WanVaceToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
-                             },
-                "optional": {"control_video": ("IMAGE", ),
-                             "control_masks": ("MASK", ),
-                             "reference_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanVaceToVideo",
+            category="conditioning/video_models",
+            is_experimental=True,
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.Float.Input("strength", default=1.0, min=0.0, max=1000.0, step=0.01),
+                io.Image.Input("control_video", optional=True),
+                io.Mask.Input("control_masks", optional=True),
+                io.Image.Input("reference_image", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+                io.Int.Output(display_name="trim_latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT", "INT")
-    RETURN_NAMES = ("positive", "negative", "latent", "trim_latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    EXPERIMENTAL = True
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, strength, control_video=None, control_masks=None, reference_image=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, strength, control_video=None, control_masks=None, reference_image=None) -> io.NodeOutput:
         latent_length = ((length - 1) // 4) + 1
         if control_video is not None:
             control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
@@ -336,52 +361,59 @@ class WanVaceToVideo:
         latent = torch.zeros([batch_size, 16, latent_length, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, negative, out_latent, trim_latent)
+        return io.NodeOutput(positive, negative, out_latent, trim_latent)
 
-class TrimVideoLatent:
+class TrimVideoLatent(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "samples": ("LATENT",),
-                              "trim_amount": ("INT", {"default": 0, "min": 0, "max": 99999}),
-                              }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TrimVideoLatent",
+            category="latent/video",
+            is_experimental=True,
+            inputs=[
+                io.Latent.Input("samples"),
+                io.Int.Input("trim_amount", default=0, min=0, max=99999),
+            ],
+            outputs=[
+                io.Latent.Output(),
+            ],
+        )
 
-    RETURN_TYPES = ("LATENT",)
-    FUNCTION = "op"
-
-    CATEGORY = "latent/video"
-
-    EXPERIMENTAL = True
-
-    def op(self, samples, trim_amount):
+    @classmethod
+    def execute(cls, samples, trim_amount) -> io.NodeOutput:
         samples_out = samples.copy()
 
         s1 = samples["samples"]
         samples_out["samples"] = s1[:, :, trim_amount:]
-        return (samples_out,)
+        return io.NodeOutput(samples_out)
 
-class WanCameraImageToVideo:
+class WanCameraImageToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
-                             "start_image": ("IMAGE", ),
-                             "camera_conditions": ("WAN_CAMERA_EMBEDDING", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanCameraImageToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
+                io.Image.Input("start_image", optional=True),
+                io.WanCameraEmbedding.Input("camera_conditions", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None, camera_conditions=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None, camera_conditions=None) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent)
@@ -404,29 +436,34 @@ class WanCameraImageToVideo:
 
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, negative, out_latent)
+        return io.NodeOutput(positive, negative, out_latent)
 
-class WanPhantomSubjectToVideo:
+class WanPhantomSubjectToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"positive": ("CONDITIONING", ),
-                             "negative": ("CONDITIONING", ),
-                             "vae": ("VAE", ),
-                             "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"images": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanPhantomSubjectToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.Image.Input("images", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative_text"),
+                io.Conditioning.Output(display_name="negative_img_text"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative_text", "negative_img_text", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, width, height, length, batch_size, images):
+    @classmethod
+    def execute(cls, positive, negative, vae, width, height, length, batch_size, images) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         cond2 = negative
         if images is not None:
@@ -442,7 +479,7 @@ class WanPhantomSubjectToVideo:
 
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, cond2, negative, out_latent)
+        return io.NodeOutput(positive, cond2, negative, out_latent)
 
 def parse_json_tracks(tracks):
     """Parse JSON track data into a standardized format"""
@@ -655,39 +692,41 @@ def patch_motion(
 
     return out_mask_full, out_feature_full
 
-class WanTrackToVideo:
+class WanTrackToVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {
-                    "positive": ("CONDITIONING", ),
-                    "negative": ("CONDITIONING", ),
-                    "vae": ("VAE", ),
-                    "tracks": ("STRING", {"multiline": True, "default": "[]"}),
-                    "width": ("INT", {"default": 832, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                    "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                    "length": ("INT", {"default": 81, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                    "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                    "temperature": ("FLOAT", {"default": 220.0, "min": 1.0, "max": 1000.0, "step": 0.1}),
-                    "topk": ("INT", {"default": 2, "min": 1, "max": 10}),
-                    "start_image": ("IMAGE", ),
-                },
-                "optional": {
-                    "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
-                }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="WanPhantomSubjectToVideo",
+            category="conditioning/video_models",
+            inputs=[
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Vae.Input("vae"),
+                io.String.Input("tracks", multiline=True, default="[]"),
+                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.Float.Input("temperature", default=220.0, min=1.0, max=1000.0, step=0.1),
+                io.Int.Input("topk", default=2, min=1, max=10),
+                io.Image.Input("start_image"),
+                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/video_models"
-
-    def encode(self, positive, negative, vae, tracks, width, height, length, batch_size,
-               temperature, topk, start_image=None, clip_vision_output=None):
+    @classmethod
+    def execute(cls, positive, negative, vae, tracks, width, height, length, batch_size,
+                temperature, topk, start_image=None, clip_vision_output=None) -> io.NodeOutput:
         tracks_data = parse_json_tracks(tracks)
 
         if not tracks_data:
-            return WanImageToVideo().encode(positive, negative, vae, width, height, length, batch_size, start_image=start_image, clip_vision_output=clip_vision_output)
+            return WanImageToVideo().execute(positive, negative, vae, width, height, length, batch_size, start_image=start_image, clip_vision_output=clip_vision_output)
 
         latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
@@ -741,34 +780,36 @@ class WanTrackToVideo:
 
         out_latent = {}
         out_latent["samples"] = latent
-        return (positive, negative, out_latent)
+        return io.NodeOutput(positive, negative, out_latent)
 
-class Wan22ImageToVideoLatent:
+class Wan22ImageToVideoLatent(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"vae": ("VAE", ),
-                             "width": ("INT", {"default": 1280, "min": 32, "max": nodes.MAX_RESOLUTION, "step": 32}),
-                             "height": ("INT", {"default": 704, "min": 32, "max": nodes.MAX_RESOLUTION, "step": 32}),
-                             "length": ("INT", {"default": 49, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"start_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="Wan22ImageToVideoLatent",
+            category="conditioning/inpaint",
+            inputs=[
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=1280, min=32, max=nodes.MAX_RESOLUTION, step=32),
+                io.Int.Input("height", default=704, min=32, max=nodes.MAX_RESOLUTION, step=32),
+                io.Int.Input("length", default=49, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.Image.Input("start_image", optional=True),
+            ],
+            outputs=[
+                io.Latent.Output(),
+            ],
+        )
 
-
-    RETURN_TYPES = ("LATENT",)
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/inpaint"
-
-    def encode(self, vae, width, height, length, batch_size, start_image=None):
+    @classmethod
+    def execute(cls, vae, width, height, length, batch_size, start_image=None) -> io.NodeOutput:
         latent = torch.zeros([1, 48, ((length - 1) // 4) + 1, height // 16, width // 16], device=comfy.model_management.intermediate_device())
 
         if start_image is None:
             out_latent = {}
             out_latent["samples"] = latent
-            return (out_latent,)
+            return io.NodeOutput(out_latent)
 
         mask = torch.ones([latent.shape[0], 1, ((length - 1) // 4) + 1, latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device())
 
@@ -783,19 +824,25 @@ class Wan22ImageToVideoLatent:
         latent = latent_format.process_out(latent) * mask + latent * (1.0 - mask)
         out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1))
         out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1))
-        return (out_latent,)
+        return io.NodeOutput(out_latent)
 
-NODE_CLASS_MAPPINGS = {
-    "WanTrackToVideo": WanTrackToVideo,
-    "WanImageToVideo": WanImageToVideo,
-    "WanFunControlToVideo": WanFunControlToVideo,
-    "Wan22FunControlToVideo": Wan22FunControlToVideo,
-    "WanFunInpaintToVideo": WanFunInpaintToVideo,
-    "WanFirstLastFrameToVideo": WanFirstLastFrameToVideo,
-    "WanVaceToVideo": WanVaceToVideo,
-    "TrimVideoLatent": TrimVideoLatent,
-    "WanCameraImageToVideo": WanCameraImageToVideo,
-    "WanPhantomSubjectToVideo": WanPhantomSubjectToVideo,
-    "Wan22ImageToVideoLatent": Wan22ImageToVideoLatent,
-}
+class WanExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            WanTrackToVideo,
+            WanImageToVideo,
+            WanFunControlToVideo,
+            Wan22FunControlToVideo,
+            WanFunInpaintToVideo,
+            WanFirstLastFrameToVideo,
+            WanVaceToVideo,
+            TrimVideoLatent,
+            WanCameraImageToVideo,
+            WanPhantomSubjectToVideo,
+            Wan22ImageToVideoLatent,
+        ]
+
+async def comfy_entrypoint() -> WanExtension:
+    return WanExtension()

From ad19a069f68a19566632b9bda3e72f4eed8a22d8 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 14 Aug 2025 20:16:01 -0700
Subject: [PATCH 0442/1073] Make SLG nodes work on Qwen Image model.
 (#9345)

---
 comfy/ldm/qwen_image/model.py | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py
index c15ab8e40..99843f88d 100644
--- a/comfy/ldm/qwen_image/model.py
+++ b/comfy/ldm/qwen_image/model.py
@@ -356,6 +356,7 @@ class QwenImageTransformer2DModel(nn.Module):
         context,
         attention_mask=None,
         guidance: torch.Tensor = None,
+        transformer_options={},
         **kwargs
     ):
         timestep = timesteps
@@ -383,14 +384,26 @@
             else self.time_text_embed(timestep, guidance, hidden_states)
         )
 
-        for block in self.transformer_blocks:
-            encoder_hidden_states, hidden_states = block(
-                hidden_states=hidden_states,
-                encoder_hidden_states=encoder_hidden_states,
-                encoder_hidden_states_mask=encoder_hidden_states_mask,
-                temb=temb,
-                image_rotary_emb=image_rotary_emb,
-            )
+        patches_replace = transformer_options.get("patches_replace", {})
+        blocks_replace = patches_replace.get("dit", {})
+
+        for i, block in enumerate(self.transformer_blocks):
+            if ("double_block", i) in blocks_replace:
+                def block_wrap(args):
+                    out = {}
+                    out["txt"], out["img"] = block(hidden_states=args["img"], encoder_hidden_states=args["txt"], encoder_hidden_states_mask=encoder_hidden_states_mask, temb=args["vec"], image_rotary_emb=args["pe"])
+                    return out
+                out = blocks_replace[("double_block", i)]({"img": hidden_states, "txt": encoder_hidden_states, "vec": temb, "pe": image_rotary_emb}, {"original_block": block_wrap})
+                hidden_states = out["img"]
+                encoder_hidden_states = out["txt"]
+            else:
+                encoder_hidden_states, hidden_states = block(
+                    hidden_states=hidden_states,
+                    encoder_hidden_states=encoder_hidden_states,
+                    encoder_hidden_states_mask=encoder_hidden_states_mask,
+                    temb=temb,
+                    image_rotary_emb=image_rotary_emb,
+                )
 
         hidden_states = self.norm_out(hidden_states, temb)
         hidden_states = self.proj_out(hidden_states)

From f0d5d0111f1f78bc8ce5d1f3968f19e40cd2ce7b Mon Sep 17 00:00:00 2001
From: "Xiangxi Guo (Ryan)"
Date: Thu, 14 Aug 2025 20:41:37 -0700
Subject: [PATCH 0443/1073] Avoid torch compile graphbreak for older pytorch
 versions (#9344)

Turns out torch.compile has some gaps in context manager decorator
syntax support. I've sent patches to fix that in PyTorch, but it won't
be available for all the folks running older versions of PyTorch, hence
this trivial patch.
---
 comfy/ops.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/comfy/ops.py b/comfy/ops.py
index be312d714..2be35f76a 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -41,9 +41,11 @@ try:
 
         SDPA_BACKEND_PRIORITY.insert(0, SDPBackend.CUDNN_ATTENTION)
 
-        @sdpa_kernel(backends=SDPA_BACKEND_PRIORITY, set_priority=True)
         def scaled_dot_product_attention(q, k, v, *args, **kwargs):
-            return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs)
+            # Use this (rather than the decorator syntax) to eliminate graph
+            # break for pytorch < 2.9
+            with sdpa_kernel(SDPA_BACKEND_PRIORITY, set_priority=True):
+                return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs)
 except (ModuleNotFoundError, TypeError):
     logging.warning("Could not set sdpa backend priority.")

From 4e5c230f6a957962961794c07f02be748076c771 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 14 Aug 2025 20:44:02 -0700
Subject: [PATCH 0444/1073] Fix last commit not working on older pytorch.
 (#9346)

---
 comfy/ops.py | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/comfy/ops.py b/comfy/ops.py
index 2be35f76a..18e7db705 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -32,20 +32,21 @@ def scaled_dot_product_attention(q, k, v, *args, **kwargs):
 try:
     if torch.cuda.is_available():
         from torch.nn.attention import SDPBackend, sdpa_kernel
+        import inspect
+        if "set_priority" in inspect.signature(sdpa_kernel).parameters:
+            SDPA_BACKEND_PRIORITY = [
+                SDPBackend.FLASH_ATTENTION,
+                SDPBackend.EFFICIENT_ATTENTION,
+                SDPBackend.MATH,
+            ]
 
-        SDPA_BACKEND_PRIORITY = [
-            SDPBackend.FLASH_ATTENTION,
-            SDPBackend.EFFICIENT_ATTENTION,
-            SDPBackend.MATH,
-        ]
+            SDPA_BACKEND_PRIORITY.insert(0, SDPBackend.CUDNN_ATTENTION)
 
-        SDPA_BACKEND_PRIORITY.insert(0, SDPBackend.CUDNN_ATTENTION)
-
-        def scaled_dot_product_attention(q, k, v, *args, **kwargs):
-            # Use this (rather than the decorator syntax) to eliminate graph
-            # break for pytorch < 2.9
-            with sdpa_kernel(SDPA_BACKEND_PRIORITY, set_priority=True):
-                return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs)
+            def scaled_dot_product_attention(q, k, v, *args, **kwargs):
+                with sdpa_kernel(SDPA_BACKEND_PRIORITY, set_priority=True):
+                    return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs)
+        else:
+            logging.warning("Torch version too old to set sdpa backend priority.")
 except (ModuleNotFoundError, TypeError):
     logging.warning("Could not set sdpa backend priority.")

From e08ecfbd8a9deda8939b14d7f1ff7d7139f1a4ed Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 14 Aug 2025 21:22:26 -0700
Subject: [PATCH 0445/1073] Add warning when using old pytorch. (#9347)

---
 comfy/rmsnorm.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy/rmsnorm.py b/comfy/rmsnorm.py
index 66ae8321d..555542a46 100644
--- a/comfy/rmsnorm.py
+++ b/comfy/rmsnorm.py
@@ -1,6 +1,7 @@
 import torch
 import comfy.model_management
 import numbers
+import logging
 
 RMSNorm = None
 
@@ -9,6 +10,7 @@ try:
     RMSNorm = torch.nn.RMSNorm
 except:
     rms_norm_torch = None
+    logging.warning("Please update pytorch to use native RMSNorm")
 
 
 def rms_norm(x, weight=None, eps=1e-6):

From 027c63f63a7f5f380a4df1057c548410b0a87606 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Fri, 15 Aug 2025 21:57:47 +0300
Subject: [PATCH 0446/1073] fix(OpenAIGPTImage1): set correct MIME type for
 multipart uploads to OpenAI edits (#9348)

---
 comfy_api_nodes/nodes_openai.py | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py
index ab3c5363b..cbff2b2d2 100644
--- a/comfy_api_nodes/nodes_openai.py
+++ b/comfy_api_nodes/nodes_openai.py
@@ -464,8 +464,6 @@ class OpenAIGPTImage1(ComfyNodeABC):
         path = "/proxy/openai/images/generations"
         content_type = "application/json"
         request_class = OpenAIImageGenerationRequest
-        img_binaries = []
-        mask_binary = None
         files = []
 
         if image is not None:
@@ -484,14 +482,11 @@ class OpenAIGPTImage1(ComfyNodeABC):
                 img_byte_arr = io.BytesIO()
                 img.save(img_byte_arr, format="PNG")
                 img_byte_arr.seek(0)
-                img_binary = img_byte_arr
-                img_binary.name = f"image_{i}.png"
-                img_binaries.append(img_binary)
                 if batch_size == 1:
-                    files.append(("image", img_binary))
+                    files.append(("image", (f"image_{i}.png", img_byte_arr, "image/png")))
                 else:
-                    files.append(("image[]", img_binary))
+                    files.append(("image[]", (f"image_{i}.png", img_byte_arr, "image/png")))
 
         if mask is not None:
             if image is None:
@@ -511,9 +506,7 @@ class OpenAIGPTImage1(ComfyNodeABC):
             mask_img_byte_arr = io.BytesIO()
             mask_img.save(mask_img_byte_arr, format="PNG")
             mask_img_byte_arr.seek(0)
-            mask_binary = mask_img_byte_arr
-            mask_binary.name = "mask.png"
-            files.append(("mask", mask_binary))
+            files.append(("mask", ("mask.png", mask_img_byte_arr, "image/png")))
 
         # Build the operation
         operation = SynchronousOperation(

From c308a8840aebf06649364e8e175862250a2d8823 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 15 Aug 2025 12:50:39 -0700
Subject: [PATCH 0447/1073] Add FluxKontextMultiReferenceLatentMethod node.
 (#9356)

This node is only useful if someone trains the kontext model to properly
use multiple reference images via the index method.

The default is the offset method which feeds the multiple images like if
they were stitched together as one. This method works with the current
flux kontext model.
---
 comfy/ldm/flux/model.py    | 24 ++++++++++++++++--------
 comfy/model_base.py        |  4 ++++
 comfy_extras/nodes_flux.py | 19 +++++++++++++++++++
 3 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py
index 8f4d99f54..c4de82795 100644
--- a/comfy/ldm/flux/model.py
+++ b/comfy/ldm/flux/model.py
@@ -224,19 +224,27 @@ class Flux(nn.Module):
         if ref_latents is not None:
             h = 0
             w = 0
+            index = 0
+            index_ref_method = kwargs.get("ref_latents_method", "offset") == "index"
             for ref in ref_latents:
-                h_offset = 0
-                w_offset = 0
-                if ref.shape[-2] + h > ref.shape[-1] + w:
-                    w_offset = w
+                if index_ref_method:
+                    index += 1
+                    h_offset = 0
+                    w_offset = 0
                 else:
-                    h_offset = h
+                    index = 1
+                    h_offset = 0
+                    w_offset = 0
+                    if ref.shape[-2] + h > ref.shape[-1] + w:
+                        w_offset = w
+                    else:
+                        h_offset = h
+                    h = max(h, ref.shape[-2] + h_offset)
+                    w = max(w, ref.shape[-1] + w_offset)
 
-                kontext, kontext_ids = self.process_img(ref, index=1, h_offset=h_offset, w_offset=w_offset)
+                kontext, kontext_ids = self.process_img(ref, index=index, h_offset=h_offset, w_offset=w_offset)
                 img = torch.cat([img, kontext], dim=1)
                 img_ids = torch.cat([img_ids, kontext_ids], dim=1)
-                h = max(h, ref.shape[-2] + h_offset)
-                w = max(w, ref.shape[-1] + w_offset)
 
         txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
         out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None))
diff --git a/comfy/model_base.py b/comfy/model_base.py
index cde61df7c..bf874b875 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -890,6 +890,10 @@ class Flux(BaseModel):
             for lat in ref_latents:
                 latents.append(self.process_latent_in(lat))
             out['ref_latents'] = comfy.conds.CONDList(latents)
+
+        ref_latents_method = kwargs.get("reference_latents_method", None)
+        if ref_latents_method is not None:
+            out['ref_latents_method'] = comfy.conds.CONDConstant(ref_latents_method)
         return out
 
     def extra_conds_shapes(self, **kwargs):
diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py
index 8a8a17698..c8db75bb3 100644
--- a/comfy_extras/nodes_flux.py
+++ b/comfy_extras/nodes_flux.py
@@ -100,9 +100,28 @@ class FluxKontextImageScale:
 
         return (image, )
 
+
+class FluxKontextMultiReferenceLatentMethod:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "conditioning": ("CONDITIONING", ),
+            "reference_latents_method": (("offset", "index"), ),
+            }}
+
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "append"
+    EXPERIMENTAL = True
+
+    CATEGORY = "advanced/conditioning/flux"
+
+    def append(self, conditioning, reference_latents_method):
+        c = node_helpers.conditioning_set_values(conditioning, {"reference_latents_method": reference_latents_method})
+        return (c, )
+
 NODE_CLASS_MAPPINGS = {
     "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
     "FluxGuidance": FluxGuidance,
     "FluxDisableGuidance": FluxDisableGuidance,
     "FluxKontextImageScale": FluxKontextImageScale,
+    "FluxKontextMultiReferenceLatentMethod": FluxKontextMultiReferenceLatentMethod,
 }

From 1702e6df16b0a52e147f19e3d5c5548c25a64339 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 15 Aug 2025 14:29:58 -0700
Subject: [PATCH 0448/1073] Implement wan2.2 camera model. (#9357)

Use the old WanCameraImageToVideo node.
---
 comfy/ldm/wan/model.py    |  7 ++++++-
 comfy/model_detection.py  |  5 ++++-
 comfy/supported_models.py | 14 +++++++++++++-
 comfy_extras/nodes_wan.py |  7 +++++--
 4 files changed, 28 insertions(+), 5 deletions(-)

diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py
index 4e2d99566..9d3741be3 100644
--- a/comfy/ldm/wan/model.py
+++ b/comfy/ldm/wan/model.py
@@ -768,7 +768,12 @@ class CameraWanModel(WanModel):
                  operations=None,
                  ):
 
-        super().__init__(model_type='i2v', patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim, num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, flf_pos_embed_token_number=flf_pos_embed_token_number, image_model=image_model, device=device, dtype=dtype, operations=operations)
+        if model_type == 'camera':
+            model_type = 'i2v'
+        else:
+            model_type = 't2v'
+
+        super().__init__(model_type=model_type, patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim, num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, flf_pos_embed_token_number=flf_pos_embed_token_number, image_model=image_model, device=device, dtype=dtype, operations=operations)
         operation_settings = {"operations": operations, "device": device, "dtype": dtype}
 
         self.control_adapter = WanCamAdapter(in_dim_control_adapter, dim, kernel_size=patch_size[1:], stride=patch_size[1:], operation_settings=operation_settings)
diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index 8acc51e20..2bec0541e 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -364,7 +364,10 @@ def detect_unet_config(state_dict, key_prefix, metadata=None):
             dit_config["vace_in_dim"] = state_dict['{}vace_patch_embedding.weight'.format(key_prefix)].shape[1]
             dit_config["vace_layers"] = count_blocks(state_dict_keys, '{}vace_blocks.'.format(key_prefix) + '{}.')
         elif '{}control_adapter.conv.weight'.format(key_prefix) in state_dict_keys:
-            dit_config["model_type"] = "camera"
+            if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys:
+                dit_config["model_type"] = "camera"
+            else:
+                dit_config["model_type"] = "camera_2.2"
         else:
             if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys:
                 dit_config["model_type"] = "i2v"
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index 156ff9e26..7ed6dfd69 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -1046,6 +1046,18 @@ class WAN21_Camera(WAN21_T2V):
     def get_model(self, state_dict, prefix="", device=None):
         out = model_base.WAN21_Camera(self, image_to_video=False, device=device)
         return out
+
+class WAN22_Camera(WAN21_T2V):
+    unet_config = {
+        "image_model": "wan2.1",
+        "model_type": "camera_2.2",
+        "in_dim": 36,
+    }
+
+    def get_model(self, state_dict, prefix="", device=None):
+        out = model_base.WAN21_Camera(self, image_to_video=False, device=device)
+        return out
+
 class WAN21_Vace(WAN21_T2V):
     unet_config = {
         "image_model": "wan2.1",
@@ -1260,6 +1272,6 @@ class QwenImage(supported_models_base.BASE):
         return supported_models_base.ClipTarget(comfy.text_encoders.qwen_image.QwenImageTokenizer, comfy.text_encoders.qwen_image.te(**hunyuan_detect))
 
 
-models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2, QwenImage]
+models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2, QwenImage]
 
 models += [SVD_img2vid]
diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py
index 694a183f6..83a990688 100644
--- a/comfy_extras/nodes_wan.py
+++ b/comfy_extras/nodes_wan.py
@@ -422,9 +422,12 @@ class WanCameraImageToVideo(io.ComfyNode):
             start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
             concat_latent_image = vae.encode(start_image[:, :, :, :3])
             concat_latent[:,:,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]]
+            mask = torch.ones((1, 1, latent.shape[2] * 4, latent.shape[-2], latent.shape[-1]))
+            mask[:, :, :start_image.shape[0] + 3] = 0.0
+            mask = mask.view(1, mask.shape[2] // 4, 4, mask.shape[3], mask.shape[4]).transpose(1, 2)
 
-        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent})
-        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent})
+        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent, "concat_mask": mask})
+        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent, "concat_mask": mask})
 
         if camera_conditions is not None:
             positive = node_helpers.conditioning_set_values(positive, {'camera_conditions': camera_conditions})

From ed2e33c69a291094c4fcc13d8426c49844a6363c Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Fri, 15 Aug 2025 20:32:58 -0700
Subject: [PATCH 0449/1073] bump frontend version to 1.25.8 (#9361)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 551002b5b..2ae44ebe1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.24.4
+comfyui-frontend-package==1.25.8
 comfyui-workflow-templates==0.1.59
 comfyui-embedded-docs==0.2.6
 torch

From 20a84166d0d37dd6833caa6cadf3bfac8c241b48 Mon Sep 17 00:00:00 2001
From: Terry Jia
Date: Sat, 16 Aug 2025 02:07:12 -0400
Subject: [PATCH 0450/1073] record audio node (#8716)

* record audio node

* sf
---
 comfy_extras/nodes_audio.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py
index a90b31779..3b23f65d8 100644
--- a/comfy_extras/nodes_audio.py
+++ b/comfy_extras/nodes_audio.py
@@ -346,6 +346,24 @@ class LoadAudio:
             return "Invalid audio file: {}".format(audio)
         return True
 
+class RecordAudio:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"audio": ("AUDIO_RECORD", {})}}
+
+    CATEGORY = "audio"
+
+    RETURN_TYPES = ("AUDIO", )
+    FUNCTION = "load"
+
+    def load(self, audio):
+        audio_path = folder_paths.get_annotated_filepath(audio)
+
+        waveform, sample_rate = torchaudio.load(audio_path)
+        audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}
+        return (audio, )
+
+
 NODE_CLASS_MAPPINGS = {
     "EmptyLatentAudio": EmptyLatentAudio,
     "VAEEncodeAudio": VAEEncodeAudio,
@@ -356,6 +374,7 @@ NODE_CLASS_MAPPINGS = {
     "LoadAudio": LoadAudio,
     "PreviewAudio": PreviewAudio,
     "ConditioningStableAudio": ConditioningStableAudio,
+    "RecordAudio": RecordAudio,
 }
 
 NODE_DISPLAY_NAME_MAPPINGS = {
@@ -367,4 +386,5 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "SaveAudio": "Save Audio (FLAC)",
     "SaveAudioMP3": "Save Audio (MP3)",
     "SaveAudioOpus": "Save Audio (Opus)",
+    "RecordAudio": "Record Audio",
 }

From 0f2b8525bcafe213e8421a49564a90f926e81f2e Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 16 Aug 2025 14:51:28 -0700
Subject: [PATCH 0451/1073] Qwen image model refactor.
 (#9375)

---
 comfy/ldm/qwen_image/model.py | 36 +++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py
index 99843f88d..40d8fd979 100644
--- a/comfy/ldm/qwen_image/model.py
+++ b/comfy/ldm/qwen_image/model.py
@@ -333,21 +333,25 @@ class QwenImageTransformer2DModel(nn.Module):
         self.proj_out = operations.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True, dtype=dtype, device=device)
         self.gradient_checkpointing = False
 
-    def pos_embeds(self, x, context):
+    def process_img(self, x, index=0, h_offset=0, w_offset=0):
         bs, c, t, h, w = x.shape
         patch_size = self.patch_size
+        hidden_states = comfy.ldm.common_dit.pad_to_patch_size(x, (1, self.patch_size, self.patch_size))
+        orig_shape = hidden_states.shape
+        hidden_states = hidden_states.view(orig_shape[0], orig_shape[1], orig_shape[-2] // 2, 2, orig_shape[-1] // 2, 2)
+        hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5)
+        hidden_states = hidden_states.reshape(orig_shape[0], (orig_shape[-2] // 2) * (orig_shape[-1] // 2), orig_shape[1] * 4)
         h_len = ((h + (patch_size // 2)) // patch_size)
         w_len = ((w + (patch_size // 2)) // patch_size)
 
-        img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
-        img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1)
-        img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0)
-        img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
+        h_offset = ((h_offset + (patch_size // 2)) // patch_size)
+        w_offset = ((w_offset + (patch_size // 2)) // patch_size)
 
-        txt_start = round(max(h_len, w_len))
-        txt_ids = torch.linspace(txt_start, txt_start + context.shape[1], steps=context.shape[1], device=x.device, dtype=x.dtype).reshape(1, -1, 1).repeat(bs, 1, 3)
-        ids = torch.cat((txt_ids, img_ids), dim=1)
-        return self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype)
+        img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
+        img_ids[:, :, 0] = img_ids[:, :, 1] + index
+        img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1)
+        img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0)
+        return hidden_states, repeat(img_ids, "h w c -> b (h w) c", b=bs), orig_shape
 
     def forward(
         self,
@@ -363,13 +367,13 @@ class QwenImageTransformer2DModel(nn.Module):
         encoder_hidden_states = context
        encoder_hidden_states_mask = attention_mask
 
-        image_rotary_emb = self.pos_embeds(x, context)
+        hidden_states, img_ids, orig_shape = self.process_img(x)
+        num_embeds = hidden_states.shape[1]
 
-        hidden_states = comfy.ldm.common_dit.pad_to_patch_size(x, (1, self.patch_size, self.patch_size))
-        orig_shape = hidden_states.shape
-        hidden_states = hidden_states.view(orig_shape[0], orig_shape[1], orig_shape[-2] // 2, 2, orig_shape[-1] // 2, 2)
-        hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5)
-        hidden_states = hidden_states.reshape(orig_shape[0], (orig_shape[-2] // 2) * (orig_shape[-1] // 2), orig_shape[1] * 4)
+        txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size), ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size)))
+        txt_ids = torch.linspace(txt_start, txt_start + context.shape[1], steps=context.shape[1], device=x.device, dtype=x.dtype).reshape(1, -1, 1).repeat(x.shape[0], 1, 3)
+        ids = torch.cat((txt_ids, img_ids), dim=1)
+        image_rotary_emb = self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype)
 
         hidden_states = self.img_in(hidden_states)
         encoder_hidden_states = self.txt_norm(encoder_hidden_states)
@@ -408,6 +412,6 @@ class QwenImageTransformer2DModel(nn.Module):
 
         hidden_states = self.norm_out(hidden_states, temb)
         hidden_states = self.proj_out(hidden_states)
-        hidden_states = hidden_states.view(orig_shape[0], orig_shape[-2] // 2, orig_shape[-1] // 2, orig_shape[1], 2, 2)
+        hidden_states = hidden_states[:, :num_embeds].view(orig_shape[0], orig_shape[-2] // 2, orig_shape[-1] // 2, orig_shape[1], 2, 2)
         hidden_states = hidden_states.permute(0, 3, 1, 4, 2, 5)
         return hidden_states.reshape(orig_shape)[:, :, :, :x.shape[-2], :x.shape[-1]]

From ed43784b0d04e5b8e8ff2c057fa84b9c5132aaf2 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sun, 17 Aug 2025 13:45:39 -0700
Subject: [PATCH 0452/1073] WIP Qwen edit model: The diffusion model part.
 (#9383)

---
 comfy/ldm/qwen_image/model.py | 26 ++++++++++++++++++++++++++
 comfy/model_base.py           | 10 ++++++++++
 2 files changed, 36 insertions(+)

diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py
index 40d8fd979..a3c726299 100644
--- a/comfy/ldm/qwen_image/model.py
+++ b/comfy/ldm/qwen_image/model.py
@@ -360,6 +360,7 @@ class QwenImageTransformer2DModel(nn.Module):
         context,
         attention_mask=None,
         guidance: torch.Tensor = None,
+        ref_latents=None,
         transformer_options={},
         **kwargs
     ):
@@ -370,6 +371,31 @@ class QwenImageTransformer2DModel(nn.Module):
         hidden_states, img_ids, orig_shape = self.process_img(x)
         num_embeds = hidden_states.shape[1]
 
+        if ref_latents is not None:
+            h = 0
+            w = 0
+            index = 0
+            index_ref_method = kwargs.get("ref_latents_method", "index") == "index"
+            for ref in ref_latents:
+                if index_ref_method:
+                    index += 1
+                    h_offset = 0
+                    w_offset = 0
+                else:
+                    index = 1
+                    h_offset = 0
+                    w_offset = 0
+                    if ref.shape[-2] + h > ref.shape[-1] + w:
+                        w_offset = w
+                    else:
+                        h_offset = h
+                    h = max(h, ref.shape[-2] + h_offset)
+                    w = max(w, ref.shape[-1] + w_offset)
+
+                kontext, kontext_ids, _ = self.process_img(ref, index=index, h_offset=h_offset, w_offset=w_offset)
+                hidden_states = torch.cat([hidden_states, kontext], dim=1)
+                img_ids = torch.cat([img_ids, kontext_ids], dim=1)
+
         txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size), ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size)))
         txt_ids = torch.linspace(txt_start, txt_start + context.shape[1], steps=context.shape[1], device=x.device, dtype=x.dtype).reshape(1, -1, 1).repeat(x.shape[0], 1, 3)
         ids = torch.cat((txt_ids, img_ids), dim=1)
diff --git a/comfy/model_base.py b/comfy/model_base.py
index bf874b875..15bd7abef 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -1331,4 +1331,14 @@ class QwenImage(BaseModel):
         cross_attn = kwargs.get("cross_attn", None)
         if cross_attn is not None:
             out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
+        ref_latents = kwargs.get("reference_latents", None)
+        if ref_latents is not None:
+            latents = []
+            for lat in ref_latents:
+                latents.append(self.process_latent_in(lat))
+            out['ref_latents'] = comfy.conds.CONDList(latents)
+
+        ref_latents_method = kwargs.get("reference_latents_method", None)
+        if ref_latents_method is not None:
+            out['ref_latents_method'] = comfy.conds.CONDConstant(ref_latents_method)
         return out

From d4e353a94ec5a8cb15ed151990a9518b890e5d4f Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Mon, 18 Aug 2025 05:38:40 +0800
Subject: [PATCH 0453/1073] Update template to 0.1.60 (#9377)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 2ae44ebe1..72a700028 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.25.8
-comfyui-workflow-templates==0.1.59
+comfyui-workflow-templates==0.1.60
 comfyui-embedded-docs==0.2.6
 torch
 torchsde

From 7f3b9b16c6636cb1201213574892d33c2a35e4ba Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Sun, 17 Aug 2025 15:54:07 -0700
Subject: [PATCH 0454/1073] Make step index detection much more robust (#9392)

---
 comfy/context_windows.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/comfy/context_windows.py b/comfy/context_windows.py
index 928b111df..041f380f9 100644
--- a/comfy/context_windows.py
+++ b/comfy/context_windows.py
@@ -164,8 +164,11 @@ class IndexListContextHandler(ContextHandlerABC):
         return resized_cond
 
     def set_step(self, timestep: torch.Tensor, model_options: dict[str]):
-        indexes = torch.where(model_options["transformer_options"]["sample_sigmas"] == timestep[0])
-        self._step = int(indexes[0])
+        mask = torch.isclose(model_options["transformer_options"]["sample_sigmas"], timestep, rtol=0.0001)
+        matches = torch.nonzero(mask)
+        if torch.numel(matches) == 0:
+            raise Exception("No sample_sigmas matched current timestep; something went wrong.")
+        self._step = int(matches[0].item())
 
     def get_context_windows(self, model: BaseModel, x_in: torch.Tensor, model_options: dict[str]) -> list[IndexListContextWindow]:
         full_length = x_in.size(self.dim)  # TODO: choose dim based on model

From da2efeaec6609265051165bfb413a2a4c84cf4bb Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Sun, 17 Aug 2025 20:21:02 -0700
Subject: [PATCH 0455/1073] Bump frontend to 1.25.9 (#9394)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 72a700028..c7a5c47ab 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.25.8
+comfyui-frontend-package==1.25.9
 comfyui-workflow-templates==0.1.60
 comfyui-embedded-docs==0.2.6
 torch

From bd2ab73976a4e245db3e057795328c89bfd98a88 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Mon, 18 Aug 2025 10:26:55 +0300
Subject: [PATCH 0456/1073] fix(WAN-nodes): invalid nodeid for WanTrackToVideo
 (#9396)

---
 comfy_extras/nodes_wan.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py
index 83a990688..0fff02f76 100644
--- a/comfy_extras/nodes_wan.py
+++ b/comfy_extras/nodes_wan.py
@@ -699,7 +699,7 @@ class WanTrackToVideo(io.ComfyNode):
     @classmethod
     def define_schema(cls):
         return io.Schema(
-            node_id="WanPhantomSubjectToVideo",
+            node_id="WanTrackToVideo",
             category="conditioning/video_models",
             inputs=[
                 io.Conditioning.Input("positive"),

From 4977f203fa8e9e3ab22884c8ace8f9b540d48952 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 18 Aug 2025 19:38:34 -0700
Subject: [PATCH 0457/1073] P2 of qwen edit model.
 (#9412)

* P2 of qwen edit model.

* Typo.

* Fix normal qwen.

* Fix.

* Make the TextEncodeQwenImageEdit also set the ref latent.

If you don't want it to set the ref latent and want to use the
ReferenceLatent node with your custom latent instead just disconnect
the VAE.
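A minimal sketch of the two wirings described above, written as Python
pseudocode rather than as a node graph (TextEncodeQwenImageEdit and
ReferenceLatent are the node names from this series; the keyword
arguments are illustrative assumptions, not the exact socket names):

    # Default wiring: with a VAE connected, the text encode node encodes
    # the input image itself and attaches it as the reference latent.
    positive = TextEncodeQwenImageEdit(clip=clip, vae=vae, image=image, prompt="make the sky red")

    # Custom wiring: leave the VAE disconnected and attach your own
    # latent via the ReferenceLatent node instead.
    positive = TextEncodeQwenImageEdit(clip=clip, image=image, prompt="make the sky red")
    positive = ReferenceLatent(conditioning=positive, latent=my_custom_latent)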
--- comfy/clip_model.py | 2 +- comfy/model_base.py | 8 + comfy/sd1_clip.py | 11 +- comfy/text_encoders/bert.py | 2 +- comfy/text_encoders/llama.py | 43 ++- comfy/text_encoders/qwen_image.py | 20 +- comfy/text_encoders/qwen_vl.py | 428 ++++++++++++++++++++++++++++++ comfy/text_encoders/t5.py | 2 +- comfy_extras/nodes_qwen.py | 63 +++++ nodes.py | 1 + 10 files changed, 565 insertions(+), 15 deletions(-) create mode 100644 comfy/text_encoders/qwen_vl.py create mode 100644 comfy_extras/nodes_qwen.py diff --git a/comfy/clip_model.py b/comfy/clip_model.py index c8294d483..7e47d8a55 100644 --- a/comfy/clip_model.py +++ b/comfy/clip_model.py @@ -97,7 +97,7 @@ class CLIPTextModel_(torch.nn.Module): self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) self.final_layer_norm = operations.LayerNorm(embed_dim, dtype=dtype, device=device) - def forward(self, input_tokens=None, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=torch.float32): + def forward(self, input_tokens=None, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=torch.float32, embeds_info=[]): if embeds is not None: x = embeds + comfy.ops.cast_to(self.embeddings.position_embedding.weight, dtype=dtype, device=embeds.device) else: diff --git a/comfy/model_base.py b/comfy/model_base.py index 15bd7abef..6c861b15e 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1325,6 +1325,7 @@ class Omnigen2(BaseModel): class QwenImage(BaseModel): def __init__(self, model_config, model_type=ModelType.FLUX, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.qwen_image.model.QwenImageTransformer2DModel) + self.memory_usage_factor_conds = ("ref_latents",) def extra_conds(self, **kwargs): out = super().extra_conds(**kwargs) @@ -1342,3 +1343,10 @@ class QwenImage(BaseModel): if ref_latents_method is not None: out['ref_latents_method'] = comfy.conds.CONDConstant(ref_latents_method) return out + + def extra_conds_shapes(self, **kwargs): + out = {} + ref_latents = kwargs.get("reference_latents", None) + if ref_latents is not None: + out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16]) + return out diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index ade340fd1..1e8adbe69 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -204,17 +204,19 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): tokens_embed = self.transformer.get_input_embeddings()(tokens_embed, out_dtype=torch.float32) index = 0 pad_extra = 0 + embeds_info = [] for o in other_embeds: emb = o[1] if torch.is_tensor(emb): emb = {"type": "embedding", "data": emb} + extra = None emb_type = emb.get("type", None) if emb_type == "embedding": emb = emb.get("data", None) else: if hasattr(self.transformer, "preprocess_embed"): - emb = self.transformer.preprocess_embed(emb, device=device) + emb, extra = self.transformer.preprocess_embed(emb, device=device) else: emb = None @@ -229,6 +231,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): tokens_embed = torch.cat([tokens_embed[:, :ind], emb, tokens_embed[:, ind:]], dim=1) attention_mask = attention_mask[:ind] + [1] * emb_shape + attention_mask[ind:] index += emb_shape - 1 + embeds_info.append({"type": emb_type, "index": ind, "size": emb_shape, "extra": extra}) else: index += -1 pad_extra += emb_shape @@ -243,11 +246,11 @@ 
class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): attention_masks.append(attention_mask) num_tokens.append(sum(attention_mask)) - return torch.cat(embeds_out), torch.tensor(attention_masks, device=device, dtype=torch.long), num_tokens + return torch.cat(embeds_out), torch.tensor(attention_masks, device=device, dtype=torch.long), num_tokens, embeds_info def forward(self, tokens): device = self.transformer.get_input_embeddings().weight.device - embeds, attention_mask, num_tokens = self.process_tokens(tokens, device) + embeds, attention_mask, num_tokens, embeds_info = self.process_tokens(tokens, device) attention_mask_model = None if self.enable_attention_masks: @@ -258,7 +261,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): else: intermediate_output = self.layer_idx - outputs = self.transformer(None, attention_mask_model, embeds=embeds, num_tokens=num_tokens, intermediate_output=intermediate_output, final_layer_norm_intermediate=self.layer_norm_hidden_state, dtype=torch.float32) + outputs = self.transformer(None, attention_mask_model, embeds=embeds, num_tokens=num_tokens, intermediate_output=intermediate_output, final_layer_norm_intermediate=self.layer_norm_hidden_state, dtype=torch.float32, embeds_info=embeds_info) if self.layer == "last": z = outputs[0].float() diff --git a/comfy/text_encoders/bert.py b/comfy/text_encoders/bert.py index 551b03162..ed4638a9a 100644 --- a/comfy/text_encoders/bert.py +++ b/comfy/text_encoders/bert.py @@ -116,7 +116,7 @@ class BertModel_(torch.nn.Module): self.embeddings = BertEmbeddings(config_dict["vocab_size"], config_dict["max_position_embeddings"], config_dict["type_vocab_size"], config_dict["pad_token_id"], embed_dim, layer_norm_eps, dtype, device, operations) self.encoder = BertEncoder(config_dict["num_hidden_layers"], embed_dim, config_dict["intermediate_size"], config_dict["num_attention_heads"], layer_norm_eps, dtype, device, operations) - def forward(self, input_tokens, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None): + def forward(self, input_tokens, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None, embeds_info=[]): x = self.embeddings(input_tokens, embeds=embeds, dtype=dtype) mask = None if attention_mask is not None: diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 1da6a0c94..9d90d5a61 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -2,12 +2,14 @@ import torch import torch.nn as nn from dataclasses import dataclass from typing import Optional, Any +import math from comfy.ldm.modules.attention import optimized_attention_for_device import comfy.model_management import comfy.ldm.common_dit import comfy.model_management +from . 
import qwen_vl @dataclass class Llama2Config: @@ -100,12 +102,10 @@ def rotate_half(x): return torch.cat((-x2, x1), dim=-1) -def precompute_freqs_cis(head_dim, seq_len, theta, device=None): +def precompute_freqs_cis(head_dim, position_ids, theta, device=None): theta_numerator = torch.arange(0, head_dim, 2, device=device).float() inv_freq = 1.0 / (theta ** (theta_numerator / head_dim)) - position_ids = torch.arange(0, seq_len, device=device).unsqueeze(0) - inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) @@ -277,7 +277,7 @@ class Llama2_(nn.Module): self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) # self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype) - def forward(self, x, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None): + def forward(self, x, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None, position_ids=None, embeds_info=[]): if embeds is not None: x = embeds else: @@ -286,8 +286,11 @@ class Llama2_(nn.Module): if self.normalize_in: x *= self.config.hidden_size ** 0.5 + if position_ids is None: + position_ids = torch.arange(0, x.shape[1], device=x.device).unsqueeze(0) + freqs_cis = precompute_freqs_cis(self.config.head_dim, - x.shape[1], + position_ids, self.config.rope_theta, device=x.device) @@ -372,8 +375,38 @@ class Qwen25_7BVLI(BaseLlama, torch.nn.Module): self.num_layers = config.num_hidden_layers self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) + self.visual = qwen_vl.Qwen2VLVisionTransformer(hidden_size=1280, output_hidden_size=config.hidden_size, device=device, dtype=dtype, ops=operations) self.dtype = dtype + def preprocess_embed(self, embed, device): + if embed["type"] == "image": + image, grid = qwen_vl.process_qwen2vl_images(embed["data"]) + return self.visual(image.to(device, dtype=torch.float32), grid), grid + return None, None + + def forward(self, x, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None, embeds_info=[]): + grid = None + for e in embeds_info: + if e.get("type") == "image": + grid = e.get("extra", None) + position_ids = torch.zeros((3, embeds.shape[1]), device=embeds.device) + start = e.get("index") + position_ids[:, :start] = torch.arange(0, start, device=embeds.device) + end = e.get("size") + start + len_max = int(grid.max()) // 2 + start_next = len_max + start + position_ids[:, end:] = torch.arange(start_next, start_next + (embeds.shape[1] - end), device=embeds.device) + position_ids[0, start:end] = start + max_d = int(grid[0][1]) // 2 + position_ids[1, start:end] = torch.arange(start, start + max_d, device=embeds.device).unsqueeze(1).repeat(1, math.ceil((end - start) / max_d)).flatten(0)[:end - start] + max_d = int(grid[0][2]) // 2 + position_ids[2, start:end] = torch.arange(start, start + max_d, device=embeds.device).unsqueeze(0).repeat(math.ceil((end - start) / max_d), 1).flatten(0)[:end - start] + + if grid is None: + position_ids = None + + return super().forward(x, attention_mask=attention_mask, embeds=embeds, num_tokens=num_tokens, intermediate_output=intermediate_output, 
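The Qwen25_7BVLI.forward above assigns 3-axis (temporal/height/width) rotary positions: text tokens advance on all three axes together, while image tokens freeze the temporal axis and sweep the merged spatial grid. A simplified standalone sketch of the same indexing, assuming a single image span and an already-merged grid:

import torch

def build_mrope_position_ids(seq_len: int, start: int, grid_h: int, grid_w: int) -> torch.Tensor:
    # grid_h / grid_w are the image token grid after 2x2 spatial merging
    # (grid // 2 in the code above); the image occupies [start, start + grid_h * grid_w).
    end = start + grid_h * grid_w
    pos = torch.zeros((3, seq_len))
    # Text before the image: plain 1-D positions, identical on every axis.
    pos[:, :start] = torch.arange(0, start)
    # Image span: temporal axis stays at `start`, spatial axes sweep the grid.
    pos[0, start:end] = start
    pos[1, start:end] = start + torch.arange(grid_h).repeat_interleave(grid_w)
    pos[2, start:end] = start + torch.arange(grid_w).repeat(grid_h)
    # Text after the image resumes from start + max(grid) so ids never collide.
    resume = start + max(grid_h, grid_w)
    pos[:, end:] = torch.arange(resume, resume + (seq_len - end))
    return pos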
final_layer_norm_intermediate=final_layer_norm_intermediate, dtype=dtype, position_ids=position_ids) + class Gemma2_2B(BaseLlama, torch.nn.Module): def __init__(self, config_dict, dtype, device, operations): super().__init__() diff --git a/comfy/text_encoders/qwen_image.py b/comfy/text_encoders/qwen_image.py index ce5c98097..f07318d6c 100644 --- a/comfy/text_encoders/qwen_image.py +++ b/comfy/text_encoders/qwen_image.py @@ -15,13 +15,27 @@ class QwenImageTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen25_7b", tokenizer=Qwen25_7BVLITokenizer) self.llama_template = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + self.llama_template_images = "<|im_start|>system\nDescribe the key features of the input image \\(color, shape, size, texture, objects, background\\), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n" - def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None,**kwargs): + def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, images=[], **kwargs): if llama_template is None: - llama_text = self.llama_template.format(text) + if len(images) > 0: + llama_text = self.llama_template_images.format(text) + else: + llama_text = self.llama_template.format(text) else: llama_text = llama_template.format(text) - return super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, **kwargs) + tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, **kwargs) + key_name = next(iter(tokens)) + embed_count = 0 + qwen_tokens = tokens[key_name] + for r in qwen_tokens: + for i in range(len(r)): + if r[i][0] == 151655: + if len(images) > embed_count: + r[i] = ({"type": "image", "data": images[embed_count], "original_type": "image"},) + r[i][1:] + embed_count += 1 + return tokens class Qwen25_7BVLIModel(sd1_clip.SDClipModel): diff --git a/comfy/text_encoders/qwen_vl.py b/comfy/text_encoders/qwen_vl.py new file mode 100644 index 000000000..3b18ce730 --- /dev/null +++ b/comfy/text_encoders/qwen_vl.py @@ -0,0 +1,428 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Optional, Tuple +import math +from comfy.ldm.modules.attention import optimized_attention_for_device + + +def process_qwen2vl_images( + images: torch.Tensor, + min_pixels: int = 3136, + max_pixels: int = 12845056, + patch_size: int = 14, + temporal_patch_size: int = 2, + merge_size: int = 2, + image_mean: list = None, + image_std: list = None, +): + if image_mean is None: + image_mean = [0.48145466, 0.4578275, 0.40821073] + if image_std is None: + image_std = [0.26862954, 0.26130258, 0.27577711] + + batch_size, height, width, channels = images.shape + device = images.device + # dtype = images.dtype + + images = images.permute(0, 3, 1, 2) + + grid_thw_list = [] + img = images[0] + + factor = patch_size * merge_size + + h_bar = round(height / factor) * factor + w_bar = round(width / factor) * factor + + if h_bar * w_bar > max_pixels: 
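The QwenImageTokenizer change above splices input images into the token stream by replacing each <|image_pad|> token (id 151655) with an embed descriptor. A minimal sketch of that substitution:

IMAGE_PAD_TOKEN_ID = 151655  # Qwen2.5-VL <|image_pad|> token id used above

def attach_images(token_rows, images):
    # Swap each <|image_pad|> token for a dict payload so the text encoder can
    # later splice vision-transformer outputs in at exactly that position.
    embed_count = 0
    for row in token_rows:
        for i, entry in enumerate(row):
            if entry[0] == IMAGE_PAD_TOKEN_ID and embed_count < len(images):
                row[i] = ({"type": "image", "data": images[embed_count]},) + entry[1:]
                embed_count += 1
    return token_rows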
+ beta = math.sqrt((height * width) / max_pixels) + h_bar = max(factor, math.floor(height / beta / factor) * factor) + w_bar = max(factor, math.floor(width / beta / factor) * factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = math.ceil(height * beta / factor) * factor + w_bar = math.ceil(width * beta / factor) * factor + + img_resized = F.interpolate( + img.unsqueeze(0), + size=(h_bar, w_bar), + mode='bilinear', + align_corners=False + ).squeeze(0) + + normalized = img_resized.clone() + for c in range(3): + normalized[c] = (img_resized[c] - image_mean[c]) / image_std[c] + + grid_h = h_bar // patch_size + grid_w = w_bar // patch_size + grid_thw = torch.tensor([1, grid_h, grid_w], device=device, dtype=torch.long) + + pixel_values = normalized + grid_thw_list.append(grid_thw) + image_grid_thw = torch.stack(grid_thw_list) + + grid_t = 1 + channel = pixel_values.shape[0] + pixel_values = pixel_values.unsqueeze(0).repeat(2, 1, 1, 1) + + patches = pixel_values.reshape( + grid_t, + temporal_patch_size, + channel, + grid_h // merge_size, + merge_size, + patch_size, + grid_w // merge_size, + merge_size, + patch_size, + ) + + patches = patches.permute(0, 3, 6, 4, 7, 2, 1, 5, 8) + flatten_patches = patches.reshape( + grid_t * grid_h * grid_w, + channel * temporal_patch_size * patch_size * patch_size + ) + + return flatten_patches, image_grid_thw + + +class VisionPatchEmbed(nn.Module): + def __init__( + self, + patch_size: int = 14, + temporal_patch_size: int = 2, + in_channels: int = 3, + embed_dim: int = 3584, + device=None, + dtype=None, + ops=None, + ): + super().__init__() + self.patch_size = patch_size + self.temporal_patch_size = temporal_patch_size + self.in_channels = in_channels + self.embed_dim = embed_dim + + kernel_size = [temporal_patch_size, patch_size, patch_size] + self.proj = ops.Conv3d( + in_channels, + embed_dim, + kernel_size=kernel_size, + stride=kernel_size, + bias=False, + device=device, + dtype=dtype + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = hidden_states.view( + -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size + ) + hidden_states = self.proj(hidden_states) + return hidden_states.view(-1, self.embed_dim) + + +def rotate_half(x): + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb_vision(q, k, cos, sin): + cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float() + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class VisionRotaryEmbedding(nn.Module): + def __init__(self, dim: int, theta: float = 10000.0): + super().__init__() + self.dim = dim + self.theta = theta + + def forward(self, seqlen: int, device) -> torch.Tensor: + inv_freq = 1.0 / (self.theta ** (torch.arange(0, self.dim, 2, dtype=torch.float, device=device) / self.dim)) + seq = torch.arange(seqlen, device=inv_freq.device, dtype=inv_freq.dtype) + freqs = torch.outer(seq, inv_freq) + return freqs + + +class PatchMerger(nn.Module): + def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2, device=None, dtype=None, ops=None): + super().__init__() + self.hidden_size = context_dim * (spatial_merge_size ** 2) + self.ln_q = ops.RMSNorm(context_dim, eps=1e-6, device=device, dtype=dtype) + self.mlp = nn.Sequential( + ops.Linear(self.hidden_size, self.hidden_size, device=device, dtype=dtype), + nn.GELU(), + 
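process_qwen2vl_images above starts with the Qwen2-VL "smart resize": snap both sides to multiples of patch_size * merge_size, then rescale if the pixel count leaves the allowed budget. The same arithmetic in isolation:

import math

def smart_resize(height: int, width: int, factor: int = 28,
                 min_pixels: int = 3136, max_pixels: int = 12845056) -> tuple[int, int]:
    # factor = patch_size * merge_size = 14 * 2, matching the defaults above.
    h_bar = round(height / factor) * factor
    w_bar = round(width / factor) * factor
    if h_bar * w_bar > max_pixels:      # too large: shrink, rounding down
        beta = math.sqrt((height * width) / max_pixels)
        h_bar = max(factor, math.floor(height / beta / factor) * factor)
        w_bar = max(factor, math.floor(width / beta / factor) * factor)
    elif h_bar * w_bar < min_pixels:    # too small: grow, rounding up
        beta = math.sqrt(min_pixels / (height * width))
        h_bar = math.ceil(height * beta / factor) * factor
        w_bar = math.ceil(width * beta / factor) * factor
    return h_bar, w_bar

print(smart_resize(512, 512))  # (504, 504): snapped down to a multiple of 28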
ops.Linear(self.hidden_size, dim, device=device, dtype=dtype), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.ln_q(x).reshape(-1, self.hidden_size) + x = self.mlp(x) + return x + + +class VisionAttention(nn.Module): + def __init__(self, hidden_size: int, num_heads: int, device=None, dtype=None, ops=None): + super().__init__() + self.hidden_size = hidden_size + self.num_heads = num_heads + self.head_dim = hidden_size // num_heads + self.scaling = self.head_dim ** -0.5 + + self.qkv = ops.Linear(hidden_size, hidden_size * 3, bias=True, device=device, dtype=dtype) + self.proj = ops.Linear(hidden_size, hidden_size, bias=True, device=device, dtype=dtype) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + cu_seqlens=None, + optimized_attention=None, + ) -> torch.Tensor: + if hidden_states.dim() == 2: + seq_length, _ = hidden_states.shape + batch_size = 1 + hidden_states = hidden_states.unsqueeze(0) + else: + batch_size, seq_length, _ = hidden_states.shape + + qkv = self.qkv(hidden_states) + qkv = qkv.reshape(batch_size, seq_length, 3, self.num_heads, self.head_dim) + query_states, key_states, value_states = qkv.reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + + if position_embeddings is not None: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin) + + query_states = query_states.transpose(0, 1).unsqueeze(0) + key_states = key_states.transpose(0, 1).unsqueeze(0) + value_states = value_states.transpose(0, 1).unsqueeze(0) + + lengths = cu_seqlens[1:] - cu_seqlens[:-1] + splits = [ + torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states) + ] + + attn_outputs = [ + optimized_attention(q, k, v, self.num_heads, skip_reshape=True) + for q, k, v in zip(*splits) + ] + attn_output = torch.cat(attn_outputs, dim=1) + attn_output = attn_output.reshape(seq_length, -1) + attn_output = self.proj(attn_output) + + return attn_output + + +class VisionMLP(nn.Module): + def __init__(self, hidden_size: int, intermediate_size: int, device=None, dtype=None, ops=None): + super().__init__() + self.gate_proj = ops.Linear(hidden_size, intermediate_size, bias=True, device=device, dtype=dtype) + self.up_proj = ops.Linear(hidden_size, intermediate_size, bias=True, device=device, dtype=dtype) + self.down_proj = ops.Linear(intermediate_size, hidden_size, bias=True, device=device, dtype=dtype) + self.act_fn = nn.SiLU() + + def forward(self, hidden_state): + return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)) + + +class VisionBlock(nn.Module): + def __init__(self, hidden_size: int, intermediate_size: int, num_heads: int, device=None, dtype=None, ops=None): + super().__init__() + self.norm1 = ops.RMSNorm(hidden_size, eps=1e-6, device=device, dtype=dtype) + self.norm2 = ops.RMSNorm(hidden_size, eps=1e-6, device=device, dtype=dtype) + self.attn = VisionAttention(hidden_size, num_heads, device=device, dtype=dtype, ops=ops) + self.mlp = VisionMLP(hidden_size, intermediate_size, device=device, dtype=dtype, ops=ops) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + cu_seqlens=None, + optimized_attention=None, + ) -> torch.Tensor: + residual = hidden_states + hidden_states = self.norm1(hidden_states) + hidden_states = self.attn(hidden_states, position_embeddings, 
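VisionAttention above packs every window into one long sequence and uses cumulative sequence lengths (cu_seqlens) to attend within each window independently. The splitting step on its own:

import torch

def split_by_cu_seqlens(x: torch.Tensor, cu_seqlens: torch.Tensor):
    # x: (1, heads, total_len, head_dim) packed tokens; cu_seqlens: (n + 1,)
    # monotonically increasing boundaries. Each chunk is attended separately.
    lengths = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
    return torch.split(x, lengths, dim=2)

q = torch.randn(1, 16, 164, 80)
chunks = split_by_cu_seqlens(q, torch.tensor([0, 64, 100, 164]))
print([c.shape[2] for c in chunks])  # [64, 36, 64]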
cu_seqlens, optimized_attention) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + return hidden_states + + +class Qwen2VLVisionTransformer(nn.Module): + def __init__( + self, + hidden_size: int = 3584, + output_hidden_size: int = 3584, + intermediate_size: int = 3420, + num_heads: int = 16, + num_layers: int = 32, + patch_size: int = 14, + temporal_patch_size: int = 2, + spatial_merge_size: int = 2, + window_size: int = 112, + device=None, + dtype=None, + ops=None + ): + super().__init__() + self.hidden_size = hidden_size + self.patch_size = patch_size + self.spatial_merge_size = spatial_merge_size + self.window_size = window_size + self.fullatt_block_indexes = [7, 15, 23, 31] + + self.patch_embed = VisionPatchEmbed( + patch_size=patch_size, + temporal_patch_size=temporal_patch_size, + in_channels=3, + embed_dim=hidden_size, + device=device, + dtype=dtype, + ops=ops, + ) + + head_dim = hidden_size // num_heads + self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2) + + self.blocks = nn.ModuleList([ + VisionBlock(hidden_size, intermediate_size, num_heads, device, dtype, ops) + for _ in range(num_layers) + ]) + + self.merger = PatchMerger( + dim=output_hidden_size, + context_dim=hidden_size, + spatial_merge_size=spatial_merge_size, + device=device, + dtype=dtype, + ops=ops, + ) + + def get_window_index(self, grid_thw): + window_index = [] + cu_window_seqlens = [0] + window_index_id = 0 + vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size + + for grid_t, grid_h, grid_w in grid_thw: + llm_grid_h = grid_h // self.spatial_merge_size + llm_grid_w = grid_w // self.spatial_merge_size + + index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) + + pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size + pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size + num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size + num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size + + index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) + index_padded = index_padded.reshape( + grid_t, + num_windows_h, + vit_merger_window_size, + num_windows_w, + vit_merger_window_size, + ) + index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( + grid_t, + num_windows_h * num_windows_w, + vit_merger_window_size, + vit_merger_window_size, + ) + + seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) + index_padded = index_padded.reshape(-1) + index_new = index_padded[index_padded != -100] + window_index.append(index_new + window_index_id) + + cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_size * self.spatial_merge_size + cu_window_seqlens[-1] + cu_window_seqlens.extend(cu_seqlens_tmp.tolist()) + window_index_id += (grid_t * llm_grid_h * llm_grid_w).item() + + window_index = torch.cat(window_index, dim=0) + return window_index, cu_window_seqlens + + def get_position_embeddings(self, grid_thw, device): + pos_ids = [] + + for t, h, w in grid_thw: + hpos_ids = torch.arange(h, device=device).unsqueeze(1).expand(-1, w) + hpos_ids = hpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + hpos_ids = hpos_ids.permute(0, 2, 1, 3).flatten() + + wpos_ids = torch.arange(w, device=device).unsqueeze(0).expand(h, -1) + wpos_ids = wpos_ids.reshape( + h 
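get_window_index above pads the merged token grid up to a whole number of attention windows, fills the padding with -100, and filters it out afterwards. A numeric walk-through with the defaults used above (window_size=112, spatial_merge_size=2, patch_size=14); the 504x504 input is an assumed example:

# vit_merger_window_size = 112 // 2 // 14 = 4 merged tokens per window side.
llm_grid_h = llm_grid_w = 18   # e.g. a 504x504 image: 36x36 patches -> 18x18 merged
win = 4
pad_h = win - llm_grid_h % win                 # 4 - 18 % 4 = 2
num_windows_h = (llm_grid_h + pad_h) // win    # (18 + 2) // 4 = 5
# 5 x 5 = 25 windows tile the padded 20x20 grid; the -100 padding slots are
# dropped, so each window contributes only its real token count to
# cu_window_seqlens (scaled by spatial_merge_size ** 2 for unmerged tokens).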
// self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + wpos_ids = wpos_ids.permute(0, 2, 1, 3).flatten() + + pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) + + pos_ids = torch.cat(pos_ids, dim=0) + max_grid_size = grid_thw[:, 1:].max() + rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size, device) + return rotary_pos_emb_full[pos_ids].flatten(1) + + def forward( + self, + pixel_values: torch.Tensor, + image_grid_thw: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + optimized_attention = optimized_attention_for_device(pixel_values.device, mask=False, small_input=True) + + hidden_states = self.patch_embed(pixel_values) + + window_index, cu_window_seqlens = self.get_window_index(image_grid_thw) + cu_window_seqlens = torch.tensor(cu_window_seqlens, device=hidden_states.device) + cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) + + position_embeddings = self.get_position_embeddings(image_grid_thw, hidden_states.device) + + seq_len, _ = hidden_states.size() + spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size + + hidden_states = hidden_states.reshape(seq_len // spatial_merge_unit, spatial_merge_unit, -1) + hidden_states = hidden_states[window_index, :, :] + hidden_states = hidden_states.reshape(seq_len, -1) + + position_embeddings = position_embeddings.reshape(seq_len // spatial_merge_unit, spatial_merge_unit, -1) + position_embeddings = position_embeddings[window_index, :, :] + position_embeddings = position_embeddings.reshape(seq_len, -1) + position_embeddings = torch.cat((position_embeddings, position_embeddings), dim=-1) + position_embeddings = (position_embeddings.cos(), position_embeddings.sin()) + + cu_seqlens = torch.repeat_interleave(image_grid_thw[:, 1] * image_grid_thw[:, 2], image_grid_thw[:, 0]).cumsum( + dim=0, + dtype=torch.int32, + ) + cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) + + for i, block in enumerate(self.blocks): + if i in self.fullatt_block_indexes: + cu_seqlens_now = cu_seqlens + else: + cu_seqlens_now = cu_window_seqlens + hidden_states = block(hidden_states, position_embeddings, cu_seqlens_now, optimized_attention=optimized_attention) + + hidden_states = self.merger(hidden_states) + return hidden_states diff --git a/comfy/text_encoders/t5.py b/comfy/text_encoders/t5.py index 36bf35309..e8588992a 100644 --- a/comfy/text_encoders/t5.py +++ b/comfy/text_encoders/t5.py @@ -199,7 +199,7 @@ class T5Stack(torch.nn.Module): self.final_layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device, operations=operations) # self.dropout = nn.Dropout(config.dropout_rate) - def forward(self, x, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None): + def forward(self, x, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None, embeds_info=[]): mask = None if attention_mask is not None: mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]) diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py new file mode 100644 index 000000000..b5088fae2 --- /dev/null +++ b/comfy_extras/nodes_qwen.py @@ -0,0 +1,63 @@ +import node_helpers +import comfy.utils + +PREFERRED_QWENIMAGE_RESOLUTIONS = [ + (672, 1568), + (688, 1504), + (720, 1456), + (752, 1392), + (800, 1328), + (832, 1248), + (880, 1184), + (944, 1104), + (1024, 1024), 
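The transformer forward above alternates windowed attention with a few full-attention blocks (fullatt_block_indexes); the full-attention boundaries come straight from the image grids. That computation in isolation:

import torch
import torch.nn.functional as F

def full_attention_cu_seqlens(grid_thw: torch.Tensor) -> torch.Tensor:
    # One sequence of grid_h * grid_w patch tokens per temporal slice of each image.
    seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0])
    return F.pad(seqlens.cumsum(dim=0, dtype=torch.int32), (1, 0), value=0)

print(full_attention_cu_seqlens(torch.tensor([[1, 36, 36]])))
# tensor([   0, 1296], dtype=torch.int32)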
+ (1104, 944), + (1184, 880), + (1248, 832), + (1328, 800), + (1392, 752), + (1456, 720), + (1504, 688), + (1568, 672), +] + + +class TextEncodeQwenImageEdit: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip": ("CLIP", ), + "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}), + }, + "optional": {"vae": ("VAE", ), + "image": ("IMAGE", ),}} + + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "encode" + + CATEGORY = "advanced/conditioning" + + def encode(self, clip, prompt, vae=None, image=None): + ref_latent = None + if image is None: + images = [] + else: + images = [image] + if vae is not None: + width = image.shape[2] + height = image.shape[1] + aspect_ratio = width / height + _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED_QWENIMAGE_RESOLUTIONS) + image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1) + ref_latent = vae.encode(image[:, :, :, :3]) + + tokens = clip.tokenize(prompt, images=images) + conditioning = clip.encode_from_tokens_scheduled(tokens) + if ref_latent is not None: + conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True) + return (conditioning, ) + + +NODE_CLASS_MAPPINGS = { + "TextEncodeQwenImageEdit": TextEncodeQwenImageEdit, +} diff --git a/nodes.py b/nodes.py index 860a236aa..b3fa9c51a 100644 --- a/nodes.py +++ b/nodes.py @@ -2321,6 +2321,7 @@ async def init_builtin_extra_nodes(): "nodes_edit_model.py", "nodes_tcfg.py", "nodes_context_windows.py", + "nodes_qwen.py", ] import_failed = [] From 36b5127fd3eee8eaf95ff7296a61269ed56d53c0 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 19 Aug 2025 23:28:07 +0300 Subject: [PATCH 0458/1073] api_nodes: add kling-v2-1 and v2-1-master (#9257) --- comfy_api_nodes/apis/__init__.py | 3 +++ comfy_api_nodes/nodes_kling.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 54298e8a9..c6f91e9d6 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1315,6 +1315,7 @@ class KlingTaskStatus(str, Enum): class KlingTextToVideoModelName(str, Enum): kling_v1 = 'kling-v1' kling_v1_6 = 'kling-v1-6' + kling_v2_1_master = 'kling-v2-1-master' class KlingVideoGenAspectRatio(str, Enum): @@ -1347,6 +1348,8 @@ class KlingVideoGenModelName(str, Enum): kling_v1_5 = 'kling-v1-5' kling_v1_6 = 'kling-v1-6' kling_v2_master = 'kling-v2-master' + kling_v2_1 = 'kling-v2-1' + kling_v2_1_master = 'kling-v2-1-master' class KlingVideoResult(BaseModel): diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 9d483bb0e..9fa390985 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -421,6 +421,8 @@ class KlingTextToVideoNode(KlingNodeBase): "pro mode / 10s duration / kling-v2-master": ("pro", "10", "kling-v2-master"), "standard mode / 5s duration / kling-v2-master": ("std", "5", "kling-v2-master"), "standard mode / 10s duration / kling-v2-master": ("std", "10", "kling-v2-master"), + "pro mode / 5s duration / kling-v2-1-master": ("pro", "5", "kling-v2-1-master"), + "pro mode / 10s duration / kling-v2-1-master": ("pro", "10", "kling-v2-1-master"), } @classmethod From f16a70ba670e11de549af188663a87c77c5bc0c2 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 19 Aug 2025 23:28:27 +0300 Subject: [PATCH 0459/1073] api_nodes: 
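In this first version of TextEncodeQwenImageEdit, the reference image is snapped to whichever preferred resolution is closest in aspect ratio before VAE encoding. The selection expression on its own, with the table abbreviated:

PREFERRED = [(672, 1568), (1024, 1024), (1568, 672)]  # abbreviated from the list above

def closest_resolution(width: int, height: int) -> tuple[int, int]:
    # min() scans (|ratio difference|, w, h) tuples, so the smallest ratio
    # difference wins and its (w, h) pair is returned.
    aspect_ratio = width / height
    _, w, h = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED)
    return w, h

print(closest_resolution(1920, 1080))  # (1568, 672): closest to 16:9 in this subset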
add MinimaxHailuoVideoNode node (#9262) --- comfy_api_nodes/apis/__init__.py | 13 ++- comfy_api_nodes/nodes_minimax.py | 185 ++++++++++++++++++++++++++++++- 2 files changed, 191 insertions(+), 7 deletions(-) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index c6f91e9d6..7a09df55b 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1623,13 +1623,14 @@ class MinimaxTaskResultResponse(BaseModel): task_id: str = Field(..., description='The task ID being queried.') -class Model(str, Enum): +class MiniMaxModel(str, Enum): T2V_01_Director = 'T2V-01-Director' I2V_01_Director = 'I2V-01-Director' S2V_01 = 'S2V-01' I2V_01 = 'I2V-01' I2V_01_live = 'I2V-01-live' T2V_01 = 'T2V-01' + Hailuo_02 = 'MiniMax-Hailuo-02' class SubjectReferenceItem(BaseModel): @@ -1651,7 +1652,7 @@ class MinimaxVideoGenerationRequest(BaseModel): None, description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', ) - model: Model = Field( + model: MiniMaxModel = Field( ..., description='Required. ID of model. Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', ) @@ -1668,6 +1669,14 @@ class MinimaxVideoGenerationRequest(BaseModel): None, description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', ) + duration: Optional[int] = Field( + None, + description="The length of the output video in seconds." + ) + resolution: Optional[str] = Field( + None, + description="The dimensions of the video display. 1080p corresponds to 1920 x 1080 pixels, 768p corresponds to 1366 x 768 pixels." + ) class MinimaxVideoGenerationResponse(BaseModel): diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index 58d2ed90c..bb3c9e710 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -1,3 +1,4 @@ +from inspect import cleandoc from typing import Union import logging import torch @@ -10,7 +11,7 @@ from comfy_api_nodes.apis import ( MinimaxFileRetrieveResponse, MinimaxTaskResultResponse, SubjectReferenceItem, - Model + MiniMaxModel ) from comfy_api_nodes.apis.client import ( ApiEndpoint, @@ -84,7 +85,6 @@ class MinimaxTextToVideoNode: FUNCTION = "generate_video" CATEGORY = "api node/video/MiniMax" API_NODE = True - OUTPUT_NODE = True async def generate_video( self, @@ -121,7 +121,7 @@ class MinimaxTextToVideoNode: response_model=MinimaxVideoGenerationResponse, ), request=MinimaxVideoGenerationRequest( - model=Model(model), + model=MiniMaxModel(model), prompt=prompt_text, callback_url=None, first_frame_image=image_url, @@ -251,7 +251,6 @@ class MinimaxImageToVideoNode(MinimaxTextToVideoNode): FUNCTION = "generate_video" CATEGORY = "api node/video/MiniMax" API_NODE = True - OUTPUT_NODE = True class MinimaxSubjectToVideoNode(MinimaxTextToVideoNode): @@ -313,7 +312,181 @@ class MinimaxSubjectToVideoNode(MinimaxTextToVideoNode): FUNCTION = "generate_video" CATEGORY = "api node/video/MiniMax" API_NODE = True - OUTPUT_NODE = True + + +class MinimaxHailuoVideoNode: + """Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "prompt_text": ( + "STRING", + { + "multiline": True, + "default": "", + "tooltip": "Text prompt to guide the video generation.", + }, + ), + }, + "optional": { + "seed": ( + IO.INT, + { + "default": 0, + "min": 0, + "max": 
0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "The random seed used for creating the noise.", + }, + ), + "first_frame_image": ( + IO.IMAGE, + { + "tooltip": "Optional image to use as the first frame to generate a video." + }, + ), + "prompt_optimizer": ( + IO.BOOLEAN, + { + "tooltip": "Optimize prompt to improve generation quality when needed.", + "default": True, + }, + ), + "duration": ( + IO.COMBO, + { + "tooltip": "The length of the output video in seconds.", + "default": 6, + "options": [6, 10], + }, + ), + "resolution": ( + IO.COMBO, + { + "tooltip": "The dimensions of the video display. " + "1080p corresponds to 1920 x 1080 pixels, 768p corresponds to 1366 x 768 pixels.", + "default": "768P", + "options": ["768P", "1080P"], + }, + ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = ("VIDEO",) + DESCRIPTION = cleandoc(__doc__ or "") + FUNCTION = "generate_video" + CATEGORY = "api node/video/MiniMax" + API_NODE = True + + async def generate_video( + self, + prompt_text, + seed=0, + first_frame_image: torch.Tensor=None, # used for ImageToVideo + prompt_optimizer=True, + duration=6, + resolution="768P", + model="MiniMax-Hailuo-02", + unique_id: Union[str, None]=None, + **kwargs, + ): + if first_frame_image is None: + validate_string(prompt_text, field_name="prompt_text") + + if model == "MiniMax-Hailuo-02" and resolution.upper() == "1080P" and duration != 6: + raise Exception( + "When model is MiniMax-Hailuo-02 and resolution is 1080P, duration is limited to 6 seconds." + ) + + # upload image, if passed in + image_url = None + if first_frame_image is not None: + image_url = (await upload_images_to_comfyapi(first_frame_image, max_images=1, auth_kwargs=kwargs))[0] + + video_generate_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/video_generation", + method=HttpMethod.POST, + request_model=MinimaxVideoGenerationRequest, + response_model=MinimaxVideoGenerationResponse, + ), + request=MinimaxVideoGenerationRequest( + model=MiniMaxModel(model), + prompt=prompt_text, + callback_url=None, + first_frame_image=image_url, + prompt_optimizer=prompt_optimizer, + duration=duration, + resolution=resolution, + ), + auth_kwargs=kwargs, + ) + response = await video_generate_operation.execute() + + task_id = response.task_id + if not task_id: + raise Exception(f"MiniMax generation failed: {response.base_resp}") + + average_duration = 120 if resolution == "768P" else 240 + video_generate_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path="/proxy/minimax/query/video_generation", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxTaskResultResponse, + query_params={"task_id": task_id}, + ), + completed_statuses=["Success"], + failed_statuses=["Fail"], + status_extractor=lambda x: x.status.value, + estimated_duration=average_duration, + node_id=unique_id, + auth_kwargs=kwargs, + ) + task_result = await video_generate_operation.execute() + + file_id = task_result.file_id + if file_id is None: + raise Exception("Request was not successful. 
Missing file ID.") + file_retrieve_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/files/retrieve", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxFileRetrieveResponse, + query_params={"file_id": int(file_id)}, + ), + request=EmptyRequest(), + auth_kwargs=kwargs, + ) + file_result = await file_retrieve_operation.execute() + + file_url = file_result.file.download_url + if file_url is None: + raise Exception( + f"No video was found in the response. Full response: {file_result.model_dump()}" + ) + logging.info(f"Generated video URL: {file_url}") + if unique_id: + if hasattr(file_result.file, "backup_download_url"): + message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}" + else: + message = f"Result URL: {file_url}" + PromptServer.instance.send_progress_text(message, unique_id) + + video_io = await download_url_to_bytesio(file_url) + if video_io is None: + error_msg = f"Failed to download video from {file_url}" + logging.error(error_msg) + raise Exception(error_msg) + return (VideoFromFile(video_io),) # A dictionary that contains all nodes you want to export with their names @@ -322,6 +495,7 @@ NODE_CLASS_MAPPINGS = { "MinimaxTextToVideoNode": MinimaxTextToVideoNode, "MinimaxImageToVideoNode": MinimaxImageToVideoNode, # "MinimaxSubjectToVideoNode": MinimaxSubjectToVideoNode, + "MinimaxHailuoVideoNode": MinimaxHailuoVideoNode, } # A dictionary that contains the friendly/humanly readable titles for the nodes @@ -329,4 +503,5 @@ NODE_DISPLAY_NAME_MAPPINGS = { "MinimaxTextToVideoNode": "MiniMax Text to Video", "MinimaxImageToVideoNode": "MiniMax Image to Video", "MinimaxSubjectToVideoNode": "MiniMax Subject to Video", + "MinimaxHailuoVideoNode": "MiniMax Hailuo Video", } From 07a927517cfaf099fec3903e8973f758e62d65f9 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 19 Aug 2025 23:29:01 +0300 Subject: [PATCH 0460/1073] api_nodes: add GPT-5 series models (#9325) --- comfy_api_nodes/nodes_openai.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index cbff2b2d2..674c9ede0 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -80,6 +80,9 @@ class SupportedOpenAIModel(str, Enum): gpt_4_1 = "gpt-4.1" gpt_4_1_mini = "gpt-4.1-mini" gpt_4_1_nano = "gpt-4.1-nano" + gpt_5 = "gpt-5" + gpt_5_mini = "gpt-5-mini" + gpt_5_nano = "gpt-5-nano" class OpenAIDalle2(ComfyNodeABC): From d844d8b13bfd6a83b0a7d0491aa2978ac44a6158 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 19 Aug 2025 23:29:24 +0300 Subject: [PATCH 0461/1073] api_nodes: added release version of google's models (#9304) --- comfy_api_nodes/nodes_gemini.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 3751fb2a1..ba4167a50 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -46,6 +46,8 @@ class GeminiModel(str, Enum): gemini_2_5_pro_preview_05_06 = "gemini-2.5-pro-preview-05-06" gemini_2_5_flash_preview_04_17 = "gemini-2.5-flash-preview-04-17" + gemini_2_5_pro = "gemini-2.5-pro" + gemini_2_5_flash = "gemini-2.5-flash" def get_gemini_endpoint( @@ -97,7 +99,7 @@ class GeminiNode(ComfyNodeABC): { "tooltip": "The Gemini model to use for generating responses.", "options": [model.value for model in GeminiModel], - "default": 
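Like the other MiniMax nodes, MinimaxHailuoVideoNode chains three remote operations: submit, poll, then fetch the file record. A condensed sketch of that flow; the helper names (post_json, poll_until, get_json) are hypothetical stand-ins, not real ComfyUI APIs:

async def hailuo_flow(payload, auth):
    task = await post_json("/proxy/minimax/video_generation", payload, auth)  # 1. submit
    result = await poll_until(                                                # 2. poll status
        "/proxy/minimax/query/video_generation",
        params={"task_id": task["task_id"]},
        done={"Success"}, failed={"Fail"}, auth=auth,
    )
    file_info = await get_json(                                               # 3. fetch file record
        "/proxy/minimax/files/retrieve",
        params={"file_id": int(result["file_id"])}, auth=auth,
    )
    return file_info["file"]["download_url"]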
GeminiModel.gemini_2_5_pro_preview_05_06.value, + "default": GeminiModel.gemini_2_5_pro.value, }, ), "seed": ( From 54d8fdbed0a7b171ab8cfb02e29a7e0dc5fe78fd Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 19 Aug 2025 23:30:06 +0300 Subject: [PATCH 0462/1073] feat(api-nodes): add Vidu Video nodes (#9368) --- comfy_api_nodes/nodes_vidu.py | 622 +++++++++++++++++++++++ comfy_api_nodes/util/validation_utils.py | 53 ++ nodes.py | 1 + 3 files changed, 676 insertions(+) create mode 100644 comfy_api_nodes/nodes_vidu.py diff --git a/comfy_api_nodes/nodes_vidu.py b/comfy_api_nodes/nodes_vidu.py new file mode 100644 index 000000000..2f441948c --- /dev/null +++ b/comfy_api_nodes/nodes_vidu.py @@ -0,0 +1,622 @@ +import logging +from enum import Enum +from typing import Any, Callable, Optional, Literal, TypeVar +from typing_extensions import override + +import torch +from pydantic import BaseModel, Field + +from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api_nodes.util.validation_utils import ( + validate_aspect_ratio_closeness, + validate_image_dimensions, + validate_image_aspect_ratio_range, + get_number_of_images, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, +) +from comfy_api_nodes.apinode_utils import download_url_to_video_output, upload_images_to_comfyapi + + +VIDU_TEXT_TO_VIDEO = "/proxy/vidu/text2video" +VIDU_IMAGE_TO_VIDEO = "/proxy/vidu/img2video" +VIDU_REFERENCE_VIDEO = "/proxy/vidu/reference2video" +VIDU_START_END_VIDEO = "/proxy/vidu/start-end2video" +VIDU_GET_GENERATION_STATUS = "/proxy/vidu/tasks/%s/creations" + +R = TypeVar("R") + +class VideoModelName(str, Enum): + vidu_q1 = 'viduq1' + + +class AspectRatio(str, Enum): + r_16_9 = "16:9" + r_9_16 = "9:16" + r_1_1 = "1:1" + + +class Resolution(str, Enum): + r_1080p = "1080p" + + +class MovementAmplitude(str, Enum): + auto = "auto" + small = "small" + medium = "medium" + large = "large" + + +class TaskCreationRequest(BaseModel): + model: VideoModelName = VideoModelName.vidu_q1 + prompt: Optional[str] = Field(None, max_length=1500) + duration: Optional[Literal[5]] = 5 + seed: Optional[int] = Field(0, ge=0, le=2147483647) + aspect_ratio: Optional[AspectRatio] = AspectRatio.r_16_9 + resolution: Optional[Resolution] = Resolution.r_1080p + movement_amplitude: Optional[MovementAmplitude] = MovementAmplitude.auto + images: Optional[list[str]] = Field(None, description="Base64 encoded string or image URL") + + +class TaskStatus(str, Enum): + created = "created" + queueing = "queueing" + processing = "processing" + success = "success" + failed = "failed" + + +class TaskCreationResponse(BaseModel): + task_id: str = Field(...) + state: TaskStatus = Field(...) + created_at: str = Field(...) + code: Optional[int] = Field(None, description="Error code") + + +class TaskResult(BaseModel): + id: str = Field(..., description="Creation id") + url: str = Field(..., description="The URL of the generated results, valid for one hour") + cover_url: str = Field(..., description="The cover URL of the generated results, valid for one hour") + + +class TaskStatusResponse(BaseModel): + state: TaskStatus = Field(...) 
+ err_code: Optional[str] = Field(None) + creations: list[TaskResult] = Field(..., description="Generated results") + + +async def poll_until_finished( + auth_kwargs: dict[str, str], + api_endpoint: ApiEndpoint[Any, R], + result_url_extractor: Optional[Callable[[R], str]] = None, + estimated_duration: Optional[int] = None, + node_id: Optional[str] = None, +) -> R: + return await PollingOperation( + poll_endpoint=api_endpoint, + completed_statuses=[TaskStatus.success.value], + failed_statuses=[TaskStatus.failed.value], + status_extractor=lambda response: response.state.value, + auth_kwargs=auth_kwargs, + result_url_extractor=result_url_extractor, + estimated_duration=estimated_duration, + node_id=node_id, + poll_interval=16.0, + max_poll_attempts=256, + ).execute() + + +def get_video_url_from_response(response) -> Optional[str]: + if response.creations: + return response.creations[0].url + return None + + +def get_video_from_response(response) -> TaskResult: + if not response.creations: + error_msg = f"Vidu request does not contain results. State: {response.state}, Error Code: {response.err_code}" + logging.info(error_msg) + raise RuntimeError(error_msg) + logging.info("Vidu task %s succeeded. Video URL: %s", response.creations[0].id, response.creations[0].url) + return response.creations[0] + + +async def execute_task( + vidu_endpoint: str, + auth_kwargs: Optional[dict[str, str]], + payload: TaskCreationRequest, + estimated_duration: int, + node_id: str, +) -> R: + response = await SynchronousOperation( + endpoint=ApiEndpoint( + path=vidu_endpoint, + method=HttpMethod.POST, + request_model=TaskCreationRequest, + response_model=TaskCreationResponse, + ), + request=payload, + auth_kwargs=auth_kwargs, + ).execute() + if response.state == TaskStatus.failed: + error_msg = f"Vidu request failed. 
Code: {response.code}" + logging.error(error_msg) + raise RuntimeError(error_msg) + return await poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=VIDU_GET_GENERATION_STATUS % response.task_id, + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=TaskStatusResponse, + ), + result_url_extractor=get_video_url_from_response, + estimated_duration=estimated_duration, + node_id=node_id, + ) + + +class ViduTextToVideoNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ViduTextToVideoNode", + display_name="Vidu Text To Video Generation", + category="api node/video/Vidu", + description="Generate video from text prompt", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in VideoModelName], + default=VideoModelName.vidu_q1.value, + tooltip="Model name", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="A textual description for video generation", + ), + comfy_io.Int.Input( + "duration", + default=5, + min=5, + max=5, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Duration of the output video in seconds", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed for video generation (0 for random)", + optional=True, + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=[model.value for model in AspectRatio], + default=AspectRatio.r_16_9.value, + tooltip="The aspect ratio of the output video", + optional=True, + ), + comfy_io.Combo.Input( + "resolution", + options=[model.value for model in Resolution], + default=Resolution.r_1080p.value, + tooltip="Supported values may vary by model & duration", + optional=True, + ), + comfy_io.Combo.Input( + "movement_amplitude", + options=[model.value for model in MovementAmplitude], + default=MovementAmplitude.auto.value, + tooltip="The movement amplitude of objects in the frame", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + duration: int, + seed: int, + aspect_ratio: str, + resolution: str, + movement_amplitude: str, + ) -> comfy_io.NodeOutput: + if not prompt: + raise ValueError("The prompt field is required and cannot be empty.") + payload = TaskCreationRequest( + model_name=model, + prompt=prompt, + duration=duration, + seed=seed, + aspect_ratio=aspect_ratio, + resolution=resolution, + movement_amplitude=movement_amplitude, + ) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + results = await execute_task(VIDU_TEXT_TO_VIDEO, auth, payload, 320, cls.hidden.unique_id) + return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + + +class ViduImageToVideoNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ViduImageToVideoNode", + display_name="Vidu Image To Video Generation", + category="api node/video/Vidu", + description="Generate video from image and optional prompt", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in VideoModelName], + default=VideoModelName.vidu_q1.value, + tooltip="Model name", + ), + comfy_io.Image.Input( + "image", + 
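A payload-construction sketch matching the TaskCreationRequest schema declared above. Note the declared field name is model; with pydantic's default extra='ignore', an unknown keyword such as model_name= is dropped silently and the field keeps its viduq1 default:

payload = TaskCreationRequest(
    model=VideoModelName.vidu_q1,            # the field is `model` in the schema above
    prompt="a red fox running through snow",
    duration=5,
    seed=0,
    aspect_ratio=AspectRatio.r_16_9,
    resolution=Resolution.r_1080p,
    movement_amplitude=MovementAmplitude.auto,
)
# `images` is filled in afterwards from upload_images_to_comfyapi, as above.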
tooltip="An image to be used as the start frame of the generated video", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="A textual description for video generation", + optional=True, + ), + comfy_io.Int.Input( + "duration", + default=5, + min=5, + max=5, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Duration of the output video in seconds", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed for video generation (0 for random)", + optional=True, + ), + comfy_io.Combo.Input( + "resolution", + options=[model.value for model in Resolution], + default=Resolution.r_1080p.value, + tooltip="Supported values may vary by model & duration", + optional=True, + ), + comfy_io.Combo.Input( + "movement_amplitude", + options=[model.value for model in MovementAmplitude], + default=MovementAmplitude.auto.value, + tooltip="The movement amplitude of objects in the frame", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + image: torch.Tensor, + prompt: str, + duration: int, + seed: int, + resolution: str, + movement_amplitude: str, + ) -> comfy_io.NodeOutput: + if get_number_of_images(image) > 1: + raise ValueError("Only one input image is allowed.") + validate_image_aspect_ratio_range(image, (1, 4), (4, 1)) + payload = TaskCreationRequest( + model_name=model, + prompt=prompt, + duration=duration, + seed=seed, + resolution=resolution, + movement_amplitude=movement_amplitude, + ) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + payload.images = await upload_images_to_comfyapi( + image, + max_images=1, + mime_type="image/png", + auth_kwargs=auth, + ) + results = await execute_task(VIDU_IMAGE_TO_VIDEO, auth, payload, 120, cls.hidden.unique_id) + return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + + +class ViduReferenceVideoNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ViduReferenceVideoNode", + display_name="Vidu Reference To Video Generation", + category="api node/video/Vidu", + description="Generate video from multiple images and prompt", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in VideoModelName], + default=VideoModelName.vidu_q1.value, + tooltip="Model name", + ), + comfy_io.Image.Input( + "images", + tooltip="Images to use as references to generate a video with consistent subjects (max 7 images).", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="A textual description for video generation", + ), + comfy_io.Int.Input( + "duration", + default=5, + min=5, + max=5, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Duration of the output video in seconds", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed for video generation (0 for random)", + optional=True, + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=[model.value for model in AspectRatio], + default=AspectRatio.r_16_9.value, + 
tooltip="The aspect ratio of the output video", + optional=True, + ), + comfy_io.Combo.Input( + "resolution", + options=[model.value for model in Resolution], + default=Resolution.r_1080p.value, + tooltip="Supported values may vary by model & duration", + optional=True, + ), + comfy_io.Combo.Input( + "movement_amplitude", + options=[model.value for model in MovementAmplitude], + default=MovementAmplitude.auto.value, + tooltip="The movement amplitude of objects in the frame", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + images: torch.Tensor, + prompt: str, + duration: int, + seed: int, + aspect_ratio: str, + resolution: str, + movement_amplitude: str, + ) -> comfy_io.NodeOutput: + if not prompt: + raise ValueError("The prompt field is required and cannot be empty.") + a = get_number_of_images(images) + if a > 7: + raise ValueError("Too many images, maximum allowed is 7.") + for image in images: + validate_image_aspect_ratio_range(image, (1, 4), (4, 1)) + validate_image_dimensions(image, min_width=128, min_height=128) + payload = TaskCreationRequest( + model_name=model, + prompt=prompt, + duration=duration, + seed=seed, + aspect_ratio=aspect_ratio, + resolution=resolution, + movement_amplitude=movement_amplitude, + ) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + payload.images = await upload_images_to_comfyapi( + images, + max_images=7, + mime_type="image/png", + auth_kwargs=auth, + ) + results = await execute_task(VIDU_REFERENCE_VIDEO, auth, payload, 120, cls.hidden.unique_id) + return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + + +class ViduStartEndToVideoNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ViduStartEndToVideoNode", + display_name="Vidu Start End To Video Generation", + category="api node/video/Vidu", + description="Generate a video from start and end frames and a prompt", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in VideoModelName], + default=VideoModelName.vidu_q1.value, + tooltip="Model name", + ), + comfy_io.Image.Input( + "first_frame", + tooltip="Start frame", + ), + comfy_io.Image.Input( + "end_frame", + tooltip="End frame", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="A textual description for video generation", + optional=True, + ), + comfy_io.Int.Input( + "duration", + default=5, + min=5, + max=5, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Duration of the output video in seconds", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed for video generation (0 for random)", + optional=True, + ), + comfy_io.Combo.Input( + "resolution", + options=[model.value for model in Resolution], + default=Resolution.r_1080p.value, + tooltip="Supported values may vary by model & duration", + optional=True, + ), + comfy_io.Combo.Input( + "movement_amplitude", + options=[model.value for model in MovementAmplitude], + default=MovementAmplitude.auto.value, + tooltip="The movement amplitude of objects in the frame", + optional=True, + ), + ], + outputs=[ + 
comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + first_frame: torch.Tensor, + end_frame: torch.Tensor, + prompt: str, + duration: int, + seed: int, + resolution: str, + movement_amplitude: str, + ) -> comfy_io.NodeOutput: + validate_aspect_ratio_closeness(first_frame, end_frame, min_rel=0.8, max_rel=1.25, strict=False) + payload = TaskCreationRequest( + model_name=model, + prompt=prompt, + duration=duration, + seed=seed, + resolution=resolution, + movement_amplitude=movement_amplitude, + ) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + payload.images = [ + (await upload_images_to_comfyapi(frame, max_images=1, mime_type="image/png", auth_kwargs=auth))[0] + for frame in (first_frame, end_frame) + ] + results = await execute_task(VIDU_START_END_VIDEO, auth, payload, 96, cls.hidden.unique_id) + return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + + +class ViduExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + ViduTextToVideoNode, + ViduImageToVideoNode, + ViduReferenceVideoNode, + ViduStartEndToVideoNode, + ] + +async def comfy_entrypoint() -> ViduExtension: + return ViduExtension() diff --git a/comfy_api_nodes/util/validation_utils.py b/comfy_api_nodes/util/validation_utils.py index 031b9fbd3..606b794bf 100644 --- a/comfy_api_nodes/util/validation_utils.py +++ b/comfy_api_nodes/util/validation_utils.py @@ -53,6 +53,53 @@ def validate_image_aspect_ratio( ) +def validate_image_aspect_ratio_range( + image: torch.Tensor, + min_ratio: tuple[float, float], # e.g. (1, 4) + max_ratio: tuple[float, float], # e.g. 
(4, 1) + *, + strict: bool = True, # True -> (min, max); False -> [min, max] +) -> float: + a1, b1 = min_ratio + a2, b2 = max_ratio + if a1 <= 0 or b1 <= 0 or a2 <= 0 or b2 <= 0: + raise ValueError("Ratios must be positive, like (1, 4) or (4, 1).") + lo, hi = (a1 / b1), (a2 / b2) + if lo > hi: + lo, hi = hi, lo + a1, b1, a2, b2 = a2, b2, a1, b1 # swap only for error text + w, h = get_image_dimensions(image) + if w <= 0 or h <= 0: + raise ValueError(f"Invalid image dimensions: {w}x{h}") + ar = w / h + ok = (lo < ar < hi) if strict else (lo <= ar <= hi) + if not ok: + op = "<" if strict else "≤" + raise ValueError(f"Image aspect ratio {ar:.6g} is outside allowed range: {a1}:{b1} {op} ratio {op} {a2}:{b2}") + return ar + + +def validate_aspect_ratio_closeness( + start_img, + end_img, + min_rel: float, + max_rel: float, + *, + strict: bool = False, # True => exclusive, False => inclusive +) -> None: + w1, h1 = get_image_dimensions(start_img) + w2, h2 = get_image_dimensions(end_img) + if min(w1, h1, w2, h2) <= 0: + raise ValueError("Invalid image dimensions") + ar1 = w1 / h1 + ar2 = w2 / h2 + # Normalize so it is symmetric (no need to check both ar1/ar2 and ar2/ar1) + closeness = max(ar1, ar2) / min(ar1, ar2) + limit = max(max_rel, 1.0 / min_rel) # for 0.8..1.25 this is 1.25 + if (closeness >= limit) if strict else (closeness > limit): + raise ValueError(f"Aspect ratios must be close: start/end={ar1/ar2:.4f}, allowed range {min_rel}–{max_rel}.") + + def validate_video_dimensions( video: VideoInput, min_width: Optional[int] = None, @@ -98,3 +145,9 @@ def validate_video_duration( raise ValueError( f"Video duration must be at most {max_duration}s, got {duration}s" ) + + +def get_number_of_images(images): + if isinstance(images, torch.Tensor): + return images.shape[0] if images.ndim >= 4 else 1 + return len(images) diff --git a/nodes.py b/nodes.py index b3fa9c51a..35dda1b19 100644 --- a/nodes.py +++ b/nodes.py @@ -2351,6 +2351,7 @@ async def init_builtin_api_nodes(): "nodes_moonvalley.py", "nodes_rodin.py", "nodes_gemini.py", + "nodes_vidu.py", ] if not await load_custom_node(os.path.join(api_nodes_dir, "canary.py"), module_parent="comfy_api_nodes"): From bddd69618bf4463209c3681babfcbebd9b9aed85 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 19 Aug 2025 13:49:01 -0700 Subject: [PATCH 0463/1073] Change the TextEncodeQwenImageEdit node to use logic closer to reference. 
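A usage sketch for the validators added above; ComfyUI IMAGE tensors are (batch, height, width, channels):

import torch
from comfy_api_nodes.util.validation_utils import (
    validate_image_aspect_ratio_range,
    validate_aspect_ratio_closeness,
    get_number_of_images,
)

start = torch.zeros(1, 1080, 1920, 3)   # 16:9
end = torch.zeros(1, 720, 1280, 3)      # also 16:9

validate_image_aspect_ratio_range(start, (1, 4), (4, 1))  # ok: ratio within (1:4, 4:1)
validate_aspect_ratio_closeness(start, end, 0.8, 1.25)    # ok: identical ratios
assert get_number_of_images(start) == 1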
(#9432) --- comfy_extras/nodes_qwen.py | 37 +++++++++++-------------------------- 1 file changed, 11 insertions(+), 26 deletions(-) diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py index b5088fae2..fff89556f 100644 --- a/comfy_extras/nodes_qwen.py +++ b/comfy_extras/nodes_qwen.py @@ -1,25 +1,6 @@ import node_helpers import comfy.utils - -PREFERRED_QWENIMAGE_RESOLUTIONS = [ - (672, 1568), - (688, 1504), - (720, 1456), - (752, 1392), - (800, 1328), - (832, 1248), - (880, 1184), - (944, 1104), - (1024, 1024), - (1104, 944), - (1184, 880), - (1248, 832), - (1328, 800), - (1392, 752), - (1456, 720), - (1504, 688), - (1568, 672), -] +import math class TextEncodeQwenImageEdit: @@ -42,13 +23,17 @@ class TextEncodeQwenImageEdit: if image is None: images = [] else: - images = [image] + samples = image.movedim(-1, 1) + total = int(1024 * 1024) + + scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) + width = round(samples.shape[3] * scale_by) + height = round(samples.shape[2] * scale_by) + + s = comfy.utils.common_upscale(samples, width, height, "area", "disabled") + image = s.movedim(1, -1) + images = [image[:, :, :, :3]] if vae is not None: - width = image.shape[2] - height = image.shape[1] - aspect_ratio = width / height - _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED_QWENIMAGE_RESOLUTIONS) - image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1) ref_latent = vae.encode(image[:, :, :, :3]) tokens = clip.tokenize(prompt, images=images) From dfa791eb4bfcaac3eb9b2b33fa15ae5a25589bb8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 19 Aug 2025 17:47:42 -0700 Subject: [PATCH 0464/1073] Rope fix for qwen vl. 
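The replacement logic above rescales the input to roughly one megapixel while preserving aspect ratio, instead of snapping to a preferred-resolution table. The arithmetic in isolation:

import math

def scale_to_area(width: int, height: int, total: int = 1024 * 1024) -> tuple[int, int]:
    # Choose a uniform scale so width * height lands near `total`.
    scale_by = math.sqrt(total / (width * height))
    return round(width * scale_by), round(height * scale_by)

print(scale_to_area(1920, 1080))  # (1365, 768): ~1.05 MP, 16:9 preserved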
(#9435) --- comfy/text_encoders/llama.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 9d90d5a61..4c976058f 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -27,6 +27,7 @@ class Llama2Config: rms_norm_add = False mlp_activation = "silu" qkv_bias = False + rope_dims = None @dataclass class Qwen25_3BConfig: @@ -44,6 +45,7 @@ class Qwen25_3BConfig: rms_norm_add = False mlp_activation = "silu" qkv_bias = True + rope_dims = None @dataclass class Qwen25_7BVLI_Config: @@ -61,6 +63,7 @@ class Qwen25_7BVLI_Config: rms_norm_add = False mlp_activation = "silu" qkv_bias = True + rope_dims = [16, 24, 24] @dataclass class Gemma2_2B_Config: @@ -78,6 +81,7 @@ class Gemma2_2B_Config: rms_norm_add = True mlp_activation = "gelu_pytorch_tanh" qkv_bias = False + rope_dims = None class RMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-5, add=False, device=None, dtype=None): @@ -102,7 +106,7 @@ def rotate_half(x): return torch.cat((-x2, x1), dim=-1) -def precompute_freqs_cis(head_dim, position_ids, theta, device=None): +def precompute_freqs_cis(head_dim, position_ids, theta, rope_dims=None, device=None): theta_numerator = torch.arange(0, head_dim, 2, device=device).float() inv_freq = 1.0 / (theta ** (theta_numerator / head_dim)) @@ -112,12 +116,20 @@ def precompute_freqs_cis(head_dim, position_ids, theta, device=None): emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() + if rope_dims is not None and position_ids.shape[0] > 1: + mrope_section = rope_dims * 2 + cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(0) + sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(0) + else: + cos = cos.unsqueeze(1) + sin = sin.unsqueeze(1) + return (cos, sin) def apply_rope(xq, xk, freqs_cis): - cos = freqs_cis[0].unsqueeze(1) - sin = freqs_cis[1].unsqueeze(1) + cos = freqs_cis[0] + sin = freqs_cis[1] q_embed = (xq * cos) + (rotate_half(xq) * sin) k_embed = (xk * cos) + (rotate_half(xk) * sin) return q_embed, k_embed @@ -292,6 +304,7 @@ class Llama2_(nn.Module): freqs_cis = precompute_freqs_cis(self.config.head_dim, position_ids, self.config.rope_theta, + self.config.rope_dims, device=x.device) mask = None From 7cd2c4bd6ab20f35a6bb1b1f2252c3ea16da4777 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 19 Aug 2025 21:45:27 -0700 Subject: [PATCH 0465/1073] Qwen rotary embeddings should now match reference code. 
(#9437) --- comfy/ldm/qwen_image/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index a3c726299..bf3940313 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -349,8 +349,8 @@ class QwenImageTransformer2DModel(nn.Module): img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) img_ids[:, :, 0] = img_ids[:, :, 1] + index - img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) - img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) + img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) - (h_len // 2) + img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) - (w_len // 2) return hidden_states, repeat(img_ids, "h w c -> b (h w) c", b=bs), orig_shape def forward( @@ -396,7 +396,7 @@ class QwenImageTransformer2DModel(nn.Module): hidden_states = torch.cat([hidden_states, kontext], dim=1) img_ids = torch.cat([img_ids, kontext_ids], dim=1) - txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size), ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size))) + txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size) // 2, ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size) // 2)) txt_ids = torch.linspace(txt_start, txt_start + context.shape[1], steps=context.shape[1], device=x.device, dtype=x.dtype).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) ids = torch.cat((txt_ids, img_ids), dim=1) image_rotary_emb = self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype) From 5a8f502db5889873ffa13132b603b7b6daac605a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 19 Aug 2025 22:08:11 -0700 Subject: [PATCH 0466/1073] Disable prompt weights for qwen. 
(#9438) --- comfy/sd1_clip.py | 5 ++++- comfy/text_encoders/qwen_image.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 1e8adbe69..f8a7c2a1b 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -534,7 +534,10 @@ class SDTokenizer: min_padding = tokenizer_options.get("{}_min_padding".format(self.embedding_key), self.min_padding) text = escape_important(text) - parsed_weights = token_weights(text, 1.0) + if kwargs.get("disable_weights", False): + parsed_weights = [(text, 1.0)] + else: + parsed_weights = token_weights(text, 1.0) # tokenize words tokens = [] diff --git a/comfy/text_encoders/qwen_image.py b/comfy/text_encoders/qwen_image.py index f07318d6c..6646b1003 100644 --- a/comfy/text_encoders/qwen_image.py +++ b/comfy/text_encoders/qwen_image.py @@ -15,7 +15,7 @@ class QwenImageTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen25_7b", tokenizer=Qwen25_7BVLITokenizer) self.llama_template = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" - self.llama_template_images = "<|im_start|>system\nDescribe the key features of the input image \\(color, shape, size, texture, objects, background\\), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n" + self.llama_template_images = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n" def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, images=[], **kwargs): if llama_template is None: @@ -25,7 +25,7 @@ class QwenImageTokenizer(sd1_clip.SD1Tokenizer): llama_text = self.llama_template.format(text) else: llama_text = llama_template.format(text) - tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, **kwargs) + tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs) key_name = next(iter(tokens)) embed_count = 0 qwen_tokens = tokens[key_name] From 8d38ea3bbf7e77ed7e7aee401b157dab211c5307 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 19 Aug 2025 23:58:54 -0700 Subject: [PATCH 0467/1073] Fix bf16 precision issue with qwen image embeddings. 
(#9441) --- comfy/ldm/qwen_image/model.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index bf3940313..49f66b90a 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -347,7 +347,7 @@ class QwenImageTransformer2DModel(nn.Module): h_offset = ((h_offset + (patch_size // 2)) // patch_size) w_offset = ((w_offset + (patch_size // 2)) // patch_size) - img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) + img_ids = torch.zeros((h_len, w_len, 3), device=x.device) img_ids[:, :, 0] = img_ids[:, :, 1] + index img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) - (h_len // 2) img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) - (w_len // 2) @@ -397,9 +397,10 @@ class QwenImageTransformer2DModel(nn.Module): img_ids = torch.cat([img_ids, kontext_ids], dim=1) txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size) // 2, ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size) // 2)) - txt_ids = torch.linspace(txt_start, txt_start + context.shape[1], steps=context.shape[1], device=x.device, dtype=x.dtype).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) + txt_ids = torch.arange(txt_start, txt_start + context.shape[1], device=x.device).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) ids = torch.cat((txt_ids, img_ids), dim=1) image_rotary_emb = self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype) + del ids, txt_ids, img_ids hidden_states = self.img_in(hidden_states) encoder_hidden_states = self.txt_norm(encoder_hidden_states) From 2f52e8f05f2039dd67e9b9783b8397350a548b95 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 20 Aug 2025 15:15:09 +0800 Subject: [PATCH 0468/1073] Bump template to 0.1.62 (#9419) * Bump template to 0.1.61 * Bump template to 0.1.62 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c7a5c47ab..8d928d826 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.9 -comfyui-workflow-templates==0.1.60 +comfyui-workflow-templates==0.1.62 comfyui-embedded-docs==0.2.6 torch torchsde From 7139d6d93fc7b5481a69b687080bd36f7b531c46 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 20 Aug 2025 03:15:30 -0400 Subject: [PATCH 0469/1073] ComfyUI version 0.3.51 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 29ec07ca6..65f06cf37 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.50" +__version__ = "0.3.51" diff --git a/pyproject.toml b/pyproject.toml index 659b5730a..ecbf04303 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.50" +version = "0.3.51" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From fe01885acf892de636b1b2743903812099bd42e3 Mon Sep 17 00:00:00 2001 From: Harel Cain Date: Wed, 20 Aug 2025 09:33:10 +0200 Subject: [PATCH 0470/1073] LTXV: fix key frame noise mask dimensions for when real noise mask exists (#9425) --- comfy_extras/nodes_lt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index b5058667a..f82337a67 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -166,7 +166,7 @@ class LTXVAddGuide: negative = self.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors) mask = torch.full( - (noise_mask.shape[0], 1, guiding_latent.shape[2], 1, 1), + (noise_mask.shape[0], 1, guiding_latent.shape[2], noise_mask.shape[3], noise_mask.shape[4]), 1.0 - strength, dtype=noise_mask.dtype, device=noise_mask.device, From e73a9dbe30434280c69d852ea78cc4bf88bfd501 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 20 Aug 2025 14:34:13 -0700 Subject: [PATCH 0471/1073] Add that qwen edit model is supported to readme. (#9463) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index fa99a8cbe..79a8a8c79 100644 --- a/README.md +++ b/README.md @@ -71,6 +71,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model) - [HiDream E1.1](https://comfyanonymous.github.io/ComfyUI_examples/hidream/#hidream-e11) + - [Qwen Image Edit](https://comfyanonymous.github.io/ComfyUI_examples/qwen_image/#edit-model) - Video Models - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) From 0963493a9c3b6565f8537288a0fb90991391ec41 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 20 Aug 2025 19:26:37 -0700 Subject: [PATCH 0472/1073] Support for Qwen Diffsynth Controlnets canny and depth. (#9465) These are not real controlnets but actually a patch on the model so they will be treated as such. Put them in the models/model_patches/ folder. Use the new ModelPatchLoader and QwenImageDiffsynthControlnet nodes. 
--- comfy/ldm/qwen_image/model.py | 7 + comfy/model_management.py | 8 +- comfy/model_patcher.py | 27 ++++ comfy_api/latest/_io.py | 4 + comfy_extras/nodes_model_patch.py | 138 ++++++++++++++++++++ models/model_patches/put_model_patches_here | 0 nodes.py | 1 + 7 files changed, 184 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_model_patch.py create mode 100644 models/model_patches/put_model_patches_here diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 49f66b90a..2503583cb 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -416,6 +416,7 @@ class QwenImageTransformer2DModel(nn.Module): ) patches_replace = transformer_options.get("patches_replace", {}) + patches = transformer_options.get("patches", {}) blocks_replace = patches_replace.get("dit", {}) for i, block in enumerate(self.transformer_blocks): @@ -436,6 +437,12 @@ class QwenImageTransformer2DModel(nn.Module): image_rotary_emb=image_rotary_emb, ) + if "double_block" in patches: + for p in patches["double_block"]: + out = p({"img": hidden_states, "txt": encoder_hidden_states, "x": x, "block_index": i}) + hidden_states = out["img"] + encoder_hidden_states = out["txt"] + hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) diff --git a/comfy/model_management.py b/comfy/model_management.py index 2a9f18068..d08aee1fe 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -593,7 +593,13 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu else: minimum_memory_required = max(inference_memory, minimum_memory_required + extra_reserved_memory()) - models = set(models) + models_temp = set() + for m in models: + models_temp.add(m) + for mm in m.model_patches_models(): + models_temp.add(mm) + + models = models_temp models_to_load = [] diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 52e76b5f3..a944cb421 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -430,6 +430,9 @@ class ModelPatcher: def set_model_forward_timestep_embed_patch(self, patch): self.set_model_patch(patch, "forward_timestep_embed_patch") + def set_model_double_block_patch(self, patch): + self.set_model_patch(patch, "double_block") + def add_object_patch(self, name, obj): self.object_patches[name] = obj @@ -486,6 +489,30 @@ class ModelPatcher: if hasattr(wrap_func, "to"): self.model_options["model_function_wrapper"] = wrap_func.to(device) + def model_patches_models(self): + to = self.model_options["transformer_options"] + models = [] + if "patches" in to: + patches = to["patches"] + for name in patches: + patch_list = patches[name] + for i in range(len(patch_list)): + if hasattr(patch_list[i], "models"): + models += patch_list[i].models() + if "patches_replace" in to: + patches = to["patches_replace"] + for name in patches: + patch_list = patches[name] + for k in patch_list: + if hasattr(patch_list[k], "models"): + models += patch_list[k].models() + if "model_function_wrapper" in self.model_options: + wrap_func = self.model_options["model_function_wrapper"] + if hasattr(wrap_func, "models"): + models += wrap_func.models() + + return models + def model_dtype(self): if hasattr(self.model, "get_dtype"): return self.model.get_dtype() diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index ec1efb51d..a3a21facc 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -726,6 +726,10 @@ class SEGS(ComfyTypeIO): class AnyType(ComfyTypeIO): Type = Any 
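+# Registers the io type matching the "MODEL_PATCH" type string used by the
+# model-patch nodes added in this commit.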
+@comfytype(io_type="MODEL_PATCH") +class MODEL_PATCH(ComfyTypeIO): + Type = Any + @comfytype(io_type="COMFY_MULTITYPED_V3") class MultiType: Type = Any diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py new file mode 100644 index 000000000..bb239bc45 --- /dev/null +++ b/comfy_extras/nodes_model_patch.py @@ -0,0 +1,138 @@ +import torch +import folder_paths +import comfy.utils +import comfy.ops +import comfy.model_management +import comfy.ldm.common_dit +import comfy.latent_formats + + +class BlockWiseControlBlock(torch.nn.Module): + # [linear, gelu, linear] + def __init__(self, dim: int = 3072, device=None, dtype=None, operations=None): + super().__init__() + self.x_rms = operations.RMSNorm(dim, eps=1e-6) + self.y_rms = operations.RMSNorm(dim, eps=1e-6) + self.input_proj = operations.Linear(dim, dim) + self.act = torch.nn.GELU() + self.output_proj = operations.Linear(dim, dim) + + def forward(self, x, y): + x, y = self.x_rms(x), self.y_rms(y) + x = self.input_proj(x + y) + x = self.act(x) + x = self.output_proj(x) + return x + + +class QwenImageBlockWiseControlNet(torch.nn.Module): + def __init__( + self, + num_layers: int = 60, + in_dim: int = 64, + additional_in_dim: int = 0, + dim: int = 3072, + device=None, dtype=None, operations=None + ): + super().__init__() + self.img_in = operations.Linear(in_dim + additional_in_dim, dim, device=device, dtype=dtype) + self.controlnet_blocks = torch.nn.ModuleList( + [ + BlockWiseControlBlock(dim, device=device, dtype=dtype, operations=operations) + for _ in range(num_layers) + ] + ) + + def process_input_latent_image(self, latent_image): + latent_image = comfy.latent_formats.Wan21().process_in(latent_image) + patch_size = 2 + hidden_states = comfy.ldm.common_dit.pad_to_patch_size(latent_image, (1, patch_size, patch_size)) + orig_shape = hidden_states.shape + hidden_states = hidden_states.view(orig_shape[0], orig_shape[1], orig_shape[-2] // 2, 2, orig_shape[-1] // 2, 2) + hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5) + hidden_states = hidden_states.reshape(orig_shape[0], (orig_shape[-2] // 2) * (orig_shape[-1] // 2), orig_shape[1] * 4) + return self.img_in(hidden_states) + + def control_block(self, img, controlnet_conditioning, block_id): + return self.controlnet_blocks[block_id](img, controlnet_conditioning) + + +class ModelPatchLoader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "name": (folder_paths.get_filename_list("model_patches"), ), + }} + RETURN_TYPES = ("MODEL_PATCH",) + FUNCTION = "load_model_patch" + EXPERIMENTAL = True + + CATEGORY = "advanced/loaders" + + def load_model_patch(self, name): + model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name) + sd = comfy.utils.load_torch_file(model_patch_path, safe_load=True) + dtype = comfy.utils.weight_dtype(sd) + # TODO: this node will work with more types of model patches + model = QwenImageBlockWiseControlNet(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) + model.load_state_dict(sd) + model = comfy.model_patcher.ModelPatcher(model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) + return (model,) + + +class DiffSynthCnetPatch: + def __init__(self, model_patch, vae, image, strength): + self.encoded_image = model_patch.model.process_input_latent_image(vae.encode(image)) + self.model_patch = model_patch + self.vae = vae + self.image = image + self.strength = strength + + def __call__(self, kwargs): 
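+        # Invoked once per transformer block via the "double_block" patch list.
+        # kwargs carries the raw latent "x", the block's hidden states "img"
+        # and the index of the block currently executing ("block_index").
+        # If the cached control latent no longer matches the sampled
+        # resolution, the control image is rescaled and re-encoded below.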
+ x = kwargs.get("x") + img = kwargs.get("img") + block_index = kwargs.get("block_index") + if self.encoded_image is None or self.encoded_image.shape[1:] != img.shape[1:]: + spacial_compression = self.vae.spacial_compression_encode() + image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center") + loaded_models = comfy.model_management.loaded_models(only_currently_used=True) + self.encoded_image = self.model_patch.model.process_input_latent_image(self.vae.encode(image_scaled.movedim(1, -1))) + comfy.model_management.load_models_gpu(loaded_models) + + img = img + (self.model_patch.model.control_block(img, self.encoded_image.to(img.dtype), block_index) * self.strength) + kwargs['img'] = img + return kwargs + + def to(self, device_or_dtype): + if isinstance(device_or_dtype, torch.device): + self.encoded_image = self.encoded_image.to(device_or_dtype) + return self + + def models(self): + return [self.model_patch] + +class QwenImageDiffsynthControlnet: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "model_patch": ("MODEL_PATCH",), + "vae": ("VAE",), + "image": ("IMAGE",), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "diffsynth_controlnet" + EXPERIMENTAL = True + + CATEGORY = "advanced/loaders/qwen" + + def diffsynth_controlnet(self, model, model_patch, vae, image, strength): + model_patched = model.clone() + image = image[:, :, :, :3] + model_patched.set_model_double_block_patch(DiffSynthCnetPatch(model_patch, vae, image, strength)) + return (model_patched,) + + +NODE_CLASS_MAPPINGS = { + "ModelPatchLoader": ModelPatchLoader, + "QwenImageDiffsynthControlnet": QwenImageDiffsynthControlnet, +} diff --git a/models/model_patches/put_model_patches_here b/models/model_patches/put_model_patches_here new file mode 100644 index 000000000..e69de29bb diff --git a/nodes.py b/nodes.py index 35dda1b19..9681750d3 100644 --- a/nodes.py +++ b/nodes.py @@ -2322,6 +2322,7 @@ async def init_builtin_extra_nodes(): "nodes_tcfg.py", "nodes_context_windows.py", "nodes_qwen.py", + "nodes_model_patch.py" ] import_failed = [] From 0737b7e0d245de20192064da4888debbef3241c2 Mon Sep 17 00:00:00 2001 From: saurabh-pingale Date: Thu, 21 Aug 2025 07:57:57 +0530 Subject: [PATCH 0473/1073] fix(userdata): catch invalid workflow filenames (#9434) (#9445) --- app/user_manager.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/app/user_manager.py b/app/user_manager.py index 0ec3e46ea..a2d376c0c 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -363,10 +363,17 @@ class UserManager(): if not overwrite and os.path.exists(path): return web.Response(status=409, text="File already exists") - body = await request.read() + try: + body = await request.read() - with open(path, "wb") as f: - f.write(body) + with open(path, "wb") as f: + f.write(body) + except OSError as e: + logging.warning(f"Error saving file '{path}': {e}") + return web.Response( + status=400, + reason="Invalid filename. Please avoid special characters like :\\/*?\"<>|" + ) user_path = self.get_request_user_filepath(request, None) if full_info: From 9fa1036f60b5264302072453be524aa55928bbaf Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 20 Aug 2025 20:09:35 -0700 Subject: [PATCH 0474/1073] Forgot this. 
(#9470) --- folder_paths.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/folder_paths.py b/folder_paths.py index 9ec952940..b34af39e8 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -46,6 +46,8 @@ folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")] folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""}) +folder_names_and_paths["model_patches"] = ([os.path.join(models_dir, "model_patches")], supported_pt_extensions) + output_directory = os.path.join(base_path, "output") temp_directory = os.path.join(base_path, "temp") input_directory = os.path.join(base_path, "input") From 1b2de2642d38099acdde7c460d133d93e91074f0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 20 Aug 2025 21:33:49 -0700 Subject: [PATCH 0475/1073] Support diffsynth inpaint controlnet (model patch). (#9471) --- comfy_extras/nodes_model_patch.py | 39 ++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index bb239bc45..3eaada9bc 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -35,6 +35,7 @@ class QwenImageBlockWiseControlNet(torch.nn.Module): device=None, dtype=None, operations=None ): super().__init__() + self.additional_in_dim = additional_in_dim self.img_in = operations.Linear(in_dim + additional_in_dim, dim, device=device, dtype=dtype) self.controlnet_blocks = torch.nn.ModuleList( [ @@ -44,7 +45,7 @@ class QwenImageBlockWiseControlNet(torch.nn.Module): ) def process_input_latent_image(self, latent_image): - latent_image = comfy.latent_formats.Wan21().process_in(latent_image) + latent_image[:, :16] = comfy.latent_formats.Wan21().process_in(latent_image[:, :16]) patch_size = 2 hidden_states = comfy.ldm.common_dit.pad_to_patch_size(latent_image, (1, patch_size, patch_size)) orig_shape = hidden_states.shape @@ -73,19 +74,33 @@ class ModelPatchLoader: sd = comfy.utils.load_torch_file(model_patch_path, safe_load=True) dtype = comfy.utils.weight_dtype(sd) # TODO: this node will work with more types of model patches - model = QwenImageBlockWiseControlNet(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) + additional_in_dim = sd["img_in.weight"].shape[1] - 64 + model = QwenImageBlockWiseControlNet(additional_in_dim=additional_in_dim, device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) model.load_state_dict(sd) model = comfy.model_patcher.ModelPatcher(model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) return (model,) class DiffSynthCnetPatch: - def __init__(self, model_patch, vae, image, strength): - self.encoded_image = model_patch.model.process_input_latent_image(vae.encode(image)) + def __init__(self, model_patch, vae, image, strength, mask=None): self.model_patch = model_patch self.vae = vae self.image = image self.strength = strength + self.mask = mask + self.encoded_image = model_patch.model.process_input_latent_image(self.encode_latent_cond(image)) + + def encode_latent_cond(self, image): + latent_image = self.vae.encode(image) + if self.model_patch.model.additional_in_dim > 0: + if self.mask is None: + mask_ = torch.ones_like(latent_image)[:, :self.model_patch.model.additional_in_dim // 4] + else: + mask_ = comfy.utils.common_upscale(self.mask.mean(dim=1, keepdim=True), 
latent_image.shape[-1], latent_image.shape[-2], "bilinear", "none") + + return torch.cat([latent_image, mask_], dim=1) + else: + return latent_image def __call__(self, kwargs): x = kwargs.get("x") @@ -95,7 +110,7 @@ class DiffSynthCnetPatch: spacial_compression = self.vae.spacial_compression_encode() image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center") loaded_models = comfy.model_management.loaded_models(only_currently_used=True) - self.encoded_image = self.model_patch.model.process_input_latent_image(self.vae.encode(image_scaled.movedim(1, -1))) + self.encoded_image = self.model_patch.model.process_input_latent_image(self.encode_latent_cond(image_scaled.movedim(1, -1))) comfy.model_management.load_models_gpu(loaded_models) img = img + (self.model_patch.model.control_block(img, self.encoded_image.to(img.dtype), block_index) * self.strength) @@ -118,17 +133,25 @@ class QwenImageDiffsynthControlnet: "vae": ("VAE",), "image": ("IMAGE",), "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), - }} + }, + "optional": {"mask": ("MASK",)}} RETURN_TYPES = ("MODEL",) FUNCTION = "diffsynth_controlnet" EXPERIMENTAL = True CATEGORY = "advanced/loaders/qwen" - def diffsynth_controlnet(self, model, model_patch, vae, image, strength): + def diffsynth_controlnet(self, model, model_patch, vae, image, strength, mask=None): model_patched = model.clone() image = image[:, :, :, :3] - model_patched.set_model_double_block_patch(DiffSynthCnetPatch(model_patch, vae, image, strength)) + if mask is not None: + if mask.ndim == 3: + mask = mask.unsqueeze(1) + if mask.ndim == 4: + mask = mask.unsqueeze(2) + mask = 1.0 - mask + + model_patched.set_model_double_block_patch(DiffSynthCnetPatch(model_patch, vae, image, strength, mask)) return (model_patched,) From bc49106837b627eb657fc86f2e475770ac5ce68a Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 22 Aug 2025 05:03:57 +0300 Subject: [PATCH 0476/1073] convert String nodes to V3 schema (#9370) --- comfy_extras/nodes_string.py | 449 ++++++++++++++++++----------------- 1 file changed, 237 insertions(+), 212 deletions(-) diff --git a/comfy_extras/nodes_string.py b/comfy_extras/nodes_string.py index b1a8ceef0..571d89f62 100644 --- a/comfy_extras/nodes_string.py +++ b/comfy_extras/nodes_string.py @@ -1,77 +1,91 @@ import re +from typing_extensions import override -from comfy.comfy_types.node_typing import IO +from comfy_api.latest import ComfyExtension, io -class StringConcatenate(): + +class StringConcatenate(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string_a": (IO.STRING, {"multiline": True}), - "string_b": (IO.STRING, {"multiline": True}), - "delimiter": (IO.STRING, {"multiline": False, "default": ""}) - } - } + def define_schema(cls): + return io.Schema( + node_id="StringConcatenate", + display_name="Concatenate", + category="utils/string", + inputs=[ + io.String.Input("string_a", multiline=True), + io.String.Input("string_b", multiline=True), + io.String.Input("delimiter", multiline=False, default=""), + ], + outputs=[ + io.String.Output(), + ] + ) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string_a, string_b, delimiter, **kwargs): - return delimiter.join((string_a, string_b)), - -class StringSubstring(): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, 
{"multiline": True}), - "start": (IO.INT, {}), - "end": (IO.INT, {}), - } - } + def execute(cls, string_a, string_b, delimiter): + return io.NodeOutput(delimiter.join((string_a, string_b))) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/string" - def execute(self, string, start, end, **kwargs): - return string[start:end], - -class StringLength(): +class StringSubstring(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}) - } - } + def define_schema(cls): + return io.Schema( + node_id="StringSubstring", + display_name="Substring", + category="utils/string", + inputs=[ + io.String.Input("string", multiline=True), + io.Int.Input("start"), + io.Int.Input("end"), + ], + outputs=[ + io.String.Output(), + ] + ) - RETURN_TYPES = (IO.INT,) - RETURN_NAMES = ("length",) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string, **kwargs): - length = len(string) - - return length, - -class CaseConverter(): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}), - "mode": (IO.COMBO, {"options": ["UPPERCASE", "lowercase", "Capitalize", "Title Case"]}) - } - } + def execute(cls, string, start, end): + return io.NodeOutput(string[start:end]) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/string" - def execute(self, string, mode, **kwargs): +class StringLength(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="StringLength", + display_name="Length", + category="utils/string", + inputs=[ + io.String.Input("string", multiline=True), + ], + outputs=[ + io.Int.Output(display_name="length"), + ] + ) + + @classmethod + def execute(cls, string): + return io.NodeOutput(len(string)) + + +class CaseConverter(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="CaseConverter", + display_name="Case Converter", + category="utils/string", + inputs=[ + io.String.Input("string", multiline=True), + io.Combo.Input("mode", options=["UPPERCASE", "lowercase", "Capitalize", "Title Case"]), + ], + outputs=[ + io.String.Output(), + ] + ) + + @classmethod + def execute(cls, string, mode): if mode == "UPPERCASE": result = string.upper() elif mode == "lowercase": @@ -83,24 +97,27 @@ class CaseConverter(): else: result = string - return result, + return io.NodeOutput(result) -class StringTrim(): +class StringTrim(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}), - "mode": (IO.COMBO, {"options": ["Both", "Left", "Right"]}) - } - } + def define_schema(cls): + return io.Schema( + node_id="StringTrim", + display_name="Trim", + category="utils/string", + inputs=[ + io.String.Input("string", multiline=True), + io.Combo.Input("mode", options=["Both", "Left", "Right"]), + ], + outputs=[ + io.String.Output(), + ] + ) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string, mode, **kwargs): + @classmethod + def execute(cls, string, mode): if mode == "Both": result = string.strip() elif mode == "Left": @@ -110,70 +127,78 @@ class StringTrim(): else: result = string - return result, + return io.NodeOutput(result) -class StringReplace(): + +class StringReplace(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}), - "find": (IO.STRING, {"multiline": True}), - "replace": (IO.STRING, {"multiline": 
True}) - } - } + def define_schema(cls): + return io.Schema( + node_id="StringReplace", + display_name="Replace", + category="utils/string", + inputs=[ + io.String.Input("string", multiline=True), + io.String.Input("find", multiline=True), + io.String.Input("replace", multiline=True), + ], + outputs=[ + io.String.Output(), + ] + ) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string, find, replace, **kwargs): - result = string.replace(find, replace) - return result, - - -class StringContains(): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}), - "substring": (IO.STRING, {"multiline": True}), - "case_sensitive": (IO.BOOLEAN, {"default": True}) - } - } + def execute(cls, string, find, replace): + return io.NodeOutput(string.replace(find, replace)) - RETURN_TYPES = (IO.BOOLEAN,) - RETURN_NAMES = ("contains",) - FUNCTION = "execute" - CATEGORY = "utils/string" - def execute(self, string, substring, case_sensitive, **kwargs): +class StringContains(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="StringContains", + display_name="Contains", + category="utils/string", + inputs=[ + io.String.Input("string", multiline=True), + io.String.Input("substring", multiline=True), + io.Boolean.Input("case_sensitive", default=True), + ], + outputs=[ + io.Boolean.Output(display_name="contains"), + ] + ) + + @classmethod + def execute(cls, string, substring, case_sensitive): if case_sensitive: contains = substring in string else: contains = substring.lower() in string.lower() - return contains, + return io.NodeOutput(contains) -class StringCompare(): +class StringCompare(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string_a": (IO.STRING, {"multiline": True}), - "string_b": (IO.STRING, {"multiline": True}), - "mode": (IO.COMBO, {"options": ["Starts With", "Ends With", "Equal"]}), - "case_sensitive": (IO.BOOLEAN, {"default": True}) - } - } + def define_schema(cls): + return io.Schema( + node_id="StringCompare", + display_name="Compare", + category="utils/string", + inputs=[ + io.String.Input("string_a", multiline=True), + io.String.Input("string_b", multiline=True), + io.Combo.Input("mode", options=["Starts With", "Ends With", "Equal"]), + io.Boolean.Input("case_sensitive", default=True), + ], + outputs=[ + io.Boolean.Output(), + ] + ) - RETURN_TYPES = (IO.BOOLEAN,) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string_a, string_b, mode, case_sensitive, **kwargs): + @classmethod + def execute(cls, string_a, string_b, mode, case_sensitive): if case_sensitive: a = string_a b = string_b @@ -182,31 +207,34 @@ class StringCompare(): b = string_b.lower() if mode == "Equal": - return a == b, + return io.NodeOutput(a == b) elif mode == "Starts With": - return a.startswith(b), + return io.NodeOutput(a.startswith(b)) elif mode == "Ends With": - return a.endswith(b), + return io.NodeOutput(a.endswith(b)) -class RegexMatch(): + +class RegexMatch(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}), - "regex_pattern": (IO.STRING, {"multiline": True}), - "case_insensitive": (IO.BOOLEAN, {"default": True}), - "multiline": (IO.BOOLEAN, {"default": False}), - "dotall": (IO.BOOLEAN, {"default": False}) - } - } + def define_schema(cls): + return io.Schema( + node_id="RegexMatch", + display_name="Regex Match", + category="utils/string", + inputs=[ + 
io.String.Input("string", multiline=True), + io.String.Input("regex_pattern", multiline=True), + io.Boolean.Input("case_insensitive", default=True), + io.Boolean.Input("multiline", default=False), + io.Boolean.Input("dotall", default=False), + ], + outputs=[ + io.Boolean.Output(display_name="matches"), + ] + ) - RETURN_TYPES = (IO.BOOLEAN,) - RETURN_NAMES = ("matches",) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string, regex_pattern, case_insensitive, multiline, dotall, **kwargs): + @classmethod + def execute(cls, string, regex_pattern, case_insensitive, multiline, dotall): flags = 0 if case_insensitive: @@ -223,29 +251,32 @@ class RegexMatch(): except re.error: result = False - return result, + return io.NodeOutput(result) -class RegexExtract(): +class RegexExtract(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}), - "regex_pattern": (IO.STRING, {"multiline": True}), - "mode": (IO.COMBO, {"options": ["First Match", "All Matches", "First Group", "All Groups"]}), - "case_insensitive": (IO.BOOLEAN, {"default": True}), - "multiline": (IO.BOOLEAN, {"default": False}), - "dotall": (IO.BOOLEAN, {"default": False}), - "group_index": (IO.INT, {"default": 1, "min": 0, "max": 100}) - } - } + def define_schema(cls): + return io.Schema( + node_id="RegexExtract", + display_name="Regex Extract", + category="utils/string", + inputs=[ + io.String.Input("string", multiline=True), + io.String.Input("regex_pattern", multiline=True), + io.Combo.Input("mode", options=["First Match", "All Matches", "First Group", "All Groups"]), + io.Boolean.Input("case_insensitive", default=True), + io.Boolean.Input("multiline", default=False), + io.Boolean.Input("dotall", default=False), + io.Int.Input("group_index", default=1, min=0, max=100), + ], + outputs=[ + io.String.Output(), + ] + ) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string, regex_pattern, mode, case_insensitive, multiline, dotall, group_index, **kwargs): + @classmethod + def execute(cls, string, regex_pattern, mode, case_insensitive, multiline, dotall, group_index): join_delimiter = "\n" flags = 0 @@ -294,32 +325,33 @@ class RegexExtract(): except re.error: result = "" - return result, + return io.NodeOutput(result) -class RegexReplace(): - DESCRIPTION = "Find and replace text using regex patterns." +class RegexReplace(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "string": (IO.STRING, {"multiline": True}), - "regex_pattern": (IO.STRING, {"multiline": True}), - "replace": (IO.STRING, {"multiline": True}), - }, - "optional": { - "case_insensitive": (IO.BOOLEAN, {"default": True}), - "multiline": (IO.BOOLEAN, {"default": False}), - "dotall": (IO.BOOLEAN, {"default": False, "tooltip": "When enabled, the dot (.) character will match any character including newline characters. When disabled, dots won't match newlines."}), - "count": (IO.INT, {"default": 0, "min": 0, "max": 100, "tooltip": "Maximum number of replacements to make. Set to 0 to replace all occurrences (default). 
Set to 1 to replace only the first match, 2 for the first two matches, etc."}), - } - } + def define_schema(cls): + return io.Schema( + node_id="RegexReplace", + display_name="Regex Replace", + category="utils/string", + description="Find and replace text using regex patterns.", + inputs=[ + io.String.Input("string", multiline=True), + io.String.Input("regex_pattern", multiline=True), + io.String.Input("replace", multiline=True), + io.Boolean.Input("case_insensitive", default=True, optional=True), + io.Boolean.Input("multiline", default=False, optional=True), + io.Boolean.Input("dotall", default=False, optional=True, tooltip="When enabled, the dot (.) character will match any character including newline characters. When disabled, dots won't match newlines."), + io.Int.Input("count", default=0, min=0, max=100, optional=True, tooltip="Maximum number of replacements to make. Set to 0 to replace all occurrences (default). Set to 1 to replace only the first match, 2 for the first two matches, etc."), + ], + outputs=[ + io.String.Output(), + ] + ) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/string" - - def execute(self, string, regex_pattern, replace, case_insensitive=True, multiline=False, dotall=False, count=0, **kwargs): + @classmethod + def execute(cls, string, regex_pattern, replace, case_insensitive=True, multiline=False, dotall=False, count=0): flags = 0 if case_insensitive: @@ -329,32 +361,25 @@ class RegexReplace(): if dotall: flags |= re.DOTALL result = re.sub(regex_pattern, replace, string, count=count, flags=flags) - return result, + return io.NodeOutput(result) -NODE_CLASS_MAPPINGS = { - "StringConcatenate": StringConcatenate, - "StringSubstring": StringSubstring, - "StringLength": StringLength, - "CaseConverter": CaseConverter, - "StringTrim": StringTrim, - "StringReplace": StringReplace, - "StringContains": StringContains, - "StringCompare": StringCompare, - "RegexMatch": RegexMatch, - "RegexExtract": RegexExtract, - "RegexReplace": RegexReplace, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "StringConcatenate": "Concatenate", - "StringSubstring": "Substring", - "StringLength": "Length", - "CaseConverter": "Case Converter", - "StringTrim": "Trim", - "StringReplace": "Replace", - "StringContains": "Contains", - "StringCompare": "Compare", - "RegexMatch": "Regex Match", - "RegexExtract": "Regex Extract", - "RegexReplace": "Regex Replace", -} +class StringExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + StringConcatenate, + StringSubstring, + StringLength, + CaseConverter, + StringTrim, + StringReplace, + StringContains, + StringCompare, + RegexMatch, + RegexExtract, + RegexReplace, + ] + +async def comfy_entrypoint() -> StringExtension: + return StringExtension() From bab08f40d10c8737c3424e35bbff873bcb2333bd Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 22 Aug 2025 05:05:36 +0300 Subject: [PATCH 0477/1073] v3 nodes (part a) (#9149) --- comfy_extras/nodes_ace.py | 80 +++++++----- comfy_extras/nodes_advanced_samplers.py | 88 +++++++------ comfy_extras/nodes_apg.py | 72 +++++++---- comfy_extras/nodes_attention_multiply.py | 154 ++++++++++++++--------- 4 files changed, 239 insertions(+), 155 deletions(-) diff --git a/comfy_extras/nodes_ace.py b/comfy_extras/nodes_ace.py index cbfec15a2..1409233c9 100644 --- a/comfy_extras/nodes_ace.py +++ b/comfy_extras/nodes_ace.py @@ -1,49 +1,63 @@ import torch +from typing_extensions import override + import 
comfy.model_management import node_helpers +from comfy_api.latest import ComfyExtension, io -class TextEncodeAceStepAudio: + +class TextEncodeAceStepAudio(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "tags": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "lyrics": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "lyrics_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="TextEncodeAceStepAudio", + category="conditioning", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("tags", multiline=True, dynamic_prompts=True), + io.String.Input("lyrics", multiline=True, dynamic_prompts=True), + io.Float.Input("lyrics_strength", default=1.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[io.Conditioning.Output()], + ) - CATEGORY = "conditioning" - - def encode(self, clip, tags, lyrics, lyrics_strength): + @classmethod + def execute(cls, clip, tags, lyrics, lyrics_strength) -> io.NodeOutput: tokens = clip.tokenize(tags, lyrics=lyrics) conditioning = clip.encode_from_tokens_scheduled(tokens) conditioning = node_helpers.conditioning_set_values(conditioning, {"lyrics_strength": lyrics_strength}) - return (conditioning, ) + return io.NodeOutput(conditioning) -class EmptyAceStepLatentAudio: - def __init__(self): - self.device = comfy.model_management.intermediate_device() +class EmptyAceStepLatentAudio(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="EmptyAceStepLatentAudio", + category="latent/audio", + inputs=[ + io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1), + io.Int.Input( + "batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch." 
+ ), + ], + outputs=[io.Latent.Output()], + ) @classmethod - def INPUT_TYPES(s): - return {"required": {"seconds": ("FLOAT", {"default": 120.0, "min": 1.0, "max": 1000.0, "step": 0.1}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}), - }} - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" - - CATEGORY = "latent/audio" - - def generate(self, seconds, batch_size): + def execute(cls, seconds, batch_size) -> io.NodeOutput: length = int(seconds * 44100 / 512 / 8) - latent = torch.zeros([batch_size, 8, 16, length], device=self.device) - return ({"samples": latent, "type": "audio"}, ) + latent = torch.zeros([batch_size, 8, 16, length], device=comfy.model_management.intermediate_device()) + return io.NodeOutput({"samples": latent, "type": "audio"}) -NODE_CLASS_MAPPINGS = { - "TextEncodeAceStepAudio": TextEncodeAceStepAudio, - "EmptyAceStepLatentAudio": EmptyAceStepLatentAudio, -} +class AceExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + TextEncodeAceStepAudio, + EmptyAceStepLatentAudio, + ] + +async def comfy_entrypoint() -> AceExtension: + return AceExtension() diff --git a/comfy_extras/nodes_advanced_samplers.py b/comfy_extras/nodes_advanced_samplers.py index 5fbb096fb..5532ffe6a 100644 --- a/comfy_extras/nodes_advanced_samplers.py +++ b/comfy_extras/nodes_advanced_samplers.py @@ -1,8 +1,13 @@ +import numpy as np +import torch +from tqdm.auto import trange +from typing_extensions import override + +import comfy.model_patcher import comfy.samplers import comfy.utils -import torch -import numpy as np -from tqdm.auto import trange +from comfy.k_diffusion.sampling import to_d +from comfy_api.latest import ComfyExtension, io @torch.no_grad() @@ -33,30 +38,29 @@ def sample_lcm_upscale(model, x, sigmas, extra_args=None, callback=None, disable return x -class SamplerLCMUpscale: - upscale_methods = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"] +class SamplerLCMUpscale(io.ComfyNode): + UPSCALE_METHODS = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"] @classmethod - def INPUT_TYPES(s): - return {"required": - {"scale_ratio": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 20.0, "step": 0.01}), - "scale_steps": ("INT", {"default": -1, "min": -1, "max": 1000, "step": 1}), - "upscale_method": (s.upscale_methods,), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="SamplerLCMUpscale", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01), + io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1), + io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS), + ], + outputs=[io.Sampler.Output()], + ) - FUNCTION = "get_sampler" - - def get_sampler(self, scale_ratio, scale_steps, upscale_method): + @classmethod + def execute(cls, scale_ratio, scale_steps, upscale_method) -> io.NodeOutput: if scale_steps < 0: scale_steps = None sampler = comfy.samplers.KSAMPLER(sample_lcm_upscale, extra_options={"total_upscale": scale_ratio, "upscale_steps": scale_steps, "upscale_method": upscale_method}) - return (sampler, ) + return io.NodeOutput(sampler) -from comfy.k_diffusion.sampling import to_d -import comfy.model_patcher @torch.no_grad() def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=None): @@ -82,30 +86,36 @@ def sample_euler_pp(model, 
x, sigmas, extra_args=None, callback=None, disable=No return x -class SamplerEulerCFGpp: +class SamplerEulerCFGpp(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"version": (["regular", "alternative"],),} - } - RETURN_TYPES = ("SAMPLER",) - # CATEGORY = "sampling/custom_sampling/samplers" - CATEGORY = "_for_testing" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="SamplerEulerCFGpp", + display_name="SamplerEulerCFG++", + category="_for_testing", # "sampling/custom_sampling/samplers" + inputs=[ + io.Combo.Input("version", options=["regular", "alternative"]), + ], + outputs=[io.Sampler.Output()], + is_experimental=True, + ) - FUNCTION = "get_sampler" - - def get_sampler(self, version): + @classmethod + def execute(cls, version) -> io.NodeOutput: if version == "alternative": sampler = comfy.samplers.KSAMPLER(sample_euler_pp) else: sampler = comfy.samplers.ksampler("euler_cfg_pp") - return (sampler, ) + return io.NodeOutput(sampler) -NODE_CLASS_MAPPINGS = { - "SamplerLCMUpscale": SamplerLCMUpscale, - "SamplerEulerCFGpp": SamplerEulerCFGpp, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "SamplerEulerCFGpp": "SamplerEulerCFG++", -} +class AdvancedSamplersExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + SamplerLCMUpscale, + SamplerEulerCFGpp, + ] + +async def comfy_entrypoint() -> AdvancedSamplersExtension: + return AdvancedSamplersExtension() diff --git a/comfy_extras/nodes_apg.py b/comfy_extras/nodes_apg.py index 25b21b1b8..f27ae7da8 100644 --- a/comfy_extras/nodes_apg.py +++ b/comfy_extras/nodes_apg.py @@ -1,4 +1,8 @@ import torch +from typing_extensions import override + +from comfy_api.latest import ComfyExtension, io + def project(v0, v1): v1 = torch.nn.functional.normalize(v1, dim=[-1, -2, -3]) @@ -6,22 +10,45 @@ def project(v0, v1): v0_orthogonal = v0 - v0_parallel return v0_parallel, v0_orthogonal -class APG: +class APG(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model": ("MODEL",), - "eta": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01, "tooltip": "Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1."}), - "norm_threshold": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 50.0, "step": 0.1, "tooltip": "Normalize guidance vector to this value, normalization disable at a setting of 0."}), - "momentum": ("FLOAT", {"default": 0.0, "min": -5.0, "max": 1.0, "step": 0.01, "tooltip":"Controls a running average of guidance during diffusion, disabled at a setting of 0."}), - } - } - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" - CATEGORY = "sampling/custom_sampling" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="APG", + display_name="Adaptive Projected Guidance", + category="sampling/custom_sampling", + inputs=[ + io.Model.Input("model"), + io.Float.Input( + "eta", + default=1.0, + min=-10.0, + max=10.0, + step=0.01, + tooltip="Controls the scale of the parallel guidance vector. 
Default CFG behavior at a setting of 1.", + ), + io.Float.Input( + "norm_threshold", + default=5.0, + min=0.0, + max=50.0, + step=0.1, + tooltip="Normalize guidance vector to this value, normalization disable at a setting of 0.", + ), + io.Float.Input( + "momentum", + default=0.0, + min=-5.0, + max=1.0, + step=0.01, + tooltip="Controls a running average of guidance during diffusion, disabled at a setting of 0.", + ), + ], + outputs=[io.Model.Output()], + ) - def patch(self, model, eta, norm_threshold, momentum): + @classmethod + def execute(cls, model, eta, norm_threshold, momentum) -> io.NodeOutput: running_avg = 0 prev_sigma = None @@ -65,12 +92,15 @@ class APG: m = model.clone() m.set_model_sampler_pre_cfg_function(pre_cfg_function) - return (m,) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "APG": APG, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "APG": "Adaptive Projected Guidance", -} +class ApgExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + APG, + ] + +async def comfy_entrypoint() -> ApgExtension: + return ApgExtension() diff --git a/comfy_extras/nodes_attention_multiply.py b/comfy_extras/nodes_attention_multiply.py index 4747eb395..c0e494c2a 100644 --- a/comfy_extras/nodes_attention_multiply.py +++ b/comfy_extras/nodes_attention_multiply.py @@ -1,3 +1,7 @@ +from typing_extensions import override + +from comfy_api.latest import ComfyExtension, io + def attention_multiply(attn, model, q, k, v, out): m = model.clone() @@ -16,57 +20,71 @@ def attention_multiply(attn, model, q, k, v, out): return m -class UNetSelfAttentionMultiply: +class UNetSelfAttentionMultiply(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "q": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "k": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "v": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "out": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="UNetSelfAttentionMultiply", + category="_for_testing/attention_experiments", + inputs=[ + io.Model.Input("model"), + io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[io.Model.Output()], + is_experimental=True, + ) - CATEGORY = "_for_testing/attention_experiments" - - def patch(self, model, q, k, v, out): + @classmethod + def execute(cls, model, q, k, v, out) -> io.NodeOutput: m = attention_multiply("attn1", model, q, k, v, out) - return (m, ) + return io.NodeOutput(m) -class UNetCrossAttentionMultiply: + +class UNetCrossAttentionMultiply(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "q": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "k": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "v": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "out": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="UNetCrossAttentionMultiply", + category="_for_testing/attention_experiments", + 
inputs=[ + io.Model.Input("model"), + io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[io.Model.Output()], + is_experimental=True, + ) - CATEGORY = "_for_testing/attention_experiments" - - def patch(self, model, q, k, v, out): + @classmethod + def execute(cls, model, q, k, v, out) -> io.NodeOutput: m = attention_multiply("attn2", model, q, k, v, out) - return (m, ) + return io.NodeOutput(m) -class CLIPAttentionMultiply: + +class CLIPAttentionMultiply(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "clip": ("CLIP",), - "q": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "k": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "v": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "out": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("CLIP",) - FUNCTION = "patch" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="CLIPAttentionMultiply", + category="_for_testing/attention_experiments", + inputs=[ + io.Clip.Input("clip"), + io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[io.Clip.Output()], + is_experimental=True, + ) - CATEGORY = "_for_testing/attention_experiments" - - def patch(self, clip, q, k, v, out): + @classmethod + def execute(cls, clip, q, k, v, out) -> io.NodeOutput: m = clip.clone() sd = m.patcher.model_state_dict() @@ -79,23 +97,28 @@ class CLIPAttentionMultiply: m.add_patches({key: (None,)}, 0.0, v) if key.endswith("self_attn.out_proj.weight") or key.endswith("self_attn.out_proj.bias"): m.add_patches({key: (None,)}, 0.0, out) - return (m, ) + return io.NodeOutput(m) -class UNetTemporalAttentionMultiply: + +class UNetTemporalAttentionMultiply(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "self_structural": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "self_temporal": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "cross_structural": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "cross_temporal": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="UNetTemporalAttentionMultiply", + category="_for_testing/attention_experiments", + inputs=[ + io.Model.Input("model"), + io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("self_temporal", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("cross_structural", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("cross_temporal", default=1.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[io.Model.Output()], + is_experimental=True, + ) - CATEGORY = "_for_testing/attention_experiments" - - def patch(self, model, self_structural, self_temporal, cross_structural, cross_temporal): + @classmethod + def execute(cls, model, self_structural, self_temporal, cross_structural, cross_temporal) -> io.NodeOutput: m = model.clone() sd 
= model.model_state_dict() @@ -110,11 +133,18 @@ class UNetTemporalAttentionMultiply: m.add_patches({k: (None,)}, 0.0, cross_temporal) else: m.add_patches({k: (None,)}, 0.0, cross_structural) - return (m, ) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "UNetSelfAttentionMultiply": UNetSelfAttentionMultiply, - "UNetCrossAttentionMultiply": UNetCrossAttentionMultiply, - "CLIPAttentionMultiply": CLIPAttentionMultiply, - "UNetTemporalAttentionMultiply": UNetTemporalAttentionMultiply, -} + +class AttentionMultiplyExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + UNetSelfAttentionMultiply, + UNetCrossAttentionMultiply, + CLIPAttentionMultiply, + UNetTemporalAttentionMultiply, + ] + +async def comfy_entrypoint() -> AttentionMultiplyExtension: + return AttentionMultiplyExtension() From eb39019daae96128ee848d0b7837ede299518a7c Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 22 Aug 2025 05:06:13 +0300 Subject: [PATCH 0478/1073] [V3] convert Google Veo API node to the V3 schema (#9272) * convert Google Veo API node to the V3 schema * use own full io.Schema for Veo3VideoGenerationNode * fixed typo * use auth_kwargs instead of auth_token/comfy_api_key --- comfy_api_nodes/nodes_veo2.py | 332 +++++++++++++++++++--------------- 1 file changed, 190 insertions(+), 142 deletions(-) diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index e25dab2f5..251aecd42 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -1,17 +1,18 @@ -import io import logging import base64 import aiohttp import torch +from io import BytesIO from typing import Optional +from typing_extensions import override -from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis import ( VeoGenVidRequest, VeoGenVidResponse, VeoGenVidPollRequest, - VeoGenVidPollResponse + VeoGenVidPollResponse, ) from comfy_api_nodes.apis.client import ( ApiEndpoint, @@ -22,7 +23,7 @@ from comfy_api_nodes.apis.client import ( from comfy_api_nodes.apinode_utils import ( downscale_image_tensor, - tensor_to_base64_string + tensor_to_base64_string, ) AVERAGE_DURATION_VIDEO_GEN = 32 @@ -50,7 +51,7 @@ def get_video_url_from_response(poll_response: VeoGenVidPollResponse) -> Optiona return None -class VeoVideoGenerationNode(ComfyNodeABC): +class VeoVideoGenerationNode(comfy_io.ComfyNode): """ Generates videos from text prompts using Google's Veo API. 
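A note on the recurring pattern: every V3 conversion in this series has the same skeleton, namely a define_schema() classmethod returning an io.Schema, a classmethod execute() that returns io.NodeOutput, and a ComfyExtension entry point that replaces the old NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS dicts. Below is a minimal sketch of that skeleton, distilled from the hunks in this series; the node name and inputs are placeholders, not part of any patch.

from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

class ExampleNode(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="ExampleNode",        # was the NODE_CLASS_MAPPINGS key
            display_name="Example Node",  # was the NODE_DISPLAY_NAME_MAPPINGS value
            category="_for_testing/example",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
        )

    @classmethod
    def execute(cls, model, strength) -> io.NodeOutput:
        # V3 nodes are stateless: execute() is a classmethod and wraps its
        # results in io.NodeOutput instead of returning a tuple.
        return io.NodeOutput(model)

class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [ExampleNode]

async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()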
@@ -59,101 +60,93 @@ class VeoVideoGenerationNode(ComfyNodeABC): """ @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Text description of the video", - }, + def define_schema(cls): + return comfy_io.Schema( + node_id="VeoVideoGenerationNode", + display_name="Google Veo 2 Video Generation", + category="api node/video/Veo", + description="Generates videos from text prompts using Google's Veo 2 API", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text description of the video", ), - "aspect_ratio": ( - IO.COMBO, - { - "options": ["16:9", "9:16"], - "default": "16:9", - "tooltip": "Aspect ratio of the output video", - }, + comfy_io.Combo.Input( + "aspect_ratio", + options=["16:9", "9:16"], + default="16:9", + tooltip="Aspect ratio of the output video", ), - }, - "optional": { - "negative_prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Negative text prompt to guide what to avoid in the video", - }, + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Negative text prompt to guide what to avoid in the video", + optional=True, ), - "duration_seconds": ( - IO.INT, - { - "default": 5, - "min": 5, - "max": 8, - "step": 1, - "display": "number", - "tooltip": "Duration of the output video in seconds", - }, + comfy_io.Int.Input( + "duration_seconds", + default=5, + min=5, + max=8, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Duration of the output video in seconds", + optional=True, ), - "enhance_prompt": ( - IO.BOOLEAN, - { - "default": True, - "tooltip": "Whether to enhance the prompt with AI assistance", - } + comfy_io.Boolean.Input( + "enhance_prompt", + default=True, + tooltip="Whether to enhance the prompt with AI assistance", + optional=True, ), - "person_generation": ( - IO.COMBO, - { - "options": ["ALLOW", "BLOCK"], - "default": "ALLOW", - "tooltip": "Whether to allow generating people in the video", - }, + comfy_io.Combo.Input( + "person_generation", + options=["ALLOW", "BLOCK"], + default="ALLOW", + tooltip="Whether to allow generating people in the video", + optional=True, ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFF, - "step": 1, - "display": "number", - "control_after_generate": True, - "tooltip": "Seed for video generation (0 for random)", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFF, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed for video generation (0 for random)", + optional=True, ), - "image": (IO.IMAGE, { - "default": None, - "tooltip": "Optional reference image to guide video generation", - }), - "model": ( - IO.COMBO, - { - "options": ["veo-2.0-generate-001"], - "default": "veo-2.0-generate-001", - "tooltip": "Veo 2 model to use for video generation", - }, + comfy_io.Image.Input( + "image", + tooltip="Optional reference image to guide video generation", + optional=True, ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + comfy_io.Combo.Input( + "model", + options=["veo-2.0-generate-001"], + default="veo-2.0-generate-001", + tooltip="Veo 2 model to use for video generation", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + 
comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - RETURN_TYPES = (IO.VIDEO,) - FUNCTION = "generate_video" - CATEGORY = "api node/video/Veo" - DESCRIPTION = "Generates videos from text prompts using Google's Veo 2 API" - API_NODE = True - - async def generate_video( - self, + @classmethod + async def execute( + cls, prompt, aspect_ratio="16:9", negative_prompt="", @@ -164,8 +157,6 @@ class VeoVideoGenerationNode(ComfyNodeABC): image=None, model="veo-2.0-generate-001", generate_audio=False, - unique_id: Optional[str] = None, - **kwargs, ): # Prepare the instances for the request instances = [] @@ -202,6 +193,10 @@ class VeoVideoGenerationNode(ComfyNodeABC): if "veo-3.0" in model: parameters["generateAudio"] = generate_audio + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } # Initial request to start video generation initial_operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -214,7 +209,7 @@ class VeoVideoGenerationNode(ComfyNodeABC): instances=instances, parameters=parameters ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) initial_response = await initial_operation.execute() @@ -248,10 +243,10 @@ class VeoVideoGenerationNode(ComfyNodeABC): request=VeoGenVidPollRequest( operationName=operation_name ), - auth_kwargs=kwargs, + auth_kwargs=auth, poll_interval=5.0, result_url_extractor=get_video_url_from_response, - node_id=unique_id, + node_id=cls.hidden.unique_id, estimated_duration=AVERAGE_DURATION_VIDEO_GEN, ) @@ -304,10 +299,10 @@ class VeoVideoGenerationNode(ComfyNodeABC): logging.info("Video generation completed successfully") # Convert video data to BytesIO object - video_io = io.BytesIO(video_data) + video_io = BytesIO(video_data) # Return VideoFromFile object - return (VideoFromFile(video_io),) + return comfy_io.NodeOutput(VideoFromFile(video_io)) class Veo3VideoGenerationNode(VeoVideoGenerationNode): @@ -323,51 +318,104 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): """ @classmethod - def INPUT_TYPES(s): - parent_input = super().INPUT_TYPES() - - # Update model options for Veo 3 - parent_input["optional"]["model"] = ( - IO.COMBO, - { - "options": ["veo-3.0-generate-001", "veo-3.0-fast-generate-001"], - "default": "veo-3.0-generate-001", - "tooltip": "Veo 3 model to use for video generation", - }, + def define_schema(cls): + return comfy_io.Schema( + node_id="Veo3VideoGenerationNode", + display_name="Google Veo 3 Video Generation", + category="api node/video/Veo", + description="Generates videos from text prompts using Google's Veo 3 API", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text description of the video", + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=["16:9", "9:16"], + default="16:9", + tooltip="Aspect ratio of the output video", + ), + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Negative text prompt to guide what to avoid in the video", + optional=True, + ), + comfy_io.Int.Input( + "duration_seconds", + default=8, + min=8, + max=8, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Duration of the output video in seconds (Veo 3 only supports 8 seconds)", + optional=True, + ), + comfy_io.Boolean.Input( + "enhance_prompt", + default=True, + tooltip="Whether to enhance the prompt with AI assistance", + optional=True, + ), + comfy_io.Combo.Input( + "person_generation", + options=["ALLOW", "BLOCK"], + default="ALLOW", + tooltip="Whether to allow generating people in the video", 
+ optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFF, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed for video generation (0 for random)", + optional=True, + ), + comfy_io.Image.Input( + "image", + tooltip="Optional reference image to guide video generation", + optional=True, + ), + comfy_io.Combo.Input( + "model", + options=["veo-3.0-generate-001", "veo-3.0-fast-generate-001"], + default="veo-3.0-generate-001", + tooltip="Veo 3 model to use for video generation", + optional=True, + ), + comfy_io.Boolean.Input( + "generate_audio", + default=False, + tooltip="Generate audio for the video. Supported by all Veo 3 models.", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - # Add generateAudio parameter - parent_input["optional"]["generate_audio"] = ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Generate audio for the video. Supported by all Veo 3 models.", - } - ) - # Update duration constraints for Veo 3 (only 8 seconds supported) - parent_input["optional"]["duration_seconds"] = ( - IO.INT, - { - "default": 8, - "min": 8, - "max": 8, - "step": 1, - "display": "number", - "tooltip": "Duration of the output video in seconds (Veo 3 only supports 8 seconds)", - }, - ) +class VeoExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + VeoVideoGenerationNode, + Veo3VideoGenerationNode, + ] - return parent_input - - -# Register the nodes -NODE_CLASS_MAPPINGS = { - "VeoVideoGenerationNode": VeoVideoGenerationNode, - "Veo3VideoGenerationNode": Veo3VideoGenerationNode, -} - -NODE_DISPLAY_NAME_MAPPINGS = { - "VeoVideoGenerationNode": "Google Veo 2 Video Generation", - "Veo3VideoGenerationNode": "Google Veo 3 Video Generation", -} +async def comfy_entrypoint() -> VeoExtension: + return VeoExtension() From 7ed73d12d13c2c389e0469c46c2db635a7d74278 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 22 Aug 2025 05:06:51 +0300 Subject: [PATCH 0479/1073] [V3] convert Ideogram API nodes to the V3 schema (#9278) * convert Ideogram API nodes to the V3 schema * use auth_kwargs instead of auth_token/comfy_api_key --- comfy_api_nodes/nodes_ideogram.py | 536 ++++++++++++++---------------- 1 file changed, 257 insertions(+), 279 deletions(-) diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index db24e6da4..d28895f3e 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -1,8 +1,8 @@ -from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict -from inspect import cleandoc +from io import BytesIO +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io as comfy_io from PIL import Image import numpy as np -import io import torch from comfy_api_nodes.apis import ( IdeogramGenerateRequest, @@ -246,90 +246,81 @@ def display_image_urls_on_node(image_urls, node_id): PromptServer.instance.send_progress_text(urls_text, node_id) -class IdeogramV1(ComfyNodeABC): - """ - Generates images using the Ideogram V1 model. 
- """ - - def __init__(self): - pass +class IdeogramV1(comfy_io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls): + return comfy_io.Schema( + node_id="IdeogramV1", + display_name="Ideogram V1", + category="api node/image/Ideogram", + description="Generates images using the Ideogram V1 model.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "turbo": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)", - } + comfy_io.Boolean.Input( + "turbo", + default=False, + tooltip="Whether to use turbo mode (faster generation, potentially lower quality)", ), - }, - "optional": { - "aspect_ratio": ( - IO.COMBO, - { - "options": list(V1_V2_RATIO_MAP.keys()), - "default": "1:1", - "tooltip": "The aspect ratio for image generation.", - }, + comfy_io.Combo.Input( + "aspect_ratio", + options=list(V1_V2_RATIO_MAP.keys()), + default="1:1", + tooltip="The aspect ratio for image generation.", + optional=True, ), - "magic_prompt_option": ( - IO.COMBO, - { - "options": ["AUTO", "ON", "OFF"], - "default": "AUTO", - "tooltip": "Determine if MagicPrompt should be used in generation", - }, + comfy_io.Combo.Input( + "magic_prompt_option", + options=["AUTO", "ON", "OFF"], + default="AUTO", + tooltip="Determine if MagicPrompt should be used in generation", + optional=True, ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2147483647, - "step": 1, - "control_after_generate": True, - "display": "number", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + control_after_generate=True, + display_mode=comfy_io.NumberDisplay.number, + optional=True, ), - "negative_prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Description of what to exclude from the image", - }, + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Description of what to exclude from the image", + optional=True, ), - "num_images": ( - IO.INT, - {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}, + comfy_io.Int.Input( + "num_images", + default=1, + min=1, + max=8, + step=1, + display_mode=comfy_io.NumberDisplay.number, + optional=True, ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + ) - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node/image/Ideogram" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - async def api_call( - self, + @classmethod + async def execute( + cls, prompt, turbo=False, aspect_ratio="1:1", @@ -337,13 +328,15 @@ class IdeogramV1(ComfyNodeABC): seed=0, negative_prompt="", num_images=1, - unique_id=None, - **kwargs, ): # Determine the model based on turbo setting aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) model = "V_1_TURBO" if turbo else "V_1" + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/ideogram/generate", @@ -364,7 +357,7 
@@ class IdeogramV1(ComfyNodeABC): negative_prompt=negative_prompt if negative_prompt else None, ) ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) response = await operation.execute() @@ -377,93 +370,85 @@ class IdeogramV1(ComfyNodeABC): if not image_urls: raise Exception("No image URLs were generated in the response") - display_image_urls_on_node(image_urls, unique_id) - return (await download_and_process_images(image_urls),) + display_image_urls_on_node(image_urls, cls.hidden.unique_id) + return comfy_io.NodeOutput(await download_and_process_images(image_urls)) -class IdeogramV2(ComfyNodeABC): - """ - Generates images using the Ideogram V2 model. - """ - - def __init__(self): - pass +class IdeogramV2(comfy_io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls): + return comfy_io.Schema( + node_id="IdeogramV2", + display_name="Ideogram V2", + category="api node/image/Ideogram", + description="Generates images using the Ideogram V2 model.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "turbo": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)", - } + comfy_io.Boolean.Input( + "turbo", + default=False, + tooltip="Whether to use turbo mode (faster generation, potentially lower quality)", ), - }, - "optional": { - "aspect_ratio": ( - IO.COMBO, - { - "options": list(V1_V2_RATIO_MAP.keys()), - "default": "1:1", - "tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to AUTO.", - }, + comfy_io.Combo.Input( + "aspect_ratio", + options=list(V1_V2_RATIO_MAP.keys()), + default="1:1", + tooltip="The aspect ratio for image generation. Ignored if resolution is not set to AUTO.", + optional=True, ), - "resolution": ( - IO.COMBO, - { - "options": list(V1_V1_RES_MAP.keys()), - "default": "Auto", - "tooltip": "The resolution for image generation. If not set to AUTO, this overrides the aspect_ratio setting.", - }, + comfy_io.Combo.Input( + "resolution", + options=list(V1_V1_RES_MAP.keys()), + default="Auto", + tooltip="The resolution for image generation. 
" + "If not set to AUTO, this overrides the aspect_ratio setting.", + optional=True, ), - "magic_prompt_option": ( - IO.COMBO, - { - "options": ["AUTO", "ON", "OFF"], - "default": "AUTO", - "tooltip": "Determine if MagicPrompt should be used in generation", - }, + comfy_io.Combo.Input( + "magic_prompt_option", + options=["AUTO", "ON", "OFF"], + default="AUTO", + tooltip="Determine if MagicPrompt should be used in generation", + optional=True, ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2147483647, - "step": 1, - "control_after_generate": True, - "display": "number", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + control_after_generate=True, + display_mode=comfy_io.NumberDisplay.number, + optional=True, ), - "style_type": ( - IO.COMBO, - { - "options": ["AUTO", "GENERAL", "REALISTIC", "DESIGN", "RENDER_3D", "ANIME"], - "default": "NONE", - "tooltip": "Style type for generation (V2 only)", - }, + comfy_io.Combo.Input( + "style_type", + options=["AUTO", "GENERAL", "REALISTIC", "DESIGN", "RENDER_3D", "ANIME"], + default="NONE", + tooltip="Style type for generation (V2 only)", + optional=True, ), - "negative_prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Description of what to exclude from the image", - }, + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Description of what to exclude from the image", + optional=True, ), - "num_images": ( - IO.INT, - {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}, + comfy_io.Int.Input( + "num_images", + default=1, + min=1, + max=8, + step=1, + display_mode=comfy_io.NumberDisplay.number, + optional=True, ), #"color_palette": ( # IO.STRING, @@ -473,22 +458,20 @@ class IdeogramV2(ComfyNodeABC): # "tooltip": "Color palette preset name or hex colors with weights", # }, #), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + ) - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node/image/Ideogram" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - async def api_call( - self, + @classmethod + async def execute( + cls, prompt, turbo=False, aspect_ratio="1:1", @@ -499,8 +482,6 @@ class IdeogramV2(ComfyNodeABC): negative_prompt="", num_images=1, color_palette="", - unique_id=None, - **kwargs, ): aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) resolution = V1_V1_RES_MAP.get(resolution, None) @@ -517,6 +498,10 @@ class IdeogramV2(ComfyNodeABC): else: final_aspect_ratio = aspect_ratio if aspect_ratio != "ASPECT_1_1" else None + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/ideogram/generate", @@ -540,7 +525,7 @@ class IdeogramV2(ComfyNodeABC): color_palette=color_palette if color_palette else None, ) ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) response = await operation.execute() @@ -553,108 +538,99 @@ class IdeogramV2(ComfyNodeABC): if not image_urls: raise Exception("No image URLs were generated in the response") - display_image_urls_on_node(image_urls, unique_id) - return (await download_and_process_images(image_urls),) + display_image_urls_on_node(image_urls, cls.hidden.unique_id) + return 
comfy_io.NodeOutput(await download_and_process_images(image_urls)) -class IdeogramV3(ComfyNodeABC): - """ - Generates images using the Ideogram V3 model. Supports both regular image generation from text prompts and image editing with mask. - """ - def __init__(self): - pass +class IdeogramV3(comfy_io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation or editing", - }, + def define_schema(cls): + return comfy_io.Schema( + node_id="IdeogramV3", + display_name="Ideogram V3", + category="api node/image/Ideogram", + description="Generates images using the Ideogram V3 model. " + "Supports both regular image generation from text prompts and image editing with mask.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation or editing", ), - }, - "optional": { - "image": ( - IO.IMAGE, - { - "default": None, - "tooltip": "Optional reference image for image editing.", - }, + comfy_io.Image.Input( + "image", + tooltip="Optional reference image for image editing.", + optional=True, ), - "mask": ( - IO.MASK, - { - "default": None, - "tooltip": "Optional mask for inpainting (white areas will be replaced)", - }, + comfy_io.Mask.Input( + "mask", + tooltip="Optional mask for inpainting (white areas will be replaced)", + optional=True, ), - "aspect_ratio": ( - IO.COMBO, - { - "options": list(V3_RATIO_MAP.keys()), - "default": "1:1", - "tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to Auto.", - }, + comfy_io.Combo.Input( + "aspect_ratio", + options=list(V3_RATIO_MAP.keys()), + default="1:1", + tooltip="The aspect ratio for image generation. Ignored if resolution is not set to Auto.", + optional=True, ), - "resolution": ( - IO.COMBO, - { - "options": V3_RESOLUTIONS, - "default": "Auto", - "tooltip": "The resolution for image generation. If not set to Auto, this overrides the aspect_ratio setting.", - }, + comfy_io.Combo.Input( + "resolution", + options=V3_RESOLUTIONS, + default="Auto", + tooltip="The resolution for image generation. 
" + "If not set to Auto, this overrides the aspect_ratio setting.", + optional=True, ), - "magic_prompt_option": ( - IO.COMBO, - { - "options": ["AUTO", "ON", "OFF"], - "default": "AUTO", - "tooltip": "Determine if MagicPrompt should be used in generation", - }, + comfy_io.Combo.Input( + "magic_prompt_option", + options=["AUTO", "ON", "OFF"], + default="AUTO", + tooltip="Determine if MagicPrompt should be used in generation", + optional=True, ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2147483647, - "step": 1, - "control_after_generate": True, - "display": "number", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + control_after_generate=True, + display_mode=comfy_io.NumberDisplay.number, + optional=True, ), - "num_images": ( - IO.INT, - {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}, + comfy_io.Int.Input( + "num_images", + default=1, + min=1, + max=8, + step=1, + display_mode=comfy_io.NumberDisplay.number, + optional=True, ), - "rendering_speed": ( - IO.COMBO, - { - "options": ["BALANCED", "TURBO", "QUALITY"], - "default": "BALANCED", - "tooltip": "Controls the trade-off between generation speed and quality", - }, + comfy_io.Combo.Input( + "rendering_speed", + options=["BALANCED", "TURBO", "QUALITY"], + default="BALANCED", + tooltip="Controls the trade-off between generation speed and quality", + optional=True, ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + ) - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node/image/Ideogram" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - async def api_call( - self, + @classmethod + async def execute( + cls, prompt, image=None, mask=None, @@ -664,9 +640,11 @@ class IdeogramV3(ComfyNodeABC): seed=0, num_images=1, rendering_speed="BALANCED", - unique_id=None, - **kwargs, ): + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } # Check if both image and mask are provided for editing mode if image is not None and mask is not None: # Edit mode @@ -686,7 +664,7 @@ class IdeogramV3(ComfyNodeABC): # Process image img_np = (input_tensor.numpy() * 255).astype(np.uint8) img = Image.fromarray(img_np) - img_byte_arr = io.BytesIO() + img_byte_arr = BytesIO() img.save(img_byte_arr, format="PNG") img_byte_arr.seek(0) img_binary = img_byte_arr @@ -695,7 +673,7 @@ class IdeogramV3(ComfyNodeABC): # Process mask - white areas will be replaced mask_np = (mask.squeeze().cpu().numpy() * 255).astype(np.uint8) mask_img = Image.fromarray(mask_np) - mask_byte_arr = io.BytesIO() + mask_byte_arr = BytesIO() mask_img.save(mask_byte_arr, format="PNG") mask_byte_arr.seek(0) mask_binary = mask_byte_arr @@ -729,7 +707,7 @@ class IdeogramV3(ComfyNodeABC): "mask": mask_binary, }, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) elif image is not None or mask is not None: @@ -770,7 +748,7 @@ class IdeogramV3(ComfyNodeABC): response_model=IdeogramGenerateResponse, ), request=gen_request, - auth_kwargs=kwargs, + auth_kwargs=auth, ) # Execute the operation and process response @@ -784,18 +762,18 @@ class IdeogramV3(ComfyNodeABC): if not image_urls: raise Exception("No image URLs were generated in the response") - 
display_image_urls_on_node(image_urls, unique_id) - return (await download_and_process_images(image_urls),) + display_image_urls_on_node(image_urls, cls.hidden.unique_id) + return comfy_io.NodeOutput(await download_and_process_images(image_urls)) -NODE_CLASS_MAPPINGS = { - "IdeogramV1": IdeogramV1, - "IdeogramV2": IdeogramV2, - "IdeogramV3": IdeogramV3, -} +class IdeogramExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + IdeogramV1, + IdeogramV2, + IdeogramV3, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "IdeogramV1": "Ideogram V1", - "IdeogramV2": "Ideogram V2", - "IdeogramV3": "Ideogram V3", -} +async def comfy_entrypoint() -> IdeogramExtension: + return IdeogramExtension() From f7bd5e58dd03e799e02f6851b84b51e14ad0da7b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 21 Aug 2025 20:18:04 -0700 Subject: [PATCH 0480/1073] Make it easier to implement future qwen controlnets. (#9485) --- comfy/controlnet.py | 4 ++-- comfy/ldm/qwen_image/model.py | 16 +++++++++++++--- comfy/model_detection.py | 2 ++ 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 988acdb57..6cb69dcdf 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -236,11 +236,11 @@ class ControlNet(ControlBase): self.cond_hint = None compression_ratio = self.compression_ratio if self.vae is not None: - compression_ratio *= self.vae.downscale_ratio + compression_ratio *= self.vae.spacial_compression_encode() else: if self.latent_format is not None: raise ValueError("This Controlnet needs a VAE but none was provided, please use a ControlNetApply node with a VAE input and connect it.") - self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * compression_ratio, x_noisy.shape[2] * compression_ratio, self.upscale_algorithm, "center") + self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original, x_noisy.shape[-1] * compression_ratio, x_noisy.shape[-2] * compression_ratio, self.upscale_algorithm, "center") self.cond_hint = self.preprocess_image(self.cond_hint) if self.vae is not None: loaded_models = comfy.model_management.loaded_models(only_currently_used=True) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 2503583cb..d0e39833a 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -293,6 +293,7 @@ class QwenImageTransformer2DModel(nn.Module): guidance_embeds: bool = False, axes_dims_rope: Tuple[int, int, int] = (16, 56, 56), image_model=None, + final_layer=True, dtype=None, device=None, operations=None, @@ -300,6 +301,7 @@ class QwenImageTransformer2DModel(nn.Module): super().__init__() self.dtype = dtype self.patch_size = patch_size + self.in_channels = in_channels self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim @@ -329,9 +331,9 @@ class QwenImageTransformer2DModel(nn.Module): for _ in range(num_layers) ]) - self.norm_out = LastLayer(self.inner_dim, self.inner_dim, dtype=dtype, device=device, operations=operations) - self.proj_out = operations.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True, dtype=dtype, device=device) - self.gradient_checkpointing = False + if final_layer: + self.norm_out = LastLayer(self.inner_dim, self.inner_dim, dtype=dtype, device=device, operations=operations) + self.proj_out = operations.Linear(self.inner_dim, patch_size * patch_size * 
self.out_channels, bias=True, dtype=dtype, device=device) def process_img(self, x, index=0, h_offset=0, w_offset=0): bs, c, t, h, w = x.shape @@ -362,6 +364,7 @@ class QwenImageTransformer2DModel(nn.Module): guidance: torch.Tensor = None, ref_latents=None, transformer_options={}, + control=None, **kwargs ): timestep = timesteps @@ -443,6 +446,13 @@ class QwenImageTransformer2DModel(nn.Module): hidden_states = out["img"] encoder_hidden_states = out["txt"] + if control is not None: # Controlnet + control_i = control.get("input") + if i < len(control_i): + add = control_i[i] + if add is not None: + hidden_states += add + hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 2bec0541e..0caff53e0 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -492,6 +492,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): if '{}txt_norm.weight'.format(key_prefix) in state_dict_keys: # Qwen Image dit_config = {} dit_config["image_model"] = "qwen_image" + dit_config["in_channels"] = state_dict['{}img_in.weight'.format(key_prefix)].shape[1] + dit_config["num_layers"] = count_blocks(state_dict_keys, '{}transformer_blocks.'.format(key_prefix) + '{}.') return dit_config if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys: From ff57793659702d502506047445f0972b10b6b9fe Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 21 Aug 2025 21:53:11 -0700 Subject: [PATCH 0481/1073] Support InstantX Qwen controlnet. (#9488) --- comfy/controlnet.py | 13 +++++ comfy/ldm/qwen_image/controlnet.py | 77 ++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) create mode 100644 comfy/ldm/qwen_image/controlnet.py diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 6cb69dcdf..e3dfedf55 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -36,6 +36,7 @@ import comfy.ldm.cascade.controlnet import comfy.cldm.mmdit import comfy.ldm.hydit.controlnet import comfy.ldm.flux.controlnet +import comfy.ldm.qwen_image.controlnet import comfy.cldm.dit_embedder from typing import TYPE_CHECKING if TYPE_CHECKING: @@ -582,6 +583,15 @@ def load_controlnet_flux_instantx(sd, model_options={}): control = ControlNet(control_model, compression_ratio=1, latent_format=latent_format, concat_mask=concat_mask, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds) return control +def load_controlnet_qwen_instantx(sd, model_options={}): + model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device = controlnet_config(sd, model_options=model_options) + control_model = comfy.ldm.qwen_image.controlnet.QwenImageControlNetModel(operations=operations, device=offload_device, dtype=unet_dtype, **model_config.unet_config) + control_model = controlnet_load_state_dict(control_model, sd) + latent_format = comfy.latent_formats.Wan21() + extra_conds = [] + control = ControlNet(control_model, compression_ratio=1, latent_format=latent_format, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds) + return control + def convert_mistoline(sd): return comfy.utils.state_dict_prefix_replace(sd, {"single_controlnet_blocks.": "controlnet_single_blocks."}) @@ -655,8 +665,11 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}): return load_controlnet_sd35(controlnet_data, model_options=model_options) #Stability sd3.5 format 
else: return load_controlnet_mmdit(controlnet_data, model_options=model_options) #SD3 diffusers controlnet + elif "transformer_blocks.0.img_mlp.net.0.proj.weight" in controlnet_data: + return load_controlnet_qwen_instantx(controlnet_data, model_options=model_options) elif "controlnet_x_embedder.weight" in controlnet_data: return load_controlnet_flux_instantx(controlnet_data, model_options=model_options) + elif "controlnet_blocks.0.linear.weight" in controlnet_data: #mistoline flux return load_controlnet_flux_xlabs_mistoline(convert_mistoline(controlnet_data), mistoline=True, model_options=model_options) diff --git a/comfy/ldm/qwen_image/controlnet.py b/comfy/ldm/qwen_image/controlnet.py new file mode 100644 index 000000000..92ac3cf0a --- /dev/null +++ b/comfy/ldm/qwen_image/controlnet.py @@ -0,0 +1,77 @@ +import torch +import math + +from .model import QwenImageTransformer2DModel + + +class QwenImageControlNetModel(QwenImageTransformer2DModel): + def __init__( + self, + extra_condition_channels=0, + dtype=None, + device=None, + operations=None, + **kwargs + ): + super().__init__(final_layer=False, dtype=dtype, device=device, operations=operations, **kwargs) + self.main_model_double = 60 + + # controlnet_blocks + self.controlnet_blocks = torch.nn.ModuleList([]) + for _ in range(len(self.transformer_blocks)): + self.controlnet_blocks.append(operations.Linear(self.inner_dim, self.inner_dim, device=device, dtype=dtype)) + self.controlnet_x_embedder = operations.Linear(self.in_channels + extra_condition_channels, self.inner_dim, device=device, dtype=dtype) + + def forward( + self, + x, + timesteps, + context, + attention_mask=None, + guidance: torch.Tensor = None, + ref_latents=None, + hint=None, + transformer_options={}, + **kwargs + ): + timestep = timesteps + encoder_hidden_states = context + encoder_hidden_states_mask = attention_mask + + hidden_states, img_ids, orig_shape = self.process_img(x) + hint, _, _ = self.process_img(hint) + + txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size) // 2, ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size) // 2)) + txt_ids = torch.arange(txt_start, txt_start + context.shape[1], device=x.device).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) + ids = torch.cat((txt_ids, img_ids), dim=1) + image_rotary_emb = self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype) + del ids, txt_ids, img_ids + + hidden_states = self.img_in(hidden_states) + self.controlnet_x_embedder(hint) + encoder_hidden_states = self.txt_norm(encoder_hidden_states) + encoder_hidden_states = self.txt_in(encoder_hidden_states) + + if guidance is not None: + guidance = guidance * 1000 + + temb = ( + self.time_text_embed(timestep, hidden_states) + if guidance is None + else self.time_text_embed(timestep, guidance, hidden_states) + ) + + repeat = math.ceil(self.main_model_double / len(self.controlnet_blocks)) + + controlnet_block_samples = () + for i, block in enumerate(self.transformer_blocks): + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_states_mask=encoder_hidden_states_mask, + temb=temb, + image_rotary_emb=image_rotary_emb, + ) + + controlnet_block_samples = controlnet_block_samples + (self.controlnet_blocks[i](hidden_states),) * repeat + + return {"input": controlnet_block_samples[:self.main_model_double]} From 497d41fb500668635aa4a782549e9a0caa24375e Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 22 
Aug 2025 20:50:35 +0300 Subject: [PATCH 0482/1073] feat(api-nodes): change "OpenAI Chat" display name to "OpenAI ChatGPT" (#9443) --- comfy_api_nodes/nodes_openai.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index 674c9ede0..e3b81de75 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -998,7 +998,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "OpenAIDalle2": "OpenAI DALL·E 2", "OpenAIDalle3": "OpenAI DALL·E 3", "OpenAIGPTImage1": "OpenAI GPT Image 1", - "OpenAIChatNode": "OpenAI Chat", - "OpenAIInputFiles": "OpenAI Chat Input Files", - "OpenAIChatConfig": "OpenAI Chat Advanced Options", + "OpenAIChatNode": "OpenAI ChatGPT", + "OpenAIInputFiles": "OpenAI ChatGPT Input Files", + "OpenAIChatConfig": "OpenAI ChatGPT Advanced Options", } From 050c67323c33f6543309d4f09df706ec8c9a1389 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 22 Aug 2025 20:51:14 +0300 Subject: [PATCH 0483/1073] feat(api-nodes): add copy button to Gemini Chat node (#9440) --- comfy_api_nodes/nodes_gemini.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index ba4167a50..78c402a7a 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -5,7 +5,10 @@ See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/infer from __future__ import annotations +import json +import time import os +import uuid from enum import Enum from typing import Optional, Literal @@ -350,7 +353,27 @@ class GeminiNode(ComfyNodeABC): # Get result output output_text = self.get_text_from_response(response) if unique_id and output_text: - PromptServer.instance.send_progress_text(output_text, node_id=unique_id) + # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. 
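# (Sketch, not part of this patch: "display_component" asks the frontend to
# mount the named widget with the given props, so the payload built below
# reaches the client roughly as
#   {"node_id": <unique_id>, "component": "ChatHistoryWidget",
#    "props": {"history": "<JSON-encoded list of prompt/response entries>"}}.
# The ChatHistoryWidget contract is assumed here from its use for chat
# history elsewhere; only the payload shape is taken from the code below.)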
+ render_spec = { + "node_id": unique_id, + "component": "ChatHistoryWidget", + "props": { + "history": json.dumps( + [ + { + "prompt": prompt, + "response": output_text, + "response_id": str(uuid.uuid4()), + "timestamp": time.time(), + } + ] + ), + }, + } + PromptServer.instance.send_sync( + "display_component", + render_spec, + ) return (output_text or "Empty response from Gemini model...",) From ca4e96a8ae6c9ee8d40fe35100ed9b2247e71e40 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sat, 23 Aug 2025 05:40:18 +0800 Subject: [PATCH 0484/1073] Update template to 0.1.65 (#9501) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8d928d826..6b53fabc1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.9 -comfyui-workflow-templates==0.1.62 +comfyui-workflow-templates==0.1.65 comfyui-embedded-docs==0.2.6 torch torchsde From fe31ad02768c66c61b3dc12f5d4bdfe8990ce25c Mon Sep 17 00:00:00 2001 From: contentis Date: Sat, 23 Aug 2025 01:39:15 +0200 Subject: [PATCH 0485/1073] Add elementwise fusions (#9495) * Add elementwise fusions * Add addcmul pattern to Qwen --- comfy/ldm/modules/diffusionmodules/mmdit.py | 12 +++++++----- comfy/ldm/qwen_image/model.py | 12 ++++++------ comfy/ldm/wan/model.py | 14 +++++++------- 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/mmdit.py b/comfy/ldm/modules/diffusionmodules/mmdit.py index eaf3e73a4..4d6beba2d 100644 --- a/comfy/ldm/modules/diffusionmodules/mmdit.py +++ b/comfy/ldm/modules/diffusionmodules/mmdit.py @@ -109,7 +109,7 @@ class PatchEmbed(nn.Module): def modulate(x, shift, scale): if shift is None: shift = torch.zeros_like(scale) - return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) + return torch.addcmul(shift.unsqueeze(1), x, 1+ scale.unsqueeze(1)) ################################################################################# @@ -564,10 +564,7 @@ class DismantledBlock(nn.Module): assert not self.pre_only attn1 = self.attn.post_attention(attn) attn2 = self.attn2.post_attention(attn2) - out1 = gate_msa.unsqueeze(1) * attn1 - out2 = gate_msa2.unsqueeze(1) * attn2 - x = x + out1 - x = x + out2 + x = gate_cat(x, gate_msa, gate_msa2, attn1, attn2) x = x + gate_mlp.unsqueeze(1) * self.mlp( modulate(self.norm2(x), shift_mlp, scale_mlp) ) @@ -594,6 +591,11 @@ class DismantledBlock(nn.Module): ) return self.post_attention(attn, *intermediates) +def gate_cat(x, gate_msa, gate_msa2, attn1, attn2): + out1 = gate_msa.unsqueeze(1) * attn1 + out2 = gate_msa2.unsqueeze(1) * attn2 + x = torch.stack([x, out1, out2], dim=0).sum(dim=0) + return x def block_mixing(*args, use_checkpoint=True, **kwargs): if use_checkpoint: diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index d0e39833a..af00ff119 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -214,9 +214,9 @@ class QwenImageTransformerBlock(nn.Module): operations=operations, ) - def _modulate(self, x, mod_params): - shift, scale, gate = mod_params.chunk(3, dim=-1) - return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1), gate.unsqueeze(1) + def _modulate(self, x: torch.Tensor, mod_params: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + shift, scale, gate = torch.chunk(mod_params, 3, dim=-1) + return torch.addcmul(shift.unsqueeze(1), x, 1 + scale.unsqueeze(1)), gate.unsqueeze(1) def forward( self, @@ -248,11 +248,11 @@ class QwenImageTransformerBlock(nn.Module): img_normed2 = 
self.img_norm2(hidden_states) img_modulated2, img_gate2 = self._modulate(img_normed2, img_mod2) - hidden_states = hidden_states + img_gate2 * self.img_mlp(img_modulated2) + hidden_states = torch.addcmul(hidden_states, img_gate2, self.img_mlp(img_modulated2)) txt_normed2 = self.txt_norm2(encoder_hidden_states) txt_modulated2, txt_gate2 = self._modulate(txt_normed2, txt_mod2) - encoder_hidden_states = encoder_hidden_states + txt_gate2 * self.txt_mlp(txt_modulated2) + encoder_hidden_states = torch.addcmul(encoder_hidden_states, txt_gate2, self.txt_mlp(txt_modulated2)) return encoder_hidden_states, hidden_states @@ -275,7 +275,7 @@ class LastLayer(nn.Module): def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(conditioning_embedding)) scale, shift = torch.chunk(emb, 2, dim=1) - x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] + x = torch.addcmul(shift[:, None, :], self.norm(x), (1 + scale)[:, None, :]) return x diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 9d3741be3..0726b8e1b 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -148,8 +148,8 @@ WAN_CROSSATTENTION_CLASSES = { def repeat_e(e, x): repeats = 1 - if e.shape[1] > 1: - repeats = x.shape[1] // e.shape[1] + if e.size(1) > 1: + repeats = x.size(1) // e.size(1) if repeats == 1: return e return torch.repeat_interleave(e, repeats, dim=1) @@ -219,15 +219,15 @@ class WanAttentionBlock(nn.Module): # self-attention y = self.self_attn( - self.norm1(x) * (1 + repeat_e(e[1], x)) + repeat_e(e[0], x), + torch.addcmul(repeat_e(e[0], x), self.norm1(x), 1 + repeat_e(e[1], x)), freqs) - x = x + y * repeat_e(e[2], x) + x = torch.addcmul(x, y, repeat_e(e[2], x)) # cross-attention & ffn x = x + self.cross_attn(self.norm3(x), context, context_img_len=context_img_len) - y = self.ffn(self.norm2(x) * (1 + repeat_e(e[4], x)) + repeat_e(e[3], x)) - x = x + y * repeat_e(e[5], x) + y = self.ffn(torch.addcmul(repeat_e(e[3], x), self.norm2(x), 1 + repeat_e(e[4], x))) + x = torch.addcmul(x, y, repeat_e(e[5], x)) return x @@ -342,7 +342,7 @@ class Head(nn.Module): else: e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device).unsqueeze(0) + e.unsqueeze(2)).unbind(2) - x = (self.head(self.norm(x) * (1 + repeat_e(e[1], x)) + repeat_e(e[0], x))) + x = (self.head(torch.addcmul(repeat_e(e[0], x), self.norm(x), 1 + repeat_e(e[1], x)))) return x From fc247150fec502b1834390516b556a87003f1d79 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Fri, 22 Aug 2025 19:41:08 -0700 Subject: [PATCH 0486/1073] Implement EasyCache and Invent LazyCache (#9496) * Attempting a universal implementation of EasyCache, starting with flux as test; I screwed up the math a bit, but when I set it just right it works. 
* Fixed math to make threshold work as expected, refactored code to use EasyCacheHolder instead of a dict wrapped by object * Use sigmas from transformer_options instead of timesteps to be compatible with a greater number of models, make end_percent work * Make log statement useful when not skipping, preparing for per-cond caching * Added DIFFUSION_MODEL wrapper around forward function for wan model * Add subsampling for heuristic inputs * Add subsampling to output_prev (output_prev_subsampled now) * Properly consider conds in EasyCache logic * Created SuperEasyCache to test what happens if caching and reuse are moved outside the scope of conds, added PREDICT_NOISE wrapper to facilitate this test * Change max reuse_threshold to 3.0 * Mark EasyCache/SuperEasyCache as experimental (beta) * Make Lumina2 compatible with EasyCache * Add EasyCache support for Qwen Image * Fix missing comma, curse you Cursor * Add EasyCache support to AceStep * Add EasyCache support to Chroma * Added EasyCache support to Cosmos Predict t2i * Make EasyCache not crash with Cosmos Predict ImageToVideo latents, but does not work well at all * Add EasyCache support to hidream * Added EasyCache support to hunyuan video * Added EasyCache support to hunyuan3d * Added EasyCache support to LTXV (not very good, but does not crash) * Implemented EasyCache for aura_flow * Renamed SuperEasyCache to LazyCache, hardcoded subsample_factor to 8 on nodes * Extra logging when verbose is true for EasyCache --- comfy/ldm/ace/model.py | 24 +- comfy/ldm/aura/mmdit.py | 8 + comfy/ldm/chroma/model.py | 8 + comfy/ldm/cosmos/model.py | 38 +++ comfy/ldm/cosmos/predict2.py | 17 +- comfy/ldm/flux/model.py | 8 + comfy/ldm/hidream/model.py | 19 +- comfy/ldm/hunyuan3d/model.py | 8 + comfy/ldm/hunyuan_video/model.py | 8 + comfy/ldm/lightricks/model.py | 8 + comfy/ldm/lumina/model.py | 10 +- comfy/ldm/qwen_image/model.py | 10 +- comfy/ldm/wan/model.py | 8 + comfy/patcher_extension.py | 1 + comfy/samplers.py | 9 +- comfy_extras/nodes_easycache.py | 459 +++++++++++++++++++++++++++++++ nodes.py | 3 +- 17 files changed, 639 insertions(+), 7 deletions(-) create mode 100644 comfy_extras/nodes_easycache.py diff --git a/comfy/ldm/ace/model.py b/comfy/ldm/ace/model.py index 12c524701..41d85eeb5 100644 --- a/comfy/ldm/ace/model.py +++ b/comfy/ldm/ace/model.py @@ -19,6 +19,7 @@ import torch from torch import nn import comfy.model_management +import comfy.patcher_extension from comfy.ldm.lightricks.model import TimestepEmbedding, Timesteps from .attention import LinearTransformerBlock, t2i_modulate @@ -343,7 +344,28 @@ class ACEStepTransformer2DModel(nn.Module): output = self.final_layer(hidden_states, embedded_timestep, output_length) return output - def forward( + def forward(self, + x, + timestep, + attention_mask=None, + context: Optional[torch.Tensor] = None, + text_attention_mask: Optional[torch.LongTensor] = None, + speaker_embeds: Optional[torch.FloatTensor] = None, + lyric_token_idx: Optional[torch.LongTensor] = None, + lyric_mask: Optional[torch.LongTensor] = None, + block_controlnet_hidden_states: Optional[Union[List[torch.Tensor], torch.Tensor]] = None, + controlnet_scale: Union[float, torch.Tensor] = 1.0, + lyrics_strength=1.0, + **kwargs + ): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, kwargs.get("transformer_options", {})) + ).execute(x, timestep, attention_mask, context, text_attention_mask,
speaker_embeds, lyric_token_idx, lyric_mask, block_controlnet_hidden_states, + controlnet_scale, lyrics_strength, **kwargs) + + def _forward( self, x, timestep, diff --git a/comfy/ldm/aura/mmdit.py b/comfy/ldm/aura/mmdit.py index 1258ae11f..d7f32b5e8 100644 --- a/comfy/ldm/aura/mmdit.py +++ b/comfy/ldm/aura/mmdit.py @@ -9,6 +9,7 @@ import torch.nn.functional as F from comfy.ldm.modules.attention import optimized_attention import comfy.ops +import comfy.patcher_extension import comfy.ldm.common_dit def modulate(x, shift, scale): @@ -436,6 +437,13 @@ class MMDiT(nn.Module): return x + pos_encoding.reshape(1, -1, self.positional_encoding.shape[-1]) def forward(self, x, timestep, context, transformer_options={}, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, transformer_options, **kwargs) + + def _forward(self, x, timestep, context, transformer_options={}, **kwargs): patches_replace = transformer_options.get("patches_replace", {}) # patchify x, add PE b, c, h, w = x.shape diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index 06021d4f2..5cff44dc8 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -5,6 +5,7 @@ from dataclasses import dataclass import torch from torch import Tensor, nn from einops import rearrange, repeat +import comfy.patcher_extension import comfy.ldm.common_dit from comfy.ldm.flux.layers import ( @@ -253,6 +254,13 @@ class Chroma(nn.Module): return img def forward(self, x, timestep, context, guidance, control=None, transformer_options={}, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, guidance, control, transformer_options, **kwargs) + + def _forward(self, x, timestep, context, guidance, control=None, transformer_options={}, **kwargs): bs, c, h, w = x.shape x = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size)) diff --git a/comfy/ldm/cosmos/model.py b/comfy/ldm/cosmos/model.py index 4836e0b69..53698b758 100644 --- a/comfy/ldm/cosmos/model.py +++ b/comfy/ldm/cosmos/model.py @@ -27,6 +27,8 @@ from torchvision import transforms from enum import Enum import logging +import comfy.patcher_extension + from .blocks import ( FinalLayer, GeneralDITTransformerBlock, @@ -435,6 +437,42 @@ class GeneralDIT(nn.Module): latent_condition_sigma: Optional[torch.Tensor] = None, condition_video_augment_sigma: Optional[torch.Tensor] = None, **kwargs, + ): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, kwargs.get("transformer_options", {})) + ).execute(x, + timesteps, + context, + attention_mask, + fps, + image_size, + padding_mask, + scalar_feature, + data_type, + latent_condition, + latent_condition_sigma, + condition_video_augment_sigma, + **kwargs) + + def _forward( + self, + x: torch.Tensor, + timesteps: torch.Tensor, + context: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + # crossattn_emb: torch.Tensor, + # crossattn_mask: Optional[torch.Tensor] = None, + fps: Optional[torch.Tensor] = None, + image_size: Optional[torch.Tensor] = None, + padding_mask: 
Optional[torch.Tensor] = None, + scalar_feature: Optional[torch.Tensor] = None, + data_type: Optional[DataType] = DataType.VIDEO, + latent_condition: Optional[torch.Tensor] = None, + latent_condition_sigma: Optional[torch.Tensor] = None, + condition_video_augment_sigma: Optional[torch.Tensor] = None, + **kwargs, ): """ Args: diff --git a/comfy/ldm/cosmos/predict2.py b/comfy/ldm/cosmos/predict2.py index 316117f77..fcc83ba76 100644 --- a/comfy/ldm/cosmos/predict2.py +++ b/comfy/ldm/cosmos/predict2.py @@ -11,6 +11,7 @@ import math from .position_embedding import VideoRopePosition3DEmb, LearnablePosEmbAxis from torchvision import transforms +import comfy.patcher_extension from comfy.ldm.modules.attention import optimized_attention def apply_rotary_pos_emb( @@ -805,7 +806,21 @@ class MiniTrainDIT(nn.Module): ) return x_B_C_Tt_Hp_Wp - def forward( + def forward(self, + x: torch.Tensor, + timesteps: torch.Tensor, + context: torch.Tensor, + fps: Optional[torch.Tensor] = None, + padding_mask: Optional[torch.Tensor] = None, + **kwargs, + ): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, kwargs.get("transformer_options", {})) + ).execute(x, timesteps, context, fps, padding_mask, **kwargs) + + def _forward( self, x: torch.Tensor, timesteps: torch.Tensor, diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index c4de82795..0a77fa097 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -6,6 +6,7 @@ import torch from torch import Tensor, nn from einops import rearrange, repeat import comfy.ldm.common_dit +import comfy.patcher_extension from .layers import ( DoubleStreamBlock, @@ -214,6 +215,13 @@ class Flux(nn.Module): return img, repeat(img_ids, "h w c -> b (h w) c", b=bs) def forward(self, x, timestep, context, y=None, guidance=None, ref_latents=None, control=None, transformer_options={}, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, y, guidance, ref_latents, control, transformer_options, **kwargs) + + def _forward(self, x, timestep, context, y=None, guidance=None, ref_latents=None, control=None, transformer_options={}, **kwargs): bs, c, h_orig, w_orig = x.shape patch_size = self.patch_size diff --git a/comfy/ldm/hidream/model.py b/comfy/ldm/hidream/model.py index 0305747bf..ae49cf945 100644 --- a/comfy/ldm/hidream/model.py +++ b/comfy/ldm/hidream/model.py @@ -13,6 +13,7 @@ from comfy.ldm.flux.layers import LastLayer from comfy.ldm.modules.attention import optimized_attention import comfy.model_management +import comfy.patcher_extension import comfy.ldm.common_dit @@ -692,7 +693,23 @@ class HiDreamImageTransformer2DModel(nn.Module): raise NotImplementedError return x, x_masks, img_sizes - def forward( + def forward(self, + x: torch.Tensor, + t: torch.Tensor, + y: Optional[torch.Tensor] = None, + context: Optional[torch.Tensor] = None, + encoder_hidden_states_llama3=None, + image_cond=None, + control = None, + transformer_options = {}, + ): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, t, y, context, encoder_hidden_states_llama3, image_cond, control, 
transformer_options) + + def _forward( self, x: torch.Tensor, t: torch.Tensor, diff --git a/comfy/ldm/hunyuan3d/model.py b/comfy/ldm/hunyuan3d/model.py index 4e18358f0..0fa5e78c1 100644 --- a/comfy/ldm/hunyuan3d/model.py +++ b/comfy/ldm/hunyuan3d/model.py @@ -7,6 +7,7 @@ from comfy.ldm.flux.layers import ( SingleStreamBlock, timestep_embedding, ) +import comfy.patcher_extension class Hunyuan3Dv2(nn.Module): @@ -67,6 +68,13 @@ class Hunyuan3Dv2(nn.Module): self.final_layer = LastLayer(hidden_size, 1, in_channels, dtype=dtype, device=device, operations=operations) def forward(self, x, timestep, context, guidance=None, transformer_options={}, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, guidance, transformer_options, **kwargs) + + def _forward(self, x, timestep, context, guidance=None, transformer_options={}, **kwargs): x = x.movedim(-1, -2) timestep = 1.0 - timestep txt = context diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py index fbd8d4196..da1011596 100644 --- a/comfy/ldm/hunyuan_video/model.py +++ b/comfy/ldm/hunyuan_video/model.py @@ -1,6 +1,7 @@ #Based on Flux code because of weird hunyuan video code license. import torch +import comfy.patcher_extension import comfy.ldm.flux.layers import comfy.ldm.modules.diffusionmodules.mmdit from comfy.ldm.modules.attention import optimized_attention @@ -348,6 +349,13 @@ class HunyuanVideo(nn.Module): return repeat(img_ids, "t h w c -> b (t h w) c", b=bs) def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, y, guidance, attention_mask, guiding_frame_index, ref_latent, control, transformer_options, **kwargs) + + def _forward(self, x, timestep, context, y, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): bs, c, t, h, w = x.shape img_ids = self.img_ids(x) txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype) diff --git a/comfy/ldm/lightricks/model.py b/comfy/ldm/lightricks/model.py index ad9a7daea..aa2ea62b1 100644 --- a/comfy/ldm/lightricks/model.py +++ b/comfy/ldm/lightricks/model.py @@ -1,5 +1,6 @@ import torch from torch import nn +import comfy.patcher_extension import comfy.ldm.modules.attention import comfy.ldm.common_dit from einops import rearrange @@ -420,6 +421,13 @@ class LTXVModel(torch.nn.Module): self.patchifier = SymmetricPatchifier(1) def forward(self, x, timestep, context, attention_mask, frame_rate=25, transformer_options={}, keyframe_idxs=None, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, attention_mask, frame_rate, transformer_options, keyframe_idxs, **kwargs) + + def _forward(self, x, timestep, context, attention_mask, frame_rate=25, transformer_options={}, keyframe_idxs=None, **kwargs): patches_replace = 
transformer_options.get("patches_replace", {}) orig_shape = list(x.shape) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index f8dc4d7db..e08ed817d 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -11,6 +11,7 @@ import comfy.ldm.common_dit from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder from comfy.ldm.modules.attention import optimized_attention_masked from comfy.ldm.flux.layers import EmbedND +import comfy.patcher_extension def modulate(x, scale): @@ -590,8 +591,15 @@ class NextDiT(nn.Module): return padded_full_embed, mask, img_sizes, l_effective_cap_len, freqs_cis - # def forward(self, x, t, cap_feats, cap_mask): def forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, kwargs.get("transformer_options", {})) + ).execute(x, timesteps, context, num_tokens, attention_mask, **kwargs) + + # def forward(self, x, t, cap_feats, cap_mask): + def _forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs): t = 1.0 - timesteps cap_feats = context cap_mask = attention_mask diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index af00ff119..57a458210 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -9,6 +9,7 @@ from comfy.ldm.lightricks.model import TimestepEmbedding, Timesteps from comfy.ldm.modules.attention import optimized_attention_masked from comfy.ldm.flux.layers import EmbedND import comfy.ldm.common_dit +import comfy.patcher_extension class GELU(nn.Module): def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True, dtype=None, device=None, operations=None): @@ -355,7 +356,14 @@ class QwenImageTransformer2DModel(nn.Module): img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) - (w_len // 2) return hidden_states, repeat(img_ids, "h w c -> b (h w) c", b=bs), orig_shape - def forward( + def forward(self, x, timestep, context, attention_mask=None, guidance=None, ref_latents=None, transformer_options={}, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, attention_mask, guidance, ref_latents, transformer_options, **kwargs) + + def _forward( self, x, timesteps, diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 0726b8e1b..1885d9730 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -11,6 +11,7 @@ from comfy.ldm.flux.layers import EmbedND from comfy.ldm.flux.math import apply_rope import comfy.ldm.common_dit import comfy.model_management +import comfy.patcher_extension def sinusoidal_embedding_1d(dim, position): @@ -573,6 +574,13 @@ class WanModel(torch.nn.Module): return x def forward(self, x, timestep, context, clip_fea=None, time_dim_concat=None, transformer_options={}, **kwargs): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, clip_fea, time_dim_concat, transformer_options, **kwargs) + + def 
_forward(self, x, timestep, context, clip_fea=None, time_dim_concat=None, transformer_options={}, **kwargs): bs, c, t, h, w = x.shape x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size) diff --git a/comfy/patcher_extension.py b/comfy/patcher_extension.py index 965958f4c..46cc7b2a8 100644 --- a/comfy/patcher_extension.py +++ b/comfy/patcher_extension.py @@ -50,6 +50,7 @@ class WrappersMP: OUTER_SAMPLE = "outer_sample" PREPARE_SAMPLING = "prepare_sampling" SAMPLER_SAMPLE = "sampler_sample" + PREDICT_NOISE = "predict_noise" CALC_COND_BATCH = "calc_cond_batch" APPLY_MODEL = "apply_model" DIFFUSION_MODEL = "diffusion_model" diff --git a/comfy/samplers.py b/comfy/samplers.py index d5390d64e..ec7e0b350 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -953,7 +953,14 @@ class CFGGuider: self.original_conds[k] = comfy.sampler_helpers.convert_cond(conds[k]) def __call__(self, *args, **kwargs): - return self.predict_noise(*args, **kwargs) + return self.outer_predict_noise(*args, **kwargs) + + def outer_predict_noise(self, x, timestep, model_options={}, seed=None): + return comfy.patcher_extension.WrapperExecutor.new_class_executor( + self.predict_noise, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.PREDICT_NOISE, self.model_options, is_model_options=True) + ).execute(x, timestep, model_options, seed) def predict_noise(self, x, timestep, model_options={}, seed=None): return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py new file mode 100644 index 000000000..e2b2efcd9 --- /dev/null +++ b/comfy_extras/nodes_easycache.py @@ -0,0 +1,459 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Union +from comfy_api.latest import io, ComfyExtension +import comfy.patcher_extension +import logging +import torch +import comfy.model_patcher +if TYPE_CHECKING: + from uuid import UUID + + +def easycache_forward_wrapper(executor, *args, **kwargs): + # get values from args + x: torch.Tensor = args[0] + transformer_options: dict[str] = args[-1] + if not isinstance(transformer_options, dict): + transformer_options = kwargs.get("transformer_options") + if not transformer_options: + transformer_options = args[-2] + easycache: EasyCacheHolder = transformer_options["easycache"] + sigmas = transformer_options["sigmas"] + uuids = transformer_options["uuids"] + if sigmas is not None and easycache.is_past_end_timestep(sigmas): + return executor(*args, **kwargs) + # prepare next x_prev + has_first_cond_uuid = easycache.has_first_cond_uuid(uuids) + next_x_prev = x + input_change = None + do_easycache = easycache.should_do_easycache(sigmas) + if do_easycache: + # if first cond marked this step for skipping, skip it and use appropriate cached values + if easycache.skip_current_step: + if easycache.verbose: + logging.info(f"EasyCache [verbose] - was marked to skip this step by {easycache.first_cond_uuid}. 
Present uuids: {uuids}") + return easycache.apply_cache_diff(x, uuids) + if easycache.initial_step: + easycache.first_cond_uuid = uuids[0] + has_first_cond_uuid = easycache.has_first_cond_uuid(uuids) + easycache.initial_step = False + if has_first_cond_uuid: + if easycache.has_x_prev_subsampled(): + input_change = (easycache.subsample(x, uuids, clone=False) - easycache.x_prev_subsampled).flatten().abs().mean() + if easycache.has_output_prev_norm() and easycache.has_relative_transformation_rate(): + approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm + easycache.cumulative_change_rate += approx_output_change_rate + if easycache.cumulative_change_rate < easycache.reuse_threshold: + if easycache.verbose: + logging.info(f"EasyCache [verbose] - skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") + # other conds should also skip this step, and instead use their cached values + easycache.skip_current_step = True + return easycache.apply_cache_diff(x, uuids) + else: + if easycache.verbose: + logging.info(f"EasyCache [verbose] - NOT skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") + easycache.cumulative_change_rate = 0.0 + + output: torch.Tensor = executor(*args, **kwargs) + if has_first_cond_uuid and easycache.has_output_prev_norm(): + output_change = (easycache.subsample(output, uuids, clone=False) - easycache.output_prev_subsampled).flatten().abs().mean() + if easycache.verbose: + output_change_rate = output_change / easycache.output_prev_norm + easycache.output_change_rates.append(output_change_rate.item()) + if easycache.has_relative_transformation_rate(): + approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm + easycache.approx_output_change_rates.append(approx_output_change_rate.item()) + if easycache.verbose: + logging.info(f"EasyCache [verbose] - approx_output_change_rate: {approx_output_change_rate}") + if input_change is not None: + easycache.relative_transformation_rate = output_change / input_change + if easycache.verbose: + logging.info(f"EasyCache [verbose] - output_change_rate: {output_change_rate}") + # TODO: allow cache_diff to be offloaded + easycache.update_cache_diff(output, next_x_prev, uuids) + if has_first_cond_uuid: + easycache.x_prev_subsampled = easycache.subsample(next_x_prev, uuids) + easycache.output_prev_subsampled = easycache.subsample(output, uuids) + easycache.output_prev_norm = output.flatten().abs().mean() + if easycache.verbose: + logging.info(f"EasyCache [verbose] - x_prev_subsampled: {easycache.x_prev_subsampled.shape}") + return output + +def lazycache_predict_noise_wrapper(executor, *args, **kwargs): + # get values from args + x: torch.Tensor = args[0] + timestep: float = args[1] + model_options: dict[str] = args[2] + easycache: LazyCacheHolder = model_options["transformer_options"]["easycache"] + if easycache.is_past_end_timestep(timestep): + return executor(*args, **kwargs) + # prepare next x_prev + next_x_prev = x + input_change = None + do_easycache = easycache.should_do_easycache(timestep) + if do_easycache: + if easycache.has_x_prev_subsampled(): + if easycache.has_x_prev_subsampled(): + input_change = (easycache.subsample(x, clone=False) - easycache.x_prev_subsampled).flatten().abs().mean() + if easycache.has_output_prev_norm() and easycache.has_relative_transformation_rate(): + 
approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm + easycache.cumulative_change_rate += approx_output_change_rate + if easycache.cumulative_change_rate < easycache.reuse_threshold: + if easycache.verbose: + logging.info(f"LazyCache [verbose] - skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") + # other conds should also skip this step, and instead use their cached values + easycache.skip_current_step = True + return easycache.apply_cache_diff(x) + else: + if easycache.verbose: + logging.info(f"LazyCache [verbose] - NOT skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") + easycache.cumulative_change_rate = 0.0 + output: torch.Tensor = executor(*args, **kwargs) + if easycache.has_output_prev_norm(): + output_change = (easycache.subsample(output, clone=False) - easycache.output_prev_subsampled).flatten().abs().mean() + if easycache.verbose: + output_change_rate = output_change / easycache.output_prev_norm + easycache.output_change_rates.append(output_change_rate.item()) + if easycache.has_relative_transformation_rate(): + approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm + easycache.approx_output_change_rates.append(approx_output_change_rate.item()) + if easycache.verbose: + logging.info(f"LazyCache [verbose] - approx_output_change_rate: {approx_output_change_rate}") + if input_change is not None: + easycache.relative_transformation_rate = output_change / input_change + if easycache.verbose: + logging.info(f"LazyCache [verbose] - output_change_rate: {output_change_rate}") + # TODO: allow cache_diff to be offloaded + easycache.update_cache_diff(output, next_x_prev) + easycache.x_prev_subsampled = easycache.subsample(next_x_prev) + easycache.output_prev_subsampled = easycache.subsample(output) + easycache.output_prev_norm = output.flatten().abs().mean() + if easycache.verbose: + logging.info(f"LazyCache [verbose] - x_prev_subsampled: {easycache.x_prev_subsampled.shape}") + return output + +def easycache_calc_cond_batch_wrapper(executor, *args, **kwargs): + model_options = args[-1] + easycache: EasyCacheHolder = model_options["transformer_options"]["easycache"] + easycache.skip_current_step = False + # TODO: check if first_cond_uuid is active at this timestep; otherwise, EasyCache needs to be partially reset + return executor(*args, **kwargs) + +def easycache_sample_wrapper(executor, *args, **kwargs): + """ + This OUTER_SAMPLE wrapper makes sure easycache is prepped for current run, and all memory usage is cleared at the end. 
+ """ + try: + guider = executor.class_obj + orig_model_options = guider.model_options + guider.model_options = comfy.model_patcher.create_model_options_clone(orig_model_options) + # clone and prepare timesteps + guider.model_options["transformer_options"]["easycache"] = guider.model_options["transformer_options"]["easycache"].clone().prepare_timesteps(guider.model_patcher.model.model_sampling) + easycache: Union[EasyCacheHolder, LazyCacheHolder] = guider.model_options['transformer_options']['easycache'] + logging.info(f"{easycache.name} enabled - threshold: {easycache.reuse_threshold}, start_percent: {easycache.start_percent}, end_percent: {easycache.end_percent}") + return executor(*args, **kwargs) + finally: + easycache = guider.model_options['transformer_options']['easycache'] + output_change_rates = easycache.output_change_rates + approx_output_change_rates = easycache.approx_output_change_rates + if easycache.verbose: + logging.info(f"{easycache.name} [verbose] - output_change_rates {len(output_change_rates)}: {output_change_rates}") + logging.info(f"{easycache.name} [verbose] - approx_output_change_rates {len(approx_output_change_rates)}: {approx_output_change_rates}") + total_steps = len(args[3])-1 + logging.info(f"{easycache.name} - skipped {easycache.total_steps_skipped}/{total_steps} steps ({total_steps/(total_steps-easycache.total_steps_skipped):.2f}x speedup).") + easycache.reset() + guider.model_options = orig_model_options + + +class EasyCacheHolder: + def __init__(self, reuse_threshold: float, start_percent: float, end_percent: float, subsample_factor: int, offload_cache_diff: bool, verbose: bool=False): + self.name = "EasyCache" + self.reuse_threshold = reuse_threshold + self.start_percent = start_percent + self.end_percent = end_percent + self.subsample_factor = subsample_factor + self.offload_cache_diff = offload_cache_diff + self.verbose = verbose + # timestep values + self.start_t = 0.0 + self.end_t = 0.0 + # control values + self.relative_transformation_rate: float = None + self.cumulative_change_rate = 0.0 + self.initial_step = True + self.skip_current_step = False + # cache values + self.first_cond_uuid = None + self.x_prev_subsampled: torch.Tensor = None + self.output_prev_subsampled: torch.Tensor = None + self.output_prev_norm: torch.Tensor = None + self.uuid_cache_diffs: dict[UUID, torch.Tensor] = {} + self.output_change_rates = [] + self.approx_output_change_rates = [] + self.total_steps_skipped = 0 + # how to deal with mismatched dims + self.allow_mismatch = True + self.cut_from_start = True + + def is_past_end_timestep(self, timestep: float) -> bool: + return not (timestep[0] > self.end_t).item() + + def should_do_easycache(self, timestep: float) -> bool: + return (timestep[0] <= self.start_t).item() + + def has_x_prev_subsampled(self) -> bool: + return self.x_prev_subsampled is not None + + def has_output_prev_subsampled(self) -> bool: + return self.output_prev_subsampled is not None + + def has_output_prev_norm(self) -> bool: + return self.output_prev_norm is not None + + def has_relative_transformation_rate(self) -> bool: + return self.relative_transformation_rate is not None + + def prepare_timesteps(self, model_sampling): + self.start_t = model_sampling.percent_to_sigma(self.start_percent) + self.end_t = model_sampling.percent_to_sigma(self.end_percent) + return self + + def subsample(self, x: torch.Tensor, uuids: list[UUID], clone: bool = True) -> torch.Tensor: + batch_offset = x.shape[0] // len(uuids) + uuid_idx = uuids.index(self.first_cond_uuid) + if 
self.subsample_factor > 1: + to_return = x[uuid_idx*batch_offset:(uuid_idx+1)*batch_offset, ..., ::self.subsample_factor, ::self.subsample_factor] + if clone: + return to_return.clone() + return to_return + to_return = x[uuid_idx*batch_offset:(uuid_idx+1)*batch_offset, ...] + if clone: + return to_return.clone() + return to_return + + def apply_cache_diff(self, x: torch.Tensor, uuids: list[UUID]): + if self.first_cond_uuid in uuids: + self.total_steps_skipped += 1 + batch_offset = x.shape[0] // len(uuids) + for i, uuid in enumerate(uuids): + # if cached dims don't match x dims, cut off excess and hope for the best (cosmos world2video) + if x.shape[1:] != self.uuid_cache_diffs[uuid].shape[1:]: + if not self.allow_mismatch: + raise ValueError(f"Cached dims {self.uuid_cache_diffs[uuid].shape} don't match x dims {x.shape} - this is no good") + slicing = [] + skip_this_dim = True + for dim_u, dim_x in zip(self.uuid_cache_diffs[uuid].shape, x.shape): + if skip_this_dim: + skip_this_dim = False + continue + if dim_u != dim_x: + if self.cut_from_start: + slicing.append(slice(dim_x-dim_u, None)) + else: + slicing.append(slice(None, dim_u)) + else: + slicing.append(slice(None)) + slicing = [slice(i*batch_offset,(i+1)*batch_offset)] + slicing + x = x[slicing] + x += self.uuid_cache_diffs[uuid].to(x.device) + return x + + def update_cache_diff(self, output: torch.Tensor, x: torch.Tensor, uuids: list[UUID]): + # if output dims don't match x dims, cut off excess and hope for the best (cosmos world2video) + if output.shape[1:] != x.shape[1:]: + if not self.allow_mismatch: + raise ValueError(f"Output dims {output.shape} don't match x dims {x.shape} - this is no good") + slicing = [] + skip_dim = True + for dim_o, dim_x in zip(output.shape, x.shape): + if not skip_dim and dim_o != dim_x: + if self.cut_from_start: + slicing.append(slice(dim_x-dim_o, None)) + else: + slicing.append(slice(None, dim_o)) + else: + slicing.append(slice(None)) + skip_dim = False + x = x[slicing] + diff = output - x + batch_offset = diff.shape[0] // len(uuids) + for i, uuid in enumerate(uuids): + self.uuid_cache_diffs[uuid] = diff[i*batch_offset:(i+1)*batch_offset, ...] 
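+
+    # A rough sketch of what apply_cache_diff()/update_cache_diff() implement,
+    # ignoring batching and the dim-mismatch handling above (illustrative only):
+    # after a real model call, update_cache_diff() stores
+    #     cache_diff = output - x
+    # and a skipped step replaces the model call with apply_cache_diff():
+    #     output ~= x + cache_diff
+    # e.g. if x_prev = 1.0 produced output_prev = 1.4 (diff 0.4), a skipped step
+    # with x = 1.1 returns 1.1 + 0.4 = 1.5. The wrapper functions above decide
+    # *when* this reuse is acceptable by comparing cumulative_change_rate
+    # against reuse_threshold.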
+ + def has_first_cond_uuid(self, uuids: list[UUID]) -> bool: + return self.first_cond_uuid in uuids + + def reset(self): + self.relative_transformation_rate = 0.0 + self.cumulative_change_rate = 0.0 + self.initial_step = True + self.skip_current_step = False + self.output_change_rates = [] + self.first_cond_uuid = None + del self.x_prev_subsampled + self.x_prev_subsampled = None + del self.output_prev_subsampled + self.output_prev_subsampled = None + del self.output_prev_norm + self.output_prev_norm = None + del self.uuid_cache_diffs + self.uuid_cache_diffs = {} + self.total_steps_skipped = 0 + return self + + def clone(self): + return EasyCacheHolder(self.reuse_threshold, self.start_percent, self.end_percent, self.subsample_factor, self.offload_cache_diff, self.verbose) + + +class EasyCacheNode(io.ComfyNode): + @classmethod + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="EasyCache", + display_name="EasyCache", + description="Native EasyCache implementation.", + category="advanced/debug/model", + is_experimental=True, + inputs=[ + io.Model.Input("model", tooltip="The model to add EasyCache to."), + io.Float.Input("reuse_threshold", min=0.0, default=0.2, max=3.0, step=0.01, tooltip="The threshold for reusing cached steps."), + io.Float.Input("start_percent", min=0.0, default=0.15, max=1.0, step=0.01, tooltip="The relative sampling step to begin use of EasyCache."), + io.Float.Input("end_percent", min=0.0, default=0.95, max=1.0, step=0.01, tooltip="The relative sampling step to end use of EasyCache."), + io.Boolean.Input("verbose", default=False, tooltip="Whether to log verbose information."), + ], + outputs=[ + io.Model.Output(tooltip="The model with EasyCache."), + ], + ) + + @classmethod + def execute(cls, model: io.Model.Type, reuse_threshold: float, start_percent: float, end_percent: float, verbose: bool) -> io.NodeOutput: + model = model.clone() + model.model_options["transformer_options"]["easycache"] = EasyCacheHolder(reuse_threshold, start_percent, end_percent, subsample_factor=8, offload_cache_diff=False, verbose=verbose) + model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.OUTER_SAMPLE, "easycache", easycache_sample_wrapper) + model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.CALC_COND_BATCH, "easycache", easycache_calc_cond_batch_wrapper) + model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, "easycache", easycache_forward_wrapper) + return io.NodeOutput(model) + + +class LazyCacheHolder: + def __init__(self, reuse_threshold: float, start_percent: float, end_percent: float, subsample_factor: int, offload_cache_diff: bool, verbose: bool=False): + self.name = "LazyCache" + self.reuse_threshold = reuse_threshold + self.start_percent = start_percent + self.end_percent = end_percent + self.subsample_factor = subsample_factor + self.offload_cache_diff = offload_cache_diff + self.verbose = verbose + # timestep values + self.start_t = 0.0 + self.end_t = 0.0 + # control values + self.relative_transformation_rate: float = None + self.cumulative_change_rate = 0.0 + self.initial_step = True + # cache values + self.x_prev_subsampled: torch.Tensor = None + self.output_prev_subsampled: torch.Tensor = None + self.output_prev_norm: torch.Tensor = None + self.cache_diff: torch.Tensor = None + self.output_change_rates = [] + self.approx_output_change_rates = [] + self.total_steps_skipped = 0 + + def has_cache_diff(self) -> bool: + return self.cache_diff is not None + + def is_past_end_timestep(self, timestep: float) -> bool: + 
return not (timestep[0] > self.end_t).item() + + def should_do_easycache(self, timestep: float) -> bool: + return (timestep[0] <= self.start_t).item() + + def has_x_prev_subsampled(self) -> bool: + return self.x_prev_subsampled is not None + + def has_output_prev_subsampled(self) -> bool: + return self.output_prev_subsampled is not None + + def has_output_prev_norm(self) -> bool: + return self.output_prev_norm is not None + + def has_relative_transformation_rate(self) -> bool: + return self.relative_transformation_rate is not None + + def prepare_timesteps(self, model_sampling): + self.start_t = model_sampling.percent_to_sigma(self.start_percent) + self.end_t = model_sampling.percent_to_sigma(self.end_percent) + return self + + def subsample(self, x: torch.Tensor, clone: bool = True) -> torch.Tensor: + if self.subsample_factor > 1: + to_return = x[..., ::self.subsample_factor, ::self.subsample_factor] + if clone: + return to_return.clone() + return to_return + if clone: + return x.clone() + return x + + def apply_cache_diff(self, x: torch.Tensor): + self.total_steps_skipped += 1 + return x + self.cache_diff.to(x.device) + + def update_cache_diff(self, output: torch.Tensor, x: torch.Tensor): + self.cache_diff = output - x + + def reset(self): + self.relative_transformation_rate = 0.0 + self.cumulative_change_rate = 0.0 + self.initial_step = True + self.output_change_rates = [] + self.approx_output_change_rates = [] + del self.cache_diff + self.cache_diff = None + self.total_steps_skipped = 0 + return self + + def clone(self): + return LazyCacheHolder(self.reuse_threshold, self.start_percent, self.end_percent, self.subsample_factor, self.offload_cache_diff, self.verbose) + +class LazyCacheNode(io.ComfyNode): + @classmethod + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="LazyCache", + display_name="LazyCache", + description="A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. 
Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.", + category="advanced/debug/model", + is_experimental=True, + inputs=[ + io.Model.Input("model", tooltip="The model to add LazyCache to."), + io.Float.Input("reuse_threshold", min=0.0, default=0.2, max=3.0, step=0.01, tooltip="The threshold for reusing cached steps."), + io.Float.Input("start_percent", min=0.0, default=0.15, max=1.0, step=0.01, tooltip="The relative sampling step to begin use of LazyCache."), + io.Float.Input("end_percent", min=0.0, default=0.95, max=1.0, step=0.01, tooltip="The relative sampling step to end use of LazyCache."), + io.Boolean.Input("verbose", default=False, tooltip="Whether to log verbose information."), + ], + outputs=[ + io.Model.Output(tooltip="The model with LazyCache."), + ], + ) + + @classmethod + def execute(cls, model: io.Model.Type, reuse_threshold: float, start_percent: float, end_percent: float, verbose: bool) -> io.NodeOutput: + model = model.clone() + model.model_options["transformer_options"]["easycache"] = LazyCacheHolder(reuse_threshold, start_percent, end_percent, subsample_factor=8, offload_cache_diff=False, verbose=verbose) + model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.OUTER_SAMPLE, "lazycache", easycache_sample_wrapper) + model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.PREDICT_NOISE, "lazycache", lazycache_predict_noise_wrapper) + return io.NodeOutput(model) + + +class EasyCacheExtension(ComfyExtension): + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + EasyCacheNode, + LazyCacheNode, + ] + +def comfy_entrypoint(): + return EasyCacheExtension() diff --git a/nodes.py b/nodes.py index 9681750d3..723ce3384 100644 --- a/nodes.py +++ b/nodes.py @@ -2322,7 +2322,8 @@ async def init_builtin_extra_nodes(): "nodes_tcfg.py", "nodes_context_windows.py", "nodes_qwen.py", - "nodes_model_patch.py" + "nodes_model_patch.py", + "nodes_easycache.py", ] import_failed = [] From 41048c69b4ccf63f876213a95a51cdde1cb0ab84 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 22 Aug 2025 20:15:44 -0700 Subject: [PATCH 0487/1073] Fix Conditioning masks on 3d latents. 
(#9506) --- comfy/samplers.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index ec7e0b350..c7dfef4ea 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -17,6 +17,7 @@ import comfy.model_patcher import comfy.patcher_extension import comfy.hooks import comfy.context_windows +import comfy.utils import scipy.stats import numpy @@ -61,7 +62,7 @@ def get_area_and_mult(conds, x_in, timestep_in): if "mask_strength" in conds: mask_strength = conds["mask_strength"] mask = conds['mask'] - assert (mask.shape[1:] == x_in.shape[2:]) + # assert (mask.shape[1:] == x_in.shape[2:]) mask = mask[:input_x.shape[0]] if area is not None: @@ -69,7 +70,7 @@ def get_area_and_mult(conds, x_in, timestep_in): mask = mask.narrow(i + 1, area[len(dims) + i], area[i]) mask = mask * mask_strength - mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1) + mask = mask.unsqueeze(1).repeat((input_x.shape[0] // mask.shape[0], input_x.shape[1]) + (1, ) * (mask.ndim - 1)) else: mask = torch.ones_like(input_x) mult = mask * strength @@ -553,7 +554,10 @@ def resolve_areas_and_cond_masks_multidim(conditions, dims, device): if len(mask.shape) == len(dims): mask = mask.unsqueeze(0) if mask.shape[1:] != dims: - mask = torch.nn.functional.interpolate(mask.unsqueeze(1), size=dims, mode='bilinear', align_corners=False).squeeze(1) + if mask.ndim < 4: + mask = comfy.utils.common_upscale(mask.unsqueeze(1), dims[-1], dims[-2], 'bilinear', 'none').squeeze(1) + else: + mask = comfy.utils.common_upscale(mask, dims[-1], dims[-2], 'bilinear', 'none') if modified.get("set_area_to_bounds", False): #TODO: handle dim != 2 bounds = torch.max(torch.abs(mask),dim=0).values.unsqueeze(0) From 59eddda90030b61f172e155bc1e2526a51a27dff Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 22 Aug 2025 22:36:44 -0700 Subject: [PATCH 0488/1073] Python 3.13 is well supported. (#9511) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 79a8a8c79..99a50571b 100644 --- a/README.md +++ b/README.md @@ -192,7 +192,7 @@ comfy install ## Manual Install (Windows, Linux) -python 3.13 is supported but using 3.12 is recommended because some custom nodes and their dependencies might not support it yet. +Python 3.13 is very well supported. If you have trouble with some custom node dependencies you can try 3.12 Git clone this repo. From 8be0d22ab76a3d548c9c376fd816b39d4c028c12 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 23 Aug 2025 10:56:17 -0700 Subject: [PATCH 0489/1073] Don't use the annoying new navigation mode by default. 
(#9518) --- app/app_settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/app_settings.py b/app/app_settings.py index c7ac73bf6..eb69133a3 100644 --- a/app/app_settings.py +++ b/app/app_settings.py @@ -25,7 +25,7 @@ class AppSettings(): logging.error(f"The user settings file is corrupted: {file}") return {} else: - return {} + return {"Comfy.Canvas.NavigationMode": "legacy"} def save_settings(self, request, settings): file = self.user_manager.get_request_user_filepath( From 3e316c6338503a535801db3ddac9572a38a607ef Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sat, 23 Aug 2025 14:54:01 -0700 Subject: [PATCH 0490/1073] Update frontend to v1.25.10 and revert navigation mode override (#9522) - Update comfyui-frontend-package from 1.25.9 to 1.25.10 - Revert forced legacy navigation mode from PR #9518 - Frontend v1.25.10 includes proper navigation mode fixes and improved display text --- app/app_settings.py | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/app/app_settings.py b/app/app_settings.py index eb69133a3..c7ac73bf6 100644 --- a/app/app_settings.py +++ b/app/app_settings.py @@ -25,7 +25,7 @@ class AppSettings(): logging.error(f"The user settings file is corrupted: {file}") return {} else: - return {"Comfy.Canvas.NavigationMode": "legacy"} + return {} def save_settings(self, request, settings): file = self.user_manager.get_request_user_filepath( diff --git a/requirements.txt b/requirements.txt index 6b53fabc1..131484ce8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.25.9 +comfyui-frontend-package==1.25.10 comfyui-workflow-templates==0.1.65 comfyui-embedded-docs==0.2.6 torch From 71ed4a399ec76a75aa2870b772d2022e4b9a69a3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 23 Aug 2025 18:57:09 -0400 Subject: [PATCH 0491/1073] ComfyUI version 0.3.52 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 65f06cf37..834c3e8c2 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.51" +__version__ = "0.3.52" diff --git a/pyproject.toml b/pyproject.toml index ecbf04303..f6e765a81 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.51" +version = "0.3.52" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 95ac7794b7c735de8e5426442507d08edd29bec5 Mon Sep 17 00:00:00 2001 From: blepping <157360029+blepping@users.noreply.github.com> Date: Sun, 24 Aug 2025 13:29:49 -0600 Subject: [PATCH 0492/1073] Fix EasyCache/LazyCache crash when tensor shape/dtype/device changes during sampling (#9528) * Fix EasyCache/LazyCache crash when tensor shape/dtype/device changes during sampling * Fix missing LazyCache check_metadata method Ensure LazyCache reset method resets all the tensor state values --- comfy_extras/nodes_easycache.py | 34 +++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py index e2b2efcd9..9d2988f5f 100644 --- a/comfy_extras/nodes_easycache.py +++ b/comfy_extras/nodes_easycache.py @@ -28,6 +28,7 @@ def easycache_forward_wrapper(executor, *args, **kwargs): input_change = None do_easycache = easycache.should_do_easycache(sigmas) if do_easycache: + easycache.check_metadata(x) # if first cond marked this step for skipping, skip it and use appropriate cached values if easycache.skip_current_step: if easycache.verbose: @@ -92,6 +93,7 @@ def lazycache_predict_noise_wrapper(executor, *args, **kwargs): input_change = None do_easycache = easycache.should_do_easycache(timestep) if do_easycache: + easycache.check_metadata(x) if easycache.has_x_prev_subsampled(): if easycache.has_x_prev_subsampled(): input_change = (easycache.subsample(x, clone=False) - easycache.x_prev_subsampled).flatten().abs().mean() @@ -194,6 +196,7 @@ class EasyCacheHolder: # how to deal with mismatched dims self.allow_mismatch = True self.cut_from_start = True + self.state_metadata = None def is_past_end_timestep(self, timestep: float) -> bool: return not (timestep[0] > self.end_t).item() @@ -283,6 +286,17 @@ class EasyCacheHolder: def has_first_cond_uuid(self, uuids: list[UUID]) -> bool: return self.first_cond_uuid in uuids + def check_metadata(self, x: torch.Tensor) -> bool: + metadata = (x.device, x.dtype, x.shape[1:]) + if self.state_metadata is None: + self.state_metadata = metadata + return True + if metadata == self.state_metadata: + return True + logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting state") + self.reset() + return False + def reset(self): self.relative_transformation_rate = 0.0 self.cumulative_change_rate = 0.0 @@ -299,6 +313,7 @@ class EasyCacheHolder: del self.uuid_cache_diffs self.uuid_cache_diffs = {} self.total_steps_skipped = 0 + self.state_metadata = None return self def clone(self): @@ -360,6 +375,7 @@ class LazyCacheHolder: self.output_change_rates = [] self.approx_output_change_rates = [] self.total_steps_skipped = 0 + self.state_metadata = None def has_cache_diff(self) -> bool: return self.cache_diff is not None @@ -404,6 +420,17 @@ class LazyCacheHolder: def update_cache_diff(self, output: torch.Tensor, x: torch.Tensor): self.cache_diff = output - x + def check_metadata(self, x: torch.Tensor) -> bool: + metadata = (x.device, x.dtype, x.shape) + if self.state_metadata is None: + self.state_metadata = metadata + return True + if metadata == self.state_metadata: + return True + logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting 
state") + self.reset() + return False + def reset(self): self.relative_transformation_rate = 0.0 self.cumulative_change_rate = 0.0 @@ -412,7 +439,14 @@ class LazyCacheHolder: self.approx_output_change_rates = [] del self.cache_diff self.cache_diff = None + del self.x_prev_subsampled + self.x_prev_subsampled = None + del self.output_prev_subsampled + self.output_prev_subsampled = None + del self.output_prev_norm + self.output_prev_norm = None self.total_steps_skipped = 0 + self.state_metadata = None return self def clone(self): From f6b93d41a03081fad3c1a01221eac9c42d6790df Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 24 Aug 2025 12:40:32 -0700 Subject: [PATCH 0493/1073] Remove models from readme that are not fully implemented. (#9535) Cosmos model implementations are currently missing the safety part so it is technically not fully implemented and should not be advertised as such. --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 99a50571b..8024870c2 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,6 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/) - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) - - [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/) - [Qwen Image](https://comfyanonymous.github.io/ComfyUI_examples/qwen_image/) - Image Editing Models - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) @@ -77,7 +76,6 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/) - [LTX-Video](https://comfyanonymous.github.io/ComfyUI_examples/ltxv/) - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/) - - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) and [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/) - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/) - [Wan 2.2](https://comfyanonymous.github.io/ComfyUI_examples/wan22/) - Audio Models From e633a47ad1b875e52758be27ec34cb8907ebe1fb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 25 Aug 2025 17:13:54 -0700 Subject: [PATCH 0494/1073] Add models/audio_encoders directory. 
(#9548)
---
 folder_paths.py                                     | 2 ++
 models/audio_encoders/put_audio_encoder_models_here | 0
 2 files changed, 2 insertions(+)
 create mode 100644 models/audio_encoders/put_audio_encoder_models_here

diff --git a/folder_paths.py b/folder_paths.py
index b34af39e8..f110d832b 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -48,6 +48,8 @@ folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers"
 
 folder_names_and_paths["model_patches"] = ([os.path.join(models_dir, "model_patches")], supported_pt_extensions)
 
+folder_names_and_paths["audio_encoders"] = ([os.path.join(models_dir, "audio_encoders")], supported_pt_extensions)
+
 output_directory = os.path.join(base_path, "output")
 temp_directory = os.path.join(base_path, "temp")
 input_directory = os.path.join(base_path, "input")
diff --git a/models/audio_encoders/put_audio_encoder_models_here b/models/audio_encoders/put_audio_encoder_models_here
new file mode 100644
index 000000000..e69de29bb

From 914c2a29731be9c082f773c4b95892f553ac5ae8 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 25 Aug 2025 20:26:47 -0700
Subject: [PATCH 0495/1073] Implement wav2vec2 as an audio encoder model. (#9549)

This is useless on its own but there are multiple models that use it.
---
 comfy/audio_encoders/audio_encoders.py |  43 +++++
 comfy/audio_encoders/wav2vec2.py       | 207 +++++++++++++++++++++++++
 comfy_api/latest/_io.py                |   8 +
 comfy_extras/nodes_audio_encoder.py    |  44 ++++++
 nodes.py                               |   1 +
 5 files changed, 303 insertions(+)
 create mode 100644 comfy/audio_encoders/audio_encoders.py
 create mode 100644 comfy/audio_encoders/wav2vec2.py
 create mode 100644 comfy_extras/nodes_audio_encoder.py

diff --git a/comfy/audio_encoders/audio_encoders.py b/comfy/audio_encoders/audio_encoders.py
new file mode 100644
index 000000000..538c21bd5
--- /dev/null
+++ b/comfy/audio_encoders/audio_encoders.py
@@ -0,0 +1,43 @@
+from .wav2vec2 import Wav2Vec2Model
+import comfy.model_management
+import comfy.model_patcher
+import comfy.ops
+import comfy.utils
+import logging
+import torchaudio
+
+
+class AudioEncoderModel():
+    def __init__(self, config):
+        self.load_device = comfy.model_management.text_encoder_device()
+        offload_device = comfy.model_management.text_encoder_offload_device()
+        self.dtype = comfy.model_management.text_encoder_dtype(self.load_device)
+        self.model = Wav2Vec2Model(dtype=self.dtype, device=offload_device, operations=comfy.ops.manual_cast)
+        self.model.eval()
+        self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
+        self.model_sample_rate = 16000
+
+    def load_sd(self, sd):
+        return self.model.load_state_dict(sd, strict=False)
+
+    def get_sd(self):
+        return self.model.state_dict()
+
+    def encode_audio(self, audio, sample_rate):
+        comfy.model_management.load_model_gpu(self.patcher)
+        audio = torchaudio.functional.resample(audio, sample_rate, self.model_sample_rate)
+        out, all_layers = self.model(audio.to(self.load_device))
+        outputs = {}
+        outputs["encoded_audio"] = out
+        outputs["encoded_audio_all_layers"] = all_layers
+        return outputs
+
+
+def load_audio_encoder_from_sd(sd, prefix=""):
+    audio_encoder = AudioEncoderModel(None)
+    sd = comfy.utils.state_dict_prefix_replace(sd, {"wav2vec2.": ""})
+    m, u = audio_encoder.load_sd(sd)
+    if len(m) > 0:
+        logging.warning("missing audio encoder: {}".format(m))
+
+    return audio_encoder
diff --git a/comfy/audio_encoders/wav2vec2.py b/comfy/audio_encoders/wav2vec2.py
new file mode 100644
index 000000000..de906622a
--- 
/dev/null +++ b/comfy/audio_encoders/wav2vec2.py @@ -0,0 +1,207 @@ +import torch +import torch.nn as nn +from comfy.ldm.modules.attention import optimized_attention_masked + + +class LayerNormConv(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride, bias=False, dtype=None, device=None, operations=None): + super().__init__() + self.conv = operations.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, bias=bias, device=device, dtype=dtype) + self.layer_norm = operations.LayerNorm(out_channels, elementwise_affine=True, device=device, dtype=dtype) + + def forward(self, x): + x = self.conv(x) + return torch.nn.functional.gelu(self.layer_norm(x.transpose(-2, -1)).transpose(-2, -1)) + + +class ConvFeatureEncoder(nn.Module): + def __init__(self, conv_dim, dtype=None, device=None, operations=None): + super().__init__() + self.conv_layers = nn.ModuleList([ + LayerNormConv(1, conv_dim, kernel_size=10, stride=5, bias=True, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=2, stride=2, bias=True, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=2, stride=2, bias=True, device=device, dtype=dtype, operations=operations), + ]) + + def forward(self, x): + x = x.unsqueeze(1) + + for conv in self.conv_layers: + x = conv(x) + + return x.transpose(1, 2) + + +class FeatureProjection(nn.Module): + def __init__(self, conv_dim, embed_dim, dtype=None, device=None, operations=None): + super().__init__() + self.layer_norm = operations.LayerNorm(conv_dim, eps=1e-05, device=device, dtype=dtype) + self.projection = operations.Linear(conv_dim, embed_dim, device=device, dtype=dtype) + + def forward(self, x): + x = self.layer_norm(x) + x = self.projection(x) + return x + + +class PositionalConvEmbedding(nn.Module): + def __init__(self, embed_dim=768, kernel_size=128, groups=16): + super().__init__() + self.conv = nn.Conv1d( + embed_dim, + embed_dim, + kernel_size=kernel_size, + padding=kernel_size // 2, + groups=groups, + ) + self.conv = torch.nn.utils.parametrizations.weight_norm(self.conv, name="weight", dim=2) + self.activation = nn.GELU() + + def forward(self, x): + x = x.transpose(1, 2) + x = self.conv(x)[:, :, :-1] + x = self.activation(x) + x = x.transpose(1, 2) + return x + + +class TransformerEncoder(nn.Module): + def __init__( + self, + embed_dim=768, + num_heads=12, + num_layers=12, + mlp_ratio=4.0, + dtype=None, device=None, operations=None + ): + super().__init__() + + self.pos_conv_embed = PositionalConvEmbedding(embed_dim=embed_dim) + self.layers = nn.ModuleList([ + TransformerEncoderLayer( + embed_dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + device=device, dtype=dtype, operations=operations + ) + for _ in range(num_layers) + ]) + + self.layer_norm = operations.LayerNorm(embed_dim, eps=1e-05, device=device, dtype=dtype) + + def forward(self, x, mask=None): + x = x + self.pos_conv_embed(x) + all_x = () + for layer in self.layers: + all_x += (x,) + x = 
layer(x, mask) + x = self.layer_norm(x) + all_x += (x,) + return x, all_x + + +class Attention(nn.Module): + def __init__(self, embed_dim, num_heads, bias=True, dtype=None, device=None, operations=None): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + + self.k_proj = operations.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype) + self.v_proj = operations.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype) + self.q_proj = operations.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype) + self.out_proj = operations.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype) + + def forward(self, x, mask=None): + assert (mask is None) # TODO? + q = self.q_proj(x) + k = self.k_proj(x) + v = self.v_proj(x) + + out = optimized_attention_masked(q, k, v, self.num_heads) + return self.out_proj(out) + + +class FeedForward(nn.Module): + def __init__(self, embed_dim, mlp_ratio, dtype=None, device=None, operations=None): + super().__init__() + self.intermediate_dense = operations.Linear(embed_dim, int(embed_dim * mlp_ratio), device=device, dtype=dtype) + self.output_dense = operations.Linear(int(embed_dim * mlp_ratio), embed_dim, device=device, dtype=dtype) + + def forward(self, x): + x = self.intermediate_dense(x) + x = torch.nn.functional.gelu(x) + x = self.output_dense(x) + return x + + +class TransformerEncoderLayer(nn.Module): + def __init__( + self, + embed_dim=768, + num_heads=12, + mlp_ratio=4.0, + dtype=None, device=None, operations=None + ): + super().__init__() + + self.attention = Attention(embed_dim, num_heads, device=device, dtype=dtype, operations=operations) + + self.layer_norm = operations.LayerNorm(embed_dim, device=device, dtype=dtype) + self.feed_forward = FeedForward(embed_dim, mlp_ratio, device=device, dtype=dtype, operations=operations) + self.final_layer_norm = operations.LayerNorm(embed_dim, device=device, dtype=dtype) + + def forward(self, x, mask=None): + residual = x + x = self.layer_norm(x) + x = self.attention(x, mask=mask) + x = residual + x + + x = x + self.feed_forward(self.final_layer_norm(x)) + return x + + +class Wav2Vec2Model(nn.Module): + """Complete Wav2Vec 2.0 model.""" + + def __init__( + self, + embed_dim=1024, + final_dim=256, + num_heads=16, + num_layers=24, + dtype=None, device=None, operations=None + ): + super().__init__() + + conv_dim = 512 + self.feature_extractor = ConvFeatureEncoder(conv_dim, device=device, dtype=dtype, operations=operations) + self.feature_projection = FeatureProjection(conv_dim, embed_dim, device=device, dtype=dtype, operations=operations) + + self.masked_spec_embed = nn.Parameter(torch.empty(embed_dim, device=device, dtype=dtype)) + + self.encoder = TransformerEncoder( + embed_dim=embed_dim, + num_heads=num_heads, + num_layers=num_layers, + device=device, dtype=dtype, operations=operations + ) + + def forward(self, x, mask_time_indices=None, return_dict=False): + + x = torch.mean(x, dim=1) + + x = (x - x.mean()) / torch.sqrt(x.var() + 1e-7) + + features = self.feature_extractor(x) + features = self.feature_projection(features) + + batch_size, seq_len, _ = features.shape + + x, all_x = self.encoder(features) + + return x, all_x diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index a3a21facc..5cb474459 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -730,6 +730,14 @@ class AnyType(ComfyTypeIO): class MODEL_PATCH(ComfyTypeIO): Type = Any 
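+# Note: the two audio types below are opaque handles rather than tensors.
+# AUDIO_ENCODER carries the AudioEncoderModel instance produced by
+# AudioEncoderLoader, and AUDIO_ENCODER_OUTPUT the dict returned by
+# encode_audio(), roughly {"encoded_audio": Tensor,
+# "encoded_audio_all_layers": tuple of Tensors}.
+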
+@comfytype(io_type="AUDIO_ENCODER") +class AUDIO_ENCODER(ComfyTypeIO): + Type = Any + +@comfytype(io_type="AUDIO_ENCODER_OUTPUT") +class AUDIO_ENCODER_OUTPUT(ComfyTypeIO): + Type = Any + @comfytype(io_type="COMFY_MULTITYPED_V3") class MultiType: Type = Any diff --git a/comfy_extras/nodes_audio_encoder.py b/comfy_extras/nodes_audio_encoder.py new file mode 100644 index 000000000..39a140fef --- /dev/null +++ b/comfy_extras/nodes_audio_encoder.py @@ -0,0 +1,44 @@ +import folder_paths +import comfy.audio_encoders.audio_encoders +import comfy.utils + + +class AudioEncoderLoader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "audio_encoder_name": (folder_paths.get_filename_list("audio_encoders"), ), + }} + RETURN_TYPES = ("AUDIO_ENCODER",) + FUNCTION = "load_model" + + CATEGORY = "loaders" + + def load_model(self, audio_encoder_name): + audio_encoder_name = folder_paths.get_full_path_or_raise("audio_encoders", audio_encoder_name) + sd = comfy.utils.load_torch_file(audio_encoder_name, safe_load=True) + audio_encoder = comfy.audio_encoders.audio_encoders.load_audio_encoder_from_sd(sd) + if audio_encoder is None: + raise RuntimeError("ERROR: audio encoder file is invalid and does not contain a valid model.") + return (audio_encoder,) + + +class AudioEncoderEncode: + @classmethod + def INPUT_TYPES(s): + return {"required": { "audio_encoder": ("AUDIO_ENCODER",), + "audio": ("AUDIO",), + }} + RETURN_TYPES = ("AUDIO_ENCODER_OUTPUT",) + FUNCTION = "encode" + + CATEGORY = "conditioning" + + def encode(self, audio_encoder, audio): + output = audio_encoder.encode_audio(audio["waveform"], audio["sample_rate"]) + return (output,) + + +NODE_CLASS_MAPPINGS = { + "AudioEncoderLoader": AudioEncoderLoader, + "AudioEncoderEncode": AudioEncoderEncode, +} diff --git a/nodes.py b/nodes.py index 723ce3384..0aff6b14a 100644 --- a/nodes.py +++ b/nodes.py @@ -2324,6 +2324,7 @@ async def init_builtin_extra_nodes(): "nodes_qwen.py", "nodes_model_patch.py", "nodes_easycache.py", + "nodes_audio_encoder.py", ] import_failed = [] From 39aa06bd5d630e50c88d3be1586d21737c4387c1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 26 Aug 2025 09:50:46 -0700 Subject: [PATCH 0496/1073] Make AudioEncoderOutput usable in v3 node schema. 
(#9554) --- comfy_api/latest/_io.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 5cb474459..e0ee943a7 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -731,11 +731,11 @@ class MODEL_PATCH(ComfyTypeIO): Type = Any @comfytype(io_type="AUDIO_ENCODER") -class AUDIO_ENCODER(ComfyTypeIO): +class AudioEncoder(ComfyTypeIO): Type = Any @comfytype(io_type="AUDIO_ENCODER_OUTPUT") -class AUDIO_ENCODER_OUTPUT(ComfyTypeIO): +class AudioEncoderOutput(ComfyTypeIO): Type = Any @comfytype(io_type="COMFY_MULTITYPED_V3") @@ -1592,6 +1592,7 @@ class _IO: Model = Model ClipVision = ClipVision ClipVisionOutput = ClipVisionOutput + AudioEncoderOutput = AudioEncoderOutput StyleModel = StyleModel Gligen = Gligen UpscaleModel = UpscaleModel From 5352abc6d389570455776c457738db54367cd6cb Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 27 Aug 2025 01:33:54 +0800 Subject: [PATCH 0497/1073] Update template to 0.1.66 (#9557) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 131484ce8..db59bb38c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.10 -comfyui-workflow-templates==0.1.65 +comfyui-workflow-templates==0.1.66 comfyui-embedded-docs==0.2.6 torch torchsde From 47f4db3e84874ca6076e5cdbb345444faec83028 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 26 Aug 2025 19:20:44 -0700 Subject: [PATCH 0498/1073] Adding Google Gemini Image API node (#9566) * bigcat88's progress on adding Google Gemini Image node * Made Google Gemini Image node functional * Bump frontend version to get static pricing badge on Gemini Image node --- comfy_api_nodes/apis/gemini_api.py | 19 ++ comfy_api_nodes/nodes_gemini.py | 388 ++++++++++++++++++++++------- requirements.txt | 2 +- 3 files changed, 314 insertions(+), 95 deletions(-) create mode 100644 comfy_api_nodes/apis/gemini_api.py diff --git a/comfy_api_nodes/apis/gemini_api.py b/comfy_api_nodes/apis/gemini_api.py new file mode 100644 index 000000000..138bf035d --- /dev/null +++ b/comfy_api_nodes/apis/gemini_api.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from typing import List, Optional + +from comfy_api_nodes.apis import GeminiGenerationConfig, GeminiContent, GeminiSafetySetting, GeminiSystemInstructionContent, GeminiTool, GeminiVideoMetadata +from pydantic import BaseModel + + +class GeminiImageGenerationConfig(GeminiGenerationConfig): + responseModalities: Optional[List[str]] = None + + +class GeminiImageGenerateContentRequest(BaseModel): + contents: List[GeminiContent] + generationConfig: Optional[GeminiImageGenerationConfig] = None + safetySettings: Optional[List[GeminiSafetySetting]] = None + systemInstruction: Optional[GeminiSystemInstructionContent] = None + tools: Optional[List[GeminiTool]] = None + videoMetadata: Optional[GeminiVideoMetadata] = None diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 78c402a7a..baa379b75 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -4,11 +4,12 @@ See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/infer """ from __future__ import annotations - import json import time import os import uuid +import base64 +from io import BytesIO from enum import Enum from typing import Optional, Literal @@ -25,6 +26,7 @@ from comfy_api_nodes.apis import ( GeminiPart, GeminiMimeType, ) +from 
comfy_api_nodes.apis.gemini_api import GeminiImageGenerationConfig, GeminiImageGenerateContentRequest from comfy_api_nodes.apis.client import ( ApiEndpoint, HttpMethod, @@ -35,6 +37,7 @@ from comfy_api_nodes.apinode_utils import ( audio_to_base64_string, video_to_base64_string, tensor_to_base64_string, + bytesio_to_image_tensor, ) @@ -53,6 +56,14 @@ class GeminiModel(str, Enum): gemini_2_5_flash = "gemini-2.5-flash" +class GeminiImageModel(str, Enum): + """ + Gemini Image Model Names allowed by comfy-api + """ + + gemini_2_5_flash_image_preview = "gemini-2.5-flash-image-preview" + + def get_gemini_endpoint( model: GeminiModel, ) -> ApiEndpoint[GeminiGenerateContentRequest, GeminiGenerateContentResponse]: @@ -75,6 +86,135 @@ def get_gemini_endpoint( ) +def get_gemini_image_endpoint( + model: GeminiImageModel, +) -> ApiEndpoint[GeminiGenerateContentRequest, GeminiGenerateContentResponse]: + """ + Get the API endpoint for a given Gemini model. + + Args: + model: The Gemini model to use, either as enum or string value. + + Returns: + ApiEndpoint configured for the specific Gemini model. + """ + if isinstance(model, str): + model = GeminiImageModel(model) + return ApiEndpoint( + path=f"{GEMINI_BASE_ENDPOINT}/{model.value}", + method=HttpMethod.POST, + request_model=GeminiImageGenerateContentRequest, + response_model=GeminiGenerateContentResponse, + ) + + +def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]: + """ + Convert image tensor input to Gemini API compatible parts. + + Args: + image_input: Batch of image tensors from ComfyUI. + + Returns: + List of GeminiPart objects containing the encoded images. + """ + image_parts: list[GeminiPart] = [] + for image_index in range(image_input.shape[0]): + image_as_b64 = tensor_to_base64_string( + image_input[image_index].unsqueeze(0) + ) + image_parts.append( + GeminiPart( + inlineData=GeminiInlineData( + mimeType=GeminiMimeType.image_png, + data=image_as_b64, + ) + ) + ) + return image_parts + + +def create_text_part(text: str) -> GeminiPart: + """ + Create a text part for the Gemini API request. + + Args: + text: The text content to include in the request. + + Returns: + A GeminiPart object with the text content. + """ + return GeminiPart(text=text) + + +def get_parts_from_response( + response: GeminiGenerateContentResponse +) -> list[GeminiPart]: + """ + Extract all parts from the Gemini API response. + + Args: + response: The API response from Gemini. + + Returns: + List of response parts from the first candidate. + """ + return response.candidates[0].content.parts + + +def get_parts_by_type( + response: GeminiGenerateContentResponse, part_type: Literal["text"] | str +) -> list[GeminiPart]: + """ + Filter response parts by their type. + + Args: + response: The API response from Gemini. + part_type: Type of parts to extract ("text" or a MIME type). + + Returns: + List of response parts matching the requested type. + """ + parts = [] + for part in get_parts_from_response(response): + if part_type == "text" and hasattr(part, "text") and part.text: + parts.append(part) + elif ( + hasattr(part, "inlineData") + and part.inlineData + and part.inlineData.mimeType == part_type + ): + parts.append(part) + # Skip parts that don't match the requested type + return parts + + +def get_text_from_response(response: GeminiGenerateContentResponse) -> str: + """ + Extract and concatenate all text parts from the response. + + Args: + response: The API response from Gemini. + + Returns: + Combined text from all text parts in the response. 
+ """ + parts = get_parts_by_type(response, "text") + return "\n".join([part.text for part in parts]) + + +def get_image_from_response(response: GeminiGenerateContentResponse) -> torch.Tensor: + image_tensors: list[torch.Tensor] = [] + parts = get_parts_by_type(response, "image/png") + for part in parts: + image_data = base64.b64decode(part.inlineData.data) + returned_image = bytesio_to_image_tensor(BytesIO(image_data)) + image_tensors.append(returned_image) + if len(image_tensors) == 0: + return torch.zeros((1,1024,1024,4)) + return torch.cat(image_tensors, dim=0) + + class GeminiNode(ComfyNodeABC): """ Node to generate text responses from a Gemini model. @@ -159,59 +299,6 @@ class GeminiNode(ComfyNodeABC): CATEGORY = "api node/text/Gemini" API_NODE = True - def get_parts_from_response( - self, response: GeminiGenerateContentResponse - ) -> list[GeminiPart]: - """ - Extract all parts from the Gemini API response. - - Args: - response: The API response from Gemini. - - Returns: - List of response parts from the first candidate. - """ - return response.candidates[0].content.parts - - def get_parts_by_type( - self, response: GeminiGenerateContentResponse, part_type: Literal["text"] | str - ) -> list[GeminiPart]: - """ - Filter response parts by their type. - - Args: - response: The API response from Gemini. - part_type: Type of parts to extract ("text" or a MIME type). - - Returns: - List of response parts matching the requested type. - """ - parts = [] - for part in self.get_parts_from_response(response): - if part_type == "text" and hasattr(part, "text") and part.text: - parts.append(part) - elif ( - hasattr(part, "inlineData") - and part.inlineData - and part.inlineData.mimeType == part_type - ): - parts.append(part) - # Skip parts that don't match the requested type - return parts - - def get_text_from_response(self, response: GeminiGenerateContentResponse) -> str: - """ - Extract and concatenate all text parts from the response. - - Args: - response: The API response from Gemini. - - Returns: - Combined text from all text parts in the response. - """ - parts = self.get_parts_by_type(response, "text") - return "\n".join([part.text for part in parts]) - def create_video_parts(self, video_input: IO.VIDEO, **kwargs) -> list[GeminiPart]: """ Convert video input to Gemini API compatible parts. @@ -271,43 +358,6 @@ class GeminiNode(ComfyNodeABC): ) return audio_parts - def create_image_parts(self, image_input: torch.Tensor) -> list[GeminiPart]: - """ - Convert image tensor input to Gemini API compatible parts. - - Args: - image_input: Batch of image tensors from ComfyUI. - - Returns: - List of GeminiPart objects containing the encoded images. - """ - image_parts: list[GeminiPart] = [] - for image_index in range(image_input.shape[0]): - image_as_b64 = tensor_to_base64_string( - image_input[image_index].unsqueeze(0) - ) - image_parts.append( - GeminiPart( - inlineData=GeminiInlineData( - mimeType=GeminiMimeType.image_png, - data=image_as_b64, - ) - ) - ) - return image_parts - - def create_text_part(self, text: str) -> GeminiPart: - """ - Create a text part for the Gemini API request. - - Args: - text: The text content to include in the request. - - Returns: - A GeminiPart object with the text content. 
- """ - return GeminiPart(text=text) - async def api_call( self, prompt: str, @@ -323,11 +373,11 @@ class GeminiNode(ComfyNodeABC): validate_string(prompt, strip_whitespace=False) # Create parts list with text prompt as the first part - parts: list[GeminiPart] = [self.create_text_part(prompt)] + parts: list[GeminiPart] = [create_text_part(prompt)] # Add other modal parts if images is not None: - image_parts = self.create_image_parts(images) + image_parts = create_image_parts(images) parts.extend(image_parts) if audio is not None: parts.extend(self.create_audio_parts(audio)) @@ -351,7 +401,7 @@ class GeminiNode(ComfyNodeABC): ).execute() # Get result output - output_text = self.get_text_from_response(response) + output_text = get_text_from_response(response) if unique_id and output_text: # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. render_spec = { @@ -462,12 +512,162 @@ class GeminiInputFiles(ComfyNodeABC): return (files,) +class GeminiImage(ComfyNodeABC): + """ + Node to generate text and image responses from a Gemini model. + + This node allows users to interact with Google's Gemini AI models, providing + multimodal inputs (text, images, files) to generate coherent + text and image responses. The node works with the latest Gemini models, handling the + API communication and response parsing. + """ + @classmethod + def INPUT_TYPES(cls) -> InputTypeDict: + return { + "required": { + "prompt": ( + IO.STRING, + { + "multiline": True, + "default": "", + "tooltip": "Text prompt for generation", + }, + ), + "model": ( + IO.COMBO, + { + "tooltip": "The Gemini model to use for generating responses.", + "options": [model.value for model in GeminiImageModel], + "default": GeminiImageModel.gemini_2_5_flash_image_preview.value, + }, + ), + "seed": ( + IO.INT, + { + "default": 42, + "min": 0, + "max": 0xFFFFFFFFFFFFFFFF, + "control_after_generate": True, + "tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.", + }, + ), + }, + "optional": { + "images": ( + IO.IMAGE, + { + "default": None, + "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.", + }, + ), + "files": ( + "GEMINI_INPUT_FILES", + { + "default": None, + "tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node.", + }, + ), + # TODO: later we can add this parameter later + # "n": ( + # IO.INT, + # { + # "default": 1, + # "min": 1, + # "max": 8, + # "step": 1, + # "display": "number", + # "tooltip": "How many images to generate", + # }, + # ), + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + "unique_id": "UNIQUE_ID", + }, + } + + RETURN_TYPES = (IO.IMAGE, IO.STRING) + FUNCTION = "api_call" + CATEGORY = "api node/image/Gemini" + DESCRIPTION = "Edit images synchronously via Google API." 
+ API_NODE = True + + async def api_call( + self, + prompt: str, + model: GeminiImageModel, + images: Optional[IO.IMAGE] = None, + files: Optional[list[GeminiPart]] = None, + n=1, + unique_id: Optional[str] = None, + **kwargs, + ): + # Validate inputs + validate_string(prompt, strip_whitespace=True, min_length=1) + # Create parts list with text prompt as the first part + parts: list[GeminiPart] = [create_text_part(prompt)] + + # Add other modal parts + if images is not None: + image_parts = create_image_parts(images) + parts.extend(image_parts) + if files is not None: + parts.extend(files) + + response = await SynchronousOperation( + endpoint=get_gemini_image_endpoint(model), + request=GeminiImageGenerateContentRequest( + contents=[ + GeminiContent( + role="user", + parts=parts, + ), + ], + generationConfig=GeminiImageGenerationConfig( + responseModalities=["TEXT","IMAGE"] + ) + ), + auth_kwargs=kwargs, + ).execute() + + output_image = get_image_from_response(response) + output_text = get_text_from_response(response) + if unique_id and output_text: + # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. + render_spec = { + "node_id": unique_id, + "component": "ChatHistoryWidget", + "props": { + "history": json.dumps( + [ + { + "prompt": prompt, + "response": output_text, + "response_id": str(uuid.uuid4()), + "timestamp": time.time(), + } + ] + ), + }, + } + PromptServer.instance.send_sync( + "display_component", + render_spec, + ) + + output_text = output_text or "Empty response from Gemini model..." + return (output_image, output_text,) + + NODE_CLASS_MAPPINGS = { "GeminiNode": GeminiNode, + "GeminiImageNode": GeminiImage, "GeminiInputFiles": GeminiInputFiles, } NODE_DISPLAY_NAME_MAPPINGS = { "GeminiNode": "Google Gemini", + "GeminiImageNode": "Google Gemini Image", "GeminiInputFiles": "Gemini Input Files", } diff --git a/requirements.txt b/requirements.txt index db59bb38c..174f3d4d1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.25.10 +comfyui-frontend-package==1.25.11 comfyui-workflow-templates==0.1.66 comfyui-embedded-docs==0.2.6 torch From 6a193ac557b2b35a6d2ea1916b0b8d5d9ee9b1ba Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 27 Aug 2025 12:10:20 +0800 Subject: [PATCH 0499/1073] Update template to 0.1.68 (#9569) * Update template to 0.1.67 * Update template to 0.1.68 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 174f3d4d1..93d88859d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.11 -comfyui-workflow-templates==0.1.66 +comfyui-workflow-templates==0.1.68 comfyui-embedded-docs==0.2.6 torch torchsde From 88aee596a30e9b80ca831c42a0ae70e0d22b61ae Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 26 Aug 2025 22:10:34 -0700 Subject: [PATCH 0500/1073] WIP Wan 2.2 S2V model. 
(#9568) --- comfy/ldm/wan/model.py | 508 ++++++++++++++++++++++++++++++++++++-- comfy/model_base.py | 23 ++ comfy/model_detection.py | 2 + comfy/supported_models.py | 15 +- comfy_extras/nodes_wan.py | 175 +++++++++++++ 5 files changed, 707 insertions(+), 16 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 1885d9730..dedfb47e2 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -4,7 +4,7 @@ import math import torch import torch.nn as nn -from einops import repeat +from einops import rearrange from comfy.ldm.modules.attention import optimized_attention from comfy.ldm.flux.layers import EmbedND @@ -153,7 +153,10 @@ def repeat_e(e, x): repeats = x.size(1) // e.size(1) if repeats == 1: return e - return torch.repeat_interleave(e, repeats, dim=1) + if repeats * e.size(1) == x.size(1): + return torch.repeat_interleave(e, repeats, dim=1) + else: + return torch.repeat_interleave(e, repeats + 1, dim=1)[:, :x.size(1)] class WanAttentionBlock(nn.Module): @@ -573,6 +576,28 @@ class WanModel(torch.nn.Module): x = self.unpatchify(x, grid_sizes) return x + def rope_encode(self, t, h, w, t_start=0, steps_t=None, steps_h=None, steps_w=None, device=None, dtype=None): + patch_size = self.patch_size + t_len = ((t + (patch_size[0] // 2)) // patch_size[0]) + h_len = ((h + (patch_size[1] // 2)) // patch_size[1]) + w_len = ((w + (patch_size[2] // 2)) // patch_size[2]) + + if steps_t is None: + steps_t = t_len + if steps_h is None: + steps_h = h_len + if steps_w is None: + steps_w = w_len + + img_ids = torch.zeros((steps_t, steps_h, steps_w, 3), device=device, dtype=dtype) + img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(t_start, t_start + (t_len - 1), steps=steps_t, device=device, dtype=dtype).reshape(-1, 1, 1) + img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(0, h_len - 1, steps=steps_h, device=device, dtype=dtype).reshape(1, -1, 1) + img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(0, w_len - 1, steps=steps_w, device=device, dtype=dtype).reshape(1, 1, -1) + img_ids = img_ids.reshape(1, -1, img_ids.shape[-1]) + + freqs = self.rope_embedder(img_ids).movedim(1, 2) + return freqs + def forward(self, x, timestep, context, clip_fea=None, time_dim_concat=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( self._forward, @@ -584,26 +609,16 @@ class WanModel(torch.nn.Module): bs, c, t, h, w = x.shape x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size) - patch_size = self.patch_size - t_len = ((t + (patch_size[0] // 2)) // patch_size[0]) - h_len = ((h + (patch_size[1] // 2)) // patch_size[1]) - w_len = ((w + (patch_size[2] // 2)) // patch_size[2]) - + t_len = t if time_dim_concat is not None: time_dim_concat = comfy.ldm.common_dit.pad_to_patch_size(time_dim_concat, self.patch_size) x = torch.cat([x, time_dim_concat], dim=2) - t_len = ((x.shape[2] + (patch_size[0] // 2)) // patch_size[0]) + t_len = x.shape[2] if self.ref_conv is not None and "reference_latent" in kwargs: t_len += 1 - img_ids = torch.zeros((t_len, h_len, w_len, 3), device=x.device, dtype=x.dtype) - img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(0, t_len - 1, steps=t_len, device=x.device, dtype=x.dtype).reshape(-1, 1, 1) - img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).reshape(1, -1, 1) - img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).reshape(1, 1, -1) - img_ids 
= repeat(img_ids, "t h w c -> b (t h w) c", b=bs) - - freqs = self.rope_embedder(img_ids).movedim(1, 2) + freqs = self.rope_encode(t_len, h, w, device=x.device, dtype=x.dtype) return self.forward_orig(x, timestep, context, clip_fea=clip_fea, freqs=freqs, transformer_options=transformer_options, **kwargs)[:, :, :t, :h, :w] def unpatchify(self, x, grid_sizes): @@ -839,3 +854,466 @@ class CameraWanModel(WanModel): # unpatchify x = self.unpatchify(x, grid_sizes) return x + + +class CausalConv1d(nn.Module): + + def __init__(self, + chan_in, + chan_out, + kernel_size=3, + stride=1, + dilation=1, + pad_mode='replicate', + operations=None, + **kwargs): + super().__init__() + + self.pad_mode = pad_mode + padding = (kernel_size - 1, 0) # T + self.time_causal_padding = padding + + self.conv = operations.Conv1d( + chan_in, + chan_out, + kernel_size, + stride=stride, + dilation=dilation, + **kwargs) + + def forward(self, x): + x = torch.nn.functional.pad(x, self.time_causal_padding, mode=self.pad_mode) + return self.conv(x) + + +class MotionEncoder_tc(nn.Module): + + def __init__(self, + in_dim: int, + hidden_dim: int, + num_heads=int, + need_global=True, + dtype=None, + device=None, + operations=None,): + factory_kwargs = {"dtype": dtype, "device": device} + super().__init__() + + self.num_heads = num_heads + self.need_global = need_global + self.conv1_local = CausalConv1d(in_dim, hidden_dim // 4 * num_heads, 3, stride=1, operations=operations, **factory_kwargs) + if need_global: + self.conv1_global = CausalConv1d( + in_dim, hidden_dim // 4, 3, stride=1, operations=operations, **factory_kwargs) + self.norm1 = operations.LayerNorm( + hidden_dim // 4, + elementwise_affine=False, + eps=1e-6, + **factory_kwargs) + self.act = nn.SiLU() + self.conv2 = CausalConv1d(hidden_dim // 4, hidden_dim // 2, 3, stride=2, operations=operations, **factory_kwargs) + self.conv3 = CausalConv1d(hidden_dim // 2, hidden_dim, 3, stride=2, operations=operations, **factory_kwargs) + + if need_global: + self.final_linear = operations.Linear(hidden_dim, hidden_dim, **factory_kwargs) + + self.norm1 = operations.LayerNorm( + hidden_dim // 4, + elementwise_affine=False, + eps=1e-6, + **factory_kwargs) + + self.norm2 = operations.LayerNorm( + hidden_dim // 2, + elementwise_affine=False, + eps=1e-6, + **factory_kwargs) + + self.norm3 = operations.LayerNorm( + hidden_dim, elementwise_affine=False, eps=1e-6, **factory_kwargs) + + self.padding_tokens = nn.Parameter(torch.empty(1, 1, 1, hidden_dim, **factory_kwargs)) + + def forward(self, x): + x = rearrange(x, 'b t c -> b c t') + x_ori = x.clone() + b, c, t = x.shape + x = self.conv1_local(x) + x = rearrange(x, 'b (n c) t -> (b n) t c', n=self.num_heads) + x = self.norm1(x) + x = self.act(x) + x = rearrange(x, 'b t c -> b c t') + x = self.conv2(x) + x = rearrange(x, 'b c t -> b t c') + x = self.norm2(x) + x = self.act(x) + x = rearrange(x, 'b t c -> b c t') + x = self.conv3(x) + x = rearrange(x, 'b c t -> b t c') + x = self.norm3(x) + x = self.act(x) + x = rearrange(x, '(b n) t c -> b t n c', b=b) + padding = comfy.model_management.cast_to(self.padding_tokens, dtype=x.dtype, device=x.device).repeat(b, x.shape[1], 1, 1) + x = torch.cat([x, padding], dim=-2) + x_local = x.clone() + + if not self.need_global: + return x_local + + x = self.conv1_global(x_ori) + x = rearrange(x, 'b c t -> b t c') + x = self.norm1(x) + x = self.act(x) + x = rearrange(x, 'b t c -> b c t') + x = self.conv2(x) + x = rearrange(x, 'b c t -> b t c') + x = self.norm2(x) + x = self.act(x) + x = rearrange(x, 'b t c -> b 
c t') + x = self.conv3(x) + x = rearrange(x, 'b c t -> b t c') + x = self.norm3(x) + x = self.act(x) + x = self.final_linear(x) + x = rearrange(x, '(b n) t c -> b t n c', b=b) + + return x, x_local + + +class CausalAudioEncoder(nn.Module): + + def __init__(self, + dim=5120, + num_layers=25, + out_dim=2048, + video_rate=8, + num_token=4, + need_global=False, + dtype=None, + device=None, + operations=None): + super().__init__() + self.encoder = MotionEncoder_tc( + in_dim=dim, + hidden_dim=out_dim, + num_heads=num_token, + need_global=need_global, dtype=dtype, device=device, operations=operations) + weight = torch.empty((1, num_layers, 1, 1), dtype=dtype, device=device) + + self.weights = torch.nn.Parameter(weight) + self.act = torch.nn.SiLU() + + def forward(self, features): + # features B * num_layers * dim * video_length + weights = self.act(comfy.model_management.cast_to(self.weights, dtype=features.dtype, device=features.device)) + weights_sum = weights.sum(dim=1, keepdims=True) + weighted_feat = ((features * weights) / weights_sum).sum( + dim=1) # b dim f + weighted_feat = weighted_feat.permute(0, 2, 1) # b f dim + res = self.encoder(weighted_feat) # b f n dim + return res # b f n dim + + +class AdaLayerNorm(nn.Module): + def __init__(self, embedding_dim, output_dim=None, norm_elementwise_affine=False, norm_eps=1e-5, dtype=None, device=None, operations=None): + super().__init__() + + output_dim = output_dim or embedding_dim * 2 + + self.silu = nn.SiLU() + self.linear = operations.Linear(embedding_dim, output_dim, dtype=dtype, device=device) + self.norm = operations.LayerNorm(output_dim // 2, norm_eps, norm_elementwise_affine, dtype=dtype, device=device) + + def forward(self, x, temb): + temb = self.linear(self.silu(temb)) + shift, scale = temb.chunk(2, dim=1) + shift = shift[:, None, :] + scale = scale[:, None, :] + x = self.norm(x) * (1 + scale) + shift + return x + + +class AudioInjector_WAN(nn.Module): + + def __init__(self, + dim=2048, + num_heads=32, + inject_layer=[0, 27], + root_net=None, + enable_adain=False, + adain_dim=2048, + adain_mode=None, + dtype=None, + device=None, + operations=None): + super().__init__() + self.enable_adain = enable_adain + self.adain_mode = adain_mode + self.injected_block_id = {} + audio_injector_id = 0 + for inject_id in inject_layer: + self.injected_block_id[inject_id] = audio_injector_id + audio_injector_id += 1 + + self.injector = nn.ModuleList([ + WanT2VCrossAttention( + dim=dim, + num_heads=num_heads, + qk_norm=True, operation_settings={"operations": operations, "device": device, "dtype": dtype} + ) for _ in range(audio_injector_id) + ]) + self.injector_pre_norm_feat = nn.ModuleList([ + operations.LayerNorm( + dim, + elementwise_affine=False, + eps=1e-6, dtype=dtype, device=device + ) for _ in range(audio_injector_id) + ]) + self.injector_pre_norm_vec = nn.ModuleList([ + operations.LayerNorm( + dim, + elementwise_affine=False, + eps=1e-6, dtype=dtype, device=device + ) for _ in range(audio_injector_id) + ]) + if enable_adain: + self.injector_adain_layers = nn.ModuleList([ + AdaLayerNorm( + output_dim=dim * 2, embedding_dim=adain_dim, dtype=dtype, device=device, operations=operations) + for _ in range(audio_injector_id) + ]) + if adain_mode != "attn_norm": + self.injector_adain_output_layers = nn.ModuleList( + [operations.Linear(dim, dim, dtype=dtype, device=device) for _ in range(audio_injector_id)]) + + def forward(self, x, block_id, audio_emb, audio_emb_global, seq_len): + audio_attn_id = self.injected_block_id.get(block_id, None) + if 
audio_attn_id is None: + return x + + num_frames = audio_emb.shape[1] + input_hidden_states = rearrange(x[:, :seq_len], "b (t n) c -> (b t) n c", t=num_frames) + if self.enable_adain and self.adain_mode == "attn_norm": + audio_emb_global = rearrange(audio_emb_global, "b t n c -> (b t) n c") + adain_hidden_states = self.injector_adain_layers[audio_attn_id](input_hidden_states, temb=audio_emb_global[:, 0]) + attn_hidden_states = adain_hidden_states + else: + attn_hidden_states = self.injector_pre_norm_feat[audio_attn_id](input_hidden_states) + audio_emb = rearrange(audio_emb, "b t n c -> (b t) n c", t=num_frames) + attn_audio_emb = audio_emb + residual_out = self.injector[audio_attn_id](x=attn_hidden_states, context=attn_audio_emb) + residual_out = rearrange( + residual_out, "(b t) n c -> b (t n) c", t=num_frames) + x[:, :seq_len] = x[:, :seq_len] + residual_out + return x + + +class FramePackMotioner(nn.Module): + def __init__( + self, + inner_dim=1024, + num_heads=16, # Used to indicate the number of heads in the backbone network; unrelated to this module's design + zip_frame_buckets=[ + 1, 2, 16 + ], # Three numbers representing the number of frames sampled for patch operations from the nearest to the farthest frames + drop_mode="drop", # If not "drop", it will use "padd", meaning padding instead of deletion + dtype=None, + device=None, + operations=None): + super().__init__() + self.proj = operations.Conv3d(16, inner_dim, kernel_size=(1, 2, 2), stride=(1, 2, 2), dtype=dtype, device=device) + self.proj_2x = operations.Conv3d(16, inner_dim, kernel_size=(2, 4, 4), stride=(2, 4, 4), dtype=dtype, device=device) + self.proj_4x = operations.Conv3d(16, inner_dim, kernel_size=(4, 8, 8), stride=(4, 8, 8), dtype=dtype, device=device) + self.zip_frame_buckets = zip_frame_buckets + + self.inner_dim = inner_dim + self.num_heads = num_heads + + self.drop_mode = drop_mode + + def forward(self, motion_latents, rope_embedder, add_last_motion=2): + lat_height, lat_width = motion_latents.shape[3], motion_latents.shape[4] + padd_lat = torch.zeros(motion_latents.shape[0], 16, sum(self.zip_frame_buckets), lat_height, lat_width).to(device=motion_latents.device, dtype=motion_latents.dtype) + overlap_frame = min(padd_lat.shape[2], motion_latents.shape[2]) + if overlap_frame > 0: + padd_lat[:, :, -overlap_frame:] = motion_latents[:, :, -overlap_frame:] + + if add_last_motion < 2 and self.drop_mode != "drop": + zero_end_frame = sum(self.zip_frame_buckets[:len(self.zip_frame_buckets) - add_last_motion - 1]) + padd_lat[:, :, -zero_end_frame:] = 0 + + clean_latents_4x, clean_latents_2x, clean_latents_post = padd_lat[:, :, -sum(self.zip_frame_buckets):, :, :].split(self.zip_frame_buckets[::-1], dim=2) # 16, 2 ,1 + + # patchfy + clean_latents_post = self.proj(clean_latents_post).flatten(2).transpose(1, 2) + clean_latents_2x = self.proj_2x(clean_latents_2x) + l_2x_shape = clean_latents_2x.shape + clean_latents_2x = clean_latents_2x.flatten(2).transpose(1, 2) + clean_latents_4x = self.proj_4x(clean_latents_4x) + l_4x_shape = clean_latents_4x.shape + clean_latents_4x = clean_latents_4x.flatten(2).transpose(1, 2) + + if add_last_motion < 2 and self.drop_mode == "drop": + clean_latents_post = clean_latents_post[:, : + 0] if add_last_motion < 2 else clean_latents_post + clean_latents_2x = clean_latents_2x[:, : + 0] if add_last_motion < 1 else clean_latents_2x + + motion_lat = torch.cat([clean_latents_post, clean_latents_2x, clean_latents_4x], dim=1) + + rope_post = rope_embedder.rope_encode(1, lat_height, lat_width, 
t_start=-1, device=motion_latents.device, dtype=motion_latents.dtype) + rope_2x = rope_embedder.rope_encode(1, lat_height, lat_width, t_start=-3, steps_h=l_2x_shape[-2], steps_w=l_2x_shape[-1], device=motion_latents.device, dtype=motion_latents.dtype) + rope_4x = rope_embedder.rope_encode(4, lat_height, lat_width, t_start=-19, steps_h=l_4x_shape[-2], steps_w=l_4x_shape[-1], device=motion_latents.device, dtype=motion_latents.dtype) + + rope = torch.cat([rope_post, rope_2x, rope_4x], dim=1) + return motion_lat, rope + + +class WanModel_S2V(WanModel): + def __init__(self, + model_type='s2v', + patch_size=(1, 2, 2), + text_len=512, + in_dim=16, + dim=2048, + ffn_dim=8192, + freq_dim=256, + text_dim=4096, + out_dim=16, + num_heads=16, + num_layers=32, + window_size=(-1, -1), + qk_norm=True, + cross_attn_norm=True, + eps=1e-6, + audio_dim=1024, + num_audio_token=4, + enable_adain=True, + cond_dim=16, + audio_inject_layers=[0, 4, 8, 12, 16, 20, 24, 27, 30, 33, 36, 39], + adain_mode="attn_norm", + framepack_drop_mode="padd", + image_model=None, + device=None, + dtype=None, + operations=None, + ): + + super().__init__(model_type='t2v', patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim, num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, image_model=image_model, device=device, dtype=dtype, operations=operations) + + self.trainable_cond_mask = operations.Embedding(3, self.dim, device=device, dtype=dtype) + + self.casual_audio_encoder = CausalAudioEncoder( + dim=audio_dim, + out_dim=self.dim, + num_token=num_audio_token, + need_global=enable_adain, dtype=dtype, device=device, operations=operations) + + if cond_dim > 0: + self.cond_encoder = operations.Conv3d( + cond_dim, + self.dim, + kernel_size=self.patch_size, + stride=self.patch_size, device=device, dtype=dtype) + + self.audio_injector = AudioInjector_WAN( + dim=self.dim, + num_heads=self.num_heads, + inject_layer=audio_inject_layers, + root_net=self, + enable_adain=enable_adain, + adain_dim=self.dim, + adain_mode=adain_mode, + dtype=dtype, device=device, operations=operations + ) + + self.frame_packer = FramePackMotioner( + inner_dim=self.dim, + num_heads=self.num_heads, + zip_frame_buckets=[1, 2, 16], + drop_mode=framepack_drop_mode, + dtype=dtype, device=device, operations=operations) + + def forward_orig( + self, + x, + t, + context, + audio_embed=None, + reference_latent=None, + control_video=None, + reference_motion=None, + clip_fea=None, + freqs=None, + transformer_options={}, + **kwargs, + ): + if audio_embed is not None: + num_embeds = x.shape[-3] * 4 + audio_emb_global, audio_emb = self.casual_audio_encoder(audio_embed[:, :, :, :num_embeds]) + else: + audio_emb = None + + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + if control_video is not None: + x = x + self.cond_encoder(control_video) + + if t.ndim == 1: + t = t.unsqueeze(1).repeat(1, x.shape[2]) + + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + seq_len = x.size(1) + + cond_mask_weight = comfy.model_management.cast_to(self.trainable_cond_mask.weight, dtype=x.dtype, device=x.device).unsqueeze(1).unsqueeze(1) + x = x + cond_mask_weight[0] + + if reference_latent is not None: + ref = self.patch_embedding(reference_latent.float()).to(x.dtype) + ref = ref.flatten(2).transpose(1, 2) + freqs_ref = self.rope_encode(reference_latent.shape[-3], reference_latent.shape[-2], reference_latent.shape[-1], 
t_start=30, device=x.device, dtype=x.dtype) + ref = ref + cond_mask_weight[1] + x = torch.cat([x, ref], dim=1) + freqs = torch.cat([freqs, freqs_ref], dim=1) + t = torch.cat([t, torch.zeros((t.shape[0], reference_latent.shape[-3]), device=t.device, dtype=t.dtype)], dim=1) + + if reference_motion is not None: + motion_encoded, freqs_motion = self.frame_packer(reference_motion, self) + motion_encoded = motion_encoded + cond_mask_weight[2] + x = torch.cat([x, motion_encoded], dim=1) + freqs = torch.cat([freqs, freqs_motion], dim=1) + + t = torch.repeat_interleave(t, 2, dim=1) + t = torch.cat([t, torch.zeros((t.shape[0], 3), device=t.device, dtype=t.dtype)], dim=1) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t.flatten()).to(dtype=x[0].dtype)) + e = e.reshape(t.shape[0], -1, e.shape[-1]) + e0 = self.time_projection(e).unflatten(2, (6, self.dim)) + + # context + context = self.text_embedding(context) + + + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"]) + return out + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context) + if audio_emb is not None: + x = self.audio_injector(x, i, audio_emb, audio_emb_global, seq_len) + # head + x = self.head(x, e) + + # unpatchify + x = self.unpatchify(x, grid_sizes) + return x diff --git a/comfy/model_base.py b/comfy/model_base.py index 6c861b15e..18d55c1c4 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1201,6 +1201,29 @@ class WAN21_Camera(WAN21): out['camera_conditions'] = comfy.conds.CONDRegular(camera_conditions) return out +class WAN22_S2V(WAN21): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel_S2V) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + audio_embed = kwargs.get("audio_embed", None) + if audio_embed is not None: + out['audio_embed'] = comfy.conds.CONDRegular(audio_embed) + + reference_latents = kwargs.get("reference_latents", None) + if reference_latents is not None: + out['reference_latent'] = comfy.conds.CONDRegular(self.process_latent_in(reference_latents[-1])) + + reference_motion = kwargs.get("reference_motion", None) + if reference_motion is not None: + out['reference_motion'] = comfy.conds.CONDRegular(self.process_latent_in(reference_motion)) + + control_video = kwargs.get("control_video", None) + if control_video is not None: + out['control_video'] = comfy.conds.CONDRegular(self.process_latent_in(control_video)) + return out + class WAN22(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 0caff53e0..9f3ab64df 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -368,6 +368,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["model_type"] = "camera" else: dit_config["model_type"] = "camera_2.2" + elif 
'{}casual_audio_encoder.encoder.final_linear.weight'.format(key_prefix) in state_dict_keys: + dit_config["model_type"] = "s2v" else: if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys: dit_config["model_type"] = "i2v" diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 7ed6dfd69..ce571e6cb 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1072,6 +1072,19 @@ class WAN21_Vace(WAN21_T2V): out = model_base.WAN21_Vace(self, image_to_video=False, device=device) return out +class WAN22_S2V(WAN21_T2V): + unet_config = { + "image_model": "wan2.1", + "model_type": "s2v", + } + + def __init__(self, unet_config): + super().__init__(unet_config) + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.WAN22_S2V(self, device=device) + return out + class WAN22_T2V(WAN21_T2V): unet_config = { "image_model": "wan2.1", @@ -1272,6 +1285,6 @@ class QwenImage(supported_models_base.BASE): return supported_models_base.ClipTarget(comfy.text_encoders.qwen_image.QwenImageTokenizer, comfy.text_encoders.qwen_image.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 0fff02f76..89ff74d85 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -786,6 +786,180 @@ class WanTrackToVideo(io.ComfyNode): return io.NodeOutput(positive, negative, out_latent) +def linear_interpolation(features, input_fps, output_fps, output_len=None): + """ + features: shape=[1, T, 512] + input_fps: fps for audio, f_a + output_fps: fps for video, f_m + output_len: video length + """ + features = features.transpose(1, 2) # [1, 512, T] + seq_len = features.shape[2] / float(input_fps) # T/f_a + if output_len is None: + output_len = int(seq_len * output_fps) # f_m*T/f_a + output_features = torch.nn.functional.interpolate( + features, size=output_len, align_corners=True, + mode='linear') # [1, 512, output_len] + return output_features.transpose(1, 2) # [1, output_len, 512] + + +def get_sample_indices(original_fps, + total_frames, + target_fps, + num_sample, + fixed_start=None): + required_duration = num_sample / target_fps + required_origin_frames = 
int(np.ceil(required_duration * original_fps)) + if required_duration > total_frames / original_fps: + raise ValueError("required_duration must be less than video length") + + if not fixed_start is None and fixed_start >= 0: + start_frame = fixed_start + else: + max_start = total_frames - required_origin_frames + if max_start < 0: + raise ValueError("video length is too short") + start_frame = np.random.randint(0, max_start + 1) + start_time = start_frame / original_fps + + end_time = start_time + required_duration + time_points = np.linspace(start_time, end_time, num_sample, endpoint=False) + + frame_indices = np.round(np.array(time_points) * original_fps).astype(int) + frame_indices = np.clip(frame_indices, 0, total_frames - 1) + return frame_indices + + +def get_audio_embed_bucket_fps(audio_embed, fps=16, batch_frames=81, m=0, video_rate=30): + num_layers, audio_frame_num, audio_dim = audio_embed.shape + + if num_layers > 1: + return_all_layers = True + else: + return_all_layers = False + + scale = video_rate / fps + + min_batch_num = int(audio_frame_num / (batch_frames * scale)) + 1 + + bucket_num = min_batch_num * batch_frames + padd_audio_num = math.ceil(min_batch_num * batch_frames / fps * video_rate) - audio_frame_num + batch_idx = get_sample_indices( + original_fps=video_rate, + total_frames=audio_frame_num + padd_audio_num, + target_fps=fps, + num_sample=bucket_num, + fixed_start=0) + batch_audio_eb = [] + audio_sample_stride = int(video_rate / fps) + for bi in batch_idx: + if bi < audio_frame_num: + + chosen_idx = list( + range(bi - m * audio_sample_stride, bi + (m + 1) * audio_sample_stride, audio_sample_stride)) + chosen_idx = [0 if c < 0 else c for c in chosen_idx] + chosen_idx = [ + audio_frame_num - 1 if c >= audio_frame_num else c + for c in chosen_idx + ] + + if return_all_layers: + frame_audio_embed = audio_embed[:, chosen_idx].flatten( + start_dim=-2, end_dim=-1) + else: + frame_audio_embed = audio_embed[0][chosen_idx].flatten() + else: + frame_audio_embed = torch.zeros([audio_dim * (2 * m + 1)], device=audio_embed.device) if not return_all_layers \ + else torch.zeros([num_layers, audio_dim * (2 * m + 1)], device=audio_embed.device) + batch_audio_eb.append(frame_audio_embed) + batch_audio_eb = torch.cat([c.unsqueeze(0) for c in batch_audio_eb], dim=0) + + return batch_audio_eb, min_batch_num + + +class WanSoundImageToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanSoundImageToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=77, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.AudioEncoderOutput.Input("audio_encoder_output", optional=True), + io.Image.Input("ref_image", optional=True), + io.Image.Input("control_video", optional=True), + io.Image.Input("ref_motion", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + is_experimental=True, + ) + + @classmethod + def execute(cls, positive, negative, vae, width, height, length, batch_size, ref_image=None, audio_encoder_output=None, control_video=None, ref_motion=None) -> io.NodeOutput: + 
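        # Worked example of the audio bucketing above (clip duration is
        # illustrative; the 50 Hz feature rate, fps=16 and video_rate=30 come
        # from the code below): with the default length=77, latent_t = 20 and
        # batch_frames = latent_t * 4 = 80 video frames per window. A 5 s clip
        # gives 250 feature frames, resampled by linear_interpolation to the
        # 30 fps video rate: int(250 / 50 * 30) = 150 frames. Then
        #   scale         = 30 / 16 = 1.875
        #   min_batch_num = int(150 / (80 * 1.875)) + 1 = 2
        #   bucket_num    = 2 * 80 = 160 sampled embeddings
        # Sample indices past the real 150 frames come back as zero embeddings,
        # so the clip is padded out to two 80-frame windows (num_repeat = 2).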
latent_t = ((length - 1) // 4) + 1 + if audio_encoder_output is not None: + feat = torch.cat(audio_encoder_output["encoded_audio_all_layers"]) + video_rate = 30 + fps = 16 + feat = linear_interpolation(feat, input_fps=50, output_fps=video_rate) + audio_embed_bucket, num_repeat = get_audio_embed_bucket_fps(feat, fps=fps, batch_frames=latent_t * 4, m=0, video_rate=video_rate) + audio_embed_bucket = audio_embed_bucket.unsqueeze(0) + if len(audio_embed_bucket.shape) == 3: + audio_embed_bucket = audio_embed_bucket.permute(0, 2, 1) + elif len(audio_embed_bucket.shape) == 4: + audio_embed_bucket = audio_embed_bucket.permute(0, 2, 3, 1) + + positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_embed_bucket}) + negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_embed_bucket}) + + if ref_image is not None: + ref_image = comfy.utils.common_upscale(ref_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + ref_latent = vae.encode(ref_image[:, :, :, :3]) + positive = node_helpers.conditioning_set_values(positive, {"reference_latents": [ref_latent]}, append=True) + negative = node_helpers.conditioning_set_values(negative, {"reference_latents": [ref_latent]}, append=True) + + if ref_motion is not None: + if ref_motion.shape[0] > 73: + ref_motion = ref_motion[-73:] + + ref_motion = comfy.utils.common_upscale(ref_motion.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + + if ref_motion.shape[0] < 73: + r = torch.ones([73, height, width, 3]) * 0.5 + r[-ref_motion.shape[0]:] = ref_motion + ref_motion = r + + ref_motion = vae.encode(ref_motion[:, :, :, :3]) + positive = node_helpers.conditioning_set_values(positive, {"reference_motion": ref_motion}) + negative = node_helpers.conditioning_set_values(negative, {"reference_motion": ref_motion}) + + latent = torch.zeros([batch_size, 16, latent_t, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + + control_video_out = comfy.latent_formats.Wan21().process_out(torch.zeros_like(latent)) + if control_video is not None: + control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + control_video = vae.encode(control_video[:, :, :, :3]) + control_video_out[:, :, :control_video.shape[2]] = control_video + + # TODO: check if zero is better than none if none provided + positive = node_helpers.conditioning_set_values(positive, {"control_video": control_video_out}) + negative = node_helpers.conditioning_set_values(negative, {"control_video": control_video_out}) + + out_latent = {} + out_latent["samples"] = latent + return io.NodeOutput(positive, negative, out_latent) + + class Wan22ImageToVideoLatent(io.ComfyNode): @classmethod def define_schema(cls): @@ -844,6 +1018,7 @@ class WanExtension(ComfyExtension): TrimVideoLatent, WanCameraImageToVideo, WanPhantomSubjectToVideo, + WanSoundImageToVideo, Wan22ImageToVideoLatent, ] From 31a37686d02aeaba8ea827933832be7601b31fac Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 27 Aug 2025 09:44:29 -0700 Subject: [PATCH 0501/1073] Negative audio in s2v should be zeros. 
(#9578) --- comfy_extras/nodes_wan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 89ff74d85..312260f00 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -920,7 +920,7 @@ class WanSoundImageToVideo(io.ComfyNode): audio_embed_bucket = audio_embed_bucket.permute(0, 2, 3, 1) positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_embed_bucket}) - negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_embed_bucket}) + negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_embed_bucket * 0.0}) if ref_image is not None: ref_image = comfy.utils.common_upscale(ref_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) From b20ba1f27cbd4e1c84cf8ec72b345723de9e7c80 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Thu, 28 Aug 2025 00:45:02 +0800 Subject: [PATCH 0502/1073] Fix #9537 (#9576) --- comfy/weight_adapter/lokr.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/weight_adapter/lokr.py b/comfy/weight_adapter/lokr.py index 49b0be55f..563c835f5 100644 --- a/comfy/weight_adapter/lokr.py +++ b/comfy/weight_adapter/lokr.py @@ -97,6 +97,9 @@ class LoKrAdapter(WeightAdapterBase): (mat1, mat2, alpha, None, None, None, None, None, None) ) + def to_train(self): + return LokrDiff(self.weights) + @classmethod def load( cls, From b5ac6ed7ce73294e0025ffe3b16452d8434b83c7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 27 Aug 2025 12:26:28 -0700 Subject: [PATCH 0503/1073] Fixes to make controlnet type models work on qwen edit and kontext. (#9581) --- comfy/ldm/flux/model.py | 4 ++-- comfy/ldm/qwen_image/model.py | 2 +- comfy_extras/nodes_model_patch.py | 8 +++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 0a77fa097..1344c3a57 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -158,7 +158,7 @@ class Flux(nn.Module): if i < len(control_i): add = control_i[i] if add is not None: - img += add + img[:, :add.shape[1]] += add if img.dtype == torch.float16: img = torch.nan_to_num(img, nan=0.0, posinf=65504, neginf=-65504) @@ -189,7 +189,7 @@ class Flux(nn.Module): if i < len(control_o): add = control_o[i] if add is not None: - img[:, txt.shape[1] :, ...] += add + img[:, txt.shape[1] : txt.shape[1] + add.shape[1], ...] += add img = img[:, txt.shape[1] :, ...] 
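A tensor-level sketch of the slicing fix above (shapes are illustrative, not from the patch): with Kontext or Qwen edit inputs, reference latents are appended to the token stream, so a controlnet residual computed for the base image covers fewer tokens than the full sequence and must be added to a leading slice rather than the whole tensor:

import torch
img = torch.zeros(2, 6144, 3072)   # base image tokens plus appended reference tokens
add = torch.randn(2, 4096, 3072)   # control residual for the base image tokens only
img[:, :add.shape[1]] += add       # trailing reference tokens are left untouched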
diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 57a458210..04071f31c 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -459,7 +459,7 @@ class QwenImageTransformer2DModel(nn.Module): if i < len(control_i): add = control_i[i] if add is not None: - hidden_states += add + hidden_states[:, :add.shape[1]] += add hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index 3eaada9bc..32c40ced3 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -89,6 +89,7 @@ class DiffSynthCnetPatch: self.strength = strength self.mask = mask self.encoded_image = model_patch.model.process_input_latent_image(self.encode_latent_cond(image)) + self.encoded_image_size = (image.shape[1], image.shape[2]) def encode_latent_cond(self, image): latent_image = self.vae.encode(image) @@ -106,14 +107,15 @@ class DiffSynthCnetPatch: x = kwargs.get("x") img = kwargs.get("img") block_index = kwargs.get("block_index") - if self.encoded_image is None or self.encoded_image.shape[1:] != img.shape[1:]: - spacial_compression = self.vae.spacial_compression_encode() + spacial_compression = self.vae.spacial_compression_encode() + if self.encoded_image is None or self.encoded_image_size != (x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression): image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center") loaded_models = comfy.model_management.loaded_models(only_currently_used=True) self.encoded_image = self.model_patch.model.process_input_latent_image(self.encode_latent_cond(image_scaled.movedim(1, -1))) + self.encoded_image_size = (image_scaled.shape[-2], image_scaled.shape[-1]) comfy.model_management.load_models_gpu(loaded_models) - img = img + (self.model_patch.model.control_block(img, self.encoded_image.to(img.dtype), block_index) * self.strength) + img[:, :self.encoded_image.shape[1]] += (self.model_patch.model.control_block(img[:, :self.encoded_image.shape[1]], self.encoded_image.to(img.dtype), block_index) * self.strength) kwargs['img'] = img return kwargs From 496888fd68813033c260195bf70e4d11181e5454 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 27 Aug 2025 13:06:40 -0700 Subject: [PATCH 0504/1073] Improve s2v performance when generating videos longer than 120 frames. 
(#9582) --- comfy/ldm/wan/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index dedfb47e2..e70446c86 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -1255,6 +1255,7 @@ class WanModel_S2V(WanModel): audio_emb = None # embeddings + bs, _, time, height, width = x.shape x = self.patch_embedding(x.float()).to(x.dtype) if control_video is not None: x = x + self.cond_encoder(control_video) @@ -1272,7 +1273,7 @@ class WanModel_S2V(WanModel): if reference_latent is not None: ref = self.patch_embedding(reference_latent.float()).to(x.dtype) ref = ref.flatten(2).transpose(1, 2) - freqs_ref = self.rope_encode(reference_latent.shape[-3], reference_latent.shape[-2], reference_latent.shape[-1], t_start=30, device=x.device, dtype=x.dtype) + freqs_ref = self.rope_encode(reference_latent.shape[-3], reference_latent.shape[-2], reference_latent.shape[-1], t_start=max(30, time + 9), device=x.device, dtype=x.dtype) ref = ref + cond_mask_weight[1] x = torch.cat([x, ref], dim=1) freqs = torch.cat([freqs, freqs_ref], dim=1) @@ -1296,7 +1297,6 @@ class WanModel_S2V(WanModel): # context context = self.text_embedding(context) - patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) for i, block in enumerate(self.blocks): From 491755325cc189d0aa1513b12fac738c87e38de6 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 27 Aug 2025 16:02:42 -0700 Subject: [PATCH 0505/1073] Better s2v memory estimation. (#9584) --- comfy/ldm/wan/model.py | 2 ++ comfy/model_base.py | 25 +++++++++++++++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index e70446c86..47857dc2b 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -1278,6 +1278,7 @@ class WanModel_S2V(WanModel): x = torch.cat([x, ref], dim=1) freqs = torch.cat([freqs, freqs_ref], dim=1) t = torch.cat([t, torch.zeros((t.shape[0], reference_latent.shape[-3]), device=t.device, dtype=t.dtype)], dim=1) + del ref, freqs_ref if reference_motion is not None: motion_encoded, freqs_motion = self.frame_packer(reference_motion, self) @@ -1287,6 +1288,7 @@ class WanModel_S2V(WanModel): t = torch.repeat_interleave(t, 2, dim=1) t = torch.cat([t, torch.zeros((t.shape[0], 3), device=t.device, dtype=t.dtype)], dim=1) + del motion_encoded, freqs_motion # time embeddings e = self.time_embedding( diff --git a/comfy/model_base.py b/comfy/model_base.py index 18d55c1c4..ce29fdc49 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -150,6 +150,7 @@ class BaseModel(torch.nn.Module): logging.debug("adm {}".format(self.adm_channels)) self.memory_usage_factor = model_config.memory_usage_factor self.memory_usage_factor_conds = () + self.memory_usage_shape_process = {} def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( @@ -350,8 +351,15 @@ class BaseModel(torch.nn.Module): input_shapes = [input_shape] for c in self.memory_usage_factor_conds: shape = cond_shapes.get(c, None) - if shape is not None and len(shape) > 0: - input_shapes += shape + if shape is not None: + if c in self.memory_usage_shape_process: + out = [] + for s in shape: + out.append(self.memory_usage_shape_process[c](s)) + shape = out + + if len(shape) > 0: + input_shapes += shape if 
comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): dtype = self.get_dtype() @@ -1204,6 +1212,8 @@ class WAN21_Camera(WAN21): class WAN22_S2V(WAN21): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel_S2V) + self.memory_usage_factor_conds = ("reference_latent", "reference_motion") + self.memory_usage_shape_process = {"reference_motion": lambda shape: [shape[0], shape[1], 1.5, shape[-2], shape[-1]]} def extra_conds(self, **kwargs): out = super().extra_conds(**kwargs) @@ -1224,6 +1234,17 @@ class WAN22_S2V(WAN21): out['control_video'] = comfy.conds.CONDRegular(self.process_latent_in(control_video)) return out + def extra_conds_shapes(self, **kwargs): + out = {} + ref_latents = kwargs.get("reference_latents", None) + if ref_latents is not None: + out['reference_latent'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16]) + + reference_motion = kwargs.get("reference_motion", None) + if reference_motion is not None: + out['reference_motion'] = reference_motion.shape + return out + class WAN22(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel) From 3aad339b63f03e17dc6ebae035b90afc2fefb627 Mon Sep 17 00:00:00 2001 From: Gangin Park Date: Thu, 28 Aug 2025 08:07:31 +0900 Subject: [PATCH 0506/1073] Add DPM++ 2M SDE Heun (RES) sampler (#9542) --- comfy/k_diffusion/sampling.py | 15 +++++++++++++++ comfy/samplers.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) mode change 100644 => 100755 comfy/samplers.py diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index a2bc492fd..fe6844b17 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -853,6 +853,11 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disabl return x +@torch.no_grad() +def sample_dpmpp_2m_sde_heun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='heun'): + return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type) + + @torch.no_grad() def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None): """DPM-Solver++(3M) SDE.""" @@ -925,6 +930,16 @@ def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, di return sample_dpmpp_3m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler) +@torch.no_grad() +def sample_dpmpp_2m_sde_heun_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='heun'): + if len(sigmas) <= 1: + return x + extra_args = {} if extra_args is None else extra_args + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler + return sample_dpmpp_2m_sde_heun(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, 
solver_type=solver_type) + + @torch.no_grad() def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'): if len(sigmas) <= 1: diff --git a/comfy/samplers.py b/comfy/samplers.py old mode 100644 new mode 100755 index c7dfef4ea..b3202cec6 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -729,7 +729,7 @@ class Sampler: KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2","dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", - "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", + "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece"] From 38f697d953c3989db67e543795768bf954ae0231 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 27 Aug 2025 19:28:10 -0700 Subject: [PATCH 0507/1073] Add a LatentConcat node. (#9587) --- comfy_extras/nodes_latent.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index f33ed1bee..247d886a1 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -105,6 +105,38 @@ class LatentInterpolate: samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio)) return (samples_out,) +class LatentConcat: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",), "dim": (["x", "-x", "y", "-y", "t", "-t"], )}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples1, samples2, dim): + samples_out = samples1.copy() + + s1 = samples1["samples"] + s2 = samples2["samples"] + s2 = comfy.utils.repeat_to_batch_size(s2, s1.shape[0]) + + if "-" in dim: + c = (s2, s1) + else: + c = (s1, s2) + + if "x" in dim: + dim = -1 + elif "y" in dim: + dim = -2 + elif "t" in dim: + dim = -3 + + samples_out["samples"] = torch.cat(c, dim=dim) + return (samples_out,) + class LatentBatch: @classmethod def INPUT_TYPES(s): @@ -279,6 +311,7 @@ NODE_CLASS_MAPPINGS = { "LatentSubtract": LatentSubtract, "LatentMultiply": LatentMultiply, "LatentInterpolate": LatentInterpolate, + "LatentConcat": LatentConcat, "LatentBatch": LatentBatch, "LatentBatchSeedBehavior": LatentBatchSeedBehavior, "LatentApplyOperation": LatentApplyOperation, From 4aa79dbf2c5118853659fc7f7f8590594ab72417 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 27 Aug 2025 20:08:17 -0700 Subject: [PATCH 0508/1073] Adjust flux mem usage factor a bit. 
(#9588) --- comfy/supported_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index ce571e6cb..76260de00 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -700,7 +700,7 @@ class Flux(supported_models_base.BASE): unet_extra_config = {} latent_format = latent_formats.Flux - memory_usage_factor = 2.8 + memory_usage_factor = 3.1 # TODO: debug why flux mem usage is so weird on windows. supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] From 0eb821a7b6612af0fa3aaa8302739788a4bd629e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 27 Aug 2025 23:09:06 -0400 Subject: [PATCH 0509/1073] ComfyUI 0.3.53 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 834c3e8c2..d6fdc47fe 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.52" +__version__ = "0.3.53" diff --git a/pyproject.toml b/pyproject.toml index f6e765a81..a71ad2bbf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.52" +version = "0.3.53" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From ce0052c087cb1e81ba01e8afbe362bec54eeb665 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 28 Aug 2025 07:37:42 -0700 Subject: [PATCH 0510/1073] Fix diffsynth controlnet regression. (#9597) --- comfy_extras/nodes_model_patch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index 32c40ced3..65e766b52 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -108,7 +108,7 @@ class DiffSynthCnetPatch: img = kwargs.get("img") block_index = kwargs.get("block_index") spacial_compression = self.vae.spacial_compression_encode() - if self.encoded_image is None or self.encoded_image_size != (x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression): + if self.encoded_image is None or self.encoded_image_size != (x.shape[-2] * spacial_compression, x.shape[-1] * spacial_compression): image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center") loaded_models = comfy.model_management.loaded_models(only_currently_used=True) self.encoded_image = self.model_patch.model.process_input_latent_image(self.encode_latent_cond(image_scaled.movedim(1, -1))) From 00636101771cb373354d6294cc6567deda2635f6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Aug 2025 10:44:57 -0400 Subject: [PATCH 0511/1073] ComfyUI version 0.3.54 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index d6fdc47fe..7034953fd 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.53" +__version__ = "0.3.54" diff --git a/pyproject.toml b/pyproject.toml index a71ad2bbf..9f9ac1e21 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.53" +version = "0.3.54" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From edde0b50431e296f61f79205e25cb01f653013a2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 28 Aug 2025 14:59:48 -0700 Subject: [PATCH 0512/1073] WanSoundImageToVideoExtend node to manually extend s2v video. (#9606) --- comfy_extras/nodes_wan.py | 145 +++++++++++++++++++++++++------------- 1 file changed, 97 insertions(+), 48 deletions(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 312260f00..0a55bd5d0 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -877,6 +877,67 @@ def get_audio_embed_bucket_fps(audio_embed, fps=16, batch_frames=81, m=0, video_ return batch_audio_eb, min_batch_num +def wan_sound_to_video(positive, negative, vae, width, height, length, batch_size, frame_offset=0, ref_image=None, audio_encoder_output=None, control_video=None, ref_motion=None, ref_motion_latent=None): + latent_t = ((length - 1) // 4) + 1 + if audio_encoder_output is not None: + feat = torch.cat(audio_encoder_output["encoded_audio_all_layers"]) + video_rate = 30 + fps = 16 + feat = linear_interpolation(feat, input_fps=50, output_fps=video_rate) + batch_frames = latent_t * 4 + audio_embed_bucket, num_repeat = get_audio_embed_bucket_fps(feat, fps=fps, batch_frames=batch_frames, m=0, video_rate=video_rate) + audio_embed_bucket = audio_embed_bucket.unsqueeze(0) + if len(audio_embed_bucket.shape) == 3: + audio_embed_bucket = audio_embed_bucket.permute(0, 2, 1) + elif len(audio_embed_bucket.shape) == 4: + audio_embed_bucket = audio_embed_bucket.permute(0, 2, 3, 1) + + audio_embed_bucket = audio_embed_bucket[:, :, :, frame_offset:frame_offset + batch_frames] + positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_embed_bucket}) + negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_embed_bucket * 0.0}) + frame_offset += batch_frames + + if ref_image is not None: + ref_image = comfy.utils.common_upscale(ref_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + ref_latent = vae.encode(ref_image[:, :, :, :3]) + positive = node_helpers.conditioning_set_values(positive, {"reference_latents": [ref_latent]}, append=True) + negative = node_helpers.conditioning_set_values(negative, {"reference_latents": [ref_latent]}, append=True) + + if ref_motion is not None: + if ref_motion.shape[0] > 73: + ref_motion = ref_motion[-73:] + + ref_motion = comfy.utils.common_upscale(ref_motion.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + + if ref_motion.shape[0] < 73: + r = torch.ones([73, height, width, 3]) * 0.5 + r[-ref_motion.shape[0]:] = ref_motion + ref_motion = r + + ref_motion_latent = vae.encode(ref_motion[:, :, :, :3]) + + if ref_motion_latent is not None: + ref_motion_latent = ref_motion_latent[:, :, -19:] + positive = node_helpers.conditioning_set_values(positive, {"reference_motion": ref_motion_latent}) + negative = node_helpers.conditioning_set_values(negative, {"reference_motion": ref_motion_latent}) + + latent = torch.zeros([batch_size, 16, latent_t, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + + control_video_out = 
comfy.latent_formats.Wan21().process_out(torch.zeros_like(latent)) + if control_video is not None: + control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + control_video = vae.encode(control_video[:, :, :, :3]) + control_video_out[:, :, :control_video.shape[2]] = control_video + + # TODO: check if zero is better than none if none provided + positive = node_helpers.conditioning_set_values(positive, {"control_video": control_video_out}) + negative = node_helpers.conditioning_set_values(negative, {"control_video": control_video_out}) + + out_latent = {} + out_latent["samples"] = latent + return positive, negative, out_latent, frame_offset + + class WanSoundImageToVideo(io.ComfyNode): @classmethod def define_schema(cls): @@ -906,57 +967,44 @@ class WanSoundImageToVideo(io.ComfyNode): @classmethod def execute(cls, positive, negative, vae, width, height, length, batch_size, ref_image=None, audio_encoder_output=None, control_video=None, ref_motion=None) -> io.NodeOutput: - latent_t = ((length - 1) // 4) + 1 - if audio_encoder_output is not None: - feat = torch.cat(audio_encoder_output["encoded_audio_all_layers"]) - video_rate = 30 - fps = 16 - feat = linear_interpolation(feat, input_fps=50, output_fps=video_rate) - audio_embed_bucket, num_repeat = get_audio_embed_bucket_fps(feat, fps=fps, batch_frames=latent_t * 4, m=0, video_rate=video_rate) - audio_embed_bucket = audio_embed_bucket.unsqueeze(0) - if len(audio_embed_bucket.shape) == 3: - audio_embed_bucket = audio_embed_bucket.permute(0, 2, 1) - elif len(audio_embed_bucket.shape) == 4: - audio_embed_bucket = audio_embed_bucket.permute(0, 2, 3, 1) + positive, negative, out_latent, frame_offset = wan_sound_to_video(positive, negative, vae, width, height, length, batch_size, ref_image=ref_image, audio_encoder_output=audio_encoder_output, + control_video=control_video, ref_motion=ref_motion) + return io.NodeOutput(positive, negative, out_latent) - positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_embed_bucket}) - negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_embed_bucket * 0.0}) - if ref_image is not None: - ref_image = comfy.utils.common_upscale(ref_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) - ref_latent = vae.encode(ref_image[:, :, :, :3]) - positive = node_helpers.conditioning_set_values(positive, {"reference_latents": [ref_latent]}, append=True) - negative = node_helpers.conditioning_set_values(negative, {"reference_latents": [ref_latent]}, append=True) +class WanSoundImageToVideoExtend(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanSoundImageToVideoExtend", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Int.Input("length", default=77, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Latent.Input("video_latent"), + io.AudioEncoderOutput.Input("audio_encoder_output", optional=True), + io.Image.Input("ref_image", optional=True), + io.Image.Input("control_video", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + is_experimental=True, + ) - if ref_motion is not None: - if ref_motion.shape[0] > 73: - ref_motion = ref_motion[-73:] - - ref_motion = comfy.utils.common_upscale(ref_motion.movedim(-1, 
1), width, height, "bilinear", "center").movedim(1, -1) - - if ref_motion.shape[0] < 73: - r = torch.ones([73, height, width, 3]) * 0.5 - r[-ref_motion.shape[0]:] = ref_motion - ref_motion = r - - ref_motion = vae.encode(ref_motion[:, :, :, :3]) - positive = node_helpers.conditioning_set_values(positive, {"reference_motion": ref_motion}) - negative = node_helpers.conditioning_set_values(negative, {"reference_motion": ref_motion}) - - latent = torch.zeros([batch_size, 16, latent_t, height // 8, width // 8], device=comfy.model_management.intermediate_device()) - - control_video_out = comfy.latent_formats.Wan21().process_out(torch.zeros_like(latent)) - if control_video is not None: - control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) - control_video = vae.encode(control_video[:, :, :, :3]) - control_video_out[:, :, :control_video.shape[2]] = control_video - - # TODO: check if zero is better than none if none provided - positive = node_helpers.conditioning_set_values(positive, {"control_video": control_video_out}) - negative = node_helpers.conditioning_set_values(negative, {"control_video": control_video_out}) - - out_latent = {} - out_latent["samples"] = latent + @classmethod + def execute(cls, positive, negative, vae, length, video_latent, ref_image=None, audio_encoder_output=None, control_video=None) -> io.NodeOutput: + video_latent = video_latent["samples"] + width = video_latent.shape[-1] * 8 + height = video_latent.shape[-2] * 8 + batch_size = video_latent.shape[0] + frame_offset = video_latent.shape[-3] * 4 + positive, negative, out_latent, frame_offset = wan_sound_to_video(positive, negative, vae, width, height, length, batch_size, frame_offset=frame_offset, ref_image=ref_image, audio_encoder_output=audio_encoder_output, + control_video=control_video, ref_motion=None, ref_motion_latent=video_latent) return io.NodeOutput(positive, negative, out_latent) @@ -1019,6 +1067,7 @@ class WanExtension(ComfyExtension): WanCameraImageToVideo, WanPhantomSubjectToVideo, WanSoundImageToVideo, + WanSoundImageToVideoExtend, Wan22ImageToVideoLatent, ] From 1c184c29eb2a8f6fdd4e49f27347809090038e3f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 28 Aug 2025 15:34:01 -0700 Subject: [PATCH 0513/1073] Fix issue with s2v node when extending past audio length. 
(#9608) --- comfy_extras/nodes_wan.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 0a55bd5d0..2cbc93ceb 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -893,9 +893,10 @@ def wan_sound_to_video(positive, negative, vae, width, height, length, batch_siz audio_embed_bucket = audio_embed_bucket.permute(0, 2, 3, 1) audio_embed_bucket = audio_embed_bucket[:, :, :, frame_offset:frame_offset + batch_frames] - positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_embed_bucket}) - negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_embed_bucket * 0.0}) - frame_offset += batch_frames + if audio_embed_bucket.shape[3] > 0: + positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_embed_bucket}) + negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_embed_bucket * 0.0}) + frame_offset += batch_frames if ref_image is not None: ref_image = comfy.utils.common_upscale(ref_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) From d28b39d93dc498110e28ca32c8f39e6de631aa42 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 28 Aug 2025 16:38:28 -0700 Subject: [PATCH 0514/1073] Add a LatentCut node to cut latents. (#9609) --- comfy_extras/nodes_latent.py | 37 ++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index 247d886a1..0f90cf60c 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -1,6 +1,7 @@ import comfy.utils import comfy_extras.nodes_post_processing import torch +import nodes def reshape_latent_to(target_shape, latent, repeat_batch=True): @@ -137,6 +138,41 @@ class LatentConcat: samples_out["samples"] = torch.cat(c, dim=dim) return (samples_out,) +class LatentCut: + @classmethod + def INPUT_TYPES(s): + return {"required": {"samples": ("LATENT",), + "dim": (["x", "y", "t"], ), + "index": ("INT", {"default": 0, "min": -nodes.MAX_RESOLUTION, "max": nodes.MAX_RESOLUTION, "step": 1}), + "amount": ("INT", {"default": 1, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 1})}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples, dim, index, amount): + samples_out = samples.copy() + + s1 = samples["samples"] + + if "x" in dim: + dim = s1.ndim - 1 + elif "y" in dim: + dim = s1.ndim - 2 + elif "t" in dim: + dim = s1.ndim - 3 + + if index >= 0: + index = min(index, s1.shape[dim] - 1) + amount = min(s1.shape[dim] - index, amount) + else: + index = max(index, -s1.shape[dim]) + amount = min(-index, amount) + + samples_out["samples"] = torch.narrow(s1, dim, index, amount) + return (samples_out,) + class LatentBatch: @classmethod def INPUT_TYPES(s): @@ -312,6 +348,7 @@ NODE_CLASS_MAPPINGS = { "LatentMultiply": LatentMultiply, "LatentInterpolate": LatentInterpolate, "LatentConcat": LatentConcat, + "LatentCut": LatentCut, "LatentBatch": LatentBatch, "LatentBatchSeedBehavior": LatentBatchSeedBehavior, "LatentApplyOperation": LatentApplyOperation, From e80a14ad5073d9eba175c2d2c768a5ca8e4c63ea Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 28 Aug 2025 19:13:07 -0700 Subject: [PATCH 0515/1073] Support wan2.2 5B fun control model. (#9611) Use the Wan22FunControlToVideo node. 
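
The change below derives the latent channel count and spatial compression
factor from the VAE instead of hardcoding the Wan 2.1 values (16 channels,
8x spatial compression), and selects the Wan22 latent format when the VAE
reports 48 channels, so the Wan 2.2 5B VAE produces correctly shaped empty
and concat latents. A minimal sketch of the shape math, with illustrative
names rather than the actual ComfyUI helpers:

    import torch

    def empty_wan_latent(latent_channels: int, spacial_scale: int,
                         width: int, height: int, length: int,
                         batch_size: int = 1) -> torch.Tensor:
        # Wan packs 4 video frames per temporal latent step; spatial dims
        # shrink by the VAE's compression factor.
        return torch.zeros([
            batch_size,
            latent_channels,
            ((length - 1) // 4) + 1,
            height // spacial_scale,
            width // spacial_scale,
        ])

    # Wan 2.1 VAE: 16 latent channels, 8x spatial compression.
    assert empty_wan_latent(16, 8, 832, 480, 81).shape == (1, 16, 21, 60, 104)
    # Wan 2.2 5B VAE: 48 latent channels, 16x spatial compression.
    assert empty_wan_latent(48, 16, 832, 480, 81).shape == (1, 48, 21, 30, 52)
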
--- comfy/model_base.py | 15 ++++++--------- comfy_extras/nodes_wan.py | 19 ++++++++++++------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index ce29fdc49..56a6798be 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1110,9 +1110,10 @@ class WAN21(BaseModel): shape_image[1] = extra_channels image = torch.zeros(shape_image, dtype=noise.dtype, layout=noise.layout, device=noise.device) else: + latent_dim = self.latent_format.latent_channels image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") - for i in range(0, image.shape[1], 16): - image[:, i: i + 16] = self.process_latent_in(image[:, i: i + 16]) + for i in range(0, image.shape[1], latent_dim): + image[:, i: i + latent_dim] = self.process_latent_in(image[:, i: i + latent_dim]) image = utils.resize_to_batch_size(image, noise.shape[0]) if extra_channels != image.shape[1] + 4: @@ -1245,18 +1246,14 @@ class WAN22_S2V(WAN21): out['reference_motion'] = reference_motion.shape return out -class WAN22(BaseModel): +class WAN22(WAN21): def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): - super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel) + super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel) self.image_to_video = image_to_video def extra_conds(self, **kwargs): out = super().extra_conds(**kwargs) - cross_attn = kwargs.get("cross_attn", None) - if cross_attn is not None: - out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) - - denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) + denoise_mask = kwargs.get("denoise_mask", None) if denoise_mask is not None: out["denoise_mask"] = comfy.conds.CONDRegular(denoise_mask) return out diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 2cbc93ceb..8c1d36613 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -139,16 +139,21 @@ class Wan22FunControlToVideo(io.ComfyNode): @classmethod def execute(cls, positive, negative, vae, width, height, length, batch_size, ref_image=None, start_image=None, control_video=None) -> io.NodeOutput: - latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) - concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) - concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent) + spacial_scale = vae.spacial_compression_encode() + latent_channels = vae.latent_channels + latent = torch.zeros([batch_size, latent_channels, ((length - 1) // 4) + 1, height // spacial_scale, width // spacial_scale], device=comfy.model_management.intermediate_device()) + concat_latent = torch.zeros([batch_size, latent_channels, ((length - 1) // 4) + 1, height // spacial_scale, width // spacial_scale], device=comfy.model_management.intermediate_device()) + if latent_channels == 48: + concat_latent = comfy.latent_formats.Wan22().process_out(concat_latent) + else: + concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent) concat_latent = concat_latent.repeat(1, 2, 1, 1, 1) mask = torch.ones((1, 1, latent.shape[2] * 4, latent.shape[-2], latent.shape[-1])) if start_image is not None: start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, 
"bilinear", "center").movedim(1, -1) concat_latent_image = vae.encode(start_image[:, :, :, :3]) - concat_latent[:,16:,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]] + concat_latent[:,latent_channels:,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]] mask[:, :, :start_image.shape[0] + 3] = 0.0 ref_latent = None @@ -159,11 +164,11 @@ class Wan22FunControlToVideo(io.ComfyNode): if control_video is not None: control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) concat_latent_image = vae.encode(control_video[:, :, :, :3]) - concat_latent[:,:16,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]] + concat_latent[:,:latent_channels,:concat_latent_image.shape[2]] = concat_latent_image[:,:,:concat_latent.shape[2]] mask = mask.view(1, mask.shape[2] // 4, 4, mask.shape[3], mask.shape[4]).transpose(1, 2) - positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent, "concat_mask": mask, "concat_mask_index": 16}) - negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent, "concat_mask": mask, "concat_mask_index": 16}) + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent, "concat_mask": mask, "concat_mask_index": latent_channels}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent, "concat_mask": mask, "concat_mask_index": latent_channels}) if ref_latent is not None: positive = node_helpers.conditioning_set_values(positive, {"reference_latents": [ref_latent]}, append=True) From c7bb3e2bceaad7accd52c23d22b97a1b6808304b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 28 Aug 2025 19:46:57 -0700 Subject: [PATCH 0516/1073] Support the 5B fun inpaint model. (#9614) Use the WanFunInpaintToVideo node without the clip_vision_output. --- comfy_extras/nodes_wan.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 8c1d36613..4f73369f5 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -206,7 +206,8 @@ class WanFirstLastFrameToVideo(io.ComfyNode): @classmethod def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_start_image=None, clip_vision_end_image=None) -> io.NodeOutput: - latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + spacial_scale = vae.spacial_compression_encode() + latent = torch.zeros([batch_size, vae.latent_channels, ((length - 1) // 4) + 1, height // spacial_scale, width // spacial_scale], device=comfy.model_management.intermediate_device()) if start_image is not None: start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) if end_image is not None: From 15aa9222c4d1fc74f5190d7c7e56ef986d0d7146 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 29 Aug 2025 01:12:00 -0700 Subject: [PATCH 0517/1073] Trim audio to video when saving video. 
(#9617) --- comfy_api/latest/_input_impl/video_types.py | 34 ++++++--------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/comfy_api/latest/_input_impl/video_types.py b/comfy_api/latest/_input_impl/video_types.py index 28de9651d..f646504c8 100644 --- a/comfy_api/latest/_input_impl/video_types.py +++ b/comfy_api/latest/_input_impl/video_types.py @@ -8,6 +8,7 @@ import av import io import json import numpy as np +import math import torch from comfy_api.latest._util import VideoContainer, VideoCodec, VideoComponents @@ -282,8 +283,6 @@ class VideoFromComponents(VideoInput): if self.__components.audio: audio_sample_rate = int(self.__components.audio['sample_rate']) audio_stream = output.add_stream('aac', rate=audio_sample_rate) - audio_stream.sample_rate = audio_sample_rate - audio_stream.format = 'fltp' # Encode video for i, frame in enumerate(self.__components.images): @@ -298,27 +297,12 @@ class VideoFromComponents(VideoInput): output.mux(packet) if audio_stream and self.__components.audio: - # Encode audio - samples_per_frame = int(audio_sample_rate / frame_rate) - num_frames = self.__components.audio['waveform'].shape[2] // samples_per_frame - for i in range(num_frames): - start = i * samples_per_frame - end = start + samples_per_frame - # TODO(Feature) - Add support for stereo audio - chunk = ( - self.__components.audio["waveform"][0, 0, start:end] - .unsqueeze(0) - .contiguous() - .numpy() - ) - audio_frame = av.AudioFrame.from_ndarray(chunk, format='fltp', layout='mono') - audio_frame.sample_rate = audio_sample_rate - audio_frame.pts = i * samples_per_frame - for packet in audio_stream.encode(audio_frame): - output.mux(packet) - - # Flush audio - for packet in audio_stream.encode(None): - output.mux(packet) - + waveform = self.__components.audio['waveform'] + waveform = waveform[:, :, :math.ceil((audio_sample_rate / frame_rate) * self.__components.images.shape[0])] + frame = av.AudioFrame.from_ndarray(waveform.movedim(2, 1).reshape(1, -1).float().numpy(), format='flt', layout='mono' if waveform.shape[1] == 1 else 'stereo') + frame.sample_rate = audio_sample_rate + frame.pts = 0 + output.mux(audio_stream.encode(frame)) + # Flush encoder + output.mux(audio_stream.encode(None)) From 2efb2cbc38714074b0a48a9f4d70fa43f41499f4 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Fri, 29 Aug 2025 18:03:25 +0800 Subject: [PATCH 0518/1073] Update template to 0.1.70 (#9620) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 93d88859d..7f64aacca 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.11 -comfyui-workflow-templates==0.1.68 +comfyui-workflow-templates==0.1.70 comfyui-embedded-docs==0.2.6 torch torchsde From a86aaa430183068e2a264495c802c81d05eb350a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 29 Aug 2025 05:33:29 -0400 Subject: [PATCH 0519/1073] ComfyUI v0.3.55 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 7034953fd..36777e285 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.54" +__version__ = "0.3.55" diff --git a/pyproject.toml b/pyproject.toml index 9f9ac1e21..04514b4a8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.54" +version = "0.3.55" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 885015eecf649d6e49e1ade68e4475b434517b82 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 29 Aug 2025 20:06:04 -0700 Subject: [PATCH 0520/1073] Lower ram usage on windows. (#9628) --- main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/main.py b/main.py index 9b2a33011..b23d50816 100644 --- a/main.py +++ b/main.py @@ -112,6 +112,7 @@ import gc if os.name == "nt": + os.environ['MIMALLOC_PURGE_DELAY'] = '0' logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": From 4449e147692366ac8b9bd3b8834c771bc81e91ac Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 30 Aug 2025 06:31:19 -0400 Subject: [PATCH 0521/1073] ComfyUI version 0.3.56 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 36777e285..e8e039373 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.55" +__version__ = "0.3.56" diff --git a/pyproject.toml b/pyproject.toml index 04514b4a8..cfd5d45ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.55" +version = "0.3.56" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From f949094b3cbc33779dbf8d3fd140028f8044d5c1 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 31 Aug 2025 06:19:21 +0300 Subject: [PATCH 0522/1073] convert Stable Cascade nodes to V3 schema (#9373) --- comfy_extras/nodes_stable_cascade.py | 165 +++++++++++++++------------ 1 file changed, 93 insertions(+), 72 deletions(-) diff --git a/comfy_extras/nodes_stable_cascade.py b/comfy_extras/nodes_stable_cascade.py index 003403215..04c0b366a 100644 --- a/comfy_extras/nodes_stable_cascade.py +++ b/comfy_extras/nodes_stable_cascade.py @@ -17,55 +17,61 @@ """ import torch -import nodes +from typing_extensions import override + import comfy.utils +import nodes +from comfy_api.latest import ComfyExtension, io -class StableCascade_EmptyLatentImage: - def __init__(self, device="cpu"): - self.device = device +class StableCascade_EmptyLatentImage(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="StableCascade_EmptyLatentImage", + category="latent/stable_cascade", + inputs=[ + io.Int.Input("width", default=1024, min=256, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=1024, min=256, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("compression", default=42, min=4, max=128, step=1), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(display_name="stage_c"), + io.Latent.Output(display_name="stage_b"), + ], + ) @classmethod - def INPUT_TYPES(s): - return {"required": { - "width": ("INT", {"default": 1024, "min": 256, "max": nodes.MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 1024, "min": 256, "max": nodes.MAX_RESOLUTION, "step": 8}), - "compression": 
("INT", {"default": 42, "min": 4, "max": 128, "step": 1}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}) - }} - RETURN_TYPES = ("LATENT", "LATENT") - RETURN_NAMES = ("stage_c", "stage_b") - FUNCTION = "generate" - - CATEGORY = "latent/stable_cascade" - - def generate(self, width, height, compression, batch_size=1): + def execute(cls, width, height, compression, batch_size=1): c_latent = torch.zeros([batch_size, 16, height // compression, width // compression]) b_latent = torch.zeros([batch_size, 4, height // 4, width // 4]) - return ({ + return io.NodeOutput({ "samples": c_latent, }, { "samples": b_latent, }) -class StableCascade_StageC_VAEEncode: - def __init__(self, device="cpu"): - self.device = device + +class StableCascade_StageC_VAEEncode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="StableCascade_StageC_VAEEncode", + category="latent/stable_cascade", + inputs=[ + io.Image.Input("image"), + io.Vae.Input("vae"), + io.Int.Input("compression", default=42, min=4, max=128, step=1), + ], + outputs=[ + io.Latent.Output(display_name="stage_c"), + io.Latent.Output(display_name="stage_b"), + ], + ) @classmethod - def INPUT_TYPES(s): - return {"required": { - "image": ("IMAGE",), - "vae": ("VAE", ), - "compression": ("INT", {"default": 42, "min": 4, "max": 128, "step": 1}), - }} - RETURN_TYPES = ("LATENT", "LATENT") - RETURN_NAMES = ("stage_c", "stage_b") - FUNCTION = "generate" - - CATEGORY = "latent/stable_cascade" - - def generate(self, image, vae, compression): + def execute(cls, image, vae, compression): width = image.shape[-2] height = image.shape[-3] out_width = (width // compression) * vae.downscale_ratio @@ -75,51 +81,59 @@ class StableCascade_StageC_VAEEncode: c_latent = vae.encode(s[:,:,:,:3]) b_latent = torch.zeros([c_latent.shape[0], 4, (height // 8) * 2, (width // 8) * 2]) - return ({ + return io.NodeOutput({ "samples": c_latent, }, { "samples": b_latent, }) -class StableCascade_StageB_Conditioning: + +class StableCascade_StageB_Conditioning(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "conditioning": ("CONDITIONING",), - "stage_c": ("LATENT",), - }} - RETURN_TYPES = ("CONDITIONING",) + def define_schema(cls): + return io.Schema( + node_id="StableCascade_StageB_Conditioning", + category="conditioning/stable_cascade", + inputs=[ + io.Conditioning.Input("conditioning"), + io.Latent.Input("stage_c"), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - FUNCTION = "set_prior" - - CATEGORY = "conditioning/stable_cascade" - - def set_prior(self, conditioning, stage_c): + @classmethod + def execute(cls, conditioning, stage_c): c = [] for t in conditioning: d = t[1].copy() - d['stable_cascade_prior'] = stage_c['samples'] + d["stable_cascade_prior"] = stage_c["samples"] n = [t[0], d] c.append(n) - return (c, ) + return io.NodeOutput(c) -class StableCascade_SuperResolutionControlnet: - def __init__(self, device="cpu"): - self.device = device + +class StableCascade_SuperResolutionControlnet(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="StableCascade_SuperResolutionControlnet", + category="_for_testing/stable_cascade", + is_experimental=True, + inputs=[ + io.Image.Input("image"), + io.Vae.Input("vae"), + ], + outputs=[ + io.Image.Output(display_name="controlnet_input"), + io.Latent.Output(display_name="stage_c"), + io.Latent.Output(display_name="stage_b"), + ], + ) @classmethod - def INPUT_TYPES(s): - return {"required": { - "image": ("IMAGE",), - "vae": ("VAE", 
), - }} - RETURN_TYPES = ("IMAGE", "LATENT", "LATENT") - RETURN_NAMES = ("controlnet_input", "stage_c", "stage_b") - FUNCTION = "generate" - - EXPERIMENTAL = True - CATEGORY = "_for_testing/stable_cascade" - - def generate(self, image, vae): + def execute(cls, image, vae): width = image.shape[-2] height = image.shape[-3] batch_size = image.shape[0] @@ -127,15 +141,22 @@ class StableCascade_SuperResolutionControlnet: c_latent = torch.zeros([batch_size, 16, height // 16, width // 16]) b_latent = torch.zeros([batch_size, 4, height // 2, width // 2]) - return (controlnet_input, { + return io.NodeOutput(controlnet_input, { "samples": c_latent, }, { "samples": b_latent, }) -NODE_CLASS_MAPPINGS = { - "StableCascade_EmptyLatentImage": StableCascade_EmptyLatentImage, - "StableCascade_StageB_Conditioning": StableCascade_StageB_Conditioning, - "StableCascade_StageC_VAEEncode": StableCascade_StageC_VAEEncode, - "StableCascade_SuperResolutionControlnet": StableCascade_SuperResolutionControlnet, -} + +class StableCascadeExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + StableCascade_EmptyLatentImage, + StableCascade_StageB_Conditioning, + StableCascade_StageC_VAEEncode, + StableCascade_SuperResolutionControlnet, + ] + +async def comfy_entrypoint() -> StableCascadeExtension: + return StableCascadeExtension() From fea9ea8268d9fc0f4245f3fdc4a417ab802033e9 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 31 Aug 2025 06:19:54 +0300 Subject: [PATCH 0523/1073] convert Video nodes to V3 schema (#9489) --- comfy_extras/nodes_video.py | 286 +++++++++++++++++------------------- 1 file changed, 132 insertions(+), 154 deletions(-) diff --git a/comfy_extras/nodes_video.py b/comfy_extras/nodes_video.py index 969f888b9..69fabb12e 100644 --- a/comfy_extras/nodes_video.py +++ b/comfy_extras/nodes_video.py @@ -5,52 +5,49 @@ import av import torch import folder_paths import json -from typing import Optional, Literal +from typing import Optional +from typing_extensions import override from fractions import Fraction -from comfy.comfy_types import IO, FileLocator, ComfyNodeABC -from comfy_api.latest import Input, InputImpl, Types +from comfy_api.input import AudioInput, ImageInput, VideoInput +from comfy_api.input_impl import VideoFromComponents, VideoFromFile +from comfy_api.util import VideoCodec, VideoComponents, VideoContainer +from comfy_api.latest import ComfyExtension, io, ui from comfy.cli_args import args -class SaveWEBM: - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - self.prefix_append = "" +class SaveWEBM(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SaveWEBM", + category="image/video", + is_experimental=True, + inputs=[ + io.Image.Input("images"), + io.String.Input("filename_prefix", default="ComfyUI"), + io.Combo.Input("codec", options=["vp9", "av1"]), + io.Float.Input("fps", default=24.0, min=0.01, max=1000.0, step=0.01), + io.Float.Input("crf", default=32.0, min=0, max=63.0, step=1, tooltip="Higher crf means lower quality with a smaller file size, lower crf means higher quality higher filesize."), + ], + outputs=[], + hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo], + is_output_node=True, + ) @classmethod - def INPUT_TYPES(s): - return {"required": - {"images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"}), - "codec": (["vp9", "av1"],), - "fps": ("FLOAT", {"default": 24.0, 
"min": 0.01, "max": 1000.0, "step": 0.01}), - "crf": ("FLOAT", {"default": 32.0, "min": 0, "max": 63.0, "step": 1, "tooltip": "Higher crf means lower quality with a smaller file size, lower crf means higher quality higher filesize."}), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } - - RETURN_TYPES = () - FUNCTION = "save_images" - - OUTPUT_NODE = True - - CATEGORY = "image/video" - - EXPERIMENTAL = True - - def save_images(self, images, codec, fps, filename_prefix, crf, prompt=None, extra_pnginfo=None): - filename_prefix += self.prefix_append - full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + def execute(cls, images, codec, fps, filename_prefix, crf) -> io.NodeOutput: + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( + filename_prefix, folder_paths.get_output_directory(), images[0].shape[1], images[0].shape[0] + ) file = f"{filename}_{counter:05}_.webm" container = av.open(os.path.join(full_output_folder, file), mode="w") - if prompt is not None: - container.metadata["prompt"] = json.dumps(prompt) + if cls.hidden.prompt is not None: + container.metadata["prompt"] = json.dumps(cls.hidden.prompt) - if extra_pnginfo is not None: - for x in extra_pnginfo: - container.metadata[x] = json.dumps(extra_pnginfo[x]) + if cls.hidden.extra_pnginfo is not None: + for x in cls.hidden.extra_pnginfo: + container.metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x]) codec_map = {"vp9": "libvpx-vp9", "av1": "libsvtav1"} stream = container.add_stream(codec_map[codec], rate=Fraction(round(fps * 1000), 1000)) @@ -69,63 +66,46 @@ class SaveWEBM: container.mux(stream.encode()) container.close() - results: list[FileLocator] = [{ - "filename": file, - "subfolder": subfolder, - "type": self.type - }] + return io.NodeOutput(ui=ui.PreviewVideo([ui.SavedResult(file, subfolder, io.FolderType.output)])) - return {"ui": {"images": results, "animated": (True,)}} # TODO: frontend side - -class SaveVideo(ComfyNodeABC): - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type: Literal["output"] = "output" - self.prefix_append = "" +class SaveVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SaveVideo", + display_name="Save Video", + category="image/video", + description="Saves the input images to your ComfyUI output directory.", + inputs=[ + io.Video.Input("video", tooltip="The video to save."), + io.String.Input("filename_prefix", default="video/ComfyUI", tooltip="The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."), + io.Combo.Input("format", options=VideoContainer.as_input(), default="auto", tooltip="The format to save the video as."), + io.Combo.Input("codec", options=VideoCodec.as_input(), default="auto", tooltip="The codec to use for the video."), + ], + outputs=[], + hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo], + is_output_node=True, + ) @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "video": (IO.VIDEO, {"tooltip": "The video to save."}), - "filename_prefix": ("STRING", {"default": "video/ComfyUI", "tooltip": "The prefix for the file to save. 
This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}), - "format": (Types.VideoContainer.as_input(), {"default": "auto", "tooltip": "The format to save the video as."}), - "codec": (Types.VideoCodec.as_input(), {"default": "auto", "tooltip": "The codec to use for the video."}), - }, - "hidden": { - "prompt": "PROMPT", - "extra_pnginfo": "EXTRA_PNGINFO" - }, - } - - RETURN_TYPES = () - FUNCTION = "save_video" - - OUTPUT_NODE = True - - CATEGORY = "image/video" - DESCRIPTION = "Saves the input images to your ComfyUI output directory." - - def save_video(self, video: Input.Video, filename_prefix, format, codec, prompt=None, extra_pnginfo=None): - filename_prefix += self.prefix_append + def execute(cls, video: VideoInput, filename_prefix, format, codec) -> io.NodeOutput: width, height = video.get_dimensions() full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( filename_prefix, - self.output_dir, + folder_paths.get_output_directory(), width, height ) - results: list[FileLocator] = list() saved_metadata = None if not args.disable_metadata: metadata = {} - if extra_pnginfo is not None: - metadata.update(extra_pnginfo) - if prompt is not None: - metadata["prompt"] = prompt + if cls.hidden.extra_pnginfo is not None: + metadata.update(cls.hidden.extra_pnginfo) + if cls.hidden.prompt is not None: + metadata["prompt"] = cls.hidden.prompt if len(metadata) > 0: saved_metadata = metadata - file = f"{filename}_{counter:05}_.{Types.VideoContainer.get_extension(format)}" + file = f"{filename}_{counter:05}_.{VideoContainer.get_extension(format)}" video.save_to( os.path.join(full_output_folder, file), format=format, @@ -133,83 +113,82 @@ class SaveVideo(ComfyNodeABC): metadata=saved_metadata ) - results.append({ - "filename": file, - "subfolder": subfolder, - "type": self.type - }) - counter += 1 + return io.NodeOutput(ui=ui.PreviewVideo([ui.SavedResult(file, subfolder, io.FolderType.output)])) - return { "ui": { "images": results, "animated": (True,) } } -class CreateVideo(ComfyNodeABC): +class CreateVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "images": (IO.IMAGE, {"tooltip": "The images to create a video from."}), - "fps": ("FLOAT", {"default": 30.0, "min": 1.0, "max": 120.0, "step": 1.0}), - }, - "optional": { - "audio": (IO.AUDIO, {"tooltip": "The audio to add to the video."}), - } - } + def define_schema(cls): + return io.Schema( + node_id="CreateVideo", + display_name="Create Video", + category="image/video", + description="Create a video from images.", + inputs=[ + io.Image.Input("images", tooltip="The images to create a video from."), + io.Float.Input("fps", default=30.0, min=1.0, max=120.0, step=1.0), + io.Audio.Input("audio", optional=True, tooltip="The audio to add to the video."), + ], + outputs=[ + io.Video.Output(), + ], + ) - RETURN_TYPES = (IO.VIDEO,) - FUNCTION = "create_video" - - CATEGORY = "image/video" - DESCRIPTION = "Create a video from images." 
- - def create_video(self, images: Input.Image, fps: float, audio: Optional[Input.Audio] = None): - return (InputImpl.VideoFromComponents( - Types.VideoComponents( - images=images, - audio=audio, - frame_rate=Fraction(fps), - ) - ),) - -class GetVideoComponents(ComfyNodeABC): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "video": (IO.VIDEO, {"tooltip": "The video to extract components from."}), - } - } - RETURN_TYPES = (IO.IMAGE, IO.AUDIO, IO.FLOAT) - RETURN_NAMES = ("images", "audio", "fps") - FUNCTION = "get_components" + def execute(cls, images: ImageInput, fps: float, audio: Optional[AudioInput] = None) -> io.NodeOutput: + return io.NodeOutput( + VideoFromComponents(VideoComponents(images=images, audio=audio, frame_rate=Fraction(fps))) + ) - CATEGORY = "image/video" - DESCRIPTION = "Extracts all components from a video: frames, audio, and framerate." +class GetVideoComponents(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="GetVideoComponents", + display_name="Get Video Components", + category="image/video", + description="Extracts all components from a video: frames, audio, and framerate.", + inputs=[ + io.Video.Input("video", tooltip="The video to extract components from."), + ], + outputs=[ + io.Image.Output(display_name="images"), + io.Audio.Output(display_name="audio"), + io.Float.Output(display_name="fps"), + ], + ) - def get_components(self, video: Input.Video): + @classmethod + def execute(cls, video: VideoInput) -> io.NodeOutput: components = video.get_components() - return (components.images, components.audio, float(components.frame_rate)) + return io.NodeOutput(components.images, components.audio, float(components.frame_rate)) -class LoadVideo(ComfyNodeABC): +class LoadVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(cls): + def define_schema(cls): input_dir = folder_paths.get_input_directory() files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] files = folder_paths.filter_files_content_types(files, ["video"]) - return {"required": - {"file": (sorted(files), {"video_upload": True})}, - } - - CATEGORY = "image/video" - - RETURN_TYPES = (IO.VIDEO,) - FUNCTION = "load_video" - def load_video(self, file): - video_path = folder_paths.get_annotated_filepath(file) - return (InputImpl.VideoFromFile(video_path),) + return io.Schema( + node_id="LoadVideo", + display_name="Load Video", + category="image/video", + inputs=[ + io.Combo.Input("file", options=sorted(files), upload=io.UploadType.video), + ], + outputs=[ + io.Video.Output(), + ], + ) @classmethod - def IS_CHANGED(cls, file): + def execute(cls, file) -> io.NodeOutput: + video_path = folder_paths.get_annotated_filepath(file) + return io.NodeOutput(VideoFromFile(video_path)) + + @classmethod + def fingerprint_inputs(s, file): video_path = folder_paths.get_annotated_filepath(file) mod_time = os.path.getmtime(video_path) # Instead of hashing the file, we can just use the modification time to avoid @@ -217,24 +196,23 @@ class LoadVideo(ComfyNodeABC): return mod_time @classmethod - def VALIDATE_INPUTS(cls, file): + def validate_inputs(s, file): if not folder_paths.exists_annotated_filepath(file): return "Invalid video file: {}".format(file) return True -NODE_CLASS_MAPPINGS = { - "SaveWEBM": SaveWEBM, - "SaveVideo": SaveVideo, - "CreateVideo": CreateVideo, - "GetVideoComponents": GetVideoComponents, - "LoadVideo": LoadVideo, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "SaveVideo": "Save Video", - "CreateVideo": "Create Video", - 
"GetVideoComponents": "Get Video Components", - "LoadVideo": "Load Video", -} +class VideoExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + SaveWEBM, + SaveVideo, + CreateVideo, + GetVideoComponents, + LoadVideo, + ] +async def comfy_entrypoint() -> VideoExtension: + return VideoExtension() From d2c502e629ba948029abc13ef1b456b9f4bbbdaa Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 31 Aug 2025 06:20:17 +0300 Subject: [PATCH 0524/1073] convert nodes_stability.py to V3 schema (#9497) --- comfy_api_nodes/nodes_stability.py | 678 ++++++++++++++++------------- 1 file changed, 365 insertions(+), 313 deletions(-) diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py index 31309d831..e05cb6bb2 100644 --- a/comfy_api_nodes/nodes_stability.py +++ b/comfy_api_nodes/nodes_stability.py @@ -1,5 +1,8 @@ from inspect import cleandoc -from comfy.comfy_types.node_typing import IO +from typing import Optional +from typing_extensions import override + +from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api_nodes.apis.stability_api import ( StabilityUpscaleConservativeRequest, StabilityUpscaleCreativeRequest, @@ -46,87 +49,94 @@ def get_async_dummy_status(x: StabilityResultsGetResponse): return StabilityPollStatus.in_progress -class StabilityStableImageUltraNode: +class StabilityStableImageUltraNode(comfy_io.ComfyNode): """ Generates images synchronously based on prompt and resolution. """ - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Stability AI" - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines" + - "What you wish to see in the output image. A strong, descriptive prompt that clearly defines" + + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityStableImageUltraNode", + display_name="Stability AI Stable Image Ultra", + category="api node/image/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines" + "elements, colors, and subjects will lead to better results. " + "To control the weight of a given word use the format `(word:weight)`," + "where `word` is the word you'd like to control the weight of and `weight`" + "is a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`" + - "would convey a sky that was blue and green, but more green than blue." 
- }, + "would convey a sky that was blue and green, but more green than blue.", ), - "aspect_ratio": ([x.value for x in StabilityAspectRatio], - { - "default": StabilityAspectRatio.ratio_1_1, - "tooltip": "Aspect ratio of generated image.", - }, + comfy_io.Combo.Input( + "aspect_ratio", + options=[x.value for x in StabilityAspectRatio], + default=StabilityAspectRatio.ratio_1_1.value, + tooltip="Aspect ratio of generated image.", ), - "style_preset": (get_stability_style_presets(), - { - "tooltip": "Optional desired style of generated image.", - }, + comfy_io.Combo.Input( + "style_preset", + options=get_stability_style_presets(), + tooltip="Optional desired style of generated image.", ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 4294967294, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=4294967294, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", ), - }, - "optional": { - "image": (IO.IMAGE,), - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "A blurb of text describing what you do not wish to see in the output image. This is an advanced feature." - }, + comfy_io.Image.Input( + "image", + optional=True, ), - "image_denoise": ( - IO.FLOAT, - { - "default": 0.5, - "min": 0.0, - "max": 1.0, - "step": 0.01, - "tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", - }, + comfy_io.String.Input( + "negative_prompt", + default="", + tooltip="A blurb of text describing what you do not wish to see in the output image. This is an advanced feature.", + force_input=True, + optional=True, ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } + comfy_io.Float.Input( + "image_denoise", + default=0.5, + min=0.0, + max=1.0, + step=0.01, + tooltip="Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - async def api_call(self, prompt: str, aspect_ratio: str, style_preset: str, seed: int, - negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None, - **kwargs): + @classmethod + async def execute( + cls, + prompt: str, + aspect_ratio: str, + style_preset: str, + seed: int, + image: Optional[torch.Tensor] = None, + negative_prompt: str = "", + image_denoise: Optional[float] = 0.5, + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False) # prepare image binary if image present image_binary = None @@ -144,6 +154,11 @@ class StabilityStableImageUltraNode: "image": image_binary } + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/stability/v2beta/stable-image/generate/ultra", @@ -161,7 +176,7 @@ class StabilityStableImageUltraNode: ), files=files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -171,95 +186,106 @@ class StabilityStableImageUltraNode: image_data = base64.b64decode(response_api.image) returned_image = 
bytesio_to_image_tensor(BytesIO(image_data)) - return (returned_image,) + return comfy_io.NodeOutput(returned_image) -class StabilityStableImageSD_3_5Node: +class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode): """ Generates images synchronously based on prompt and resolution. """ - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Stability AI" + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityStableImageSD_3_5Node", + display_name="Stability AI Stable Diffusion 3.5 Image", + category="api node/image/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", + ), + comfy_io.Combo.Input( + "model", + options=[x.value for x in Stability_SD3_5_Model], + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=[x.value for x in StabilityAspectRatio], + default=StabilityAspectRatio.ratio_1_1.value, + tooltip="Aspect ratio of generated image.", + ), + comfy_io.Combo.Input( + "style_preset", + options=get_stability_style_presets(), + tooltip="Optional desired style of generated image.", + ), + comfy_io.Float.Input( + "cfg_scale", + default=4.0, + min=1.0, + max=10.0, + step=0.1, + tooltip="How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=4294967294, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + ), + comfy_io.Image.Input( + "image", + optional=True, + ), + comfy_io.String.Input( + "negative_prompt", + default="", + tooltip="Keywords of what you do not wish to see in the output image. This is an advanced feature.", + force_input=True, + optional=True, + ), + comfy_io.Float.Input( + "image_denoise", + default=0.5, + min=0.0, + max=1.0, + step=0.01, + tooltip="Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results." 
- }, - ), - "model": ([x.value for x in Stability_SD3_5_Model],), - "aspect_ratio": ([x.value for x in StabilityAspectRatio], - { - "default": StabilityAspectRatio.ratio_1_1, - "tooltip": "Aspect ratio of generated image.", - }, - ), - "style_preset": (get_stability_style_presets(), - { - "tooltip": "Optional desired style of generated image.", - }, - ), - "cfg_scale": ( - IO.FLOAT, - { - "default": 4.0, - "min": 1.0, - "max": 10.0, - "step": 0.1, - "tooltip": "How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)", - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 4294967294, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - }, - "optional": { - "image": (IO.IMAGE,), - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature." - }, - ), - "image_denoise": ( - IO.FLOAT, - { - "default": 0.5, - "min": 0.0, - "max": 1.0, - "step": 0.01, - "tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call(self, model: str, prompt: str, aspect_ratio: str, style_preset: str, seed: int, cfg_scale: float, - negative_prompt: str=None, image: torch.Tensor = None, image_denoise: float=None, - **kwargs): + async def execute( + cls, + model: str, + prompt: str, + aspect_ratio: str, + style_preset: str, + seed: int, + cfg_scale: float, + image: Optional[torch.Tensor] = None, + negative_prompt: str = "", + image_denoise: Optional[float] = 0.5, + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False) # prepare image binary if image present image_binary = None @@ -280,6 +306,11 @@ class StabilityStableImageSD_3_5Node: "image": image_binary } + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/stability/v2beta/stable-image/generate/sd3", @@ -300,7 +331,7 @@ class StabilityStableImageSD_3_5Node: ), files=files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -310,72 +341,75 @@ class StabilityStableImageSD_3_5Node: image_data = base64.b64decode(response_api.image) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return (returned_image,) + return comfy_io.NodeOutput(returned_image) -class StabilityUpscaleConservativeNode: +class StabilityUpscaleConservativeNode(comfy_io.ComfyNode): """ Upscale image with minimal alterations to 4K resolution. """ - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Stability AI" + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityUpscaleConservativeNode", + display_name="Stability AI Upscale Conservative", + category="api node/image/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("image"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="What you wish to see in the output image. 
A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", + ), + comfy_io.Float.Input( + "creativity", + default=0.35, + min=0.2, + max=0.5, + step=0.01, + tooltip="Controls the likelihood of creating additional details not heavily conditioned by the init image.", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=4294967294, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + ), + comfy_io.String.Input( + "negative_prompt", + default="", + tooltip="Keywords of what you do not wish to see in the output image. This is an advanced feature.", + force_input=True, + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results." - }, - ), - "creativity": ( - IO.FLOAT, - { - "default": 0.35, - "min": 0.2, - "max": 0.5, - "step": 0.01, - "tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image.", - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 4294967294, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - }, - "optional": { - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature." - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call(self, image: torch.Tensor, prompt: str, creativity: float, seed: int, negative_prompt: str=None, - **kwargs): + async def execute( + cls, + image: torch.Tensor, + prompt: str, + creativity: float, + seed: int, + negative_prompt: str = "", + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False) image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read() @@ -386,6 +420,11 @@ class StabilityUpscaleConservativeNode: "image": image_binary } + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/stability/v2beta/stable-image/upscale/conservative", @@ -401,7 +440,7 @@ class StabilityUpscaleConservativeNode: ), files=files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -411,77 +450,81 @@ class StabilityUpscaleConservativeNode: image_data = base64.b64decode(response_api.image) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return (returned_image,) + return comfy_io.NodeOutput(returned_image) -class StabilityUpscaleCreativeNode: +class StabilityUpscaleCreativeNode(comfy_io.ComfyNode): """ Upscale image with minimal alterations to 4K resolution. 
""" - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Stability AI" + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityUpscaleCreativeNode", + display_name="Stability AI Upscale Creative", + category="api node/image/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("image"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", + ), + comfy_io.Float.Input( + "creativity", + default=0.3, + min=0.1, + max=0.5, + step=0.01, + tooltip="Controls the likelihood of creating additional details not heavily conditioned by the init image.", + ), + comfy_io.Combo.Input( + "style_preset", + options=get_stability_style_presets(), + tooltip="Optional desired style of generated image.", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=4294967294, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + ), + comfy_io.String.Input( + "negative_prompt", + default="", + tooltip="Keywords of what you do not wish to see in the output image. This is an advanced feature.", + force_input=True, + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results." - }, - ), - "creativity": ( - IO.FLOAT, - { - "default": 0.3, - "min": 0.1, - "max": 0.5, - "step": 0.01, - "tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image.", - }, - ), - "style_preset": (get_stability_style_presets(), - { - "tooltip": "Optional desired style of generated image.", - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 4294967294, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - }, - "optional": { - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature." 
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call(self, image: torch.Tensor, prompt: str, creativity: float, style_preset: str, seed: int, negative_prompt: str=None, - **kwargs): + async def execute( + cls, + image: torch.Tensor, + prompt: str, + creativity: float, + style_preset: str, + seed: int, + negative_prompt: str = "", + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False) image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read() @@ -494,6 +537,11 @@ class StabilityUpscaleCreativeNode: "image": image_binary } + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/stability/v2beta/stable-image/upscale/creative", @@ -510,7 +558,7 @@ class StabilityUpscaleCreativeNode: ), files=files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -525,7 +573,8 @@ class StabilityUpscaleCreativeNode: completed_statuses=[StabilityPollStatus.finished], failed_statuses=[StabilityPollStatus.failed], status_extractor=lambda x: get_async_dummy_status(x), - auth_kwargs=kwargs, + auth_kwargs=auth, + node_id=cls.hidden.unique_id, ) response_poll: StabilityResultsGetResponse = await operation.execute() @@ -535,41 +584,48 @@ class StabilityUpscaleCreativeNode: image_data = base64.b64decode(response_poll.result) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return (returned_image,) + return comfy_io.NodeOutput(returned_image) -class StabilityUpscaleFastNode: +class StabilityUpscaleFastNode(comfy_io.ComfyNode): """ Quickly upscales an image via Stability API call to 4x its original size; intended for upscaling low-quality/compressed images. 
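The conversion in this patch follows one fixed recipe: the class-level INPUT_TYPES/RETURN_TYPES/FUNCTION/CATEGORY attributes collapse into a single define_schema() classmethod, the instance api_call() becomes a classmethod execute() that returns comfy_io.NodeOutput, and the hidden auth kwargs move to cls.hidden. A minimal sketch of the recipe (the node here is illustrative, not part of the patch):

    from typing_extensions import override
    from comfy_api.latest import ComfyExtension, io as comfy_io

    class EchoPrompt(comfy_io.ComfyNode):  # hypothetical example node
        @classmethod
        def define_schema(cls):
            return comfy_io.Schema(
                node_id="EchoPrompt",
                display_name="Echo Prompt",
                category="utils/example",
                inputs=[comfy_io.String.Input("prompt", multiline=True)],
                outputs=[comfy_io.String.Output()],
            )

        @classmethod
        def execute(cls, prompt: str) -> comfy_io.NodeOutput:
            return comfy_io.NodeOutput(prompt)

    class EchoExtension(ComfyExtension):
        @override
        async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
            return [EchoPrompt]

    async def comfy_entrypoint() -> EchoExtension:
        return EchoExtension()

The NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS dictionaries disappear in favor of the extension's get_node_list(), with display names carried by the schema.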
""" - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Stability AI" + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityUpscaleFastNode", + display_name="Stability AI Upscale Fast", + category="api node/image/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("image"), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE,), - }, - "optional": { - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call(self, image: torch.Tensor, **kwargs): + async def execute(cls, image: torch.Tensor) -> comfy_io.NodeOutput: image_binary = tensor_to_bytesio(image, total_pixels=4096*4096).read() files = { "image": image_binary } + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/stability/v2beta/stable-image/upscale/fast", @@ -580,7 +636,7 @@ class StabilityUpscaleFastNode: request=EmptyRequest(), files=files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -590,24 +646,20 @@ class StabilityUpscaleFastNode: image_data = base64.b64decode(response_api.image) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return (returned_image,) + return comfy_io.NodeOutput(returned_image) -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "StabilityStableImageUltraNode": StabilityStableImageUltraNode, - "StabilityStableImageSD_3_5Node": StabilityStableImageSD_3_5Node, - "StabilityUpscaleConservativeNode": StabilityUpscaleConservativeNode, - "StabilityUpscaleCreativeNode": StabilityUpscaleCreativeNode, - "StabilityUpscaleFastNode": StabilityUpscaleFastNode, -} +class StabilityExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + StabilityStableImageUltraNode, + StabilityStableImageSD_3_5Node, + StabilityUpscaleConservativeNode, + StabilityUpscaleCreativeNode, + StabilityUpscaleFastNode, + ] -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "StabilityStableImageUltraNode": "Stability AI Stable Image Ultra", - "StabilityStableImageSD_3_5Node": "Stability AI Stable Diffusion 3.5 Image", - "StabilityUpscaleConservativeNode": "Stability AI Upscale Conservative", - "StabilityUpscaleCreativeNode": "Stability AI Upscale Creative", - "StabilityUpscaleFastNode": "Stability AI Upscale Fast", -} + +async def comfy_entrypoint() -> StabilityExtension: + return StabilityExtension() From fe442fac2eccd0cc66999b48d3c518623cafe4fc Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 31 Aug 2025 06:21:58 +0300 Subject: [PATCH 0525/1073] convert Primitive nodes to V3 schema (#9372) --- comfy_extras/nodes_primitive.py | 169 +++++++++++++++++--------------- 1 file changed, 90 insertions(+), 79 deletions(-) diff --git a/comfy_extras/nodes_primitive.py 
b/comfy_extras/nodes_primitive.py index 1f93f87a7..5a1aeba80 100644 --- a/comfy_extras/nodes_primitive.py +++ b/comfy_extras/nodes_primitive.py @@ -1,98 +1,109 @@ -# Primitive nodes that are evaluated at backend. -from __future__ import annotations - import sys +from typing_extensions import override -from comfy.comfy_types.node_typing import ComfyNodeABC, InputTypeDict, IO +from comfy_api.latest import ComfyExtension, io -class String(ComfyNodeABC): +class String(io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": {"value": (IO.STRING, {})}, - } + def define_schema(cls): + return io.Schema( + node_id="PrimitiveString", + display_name="String", + category="utils/primitive", + inputs=[ + io.String.Input("value"), + ], + outputs=[io.String.Output()], + ) - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/primitive" - - def execute(self, value: str) -> tuple[str]: - return (value,) - - -class StringMultiline(ComfyNodeABC): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": {"value": (IO.STRING, {"multiline": True,},)}, - } - - RETURN_TYPES = (IO.STRING,) - FUNCTION = "execute" - CATEGORY = "utils/primitive" - - def execute(self, value: str) -> tuple[str]: - return (value,) + def execute(cls, value: str) -> io.NodeOutput: + return io.NodeOutput(value) -class Int(ComfyNodeABC): +class StringMultiline(io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": {"value": (IO.INT, {"min": -sys.maxsize, "max": sys.maxsize, "control_after_generate": True})}, - } + def define_schema(cls): + return io.Schema( + node_id="PrimitiveStringMultiline", + display_name="String (Multiline)", + category="utils/primitive", + inputs=[ + io.String.Input("value", multiline=True), + ], + outputs=[io.String.Output()], + ) - RETURN_TYPES = (IO.INT,) - FUNCTION = "execute" - CATEGORY = "utils/primitive" - - def execute(self, value: int) -> tuple[int]: - return (value,) - - -class Float(ComfyNodeABC): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": {"value": (IO.FLOAT, {"min": -sys.maxsize, "max": sys.maxsize})}, - } - - RETURN_TYPES = (IO.FLOAT,) - FUNCTION = "execute" - CATEGORY = "utils/primitive" - - def execute(self, value: float) -> tuple[float]: - return (value,) + def execute(cls, value: str) -> io.NodeOutput: + return io.NodeOutput(value) -class Boolean(ComfyNodeABC): +class Int(io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": {"value": (IO.BOOLEAN, {})}, - } + def define_schema(cls): + return io.Schema( + node_id="PrimitiveInt", + display_name="Int", + category="utils/primitive", + inputs=[ + io.Int.Input("value", min=-sys.maxsize, max=sys.maxsize, control_after_generate=True), + ], + outputs=[io.Int.Output()], + ) - RETURN_TYPES = (IO.BOOLEAN,) - FUNCTION = "execute" - CATEGORY = "utils/primitive" - - def execute(self, value: bool) -> tuple[bool]: - return (value,) + @classmethod + def execute(cls, value: int) -> io.NodeOutput: + return io.NodeOutput(value) -NODE_CLASS_MAPPINGS = { - "PrimitiveString": String, - "PrimitiveStringMultiline": StringMultiline, - "PrimitiveInt": Int, - "PrimitiveFloat": Float, - "PrimitiveBoolean": Boolean, -} +class Float(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="PrimitiveFloat", + display_name="Float", + category="utils/primitive", + inputs=[ + io.Float.Input("value", min=-sys.maxsize, max=sys.maxsize), + ], + 
outputs=[io.Float.Output()], + ) -NODE_DISPLAY_NAME_MAPPINGS = { - "PrimitiveString": "String", - "PrimitiveStringMultiline": "String (Multiline)", - "PrimitiveInt": "Int", - "PrimitiveFloat": "Float", - "PrimitiveBoolean": "Boolean", -} + @classmethod + def execute(cls, value: float) -> io.NodeOutput: + return io.NodeOutput(value) + + +class Boolean(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="PrimitiveBoolean", + display_name="Boolean", + category="utils/primitive", + inputs=[ + io.Boolean.Input("value"), + ], + outputs=[io.Boolean.Output()], + ) + + @classmethod + def execute(cls, value: bool) -> io.NodeOutput: + return io.NodeOutput(value) + + +class PrimitivesExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + String, + StringMultiline, + Int, + Float, + Boolean, + ] + +async def comfy_entrypoint() -> PrimitivesExtension: + return PrimitivesExtension() From 32a627bf1feadb83abba97906a27978b927abd33 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Sun, 31 Aug 2025 12:01:45 +0800 Subject: [PATCH 0526/1073] SEEDS: update noise decomposition and refactor (#9633) - Update the decomposition to reflect interval dependency - Extract phi computations into functions - Use torch.lerp for interpolation --- comfy/k_diffusion/sampling.py | 135 ++++++++++++++++++---------------- 1 file changed, 73 insertions(+), 62 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index fe6844b17..2d7e09838 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -171,6 +171,16 @@ def offset_first_sigma_for_snr(sigmas, model_sampling, percent_offset=1e-4): return sigmas +def ei_h_phi_1(h: torch.Tensor) -> torch.Tensor: + """Compute the result of h*phi_1(h) in exponential integrator methods.""" + return torch.expm1(h) + + +def ei_h_phi_2(h: torch.Tensor) -> torch.Tensor: + """Compute the result of h*phi_2(h) in exponential integrator methods.""" + return (torch.expm1(h) - h) / h + + @torch.no_grad() def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.): """Implements Algorithm 2 (Euler steps) from Karras et al. (2022).""" @@ -1550,13 +1560,12 @@ def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None @torch.no_grad() def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=0.5): """SEEDS-2 - Stochastic Explicit Exponential Derivative-free Solvers (VP Data Prediction) stage 2. 
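The two extracted helpers are the standard exponential-integrator quantities, phi_1(h) = (e^h - 1)/h and phi_2(h) = (e^h - 1 - h)/h^2, returned pre-multiplied by h so callers avoid a division. A quick numerical sanity check (illustrative only, not part of the patch):

    import torch

    h = torch.tensor(-0.37)                      # an arbitrary log-SNR step
    phi1 = torch.expm1(h) / h                    # phi_1(h)
    phi2 = (torch.expm1(h) - h) / h**2           # phi_2(h)
    assert torch.allclose(torch.expm1(h), h * phi1)            # ei_h_phi_1(h)
    assert torch.allclose((torch.expm1(h) - h) / h, h * phi2)  # ei_h_phi_2(h)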
- arXiv: https://arxiv.org/abs/2305.14267 + arXiv: https://arxiv.org/abs/2305.14267 (NeurIPS 2023) """ extra_args = {} if extra_args is None else extra_args seed = extra_args.get("seed", None) noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler s_in = x.new_ones([x.shape[0]]) - inject_noise = eta > 0 and s_noise > 0 model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling') @@ -1564,55 +1573,53 @@ def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=Non lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling) sigmas = offset_first_sigma_for_snr(sigmas, model_sampling) + fac = 1 / (2 * r) + for i in trange(len(sigmas) - 1, disable=disable): denoised = model(x, sigmas[i] * s_in, **extra_args) if callback is not None: callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) + if sigmas[i + 1] == 0: x = denoised - else: - lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) - h = lambda_t - lambda_s - h_eta = h * (eta + 1) - lambda_s_1 = lambda_s + r * h - fac = 1 / (2 * r) - sigma_s_1 = sigma_fn(lambda_s_1) + continue - # alpha_t = sigma_t * exp(log(alpha_t / sigma_t)) = sigma_t * exp(lambda_t) - alpha_s_1 = sigma_s_1 * lambda_s_1.exp() - alpha_t = sigmas[i + 1] * lambda_t.exp() + lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) + h = lambda_t - lambda_s + h_eta = h * (eta + 1) + lambda_s_1 = torch.lerp(lambda_s, lambda_t, r) + sigma_s_1 = sigma_fn(lambda_s_1) - coeff_1, coeff_2 = (-r * h_eta).expm1(), (-h_eta).expm1() - if inject_noise: - # 0 < r < 1 - noise_coeff_1 = (-2 * r * h * eta).expm1().neg().sqrt() - noise_coeff_2 = (-r * h * eta).exp() * (-2 * (1 - r) * h * eta).expm1().neg().sqrt() - noise_1, noise_2 = noise_sampler(sigmas[i], sigma_s_1), noise_sampler(sigma_s_1, sigmas[i + 1]) + alpha_s_1 = sigma_s_1 * lambda_s_1.exp() + alpha_t = sigmas[i + 1] * lambda_t.exp() - # Step 1 - x_2 = sigma_s_1 / sigmas[i] * (-r * h * eta).exp() * x - alpha_s_1 * coeff_1 * denoised - if inject_noise: - x_2 = x_2 + sigma_s_1 * (noise_coeff_1 * noise_1) * s_noise - denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) + # Step 1 + x_2 = sigma_s_1 / sigmas[i] * (-r * h * eta).exp() * x - alpha_s_1 * ei_h_phi_1(-r * h_eta) * denoised + if inject_noise: + sde_noise = (-2 * r * h * eta).expm1().neg().sqrt() * noise_sampler(sigmas[i], sigma_s_1) + x_2 = x_2 + sde_noise * sigma_s_1 * s_noise + denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) - # Step 2 - denoised_d = (1 - fac) * denoised + fac * denoised_2 - x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * coeff_2 * denoised_d - if inject_noise: - x = x + sigmas[i + 1] * (noise_coeff_2 * noise_1 + noise_coeff_1 * noise_2) * s_noise + # Step 2 + denoised_d = torch.lerp(denoised, denoised_2, fac) + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * ei_h_phi_1(-h_eta) * denoised_d + if inject_noise: + segment_factor = (r - 1) * h * eta + sde_noise = sde_noise * segment_factor.exp() + sde_noise = sde_noise + segment_factor.mul(2).expm1().neg().sqrt() * noise_sampler(sigma_s_1, sigmas[i + 1]) + x = x + sde_noise * sigmas[i + 1] * s_noise return x @torch.no_grad() def sample_seeds_3(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r_1=1./3, r_2=2./3): """SEEDS-3 - Stochastic Explicit Exponential Derivative-free Solvers (VP Data Prediction) stage 3. 
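The rewritten noise handling in sample_seeds_2 replaces the closed-form noise_coeff_* products with a running sde_noise term updated per sub-interval: noise carried over decays by exp(f), where f = (r - 1) * h * eta <= 0 is the segment factor, and fresh noise enters with weight sqrt(-expm1(2f)), so the total variance stays exp(2f) + (1 - exp(2f)) = 1 across every segment. A small Monte-Carlo check of that identity (illustrative, not part of the patch):

    import torch

    f = torch.tensor(-0.25)        # stand-in for (r - 1) * h * eta
    old = torch.randn(1_000_000)   # unit-variance noise carried over
    new = torch.randn(1_000_000)   # fresh unit-variance noise for this segment
    mixed = old * f.exp() + f.mul(2).expm1().neg().sqrt() * new
    print(mixed.var())             # ~= 1.0: the decomposition preserves variance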
- arXiv: https://arxiv.org/abs/2305.14267 + arXiv: https://arxiv.org/abs/2305.14267 (NeurIPS 2023) """ extra_args = {} if extra_args is None else extra_args seed = extra_args.get("seed", None) noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler s_in = x.new_ones([x.shape[0]]) - inject_noise = eta > 0 and s_noise > 0 model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling') @@ -1624,45 +1631,49 @@ def sample_seeds_3(model, x, sigmas, extra_args=None, callback=None, disable=Non denoised = model(x, sigmas[i] * s_in, **extra_args) if callback is not None: callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) + if sigmas[i + 1] == 0: x = denoised - else: - lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) - h = lambda_t - lambda_s - h_eta = h * (eta + 1) - lambda_s_1 = lambda_s + r_1 * h - lambda_s_2 = lambda_s + r_2 * h - sigma_s_1, sigma_s_2 = sigma_fn(lambda_s_1), sigma_fn(lambda_s_2) + continue - # alpha_t = sigma_t * exp(log(alpha_t / sigma_t)) = sigma_t * exp(lambda_t) - alpha_s_1 = sigma_s_1 * lambda_s_1.exp() - alpha_s_2 = sigma_s_2 * lambda_s_2.exp() - alpha_t = sigmas[i + 1] * lambda_t.exp() + lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1]) + h = lambda_t - lambda_s + h_eta = h * (eta + 1) + lambda_s_1 = torch.lerp(lambda_s, lambda_t, r_1) + lambda_s_2 = torch.lerp(lambda_s, lambda_t, r_2) + sigma_s_1, sigma_s_2 = sigma_fn(lambda_s_1), sigma_fn(lambda_s_2) - coeff_1, coeff_2, coeff_3 = (-r_1 * h_eta).expm1(), (-r_2 * h_eta).expm1(), (-h_eta).expm1() - if inject_noise: - # 0 < r_1 < r_2 < 1 - noise_coeff_1 = (-2 * r_1 * h * eta).expm1().neg().sqrt() - noise_coeff_2 = (-r_1 * h * eta).exp() * (-2 * (r_2 - r_1) * h * eta).expm1().neg().sqrt() - noise_coeff_3 = (-r_2 * h * eta).exp() * (-2 * (1 - r_2) * h * eta).expm1().neg().sqrt() - noise_1, noise_2, noise_3 = noise_sampler(sigmas[i], sigma_s_1), noise_sampler(sigma_s_1, sigma_s_2), noise_sampler(sigma_s_2, sigmas[i + 1]) + alpha_s_1 = sigma_s_1 * lambda_s_1.exp() + alpha_s_2 = sigma_s_2 * lambda_s_2.exp() + alpha_t = sigmas[i + 1] * lambda_t.exp() - # Step 1 - x_2 = sigma_s_1 / sigmas[i] * (-r_1 * h * eta).exp() * x - alpha_s_1 * coeff_1 * denoised - if inject_noise: - x_2 = x_2 + sigma_s_1 * (noise_coeff_1 * noise_1) * s_noise - denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) + # Step 1 + x_2 = sigma_s_1 / sigmas[i] * (-r_1 * h * eta).exp() * x - alpha_s_1 * ei_h_phi_1(-r_1 * h_eta) * denoised + if inject_noise: + sde_noise = (-2 * r_1 * h * eta).expm1().neg().sqrt() * noise_sampler(sigmas[i], sigma_s_1) + x_2 = x_2 + sde_noise * sigma_s_1 * s_noise + denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) - # Step 2 - x_3 = sigma_s_2 / sigmas[i] * (-r_2 * h * eta).exp() * x - alpha_s_2 * coeff_2 * denoised + (r_2 / r_1) * alpha_s_2 * (coeff_2 / (r_2 * h_eta) + 1) * (denoised_2 - denoised) - if inject_noise: - x_3 = x_3 + sigma_s_2 * (noise_coeff_2 * noise_1 + noise_coeff_1 * noise_2) * s_noise - denoised_3 = model(x_3, sigma_s_2 * s_in, **extra_args) + # Step 2 + a3_2 = r_2 / r_1 * ei_h_phi_2(-r_2 * h_eta) + a3_1 = ei_h_phi_1(-r_2 * h_eta) - a3_2 + x_3 = sigma_s_2 / sigmas[i] * (-r_2 * h * eta).exp() * x - alpha_s_2 * (a3_1 * denoised + a3_2 * denoised_2) + if inject_noise: + segment_factor = (r_1 - r_2) * h * eta + sde_noise = sde_noise * segment_factor.exp() + sde_noise = sde_noise + segment_factor.mul(2).expm1().neg().sqrt() * noise_sampler(sigma_s_1, sigma_s_2) + x_3 = x_3 + 
sde_noise * sigma_s_2 * s_noise + denoised_3 = model(x_3, sigma_s_2 * s_in, **extra_args) - # Step 3 - x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * coeff_3 * denoised + (1. / r_2) * alpha_t * (coeff_3 / h_eta + 1) * (denoised_3 - denoised) - if inject_noise: - x = x + sigmas[i + 1] * (noise_coeff_3 * noise_1 + noise_coeff_2 * noise_2 + noise_coeff_1 * noise_3) * s_noise + # Step 3 + b3 = ei_h_phi_2(-h_eta) / r_2 + b1 = ei_h_phi_1(-h_eta) - b3 + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * (b1 * denoised + b3 * denoised_3) + if inject_noise: + segment_factor = (r_2 - 1) * h * eta + sde_noise = sde_noise * segment_factor.exp() + sde_noise = sde_noise + segment_factor.mul(2).expm1().neg().sqrt() * noise_sampler(sigma_s_2, sigmas[i + 1]) + x = x + sde_noise * sigmas[i + 1] * s_noise return x From 9b151559721ff6c8d93150f3d8a53259a23911cd Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 30 Aug 2025 22:32:10 -0700 Subject: [PATCH 0527/1073] Probably not necessary anymore. (#9646) --- main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/main.py b/main.py index b23d50816..c33f0e17b 100644 --- a/main.py +++ b/main.py @@ -113,7 +113,6 @@ import gc if os.name == "nt": os.environ['MIMALLOC_PURGE_DELAY'] = '0' - logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": if args.default_device is not None: From 27e067ce505c102fd0f2be0f1242016c59a6816f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 1 Sep 2025 15:54:02 -0700 Subject: [PATCH 0528/1073] Implement the USO subject identity lora. (#9674) Use the lora with FluxContextMultiReferenceLatentMethod node set to "uso" and a ReferenceLatent node with the reference image. --- comfy/ldm/flux/model.py | 10 ++++++++-- comfy/lora.py | 4 ++++ comfy/lora_convert.py | 19 +++++++++++++++++++ comfy_extras/nodes_flux.py | 2 +- 4 files changed, 32 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 1344c3a57..1e62f4626 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -233,12 +233,18 @@ class Flux(nn.Module): h = 0 w = 0 index = 0 - index_ref_method = kwargs.get("ref_latents_method", "offset") == "index" + ref_latents_method = kwargs.get("ref_latents_method", "offset") for ref in ref_latents: - if index_ref_method: + if ref_latents_method == "index": index += 1 h_offset = 0 w_offset = 0 + elif ref_latents_method == "uso": + index = 0 + h_offset = h_len * patch_size + h + w_offset = w_len * patch_size + w + h += ref.shape[-2] + w += ref.shape[-1] else: index = 1 h_offset = 0 diff --git a/comfy/lora.py b/comfy/lora.py index 00358884b..4a44f1318 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -260,6 +260,10 @@ def model_lora_keys_unet(model, key_map={}): key_map["transformer.{}".format(k[:-len(".weight")])] = to #simpletrainer and probably regular diffusers flux lora format key_map["lycoris_{}".format(k[:-len(".weight")].replace(".", "_"))] = to #simpletrainer lycoris key_map["lora_transformer_{}".format(k[:-len(".weight")].replace(".", "_"))] = to #onetrainer + for k in sdk: + hidden_size = model.model_config.unet_config.get("hidden_size", 0) + if k.endswith(".weight") and ".linear1." 
in k: + key_map["{}".format(k.replace(".linear1.weight", ".linear1_qkv"))] = (k, (0, 0, hidden_size * 3)) if isinstance(model, comfy.model_base.GenmoMochi): for k in sdk: diff --git a/comfy/lora_convert.py b/comfy/lora_convert.py index 3e00b63db..9d8d21efe 100644 --- a/comfy/lora_convert.py +++ b/comfy/lora_convert.py @@ -15,10 +15,29 @@ def convert_lora_bfl_control(sd): #BFL loras for Flux def convert_lora_wan_fun(sd): #Wan Fun loras return comfy.utils.state_dict_prefix_replace(sd, {"lora_unet__": "lora_unet_"}) +def convert_uso_lora(sd): + sd_out = {} + for k in sd: + tensor = sd[k] + k_to = "diffusion_model.{}".format(k.replace(".down.weight", ".lora_down.weight") + .replace(".up.weight", ".lora_up.weight") + .replace(".qkv_lora2.", ".txt_attn.qkv.") + .replace(".qkv_lora1.", ".img_attn.qkv.") + .replace(".proj_lora1.", ".img_attn.proj.") + .replace(".proj_lora2.", ".txt_attn.proj.") + .replace(".qkv_lora.", ".linear1_qkv.") + .replace(".proj_lora.", ".linear2.") + .replace(".processor.", ".") + ) + sd_out[k_to] = tensor + return sd_out + def convert_lora(sd): if "img_in.lora_A.weight" in sd and "single_blocks.0.norm.key_norm.scale" in sd: return convert_lora_bfl_control(sd) if "lora_unet__blocks_0_cross_attn_k.lora_down.weight" in sd: return convert_lora_wan_fun(sd) + if "single_blocks.37.processor.qkv_lora.up.weight" in sd and "double_blocks.18.processor.qkv_lora2.up.weight" in sd: + return convert_uso_lora(sd) return sd diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py index c8db75bb3..1bf7ddd92 100644 --- a/comfy_extras/nodes_flux.py +++ b/comfy_extras/nodes_flux.py @@ -105,7 +105,7 @@ class FluxKontextMultiReferenceLatentMethod: def INPUT_TYPES(s): return {"required": { "conditioning": ("CONDITIONING", ), - "reference_latents_method": (("offset", "index"), ), + "reference_latents_method": (("offset", "index", "uso"), ), }} RETURN_TYPES = ("CONDITIONING",) From e2d1e5dad98dbbcf505703ea8663f20101e6570a Mon Sep 17 00:00:00 2001 From: contentis Date: Tue, 2 Sep 2025 02:33:50 +0200 Subject: [PATCH 0529/1073] Enable Convolution AutoTuning (#9301) --- comfy/cli_args.py | 1 + comfy/ops.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index de3e85c08..72eeaea9a 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -143,6 +143,7 @@ class PerformanceFeature(enum.Enum): Fp16Accumulation = "fp16_accumulation" Fp8MatrixMultiplication = "fp8_matrix_mult" CublasOps = "cublas_ops" + AutoTune = "autotune" parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. 
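With the new enum member, cuDNN convolution autotuning becomes an opt-in performance feature. Assuming the existing --fast parsing is unchanged, it would be enabled like any other feature:

    python main.py --fast autotune    # only cuDNN benchmark autotuning
    python main.py --fast             # bare flag: every PerformanceFeature, autotune included

torch.backends.cudnn.benchmark trades startup time for faster steady-state convolutions: the first pass at each new input shape is spent benchmarking kernels, and later passes reuse the winner.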
Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops") diff --git a/comfy/ops.py b/comfy/ops.py index 18e7db705..55e958adb 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -52,6 +52,9 @@ except (ModuleNotFoundError, TypeError): cast_to = comfy.model_management.cast_to #TODO: remove once no more references +if torch.cuda.is_available() and torch.backends.cudnn.is_available() and PerformanceFeature.AutoTune in args.fast: + torch.backends.cudnn.benchmark = True + def cast_to_input(weight, input, non_blocking=False, copy=True): return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy) From 3412d53b1d69e4dfedf7e86c3092cea085094053 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 2 Sep 2025 12:36:22 -0700 Subject: [PATCH 0530/1073] USO style reference. (#9677) Load the projector.safetensors file with the ModelPatchLoader node and use the siglip_vision_patch14_384.safetensors "clip vision" model and the USOStyleReferenceNode. --- comfy/clip_model.py | 12 +- comfy/clip_vision.py | 18 ++- comfy/ldm/flux/model.py | 11 +- comfy/model_patcher.py | 3 + comfy_extras/nodes_model_patch.py | 186 +++++++++++++++++++++++++++++- 5 files changed, 222 insertions(+), 8 deletions(-) diff --git a/comfy/clip_model.py b/comfy/clip_model.py index 7e47d8a55..7c0cadab5 100644 --- a/comfy/clip_model.py +++ b/comfy/clip_model.py @@ -61,8 +61,12 @@ class CLIPEncoder(torch.nn.Module): def forward(self, x, mask=None, intermediate_output=None): optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True) + all_intermediate = None if intermediate_output is not None: - if intermediate_output < 0: + if intermediate_output == "all": + all_intermediate = [] + intermediate_output = None + elif intermediate_output < 0: intermediate_output = len(self.layers) + intermediate_output intermediate = None @@ -70,6 +74,12 @@ class CLIPEncoder(torch.nn.Module): x = l(x, mask, optimized_attention) if i == intermediate_output: intermediate = x.clone() + if all_intermediate is not None: + all_intermediate.append(x.unsqueeze(1).clone()) + + if all_intermediate is not None: + intermediate = torch.cat(all_intermediate, dim=1) + return x, intermediate class CLIPEmbeddings(torch.nn.Module): diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 00aab9164..2fa410cb7 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -50,7 +50,13 @@ class ClipVisionModel(): self.image_size = config.get("image_size", 224) self.image_mean = config.get("image_mean", [0.48145466, 0.4578275, 0.40821073]) self.image_std = config.get("image_std", [0.26862954, 0.26130258, 0.27577711]) - model_class = IMAGE_ENCODERS.get(config.get("model_type", "clip_vision_model")) + model_type = config.get("model_type", "clip_vision_model") + model_class = IMAGE_ENCODERS.get(model_type) + if model_type == "siglip_vision_model": + self.return_all_hidden_states = True + else: + self.return_all_hidden_states = False + self.load_device = comfy.model_management.text_encoder_device() offload_device = comfy.model_management.text_encoder_offload_device() self.dtype = comfy.model_management.text_encoder_dtype(self.load_device) @@ -68,12 +74,18 @@ class ClipVisionModel(): def encode_image(self, image, crop=True): comfy.model_management.load_model_gpu(self.patcher) pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float() - 
out = self.model(pixel_values=pixel_values, intermediate_output=-2) + out = self.model(pixel_values=pixel_values, intermediate_output='all' if self.return_all_hidden_states else -2) outputs = Output() outputs["last_hidden_state"] = out[0].to(comfy.model_management.intermediate_device()) outputs["image_embeds"] = out[2].to(comfy.model_management.intermediate_device()) - outputs["penultimate_hidden_states"] = out[1].to(comfy.model_management.intermediate_device()) + if self.return_all_hidden_states: + all_hs = out[1].to(comfy.model_management.intermediate_device()) + outputs["penultimate_hidden_states"] = all_hs[:, -2] + outputs["all_hidden_states"] = all_hs + else: + outputs["penultimate_hidden_states"] = out[1].to(comfy.model_management.intermediate_device()) + outputs["mm_projected"] = out[3] return outputs diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 1e62f4626..d4be6bb61 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -106,6 +106,7 @@ class Flux(nn.Module): if y is None: y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype) + patches = transformer_options.get("patches", {}) patches_replace = transformer_options.get("patches_replace", {}) if img.ndim != 3 or txt.ndim != 3: raise ValueError("Input img and txt tensors must have 3 dimensions.") @@ -117,9 +118,17 @@ class Flux(nn.Module): if guidance is not None: vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype)) - vec = vec + self.vector_in(y[:,:self.params.vec_in_dim]) + vec = vec + self.vector_in(y[:, :self.params.vec_in_dim]) txt = self.txt_in(txt) + if "post_input" in patches: + for p in patches["post_input"]: + out = p({"img": img, "txt": txt, "img_ids": img_ids, "txt_ids": txt_ids}) + img = out["img"] + txt = out["txt"] + img_ids = out["img_ids"] + txt_ids = out["txt_ids"] + if img_ids is not None: ids = torch.cat((txt_ids, img_ids), dim=1) pe = self.pe_embedder(ids) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index a944cb421..1fd03d9d1 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -433,6 +433,9 @@ class ModelPatcher: def set_model_double_block_patch(self, patch): self.set_model_patch(patch, "double_block") + def set_model_post_input_patch(self, patch): + self.set_model_patch(patch, "post_input") + def add_object_patch(self, name, obj): self.object_patches[name] = obj diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index 65e766b52..783c59b6b 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -1,4 +1,5 @@ import torch +from torch import nn import folder_paths import comfy.utils import comfy.ops @@ -58,6 +59,136 @@ class QwenImageBlockWiseControlNet(torch.nn.Module): return self.controlnet_blocks[block_id](img, controlnet_conditioning) +class SigLIPMultiFeatProjModel(torch.nn.Module): + """ + SigLIP Multi-Feature Projection Model for processing style features from different layers + and projecting them into a unified hidden space. 
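The 'all' mode stacks every encoder layer's output along a new dim 1, which is what lets downstream patches read features from arbitrary depths. Rough shape bookkeeping, assuming the 27-layer SigLIP-400M patch14/384 configuration (729 tokens, width 1152); the numbers are illustrative:

    import torch

    all_hs = torch.randn(1, 27, 729, 1152)     # outputs["all_hidden_states"]
    penultimate = all_hs[:, -2]                # what older code got via intermediate_output=-2
    low, mid = all_hs[:, -20], all_hs[:, -11]  # extra depths consumed by the style projector below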
+ + Args: + siglip_token_nums (int): Number of SigLIP tokens, default 257 + style_token_nums (int): Number of style tokens, default 256 + siglip_token_dims (int): Dimension of SigLIP tokens, default 1536 + hidden_size (int): Hidden layer size, default 3072 + context_layer_norm (bool): Whether to use context layer normalization, default False + """ + + def __init__( + self, + siglip_token_nums: int = 729, + style_token_nums: int = 64, + siglip_token_dims: int = 1152, + hidden_size: int = 3072, + context_layer_norm: bool = True, + device=None, dtype=None, operations=None + ): + super().__init__() + + # High-level feature processing (layer -2) + self.high_embedding_linear = nn.Sequential( + operations.Linear(siglip_token_nums, style_token_nums), + nn.SiLU() + ) + self.high_layer_norm = ( + operations.LayerNorm(siglip_token_dims) if context_layer_norm else nn.Identity() + ) + self.high_projection = operations.Linear(siglip_token_dims, hidden_size, bias=True) + + # Mid-level feature processing (layer -11) + self.mid_embedding_linear = nn.Sequential( + operations.Linear(siglip_token_nums, style_token_nums), + nn.SiLU() + ) + self.mid_layer_norm = ( + operations.LayerNorm(siglip_token_dims) if context_layer_norm else nn.Identity() + ) + self.mid_projection = operations.Linear(siglip_token_dims, hidden_size, bias=True) + + # Low-level feature processing (layer -20) + self.low_embedding_linear = nn.Sequential( + operations.Linear(siglip_token_nums, style_token_nums), + nn.SiLU() + ) + self.low_layer_norm = ( + operations.LayerNorm(siglip_token_dims) if context_layer_norm else nn.Identity() + ) + self.low_projection = operations.Linear(siglip_token_dims, hidden_size, bias=True) + + def forward(self, siglip_outputs): + """ + Forward pass function + + Args: + siglip_outputs: Output from SigLIP model, containing hidden_states + + Returns: + torch.Tensor: Concatenated multi-layer features with shape [bs, 3*style_token_nums, hidden_size] + """ + dtype = next(self.high_embedding_linear.parameters()).dtype + + # Process high-level features (layer -2) + high_embedding = self._process_layer_features( + siglip_outputs[2], + self.high_embedding_linear, + self.high_layer_norm, + self.high_projection, + dtype + ) + + # Process mid-level features (layer -11) + mid_embedding = self._process_layer_features( + siglip_outputs[1], + self.mid_embedding_linear, + self.mid_layer_norm, + self.mid_projection, + dtype + ) + + # Process low-level features (layer -20) + low_embedding = self._process_layer_features( + siglip_outputs[0], + self.low_embedding_linear, + self.low_layer_norm, + self.low_projection, + dtype + ) + + # Concatenate features from all layers + return torch.cat((high_embedding, mid_embedding, low_embedding), dim=1) + + def _process_layer_features( + self, + hidden_states: torch.Tensor, + embedding_linear: nn.Module, + layer_norm: nn.Module, + projection: nn.Module, + dtype: torch.dtype + ) -> torch.Tensor: + """ + Helper function to process features from a single layer + + Args: + hidden_states: Input hidden states [bs, seq_len, dim] + embedding_linear: Embedding linear layer + layer_norm: Layer normalization + projection: Projection layer + dtype: Target data type + + Returns: + torch.Tensor: Processed features [bs, style_token_nums, hidden_size] + """ + # Transform dimensions: [bs, seq_len, dim] -> [bs, dim, seq_len] -> [bs, dim, style_token_nums] -> [bs, style_token_nums, dim] + embedding = embedding_linear( + hidden_states.to(dtype).transpose(1, 2) + ).transpose(1, 2) + + # Apply layer
normalization + embedding = layer_norm(embedding) + + # Project to target hidden space + embedding = projection(embedding) + + return embedding + class ModelPatchLoader: @classmethod def INPUT_TYPES(s): @@ -73,9 +204,14 @@ class ModelPatchLoader: model_patch_path = folder_paths.get_full_path_or_raise("model_patches", name) sd = comfy.utils.load_torch_file(model_patch_path, safe_load=True) dtype = comfy.utils.weight_dtype(sd) - # TODO: this node will work with more types of model patches - additional_in_dim = sd["img_in.weight"].shape[1] - 64 - model = QwenImageBlockWiseControlNet(additional_in_dim=additional_in_dim, device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) + + if 'controlnet_blocks.0.y_rms.weight' in sd: + additional_in_dim = sd["img_in.weight"].shape[1] - 64 + model = QwenImageBlockWiseControlNet(additional_in_dim=additional_in_dim, device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) + elif 'feature_embedder.mid_layer_norm.bias' in sd: + sd = comfy.utils.state_dict_prefix_replace(sd, {"feature_embedder.": ""}, filter_keys=True) + model = SigLIPMultiFeatProjModel(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) + model.load_state_dict(sd) model = comfy.model_patcher.ModelPatcher(model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) return (model,) @@ -157,7 +293,51 @@ class QwenImageDiffsynthControlnet: return (model_patched,) +class UsoStyleProjectorPatch: + def __init__(self, model_patch, encoded_image): + self.model_patch = model_patch + self.encoded_image = encoded_image + + def __call__(self, kwargs): + txt_ids = kwargs.get("txt_ids") + txt = kwargs.get("txt") + siglip_embedding = self.model_patch.model(self.encoded_image.to(txt.dtype)).to(txt.dtype) + txt = torch.cat([siglip_embedding, txt], dim=1) + kwargs['txt'] = txt + kwargs['txt_ids'] = torch.cat([torch.zeros(siglip_embedding.shape[0], siglip_embedding.shape[1], 3, dtype=txt_ids.dtype, device=txt_ids.device), txt_ids], dim=1) + return kwargs + + def to(self, device_or_dtype): + if isinstance(device_or_dtype, torch.device): + self.encoded_image = self.encoded_image.to(device_or_dtype) + return self + + def models(self): + return [self.model_patch] + + +class USOStyleReference: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL",), + "model_patch": ("MODEL_PATCH",), + "clip_vision_output": ("CLIP_VISION_OUTPUT", ), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "apply_patch" + EXPERIMENTAL = True + + CATEGORY = "advanced/model_patches/flux" + + def apply_patch(self, model, model_patch, clip_vision_output): + encoded_image = torch.stack((clip_vision_output.all_hidden_states[:, -20], clip_vision_output.all_hidden_states[:, -11], clip_vision_output.penultimate_hidden_states)) + model_patched = model.clone() + model_patched.set_model_post_input_patch(UsoStyleProjectorPatch(model_patch, encoded_image)) + return (model_patched,) + + NODE_CLASS_MAPPINGS = { "ModelPatchLoader": ModelPatchLoader, "QwenImageDiffsynthControlnet": QwenImageDiffsynthControlnet, + "USOStyleReference": USOStyleReference, } From e3018c2a5aeb99f0c5b595621949a451686ce55a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 2 Sep 2025 13:12:07 -0700 Subject: [PATCH 0531/1073] uso -> uxo/uno as requested. 
(#9688) --- comfy/ldm/flux/model.py | 2 +- comfy_extras/nodes_flux.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index d4be6bb61..8ea7d4f57 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -248,7 +248,7 @@ class Flux(nn.Module): index += 1 h_offset = 0 w_offset = 0 - elif ref_latents_method == "uso": + elif ref_latents_method == "uxo": index = 0 h_offset = h_len * patch_size + h w_offset = w_len * patch_size + w diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py index 1bf7ddd92..25e029ffd 100644 --- a/comfy_extras/nodes_flux.py +++ b/comfy_extras/nodes_flux.py @@ -105,7 +105,7 @@ class FluxKontextMultiReferenceLatentMethod: def INPUT_TYPES(s): return {"required": { "conditioning": ("CONDITIONING", ), - "reference_latents_method": (("offset", "index", "uso"), ), + "reference_latents_method": (("offset", "index", "uxo/uno"), ), }} RETURN_TYPES = ("CONDITIONING",) @@ -115,6 +115,8 @@ class FluxKontextMultiReferenceLatentMethod: CATEGORY = "advanced/conditioning/flux" def append(self, conditioning, reference_latents_method): + if "uxo" in reference_latents_method or "uso" in reference_latents_method: + reference_latents_method = "uxo" c = node_helpers.conditioning_set_values(conditioning, {"reference_latents_method": reference_latents_method}) return (c, ) From 464ba1d6140eda6a0173703ac00c69f7fddab6ba Mon Sep 17 00:00:00 2001 From: Deep Roy Date: Tue, 2 Sep 2025 19:41:10 -0400 Subject: [PATCH 0532/1073] Accept prompt_id in interrupt handler (#9607) * Accept prompt_id in interrupt handler * remove a log --- server.py | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/server.py b/server.py index 8f9c88ebf..3d323eaf8 100644 --- a/server.py +++ b/server.py @@ -729,7 +729,34 @@ class PromptServer(): @routes.post("/interrupt") async def post_interrupt(request): - nodes.interrupt_processing() + try: + json_data = await request.json() + except json.JSONDecodeError: + json_data = {} + + # Check if a specific prompt_id was provided for targeted interruption + prompt_id = json_data.get('prompt_id') + if prompt_id: + currently_running, _ = self.prompt_queue.get_current_queue() + + # Check if the prompt_id matches any currently running prompt + should_interrupt = False + for item in currently_running: + # item structure: (number, prompt_id, prompt, extra_data, outputs_to_execute) + if item[1] == prompt_id: + logging.info(f"Interrupting prompt {prompt_id}") + should_interrupt = True + break + + if should_interrupt: + nodes.interrupt_processing() + else: + logging.info(f"Prompt {prompt_id} is not currently running, skipping interrupt") + else: + # No prompt_id provided, do a global interrupt + logging.info("Global interrupt (no prompt_id specified)") + nodes.interrupt_processing() + return web.Response(status=200) @routes.post("/free") From 1bcb469089a71fb1946b9f14e994df1b42b83def Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 2 Sep 2025 17:05:57 -0700 Subject: [PATCH 0533/1073] ImageScaleToMaxDimension node. 
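For targeted interruption the endpoint now accepts an optional JSON body; with the default listen address this could presumably be exercised as:

    curl -X POST http://127.0.0.1:8188/interrupt \
         -H 'Content-Type: application/json' \
         -d '{"prompt_id": "<id returned by POST /prompt>"}'

Omitting the body, or the prompt_id key, preserves the old behavior of a global interrupt; a prompt_id that is not currently running is simply skipped.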
(#9689) --- comfy_extras/nodes_images.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index fba80e2ae..392aea32c 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -625,6 +625,37 @@ class ImageFlip: return (image,) +class ImageScaleToMaxDimension: + upscale_methods = ["area", "lanczos", "bilinear", "nearest-exact", "bilinear", "bicubic"] + + @classmethod + def INPUT_TYPES(s): + return {"required": {"image": ("IMAGE",), + "upscale_method": (s.upscale_methods,), + "largest_size": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1})}} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "upscale" + + CATEGORY = "image/upscaling" + + def upscale(self, image, upscale_method, largest_size): + height = image.shape[1] + width = image.shape[2] + + if height > width: + width = round((width / height) * largest_size) + height = largest_size + elif width > height: + height = round((height / width) * largest_size) + width = largest_size + else: + height = largest_size + width = largest_size + + samples = image.movedim(-1, 1) + s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled") + s = s.movedim(1, -1) + return (s,) NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, @@ -639,4 +670,5 @@ NODE_CLASS_MAPPINGS = { "GetImageSize": GetImageSize, "ImageRotate": ImageRotate, "ImageFlip": ImageFlip, + "ImageScaleToMaxDimension": ImageScaleToMaxDimension, } From 4f5812b93712e0f52ae8fe80a89e8b5e7d0fa309 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 3 Sep 2025 08:06:41 +0800 Subject: [PATCH 0534/1073] Update template to 0.1.73 (#9686) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7f64aacca..4ebe6cc2a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.11 -comfyui-workflow-templates==0.1.70 +comfyui-workflow-templates==0.1.73 comfyui-embedded-docs==0.2.6 torch torchsde From 26d5b86da8ceb4589ee70f12ff2209b93a2d99e0 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 3 Sep 2025 23:17:07 +0300 Subject: [PATCH 0535/1073] feat(api-nodes): add ByteDance Image nodes (#9477) --- comfy_api_nodes/nodes_bytedance.py | 336 +++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 337 insertions(+) create mode 100644 comfy_api_nodes/nodes_bytedance.py diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py new file mode 100644 index 000000000..fb6aba7fa --- /dev/null +++ b/comfy_api_nodes/nodes_bytedance.py @@ -0,0 +1,336 @@ +import logging +from enum import Enum +from typing import Optional +from typing_extensions import override + +import torch +from pydantic import BaseModel, Field + +from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api_nodes.util.validation_utils import ( + validate_image_aspect_ratio_range, + get_number_of_images, +) +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, +) +from comfy_api_nodes.apinode_utils import download_url_to_image_tensor, upload_images_to_comfyapi, validate_string + + +BYTEPLUS_ENDPOINT = "/proxy/byteplus/api/v3/images/generations" + + +class Text2ImageModelName(str, Enum): + seedream3 = "seedream-3-0-t2i-250415" + + +class Image2ImageModelName(str, Enum): + seededit3 = "seededit-3-0-i2i-250628" + + +class 
Text2ImageTaskCreationRequest(BaseModel): + model: Text2ImageModelName = Text2ImageModelName.seedream3 + prompt: str = Field(...) + response_format: Optional[str] = Field("url") + size: Optional[str] = Field(None) + seed: Optional[int] = Field(0, ge=0, le=2147483647) + guidance_scale: Optional[float] = Field(..., ge=1.0, le=10.0) + watermark: Optional[bool] = Field(True) + + +class Image2ImageTaskCreationRequest(BaseModel): + model: Image2ImageModelName = Image2ImageModelName.seededit3 + prompt: str = Field(...) + response_format: Optional[str] = Field("url") + image: str = Field(..., description="Base64 encoded string or image URL") + size: Optional[str] = Field("adaptive") + seed: Optional[int] = Field(..., ge=0, le=2147483647) + guidance_scale: Optional[float] = Field(..., ge=1.0, le=10.0) + watermark: Optional[bool] = Field(True) + + +class ImageTaskCreationResponse(BaseModel): + model: str = Field(...) + created: int = Field(..., description="Unix timestamp (in seconds) indicating time when the request was created.") + data: list = Field([], description="Contains information about the generated image(s).") + error: dict = Field({}, description="Contains `code` and `message` fields in case of error.") + + +RECOMMENDED_PRESETS = [ + ("1024x1024 (1:1)", 1024, 1024), + ("864x1152 (3:4)", 864, 1152), + ("1152x864 (4:3)", 1152, 864), + ("1280x720 (16:9)", 1280, 720), + ("720x1280 (9:16)", 720, 1280), + ("832x1248 (2:3)", 832, 1248), + ("1248x832 (3:2)", 1248, 832), + ("1512x648 (21:9)", 1512, 648), + ("2048x2048 (1:1)", 2048, 2048), + ("Custom", None, None), +] + + +def get_image_url_from_response(response: ImageTaskCreationResponse) -> str: + if response.error: + error_msg = f"ByteDance request failed. Code: {response.error['code']}, message: {response.error['message']}" + logging.info(error_msg) + raise RuntimeError(error_msg) + logging.info("ByteDance task succeeded, image URL: %s", response.data[0]["url"]) + return response.data[0]["url"] + + +class ByteDanceImageNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ByteDanceImageNode", + display_name="ByteDance Image", + category="api node/image/ByteDance", + description="Generate images from a text prompt using ByteDance models via the API", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in Text2ImageModelName], + default=Text2ImageModelName.seedream3.value, + tooltip="Model name", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="The text prompt used to generate the image", + ), + comfy_io.Combo.Input( + "size_preset", + options=[label for label, _, _ in RECOMMENDED_PRESETS], + tooltip="Pick a recommended size. Select Custom to use the width and height below", + ), + comfy_io.Int.Input( + "width", + default=1024, + min=512, + max=2048, + step=64, + tooltip="Custom width for the image. Used only when `size_preset` is set to `Custom`", + ), + comfy_io.Int.Input( + "height", + default=1024, + min=512, + max=2048, + step=64, + tooltip="Custom height for the image. 
Used only when `size_preset` is set to `Custom`", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation", + optional=True, + ), + comfy_io.Float.Input( + "guidance_scale", + default=2.5, + min=1.0, + max=10.0, + step=0.01, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Higher value makes the image follow the prompt more closely", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the image", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + size_preset: str, + width: int, + height: int, + seed: int, + guidance_scale: float, + watermark: bool, + ) -> comfy_io.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + w = h = None + for label, tw, th in RECOMMENDED_PRESETS: + if label == size_preset: + w, h = tw, th + break + + if w is None or h is None: + w, h = width, height + if not (512 <= w <= 2048) or not (512 <= h <= 2048): + raise ValueError( + f"Custom size out of range: {w}x{h}. " + "Both width and height must be between 512 and 2048 pixels." + ) + + payload = Text2ImageTaskCreationRequest( + model=model, + prompt=prompt, + size=f"{w}x{h}", + seed=seed, + guidance_scale=guidance_scale, + watermark=watermark, + ) + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + response = await SynchronousOperation( + endpoint=ApiEndpoint( + path=BYTEPLUS_ENDPOINT, + method=HttpMethod.POST, + request_model=Text2ImageTaskCreationRequest, + response_model=ImageTaskCreationResponse, + ), + request=payload, + auth_kwargs=auth_kwargs, + ).execute() + return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) + + +class ByteDanceImageEditNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ByteDanceImageEditNode", + display_name="ByteDance Image Edit", + category="api node/image/ByteDance", + description="Edit images using ByteDance models via the API based on a text prompt", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in Image2ImageModelName], + default=Image2ImageModelName.seededit3.value, + tooltip="Model name", + ), + comfy_io.Image.Input( + "image", + tooltip="The base image to edit", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Instruction to edit image", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation", + optional=True, + ), + comfy_io.Float.Input( + "guidance_scale", + default=5.5, + min=1.0, + max=10.0, + step=0.01, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Higher value makes the image follow the prompt more closely", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the image", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, +
comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + image: torch.Tensor, + prompt: str, + seed: int, + guidance_scale: float, + watermark: bool, + ) -> comfy_io.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + if get_number_of_images(image) != 1: + raise ValueError("Exactly one input image is required.") + validate_image_aspect_ratio_range(image, (1, 3), (3, 1)) + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + source_url = (await upload_images_to_comfyapi( + image, + max_images=1, + mime_type="image/png", + auth_kwargs=auth_kwargs, + ))[0] + payload = Image2ImageTaskCreationRequest( + model=model, + prompt=prompt, + image=source_url, + seed=seed, + guidance_scale=guidance_scale, + watermark=watermark, + ) + response = await SynchronousOperation( + endpoint=ApiEndpoint( + path=BYTEPLUS_ENDPOINT, + method=HttpMethod.POST, + request_model=Image2ImageTaskCreationRequest, + response_model=ImageTaskCreationResponse, + ), + request=payload, + auth_kwargs=auth_kwargs, + ).execute() + return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) + + +class ByteDanceExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + ByteDanceImageNode, + ByteDanceImageEditNode, + ] + +async def comfy_entrypoint() -> ByteDanceExtension: + return ByteDanceExtension() diff --git a/nodes.py b/nodes.py index 0aff6b14a..6c2f9dd14 100644 --- a/nodes.py +++ b/nodes.py @@ -2344,6 +2344,7 @@ async def init_builtin_api_nodes(): "nodes_veo2.py", "nodes_kling.py", "nodes_bfl.py", + "nodes_bytedance.py", "nodes_luma.py", "nodes_recraft.py", "nodes_pixverse.py", From 50333f1715c03aa4100711eb6d075516a4021d24 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 3 Sep 2025 23:17:37 +0300 Subject: [PATCH 0536/1073] api nodes(Ideogram): add Ideogram Character (#9616) * api nodes(Ideogram): add Ideogram Character * rename renderingSpeed default value from 'balanced' to 'default' --- comfy_api_nodes/apis/__init__.py | 22 ++++++++- comfy_api_nodes/nodes_ideogram.py | 77 ++++++++++++++++++++++++++++--- 2 files changed, 91 insertions(+), 8 deletions(-) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 7a09df55b..78a23db30 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -951,7 +951,11 @@ class MagicPrompt2(str, Enum): class StyleType1(str, Enum): + AUTO = 'AUTO' GENERAL = 'GENERAL' + REALISTIC = 'REALISTIC' + DESIGN = 'DESIGN' + FICTION = 'FICTION' class ImagenImageGenerationInstance(BaseModel): @@ -2676,7 +2680,7 @@ class ReleaseNote(BaseModel): class RenderingSpeed(str, Enum): - BALANCED = 'BALANCED' + DEFAULT = 'DEFAULT' TURBO = 'TURBO' QUALITY = 'QUALITY' @@ -4918,6 +4922,14 @@ class IdeogramV3EditRequest(BaseModel): None, description='A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format.', ) + character_reference_images: Optional[List[str]] = Field( + None, + description='Generations with character reference are subject to the character reference pricing. A set of images to use as character references (maximum total size 10MB across all character references), currently only supports 1 character reference image. 
The images should be in JPEG, PNG or WebP format.' + ) + character_reference_images_mask: Optional[List[str]] = Field( + None, + description='Optional masks for character reference images. When provided, must match the number of character_reference_images. Each mask should be a grayscale image of the same dimensions as the corresponding character reference image. The images should be in JPEG, PNG or WebP format.' + ) class IdeogramV3Request(BaseModel): @@ -4951,6 +4963,14 @@ class IdeogramV3Request(BaseModel): style_type: Optional[StyleType1] = Field( None, description='The type of style to apply' ) + character_reference_images: Optional[List[str]] = Field( + None, + description='Generations with character reference are subject to the character reference pricing. A set of images to use as character references (maximum total size 10MB across all character references), currently only supports 1 character reference image. The images should be in JPEG, PNG or WebP format.' + ) + character_reference_images_mask: Optional[List[str]] = Field( + None, + description='Optional masks for character reference images. When provided, must match the number of character_reference_images. Each mask should be a grayscale image of the same dimensions as the corresponding character reference image. The images should be in JPEG, PNG or WebP format.' + ) class ImagenGenerateImageResponse(BaseModel): diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index d28895f3e..2d1c32e4f 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -255,6 +255,7 @@ class IdeogramV1(comfy_io.ComfyNode): display_name="Ideogram V1", category="api node/image/Ideogram", description="Generates images using the Ideogram V1 model.", + is_api_node=True, inputs=[ comfy_io.String.Input( "prompt", @@ -383,6 +384,7 @@ class IdeogramV2(comfy_io.ComfyNode): display_name="Ideogram V2", category="api node/image/Ideogram", description="Generates images using the Ideogram V2 model.", + is_api_node=True, inputs=[ comfy_io.String.Input( "prompt", @@ -552,6 +554,7 @@ class IdeogramV3(comfy_io.ComfyNode): category="api node/image/Ideogram", description="Generates images using the Ideogram V3 model. 
" "Supports both regular image generation from text prompts and image editing with mask.", + is_api_node=True, inputs=[ comfy_io.String.Input( "prompt", @@ -612,11 +615,21 @@ class IdeogramV3(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "rendering_speed", - options=["BALANCED", "TURBO", "QUALITY"], - default="BALANCED", + options=["DEFAULT", "TURBO", "QUALITY"], + default="DEFAULT", tooltip="Controls the trade-off between generation speed and quality", optional=True, ), + comfy_io.Image.Input( + "character_image", + tooltip="Image to use as character reference.", + optional=True, + ), + comfy_io.Mask.Input( + "character_mask", + tooltip="Optional mask for character reference image.", + optional=True, + ), ], outputs=[ comfy_io.Image.Output(), @@ -639,12 +652,46 @@ class IdeogramV3(comfy_io.ComfyNode): magic_prompt_option="AUTO", seed=0, num_images=1, - rendering_speed="BALANCED", + rendering_speed="DEFAULT", + character_image=None, + character_mask=None, ): auth = { "auth_token": cls.hidden.auth_token_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org, } + if rendering_speed == "BALANCED": # for backward compatibility + rendering_speed = "DEFAULT" + + character_img_binary = None + character_mask_binary = None + + if character_image is not None: + input_tensor = character_image.squeeze().cpu() + if character_mask is not None: + character_mask = resize_mask_to_image(character_mask, character_image, allow_gradient=False) + character_mask = 1.0 - character_mask + if character_mask.shape[1:] != character_image.shape[1:-1]: + raise Exception("Character mask and image must be the same size") + + mask_np = (character_mask.squeeze().cpu().numpy() * 255).astype(np.uint8) + mask_img = Image.fromarray(mask_np) + mask_byte_arr = BytesIO() + mask_img.save(mask_byte_arr, format="PNG") + mask_byte_arr.seek(0) + character_mask_binary = mask_byte_arr + character_mask_binary.name = "mask.png" + + img_np = (input_tensor.numpy() * 255).astype(np.uint8) + img = Image.fromarray(img_np) + img_byte_arr = BytesIO() + img.save(img_byte_arr, format="PNG") + img_byte_arr.seek(0) + character_img_binary = img_byte_arr + character_img_binary.name = "image.png" + elif character_mask is not None: + raise Exception("Character mask requires character image to be present") + # Check if both image and mask are provided for editing mode if image is not None and mask is not None: # Edit mode @@ -693,6 +740,15 @@ class IdeogramV3(comfy_io.ComfyNode): if num_images > 1: edit_request.num_images = num_images + files = { + "image": img_binary, + "mask": mask_binary, + } + if character_img_binary: + files["character_reference_images"] = character_img_binary + if character_mask_binary: + files["character_mask_binary"] = character_mask_binary + # Execute the operation for edit mode operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -702,10 +758,7 @@ class IdeogramV3(comfy_io.ComfyNode): response_model=IdeogramGenerateResponse, ), request=edit_request, - files={ - "image": img_binary, - "mask": mask_binary, - }, + files=files, content_type="multipart/form-data", auth_kwargs=auth, ) @@ -739,6 +792,14 @@ class IdeogramV3(comfy_io.ComfyNode): if num_images > 1: gen_request.num_images = num_images + files = {} + if character_img_binary: + files["character_reference_images"] = character_img_binary + if character_mask_binary: + files["character_mask_binary"] = character_mask_binary + if files: + gen_request.style_type = "AUTO" + # Execute the operation for generation mode operation = SynchronousOperation( endpoint=ApiEndpoint( 
@@ -748,6 +809,8 @@ class IdeogramV3(comfy_io.ComfyNode): response_model=IdeogramGenerateResponse, ), request=gen_request, + files=files if files else None, + content_type="multipart/form-data", auth_kwargs=auth, ) From 22da0a83e9a251ca16b9753bf808bfa9f4b023d8 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 3 Sep 2025 23:18:27 +0300 Subject: [PATCH 0537/1073] [V3] convert Runway API nodes to the V3 schema (#9487) * convert Runway API nodes to the V3 schema * fixed small typo * fix: add tooltip for "seed" input --- comfy_api_nodes/nodes_runway.py | 744 +++++++++++++++----------------- 1 file changed, 357 insertions(+), 387 deletions(-) diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py index 98024a9fa..27b2bf748 100644 --- a/comfy_api_nodes/nodes_runway.py +++ b/comfy_api_nodes/nodes_runway.py @@ -12,6 +12,7 @@ User Guides: """ from typing import Union, Optional, Any +from typing_extensions import override from enum import Enum import torch @@ -46,9 +47,9 @@ from comfy_api_nodes.apinode_utils import ( validate_string, download_url_to_image_tensor, ) -from comfy_api_nodes.mapper_utils import model_field_to_node_input from comfy_api.input_impl import VideoFromFile -from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api_nodes.util.validation_utils import validate_image_dimensions, validate_image_aspect_ratio PATH_IMAGE_TO_VIDEO = "/proxy/runway/image_to_video" PATH_TEXT_TO_IMAGE = "/proxy/runway/text_to_image" @@ -85,20 +86,11 @@ class RunwayGen3aAspectRatio(str, Enum): def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: """Returns the video URL from the task status response if it exists.""" - if response.output and len(response.output) > 0: + if hasattr(response, "output") and len(response.output) > 0: return response.output[0] return None -# TODO: replace with updated image validation utils (upstream) -def validate_input_image(image: torch.Tensor) -> bool: - """ - Validate the input image is within the size limits for the Runway API. 
- See: https://docs.dev.runwayml.com/assets/inputs/#common-error-reasons - """ - return image.shape[2] < 8000 and image.shape[1] < 8000 - - async def poll_until_finished( auth_kwargs: dict[str, str], api_endpoint: ApiEndpoint[Any, TaskStatusResponse], @@ -134,458 +126,438 @@ def extract_progress_from_task_status( def get_image_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: """Returns the image URL from the task status response if it exists.""" - if response.output and len(response.output) > 0: + if hasattr(response, "output") and len(response.output) > 0: return response.output[0] return None -class RunwayVideoGenNode(ComfyNodeABC): - """Runway Video Node Base.""" +async def get_response( + task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None, estimated_duration: Optional[int] = None +) -> TaskStatusResponse: + """Poll the task status until it is finished then get the response.""" + return await poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_GET_TASK_STATUS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=TaskStatusResponse, + ), + estimated_duration=estimated_duration, + node_id=node_id, + ) - RETURN_TYPES = ("VIDEO",) - FUNCTION = "api_call" - CATEGORY = "api node/video/Runway" - API_NODE = True - def validate_task_created(self, response: RunwayImageToVideoResponse) -> bool: - """ - Validate the task creation response from the Runway API matches - expected format. - """ - if not bool(response.id): - raise RunwayApiError("Invalid initial response from Runway API.") - return True +async def generate_video( + request: RunwayImageToVideoRequest, + auth_kwargs: dict[str, str], + node_id: Optional[str] = None, + estimated_duration: Optional[int] = None, +) -> VideoFromFile: + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_IMAGE_TO_VIDEO, + method=HttpMethod.POST, + request_model=RunwayImageToVideoRequest, + response_model=RunwayImageToVideoResponse, + ), + request=request, + auth_kwargs=auth_kwargs, + ) - def validate_response(self, response: RunwayImageToVideoResponse) -> bool: - """ - Validate the successful task status response from the Runway API - matches expected format. - """ - if not response.output or len(response.output) == 0: - raise RunwayApiError( - "Runway task succeeded but no video data found in response." 
- ) - return True + initial_response = await initial_operation.execute() - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> RunwayImageToVideoResponse: - """Poll the task status until it is finished then get the response.""" - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_GET_TASK_STATUS}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=TaskStatusResponse, - ), - estimated_duration=AVERAGE_DURATION_FLF_SECONDS, - node_id=node_id, + final_response = await get_response(initial_response.id, auth_kwargs, node_id, estimated_duration) + if not final_response.output: + raise RunwayApiError("Runway task succeeded but no video data found in response.") + + video_url = get_video_url_from_task_status(final_response) + return await download_url_to_video_output(video_url) + + +class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="RunwayImageToVideoNodeGen3a", + display_name="Runway Image to Video (Gen3a Turbo)", + category="api node/video/Runway", + description="Generate a video from a single starting frame using Gen3a Turbo model. " + "Before diving in, review these best practices to ensure that " + "your input selections will set your generation up for success: " + "https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text prompt for the generation", + ), + comfy_io.Image.Input( + "start_frame", + tooltip="Start frame to be used for the video", + ), + comfy_io.Combo.Input( + "duration", + options=[model.value for model in Duration], + ), + comfy_io.Combo.Input( + "ratio", + options=[model.value for model in RunwayGen3aAspectRatio], + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=4294967295, + step=1, + control_after_generate=True, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Random seed for generation", + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - async def generate_video( - self, - request: RunwayImageToVideoRequest, - auth_kwargs: dict[str, str], - node_id: Optional[str] = None, - ) -> tuple[VideoFromFile]: - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_IMAGE_TO_VIDEO, - method=HttpMethod.POST, - request_model=RunwayImageToVideoRequest, - response_model=RunwayImageToVideoResponse, - ), - request=request, + @classmethod + async def execute( + cls, + prompt: str, + start_frame: torch.Tensor, + duration: str, + ratio: str, + seed: int, + ) -> comfy_io.NodeOutput: + validate_string(prompt, min_length=1) + validate_image_dimensions(start_frame, max_width=7999, max_height=7999) + validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) + + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + + download_urls = await upload_images_to_comfyapi( + start_frame, + max_images=1, + mime_type="image/png", auth_kwargs=auth_kwargs, ) - initial_response = await initial_operation.execute() - self.validate_task_created(initial_response) - task_id = initial_response.id - - final_response = await self.get_response(task_id, auth_kwargs, node_id) - 
self.validate_response(final_response) - - video_url = get_video_url_from_task_status(final_response) - return (await download_url_to_video_output(video_url),) + return comfy_io.NodeOutput( + await generate_video( + RunwayImageToVideoRequest( + promptText=prompt, + seed=seed, + model=Model("gen3a_turbo"), + duration=Duration(duration), + ratio=AspectRatio(ratio), + promptImage=RunwayPromptImageObject( + root=[ + RunwayPromptImageDetailedObject( + uri=str(download_urls[0]), position="first" + ) + ] + ), + ), + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + ) + ) -class RunwayImageToVideoNodeGen3a(RunwayVideoGenNode): - """Runway Image to Video Node using Gen3a Turbo model.""" - - DESCRIPTION = "Generate a video from a single starting frame using Gen3a Turbo model. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo." +class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, RunwayImageToVideoRequest, "promptText", multiline=True + def define_schema(cls): + return comfy_io.Schema( + node_id="RunwayImageToVideoNodeGen4", + display_name="Runway Image to Video (Gen4 Turbo)", + category="api node/video/Runway", + description="Generate a video from a single starting frame using Gen4 Turbo model. " + "Before diving in, review these best practices to ensure that " + "your input selections will set your generation up for success: " + "https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text prompt for the generation", ), - "start_frame": ( - IO.IMAGE, - {"tooltip": "Start frame to be used for the video"}, + comfy_io.Image.Input( + "start_frame", + tooltip="Start frame to be used for the video", ), - "duration": model_field_to_node_input( - IO.COMBO, RunwayImageToVideoRequest, "duration", enum_type=Duration + comfy_io.Combo.Input( + "duration", + options=[model.value for model in Duration], ), - "ratio": model_field_to_node_input( - IO.COMBO, - RunwayImageToVideoRequest, + comfy_io.Combo.Input( "ratio", - enum_type=RunwayGen3aAspectRatio, + options=[model.value for model in RunwayGen4TurboAspectRatio], ), - "seed": model_field_to_node_input( - IO.INT, - RunwayImageToVideoRequest, + comfy_io.Int.Input( "seed", + default=0, + min=0, + max=4294967295, + step=1, control_after_generate=True, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Random seed for generation", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, start_frame: torch.Tensor, duration: str, ratio: str, seed: int, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[VideoFromFile]: - # Validate inputs + ) -> comfy_io.NodeOutput: validate_string(prompt, min_length=1) - validate_input_image(start_frame) + validate_image_dimensions(start_frame, max_width=7999, max_height=7999) + validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, 
max_aspect_ratio=2.0) + + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } - # Upload image download_urls = await upload_images_to_comfyapi( start_frame, max_images=1, mime_type="image/png", - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) - if len(download_urls) != 1: - raise RunwayApiError("Failed to upload one or more images to comfy api.") - return await self.generate_video( - RunwayImageToVideoRequest( - promptText=prompt, - seed=seed, - model=Model("gen3a_turbo"), - duration=Duration(duration), - ratio=AspectRatio(ratio), - promptImage=RunwayPromptImageObject( - root=[ - RunwayPromptImageDetailedObject( - uri=str(download_urls[0]), position="first" - ) - ] + return comfy_io.NodeOutput( + await generate_video( + RunwayImageToVideoRequest( + promptText=prompt, + seed=seed, + model=Model("gen4_turbo"), + duration=Duration(duration), + ratio=AspectRatio(ratio), + promptImage=RunwayPromptImageObject( + root=[ + RunwayPromptImageDetailedObject( + uri=str(download_urls[0]), position="first" + ) + ] + ), ), - ), - auth_kwargs=kwargs, - node_id=unique_id, + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + estimated_duration=AVERAGE_DURATION_FLF_SECONDS, + ) ) -class RunwayImageToVideoNodeGen4(RunwayVideoGenNode): - """Runway Image to Video Node using Gen4 Turbo model.""" - - DESCRIPTION = "Generate a video from a single starting frame using Gen4 Turbo model. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video." +class RunwayFirstLastFrameNode(comfy_io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, RunwayImageToVideoRequest, "promptText", multiline=True + def define_schema(cls): + return comfy_io.Schema( + node_id="RunwayFirstLastFrameNode", + display_name="Runway First-Last-Frame to Video", + category="api node/video/Runway", + description="Upload first and last keyframes, draft a prompt, and generate a video. " + "More complex transitions, such as cases where the Last frame is completely different " + "from the First frame, may benefit from the longer 10s duration. " + "This would give the generation more time to smoothly transition between the two inputs. " + "Before diving in, review these best practices to ensure that your input selections " + "will set your generation up for success: " + "https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text prompt for the generation", ), - "start_frame": ( - IO.IMAGE, - {"tooltip": "Start frame to be used for the video"}, + comfy_io.Image.Input( + "start_frame", + tooltip="Start frame to be used for the video", ), - "duration": model_field_to_node_input( - IO.COMBO, RunwayImageToVideoRequest, "duration", enum_type=Duration + comfy_io.Image.Input( + "end_frame", + tooltip="End frame to be used for the video. 
Supported for gen3a_turbo only.", ), - "ratio": model_field_to_node_input( - IO.COMBO, - RunwayImageToVideoRequest, + comfy_io.Combo.Input( + "duration", + options=[model.value for model in Duration], + ), + comfy_io.Combo.Input( "ratio", - enum_type=RunwayGen4TurboAspectRatio, + options=[model.value for model in RunwayGen3aAspectRatio], ), - "seed": model_field_to_node_input( - IO.INT, - RunwayImageToVideoRequest, + comfy_io.Int.Input( "seed", + default=0, + min=0, + max=4294967295, + step=1, control_after_generate=True, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Random seed for generation", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, - prompt: str, - start_frame: torch.Tensor, - duration: str, - ratio: str, - seed: int, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[VideoFromFile]: - # Validate inputs - validate_string(prompt, min_length=1) - validate_input_image(start_frame) - - # Upload image - download_urls = await upload_images_to_comfyapi( - start_frame, - max_images=1, - mime_type="image/png", - auth_kwargs=kwargs, - ) - if len(download_urls) != 1: - raise RunwayApiError("Failed to upload one or more images to comfy api.") - - return await self.generate_video( - RunwayImageToVideoRequest( - promptText=prompt, - seed=seed, - model=Model("gen4_turbo"), - duration=Duration(duration), - ratio=AspectRatio(ratio), - promptImage=RunwayPromptImageObject( - root=[ - RunwayPromptImageDetailedObject( - uri=str(download_urls[0]), position="first" - ) - ] - ), - ), - auth_kwargs=kwargs, - node_id=unique_id, - ) - - -class RunwayFirstLastFrameNode(RunwayVideoGenNode): - """Runway First-Last Frame Node.""" - - DESCRIPTION = "Upload first and last keyframes, draft a prompt, and generate a video. More complex transitions, such as cases where the Last frame is completely different from the First frame, may benefit from the longer 10s duration. This would give the generation more time to smoothly transition between the two inputs. Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3." - - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> RunwayImageToVideoResponse: - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_GET_TASK_STATUS}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=TaskStatusResponse, - ), - estimated_duration=AVERAGE_DURATION_FLF_SECONDS, - node_id=node_id, + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, RunwayImageToVideoRequest, "promptText", multiline=True - ), - "start_frame": ( - IO.IMAGE, - {"tooltip": "Start frame to be used for the video"}, - ), - "end_frame": ( - IO.IMAGE, - { - "tooltip": "End frame to be used for the video. Supported for gen3a_turbo only." 
- }, - ), - "duration": model_field_to_node_input( - IO.COMBO, RunwayImageToVideoRequest, "duration", enum_type=Duration - ), - "ratio": model_field_to_node_input( - IO.COMBO, - RunwayImageToVideoRequest, - "ratio", - enum_type=RunwayGen3aAspectRatio, - ), - "seed": model_field_to_node_input( - IO.INT, - RunwayImageToVideoRequest, - "seed", - control_after_generate=True, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "unique_id": "UNIQUE_ID", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, start_frame: torch.Tensor, end_frame: torch.Tensor, duration: str, ratio: str, seed: int, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[VideoFromFile]: - # Validate inputs + ) -> comfy_io.NodeOutput: validate_string(prompt, min_length=1) - validate_input_image(start_frame) - validate_input_image(end_frame) + validate_image_dimensions(start_frame, max_width=7999, max_height=7999) + validate_image_dimensions(end_frame, max_width=7999, max_height=7999) + validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) + validate_image_aspect_ratio(end_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) + + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } - # Upload images stacked_input_images = image_tensor_pair_to_batch(start_frame, end_frame) download_urls = await upload_images_to_comfyapi( stacked_input_images, max_images=2, mime_type="image/png", - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) if len(download_urls) != 2: raise RunwayApiError("Failed to upload one or more images to comfy api.") - return await self.generate_video( - RunwayImageToVideoRequest( - promptText=prompt, - seed=seed, - model=Model("gen3a_turbo"), - duration=Duration(duration), - ratio=AspectRatio(ratio), - promptImage=RunwayPromptImageObject( - root=[ - RunwayPromptImageDetailedObject( - uri=str(download_urls[0]), position="first" - ), - RunwayPromptImageDetailedObject( - uri=str(download_urls[1]), position="last" - ), - ] + return comfy_io.NodeOutput( + await generate_video( + RunwayImageToVideoRequest( + promptText=prompt, + seed=seed, + model=Model("gen3a_turbo"), + duration=Duration(duration), + ratio=AspectRatio(ratio), + promptImage=RunwayPromptImageObject( + root=[ + RunwayPromptImageDetailedObject( + uri=str(download_urls[0]), position="first" + ), + RunwayPromptImageDetailedObject( + uri=str(download_urls[1]), position="last" + ), + ] + ), ), - ), - auth_kwargs=kwargs, - node_id=unique_id, + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + estimated_duration=AVERAGE_DURATION_FLF_SECONDS, + ) ) -class RunwayTextToImageNode(ComfyNodeABC): - """Runway Text to Image Node.""" - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "api_call" - CATEGORY = "api node/image/Runway" - API_NODE = True - DESCRIPTION = "Generate an image from a text prompt using Runway's Gen 4 model. You can also include reference images to guide the generation." +class RunwayTextToImageNode(comfy_io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, RunwayTextToImageRequest, "promptText", multiline=True + def define_schema(cls): + return comfy_io.Schema( + node_id="RunwayTextToImageNode", + display_name="Runway Text to Image", + category="api node/image/Runway", + description="Generate an image from a text prompt using Runway's Gen 4 model. 
" + "You can also include reference image to guide the generation.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text prompt for the generation", ), - "ratio": model_field_to_node_input( - IO.COMBO, - RunwayTextToImageRequest, + comfy_io.Combo.Input( "ratio", - enum_type=RunwayTextToImageAspectRatioEnum, + options=[model.value for model in RunwayTextToImageAspectRatioEnum], ), - }, - "optional": { - "reference_image": ( - IO.IMAGE, - {"tooltip": "Optional reference image to guide the generation"}, - ) - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - def validate_task_created(self, response: RunwayTextToImageResponse) -> bool: - """ - Validate the task creation response from the Runway API matches - expected format. - """ - if not bool(response.id): - raise RunwayApiError("Invalid initial response from Runway API.") - return True - - def validate_response(self, response: TaskStatusResponse) -> bool: - """ - Validate the successful task status response from the Runway API - matches expected format. - """ - if not response.output or len(response.output) == 0: - raise RunwayApiError( - "Runway task succeeded but no image data found in response." - ) - return True - - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> TaskStatusResponse: - """Poll the task status until it is finished then get the response.""" - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_GET_TASK_STATUS}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=TaskStatusResponse, - ), - estimated_duration=AVERAGE_DURATION_T2I_SECONDS, - node_id=node_id, + comfy_io.Image.Input( + "reference_image", + tooltip="Optional reference image to guide the generation", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, ratio: str, reference_image: Optional[torch.Tensor] = None, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[torch.Tensor]: - # Validate inputs + ) -> comfy_io.NodeOutput: validate_string(prompt, min_length=1) + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + # Prepare reference images if provided reference_images = None if reference_image is not None: - validate_input_image(reference_image) + validate_image_dimensions(reference_image, max_width=7999, max_height=7999) + validate_image_aspect_ratio(reference_image, min_aspect_ratio=0.5, max_aspect_ratio=2.0) download_urls = await upload_images_to_comfyapi( reference_image, max_images=1, mime_type="image/png", - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) - if len(download_urls) != 1: - raise RunwayApiError("Failed to upload reference image to comfy api.") - reference_images = [ReferenceImage(uri=str(download_urls[0]))] - # Create request request = RunwayTextToImageRequest( promptText=prompt, model=Model4.gen4_image, @@ -593,7 +565,6 @@ class RunwayTextToImageNode(ComfyNodeABC): referenceImages=reference_images, ) - # Execute initial request initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_TEXT_TO_IMAGE, @@ -602,34 +573,33 @@ class 
RunwayTextToImageNode(ComfyNodeABC): response_model=RunwayTextToImageResponse, ), request=request, - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) initial_response = await initial_operation.execute() - self.validate_task_created(initial_response) - task_id = initial_response.id # Poll for completion - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id + final_response = await get_response( + initial_response.id, + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + estimated_duration=AVERAGE_DURATION_T2I_SECONDS, ) - self.validate_response(final_response) + if not final_response.output: + raise RunwayApiError("Runway task succeeded but no image data found in response.") - # Download and return image - image_url = get_image_url_from_task_status(final_response) - return (await download_url_to_image_tensor(image_url),) + return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_task_status(final_response))) -NODE_CLASS_MAPPINGS = { - "RunwayFirstLastFrameNode": RunwayFirstLastFrameNode, - "RunwayImageToVideoNodeGen3a": RunwayImageToVideoNodeGen3a, - "RunwayImageToVideoNodeGen4": RunwayImageToVideoNodeGen4, - "RunwayTextToImageNode": RunwayTextToImageNode, -} +class RunwayExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + RunwayFirstLastFrameNode, + RunwayImageToVideoNodeGen3a, + RunwayImageToVideoNodeGen4, + RunwayTextToImageNode, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "RunwayFirstLastFrameNode": "Runway First-Last-Frame to Video", - "RunwayImageToVideoNodeGen3a": "Runway Image to Video (Gen3a Turbo)", - "RunwayImageToVideoNodeGen4": "Runway Image to Video (Gen4 Turbo)", - "RunwayTextToImageNode": "Runway Text to Image", -} +async def comfy_entrypoint() -> RunwayExtension: + return RunwayExtension() From 4368d8f87f580f526e8b2bc43bf0ac88e2b67033 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 3 Sep 2025 15:43:29 -0700 Subject: [PATCH 0538/1073] Update comment in api example. (#9708) --- script_examples/basic_api_example.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/script_examples/basic_api_example.py b/script_examples/basic_api_example.py index 9128420c4..7e20cc2c1 100644 --- a/script_examples/basic_api_example.py +++ b/script_examples/basic_api_example.py @@ -3,11 +3,7 @@ from urllib import request #This is the ComfyUI api prompt format. -#If you want it for a specific workflow you can "enable dev mode options" -#in the settings of the UI (gear beside the "Queue Size: ") this will enable -#a button on the UI to save workflows in api format. - -#keep in mind ComfyUI is pre alpha software so this format will change a bit. +#If you want it for a specific workflow you can "File -> Export (API)" in the interface. 
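#(sketch, not part of this patch: an exported API-format workflow is plain JSON;
#assuming the usual /prompt endpoint on a local server, it can be queued like so)
#   import json
#   workflow = json.loads(prompt_text)
#   data = json.dumps({"prompt": workflow}).encode("utf-8")
#   request.urlopen(request.Request("http://127.0.0.1:8188/prompt", data=data))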
#this is the one for the default workflow prompt_text = """ From f48d05a2d17fe1a69e08fbabfb080e3779b36225 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 4 Sep 2025 04:21:38 +0300 Subject: [PATCH 0539/1073] convert AlignYourStepsScheduler node to V3 schema (#9226) --- comfy_extras/nodes_align_your_steps.py | 50 +++++++++++++++++--------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/comfy_extras/nodes_align_your_steps.py b/comfy_extras/nodes_align_your_steps.py index 8d856d0e8..edd5dadd4 100644 --- a/comfy_extras/nodes_align_your_steps.py +++ b/comfy_extras/nodes_align_your_steps.py @@ -1,6 +1,10 @@ #from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html import numpy as np import torch +from typing_extensions import override + +from comfy_api.latest import ComfyExtension, io + def loglinear_interp(t_steps, num_steps): """ @@ -19,25 +23,30 @@ NOISE_LEVELS = {"SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.694615152 "SDXL":[14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582], "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002]} -class AlignYourStepsScheduler: +class AlignYourStepsScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model_type": (["SD1", "SDXL", "SVD"], ), - "steps": ("INT", {"default": 10, "min": 1, "max": 10000}), - "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" - - FUNCTION = "get_sigmas" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="AlignYourStepsScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]), + io.Int.Input("steps", default=10, min=1, max=10000), + io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), + ], + outputs=[io.Sigmas.Output()], + ) def get_sigmas(self, model_type, steps, denoise): + # Deprecated: use the V3 schema's `execute` method instead of this. + return AlignYourStepsScheduler().execute(model_type, steps, denoise).result + + @classmethod + def execute(cls, model_type, steps, denoise) -> io.NodeOutput: total_steps = steps if denoise < 1.0: if denoise <= 0.0: - return (torch.FloatTensor([]),) + return io.NodeOutput(torch.FloatTensor([])) total_steps = round(steps * denoise) sigmas = NOISE_LEVELS[model_type][:] @@ -46,8 +55,15 @@ class AlignYourStepsScheduler: sigmas = sigmas[-(total_steps + 1):] sigmas[-1] = 0 - return (torch.FloatTensor(sigmas), ) + return io.NodeOutput(torch.FloatTensor(sigmas)) -NODE_CLASS_MAPPINGS = { - "AlignYourStepsScheduler": AlignYourStepsScheduler, -} + +class AlignYourStepsExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + AlignYourStepsScheduler, + ] + +async def comfy_entrypoint() -> AlignYourStepsExtension: + return AlignYourStepsExtension() From 72855db715096bc378817b1aaffcf232fdc39659 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 3 Sep 2025 19:20:13 -0700 Subject: [PATCH 0540/1073] Fix potential rope issue. 
(#9710) --- comfy/ldm/audio/dit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/audio/dit.py b/comfy/ldm/audio/dit.py index 179c5b67e..d0d69bbdc 100644 --- a/comfy/ldm/audio/dit.py +++ b/comfy/ldm/audio/dit.py @@ -632,7 +632,7 @@ class ContinuousTransformer(nn.Module): # Attention layers if self.rotary_pos_emb is not None: - rotary_pos_emb = self.rotary_pos_emb.forward_from_seq_len(x.shape[1], dtype=x.dtype, device=x.device) + rotary_pos_emb = self.rotary_pos_emb.forward_from_seq_len(x.shape[1], dtype=torch.float, device=x.device) else: rotary_pos_emb = None From b71f9bcb7143b8cd4fff627bb91b60739c915d4c Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Thu, 4 Sep 2025 14:14:02 +0800 Subject: [PATCH 0541/1073] Update template to 0.1.75 (#9711) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4ebe6cc2a..3008a5dc3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.11 -comfyui-workflow-templates==0.1.73 +comfyui-workflow-templates==0.1.75 comfyui-embedded-docs==0.2.6 torch torchsde From b0338e930bbc1f9d01f005f224573d5994732932 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 4 Sep 2025 02:15:57 -0400 Subject: [PATCH 0542/1073] ComfyUI 0.3.57 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index e8e039373..4cc3c8647 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.56" +__version__ = "0.3.57" diff --git a/pyproject.toml b/pyproject.toml index cfd5d45ef..d75cd04a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.56" +version = "0.3.57" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From a9f1bb10a52ce08a3f21e6fc562554671c85c3d5 Mon Sep 17 00:00:00 2001 From: guill Date: Thu, 4 Sep 2025 16:13:28 -0700 Subject: [PATCH 0543/1073] Fix progress update crossover between users (#9706) * Fix showing progress from other sessions Because `client_id` was missing from the `progress_state` message, it was being sent to all connected sessions. This technically meant that if someone had a graph with the same nodes, they would see the progress updates for others. Also added a test to prevent recurrence and moved the tests around to make CI easier to hook up. 
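A minimal sketch of the change (assuming PromptServer's usual send_sync(event, data, sid=None) signature, where sid=None broadcasts to every connected session and `server` stands for the PromptServer instance):

    # before: no session id, so every connected client received this prompt's progress
    server.send_sync("progress_state", {"prompt_id": prompt_id, "nodes": active_nodes})
    # after: pass the originating client_id so only that session gets the update
    server.send_sync("progress_state", {"prompt_id": prompt_id, "nodes": active_nodes}, server.client_id)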
* Fix CI issues related to timing-sensitive tests --- .github/workflows/test-execution.yml | 30 +++ comfy_execution/progress.py | 3 +- tests/conftest.py | 6 + .../extra_model_paths.yaml | 0 .../test_async_nodes.py | 28 ++- .../test_execution.py | 13 +- tests/execution/test_progress_isolation.py | 233 ++++++++++++++++++ .../testing_nodes/testing-pack/__init__.py | 0 .../testing-pack/api_test_nodes.py | 0 .../testing-pack/async_test_nodes.py | 0 .../testing_nodes/testing-pack/conditions.py | 0 .../testing-pack/flow_control.py | 0 .../testing-pack/specific_tests.py | 0 .../testing_nodes/testing-pack/stubs.py | 0 .../testing_nodes/testing-pack/tools.py | 0 .../testing_nodes/testing-pack/util.py | 0 16 files changed, 295 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/test-execution.yml rename tests/{inference => execution}/extra_model_paths.yaml (100%) rename tests/{inference => execution}/test_async_nodes.py (95%) rename tests/{inference => execution}/test_execution.py (98%) create mode 100644 tests/execution/test_progress_isolation.py rename tests/{inference => execution}/testing_nodes/testing-pack/__init__.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/api_test_nodes.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/async_test_nodes.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/conditions.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/flow_control.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/specific_tests.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/stubs.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/tools.py (100%) rename tests/{inference => execution}/testing_nodes/testing-pack/util.py (100%) diff --git a/.github/workflows/test-execution.yml b/.github/workflows/test-execution.yml new file mode 100644 index 000000000..00ef07ebf --- /dev/null +++ b/.github/workflows/test-execution.yml @@ -0,0 +1,30 @@ +name: Execution Tests + +on: + push: + branches: [ main, master ] + pull_request: + branches: [ main, master ] + +jobs: + test: + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + runs-on: ${{ matrix.os }} + continue-on-error: true + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + - name: Install requirements + run: | + python -m pip install --upgrade pip + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu + pip install -r requirements.txt + pip install -r tests-unit/requirements.txt + - name: Run Execution Tests + run: | + python -m pytest tests/execution -v --skip-timing-checks diff --git a/comfy_execution/progress.py b/comfy_execution/progress.py index e8f5ede1e..f951a3350 100644 --- a/comfy_execution/progress.py +++ b/comfy_execution/progress.py @@ -181,8 +181,9 @@ class WebUIProgressHandler(ProgressHandler): } # Send a combined progress_state message with all node states + # Include client_id to ensure message is only sent to the initiating client self.server_instance.send_sync( - "progress_state", {"prompt_id": prompt_id, "nodes": active_nodes} + "progress_state", {"prompt_id": prompt_id, "nodes": active_nodes}, self.server_instance.client_id ) @override diff --git a/tests/conftest.py b/tests/conftest.py index 4e30eb581..290e3a5c0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,6 +6,7 @@ def pytest_addoption(parser): 
parser.addoption('--output_dir', action="store", default='tests/inference/samples', help='Output directory for generated images') parser.addoption("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") parser.addoption("--port", type=int, default=8188, help="Set the listen port.") + parser.addoption("--skip-timing-checks", action="store_true", default=False, help="Skip timing-related assertions in tests (useful for CI environments with variable performance)") # This initializes args at the beginning of the test session @pytest.fixture(scope="session", autouse=True) @@ -19,6 +20,11 @@ def args_pytest(pytestconfig): return args +@pytest.fixture(scope="session") +def skip_timing_checks(pytestconfig): + """Fixture that returns whether timing checks should be skipped.""" + return pytestconfig.getoption("--skip-timing-checks") + def pytest_collection_modifyitems(items): # Modifies items so tests run in the correct order diff --git a/tests/inference/extra_model_paths.yaml b/tests/execution/extra_model_paths.yaml similarity index 100% rename from tests/inference/extra_model_paths.yaml rename to tests/execution/extra_model_paths.yaml diff --git a/tests/inference/test_async_nodes.py b/tests/execution/test_async_nodes.py similarity index 95% rename from tests/inference/test_async_nodes.py rename to tests/execution/test_async_nodes.py index f029953dd..c771b4b36 100644 --- a/tests/inference/test_async_nodes.py +++ b/tests/execution/test_async_nodes.py @@ -7,7 +7,7 @@ import subprocess from pytest import fixture from comfy_execution.graph_utils import GraphBuilder -from tests.inference.test_execution import ComfyClient, run_warmup +from tests.execution.test_execution import ComfyClient, run_warmup @pytest.mark.execution @@ -23,7 +23,7 @@ class TestAsyncNodes: '--output-directory', args_pytest["output_dir"], '--listen', args_pytest["listen"], '--port', str(args_pytest["port"]), - '--extra-model-paths-config', 'tests/inference/extra_model_paths.yaml', + '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', '--cpu', ] use_lru, lru_size = request.param @@ -81,7 +81,7 @@ class TestAsyncNodes: assert len(result_images) == 1, "Should have 1 image" assert np.array(result_images[0]).min() == 0 and np.array(result_images[0]).max() == 0, "Image should be black" - def test_multiple_async_parallel_execution(self, client: ComfyClient, builder: GraphBuilder): + def test_multiple_async_parallel_execution(self, client: ComfyClient, builder: GraphBuilder, skip_timing_checks): """Test that multiple async nodes execute in parallel.""" # Warmup execution to ensure server is fully initialized run_warmup(client) @@ -104,7 +104,8 @@ class TestAsyncNodes: elapsed_time = time.time() - start_time # Should take ~0.5s (max duration) not 1.2s (sum of durations) - assert elapsed_time < 0.8, f"Parallel execution took {elapsed_time}s, expected < 0.8s" + if not skip_timing_checks: + assert elapsed_time < 0.8, f"Parallel execution took {elapsed_time}s, expected < 0.8s" # Verify all nodes executed assert result.did_run(sleep1) and result.did_run(sleep2) and result.did_run(sleep3) @@ -150,7 +151,7 @@ class TestAsyncNodes: with pytest.raises(urllib.error.HTTPError): client.run(g) - def test_async_lazy_evaluation(self, client: ComfyClient, builder: GraphBuilder): + def test_async_lazy_evaluation(self, client: ComfyClient, builder: GraphBuilder, 
skip_timing_checks): """Test async nodes with lazy evaluation.""" # Warmup execution to ensure server is fully initialized run_warmup(client, prefix="warmup_lazy") @@ -173,7 +174,8 @@ class TestAsyncNodes: elapsed_time = time.time() - start_time # Should only execute sleep1, not sleep2 - assert elapsed_time < 0.5, f"Should skip sleep2, took {elapsed_time}s" + if not skip_timing_checks: + assert elapsed_time < 0.5, f"Should skip sleep2, took {elapsed_time}s" assert result.did_run(sleep1), "Sleep1 should have executed" assert not result.did_run(sleep2), "Sleep2 should have been skipped" @@ -310,7 +312,7 @@ class TestAsyncNodes: images = result.get_images(output) assert len(images) == 1, "Should have blocked second image" - def test_async_caching_behavior(self, client: ComfyClient, builder: GraphBuilder): + def test_async_caching_behavior(self, client: ComfyClient, builder: GraphBuilder, skip_timing_checks): """Test that async nodes are properly cached.""" # Warmup execution to ensure server is fully initialized run_warmup(client, prefix="warmup_cache") @@ -330,9 +332,10 @@ class TestAsyncNodes: elapsed_time = time.time() - start_time assert not result2.did_run(sleep_node), "Should be cached" - assert elapsed_time < 0.1, f"Cached run took {elapsed_time}s, should be instant" + if not skip_timing_checks: + assert elapsed_time < 0.1, f"Cached run took {elapsed_time}s, should be instant" - def test_async_with_dynamic_prompts(self, client: ComfyClient, builder: GraphBuilder): + def test_async_with_dynamic_prompts(self, client: ComfyClient, builder: GraphBuilder, skip_timing_checks): """Test async nodes within dynamically generated prompts.""" # Warmup execution to ensure server is fully initialized run_warmup(client, prefix="warmup_dynamic") @@ -345,8 +348,8 @@ class TestAsyncNodes: dynamic_async = g.node("TestDynamicAsyncGeneration", image1=image1.out(0), image2=image2.out(0), - num_async_nodes=3, - sleep_duration=0.2) + num_async_nodes=5, + sleep_duration=0.4) g.node("SaveImage", images=dynamic_async.out(0)) start_time = time.time() @@ -354,7 +357,8 @@ class TestAsyncNodes: elapsed_time = time.time() - start_time # Should execute async nodes in parallel within dynamic prompt - assert elapsed_time < 0.5, f"Dynamic async execution took {elapsed_time}s" + if not skip_timing_checks: + assert elapsed_time < 1.0, f"Dynamic async execution took {elapsed_time}s" assert result.did_run(dynamic_async) def test_async_resource_cleanup(self, client: ComfyClient, builder: GraphBuilder): diff --git a/tests/inference/test_execution.py b/tests/execution/test_execution.py similarity index 98% rename from tests/inference/test_execution.py rename to tests/execution/test_execution.py index e7b29302e..8ea05fdd8 100644 --- a/tests/inference/test_execution.py +++ b/tests/execution/test_execution.py @@ -149,7 +149,7 @@ class TestExecution: '--output-directory', args_pytest["output_dir"], '--listen', args_pytest["listen"], '--port', str(args_pytest["port"]), - '--extra-model-paths-config', 'tests/inference/extra_model_paths.yaml', + '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', '--cpu', ] use_lru, lru_size = request.param @@ -518,7 +518,7 @@ class TestExecution: assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25" assert not result.did_run(test_node), "The execution should have been cached" - def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder): + def test_parallel_sleep_nodes(self, client: ComfyClient, 
builder: GraphBuilder, skip_timing_checks): # Warmup execution to ensure server is fully initialized run_warmup(client) @@ -541,14 +541,15 @@ class TestExecution: # The test should take around 3.0 seconds (the longest sleep duration) # plus some overhead, but definitely less than the sum of all sleeps (9.0s) - assert elapsed_time < 8.9, f"Parallel execution took {elapsed_time}s, expected less than 8.9s" + if not skip_timing_checks: + assert elapsed_time < 8.9, f"Parallel execution took {elapsed_time}s, expected less than 8.9s" # Verify that all nodes executed assert result.did_run(sleep_node1), "Sleep node 1 should have run" assert result.did_run(sleep_node2), "Sleep node 2 should have run" assert result.did_run(sleep_node3), "Sleep node 3 should have run" - def test_parallel_sleep_expansion(self, client: ComfyClient, builder: GraphBuilder): + def test_parallel_sleep_expansion(self, client: ComfyClient, builder: GraphBuilder, skip_timing_checks): # Warmup execution to ensure server is fully initialized run_warmup(client) @@ -574,7 +575,9 @@ class TestExecution: # Similar to the previous test, expect parallel execution of the sleep nodes # which should complete in less than the sum of all sleeps - assert elapsed_time < 10.0, f"Expansion execution took {elapsed_time}s, expected less than 5.5s" + # Lots of leeway here since Windows CI is slow + if not skip_timing_checks: + assert elapsed_time < 13.0, f"Expansion execution took {elapsed_time}s" # Verify the parallel sleep node executed assert result.did_run(parallel_sleep), "ParallelSleep node should have run" diff --git a/tests/execution/test_progress_isolation.py b/tests/execution/test_progress_isolation.py new file mode 100644 index 000000000..93dc0d41b --- /dev/null +++ b/tests/execution/test_progress_isolation.py @@ -0,0 +1,233 @@ +"""Test that progress updates are properly isolated between WebSocket clients.""" + +import json +import pytest +import time +import threading +import uuid +import websocket +from typing import List, Dict, Any +from comfy_execution.graph_utils import GraphBuilder +from tests.execution.test_execution import ComfyClient + + +class ProgressTracker: + """Tracks progress messages received by a WebSocket client.""" + + def __init__(self, client_id: str): + self.client_id = client_id + self.progress_messages: List[Dict[str, Any]] = [] + self.lock = threading.Lock() + + def add_message(self, message: Dict[str, Any]): + """Thread-safe addition of progress messages.""" + with self.lock: + self.progress_messages.append(message) + + def get_messages_for_prompt(self, prompt_id: str) -> List[Dict[str, Any]]: + """Get all progress messages for a specific prompt_id.""" + with self.lock: + return [ + msg for msg in self.progress_messages + if msg.get('data', {}).get('prompt_id') == prompt_id + ] + + def has_cross_contamination(self, own_prompt_id: str) -> bool: + """Check if this client received progress for other prompts.""" + with self.lock: + for msg in self.progress_messages: + msg_prompt_id = msg.get('data', {}).get('prompt_id') + if msg_prompt_id and msg_prompt_id != own_prompt_id: + return True + return False + + +class IsolatedClient(ComfyClient): + """Extended ComfyClient that tracks all WebSocket messages.""" + + def __init__(self): + super().__init__() + self.progress_tracker = None + self.all_messages: List[Dict[str, Any]] = [] + + def connect(self, listen='127.0.0.1', port=8188, client_id=None): + """Connect with a specific client_id and set up message tracking.""" + if client_id is None: + client_id = 
str(uuid.uuid4()) + super().connect(listen, port, client_id) + self.progress_tracker = ProgressTracker(client_id) + + def listen_for_messages(self, duration: float = 5.0): + """Listen for WebSocket messages for a specified duration.""" + end_time = time.time() + duration + self.ws.settimeout(0.5) # Non-blocking with timeout + + while time.time() < end_time: + try: + out = self.ws.recv() + if isinstance(out, str): + message = json.loads(out) + self.all_messages.append(message) + + # Track progress_state messages + if message.get('type') == 'progress_state': + self.progress_tracker.add_message(message) + except websocket.WebSocketTimeoutException: + continue + except Exception: + # Log error silently in test context + break + + +@pytest.mark.execution +class TestProgressIsolation: + """Test suite for verifying progress update isolation between clients.""" + + @pytest.fixture(scope="class", autouse=True) + def _server(self, args_pytest): + """Start the ComfyUI server for testing.""" + import subprocess + pargs = [ + 'python', 'main.py', + '--output-directory', args_pytest["output_dir"], + '--listen', args_pytest["listen"], + '--port', str(args_pytest["port"]), + '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', + '--cpu', + ] + p = subprocess.Popen(pargs) + yield + p.kill() + + def start_client_with_retry(self, listen: str, port: int, client_id: str = None): + """Start client with connection retries.""" + client = IsolatedClient() + # Connect to server (with retries) + n_tries = 5 + for i in range(n_tries): + time.sleep(4) + try: + client.connect(listen, port, client_id) + return client + except ConnectionRefusedError as e: + print(e) # noqa: T201 + print(f"({i+1}/{n_tries}) Retrying...") # noqa: T201 + raise ConnectionRefusedError(f"Failed to connect after {n_tries} attempts") + + def test_progress_isolation_between_clients(self, args_pytest): + """Test that progress updates are isolated between different clients.""" + listen = args_pytest["listen"] + port = args_pytest["port"] + + # Create two separate clients with unique IDs + client_a_id = "client_a_" + str(uuid.uuid4()) + client_b_id = "client_b_" + str(uuid.uuid4()) + + try: + # Connect both clients with retries + client_a = self.start_client_with_retry(listen, port, client_a_id) + client_b = self.start_client_with_retry(listen, port, client_b_id) + + # Create simple workflows for both clients + graph_a = GraphBuilder(prefix="client_a") + image_a = graph_a.node("StubImage", content="BLACK", height=256, width=256, batch_size=1) + graph_a.node("PreviewImage", images=image_a.out(0)) + + graph_b = GraphBuilder(prefix="client_b") + image_b = graph_b.node("StubImage", content="WHITE", height=256, width=256, batch_size=1) + graph_b.node("PreviewImage", images=image_b.out(0)) + + # Submit workflows from both clients + prompt_a = graph_a.finalize() + prompt_b = graph_b.finalize() + + response_a = client_a.queue_prompt(prompt_a) + prompt_id_a = response_a['prompt_id'] + + response_b = client_b.queue_prompt(prompt_b) + prompt_id_b = response_b['prompt_id'] + + # Start threads to listen for messages on both clients + def listen_client_a(): + client_a.listen_for_messages(duration=10.0) + + def listen_client_b(): + client_b.listen_for_messages(duration=10.0) + + thread_a = threading.Thread(target=listen_client_a) + thread_b = threading.Thread(target=listen_client_b) + + thread_a.start() + thread_b.start() + + # Wait for threads to complete + thread_a.join() + thread_b.join() + + # Verify isolation + # Client A should only 
receive progress for prompt_id_a + assert not client_a.progress_tracker.has_cross_contamination(prompt_id_a), \ + f"Client A received progress updates for other clients' workflows. " \ + f"Expected only {prompt_id_a}, but got messages for multiple prompts." + + # Client B should only receive progress for prompt_id_b + assert not client_b.progress_tracker.has_cross_contamination(prompt_id_b), \ + f"Client B received progress updates for other clients' workflows. " \ + f"Expected only {prompt_id_b}, but got messages for multiple prompts." + + # Verify each client received their own progress updates + client_a_messages = client_a.progress_tracker.get_messages_for_prompt(prompt_id_a) + client_b_messages = client_b.progress_tracker.get_messages_for_prompt(prompt_id_b) + + assert len(client_a_messages) > 0, \ + "Client A did not receive any progress updates for its own workflow" + assert len(client_b_messages) > 0, \ + "Client B did not receive any progress updates for its own workflow" + + # Ensure no cross-contamination + client_a_other = client_a.progress_tracker.get_messages_for_prompt(prompt_id_b) + client_b_other = client_b.progress_tracker.get_messages_for_prompt(prompt_id_a) + + assert len(client_a_other) == 0, \ + f"Client A incorrectly received {len(client_a_other)} progress updates for Client B's workflow" + assert len(client_b_other) == 0, \ + f"Client B incorrectly received {len(client_b_other)} progress updates for Client A's workflow" + + finally: + # Clean up connections + if hasattr(client_a, 'ws'): + client_a.ws.close() + if hasattr(client_b, 'ws'): + client_b.ws.close() + + def test_progress_with_missing_client_id(self, args_pytest): + """Test that progress updates handle missing client_id gracefully.""" + listen = args_pytest["listen"] + port = args_pytest["port"] + + try: + # Connect client with retries + client = self.start_client_with_retry(listen, port) + + # Create a simple workflow + graph = GraphBuilder(prefix="test_missing_id") + image = graph.node("StubImage", content="BLACK", height=128, width=128, batch_size=1) + graph.node("PreviewImage", images=image.out(0)) + + # Submit workflow + prompt = graph.finalize() + response = client.queue_prompt(prompt) + prompt_id = response['prompt_id'] + + # Listen for messages + client.listen_for_messages(duration=5.0) + + # Should still receive progress updates for own workflow + messages = client.progress_tracker.get_messages_for_prompt(prompt_id) + assert len(messages) > 0, \ + "Client did not receive progress updates even though it initiated the workflow" + + finally: + if hasattr(client, 'ws'): + client.ws.close() + diff --git a/tests/inference/testing_nodes/testing-pack/__init__.py b/tests/execution/testing_nodes/testing-pack/__init__.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/__init__.py rename to tests/execution/testing_nodes/testing-pack/__init__.py diff --git a/tests/inference/testing_nodes/testing-pack/api_test_nodes.py b/tests/execution/testing_nodes/testing-pack/api_test_nodes.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/api_test_nodes.py rename to tests/execution/testing_nodes/testing-pack/api_test_nodes.py diff --git a/tests/inference/testing_nodes/testing-pack/async_test_nodes.py b/tests/execution/testing_nodes/testing-pack/async_test_nodes.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/async_test_nodes.py rename to tests/execution/testing_nodes/testing-pack/async_test_nodes.py diff --git 
a/tests/inference/testing_nodes/testing-pack/conditions.py b/tests/execution/testing_nodes/testing-pack/conditions.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/conditions.py rename to tests/execution/testing_nodes/testing-pack/conditions.py diff --git a/tests/inference/testing_nodes/testing-pack/flow_control.py b/tests/execution/testing_nodes/testing-pack/flow_control.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/flow_control.py rename to tests/execution/testing_nodes/testing-pack/flow_control.py diff --git a/tests/inference/testing_nodes/testing-pack/specific_tests.py b/tests/execution/testing_nodes/testing-pack/specific_tests.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/specific_tests.py rename to tests/execution/testing_nodes/testing-pack/specific_tests.py diff --git a/tests/inference/testing_nodes/testing-pack/stubs.py b/tests/execution/testing_nodes/testing-pack/stubs.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/stubs.py rename to tests/execution/testing_nodes/testing-pack/stubs.py diff --git a/tests/inference/testing_nodes/testing-pack/tools.py b/tests/execution/testing_nodes/testing-pack/tools.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/tools.py rename to tests/execution/testing_nodes/testing-pack/tools.py diff --git a/tests/inference/testing_nodes/testing-pack/util.py b/tests/execution/testing_nodes/testing-pack/util.py similarity index 100% rename from tests/inference/testing_nodes/testing-pack/util.py rename to tests/execution/testing_nodes/testing-pack/util.py From 261421e21899abc8168c71efd8694ade020bcee2 Mon Sep 17 00:00:00 2001 From: "Yousef R. Gamaleldin" <81116377+yousef-rafat@users.noreply.github.com> Date: Fri, 5 Sep 2025 03:36:20 +0300 Subject: [PATCH 0544/1073] Add Hunyuan 3D 2.1 Support (#8714) --- comfy/clip_vision.py | 231 ++++++++- comfy/image_encoders/dino2.py | 33 +- comfy/image_encoders/dino2_large.json | 22 + comfy/latent_formats.py | 5 + comfy/ldm/hunyuan3d/vae.py | 569 ++++++++++++++++++---- comfy/ldm/hunyuan3dv2_1/hunyuandit.py | 658 ++++++++++++++++++++++++++ comfy/model_base.py | 17 + comfy/model_detection.py | 14 + comfy/sd.py | 49 +- comfy/supported_models.py | 13 +- comfy_extras/nodes_hunyuan3d.py | 24 +- nodes.py | 29 +- requirements.txt | 2 +- 13 files changed, 1537 insertions(+), 129 deletions(-) create mode 100644 comfy/image_encoders/dino2_large.json create mode 100644 comfy/ldm/hunyuan3dv2_1/hunyuandit.py diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 2fa410cb7..4bc640e8b 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -17,10 +17,227 @@ class Output: def __setitem__(self, key, item): setattr(self, key, item) -def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True): + +def cubic_kernel(x, a: float = -0.75): + absx = x.abs() + absx2 = absx ** 2 + absx3 = absx ** 3 + + w = (a + 2) * absx3 - (a + 3) * absx2 + 1 + w2 = a * absx3 - 5*a * absx2 + 8*a * absx - 4*a + + return torch.where(absx <= 1, w, torch.where(absx < 2, w2, torch.zeros_like(x))) + +def get_indices_weights(in_size, out_size, scale): + # OpenCV-style half-pixel mapping + x = torch.arange(out_size, dtype=torch.float32) + x = (x + 0.5) / scale - 0.5 + + x0 = x.floor().long() + dx = x.unsqueeze(1) - (x0.unsqueeze(1) + torch.arange(-1, 3)) + + weights = cubic_kernel(dx) + weights = weights / weights.sum(dim=1, 
keepdim=True) + + indices = x0.unsqueeze(1) + torch.arange(-1, 3) + indices = indices.clamp(0, in_size - 1) + + return indices, weights + +def resize_cubic_1d(x, out_size, dim): + b, c, h, w = x.shape + in_size = h if dim == 2 else w + scale = out_size / in_size + + indices, weights = get_indices_weights(in_size, out_size, scale) + + if dim == 2: + x = x.permute(0, 1, 3, 2) + x = x.reshape(-1, h) + else: + x = x.reshape(-1, w) + + gathered = x[:, indices] + out = (gathered * weights.unsqueeze(0)).sum(dim=2) + + if dim == 2: + out = out.reshape(b, c, w, out_size).permute(0, 1, 3, 2) + else: + out = out.reshape(b, c, h, out_size) + + return out + +def resize_cubic(img: torch.Tensor, size: tuple) -> torch.Tensor: + """ + Resize image using OpenCV-equivalent INTER_CUBIC interpolation. + Implemented in pure PyTorch + """ + + if img.ndim == 3: + img = img.unsqueeze(0) + + img = img.permute(0, 3, 1, 2) + + out_h, out_w = size + img = resize_cubic_1d(img, out_h, dim=2) + img = resize_cubic_1d(img, out_w, dim=3) + return img + +def resize_area(img: torch.Tensor, size: tuple) -> torch.Tensor: + # vectorized implementation for OpenCV's INTER_AREA using pure PyTorch + original_shape = img.shape + is_hwc = False + + if img.ndim == 3: + if img.shape[0] <= 4: + img = img.unsqueeze(0) + else: + is_hwc = True + img = img.permute(2, 0, 1).unsqueeze(0) + elif img.ndim == 4: + pass + else: + raise ValueError("Expected image with 3 or 4 dims.") + + B, C, H, W = img.shape + out_h, out_w = size + scale_y = H / out_h + scale_x = W / out_w + + device = img.device + + # compute the grid boundaries + y_start = torch.arange(out_h, device=device).float() * scale_y + y_end = y_start + scale_y + x_start = torch.arange(out_w, device=device).float() * scale_x + x_end = x_start + scale_x + + # for each output pixel, compute the input range it covers + y_start_int = torch.floor(y_start).long() + y_end_int = torch.ceil(y_end).long() + x_start_int = torch.floor(x_start).long() + x_end_int = torch.ceil(x_end).long() + + # We will build the weighted sums by iterating over contributing input pixels once + output = torch.zeros((B, C, out_h, out_w), dtype=torch.float32, device=device) + area = torch.zeros((out_h, out_w), dtype=torch.float32, device=device) + + max_kernel_h = int(torch.max(y_end_int - y_start_int).item()) + max_kernel_w = int(torch.max(x_end_int - x_start_int).item()) + + for dy in range(max_kernel_h): + for dx in range(max_kernel_w): + # compute the weights for this offset for all output pixels + + y_idx = y_start_int.unsqueeze(1) + dy + x_idx = x_start_int.unsqueeze(0) + dx + + # clamp indices to image boundaries + y_idx_clamped = torch.clamp(y_idx, 0, H - 1) + x_idx_clamped = torch.clamp(x_idx, 0, W - 1) + + # compute weights by broadcasting + y_weight = (torch.min(y_end.unsqueeze(1), y_idx_clamped.float() + 1.0) - torch.max(y_start.unsqueeze(1), y_idx_clamped.float())).clamp(min=0) + x_weight = (torch.min(x_end.unsqueeze(0), x_idx_clamped.float() + 1.0) - torch.max(x_start.unsqueeze(0), x_idx_clamped.float())).clamp(min=0) + + weight = (y_weight * x_weight) + + y_expand = y_idx_clamped.expand(out_h, out_w) + x_expand = x_idx_clamped.expand(out_h, out_w) + + + pixels = img[:, :, y_expand, x_expand] + + # unsqueeze to broadcast + w = weight.unsqueeze(0).unsqueeze(0) + + output += pixels * w + area += weight + + # Normalize by area + output /= area.unsqueeze(0).unsqueeze(0) + + if is_hwc: + return output[0].permute(1, 2, 0) + elif img.shape[0] == 1 and original_shape[0] <= 4: + return output[0] + else: + return output
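A minimal sketch, not part of the patch, of what the pure-PyTorch INTER_AREA port above computes; the one-channel 8x8 input and 2x downscale are illustrative assumptions:

import torch
from comfy.clip_vision import resize_area  # added by this patch

img = torch.arange(64, dtype=torch.float32).reshape(1, 8, 8)  # CHW ramp image
out = resize_area(img, (4, 4))  # each output pixel averages one 2x2 input block
assert out.shape == (1, 4, 4)
assert out[0, 0, 0] == img[0, :2, :2].mean()  # (0 + 1 + 8 + 9) / 4 = 4.5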
+ +def recenter(image, border_ratio: float = 0.2): + + if image.shape[-1] == 4: + mask = image[..., 3] + else: + mask = torch.ones_like(image[..., 0:1]) * 255 + image = torch.concatenate([image, mask], axis=-1) + mask = mask[..., 0] + + H, W, C = image.shape + + size = max(H, W) + result = torch.zeros((size, size, C), dtype = torch.uint8) + + # as_tuple to match numpy behaviour + x_coords, y_coords = torch.nonzero(mask, as_tuple=True) + + y_min, y_max = y_coords.min(), y_coords.max() + x_min, x_max = x_coords.min(), x_coords.max() + + h = x_max - x_min + w = y_max - y_min + + if h == 0 or w == 0: + raise ValueError('input image is empty') + + desired_size = int(size * (1 - border_ratio)) + scale = desired_size / max(h, w) + + h2 = int(h * scale) + w2 = int(w * scale) + + x2_min = (size - h2) // 2 + x2_max = x2_min + h2 + + y2_min = (size - w2) // 2 + y2_max = y2_min + w2 + + # note: OpenCV size arguments take columns (width) first, opposite to the row-first convention of PyTorch and NumPy + result[x2_min:x2_max, y2_min:y2_max] = resize_area(image[x_min:x_max, y_min:y_max], (h2, w2)) + + bg = torch.ones((result.shape[0], result.shape[1], 3), dtype = torch.uint8) * 255 + + mask = result[..., 3:].to(torch.float32) / 255 + result = result[..., :3] * mask + bg * (1 - mask) + + mask = mask * 255 + result = result.clip(0, 255).to(torch.uint8) + mask = mask.clip(0, 255).to(torch.uint8) + + return result + +def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], + crop=True, value_range = (-1, 1), border_ratio: float = None, recenter_size: int = 512): + + if border_ratio is not None: + + image = (image * 255).clamp(0, 255).to(torch.uint8) + image = [recenter(img, border_ratio = border_ratio) for img in image] + + image = torch.stack(image, dim = 0) + image = resize_cubic(image, size = (recenter_size, recenter_size)) + + image = image / 255 * 2 - 1 + low, high = value_range + + image = (image - low) / (high - low) + image = image.permute(0, 2, 3, 1) + image = image[:, :, :, :3] if image.shape[3] > 3 else image + mean = torch.tensor(mean, device=image.device, dtype=image.dtype) std = torch.tensor(std, device=image.device, dtype=image.dtype) + image = image.movedim(-1, 1) if not (image.shape[2] == size and image.shape[3] == size): if crop: @@ -29,7 +246,7 @@ def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], s else: scale_size = (size, size) - image = torch.nn.functional.interpolate(image, size=scale_size, mode="bicubic", antialias=True) + image = torch.nn.functional.interpolate(image, size=scale_size, mode="bilinear" if border_ratio is not None else "bicubic", antialias=True) h = (image.shape[2] - size)//2 w = (image.shape[3] - size)//2 image = image[:,:,h:h+size,w:w+size] @@ -71,9 +288,9 @@ class ClipVisionModel(): def get_sd(self): return self.model.state_dict() - def encode_image(self, image, crop=True): + def encode_image(self, image, crop=True, border_ratio: float = None): comfy.model_management.load_model_gpu(self.patcher) - pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float() + pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop, border_ratio=border_ratio).float() out = self.model(pixel_values=pixel_values, intermediate_output='all' if self.return_all_hidden_states else -2) outputs = Output() @@ -136,8 +353,12 @@ def load_clipvision_from_sd(sd,
prefix="", convert_keys=False): json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl_336.json") else: json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json") - elif "embeddings.patch_embeddings.projection.weight" in sd: + + # Dinov2 + elif 'encoder.layer.39.layer_scale2.lambda1' in sd: json_config = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "image_encoders"), "dino2_giant.json") + elif 'encoder.layer.23.layer_scale2.lambda1' in sd: + json_config = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "image_encoders"), "dino2_large.json") else: return None diff --git a/comfy/image_encoders/dino2.py b/comfy/image_encoders/dino2.py index 976f98c65..9b6dace9d 100644 --- a/comfy/image_encoders/dino2.py +++ b/comfy/image_encoders/dino2.py @@ -31,6 +31,20 @@ class LayerScale(torch.nn.Module): def forward(self, x): return x * comfy.model_management.cast_to_device(self.lambda1, x.device, x.dtype) +class Dinov2MLP(torch.nn.Module): + def __init__(self, hidden_size: int, dtype, device, operations): + super().__init__() + + mlp_ratio = 4 + hidden_features = int(hidden_size * mlp_ratio) + self.fc1 = operations.Linear(hidden_size, hidden_features, bias = True, device=device, dtype=dtype) + self.fc2 = operations.Linear(hidden_features, hidden_size, bias = True, device=device, dtype=dtype) + + def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: + hidden_state = self.fc1(hidden_state) + hidden_state = torch.nn.functional.gelu(hidden_state) + hidden_state = self.fc2(hidden_state) + return hidden_state class SwiGLUFFN(torch.nn.Module): def __init__(self, dim, dtype, device, operations): @@ -50,12 +64,15 @@ class SwiGLUFFN(torch.nn.Module): class Dino2Block(torch.nn.Module): - def __init__(self, dim, num_heads, layer_norm_eps, dtype, device, operations): + def __init__(self, dim, num_heads, layer_norm_eps, dtype, device, operations, use_swiglu_ffn): super().__init__() self.attention = Dino2AttentionBlock(dim, num_heads, layer_norm_eps, dtype, device, operations) self.layer_scale1 = LayerScale(dim, dtype, device, operations) self.layer_scale2 = LayerScale(dim, dtype, device, operations) - self.mlp = SwiGLUFFN(dim, dtype, device, operations) + if use_swiglu_ffn: + self.mlp = SwiGLUFFN(dim, dtype, device, operations) + else: + self.mlp = Dinov2MLP(dim, dtype, device, operations) self.norm1 = operations.LayerNorm(dim, eps=layer_norm_eps, dtype=dtype, device=device) self.norm2 = operations.LayerNorm(dim, eps=layer_norm_eps, dtype=dtype, device=device) @@ -66,9 +83,10 @@ class Dino2Block(torch.nn.Module): class Dino2Encoder(torch.nn.Module): - def __init__(self, dim, num_heads, layer_norm_eps, num_layers, dtype, device, operations): + def __init__(self, dim, num_heads, layer_norm_eps, num_layers, dtype, device, operations, use_swiglu_ffn): super().__init__() - self.layer = torch.nn.ModuleList([Dino2Block(dim, num_heads, layer_norm_eps, dtype, device, operations) for _ in range(num_layers)]) + self.layer = torch.nn.ModuleList([Dino2Block(dim, num_heads, layer_norm_eps, dtype, device, operations, use_swiglu_ffn = use_swiglu_ffn) + for _ in range(num_layers)]) def forward(self, x, intermediate_output=None): optimized_attention = optimized_attention_for_device(x.device, False, small_input=True) @@ -78,8 +96,8 @@ class Dino2Encoder(torch.nn.Module): intermediate_output = len(self.layer) + intermediate_output intermediate = None - for i, l in enumerate(self.layer): - x = 
l(x, optimized_attention) + for i, layer in enumerate(self.layer): + x = layer(x, optimized_attention) if i == intermediate_output: intermediate = x.clone() return x, intermediate @@ -128,9 +146,10 @@ class Dinov2Model(torch.nn.Module): dim = config_dict["hidden_size"] heads = config_dict["num_attention_heads"] layer_norm_eps = config_dict["layer_norm_eps"] + use_swiglu_ffn = config_dict["use_swiglu_ffn"] self.embeddings = Dino2Embeddings(dim, dtype, device, operations) - self.encoder = Dino2Encoder(dim, heads, layer_norm_eps, num_layers, dtype, device, operations) + self.encoder = Dino2Encoder(dim, heads, layer_norm_eps, num_layers, dtype, device, operations, use_swiglu_ffn = use_swiglu_ffn) self.layernorm = operations.LayerNorm(dim, eps=layer_norm_eps, dtype=dtype, device=device) def forward(self, pixel_values, attention_mask=None, intermediate_output=None): diff --git a/comfy/image_encoders/dino2_large.json b/comfy/image_encoders/dino2_large.json new file mode 100644 index 000000000..43fbb58ff --- /dev/null +++ b/comfy/image_encoders/dino2_large.json @@ -0,0 +1,22 @@ +{ + "hidden_size": 1024, + "use_mask_token": true, + "patch_size": 14, + "image_size": 518, + "num_channels": 3, + "num_attention_heads": 16, + "initializer_range": 0.02, + "attention_probs_dropout_prob": 0.0, + "hidden_dropout_prob": 0.0, + "hidden_act": "gelu", + "mlp_ratio": 4, + "model_type": "dinov2", + "num_hidden_layers": 24, + "layer_norm_eps": 1e-6, + "qkv_bias": true, + "use_swiglu_ffn": false, + "layerscale_value": 1.0, + "drop_path_rate": 0.0, + "image_mean": [0.485, 0.456, 0.406], + "image_std": [0.229, 0.224, 0.225] +} diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index caf4991fc..0d84994b0 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -538,6 +538,11 @@ class Hunyuan3Dv2(LatentFormat): latent_dimensions = 1 scale_factor = 0.9990943042622529 +class Hunyuan3Dv2_1(LatentFormat): + scale_factor = 1.0039506158752403 + latent_channels = 64 + latent_dimensions = 1 + class Hunyuan3Dv2mini(LatentFormat): latent_channels = 64 latent_dimensions = 1 diff --git a/comfy/ldm/hunyuan3d/vae.py b/comfy/ldm/hunyuan3d/vae.py index 6e8cbf1d9..760944827 100644 --- a/comfy/ldm/hunyuan3d/vae.py +++ b/comfy/ldm/hunyuan3d/vae.py @@ -4,81 +4,458 @@ import torch import torch.nn as nn import torch.nn.functional as F - - -from typing import Union, Tuple, List, Callable, Optional - import numpy as np -from einops import repeat, rearrange +import math from tqdm import tqdm + +from typing import Optional + import logging import comfy.ops ops = comfy.ops.disable_weight_init -def generate_dense_grid_points( - bbox_min: np.ndarray, - bbox_max: np.ndarray, - octree_resolution: int, - indexing: str = "ij", -): - length = bbox_max - bbox_min - num_cells = octree_resolution +def fps(src: torch.Tensor, batch: torch.Tensor, sampling_ratio: float, start_random: bool = True): - x = np.linspace(bbox_min[0], bbox_max[0], int(num_cells) + 1, dtype=np.float32) - y = np.linspace(bbox_min[1], bbox_max[1], int(num_cells) + 1, dtype=np.float32) - z = np.linspace(bbox_min[2], bbox_max[2], int(num_cells) + 1, dtype=np.float32) - [xs, ys, zs] = np.meshgrid(x, y, z, indexing=indexing) - xyz = np.stack((xs, ys, zs), axis=-1) - grid_size = [int(num_cells) + 1, int(num_cells) + 1, int(num_cells) + 1] + # manually create the pointer vector + assert src.size(0) == batch.numel() - return xyz, grid_size, length + batch_size = int(batch.max()) + 1 + deg = src.new_zeros(batch_size, dtype = torch.long) + + deg.scatter_add_(0, batch, 
torch.ones_like(batch)) + + ptr_vec = deg.new_zeros(batch_size + 1) + torch.cumsum(deg, 0, out=ptr_vec[1:]) + + sampled_indices = [] + + for b in range(batch_size): + # start and end of each batch + start, end = ptr_vec[b].item(), ptr_vec[b + 1].item() + # points from the point cloud + points = src[start:end] + + num_points = points.size(0) + num_samples = max(1, math.ceil(num_points * sampling_ratio)) + + selected = torch.zeros(num_samples, device = src.device, dtype = torch.long) + distances = torch.full((num_points,), float("inf"), device = src.device) + + # select a random start point + if start_random: + farthest = torch.randint(0, num_points, (1,), device = src.device) + else: + farthest = torch.tensor([0], device = src.device, dtype = torch.long) + + for i in range(num_samples): + selected[i] = farthest + centroid = points[farthest].squeeze(0) + dist = torch.norm(points - centroid, dim = 1) # compute Euclidean distance + distances = torch.minimum(distances, dist) + farthest = torch.argmax(distances) + + sampled_indices.append(torch.arange(start, end)[selected]) + + return torch.cat(sampled_indices, dim = 0)
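A minimal sketch, not part of the patch, of the fps helper's contract; the toy shapes and the 0.25 ratio are illustrative assumptions:

import torch
from comfy.ldm.hunyuan3d.vae import fps  # added by this patch

src = torch.randn(16, 3)                             # two clouds of 8 points each, flattened
batch = torch.repeat_interleave(torch.arange(2), 8)  # maps each point to its cloud
idx = fps(src, batch, sampling_ratio=0.25)           # ceil(8 * 0.25) = 2 picks per cloud
assert idx.shape == (4,)  # global indices into src, usable as src[idx]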
+class PointCrossAttention(nn.Module): + def __init__(self, + num_latents: int, + downsample_ratio: float, + pc_size: int, + pc_sharpedge_size: int, + point_feats: int, + width: int, + heads: int, + layers: int, + fourier_embedder, + normal_pe: bool = False, + qkv_bias: bool = False, + use_ln_post: bool = True, + qk_norm: bool = True): + + super().__init__() + + self.fourier_embedder = fourier_embedder + + self.pc_size = pc_size + self.normal_pe = normal_pe + self.downsample_ratio = downsample_ratio + self.pc_sharpedge_size = pc_sharpedge_size + self.num_latents = num_latents + self.point_feats = point_feats + + self.input_proj = nn.Linear(self.fourier_embedder.out_dim + point_feats, width) + + self.cross_attn = ResidualCrossAttentionBlock( + width = width, + heads = heads, + qkv_bias = qkv_bias, + qk_norm = qk_norm + ) + + self.self_attn = None + if layers > 0: + self.self_attn = Transformer( + width = width, + heads = heads, + qkv_bias = qkv_bias, + qk_norm = qk_norm, + layers = layers + ) + + if use_ln_post: + self.ln_post = nn.LayerNorm(width) + else: + self.ln_post = None + + def sample_points_and_latents(self, point_cloud: torch.Tensor, features: torch.Tensor): + + """ + Subsample points randomly from the point cloud (input_pc), + then FPS-sample the subsampled points to get query_pc, + and take the Fourier embeddings for both the input and query pc. + + Note: FPS-sampled points (query_pc) act as latent tokens that attend to and learn from the broader context in input_pc. + Goal: get a smaller representation (query_pc) of the entire scene structure by learning from a broader subset (input_pc). + This is more computationally efficient. + + Features are additional information for each point in the cloud. + """ + + B, _, D = point_cloud.shape + + num_latents = int(self.num_latents) + + num_random_query = self.pc_size / (self.pc_size + self.pc_sharpedge_size) * num_latents + num_sharpedge_query = num_latents - num_random_query + + # Split random and sharpedge surface points + random_pc, sharpedge_pc = torch.split(point_cloud, [self.pc_size, self.pc_sharpedge_size], dim=1) + + # sanity checks + assert random_pc.shape[1] <= self.pc_size, "Random surface points size must be less than or equal to pc_size" + assert sharpedge_pc.shape[1] <= self.pc_sharpedge_size, "Sharpedge surface points size must be less than or equal to pc_sharpedge_size" + + input_random_pc_size = int(num_random_query * self.downsample_ratio) + random_query_pc, random_input_pc, random_idx_pc, random_idx_query = \ + self.subsample(pc = random_pc, num_query = num_random_query, input_pc_size = input_random_pc_size) + + input_sharpedge_pc_size = int(num_sharpedge_query * self.downsample_ratio) + + if input_sharpedge_pc_size == 0: + sharpedge_input_pc = torch.zeros(B, 0, D, dtype = random_input_pc.dtype).to(point_cloud.device) + sharpedge_query_pc = torch.zeros(B, 0, D, dtype = random_query_pc.dtype).to(point_cloud.device) + + else: + sharpedge_query_pc, sharpedge_input_pc, sharpedge_idx_pc, sharpedge_idx_query = \ + self.subsample(pc = sharpedge_pc, num_query = num_sharpedge_query, input_pc_size = input_sharpedge_pc_size) + + # concat the random and sharp-edge points + query_pc = torch.cat([random_query_pc, sharpedge_query_pc], dim = 1) + input_pc = torch.cat([random_input_pc, sharpedge_input_pc], dim = 1) + + query = self.fourier_embedder(query_pc) + data = self.fourier_embedder(input_pc) + + if self.point_feats > 0: + random_surface_features, sharpedge_surface_features = torch.split(features, [self.pc_size, self.pc_sharpedge_size], dim = 1) + + input_random_surface_features, query_random_features = \ + self.handle_features(features = random_surface_features, idx_pc = random_idx_pc, batch_size = B, + input_pc_size = input_random_pc_size, idx_query = random_idx_query) + + if input_sharpedge_pc_size == 0: + input_sharpedge_surface_features = torch.zeros(B, 0, self.point_feats, + dtype = input_random_surface_features.dtype, device = point_cloud.device) + + query_sharpedge_features = torch.zeros(B, 0, self.point_feats, + dtype = query_random_features.dtype, device = point_cloud.device) + else: + + input_sharpedge_surface_features, query_sharpedge_features = \ + self.handle_features(idx_pc = sharpedge_idx_pc, features = sharpedge_surface_features, + batch_size = B, idx_query = sharpedge_idx_query, input_pc_size = input_sharpedge_pc_size) + + query_features = torch.cat([query_random_features, query_sharpedge_features], dim = 1) + input_features = torch.cat([input_random_surface_features, input_sharpedge_surface_features], dim = 1) + + if self.normal_pe: + # apply the fourier embeddings on the first 3 dims (xyz) + input_features_pe = self.fourier_embedder(input_features[..., :3]) + query_features_pe = self.fourier_embedder(query_features[..., :3]) + # prepend the positional encodings to the raw xyz dims + input_features = torch.cat([input_features_pe, input_features[..., :3]], dim = -1) + query_features = torch.cat([query_features_pe, query_features[..., :3]], dim = -1) + + # concat at the channels dim + query = torch.cat([query, query_features], dim = -1) + data = torch.cat([data, input_features], dim = -1) + + # don't return pc_info to avoid unnecessary memory usage + return
query.view(B, -1, query.shape[-1]), data.view(B, -1, data.shape[-1]) + + def forward(self, point_cloud: torch.Tensor, features: torch.Tensor): + + query, data = self.sample_points_and_latents(point_cloud = point_cloud, features = features) + + # apply projections + query = self.input_proj(query) + data = self.input_proj(data) + + # apply cross attention between query and data + latents = self.cross_attn(query, data) + + if self.self_attn is not None: + latents = self.self_attn(latents) + + if self.ln_post is not None: + latents = self.ln_post(latents) + + return latents - def subsample(self, pc, num_query, input_pc_size: int): + + """ + num_query: number of points to keep after FPS + input_pc_size: number of points to select before FPS + """ + + B, _, D = pc.shape + query_ratio = num_query / input_pc_size + + # random subsampling of points inside the point cloud + idx_pc = torch.randperm(pc.shape[1], device = pc.device)[:input_pc_size] + input_pc = pc[:, idx_pc, :] + + # flatten to allow applying fps across the whole batch + flattened_input_pc = input_pc.view(B * input_pc_size, D) + + # construct a batch_down tensor to tell fps + # which points belong to which batch + N_down = int(flattened_input_pc.shape[0] / B) + batch_down = torch.arange(B).to(pc.device) + batch_down = torch.repeat_interleave(batch_down, N_down) + + idx_query = fps(flattened_input_pc, batch_down, sampling_ratio = query_ratio) + query_pc = flattened_input_pc[idx_query].view(B, -1, D) + + return query_pc, input_pc, idx_pc, idx_query + + def handle_features(self, features, idx_pc, input_pc_size, batch_size: int, idx_query): + + B = batch_size + + input_surface_features = features[:, idx_pc, :] + flattened_input_features = input_surface_features.view(B * input_pc_size, -1) + query_features = flattened_input_features[idx_query].view(B, -1, + flattened_input_features.shape[-1]) + + return input_surface_features, query_features + +def normalize_mesh(mesh, scale = 0.9999): + """Normalize mesh to fit in [-scale, scale]. Translate mesh so its center is [0,0,0]""" + + bbox = mesh.bounds + center = (bbox[1] + bbox[0]) / 2 + + max_extent = (bbox[1] - bbox[0]).max() + mesh.apply_translation(-center) + mesh.apply_scale((2 * scale) / max_extent) + + return mesh + +def sample_pointcloud(mesh, num = 200000): + """ Uniformly sample points from the surface of the mesh """ + + points, face_idx = mesh.sample(num, return_index = True) + normals = mesh.face_normals[face_idx] + return torch.from_numpy(points.astype(np.float32)), torch.from_numpy(normals.astype(np.float32)) + +def detect_sharp_edges(mesh, threshold=0.985): + """Return edge indices (a, b) that lie on sharp boundaries of the mesh.""" + + V, F = mesh.vertices, mesh.faces + VN, FN = mesh.vertex_normals, mesh.face_normals + + sharp_mask = np.ones(V.shape[0]) + for i in range(3): + indices = F[:, i] + alignment = np.einsum('ij,ij->i', VN[indices], FN) + dot_stack = np.stack((sharp_mask[indices], alignment), axis=-1) + sharp_mask[indices] = np.min(dot_stack, axis=-1) + + edge_a = np.concatenate([F[:, 0], F[:, 1], F[:, 2]]) + edge_b = np.concatenate([F[:, 1], F[:, 2], F[:, 0]]) + sharp_edges = (sharp_mask[edge_a] < threshold) & (sharp_mask[edge_b] < threshold) + + return edge_a[sharp_edges], edge_b[sharp_edges] + + +def sharp_sample_pointcloud(mesh, num = 16384): + """ Sample points preferentially from sharp edges in the mesh.
""" + + edge_a, edge_b = detect_sharp_edges(mesh) + V, VN = mesh.vertices, mesh.vertex_normals + + va, vb = V[edge_a], V[edge_b] + na, nb = VN[edge_a], VN[edge_b] + + edge_lengths = np.linalg.norm(vb - va, axis=-1) + weights = edge_lengths / edge_lengths.sum() + + indices = np.searchsorted(np.cumsum(weights), np.random.rand(num)) + t = np.random.rand(num, 1) + + samples = t * va[indices] + (1 - t) * vb[indices] + normals = t * na[indices] + (1 - t) * nb[indices] + + return samples.astype(np.float32), normals.astype(np.float32) + +def load_surface_sharpedge(mesh, num_points=4096, num_sharp_points=4096, sharpedge_flag = True, device = "cuda"): + """Load a surface with optional sharp-edge annotations from a trimesh mesh.""" + + import trimesh + + try: + mesh_full = trimesh.util.concatenate(mesh.dump()) + except Exception: + mesh_full = trimesh.util.concatenate(mesh) + + mesh_full = normalize_mesh(mesh_full) + + faces = mesh_full.faces + vertices = mesh_full.vertices + origin_face_count = faces.shape[0] + + mesh_surface = trimesh.Trimesh(vertices=vertices, faces=faces[:origin_face_count]) + mesh_fill = trimesh.Trimesh(vertices=vertices, faces=faces[origin_face_count:]) + + area_surface = mesh_surface.area + area_fill = mesh_fill.area + total_area = area_surface + area_fill + + sample_num = 499712 // 2 + fill_ratio = area_fill / total_area if total_area > 0 else 0 + + num_fill = int(sample_num * fill_ratio) + num_surface = sample_num - num_fill + + surf_pts, surf_normals = sample_pointcloud(mesh_surface, num_surface) + fill_pts, fill_normals = (torch.zeros(0, 3), torch.zeros(0, 3)) if num_fill == 0 else sample_pointcloud(mesh_fill, num_fill) + + sharp_pts, sharp_normals = sharp_sample_pointcloud(mesh_surface, sample_num) + + def assemble_tensor(points, normals, label=None): + + data = torch.cat([points, normals], dim=1).half().to(device) + + if label is not None: + label_tensor = torch.full((data.shape[0], 1), float(label), dtype=torch.float16).to(device) + data = torch.cat([data, label_tensor], dim=1) + + return data + + surface = assemble_tensor(torch.cat([surf_pts.to(device), fill_pts.to(device)], dim=0), + torch.cat([surf_normals.to(device), fill_normals.to(device)], dim=0), + label = 0 if sharpedge_flag else None) + + sharp_surface = assemble_tensor(torch.from_numpy(sharp_pts), torch.from_numpy(sharp_normals), + label = 1 if sharpedge_flag else None) + + rng = np.random.default_rng() + + surface = surface[rng.choice(surface.shape[0], num_points, replace = False)] + sharp_surface = sharp_surface[rng.choice(sharp_surface.shape[0], num_sharp_points, replace = False)] + + full = torch.cat([surface, sharp_surface], dim = 0).unsqueeze(0) + + return full + +class SharpEdgeSurfaceLoader: + """ Load mesh surface and sharp edge samples. 
""" + + def __init__(self, num_uniform_points = 8192, num_sharp_points = 8192): + + self.num_uniform_points = num_uniform_points + self.num_sharp_points = num_sharp_points + self.total_points = num_uniform_points + num_sharp_points + + def __call__(self, mesh_input, device = "cuda"): + mesh = self._load_mesh(mesh_input) + return load_surface_sharpedge(mesh, self.num_uniform_points, self.num_sharp_points, device = device) + + @staticmethod + def _load_mesh(mesh_input): + import trimesh + + if isinstance(mesh_input, str): + mesh = trimesh.load(mesh_input, force="mesh", merge_primitives = True) + else: + mesh = mesh_input + + if isinstance(mesh, trimesh.Scene): + combined = None + for obj in mesh.geometry.values(): + combined = obj if combined is None else combined + obj + return combined + + return mesh + +class DiagonalGaussianDistribution: + def __init__(self, params: torch.Tensor, feature_dim: int = -1): + + # divide quant channels (8) into mean and log variance + self.mean, self.logvar = torch.chunk(params, 2, dim = feature_dim) + + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.std = torch.exp(0.5 * self.logvar) + + def sample(self): + + eps = torch.randn_like(self.std) + z = self.mean + eps * self.std + + return z + +################################################ +# Volume Decoder +################################################ + +class VanillaVolumeDecoder(): @torch.no_grad() - def __call__( - self, - latents: torch.FloatTensor, - geo_decoder: Callable, - bounds: Union[Tuple[float], List[float], float] = 1.01, - num_chunks: int = 10000, - octree_resolution: int = None, - enable_pbar: bool = True, - **kwargs, - ): - device = latents.device - dtype = latents.dtype - batch_size = latents.shape[0] + def __call__(self, latents: torch.Tensor, geo_decoder: callable, octree_resolution: int, bounds = 1.01, + num_chunks: int = 10_000, enable_pbar: bool = True, **kwargs): - # 1. generate query points if isinstance(bounds, float): bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds] - bbox_min, bbox_max = np.array(bounds[0:3]), np.array(bounds[3:6]) - xyz_samples, grid_size, length = generate_dense_grid_points( - bbox_min=bbox_min, - bbox_max=bbox_max, - octree_resolution=octree_resolution, - indexing="ij" - ) - xyz_samples = torch.from_numpy(xyz_samples).to(device, dtype=dtype).contiguous().reshape(-1, 3) + bbox_min, bbox_max = torch.tensor(bounds[:3]), torch.tensor(bounds[3:]) + + x = torch.linspace(bbox_min[0], bbox_max[0], int(octree_resolution) + 1, dtype = torch.float32) + y = torch.linspace(bbox_min[1], bbox_max[1], int(octree_resolution) + 1, dtype = torch.float32) + z = torch.linspace(bbox_min[2], bbox_max[2], int(octree_resolution) + 1, dtype = torch.float32) + + [xs, ys, zs] = torch.meshgrid(x, y, z, indexing = "ij") + xyz = torch.stack((xs, ys, zs), axis=-1).to(latents.device, dtype = latents.dtype).contiguous().reshape(-1, 3) + grid_size = [int(octree_resolution) + 1, int(octree_resolution) + 1, int(octree_resolution) + 1] - # 2. 
latents to 3d volume batch_logits = [] - for start in tqdm(range(0, xyz_samples.shape[0], num_chunks), desc="Volume Decoding", + for start in tqdm(range(0, xyz.shape[0], num_chunks), desc="Volume Decoding", disable=not enable_pbar): - chunk_queries = xyz_samples[start: start + num_chunks, :] - chunk_queries = repeat(chunk_queries, "p c -> b p c", b=batch_size) - logits = geo_decoder(queries=chunk_queries, latents=latents) + + chunk_queries = xyz[start: start + num_chunks, :] + chunk_queries = chunk_queries.unsqueeze(0).repeat(latents.shape[0], 1, 1) + logits = geo_decoder(queries = chunk_queries, latents = latents) batch_logits.append(logits) - grid_logits = torch.cat(batch_logits, dim=1) - grid_logits = grid_logits.view((batch_size, *grid_size)).float() + grid_logits = torch.cat(batch_logits, dim = 1) + grid_logits = grid_logits.view((latents.shape[0], *grid_size)).float() return grid_logits - class FourierEmbedder(nn.Module): """The sin/cosine positional embedding. Given an input tensor `x` of shape [n_batch, ..., c_dim], it converts each feature dimension of `x[..., i]` into: @@ -175,13 +552,11 @@ class FourierEmbedder(nn.Module): else: return x - class CrossAttentionProcessor: def __call__(self, attn, q, k, v): out = comfy.ops.scaled_dot_product_attention(q, k, v) return out - class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ @@ -232,38 +607,41 @@ class MLP(nn.Module): def forward(self, x): return self.drop_path(self.c_proj(self.gelu(self.c_fc(x)))) - class QKVMultiheadCrossAttention(nn.Module): def __init__( self, - *, heads: int, + n_data = None, width=None, qk_norm=False, norm_layer=ops.LayerNorm ): super().__init__() self.heads = heads + self.n_data = n_data self.q_norm = norm_layer(width // heads, elementwise_affine=True, eps=1e-6) if qk_norm else nn.Identity() self.k_norm = norm_layer(width // heads, elementwise_affine=True, eps=1e-6) if qk_norm else nn.Identity() - self.attn_processor = CrossAttentionProcessor() - def forward(self, q, kv): + _, n_ctx, _ = q.shape bs, n_data, width = kv.shape + attn_ch = width // self.heads // 2 q = q.view(bs, n_ctx, self.heads, -1) + kv = kv.view(bs, n_data, self.heads, -1) k, v = torch.split(kv, attn_ch, dim=-1) q = self.q_norm(q) k = self.k_norm(k) - q, k, v = map(lambda t: rearrange(t, 'b n h d -> b h n d', h=self.heads), (q, k, v)) - out = self.attn_processor(self, q, k, v) - out = out.transpose(1, 2).reshape(bs, n_ctx, -1) - return out + q, k, v = [t.permute(0, 2, 1, 3) for t in (q, k, v)] + out = F.scaled_dot_product_attention(q, k, v) + + out = out.transpose(1, 2).reshape(bs, n_ctx, -1) + + return out class MultiheadCrossAttention(nn.Module): def __init__( @@ -306,7 +684,6 @@ class MultiheadCrossAttention(nn.Module): x = self.c_proj(x) return x - class ResidualCrossAttentionBlock(nn.Module): def __init__( self, @@ -366,7 +743,7 @@ class QKVMultiheadAttention(nn.Module): q = self.q_norm(q) k = self.k_norm(k) - q, k, v = map(lambda t: rearrange(t, 'b n h d -> b h n d', h=self.heads), (q, k, v)) + q, k, v = [t.permute(0, 2, 1, 3) for t in (q, k, v)] out = F.scaled_dot_product_attention(q, k, v).transpose(1, 2).reshape(bs, n_ctx, -1) return out @@ -383,8 +760,7 @@ class MultiheadAttention(nn.Module): drop_path_rate: float = 0.0 ): super().__init__() - self.width = width - self.heads = heads + self.c_qkv = ops.Linear(width, width * 3, bias=qkv_bias) self.c_proj = ops.Linear(width, width) self.attention = QKVMultiheadAttention( @@ -491,7 +867,7 @@ class 
CrossAttentionDecoder(nn.Module): self.query_proj = ops.Linear(self.fourier_embedder.out_dim, width) if self.downsample_ratio != 1: self.latents_proj = ops.Linear(width * downsample_ratio, width) - if self.enable_ln_post == False: + if not self.enable_ln_post: qk_norm = False self.cross_attn_decoder = ResidualCrossAttentionBlock( width=width, @@ -522,28 +898,44 @@ class CrossAttentionDecoder(nn.Module): class ShapeVAE(nn.Module): def __init__( - self, - *, - embed_dim: int, - width: int, - heads: int, - num_decoder_layers: int, - geo_decoder_downsample_ratio: int = 1, - geo_decoder_mlp_expand_ratio: int = 4, - geo_decoder_ln_post: bool = True, - num_freqs: int = 8, - include_pi: bool = True, - qkv_bias: bool = True, - qk_norm: bool = False, - label_type: str = "binary", - drop_path_rate: float = 0.0, - scale_factor: float = 1.0, + self, + *, + num_latents: int = 4096, + embed_dim: int = 64, + width: int = 1024, + heads: int = 16, + num_decoder_layers: int = 16, + num_encoder_layers: int = 8, + pc_size: int = 81920, + pc_sharpedge_size: int = 0, + point_feats: int = 4, + downsample_ratio: int = 20, + geo_decoder_downsample_ratio: int = 1, + geo_decoder_mlp_expand_ratio: int = 4, + geo_decoder_ln_post: bool = True, + num_freqs: int = 8, + qkv_bias: bool = False, + qk_norm: bool = True, + drop_path_rate: float = 0.0, + include_pi: bool = False, + scale_factor: float = 1.0039506158752403, + label_type: str = "binary", ): super().__init__() self.geo_decoder_ln_post = geo_decoder_ln_post self.fourier_embedder = FourierEmbedder(num_freqs=num_freqs, include_pi=include_pi) + self.encoder = PointCrossAttention(layers = num_encoder_layers, + num_latents = num_latents, + downsample_ratio = downsample_ratio, + heads = heads, + pc_size = pc_size, + width = width, + point_feats = point_feats, + fourier_embedder = self.fourier_embedder, + pc_sharpedge_size = pc_sharpedge_size) + self.post_kl = ops.Linear(embed_dim, width) self.transformer = Transformer( @@ -583,5 +975,14 @@ class ShapeVAE(nn.Module): grid_logits = self.volume_decoder(latents, self.geo_decoder, bounds=bounds, num_chunks=num_chunks, octree_resolution=octree_resolution, enable_pbar=enable_pbar) return grid_logits.movedim(-2, -1) - def encode(self, x): - return None + def encode(self, surface): + + pc, feats = surface[:, :, :3], surface[:, :, 3:] + latents = self.encoder(pc, feats) + + moments = self.pre_kl(latents) + posterior = DiagonalGaussianDistribution(moments, feature_dim = -1) + + latents = posterior.sample() + + return latents diff --git a/comfy/ldm/hunyuan3dv2_1/hunyuandit.py b/comfy/ldm/hunyuan3dv2_1/hunyuandit.py new file mode 100644 index 000000000..48575bb3c --- /dev/null +++ b/comfy/ldm/hunyuan3dv2_1/hunyuandit.py @@ -0,0 +1,658 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from comfy.ldm.modules.attention import optimized_attention + +class GELU(nn.Module): + + def __init__(self, dim_in: int, dim_out: int, operations, device, dtype): + super().__init__() + self.proj = operations.Linear(dim_in, dim_out, device = device, dtype = dtype) + + def gelu(self, gate: torch.Tensor) -> torch.Tensor: + + if gate.device.type == "mps": + return F.gelu(gate.to(dtype = torch.float32)).to(dtype = gate.dtype) + + return F.gelu(gate) + + def forward(self, hidden_states): + + hidden_states = self.proj(hidden_states) + hidden_states = self.gelu(hidden_states) + + return hidden_states + +class FeedForward(nn.Module): + + def __init__(self, dim: int, dim_out = None, mult: int = 4, + dropout: float = 0.0, 
inner_dim = None, operations = None, device = None, dtype = None): + + super().__init__() + if inner_dim is None: + inner_dim = int(dim * mult) + + dim_out = dim_out if dim_out is not None else dim + + act_fn = GELU(dim, inner_dim, operations = operations, device = device, dtype = dtype) + + self.net = nn.ModuleList([]) + self.net.append(act_fn) + + self.net.append(nn.Dropout(dropout)) + self.net.append(operations.Linear(inner_dim, dim_out, device = device, dtype = dtype)) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + for module in self.net: + hidden_states = module(hidden_states) + return hidden_states + +class AddAuxLoss(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, loss): + # do nothing in forward (no computation) + ctx.requires_aux_loss = loss.requires_grad + ctx.dtype = loss.dtype + + return x + + @staticmethod + def backward(ctx, grad_output): + # add the aux loss gradients + grad_loss = None + # give the aux loss a unit gradient so it contributes on par with the main loss + if ctx.requires_aux_loss: + grad_loss = torch.ones(1, dtype = ctx.dtype, device = grad_output.device) + + return grad_output, grad_loss + +class MoEGate(nn.Module): + + def __init__(self, embed_dim, num_experts=16, num_experts_per_tok=2, aux_loss_alpha=0.01, device = None, dtype = None): + + super().__init__() + self.top_k = num_experts_per_tok + self.n_routed_experts = num_experts + + self.alpha = aux_loss_alpha + + self.gating_dim = embed_dim + self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim), device = device, dtype = dtype)) + + def forward(self, hidden_states: torch.Tensor): + + # flatten hidden states + hidden_states = hidden_states.view(-1, hidden_states.size(-1)) + + # compute logits and pass them through a softmax + logits = F.linear(hidden_states, self.weight, bias = None) + scores = logits.softmax(dim = -1) + + topk_weight, topk_idx = torch.topk(scores, k = self.top_k, dim = -1, sorted = False) + + if self.training and self.alpha > 0.0: + scores_for_aux = scores + + # bincount instead of one-hot encoding + counts = torch.bincount(topk_idx.view(-1), minlength = self.n_routed_experts).float() + ce = counts / topk_idx.numel() # normalized expert usage + + # mean expert score + Pi = scores_for_aux.mean(0) + + # expert balance loss + aux_loss = (Pi * ce * self.n_routed_experts).sum() * self.alpha + else: + aux_loss = None + + return topk_idx, topk_weight, aux_loss
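A minimal sketch, not part of the patch, of the gate's routing contract; toy sizes, using plain torch ops that mirror the forward above:

import torch
scores = torch.randn(4, 3).softmax(dim=-1)  # 4 tokens routed over 3 experts
topk_weight, topk_idx = torch.topk(scores, k=2, dim=-1, sorted=False)
# token t is dispatched to experts topk_idx[t] and their outputs are blended
# with weights topk_weight[t]; MoEBlock below performs this dispatch and adds
# a shared expert that always sees every token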
hidden_states.repeat_interleave(self.moe_top_k, dim = 0) + y = torch.empty_like(hidden_states, dtype = hidden_states.dtype) + + for i, expert in enumerate(self.experts): + tmp = expert(hidden_states[flat_topk_idx == i]) + y[flat_topk_idx == i] = tmp.to(hidden_states.dtype) + + y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim = 1) + y = y.view(*orig_shape) + + y = AddAuxLoss.apply(y, aux_loss) + else: + y = self.moe_infer(hidden_states, flat_expert_indices = flat_topk_idx,flat_expert_weights = topk_weight.view(-1, 1)).view(*orig_shape) + + y = y + self.shared_experts(identity) + + return y + + @torch.no_grad() + def moe_infer(self, x, flat_expert_indices, flat_expert_weights): + + expert_cache = torch.zeros_like(x) + idxs = flat_expert_indices.argsort() + + # no need for .numpy().cpu() here + tokens_per_expert = flat_expert_indices.bincount().cumsum(0) + token_idxs = idxs // self.moe_top_k + + for i, end_idx in enumerate(tokens_per_expert): + + start_idx = 0 if i == 0 else tokens_per_expert[i-1] + + if start_idx == end_idx: + continue + + expert = self.experts[i] + exp_token_idx = token_idxs[start_idx:end_idx] + + expert_tokens = x[exp_token_idx] + expert_out = expert(expert_tokens) + + expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]]) + + # use index_add_ with a 1-D index tensor directly avoids building a large [N, D] index map and extra memcopy required by scatter_reduce_ + # + avoid dtype conversion + expert_cache.index_add_(0, exp_token_idx, expert_out) + + return expert_cache + +class Timesteps(nn.Module): + def __init__(self, num_channels: int, downscale_freq_shift: float = 0.0, + scale: float = 1.0, max_period: int = 10000): + super().__init__() + + self.num_channels = num_channels + half_dim = num_channels // 2 + + # precompute the “inv_freq” vector once + exponent = -math.log(max_period) * torch.arange( + half_dim, dtype=torch.float32 + ) / (half_dim - downscale_freq_shift) + + inv_freq = torch.exp(exponent) + + # pad + if num_channels % 2 == 1: + # we’ll pad a zero at the end of the cos-half + inv_freq = torch.cat([inv_freq, inv_freq.new_zeros(1)]) + + # register to buffer so it moves with the device + self.register_buffer("inv_freq", inv_freq, persistent = False) + self.scale = scale + + def forward(self, timesteps: torch.Tensor): + + x = timesteps.float().unsqueeze(1) * self.inv_freq.to(timesteps.device).unsqueeze(0) + + + # fused CUDA kernels for sin and cos + sin_emb = x.sin() + cos_emb = x.cos() + + emb = torch.cat([sin_emb, cos_emb], dim = 1) + + # scale factor + if self.scale != 1.0: + emb = emb * self.scale + + # If we padded inv_freq for odd, emb is already wide enough; otherwise: + if emb.shape[1] > self.num_channels: + emb = emb[:, :self.num_channels] + + return emb + +class TimestepEmbedder(nn.Module): + def __init__(self, hidden_size, frequency_embedding_size = 256, cond_proj_dim = None, operations = None, device = None, dtype = None): + super().__init__() + + self.mlp = nn.Sequential( + operations.Linear(hidden_size, frequency_embedding_size, bias=True, device = device, dtype = dtype), + nn.GELU(), + operations.Linear(frequency_embedding_size, hidden_size, bias=True, device = device, dtype = dtype), + ) + self.frequency_embedding_size = frequency_embedding_size + + if cond_proj_dim is not None: + self.cond_proj = operations.Linear(cond_proj_dim, frequency_embedding_size, bias=False, device = device, dtype = dtype) + + self.time_embed = Timesteps(hidden_size) + + def forward(self, timesteps, condition): + + timestep_embed = 
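# Sketch of the index_add_ dispatch used in moe_infer above: weighted expert
# outputs are accumulated back into token order with a 1-D index tensor, so no
# [N, D] index map is ever built (sizes hypothetical):
import torch
N, D = 6, 3
expert_cache = torch.zeros(N, D)
exp_token_idx = torch.tensor([0, 2, 5])               # tokens routed to one expert
expert_out = 0.5 * torch.ones(3, D)                   # that expert's already-weighted outputs
expert_cache.index_add_(0, exp_token_idx, expert_out) # rows 0, 2 and 5 accumulate in place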
self.time_embed(timesteps).type(self.mlp[0].weight.dtype) + + if condition is not None: + cond_embed = self.cond_proj(condition) + timestep_embed = timestep_embed + cond_embed + + time_conditioned = self.mlp(timestep_embed.to(self.mlp[0].weight.device)) + + # for broadcasting with image tokens + return time_conditioned.unsqueeze(1) + +class MLP(nn.Module): + def __init__(self, *, width: int, operations = None, device = None, dtype = None): + super().__init__() + self.width = width + self.fc1 = operations.Linear(width, width * 4, device = device, dtype = dtype) + self.fc2 = operations.Linear(width * 4, width, device = device, dtype = dtype) + self.gelu = nn.GELU() + + def forward(self, x): + return self.fc2(self.gelu(self.fc1(x))) + +class CrossAttention(nn.Module): + def __init__( + self, + qdim, + kdim, + num_heads, + qkv_bias=True, + qk_norm=False, + norm_layer=nn.LayerNorm, + use_fp16: bool = False, + operations = None, + dtype = None, + device = None, + **kwargs, + ): + super().__init__() + self.qdim = qdim + self.kdim = kdim + + self.num_heads = num_heads + self.head_dim = self.qdim // num_heads + + self.scale = self.head_dim ** -0.5 + + self.to_q = operations.Linear(qdim, qdim, bias=qkv_bias, device = device, dtype = dtype) + self.to_k = operations.Linear(kdim, qdim, bias=qkv_bias, device = device, dtype = dtype) + self.to_v = operations.Linear(kdim, qdim, bias=qkv_bias, device = device, dtype = dtype) + + if use_fp16: + eps = 1.0 / 65504 + else: + eps = 1e-6 + + if norm_layer == nn.LayerNorm: + norm_layer = operations.LayerNorm + else: + norm_layer = operations.RMSNorm + + self.q_norm = norm_layer(self.head_dim, elementwise_affine=True, eps = eps, device = device, dtype = dtype) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim, elementwise_affine=True, eps = eps, device = device, dtype = dtype) if qk_norm else nn.Identity() + self.out_proj = operations.Linear(qdim, qdim, bias=True, device = device, dtype = dtype) + + def forward(self, x, y): + + b, s1, _ = x.shape + _, s2, _ = y.shape + + y = y.to(next(self.to_k.parameters()).dtype) + + q = self.to_q(x) + k = self.to_k(y) + v = self.to_v(y) + + kv = torch.cat((k, v), dim=-1) + split_size = kv.shape[-1] // self.num_heads // 2 + + kv = kv.view(1, -1, self.num_heads, split_size * 2) + k, v = torch.split(kv, split_size, dim=-1) + + q = q.view(b, s1, self.num_heads, self.head_dim) + k = k.view(b, s2, self.num_heads, self.head_dim) + v = v.reshape(b, s2, self.num_heads * self.head_dim) + + q = self.q_norm(q) + k = self.k_norm(k) + + x = optimized_attention( + q.reshape(b, s1, self.num_heads * self.head_dim), + k.reshape(b, s2, self.num_heads * self.head_dim), + v, + heads=self.num_heads, + ) + + out = self.out_proj(x) + + return out + +class Attention(nn.Module): + + def __init__( + self, + dim, + num_heads, + qkv_bias = True, + qk_norm = False, + norm_layer = nn.LayerNorm, + use_fp16: bool = False, + operations = None, + device = None, + dtype = None + ): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.head_dim = self.dim // num_heads + self.scale = self.head_dim ** -0.5 + + self.to_q = operations.Linear(dim, dim, bias = qkv_bias, device = device, dtype = dtype) + self.to_k = operations.Linear(dim, dim, bias = qkv_bias, device = device, dtype = dtype) + self.to_v = operations.Linear(dim, dim, bias = qkv_bias, device = device, dtype = dtype) + + if use_fp16: + eps = 1.0 / 65504 + else: + eps = 1e-6 + + if norm_layer == nn.LayerNorm: + norm_layer = operations.LayerNorm + else: + norm_layer = 
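# The Timesteps module above is the classic transformer sinusoidal embedding;
# a compact equivalent for an even channel count and downscale_freq_shift = 0
# (a sketch, not the module itself):
import math, torch
def sinusoidal(t: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    x = t.float().unsqueeze(1) * freqs.unsqueeze(0)
    return torch.cat([x.sin(), x.cos()], dim=1)       # [batch, dim]
emb = sinusoidal(torch.tensor([0, 10, 500]), 8)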
operations.RMSNorm + + self.q_norm = norm_layer(self.head_dim, elementwise_affine=True, eps = eps, device = device, dtype = dtype) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim, elementwise_affine=True, eps = eps, device = device, dtype = dtype) if qk_norm else nn.Identity() + self.out_proj = operations.Linear(dim, dim, device = device, dtype = dtype) + + def forward(self, x): + B, N, _ = x.shape + + query = self.to_q(x) + key = self.to_k(x) + value = self.to_v(x) + + qkv_combined = torch.cat((query, key, value), dim=-1) + split_size = qkv_combined.shape[-1] // self.num_heads // 3 + + qkv = qkv_combined.view(1, -1, self.num_heads, split_size * 3) + query, key, value = torch.split(qkv, split_size, dim=-1) + + query = query.reshape(B, N, self.num_heads, self.head_dim) + key = key.reshape(B, N, self.num_heads, self.head_dim) + value = value.reshape(B, N, self.num_heads * self.head_dim) + + query = self.q_norm(query) + key = self.k_norm(key) + + x = optimized_attention( + query.reshape(B, N, self.num_heads * self.head_dim), + key.reshape(B, N, self.num_heads * self.head_dim), + value, + heads=self.num_heads, + ) + + x = self.out_proj(x) + return x + +class HunYuanDiTBlock(nn.Module): + def __init__( + self, + hidden_size, + c_emb_size, + num_heads, + text_states_dim=1024, + qk_norm=False, + norm_layer=nn.LayerNorm, + qk_norm_layer=nn.RMSNorm, + qkv_bias=True, + skip_connection=True, + timested_modulate=False, + use_moe: bool = False, + num_experts: int = 8, + moe_top_k: int = 2, + use_fp16: bool = False, + operations = None, + device = None, dtype = None + ): + super().__init__() + + # eps can't be 1e-6 in fp16 mode because of numerical stability issues + if use_fp16: + eps = 1.0 / 65504 + else: + eps = 1e-6 + + self.norm1 = norm_layer(hidden_size, elementwise_affine = True, eps = eps, device = device, dtype = dtype) + + self.attn1 = Attention(hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, + norm_layer=qk_norm_layer, use_fp16 = use_fp16, device = device, dtype = dtype, operations = operations) + + self.norm2 = norm_layer(hidden_size, elementwise_affine = True, eps = eps, device = device, dtype = dtype) + + self.timested_modulate = timested_modulate + if self.timested_modulate: + self.default_modulation = nn.Sequential( + nn.SiLU(), + operations.Linear(c_emb_size, hidden_size, bias=True, device = device, dtype = dtype) + ) + + self.attn2 = CrossAttention(hidden_size, text_states_dim, num_heads=num_heads, qkv_bias=qkv_bias, + qk_norm=qk_norm, norm_layer=qk_norm_layer, use_fp16 = use_fp16, + device = device, dtype = dtype, operations = operations) + + self.norm3 = norm_layer(hidden_size, elementwise_affine = True, eps = eps, device = device, dtype = dtype) + + if skip_connection: + self.skip_norm = norm_layer(hidden_size, elementwise_affine = True, eps = eps, device = device, dtype = dtype) + self.skip_linear = operations.Linear(2 * hidden_size, hidden_size, device = device, dtype = dtype) + else: + self.skip_linear = None + + self.use_moe = use_moe + + if self.use_moe: + self.moe = MoEBlock( + hidden_size, + num_experts = num_experts, + moe_top_k = moe_top_k, + dropout = 0.0, + ff_inner_dim = int(hidden_size * 4.0), + device = device, dtype = dtype, + operations = operations + ) + else: + self.mlp = MLP(width=hidden_size, operations=operations, device = device, dtype = dtype) + + def forward(self, hidden_states, conditioning=None, text_states=None, skip_tensor=None): + + if self.skip_linear is not None: + combined = torch.cat([skip_tensor, 
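# The q_norm/k_norm modules above normalize per attention head, i.e. over
# head_dim on a [batch, seq, heads, head_dim] tensor. A sketch with torch's own
# RMSNorm (available in recent PyTorch releases; sizes hypothetical):
import torch
B, N, H, D = 1, 4, 2, 8
q = torch.randn(B, N, H, D)
q = torch.nn.RMSNorm(D, eps=1e-6)(q)                  # normalizes only the last (head_dim) axis
assert q.shape == (B, N, H, D)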
hidden_states], dim=-1) + hidden_states = self.skip_linear(combined) + hidden_states = self.skip_norm(hidden_states) + + # self attention + if self.timested_modulate: + modulation_shift = self.default_modulation(conditioning).unsqueeze(dim=1) + hidden_states = hidden_states + modulation_shift + + self_attn_out = self.attn1(self.norm1(hidden_states)) + hidden_states = hidden_states + self_attn_out + + # cross attention + hidden_states = hidden_states + self.attn2(self.norm2(hidden_states), text_states) + + # MLP Layer + mlp_input = self.norm3(hidden_states) + + if self.use_moe: + hidden_states = hidden_states + self.moe(mlp_input) + else: + hidden_states = hidden_states + self.mlp(mlp_input) + + return hidden_states + +class FinalLayer(nn.Module): + + def __init__(self, final_hidden_size, out_channels, operations, use_fp16: bool = False, device = None, dtype = None): + super().__init__() + + if use_fp16: + eps = 1.0 / 65504 + else: + eps = 1e-6 + + self.norm_final = operations.LayerNorm(final_hidden_size, elementwise_affine = True, eps = eps, device = device, dtype = dtype) + self.linear = operations.Linear(final_hidden_size, out_channels, bias = True, device = device, dtype = dtype) + + def forward(self, x): + x = self.norm_final(x) + x = x[:, 1:] + x = self.linear(x) + return x + +class HunYuanDiTPlain(nn.Module): + + # init with the default values from https://huggingface.co/tencent/Hunyuan3D-2.1/blob/main/hunyuan3d-dit-v2-1/config.yaml + def __init__( + self, + in_channels: int = 64, + hidden_size: int = 2048, + context_dim: int = 1024, + depth: int = 21, + num_heads: int = 16, + qk_norm: bool = True, + qkv_bias: bool = False, + num_moe_layers: int = 6, + guidance_cond_proj_dim = 2048, + norm_type = 'layer', + num_experts: int = 8, + moe_top_k: int = 2, + use_fp16: bool = False, + dtype = None, + device = None, + operations = None, + **kwargs + ): + + self.dtype = dtype + + super().__init__() + + self.depth = depth + + self.in_channels = in_channels + self.out_channels = in_channels + + self.num_heads = num_heads + self.hidden_size = hidden_size + + norm = operations.LayerNorm if norm_type == 'layer' else operations.RMSNorm + qk_norm = operations.RMSNorm + + self.context_dim = context_dim + self.guidance_cond_proj_dim = guidance_cond_proj_dim + + self.x_embedder = operations.Linear(in_channels, hidden_size, bias = True, device = device, dtype = dtype) + self.t_embedder = TimestepEmbedder(hidden_size, hidden_size * 4, cond_proj_dim = guidance_cond_proj_dim, device = device, dtype = dtype, operations = operations) + + + # HunYuanDiT Blocks + self.blocks = nn.ModuleList([ + HunYuanDiTBlock(hidden_size=hidden_size, + c_emb_size=hidden_size, + num_heads=num_heads, + text_states_dim=context_dim, + qk_norm=qk_norm, + norm_layer = norm, + qk_norm_layer = qk_norm, + skip_connection=layer > depth // 2, + qkv_bias=qkv_bias, + use_moe=True if depth - layer <= num_moe_layers else False, + num_experts=num_experts, + moe_top_k=moe_top_k, + use_fp16 = use_fp16, + device = device, dtype = dtype, operations = operations) + for layer in range(depth) + ]) + + self.depth = depth + + self.final_layer = FinalLayer(hidden_size, self.out_channels, use_fp16 = use_fp16, operations = operations, device = device, dtype = dtype) + + def forward(self, x, t, context, transformer_options = {}, **kwargs): + + x = x.movedim(-1, -2) + uncond_emb, cond_emb = context.chunk(2, dim = 0) + + context = torch.cat([cond_emb, uncond_emb], dim = 0) + main_condition = context + + t = 1.0 - t + + time_embedded = self.t_embedder(t,
condition = kwargs.get('guidance_cond')) + + x = x.to(dtype = next(self.x_embedder.parameters()).dtype) + x_embedded = self.x_embedder(x) + + combined = torch.cat([time_embedded, x_embedded], dim=1) + + def block_wrap(args): + return block( + args["x"], + args["t"], + args["cond"], + skip_tensor=args.get("skip"),) + + skip_stack = [] + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for idx, block in enumerate(self.blocks): + if idx <= self.depth // 2: + skip_input = None + else: + skip_input = skip_stack.pop() + + if ("block", idx) in blocks_replace: + + combined = blocks_replace[("block", idx)]( + { + "x": combined, + "t": time_embedded, + "cond": main_condition, + "skip": skip_input, + }, + {"original_block": block_wrap}, + ) + else: + combined = block(combined, time_embedded, main_condition, skip_tensor=skip_input) + + if idx < self.depth // 2: + skip_stack.append(combined) + + output = self.final_layer(combined) + output = output.movedim(-2, -1) * (-1.0) + + cond_emb, uncond_emb = output.chunk(2, dim = 0) + return torch.cat([uncond_emb, cond_emb]) diff --git a/comfy/model_base.py b/comfy/model_base.py index 56a6798be..39a3344bc 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -16,6 +16,8 @@ along with this program. If not, see . """ +import comfy.ldm.hunyuan3dv2_1 +import comfy.ldm.hunyuan3dv2_1.hunyuandit import torch import logging from comfy.ldm.modules.diffusionmodules.openaimodel import UNetModel, Timestep @@ -1282,6 +1284,21 @@ class Hunyuan3Dv2(BaseModel): out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) return out +class Hunyuan3Dv2_1(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hunyuan3dv2_1.hunyuandit.HunYuanDiTPlain) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + + guidance = kwargs.get("guidance", 5.0) + if guidance is not None: + out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) + return out + class HiDream(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hidream.model.HiDreamImageTransformer2DModel) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 9f3ab64df..75552ede9 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -400,6 +400,20 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys return dit_config + if f"{key_prefix}t_embedder.mlp.2.weight" in state_dict_keys: # Hunyuan 3D 2.1 + + dit_config = {} + dit_config["image_model"] = "hunyuan3d2_1" + dit_config["in_channels"] = state_dict[f"{key_prefix}x_embedder.weight"].shape[1] + dit_config["context_dim"] = 1024 + dit_config["hidden_size"] = state_dict[f"{key_prefix}x_embedder.weight"].shape[0] + dit_config["mlp_ratio"] = 4.0 + dit_config["num_heads"] = 16 + dit_config["depth"] = count_blocks(state_dict_keys, f"{key_prefix}blocks.{{}}") + dit_config["qkv_bias"] = False + dit_config["guidance_cond_proj_dim"] = None#f"{key_prefix}t_embedder.cond_proj.weight" in state_dict_keys + return dit_config + if 
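# Toy trace of the skip_stack discipline in the forward pass above: for
# depth = 5, blocks 0-1 push, block 2 runs without a skip, and blocks 3-4 pop
# in reverse order, pairing 3 with 1 and 4 with 0, U-ViT style:
depth, skip_stack = 5, []
for idx in range(depth):
    skip_input = None if idx <= depth // 2 else skip_stack.pop()
    print(idx, "skip from block:", skip_input)        # 0/1/2 -> None, 3 -> 1, 4 -> 0
    if idx < depth // 2:
        skip_stack.append(idx)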
'{}caption_projection.0.linear.weight'.format(key_prefix) in state_dict_keys: # HiDream dit_config = {} dit_config["image_model"] = "hidream" diff --git a/comfy/sd.py b/comfy/sd.py index bb5d61fb3..014f797ca 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -446,17 +446,29 @@ class VAE: self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] self.memory_used_encode = lambda shape, dtype: 6000 * shape[3] * shape[4] * model_management.dtype_size(dtype) self.memory_used_decode = lambda shape, dtype: 7000 * shape[3] * shape[4] * (8 * 8) * model_management.dtype_size(dtype) + # Hunyuan 3d v2 2.0 & 2.1 elif "geo_decoder.cross_attn_decoder.ln_1.bias" in sd: + self.latent_dim = 1 - ln_post = "geo_decoder.ln_post.weight" in sd - inner_size = sd["geo_decoder.output_proj.weight"].shape[1] - downsample_ratio = sd["post_kl.weight"].shape[0] // inner_size - mlp_expand = sd["geo_decoder.cross_attn_decoder.mlp.c_fc.weight"].shape[0] // inner_size - self.memory_used_encode = lambda shape, dtype: (1000 * shape[2]) * model_management.dtype_size(dtype) # TODO - self.memory_used_decode = lambda shape, dtype: (1024 * 1024 * 1024 * 2.0) * model_management.dtype_size(dtype) # TODO - ddconfig = {"embed_dim": 64, "num_freqs": 8, "include_pi": False, "heads": 16, "width": 1024, "num_decoder_layers": 16, "qkv_bias": False, "qk_norm": True, "geo_decoder_mlp_expand_ratio": mlp_expand, "geo_decoder_downsample_ratio": downsample_ratio, "geo_decoder_ln_post": ln_post} - self.first_stage_model = comfy.ldm.hunyuan3d.vae.ShapeVAE(**ddconfig) + + def estimate_memory(shape, dtype, num_layers = 16, kv_cache_multiplier = 2): + batch, num_tokens, hidden_dim = shape + dtype_size = model_management.dtype_size(dtype) + + total_mem = batch * num_tokens * hidden_dim * dtype_size * (1 + kv_cache_multiplier * num_layers) + return total_mem + + # better memory estimations + self.memory_used_encode = lambda shape, dtype, num_layers = 8, kv_cache_multiplier = 0:\ + estimate_memory(shape, dtype, num_layers, kv_cache_multiplier) + + self.memory_used_decode = lambda shape, dtype, num_layers = 16, kv_cache_multiplier = 2: \ + estimate_memory(shape, dtype, num_layers, kv_cache_multiplier) + + self.first_stage_model = comfy.ldm.hunyuan3d.vae.ShapeVAE() self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + elif "vocoder.backbone.channel_layers.0.0.bias" in sd: #Ace Step Audio self.first_stage_model = comfy.ldm.ace.vae.music_dcae_pipeline.MusicDCAE(source_sample_rate=44100) self.memory_used_encode = lambda shape, dtype: (shape[2] * 330) * model_management.dtype_size(dtype) @@ -1046,6 +1058,27 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c model = None model_patcher = None + if isinstance(sd, dict) and all(k in sd for k in ["model", "vae", "conditioner"]): + from collections import OrderedDict + import gc + + merged_sd = OrderedDict() + + for k, v in sd["model"].items(): + merged_sd[f"model.{k}"] = v + + for k, v in sd["vae"].items(): + merged_sd[f"vae.{k}"] = v + + for key, value in sd["conditioner"].items(): + merged_sd[f"conditioner.{key}"] = value + + sd = merged_sd + + del merged_sd + gc.collect() + torch.cuda.empty_cache() + diffusion_model_prefix = model_detection.unet_prefix_from_state_dict(sd) parameters = comfy.utils.calculate_parameters(sd, diffusion_model_prefix) weight_dtype = comfy.utils.weight_dtype(sd, diffusion_model_prefix) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 76260de00..75dad277d 100644 --- a/comfy/supported_models.py +++ 
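# Worked example of the estimate_memory heuristic above with hypothetical
# decode inputs (batch 1, 4096 tokens, hidden 1024, fp16, 16 layers,
# kv_cache_multiplier 2):
batch, num_tokens, hidden_dim, dtype_size = 1, 4096, 1024, 2
total_mem = batch * num_tokens * hidden_dim * dtype_size * (1 + 2 * 16)
print(total_mem / 1024**2)                            # 264 MiB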
b/comfy/supported_models.py @@ -1128,6 +1128,17 @@ class Hunyuan3Dv2(supported_models_base.BASE): def clip_target(self, state_dict={}): return None +class Hunyuan3Dv2_1(Hunyuan3Dv2): + unet_config = { + "image_model": "hunyuan3d2_1", + } + + latent_format = latent_formats.Hunyuan3Dv2_1 + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.Hunyuan3Dv2_1(self, device = device) + return out + class Hunyuan3Dv2mini(Hunyuan3Dv2): unet_config = { "image_model": "hunyuan3d2", @@ -1285,6 +1296,6 @@ class QwenImage(supported_models_base.BASE): return supported_models_base.ClipTarget(comfy.text_encoders.qwen_image.QwenImageTokenizer, comfy.text_encoders.qwen_image.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_hunyuan3d.py b/comfy_extras/nodes_hunyuan3d.py index 51e45336a..f6e71e0a8 100644 --- a/comfy_extras/nodes_hunyuan3d.py +++ b/comfy_extras/nodes_hunyuan3d.py @@ -8,13 +8,16 @@ import folder_paths import comfy.model_management from comfy.cli_args import args - class EmptyLatentHunyuan3Dv2: @classmethod def INPUT_TYPES(s): - return {"required": {"resolution": ("INT", {"default": 3072, "min": 1, "max": 8192}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}), - }} + return { + "required": { + "resolution": ("INT", {"default": 3072, "min": 1, "max": 8192}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}), + } + } + RETURN_TYPES = ("LATENT",) FUNCTION = "generate" @@ -24,7 +27,6 @@ class EmptyLatentHunyuan3Dv2: latent = torch.zeros([batch_size, 64, resolution], device=comfy.model_management.intermediate_device()) return ({"samples": latent, "type": "hunyuan3dv2"}, ) - class Hunyuan3Dv2Conditioning: @classmethod def INPUT_TYPES(s): @@ -81,7 +83,6 @@ class VOXEL: def __init__(self, data): self.data = data - class VAEDecodeHunyuan3D: @classmethod def INPUT_TYPES(s): @@ -99,7 +100,6 @@ class VAEDecodeHunyuan3D: voxels = VOXEL(vae.decode(samples["samples"], vae_options={"num_chunks": num_chunks, "octree_resolution": octree_resolution})) return (voxels, ) - def 
voxel_to_mesh(voxels, threshold=0.5, device=None): if device is None: device = torch.device("cpu") @@ -230,13 +230,9 @@ def voxel_to_mesh_surfnet(voxels, threshold=0.5, device=None): [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1] ], device=device) - corner_values = torch.zeros((cell_positions.shape[0], 8), device=device) - for c, (dz, dy, dx) in enumerate(corner_offsets): - corner_values[:, c] = padded[ - cell_positions[:, 0] + dz, - cell_positions[:, 1] + dy, - cell_positions[:, 2] + dx - ] + pos = cell_positions.unsqueeze(1) + corner_offsets.unsqueeze(0) + z_idx, y_idx, x_idx = pos.unbind(-1) + corner_values = padded[z_idx, y_idx, x_idx] corner_signs = corner_values > threshold has_inside = torch.any(corner_signs, dim=1) diff --git a/nodes.py b/nodes.py index 6c2f9dd14..1afe5601a 100644 --- a/nodes.py +++ b/nodes.py @@ -998,20 +998,31 @@ class CLIPVisionLoader: class CLIPVisionEncode: @classmethod def INPUT_TYPES(s): - return {"required": { "clip_vision": ("CLIP_VISION",), - "image": ("IMAGE",), - "crop": (["center", "none"],) - }} + return { + "required": { + "clip_vision": ("CLIP_VISION",), + "image": ("IMAGE",), + "crop": (["center", "none", "recenter"],), + }, + "optional": { + "border_ratio": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 0.5, "step": 0.01, "visible_if": {"crop": "recenter"},}), + } + } + RETURN_TYPES = ("CLIP_VISION_OUTPUT",) FUNCTION = "encode" CATEGORY = "conditioning" - def encode(self, clip_vision, image, crop): - crop_image = True - if crop != "center": - crop_image = False - output = clip_vision.encode_image(image, crop=crop_image) + def encode(self, clip_vision, image, crop, border_ratio): + crop_image = crop == "center" + + if crop == "recenter": + crop_image = True + else: + border_ratio = None + + output = clip_vision.encode_image(image, crop=crop_image, border_ratio = border_ratio) return (output,) class StyleModelLoader: diff --git a/requirements.txt b/requirements.txt index 3008a5dc3..564fa6e23 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,4 @@ kornia>=0.7.1 spandrel soundfile pydantic~=2.0 -pydantic-settings~=2.0 +pydantic-settings~=2.0 \ No newline at end of file From c9ebe70072213a875ffbe40cc1b36820b2005211 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 4 Sep 2025 17:39:02 -0700 Subject: [PATCH 0545/1073] Some changes to the previous hunyuan PR. 
(#9725) --- comfy/clip_vision.py | 225 +------------------------------------------ comfy/sd.py | 21 ---- nodes.py | 29 ++---- requirements.txt | 2 +- 4 files changed, 14 insertions(+), 263 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 4bc640e8b..447b1ce4a 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -17,227 +17,10 @@ class Output: def __setitem__(self, key, item): setattr(self, key, item) - -def cubic_kernel(x, a: float = -0.75): - absx = x.abs() - absx2 = absx ** 2 - absx3 = absx ** 3 - - w = (a + 2) * absx3 - (a + 3) * absx2 + 1 - w2 = a * absx3 - 5*a * absx2 + 8*a * absx - 4*a - - return torch.where(absx <= 1, w, torch.where(absx < 2, w2, torch.zeros_like(x))) - -def get_indices_weights(in_size, out_size, scale): - # OpenCV-style half-pixel mapping - x = torch.arange(out_size, dtype=torch.float32) - x = (x + 0.5) / scale - 0.5 - - x0 = x.floor().long() - dx = x.unsqueeze(1) - (x0.unsqueeze(1) + torch.arange(-1, 3)) - - weights = cubic_kernel(dx) - weights = weights / weights.sum(dim=1, keepdim=True) - - indices = x0.unsqueeze(1) + torch.arange(-1, 3) - indices = indices.clamp(0, in_size - 1) - - return indices, weights - -def resize_cubic_1d(x, out_size, dim): - b, c, h, w = x.shape - in_size = h if dim == 2 else w - scale = out_size / in_size - - indices, weights = get_indices_weights(in_size, out_size, scale) - - if dim == 2: - x = x.permute(0, 1, 3, 2) - x = x.reshape(-1, h) - else: - x = x.reshape(-1, w) - - gathered = x[:, indices] - out = (gathered * weights.unsqueeze(0)).sum(dim=2) - - if dim == 2: - out = out.reshape(b, c, w, out_size).permute(0, 1, 3, 2) - else: - out = out.reshape(b, c, h, out_size) - - return out - -def resize_cubic(img: torch.Tensor, size: tuple) -> torch.Tensor: - """ - Resize image using OpenCV-equivalent INTER_CUBIC interpolation. 
- Implemented in pure PyTorch - """ - - if img.ndim == 3: - img = img.unsqueeze(0) - - img = img.permute(0, 3, 1, 2) - - out_h, out_w = size - img = resize_cubic_1d(img, out_h, dim=2) - img = resize_cubic_1d(img, out_w, dim=3) - return img - -def resize_area(img: torch.Tensor, size: tuple) -> torch.Tensor: - # vectorized implementation for OpenCV's INTER_AREA using pure PyTorch - original_shape = img.shape - is_hwc = False - - if img.ndim == 3: - if img.shape[0] <= 4: - img = img.unsqueeze(0) - else: - is_hwc = True - img = img.permute(2, 0, 1).unsqueeze(0) - elif img.ndim == 4: - pass - else: - raise ValueError("Expected image with 3 or 4 dims.") - - B, C, H, W = img.shape - out_h, out_w = size - scale_y = H / out_h - scale_x = W / out_w - - device = img.device - - # compute the grid boundries - y_start = torch.arange(out_h, device=device).float() * scale_y - y_end = y_start + scale_y - x_start = torch.arange(out_w, device=device).float() * scale_x - x_end = x_start + scale_x - - # for each output pixel, we will compute the range for it - y_start_int = torch.floor(y_start).long() - y_end_int = torch.ceil(y_end).long() - x_start_int = torch.floor(x_start).long() - x_end_int = torch.ceil(x_end).long() - - # We will build the weighted sums by iterating over contributing input pixels once - output = torch.zeros((B, C, out_h, out_w), dtype=torch.float32, device=device) - area = torch.zeros((out_h, out_w), dtype=torch.float32, device=device) - - max_kernel_h = int(torch.max(y_end_int - y_start_int).item()) - max_kernel_w = int(torch.max(x_end_int - x_start_int).item()) - - for dy in range(max_kernel_h): - for dx in range(max_kernel_w): - # compute the weights for this offset for all output pixels - - y_idx = y_start_int.unsqueeze(1) + dy - x_idx = x_start_int.unsqueeze(0) + dx - - # clamp indices to image boundaries - y_idx_clamped = torch.clamp(y_idx, 0, H - 1) - x_idx_clamped = torch.clamp(x_idx, 0, W - 1) - - # compute weights by broadcasting - y_weight = (torch.min(y_end.unsqueeze(1), y_idx_clamped.float() + 1.0) - torch.max(y_start.unsqueeze(1), y_idx_clamped.float())).clamp(min=0) - x_weight = (torch.min(x_end.unsqueeze(0), x_idx_clamped.float() + 1.0) - torch.max(x_start.unsqueeze(0), x_idx_clamped.float())).clamp(min=0) - - weight = (y_weight * x_weight) - - y_expand = y_idx_clamped.expand(out_h, out_w) - x_expand = x_idx_clamped.expand(out_h, out_w) - - - pixels = img[:, :, y_expand, x_expand] - - # unsqueeze to broadcast - w = weight.unsqueeze(0).unsqueeze(0) - - output += pixels * w - area += weight - - # Normalize by area - output /= area.unsqueeze(0).unsqueeze(0) - - if is_hwc: - return output[0].permute(1, 2, 0) - elif img.shape[0] == 1 and original_shape[0] <= 4: - return output[0] - else: - return output - -def recenter(image, border_ratio: float = 0.2): - - if image.shape[-1] == 4: - mask = image[..., 3] - else: - mask = torch.ones_like(image[..., 0:1]) * 255 - image = torch.concatenate([image, mask], axis=-1) - mask = mask[..., 0] - - H, W, C = image.shape - - size = max(H, W) - result = torch.zeros((size, size, C), dtype = torch.uint8) - - # as_tuple to match numpy behaviour - x_coords, y_coords = torch.nonzero(mask, as_tuple=True) - - y_min, y_max = y_coords.min(), y_coords.max() - x_min, x_max = x_coords.min(), x_coords.max() - - h = x_max - x_min - w = y_max - y_min - - if h == 0 or w == 0: - raise ValueError('input image is empty') - - desired_size = int(size * (1 - border_ratio)) - scale = desired_size / max(h, w) - - h2 = int(h * scale) - w2 = int(w * scale) - - x2_min 
= (size - h2) // 2 - x2_max = x2_min + h2 - - y2_min = (size - w2) // 2 - y2_max = y2_min + w2 - - # note: opencv takes columns first (opposite to pytorch and numpy that take the row first) - result[x2_min:x2_max, y2_min:y2_max] = resize_area(image[x_min:x_max, y_min:y_max], (h2, w2)) - - bg = torch.ones((result.shape[0], result.shape[1], 3), dtype = torch.uint8) * 255 - - mask = result[..., 3:].to(torch.float32) / 255 - result = result[..., :3] * mask + bg * (1 - mask) - - mask = mask * 255 - result = result.clip(0, 255).to(torch.uint8) - mask = mask.clip(0, 255).to(torch.uint8) - - return result - -def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], - crop=True, value_range = (-1, 1), border_ratio: float = None, recenter_size: int = 512): - - if border_ratio is not None: - - image = (image * 255).clamp(0, 255).to(torch.uint8) - image = [recenter(img, border_ratio = border_ratio) for img in image] - - image = torch.stack(image, dim = 0) - image = resize_cubic(image, size = (recenter_size, recenter_size)) - - image = image / 255 * 2 - 1 - low, high = value_range - - image = (image - low) / (high - low) - image = image.permute(0, 2, 3, 1) - +def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True): image = image[:, :, :, :3] if image.shape[3] > 3 else image - mean = torch.tensor(mean, device=image.device, dtype=image.dtype) std = torch.tensor(std, device=image.device, dtype=image.dtype) - image = image.movedim(-1, 1) if not (image.shape[2] == size and image.shape[3] == size): if crop: @@ -246,7 +29,7 @@ def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], s else: scale_size = (size, size) - image = torch.nn.functional.interpolate(image, size=scale_size, mode="bilinear" if border_ratio is not None else "bicubic", antialias=True) + image = torch.nn.functional.interpolate(image, size=scale_size, mode="bicubic", antialias=True) h = (image.shape[2] - size)//2 w = (image.shape[3] - size)//2 image = image[:,:,h:h+size,w:w+size] @@ -288,9 +71,9 @@ class ClipVisionModel(): def get_sd(self): return self.model.state_dict() - def encode_image(self, image, crop=True, border_ratio: float = None): + def encode_image(self, image, crop=True): comfy.model_management.load_model_gpu(self.patcher) - pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop, border_ratio=border_ratio).float() + pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float() out = self.model(pixel_values=pixel_values, intermediate_output='all' if self.return_all_hidden_states else -2) outputs = Output() diff --git a/comfy/sd.py b/comfy/sd.py index 014f797ca..be5aa8dc8 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -1058,27 +1058,6 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c model = None model_patcher = None - if isinstance(sd, dict) and all(k in sd for k in ["model", "vae", "conditioner"]): - from collections import OrderedDict - import gc - - merged_sd = OrderedDict() - - for k, v in sd["model"].items(): - merged_sd[f"model.{k}"] = v - - for k, v in sd["vae"].items(): - merged_sd[f"vae.{k}"] = v - - for key, value in sd["conditioner"].items(): - merged_sd[f"conditioner.{key}"] = value - - sd = merged_sd - - del merged_sd - gc.collect() - torch.cuda.empty_cache() - 
diffusion_model_prefix = model_detection.unet_prefix_from_state_dict(sd) parameters = comfy.utils.calculate_parameters(sd, diffusion_model_prefix) weight_dtype = comfy.utils.weight_dtype(sd, diffusion_model_prefix) diff --git a/nodes.py b/nodes.py index 1afe5601a..6c2f9dd14 100644 --- a/nodes.py +++ b/nodes.py @@ -998,31 +998,20 @@ class CLIPVisionLoader: class CLIPVisionEncode: @classmethod def INPUT_TYPES(s): - return { - "required": { - "clip_vision": ("CLIP_VISION",), - "image": ("IMAGE",), - "crop": (["center", "none", "recenter"],), - }, - "optional": { - "border_ratio": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 0.5, "step": 0.01, "visible_if": {"crop": "recenter"},}), - } - } - + return {"required": { "clip_vision": ("CLIP_VISION",), + "image": ("IMAGE",), + "crop": (["center", "none"],) + }} RETURN_TYPES = ("CLIP_VISION_OUTPUT",) FUNCTION = "encode" CATEGORY = "conditioning" - def encode(self, clip_vision, image, crop, border_ratio): - crop_image = crop == "center" - - if crop == "recenter": - crop_image = True - else: - border_ratio = None - - output = clip_vision.encode_image(image, crop=crop_image, border_ratio = border_ratio) + def encode(self, clip_vision, image, crop): + crop_image = True + if crop != "center": + crop_image = False + output = clip_vision.encode_image(image, crop=crop_image) return (output,) class StyleModelLoader: diff --git a/requirements.txt b/requirements.txt index 564fa6e23..3008a5dc3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,4 @@ kornia>=0.7.1 spandrel soundfile pydantic~=2.0 -pydantic-settings~=2.0 \ No newline at end of file +pydantic-settings~=2.0 From 3493b9cb1f9a9a66b1b86ed908cf87bc382b647a Mon Sep 17 00:00:00 2001 From: Arjan Singh <1598641+arjansingh@users.noreply.github.com> Date: Fri, 5 Sep 2025 11:32:25 -0700 Subject: [PATCH 0546/1073] fix: add cache headers for images (#9560) --- middleware/__init__.py | 1 + middleware/cache_middleware.py | 52 ++++ server.py | 11 +- tests-unit/server_test/test_cache_control.py | 255 +++++++++++++++++++ 4 files changed, 311 insertions(+), 8 deletions(-) create mode 100644 middleware/__init__.py create mode 100644 middleware/cache_middleware.py create mode 100644 tests-unit/server_test/test_cache_control.py diff --git a/middleware/__init__.py b/middleware/__init__.py new file mode 100644 index 000000000..2d7c7c3a9 --- /dev/null +++ b/middleware/__init__.py @@ -0,0 +1 @@ +"""Server middleware modules""" diff --git a/middleware/cache_middleware.py b/middleware/cache_middleware.py new file mode 100644 index 000000000..374ef7934 --- /dev/null +++ b/middleware/cache_middleware.py @@ -0,0 +1,52 @@ +"""Cache control middleware for ComfyUI server""" + +from aiohttp import web +from typing import Callable, Awaitable + +# Time in seconds +ONE_HOUR: int = 3600 +ONE_DAY: int = 86400 +IMG_EXTENSIONS = ( + ".jpg", + ".jpeg", + ".png", + ".ppm", + ".bmp", + ".pgm", + ".tif", + ".tiff", + ".webp", +) + + +@web.middleware +async def cache_control( + request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]] +) -> web.Response: + """Cache control middleware that sets appropriate cache headers based on file type and response status""" + response: web.Response = await handler(request) + + if ( + request.path.endswith(".js") + or request.path.endswith(".css") + or request.path.endswith("index.json") + ): + response.headers.setdefault("Cache-Control", "no-cache") + return response + + # Early return for non-image files - no cache headers needed + if not 
request.path.lower().endswith(IMG_EXTENSIONS): + return response + + # Handle image files + if response.status == 404: + response.headers.setdefault("Cache-Control", f"public, max-age={ONE_HOUR}") + elif response.status in (200, 201, 202, 203, 204, 205, 206, 301, 308): + # Success responses and permanent redirects - cache for 1 day + response.headers.setdefault("Cache-Control", f"public, max-age={ONE_DAY}") + elif response.status in (302, 303, 307): + # Temporary redirects - no cache + response.headers.setdefault("Cache-Control", "no-cache") + # Note: 304 Not Modified falls through - no cache headers set + + return response diff --git a/server.py b/server.py index 3d323eaf8..43816a8cd 100644 --- a/server.py +++ b/server.py @@ -39,20 +39,15 @@ from typing import Optional, Union from api_server.routes.internal.internal_routes import InternalRoutes from protocol import BinaryEventTypes +# Import cache control middleware +from middleware.cache_middleware import cache_control + async def send_socket_catch_exception(function, message): try: await function(message) except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError, BrokenPipeError, ConnectionError) as err: logging.warning("send error: {}".format(err)) -@web.middleware -async def cache_control(request: web.Request, handler): - response: web.Response = await handler(request) - if request.path.endswith('.js') or request.path.endswith('.css') or request.path.endswith('index.json'): - response.headers.setdefault('Cache-Control', 'no-cache') - return response - - @web.middleware async def compress_body(request: web.Request, handler): accept_encoding = request.headers.get("Accept-Encoding", "") diff --git a/tests-unit/server_test/test_cache_control.py b/tests-unit/server_test/test_cache_control.py new file mode 100644 index 000000000..8de59125a --- /dev/null +++ b/tests-unit/server_test/test_cache_control.py @@ -0,0 +1,255 @@ +"""Tests for server cache control middleware""" + +import pytest +from aiohttp import web +from aiohttp.test_utils import make_mocked_request +from typing import Dict, Any + +from middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS + +pytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests + +# Test configuration data +CACHE_SCENARIOS = [ + # Image file scenarios + { + "name": "image_200_status", + "path": "/test.jpg", + "status": 200, + "expected_cache": f"public, max-age={ONE_DAY}", + "should_have_header": True, + }, + { + "name": "image_404_status", + "path": "/missing.jpg", + "status": 404, + "expected_cache": f"public, max-age={ONE_HOUR}", + "should_have_header": True, + }, + # JavaScript/CSS scenarios + { + "name": "js_no_cache", + "path": "/script.js", + "status": 200, + "expected_cache": "no-cache", + "should_have_header": True, + }, + { + "name": "css_no_cache", + "path": "/styles.css", + "status": 200, + "expected_cache": "no-cache", + "should_have_header": True, + }, + { + "name": "index_json_no_cache", + "path": "/api/index.json", + "status": 200, + "expected_cache": "no-cache", + "should_have_header": True, + }, + # Non-matching files + { + "name": "html_no_header", + "path": "/index.html", + "status": 200, + "expected_cache": None, + "should_have_header": False, + }, + { + "name": "txt_no_header", + "path": "/data.txt", + "status": 200, + "expected_cache": None, + "should_have_header": False, + }, + { + "name": "api_endpoint_no_header", + "path": "/api/endpoint", + "status": 200, + "expected_cache": None, + "should_have_header": False, + }, + { + 
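# The middleware above relies on setdefault semantics, so a Cache-Control
# header already set by a handler always wins; a standalone illustration:
from aiohttp import web
resp = web.Response(status=200, headers={"Cache-Control": "private, no-cache"})
resp.headers.setdefault("Cache-Control", "public, max-age=86400")
assert resp.headers["Cache-Control"] == "private, no-cache"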
"name": "pdf_no_header", + "path": "/file.pdf", + "status": 200, + "expected_cache": None, + "should_have_header": False, + }, +] + +# Status code scenarios for images +IMAGE_STATUS_SCENARIOS = [ + # Success statuses get long cache + {"status": 200, "expected": f"public, max-age={ONE_DAY}"}, + {"status": 201, "expected": f"public, max-age={ONE_DAY}"}, + {"status": 202, "expected": f"public, max-age={ONE_DAY}"}, + {"status": 204, "expected": f"public, max-age={ONE_DAY}"}, + {"status": 206, "expected": f"public, max-age={ONE_DAY}"}, + # Permanent redirects get long cache + {"status": 301, "expected": f"public, max-age={ONE_DAY}"}, + {"status": 308, "expected": f"public, max-age={ONE_DAY}"}, + # Temporary redirects get no cache + {"status": 302, "expected": "no-cache"}, + {"status": 303, "expected": "no-cache"}, + {"status": 307, "expected": "no-cache"}, + # 404 gets short cache + {"status": 404, "expected": f"public, max-age={ONE_HOUR}"}, +] + +# Case sensitivity test paths +CASE_SENSITIVITY_PATHS = ["/image.JPG", "/photo.PNG", "/pic.JpEg"] + +# Edge case test paths +EDGE_CASE_PATHS = [ + { + "name": "query_strings_ignored", + "path": "/image.jpg?v=123&size=large", + "expected": f"public, max-age={ONE_DAY}", + }, + { + "name": "multiple_dots_in_path", + "path": "/image.min.jpg", + "expected": f"public, max-age={ONE_DAY}", + }, + { + "name": "nested_paths_with_images", + "path": "/static/images/photo.jpg", + "expected": f"public, max-age={ONE_DAY}", + }, +] + + +class TestCacheControl: + """Test cache control middleware functionality""" + + @pytest.fixture + def status_handler_factory(self): + """Create a factory for handlers that return specific status codes""" + + def factory(status: int, headers: Dict[str, str] = None): + async def handler(request): + return web.Response(status=status, headers=headers or {}) + + return handler + + return factory + + @pytest.fixture + def mock_handler(self, status_handler_factory): + """Create a mock handler that returns a response with 200 status""" + return status_handler_factory(200) + + @pytest.fixture + def handler_with_existing_cache(self, status_handler_factory): + """Create a handler that returns response with existing Cache-Control header""" + return status_handler_factory(200, {"Cache-Control": "max-age=3600"}) + + async def assert_cache_header( + self, + response: web.Response, + expected_cache: str = None, + should_have_header: bool = True, + ): + """Helper to assert cache control headers""" + if should_have_header: + assert "Cache-Control" in response.headers + if expected_cache: + assert response.headers["Cache-Control"] == expected_cache + else: + assert "Cache-Control" not in response.headers + + # Parameterized tests + @pytest.mark.parametrize("scenario", CACHE_SCENARIOS, ids=lambda x: x["name"]) + async def test_cache_control_scenarios( + self, scenario: Dict[str, Any], status_handler_factory + ): + """Test various cache control scenarios""" + handler = status_handler_factory(scenario["status"]) + request = make_mocked_request("GET", scenario["path"]) + response = await cache_control(request, handler) + + assert response.status == scenario["status"] + await self.assert_cache_header( + response, scenario["expected_cache"], scenario["should_have_header"] + ) + + @pytest.mark.parametrize("ext", IMG_EXTENSIONS) + async def test_all_image_extensions(self, ext: str, mock_handler): + """Test all defined image extensions are handled correctly""" + request = make_mocked_request("GET", f"/image{ext}") + response = await cache_control(request, 
mock_handler) + + assert response.status == 200 + assert "Cache-Control" in response.headers + assert response.headers["Cache-Control"] == f"public, max-age={ONE_DAY}" + + @pytest.mark.parametrize( + "status_scenario", IMAGE_STATUS_SCENARIOS, ids=lambda x: f"status_{x['status']}" + ) + async def test_image_status_codes( + self, status_scenario: Dict[str, Any], status_handler_factory + ): + """Test different status codes for image requests""" + handler = status_handler_factory(status_scenario["status"]) + request = make_mocked_request("GET", "/image.jpg") + response = await cache_control(request, handler) + + assert response.status == status_scenario["status"] + assert "Cache-Control" in response.headers + assert response.headers["Cache-Control"] == status_scenario["expected"] + + @pytest.mark.parametrize("path", CASE_SENSITIVITY_PATHS) + async def test_case_insensitive_image_extension(self, path: str, mock_handler): + """Test that image extensions are matched case-insensitively""" + request = make_mocked_request("GET", path) + response = await cache_control(request, mock_handler) + + assert "Cache-Control" in response.headers + assert response.headers["Cache-Control"] == f"public, max-age={ONE_DAY}" + + @pytest.mark.parametrize("edge_case", EDGE_CASE_PATHS, ids=lambda x: x["name"]) + async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler): + """Test edge cases like query strings, nested paths, etc.""" + request = make_mocked_request("GET", edge_case["path"]) + response = await cache_control(request, mock_handler) + + assert "Cache-Control" in response.headers + assert response.headers["Cache-Control"] == edge_case["expected"] + + # Header preservation tests (special cases not covered by parameterization) + async def test_js_preserves_existing_headers(self, handler_with_existing_cache): + """Test that .js files preserve existing Cache-Control headers""" + request = make_mocked_request("GET", "/script.js") + response = await cache_control(request, handler_with_existing_cache) + + # setdefault should preserve existing header + assert response.headers["Cache-Control"] == "max-age=3600" + + async def test_css_preserves_existing_headers(self, handler_with_existing_cache): + """Test that .css files preserve existing Cache-Control headers""" + request = make_mocked_request("GET", "/styles.css") + response = await cache_control(request, handler_with_existing_cache) + + # setdefault should preserve existing header + assert response.headers["Cache-Control"] == "max-age=3600" + + async def test_image_preserves_existing_headers(self, status_handler_factory): + """Test that image cache headers preserve existing Cache-Control""" + handler = status_handler_factory(200, {"Cache-Control": "private, no-cache"}) + request = make_mocked_request("GET", "/image.jpg") + response = await cache_control(request, handler) + + # setdefault should preserve existing header + assert response.headers["Cache-Control"] == "private, no-cache" + + async def test_304_not_modified_inherits_cache(self, status_handler_factory): + """Test that 304 Not Modified doesn't set cache headers for images""" + handler = status_handler_factory(304, {"Cache-Control": "max-age=7200"}) + request = make_mocked_request("GET", "/not-modified.jpg") + response = await cache_control(request, handler) + + assert response.status == 304 + # Should preserve existing cache header, not override + assert response.headers["Cache-Control"] == "max-age=7200" From 2ee7879a0bdbf507bfd26f8b36eca2fef147c29d Mon Sep 17 00:00:00 2001 From: 
comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 5 Sep 2025 11:57:35 -0700 Subject: [PATCH 0547/1073] Fix lowvram issues with hunyuan3d 2.1 (#9735) --- comfy/ldm/hunyuan3dv2_1/hunyuandit.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/hunyuan3dv2_1/hunyuandit.py b/comfy/ldm/hunyuan3dv2_1/hunyuandit.py index 48575bb3c..ca1a83001 100644 --- a/comfy/ldm/hunyuan3dv2_1/hunyuandit.py +++ b/comfy/ldm/hunyuan3dv2_1/hunyuandit.py @@ -3,6 +3,7 @@ import torch import torch.nn as nn import torch.nn.functional as F from comfy.ldm.modules.attention import optimized_attention +import comfy.model_management class GELU(nn.Module): @@ -88,7 +89,7 @@ class MoEGate(nn.Module): hidden_states = hidden_states.view(-1, hidden_states.size(-1)) # get logits and pass it to softmax - logits = F.linear(hidden_states, self.weight, bias = None) + logits = F.linear(hidden_states, comfy.model_management.cast_to(self.weight, dtype=hidden_states.dtype, device=hidden_states.device), bias = None) scores = logits.softmax(dim = -1) topk_weight, topk_idx = torch.topk(scores, k = self.top_k, dim = -1, sorted = False) @@ -255,7 +256,7 @@ class TimestepEmbedder(nn.Module): cond_embed = self.cond_proj(condition) timestep_embed = timestep_embed + cond_embed - time_conditioned = self.mlp(timestep_embed.to(self.mlp[0].weight.device)) + time_conditioned = self.mlp(timestep_embed) # for broadcasting with image tokens return time_conditioned.unsqueeze(1) From ea6cdd2631fbca6ed81b95796150c32c9a029f0d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 5 Sep 2025 22:05:05 -0700 Subject: [PATCH 0548/1073] Print all fast options in --help (#9737) --- comfy/cli_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 72eeaea9a..cc1f12482 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -145,7 +145,7 @@ class PerformanceFeature(enum.Enum): CublasOps = "cublas_ops" AutoTune = "autotune" -parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops") +parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: {}".format(" ".join(map(lambda c: c.value, PerformanceFeature)))) parser.add_argument("--mmap-torch-files", action="store_true", help="Use mmap when loading ckpt/pt files.") parser.add_argument("--disable-mmap", action="store_true", help="Don't use mmap when loading safetensors.") From 27a0fcccc376fef6f035ed97664db8aa7e2e6117 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 6 Sep 2025 20:25:22 -0700 Subject: [PATCH 0549/1073] Enable bf16 VAE on RDNA4. 
(#9746) --- comfy/model_management.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index d08aee1fe..17516b6ed 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -289,6 +289,21 @@ def is_amd(): return True return False +def amd_min_version(device=None, min_rdna_version=0): + if not is_amd(): + return False + + arch = torch.cuda.get_device_properties(device).gcnArchName + if arch.startswith('gfx') and len(arch) == 7: + try: + cmp_rdna_version = int(arch[4]) + 2 + except: + cmp_rdna_version = 0 + if cmp_rdna_version >= min_rdna_version: + return True + + return False + MIN_WEIGHT_MEMORY_RATIO = 0.4 if is_nvidia(): MIN_WEIGHT_MEMORY_RATIO = 0.0 @@ -905,7 +920,9 @@ def vae_dtype(device=None, allowed_dtypes=[]): # NOTE: bfloat16 seems to work on AMD for the VAE but is extremely slow in some cases compared to fp32 # slowness still a problem on pytorch nightly 2.9.0.dev20250720+rocm6.4 tested on RDNA3 - if d == torch.bfloat16 and (not is_amd()) and should_use_bf16(device): + # also a problem on RDNA4 except fp32 is also slow there. + # This is due to large bf16 convolutions being extremely slow. + if d == torch.bfloat16 and ((not is_amd()) or amd_min_version(device, min_rdna_version=4)) and should_use_bf16(device): return d return torch.float32 From bcbd7884e3af5ee8b6ab848da2a3123f247d6114 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 6 Sep 2025 21:29:38 -0700 Subject: [PATCH 0550/1073] Don't enable pytorch attention on AMD if triton isn't available. (#9747) --- comfy/model_management.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 17516b6ed..cb6580f73 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -22,6 +22,7 @@ from enum import Enum from comfy.cli_args import args, PerformanceFeature import torch import sys +import importlib import platform import weakref import gc @@ -336,12 +337,13 @@ try: logging.info("AMD arch: {}".format(arch)) logging.info("ROCm version: {}".format(rocm_version)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: - if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much - if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950 - ENABLE_PYTORCH_ATTENTION = True -# if torch_version_numeric >= (2, 8): -# if any((a in arch) for a in ["gfx1201"]): -# ENABLE_PYTORCH_ATTENTION = True + if importlib.util.find_spec('triton') is not None: # AMD efficient attention implementation depends on triton. TODO: better way of detecting if it's compiled in or not. 
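# Worked example of the gcnArchName parsing in amd_min_version above: for
# 7-character gfx names, int(arch[4]) + 2 approximates the RDNA generation
# (the arch strings here are assumptions for illustration):
for arch in ("gfx1100", "gfx1151", "gfx1201"):
    print(arch, "-> RDNA", int(arch[4]) + 2)          # 3, 3 and 4 respectively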
+ if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much + if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950 + ENABLE_PYTORCH_ATTENTION = True +# if torch_version_numeric >= (2, 8): +# if any((a in arch) for a in ["gfx1201"]): +# ENABLE_PYTORCH_ATTENTION = True if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches SUPPORT_FP8_OPS = True From fb763d43332aaf15e96350cf1c25e2a1927423f1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 7 Sep 2025 18:16:29 -0700 Subject: [PATCH 0551/1073] Fix amd_min_version crash when cpu device. (#9754) --- comfy/model_management.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index cb6580f73..bbfc3c7a1 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -294,6 +294,9 @@ def amd_min_version(device=None, min_rdna_version=0): if not is_amd(): return False + if is_device_cpu(device): + return False + arch = torch.cuda.get_device_properties(device).gcnArchName if arch.startswith('gfx') and len(arch) == 7: try: From bd1d9bcd5fcdb8379ce5a8020cb2b8f42de1b7c7 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Mon, 8 Sep 2025 12:07:04 -0700 Subject: [PATCH 0552/1073] Add ZeroDivisionError catch for EasyCache logging statement (#9768) --- comfy_extras/nodes_easycache.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py index 9d2988f5f..c170e9fd9 100644 --- a/comfy_extras/nodes_easycache.py +++ b/comfy_extras/nodes_easycache.py @@ -162,7 +162,12 @@ def easycache_sample_wrapper(executor, *args, **kwargs): logging.info(f"{easycache.name} [verbose] - output_change_rates {len(output_change_rates)}: {output_change_rates}") logging.info(f"{easycache.name} [verbose] - approx_output_change_rates {len(approx_output_change_rates)}: {approx_output_change_rates}") total_steps = len(args[3])-1 - logging.info(f"{easycache.name} - skipped {easycache.total_steps_skipped}/{total_steps} steps ({total_steps/(total_steps-easycache.total_steps_skipped):.2f}x speedup).") + # catch division by zero for log statement; sucks to crash after all sampling is done + try: + speedup = total_steps/(total_steps-easycache.total_steps_skipped) + except ZeroDivisionError: + speedup = 1.0 + logging.info(f"{easycache.name} - skipped {easycache.total_steps_skipped}/{total_steps} steps ({speedup:.2f}x speedup).") easycache.reset() guider.model_options = orig_model_options From 97652d26b81f83fc9a3675be55ede7762fafb7bd Mon Sep 17 00:00:00 2001 From: contentis Date: Mon, 8 Sep 2025 21:08:18 +0200 Subject: [PATCH 0553/1073] Add explicit casting in apply_rope for Qwen VL (#9759) --- comfy/text_encoders/llama.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 4c976058f..5e11956b5 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -128,11 +128,12 @@ def precompute_freqs_cis(head_dim, position_ids, theta, rope_dims=None, device=N def apply_rope(xq, xk, freqs_cis): + org_dtype = xq.dtype cos = freqs_cis[0] sin = freqs_cis[1] q_embed = (xq * cos) + (rotate_half(xq) * sin) k_embed = (xk * cos) + (rotate_half(xk) * sin) - return q_embed, k_embed + return q_embed.to(org_dtype), 
From 97652d26b81f83fc9a3675be55ede7762fafb7bd Mon Sep 17 00:00:00 2001
From: contentis
Date: Mon, 8 Sep 2025 21:08:18 +0200
Subject: [PATCH 0553/1073] Add explicit casting in apply_rope for Qwen VL (#9759)

---
 comfy/text_encoders/llama.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py
index 4c976058f..5e11956b5 100644
--- a/comfy/text_encoders/llama.py
+++ b/comfy/text_encoders/llama.py
@@ -128,11 +128,12 @@ def precompute_freqs_cis(head_dim, position_ids, theta, rope_dims=None, device=N

 def apply_rope(xq, xk, freqs_cis):
+    org_dtype = xq.dtype
     cos = freqs_cis[0]
     sin = freqs_cis[1]
     q_embed = (xq * cos) + (rotate_half(xq) * sin)
     k_embed = (xk * cos) + (rotate_half(xk) * sin)
-    return q_embed, k_embed
+    return q_embed.to(org_dtype), k_embed.to(org_dtype)

 class Attention(nn.Module):

From 103a12cb668303f197b22f52bb2981bb1539beea Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 8 Sep 2025 14:30:26 -0700
Subject: [PATCH 0554/1073] Support qwen inpaint controlnet. (#9772)

---
 comfy/controlnet.py | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index e3dfedf55..f08ff4b36 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -253,7 +253,10 @@ class ControlNet(ControlBase):
             to_concat = []
             for c in self.extra_concat_orig:
                 c = c.to(self.cond_hint.device)
-                c = comfy.utils.common_upscale(c, self.cond_hint.shape[3], self.cond_hint.shape[2], self.upscale_algorithm, "center")
+                c = comfy.utils.common_upscale(c, self.cond_hint.shape[-1], self.cond_hint.shape[-2], self.upscale_algorithm, "center")
+                if c.ndim < self.cond_hint.ndim:
+                    c = c.unsqueeze(2)
+                    c = comfy.utils.repeat_to_batch_size(c, self.cond_hint.shape[2], dim=2)
                 to_concat.append(comfy.utils.repeat_to_batch_size(c, self.cond_hint.shape[0]))
             self.cond_hint = torch.cat([self.cond_hint] + to_concat, dim=1)

@@ -585,11 +588,18 @@ def load_controlnet_flux_instantx(sd, model_options={}):

 def load_controlnet_qwen_instantx(sd, model_options={}):
     model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device = controlnet_config(sd, model_options=model_options)
-    control_model = comfy.ldm.qwen_image.controlnet.QwenImageControlNetModel(operations=operations, device=offload_device, dtype=unet_dtype, **model_config.unet_config)
+    control_latent_channels = sd.get("controlnet_x_embedder.weight").shape[1]
+
+    extra_condition_channels = 0
+    concat_mask = False
+    if control_latent_channels == 68: #inpaint controlnet
+        extra_condition_channels = control_latent_channels - 64
+        concat_mask = True
+    control_model = comfy.ldm.qwen_image.controlnet.QwenImageControlNetModel(extra_condition_channels=extra_condition_channels, operations=operations, device=offload_device, dtype=unet_dtype, **model_config.unet_config)
     control_model = controlnet_load_state_dict(control_model, sd)
     latent_format = comfy.latent_formats.Wan21()
     extra_conds = []
-    control = ControlNet(control_model, compression_ratio=1, latent_format=latent_format, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds)
+    control = ControlNet(control_model, compression_ratio=1, latent_format=latent_format, concat_mask=concat_mask, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds)
     return control

 def convert_mistoline(sd):

From f73b176abd6b3e3b587b668fa6748107deef311c Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Tue, 9 Sep 2025 21:40:29 +0300
Subject: [PATCH 0555/1073] add ByteDance video API nodes (#9712)

---
 comfy_api_nodes/nodes_bytedance.py | 697 ++++++++++++++++++++++++++++-
 1 file changed, 686 insertions(+), 11 deletions(-)

diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py
index fb6aba7fa..064df2d10 100644
--- a/comfy_api_nodes/nodes_bytedance.py
+++ b/comfy_api_nodes/nodes_bytedance.py
@@ -1,6 +1,7 @@
 import logging
+import math
 from enum import Enum
-from typing import Optional
+from typing import Literal, Optional, Type, Union
 from typing_extensions import override

 import torch
@@ -10,28 +11,53 @@ from comfy_api.latest import ComfyExtension, io as comfy_io
 from comfy_api_nodes.util.validation_utils import (
validate_image_aspect_ratio_range, get_number_of_images, + validate_image_dimensions, ) from comfy_api_nodes.apis.client import ( ApiEndpoint, + EmptyRequest, HttpMethod, SynchronousOperation, + PollingOperation, + T, +) +from comfy_api_nodes.apinode_utils import ( + download_url_to_image_tensor, + download_url_to_video_output, + upload_images_to_comfyapi, + validate_string, + image_tensor_pair_to_batch, ) -from comfy_api_nodes.apinode_utils import download_url_to_image_tensor, upload_images_to_comfyapi, validate_string -BYTEPLUS_ENDPOINT = "/proxy/byteplus/api/v3/images/generations" +BYTEPLUS_IMAGE_ENDPOINT = "/proxy/byteplus/api/v3/images/generations" + +# Long-running tasks endpoints(e.g., video) +BYTEPLUS_TASK_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks" +BYTEPLUS_TASK_STATUS_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks" # + /{task_id} class Text2ImageModelName(str, Enum): - seedream3 = "seedream-3-0-t2i-250415" + seedream_3 = "seedream-3-0-t2i-250415" class Image2ImageModelName(str, Enum): - seededit3 = "seededit-3-0-i2i-250628" + seededit_3 = "seededit-3-0-i2i-250628" + + +class Text2VideoModelName(str, Enum): + seedance_1_pro = "seedance-1-0-pro-250528" + seedance_1_lite = "seedance-1-0-lite-t2v-250428" + + +class Image2VideoModelName(str, Enum): + """note(August 31): Pro model only supports FirstFrame: https://docs.byteplus.com/en/docs/ModelArk/1520757""" + seedance_1_pro = "seedance-1-0-pro-250528" + seedance_1_lite = "seedance-1-0-lite-i2v-250428" class Text2ImageTaskCreationRequest(BaseModel): - model: Text2ImageModelName = Text2ImageModelName.seedream3 + model: Text2ImageModelName = Text2ImageModelName.seedream_3 prompt: str = Field(...) response_format: Optional[str] = Field("url") size: Optional[str] = Field(None) @@ -41,7 +67,7 @@ class Text2ImageTaskCreationRequest(BaseModel): class Image2ImageTaskCreationRequest(BaseModel): - model: Image2ImageModelName = Image2ImageModelName.seededit3 + model: Image2ImageModelName = Image2ImageModelName.seededit_3 prompt: str = Field(...) response_format: Optional[str] = Field("url") image: str = Field(..., description="Base64 encoded string or image URL") @@ -58,6 +84,52 @@ class ImageTaskCreationResponse(BaseModel): error: dict = Field({}, description="Contains `code` and `message` fields in case of error.") +class TaskTextContent(BaseModel): + type: str = Field("text") + text: str = Field(...) + + +class TaskImageContentUrl(BaseModel): + url: str = Field(...) + + +class TaskImageContent(BaseModel): + type: str = Field("image_url") + image_url: TaskImageContentUrl = Field(...) + role: Optional[Literal["first_frame", "last_frame", "reference_image"]] = Field(None) + + +class Text2VideoTaskCreationRequest(BaseModel): + model: Text2VideoModelName = Text2VideoModelName.seedance_1_pro + content: list[TaskTextContent] = Field(..., min_length=1) + + +class Image2VideoTaskCreationRequest(BaseModel): + model: Image2VideoModelName = Image2VideoModelName.seedance_1_pro + content: list[Union[TaskTextContent, TaskImageContent]] = Field(..., min_length=2) + + +class TaskCreationResponse(BaseModel): + id: str = Field(...) + + +class TaskStatusError(BaseModel): + code: str = Field(...) + message: str = Field(...) + + +class TaskStatusResult(BaseModel): + video_url: str = Field(...) + + +class TaskStatusResponse(BaseModel): + id: str = Field(...) + model: str = Field(...) + status: Literal["queued", "running", "cancelled", "succeeded", "failed"] = Field(...) 
+ error: Optional[TaskStatusError] = Field(None) + content: Optional[TaskStatusResult] = Field(None) + + RECOMMENDED_PRESETS = [ ("1024x1024 (1:1)", 1024, 1024), ("864x1152 (3:4)", 864, 1152), @@ -71,6 +143,25 @@ RECOMMENDED_PRESETS = [ ("Custom", None, None), ] +# The time in this dictionary are given for 10 seconds duration. +VIDEO_TASKS_EXECUTION_TIME = { + "seedance-1-0-lite-t2v-250428": { + "480p": 40, + "720p": 60, + "1080p": 90, + }, + "seedance-1-0-lite-i2v-250428": { + "480p": 40, + "720p": 60, + "1080p": 90, + }, + "seedance-1-0-pro-250528": { + "480p": 70, + "720p": 85, + "1080p": 115, + }, +} + def get_image_url_from_response(response: ImageTaskCreationResponse) -> str: if response.error: @@ -81,6 +172,42 @@ def get_image_url_from_response(response: ImageTaskCreationResponse) -> str: return response.data[0]["url"] +def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: + """Returns the video URL from the task status response if it exists.""" + if hasattr(response, "content") and response.content: + return response.content.video_url + return None + + +async def poll_until_finished( + auth_kwargs: dict[str, str], + task_id: str, + estimated_duration: Optional[int] = None, + node_id: Optional[str] = None, +) -> TaskStatusResponse: + """Polls the ByteDance API endpoint until the task reaches a terminal state, then returns the response.""" + return await PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"{BYTEPLUS_TASK_STATUS_ENDPOINT}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=TaskStatusResponse, + ), + completed_statuses=[ + "succeeded", + ], + failed_statuses=[ + "cancelled", + "failed", + ], + status_extractor=lambda response: response.status, + auth_kwargs=auth_kwargs, + result_url_extractor=get_video_url_from_task_status, + estimated_duration=estimated_duration, + node_id=node_id, + ).execute() + + class ByteDanceImageNode(comfy_io.ComfyNode): @classmethod @@ -94,7 +221,7 @@ class ByteDanceImageNode(comfy_io.ComfyNode): comfy_io.Combo.Input( "model", options=[model.value for model in Text2ImageModelName], - default=Text2ImageModelName.seedream3.value, + default=Text2ImageModelName.seedream_3.value, tooltip="Model name", ), comfy_io.String.Input( @@ -203,7 +330,7 @@ class ByteDanceImageNode(comfy_io.ComfyNode): } response = await SynchronousOperation( endpoint=ApiEndpoint( - path=BYTEPLUS_ENDPOINT, + path=BYTEPLUS_IMAGE_ENDPOINT, method=HttpMethod.POST, request_model=Text2ImageTaskCreationRequest, response_model=ImageTaskCreationResponse, @@ -227,7 +354,7 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): comfy_io.Combo.Input( "model", options=[model.value for model in Image2ImageModelName], - default=Image2ImageModelName.seededit3.value, + default=Image2ImageModelName.seededit_3.value, tooltip="Model name", ), comfy_io.Image.Input( @@ -313,7 +440,7 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): ) response = await SynchronousOperation( endpoint=ApiEndpoint( - path=BYTEPLUS_ENDPOINT, + path=BYTEPLUS_IMAGE_ENDPOINT, method=HttpMethod.POST, request_model=Image2ImageTaskCreationRequest, response_model=ImageTaskCreationResponse, @@ -324,12 +451,560 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) +class ByteDanceTextToVideoNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ByteDanceTextToVideoNode", + display_name="ByteDance Text to Video", 
+ category="api node/video/ByteDance", + description="Generate video using ByteDance models via api based on prompt", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in Text2VideoModelName], + default=Text2VideoModelName.seedance_1_pro.value, + tooltip="Model name", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="The text prompt used to generate the video.", + ), + comfy_io.Combo.Input( + "resolution", + options=["480p", "720p", "1080p"], + tooltip="The resolution of the output video.", + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=["16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], + tooltip="The aspect ratio of the output video.", + ), + comfy_io.Int.Input( + "duration", + default=5, + min=3, + max=12, + step=1, + tooltip="The duration of the output video in seconds.", + display_mode=comfy_io.NumberDisplay.slider, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "camera_fixed", + default=False, + tooltip="Specifies whether to fix the camera. The platform appends an instruction " + "to fix the camera to your prompt, but does not guarantee the actual effect.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the video.", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + resolution: str, + aspect_ratio: str, + duration: int, + seed: int, + camera_fixed: bool, + watermark: bool, + ) -> comfy_io.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) + + prompt = ( + f"{prompt} " + f"--resolution {resolution} " + f"--ratio {aspect_ratio} " + f"--duration {duration} " + f"--seed {seed} " + f"--camerafixed {str(camera_fixed).lower()} " + f"--watermark {str(watermark).lower()}" + ) + + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + return await process_video_task( + request_model=Text2VideoTaskCreationRequest, + payload=Text2VideoTaskCreationRequest( + model=model, + content=[TaskTextContent(text=prompt)], + ), + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))), + ) + + +class ByteDanceImageToVideoNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ByteDanceImageToVideoNode", + display_name="ByteDance Image to Video", + category="api node/video/ByteDance", + description="Generate video using ByteDance models via api based on image and prompt", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[model.value for model in Image2VideoModelName], + default=Image2VideoModelName.seedance_1_pro.value, + tooltip="Model name", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="The text prompt used to generate the video.", + ), + comfy_io.Image.Input( + "image", + tooltip="First frame to be used for the video.", + ), + 
comfy_io.Combo.Input( + "resolution", + options=["480p", "720p", "1080p"], + tooltip="The resolution of the output video.", + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], + tooltip="The aspect ratio of the output video.", + ), + comfy_io.Int.Input( + "duration", + default=5, + min=3, + max=12, + step=1, + tooltip="The duration of the output video in seconds.", + display_mode=comfy_io.NumberDisplay.slider, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "camera_fixed", + default=False, + tooltip="Specifies whether to fix the camera. The platform appends an instruction " + "to fix the camera to your prompt, but does not guarantee the actual effect.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the video.", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + image: torch.Tensor, + resolution: str, + aspect_ratio: str, + duration: int, + seed: int, + camera_fixed: bool, + watermark: bool, + ) -> comfy_io.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) + validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000) + validate_image_aspect_ratio_range(image, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 + + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + + image_url = (await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=auth_kwargs))[0] + + prompt = ( + f"{prompt} " + f"--resolution {resolution} " + f"--ratio {aspect_ratio} " + f"--duration {duration} " + f"--seed {seed} " + f"--camerafixed {str(camera_fixed).lower()} " + f"--watermark {str(watermark).lower()}" + ) + + return await process_video_task( + request_model=Image2VideoTaskCreationRequest, + payload=Image2VideoTaskCreationRequest( + model=model, + content=[TaskTextContent(text=prompt), TaskImageContent(image_url=TaskImageContentUrl(url=image_url))], + ), + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))), + ) + + +class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ByteDanceFirstLastFrameNode", + display_name="ByteDance First-Last-Frame to Video", + category="api node/video/ByteDance", + description="Generate video using prompt and first and last frames.", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[Image2VideoModelName.seedance_1_lite.value], + default=Image2VideoModelName.seedance_1_lite.value, + tooltip="Model name", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="The text prompt used to generate the video.", + ), + comfy_io.Image.Input( + "first_frame", + tooltip="First frame to be used for the video.", + ), + comfy_io.Image.Input( + "last_frame", + 
tooltip="Last frame to be used for the video.", + ), + comfy_io.Combo.Input( + "resolution", + options=["480p", "720p", "1080p"], + tooltip="The resolution of the output video.", + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], + tooltip="The aspect ratio of the output video.", + ), + comfy_io.Int.Input( + "duration", + default=5, + min=3, + max=12, + step=1, + tooltip="The duration of the output video in seconds.", + display_mode=comfy_io.NumberDisplay.slider, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "camera_fixed", + default=False, + tooltip="Specifies whether to fix the camera. The platform appends an instruction " + "to fix the camera to your prompt, but does not guarantee the actual effect.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the video.", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + first_frame: torch.Tensor, + last_frame: torch.Tensor, + resolution: str, + aspect_ratio: str, + duration: int, + seed: int, + camera_fixed: bool, + watermark: bool, + ) -> comfy_io.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) + for i in (first_frame, last_frame): + validate_image_dimensions(i, min_width=300, min_height=300, max_width=6000, max_height=6000) + validate_image_aspect_ratio_range(i, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 + + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + + download_urls = await upload_images_to_comfyapi( + image_tensor_pair_to_batch(first_frame, last_frame), + max_images=2, + mime_type="image/png", + auth_kwargs=auth_kwargs, + ) + + prompt = ( + f"{prompt} " + f"--resolution {resolution} " + f"--ratio {aspect_ratio} " + f"--duration {duration} " + f"--seed {seed} " + f"--camerafixed {str(camera_fixed).lower()} " + f"--watermark {str(watermark).lower()}" + ) + + return await process_video_task( + request_model=Image2VideoTaskCreationRequest, + payload=Image2VideoTaskCreationRequest( + model=model, + content=[ + TaskTextContent(text=prompt), + TaskImageContent(image_url=TaskImageContentUrl(url=str(download_urls[0])), role="first_frame"), + TaskImageContent(image_url=TaskImageContentUrl(url=str(download_urls[1])), role="last_frame"), + ], + ), + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))), + ) + + +class ByteDanceImageReferenceNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ByteDanceImageReferenceNode", + display_name="ByteDance Reference Images to Video", + category="api node/video/ByteDance", + description="Generate video using prompt and reference images.", + inputs=[ + comfy_io.Combo.Input( + "model", + options=[Image2VideoModelName.seedance_1_lite.value], + 
default=Image2VideoModelName.seedance_1_lite.value, + tooltip="Model name", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="The text prompt used to generate the video.", + ), + comfy_io.Image.Input( + "images", + tooltip="One to four images.", + ), + comfy_io.Combo.Input( + "resolution", + options=["480p", "720p"], + tooltip="The resolution of the output video.", + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], + tooltip="The aspect ratio of the output video.", + ), + comfy_io.Int.Input( + "duration", + default=5, + min=3, + max=12, + step=1, + tooltip="The duration of the output video in seconds.", + display_mode=comfy_io.NumberDisplay.slider, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the video.", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + images: torch.Tensor, + resolution: str, + aspect_ratio: str, + duration: int, + seed: int, + watermark: bool, + ) -> comfy_io.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "watermark"]) + for image in images: + validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000) + validate_image_aspect_ratio_range(image, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 + + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + + image_urls = await upload_images_to_comfyapi( + images, max_images=4, mime_type="image/png", auth_kwargs=auth_kwargs + ) + + prompt = ( + f"{prompt} " + f"--resolution {resolution} " + f"--ratio {aspect_ratio} " + f"--duration {duration} " + f"--seed {seed} " + f"--watermark {str(watermark).lower()}" + ) + x = [ + TaskTextContent(text=prompt), + *[TaskImageContent(image_url=TaskImageContentUrl(url=str(i)), role="reference_image") for i in image_urls] + ] + return await process_video_task( + request_model=Image2VideoTaskCreationRequest, + payload=Image2VideoTaskCreationRequest( + model=model, + content=x, + ), + auth_kwargs=auth_kwargs, + node_id=cls.hidden.unique_id, + estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))), + ) + + +async def process_video_task( + request_model: Type[T], + payload: Union[Text2VideoTaskCreationRequest, Image2VideoTaskCreationRequest], + auth_kwargs: dict, + node_id: str, + estimated_duration: int | None, +) -> comfy_io.NodeOutput: + initial_response = await SynchronousOperation( + endpoint=ApiEndpoint( + path=BYTEPLUS_TASK_ENDPOINT, + method=HttpMethod.POST, + request_model=request_model, + response_model=TaskCreationResponse, + ), + request=payload, + auth_kwargs=auth_kwargs, + ).execute() + response = await poll_until_finished( + auth_kwargs, + initial_response.id, + estimated_duration=estimated_duration, + node_id=node_id, + ) + return comfy_io.NodeOutput(await 
download_url_to_video_output(get_video_url_from_task_status(response)))
+
+
+def raise_if_text_params(prompt: str, text_params: list[str]) -> None:
+    for i in text_params:
+        if f"--{i} " in prompt:
+            raise ValueError(
+                f"--{i} is not allowed in the prompt, use the appropriate widget input to change this value."
+            )
+
+
 class ByteDanceExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
         return [
             ByteDanceImageNode,
             ByteDanceImageEditNode,
+            ByteDanceTextToVideoNode,
+            ByteDanceImageToVideoNode,
+            ByteDanceFirstLastFrameNode,
+            ByteDanceImageReferenceNode,
         ]

 async def comfy_entrypoint() -> ByteDanceExtension:

From b288fb0db88281532d813d4fb83f715f88b54ffc Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 9 Sep 2025 15:09:56 -0700
Subject: [PATCH 0556/1073] Small refactor of some vae code. (#9787)

---
 comfy/ldm/modules/diffusionmodules/model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 1fd12b35a..8f598a848 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -145,7 +145,7 @@ class Downsample(nn.Module):

 class ResnetBlock(nn.Module):
     def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
-                 dropout, temb_channels=512, conv_op=ops.Conv2d):
+                 dropout=0.0, temb_channels=512, conv_op=ops.Conv2d):
         super().__init__()
         self.in_channels = in_channels
         out_channels = in_channels if out_channels is None else out_channels
@@ -183,7 +183,7 @@ class ResnetBlock(nn.Module):
                                                  stride=1,
                                                  padding=0)

-    def forward(self, x, temb):
+    def forward(self, x, temb=None):
         h = x
         h = self.norm1(h)
         h = self.swish(h)

From 206595f854c67538d5921d36326acbfeb69c5ac2 Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Tue, 9 Sep 2025 18:33:36 -0700
Subject: [PATCH 0557/1073] Change validate_inputs' output typehint to 'bool |
 str' and update docstrings (#9786)

---
 comfy_api/latest/_io.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py
index e0ee943a7..f770109d5 100644
--- a/comfy_api/latest/_io.py
+++ b/comfy_api/latest/_io.py
@@ -1190,13 +1190,18 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
         raise NotImplementedError

     @classmethod
-    def validate_inputs(cls, **kwargs) -> bool:
-        """Optionally, define this function to validate inputs; equivalent to V1's VALIDATE_INPUTS."""
+    def validate_inputs(cls, **kwargs) -> bool | str:
+        """Optionally, define this function to validate inputs; equivalent to V1's VALIDATE_INPUTS.
+
+        If the function returns a string, it will be used as the validation error message for the node.
+        """
         raise NotImplementedError

     @classmethod
     def fingerprint_inputs(cls, **kwargs) -> Any:
-        """Optionally, define this function to fingerprint inputs; equivalent to V1's IS_CHANGED."""
+        """Optionally, define this function to fingerprint inputs; equivalent to V1's IS_CHANGED.
+
+        If this function returns the same value as last run, the node will not be executed."""
         raise NotImplementedError

     @classmethod
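To make the new `bool | str` contract from patch 0557 concrete, here is a minimal hypothetical validator (the node and input names are invented for illustration): returning True passes validation, while returning a string fails it and surfaces that string as the node's error message.

    @classmethod
    def validate_inputs(cls, steps: int) -> bool | str:
        # a string return value becomes the validation error shown on the node
        if steps < 1:
            return f"steps must be a positive integer, got {steps}"
        return True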
From 5c33872e2f355e51adf212d5b5c83815b7fe77b0 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 9 Sep 2025 21:23:47 -0700
Subject: [PATCH 0558/1073] Fix issue on old torch. (#9791)

---
 comfy/ldm/hunyuan3dv2_1/hunyuandit.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/ldm/hunyuan3dv2_1/hunyuandit.py b/comfy/ldm/hunyuan3dv2_1/hunyuandit.py
index ca1a83001..d48d9d642 100644
--- a/comfy/ldm/hunyuan3dv2_1/hunyuandit.py
+++ b/comfy/ldm/hunyuan3dv2_1/hunyuandit.py
@@ -426,7 +426,7 @@ class HunYuanDiTBlock(nn.Module):
                  text_states_dim=1024,
                  qk_norm=False,
                  norm_layer=nn.LayerNorm,
-                 qk_norm_layer=nn.RMSNorm,
+                 qk_norm_layer=True,
                  qkv_bias=True,
                  skip_connection=True,
                  timested_modulate=False,

From 85e34643f874aec2ab9eed6a8499f2aefa81486e Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 9 Sep 2025 23:05:07 -0700
Subject: [PATCH 0559/1073] Support hunyuan image 2.1 regular model. (#9792)

---
 comfy/latent_formats.py | 5 +
 comfy/ldm/hunyuan_video/model.py | 102 +-
 comfy/ldm/hunyuan_video/vae.py | 136 ++
 comfy/model_base.py | 24 +
 comfy/model_detection.py | 28 +-
 comfy/sd.py | 31 +-
 comfy/supported_models.py | 27 +-
 .../byt5_config_small_glyph.json | 22 +
 .../byt5_tokenizer/added_tokens.json | 127 ++
 .../byt5_tokenizer/special_tokens_map.json | 150 +++
 .../byt5_tokenizer/tokenizer_config.json | 1163 +++++++++++++++++
 comfy/text_encoders/hunyuan_image.py | 100 ++
 comfy_extras/nodes_hunyuan.py | 15 +
 nodes.py | 6 +-
 14 files changed, 1906 insertions(+), 30 deletions(-)
 create mode 100644 comfy/ldm/hunyuan_video/vae.py
 create mode 100644 comfy/text_encoders/byt5_config_small_glyph.json
 create mode 100644 comfy/text_encoders/byt5_tokenizer/added_tokens.json
 create mode 100644 comfy/text_encoders/byt5_tokenizer/special_tokens_map.json
 create mode 100644 comfy/text_encoders/byt5_tokenizer/tokenizer_config.json
 create mode 100644 comfy/text_encoders/hunyuan_image.py

diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py
index 0d84994b0..859ae8421 100644
--- a/comfy/latent_formats.py
+++ b/comfy/latent_formats.py
@@ -533,6 +533,11 @@ class Wan22(Wan21):
             0.3971, 1.0600, 0.3943, 0.5537, 0.5444, 0.4089, 0.7468, 0.7744
         ]).view(1, self.latent_channels, 1, 1, 1)

+class HunyuanImage21(LatentFormat):
+    latent_channels = 64
+    latent_dimensions = 2
+    scale_factor = 0.75289
+
 class Hunyuan3Dv2(LatentFormat):
     latent_channels = 64
     latent_dimensions = 1

diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py
index da1011596..ca289c5bd 100644
--- a/comfy/ldm/hunyuan_video/model.py
+++ b/comfy/ldm/hunyuan_video/model.py
@@ -40,6 +40,7 @@ class HunyuanVideoParams:
     patch_size: list
     qkv_bias: bool
     guidance_embed: bool
+    byt5: bool

 class SelfAttentionRef(nn.Module):
@@ -161,6 +162,30 @@ class TokenRefiner(nn.Module):
         x = self.individual_token_refiner(x, c, mask)
         return x

+
+class ByT5Mapper(nn.Module):
+    def __init__(self, in_dim, out_dim, hidden_dim, out_dim1, use_res=False, dtype=None, device=None, operations=None):
+        super().__init__()
+        self.layernorm = operations.LayerNorm(in_dim, dtype=dtype, device=device)
+        self.fc1 = operations.Linear(in_dim, hidden_dim, dtype=dtype, device=device)
+        self.fc2 = operations.Linear(hidden_dim, out_dim, dtype=dtype, device=device)
+        self.fc3 = operations.Linear(out_dim, out_dim1, dtype=dtype, device=device)
+        self.use_res = use_res
+        self.act_fn = nn.GELU()
+
+    def forward(self, x):
+        if self.use_res:
+            res = x
+        x = self.layernorm(x)
+        x = self.fc1(x)
+        x = self.act_fn(x)
+        x = self.fc2(x)
+        x2 = self.act_fn(x)
+        x2 = self.fc3(x2)
+        if self.use_res:
+            x2 = x2 + res
+        return x2
+
 class HunyuanVideo(nn.Module):
     """
     Transformer model for flow
matching on sequences. @@ -185,9 +210,13 @@ class HunyuanVideo(nn.Module): self.num_heads = params.num_heads self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim) - self.img_in = comfy.ldm.modules.diffusionmodules.mmdit.PatchEmbed(None, self.patch_size, self.in_channels, self.hidden_size, conv3d=True, dtype=dtype, device=device, operations=operations) + self.img_in = comfy.ldm.modules.diffusionmodules.mmdit.PatchEmbed(None, self.patch_size, self.in_channels, self.hidden_size, conv3d=len(self.patch_size) == 3, dtype=dtype, device=device, operations=operations) self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, dtype=dtype, device=device, operations=operations) - self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size, dtype=dtype, device=device, operations=operations) + if params.vec_in_dim is not None: + self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size, dtype=dtype, device=device, operations=operations) + else: + self.vector_in = None + self.guidance_in = ( MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, dtype=dtype, device=device, operations=operations) if params.guidance_embed else nn.Identity() ) @@ -215,6 +244,18 @@ class HunyuanVideo(nn.Module): ] ) + if params.byt5: + self.byt5_in = ByT5Mapper( + in_dim=1472, + out_dim=2048, + hidden_dim=2048, + out_dim1=self.hidden_size, + use_res=False, + dtype=dtype, device=device, operations=operations + ) + else: + self.byt5_in = None + if final_layer: self.final_layer = LastLayer(self.hidden_size, self.patch_size[-1], self.out_channels, dtype=dtype, device=device, operations=operations) @@ -226,7 +267,8 @@ class HunyuanVideo(nn.Module): txt_ids: Tensor, txt_mask: Tensor, timesteps: Tensor, - y: Tensor, + y: Tensor = None, + txt_byt5=None, guidance: Tensor = None, guiding_frame_index=None, ref_latent=None, @@ -250,13 +292,17 @@ class HunyuanVideo(nn.Module): if guiding_frame_index is not None: token_replace_vec = self.time_in(timestep_embedding(guiding_frame_index, 256, time_factor=1.0)) - vec_ = self.vector_in(y[:, :self.params.vec_in_dim]) - vec = torch.cat([(vec_ + token_replace_vec).unsqueeze(1), (vec_ + vec).unsqueeze(1)], dim=1) + if self.vector_in is not None: + vec_ = self.vector_in(y[:, :self.params.vec_in_dim]) + vec = torch.cat([(vec_ + token_replace_vec).unsqueeze(1), (vec_ + vec).unsqueeze(1)], dim=1) + else: + vec = torch.cat([(token_replace_vec).unsqueeze(1), (vec).unsqueeze(1)], dim=1) frame_tokens = (initial_shape[-1] // self.patch_size[-1]) * (initial_shape[-2] // self.patch_size[-2]) modulation_dims = [(0, frame_tokens, 0), (frame_tokens, None, 1)] modulation_dims_txt = [(0, None, 1)] else: - vec = vec + self.vector_in(y[:, :self.params.vec_in_dim]) + if self.vector_in is not None: + vec = vec + self.vector_in(y[:, :self.params.vec_in_dim]) modulation_dims = None modulation_dims_txt = None @@ -269,6 +315,12 @@ class HunyuanVideo(nn.Module): txt = self.txt_in(txt, timesteps, txt_mask) + if self.byt5_in is not None and txt_byt5 is not None: + txt_byt5 = self.byt5_in(txt_byt5) + txt_byt5_ids = torch.zeros((txt_ids.shape[0], txt_byt5.shape[1], txt_ids.shape[-1]), device=txt_ids.device, dtype=txt_ids.dtype) + txt = torch.cat((txt, txt_byt5), dim=1) + txt_ids = torch.cat((txt_ids, txt_byt5_ids), dim=1) + ids = torch.cat((img_ids, txt_ids), dim=1) pe = self.pe_embedder(ids) @@ -328,12 +380,16 @@ class HunyuanVideo(nn.Module): img = self.final_layer(img, vec, modulation_dims=modulation_dims) # (N, T, patch_size ** 2 * out_channels) - shape = 
initial_shape[-3:] + shape = initial_shape[-len(self.patch_size):] for i in range(len(shape)): shape[i] = shape[i] // self.patch_size[i] img = img.reshape([img.shape[0]] + shape + [self.out_channels] + self.patch_size) - img = img.permute(0, 4, 1, 5, 2, 6, 3, 7) - img = img.reshape(initial_shape[0], self.out_channels, initial_shape[2], initial_shape[3], initial_shape[4]) + if img.ndim == 8: + img = img.permute(0, 4, 1, 5, 2, 6, 3, 7) + img = img.reshape(initial_shape[0], self.out_channels, initial_shape[2], initial_shape[3], initial_shape[4]) + else: + img = img.permute(0, 3, 1, 4, 2, 5) + img = img.reshape(initial_shape[0], self.out_channels, initial_shape[2], initial_shape[3]) return img def img_ids(self, x): @@ -348,16 +404,30 @@ class HunyuanVideo(nn.Module): img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).reshape(1, 1, -1) return repeat(img_ids, "t h w c -> b (t h w) c", b=bs) - def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): + def img_ids_2d(self, x): + bs, c, h, w = x.shape + patch_size = self.patch_size + h_len = ((h + (patch_size[0] // 2)) // patch_size[0]) + w_len = ((w + (patch_size[1] // 2)) // patch_size[1]) + img_ids = torch.zeros((h_len, w_len, 2), device=x.device, dtype=x.dtype) + img_ids[:, :, 0] = img_ids[:, :, 0] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) + img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) + return repeat(img_ids, "h w c -> b (h w) c", b=bs) + + def forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( self._forward, self, comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) - ).execute(x, timestep, context, y, guidance, attention_mask, guiding_frame_index, ref_latent, control, transformer_options, **kwargs) + ).execute(x, timestep, context, y, txt_byt5, guidance, attention_mask, guiding_frame_index, ref_latent, control, transformer_options, **kwargs) - def _forward(self, x, timestep, context, y, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): - bs, c, t, h, w = x.shape - img_ids = self.img_ids(x) - txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype) - out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, guidance, guiding_frame_index, ref_latent, control=control, transformer_options=transformer_options) + def _forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): + bs = x.shape[0] + if len(self.patch_size) == 3: + img_ids = self.img_ids(x) + txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype) + else: + img_ids = self.img_ids_2d(x) + txt_ids = torch.zeros((bs, context.shape[1], 2), device=x.device, dtype=x.dtype) + out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, txt_byt5, guidance, guiding_frame_index, ref_latent, control=control, 
transformer_options=transformer_options) return out diff --git a/comfy/ldm/hunyuan_video/vae.py b/comfy/ldm/hunyuan_video/vae.py new file mode 100644 index 000000000..8d406089b --- /dev/null +++ b/comfy/ldm/hunyuan_video/vae.py @@ -0,0 +1,136 @@ +import torch.nn as nn +import torch.nn.functional as F +from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, AttnBlock +import comfy.ops +ops = comfy.ops.disable_weight_init + + +class PixelShuffle2D(nn.Module): + def __init__(self, in_dim, out_dim, op=ops.Conv2d): + super().__init__() + self.conv = op(in_dim, out_dim >> 2, 3, 1, 1) + self.ratio = (in_dim << 2) // out_dim + + def forward(self, x): + b, c, h, w = x.shape + h2, w2 = h >> 1, w >> 1 + y = self.conv(x).view(b, -1, h2, 2, w2, 2).permute(0, 3, 5, 1, 2, 4).reshape(b, -1, h2, w2) + r = x.view(b, c, h2, 2, w2, 2).permute(0, 3, 5, 1, 2, 4).reshape(b, c << 2, h2, w2) + return y + r.view(b, y.shape[1], self.ratio, h2, w2).mean(2) + + +class PixelUnshuffle2D(nn.Module): + def __init__(self, in_dim, out_dim, op=ops.Conv2d): + super().__init__() + self.conv = op(in_dim, out_dim << 2, 3, 1, 1) + self.scale = (out_dim << 2) // in_dim + + def forward(self, x): + b, c, h, w = x.shape + h2, w2 = h << 1, w << 1 + y = self.conv(x).view(b, 2, 2, -1, h, w).permute(0, 3, 4, 1, 5, 2).reshape(b, -1, h2, w2) + r = x.repeat_interleave(self.scale, 1).view(b, 2, 2, -1, h, w).permute(0, 3, 4, 1, 5, 2).reshape(b, -1, h2, w2) + return y + r + + +class Encoder(nn.Module): + def __init__(self, in_channels, z_channels, block_out_channels, num_res_blocks, + ffactor_spatial, downsample_match_channel=True, **_): + super().__init__() + self.z_channels = z_channels + self.block_out_channels = block_out_channels + self.num_res_blocks = num_res_blocks + self.conv_in = ops.Conv2d(in_channels, block_out_channels[0], 3, 1, 1) + + self.down = nn.ModuleList() + ch = block_out_channels[0] + depth = (ffactor_spatial >> 1).bit_length() + + for i, tgt in enumerate(block_out_channels): + stage = nn.Module() + stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + temb_channels=0, + conv_op=ops.Conv2d) + for j in range(num_res_blocks)]) + ch = tgt + if i < depth: + nxt = block_out_channels[i + 1] if i + 1 < len(block_out_channels) and downsample_match_channel else ch + stage.downsample = PixelShuffle2D(ch, nxt, ops.Conv2d) + ch = nxt + self.down.append(stage) + + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=ops.Conv2d) + self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv2d) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=ops.Conv2d) + + self.norm_out = nn.GroupNorm(32, ch, 1e-6, True) + self.conv_out = ops.Conv2d(ch, z_channels << 1, 3, 1, 1) + + def forward(self, x): + x = self.conv_in(x) + + for stage in self.down: + for blk in stage.block: + x = blk(x) + if hasattr(stage, 'downsample'): + x = stage.downsample(x) + + x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(x))) + + b, c, h, w = x.shape + grp = c // (self.z_channels << 1) + skip = x.view(b, c // grp, grp, h, w).mean(2) + + return self.conv_out(F.silu(self.norm_out(x))) + skip + + +class Decoder(nn.Module): + def __init__(self, z_channels, out_channels, block_out_channels, num_res_blocks, + ffactor_spatial, upsample_match_channel=True, **_): + super().__init__() + block_out_channels = block_out_channels[::-1] + self.z_channels = z_channels + self.block_out_channels = block_out_channels + self.num_res_blocks = 
num_res_blocks + + ch = block_out_channels[0] + self.conv_in = ops.Conv2d(z_channels, ch, 3, 1, 1) + + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=ops.Conv2d) + self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv2d) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=ops.Conv2d) + + self.up = nn.ModuleList() + depth = (ffactor_spatial >> 1).bit_length() + + for i, tgt in enumerate(block_out_channels): + stage = nn.Module() + stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + temb_channels=0, + conv_op=ops.Conv2d) + for j in range(num_res_blocks + 1)]) + ch = tgt + if i < depth: + nxt = block_out_channels[i + 1] if i + 1 < len(block_out_channels) and upsample_match_channel else ch + stage.upsample = PixelUnshuffle2D(ch, nxt, ops.Conv2d) + ch = nxt + self.up.append(stage) + + self.norm_out = nn.GroupNorm(32, ch, 1e-6, True) + self.conv_out = ops.Conv2d(ch, out_channels, 3, 1, 1) + + def forward(self, z): + x = self.conv_in(z) + z.repeat_interleave(self.block_out_channels[0] // self.z_channels, 1) + x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(x))) + + for stage in self.up: + for blk in stage.block: + x = blk(x) + if hasattr(stage, 'upsample'): + x = stage.upsample(x) + + return self.conv_out(F.silu(self.norm_out(x))) diff --git a/comfy/model_base.py b/comfy/model_base.py index 39a3344bc..993ff65e6 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1408,3 +1408,27 @@ class QwenImage(BaseModel): if ref_latents is not None: out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16]) return out + +class HunyuanImage21(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hunyuan_video.model.HunyuanVideo) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + attention_mask = kwargs.get("attention_mask", None) + if attention_mask is not None: + if torch.numel(attention_mask) != attention_mask.sum(): + out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + + conditioning_byt5small = kwargs.get("conditioning_byt5small", None) + if conditioning_byt5small is not None: + out['txt_byt5'] = comfy.conds.CONDRegular(conditioning_byt5small) + + guidance = kwargs.get("guidance", 6.0) + if guidance is not None: + out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) + + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 75552ede9..dbcbe5f5a 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -136,20 +136,32 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): if '{}txt_in.individual_token_refiner.blocks.0.norm1.weight'.format(key_prefix) in state_dict_keys: #Hunyuan Video dit_config = {} + in_w = state_dict['{}img_in.proj.weight'.format(key_prefix)] + out_w = state_dict['{}final_layer.linear.weight'.format(key_prefix)] dit_config["image_model"] = "hunyuan_video" - dit_config["in_channels"] = state_dict['{}img_in.proj.weight'.format(key_prefix)].shape[1] #SkyReels img2video has 32 input channels - dit_config["patch_size"] = [1, 2, 2] - dit_config["out_channels"] = 16 - dit_config["vec_in_dim"] = 768 - dit_config["context_in_dim"] = 4096 - 
dit_config["hidden_size"] = 3072 + dit_config["in_channels"] = in_w.shape[1] #SkyReels img2video has 32 input channels + dit_config["patch_size"] = list(in_w.shape[2:]) + dit_config["out_channels"] = out_w.shape[0] // math.prod(dit_config["patch_size"]) + if '{}vector_in.in_layer.weight'.format(key_prefix) in state_dict: + dit_config["vec_in_dim"] = 768 + dit_config["axes_dim"] = [16, 56, 56] + else: + dit_config["vec_in_dim"] = None + dit_config["axes_dim"] = [64, 64] + + dit_config["context_in_dim"] = state_dict['{}txt_in.input_embedder.weight'.format(key_prefix)].shape[1] + dit_config["hidden_size"] = in_w.shape[0] dit_config["mlp_ratio"] = 4.0 - dit_config["num_heads"] = 24 + dit_config["num_heads"] = in_w.shape[0] // 128 dit_config["depth"] = count_blocks(state_dict_keys, '{}double_blocks.'.format(key_prefix) + '{}.') dit_config["depth_single_blocks"] = count_blocks(state_dict_keys, '{}single_blocks.'.format(key_prefix) + '{}.') - dit_config["axes_dim"] = [16, 56, 56] dit_config["theta"] = 256 dit_config["qkv_bias"] = True + if '{}byt5_in.fc1.weight'.format(key_prefix) in state_dict: + dit_config["byt5"] = True + else: + dit_config["byt5"] = False + guidance_keys = list(filter(lambda a: a.startswith("{}guidance_in.".format(key_prefix)), state_dict_keys)) dit_config["guidance_embed"] = len(guidance_keys) > 0 return dit_config diff --git a/comfy/sd.py b/comfy/sd.py index be5aa8dc8..9dd9a74d4 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -17,6 +17,7 @@ import comfy.ldm.wan.vae import comfy.ldm.wan.vae2_2 import comfy.ldm.hunyuan3d.vae import comfy.ldm.ace.vae.music_dcae_pipeline +import comfy.ldm.hunyuan_video.vae import yaml import math import os @@ -48,6 +49,7 @@ import comfy.text_encoders.hidream import comfy.text_encoders.ace import comfy.text_encoders.omnigen2 import comfy.text_encoders.qwen_image +import comfy.text_encoders.hunyuan_image import comfy.model_patcher import comfy.lora @@ -328,6 +330,19 @@ class VAE: self.first_stage_model = StageC_coder() self.downscale_ratio = 32 self.latent_channels = 16 + elif "decoder.conv_in.weight" in sd and sd['decoder.conv_in.weight'].shape[1] == 64: + ddconfig = {"block_out_channels": [128, 256, 512, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 32, "downsample_match_channel": True, "upsample_match_channel": True} + self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1] + self.downscale_ratio = 32 + self.upscale_ratio = 32 + self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] + self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"}, + encoder_config={'target': "comfy.ldm.hunyuan_video.vae.Encoder", 'params': ddconfig}, + decoder_config={'target': "comfy.ldm.hunyuan_video.vae.Decoder", 'params': ddconfig}) + + self.memory_used_encode = lambda shape, dtype: (700 * shape[2] * shape[3]) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (700 * shape[2] * shape[3] * 32 * 32) * model_management.dtype_size(dtype) + elif "decoder.conv_in.weight" in sd: #default SD1.x/SD2.x VAE parameters ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} @@ -785,6 +800,7 @@ class CLIPType(Enum): ACE = 16 OMNIGEN2 = 17 QWEN_IMAGE = 18 + HUNYUAN_IMAGE = 19 def load_clip(ckpt_paths, embedding_directory=None, 
clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -806,6 +822,7 @@ class TEModel(Enum): GEMMA_2_2B = 9 QWEN25_3B = 10 QWEN25_7B = 11 + BYT5_SMALL_GLYPH = 12 def detect_te_model(sd): if "text_model.encoder.layers.30.mlp.fc1.weight" in sd: @@ -823,6 +840,9 @@ def detect_te_model(sd): if 'encoder.block.23.layer.1.DenseReluDense.wi.weight' in sd: return TEModel.T5_XXL_OLD if "encoder.block.0.layer.0.SelfAttention.k.weight" in sd: + weight = sd['encoder.block.0.layer.0.SelfAttention.k.weight'] + if weight.shape[0] == 384: + return TEModel.BYT5_SMALL_GLYPH return TEModel.T5_BASE if 'model.layers.0.post_feedforward_layernorm.weight' in sd: return TEModel.GEMMA_2_2B @@ -937,8 +957,12 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.omnigen2.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.omnigen2.Omnigen2Tokenizer elif te_model == TEModel.QWEN25_7B: - clip_target.clip = comfy.text_encoders.qwen_image.te(**llama_detect(clip_data)) - clip_target.tokenizer = comfy.text_encoders.qwen_image.QwenImageTokenizer + if clip_type == CLIPType.HUNYUAN_IMAGE: + clip_target.clip = comfy.text_encoders.hunyuan_image.te(byt5=False, **llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.hunyuan_image.HunyuanImageTokenizer + else: + clip_target.clip = comfy.text_encoders.qwen_image.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.qwen_image.QwenImageTokenizer else: # clip_l if clip_type == CLIPType.SD3: @@ -982,6 +1006,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.hidream.hidream_clip(clip_l=clip_l, clip_g=clip_g, t5=t5, llama=llama, **t5_kwargs, **llama_kwargs) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer + elif clip_type == CLIPType.HUNYUAN_IMAGE: + clip_target.clip = comfy.text_encoders.hunyuan_image.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.hunyuan_image.HunyuanImageTokenizer else: clip_target.clip = sdxl_clip.SDXLClipModel clip_target.tokenizer = sdxl_clip.SDXLTokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 75dad277d..aa953b462 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -20,6 +20,7 @@ import comfy.text_encoders.wan import comfy.text_encoders.ace import comfy.text_encoders.omnigen2 import comfy.text_encoders.qwen_image +import comfy.text_encoders.hunyuan_image from . import supported_models_base from . 
import latent_formats @@ -1295,7 +1296,31 @@ class QwenImage(supported_models_base.BASE): hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.qwen_image.QwenImageTokenizer, comfy.text_encoders.qwen_image.te(**hunyuan_detect)) +class HunyuanImage21(HunyuanVideo): + unet_config = { + "image_model": "hunyuan_video", + "vec_in_dim": None, + } -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] + sampling_settings = { + "shift": 5.0, + } + + latent_format = latent_formats.HunyuanImage21 + + memory_usage_factor = 7.7 + + supported_inference_dtypes = [torch.bfloat16, torch.float32] + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.HunyuanImage21(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_image.HunyuanImageTokenizer, comfy.text_encoders.hunyuan_image.te(**hunyuan_detect)) + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy/text_encoders/byt5_config_small_glyph.json b/comfy/text_encoders/byt5_config_small_glyph.json new file mode 100644 index 000000000..0239c7164 --- /dev/null +++ b/comfy/text_encoders/byt5_config_small_glyph.json @@ -0,0 +1,22 @@ +{ + "d_ff": 3584, + "d_kv": 64, + "d_model": 1472, + "decoder_start_token_id": 0, + "dropout_rate": 0.1, + "eos_token_id": 1, + "dense_act_fn": "gelu_pytorch_tanh", + "initializer_factor": 1.0, + "is_encoder_decoder": true, + "is_gated_act": true, + "layer_norm_epsilon": 1e-06, + "model_type": "t5", + "num_decoder_layers": 4, + "num_heads": 6, + "num_layers": 12, + "output_past": true, + "pad_token_id": 0, + "relative_attention_num_buckets": 32, + "tie_word_embeddings": false, + "vocab_size": 1510 +} diff --git a/comfy/text_encoders/byt5_tokenizer/added_tokens.json b/comfy/text_encoders/byt5_tokenizer/added_tokens.json new file mode 100644 index 000000000..93c190b56 --- /dev/null +++ 
b/comfy/text_encoders/byt5_tokenizer/added_tokens.json @@ -0,0 +1,127 @@
+{
 [... 125 added-token entries mapping ids 259-383; the angle-bracketed token strings did not survive extraction ...]
+}
diff --git a/comfy/text_encoders/byt5_tokenizer/special_tokens_map.json b/comfy/text_encoders/byt5_tokenizer/special_tokens_map.json new file mode 100644 index 000000000..04fd58b5f --- /dev/null +++ b/comfy/text_encoders/byt5_tokenizer/special_tokens_map.json @@ -0,0 +1,150 @@
+{
+ "additional_special_tokens": [
 [... the same 125 token strings, also lost in extraction ...]
+ ],
+ "eos_token": { + "content": "</s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + },
+ "pad_token": { + "content": "<pad>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + },
+ "unk_token": { + "content": "<unk>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }
+}
diff --git a/comfy/text_encoders/byt5_tokenizer/tokenizer_config.json b/comfy/text_encoders/byt5_tokenizer/tokenizer_config.json new file mode 100644 index 000000000..5b1fe24c1 --- /dev/null +++ b/comfy/text_encoders/byt5_tokenizer/tokenizer_config.json @@ -0,0 +1,1163 @@
+{
+ "added_tokens_decoder": {
+ "0": { + "content": "<pad>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + },
+ "1": { + "content": "</s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + },
+ "2": { + "content": "<unk>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false, + "special": true + },
 [... entries "259" through "383", one per added token, each with "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true; the token strings themselves were lost in extraction ...]
+ },
+ "additional_special_tokens": [
 [... the same 125 token strings, lost in extraction ...]
+ ],
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "extra_ids": 0,
+ "extra_special_tokens": {},
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "tokenizer_class": "ByT5Tokenizer",
+ "unk_token": "<unk>"
+}
diff --git a/comfy/text_encoders/hunyuan_image.py b/comfy/text_encoders/hunyuan_image.py new file mode 100644 index 000000000..be396cae7 --- /dev/null +++ b/comfy/text_encoders/hunyuan_image.py @@ -0,0 +1,100 @@
+from comfy import sd1_clip
+import comfy.text_encoders.llama
+from .qwen_image import QwenImageTokenizer, QwenImageTEModel
+from transformers import ByT5Tokenizer
+import os
+import re
+
+class ByT5SmallTokenizer(sd1_clip.SDTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}):
+ tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "byt5_tokenizer")
+ super().__init__(tokenizer_path, pad_with_end=False, embedding_size=1472, embedding_key='byt5_small', tokenizer_class=ByT5Tokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_data=tokenizer_data)
+
+class HunyuanImageTokenizer(QwenImageTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}):
+ super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
+ self.llama_template = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>"
+ # self.llama_template_images = "{}"
+ self.byt5 = ByT5SmallTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
+
+ def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs):
+ out = super().tokenize_with_weights(text, return_word_ids, **kwargs)
+
+ # ByT5 processing for HunyuanImage
+ text_prompt_texts = []
+ pattern_quote_single = r'\'(.*?)\''
+ pattern_quote_double = r'\"(.*?)\"'
+ pattern_quote_chinese_single = r'‘(.*?)’'
+ pattern_quote_chinese_double = r'“(.*?)”'
+
+ matches_quote_single = re.findall(pattern_quote_single, text)
+ matches_quote_double = re.findall(pattern_quote_double, text)
+ matches_quote_chinese_single = re.findall(pattern_quote_chinese_single, text)
+ matches_quote_chinese_double = re.findall(pattern_quote_chinese_double, text)
+
+ text_prompt_texts.extend(matches_quote_single)
+ text_prompt_texts.extend(matches_quote_double)
+ text_prompt_texts.extend(matches_quote_chinese_single)
+ text_prompt_texts.extend(matches_quote_chinese_double)
+
+ if len(text_prompt_texts) > 0:
+ out['byt5'] = self.byt5.tokenize_with_weights(''.join(map(lambda a: 'Text "{}". '.format(a), text_prompt_texts)), return_word_ids, **kwargs)
+ return out
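A minimal sketch, not part of the patch, of what the quote extraction above produces; the sample prompt is invented:

import re

def extract_quoted(text):
    # The same four patterns HunyuanImageTokenizer.tokenize_with_weights uses:
    # ASCII single/double quotes plus Chinese curly quotes, applied in that order.
    patterns = [r'\'(.*?)\'', r'\"(.*?)\"', r'‘(.*?)’', r'“(.*?)”']
    found = []
    for p in patterns:
        found.extend(re.findall(p, text))
    return found

prompt = 'A neon sign that says "OPEN 24H" above a door marked \'EXIT\''
byt5_prompt = ''.join('Text "{}". '.format(t) for t in extract_quoted(prompt))
# byt5_prompt == 'Text "EXIT". Text "OPEN 24H". '; this is the string the
# ByT5SmallTokenizer receives under the 'byt5' key.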
+
+class Qwen25_7BVLIModel(sd1_clip.SDClipModel):
+ def __init__(self, device="cpu", layer="hidden", layer_idx=-3, dtype=None, attention_mask=True, model_options={}):
+ llama_scaled_fp8 = model_options.get("qwen_scaled_fp8", None)
+ if llama_scaled_fp8 is not None:
+ model_options = model_options.copy()
+ model_options["scaled_fp8"] = llama_scaled_fp8
+ super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen25_7BVLI, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+
+class ByT5SmallModel(sd1_clip.SDClipModel):
+ def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, model_options={}):
+ textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "byt5_config_small_glyph.json")
+ super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, model_options=model_options, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=True, zero_out_masked=True)
+
+
+class HunyuanImageTEModel(QwenImageTEModel):
+ def __init__(self, byt5=True, device="cpu", dtype=None, model_options={}):
+ super(QwenImageTEModel, self).__init__(device=device, dtype=dtype, name="qwen25_7b", clip_model=Qwen25_7BVLIModel, model_options=model_options)
+
+ if byt5:
+ self.byt5_small = ByT5SmallModel(device=device, dtype=dtype, model_options=model_options)
+ else:
+ self.byt5_small = None
+
+ def encode_token_weights(self, token_weight_pairs):
+ cond, p, extra = super().encode_token_weights(token_weight_pairs)
+ if self.byt5_small is not None and "byt5" in token_weight_pairs:
+ out = self.byt5_small.encode_token_weights(token_weight_pairs["byt5"])
+ extra["conditioning_byt5small"] = out[0]
+ return cond, p, extra
+
+ def set_clip_options(self, options):
+ super().set_clip_options(options)
+ if self.byt5_small is not None:
+ self.byt5_small.set_clip_options(options)
+
+ def reset_clip_options(self):
+ super().reset_clip_options()
+ if self.byt5_small is not None:
+ self.byt5_small.reset_clip_options()
+
+ def load_sd(self, sd):
+ if "encoder.block.0.layer.0.SelfAttention.o.weight" in sd:
+ return self.byt5_small.load_sd(sd)
+ else:
+ return super().load_sd(sd)
+
+def te(byt5=True, dtype_llama=None, llama_scaled_fp8=None):
+ class QwenImageTEModel_(HunyuanImageTEModel):
+ def __init__(self, device="cpu", dtype=None, model_options={}):
+ if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options:
+ model_options = model_options.copy()
+ model_options["qwen_scaled_fp8"] = llama_scaled_fp8
+ if dtype_llama is not None:
+ dtype = dtype_llama
+ super().__init__(byt5=byt5, device=device, dtype=dtype, model_options=model_options)
+ return QwenImageTEModel_
diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index d7278e7a7..ce031ceb2 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -113,6 +113,20 @@ class HunyuanImageToVideo: out_latent["samples"] = latent return (positive, out_latent)
+class EmptyHunyuanImageLatent:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "width": ("INT", {"default": 2048, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}),
+ "height": ("INT", {"default": 2048, "min": 64, "max": 
nodes.MAX_RESOLUTION, "step": 32}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}} + RETURN_TYPES = ("LATENT",) + FUNCTION = "generate" + + CATEGORY = "latent" + + def generate(self, width, height, batch_size=1): + latent = torch.zeros([batch_size, 64, height // 32, width // 32], device=comfy.model_management.intermediate_device()) + return ({"samples":latent}, ) NODE_CLASS_MAPPINGS = { @@ -120,4 +134,5 @@ NODE_CLASS_MAPPINGS = { "TextEncodeHunyuanVideo_ImageToVideo": TextEncodeHunyuanVideo_ImageToVideo, "EmptyHunyuanLatentVideo": EmptyHunyuanLatentVideo, "HunyuanImageToVideo": HunyuanImageToVideo, + "EmptyHunyuanImageLatent": EmptyHunyuanImageLatent, } diff --git a/nodes.py b/nodes.py index 6c2f9dd14..2befb4b75 100644 --- a/nodes.py +++ b/nodes.py @@ -925,7 +925,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -953,7 +953,7 @@ class DualCLIPLoader: def INPUT_TYPES(s): return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream"], ), + "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -963,7 +963,7 @@ class DualCLIPLoader: CATEGORY = "advanced/loaders" - DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and llama" + DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and llama\nhunyuan_image: qwen2.5vl 7b and byt5 small" def load_clip(self, clip_name1, clip_name2, type, device="default"): clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) From 70fc0425b36515926c6414aee9f2269b27880cc2 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 10 Sep 2025 14:09:16 +0800 Subject: [PATCH 0560/1073] Update template to 0.1.76 (#9793) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3008a5dc3..ea1931d78 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.11 -comfyui-workflow-templates==0.1.75 +comfyui-workflow-templates==0.1.76 comfyui-embedded-docs==0.2.6 torch torchsde From 543888d3d84a6ec4c4273838d5179845840e3226 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 9 Sep 2025 23:15:34 -0700 Subject: [PATCH 0561/1073] Fix lowvram issue with hunyuan image vae. 
(#9794) --- comfy/ldm/hunyuan_video/vae.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/hunyuan_video/vae.py b/comfy/ldm/hunyuan_video/vae.py index 8d406089b..40c12b183 100644 --- a/comfy/ldm/hunyuan_video/vae.py +++ b/comfy/ldm/hunyuan_video/vae.py @@ -65,7 +65,7 @@ class Encoder(nn.Module): self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv2d) self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=ops.Conv2d) - self.norm_out = nn.GroupNorm(32, ch, 1e-6, True) + self.norm_out = ops.GroupNorm(32, ch, 1e-6, True) self.conv_out = ops.Conv2d(ch, z_channels << 1, 3, 1, 1) def forward(self, x): @@ -120,7 +120,7 @@ class Decoder(nn.Module): ch = nxt self.up.append(stage) - self.norm_out = nn.GroupNorm(32, ch, 1e-6, True) + self.norm_out = ops.GroupNorm(32, ch, 1e-6, True) self.conv_out = ops.Conv2d(ch, out_channels, 3, 1, 1) def forward(self, z): From de44b95db6c7ef107f26e7edf30748b608afaa48 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 10 Sep 2025 12:06:47 +0300 Subject: [PATCH 0562/1073] add StabilityAudio API nodes (#9749) --- comfy_api_nodes/apinode_utils.py | 65 +++++ comfy_api_nodes/apis/stability_api.py | 22 ++ comfy_api_nodes/nodes_stability.py | 312 ++++++++++++++++++++++- comfy_api_nodes/util/validation_utils.py | 20 +- 4 files changed, 415 insertions(+), 4 deletions(-) diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index f953f86df..37438f835 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -518,6 +518,71 @@ async def upload_audio_to_comfyapi( return await upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_kwargs) +def f32_pcm(wav: torch.Tensor) -> torch.Tensor: + """Convert audio to float 32 bits PCM format. Copy-paste from nodes_audio.py file.""" + if wav.dtype.is_floating_point: + return wav + elif wav.dtype == torch.int16: + return wav.float() / (2 ** 15) + elif wav.dtype == torch.int32: + return wav.float() / (2 ** 31) + raise ValueError(f"Unsupported wav dtype: {wav.dtype}") + + +def audio_bytes_to_audio_input(audio_bytes: bytes,) -> dict: + """ + Decode any common audio container from bytes using PyAV and return + a Comfy AUDIO dict: {"waveform": [1, C, T] float32, "sample_rate": int}. 
+ """ + with av.open(io.BytesIO(audio_bytes)) as af: + if not af.streams.audio: + raise ValueError("No audio stream found in response.") + stream = af.streams.audio[0] + + in_sr = int(stream.codec_context.sample_rate) + out_sr = in_sr + + frames: list[torch.Tensor] = [] + n_channels = stream.channels or 1 + + for frame in af.decode(streams=stream.index): + arr = frame.to_ndarray() # shape can be [C, T] or [T, C] or [T] + buf = torch.from_numpy(arr) + if buf.ndim == 1: + buf = buf.unsqueeze(0) # [T] -> [1, T] + elif buf.shape[0] != n_channels and buf.shape[-1] == n_channels: + buf = buf.transpose(0, 1).contiguous() # [T, C] -> [C, T] + elif buf.shape[0] != n_channels: + buf = buf.reshape(-1, n_channels).t().contiguous() # fallback to [C, T] + frames.append(buf) + + if not frames: + raise ValueError("Decoded zero audio frames.") + + wav = torch.cat(frames, dim=1) # [C, T] + wav = f32_pcm(wav) + return {"waveform": wav.unsqueeze(0).contiguous(), "sample_rate": out_sr} + + +def audio_input_to_mp3(audio: AudioInput) -> io.BytesIO: + waveform = audio["waveform"].cpu() + + output_buffer = io.BytesIO() + output_container = av.open(output_buffer, mode='w', format="mp3") + + out_stream = output_container.add_stream("libmp3lame", rate=audio["sample_rate"]) + out_stream.bit_rate = 320000 + + frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout='mono' if waveform.shape[0] == 1 else 'stereo') + frame.sample_rate = audio["sample_rate"] + frame.pts = 0 + output_container.mux(out_stream.encode(frame)) + output_container.mux(out_stream.encode(None)) + output_container.close() + output_buffer.seek(0) + return output_buffer + + def audio_to_base64_string( audio: AudioInput, container_format: str = "mp4", codec_name: str = "aac" ) -> str: diff --git a/comfy_api_nodes/apis/stability_api.py b/comfy_api_nodes/apis/stability_api.py index 47c87daec..718360187 100644 --- a/comfy_api_nodes/apis/stability_api.py +++ b/comfy_api_nodes/apis/stability_api.py @@ -125,3 +125,25 @@ class StabilityResultsGetResponse(BaseModel): class StabilityAsyncResponse(BaseModel): id: Optional[str] = Field(None) + + +class StabilityTextToAudioRequest(BaseModel): + model: str = Field(...) + prompt: str = Field(...) 
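# Illustrative only, not part of the patch: with the defaults declared below,
#   StabilityTextToAudioRequest(model="stable-audio-2.5", prompt="warm lofi beat")
# serializes with duration=190, seed=0, steps=8 and output_format="wav".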
+ duration: int = Field(190, ge=1, le=190) + seed: int = Field(0, ge=0, le=4294967294) + steps: int = Field(8, ge=4, le=8) + output_format: str = Field("wav") + + +class StabilityAudioToAudioRequest(StabilityTextToAudioRequest): + strength: float = Field(0.01, ge=0.01, le=1.0) + + +class StabilityAudioInpaintRequest(StabilityTextToAudioRequest): + mask_start: int = Field(30, ge=0, le=190) + mask_end: int = Field(190, ge=0, le=190) + + +class StabilityAudioResponse(BaseModel): + audio: Optional[str] = Field(None) diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py index e05cb6bb2..5ba5ed986 100644 --- a/comfy_api_nodes/nodes_stability.py +++ b/comfy_api_nodes/nodes_stability.py @@ -2,7 +2,7 @@ from inspect import cleandoc from typing import Optional from typing_extensions import override -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, Input, io as comfy_io from comfy_api_nodes.apis.stability_api import ( StabilityUpscaleConservativeRequest, StabilityUpscaleCreativeRequest, @@ -15,6 +15,10 @@ from comfy_api_nodes.apis.stability_api import ( Stability_SD3_5_Model, Stability_SD3_5_GenerationMode, get_stability_style_presets, + StabilityTextToAudioRequest, + StabilityAudioToAudioRequest, + StabilityAudioInpaintRequest, + StabilityAudioResponse, ) from comfy_api_nodes.apis.client import ( ApiEndpoint, @@ -27,7 +31,10 @@ from comfy_api_nodes.apinode_utils import ( bytesio_to_image_tensor, tensor_to_bytesio, validate_string, + audio_bytes_to_audio_input, + audio_input_to_mp3, ) +from comfy_api_nodes.util.validation_utils import validate_audio_duration import torch import base64 @@ -649,6 +656,306 @@ class StabilityUpscaleFastNode(comfy_io.ComfyNode): return comfy_io.NodeOutput(returned_image) +class StabilityTextToAudio(comfy_io.ComfyNode): + """Generates high-quality music and sound effects from text descriptions.""" + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityTextToAudio", + display_name="Stability AI Text To Audio", + category="api node/audio/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Combo.Input( + "model", + options=["stable-audio-2.5"], + ), + comfy_io.String.Input("prompt", multiline=True, default=""), + comfy_io.Int.Input( + "duration", + default=190, + min=1, + max=190, + step=1, + tooltip="Controls the duration in seconds of the generated audio.", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=4294967294, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="The random seed used for generation.", + optional=True, + ), + comfy_io.Int.Input( + "steps", + default=8, + min=4, + max=8, + step=1, + tooltip="Controls the number of sampling steps.", + optional=True, + ), + ], + outputs=[ + comfy_io.Audio.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute(cls, model: str, prompt: str, duration: int, seed: int, steps: int) -> comfy_io.NodeOutput: + validate_string(prompt, max_length=10000) + payload = StabilityTextToAudioRequest(prompt=prompt, model=model, duration=duration, seed=seed, steps=steps) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/stability/v2beta/audio/stable-audio-2/text-to-audio", + method=HttpMethod.POST, + request_model=StabilityTextToAudioRequest, + 
response_model=StabilityAudioResponse, + ), + request=payload, + content_type="multipart/form-data", + auth_kwargs= { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + ) + response_api = await operation.execute() + if not response_api.audio: + raise ValueError("No audio file was received in response.") + return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) + + +class StabilityAudioToAudio(comfy_io.ComfyNode): + """Transforms existing audio samples into new high-quality compositions using text instructions.""" + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityAudioToAudio", + display_name="Stability AI Audio To Audio", + category="api node/audio/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Combo.Input( + "model", + options=["stable-audio-2.5"], + ), + comfy_io.String.Input("prompt", multiline=True, default=""), + comfy_io.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."), + comfy_io.Int.Input( + "duration", + default=190, + min=1, + max=190, + step=1, + tooltip="Controls the duration in seconds of the generated audio.", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=4294967294, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="The random seed used for generation.", + optional=True, + ), + comfy_io.Int.Input( + "steps", + default=8, + min=4, + max=8, + step=1, + tooltip="Controls the number of sampling steps.", + optional=True, + ), + comfy_io.Float.Input( + "strength", + default=1, + min=0.01, + max=1.0, + step=0.01, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Parameter controls how much influence the audio parameter has on the generated audio.", + optional=True, + ), + ], + outputs=[ + comfy_io.Audio.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, model: str, prompt: str, audio: Input.Audio, duration: int, seed: int, steps: int, strength: float + ) -> comfy_io.NodeOutput: + validate_string(prompt, max_length=10000) + validate_audio_duration(audio, 6, 190) + payload = StabilityAudioToAudioRequest( + prompt=prompt, model=model, duration=duration, seed=seed, steps=steps, strength=strength + ) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/stability/v2beta/audio/stable-audio-2/audio-to-audio", + method=HttpMethod.POST, + request_model=StabilityAudioToAudioRequest, + response_model=StabilityAudioResponse, + ), + request=payload, + content_type="multipart/form-data", + files={"audio": audio_input_to_mp3(audio)}, + auth_kwargs= { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + ) + response_api = await operation.execute() + if not response_api.audio: + raise ValueError("No audio file was received in response.") + return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) + + +class StabilityAudioInpaint(comfy_io.ComfyNode): + """Transforms part of existing audio sample using text instructions.""" + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="StabilityAudioInpaint", + display_name="Stability AI Audio Inpaint", + category="api node/audio/Stability AI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + 
comfy_io.Combo.Input(
+ "model",
+ options=["stable-audio-2.5"],
+ ),
+ comfy_io.String.Input("prompt", multiline=True, default=""),
+ comfy_io.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."),
+ comfy_io.Int.Input(
+ "duration",
+ default=190,
+ min=1,
+ max=190,
+ step=1,
+ tooltip="Controls the duration in seconds of the generated audio.",
+ optional=True,
+ ),
+ comfy_io.Int.Input(
+ "seed",
+ default=0,
+ min=0,
+ max=4294967294,
+ step=1,
+ display_mode=comfy_io.NumberDisplay.number,
+ control_after_generate=True,
+ tooltip="The random seed used for generation.",
+ optional=True,
+ ),
+ comfy_io.Int.Input(
+ "steps",
+ default=8,
+ min=4,
+ max=8,
+ step=1,
+ tooltip="Controls the number of sampling steps.",
+ optional=True,
+ ),
+ comfy_io.Int.Input(
+ "mask_start",
+ default=30,
+ min=0,
+ max=190,
+ step=1,
+ optional=True,
+ ),
+ comfy_io.Int.Input(
+ "mask_end",
+ default=190,
+ min=0,
+ max=190,
+ step=1,
+ optional=True,
+ ),
+ ],
+ outputs=[
+ comfy_io.Audio.Output(),
+ ],
+ hidden=[
+ comfy_io.Hidden.auth_token_comfy_org,
+ comfy_io.Hidden.api_key_comfy_org,
+ comfy_io.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model: str,
+ prompt: str,
+ audio: Input.Audio,
+ duration: int,
+ seed: int,
+ steps: int,
+ mask_start: int,
+ mask_end: int,
+ ) -> comfy_io.NodeOutput:
+ validate_string(prompt, max_length=10000)
+ if mask_end <= mask_start:
+ raise ValueError(f"Value of mask_end({mask_end}) should be greater than mask_start({mask_start})")
+ validate_audio_duration(audio, 6, 190)
+
+ payload = StabilityAudioInpaintRequest(
+ prompt=prompt,
+ model=model,
+ duration=duration,
+ seed=seed,
+ steps=steps,
+ mask_start=mask_start,
+ mask_end=mask_end,
+ )
+ operation = SynchronousOperation(
+ endpoint=ApiEndpoint(
+ path="/proxy/stability/v2beta/audio/stable-audio-2/inpaint",
+ method=HttpMethod.POST,
+ request_model=StabilityAudioInpaintRequest,
+ response_model=StabilityAudioResponse,
+ ),
+ request=payload,
+ content_type="multipart/form-data",
+ files={"audio": audio_input_to_mp3(audio)},
+ auth_kwargs={
+ "auth_token": cls.hidden.auth_token_comfy_org,
+ "comfy_api_key": cls.hidden.api_key_comfy_org,
+ },
+ )
+ response_api = await operation.execute()
+ if not response_api.audio:
+ raise ValueError("No audio file was received in response.")
+ return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))
+
+
class StabilityExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: @@ -658,6 +965,9 @@ class StabilityExtension(ComfyExtension): StabilityUpscaleConservativeNode, StabilityUpscaleCreativeNode, StabilityUpscaleFastNode,
+ StabilityTextToAudio,
+ StabilityAudioToAudio,
+ StabilityAudioInpaint,
 ]
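A minimal sketch, not part of the patch, of the duration arithmetic behind the 6-190 second window these audio nodes enforce (the validator itself is added to validation_utils.py below):

import torch

# ComfyUI AUDIO dicts carry a [batch, channels, samples] waveform plus its sample rate.
clip = {"waveform": torch.zeros(1, 2, 48000 * 12), "sample_rate": 48000}
duration = clip["waveform"].shape[-1] / clip["sample_rate"]  # 12.0 seconds
# 6 <= 12.0 <= 190, so validate_audio_duration(clip, 6, 190) accepts this clip.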
diff --git a/comfy_api_nodes/util/validation_utils.py index 606b794bf..ca913e9b3 100644 --- a/comfy_api_nodes/util/validation_utils.py +++ b/comfy_api_nodes/util/validation_utils.py @@ -2,7 +2,7 @@ import logging from typing import Optional import torch
-from comfy_api.input.video_types import VideoInput
+from comfy_api.latest import Input
 def get_image_dimensions(image: torch.Tensor) -> tuple[int, int]: @@ -101,7 +101,7 @@ def validate_aspect_ratio_closeness( def validate_video_dimensions(
- video: VideoInput,
+ video: Input.Video,
 min_width: Optional[int] = None, max_width: Optional[int] = None, min_height: Optional[int] = None, @@ -126,7 +126,7 @@ def validate_video_duration(
- video: VideoInput,
+ video: Input.Video,
 min_duration: Optional[float] = None, max_duration: Optional[float] = None, ): @@ -151,3 +151,17 @@ def get_number_of_images(images): if isinstance(images, torch.Tensor): return images.shape[0] if images.ndim >= 4 else 1 return len(images)
+
+
+def validate_audio_duration(
+ audio: Input.Audio,
+ min_duration: Optional[float] = None,
+ max_duration: Optional[float] = None,
+) -> None:
+ sr = int(audio["sample_rate"])
+ dur = int(audio["waveform"].shape[-1]) / sr
+ eps = 1.0 / sr
+ if min_duration is not None and dur + eps < min_duration:
+ raise ValueError(f"Audio duration must be at least {min_duration}s, got {dur + eps:.2f}s")
+ if max_duration is not None and dur - eps > max_duration:
+ raise ValueError(f"Audio duration must be at most {max_duration}s, got {dur - eps:.2f}s")
From 8d7c930246bd33c32eb957b01ab0d364af6e81c0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 10 Sep 2025 10:51:02 -0400 Subject: [PATCH 0563/1073] ComfyUI version v0.3.58 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 4cc3c8647..37361bd75 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml.
-__version__ = "0.3.57"
+__version__ = "0.3.58"
diff --git a/pyproject.toml b/pyproject.toml index d75cd04a2..f02ab9126 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI"
-version = "0.3.57"
+version = "0.3.58"
readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 9b0553809cbac084aac0576892aca3e448eb07c7 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 11 Sep 2025 00:13:18 +0300 Subject: [PATCH 0564/1073] add new ByteDanceSeedream (4.0) node (#9802) --- comfy_api_nodes/nodes_bytedance.py | 208 ++++++++++++++++++++++++++++- 1 file changed, 207 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index 064df2d10..369a3a4fe 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -77,6 +77,22 @@ class Image2ImageTaskCreationRequest(BaseModel): watermark: Optional[bool] = Field(True)
+class Seedream4Options(BaseModel):
+ max_images: int = Field(15)
+
+
+class Seedream4TaskCreationRequest(BaseModel):
+ model: str = Field("seedream-4-0-250828")
+ prompt: str = Field(...)
+ response_format: str = Field("url")
+ image: Optional[list[str]] = Field(None, description="Image URLs")
+ size: str = Field(...)
+ seed: int = Field(..., ge=0, le=2147483647)
+ sequential_image_generation: str = Field("disabled")
+ sequential_image_generation_options: Seedream4Options = Field(Seedream4Options(max_images=15))
+ watermark: bool = Field(True)
+
+
class ImageTaskCreationResponse(BaseModel): model: str = Field(...)
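# Illustrative only, not part of the patch: a request built from the model defined
# just above, asking the API to decide how many related images to return, capped at four:
#   Seedream4TaskCreationRequest(prompt="four-panel storyboard of a fox", size="2048x2048",
#       seed=7, sequential_image_generation="auto",
#       sequential_image_generation_options=Seedream4Options(max_images=4))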
created: int = Field(..., description="Unix timestamp (in seconds) indicating time when the request was created.") @@ -143,6 +159,19 @@ RECOMMENDED_PRESETS = [ ("Custom", None, None), ] +RECOMMENDED_PRESETS_SEEDREAM_4 = [ + ("2048x2048 (1:1)", 2048, 2048), + ("2304x1728 (4:3)", 2304, 1728), + ("1728x2304 (3:4)", 1728, 2304), + ("2560x1440 (16:9)", 2560, 1440), + ("1440x2560 (9:16)", 1440, 2560), + ("2496x1664 (3:2)", 2496, 1664), + ("1664x2496 (2:3)", 1664, 2496), + ("3024x1296 (21:9)", 3024, 1296), + ("4096x4096 (1:1)", 4096, 4096), + ("Custom", None, None), +] + # The time in this dictionary are given for 10 seconds duration. VIDEO_TASKS_EXECUTION_TIME = { "seedance-1-0-lite-t2v-250428": { @@ -348,7 +377,7 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): return comfy_io.Schema( node_id="ByteDanceImageEditNode", display_name="ByteDance Image Edit", - category="api node/video/ByteDance", + category="api node/image/ByteDance", description="Edit images using ByteDance models via api based on prompt", inputs=[ comfy_io.Combo.Input( @@ -451,6 +480,182 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) +class ByteDanceSeedreamNode(comfy_io.ComfyNode): + + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="ByteDanceSeedreamNode", + display_name="ByteDance Seedream 4", + category="api node/image/ByteDance", + description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.", + inputs=[ + comfy_io.Combo.Input( + "model", + options=["seedream-4-0-250828"], + tooltip="Model name", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text prompt for creating or editing an image.", + ), + comfy_io.Image.Input( + "image", + tooltip="Input image(s) for image-to-image generation. " + "List of 1-10 images for single or multi-reference generation.", + optional=True, + ), + comfy_io.Combo.Input( + "size_preset", + options=[label for label, _, _ in RECOMMENDED_PRESETS_SEEDREAM_4], + tooltip="Pick a recommended size. Select Custom to use the width and height below.", + ), + comfy_io.Int.Input( + "width", + default=2048, + min=1024, + max=4096, + step=64, + tooltip="Custom width for image. Value is working only if `size_preset` is set to `Custom`", + optional=True, + ), + comfy_io.Int.Input( + "height", + default=2048, + min=1024, + max=4096, + step=64, + tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`", + optional=True, + ), + comfy_io.Combo.Input( + "sequential_image_generation", + options=["disabled", "auto"], + tooltip="Group image generation mode. " + "'disabled' generates a single image. " + "'auto' lets the model decide whether to generate multiple related images " + "(e.g., story scenes, character variations).", + optional=True, + ), + comfy_io.Int.Input( + "max_images", + default=1, + min=1, + max=15, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Maximum number of images to generate when sequential_image_generation='auto'. 
" + "Total images (input + generated) cannot exceed 15.", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the image.", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + image: torch.Tensor = None, + size_preset: str = RECOMMENDED_PRESETS_SEEDREAM_4[0][0], + width: int = 2048, + height: int = 2048, + sequential_image_generation: str = "disabled", + max_images: int = 1, + seed: int = 0, + watermark: bool = True, + ) -> comfy_io.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + w = h = None + for label, tw, th in RECOMMENDED_PRESETS_SEEDREAM_4: + if label == size_preset: + w, h = tw, th + break + + if w is None or h is None: + w, h = width, height + if not (1024 <= w <= 4096) or not (1024 <= h <= 4096): + raise ValueError( + f"Custom size out of range: {w}x{h}. " + "Both width and height must be between 1024 and 4096 pixels." + ) + n_input_images = get_number_of_images(image) if image is not None else 0 + if n_input_images > 10: + raise ValueError(f"Maximum of 10 reference images are supported, but {n_input_images} received.") + if sequential_image_generation == "auto" and n_input_images + max_images > 15: + raise ValueError( + "The maximum number of generated images plus the number of reference images cannot exceed 15." 
+ ) + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + reference_images_urls = [] + if n_input_images: + for i in image: + validate_image_aspect_ratio_range(i, (1, 3), (3, 1)) + reference_images_urls = (await upload_images_to_comfyapi( + image, + max_images=n_input_images, + mime_type="image/png", + auth_kwargs=auth_kwargs, + )) + payload = Seedream4TaskCreationRequest( + model=model, + prompt=prompt, + image=reference_images_urls, + size=f"{w}x{h}", + seed=seed, + sequential_image_generation=sequential_image_generation, + sequential_image_generation_options=Seedream4Options(max_images=max_images), + watermark=watermark, + ) + response = await SynchronousOperation( + endpoint=ApiEndpoint( + path=BYTEPLUS_IMAGE_ENDPOINT, + method=HttpMethod.POST, + request_model=Seedream4TaskCreationRequest, + response_model=ImageTaskCreationResponse, + ), + request=payload, + auth_kwargs=auth_kwargs, + ).execute() + + if len(response.data) == 1: + return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) + return comfy_io.NodeOutput( + torch.cat([await download_url_to_image_tensor(str(i["url"])) for i in response.data]) + ) + + class ByteDanceTextToVideoNode(comfy_io.ComfyNode): @classmethod @@ -1001,6 +1206,7 @@ class ByteDanceExtension(ComfyExtension): return [ ByteDanceImageNode, ByteDanceImageEditNode, + ByteDanceSeedreamNode, ByteDanceTextToVideoNode, ByteDanceImageToVideoNode, ByteDanceFirstLastFrameNode, From df34f1549a431c85a6326e87075a206228697cde Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Thu, 11 Sep 2025 05:16:41 +0800 Subject: [PATCH 0565/1073] Update template to 0.1.78 (#9806) * Update template to 0.1.77 * Update template to 0.1.78 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ea1931d78..d31df0fec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.11 -comfyui-workflow-templates==0.1.76 +comfyui-workflow-templates==0.1.78 comfyui-embedded-docs==0.2.6 torch torchsde From 72212fef660bcd7d9702fa52011d089c027a64d8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 10 Sep 2025 17:25:41 -0400 Subject: [PATCH 0566/1073] ComfyUI version 0.3.59 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 37361bd75..ee58205f5 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.58" +__version__ = "0.3.59" diff --git a/pyproject.toml b/pyproject.toml index f02ab9126..a7fc1a5a6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.58" +version = "0.3.59" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From e01e99d075852b94e93f27ea64ab862a49a7d2cc Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 10 Sep 2025 20:17:34 -0700 Subject: [PATCH 0567/1073] Support hunyuan image distilled model. 
(#9807) --- comfy/ldm/hunyuan_video/model.py | 14 ++++++++++++++ comfy/model_detection.py | 12 ++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py index ca289c5bd..7732182a4 100644 --- a/comfy/ldm/hunyuan_video/model.py +++ b/comfy/ldm/hunyuan_video/model.py @@ -41,6 +41,7 @@ class HunyuanVideoParams: qkv_bias: bool guidance_embed: bool byt5: bool + meanflow: bool class SelfAttentionRef(nn.Module): @@ -256,6 +257,11 @@ class HunyuanVideo(nn.Module): else: self.byt5_in = None + if params.meanflow: + self.time_r_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, dtype=dtype, device=device, operations=operations) + else: + self.time_r_in = None + if final_layer: self.final_layer = LastLayer(self.hidden_size, self.patch_size[-1], self.out_channels, dtype=dtype, device=device, operations=operations) @@ -282,6 +288,14 @@ class HunyuanVideo(nn.Module): img = self.img_in(img) vec = self.time_in(timestep_embedding(timesteps, 256, time_factor=1.0).to(img.dtype)) + if self.time_r_in is not None: + w = torch.where(transformer_options['sigmas'][0] == transformer_options['sample_sigmas'])[0] # This most likely could be improved + if len(w) > 0: + timesteps_r = transformer_options['sample_sigmas'][w[0] + 1] + timesteps_r = timesteps_r.unsqueeze(0).to(device=timesteps.device, dtype=timesteps.dtype) + vec_r = self.time_r_in(timestep_embedding(timesteps_r, 256, time_factor=1000.0).to(img.dtype)) + vec = (vec + vec_r) / 2 + if ref_latent is not None: ref_latent_ids = self.img_ids(ref_latent) ref_latent = self.img_in(ref_latent) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index dbcbe5f5a..fe983cede 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -142,12 +142,20 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["in_channels"] = in_w.shape[1] #SkyReels img2video has 32 input channels dit_config["patch_size"] = list(in_w.shape[2:]) dit_config["out_channels"] = out_w.shape[0] // math.prod(dit_config["patch_size"]) - if '{}vector_in.in_layer.weight'.format(key_prefix) in state_dict: + if any(s.startswith('{}vector_in.'.format(key_prefix)) for s in state_dict_keys): dit_config["vec_in_dim"] = 768 - dit_config["axes_dim"] = [16, 56, 56] else: dit_config["vec_in_dim"] = None + + if len(dit_config["patch_size"]) == 2: dit_config["axes_dim"] = [64, 64] + else: + dit_config["axes_dim"] = [16, 56, 56] + + if any(s.startswith('{}time_r_in.'.format(key_prefix)) for s in state_dict_keys): + dit_config["meanflow"] = True + else: + dit_config["meanflow"] = False dit_config["context_in_dim"] = state_dict['{}txt_in.input_embedder.weight'.format(key_prefix)].shape[1] dit_config["hidden_size"] = in_w.shape[0] From df6850fae8a75126cb7a645e38d58cebcfd51096 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Fri, 12 Sep 2025 02:59:26 +0800 Subject: [PATCH 0568/1073] Update template to 0.1.81 (#9811) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d31df0fec..0e21967ef 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.25.11 -comfyui-workflow-templates==0.1.78 +comfyui-workflow-templates==0.1.81 comfyui-embedded-docs==0.2.6 torch torchsde From 18de0b28305fd8bf002d74e91c0630bd76b01d6b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 11 Sep 2025 16:33:02 -0700 Subject: [PATCH 0569/1073] 
Fast preview for hunyuan image. (#9814) --- comfy/latent_formats.py | 68 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 859ae8421..f975b5e11 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -538,6 +538,74 @@ class HunyuanImage21(LatentFormat): latent_dimensions = 2 scale_factor = 0.75289 + latent_rgb_factors = [ + [-0.0154, -0.0397, -0.0521], + [ 0.0005, 0.0093, 0.0006], + [-0.0805, -0.0773, -0.0586], + [-0.0494, -0.0487, -0.0498], + [-0.0212, -0.0076, -0.0261], + [-0.0179, -0.0417, -0.0505], + [ 0.0158, 0.0310, 0.0239], + [ 0.0409, 0.0516, 0.0201], + [ 0.0350, 0.0553, 0.0036], + [-0.0447, -0.0327, -0.0479], + [-0.0038, -0.0221, -0.0365], + [-0.0423, -0.0718, -0.0654], + [ 0.0039, 0.0368, 0.0104], + [ 0.0655, 0.0217, 0.0122], + [ 0.0490, 0.1638, 0.2053], + [ 0.0932, 0.0829, 0.0650], + [-0.0186, -0.0209, -0.0135], + [-0.0080, -0.0076, -0.0148], + [-0.0284, -0.0201, 0.0011], + [-0.0642, -0.0294, -0.0777], + [-0.0035, 0.0076, -0.0140], + [ 0.0519, 0.0731, 0.0887], + [-0.0102, 0.0095, 0.0704], + [ 0.0068, 0.0218, -0.0023], + [-0.0726, -0.0486, -0.0519], + [ 0.0260, 0.0295, 0.0263], + [ 0.0250, 0.0333, 0.0341], + [ 0.0168, -0.0120, -0.0174], + [ 0.0226, 0.1037, 0.0114], + [ 0.2577, 0.1906, 0.1604], + [-0.0646, -0.0137, -0.0018], + [-0.0112, 0.0309, 0.0358], + [-0.0347, 0.0146, -0.0481], + [ 0.0234, 0.0179, 0.0201], + [ 0.0157, 0.0313, 0.0225], + [ 0.0423, 0.0675, 0.0524], + [-0.0031, 0.0027, -0.0255], + [ 0.0447, 0.0555, 0.0330], + [-0.0152, 0.0103, 0.0299], + [-0.0755, -0.0489, -0.0635], + [ 0.0853, 0.0788, 0.1017], + [-0.0272, -0.0294, -0.0471], + [ 0.0440, 0.0400, -0.0137], + [ 0.0335, 0.0317, -0.0036], + [-0.0344, -0.0621, -0.0984], + [-0.0127, -0.0630, -0.0620], + [-0.0648, 0.0360, 0.0924], + [-0.0781, -0.0801, -0.0409], + [ 0.0363, 0.0613, 0.0499], + [ 0.0238, 0.0034, 0.0041], + [-0.0135, 0.0258, 0.0310], + [ 0.0614, 0.1086, 0.0589], + [ 0.0428, 0.0350, 0.0205], + [ 0.0153, 0.0173, -0.0018], + [-0.0288, -0.0455, -0.0091], + [ 0.0344, 0.0109, -0.0157], + [-0.0205, -0.0247, -0.0187], + [ 0.0487, 0.0126, 0.0064], + [-0.0220, -0.0013, 0.0074], + [-0.0203, -0.0094, -0.0048], + [-0.0719, 0.0429, -0.0442], + [ 0.1042, 0.0497, 0.0356], + [-0.0659, -0.0578, -0.0280], + [-0.0060, -0.0322, -0.0234]] + + latent_rgb_factors_bias = [0.0007, -0.0256, -0.0206] + class Hunyuan3Dv2(LatentFormat): latent_channels = 64 latent_dimensions = 1 From 33bd9ed9cb941127b335244c6cc0a8cdc1ac1696 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 11 Sep 2025 21:43:20 -0700 Subject: [PATCH 0570/1073] Implement hunyuan image refiner model. 
(#9817) --- comfy/latent_formats.py | 5 + comfy/ldm/hunyuan_video/model.py | 11 +- comfy/ldm/hunyuan_video/vae_refiner.py | 268 ++++++++++++++++++++ comfy/ldm/models/autoencoder.py | 6 + comfy/ldm/modules/diffusionmodules/model.py | 10 +- comfy/model_base.py | 20 ++ comfy/sd.py | 17 +- comfy/supported_models.py | 19 +- comfy_extras/nodes_hunyuan.py | 23 ++ 9 files changed, 367 insertions(+), 12 deletions(-) create mode 100644 comfy/ldm/hunyuan_video/vae_refiner.py diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index f975b5e11..894540879 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -606,6 +606,11 @@ class HunyuanImage21(LatentFormat): latent_rgb_factors_bias = [0.0007, -0.0256, -0.0206] +class HunyuanImage21Refiner(LatentFormat): + latent_channels = 64 + latent_dimensions = 3 + scale_factor = 1.03682 + class Hunyuan3Dv2(LatentFormat): latent_channels = 64 latent_dimensions = 1 diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py index 7732182a4..ca86b8bb1 100644 --- a/comfy/ldm/hunyuan_video/model.py +++ b/comfy/ldm/hunyuan_video/model.py @@ -278,6 +278,7 @@ class HunyuanVideo(nn.Module): guidance: Tensor = None, guiding_frame_index=None, ref_latent=None, + disable_time_r=False, control=None, transformer_options={}, ) -> Tensor: @@ -288,7 +289,7 @@ class HunyuanVideo(nn.Module): img = self.img_in(img) vec = self.time_in(timestep_embedding(timesteps, 256, time_factor=1.0).to(img.dtype)) - if self.time_r_in is not None: + if (self.time_r_in is not None) and (not disable_time_r): w = torch.where(transformer_options['sigmas'][0] == transformer_options['sample_sigmas'])[0] # This most likely could be improved if len(w) > 0: timesteps_r = transformer_options['sample_sigmas'][w[0] + 1] @@ -428,14 +429,14 @@ class HunyuanVideo(nn.Module): img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) return repeat(img_ids, "h w c -> b (h w) c", b=bs) - def forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): + def forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, disable_time_r=False, control=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( self._forward, self, comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) - ).execute(x, timestep, context, y, txt_byt5, guidance, attention_mask, guiding_frame_index, ref_latent, control, transformer_options, **kwargs) + ).execute(x, timestep, context, y, txt_byt5, guidance, attention_mask, guiding_frame_index, ref_latent, disable_time_r, control, transformer_options, **kwargs) - def _forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, control=None, transformer_options={}, **kwargs): + def _forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, disable_time_r=False, control=None, transformer_options={}, **kwargs): bs = x.shape[0] if len(self.patch_size) == 3: img_ids = self.img_ids(x) @@ -443,5 +444,5 @@ class HunyuanVideo(nn.Module): else: img_ids = self.img_ids_2d(x) txt_ids = torch.zeros((bs, context.shape[1], 2), 
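# editor's note: two positional-id axes in this 2D (image) branch, matching the
# axes_dim = [64, 64] detection case added earlier in this series.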
device=x.device, dtype=x.dtype) - out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, txt_byt5, guidance, guiding_frame_index, ref_latent, control=control, transformer_options=transformer_options) + out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, txt_byt5, guidance, guiding_frame_index, ref_latent, disable_time_r=disable_time_r, control=control, transformer_options=transformer_options) return out diff --git a/comfy/ldm/hunyuan_video/vae_refiner.py b/comfy/ldm/hunyuan_video/vae_refiner.py new file mode 100644 index 000000000..e3fff9bbe --- /dev/null +++ b/comfy/ldm/hunyuan_video/vae_refiner.py @@ -0,0 +1,268 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, AttnBlock, VideoConv3d +import comfy.ops +import comfy.ldm.models.autoencoder +ops = comfy.ops.disable_weight_init + +class RMS_norm(nn.Module): + def __init__(self, dim): + super().__init__() + shape = (dim, 1, 1, 1) + self.scale = dim**0.5 + self.gamma = nn.Parameter(torch.empty(shape)) + + def forward(self, x): + return F.normalize(x, dim=1) * self.scale * self.gamma + +class DnSmpl(nn.Module): + def __init__(self, ic, oc, tds=True): + super().__init__() + fct = 2 * 2 * 2 if tds else 1 * 2 * 2 + assert oc % fct == 0 + self.conv = VideoConv3d(ic, oc // fct, kernel_size=3) + + self.tds = tds + self.gs = fct * ic // oc + + def forward(self, x): + r1 = 2 if self.tds else 1 + h = self.conv(x) + + if self.tds: + hf = h[:, :, :1, :, :] + b, c, f, ht, wd = hf.shape + hf = hf.reshape(b, c, f, ht // 2, 2, wd // 2, 2) + hf = hf.permute(0, 4, 6, 1, 2, 3, 5) + hf = hf.reshape(b, 2 * 2 * c, f, ht // 2, wd // 2) + hf = torch.cat([hf, hf], dim=1) + + hn = h[:, :, 1:, :, :] + b, c, frms, ht, wd = hn.shape + nf = frms // r1 + hn = hn.reshape(b, c, nf, r1, ht // 2, 2, wd // 2, 2) + hn = hn.permute(0, 3, 5, 7, 1, 2, 4, 6) + hn = hn.reshape(b, r1 * 2 * 2 * c, nf, ht // 2, wd // 2) + + h = torch.cat([hf, hn], dim=2) + + xf = x[:, :, :1, :, :] + b, ci, f, ht, wd = xf.shape + xf = xf.reshape(b, ci, f, ht // 2, 2, wd // 2, 2) + xf = xf.permute(0, 4, 6, 1, 2, 3, 5) + xf = xf.reshape(b, 2 * 2 * ci, f, ht // 2, wd // 2) + B, C, T, H, W = xf.shape + xf = xf.view(B, h.shape[1], self.gs // 2, T, H, W).mean(dim=2) + + xn = x[:, :, 1:, :, :] + b, ci, frms, ht, wd = xn.shape + nf = frms // r1 + xn = xn.reshape(b, ci, nf, r1, ht // 2, 2, wd // 2, 2) + xn = xn.permute(0, 3, 5, 7, 1, 2, 4, 6) + xn = xn.reshape(b, r1 * 2 * 2 * ci, nf, ht // 2, wd // 2) + B, C, T, H, W = xn.shape + xn = xn.view(B, h.shape[1], self.gs, T, H, W).mean(dim=2) + sc = torch.cat([xf, xn], dim=2) + else: + b, c, frms, ht, wd = h.shape + nf = frms // r1 + h = h.reshape(b, c, nf, r1, ht // 2, 2, wd // 2, 2) + h = h.permute(0, 3, 5, 7, 1, 2, 4, 6) + h = h.reshape(b, r1 * 2 * 2 * c, nf, ht // 2, wd // 2) + + b, ci, frms, ht, wd = x.shape + nf = frms // r1 + sc = x.reshape(b, ci, nf, r1, ht // 2, 2, wd // 2, 2) + sc = sc.permute(0, 3, 5, 7, 1, 2, 4, 6) + sc = sc.reshape(b, r1 * 2 * 2 * ci, nf, ht // 2, wd // 2) + B, C, T, H, W = sc.shape + sc = sc.view(B, h.shape[1], self.gs, T, H, W).mean(dim=2) + + return h + sc + + +class UpSmpl(nn.Module): + def __init__(self, ic, oc, tus=True): + super().__init__() + fct = 2 * 2 * 2 if tus else 1 * 2 * 2 + self.conv = VideoConv3d(ic, oc * fct, kernel_size=3) + + self.tus = tus + self.rp = fct * oc // ic + + def forward(self, x): + r1 = 2 if self.tus else 1 + h = self.conv(x) + + if self.tus: + hf = h[:, :, :1, 
:, :] + b, c, f, ht, wd = hf.shape + nc = c // (2 * 2) + hf = hf.reshape(b, 2, 2, nc, f, ht, wd) + hf = hf.permute(0, 3, 4, 5, 1, 6, 2) + hf = hf.reshape(b, nc, f, ht * 2, wd * 2) + hf = hf[:, : hf.shape[1] // 2] + + hn = h[:, :, 1:, :, :] + b, c, frms, ht, wd = hn.shape + nc = c // (r1 * 2 * 2) + hn = hn.reshape(b, r1, 2, 2, nc, frms, ht, wd) + hn = hn.permute(0, 4, 5, 1, 6, 2, 7, 3) + hn = hn.reshape(b, nc, frms * r1, ht * 2, wd * 2) + + h = torch.cat([hf, hn], dim=2) + + xf = x[:, :, :1, :, :] + b, ci, f, ht, wd = xf.shape + xf = xf.repeat_interleave(repeats=self.rp // 2, dim=1) + b, c, f, ht, wd = xf.shape + nc = c // (2 * 2) + xf = xf.reshape(b, 2, 2, nc, f, ht, wd) + xf = xf.permute(0, 3, 4, 5, 1, 6, 2) + xf = xf.reshape(b, nc, f, ht * 2, wd * 2) + + xn = x[:, :, 1:, :, :] + xn = xn.repeat_interleave(repeats=self.rp, dim=1) + b, c, frms, ht, wd = xn.shape + nc = c // (r1 * 2 * 2) + xn = xn.reshape(b, r1, 2, 2, nc, frms, ht, wd) + xn = xn.permute(0, 4, 5, 1, 6, 2, 7, 3) + xn = xn.reshape(b, nc, frms * r1, ht * 2, wd * 2) + sc = torch.cat([xf, xn], dim=2) + else: + b, c, frms, ht, wd = h.shape + nc = c // (r1 * 2 * 2) + h = h.reshape(b, r1, 2, 2, nc, frms, ht, wd) + h = h.permute(0, 4, 5, 1, 6, 2, 7, 3) + h = h.reshape(b, nc, frms * r1, ht * 2, wd * 2) + + sc = x.repeat_interleave(repeats=self.rp, dim=1) + b, c, frms, ht, wd = sc.shape + nc = c // (r1 * 2 * 2) + sc = sc.reshape(b, r1, 2, 2, nc, frms, ht, wd) + sc = sc.permute(0, 4, 5, 1, 6, 2, 7, 3) + sc = sc.reshape(b, nc, frms * r1, ht * 2, wd * 2) + + return h + sc + +class Encoder(nn.Module): + def __init__(self, in_channels, z_channels, block_out_channels, num_res_blocks, + ffactor_spatial, ffactor_temporal, downsample_match_channel=True, **_): + super().__init__() + self.z_channels = z_channels + self.block_out_channels = block_out_channels + self.num_res_blocks = num_res_blocks + self.conv_in = VideoConv3d(in_channels, block_out_channels[0], 3, 1, 1) + + self.down = nn.ModuleList() + ch = block_out_channels[0] + depth = (ffactor_spatial >> 1).bit_length() + depth_temporal = ((ffactor_spatial // ffactor_temporal) >> 1).bit_length() + + for i, tgt in enumerate(block_out_channels): + stage = nn.Module() + stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + temb_channels=0, + conv_op=VideoConv3d, norm_op=RMS_norm) + for j in range(num_res_blocks)]) + ch = tgt + if i < depth: + nxt = block_out_channels[i + 1] if i + 1 < len(block_out_channels) and downsample_match_channel else ch + stage.downsample = DnSmpl(ch, nxt, tds=i >= depth_temporal) + ch = nxt + self.down.append(stage) + + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) + self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=RMS_norm) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) + + self.norm_out = RMS_norm(ch) + self.conv_out = VideoConv3d(ch, z_channels << 1, 3, 1, 1) + + self.regul = comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer() + + def forward(self, x): + x = x.unsqueeze(2) + x = self.conv_in(x) + + for stage in self.down: + for blk in stage.block: + x = blk(x) + if hasattr(stage, 'downsample'): + x = stage.downsample(x) + + x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(x))) + + b, c, t, h, w = x.shape + grp = c // (self.z_channels << 1) + skip = x.view(b, c // grp, grp, t, h, w).mean(2) + + out = 
self.conv_out(F.silu(self.norm_out(x))) + skip + out = self.regul(out)[0] + + out = torch.cat((out[:, :, :1], out), dim=2) + out = out.permute(0, 2, 1, 3, 4) + b, f_times_2, c, h, w = out.shape + out = out.reshape(b, f_times_2 // 2, 2 * c, h, w) + out = out.permute(0, 2, 1, 3, 4).contiguous() + return out + +class Decoder(nn.Module): + def __init__(self, z_channels, out_channels, block_out_channels, num_res_blocks, + ffactor_spatial, ffactor_temporal, upsample_match_channel=True, **_): + super().__init__() + block_out_channels = block_out_channels[::-1] + self.z_channels = z_channels + self.block_out_channels = block_out_channels + self.num_res_blocks = num_res_blocks + + ch = block_out_channels[0] + self.conv_in = VideoConv3d(z_channels, ch, 3) + + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) + self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=RMS_norm) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) + + self.up = nn.ModuleList() + depth = (ffactor_spatial >> 1).bit_length() + depth_temporal = (ffactor_temporal >> 1).bit_length() + + for i, tgt in enumerate(block_out_channels): + stage = nn.Module() + stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + temb_channels=0, + conv_op=VideoConv3d, norm_op=RMS_norm) + for j in range(num_res_blocks + 1)]) + ch = tgt + if i < depth: + nxt = block_out_channels[i + 1] if i + 1 < len(block_out_channels) and upsample_match_channel else ch + stage.upsample = UpSmpl(ch, nxt, tus=i < depth_temporal) + ch = nxt + self.up.append(stage) + + self.norm_out = RMS_norm(ch) + self.conv_out = VideoConv3d(ch, out_channels, 3) + + def forward(self, z): + z = z.permute(0, 2, 1, 3, 4) + b, f, c, h, w = z.shape + z = z.reshape(b, f, 2, c // 2, h, w) + z = z.permute(0, 1, 2, 3, 4, 5).reshape(b, f * 2, c // 2, h, w) + z = z.permute(0, 2, 1, 3, 4) + z = z[:, :, 1:] + + x = self.conv_in(z) + z.repeat_interleave(self.block_out_channels[0] // self.z_channels, 1) + x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(x))) + + for stage in self.up: + for blk in stage.block: + x = blk(x) + if hasattr(stage, 'upsample'): + x = stage.upsample(x) + + return self.conv_out(F.silu(self.norm_out(x))) diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py index 13bd6e16b..611d36a1b 100644 --- a/comfy/ldm/models/autoencoder.py +++ b/comfy/ldm/models/autoencoder.py @@ -26,6 +26,12 @@ class DiagonalGaussianRegularizer(torch.nn.Module): z = posterior.mode() return z, None +class EmptyRegularizer(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]: + return z, None class AbstractAutoencoder(torch.nn.Module): """ diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 8f598a848..4245eedca 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -145,7 +145,7 @@ class Downsample(nn.Module): class ResnetBlock(nn.Module): def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout=0.0, temb_channels=512, conv_op=ops.Conv2d): + dropout=0.0, temb_channels=512, conv_op=ops.Conv2d, norm_op=Normalize): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels @@ -153,7 +153,7 @@ class 
ResnetBlock(nn.Module): self.use_conv_shortcut = conv_shortcut self.swish = torch.nn.SiLU(inplace=True) - self.norm1 = Normalize(in_channels) + self.norm1 = norm_op(in_channels) self.conv1 = conv_op(in_channels, out_channels, kernel_size=3, @@ -162,7 +162,7 @@ class ResnetBlock(nn.Module): if temb_channels > 0: self.temb_proj = ops.Linear(temb_channels, out_channels) - self.norm2 = Normalize(out_channels) + self.norm2 = norm_op(out_channels) self.dropout = torch.nn.Dropout(dropout, inplace=True) self.conv2 = conv_op(out_channels, out_channels, @@ -305,11 +305,11 @@ def vae_attention(): return normal_attention class AttnBlock(nn.Module): - def __init__(self, in_channels, conv_op=ops.Conv2d): + def __init__(self, in_channels, conv_op=ops.Conv2d, norm_op=Normalize): super().__init__() self.in_channels = in_channels - self.norm = Normalize(in_channels) + self.norm = norm_op(in_channels) self.q = conv_op(in_channels, in_channels, kernel_size=1, diff --git a/comfy/model_base.py b/comfy/model_base.py index 993ff65e6..c69a9d1ad 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1432,3 +1432,23 @@ class HunyuanImage21(BaseModel): out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) return out + +class HunyuanImage21Refiner(HunyuanImage21): + def concat_cond(self, **kwargs): + noise = kwargs.get("noise", None) + image = kwargs.get("concat_latent_image", None) + device = kwargs["device"] + + if image is None: + shape_image = list(noise.shape) + image = torch.zeros(shape_image, dtype=noise.dtype, layout=noise.layout, device=noise.device) + else: + image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + image = self.process_latent_in(image) + image = utils.resize_to_batch_size(image, noise.shape[0]) + return image + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + out['disable_time_r'] = comfy.conds.CONDConstant(True) + return out diff --git a/comfy/sd.py b/comfy/sd.py index 9dd9a74d4..02ddc7239 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -285,6 +285,7 @@ class VAE: self.process_output = lambda image: torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0) self.working_dtypes = [torch.bfloat16, torch.float32] self.disable_offload = False + self.not_video = False self.downscale_index_formula = None self.upscale_index_formula = None @@ -409,6 +410,20 @@ class VAE: self.downscale_ratio = (lambda a: max(0, math.floor((a + 7) / 8)), 32, 32) self.downscale_index_formula = (8, 32, 32) self.working_dtypes = [torch.bfloat16, torch.float32] + elif "decoder.conv_in.conv.weight" in sd and sd['decoder.conv_in.conv.weight'].shape[1] == 32: + ddconfig = {"block_out_channels": [128, 256, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 16, "ffactor_temporal": 4, "downsample_match_channel": True, "upsample_match_channel": True} + self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.conv.weight"].shape[1] + self.downscale_ratio = 16 + self.upscale_ratio = 16 + self.latent_dim = 3 + self.not_video = True + self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] + self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.EmptyRegularizer"}, + encoder_config={'target': "comfy.ldm.hunyuan_video.vae_refiner.Encoder", 'params': ddconfig}, + decoder_config={'target': "comfy.ldm.hunyuan_video.vae_refiner.Decoder", 'params': ddconfig}) + + self.memory_used_encode = lambda shape, dtype: (1400 * shape[-2] * 
shape[-1]) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (1400 * shape[-3] * shape[-2] * shape[-1] * 16 * 16) * model_management.dtype_size(dtype) elif "decoder.conv_in.conv.weight" in sd: ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} ddconfig["conv3d"] = True @@ -669,7 +684,7 @@ class VAE: self.throw_exception_if_invalid() pixel_samples = self.vae_encode_crop_pixels(pixel_samples) pixel_samples = pixel_samples.movedim(-1, 1) - if self.latent_dim == 3 and pixel_samples.ndim < 5: + if not self.not_video and self.latent_dim == 3 and pixel_samples.ndim < 5: pixel_samples = pixel_samples.movedim(1, 0).unsqueeze(0) try: memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index aa953b462..ba1b8c313 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1321,6 +1321,23 @@ class HunyuanImage21(HunyuanVideo): hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_image.HunyuanImageTokenizer, comfy.text_encoders.hunyuan_image.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] +class HunyuanImage21Refiner(HunyuanVideo): + unet_config = { + "image_model": "hunyuan_video", + "patch_size": [1, 1, 1], + "vec_in_dim": None, + } + + sampling_settings = { + "shift": 1.0, + } + + latent_format = latent_formats.HunyuanImage21Refiner + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.HunyuanImage21Refiner(self, device=device) + return out + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index ce031ceb2..351a7e2cb 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -128,6 +128,28 @@ class EmptyHunyuanImageLatent: latent = torch.zeros([batch_size, 64, height // 32, width // 32], 
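# editor's note: 64 latent channels and the // 32 divisions match the Hunyuan
# Image 2.1 VAE's 32x spatial compression; the refiner VAE added earlier in this
# patch compresses 16x instead, hence the separate refiner latent node below.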
device=comfy.model_management.intermediate_device()) return ({"samples":latent}, ) +class HunyuanRefinerLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent": ("LATENT", ), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "execute" + + def execute(self, positive, negative, latent): + latent = latent["samples"] + + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": latent}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": latent}) + out_latent = {} + out_latent["samples"] = torch.zeros([latent.shape[0], 32, latent.shape[-3], latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device()) + return (positive, negative, out_latent) + NODE_CLASS_MAPPINGS = { "CLIPTextEncodeHunyuanDiT": CLIPTextEncodeHunyuanDiT, @@ -135,4 +157,5 @@ NODE_CLASS_MAPPINGS = { "EmptyHunyuanLatentVideo": EmptyHunyuanLatentVideo, "HunyuanImageToVideo": HunyuanImageToVideo, "EmptyHunyuanImageLatent": EmptyHunyuanImageLatent, + "HunyuanRefinerLatent": HunyuanRefinerLatent, } From 15ec9ea958d1c5d374add598b571a585541d4863 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Thu, 11 Sep 2025 21:44:20 -0700 Subject: [PATCH 0571/1073] Add Output to V3 Combo type to match what is possible with V1 (#9813) --- comfy_api/latest/_io.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index f770109d5..4826818df 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -331,7 +331,7 @@ class String(ComfyTypeIO): }) @comfytype(io_type="COMBO") -class Combo(ComfyTypeI): +class Combo(ComfyTypeIO): Type = str class Input(WidgetInput): """Combo input (dropdown).""" @@ -360,6 +360,14 @@ class Combo(ComfyTypeI): "remote": self.remote.as_dict() if self.remote else None, }) + class Output(Output): + def __init__(self, id: str=None, display_name: str=None, options: list[str]=None, tooltip: str=None, is_output_list=False): + super().__init__(id, display_name, tooltip, is_output_list) + self.options = options if options is not None else [] + + @property + def io_type(self): + return self.options @comfytype(io_type="COMBO") class MultiCombo(ComfyTypeI): From d6b977b2e680e98ad18a37ee13783da4f30e15f4 Mon Sep 17 00:00:00 2001 From: Benjamin Lu Date: Thu, 11 Sep 2025 21:46:01 -0700 Subject: [PATCH 0572/1073] Bump frontend to 1.26.11 (#9809) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0e21967ef..de5af5fac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.25.11 +comfyui-frontend-package==1.26.11 comfyui-workflow-templates==0.1.81 comfyui-embedded-docs==0.2.6 torch From fd2b820ec28e9575877dc6c51949b2d28dc78894 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 12 Sep 2025 13:03:08 -0700 Subject: [PATCH 0573/1073] Add noise augmentation to hunyuan image refiner. (#9831) This was missing and should help with colors being blown out. 
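[Editor's note: the diff below blends the refiner's conditioning latent with seeded Gaussian noise before it is concatenated. A minimal standalone sketch of the blend, assuming a noise_augmentation strength in [0, 1]; the function and variable names are illustrative, not part of the patch:

    import torch

    def augment_latent(latent: torch.Tensor, strength: float, seed: int = 0) -> torch.Tensor:
        # seeded CPU generator, mirroring the diff, so the same workflow seed
        # reproduces the same augmentation
        generator = torch.manual_seed(seed - 10)
        noise = torch.randn(latent.shape, generator=generator, dtype=latent.dtype, device="cpu")
        # linear blend: strength 0.0 keeps the latent untouched, 1.0 is pure noise
        return strength * noise.to(latent.device) + (1.0 - strength) * latent

At the node's default strength of 0.10 the refiner still sees mostly the first-stage latent, but gains enough stochasticity to soften the blown-out colors mentioned above.]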
--- comfy/model_base.py | 4 ++++ comfy_extras/nodes_hunyuan.py | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index c69a9d1ad..8422051bf 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1437,6 +1437,7 @@ class HunyuanImage21Refiner(HunyuanImage21): def concat_cond(self, **kwargs): noise = kwargs.get("noise", None) image = kwargs.get("concat_latent_image", None) + noise_augmentation = kwargs.get("noise_augmentation", 0.0) device = kwargs["device"] if image is None: @@ -1446,6 +1447,9 @@ class HunyuanImage21Refiner(HunyuanImage21): image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") image = self.process_latent_in(image) image = utils.resize_to_batch_size(image, noise.shape[0]) + if noise_augmentation > 0: + noise = torch.randn(image.shape, generator=torch.manual_seed(kwargs.get("seed", 0) - 10), dtype=image.dtype, device="cpu").to(image.device) + image = noise_augmentation * noise + (1.0 - noise_augmentation) * image return image def extra_conds(self, **kwargs): diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index 351a7e2cb..db398cdf1 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -134,6 +134,7 @@ class HunyuanRefinerLatent: return {"required": {"positive": ("CONDITIONING", ), "negative": ("CONDITIONING", ), "latent": ("LATENT", ), + "noise_augmentation": ("FLOAT", {"default": 0.10, "min": 0.0, "max": 1.0, "step": 0.01}), }} RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") @@ -141,11 +142,10 @@ class HunyuanRefinerLatent: FUNCTION = "execute" - def execute(self, positive, negative, latent): + def execute(self, positive, negative, latent, noise_augmentation): latent = latent["samples"] - - positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": latent}) - negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": latent}) + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": latent, "noise_augmentation": noise_augmentation}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": latent, "noise_augmentation": noise_augmentation}) out_latent = {} out_latent["samples"] = torch.zeros([latent.shape[0], 32, latent.shape[-3], latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device()) return (positive, negative, out_latent) From e600520f8aa583c79caa286a8d7d584edc3d059b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 12 Sep 2025 13:35:34 -0700 Subject: [PATCH 0574/1073] Fix hunyuan refiner blown-out colors at noise aug less than 0.25 (#9832) --- comfy/model_base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 8422051bf..4176bca25 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1449,7 +1449,9 @@ class HunyuanImage21Refiner(HunyuanImage21): image = utils.resize_to_batch_size(image, noise.shape[0]) if noise_augmentation > 0: noise = torch.randn(image.shape, generator=torch.manual_seed(kwargs.get("seed", 0) - 10), dtype=image.dtype, device="cpu").to(image.device) - image = noise_augmentation * noise + (1.0 - noise_augmentation) * image + image = noise_augmentation * noise + min(1.0 - noise_augmentation, 0.75) * image + else: + image = 0.75 * image return image def extra_conds(self, **kwargs):
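[Editor's note: follow-up to the previous commit. The cap above means the conditioning latent now enters the blend scaled by at most 0.75, even when augmentation is weak or disabled. Effective (noise, image) coefficients at strength s, for comparison:

    # old blend:    (s, 1.0 - s)            -> at s = 0.10: (0.10, 0.90)
    # capped blend: (s, min(1.0 - s, 0.75)) -> at s = 0.10: (0.10, 0.75)
    # disabled (s = 0): the image term is still scaled by 0.75

The commit does not explain the 0.75 constant; plausibly it matches the conditioning magnitude the refiner was trained to expect.]
From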
7757d5a657cbe9b22d1f3538ee0bc5387d3f5459 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 12 Sep 2025 13:40:12 -0700 Subject: [PATCH 0575/1073] Set default hunyuan refiner shift to 4.0 (#9833) --- comfy/supported_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index ba1b8c313..472ea0ae9 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1329,7 +1329,7 @@ class HunyuanImage21Refiner(HunyuanVideo): } sampling_settings = { - "shift": 1.0, + "shift": 4.0, } latent_format = latent_formats.HunyuanImage21Refiner From 0aa074a420c450fd7793d83c6f8d66009a1ca2a2 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 13 Sep 2025 00:29:03 +0300 Subject: [PATCH 0576/1073] add kling-v2-1 model to the KlingStartEndFrame node (#9630) --- comfy_api_nodes/nodes_kling.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 9fa390985..5f55b2cc9 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -846,6 +846,8 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): "pro mode / 10s duration / kling-v1-5": ("pro", "10", "kling-v1-5"), "pro mode / 5s duration / kling-v1-6": ("pro", "5", "kling-v1-6"), "pro mode / 10s duration / kling-v1-6": ("pro", "10", "kling-v1-6"), + "pro mode / 5s duration / kling-v2-1": ("pro", "5", "kling-v2-1"), + "pro mode / 10s duration / kling-v2-1": ("pro", "10", "kling-v2-1"), } @classmethod From 45bc1f5c00307f3e85871ecfb46acaa2365b0096 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 13 Sep 2025 00:37:31 +0300 Subject: [PATCH 0577/1073] convert Minimax API nodes to the V3 schema (#9693) --- comfy_api_nodes/nodes_minimax.py | 732 ++++++++++++++++--------------- 1 file changed, 378 insertions(+), 354 deletions(-) diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index bb3c9e710..bf560661c 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -1,9 +1,10 @@ from inspect import cleandoc -from typing import Union +from typing import Optional import logging import torch -from comfy.comfy_types.node_typing import IO +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis import ( MinimaxVideoGenerationRequest, @@ -11,7 +12,7 @@ from comfy_api_nodes.apis import ( MinimaxFileRetrieveResponse, MinimaxTaskResultResponse, SubjectReferenceItem, - MiniMaxModel + MiniMaxModel, ) from comfy_api_nodes.apis.client import ( ApiEndpoint, @@ -31,372 +32,398 @@ from server import PromptServer I2V_AVERAGE_DURATION = 114 T2V_AVERAGE_DURATION = 234 -class MinimaxTextToVideoNode: + +async def _generate_mm_video( + *, + auth: dict[str, str], + node_id: str, + prompt_text: str, + seed: int, + model: str, + image: Optional[torch.Tensor] = None, # used for ImageToVideo + subject: Optional[torch.Tensor] = None, # used for SubjectToVideo + average_duration: Optional[int] = None, +) -> comfy_io.NodeOutput: + if image is None: + validate_string(prompt_text, field_name="prompt_text") + # upload image, if passed in + image_url = None + if image is not None: + image_url = (await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=auth))[0] + + # TODO: figure 
out how to deal with subject properly, API returns invalid params when using S2V-01 model + subject_reference = None + if subject is not None: + subject_url = (await upload_images_to_comfyapi(subject, max_images=1, auth_kwargs=auth))[0] + subject_reference = [SubjectReferenceItem(image=subject_url)] + + + video_generate_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/video_generation", + method=HttpMethod.POST, + request_model=MinimaxVideoGenerationRequest, + response_model=MinimaxVideoGenerationResponse, + ), + request=MinimaxVideoGenerationRequest( + model=MiniMaxModel(model), + prompt=prompt_text, + callback_url=None, + first_frame_image=image_url, + subject_reference=subject_reference, + prompt_optimizer=None, + ), + auth_kwargs=auth, + ) + response = await video_generate_operation.execute() + + task_id = response.task_id + if not task_id: + raise Exception(f"MiniMax generation failed: {response.base_resp}") + + video_generate_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path="/proxy/minimax/query/video_generation", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxTaskResultResponse, + query_params={"task_id": task_id}, + ), + completed_statuses=["Success"], + failed_statuses=["Fail"], + status_extractor=lambda x: x.status.value, + estimated_duration=average_duration, + node_id=node_id, + auth_kwargs=auth, + ) + task_result = await video_generate_operation.execute() + + file_id = task_result.file_id + if file_id is None: + raise Exception("Request was not successful. Missing file ID.") + file_retrieve_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/minimax/files/retrieve", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=MinimaxFileRetrieveResponse, + query_params={"file_id": int(file_id)}, + ), + request=EmptyRequest(), + auth_kwargs=auth, + ) + file_result = await file_retrieve_operation.execute() + + file_url = file_result.file.download_url + if file_url is None: + raise Exception( + f"No video was found in the response. Full response: {file_result.model_dump()}" + ) + logging.info("Generated video URL: %s", file_url) + if node_id: + if hasattr(file_result.file, "backup_download_url"): + message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}" + else: + message = f"Result URL: {file_url}" + PromptServer.instance.send_progress_text(message, node_id) + + # Download and return as VideoFromFile + video_io = await download_url_to_bytesio(file_url) + if video_io is None: + error_msg = f"Failed to download video from {file_url}" + logging.error(error_msg) + raise Exception(error_msg) + return comfy_io.NodeOutput(VideoFromFile(video_io)) + + +class MinimaxTextToVideoNode(comfy_io.ComfyNode): """ Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API. 
""" - AVERAGE_DURATION = T2V_AVERAGE_DURATION + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="MinimaxTextToVideoNode", + display_name="MiniMax Text to Video", + category="api node/video/MiniMax", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt_text", + multiline=True, + default="", + tooltip="Text prompt to guide the video generation", + ), + comfy_io.Combo.Input( + "model", + options=["T2V-01", "T2V-01-Director"], + default="T2V-01", + tooltip="Model to use for video generation", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + step=1, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + optional=True, + ), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt_text": ( - "STRING", - { - "multiline": True, - "default": "", - "tooltip": "Text prompt to guide the video generation", - }, - ), - "model": ( - [ - "T2V-01", - "T2V-01-Director", - ], - { - "default": "T2V-01", - "tooltip": "Model to use for video generation", - }, - ), + async def execute( + cls, + prompt_text: str, + model: str = "T2V-01", + seed: int = 0, + ) -> comfy_io.NodeOutput: + return await _generate_mm_video( + auth={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("VIDEO",) - DESCRIPTION = "Generates videos from prompts using MiniMax's API" - FUNCTION = "generate_video" - CATEGORY = "api node/video/MiniMax" - API_NODE = True - - async def generate_video( - self, - prompt_text, - seed=0, - model="T2V-01", - image: torch.Tensor=None, # used for ImageToVideo - subject: torch.Tensor=None, # used for SubjectToVideo - unique_id: Union[str, None]=None, - **kwargs, - ): - ''' - Function used between MiniMax nodes - supports T2V, I2V, and S2V, based on provided arguments. 
- ''' - if image is None: - validate_string(prompt_text, field_name="prompt_text") - # upload image, if passed in - image_url = None - if image is not None: - image_url = (await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs))[0] - - # TODO: figure out how to deal with subject properly, API returns invalid params when using S2V-01 model - subject_reference = None - if subject is not None: - subject_url = (await upload_images_to_comfyapi(subject, max_images=1, auth_kwargs=kwargs))[0] - subject_reference = [SubjectReferenceItem(image=subject_url)] - - - video_generate_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/video_generation", - method=HttpMethod.POST, - request_model=MinimaxVideoGenerationRequest, - response_model=MinimaxVideoGenerationResponse, - ), - request=MinimaxVideoGenerationRequest( - model=MiniMaxModel(model), - prompt=prompt_text, - callback_url=None, - first_frame_image=image_url, - subject_reference=subject_reference, - prompt_optimizer=None, - ), - auth_kwargs=kwargs, + node_id=cls.hidden.unique_id, + prompt_text=prompt_text, + seed=seed, + model=model, + image=None, + subject=None, + average_duration=T2V_AVERAGE_DURATION, ) - response = await video_generate_operation.execute() - - task_id = response.task_id - if not task_id: - raise Exception(f"MiniMax generation failed: {response.base_resp}") - - video_generate_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path="/proxy/minimax/query/video_generation", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxTaskResultResponse, - query_params={"task_id": task_id}, - ), - completed_statuses=["Success"], - failed_statuses=["Fail"], - status_extractor=lambda x: x.status.value, - estimated_duration=self.AVERAGE_DURATION, - node_id=unique_id, - auth_kwargs=kwargs, - ) - task_result = await video_generate_operation.execute() - - file_id = task_result.file_id - if file_id is None: - raise Exception("Request was not successful. Missing file ID.") - file_retrieve_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/files/retrieve", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxFileRetrieveResponse, - query_params={"file_id": int(file_id)}, - ), - request=EmptyRequest(), - auth_kwargs=kwargs, - ) - file_result = await file_retrieve_operation.execute() - - file_url = file_result.file.download_url - if file_url is None: - raise Exception( - f"No video was found in the response. Full response: {file_result.model_dump()}" - ) - logging.info(f"Generated video URL: {file_url}") - if unique_id: - if hasattr(file_result.file, "backup_download_url"): - message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}" - else: - message = f"Result URL: {file_url}" - PromptServer.instance.send_progress_text(message, unique_id) - - video_io = await download_url_to_bytesio(file_url) - if video_io is None: - error_msg = f"Failed to download video from {file_url}" - logging.error(error_msg) - raise Exception(error_msg) - return (VideoFromFile(video_io),) -class MinimaxImageToVideoNode(MinimaxTextToVideoNode): +class MinimaxImageToVideoNode(comfy_io.ComfyNode): """ Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. 
""" - AVERAGE_DURATION = I2V_AVERAGE_DURATION + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="MinimaxImageToVideoNode", + display_name="MiniMax Image to Video", + category="api node/video/MiniMax", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input( + "image", + tooltip="Image to use as first frame of video generation", + ), + comfy_io.String.Input( + "prompt_text", + multiline=True, + default="", + tooltip="Text prompt to guide the video generation", + ), + comfy_io.Combo.Input( + "model", + options=["I2V-01-Director", "I2V-01", "I2V-01-live"], + default="I2V-01", + tooltip="Model to use for video generation", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + step=1, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + optional=True, + ), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ( - IO.IMAGE, - { - "tooltip": "Image to use as first frame of video generation" - }, - ), - "prompt_text": ( - "STRING", - { - "multiline": True, - "default": "", - "tooltip": "Text prompt to guide the video generation", - }, - ), - "model": ( - [ - "I2V-01-Director", - "I2V-01", - "I2V-01-live", - ], - { - "default": "I2V-01", - "tooltip": "Model to use for video generation", - }, - ), + async def execute( + cls, + image: torch.Tensor, + prompt_text: str, + model: str = "I2V-01", + seed: int = 0, + ) -> comfy_io.NodeOutput: + return await _generate_mm_video( + auth={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("VIDEO",) - DESCRIPTION = "Generates videos from an image and prompts using MiniMax's API" - FUNCTION = "generate_video" - CATEGORY = "api node/video/MiniMax" - API_NODE = True + node_id=cls.hidden.unique_id, + prompt_text=prompt_text, + seed=seed, + model=model, + image=image, + subject=None, + average_duration=I2V_AVERAGE_DURATION, + ) -class MinimaxSubjectToVideoNode(MinimaxTextToVideoNode): +class MinimaxSubjectToVideoNode(comfy_io.ComfyNode): """ Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. 
""" - AVERAGE_DURATION = T2V_AVERAGE_DURATION + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="MinimaxSubjectToVideoNode", + display_name="MiniMax Subject to Video", + category="api node/video/MiniMax", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input( + "subject", + tooltip="Image of subject to reference for video generation", + ), + comfy_io.String.Input( + "prompt_text", + multiline=True, + default="", + tooltip="Text prompt to guide the video generation", + ), + comfy_io.Combo.Input( + "model", + options=["S2V-01"], + default="S2V-01", + tooltip="Model to use for video generation", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + step=1, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + optional=True, + ), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "subject": ( - IO.IMAGE, - { - "tooltip": "Image of subject to reference video generation" - }, - ), - "prompt_text": ( - "STRING", - { - "multiline": True, - "default": "", - "tooltip": "Text prompt to guide the video generation", - }, - ), - "model": ( - [ - "S2V-01", - ], - { - "default": "S2V-01", - "tooltip": "Model to use for video generation", - }, - ), + async def execute( + cls, + subject: torch.Tensor, + prompt_text: str, + model: str = "S2V-01", + seed: int = 0, + ) -> comfy_io.NodeOutput: + return await _generate_mm_video( + auth={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("VIDEO",) - DESCRIPTION = "Generates videos from an image and prompts using MiniMax's API" - FUNCTION = "generate_video" - CATEGORY = "api node/video/MiniMax" - API_NODE = True + node_id=cls.hidden.unique_id, + prompt_text=prompt_text, + seed=seed, + model=model, + image=None, + subject=subject, + average_duration=T2V_AVERAGE_DURATION, + ) -class MinimaxHailuoVideoNode: +class MinimaxHailuoVideoNode(comfy_io.ComfyNode): """Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt_text": ( - "STRING", - { - "multiline": True, - "default": "", - "tooltip": "Text prompt to guide the video generation.", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="MinimaxHailuoVideoNode", + display_name="MiniMax Hailuo Video", + category="api node/video/MiniMax", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt_text", + multiline=True, + default="", + tooltip="Text prompt to guide the video generation.", ), - }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + step=1, + 
control_after_generate=True, + tooltip="The random seed used for creating the noise.", + optional=True, ), - "first_frame_image": ( - IO.IMAGE, - { - "tooltip": "Optional image to use as the first frame to generate a video." - }, + comfy_io.Image.Input( + "first_frame_image", + tooltip="Optional image to use as the first frame to generate a video.", + optional=True, ), - "prompt_optimizer": ( - IO.BOOLEAN, - { - "tooltip": "Optimize prompt to improve generation quality when needed.", - "default": True, - }, + comfy_io.Boolean.Input( + "prompt_optimizer", + default=True, + tooltip="Optimize prompt to improve generation quality when needed.", + optional=True, ), - "duration": ( - IO.COMBO, - { - "tooltip": "The length of the output video in seconds.", - "default": 6, - "options": [6, 10], - }, + comfy_io.Combo.Input( + "duration", + options=[6, 10], + default=6, + tooltip="The length of the output video in seconds.", + optional=True, ), - "resolution": ( - IO.COMBO, - { - "tooltip": "The dimensions of the video display. " - "1080p corresponds to 1920 x 1080 pixels, 768p corresponds to 1366 x 768 pixels.", - "default": "768P", - "options": ["768P", "1080P"], - }, + comfy_io.Combo.Input( + "resolution", + options=["768P", "1080P"], + default="768P", + tooltip="The dimensions of the video display. 1080p is 1920x1080, 768p is 1366x768.", + optional=True, ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + prompt_text: str, + seed: int = 0, + first_frame_image: Optional[torch.Tensor] = None, # used for ImageToVideo + prompt_optimizer: bool = True, + duration: int = 6, + resolution: str = "768P", + model: str = "MiniMax-Hailuo-02", + ) -> comfy_io.NodeOutput: + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, } - - RETURN_TYPES = ("VIDEO",) - DESCRIPTION = cleandoc(__doc__ or "") - FUNCTION = "generate_video" - CATEGORY = "api node/video/MiniMax" - API_NODE = True - - async def generate_video( - self, - prompt_text, - seed=0, - first_frame_image: torch.Tensor=None, # used for ImageToVideo - prompt_optimizer=True, - duration=6, - resolution="768P", - model="MiniMax-Hailuo-02", - unique_id: Union[str, None]=None, - **kwargs, - ): if first_frame_image is None: validate_string(prompt_text, field_name="prompt_text") @@ -408,7 +435,7 @@ class MinimaxHailuoVideoNode: # upload image, if passed in image_url = None if first_frame_image is not None: - image_url = (await upload_images_to_comfyapi(first_frame_image, max_images=1, auth_kwargs=kwargs))[0] + image_url = (await upload_images_to_comfyapi(first_frame_image, max_images=1, auth_kwargs=auth))[0] video_generate_operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -426,7 +453,7 @@ class MinimaxHailuoVideoNode: duration=duration, resolution=resolution, ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) response = await video_generate_operation.execute() @@ -447,8 +474,8 @@ class MinimaxHailuoVideoNode: failed_statuses=["Fail"], status_extractor=lambda x: x.status.value, estimated_duration=average_duration, - node_id=unique_id, - auth_kwargs=kwargs, + node_id=cls.hidden.unique_id, + auth_kwargs=auth, ) task_result = await video_generate_operation.execute() @@ -464,7 +491,7 @@ class 
MinimaxHailuoVideoNode: query_params={"file_id": int(file_id)}, ), request=EmptyRequest(), - auth_kwargs=kwargs, + auth_kwargs=auth, ) file_result = await file_retrieve_operation.execute() @@ -474,34 +501,31 @@ class MinimaxHailuoVideoNode: f"No video was found in the response. Full response: {file_result.model_dump()}" ) logging.info(f"Generated video URL: {file_url}") - if unique_id: + if cls.hidden.unique_id: if hasattr(file_result.file, "backup_download_url"): message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}" else: message = f"Result URL: {file_url}" - PromptServer.instance.send_progress_text(message, unique_id) + PromptServer.instance.send_progress_text(message, cls.hidden.unique_id) video_io = await download_url_to_bytesio(file_url) if video_io is None: error_msg = f"Failed to download video from {file_url}" logging.error(error_msg) raise Exception(error_msg) - return (VideoFromFile(video_io),) + return comfy_io.NodeOutput(VideoFromFile(video_io)) -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "MinimaxTextToVideoNode": MinimaxTextToVideoNode, - "MinimaxImageToVideoNode": MinimaxImageToVideoNode, - # "MinimaxSubjectToVideoNode": MinimaxSubjectToVideoNode, - "MinimaxHailuoVideoNode": MinimaxHailuoVideoNode, -} +class MinimaxExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + MinimaxTextToVideoNode, + MinimaxImageToVideoNode, + # MinimaxSubjectToVideoNode, + MinimaxHailuoVideoNode, + ] -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "MinimaxTextToVideoNode": "MiniMax Text to Video", - "MinimaxImageToVideoNode": "MiniMax Image to Video", - "MinimaxSubjectToVideoNode": "MiniMax Subject to Video", - "MinimaxHailuoVideoNode": "MiniMax Hailuo Video", -} + +async def comfy_entrypoint() -> MinimaxExtension: + return MinimaxExtension() From f9d2e4b742af9aea3c9ffa822397c1b86cec9304 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 13 Sep 2025 00:38:12 +0300 Subject: [PATCH 0578/1073] convert WanCameraEmbedding node to V3 schema (#9714) --- comfy_extras/nodes_camera_trajectory.py | 81 ++++++++++++++++--------- 1 file changed, 51 insertions(+), 30 deletions(-) diff --git a/comfy_extras/nodes_camera_trajectory.py b/comfy_extras/nodes_camera_trajectory.py index 5e0e39f91..eb7ef363c 100644 --- a/comfy_extras/nodes_camera_trajectory.py +++ b/comfy_extras/nodes_camera_trajectory.py @@ -2,12 +2,12 @@ import nodes import torch import numpy as np from einops import rearrange +from typing_extensions import override import comfy.model_management +from comfy_api.latest import ComfyExtension, io -MAX_RESOLUTION = nodes.MAX_RESOLUTION - CAMERA_DICT = { "base_T_norm": 1.5, "base_angle": np.pi/3, @@ -148,32 +148,47 @@ def get_camera_motion(angle, T, speed, n=81): RT = np.stack(RT) return RT -class WanCameraEmbedding: +class WanCameraEmbedding(io.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "camera_pose":(["Static","Pan Up","Pan Down","Pan Left","Pan Right","Zoom In","Zoom Out","Anti Clockwise (ACW)", "ClockWise (CW)"],{"default":"Static"}), - "width": ("INT", {"default": 832, "min": 16, "max": MAX_RESOLUTION, "step": 16}), - "height": ("INT", {"default": 480, "min": 16, "max": MAX_RESOLUTION, "step": 16}), - "length": ("INT", {"default": 81, "min": 1, "max": 
MAX_RESOLUTION, "step": 4}), - }, - "optional":{ - "speed":("FLOAT",{"default":1.0, "min": 0, "max": 10.0, "step": 0.1}), - "fx":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.000000001}), - "fy":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.000000001}), - "cx":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.01}), - "cy":("FLOAT",{"default":0.5, "min": 0, "max": 1, "step": 0.01}), - } + def define_schema(cls): + return io.Schema( + node_id="WanCameraEmbedding", + category="camera", + inputs=[ + io.Combo.Input( + "camera_pose", + options=[ + "Static", + "Pan Up", + "Pan Down", + "Pan Left", + "Pan Right", + "Zoom In", + "Zoom Out", + "Anti Clockwise (ACW)", + "ClockWise (CW)", + ], + default="Static", + ), + io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Float.Input("speed", default=1.0, min=0, max=10.0, step=0.1, optional=True), + io.Float.Input("fx", default=0.5, min=0, max=1, step=0.000000001, optional=True), + io.Float.Input("fy", default=0.5, min=0, max=1, step=0.000000001, optional=True), + io.Float.Input("cx", default=0.5, min=0, max=1, step=0.01, optional=True), + io.Float.Input("cy", default=0.5, min=0, max=1, step=0.01, optional=True), + ], + outputs=[ + io.WanCameraEmbedding.Output(display_name="camera_embedding"), + io.Int.Output(display_name="width"), + io.Int.Output(display_name="height"), + io.Int.Output(display_name="length"), + ], + ) - } - - RETURN_TYPES = ("WAN_CAMERA_EMBEDDING","INT","INT","INT") - RETURN_NAMES = ("camera_embedding","width","height","length") - FUNCTION = "run" - CATEGORY = "camera" - - def run(self, camera_pose, width, height, length, speed=1.0, fx=0.5, fy=0.5, cx=0.5, cy=0.5): + @classmethod + def execute(cls, camera_pose, width, height, length, speed=1.0, fx=0.5, fy=0.5, cx=0.5, cy=0.5) -> io.NodeOutput: """ Use Camera trajectory as extrinsic parameters to calculate Plücker embeddings (Sitzmann et al., 2021) Adapted from https://github.com/aigc-apps/VideoX-Fun/blob/main/comfyui/comfyui_nodes.py @@ -210,9 +225,15 @@ class WanCameraEmbedding: control_camera_video = control_camera_video.contiguous().view(b, f // 4, 4, c, h, w).transpose(2, 3) control_camera_video = control_camera_video.contiguous().view(b, f // 4, c * 4, h, w).transpose(1, 2) - return (control_camera_video, width, height, length) + return io.NodeOutput(control_camera_video, width, height, length) -NODE_CLASS_MAPPINGS = { - "WanCameraEmbedding": WanCameraEmbedding, -} +class CameraTrajectoryExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + WanCameraEmbedding, + ] + +async def comfy_entrypoint() -> CameraTrajectoryExtension: + return CameraTrajectoryExtension() From dcb883498337bcb2758fa9e7b326ea3b63c6b8d4 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 13 Sep 2025 00:38:46 +0300 Subject: [PATCH 0579/1073] convert Cosmos nodes to V3 schema (#9721) --- comfy_extras/nodes_cosmos.py | 129 +++++++++++++++++++---------- 1 file changed, 72 insertions(+), 57 deletions(-) diff --git a/comfy_extras/nodes_cosmos.py b/comfy_extras/nodes_cosmos.py index 4f4960551..7dd129d19 100644 --- a/comfy_extras/nodes_cosmos.py +++ b/comfy_extras/nodes_cosmos.py @@ -1,25 +1,32 @@ +from typing_extensions import override import nodes import torch import comfy.model_management
 import comfy.utils
 import comfy.latent_formats
+from comfy_api.latest import ComfyExtension, io

-class EmptyCosmosLatentVideo:
+
+class EmptyCosmosLatentVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "width": ("INT", {"default": 1280, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                              "height": ("INT", {"default": 704, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                              "length": ("INT", {"default": 121, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 8}),
-                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
-    RETURN_TYPES = ("LATENT",)
-    FUNCTION = "generate"
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="EmptyCosmosLatentVideo",
+            category="latent/video",
+            inputs=[
+                io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=8),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+            ],
+            outputs=[io.Latent.Output()],
+        )

-    CATEGORY = "latent/video"
-
-    def generate(self, width, height, length, batch_size=1):
+    @classmethod
+    def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 8) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
-        return ({"samples": latent}, )
+        return io.NodeOutput({"samples": latent})


 def vae_encode_with_padding(vae, image, width, height, length, padding=0):
@@ -33,31 +40,31 @@ def vae_encode_with_padding(vae, image, width, height, length, padding=0):
     return latent_temp[:, :, :latent_len]


-class CosmosImageToVideoLatent:
+class CosmosImageToVideoLatent(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"vae": ("VAE", ),
-                             "width": ("INT", {"default": 1280, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 704, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 121, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 8}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"start_image": ("IMAGE", ),
-                             "end_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="CosmosImageToVideoLatent",
+            category="conditioning/inpaint",
+            inputs=[
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=8),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.Image.Input("start_image", optional=True),
+                io.Image.Input("end_image", optional=True),
+            ],
+            outputs=[io.Latent.Output()],
+        )

-
-    RETURN_TYPES = ("LATENT",)
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/inpaint"
-
-    def encode(self, vae, width, height, length, batch_size, start_image=None, end_image=None):
+    @classmethod
+    def execute(cls, vae, width, height, length, batch_size, start_image=None, end_image=None) -> io.NodeOutput:
         latent = torch.zeros([1, 16, ((length - 1) // 8) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         if start_image is None and end_image is None:
             out_latent = {}
             out_latent["samples"] = latent
-            return (out_latent,)
+            return io.NodeOutput(out_latent)

         mask = torch.ones([latent.shape[0], 1, ((length - 1) // 8) + 1, latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device())
@@ -74,33 +81,33 @@ class CosmosImageToVideoLatent:
         out_latent = {}
         out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1))
         out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1))
-        return (out_latent,)
+        return io.NodeOutput(out_latent)


-class CosmosPredict2ImageToVideoLatent:
+class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"vae": ("VAE", ),
-                             "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                             "length": ("INT", {"default": 93, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
-                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                             },
-                "optional": {"start_image": ("IMAGE", ),
-                             "end_image": ("IMAGE", ),
-                             }}
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="CosmosPredict2ImageToVideoLatent",
+            category="conditioning/inpaint",
+            inputs=[
+                io.Vae.Input("vae"),
+                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=93, min=1, max=nodes.MAX_RESOLUTION, step=4),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+                io.Image.Input("start_image", optional=True),
+                io.Image.Input("end_image", optional=True),
+            ],
+            outputs=[io.Latent.Output()],
+        )

-
-    RETURN_TYPES = ("LATENT",)
-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/inpaint"
-
-    def encode(self, vae, width, height, length, batch_size, start_image=None, end_image=None):
+    @classmethod
+    def execute(cls, vae, width, height, length, batch_size, start_image=None, end_image=None) -> io.NodeOutput:
         latent = torch.zeros([1, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
         if start_image is None and end_image is None:
             out_latent = {}
             out_latent["samples"] = latent
-            return (out_latent,)
+            return io.NodeOutput(out_latent)

         mask = torch.ones([latent.shape[0], 1, ((length - 1) // 4) + 1, latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device())

@@ -119,10 +126,18 @@ class CosmosPredict2ImageToVideoLatent:
         latent = latent_format.process_out(latent) * mask + latent * (1.0 - mask)
         out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1))
         out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1))
-        return (out_latent,)
+        return io.NodeOutput(out_latent)

-NODE_CLASS_MAPPINGS = {
-    "EmptyCosmosLatentVideo": EmptyCosmosLatentVideo,
-    "CosmosImageToVideoLatent": CosmosImageToVideoLatent,
-    "CosmosPredict2ImageToVideoLatent": CosmosPredict2ImageToVideoLatent,
-}
+
+class CosmosExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            EmptyCosmosLatentVideo,
+            CosmosImageToVideoLatent,
+            CosmosPredict2ImageToVideoLatent,
+        ]
+
+
+async def comfy_entrypoint() -> CosmosExtension:
+    return CosmosExtension()

From ba68e83f1c103eb4cb57fe01328706a0574fff3c Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Sat, 13 Sep 2025 00:39:30 +0300
Subject: [PATCH 0580/1073] convert nodes_cond.py to V3 schema (#9719)

---
 comfy_extras/nodes_cond.py | 75 ++++++++++++++++++++++++--------------
 1 file changed, 47 insertions(+), 28 deletions(-)

diff --git a/comfy_extras/nodes_cond.py b/comfy_extras/nodes_cond.py
index 58c16f621..8b06e3de9 100644
--- a/comfy_extras/nodes_cond.py
+++ b/comfy_extras/nodes_cond.py
@@ -1,15 +1,25 @@
+from typing_extensions import override
+
+from comfy_api.latest import ComfyExtension, io

-class CLIPTextEncodeControlnet:
+class CLIPTextEncodeControlnet(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"clip": ("CLIP", ), "conditioning": ("CONDITIONING", ), "text": ("STRING", {"multiline": True, "dynamicPrompts": True})}}
-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "encode"
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="CLIPTextEncodeControlnet",
+            category="_for_testing/conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.Conditioning.Input("conditioning"),
+                io.String.Input("text", multiline=True, dynamic_prompts=True),
+            ],
+            outputs=[io.Conditioning.Output()],
+            is_experimental=True,
+        )

-    CATEGORY = "_for_testing/conditioning"
-
-    def encode(self, clip, conditioning, text):
+    @classmethod
+    def execute(cls, clip, conditioning, text) -> io.NodeOutput:
         tokens = clip.tokenize(text)
         cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
         c = []
@@ -18,32 +28,41 @@ class CLIPTextEncodeControlnet:
             n[1]['cross_attn_controlnet'] = cond
             n[1]['pooled_output_controlnet'] = pooled
             c.append(n)
-        return (c, )
+        return io.NodeOutput(c)

-class T5TokenizerOptions:
+
+class T5TokenizerOptions(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "clip": ("CLIP", ),
-                "min_padding": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
-                "min_length": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
-            }
-        }
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="T5TokenizerOptions",
+            category="_for_testing/conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.Int.Input("min_padding", default=0, min=0, max=10000, step=1),
+                io.Int.Input("min_length", default=0, min=0, max=10000, step=1),
+            ],
+            outputs=[io.Clip.Output()],
+            is_experimental=True,
+        )

-    CATEGORY = "_for_testing/conditioning"
-    RETURN_TYPES = ("CLIP",)
-    FUNCTION = "set_options"
-
-    def set_options(self, clip, min_padding, min_length):
+    @classmethod
+    def execute(cls, clip, min_padding, min_length) -> io.NodeOutput:
         clip = clip.clone()
         for t5_type in ["t5xxl", "pile_t5xl", "t5base", "mt5xl", "umt5xxl"]:
             clip.set_tokenizer_option("{}_min_padding".format(t5_type), min_padding)
             clip.set_tokenizer_option("{}_min_length".format(t5_type), min_length)
-        return (clip, )
+        return io.NodeOutput(clip)

-NODE_CLASS_MAPPINGS = {
-    "CLIPTextEncodeControlnet": CLIPTextEncodeControlnet,
-    "T5TokenizerOptions": T5TokenizerOptions,
-}
+
+class CondExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            CLIPTextEncodeControlnet,
+            T5TokenizerOptions,
+        ]
+
+
+async def comfy_entrypoint() -> CondExtension:
+    return CondExtension()

From 53c9c7d39ad8a459e84a29e46a3e053154ef6013 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Sat, 13 Sep 2025 00:39:55 +0300
Subject: [PATCH 0581/1073] convert CFG nodes to V3 schema (#9717)

---
 comfy_extras/nodes_cfg.py | 71 +++++++++++++++++++++++++--------------
 1 file changed, 45 insertions(+), 26 deletions(-)

diff --git a/comfy_extras/nodes_cfg.py b/comfy_extras/nodes_cfg.py
index 5abdc115a..4ebb4b51e 100644
--- a/comfy_extras/nodes_cfg.py
+++ b/comfy_extras/nodes_cfg.py
@@ -1,5 +1,10 @@
+from typing_extensions import override
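# [Editorial aside, not part of the patch: the optimized_scale() helper carried
# over below computes, per batch element, the least-squares projection
# coefficient
#     st* = <v_cond, v_uncond> / <v_uncond, v_uncond>
# i.e. the scalar that makes st* * v_uncond the closest rescaling of the
# unconditional prediction toward the conditional one; the surrounding hunks
# suggest CFGZeroStar consumes it as `alpha` in its post-CFG function. The
# formula follows the linked CFG-Zero-star repository; only the function's
# reshape boilerplate is visible in this diff.]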
+
 import torch
+from comfy_api.latest import ComfyExtension, io
+
+
 # https://github.com/WeichenFan/CFG-Zero-star
 def optimized_scale(positive, negative):
     positive_flat = positive.reshape(positive.shape[0], -1)
@@ -16,17 +21,20 @@ def optimized_scale(positive, negative):
     return st_star.reshape([positive.shape[0]] + [1] * (positive.ndim - 1))

-class CFGZeroStar:
+class CFGZeroStar(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"model": ("MODEL",),
-                            }}
-    RETURN_TYPES = ("MODEL",)
-    RETURN_NAMES = ("patched_model",)
-    FUNCTION = "patch"
-    CATEGORY = "advanced/guidance"
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="CFGZeroStar",
+            category="advanced/guidance",
+            inputs=[
+                io.Model.Input("model"),
+            ],
+            outputs=[io.Model.Output(display_name="patched_model")],
+        )

-    def patch(self, model):
+    @classmethod
+    def execute(cls, model) -> io.NodeOutput:
         m = model.clone()
         def cfg_zero_star(args):
             guidance_scale = args['cond_scale']
@@ -38,21 +46,24 @@ class CFGZeroStar:
             return out + uncond_p * (alpha - 1.0) + guidance_scale * uncond_p * (1.0 - alpha)

         m.set_model_sampler_post_cfg_function(cfg_zero_star)
-        return (m, )
+        return io.NodeOutput(m)

-class CFGNorm:
+
+class CFGNorm(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"model": ("MODEL",),
-                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
-                            }}
-    RETURN_TYPES = ("MODEL",)
-    RETURN_NAMES = ("patched_model",)
-    FUNCTION = "patch"
-    CATEGORY = "advanced/guidance"
-    EXPERIMENTAL = True
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="CFGNorm",
+            category="advanced/guidance",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01),
+            ],
+            outputs=[io.Model.Output(display_name="patched_model")],
+            is_experimental=True,
+        )

-    def patch(self, model, strength):
+    @classmethod
+    def execute(cls, model, strength) -> io.NodeOutput:
         m = model.clone()
         def cfg_norm(args):
             cond_p = args['cond_denoised']
@@ -64,9 +75,17 @@ class CFGNorm:
             return pred_text_ * scale * strength

         m.set_model_sampler_post_cfg_function(cfg_norm)
-        return (m, )
+        return io.NodeOutput(m)

-NODE_CLASS_MAPPINGS = {
-    "CFGZeroStar": CFGZeroStar,
-    "CFGNorm": CFGNorm,
-}
+
+class CfgExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            CFGZeroStar,
+            CFGNorm,
+        ]
+
+
+async def comfy_entrypoint() -> CfgExtension:
+    return CfgExtension()

From af99928f2218fc240dcfb3688ec47317ca146a78 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Sat, 13 Sep 2025 00:40:34 +0300
Subject: [PATCH 0582/1073] convert Canny node to V3 schema (#9743)

---
 comfy_extras/nodes_canny.py | 46 +++++++++++++++++++++++++------------
 1 file changed, 31 insertions(+), 15 deletions(-)

diff --git a/comfy_extras/nodes_canny.py b/comfy_extras/nodes_canny.py
index d85e6b856..576f3640a 100644
--- a/comfy_extras/nodes_canny.py
+++ b/comfy_extras/nodes_canny.py
@@ -1,25 +1,41 @@
 from kornia.filters import canny
+from typing_extensions import override
+
 import comfy.model_management
+from comfy_api.latest import ComfyExtension, io

-class Canny:
+
+class Canny(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"image": ("IMAGE",),
-                             "low_threshold": ("FLOAT", {"default": 0.4, "min": 0.01, "max": 0.99, "step": 0.01}),
-                             "high_threshold": ("FLOAT", {"default": 0.8, "min": 0.01, "max": 0.99, "step": 0.01})
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="Canny",
+            category="image/preprocessors",
+            inputs=[
+                io.Image.Input("image"),
+                io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),
+                io.Float.Input("high_threshold", default=0.8, min=0.01, max=0.99, step=0.01),
+            ],
+            outputs=[io.Image.Output()],
+        )

-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "detect_edge"
+    @classmethod
+    def detect_edge(cls, image, low_threshold, high_threshold):
+        # Deprecated: use the V3 schema's `execute` method instead of this.
+        return cls.execute(image, low_threshold, high_threshold)

-    CATEGORY = "image/preprocessors"
-
-    def detect_edge(self, image, low_threshold, high_threshold):
+    @classmethod
+    def execute(cls, image, low_threshold, high_threshold) -> io.NodeOutput:
         output = canny(image.to(comfy.model_management.get_torch_device()).movedim(-1, 1), low_threshold, high_threshold)
         img_out = output[1].to(comfy.model_management.intermediate_device()).repeat(1, 3, 1, 1).movedim(1, -1)
-        return (img_out,)
+        return io.NodeOutput(img_out)

-NODE_CLASS_MAPPINGS = {
-    "Canny": Canny,
-}
+
+class CannyExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [Canny]
+
+
+async def comfy_entrypoint() -> CannyExtension:
+    return CannyExtension()

From 581bae2af30b0839a39734bd97006c4009f9d70a Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Sat, 13 Sep 2025 00:41:26 +0300
Subject: [PATCH 0583/1073] convert Moonvalley API nodes to the V3 schema
 (#9698)

---
 comfy_api_nodes/nodes_moonvalley.py | 572 +++++++++++++++-------------
 1 file changed, 298 insertions(+), 274 deletions(-)

diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py
index 806a70e06..08e838fef 100644
--- a/comfy_api_nodes/nodes_moonvalley.py
+++ b/comfy_api_nodes/nodes_moonvalley.py
@@ -1,6 +1,7 @@
 import logging
 from typing import Any, Callable, Optional, TypeVar
 import torch
+from typing_extensions import override
 from comfy_api_nodes.util.validation_utils import (
     get_image_dimensions,
     validate_image_dimensions,
@@ -26,11 +27,9 @@ from comfy_api_nodes.apinode_utils import (
     upload_images_to_comfyapi,
     upload_video_to_comfyapi,
 )
-from comfy_api_nodes.mapper_utils import model_field_to_node_input

-from comfy_api.input.video_types import VideoInput
-from comfy.comfy_types.node_typing import IO
-from comfy_api.input_impl import VideoFromFile
+from comfy_api.input import VideoInput
+from comfy_api.latest import ComfyExtension, InputImpl, io as comfy_io

 import av
 import io
@@ -362,7 +361,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
         # Return as VideoFromFile using the buffer
         output_buffer.seek(0)
-        return VideoFromFile(output_buffer)
+        return InputImpl.VideoFromFile(output_buffer)

     except Exception as e:
         # Clean up on error
@@ -373,166 +372,150 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
         raise RuntimeError(f"Failed to trim video: {str(e)}") from e

-# --- BaseMoonvalleyVideoNode ---
-class BaseMoonvalleyVideoNode:
-    def parseWidthHeightFromRes(self, resolution: str):
-        # Accepts a string like "16:9 (1920 x 1080)" and returns width, height as a dict
-        res_map = {
-            "16:9 (1920 x 1080)": {"width": 1920, "height": 1080},
-            "9:16 (1080 x 1920)": {"width": 1080, "height": 1920},
-            "1:1 (1152 x 1152)": {"width": 1152, "height": 1152},
-            "4:3 (1536 x 1152)": {"width": 1536, "height": 1152},
-            "3:4 (1152 x 1536)": {"width": 1152, "height": 1536},
-            "21:9 (2560 x 1080)": {"width": 2560, "height": 1080},
-        }
-        if resolution in res_map:
-            return res_map[resolution]
-        else:
-            # Default to 1920x1080 if unknown
-            return {"width": 1920, "height": 1080}
+def parse_width_height_from_res(resolution: str):
+    # Accepts a string like "16:9 (1920 x 1080)" and returns width, height as a dict
+    res_map = {
+        "16:9 (1920 x 1080)": {"width": 1920, "height": 1080},
+        "9:16 (1080 x 1920)": {"width": 1080, "height": 1920},
+        "1:1 (1152 x 1152)": {"width": 1152, "height": 1152},
+        "4:3 (1536 x 1152)": {"width": 1536, "height": 1152},
+        "3:4 (1152 x 1536)": {"width": 1152, "height": 1536},
+        "21:9 (2560 x 1080)": {"width": 2560, "height": 1080},
+    }
+    return res_map.get(resolution, {"width": 1920, "height": 1080})

-    def parseControlParameter(self, value):
-        control_map = {
-            "Motion Transfer": "motion_control",
-            "Canny": "canny_control",
-            "Pose Transfer": "pose_control",
-            "Depth": "depth_control",
-        }
-        if value in control_map:
-            return control_map[value]
-        else:
-            return control_map["Motion Transfer"]
+
+def parse_control_parameter(value):
+    control_map = {
+        "Motion Transfer": "motion_control",
+        "Canny": "canny_control",
+        "Pose Transfer": "pose_control",
+        "Depth": "depth_control",
+    }
+    return control_map.get(value, control_map["Motion Transfer"])

-    async def get_response(
-        self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
-    ) -> MoonvalleyPromptResponse:
-        return await poll_until_finished(
-            auth_kwargs,
-            ApiEndpoint(
-                path=f"{API_PROMPTS_ENDPOINT}/{task_id}",
-                method=HttpMethod.GET,
-                request_model=EmptyRequest,
-                response_model=MoonvalleyPromptResponse,
-            ),
-            result_url_extractor=get_video_url_from_response,
-            node_id=node_id,
-        )
+
+async def get_response(
+    task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
+) -> MoonvalleyPromptResponse:
+    return await poll_until_finished(
+        auth_kwargs,
+        ApiEndpoint(
+            path=f"{API_PROMPTS_ENDPOINT}/{task_id}",
+            method=HttpMethod.GET,
+            request_model=EmptyRequest,
+            response_model=MoonvalleyPromptResponse,
+        ),
+        result_url_extractor=get_video_url_from_response,
+        node_id=node_id,
+    )
+
+
+class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(cls):
-        return {
-            "required": {
-                "prompt": model_field_to_node_input(
-                    IO.STRING,
-                    MoonvalleyTextToVideoRequest,
-                    "prompt_text",
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="MoonvalleyImg2VideoNode",
+            display_name="Moonvalley Marey Image to Video",
+            category="api node/video/Moonvalley Marey",
+            description="Moonvalley Marey Image to Video Node",
+            inputs=[
+                comfy_io.Image.Input(
+                    "image",
+                    tooltip="The reference image used to generate the video",
+                ),
+                comfy_io.String.Input(
+                    "prompt",
                     multiline=True,
                 ),
-                "negative_prompt": model_field_to_node_input(
-                    IO.STRING,
-                    MoonvalleyTextToVideoInferenceParams,
+                comfy_io.String.Input(
                     "negative_prompt",
                     multiline=True,
-                    default=" gopro, bright, contrast, static, overexposed, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                    default=" gopro, bright, contrast, static, overexposed, vignette, "
+                    "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
+                    "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
+                    "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
+                    "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
+                    "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                    tooltip="Negative prompt text",
                 ),
-                "resolution": (
-                    IO.COMBO,
-                    {
-                        "options": [
-                            "16:9 (1920 x 1080)",
-                            "9:16 (1080 x 1920)",
-                            "1:1 (1152 x 1152)",
-                            "4:3 (1440 x 1080)",
-                            "3:4 (1080 x 1440)",
-                            "21:9 (2560 x 1080)",
-                        ],
-                        "default": "16:9 (1920 x 1080)",
-                        "tooltip": "Resolution of the output video",
-                    },
+                comfy_io.Combo.Input(
+                    "resolution",
+                    options=[
+                        "16:9 (1920 x 1080)",
+                        "9:16 (1080 x 1920)",
+                        "1:1 (1152 x 1152)",
+                        "4:3 (1536 x 1152)",
+                        "3:4 (1152 x 1536)",
+                        "21:9 (2560 x 1080)",
+                    ],
+                    default="16:9 (1920 x 1080)",
+                    tooltip="Resolution of the output video",
                 ),
-                "prompt_adherence": model_field_to_node_input(
-                    IO.FLOAT,
-                    MoonvalleyTextToVideoInferenceParams,
-                    "guidance_scale",
+                comfy_io.Float.Input(
+                    "prompt_adherence",
                     default=10.0,
-                    step=1,
-                    min=1,
-                    max=20,
+                    min=1.0,
+                    max=20.0,
+                    step=1.0,
+                    tooltip="Guidance scale for generation control",
                 ),
-                "seed": model_field_to_node_input(
-                    IO.INT,
-                    MoonvalleyTextToVideoInferenceParams,
+                comfy_io.Int.Input(
                     "seed",
                     default=9,
                     min=0,
                     max=4294967295,
                     step=1,
-                    display="number",
+                    display_mode=comfy_io.NumberDisplay.number,
                     tooltip="Random seed value",
                 ),
-                "steps": model_field_to_node_input(
-                    IO.INT,
-                    MoonvalleyTextToVideoInferenceParams,
+                comfy_io.Int.Input(
                     "steps",
                     default=100,
                     min=1,
                     max=100,
+                    step=1,
+                    tooltip="Number of denoising steps",
                 ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-            "optional": {
-                "image": model_field_to_node_input(
-                    IO.IMAGE,
-                    MoonvalleyTextToVideoRequest,
-                    "image_url",
-                    tooltip="The reference image used to generate the video",
-                ),
-            },
-        }
-
-    RETURN_TYPES = ("STRING",)
-    FUNCTION = "generate"
-    CATEGORY = "api node/video/Moonvalley Marey"
-    API_NODE = True
-
-    def generate(self, **kwargs):
-        return None
-
-
-# --- MoonvalleyImg2VideoNode ---
-class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode):
     @classmethod
-    def INPUT_TYPES(cls):
-        return super().INPUT_TYPES()
-
-    RETURN_TYPES = ("VIDEO",)
-    RETURN_NAMES = ("video",)
-    DESCRIPTION = "Moonvalley Marey Image to Video Node"
-
-    async def generate(
-        self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs
-    ):
-        image = kwargs.get("image", None)
-        if image is None:
-            raise MoonvalleyApiError("image is required")
-
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

+    @classmethod
+    async def execute(
+        cls,
+        image: torch.Tensor,
+        prompt: str,
+        negative_prompt: str,
+        resolution: str,
+        prompt_adherence: float,
+        seed: int,
+        steps: int,
+    ) -> comfy_io.NodeOutput:
         validate_input_image(image, True)
         validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
-        width_height = self.parseWidthHeightFromRes(kwargs.get("resolution"))
+        width_height = parse_width_height_from_res(resolution)
+
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }

         inference_params = MoonvalleyTextToVideoInferenceParams(
             negative_prompt=negative_prompt,
-            steps=kwargs.get("steps"),
-            seed=kwargs.get("seed"),
-            guidance_scale=kwargs.get("prompt_adherence"),
+            steps=steps,
+            seed=seed,
+            guidance_scale=prompt_adherence,
             num_frames=128,
-            width=width_height.get("width"),
-            height=width_height.get("height"),
+            width=width_height["width"],
+            height=width_height["height"],
             use_negative_prompts=True,
         )
         """Upload image to comfy backend to have a URL available for further processing"""
@@ -541,7 +524,7 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode):

         image_url = (
             await upload_images_to_comfyapi(
-                image, max_images=1, auth_kwargs=kwargs, mime_type=mime_type
+                image, max_images=1, auth_kwargs=auth, mime_type=mime_type
             )
         )[0]

@@ -556,127 +539,102 @@ class MoonvalleyImg2VideoNode(BaseMoonvalleyVideoNode):
                 response_model=MoonvalleyPromptResponse,
             ),
             request=request,
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
         )
         task_creation_response = await initial_operation.execute()
         validate_task_creation_response(task_creation_response)
        task_id = task_creation_response.id

-        final_response = await self.get_response(
-            task_id, auth_kwargs=kwargs, node_id=unique_id
+        final_response = await get_response(
+            task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id
        )
        video = await download_url_to_video_output(final_response.output_url)
-        return (video,)
+        return comfy_io.NodeOutput(video)


-# --- MoonvalleyVid2VidNode ---
-class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode):
-    def __init__(self):
-        super().__init__()

+class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(cls):
-        return {
-            "required": {
-                "prompt": model_field_to_node_input(
-                    IO.STRING,
-                    MoonvalleyVideoToVideoRequest,
-                    "prompt_text",
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="MoonvalleyVideo2VideoNode",
+            display_name="Moonvalley Marey Video to Video",
+            category="api node/video/Moonvalley Marey",
+            description="",
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
                     multiline=True,
+                    tooltip="Describes the video to generate",
                 ),
-                "negative_prompt": model_field_to_node_input(
-                    IO.STRING,
-                    MoonvalleyVideoToVideoInferenceParams,
                     "negative_prompt",
                     multiline=True,
-                    default=" gopro, bright, contrast, static, overexposed, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                    default=" gopro, bright, contrast, static, overexposed, vignette, "
+                    "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
+                    "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
+                    "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
+                    "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
+                    "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                    tooltip="Negative prompt text",
                 ),
-                "seed": model_field_to_node_input(
-                    IO.INT,
-                    MoonvalleyVideoToVideoInferenceParams,
+                comfy_io.Int.Input(
                     "seed",
                     default=9,
                     min=0,
                     max=4294967295,
                     step=1,
-                    display="number",
+                    display_mode=comfy_io.NumberDisplay.number,
                     tooltip="Random seed value",
                     control_after_generate=False,
                 ),
-                "prompt_adherence": model_field_to_node_input(
-                    IO.FLOAT,
-                    MoonvalleyVideoToVideoInferenceParams,
-                    "guidance_scale",
-                    default=10.0,
+                comfy_io.Video.Input(
+                    "video",
+                    tooltip="The reference video used to generate the output video. Must be at least 5 seconds long. "
+                    "Videos longer than 5s will be automatically trimmed. Only MP4 format supported.",
+                ),
+                comfy_io.Combo.Input(
+                    "control_type",
+                    options=["Motion Transfer", "Pose Transfer"],
+                    default="Motion Transfer",
+                    optional=True,
+                ),
+                comfy_io.Int.Input(
+                    "motion_intensity",
+                    default=100,
+                    min=0,
+                    max=100,
                     step=1,
-                    min=1,
-                    max=20,
+                    tooltip="Only used if control_type is 'Motion Transfer'",
+                    optional=True,
                 ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-            "optional": {
-                "video": (
-                    IO.VIDEO,
-                    {
-                        "default": "",
-                        "multiline": False,
-                        "tooltip": "The reference video used to generate the output video. Must be at least 5 seconds long. Videos longer than 5s will be automatically trimmed. Only MP4 format supported.",
-                    },
-                ),
-                "control_type": (
-                    ["Motion Transfer", "Pose Transfer"],
-                    {"default": "Motion Transfer"},
-                ),
-                "motion_intensity": (
-                    "INT",
-                    {
-                        "default": 100,
-                        "step": 1,
-                        "min": 0,
-                        "max": 100,
-                        "tooltip": "Only used if control_type is 'Motion Transfer'",
-                    },
-                ),
-                "image": model_field_to_node_input(
-                    IO.IMAGE,
-                    MoonvalleyTextToVideoRequest,
-                    "image_url",
-                    tooltip="The reference image used to generate the video",
-                ),
-            },
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        prompt: str,
+        negative_prompt: str,
+        seed: int,
+        video: Optional[VideoInput] = None,
+        control_type: str = "Motion Transfer",
+        motion_intensity: Optional[int] = 100,
+    ) -> comfy_io.NodeOutput:
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
        }

-    RETURN_TYPES = ("VIDEO",)
-    RETURN_NAMES = ("video",)
-
-    async def generate(
-        self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs
-    ):
-        video = kwargs.get("video")
-        image = kwargs.get("image", None)
-
-        if not video:
-            raise MoonvalleyApiError("video is required")
-
-        video_url = ""
-        if video:
-            validated_video = validate_video_to_video_input(video)
-            video_url = await upload_video_to_comfyapi(
-                validated_video, auth_kwargs=kwargs
-            )
-        mime_type = "image/png"
-
-        if not image is None:
-            validate_input_image(image, with_frame_conditioning=True)
-            image_url = await upload_images_to_comfyapi(
-                image=image, auth_kwargs=kwargs, max_images=1, mime_type=mime_type
-            )
-        control_type = kwargs.get("control_type")
-        motion_intensity = kwargs.get("motion_intensity")
+        validated_video = validate_video_to_video_input(video)
+        video_url = await upload_video_to_comfyapi(validated_video, auth_kwargs=auth)

         """Validate prompts and inference input"""
         validate_prompts(prompt, negative_prompt)
@@ -688,11 +646,11 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode):

         inference_params = MoonvalleyVideoToVideoInferenceParams(
             negative_prompt=negative_prompt,
-            seed=kwargs.get("seed"),
+            seed=seed,
             control_params=control_params,
         )

-        control = self.parseControlParameter(control_type)
+        control = parse_control_parameter(control_type)

         request = MoonvalleyVideoToVideoRequest(
             control_type=control,
            video_url=video_url,
            prompt_text=prompt,
            inference_params=inference_params,
        )
-        request.image_url = image_url if not image is None else None

         initial_operation = SynchronousOperation(
             endpoint=ApiEndpoint(
@@ -710,58 +667,125 @@ class MoonvalleyVideo2VideoNode(BaseMoonvalleyVideoNode):
                 response_model=MoonvalleyPromptResponse,
             ),
             request=request,
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
         )
         task_creation_response = await initial_operation.execute()
         validate_task_creation_response(task_creation_response)
         task_id = task_creation_response.id

-        final_response = await self.get_response(
-            task_id, auth_kwargs=kwargs, node_id=unique_id
+        final_response = await get_response(
+            task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id
         )

         video = await download_url_to_video_output(final_response.output_url)
-
-        return (video,)
+        return comfy_io.NodeOutput(video)


-# --- MoonvalleyTxt2VideoNode ---
-class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode):
-    def __init__(self):
-        super().__init__()
-
-    RETURN_TYPES = ("VIDEO",)
-    RETURN_NAMES = ("video",)
+class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(cls):
-        input_types = super().INPUT_TYPES()
-        # Remove image-specific parameters
-        for param in ["image"]:
-            if param in input_types["optional"]:
-                del input_types["optional"][param]
-        return input_types
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="MoonvalleyTxt2VideoNode",
+            display_name="Moonvalley Marey Text to Video",
+            category="api node/video/Moonvalley Marey",
+            description="",
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    multiline=True,
+                    default=" gopro, bright, contrast, static, overexposed, vignette, "
+                    "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, "
+                    "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, "
+                    "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, "
+                    "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, "
+                    "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
+                    tooltip="Negative prompt text",
+                ),
+                comfy_io.Combo.Input(
+                    "resolution",
+                    options=[
+                        "16:9 (1920 x 1080)",
+                        "9:16 (1080 x 1920)",
+                        "1:1 (1152 x 1152)",
+                        "4:3 (1536 x 1152)",
+                        "3:4 (1152 x 1536)",
+                        "21:9 (2560 x 1080)",
+                    ],
+                    default="16:9 (1920 x 1080)",
+                    tooltip="Resolution of the output video",
+                ),
+                comfy_io.Float.Input(
+                    "prompt_adherence",
+                    default=10.0,
+                    min=1.0,
+                    max=20.0,
+                    step=1.0,
+                    tooltip="Guidance scale for generation control",
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=9,
+                    min=0,
+                    max=4294967295,
+                    step=1,
+                    display_mode=comfy_io.NumberDisplay.number,
+                    tooltip="Random seed value",
+                ),
+                comfy_io.Int.Input(
+                    "steps",
+                    default=100,
+                    min=1,
+                    max=100,
+                    step=1,
+                    tooltip="Inference steps",
+                ),
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

-    async def generate(
-        self, prompt, negative_prompt, unique_id: Optional[str] = None, **kwargs
-    ):
+    @classmethod
+    async def execute(
+        cls,
+        prompt: str,
+        negative_prompt: str,
+        resolution: str,
+        prompt_adherence: float,
+        seed: int,
+        steps: int,
+    ) -> comfy_io.NodeOutput:
         validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
-        width_height = self.parseWidthHeightFromRes(kwargs.get("resolution"))
+        width_height = parse_width_height_from_res(resolution)
+
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }

         inference_params = MoonvalleyTextToVideoInferenceParams(
             negative_prompt=negative_prompt,
-            steps=kwargs.get("steps"),
-            seed=kwargs.get("seed"),
-            guidance_scale=kwargs.get("prompt_adherence"),
+            steps=steps,
+            seed=seed,
+            guidance_scale=prompt_adherence,
             num_frames=128,
-            width=width_height.get("width"),
-            height=width_height.get("height"),
+            width=width_height["width"],
+            height=width_height["height"],
         )
         request = MoonvalleyTextToVideoRequest(
             prompt_text=prompt, inference_params=inference_params
         )
-        initial_operation = SynchronousOperation(
+        init_op = SynchronousOperation(
             endpoint=ApiEndpoint(
                 path=API_TXT2VIDEO_ENDPOINT,
                 method=HttpMethod.POST,
@@ -769,29 +793,29 @@ class MoonvalleyTxt2VideoNode(BaseMoonvalleyVideoNode):
                 response_model=MoonvalleyPromptResponse,
             ),
             request=request,
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
         )
-        task_creation_response = await initial_operation.execute()
+        task_creation_response = await init_op.execute()
         validate_task_creation_response(task_creation_response)
         task_id = task_creation_response.id

-        final_response = await self.get_response(
-            task_id, auth_kwargs=kwargs, node_id=unique_id
+        final_response = await get_response(
+            task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id
         )

         video = await download_url_to_video_output(final_response.output_url)
-        return (video,)
+        return comfy_io.NodeOutput(video)


-NODE_CLASS_MAPPINGS = {
-    "MoonvalleyImg2VideoNode": MoonvalleyImg2VideoNode,
-    "MoonvalleyTxt2VideoNode": MoonvalleyTxt2VideoNode,
-    "MoonvalleyVideo2VideoNode": MoonvalleyVideo2VideoNode,
-}
+class MoonvalleyExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+        return [
+            MoonvalleyImg2VideoNode,
+            MoonvalleyTxt2VideoNode,
+            MoonvalleyVideo2VideoNode,
+        ]

-NODE_DISPLAY_NAME_MAPPINGS = {
-    "MoonvalleyImg2VideoNode": "Moonvalley Marey Image to Video",
-    "MoonvalleyTxt2VideoNode": "Moonvalley Marey Text to Video",
-    "MoonvalleyVideo2VideoNode": "Moonvalley Marey Video to Video",
-}
+
+async def comfy_entrypoint() -> MoonvalleyExtension:
+    return MoonvalleyExtension()

From b149e2e1e302e75ce5b47e9b823b42b304d70b4b Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 12 Sep 2025 14:53:15 -0700
Subject: [PATCH 0584/1073] Better way of doing the generator for the hunyuan
 image noise aug. (#9834)

---
 comfy/model_base.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/comfy/model_base.py b/comfy/model_base.py
index 4176bca25..324d89cff 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -1448,7 +1448,9 @@ class HunyuanImage21Refiner(HunyuanImage21):
             image = self.process_latent_in(image)
             image = utils.resize_to_batch_size(image, noise.shape[0])
             if noise_augmentation > 0:
-                noise = torch.randn(image.shape, generator=torch.manual_seed(kwargs.get("seed", 0) - 10), dtype=image.dtype, device="cpu").to(image.device)
+                generator = torch.Generator(device="cpu")
+                generator.manual_seed(kwargs.get("seed", 0) - 10)
+                noise = torch.randn(image.shape, generator=generator, dtype=image.dtype, device="cpu").to(image.device)
                 image = noise_augmentation * noise + min(1.0 - noise_augmentation, 0.75) * image
             else:
                 image = 0.75 * image

From d7f40442f91a02946cab7445c6204bf154b1e86f Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Fri, 12 Sep 2025 15:07:38 -0700
Subject: [PATCH 0585/1073] Enable Runtime Selection of Attention Functions
 (#9639)

* Looking into a @wrap_attn decorator to look for 'optimized_attention_override' entry in transformer_options
* Created logging code for this branch so that it can be used to track down all the code paths where transformer_options would need to be added
* Fix memory usage issue with inspect
* Made WAN attention receive transformer_options, test node added to wan to test out attention override later
* Added **kwargs to all attention functions so transformer_options could potentially be passed through
* Make sure wrap_attn doesn't make itself recurse infinitely, attempt to load SageAttention and FlashAttention if not enabled so that they can be marked as available or not, create registry for available attention
* Turn off attention logging for now, make AttentionOverrideTestNode have a dropdown with available attention (this is a test node only)
* Make flux work with optimized_attention_override
* Add logs to verify optimized_attention_override is passed all the way into attention function
* Make Qwen work with optimized_attention_override
* Made hidream work with optimized_attention_override
* Made wan patches_replace work with optimized_attention_override
* Made SD3 work with optimized_attention_override
* Made HunyuanVideo work with optimized_attention_override
* Made Mochi work with optimized_attention_override
* Made LTX work with optimized_attention_override
* Made StableAudio work with optimized_attention_override
* Made optimized_attention_override work with ACE Step
* Made Hunyuan3D work with optimized_attention_override
* Make CosmosPredict2 work with optimized_attention_override
* Made CosmosVideo work with optimized_attention_override
* Made Omnigen 2 work with optimized_attention_override
* Made StableCascade work with optimized_attention_override
* Made AuraFlow work with optimized_attention_override
* Made Lumina work with optimized_attention_override
* Made Chroma work with optimized_attention_override
* Made SVD work with optimized_attention_override
* Fix WanI2VCrossAttention so that it expects to receive transformer_options
* Fixed Wan2.1 Fun Camera transformer_options passthrough
* Fixed WAN 2.1 VACE transformer_options passthrough
* Add optimized to get_attention_function
* Disable attention logs for now
* Remove attention logging code
* Remove _register_core_attention_functions, as we wouldn't want someone to call that, just in case
* Satisfy ruff
* Remove AttentionOverrideTest node, that's something to cook up for later
---
 comfy/ldm/ace/attention.py                    |   9 +-
 comfy/ldm/ace/model.py                        |   4 +
 comfy/ldm/audio/dit.py                        |  25 ++--
 comfy/ldm/aura/mmdit.py                       |  29 ++---
 comfy/ldm/cascade/common.py                   |  12 +-
 comfy/ldm/cascade/stage_b.py                  |  14 +--
 comfy/ldm/cascade/stage_c.py                  |  14 +--
 comfy/ldm/chroma/layers.py                    |   8 +-
 comfy/ldm/chroma/model.py                     |  17 ++-
 comfy/ldm/cosmos/blocks.py                    |  10 +-
 comfy/ldm/cosmos/model.py                     |   2 +
 comfy/ldm/cosmos/predict2.py                  |  17 ++-
 comfy/ldm/flux/layers.py                      |  10 +-
 comfy/ldm/flux/math.py                        |   4 +-
 comfy/ldm/flux/model.py                       |  17 ++-
 .../genmo/joint_model/asymm_models_joint.py   |  11 +-
 comfy/ldm/hidream/model.py                    |  18 ++-
 comfy/ldm/hunyuan3d/model.py                  |  17 ++-
 comfy/ldm/hunyuan_video/model.py              |  25 ++--
 comfy/ldm/lightricks/model.py                 |  19 +--
 comfy/ldm/lumina/model.py                     |  17 ++-
 comfy/ldm/modules/attention.py                | 114 +++++++++++++-----
 comfy/ldm/modules/diffusionmodules/mmdit.py   |   9 +-
 comfy/ldm/omnigen/omnigen2.py                 |  23 ++--
 comfy/ldm/qwen_image/model.py                 |  12 +-
 comfy/ldm/wan/model.py                        |  38 +++---
 26 files changed, 316 insertions(+), 179 deletions(-)

diff --git a/comfy/ldm/ace/attention.py b/comfy/ldm/ace/attention.py
index f20a01669..670eb9783 100644
--- a/comfy/ldm/ace/attention.py
+++ b/comfy/ldm/ace/attention.py
@@ -133,6 +133,7 @@ class Attention(nn.Module):
         hidden_states: torch.Tensor,
         encoder_hidden_states: Optional[torch.Tensor] = None,
         attention_mask: Optional[torch.Tensor] = None,
+        transformer_options={},
         **cross_attention_kwargs,
     ) -> torch.Tensor:
         return self.processor(
@@ -140,6 +141,7 @@ class Attention(nn.Module):
             hidden_states,
             encoder_hidden_states=encoder_hidden_states,
             attention_mask=attention_mask,
+            transformer_options=transformer_options,
             **cross_attention_kwargs,
         )

@@ -366,6 +368,7 @@ class CustomerAttnProcessor2_0:
         encoder_attention_mask: Optional[torch.FloatTensor] = None,
         rotary_freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]] = None,
         rotary_freqs_cis_cross: Union[torch.Tensor, Tuple[torch.Tensor]] = None,
+        transformer_options={},
         *args,
         **kwargs,
     ) -> torch.Tensor:
@@ -433,7 +436,7 @@ class CustomerAttnProcessor2_0:

         # the output of sdp = (batch, num_heads, seq_len, head_dim)
         hidden_states = optimized_attention(
-            query, key, value, heads=query.shape[1], mask=attention_mask, skip_reshape=True,
+            query, key, value, heads=query.shape[1], mask=attention_mask, skip_reshape=True, transformer_options=transformer_options,
         ).to(query.dtype)

         # linear proj
@@ -697,6 +700,7 @@ class LinearTransformerBlock(nn.Module):
         rotary_freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]] = None,
         rotary_freqs_cis_cross: Union[torch.Tensor, Tuple[torch.Tensor]] = None,
         temb: torch.FloatTensor = None,
+        transformer_options={},
     ):
         N = hidden_states.shape[0]

@@ -720,6 +724,7 @@ class LinearTransformerBlock(nn.Module):
                 encoder_attention_mask=encoder_attention_mask,
                 rotary_freqs_cis=rotary_freqs_cis,
                 rotary_freqs_cis_cross=rotary_freqs_cis_cross,
+                transformer_options=transformer_options,
             )
         else:
             attn_output, _ = self.attn(
@@ -729,6 +734,7 @@ class LinearTransformerBlock(nn.Module):
                 encoder_attention_mask=None,
                 rotary_freqs_cis=rotary_freqs_cis,
                 rotary_freqs_cis_cross=None,
+                transformer_options=transformer_options,
             )

         if self.use_adaln_single:
@@ -743,6 +749,7 @@ class LinearTransformerBlock(nn.Module):
                 encoder_attention_mask=encoder_attention_mask,
                 rotary_freqs_cis=rotary_freqs_cis,
                 rotary_freqs_cis_cross=rotary_freqs_cis_cross,
+                transformer_options=transformer_options,
             )
             hidden_states = attn_output + hidden_states

diff --git a/comfy/ldm/ace/model.py b/comfy/ldm/ace/model.py
index 41d85eeb5..399329853 100644
--- a/comfy/ldm/ace/model.py
+++ b/comfy/ldm/ace/model.py
@@ -314,6 +314,7 @@ class ACEStepTransformer2DModel(nn.Module):
         output_length: int = 0,
         block_controlnet_hidden_states: Optional[Union[List[torch.Tensor], torch.Tensor]] = None,
         controlnet_scale: Union[float, torch.Tensor] = 1.0,
+        transformer_options={},
     ):
         embedded_timestep = self.timestep_embedder(self.time_proj(timestep).to(dtype=hidden_states.dtype))
         temb = self.t_block(embedded_timestep)
@@ -339,6 +340,7 @@ class ACEStepTransformer2DModel(nn.Module):
                 rotary_freqs_cis=rotary_freqs_cis,
                 rotary_freqs_cis_cross=encoder_rotary_freqs_cis,
                 temb=temb,
+                transformer_options=transformer_options,
             )

         output = self.final_layer(hidden_states, embedded_timestep, output_length)
@@ -393,6 +395,7 @@ class ACEStepTransformer2DModel(nn.Module):

         output_length = hidden_states.shape[-1]

+        transformer_options = kwargs.get("transformer_options", {})
         output = self.decode(
             hidden_states=hidden_states,
             attention_mask=attention_mask,
@@ -402,6 +405,7 @@ class ACEStepTransformer2DModel(nn.Module):
             output_length=output_length,
             block_controlnet_hidden_states=block_controlnet_hidden_states,
             controlnet_scale=controlnet_scale,
+            transformer_options=transformer_options,
         )

         return output
diff --git a/comfy/ldm/audio/dit.py b/comfy/ldm/audio/dit.py
index d0d69bbdc..ca865189e 100644
--- a/comfy/ldm/audio/dit.py
+++ b/comfy/ldm/audio/dit.py
@@ -298,7 +298,8 @@ class Attention(nn.Module):
         mask = None,
         context_mask = None,
         rotary_pos_emb = None,
-        causal = None
+        causal = None,
+        transformer_options={},
     ):
         h, kv_h, has_context = self.num_heads, self.kv_heads, context is not None

@@ -363,7 +364,7 @@ class Attention(nn.Module):
             heads_per_kv_head = h // kv_h
             k, v = map(lambda t: t.repeat_interleave(heads_per_kv_head, dim = 1), (k, v))

-        out = optimized_attention(q, k, v, h, skip_reshape=True)
+        out = optimized_attention(q, k, v, h, skip_reshape=True, transformer_options=transformer_options)
         out = self.to_out(out)

         if mask is not None:
@@ -488,7 +489,8 @@ class TransformerBlock(nn.Module):
         global_cond=None,
         mask = None,
         context_mask = None,
-        rotary_pos_emb = None
+        rotary_pos_emb = None,
+        transformer_options={}
     ):
         if self.global_cond_dim is not None and self.global_cond_dim > 0 and global_cond is not None:

@@ -498,12 +500,12 @@ class TransformerBlock(nn.Module):
             residual = x
             x = self.pre_norm(x)
             x = x * (1 + scale_self) + shift_self
-            x = self.self_attn(x, mask = mask, rotary_pos_emb = rotary_pos_emb)
+            x = self.self_attn(x, mask = mask, rotary_pos_emb = rotary_pos_emb, transformer_options=transformer_options)
             x = x * torch.sigmoid(1 - gate_self)
             x = x + residual

             if context is not None:
-                x = x + self.cross_attn(self.cross_attend_norm(x), context = context, context_mask = context_mask)
+                x = x + self.cross_attn(self.cross_attend_norm(x), context = context, context_mask = context_mask, transformer_options=transformer_options)

             if self.conformer is not None:
                x = x + self.conformer(x)
x = x + self.conformer(x) @@ -606,7 +608,8 @@ class ContinuousTransformer(nn.Module): return_info = False, **kwargs ): - patches_replace = kwargs.get("transformer_options", {}).get("patches_replace", {}) + transformer_options = kwargs.get("transformer_options", {}) + patches_replace = transformer_options.get("patches_replace", {}) batch, seq, device = *x.shape[:2], x.device context = kwargs["context"] @@ -645,13 +648,13 @@ class ContinuousTransformer(nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = layer(args["img"], rotary_pos_emb=args["pe"], global_cond=args["vec"], context=args["txt"]) + out["img"] = layer(args["img"], rotary_pos_emb=args["pe"], global_cond=args["vec"], context=args["txt"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": global_cond, "pe": rotary_pos_emb}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": global_cond, "pe": rotary_pos_emb, "transformer_options": transformer_options}, {"original_block": block_wrap}) x = out["img"] else: - x = layer(x, rotary_pos_emb = rotary_pos_emb, global_cond=global_cond, context=context) + x = layer(x, rotary_pos_emb = rotary_pos_emb, global_cond=global_cond, context=context, transformer_options=transformer_options) # x = checkpoint(layer, x, rotary_pos_emb = rotary_pos_emb, global_cond=global_cond, **kwargs) if return_info: diff --git a/comfy/ldm/aura/mmdit.py b/comfy/ldm/aura/mmdit.py index d7f32b5e8..66d9613b6 100644 --- a/comfy/ldm/aura/mmdit.py +++ b/comfy/ldm/aura/mmdit.py @@ -85,7 +85,7 @@ class SingleAttention(nn.Module): ) #@torch.compile() - def forward(self, c): + def forward(self, c, transformer_options={}): bsz, seqlen1, _ = c.shape @@ -95,7 +95,7 @@ class SingleAttention(nn.Module): v = v.view(bsz, seqlen1, self.n_heads, self.head_dim) q, k = self.q_norm1(q), self.k_norm1(k) - output = optimized_attention(q.permute(0, 2, 1, 3), k.permute(0, 2, 1, 3), v.permute(0, 2, 1, 3), self.n_heads, skip_reshape=True) + output = optimized_attention(q.permute(0, 2, 1, 3), k.permute(0, 2, 1, 3), v.permute(0, 2, 1, 3), self.n_heads, skip_reshape=True, transformer_options=transformer_options) c = self.w1o(output) return c @@ -144,7 +144,7 @@ class DoubleAttention(nn.Module): #@torch.compile() - def forward(self, c, x): + def forward(self, c, x, transformer_options={}): bsz, seqlen1, _ = c.shape bsz, seqlen2, _ = x.shape @@ -168,7 +168,7 @@ class DoubleAttention(nn.Module): torch.cat([cv, xv], dim=1), ) - output = optimized_attention(q.permute(0, 2, 1, 3), k.permute(0, 2, 1, 3), v.permute(0, 2, 1, 3), self.n_heads, skip_reshape=True) + output = optimized_attention(q.permute(0, 2, 1, 3), k.permute(0, 2, 1, 3), v.permute(0, 2, 1, 3), self.n_heads, skip_reshape=True, transformer_options=transformer_options) c, x = output.split([seqlen1, seqlen2], dim=1) c = self.w1o(c) @@ -207,7 +207,7 @@ class MMDiTBlock(nn.Module): self.is_last = is_last #@torch.compile() - def forward(self, c, x, global_cond, **kwargs): + def forward(self, c, x, global_cond, transformer_options={}, **kwargs): cres, xres = c, x @@ -225,7 +225,7 @@ class MMDiTBlock(nn.Module): x = modulate(self.normX1(x), xshift_msa, xscale_msa) # attention - c, x = self.attn(c, x) + c, x = self.attn(c, x, transformer_options=transformer_options) c = self.normC2(cres + cgate_msa.unsqueeze(1) * c) @@ -255,13 +255,13 @@ class DiTBlock(nn.Module): self.mlp = MLP(dim, hidden_dim=dim * 4, 
dtype=dtype, device=device, operations=operations) #@torch.compile() - def forward(self, cx, global_cond, **kwargs): + def forward(self, cx, global_cond, transformer_options={}, **kwargs): cxres = cx shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.modCX( global_cond ).chunk(6, dim=1) cx = modulate(self.norm1(cx), shift_msa, scale_msa) - cx = self.attn(cx) + cx = self.attn(cx, transformer_options=transformer_options) cx = self.norm2(cxres + gate_msa.unsqueeze(1) * cx) mlpout = self.mlp(modulate(cx, shift_mlp, scale_mlp)) cx = gate_mlp.unsqueeze(1) * mlpout @@ -473,13 +473,14 @@ class MMDiT(nn.Module): out = {} out["txt"], out["img"] = layer(args["txt"], args["img"], - args["vec"]) + args["vec"], + transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": c, "vec": global_cond}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": c, "vec": global_cond, "transformer_options": transformer_options}, {"original_block": block_wrap}) c = out["txt"] x = out["img"] else: - c, x = layer(c, x, global_cond, **kwargs) + c, x = layer(c, x, global_cond, transformer_options=transformer_options, **kwargs) if len(self.single_layers) > 0: c_len = c.size(1) @@ -488,13 +489,13 @@ class MMDiT(nn.Module): if ("single_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = layer(args["img"], args["vec"]) + out["img"] = layer(args["img"], args["vec"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("single_block", i)]({"img": cx, "vec": global_cond}, {"original_block": block_wrap}) + out = blocks_replace[("single_block", i)]({"img": cx, "vec": global_cond, "transformer_options": transformer_options}, {"original_block": block_wrap}) cx = out["img"] else: - cx = layer(cx, global_cond, **kwargs) + cx = layer(cx, global_cond, transformer_options=transformer_options, **kwargs) x = cx[:, c_len:] diff --git a/comfy/ldm/cascade/common.py b/comfy/ldm/cascade/common.py index 3eaa0c821..42ef98c7a 100644 --- a/comfy/ldm/cascade/common.py +++ b/comfy/ldm/cascade/common.py @@ -32,12 +32,12 @@ class OptimizedAttention(nn.Module): self.out_proj = operations.Linear(c, c, bias=True, dtype=dtype, device=device) - def forward(self, q, k, v): + def forward(self, q, k, v, transformer_options={}): q = self.to_q(q) k = self.to_k(k) v = self.to_v(v) - out = optimized_attention(q, k, v, self.heads) + out = optimized_attention(q, k, v, self.heads, transformer_options=transformer_options) return self.out_proj(out) @@ -47,13 +47,13 @@ class Attention2D(nn.Module): self.attn = OptimizedAttention(c, nhead, dtype=dtype, device=device, operations=operations) # self.attn = nn.MultiheadAttention(c, nhead, dropout=dropout, bias=True, batch_first=True, dtype=dtype, device=device) - def forward(self, x, kv, self_attn=False): + def forward(self, x, kv, self_attn=False, transformer_options={}): orig_shape = x.shape x = x.view(x.size(0), x.size(1), -1).permute(0, 2, 1) # Bx4xHxW -> Bx(HxW)x4 if self_attn: kv = torch.cat([x, kv], dim=1) # x = self.attn(x, kv, kv, need_weights=False)[0] - x = self.attn(x, kv, kv) + x = self.attn(x, kv, kv, transformer_options=transformer_options) x = x.permute(0, 2, 1).view(*orig_shape) return x @@ -114,9 +114,9 @@ class AttnBlock(nn.Module): operations.Linear(c_cond, c, dtype=dtype, device=device) ) - def forward(self, x, kv): + def forward(self, x, kv, transformer_options={}): kv = self.kv_mapper(kv) - x = x + self.attention(self.norm(x), 
kv, self_attn=self.self_attn) + x = x + self.attention(self.norm(x), kv, self_attn=self.self_attn, transformer_options=transformer_options) return x diff --git a/comfy/ldm/cascade/stage_b.py b/comfy/ldm/cascade/stage_b.py index 773830956..428c67fdf 100644 --- a/comfy/ldm/cascade/stage_b.py +++ b/comfy/ldm/cascade/stage_b.py @@ -173,7 +173,7 @@ class StageB(nn.Module): clip = self.clip_norm(clip) return clip - def _down_encode(self, x, r_embed, clip): + def _down_encode(self, x, r_embed, clip, transformer_options={}): level_outputs = [] block_group = zip(self.down_blocks, self.down_downscalers, self.down_repeat_mappers) for down_block, downscaler, repmap in block_group: @@ -187,7 +187,7 @@ class StageB(nn.Module): elif isinstance(block, AttnBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, AttnBlock)): - x = block(x, clip) + x = block(x, clip, transformer_options=transformer_options) elif isinstance(block, TimestepBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, TimestepBlock)): @@ -199,7 +199,7 @@ class StageB(nn.Module): level_outputs.insert(0, x) return level_outputs - def _up_decode(self, level_outputs, r_embed, clip): + def _up_decode(self, level_outputs, r_embed, clip, transformer_options={}): x = level_outputs[0] block_group = zip(self.up_blocks, self.up_upscalers, self.up_repeat_mappers) for i, (up_block, upscaler, repmap) in enumerate(block_group): @@ -216,7 +216,7 @@ class StageB(nn.Module): elif isinstance(block, AttnBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, AttnBlock)): - x = block(x, clip) + x = block(x, clip, transformer_options=transformer_options) elif isinstance(block, TimestepBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, TimestepBlock)): @@ -228,7 +228,7 @@ class StageB(nn.Module): x = upscaler(x) return x - def forward(self, x, r, effnet, clip, pixels=None, **kwargs): + def forward(self, x, r, effnet, clip, pixels=None, transformer_options={}, **kwargs): if pixels is None: pixels = x.new_zeros(x.size(0), 3, 8, 8) @@ -245,8 +245,8 @@ class StageB(nn.Module): nn.functional.interpolate(effnet, size=x.shape[-2:], mode='bilinear', align_corners=True)) x = x + nn.functional.interpolate(self.pixels_mapper(pixels), size=x.shape[-2:], mode='bilinear', align_corners=True) - level_outputs = self._down_encode(x, r_embed, clip) - x = self._up_decode(level_outputs, r_embed, clip) + level_outputs = self._down_encode(x, r_embed, clip, transformer_options=transformer_options) + x = self._up_decode(level_outputs, r_embed, clip, transformer_options=transformer_options) return self.clf(x) def update_weights_ema(self, src_model, beta=0.999): diff --git a/comfy/ldm/cascade/stage_c.py b/comfy/ldm/cascade/stage_c.py index b952d0349..ebc4434e2 100644 --- a/comfy/ldm/cascade/stage_c.py +++ b/comfy/ldm/cascade/stage_c.py @@ -182,7 +182,7 @@ class StageC(nn.Module): clip = self.clip_norm(clip) return clip - def _down_encode(self, x, r_embed, clip, cnet=None): + def _down_encode(self, x, r_embed, clip, cnet=None, transformer_options={}): level_outputs = [] block_group = zip(self.down_blocks, self.down_downscalers, self.down_repeat_mappers) for down_block, downscaler, repmap in block_group: @@ -201,7 +201,7 @@ class StageC(nn.Module): elif isinstance(block, AttnBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, AttnBlock)): - x = block(x, clip) + x = block(x, clip, 
transformer_options=transformer_options) elif isinstance(block, TimestepBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, TimestepBlock)): @@ -213,7 +213,7 @@ class StageC(nn.Module): level_outputs.insert(0, x) return level_outputs - def _up_decode(self, level_outputs, r_embed, clip, cnet=None): + def _up_decode(self, level_outputs, r_embed, clip, cnet=None, transformer_options={}): x = level_outputs[0] block_group = zip(self.up_blocks, self.up_upscalers, self.up_repeat_mappers) for i, (up_block, upscaler, repmap) in enumerate(block_group): @@ -235,7 +235,7 @@ class StageC(nn.Module): elif isinstance(block, AttnBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, AttnBlock)): - x = block(x, clip) + x = block(x, clip, transformer_options=transformer_options) elif isinstance(block, TimestepBlock) or ( hasattr(block, '_fsdp_wrapped_module') and isinstance(block._fsdp_wrapped_module, TimestepBlock)): @@ -247,7 +247,7 @@ class StageC(nn.Module): x = upscaler(x) return x - def forward(self, x, r, clip_text, clip_text_pooled, clip_img, control=None, **kwargs): + def forward(self, x, r, clip_text, clip_text_pooled, clip_img, control=None, transformer_options={}, **kwargs): # Process the conditioning embeddings r_embed = self.gen_r_embedding(r).to(dtype=x.dtype) for c in self.t_conds: @@ -262,8 +262,8 @@ class StageC(nn.Module): # Model Blocks x = self.embedding(x) - level_outputs = self._down_encode(x, r_embed, clip, cnet) - x = self._up_decode(level_outputs, r_embed, clip, cnet) + level_outputs = self._down_encode(x, r_embed, clip, cnet, transformer_options=transformer_options) + x = self._up_decode(level_outputs, r_embed, clip, cnet, transformer_options=transformer_options) return self.clf(x) def update_weights_ema(self, src_model, beta=0.999): diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py index 2a0dec606..fc7110cce 100644 --- a/comfy/ldm/chroma/layers.py +++ b/comfy/ldm/chroma/layers.py @@ -76,7 +76,7 @@ class DoubleStreamBlock(nn.Module): ) self.flipped_img_txt = flipped_img_txt - def forward(self, img: Tensor, txt: Tensor, pe: Tensor, vec: Tensor, attn_mask=None): + def forward(self, img: Tensor, txt: Tensor, pe: Tensor, vec: Tensor, attn_mask=None, transformer_options={}): (img_mod1, img_mod2), (txt_mod1, txt_mod2) = vec # prepare image for attention @@ -95,7 +95,7 @@ class DoubleStreamBlock(nn.Module): attn = attention(torch.cat((txt_q, img_q), dim=2), torch.cat((txt_k, img_k), dim=2), torch.cat((txt_v, img_v), dim=2), - pe=pe, mask=attn_mask) + pe=pe, mask=attn_mask, transformer_options=transformer_options) txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :] @@ -148,7 +148,7 @@ class SingleStreamBlock(nn.Module): self.mlp_act = nn.GELU(approximate="tanh") - def forward(self, x: Tensor, pe: Tensor, vec: Tensor, attn_mask=None) -> Tensor: + def forward(self, x: Tensor, pe: Tensor, vec: Tensor, attn_mask=None, transformer_options={}) -> Tensor: mod = vec x_mod = torch.addcmul(mod.shift, 1 + mod.scale, self.pre_norm(x)) qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) @@ -157,7 +157,7 @@ class SingleStreamBlock(nn.Module): q, k = self.norm(q, k, v) # compute attention - attn = attention(q, k, v, pe=pe, mask=attn_mask) + attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options) # compute activation in mlp stream, cat again and run second linear layer output = self.linear2(torch.cat((attn, 
self.mlp_act(mlp)), 2)) x.addcmul_(mod.gate, output) diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index 5cff44dc8..4f709f87d 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -193,14 +193,16 @@ class Chroma(nn.Module): txt=args["txt"], vec=args["vec"], pe=args["pe"], - attn_mask=args.get("attn_mask")) + attn_mask=args.get("attn_mask"), + transformer_options=args.get("transformer_options")) return out out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": double_mod, "pe": pe, - "attn_mask": attn_mask}, + "attn_mask": attn_mask, + "transformer_options": transformer_options}, {"original_block": block_wrap}) txt = out["txt"] img = out["img"] @@ -209,7 +211,8 @@ class Chroma(nn.Module): txt=txt, vec=double_mod, pe=pe, - attn_mask=attn_mask) + attn_mask=attn_mask, + transformer_options=transformer_options) if control is not None: # Controlnet control_i = control.get("input") @@ -229,17 +232,19 @@ class Chroma(nn.Module): out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], - attn_mask=args.get("attn_mask")) + attn_mask=args.get("attn_mask"), + transformer_options=args.get("transformer_options")) return out out = blocks_replace[("single_block", i)]({"img": img, "vec": single_mod, "pe": pe, - "attn_mask": attn_mask}, + "attn_mask": attn_mask, + "transformer_options": transformer_options}, {"original_block": block_wrap}) img = out["img"] else: - img = block(img, vec=single_mod, pe=pe, attn_mask=attn_mask) + img = block(img, vec=single_mod, pe=pe, attn_mask=attn_mask, transformer_options=transformer_options) if control is not None: # Controlnet control_o = control.get("output") diff --git a/comfy/ldm/cosmos/blocks.py b/comfy/ldm/cosmos/blocks.py index 5c4356a3f..afb43d469 100644 --- a/comfy/ldm/cosmos/blocks.py +++ b/comfy/ldm/cosmos/blocks.py @@ -176,6 +176,7 @@ class Attention(nn.Module): context=None, mask=None, rope_emb=None, + transformer_options={}, **kwargs, ): """ @@ -184,7 +185,7 @@ class Attention(nn.Module): context (Optional[Tensor]): The key tensor of shape [B, Mk, K] or use x as context [self attention] if None """ q, k, v = self.cal_qkv(x, context, mask, rope_emb=rope_emb, **kwargs) - out = optimized_attention(q, k, v, self.heads, skip_reshape=True, mask=mask, skip_output_reshape=True) + out = optimized_attention(q, k, v, self.heads, skip_reshape=True, mask=mask, skip_output_reshape=True, transformer_options=transformer_options) del q, k, v out = rearrange(out, " b n s c -> s b (n c)") return self.to_out(out) @@ -546,6 +547,7 @@ class VideoAttn(nn.Module): context: Optional[torch.Tensor] = None, crossattn_mask: Optional[torch.Tensor] = None, rope_emb_L_1_1_D: Optional[torch.Tensor] = None, + transformer_options: Optional[dict] = {}, ) -> torch.Tensor: """ Forward pass for video attention. @@ -571,6 +573,7 @@ class VideoAttn(nn.Module): context_M_B_D, crossattn_mask, rope_emb=rope_emb_L_1_1_D, + transformer_options=transformer_options, ) x_T_H_W_B_D = rearrange(x_THW_B_D, "(t h w) b d -> t h w b d", h=H, w=W) return x_T_H_W_B_D @@ -665,6 +668,7 @@ class DITBuildingBlock(nn.Module): crossattn_mask: Optional[torch.Tensor] = None, rope_emb_L_1_1_D: Optional[torch.Tensor] = None, adaln_lora_B_3D: Optional[torch.Tensor] = None, + transformer_options: Optional[dict] = {}, ) -> torch.Tensor: """ Forward pass for dynamically configured blocks with adaptive normalization. 
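[Editor's sketch] The cascade and cosmos hunks above repeat the single mechanical pattern that makes up most of this patch: each intermediate forward() gains a transformer_options={} parameter and passes it through unchanged, so a dict created once per sampling call reaches the innermost optimized_attention invocation. A minimal standalone sketch of that pattern, with hypothetical module names (the real models thread the dict through many more layers):

import torch
import torch.nn as nn

def optimized_attention(q, k, v, heads, transformer_options={}):
    # Stand-in for comfy's optimized_attention; the dict rides along so a
    # wrapper (see the attention.py hunks later in this patch) can inspect it.
    return torch.nn.functional.scaled_dot_product_attention(q, k, v)

class InnerBlock(nn.Module):
    def forward(self, x, transformer_options={}):
        b, s, c = x.shape
        q = k = v = x.view(b, 1, s, c)
        out = optimized_attention(q, k, v, heads=1,
                                  transformer_options=transformer_options)
        return out.view(b, s, c)

class OuterModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = nn.ModuleList([InnerBlock() for _ in range(2)])

    def forward(self, x, transformer_options={}, **kwargs):
        # Intermediate layers only forward the dict; they never mutate it.
        for block in self.blocks:
            x = block(x, transformer_options=transformer_options)
        return x

Note that the empty-dict default is only safe because every layer treats the dict as read-only; the models never write into a default argument.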
@@ -702,6 +706,7 @@ class DITBuildingBlock(nn.Module): adaln_norm_state(self.norm_state, x, scale_1_1_1_B_D, shift_1_1_1_B_D), context=None, rope_emb_L_1_1_D=rope_emb_L_1_1_D, + transformer_options=transformer_options, ) elif self.block_type in ["cross_attn", "ca"]: x = x + gate_1_1_1_B_D * self.block( @@ -709,6 +714,7 @@ class DITBuildingBlock(nn.Module): context=crossattn_emb, crossattn_mask=crossattn_mask, rope_emb_L_1_1_D=rope_emb_L_1_1_D, + transformer_options=transformer_options, ) else: raise ValueError(f"Unknown block type: {self.block_type}") @@ -784,6 +790,7 @@ class GeneralDITTransformerBlock(nn.Module): crossattn_mask: Optional[torch.Tensor] = None, rope_emb_L_1_1_D: Optional[torch.Tensor] = None, adaln_lora_B_3D: Optional[torch.Tensor] = None, + transformer_options: Optional[dict] = {}, ) -> torch.Tensor: for block in self.blocks: x = block( @@ -793,5 +800,6 @@ class GeneralDITTransformerBlock(nn.Module): crossattn_mask, rope_emb_L_1_1_D=rope_emb_L_1_1_D, adaln_lora_B_3D=adaln_lora_B_3D, + transformer_options=transformer_options, ) return x diff --git a/comfy/ldm/cosmos/model.py b/comfy/ldm/cosmos/model.py index 53698b758..52ef7ef43 100644 --- a/comfy/ldm/cosmos/model.py +++ b/comfy/ldm/cosmos/model.py @@ -520,6 +520,7 @@ class GeneralDIT(nn.Module): x.shape == extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D.shape ), f"{x.shape} != {extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D.shape} {original_shape}" + transformer_options = kwargs.get("transformer_options", {}) for _, block in self.blocks.items(): assert ( self.blocks["block0"].x_format == block.x_format @@ -534,6 +535,7 @@ class GeneralDIT(nn.Module): crossattn_mask, rope_emb_L_1_1_D=rope_emb_L_1_1_D, adaln_lora_B_3D=adaln_lora_B_3D, + transformer_options=transformer_options, ) x_B_T_H_W_D = rearrange(x, "T H W B D -> B T H W D") diff --git a/comfy/ldm/cosmos/predict2.py b/comfy/ldm/cosmos/predict2.py index fcc83ba76..07a4fc79f 100644 --- a/comfy/ldm/cosmos/predict2.py +++ b/comfy/ldm/cosmos/predict2.py @@ -44,7 +44,7 @@ class GPT2FeedForward(nn.Module): return x -def torch_attention_op(q_B_S_H_D: torch.Tensor, k_B_S_H_D: torch.Tensor, v_B_S_H_D: torch.Tensor) -> torch.Tensor: +def torch_attention_op(q_B_S_H_D: torch.Tensor, k_B_S_H_D: torch.Tensor, v_B_S_H_D: torch.Tensor, transformer_options: Optional[dict] = {}) -> torch.Tensor: """Computes multi-head attention using PyTorch's native implementation. This function provides a PyTorch backend alternative to Transformer Engine's attention operation. @@ -71,7 +71,7 @@ def torch_attention_op(q_B_S_H_D: torch.Tensor, k_B_S_H_D: torch.Tensor, v_B_S_H q_B_H_S_D = rearrange(q_B_S_H_D, "b ... h k -> b h ... k").view(in_q_shape[0], in_q_shape[-2], -1, in_q_shape[-1]) k_B_H_S_D = rearrange(k_B_S_H_D, "b ... h v -> b h ... v").view(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) v_B_H_S_D = rearrange(v_B_S_H_D, "b ... h v -> b h ... 
v").view(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) - return optimized_attention(q_B_H_S_D, k_B_H_S_D, v_B_H_S_D, in_q_shape[-2], skip_reshape=True) + return optimized_attention(q_B_H_S_D, k_B_H_S_D, v_B_H_S_D, in_q_shape[-2], skip_reshape=True, transformer_options=transformer_options) class Attention(nn.Module): @@ -180,8 +180,8 @@ class Attention(nn.Module): return q, k, v - def compute_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor: - result = self.attn_op(q, k, v) # [B, S, H, D] + def compute_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, transformer_options: Optional[dict] = {}) -> torch.Tensor: + result = self.attn_op(q, k, v, transformer_options=transformer_options) # [B, S, H, D] return self.output_dropout(self.output_proj(result)) def forward( @@ -189,6 +189,7 @@ class Attention(nn.Module): x: torch.Tensor, context: Optional[torch.Tensor] = None, rope_emb: Optional[torch.Tensor] = None, + transformer_options: Optional[dict] = {}, ) -> torch.Tensor: """ Args: @@ -196,7 +197,7 @@ class Attention(nn.Module): context (Optional[Tensor]): The key tensor of shape [B, Mk, K] or use x as context [self attention] if None """ q, k, v = self.compute_qkv(x, context, rope_emb=rope_emb) - return self.compute_attention(q, k, v) + return self.compute_attention(q, k, v, transformer_options=transformer_options) class Timesteps(nn.Module): @@ -459,6 +460,7 @@ class Block(nn.Module): rope_emb_L_1_1_D: Optional[torch.Tensor] = None, adaln_lora_B_T_3D: Optional[torch.Tensor] = None, extra_per_block_pos_emb: Optional[torch.Tensor] = None, + transformer_options: Optional[dict] = {}, ) -> torch.Tensor: if extra_per_block_pos_emb is not None: x_B_T_H_W_D = x_B_T_H_W_D + extra_per_block_pos_emb @@ -512,6 +514,7 @@ class Block(nn.Module): rearrange(normalized_x_B_T_H_W_D, "b t h w d -> b (t h w) d"), None, rope_emb=rope_emb_L_1_1_D, + transformer_options=transformer_options, ), "b (t h w) d -> b t h w d", t=T, @@ -525,6 +528,7 @@ class Block(nn.Module): layer_norm_cross_attn: Callable, _scale_cross_attn_B_T_1_1_D: torch.Tensor, _shift_cross_attn_B_T_1_1_D: torch.Tensor, + transformer_options: Optional[dict] = {}, ) -> torch.Tensor: _normalized_x_B_T_H_W_D = _fn( _x_B_T_H_W_D, layer_norm_cross_attn, _scale_cross_attn_B_T_1_1_D, _shift_cross_attn_B_T_1_1_D @@ -534,6 +538,7 @@ class Block(nn.Module): rearrange(_normalized_x_B_T_H_W_D, "b t h w d -> b (t h w) d"), crossattn_emb, rope_emb=rope_emb_L_1_1_D, + transformer_options=transformer_options, ), "b (t h w) d -> b t h w d", t=T, @@ -547,6 +552,7 @@ class Block(nn.Module): self.layer_norm_cross_attn, scale_cross_attn_B_T_1_1_D, shift_cross_attn_B_T_1_1_D, + transformer_options=transformer_options, ) x_B_T_H_W_D = result_B_T_H_W_D * gate_cross_attn_B_T_1_1_D + x_B_T_H_W_D @@ -865,6 +871,7 @@ class MiniTrainDIT(nn.Module): "rope_emb_L_1_1_D": rope_emb_L_1_1_D.unsqueeze(1).unsqueeze(0), "adaln_lora_B_T_3D": adaln_lora_B_T_3D, "extra_per_block_pos_emb": extra_pos_emb_B_T_H_W_D_or_T_H_W_B_D, + "transformer_options": kwargs.get("transformer_options", {}), } for block in self.blocks: x_B_T_H_W_D = block( diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py index 113eb2096..ef21b416b 100644 --- a/comfy/ldm/flux/layers.py +++ b/comfy/ldm/flux/layers.py @@ -159,7 +159,7 @@ class DoubleStreamBlock(nn.Module): ) self.flipped_img_txt = flipped_img_txt - def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, attn_mask=None, modulation_dims_img=None, modulation_dims_txt=None): + def 
forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, attn_mask=None, modulation_dims_img=None, modulation_dims_txt=None, transformer_options={}): img_mod1, img_mod2 = self.img_mod(vec) txt_mod1, txt_mod2 = self.txt_mod(vec) @@ -182,7 +182,7 @@ class DoubleStreamBlock(nn.Module): attn = attention(torch.cat((img_q, txt_q), dim=2), torch.cat((img_k, txt_k), dim=2), torch.cat((img_v, txt_v), dim=2), - pe=pe, mask=attn_mask) + pe=pe, mask=attn_mask, transformer_options=transformer_options) img_attn, txt_attn = attn[:, : img.shape[1]], attn[:, img.shape[1]:] else: @@ -190,7 +190,7 @@ class DoubleStreamBlock(nn.Module): attn = attention(torch.cat((txt_q, img_q), dim=2), torch.cat((txt_k, img_k), dim=2), torch.cat((txt_v, img_v), dim=2), - pe=pe, mask=attn_mask) + pe=pe, mask=attn_mask, transformer_options=transformer_options) txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:] @@ -244,7 +244,7 @@ class SingleStreamBlock(nn.Module): self.mlp_act = nn.GELU(approximate="tanh") self.modulation = Modulation(hidden_size, double=False, dtype=dtype, device=device, operations=operations) - def forward(self, x: Tensor, vec: Tensor, pe: Tensor, attn_mask=None, modulation_dims=None) -> Tensor: + def forward(self, x: Tensor, vec: Tensor, pe: Tensor, attn_mask=None, modulation_dims=None, transformer_options={}) -> Tensor: mod, _ = self.modulation(vec) qkv, mlp = torch.split(self.linear1(apply_mod(self.pre_norm(x), (1 + mod.scale), mod.shift, modulation_dims)), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) @@ -252,7 +252,7 @@ class SingleStreamBlock(nn.Module): q, k = self.norm(q, k, v) # compute attention - attn = attention(q, k, v, pe=pe, mask=attn_mask) + attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options) # compute activation in mlp stream, cat again and run second linear layer output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2)) x += apply_mod(output, mod.gate, None, modulation_dims) diff --git a/comfy/ldm/flux/math.py b/comfy/ldm/flux/math.py index 3e0978176..4d743cda2 100644 --- a/comfy/ldm/flux/math.py +++ b/comfy/ldm/flux/math.py @@ -6,7 +6,7 @@ from comfy.ldm.modules.attention import optimized_attention import comfy.model_management -def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor: +def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None, transformer_options={}) -> Tensor: q_shape = q.shape k_shape = k.shape @@ -17,7 +17,7 @@ def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor: k = (pe[..., 0] * k[..., 0] + pe[..., 1] * k[..., 1]).reshape(*k_shape).type_as(v) heads = q.shape[1] - x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask) + x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask, transformer_options=transformer_options) return x diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 8ea7d4f57..14f90cea5 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -144,14 +144,16 @@ class Flux(nn.Module): txt=args["txt"], vec=args["vec"], pe=args["pe"], - attn_mask=args.get("attn_mask")) + attn_mask=args.get("attn_mask"), + transformer_options=args.get("transformer_options")) return out out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe, - "attn_mask": attn_mask}, + "attn_mask": attn_mask, + "transformer_options": transformer_options}, {"original_block": block_wrap}) txt = out["txt"] img = out["img"] @@ -160,7 +162,8 @@ class Flux(nn.Module): 
txt=txt, vec=vec, pe=pe, - attn_mask=attn_mask) + attn_mask=attn_mask, + transformer_options=transformer_options) if control is not None: # Controlnet control_i = control.get("input") @@ -181,17 +184,19 @@ class Flux(nn.Module): out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], - attn_mask=args.get("attn_mask")) + attn_mask=args.get("attn_mask"), + transformer_options=args.get("transformer_options")) return out out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe, - "attn_mask": attn_mask}, + "attn_mask": attn_mask, + "transformer_options": transformer_options}, {"original_block": block_wrap}) img = out["img"] else: - img = block(img, vec=vec, pe=pe, attn_mask=attn_mask) + img = block(img, vec=vec, pe=pe, attn_mask=attn_mask, transformer_options=transformer_options) if control is not None: # Controlnet control_o = control.get("output") diff --git a/comfy/ldm/genmo/joint_model/asymm_models_joint.py b/comfy/ldm/genmo/joint_model/asymm_models_joint.py index 366a8b713..5c1bb4d42 100644 --- a/comfy/ldm/genmo/joint_model/asymm_models_joint.py +++ b/comfy/ldm/genmo/joint_model/asymm_models_joint.py @@ -109,6 +109,7 @@ class AsymmetricAttention(nn.Module): scale_x: torch.Tensor, # (B, dim_x), modulation for pre-RMSNorm. scale_y: torch.Tensor, # (B, dim_y), modulation for pre-RMSNorm. crop_y, + transformer_options={}, **rope_rotation, ) -> Tuple[torch.Tensor, torch.Tensor]: rope_cos = rope_rotation.get("rope_cos") @@ -143,7 +144,7 @@ class AsymmetricAttention(nn.Module): xy = optimized_attention(q, k, - v, self.num_heads, skip_reshape=True) + v, self.num_heads, skip_reshape=True, transformer_options=transformer_options) x, y = torch.tensor_split(xy, (q_x.shape[1],), dim=1) x = self.proj_x(x) @@ -224,6 +225,7 @@ class AsymmetricJointBlock(nn.Module): x: torch.Tensor, c: torch.Tensor, y: torch.Tensor, + transformer_options={}, **attn_kwargs, ): """Forward pass of a block. @@ -256,6 +258,7 @@ class AsymmetricJointBlock(nn.Module): y, scale_x=scale_msa_x, scale_y=scale_msa_y, + transformer_options=transformer_options, **attn_kwargs, ) @@ -524,10 +527,11 @@ class AsymmDiTJoint(nn.Module): args["txt"], rope_cos=args["rope_cos"], rope_sin=args["rope_sin"], - crop_y=args["num_tokens"] + crop_y=args["num_tokens"], + transformer_options=args["transformer_options"] ) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": y_feat, "vec": c, "rope_cos": rope_cos, "rope_sin": rope_sin, "num_tokens": num_tokens}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": y_feat, "vec": c, "rope_cos": rope_cos, "rope_sin": rope_sin, "num_tokens": num_tokens, "transformer_options": transformer_options}, {"original_block": block_wrap}) y_feat = out["txt"] x = out["img"] else: @@ -538,6 +542,7 @@ class AsymmDiTJoint(nn.Module): rope_cos=rope_cos, rope_sin=rope_sin, crop_y=num_tokens, + transformer_options=transformer_options, ) # (B, M, D), (B, L, D) del y_feat # Final layers don't use dense text features. 
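[Editor's sketch] The genmo hunk above shows the other recurring change: the args dict handed to a ("double_block", i) or ("single_block", i) replacement now carries "transformer_options", and block_wrap forwards it into the original block. A hedged sketch of a replacement patch written against that calling convention (the patch function and key names below are hypothetical; registration happens outside this diff):

def my_double_block_patch(args, extra_options):   # hypothetical patch
    topts = args.get("transformer_options", {})
    if topts.get("my_debug_flag"):                # hypothetical key
        print("double block img:", tuple(args["img"].shape))
    # delegate to the unmodified block via the wrapper the model supplies
    return extra_options["original_block"](args)

Any existing replacement patch keeps working, since it receives one extra dict entry and an unchanged second argument.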
diff --git a/comfy/ldm/hidream/model.py b/comfy/ldm/hidream/model.py index ae49cf945..28d81c79e 100644 --- a/comfy/ldm/hidream/model.py +++ b/comfy/ldm/hidream/model.py @@ -72,8 +72,8 @@ class TimestepEmbed(nn.Module): return t_emb -def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor): - return optimized_attention(query.view(query.shape[0], -1, query.shape[-1] * query.shape[-2]), key.view(key.shape[0], -1, key.shape[-1] * key.shape[-2]), value.view(value.shape[0], -1, value.shape[-1] * value.shape[-2]), query.shape[2]) +def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, transformer_options={}): + return optimized_attention(query.view(query.shape[0], -1, query.shape[-1] * query.shape[-2]), key.view(key.shape[0], -1, key.shape[-1] * key.shape[-2]), value.view(value.shape[0], -1, value.shape[-1] * value.shape[-2]), query.shape[2], transformer_options=transformer_options) class HiDreamAttnProcessor_flashattn: @@ -86,6 +86,7 @@ class HiDreamAttnProcessor_flashattn: image_tokens_masks: Optional[torch.FloatTensor] = None, text_tokens: Optional[torch.FloatTensor] = None, rope: torch.FloatTensor = None, + transformer_options={}, *args, **kwargs, ) -> torch.FloatTensor: @@ -133,7 +134,7 @@ class HiDreamAttnProcessor_flashattn: query = torch.cat([query_1, query_2], dim=-1) key = torch.cat([key_1, key_2], dim=-1) - hidden_states = attention(query, key, value) + hidden_states = attention(query, key, value, transformer_options=transformer_options) if not attn.single: hidden_states_i, hidden_states_t = torch.split(hidden_states, [num_image_tokens, num_text_tokens], dim=1) @@ -199,6 +200,7 @@ class HiDreamAttention(nn.Module): image_tokens_masks: torch.FloatTensor = None, norm_text_tokens: torch.FloatTensor = None, rope: torch.FloatTensor = None, + transformer_options={}, ) -> torch.Tensor: return self.processor( self, @@ -206,6 +208,7 @@ class HiDreamAttention(nn.Module): image_tokens_masks = image_tokens_masks, text_tokens = norm_text_tokens, rope = rope, + transformer_options=transformer_options, ) @@ -406,7 +409,7 @@ class HiDreamImageSingleTransformerBlock(nn.Module): text_tokens: Optional[torch.FloatTensor] = None, adaln_input: Optional[torch.FloatTensor] = None, rope: torch.FloatTensor = None, - + transformer_options={}, ) -> torch.FloatTensor: wtype = image_tokens.dtype shift_msa_i, scale_msa_i, gate_msa_i, shift_mlp_i, scale_mlp_i, gate_mlp_i = \ @@ -419,6 +422,7 @@ class HiDreamImageSingleTransformerBlock(nn.Module): norm_image_tokens, image_tokens_masks, rope = rope, + transformer_options=transformer_options, ) image_tokens = gate_msa_i * attn_output_i + image_tokens @@ -483,6 +487,7 @@ class HiDreamImageTransformerBlock(nn.Module): text_tokens: Optional[torch.FloatTensor] = None, adaln_input: Optional[torch.FloatTensor] = None, rope: torch.FloatTensor = None, + transformer_options={}, ) -> torch.FloatTensor: wtype = image_tokens.dtype shift_msa_i, scale_msa_i, gate_msa_i, shift_mlp_i, scale_mlp_i, gate_mlp_i, \ @@ -500,6 +505,7 @@ class HiDreamImageTransformerBlock(nn.Module): image_tokens_masks, norm_text_tokens, rope = rope, + transformer_options=transformer_options, ) image_tokens = gate_msa_i * attn_output_i + image_tokens @@ -550,6 +556,7 @@ class HiDreamImageBlock(nn.Module): text_tokens: Optional[torch.FloatTensor] = None, adaln_input: torch.FloatTensor = None, rope: torch.FloatTensor = None, + transformer_options={}, ) -> torch.FloatTensor: return self.block( image_tokens, @@ -557,6 +564,7 @@ class HiDreamImageBlock(nn.Module): text_tokens, 
adaln_input, rope, + transformer_options=transformer_options, ) @@ -786,6 +794,7 @@ class HiDreamImageTransformer2DModel(nn.Module): text_tokens = cur_encoder_hidden_states, adaln_input = adaln_input, rope = rope, + transformer_options=transformer_options, ) initial_encoder_hidden_states = initial_encoder_hidden_states[:, :initial_encoder_hidden_states_seq_len] block_id += 1 @@ -809,6 +818,7 @@ class HiDreamImageTransformer2DModel(nn.Module): text_tokens=None, adaln_input=adaln_input, rope=rope, + transformer_options=transformer_options, ) hidden_states = hidden_states[:, :hidden_states_seq_len] block_id += 1 diff --git a/comfy/ldm/hunyuan3d/model.py b/comfy/ldm/hunyuan3d/model.py index 0fa5e78c1..4991b1645 100644 --- a/comfy/ldm/hunyuan3d/model.py +++ b/comfy/ldm/hunyuan3d/model.py @@ -99,14 +99,16 @@ class Hunyuan3Dv2(nn.Module): txt=args["txt"], vec=args["vec"], pe=args["pe"], - attn_mask=args.get("attn_mask")) + attn_mask=args.get("attn_mask"), + transformer_options=args["transformer_options"]) return out out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe, - "attn_mask": attn_mask}, + "attn_mask": attn_mask, + "transformer_options": transformer_options}, {"original_block": block_wrap}) txt = out["txt"] img = out["img"] @@ -115,7 +117,8 @@ class Hunyuan3Dv2(nn.Module): txt=txt, vec=vec, pe=pe, - attn_mask=attn_mask) + attn_mask=attn_mask, + transformer_options=transformer_options) img = torch.cat((txt, img), 1) @@ -126,17 +129,19 @@ class Hunyuan3Dv2(nn.Module): out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], - attn_mask=args.get("attn_mask")) + attn_mask=args.get("attn_mask"), + transformer_options=args["transformer_options"]) return out out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe, - "attn_mask": attn_mask}, + "attn_mask": attn_mask, + "transformer_options": transformer_options}, {"original_block": block_wrap}) img = out["img"] else: - img = block(img, vec=vec, pe=pe, attn_mask=attn_mask) + img = block(img, vec=vec, pe=pe, attn_mask=attn_mask, transformer_options=transformer_options) img = img[:, txt.shape[1]:, ...] 
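# Note on the surrounding lines: txt was prepended earlier in this forward
# with img = torch.cat((txt, img), 1), so the slice above strips the text
# prefix again before the final layer. The transformer_options threading
# added in this patch leaves that token layout untouched.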
img = self.final_layer(img, vec) diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py index ca86b8bb1..5132e6c07 100644 --- a/comfy/ldm/hunyuan_video/model.py +++ b/comfy/ldm/hunyuan_video/model.py @@ -80,13 +80,13 @@ class TokenRefinerBlock(nn.Module): operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), ) - def forward(self, x, c, mask): + def forward(self, x, c, mask, transformer_options={}): mod1, mod2 = self.adaLN_modulation(c).chunk(2, dim=1) norm_x = self.norm1(x) qkv = self.self_attn.qkv(norm_x) q, k, v = qkv.reshape(qkv.shape[0], qkv.shape[1], 3, self.heads, -1).permute(2, 0, 3, 1, 4) - attn = optimized_attention(q, k, v, self.heads, mask=mask, skip_reshape=True) + attn = optimized_attention(q, k, v, self.heads, mask=mask, skip_reshape=True, transformer_options=transformer_options) x = x + self.self_attn.proj(attn) * mod1.unsqueeze(1) x = x + self.mlp(self.norm2(x)) * mod2.unsqueeze(1) @@ -117,14 +117,14 @@ class IndividualTokenRefiner(nn.Module): ] ) - def forward(self, x, c, mask): + def forward(self, x, c, mask, transformer_options={}): m = None if mask is not None: m = mask.view(mask.shape[0], 1, 1, mask.shape[1]).repeat(1, 1, mask.shape[1], 1) m = m + m.transpose(2, 3) for block in self.blocks: - x = block(x, c, m) + x = block(x, c, m, transformer_options=transformer_options) return x @@ -152,6 +152,7 @@ class TokenRefiner(nn.Module): x, timesteps, mask, + transformer_options={}, ): t = self.t_embedder(timestep_embedding(timesteps, 256, time_factor=1.0).to(x.dtype)) # m = mask.float().unsqueeze(-1) @@ -160,7 +161,7 @@ class TokenRefiner(nn.Module): c = t + self.c_embedder(c.to(x.dtype)) x = self.input_embedder(x) - x = self.individual_token_refiner(x, c, mask) + x = self.individual_token_refiner(x, c, mask, transformer_options=transformer_options) return x @@ -328,7 +329,7 @@ class HunyuanVideo(nn.Module): if txt_mask is not None and not torch.is_floating_point(txt_mask): txt_mask = (txt_mask - 1).to(img.dtype) * torch.finfo(img.dtype).max - txt = self.txt_in(txt, timesteps, txt_mask) + txt = self.txt_in(txt, timesteps, txt_mask, transformer_options=transformer_options) if self.byt5_in is not None and txt_byt5 is not None: txt_byt5 = self.byt5_in(txt_byt5) @@ -352,14 +353,14 @@ class HunyuanVideo(nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"], out["txt"] = block(img=args["img"], txt=args["txt"], vec=args["vec"], pe=args["pe"], attn_mask=args["attention_mask"], modulation_dims_img=args["modulation_dims_img"], modulation_dims_txt=args["modulation_dims_txt"]) + out["img"], out["txt"] = block(img=args["img"], txt=args["txt"], vec=args["vec"], pe=args["pe"], attn_mask=args["attention_mask"], modulation_dims_img=args["modulation_dims_img"], modulation_dims_txt=args["modulation_dims_txt"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe, "attention_mask": attn_mask, 'modulation_dims_img': modulation_dims, 'modulation_dims_txt': modulation_dims_txt}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe, "attention_mask": attn_mask, 'modulation_dims_img': modulation_dims, 'modulation_dims_txt': modulation_dims_txt, 'transformer_options': transformer_options}, {"original_block": block_wrap}) txt = out["txt"] img = out["img"] else: - img, txt = block(img=img, txt=txt, vec=vec, pe=pe, attn_mask=attn_mask, 
modulation_dims_img=modulation_dims, modulation_dims_txt=modulation_dims_txt) + img, txt = block(img=img, txt=txt, vec=vec, pe=pe, attn_mask=attn_mask, modulation_dims_img=modulation_dims, modulation_dims_txt=modulation_dims_txt, transformer_options=transformer_options) if control is not None: # Controlnet control_i = control.get("input") @@ -374,13 +375,13 @@ class HunyuanVideo(nn.Module): if ("single_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], attn_mask=args["attention_mask"], modulation_dims=args["modulation_dims"]) + out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], attn_mask=args["attention_mask"], modulation_dims=args["modulation_dims"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe, "attention_mask": attn_mask, 'modulation_dims': modulation_dims}, {"original_block": block_wrap}) + out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe, "attention_mask": attn_mask, 'modulation_dims': modulation_dims, 'transformer_options': transformer_options}, {"original_block": block_wrap}) img = out["img"] else: - img = block(img, vec=vec, pe=pe, attn_mask=attn_mask, modulation_dims=modulation_dims) + img = block(img, vec=vec, pe=pe, attn_mask=attn_mask, modulation_dims=modulation_dims, transformer_options=transformer_options) if control is not None: # Controlnet control_o = control.get("output") diff --git a/comfy/ldm/lightricks/model.py b/comfy/ldm/lightricks/model.py index aa2ea62b1..def365ba7 100644 --- a/comfy/ldm/lightricks/model.py +++ b/comfy/ldm/lightricks/model.py @@ -271,7 +271,7 @@ class CrossAttention(nn.Module): self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) - def forward(self, x, context=None, mask=None, pe=None): + def forward(self, x, context=None, mask=None, pe=None, transformer_options={}): q = self.to_q(x) context = x if context is None else context k = self.to_k(context) @@ -285,9 +285,9 @@ class CrossAttention(nn.Module): k = apply_rotary_emb(k, pe) if mask is None: - out = comfy.ldm.modules.attention.optimized_attention(q, k, v, self.heads, attn_precision=self.attn_precision) + out = comfy.ldm.modules.attention.optimized_attention(q, k, v, self.heads, attn_precision=self.attn_precision, transformer_options=transformer_options) else: - out = comfy.ldm.modules.attention.optimized_attention_masked(q, k, v, self.heads, mask, attn_precision=self.attn_precision) + out = comfy.ldm.modules.attention.optimized_attention_masked(q, k, v, self.heads, mask, attn_precision=self.attn_precision, transformer_options=transformer_options) return self.to_out(out) @@ -303,12 +303,12 @@ class BasicTransformerBlock(nn.Module): self.scale_shift_table = nn.Parameter(torch.empty(6, dim, device=device, dtype=dtype)) - def forward(self, x, context=None, attention_mask=None, timestep=None, pe=None): + def forward(self, x, context=None, attention_mask=None, timestep=None, pe=None, transformer_options={}): shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None, None].to(device=x.device, dtype=x.dtype) + timestep.reshape(x.shape[0], timestep.shape[1], self.scale_shift_table.shape[0], -1)).unbind(dim=2) - x += self.attn1(comfy.ldm.common_dit.rms_norm(x) * (1 + scale_msa) + shift_msa, pe=pe) * gate_msa + x += self.attn1(comfy.ldm.common_dit.rms_norm(x) * (1 + scale_msa) + shift_msa, pe=pe, 
transformer_options=transformer_options) * gate_msa - x += self.attn2(x, context=context, mask=attention_mask) + x += self.attn2(x, context=context, mask=attention_mask, transformer_options=transformer_options) y = comfy.ldm.common_dit.rms_norm(x) * (1 + scale_mlp) + shift_mlp x += self.ff(y) * gate_mlp @@ -479,10 +479,10 @@ class LTXVModel(torch.nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = block(args["img"], context=args["txt"], attention_mask=args["attention_mask"], timestep=args["vec"], pe=args["pe"]) + out["img"] = block(args["img"], context=args["txt"], attention_mask=args["attention_mask"], timestep=args["vec"], pe=args["pe"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "attention_mask": attention_mask, "vec": timestep, "pe": pe}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "attention_mask": attention_mask, "vec": timestep, "pe": pe, "transformer_options": transformer_options}, {"original_block": block_wrap}) x = out["img"] else: x = block( @@ -490,7 +490,8 @@ class LTXVModel(torch.nn.Module): context=context, attention_mask=attention_mask, timestep=timestep, - pe=pe + pe=pe, + transformer_options=transformer_options, ) # 3. Output diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index e08ed817d..f87d98ac0 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -104,6 +104,7 @@ class JointAttention(nn.Module): x: torch.Tensor, x_mask: torch.Tensor, freqs_cis: torch.Tensor, + transformer_options={}, ) -> torch.Tensor: """ @@ -140,7 +141,7 @@ class JointAttention(nn.Module): if n_rep >= 1: xk = xk.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) xv = xv.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) - output = optimized_attention_masked(xq.movedim(1, 2), xk.movedim(1, 2), xv.movedim(1, 2), self.n_local_heads, x_mask, skip_reshape=True) + output = optimized_attention_masked(xq.movedim(1, 2), xk.movedim(1, 2), xv.movedim(1, 2), self.n_local_heads, x_mask, skip_reshape=True, transformer_options=transformer_options) return self.out(output) @@ -268,6 +269,7 @@ class JointTransformerBlock(nn.Module): x_mask: torch.Tensor, freqs_cis: torch.Tensor, adaln_input: Optional[torch.Tensor]=None, + transformer_options={}, ): """ Perform a forward pass through the TransformerBlock. 
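[Editor's sketch] The attention.py hunks a little further down introduce the piece all of this threading feeds: a @wrap_attn decorator that lets transformer_options["optimized_attention_override"] intercept any attention backend, plus a registry of named backends. A standalone, simplified sketch of those semantics (the real decorator's sentinel is named "_inside_attn_wrapper" and is cleaned up in a finally block; the override hook below is a hypothetical user function):

import functools
import torch

REGISTERED_ATTENTION_FUNCTIONS = {}

def register_attention_function(name, func):
    # first registration wins, mirroring the diff's skip-if-present behavior
    REGISTERED_ATTENTION_FUNCTIONS.setdefault(name, func)

def wrap_attn(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        topts = kwargs.get("transformer_options") or {}
        if "_inside" not in kwargs and "optimized_attention_override" in topts:
            # hand the *undecorated* func to the override; the sentinel stops
            # a re-wrapped backend from being overridden a second time
            return topts["optimized_attention_override"](func, *args, _inside=True, **kwargs)
        kwargs.pop("_inside", None)
        return func(*args, **kwargs)
    return wrapper

@wrap_attn
def attention_pytorch(q, k, v, heads, transformer_options={}, **kwargs):
    # q/k/v already (batch, heads, seq, dim_head) in this simplified sketch
    return torch.nn.functional.scaled_dot_product_attention(q, k, v)

register_attention_function("pytorch", attention_pytorch)

def logging_override(original_func, *args, **kwargs):  # hypothetical hook
    print("attention via", original_func.__name__)
    return original_func(*args, **kwargs)

q = k = v = torch.randn(1, 4, 16, 8)
out = attention_pytorch(q, k, v, heads=4,
                        transformer_options={"optimized_attention_override": logging_override})

This is why nearly every signature in the patch grows a transformer_options parameter: the override only fires for calls that actually receive the dict.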
@@ -290,6 +292,7 @@ class JointTransformerBlock(nn.Module): modulate(self.attention_norm1(x), scale_msa), x_mask, freqs_cis, + transformer_options=transformer_options, ) ) x = x + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2( @@ -304,6 +307,7 @@ class JointTransformerBlock(nn.Module): self.attention_norm1(x), x_mask, freqs_cis, + transformer_options=transformer_options, ) ) x = x + self.ffn_norm2( @@ -494,7 +498,7 @@ class NextDiT(nn.Module): return imgs def patchify_and_embed( - self, x: List[torch.Tensor] | torch.Tensor, cap_feats: torch.Tensor, cap_mask: torch.Tensor, t: torch.Tensor, num_tokens + self, x: List[torch.Tensor] | torch.Tensor, cap_feats: torch.Tensor, cap_mask: torch.Tensor, t: torch.Tensor, num_tokens, transformer_options={} ) -> Tuple[torch.Tensor, torch.Tensor, List[Tuple[int, int]], List[int], torch.Tensor]: bsz = len(x) pH = pW = self.patch_size @@ -554,7 +558,7 @@ class NextDiT(nn.Module): # refine context for layer in self.context_refiner: - cap_feats = layer(cap_feats, cap_mask, cap_freqs_cis) + cap_feats = layer(cap_feats, cap_mask, cap_freqs_cis, transformer_options=transformer_options) # refine image flat_x = [] @@ -573,7 +577,7 @@ class NextDiT(nn.Module): padded_img_embed = self.x_embedder(padded_img_embed) padded_img_mask = padded_img_mask.unsqueeze(1) for layer in self.noise_refiner: - padded_img_embed = layer(padded_img_embed, padded_img_mask, img_freqs_cis, t) + padded_img_embed = layer(padded_img_embed, padded_img_mask, img_freqs_cis, t, transformer_options=transformer_options) if cap_mask is not None: mask = torch.zeros(bsz, max_seq_len, dtype=dtype, device=device) @@ -616,12 +620,13 @@ class NextDiT(nn.Module): cap_feats = self.cap_embedder(cap_feats) # (N, L, D) # todo check if able to batchify w.o. redundant compute + transformer_options = kwargs.get("transformer_options", {}) x_is_tensor = isinstance(x, torch.Tensor) - x, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens) + x, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens, transformer_options=transformer_options) freqs_cis = freqs_cis.to(x.device) for layer in self.layers: - x = layer(x, mask, freqs_cis, adaln_input) + x = layer(x, mask, freqs_cis, adaln_input, transformer_options=transformer_options) x = self.final_layer(x, adaln_input) x = self.unpatchify(x, img_size, cap_size, return_tensor=x_is_tensor)[:,:,:h,:w] diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 043df28df..bf2553c37 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -5,8 +5,9 @@ import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat -from typing import Optional +from typing import Optional, Any, Callable, Union import logging +import functools from .diffusionmodules.util import AlphaBlender, timestep_embedding from .sub_quadratic_attention import efficient_dot_product_attention @@ -17,23 +18,45 @@ if model_management.xformers_enabled(): import xformers import xformers.ops -if model_management.sage_attention_enabled(): - try: - from sageattention import sageattn - except ModuleNotFoundError as e: +SAGE_ATTENTION_IS_AVAILABLE = False +try: + from sageattention import sageattn + SAGE_ATTENTION_IS_AVAILABLE = True +except ModuleNotFoundError as e: + if model_management.sage_attention_enabled(): if e.name == "sageattention": logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` 
package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention") else: raise e exit(-1) -if model_management.flash_attention_enabled(): - try: - from flash_attn import flash_attn_func - except ModuleNotFoundError: +FLASH_ATTENTION_IS_AVAILABLE = False +try: + from flash_attn import flash_attn_func + FLASH_ATTENTION_IS_AVAILABLE = True +except ModuleNotFoundError: + if model_management.flash_attention_enabled(): logging.error(f"\n\nTo use the `--use-flash-attention` feature, the `flash-attn` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install flash-attn") exit(-1) +REGISTERED_ATTENTION_FUNCTIONS = {} +def register_attention_function(name: str, func: Callable): + # avoid replacing existing functions + if name not in REGISTERED_ATTENTION_FUNCTIONS: + REGISTERED_ATTENTION_FUNCTIONS[name] = func + else: + logging.warning(f"Attention function {name} already registered, skipping registration.") + +def get_attention_function(name: str, default: Any=...) -> Union[Callable, None]: + if name == "optimized": + return optimized_attention + elif name not in REGISTERED_ATTENTION_FUNCTIONS: + if default is ...: + raise KeyError(f"Attention function {name} not found.") + else: + return default + return REGISTERED_ATTENTION_FUNCTIONS[name] + from comfy.cli_args import args import comfy.ops ops = comfy.ops.disable_weight_init @@ -91,7 +114,27 @@ class FeedForward(nn.Module): def Normalize(in_channels, dtype=None, device=None): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) -def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): + +def wrap_attn(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + remove_attn_wrapper_key = False + try: + if "_inside_attn_wrapper" not in kwargs: + transformer_options = kwargs.get("transformer_options", None) + remove_attn_wrapper_key = True + kwargs["_inside_attn_wrapper"] = True + if transformer_options is not None: + if "optimized_attention_override" in transformer_options: + return transformer_options["optimized_attention_override"](func, *args, **kwargs) + return func(*args, **kwargs) + finally: + if remove_attn_wrapper_key: + del kwargs["_inside_attn_wrapper"] + return wrapper + +@wrap_attn +def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs): attn_precision = get_attn_precision(attn_precision, q.dtype) if skip_reshape: @@ -159,8 +202,8 @@ def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape ) return out - -def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): +@wrap_attn +def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs): attn_precision = get_attn_precision(attn_precision, query.dtype) if skip_reshape: @@ -230,7 +273,8 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1,2).flatten(start_dim=2) return hidden_states -def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): +@wrap_attn +def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs): attn_precision = get_attn_precision(attn_precision, 
q.dtype) if skip_reshape: @@ -359,7 +403,8 @@ try: except: pass -def attention_xformers(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): +@wrap_attn +def attention_xformers(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs): b = q.shape[0] dim_head = q.shape[-1] # check to make sure xformers isn't broken @@ -374,7 +419,7 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None, skip_resh disabled_xformers = True if disabled_xformers: - return attention_pytorch(q, k, v, heads, mask, skip_reshape=skip_reshape) + return attention_pytorch(q, k, v, heads, mask, skip_reshape=skip_reshape, **kwargs) if skip_reshape: # b h k d -> b k h d @@ -427,8 +472,8 @@ else: #TODO: other GPUs ? SDP_BATCH_LIMIT = 2**31 - -def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): +@wrap_attn +def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs): if skip_reshape: b, _, _, dim_head = q.shape else: @@ -470,8 +515,8 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha ).transpose(1, 2).reshape(-1, q.shape[2], heads * dim_head) return out - -def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): +@wrap_attn +def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs): if skip_reshape: b, _, _, dim_head = q.shape tensor_layout = "HND" @@ -501,7 +546,7 @@ def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape= lambda t: t.transpose(1, 2), (q, k, v), ) - return attention_pytorch(q, k, v, heads, mask=mask, skip_reshape=True, skip_output_reshape=skip_output_reshape) + return attention_pytorch(q, k, v, heads, mask=mask, skip_reshape=True, skip_output_reshape=skip_output_reshape, **kwargs) if tensor_layout == "HND": if not skip_output_reshape: @@ -534,8 +579,8 @@ except AttributeError as error: dropout_p: float = 0.0, causal: bool = False) -> torch.Tensor: assert False, f"Could not define flash_attn_wrapper: {FLASH_ATTN_ERROR}" - -def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): +@wrap_attn +def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs): if skip_reshape: b, _, _, dim_head = q.shape else: @@ -597,6 +642,19 @@ else: optimized_attention_masked = optimized_attention + +# register core-supported attention functions +if SAGE_ATTENTION_IS_AVAILABLE: + register_attention_function("sage", attention_sage) +if FLASH_ATTENTION_IS_AVAILABLE: + register_attention_function("flash", attention_flash) +if model_management.xformers_enabled(): + register_attention_function("xformers", attention_xformers) +register_attention_function("pytorch", attention_pytorch) +register_attention_function("sub_quad", attention_sub_quad) +register_attention_function("split", attention_split) + + def optimized_attention_for_device(device, mask=False, small_input=False): if small_input: if model_management.pytorch_attention_enabled(): @@ -629,7 +687,7 @@ class CrossAttention(nn.Module): self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) - def forward(self, x, context=None, value=None, mask=None): + def forward(self, x, 
context=None, value=None, mask=None, transformer_options={}): q = self.to_q(x) context = default(context, x) k = self.to_k(context) @@ -640,9 +698,9 @@ class CrossAttention(nn.Module): v = self.to_v(context) if mask is None: - out = optimized_attention(q, k, v, self.heads, attn_precision=self.attn_precision) + out = optimized_attention(q, k, v, self.heads, attn_precision=self.attn_precision, transformer_options=transformer_options) else: - out = optimized_attention_masked(q, k, v, self.heads, mask, attn_precision=self.attn_precision) + out = optimized_attention_masked(q, k, v, self.heads, mask, attn_precision=self.attn_precision, transformer_options=transformer_options) return self.to_out(out) @@ -746,7 +804,7 @@ class BasicTransformerBlock(nn.Module): n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options) n = self.attn1.to_out(n) else: - n = self.attn1(n, context=context_attn1, value=value_attn1) + n = self.attn1(n, context=context_attn1, value=value_attn1, transformer_options=transformer_options) if "attn1_output_patch" in transformer_patches: patch = transformer_patches["attn1_output_patch"] @@ -786,7 +844,7 @@ class BasicTransformerBlock(nn.Module): n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options) n = self.attn2.to_out(n) else: - n = self.attn2(n, context=context_attn2, value=value_attn2) + n = self.attn2(n, context=context_attn2, value=value_attn2, transformer_options=transformer_options) if "attn2_output_patch" in transformer_patches: patch = transformer_patches["attn2_output_patch"] @@ -1017,7 +1075,7 @@ class SpatialVideoTransformer(SpatialTransformer): B, S, C = x_mix.shape x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps) - x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options + x_mix = mix_block(x_mix, context=time_context, transformer_options=transformer_options) x_mix = rearrange( x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps ) diff --git a/comfy/ldm/modules/diffusionmodules/mmdit.py b/comfy/ldm/modules/diffusionmodules/mmdit.py index 4d6beba2d..42f406f1a 100644 --- a/comfy/ldm/modules/diffusionmodules/mmdit.py +++ b/comfy/ldm/modules/diffusionmodules/mmdit.py @@ -606,7 +606,7 @@ def block_mixing(*args, use_checkpoint=True, **kwargs): return _block_mixing(*args, **kwargs) -def _block_mixing(context, x, context_block, x_block, c): +def _block_mixing(context, x, context_block, x_block, c, transformer_options={}): context_qkv, context_intermediates = context_block.pre_attention(context, c) if x_block.x_block_self_attn: @@ -622,6 +622,7 @@ def _block_mixing(context, x, context_block, x_block, c): attn = optimized_attention( qkv[0], qkv[1], qkv[2], heads=x_block.attn.num_heads, + transformer_options=transformer_options, ) context_attn, x_attn = ( attn[:, : context_qkv[0].shape[1]], @@ -637,6 +638,7 @@ def _block_mixing(context, x, context_block, x_block, c): attn2 = optimized_attention( x_qkv2[0], x_qkv2[1], x_qkv2[2], heads=x_block.attn2.num_heads, + transformer_options=transformer_options, ) x = x_block.post_attention_x(x_attn, attn2, *x_intermediates) else: @@ -958,10 +960,10 @@ class MMDiT(nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["txt"], out["img"] = self.joint_blocks[i](args["txt"], args["img"], c=args["vec"]) + out["txt"], out["img"] = self.joint_blocks[i](args["txt"], args["img"], c=args["vec"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": 
x, "txt": context, "vec": c_mod}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": c_mod, "transformer_options": transformer_options}, {"original_block": block_wrap}) context = out["txt"] x = out["img"] else: @@ -970,6 +972,7 @@ class MMDiT(nn.Module): x, c=c_mod, use_checkpoint=self.use_checkpoint, + transformer_options=transformer_options, ) if control is not None: control_o = control.get("output") diff --git a/comfy/ldm/omnigen/omnigen2.py b/comfy/ldm/omnigen/omnigen2.py index 4884449f8..82edc92da 100644 --- a/comfy/ldm/omnigen/omnigen2.py +++ b/comfy/ldm/omnigen/omnigen2.py @@ -120,7 +120,7 @@ class Attention(nn.Module): nn.Dropout(0.0) ) - def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, transformer_options={}) -> torch.Tensor: batch_size, sequence_length, _ = hidden_states.shape query = self.to_q(hidden_states) @@ -146,7 +146,7 @@ class Attention(nn.Module): key = key.repeat_interleave(self.heads // self.kv_heads, dim=1) value = value.repeat_interleave(self.heads // self.kv_heads, dim=1) - hidden_states = optimized_attention_masked(query, key, value, self.heads, attention_mask, skip_reshape=True) + hidden_states = optimized_attention_masked(query, key, value, self.heads, attention_mask, skip_reshape=True, transformer_options=transformer_options) hidden_states = self.to_out[0](hidden_states) return hidden_states @@ -182,16 +182,16 @@ class OmniGen2TransformerBlock(nn.Module): self.norm2 = operations.RMSNorm(dim, eps=norm_eps, dtype=dtype, device=device) self.ffn_norm2 = operations.RMSNorm(dim, eps=norm_eps, dtype=dtype, device=device) - def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor, temb: Optional[torch.Tensor] = None, transformer_options={}) -> torch.Tensor: if self.modulation: norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb) - attn_output = self.attn(norm_hidden_states, norm_hidden_states, attention_mask, image_rotary_emb) + attn_output = self.attn(norm_hidden_states, norm_hidden_states, attention_mask, image_rotary_emb, transformer_options=transformer_options) hidden_states = hidden_states + gate_msa.unsqueeze(1).tanh() * self.norm2(attn_output) mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1))) hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output) else: norm_hidden_states = self.norm1(hidden_states) - attn_output = self.attn(norm_hidden_states, norm_hidden_states, attention_mask, image_rotary_emb) + attn_output = self.attn(norm_hidden_states, norm_hidden_states, attention_mask, image_rotary_emb, transformer_options=transformer_options) hidden_states = hidden_states + self.norm2(attn_output) mlp_output = self.feed_forward(self.ffn_norm1(hidden_states)) hidden_states = hidden_states + self.ffn_norm2(mlp_output) @@ -390,7 +390,7 @@ class OmniGen2Transformer2DModel(nn.Module): ref_img_sizes, img_sizes, ) - def img_patch_embed_and_refine(self, hidden_states, 
ref_image_hidden_states, padded_img_mask, padded_ref_img_mask, noise_rotary_emb, ref_img_rotary_emb, l_effective_ref_img_len, l_effective_img_len, temb): + def img_patch_embed_and_refine(self, hidden_states, ref_image_hidden_states, padded_img_mask, padded_ref_img_mask, noise_rotary_emb, ref_img_rotary_emb, l_effective_ref_img_len, l_effective_img_len, temb, transformer_options={}): batch_size = len(hidden_states) hidden_states = self.x_embedder(hidden_states) @@ -405,17 +405,17 @@ class OmniGen2Transformer2DModel(nn.Module): shift += ref_img_len for layer in self.noise_refiner: - hidden_states = layer(hidden_states, padded_img_mask, noise_rotary_emb, temb) + hidden_states = layer(hidden_states, padded_img_mask, noise_rotary_emb, temb, transformer_options=transformer_options) if ref_image_hidden_states is not None: for layer in self.ref_image_refiner: - ref_image_hidden_states = layer(ref_image_hidden_states, padded_ref_img_mask, ref_img_rotary_emb, temb) + ref_image_hidden_states = layer(ref_image_hidden_states, padded_ref_img_mask, ref_img_rotary_emb, temb, transformer_options=transformer_options) hidden_states = torch.cat([ref_image_hidden_states, hidden_states], dim=1) return hidden_states - def forward(self, x, timesteps, context, num_tokens, ref_latents=None, attention_mask=None, **kwargs): + def forward(self, x, timesteps, context, num_tokens, ref_latents=None, attention_mask=None, transformer_options={}, **kwargs): B, C, H, W = x.shape hidden_states = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size)) _, _, H_padded, W_padded = hidden_states.shape @@ -444,7 +444,7 @@ class OmniGen2Transformer2DModel(nn.Module): ) for layer in self.context_refiner: - text_hidden_states = layer(text_hidden_states, text_attention_mask, context_rotary_emb) + text_hidden_states = layer(text_hidden_states, text_attention_mask, context_rotary_emb, transformer_options=transformer_options) img_len = hidden_states.shape[1] combined_img_hidden_states = self.img_patch_embed_and_refine( @@ -453,13 +453,14 @@ class OmniGen2Transformer2DModel(nn.Module): noise_rotary_emb, ref_img_rotary_emb, l_effective_ref_img_len, l_effective_img_len, temb, + transformer_options=transformer_options, ) hidden_states = torch.cat([text_hidden_states, combined_img_hidden_states], dim=1) attention_mask = None for layer in self.layers: - hidden_states = layer(hidden_states, attention_mask, rotary_emb, temb) + hidden_states = layer(hidden_states, attention_mask, rotary_emb, temb, transformer_options=transformer_options) hidden_states = self.norm_out(hidden_states, temb) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 04071f31c..b9f60c2b7 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -132,6 +132,7 @@ class Attention(nn.Module): encoder_hidden_states_mask: torch.FloatTensor = None, attention_mask: Optional[torch.FloatTensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, + transformer_options={}, ) -> Tuple[torch.Tensor, torch.Tensor]: seq_txt = encoder_hidden_states.shape[1] @@ -159,7 +160,7 @@ class Attention(nn.Module): joint_key = joint_key.flatten(start_dim=2) joint_value = joint_value.flatten(start_dim=2) - joint_hidden_states = optimized_attention_masked(joint_query, joint_key, joint_value, self.heads, attention_mask) + joint_hidden_states = optimized_attention_masked(joint_query, joint_key, joint_value, self.heads, attention_mask, transformer_options=transformer_options) txt_attn_output = joint_hidden_states[:, :seq_txt, :] 
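# Pattern note: as in the flux/chroma blocks earlier in this patch, text and
# image tokens run through one joint attention call and are split back at
# seq_txt; threading transformer_options into optimized_attention_masked
# means an override observes the fused txt+img sequence, not two calls.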
img_attn_output = joint_hidden_states[:, seq_txt:, :] @@ -226,6 +227,7 @@ class QwenImageTransformerBlock(nn.Module): encoder_hidden_states_mask: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + transformer_options={}, ) -> Tuple[torch.Tensor, torch.Tensor]: img_mod_params = self.img_mod(temb) txt_mod_params = self.txt_mod(temb) @@ -242,6 +244,7 @@ class QwenImageTransformerBlock(nn.Module): encoder_hidden_states=txt_modulated, encoder_hidden_states_mask=encoder_hidden_states_mask, image_rotary_emb=image_rotary_emb, + transformer_options=transformer_options, ) hidden_states = hidden_states + img_gate1 * img_attn_output @@ -434,9 +437,9 @@ class QwenImageTransformer2DModel(nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["txt"], out["img"] = block(hidden_states=args["img"], encoder_hidden_states=args["txt"], encoder_hidden_states_mask=encoder_hidden_states_mask, temb=args["vec"], image_rotary_emb=args["pe"]) + out["txt"], out["img"] = block(hidden_states=args["img"], encoder_hidden_states=args["txt"], encoder_hidden_states_mask=encoder_hidden_states_mask, temb=args["vec"], image_rotary_emb=args["pe"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": hidden_states, "txt": encoder_hidden_states, "vec": temb, "pe": image_rotary_emb}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": hidden_states, "txt": encoder_hidden_states, "vec": temb, "pe": image_rotary_emb, "transformer_options": transformer_options}, {"original_block": block_wrap}) hidden_states = out["img"] encoder_hidden_states = out["txt"] else: @@ -446,11 +449,12 @@ class QwenImageTransformer2DModel(nn.Module): encoder_hidden_states_mask=encoder_hidden_states_mask, temb=temb, image_rotary_emb=image_rotary_emb, + transformer_options=transformer_options, ) if "double_block" in patches: for p in patches["double_block"]: - out = p({"img": hidden_states, "txt": encoder_hidden_states, "x": x, "block_index": i}) + out = p({"img": hidden_states, "txt": encoder_hidden_states, "x": x, "block_index": i, "transformer_options": transformer_options}) hidden_states = out["img"] encoder_hidden_states = out["txt"] diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 47857dc2b..63472ada2 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -52,7 +52,7 @@ class WanSelfAttention(nn.Module): self.norm_q = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() self.norm_k = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() - def forward(self, x, freqs): + def forward(self, x, freqs, transformer_options={}): r""" Args: x(Tensor): Shape [B, L, num_heads, C / num_heads] @@ -75,6 +75,7 @@ class WanSelfAttention(nn.Module): k.view(b, s, n * d), v, heads=self.num_heads, + transformer_options=transformer_options, ) x = self.o(x) @@ -83,7 +84,7 @@ class WanSelfAttention(nn.Module): class WanT2VCrossAttention(WanSelfAttention): - def forward(self, x, context, **kwargs): + def forward(self, x, context, transformer_options={}, **kwargs): r""" Args: x(Tensor): Shape [B, L1, C] @@ -95,7 +96,7 @@ class WanT2VCrossAttention(WanSelfAttention): v = self.v(context) # 
compute attention - x = optimized_attention(q, k, v, heads=self.num_heads) + x = optimized_attention(q, k, v, heads=self.num_heads, transformer_options=transformer_options) x = self.o(x) return x @@ -116,7 +117,7 @@ class WanI2VCrossAttention(WanSelfAttention): # self.alpha = nn.Parameter(torch.zeros((1, ))) self.norm_k_img = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() - def forward(self, x, context, context_img_len): + def forward(self, x, context, context_img_len, transformer_options={}): r""" Args: x(Tensor): Shape [B, L1, C] @@ -131,9 +132,9 @@ class WanI2VCrossAttention(WanSelfAttention): v = self.v(context) k_img = self.norm_k_img(self.k_img(context_img)) v_img = self.v_img(context_img) - img_x = optimized_attention(q, k_img, v_img, heads=self.num_heads) + img_x = optimized_attention(q, k_img, v_img, heads=self.num_heads, transformer_options=transformer_options) # compute attention - x = optimized_attention(q, k, v, heads=self.num_heads) + x = optimized_attention(q, k, v, heads=self.num_heads, transformer_options=transformer_options) # output x = x + img_x @@ -206,6 +207,7 @@ class WanAttentionBlock(nn.Module): freqs, context, context_img_len=257, + transformer_options={}, ): r""" Args: @@ -224,12 +226,12 @@ class WanAttentionBlock(nn.Module): # self-attention y = self.self_attn( torch.addcmul(repeat_e(e[0], x), self.norm1(x), 1 + repeat_e(e[1], x)), - freqs) + freqs, transformer_options=transformer_options) x = torch.addcmul(x, y, repeat_e(e[2], x)) # cross-attention & ffn - x = x + self.cross_attn(self.norm3(x), context, context_img_len=context_img_len) + x = x + self.cross_attn(self.norm3(x), context, context_img_len=context_img_len, transformer_options=transformer_options) y = self.ffn(torch.addcmul(repeat_e(e[3], x), self.norm2(x), 1 + repeat_e(e[4], x))) x = torch.addcmul(x, y, repeat_e(e[5], x)) return x @@ -559,12 +561,12 @@ class WanModel(torch.nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len, transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap}) x = out["img"] else: - x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len, transformer_options=transformer_options) # head x = self.head(x, e) @@ -742,17 +744,17 @@ class VaceWanModel(WanModel): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len, transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) + out = 
blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap}) x = out["img"] else: - x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len, transformer_options=transformer_options) ii = self.vace_layers_mapping.get(i, None) if ii is not None: for iii in range(len(c)): - c_skip, c[iii] = self.vace_blocks[ii](c[iii], x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + c_skip, c[iii] = self.vace_blocks[ii](c[iii], x=x_orig, e=e0, freqs=freqs, context=context, context_img_len=context_img_len, transformer_options=transformer_options) x += c_skip * vace_strength[iii] del c_skip # head @@ -841,12 +843,12 @@ class CameraWanModel(WanModel): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len) + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len, transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap}) x = out["img"] else: - x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len) + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len, transformer_options=transformer_options) # head x = self.head(x, e) From a3b04de7004cc19dee9364bd71e62bab05475810 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 12 Sep 2025 16:46:46 -0700 Subject: [PATCH 0586/1073] Hunyuan refiner vae now works with tiled. 
(#9836) --- comfy/ldm/hunyuan_video/vae_refiner.py | 1 - comfy/sd.py | 21 +++++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/comfy/ldm/hunyuan_video/vae_refiner.py b/comfy/ldm/hunyuan_video/vae_refiner.py index e3fff9bbe..c6f742710 100644 --- a/comfy/ldm/hunyuan_video/vae_refiner.py +++ b/comfy/ldm/hunyuan_video/vae_refiner.py @@ -185,7 +185,6 @@ class Encoder(nn.Module): self.regul = comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer() def forward(self, x): - x = x.unsqueeze(2) x = self.conv_in(x) for stage in self.down: diff --git a/comfy/sd.py b/comfy/sd.py index 02ddc7239..f8f1a89e8 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -412,9 +412,12 @@ class VAE: self.working_dtypes = [torch.bfloat16, torch.float32] elif "decoder.conv_in.conv.weight" in sd and sd['decoder.conv_in.conv.weight'].shape[1] == 32: ddconfig = {"block_out_channels": [128, 256, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 16, "ffactor_temporal": 4, "downsample_match_channel": True, "upsample_match_channel": True} - self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.conv.weight"].shape[1] - self.downscale_ratio = 16 - self.upscale_ratio = 16 + ddconfig['z_channels'] = sd["decoder.conv_in.conv.weight"].shape[1] + self.latent_channels = 64 + self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 16, 16) + self.upscale_index_formula = (4, 16, 16) + self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 16, 16) + self.downscale_index_formula = (4, 16, 16) self.latent_dim = 3 self.not_video = True self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] @@ -684,8 +687,11 @@ class VAE: self.throw_exception_if_invalid() pixel_samples = self.vae_encode_crop_pixels(pixel_samples) pixel_samples = pixel_samples.movedim(-1, 1) - if not self.not_video and self.latent_dim == 3 and pixel_samples.ndim < 5: - pixel_samples = pixel_samples.movedim(1, 0).unsqueeze(0) + if self.latent_dim == 3 and pixel_samples.ndim < 5: + if not self.not_video: + pixel_samples = pixel_samples.movedim(1, 0).unsqueeze(0) + else: + pixel_samples = pixel_samples.unsqueeze(2) try: memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype) model_management.load_models_gpu([self.patcher], memory_required=memory_used, force_full_load=self.disable_offload) @@ -719,7 +725,10 @@ class VAE: dims = self.latent_dim pixel_samples = pixel_samples.movedim(-1, 1) if dims == 3: - pixel_samples = pixel_samples.movedim(1, 0).unsqueeze(0) + if not self.not_video: + pixel_samples = pixel_samples.movedim(1, 0).unsqueeze(0) + else: + pixel_samples = pixel_samples.unsqueeze(2) memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype) # TODO: calculate mem required for tile model_management.load_models_gpu([self.patcher], memory_required=memory_used, force_full_load=self.disable_offload) From 2559dee49202365bc97218b98121e796f57dfcb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Sat, 13 Sep 2025 04:52:58 +0300 Subject: [PATCH 0587/1073] Support wav2vec base models (#9637) * Support wav2vec base models * trim trailing whitespace * Do interpolation after --- comfy/audio_encoders/audio_encoders.py | 36 ++++++++++- comfy/audio_encoders/wav2vec2.py | 87 +++++++++++++++++++------- 2 files changed, 99 insertions(+), 24 deletions(-) diff --git a/comfy/audio_encoders/audio_encoders.py b/comfy/audio_encoders/audio_encoders.py index 538c21bd5..d1ec78f69 100644 --- 
a/comfy/audio_encoders/audio_encoders.py +++ b/comfy/audio_encoders/audio_encoders.py @@ -11,7 +11,13 @@ class AudioEncoderModel(): self.load_device = comfy.model_management.text_encoder_device() offload_device = comfy.model_management.text_encoder_offload_device() self.dtype = comfy.model_management.text_encoder_dtype(self.load_device) - self.model = Wav2Vec2Model(dtype=self.dtype, device=offload_device, operations=comfy.ops.manual_cast) + model_config = dict(config) + model_config.update({ + "dtype": self.dtype, + "device": offload_device, + "operations": comfy.ops.manual_cast + }) + self.model = Wav2Vec2Model(**model_config) self.model.eval() self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) self.model_sample_rate = 16000 @@ -25,7 +31,7 @@ class AudioEncoderModel(): def encode_audio(self, audio, sample_rate): comfy.model_management.load_model_gpu(self.patcher) audio = torchaudio.functional.resample(audio, sample_rate, self.model_sample_rate) - out, all_layers = self.model(audio.to(self.load_device)) + out, all_layers = self.model(audio.to(self.load_device), sr=self.model_sample_rate) outputs = {} outputs["encoded_audio"] = out outputs["encoded_audio_all_layers"] = all_layers @@ -33,8 +39,32 @@ class AudioEncoderModel(): def load_audio_encoder_from_sd(sd, prefix=""): - audio_encoder = AudioEncoderModel(None) sd = comfy.utils.state_dict_prefix_replace(sd, {"wav2vec2.": ""}) + embed_dim = sd["encoder.layer_norm.bias"].shape[0] + if embed_dim == 1024:# large + config = { + "embed_dim": 1024, + "num_heads": 16, + "num_layers": 24, + "conv_norm": True, + "conv_bias": True, + "do_normalize": True, + "do_stable_layer_norm": True + } + elif embed_dim == 768: # base + config = { + "embed_dim": 768, + "num_heads": 12, + "num_layers": 12, + "conv_norm": False, + "conv_bias": False, + "do_normalize": False, # chinese-wav2vec2-base has this False + "do_stable_layer_norm": False + } + else: + raise RuntimeError("ERROR: audio encoder file is invalid or unsupported embed_dim: {}".format(embed_dim)) + + audio_encoder = AudioEncoderModel(config) m, u = audio_encoder.load_sd(sd) if len(m) > 0: logging.warning("missing audio encoder: {}".format(m)) diff --git a/comfy/audio_encoders/wav2vec2.py b/comfy/audio_encoders/wav2vec2.py index de906622a..ef10dcd2a 100644 --- a/comfy/audio_encoders/wav2vec2.py +++ b/comfy/audio_encoders/wav2vec2.py @@ -13,19 +13,49 @@ class LayerNormConv(nn.Module): x = self.conv(x) return torch.nn.functional.gelu(self.layer_norm(x.transpose(-2, -1)).transpose(-2, -1)) +class LayerGroupNormConv(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride, bias=False, dtype=None, device=None, operations=None): + super().__init__() + self.conv = operations.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, bias=bias, device=device, dtype=dtype) + self.layer_norm = operations.GroupNorm(num_groups=out_channels, num_channels=out_channels, affine=True, device=device, dtype=dtype) + + def forward(self, x): + x = self.conv(x) + return torch.nn.functional.gelu(self.layer_norm(x)) + +class ConvNoNorm(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride, bias=False, dtype=None, device=None, operations=None): + super().__init__() + self.conv = operations.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, bias=bias, device=device, dtype=dtype) + + def forward(self, x): + x = self.conv(x) + return torch.nn.functional.gelu(x) + class 
ConvFeatureEncoder(nn.Module): - def __init__(self, conv_dim, dtype=None, device=None, operations=None): + def __init__(self, conv_dim, conv_bias=False, conv_norm=True, dtype=None, device=None, operations=None): super().__init__() - self.conv_layers = nn.ModuleList([ - LayerNormConv(1, conv_dim, kernel_size=10, stride=5, bias=True, device=device, dtype=dtype, operations=operations), - LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), - LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), - LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), - LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=True, device=device, dtype=dtype, operations=operations), - LayerNormConv(conv_dim, conv_dim, kernel_size=2, stride=2, bias=True, device=device, dtype=dtype, operations=operations), - LayerNormConv(conv_dim, conv_dim, kernel_size=2, stride=2, bias=True, device=device, dtype=dtype, operations=operations), - ]) + if conv_norm: + self.conv_layers = nn.ModuleList([ + LayerNormConv(1, conv_dim, kernel_size=10, stride=5, bias=True, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=2, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + LayerNormConv(conv_dim, conv_dim, kernel_size=2, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ]) + else: + self.conv_layers = nn.ModuleList([ + LayerGroupNormConv(1, conv_dim, kernel_size=10, stride=5, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ConvNoNorm(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ConvNoNorm(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ConvNoNorm(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ConvNoNorm(conv_dim, conv_dim, kernel_size=3, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ConvNoNorm(conv_dim, conv_dim, kernel_size=2, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ConvNoNorm(conv_dim, conv_dim, kernel_size=2, stride=2, bias=conv_bias, device=device, dtype=dtype, operations=operations), + ]) def forward(self, x): x = x.unsqueeze(1) @@ -76,6 +106,7 @@ class TransformerEncoder(nn.Module): num_heads=12, num_layers=12, mlp_ratio=4.0, + do_stable_layer_norm=True, dtype=None, device=None, operations=None ): super().__init__() @@ -86,20 +117,25 @@ class TransformerEncoder(nn.Module): embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, + do_stable_layer_norm=do_stable_layer_norm, device=device, dtype=dtype, operations=operations ) for _ in range(num_layers) ]) self.layer_norm = operations.LayerNorm(embed_dim, 
eps=1e-05, device=device, dtype=dtype) + self.do_stable_layer_norm = do_stable_layer_norm def forward(self, x, mask=None): x = x + self.pos_conv_embed(x) all_x = () + if not self.do_stable_layer_norm: + x = self.layer_norm(x) for layer in self.layers: all_x += (x,) x = layer(x, mask) - x = self.layer_norm(x) + if self.do_stable_layer_norm: + x = self.layer_norm(x) all_x += (x,) return x, all_x @@ -145,6 +181,7 @@ class TransformerEncoderLayer(nn.Module): embed_dim=768, num_heads=12, mlp_ratio=4.0, + do_stable_layer_norm=True, dtype=None, device=None, operations=None ): super().__init__() @@ -154,15 +191,19 @@ class TransformerEncoderLayer(nn.Module): self.layer_norm = operations.LayerNorm(embed_dim, device=device, dtype=dtype) self.feed_forward = FeedForward(embed_dim, mlp_ratio, device=device, dtype=dtype, operations=operations) self.final_layer_norm = operations.LayerNorm(embed_dim, device=device, dtype=dtype) + self.do_stable_layer_norm = do_stable_layer_norm def forward(self, x, mask=None): residual = x - x = self.layer_norm(x) + if self.do_stable_layer_norm: + x = self.layer_norm(x) x = self.attention(x, mask=mask) x = residual + x - - x = x + self.feed_forward(self.final_layer_norm(x)) - return x + if not self.do_stable_layer_norm: + x = self.layer_norm(x) + return self.final_layer_norm(x + self.feed_forward(x)) + else: + return x + self.feed_forward(self.final_layer_norm(x)) class Wav2Vec2Model(nn.Module): @@ -174,34 +215,38 @@ class Wav2Vec2Model(nn.Module): final_dim=256, num_heads=16, num_layers=24, + conv_norm=True, + conv_bias=True, + do_normalize=True, + do_stable_layer_norm=True, dtype=None, device=None, operations=None ): super().__init__() conv_dim = 512 - self.feature_extractor = ConvFeatureEncoder(conv_dim, device=device, dtype=dtype, operations=operations) + self.feature_extractor = ConvFeatureEncoder(conv_dim, conv_norm=conv_norm, conv_bias=conv_bias, device=device, dtype=dtype, operations=operations) self.feature_projection = FeatureProjection(conv_dim, embed_dim, device=device, dtype=dtype, operations=operations) self.masked_spec_embed = nn.Parameter(torch.empty(embed_dim, device=device, dtype=dtype)) + self.do_normalize = do_normalize self.encoder = TransformerEncoder( embed_dim=embed_dim, num_heads=num_heads, num_layers=num_layers, + do_stable_layer_norm=do_stable_layer_norm, device=device, dtype=dtype, operations=operations ) - def forward(self, x, mask_time_indices=None, return_dict=False): - + def forward(self, x, sr=16000, mask_time_indices=None, return_dict=False): x = torch.mean(x, dim=1) - x = (x - x.mean()) / torch.sqrt(x.var() + 1e-7) + if self.do_normalize: + x = (x - x.mean()) / torch.sqrt(x.var() + 1e-7) features = self.feature_extractor(x) features = self.feature_projection(features) - batch_size, seq_len, _ = features.shape x, all_x = self.encoder(features) - return x, all_x From 29bf807b0e2d89402d555d08bd8e9df15e636f0c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 12 Sep 2025 18:57:04 -0700 Subject: [PATCH 0588/1073] Cleanup. 
(#9838) --- comfy/audio_encoders/audio_encoders.py | 2 +- comfy/audio_encoders/wav2vec2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/audio_encoders/audio_encoders.py b/comfy/audio_encoders/audio_encoders.py index d1ec78f69..6fb5b08e9 100644 --- a/comfy/audio_encoders/audio_encoders.py +++ b/comfy/audio_encoders/audio_encoders.py @@ -31,7 +31,7 @@ class AudioEncoderModel(): def encode_audio(self, audio, sample_rate): comfy.model_management.load_model_gpu(self.patcher) audio = torchaudio.functional.resample(audio, sample_rate, self.model_sample_rate) - out, all_layers = self.model(audio.to(self.load_device), sr=self.model_sample_rate) + out, all_layers = self.model(audio.to(self.load_device)) outputs = {} outputs["encoded_audio"] = out outputs["encoded_audio_all_layers"] = all_layers diff --git a/comfy/audio_encoders/wav2vec2.py b/comfy/audio_encoders/wav2vec2.py index ef10dcd2a..4e34a40a7 100644 --- a/comfy/audio_encoders/wav2vec2.py +++ b/comfy/audio_encoders/wav2vec2.py @@ -238,7 +238,7 @@ class Wav2Vec2Model(nn.Module): device=device, dtype=dtype, operations=operations ) - def forward(self, x, sr=16000, mask_time_indices=None, return_dict=False): + def forward(self, x, mask_time_indices=None, return_dict=False): x = torch.mean(x, dim=1) if self.do_normalize: From e5e70636e7b7b54695220a88ab036c1607959736 Mon Sep 17 00:00:00 2001 From: Kimbing Ng <50580578+KimbingNg@users.noreply.github.com> Date: Sun, 14 Sep 2025 04:59:19 +0800 Subject: [PATCH 0589/1073] Remove single quote pattern to avoid wrong matches (#9842) --- comfy/text_encoders/hunyuan_image.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/comfy/text_encoders/hunyuan_image.py b/comfy/text_encoders/hunyuan_image.py index be396cae7..699eddc33 100644 --- a/comfy/text_encoders/hunyuan_image.py +++ b/comfy/text_encoders/hunyuan_image.py @@ -22,17 +22,14 @@ class HunyuanImageTokenizer(QwenImageTokenizer): # ByT5 processing for HunyuanImage text_prompt_texts = [] - pattern_quote_single = r'\'(.*?)\'' pattern_quote_double = r'\"(.*?)\"' pattern_quote_chinese_single = r'‘(.*?)’' pattern_quote_chinese_double = r'“(.*?)”' - matches_quote_single = re.findall(pattern_quote_single, text) matches_quote_double = re.findall(pattern_quote_double, text) matches_quote_chinese_single = re.findall(pattern_quote_chinese_single, text) matches_quote_chinese_double = re.findall(pattern_quote_chinese_double, text) - text_prompt_texts.extend(matches_quote_single) text_prompt_texts.extend(matches_quote_double) text_prompt_texts.extend(matches_quote_chinese_single) text_prompt_texts.extend(matches_quote_chinese_double) From c1297f4eb38a63e2f99c9fa76e32e3a36c933b85 Mon Sep 17 00:00:00 2001 From: blepping <157360029+blepping@users.noreply.github.com> Date: Sat, 13 Sep 2025 15:58:43 -0600 Subject: [PATCH 0590/1073] Add support for Chroma Radiance (#9682) * Initial Chroma Radiance support * Minor Chroma Radiance cleanups * Update Radiance nodes to ensure latents/images are on the intermediate device * Fix Chroma Radiance memory estimation. * Increase Chroma Radiance memory usage factor * Increase Chroma Radiance memory usage factor once again * Ensure images are multiples of 16 for Chroma Radiance Add batch dimension and fix channels when necessary in ChromaRadianceImageToLatent node * Tile Chroma Radiance NeRF to reduce memory consumption, update memory usage factor * Update Radiance to support conv nerf final head type. 
* Allow setting NeRF embedder dtype for Radiance Bump Radiance nerf tile size to 32 Support EasyCache/LazyCache on Radiance (maybe) * Add ChromaRadianceStubVAE node * Crop Radiance image inputs to multiples of 16 instead of erroring to be in line with existing VAE behavior * Convert Chroma Radiance nodes to V3 schema. * Add ChromaRadianceOptions node and backend support. Cleanups/refactoring to reduce code duplication with Chroma. * Fix overriding the NeRF embedder dtype for Chroma Radiance * Minor Chroma Radiance cleanups * Move Chroma Radiance to its own directory in ldm Minor code cleanups and tooltip improvements * Fix Chroma Radiance embedder dtype overriding * Remove Radiance dynamic nerf_embedder dtype override feature * Unbork Radiance NeRF embedder init * Remove Chroma Radiance image conversion and stub VAE nodes Add a chroma_radiance option to the VAELoader builtin node which uses comfy.sd.PixelspaceConversionVAE Add a PixelspaceConversionVAE to comfy.sd for converting BHWC 0..1 <-> BCHW -1..1 --- comfy/latent_formats.py | 17 ++ comfy/ldm/chroma/model.py | 10 +- comfy/ldm/chroma_radiance/layers.py | 206 ++++++++++++++++ comfy/ldm/chroma_radiance/model.py | 328 ++++++++++++++++++++++++++ comfy/model_base.py | 9 +- comfy/model_detection.py | 14 +- comfy/sd.py | 60 +++++ comfy/supported_models.py | 15 +- comfy_extras/nodes_chroma_radiance.py | 114 +++++++++ nodes.py | 6 +- 10 files changed, 770 insertions(+), 9 deletions(-) create mode 100644 comfy/ldm/chroma_radiance/layers.py create mode 100644 comfy/ldm/chroma_radiance/model.py create mode 100644 comfy_extras/nodes_chroma_radiance.py diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 894540879..77e642a94 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -629,3 +629,20 @@ class Hunyuan3Dv2mini(LatentFormat): class ACEAudio(LatentFormat): latent_channels = 8 latent_dimensions = 2 + +class ChromaRadiance(LatentFormat): + latent_channels = 3 + + def __init__(self): + self.latent_rgb_factors = [ + # R G B + [ 1.0, 0.0, 0.0 ], + [ 0.0, 1.0, 0.0 ], + [ 0.0, 0.0, 1.0 ] + ] + + def process_in(self, latent): + return latent + + def process_out(self, latent): + return latent diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index 4f709f87d..ad1c523fe 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -151,8 +151,6 @@ class Chroma(nn.Module): attn_mask: Tensor = None, ) -> Tensor: patches_replace = transformer_options.get("patches_replace", {}) - if img.ndim != 3 or txt.ndim != 3: - raise ValueError("Input img and txt tensors must have 3 dimensions.") # running on sequences img img = self.img_in(img) @@ -254,8 +252,9 @@ class Chroma(nn.Module): img[:, txt.shape[1] :, ...] += add img = img[:, txt.shape[1] :, ...] 
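The hasattr guard introduced below lets a subclass skip constructing final_layer entirely and take the raw token stream from forward_orig for its own decoding head, which is how Chroma Radiance reuses the Chroma backbone. A reduced sketch of the pattern with toy dimensions (Backbone is a hypothetical name, not a class from this codebase):

import torch

class Backbone(torch.nn.Module):
    def __init__(self, with_head: bool = True):
        super().__init__()
        if with_head:
            self.final_layer = torch.nn.Linear(8, 8)

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        # Only project if a head was actually constructed.
        if hasattr(self, "final_layer"):
            img = self.final_layer(img)
        return img

raw = Backbone(with_head=False)(torch.randn(1, 4, 8))  # raw hidden states pass through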
- final_mod = self.get_modulations(mod_vectors, "final") - img = self.final_layer(img, vec=final_mod) # (N, T, patch_size ** 2 * out_channels) + if hasattr(self, "final_layer"): + final_mod = self.get_modulations(mod_vectors, "final") + img = self.final_layer(img, vec=final_mod) # (N, T, patch_size ** 2 * out_channels) return img def forward(self, x, timestep, context, guidance, control=None, transformer_options={}, **kwargs): @@ -271,6 +270,9 @@ class Chroma(nn.Module): img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=self.patch_size, pw=self.patch_size) + if img.ndim != 3 or context.ndim != 3: + raise ValueError("Input img and txt tensors must have 3 dimensions.") + h_len = ((h + (self.patch_size // 2)) // self.patch_size) w_len = ((w + (self.patch_size // 2)) // self.patch_size) img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) diff --git a/comfy/ldm/chroma_radiance/layers.py b/comfy/ldm/chroma_radiance/layers.py new file mode 100644 index 000000000..3c7bc9b6b --- /dev/null +++ b/comfy/ldm/chroma_radiance/layers.py @@ -0,0 +1,206 @@ +# Adapted from https://github.com/lodestone-rock/flow +from functools import lru_cache + +import torch +from torch import nn + +from comfy.ldm.flux.layers import RMSNorm + + +class NerfEmbedder(nn.Module): + """ + An embedder module that combines input features with a 2D positional + encoding that mimics the Discrete Cosine Transform (DCT). + + This module takes an input tensor of shape (B, P^2, C), where P is the + patch size, and enriches it with positional information before projecting + it to a new hidden size. + """ + def __init__( + self, + in_channels: int, + hidden_size_input: int, + max_freqs: int, + dtype=None, + device=None, + operations=None, + ): + """ + Initializes the NerfEmbedder. + + Args: + in_channels (int): The number of channels in the input tensor. + hidden_size_input (int): The desired dimension of the output embedding. + max_freqs (int): The number of frequency components to use for both + the x and y dimensions of the positional encoding. + The total number of positional features will be max_freqs^2. + """ + super().__init__() + self.dtype = dtype + self.max_freqs = max_freqs + self.hidden_size_input = hidden_size_input + + # A linear layer to project the concatenated input features and + # positional encodings to the final output dimension. + self.embedder = nn.Sequential( + operations.Linear(in_channels + max_freqs**2, hidden_size_input, dtype=dtype, device=device) + ) + + @lru_cache(maxsize=4) + def fetch_pos(self, patch_size: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor: + """ + Generates and caches 2D DCT-like positional embeddings for a given patch size. + + The LRU cache is a performance optimization that avoids recomputing the + same positional grid on every forward pass. + + Args: + patch_size (int): The side length of the square input patch. + device: The torch device to create the tensors on. + dtype: The torch dtype for the tensors. + + Returns: + A tensor of shape (1, patch_size^2, max_freqs^2) containing the + positional embeddings. + """ + # Create normalized 1D coordinate grids from 0 to 1. + pos_x = torch.linspace(0, 1, patch_size, device=device, dtype=dtype) + pos_y = torch.linspace(0, 1, patch_size, device=device, dtype=dtype) + + # Create a 2D meshgrid of coordinates. + pos_y, pos_x = torch.meshgrid(pos_y, pos_x, indexing="ij") + + # Reshape positions to be broadcastable with frequencies. + # Shape becomes (patch_size^2, 1, 1). 
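+ # For the Chroma Radiance defaults (patch_size=16, max_freqs=8) the cached
+ # tensor returned by this method comes out as (1, 256, 64): one row per
+ # pixel position in the patch, each with 64 DCT-style positional features.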
+ pos_x = pos_x.reshape(-1, 1, 1) + pos_y = pos_y.reshape(-1, 1, 1) + + # Create a 1D tensor of frequency values from 0 to max_freqs-1. + freqs = torch.linspace(0, self.max_freqs - 1, self.max_freqs, dtype=dtype, device=device) + + # Reshape frequencies to be broadcastable for creating 2D basis functions. + # freqs_x shape: (1, max_freqs, 1) + # freqs_y shape: (1, 1, max_freqs) + freqs_x = freqs[None, :, None] + freqs_y = freqs[None, None, :] + + # A custom weighting coefficient, not part of standard DCT. + # This seems to down-weight the contribution of higher-frequency interactions. + coeffs = (1 + freqs_x * freqs_y) ** -1 + + # Calculate the 1D cosine basis functions for x and y coordinates. + # This is the core of the DCT formulation. + dct_x = torch.cos(pos_x * freqs_x * torch.pi) + dct_y = torch.cos(pos_y * freqs_y * torch.pi) + + # Combine the 1D basis functions to create 2D basis functions by element-wise + # multiplication, and apply the custom coefficients. Broadcasting handles the + # combination of all (pos_x, freqs_x) with all (pos_y, freqs_y). + # The result is flattened into a feature vector for each position. + dct = (dct_x * dct_y * coeffs).view(1, -1, self.max_freqs ** 2) + + return dct + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + """ + Forward pass for the embedder. + + Args: + inputs (Tensor): The input tensor of shape (B, P^2, C). + + Returns: + Tensor: The output tensor of shape (B, P^2, hidden_size_input). + """ + # Get the batch size, number of pixels, and number of channels. + B, P2, C = inputs.shape + + # Infer the patch side length from the number of pixels (P^2). + patch_size = int(P2 ** 0.5) + + input_dtype = inputs.dtype + inputs = inputs.to(dtype=self.dtype) + + # Fetch the pre-computed or cached positional embeddings. + dct = self.fetch_pos(patch_size, inputs.device, self.dtype) + + # Repeat the positional embeddings for each item in the batch. + dct = dct.repeat(B, 1, 1) + + # Concatenate the original input features with the positional embeddings + # along the feature dimension. + inputs = torch.cat((inputs, dct), dim=-1) + + # Project the combined tensor to the target hidden size. + return self.embedder(inputs).to(dtype=input_dtype) + + +class NerfGLUBlock(nn.Module): + """ + A NerfBlock using a Gated Linear Unit (GLU) like MLP. + """ + def __init__(self, hidden_size_s: int, hidden_size_x: int, mlp_ratio, dtype=None, device=None, operations=None): + super().__init__() + # The total number of parameters for the MLP is increased to accommodate + # the gate, value, and output projection matrices. + # We now need to generate parameters for 3 matrices. + total_params = 3 * hidden_size_x**2 * mlp_ratio + self.param_generator = operations.Linear(hidden_size_s, total_params, dtype=dtype, device=device) + self.norm = RMSNorm(hidden_size_x, dtype=dtype, device=device, operations=operations) + self.mlp_ratio = mlp_ratio + + + def forward(self, x: torch.Tensor, s: torch.Tensor) -> torch.Tensor: + batch_size, num_x, hidden_size_x = x.shape + mlp_params = self.param_generator(s) + + # Split the generated parameters into three parts for the gate, value, and output projection. + fc1_gate_params, fc1_value_params, fc2_params = mlp_params.chunk(3, dim=-1) + + # Reshape the parameters into matrices for batch matrix multiplication. 
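+ # Shape walk-through, writing hidden_size_x as X and mlp_ratio as R:
+ # mlp_params is [B, 3*X*X*R]; chunking gives three [B, X*X*R] slices that
+ # reshape to fc1_gate / fc1_value [B, X, X*R] and fc2 [B, X*R, X], so
+ # torch.bmm lifts the per-patch tokens [B, P^2, X] to X*R and, after the
+ # SiLU gating, projects them back down to X.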
+ fc1_gate = fc1_gate_params.view(batch_size, hidden_size_x, hidden_size_x * self.mlp_ratio) + fc1_value = fc1_value_params.view(batch_size, hidden_size_x, hidden_size_x * self.mlp_ratio) + fc2 = fc2_params.view(batch_size, hidden_size_x * self.mlp_ratio, hidden_size_x) + + # Normalize the generated weight matrices as in the original implementation. + fc1_gate = torch.nn.functional.normalize(fc1_gate, dim=-2) + fc1_value = torch.nn.functional.normalize(fc1_value, dim=-2) + fc2 = torch.nn.functional.normalize(fc2, dim=-2) + + res_x = x + x = self.norm(x) + + # Apply the final output projection. + x = torch.bmm(torch.nn.functional.silu(torch.bmm(x, fc1_gate)) * torch.bmm(x, fc1_value), fc2) + + return x + res_x + + +class NerfFinalLayer(nn.Module): + def __init__(self, hidden_size, out_channels, dtype=None, device=None, operations=None): + super().__init__() + self.norm = RMSNorm(hidden_size, dtype=dtype, device=device, operations=operations) + self.linear = operations.Linear(hidden_size, out_channels, dtype=dtype, device=device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # RMSNorm normalizes over the last dimension, but our channel dim (C) is at dim=1. + # So we temporarily move the channel dimension to the end for the norm operation. + return self.linear(self.norm(x.movedim(1, -1))).movedim(-1, 1) + + +class NerfFinalLayerConv(nn.Module): + def __init__(self, hidden_size: int, out_channels: int, dtype=None, device=None, operations=None): + super().__init__() + self.norm = RMSNorm(hidden_size, dtype=dtype, device=device, operations=operations) + self.conv = operations.Conv2d( + in_channels=hidden_size, + out_channels=out_channels, + kernel_size=3, + padding=1, + dtype=dtype, + device=device, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # RMSNorm normalizes over the last dimension, but our channel dim (C) is at dim=1. + # So we temporarily move the channel dimension to the end for the norm operation. + return self.conv(self.norm(x.movedim(1, -1)).movedim(-1, 1)) diff --git a/comfy/ldm/chroma_radiance/model.py b/comfy/ldm/chroma_radiance/model.py new file mode 100644 index 000000000..f7eb7a22e --- /dev/null +++ b/comfy/ldm/chroma_radiance/model.py @@ -0,0 +1,328 @@ +# Credits: +# Original Flux code can be found on: https://github.com/black-forest-labs/flux +# Chroma Radiance adaption referenced from https://github.com/lodestone-rock/flow + +from dataclasses import dataclass +from typing import Optional + +import torch +from torch import Tensor, nn +from einops import repeat +import comfy.ldm.common_dit + +from comfy.ldm.flux.layers import EmbedND + +from comfy.ldm.chroma.model import Chroma, ChromaParams +from comfy.ldm.chroma.layers import ( + DoubleStreamBlock, + SingleStreamBlock, + Approximator, +) +from .layers import ( + NerfEmbedder, + NerfGLUBlock, + NerfFinalLayer, + NerfFinalLayerConv, +) + + +@dataclass +class ChromaRadianceParams(ChromaParams): + patch_size: int + nerf_hidden_size: int + nerf_mlp_ratio: int + nerf_depth: int + nerf_max_freqs: int + # Setting nerf_tile_size to 0 disables tiling. + nerf_tile_size: int + # Currently one of linear (legacy) or conv. + nerf_final_head_type: str + # None means use the same dtype as the model. + nerf_embedder_dtype: Optional[torch.dtype] + + +class ChromaRadiance(Chroma): + """ + Transformer model for flow matching on sequences. 
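+ Chroma Radiance variant: the backbone runs on 16x16 pixel patches and a
+ per-patch NeRF head decodes the hidden states straight back to RGB, so no
+ separate latent-space VAE is involved.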
+ """ + + def __init__(self, image_model=None, final_layer=True, dtype=None, device=None, operations=None, **kwargs): + if operations is None: + raise RuntimeError("Attempt to create ChromaRadiance object without setting operations") + nn.Module.__init__(self) + self.dtype = dtype + params = ChromaRadianceParams(**kwargs) + self.params = params + self.patch_size = params.patch_size + self.in_channels = params.in_channels + self.out_channels = params.out_channels + if params.hidden_size % params.num_heads != 0: + raise ValueError( + f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}" + ) + pe_dim = params.hidden_size // params.num_heads + if sum(params.axes_dim) != pe_dim: + raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}") + self.hidden_size = params.hidden_size + self.num_heads = params.num_heads + self.in_dim = params.in_dim + self.out_dim = params.out_dim + self.hidden_dim = params.hidden_dim + self.n_layers = params.n_layers + self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim) + self.img_in_patch = operations.Conv2d( + params.in_channels, + params.hidden_size, + kernel_size=params.patch_size, + stride=params.patch_size, + bias=True, + dtype=dtype, + device=device, + ) + self.txt_in = operations.Linear(params.context_in_dim, self.hidden_size, dtype=dtype, device=device) + # set as nn identity for now, will overwrite it later. + self.distilled_guidance_layer = Approximator( + in_dim=self.in_dim, + hidden_dim=self.hidden_dim, + out_dim=self.out_dim, + n_layers=self.n_layers, + dtype=dtype, device=device, operations=operations + ) + + + self.double_blocks = nn.ModuleList( + [ + DoubleStreamBlock( + self.hidden_size, + self.num_heads, + mlp_ratio=params.mlp_ratio, + qkv_bias=params.qkv_bias, + dtype=dtype, device=device, operations=operations + ) + for _ in range(params.depth) + ] + ) + + self.single_blocks = nn.ModuleList( + [ + SingleStreamBlock( + self.hidden_size, + self.num_heads, + mlp_ratio=params.mlp_ratio, + dtype=dtype, device=device, operations=operations, + ) + for _ in range(params.depth_single_blocks) + ] + ) + + # pixel channel concat with DCT + self.nerf_image_embedder = NerfEmbedder( + in_channels=params.in_channels, + hidden_size_input=params.nerf_hidden_size, + max_freqs=params.nerf_max_freqs, + dtype=params.nerf_embedder_dtype or dtype, + device=device, + operations=operations, + ) + + self.nerf_blocks = nn.ModuleList([ + NerfGLUBlock( + hidden_size_s=params.hidden_size, + hidden_size_x=params.nerf_hidden_size, + mlp_ratio=params.nerf_mlp_ratio, + dtype=dtype, + device=device, + operations=operations, + ) for _ in range(params.nerf_depth) + ]) + + if params.nerf_final_head_type == "linear": + self.nerf_final_layer = NerfFinalLayer( + params.nerf_hidden_size, + out_channels=params.in_channels, + dtype=dtype, + device=device, + operations=operations, + ) + elif params.nerf_final_head_type == "conv": + self.nerf_final_layer_conv = NerfFinalLayerConv( + params.nerf_hidden_size, + out_channels=params.in_channels, + dtype=dtype, + device=device, + operations=operations, + ) + else: + errstr = f"Unsupported nerf_final_head_type {params.nerf_final_head_type}" + raise ValueError(errstr) + + self.skip_mmdit = [] + self.skip_dit = [] + self.lite = False + + @property + def _nerf_final_layer(self) -> nn.Module: + if self.params.nerf_final_head_type == "linear": + return self.nerf_final_layer + if self.params.nerf_final_head_type == "conv": + return self.nerf_final_layer_conv + # 
Impossible to get here as we raise an error on unexpected types on initialization. + raise NotImplementedError + + def img_in(self, img: Tensor) -> Tensor: + img = self.img_in_patch(img) # -> [B, Hidden, H/P, W/P] + # flatten into a sequence for the transformer. + return img.flatten(2).transpose(1, 2) # -> [B, NumPatches, Hidden] + + def forward_nerf( + self, + img_orig: Tensor, + img_out: Tensor, + params: ChromaRadianceParams, + ) -> Tensor: + B, C, H, W = img_orig.shape + num_patches = img_out.shape[1] + patch_size = params.patch_size + + # Store the raw pixel values of each patch for the NeRF head later. + # unfold creates patches: [B, C * P * P, NumPatches] + nerf_pixels = nn.functional.unfold(img_orig, kernel_size=patch_size, stride=patch_size) + nerf_pixels = nerf_pixels.transpose(1, 2) # -> [B, NumPatches, C * P * P] + + if params.nerf_tile_size > 0 and num_patches > params.nerf_tile_size: + # Enable tiling if nerf_tile_size isn't 0 and we actually have more patches than + # the tile size. + img_dct = self.forward_tiled_nerf(img_out, nerf_pixels, B, C, num_patches, patch_size, params) + else: + # Reshape for per-patch processing + nerf_hidden = img_out.reshape(B * num_patches, params.hidden_size) + nerf_pixels = nerf_pixels.reshape(B * num_patches, C, patch_size**2).transpose(1, 2) + + # Get DCT-encoded pixel embeddings [pixel-dct] + img_dct = self.nerf_image_embedder(nerf_pixels) + + # Pass through the dynamic MLP blocks (the NeRF) + for block in self.nerf_blocks: + img_dct = block(img_dct, nerf_hidden) + + # Reassemble the patches into the final image. + img_dct = img_dct.transpose(1, 2) # -> [B*NumPatches, C, P*P] + # Reshape to combine with batch dimension for fold + img_dct = img_dct.reshape(B, num_patches, -1) # -> [B, NumPatches, C*P*P] + img_dct = img_dct.transpose(1, 2) # -> [B, C*P*P, NumPatches] + img_dct = nn.functional.fold( + img_dct, + output_size=(H, W), + kernel_size=patch_size, + stride=patch_size, + ) + return self._nerf_final_layer(img_dct) + + def forward_tiled_nerf( + self, + nerf_hidden: Tensor, + nerf_pixels: Tensor, + batch: int, + channels: int, + num_patches: int, + patch_size: int, + params: ChromaRadianceParams, + ) -> Tensor: + """ + Processes the NeRF head in tiles to save memory. + nerf_hidden has shape [B, L, D] + nerf_pixels has shape [B, L, C * P * P] + """ + tile_size = params.nerf_tile_size + output_tiles = [] + # Iterate over the patches in tiles. The dimension L (num_patches) is at index 1. 
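+ # With the default nerf_tile_size of 32 and patch_size of 16, each
+ # iteration sends at most 32 patches * 256 pixels = 8192 positions per
+ # batch item through the NeRF blocks, instead of every patch at once.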
+ for i in range(0, num_patches, tile_size): + end = min(i + tile_size, num_patches) + + # Slice the current tile from the input tensors + nerf_hidden_tile = nerf_hidden[:, i:end, :] + nerf_pixels_tile = nerf_pixels[:, i:end, :] + + # Get the actual number of patches in this tile (can be smaller for the last tile) + num_patches_tile = nerf_hidden_tile.shape[1] + + # Reshape the tile for per-patch processing + # [B, NumPatches_tile, D] -> [B * NumPatches_tile, D] + nerf_hidden_tile = nerf_hidden_tile.reshape(batch * num_patches_tile, params.hidden_size) + # [B, NumPatches_tile, C*P*P] -> [B*NumPatches_tile, C, P*P] -> [B*NumPatches_tile, P*P, C] + nerf_pixels_tile = nerf_pixels_tile.reshape(batch * num_patches_tile, channels, patch_size**2).transpose(1, 2) + + # get DCT-encoded pixel embeddings [pixel-dct] + img_dct_tile = self.nerf_image_embedder(nerf_pixels_tile) + + # pass through the dynamic MLP blocks (the NeRF) + for block in self.nerf_blocks: + img_dct_tile = block(img_dct_tile, nerf_hidden_tile) + + output_tiles.append(img_dct_tile) + + # Concatenate the processed tiles along the patch dimension + return torch.cat(output_tiles, dim=0) + + def radiance_get_override_params(self, overrides: dict) -> ChromaRadianceParams: + params = self.params + if not overrides: + return params + params_dict = {k: getattr(params, k) for k in params.__dataclass_fields__} + nullable_keys = frozenset(("nerf_embedder_dtype",)) + bad_keys = tuple(k for k in overrides if k not in params_dict) + if bad_keys: + e = f"Unknown key(s) in transformer_options chroma_radiance_options: {', '.join(bad_keys)}" + raise ValueError(e) + bad_keys = tuple( + k + for k, v in overrides.items() + if type(v) != type(getattr(params, k)) and (v is not None or k not in nullable_keys) + ) + if bad_keys: + e = f"Invalid value(s) in transformer_options chroma_radiance_options: {', '.join(bad_keys)}" + raise ValueError(e) + # At this point it's all valid keys and values so we can merge with the existing params. 
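+ # Example: passing transformer_options["chroma_radiance_options"] =
+ # {"nerf_tile_size": 0} yields a params copy with tiling disabled for
+ # this forward pass only; the params stored on the model never mutate.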
+ params_dict |= overrides + return params.__class__(**params_dict) + + def _forward( + self, + x: Tensor, + timestep: Tensor, + context: Tensor, + guidance: Optional[Tensor], + control: Optional[dict]=None, + transformer_options: dict={}, + **kwargs: dict, + ) -> Tensor: + bs, c, h, w = x.shape + img = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size)) + + if img.ndim != 4: + raise ValueError("Input img tensor must be in [B, C, H, W] format.") + if context.ndim != 3: + raise ValueError("Input txt tensors must have 3 dimensions.") + + params = self.radiance_get_override_params(transformer_options.get("chroma_radiance_options", {})) + + h_len = ((h + (self.patch_size // 2)) // self.patch_size) + w_len = ((w + (self.patch_size // 2)) // self.patch_size) + img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) + img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) + img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) + img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs) + txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype) + + img_out = self.forward_orig( + img, + img_ids, + context, + txt_ids, + timestep, + guidance, + control, + transformer_options, + attn_mask=kwargs.get("attention_mask", None), + ) + return self.forward_nerf(img, img_out, params) diff --git a/comfy/model_base.py b/comfy/model_base.py index 324d89cff..252dfcf69 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -42,6 +42,7 @@ import comfy.ldm.wan.model import comfy.ldm.hunyuan3d.model import comfy.ldm.hidream.model import comfy.ldm.chroma.model +import comfy.ldm.chroma_radiance.model import comfy.ldm.ace.model import comfy.ldm.omnigen.omnigen2 import comfy.ldm.qwen_image.model @@ -1320,8 +1321,8 @@ class HiDream(BaseModel): return out class Chroma(Flux): - def __init__(self, model_config, model_type=ModelType.FLUX, device=None): - super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.chroma.model.Chroma) + def __init__(self, model_config, model_type=ModelType.FLUX, device=None, unet_model=comfy.ldm.chroma.model.Chroma): + super().__init__(model_config, model_type, device=device, unet_model=unet_model) def extra_conds(self, **kwargs): out = super().extra_conds(**kwargs) @@ -1331,6 +1332,10 @@ class Chroma(Flux): out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) return out +class ChromaRadiance(Chroma): + def __init__(self, model_config, model_type=ModelType.FLUX, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.chroma_radiance.model.ChromaRadiance) + class ACEStep(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.ace.model.ACEStepTransformer2DModel) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index fe983cede..03d44f65e 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -174,7 +174,7 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["guidance_embed"] = len(guidance_keys) > 0 return dit_config - if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in state_dict_keys and '{}img_in.weight'.format(key_prefix) in state_dict_keys: #Flux + if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in 
state_dict_keys and ('{}img_in.weight'.format(key_prefix) in state_dict_keys or f"{key_prefix}distilled_guidance_layer.norms.0.scale" in state_dict_keys): #Flux, Chroma or Chroma Radiance (has no img_in.weight) dit_config = {} dit_config["image_model"] = "flux" dit_config["in_channels"] = 16 @@ -204,6 +204,18 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["out_dim"] = 3072 dit_config["hidden_dim"] = 5120 dit_config["n_layers"] = 5 + if f"{key_prefix}nerf_blocks.0.norm.scale" in state_dict_keys: #Chroma Radiance + dit_config["image_model"] = "chroma_radiance" + dit_config["in_channels"] = 3 + dit_config["out_channels"] = 3 + dit_config["patch_size"] = 16 + dit_config["nerf_hidden_size"] = 64 + dit_config["nerf_mlp_ratio"] = 4 + dit_config["nerf_depth"] = 4 + dit_config["nerf_max_freqs"] = 8 + dit_config["nerf_tile_size"] = 32 + dit_config["nerf_final_head_type"] = "conv" if f"{key_prefix}nerf_final_layer_conv.norm.scale" in state_dict_keys else "linear" + dit_config["nerf_embedder_dtype"] = torch.float32 else: dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys return dit_config diff --git a/comfy/sd.py b/comfy/sd.py index f8f1a89e8..cb92802e9 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -785,6 +785,66 @@ class VAE: except: return None +# "Fake" VAE that converts from IMAGE B, H, W, C and values on the scale of 0..1 +# to LATENT B, C, H, W and values on the scale of -1..1. +class PixelspaceConversionVAE: + def __init__(self, size_increment: int=16): + self.intermediate_device = comfy.model_management.intermediate_device() + self.size_increment = size_increment + + def vae_encode_crop_pixels(self, pixels: torch.Tensor) -> torch.Tensor: + if self.size_increment == 1: + return pixels + dims = pixels.shape[1:-1] + for d in range(len(dims)): + d_adj = (dims[d] // self.size_increment) * self.size_increment + if d_adj == d: + continue + d_offset = (dims[d] % self.size_increment) // 2 + pixels = pixels.narrow(d + 1, d_offset, d_adj) + return pixels + + def encode(self, pixels: torch.Tensor, *_args, **_kwargs) -> torch.Tensor: + if pixels.ndim == 3: + pixels = pixels.unsqueeze(0) + elif pixels.ndim != 4: + raise ValueError("Unexpected input image shape") + # Ensure the image has spatial dimensions that are multiples of 16. + pixels = self.vae_encode_crop_pixels(pixels) + h, w, c = pixels.shape[1:] + if h < self.size_increment or w < self.size_increment: + raise ValueError(f"Image inputs must have height/width of at least {self.size_increment} pixel(s).") + pixels= pixels[..., :3] + if c == 1: + pixels = pixels.expand(-1, -1, -1, 3) + elif c != 3: + raise ValueError("Unexpected number of channels in input image") + # Rescale to -1..1 and move the channel dimension to position 1. + latent = pixels.to(device=self.intermediate_device, dtype=torch.float32, copy=True) + latent = latent.clamp_(0, 1).movedim(-1, 1).contiguous() + latent -= 0.5 + latent *= 2 + return latent.clamp_(-1, 1) + + def decode(self, samples: torch.Tensor, *_args, **_kwargs) -> torch.Tensor: + # Rescale to 0..1 and move the channel dimension to the end. + img = samples.to(device=self.intermediate_device, dtype=torch.float32, copy=True) + img = img.clamp_(-1, 1).movedim(1, -1).contiguous() + img += 1.0 + img *= 0.5 + return img.clamp_(0, 1) + + encode_tiled = encode + decode_tiled = decode + + @classmethod + def spacial_compression_decode(cls) -> int: + # This just exists so the tiled VAE nodes don't crash. 
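+ # (Radiance is a pixel-space model: one latent element per image pixel,
+ # so every compression factor here is simply 1.)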
+ return 1 + + spacial_compression_encode = spacial_compression_decode + temporal_compression_decode = spacial_compression_decode + class StyleModel: def __init__(self, model, device="cpu"): self.model = model diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 472ea0ae9..be36b5dfe 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1205,6 +1205,19 @@ class Chroma(supported_models_base.BASE): t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.pixart_t5.PixArtTokenizer, comfy.text_encoders.pixart_t5.pixart_te(**t5_detect)) +class ChromaRadiance(Chroma): + unet_config = { + "image_model": "chroma_radiance", + } + + latent_format = comfy.latent_formats.ChromaRadiance + + # Pixel-space model, no spatial compression for model input. + memory_usage_factor = 0.0325 + + def get_model(self, state_dict, prefix="", device=None): + return model_base.ChromaRadiance(self, device=device) + class ACEStep(supported_models_base.BASE): unet_config = { "audio_model": "ace", @@ -1338,6 +1351,6 @@ class HunyuanImage21Refiner(HunyuanVideo): out = model_base.HunyuanImage21Refiner(self, device=device) return out -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ACEStep, Omnigen2, QwenImage] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_chroma_radiance.py b/comfy_extras/nodes_chroma_radiance.py new file mode 100644 index 000000000..381989818 --- /dev/null +++ b/comfy_extras/nodes_chroma_radiance.py @@ -0,0 +1,114 @@ +from typing_extensions import override +from typing import Callable + +import torch + +import comfy.model_management +from comfy_api.latest import ComfyExtension, io + +import nodes + +class EmptyChromaRadianceLatentImage(io.ComfyNode): + @classmethod + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="EmptyChromaRadianceLatentImage", + category="latent/chroma_radiance", + inputs=[ + io.Int.Input(id="width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, 
step=16), + io.Int.Input(id="batch_size", default=1, min=1, max=4096), + ], + outputs=[io.Latent().Output()], + ) + + @classmethod + def execute(cls, *, width: int, height: int, batch_size: int=1) -> io.NodeOutput: + latent = torch.zeros((batch_size, 3, height, width), device=comfy.model_management.intermediate_device()) + return io.NodeOutput({"samples":latent}) + + +class ChromaRadianceOptions(io.ComfyNode): + @classmethod + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="ChromaRadianceOptions", + category="model_patches/chroma_radiance", + description="Allows setting advanced options for the Chroma Radiance model.", + inputs=[ + io.Model.Input(id="model"), + io.Boolean.Input( + id="preserve_wrapper", + default=True, + tooltip="When enabled, will delegate to an existing model function wrapper if it exists. Generally should be left enabled.", + ), + io.Float.Input( + id="start_sigma", + default=1.0, + min=0.0, + max=1.0, + tooltip="First sigma that these options will be in effect.", + ), + io.Float.Input( + id="end_sigma", + default=0.0, + min=0.0, + max=1.0, + tooltip="Last sigma that these options will be in effect.", + ), + io.Int.Input( + id="nerf_tile_size", + default=-1, + min=-1, + tooltip="Allows overriding the default NeRF tile size. -1 means use the default (32). 0 means use non-tiling mode (may require a lot of VRAM).", + ), + ], + outputs=[io.Model.Output()], + ) + + @classmethod + def execute( + cls, + *, + model: io.Model.Type, + preserve_wrapper: bool, + start_sigma: float, + end_sigma: float, + nerf_tile_size: int, + ) -> io.NodeOutput: + radiance_options = {} + if nerf_tile_size >= 0: + radiance_options["nerf_tile_size"] = nerf_tile_size + + if not radiance_options: + return io.NodeOutput(model) + + old_wrapper = model.model_options.get("model_function_wrapper") + + def model_function_wrapper(apply_model: Callable, args: dict) -> torch.Tensor: + c = args["c"].copy() + sigma = args["timestep"].max().detach().cpu().item() + if end_sigma <= sigma <= start_sigma: + transformer_options = c.get("transformer_options", {}).copy() + transformer_options["chroma_radiance_options"] = radiance_options.copy() + c["transformer_options"] = transformer_options + if not (preserve_wrapper and old_wrapper): + return apply_model(args["input"], args["timestep"], **c) + return old_wrapper(apply_model, args | {"c": c}) + + model = model.clone() + model.set_model_unet_function_wrapper(model_function_wrapper) + return io.NodeOutput(model) + + +class ChromaRadianceExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + EmptyChromaRadianceLatentImage, + ChromaRadianceOptions, + ] + + +async def comfy_entrypoint() -> ChromaRadianceExtension: + return ChromaRadianceExtension() diff --git a/nodes.py b/nodes.py index 2befb4b75..76b8cbac8 100644 --- a/nodes.py +++ b/nodes.py @@ -730,6 +730,7 @@ class VAELoader: vaes.append("taesd3") if f1_taesd_dec and f1_taesd_enc: vaes.append("taef1") + vaes.append("chroma_radiance") return vaes @staticmethod @@ -772,7 +773,9 @@ class VAELoader: #TODO: scale factor? 
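Selecting the new chroma_radiance entry makes load_vae below return this conversion-only PixelspaceConversionVAE instead of loading weights. A standalone sketch of the tensor transform it performs, per the class comment in comfy/sd.py above (the function names are illustrative, not part of this codebase):

import torch

def image_to_radiance_latent(img: torch.Tensor) -> torch.Tensor:
    # IMAGE [B, H, W, C] in 0..1 -> LATENT [B, C, H, W] in -1..1
    return img.movedim(-1, 1) * 2.0 - 1.0

def radiance_latent_to_image(lat: torch.Tensor) -> torch.Tensor:
    # LATENT [B, C, H, W] in -1..1 -> IMAGE [B, H, W, C] in 0..1
    return (lat.movedim(1, -1) + 1.0) * 0.5

img = torch.rand(1, 64, 64, 3)
assert torch.allclose(radiance_latent_to_image(image_to_radiance_latent(img)), img)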
     def load_vae(self, vae_name):
-        if vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]:
+        if vae_name == "chroma_radiance":
+            return (comfy.sd.PixelspaceConversionVAE(),)
+        elif vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]:
             sd = self.load_taesd(vae_name)
         else:
             vae_path = folder_paths.get_full_path_or_raise("vae", vae_name)
@@ -2322,6 +2325,7 @@ async def init_builtin_extra_nodes():
         "nodes_tcfg.py",
         "nodes_context_windows.py",
         "nodes_qwen.py",
+        "nodes_chroma_radiance.py",
         "nodes_model_patch.py",
         "nodes_easycache.py",
         "nodes_audio_encoder.py",

From 80b7c9455bf7afba7a9e95a1eb76b172408ab56c Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 13 Sep 2025 15:03:34 -0700
Subject: [PATCH 0591/1073] Changes to the previous radiance commit. (#9851)

---
 comfy/ldm/chroma_radiance/model.py |  7 +--
 comfy/pixel_space_convert.py       | 16 +++++++
 comfy/sd.py                        | 69 +++++-------------------------
 comfy/supported_models.py          |  2 +-
 nodes.py                           |  7 +--
 5 files changed, 35 insertions(+), 66 deletions(-)
 create mode 100644 comfy/pixel_space_convert.py

diff --git a/comfy/ldm/chroma_radiance/model.py b/comfy/ldm/chroma_radiance/model.py
index f7eb7a22e..47aa11b04 100644
--- a/comfy/ldm/chroma_radiance/model.py
+++ b/comfy/ldm/chroma_radiance/model.py
@@ -306,8 +306,9 @@ class ChromaRadiance(Chroma):
 
         params = self.radiance_get_override_params(transformer_options.get("chroma_radiance_options", {}))
 
-        h_len = ((h + (self.patch_size // 2)) // self.patch_size)
-        w_len = ((w + (self.patch_size // 2)) // self.patch_size)
+        h_len = (img.shape[-2] // self.patch_size)
+        w_len = (img.shape[-1] // self.patch_size)
+
         img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
         img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1)
         img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0)
@@ -325,4 +326,4 @@ class ChromaRadiance(Chroma):
             transformer_options,
             attn_mask=kwargs.get("attention_mask", None),
         )
-        return self.forward_nerf(img, img_out, params)
+        return self.forward_nerf(img, img_out, params)[:, :, :h, :w]

diff --git a/comfy/pixel_space_convert.py b/comfy/pixel_space_convert.py
new file mode 100644
index 000000000..049bbcfb4
--- /dev/null
+++ b/comfy/pixel_space_convert.py
@@ -0,0 +1,16 @@
+import torch
+
+
+# "Fake" VAE that converts from IMAGE B, H, W, C and values on the scale of 0..1
+# to LATENT B, C, H, W and values on the scale of -1..1.
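+#
+# Editor's note - a minimal sketch of the mapping described above. The names
+# `pixels` and `latent` are illustrative only, not part of this module:
+#
+#   latent = pixels.movedim(-1, 1) * 2.0 - 1.0    # IMAGE BHWC 0..1 -> LATENT BCHW -1..1
+#   pixels = (latent.movedim(1, -1) + 1.0) / 2.0  # LATENT BCHW -1..1 -> IMAGE BHWC 0..1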
+class PixelspaceConversionVAE(torch.nn.Module): + def __init__(self): + super().__init__() + self.pixel_space_vae = torch.nn.Parameter(torch.tensor(1.0)) + + def encode(self, pixels: torch.Tensor, *_args, **_kwargs) -> torch.Tensor: + return pixels + + def decode(self, samples: torch.Tensor, *_args, **_kwargs) -> torch.Tensor: + return samples + diff --git a/comfy/sd.py b/comfy/sd.py index cb92802e9..2df340739 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -18,6 +18,7 @@ import comfy.ldm.wan.vae2_2 import comfy.ldm.hunyuan3d.vae import comfy.ldm.ace.vae.music_dcae_pipeline import comfy.ldm.hunyuan_video.vae +import comfy.pixel_space_convert import yaml import math import os @@ -516,6 +517,15 @@ class VAE: self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] self.disable_offload = True self.extra_1d_channel = 16 + elif "pixel_space_vae" in sd: + self.first_stage_model = comfy.pixel_space_convert.PixelspaceConversionVAE() + self.memory_used_encode = lambda shape, dtype: (1 * shape[2] * shape[3]) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (1 * shape[2] * shape[3]) * model_management.dtype_size(dtype) + self.downscale_ratio = 1 + self.upscale_ratio = 1 + self.latent_channels = 3 + self.latent_dim = 2 + self.output_channels = 3 else: logging.warning("WARNING: No VAE weights detected, VAE not initalized.") self.first_stage_model = None @@ -785,65 +795,6 @@ class VAE: except: return None -# "Fake" VAE that converts from IMAGE B, H, W, C and values on the scale of 0..1 -# to LATENT B, C, H, W and values on the scale of -1..1. -class PixelspaceConversionVAE: - def __init__(self, size_increment: int=16): - self.intermediate_device = comfy.model_management.intermediate_device() - self.size_increment = size_increment - - def vae_encode_crop_pixels(self, pixels: torch.Tensor) -> torch.Tensor: - if self.size_increment == 1: - return pixels - dims = pixels.shape[1:-1] - for d in range(len(dims)): - d_adj = (dims[d] // self.size_increment) * self.size_increment - if d_adj == d: - continue - d_offset = (dims[d] % self.size_increment) // 2 - pixels = pixels.narrow(d + 1, d_offset, d_adj) - return pixels - - def encode(self, pixels: torch.Tensor, *_args, **_kwargs) -> torch.Tensor: - if pixels.ndim == 3: - pixels = pixels.unsqueeze(0) - elif pixels.ndim != 4: - raise ValueError("Unexpected input image shape") - # Ensure the image has spatial dimensions that are multiples of 16. - pixels = self.vae_encode_crop_pixels(pixels) - h, w, c = pixels.shape[1:] - if h < self.size_increment or w < self.size_increment: - raise ValueError(f"Image inputs must have height/width of at least {self.size_increment} pixel(s).") - pixels= pixels[..., :3] - if c == 1: - pixels = pixels.expand(-1, -1, -1, 3) - elif c != 3: - raise ValueError("Unexpected number of channels in input image") - # Rescale to -1..1 and move the channel dimension to position 1. - latent = pixels.to(device=self.intermediate_device, dtype=torch.float32, copy=True) - latent = latent.clamp_(0, 1).movedim(-1, 1).contiguous() - latent -= 0.5 - latent *= 2 - return latent.clamp_(-1, 1) - - def decode(self, samples: torch.Tensor, *_args, **_kwargs) -> torch.Tensor: - # Rescale to 0..1 and move the channel dimension to the end. 
- img = samples.to(device=self.intermediate_device, dtype=torch.float32, copy=True) - img = img.clamp_(-1, 1).movedim(1, -1).contiguous() - img += 1.0 - img *= 0.5 - return img.clamp_(0, 1) - - encode_tiled = encode - decode_tiled = decode - - @classmethod - def spacial_compression_decode(cls) -> int: - # This just exists so the tiled VAE nodes don't crash. - return 1 - - spacial_compression_encode = spacial_compression_decode - temporal_compression_decode = spacial_compression_decode class StyleModel: def __init__(self, model, device="cpu"): diff --git a/comfy/supported_models.py b/comfy/supported_models.py index be36b5dfe..557902d11 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1213,7 +1213,7 @@ class ChromaRadiance(Chroma): latent_format = comfy.latent_formats.ChromaRadiance # Pixel-space model, no spatial compression for model input. - memory_usage_factor = 0.0325 + memory_usage_factor = 0.038 def get_model(self, state_dict, prefix="", device=None): return model_base.ChromaRadiance(self, device=device) diff --git a/nodes.py b/nodes.py index 76b8cbac8..5a5fdcb8e 100644 --- a/nodes.py +++ b/nodes.py @@ -730,7 +730,7 @@ class VAELoader: vaes.append("taesd3") if f1_taesd_dec and f1_taesd_enc: vaes.append("taef1") - vaes.append("chroma_radiance") + vaes.append("pixel_space") return vaes @staticmethod @@ -773,8 +773,9 @@ class VAELoader: #TODO: scale factor? def load_vae(self, vae_name): - if vae_name == "chroma_radiance": - return (comfy.sd.PixelspaceConversionVAE(),) + if vae_name == "pixel_space": + sd = {} + sd["pixel_space_vae"] = torch.tensor(1.0) elif vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]: sd = self.load_taesd(vae_name) else: From f228367c5e3906de194968fa9b6fbe7aa9987bfa Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Sat, 13 Sep 2025 18:34:21 -0700 Subject: [PATCH 0592/1073] Make ModuleNotFoundError ImportError instead (#9850) --- comfy/ldm/modules/attention.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index bf2553c37..9dd1a43c1 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -22,7 +22,7 @@ SAGE_ATTENTION_IS_AVAILABLE = False try: from sageattention import sageattn SAGE_ATTENTION_IS_AVAILABLE = True -except ModuleNotFoundError as e: +except ImportError as e: if model_management.sage_attention_enabled(): if e.name == "sageattention": logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention") @@ -34,7 +34,7 @@ FLASH_ATTENTION_IS_AVAILABLE = False try: from flash_attn import flash_attn_func FLASH_ATTENTION_IS_AVAILABLE = True -except ModuleNotFoundError: +except ImportError: if model_management.flash_attention_enabled(): logging.error(f"\n\nTo use the `--use-flash-attention` feature, the `flash-attn` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install flash-attn") exit(-1) From 4f1f26ac6c11b803bbc83cb347178e2f9b5e421b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 14 Sep 2025 01:05:38 -0700 Subject: [PATCH 0593/1073] Add that hunyuan image is supported to readme. 
(#9857) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 8024870c2..3f6cfc2ed 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/) - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) - [Qwen Image](https://comfyanonymous.github.io/ComfyUI_examples/qwen_image/) + - [Hunyuan Image 2.1](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_image/) - Image Editing Models - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model) From 47a9cde5d3045c42f20baafb9855fb96959124f0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 15 Sep 2025 15:10:55 -0700 Subject: [PATCH 0594/1073] Support the omnigen2 umo lora. (#9886) --- comfy/lora.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index 4a44f1318..36d26293a 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -297,6 +297,12 @@ def model_lora_keys_unet(model, key_map={}): key_lora = k[len("diffusion_model."):-len(".weight")] key_map["{}".format(key_lora)] = k + if isinstance(model, comfy.model_base.Omnigen2): + for k in sdk: + if k.startswith("diffusion_model.") and k.endswith(".weight"): + key_lora = k[len("diffusion_model."):-len(".weight")] + key_map["{}".format(key_lora)] = k + if isinstance(model, comfy.model_base.QwenImage): for k in sdk: if k.startswith("diffusion_model.") and k.endswith(".weight"): #QwenImage lora format From 1a85483da159f2800407ae5a8a45eb0d88ffce2d Mon Sep 17 00:00:00 2001 From: blepping <157360029+blepping@users.noreply.github.com> Date: Mon, 15 Sep 2025 18:05:03 -0600 Subject: [PATCH 0595/1073] Fix depending on asserts to raise an exception in BatchedBrownianTree and Flash attn module (#9884) Correctly handle the case where w0 is passed by kwargs in BatchedBrownianTree --- comfy/k_diffusion/sampling.py | 35 +++++++++++++++++----------------- comfy/ldm/modules/attention.py | 3 ++- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 2d7e09838..0e2cda291 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -86,24 +86,24 @@ class BatchedBrownianTree: """A wrapper around torchsde.BrownianTree that enables batches of entropy.""" def __init__(self, x, t0, t1, seed=None, **kwargs): - self.cpu_tree = True - if "cpu" in kwargs: - self.cpu_tree = kwargs.pop("cpu") + self.cpu_tree = kwargs.pop("cpu", True) t0, t1, self.sign = self.sort(t0, t1) - w0 = kwargs.get('w0', torch.zeros_like(x)) + w0 = kwargs.pop('w0', None) + if w0 is None: + w0 = torch.zeros_like(x) + self.batched = False if seed is None: - seed = torch.randint(0, 2 ** 63 - 1, []).item() - self.batched = True - try: - assert len(seed) == x.shape[0] + seed = (torch.randint(0, 2 ** 63 - 1, ()).item(),) + elif isinstance(seed, (tuple, list)): + if len(seed) != x.shape[0]: + raise ValueError("Passing a list or tuple of seeds to BatchedBrownianTree requires a length matching the batch size.") + self.batched = True w0 = w0[0] - except TypeError: - seed = [seed] - self.batched = False - if self.cpu_tree: - self.trees = [torchsde.BrownianTree(t0.cpu(), w0.cpu(), t1.cpu(), entropy=s, **kwargs) for s in seed] else: - self.trees = 
[torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed] + seed = (seed,) + if self.cpu_tree: + t0, w0, t1 = t0.detach().cpu(), w0.detach().cpu(), t1.detach().cpu() + self.trees = tuple(torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed) @staticmethod def sort(a, b): @@ -111,11 +111,10 @@ class BatchedBrownianTree: def __call__(self, t0, t1): t0, t1, sign = self.sort(t0, t1) + device, dtype = t0.device, t0.dtype if self.cpu_tree: - w = torch.stack([tree(t0.cpu().float(), t1.cpu().float()).to(t0.dtype).to(t0.device) for tree in self.trees]) * (self.sign * sign) - else: - w = torch.stack([tree(t0, t1) for tree in self.trees]) * (self.sign * sign) - + t0, t1 = t0.detach().cpu().float(), t1.detach().cpu().float() + w = torch.stack([tree(t0, t1) for tree in self.trees]).to(device=device, dtype=dtype) * (self.sign * sign) return w if self.batched else w[0] diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 9dd1a43c1..7437e0567 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -600,7 +600,8 @@ def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape mask = mask.unsqueeze(1) try: - assert mask is None + if mask is not None: + raise RuntimeError("Mask must not be set for Flash attention") out = flash_attn_wrapper( q.transpose(1, 2), k.transpose(1, 2), From a39ac59c3e3fddc8b278899814f0bd5371abb11f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 15 Sep 2025 22:19:50 -0700 Subject: [PATCH 0596/1073] Add encoder part of whisper large v3 as an audio encoder model. (#9894) Not useful yet but some models use it. --- comfy/audio_encoders/audio_encoders.py | 58 +++++--- comfy/audio_encoders/whisper.py | 186 +++++++++++++++++++++++++ 2 files changed, 224 insertions(+), 20 deletions(-) create mode 100755 comfy/audio_encoders/whisper.py diff --git a/comfy/audio_encoders/audio_encoders.py b/comfy/audio_encoders/audio_encoders.py index 6fb5b08e9..0550b2f9b 100644 --- a/comfy/audio_encoders/audio_encoders.py +++ b/comfy/audio_encoders/audio_encoders.py @@ -1,4 +1,5 @@ from .wav2vec2 import Wav2Vec2Model +from .whisper import WhisperLargeV3 import comfy.model_management import comfy.ops import comfy.utils @@ -11,13 +12,18 @@ class AudioEncoderModel(): self.load_device = comfy.model_management.text_encoder_device() offload_device = comfy.model_management.text_encoder_offload_device() self.dtype = comfy.model_management.text_encoder_dtype(self.load_device) + model_type = config.pop("model_type") model_config = dict(config) model_config.update({ "dtype": self.dtype, "device": offload_device, "operations": comfy.ops.manual_cast }) - self.model = Wav2Vec2Model(**model_config) + + if model_type == "wav2vec2": + self.model = Wav2Vec2Model(**model_config) + elif model_type == "whisper3": + self.model = WhisperLargeV3(**model_config) self.model.eval() self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) self.model_sample_rate = 16000 @@ -40,33 +46,45 @@ class AudioEncoderModel(): def load_audio_encoder_from_sd(sd, prefix=""): sd = comfy.utils.state_dict_prefix_replace(sd, {"wav2vec2.": ""}) - embed_dim = sd["encoder.layer_norm.bias"].shape[0] - if embed_dim == 1024:# large - config = { - "embed_dim": 1024, - "num_heads": 16, - "num_layers": 24, - "conv_norm": True, - "conv_bias": True, - "do_normalize": True, - "do_stable_layer_norm": True + if "encoder.layer_norm.bias" in sd: 
#wav2vec2 + embed_dim = sd["encoder.layer_norm.bias"].shape[0] + if embed_dim == 1024:# large + config = { + "model_type": "wav2vec2", + "embed_dim": 1024, + "num_heads": 16, + "num_layers": 24, + "conv_norm": True, + "conv_bias": True, + "do_normalize": True, + "do_stable_layer_norm": True + } + elif embed_dim == 768: # base + config = { + "model_type": "wav2vec2", + "embed_dim": 768, + "num_heads": 12, + "num_layers": 12, + "conv_norm": False, + "conv_bias": False, + "do_normalize": False, # chinese-wav2vec2-base has this False + "do_stable_layer_norm": False } - elif embed_dim == 768: # base + else: + raise RuntimeError("ERROR: audio encoder file is invalid or unsupported embed_dim: {}".format(embed_dim)) + elif "model.encoder.embed_positions.weight" in sd: + sd = comfy.utils.state_dict_prefix_replace(sd, {"model.": ""}) config = { - "embed_dim": 768, - "num_heads": 12, - "num_layers": 12, - "conv_norm": False, - "conv_bias": False, - "do_normalize": False, # chinese-wav2vec2-base has this False - "do_stable_layer_norm": False + "model_type": "whisper3", } else: - raise RuntimeError("ERROR: audio encoder file is invalid or unsupported embed_dim: {}".format(embed_dim)) + raise RuntimeError("ERROR: audio encoder not supported.") audio_encoder = AudioEncoderModel(config) m, u = audio_encoder.load_sd(sd) if len(m) > 0: logging.warning("missing audio encoder: {}".format(m)) + if len(u) > 0: + logging.warning("unexpected audio encoder: {}".format(u)) return audio_encoder diff --git a/comfy/audio_encoders/whisper.py b/comfy/audio_encoders/whisper.py new file mode 100755 index 000000000..93d3782f1 --- /dev/null +++ b/comfy/audio_encoders/whisper.py @@ -0,0 +1,186 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchaudio +from typing import Optional +from comfy.ldm.modules.attention import optimized_attention_masked +import comfy.ops + +class WhisperFeatureExtractor(nn.Module): + def __init__(self, n_mels=128, device=None): + super().__init__() + self.sample_rate = 16000 + self.n_fft = 400 + self.hop_length = 160 + self.n_mels = n_mels + self.chunk_length = 30 + self.n_samples = 480000 + + self.mel_spectrogram = torchaudio.transforms.MelSpectrogram( + sample_rate=self.sample_rate, + n_fft=self.n_fft, + hop_length=self.hop_length, + n_mels=self.n_mels, + f_min=0, + f_max=8000, + norm="slaney", + mel_scale="slaney", + ).to(device) + + def __call__(self, audio): + audio = torch.mean(audio, dim=1) + batch_size = audio.shape[0] + processed_audio = [] + + for i in range(batch_size): + aud = audio[i] + if aud.shape[0] > self.n_samples: + aud = aud[:self.n_samples] + elif aud.shape[0] < self.n_samples: + aud = F.pad(aud, (0, self.n_samples - aud.shape[0])) + processed_audio.append(aud) + + audio = torch.stack(processed_audio) + + mel_spec = self.mel_spectrogram(audio.to(self.mel_spectrogram.spectrogram.window.device))[:, :, :-1].to(audio.device) + + log_mel_spec = torch.clamp(mel_spec, min=1e-10).log10() + log_mel_spec = torch.maximum(log_mel_spec, log_mel_spec.max() - 8.0) + log_mel_spec = (log_mel_spec + 4.0) / 4.0 + + return log_mel_spec + + +class MultiHeadAttention(nn.Module): + def __init__(self, d_model: int, n_heads: int, dtype=None, device=None, operations=None): + super().__init__() + assert d_model % n_heads == 0 + + self.d_model = d_model + self.n_heads = n_heads + self.d_k = d_model // n_heads + + self.q_proj = operations.Linear(d_model, d_model, dtype=dtype, device=device) + self.k_proj = operations.Linear(d_model, d_model, bias=False, dtype=dtype, 
device=device) + self.v_proj = operations.Linear(d_model, d_model, dtype=dtype, device=device) + self.out_proj = operations.Linear(d_model, d_model, dtype=dtype, device=device) + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + batch_size, seq_len, _ = query.shape + + q = self.q_proj(query) + k = self.k_proj(key) + v = self.v_proj(value) + + attn_output = optimized_attention_masked(q, k, v, self.n_heads, mask) + attn_output = self.out_proj(attn_output) + + return attn_output + + +class EncoderLayer(nn.Module): + def __init__(self, d_model: int, n_heads: int, d_ff: int, dtype=None, device=None, operations=None): + super().__init__() + + self.self_attn = MultiHeadAttention(d_model, n_heads, dtype=dtype, device=device, operations=operations) + self.self_attn_layer_norm = operations.LayerNorm(d_model, dtype=dtype, device=device) + + self.fc1 = operations.Linear(d_model, d_ff, dtype=dtype, device=device) + self.fc2 = operations.Linear(d_ff, d_model, dtype=dtype, device=device) + self.final_layer_norm = operations.LayerNorm(d_model, dtype=dtype, device=device) + + def forward( + self, + x: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None + ) -> torch.Tensor: + residual = x + x = self.self_attn_layer_norm(x) + x = self.self_attn(x, x, x, attention_mask) + x = residual + x + + residual = x + x = self.final_layer_norm(x) + x = self.fc1(x) + x = F.gelu(x) + x = self.fc2(x) + x = residual + x + + return x + + +class AudioEncoder(nn.Module): + def __init__( + self, + n_mels: int = 128, + n_ctx: int = 1500, + n_state: int = 1280, + n_head: int = 20, + n_layer: int = 32, + dtype=None, + device=None, + operations=None + ): + super().__init__() + + self.conv1 = operations.Conv1d(n_mels, n_state, kernel_size=3, padding=1, dtype=dtype, device=device) + self.conv2 = operations.Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1, dtype=dtype, device=device) + + self.embed_positions = operations.Embedding(n_ctx, n_state, dtype=dtype, device=device) + + self.layers = nn.ModuleList([ + EncoderLayer(n_state, n_head, n_state * 4, dtype=dtype, device=device, operations=operations) + for _ in range(n_layer) + ]) + + self.layer_norm = operations.LayerNorm(n_state, dtype=dtype, device=device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.gelu(self.conv1(x)) + x = F.gelu(self.conv2(x)) + + x = x.transpose(1, 2) + + x = x + comfy.ops.cast_to_input(self.embed_positions.weight[:, :x.shape[1]], x) + + all_x = () + for layer in self.layers: + all_x += (x,) + x = layer(x) + + x = self.layer_norm(x) + all_x += (x,) + return x, all_x + + +class WhisperLargeV3(nn.Module): + def __init__( + self, + n_mels: int = 128, + n_audio_ctx: int = 1500, + n_audio_state: int = 1280, + n_audio_head: int = 20, + n_audio_layer: int = 32, + dtype=None, + device=None, + operations=None + ): + super().__init__() + + self.feature_extractor = WhisperFeatureExtractor(n_mels=n_mels, device=device) + + self.encoder = AudioEncoder( + n_mels, n_audio_ctx, n_audio_state, n_audio_head, n_audio_layer, + dtype=dtype, device=device, operations=operations + ) + + def forward(self, audio): + mel = self.feature_extractor(audio) + x, all_x = self.encoder(mel) + return x, all_x From e42682b24ef033a93001ba27cc5c5aa461a61d8d Mon Sep 17 00:00:00 2001 From: rattus128 <46076784+rattus128@users.noreply.github.com> Date: Wed, 17 Sep 2025 09:21:14 +1000 Subject: [PATCH 0597/1073] Reduce Peak WAN inference VRAM usage (#9898) * 
flux: Do the xq and xk ropes one at a time

This was doing independent interleaved tensor math on the q and k
tensors, leading to holding more than the minimum number of
intermediates in VRAM. On a bad day, it would VRAM OOM on xk
intermediates. Do everything q and then everything k, so torch can
garbage collect all of q's intermediates before k allocates its
intermediates.

This reduces peak VRAM usage for some WAN2.2 inferences (at least).

* wan: Optimize qkv intermediates on attention

As commented. The former logic computed independent pieces of QKV in
parallel, which held more inference intermediates in VRAM, spiking
VRAM usage. Fully roping Q and garbage collecting the intermediates
before touching K reduces the peak inference VRAM usage.
---
 comfy/ldm/flux/math.py | 11 +++++------
 comfy/ldm/wan/model.py | 22 +++++++++++++---------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/comfy/ldm/flux/math.py b/comfy/ldm/flux/math.py
index 4d743cda2..fb7cd7586 100644
--- a/comfy/ldm/flux/math.py
+++ b/comfy/ldm/flux/math.py
@@ -35,11 +35,10 @@ def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
     out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
     return out.to(dtype=torch.float32, device=pos.device)
 
+def apply_rope1(x: Tensor, freqs_cis: Tensor):
+    x_ = x.to(dtype=freqs_cis.dtype).reshape(*x.shape[:-1], -1, 1, 2)
+    x_out = freqs_cis[..., 0] * x_[..., 0] + freqs_cis[..., 1] * x_[..., 1]
+    return x_out.reshape(*x.shape).type_as(x)
 
 def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor):
-    xq_ = xq.to(dtype=freqs_cis.dtype).reshape(*xq.shape[:-1], -1, 1, 2)
-    xk_ = xk.to(dtype=freqs_cis.dtype).reshape(*xk.shape[:-1], -1, 1, 2)
-    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
-    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
-    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
-
+    return apply_rope1(xq, freqs_cis), apply_rope1(xk, freqs_cis)

diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py
index 63472ada2..67dcf8f1e 100644
--- a/comfy/ldm/wan/model.py
+++ b/comfy/ldm/wan/model.py
@@ -8,7 +8,7 @@ from einops import rearrange
 from comfy.ldm.modules.attention import optimized_attention
 from comfy.ldm.flux.layers import EmbedND
-from comfy.ldm.flux.math import apply_rope
+from comfy.ldm.flux.math import apply_rope1
 import comfy.ldm.common_dit
 import comfy.model_management
 import comfy.patcher_extension
@@ -60,20 +60,24 @@ class WanSelfAttention(nn.Module):
         """
         b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim
 
-        # query, key, value function
-        def qkv_fn(x):
+        def qkv_fn_q(x):
             q = self.norm_q(self.q(x)).view(b, s, n, d)
-            k = self.norm_k(self.k(x)).view(b, s, n, d)
-            v = self.v(x).view(b, s, n * d)
-            return q, k, v
+            return apply_rope1(q, freqs)
 
-        q, k, v = qkv_fn(x)
-        q, k = apply_rope(q, k, freqs)
+        def qkv_fn_k(x):
+            k = self.norm_k(self.k(x)).view(b, s, n, d)
+            return apply_rope1(k, freqs)
+
+        #These two are VRAM hogs, so we want to do all of q computation and
+        #have pytorch garbage collect the intermediates on the sub function
+        #return before we touch k
+        q = qkv_fn_q(x)
+        k = qkv_fn_k(x)
 
         x = optimized_attention(
             q.view(b, s, n * d),
             k.view(b, s, n * d),
-            v,
+            self.v(x).view(b, s, n * d),
             heads=self.num_heads,
             transformer_options=transformer_options,
         )

From 9288c78fc5fae74d3fa7787736dea442e996303f Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 16 Sep 2025 21:12:48 -0700
Subject: [PATCH 0598/1073] Support
the HuMo model. (#9903) --- comfy/audio_encoders/audio_encoders.py | 1 + comfy/ldm/wan/model.py | 259 ++++++++++++++++++++++++- comfy/model_base.py | 17 ++ comfy/model_detection.py | 2 + comfy/supported_models.py | 12 +- comfy_extras/nodes_wan.py | 98 ++++++++++ 6 files changed, 383 insertions(+), 6 deletions(-) diff --git a/comfy/audio_encoders/audio_encoders.py b/comfy/audio_encoders/audio_encoders.py index 0550b2f9b..46ef21c95 100644 --- a/comfy/audio_encoders/audio_encoders.py +++ b/comfy/audio_encoders/audio_encoders.py @@ -41,6 +41,7 @@ class AudioEncoderModel(): outputs = {} outputs["encoded_audio"] = out outputs["encoded_audio_all_layers"] = all_layers + outputs["audio_samples"] = audio.shape[2] return outputs diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 67dcf8f1e..b3b7da5d5 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -34,7 +34,9 @@ class WanSelfAttention(nn.Module): num_heads, window_size=(-1, -1), qk_norm=True, - eps=1e-6, operation_settings={}): + eps=1e-6, + kv_dim=None, + operation_settings={}): assert dim % num_heads == 0 super().__init__() self.dim = dim @@ -43,11 +45,13 @@ class WanSelfAttention(nn.Module): self.window_size = window_size self.qk_norm = qk_norm self.eps = eps + if kv_dim is None: + kv_dim = dim # layers self.q = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) - self.k = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) - self.v = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.k = operation_settings.get("operations").Linear(kv_dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.v = operation_settings.get("operations").Linear(kv_dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.o = operation_settings.get("operations").Linear(dim, dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) self.norm_q = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() self.norm_k = operation_settings.get("operations").RMSNorm(dim, eps=eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) if qk_norm else nn.Identity() @@ -402,6 +406,7 @@ class WanModel(torch.nn.Module): eps=1e-6, flf_pos_embed_token_number=None, in_dim_ref_conv=None, + wan_attn_block_class=WanAttentionBlock, image_model=None, device=None, dtype=None, @@ -479,8 +484,8 @@ class WanModel(torch.nn.Module): # blocks cross_attn_type = 't2v_cross_attn' if model_type == 't2v' else 'i2v_cross_attn' self.blocks = nn.ModuleList([ - WanAttentionBlock(cross_attn_type, dim, ffn_dim, num_heads, - window_size, qk_norm, cross_attn_norm, eps, operation_settings=operation_settings) + wan_attn_block_class(cross_attn_type, dim, ffn_dim, num_heads, + window_size, qk_norm, cross_attn_norm, eps, operation_settings=operation_settings) for _ in range(num_layers) ]) @@ -1325,3 +1330,247 @@ class WanModel_S2V(WanModel): # unpatchify x = self.unpatchify(x, grid_sizes) return x + + +class WanT2VCrossAttentionGather(WanSelfAttention): + + def forward(self, x, context, transformer_options={}, **kwargs): + r""" + Args: + 
x(Tensor): Shape [B, L1, C] - video tokens + context(Tensor): Shape [B, L2, C] - audio tokens with shape [B, frames*16, 1536] + """ + b, n, d = x.size(0), self.num_heads, self.head_dim + + q = self.norm_q(self.q(x)) + k = self.norm_k(self.k(context)) + v = self.v(context) + + # Handle audio temporal structure (16 tokens per frame) + k = k.reshape(-1, 16, n, d).transpose(1, 2) + v = v.reshape(-1, 16, n, d).transpose(1, 2) + + # Handle video spatial structure + q = q.reshape(k.shape[0], -1, n, d).transpose(1, 2) + + x = optimized_attention(q, k, v, heads=self.num_heads, skip_reshape=True, skip_output_reshape=True, transformer_options=transformer_options) + + x = x.transpose(1, 2).view(b, -1, n, d).flatten(2) + x = self.o(x) + return x + + +class AudioCrossAttentionWrapper(nn.Module): + def __init__(self, dim, kv_dim, num_heads, qk_norm=True, eps=1e-6, operation_settings={}): + super().__init__() + + self.audio_cross_attn = WanT2VCrossAttentionGather(dim, num_heads, qk_norm, kv_dim, eps, operation_settings=operation_settings) + self.norm1_audio = operation_settings.get("operations").LayerNorm(dim, eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, x, audio, transformer_options={}): + x = x + self.audio_cross_attn(self.norm1_audio(x), audio, transformer_options=transformer_options) + return x + + +class WanAttentionBlockAudio(WanAttentionBlock): + + def __init__(self, + cross_attn_type, + dim, + ffn_dim, + num_heads, + window_size=(-1, -1), + qk_norm=True, + cross_attn_norm=False, + eps=1e-6, operation_settings={}): + super().__init__(cross_attn_type, dim, ffn_dim, num_heads, window_size, qk_norm, cross_attn_norm, eps, operation_settings) + self.audio_cross_attn_wrapper = AudioCrossAttentionWrapper(dim, 1536, num_heads, qk_norm, eps, operation_settings=operation_settings) + + def forward( + self, + x, + e, + freqs, + context, + context_img_len=257, + audio=None, + transformer_options={}, + ): + r""" + Args: + x(Tensor): Shape [B, L, C] + e(Tensor): Shape [B, 6, C] + freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2] + """ + # assert e.dtype == torch.float32 + + if e.ndim < 4: + e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device) + e).chunk(6, dim=1) + else: + e = (comfy.model_management.cast_to(self.modulation, dtype=x.dtype, device=x.device).unsqueeze(0) + e).unbind(2) + # assert e[0].dtype == torch.float32 + + # self-attention + y = self.self_attn( + torch.addcmul(repeat_e(e[0], x), self.norm1(x), 1 + repeat_e(e[1], x)), + freqs, transformer_options=transformer_options) + + x = torch.addcmul(x, y, repeat_e(e[2], x)) + + # cross-attention & ffn + x = x + self.cross_attn(self.norm3(x), context, context_img_len=context_img_len, transformer_options=transformer_options) + if audio is not None: + x = self.audio_cross_attn_wrapper(x, audio, transformer_options=transformer_options) + y = self.ffn(torch.addcmul(repeat_e(e[3], x), self.norm2(x), 1 + repeat_e(e[4], x))) + x = torch.addcmul(x, y, repeat_e(e[5], x)) + return x + +class DummyAdapterLayer(nn.Module): + def __init__(self, layer): + super().__init__() + self.layer = layer + + def forward(self, *args, **kwargs): + return self.layer(*args, **kwargs) + + +class AudioProjModel(nn.Module): + def __init__( + self, + seq_len=5, + blocks=13, # add a new parameter blocks + channels=768, # add a new parameter channels + intermediate_dim=512, + output_dim=1536, + context_tokens=16, + device=None, + dtype=None, + operations=None, + 
): + super().__init__() + + self.seq_len = seq_len + self.blocks = blocks + self.channels = channels + self.input_dim = seq_len * blocks * channels # update input_dim to be the product of blocks and channels. + self.intermediate_dim = intermediate_dim + self.context_tokens = context_tokens + self.output_dim = output_dim + + # define multiple linear layers + self.audio_proj_glob_1 = DummyAdapterLayer(operations.Linear(self.input_dim, intermediate_dim, dtype=dtype, device=device)) + self.audio_proj_glob_2 = DummyAdapterLayer(operations.Linear(intermediate_dim, intermediate_dim, dtype=dtype, device=device)) + self.audio_proj_glob_3 = DummyAdapterLayer(operations.Linear(intermediate_dim, context_tokens * output_dim, dtype=dtype, device=device)) + + self.audio_proj_glob_norm = DummyAdapterLayer(operations.LayerNorm(output_dim, dtype=dtype, device=device)) + + def forward(self, audio_embeds): + video_length = audio_embeds.shape[1] + audio_embeds = rearrange(audio_embeds, "bz f w b c -> (bz f) w b c") + batch_size, window_size, blocks, channels = audio_embeds.shape + audio_embeds = audio_embeds.view(batch_size, window_size * blocks * channels) + + audio_embeds = torch.relu(self.audio_proj_glob_1(audio_embeds)) + audio_embeds = torch.relu(self.audio_proj_glob_2(audio_embeds)) + + context_tokens = self.audio_proj_glob_3(audio_embeds).reshape(batch_size, self.context_tokens, self.output_dim) + + context_tokens = self.audio_proj_glob_norm(context_tokens) + context_tokens = rearrange(context_tokens, "(bz f) m c -> bz f m c", f=video_length) + + return context_tokens + + +class HumoWanModel(WanModel): + r""" + Wan diffusion backbone supporting both text-to-video and image-to-video. + """ + + def __init__(self, + model_type='humo', + patch_size=(1, 2, 2), + text_len=512, + in_dim=16, + dim=2048, + ffn_dim=8192, + freq_dim=256, + text_dim=4096, + out_dim=16, + num_heads=16, + num_layers=32, + window_size=(-1, -1), + qk_norm=True, + cross_attn_norm=True, + eps=1e-6, + flf_pos_embed_token_number=None, + image_model=None, + audio_token_num=16, + device=None, + dtype=None, + operations=None, + ): + + super().__init__(model_type='t2v', patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim, num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, flf_pos_embed_token_number=flf_pos_embed_token_number, wan_attn_block_class=WanAttentionBlockAudio, image_model=image_model, device=device, dtype=dtype, operations=operations) + + self.audio_proj = AudioProjModel(seq_len=8, blocks=5, channels=1280, intermediate_dim=512, output_dim=1536, context_tokens=audio_token_num, dtype=dtype, device=device, operations=operations) + + def forward_orig( + self, + x, + t, + context, + freqs=None, + audio_embed=None, + reference_latent=None, + transformer_options={}, + **kwargs, + ): + bs, _, time, height, width = x.shape + + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t.flatten()).to(dtype=x[0].dtype)) + e = e.reshape(t.shape[0], -1, e.shape[-1]) + e0 = self.time_projection(e).unflatten(2, (6, self.dim)) + + if reference_latent is not None: + ref = self.patch_embedding(reference_latent.float()).to(x.dtype) + ref = ref.flatten(2).transpose(1, 2) + freqs_ref = self.rope_encode(reference_latent.shape[-3], 
reference_latent.shape[-2], reference_latent.shape[-1], t_start=time, device=x.device, dtype=x.dtype) + x = torch.cat([x, ref], dim=1) + freqs = torch.cat([freqs, freqs_ref], dim=1) + del ref, freqs_ref + + # context + context = self.text_embedding(context) + context_img_len = None + + if audio_embed is not None: + audio = self.audio_proj(audio_embed).permute(0, 3, 1, 2).flatten(2).transpose(1, 2) + else: + audio = None + + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len, audio=audio, transformer_options=args["transformer_options"]) + return out + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len, audio=audio, transformer_options=transformer_options) + + # head + x = self.head(x, e) + + # unpatchify + x = self.unpatchify(x, grid_sizes) + return x diff --git a/comfy/model_base.py b/comfy/model_base.py index 252dfcf69..cf99035da 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1213,6 +1213,23 @@ class WAN21_Camera(WAN21): out['camera_conditions'] = comfy.conds.CONDRegular(camera_conditions) return out +class WAN21_HuMo(WAN21): + def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): + super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.HumoWanModel) + self.image_to_video = image_to_video + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + + audio_embed = kwargs.get("audio_embed", None) + if audio_embed is not None: + out['audio_embed'] = comfy.conds.CONDRegular(audio_embed) + + reference_latents = kwargs.get("reference_latents", None) + if reference_latents is not None: + out['reference_latent'] = comfy.conds.CONDRegular(self.process_latent_in(reference_latents[-1])) + return out + class WAN22_S2V(WAN21): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel_S2V) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 03d44f65e..72621bed6 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -402,6 +402,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["model_type"] = "camera_2.2" elif '{}casual_audio_encoder.encoder.final_linear.weight'.format(key_prefix) in state_dict_keys: dit_config["model_type"] = "s2v" + elif '{}audio_proj.audio_proj_glob_1.layer.bias'.format(key_prefix) in state_dict_keys: + dit_config["model_type"] = "humo" else: if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys: dit_config["model_type"] = "i2v" diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 557902d11..213b5b92c 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1073,6 +1073,16 @@ class WAN21_Vace(WAN21_T2V): out = model_base.WAN21_Vace(self, image_to_video=False, device=device) return out +class WAN21_HuMo(WAN21_T2V): + unet_config = { + "image_model": "wan2.1", + "model_type": "humo", + } + + def get_model(self, 
state_dict, prefix="", device=None): + out = model_base.WAN21_HuMo(self, image_to_video=False, device=device) + return out + class WAN22_S2V(WAN21_T2V): unet_config = { "image_model": "wan2.1", @@ -1351,6 +1361,6 @@ class HunyuanImage21Refiner(HunyuanVideo): out = model_base.HunyuanImage21Refiner(self, device=device) return out -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 4f73369f5..0b8b55813 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -1015,6 +1015,103 @@ class WanSoundImageToVideoExtend(io.ComfyNode): return io.NodeOutput(positive, negative, out_latent) +def get_audio_emb_window(audio_emb, frame_num, frame0_idx, audio_shift=2): + zero_audio_embed = torch.zeros((audio_emb.shape[1], audio_emb.shape[2]), dtype=audio_emb.dtype, device=audio_emb.device) + zero_audio_embed_3 = torch.zeros((3, audio_emb.shape[1], audio_emb.shape[2]), dtype=audio_emb.dtype, device=audio_emb.device) # device=audio_emb.device + iter_ = 1 + (frame_num - 1) // 4 + audio_emb_wind = [] + for lt_i in range(iter_): + if lt_i == 0: + st = frame0_idx + lt_i - 2 + ed = frame0_idx + lt_i + 3 + wind_feat = torch.stack([ + audio_emb[i] if (0 <= i < audio_emb.shape[0]) else zero_audio_embed + for i in range(st, ed) + ], dim=0) + wind_feat = torch.cat((zero_audio_embed_3, wind_feat), dim=0) + else: + st = frame0_idx + 1 + 4 * (lt_i - 1) - audio_shift + ed = frame0_idx + 1 + 4 * lt_i + audio_shift + wind_feat = torch.stack([ + audio_emb[i] if (0 <= i < audio_emb.shape[0]) else zero_audio_embed + for i in range(st, ed) + ], dim=0) + audio_emb_wind.append(wind_feat) + audio_emb_wind = torch.stack(audio_emb_wind, dim=0) + + return audio_emb_wind, ed - audio_shift + + +class WanHuMoImageToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanHuMoImageToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + 
io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=97, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.AudioEncoderOutput.Input("audio_encoder_output", optional=True), + io.Image.Input("ref_image", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + is_experimental=True, + ) + + @classmethod + def execute(cls, positive, negative, vae, width, height, length, batch_size, ref_image=None, audio_encoder_output=None) -> io.NodeOutput: + latent_t = ((length - 1) // 4) + 1 + latent = torch.zeros([batch_size, 16, latent_t, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + + if ref_image is not None: + ref_image = comfy.utils.common_upscale(ref_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + ref_latent = vae.encode(ref_image[:, :, :, :3]) + positive = node_helpers.conditioning_set_values(positive, {"reference_latents": [ref_latent]}, append=True) + negative = node_helpers.conditioning_set_values(negative, {"reference_latents": [torch.zeros_like(ref_latent)]}, append=True) + else: + zero_latent = torch.zeros([batch_size, 16, 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + positive = node_helpers.conditioning_set_values(positive, {"reference_latents": [zero_latent]}, append=True) + negative = node_helpers.conditioning_set_values(negative, {"reference_latents": [zero_latent]}, append=True) + + if audio_encoder_output is not None: + audio_emb = torch.stack(audio_encoder_output["encoded_audio_all_layers"], dim=2) + audio_len = audio_encoder_output["audio_samples"] // 640 + audio_emb = audio_emb[:, :audio_len * 2] + + feat0 = linear_interpolation(audio_emb[:, :, 0: 8].mean(dim=2), 50, 25) + feat1 = linear_interpolation(audio_emb[:, :, 8: 16].mean(dim=2), 50, 25) + feat2 = linear_interpolation(audio_emb[:, :, 16: 24].mean(dim=2), 50, 25) + feat3 = linear_interpolation(audio_emb[:, :, 24: 32].mean(dim=2), 50, 25) + feat4 = linear_interpolation(audio_emb[:, :, 32], 50, 25) + audio_emb = torch.stack([feat0, feat1, feat2, feat3, feat4], dim=2)[0] # [T, 5, 1280] + audio_emb, _ = get_audio_emb_window(audio_emb, length, frame0_idx=0) + + # pad for ref latent + zero_audio_pad = torch.zeros(ref_latent.shape[2], *audio_emb.shape[1:], device=audio_emb.device, dtype=audio_emb.dtype) + audio_emb = torch.cat([audio_emb, zero_audio_pad], dim=0) + + audio_emb = audio_emb.unsqueeze(0) + audio_emb_neg = torch.zeros_like(audio_emb) + positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_emb}) + negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_emb_neg}) + else: + zero_audio = torch.zeros([batch_size, latent_t + 1, 8, 5, 1280], device=comfy.model_management.intermediate_device()) + positive = node_helpers.conditioning_set_values(positive, {"audio_embed": zero_audio}) + negative = node_helpers.conditioning_set_values(negative, {"audio_embed": zero_audio}) + + out_latent = {} + out_latent["samples"] = latent + return io.NodeOutput(positive, negative, out_latent) + class Wan22ImageToVideoLatent(io.ComfyNode): @classmethod def define_schema(cls): @@ -1075,6 +1172,7 @@ class WanExtension(ComfyExtension): WanPhantomSubjectToVideo, WanSoundImageToVideo, 
WanSoundImageToVideoExtend, + WanHuMoImageToVideo, Wan22ImageToVideoLatent, ] From dd611a7700956f45f393dee32fb8505de176dc66 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 17 Sep 2025 15:39:24 -0700 Subject: [PATCH 0599/1073] Support the HuMo 17B model. (#9912) --- comfy/ldm/wan/model.py | 2 +- comfy/model_base.py | 29 ++++++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index b3b7da5d5..9cf3c171d 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -1364,7 +1364,7 @@ class AudioCrossAttentionWrapper(nn.Module): def __init__(self, dim, kv_dim, num_heads, qk_norm=True, eps=1e-6, operation_settings={}): super().__init__() - self.audio_cross_attn = WanT2VCrossAttentionGather(dim, num_heads, qk_norm, kv_dim, eps, operation_settings=operation_settings) + self.audio_cross_attn = WanT2VCrossAttentionGather(dim, num_heads, qk_norm=qk_norm, kv_dim=kv_dim, eps=eps, operation_settings=operation_settings) self.norm1_audio = operation_settings.get("operations").LayerNorm(dim, eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) def forward(self, x, audio, transformer_options={}): diff --git a/comfy/model_base.py b/comfy/model_base.py index cf99035da..70b67b7c1 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1220,14 +1220,37 @@ class WAN21_HuMo(WAN21): def extra_conds(self, **kwargs): out = super().extra_conds(**kwargs) + noise = kwargs.get("noise", None) audio_embed = kwargs.get("audio_embed", None) if audio_embed is not None: out['audio_embed'] = comfy.conds.CONDRegular(audio_embed) - reference_latents = kwargs.get("reference_latents", None) - if reference_latents is not None: - out['reference_latent'] = comfy.conds.CONDRegular(self.process_latent_in(reference_latents[-1])) + if "c_concat" not in out: # 1.7B model + reference_latents = kwargs.get("reference_latents", None) + if reference_latents is not None: + out['reference_latent'] = comfy.conds.CONDRegular(self.process_latent_in(reference_latents[-1])) + else: + noise_shape = list(noise.shape) + noise_shape[1] += 4 + concat_latent = torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype) + zero_vae_values_first = torch.tensor([0.8660, -0.4326, -0.0017, -0.4884, -0.5283, 0.9207, -0.9896, 0.4433, -0.5543, -0.0113, 0.5753, -0.6000, -0.8346, -0.3497, -0.1926, -0.6938]).view(1, 16, 1, 1, 1) + zero_vae_values_second = torch.tensor([1.0869, -1.2370, 0.0206, -0.4357, -0.6411, 2.0307, -1.5972, 1.2659, -0.8595, -0.4654, 0.9638, -1.6330, -1.4310, -0.1098, -0.3856, -1.4583]).view(1, 16, 1, 1, 1) + zero_vae_values = torch.tensor([0.8642, -1.8583, 0.1577, 0.1350, -0.3641, 2.5863, -1.9670, 1.6065, -1.0475, -0.8678, 1.1734, -1.8138, -1.5933, -0.7721, -0.3289, -1.3745]).view(1, 16, 1, 1, 1) + concat_latent[:, 4:] = zero_vae_values + concat_latent[:, 4:, :1] = zero_vae_values_first + concat_latent[:, 4:, 1:2] = zero_vae_values_second + out['c_concat'] = comfy.conds.CONDNoiseShape(concat_latent) + reference_latents = kwargs.get("reference_latents", None) + if reference_latents is not None: + ref_latent = self.process_latent_in(reference_latents[-1]) + ref_latent_shape = list(ref_latent.shape) + ref_latent_shape[1] += 4 + ref_latent_shape[1] + ref_latent_full = torch.zeros(ref_latent_shape, device=ref_latent.device, dtype=ref_latent.dtype) + ref_latent_full[:, 20:] = ref_latent + ref_latent_full[:, 16:20] = 1.0 + 
out['reference_latent'] = comfy.conds.CONDRegular(ref_latent_full) + return out class WAN22_S2V(WAN21): From 8d6653fca676a08df3e11654672fed92a183d147 Mon Sep 17 00:00:00 2001 From: DELUXA Date: Fri, 19 Sep 2025 02:50:37 +0300 Subject: [PATCH 0600/1073] Enable fp8 ops by default on gfx1200 (#9926) --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index bbfc3c7a1..d880f1970 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -348,7 +348,7 @@ try: # if any((a in arch) for a in ["gfx1201"]): # ENABLE_PYTORCH_ATTENTION = True if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): - if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches + if any((a in arch) for a in ["gfx1200", "gfx1201", "gfx942", "gfx950"]): # TODO: more arches SUPPORT_FP8_OPS = True except: From 1ea8c540640913b247248e46c907fb9b92a9dd4b Mon Sep 17 00:00:00 2001 From: Jodh Singh Date: Thu, 18 Sep 2025 19:51:16 -0400 Subject: [PATCH 0601/1073] make kernel of same type as image to avoid mismatch issues (#9932) --- comfy_extras/nodes_post_processing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index cb1a0d883..ed7a07152 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -233,6 +233,7 @@ class Sharpen: kernel_size = sharpen_radius * 2 + 1 kernel = gaussian_kernel(kernel_size, sigma, device=image.device) * -(alpha*10) + kernel = kernel.to(dtype=image.dtype) center = kernel_size // 2 kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0 kernel = kernel.repeat(channels, 1, 1).unsqueeze(1) From 24b0fce099c56d18ceb1f4f6b9455fee55e154ce Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 18 Sep 2025 16:54:16 -0700 Subject: [PATCH 0602/1073] Do padding of audio embed in model for humo for more flexibility. 
(#9935)
---
 comfy/ldm/wan/model.py    | 3 +++
 comfy_extras/nodes_wan.py | 4 ----
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py
index 9cf3c171d..2dac5980c 100644
--- a/comfy/ldm/wan/model.py
+++ b/comfy/ldm/wan/model.py
@@ -1551,6 +1551,9 @@ class HumoWanModel(WanModel):
         context_img_len = None
 
         if audio_embed is not None:
+            if reference_latent is not None:
+                zero_audio_pad = torch.zeros(audio_embed.shape[0], reference_latent.shape[-3], *audio_embed.shape[2:], device=audio_embed.device, dtype=audio_embed.dtype)
+                audio_embed = torch.cat([audio_embed, zero_audio_pad], dim=1)
             audio = self.audio_proj(audio_embed).permute(0, 3, 1, 2).flatten(2).transpose(1, 2)
         else:
             audio = None

diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py
index 0b8b55813..5f10edcff 100644
--- a/comfy_extras/nodes_wan.py
+++ b/comfy_extras/nodes_wan.py
@@ -1095,10 +1095,6 @@ class WanHuMoImageToVideo(io.ComfyNode):
             audio_emb = torch.stack([feat0, feat1, feat2, feat3, feat4], dim=2)[0] # [T, 5, 1280]
             audio_emb, _ = get_audio_emb_window(audio_emb, length, frame0_idx=0)
 
-            # pad for ref latent
-            zero_audio_pad = torch.zeros(ref_latent.shape[2], *audio_emb.shape[1:], device=audio_emb.device, dtype=audio_emb.dtype)
-            audio_emb = torch.cat([audio_emb, zero_audio_pad], dim=0)
-
             audio_emb = audio_emb.unsqueeze(0)
             audio_emb_neg = torch.zeros_like(audio_emb)
             positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_emb})
             negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_emb_neg})

From 711bcf33ee505a997674f4a9125e69d2a5a3c180 Mon Sep 17 00:00:00 2001
From: Christian Byrne
Date: Fri, 19 Sep 2025 00:03:30 -0700
Subject: [PATCH 0603/1073] Bump frontend to 1.26.13 (#9933)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index de5af5fac..79187efaa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.26.11
+comfyui-frontend-package==1.26.13
 comfyui-workflow-templates==0.1.81
 comfyui-embedded-docs==0.2.6
 torch

From dc95b6acc0ef4962460592d417db4024f7160586 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 19 Sep 2025 00:07:17 -0700
Subject: [PATCH 0604/1073] Basic WIP support for the wan animate model.
(#9939)
---
 comfy/ldm/wan/model_animate.py | 548 +++++++++++++++++++++++++++++++++
 comfy/model_base.py            |  18 ++
 comfy/model_detection.py       |   2 +
 comfy/supported_models.py      |  15 +-
 comfy_extras/nodes_wan.py      |  84 +++++
 5 files changed, 666 insertions(+), 1 deletion(-)
 create mode 100644 comfy/ldm/wan/model_animate.py

diff --git a/comfy/ldm/wan/model_animate.py b/comfy/ldm/wan/model_animate.py
new file mode 100644
index 000000000..542f54110
--- /dev/null
+++ b/comfy/ldm/wan/model_animate.py
@@ -0,0 +1,548 @@
+from torch import nn
+import torch
+from typing import Tuple, Optional
+from einops import rearrange
+import torch.nn.functional as F
+import math
+from .model import WanModel, sinusoidal_embedding_1d
+from comfy.ldm.modules.attention import optimized_attention
+import comfy.model_management
+
+class CausalConv1d(nn.Module):
+
+    def __init__(self, chan_in, chan_out, kernel_size=3, stride=1, dilation=1, pad_mode="replicate", operations=None, **kwargs):
+        super().__init__()
+
+        self.pad_mode = pad_mode
+        padding = (kernel_size - 1, 0)  # T
+        self.time_causal_padding = padding
+
+        self.conv = operations.Conv1d(chan_in, chan_out, kernel_size, stride=stride, dilation=dilation, **kwargs)
+
+    def forward(self, x):
+        x = F.pad(x, self.time_causal_padding, mode=self.pad_mode)
+        return self.conv(x)
+
+
+class FaceEncoder(nn.Module):
+    def __init__(self, in_dim: int, hidden_dim: int, num_heads=int, dtype=None, device=None, operations=None):
+        factory_kwargs = {"dtype": dtype, "device": device}
+        super().__init__()
+
+        self.num_heads = num_heads
+        self.conv1_local = CausalConv1d(in_dim, 1024 * num_heads, 3, stride=1, operations=operations, **factory_kwargs)
+        self.norm1 = operations.LayerNorm(hidden_dim // 8, elementwise_affine=False, eps=1e-6, **factory_kwargs)
+        self.act = nn.SiLU()
+        self.conv2 = CausalConv1d(1024, 1024, 3, stride=2, operations=operations, **factory_kwargs)
+        self.conv3 = CausalConv1d(1024, 1024, 3, stride=2, operations=operations, **factory_kwargs)
+
+        self.out_proj = operations.Linear(1024, hidden_dim, **factory_kwargs)
+        self.norm1 = operations.LayerNorm(1024, elementwise_affine=False, eps=1e-6, **factory_kwargs)
+
+        self.norm2 = operations.LayerNorm(1024, elementwise_affine=False, eps=1e-6, **factory_kwargs)
+
+        self.norm3 = operations.LayerNorm(1024, elementwise_affine=False, eps=1e-6, **factory_kwargs)
+
+        self.padding_tokens = nn.Parameter(torch.empty(1, 1, 1, hidden_dim, **factory_kwargs))
+
+    def forward(self, x):
+
+        x = rearrange(x, "b t c -> b c t")
+        b, c, t = x.shape
+
+        x = self.conv1_local(x)
+        x = rearrange(x, "b (n c) t -> (b n) t c", n=self.num_heads)
+
+        x = self.norm1(x)
+        x = self.act(x)
+        x = rearrange(x, "b t c -> b c t")
+        x = self.conv2(x)
+        x = rearrange(x, "b c t -> b t c")
+        x = self.norm2(x)
+        x = self.act(x)
+        x = rearrange(x, "b t c -> b c t")
+        x = self.conv3(x)
+        x = rearrange(x, "b c t -> b t c")
+        x = self.norm3(x)
+        x = self.act(x)
+        x = self.out_proj(x)
+        x = rearrange(x, "(b n) t c -> b t n c", b=b)
+        padding = comfy.model_management.cast_to(self.padding_tokens, dtype=x.dtype, device=x.device).repeat(b, x.shape[1], 1, 1)
+        x = torch.cat([x, padding], dim=-2)
+        x_local = x.clone()
+
+        return x_local
+
+
+def get_norm_layer(norm_layer, operations=None):
+    """
+    Get the normalization layer.
+
+    Args:
+        norm_layer (str): The type of normalization layer.
+
+    Returns:
+        norm_layer (nn.Module): The normalization layer.
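+
+    Example (editor's sketch; ``ops`` stands in for an operations namespace
+    such as ``comfy.ops.disable_weight_init``):
+        norm_cls = get_norm_layer("rms", operations=ops)
+        norm = norm_cls(head_dim, elementwise_affine=True, eps=1e-6)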
+ """ + if norm_layer == "layer": + return operations.LayerNorm + elif norm_layer == "rms": + return operations.RMSNorm + else: + raise NotImplementedError(f"Norm layer {norm_layer} is not implemented") + + +class FaceAdapter(nn.Module): + def __init__( + self, + hidden_dim: int, + heads_num: int, + qk_norm: bool = True, + qk_norm_type: str = "rms", + num_adapter_layers: int = 1, + dtype=None, device=None, operations=None + ): + + factory_kwargs = {"dtype": dtype, "device": device} + super().__init__() + self.hidden_size = hidden_dim + self.heads_num = heads_num + self.fuser_blocks = nn.ModuleList( + [ + FaceBlock( + self.hidden_size, + self.heads_num, + qk_norm=qk_norm, + qk_norm_type=qk_norm_type, + operations=operations, + **factory_kwargs, + ) + for _ in range(num_adapter_layers) + ] + ) + + def forward( + self, + x: torch.Tensor, + motion_embed: torch.Tensor, + idx: int, + freqs_cis_q: Tuple[torch.Tensor, torch.Tensor] = None, + freqs_cis_k: Tuple[torch.Tensor, torch.Tensor] = None, + ) -> torch.Tensor: + + return self.fuser_blocks[idx](x, motion_embed, freqs_cis_q, freqs_cis_k) + + + +class FaceBlock(nn.Module): + def __init__( + self, + hidden_size: int, + heads_num: int, + qk_norm: bool = True, + qk_norm_type: str = "rms", + qk_scale: float = None, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + operations=None + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + + self.deterministic = False + self.hidden_size = hidden_size + self.heads_num = heads_num + head_dim = hidden_size // heads_num + self.scale = qk_scale or head_dim**-0.5 + + self.linear1_kv = operations.Linear(hidden_size, hidden_size * 2, **factory_kwargs) + self.linear1_q = operations.Linear(hidden_size, hidden_size, **factory_kwargs) + + self.linear2 = operations.Linear(hidden_size, hidden_size, **factory_kwargs) + + qk_norm_layer = get_norm_layer(qk_norm_type, operations=operations) + self.q_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + self.k_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + + self.pre_norm_feat = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs) + + self.pre_norm_motion = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs) + + def forward( + self, + x: torch.Tensor, + motion_vec: torch.Tensor, + motion_mask: Optional[torch.Tensor] = None, + # use_context_parallel=False, + ) -> torch.Tensor: + + B, T, N, C = motion_vec.shape + T_comp = T + + x_motion = self.pre_norm_motion(motion_vec) + x_feat = self.pre_norm_feat(x) + + kv = self.linear1_kv(x_motion) + q = self.linear1_q(x_feat) + + k, v = rearrange(kv, "B L N (K H D) -> K B L N H D", K=2, H=self.heads_num) + q = rearrange(q, "B S (H D) -> B S H D", H=self.heads_num) + + # Apply QK-Norm if needed. 
+ q = self.q_norm(q).to(v) + k = self.k_norm(k).to(v) + + k = rearrange(k, "B L N H D -> (B L) N H D") + v = rearrange(v, "B L N H D -> (B L) N H D") + + q = rearrange(q, "B (L S) H D -> (B L) S (H D)", L=T_comp) + + attn = optimized_attention(q, k, v, heads=self.heads_num) + + attn = rearrange(attn, "(B L) S C -> B (L S) C", L=T_comp) + + output = self.linear2(attn) + + if motion_mask is not None: + output = output * rearrange(motion_mask, "B T H W -> B (T H W)").unsqueeze(-1) + + return output + +# https://github.com/XPixelGroup/BasicSR/blob/8d56e3a045f9fb3e1d8872f92ee4a4f07f886b0a/basicsr/ops/upfirdn2d/upfirdn2d.py#L162 +def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): + _, minor, in_h, in_w = input.shape + kernel_h, kernel_w = kernel.shape + + out = input.view(-1, minor, in_h, 1, in_w, 1) + out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) + out = out.view(-1, minor, in_h * up_y, in_w * up_x) + + out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) + out = out[:, :, max(-pad_y0, 0): out.shape[2] - max(-pad_y1, 0), max(-pad_x0, 0): out.shape[3] - max(-pad_x1, 0)] + + out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) + w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) + out = F.conv2d(out, w) + out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) + return out[:, :, ::down_y, ::down_x] + +def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): + return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) + +# https://github.com/XPixelGroup/BasicSR/blob/8d56e3a045f9fb3e1d8872f92ee4a4f07f886b0a/basicsr/ops/fused_act/fused_act.py#L81 +class FusedLeakyReLU(torch.nn.Module): + def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5, dtype=None, device=None): + super().__init__() + self.bias = torch.nn.Parameter(torch.empty(1, channel, 1, 1, dtype=dtype, device=device)) + self.negative_slope = negative_slope + self.scale = scale + + def forward(self, input): + return fused_leaky_relu(input, comfy.model_management.cast_to(self.bias, device=input.device, dtype=input.dtype), self.negative_slope, self.scale) + +def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): + return F.leaky_relu(input + bias, negative_slope) * scale + +class Blur(torch.nn.Module): + def __init__(self, kernel, pad, dtype=None, device=None): + super().__init__() + kernel = torch.tensor(kernel, dtype=dtype, device=device) + kernel = kernel[None, :] * kernel[:, None] + kernel = kernel / kernel.sum() + self.register_buffer('kernel', kernel) + self.pad = pad + + def forward(self, input): + return upfirdn2d(input, comfy.model_management.cast_to(self.kernel, dtype=input.dtype, device=input.device), pad=self.pad) + +#https://github.com/XPixelGroup/BasicSR/blob/8d56e3a045f9fb3e1d8872f92ee4a4f07f886b0a/basicsr/archs/stylegan2_arch.py#L590 +class ScaledLeakyReLU(torch.nn.Module): + def __init__(self, negative_slope=0.2): + super().__init__() + self.negative_slope = negative_slope + + def forward(self, input): + return F.leaky_relu(input, negative_slope=self.negative_slope) + +# https://github.com/XPixelGroup/BasicSR/blob/8d56e3a045f9fb3e1d8872f92ee4a4f07f886b0a/basicsr/archs/stylegan2_arch.py#L605 +class EqualConv2d(torch.nn.Module): + def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, dtype=None, device=None, operations=None): + 
super().__init__() + self.weight = torch.nn.Parameter(torch.empty(out_channel, in_channel, kernel_size, kernel_size, device=device, dtype=dtype)) + self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) + self.stride = stride + self.padding = padding + self.bias = torch.nn.Parameter(torch.empty(out_channel, device=device, dtype=dtype)) if bias else None + + def forward(self, input): + if self.bias is None: + bias = None + else: + bias = comfy.model_management.cast_to(self.bias, device=input.device, dtype=input.dtype) + + return F.conv2d(input, comfy.model_management.cast_to(self.weight, device=input.device, dtype=input.dtype) * self.scale, bias=bias, stride=self.stride, padding=self.padding) + +# https://github.com/XPixelGroup/BasicSR/blob/8d56e3a045f9fb3e1d8872f92ee4a4f07f886b0a/basicsr/archs/stylegan2_arch.py#L134 +class EqualLinear(torch.nn.Module): + def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None, dtype=None, device=None, operations=None): + super().__init__() + self.weight = torch.nn.Parameter(torch.empty(out_dim, in_dim, device=device, dtype=dtype)) + self.bias = torch.nn.Parameter(torch.empty(out_dim, device=device, dtype=dtype)) if bias else None + self.activation = activation + self.scale = (1 / math.sqrt(in_dim)) * lr_mul + self.lr_mul = lr_mul + + def forward(self, input): + if self.bias is None: + bias = None + else: + bias = comfy.model_management.cast_to(self.bias, device=input.device, dtype=input.dtype) * self.lr_mul + + if self.activation: + out = F.linear(input, comfy.model_management.cast_to(self.weight, device=input.device, dtype=input.dtype) * self.scale) + return fused_leaky_relu(out, bias) + return F.linear(input, comfy.model_management.cast_to(self.weight, device=input.device, dtype=input.dtype) * self.scale, bias=bias) + +# https://github.com/XPixelGroup/BasicSR/blob/8d56e3a045f9fb3e1d8872f92ee4a4f07f886b0a/basicsr/archs/stylegan2_arch.py#L654 +class ConvLayer(torch.nn.Sequential): + def __init__(self, in_channel, out_channel, kernel_size, downsample=False, blur_kernel=[1, 3, 3, 1], bias=True, activate=True, dtype=None, device=None, operations=None): + layers = [] + + if downsample: + factor = 2 + p = (len(blur_kernel) - factor) + (kernel_size - 1) + layers.append(Blur(blur_kernel, pad=((p + 1) // 2, p // 2))) + stride, padding = 2, 0 + else: + stride, padding = 1, kernel_size // 2 + + layers.append(EqualConv2d(in_channel, out_channel, kernel_size, padding=padding, stride=stride, bias=bias and not activate, dtype=dtype, device=device, operations=operations)) + + if activate: + layers.append(FusedLeakyReLU(out_channel) if bias else ScaledLeakyReLU(0.2)) + + super().__init__(*layers) + +# https://github.com/XPixelGroup/BasicSR/blob/8d56e3a045f9fb3e1d8872f92ee4a4f07f886b0a/basicsr/archs/stylegan2_arch.py#L704 +class ResBlock(torch.nn.Module): + def __init__(self, in_channel, out_channel, dtype=None, device=None, operations=None): + super().__init__() + self.conv1 = ConvLayer(in_channel, in_channel, 3, dtype=dtype, device=device, operations=operations) + self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True, dtype=dtype, device=device, operations=operations) + self.skip = ConvLayer(in_channel, out_channel, 1, downsample=True, activate=False, bias=False, dtype=dtype, device=device, operations=operations) + + def forward(self, input): + out = self.conv2(self.conv1(input)) + skip = self.skip(input) + return (out + skip) / math.sqrt(2) + + +class EncoderApp(torch.nn.Module): + def __init__(self, w_dim=512, dtype=None, 
device=None, operations=None): + super().__init__() + kwargs = {"device": device, "dtype": dtype, "operations": operations} + + self.convs = torch.nn.ModuleList([ + ConvLayer(3, 32, 1, **kwargs), ResBlock(32, 64, **kwargs), + ResBlock(64, 128, **kwargs), ResBlock(128, 256, **kwargs), + ResBlock(256, 512, **kwargs), ResBlock(512, 512, **kwargs), + ResBlock(512, 512, **kwargs), ResBlock(512, 512, **kwargs), + EqualConv2d(512, w_dim, 4, padding=0, bias=False, **kwargs) + ]) + + def forward(self, x): + h = x + for conv in self.convs: + h = conv(h) + return h.squeeze(-1).squeeze(-1) + +class Encoder(torch.nn.Module): + def __init__(self, dim=512, motion_dim=20, dtype=None, device=None, operations=None): + super().__init__() + self.net_app = EncoderApp(dim, dtype=dtype, device=device, operations=operations) + self.fc = torch.nn.Sequential(*[EqualLinear(dim, dim, dtype=dtype, device=device, operations=operations) for _ in range(4)] + [EqualLinear(dim, motion_dim, dtype=dtype, device=device, operations=operations)]) + + def encode_motion(self, x): + return self.fc(self.net_app(x)) + +class Direction(torch.nn.Module): + def __init__(self, motion_dim, dtype=None, device=None, operations=None): + super().__init__() + self.weight = torch.nn.Parameter(torch.empty(512, motion_dim, device=device, dtype=dtype)) + self.motion_dim = motion_dim + + def forward(self, input): + stabilized_weight = comfy.model_management.cast_to(self.weight, device=input.device, dtype=input.dtype) + 1e-8 * torch.eye(512, self.motion_dim, device=input.device, dtype=input.dtype) + Q, _ = torch.linalg.qr(stabilized_weight.float()) + if input is None: + return Q + return torch.sum(input.unsqueeze(-1) * Q.T.to(input.dtype), dim=1) + +class Synthesis(torch.nn.Module): + def __init__(self, motion_dim, dtype=None, device=None, operations=None): + super().__init__() + self.direction = Direction(motion_dim, dtype=dtype, device=device, operations=operations) + +class Generator(torch.nn.Module): + def __init__(self, style_dim=512, motion_dim=20, dtype=None, device=None, operations=None): + super().__init__() + self.enc = Encoder(style_dim, motion_dim, dtype=dtype, device=device, operations=operations) + self.dec = Synthesis(motion_dim, dtype=dtype, device=device, operations=operations) + + def get_motion(self, img): + motion_feat = self.enc.encode_motion(img) + return self.dec.direction(motion_feat) + +class AnimateWanModel(WanModel): + r""" + Wan diffusion backbone supporting both text-to-video and image-to-video. 
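+
+ This Animate variant additionally takes a pose latent video (added through
+ pose_patch_embedding) and face crop frames; a StyleGAN-style motion encoder
+ and the FaceEncoder turn the face crops into motion tokens, which are
+ injected through FaceAdapter cross-attention after every fifth block.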
+ """ + + def __init__(self, + model_type='animate', + patch_size=(1, 2, 2), + text_len=512, + in_dim=16, + dim=2048, + ffn_dim=8192, + freq_dim=256, + text_dim=4096, + out_dim=16, + num_heads=16, + num_layers=32, + window_size=(-1, -1), + qk_norm=True, + cross_attn_norm=True, + eps=1e-6, + flf_pos_embed_token_number=None, + motion_encoder_dim=512, + image_model=None, + device=None, + dtype=None, + operations=None, + ): + + super().__init__(model_type='i2v', patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim, num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, flf_pos_embed_token_number=flf_pos_embed_token_number, image_model=image_model, device=device, dtype=dtype, operations=operations) + + self.pose_patch_embedding = operations.Conv3d( + 16, dim, kernel_size=patch_size, stride=patch_size, device=device, dtype=dtype + ) + + self.motion_encoder = Generator(style_dim=512, motion_dim=20, device=device, dtype=dtype, operations=operations) + + self.face_adapter = FaceAdapter( + heads_num=self.num_heads, + hidden_dim=self.dim, + num_adapter_layers=self.num_layers // 5, + device=device, dtype=dtype, operations=operations + ) + + self.face_encoder = FaceEncoder( + in_dim=motion_encoder_dim, + hidden_dim=self.dim, + num_heads=4, + device=device, dtype=dtype, operations=operations + ) + + def after_patch_embedding(self, x, pose_latents, face_pixel_values): + if pose_latents is not None: + pose_latents = self.pose_patch_embedding(pose_latents) + x[:, :, 1:] += pose_latents + + if face_pixel_values is None: + return x, None + + b, c, T, h, w = face_pixel_values.shape + face_pixel_values = rearrange(face_pixel_values, "b c t h w -> (b t) c h w") + encode_bs = 8 + face_pixel_values_tmp = [] + for i in range(math.ceil(face_pixel_values.shape[0] / encode_bs)): + face_pixel_values_tmp.append(self.motion_encoder.get_motion(face_pixel_values[i * encode_bs: (i + 1) * encode_bs])) + + motion_vec = torch.cat(face_pixel_values_tmp) + + motion_vec = rearrange(motion_vec, "(b t) c -> b t c", t=T) + motion_vec = self.face_encoder(motion_vec) + + B, L, H, C = motion_vec.shape + pad_face = torch.zeros(B, 1, H, C).type_as(motion_vec) + motion_vec = torch.cat([pad_face, motion_vec], dim=1) + + if motion_vec.shape[1] < x.shape[2]: + B, L, H, C = motion_vec.shape + pad = torch.zeros(B, x.shape[2] - motion_vec.shape[1], H, C).type_as(motion_vec) + motion_vec = torch.cat([motion_vec, pad], dim=1) + else: + motion_vec = motion_vec[:, :x.shape[2]] + return x, motion_vec + + def forward_orig( + self, + x, + t, + context, + clip_fea=None, + pose_latents=None, + face_pixel_values=None, + freqs=None, + transformer_options={}, + **kwargs, + ): + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + x, motion_vec = self.after_patch_embedding(x, pose_latents, face_pixel_values) + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t.flatten()).to(dtype=x[0].dtype)) + e = e.reshape(t.shape[0], -1, e.shape[-1]) + e0 = self.time_projection(e).unflatten(2, (6, self.dim)) + + full_ref = None + if self.ref_conv is not None: + full_ref = kwargs.get("reference_latent", None) + if full_ref is not None: + full_ref = self.ref_conv(full_ref).flatten(2).transpose(1, 2) + x = torch.concat((full_ref, x), dim=1) + + # context + context = self.text_embedding(context) + + 
context_img_len = None + if clip_fea is not None: + if self.img_emb is not None: + context_clip = self.img_emb(clip_fea) # bs x 257 x dim + context = torch.concat([context_clip, context], dim=1) + context_img_len = clip_fea.shape[-2] + + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len, transformer_options=args["transformer_options"]) + return out + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len, transformer_options=transformer_options) + + if i % 5 == 0 and motion_vec is not None: + x = x + self.face_adapter.fuser_blocks[i // 5](x, motion_vec) + + # head + x = self.head(x, e) + + if full_ref is not None: + x = x[:, full_ref.shape[1]:] + + # unpatchify + x = self.unpatchify(x, grid_sizes) + return x diff --git a/comfy/model_base.py b/comfy/model_base.py index 70b67b7c1..b0b9cde7d 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -39,6 +39,7 @@ import comfy.ldm.cosmos.model import comfy.ldm.cosmos.predict2 import comfy.ldm.lumina.model import comfy.ldm.wan.model +import comfy.ldm.wan.model_animate import comfy.ldm.hunyuan3d.model import comfy.ldm.hidream.model import comfy.ldm.chroma.model @@ -1253,6 +1254,23 @@ class WAN21_HuMo(WAN21): return out +class WAN22_Animate(WAN21): + def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=False, device=None): + super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model_animate.AnimateWanModel) + self.image_to_video = image_to_video + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + + face_video_pixels = kwargs.get("face_video_pixels", None) + if face_video_pixels is not None: + out['face_pixel_values'] = comfy.conds.CONDRegular(face_video_pixels) + + pose_latents = kwargs.get("pose_video_latent", None) + if pose_latents is not None: + out['pose_latents'] = comfy.conds.CONDRegular(self.process_latent_in(pose_latents)) + return out + class WAN22_S2V(WAN21): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model.WanModel_S2V) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 72621bed6..46415c17a 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -404,6 +404,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["model_type"] = "s2v" elif '{}audio_proj.audio_proj_glob_1.layer.bias'.format(key_prefix) in state_dict_keys: dit_config["model_type"] = "humo" + elif '{}face_adapter.fuser_blocks.0.k_norm.weight'.format(key_prefix) in state_dict_keys: + dit_config["model_type"] = "animate" else: if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys: dit_config["model_type"] = "i2v" diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 213b5b92c..1fbb6aef4 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1096,6 +1096,19 @@ class WAN22_S2V(WAN21_T2V): out = model_base.WAN22_S2V(self, device=device) 
return out +class WAN22_Animate(WAN21_T2V): + unet_config = { + "image_model": "wan2.1", + "model_type": "animate", + } + + def __init__(self, unet_config): + super().__init__(unet_config) + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.WAN22_Animate(self, device=device) + return out + class WAN22_T2V(WAN21_T2V): unet_config = { "image_model": "wan2.1", @@ -1361,6 +1374,6 @@ class HunyuanImage21Refiner(HunyuanVideo): out = model_base.HunyuanImage21Refiner(self, device=device) return out -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 5f10edcff..4187a5619 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -1108,6 +1108,89 @@ class WanHuMoImageToVideo(io.ComfyNode): out_latent["samples"] = latent return io.NodeOutput(positive, negative, out_latent) +class WanAnimateToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanAnimateToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=77, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.ClipVisionOutput.Input("clip_vision_output", optional=True), + io.Image.Input("reference_image", optional=True), + io.Image.Input("face_video", optional=True), + io.Image.Input("pose_video", optional=True), + io.Int.Input("continue_motion_max_frames", default=5, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Image.Input("continue_motion", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + io.Int.Output(display_name="trim_latent"), + 
], + is_experimental=True, + ) + + @classmethod + def execute(cls, positive, negative, vae, width, height, length, batch_size, continue_motion_max_frames, reference_image=None, clip_vision_output=None, face_video=None, pose_video=None, continue_motion=None) -> io.NodeOutput: + latent_length = ((length - 1) // 4) + 1 + latent_width = width // 8 + latent_height = height // 8 + trim_latent = 0 + + if reference_image is None: + reference_image = torch.zeros((1, height, width, 3)) + + image = comfy.utils.common_upscale(reference_image[:length].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) + concat_latent_image = vae.encode(image[:, :, :, :3]) + mask = torch.zeros((1, 1, concat_latent_image.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=concat_latent_image.device, dtype=concat_latent_image.dtype) + trim_latent += concat_latent_image.shape[2] + + if clip_vision_output is not None: + positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) + negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) + + if face_video is not None: + face_video = comfy.utils.common_upscale(face_video[:length].movedim(-1, 1), 512, 512, "area", "center") * 2.0 - 1.0 + face_video = face_video.movedim(0, 1).unsqueeze(0) + positive = node_helpers.conditioning_set_values(positive, {"face_video_pixels": face_video}) + negative = node_helpers.conditioning_set_values(negative, {"face_video_pixels": face_video * 0.0 - 1.0}) + + if pose_video is not None: + pose_video = comfy.utils.common_upscale(pose_video[:length].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) + pose_video_latent = vae.encode(pose_video[:, :, :, :3]) + positive = node_helpers.conditioning_set_values(positive, {"pose_video_latent": pose_video_latent}) + negative = node_helpers.conditioning_set_values(negative, {"pose_video_latent": pose_video_latent}) + + if continue_motion is None: + image = torch.ones((length, height, width, 3)) * 0.5 + else: + continue_motion = continue_motion[-continue_motion_max_frames:] + continue_motion = comfy.utils.common_upscale(continue_motion[-length:].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) + image = torch.ones((length, height, width, continue_motion.shape[-1]), device=continue_motion.device, dtype=continue_motion.dtype) * 0.5 + image[:continue_motion.shape[0]] = continue_motion + + concat_latent_image = torch.cat((concat_latent_image, vae.encode(image[:, :, :, :3])), dim=2) + mask_refmotion = torch.ones((1, 1, latent_length, concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=mask.device, dtype=mask.dtype) + if continue_motion is not None: + mask_refmotion[:, :, :((continue_motion.shape[0] - 1) // 4) + 1] = 0.0 + + mask = torch.cat((mask, mask_refmotion), dim=2) + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) + + latent = torch.zeros([batch_size, 16, latent_length + trim_latent, latent_height, latent_width], device=comfy.model_management.intermediate_device()) + out_latent = {} + out_latent["samples"] = latent + return io.NodeOutput(positive, negative, out_latent, trim_latent) + class Wan22ImageToVideoLatent(io.ComfyNode): @classmethod def define_schema(cls): @@ -1169,6 +1252,7 @@ class WanExtension(ComfyExtension): 
WanSoundImageToVideo, WanSoundImageToVideoExtend, WanHuMoImageToVideo, + WanAnimateToVideo, Wan22ImageToVideoLatent, ] From 9fdf8c25abb2133803063a9be395cac774fce611 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 19 Sep 2025 23:02:43 +0300 Subject: [PATCH 0605/1073] api_nodes: reduce default timeout from 7 days to 2 hours (#9918) --- comfy_api_nodes/apis/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 4ad0b783b..0aed906fb 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -683,7 +683,7 @@ class SynchronousOperation(Generic[T, R]): auth_token: Optional[str] = None, comfy_api_key: Optional[str] = None, auth_kwargs: Optional[Dict[str, str]] = None, - timeout: float = 604800.0, + timeout: float = 7200.0, verify_ssl: bool = True, content_type: str = "application/json", multipart_parser: Callable | None = None, From 852704c81a652cc53fbe53c5f47dea0e50d0534e Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 19 Sep 2025 23:04:51 +0300 Subject: [PATCH 0606/1073] fix(seedream4): add flag to ignore error on partial success (#9952) --- comfy_api_nodes/nodes_bytedance.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index 369a3a4fe..a7eeaf15a 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -567,6 +567,12 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): tooltip="Whether to add an \"AI generated\" watermark to the image.", optional=True, ), + comfy_io.Boolean.Input( + "fail_on_partial", + default=True, + tooltip="If enabled, abort execution if any requested images are missing or return an error.", + optional=True, + ), ], outputs=[ comfy_io.Image.Output(), @@ -592,6 +598,7 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): max_images: int = 1, seed: int = 0, watermark: bool = True, + fail_on_partial: bool = True, ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) w = h = None @@ -651,9 +658,10 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): if len(response.data) == 1: return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) - return comfy_io.NodeOutput( - torch.cat([await download_url_to_image_tensor(str(i["url"])) for i in response.data]) - ) + urls = [str(d["url"]) for d in response.data if isinstance(d, dict) and "url" in d] + if fail_on_partial and len(urls) < len(response.data): + raise RuntimeError(f"Only {len(urls)} of {len(response.data)} images were generated before error.") + return comfy_io.NodeOutput(torch.cat([await download_url_to_image_tensor(i) for i in urls])) class ByteDanceTextToVideoNode(comfy_io.ComfyNode): @@ -1171,7 +1179,7 @@ async def process_video_task( payload: Union[Text2VideoTaskCreationRequest, Image2VideoTaskCreationRequest], auth_kwargs: dict, node_id: str, - estimated_duration: int | None, + estimated_duration: Optional[int], ) -> comfy_io.NodeOutput: initial_response = await SynchronousOperation( endpoint=ApiEndpoint( From e8df53b764c7dfce1a9235f6ee70a17cfdece3ff Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 19 Sep 2025 15:48:56 -0700 Subject: [PATCH 0607/1073] Update WanAnimateToVideo to more easily extend videos. 
(#9959) --- comfy/ldm/wan/model_animate.py | 2 +- comfy_extras/nodes_wan.py | 63 +++++++++++++++++++++++++--------- 2 files changed, 47 insertions(+), 18 deletions(-) diff --git a/comfy/ldm/wan/model_animate.py b/comfy/ldm/wan/model_animate.py index 542f54110..7c87835d4 100644 --- a/comfy/ldm/wan/model_animate.py +++ b/comfy/ldm/wan/model_animate.py @@ -451,7 +451,7 @@ class AnimateWanModel(WanModel): def after_patch_embedding(self, x, pose_latents, face_pixel_values): if pose_latents is not None: pose_latents = self.pose_patch_embedding(pose_latents) - x[:, :, 1:] += pose_latents + x[:, :, 1:pose_latents.shape[2] + 1] += pose_latents[:, :, :x.shape[2] - 1] if face_pixel_values is None: return x, None diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 4187a5619..3e5fef535 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -1128,18 +1128,22 @@ class WanAnimateToVideo(io.ComfyNode): io.Image.Input("pose_video", optional=True), io.Int.Input("continue_motion_max_frames", default=5, min=1, max=nodes.MAX_RESOLUTION, step=4), io.Image.Input("continue_motion", optional=True), + io.Int.Input("video_frame_offset", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1, tooltip="The amount of frames to seek in all the input videos. Used for generating longer videos by chunk. Connect to the video_frame_offset output of the previous node for extending a video."), ], outputs=[ io.Conditioning.Output(display_name="positive"), io.Conditioning.Output(display_name="negative"), io.Latent.Output(display_name="latent"), io.Int.Output(display_name="trim_latent"), + io.Int.Output(display_name="trim_image"), + io.Int.Output(display_name="video_frame_offset"), ], is_experimental=True, ) @classmethod - def execute(cls, positive, negative, vae, width, height, length, batch_size, continue_motion_max_frames, reference_image=None, clip_vision_output=None, face_video=None, pose_video=None, continue_motion=None) -> io.NodeOutput: + def execute(cls, positive, negative, vae, width, height, length, batch_size, continue_motion_max_frames, video_frame_offset, reference_image=None, clip_vision_output=None, face_video=None, pose_video=None, continue_motion=None) -> io.NodeOutput: + trim_to_pose_video = False latent_length = ((length - 1) // 4) + 1 latent_width = width // 8 latent_height = height // 8 @@ -1152,35 +1156,60 @@ class WanAnimateToVideo(io.ComfyNode): concat_latent_image = vae.encode(image[:, :, :, :3]) mask = torch.zeros((1, 1, concat_latent_image.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=concat_latent_image.device, dtype=concat_latent_image.dtype) trim_latent += concat_latent_image.shape[2] + ref_motion_latent_length = 0 + + if continue_motion is None: + image = torch.ones((length, height, width, 3)) * 0.5 + else: + continue_motion = continue_motion[-continue_motion_max_frames:] + video_frame_offset -= continue_motion.shape[0] + video_frame_offset = max(0, video_frame_offset) + continue_motion = comfy.utils.common_upscale(continue_motion[-length:].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) + image = torch.ones((length, height, width, continue_motion.shape[-1]), device=continue_motion.device, dtype=continue_motion.dtype) * 0.5 + image[:continue_motion.shape[0]] = continue_motion + ref_motion_latent_length += ((continue_motion.shape[0] - 1) // 4) + 1 if clip_vision_output is not None: positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) negative = 
node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) + if pose_video is not None: + if pose_video.shape[0] <= video_frame_offset: + pose_video = None + else: + pose_video = pose_video[video_frame_offset:] + + if pose_video is not None: + pose_video = comfy.utils.common_upscale(pose_video[:length].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) + if not trim_to_pose_video: + if pose_video.shape[0] < length: + pose_video = torch.cat((pose_video,) + (pose_video[-1:],) * (length - pose_video.shape[0]), dim=0) + + pose_video_latent = vae.encode(pose_video[:, :, :, :3]) + positive = node_helpers.conditioning_set_values(positive, {"pose_video_latent": pose_video_latent}) + negative = node_helpers.conditioning_set_values(negative, {"pose_video_latent": pose_video_latent}) + + if trim_to_pose_video: + latent_length = pose_video_latent.shape[2] + length = latent_length * 4 - 3 + image = image[:length] + + if face_video is not None: + if face_video.shape[0] <= video_frame_offset: + face_video = None + else: + face_video = face_video[video_frame_offset:] + if face_video is not None: face_video = comfy.utils.common_upscale(face_video[:length].movedim(-1, 1), 512, 512, "area", "center") * 2.0 - 1.0 face_video = face_video.movedim(0, 1).unsqueeze(0) positive = node_helpers.conditioning_set_values(positive, {"face_video_pixels": face_video}) negative = node_helpers.conditioning_set_values(negative, {"face_video_pixels": face_video * 0.0 - 1.0}) - if pose_video is not None: - pose_video = comfy.utils.common_upscale(pose_video[:length].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) - pose_video_latent = vae.encode(pose_video[:, :, :, :3]) - positive = node_helpers.conditioning_set_values(positive, {"pose_video_latent": pose_video_latent}) - negative = node_helpers.conditioning_set_values(negative, {"pose_video_latent": pose_video_latent}) - - if continue_motion is None: - image = torch.ones((length, height, width, 3)) * 0.5 - else: - continue_motion = continue_motion[-continue_motion_max_frames:] - continue_motion = comfy.utils.common_upscale(continue_motion[-length:].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) - image = torch.ones((length, height, width, continue_motion.shape[-1]), device=continue_motion.device, dtype=continue_motion.dtype) * 0.5 - image[:continue_motion.shape[0]] = continue_motion - concat_latent_image = torch.cat((concat_latent_image, vae.encode(image[:, :, :, :3])), dim=2) mask_refmotion = torch.ones((1, 1, latent_length, concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=mask.device, dtype=mask.dtype) if continue_motion is not None: - mask_refmotion[:, :, :((continue_motion.shape[0] - 1) // 4) + 1] = 0.0 + mask_refmotion[:, :, :ref_motion_latent_length] = 0.0 mask = torch.cat((mask, mask_refmotion), dim=2) positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) @@ -1189,7 +1218,7 @@ class WanAnimateToVideo(io.ComfyNode): latent = torch.zeros([batch_size, 16, latent_length + trim_latent, latent_height, latent_width], device=comfy.model_management.intermediate_device()) out_latent = {} out_latent["samples"] = latent - return io.NodeOutput(positive, negative, out_latent, trim_latent) + return io.NodeOutput(positive, negative, out_latent, trim_latent, max(0, ref_motion_latent_length * 4 - 3), video_frame_offset + length) class Wan22ImageToVideoLatent(io.ComfyNode): @classmethod From 
66241cef31f21247ec8b450d699250fd83b3ff7c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 19 Sep 2025 23:24:10 -0700 Subject: [PATCH 0608/1073] Add inputs for character replacement to the WanAnimateToVideo node. (#9960) --- comfy_extras/nodes_wan.py | 40 +++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 3e5fef535..9cca6fb2e 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -1127,6 +1127,8 @@ class WanAnimateToVideo(io.ComfyNode): io.Image.Input("face_video", optional=True), io.Image.Input("pose_video", optional=True), io.Int.Input("continue_motion_max_frames", default=5, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Image.Input("background_video", optional=True), + io.Mask.Input("character_mask", optional=True), io.Image.Input("continue_motion", optional=True), io.Int.Input("video_frame_offset", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1, tooltip="The amount of frames to seek in all the input videos. Used for generating longer videos by chunk. Connect to the video_frame_offset output of the previous node for extending a video."), ], @@ -1142,7 +1144,7 @@ class WanAnimateToVideo(io.ComfyNode): ) @classmethod - def execute(cls, positive, negative, vae, width, height, length, batch_size, continue_motion_max_frames, video_frame_offset, reference_image=None, clip_vision_output=None, face_video=None, pose_video=None, continue_motion=None) -> io.NodeOutput: + def execute(cls, positive, negative, vae, width, height, length, batch_size, continue_motion_max_frames, video_frame_offset, reference_image=None, clip_vision_output=None, face_video=None, pose_video=None, continue_motion=None, background_video=None, character_mask=None) -> io.NodeOutput: trim_to_pose_video = False latent_length = ((length - 1) // 4) + 1 latent_width = width // 8 @@ -1154,7 +1156,7 @@ class WanAnimateToVideo(io.ComfyNode): image = comfy.utils.common_upscale(reference_image[:length].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) concat_latent_image = vae.encode(image[:, :, :, :3]) - mask = torch.zeros((1, 1, concat_latent_image.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=concat_latent_image.device, dtype=concat_latent_image.dtype) + mask = torch.zeros((1, 4, concat_latent_image.shape[-3], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=concat_latent_image.device, dtype=concat_latent_image.dtype) trim_latent += concat_latent_image.shape[2] ref_motion_latent_length = 0 @@ -1206,11 +1208,37 @@ class WanAnimateToVideo(io.ComfyNode): positive = node_helpers.conditioning_set_values(positive, {"face_video_pixels": face_video}) negative = node_helpers.conditioning_set_values(negative, {"face_video_pixels": face_video * 0.0 - 1.0}) - concat_latent_image = torch.cat((concat_latent_image, vae.encode(image[:, :, :, :3])), dim=2) - mask_refmotion = torch.ones((1, 1, latent_length, concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=mask.device, dtype=mask.dtype) - if continue_motion is not None: - mask_refmotion[:, :, :ref_motion_latent_length] = 0.0 + ref_images_num = max(0, ref_motion_latent_length * 4 - 3) + if background_video is not None: + if background_video.shape[0] > video_frame_offset: + background_video = background_video[video_frame_offset:] + background_video = comfy.utils.common_upscale(background_video[:length].movedim(-1, 1), 
width, height, "area", "center").movedim(1, -1) + if background_video.shape[0] > ref_images_num: + image[ref_images_num:background_video.shape[0] - ref_images_num] = background_video[ref_images_num:] + mask_refmotion = torch.ones((1, 1, latent_length * 4, concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=mask.device, dtype=mask.dtype) + if continue_motion is not None: + mask_refmotion[:, :, :ref_motion_latent_length * 4] = 0.0 + + if character_mask is not None: + if character_mask.shape[0] > video_frame_offset or character_mask.shape[0] == 1: + if character_mask.shape[0] == 1: + character_mask = character_mask.repeat((length,) + (1,) * (character_mask.ndim - 1)) + else: + character_mask = character_mask[video_frame_offset:] + if character_mask.ndim == 3: + character_mask = character_mask.unsqueeze(1) + character_mask = character_mask.movedim(0, 1) + if character_mask.ndim == 4: + character_mask = character_mask.unsqueeze(1) + character_mask = comfy.utils.common_upscale(character_mask[:, :, :length], concat_latent_image.shape[-1], concat_latent_image.shape[-2], "nearest-exact", "center") + if character_mask.shape[2] > ref_images_num: + mask_refmotion[:, :, ref_images_num:character_mask.shape[2] + ref_images_num] = character_mask[:, :, ref_images_num:] + + concat_latent_image = torch.cat((concat_latent_image, vae.encode(image[:, :, :, :3])), dim=2) + + + mask_refmotion = mask_refmotion.view(1, mask_refmotion.shape[2] // 4, 4, mask_refmotion.shape[3], mask_refmotion.shape[4]).transpose(1, 2) mask = torch.cat((mask, mask_refmotion), dim=2) positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) From 9ed3c5cc09c55d2fffa67b59d9d21e3b44d7653e Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Sat, 20 Sep 2025 18:10:39 -0700 Subject: [PATCH 0609/1073] [Reviving #5709] Add strength input to Differential Diffusion (#9957) * Update nodes_differential_diffusion.py * Update nodes_differential_diffusion.py * Make strength optional to avoid validation errors when loading old workflows, adjust step --------- Co-authored-by: ThereforeGames --- comfy_extras/nodes_differential_diffusion.py | 33 +++++++++++++++----- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/comfy_extras/nodes_differential_diffusion.py b/comfy_extras/nodes_differential_diffusion.py index 98dbbf102..255ac420d 100644 --- a/comfy_extras/nodes_differential_diffusion.py +++ b/comfy_extras/nodes_differential_diffusion.py @@ -5,19 +5,30 @@ import torch class DifferentialDiffusion(): @classmethod def INPUT_TYPES(s): - return {"required": {"model": ("MODEL", ), - }} + return { + "required": { + "model": ("MODEL", ), + }, + "optional": { + "strength": ("FLOAT", { + "default": 1.0, + "min": 0.0, + "max": 1.0, + "step": 0.01, + }), + } + } RETURN_TYPES = ("MODEL",) FUNCTION = "apply" CATEGORY = "_for_testing" INIT = False - def apply(self, model): + def apply(self, model, strength=1.0): model = model.clone() - model.set_model_denoise_mask_function(self.forward) - return (model,) + model.set_model_denoise_mask_function(lambda *args, **kwargs: self.forward(*args, **kwargs, strength=strength)) + return (model, ) - def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict): + def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict, strength: float): model = 
extra_options["model"] step_sigmas = extra_options["sigmas"] sigma_to = model.inner_model.model_sampling.sigma_min @@ -31,7 +42,15 @@ class DifferentialDiffusion(): threshold = (current_ts - ts_to) / (ts_from - ts_to) - return (denoise_mask >= threshold).to(denoise_mask.dtype) + # Generate the binary mask based on the threshold + binary_mask = (denoise_mask >= threshold).to(denoise_mask.dtype) + + # Blend binary mask with the original denoise_mask using strength + if strength and strength < 1: + blended_mask = strength * binary_mask + (1 - strength) * denoise_mask + return blended_mask + else: + return binary_mask NODE_CLASS_MAPPINGS = { From 7be2b49b6b3430783555bc6bc8fcb3f46d5392e7 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 21 Sep 2025 09:24:48 +0800 Subject: [PATCH 0610/1073] Fix LoRA Trainer bugs with FP8 models. (#9854) * Fix adapter weight init * Fix fp8 model training * Avoid inference tensor --- comfy/ops.py | 13 +++++++------ comfy/weight_adapter/loha.py | 8 ++++---- comfy/weight_adapter/lokr.py | 4 ++-- comfy/weight_adapter/lora.py | 4 ++-- comfy/weight_adapter/oft.py | 2 +- comfy_extras/nodes_train.py | 18 ++++++++++++++++++ 6 files changed, 34 insertions(+), 15 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 55e958adb..9d7dedd37 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -365,12 +365,13 @@ class fp8_ops(manual_cast): return None def forward_comfy_cast_weights(self, input): - try: - out = fp8_linear(self, input) - if out is not None: - return out - except Exception as e: - logging.info("Exception during fp8 op: {}".format(e)) + if not self.training: + try: + out = fp8_linear(self, input) + if out is not None: + return out + except Exception as e: + logging.info("Exception during fp8 op: {}".format(e)) weight, bias = cast_bias_weight(self, input) return torch.nn.functional.linear(input, weight, bias) diff --git a/comfy/weight_adapter/loha.py b/comfy/weight_adapter/loha.py index 55c97a3af..0abb2d403 100644 --- a/comfy/weight_adapter/loha.py +++ b/comfy/weight_adapter/loha.py @@ -130,12 +130,12 @@ class LoHaAdapter(WeightAdapterBase): def create_train(cls, weight, rank=1, alpha=1.0): out_dim = weight.shape[0] in_dim = weight.shape[1:].numel() - mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=weight.dtype) - mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=weight.dtype) + mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=torch.float32) + mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=torch.float32) torch.nn.init.normal_(mat1, 0.1) torch.nn.init.constant_(mat2, 0.0) - mat3 = torch.empty(out_dim, rank, device=weight.device, dtype=weight.dtype) - mat4 = torch.empty(rank, in_dim, device=weight.device, dtype=weight.dtype) + mat3 = torch.empty(out_dim, rank, device=weight.device, dtype=torch.float32) + mat4 = torch.empty(rank, in_dim, device=weight.device, dtype=torch.float32) torch.nn.init.normal_(mat3, 0.1) torch.nn.init.normal_(mat4, 0.01) return LohaDiff( diff --git a/comfy/weight_adapter/lokr.py b/comfy/weight_adapter/lokr.py index 563c835f5..9b2aff2d7 100644 --- a/comfy/weight_adapter/lokr.py +++ b/comfy/weight_adapter/lokr.py @@ -89,8 +89,8 @@ class LoKrAdapter(WeightAdapterBase): in_dim = weight.shape[1:].numel() out1, out2 = factorization(out_dim, rank) in1, in2 = factorization(in_dim, rank) - mat1 = torch.empty(out1, in1, device=weight.device, dtype=weight.dtype) - mat2 = torch.empty(out2, in2, device=weight.device, dtype=weight.dtype) + mat1 = 
torch.empty(out1, in1, device=weight.device, dtype=torch.float32) + mat2 = torch.empty(out2, in2, device=weight.device, dtype=torch.float32) torch.nn.init.kaiming_uniform_(mat2, a=5**0.5) torch.nn.init.constant_(mat1, 0.0) return LokrDiff( diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py index 47aa17d13..4db004e50 100644 --- a/comfy/weight_adapter/lora.py +++ b/comfy/weight_adapter/lora.py @@ -66,8 +66,8 @@ class LoRAAdapter(WeightAdapterBase): def create_train(cls, weight, rank=1, alpha=1.0): out_dim = weight.shape[0] in_dim = weight.shape[1:].numel() - mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=weight.dtype) - mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=weight.dtype) + mat1 = torch.empty(out_dim, rank, device=weight.device, dtype=torch.float32) + mat2 = torch.empty(rank, in_dim, device=weight.device, dtype=torch.float32) torch.nn.init.kaiming_uniform_(mat1, a=5**0.5) torch.nn.init.constant_(mat2, 0.0) return LoraDiff( diff --git a/comfy/weight_adapter/oft.py b/comfy/weight_adapter/oft.py index 9d4982083..c0aab9635 100644 --- a/comfy/weight_adapter/oft.py +++ b/comfy/weight_adapter/oft.py @@ -68,7 +68,7 @@ class OFTAdapter(WeightAdapterBase): def create_train(cls, weight, rank=1, alpha=1.0): out_dim = weight.shape[0] block_size, block_num = factorization(out_dim, rank) - block = torch.zeros(block_num, block_size, block_size, device=weight.device, dtype=weight.dtype) + block = torch.zeros(block_num, block_size, block_size, device=weight.device, dtype=torch.float32) return OFTDiff( (block, None, alpha, None) ) diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index c3aaaee9b..9e6ec6780 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -38,6 +38,23 @@ def make_batch_extra_option_dict(d, indicies, full_size=None): return new_dict +def process_cond_list(d, prefix=""): + if hasattr(d, "__iter__") and not hasattr(d, "items"): + for index, item in enumerate(d): + process_cond_list(item, f"{prefix}.{index}") + return d + elif hasattr(d, "items"): + for k, v in list(d.items()): + if isinstance(v, dict): + process_cond_list(v, f"{prefix}.{k}") + elif isinstance(v, torch.Tensor): + d[k] = v.clone() + elif isinstance(v, (list, tuple)): + for index, item in enumerate(v): + process_cond_list(item, f"{prefix}.{k}.{index}") + return d + + class TrainSampler(comfy.samplers.Sampler): def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, grad_acc=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): self.loss_fn = loss_fn @@ -50,6 +67,7 @@ class TrainSampler(comfy.samplers.Sampler): self.training_dtype = training_dtype def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + model_wrap.conds = process_cond_list(model_wrap.conds) cond = model_wrap.conds["positive"] dataset_size = sigmas.size(0) torch.cuda.empty_cache() From d1d9eb94b1096c9b3f963bf152bd6b9cd330c3a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 20 Sep 2025 19:09:35 -0700 Subject: [PATCH 0611/1073] Lower wan memory estimation value a bit. (#9964) Previous pr reduced the peak memory requirement. 
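For example (illustrative numbers, assuming dim=5120 for the 14B Wan variant): the dim-based scaling below changes from 5120 / 2000 = 2.56 to 5120 / 2222 ≈ 2.30, and the class-level memory_usage_factor from 1.0 to 0.9, i.e. roughly a 10% lower estimate.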
--- comfy/supported_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 1fbb6aef4..4064bdae1 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -995,7 +995,7 @@ class WAN21_T2V(supported_models_base.BASE): unet_extra_config = {} latent_format = latent_formats.Wan21 - memory_usage_factor = 1.0 + memory_usage_factor = 0.9 supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32] @@ -1004,7 +1004,7 @@ class WAN21_T2V(supported_models_base.BASE): def __init__(self, unet_config): super().__init__(unet_config) - self.memory_usage_factor = self.unet_config.get("dim", 2000) / 2000 + self.memory_usage_factor = self.unet_config.get("dim", 2000) / 2222 def get_model(self, state_dict, prefix="", device=None): out = model_base.WAN21(self, device=device) From 27bc181c49249f11da2d8a14f84f3bdb58a0615f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 21 Sep 2025 16:48:31 -0700 Subject: [PATCH 0612/1073] Set some wan nodes as no longer experimental. (#9976) --- comfy_extras/nodes_wan.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 9cca6fb2e..b1e9babb5 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -287,7 +287,6 @@ class WanVaceToVideo(io.ComfyNode): return io.Schema( node_id="WanVaceToVideo", category="conditioning/video_models", - is_experimental=True, inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -375,7 +374,6 @@ class TrimVideoLatent(io.ComfyNode): return io.Schema( node_id="TrimVideoLatent", category="latent/video", - is_experimental=True, inputs=[ io.Latent.Input("samples"), io.Int.Input("trim_amount", default=0, min=0, max=99999), @@ -969,7 +967,6 @@ class WanSoundImageToVideo(io.ComfyNode): io.Conditioning.Output(display_name="negative"), io.Latent.Output(display_name="latent"), ], - is_experimental=True, ) @classmethod @@ -1000,7 +997,6 @@ class WanSoundImageToVideoExtend(io.ComfyNode): io.Conditioning.Output(display_name="negative"), io.Latent.Output(display_name="latent"), ], - is_experimental=True, ) @classmethod From 1fee8827cb8160c85d96c375413ac590311525dc Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 22 Sep 2025 13:49:48 -0700 Subject: [PATCH 0613/1073] Support for qwen edit plus model. Use the new TextEncodeQwenImageEditPlus. 
(#9986) --- comfy/text_encoders/llama.py | 16 +++++++---- comfy_extras/nodes_qwen.py | 55 ++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 6 deletions(-) diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 5e11956b5..c5a48ba9f 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -400,21 +400,25 @@ class Qwen25_7BVLI(BaseLlama, torch.nn.Module): def forward(self, x, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None, embeds_info=[]): grid = None + position_ids = None + offset = 0 for e in embeds_info: if e.get("type") == "image": grid = e.get("extra", None) - position_ids = torch.zeros((3, embeds.shape[1]), device=embeds.device) start = e.get("index") - position_ids[:, :start] = torch.arange(0, start, device=embeds.device) + if position_ids is None: + position_ids = torch.zeros((3, embeds.shape[1]), device=embeds.device) + position_ids[:, :start] = torch.arange(0, start, device=embeds.device) end = e.get("size") + start len_max = int(grid.max()) // 2 start_next = len_max + start - position_ids[:, end:] = torch.arange(start_next, start_next + (embeds.shape[1] - end), device=embeds.device) - position_ids[0, start:end] = start + position_ids[:, end:] = torch.arange(start_next + offset, start_next + (embeds.shape[1] - end) + offset, device=embeds.device) + position_ids[0, start:end] = start + offset max_d = int(grid[0][1]) // 2 - position_ids[1, start:end] = torch.arange(start, start + max_d, device=embeds.device).unsqueeze(1).repeat(1, math.ceil((end - start) / max_d)).flatten(0)[:end - start] + position_ids[1, start:end] = torch.arange(start + offset, start + max_d + offset, device=embeds.device).unsqueeze(1).repeat(1, math.ceil((end - start) / max_d)).flatten(0)[:end - start] max_d = int(grid[0][2]) // 2 - position_ids[2, start:end] = torch.arange(start, start + max_d, device=embeds.device).unsqueeze(0).repeat(math.ceil((end - start) / max_d), 1).flatten(0)[:end - start] + position_ids[2, start:end] = torch.arange(start + offset, start + max_d + offset, device=embeds.device).unsqueeze(0).repeat(math.ceil((end - start) / max_d), 1).flatten(0)[:end - start] + offset += len_max - (end - start) if grid is None: position_ids = None diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py index fff89556f..49747dc7a 100644 --- a/comfy_extras/nodes_qwen.py +++ b/comfy_extras/nodes_qwen.py @@ -43,6 +43,61 @@ class TextEncodeQwenImageEdit: return (conditioning, ) +class TextEncodeQwenImageEditPlus: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip": ("CLIP", ), + "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}), + }, + "optional": {"vae": ("VAE", ), + "image1": ("IMAGE", ), + "image2": ("IMAGE", ), + "image3": ("IMAGE", ), + }} + + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "encode" + + CATEGORY = "advanced/conditioning" + + def encode(self, clip, prompt, vae=None, image1=None, image2=None, image3=None): + ref_latents = [] + images = [image1, image2, image3] + images_vl = [] + llama_template = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. 
Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + image_prompt = "" + + for i, image in enumerate(images): + if image is not None: + samples = image.movedim(-1, 1) + total = int(384 * 384) + + scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) + width = round(samples.shape[3] * scale_by) + height = round(samples.shape[2] * scale_by) + + s = comfy.utils.common_upscale(samples, width, height, "area", "disabled") + images_vl.append(s.movedim(1, -1)) + if vae is not None: + total = int(1024 * 1024) + scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) + width = round(samples.shape[3] * scale_by / 8.0) * 8 + height = round(samples.shape[2] * scale_by / 8.0) * 8 + + s = comfy.utils.common_upscale(samples, width, height, "area", "disabled") + ref_latents.append(vae.encode(s.movedim(1, -1)[:, :, :, :3])) + + image_prompt += "Picture {}: <|vision_start|><|image_pad|><|vision_end|>".format(i + 1) + + tokens = clip.tokenize(image_prompt + prompt, images=images_vl, llama_template=llama_template) + conditioning = clip.encode_from_tokens_scheduled(tokens) + if len(ref_latents) > 0: + conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True) + return (conditioning, ) + + NODE_CLASS_MAPPINGS = { "TextEncodeQwenImageEdit": TextEncodeQwenImageEdit, + "TextEncodeQwenImageEditPlus": TextEncodeQwenImageEditPlus, } From e3206351b07852f2127a56abd898ee77f7f4c25f Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 22 Sep 2025 14:12:32 -0700 Subject: [PATCH 0614/1073] add offset param (#9977) --- server.py | 9 ++- tests/execution/test_execution.py | 105 +++++++++++++++++++++++++++++- 2 files changed, 112 insertions(+), 2 deletions(-) diff --git a/server.py b/server.py index 43816a8cd..603677397 100644 --- a/server.py +++ b/server.py @@ -645,7 +645,14 @@ class PromptServer(): max_items = request.rel_url.query.get("max_items", None) if max_items is not None: max_items = int(max_items) - return web.json_response(self.prompt_queue.get_history(max_items=max_items)) + + offset = request.rel_url.query.get("offset", None) + if offset is not None: + offset = int(offset) + else: + offset = -1 + + return web.json_response(self.prompt_queue.get_history(max_items=max_items, offset=offset)) @routes.get("/history/{prompt_id}") async def get_history_prompt_id(request): diff --git a/tests/execution/test_execution.py b/tests/execution/test_execution.py index 8ea05fdd8..ef73ad9fd 100644 --- a/tests/execution/test_execution.py +++ b/tests/execution/test_execution.py @@ -84,6 +84,21 @@ class ComfyClient: with urllib.request.urlopen("http://{}/history/{}".format(self.server_address, prompt_id)) as response: return json.loads(response.read()) + def get_all_history(self, max_items=None, offset=None): + url = "http://{}/history".format(self.server_address) + params = {} + if max_items is not None: + params["max_items"] = max_items + if offset is not None: + params["offset"] = offset + + if params: + url_values = urllib.parse.urlencode(params) + url = "{}?{}".format(url, url_values) + + with urllib.request.urlopen(url) as response: + return json.loads(response.read()) + def set_test_name(self, name): self.test_name = name @@ -498,7 +513,6 @@ class TestExecution: assert len(images1) == 1, "Should have 1 image" assert len(images2) == 1, "Should have 1 image" - # This tests that only constant outputs 
are used in the call to `IS_CHANGED` def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder): g = builder @@ -762,3 +776,92 @@ class TestExecution: except urllib.error.HTTPError: pass # Expected behavior + def _create_history_item(self, client, builder): + g = GraphBuilder(prefix="offset_test") + input_node = g.node( + "StubImage", content="BLACK", height=32, width=32, batch_size=1 + ) + g.node("SaveImage", images=input_node.out(0)) + return client.run(g) + + def test_offset_returns_different_items_than_beginning_of_history( + self, client: ComfyClient, builder: GraphBuilder + ): + """Test that offset skips items at the beginning""" + for _ in range(5): + self._create_history_item(client, builder) + + first_two = client.get_all_history(max_items=2, offset=0) + next_two = client.get_all_history(max_items=2, offset=2) + + assert set(first_two.keys()).isdisjoint( + set(next_two.keys()) + ), "Offset should skip initial items" + + def test_offset_beyond_history_length_returns_empty( + self, client: ComfyClient, builder: GraphBuilder + ): + """Test offset larger than total history returns empty result""" + self._create_history_item(client, builder) + + result = client.get_all_history(offset=100) + assert len(result) == 0, "Large offset should return no items" + + def test_offset_at_exact_history_length_returns_empty( + self, client: ComfyClient, builder: GraphBuilder + ): + """Test offset equal to history length returns empty""" + for _ in range(3): + self._create_history_item(client, builder) + + all_history = client.get_all_history() + result = client.get_all_history(offset=len(all_history)) + assert len(result) == 0, "Offset at history length should return empty" + + def test_offset_zero_equals_no_offset_parameter( + self, client: ComfyClient, builder: GraphBuilder + ): + """Test offset=0 behaves same as omitting offset""" + self._create_history_item(client, builder) + + with_zero = client.get_all_history(offset=0) + without_offset = client.get_all_history() + + assert with_zero == without_offset, "offset=0 should equal no offset" + + def test_offset_without_max_items_skips_from_beginning( + self, client: ComfyClient, builder: GraphBuilder + ): + """Test offset alone (no max_items) returns remaining items""" + for _ in range(4): + self._create_history_item(client, builder) + + all_items = client.get_all_history() + offset_items = client.get_all_history(offset=2) + + assert ( + len(offset_items) == len(all_items) - 2 + ), "Offset should skip specified number of items" + + def test_offset_with_max_items_returns_correct_window( + self, client: ComfyClient, builder: GraphBuilder + ): + """Test offset + max_items returns correct slice of history""" + for _ in range(6): + self._create_history_item(client, builder) + + window = client.get_all_history(max_items=2, offset=1) + assert len(window) <= 2, "Should respect max_items limit" + + def test_offset_near_end_returns_remaining_items_only( + self, client: ComfyClient, builder: GraphBuilder + ): + """Test offset near end of history returns only remaining items""" + for _ in range(3): + self._create_history_item(client, builder) + + all_history = client.get_all_history() + # Offset to near the end + result = client.get_all_history(max_items=5, offset=len(all_history) - 1) + + assert len(result) <= 1, "Should return at most 1 item when offset is near end" From 8a5ac527e60fcd48ec228d309d49ab28ac79def8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 22 Sep 2025 
14:26:58 -0700 Subject: [PATCH 0615/1073] Fix bug with WanAnimateToVideo node. (#9988) --- comfy_extras/nodes_wan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index b1e9babb5..6c16a2673 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -1210,7 +1210,7 @@ class WanAnimateToVideo(io.ComfyNode): background_video = background_video[video_frame_offset:] background_video = comfy.utils.common_upscale(background_video[:length].movedim(-1, 1), width, height, "area", "center").movedim(1, -1) if background_video.shape[0] > ref_images_num: - image[ref_images_num:background_video.shape[0] - ref_images_num] = background_video[ref_images_num:] + image[ref_images_num:background_video.shape[0]] = background_video[ref_images_num:] mask_refmotion = torch.ones((1, 1, latent_length * 4, concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=mask.device, dtype=mask.dtype) if continue_motion is not None: From 707b2638ecd82360c0a67e1d86cc4fdeae218d03 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 22 Sep 2025 14:34:33 -0700 Subject: [PATCH 0616/1073] Fix bug with WanAnimateToVideo. (#9990) --- comfy_extras/nodes_wan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_wan.py b/comfy_extras/nodes_wan.py index 6c16a2673..b0bd471bf 100644 --- a/comfy_extras/nodes_wan.py +++ b/comfy_extras/nodes_wan.py @@ -1229,7 +1229,7 @@ class WanAnimateToVideo(io.ComfyNode): character_mask = character_mask.unsqueeze(1) character_mask = comfy.utils.common_upscale(character_mask[:, :, :length], concat_latent_image.shape[-1], concat_latent_image.shape[-2], "nearest-exact", "center") if character_mask.shape[2] > ref_images_num: - mask_refmotion[:, :, ref_images_num:character_mask.shape[2] + ref_images_num] = character_mask[:, :, ref_images_num:] + mask_refmotion[:, :, ref_images_num:character_mask.shape[2]] = character_mask[:, :, ref_images_num:] concat_latent_image = torch.cat((concat_latent_image, vae.encode(image[:, :, :, :3])), dim=2) From 145b0e4f79b5d9e815bb781ba29ccd057bb52dab Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 23 Sep 2025 23:22:35 +0800 Subject: [PATCH 0617/1073] update template to 0.1.86 (#9998) * update template to 0.1.84 * update template to 0.1.85 * Update template to 0.1.86 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 79187efaa..2980bebdd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.26.13 -comfyui-workflow-templates==0.1.81 +comfyui-workflow-templates==0.1.86 comfyui-embedded-docs==0.2.6 torch torchsde From e8087907995497c6971ee64bd5fa02cb49c1eda6 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 23 Sep 2025 18:36:47 +0300 Subject: [PATCH 0618/1073] feat(api-nodes): add wan t2i, t2v, i2v nodes (#9996) --- comfy_api_nodes/nodes_wan.py | 602 +++++++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 603 insertions(+) create mode 100644 comfy_api_nodes/nodes_wan.py diff --git a/comfy_api_nodes/nodes_wan.py b/comfy_api_nodes/nodes_wan.py new file mode 100644 index 000000000..db5bd41c1 --- /dev/null +++ b/comfy_api_nodes/nodes_wan.py @@ -0,0 +1,602 @@ +import re +from typing import Optional, Type, Union +from typing_extensions import override + +import torch +from pydantic import BaseModel, Field +from 
comfy_api.latest import ComfyExtension, Input, io as comfy_io +from comfy_api_nodes.apis.client import ( + ApiEndpoint, + HttpMethod, + SynchronousOperation, + PollingOperation, + EmptyRequest, + R, + T, +) +from comfy_api_nodes.util.validation_utils import get_number_of_images, validate_audio_duration + +from comfy_api_nodes.apinode_utils import ( + download_url_to_image_tensor, + download_url_to_video_output, + tensor_to_base64_string, + audio_to_base64_string, +) + +class Text2ImageInputField(BaseModel): + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + + +class Text2VideoInputField(BaseModel): + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + audio_url: Optional[str] = Field(None) + + +class Image2VideoInputField(BaseModel): + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + img_url: str = Field(...) + audio_url: Optional[str] = Field(None) + + +class Txt2ImageParametersField(BaseModel): + size: str = Field(...) + n: int = Field(1, description="Number of images to generate.") # we support only value=1 + seed: int = Field(..., ge=0, le=2147483647) + prompt_extend: bool = Field(True) + watermark: bool = Field(True) + + +class Text2VideoParametersField(BaseModel): + size: str = Field(...) + seed: int = Field(..., ge=0, le=2147483647) + duration: int = Field(5, ge=5, le=10) + prompt_extend: bool = Field(True) + watermark: bool = Field(True) + audio: bool = Field(False, description="Should be audio generated automatically") + + +class Image2VideoParametersField(BaseModel): + resolution: str = Field(...) + seed: int = Field(..., ge=0, le=2147483647) + duration: int = Field(5, ge=5, le=10) + prompt_extend: bool = Field(True) + watermark: bool = Field(True) + audio: bool = Field(False, description="Should be audio generated automatically") + + +class Text2ImageTaskCreationRequest(BaseModel): + model: str = Field(...) + input: Text2ImageInputField = Field(...) + parameters: Txt2ImageParametersField = Field(...) + + +class Text2VideoTaskCreationRequest(BaseModel): + model: str = Field(...) + input: Text2VideoInputField = Field(...) + parameters: Text2VideoParametersField = Field(...) + + +class Image2VideoTaskCreationRequest(BaseModel): + model: str = Field(...) + input: Image2VideoInputField = Field(...) + parameters: Image2VideoParametersField = Field(...) + + +class TaskCreationOutputField(BaseModel): + task_id: str = Field(...) + task_status: str = Field(...) + + +class TaskCreationResponse(BaseModel): + output: Optional[TaskCreationOutputField] = Field(None) + request_id: str = Field(...) + code: Optional[str] = Field(None, description="The error code of the failed request.") + message: Optional[str] = Field(None, description="Details of the failed request.") + + +class TaskResult(BaseModel): + url: Optional[str] = Field(None) + code: Optional[str] = Field(None) + message: Optional[str] = Field(None) + + +class ImageTaskStatusOutputField(TaskCreationOutputField): + task_id: str = Field(...) + task_status: str = Field(...) + results: Optional[list[TaskResult]] = Field(None) + + +class VideoTaskStatusOutputField(TaskCreationOutputField): + task_id: str = Field(...) + task_status: str = Field(...) + video_url: Optional[str] = Field(None) + code: Optional[str] = Field(None) + message: Optional[str] = Field(None) + + +class ImageTaskStatusResponse(BaseModel): + output: Optional[ImageTaskStatusOutputField] = Field(None) + request_id: str = Field(...) 
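For orientation, the request/status models above feed a create-then-poll flow: one POST creates the task, then the task id is polled until a terminal status. A minimal sketch of that shape (the HTTP helpers are passed in as parameters, since the real client machinery — `SynchronousOperation`/`PollingOperation` — lives elsewhere in this codebase):

    import asyncio
    from typing import Any, Awaitable, Callable

    async def create_and_poll(
        post: Callable[[str, dict], Awaitable[dict]],  # assumed POST helper
        get: Callable[[str], Awaitable[dict]],         # assumed GET helper
        create_path: str,
        payload: dict,
        poll_interval: float = 3.0,
    ) -> dict[str, Any]:
        # Create the task; a missing "output" field signals a failed request.
        created = await post(create_path, payload)
        if not created.get("output"):
            raise RuntimeError(f"{created.get('code')} - {created.get('message')}")
        task_id = created["output"]["task_id"]
        # Poll the task endpoint until it reaches a terminal status.
        while True:
            status = await get(f"/proxy/wan/api/v1/tasks/{task_id}")
            task_status = status["output"]["task_status"]
            if task_status == "SUCCEEDED":
                return status
            if task_status in ("FAILED", "CANCELED", "UNKNOWN"):
                raise RuntimeError(f"task ended as {task_status}")
            await asyncio.sleep(poll_interval)
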
+ + +class VideoTaskStatusResponse(BaseModel): + output: Optional[VideoTaskStatusOutputField] = Field(None) + request_id: str = Field(...) + + +RES_IN_PARENS = re.compile(r'\((\d+)\s*[x×]\s*(\d+)\)') + + +async def process_task( + auth_kwargs: dict[str, str], + url: str, + request_model: Type[T], + response_model: Type[R], + payload: Union[Text2ImageTaskCreationRequest, Text2VideoTaskCreationRequest, Image2VideoTaskCreationRequest], + node_id: str, + estimated_duration: int, + poll_interval: int, +) -> Type[R]: + initial_response = await SynchronousOperation( + endpoint=ApiEndpoint( + path=url, + method=HttpMethod.POST, + request_model=request_model, + response_model=TaskCreationResponse, + ), + request=payload, + auth_kwargs=auth_kwargs, + ).execute() + + if not initial_response.output: + raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + + return await PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=response_model, + ), + completed_statuses=["SUCCEEDED"], + failed_statuses=["FAILED", "CANCELED", "UNKNOWN"], + status_extractor=lambda x: x.output.task_status, + estimated_duration=estimated_duration, + poll_interval=poll_interval, + node_id=node_id, + auth_kwargs=auth_kwargs, + ).execute() + + +class WanTextToImageApi(comfy_io.ComfyNode): + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="WanTextToImageApi", + display_name="Wan Text to Image", + category="api node/image/Wan", + description="Generates image based on text prompt.", + inputs=[ + comfy_io.Combo.Input( + "model", + options=["wan2.5-t2i-preview"], + default="wan2.5-t2i-preview", + tooltip="Model to use.", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + ), + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Negative text prompt to guide what to avoid.", + optional=True, + ), + comfy_io.Int.Input( + "width", + default=1024, + min=768, + max=1440, + step=32, + optional=True, + ), + comfy_io.Int.Input( + "height", + default=1024, + min=768, + max=1440, + step=32, + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "prompt_extend", + default=True, + tooltip="Whether to enhance the prompt with AI assistance.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the result.", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + negative_prompt: str = "", + width: int = 1024, + height: int = 1024, + seed: int = 0, + prompt_extend: bool = True, + watermark: bool = True, + ): + payload = Text2ImageTaskCreationRequest( + model=model, + input=Text2ImageInputField(prompt=prompt, negative_prompt=negative_prompt), + parameters=Txt2ImageParametersField( + size=f"{width}*{height}", + seed=seed, + prompt_extend=prompt_extend, + 
watermark=watermark, + ), + ) + response = await process_task( + { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + "/proxy/wan/api/v1/services/aigc/text2image/image-synthesis", + request_model=Text2ImageTaskCreationRequest, + response_model=ImageTaskStatusResponse, + payload=payload, + node_id=cls.hidden.unique_id, + estimated_duration=9, + poll_interval=3, + ) + return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) + + +class WanTextToVideoApi(comfy_io.ComfyNode): + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="WanTextToVideoApi", + display_name="Wan Text to Video", + category="api node/video/Wan", + description="Generates video based on text prompt.", + inputs=[ + comfy_io.Combo.Input( + "model", + options=["wan2.5-t2v-preview"], + default="wan2.5-t2v-preview", + tooltip="Model to use.", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + ), + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Negative text prompt to guide what to avoid.", + optional=True, + ), + comfy_io.Combo.Input( + "size", + options=[ + "480p: 1:1 (624x624)", + "480p: 16:9 (832x480)", + "480p: 9:16 (480x832)", + "720p: 1:1 (960x960)", + "720p: 16:9 (1280x720)", + "720p: 9:16 (720x1280)", + "720p: 4:3 (1088x832)", + "720p: 3:4 (832x1088)", + "1080p: 1:1 (1440x1440)", + "1080p: 16:9 (1920x1080)", + "1080p: 9:16 (1080x1920)", + "1080p: 4:3 (1632x1248)", + "1080p: 3:4 (1248x1632)", + ], + default="480p: 1:1 (624x624)", + optional=True, + ), + comfy_io.Int.Input( + "duration", + default=5, + min=5, + max=10, + step=5, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Available durations: 5 and 10 seconds", + optional=True, + ), + comfy_io.Audio.Input( + "audio", + optional=True, + tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "generate_audio", + default=False, + optional=True, + tooltip="If there is no audio input, generate audio automatically.", + ), + comfy_io.Boolean.Input( + "prompt_extend", + default=True, + tooltip="Whether to enhance the prompt with AI assistance.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the result.", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + negative_prompt: str = "", + size: str = "480p: 1:1 (624x624)", + duration: int = 5, + audio: Optional[Input.Audio] = None, + seed: int = 0, + generate_audio: bool = False, + prompt_extend: bool = True, + watermark: bool = True, + ): + width, height = RES_IN_PARENS.search(size).groups() + audio_url = None + if audio is not None: + validate_audio_duration(audio, 3.0, 29.0) + audio_url = "data:audio/mp3;base64," + audio_to_base64_string(audio, "mp3", "libmp3lame") + payload = Text2VideoTaskCreationRequest( + model=model, + 
input=Text2VideoInputField(prompt=prompt, negative_prompt=negative_prompt, audio_url=audio_url), + parameters=Text2VideoParametersField( + size=f"{width}*{height}", + duration=duration, + seed=seed, + audio=generate_audio, + prompt_extend=prompt_extend, + watermark=watermark, + ), + ) + response = await process_task( + { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + "/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis", + request_model=Text2VideoTaskCreationRequest, + response_model=VideoTaskStatusResponse, + payload=payload, + node_id=cls.hidden.unique_id, + estimated_duration=120 * int(duration / 5), + poll_interval=6, + ) + return comfy_io.NodeOutput(await download_url_to_video_output(response.output.video_url)) + + +class WanImageToVideoApi(comfy_io.ComfyNode): + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="WanImageToVideoApi", + display_name="Wan Image to Video", + category="api node/video/Wan", + description="Generates video based on the first frame and text prompt.", + inputs=[ + comfy_io.Combo.Input( + "model", + options=["wan2.5-i2v-preview"], + default="wan2.5-i2v-preview", + tooltip="Model to use.", + ), + comfy_io.Image.Input( + "image", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + ), + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Negative text prompt to guide what to avoid.", + optional=True, + ), + comfy_io.Combo.Input( + "resolution", + options=[ + "480P", + "720P", + "1080P", + ], + default="480P", + optional=True, + ), + comfy_io.Int.Input( + "duration", + default=5, + min=5, + max=10, + step=5, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Available durations: 5 and 10 seconds", + optional=True, + ), + comfy_io.Audio.Input( + "audio", + optional=True, + tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.", + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "generate_audio", + default=False, + optional=True, + tooltip="If there is no audio input, generate audio automatically.", + ), + comfy_io.Boolean.Input( + "prompt_extend", + default=True, + tooltip="Whether to enhance the prompt with AI assistance.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the result.", + optional=True, + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + image: torch.Tensor, + prompt: str, + negative_prompt: str = "", + resolution: str = "480P", + duration: int = 5, + audio: Optional[Input.Audio] = None, + seed: int = 0, + generate_audio: bool = False, + prompt_extend: bool = True, + watermark: bool = True, + ): + if get_number_of_images(image) != 1: + raise ValueError("Exactly one input image is required.") + image_url = "data:image/png;base64," + tensor_to_base64_string(image, total_pixels=2000*2000) + audio_url = None + if audio is not None: + validate_audio_duration(audio, 3.0, 
29.0) + audio_url = "data:audio/mp3;base64," + audio_to_base64_string(audio, "mp3", "libmp3lame") + payload = Image2VideoTaskCreationRequest( + model=model, + input=Image2VideoInputField( + prompt=prompt, negative_prompt=negative_prompt, img_url=image_url, audio_url=audio_url + ), + parameters=Image2VideoParametersField( + resolution=resolution, + duration=duration, + seed=seed, + audio=generate_audio, + prompt_extend=prompt_extend, + watermark=watermark, + ), + ) + response = await process_task( + { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + "/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis", + request_model=Image2VideoTaskCreationRequest, + response_model=VideoTaskStatusResponse, + payload=payload, + node_id=cls.hidden.unique_id, + estimated_duration=120 * int(duration / 5), + poll_interval=6, + ) + return comfy_io.NodeOutput(await download_url_to_video_output(response.output.video_url)) + + +class WanApiExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + WanTextToImageApi, + WanTextToVideoApi, + WanImageToVideoApi, + ] + + +async def comfy_entrypoint() -> WanApiExtension: + return WanApiExtension() diff --git a/nodes.py b/nodes.py index 5a5fdcb8e..1a6784b68 100644 --- a/nodes.py +++ b/nodes.py @@ -2361,6 +2361,7 @@ async def init_builtin_api_nodes(): "nodes_rodin.py", "nodes_gemini.py", "nodes_vidu.py", + "nodes_wan.py", ] if not await load_custom_node(os.path.join(api_nodes_dir, "canary.py"), module_parent="comfy_api_nodes"): From b8730510db30c8858e1e5d8e126ef19eac395560 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 23 Sep 2025 11:50:33 -0400 Subject: [PATCH 0619/1073] ComfyUI version 0.3.60 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index ee58205f5..d469a8194 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.59" +__version__ = "0.3.60" diff --git a/pyproject.toml b/pyproject.toml index a7fc1a5a6..7340c320b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.59" +version = "0.3.60" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 341b4adefd308cbcf82c07effc255f2770b3b3e2 Mon Sep 17 00:00:00 2001 From: Changrz <51637999+WhiteGiven@users.noreply.github.com> Date: Thu, 25 Sep 2025 02:05:37 +0800 Subject: [PATCH 0620/1073] Rodin3D - add [Rodin3D Gen-2 generate] api-node (#9994) * update Rodin api node * update rodin3d gen2 api node * fix images limited bug --- comfy_api_nodes/apis/rodin_api.py | 3 +- comfy_api_nodes/nodes_rodin.py | 140 ++++++++++++++++++++++++------ 2 files changed, 117 insertions(+), 26 deletions(-) diff --git a/comfy_api_nodes/apis/rodin_api.py b/comfy_api_nodes/apis/rodin_api.py index b0cf171fa..02cf42c29 100644 --- a/comfy_api_nodes/apis/rodin_api.py +++ b/comfy_api_nodes/apis/rodin_api.py @@ -9,8 +9,9 @@ class Rodin3DGenerateRequest(BaseModel): seed: int = Field(..., description="seed_") tier: str = Field(..., description="Tier of generation.") material: str = Field(..., description="The material type.") - quality: str = Field(..., description="The generation quality of the mesh.") + quality_override: int = Field(..., description="The poly count of the mesh.") mesh_mode: str = Field(..., description="It controls the type of faces of generated models.") + TAPose: Optional[bool] = Field(None, description="") class GenerateJobsData(BaseModel): uuids: List[str] = Field(..., description="str LIST") diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index c89d087e5..1af393eba 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -121,10 +121,10 @@ class Rodin3DAPI: else: return "Generating" - async def create_generate_task(self, images=None, seed=1, material="PBR", quality="medium", tier="Regular", mesh_mode="Quad", **kwargs): + async def create_generate_task(self, images=None, seed=1, material="PBR", quality_override=18000, tier="Regular", mesh_mode="Quad", TAPose = False, **kwargs): if images is None: raise Exception("Rodin 3D generate requires at least 1 image.") - if len(images) >= 5: + if len(images) > 5: raise Exception("Rodin 3D generate requires up to 5 image.") path = "/proxy/rodin/api/v2/rodin" @@ -139,8 +139,9 @@ class Rodin3DAPI: seed=seed, tier=tier, material=material, - quality=quality, - mesh_mode=mesh_mode + quality_override=quality_override, + mesh_mode=mesh_mode, + TAPose=TAPose, ), files=[ ( @@ -211,23 +212,36 @@ class Rodin3DAPI: return await operation.execute() def get_quality_mode(self, poly_count): - if poly_count == "200K-Triangle": + polycount = poly_count.split("-") + poly = polycount[1] + count = polycount[0] + if poly == "Triangle": mesh_mode = "Raw" - quality = "medium" + elif poly == "Quad": + mesh_mode = "Quad" else: mesh_mode = "Quad" - if poly_count == "4K-Quad": - quality = "extra-low" - elif poly_count == "8K-Quad": - quality = "low" - elif poly_count == "18K-Quad": - quality = "medium" - elif poly_count == "50K-Quad": - quality = "high" - else: - quality = "medium" - return mesh_mode, quality + if count == "4K": + quality_override = 4000 + elif count == "8K": + quality_override = 8000 + elif count == "18K": + quality_override = 18000 + elif count == "50K": + quality_override = 50000 + elif count == "2K": + quality_override = 2000 + elif count == "20K": + quality_override = 20000 + 
elif count == "150K": + quality_override = 150000 + elif count == "500K": + quality_override = 500000 + else: + quality_override = 18000 + + return mesh_mode, quality_override async def download_files(self, url_list): save_path = os.path.join(comfy_paths.get_output_directory(), "Rodin3D", datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) @@ -300,9 +314,9 @@ class Rodin3D_Regular(Rodin3DAPI): m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality = self.get_quality_mode(Polygon_count) + mesh_mode, quality_override = self.get_quality_mode(Polygon_count) task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, - quality=quality, tier=tier, mesh_mode=mesh_mode, + quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, **kwargs) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) @@ -346,9 +360,9 @@ class Rodin3D_Detail(Rodin3DAPI): m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality = self.get_quality_mode(Polygon_count) + mesh_mode, quality_override = self.get_quality_mode(Polygon_count) task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, - quality=quality, tier=tier, mesh_mode=mesh_mode, + quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, **kwargs) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) @@ -392,9 +406,9 @@ class Rodin3D_Smooth(Rodin3DAPI): m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality = self.get_quality_mode(Polygon_count) + mesh_mode, quality_override = self.get_quality_mode(Polygon_count) task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, - quality=quality, tier=tier, mesh_mode=mesh_mode, + quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, **kwargs) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) @@ -446,10 +460,10 @@ class Rodin3D_Sketch(Rodin3DAPI): for i in range(num_images): m_images.append(Images[i]) material_type = "PBR" - quality = "medium" + quality_override = 18000 mesh_mode = "Quad" task_uuid, subscription_key = await self.create_generate_task( - images=m_images, seed=Seed, material=material_type, quality=quality, tier=tier, mesh_mode=mesh_mode, **kwargs + images=m_images, seed=Seed, material=material_type, quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, **kwargs ) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) @@ -457,6 +471,80 @@ class Rodin3D_Sketch(Rodin3DAPI): return (model,) +class Rodin3D_Gen2(Rodin3DAPI): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "Images": + ( + IO.IMAGE, + { + "forceInput":True, + } + ) + }, + "optional": { + "Seed": ( + IO.INT, + { + "default":0, + "min":0, + "max":65535, + "display":"number" + } + ), + "Material_Type": ( + IO.COMBO, + { + "options": ["PBR", "Shaded"], + "default": "PBR" + } + ), + "Polygon_count": ( + IO.COMBO, + { + "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "2K-Triangle", "20K-Triangle", "150K-Triangle", "500K-Triangle"], + "default": "500K-Triangle" + } + ), + "TAPose": ( + IO.BOOLEAN, + { + 
"default": False, + } + ) + }, + "hidden": { + "auth_token": "AUTH_TOKEN_COMFY_ORG", + "comfy_api_key": "API_KEY_COMFY_ORG", + }, + } + + async def api_call( + self, + Images, + Seed, + Material_Type, + Polygon_count, + TAPose, + **kwargs + ): + tier = "Gen-2" + num_images = Images.shape[0] + m_images = [] + for i in range(num_images): + m_images.append(Images[i]) + mesh_mode, quality_override = self.get_quality_mode(Polygon_count) + task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, + quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, TAPose=TAPose, + **kwargs) + await self.poll_for_task_status(subscription_key, **kwargs) + download_list = await self.get_rodin_download_list(task_uuid, **kwargs) + model = await self.download_files(download_list) + + return (model,) + # A dictionary that contains all nodes you want to export with their names # NOTE: names should be globally unique NODE_CLASS_MAPPINGS = { @@ -464,6 +552,7 @@ NODE_CLASS_MAPPINGS = { "Rodin3D_Detail": Rodin3D_Detail, "Rodin3D_Smooth": Rodin3D_Smooth, "Rodin3D_Sketch": Rodin3D_Sketch, + "Rodin3D_Gen2": Rodin3D_Gen2, } # A dictionary that contains the friendly/humanly readable titles for the nodes @@ -472,4 +561,5 @@ NODE_DISPLAY_NAME_MAPPINGS = { "Rodin3D_Detail": "Rodin 3D Generate - Detail Generate", "Rodin3D_Smooth": "Rodin 3D Generate - Smooth Generate", "Rodin3D_Sketch": "Rodin 3D Generate - Sketch Generate", + "Rodin3D_Gen2": "Rodin 3D Generate - Gen-2 Generate", } From fd79d32f38fd24adca5a6e8214f05050f287c9db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Thu, 25 Sep 2025 01:59:29 +0300 Subject: [PATCH 0621/1073] Add new audio nodes (#9908) * Add new audio nodes - TrimAudioDuration - SplitAudioChannels - AudioConcat - AudioMerge - AudioAdjustVolume * Update nodes_audio.py * Add EmptyAudio -node * Change duration to Float (allows sub seconds) --- comfy_extras/nodes_audio.py | 223 ++++++++++++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index 3b23f65d8..51c8b9dd9 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -11,6 +11,7 @@ import json import random import hashlib import node_helpers +import logging from comfy.cli_args import args from comfy.comfy_types import FileLocator @@ -364,6 +365,216 @@ class RecordAudio: return (audio, ) +class TrimAudioDuration: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "audio": ("AUDIO",), + "start_index": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "tooltip": "Start time in seconds, can be negative to count from the end (supports sub-seconds)."}), + "duration": ("FLOAT", {"default": 60.0, "min": 0.0, "step": 0.01, "tooltip": "Duration in seconds"}), + }, + } + + FUNCTION = "trim" + RETURN_TYPES = ("AUDIO",) + CATEGORY = "audio" + DESCRIPTION = "Trim audio tensor into chosen time range." 
+ + def trim(self, audio, start_index, duration): + waveform = audio["waveform"] + sample_rate = audio["sample_rate"] + audio_length = waveform.shape[-1] + + if start_index < 0: + start_frame = audio_length + int(round(start_index * sample_rate)) + else: + start_frame = int(round(start_index * sample_rate)) + start_frame = max(0, min(start_frame, audio_length - 1)) + + end_frame = start_frame + int(round(duration * sample_rate)) + end_frame = max(0, min(end_frame, audio_length)) + + if start_frame >= end_frame: + raise ValueError("AudioTrim: Start time must be less than end time and be within the audio length.") + + return ({"waveform": waveform[..., start_frame:end_frame], "sample_rate": sample_rate},) + + +class SplitAudioChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "audio": ("AUDIO",), + }} + + RETURN_TYPES = ("AUDIO", "AUDIO") + RETURN_NAMES = ("left", "right") + FUNCTION = "separate" + CATEGORY = "audio" + DESCRIPTION = "Separates the audio into left and right channels." + + def separate(self, audio): + waveform = audio["waveform"] + sample_rate = audio["sample_rate"] + + if waveform.shape[1] != 2: + raise ValueError("AudioSplit: Input audio has only one channel.") + + left_channel = waveform[..., 0:1, :] + right_channel = waveform[..., 1:2, :] + + return ({"waveform": left_channel, "sample_rate": sample_rate}, {"waveform": right_channel, "sample_rate": sample_rate}) + + +def match_audio_sample_rates(waveform_1, sample_rate_1, waveform_2, sample_rate_2): + if sample_rate_1 != sample_rate_2: + if sample_rate_1 > sample_rate_2: + waveform_2 = torchaudio.functional.resample(waveform_2, sample_rate_2, sample_rate_1) + output_sample_rate = sample_rate_1 + logging.info(f"Resampling audio2 from {sample_rate_2}Hz to {sample_rate_1}Hz for merging.") + else: + waveform_1 = torchaudio.functional.resample(waveform_1, sample_rate_1, sample_rate_2) + output_sample_rate = sample_rate_2 + logging.info(f"Resampling audio1 from {sample_rate_1}Hz to {sample_rate_2}Hz for merging.") + else: + output_sample_rate = sample_rate_1 + return waveform_1, waveform_2, output_sample_rate + + +class AudioConcat: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "audio1": ("AUDIO",), + "audio2": ("AUDIO",), + "direction": (['after', 'before'], {"default": 'after', "tooltip": "Whether to append audio2 after or before audio1."}), + }} + + RETURN_TYPES = ("AUDIO",) + FUNCTION = "concat" + CATEGORY = "audio" + DESCRIPTION = "Concatenates the audio1 to audio2 in the specified direction." 
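A usage sketch for `match_audio_sample_rates` above, as exercised by the `concat` method below (assumes the helper and this module's torch/torchaudio imports; shapes are illustrative):

    import torch

    wav_a = torch.rand(1, 2, 44100)  # 1 s stereo at 44.1 kHz
    wav_b = torch.rand(1, 2, 22050)  # 1 s stereo at 22.05 kHz

    # The lower-rate waveform is resampled up, so nothing is downsampled.
    wav_a, wav_b, sr = match_audio_sample_rates(wav_a, 44100, wav_b, 22050)
    out = torch.cat((wav_a, wav_b), dim=2)
    assert sr == 44100 and out.shape[-1] == 88200  # ~2 s at the higher rate
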
+ + def concat(self, audio1, audio2, direction): + waveform_1 = audio1["waveform"] + waveform_2 = audio2["waveform"] + sample_rate_1 = audio1["sample_rate"] + sample_rate_2 = audio2["sample_rate"] + + if waveform_1.shape[1] == 1: + waveform_1 = waveform_1.repeat(1, 2, 1) + logging.info("AudioConcat: Converted mono audio1 to stereo by duplicating the channel.") + if waveform_2.shape[1] == 1: + waveform_2 = waveform_2.repeat(1, 2, 1) + logging.info("AudioConcat: Converted mono audio2 to stereo by duplicating the channel.") + + waveform_1, waveform_2, output_sample_rate = match_audio_sample_rates(waveform_1, sample_rate_1, waveform_2, sample_rate_2) + + if direction == 'after': + concatenated_audio = torch.cat((waveform_1, waveform_2), dim=2) + elif direction == 'before': + concatenated_audio = torch.cat((waveform_2, waveform_1), dim=2) + + return ({"waveform": concatenated_audio, "sample_rate": output_sample_rate},) + + +class AudioMerge: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "audio1": ("AUDIO",), + "audio2": ("AUDIO",), + "merge_method": (["add", "mean", "subtract", "multiply"], {"tooltip": "The method used to combine the audio waveforms."}), + }, + } + + FUNCTION = "merge" + RETURN_TYPES = ("AUDIO",) + CATEGORY = "audio" + DESCRIPTION = "Combine two audio tracks by overlaying their waveforms." + + def merge(self, audio1, audio2, merge_method): + waveform_1 = audio1["waveform"] + waveform_2 = audio2["waveform"] + sample_rate_1 = audio1["sample_rate"] + sample_rate_2 = audio2["sample_rate"] + + waveform_1, waveform_2, output_sample_rate = match_audio_sample_rates(waveform_1, sample_rate_1, waveform_2, sample_rate_2) + + length_1 = waveform_1.shape[-1] + length_2 = waveform_2.shape[-1] + + if length_2 > length_1: + logging.info(f"AudioMerge: Trimming audio2 from {length_2} to {length_1} samples to match audio1 length.") + waveform_2 = waveform_2[..., :length_1] + elif length_2 < length_1: + logging.info(f"AudioMerge: Padding audio2 from {length_2} to {length_1} samples to match audio1 length.") + pad_shape = list(waveform_2.shape) + pad_shape[-1] = length_1 - length_2 + pad_tensor = torch.zeros(pad_shape, dtype=waveform_2.dtype, device=waveform_2.device) + waveform_2 = torch.cat((waveform_2, pad_tensor), dim=-1) + + if merge_method == "add": + waveform = waveform_1 + waveform_2 + elif merge_method == "subtract": + waveform = waveform_1 - waveform_2 + elif merge_method == "multiply": + waveform = waveform_1 * waveform_2 + elif merge_method == "mean": + waveform = (waveform_1 + waveform_2) / 2 + + max_val = waveform.abs().max() + if max_val > 1.0: + waveform = waveform / max_val + + return ({"waveform": waveform, "sample_rate": output_sample_rate},) + + +class AudioAdjustVolume: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "audio": ("AUDIO",), + "volume": ("INT", {"default": 1.0, "min": -100, "max": 100, "tooltip": "Volume adjustment in decibels (dB). 
0 = no change, +6 = double, -6 = half, etc"}), + }} + + RETURN_TYPES = ("AUDIO",) + FUNCTION = "adjust_volume" + CATEGORY = "audio" + + def adjust_volume(self, audio, volume): + if volume == 0: + return (audio,) + waveform = audio["waveform"] + sample_rate = audio["sample_rate"] + + gain = 10 ** (volume / 20) + waveform = waveform * gain + + return ({"waveform": waveform, "sample_rate": sample_rate},) + + +class EmptyAudio: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "duration": ("FLOAT", {"default": 60.0, "min": 0.0, "max": 0xffffffffffffffff, "step": 0.01, "tooltip": "Duration of the empty audio clip in seconds"}), + "sample_rate": ("INT", {"default": 44100, "tooltip": "Sample rate of the empty audio clip."}), + "channels": ("INT", {"default": 2, "min": 1, "max": 2, "tooltip": "Number of audio channels (1 for mono, 2 for stereo)."}), + }} + + RETURN_TYPES = ("AUDIO",) + FUNCTION = "create_empty_audio" + CATEGORY = "audio" + + def create_empty_audio(self, duration, sample_rate, channels): + num_samples = int(round(duration * sample_rate)) + waveform = torch.zeros((1, channels, num_samples), dtype=torch.float32) + return ({"waveform": waveform, "sample_rate": sample_rate},) + + NODE_CLASS_MAPPINGS = { "EmptyLatentAudio": EmptyLatentAudio, "VAEEncodeAudio": VAEEncodeAudio, @@ -375,6 +586,12 @@ NODE_CLASS_MAPPINGS = { "PreviewAudio": PreviewAudio, "ConditioningStableAudio": ConditioningStableAudio, "RecordAudio": RecordAudio, + "TrimAudioDuration": TrimAudioDuration, + "SplitAudioChannels": SplitAudioChannels, + "AudioConcat": AudioConcat, + "AudioMerge": AudioMerge, + "AudioAdjustVolume": AudioAdjustVolume, + "EmptyAudio": EmptyAudio, } NODE_DISPLAY_NAME_MAPPINGS = { @@ -387,4 +604,10 @@ NODE_DISPLAY_NAME_MAPPINGS = { "SaveAudioMP3": "Save Audio (MP3)", "SaveAudioOpus": "Save Audio (Opus)", "RecordAudio": "Record Audio", + "TrimAudioDuration": "Trim Audio Duration", + "SplitAudioChannels": "Split Audio Channels", + "AudioConcat": "Audio Concat", + "AudioMerge": "Audio Merge", + "AudioAdjustVolume": "Audio Adjust Volume", + "EmptyAudio": "Empty Audio", } From fccab99ec0fcd13e80fa59bc73bccff31f9450ca Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 24 Sep 2025 17:09:42 -0700 Subject: [PATCH 0622/1073] Fix issue with .view() in HuMo. (#10014) --- comfy/ldm/wan/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 2dac5980c..54616e6eb 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -1355,7 +1355,7 @@ class WanT2VCrossAttentionGather(WanSelfAttention): x = optimized_attention(q, k, v, heads=self.num_heads, skip_reshape=True, skip_output_reshape=True, transformer_options=transformer_options) - x = x.transpose(1, 2).view(b, -1, n, d).flatten(2) + x = x.transpose(1, 2).reshape(b, -1, n * d) x = self.o(x) return x From c8d2117f02bcad6d8316ffd8273bdc27adf83b44 Mon Sep 17 00:00:00 2001 From: Guy Niv <43928922+guyniv@users.noreply.github.com> Date: Thu, 25 Sep 2025 05:35:12 +0300 Subject: [PATCH 0623/1073] Fix memory leak by properly detaching model finalizer (#9979) When unloading models in load_models_gpu(), the model finalizer was not being explicitly detached, leading to a memory leak. This caused linear memory consumption increase over time as models are repeatedly loaded and unloaded. This change prevents orphaned finalizer references from accumulating in memory during model switching operations. 
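A toy sketch of the leak pattern this fixes (illustrative names only, not ComfyUI's actual objects): each load registers a `weakref.finalize` whose callback pins per-load state, and while the referent stays alive an undetached finalizer keeps that state reachable, so repeated load/unload cycles grow memory linearly.

    import weakref

    class Model:
        pass

    model = Model()

    # Ten "loads": each finalizer's callback captures ~1 MB of per-load state.
    fins = [weakref.finalize(model, lambda s=bytearray(1_000_000): None)
            for _ in range(10)]

    # While `model` is still referenced, none of the callbacks have run, so
    # all ten captured buffers stay reachable. Detaching on unload frees them:
    for fin in fins:
        fin.detach()
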
--- comfy/model_management.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index d880f1970..c5b817b62 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -645,7 +645,9 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu if loaded_model.model.is_clone(current_loaded_models[i].model): to_unload = [i] + to_unload for i in to_unload: - current_loaded_models.pop(i).model.detach(unpatch_all=False) + model_to_unload = current_loaded_models.pop(i) + model_to_unload.model.detach(unpatch_all=False) + model_to_unload.model_finalizer.detach() total_memory_required = {} for loaded_model in models_to_load: From ce4cb2389c8ce63cf8735f200b8672a2c1be0950 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 25 Sep 2025 14:20:13 -0700 Subject: [PATCH 0624/1073] Make LatentCompositeMasked work with basic video latents. (#10023) --- comfy_extras/nodes_mask.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 2b0f8dd5d..a5e405008 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -12,35 +12,38 @@ from nodes import MAX_RESOLUTION def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False): source = source.to(destination.device) if resize_source: - source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear") + source = torch.nn.functional.interpolate(source, size=(destination.shape[-2], destination.shape[-1]), mode="bilinear") source = comfy.utils.repeat_to_batch_size(source, destination.shape[0]) - x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier)) - y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier)) + x = max(-source.shape[-1] * multiplier, min(x, destination.shape[-1] * multiplier)) + y = max(-source.shape[-2] * multiplier, min(y, destination.shape[-2] * multiplier)) left, top = (x // multiplier, y // multiplier) - right, bottom = (left + source.shape[3], top + source.shape[2],) + right, bottom = (left + source.shape[-1], top + source.shape[-2],) if mask is None: mask = torch.ones_like(source) else: mask = mask.to(destination.device, copy=True) - mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear") + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[-2], source.shape[-1]), mode="bilinear") mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0]) # calculate the bounds of the source that will be overlapping the destination # this prevents the source trying to overwrite latent pixels that are out of bounds # of the destination - visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),) + visible_width, visible_height = (destination.shape[-1] - left + min(0, x), destination.shape[-2] - top + min(0, y),) mask = mask[:, :, :visible_height, :visible_width] + if mask.ndim < source.ndim: + mask = mask.unsqueeze(1) + inverse_mask = torch.ones_like(mask) - mask - source_portion = mask * source[:, :, :visible_height, :visible_width] - destination_portion = inverse_mask * destination[:, :, top:bottom, left:right] + source_portion = mask * 
source[..., :visible_height, :visible_width] + destination_portion = inverse_mask * destination[..., top:bottom, left:right] - destination[:, :, top:bottom, left:right] = source_portion + destination_portion + destination[..., top:bottom, left:right] = source_portion + destination_portion return destination class LatentCompositeMasked: From 2b7f9a8196304badb5fe58e5c734e4b182ad0fdf Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 26 Sep 2025 11:12:43 -0700 Subject: [PATCH 0625/1073] Fix the failing unit test. (#10037) --- .github/workflows/test-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-unit.yml b/.github/workflows/test-unit.yml index 78c918031..00caf5b8a 100644 --- a/.github/workflows/test-unit.yml +++ b/.github/workflows/test-unit.yml @@ -10,7 +10,7 @@ jobs: test: strategy: matrix: - os: [ubuntu-latest, windows-latest, macos-latest] + os: [ubuntu-latest, windows-2022, macos-latest] runs-on: ${{ matrix.os }} continue-on-error: true steps: From c4a46e943c12c7f3f6ac72f8fb51caad514ec9b6 Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Fri, 26 Sep 2025 14:08:16 -0700 Subject: [PATCH 0626/1073] Add @kosinkadink as code owner (#10041) Updated CODEOWNERS to include @kosinkadink as a code owner. --- CODEOWNERS | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index c8acd66d5..b7aca9b26 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,25 +1,3 @@ # Admins * @comfyanonymous - -# Note: Github teams syntax cannot be used here as the repo is not owned by Comfy-Org. -# Inlined the team members for now. - -# Maintainers -*.md @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill -/tests/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill -/tests-unit/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill -/notebooks/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill -/script_examples/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill -/.github/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill -/requirements.txt @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill -/pyproject.toml @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill - -# Python web server -/api_server/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill -/app/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill -/utils/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill - -# Node developers -/comfy_extras/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill -/comfy/comfy_types/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill -/comfy_api_nodes/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill +* @kosinkadink From 76eb1d72c3e5bef51d6ca8a26bf996972d3f6d1a Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 00:10:49 +0300 Subject: [PATCH 0627/1073] convert 
nodes_rebatch.py to V3 schema (#9945) --- comfy_extras/nodes_rebatch.py | 97 ++++++++++++++++++++--------------- 1 file changed, 56 insertions(+), 41 deletions(-) diff --git a/comfy_extras/nodes_rebatch.py b/comfy_extras/nodes_rebatch.py index e29cb9ed1..5f4e82aef 100644 --- a/comfy_extras/nodes_rebatch.py +++ b/comfy_extras/nodes_rebatch.py @@ -1,18 +1,25 @@ +from typing_extensions import override import torch -class LatentRebatch: +from comfy_api.latest import ComfyExtension, io + + +class LatentRebatch(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "latents": ("LATENT",), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - }} - RETURN_TYPES = ("LATENT",) - INPUT_IS_LIST = True - OUTPUT_IS_LIST = (True, ) - - FUNCTION = "rebatch" - - CATEGORY = "latent/batch" + def define_schema(cls): + return io.Schema( + node_id="RebatchLatents", + display_name="Rebatch Latents", + category="latent/batch", + is_input_list=True, + inputs=[ + io.Latent.Input("latents"), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(is_output_list=True), + ], + ) @staticmethod def get_batch(latents, list_ind, offset): @@ -53,7 +60,8 @@ class LatentRebatch: result = [torch.cat((b1, b2)) if torch.is_tensor(b1) else b1 + b2 for b1, b2 in zip(batch1, batch2)] return result - def rebatch(self, latents, batch_size): + @classmethod + def execute(cls, latents, batch_size): batch_size = batch_size[0] output_list = [] @@ -63,24 +71,24 @@ class LatentRebatch: for i in range(len(latents)): # fetch new entry of list #samples, masks, indices = self.get_batch(latents, i) - next_batch = self.get_batch(latents, i, processed) + next_batch = cls.get_batch(latents, i, processed) processed += len(next_batch[2]) # set to current if current is None if current_batch[0] is None: current_batch = next_batch # add previous to list if dimensions do not match elif next_batch[0].shape[-1] != current_batch[0].shape[-1] or next_batch[0].shape[-2] != current_batch[0].shape[-2]: - sliced, _ = self.slice_batch(current_batch, 1, batch_size) + sliced, _ = cls.slice_batch(current_batch, 1, batch_size) output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]}) current_batch = next_batch # cat if everything checks out else: - current_batch = self.cat_batch(current_batch, next_batch) + current_batch = cls.cat_batch(current_batch, next_batch) # add to list if dimensions gone above target batch size if current_batch[0].shape[0] > batch_size: num = current_batch[0].shape[0] // batch_size - sliced, remainder = self.slice_batch(current_batch, num, batch_size) + sliced, remainder = cls.slice_batch(current_batch, num, batch_size) for i in range(num): output_list.append({'samples': sliced[0][i], 'noise_mask': sliced[1][i], 'batch_index': sliced[2][i]}) @@ -89,7 +97,7 @@ class LatentRebatch: #add remainder if current_batch[0] is not None: - sliced, _ = self.slice_batch(current_batch, 1, batch_size) + sliced, _ = cls.slice_batch(current_batch, 1, batch_size) output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]}) #get rid of empty masks @@ -97,23 +105,27 @@ class LatentRebatch: if s['noise_mask'].mean() == 1.0: del s['noise_mask'] - return (output_list,) + return io.NodeOutput(output_list) -class ImageRebatch: +class ImageRebatch(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "images": ("IMAGE",), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - }} - 
RETURN_TYPES = ("IMAGE",) - INPUT_IS_LIST = True - OUTPUT_IS_LIST = (True, ) + def define_schema(cls): + return io.Schema( + node_id="RebatchImages", + display_name="Rebatch Images", + category="image/batch", + is_input_list=True, + inputs=[ + io.Image.Input("images"), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Image.Output(is_output_list=True), + ], + ) - FUNCTION = "rebatch" - - CATEGORY = "image/batch" - - def rebatch(self, images, batch_size): + @classmethod + def execute(cls, images, batch_size): batch_size = batch_size[0] output_list = [] @@ -125,14 +137,17 @@ class ImageRebatch: for i in range(0, len(all_images), batch_size): output_list.append(torch.cat(all_images[i:i+batch_size], dim=0)) - return (output_list,) + return io.NodeOutput(output_list) -NODE_CLASS_MAPPINGS = { - "RebatchLatents": LatentRebatch, - "RebatchImages": ImageRebatch, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "RebatchLatents": "Rebatch Latents", - "RebatchImages": "Rebatch Images", -} +class RebatchExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + LatentRebatch, + ImageRebatch, + ] + + +async def comfy_entrypoint() -> RebatchExtension: + return RebatchExtension() From 7ea173c1873ec22df6edabc80a912a08ae2d521b Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 00:12:04 +0300 Subject: [PATCH 0628/1073] convert nodes_fresca.py to V3 schema (#9951) --- comfy_extras/nodes_fresca.py | 61 +++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/comfy_extras/nodes_fresca.py b/comfy_extras/nodes_fresca.py index 65c2d0d0e..f308eb0c1 100644 --- a/comfy_extras/nodes_fresca.py +++ b/comfy_extras/nodes_fresca.py @@ -1,6 +1,8 @@ # Code based on https://github.com/WikiChao/FreSca (MIT License) import torch import torch.fft as fft +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20): @@ -51,25 +53,31 @@ def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20): return x_filtered -class FreSca: +class FreSca(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model": ("MODEL",), - "scale_low": ("FLOAT", {"default": 1.0, "min": 0, "max": 10, "step": 0.01, - "tooltip": "Scaling factor for low-frequency components"}), - "scale_high": ("FLOAT", {"default": 1.25, "min": 0, "max": 10, "step": 0.01, - "tooltip": "Scaling factor for high-frequency components"}), - "freq_cutoff": ("INT", {"default": 20, "min": 1, "max": 10000, "step": 1, - "tooltip": "Number of frequency indices around center to consider as low-frequency"}), - } - } - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" - CATEGORY = "_for_testing" - DESCRIPTION = "Applies frequency-dependent scaling to the guidance" - def patch(self, model, scale_low, scale_high, freq_cutoff): + def define_schema(cls): + return io.Schema( + node_id="FreSca", + display_name="FreSca", + category="_for_testing", + description="Applies frequency-dependent scaling to the guidance", + inputs=[ + io.Model.Input("model"), + io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01, + tooltip="Scaling factor for low-frequency components"), + io.Float.Input("scale_high", default=1.25, min=0, max=10, step=0.01, + tooltip="Scaling factor for high-frequency components"), + io.Int.Input("freq_cutoff", default=20, min=1, max=10000, step=1, + tooltip="Number of 
frequency indices around center to consider as low-frequency"), + ], + outputs=[ + io.Model.Output(), + ], + is_experimental=True, + ) + + @classmethod + def execute(cls, model, scale_low, scale_high, freq_cutoff): def custom_cfg_function(args): conds_out = args["conds_out"] if len(conds_out) <= 1 or None in args["conds"][:2]: @@ -91,13 +99,16 @@ class FreSca: m = model.clone() m.set_model_sampler_pre_cfg_function(custom_cfg_function) - return (m,) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "FreSca": FreSca, -} +class FreScaExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + FreSca, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "FreSca": "FreSca", -} + +async def comfy_entrypoint() -> FreScaExtension: + return FreScaExtension() From 80718908a9ac1045ece84285ca568511dcc9bc46 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 00:12:38 +0300 Subject: [PATCH 0629/1073] convert nodes_sdupscale.py to V3 schema (#9943) --- comfy_extras/nodes_sdupscale.py | 54 +++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 19 deletions(-) diff --git a/comfy_extras/nodes_sdupscale.py b/comfy_extras/nodes_sdupscale.py index bba67e8dd..31b373370 100644 --- a/comfy_extras/nodes_sdupscale.py +++ b/comfy_extras/nodes_sdupscale.py @@ -1,23 +1,31 @@ +from typing_extensions import override + import torch import comfy.utils +from comfy_api.latest import ComfyExtension, io -class SD_4XUpscale_Conditioning: +class SD_4XUpscale_Conditioning(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "images": ("IMAGE",), - "positive": ("CONDITIONING",), - "negative": ("CONDITIONING",), - "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), - }} - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") + def define_schema(cls): + return io.Schema( + node_id="SD_4XUpscale_Conditioning", + category="conditioning/upscale_diffusion", + inputs=[ + io.Image.Input("images"), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Float.Input("scale_ratio", default=4.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("noise_augmentation", default=0.0, min=0.0, max=1.0, step=0.001), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) - FUNCTION = "encode" - - CATEGORY = "conditioning/upscale_diffusion" - - def encode(self, images, positive, negative, scale_ratio, noise_augmentation): + @classmethod + def execute(cls, images, positive, negative, scale_ratio, noise_augmentation): width = max(1, round(images.shape[-2] * scale_ratio)) height = max(1, round(images.shape[-3] * scale_ratio)) @@ -39,8 +47,16 @@ class SD_4XUpscale_Conditioning: out_cn.append(n) latent = torch.zeros([images.shape[0], 4, height // 4, width // 4]) - return (out_cp, out_cn, {"samples":latent}) + return io.NodeOutput(out_cp, out_cn, {"samples":latent}) -NODE_CLASS_MAPPINGS = { - "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning, -} + +class SdUpscaleExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + SD_4XUpscale_Conditioning, + ] + + +async def comfy_entrypoint() -> SdUpscaleExtension: + return SdUpscaleExtension() From 
a061b06321b4e91d05c7c436b1e9b188360c5377 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 00:13:05 +0300 Subject: [PATCH 0630/1073] convert nodes_tcfg.py to V3 schema (#9942) --- comfy_extras/nodes_tcfg.py | 51 +++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/comfy_extras/nodes_tcfg.py b/comfy_extras/nodes_tcfg.py index 35b89a73f..1a6767770 100644 --- a/comfy_extras/nodes_tcfg.py +++ b/comfy_extras/nodes_tcfg.py @@ -1,8 +1,9 @@ # TCFG: Tangential Damping Classifier-free Guidance - (arXiv: https://arxiv.org/abs/2503.18137) +from typing_extensions import override import torch -from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict +from comfy_api.latest import ComfyExtension, io def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor: @@ -26,23 +27,24 @@ def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tenso return uncond_score_td.reshape_as(uncond_score).to(uncond_score.dtype) -class TCFG(ComfyNodeABC): +class TCFG(io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "model": (IO.MODEL, {}), - } - } + def define_schema(cls): + return io.Schema( + node_id="TCFG", + display_name="Tangential Damping CFG", + category="advanced/guidance", + description="TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.", + inputs=[ + io.Model.Input("model"), + ], + outputs=[ + io.Model.Output(display_name="patched_model"), + ], + ) - RETURN_TYPES = (IO.MODEL,) - RETURN_NAMES = ("patched_model",) - FUNCTION = "patch" - - CATEGORY = "advanced/guidance" - DESCRIPTION = "TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality." 
- - def patch(self, model): + @classmethod + def execute(cls, model): m = model.clone() def tangential_damping_cfg(args): @@ -59,13 +61,16 @@ class TCFG(ComfyNodeABC): return [cond_pred, uncond_pred_td] + conds_out[2:] m.set_model_sampler_pre_cfg_function(tangential_damping_cfg) - return (m,) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "TCFG": TCFG, -} +class TcfgExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + TCFG, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "TCFG": "Tangential Damping CFG", -} + +async def comfy_entrypoint() -> TcfgExtension: + return TcfgExtension() From d20576e6a3527d0763ba8d7a72c70ee66829690a Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 00:13:52 +0300 Subject: [PATCH 0631/1073] convert nodes_sag.py to V3 schema (#9940) --- comfy_extras/nodes_sag.py | 50 +++++++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index 1bd8d7364..0f47db30b 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -2,10 +2,13 @@ import torch from torch import einsum import torch.nn.functional as F import math +from typing_extensions import override from einops import rearrange, repeat from comfy.ldm.modules.attention import optimized_attention import comfy.samplers +from comfy_api.latest import ComfyExtension, io + # from comfy/ldm/modules/attention.py # but modified to return attention scores as well as output @@ -104,19 +107,26 @@ def gaussian_blur_2d(img, kernel_size, sigma): img = F.conv2d(img, kernel2d, groups=img.shape[-3]) return img -class SelfAttentionGuidance: +class SelfAttentionGuidance(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.01}), - "blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return io.Schema( + node_id="SelfAttentionGuidance", + display_name="Self-Attention Guidance", + category="_for_testing", + inputs=[ + io.Model.Input("model"), + io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01), + io.Float.Input("blur_sigma", default=2.0, min=0.0, max=10.0, step=0.1), + ], + outputs=[ + io.Model.Output(), + ], + is_experimental=True, + ) - CATEGORY = "_for_testing" - - def patch(self, model, scale, blur_sigma): + @classmethod + def execute(cls, model, scale, blur_sigma): m = model.clone() attn_scores = None @@ -170,12 +180,16 @@ class SelfAttentionGuidance: # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch m.set_model_attn1_replace(attn_and_record, "middle", 0, 0) - return (m, ) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "SelfAttentionGuidance": SelfAttentionGuidance, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "SelfAttentionGuidance": "Self-Attention Guidance", -} +class SagExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + SelfAttentionGuidance, + ] + + +async def comfy_entrypoint() -> SagExtension: + return SagExtension() From 2103e393350d297ef77497a1b14a8199d4a1f1b4 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 00:14:42 +0300 Subject: [PATCH 0632/1073] convert nodes_post_processing to V3 schema (#9491) --- 
comfy_extras/nodes_post_processing.py | 249 ++++++++++++-------------- 1 file changed, 111 insertions(+), 138 deletions(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index ed7a07152..34c388a5a 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -1,3 +1,4 @@ +from typing_extensions import override import numpy as np import torch import torch.nn.functional as F @@ -7,33 +8,27 @@ import math import comfy.utils import comfy.model_management import node_helpers +from comfy_api.latest import ComfyExtension, io -class Blend: - def __init__(self): - pass +class Blend(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ImageBlend", + category="image/postprocessing", + inputs=[ + io.Image.Input("image1"), + io.Image.Input("image2"), + io.Float.Input("blend_factor", default=0.5, min=0.0, max=1.0, step=0.01), + io.Combo.Input("blend_mode", options=["normal", "multiply", "screen", "overlay", "soft_light", "difference"]), + ], + outputs=[ + io.Image.Output(), + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "blend_factor": ("FLOAT", { - "default": 0.5, - "min": 0.0, - "max": 1.0, - "step": 0.01 - }), - "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light", "difference"],), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "blend_images" - - CATEGORY = "image/postprocessing" - - def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): + def execute(cls, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str) -> io.NodeOutput: image1, image2 = node_helpers.image_alpha_fix(image1, image2) image2 = image2.to(image1.device) if image1.shape != image2.shape: @@ -41,12 +36,13 @@ class Blend: image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center') image2 = image2.permute(0, 2, 3, 1) - blended_image = self.blend_mode(image1, image2, blend_mode) + blended_image = cls.blend_mode(image1, image2, blend_mode) blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor blended_image = torch.clamp(blended_image, 0, 1) - return (blended_image,) + return io.NodeOutput(blended_image) - def blend_mode(self, img1, img2, mode): + @classmethod + def blend_mode(cls, img1, img2, mode): if mode == "normal": return img2 elif mode == "multiply": @@ -56,13 +52,13 @@ class Blend: elif mode == "overlay": return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2)) elif mode == "soft_light": - return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1)) + return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (cls.g(img1) - img1)) elif mode == "difference": return img1 - img2 - else: - raise ValueError(f"Unsupported blend mode: {mode}") + raise ValueError(f"Unsupported blend mode: {mode}") - def g(self, x): + @classmethod + def g(cls, x): return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x)) def gaussian_kernel(kernel_size: int, sigma: float, device=None): @@ -71,38 +67,26 @@ def gaussian_kernel(kernel_size: int, sigma: float, device=None): g = torch.exp(-(d * d) / (2.0 * sigma * sigma)) return g / g.sum() -class Blur: - def __init__(self): - pass +class Blur(io.ComfyNode): + @classmethod + def define_schema(cls): + return 
io.Schema( + node_id="ImageBlur", + category="image/postprocessing", + inputs=[ + io.Image.Input("image"), + io.Int.Input("blur_radius", default=1, min=1, max=31, step=1), + io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1), + ], + outputs=[ + io.Image.Output(), + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "blur_radius": ("INT", { - "default": 1, - "min": 1, - "max": 31, - "step": 1 - }), - "sigma": ("FLOAT", { - "default": 1.0, - "min": 0.1, - "max": 10.0, - "step": 0.1 - }), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "blur" - - CATEGORY = "image/postprocessing" - - def blur(self, image: torch.Tensor, blur_radius: int, sigma: float): + def execute(cls, image: torch.Tensor, blur_radius: int, sigma: float) -> io.NodeOutput: if blur_radius == 0: - return (image,) + return io.NodeOutput(image) image = image.to(comfy.model_management.get_torch_device()) batch_size, height, width, channels = image.shape @@ -115,31 +99,24 @@ class Blur: blurred = F.conv2d(padded_image, kernel, padding=kernel_size // 2, groups=channels)[:,:,blur_radius:-blur_radius, blur_radius:-blur_radius] blurred = blurred.permute(0, 2, 3, 1) - return (blurred.to(comfy.model_management.intermediate_device()),) + return io.NodeOutput(blurred.to(comfy.model_management.intermediate_device())) -class Quantize: - def __init__(self): - pass +class Quantize(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "colors": ("INT", { - "default": 256, - "min": 1, - "max": 256, - "step": 1 - }), - "dither": (["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"],), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "quantize" - - CATEGORY = "image/postprocessing" + def define_schema(cls): + return io.Schema( + node_id="ImageQuantize", + category="image/postprocessing", + inputs=[ + io.Image.Input("image"), + io.Int.Input("colors", default=256, min=1, max=256, step=1), + io.Combo.Input("dither", options=["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"]), + ], + outputs=[ + io.Image.Output(), + ], + ) @staticmethod def bayer(im, pal_im, order): @@ -167,7 +144,8 @@ class Quantize: im = im.quantize(palette=pal_im, dither=Image.Dither.NONE) return im - def quantize(self, image: torch.Tensor, colors: int, dither: str): + @classmethod + def execute(cls, image: torch.Tensor, colors: int, dither: str) -> io.NodeOutput: batch_size, height, width, _ = image.shape result = torch.zeros_like(image) @@ -187,46 +165,29 @@ class Quantize: quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255 result[b] = quantized_array - return (result,) + return io.NodeOutput(result) -class Sharpen: - def __init__(self): - pass +class Sharpen(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ImageSharpen", + category="image/postprocessing", + inputs=[ + io.Image.Input("image"), + io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1), + io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.01), + io.Float.Input("alpha", default=1.0, min=0.0, max=5.0, step=0.01), + ], + outputs=[ + io.Image.Output(), + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "sharpen_radius": ("INT", { - "default": 1, - "min": 1, - "max": 31, - "step": 1 - }), - "sigma": ("FLOAT", { - "default": 1.0, - "min": 0.1, - "max": 10.0, - "step": 0.01 - }), - "alpha": ("FLOAT", { - "default": 1.0, - "min": 
0.0, - "max": 5.0, - "step": 0.01 - }), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "sharpen" - - CATEGORY = "image/postprocessing" - - def sharpen(self, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float): + def execute(cls, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float) -> io.NodeOutput: if sharpen_radius == 0: - return (image,) + return io.NodeOutput(image) batch_size, height, width, channels = image.shape image = image.to(comfy.model_management.get_torch_device()) @@ -245,23 +206,29 @@ class Sharpen: result = torch.clamp(sharpened, 0, 1) - return (result.to(comfy.model_management.intermediate_device()),) + return io.NodeOutput(result.to(comfy.model_management.intermediate_device())) -class ImageScaleToTotalPixels: +class ImageScaleToTotalPixels(io.ComfyNode): upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] crop_methods = ["disabled", "center"] @classmethod - def INPUT_TYPES(s): - return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,), - "megapixels": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 16.0, "step": 0.01}), - }} - RETURN_TYPES = ("IMAGE",) - FUNCTION = "upscale" + def define_schema(cls): + return io.Schema( + node_id="ImageScaleToTotalPixels", + category="image/upscaling", + inputs=[ + io.Image.Input("image"), + io.Combo.Input("upscale_method", options=cls.upscale_methods), + io.Float.Input("megapixels", default=1.0, min=0.01, max=16.0, step=0.01), + ], + outputs=[ + io.Image.Output(), + ], + ) - CATEGORY = "image/upscaling" - - def upscale(self, image, upscale_method, megapixels): + @classmethod + def execute(cls, image, upscale_method, megapixels) -> io.NodeOutput: samples = image.movedim(-1,1) total = int(megapixels * 1024 * 1024) @@ -271,12 +238,18 @@ class ImageScaleToTotalPixels: s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled") s = s.movedim(1,-1) - return (s,) + return io.NodeOutput(s) -NODE_CLASS_MAPPINGS = { - "ImageBlend": Blend, - "ImageBlur": Blur, - "ImageQuantize": Quantize, - "ImageSharpen": Sharpen, - "ImageScaleToTotalPixels": ImageScaleToTotalPixels, -} +class PostProcessingExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + Blend, + Blur, + Quantize, + Sharpen, + ImageScaleToTotalPixels, + ] + +async def comfy_entrypoint() -> PostProcessingExtension: + return PostProcessingExtension() From cd66d72b464fd9d344baa426b50a5f0e5e512f99 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 00:15:44 +0300 Subject: [PATCH 0633/1073] convert CLIPTextEncodeSDXL nodes to V3 schema (#9716) --- comfy_extras/nodes_clip_sdxl.py | 93 +++++++++++++++++++-------------- 1 file changed, 55 insertions(+), 38 deletions(-) diff --git a/comfy_extras/nodes_clip_sdxl.py b/comfy_extras/nodes_clip_sdxl.py index 14269caf3..520ff0e3c 100644 --- a/comfy_extras/nodes_clip_sdxl.py +++ b/comfy_extras/nodes_clip_sdxl.py @@ -1,43 +1,52 @@ -from nodes import MAX_RESOLUTION +from typing_extensions import override -class CLIPTextEncodeSDXLRefiner: +import nodes +from comfy_api.latest import ComfyExtension, io + + +class CLIPTextEncodeSDXLRefiner(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}), - "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - "height": ("INT", {"default": 1024.0, "min": 0, "max": 
MAX_RESOLUTION}), - "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ), - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeSDXLRefiner", + category="advanced/conditioning", + inputs=[ + io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01), + io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), + io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION), + io.String.Input("text", multiline=True, dynamic_prompts=True), + io.Clip.Input("clip"), + ], + outputs=[io.Conditioning.Output()], + ) - CATEGORY = "advanced/conditioning" - - def encode(self, clip, ascore, width, height, text): + @classmethod + def execute(cls, clip, ascore, width, height, text) -> io.NodeOutput: tokens = clip.tokenize(text) - return (clip.encode_from_tokens_scheduled(tokens, add_dict={"aesthetic_score": ascore, "width": width, "height": height}), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"aesthetic_score": ascore, "width": width, "height": height})) -class CLIPTextEncodeSDXL: +class CLIPTextEncodeSDXL(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), - "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), - "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - "text_g": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "text_l": ("STRING", {"multiline": True, "dynamicPrompts": True}), - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeSDXL", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), + io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION), + io.Int.Input("crop_w", default=0, min=0, max=nodes.MAX_RESOLUTION), + io.Int.Input("crop_h", default=0, min=0, max=nodes.MAX_RESOLUTION), + io.Int.Input("target_width", default=1024, min=0, max=nodes.MAX_RESOLUTION), + io.Int.Input("target_height", default=1024, min=0, max=nodes.MAX_RESOLUTION), + io.String.Input("text_g", multiline=True, dynamic_prompts=True), + io.String.Input("text_l", multiline=True, dynamic_prompts=True), + ], + outputs=[io.Conditioning.Output()], + ) - CATEGORY = "advanced/conditioning" - - def encode(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l): + @classmethod + def execute(cls, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l) -> io.NodeOutput: tokens = clip.tokenize(text_g) tokens["l"] = clip.tokenize(text_l)["l"] if len(tokens["l"]) != len(tokens["g"]): @@ -46,9 +55,17 @@ class CLIPTextEncodeSDXL: tokens["l"] += empty["l"] while len(tokens["l"]) > len(tokens["g"]): tokens["g"] += empty["g"] - return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height, "crop_w": 
crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height})) -NODE_CLASS_MAPPINGS = { - "CLIPTextEncodeSDXLRefiner": CLIPTextEncodeSDXLRefiner, - "CLIPTextEncodeSDXL": CLIPTextEncodeSDXL, -} + +class ClipSdxlExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + CLIPTextEncodeSDXLRefiner, + CLIPTextEncodeSDXL, + ] + + +async def comfy_entrypoint() -> ClipSdxlExtension: + return ClipSdxlExtension() From 1e098d61327e1c02c1a47b2626514474aa8e3c7e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 26 Sep 2025 15:34:17 -0700 Subject: [PATCH 0634/1073] Don't add template to qwen2.5vl when template is in prompt. (#10043) Make the hunyuan image refiner template_end 36. --- comfy/text_encoders/hunyuan_image.py | 8 ++++- comfy/text_encoders/qwen_image.py | 46 +++++++++++++++++----------- 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/comfy/text_encoders/hunyuan_image.py b/comfy/text_encoders/hunyuan_image.py index 699eddc33..ff04726e1 100644 --- a/comfy/text_encoders/hunyuan_image.py +++ b/comfy/text_encoders/hunyuan_image.py @@ -63,7 +63,13 @@ class HunyuanImageTEModel(QwenImageTEModel): self.byt5_small = None def encode_token_weights(self, token_weight_pairs): - cond, p, extra = super().encode_token_weights(token_weight_pairs) + tok_pairs = token_weight_pairs["qwen25_7b"][0] + template_end = -1 + if tok_pairs[0][0] == 27: + if len(tok_pairs) > 36: # refiner prompt uses a fixed 36 template_end + template_end = 36 + + cond, p, extra = super().encode_token_weights(token_weight_pairs, template_end=template_end) if self.byt5_small is not None and "byt5" in token_weight_pairs: out = self.byt5_small.encode_token_weights(token_weight_pairs["byt5"]) extra["conditioning_byt5small"] = out[0] diff --git a/comfy/text_encoders/qwen_image.py b/comfy/text_encoders/qwen_image.py index 6646b1003..40fa67937 100644 --- a/comfy/text_encoders/qwen_image.py +++ b/comfy/text_encoders/qwen_image.py @@ -18,13 +18,22 @@ class QwenImageTokenizer(sd1_clip.SD1Tokenizer): self.llama_template_images = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. 
Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n" def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, images=[], **kwargs): - if llama_template is None: - if len(images) > 0: - llama_text = self.llama_template_images.format(text) - else: - llama_text = self.llama_template.format(text) + skip_template = False + if text.startswith('<|im_start|>'): + skip_template = True + if text.startswith('<|start_header_id|>'): + skip_template = True + + if skip_template: + llama_text = text else: - llama_text = llama_template.format(text) + if llama_template is None: + if len(images) > 0: + llama_text = self.llama_template_images.format(text) + else: + llama_text = self.llama_template.format(text) + else: + llama_text = llama_template.format(text) tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs) key_name = next(iter(tokens)) embed_count = 0 @@ -47,22 +56,23 @@ class QwenImageTEModel(sd1_clip.SD1ClipModel): def __init__(self, device="cpu", dtype=None, model_options={}): super().__init__(device=device, dtype=dtype, name="qwen25_7b", clip_model=Qwen25_7BVLIModel, model_options=model_options) - def encode_token_weights(self, token_weight_pairs): + def encode_token_weights(self, token_weight_pairs, template_end=-1): out, pooled, extra = super().encode_token_weights(token_weight_pairs) tok_pairs = token_weight_pairs["qwen25_7b"][0] count_im_start = 0 - for i, v in enumerate(tok_pairs): - elem = v[0] - if not torch.is_tensor(elem): - if isinstance(elem, numbers.Integral): - if elem == 151644 and count_im_start < 2: - template_end = i - count_im_start += 1 + if template_end == -1: + for i, v in enumerate(tok_pairs): + elem = v[0] + if not torch.is_tensor(elem): + if isinstance(elem, numbers.Integral): + if elem == 151644 and count_im_start < 2: + template_end = i + count_im_start += 1 - if out.shape[1] > (template_end + 3): - if tok_pairs[template_end + 1][0] == 872: - if tok_pairs[template_end + 2][0] == 198: - template_end += 3 + if out.shape[1] > (template_end + 3): + if tok_pairs[template_end + 1][0] == 872: + if tok_pairs[template_end + 2][0] == 198: + template_end += 3 out = out[:, template_end:] From 196954ab8c55bc4ac48113686a57ce250677c7b5 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Fri, 26 Sep 2025 19:55:03 -0700 Subject: [PATCH 0635/1073] Add 'input_cond' and 'input_uncond' to the args dictionary passed into sampler_cfg_function (#10044) --- comfy/samplers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index b3202cec6..c59e296a1 100755 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -360,7 +360,7 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): def cfg_function(model, cond_pred, uncond_pred, cond_scale, x, timestep, model_options={}, cond=None, uncond=None): if "sampler_cfg_function" in model_options: args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep, - "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options} + "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options, "input_cond": cond, "input_uncond": uncond} cfg_result = x - 
model_options["sampler_cfg_function"](args) else: cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale @@ -390,7 +390,7 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option for fn in model_options.get("sampler_pre_cfg_function", []): args = {"conds":conds, "conds_out": out, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep, "model": model, "model_options": model_options} - out = fn(args) + out = fn(args) return cfg_function(model, out[0], out[1], cond_scale, x, timestep, model_options=model_options, cond=cond, uncond=uncond_) From 0572029fee48741a8cf34a8e4d485898c5ab5dfd Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sat, 27 Sep 2025 12:18:16 +0800 Subject: [PATCH 0636/1073] Update template to 0.1.88 (#10046) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2980bebdd..b3f81e8fa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.26.13 -comfyui-workflow-templates==0.1.86 +comfyui-workflow-templates==0.1.88 comfyui-embedded-docs==0.2.6 torch torchsde From 255572188f79e5c58fa997bf73529021129459a9 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Fri, 26 Sep 2025 21:29:13 -0700 Subject: [PATCH 0637/1073] Add workflow templates version tracking to system_stats (#9089) Adds installed and required workflow templates version information to the /system_stats endpoint, allowing the frontend to detect and notify users when their templates package is outdated. - Add get_installed_templates_version() and get_required_templates_version() methods to FrontendManager - Include templates version info in system_stats response - Add comprehensive unit tests for the new functionality --- app/frontend_management.py | 33 +++++++++ server.py | 4 ++ tests-unit/app_test/frontend_manager_test.py | 71 ++++++++++++++++++++ 3 files changed, 108 insertions(+) diff --git a/app/frontend_management.py b/app/frontend_management.py index 0bee73685..cce0c117d 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -42,6 +42,7 @@ def get_installed_frontend_version(): frontend_version_str = version("comfyui-frontend-package") return frontend_version_str + def get_required_frontend_version(): """Get the required frontend version from requirements.txt.""" try: @@ -63,6 +64,7 @@ def get_required_frontend_version(): logging.error(f"Error reading requirements.txt: {e}") return None + def check_frontend_version(): """Check if the frontend version is up to date.""" @@ -203,6 +205,37 @@ class FrontendManager: """Get the required frontend package version.""" return get_required_frontend_version() + @classmethod + def get_installed_templates_version(cls) -> str: + """Get the currently installed workflow templates package version.""" + try: + templates_version_str = version("comfyui-workflow-templates") + return templates_version_str + except Exception: + return None + + @classmethod + def get_required_templates_version(cls) -> str: + """Get the required workflow templates version from requirements.txt.""" + try: + with open(requirements_path, "r", encoding="utf-8") as f: + for line in f: + line = line.strip() + if line.startswith("comfyui-workflow-templates=="): + version_str = line.split("==")[-1] + if not is_valid_version(version_str): + logging.error(f"Invalid templates version format in requirements.txt: {version_str}") + return None + return version_str + logging.error("comfyui-workflow-templates not found in 
requirements.txt") + return None + except FileNotFoundError: + logging.error("requirements.txt not found. Cannot determine required templates version.") + return None + except Exception as e: + logging.error(f"Error reading requirements.txt: {e}") + return None + @classmethod def default_frontend_path(cls) -> str: try: diff --git a/server.py b/server.py index 603677397..80e9d3fa7 100644 --- a/server.py +++ b/server.py @@ -550,6 +550,8 @@ class PromptServer(): vram_total, torch_vram_total = comfy.model_management.get_total_memory(device, torch_total_too=True) vram_free, torch_vram_free = comfy.model_management.get_free_memory(device, torch_free_too=True) required_frontend_version = FrontendManager.get_required_frontend_version() + installed_templates_version = FrontendManager.get_installed_templates_version() + required_templates_version = FrontendManager.get_required_templates_version() system_stats = { "system": { @@ -558,6 +560,8 @@ class PromptServer(): "ram_free": ram_free, "comfyui_version": __version__, "required_frontend_version": required_frontend_version, + "installed_templates_version": installed_templates_version, + "required_templates_version": required_templates_version, "python_version": sys.version, "pytorch_version": comfy.model_management.torch_version, "embedded_python": os.path.split(os.path.split(sys.executable)[0])[1] == "python_embeded", diff --git a/tests-unit/app_test/frontend_manager_test.py b/tests-unit/app_test/frontend_manager_test.py index ce43ac564..643f04e72 100644 --- a/tests-unit/app_test/frontend_manager_test.py +++ b/tests-unit/app_test/frontend_manager_test.py @@ -205,3 +205,74 @@ numpy""" # Assert assert version is None + + +def test_get_templates_version(): + # Arrange + expected_version = "0.1.41" + mock_requirements_content = """torch +torchsde +comfyui-frontend-package==1.25.0 +comfyui-workflow-templates==0.1.41 +other-package==1.0.0 +numpy""" + + # Act + with patch("builtins.open", mock_open(read_data=mock_requirements_content)): + version = FrontendManager.get_required_templates_version() + + # Assert + assert version == expected_version + + +def test_get_templates_version_not_found(): + # Arrange + mock_requirements_content = """torch +torchsde +comfyui-frontend-package==1.25.0 +other-package==1.0.0 +numpy""" + + # Act + with patch("builtins.open", mock_open(read_data=mock_requirements_content)): + version = FrontendManager.get_required_templates_version() + + # Assert + assert version is None + + +def test_get_templates_version_invalid_semver(): + # Arrange + mock_requirements_content = """torch +torchsde +comfyui-workflow-templates==1.0.0.beta +other-package==1.0.0 +numpy""" + + # Act + with patch("builtins.open", mock_open(read_data=mock_requirements_content)): + version = FrontendManager.get_required_templates_version() + + # Assert + assert version is None + + +def test_get_installed_templates_version(): + # Arrange + expected_version = "0.1.40" + + # Act + with patch("app.frontend_management.version", return_value=expected_version): + version = FrontendManager.get_installed_templates_version() + + # Assert + assert version == expected_version + + +def test_get_installed_templates_version_not_installed(): + # Act + with patch("app.frontend_management.version", side_effect=Exception("Package not found")): + version = FrontendManager.get_installed_templates_version() + + # Assert + assert version is None From a9cf1cd249773632949bec2262f921f64378127f Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> 
Date: Sat, 27 Sep 2025 09:13:05 +0300 Subject: [PATCH 0638/1073] convert nodes_hidream.py to V3 schema (#9946) --- comfy_extras/nodes_hidream.py | 88 +++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 35 deletions(-) diff --git a/comfy_extras/nodes_hidream.py b/comfy_extras/nodes_hidream.py index dfb98597b..eee683ee1 100644 --- a/comfy_extras/nodes_hidream.py +++ b/comfy_extras/nodes_hidream.py @@ -1,55 +1,73 @@ +from typing_extensions import override + import folder_paths import comfy.sd import comfy.model_management +from comfy_api.latest import ComfyExtension, io -class QuadrupleCLIPLoader: +class QuadrupleCLIPLoader(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), - "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), - "clip_name3": (folder_paths.get_filename_list("text_encoders"), ), - "clip_name4": (folder_paths.get_filename_list("text_encoders"), ) - }} - RETURN_TYPES = ("CLIP",) - FUNCTION = "load_clip" + def define_schema(cls): + return io.Schema( + node_id="QuadrupleCLIPLoader", + category="advanced/loaders", + description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct", + inputs=[ + io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")), + io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")), + io.Combo.Input("clip_name3", options=folder_paths.get_filename_list("text_encoders")), + io.Combo.Input("clip_name4", options=folder_paths.get_filename_list("text_encoders")), + ], + outputs=[ + io.Clip.Output(), + ] + ) - CATEGORY = "advanced/loaders" - - DESCRIPTION = "[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct" - - def load_clip(self, clip_name1, clip_name2, clip_name3, clip_name4): + @classmethod + def execute(cls, clip_name1, clip_name2, clip_name3, clip_name4): clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1) clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2) clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3) clip_path4 = folder_paths.get_full_path_or_raise("text_encoders", clip_name4) clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2, clip_path3, clip_path4], embedding_directory=folder_paths.get_folder_paths("embeddings")) - return (clip,) + return io.NodeOutput(clip) -class CLIPTextEncodeHiDream: +class CLIPTextEncodeHiDream(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "clip_g": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "llama": ("STRING", {"multiline": True, "dynamicPrompts": True}) - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" - - CATEGORY = "advanced/conditioning" - - def encode(self, clip, clip_l, clip_g, t5xxl, llama): + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeHiDream", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("clip_l", multiline=True, dynamic_prompts=True), + io.String.Input("clip_g", multiline=True, dynamic_prompts=True), + io.String.Input("t5xxl", multiline=True, dynamic_prompts=True), + io.String.Input("llama", multiline=True, dynamic_prompts=True), + ], + outputs=[ + io.Conditioning.Output(), + ] + ) + @classmethod + def execute(cls, 
clip, clip_l, clip_g, t5xxl, llama): tokens = clip.tokenize(clip_g) tokens["l"] = clip.tokenize(clip_l)["l"] tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"] tokens["llama"] = clip.tokenize(llama)["llama"] - return (clip.encode_from_tokens_scheduled(tokens), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) -NODE_CLASS_MAPPINGS = { - "QuadrupleCLIPLoader": QuadrupleCLIPLoader, - "CLIPTextEncodeHiDream": CLIPTextEncodeHiDream, -} + +class HiDreamExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + QuadrupleCLIPLoader, + CLIPTextEncodeHiDream, + ] + + +async def comfy_entrypoint() -> HiDreamExtension: + return HiDreamExtension() From 6b4b671ce7b6c412c2db9f9f83ff8e27dbcfd959 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 12:27:01 +0300 Subject: [PATCH 0639/1073] convert nodes_bfl.py to V3 schema (#10033) --- comfy_api_nodes/nodes_bfl.py | 1056 ++++++++++++++++------------------ 1 file changed, 489 insertions(+), 567 deletions(-) diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index c09be8d5b..77914021d 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -2,7 +2,8 @@ import asyncio import io from inspect import cleandoc from typing import Union, Optional -from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api_nodes.apis.bfl_api import ( BFLStatus, BFLFluxExpandImageRequest, @@ -130,7 +131,7 @@ def convert_image_to_base64(image: torch.Tensor): return base64.b64encode(img_byte_arr.getvalue()).decode() -class FluxProUltraImageNode(ComfyNodeABC): +class FluxProUltraImageNode(comfy_io.ComfyNode): """ Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution. """ @@ -141,71 +142,67 @@ class FluxProUltraImageNode(ComfyNodeABC): MAXIMUM_RATIO_STR = "4:1" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="FluxProUltraImageNode", + display_name="Flux 1.1 [pro] Ultra Image", + category="api node/image/BFL", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, + comfy_io.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", ), - "aspect_ratio": ( - IO.STRING, - { - "default": "16:9", - "tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", - }, + comfy_io.String.Input( + "aspect_ratio", + default="16:9", + tooltip="Aspect ratio of image; must be between 1:4 and 4:1.", ), - "raw": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "When True, generate less processed, more natural-looking images.", - }, + comfy_io.Boolean.Input( + "raw", + default=False, + tooltip="When True, generate less processed, more natural-looking images.", ), - }, - "optional": { - "image_prompt": (IO.IMAGE,), - "image_prompt_strength": ( - IO.FLOAT, - { - "default": 0.1, - "min": 0.0, - "max": 1.0, - "step": 0.01, - "tooltip": "Blend between the prompt and the image prompt.", - }, + comfy_io.Image.Input( + "image_prompt", + optional=True, ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + comfy_io.Float.Input( + "image_prompt_strength", + default=0.1, + min=0.0, + max=1.0, + step=0.01, + tooltip="Blend between the prompt and the image prompt.", + optional=True, + ), + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def VALIDATE_INPUTS(cls, aspect_ratio: str): + def validate_inputs(cls, aspect_ratio: str): try: validate_aspect_ratio( aspect_ratio, @@ -218,14 +215,9 @@ class FluxProUltraImageNode(ComfyNodeABC): return str(e) return True - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" - - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, aspect_ratio: str, prompt_upsampling=False, @@ -233,9 +225,7 @@ class FluxProUltraImageNode(ComfyNodeABC): seed=0, image_prompt=None, image_prompt_strength=0.1, - unique_id: Union[str, None] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: if image_prompt is None: validate_string(prompt, strip_whitespace=False) operation = SynchronousOperation( @@ -251,10 +241,10 @@ class FluxProUltraImageNode(ComfyNodeABC): seed=seed, aspect_ratio=validate_aspect_ratio( aspect_ratio, - minimum_ratio=self.MINIMUM_RATIO, - maximum_ratio=self.MAXIMUM_RATIO, - minimum_ratio_str=self.MINIMUM_RATIO_STR, - maximum_ratio_str=self.MAXIMUM_RATIO_STR, + minimum_ratio=cls.MINIMUM_RATIO, + maximum_ratio=cls.MAXIMUM_RATIO, + minimum_ratio_str=cls.MINIMUM_RATIO_STR, + maximum_ratio_str=cls.MAXIMUM_RATIO_STR, ), raw=raw, image_prompt=( @@ -266,13 +256,16 @@ class FluxProUltraImageNode(ComfyNodeABC): None if image_prompt is None else round(image_prompt_strength, 2) ), ), - auth_kwargs=kwargs, + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) - return 
(output_image,) + output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) + return comfy_io.NodeOutput(output_image) -class FluxKontextProImageNode(ComfyNodeABC): +class FluxKontextProImageNode(comfy_io.ComfyNode): """ Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio. """ @@ -283,81 +276,73 @@ class FluxKontextProImageNode(ComfyNodeABC): MAXIMUM_RATIO_STR = "4:1" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation - specify what and how to edit.", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id=cls.NODE_ID, + display_name=cls.DISPLAY_NAME, + category="api node/image/BFL", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation - specify what and how to edit.", ), - "aspect_ratio": ( - IO.STRING, - { - "default": "16:9", - "tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", - }, + comfy_io.String.Input( + "aspect_ratio", + default="16:9", + tooltip="Aspect ratio of image; must be between 1:4 and 4:1.", ), - "guidance": ( - IO.FLOAT, - { - "default": 3.0, - "min": 0.1, - "max": 99.0, - "step": 0.1, - "tooltip": "Guidance strength for the image generation process" - }, + comfy_io.Float.Input( + "guidance", + default=3.0, + min=0.1, + max=99.0, + step=0.1, + tooltip="Guidance strength for the image generation process", ), - "steps": ( - IO.INT, - { - "default": 50, - "min": 1, - "max": 150, - "tooltip": "Number of steps for the image generation process" - }, + comfy_io.Int.Input( + "steps", + default=50, + min=1, + max=150, + tooltip="Number of steps for the image generation process", ), - "seed": ( - IO.INT, - { - "default": 1234, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=1234, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, + comfy_io.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - }, - "optional": { - "input_image": (IO.IMAGE,), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" + comfy_io.Image.Input( + "input_image", + optional=True, + ), + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) BFL_PATH = "/proxy/bfl/flux-kontext-pro/generate" + NODE_ID = "FluxKontextProImageNode" + DISPLAY_NAME = "Flux.1 Kontext [pro] Image" - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, aspect_ratio: str, guidance: float, @@ -365,21 +350,19 @@ class FluxKontextProImageNode(ComfyNodeABC): input_image: Optional[torch.Tensor]=None, seed=0, prompt_upsampling=False, - unique_id: Union[str, None] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: aspect_ratio = validate_aspect_ratio( aspect_ratio, - minimum_ratio=self.MINIMUM_RATIO, - maximum_ratio=self.MAXIMUM_RATIO, - minimum_ratio_str=self.MINIMUM_RATIO_STR, - maximum_ratio_str=self.MAXIMUM_RATIO_STR, + minimum_ratio=cls.MINIMUM_RATIO, + maximum_ratio=cls.MAXIMUM_RATIO, + minimum_ratio_str=cls.MINIMUM_RATIO_STR, + maximum_ratio_str=cls.MAXIMUM_RATIO_STR, ) if input_image is None: validate_string(prompt, strip_whitespace=False) operation = SynchronousOperation( endpoint=ApiEndpoint( - path=self.BFL_PATH, + path=cls.BFL_PATH, method=HttpMethod.POST, request_model=BFLFluxKontextProGenerateRequest, response_model=BFLFluxProGenerateResponse, @@ -397,10 +380,13 @@ class FluxKontextProImageNode(ComfyNodeABC): else convert_image_to_base64(input_image) ) ), - auth_kwargs=kwargs, + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) - return (output_image,) + output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) + return comfy_io.NodeOutput(output_image) class FluxKontextMaxImageNode(FluxKontextProImageNode): @@ -410,63 +396,60 @@ class FluxKontextMaxImageNode(FluxKontextProImageNode): DESCRIPTION = cleandoc(__doc__ or "") BFL_PATH = "/proxy/bfl/flux-kontext-max/generate" + NODE_ID = "FluxKontextMaxImageNode" + DISPLAY_NAME = "Flux.1 Kontext [max] Image" -class FluxProImageNode(ComfyNodeABC): +class FluxProImageNode(comfy_io.ComfyNode): """ Generates images synchronously based on prompt and resolution. """ @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="FluxProImageNode", + display_name="Flux 1.1 [pro] Image", + category="api node/image/BFL", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, + comfy_io.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - "width": ( - IO.INT, - { - "default": 1024, - "min": 256, - "max": 1440, - "step": 32, - }, + comfy_io.Int.Input( + "width", + default=1024, + min=256, + max=1440, + step=32, ), - "height": ( - IO.INT, - { - "default": 768, - "min": 256, - "max": 1440, - "step": 32, - }, + comfy_io.Int.Input( + "height", + default=768, + min=256, + max=1440, + step=32, ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + ), + comfy_io.Image.Input( + "image_prompt", + optional=True, ), - }, - "optional": { - "image_prompt": (IO.IMAGE,), # "image_prompt_strength": ( # IO.FLOAT, # { @@ -477,22 +460,19 @@ class FluxProImageNode(ComfyNodeABC): # "tooltip": "Blend between the prompt and the image prompt.", # }, # ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" - - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, prompt_upsampling, width: int, @@ -500,9 +480,7 @@ class FluxProImageNode(ComfyNodeABC): seed=0, image_prompt=None, # image_prompt_strength=0.1, - unique_id: Union[str, None] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: image_prompt = ( image_prompt if image_prompt is None @@ -524,118 +502,103 @@ class FluxProImageNode(ComfyNodeABC): seed=seed, image_prompt=image_prompt, ), - auth_kwargs=kwargs, + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) - return (output_image,) + output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) + return comfy_io.NodeOutput(output_image) -class FluxProExpandNode(ComfyNodeABC): +class FluxProExpandNode(comfy_io.ComfyNode): """ Outpaints image based on prompt. 
""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="FluxProExpandNode", + display_name="Flux.1 Expand Image", + category="api node/image/BFL", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("image"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, + comfy_io.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - "top": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2048, - "tooltip": "Number of pixels to expand at the top of the image" - }, + comfy_io.Int.Input( + "top", + default=0, + min=0, + max=2048, + tooltip="Number of pixels to expand at the top of the image", ), - "bottom": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2048, - "tooltip": "Number of pixels to expand at the bottom of the image" - }, + comfy_io.Int.Input( + "bottom", + default=0, + min=0, + max=2048, + tooltip="Number of pixels to expand at the bottom of the image", ), - "left": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2048, - "tooltip": "Number of pixels to expand at the left side of the image" - }, + comfy_io.Int.Input( + "left", + default=0, + min=0, + max=2048, + tooltip="Number of pixels to expand at the left of the image", ), - "right": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2048, - "tooltip": "Number of pixels to expand at the right side of the image" - }, + comfy_io.Int.Input( + "right", + default=0, + min=0, + max=2048, + tooltip="Number of pixels to expand at the right of the image", ), - "guidance": ( - IO.FLOAT, - { - "default": 60, - "min": 1.5, - "max": 100, - "tooltip": "Guidance strength for the image generation process" - }, + comfy_io.Float.Input( + "guidance", + default=60, + min=1.5, + max=100, + tooltip="Guidance strength for the image generation process", ), - "steps": ( - IO.INT, - { - "default": 50, - "min": 15, - "max": 50, - "tooltip": "Number of steps for the image generation process" - }, + comfy_io.Int.Input( + "steps", + default=50, + min=15, + max=50, + tooltip="Number of steps for the image generation process", ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", ), - }, - "optional": {}, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - 
RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" - - async def api_call( - self, + @classmethod + async def execute( + cls, image: torch.Tensor, prompt: str, prompt_upsampling: bool, @@ -646,9 +609,7 @@ class FluxProExpandNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - unique_id: Union[str, None] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: image = convert_image_to_base64(image) operation = SynchronousOperation( @@ -670,84 +631,77 @@ class FluxProExpandNode(ComfyNodeABC): seed=seed, image=image, ), - auth_kwargs=kwargs, + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) - return (output_image,) + output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) + return comfy_io.NodeOutput(output_image) -class FluxProFillNode(ComfyNodeABC): +class FluxProFillNode(comfy_io.ComfyNode): """ Inpaints image based on mask and prompt. """ @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE,), - "mask": (IO.MASK,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="FluxProFillNode", + display_name="Flux.1 Fill Image", + category="api node/image/BFL", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("image"), + comfy_io.Mask.Input("mask"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, + comfy_io.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - "guidance": ( - IO.FLOAT, - { - "default": 60, - "min": 1.5, - "max": 100, - "tooltip": "Guidance strength for the image generation process" - }, + comfy_io.Float.Input( + "guidance", + default=60, + min=1.5, + max=100, + tooltip="Guidance strength for the image generation process", ), - "steps": ( - IO.INT, - { - "default": 50, - "min": 15, - "max": 50, - "tooltip": "Number of steps for the image generation process" - }, + comfy_io.Int.Input( + "steps", + default=50, + min=15, + max=50, + tooltip="Number of steps for the image generation process", ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", ), - }, - "optional": {}, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" - - async def api_call( - self, + @classmethod + async def execute( + cls, image: torch.Tensor, mask: torch.Tensor, prompt: str, @@ -755,9 +709,7 @@ class FluxProFillNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - unique_id: Union[str, None] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: # prepare mask mask = resize_mask_to_image(mask, image) mask = convert_image_to_base64(convert_mask_to_image(mask)) @@ -780,109 +732,96 @@ class FluxProFillNode(ComfyNodeABC): image=image, mask=mask, ), - auth_kwargs=kwargs, + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) - return (output_image,) + output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) + return comfy_io.NodeOutput(output_image) -class FluxProCannyNode(ComfyNodeABC): +class FluxProCannyNode(comfy_io.ComfyNode): """ Generate image using a control image (canny). """ @classmethod - def INPUT_TYPES(s): - return { - "required": { - "control_image": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="FluxProCannyNode", + display_name="Flux.1 Canny Control Image", + category="api node/image/BFL", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("control_image"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, + comfy_io.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - "canny_low_threshold": ( - IO.FLOAT, - { - "default": 0.1, - "min": 0.01, - "max": 0.99, - "step": 0.01, - "tooltip": "Low threshold for Canny edge detection; ignored if skip_processing is True" - }, + comfy_io.Float.Input( + "canny_low_threshold", + default=0.1, + min=0.01, + max=0.99, + step=0.01, + tooltip="Low threshold for Canny edge detection; ignored if skip_processing is True", ), - "canny_high_threshold": ( - IO.FLOAT, - { - "default": 0.4, - "min": 0.01, - "max": 0.99, - "step": 0.01, - "tooltip": "High threshold for Canny edge detection; ignored if skip_processing is True" - }, + comfy_io.Float.Input( + "canny_high_threshold", + default=0.4, + min=0.01, + max=0.99, + step=0.01, + tooltip="High threshold for Canny edge detection; ignored if skip_processing is True", ), - "skip_preprocessing": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.", - }, + comfy_io.Boolean.Input( + "skip_preprocessing", + default=False, + tooltip="Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.", ), - "guidance": ( - IO.FLOAT, - { - "default": 30, - "min": 1, - "max": 100, - "tooltip": "Guidance strength for the image generation process" - }, + comfy_io.Float.Input( + "guidance", + default=30, + min=1, + max=100, + tooltip="Guidance strength for the image generation process", ), - "steps": ( - IO.INT, - { - "default": 50, - "min": 15, - "max": 50, - "tooltip": "Number of steps for the image generation process" - }, + comfy_io.Int.Input( + "steps", + default=50, + min=15, + max=50, + tooltip="Number of steps for the image generation process", ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", ), - }, - "optional": {}, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" - - async def api_call( - self, + @classmethod + async def execute( + cls, control_image: torch.Tensor, prompt: str, prompt_upsampling: bool, @@ -892,9 +831,7 @@ class FluxProCannyNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - unique_id: Union[str, None] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: control_image = convert_image_to_base64(control_image[:, :, :, :3]) preprocessed_image = None @@ -929,89 +866,80 @@ class FluxProCannyNode(ComfyNodeABC): 
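# ---------------------------------------------------------------------------
# A minimal sketch of what a helper like convert_image_to_base64 (called just
# above with control_image[:, :, :, :3]) plausibly does: take the first batch
# item of a (B, H, W, C) float IMAGE tensor in [0, 1], drop any alpha channel,
# and PNG-encode it to base64. Hedged — the real helper lives elsewhere in
# comfy_api_nodes and may differ in detail; this is for orientation only.
import base64
from io import BytesIO

import torch
from PIL import Image

def image_tensor_to_base64_png(image: torch.Tensor) -> str:
    # ComfyUI IMAGE tensors are batched (B, H, W, C) floats in [0, 1].
    arr = (image[0, :, :, :3].clamp(0, 1) * 255).round().to(torch.uint8).cpu().numpy()
    buf = BytesIO()
    Image.fromarray(arr).save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")
# ---------------------------------------------------------------------------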
canny_high_threshold=canny_high_threshold, preprocessed_image=preprocessed_image, ), - auth_kwargs=kwargs, + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) - return (output_image,) + output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) + return comfy_io.NodeOutput(output_image) -class FluxProDepthNode(ComfyNodeABC): +class FluxProDepthNode(comfy_io.ComfyNode): """ Generate image using a control image (depth). """ @classmethod - def INPUT_TYPES(s): - return { - "required": { - "control_image": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="FluxProDepthNode", + display_name="Flux.1 Depth Control Image", + category="api node/image/BFL", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("control_image"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", ), - "prompt_upsampling": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - }, + comfy_io.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - "skip_preprocessing": ( - IO.BOOLEAN, - { - "default": False, - "tooltip": "Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.", - }, + comfy_io.Boolean.Input( + "skip_preprocessing", + default=False, + tooltip="Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.", ), - "guidance": ( - IO.FLOAT, - { - "default": 15, - "min": 1, - "max": 100, - "tooltip": "Guidance strength for the image generation process" - }, + comfy_io.Float.Input( + "guidance", + default=15, + min=1, + max=100, + tooltip="Guidance strength for the image generation process", ), - "steps": ( - IO.INT, - { - "default": 50, - "min": 15, - "max": 50, - "tooltip": "Number of steps for the image generation process" - }, + comfy_io.Int.Input( + "steps", + default=50, + min=15, + max=50, + tooltip="Number of steps for the image generation process", ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "The random seed used for creating the noise.", - }, + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", ), - }, - "optional": {}, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - 
FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/BFL" - - async def api_call( - self, + @classmethod + async def execute( + cls, control_image: torch.Tensor, prompt: str, prompt_upsampling: bool, @@ -1019,9 +947,7 @@ class FluxProDepthNode(ComfyNodeABC): steps: int, guidance: float, seed=0, - unique_id: Union[str, None] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: control_image = convert_image_to_base64(control_image[:,:,:,:3]) preprocessed_image = None @@ -1045,33 +971,29 @@ class FluxProDepthNode(ComfyNodeABC): control_image=control_image, preprocessed_image=preprocessed_image, ), - auth_kwargs=kwargs, + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=unique_id) - return (output_image,) + output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) + return comfy_io.NodeOutput(output_image) -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "FluxProUltraImageNode": FluxProUltraImageNode, - # "FluxProImageNode": FluxProImageNode, - "FluxKontextProImageNode": FluxKontextProImageNode, - "FluxKontextMaxImageNode": FluxKontextMaxImageNode, - "FluxProExpandNode": FluxProExpandNode, - "FluxProFillNode": FluxProFillNode, - "FluxProCannyNode": FluxProCannyNode, - "FluxProDepthNode": FluxProDepthNode, -} +class BFLExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + FluxProUltraImageNode, + # FluxProImageNode, + FluxKontextProImageNode, + FluxKontextMaxImageNode, + FluxProExpandNode, + FluxProFillNode, + FluxProCannyNode, + FluxProDepthNode, + ] -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "FluxProUltraImageNode": "Flux 1.1 [pro] Ultra Image", - # "FluxProImageNode": "Flux 1.1 [pro] Image", - "FluxKontextProImageNode": "Flux.1 Kontext [pro] Image", - "FluxKontextMaxImageNode": "Flux.1 Kontext [max] Image", - "FluxProExpandNode": "Flux.1 Expand Image", - "FluxProFillNode": "Flux.1 Fill Image", - "FluxProCannyNode": "Flux.1 Canny Control Image", - "FluxProDepthNode": "Flux.1 Depth Control Image", -} + +async def comfy_entrypoint() -> BFLExtension: + return BFLExtension() From bcfd80dd79ccfa77a7da69380795fbb55b65b1ba Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 12:28:11 +0300 Subject: [PATCH 0640/1073] convert nodes_luma.py to V3 schema (#10030) --- comfy_api_nodes/nodes_luma.py | 774 +++++++++++++++++----------------- 1 file changed, 396 insertions(+), 378 deletions(-) diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index b3c32bed5..9cd02ffd2 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -1,7 +1,8 @@ from __future__ import annotations from inspect import cleandoc from typing import Optional -from comfy.comfy_types.node_typing import IO, ComfyNodeABC +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis.luma_api import ( LumaImageModel, @@ -51,174 +52,186 @@ def image_result_url_extractor(response: LumaGeneration): def video_result_url_extractor(response: LumaGeneration): return response.assets.video if 
hasattr(response, "assets") and hasattr(response.assets, "video") else None -class LumaReferenceNode(ComfyNodeABC): +class LumaReferenceNode(comfy_io.ComfyNode): """ Holds an image and weight for use with Luma Generate Image node. """ - RETURN_TYPES = (LumaIO.LUMA_REF,) - RETURN_NAMES = ("luma_ref",) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "create_luma_reference" - CATEGORY = "api node/image/Luma" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="LumaReferenceNode", + display_name="Luma Reference", + category="api node/image/Luma", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input( + "image", + tooltip="Image to use as reference.", + ), + comfy_io.Float.Input( + "weight", + default=1.0, + min=0.0, + max=1.0, + step=0.01, + tooltip="Weight of image reference.", + ), + comfy_io.Custom(LumaIO.LUMA_REF).Input( + "luma_ref", + optional=True, + ), + ], + outputs=[comfy_io.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ( - IO.IMAGE, - { - "tooltip": "Image to use as reference.", - }, - ), - "weight": ( - IO.FLOAT, - { - "default": 1.0, - "min": 0.0, - "max": 1.0, - "step": 0.01, - "tooltip": "Weight of image reference.", - }, - ), - }, - "optional": {"luma_ref": (LumaIO.LUMA_REF,)}, - } - - def create_luma_reference( - self, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None - ): + def execute( + cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None + ) -> comfy_io.NodeOutput: if luma_ref is not None: luma_ref = luma_ref.clone() else: luma_ref = LumaReferenceChain() luma_ref.add(LumaReference(image=image, weight=round(weight, 2))) - return (luma_ref,) + return comfy_io.NodeOutput(luma_ref) -class LumaConceptsNode(ComfyNodeABC): +class LumaConceptsNode(comfy_io.ComfyNode): """ Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes. 
""" - RETURN_TYPES = (LumaIO.LUMA_CONCEPTS,) - RETURN_NAMES = ("luma_concepts",) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "create_concepts" - CATEGORY = "api node/video/Luma" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="LumaConceptsNode", + display_name="Luma Concepts", + category="api node/video/Luma", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Combo.Input( + "concept1", + options=get_luma_concepts(include_none=True), + ), + comfy_io.Combo.Input( + "concept2", + options=get_luma_concepts(include_none=True), + ), + comfy_io.Combo.Input( + "concept3", + options=get_luma_concepts(include_none=True), + ), + comfy_io.Combo.Input( + "concept4", + options=get_luma_concepts(include_none=True), + ), + comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( + "luma_concepts", + tooltip="Optional Camera Concepts to add to the ones chosen here.", + optional=True, + ), + ], + outputs=[comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "concept1": (get_luma_concepts(include_none=True),), - "concept2": (get_luma_concepts(include_none=True),), - "concept3": (get_luma_concepts(include_none=True),), - "concept4": (get_luma_concepts(include_none=True),), - }, - "optional": { - "luma_concepts": ( - LumaIO.LUMA_CONCEPTS, - { - "tooltip": "Optional Camera Concepts to add to the ones chosen here." - }, - ), - }, - } - - def create_concepts( - self, + def execute( + cls, concept1: str, concept2: str, concept3: str, concept4: str, luma_concepts: LumaConceptChain = None, - ): + ) -> comfy_io.NodeOutput: chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4]) if luma_concepts is not None: chain = luma_concepts.clone_and_merge(chain) - return (chain,) + return comfy_io.NodeOutput(chain) -class LumaImageGenerationNode(ComfyNodeABC): +class LumaImageGenerationNode(comfy_io.ComfyNode): """ Generates images synchronously based on prompt and aspect ratio. """ - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Luma" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="LumaImageNode", + display_name="Luma Text to Image", + category="api node/image/Luma", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", + ), + comfy_io.Combo.Input( + "model", + options=[model.value for model in LumaImageModel], + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=[ratio.value for ratio in LumaAspectRatio], + default=LumaAspectRatio.ratio_16_9, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + ), + comfy_io.Float.Input( + "style_image_weight", + default=1.0, + min=0.0, + max=1.0, + step=0.01, + tooltip="Weight of style image. 
Ignored if no style_image provided.", + ), + comfy_io.Custom(LumaIO.LUMA_REF).Input( + "image_luma_ref", + tooltip="Luma Reference node connection to influence generation with input images; up to 4 images can be considered.", + optional=True, + ), + comfy_io.Image.Input( + "style_image", + tooltip="Style reference image; only 1 image will be used.", + optional=True, + ), + comfy_io.Image.Input( + "character_image", + tooltip="Character reference images; can be a batch of multiple, up to 4 images can be considered.", + optional=True, + ), + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, - ), - "model": ([model.value for model in LumaImageModel],), - "aspect_ratio": ( - [ratio.value for ratio in LumaAspectRatio], - { - "default": LumaAspectRatio.ratio_16_9, - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - "style_image_weight": ( - IO.FLOAT, - { - "default": 1.0, - "min": 0.0, - "max": 1.0, - "step": 0.01, - "tooltip": "Weight of style image. Ignored if no style_image provided.", - }, - ), - }, - "optional": { - "image_luma_ref": ( - LumaIO.LUMA_REF, - { - "tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered." - }, - ), - "style_image": ( - IO.IMAGE, - {"tooltip": "Style reference image; only 1 image will be used."}, - ), - "character_image": ( - IO.IMAGE, - { - "tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered." 
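# ---------------------------------------------------------------------------
# The smallest complete shape of the V3 pattern these conversions target,
# using only the comfy_io surface visible in this diff: a Schema built from
# typed Input/Output declarations, and a classmethod execute returning a
# NodeOutput. A hedged sketch, not canonical API documentation; in particular
# String.Output is assumed by symmetry with Image.Output and Video.Output.
from comfy_api.latest import io as comfy_io

class MinimalV3Node(comfy_io.ComfyNode):
    @classmethod
    def define_schema(cls) -> comfy_io.Schema:
        return comfy_io.Schema(
            node_id="MinimalV3Node",  # hypothetical id; ids must be globally unique
            category="examples",
            inputs=[comfy_io.String.Input("prompt", multiline=True, default="")],
            outputs=[comfy_io.String.Output()],
        )

    @classmethod
    def execute(cls, prompt: str) -> comfy_io.NodeOutput:
        # Inputs arrive as keyword arguments matching the schema's input names.
        return comfy_io.NodeOutput(prompt.strip())
# ---------------------------------------------------------------------------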
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, model: str, aspect_ratio: str, @@ -227,27 +240,29 @@ class LumaImageGenerationNode(ComfyNodeABC): image_luma_ref: LumaReferenceChain = None, style_image: torch.Tensor = None, character_image: torch.Tensor = None, - unique_id: str = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=3) + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } # handle image_luma_ref api_image_ref = None if image_luma_ref is not None: - api_image_ref = await self._convert_luma_refs( - image_luma_ref, max_refs=4, auth_kwargs=kwargs, + api_image_ref = await cls._convert_luma_refs( + image_luma_ref, max_refs=4, auth_kwargs=auth_kwargs, ) # handle style_luma_ref api_style_ref = None if style_image is not None: - api_style_ref = await self._convert_style_image( - style_image, weight=style_image_weight, auth_kwargs=kwargs, + api_style_ref = await cls._convert_style_image( + style_image, weight=style_image_weight, auth_kwargs=auth_kwargs, ) # handle character_ref images character_ref = None if character_image is not None: download_urls = await upload_images_to_comfyapi( - character_image, max_images=4, auth_kwargs=kwargs, + character_image, max_images=4, auth_kwargs=auth_kwargs, ) character_ref = LumaCharacterRef( identity0=LumaImageIdentity(images=download_urls) @@ -268,7 +283,7 @@ class LumaImageGenerationNode(ComfyNodeABC): style_ref=api_style_ref, character_ref=character_ref, ), - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) response_api: LumaGeneration = await operation.execute() @@ -283,18 +298,19 @@ class LumaImageGenerationNode(ComfyNodeABC): failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, result_url_extractor=image_result_url_extractor, - node_id=unique_id, - auth_kwargs=kwargs, + node_id=cls.hidden.unique_id, + auth_kwargs=auth_kwargs, ) response_poll = await operation.execute() async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.image) as img_response: img = process_image_response(await img_response.content.read()) - return (img,) + return comfy_io.NodeOutput(img) + @classmethod async def _convert_luma_refs( - self, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None + cls, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None ): luma_urls = [] ref_count = 0 @@ -308,82 +324,84 @@ class LumaImageGenerationNode(ComfyNodeABC): break return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs) + @classmethod async def _convert_style_image( - self, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None + cls, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None ): chain = LumaReferenceChain( first_ref=LumaReference(image=style_image, weight=weight) ) - return await self._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) + return await cls._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) -class LumaImageModifyNode(ComfyNodeABC): +class LumaImageModifyNode(comfy_io.ComfyNode): """ Modifies images synchronously based on prompt and aspect ratio. 
""" - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Luma" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="LumaImageModifyNode", + display_name="Luma Image to Image", + category="api node/image/Luma", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input( + "image", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation", + ), + comfy_io.Float.Input( + "image_weight", + default=0.1, + min=0.0, + max=0.98, + step=0.01, + tooltip="Weight of the image; the closer to 1.0, the less the image will be modified.", + ), + comfy_io.Combo.Input( + "model", + options=[model.value for model in LumaImageModel], + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + ), + ], + outputs=[comfy_io.Image.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation", - }, - ), - "image_weight": ( - IO.FLOAT, - { - "default": 0.1, - "min": 0.0, - "max": 0.98, - "step": 0.01, - "tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified.", - }, - ), - "model": ([model.value for model in LumaImageModel],), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, - "optional": {}, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, model: str, image: torch.Tensor, image_weight: float, seed, - unique_id: str = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } # first, upload image download_urls = await upload_images_to_comfyapi( - image, max_images=1, auth_kwargs=kwargs, + image, max_images=1, auth_kwargs=auth_kwargs, ) image_url = download_urls[0] # next, make Luma call with download url provided @@ -401,7 +419,7 @@ class LumaImageModifyNode(ComfyNodeABC): url=image_url, weight=round(max(min(1.0-image_weight, 0.98), 0.0), 2) ), ), - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) response_api: LumaGeneration = await operation.execute() @@ -416,88 +434,84 @@ class LumaImageModifyNode(ComfyNodeABC): failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, result_url_extractor=image_result_url_extractor, - node_id=unique_id, - auth_kwargs=kwargs, + node_id=cls.hidden.unique_id, + auth_kwargs=auth_kwargs, ) response_poll = await operation.execute() async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.image) as img_response: img = process_image_response(await img_response.content.read()) - return (img,) + return comfy_io.NodeOutput(img) -class 
LumaTextToVideoGenerationNode(ComfyNodeABC): +class LumaTextToVideoGenerationNode(comfy_io.ComfyNode): """ Generates videos synchronously based on prompt and output_size. """ - RETURN_TYPES = (IO.VIDEO,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/video/Luma" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="LumaVideoNode", + display_name="Luma Text to Video", + category="api node/video/Luma", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the video generation", + ), + comfy_io.Combo.Input( + "model", + options=[model.value for model in LumaVideoModel], + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=[ratio.value for ratio in LumaAspectRatio], + default=LumaAspectRatio.ratio_16_9, + ), + comfy_io.Combo.Input( + "resolution", + options=[resolution.value for resolution in LumaVideoOutputResolution], + default=LumaVideoOutputResolution.res_540p, + ), + comfy_io.Combo.Input( + "duration", + options=[dur.value for dur in LumaVideoModelOutputDuration], + ), + comfy_io.Boolean.Input( + "loop", + default=False, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + ), + comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( + "luma_concepts", + tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", + optional=True, + ) + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the video generation", - }, - ), - "model": ([model.value for model in LumaVideoModel],), - "aspect_ratio": ( - [ratio.value for ratio in LumaAspectRatio], - { - "default": LumaAspectRatio.ratio_16_9, - }, - ), - "resolution": ( - [resolution.value for resolution in LumaVideoOutputResolution], - { - "default": LumaVideoOutputResolution.res_540p, - }, - ), - "duration": ([dur.value for dur in LumaVideoModelOutputDuration],), - "loop": ( - IO.BOOLEAN, - { - "default": False, - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, - "optional": { - "luma_concepts": ( - LumaIO.LUMA_CONCEPTS, - { - "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node." 
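# ---------------------------------------------------------------------------
# The generic poll-until-terminal loop that PollingOperation (used by the
# execute method just below, with completed/failed statuses and a
# status_extractor) encapsulates, reduced to plain asyncio. All names and
# signatures here are illustrative assumptions, not the comfy_api_nodes API.
import asyncio
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")

async def poll_until_done(
    fetch: Callable[[], Awaitable[T]],
    status_of: Callable[[T], str],
    completed: frozenset[str],
    failed: frozenset[str],
    interval_s: float = 1.0,
) -> T:
    while True:
        result = await fetch()
        status = status_of(result)
        if status in completed:
            return result
        if status in failed:
            raise RuntimeError(f"generation failed with status {status!r}")
        await asyncio.sleep(interval_s)  # back off before the next poll
# ---------------------------------------------------------------------------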
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, model: str, aspect_ratio: str, @@ -506,13 +520,15 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): loop: bool, seed, luma_concepts: LumaConceptChain = None, - unique_id: str = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False, min_length=3) duration = duration if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/luma/generations", @@ -529,12 +545,12 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): loop=loop, concepts=luma_concepts.create_api_model() if luma_concepts else None, ), - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) response_api: LumaGeneration = await operation.execute() - if unique_id: - PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id) + if cls.hidden.unique_id: + PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id) operation = PollingOperation( poll_endpoint=ApiEndpoint( @@ -547,90 +563,94 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC): failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, result_url_extractor=video_result_url_extractor, - node_id=unique_id, + node_id=cls.hidden.unique_id, estimated_duration=LUMA_T2V_AVERAGE_DURATION, - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) response_poll = await operation.execute() async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.video) as vid_response: - return (VideoFromFile(BytesIO(await vid_response.content.read())),) + return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) -class LumaImageToVideoGenerationNode(ComfyNodeABC): +class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): """ Generates videos synchronously based on prompt, input images, and output_size. 
""" - RETURN_TYPES = (IO.VIDEO,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/video/Luma" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="LumaImageToVideoNode", + display_name="Luma Image to Video", + category="api node/video/Luma", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the video generation", + ), + comfy_io.Combo.Input( + "model", + options=[model.value for model in LumaVideoModel], + ), + # comfy_io.Combo.Input( + # "aspect_ratio", + # options=[ratio.value for ratio in LumaAspectRatio], + # default=LumaAspectRatio.ratio_16_9, + # ), + comfy_io.Combo.Input( + "resolution", + options=[resolution.value for resolution in LumaVideoOutputResolution], + default=LumaVideoOutputResolution.res_540p, + ), + comfy_io.Combo.Input( + "duration", + options=[dur.value for dur in LumaVideoModelOutputDuration], + ), + comfy_io.Boolean.Input( + "loop", + default=False, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", + ), + comfy_io.Image.Input( + "first_image", + tooltip="First frame of generated video.", + optional=True, + ), + comfy_io.Image.Input( + "last_image", + tooltip="Last frame of generated video.", + optional=True, + ), + comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( + "luma_concepts", + tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", + optional=True, + ) + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the video generation", - }, - ), - "model": ([model.value for model in LumaVideoModel],), - # "aspect_ratio": ([ratio.value for ratio in LumaAspectRatio], { - # "default": LumaAspectRatio.ratio_16_9, - # }), - "resolution": ( - [resolution.value for resolution in LumaVideoOutputResolution], - { - "default": LumaVideoOutputResolution.res_540p, - }, - ), - "duration": ([dur.value for dur in LumaVideoModelOutputDuration],), - "loop": ( - IO.BOOLEAN, - { - "default": False, - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, - "optional": { - "first_image": ( - IO.IMAGE, - {"tooltip": "First frame of generated video."}, - ), - "last_image": (IO.IMAGE, {"tooltip": "Last frame of generated video."}), - "luma_concepts": ( - LumaIO.LUMA_CONCEPTS, - { - "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node." 
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, model: str, resolution: str, @@ -640,14 +660,16 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): first_image: torch.Tensor = None, last_image: torch.Tensor = None, luma_concepts: LumaConceptChain = None, - unique_id: str = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: if first_image is None and last_image is None: raise Exception( "At least one of first_image and last_image requires an input." ) - keyframes = await self._convert_to_keyframes(first_image, last_image, auth_kwargs=kwargs) + auth_kwargs = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + keyframes = await cls._convert_to_keyframes(first_image, last_image, auth_kwargs=auth_kwargs) duration = duration if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None @@ -668,12 +690,12 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): keyframes=keyframes, concepts=luma_concepts.create_api_model() if luma_concepts else None, ), - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) response_api: LumaGeneration = await operation.execute() - if unique_id: - PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id) + if cls.hidden.unique_id: + PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id) operation = PollingOperation( poll_endpoint=ApiEndpoint( @@ -686,18 +708,19 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): failed_statuses=[LumaState.failed], status_extractor=lambda x: x.state, result_url_extractor=video_result_url_extractor, - node_id=unique_id, + node_id=cls.hidden.unique_id, estimated_duration=LUMA_I2V_AVERAGE_DURATION, - auth_kwargs=kwargs, + auth_kwargs=auth_kwargs, ) response_poll = await operation.execute() async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.video) as vid_response: - return (VideoFromFile(BytesIO(await vid_response.content.read())),) + return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + @classmethod async def _convert_to_keyframes( - self, + cls, first_image: torch.Tensor = None, last_image: torch.Tensor = None, auth_kwargs: Optional[dict[str,str]] = None, @@ -719,23 +742,18 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC): return LumaKeyframes(frame0=frame0, frame1=frame1) -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "LumaImageNode": LumaImageGenerationNode, - "LumaImageModifyNode": LumaImageModifyNode, - "LumaVideoNode": LumaTextToVideoGenerationNode, - "LumaImageToVideoNode": LumaImageToVideoGenerationNode, - "LumaReferenceNode": LumaReferenceNode, - "LumaConceptsNode": LumaConceptsNode, -} +class LumaExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + LumaImageGenerationNode, + LumaImageModifyNode, + LumaTextToVideoGenerationNode, + LumaImageToVideoGenerationNode, + LumaReferenceNode, + LumaConceptsNode, + ] -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "LumaImageNode": "Luma Text to Image", - "LumaImageModifyNode": "Luma Image to Image", - 
"LumaVideoNode": "Luma Text to Video", - "LumaImageToVideoNode": "Luma Image to Video", - "LumaReferenceNode": "Luma Reference", - "LumaConceptsNode": "Luma Concepts", -} + +async def comfy_entrypoint() -> LumaExtension: + return LumaExtension() From ad5aef2d0c8517e971129db1dfb0d0108d8341a8 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 12:34:32 +0300 Subject: [PATCH 0641/1073] convert nodes_pixart.py to V3 schema (#10019) --- comfy_extras/nodes_pixart.py | 52 +++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/comfy_extras/nodes_pixart.py b/comfy_extras/nodes_pixart.py index 8d9276afe..a23e87b1f 100644 --- a/comfy_extras/nodes_pixart.py +++ b/comfy_extras/nodes_pixart.py @@ -1,24 +1,38 @@ -from nodes import MAX_RESOLUTION +from typing_extensions import override +import nodes +from comfy_api.latest import ComfyExtension, io -class CLIPTextEncodePixArtAlpha: +class CLIPTextEncodePixArtAlpha(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), - # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ), - }} + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodePixArtAlpha", + category="advanced/conditioning", + description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.", + inputs=[ + io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION), + io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION), + # "aspect_ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + io.String.Input("text", multiline=True, dynamic_prompts=True), + io.Clip.Input("clip"), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" - CATEGORY = "advanced/conditioning" - DESCRIPTION = "Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma." 
- - def encode(self, clip, width, height, text): + @classmethod + def execute(cls, clip, width, height, text): tokens = clip.tokenize(text) - return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}),) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height})) -NODE_CLASS_MAPPINGS = { - "CLIPTextEncodePixArtAlpha": CLIPTextEncodePixArtAlpha, -} + +class PixArtExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + CLIPTextEncodePixArtAlpha, + ] + +async def comfy_entrypoint() -> PixArtExtension: + return PixArtExtension() From 7eca95657cf7a70c15d598c969b890a164a300a1 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 12:36:43 +0300 Subject: [PATCH 0642/1073] convert nodes_photomaker.py to V3 schema (#10017) --- comfy_extras/nodes_photomaker.py | 74 ++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 28 deletions(-) diff --git a/comfy_extras/nodes_photomaker.py b/comfy_extras/nodes_photomaker.py index d358ed6d5..228183c07 100644 --- a/comfy_extras/nodes_photomaker.py +++ b/comfy_extras/nodes_photomaker.py @@ -4,6 +4,8 @@ import folder_paths import comfy.clip_model import comfy.clip_vision import comfy.ops +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io # code for model from: https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0 VISION_CONFIG_DICT = { @@ -116,41 +118,52 @@ class PhotoMakerIDEncoder(comfy.clip_model.CLIPVisionModelProjection): return updated_prompt_embeds -class PhotoMakerLoader: +class PhotoMakerLoader(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "photomaker_model_name": (folder_paths.get_filename_list("photomaker"), )}} + def define_schema(cls): + return io.Schema( + node_id="PhotoMakerLoader", + category="_for_testing/photomaker", + inputs=[ + io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")), + ], + outputs=[ + io.Photomaker.Output(), + ], + is_experimental=True, + ) - RETURN_TYPES = ("PHOTOMAKER",) - FUNCTION = "load_photomaker_model" - - CATEGORY = "_for_testing/photomaker" - - def load_photomaker_model(self, photomaker_model_name): + @classmethod + def execute(cls, photomaker_model_name): photomaker_model_path = folder_paths.get_full_path_or_raise("photomaker", photomaker_model_name) photomaker_model = PhotoMakerIDEncoder() data = comfy.utils.load_torch_file(photomaker_model_path, safe_load=True) if "id_encoder" in data: data = data["id_encoder"] photomaker_model.load_state_dict(data) - return (photomaker_model,) + return io.NodeOutput(photomaker_model) -class PhotoMakerEncode: +class PhotoMakerEncode(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "photomaker": ("PHOTOMAKER",), - "image": ("IMAGE",), - "clip": ("CLIP", ), - "text": ("STRING", {"multiline": True, "dynamicPrompts": True, "default": "photograph of photomaker"}), - }} + def define_schema(cls): + return io.Schema( + node_id="PhotoMakerEncode", + category="_for_testing/photomaker", + inputs=[ + io.Photomaker.Input("photomaker"), + io.Image.Input("image"), + io.Clip.Input("clip"), + io.String.Input("text", multiline=True, dynamic_prompts=True, default="photograph of photomaker"), + ], + outputs=[ + io.Conditioning.Output(), + ], + is_experimental=True, + ) - RETURN_TYPES = ("CONDITIONING",) - 
FUNCTION = "apply_photomaker" - - CATEGORY = "_for_testing/photomaker" - - def apply_photomaker(self, photomaker, image, clip, text): + @classmethod + def execute(cls, photomaker, image, clip, text): special_token = "photomaker" pixel_values = comfy.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float() try: @@ -178,11 +191,16 @@ class PhotoMakerEncode: else: out = cond - return ([[out, {"pooled_output": pooled}]], ) + return io.NodeOutput([[out, {"pooled_output": pooled}]]) -NODE_CLASS_MAPPINGS = { - "PhotoMakerLoader": PhotoMakerLoader, - "PhotoMakerEncode": PhotoMakerEncode, -} +class PhotomakerExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + PhotoMakerLoader, + PhotoMakerEncode, + ] +async def comfy_entrypoint() -> PhotomakerExtension: + return PhotomakerExtension() From 160698eb418269d64fbbe8c34db27a4d1ddb0540 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 27 Sep 2025 22:25:35 +0300 Subject: [PATCH 0643/1073] convert nodes_qwen.py to V3 schema (#10049) --- comfy_extras/nodes_qwen.py | 88 ++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 37 deletions(-) diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py index 49747dc7a..525239ae5 100644 --- a/comfy_extras/nodes_qwen.py +++ b/comfy_extras/nodes_qwen.py @@ -1,24 +1,29 @@ import node_helpers import comfy.utils import math +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io -class TextEncodeQwenImageEdit: +class TextEncodeQwenImageEdit(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}), - }, - "optional": {"vae": ("VAE", ), - "image": ("IMAGE", ),}} + def define_schema(cls): + return io.Schema( + node_id="TextEncodeQwenImageEdit", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("prompt", multiline=True, dynamic_prompts=True), + io.Vae.Input("vae", optional=True), + io.Image.Input("image", optional=True), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" - - CATEGORY = "advanced/conditioning" - - def encode(self, clip, prompt, vae=None, image=None): + @classmethod + def execute(cls, clip, prompt, vae=None, image=None) -> io.NodeOutput: ref_latent = None if image is None: images = [] @@ -40,28 +45,30 @@ class TextEncodeQwenImageEdit: conditioning = clip.encode_from_tokens_scheduled(tokens) if ref_latent is not None: conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True) - return (conditioning, ) + return io.NodeOutput(conditioning) -class TextEncodeQwenImageEditPlus: +class TextEncodeQwenImageEditPlus(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}), - }, - "optional": {"vae": ("VAE", ), - "image1": ("IMAGE", ), - "image2": ("IMAGE", ), - "image3": ("IMAGE", ), - }} + def define_schema(cls): + return io.Schema( + node_id="TextEncodeQwenImageEditPlus", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("prompt", multiline=True, dynamic_prompts=True), + io.Vae.Input("vae", optional=True), + io.Image.Input("image1", optional=True), + io.Image.Input("image2", optional=True), + io.Image.Input("image3", 
optional=True), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" - - CATEGORY = "advanced/conditioning" - - def encode(self, clip, prompt, vae=None, image1=None, image2=None, image3=None): + @classmethod + def execute(cls, clip, prompt, vae=None, image1=None, image2=None, image3=None) -> io.NodeOutput: ref_latents = [] images = [image1, image2, image3] images_vl = [] @@ -94,10 +101,17 @@ class TextEncodeQwenImageEditPlus: conditioning = clip.encode_from_tokens_scheduled(tokens) if len(ref_latents) > 0: conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True) - return (conditioning, ) + return io.NodeOutput(conditioning) -NODE_CLASS_MAPPINGS = { - "TextEncodeQwenImageEdit": TextEncodeQwenImageEdit, - "TextEncodeQwenImageEditPlus": TextEncodeQwenImageEditPlus, -} +class QwenExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + TextEncodeQwenImageEdit, + TextEncodeQwenImageEditPlus, + ] + + +async def comfy_entrypoint() -> QwenExtension: + return QwenExtension() From 653ceab4148a9fbc050ebceb674acef760792b77 Mon Sep 17 00:00:00 2001 From: rattus128 <46076784+rattus128@users.noreply.github.com> Date: Sun, 28 Sep 2025 08:14:16 +1000 Subject: [PATCH 0644/1073] Reduce Peak WAN inference VRAM usage - part II (#10062) * flux: math: Use addcmul_ to avoid an expensive VRAM intermediate The rope process can be the VRAM peak, and the intermediate that holds the addition result before the original tensor is released can OOM. Use addcmul_ instead. * wan: Delete the self attention before cross attention This saves VRAM when the cross attention and FFN are the VRAM peak. --- comfy/ldm/flux/math.py | 5 ++++- comfy/ldm/wan/model.py | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/flux/math.py b/comfy/ldm/flux/math.py index fb7cd7586..8deda0d4a 100644 --- a/comfy/ldm/flux/math.py +++ b/comfy/ldm/flux/math.py @@ -37,7 +37,10 @@ def rope(pos: Tensor, dim: int, theta: int) -> Tensor: def apply_rope1(x: Tensor, freqs_cis: Tensor): x_ = x.to(dtype=freqs_cis.dtype).reshape(*x.shape[:-1], -1, 1, 2) - x_out = freqs_cis[..., 0] * x_[..., 0] + freqs_cis[..., 1] * x_[..., 1] + + x_out = freqs_cis[..., 0] * x_[..., 0] + x_out.addcmul_(freqs_cis[..., 1], x_[..., 1]) + return x_out.reshape(*x.shape).type_as(x) def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor): diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 54616e6eb..0dc650ced 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -237,6 +237,7 @@ class WanAttentionBlock(nn.Module): freqs, transformer_options=transformer_options) x = torch.addcmul(x, y, repeat_e(e[2], x)) + del y # cross-attention & ffn x = x + self.cross_attn(self.norm3(x), context, context_img_len=context_img_len, transformer_options=transformer_options) From 40ae495ddcbc04846e91ccad3e844bb34d98c6fd Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 27 Sep 2025 17:28:49 -0700 Subject: [PATCH 0645/1073] Improvements to the stable release workflow.
(#10065) --- .github/workflows/stable-release.yml | 39 ++++++++++++------- .../windows_release_dependencies.yml | 3 +- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 2bc8e5905..b39b42acd 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -8,11 +8,11 @@ on: description: 'Git tag' required: true type: string - cu: - description: 'CUDA version' + cache_tag: + description: 'Cached dependencies tag' required: true type: string - default: "129" + default: "cu129" python_minor: description: 'Python minor version' required: true @@ -23,7 +23,11 @@ on: required: true type: string default: "6" - + rel_name: + description: 'Release name' + required: true + type: string + default: "nvidia" jobs: package_comfy_windows: @@ -42,15 +46,15 @@ jobs: id: cache with: path: | - cu${{ inputs.cu }}_python_deps.tar + ${{ inputs.cache_tag }}_python_deps.tar update_comfyui_and_python_dependencies.bat - key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }} + key: ${{ runner.os }}-build-${{ inputs.cache_tag }}-${{ inputs.python_minor }} - shell: bash run: | - mv cu${{ inputs.cu }}_python_deps.tar ../ + mv ${{ inputs.cache_tag }}_python_deps.tar ../ mv update_comfyui_and_python_dependencies.bat ../ cd .. - tar xf cu${{ inputs.cu }}_python_deps.tar + tar xf ${{ inputs.cache_tag }}_python_deps.tar pwd ls @@ -65,12 +69,19 @@ jobs: echo 'import site' >> ./python3${{ inputs.python_minor }}._pth curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py - ./python.exe -s -m pip install ../cu${{ inputs.cu }}_python_deps/* + ./python.exe -s -m pip install ../${{ inputs.cache_tag }}_python_deps/* + + grep comfyui ../ComfyUI/requirements.txt ./requirements_comfyui.txt + ./python.exe -s -m pip install -r requirements_comfyui.txt + rm requirements_comfyui.txt + sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth - rm ./Lib/site-packages/torch/lib/dnnl.lib #I don't think this is actually used and I need the space - rm ./Lib/site-packages/torch/lib/libprotoc.lib - rm ./Lib/site-packages/torch/lib/libprotobuf.lib + if test -f ./Lib/site-packages/torch/lib/dnnl.lib; then + rm ./Lib/site-packages/torch/lib/dnnl.lib #I don't think this is actually used and I need the space + rm ./Lib/site-packages/torch/lib/libprotoc.lib + rm ./Lib/site-packages/torch/lib/libprotobuf.lib + fi cd .. @@ -91,7 +102,7 @@ jobs: cd .. 
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable - mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia.7z + mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_${{ inputs.rel_name }}.7z cd ComfyUI_windows_portable python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu @@ -104,7 +115,7 @@ jobs: uses: svenstaro/upload-release-action@v2 with: repo_token: ${{ secrets.GITHUB_TOKEN }} - file: ComfyUI_windows_portable_nvidia.7z + file: ComfyUI_windows_portable_${{ inputs.rel_name }}.7z tag: ${{ inputs.git_tag }} overwrite: true draft: true diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml index 7761cc1ed..f1e2946e6 100644 --- a/.github/workflows/windows_release_dependencies.yml +++ b/.github/workflows/windows_release_dependencies.yml @@ -56,7 +56,8 @@ jobs: ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2 pause" > update_comfyui_and_python_dependencies.bat - python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} ${{ inputs.extra_dependencies }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r requirements.txt pygit2 -w ./temp_wheel_dir + grep -v comfyui requirements.txt > requirements_nocomfyui.txt + python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} ${{ inputs.extra_dependencies }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r requirements_nocomfyui.txt pygit2 -w ./temp_wheel_dir python -m pip install --no-cache-dir ./temp_wheel_dir/* echo installed basic ls -lah temp_wheel_dir From 896f2e653c02769371e113906d70a24306d87a58 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 27 Sep 2025 18:30:35 -0700 Subject: [PATCH 0646/1073] Fix typo in release workflow. 
(#10066) --- .github/workflows/stable-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index b39b42acd..619b0e995 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -71,7 +71,7 @@ jobs: ./python.exe get-pip.py ./python.exe -s -m pip install ../${{ inputs.cache_tag }}_python_deps/* - grep comfyui ../ComfyUI/requirements.txt ./requirements_comfyui.txt + grep comfyui ../ComfyUI/requirements.txt > ./requirements_comfyui.txt ./python.exe -s -m pip install -r requirements_comfyui.txt rm requirements_comfyui.txt From a1127b232d221432be065f8e765f3538e62a2f41 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 28 Sep 2025 05:11:36 +0300 Subject: [PATCH 0647/1073] convert nodes_lotus.py to V3 schema (#10057) --- comfy_extras/nodes_lotus.py | 42 +++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/comfy_extras/nodes_lotus.py b/comfy_extras/nodes_lotus.py index 739dbdd3d..9f62ba2bf 100644 --- a/comfy_extras/nodes_lotus.py +++ b/comfy_extras/nodes_lotus.py @@ -1,20 +1,22 @@ +from typing_extensions import override + import torch import comfy.model_management as mm +from comfy_api.latest import ComfyExtension, io -class LotusConditioning: + +class LotusConditioning(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - }, - } + def define_schema(cls): + return io.Schema( + node_id="LotusConditioning", + category="conditioning/lotus", + inputs=[], + outputs=[io.Conditioning.Output(display_name="conditioning")], + ) - RETURN_TYPES = ("CONDITIONING",) - RETURN_NAMES = ("conditioning",) - FUNCTION = "conditioning" - CATEGORY = "conditioning/lotus" - - def conditioning(self): + @classmethod + def execute(cls) -> io.NodeOutput: device = mm.get_torch_device() #lotus uses a frozen encoder and null conditioning, i'm just inlining the results of that operation since it doesn't change #and getting parity with the reference implementation would otherwise require inference and 800mb of tensors @@ -22,8 +24,16 @@ class LotusConditioning: cond = [[prompt_embeds, {}]] - return (cond,) + return io.NodeOutput(cond) -NODE_CLASS_MAPPINGS = { - "LotusConditioning" : LotusConditioning, -} + +class LotusExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + LotusConditioning, + ] + + +async def comfy_entrypoint() -> LotusExtension: + return LotusExtension() From 1cf86f5ae5706ff141f8d51ed9ba96ecdcdcb695 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 28 Sep 2025 05:12:51 +0300 Subject: [PATCH 0648/1073] convert nodes_lumina2.py to V3 schema (#10058) --- comfy_extras/nodes_lumina2.py | 99 +++++++++++++++++++++-------------- 1 file changed, 61 insertions(+), 38 deletions(-) diff --git a/comfy_extras/nodes_lumina2.py b/comfy_extras/nodes_lumina2.py index 275189785..89ff2397a 100644 --- a/comfy_extras/nodes_lumina2.py +++ b/comfy_extras/nodes_lumina2.py @@ -1,20 +1,27 @@ -from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict +from typing_extensions import override import torch +from comfy_api.latest import ComfyExtension, io -class RenormCFG: + +class RenormCFG(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "cfg_trunc": ("FLOAT", {"default": 100, "min": 0.0, "max": 100.0, "step": 0.01}), - 
"renorm_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return io.Schema( + node_id="RenormCFG", + category="advanced/model", + inputs=[ + io.Model.Input("model"), + io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01), + io.Float.Input("renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) - CATEGORY = "advanced/model" - - def patch(self, model, cfg_trunc, renorm_cfg): + @classmethod + def execute(cls, model, cfg_trunc, renorm_cfg) -> io.NodeOutput: def renorm_cfg_func(args): cond_denoised = args["cond_denoised"] uncond_denoised = args["uncond_denoised"] @@ -53,10 +60,10 @@ class RenormCFG: m = model.clone() m.set_model_sampler_cfg_function(renorm_cfg_func) - return (m, ) + return io.NodeOutput(m) -class CLIPTextEncodeLumina2(ComfyNodeABC): +class CLIPTextEncodeLumina2(io.ComfyNode): SYSTEM_PROMPT = { "superior": "You are an assistant designed to generate superior images with the superior "\ "degree of image-text alignment based on textual prompts or user prompts.", @@ -69,36 +76,52 @@ class CLIPTextEncodeLumina2(ComfyNodeABC): "Alignment: You are an assistant designed to generate high-quality images with the highest "\ "degree of image-text alignment based on textual prompts." @classmethod - def INPUT_TYPES(s) -> InputTypeDict: - return { - "required": { - "system_prompt": (list(CLIPTextEncodeLumina2.SYSTEM_PROMPT.keys()), {"tooltip": CLIPTextEncodeLumina2.SYSTEM_PROMPT_TIP}), - "user_prompt": (IO.STRING, {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."}), - "clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."}) - } - } - RETURN_TYPES = (IO.CONDITIONING,) - OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeLumina2", + display_name="CLIP Text Encode for Lumina2", + category="conditioning", + description="Encodes a system prompt and a user prompt using a CLIP model into an embedding " + "that can be used to guide the diffusion model towards generating specific images.", + inputs=[ + io.Combo.Input( + "system_prompt", + options=list(cls.SYSTEM_PROMPT.keys()), + tooltip=cls.SYSTEM_PROMPT_TIP, + ), + io.String.Input( + "user_prompt", + multiline=True, + dynamic_prompts=True, + tooltip="The text to be encoded.", + ), + io.Clip.Input("clip", tooltip="The CLIP model used for encoding the text."), + ], + outputs=[ + io.Conditioning.Output( + tooltip="A conditioning containing the embedded text used to guide the diffusion model.", + ), + ], + ) - CATEGORY = "conditioning" - DESCRIPTION = "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images." 
- - def encode(self, clip, user_prompt, system_prompt): + @classmethod + def execute(cls, clip, user_prompt, system_prompt) -> io.NodeOutput: if clip is None: raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.") - system_prompt = CLIPTextEncodeLumina2.SYSTEM_PROMPT[system_prompt] + system_prompt = cls.SYSTEM_PROMPT[system_prompt] prompt = f'{system_prompt} {user_prompt}' tokens = clip.tokenize(prompt) - return (clip.encode_from_tokens_scheduled(tokens), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) -NODE_CLASS_MAPPINGS = { - "CLIPTextEncodeLumina2": CLIPTextEncodeLumina2, - "RenormCFG": RenormCFG -} +class Lumina2Extension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + CLIPTextEncodeLumina2, + RenormCFG, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "CLIPTextEncodeLumina2": "CLIP Text Encode for Lumina2", -} +async def comfy_entrypoint() -> Lumina2Extension: + return Lumina2Extension() From 2dadb348602f8f452eb2a1d8720f6029dc4039a2 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 28 Sep 2025 05:16:22 +0300 Subject: [PATCH 0649/1073] convert nodes_hypertile.py to V3 schema (#10061) --- comfy_extras/nodes_hypertile.py | 59 +++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py index b366117c7..0ad5e6773 100644 --- a/comfy_extras/nodes_hypertile.py +++ b/comfy_extras/nodes_hypertile.py @@ -1,9 +1,11 @@ #Taken from: https://github.com/tfernd/HyperTile/ import math +from typing_extensions import override from einops import rearrange # Use torch rng for consistency across generations from torch import randint +from comfy_api.latest import ComfyExtension, io def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: min_value = min(min_value, value) @@ -20,25 +22,31 @@ def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: return ns[idx] -class HyperTile: +class HyperTile(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}), - "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}), - "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}), - "scale_depth": ("BOOLEAN", {"default": False}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return io.Schema( + node_id="HyperTile", + category="model_patches/unet", + inputs=[ + io.Model.Input("model"), + io.Int.Input("tile_size", default=256, min=1, max=2048), + io.Int.Input("swap_size", default=2, min=1, max=128), + io.Int.Input("max_depth", default=0, min=0, max=10), + io.Boolean.Input("scale_depth", default=False), + ], + outputs=[ + io.Model.Output(), + ], + ) - CATEGORY = "model_patches/unet" - - def patch(self, model, tile_size, swap_size, max_depth, scale_depth): + @classmethod + def execute(cls, model, tile_size, swap_size, max_depth, scale_depth) -> io.NodeOutput: latent_tile_size = max(32, tile_size) // 8 - self.temp = None + temp = None def hypertile_in(q, k, v, extra_options): + nonlocal temp model_chans = q.shape[-2] orig_shape = extra_options['original_shape'] apply_to = [] @@ -58,14 +66,15 @@ class HyperTile: if nh * nw > 1: q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, 
nh=nh, nw=nw) - self.temp = (nh, nw, h, w) + temp = (nh, nw, h, w) return q, k, v return q, k, v def hypertile_out(out, extra_options): - if self.temp is not None: - nh, nw, h, w = self.temp - self.temp = None + nonlocal temp + if temp is not None: + nh, nw, h, w = temp + temp = None out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) return out @@ -76,6 +85,14 @@ class HyperTile: m.set_model_attn1_output_patch(hypertile_out) return (m, ) -NODE_CLASS_MAPPINGS = { - "HyperTile": HyperTile, -} + +class HyperTileExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + HyperTile, + ] + + +async def comfy_entrypoint() -> HyperTileExtension: + return HyperTileExtension() From 1364548c721a466adcdc60e49ee291b0d4255245 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rui=20Wang=20=28=E7=8E=8B=E7=91=9E=29?= Date: Sun, 28 Sep 2025 10:36:02 +0800 Subject: [PATCH 0650/1073] feat: ComfyUI can be run on the specified Ascend NPU (#9663) * feature: Set the Ascend NPU to use a single one * Enable the `--cuda-device` parameter to support both CUDA and Ascend NPUs simultaneously. * Make the code just set the ASCENT_RT_VISIBLE_DEVICES environment variable without any other edits to master branch --------- Co-authored-by: Jedrzej Kosinski --- main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/main.py b/main.py index c33f0e17b..70696fcc3 100644 --- a/main.py +++ b/main.py @@ -127,6 +127,7 @@ if __name__ == "__main__": if args.cuda_device is not None: os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) + os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device) logging.info("Set cuda device to: {}".format(args.cuda_device)) if args.oneapi_device_selector is not None: From 555f902fc1ed20e98201f9102172f0fc190c2c42 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 27 Sep 2025 19:43:25 -0700 Subject: [PATCH 0651/1073] Fix stable workflow creating multiple draft releases. (#10067) --- .github/workflows/stable-release.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 619b0e995..924bdec90 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -112,10 +112,9 @@ jobs: ls - name: Upload binaries to release - uses: svenstaro/upload-release-action@v2 + uses: softprops/action-gh-release@v2 with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: ComfyUI_windows_portable_${{ inputs.rel_name }}.7z - tag: ${{ inputs.git_tag }} - overwrite: true + files: ComfyUI_windows_portable_${{ inputs.rel_name }}.7z + tag_name: ${{ inputs.git_tag }} draft: true + overwrite_files: true From b60dc316272ba139e06b8a7b2f5f5b622c9afe20 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 28 Sep 2025 10:41:32 -0700 Subject: [PATCH 0652/1073] Update command to install latest nighly pytorch. (#10085) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3f6cfc2ed..5a257687b 100644 --- a/README.md +++ b/README.md @@ -233,7 +233,7 @@ Nvidia users should install stable pytorch using this command: This is the command to install pytorch nightly instead which might have performance improvements. 
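Back in the HyperTile conversion (patch 0649 above), the change from self.temp to nonlocal temp is the interesting part: V3 nodes run execute() as a classmethod, so the paired attention patches can no longer stash state on an instance and must share it through a closure instead. A stripped-down sketch of that pattern, with a toy reshape standing in for the real tiling:

```python
import torch

def make_patches():
    temp = None  # shared by the two closures below

    def attn_in(q):
        nonlocal temp
        temp = q.shape          # remember the layout for the output patch
        return q.reshape(-1)    # stand-in for the real tiling rearrange

    def attn_out(out):
        nonlocal temp
        shape, temp = temp, None  # consume and reset for the next call
        return out.reshape(shape) if shape is not None else out

    return attn_in, attn_out

attn_in, attn_out = make_patches()
assert attn_out(attn_in(torch.randn(2, 64, 8))).shape == (2, 64, 8)
```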
-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu129``` +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu130``` #### Troubleshooting From 6ec1cfe101206229ff3af5c3d3675b3b92477067 Mon Sep 17 00:00:00 2001 From: Changrz <51637999+WhiteGiven@users.noreply.github.com> Date: Tue, 30 Sep 2025 02:59:12 +0800 Subject: [PATCH 0653/1073] [Rodin3d api nodes] Updated the name of the save file path (changed from timestamp to UUID). (#10011) * Update savepath name from time to uuid * delete lib --- comfy_api_nodes/nodes_rodin.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index 1af393eba..817efb0f5 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -11,7 +11,6 @@ from comfy.comfy_types.node_typing import IO import folder_paths as comfy_paths import aiohttp import os -import datetime import asyncio import io import logging @@ -243,8 +242,8 @@ class Rodin3DAPI: return mesh_mode, quality_override - async def download_files(self, url_list): - save_path = os.path.join(comfy_paths.get_output_directory(), "Rodin3D", datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) + async def download_files(self, url_list, task_uuid): + save_path = os.path.join(comfy_paths.get_output_directory(), f"Rodin3D_{task_uuid}") os.makedirs(save_path, exist_ok=True) model_file_path = None async with aiohttp.ClientSession() as session: @@ -320,7 +319,7 @@ class Rodin3D_Regular(Rodin3DAPI): **kwargs) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list) + model = await self.download_files(download_list, task_uuid) return (model,) @@ -366,7 +365,7 @@ class Rodin3D_Detail(Rodin3DAPI): **kwargs) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list) + model = await self.download_files(download_list, task_uuid) return (model,) @@ -412,7 +411,7 @@ class Rodin3D_Smooth(Rodin3DAPI): **kwargs) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list) + model = await self.download_files(download_list, task_uuid) return (model,) @@ -467,7 +466,7 @@ class Rodin3D_Sketch(Rodin3DAPI): ) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list) + model = await self.download_files(download_list, task_uuid) return (model,) From c8276f8c6bee54b494fd5bec8dfb87ed21a3fa65 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 30 Sep 2025 02:59:42 +0800 Subject: [PATCH 0654/1073] Update template to 0.1.91 (#10096) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b3f81e8fa..45d3e1607 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.26.13 -comfyui-workflow-templates==0.1.88 +comfyui-workflow-templates==0.1.91 comfyui-embedded-docs==0.2.6 torch torchsde From 05a258efd84bfb00e2618eb9b7937b8fef1e82ed Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> 
Date: Mon, 29 Sep 2025 22:01:04 +0300 Subject: [PATCH 0655/1073] add WanImageToImageApi node (#10094) --- comfy_api_nodes/nodes_wan.py | 149 ++++++++++++++++++++++++++++++++++- 1 file changed, 148 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_wan.py b/comfy_api_nodes/nodes_wan.py index db5bd41c1..0be5daadb 100644 --- a/comfy_api_nodes/nodes_wan.py +++ b/comfy_api_nodes/nodes_wan.py @@ -28,6 +28,12 @@ class Text2ImageInputField(BaseModel): negative_prompt: Optional[str] = Field(None) +class Image2ImageInputField(BaseModel): + prompt: str = Field(...) + negative_prompt: Optional[str] = Field(None) + images: list[str] = Field(..., min_length=1, max_length=2) + + class Text2VideoInputField(BaseModel): prompt: str = Field(...) negative_prompt: Optional[str] = Field(None) @@ -49,6 +55,13 @@ class Txt2ImageParametersField(BaseModel): watermark: bool = Field(True) +class Image2ImageParametersField(BaseModel): + size: Optional[str] = Field(None) + n: int = Field(1, description="Number of images to generate.") # we support only value=1 + seed: int = Field(..., ge=0, le=2147483647) + watermark: bool = Field(True) + + class Text2VideoParametersField(BaseModel): size: str = Field(...) seed: int = Field(..., ge=0, le=2147483647) @@ -73,6 +86,12 @@ class Text2ImageTaskCreationRequest(BaseModel): parameters: Txt2ImageParametersField = Field(...) +class Image2ImageTaskCreationRequest(BaseModel): + model: str = Field(...) + input: Image2ImageInputField = Field(...) + parameters: Image2ImageParametersField = Field(...) + + class Text2VideoTaskCreationRequest(BaseModel): model: str = Field(...) input: Text2VideoInputField = Field(...) @@ -135,7 +154,12 @@ async def process_task( url: str, request_model: Type[T], response_model: Type[R], - payload: Union[Text2ImageTaskCreationRequest, Text2VideoTaskCreationRequest, Image2VideoTaskCreationRequest], + payload: Union[ + Text2ImageTaskCreationRequest, + Image2ImageTaskCreationRequest, + Text2VideoTaskCreationRequest, + Image2VideoTaskCreationRequest, + ], node_id: str, estimated_duration: int, poll_interval: int, @@ -288,6 +312,128 @@ class WanTextToImageApi(comfy_io.ComfyNode): return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) +class WanImageToImageApi(comfy_io.ComfyNode): + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="WanImageToImageApi", + display_name="Wan Image to Image", + category="api node/image/Wan", + description="Generates an image from one or two input images and a text prompt. 
" + "The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).", + inputs=[ + comfy_io.Combo.Input( + "model", + options=["wan2.5-i2i-preview"], + default="wan2.5-i2i-preview", + tooltip="Model to use.", + ), + comfy_io.Image.Input( + "image", + tooltip="Single-image editing or multi-image fusion, maximum 2 images.", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + ), + comfy_io.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Negative text prompt to guide what to avoid.", + optional=True, + ), + # redo this later as an optional combo of recommended resolutions + # comfy_io.Int.Input( + # "width", + # default=1280, + # min=384, + # max=1440, + # step=16, + # optional=True, + # ), + # comfy_io.Int.Input( + # "height", + # default=1280, + # min=384, + # max=1440, + # step=16, + # optional=True, + # ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed to use for generation.", + optional=True, + ), + comfy_io.Boolean.Input( + "watermark", + default=True, + tooltip="Whether to add an \"AI generated\" watermark to the result.", + optional=True, + ), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + image: torch.Tensor, + prompt: str, + negative_prompt: str = "", + # width: int = 1024, + # height: int = 1024, + seed: int = 0, + watermark: bool = True, + ): + n_images = get_number_of_images(image) + if n_images not in (1, 2): + raise ValueError(f"Expected 1 or 2 input images, got {n_images}.") + images = [] + for i in image: + images.append("data:image/png;base64," + tensor_to_base64_string(i, total_pixels=4096*4096)) + payload = Image2ImageTaskCreationRequest( + model=model, + input=Image2ImageInputField(prompt=prompt, negative_prompt=negative_prompt, images=images), + parameters=Image2ImageParametersField( + # size=f"{width}*{height}", + seed=seed, + watermark=watermark, + ), + ) + response = await process_task( + { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + "/proxy/wan/api/v1/services/aigc/image2image/image-synthesis", + request_model=Image2ImageTaskCreationRequest, + response_model=ImageTaskStatusResponse, + payload=payload, + node_id=cls.hidden.unique_id, + estimated_duration=42, + poll_interval=3, + ) + return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) + + class WanTextToVideoApi(comfy_io.ComfyNode): @classmethod def define_schema(cls): @@ -593,6 +739,7 @@ class WanApiExtension(ComfyExtension): async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: return [ WanTextToImageApi, + WanImageToImageApi, WanTextToVideoApi, WanImageToVideoApi, ] From b1111c2062ce35d4292bcd94f27c099a13c619cb Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 29 Sep 2025 22:03:35 +0300 Subject: [PATCH 0656/1073] convert nodes_mochi.py to V3 schema (#10069) --- comfy_extras/nodes_mochi.py | 49 +++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/comfy_extras/nodes_mochi.py 
b/comfy_extras/nodes_mochi.py index 1c474faa9..d750194fc 100644 --- a/comfy_extras/nodes_mochi.py +++ b/comfy_extras/nodes_mochi.py @@ -1,23 +1,40 @@ -import nodes +from typing_extensions import override import torch import comfy.model_management +import nodes +from comfy_api.latest import ComfyExtension, io -class EmptyMochiLatentVideo: + +class EmptyMochiLatentVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "length": ("INT", {"default": 25, "min": 7, "max": nodes.MAX_RESOLUTION, "step": 6}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}} - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" + def define_schema(cls): + return io.Schema( + node_id="EmptyMochiLatentVideo", + category="latent/video", + inputs=[ + io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=25, min=7, max=nodes.MAX_RESOLUTION, step=6), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(), + ], + ) - CATEGORY = "latent/video" - - def generate(self, width, height, length, batch_size=1): + @classmethod + def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput: latent = torch.zeros([batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) - return ({"samples":latent}, ) + return io.NodeOutput({"samples": latent}) -NODE_CLASS_MAPPINGS = { - "EmptyMochiLatentVideo": EmptyMochiLatentVideo, -} + +class MochiExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + EmptyMochiLatentVideo, + ] + + +async def comfy_entrypoint() -> MochiExtension: + return MochiExtension() From 041b8824f50e01803637d5e83c3f4edaf628f43a Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 29 Sep 2025 22:05:28 +0300 Subject: [PATCH 0657/1073] convert nodes_perpneg.py to V3 schema (#10081) --- comfy_extras/nodes_perpneg.py | 93 +++++++++++++++++++++-------------- 1 file changed, 55 insertions(+), 38 deletions(-) diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py index 89e5eef90..cd068ce9c 100644 --- a/comfy_extras/nodes_perpneg.py +++ b/comfy_extras/nodes_perpneg.py @@ -5,6 +5,9 @@ import comfy.samplers import comfy.utils import node_helpers import math +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io + def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale): pos = noise_pred_pos - noise_pred_nocond @@ -16,20 +19,27 @@ def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, co return cfg_result #TODO: This node should be removed, it has been replaced with PerpNegGuider -class PerpNeg: +class PerpNeg(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"model": ("MODEL", ), - "empty_conditioning": ("CONDITIONING", ), - "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return io.Schema( + node_id="PerpNeg", + display_name="Perp-Neg (DEPRECATED by PerpNegGuider)", + category="_for_testing", + inputs=[ + 
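# Note on the perp_neg() helper above: it works on deltas from the
# unconditional prediction. With p = pos - nocond and n = neg - nocond,
# it projects p onto n, proj = (sum(p * n) / norm(n)**2) * n, and then
# guides along nocond + cond_scale * (p - neg_scale * proj), removing
# only the component of the positive direction parallel to the negative.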
io.Model.Input("model"), + io.Conditioning.Input("empty_conditioning"), + io.Float.Input("neg_scale", default=1.0, min=0.0, max=100.0, step=0.01), + ], + outputs=[ + io.Model.Output(), + ], + is_experimental=True, + is_deprecated=True, + ) - CATEGORY = "_for_testing" - DEPRECATED = True - - def patch(self, model, empty_conditioning, neg_scale): + @classmethod + def execute(cls, model, empty_conditioning, neg_scale) -> io.NodeOutput: m = model.clone() nocond = comfy.sampler_helpers.convert_cond(empty_conditioning) @@ -50,7 +60,7 @@ class PerpNeg: m.set_model_sampler_cfg_function(cfg_function) - return (m, ) + return io.NodeOutput(m) class Guider_PerpNeg(comfy.samplers.CFGGuider): @@ -112,35 +122,42 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider): return cfg_result -class PerpNegGuider: +class PerpNegGuider(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "empty_conditioning": ("CONDITIONING", ), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), - "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}), - } - } + def define_schema(cls): + return io.Schema( + node_id="PerpNegGuider", + category="_for_testing", + inputs=[ + io.Model.Input("model"), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Conditioning.Input("empty_conditioning"), + io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), + io.Float.Input("neg_scale", default=1.0, min=0.0, max=100.0, step=0.01), + ], + outputs=[ + io.Guider.Output(), + ], + is_experimental=True, + ) - RETURN_TYPES = ("GUIDER",) - - FUNCTION = "get_guider" - CATEGORY = "_for_testing" - - def get_guider(self, model, positive, negative, empty_conditioning, cfg, neg_scale): + @classmethod + def execute(cls, model, positive, negative, empty_conditioning, cfg, neg_scale) -> io.NodeOutput: guider = Guider_PerpNeg(model) guider.set_conds(positive, negative, empty_conditioning) guider.set_cfg(cfg, neg_scale) - return (guider,) + return io.NodeOutput(guider) -NODE_CLASS_MAPPINGS = { - "PerpNeg": PerpNeg, - "PerpNegGuider": PerpNegGuider, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "PerpNeg": "Perp-Neg (DEPRECATED by PerpNegGuider)", -} +class PerpNegExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + PerpNeg, + PerpNegGuider, + ] + + +async def comfy_entrypoint() -> PerpNegExtension: + return PerpNegExtension() From ed0f4a609b5e6821f97db5cb1715068c25f78e7b Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 29 Sep 2025 12:16:02 -0700 Subject: [PATCH 0658/1073] dont cache new locale entry points (#10101) --- middleware/cache_middleware.py | 11 ++++++----- tests-unit/server_test/test_cache_control.py | 7 +++++++ 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/middleware/cache_middleware.py b/middleware/cache_middleware.py index 374ef7934..f02135369 100644 --- a/middleware/cache_middleware.py +++ b/middleware/cache_middleware.py @@ -26,11 +26,12 @@ async def cache_control( """Cache control middleware that sets appropriate cache headers based on file type and response status""" response: web.Response = await handler(request) - if ( - request.path.endswith(".js") - or request.path.endswith(".css") - or request.path.endswith("index.json") - ): + path_filename = request.path.rsplit("/", 1)[-1] + is_entry_point = path_filename.startswith("index") and 
path_filename.endswith( + ".json" + ) + + if request.path.endswith(".js") or request.path.endswith(".css") or is_entry_point: response.headers.setdefault("Cache-Control", "no-cache") return response diff --git a/tests-unit/server_test/test_cache_control.py b/tests-unit/server_test/test_cache_control.py index 8de59125a..fa68d9408 100644 --- a/tests-unit/server_test/test_cache_control.py +++ b/tests-unit/server_test/test_cache_control.py @@ -48,6 +48,13 @@ CACHE_SCENARIOS = [ "expected_cache": "no-cache", "should_have_header": True, }, + { + "name": "localized_index_json_no_cache", + "path": "/templates/index.zh.json", + "status": 200, + "expected_cache": "no-cache", + "should_have_header": True, + }, # Non-matching files { "name": "html_no_header", From 8accf50908094d9cd39168981fa5394274d25491 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 29 Sep 2025 22:35:51 +0300 Subject: [PATCH 0659/1073] convert nodes_mahiro.py to V3 schema (#10070) --- comfy_extras/nodes_mahiro.py | 50 ++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/comfy_extras/nodes_mahiro.py b/comfy_extras/nodes_mahiro.py index 8fcdfba75..07b3353f4 100644 --- a/comfy_extras/nodes_mahiro.py +++ b/comfy_extras/nodes_mahiro.py @@ -1,17 +1,29 @@ +from typing_extensions import override import torch import torch.nn.functional as F -class Mahiro: +from comfy_api.latest import ComfyExtension, io + + +class Mahiro(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"model": ("MODEL",), - }} - RETURN_TYPES = ("MODEL",) - RETURN_NAMES = ("patched_model",) - FUNCTION = "patch" - CATEGORY = "_for_testing" - DESCRIPTION = "Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt." - def patch(self, model): + def define_schema(cls): + return io.Schema( + node_id="Mahiro", + display_name="Mahiro is so cute that she deserves a better guidance function!! (。・ω・。)", + category="_for_testing", + description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.", + inputs=[ + io.Model.Input("model"), + ], + outputs=[ + io.Model.Output(display_name="patched_model"), + ], + is_experimental=True, + ) + + @classmethod + def execute(cls, model) -> io.NodeOutput: m = model.clone() def mahiro_normd(args): scale: float = args['cond_scale'] @@ -30,12 +42,16 @@ class Mahiro: wm = (simsc*cfg + (4-simsc)*leap) / 4 return wm m.set_model_sampler_post_cfg_function(mahiro_normd) - return (m, ) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "Mahiro": Mahiro -} -NODE_DISPLAY_NAME_MAPPINGS = { - "Mahiro": "Mahiro is so cute that she deserves a better guidance function!! (。・ω・。)", -} +class MahiroExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + Mahiro, + ] + + +async def comfy_entrypoint() -> MahiroExtension: + return MahiroExtension() From 7f38e4c538de2fa38d0539c18577cdd0e5d251c2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 14:27:52 -0700 Subject: [PATCH 0660/1073] Add action to create cached deps with manually specified torch. 
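First, stepping back to the cache middleware fix (#10101) above: the entry-point test is generalized from a literal "index.json" suffix to any filename of the form index*.json, which is what lets localized entry points like index.zh.json skip the long-lived cache. A self-contained sketch of the predicate and the behavior change:

```python
def is_no_cache_path(path: str) -> bool:
    filename = path.rsplit("/", 1)[-1]  # just the final path segment
    is_entry_point = filename.startswith("index") and filename.endswith(".json")
    return path.endswith(".js") or path.endswith(".css") or is_entry_point

assert is_no_cache_path("/templates/index.json")      # matched before and after
assert is_no_cache_path("/templates/index.zh.json")   # only matches after the fix
assert not is_no_cache_path("/outputs/summary.json")  # still cacheable
```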
(#10102) --- .../windows_release_dependencies_manual.yml | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 .github/workflows/windows_release_dependencies_manual.yml diff --git a/.github/workflows/windows_release_dependencies_manual.yml b/.github/workflows/windows_release_dependencies_manual.yml new file mode 100644 index 000000000..0799feef1 --- /dev/null +++ b/.github/workflows/windows_release_dependencies_manual.yml @@ -0,0 +1,64 @@ +name: "Windows Release dependencies Manual" + +on: + workflow_dispatch: + inputs: + torch_dependencies: + description: 'torch dependencies' + required: false + type: string + default: "torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu128" + cache_tag: + description: 'Cached dependencies tag' + required: true + type: string + default: "cu128" + + python_minor: + description: 'python minor version' + required: true + type: string + default: "12" + + python_patch: + description: 'python patch version' + required: true + type: string + default: "10" + +jobs: + build_dependencies: + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }} + + - shell: bash + run: | + echo "@echo off + call update_comfyui.bat nopause + echo - + echo This will try to update pytorch and all python dependencies. + echo - + echo If you just want to update normally, close this and run update_comfyui.bat instead. + echo - + pause + ..\python_embeded\python.exe -s -m pip install --upgrade ${{ inputs.torch_dependencies }} -r ../ComfyUI/requirements.txt pygit2 + pause" > update_comfyui_and_python_dependencies.bat + + grep -v comfyui requirements.txt > requirements_nocomfyui.txt + python -m pip wheel --no-cache-dir ${{ inputs.torch_dependencies }} -r requirements_nocomfyui.txt pygit2 -w ./temp_wheel_dir + python -m pip install --no-cache-dir ./temp_wheel_dir/* + echo installed basic + ls -lah temp_wheel_dir + mv temp_wheel_dir ${{ inputs.cache_tag }}_python_deps + tar cf ${{ inputs.cache_tag }}_python_deps.tar ${{ inputs.cache_tag }}_python_deps + + - uses: actions/cache/save@v4 + with: + path: | + ${{ inputs.cache_tag }}_python_deps.tar + update_comfyui_and_python_dependencies.bat + key: ${{ runner.os }}-build-${{ inputs.cache_tag }}-${{ inputs.python_minor }} From 1673ace19b9d63a8dc0d388aafdb54abf2497892 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 16:08:42 -0700 Subject: [PATCH 0661/1073] Make the final release test optional in the stable release action. (#10103) --- .github/workflows/stable-release.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 924bdec90..5eb4a0783 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -28,6 +28,11 @@ on: required: true type: string default: "nvidia" + test_release: + description: 'Test Release' + required: true + type: boolean + default: true jobs: package_comfy_windows: @@ -104,6 +109,10 @@ jobs: "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_${{ inputs.rel_name }}.7z + - shell: bash + if: ${{ inputs.test_release }} + run: | + cd .. 
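# Smoke test (runs only when test_release is set): boot the packaged
# ComfyUI once on CPU so a broken bundle fails here instead of after upload.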
cd ComfyUI_windows_portable python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu From 0db6aabed3942ea71258d25d32dc971a2a2421af Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 16:54:05 -0700 Subject: [PATCH 0662/1073] Different base files for different release. (#10104) --- .github/workflows/stable-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 5eb4a0783..40e1bc157 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -101,7 +101,7 @@ jobs: mkdir update cp -r ComfyUI/.ci/update_windows/* ./update/ - cp -r ComfyUI/.ci/windows_base_files/* ./ + cp -r ComfyUI/.ci/windows_${{ inputs.rel_name }}_base_files/* ./ cp ../update_comfyui_and_python_dependencies.bat ./update/ cd .. From 375884842314a2234ddc29132b03c741ce81443b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 16:54:37 -0700 Subject: [PATCH 0663/1073] Different base files for nvidia and amd portables. (#10105) --- .../run_amd_gpu.bat} | 0 .../README_VERY_IMPORTANT.txt | 0 .../run_cpu.bat | 0 .ci/windows_nvidia_base_files/run_nvidia_gpu.bat | 2 ++ .../run_nvidia_gpu_fast_fp16_accumulation.bat | 0 .github/workflows/windows_release_nightly_pytorch.yml | 2 +- .github/workflows/windows_release_package.yml | 2 +- 7 files changed, 4 insertions(+), 2 deletions(-) rename .ci/{windows_base_files/run_nvidia_gpu.bat => windows_amd_base_files/run_amd_gpu.bat} (100%) rename .ci/{windows_base_files => windows_nvidia_base_files}/README_VERY_IMPORTANT.txt (100%) rename .ci/{windows_base_files => windows_nvidia_base_files}/run_cpu.bat (100%) create mode 100755 .ci/windows_nvidia_base_files/run_nvidia_gpu.bat rename .ci/{windows_base_files => windows_nvidia_base_files}/run_nvidia_gpu_fast_fp16_accumulation.bat (100%) diff --git a/.ci/windows_base_files/run_nvidia_gpu.bat b/.ci/windows_amd_base_files/run_amd_gpu.bat similarity index 100% rename from .ci/windows_base_files/run_nvidia_gpu.bat rename to .ci/windows_amd_base_files/run_amd_gpu.bat diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_nvidia_base_files/README_VERY_IMPORTANT.txt similarity index 100% rename from .ci/windows_base_files/README_VERY_IMPORTANT.txt rename to .ci/windows_nvidia_base_files/README_VERY_IMPORTANT.txt diff --git a/.ci/windows_base_files/run_cpu.bat b/.ci/windows_nvidia_base_files/run_cpu.bat similarity index 100% rename from .ci/windows_base_files/run_cpu.bat rename to .ci/windows_nvidia_base_files/run_cpu.bat diff --git a/.ci/windows_nvidia_base_files/run_nvidia_gpu.bat b/.ci/windows_nvidia_base_files/run_nvidia_gpu.bat new file mode 100755 index 000000000..274d7c948 --- /dev/null +++ b/.ci/windows_nvidia_base_files/run_nvidia_gpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build +pause diff --git a/.ci/windows_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat b/.ci/windows_nvidia_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat similarity index 100% rename from .ci/windows_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat rename to .ci/windows_nvidia_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 5bdc940de..ca1ef71ae 100644 --- 
a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -68,7 +68,7 @@ jobs: mkdir update cp -r ComfyUI/.ci/update_windows/* ./update/ - cp -r ComfyUI/.ci/windows_base_files/* ./ + cp -r ComfyUI/.ci/windows_nvidia_base_files/* ./ cp -r ComfyUI/.ci/windows_nightly_base_files/* ./ echo "call update_comfyui.bat nopause diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index 46375698e..7955325fc 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -81,7 +81,7 @@ jobs: mkdir update cp -r ComfyUI/.ci/update_windows/* ./update/ - cp -r ComfyUI/.ci/windows_base_files/* ./ + cp -r ComfyUI/.ci/windows_nvidia_base_files/* ./ cp ../update_comfyui_and_python_dependencies.bat ./update/ cd .. From 342cf644ce495dafaa31dd49d42c47c5e242e701 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:05:44 -0700 Subject: [PATCH 0664/1073] Add a way to have different names for stable nvidia portables. (#10106) --- .github/workflows/stable-release.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 40e1bc157..1cbbfbf69 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -28,6 +28,11 @@ on: required: true type: string default: "nvidia" + rel_extra_name: + description: 'Release extra name' + required: false + type: string + default: "" test_release: description: 'Test Release' required: true @@ -107,7 +112,7 @@ jobs: cd .. "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable - mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_${{ inputs.rel_name }}.7z + mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_${{ inputs.rel_name }}${{ inputs.rel_extra_name }}.7z - shell: bash if: ${{ inputs.test_release }} @@ -123,7 +128,7 @@ jobs: - name: Upload binaries to release uses: softprops/action-gh-release@v2 with: - files: ComfyUI_windows_portable_${{ inputs.rel_name }}.7z + files: ComfyUI_windows_portable_${{ inputs.rel_name }}${{ inputs.rel_extra_name }}.7z tag_name: ${{ inputs.git_tag }} draft: true overwrite_files: true From bed4b49d08d80e195cb42d5294037fc6b631942e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:31:15 -0700 Subject: [PATCH 0665/1073] Add action to do the full stable release. 
(#10107) --- .github/workflows/release-stable-all.yml | 49 ++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 .github/workflows/release-stable-all.yml diff --git a/.github/workflows/release-stable-all.yml b/.github/workflows/release-stable-all.yml new file mode 100644 index 000000000..aac84d637 --- /dev/null +++ b/.github/workflows/release-stable-all.yml @@ -0,0 +1,49 @@ +name: "Release Stable All Portable Versions" + +on: + workflow_dispatch: + inputs: + git_tag: + description: 'Git tag' + required: true + type: string + +jobs: + release_nvidia_default: + name: "Release NVIDIA Default (cu129)" + uses: ./.github/workflows/stable-release.yml + with: + git_tag: ${{ inputs.git_tag }} + cache_tag: "cu129" + python_minor: "13" + python_patch: "6" + rel_name: "nvidia" + rel_extra_name: "" + test_release: true + secrets: inherit + + release_nvidia_cu128: + name: "Release NVIDIA cu128" + uses: ./.github/workflows/stable-release.yml + with: + git_tag: ${{ inputs.git_tag }} + cache_tag: "cu128" + python_minor: "12" + python_patch: "10" + rel_name: "nvidia" + rel_extra_name: "_cu128" + test_release: true + secrets: inherit + + release_amd_rocm: + name: "Release AMD ROCm 6.4.4" + uses: ./.github/workflows/stable-release.yml + with: + git_tag: ${{ inputs.git_tag }} + cache_tag: "rocm644" + python_minor: "12" + python_patch: "10" + rel_name: "amd" + rel_extra_name: "" + test_release: false + secrets: inherit From 447884b65740d9f4160ef13d55adb49ca111140e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:37:51 -0700 Subject: [PATCH 0666/1073] Make stable release workflow callable. (#10108) --- .github/workflows/stable-release.yml | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index 1cbbfbf69..28484a9d1 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -2,6 +2,42 @@ name: "Release Stable Version" on: + workflow_call: + inputs: + git_tag: + description: 'Git tag' + required: true + type: string + cache_tag: + description: 'Cached dependencies tag' + required: true + type: string + default: "cu129" + python_minor: + description: 'Python minor version' + required: true + type: string + default: "13" + python_patch: + description: 'Python patch version' + required: true + type: string + default: "6" + rel_name: + description: 'Release name' + required: true + type: string + default: "nvidia" + rel_extra_name: + description: 'Release extra name' + required: false + type: string + default: "" + test_release: + description: 'Test Release' + required: true + type: boolean + default: true workflow_dispatch: inputs: git_tag: From 414a178fb690ef9998f65419f03ef1a83cf559de Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 20:03:02 -0700 Subject: [PATCH 0667/1073] Add basic readme for AMD portable. 
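Before that, a note on how patches 0665 and 0666 fit together: they form the standard reusable-workflow pairing, where the aggregate workflow invokes the release workflow with `uses:` and the callee declares `workflow_call` alongside its existing `workflow_dispatch`, duplicating the inputs because the two triggers carry separate payloads. A hypothetical minimal pair:

```yaml
# caller.yml - runs the callee as one of its jobs
jobs:
  release_default:
    uses: ./.github/workflows/callee.yml   # repo-local reusable workflow
    with:
      git_tag: "v1.0.0"
    secrets: inherit                       # forward repository secrets

# callee.yml - declares both triggers; inputs are listed twice because
# workflow_call and workflow_dispatch are independent event payloads
on:
  workflow_call:
    inputs:
      git_tag: { required: true, type: string }
  workflow_dispatch:
    inputs:
      git_tag: { required: true, type: string }
```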
(#10109) --- .../README_VERY_IMPORTANT.txt | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100755 .ci/windows_amd_base_files/README_VERY_IMPORTANT.txt diff --git a/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt new file mode 100755 index 000000000..570ac3398 --- /dev/null +++ b/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt @@ -0,0 +1,24 @@ +As of the time of writing this you need this preview driver for best results: +https://www.amd.com/en/resources/support-articles/release-notes/RN-AMDGPU-WINDOWS-PYTORCH-PREVIEW.html + +HOW TO RUN: + +if you have a AMD gpu: + +run_amd_gpu.bat + + +IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints + +You can download the stable diffusion XL one from: https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors + + +RECOMMENDED WAY TO UPDATE: +To update the ComfyUI code: update\update_comfyui.bat + + +TO SHARE MODELS BETWEEN COMFYUI AND ANOTHER UI: +In the ComfyUI directory you will find a file: extra_model_paths.yaml.example +Rename this file to: extra_model_paths.yaml and edit it with your favorite text editor. + + From 977a4ed8c55ade53d0d6cfe1fe8a6396ee35a2ec Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 29 Sep 2025 23:04:42 -0400 Subject: [PATCH 0668/1073] ComfyUI version 0.3.61 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index d469a8194..737b72131 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.60" +__version__ = "0.3.61" diff --git a/pyproject.toml b/pyproject.toml index 7340c320b..e851560f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.60" +version = "0.3.61" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 6e079abc3a3fc0fb98e2a0848877874151310ed1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 29 Sep 2025 20:11:37 -0700 Subject: [PATCH 0669/1073] Workflow permission fix. 
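The fix below is about token scoping: GITHUB_TOKEN permissions do not escalate across reusable-workflow calls, so the called release workflow can use at most what each calling job grants, and uploading release assets therefore needs contents: write spelled out at every call site. The shape of the fix, as the diff that follows also shows:

```yaml
jobs:
  release_nvidia_default:
    permissions:
      contents: write        # lets the called workflow upload release assets
      packages: write
      pull-requests: read
    uses: ./.github/workflows/stable-release.yml
    with:
      git_tag: "v1.0.0"
    secrets: inherit
```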
(#10110) --- .github/workflows/release-stable-all.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/release-stable-all.yml b/.github/workflows/release-stable-all.yml index aac84d637..5c1024599 100644 --- a/.github/workflows/release-stable-all.yml +++ b/.github/workflows/release-stable-all.yml @@ -10,6 +10,10 @@ on: jobs: release_nvidia_default: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" name: "Release NVIDIA Default (cu129)" uses: ./.github/workflows/stable-release.yml with: @@ -23,6 +27,10 @@ jobs: secrets: inherit release_nvidia_cu128: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" name: "Release NVIDIA cu128" uses: ./.github/workflows/stable-release.yml with: @@ -36,6 +44,10 @@ jobs: secrets: inherit release_amd_rocm: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" name: "Release AMD ROCm 6.4.4" uses: ./.github/workflows/stable-release.yml with: From f48d7230de2f7b10fe8bfda3d7f53241d19c7266 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 30 Sep 2025 09:17:49 -0700 Subject: [PATCH 0670/1073] Add new portable links to readme. (#10112) --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 5a257687b..8f24a33ee 100644 --- a/README.md +++ b/README.md @@ -176,6 +176,12 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you If you have trouble extracting it, right click the file -> properties -> unblock +#### Alternative Downloads: + +[Experimental portable for AMD GPUs](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_amd.7z) + +[Portable with pytorch cuda 12.8 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu128.7z) (Supports Nvidia 10 series and older GPUs). + #### How do I share models between another UI and ComfyUI? See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor. 
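# (The matching bump in pyproject.toml follows below; this file is
# regenerated from that declared version, so the two always move together.)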
From 631b9ae861bf8bdd3c538da232e4c8938448e59d Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 30 Sep 2025 20:21:47 +0300 Subject: [PATCH 0671/1073] fix(Rodin3D-Gen2): missing "task_uuid" parameter (#10128) --- comfy_api_nodes/nodes_rodin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index 817efb0f5..633ac46d3 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -540,7 +540,7 @@ class Rodin3D_Gen2(Rodin3DAPI): **kwargs) await self.poll_for_task_status(subscription_key, **kwargs) download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list) + model = await self.download_files(download_list, task_uuid) return (model,) From b682a73c55a6434fdd9293d45ace969597f8ad65 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 30 Sep 2025 20:43:41 +0300 Subject: [PATCH 0672/1073] enable Seedance Pro model in the FirstLastFrame node (#10120) --- comfy_api_nodes/nodes_bytedance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index a7eeaf15a..654d6a362 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -920,7 +920,7 @@ class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[Image2VideoModelName.seedance_1_lite.value], + options=[model.value for model in Image2VideoModelName], default=Image2VideoModelName.seedance_1_lite.value, tooltip="Model name", ), From bab8ba20bf47d985d6b1d73627c2add76bd4e716 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 30 Sep 2025 15:12:07 -0400 Subject: [PATCH 0673/1073] ComfyUI version 0.3.62. --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 737b72131..ac76fbe35 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.61" +__version__ = "0.3.62" diff --git a/pyproject.toml b/pyproject.toml index e851560f7..d0a76c6d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.61" +version = "0.3.62" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From c4a8cf60ab5d6eaf052b7a08f5ee97104acf7a2f Mon Sep 17 00:00:00 2001 From: AustinMroz Date: Tue, 30 Sep 2025 22:12:32 -0700 Subject: [PATCH 0674/1073] Bump frontend to 1.27.7 (#10133) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 45d3e1607..588c5dcf0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.26.13 +comfyui-frontend-package==1.27.7 comfyui-workflow-templates==0.1.91 comfyui-embedded-docs==0.2.6 torch From 638097829d2352a1c78ab4fbb1e028d1e7cff012 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 1 Oct 2025 09:00:22 +0300 Subject: [PATCH 0675/1073] convert nodes_audio_encoder.py to V3 schema (#10123) --- comfy_api/latest/_io.py | 1 + comfy_extras/nodes_audio_encoder.py | 68 ++++++++++++++++++----------- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 4826818df..2d95cffd6 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -1605,6 +1605,7 @@ class _IO: Model = Model ClipVision = ClipVision ClipVisionOutput = ClipVisionOutput + AudioEncoder = AudioEncoder AudioEncoderOutput = AudioEncoderOutput StyleModel = StyleModel Gligen = Gligen diff --git a/comfy_extras/nodes_audio_encoder.py b/comfy_extras/nodes_audio_encoder.py index 39a140fef..13aacd41a 100644 --- a/comfy_extras/nodes_audio_encoder.py +++ b/comfy_extras/nodes_audio_encoder.py @@ -1,44 +1,62 @@ import folder_paths import comfy.audio_encoders.audio_encoders import comfy.utils +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io -class AudioEncoderLoader: +class AudioEncoderLoader(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "audio_encoder_name": (folder_paths.get_filename_list("audio_encoders"), ), - }} - RETURN_TYPES = ("AUDIO_ENCODER",) - FUNCTION = "load_model" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="AudioEncoderLoader", + category="loaders", + inputs=[ + io.Combo.Input( + "audio_encoder_name", + options=folder_paths.get_filename_list("audio_encoders"), + ), + ], + outputs=[io.AudioEncoder.Output()], + ) - CATEGORY = "loaders" - - def load_model(self, audio_encoder_name): + @classmethod + def execute(cls, audio_encoder_name) -> io.NodeOutput: audio_encoder_name = folder_paths.get_full_path_or_raise("audio_encoders", audio_encoder_name) sd = comfy.utils.load_torch_file(audio_encoder_name, safe_load=True) audio_encoder = comfy.audio_encoders.audio_encoders.load_audio_encoder_from_sd(sd) if audio_encoder is None: raise RuntimeError("ERROR: audio encoder file is invalid and does not contain a valid model.") - return (audio_encoder,) + return io.NodeOutput(audio_encoder) -class AudioEncoderEncode: +class AudioEncoderEncode(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "audio_encoder": ("AUDIO_ENCODER",), - "audio": ("AUDIO",), - }} - RETURN_TYPES = ("AUDIO_ENCODER_OUTPUT",) - FUNCTION = "encode" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="AudioEncoderEncode", + category="conditioning", + 
inputs=[ + io.AudioEncoder.Input("audio_encoder"), + io.Audio.Input("audio"), + ], + outputs=[io.AudioEncoderOutput.Output()], + ) - CATEGORY = "conditioning" - - def encode(self, audio_encoder, audio): + @classmethod + def execute(cls, audio_encoder, audio) -> io.NodeOutput: output = audio_encoder.encode_audio(audio["waveform"], audio["sample_rate"]) - return (output,) + return io.NodeOutput(output) -NODE_CLASS_MAPPINGS = { - "AudioEncoderLoader": AudioEncoderLoader, - "AudioEncoderEncode": AudioEncoderEncode, -} +class AudioEncoder(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + AudioEncoderLoader, + AudioEncoderEncode, + ] + + +async def comfy_entrypoint() -> AudioEncoder: + return AudioEncoder() From 7eb7160db487feb891ceabdf985b09f9a8091869 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 1 Oct 2025 22:16:59 +0300 Subject: [PATCH 0676/1073] convert nodes_gits.py to V3 schema (#9949) --- comfy_extras/nodes_gits.py | 49 ++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/comfy_extras/nodes_gits.py b/comfy_extras/nodes_gits.py index 47b1dd049..25367560a 100644 --- a/comfy_extras/nodes_gits.py +++ b/comfy_extras/nodes_gits.py @@ -1,6 +1,8 @@ # from https://github.com/zju-pi/diff-sampler/tree/main/gits-main import numpy as np import torch +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io def loglinear_interp(t_steps, num_steps): """ @@ -333,25 +335,28 @@ NOISE_LEVELS = { ], } -class GITSScheduler: +class GITSScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"coeff": ("FLOAT", {"default": 1.20, "min": 0.80, "max": 1.50, "step": 0.05}), - "steps": ("INT", {"default": 10, "min": 2, "max": 1000}), - "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="GITSScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05), + io.Int.Input("steps", default=10, min=2, max=1000), + io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), + ], + outputs=[ + io.Sigmas.Output(), + ], + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, coeff, steps, denoise): + @classmethod + def execute(cls, coeff, steps, denoise): total_steps = steps if denoise < 1.0: if denoise <= 0.0: - return (torch.FloatTensor([]),) + return io.NodeOutput(torch.FloatTensor([])) total_steps = round(steps * denoise) if steps <= 20: @@ -362,8 +367,16 @@ class GITSScheduler: sigmas = sigmas[-(total_steps + 1):] sigmas[-1] = 0 - return (torch.FloatTensor(sigmas), ) + return io.NodeOutput(torch.FloatTensor(sigmas)) -NODE_CLASS_MAPPINGS = { - "GITSScheduler": GITSScheduler, -} + +class GITSSchedulerExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + GITSScheduler, + ] + + +async def comfy_entrypoint() -> GITSSchedulerExtension: + return GITSSchedulerExtension() From e0210ce0a7140e0c61bce7fdb964b5e5e8d31619 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 1 Oct 2025 22:17:33 +0300 Subject: [PATCH 0677/1073] convert nodes_differential_diffusion.py to V3 schema (#10056) --- comfy_extras/nodes_differential_diffusion.py | 69 
++++++++++++-------- 1 file changed, 40 insertions(+), 29 deletions(-) diff --git a/comfy_extras/nodes_differential_diffusion.py b/comfy_extras/nodes_differential_diffusion.py index 255ac420d..6dfdf466c 100644 --- a/comfy_extras/nodes_differential_diffusion.py +++ b/comfy_extras/nodes_differential_diffusion.py @@ -1,34 +1,41 @@ # code adapted from https://github.com/exx8/differential-diffusion +from typing_extensions import override + import torch +from comfy_api.latest import ComfyExtension, io -class DifferentialDiffusion(): + +class DifferentialDiffusion(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model": ("MODEL", ), - }, - "optional": { - "strength": ("FLOAT", { - "default": 1.0, - "min": 0.0, - "max": 1.0, - "step": 0.01, - }), - } - } - RETURN_TYPES = ("MODEL",) - FUNCTION = "apply" - CATEGORY = "_for_testing" - INIT = False + def define_schema(cls): + return io.Schema( + node_id="DifferentialDiffusion", + display_name="Differential Diffusion", + category="_for_testing", + inputs=[ + io.Model.Input("model"), + io.Float.Input( + "strength", + default=1.0, + min=0.0, + max=1.0, + step=0.01, + optional=True, + ), + ], + outputs=[io.Model.Output()], + is_experimental=True, + ) - def apply(self, model, strength=1.0): + @classmethod + def execute(cls, model, strength=1.0) -> io.NodeOutput: model = model.clone() - model.set_model_denoise_mask_function(lambda *args, **kwargs: self.forward(*args, **kwargs, strength=strength)) - return (model, ) + model.set_model_denoise_mask_function(lambda *args, **kwargs: cls.forward(*args, **kwargs, strength=strength)) + return io.NodeOutput(model) - def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict, strength: float): + @classmethod + def forward(cls, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict, strength: float): model = extra_options["model"] step_sigmas = extra_options["sigmas"] sigma_to = model.inner_model.model_sampling.sigma_min @@ -53,9 +60,13 @@ class DifferentialDiffusion(): return binary_mask -NODE_CLASS_MAPPINGS = { - "DifferentialDiffusion": DifferentialDiffusion, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "DifferentialDiffusion": "Differential Diffusion", -} +class DifferentialDiffusionExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + DifferentialDiffusion, + ] + + +async def comfy_entrypoint() -> DifferentialDiffusionExtension: + return DifferentialDiffusionExtension() From 3af1881455fb0c44c3030b2d61b79302933386d2 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 1 Oct 2025 22:18:04 +0300 Subject: [PATCH 0678/1073] convert nodes_optimalsteps.py to V3 schema (#10074) --- comfy_extras/nodes_optimalsteps.py | 52 +++++++++++++++++++----------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/comfy_extras/nodes_optimalsteps.py b/comfy_extras/nodes_optimalsteps.py index e7c851ca2..73f0104d8 100644 --- a/comfy_extras/nodes_optimalsteps.py +++ b/comfy_extras/nodes_optimalsteps.py @@ -1,9 +1,12 @@ # from https://github.com/bebebe666/OptimalSteps - import numpy as np import torch +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io + + def loglinear_interp(t_steps, num_steps): """ Performs log-linear interpolation of a given array of decreasing numbers. 
@@ -23,25 +26,28 @@ NOISE_LEVELS = {"FLUX": [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0 "Chroma": [0.992, 0.99, 0.988, 0.985, 0.982, 0.978, 0.973, 0.968, 0.961, 0.953, 0.943, 0.931, 0.917, 0.9, 0.881, 0.858, 0.832, 0.802, 0.769, 0.731, 0.69, 0.646, 0.599, 0.55, 0.501, 0.451, 0.402, 0.355, 0.311, 0.27, 0.232, 0.199, 0.169, 0.143, 0.12, 0.101, 0.084, 0.07, 0.058, 0.048, 0.001], } -class OptimalStepsScheduler: +class OptimalStepsScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model_type": (["FLUX", "Wan", "Chroma"], ), - "steps": ("INT", {"default": 20, "min": 3, "max": 1000}), - "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="OptimalStepsScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]), + io.Int.Input("steps", default=20, min=3, max=1000), + io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), + ], + outputs=[ + io.Sigmas.Output(), + ], + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, model_type, steps, denoise): + @classmethod + def execute(cls, model_type, steps, denoise) ->io.NodeOutput: total_steps = steps if denoise < 1.0: if denoise <= 0.0: - return (torch.FloatTensor([]),) + return io.NodeOutput(torch.FloatTensor([])) total_steps = round(steps * denoise) sigmas = NOISE_LEVELS[model_type][:] @@ -50,8 +56,16 @@ class OptimalStepsScheduler: sigmas = sigmas[-(total_steps + 1):] sigmas[-1] = 0 - return (torch.FloatTensor(sigmas), ) + return io.NodeOutput(torch.FloatTensor(sigmas)) -NODE_CLASS_MAPPINGS = { - "OptimalStepsScheduler": OptimalStepsScheduler, -} + +class OptimalStepsExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + OptimalStepsScheduler, + ] + + +async def comfy_entrypoint() -> OptimalStepsExtension: + return OptimalStepsExtension() From 11bab7be76d0bfdb326e8aea53cdfebd99b42cc5 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 1 Oct 2025 22:18:49 +0300 Subject: [PATCH 0679/1073] convert nodes_pag.py to V3 schema (#10080) --- comfy_extras/nodes_pag.py | 49 +++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/comfy_extras/nodes_pag.py b/comfy_extras/nodes_pag.py index eb28196f4..79fea5f0c 100644 --- a/comfy_extras/nodes_pag.py +++ b/comfy_extras/nodes_pag.py @@ -3,25 +3,30 @@ #My modified one here is more basic but has less chances of breaking with ComfyUI updates. 
+from typing_extensions import override + import comfy.model_patcher import comfy.samplers +from comfy_api.latest import ComfyExtension, io -class PerturbedAttentionGuidance: + +class PerturbedAttentionGuidance(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model": ("MODEL",), - "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": 0.01}), - } - } + def define_schema(cls): + return io.Schema( + node_id="PerturbedAttentionGuidance", + category="model_patches/unet", + inputs=[ + io.Model.Input("model"), + io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" - - CATEGORY = "model_patches/unet" - - def patch(self, model, scale): + @classmethod + def execute(cls, model, scale) -> io.NodeOutput: unet_block = "middle" unet_block_id = 0 m = model.clone() @@ -49,8 +54,16 @@ class PerturbedAttentionGuidance: m.set_model_sampler_post_cfg_function(post_cfg_function) - return (m,) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "PerturbedAttentionGuidance": PerturbedAttentionGuidance, -} + +class PAGExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + PerturbedAttentionGuidance, + ] + + +async def comfy_entrypoint() -> PAGExtension: + return PAGExtension() From d9c0a4053d955c7fd3400be07001bc4e774591e1 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 1 Oct 2025 22:19:56 +0300 Subject: [PATCH 0680/1073] convert nodes_lt.py to V3 schema (#10084) --- comfy_extras/nodes_lt.py | 412 ++++++++++++++++++++++----------------- 1 file changed, 228 insertions(+), 184 deletions(-) diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index f82337a67..b51d15804 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -1,4 +1,3 @@ -import io import nodes import node_helpers import torch @@ -8,46 +7,60 @@ import comfy.utils import math import numpy as np import av +from io import BytesIO +from typing_extensions import override from comfy.ldm.lightricks.symmetric_patchifier import SymmetricPatchifier, latent_to_pixel_coords +from comfy_api.latest import ComfyExtension, io -class EmptyLTXVLatentVideo: +class EmptyLTXVLatentVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "width": ("INT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}), - "height": ("INT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}), - "length": ("INT", {"default": 97, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}} - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" + def define_schema(cls): + return io.Schema( + node_id="EmptyLTXVLatentVideo", + category="latent/video/ltxv", + inputs=[ + io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("length", default=97, min=1, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(), + ], + ) - CATEGORY = "latent/video/ltxv" - - def generate(self, width, height, length, batch_size=1): + @classmethod + def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput: latent = torch.zeros([batch_size, 128, ((length - 1) // 8) + 1, height // 
32, width // 32], device=comfy.model_management.intermediate_device()) - return ({"samples": latent}, ) + return io.NodeOutput({"samples": latent}) -class LTXVImgToVideo: +class LTXVImgToVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "vae": ("VAE",), - "image": ("IMAGE",), - "width": ("INT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}), - "height": ("INT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}), - "length": ("INT", {"default": 97, "min": 9, "max": nodes.MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}), - }} + def define_schema(cls): + return io.Schema( + node_id="LTXVImgToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Image.Input("image"), + io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("length", default=97, min=9, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Float.Input("strength", default=1.0, min=0.0, max=1.0), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") - - CATEGORY = "conditioning/video_models" - FUNCTION = "generate" - - def generate(self, positive, negative, image, vae, width, height, length, batch_size, strength): + @classmethod + def execute(cls, positive, negative, image, vae, width, height, length, batch_size, strength) -> io.NodeOutput: pixels = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) encode_pixels = pixels[:, :, :, :3] t = vae.encode(encode_pixels) @@ -62,7 +75,7 @@ class LTXVImgToVideo: ) conditioning_latent_frames_mask[:, :, :t.shape[2]] = 1.0 - strength - return (positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask}, ) + return io.NodeOutput(positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask}) def conditioning_get_any_value(conditioning, key, default=None): @@ -93,35 +106,46 @@ def get_keyframe_idxs(cond): num_keyframes = torch.unique(keyframe_idxs[:, 0]).shape[0] return keyframe_idxs, num_keyframes -class LTXVAddGuide: +class LTXVAddGuide(io.ComfyNode): + NUM_PREFIX_FRAMES = 2 + PATCHIFIER = SymmetricPatchifier(1) + @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "vae": ("VAE",), - "latent": ("LATENT",), - "image": ("IMAGE", {"tooltip": "Image or video to condition the latent video on. Must be 8*n + 1 frames." - "If the video is not 8*n + 1 frames, it will be cropped to the nearest 8*n + 1 frames."}), - "frame_idx": ("INT", {"default": 0, "min": -9999, "max": 9999, - "tooltip": "Frame index to start the conditioning at. For single-frame images or " - "videos with 1-8 frames, any frame_idx value is acceptable. For videos with 9+ " - "frames, frame_idx must be divisible by 8, otherwise it will be rounded down to " - "the nearest multiple of 8. 
Negative values are counted from the end of the video."}), - "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - } - } + def define_schema(cls): + return io.Schema( + node_id="LTXVAddGuide", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Latent.Input("latent"), + io.Image.Input( + "image", + tooltip="Image or video to condition the latent video on. Must be 8*n + 1 frames. " + "If the video is not 8*n + 1 frames, it will be cropped to the nearest 8*n + 1 frames.", + ), + io.Int.Input( + "frame_idx", + default=0, + min=-9999, + max=9999, + tooltip="Frame index to start the conditioning at. " + "For single-frame images or videos with 1-8 frames, any frame_idx value is acceptable. " + "For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded " + "down to the nearest multiple of 8. Negative values are counted from the end of the video.", + ), + io.Float.Input("strength", default=1.0, min=0.0, max=1.0, step=0.01), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") - - CATEGORY = "conditioning/video_models" - FUNCTION = "generate" - - def __init__(self): - self._num_prefix_frames = 2 - self._patchifier = SymmetricPatchifier(1) - - def encode(self, vae, latent_width, latent_height, images, scale_factors): + @classmethod + def encode(cls, vae, latent_width, latent_height, images, scale_factors): time_scale_factor, width_scale_factor, height_scale_factor = scale_factors images = images[:(images.shape[0] - 1) // time_scale_factor * time_scale_factor + 1] pixels = comfy.utils.common_upscale(images.movedim(-1, 1), latent_width * width_scale_factor, latent_height * height_scale_factor, "bilinear", crop="disabled").movedim(1, -1) @@ -129,7 +153,8 @@ class LTXVAddGuide: t = vae.encode(encode_pixels) return encode_pixels, t - def get_latent_index(self, cond, latent_length, guide_length, frame_idx, scale_factors): + @classmethod + def get_latent_index(cls, cond, latent_length, guide_length, frame_idx, scale_factors): time_scale_factor, _, _ = scale_factors _, num_keyframes = get_keyframe_idxs(cond) latent_count = latent_length - num_keyframes @@ -141,9 +166,10 @@ class LTXVAddGuide: return frame_idx, latent_idx - def add_keyframe_index(self, cond, frame_idx, guiding_latent, scale_factors): + @classmethod + def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors): keyframe_idxs, _ = get_keyframe_idxs(cond) - _, latent_coords = self._patchifier.patchify(guiding_latent) + _, latent_coords = cls.PATCHIFIER.patchify(guiding_latent) pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0) # we need the causal fix only if we're placing the new latents at index 0 pixel_coords[:, 0] += frame_idx if keyframe_idxs is None: @@ -152,8 +178,9 @@ class LTXVAddGuide: keyframe_idxs = torch.cat([keyframe_idxs, pixel_coords], dim=2) return node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs}) - def append_keyframe(self, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors): - _, latent_idx = self.get_latent_index( + @classmethod + def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, 
guiding_latent, strength, scale_factors): + _, latent_idx = cls.get_latent_index( cond=positive, latent_length=latent_image.shape[2], guide_length=guiding_latent.shape[2], @@ -162,8 +189,8 @@ class LTXVAddGuide: ) noise_mask[:, :, latent_idx:latent_idx + guiding_latent.shape[2]] = 1.0 - positive = self.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors) - negative = self.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors) + positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors) + negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors) mask = torch.full( (noise_mask.shape[0], 1, guiding_latent.shape[2], noise_mask.shape[3], noise_mask.shape[4]), @@ -176,7 +203,8 @@ class LTXVAddGuide: noise_mask = torch.cat([noise_mask, mask], dim=2) return positive, negative, latent_image, noise_mask - def replace_latent_frames(self, latent_image, noise_mask, guiding_latent, latent_idx, strength): + @classmethod + def replace_latent_frames(cls, latent_image, noise_mask, guiding_latent, latent_idx, strength): cond_length = guiding_latent.shape[2] assert latent_image.shape[2] >= latent_idx + cond_length, "Conditioning frames exceed the length of the latent sequence." @@ -195,20 +223,21 @@ class LTXVAddGuide: return latent_image, noise_mask - def generate(self, positive, negative, vae, latent, image, frame_idx, strength): + @classmethod + def execute(cls, positive, negative, vae, latent, image, frame_idx, strength) -> io.NodeOutput: scale_factors = vae.downscale_index_formula latent_image = latent["samples"] noise_mask = get_noise_mask(latent) _, _, latent_length, latent_height, latent_width = latent_image.shape - image, t = self.encode(vae, latent_width, latent_height, image, scale_factors) + image, t = cls.encode(vae, latent_width, latent_height, image, scale_factors) - frame_idx, latent_idx = self.get_latent_index(positive, latent_length, len(image), frame_idx, scale_factors) + frame_idx, latent_idx = cls.get_latent_index(positive, latent_length, len(image), frame_idx, scale_factors) assert latent_idx + t.shape[2] <= latent_length, "Conditioning frames exceed the length of the latent sequence." 
- num_prefix_frames = min(self._num_prefix_frames, t.shape[2]) + num_prefix_frames = min(cls.NUM_PREFIX_FRAMES, t.shape[2]) - positive, negative, latent_image, noise_mask = self.append_keyframe( + positive, negative, latent_image, noise_mask = cls.append_keyframe( positive, negative, frame_idx, @@ -223,9 +252,9 @@ class LTXVAddGuide: t = t[:, :, num_prefix_frames:] if t.shape[2] == 0: - return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},) + return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask}) - latent_image, noise_mask = self.replace_latent_frames( + latent_image, noise_mask = cls.replace_latent_frames( latent_image, noise_mask, t, @@ -233,34 +262,35 @@ class LTXVAddGuide: strength, ) - return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},) + return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask}) -class LTXVCropGuides: +class LTXVCropGuides(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "latent": ("LATENT",), - } - } + def define_schema(cls): + return io.Schema( + node_id="LTXVCropGuides", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Latent.Input("latent"), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") - - CATEGORY = "conditioning/video_models" - FUNCTION = "crop" - - def __init__(self): - self._patchifier = SymmetricPatchifier(1) - - def crop(self, positive, negative, latent): + @classmethod + def execute(cls, positive, negative, latent) -> io.NodeOutput: latent_image = latent["samples"].clone() noise_mask = get_noise_mask(latent) _, num_keyframes = get_keyframe_idxs(positive) if num_keyframes == 0: - return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},) + return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask},) latent_image = latent_image[:, :, :-num_keyframes] noise_mask = noise_mask[:, :, :-num_keyframes] @@ -268,44 +298,52 @@ class LTXVCropGuides: positive = node_helpers.conditioning_set_values(positive, {"keyframe_idxs": None}) negative = node_helpers.conditioning_set_values(negative, {"keyframe_idxs": None}) - return (positive, negative, {"samples": latent_image, "noise_mask": noise_mask},) + return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask}) -class LTXVConditioning: +class LTXVConditioning(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "frame_rate": ("FLOAT", {"default": 25.0, "min": 0.0, "max": 1000.0, "step": 0.01}), - }} - RETURN_TYPES = ("CONDITIONING", "CONDITIONING") - RETURN_NAMES = ("positive", "negative") - FUNCTION = "append" + def define_schema(cls): + return io.Schema( + node_id="LTXVConditioning", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Float.Input("frame_rate", default=25.0, min=0.0, max=1000.0, step=0.01), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + ], + ) - 
CATEGORY = "conditioning/video_models" - - def append(self, positive, negative, frame_rate): + @classmethod + def execute(cls, positive, negative, frame_rate) -> io.NodeOutput: positive = node_helpers.conditioning_set_values(positive, {"frame_rate": frame_rate}) negative = node_helpers.conditioning_set_values(negative, {"frame_rate": frame_rate}) - return (positive, negative) + return io.NodeOutput(positive, negative) -class ModelSamplingLTXV: +class ModelSamplingLTXV(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "max_shift": ("FLOAT", {"default": 2.05, "min": 0.0, "max": 100.0, "step":0.01}), - "base_shift": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 100.0, "step":0.01}), - }, - "optional": {"latent": ("LATENT",), } - } + def define_schema(cls): + return io.Schema( + node_id="ModelSamplingLTXV", + category="advanced/model", + inputs=[ + io.Model.Input("model"), + io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), + io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01), + io.Latent.Input("latent", optional=True), + ], + outputs=[ + io.Model.Output(), + ], + ) - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" - - CATEGORY = "advanced/model" - - def patch(self, model, max_shift, base_shift, latent=None): + @classmethod + def execute(cls, model, max_shift, base_shift, latent=None) -> io.NodeOutput: m = model.clone() if latent is None: @@ -329,37 +367,41 @@ class ModelSamplingLTXV: model_sampling.set_parameters(shift=shift) m.add_object_patch("model_sampling", model_sampling) - return (m, ) + return io.NodeOutput(m) -class LTXVScheduler: +class LTXVScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "max_shift": ("FLOAT", {"default": 2.05, "min": 0.0, "max": 100.0, "step":0.01}), - "base_shift": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 100.0, "step":0.01}), - "stretch": ("BOOLEAN", { - "default": True, - "tooltip": "Stretch the sigmas to be in the range [terminal, 1]." - }), - "terminal": ( - "FLOAT", - { - "default": 0.1, "min": 0.0, "max": 0.99, "step": 0.01, - "tooltip": "The terminal value of the sigmas after stretching." 
- }, - ), - }, - "optional": {"latent": ("LATENT",), } - } + def define_schema(cls): + return io.Schema( + node_id="LTXVScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), + io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01), + io.Boolean.Input( + id="stretch", + default=True, + tooltip="Stretch the sigmas to be in the range [terminal, 1].", + ), + io.Float.Input( + id="terminal", + default=0.1, + min=0.0, + max=0.99, + step=0.01, + tooltip="The terminal value of the sigmas after stretching.", + ), + io.Latent.Input("latent", optional=True), + ], + outputs=[ + io.Sigmas.Output(), + ], + ) - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" - - FUNCTION = "get_sigmas" - - def get_sigmas(self, steps, max_shift, base_shift, stretch, terminal, latent=None): + @classmethod + def execute(cls, steps, max_shift, base_shift, stretch, terminal, latent=None) -> io.NodeOutput: if latent is None: tokens = 4096 else: @@ -389,7 +431,7 @@ class LTXVScheduler: stretched = 1.0 - (one_minus_z / scale_factor) sigmas[non_zero_mask] = stretched - return (sigmas,) + return io.NodeOutput(sigmas) def encode_single_frame(output_file, image_array: np.ndarray, crf): container = av.open(output_file, "w", format="mp4") @@ -423,52 +465,54 @@ def preprocess(image: torch.Tensor, crf=29): return image image_array = (image[:(image.shape[0] // 2) * 2, :(image.shape[1] // 2) * 2] * 255.0).byte().cpu().numpy() - with io.BytesIO() as output_file: + with BytesIO() as output_file: encode_single_frame(output_file, image_array, crf) video_bytes = output_file.getvalue() - with io.BytesIO(video_bytes) as video_file: + with BytesIO(video_bytes) as video_file: image_array = decode_single_frame(video_file) tensor = torch.tensor(image_array, dtype=image.dtype, device=image.device) / 255.0 return tensor -class LTXVPreprocess: +class LTXVPreprocess(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "img_compression": ( - "INT", - { - "default": 35, - "min": 0, - "max": 100, - "tooltip": "Amount of compression to apply on image.", - }, + def define_schema(cls): + return io.Schema( + node_id="LTXVPreprocess", + category="image", + inputs=[ + io.Image.Input("image"), + io.Int.Input( + id="img_compression", default=35, min=0, max=100, tooltip="Amount of compression to apply on image." 
), - } - } + ], + outputs=[ + io.Image.Output(display_name="output_image"), + ], + ) - FUNCTION = "preprocess" - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("output_image",) - CATEGORY = "image" - - def preprocess(self, image, img_compression): + @classmethod + def execute(cls, image, img_compression) -> io.NodeOutput: output_images = [] for i in range(image.shape[0]): output_images.append(preprocess(image[i], img_compression)) - return (torch.stack(output_images),) + return io.NodeOutput(torch.stack(output_images)) -NODE_CLASS_MAPPINGS = { - "EmptyLTXVLatentVideo": EmptyLTXVLatentVideo, - "LTXVImgToVideo": LTXVImgToVideo, - "ModelSamplingLTXV": ModelSamplingLTXV, - "LTXVConditioning": LTXVConditioning, - "LTXVScheduler": LTXVScheduler, - "LTXVAddGuide": LTXVAddGuide, - "LTXVPreprocess": LTXVPreprocess, - "LTXVCropGuides": LTXVCropGuides, -} +class LtxvExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + EmptyLTXVLatentVideo, + LTXVImgToVideo, + ModelSamplingLTXV, + LTXVConditioning, + LTXVScheduler, + LTXVAddGuide, + LTXVPreprocess, + LTXVCropGuides, + ] + + +async def comfy_entrypoint() -> LtxvExtension: + return LtxvExtension() From e4f99b479a19730bea890567129f4032b4dd4787 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 1 Oct 2025 22:20:30 +0300 Subject: [PATCH 0681/1073] convert nodes_ip2p.pt to V3 schema (#10097) --- comfy_extras/nodes_ip2p.py | 54 +++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/comfy_extras/nodes_ip2p.py b/comfy_extras/nodes_ip2p.py index c2e70a84c..78f29915d 100644 --- a/comfy_extras/nodes_ip2p.py +++ b/comfy_extras/nodes_ip2p.py @@ -1,21 +1,30 @@ import torch -class InstructPixToPixConditioning: +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io + + +class InstructPixToPixConditioning(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "vae": ("VAE", ), - "pixels": ("IMAGE", ), - }} + def define_schema(cls): + return io.Schema( + node_id="InstructPixToPixConditioning", + category="conditioning/instructpix2pix", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Image.Input("pixels"), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) - RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT") - RETURN_NAMES = ("positive", "negative", "latent") - FUNCTION = "encode" - - CATEGORY = "conditioning/instructpix2pix" - - def encode(self, positive, negative, pixels, vae): + @classmethod + def execute(cls, positive, negative, pixels, vae) -> io.NodeOutput: x = (pixels.shape[1] // 8) * 8 y = (pixels.shape[2] // 8) * 8 @@ -38,8 +47,17 @@ class InstructPixToPixConditioning: n = [t[0], d] c.append(n) out.append(c) - return (out[0], out[1], out_latent) + return io.NodeOutput(out[0], out[1], out_latent) + + +class InstructPix2PixExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + InstructPixToPixConditioning, + ] + + +async def comfy_entrypoint() -> InstructPix2PixExtension: + return InstructPix2PixExtension() -NODE_CLASS_MAPPINGS = { - "InstructPixToPixConditioning": InstructPixToPixConditioning, -} From a6f83a4a1a70d720c16d66feb5d87fee5998acdf Mon 
Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 1 Oct 2025 14:19:13 -0700 Subject: [PATCH 0682/1073] Support the new hunyuan vae. (#10150) --- comfy/ldm/hunyuan_video/vae_refiner.py | 112 ++++++++++++++++--------- comfy/sd.py | 70 ++++++++++------ 2 files changed, 116 insertions(+), 66 deletions(-) diff --git a/comfy/ldm/hunyuan_video/vae_refiner.py b/comfy/ldm/hunyuan_video/vae_refiner.py index c6f742710..c2a0b507d 100644 --- a/comfy/ldm/hunyuan_video/vae_refiner.py +++ b/comfy/ldm/hunyuan_video/vae_refiner.py @@ -1,7 +1,7 @@ import torch import torch.nn as nn import torch.nn.functional as F -from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, AttnBlock, VideoConv3d +from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, AttnBlock, VideoConv3d, Normalize import comfy.ops import comfy.ldm.models.autoencoder ops = comfy.ops.disable_weight_init @@ -17,11 +17,12 @@ class RMS_norm(nn.Module): return F.normalize(x, dim=1) * self.scale * self.gamma class DnSmpl(nn.Module): - def __init__(self, ic, oc, tds=True): + def __init__(self, ic, oc, tds=True, refiner_vae=True, op=VideoConv3d): super().__init__() fct = 2 * 2 * 2 if tds else 1 * 2 * 2 assert oc % fct == 0 - self.conv = VideoConv3d(ic, oc // fct, kernel_size=3) + self.conv = op(ic, oc // fct, kernel_size=3, stride=1, padding=1) + self.refiner_vae = refiner_vae self.tds = tds self.gs = fct * ic // oc @@ -30,7 +31,7 @@ class DnSmpl(nn.Module): r1 = 2 if self.tds else 1 h = self.conv(x) - if self.tds: + if self.tds and self.refiner_vae: hf = h[:, :, :1, :, :] b, c, f, ht, wd = hf.shape hf = hf.reshape(b, c, f, ht // 2, 2, wd // 2, 2) @@ -66,6 +67,7 @@ class DnSmpl(nn.Module): sc = torch.cat([xf, xn], dim=2) else: b, c, frms, ht, wd = h.shape + nf = frms // r1 h = h.reshape(b, c, nf, r1, ht // 2, 2, wd // 2, 2) h = h.permute(0, 3, 5, 7, 1, 2, 4, 6) @@ -83,10 +85,11 @@ class DnSmpl(nn.Module): class UpSmpl(nn.Module): - def __init__(self, ic, oc, tus=True): + def __init__(self, ic, oc, tus=True, refiner_vae=True, op=VideoConv3d): super().__init__() fct = 2 * 2 * 2 if tus else 1 * 2 * 2 - self.conv = VideoConv3d(ic, oc * fct, kernel_size=3) + self.conv = op(ic, oc * fct, kernel_size=3, stride=1, padding=1) + self.refiner_vae = refiner_vae self.tus = tus self.rp = fct * oc // ic @@ -95,7 +98,7 @@ class UpSmpl(nn.Module): r1 = 2 if self.tus else 1 h = self.conv(x) - if self.tus: + if self.tus and self.refiner_vae: hf = h[:, :, :1, :, :] b, c, f, ht, wd = hf.shape nc = c // (2 * 2) @@ -148,43 +151,56 @@ class UpSmpl(nn.Module): class Encoder(nn.Module): def __init__(self, in_channels, z_channels, block_out_channels, num_res_blocks, - ffactor_spatial, ffactor_temporal, downsample_match_channel=True, **_): + ffactor_spatial, ffactor_temporal, downsample_match_channel=True, refiner_vae=True, **_): super().__init__() self.z_channels = z_channels self.block_out_channels = block_out_channels self.num_res_blocks = num_res_blocks - self.conv_in = VideoConv3d(in_channels, block_out_channels[0], 3, 1, 1) + self.ffactor_temporal = ffactor_temporal + + self.refiner_vae = refiner_vae + if self.refiner_vae: + conv_op = VideoConv3d + norm_op = RMS_norm + else: + conv_op = ops.Conv3d + norm_op = Normalize + + self.conv_in = conv_op(in_channels, block_out_channels[0], 3, 1, 1) self.down = nn.ModuleList() ch = block_out_channels[0] depth = (ffactor_spatial >> 1).bit_length() - depth_temporal = ((ffactor_spatial // ffactor_temporal) >> 1).bit_length() + depth_temporal = ((ffactor_spatial 
// self.ffactor_temporal) >> 1).bit_length() for i, tgt in enumerate(block_out_channels): stage = nn.Module() stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, out_channels=tgt, temb_channels=0, - conv_op=VideoConv3d, norm_op=RMS_norm) + conv_op=conv_op, norm_op=norm_op) for j in range(num_res_blocks)]) ch = tgt if i < depth: nxt = block_out_channels[i + 1] if i + 1 < len(block_out_channels) and downsample_match_channel else ch - stage.downsample = DnSmpl(ch, nxt, tds=i >= depth_temporal) + stage.downsample = DnSmpl(ch, nxt, tds=i >= depth_temporal, refiner_vae=self.refiner_vae, op=conv_op) ch = nxt self.down.append(stage) self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) - self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=RMS_norm) - self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=conv_op, norm_op=norm_op) + self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=norm_op) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=conv_op, norm_op=norm_op) - self.norm_out = RMS_norm(ch) - self.conv_out = VideoConv3d(ch, z_channels << 1, 3, 1, 1) + self.norm_out = norm_op(ch) + self.conv_out = conv_op(ch, z_channels << 1, 3, 1, 1) self.regul = comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer() def forward(self, x): + if not self.refiner_vae and x.shape[2] == 1: + x = x.expand(-1, -1, self.ffactor_temporal, -1, -1) + x = self.conv_in(x) for stage in self.down: @@ -200,31 +216,42 @@ class Encoder(nn.Module): skip = x.view(b, c // grp, grp, t, h, w).mean(2) out = self.conv_out(F.silu(self.norm_out(x))) + skip - out = self.regul(out)[0] - out = torch.cat((out[:, :, :1], out), dim=2) - out = out.permute(0, 2, 1, 3, 4) - b, f_times_2, c, h, w = out.shape - out = out.reshape(b, f_times_2 // 2, 2 * c, h, w) - out = out.permute(0, 2, 1, 3, 4).contiguous() + if self.refiner_vae: + out = self.regul(out)[0] + + out = torch.cat((out[:, :, :1], out), dim=2) + out = out.permute(0, 2, 1, 3, 4) + b, f_times_2, c, h, w = out.shape + out = out.reshape(b, f_times_2 // 2, 2 * c, h, w) + out = out.permute(0, 2, 1, 3, 4).contiguous() + return out class Decoder(nn.Module): def __init__(self, z_channels, out_channels, block_out_channels, num_res_blocks, - ffactor_spatial, ffactor_temporal, upsample_match_channel=True, **_): + ffactor_spatial, ffactor_temporal, upsample_match_channel=True, refiner_vae=True, **_): super().__init__() block_out_channels = block_out_channels[::-1] self.z_channels = z_channels self.block_out_channels = block_out_channels self.num_res_blocks = num_res_blocks + self.refiner_vae = refiner_vae + if self.refiner_vae: + conv_op = VideoConv3d + norm_op = RMS_norm + else: + conv_op = ops.Conv3d + norm_op = Normalize + ch = block_out_channels[0] - self.conv_in = VideoConv3d(z_channels, ch, 3) + self.conv_in = conv_op(z_channels, ch, kernel_size=3, stride=1, padding=1) self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) - self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=RMS_norm) - self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=VideoConv3d, norm_op=RMS_norm) + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, 
temb_channels=0, conv_op=conv_op, norm_op=norm_op) + self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=norm_op) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=conv_op, norm_op=norm_op) self.up = nn.ModuleList() depth = (ffactor_spatial >> 1).bit_length() @@ -235,25 +262,26 @@ class Decoder(nn.Module): stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, out_channels=tgt, temb_channels=0, - conv_op=VideoConv3d, norm_op=RMS_norm) + conv_op=conv_op, norm_op=norm_op) for j in range(num_res_blocks + 1)]) ch = tgt if i < depth: nxt = block_out_channels[i + 1] if i + 1 < len(block_out_channels) and upsample_match_channel else ch - stage.upsample = UpSmpl(ch, nxt, tus=i < depth_temporal) + stage.upsample = UpSmpl(ch, nxt, tus=i < depth_temporal, refiner_vae=self.refiner_vae, op=conv_op) ch = nxt self.up.append(stage) - self.norm_out = RMS_norm(ch) - self.conv_out = VideoConv3d(ch, out_channels, 3) + self.norm_out = norm_op(ch) + self.conv_out = conv_op(ch, out_channels, 3, stride=1, padding=1) def forward(self, z): - z = z.permute(0, 2, 1, 3, 4) - b, f, c, h, w = z.shape - z = z.reshape(b, f, 2, c // 2, h, w) - z = z.permute(0, 1, 2, 3, 4, 5).reshape(b, f * 2, c // 2, h, w) - z = z.permute(0, 2, 1, 3, 4) - z = z[:, :, 1:] + if self.refiner_vae: + z = z.permute(0, 2, 1, 3, 4) + b, f, c, h, w = z.shape + z = z.reshape(b, f, 2, c // 2, h, w) + z = z.permute(0, 1, 2, 3, 4, 5).reshape(b, f * 2, c // 2, h, w) + z = z.permute(0, 2, 1, 3, 4) + z = z[:, :, 1:] x = self.conv_in(z) + z.repeat_interleave(self.block_out_channels[0] // self.z_channels, 1) x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(x))) @@ -264,4 +292,10 @@ class Decoder(nn.Module): if hasattr(stage, 'upsample'): x = stage.upsample(x) - return self.conv_out(F.silu(self.norm_out(x))) + out = self.conv_out(F.silu(self.norm_out(x))) + + if not self.refiner_vae: + if z.shape[-3] == 1: + out = out[:, :, -1:] + + return out diff --git a/comfy/sd.py b/comfy/sd.py index 2df340739..873ad20f2 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -332,35 +332,51 @@ class VAE: self.first_stage_model = StageC_coder() self.downscale_ratio = 32 self.latent_channels = 16 - elif "decoder.conv_in.weight" in sd and sd['decoder.conv_in.weight'].shape[1] == 64: - ddconfig = {"block_out_channels": [128, 256, 512, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 32, "downsample_match_channel": True, "upsample_match_channel": True} - self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1] - self.downscale_ratio = 32 - self.upscale_ratio = 32 - self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] - self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"}, - encoder_config={'target': "comfy.ldm.hunyuan_video.vae.Encoder", 'params': ddconfig}, - decoder_config={'target': "comfy.ldm.hunyuan_video.vae.Decoder", 'params': ddconfig}) - - self.memory_used_encode = lambda shape, dtype: (700 * shape[2] * shape[3]) * model_management.dtype_size(dtype) - self.memory_used_decode = lambda shape, dtype: (700 * shape[2] * shape[3] * 32 * 32) * model_management.dtype_size(dtype) - elif "decoder.conv_in.weight" in sd: - #default SD1.x/SD2.x VAE parameters - ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 
0.0} - - if 'encoder.down.2.downsample.conv.weight' not in sd and 'decoder.up.3.upsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE - ddconfig['ch_mult'] = [1, 2, 4] - self.downscale_ratio = 4 - self.upscale_ratio = 4 - - self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1] - if 'post_quant_conv.weight' in sd: - self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=sd['post_quant_conv.weight'].shape[1]) - else: + if sd['decoder.conv_in.weight'].shape[1] == 64: + ddconfig = {"block_out_channels": [128, 256, 512, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 32, "downsample_match_channel": True, "upsample_match_channel": True} + self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1] + self.downscale_ratio = 32 + self.upscale_ratio = 32 + self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"}, - encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': ddconfig}, - decoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Decoder", 'params': ddconfig}) + encoder_config={'target': "comfy.ldm.hunyuan_video.vae.Encoder", 'params': ddconfig}, + decoder_config={'target': "comfy.ldm.hunyuan_video.vae.Decoder", 'params': ddconfig}) + + self.memory_used_encode = lambda shape, dtype: (700 * shape[2] * shape[3]) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (700 * shape[2] * shape[3] * 32 * 32) * model_management.dtype_size(dtype) + elif sd['decoder.conv_in.weight'].shape[1] == 32: + ddconfig = {"block_out_channels": [128, 256, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 16, "ffactor_temporal": 4, "downsample_match_channel": True, "upsample_match_channel": True, "refiner_vae": False} + self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1] + self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] + self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 16, 16) + self.upscale_index_formula = (4, 16, 16) + self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 16, 16) + self.downscale_index_formula = (4, 16, 16) + self.latent_dim = 3 + self.not_video = True + self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"}, + encoder_config={'target': "comfy.ldm.hunyuan_video.vae_refiner.Encoder", 'params': ddconfig}, + decoder_config={'target': "comfy.ldm.hunyuan_video.vae_refiner.Decoder", 'params': ddconfig}) + + self.memory_used_encode = lambda shape, dtype: (2800 * shape[-2] * shape[-1]) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (2800 * shape[-3] * shape[-2] * shape[-1] * 16 * 16) * model_management.dtype_size(dtype) + else: + #default SD1.x/SD2.x VAE parameters + ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} + + if 'encoder.down.2.downsample.conv.weight' not in sd and 'decoder.up.3.upsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE + ddconfig['ch_mult'] = [1, 2, 4] + self.downscale_ratio = 4 + self.upscale_ratio = 4 + + self.latent_channels = 
ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1]
+                if 'post_quant_conv.weight' in sd:
+                    self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=sd['post_quant_conv.weight'].shape[1])
+                else:
+                    self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"},
+                                                                encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': ddconfig},
+                                                                decoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Decoder", 'params': ddconfig})
         elif "decoder.layers.1.layers.0.beta" in sd:
             self.first_stage_model = AudioOobleckVAE()
             self.memory_used_encode = lambda shape, dtype: (1000 * shape[2]) * model_management.dtype_size(dtype)

From bb32d4ec3141333df26fcdaee0c3c08e41b7b249 Mon Sep 17 00:00:00 2001
From: Koratahiu
Date: Thu, 2 Oct 2025 00:59:07 +0300
Subject: [PATCH 0683/1073] feat: Add Epsilon Scaling node for exposure bias
 correction (#10132)

---
 comfy_extras/nodes_eps.py | 60 +++++++++++++++++++++++++++++++++++++++
 nodes.py                  |  1 +
 2 files changed, 61 insertions(+)
 create mode 100644 comfy_extras/nodes_eps.py

diff --git a/comfy_extras/nodes_eps.py b/comfy_extras/nodes_eps.py
new file mode 100644
index 000000000..c8818f096
--- /dev/null
+++ b/comfy_extras/nodes_eps.py
@@ -0,0 +1,60 @@
+class EpsilonScaling:
+    """
+    Implements the Epsilon Scaling method from 'Elucidating the Exposure Bias in Diffusion Models'
+    (https://arxiv.org/abs/2308.15321v6).
+
+    This method mitigates exposure bias by scaling the predicted noise during sampling,
+    which can significantly improve sample quality. This implementation uses the "uniform schedule"
+    recommended by the paper for its practicality and effectiveness.
+    """
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "scaling_factor": ("FLOAT", {
+                    "default": 1.005,
+                    "min": 0.5,
+                    "max": 1.5,
+                    "step": 0.001,
+                    "display": "number"
+                }),
+            }
+        }
+
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+
+    CATEGORY = "model_patches/unet"
+
+    def patch(self, model, scaling_factor):
+        # Prevent division by zero, though the UI's min value should prevent this.
+        if scaling_factor == 0:
+            scaling_factor = 1e-9
+
+        def epsilon_scaling_function(args):
+            """
+            This function is applied after the CFG guidance has been calculated.
+            It recalculates the denoised latent by scaling the predicted noise.
+            """
+            denoised = args["denoised"]
+            x = args["input"]
+
+            noise_pred = x - denoised
+
+            scaled_noise_pred = noise_pred / scaling_factor
+
+            new_denoised = x - scaled_noise_pred
+
+            return new_denoised
+
+        # Clone the model patcher to avoid modifying the original model in place
+        model_clone = model.clone()
+
+        model_clone.set_model_sampler_post_cfg_function(epsilon_scaling_function)
+
+        return (model_clone,)
+
+NODE_CLASS_MAPPINGS = {
+    "Epsilon Scaling": EpsilonScaling
+}

diff --git a/nodes.py b/nodes.py
index 1a6784b68..88d712993 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2297,6 +2297,7 @@ async def init_builtin_extra_nodes():
         "nodes_gits.py",
         "nodes_controlnet.py",
         "nodes_hunyuan.py",
+        "nodes_eps.py",
         "nodes_flux.py",
         "nodes_lora_extract.py",
         "nodes_torch_compile.py",
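The node above boils down to one formula: the post-CFG denoised latent is recomputed from a down-scaled noise prediction. A minimal numeric sketch of that arithmetic; the two-element tensors are made-up values, the formula is the one in the node:

```python
import torch

x = torch.tensor([1.00, 0.50])         # noisy latent fed to the model
denoised = torch.tensor([0.20, 0.10])  # denoised prediction after CFG
scaling_factor = 1.005                 # uniform schedule, as in the node above

noise_pred = x - denoised              # implied noise: [0.80, 0.40]
scaled = noise_pred / scaling_factor   # slightly shrunk noise estimate
new_denoised = x - scaled              # nudged toward x: ~[0.2040, 0.1020]
print(new_denoised)
```

With a factor just above 1.0 the per-step correction is tiny, but it compounds over the whole sampling trajectory, which is presumably why the default sits so close to 1.0.

From 911331c06c16aa80633c5438c58edb32dbfdff50 Mon Sep 17 00:00:00 2001
From: rattus128 <46076784+rattus128@users.noreply.github.com>
Date: Thu, 2 Oct 2025 08:40:28 +1000
Subject: [PATCH 0684/1073] sd: fix VAE tiled fallback VRAM leak (#10139)

When the VAE catches a VRAM OOM, it launches the fallback logic straight
from the exception context.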
Python, however, refs the entire call stack that caused the exception,
including any local variables, for the sake of exception reporting and
debugging. In the case of tensors, this can hold references to GBs of
VRAM and prevent the allocated VRAM from being freed.

So dump the except context completely before going back to the VAE via
the tiler, by getting out of the except block with nothing but a flag.

This greatly increases the reliability of the tiler fallback, especially
on low VRAM cards: with the bug, if the leak randomly exceeded the
headroom needed for a single tile, the tiler fallback would OOM and fail
the flow.
---
 comfy/sd.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/comfy/sd.py b/comfy/sd.py
index 873ad20f2..be225ad03 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -652,6 +652,7 @@ class VAE:
     def decode(self, samples_in, vae_options={}):
         self.throw_exception_if_invalid()
         pixel_samples = None
+        do_tile = False
         try:
             memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype)
             model_management.load_models_gpu([self.patcher], memory_required=memory_used, force_full_load=self.disable_offload)
@@ -667,6 +668,13 @@ class VAE:
                     pixel_samples[x:x+batch_number] = out
         except model_management.OOM_EXCEPTION:
             logging.warning("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
+            #NOTE: We don't know what tensors were allocated to stack variables at the time of the
+            #exception and the exception itself refs them all until we get out of this except block.
+            #So we just set a flag for tiler fallback so that tensor gc can happen once the
+            #exception is fully off the books.
+            do_tile = True
+
+        if do_tile:
             dims = samples_in.ndim - 2
             if dims == 1 or self.extra_1d_channel is not None:
                 pixel_samples = self.decode_tiled_1d(samples_in)
@@ -713,6 +721,7 @@ class VAE:
         self.throw_exception_if_invalid()
         pixel_samples = self.vae_encode_crop_pixels(pixel_samples)
         pixel_samples = pixel_samples.movedim(-1, 1)
+        do_tile = False
         if self.latent_dim == 3 and pixel_samples.ndim < 5:
             if not self.not_video:
                 pixel_samples = pixel_samples.movedim(1, 0).unsqueeze(0)
@@ -734,6 +743,13 @@ class VAE:
 
         except model_management.OOM_EXCEPTION:
             logging.warning("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
+            #NOTE: We don't know what tensors were allocated to stack variables at the time of the
+            #exception and the exception itself refs them all until we get out of this except block.
+            #So we just set a flag for tiler fallback so that tensor gc can happen once the
+            #exception is fully off the books.
+            do_tile = True
+
+        if do_tile:
             if self.latent_dim == 3:
                 tile = 256
                 overlap = tile // 4
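The flag pattern the patch relies on is easy to demonstrate outside ComfyUI. A self-contained sketch with hypothetical names standing in for the VAE internals (a bytearray plays the role of a large GPU tensor; any exception type behaves the same way):

```python
import gc

def tiled_fallback():
    print("running tiled fallback")

def allocate_big():
    big = bytearray(100 * 1024 * 1024)  # stand-in for a large tensor
    raise MemoryError("simulated OOM")

def decode():
    do_fallback = False
    try:
        allocate_big()
    except MemoryError:
        # Keep the handler tiny: while execution is inside this block, the
        # in-flight exception's traceback still references allocate_big's
        # frame, and through it the 100 MB buffer.
        do_fallback = True
    # Once the except block exits, the traceback is released, the buffer
    # becomes garbage, and the retry gets its memory back.
    if do_fallback:
        gc.collect()
        tiled_fallback()

decode()
```

From 4965c0e2acf39d84e82cb63dd6cc4400299d0a61 Mon Sep 17 00:00:00 2001
From: rattus128 <46076784+rattus128@users.noreply.github.com>
Date: Thu, 2 Oct 2025 08:42:16 +1000
Subject: [PATCH 0685/1073] WAN: Fix cache VRAM leak on error (#10141)

If this suffers an exception (such as a VRAM OOM) it will leave the
encode() and decode() methods, which skips the cleanup of the WAN
feature cache. The comfy node cache then ultimately keeps a reference to
this object, which in turn refs large tensors from the failed execution.

The feature cache is currently set up as a class variable on the
encoder/decoder; however, the encode and decode functions always clear
it on both entry and exit of normal execution.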
It's likely the design intent is that this is usable as a streaming
encoder where the input comes in batches; however, the functions as they
are today don't support that.

So simplify by bringing the cache back to a local variable, so that if
it does VRAM OOM the cache itself is properly garbage when the
encode()/decode() functions disappear from the stack.
---
 comfy/ldm/wan/vae.py | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/comfy/ldm/wan/vae.py b/comfy/ldm/wan/vae.py
index 791596938..ccbb25822 100644
--- a/comfy/ldm/wan/vae.py
+++ b/comfy/ldm/wan/vae.py
@@ -468,55 +468,46 @@ class WanVAE(nn.Module):
                             attn_scales, self.temperal_upsample, dropout)
 
     def encode(self, x):
-        self.clear_cache()
+        conv_idx = [0]
+        feat_map = [None] * count_conv3d(self.decoder)
         ## cache
         t = x.shape[2]
         iter_ = 1 + (t - 1) // 4
         ## split the x fed to encode along time into chunks of 1, 4, 4, 4, ...
         for i in range(iter_):
-            self._enc_conv_idx = [0]
+            conv_idx = [0]
             if i == 0:
                 out = self.encoder(
                     x[:, :, :1, :, :],
-                    feat_cache=self._enc_feat_map,
-                    feat_idx=self._enc_conv_idx)
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx)
             else:
                 out_ = self.encoder(
                     x[:, :, 1 + 4 * (i - 1):1 + 4 * i, :, :],
-                    feat_cache=self._enc_feat_map,
-                    feat_idx=self._enc_conv_idx)
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx)
                 out = torch.cat([out, out_], 2)
         mu, log_var = self.conv1(out).chunk(2, dim=1)
-        self.clear_cache()
         return mu
 
     def decode(self, z):
-        self.clear_cache()
+        conv_idx = [0]
+        feat_map = [None] * count_conv3d(self.decoder)
         # z: [b,c,t,h,w]
         iter_ = z.shape[2]
        x = self.conv2(z)
         for i in range(iter_):
-            self._conv_idx = [0]
+            conv_idx = [0]
             if i == 0:
                 out = self.decoder(
                     x[:, :, i:i + 1, :, :],
-                    feat_cache=self._feat_map,
-                    feat_idx=self._conv_idx)
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx)
             else:
                 out_ = self.decoder(
                     x[:, :, i:i + 1, :, :],
-                    feat_cache=self._feat_map,
-                    feat_idx=self._conv_idx)
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx)
                 out = torch.cat([out, out_], 2)
         return out
-
-    def clear_cache(self):
-        self._conv_num = count_conv3d(self.decoder)
-        self._conv_idx = [0]
-        self._feat_map = [None] * self._conv_num
-        #cache encode
-        self._enc_conv_num = count_conv3d(self.encoder)
-        self._enc_conv_idx = [0]
-        self._enc_feat_map = [None] * self._enc_conv_num
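The lifetime difference between instance state and locals is easy to reproduce. A hedged sketch with a made-up Codec class, not the real WanVAE:

```python
import torch

class Codec:
    def decode_leaky(self, z):
        # cache stored on the instance: it outlives the call, even a
        # failed one, so whoever holds the Codec holds every cached tensor
        self.feat_map = [torch.zeros(1024, 1024) for _ in range(8)]  # ~32 MB
        raise RuntimeError("simulated VRAM OOM mid-decode")

    def decode_fixed(self, z):
        # cache in a local: once the exception is handled upstream, the
        # list is unreachable and its tensors can be freed
        feat_map = [torch.zeros(1024, 1024) for _ in range(8)]
        raise RuntimeError("simulated VRAM OOM mid-decode")

codec = Codec()
for method in (codec.decode_leaky, codec.decode_fixed):
    try:
        method(None)
    except RuntimeError:
        pass

# the leaky variant still pins its cache through the long-lived codec object
print(hasattr(codec, "feat_map"))  # True
```

From 0e9d1724be327c79ba86159d868f0b57adb8c384 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 1 Oct 2025 21:33:05 -0700
Subject: [PATCH 0686/1073] Add a .bat to the AMD portable to disable smart memory.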
(#10153) --- .ci/windows_amd_base_files/README_VERY_IMPORTANT.txt | 5 ++++- .../run_amd_gpu_disable_smart_memory.bat | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100755 .ci/windows_amd_base_files/run_amd_gpu_disable_smart_memory.bat diff --git a/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt index 570ac3398..96a500be2 100755 --- a/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt @@ -3,10 +3,13 @@ https://www.amd.com/en/resources/support-articles/release-notes/RN-AMDGPU-WINDOW HOW TO RUN: -if you have a AMD gpu: +If you have a AMD gpu: run_amd_gpu.bat +If you have memory issues you can try disabling the smart memory management by running comfyui with: + +run_amd_gpu_disable_smart_memory.bat IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints diff --git a/.ci/windows_amd_base_files/run_amd_gpu_disable_smart_memory.bat b/.ci/windows_amd_base_files/run_amd_gpu_disable_smart_memory.bat new file mode 100755 index 000000000..cece0aeb2 --- /dev/null +++ b/.ci/windows_amd_base_files/run_amd_gpu_disable_smart_memory.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --disable-smart-memory +pause From 8f4ee9984c0c3864290e4fea81cfea2ba281717d Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 2 Oct 2025 23:53:00 +0300 Subject: [PATCH 0687/1073] convert nodes_morphology.py to V3 schema (#10159) --- comfy_extras/nodes_morphology.py | 116 +++++++++++++++++++------------ 1 file changed, 70 insertions(+), 46 deletions(-) diff --git a/comfy_extras/nodes_morphology.py b/comfy_extras/nodes_morphology.py index 075b26c40..67377e1bc 100644 --- a/comfy_extras/nodes_morphology.py +++ b/comfy_extras/nodes_morphology.py @@ -1,24 +1,34 @@ import torch import comfy.model_management +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io from kornia.morphology import dilation, erosion, opening, closing, gradient, top_hat, bottom_hat import kornia.color -class Morphology: +class Morphology(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"image": ("IMAGE",), - "operation": (["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"],), - "kernel_size": ("INT", {"default": 3, "min": 3, "max": 999, "step": 1}), - }} + def define_schema(cls): + return io.Schema( + node_id="Morphology", + display_name="ImageMorphology", + category="image/postprocessing", + inputs=[ + io.Image.Input("image"), + io.Combo.Input( + "operation", + options=["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"], + ), + io.Int.Input("kernel_size", default=3, min=3, max=999, step=1), + ], + outputs=[ + io.Image.Output(), + ], + ) - RETURN_TYPES = ("IMAGE",) - FUNCTION = "process" - - CATEGORY = "image/postprocessing" - - def process(self, image, operation, kernel_size): + @classmethod + def execute(cls, image, operation, kernel_size) -> io.NodeOutput: device = comfy.model_management.get_torch_device() kernel = torch.ones(kernel_size, kernel_size, device=device) image_k = image.to(device).movedim(-1, 1) @@ -39,49 +49,63 @@ class Morphology: else: raise ValueError(f"Invalid operation {operation} for morphology. 
Must be one of 'erode', 'dilate', 'open', 'close', 'gradient', 'tophat', 'bottomhat'") img_out = output.to(comfy.model_management.intermediate_device()).movedim(1, -1) - return (img_out,) + return io.NodeOutput(img_out) -class ImageRGBToYUV: +class ImageRGBToYUV(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "image": ("IMAGE",), - }} + def define_schema(cls): + return io.Schema( + node_id="ImageRGBToYUV", + category="image/batch", + inputs=[ + io.Image.Input("image"), + ], + outputs=[ + io.Image.Output(display_name="Y"), + io.Image.Output(display_name="U"), + io.Image.Output(display_name="V"), + ], + ) - RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE") - RETURN_NAMES = ("Y", "U", "V") - FUNCTION = "execute" - - CATEGORY = "image/batch" - - def execute(self, image): + @classmethod + def execute(cls, image) -> io.NodeOutput: out = kornia.color.rgb_to_ycbcr(image.movedim(-1, 1)).movedim(1, -1) - return (out[..., 0:1].expand_as(image), out[..., 1:2].expand_as(image), out[..., 2:3].expand_as(image)) + return io.NodeOutput(out[..., 0:1].expand_as(image), out[..., 1:2].expand_as(image), out[..., 2:3].expand_as(image)) -class ImageYUVToRGB: +class ImageYUVToRGB(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"Y": ("IMAGE",), - "U": ("IMAGE",), - "V": ("IMAGE",), - }} + def define_schema(cls): + return io.Schema( + node_id="ImageYUVToRGB", + category="image/batch", + inputs=[ + io.Image.Input("Y"), + io.Image.Input("U"), + io.Image.Input("V"), + ], + outputs=[ + io.Image.Output(), + ], + ) - RETURN_TYPES = ("IMAGE",) - FUNCTION = "execute" - - CATEGORY = "image/batch" - - def execute(self, Y, U, V): + @classmethod + def execute(cls, Y, U, V) -> io.NodeOutput: image = torch.cat([torch.mean(Y, dim=-1, keepdim=True), torch.mean(U, dim=-1, keepdim=True), torch.mean(V, dim=-1, keepdim=True)], dim=-1) out = kornia.color.ycbcr_to_rgb(image.movedim(-1, 1)).movedim(1, -1) - return (out,) + return io.NodeOutput(out) -NODE_CLASS_MAPPINGS = { - "Morphology": Morphology, - "ImageRGBToYUV": ImageRGBToYUV, - "ImageYUVToRGB": ImageYUVToRGB, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "Morphology": "ImageMorphology", -} +class MorphologyExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + Morphology, + ImageRGBToYUV, + ImageYUVToRGB, + ] + + +async def comfy_entrypoint() -> MorphologyExtension: + return MorphologyExtension() + From f6e3e9a456127a7e539929f42ea6cac838197879 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 00:50:31 +0300 Subject: [PATCH 0688/1073] fix(api-nodes): made logging path to be smaller (#10156) --- comfy_api_nodes/apis/client.py | 5 +- comfy_api_nodes/apis/request_logger.py | 72 ++++++++++++++++++++------ 2 files changed, 59 insertions(+), 18 deletions(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 0aed906fb..18a694675 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -95,6 +95,7 @@ import aiohttp import asyncio import logging import io +import os import socket from aiohttp.client_exceptions import ClientError, ClientResponseError from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable, Tuple @@ -499,7 +500,9 @@ class ApiClient: else: raise ValueError("File must be BytesIO or str path") - operation_id = f"upload_{upload_url.split('/')[-1]}_{uuid.uuid4().hex[:8]}" + parsed = urlparse(upload_url) + basename = os.path.basename(parsed.path) or 
parsed.netloc or "upload" + operation_id = f"upload_{basename}_{uuid.uuid4().hex[:8]}" request_logger.log_request_response( operation_id=operation_id, request_method="PUT", diff --git a/comfy_api_nodes/apis/request_logger.py b/comfy_api_nodes/apis/request_logger.py index 42901e141..2e0ca5380 100644 --- a/comfy_api_nodes/apis/request_logger.py +++ b/comfy_api_nodes/apis/request_logger.py @@ -4,16 +4,18 @@ import os import datetime import json import logging +import re +import hashlib +from typing import Any + import folder_paths # Get the logger instance logger = logging.getLogger(__name__) + def get_log_directory(): - """ - Ensures the API log directory exists within ComfyUI's temp directory - and returns its path. - """ + """Ensures the API log directory exists within ComfyUI's temp directory and returns its path.""" base_temp_dir = folder_paths.get_temp_directory() log_dir = os.path.join(base_temp_dir, "api_logs") try: @@ -24,42 +26,77 @@ def get_log_directory(): return base_temp_dir return log_dir -def _format_data_for_logging(data): + +def _sanitize_filename_component(name: str) -> str: + if not name: + return "log" + sanitized = re.sub(r"[^A-Za-z0-9._-]+", "_", name) # Replace disallowed characters with underscore + sanitized = sanitized.strip(" ._") # Windows: trailing dots or spaces are not allowed + if not sanitized: + sanitized = "log" + return sanitized + + +def _short_hash(*parts: str, length: int = 10) -> str: + return hashlib.sha1(("|".join(parts)).encode("utf-8")).hexdigest()[:length] + + +def _build_log_filepath(log_dir: str, operation_id: str, request_url: str) -> str: + """Build log filepath. We keep it well under common path length limits aiming for <= 240 characters total.""" + timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f") + slug = _sanitize_filename_component(operation_id) # Best-effort human-readable slug from operation_id + h = _short_hash(operation_id or "", request_url or "") # Short hash ties log to the full operation and URL + + # Compute how much room we have for the slug given the directory length + # Keep total path length reasonably below ~260 on Windows. 
+ max_total_path = 240 + prefix = f"{timestamp}_" + suffix = f"_{h}.log" + if not slug: + slug = "op" + max_filename_len = max(60, max_total_path - len(log_dir) - 1) + max_slug_len = max(8, max_filename_len - len(prefix) - len(suffix)) + if len(slug) > max_slug_len: + slug = slug[:max_slug_len].rstrip(" ._-") + return os.path.join(log_dir, f"{prefix}{slug}{suffix}") + + +def _format_data_for_logging(data: Any) -> str: """Helper to format data (dict, str, bytes) for logging.""" if isinstance(data, bytes): try: - return data.decode('utf-8') # Try to decode as text + return data.decode("utf-8") # Try to decode as text except UnicodeDecodeError: return f"[Binary data of length {len(data)} bytes]" elif isinstance(data, (dict, list)): try: return json.dumps(data, indent=2, ensure_ascii=False) except TypeError: - return str(data) # Fallback for non-serializable objects + return str(data) # Fallback for non-serializable objects return str(data) + def log_request_response( operation_id: str, request_method: str, request_url: str, request_headers: dict | None = None, request_params: dict | None = None, - request_data: any = None, + request_data: Any = None, response_status_code: int | None = None, response_headers: dict | None = None, - response_content: any = None, - error_message: str | None = None + response_content: Any = None, + error_message: str | None = None, ): """ Logs API request and response details to a file in the temp/api_logs directory. + Filenames are sanitized and length-limited for cross-platform safety. + If we still fail to write, we fall back to appending into api.log. """ log_dir = get_log_directory() - timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f") - filename = f"{timestamp}_{operation_id.replace('/', '_').replace(':', '_')}.log" - filepath = os.path.join(log_dir, filename) - - log_content = [] + filepath = _build_log_filepath(log_dir, operation_id, request_url) + log_content: list[str] = [] log_content.append(f"Timestamp: {datetime.datetime.now().isoformat()}") log_content.append(f"Operation ID: {operation_id}") log_content.append("-" * 30 + " REQUEST " + "-" * 30) @@ -69,7 +106,7 @@ def log_request_response( log_content.append(f"Headers:\n{_format_data_for_logging(request_headers)}") if request_params: log_content.append(f"Params:\n{_format_data_for_logging(request_params)}") - if request_data: + if request_data is not None: log_content.append(f"Data/Body:\n{_format_data_for_logging(request_data)}") log_content.append("\n" + "-" * 30 + " RESPONSE " + "-" * 30) @@ -77,7 +114,7 @@ def log_request_response( log_content.append(f"Status Code: {response_status_code}") if response_headers: log_content.append(f"Headers:\n{_format_data_for_logging(response_headers)}") - if response_content: + if response_content is not None: log_content.append(f"Content:\n{_format_data_for_logging(response_content)}") if error_message: log_content.append(f"Error:\n{error_message}") @@ -89,6 +126,7 @@ def log_request_response( except Exception as e: logger.error(f"Error writing API log to {filepath}: {e}") + if __name__ == '__main__': # Example usage (for testing the logger directly) logger.setLevel(logging.DEBUG) From e9364ee279f65d0546fea1796c3cd2e0b7e1965f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 2 Oct 2025 14:57:15 -0700 Subject: [PATCH 0689/1073] Turn on TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL by default. 
(#10168) --- main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/main.py b/main.py index 70696fcc3..35857dba8 100644 --- a/main.py +++ b/main.py @@ -115,6 +115,7 @@ if os.name == "nt": os.environ['MIMALLOC_PURGE_DELAY'] = '0' if __name__ == "__main__": + os.environ['TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL'] = '1' if args.default_device is not None: default_dev = args.default_device devices = list(range(32)) From 1395bce9f707e52ec613eeaa87ea690518cfe0a8 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 01:20:29 +0300 Subject: [PATCH 0690/1073] update example_node to use V3 schema (#9723) --- custom_nodes/example_node.py.example | 161 +++++++++++---------------- 1 file changed, 68 insertions(+), 93 deletions(-) diff --git a/custom_nodes/example_node.py.example b/custom_nodes/example_node.py.example index 29ab2aa72..779c35787 100644 --- a/custom_nodes/example_node.py.example +++ b/custom_nodes/example_node.py.example @@ -1,96 +1,70 @@ -class Example: +from typing_extensions import override + +from comfy_api.latest import ComfyExtension, io + + +class Example(io.ComfyNode): """ - A example node + An example node Class methods ------------- - INPUT_TYPES (dict): - Tell the main program input parameters of nodes. - IS_CHANGED: + define_schema (io.Schema): + Tell the main program the metadata, input, output parameters of nodes. + fingerprint_inputs: optional method to control when the node is re executed. + check_lazy_status: + optional method to control list of input names that need to be evaluated. - Attributes - ---------- - RETURN_TYPES (`tuple`): - The type of each element in the output tuple. - RETURN_NAMES (`tuple`): - Optional: The name of each output in the output tuple. - FUNCTION (`str`): - The name of the entry-point method. For example, if `FUNCTION = "execute"` then it will run Example().execute() - OUTPUT_NODE ([`bool`]): - If this node is an output node that outputs a result/image from the graph. The SaveImage node is an example. - The backend iterates on these output nodes and tries to execute all their parents if their parent graph is properly connected. - Assumed to be False if not present. - CATEGORY (`str`): - The category the node should appear in the UI. - DEPRECATED (`bool`): - Indicates whether the node is deprecated. Deprecated nodes are hidden by default in the UI, but remain - functional in existing workflows that use them. - EXPERIMENTAL (`bool`): - Indicates whether the node is experimental. Experimental nodes are marked as such in the UI and may be subject to - significant changes or removal in future versions. Use with caution in production workflows. - execute(s) -> tuple || None: - The entry point method. The name of this method must be the same as the value of property `FUNCTION`. - For example, if `FUNCTION = "execute"` then this method's name must be `execute`, if `FUNCTION = "foo"` then it must be `foo`. """ - def __init__(self): - pass @classmethod - def INPUT_TYPES(s): + def define_schema(cls) -> io.Schema: """ - Return a dictionary which contains config for all input fields. - Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT". - Input types "INT", "STRING" or "FLOAT" are special values for fields on the node. - The type can be a list for selection. - - Returns: `dict`: - - Key input_fields_group (`string`): Can be either required, hidden or optional. 
A node class must have property `required` - - Value input_fields (`dict`): Contains input fields config: - * Key field_name (`string`): Name of a entry-point method's argument - * Value field_config (`tuple`): - + First value is a string indicate the type of field or a list for selection. - + Second value is a config for type "INT", "STRING" or "FLOAT". + Return a schema which contains all information about the node. + Some types: "Model", "Vae", "Clip", "Conditioning", "Latent", "Image", "Int", "String", "Float", "Combo". + For outputs the "io.Model.Output" should be used, for inputs the "io.Model.Input" can be used. + The type can be a "Combo" - this will be a list for selection. """ - return { - "required": { - "image": ("IMAGE",), - "int_field": ("INT", { - "default": 0, - "min": 0, #Minimum value - "max": 4096, #Maximum value - "step": 64, #Slider's step - "display": "number", # Cosmetic only: display as "number" or "slider" - "lazy": True # Will only be evaluated if check_lazy_status requires it - }), - "float_field": ("FLOAT", { - "default": 1.0, - "min": 0.0, - "max": 10.0, - "step": 0.01, - "round": 0.001, #The value representing the precision to round to, will be set to the step value by default. Can be set to False to disable rounding. - "display": "number", - "lazy": True - }), - "print_to_screen": (["enable", "disable"],), - "string_field": ("STRING", { - "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node - "default": "Hello World!", - "lazy": True - }), - }, - } + return io.Schema( + node_id="Example", + display_name="Example Node", + category="Example", + inputs=[ + io.Image.Input("image"), + io.Int.Input( + "int_field", + min=0, + max=4096, + step=64, # Slider's step + display_mode=io.NumberDisplay.number, # Cosmetic only: display as "number" or "slider" + lazy=True, # Will only be evaluated if check_lazy_status requires it + ), + io.Float.Input( + "float_field", + default=1.0, + min=0.0, + max=10.0, + step=0.01, + round=0.001, #The value representing the precision to round to, will be set to the step value by default. Can be set to False to disable rounding. + display_mode=io.NumberDisplay.number, + lazy=True, + ), + io.Combo.Input("print_to_screen", options=["enable", "disable"]), + io.String.Input( + "string_field", + multiline=False, # True if you want the field to look like the one on the ClipTextEncode node + default="Hello world!", + lazy=True, + ) + ], + outputs=[ + io.Image.Output(), + ], + ) - RETURN_TYPES = ("IMAGE",) - #RETURN_NAMES = ("image_output_name",) - - FUNCTION = "test" - - #OUTPUT_NODE = False - - CATEGORY = "Example" - - def check_lazy_status(self, image, string_field, int_field, float_field, print_to_screen): + @classmethod + def check_lazy_status(cls, image, string_field, int_field, float_field, print_to_screen): """ Return a list of input names that need to be evaluated. 
@@ -107,7 +81,8 @@ class Example: else: return [] - def test(self, image, string_field, int_field, float_field, print_to_screen): + @classmethod + def execute(cls, image, string_field, int_field, float_field, print_to_screen) -> io.NodeOutput: if print_to_screen == "enable": print(f"""Your input contains: string_field aka input text: {string_field} @@ -116,7 +91,7 @@ class Example: """) #do some processing on the image, in this example I just invert it image = 1.0 - image - return (image,) + return io.NodeOutput(image) """ The node will always be re executed if any of the inputs change but @@ -127,7 +102,7 @@ class Example: changes between executions the LoadImage node is executed again. """ #@classmethod - #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen): + #def fingerprint_inputs(s, image, string_field, int_field, float_field, print_to_screen): # return "" # Set the web directory, any .js file in that directory will be loaded by the frontend as a frontend extension @@ -143,13 +118,13 @@ async def get_hello(request): return web.json_response("hello") -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "Example": Example -} +class ExampleExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + Example, + ] -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "Example": "Example Node" -} + +async def comfy_entrypoint() -> ExampleExtension: # ComfyUI calls this to load your extension and its nodes. + return ExampleExtension() From 4ffea0e864275301329ddb5ecc3fbc7211d7a802 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 02:14:28 +0300 Subject: [PATCH 0691/1073] feat(linter, api-nodes): add pylint for comfy_api_nodes folder (#10157) --- .github/workflows/ruff.yml | 25 ++++++++++++++ comfy_api_nodes/apis/__init__.py | 1 + comfy_api_nodes/apis/client.py | 2 +- comfy_api_nodes/apis/rodin_api.py | 4 --- pyproject.toml | 54 +++++++++++++++++++++++++++++++ 5 files changed, 81 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 4c1a02594..b24d86a6b 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -21,3 +21,28 @@ jobs: - name: Run Ruff run: ruff check . 
+ + pylint: + name: Run Pylint + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install requirements + run: | + python -m pip install --upgrade pip + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu + pip install -r requirements.txt + + - name: Install Pylint + run: pip install pylint + + - name: Run Pylint + run: pylint comfy_api_nodes diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 78a23db30..98f9e540d 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -2,6 +2,7 @@ # filename: filtered-openapi.yaml # timestamp: 2025-07-30T08:54:00+00:00 +# pylint: disable from __future__ import annotations from datetime import date, datetime diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 18a694675..79de3c262 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -535,7 +535,7 @@ class ApiClient: request_method="PUT", request_url=upload_url, response_status_code=e.status if hasattr(e, "status") else None, - response_headers=dict(e.headers) if getattr(e, "headers") else None, + response_headers=dict(e.headers) if hasattr(e, "headers") else None, response_content=None, error_message=f"{type(e).__name__}: {str(e)}", ) diff --git a/comfy_api_nodes/apis/rodin_api.py b/comfy_api_nodes/apis/rodin_api.py index 02cf42c29..fc26a6e73 100644 --- a/comfy_api_nodes/apis/rodin_api.py +++ b/comfy_api_nodes/apis/rodin_api.py @@ -52,7 +52,3 @@ class RodinResourceItem(BaseModel): class Rodin3DDownloadResponse(BaseModel): list: List[RodinResourceItem] = Field(..., description="Source List") - - - - diff --git a/pyproject.toml b/pyproject.toml index d0a76c6d0..598af4157 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,3 +22,57 @@ lint.select = [ "F", ] exclude = ["*.ipynb", "**/generated/*.pyi"] + +[tool.pylint] +master.py-version = "3.9" +master.extension-pkg-allow-list = [ + "pydantic", +] +reports.output-format = "colorized" +similarities.ignore-imports = "yes" +messages_control.disable = [ + "missing-module-docstring", + "missing-class-docstring", + "missing-function-docstring", + "line-too-long", + "too-few-public-methods", + "too-many-public-methods", + "too-many-instance-attributes", + "too-many-positional-arguments", + "broad-exception-raised", + "too-many-lines", + "invalid-name", + "unused-argument", + "broad-exception-caught", + "consider-using-with", + "fixme", + "too-many-statements", + "too-many-branches", + "too-many-locals", + "too-many-arguments", + "duplicate-code", + "abstract-method", + "superfluous-parens", + "arguments-differ", + "redefined-builtin", + "unnecessary-lambda", + "dangerous-default-value", + # next warnings should be fixed in future + "bad-classmethod-argument", # Class method should have 'cls' as first argument + "wrong-import-order", # Standard imports should be placed before third party imports + "logging-fstring-interpolation", # Use lazy % formatting in logging functions + "ungrouped-imports", + "unnecessary-pass", + "unidiomatic-typecheck", + "unnecessary-lambda-assignment", + "bad-indentation", + "no-else-return", + "no-else-raise", + "invalid-overridden-method", + "unused-variable", + "pointless-string-statement", + "inconsistent-return-statements", + "import-outside-toplevel", + "reimported", + "redefined-outer-name", +] From 
ed3ca78e080d697b6cf29497c07e14ee9c27a3ac Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:26:34 +0300 Subject: [PATCH 0692/1073] feat(api-nodes): add kling-2-5-turbo to txt2video and img2video nodes (#10155) --- comfy_api_nodes/apis/__init__.py | 2 ++ comfy_api_nodes/nodes_kling.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/comfy_api_nodes/apis/__init__.py b/comfy_api_nodes/apis/__init__.py index 98f9e540d..ee2aa1ce6 100644 --- a/comfy_api_nodes/apis/__init__.py +++ b/comfy_api_nodes/apis/__init__.py @@ -1321,6 +1321,7 @@ class KlingTextToVideoModelName(str, Enum): kling_v1 = 'kling-v1' kling_v1_6 = 'kling-v1-6' kling_v2_1_master = 'kling-v2-1-master' + kling_v2_5_turbo = 'kling-v2-5-turbo' class KlingVideoGenAspectRatio(str, Enum): @@ -1355,6 +1356,7 @@ class KlingVideoGenModelName(str, Enum): kling_v2_master = 'kling-v2-master' kling_v2_1 = 'kling-v2-1' kling_v2_1_master = 'kling-v2-1-master' + kling_v2_5_turbo = 'kling-v2-5-turbo' class KlingVideoResult(BaseModel): diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 5f55b2cc9..d8646f106 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -423,6 +423,8 @@ class KlingTextToVideoNode(KlingNodeBase): "standard mode / 10s duration / kling-v2-master": ("std", "10", "kling-v2-master"), "pro mode / 5s duration / kling-v2-1-master": ("pro", "5", "kling-v2-1-master"), "pro mode / 10s duration / kling-v2-1-master": ("pro", "10", "kling-v2-1-master"), + "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"), + "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"), } @classmethod From 8a293372ecdea0ff8647921eaf3bb10c3d992abf Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:40:27 +0300 Subject: [PATCH 0693/1073] fix(api-nodes): reimport of base64 in Gemini node (#10181) --- comfy_api_nodes/nodes_gemini.py | 1 - pyproject.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index baa379b75..151cb4044 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -490,7 +490,6 @@ class GeminiInputFiles(ComfyNodeABC): # Use base64 string directly, not the data URI with open(file_path, "rb") as f: file_content = f.read() - import base64 base64_str = base64.b64encode(file_content).decode("utf-8") return GeminiPart( diff --git a/pyproject.toml b/pyproject.toml index 598af4157..7952f7f37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,6 +73,5 @@ messages_control.disable = [ "pointless-string-statement", "inconsistent-return-statements", "import-outside-toplevel", - "reimported", "redefined-outer-name", ] From c2c5a7d5f80579bb44c11de0ce6eff94d1c111b9 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:41:06 +0300 Subject: [PATCH 0694/1073] fix(api-nodes): bad indentation in Recraft API node function (#10175) --- comfy_api_nodes/nodes_recraft.py | 78 ++++++++++++++++---------------- pyproject.toml | 1 - 2 files changed, 39 insertions(+), 40 deletions(-) diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index c8516b368..a006104b7 100644 --- a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -38,48 +38,48 @@ from PIL import UnidentifiedImageError async def handle_recraft_file_request( - 
image: torch.Tensor, - path: str, - mask: torch.Tensor=None, - total_pixels=4096*4096, - timeout=1024, - request=None, - auth_kwargs: dict[str,str] = None, - ) -> list[BytesIO]: - """ - Handle sending common Recraft file-only request to get back file bytes. - """ - if request is None: - request = EmptyRequest() + image: torch.Tensor, + path: str, + mask: torch.Tensor=None, + total_pixels=4096*4096, + timeout=1024, + request=None, + auth_kwargs: dict[str,str] = None, +) -> list[BytesIO]: + """ + Handle sending common Recraft file-only request to get back file bytes. + """ + if request is None: + request = EmptyRequest() - files = { - 'image': tensor_to_bytesio(image, total_pixels=total_pixels).read() - } - if mask is not None: - files['mask'] = tensor_to_bytesio(mask, total_pixels=total_pixels).read() + files = { + 'image': tensor_to_bytesio(image, total_pixels=total_pixels).read() + } + if mask is not None: + files['mask'] = tensor_to_bytesio(mask, total_pixels=total_pixels).read() - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=type(request), - response_model=RecraftImageGenerationResponse, - ), - request=request, - files=files, - content_type="multipart/form-data", - auth_kwargs=auth_kwargs, - multipart_parser=recraft_multipart_parser, - ) - response: RecraftImageGenerationResponse = await operation.execute() - all_bytesio = [] - if response.image is not None: - all_bytesio.append(await download_url_to_bytesio(response.image.url, timeout=timeout)) - else: - for data in response.data: - all_bytesio.append(await download_url_to_bytesio(data.url, timeout=timeout)) + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=type(request), + response_model=RecraftImageGenerationResponse, + ), + request=request, + files=files, + content_type="multipart/form-data", + auth_kwargs=auth_kwargs, + multipart_parser=recraft_multipart_parser, + ) + response: RecraftImageGenerationResponse = await operation.execute() + all_bytesio = [] + if response.image is not None: + all_bytesio.append(await download_url_to_bytesio(response.image.url, timeout=timeout)) + else: + for data in response.data: + all_bytesio.append(await download_url_to_bytesio(data.url, timeout=timeout)) - return all_bytesio + return all_bytesio def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, converted_to_check: list[list]=None, is_list=False) -> dict: diff --git a/pyproject.toml b/pyproject.toml index 7952f7f37..240919a43 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,7 +65,6 @@ messages_control.disable = [ "unnecessary-pass", "unidiomatic-typecheck", "unnecessary-lambda-assignment", - "bad-indentation", "no-else-return", "no-else-raise", "invalid-overridden-method", From 3e68bc342cd60b909b4117c1b68a3afc62ef875c Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:43:54 +0300 Subject: [PATCH 0695/1073] convert nodes_torch_compile.py to V3 schema (#10173) --- comfy_extras/nodes_torch_compile.py | 46 +++++++++++++++++++---------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/comfy_extras/nodes_torch_compile.py b/comfy_extras/nodes_torch_compile.py index 605536678..adbeece2f 100644 --- a/comfy_extras/nodes_torch_compile.py +++ b/comfy_extras/nodes_torch_compile.py @@ -1,23 +1,39 @@ +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io from comfy_api.torch_helpers 
import set_torch_compile_wrapper -class TorchCompileModel: +class TorchCompileModel(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "backend": (["inductor", "cudagraphs"],), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="TorchCompileModel", + category="_for_testing", + inputs=[ + io.Model.Input("model"), + io.Combo.Input( + "backend", + options=["inductor", "cudagraphs"], + ), + ], + outputs=[io.Model.Output()], + is_experimental=True, + ) - CATEGORY = "_for_testing" - EXPERIMENTAL = True - - def patch(self, model, backend): + @classmethod + def execute(cls, model, backend) -> io.NodeOutput: m = model.clone() set_torch_compile_wrapper(model=m, backend=backend) - return (m, ) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "TorchCompileModel": TorchCompileModel, -} + +class TorchCompileExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + TorchCompileModel, + ] + + +async def comfy_entrypoint() -> TorchCompileExtension: + return TorchCompileExtension() From d7aa414141f02a456801704a3da323fa2ed8f5cc Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:45:02 +0300 Subject: [PATCH 0696/1073] convert nodes_eps.py to V3 schema (#10172) --- comfy_extras/nodes_eps.py | 62 ++++++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 24 deletions(-) diff --git a/comfy_extras/nodes_eps.py b/comfy_extras/nodes_eps.py index c8818f096..7852d85e5 100644 --- a/comfy_extras/nodes_eps.py +++ b/comfy_extras/nodes_eps.py @@ -1,4 +1,9 @@ -class EpsilonScaling: +from typing_extensions import override + +from comfy_api.latest import ComfyExtension, io + + +class EpsilonScaling(io.ComfyNode): """ Implements the Epsilon Scaling method from 'Elucidating the Exposure Bias in Diffusion Models' (https://arxiv.org/abs/2308.15321v6). @@ -8,26 +13,28 @@ class EpsilonScaling: recommended by the paper for its practicality and effectiveness. """ @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model": ("MODEL",), - "scaling_factor": ("FLOAT", { - "default": 1.005, - "min": 0.5, - "max": 1.5, - "step": 0.001, - "display": "number" - }), - } - } + def define_schema(cls): + return io.Schema( + node_id="Epsilon Scaling", + category="model_patches/unet", + inputs=[ + io.Model.Input("model"), + io.Float.Input( + "scaling_factor", + default=1.005, + min=0.5, + max=1.5, + step=0.001, + display_mode=io.NumberDisplay.number, + ), + ], + outputs=[ + io.Model.Output(), + ], + ) - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" - - CATEGORY = "model_patches/unet" - - def patch(self, model, scaling_factor): + @classmethod + def execute(cls, model, scaling_factor) -> io.NodeOutput: # Prevent division by zero, though the UI's min value should prevent this. 
if scaling_factor == 0: scaling_factor = 1e-9 @@ -53,8 +60,15 @@ class EpsilonScaling: model_clone.set_model_sampler_post_cfg_function(epsilon_scaling_function) - return (model_clone,) + return io.NodeOutput(model_clone) -NODE_CLASS_MAPPINGS = { - "Epsilon Scaling": EpsilonScaling -} + +class EpsilonScalingExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + EpsilonScaling, + ] + +async def comfy_entrypoint() -> EpsilonScalingExtension: + return EpsilonScalingExtension() From 8c26d7bbe6663f589f0a9562921aafb3c48955c6 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:48:21 +0300 Subject: [PATCH 0697/1073] convert nodes_pixverse.py to V3 schema (#10177) --- comfy_api_nodes/nodes_pixverse.py | 471 +++++++++++++++--------------- 1 file changed, 238 insertions(+), 233 deletions(-) diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index 7c5a52feb..eb98e9653 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -1,5 +1,7 @@ from inspect import cleandoc from typing import Optional +from typing_extensions import override +from io import BytesIO from comfy_api_nodes.apis.pixverse_api import ( PixverseTextVideoRequest, PixverseImageVideoRequest, @@ -26,12 +28,11 @@ from comfy_api_nodes.apinode_utils import ( tensor_to_bytesio, validate_string, ) -from comfy.comfy_types.node_typing import IO, ComfyNodeABC from comfy_api.input_impl import VideoFromFile +from comfy_api.latest import ComfyExtension, io as comfy_io import torch import aiohttp -from io import BytesIO AVERAGE_DURATION_T2V = 32 @@ -72,100 +73,101 @@ async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): return response_upload.Resp.img_id -class PixverseTemplateNode: +class PixverseTemplateNode(comfy_io.ComfyNode): """ Select template for PixVerse Video generation. """ - RETURN_TYPES = (PixverseIO.TEMPLATE,) - RETURN_NAMES = ("pixverse_template",) - FUNCTION = "create_template" - CATEGORY = "api node/video/PixVerse" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PixverseTemplateNode", + display_name="PixVerse Template", + category="api node/video/PixVerse", + inputs=[ + comfy_io.Combo.Input("template", options=[list(pixverse_templates.keys())]), + ], + outputs=[comfy_io.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "template": (list(pixverse_templates.keys()),), - } - } - - def create_template(self, template: str): + def execute(cls, template: str) -> comfy_io.NodeOutput: template_id = pixverse_templates.get(template, None) if template_id is None: raise Exception(f"Template '{template}' is not recognized.") # just return the integer - return (template_id,) + return comfy_io.NodeOutput(template_id) -class PixverseTextToVideoNode(ComfyNodeABC): +class PixverseTextToVideoNode(comfy_io.ComfyNode): """ Generates videos based on prompt and output_size. 
""" - RETURN_TYPES = (IO.VIDEO,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/video/PixVerse" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PixverseTextToVideoNode", + display_name="PixVerse Text to Video", + category="api node/video/PixVerse", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the video generation", + ), + comfy_io.Combo.Input( + "aspect_ratio", + options=[ratio.value for ratio in PixverseAspectRatio], + ), + comfy_io.Combo.Input( + "quality", + options=[resolution.value for resolution in PixverseQuality], + default=PixverseQuality.res_540p, + ), + comfy_io.Combo.Input( + "duration_seconds", + options=[dur.value for dur in PixverseDuration], + ), + comfy_io.Combo.Input( + "motion_mode", + options=[mode.value for mode in PixverseMotionMode], + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + control_after_generate=True, + tooltip="Seed for video generation.", + ), + comfy_io.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + comfy_io.Custom(PixverseIO.TEMPLATE).Input( + "pixverse_template", + tooltip="An optional template to influence style of generation, created by the PixVerse Template node.", + optional=True, + ), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the video generation", - }, - ), - "aspect_ratio": ([ratio.value for ratio in PixverseAspectRatio],), - "quality": ( - [resolution.value for resolution in PixverseQuality], - { - "default": PixverseQuality.res_540p, - }, - ), - "duration_seconds": ([dur.value for dur in PixverseDuration],), - "motion_mode": ([mode.value for mode in PixverseMotionMode],), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2147483647, - "control_after_generate": True, - "tooltip": "Seed for video generation.", - }, - ), - }, - "optional": { - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - "pixverse_template": ( - PixverseIO.TEMPLATE, - { - "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node." 
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, aspect_ratio: str, quality: str, @@ -174,9 +176,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): seed, negative_prompt: str = None, pixverse_template: int = None, - unique_id: Optional[str] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -186,6 +186,10 @@ class PixverseTextToVideoNode(ComfyNodeABC): elif duration_seconds != PixverseDuration.dur_5: motion_mode = PixverseMotionMode.normal + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } operation = SynchronousOperation( endpoint=ApiEndpoint( path="/proxy/pixverse/video/text/generate", @@ -203,7 +207,7 @@ class PixverseTextToVideoNode(ComfyNodeABC): template_id=pixverse_template, seed=seed, ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -224,8 +228,8 @@ class PixverseTextToVideoNode(ComfyNodeABC): PixverseStatus.deleted, ], status_extractor=lambda x: x.Resp.status, - auth_kwargs=kwargs, - node_id=unique_id, + auth_kwargs=auth, + node_id=cls.hidden.unique_id, result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_T2V, ) @@ -233,77 +237,75 @@ class PixverseTextToVideoNode(ComfyNodeABC): async with aiohttp.ClientSession() as session: async with session.get(response_poll.Resp.url) as vid_response: - return (VideoFromFile(BytesIO(await vid_response.content.read())),) + return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) -class PixverseImageToVideoNode(ComfyNodeABC): +class PixverseImageToVideoNode(comfy_io.ComfyNode): """ Generates videos based on prompt and output_size. 
""" - RETURN_TYPES = (IO.VIDEO,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/video/PixVerse" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PixverseImageToVideoNode", + display_name="PixVerse Image to Video", + category="api node/video/PixVerse", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("image"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the video generation", + ), + comfy_io.Combo.Input( + "quality", + options=[resolution.value for resolution in PixverseQuality], + default=PixverseQuality.res_540p, + ), + comfy_io.Combo.Input( + "duration_seconds", + options=[dur.value for dur in PixverseDuration], + ), + comfy_io.Combo.Input( + "motion_mode", + options=[mode.value for mode in PixverseMotionMode], + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + control_after_generate=True, + tooltip="Seed for video generation.", + ), + comfy_io.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + comfy_io.Custom(PixverseIO.TEMPLATE).Input( + "pixverse_template", + tooltip="An optional template to influence style of generation, created by the PixVerse Template node.", + optional=True, + ), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the video generation", - }, - ), - "quality": ( - [resolution.value for resolution in PixverseQuality], - { - "default": PixverseQuality.res_540p, - }, - ), - "duration_seconds": ([dur.value for dur in PixverseDuration],), - "motion_mode": ([mode.value for mode in PixverseMotionMode],), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2147483647, - "control_after_generate": True, - "tooltip": "Seed for video generation.", - }, - ), - }, - "optional": { - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - "pixverse_template": ( - PixverseIO.TEMPLATE, - { - "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node." 
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, image: torch.Tensor, prompt: str, quality: str, @@ -312,11 +314,13 @@ class PixverseImageToVideoNode(ComfyNodeABC): seed, negative_prompt: str = None, pixverse_template: int = None, - unique_id: Optional[str] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False) - img_id = await upload_image_to_pixverse(image, auth_kwargs=kwargs) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + img_id = await upload_image_to_pixverse(image, auth_kwargs=auth) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -343,7 +347,7 @@ class PixverseImageToVideoNode(ComfyNodeABC): template_id=pixverse_template, seed=seed, ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -364,8 +368,8 @@ class PixverseImageToVideoNode(ComfyNodeABC): PixverseStatus.deleted, ], status_extractor=lambda x: x.Resp.status, - auth_kwargs=kwargs, - node_id=unique_id, + auth_kwargs=auth, + node_id=cls.hidden.unique_id, result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_I2V, ) @@ -373,72 +377,71 @@ class PixverseImageToVideoNode(ComfyNodeABC): async with aiohttp.ClientSession() as session: async with session.get(response_poll.Resp.url) as vid_response: - return (VideoFromFile(BytesIO(await vid_response.content.read())),) + return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) -class PixverseTransitionVideoNode(ComfyNodeABC): +class PixverseTransitionVideoNode(comfy_io.ComfyNode): """ Generates videos based on prompt and output_size. 
""" - RETURN_TYPES = (IO.VIDEO,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/video/PixVerse" + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PixverseTransitionVideoNode", + display_name="PixVerse Transition Video", + category="api node/video/PixVerse", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("first_frame"), + comfy_io.Image.Input("last_frame"), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the video generation", + ), + comfy_io.Combo.Input( + "quality", + options=[resolution.value for resolution in PixverseQuality], + default=PixverseQuality.res_540p, + ), + comfy_io.Combo.Input( + "duration_seconds", + options=[dur.value for dur in PixverseDuration], + ), + comfy_io.Combo.Input( + "motion_mode", + options=[mode.value for mode in PixverseMotionMode], + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + control_after_generate=True, + tooltip="Seed for video generation.", + ), + comfy_io.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "first_frame": (IO.IMAGE,), - "last_frame": (IO.IMAGE,), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the video generation", - }, - ), - "quality": ( - [resolution.value for resolution in PixverseQuality], - { - "default": PixverseQuality.res_540p, - }, - ), - "duration_seconds": ([dur.value for dur in PixverseDuration],), - "motion_mode": ([mode.value for mode in PixverseMotionMode],), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2147483647, - "control_after_generate": True, - "tooltip": "Seed for video generation.", - }, - ), - }, - "optional": { - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, first_frame: torch.Tensor, last_frame: torch.Tensor, prompt: str, @@ -447,12 +450,14 @@ class PixverseTransitionVideoNode(ComfyNodeABC): motion_mode: str, seed, negative_prompt: str = None, - unique_id: Optional[str] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: validate_string(prompt, strip_whitespace=False) - first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=kwargs) - last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=kwargs) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=auth) + last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=auth) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -479,7 +484,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC): negative_prompt=negative_prompt if negative_prompt else 
None, seed=seed, ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) response_api = await operation.execute() @@ -500,8 +505,8 @@ class PixverseTransitionVideoNode(ComfyNodeABC): PixverseStatus.deleted, ], status_extractor=lambda x: x.Resp.status, - auth_kwargs=kwargs, - node_id=unique_id, + auth_kwargs=auth, + node_id=cls.hidden.unique_id, result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_T2V, ) @@ -509,19 +514,19 @@ class PixverseTransitionVideoNode(ComfyNodeABC): async with aiohttp.ClientSession() as session: async with session.get(response_poll.Resp.url) as vid_response: - return (VideoFromFile(BytesIO(await vid_response.content.read())),) + return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) -NODE_CLASS_MAPPINGS = { - "PixverseTextToVideoNode": PixverseTextToVideoNode, - "PixverseImageToVideoNode": PixverseImageToVideoNode, - "PixverseTransitionVideoNode": PixverseTransitionVideoNode, - "PixverseTemplateNode": PixverseTemplateNode, -} +class PixVerseExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + PixverseTextToVideoNode, + PixverseImageToVideoNode, + PixverseTransitionVideoNode, + PixverseTemplateNode, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "PixverseTextToVideoNode": "PixVerse Text to Video", - "PixverseImageToVideoNode": "PixVerse Image to Video", - "PixverseTransitionVideoNode": "PixVerse Transition Video", - "PixverseTemplateNode": "PixVerse Template", -} + +async def comfy_entrypoint() -> PixVerseExtension: + return PixVerseExtension() From 5c8e986e273d8af8b976fddbaed726e8278cf1fe Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:50:38 +0300 Subject: [PATCH 0698/1073] convert nodes_tomesd.py to V3 schema (#10180) --- comfy_extras/nodes_tomesd.py | 50 +++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/comfy_extras/nodes_tomesd.py b/comfy_extras/nodes_tomesd.py index 9f77c06fc..87bf29b8f 100644 --- a/comfy_extras/nodes_tomesd.py +++ b/comfy_extras/nodes_tomesd.py @@ -1,7 +1,9 @@ #Taken from: https://github.com/dbolya/tomesd import torch -from typing import Tuple, Callable +from typing import Tuple, Callable, Optional +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io import math def do_nothing(x: torch.Tensor, mode:str=None): @@ -144,33 +146,45 @@ def get_functions(x, ratio, original_shape): -class TomePatchModel: +class TomePatchModel(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return io.Schema( + node_id="TomePatchModel", + category="model_patches/unet", + inputs=[ + io.Model.Input("model"), + io.Float.Input("ratio", default=0.3, min=0.0, max=1.0, step=0.01), + ], + outputs=[io.Model.Output()], + ) - CATEGORY = "model_patches/unet" - - def patch(self, model, ratio): - self.u = None + @classmethod + def execute(cls, model, ratio) -> io.NodeOutput: + u: Optional[Callable] = None def tomesd_m(q, k, v, extra_options): + nonlocal u #NOTE: In the reference code get_functions takes x (input of the transformer block) as the argument instead of q #however from my basic testing it seems that using q instead gives better results - m, self.u = get_functions(q, ratio, extra_options["original_shape"]) + 
m, u = get_functions(q, ratio, extra_options["original_shape"]) return m(q), k, v def tomesd_u(n, extra_options): - return self.u(n) + nonlocal u + return u(n) m = model.clone() m.set_model_attn1_patch(tomesd_m) m.set_model_attn1_output_patch(tomesd_u) - return (m, ) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "TomePatchModel": TomePatchModel, -} +class TomePatchModelExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + TomePatchModel, + ] + + +async def comfy_entrypoint() -> TomePatchModelExtension: + return TomePatchModelExtension() From 4614ee09ca1aaca7ee8067d6c5c30695582326ff Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 3 Oct 2025 23:24:42 +0300 Subject: [PATCH 0699/1073] convert nodes_edit_model.py to V3 schema (#10147) --- comfy_extras/nodes_edit_model.py | 46 ++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/comfy_extras/nodes_edit_model.py b/comfy_extras/nodes_edit_model.py index b69f79715..36da66f34 100644 --- a/comfy_extras/nodes_edit_model.py +++ b/comfy_extras/nodes_edit_model.py @@ -1,26 +1,38 @@ import node_helpers +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io -class ReferenceLatent: +class ReferenceLatent(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"conditioning": ("CONDITIONING", ), - }, - "optional": {"latent": ("LATENT", ),} - } + def define_schema(cls): + return io.Schema( + node_id="ReferenceLatent", + category="advanced/conditioning/edit_models", + description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.", + inputs=[ + io.Conditioning.Input("conditioning"), + io.Latent.Input("latent", optional=True), + ], + outputs=[ + io.Conditioning.Output(), + ] + ) - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "append" - - CATEGORY = "advanced/conditioning/edit_models" - DESCRIPTION = "This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images." - - def append(self, conditioning, latent=None): + @classmethod + def execute(cls, conditioning, latent=None) -> io.NodeOutput: if latent is not None: conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [latent["samples"]]}, append=True) - return (conditioning, ) + return io.NodeOutput(conditioning) -NODE_CLASS_MAPPINGS = { - "ReferenceLatent": ReferenceLatent, -} +class EditModelExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + ReferenceLatent, + ] + + +def comfy_entrypoint() -> EditModelExtension: + return EditModelExtension() From 93d859cfaaad150c2a1e5e54c8f14765fa79ecb5 Mon Sep 17 00:00:00 2001 From: Finn-Hecker Date: Fri, 3 Oct 2025 23:32:19 +0200 Subject: [PATCH 0700/1073] Fix type annotation syntax in MotionEncoder_tc __init__ (#10186) ## Summary Fixed incorrect type hint syntax in `MotionEncoder_tc.__init__()` parameter list. ## Changes - Line 647: Changed `num_heads=int` to `num_heads: int` - This corrects the parameter annotation from a default value assignment to proper type hint syntax ## Details The parameter was using assignment syntax (`=`) instead of type annotation syntax (`:`), which would incorrectly set the default value to the `int` class itself rather than annotating the expected type. 
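To make the fixed bug class concrete, a toy illustration (hypothetical functions, not the model code):

```python
def bad(num_heads=int):    # assignment: the *default value* is the int class itself
    return num_heads

def good(num_heads: int):  # annotation: a required, type-hinted parameter
    return num_heads

print(bad())                 # <class 'int'> -- callable with no argument, masking mistakes
print(bad.__defaults__)      # (<class 'int'>,)
print(good.__annotations__)  # {'num_heads': <class 'int'>}
```

With the assignment form, a caller that omits the argument silently receives a type object instead of an integer, which is why the annotation form is the correct one.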
--- comfy/ldm/wan/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 0dc650ced..90c347d3d 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -903,7 +903,7 @@ class MotionEncoder_tc(nn.Module): def __init__(self, in_dim: int, hidden_dim: int, - num_heads=int, + num_heads: int, need_global=True, dtype=None, device=None, From 08726b64fe767f47bf074a05bedd6db45314c4c9 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 3 Oct 2025 15:22:43 -0700 Subject: [PATCH 0701/1073] Update amd nightly command in readme. (#10189) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8f24a33ee..1224a6176 100644 --- a/README.md +++ b/README.md @@ -211,9 +211,9 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins ```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.4``` -This is the command to install the nightly with ROCm 6.4 which might have some performance improvements: +This is the command to install the nightly with ROCm 7.0 which might have some performance improvements: -```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.4``` +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm7.0``` ### Intel GPUs (Windows and Linux) From bbd683098e7d18700f025b2f0a4f6a44a3176602 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 3 Oct 2025 20:37:43 -0700 Subject: [PATCH 0702/1073] Add instructions to install nightly AMD pytorch for windows. (#10190) * Add instructions to install nightly AMD pytorch for windows. * Update README.md --- README.md | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 1224a6176..4a5a17cda 100644 --- a/README.md +++ b/README.md @@ -206,7 +206,8 @@ Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints Put your VAE in: models/vae -### AMD GPUs (Linux only) +### AMD GPUs (Linux) + AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: ```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.4``` @@ -215,6 +216,23 @@ This is the command to install the nightly with ROCm 7.0 which might have some p ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm7.0``` + +### AMD GPUs (Experimental: Windows and Linux), RDNA 3, 3.5 and 4 only. + +These have less hardware support than the builds above but they work on windows. You also need to install the pytorch version specific to your hardware. + +RDNA 3 (RX 7000 series): + +```pip install --pre torch torchvision torchaudio --index-url https://rocm.nightlies.amd.com/v2/gfx110X-dgpu/``` + +RDNA 3.5 (Strix halo/Ryzen AI Max+ 365): + +```pip install --pre torch torchvision torchaudio --index-url https://rocm.nightlies.amd.com/v2/gfx1151/``` + +RDNA 4 (RX 9000 series): + +```pip install --pre torch torchvision torchaudio --index-url https://rocm.nightlies.amd.com/v2/gfx120X-all/``` + ### Intel GPUs (Windows and Linux) (Option 1) Intel Arc GPU users can install native PyTorch with torch.xpu support using pip. 
More information can be found [here](https://pytorch.org/docs/main/notes/get_start_xpu.html)
@@ -270,12 +288,6 @@ You can install ComfyUI in Apple Mac silicon (M1 or M2) with any recent macOS ve
 
 > **Note**: Remember to add your models, VAE, LoRAs etc. to the corresponding Comfy folders, as discussed in [ComfyUI manual installation](#manual-install-windows-linux).
 
-#### DirectML (AMD Cards on Windows)
-
-This is very badly supported and is not recommended. There are some unofficial builds of pytorch ROCm on windows that exist that will give you a much better experience than this. This readme will be updated once official pytorch ROCm builds for windows come out.
-
-```pip install torch-directml``` Then you can launch ComfyUI with: ```python main.py --directml```
-
 #### Ascend NPUs
 
 For models compatible with Ascend Extension for PyTorch (torch_npu). To get started, ensure your environment meets the prerequisites outlined on the [installation](https://ascend.github.io/docs/sources/ascend/quick_install.html) page. Here's a step-by-step guide tailored to your platform and installation method:

From 22f99fb97edaccf450152c8bf7c4068c1d331899 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Sat, 4 Oct 2025 22:22:57 +0300
Subject: [PATCH 0703/1073] fix(api-nodes): enable 2 more pylint rules, removed
 unneeded code (#10192)

---
 comfy_api_nodes/nodes_gemini.py     |  3 +-
 comfy_api_nodes/nodes_moonvalley.py | 49 ++---------------------------
 pyproject.toml                      |  2 --
 3 files changed, 4 insertions(+), 50 deletions(-)

diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py
index 151cb4044..309e9a2d2 100644
--- a/comfy_api_nodes/nodes_gemini.py
+++ b/comfy_api_nodes/nodes_gemini.py
@@ -39,6 +39,7 @@ from comfy_api_nodes.apinode_utils import (
     tensor_to_base64_string,
     bytesio_to_image_tensor,
 )
+from comfy_api.util import VideoContainer, VideoCodec
 
 GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini"
 
@@ -310,7 +311,7 @@ class GeminiNode(ComfyNodeABC):
         Returns:
             List of GeminiPart objects containing the encoded video.
         """
-        from comfy_api.util import VideoContainer, VideoCodec
+
         base_64_string = video_to_base64_string(
             video_input,
             container_format=VideoContainer.MP4,
diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py
index 08e838fef..6467dd614 100644
--- a/comfy_api_nodes/nodes_moonvalley.py
+++ b/comfy_api_nodes/nodes_moonvalley.py
@@ -2,11 +2,7 @@ import logging
 from typing import Any, Callable, Optional, TypeVar
 import torch
 from typing_extensions import override
-from comfy_api_nodes.util.validation_utils import (
-    get_image_dimensions,
-    validate_image_dimensions,
-)
-
+from comfy_api_nodes.util.validation_utils import validate_image_dimensions
 
 from comfy_api_nodes.apis import (
     MoonvalleyTextToVideoRequest,
@@ -132,47 +128,6 @@ def validate_prompts(
     return True
 
 
-def validate_input_media(width, height, with_frame_conditioning, num_frames_in=None):
-    # inference validation
-    # T = num_frames
-    # in all cases, the following must be true: T divisible by 16 and H,W by 8. in addition...
- # with image conditioning: H*W must be divisible by 8192 - # without image conditioning: T divisible by 32 - if num_frames_in and not num_frames_in % 16 == 0: - return False, ("The input video total frame count must be divisible by 16!") - - if height % 8 != 0 or width % 8 != 0: - return False, ( - f"Height ({height}) and width ({width}) must be " "divisible by 8" - ) - - if with_frame_conditioning: - if (height * width) % 8192 != 0: - return False, ( - f"Height * width ({height * width}) must be " - "divisible by 8192 for frame conditioning" - ) - else: - if num_frames_in and not num_frames_in % 32 == 0: - return False, ("The input video total frame count must be divisible by 32!") - - -def validate_input_image( - image: torch.Tensor, with_frame_conditioning: bool = False -) -> None: - """ - Validates the input image adheres to the expectations of the API: - - The image resolution should not be less than 300*300px - - The aspect ratio of the image should be between 1:2.5 ~ 2.5:1 - - """ - height, width = get_image_dimensions(image) - validate_input_media(width, height, with_frame_conditioning) - validate_image_dimensions( - image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH - ) - - def validate_video_to_video_input(video: VideoInput) -> VideoInput: """ Validates and processes video input for Moonvalley Video-to-Video generation. @@ -499,7 +454,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): seed: int, steps: int, ) -> comfy_io.NodeOutput: - validate_input_image(image, True) + validate_image_dimensions(image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH) validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) width_height = parse_width_height_from_res(resolution) diff --git a/pyproject.toml b/pyproject.toml index 240919a43..383e7d10a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,5 @@ messages_control.disable = [ "invalid-overridden-method", "unused-variable", "pointless-string-statement", - "inconsistent-return-statements", - "import-outside-toplevel", "redefined-outer-name", ] From 2ed74f7ac78d3ff713d0a8583695c31055914b76 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 4 Oct 2025 22:29:09 +0300 Subject: [PATCH 0704/1073] convert nodes_rodin.py to V3 schema (#10195) --- comfy_api_nodes/nodes_rodin.py | 941 +++++++++++++++++---------------- 1 file changed, 478 insertions(+), 463 deletions(-) diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index 633ac46d3..bd758f762 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -7,14 +7,15 @@ Rodin API docs: https://developer.hyper3d.ai/ from __future__ import annotations from inspect import cleandoc -from comfy.comfy_types.node_typing import IO import folder_paths as comfy_paths import aiohttp import os import asyncio -import io import logging import math +from typing import Optional +from io import BytesIO +from typing_extensions import override from PIL import Image from comfy_api_nodes.apis.rodin_api import ( Rodin3DGenerateRequest, @@ -31,428 +32,436 @@ from comfy_api_nodes.apis.client import ( SynchronousOperation, PollingOperation, ) +from comfy_api.latest import ComfyExtension, io as comfy_io -COMMON_PARAMETERS = { - "Seed": ( - IO.INT, - { - "default":0, - "min":0, - "max":65535, - "display":"number" - } +COMMON_PARAMETERS = [ + comfy_io.Int.Input( + "Seed", + default=0, + min=0, + max=65535, + 
display_mode=comfy_io.NumberDisplay.number, + optional=True, ), - "Material_Type": ( - IO.COMBO, - { - "options": ["PBR", "Shaded"], - "default": "PBR" - } + comfy_io.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True), + comfy_io.Combo.Input( + "Polygon_count", + options=["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"], + default="18K-Quad", + optional=True, ), - "Polygon_count": ( - IO.COMBO, - { - "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"], - "default": "18K-Quad" - } +] + + +def get_quality_mode(poly_count): + polycount = poly_count.split("-") + poly = polycount[1] + count = polycount[0] + if poly == "Triangle": + mesh_mode = "Raw" + elif poly == "Quad": + mesh_mode = "Quad" + else: + mesh_mode = "Quad" + + if count == "4K": + quality_override = 4000 + elif count == "8K": + quality_override = 8000 + elif count == "18K": + quality_override = 18000 + elif count == "50K": + quality_override = 50000 + elif count == "2K": + quality_override = 2000 + elif count == "20K": + quality_override = 20000 + elif count == "150K": + quality_override = 150000 + elif count == "500K": + quality_override = 500000 + else: + quality_override = 18000 + + return mesh_mode, quality_override + + +def tensor_to_filelike(tensor, max_pixels: int = 2048*2048): + """ + Converts a PyTorch tensor to a file-like object. + + Args: + - tensor (torch.Tensor): A tensor representing an image of shape (H, W, C) + where C is the number of channels (3 for RGB), H is height, and W is width. + + Returns: + - io.BytesIO: A file-like object containing the image data. + """ + array = tensor.cpu().numpy() + array = (array * 255).astype('uint8') + image = Image.fromarray(array, 'RGB') + + original_width, original_height = image.size + original_pixels = original_width * original_height + if original_pixels > max_pixels: + scale = math.sqrt(max_pixels / original_pixels) + new_width = int(original_width * scale) + new_height = int(original_height * scale) + else: + new_width, new_height = original_width, original_height + + if new_width != original_width or new_height != original_height: + image = image.resize((new_width, new_height), Image.Resampling.LANCZOS) + + img_byte_arr = BytesIO() + image.save(img_byte_arr, format='PNG') # PNG is used for lossless compression + img_byte_arr.seek(0) + return img_byte_arr + + +async def create_generate_task( + images=None, + seed=1, + material="PBR", + quality_override=18000, + tier="Regular", + mesh_mode="Quad", + TAPose = False, + auth_kwargs: Optional[dict[str, str]] = None, +): + if images is None: + raise Exception("Rodin 3D generate requires at least 1 image.") + if len(images) > 5: + raise Exception("Rodin 3D generate requires up to 5 image.") + + path = "/proxy/rodin/api/v2/rodin" + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=path, + method=HttpMethod.POST, + request_model=Rodin3DGenerateRequest, + response_model=Rodin3DGenerateResponse, + ), + request=Rodin3DGenerateRequest( + seed=seed, + tier=tier, + material=material, + quality_override=quality_override, + mesh_mode=mesh_mode, + TAPose=TAPose, + ), + files=[ + ( + "images", + open(image, "rb") if isinstance(image, str) else tensor_to_filelike(image) + ) + for image in images if image is not None + ], + content_type="multipart/form-data", + auth_kwargs=auth_kwargs, ) -} -def create_task_error(response: Rodin3DGenerateResponse): - """Check if the response has error""" - return hasattr(response, "error") + response = await 
operation.execute() + + if hasattr(response, "error"): + error_message = f"Rodin3D Create 3D generate Task Failed. Message: {response.message}, error: {response.error}" + logging.error(error_message) + raise Exception(error_message) + + logging.info("[ Rodin3D API - Submit Jobs ] Submit Generate Task Success!") + subscription_key = response.jobs.subscription_key + task_uuid = response.uuid + logging.info(f"[ Rodin3D API - Submit Jobs ] UUID: {task_uuid}") + return task_uuid, subscription_key -class Rodin3DAPI: - """ - Generate 3D Assets using Rodin API - """ - RETURN_TYPES = (IO.STRING,) - RETURN_NAMES = ("3D Model Path",) - CATEGORY = "api node/3d/Rodin" - DESCRIPTION = cleandoc(__doc__ or "") - FUNCTION = "api_call" - API_NODE = True - - def tensor_to_filelike(self, tensor, max_pixels: int = 2048*2048): - """ - Converts a PyTorch tensor to a file-like object. - - Args: - - tensor (torch.Tensor): A tensor representing an image of shape (H, W, C) - where C is the number of channels (3 for RGB), H is height, and W is width. - - Returns: - - io.BytesIO: A file-like object containing the image data. - """ - array = tensor.cpu().numpy() - array = (array * 255).astype('uint8') - image = Image.fromarray(array, 'RGB') - - original_width, original_height = image.size - original_pixels = original_width * original_height - if original_pixels > max_pixels: - scale = math.sqrt(max_pixels / original_pixels) - new_width = int(original_width * scale) - new_height = int(original_height * scale) - else: - new_width, new_height = original_width, original_height - - if new_width != original_width or new_height != original_height: - image = image.resize((new_width, new_height), Image.Resampling.LANCZOS) - - img_byte_arr = io.BytesIO() - image.save(img_byte_arr, format='PNG') # PNG is used for lossless compression - img_byte_arr.seek(0) - return img_byte_arr - - def check_rodin_status(self, response: Rodin3DCheckStatusResponse) -> str: - has_failed = any(job.status == JobStatus.Failed for job in response.jobs) - all_done = all(job.status == JobStatus.Done for job in response.jobs) - status_list = [str(job.status) for job in response.jobs] - logging.info(f"[ Rodin3D API - CheckStatus ] Generate Status: {status_list}") - if has_failed: - logging.error(f"[ Rodin3D API - CheckStatus ] Generate Failed: {status_list}, Please try again.") - raise Exception("[ Rodin3D API ] Generate Failed, Please Try again.") - elif all_done: - return "DONE" - else: - return "Generating" - - async def create_generate_task(self, images=None, seed=1, material="PBR", quality_override=18000, tier="Regular", mesh_mode="Quad", TAPose = False, **kwargs): - if images is None: - raise Exception("Rodin 3D generate requires at least 1 image.") - if len(images) > 5: - raise Exception("Rodin 3D generate requires up to 5 image.") - - path = "/proxy/rodin/api/v2/rodin" - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=Rodin3DGenerateRequest, - response_model=Rodin3DGenerateResponse, - ), - request=Rodin3DGenerateRequest( - seed=seed, - tier=tier, - material=material, - quality_override=quality_override, - mesh_mode=mesh_mode, - TAPose=TAPose, - ), - files=[ - ( - "images", - open(image, "rb") if isinstance(image, str) else self.tensor_to_filelike(image) - ) - for image in images if image is not None - ], - content_type = "multipart/form-data", - auth_kwargs=kwargs, - ) - - response = await operation.execute() - - if create_task_error(response): - error_message = f"Rodin3D Create 3D 
generate Task Failed. Message: {response.message}, error: {response.error}" - logging.error(error_message) - raise Exception(error_message) - - logging.info("[ Rodin3D API - Submit Jobs ] Submit Generate Task Success!") - subscription_key = response.jobs.subscription_key - task_uuid = response.uuid - logging.info(f"[ Rodin3D API - Submit Jobs ] UUID: {task_uuid}") - return task_uuid, subscription_key - - async def poll_for_task_status(self, subscription_key, **kwargs) -> Rodin3DCheckStatusResponse: - - path = "/proxy/rodin/api/v2/status" - - poll_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path = path, - method=HttpMethod.POST, - request_model=Rodin3DCheckStatusRequest, - response_model=Rodin3DCheckStatusResponse, - ), - request=Rodin3DCheckStatusRequest( - subscription_key = subscription_key - ), - completed_statuses=["DONE"], - failed_statuses=["FAILED"], - status_extractor=self.check_rodin_status, - poll_interval=3.0, - auth_kwargs=kwargs, - ) - - logging.info("[ Rodin3D API - CheckStatus ] Generate Start!") - - return await poll_operation.execute() - - async def get_rodin_download_list(self, uuid, **kwargs) -> Rodin3DDownloadResponse: - logging.info("[ Rodin3D API - Downloading ] Generate Successfully!") - - path = "/proxy/rodin/api/v2/download" - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=Rodin3DDownloadRequest, - response_model=Rodin3DDownloadResponse, - ), - request=Rodin3DDownloadRequest( - task_uuid=uuid - ), - auth_kwargs=kwargs - ) - - return await operation.execute() - - def get_quality_mode(self, poly_count): - polycount = poly_count.split("-") - poly = polycount[1] - count = polycount[0] - if poly == "Triangle": - mesh_mode = "Raw" - elif poly == "Quad": - mesh_mode = "Quad" - else: - mesh_mode = "Quad" - - if count == "4K": - quality_override = 4000 - elif count == "8K": - quality_override = 8000 - elif count == "18K": - quality_override = 18000 - elif count == "50K": - quality_override = 50000 - elif count == "2K": - quality_override = 2000 - elif count == "20K": - quality_override = 20000 - elif count == "150K": - quality_override = 150000 - elif count == "500K": - quality_override = 500000 - else: - quality_override = 18000 - - return mesh_mode, quality_override - - async def download_files(self, url_list, task_uuid): - save_path = os.path.join(comfy_paths.get_output_directory(), f"Rodin3D_{task_uuid}") - os.makedirs(save_path, exist_ok=True) - model_file_path = None - async with aiohttp.ClientSession() as session: - for i in url_list.list: - url = i.url - file_name = i.name - file_path = os.path.join(save_path, file_name) - if file_path.endswith(".glb"): - model_file_path = file_path - logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}") - max_retries = 5 - for attempt in range(max_retries): - try: - async with session.get(url) as resp: - resp.raise_for_status() - with open(file_path, "wb") as f: - async for chunk in resp.content.iter_chunked(32 * 1024): - f.write(chunk) - break - except Exception as e: - logging.info(f"[ Rodin3D API - download_files ] Error downloading {file_path}:{e}") - if attempt < max_retries - 1: - logging.info("Retrying...") - await asyncio.sleep(2) - else: - logging.info( - "[ Rodin3D API - download_files ] Failed to download %s after %s attempts.", - file_path, - max_retries, - ) - - return model_file_path +def check_rodin_status(response: Rodin3DCheckStatusResponse) -> str: + all_done = all(job.status == JobStatus.Done for job in 
response.jobs) + status_list = [str(job.status) for job in response.jobs] + logging.info(f"[ Rodin3D API - CheckStatus ] Generate Status: {status_list}") + if any(job.status == JobStatus.Failed for job in response.jobs): + logging.error(f"[ Rodin3D API - CheckStatus ] Generate Failed: {status_list}, Please try again.") + raise Exception("[ Rodin3D API ] Generate Failed, Please Try again.") + if all_done: + return "DONE" + return "Generating" -class Rodin3D_Regular(Rodin3DAPI): +async def poll_for_task_status( + subscription_key, auth_kwargs: Optional[dict[str, str]] = None, +) -> Rodin3DCheckStatusResponse: + poll_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path="/proxy/rodin/api/v2/status", + method=HttpMethod.POST, + request_model=Rodin3DCheckStatusRequest, + response_model=Rodin3DCheckStatusResponse, + ), + request=Rodin3DCheckStatusRequest(subscription_key=subscription_key), + completed_statuses=["DONE"], + failed_statuses=["FAILED"], + status_extractor=check_rodin_status, + poll_interval=3.0, + auth_kwargs=auth_kwargs, + ) + logging.info("[ Rodin3D API - CheckStatus ] Generate Start!") + return await poll_operation.execute() + + +async def get_rodin_download_list(uuid, auth_kwargs: Optional[dict[str, str]] = None) -> Rodin3DDownloadResponse: + logging.info("[ Rodin3D API - Downloading ] Generate Successfully!") + operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/rodin/api/v2/download", + method=HttpMethod.POST, + request_model=Rodin3DDownloadRequest, + response_model=Rodin3DDownloadResponse, + ), + request=Rodin3DDownloadRequest(task_uuid=uuid), + auth_kwargs=auth_kwargs, + ) + return await operation.execute() + + +async def download_files(url_list, task_uuid): + save_path = os.path.join(comfy_paths.get_output_directory(), f"Rodin3D_{task_uuid}") + os.makedirs(save_path, exist_ok=True) + model_file_path = None + async with aiohttp.ClientSession() as session: + for i in url_list.list: + url = i.url + file_name = i.name + file_path = os.path.join(save_path, file_name) + if file_path.endswith(".glb"): + model_file_path = file_path + logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}") + max_retries = 5 + for attempt in range(max_retries): + try: + async with session.get(url) as resp: + resp.raise_for_status() + with open(file_path, "wb") as f: + async for chunk in resp.content.iter_chunked(32 * 1024): + f.write(chunk) + break + except Exception as e: + logging.info(f"[ Rodin3D API - download_files ] Error downloading {file_path}:{e}") + if attempt < max_retries - 1: + logging.info("Retrying...") + await asyncio.sleep(2) + else: + logging.info( + "[ Rodin3D API - download_files ] Failed to download %s after %s attempts.", + file_path, + max_retries, + ) + return model_file_path + + +class Rodin3D_Regular(comfy_io.ComfyNode): + """Generate 3D Assets using Rodin API""" + @classmethod - def INPUT_TYPES(s): - return { - "required": { - "Images": - ( - IO.IMAGE, - { - "forceInput":True, - } - ) - }, - "optional": { - **COMMON_PARAMETERS - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Rodin3D_Regular", + display_name="Rodin 3D Generate - Regular Generate", + category="api node/3d/Rodin", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("Images"), + *COMMON_PARAMETERS, + ], + outputs=[comfy_io.String.Output(display_name="3D Model Path")], + hidden=[ + 
comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + ], + is_api_node=True, + ) - async def api_call( - self, + @classmethod + async def execute( + cls, Images, Seed, Material_Type, Polygon_count, - **kwargs - ): + ) -> comfy_io.NodeOutput: tier = "Regular" num_images = Images.shape[0] m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality_override = self.get_quality_mode(Polygon_count) - task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, - quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, - **kwargs) - await self.poll_for_task_status(subscription_key, **kwargs) - download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list, task_uuid) - - return (model,) - - -class Rodin3D_Detail(Rodin3DAPI): - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "Images": - ( - IO.IMAGE, - { - "forceInput":True, - } - ) - }, - "optional": { - **COMMON_PARAMETERS - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, + mesh_mode, quality_override = get_quality_mode(Polygon_count) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, } + task_uuid, subscription_key = await create_generate_task( + images=m_images, + seed=Seed, + material=Material_Type, + quality_override=quality_override, + tier=tier, + mesh_mode=mesh_mode, + auth_kwargs=auth, + ) + await poll_for_task_status(subscription_key, auth_kwargs=auth) + download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + model = await download_files(download_list, task_uuid) - async def api_call( - self, + return comfy_io.NodeOutput(model) + + +class Rodin3D_Detail(comfy_io.ComfyNode): + """Generate 3D Assets using Rodin API""" + + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Rodin3D_Detail", + display_name="Rodin 3D Generate - Detail Generate", + category="api node/3d/Rodin", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("Images"), + *COMMON_PARAMETERS, + ], + outputs=[comfy_io.String.Output(display_name="3D Model Path")], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, Images, Seed, Material_Type, Polygon_count, - **kwargs - ): + ) -> comfy_io.NodeOutput: tier = "Detail" num_images = Images.shape[0] m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality_override = self.get_quality_mode(Polygon_count) - task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, - quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, - **kwargs) - await self.poll_for_task_status(subscription_key, **kwargs) - download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list, task_uuid) - - return (model,) - - -class Rodin3D_Smooth(Rodin3DAPI): - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "Images": - ( - IO.IMAGE, - { - "forceInput":True, - } - ) - }, - "optional": { - **COMMON_PARAMETERS - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, + mesh_mode, quality_override = get_quality_mode(Polygon_count) + auth = { + "auth_token": 
cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, } + task_uuid, subscription_key = await create_generate_task( + images=m_images, + seed=Seed, + material=Material_Type, + quality_override=quality_override, + tier=tier, + mesh_mode=mesh_mode, + auth_kwargs=auth, + ) + await poll_for_task_status(subscription_key, auth_kwargs=auth) + download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + model = await download_files(download_list, task_uuid) - async def api_call( - self, + return comfy_io.NodeOutput(model) + + +class Rodin3D_Smooth(comfy_io.ComfyNode): + """Generate 3D Assets using Rodin API""" + + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Rodin3D_Smooth", + display_name="Rodin 3D Generate - Smooth Generate", + category="api node/3d/Rodin", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("Images"), + *COMMON_PARAMETERS, + ], + outputs=[comfy_io.String.Output(display_name="3D Model Path")], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, Images, Seed, Material_Type, Polygon_count, - **kwargs - ): + ) -> comfy_io.NodeOutput: tier = "Smooth" num_images = Images.shape[0] m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality_override = self.get_quality_mode(Polygon_count) - task_uuid, subscription_key = await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, - quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, - **kwargs) - await self.poll_for_task_status(subscription_key, **kwargs) - download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list, task_uuid) - - return (model,) - - -class Rodin3D_Sketch(Rodin3DAPI): - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "Images": - ( - IO.IMAGE, - { - "forceInput":True, - } - ) - }, - "optional": { - "Seed": - ( - IO.INT, - { - "default":0, - "min":0, - "max":65535, - "display":"number" - } - ) - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, + mesh_mode, quality_override = get_quality_mode(Polygon_count) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, } + task_uuid, subscription_key = await create_generate_task( + images=m_images, + seed=Seed, + material=Material_Type, + quality_override=quality_override, + tier=tier, + mesh_mode=mesh_mode, + auth_kwargs=auth, + ) + await poll_for_task_status(subscription_key, auth_kwargs=auth) + download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + model = await download_files(download_list, task_uuid) - async def api_call( - self, + return comfy_io.NodeOutput(model) + + +class Rodin3D_Sketch(comfy_io.ComfyNode): + """Generate 3D Assets using Rodin API""" + + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Rodin3D_Sketch", + display_name="Rodin 3D Generate - Sketch Generate", + category="api node/3d/Rodin", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("Images"), + comfy_io.Int.Input( + "Seed", + default=0, + min=0, + max=65535, + display_mode=comfy_io.NumberDisplay.number, + optional=True, + ), + ], + outputs=[comfy_io.String.Output(display_name="3D Model Path")], + hidden=[ + 
comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, Images, Seed, - **kwargs - ): + ) -> comfy_io.NodeOutput: tier = "Sketch" num_images = Images.shape[0] m_images = [] @@ -461,104 +470,110 @@ class Rodin3D_Sketch(Rodin3DAPI): material_type = "PBR" quality_override = 18000 mesh_mode = "Quad" - task_uuid, subscription_key = await self.create_generate_task( - images=m_images, seed=Seed, material=material_type, quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, **kwargs - ) - await self.poll_for_task_status(subscription_key, **kwargs) - download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list, task_uuid) - - return (model,) - -class Rodin3D_Gen2(Rodin3DAPI): - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "Images": - ( - IO.IMAGE, - { - "forceInput":True, - } - ) - }, - "optional": { - "Seed": ( - IO.INT, - { - "default":0, - "min":0, - "max":65535, - "display":"number" - } - ), - "Material_Type": ( - IO.COMBO, - { - "options": ["PBR", "Shaded"], - "default": "PBR" - } - ), - "Polygon_count": ( - IO.COMBO, - { - "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "2K-Triangle", "20K-Triangle", "150K-Triangle", "500K-Triangle"], - "default": "500K-Triangle" - } - ), - "TAPose": ( - IO.BOOLEAN, - { - "default": False, - } - ) - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, } + task_uuid, subscription_key = await create_generate_task( + images=m_images, + seed=Seed, + material=material_type, + quality_override=quality_override, + tier=tier, + mesh_mode=mesh_mode, + auth_kwargs=auth, + ) + await poll_for_task_status(subscription_key, auth_kwargs=auth) + download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + model = await download_files(download_list, task_uuid) - async def api_call( - self, + return comfy_io.NodeOutput(model) + + +class Rodin3D_Gen2(comfy_io.ComfyNode): + """Generate 3D Assets using Rodin API""" + + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Rodin3D_Gen2", + display_name="Rodin 3D Generate - Gen-2 Generate", + category="api node/3d/Rodin", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + comfy_io.Image.Input("Images"), + comfy_io.Int.Input( + "Seed", + default=0, + min=0, + max=65535, + display_mode=comfy_io.NumberDisplay.number, + optional=True, + ), + comfy_io.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True), + comfy_io.Combo.Input( + "Polygon_count", + options=["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "2K-Triangle", "20K-Triangle", "150K-Triangle", "500K-Triangle"], + default="500K-Triangle", + optional=True, + ), + comfy_io.Boolean.Input("TAPose", default=False), + ], + outputs=[comfy_io.String.Output(display_name="3D Model Path")], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, Images, Seed, Material_Type, Polygon_count, TAPose, - **kwargs - ): + ) -> comfy_io.NodeOutput: tier = "Gen-2" num_images = Images.shape[0] m_images = [] for i in range(num_images): m_images.append(Images[i]) - mesh_mode, quality_override = self.get_quality_mode(Polygon_count) - task_uuid, subscription_key = 
await self.create_generate_task(images=m_images, seed=Seed, material=Material_Type, - quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, TAPose=TAPose, - **kwargs) - await self.poll_for_task_status(subscription_key, **kwargs) - download_list = await self.get_rodin_download_list(task_uuid, **kwargs) - model = await self.download_files(download_list, task_uuid) + mesh_mode, quality_override = get_quality_mode(Polygon_count) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + task_uuid, subscription_key = await create_generate_task( + images=m_images, + seed=Seed, + material=Material_Type, + quality_override=quality_override, + tier=tier, + mesh_mode=mesh_mode, + TAPose=TAPose, + auth_kwargs=auth, + ) + await poll_for_task_status(subscription_key, auth_kwargs=auth) + download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + model = await download_files(download_list, task_uuid) - return (model,) + return comfy_io.NodeOutput(model) -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "Rodin3D_Regular": Rodin3D_Regular, - "Rodin3D_Detail": Rodin3D_Detail, - "Rodin3D_Smooth": Rodin3D_Smooth, - "Rodin3D_Sketch": Rodin3D_Sketch, - "Rodin3D_Gen2": Rodin3D_Gen2, -} -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "Rodin3D_Regular": "Rodin 3D Generate - Regular Generate", - "Rodin3D_Detail": "Rodin 3D Generate - Detail Generate", - "Rodin3D_Smooth": "Rodin 3D Generate - Smooth Generate", - "Rodin3D_Sketch": "Rodin 3D Generate - Sketch Generate", - "Rodin3D_Gen2": "Rodin 3D Generate - Gen-2 Generate", -} +class Rodin3DExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + Rodin3D_Regular, + Rodin3D_Detail, + Rodin3D_Smooth, + Rodin3D_Sketch, + Rodin3D_Gen2, + ] + + +async def comfy_entrypoint() -> Rodin3DExtension: + return Rodin3DExtension() From b1fa1922df597af759150f4e26ecb276c9753ee4 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 4 Oct 2025 22:33:48 +0300 Subject: [PATCH 0705/1073] convert nodes_stable3d.py to V3 schema (#10204) --- comfy_extras/nodes_stable3d.py | 149 +++++++++++++++++++-------------- 1 file changed, 86 insertions(+), 63 deletions(-) diff --git a/comfy_extras/nodes_stable3d.py b/comfy_extras/nodes_stable3d.py index be2e34c28..c6d8a683d 100644 --- a/comfy_extras/nodes_stable3d.py +++ b/comfy_extras/nodes_stable3d.py @@ -1,6 +1,8 @@ import torch import nodes import comfy.utils +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io def camera_embeddings(elevation, azimuth): elevation = torch.as_tensor([elevation]) @@ -20,26 +22,31 @@ def camera_embeddings(elevation, azimuth): return embeddings -class StableZero123_Conditioning: +class StableZero123_Conditioning(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "clip_vision": ("CLIP_VISION",), - "init_image": ("IMAGE",), - "vae": ("VAE",), - "width": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}), - "azimuth": ("FLOAT", 
{"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}), - }} - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") + def define_schema(cls): + return io.Schema( + node_id="StableZero123_Conditioning", + category="conditioning/3d_models", + inputs=[ + io.ClipVision.Input("clip_vision"), + io.Image.Input("init_image"), + io.Vae.Input("vae"), + io.Int.Input("width", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Float.Input("elevation", default=0.0, min=-180.0, max=180.0, step=0.1, round=False), + io.Float.Input("azimuth", default=0.0, min=-180.0, max=180.0, step=0.1, round=False) + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent") + ] + ) - FUNCTION = "encode" - - CATEGORY = "conditioning/3d_models" - - def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth): + @classmethod + def execute(cls, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth) -> io.NodeOutput: output = clip_vision.encode_image(init_image) pooled = output.image_embeds.unsqueeze(0) pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) @@ -51,30 +58,35 @@ class StableZero123_Conditioning: positive = [[cond, {"concat_latent_image": t}]] negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]] latent = torch.zeros([batch_size, 4, height // 8, width // 8]) - return (positive, negative, {"samples":latent}) + return io.NodeOutput(positive, negative, {"samples":latent}) -class StableZero123_Conditioning_Batched: +class StableZero123_Conditioning_Batched(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "clip_vision": ("CLIP_VISION",), - "init_image": ("IMAGE",), - "vae": ("VAE",), - "width": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}), - "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}), - "elevation_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}), - "azimuth_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": False}), - }} - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") + def define_schema(cls): + return io.Schema( + node_id="StableZero123_Conditioning_Batched", + category="conditioning/3d_models", + inputs=[ + io.ClipVision.Input("clip_vision"), + io.Image.Input("init_image"), + io.Vae.Input("vae"), + io.Int.Input("width", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Float.Input("elevation", default=0.0, min=-180.0, max=180.0, step=0.1, round=False), + io.Float.Input("azimuth", default=0.0, min=-180.0, max=180.0, step=0.1, round=False), + io.Float.Input("elevation_batch_increment", 
default=0.0, min=-180.0, max=180.0, step=0.1, round=False), + io.Float.Input("azimuth_batch_increment", default=0.0, min=-180.0, max=180.0, step=0.1, round=False) + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent") + ] + ) - FUNCTION = "encode" - - CATEGORY = "conditioning/3d_models" - - def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment): + @classmethod + def execute(cls, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment) -> io.NodeOutput: output = clip_vision.encode_image(init_image) pooled = output.image_embeds.unsqueeze(0) pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) @@ -93,27 +105,32 @@ class StableZero123_Conditioning_Batched: positive = [[cond, {"concat_latent_image": t}]] negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]] latent = torch.zeros([batch_size, 4, height // 8, width // 8]) - return (positive, negative, {"samples":latent, "batch_index": [0] * batch_size}) + return io.NodeOutput(positive, negative, {"samples":latent, "batch_index": [0] * batch_size}) -class SV3D_Conditioning: +class SV3D_Conditioning(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "clip_vision": ("CLIP_VISION",), - "init_image": ("IMAGE",), - "vae": ("VAE",), - "width": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), - "video_frames": ("INT", {"default": 21, "min": 1, "max": 4096}), - "elevation": ("FLOAT", {"default": 0.0, "min": -90.0, "max": 90.0, "step": 0.1, "round": False}), - }} - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") + def define_schema(cls): + return io.Schema( + node_id="SV3D_Conditioning", + category="conditioning/3d_models", + inputs=[ + io.ClipVision.Input("clip_vision"), + io.Image.Input("init_image"), + io.Vae.Input("vae"), + io.Int.Input("width", default=576, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=576, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("video_frames", default=21, min=1, max=4096), + io.Float.Input("elevation", default=0.0, min=-90.0, max=90.0, step=0.1, round=False) + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent") + ] + ) - FUNCTION = "encode" - - CATEGORY = "conditioning/3d_models" - - def encode(self, clip_vision, init_image, vae, width, height, video_frames, elevation): + @classmethod + def execute(cls, clip_vision, init_image, vae, width, height, video_frames, elevation) -> io.NodeOutput: output = clip_vision.encode_image(init_image) pooled = output.image_embeds.unsqueeze(0) pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) @@ -133,11 +150,17 @@ class SV3D_Conditioning: positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]] negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t), "elevation": elevations, "azimuth": azimuths}]] latent = torch.zeros([video_frames, 4, height // 8, width // 8]) - 
return (positive, negative, {"samples":latent}) + return io.NodeOutput(positive, negative, {"samples":latent}) -NODE_CLASS_MAPPINGS = { - "StableZero123_Conditioning": StableZero123_Conditioning, - "StableZero123_Conditioning_Batched": StableZero123_Conditioning_Batched, - "SV3D_Conditioning": SV3D_Conditioning, -} +class Stable3DExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + StableZero123_Conditioning, + StableZero123_Conditioning_Batched, + SV3D_Conditioning, + ] + +async def comfy_entrypoint() -> Stable3DExtension: + return Stable3DExtension() From caf07331ff1b20f4104b9693ed244d6e22f80b5a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 4 Oct 2025 19:05:05 -0700 Subject: [PATCH 0706/1073] Remove soundfile dependency. No more torchaudio load or save. (#10210) --- comfy_extras/nodes_audio.py | 2 +- requirements.txt | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index 51c8b9dd9..1c868fcba 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -360,7 +360,7 @@ class RecordAudio: def load(self, audio): audio_path = folder_paths.get_annotated_filepath(audio) - waveform, sample_rate = torchaudio.load(audio_path) + waveform, sample_rate = load(audio_path) audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate} return (audio, ) diff --git a/requirements.txt b/requirements.txt index 588c5dcf0..6c28f9478 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,6 +25,5 @@ av>=14.2.0 #non essential dependencies: kornia>=0.7.1 spandrel -soundfile pydantic~=2.0 pydantic-settings~=2.0 From 187f43696dd58f252075d2e3c6873706eb6b5fa1 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 5 Oct 2025 09:34:18 +0300 Subject: [PATCH 0707/1073] fix(api-nodes): disable "std" mode for Kling2.5-turbo (#10212) --- comfy_api_nodes/nodes_kling.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index d8646f106..44fccc0c7 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -712,6 +712,9 @@ class KlingImage2VideoNode(KlingNodeBase): # Camera control type for image 2 video is always `simple` camera_control.type = KlingCameraControlType.simple + if mode == "std" and model_name == KlingVideoGenModelName.kling_v2_5_turbo.value: + mode = "pro" # October 5: currently "std" mode is not supported for this model + initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_IMAGE_TO_VIDEO, From 195e0b063950f585fe584c5ce7b0b689f8d20ff8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 5 Oct 2025 12:41:19 -0700 Subject: [PATCH 0708/1073] Remove useless code. 
(#10223) --- comfy/ldm/ace/vae/music_dcae_pipeline.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/comfy/ldm/ace/vae/music_dcae_pipeline.py b/comfy/ldm/ace/vae/music_dcae_pipeline.py index af81280eb..3c8830c17 100644 --- a/comfy/ldm/ace/vae/music_dcae_pipeline.py +++ b/comfy/ldm/ace/vae/music_dcae_pipeline.py @@ -23,8 +23,6 @@ class MusicDCAE(torch.nn.Module): else: self.source_sample_rate = source_sample_rate - # self.resampler = torchaudio.transforms.Resample(source_sample_rate, 44100) - self.transform = transforms.Compose([ transforms.Normalize(0.5, 0.5), ]) @@ -37,10 +35,6 @@ class MusicDCAE(torch.nn.Module): self.scale_factor = 0.1786 self.shift_factor = -1.9091 - def load_audio(self, audio_path): - audio, sr = torchaudio.load(audio_path) - return audio, sr - def forward_mel(self, audios): mels = [] for i in range(len(audios)): @@ -73,10 +67,8 @@ class MusicDCAE(torch.nn.Module): latent = self.dcae.encoder(mel.unsqueeze(0)) latents.append(latent) latents = torch.cat(latents, dim=0) - # latent_lengths = (audio_lengths / sr * 44100 / 512 / self.time_dimention_multiple).long() latents = (latents - self.shift_factor) * self.scale_factor return latents - # return latents, latent_lengths @torch.no_grad() def decode(self, latents, audio_lengths=None, sr=None): @@ -91,9 +83,7 @@ class MusicDCAE(torch.nn.Module): wav = self.vocoder.decode(mels[0]).squeeze(1) if sr is not None: - # resampler = torchaudio.transforms.Resample(44100, sr).to(latents.device).to(latents.dtype) wav = torchaudio.functional.resample(wav, 44100, sr) - # wav = resampler(wav) else: sr = 44100 pred_wavs.append(wav) @@ -101,7 +91,6 @@ class MusicDCAE(torch.nn.Module): if audio_lengths is not None: pred_wavs = [wav[:, :length].cpu() for wav, length in zip(pred_wavs, audio_lengths)] return torch.stack(pred_wavs) - # return sr, pred_wavs def forward(self, audios, audio_lengths=None, sr=None): latents, latent_lengths = self.encode(audios=audios, audio_lengths=audio_lengths, sr=sr) From 7326e46deeab97219cad32d0624991f9ffea4fe5 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 7 Oct 2025 01:57:00 +0800 Subject: [PATCH 0709/1073] Update template to 0.1.93 (#10235) * Update template to 0.1.92 * Update template to 0.1.93 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6c28f9478..db0486960 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.27.7 -comfyui-workflow-templates==0.1.91 +comfyui-workflow-templates==0.1.93 comfyui-embedded-docs==0.2.6 torch torchsde From 6bd3f8eb9ff2d7c74e8ca75ad1f854a6b256b714 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 6 Oct 2025 14:49:04 -0400 Subject: [PATCH 0710/1073] ComfyUI version 0.3.63 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index ac76fbe35..c3257d4bf 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.62" +__version__ = "0.3.63" diff --git a/pyproject.toml b/pyproject.toml index 383e7d10a..a9e3de0c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.62" +version = "0.3.63" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 6ae35158013e50698e680344ab1f54de0d59fef0 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 7 Oct 2025 02:05:57 +0300 Subject: [PATCH 0711/1073] fix(api-nodes): enable more pylint rules (#10213) --- comfy_api_nodes/apinode_utils.py | 2 +- comfy_api_nodes/nodes_moonvalley.py | 3 +-- comfy_api_nodes/nodes_recraft.py | 8 ++++---- pyproject.toml | 6 +----- 4 files changed, 7 insertions(+), 12 deletions(-) diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index 37438f835..5ac3b92aa 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -152,7 +152,7 @@ def validate_aspect_ratio( raise TypeError( f"Aspect ratio cannot reduce to any less than {minimum_ratio_str} ({minimum_ratio}), but was {aspect_ratio} ({calculated_ratio})." ) - elif calculated_ratio > maximum_ratio: + if calculated_ratio > maximum_ratio: raise TypeError( f"Aspect ratio cannot reduce to any greater than {maximum_ratio_str} ({maximum_ratio}), but was {aspect_ratio} ({calculated_ratio})." ) diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 6467dd614..55471a69d 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -473,7 +473,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): height=width_height["height"], use_negative_prompts=True, ) - """Upload image to comfy backend to have a URL available for further processing""" + # Get MIME type from tensor - assuming PNG format for image tensors mime_type = "image/png" @@ -591,7 +591,6 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): validated_video = validate_video_to_video_input(video) video_url = await upload_video_to_comfyapi(validated_video, auth_kwargs=auth) - """Validate prompts and inference input""" validate_prompts(prompt, negative_prompt) # Only include motion_intensity for Motion Transfer diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index a006104b7..0bbb551b8 100644 --- a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -107,7 +107,7 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co # if list already exists exists, just extend list with data for check_list in lists_to_check: for conv_tuple in check_list: - if conv_tuple[0] == parent_key and type(conv_tuple[1]) is list: + if conv_tuple[0] == parent_key and isinstance(conv_tuple[1], list): conv_tuple[1].append(formatter(data)) return True return False @@ -119,7 +119,7 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co if formatter is None: formatter = lambda v: v # Multipart representation of value - if type(data) is not dict: + if not isinstance(data, dict): # if list already exists exists, just extend list with data added = handle_converted_lists(data, parent_key, converted_to_check) if added: @@ -136,9 +136,9 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co for key, value in data.items(): current_key = key if parent_key is None else f"{parent_key}[{key}]" - if type(value) is dict: + if isinstance(value, dict): 
converted.extend(recraft_multipart_parser(value, current_key, formatter, next_check).items()) - elif type(value) is list: + elif isinstance(value, list): for ind, list_value in enumerate(value): iter_key = f"{current_key}[]" converted.extend(recraft_multipart_parser(list_value, iter_key, formatter, next_check, is_list=True).items()) diff --git a/pyproject.toml b/pyproject.toml index a9e3de0c6..abd1a5f5c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,18 +57,14 @@ messages_control.disable = [ "redefined-builtin", "unnecessary-lambda", "dangerous-default-value", + "invalid-overridden-method", # next warnings should be fixed in future "bad-classmethod-argument", # Class method should have 'cls' as first argument "wrong-import-order", # Standard imports should be placed before third party imports "logging-fstring-interpolation", # Use lazy % formatting in logging functions "ungrouped-imports", "unnecessary-pass", - "unidiomatic-typecheck", "unnecessary-lambda-assignment", "no-else-return", - "no-else-raise", - "invalid-overridden-method", "unused-variable", - "pointless-string-statement", - "redefined-outer-name", ] From a49007a7b07abfdcb10bc10c23514c48935ea914 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 7 Oct 2025 02:13:43 +0300 Subject: [PATCH 0712/1073] fix(api-nodes): allow negative_prompt PixVerse to be multiline (#10196) --- comfy_api_nodes/nodes_pixverse.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index eb98e9653..2c91bbc65 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -146,7 +146,7 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode): comfy_io.String.Input( "negative_prompt", default="", - force_input=True, + multiline=True, tooltip="An optional text description of undesired elements on an image.", optional=True, ), @@ -284,7 +284,7 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode): comfy_io.String.Input( "negative_prompt", default="", - force_input=True, + multiline=True, tooltip="An optional text description of undesired elements on an image.", optional=True, ), @@ -425,7 +425,7 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode): comfy_io.String.Input( "negative_prompt", default="", - force_input=True, + multiline=True, tooltip="An optional text description of undesired elements on an image.", optional=True, ), From e77e0a8f8fdcdc53deb8207e0d5b16ca56824a4b Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 7 Oct 2025 02:20:26 +0300 Subject: [PATCH 0713/1073] convert nodes_pika.py to V3 schema (#10216) --- comfy_api_nodes/nodes_pika.py | 779 ++++++++++++++++------------------ 1 file changed, 373 insertions(+), 406 deletions(-) diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index a8dc43cb3..0a9f04cc2 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -5,14 +5,16 @@ Pika API docs: https://pika-827374fb.mintlify.app/api-reference """ from __future__ import annotations -import io +from io import BytesIO import logging from typing import Optional, TypeVar +from enum import Enum import numpy as np import torch -from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeOptions +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.input_impl import VideoFromFile from 
comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput from comfy_api_nodes.apinode_utils import ( @@ -20,7 +22,6 @@ from comfy_api_nodes.apinode_utils import ( tensor_to_bytesio, ) from comfy_api_nodes.apis import ( - IngredientsMode, PikaBodyGenerate22C2vGenerate22PikascenesPost, PikaBodyGenerate22I2vGenerate22I2vPost, PikaBodyGenerate22KeyframeGenerate22PikaframesPost, @@ -28,10 +29,7 @@ from comfy_api_nodes.apis import ( PikaBodyGeneratePikadditionsGeneratePikadditionsPost, PikaBodyGeneratePikaffectsGeneratePikaffectsPost, PikaBodyGeneratePikaswapsGeneratePikaswapsPost, - PikaDurationEnum, - Pikaffect, PikaGenerateResponse, - PikaResolutionEnum, PikaVideoResponse, ) from comfy_api_nodes.apis.client import ( @@ -41,7 +39,6 @@ from comfy_api_nodes.apis.client import ( PollingOperation, SynchronousOperation, ) -from comfy_api_nodes.mapper_utils import model_field_to_node_input R = TypeVar("R") @@ -58,6 +55,35 @@ PATH_PIKASCENES = f"/proxy/pika/generate/{PIKA_API_VERSION}/pikascenes" PATH_VIDEO_GET = "/proxy/pika/videos" +class PikaDurationEnum(int, Enum): + integer_5 = 5 + integer_10 = 10 + + +class PikaResolutionEnum(str, Enum): + field_1080p = "1080p" + field_720p = "720p" + + +class Pikaffect(str, Enum): + Cake_ify = "Cake-ify" + Crumble = "Crumble" + Crush = "Crush" + Decapitate = "Decapitate" + Deflate = "Deflate" + Dissolve = "Dissolve" + Explode = "Explode" + Eye_pop = "Eye-pop" + Inflate = "Inflate" + Levitate = "Levitate" + Melt = "Melt" + Peel = "Peel" + Poke = "Poke" + Squish = "Squish" + Ta_da = "Ta-da" + Tear = "Tear" + + class PikaApiError(Exception): """Exception for Pika API errors.""" @@ -74,155 +100,121 @@ def is_valid_initial_response(response: PikaGenerateResponse) -> bool: return hasattr(response, "video_id") and response.video_id is not None -class PikaNodeBase(ComfyNodeABC): - """Base class for Pika nodes.""" +async def poll_for_task_status( + task_id: str, + auth_kwargs: Optional[dict[str, str]] = None, + node_id: Optional[str] = None, +) -> PikaGenerateResponse: + polling_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"{PATH_VIDEO_GET}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=PikaVideoResponse, + ), + completed_statuses=[ + "finished", + ], + failed_statuses=["failed", "cancelled"], + status_extractor=lambda response: ( + response.status.value if response.status else None + ), + progress_extractor=lambda response: ( + response.progress if hasattr(response, "progress") else None + ), + auth_kwargs=auth_kwargs, + result_url_extractor=lambda response: ( + response.url if hasattr(response, "url") else None + ), + node_id=node_id, + estimated_duration=60 + ) + return await polling_operation.execute() - @classmethod - def get_base_inputs_types( - cls, request_model - ) -> dict[str, tuple[IO, InputTypeOptions]]: - """Get the base required inputs types common to all Pika nodes.""" - return { - "prompt_text": model_field_to_node_input( - IO.STRING, - request_model, - "promptText", - multiline=True, - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - request_model, - "negativePrompt", - multiline=True, - ), - "seed": model_field_to_node_input( - IO.INT, - request_model, - "seed", - min=0, - max=0xFFFFFFFF, - control_after_generate=True, - ), - "resolution": model_field_to_node_input( - IO.COMBO, - request_model, - "resolution", - enum_type=PikaResolutionEnum, - ), - "duration": model_field_to_node_input( - IO.COMBO, - request_model, - "duration", - 
enum_type=PikaDurationEnum, - ), - } - CATEGORY = "api node/video/Pika" - API_NODE = True - FUNCTION = "api_call" - RETURN_TYPES = ("VIDEO",) +async def execute_task( + initial_operation: SynchronousOperation[R, PikaGenerateResponse], + auth_kwargs: Optional[dict[str, str]] = None, + node_id: Optional[str] = None, +) -> tuple[VideoFromFile]: + """Executes the initial operation then polls for the task status until it is completed. - async def poll_for_task_status( - self, - task_id: str, - auth_kwargs: Optional[dict[str, str]] = None, - node_id: Optional[str] = None, - ) -> PikaGenerateResponse: - polling_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"{PATH_VIDEO_GET}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=PikaVideoResponse, - ), - completed_statuses=[ - "finished", - ], - failed_statuses=["failed", "cancelled"], - status_extractor=lambda response: ( - response.status.value if response.status else None - ), - progress_extractor=lambda response: ( - response.progress if hasattr(response, "progress") else None - ), - auth_kwargs=auth_kwargs, - result_url_extractor=lambda response: ( - response.url if hasattr(response, "url") else None - ), - node_id=node_id, - estimated_duration=60 + Args: + initial_operation: The initial operation to execute. + auth_kwargs: The authentication token(s) to use for the API call. + + Returns: + A tuple containing the video file as a VIDEO output. + """ + initial_response = await initial_operation.execute() + if not is_valid_initial_response(initial_response): + error_msg = f"Pika initial request failed. Code: {initial_response.code}, Message: {initial_response.message}, Data: {initial_response.data}" + logging.error(error_msg) + raise PikaApiError(error_msg) + + task_id = initial_response.video_id + final_response = await poll_for_task_status(task_id, auth_kwargs, node_id=node_id) + if not is_valid_video_response(final_response): + error_msg = ( + f"Pika task {task_id} succeeded but no video data found in response." ) - return await polling_operation.execute() + logging.error(error_msg) + raise PikaApiError(error_msg) - async def execute_task( - self, - initial_operation: SynchronousOperation[R, PikaGenerateResponse], - auth_kwargs: Optional[dict[str, str]] = None, - node_id: Optional[str] = None, - ) -> tuple[VideoFromFile]: - """Executes the initial operation then polls for the task status until it is completed. + video_url = str(final_response.url) + logging.info("Pika task %s succeeded. Video URL: %s", task_id, video_url) - Args: - initial_operation: The initial operation to execute. - auth_kwargs: The authentication token(s) to use for the API call. - - Returns: - A tuple containing the video file as a VIDEO output. - """ - initial_response = await initial_operation.execute() - if not is_valid_initial_response(initial_response): - error_msg = f"Pika initial request failed. Code: {initial_response.code}, Message: {initial_response.message}, Data: {initial_response.data}" - logging.error(error_msg) - raise PikaApiError(error_msg) - - task_id = initial_response.video_id - final_response = await self.poll_for_task_status(task_id, auth_kwargs) - if not is_valid_video_response(final_response): - error_msg = ( - f"Pika task {task_id} succeeded but no video data found in response." - ) - logging.error(error_msg) - raise PikaApiError(error_msg) - - video_url = str(final_response.url) - logging.info("Pika task %s succeeded. 
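# The module-level execute_task() above captures the submit-then-poll contract that
# every node in this file now shares, pairing a SynchronousOperation (task creation)
# with a PollingOperation (status checks). Stripped of the Pika specifics, the flow
# is roughly the following (function names here are illustrative, not from the patch):
#
#     async def submit_and_poll(submit, get_status, interval=5.0):
#         task_id = await submit()                      # SynchronousOperation.execute()
#         while True:                                   # PollingOperation.execute()
#             status, url = await get_status(task_id)
#             if status == "finished":
#                 return url
#             if status in ("failed", "cancelled"):
#                 raise Exception(f"task {task_id} ended as {status}")
#             await asyncio.sleep(interval)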
Video URL: %s", task_id, video_url) - - return (await download_url_to_video_output(video_url),) + return (await download_url_to_video_output(video_url),) -class PikaImageToVideoV2_2(PikaNodeBase): +def get_base_inputs_types() -> list[comfy_io.Input]: + """Get the base required inputs types common to all Pika nodes.""" + return [ + comfy_io.String.Input("prompt_text", multiline=True), + comfy_io.String.Input("negative_prompt", multiline=True), + comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), + comfy_io.Combo.Input( + "resolution", options=[resolution.value for resolution in PikaResolutionEnum], default="1080p" + ), + comfy_io.Combo.Input( + "duration", options=[duration.value for duration in PikaDurationEnum], default=5 + ), + ] + + +class PikaImageToVideoV2_2(comfy_io.ComfyNode): """Pika 2.2 Image to Video Node.""" @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "image": ( - IO.IMAGE, - {"tooltip": "The image to convert to video"}, - ), - **cls.get_base_inputs_types(PikaBodyGenerate22I2vGenerate22I2vPost), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PikaImageToVideoNode2_2", + display_name="Pika Image to Video", + description="Sends an image and prompt to the Pika API v2.2 to generate a video.", + category="api node/video/Pika", + inputs=[ + comfy_io.Image.Input("image", tooltip="The image to convert to video"), + *get_base_inputs_types(), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Sends an image and prompt to the Pika API v2.2 to generate a video." 
- - async def api_call( - self, + @classmethod + async def execute( + cls, image: torch.Tensor, prompt_text: str, negative_prompt: str, seed: int, resolution: str, duration: int, - unique_id: str, - **kwargs, - ) -> tuple[VideoFromFile]: + ) -> comfy_io.NodeOutput: # Convert image to BytesIO image_bytes_io = tensor_to_bytesio(image) image_bytes_io.seek(0) @@ -237,7 +229,10 @@ class PikaImageToVideoV2_2(PikaNodeBase): resolution=resolution, duration=duration, ) - + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_IMAGE_TO_VIDEO, @@ -248,50 +243,55 @@ class PikaImageToVideoV2_2(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) - - return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaTextToVideoNodeV2_2(PikaNodeBase): +class PikaTextToVideoNodeV2_2(comfy_io.ComfyNode): """Pika Text2Video v2.2 Node.""" @classmethod - def INPUT_TYPES(cls): - return { - "required": { - **cls.get_base_inputs_types(PikaBodyGenerate22T2vGenerate22T2vPost), - "aspect_ratio": model_field_to_node_input( - IO.FLOAT, - PikaBodyGenerate22T2vGenerate22T2vPost, - "aspectRatio", + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PikaTextToVideoNode2_2", + display_name="Pika Text to Video", + description="Sends a text prompt to the Pika API v2.2 to generate a video.", + category="api node/video/Pika", + inputs=[ + *get_base_inputs_types(), + comfy_io.Float.Input( + "aspect_ratio", step=0.001, min=0.4, max=2.5, default=1.7777777777777777, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + tooltip="Aspect ratio (width / height)", + ) + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Sends a text prompt to the Pika API v2.2 to generate a video." 
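# Note the recurring pattern in the converted nodes: V1 received auth material via
# hidden **kwargs, while V3 exposes it as class-level state, so each execute() now
# rebuilds the same two-key dict from cls.hidden. A hypothetical helper that would
# deduplicate this (not part of the patch, shown only to name the pattern):
#
#     def _auth_from_hidden(cls) -> dict[str, str]:
#         return {
#             "auth_token": cls.hidden.auth_token_comfy_org,
#             "comfy_api_key": cls.hidden.api_key_comfy_org,
#         }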
- - async def api_call( - self, + @classmethod + async def execute( + cls, prompt_text: str, negative_prompt: str, seed: int, resolution: str, duration: int, aspect_ratio: float, - unique_id: str, - **kwargs, - ) -> tuple[VideoFromFile]: + ) -> comfy_io.NodeOutput: + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_TEXT_TO_VIDEO, @@ -307,62 +307,75 @@ class PikaTextToVideoNodeV2_2(PikaNodeBase): duration=duration, aspectRatio=aspect_ratio, ), - auth_kwargs=kwargs, + auth_kwargs=auth, content_type="application/x-www-form-urlencoded", ) - - return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaScenesV2_2(PikaNodeBase): +class PikaScenesV2_2(comfy_io.ComfyNode): """PikaScenes v2.2 Node.""" @classmethod - def INPUT_TYPES(cls): - image_ingredient_input = ( - IO.IMAGE, - {"tooltip": "Image that will be used as ingredient to create a video."}, - ) - return { - "required": { - **cls.get_base_inputs_types( - PikaBodyGenerate22C2vGenerate22PikascenesPost, - ), - "ingredients_mode": model_field_to_node_input( - IO.COMBO, - PikaBodyGenerate22C2vGenerate22PikascenesPost, - "ingredientsMode", - enum_type=IngredientsMode, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PikaScenesV2_2", + display_name="Pika Scenes (Video Image Composition)", + description="Combine your images to create a video with the objects in them. Upload multiple images as ingredients and generate a high-quality video that incorporates all of them.", + category="api node/video/Pika", + inputs=[ + *get_base_inputs_types(), + comfy_io.Combo.Input( + "ingredients_mode", + options=["creative", "precise"], default="creative", ), - "aspect_ratio": model_field_to_node_input( - IO.FLOAT, - PikaBodyGenerate22C2vGenerate22PikascenesPost, - "aspectRatio", + comfy_io.Float.Input( + "aspect_ratio", step=0.001, min=0.4, max=2.5, default=1.7777777777777777, + tooltip="Aspect ratio (width / height)", ), - }, - "optional": { - "image_ingredient_1": image_ingredient_input, - "image_ingredient_2": image_ingredient_input, - "image_ingredient_3": image_ingredient_input, - "image_ingredient_4": image_ingredient_input, - "image_ingredient_5": image_ingredient_input, - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + comfy_io.Image.Input( + "image_ingredient_1", + optional=True, + tooltip="Image that will be used as ingredient to create a video.", + ), + comfy_io.Image.Input( + "image_ingredient_2", + optional=True, + tooltip="Image that will be used as ingredient to create a video.", + ), + comfy_io.Image.Input( + "image_ingredient_3", + optional=True, + tooltip="Image that will be used as ingredient to create a video.", + ), + comfy_io.Image.Input( + "image_ingredient_4", + optional=True, + tooltip="Image that will be used as ingredient to create a video.", + ), + comfy_io.Image.Input( + "image_ingredient_5", + optional=True, + tooltip="Image that will be used as ingredient to create a video.", + ), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Combine your images to create a video with the objects in them. 
Upload multiple images as ingredients and generate a high-quality video that incorporates all of them." - - async def api_call( - self, + @classmethod + async def execute( + cls, prompt_text: str, negative_prompt: str, seed: int, @@ -370,14 +383,12 @@ class PikaScenesV2_2(PikaNodeBase): duration: int, ingredients_mode: str, aspect_ratio: float, - unique_id: str, image_ingredient_1: Optional[torch.Tensor] = None, image_ingredient_2: Optional[torch.Tensor] = None, image_ingredient_3: Optional[torch.Tensor] = None, image_ingredient_4: Optional[torch.Tensor] = None, image_ingredient_5: Optional[torch.Tensor] = None, - **kwargs, - ) -> tuple[VideoFromFile]: + ) -> comfy_io.NodeOutput: # Convert all passed images to BytesIO all_image_bytes_io = [] for image in [ @@ -406,7 +417,10 @@ class PikaScenesV2_2(PikaNodeBase): duration=duration, aspectRatio=aspect_ratio, ) - + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_PIKASCENES, @@ -417,63 +431,54 @@ class PikaScenesV2_2(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) - return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikAdditionsNode(PikaNodeBase): +class PikAdditionsNode(comfy_io.ComfyNode): """Pika Pikadditions Node. Add an image into a video.""" @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "video": (IO.VIDEO, {"tooltip": "The video to add an image to."}), - "image": (IO.IMAGE, {"tooltip": "The image to add to the video."}), - "prompt_text": model_field_to_node_input( - IO.STRING, - PikaBodyGeneratePikadditionsGeneratePikadditionsPost, - "promptText", - multiline=True, - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - PikaBodyGeneratePikadditionsGeneratePikadditionsPost, - "negativePrompt", - multiline=True, - ), - "seed": model_field_to_node_input( - IO.INT, - PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Pikadditions", + display_name="Pikadditions (Video Object Insertion)", + description="Add any object or image into your video. Upload a video and specify what you'd like to add to create a seamlessly integrated result.", + category="api node/video/Pika", + inputs=[ + comfy_io.Video.Input("video", tooltip="The video to add an image to."), + comfy_io.Image.Input("image", tooltip="The image to add to the video."), + comfy_io.String.Input("prompt_text", multiline=True), + comfy_io.String.Input("negative_prompt", multiline=True), + comfy_io.Int.Input( "seed", min=0, max=0xFFFFFFFF, control_after_generate=True, ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Add any object or image into your video. Upload a video and specify what you'd like to add to create a seamlessly integrated result." 
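# One detail worth calling out in the video handling just below: save_to() leaves the
# buffer cursor at the end of the stream, so seek(0) is required before the bytes are
# handed to the multipart uploader. In isolation:
#
#     from io import BytesIO
#     buf = BytesIO()
#     buf.write(b"...")   # stand-in for video.save_to(buf, format=..., codec=...)
#     buf.tell()          # cursor sits at the end of what was written
#     buf.seek(0)         # rewind so the reader starts from byte 0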
- - async def api_call( - self, + @classmethod + async def execute( + cls, video: VideoInput, image: torch.Tensor, prompt_text: str, negative_prompt: str, seed: int, - unique_id: str, - **kwargs, - ) -> tuple[VideoFromFile]: + ) -> comfy_io.NodeOutput: # Convert video to BytesIO - video_bytes_io = io.BytesIO() + video_bytes_io = BytesIO() video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) video_bytes_io.seek(0) @@ -492,7 +497,10 @@ class PikAdditionsNode(PikaNodeBase): negativePrompt=negative_prompt, seed=seed, ) - + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_PIKADDITIONS, @@ -503,74 +511,51 @@ class PikAdditionsNode(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) - return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaSwapsNode(PikaNodeBase): +class PikaSwapsNode(comfy_io.ComfyNode): """Pika Pikaswaps Node.""" @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "video": (IO.VIDEO, {"tooltip": "The video to swap an object in."}), - "image": ( - IO.IMAGE, - { - "tooltip": "The image used to replace the masked object in the video." - }, - ), - "mask": ( - IO.MASK, - {"tooltip": "Use the mask to define areas in the video to replace"}, - ), - "prompt_text": model_field_to_node_input( - IO.STRING, - PikaBodyGeneratePikaswapsGeneratePikaswapsPost, - "promptText", - multiline=True, - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - PikaBodyGeneratePikaswapsGeneratePikaswapsPost, - "negativePrompt", - multiline=True, - ), - "seed": model_field_to_node_input( - IO.INT, - PikaBodyGeneratePikaswapsGeneratePikaswapsPost, - "seed", - min=0, - max=0xFFFFFFFF, - control_after_generate=True, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Pikaswaps", + display_name="Pika Swaps (Video Object Replacement)", + description="Swap out any object or region of your video with a new image or object. Define areas to replace either with a mask or coordinates.", + category="api node/video/Pika", + inputs=[ + comfy_io.Video.Input("video", tooltip="The video to swap an object in."), + comfy_io.Image.Input("image", tooltip="The image used to replace the masked object in the video."), + comfy_io.Mask.Input("mask", tooltip="Use the mask to define areas in the video to replace"), + comfy_io.String.Input("prompt_text", multiline=True), + comfy_io.String.Input("negative_prompt", multiline=True), + comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Swap out any object or region of your video with a new image or object. Define areas to replace either with a mask or coordinates." 
- RETURN_TYPES = ("VIDEO",) - - async def api_call( - self, + @classmethod + async def execute( + cls, video: VideoInput, image: torch.Tensor, mask: torch.Tensor, prompt_text: str, negative_prompt: str, seed: int, - unique_id: str, - **kwargs, - ) -> tuple[VideoFromFile]: + ) -> comfy_io.NodeOutput: # Convert video to BytesIO - video_bytes_io = io.BytesIO() + video_bytes_io = BytesIO() video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) video_bytes_io.seek(0) @@ -579,7 +564,7 @@ class PikaSwapsNode(PikaNodeBase): mask = mask.repeat(1, 3, 1, 1) # Convert 3-channel binary mask to BytesIO - mask_bytes_io = io.BytesIO() + mask_bytes_io = BytesIO() mask_bytes_io.write(mask.numpy().astype(np.uint8)) mask_bytes_io.seek(0) @@ -599,7 +584,10 @@ class PikaSwapsNode(PikaNodeBase): negativePrompt=negative_prompt, seed=seed, ) - + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_PIKADDITIONS, @@ -610,71 +598,52 @@ class PikaSwapsNode(PikaNodeBase): request=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) - - return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaffectsNode(PikaNodeBase): +class PikaffectsNode(comfy_io.ComfyNode): """Pika Pikaffects Node.""" @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "image": ( - IO.IMAGE, - {"tooltip": "The reference image to apply the Pikaffect to."}, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="Pikaffects", + display_name="Pikaffects (Video Effects)", + description="Generate a video with a specific Pikaffect. Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear", + category="api node/video/Pika", + inputs=[ + comfy_io.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."), + comfy_io.Combo.Input( + "pikaffect", options=[pikaffect.value for pikaffect in Pikaffect], default="Cake-ify" ), - "pikaffect": model_field_to_node_input( - IO.COMBO, - PikaBodyGeneratePikaffectsGeneratePikaffectsPost, - "pikaffect", - enum_type=Pikaffect, - default="Cake-ify", - ), - "prompt_text": model_field_to_node_input( - IO.STRING, - PikaBodyGeneratePikaffectsGeneratePikaffectsPost, - "promptText", - multiline=True, - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - PikaBodyGeneratePikaffectsGeneratePikaffectsPost, - "negativePrompt", - multiline=True, - ), - "seed": model_field_to_node_input( - IO.INT, - PikaBodyGeneratePikaffectsGeneratePikaffectsPost, - "seed", - min=0, - max=0xFFFFFFFF, - control_after_generate=True, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + comfy_io.String.Input("prompt_text", multiline=True), + comfy_io.String.Input("negative_prompt", multiline=True), + comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Generate a video with a specific Pikaffect. 
Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear" - - async def api_call( - self, + @classmethod + async def execute( + cls, image: torch.Tensor, pikaffect: str, prompt_text: str, negative_prompt: str, seed: int, - unique_id: str, - **kwargs, - ) -> tuple[VideoFromFile]: - + ) -> comfy_io.NodeOutput: + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_PIKAFFECTS, @@ -690,36 +659,38 @@ class PikaffectsNode(PikaNodeBase): ), files={"image": ("image.png", tensor_to_bytesio(image), "image/png")}, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) - - return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaStartEndFrameNode2_2(PikaNodeBase): +class PikaStartEndFrameNode2_2(comfy_io.ComfyNode): """PikaFrames v2.2 Node.""" @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "image_start": (IO.IMAGE, {"tooltip": "The first image to combine."}), - "image_end": (IO.IMAGE, {"tooltip": "The last image to combine."}), - **cls.get_base_inputs_types( - PikaBodyGenerate22KeyframeGenerate22PikaframesPost - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="PikaStartEndFrameNode2_2", + display_name="Pika Start and End Frame to Video", + description="Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them.", + category="api node/video/Pika", + inputs=[ + comfy_io.Image.Input("image_start", tooltip="The first image to combine."), + comfy_io.Image.Input("image_end", tooltip="The last image to combine."), + *get_base_inputs_types(), + ], + outputs=[comfy_io.Video.Output()], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them." 
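# The pika_files payload built just below follows the requests-style multipart tuple
# convention: (field_name, (filename, file_obj, mime_type)). Because both keyframes
# share the field name "keyFrames", a list of tuples is used instead of a dict, where
# the duplicate key would collide. Schematically (contents illustrative):
#
#     files = [
#         ("keyFrames", ("image_start.png", start_bytesio, "image/png")),
#         ("keyFrames", ("image_end.png", end_bytesio, "image/png")),
#     ]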
- - async def api_call( - self, + @classmethod + async def execute( + cls, image_start: torch.Tensor, image_end: torch.Tensor, prompt_text: str, @@ -727,15 +698,15 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): seed: int, resolution: str, duration: int, - unique_id: str, - **kwargs, - ) -> tuple[VideoFromFile]: - + ) -> comfy_io.NodeOutput: pika_files = [ ("keyFrames", ("image_start.png", tensor_to_bytesio(image_start), "image/png")), ("keyFrames", ("image_end.png", tensor_to_bytesio(image_end), "image/png")), ] - + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_PIKAFRAMES, @@ -752,28 +723,24 @@ class PikaStartEndFrameNode2_2(PikaNodeBase): ), files=pika_files, content_type="multipart/form-data", - auth_kwargs=kwargs, + auth_kwargs=auth, ) - - return await self.execute_task(initial_operation, auth_kwargs=kwargs, node_id=unique_id) + return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -NODE_CLASS_MAPPINGS = { - "PikaImageToVideoNode2_2": PikaImageToVideoV2_2, - "PikaTextToVideoNode2_2": PikaTextToVideoNodeV2_2, - "PikaScenesV2_2": PikaScenesV2_2, - "Pikadditions": PikAdditionsNode, - "Pikaswaps": PikaSwapsNode, - "Pikaffects": PikaffectsNode, - "PikaStartEndFrameNode2_2": PikaStartEndFrameNode2_2, -} +class PikaApiNodesExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + PikaImageToVideoV2_2, + PikaTextToVideoNodeV2_2, + PikaScenesV2_2, + PikAdditionsNode, + PikaSwapsNode, + PikaffectsNode, + PikaStartEndFrameNode2_2, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "PikaImageToVideoNode2_2": "Pika Image to Video", - "PikaTextToVideoNode2_2": "Pika Text to Video", - "PikaScenesV2_2": "Pika Scenes (Video Image Composition)", - "Pikadditions": "Pikadditions (Video Object Insertion)", - "Pikaswaps": "Pika Swaps (Video Object Replacement)", - "Pikaffects": "Pikaffects (Video Effects)", - "PikaStartEndFrameNode2_2": "Pika Start and End Frame to Video", -} + +async def comfy_entrypoint() -> PikaApiNodesExtension: + return PikaApiNodesExtension() From 8c1991042795d06c7ccfd5d1931eb994044c75ef Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 7 Oct 2025 02:26:52 +0300 Subject: [PATCH 0714/1073] convert nodes_kling.py to V3 schema (#10236) --- comfy_api_nodes/nodes_kling.py | 2146 +++++++++++++++----------------- 1 file changed, 1032 insertions(+), 1114 deletions(-) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 44fccc0c7..457b43451 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -10,6 +10,8 @@ from collections.abc import Callable import math import logging +from typing_extensions import override + import torch from comfy_api_nodes.apis import ( @@ -63,8 +65,8 @@ from comfy_api_nodes.apinode_utils import ( upload_video_to_comfyapi, upload_audio_to_comfyapi, download_url_to_image_tensor, + validate_string, ) -from comfy_api_nodes.mapper_utils import model_field_to_node_input from comfy_api_nodes.util.validation_utils import ( validate_image_dimensions, validate_image_aspect_ratio, @@ -73,8 +75,7 @@ from comfy_api_nodes.util.validation_utils import ( ) from comfy_api.input.basic_types import AudioInput from comfy_api.input.video_types import VideoInput -from comfy_api.input_impl import VideoFromFile -from comfy.comfy_types.node_typing import IO, 
InputTypeOptions, ComfyNodeABC +from comfy_api.latest import ComfyExtension, io as comfy_io KLING_API_VERSION = "v1" PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video" @@ -103,10 +104,113 @@ AVERAGE_DURATION_VIDEO_EXTEND = 320 R = TypeVar("R") -class KlingApiError(Exception): - """Base exception for Kling API errors.""" +MODE_TEXT2VIDEO = { + "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), + "standard mode / 10s duration / kling-v1": ("std", "10", "kling-v1"), + "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"), + "pro mode / 10s duration / kling-v1": ("pro", "10", "kling-v1"), + "standard mode / 5s duration / kling-v1-6": ("std", "5", "kling-v1-6"), + "standard mode / 10s duration / kling-v1-6": ("std", "10", "kling-v1-6"), + "pro mode / 5s duration / kling-v2-master": ("pro", "5", "kling-v2-master"), + "pro mode / 10s duration / kling-v2-master": ("pro", "10", "kling-v2-master"), + "standard mode / 5s duration / kling-v2-master": ("std", "5", "kling-v2-master"), + "standard mode / 10s duration / kling-v2-master": ("std", "10", "kling-v2-master"), + "pro mode / 5s duration / kling-v2-1-master": ("pro", "5", "kling-v2-1-master"), + "pro mode / 10s duration / kling-v2-1-master": ("pro", "10", "kling-v2-1-master"), + "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"), + "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"), +} +""" +Mapping of mode strings to their corresponding (mode, duration, model_name) tuples. +Only includes config combos that support the `image_tail` request field. - pass +See: [Kling API Docs Capability Map](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) +""" + + +MODE_START_END_FRAME = { + "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), + "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"), + "pro mode / 5s duration / kling-v1-5": ("pro", "5", "kling-v1-5"), + "pro mode / 10s duration / kling-v1-5": ("pro", "10", "kling-v1-5"), + "pro mode / 5s duration / kling-v1-6": ("pro", "5", "kling-v1-6"), + "pro mode / 10s duration / kling-v1-6": ("pro", "10", "kling-v1-6"), + "pro mode / 5s duration / kling-v2-1": ("pro", "5", "kling-v2-1"), + "pro mode / 10s duration / kling-v2-1": ("pro", "10", "kling-v2-1"), +} +""" +Returns a mapping of mode strings to their corresponding (mode, duration, model_name) tuples. +Only includes config combos that support the `image_tail` request field. 
+ +See: [Kling API Docs Capability Map](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) +""" + + +VOICES_CONFIG = { + # English voices + "Melody": ("girlfriend_4_speech02", "en"), + "Sunny": ("genshin_vindi2", "en"), + "Sage": ("zhinen_xuesheng", "en"), + "Ace": ("AOT", "en"), + "Blossom": ("ai_shatang", "en"), + "Peppy": ("genshin_klee2", "en"), + "Dove": ("genshin_kirara", "en"), + "Shine": ("ai_kaiya", "en"), + "Anchor": ("oversea_male1", "en"), + "Lyric": ("ai_chenjiahao_712", "en"), + "Tender": ("chat1_female_new-3", "en"), + "Siren": ("chat_0407_5-1", "en"), + "Zippy": ("cartoon-boy-07", "en"), + "Bud": ("uk_boy1", "en"), + "Sprite": ("cartoon-girl-01", "en"), + "Candy": ("PeppaPig_platform", "en"), + "Beacon": ("ai_huangzhong_712", "en"), + "Rock": ("ai_huangyaoshi_712", "en"), + "Titan": ("ai_laoguowang_712", "en"), + "Grace": ("chengshu_jiejie", "en"), + "Helen": ("you_pingjing", "en"), + "Lore": ("calm_story1", "en"), + "Crag": ("uk_man2", "en"), + "Prattle": ("laopopo_speech02", "en"), + "Hearth": ("heainainai_speech02", "en"), + "The Reader": ("reader_en_m-v1", "en"), + "Commercial Lady": ("commercial_lady_en_f-v1", "en"), + # Chinese voices + "阳光少年": ("genshin_vindi2", "zh"), + "懂事小弟": ("zhinen_xuesheng", "zh"), + "运动少年": ("tiyuxi_xuedi", "zh"), + "青春少女": ("ai_shatang", "zh"), + "温柔小妹": ("genshin_klee2", "zh"), + "元气少女": ("genshin_kirara", "zh"), + "阳光男生": ("ai_kaiya", "zh"), + "幽默小哥": ("tiexin_nanyou", "zh"), + "文艺小哥": ("ai_chenjiahao_712", "zh"), + "甜美邻家": ("girlfriend_1_speech02", "zh"), + "温柔姐姐": ("chat1_female_new-3", "zh"), + "职场女青": ("girlfriend_2_speech02", "zh"), + "活泼男童": ("cartoon-boy-07", "zh"), + "俏皮女童": ("cartoon-girl-01", "zh"), + "稳重老爸": ("ai_huangyaoshi_712", "zh"), + "温柔妈妈": ("you_pingjing", "zh"), + "严肃上司": ("ai_laoguowang_712", "zh"), + "优雅贵妇": ("chengshu_jiejie", "zh"), + "慈祥爷爷": ("zhuxi_speech02", "zh"), + "唠叨爷爷": ("uk_oldman3", "zh"), + "唠叨奶奶": ("laopopo_speech02", "zh"), + "和蔼奶奶": ("heainainai_speech02", "zh"), + "东北老铁": ("dongbeilaotie_speech02", "zh"), + "重庆小伙": ("chongqingxiaohuo_speech02", "zh"), + "四川妹子": ("chuanmeizi_speech02", "zh"), + "潮汕大叔": ("chaoshandashu_speech02", "zh"), + "台湾男生": ("ai_taiwan_man2_speech02", "zh"), + "西安掌柜": ("xianzhanggui_speech02", "zh"), + "天津姐姐": ("tianjinjiejie_speech02", "zh"), + "新闻播报男": ("diyinnansang_DB_CN_M_04-v2", "zh"), + "译制片男": ("yizhipiannan-v1", "zh"), + "撒娇女友": ("tianmeixuemei-v1", "zh"), + "刀片烟嗓": ("daopianyansang-v1", "zh"), + "乖巧正太": ("mengwa-v1", "zh"), +} async def poll_until_finished( @@ -142,11 +246,6 @@ def is_valid_camera_control_configs(configs: list[float]) -> bool: return any(not math.isclose(value, 0.0) for value in configs) -def is_valid_prompt(prompt: str) -> bool: - """Verifies that the prompt is not empty.""" - return bool(prompt) - - def is_valid_task_creation_response(response: KlingText2VideoResponse) -> bool: """Verifies that the initial response contains a task ID.""" return bool(response.data.task_id) @@ -190,7 +289,7 @@ def validate_task_creation_response(response) -> None: if not is_valid_task_creation_response(response): error_msg = f"Kling initial request failed. 
Code: {response.code}, Message: {response.message}, Data: {response.data}" logging.error(error_msg) - raise KlingApiError(error_msg) + raise Exception(error_msg) def validate_video_result_response(response) -> None: @@ -198,7 +297,7 @@ def validate_video_result_response(response) -> None: if not is_valid_video_response(response): error_msg = f"Kling task {response.data.task_id} succeeded but no video data found in response." logging.error(f"Error: {error_msg}.\nResponse: {response}") - raise KlingApiError(error_msg) + raise Exception(error_msg) def validate_image_result_response(response) -> None: @@ -206,7 +305,7 @@ def validate_image_result_response(response) -> None: if not is_valid_image_response(response): error_msg = f"Kling task {response.data.task_id} succeeded but no image data found in response." logging.error(f"Error: {error_msg}.\nResponse: {response}") - raise KlingApiError(error_msg) + raise Exception(error_msg) def validate_input_image(image: torch.Tensor) -> None: @@ -221,21 +320,6 @@ def validate_input_image(image: torch.Tensor) -> None: validate_image_aspect_ratio(image, min_aspect_ratio=1 / 2.5, max_aspect_ratio=2.5) -def get_camera_control_input_config( - tooltip: str, default: float = 0.0 -) -> tuple[IO, InputTypeOptions]: - """Returns common InputTypeOptions for Kling camera control configurations.""" - input_config = { - "default": default, - "min": -10.0, - "max": 10.0, - "step": 0.25, - "display": "slider", - "tooltip": tooltip, - } - return IO.FLOAT, input_config - - def get_video_from_response(response) -> KlingVideoResult: """Returns the first video object from the Kling video generation task result. Will raise an error if the response is not valid. @@ -278,17 +362,6 @@ def get_images_urls_from_response(response) -> Optional[str]: return None -async def video_result_to_node_output( - video: KlingVideoResult, -) -> tuple[VideoFromFile, str, str]: - """Converts a KlingVideoResult to a tuple of (VideoFromFile, str, str) to be used as a ComfyUI node output.""" - return ( - await download_url_to_video_output(str(video.url)), - str(video.id), - str(video.duration), - ) - - async def image_result_to_node_output( images: list[KlingImageResult], ) -> torch.Tensor: @@ -302,57 +375,339 @@ async def image_result_to_node_output( return torch.cat([await download_url_to_image_tensor(str(image.url)) for image in images]) -class KlingNodeBase(ComfyNodeABC): - """Base class for Kling nodes.""" +async def execute_text2video( + auth_kwargs: dict[str, str], + node_id: str, + prompt: str, + negative_prompt: str, + cfg_scale: float, + model_name: str, + model_mode: str, + duration: str, + aspect_ratio: str, + camera_control: Optional[KlingCameraControl] = None, +) -> comfy_io.NodeOutput: + validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_TEXT_TO_VIDEO, + method=HttpMethod.POST, + request_model=KlingText2VideoRequest, + response_model=KlingText2VideoResponse, + ), + request=KlingText2VideoRequest( + prompt=prompt if prompt else None, + negative_prompt=negative_prompt if negative_prompt else None, + duration=KlingVideoGenDuration(duration), + mode=KlingVideoGenMode(model_mode), + model_name=KlingVideoGenModelName(model_name), + cfg_scale=cfg_scale, + aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), + camera_control=camera_control, + ), + auth_kwargs=auth_kwargs, + ) - FUNCTION = "api_call" - CATEGORY = "api node/video/Kling" - API_NODE = True + task_creation_response = await 
initial_operation.execute() + validate_task_creation_response(task_creation_response) + + task_id = task_creation_response.data.task_id + final_response = await poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_TEXT_TO_VIDEO}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingText2VideoResponse, + ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_T2V, + node_id=node_id, + ) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) -class KlingCameraControls(KlingNodeBase): +async def execute_image2video( + auth_kwargs: dict[str, str], + node_id: str, + start_frame: torch.Tensor, + prompt: str, + negative_prompt: str, + model_name: str, + cfg_scale: float, + model_mode: str, + aspect_ratio: str, + duration: str, + camera_control: Optional[KlingCameraControl] = None, + end_frame: Optional[torch.Tensor] = None, +) -> comfy_io.NodeOutput: + validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) + validate_input_image(start_frame) + + if camera_control is not None: + # Camera control type for image 2 video is always `simple` + camera_control.type = KlingCameraControlType.simple + + if model_mode == "std" and model_name == KlingVideoGenModelName.kling_v2_5_turbo.value: + model_mode = "pro" # October 5: currently "std" mode is not supported for this model + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_IMAGE_TO_VIDEO, + method=HttpMethod.POST, + request_model=KlingImage2VideoRequest, + response_model=KlingImage2VideoResponse, + ), + request=KlingImage2VideoRequest( + model_name=KlingVideoGenModelName(model_name), + image=tensor_to_base64_string(start_frame), + image_tail=( + tensor_to_base64_string(end_frame) + if end_frame is not None + else None + ), + prompt=prompt, + negative_prompt=negative_prompt if negative_prompt else None, + cfg_scale=cfg_scale, + mode=KlingVideoGenMode(model_mode), + duration=KlingVideoGenDuration(duration), + camera_control=camera_control, + ), + auth_kwargs=auth_kwargs, + ) + + task_creation_response = await initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = await poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}", + method=HttpMethod.GET, + request_model=KlingImage2VideoRequest, + response_model=KlingImage2VideoResponse, + ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_I2V, + node_id=node_id, + ) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + + +async def execute_video_effect( + auth_kwargs: dict[str, str], + node_id: str, + dual_character: bool, + effect_scene: KlingDualCharacterEffectsScene | KlingSingleImageEffectsScene, + model_name: str, + duration: KlingVideoGenDuration, + image_1: torch.Tensor, + image_2: Optional[torch.Tensor] = None, + model_mode: Optional[KlingVideoGenMode] = None, +) -> comfy_io.NodeOutput: + if dual_character: + request_input_field = KlingDualCharacterEffectInput( + model_name=model_name, + mode=model_mode, + images=[ + tensor_to_base64_string(image_1), + 
tensor_to_base64_string(image_2), + ], + duration=duration, + ) + else: + request_input_field = KlingSingleImageEffectInput( + model_name=model_name, + image=tensor_to_base64_string(image_1), + duration=duration, + ) + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_VIDEO_EFFECTS, + method=HttpMethod.POST, + request_model=KlingVideoEffectsRequest, + response_model=KlingVideoEffectsResponse, + ), + request=KlingVideoEffectsRequest( + effect_scene=effect_scene, + input=request_input_field, + ), + auth_kwargs=auth_kwargs, + ) + + task_creation_response = await initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = await poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_VIDEO_EFFECTS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingVideoEffectsResponse, + ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_VIDEO_EFFECTS, + node_id=node_id, + ) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + + +async def execute_lipsync( + auth_kwargs: dict[str, str], + node_id: str, + video: VideoInput, + audio: Optional[AudioInput] = None, + voice_language: Optional[str] = None, + model_mode: Optional[str] = None, + text: Optional[str] = None, + voice_speed: Optional[float] = None, + voice_id: Optional[str] = None, +) -> comfy_io.NodeOutput: + if text: + validate_string(text, field_name="Text", max_length=MAX_PROMPT_LENGTH_LIP_SYNC) + validate_video_dimensions(video, 720, 1920) + validate_video_duration(video, 2, 10) + + # Upload video to Comfy API and get download URL + video_url = await upload_video_to_comfyapi(video, auth_kwargs=auth_kwargs) + logging.info("Uploaded video to Comfy API. URL: %s", video_url) + + # Upload the audio file to Comfy API and get download URL + if audio: + audio_url = await upload_audio_to_comfyapi(audio, auth_kwargs=auth_kwargs) + logging.info("Uploaded audio to Comfy API. 
URL: %s", audio_url) + else: + audio_url = None + + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path=PATH_LIP_SYNC, + method=HttpMethod.POST, + request_model=KlingLipSyncRequest, + response_model=KlingLipSyncResponse, + ), + request=KlingLipSyncRequest( + input=KlingLipSyncInputObject( + video_url=video_url, + mode=model_mode, + text=text, + voice_language=voice_language, + voice_speed=voice_speed, + audio_type="url", + audio_url=audio_url, + voice_id=voice_id, + ), + ), + auth_kwargs=auth_kwargs, + ) + + task_creation_response = await initial_operation.execute() + validate_task_creation_response(task_creation_response) + task_id = task_creation_response.data.task_id + + final_response = await poll_until_finished( + auth_kwargs, + ApiEndpoint( + path=f"{PATH_LIP_SYNC}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingLipSyncResponse, + ), + result_url_extractor=get_video_url_from_response, + estimated_duration=AVERAGE_DURATION_LIP_SYNC, + node_id=node_id, + ) + validate_video_result_response(final_response) + + video = get_video_from_response(final_response) + return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + + +class KlingCameraControls(comfy_io.ComfyNode): """Kling Camera Controls Node""" @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "camera_control_type": model_field_to_node_input( - IO.COMBO, - KlingCameraControl, - "type", - enum_type=KlingCameraControlType, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingCameraControls", + display_name="Kling Camera Controls", + category="api node/video/Kling", + description="Allows specifying configuration options for Kling Camera Controls and motion control effects.", + inputs=[ + comfy_io.Combo.Input("camera_control_type", options=[i.value for i in KlingCameraControlType]), + comfy_io.Float.Input( + "horizontal_movement", + default=0.0, + min=-10.0, + max=10.0, + step=0.25, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right", ), - "horizontal_movement": get_camera_control_input_config( - "Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right" + comfy_io.Float.Input( + "vertical_movement", + default=0.0, + min=-10.0, + max=10.0, + step=0.25, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward.", ), - "vertical_movement": get_camera_control_input_config( - "Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward." - ), - "pan": get_camera_control_input_config( - "Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.", + comfy_io.Float.Input( + "pan", default=0.5, + min=-10.0, + max=10.0, + step=0.25, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.", ), - "tilt": get_camera_control_input_config( - "Controls camera's rotation in horizontal plane (y-axis). 
Negative indicates left rotation, positive indicates right rotation.", + comfy_io.Float.Input( + "tilt", + default=0.0, + min=-10.0, + max=10.0, + step=0.25, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.", ), - "roll": get_camera_control_input_config( - "Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", + comfy_io.Float.Input( + "roll", + default=0.0, + min=-10.0, + max=10.0, + step=0.25, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", ), - "zoom": get_camera_control_input_config( - "Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.", + comfy_io.Float.Input( + "zoom", + default=0.0, + min=-10.0, + max=10.0, + step=0.25, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.", ), - } - } - - DESCRIPTION = "Allows specifying configuration options for Kling Camera Controls and motion control effects." - RETURN_TYPES = ("CAMERA_CONTROL",) - RETURN_NAMES = ("camera_control",) - FUNCTION = "main" - API_NODE = False # This is just a helper node, it doesn't make an API call + ], + outputs=[comfy_io.Custom("CAMERA_CONTROL").Output(display_name="camera_control")], + ) @classmethod - def VALIDATE_INPUTS( + def validate_inputs( cls, horizontal_movement: float, vertical_movement: float, @@ -374,8 +729,9 @@ class KlingCameraControls(KlingNodeBase): return "Invalid camera control configs: at least one of the values must be non-zero" return True - def main( - self, + @classmethod + def execute( + cls, camera_control_type: str, horizontal_movement: float, vertical_movement: float, @@ -383,8 +739,8 @@ class KlingCameraControls(KlingNodeBase): tilt: float, roll: float, zoom: float, - ) -> tuple[KlingCameraControl]: - return ( + ) -> comfy_io.NodeOutput: + return comfy_io.NodeOutput( KlingCameraControl( type=KlingCameraControlType(camera_control_type), config=KlingCameraConfig( @@ -395,303 +751,186 @@ class KlingCameraControls(KlingNodeBase): tilt=tilt, zoom=zoom, ), - ), + ) ) -class KlingTextToVideoNode(KlingNodeBase): +class KlingTextToVideoNode(comfy_io.ComfyNode): """Kling Text to Video Node""" - @staticmethod - def get_mode_string_mapping() -> dict[str, tuple[str, str, str]]: - """ - Returns a mapping of mode strings to their corresponding (mode, duration, model_name) tuples. - Only includes config combos that support the `image_tail` request field. 
- - See: [Kling API Docs Capability Map](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) - """ - return { - "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), - "standard mode / 10s duration / kling-v1": ("std", "10", "kling-v1"), - "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"), - "pro mode / 10s duration / kling-v1": ("pro", "10", "kling-v1"), - "standard mode / 5s duration / kling-v1-6": ("std", "5", "kling-v1-6"), - "standard mode / 10s duration / kling-v1-6": ("std", "10", "kling-v1-6"), - "pro mode / 5s duration / kling-v2-master": ("pro", "5", "kling-v2-master"), - "pro mode / 10s duration / kling-v2-master": ("pro", "10", "kling-v2-master"), - "standard mode / 5s duration / kling-v2-master": ("std", "5", "kling-v2-master"), - "standard mode / 10s duration / kling-v2-master": ("std", "10", "kling-v2-master"), - "pro mode / 5s duration / kling-v2-1-master": ("pro", "5", "kling-v2-1-master"), - "pro mode / 10s duration / kling-v2-1-master": ("pro", "10", "kling-v2-1-master"), - "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"), - "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"), - } - @classmethod - def INPUT_TYPES(s): - modes = list(KlingTextToVideoNode.get_mode_string_mapping().keys()) - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, KlingText2VideoRequest, "prompt", multiline=True - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, KlingText2VideoRequest, "negative_prompt", multiline=True - ), - "cfg_scale": model_field_to_node_input( - IO.FLOAT, - KlingText2VideoRequest, - "cfg_scale", - default=1.0, - min=0.0, - max=1.0, - ), - "aspect_ratio": model_field_to_node_input( - IO.COMBO, - KlingText2VideoRequest, + def define_schema(cls) -> comfy_io.Schema: + modes = list(MODE_TEXT2VIDEO.keys()) + return comfy_io.Schema( + node_id="KlingTextToVideoNode", + display_name="Kling Text to Video", + category="api node/video/Kling", + description="Kling Text to Video Node", + inputs=[ + comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + comfy_io.Float.Input("cfg_scale", default=1.0, min=0.0, max=1.0), + comfy_io.Combo.Input( "aspect_ratio", - enum_type=KlingVideoGenAspectRatio, + options=[i.value for i in KlingVideoGenAspectRatio], + default="16:9", ), - "mode": ( - modes, - { - "default": modes[4], - "tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name.", - }, + comfy_io.Combo.Input( + "mode", + options=modes, + default=modes[4], + tooltip="The configuration to use for the video generation following the format: mode / duration / model_name.", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("VIDEO", "STRING", "STRING") - RETURN_NAMES = ("VIDEO", "video_id", "duration") - DESCRIPTION = "Kling Text to Video Node" - - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> KlingText2VideoResponse: - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_TEXT_TO_VIDEO}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=KlingText2VideoResponse, - ), - result_url_extractor=get_video_url_from_response, - 
estimated_duration=AVERAGE_DURATION_T2V, - node_id=node_id, + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, negative_prompt: str, cfg_scale: float, mode: str, aspect_ratio: str, - camera_control: Optional[KlingCameraControl] = None, - model_name: Optional[str] = None, - duration: Optional[str] = None, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[VideoFromFile, str, str]: - validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) - if model_name is None: - mode, duration, model_name = self.get_mode_string_mapping()[mode] - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_TEXT_TO_VIDEO, - method=HttpMethod.POST, - request_model=KlingText2VideoRequest, - response_model=KlingText2VideoResponse, - ), - request=KlingText2VideoRequest( - prompt=prompt if prompt else None, - negative_prompt=negative_prompt if negative_prompt else None, - duration=KlingVideoGenDuration(duration), - mode=KlingVideoGenMode(mode), - model_name=KlingVideoGenModelName(model_name), - cfg_scale=cfg_scale, - aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), - camera_control=camera_control, - ), - auth_kwargs=kwargs, + ) -> comfy_io.NodeOutput: + model_mode, duration, model_name = MODE_TEXT2VIDEO[mode] + return await execute_text2video( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, + prompt=prompt, + negative_prompt=negative_prompt, + cfg_scale=cfg_scale, + model_mode=model_mode, + aspect_ratio=aspect_ratio, + model_name=model_name, + duration=duration, ) - task_creation_response = await initial_operation.execute() - validate_task_creation_response(task_creation_response) - task_id = task_creation_response.data.task_id - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id - ) - validate_video_result_response(final_response) - - video = get_video_from_response(final_response) - return await video_result_to_node_output(video) - - -class KlingCameraControlT2VNode(KlingTextToVideoNode): +class KlingCameraControlT2VNode(comfy_io.ComfyNode): """ Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera. Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02. 
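# The combo values in KlingTextToVideoNode encode three settings in one string, so
# executing the node is just a dict lookup plus tuple unpacking against the
# MODE_TEXT2VIDEO table defined earlier. Using one entry from that table:
#
#     choice = "pro mode / 5s duration / kling-v2-5-turbo"
#     model_mode, duration, model_name = MODE_TEXT2VIDEO[choice]
#     # model_mode == "pro", duration == "5", model_name == "kling-v2-5-turbo"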
""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, KlingText2VideoRequest, "prompt", multiline=True - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - KlingText2VideoRequest, - "negative_prompt", - multiline=True, - ), - "cfg_scale": model_field_to_node_input( - IO.FLOAT, - KlingText2VideoRequest, - "cfg_scale", - default=0.75, - min=0.0, - max=1.0, - ), - "aspect_ratio": model_field_to_node_input( - IO.COMBO, - KlingText2VideoRequest, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingCameraControlT2VNode", + display_name="Kling Text to Video (Camera Control)", + category="api node/video/Kling", + description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.", + inputs=[ + comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), + comfy_io.Combo.Input( "aspect_ratio", - enum_type=KlingVideoGenAspectRatio, + options=[i.value for i in KlingVideoGenAspectRatio], + default="16:9", ), - "camera_control": ( - "CAMERA_CONTROL", - { - "tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", - }, + comfy_io.Custom("CAMERA_CONTROL").Input( + "camera_control", + tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text." 
- - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, negative_prompt: str, cfg_scale: float, aspect_ratio: str, camera_control: Optional[KlingCameraControl] = None, - unique_id: Optional[str] = None, - **kwargs, - ): - return await super().api_call( + ) -> comfy_io.NodeOutput: + return await execute_text2video( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, model_name=KlingVideoGenModelName.kling_v1, cfg_scale=cfg_scale, - mode=KlingVideoGenMode.std, + model_mode=KlingVideoGenMode.std, aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), duration=KlingVideoGenDuration.field_5, prompt=prompt, negative_prompt=negative_prompt, camera_control=camera_control, - **kwargs, ) -class KlingImage2VideoNode(KlingNodeBase): +class KlingImage2VideoNode(comfy_io.ComfyNode): """Kling Image to Video Node""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "start_frame": model_field_to_node_input( - IO.IMAGE, - KlingImage2VideoRequest, - "image", - tooltip="The reference image used to generate the video.", - ), - "prompt": model_field_to_node_input( - IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - KlingImage2VideoRequest, - "negative_prompt", - multiline=True, - ), - "model_name": model_field_to_node_input( - IO.COMBO, - KlingImage2VideoRequest, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingImage2VideoNode", + display_name="Kling Image to Video", + category="api node/video/Kling", + description="Kling Image to Video Node", + inputs=[ + comfy_io.Image.Input("start_frame", tooltip="The reference image used to generate the video."), + comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + comfy_io.Combo.Input( "model_name", - enum_type=KlingVideoGenModelName, + options=[i.value for i in KlingVideoGenModelName], + default="kling-v2-master", ), - "cfg_scale": model_field_to_node_input( - IO.FLOAT, - KlingImage2VideoRequest, - "cfg_scale", - default=0.8, - min=0.0, - max=1.0, - ), - "mode": model_field_to_node_input( - IO.COMBO, - KlingImage2VideoRequest, - "mode", - enum_type=KlingVideoGenMode, - ), - "aspect_ratio": model_field_to_node_input( - IO.COMBO, - KlingImage2VideoRequest, + comfy_io.Float.Input("cfg_scale", default=0.8, min=0.0, max=1.0), + comfy_io.Combo.Input("mode", options=[i.value for i in KlingVideoGenMode], default="std"), + comfy_io.Combo.Input( "aspect_ratio", - enum_type=KlingVideoGenAspectRatio, + options=[i.value for i in KlingVideoGenAspectRatio], + default="16:9", ), - "duration": model_field_to_node_input( - IO.COMBO, - KlingImage2VideoRequest, - "duration", - enum_type=KlingVideoGenDuration, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("VIDEO", "STRING", "STRING") - RETURN_NAMES = ("VIDEO", "video_id", "duration") - DESCRIPTION = "Kling Image to Video Node" - - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> KlingImage2VideoResponse: - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}", - method=HttpMethod.GET, - request_model=KlingImage2VideoRequest, - 
response_model=KlingImage2VideoResponse, - ), - result_url_extractor=get_video_url_from_response, - estimated_duration=AVERAGE_DURATION_I2V, - node_id=node_id, + comfy_io.Combo.Input("duration", options=[i.value for i in KlingVideoGenDuration], default="5"), + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - async def api_call( - self, + @classmethod + async def execute( + cls, start_frame: torch.Tensor, prompt: str, negative_prompt: str, @@ -702,212 +941,151 @@ class KlingImage2VideoNode(KlingNodeBase): duration: str, camera_control: Optional[KlingCameraControl] = None, end_frame: Optional[torch.Tensor] = None, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[VideoFromFile]: - validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) - validate_input_image(start_frame) - - if camera_control is not None: - # Camera control type for image 2 video is always `simple` - camera_control.type = KlingCameraControlType.simple - - if mode == "std" and model_name == KlingVideoGenModelName.kling_v2_5_turbo.value: - mode = "pro" # October 5: currently "std" mode is not supported for this model - - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_IMAGE_TO_VIDEO, - method=HttpMethod.POST, - request_model=KlingImage2VideoRequest, - response_model=KlingImage2VideoResponse, - ), - request=KlingImage2VideoRequest( - model_name=KlingVideoGenModelName(model_name), - image=tensor_to_base64_string(start_frame), - image_tail=( - tensor_to_base64_string(end_frame) - if end_frame is not None - else None - ), - prompt=prompt, - negative_prompt=negative_prompt if negative_prompt else None, - cfg_scale=cfg_scale, - mode=KlingVideoGenMode(mode), - duration=KlingVideoGenDuration(duration), - camera_control=camera_control, - ), - auth_kwargs=kwargs, + ) -> comfy_io.NodeOutput: + return await execute_image2video( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, + start_frame=start_frame, + prompt=prompt, + negative_prompt=negative_prompt, + cfg_scale=cfg_scale, + model_name=model_name, + aspect_ratio=aspect_ratio, + model_mode=mode, + duration=duration, + camera_control=camera_control, + end_frame=end_frame, ) - task_creation_response = await initial_operation.execute() - validate_task_creation_response(task_creation_response) - task_id = task_creation_response.data.task_id - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id - ) - validate_video_result_response(final_response) - - video = get_video_from_response(final_response) - return await video_result_to_node_output(video) - - -class KlingCameraControlI2VNode(KlingImage2VideoNode): +class KlingCameraControlI2VNode(comfy_io.ComfyNode): """ Kling Image to Video Camera Control Node. This node is an image to video node, but it supports controlling the camera. Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02.
""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "start_frame": model_field_to_node_input( - IO.IMAGE, KlingImage2VideoRequest, "image" + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingCameraControlI2VNode", + display_name="Kling Image to Video (Camera Control)", + category="api node/video/Kling", + description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.", + inputs=[ + comfy_io.Image.Input( + "start_frame", + tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.", ), - "prompt": model_field_to_node_input( - IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - KlingImage2VideoRequest, - "negative_prompt", - multiline=True, - ), - "cfg_scale": model_field_to_node_input( - IO.FLOAT, - KlingImage2VideoRequest, - "cfg_scale", - default=0.75, - min=0.0, - max=1.0, - ), - "aspect_ratio": model_field_to_node_input( - IO.COMBO, - KlingImage2VideoRequest, + comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), + comfy_io.Combo.Input( "aspect_ratio", - enum_type=KlingVideoGenAspectRatio, + options=[i.value for i in KlingVideoGenAspectRatio], + default="16:9", ), - "camera_control": ( - "CAMERA_CONTROL", - { - "tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", - }, + comfy_io.Custom("CAMERA_CONTROL").Input( + "camera_control", + tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image." 
- - async def api_call( - self, + @classmethod + async def execute( + cls, start_frame: torch.Tensor, prompt: str, negative_prompt: str, cfg_scale: float, aspect_ratio: str, camera_control: KlingCameraControl, - unique_id: Optional[str] = None, - **kwargs, - ): - return await super().api_call( + ) -> comfy_io.NodeOutput: + return await execute_image2video( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, model_name=KlingVideoGenModelName.kling_v1_5, start_frame=start_frame, cfg_scale=cfg_scale, - mode=KlingVideoGenMode.pro, + model_mode=KlingVideoGenMode.pro, aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio), duration=KlingVideoGenDuration.field_5, prompt=prompt, negative_prompt=negative_prompt, camera_control=camera_control, - unique_id=unique_id, - **kwargs, ) -class KlingStartEndFrameNode(KlingImage2VideoNode): +class KlingStartEndFrameNode(comfy_io.ComfyNode): """ Kling First Last Frame Node. This node allows creation of a video from a first and last frame. It calls the normal image to video endpoint, but only allows the subset of input options that support the `image_tail` request field. """ - @staticmethod - def get_mode_string_mapping() -> dict[str, tuple[str, str, str]]: - """ - Returns a mapping of mode strings to their corresponding (mode, duration, model_name) tuples. - Only includes config combos that support the `image_tail` request field. - - See: [Kling API Docs Capability Map](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) - """ - return { - "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), - "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"), - "pro mode / 5s duration / kling-v1-5": ("pro", "5", "kling-v1-5"), - "pro mode / 10s duration / kling-v1-5": ("pro", "10", "kling-v1-5"), - "pro mode / 5s duration / kling-v1-6": ("pro", "5", "kling-v1-6"), - "pro mode / 10s duration / kling-v1-6": ("pro", "10", "kling-v1-6"), - "pro mode / 5s duration / kling-v2-1": ("pro", "5", "kling-v2-1"), - "pro mode / 10s duration / kling-v2-1": ("pro", "10", "kling-v2-1"), - } + @classmethod + def define_schema(cls) -> comfy_io.Schema: + modes = list(MODE_START_END_FRAME.keys()) + return comfy_io.Schema( + node_id="KlingStartEndFrameNode", + display_name="Kling Start-End Frame to Video", + category="api node/video/Kling", + description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.", + inputs=[ + comfy_io.Image.Input( + "start_frame", + tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.", + ), + comfy_io.Image.Input( + "end_frame", + tooltip="Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. 
Base64 should not include data:image prefix.", + ), + comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + comfy_io.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), + comfy_io.Combo.Input( + "aspect_ratio", + options=[i.value for i in KlingVideoGenAspectRatio], + default="16:9", + ), + comfy_io.Combo.Input( + "mode", + options=modes, + default=modes[2], + tooltip="The configuration to use for the video generation following the format: mode / duration / model_name.", + ), + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - modes = list(KlingStartEndFrameNode.get_mode_string_mapping().keys()) - return { - "required": { - "start_frame": model_field_to_node_input( - IO.IMAGE, KlingImage2VideoRequest, "image" - ), - "end_frame": model_field_to_node_input( - IO.IMAGE, KlingImage2VideoRequest, "image_tail" - ), - "prompt": model_field_to_node_input( - IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True - ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - KlingImage2VideoRequest, - "negative_prompt", - multiline=True, - ), - "cfg_scale": model_field_to_node_input( - IO.FLOAT, - KlingImage2VideoRequest, - "cfg_scale", - default=0.5, - min=0.0, - max=1.0, - ), - "aspect_ratio": model_field_to_node_input( - IO.COMBO, - KlingImage2VideoRequest, - "aspect_ratio", - enum_type=KlingVideoGenAspectRatio, - ), - "mode": ( - modes, - { - "default": modes[2], - "tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - DESCRIPTION = "Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last." 
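# Hedged sketch of the reference-image rules quoted in the tooltips above
# (file size <= 10MB, resolution at least 300*300px, aspect ratio between
# 1:2.5 and 2.5:1). check_reference_image is a hypothetical helper, not a
# function in this module:
def check_reference_image(width: int, height: int, size_bytes: int) -> None:
    if size_bytes > 10 * 1024 * 1024:
        raise ValueError("image cannot exceed 10MB")
    if width < 300 or height < 300:
        raise ValueError("resolution must be at least 300*300px")
    if not (1 / 2.5 <= width / height <= 2.5):
        raise ValueError("aspect ratio must be between 1:2.5 and 2.5:1")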
- - async def api_call( - self, + async def execute( + cls, start_frame: torch.Tensor, end_frame: torch.Tensor, prompt: str, @@ -915,90 +1093,78 @@ class KlingStartEndFrameNode(KlingImage2VideoNode): cfg_scale: float, aspect_ratio: str, mode: str, - unique_id: Optional[str] = None, - **kwargs, - ): - mode, duration, model_name = KlingStartEndFrameNode.get_mode_string_mapping()[ - mode - ] - return await super().api_call( + ) -> comfy_io.NodeOutput: + mode, duration, model_name = MODE_START_END_FRAME[mode] + return await execute_image2video( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, prompt=prompt, negative_prompt=negative_prompt, model_name=model_name, start_frame=start_frame, cfg_scale=cfg_scale, - mode=mode, + model_mode=mode, aspect_ratio=aspect_ratio, duration=duration, end_frame=end_frame, - unique_id=unique_id, - **kwargs, ) -class KlingVideoExtendNode(KlingNodeBase): +class KlingVideoExtendNode(comfy_io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, KlingVideoExtendRequest, "prompt", multiline=True + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingVideoExtendNode", + display_name="Kling Video Extend", + category="api node/video/Kling", + description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.", + inputs=[ + comfy_io.String.Input( + "prompt", + multiline=True, + tooltip="Positive text prompt for guiding the video extension", ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - KlingVideoExtendRequest, + comfy_io.String.Input( "negative_prompt", multiline=True, + tooltip="Negative text prompt for elements to avoid in the extended video", ), - "cfg_scale": model_field_to_node_input( - IO.FLOAT, - KlingVideoExtendRequest, - "cfg_scale", - default=0.5, - min=0.0, - max=1.0, + comfy_io.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), + comfy_io.String.Input( + "video_id", + force_input=True, + tooltip="The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension.", ), - "video_id": model_field_to_node_input( - IO.STRING, KlingVideoExtendRequest, "video_id", forceInput=True - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("VIDEO", "STRING", "STRING") - RETURN_NAMES = ("VIDEO", "video_id", "duration") - DESCRIPTION = "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes." 
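# The video_id output of this node can be fed back into it for repeated
# extension, up to the 3 minute cap stated in the video_id tooltip. A sketch
# of that chaining, where extend_once is a hypothetical wrapper around one
# extend call returning (new_video_id, accumulated_seconds):
async def extend_until(video_id: str, total_s: float, target_s: float, extend_once):
    while total_s < min(target_s, 180.0):  # 180s = the 3 minute API limit
        video_id, total_s = await extend_once(video_id)
    return video_id, total_s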
- - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> KlingVideoExtendResponse: - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_VIDEO_EXTEND}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=KlingVideoExtendResponse, - ), - result_url_extractor=get_video_url_from_response, - estimated_duration=AVERAGE_DURATION_VIDEO_EXTEND, - node_id=node_id, + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, negative_prompt: str, cfg_scale: float, video_id: str, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[VideoFromFile, str, str]: + ) -> comfy_io.NodeOutput: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_VIDEO_EXTEND, @@ -1012,560 +1178,323 @@ class KlingVideoExtendNode(KlingNodeBase): cfg_scale=cfg_scale, video_id=video_id, ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id - ) - validate_video_result_response(final_response) - - video = get_video_from_response(final_response) - return await video_result_to_node_output(video) - - -class KlingVideoEffectsBase(KlingNodeBase): - """Kling Video Effects Base""" - - RETURN_TYPES = ("VIDEO", "STRING", "STRING") - RETURN_NAMES = ("VIDEO", "video_id", "duration") - - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> KlingVideoEffectsResponse: - return await poll_until_finished( - auth_kwargs, + final_response = await poll_until_finished( + auth, ApiEndpoint( - path=f"{PATH_VIDEO_EFFECTS}/{task_id}", + path=f"{PATH_VIDEO_EXTEND}/{task_id}", method=HttpMethod.GET, request_model=EmptyRequest, - response_model=KlingVideoEffectsResponse, + response_model=KlingVideoExtendResponse, ), result_url_extractor=get_video_url_from_response, - estimated_duration=AVERAGE_DURATION_VIDEO_EFFECTS, - node_id=node_id, - ) - - async def api_call( - self, - dual_character: bool, - effect_scene: KlingDualCharacterEffectsScene | KlingSingleImageEffectsScene, - model_name: str, - duration: KlingVideoGenDuration, - image_1: torch.Tensor, - image_2: Optional[torch.Tensor] = None, - mode: Optional[KlingVideoGenMode] = None, - unique_id: Optional[str] = None, - **kwargs, - ): - if dual_character: - request_input_field = KlingDualCharacterEffectInput( - model_name=model_name, - mode=mode, - images=[ - tensor_to_base64_string(image_1), - tensor_to_base64_string(image_2), - ], - duration=duration, - ) - else: - request_input_field = KlingSingleImageEffectInput( - model_name=model_name, - image=tensor_to_base64_string(image_1), - duration=duration, - ) - - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_VIDEO_EFFECTS, - method=HttpMethod.POST, - request_model=KlingVideoEffectsRequest, - 
response_model=KlingVideoEffectsResponse, - ), - request=KlingVideoEffectsRequest( - effect_scene=effect_scene, - input=request_input_field, - ), - auth_kwargs=kwargs, - ) - - task_creation_response = await initial_operation.execute() - validate_task_creation_response(task_creation_response) - task_id = task_creation_response.data.task_id - - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id + estimated_duration=AVERAGE_DURATION_VIDEO_EXTEND, + node_id=cls.hidden.unique_id, ) validate_video_result_response(final_response) video = get_video_from_response(final_response) - return await video_result_to_node_output(video) + return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) -class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase): +class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode): """Kling Dual Character Video Effect Node""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image_left": (IO.IMAGE, {"tooltip": "Left side image"}), - "image_right": (IO.IMAGE, {"tooltip": "Right side image"}), - "effect_scene": model_field_to_node_input( - IO.COMBO, - KlingVideoEffectsRequest, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingDualCharacterVideoEffectNode", + display_name="Kling Dual Character Video Effects", + category="api node/video/Kling", + description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.", + inputs=[ + comfy_io.Image.Input("image_left", tooltip="Left side image"), + comfy_io.Image.Input("image_right", tooltip="Right side image"), + comfy_io.Combo.Input( "effect_scene", - enum_type=KlingDualCharacterEffectsScene, + options=[i.value for i in KlingDualCharacterEffectsScene], ), - "model_name": model_field_to_node_input( - IO.COMBO, - KlingDualCharacterEffectInput, + comfy_io.Combo.Input( "model_name", - enum_type=KlingCharacterEffectModelName, + options=[i.value for i in KlingCharacterEffectModelName], + default="kling-v1", ), - "mode": model_field_to_node_input( - IO.COMBO, - KlingDualCharacterEffectInput, + comfy_io.Combo.Input( "mode", - enum_type=KlingVideoGenMode, + options=[i.value for i in KlingVideoGenMode], + default="std", ), - "duration": model_field_to_node_input( - IO.COMBO, - KlingDualCharacterEffectInput, + comfy_io.Combo.Input( "duration", - enum_type=KlingVideoGenDuration, + options=[i.value for i in KlingVideoGenDuration], ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite." 
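# Every node in this file follows the same two-step protocol: POST to create
# a task, then GET "{path}/{task_id}" until it reaches a terminal state (the
# module's real helper is poll_until_finished). A self-contained sketch of
# the polling half, with illustrative names:
import asyncio

async def poll(fetch, is_done, interval_s: float = 2.0, max_tries: int = 300):
    for _ in range(max_tries):
        response = await fetch()  # one GET against the task status endpoint
        if is_done(response):
            return response
        await asyncio.sleep(interval_s)  # wait before the next status check
    raise TimeoutError("task did not reach a terminal state in time")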
- RETURN_TYPES = ("VIDEO", "STRING") - RETURN_NAMES = ("VIDEO", "duration") - - async def api_call( - self, + @classmethod + async def execute( + cls, image_left: torch.Tensor, image_right: torch.Tensor, effect_scene: KlingDualCharacterEffectsScene, model_name: KlingCharacterEffectModelName, mode: KlingVideoGenMode, duration: KlingVideoGenDuration, - unique_id: Optional[str] = None, - **kwargs, - ): - video, _, duration = await super().api_call( + ) -> comfy_io.NodeOutput: + video, _, duration = await execute_video_effect( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, dual_character=True, effect_scene=effect_scene, model_name=model_name, - mode=mode, + model_mode=mode, duration=duration, image_1=image_left, image_2=image_right, - unique_id=unique_id, - **kwargs, ) return video, duration -class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase): +class KlingSingleImageVideoEffectNode(comfy_io.ComfyNode): """Kling Single Image Video Effect Node""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ( - IO.IMAGE, - { - "tooltip": " Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1" - }, - ), - "effect_scene": model_field_to_node_input( - IO.COMBO, - KlingVideoEffectsRequest, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingSingleImageVideoEffectNode", + display_name="Kling Video Effects", + category="api node/video/Kling", + description="Achieve different special effects when generating a video based on the effect_scene.", + inputs=[ + comfy_io.Image.Input("image", tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"), + comfy_io.Combo.Input( "effect_scene", - enum_type=KlingSingleImageEffectsScene, + options=[i.value for i in KlingSingleImageEffectsScene], ), - "model_name": model_field_to_node_input( - IO.COMBO, - KlingSingleImageEffectInput, + comfy_io.Combo.Input( "model_name", - enum_type=KlingSingleImageEffectModelName, + options=[i.value for i in KlingSingleImageEffectModelName], ), - "duration": model_field_to_node_input( - IO.COMBO, - KlingSingleImageEffectInput, + comfy_io.Combo.Input( "duration", - enum_type=KlingVideoGenDuration, + options=[i.value for i in KlingVideoGenDuration], ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Achieve different special effects when generating a video based on the effect_scene." 
- - async def api_call( - self, + @classmethod + async def execute( + cls, image: torch.Tensor, effect_scene: KlingSingleImageEffectsScene, model_name: KlingSingleImageEffectModelName, duration: KlingVideoGenDuration, - unique_id: Optional[str] = None, - **kwargs, - ): - return await super().api_call( + ) -> comfy_io.NodeOutput: + return await execute_video_effect( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, dual_character=False, effect_scene=effect_scene, model_name=model_name, duration=duration, image_1=image, - unique_id=unique_id, - **kwargs, ) -class KlingLipSyncBase(KlingNodeBase): - """Kling Lip Sync Base""" - - RETURN_TYPES = ("VIDEO", "STRING", "STRING") - RETURN_NAMES = ("VIDEO", "video_id", "duration") - - def validate_lip_sync_video(self, video: VideoInput): - """ - Validates the input video adheres to the expectations of the Kling Lip Sync API: - - Video length does not exceed 10s and is not shorter than 2s - - Length and width dimensions should both be between 720px and 1920px - - See: https://app.klingai.com/global/dev/document-api/apiReference/model/videoTolip - """ - validate_video_dimensions(video, 720, 1920) - validate_video_duration(video, 2, 10) - - def validate_text(self, text: str): - if not text: - raise ValueError("Text is required") - if len(text) > MAX_PROMPT_LENGTH_LIP_SYNC: - raise ValueError( - f"Text is too long. Maximum length is {MAX_PROMPT_LENGTH_LIP_SYNC} characters." - ) - - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> KlingLipSyncResponse: - """Polls the Kling API endpoint until the task reaches a terminal state.""" - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_LIP_SYNC}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=KlingLipSyncResponse, - ), - result_url_extractor=get_video_url_from_response, - estimated_duration=AVERAGE_DURATION_LIP_SYNC, - node_id=node_id, - ) - - async def api_call( - self, - video: VideoInput, - audio: Optional[AudioInput] = None, - voice_language: Optional[str] = None, - mode: Optional[str] = None, - text: Optional[str] = None, - voice_speed: Optional[float] = None, - voice_id: Optional[str] = None, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[VideoFromFile, str, str]: - if text: - self.validate_text(text) - self.validate_lip_sync_video(video) - - # Upload video to Comfy API and get download URL - video_url = await upload_video_to_comfyapi(video, auth_kwargs=kwargs) - logging.info("Uploaded video to Comfy API. URL: %s", video_url) - - # Upload the audio file to Comfy API and get download URL - if audio: - audio_url = await upload_audio_to_comfyapi(audio, auth_kwargs=kwargs) - logging.info("Uploaded audio to Comfy API. 
URL: %s", audio_url) - else: - audio_url = None - - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_LIP_SYNC, - method=HttpMethod.POST, - request_model=KlingLipSyncRequest, - response_model=KlingLipSyncResponse, - ), - request=KlingLipSyncRequest( - input=KlingLipSyncInputObject( - video_url=video_url, - mode=mode, - text=text, - voice_language=voice_language, - voice_speed=voice_speed, - audio_type="url", - audio_url=audio_url, - voice_id=voice_id, - ), - ), - auth_kwargs=kwargs, - ) - - task_creation_response = await initial_operation.execute() - validate_task_creation_response(task_creation_response) - task_id = task_creation_response.data.task_id - - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id - ) - validate_video_result_response(final_response) - - video = get_video_from_response(final_response) - return await video_result_to_node_output(video) - - -class KlingLipSyncAudioToVideoNode(KlingLipSyncBase): +class KlingLipSyncAudioToVideoNode(comfy_io.ComfyNode): """Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file.""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "video": (IO.VIDEO, {}), - "audio": (IO.AUDIO, {}), - "voice_language": model_field_to_node_input( - IO.COMBO, - KlingLipSyncInputObject, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingLipSyncAudioToVideoNode", + display_name="Kling Lip Sync Video with Audio", + category="api node/video/Kling", + description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", + inputs=[ + comfy_io.Video.Input("video"), + comfy_io.Audio.Input("audio"), + comfy_io.Combo.Input( "voice_language", - enum_type=KlingLipSyncVoiceLanguage, + options=[i.value for i in KlingLipSyncVoiceLanguage], + default="en", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) - DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length." 
- - async def api_call( - self, + @classmethod + async def execute( + cls, video: VideoInput, audio: AudioInput, voice_language: str, - unique_id: Optional[str] = None, - **kwargs, - ): - return await super().api_call( + ) -> comfy_io.NodeOutput: + return await execute_lipsync( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, video=video, audio=audio, voice_language=voice_language, - mode="audio2video", - unique_id=unique_id, - **kwargs, + model_mode="audio2video", ) -class KlingLipSyncTextToVideoNode(KlingLipSyncBase): +class KlingLipSyncTextToVideoNode(comfy_io.ComfyNode): """Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt.""" - @staticmethod - def get_voice_config() -> dict[str, tuple[str, str]]: - return { - # English voices - "Melody": ("girlfriend_4_speech02", "en"), - "Sunny": ("genshin_vindi2", "en"), - "Sage": ("zhinen_xuesheng", "en"), - "Ace": ("AOT", "en"), - "Blossom": ("ai_shatang", "en"), - "Peppy": ("genshin_klee2", "en"), - "Dove": ("genshin_kirara", "en"), - "Shine": ("ai_kaiya", "en"), - "Anchor": ("oversea_male1", "en"), - "Lyric": ("ai_chenjiahao_712", "en"), - "Tender": ("chat1_female_new-3", "en"), - "Siren": ("chat_0407_5-1", "en"), - "Zippy": ("cartoon-boy-07", "en"), - "Bud": ("uk_boy1", "en"), - "Sprite": ("cartoon-girl-01", "en"), - "Candy": ("PeppaPig_platform", "en"), - "Beacon": ("ai_huangzhong_712", "en"), - "Rock": ("ai_huangyaoshi_712", "en"), - "Titan": ("ai_laoguowang_712", "en"), - "Grace": ("chengshu_jiejie", "en"), - "Helen": ("you_pingjing", "en"), - "Lore": ("calm_story1", "en"), - "Crag": ("uk_man2", "en"), - "Prattle": ("laopopo_speech02", "en"), - "Hearth": ("heainainai_speech02", "en"), - "The Reader": ("reader_en_m-v1", "en"), - "Commercial Lady": ("commercial_lady_en_f-v1", "en"), - # Chinese voices - "阳光少年": ("genshin_vindi2", "zh"), - "懂事小弟": ("zhinen_xuesheng", "zh"), - "运动少年": ("tiyuxi_xuedi", "zh"), - "青春少女": ("ai_shatang", "zh"), - "温柔小妹": ("genshin_klee2", "zh"), - "元气少女": ("genshin_kirara", "zh"), - "阳光男生": ("ai_kaiya", "zh"), - "幽默小哥": ("tiexin_nanyou", "zh"), - "文艺小哥": ("ai_chenjiahao_712", "zh"), - "甜美邻家": ("girlfriend_1_speech02", "zh"), - "温柔姐姐": ("chat1_female_new-3", "zh"), - "职场女青": ("girlfriend_2_speech02", "zh"), - "活泼男童": ("cartoon-boy-07", "zh"), - "俏皮女童": ("cartoon-girl-01", "zh"), - "稳重老爸": ("ai_huangyaoshi_712", "zh"), - "温柔妈妈": ("you_pingjing", "zh"), - "严肃上司": ("ai_laoguowang_712", "zh"), - "优雅贵妇": ("chengshu_jiejie", "zh"), - "慈祥爷爷": ("zhuxi_speech02", "zh"), - "唠叨爷爷": ("uk_oldman3", "zh"), - "唠叨奶奶": ("laopopo_speech02", "zh"), - "和蔼奶奶": ("heainainai_speech02", "zh"), - "东北老铁": ("dongbeilaotie_speech02", "zh"), - "重庆小伙": ("chongqingxiaohuo_speech02", "zh"), - "四川妹子": ("chuanmeizi_speech02", "zh"), - "潮汕大叔": ("chaoshandashu_speech02", "zh"), - "台湾男生": ("ai_taiwan_man2_speech02", "zh"), - "西安掌柜": ("xianzhanggui_speech02", "zh"), - "天津姐姐": ("tianjinjiejie_speech02", "zh"), - "新闻播报男": ("diyinnansang_DB_CN_M_04-v2", "zh"), - "译制片男": ("yizhipiannan-v1", "zh"), - "撒娇女友": ("tianmeixuemei-v1", "zh"), - "刀片烟嗓": ("daopianyansang-v1", "zh"), - "乖巧正太": ("mengwa-v1", "zh"), - } + @classmethod + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingLipSyncTextToVideoNode", + display_name="Kling Lip Sync Video with Text", + category="api node/video/Kling", + description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. 
The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", + inputs=[ + comfy_io.Video.Input("video"), + comfy_io.String.Input( + "text", + multiline=True, + tooltip="Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.", + ), + comfy_io.Combo.Input( + "voice", + options=list(VOICES_CONFIG.keys()), + default="Melody", + ), + comfy_io.Float.Input( + "voice_speed", + default=1, + min=0.8, + max=2.0, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place.", + ), + ], + outputs=[ + comfy_io.Video.Output(), + comfy_io.String.Output(display_name="video_id"), + comfy_io.String.Output(display_name="duration"), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - voice_options = list(s.get_voice_config().keys()) - return { - "required": { - "video": (IO.VIDEO, {}), - "text": model_field_to_node_input( - IO.STRING, KlingLipSyncInputObject, "text", multiline=True - ), - "voice": (voice_options, {"default": voice_options[0]}), - "voice_speed": model_field_to_node_input( - IO.FLOAT, KlingLipSyncInputObject, "voice_speed", slider=True - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length." - - async def api_call( - self, + async def execute( + cls, video: VideoInput, text: str, voice: str, voice_speed: float, - unique_id: Optional[str] = None, - **kwargs, - ): - voice_id, voice_language = KlingLipSyncTextToVideoNode.get_voice_config()[voice] - return await super().api_call( + ) -> comfy_io.NodeOutput: + voice_id, voice_language = VOICES_CONFIG[voice] + return await execute_lipsync( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, video=video, text=text, voice_language=voice_language, voice_id=voice_id, voice_speed=voice_speed, - mode="text2video", - unique_id=unique_id, - **kwargs, + model_mode="text2video", ) -class KlingImageGenerationBase(KlingNodeBase): - """Kling Image Generation Base Node.""" - - RETURN_TYPES = ("IMAGE",) - CATEGORY = "api node/image/Kling" - - def validate_prompt(self, prompt: str, negative_prompt: Optional[str] = None): - if not prompt or len(prompt) > MAX_PROMPT_LENGTH_IMAGE_GEN: - raise ValueError( - f"Prompt must be less than {MAX_PROMPT_LENGTH_IMAGE_GEN} characters" - ) - if negative_prompt and len(negative_prompt) > MAX_PROMPT_LENGTH_IMAGE_GEN: - raise ValueError( - f"Negative prompt must be less than {MAX_PROMPT_LENGTH_IMAGE_GEN} characters" - ) - - -class KlingVirtualTryOnNode(KlingImageGenerationBase): +class KlingVirtualTryOnNode(comfy_io.ComfyNode): """Kling Virtual Try On Node.""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "human_image": (IO.IMAGE, {}), - "cloth_image": (IO.IMAGE, {}), - "model_name": model_field_to_node_input( - IO.COMBO, - KlingVirtualTryOnRequest, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingVirtualTryOnNode", + 
display_name="Kling Virtual Try On", + category="api node/image/Kling", + description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.", + inputs=[ + comfy_io.Image.Input("human_image"), + comfy_io.Image.Input("cloth_image"), + comfy_io.Combo.Input( "model_name", - enum_type=KlingVirtualTryOnModelName, + options=[i.value for i in KlingVirtualTryOnModelName], + default="kolors-virtual-try-on-v1", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background." - - async def get_response( - self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None - ) -> KlingVirtualTryOnResponse: - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_VIRTUAL_TRY_ON}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=KlingVirtualTryOnResponse, - ), - result_url_extractor=get_images_urls_from_response, - estimated_duration=AVERAGE_DURATION_VIRTUAL_TRY_ON, - node_id=node_id, + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - async def api_call( - self, + @classmethod + async def execute( + cls, human_image: torch.Tensor, cloth_image: torch.Tensor, model_name: KlingVirtualTryOnModelName, - unique_id: Optional[str] = None, - **kwargs, - ): + ) -> comfy_io.NodeOutput: + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_VIRTUAL_TRY_ON, @@ -1578,113 +1507,99 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase): cloth_image=tensor_to_base64_string(cloth_image), model_name=model_name, ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id + final_response = await poll_until_finished( + auth, + ApiEndpoint( + path=f"{PATH_VIRTUAL_TRY_ON}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingVirtualTryOnResponse, + ), + result_url_extractor=get_images_urls_from_response, + estimated_duration=AVERAGE_DURATION_VIRTUAL_TRY_ON, + node_id=cls.hidden.unique_id, ) validate_image_result_response(final_response) images = get_images_from_response(final_response) - return (await image_result_to_node_output(images),) + return comfy_io.NodeOutput(await image_result_to_node_output(images)) -class KlingImageGenerationNode(KlingImageGenerationBase): +class KlingImageGenerationNode(comfy_io.ComfyNode): """Kling Image Generation Node. 
Generate an image from a text prompt with an optional reference image.""" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": model_field_to_node_input( - IO.STRING, - KlingImageGenerationsRequest, - "prompt", - multiline=True, - max_length=MAX_PROMPT_LENGTH_IMAGE_GEN, + def define_schema(cls) -> comfy_io.Schema: + return comfy_io.Schema( + node_id="KlingImageGenerationNode", + display_name="Kling Image Generation", + category="api node/image/Kling", + description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.", + inputs=[ + comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + comfy_io.Combo.Input( + "image_type", + options=[i.value for i in KlingImageGenImageReferenceType], ), - "negative_prompt": model_field_to_node_input( - IO.STRING, - KlingImageGenerationsRequest, - "negative_prompt", - multiline=True, - ), - "image_type": model_field_to_node_input( - IO.COMBO, - KlingImageGenerationsRequest, - "image_reference", - enum_type=KlingImageGenImageReferenceType, - ), - "image_fidelity": model_field_to_node_input( - IO.FLOAT, - KlingImageGenerationsRequest, + comfy_io.Float.Input( "image_fidelity", - slider=True, + default=0.5, + min=0.0, + max=1.0, step=0.01, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Reference intensity for user-uploaded images", ), - "human_fidelity": model_field_to_node_input( - IO.FLOAT, - KlingImageGenerationsRequest, + comfy_io.Float.Input( "human_fidelity", - slider=True, + default=0.45, + min=0.0, + max=1.0, step=0.01, + display_mode=comfy_io.NumberDisplay.slider, + tooltip="Subject reference similarity", ), - "model_name": model_field_to_node_input( - IO.COMBO, - KlingImageGenerationsRequest, + comfy_io.Combo.Input( "model_name", - enum_type=KlingImageGenModelName, + options=[i.value for i in KlingImageGenModelName], + default="kling-v1", ), - "aspect_ratio": model_field_to_node_input( - IO.COMBO, - KlingImageGenerationsRequest, + comfy_io.Combo.Input( "aspect_ratio", - enum_type=KlingImageGenAspectRatio, + options=[i.value for i in KlingImageGenAspectRatio], + default="16:9", ), - "n": model_field_to_node_input( - IO.INT, - KlingImageGenerationsRequest, + comfy_io.Int.Input( "n", + default=1, + min=1, + max=9, + tooltip="Number of generated images", ), - }, - "optional": { - "image": (IO.IMAGE, {}), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - DESCRIPTION = "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image." 
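# The removed validate_prompt helper survives as the two validate_string
# calls in execute() below. An equivalent standalone form of those checks
# (check_image_gen_prompts is illustrative; max_len corresponds to
# MAX_PROMPT_LENGTH_IMAGE_GEN):
def check_image_gen_prompts(prompt: str, negative_prompt: str, max_len: int) -> None:
    if not prompt or len(prompt) > max_len:
        raise ValueError(f"Prompt must be 1 to {max_len} characters")
    if negative_prompt and len(negative_prompt) > max_len:
        raise ValueError(f"Negative prompt must be less than {max_len} characters")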
- - async def get_response( - self, - task_id: str, - auth_kwargs: Optional[dict[str, str]], - node_id: Optional[str] = None, - ) -> KlingImageGenerationsResponse: - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=f"{PATH_IMAGE_GENERATIONS}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=KlingImageGenerationsResponse, - ), - result_url_extractor=get_images_urls_from_response, - estimated_duration=AVERAGE_DURATION_IMAGE_GEN, - node_id=node_id, + comfy_io.Image.Input("image", optional=True), + ], + outputs=[ + comfy_io.Image.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, ) - async def api_call( - self, + @classmethod + async def execute( + cls, model_name: KlingImageGenModelName, prompt: str, negative_prompt: str, @@ -1694,10 +1609,9 @@ class KlingImageGenerationNode(KlingImageGenerationBase): n: int, aspect_ratio: KlingImageGenAspectRatio, image: Optional[torch.Tensor] = None, - unique_id: Optional[str] = None, - **kwargs, - ): - self.validate_prompt(prompt, negative_prompt) + ) -> comfy_io.NodeOutput: + validate_string(prompt, field_name="prompt", min_length=1, max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) + validate_string(negative_prompt, field_name="negative_prompt", max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) if image is None: image_type = None @@ -1706,6 +1620,10 @@ class KlingImageGenerationNode(KlingImageGenerationBase): else: image = tensor_to_base64_string(image) + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( path=PATH_IMAGE_GENERATIONS, @@ -1724,50 +1642,50 @@ class KlingImageGenerationNode(KlingImageGenerationBase): n=n, aspect_ratio=aspect_ratio, ), - auth_kwargs=kwargs, + auth_kwargs=auth, ) task_creation_response = await initial_operation.execute() validate_task_creation_response(task_creation_response) task_id = task_creation_response.data.task_id - final_response = await self.get_response( - task_id, auth_kwargs=kwargs, node_id=unique_id + final_response = await poll_until_finished( + auth, + ApiEndpoint( + path=f"{PATH_IMAGE_GENERATIONS}/{task_id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=KlingImageGenerationsResponse, + ), + result_url_extractor=get_images_urls_from_response, + estimated_duration=AVERAGE_DURATION_IMAGE_GEN, + node_id=cls.hidden.unique_id, ) validate_image_result_response(final_response) images = get_images_from_response(final_response) - return (await image_result_to_node_output(images),) + return comfy_io.NodeOutput(await image_result_to_node_output(images)) -NODE_CLASS_MAPPINGS = { - "KlingCameraControls": KlingCameraControls, - "KlingTextToVideoNode": KlingTextToVideoNode, - "KlingImage2VideoNode": KlingImage2VideoNode, - "KlingCameraControlI2VNode": KlingCameraControlI2VNode, - "KlingCameraControlT2VNode": KlingCameraControlT2VNode, - "KlingStartEndFrameNode": KlingStartEndFrameNode, - "KlingVideoExtendNode": KlingVideoExtendNode, - "KlingLipSyncAudioToVideoNode": KlingLipSyncAudioToVideoNode, - "KlingLipSyncTextToVideoNode": KlingLipSyncTextToVideoNode, - "KlingVirtualTryOnNode": KlingVirtualTryOnNode, - "KlingImageGenerationNode": KlingImageGenerationNode, - "KlingSingleImageVideoEffectNode": KlingSingleImageVideoEffectNode, - "KlingDualCharacterVideoEffectNode": KlingDualCharacterVideoEffectNode, -} +class 
KlingExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + KlingCameraControls, + KlingTextToVideoNode, + KlingImage2VideoNode, + KlingCameraControlI2VNode, + KlingCameraControlT2VNode, + KlingStartEndFrameNode, + KlingVideoExtendNode, + KlingLipSyncAudioToVideoNode, + KlingLipSyncTextToVideoNode, + KlingVirtualTryOnNode, + KlingImageGenerationNode, + KlingSingleImageVideoEffectNode, + KlingDualCharacterVideoEffectNode, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "KlingCameraControls": "Kling Camera Controls", - "KlingTextToVideoNode": "Kling Text to Video", - "KlingImage2VideoNode": "Kling Image to Video", - "KlingCameraControlI2VNode": "Kling Image to Video (Camera Control)", - "KlingCameraControlT2VNode": "Kling Text to Video (Camera Control)", - "KlingStartEndFrameNode": "Kling Start-End Frame to Video", - "KlingVideoExtendNode": "Kling Video Extend", - "KlingLipSyncAudioToVideoNode": "Kling Lip Sync Video with Audio", - "KlingLipSyncTextToVideoNode": "Kling Lip Sync Video with Text", - "KlingVirtualTryOnNode": "Kling Virtual Try On", - "KlingImageGenerationNode": "Kling Image Generation", - "KlingSingleImageVideoEffectNode": "Kling Video Effects", - "KlingDualCharacterVideoEffectNode": "Kling Dual Character Video Effects", -} + +async def comfy_entrypoint() -> KlingExtension: + return KlingExtension() From 8aea746212dc1bb1601b4dc5e8c8093d2221d89c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 6 Oct 2025 19:08:08 -0700 Subject: [PATCH 0715/1073] Implement gemma 3 as a text encoder. (#10241) Not useful yet. --- comfy/model_detection.py | 4 +- comfy/sd.py | 7 ++ comfy/text_encoders/llama.py | 133 +++++++++++++++++++++++++++------ comfy/text_encoders/lumina2.py | 26 ++++++- 4 files changed, 142 insertions(+), 28 deletions(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 46415c17a..7677617c0 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -365,8 +365,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["patch_size"] = 2 dit_config["in_channels"] = 16 dit_config["dim"] = 2304 - dit_config["cap_feat_dim"] = 2304 - dit_config["n_layers"] = 26 + dit_config["cap_feat_dim"] = state_dict['{}cap_embedder.1.weight'.format(key_prefix)].shape[1] + dit_config["n_layers"] = count_blocks(state_dict_keys, '{}layers.'.format(key_prefix) + '{}.') dit_config["n_heads"] = 24 dit_config["n_kv_heads"] = 8 dit_config["qk_norm"] = True diff --git a/comfy/sd.py b/comfy/sd.py index be225ad03..f2d95f85a 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -890,6 +890,7 @@ class TEModel(Enum): QWEN25_3B = 10 QWEN25_7B = 11 BYT5_SMALL_GLYPH = 12 + GEMMA_3_4B = 13 def detect_te_model(sd): if "text_model.encoder.layers.30.mlp.fc1.weight" in sd: @@ -912,6 +913,8 @@ def detect_te_model(sd): return TEModel.BYT5_SMALL_GLYPH return TEModel.T5_BASE if 'model.layers.0.post_feedforward_layernorm.weight' in sd: + if 'model.layers.0.self_attn.q_norm.weight' in sd: + return TEModel.GEMMA_3_4B return TEModel.GEMMA_2_2B if 'model.layers.0.self_attn.k_proj.bias' in sd: weight = sd['model.layers.0.self_attn.k_proj.bias'] @@ -1016,6 +1019,10 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.lumina2.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.lumina2.LuminaTokenizer tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None) + elif 
te_model == TEModel.GEMMA_3_4B: + clip_target.clip = comfy.text_encoders.lumina2.te(**llama_detect(clip_data), model_type="gemma3_4b") + clip_target.tokenizer = comfy.text_encoders.lumina2.NTokenizer + tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None) elif te_model == TEModel.LLAMA3_8: clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**llama_detect(clip_data), clip_l=False, clip_g=False, t5=False, llama=True, dtype_t5=None, t5xxl_scaled_fp8=None) diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index c5a48ba9f..c050759fe 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -3,6 +3,7 @@ import torch.nn as nn from dataclasses import dataclass from typing import Optional, Any import math +import logging from comfy.ldm.modules.attention import optimized_attention_for_device import comfy.model_management @@ -28,6 +29,9 @@ class Llama2Config: mlp_activation = "silu" qkv_bias = False rope_dims = None + q_norm = None + k_norm = None + rope_scale = None @dataclass class Qwen25_3BConfig: @@ -46,6 +50,9 @@ class Qwen25_3BConfig: mlp_activation = "silu" qkv_bias = True rope_dims = None + q_norm = None + k_norm = None + rope_scale = None @dataclass class Qwen25_7BVLI_Config: @@ -64,6 +71,9 @@ class Qwen25_7BVLI_Config: mlp_activation = "silu" qkv_bias = True rope_dims = [16, 24, 24] + q_norm = None + k_norm = None + rope_scale = None @dataclass class Gemma2_2B_Config: @@ -82,6 +92,32 @@ class Gemma2_2B_Config: mlp_activation = "gelu_pytorch_tanh" qkv_bias = False rope_dims = None + q_norm = None + k_norm = None + sliding_attention = None + rope_scale = None + +@dataclass +class Gemma3_4B_Config: + vocab_size: int = 262208 + hidden_size: int = 2560 + intermediate_size: int = 10240 + num_hidden_layers: int = 34 + num_attention_heads: int = 8 + num_key_value_heads: int = 4 + max_position_embeddings: int = 131072 + rms_norm_eps: float = 1e-6 + rope_theta = [10000.0, 1000000.0] + transformer_type: str = "gemma3" + head_dim = 256 + rms_norm_add = True + mlp_activation = "gelu_pytorch_tanh" + qkv_bias = False + rope_dims = None + q_norm = "gemma3" + k_norm = "gemma3" + sliding_attention = [False, False, False, False, False, 1024] + rope_scale = [1.0, 8.0] class RMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-5, add=False, device=None, dtype=None): @@ -106,25 +142,40 @@ def rotate_half(x): return torch.cat((-x2, x1), dim=-1) -def precompute_freqs_cis(head_dim, position_ids, theta, rope_dims=None, device=None): - theta_numerator = torch.arange(0, head_dim, 2, device=device).float() - inv_freq = 1.0 / (theta ** (theta_numerator / head_dim)) +def precompute_freqs_cis(head_dim, position_ids, theta, rope_scale=None, rope_dims=None, device=None): + if not isinstance(theta, list): + theta = [theta] - inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) - position_ids_expanded = position_ids[:, None, :].float() - freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) - emb = torch.cat((freqs, freqs), dim=-1) - cos = emb.cos() - sin = emb.sin() - if rope_dims is not None and position_ids.shape[0] > 1: - mrope_section = rope_dims * 2 - cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(0) - sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(0) - else: - cos = cos.unsqueeze(1) - sin = sin.unsqueeze(1) + out = [] + for index, t in enumerate(theta): + 
theta_numerator = torch.arange(0, head_dim, 2, device=device).float() + inv_freq = 1.0 / (t ** (theta_numerator / head_dim)) - return (cos, sin) + if rope_scale is not None: + if isinstance(rope_scale, list): + inv_freq /= rope_scale[index] + else: + inv_freq /= rope_scale + + inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + if rope_dims is not None and position_ids.shape[0] > 1: + mrope_section = rope_dims * 2 + cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(0) + sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(0) + else: + cos = cos.unsqueeze(1) + sin = sin.unsqueeze(1) + out.append((cos, sin)) + + if len(out) == 1: + return out[0] + + return out def apply_rope(xq, xk, freqs_cis): @@ -152,6 +203,14 @@ class Attention(nn.Module): self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=config.qkv_bias, device=device, dtype=dtype) self.o_proj = ops.Linear(self.inner_size, config.hidden_size, bias=False, device=device, dtype=dtype) + self.q_norm = None + self.k_norm = None + + if config.q_norm == "gemma3": + self.q_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) + if config.k_norm == "gemma3": + self.k_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) + def forward( self, hidden_states: torch.Tensor, @@ -168,6 +227,11 @@ class Attention(nn.Module): xk = xk.view(batch_size, seq_length, self.num_kv_heads, self.head_dim).transpose(1, 2) xv = xv.view(batch_size, seq_length, self.num_kv_heads, self.head_dim).transpose(1, 2) + if self.q_norm is not None: + xq = self.q_norm(xq) + if self.k_norm is not None: + xk = self.k_norm(xk) + xq, xk = apply_rope(xq, xk, freqs_cis=freqs_cis) xk = xk.repeat_interleave(self.num_heads // self.num_kv_heads, dim=1) @@ -192,7 +256,7 @@ class MLP(nn.Module): return self.down_proj(self.activation(self.gate_proj(x)) * self.up_proj(x)) class TransformerBlock(nn.Module): - def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None): + def __init__(self, config: Llama2Config, index, device=None, dtype=None, ops: Any = None): super().__init__() self.self_attn = Attention(config, device=device, dtype=dtype, ops=ops) self.mlp = MLP(config, device=device, dtype=dtype, ops=ops) @@ -226,7 +290,7 @@ class TransformerBlock(nn.Module): return x class TransformerBlockGemma2(nn.Module): - def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None): + def __init__(self, config: Llama2Config, index, device=None, dtype=None, ops: Any = None): super().__init__() self.self_attn = Attention(config, device=device, dtype=dtype, ops=ops) self.mlp = MLP(config, device=device, dtype=dtype, ops=ops) @@ -235,6 +299,13 @@ class TransformerBlockGemma2(nn.Module): self.pre_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) self.post_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) + if config.sliding_attention is not None: # TODO: implement. 
(Not that necessary since models are trained on less than 1024 tokens) + self.sliding_attention = config.sliding_attention[index % len(config.sliding_attention)] + else: + self.sliding_attention = False + + self.transformer_type = config.transformer_type + def forward( self, x: torch.Tensor, @@ -242,6 +313,14 @@ class TransformerBlockGemma2(nn.Module): freqs_cis: Optional[torch.Tensor] = None, optimized_attention=None, ): + if self.transformer_type == 'gemma3': + if self.sliding_attention: + if x.shape[1] > self.sliding_attention: + logging.warning("Warning: sliding attention not implemented, results may be incorrect") + freqs_cis = freqs_cis[1] + else: + freqs_cis = freqs_cis[0] + # Self Attention residual = x x = self.input_layernorm(x) @@ -276,7 +355,7 @@ class Llama2_(nn.Module): device=device, dtype=dtype ) - if self.config.transformer_type == "gemma2": + if self.config.transformer_type == "gemma2" or self.config.transformer_type == "gemma3": transformer = TransformerBlockGemma2 self.normalize_in = True else: @@ -284,8 +363,8 @@ class Llama2_(nn.Module): self.normalize_in = False self.layers = nn.ModuleList([ - transformer(config, device=device, dtype=dtype, ops=ops) - for _ in range(config.num_hidden_layers) + transformer(config, index=i, device=device, dtype=dtype, ops=ops) + for i in range(config.num_hidden_layers) ]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) # self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype) @@ -305,6 +384,7 @@ class Llama2_(nn.Module): freqs_cis = precompute_freqs_cis(self.config.head_dim, position_ids, self.config.rope_theta, + self.config.rope_scale, self.config.rope_dims, device=x.device) @@ -433,3 +513,12 @@ class Gemma2_2B(BaseLlama, torch.nn.Module): self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) self.dtype = dtype + +class Gemma3_4B(BaseLlama, torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + super().__init__() + config = Gemma3_4B_Config(**config_dict) + self.num_layers = config.num_hidden_layers + + self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) + self.dtype = dtype diff --git a/comfy/text_encoders/lumina2.py b/comfy/text_encoders/lumina2.py index 674461b75..fd986e2c1 100644 --- a/comfy/text_encoders/lumina2.py +++ b/comfy/text_encoders/lumina2.py @@ -11,23 +11,41 @@ class Gemma2BTokenizer(sd1_clip.SDTokenizer): def state_dict(self): return {"spiece_model": self.tokenizer.serialize_model()} +class Gemma3_4BTokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer = tokenizer_data.get("spiece_model", None) + super().__init__(tokenizer, pad_with_end=False, embedding_size=2560, embedding_key='gemma3_4b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False}, tokenizer_data=tokenizer_data) + + def state_dict(self): + return {"spiece_model": self.tokenizer.serialize_model()} class LuminaTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="gemma2_2b", tokenizer=Gemma2BTokenizer) +class NTokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, 
tokenizer_data=tokenizer_data, name="gemma3_4b", tokenizer=Gemma3_4BTokenizer) class Gemma2_2BModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}): super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma2_2B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) +class Gemma3_4BModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) class LuminaModel(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", dtype=None, model_options={}): - super().__init__(device=device, dtype=dtype, name="gemma2_2b", clip_model=Gemma2_2BModel, model_options=model_options) + def __init__(self, device="cpu", dtype=None, model_options={}, name="gemma2_2b", clip_model=Gemma2_2BModel): + super().__init__(device=device, dtype=dtype, name=name, clip_model=clip_model, model_options=model_options) -def te(dtype_llama=None, llama_scaled_fp8=None): +def te(dtype_llama=None, llama_scaled_fp8=None, model_type="gemma2_2b"): + if model_type == "gemma2_2b": + model = Gemma2_2BModel + elif model_type == "gemma3_4b": + model = Gemma3_4BModel + class LuminaTEModel_(LuminaModel): def __init__(self, device="cpu", dtype=None, model_options={}): if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: @@ -35,5 +53,5 @@ def te(dtype_llama=None, llama_scaled_fp8=None): model_options["scaled_fp8"] = llama_scaled_fp8 if dtype_llama is not None: dtype = dtype_llama - super().__init__(device=device, dtype=dtype, model_options=model_options) + super().__init__(device=device, dtype=dtype, name=model_type, model_options=model_options, clip_model=model) return LuminaTEModel_ From fc34c3d1125e970699dcb311323839ed6dda4985 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 7 Oct 2025 23:15:32 +0300 Subject: [PATCH 0716/1073] fix(ReCraft-API-node): allow custom multipart parser to return FormData (#10244) --- comfy_api_nodes/apis/client.py | 17 ++++++++++------- comfy_api_nodes/nodes_recraft.py | 28 ++++++++++++++++++++++------ 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index 79de3c262..36560c9e3 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -220,13 +220,16 @@ class ApiClient: if multipart_parser and data: data = multipart_parser(data) - form = aiohttp.FormData(default_to_multipart=True) - if data: # regular text fields - for k, v in data.items(): - if v is None: - continue # aiohttp fails to serialize "None" values - # aiohttp expects strings or bytes; convert enums etc. 
-            form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v)
+        if isinstance(data, aiohttp.FormData):
+            form = data  # If the parser already returned a FormData, pass it through
+        else:
+            form = aiohttp.FormData(default_to_multipart=True)
+            if data:  # regular text fields
+                for k, v in data.items():
+                    if v is None:
+                        continue  # aiohttp fails to serialize "None" values
+                    # aiohttp expects strings or bytes; convert enums etc.
+                    form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v)
 
         if files:
             file_iter = files if isinstance(files, list) else files.items()
diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py
index 0bbb551b8..8beed5675 100644
--- a/comfy_api_nodes/nodes_recraft.py
+++ b/comfy_api_nodes/nodes_recraft.py
@@ -35,6 +35,7 @@ from server import PromptServer
 import torch
 from io import BytesIO
 from PIL import UnidentifiedImageError
+import aiohttp
 
 
 async def handle_recraft_file_request(
@@ -82,10 +83,16 @@ async def handle_recraft_file_request(
     return all_bytesio
 
 
-def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, converted_to_check: list[list]=None, is_list=False) -> dict:
+def recraft_multipart_parser(
+    data,
+    parent_key=None,
+    formatter: callable = None,
+    converted_to_check: list[list] = None,
+    is_list: bool = False,
+    return_mode: str = "formdata"  # "dict" | "formdata"
+) -> dict | aiohttp.FormData:
     """
-    Formats data such that multipart/form-data will work with requests library
-    when both files and data are present.
+    Formats data such that multipart/form-data will work with the aiohttp library when both files and data are present.
 
     The OpenAI client that Recraft uses has a bizarre way of serializing lists:
 
@@ -103,19 +110,19 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co
     # Modification of a function that handled a different type of multipart parsing, big ups:
     # https://gist.github.com/kazqvaizer/4cebebe5db654a414132809f9f88067b
 
-    def handle_converted_lists(data, parent_key, lists_to_check=tuple[list]):
+    def handle_converted_lists(item, parent_key, lists_to_check=tuple[list]):
         # if the list already exists, just extend it with the data
         for check_list in lists_to_check:
             for conv_tuple in check_list:
                 if conv_tuple[0] == parent_key and isinstance(conv_tuple[1], list):
-                    conv_tuple[1].append(formatter(data))
+                    conv_tuple[1].append(formatter(item))
                     return True
         return False
 
     if converted_to_check is None:
         converted_to_check = []
-
+    effective_mode = return_mode if parent_key is None else "dict"
     if formatter is None:
         formatter = lambda v: v  # Multipart representation of value
 
@@ -145,6 +152,15 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co
         else:
             converted.append((current_key, formatter(value)))
 
+    if effective_mode == "formdata":
+        fd = aiohttp.FormData()
+        for k, v in dict(converted).items():
+            if isinstance(v, list):
+                for item in v:
+                    fd.add_field(k, str(item))
+            else:
+                fd.add_field(k, str(v))
+        return fd
     return dict(converted)
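With the change above, a custom multipart_parser may return a ready-built aiohttp.FormData and ApiClient forwards it unchanged instead of re-serializing the dict itself. A minimal sketch of such a parser (hypothetical name; assumes plain string field values):

    import aiohttp

    def my_multipart_parser(data: dict) -> aiohttp.FormData:
        # Build the form directly; ApiClient detects the FormData instance
        # and passes it through as-is.
        form = aiohttp.FormData(default_to_multipart=True)
        for key, value in data.items():
            if value is None:
                continue  # aiohttp cannot serialize None values
            form.add_field(key, value if isinstance(value, (bytes, bytearray)) else str(value))
        return form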
From 9e984c48bc6a1d1c82231c46542995dbf5a265d7 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Wed, 8 Oct 2025 00:11:37 +0300
Subject: [PATCH 0717/1073] feat(api-nodes): add Sora2 API node (#10249)

---
 comfy_api_nodes/apinode_utils.py |  23 +++-
 comfy_api_nodes/nodes_sora.py    | 175 +++++++++++++++++++++++++++++++
 nodes.py                         |   1 +
 3 files changed, 194 insertions(+), 5 deletions(-)
 create mode 100644 comfy_api_nodes/nodes_sora.py

diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py
index 5ac3b92aa..571c74286 100644
--- a/comfy_api_nodes/apinode_utils.py
+++ b/comfy_api_nodes/apinode_utils.py
@@ -18,7 +18,7 @@ from comfy_api_nodes.apis.client import (
     UploadResponse,
 )
 from server import PromptServer
-
+from comfy.cli_args import args
 
 import numpy as np
 from PIL import Image
@@ -30,7 +30,9 @@
 from io import BytesIO
 import av
 
 
-async def download_url_to_video_output(video_url: str, timeout: int = None) -> VideoFromFile:
+async def download_url_to_video_output(
+    video_url: str, timeout: int = None, auth_kwargs: Optional[dict[str, str]] = None
+) -> VideoFromFile:
     """Downloads a video from a URL and returns a `VIDEO` output.
 
     Args:
@@ -39,7 +41,7 @@ async def download_url_to_video_output(video_url: str, timeout: int = None) -> V
     Returns:
         A Comfy node `VIDEO` output.
     """
-    video_io = await download_url_to_bytesio(video_url, timeout)
+    video_io = await download_url_to_bytesio(video_url, timeout, auth_kwargs=auth_kwargs)
     if video_io is None:
         error_msg = f"Failed to download video from {video_url}"
         logging.error(error_msg)
@@ -164,7 +166,9 @@ def mimetype_to_extension(mime_type: str) -> str:
     return mime_type.split("/")[-1].lower()
 
 
-async def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO:
+async def download_url_to_bytesio(
+    url: str, timeout: int = None, auth_kwargs: Optional[dict[str, str]] = None
+) -> BytesIO:
     """Downloads content from a URL using aiohttp and returns it as BytesIO.
 
     Args:
@@ -174,9 +178,18 @@ async def download_url_to_bytesio(url: str, timeout: int = None) -> BytesIO:
     Returns:
         BytesIO object containing the downloaded content.
     """
+    headers = {}
+    if url.startswith("/proxy/"):
+        url = str(args.comfy_api_base).rstrip("/") + url
+        auth_token = auth_kwargs.get("auth_token")
+        comfy_api_key = auth_kwargs.get("comfy_api_key")
+        if auth_token:
+            headers["Authorization"] = f"Bearer {auth_token}"
+        elif comfy_api_key:
+            headers["X-API-KEY"] = comfy_api_key
     timeout_cfg = aiohttp.ClientTimeout(total=timeout) if timeout else None
     async with aiohttp.ClientSession(timeout=timeout_cfg) as session:
-        async with session.get(url) as resp:
+        async with session.get(url, headers=headers) as resp:
             resp.raise_for_status()  # Raises ClientResponseError for bad responses (4XX or 5XX)
             return BytesIO(await resp.read())
 
diff --git a/comfy_api_nodes/nodes_sora.py b/comfy_api_nodes/nodes_sora.py
new file mode 100644
index 000000000..2d532d637
--- /dev/null
+++ b/comfy_api_nodes/nodes_sora.py
@@ -0,0 +1,175 @@
+from typing import Optional
+from typing_extensions import override
+
+import torch
+from pydantic import BaseModel, Field
+from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api_nodes.apis.client import (
+    ApiEndpoint,
+    HttpMethod,
+    SynchronousOperation,
+    PollingOperation,
+    EmptyRequest,
+)
+from comfy_api_nodes.util.validation_utils import get_number_of_images
+
+from comfy_api_nodes.apinode_utils import (
+    download_url_to_video_output,
+    tensor_to_bytesio,
+)
+
+
+class Sora2GenerationRequest(BaseModel):
+    prompt: str = Field(...)
+    model: str = Field(...)
+    seconds: str = Field(...)
+    size: str = Field(...)
+
+
+class Sora2GenerationResponse(BaseModel):
+    id: str = Field(...)
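+    # `status` drives the polling loop further below and `error` carries
+    # failure details; both remain None until the job settles.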
+ error: Optional[dict] = Field(None) + status: Optional[str] = Field(None) + + +class OpenAIVideoSora2(comfy_io.ComfyNode): + @classmethod + def define_schema(cls): + return comfy_io.Schema( + node_id="OpenAIVideoSora2", + display_name="OpenAI Sora - Video", + category="api node/video/Sora", + description="OpenAI video and audio generation.", + inputs=[ + comfy_io.Combo.Input( + "model", + options=["sora-2", "sora-2-pro"], + default="sora-2", + ), + comfy_io.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Guiding text; may be empty if an input image is present.", + ), + comfy_io.Combo.Input( + "size", + options=[ + "720x1280", + "1280x720", + "1024x1792", + "1792x1024", + ], + default="1280x720", + ), + comfy_io.Combo.Input( + "duration", + options=[4, 8, 12], + default=8, + ), + comfy_io.Image.Input( + "image", + optional=True, + ), + comfy_io.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + step=1, + display_mode=comfy_io.NumberDisplay.number, + control_after_generate=True, + optional=True, + tooltip="Seed to determine if node should re-run; " + "actual results are nondeterministic regardless of seed.", + ), + ], + outputs=[ + comfy_io.Video.Output(), + ], + hidden=[ + comfy_io.Hidden.auth_token_comfy_org, + comfy_io.Hidden.api_key_comfy_org, + comfy_io.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + size: str = "1280x720", + duration: int = 8, + seed: int = 0, + image: Optional[torch.Tensor] = None, + ): + if model == "sora-2" and size not in ("720x1280", "1280x720"): + raise ValueError("Invalid size for sora-2 model, only 720x1280 and 1280x720 are supported.") + files_input = None + if image is not None: + if get_number_of_images(image) != 1: + raise ValueError("Currently only one input image is supported.") + files_input = {"input_reference": ("image.png", tensor_to_bytesio(image), "image/png")} + auth = { + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + } + payload = Sora2GenerationRequest( + model=model, + prompt=prompt, + seconds=str(duration), + size=size, + ) + initial_operation = SynchronousOperation( + endpoint=ApiEndpoint( + path="/proxy/openai/v1/videos", + method=HttpMethod.POST, + request_model=Sora2GenerationRequest, + response_model=Sora2GenerationResponse + ), + request=payload, + files=files_input, + auth_kwargs=auth, + content_type="multipart/form-data", + ) + initial_response = await initial_operation.execute() + if initial_response.error: + raise Exception(initial_response.error.message) + + model_time_multiplier = 1 if model == "sora-2" else 2 + poll_operation = PollingOperation( + poll_endpoint=ApiEndpoint( + path=f"/proxy/openai/v1/videos/{initial_response.id}", + method=HttpMethod.GET, + request_model=EmptyRequest, + response_model=Sora2GenerationResponse + ), + completed_statuses=["completed"], + failed_statuses=["failed"], + status_extractor=lambda x: x.status, + auth_kwargs=auth, + poll_interval=8.0, + max_poll_attempts=160, + node_id=cls.hidden.unique_id, + estimated_duration=45 * (duration / 4) * model_time_multiplier, + ) + await poll_operation.execute() + return comfy_io.NodeOutput( + await download_url_to_video_output( + f"/proxy/openai/v1/videos/{initial_response.id}/content", + auth_kwargs=auth, + ) + ) + + +class OpenAISoraExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + return [ + OpenAIVideoSora2, + ] + + +async def 
comfy_entrypoint() -> OpenAISoraExtension: + return OpenAISoraExtension() diff --git a/nodes.py b/nodes.py index 88d712993..2a2a5f2ad 100644 --- a/nodes.py +++ b/nodes.py @@ -2357,6 +2357,7 @@ async def init_builtin_api_nodes(): "nodes_stability.py", "nodes_pika.py", "nodes_runway.py", + "nodes_sora.py", "nodes_tripo.py", "nodes_moonvalley.py", "nodes_rodin.py", From 8a15568f10c0622a7281c32fadffc51511e53c10 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 7 Oct 2025 16:55:23 -0700 Subject: [PATCH 0718/1073] Temp fix for LTXV custom nodes. (#10251) --- comfy_extras/nodes_lt.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index b51d15804..50da5f4eb 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -34,6 +34,7 @@ class EmptyLTXVLatentVideo(io.ComfyNode): latent = torch.zeros([batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32], device=comfy.model_management.intermediate_device()) return io.NodeOutput({"samples": latent}) + generate = execute # TODO: remove class LTXVImgToVideo(io.ComfyNode): @classmethod @@ -77,6 +78,8 @@ class LTXVImgToVideo(io.ComfyNode): return io.NodeOutput(positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask}) + generate = execute # TODO: remove + def conditioning_get_any_value(conditioning, key, default=None): for t in conditioning: @@ -264,6 +267,8 @@ class LTXVAddGuide(io.ComfyNode): return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask}) + generate = execute # TODO: remove + class LTXVCropGuides(io.ComfyNode): @classmethod @@ -300,6 +305,8 @@ class LTXVCropGuides(io.ComfyNode): return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask}) + crop = execute # TODO: remove + class LTXVConditioning(io.ComfyNode): @classmethod @@ -498,6 +505,7 @@ class LTXVPreprocess(io.ComfyNode): output_images.append(preprocess(image[i], img_compression)) return io.NodeOutput(torch.stack(output_images)) + preprocess = execute # TODO: remove class LtxvExtension(ComfyExtension): @override From 19f595b788bd227004a5f7232f3b5895b46411ea Mon Sep 17 00:00:00 2001 From: filtered <176114999+webfiltered@users.noreply.github.com> Date: Wed, 8 Oct 2025 11:54:00 +1100 Subject: [PATCH 0719/1073] Bump frontend to 1.27.10 (#10252) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index db0486960..85b3bb63b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.27.7 +comfyui-frontend-package==1.27.10 comfyui-workflow-templates==0.1.93 comfyui-embedded-docs==0.2.6 torch From 51697d50dc94005b1c279eb0cf45207697946020 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 8 Oct 2025 10:48:51 +0800 Subject: [PATCH 0720/1073] update template to 0.1.94 (#10253) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 85b3bb63b..d4594df39 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.27.10 -comfyui-workflow-templates==0.1.93 +comfyui-workflow-templates==0.1.94 comfyui-embedded-docs==0.2.6 torch torchsde From 637221995f7424a561bd825de3e61ea117dfe1e3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 8 Oct 2025 00:53:43 -0400 Subject: [PATCH 0721/1073] ComfyUI version 0.3.64 --- comfyui_version.py | 2 +- 
pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index c3257d4bf..da5cde02d 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.63" +__version__ = "0.3.64" diff --git a/pyproject.toml b/pyproject.toml index abd1a5f5c..6ea839336 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.63" +version = "0.3.64" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 3e0eb8d33f9a65f2a01430f1b4a1535348af881c Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 8 Oct 2025 10:14:04 +0300 Subject: [PATCH 0722/1073] feat(V3-io): allow Enum classes for Combo options (#10237) --- comfy_api/latest/_io.py | 24 ++++++++++++++++----- comfy_api_nodes/nodes_bytedance.py | 16 +++++++------- comfy_api_nodes/nodes_kling.py | 20 +++++++++--------- comfy_api_nodes/nodes_luma.py | 18 ++++++++-------- comfy_api_nodes/nodes_pika.py | 6 +++--- comfy_api_nodes/nodes_pixverse.py | 22 +++++++++---------- comfy_api_nodes/nodes_runway.py | 12 +++++------ comfy_api_nodes/nodes_stability.py | 10 ++++----- comfy_api_nodes/nodes_vidu.py | 34 +++++++++++++++--------------- 9 files changed, 88 insertions(+), 74 deletions(-) diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 2d95cffd6..661309f19 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -336,11 +336,25 @@ class Combo(ComfyTypeIO): class Input(WidgetInput): """Combo input (dropdown).""" Type = str - def __init__(self, id: str, options: list[str]=None, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, - default: str=None, control_after_generate: bool=None, - upload: UploadType=None, image_folder: FolderType=None, - remote: RemoteOptions=None, - socketless: bool=None): + def __init__( + self, + id: str, + options: list[str] | list[int] | type[Enum] = None, + display_name: str=None, + optional=False, + tooltip: str=None, + lazy: bool=None, + default: str | int | Enum = None, + control_after_generate: bool=None, + upload: UploadType=None, + image_folder: FolderType=None, + remote: RemoteOptions=None, + socketless: bool=None, + ): + if isinstance(options, type) and issubclass(options, Enum): + options = [v.value for v in options] + if isinstance(default, Enum): + default = default.value super().__init__(id, display_name, optional, tooltip, lazy, default, socketless) self.multiselect = False self.options = options diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index 654d6a362..fcb01820c 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -249,8 +249,8 @@ class ByteDanceImageNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[model.value for model in Text2ImageModelName], - default=Text2ImageModelName.seedream_3.value, + options=Text2ImageModelName, + default=Text2ImageModelName.seedream_3, tooltip="Model name", ), comfy_io.String.Input( @@ -382,8 +382,8 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[model.value for model in Image2ImageModelName], - default=Image2ImageModelName.seededit_3.value, + options=Image2ImageModelName, + default=Image2ImageModelName.seededit_3, tooltip="Model name", ), comfy_io.Image.Input( 
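A quick sketch of the ergonomics the _io.py change above allows (hypothetical SampleQuality enum; the input id is illustrative):

    from enum import Enum
    from comfy_api.latest import io as comfy_io

    class SampleQuality(Enum):
        low = "low"
        high = "high"

    # Before: enum options and defaults had to be unrolled by hand
    comfy_io.Combo.Input("quality", options=[q.value for q in SampleQuality], default=SampleQuality.high.value)

    # After: the Enum class and an Enum member are accepted directly;
    # Combo.Input normalizes both to their .value form internally
    comfy_io.Combo.Input("quality", options=SampleQuality, default=SampleQuality.high)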
@@ -676,8 +676,8 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[model.value for model in Text2VideoModelName], - default=Text2VideoModelName.seedance_1_pro.value, + options=Text2VideoModelName, + default=Text2VideoModelName.seedance_1_pro, tooltip="Model name", ), comfy_io.String.Input( @@ -793,8 +793,8 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[model.value for model in Image2VideoModelName], - default=Image2VideoModelName.seedance_1_pro.value, + options=Image2VideoModelName, + default=Image2VideoModelName.seedance_1_pro, tooltip="Model name", ), comfy_io.String.Input( diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 457b43451..fe5b8562d 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -647,7 +647,7 @@ class KlingCameraControls(comfy_io.ComfyNode): category="api node/video/Kling", description="Allows specifying configuration options for Kling Camera Controls and motion control effects.", inputs=[ - comfy_io.Combo.Input("camera_control_type", options=[i.value for i in KlingCameraControlType]), + comfy_io.Combo.Input("camera_control_type", options=KlingCameraControlType), comfy_io.Float.Input( "horizontal_movement", default=0.0, @@ -772,7 +772,7 @@ class KlingTextToVideoNode(comfy_io.ComfyNode): comfy_io.Float.Input("cfg_scale", default=1.0, min=0.0, max=1.0), comfy_io.Combo.Input( "aspect_ratio", - options=[i.value for i in KlingVideoGenAspectRatio], + options=KlingVideoGenAspectRatio, default="16:9", ), comfy_io.Combo.Input( @@ -840,7 +840,7 @@ class KlingCameraControlT2VNode(comfy_io.ComfyNode): comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), comfy_io.Combo.Input( "aspect_ratio", - options=[i.value for i in KlingVideoGenAspectRatio], + options=KlingVideoGenAspectRatio, default="16:9", ), comfy_io.Custom("CAMERA_CONTROL").Input( @@ -903,17 +903,17 @@ class KlingImage2VideoNode(comfy_io.ComfyNode): comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), comfy_io.Combo.Input( "model_name", - options=[i.value for i in KlingVideoGenModelName], + options=KlingVideoGenModelName, default="kling-v2-master", ), comfy_io.Float.Input("cfg_scale", default=0.8, min=0.0, max=1.0), - comfy_io.Combo.Input("mode", options=[i.value for i in KlingVideoGenMode], default="std"), + comfy_io.Combo.Input("mode", options=KlingVideoGenMode, default=KlingVideoGenMode.std), comfy_io.Combo.Input( "aspect_ratio", - options=[i.value for i in KlingVideoGenAspectRatio], - default="16:9", + options=KlingVideoGenAspectRatio, + default=KlingVideoGenAspectRatio.field_16_9, ), - comfy_io.Combo.Input("duration", options=[i.value for i in KlingVideoGenDuration], default="5"), + comfy_io.Combo.Input("duration", options=KlingVideoGenDuration, default=KlingVideoGenDuration.field_5), ], outputs=[ comfy_io.Video.Output(), @@ -984,8 +984,8 @@ class KlingCameraControlI2VNode(comfy_io.ComfyNode): comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), comfy_io.Combo.Input( "aspect_ratio", - options=[i.value for i in KlingVideoGenAspectRatio], - default="16:9", + options=KlingVideoGenAspectRatio, + default=KlingVideoGenAspectRatio.field_16_9, ), comfy_io.Custom("CAMERA_CONTROL").Input( "camera_control", diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index 9cd02ffd2..9cab2ca82 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ 
-181,11 +181,11 @@ class LumaImageGenerationNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "model", - options=[model.value for model in LumaImageModel], + options=LumaImageModel, ), comfy_io.Combo.Input( "aspect_ratio", - options=[ratio.value for ratio in LumaAspectRatio], + options=LumaAspectRatio, default=LumaAspectRatio.ratio_16_9, ), comfy_io.Int.Input( @@ -366,7 +366,7 @@ class LumaImageModifyNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "model", - options=[model.value for model in LumaImageModel], + options=LumaImageModel, ), comfy_io.Int.Input( "seed", @@ -466,21 +466,21 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "model", - options=[model.value for model in LumaVideoModel], + options=LumaVideoModel, ), comfy_io.Combo.Input( "aspect_ratio", - options=[ratio.value for ratio in LumaAspectRatio], + options=LumaAspectRatio, default=LumaAspectRatio.ratio_16_9, ), comfy_io.Combo.Input( "resolution", - options=[resolution.value for resolution in LumaVideoOutputResolution], + options=LumaVideoOutputResolution, default=LumaVideoOutputResolution.res_540p, ), comfy_io.Combo.Input( "duration", - options=[dur.value for dur in LumaVideoModelOutputDuration], + options=LumaVideoModelOutputDuration, ), comfy_io.Boolean.Input( "loop", @@ -595,7 +595,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "model", - options=[model.value for model in LumaVideoModel], + options=LumaVideoModel, ), # comfy_io.Combo.Input( # "aspect_ratio", @@ -604,7 +604,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): # ), comfy_io.Combo.Input( "resolution", - options=[resolution.value for resolution in LumaVideoOutputResolution], + options=LumaVideoOutputResolution, default=LumaVideoOutputResolution.res_540p, ), comfy_io.Combo.Input( diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index 0a9f04cc2..35d6baf1c 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -174,10 +174,10 @@ def get_base_inputs_types() -> list[comfy_io.Input]: comfy_io.String.Input("negative_prompt", multiline=True), comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), comfy_io.Combo.Input( - "resolution", options=[resolution.value for resolution in PikaResolutionEnum], default="1080p" + "resolution", options=PikaResolutionEnum, default=PikaResolutionEnum.field_1080p ), comfy_io.Combo.Input( - "duration", options=[duration.value for duration in PikaDurationEnum], default=5 + "duration", options=PikaDurationEnum, default=PikaDurationEnum.integer_5 ), ] @@ -616,7 +616,7 @@ class PikaffectsNode(comfy_io.ComfyNode): inputs=[ comfy_io.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."), comfy_io.Combo.Input( - "pikaffect", options=[pikaffect.value for pikaffect in Pikaffect], default="Cake-ify" + "pikaffect", options=Pikaffect, default="Cake-ify" ), comfy_io.String.Input("prompt_text", multiline=True), comfy_io.String.Input("negative_prompt", multiline=True), diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index 2c91bbc65..a97610f06 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -85,7 +85,7 @@ class PixverseTemplateNode(comfy_io.ComfyNode): display_name="PixVerse Template", category="api node/video/PixVerse", inputs=[ - comfy_io.Combo.Input("template", options=[list(pixverse_templates.keys())]), + comfy_io.Combo.Input("template", options=list(pixverse_templates.keys())), ], 
outputs=[comfy_io.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")], ) @@ -120,20 +120,20 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "aspect_ratio", - options=[ratio.value for ratio in PixverseAspectRatio], + options=PixverseAspectRatio, ), comfy_io.Combo.Input( "quality", - options=[resolution.value for resolution in PixverseQuality], + options=PixverseQuality, default=PixverseQuality.res_540p, ), comfy_io.Combo.Input( "duration_seconds", - options=[dur.value for dur in PixverseDuration], + options=PixverseDuration, ), comfy_io.Combo.Input( "motion_mode", - options=[mode.value for mode in PixverseMotionMode], + options=PixverseMotionMode, ), comfy_io.Int.Input( "seed", @@ -262,16 +262,16 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "quality", - options=[resolution.value for resolution in PixverseQuality], + options=PixverseQuality, default=PixverseQuality.res_540p, ), comfy_io.Combo.Input( "duration_seconds", - options=[dur.value for dur in PixverseDuration], + options=PixverseDuration, ), comfy_io.Combo.Input( "motion_mode", - options=[mode.value for mode in PixverseMotionMode], + options=PixverseMotionMode, ), comfy_io.Int.Input( "seed", @@ -403,16 +403,16 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "quality", - options=[resolution.value for resolution in PixverseQuality], + options=PixverseQuality, default=PixverseQuality.res_540p, ), comfy_io.Combo.Input( "duration_seconds", - options=[dur.value for dur in PixverseDuration], + options=PixverseDuration, ), comfy_io.Combo.Input( "motion_mode", - options=[mode.value for mode in PixverseMotionMode], + options=PixverseMotionMode, ), comfy_io.Int.Input( "seed", diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py index 27b2bf748..ea22692cb 100644 --- a/comfy_api_nodes/nodes_runway.py +++ b/comfy_api_nodes/nodes_runway.py @@ -200,11 +200,11 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "duration", - options=[model.value for model in Duration], + options=Duration, ), comfy_io.Combo.Input( "ratio", - options=[model.value for model in RunwayGen3aAspectRatio], + options=RunwayGen3aAspectRatio, ), comfy_io.Int.Input( "seed", @@ -300,11 +300,11 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "duration", - options=[model.value for model in Duration], + options=Duration, ), comfy_io.Combo.Input( "ratio", - options=[model.value for model in RunwayGen4TurboAspectRatio], + options=RunwayGen4TurboAspectRatio, ), comfy_io.Int.Input( "seed", @@ -408,11 +408,11 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "duration", - options=[model.value for model in Duration], + options=Duration, ), comfy_io.Combo.Input( "ratio", - options=[model.value for model in RunwayGen3aAspectRatio], + options=RunwayGen3aAspectRatio, ), comfy_io.Int.Input( "seed", diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py index 5ba5ed986..bfb67fc9d 100644 --- a/comfy_api_nodes/nodes_stability.py +++ b/comfy_api_nodes/nodes_stability.py @@ -82,8 +82,8 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "aspect_ratio", - options=[x.value for x in StabilityAspectRatio], - default=StabilityAspectRatio.ratio_1_1.value, + options=StabilityAspectRatio, + default=StabilityAspectRatio.ratio_1_1, tooltip="Aspect ratio of generated image.", ), comfy_io.Combo.Input( @@ -217,12 
+217,12 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "model", - options=[x.value for x in Stability_SD3_5_Model], + options=Stability_SD3_5_Model, ), comfy_io.Combo.Input( "aspect_ratio", - options=[x.value for x in StabilityAspectRatio], - default=StabilityAspectRatio.ratio_1_1.value, + options=StabilityAspectRatio, + default=StabilityAspectRatio.ratio_1_1, tooltip="Aspect ratio of generated image.", ), comfy_io.Combo.Input( diff --git a/comfy_api_nodes/nodes_vidu.py b/comfy_api_nodes/nodes_vidu.py index 2f441948c..ac28b683c 100644 --- a/comfy_api_nodes/nodes_vidu.py +++ b/comfy_api_nodes/nodes_vidu.py @@ -173,8 +173,8 @@ class ViduTextToVideoNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[model.value for model in VideoModelName], - default=VideoModelName.vidu_q1.value, + options=VideoModelName, + default=VideoModelName.vidu_q1, tooltip="Model name", ), comfy_io.String.Input( @@ -205,22 +205,22 @@ class ViduTextToVideoNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "aspect_ratio", - options=[model.value for model in AspectRatio], - default=AspectRatio.r_16_9.value, + options=AspectRatio, + default=AspectRatio.r_16_9, tooltip="The aspect ratio of the output video", optional=True, ), comfy_io.Combo.Input( "resolution", - options=[model.value for model in Resolution], - default=Resolution.r_1080p.value, + options=Resolution, + default=Resolution.r_1080p, tooltip="Supported values may vary by model & duration", optional=True, ), comfy_io.Combo.Input( "movement_amplitude", - options=[model.value for model in MovementAmplitude], - default=MovementAmplitude.auto.value, + options=MovementAmplitude, + default=MovementAmplitude.auto, tooltip="The movement amplitude of objects in the frame", optional=True, ), @@ -278,8 +278,8 @@ class ViduImageToVideoNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[model.value for model in VideoModelName], - default=VideoModelName.vidu_q1.value, + options=VideoModelName, + default=VideoModelName.vidu_q1, tooltip="Model name", ), comfy_io.Image.Input( @@ -316,14 +316,14 @@ class ViduImageToVideoNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "resolution", - options=[model.value for model in Resolution], - default=Resolution.r_1080p.value, + options=Resolution, + default=Resolution.r_1080p, tooltip="Supported values may vary by model & duration", optional=True, ), comfy_io.Combo.Input( "movement_amplitude", - options=[model.value for model in MovementAmplitude], + options=MovementAmplitude, default=MovementAmplitude.auto.value, tooltip="The movement amplitude of objects in the frame", optional=True, @@ -388,8 +388,8 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode): inputs=[ comfy_io.Combo.Input( "model", - options=[model.value for model in VideoModelName], - default=VideoModelName.vidu_q1.value, + options=VideoModelName, + default=VideoModelName.vidu_q1, tooltip="Model name", ), comfy_io.Image.Input( @@ -424,8 +424,8 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode): ), comfy_io.Combo.Input( "aspect_ratio", - options=[model.value for model in AspectRatio], - default=AspectRatio.r_16_9.value, + options=AspectRatio, + default=AspectRatio.r_16_9, tooltip="The aspect ratio of the output video", optional=True, ), From 6e59934089df3375e39db174340b6a937b226c83 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 8 Oct 2025 14:49:02 -0700 Subject: [PATCH 0723/1073] Refactor model sampling sigmas code. 
(#10250) --- comfy/model_sampling.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index b240b7f29..2a00ed819 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -21,17 +21,23 @@ def rescale_zero_terminal_snr_sigmas(sigmas): alphas_bar[-1] = 4.8973451890853435e-08 return ((1 - alphas_bar) / alphas_bar) ** 0.5 +def reshape_sigma(sigma, noise_dim): + if sigma.nelement() == 1: + return sigma.view(()) + else: + return sigma.view(sigma.shape[:1] + (1,) * (noise_dim - 1)) + class EPS: def calculate_input(self, sigma, noise): - sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + sigma = reshape_sigma(sigma, noise.ndim) return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + sigma = reshape_sigma(sigma, model_output.ndim) return model_input - model_output * sigma def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): - sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + sigma = reshape_sigma(sigma, noise.ndim) if max_denoise: noise = noise * torch.sqrt(1.0 + sigma ** 2.0) else: @@ -45,12 +51,12 @@ class EPS: class V_PREDICTION(EPS): def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + sigma = reshape_sigma(sigma, model_output.ndim) return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 class EDM(V_PREDICTION): def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + sigma = reshape_sigma(sigma, model_output.ndim) return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 class CONST: @@ -58,15 +64,15 @@ class CONST: return noise def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + sigma = reshape_sigma(sigma, model_output.ndim) return model_input - model_output * sigma def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): - sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + sigma = reshape_sigma(sigma, noise.ndim) return sigma * noise + (1.0 - sigma) * latent_image def inverse_noise_scaling(self, sigma, latent): - sigma = sigma.view(sigma.shape[:1] + (1,) * (latent.ndim - 1)) + sigma = reshape_sigma(sigma, latent.ndim) return latent / (1.0 - sigma) class X0(EPS): @@ -80,16 +86,16 @@ class IMG_TO_IMG(X0): class COSMOS_RFLOW: def calculate_input(self, sigma, noise): sigma = (sigma / (sigma + 1)) - sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + sigma = reshape_sigma(sigma, noise.ndim) return noise * (1.0 - sigma) def calculate_denoised(self, sigma, model_output, model_input): sigma = (sigma / (sigma + 1)) - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + sigma = reshape_sigma(sigma, model_output.ndim) return model_input * (1.0 - sigma) - model_output * sigma def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): - sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + sigma = reshape_sigma(sigma, noise.ndim) noise = noise * sigma noise += latent_image 
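        # For intuition, the shapes reshape_sigma produces (illustrative values):
        #   reshape_sigma(torch.tensor(0.5), 4).shape        -> torch.Size([])         (0-d, broadcasts anywhere)
        #   reshape_sigma(torch.tensor([0.5, 0.7]), 4).shape -> torch.Size([2, 1, 1, 1]) (per-batch sigma)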
return noise From 72c2071972d3207ed92bc20535299c5f39622818 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Wed, 8 Oct 2025 17:30:41 -0700 Subject: [PATCH 0724/1073] Mvly/node update (#10042) * updated V2V node to allow for control image input exposing steps in v2v fixing guidance_scale as input parameter TODO: allow for motion_intensity as input param. * refactor: comment out unsupported resolution and adjust default values in video nodes * set control_after_generate * adding new defaults * fixes * changed control_after_generate back to True * changed control_after_generate back to False --------- Co-authored-by: thorsten --- comfy_api_nodes/nodes_moonvalley.py | 60 ++++++++++++++++++----------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 55471a69d..4a56d31f8 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -335,7 +335,7 @@ def parse_width_height_from_res(resolution: str): "1:1 (1152 x 1152)": {"width": 1152, "height": 1152}, "4:3 (1536 x 1152)": {"width": 1536, "height": 1152}, "3:4 (1152 x 1536)": {"width": 1152, "height": 1536}, - "21:9 (2560 x 1080)": {"width": 2560, "height": 1080}, + # "21:9 (2560 x 1080)": {"width": 2560, "height": 1080}, } return res_map.get(resolution, {"width": 1920, "height": 1080}) @@ -388,11 +388,11 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): "negative_prompt", multiline=True, default=" gopro, bright, contrast, static, overexposed, vignette, " - "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, " - "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, " - "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, " - "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, " - "wobbly, weird, low quality, plastic, stock footage, video camera, boring", + "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, " + "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, " + "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, " + "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, " + "wobbly, weird, low quality, plastic, stock footage, video camera, boring", tooltip="Negative prompt text", ), comfy_io.Combo.Input( @@ -403,14 +403,14 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): "1:1 (1152 x 1152)", "4:3 (1536 x 1152)", "3:4 (1152 x 1536)", - "21:9 (2560 x 1080)", + # "21:9 (2560 x 1080)", ], default="16:9 (1920 x 1080)", tooltip="Resolution of the output video", ), comfy_io.Float.Input( "prompt_adherence", - default=10.0, + default=4.5, min=1.0, max=20.0, step=1.0, @@ -424,10 +424,11 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): step=1, display_mode=comfy_io.NumberDisplay.number, tooltip="Random seed value", + control_after_generate=True, ), comfy_io.Int.Input( "steps", - default=100, + default=33, min=1, max=100, step=1, @@ -468,7 +469,6 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): steps=steps, seed=seed, guidance_scale=prompt_adherence, - num_frames=128, width=width_height["width"], height=width_height["height"], use_negative_prompts=True, @@ -526,11 +526,11 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): "negative_prompt", multiline=True, default=" gopro, bright, contrast, static, overexposed, vignette, " - "artifacts, still, noise, 
texture, scanlines, videogame, 360 camera, VR, transition, " - "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, " - "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, " - "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, " - "wobbly, weird, low quality, plastic, stock footage, video camera, boring", + "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, " + "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, " + "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, " + "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, " + "wobbly, weird, low quality, plastic, stock footage, video camera, boring", tooltip="Negative prompt text", ), comfy_io.Int.Input( @@ -546,7 +546,7 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): comfy_io.Video.Input( "video", tooltip="The reference video used to generate the output video. Must be at least 5 seconds long. " - "Videos longer than 5s will be automatically trimmed. Only MP4 format supported.", + "Videos longer than 5s will be automatically trimmed. Only MP4 format supported.", ), comfy_io.Combo.Input( "control_type", @@ -563,6 +563,15 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): tooltip="Only used if control_type is 'Motion Transfer'", optional=True, ), + comfy_io.Int.Input( + "steps", + default=33, + min=1, + max=100, + step=1, + display_mode=comfy_io.NumberDisplay.number, + tooltip="Number of inference steps", + ), ], outputs=[comfy_io.Video.Output()], hidden=[ @@ -582,6 +591,8 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): video: Optional[VideoInput] = None, control_type: str = "Motion Transfer", motion_intensity: Optional[int] = 100, + steps=33, + prompt_adherence=4.5, ) -> comfy_io.NodeOutput: auth = { "auth_token": cls.hidden.auth_token_comfy_org, @@ -602,6 +613,8 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): negative_prompt=negative_prompt, seed=seed, control_params=control_params, + steps=steps, + guidance_scale=prompt_adherence, ) control = parse_control_parameter(control_type) @@ -653,11 +666,11 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): "negative_prompt", multiline=True, default=" gopro, bright, contrast, static, overexposed, vignette, " - "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, " - "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, " - "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, " - "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, " - "wobbly, weird, low quality, plastic, stock footage, video camera, boring", + "artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, " + "flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, " + "cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, " + "blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, " + "wobbly, weird, low quality, plastic, stock footage, video camera, boring", tooltip="Negative prompt text", ), comfy_io.Combo.Input( @@ -675,7 +688,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): ), comfy_io.Float.Input( "prompt_adherence", - default=10.0, + default=4.0, min=1.0, max=20.0, step=1.0, @@ -688,11 +701,12 @@ class 
MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
                     max=4294967295,
                     step=1,
                     display_mode=comfy_io.NumberDisplay.number,
+                    control_after_generate=True,
                     tooltip="Random seed value",
                 ),
                 comfy_io.Int.Input(
                     "steps",
-                    default=100,
+                    default=33,
                     min=1,
                     max=100,
                     step=1,

From 51fb505ffa7cdae113ef4303f9ef45a06d668a90 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Thu, 9 Oct 2025 09:06:56 +0300
Subject: [PATCH 0725/1073] feat(api-nodes, pylint): use lazy formatting in
 logging functions (#10248)

---
 comfy_api_nodes/apinode_utils.py       |  2 +-
 comfy_api_nodes/apis/client.py         | 55 +++++++++++++++-----------
 comfy_api_nodes/apis/request_logger.py |  6 +--
 comfy_api_nodes/nodes_kling.py         |  4 +-
 comfy_api_nodes/nodes_minimax.py       |  2 +-
 comfy_api_nodes/nodes_moonvalley.py    | 14 +++----
 comfy_api_nodes/nodes_rodin.py         | 10 ++---
 comfy_api_nodes/nodes_veo2.py          |  2 +-
 pyproject.toml                         |  1 -
 9 files changed, 50 insertions(+), 46 deletions(-)

diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py
index 571c74286..2e0dc4dc1 100644
--- a/comfy_api_nodes/apinode_utils.py
+++ b/comfy_api_nodes/apinode_utils.py
@@ -431,7 +431,7 @@ async def upload_video_to_comfyapi(
                 f"Video duration ({actual_duration:.2f}s) exceeds the maximum allowed ({max_duration}s)."
             )
     except Exception as e:
-        logging.error(f"Error getting video duration: {e}")
+        logging.error("Error getting video duration: %s", str(e))
         raise ValueError(f"Could not verify video duration from source: {e}") from e
 
     upload_mime_type = f"video/{container.value.lower()}"
diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py
index 36560c9e3..a3ceafbae 100644
--- a/comfy_api_nodes/apis/client.py
+++ b/comfy_api_nodes/apis/client.py
@@ -359,10 +359,10 @@ class ApiClient:
         if params:
             params = {k: v for k, v in params.items() if v is not None}  # aiohttp fails to serialize None values
 
-        logging.debug(f"[DEBUG] Request Headers: {request_headers}")
-        logging.debug(f"[DEBUG] Files: {files}")
-        logging.debug(f"[DEBUG] Params: {params}")
-        logging.debug(f"[DEBUG] Data: {data}")
+        logging.debug("[DEBUG] Request Headers: %s", request_headers)
+        logging.debug("[DEBUG] Files: %s", files)
+        logging.debug("[DEBUG] Params: %s", params)
+        logging.debug("[DEBUG] Data: %s", data)
 
         if content_type == "application/x-www-form-urlencoded":
             payload_args = self._create_urlencoded_form_data_args(data or {}, request_headers)
@@ -592,9 +592,9 @@ class ApiClient:
                 error_message=f"HTTP Error {exc.status}",
             )
 
-        logging.debug(f"[DEBUG] API Error: {user_friendly} (Status: {status_code})")
+        logging.debug("[DEBUG] API Error: %s (Status: %s)", user_friendly, status_code)
         if response_content:
-            logging.debug(f"[DEBUG] Response content: {response_content}")
+            logging.debug("[DEBUG] Response content: %s", response_content)
 
         # Retry if eligible
         if status_code in self.retry_status_codes and retry_count < self.max_retries:
@@ -738,11 +738,9 @@ class SynchronousOperation(Generic[T, R]):
                     if isinstance(v, Enum):
                         request_dict[k] = v.value
 
-            logging.debug(
-                f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}"
-            )
-            logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}")
-            logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}")
+            logging.debug("[DEBUG] API Request: %s %s", self.endpoint.method.value, self.endpoint.path)
+            logging.debug("[DEBUG] Request Data: %s", json.dumps(request_dict, indent=2))
+            logging.debug("[DEBUG] Query Params: %s", self.endpoint.query_params)
 
             response_json = await client.request(
                 self.endpoint.method.value,
@@ -757,11 +755,11 @@ class SynchronousOperation(Generic[T, R]):
             logging.debug("=" * 50)
             logging.debug("[DEBUG] RESPONSE DETAILS:")
             logging.debug("[DEBUG] Status Code: 200 (Success)")
-            logging.debug(f"[DEBUG] Response Body: {json.dumps(response_json, indent=2)}")
+            logging.debug("[DEBUG] Response Body: %s", json.dumps(response_json, indent=2))
             logging.debug("=" * 50)
 
             parsed_response = self.endpoint.response_model.model_validate(response_json)
-            logging.debug(f"[DEBUG] Parsed Response: {parsed_response}")
+            logging.debug("[DEBUG] Parsed Response: %s", parsed_response)
             return parsed_response
         finally:
             if owns_client:
@@ -877,7 +875,7 @@ class PollingOperation(Generic[T, R]):
         status = TaskStatus.PENDING
         for poll_count in range(1, self.max_poll_attempts + 1):
             try:
-                logging.debug(f"[DEBUG] Polling attempt #{poll_count}")
+                logging.debug("[DEBUG] Polling attempt #%s", poll_count)
 
                 request_dict = (
                     None if self.request is None else self.request.model_dump(exclude_none=True)
@@ -885,10 +883,13 @@ class PollingOperation(Generic[T, R]):
                 if poll_count == 1:
                     logging.debug(
-                        f"[DEBUG] Poll Request: {self.poll_endpoint.method.value} {self.poll_endpoint.path}"
+                        "[DEBUG] Poll Request: %s %s",
+                        self.poll_endpoint.method.value,
+                        self.poll_endpoint.path,
                     )
                     logging.debug(
-                        f"[DEBUG] Poll Request Data: {json.dumps(request_dict, indent=2) if request_dict else 'None'}"
+                        "[DEBUG] Poll Request Data: %s",
+                        json.dumps(request_dict, indent=2) if request_dict else "None",
                     )
 
                 # Query task status
@@ -903,7 +904,7 @@ class PollingOperation(Generic[T, R]):
 
                 # Check if task is complete
                 status = self._check_task_status(response_obj)
-                logging.debug(f"[DEBUG] Task Status: {status}")
+                logging.debug("[DEBUG] Task Status: %s", status)
 
                 # If progress extractor is provided, extract progress
                 if self.progress_extractor:
@@ -917,7 +918,7 @@ class PollingOperation(Generic[T, R]):
                         result_url = self.result_url_extractor(response_obj)
                         if result_url:
                             message = f"Result URL: {result_url}"
-                            logging.debug(f"[DEBUG] {message}")
+                            logging.debug("[DEBUG] %s", message)
                             self._display_text_on_node(message)
                     self.final_response = response_obj
                     if self.progress_extractor:
@@ -925,7 +926,7 @@ class PollingOperation(Generic[T, R]):
                     return self.final_response
                 if status == TaskStatus.FAILED:
                     message = f"Task failed: {json.dumps(resp)}"
-                    logging.error(f"[DEBUG] {message}")
+                    logging.error("[DEBUG] %s", message)
                     raise Exception(message)
                 logging.debug("[DEBUG] Task still pending, continuing to poll...")
                 # Task pending – wait
@@ -939,7 +940,12 @@ class PollingOperation(Generic[T, R]):
                     raise Exception(
                         f"Polling aborted after {consecutive_errors} network errors: {str(e)}"
                     ) from e
-                logging.warning("Network error (%s/%s): %s", consecutive_errors, max_consecutive_errors, str(e))
+                logging.warning(
+                    "Network error (%s/%s): %s",
+                    consecutive_errors,
+                    max_consecutive_errors,
+                    str(e),
+                )
                 await asyncio.sleep(self.poll_interval)
             except Exception as e:
                 # For other errors, increment count and potentially abort
@@ -949,10 +955,13 @@ class PollingOperation(Generic[T, R]):
                         f"Polling aborted after {consecutive_errors} consecutive errors: {str(e)}"
                     ) from e
 
-                logging.error(f"[DEBUG] Polling error: {str(e)}")
+                logging.error("[DEBUG] Polling error: %s", str(e))
                 logging.warning(
-                    f"Error during polling (attempt {poll_count}/{self.max_poll_attempts}): {str(e)}. "
-                    f"Will retry in {self.poll_interval} seconds."
+                    "Error during polling (attempt %s/%s): %s. Will retry in %s seconds.",
+                    poll_count,
+                    self.max_poll_attempts,
+                    str(e),
+                    self.poll_interval,
                 )
                 await asyncio.sleep(self.poll_interval)
 
diff --git a/comfy_api_nodes/apis/request_logger.py b/comfy_api_nodes/apis/request_logger.py
index 2e0ca5380..c6974d35c 100644
--- a/comfy_api_nodes/apis/request_logger.py
+++ b/comfy_api_nodes/apis/request_logger.py
@@ -21,7 +21,7 @@ def get_log_directory():
     try:
         os.makedirs(log_dir, exist_ok=True)
     except Exception as e:
-        logger.error(f"Error creating API log directory {log_dir}: {e}")
+        logger.error("Error creating API log directory %s: %s", log_dir, str(e))
         # Fallback to base temp directory if sub-directory creation fails
         return base_temp_dir
     return log_dir
@@ -122,9 +122,9 @@ def log_request_response(
     try:
         with open(filepath, "w", encoding="utf-8") as f:
             f.write("\n".join(log_content))
-        logger.debug(f"API log saved to: {filepath}")
+        logger.debug("API log saved to: %s", filepath)
     except Exception as e:
-        logger.error(f"Error writing API log to {filepath}: {e}")
+        logger.error("Error writing API log to %s: %s", filepath, str(e))
 
 
 if __name__ == '__main__':
diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py
index fe5b8562d..a3cd09786 100644
--- a/comfy_api_nodes/nodes_kling.py
+++ b/comfy_api_nodes/nodes_kling.py
@@ -296,7 +296,7 @@ def validate_video_result_response(response) -> None:
     """Validates that the Kling task result contains a video."""
     if not is_valid_video_response(response):
         error_msg = f"Kling task {response.data.task_id} succeeded but no video data found in response."
-        logging.error(f"Error: {error_msg}.\nResponse: {response}")
+        logging.error("Error: %s.\nResponse: %s", error_msg, response)
         raise Exception(error_msg)
 
 
@@ -304,7 +304,7 @@ def validate_image_result_response(response) -> None:
     """Validates that the Kling task result contains an image."""
     if not is_valid_image_response(response):
         error_msg = f"Kling task {response.data.task_id} succeeded but no image data found in response."
-        logging.error(f"Error: {error_msg}.\nResponse: {response}")
+        logging.error("Error: %s.\nResponse: %s", error_msg, response)
         raise Exception(error_msg)
 
 
diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py
index bf560661c..caa3d4260 100644
--- a/comfy_api_nodes/nodes_minimax.py
+++ b/comfy_api_nodes/nodes_minimax.py
@@ -500,7 +500,7 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode):
             raise Exception(
                 f"No video was found in the response. Full response: {file_result.model_dump()}"
             )
-        logging.info(f"Generated video URL: {file_url}")
+        logging.info("Generated video URL: %s", file_url)
         if cls.hidden.unique_id:
             if hasattr(file_result.file, "backup_download_url"):
                 message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}"
diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py
index 4a56d31f8..77e4b536c 100644
--- a/comfy_api_nodes/nodes_moonvalley.py
+++ b/comfy_api_nodes/nodes_moonvalley.py
@@ -237,7 +237,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
         audio_stream = None
 
         for stream in input_container.streams:
-            logging.info(f"Found stream: type={stream.type}, class={type(stream)}")
+            logging.info("Found stream: type=%s, class=%s", stream.type, type(stream))
             if isinstance(stream, av.VideoStream):
                 # Create output video stream with same parameters
                 video_stream = output_container.add_stream(
@@ -247,7 +247,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
                 video_stream.height = stream.height
                 video_stream.pix_fmt = "yuv420p"
                 logging.info(
-                    f"Added video stream: {stream.width}x{stream.height} @ {stream.average_rate}fps"
+                    "Added video stream: %sx%s @ %sfps", stream.width, stream.height, stream.average_rate
                 )
             elif isinstance(stream, av.AudioStream):
                 # Create output audio stream with same parameters
@@ -256,9 +256,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
                 )
                 audio_stream.sample_rate = stream.sample_rate
                 audio_stream.layout = stream.layout
-                logging.info(
-                    f"Added audio stream: {stream.sample_rate}Hz, {stream.channels} channels"
-                )
+                logging.info("Added audio stream: %sHz, %s channels", stream.sample_rate, stream.channels)
 
         # Calculate target frame count that's divisible by 16
         fps = input_container.streams.video[0].average_rate
@@ -288,9 +286,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
             for packet in video_stream.encode():
                 output_container.mux(packet)
 
-            logging.info(
-                f"Encoded {frame_count} video frames (target: {target_frames})"
-            )
+            logging.info("Encoded %s video frames (target: %s)", frame_count, target_frames)
 
         # Decode and re-encode audio frames
         if audio_stream:
@@ -308,7 +304,7 @@ def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
             for packet in audio_stream.encode():
                 output_container.mux(packet)
 
-            logging.info(f"Encoded {audio_frame_count} audio frames")
+            logging.info("Encoded %s audio frames", audio_frame_count)
 
         # Close containers
         output_container.close()
diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py
index bd758f762..0eb762a1c 100644
--- a/comfy_api_nodes/nodes_rodin.py
+++ b/comfy_api_nodes/nodes_rodin.py
@@ -172,16 +172,16 @@ async def create_generate_task(
     logging.info("[ Rodin3D API - Submit Jobs ] Submit Generate Task Success!")
     subscription_key = response.jobs.subscription_key
     task_uuid = response.uuid
-    logging.info(f"[ Rodin3D API - Submit Jobs ] UUID: {task_uuid}")
+    logging.info("[ Rodin3D API - Submit Jobs ] UUID: %s", task_uuid)
     return task_uuid, subscription_key
 
 
 def check_rodin_status(response: Rodin3DCheckStatusResponse) -> str:
     all_done = all(job.status == JobStatus.Done for job in response.jobs)
     status_list = [str(job.status) for job in response.jobs]
-    logging.info(f"[ Rodin3D API - CheckStatus ] Generate Status: {status_list}")
+    logging.info("[ Rodin3D API - CheckStatus ] Generate Status: %s", status_list)
     if any(job.status == JobStatus.Failed for job in response.jobs):
-        logging.error(f"[ Rodin3D API - CheckStatus ] Generate Failed: {status_list}, Please try again.")
+        logging.error("[ Rodin3D API - CheckStatus ] Generate Failed: %s, Please try again.", status_list)
         raise Exception("[ Rodin3D API ] Generate Failed, Please Try again.")
     if all_done:
         return "DONE"
@@ -235,7 +235,7 @@ async def download_files(url_list, task_uuid):
             file_path = os.path.join(save_path, file_name)
             if file_path.endswith(".glb"):
                 model_file_path = file_path
            logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}")
-            logging.info(f"[ Rodin3D API - download_files ] Downloading file: {file_path}")
+            logging.info("[ Rodin3D API - download_files ] Downloading file: %s", file_path)
             max_retries = 5
             for attempt in range(max_retries):
                 try:
@@ -246,7 +246,7 @@ async def download_files(url_list, task_uuid):
                                 f.write(chunk)
                     break
                 except Exception as e:
-                    logging.info(f"[ Rodin3D API - download_files ] Error downloading {file_path}:{e}")
+                    logging.info("[ Rodin3D API - download_files ] Error downloading %s:%s", file_path, str(e))
                     if attempt < max_retries - 1:
                         logging.info("Retrying...")
                         await asyncio.sleep(2)
diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py
index 251aecd42..9d5eced1e 100644
--- a/comfy_api_nodes/nodes_veo2.py
+++ b/comfy_api_nodes/nodes_veo2.py
@@ -215,7 +215,7 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode):
         initial_response = await initial_operation.execute()
         operation_name = initial_response.name
 
-        logging.info(f"Veo generation started with operation name: {operation_name}")
+        logging.info("Veo generation started with operation name: %s", operation_name)
 
         # Define status extractor function
         def status_extractor(response):
diff --git a/pyproject.toml b/pyproject.toml
index 6ea839336..5dcc49a47 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -61,7 +61,6 @@ messages_control.disable = [
     # next warnings should be fixed in future
     "bad-classmethod-argument",  # Class method should have 'cls' as first argument
     "wrong-import-order",  # Standard imports should be placed before third party imports
-    "logging-fstring-interpolation",  # Use lazy % formatting in logging functions
    "ungrouped-imports",
    "unnecessary-pass",
    "unnecessary-lambda-assignment",
"downscale_after_skip": ("BOOLEAN", {"default": True}), - "downscale_method": (s.upscale_methods,), - "upscale_method": (s.upscale_methods,), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return io.Schema( + node_id="PatchModelAddDownscale", + display_name="PatchModelAddDownscale (Kohya Deep Shrink)", + category="model_patches/unet", + inputs=[ + io.Model.Input("model"), + io.Int.Input("block_number", default=3, min=1, max=32, step=1), + io.Float.Input("downscale_factor", default=2.0, min=0.1, max=9.0, step=0.001), + io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001), + io.Float.Input("end_percent", default=0.35, min=0.0, max=1.0, step=0.001), + io.Boolean.Input("downscale_after_skip", default=True), + io.Combo.Input("downscale_method", options=cls.UPSCALE_METHODS), + io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS), + ], + outputs=[ + io.Model.Output(), + ], + ) - CATEGORY = "model_patches/unet" - - def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method): + @classmethod + def execute(cls, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method) -> io.NodeOutput: model_sampling = model.get_model_object("model_sampling") sigma_start = model_sampling.percent_to_sigma(start_percent) sigma_end = model_sampling.percent_to_sigma(end_percent) @@ -41,13 +50,21 @@ class PatchModelAddDownscale: else: m.set_model_input_block_patch(input_block_patch) m.set_model_output_block_patch(output_block_patch) - return (m, ) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "PatchModelAddDownscale": PatchModelAddDownscale, -} NODE_DISPLAY_NAME_MAPPINGS = { # Sampling - "PatchModelAddDownscale": "PatchModelAddDownscale (Kohya Deep Shrink)", + "PatchModelAddDownscale": "", } + +class ModelDownscaleExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + PatchModelAddDownscale, + ] + + +async def comfy_entrypoint() -> ModelDownscaleExtension: + return ModelDownscaleExtension() From 989f715d92678e02b0a2db948e0610027cee7d96 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 9 Oct 2025 09:11:45 +0300 Subject: [PATCH 0727/1073] convert nodes_lora_extract.py to V3 schema (#10182) --- comfy_extras/nodes_lora_extract.py | 70 ++++++++++++++++++------------ 1 file changed, 42 insertions(+), 28 deletions(-) diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py index dfd4fe9f4..a2375cba7 100644 --- a/comfy_extras/nodes_lora_extract.py +++ b/comfy_extras/nodes_lora_extract.py @@ -5,6 +5,8 @@ import folder_paths import os import logging from enum import Enum +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io CLAMP_QUANTILE = 0.99 @@ -71,32 +73,40 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu() return output_sd -class LoraSave: - def __init__(self): - self.output_dir = folder_paths.get_output_directory() +class LoraSave(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoraSave", + display_name="Extract and Save Lora", + category="_for_testing", + inputs=[ + io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"), + io.Int.Input("rank", 
From 989f715d92678e02b0a2db948e0610027cee7d96 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Thu, 9 Oct 2025 09:11:45 +0300
Subject: [PATCH 0727/1073] convert nodes_lora_extract.py to V3 schema (#10182)

---
 comfy_extras/nodes_lora_extract.py | 70 ++++++++++++++++++------------
 1 file changed, 42 insertions(+), 28 deletions(-)

diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py
index dfd4fe9f4..a2375cba7 100644
--- a/comfy_extras/nodes_lora_extract.py
+++ b/comfy_extras/nodes_lora_extract.py
@@ -5,6 +5,8 @@ import folder_paths
 import os
 import logging
 from enum import Enum
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
 
 CLAMP_QUANTILE = 0.99
 
@@ -71,32 +73,40 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
             output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu()
     return output_sd
 
-class LoraSave:
-    def __init__(self):
-        self.output_dir = folder_paths.get_output_directory()
+class LoraSave(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="LoraSave",
+            display_name="Extract and Save Lora",
+            category="_for_testing",
+            inputs=[
+                io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
+                io.Int.Input("rank", default=8, min=1, max=4096, step=1),
+                io.Combo.Input("lora_type", options=tuple(LORA_TYPES.keys())),
+                io.Boolean.Input("bias_diff", default=True),
+                io.Model.Input(
+                    "model_diff",
+                    tooltip="The ModelSubtract output to be converted to a lora.",
+                    optional=True,
+                ),
+                io.Clip.Input(
+                    "text_encoder_diff",
+                    tooltip="The CLIPSubtract output to be converted to a lora.",
+                    optional=True,
+                ),
+            ],
+            is_experimental=True,
+            is_output_node=True,
+        )
 
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"filename_prefix": ("STRING", {"default": "loras/ComfyUI_extracted_lora"}),
-                             "rank": ("INT", {"default": 8, "min": 1, "max": 4096, "step": 1}),
-                             "lora_type": (tuple(LORA_TYPES.keys()),),
-                             "bias_diff": ("BOOLEAN", {"default": True}),
-                             },
-                "optional": {"model_diff": ("MODEL", {"tooltip": "The ModelSubtract output to be converted to a lora."}),
-                             "text_encoder_diff": ("CLIP", {"tooltip": "The CLIPSubtract output to be converted to a lora."})},
-                }
-    RETURN_TYPES = ()
-    FUNCTION = "save"
-    OUTPUT_NODE = True
-
-    CATEGORY = "_for_testing"
-
-    def save(self, filename_prefix, rank, lora_type, bias_diff, model_diff=None, text_encoder_diff=None):
+    def execute(cls, filename_prefix, rank, lora_type, bias_diff, model_diff=None, text_encoder_diff=None) -> io.NodeOutput:
         if model_diff is None and text_encoder_diff is None:
-            return {}
+            return io.NodeOutput()
 
         lora_type = LORA_TYPES.get(lora_type)
-        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, folder_paths.get_output_directory())
 
         output_sd = {}
         if model_diff is not None:
@@ -108,12 +118,16 @@ class LoraSave:
         output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
 
         comfy.utils.save_torch_file(output_sd, output_checkpoint, metadata=None)
-        return {}
+        return io.NodeOutput()
 
-NODE_CLASS_MAPPINGS = {
-    "LoraSave": LoraSave
-}
 
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "LoraSave": "Extract and Save Lora"
-}
+class LoraSaveExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            LoraSave,
+        ]
+
+
+async def comfy_entrypoint() -> LoraSaveExtension:
+    return LoraSaveExtension()
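The calc_lora_model() helper this node relies on distills a weight difference into low-rank factors via SVD. A standalone sketch of just that factorization step, under the assumption of a plain 2-D delta (it omits the CLAMP_QUANTILE outlier clamping and the dtype/device handling the real helper performs):

import torch

def low_rank_factors(diff: torch.Tensor, rank: int):
    # Approximate a weight delta (out_features x in_features) by two factors
    # with inner dimension `rank`: diff ~= up @ down.
    U, S, Vh = torch.linalg.svd(diff.float(), full_matrices=False)
    up = U[:, :rank] * S[:rank]  # fold singular values into the "up" factor
    down = Vh[:rank, :]
    return up, down

delta = torch.randn(128, 64)
up, down = low_rank_factors(delta, rank=8)
print((delta - up @ down).norm() / delta.norm())  # relative reconstruction error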
("MASK",), - "destination": ("IMAGE",), - "destination_alpha": ("MASK",), - "mode": ([mode.name for mode in PorterDuffMode], {"default": PorterDuffMode.DST.name}), - }, - } + def define_schema(cls): + return io.Schema( + node_id="PorterDuffImageComposite", + display_name="Porter-Duff Image Composite", + category="mask/compositing", + inputs=[ + io.Image.Input("source"), + io.Mask.Input("source_alpha"), + io.Image.Input("destination"), + io.Mask.Input("destination_alpha"), + io.Combo.Input("mode", options=[mode.name for mode in PorterDuffMode], default=PorterDuffMode.DST.name), + ], + outputs=[ + io.Image.Output(), + io.Mask.Output(), + ], + ) - RETURN_TYPES = ("IMAGE", "MASK") - FUNCTION = "composite" - CATEGORY = "mask/compositing" - - def composite(self, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode): + @classmethod + def execute(cls, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode) -> io.NodeOutput: batch_size = min(len(source), len(source_alpha), len(destination), len(destination_alpha)) out_images = [] out_alphas = [] @@ -150,45 +157,48 @@ class PorterDuffImageComposite: out_images.append(out_image) out_alphas.append(out_alpha.squeeze(2)) - result = (torch.stack(out_images), torch.stack(out_alphas)) - return result + return io.NodeOutput(torch.stack(out_images), torch.stack(out_alphas)) -class SplitImageWithAlpha: +class SplitImageWithAlpha(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - } - } + def define_schema(cls): + return io.Schema( + node_id="SplitImageWithAlpha", + display_name="Split Image with Alpha", + category="mask/compositing", + inputs=[ + io.Image.Input("image"), + ], + outputs=[ + io.Image.Output(), + io.Mask.Output(), + ], + ) - CATEGORY = "mask/compositing" - RETURN_TYPES = ("IMAGE", "MASK") - FUNCTION = "split_image_with_alpha" - - def split_image_with_alpha(self, image: torch.Tensor): + @classmethod + def execute(cls, image: torch.Tensor) -> io.NodeOutput: out_images = [i[:,:,:3] for i in image] out_alphas = [i[:,:,3] if i.shape[2] > 3 else torch.ones_like(i[:,:,0]) for i in image] - result = (torch.stack(out_images), 1.0 - torch.stack(out_alphas)) - return result + return io.NodeOutput(torch.stack(out_images), 1.0 - torch.stack(out_alphas)) -class JoinImageWithAlpha: +class JoinImageWithAlpha(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "alpha": ("MASK",), - } - } + def define_schema(cls): + return io.Schema( + node_id="JoinImageWithAlpha", + display_name="Join Image with Alpha", + category="mask/compositing", + inputs=[ + io.Image.Input("image"), + io.Mask.Input("alpha"), + ], + outputs=[io.Image.Output()], + ) - CATEGORY = "mask/compositing" - RETURN_TYPES = ("IMAGE",) - FUNCTION = "join_image_with_alpha" - - def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor): + @classmethod + def execute(cls, image: torch.Tensor, alpha: torch.Tensor) -> io.NodeOutput: batch_size = min(len(image), len(alpha)) out_images = [] @@ -196,19 +206,18 @@ class JoinImageWithAlpha: for i in range(batch_size): out_images.append(torch.cat((image[i][:,:,:3], alpha[i].unsqueeze(2)), dim=2)) - result = (torch.stack(out_images),) - return result + return io.NodeOutput(torch.stack(out_images)) -NODE_CLASS_MAPPINGS = { - "PorterDuffImageComposite": PorterDuffImageComposite, - "SplitImageWithAlpha": SplitImageWithAlpha, - 
"JoinImageWithAlpha": JoinImageWithAlpha, -} +class CompositingExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + PorterDuffImageComposite, + SplitImageWithAlpha, + JoinImageWithAlpha, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "PorterDuffImageComposite": "Porter-Duff Image Composite", - "SplitImageWithAlpha": "Split Image with Alpha", - "JoinImageWithAlpha": "Join Image with Alpha", -} +async def comfy_entrypoint() -> CompositingExtension: + return CompositingExtension() From cbee7d33909f168a08ab7e53d897ea284a304d84 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 9 Oct 2025 09:14:00 +0300 Subject: [PATCH 0729/1073] convert nodes_latent.py to V3 schema (#10160) --- comfy_extras/nodes_latent.py | 394 ++++++++++++++++++++--------------- 1 file changed, 224 insertions(+), 170 deletions(-) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index 0f90cf60c..d2df07ff9 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -2,6 +2,8 @@ import comfy.utils import comfy_extras.nodes_post_processing import torch import nodes +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io def reshape_latent_to(target_shape, latent, repeat_batch=True): @@ -13,17 +15,23 @@ def reshape_latent_to(target_shape, latent, repeat_batch=True): return latent -class LatentAdd: +class LatentAdd(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}} + def define_schema(cls): + return io.Schema( + node_id="LatentAdd", + category="latent/advanced", + inputs=[ + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced" - - def op(self, samples1, samples2): + @classmethod + def execute(cls, samples1, samples2) -> io.NodeOutput: samples_out = samples1.copy() s1 = samples1["samples"] @@ -31,19 +39,25 @@ class LatentAdd: s2 = reshape_latent_to(s1.shape, s2) samples_out["samples"] = s1 + s2 - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentSubtract: +class LatentSubtract(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}} + def define_schema(cls): + return io.Schema( + node_id="LatentSubtract", + category="latent/advanced", + inputs=[ + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced" - - def op(self, samples1, samples2): + @classmethod + def execute(cls, samples1, samples2) -> io.NodeOutput: samples_out = samples1.copy() s1 = samples1["samples"] @@ -51,41 +65,49 @@ class LatentSubtract: s2 = reshape_latent_to(s1.shape, s2) samples_out["samples"] = s1 - s2 - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentMultiply: +class LatentMultiply(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples": ("LATENT",), - "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), - }} + def define_schema(cls): + return io.Schema( + node_id="LatentMultiply", + category="latent/advanced", + inputs=[ + io.Latent.Input("samples"), + io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01), + ], + outputs=[ + io.Latent.Output(), + ], + ) - 
RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced" - - def op(self, samples, multiplier): + @classmethod + def execute(cls, samples, multiplier) -> io.NodeOutput: samples_out = samples.copy() s1 = samples["samples"] samples_out["samples"] = s1 * multiplier - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentInterpolate: +class LatentInterpolate(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples1": ("LATENT",), - "samples2": ("LATENT",), - "ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - }} + def define_schema(cls): + return io.Schema( + node_id="LatentInterpolate", + category="latent/advanced", + inputs=[ + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), + io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced" - - def op(self, samples1, samples2, ratio): + @classmethod + def execute(cls, samples1, samples2, ratio) -> io.NodeOutput: samples_out = samples1.copy() s1 = samples1["samples"] @@ -104,19 +126,26 @@ class LatentInterpolate: st = torch.nan_to_num(t / mt) samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio)) - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentConcat: +class LatentConcat(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",), "dim": (["x", "-x", "y", "-y", "t", "-t"], )}} + def define_schema(cls): + return io.Schema( + node_id="LatentConcat", + category="latent/advanced", + inputs=[ + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), + io.Combo.Input("dim", options=["x", "-x", "y", "-y", "t", "-t"]), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced" - - def op(self, samples1, samples2, dim): + @classmethod + def execute(cls, samples1, samples2, dim) -> io.NodeOutput: samples_out = samples1.copy() s1 = samples1["samples"] @@ -136,22 +165,27 @@ class LatentConcat: dim = -3 samples_out["samples"] = torch.cat(c, dim=dim) - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentCut: +class LatentCut(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"samples": ("LATENT",), - "dim": (["x", "y", "t"], ), - "index": ("INT", {"default": 0, "min": -nodes.MAX_RESOLUTION, "max": nodes.MAX_RESOLUTION, "step": 1}), - "amount": ("INT", {"default": 1, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 1})}} + def define_schema(cls): + return io.Schema( + node_id="LatentCut", + category="latent/advanced", + inputs=[ + io.Latent.Input("samples"), + io.Combo.Input("dim", options=["x", "y", "t"]), + io.Int.Input("index", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1), + io.Int.Input("amount", default=1, min=1, max=nodes.MAX_RESOLUTION, step=1), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced" - - def op(self, samples, dim, index, amount): + @classmethod + def execute(cls, samples, dim, index, amount) -> io.NodeOutput: samples_out = samples.copy() s1 = samples["samples"] @@ -171,19 +205,25 @@ class LatentCut: amount = min(-index, amount) samples_out["samples"] = torch.narrow(s1, dim, index, amount) - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentBatch: +class LatentBatch(io.ComfyNode): 
@classmethod - def INPUT_TYPES(s): - return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}} + def define_schema(cls): + return io.Schema( + node_id="LatentBatch", + category="latent/batch", + inputs=[ + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "batch" - - CATEGORY = "latent/batch" - - def batch(self, samples1, samples2): + @classmethod + def execute(cls, samples1, samples2) -> io.NodeOutput: samples_out = samples1.copy() s1 = samples1["samples"] s2 = samples2["samples"] @@ -192,20 +232,25 @@ class LatentBatch: s = torch.cat((s1, s2), dim=0) samples_out["samples"] = s samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])]) - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentBatchSeedBehavior: +class LatentBatchSeedBehavior(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples": ("LATENT",), - "seed_behavior": (["random", "fixed"],{"default": "fixed"}),}} + def define_schema(cls): + return io.Schema( + node_id="LatentBatchSeedBehavior", + category="latent/advanced", + inputs=[ + io.Latent.Input("samples"), + io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced" - - def op(self, samples, seed_behavior): + @classmethod + def execute(cls, samples, seed_behavior) -> io.NodeOutput: samples_out = samples.copy() latent = samples["samples"] if seed_behavior == "random": @@ -215,41 +260,50 @@ class LatentBatchSeedBehavior: batch_number = samples_out.get("batch_index", [0])[0] samples_out["batch_index"] = [batch_number] * latent.shape[0] - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentApplyOperation: +class LatentApplyOperation(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples": ("LATENT",), - "operation": ("LATENT_OPERATION",), - }} + def define_schema(cls): + return io.Schema( + node_id="LatentApplyOperation", + category="latent/advanced/operations", + is_experimental=True, + inputs=[ + io.Latent.Input("samples"), + io.LatentOperation.Input("operation"), + ], + outputs=[ + io.Latent.Output(), + ], + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "op" - - CATEGORY = "latent/advanced/operations" - EXPERIMENTAL = True - - def op(self, samples, operation): + @classmethod + def execute(cls, samples, operation) -> io.NodeOutput: samples_out = samples.copy() s1 = samples["samples"] samples_out["samples"] = operation(latent=s1) - return (samples_out,) + return io.NodeOutput(samples_out) -class LatentApplyOperationCFG: +class LatentApplyOperationCFG(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "operation": ("LATENT_OPERATION",), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return io.Schema( + node_id="LatentApplyOperationCFG", + category="latent/advanced/operations", + is_experimental=True, + inputs=[ + io.Model.Input("model"), + io.LatentOperation.Input("operation"), + ], + outputs=[ + io.Model.Output(), + ], + ) - CATEGORY = "latent/advanced/operations" - EXPERIMENTAL = True - - def patch(self, model, operation): + @classmethod + def execute(cls, model, operation) -> io.NodeOutput: m = model.clone() def pre_cfg_function(args): @@ -261,21 
+315,25 @@ class LatentApplyOperationCFG: return conds_out m.set_model_sampler_pre_cfg_function(pre_cfg_function) - return (m, ) + return io.NodeOutput(m) -class LatentOperationTonemapReinhard: +class LatentOperationTonemapReinhard(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}), - }} + def define_schema(cls): + return io.Schema( + node_id="LatentOperationTonemapReinhard", + category="latent/advanced/operations", + is_experimental=True, + inputs=[ + io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01), + ], + outputs=[ + io.LatentOperation.Output(), + ], + ) - RETURN_TYPES = ("LATENT_OPERATION",) - FUNCTION = "op" - - CATEGORY = "latent/advanced/operations" - EXPERIMENTAL = True - - def op(self, multiplier): + @classmethod + def execute(cls, multiplier) -> io.NodeOutput: def tonemap_reinhard(latent, **kwargs): latent_vector_magnitude = (torch.linalg.vector_norm(latent, dim=(1)) + 0.0000000001)[:,None] normalized_latent = latent / latent_vector_magnitude @@ -291,39 +349,27 @@ class LatentOperationTonemapReinhard: new_magnitude *= top return normalized_latent * new_magnitude - return (tonemap_reinhard,) + return io.NodeOutput(tonemap_reinhard) -class LatentOperationSharpen: +class LatentOperationSharpen(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "sharpen_radius": ("INT", { - "default": 9, - "min": 1, - "max": 31, - "step": 1 - }), - "sigma": ("FLOAT", { - "default": 1.0, - "min": 0.1, - "max": 10.0, - "step": 0.1 - }), - "alpha": ("FLOAT", { - "default": 0.1, - "min": 0.0, - "max": 5.0, - "step": 0.01 - }), - }} + def define_schema(cls): + return io.Schema( + node_id="LatentOperationSharpen", + category="latent/advanced/operations", + is_experimental=True, + inputs=[ + io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1), + io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1), + io.Float.Input("alpha", default=0.1, min=0.0, max=5.0, step=0.01), + ], + outputs=[ + io.LatentOperation.Output(), + ], + ) - RETURN_TYPES = ("LATENT_OPERATION",) - FUNCTION = "op" - - CATEGORY = "latent/advanced/operations" - EXPERIMENTAL = True - - def op(self, sharpen_radius, sigma, alpha): + @classmethod + def execute(cls, sharpen_radius, sigma, alpha) -> io.NodeOutput: def sharpen(latent, **kwargs): luminance = (torch.linalg.vector_norm(latent, dim=(1)) + 1e-6)[:,None] normalized_latent = latent / luminance @@ -340,19 +386,27 @@ class LatentOperationSharpen: sharpened = torch.nn.functional.conv2d(padded_image, kernel.repeat(channels, 1, 1).unsqueeze(1), padding=kernel_size // 2, groups=channels)[:,:,sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius] return luminance * sharpened - return (sharpen,) + return io.NodeOutput(sharpen) -NODE_CLASS_MAPPINGS = { - "LatentAdd": LatentAdd, - "LatentSubtract": LatentSubtract, - "LatentMultiply": LatentMultiply, - "LatentInterpolate": LatentInterpolate, - "LatentConcat": LatentConcat, - "LatentCut": LatentCut, - "LatentBatch": LatentBatch, - "LatentBatchSeedBehavior": LatentBatchSeedBehavior, - "LatentApplyOperation": LatentApplyOperation, - "LatentApplyOperationCFG": LatentApplyOperationCFG, - "LatentOperationTonemapReinhard": LatentOperationTonemapReinhard, - "LatentOperationSharpen": LatentOperationSharpen, -} + +class LatentExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + LatentAdd, + LatentSubtract, + 
LatentMultiply, + LatentInterpolate, + LatentConcat, + LatentCut, + LatentBatch, + LatentBatchSeedBehavior, + LatentApplyOperation, + LatentApplyOperationCFG, + LatentOperationTonemapReinhard, + LatentOperationSharpen, + ] + + +async def comfy_entrypoint() -> LatentExtension: + return LatentExtension() From 139addd53c6cab97fb0ac28d1c895b3ecc7dff6c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 9 Oct 2025 13:37:35 -0700 Subject: [PATCH 0730/1073] More surgical fix for #10267 (#10276) --- comfy/model_patcher.py | 28 +++++++++++++++++++++------- comfy/ops.py | 4 +++- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 1fd03d9d1..e8c859689 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -123,16 +123,26 @@ def move_weight_functions(m, device): return memory class LowVramPatch: - def __init__(self, key, patches): + def __init__(self, key, patches, convert_func=None, set_func=None): self.key = key self.patches = patches + self.convert_func = convert_func + self.set_func = set_func + def __call__(self, weight): + if self.convert_func is not None: + weight = self.convert_func(weight.to(dtype=torch.float32, copy=True), inplace=True) + intermediate_dtype = weight.dtype - if intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops + if self.set_func is None and intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops intermediate_dtype = torch.float32 return comfy.float.stochastic_rounding(comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype), weight.dtype, seed=string_to_seed(self.key)) - return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype) + out = comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype) + if self.set_func is not None: + return self.set_func(out, seed=string_to_seed(self.key), return_weight=True) + else: + return out def get_key_weight(model, key): set_func = None @@ -657,13 +667,15 @@ class ModelPatcher: if force_patch_weights: self.patch_weight_to_device(weight_key) else: - m.weight_function = [LowVramPatch(weight_key, self.patches)] + _, set_func, convert_func = get_key_weight(self.model, weight_key) + m.weight_function = [LowVramPatch(weight_key, self.patches, convert_func, set_func)] patch_counter += 1 if bias_key in self.patches: if force_patch_weights: self.patch_weight_to_device(bias_key) else: - m.bias_function = [LowVramPatch(bias_key, self.patches)] + _, set_func, convert_func = get_key_weight(self.model, bias_key) + m.bias_function = [LowVramPatch(bias_key, self.patches, convert_func, set_func)] patch_counter += 1 cast_weight = True @@ -825,10 +837,12 @@ class ModelPatcher: module_mem += move_weight_functions(m, device_to) if lowvram_possible: if weight_key in self.patches: - m.weight_function.append(LowVramPatch(weight_key, self.patches)) + _, set_func, convert_func = get_key_weight(self.model, weight_key) + m.weight_function.append(LowVramPatch(weight_key, self.patches, convert_func, set_func)) patch_counter += 1 if bias_key in self.patches: - m.bias_function.append(LowVramPatch(bias_key, self.patches)) + _, set_func, convert_func = get_key_weight(self.model, bias_key) + 
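The LATENT_OPERATION type threaded through this patch is simply a callable that accepts a latent keyword argument, which is why LatentOperationSharpen and LatentOperationTonemapReinhard can return bare closures. A hypothetical operation built and applied the same way LatentApplyOperation does (the scale_operation name is invented for illustration):

import torch

def scale_operation(multiplier: float):
    # Shaped like the closures returned by the LatentOperation* nodes:
    # any callable accepting `latent` (plus ignored kwargs) qualifies.
    def op(latent: torch.Tensor, **kwargs) -> torch.Tensor:
        return latent * multiplier
    return op

samples = {"samples": torch.randn(1, 4, 64, 64)}
operation = scale_operation(0.5)
# Mirrors what LatentApplyOperation.execute does with its two inputs.
samples["samples"] = operation(latent=samples["samples"])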
From 139addd53c6cab97fb0ac28d1c895b3ecc7dff6c Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 9 Oct 2025 13:37:35 -0700
Subject: [PATCH 0730/1073] More surgical fix for #10267 (#10276)

---
 comfy/model_patcher.py | 28 +++++++++++++++++++++-------
 comfy/ops.py           |  4 +++-
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 1fd03d9d1..e8c859689 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -123,16 +123,26 @@ def move_weight_functions(m, device):
     return memory
 
 class LowVramPatch:
-    def __init__(self, key, patches):
+    def __init__(self, key, patches, convert_func=None, set_func=None):
         self.key = key
         self.patches = patches
+        self.convert_func = convert_func
+        self.set_func = set_func
+
     def __call__(self, weight):
+        if self.convert_func is not None:
+            weight = self.convert_func(weight.to(dtype=torch.float32, copy=True), inplace=True)
+
         intermediate_dtype = weight.dtype
-        if intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops
+        if self.set_func is None and intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops
             intermediate_dtype = torch.float32
             return comfy.float.stochastic_rounding(comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype), weight.dtype, seed=string_to_seed(self.key))
 
-        return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype)
+        out = comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype)
+        if self.set_func is not None:
+            return self.set_func(out, seed=string_to_seed(self.key), return_weight=True)
+        else:
+            return out
 
 def get_key_weight(model, key):
     set_func = None
@@ -657,13 +667,15 @@ class ModelPatcher:
                         if force_patch_weights:
                             self.patch_weight_to_device(weight_key)
                         else:
-                            m.weight_function = [LowVramPatch(weight_key, self.patches)]
+                            _, set_func, convert_func = get_key_weight(self.model, weight_key)
+                            m.weight_function = [LowVramPatch(weight_key, self.patches, convert_func, set_func)]
                             patch_counter += 1
                     if bias_key in self.patches:
                         if force_patch_weights:
                             self.patch_weight_to_device(bias_key)
                         else:
-                            m.bias_function = [LowVramPatch(bias_key, self.patches)]
+                            _, set_func, convert_func = get_key_weight(self.model, bias_key)
+                            m.bias_function = [LowVramPatch(bias_key, self.patches, convert_func, set_func)]
                             patch_counter += 1
 
                     cast_weight = True
@@ -825,10 +837,12 @@ class ModelPatcher:
                 module_mem += move_weight_functions(m, device_to)
                 if lowvram_possible:
                     if weight_key in self.patches:
-                        m.weight_function.append(LowVramPatch(weight_key, self.patches))
+                        _, set_func, convert_func = get_key_weight(self.model, weight_key)
+                        m.weight_function.append(LowVramPatch(weight_key, self.patches, convert_func, set_func))
                         patch_counter += 1
                     if bias_key in self.patches:
-                        m.bias_function.append(LowVramPatch(bias_key, self.patches))
+                        _, set_func, convert_func = get_key_weight(self.model, bias_key)
+                        m.bias_function.append(LowVramPatch(bias_key, self.patches, convert_func, set_func))
                         patch_counter += 1
 
                     cast_weight = True
diff --git a/comfy/ops.py b/comfy/ops.py
index 9d7dedd37..2415c96bf 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -416,8 +416,10 @@ def scaled_fp8_ops(fp8_matrix_mult=False, scale_input=False, override_dtype=None
             else:
                 return weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype)
 
-        def set_weight(self, weight, inplace_update=False, seed=None, **kwargs):
+        def set_weight(self, weight, inplace_update=False, seed=None, return_weight=False, **kwargs):
             weight = comfy.float.stochastic_rounding(weight / self.scale_weight.to(device=weight.device, dtype=weight.dtype), self.weight.dtype, seed=seed)
+            if return_weight:
+                return weight
             if inplace_update:
                 self.weight.data.copy_(weight)
             else:
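The fix above matters for quantized (e.g., scaled fp8) weights: a low-VRAM LoRA patch has to be computed on the dequantized tensor and then re-encoded, which is what threading convert_func and set_func into LowVramPatch achieves. A rough analogue of that round trip using a plain scale factor (the real set_weight also performs seeded stochastic rounding, skipped here):

import torch

def apply_patch(weight: torch.Tensor, delta: torch.Tensor, scale: float) -> torch.Tensor:
    # Analogue of the patched code path: dequantize (convert_func),
    # apply the LoRA delta in float32, then re-encode (set_func).
    dequant = weight.to(torch.float32) * scale
    patched = dequant + delta
    return (patched / scale).to(weight.dtype)

w = torch.randn(8, 8, dtype=torch.float16)
print(apply_patch(w, torch.zeros(8, 8), scale=2.0).dtype)  # torch.float16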
From f3d5d328a39d2f264b35d43f0e9c5a0b4d780c2f Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Fri, 10 Oct 2025 01:15:03 +0300
Subject: [PATCH 0731/1073] fix(v3,api-nodes): V3 schema typing; corrected Pika
 API nodes (#10265)

---
 comfy_api/latest/__init__.py           |   9 +-
 comfy_api/latest/_input/video_types.py |   4 +-
 comfy_api/latest/_io.py                | 146 ++++++------
 comfy_api/latest/_ui.py                |  25 +--
 comfy_api_nodes/apinode_utils.py       |   2 +-
 comfy_api_nodes/apis/client.py         |  58 +++---
 comfy_api_nodes/apis/pika_defs.py      | 100 +++++++++
 comfy_api_nodes/nodes_pika.py          | 277 ++++++++-----------------
 8 files changed, 309 insertions(+), 312 deletions(-)
 create mode 100644 comfy_api_nodes/apis/pika_defs.py

diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py
index 2cee65aa9..b19a97f1d 100644
--- a/comfy_api/latest/__init__.py
+++ b/comfy_api/latest/__init__.py
@@ -8,8 +8,8 @@ from comfy_api.internal.async_to_sync import create_sync_class
 from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput
 from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents
 from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents
-from comfy_api.latest._io import _IO as io #noqa: F401
-from comfy_api.latest._ui import _UI as ui #noqa: F401
+from . import _io as io
+from . import _ui as ui
 # from comfy_api.latest._resources import _RESOURCES as resources #noqa: F401
 from comfy_execution.utils import get_executing_context
 from comfy_execution.progress import get_progress_state, PreviewImageTuple
@@ -114,6 +114,8 @@ if TYPE_CHECKING:
     ComfyAPISync: Type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub]
 ComfyAPISync = create_sync_class(ComfyAPI_latest)
 
+comfy_io = io  # create the new alias for io
+
 __all__ = [
     "ComfyAPI",
     "ComfyAPISync",
@@ -121,4 +123,7 @@ __all__ = [
     "InputImpl",
     "Types",
     "ComfyExtension",
+    "io",
+    "comfy_io",
+    "ui",
 ]
diff --git a/comfy_api/latest/_input/video_types.py b/comfy_api/latest/_input/video_types.py
index 5d95dc507..a335df4d0 100644
--- a/comfy_api/latest/_input/video_types.py
+++ b/comfy_api/latest/_input/video_types.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 from abc import ABC, abstractmethod
-from typing import Optional, Union
+from typing import Optional, Union, IO
 import io
 import av
 from comfy_api.util import VideoContainer, VideoCodec, VideoComponents
@@ -23,7 +23,7 @@ class VideoInput(ABC):
     @abstractmethod
     def save_to(
         self,
-        path: str,
+        path: Union[str, IO[bytes]],
         format: VideoContainer = VideoContainer.AUTO,
         codec: VideoCodec = VideoCodec.AUTO,
         metadata: Optional[dict] = None
diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py
index 661309f19..0b701260f 100644
--- a/comfy_api/latest/_io.py
+++ b/comfy_api/latest/_io.py
@@ -1582,78 +1582,78 @@ class _UIOutput(ABC):
         ...
 
 
-class _IO:
-    FolderType = FolderType
-    UploadType = UploadType
-    RemoteOptions = RemoteOptions
-    NumberDisplay = NumberDisplay
+__all__ = [
+    "FolderType",
+    "UploadType",
+    "RemoteOptions",
+    "NumberDisplay",
 
-    comfytype = staticmethod(comfytype)
-    Custom = staticmethod(Custom)
-    Input = Input
-    WidgetInput = WidgetInput
-    Output = Output
-    ComfyTypeI = ComfyTypeI
-    ComfyTypeIO = ComfyTypeIO
-    #---------------------------------
+    "comfytype",
+    "Custom",
+    "Input",
+    "WidgetInput",
+    "Output",
+    "ComfyTypeI",
+    "ComfyTypeIO",
     # Supported Types
-    Boolean = Boolean
-    Int = Int
-    Float = Float
-    String = String
-    Combo = Combo
-    MultiCombo = MultiCombo
-    Image = Image
-    WanCameraEmbedding = WanCameraEmbedding
-    Webcam = Webcam
-    Mask = Mask
-    Latent = Latent
-    Conditioning = Conditioning
-    Sampler = Sampler
-    Sigmas = Sigmas
-    Noise = Noise
-    Guider = Guider
-    Clip = Clip
-    ControlNet = ControlNet
-    Vae = Vae
-    Model = Model
-    ClipVision = ClipVision
-    ClipVisionOutput = ClipVisionOutput
-    AudioEncoder = AudioEncoder
-    AudioEncoderOutput = AudioEncoderOutput
-    StyleModel = StyleModel
-    Gligen = Gligen
-    UpscaleModel = UpscaleModel
-    Audio = Audio
-    Video = Video
-    SVG = SVG
-    LoraModel = LoraModel
-    LossMap = LossMap
-    Voxel = Voxel
-    Mesh = Mesh
-    Hooks = Hooks
-    HookKeyframes = HookKeyframes
-    TimestepsRange = TimestepsRange
-    LatentOperation = LatentOperation
-    FlowControl = FlowControl
-    Accumulation = Accumulation
-    Load3DCamera = Load3DCamera
-    Load3D = Load3D
-    Load3DAnimation = Load3DAnimation
-    Photomaker = Photomaker
-    Point = Point
-    FaceAnalysis = FaceAnalysis
-    BBOX = BBOX
-    SEGS = SEGS
-    AnyType = AnyType
-    MultiType = MultiType
-    #---------------------------------
-    HiddenHolder = HiddenHolder
-    Hidden = Hidden
-    NodeInfoV1 = NodeInfoV1
-    NodeInfoV3 = NodeInfoV3
-    Schema = Schema
-    ComfyNode = ComfyNode
-    NodeOutput = NodeOutput
-    add_to_dict_v1 = staticmethod(add_to_dict_v1)
-    add_to_dict_v3 = staticmethod(add_to_dict_v3)
+    "Boolean",
+    "Int",
+    "Float",
+    "String",
+    "Combo",
+    "MultiCombo",
+    "Image",
+    "WanCameraEmbedding",
+    "Webcam",
+    "Mask",
+    "Latent",
+    "Conditioning",
+    "Sampler",
+    "Sigmas",
+    "Noise",
+    "Guider",
+    "Clip",
+    "ControlNet",
+    "Vae",
+    "Model",
+    "ClipVision",
+    "ClipVisionOutput",
+    "AudioEncoder",
+    "AudioEncoderOutput",
+    "StyleModel",
+    "Gligen",
+    "UpscaleModel",
+    "Audio",
+    "Video",
+    "SVG",
+    "LoraModel",
+    "LossMap",
+    "Voxel",
+    "Mesh",
+    "Hooks",
+    "HookKeyframes",
+    "TimestepsRange",
+    "LatentOperation",
+    "FlowControl",
+    "Accumulation",
+    "Load3DCamera",
+    "Load3D",
+    "Load3DAnimation",
+    "Photomaker",
+    "Point",
+    "FaceAnalysis",
+    "BBOX",
+    "SEGS",
+    "AnyType",
+    "MultiType",
+    # Other classes
+    "HiddenHolder",
+    "Hidden",
+    "NodeInfoV1",
+    "NodeInfoV3",
+    "Schema",
+    "ComfyNode",
+    "NodeOutput",
+    "add_to_dict_v1",
+    "add_to_dict_v3",
+]
diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py
index 26a55615f..b0bbabe2a 100644
--- a/comfy_api/latest/_ui.py
+++ b/comfy_api/latest/_ui.py
@@ -449,15 +449,16 @@ class PreviewText(_UIOutput):
         return {"text": (self.value,)}
 
 
-class _UI:
-    SavedResult = SavedResult
-    SavedImages = SavedImages
-    SavedAudios = SavedAudios
-    ImageSaveHelper = ImageSaveHelper
-    AudioSaveHelper = AudioSaveHelper
-    PreviewImage = PreviewImage
-    PreviewMask = PreviewMask
-    PreviewAudio = PreviewAudio
-    PreviewVideo = PreviewVideo
-    PreviewUI3D = PreviewUI3D
-    PreviewText = PreviewText
+__all__ = [
+    "SavedResult",
+    "SavedImages",
+    "SavedAudios",
+    "ImageSaveHelper",
+    "AudioSaveHelper",
+    "PreviewImage",
+    "PreviewMask",
+    "PreviewAudio",
+    "PreviewVideo",
+    "PreviewUI3D",
+    "PreviewText",
+]
diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py
index 2e0dc4dc1..4bab539f7 100644
--- a/comfy_api_nodes/apinode_utils.py
+++ b/comfy_api_nodes/apinode_utils.py
@@ -269,7 +269,7 @@ def tensor_to_bytesio(
         mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').
 
     Returns:
-        Named BytesIO object containing the image data.
+        Named BytesIO object containing the image data, with pointer set to the start of buffer.
     """
     if not mime_type:
         mime_type = "image/png"
diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py
index a3ceafbae..e08dfb093 100644
--- a/comfy_api_nodes/apis/client.py
+++ b/comfy_api_nodes/apis/client.py
@@ -98,7 +98,7 @@ import io
 import os
 import socket
 from aiohttp.client_exceptions import ClientError, ClientResponseError
-from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable, Tuple
+from typing import Type, Optional, Any, TypeVar, Generic, Callable
 from enum import Enum
 import json
 from urllib.parse import urljoin, urlparse
@@ -175,7 +175,7 @@ class ApiClient:
         max_retries: int = 3,
         retry_delay: float = 1.0,
         retry_backoff_factor: float = 2.0,
-        retry_status_codes: Optional[Tuple[int, ...]] = None,
+        retry_status_codes: Optional[tuple[int, ...]] = None,
         session: Optional[aiohttp.ClientSession] = None,
     ):
         self.base_url = base_url
@@ -199,9 +199,9 @@ class ApiClient:
 
     @staticmethod
     def _create_json_payload_args(
-        data: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-    ) -> Dict[str, Any]:
+        data: Optional[dict[str, Any]] = None,
+        headers: Optional[dict[str, str]] = None,
+    ) -> dict[str, Any]:
         return {
             "json": data,
             "headers": headers,
@@ -209,11 +209,11 @@ class ApiClient:
 
     def _create_form_data_args(
         self,
-        data: Dict[str, Any] | None,
-        files: Dict[str, Any] | None,
-        headers: Optional[Dict[str, str]] = None,
+        data: dict[str, Any] | None,
+        files: dict[str, Any] | None,
+        headers: Optional[dict[str, str]] = None,
         multipart_parser: Callable | None = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         if headers and "Content-Type" in headers:
             del headers["Content-Type"]
 
@@ -254,9 +254,9 @@ class ApiClient:
 
     @staticmethod
     def _create_urlencoded_form_data_args(
-        data: Dict[str, Any],
-        headers: Optional[Dict[str, str]] = None,
-    ) -> Dict[str, Any]:
+        data: dict[str, Any],
+        headers: Optional[dict[str, str]] = None,
+    ) -> dict[str, Any]:
         headers = headers or {}
         headers["Content-Type"] = "application/x-www-form-urlencoded"
 
         return {
@@ -264,7 +264,7 @@ class ApiClient:
             "headers": headers,
         }
 
-    def get_headers(self) -> Dict[str, str]:
+    def get_headers(self) -> dict[str, str]:
         """Get headers for API requests, including authentication if available"""
         headers = {"Content-Type": "application/json", "Accept": "application/json"}
 
@@ -275,7 +275,7 @@ class ApiClient:
 
         return headers
 
-    async def _check_connectivity(self, target_url: str) -> Dict[str, bool]:
+    async def _check_connectivity(self, target_url: str) -> dict[str, bool]:
         """
         Check connectivity to determine if network issues are local or server-related.
 
@@ -316,14 +316,14 @@ class ApiClient:
         self,
         method: str,
         path: str,
-        params: Optional[Dict[str, Any]] = None,
-        data: Optional[Dict[str, Any]] = None,
-        files: Optional[Dict[str, Any] | list[tuple[str, Any]]] = None,
-        headers: Optional[Dict[str, str]] = None,
+        params: Optional[dict[str, Any]] = None,
+        data: Optional[dict[str, Any]] = None,
+        files: Optional[dict[str, Any] | list[tuple[str, Any]]] = None,
+        headers: Optional[dict[str, str]] = None,
         content_type: str = "application/json",
         multipart_parser: Callable | None = None,
         retry_count: int = 0,  # Used internally for tracking retries
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """
         Make an HTTP request to the API with automatic retries for transient errors.
 
@@ -485,7 +485,7 @@ class ApiClient:
             retry_delay: Initial delay between retries in seconds
             retry_backoff_factor: Multiplier for the delay after each retry
         """
-        headers: Dict[str, str] = {}
+        headers: dict[str, str] = {}
         skip_auto_headers: set[str] = set()
         if content_type:
             headers["Content-Type"] = content_type
@@ -558,7 +558,7 @@ class ApiClient:
         *req_meta,
         retry_count: int,
         response_content: dict | str = "",
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         status_code = exc.status
         if status_code == 401:
             user_friendly = "Unauthorized: Please login first to use this node."
@@ -659,7 +659,7 @@ class ApiEndpoint(Generic[T, R]):
         method: HttpMethod,
         request_model: Type[T],
         response_model: Type[R],
-        query_params: Optional[Dict[str, Any]] = None,
+        query_params: Optional[dict[str, Any]] = None,
     ):
         """Initialize an API endpoint definition.
 
@@ -684,11 +684,11 @@ class SynchronousOperation(Generic[T, R]):
         self,
         endpoint: ApiEndpoint[T, R],
         request: T,
-        files: Optional[Dict[str, Any] | list[tuple[str, Any]]] = None,
+        files: Optional[dict[str, Any] | list[tuple[str, Any]]] = None,
         api_base: str | None = None,
         auth_token: Optional[str] = None,
         comfy_api_key: Optional[str] = None,
-        auth_kwargs: Optional[Dict[str, str]] = None,
+        auth_kwargs: Optional[dict[str, str]] = None,
         timeout: float = 7200.0,
         verify_ssl: bool = True,
         content_type: str = "application/json",
@@ -729,7 +729,7 @@ class SynchronousOperation(Generic[T, R]):
         )
 
         try:
-            request_dict: Optional[Dict[str, Any]]
+            request_dict: Optional[dict[str, Any]]
             if isinstance(self.request, EmptyRequest):
                 request_dict = None
             else:
@@ -782,14 +782,14 @@ class PollingOperation(Generic[T, R]):
         poll_endpoint: ApiEndpoint[EmptyRequest, R],
         completed_statuses: list[str],
         failed_statuses: list[str],
-        status_extractor: Callable[[R], str],
-        progress_extractor: Callable[[R], float] | None = None,
-        result_url_extractor: Callable[[R], str] | None = None,
+        status_extractor: Callable[[R], Optional[str]],
+        progress_extractor: Callable[[R], Optional[float]] | None = None,
+        result_url_extractor: Callable[[R], Optional[str]] | None = None,
         request: Optional[T] = None,
         api_base: str | None = None,
         auth_token: Optional[str] = None,
         comfy_api_key: Optional[str] = None,
-        auth_kwargs: Optional[Dict[str, str]] = None,
+        auth_kwargs: Optional[dict[str, str]] = None,
         poll_interval: float = 5.0,
         max_poll_attempts: int = 120,  # Default max polling attempts (10 minutes with 5s interval)
         max_retries: int = 3,  # Max retries per individual API call
diff --git a/comfy_api_nodes/apis/pika_defs.py b/comfy_api_nodes/apis/pika_defs.py
new file mode 100644
index 000000000..232558cd7
--- /dev/null
+++ b/comfy_api_nodes/apis/pika_defs.py
@@ -0,0 +1,100 @@
+from typing import Optional
+from enum import Enum
+from pydantic import BaseModel, Field
+
+
+class Pikaffect(str, Enum):
+    Cake_ify = "Cake-ify"
+    Crumble = "Crumble"
+    Crush = "Crush"
+    Decapitate = "Decapitate"
+    Deflate = "Deflate"
+    Dissolve = "Dissolve"
+    Explode = "Explode"
+    Eye_pop = "Eye-pop"
+    Inflate = "Inflate"
+    Levitate = "Levitate"
+    Melt = "Melt"
+    Peel = "Peel"
+    Poke = "Poke"
+    Squish = "Squish"
+    Ta_da = "Ta-da"
+    Tear = "Tear"
+
+
+class PikaBodyGenerate22C2vGenerate22PikascenesPost(BaseModel):
+    aspectRatio: Optional[float] = Field(None, description='Aspect ratio (width / height)')
+    duration: Optional[int] = Field(5)
+    ingredientsMode: str = Field(...)
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    resolution: Optional[str] = Field('1080p')
+    seed: Optional[int] = Field(None)
+
+
+class PikaGenerateResponse(BaseModel):
+    video_id: str = Field(...)
+
+
+class PikaBodyGenerate22I2vGenerate22I2vPost(BaseModel):
+    duration: Optional[int] = 5
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    resolution: Optional[str] = '1080p'
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGenerate22KeyframeGenerate22PikaframesPost(BaseModel):
+    duration: Optional[int] = Field(None, ge=5, le=10)
+    negativePrompt: Optional[str] = Field(None)
+    promptText: str = Field(...)
+    resolution: Optional[str] = '1080p'
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGenerate22T2vGenerate22T2vPost(BaseModel):
+    aspectRatio: Optional[float] = Field(
+        1.7777777777777777,
+        description='Aspect ratio (width / height)',
+        ge=0.4,
+        le=2.5,
+    )
+    duration: Optional[int] = 5
+    negativePrompt: Optional[str] = Field(None)
+    promptText: str = Field(...)
+    resolution: Optional[str] = '1080p'
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGeneratePikadditionsGeneratePikadditionsPost(BaseModel):
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGeneratePikaffectsGeneratePikaffectsPost(BaseModel):
+    negativePrompt: Optional[str] = Field(None)
+    pikaffect: Optional[str] = None
+    promptText: Optional[str] = Field(None)
+    seed: Optional[int] = Field(None)
+
+
+class PikaBodyGeneratePikaswapsGeneratePikaswapsPost(BaseModel):
+    negativePrompt: Optional[str] = Field(None)
+    promptText: Optional[str] = Field(None)
+    seed: Optional[int] = Field(None)
+    modifyRegionRoi: Optional[str] = Field(None)
+
+
+class PikaStatusEnum(str, Enum):
+    queued = "queued"
+    started = "started"
+    finished = "finished"
+    failed = "failed"
+
+
+class PikaVideoResponse(BaseModel):
+    id: str = Field(...)
+    progress: Optional[int] = Field(None)
+    status: PikaStatusEnum
+    url: Optional[str] = Field(None)
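The new pika_defs module keeps the request and response shapes as plain Pydantic models. The way such a model becomes a JSON payload mirrors SynchronousOperation, which dumps it with exclude_none=True so unset optional fields stay out of the request body. A small illustrative model (hypothetical, not one of the classes above):

from typing import Optional
from pydantic import BaseModel, Field

class ExampleRequest(BaseModel):  # invented name, mirrors the pika_defs style
    promptText: str = Field(...)
    duration: Optional[int] = 5
    seed: Optional[int] = None

req = ExampleRequest(promptText="a foggy harbor at dawn")
# exclude_none matches how SynchronousOperation serializes request bodies:
print(req.model_dump(exclude_none=True))  # {'promptText': '...', 'duration': 5}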
response.status else None), + progress_extractor=lambda response: (response.progress if hasattr(response, "progress") else None), auth_kwargs=auth_kwargs, - result_url_extractor=lambda response: ( - response.url if hasattr(response, "url") else None - ), + result_url_extractor=lambda response: (response.url if hasattr(response, "url") else None), node_id=node_id, - estimated_duration=60 - ) - return await polling_operation.execute() - - -async def execute_task( - initial_operation: SynchronousOperation[R, PikaGenerateResponse], - auth_kwargs: Optional[dict[str, str]] = None, - node_id: Optional[str] = None, -) -> tuple[VideoFromFile]: - """Executes the initial operation then polls for the task status until it is completed. - - Args: - initial_operation: The initial operation to execute. - auth_kwargs: The authentication token(s) to use for the API call. - - Returns: - A tuple containing the video file as a VIDEO output. - """ - initial_response = await initial_operation.execute() - if not is_valid_initial_response(initial_response): - error_msg = f"Pika initial request failed. Code: {initial_response.code}, Message: {initial_response.message}, Data: {initial_response.data}" + estimated_duration=60, + max_poll_attempts=240, + ).execute() + if not final_response.url: + error_msg = f"Pika task {task_id} succeeded but no video data found in response:\n{final_response}" logging.error(error_msg) - raise PikaApiError(error_msg) - - task_id = initial_response.video_id - final_response = await poll_for_task_status(task_id, auth_kwargs, node_id=node_id) - if not is_valid_video_response(final_response): - error_msg = ( - f"Pika task {task_id} succeeded but no video data found in response." - ) - logging.error(error_msg) - raise PikaApiError(error_msg) - - video_url = str(final_response.url) + raise Exception(error_msg) + video_url = final_response.url logging.info("Pika task %s succeeded. 
Video URL: %s", task_id, video_url) - - return (await download_url_to_video_output(video_url),) + return comfy_io.NodeOutput(await download_url_to_video_output(video_url)) def get_base_inputs_types() -> list[comfy_io.Input]: @@ -173,16 +80,12 @@ def get_base_inputs_types() -> list[comfy_io.Input]: comfy_io.String.Input("prompt_text", multiline=True), comfy_io.String.Input("negative_prompt", multiline=True), comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), - comfy_io.Combo.Input( - "resolution", options=PikaResolutionEnum, default=PikaResolutionEnum.field_1080p - ), - comfy_io.Combo.Input( - "duration", options=PikaDurationEnum, default=PikaDurationEnum.integer_5 - ), + comfy_io.Combo.Input("resolution", options=["1080p", "720p"], default="1080p"), + comfy_io.Combo.Input("duration", options=[5, 10], default=5), ] -class PikaImageToVideoV2_2(comfy_io.ComfyNode): +class PikaImageToVideo(comfy_io.ComfyNode): """Pika 2.2 Image to Video Node.""" @classmethod @@ -215,14 +118,9 @@ class PikaImageToVideoV2_2(comfy_io.ComfyNode): resolution: str, duration: int, ) -> comfy_io.NodeOutput: - # Convert image to BytesIO image_bytes_io = tensor_to_bytesio(image) - image_bytes_io.seek(0) - pika_files = {"image": ("image.png", image_bytes_io, "image/png")} - - # Prepare non-file data - pika_request_data = PikaBodyGenerate22I2vGenerate22I2vPost( + pika_request_data = pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost( promptText=prompt_text, negativePrompt=negative_prompt, seed=seed, @@ -237,8 +135,8 @@ class PikaImageToVideoV2_2(comfy_io.ComfyNode): endpoint=ApiEndpoint( path=PATH_IMAGE_TO_VIDEO, method=HttpMethod.POST, - request_model=PikaBodyGenerate22I2vGenerate22I2vPost, - response_model=PikaGenerateResponse, + request_model=pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost, + response_model=pika_defs.PikaGenerateResponse, ), request=pika_request_data, files=pika_files, @@ -248,7 +146,7 @@ class PikaImageToVideoV2_2(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaTextToVideoNodeV2_2(comfy_io.ComfyNode): +class PikaTextToVideoNode(comfy_io.ComfyNode): """Pika Text2Video v2.2 Node.""" @classmethod @@ -296,10 +194,10 @@ class PikaTextToVideoNodeV2_2(comfy_io.ComfyNode): endpoint=ApiEndpoint( path=PATH_TEXT_TO_VIDEO, method=HttpMethod.POST, - request_model=PikaBodyGenerate22T2vGenerate22T2vPost, - response_model=PikaGenerateResponse, + request_model=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost, + response_model=pika_defs.PikaGenerateResponse, ), - request=PikaBodyGenerate22T2vGenerate22T2vPost( + request=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost( promptText=prompt_text, negativePrompt=negative_prompt, seed=seed, @@ -313,7 +211,7 @@ class PikaTextToVideoNodeV2_2(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaScenesV2_2(comfy_io.ComfyNode): +class PikaScenes(comfy_io.ComfyNode): """PikaScenes v2.2 Node.""" @classmethod @@ -389,7 +287,6 @@ class PikaScenesV2_2(comfy_io.ComfyNode): image_ingredient_4: Optional[torch.Tensor] = None, image_ingredient_5: Optional[torch.Tensor] = None, ) -> comfy_io.NodeOutput: - # Convert all passed images to BytesIO all_image_bytes_io = [] for image in [ image_ingredient_1, @@ -399,16 +296,14 @@ class PikaScenesV2_2(comfy_io.ComfyNode): image_ingredient_5, ]: if image is not None: - image_bytes_io = tensor_to_bytesio(image) - image_bytes_io.seek(0) - 
all_image_bytes_io.append(image_bytes_io) + all_image_bytes_io.append(tensor_to_bytesio(image)) pika_files = [ ("images", (f"image_{i}.png", image_bytes_io, "image/png")) for i, image_bytes_io in enumerate(all_image_bytes_io) ] - pika_request_data = PikaBodyGenerate22C2vGenerate22PikascenesPost( + pika_request_data = pika_defs.PikaBodyGenerate22C2vGenerate22PikascenesPost( ingredientsMode=ingredients_mode, promptText=prompt_text, negativePrompt=negative_prompt, @@ -425,8 +320,8 @@ class PikaScenesV2_2(comfy_io.ComfyNode): endpoint=ApiEndpoint( path=PATH_PIKASCENES, method=HttpMethod.POST, - request_model=PikaBodyGenerate22C2vGenerate22PikascenesPost, - response_model=PikaGenerateResponse, + request_model=pika_defs.PikaBodyGenerate22C2vGenerate22PikascenesPost, + response_model=pika_defs.PikaGenerateResponse, ), request=pika_request_data, files=pika_files, @@ -477,22 +372,16 @@ class PikAdditionsNode(comfy_io.ComfyNode): negative_prompt: str, seed: int, ) -> comfy_io.NodeOutput: - # Convert video to BytesIO video_bytes_io = BytesIO() video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) video_bytes_io.seek(0) - # Convert image to BytesIO image_bytes_io = tensor_to_bytesio(image) - image_bytes_io.seek(0) - pika_files = { "video": ("video.mp4", video_bytes_io, "video/mp4"), "image": ("image.png", image_bytes_io, "image/png"), } - - # Prepare non-file data - pika_request_data = PikaBodyGeneratePikadditionsGeneratePikadditionsPost( + pika_request_data = pika_defs.PikaBodyGeneratePikadditionsGeneratePikadditionsPost( promptText=prompt_text, negativePrompt=negative_prompt, seed=seed, @@ -505,8 +394,8 @@ class PikAdditionsNode(comfy_io.ComfyNode): endpoint=ApiEndpoint( path=PATH_PIKADDITIONS, method=HttpMethod.POST, - request_model=PikaBodyGeneratePikadditionsGeneratePikadditionsPost, - response_model=PikaGenerateResponse, + request_model=pika_defs.PikaBodyGeneratePikadditionsGeneratePikadditionsPost, + response_model=pika_defs.PikaGenerateResponse, ), request=pika_request_data, files=pika_files, @@ -529,11 +418,25 @@ class PikaSwapsNode(comfy_io.ComfyNode): category="api node/video/Pika", inputs=[ comfy_io.Video.Input("video", tooltip="The video to swap an object in."), - comfy_io.Image.Input("image", tooltip="The image used to replace the masked object in the video."), - comfy_io.Mask.Input("mask", tooltip="Use the mask to define areas in the video to replace"), - comfy_io.String.Input("prompt_text", multiline=True), - comfy_io.String.Input("negative_prompt", multiline=True), - comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), + comfy_io.Image.Input( + "image", + tooltip="The image used to replace the masked object in the video.", + optional=True, + ), + comfy_io.Mask.Input( + "mask", + tooltip="Use the mask to define areas in the video to replace.", + optional=True, + ), + comfy_io.String.Input("prompt_text", multiline=True, optional=True), + comfy_io.String.Input("negative_prompt", multiline=True, optional=True), + comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True, optional=True), + comfy_io.String.Input( + "region_to_modify", + multiline=True, + optional=True, + tooltip="Plaintext description of the object / region to modify.", + ), ], outputs=[comfy_io.Video.Output()], hidden=[ @@ -548,41 +451,29 @@ class PikaSwapsNode(comfy_io.ComfyNode): async def execute( cls, video: VideoInput, - image: torch.Tensor, - mask: torch.Tensor, - prompt_text: str, - negative_prompt: str, - seed: int, + image: 
Optional[torch.Tensor] = None, + mask: Optional[torch.Tensor] = None, + prompt_text: str = "", + negative_prompt: str = "", + seed: int = 0, + region_to_modify: str = "", ) -> comfy_io.NodeOutput: - # Convert video to BytesIO video_bytes_io = BytesIO() video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) video_bytes_io.seek(0) - - # Convert mask to binary mask with three channels - mask = torch.round(mask) - mask = mask.repeat(1, 3, 1, 1) - - # Convert 3-channel binary mask to BytesIO - mask_bytes_io = BytesIO() - mask_bytes_io.write(mask.numpy().astype(np.uint8)) - mask_bytes_io.seek(0) - - # Convert image to BytesIO - image_bytes_io = tensor_to_bytesio(image) - image_bytes_io.seek(0) - pika_files = { "video": ("video.mp4", video_bytes_io, "video/mp4"), - "image": ("image.png", image_bytes_io, "image/png"), - "modifyRegionMask": ("mask.png", mask_bytes_io, "image/png"), } + if mask is not None: + pika_files["modifyRegionMask"] = ("mask.png", tensor_to_bytesio(mask), "image/png") + if image is not None: + pika_files["image"] = ("image.png", tensor_to_bytesio(image), "image/png") - # Prepare non-file data - pika_request_data = PikaBodyGeneratePikaswapsGeneratePikaswapsPost( + pika_request_data = pika_defs.PikaBodyGeneratePikaswapsGeneratePikaswapsPost( promptText=prompt_text, negativePrompt=negative_prompt, seed=seed, + modifyRegionRoi=region_to_modify if region_to_modify else None, ) auth = { "auth_token": cls.hidden.auth_token_comfy_org, @@ -590,10 +481,10 @@ class PikaSwapsNode(comfy_io.ComfyNode): } initial_operation = SynchronousOperation( endpoint=ApiEndpoint( - path=PATH_PIKADDITIONS, + path=PATH_PIKASWAPS, method=HttpMethod.POST, - request_model=PikaBodyGeneratePikadditionsGeneratePikadditionsPost, - response_model=PikaGenerateResponse, + request_model=pika_defs.PikaBodyGeneratePikaswapsGeneratePikaswapsPost, + response_model=pika_defs.PikaGenerateResponse, ), request=pika_request_data, files=pika_files, @@ -616,7 +507,7 @@ class PikaffectsNode(comfy_io.ComfyNode): inputs=[ comfy_io.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."), comfy_io.Combo.Input( - "pikaffect", options=Pikaffect, default="Cake-ify" + "pikaffect", options=pika_defs.Pikaffect, default="Cake-ify" ), comfy_io.String.Input("prompt_text", multiline=True), comfy_io.String.Input("negative_prompt", multiline=True), @@ -648,10 +539,10 @@ class PikaffectsNode(comfy_io.ComfyNode): endpoint=ApiEndpoint( path=PATH_PIKAFFECTS, method=HttpMethod.POST, - request_model=PikaBodyGeneratePikaffectsGeneratePikaffectsPost, - response_model=PikaGenerateResponse, + request_model=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost, + response_model=pika_defs.PikaGenerateResponse, ), - request=PikaBodyGeneratePikaffectsGeneratePikaffectsPost( + request=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost( pikaffect=pikaffect, promptText=prompt_text, negativePrompt=negative_prompt, @@ -664,7 +555,7 @@ class PikaffectsNode(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaStartEndFrameNode2_2(comfy_io.ComfyNode): +class PikaStartEndFrameNode(comfy_io.ComfyNode): """PikaFrames v2.2 Node.""" @classmethod @@ -711,10 +602,10 @@ class PikaStartEndFrameNode2_2(comfy_io.ComfyNode): endpoint=ApiEndpoint( path=PATH_PIKAFRAMES, method=HttpMethod.POST, - request_model=PikaBodyGenerate22KeyframeGenerate22PikaframesPost, - response_model=PikaGenerateResponse, + 
request_model=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost, + response_model=pika_defs.PikaGenerateResponse, ), - request=PikaBodyGenerate22KeyframeGenerate22PikaframesPost( + request=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost( promptText=prompt_text, negativePrompt=negative_prompt, seed=seed, @@ -732,13 +623,13 @@ class PikaApiNodesExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: return [ - PikaImageToVideoV2_2, - PikaTextToVideoNodeV2_2, - PikaScenesV2_2, + PikaImageToVideo, + PikaTextToVideoNode, + PikaScenes, PikAdditionsNode, PikaSwapsNode, PikaffectsNode, - PikaStartEndFrameNode2_2, + PikaStartEndFrameNode, ] From fc0fbf141c7deb444fe730af2f2db8e2beddaf60 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 10 Oct 2025 01:18:23 +0300 Subject: [PATCH 0732/1073] convert nodes_sd3.py and nodes_slg.py to V3 schema (#10162) --- comfy_extras/nodes_sd3.py | 232 +++++++++++++++++++++++++------------- comfy_extras/nodes_slg.py | 108 +++++++++++------- 2 files changed, 219 insertions(+), 121 deletions(-) diff --git a/comfy_extras/nodes_sd3.py b/comfy_extras/nodes_sd3.py index d75b29e60..14782cb2b 100644 --- a/comfy_extras/nodes_sd3.py +++ b/comfy_extras/nodes_sd3.py @@ -3,64 +3,83 @@ import comfy.sd import comfy.model_management import nodes import torch -import comfy_extras.nodes_slg +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io +from comfy_extras.nodes_slg import SkipLayerGuidanceDiT -class TripleCLIPLoader: +class TripleCLIPLoader(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), "clip_name3": (folder_paths.get_filename_list("text_encoders"), ) - }} - RETURN_TYPES = ("CLIP",) - FUNCTION = "load_clip" + def define_schema(cls): + return io.Schema( + node_id="TripleCLIPLoader", + category="advanced/loaders", + description="[Recipes]\n\nsd3: clip-l, clip-g, t5", + inputs=[ + io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")), + io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")), + io.Combo.Input("clip_name3", options=folder_paths.get_filename_list("text_encoders")), + ], + outputs=[ + io.Clip.Output(), + ], + ) - CATEGORY = "advanced/loaders" - - DESCRIPTION = "[Recipes]\n\nsd3: clip-l, clip-g, t5" - - def load_clip(self, clip_name1, clip_name2, clip_name3): + @classmethod + def execute(cls, clip_name1, clip_name2, clip_name3) -> io.NodeOutput: clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1) clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2) clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3) clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2, clip_path3], embedding_directory=folder_paths.get_folder_paths("embeddings")) - return (clip,) + return io.NodeOutput(clip) + + load_clip = execute # TODO: remove -class EmptySD3LatentImage: - def __init__(self): - self.device = comfy.model_management.intermediate_device() +class EmptySD3LatentImage(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="EmptySD3LatentImage", + category="latent/sd3", + inputs=[ + io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=1024, 
min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(), + ], + ) @classmethod - def INPUT_TYPES(s): - return {"required": { "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "height": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}} - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" + def execute(cls, width, height, batch_size=1) -> io.NodeOutput: + latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + return io.NodeOutput({"samples":latent}) - CATEGORY = "latent/sd3" - - def generate(self, width, height, batch_size=1): - latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=self.device) - return ({"samples":latent}, ) + generate = execute # TODO: remove -class CLIPTextEncodeSD3: +class CLIPTextEncodeSD3(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "clip_g": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "empty_padding": (["none", "empty_prompt"], ) - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeSD3", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("clip_l", multiline=True, dynamic_prompts=True), + io.String.Input("clip_g", multiline=True, dynamic_prompts=True), + io.String.Input("t5xxl", multiline=True, dynamic_prompts=True), + io.Combo.Input("empty_padding", options=["none", "empty_prompt"]), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - CATEGORY = "advanced/conditioning" - - def encode(self, clip, clip_l, clip_g, t5xxl, empty_padding): + @classmethod + def execute(cls, clip, clip_l, clip_g, t5xxl, empty_padding) -> io.NodeOutput: no_padding = empty_padding == "none" tokens = clip.tokenize(clip_g) @@ -82,57 +101,112 @@ class CLIPTextEncodeSD3: tokens["l"] += empty["l"] while len(tokens["l"]) > len(tokens["g"]): tokens["g"] += empty["g"] - return (clip.encode_from_tokens_scheduled(tokens), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) + + encode = execute # TODO: remove -class ControlNetApplySD3(nodes.ControlNetApplyAdvanced): +class ControlNetApplySD3(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "control_net": ("CONTROL_NET", ), - "vae": ("VAE", ), - "image": ("IMAGE", ), - "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), - "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) - }} - CATEGORY = "conditioning/controlnet" - DEPRECATED = True + def define_schema(cls) -> io.Schema: + return io.Schema( + node_id="ControlNetApplySD3", + display_name="Apply Controlnet with VAE", + category="conditioning/controlnet", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.ControlNet.Input("control_net"), + io.Vae.Input("vae"), + io.Image.Input("image"), + io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("start_percent", 
default=0.0, min=0.0, max=1.0, step=0.001), + io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + ], + is_deprecated=True, + ) + + @classmethod + def execute(cls, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None) -> io.NodeOutput: + if strength == 0: + return io.NodeOutput(positive, negative) + + control_hint = image.movedim(-1, 1) + cnets = {} + + out = [] + for conditioning in [positive, negative]: + c = [] + for t in conditioning: + d = t[1].copy() + + prev_cnet = d.get('control', None) + if prev_cnet in cnets: + c_net = cnets[prev_cnet] + else: + c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent), + vae=vae, extra_concat=[]) + c_net.set_previous_controlnet(prev_cnet) + cnets[prev_cnet] = c_net + + d['control'] = c_net + d['control_apply_to_uncond'] = False + n = [t[0], d] + c.append(n) + out.append(c) + return io.NodeOutput(out[0], out[1]) + + apply_controlnet = execute # TODO: remove -class SkipLayerGuidanceSD3(comfy_extras.nodes_slg.SkipLayerGuidanceDiT): +class SkipLayerGuidanceSD3(io.ComfyNode): ''' Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers. Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377) Experimental implementation by Dango233@StabilityAI. ''' + @classmethod - def INPUT_TYPES(s): - return {"required": {"model": ("MODEL", ), - "layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), - "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}), - "start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}), - "end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001}) - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "skip_guidance_sd3" + def define_schema(cls): + return io.Schema( + node_id="SkipLayerGuidanceSD3", + category="advanced/guidance", + description="Generic version of SkipLayerGuidance node that can be used on every DiT model.", + inputs=[ + io.Model.Input("model"), + io.String.Input("layers", default="7, 8, 9", multiline=False), + io.Float.Input("scale", default=3.0, min=0.0, max=10.0, step=0.1), + io.Float.Input("start_percent", default=0.01, min=0.0, max=1.0, step=0.001), + io.Float.Input("end_percent", default=0.15, min=0.0, max=1.0, step=0.001), + ], + outputs=[ + io.Model.Output(), + ], + is_experimental=True, + ) - CATEGORY = "advanced/guidance" + @classmethod + def execute(cls, model, layers, scale, start_percent, end_percent) -> io.NodeOutput: + return SkipLayerGuidanceDiT().execute(model=model, scale=scale, start_percent=start_percent, end_percent=end_percent, double_layers=layers) - def skip_guidance_sd3(self, model, layers, scale, start_percent, end_percent): - return self.skip_guidance(model=model, scale=scale, start_percent=start_percent, end_percent=end_percent, double_layers=layers) + skip_guidance_sd3 = execute # TODO: remove -NODE_CLASS_MAPPINGS = { - "TripleCLIPLoader": TripleCLIPLoader, - "EmptySD3LatentImage": EmptySD3LatentImage, - "CLIPTextEncodeSD3": CLIPTextEncodeSD3, - "ControlNetApplySD3": ControlNetApplySD3, - "SkipLayerGuidanceSD3": SkipLayerGuidanceSD3, -} +class SD3Extension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + TripleCLIPLoader, + EmptySD3LatentImage, + CLIPTextEncodeSD3, + ControlNetApplySD3, + 
SkipLayerGuidanceSD3, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - # Sampling - "ControlNetApplySD3": "Apply Controlnet with VAE", -} + + +async def comfy_entrypoint() -> SD3Extension: + return SD3Extension() diff --git a/comfy_extras/nodes_slg.py b/comfy_extras/nodes_slg.py index 7adff202e..f462faa8f 100644 --- a/comfy_extras/nodes_slg.py +++ b/comfy_extras/nodes_slg.py @@ -1,33 +1,40 @@ import comfy.model_patcher import comfy.samplers import re +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io -class SkipLayerGuidanceDiT: +class SkipLayerGuidanceDiT(io.ComfyNode): ''' Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers. Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377) Original experimental implementation for SD3 by Dango233@StabilityAI. ''' + @classmethod - def INPUT_TYPES(s): - return {"required": {"model": ("MODEL", ), - "double_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), - "single_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), - "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}), - "start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}), - "end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001}), - "rescaling_scale": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "skip_guidance" - EXPERIMENTAL = True + def define_schema(cls): + return io.Schema( + node_id="SkipLayerGuidanceDiT", + category="advanced/guidance", + description="Generic version of SkipLayerGuidance node that can be used on every DiT model.", + is_experimental=True, + inputs=[ + io.Model.Input("model"), + io.String.Input("double_layers", default="7, 8, 9"), + io.String.Input("single_layers", default="7, 8, 9"), + io.Float.Input("scale", default=3.0, min=0.0, max=10.0, step=0.1), + io.Float.Input("start_percent", default=0.01, min=0.0, max=1.0, step=0.001), + io.Float.Input("end_percent", default=0.15, min=0.0, max=1.0, step=0.001), + io.Float.Input("rescaling_scale", default=0.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) - DESCRIPTION = "Generic version of SkipLayerGuidance node that can be used on every DiT model." - - CATEGORY = "advanced/guidance" - - def skip_guidance(self, model, scale, start_percent, end_percent, double_layers="", single_layers="", rescaling_scale=0): + @classmethod + def execute(cls, model, scale, start_percent, end_percent, double_layers="", single_layers="", rescaling_scale=0) -> io.NodeOutput: # check if layer is comma separated integers def skip(args, extra_args): return args @@ -43,7 +50,7 @@ class SkipLayerGuidanceDiT: single_layers = [int(i) for i in single_layers] if len(double_layers) == 0 and len(single_layers) == 0: - return (model, ) + return io.NodeOutput(model) def post_cfg_function(args): model = args["model"] @@ -76,29 +83,36 @@ class SkipLayerGuidanceDiT: m = model.clone() m.set_model_sampler_post_cfg_function(post_cfg_function) - return (m, ) + return io.NodeOutput(m) -class SkipLayerGuidanceDiTSimple: + skip_guidance = execute # TODO: remove + + +class SkipLayerGuidanceDiTSimple(io.ComfyNode): ''' Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass. 
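    Rather than adding an extra skip-layer CFG pass, it patches the chosen layers out while the unconditional batch itself is computed (via set_model_sampler_calc_cond_batch_function below).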
''' @classmethod - def INPUT_TYPES(s): - return {"required": {"model": ("MODEL", ), - "double_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), - "single_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), - "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), - "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "skip_guidance" - EXPERIMENTAL = True + def define_schema(cls): + return io.Schema( + node_id="SkipLayerGuidanceDiTSimple", + category="advanced/guidance", + description="Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.", + is_experimental=True, + inputs=[ + io.Model.Input("model"), + io.String.Input("double_layers", default="7, 8, 9"), + io.String.Input("single_layers", default="7, 8, 9"), + io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001), + io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001), + ], + outputs=[ + io.Model.Output(), + ], + ) - DESCRIPTION = "Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass." - - CATEGORY = "advanced/guidance" - - def skip_guidance(self, model, start_percent, end_percent, double_layers="", single_layers=""): + @classmethod + def execute(cls, model, start_percent, end_percent, double_layers="", single_layers="") -> io.NodeOutput: def skip(args, extra_args): return args @@ -113,7 +127,7 @@ class SkipLayerGuidanceDiTSimple: single_layers = [int(i) for i in single_layers] if len(double_layers) == 0 and len(single_layers) == 0: - return (model, ) + return io.NodeOutput(model) def calc_cond_batch_function(args): x = args["input"] @@ -144,9 +158,19 @@ class SkipLayerGuidanceDiTSimple: m = model.clone() m.set_model_sampler_calc_cond_batch_function(calc_cond_batch_function) - return (m, ) + return io.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "SkipLayerGuidanceDiT": SkipLayerGuidanceDiT, - "SkipLayerGuidanceDiTSimple": SkipLayerGuidanceDiTSimple, -} + skip_guidance = execute # TODO: remove + + +class SkipLayerGuidanceExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + SkipLayerGuidanceDiT, + SkipLayerGuidanceDiTSimple, + ] + + +async def comfy_entrypoint() -> SkipLayerGuidanceExtension: + return SkipLayerGuidanceExtension() From f1dd6e50f891b1d2b17e4b8d26d422634fe49595 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 9 Oct 2025 16:02:40 -0700 Subject: [PATCH 0733/1073] Fix bug with applying loras on fp8 scaled without fp8 ops. 
(#10279) --- comfy/model_patcher.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index e8c859689..c0b68fb8c 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -130,17 +130,21 @@ class LowVramPatch: self.set_func = set_func def __call__(self, weight): + intermediate_dtype = weight.dtype if self.convert_func is not None: weight = self.convert_func(weight.to(dtype=torch.float32, copy=True), inplace=True) - intermediate_dtype = weight.dtype - if self.set_func is None and intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops + if intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops intermediate_dtype = torch.float32 - return comfy.float.stochastic_rounding(comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype), weight.dtype, seed=string_to_seed(self.key)) + out = comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype) + if self.set_func is None: + return comfy.float.stochastic_rounding(out, weight.dtype, seed=string_to_seed(self.key)) + else: + return self.set_func(out, seed=string_to_seed(self.key), return_weight=True) out = comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype) if self.set_func is not None: - return self.set_func(out, seed=string_to_seed(self.key), return_weight=True) + return self.set_func(out, seed=string_to_seed(self.key), return_weight=True).to(dtype=intermediate_dtype) else: return out From 90853fb9cd42ebbee7b3fcf46e518e5632912b11 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 10 Oct 2025 02:07:17 +0300 Subject: [PATCH 0734/1073] convert nodes_flux to V3 schema (#10122) --- comfy_extras/nodes_flux.py | 189 ++++++++++++++++++++++--------------- 1 file changed, 115 insertions(+), 74 deletions(-) diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py index 25e029ffd..ce1b2e89f 100644 --- a/comfy_extras/nodes_flux.py +++ b/comfy_extras/nodes_flux.py @@ -1,60 +1,80 @@ import node_helpers import comfy.utils +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io -class CLIPTextEncodeFlux: + +class CLIPTextEncodeFlux(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}), - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeFlux", + category="advanced/conditioning/flux", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("clip_l", multiline=True, dynamic_prompts=True), + io.String.Input("t5xxl", multiline=True, dynamic_prompts=True), + io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - CATEGORY = "advanced/conditioning/flux" - - def encode(self, clip, clip_l, t5xxl, guidance): + @classmethod + def execute(cls, clip, clip_l, t5xxl, guidance) -> io.NodeOutput: tokens = clip.tokenize(clip_l) 
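        # Flux conditions on two text encoders: the T5-XXL tokens are merged into the CLIP-L token dict so both prompts are encoded together, with the guidance value attached at encode time.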
tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"] - return (clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance})) -class FluxGuidance: + encode = execute # TODO: remove + + +class FluxGuidance(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "conditioning": ("CONDITIONING", ), - "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}), - }} + def define_schema(cls): + return io.Schema( + node_id="FluxGuidance", + category="advanced/conditioning/flux", + inputs=[ + io.Conditioning.Input("conditioning"), + io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "append" - - CATEGORY = "advanced/conditioning/flux" - - def append(self, conditioning, guidance): + @classmethod + def execute(cls, conditioning, guidance) -> io.NodeOutput: c = node_helpers.conditioning_set_values(conditioning, {"guidance": guidance}) - return (c, ) + return io.NodeOutput(c) + + append = execute # TODO: remove -class FluxDisableGuidance: +class FluxDisableGuidance(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "conditioning": ("CONDITIONING", ), - }} + def define_schema(cls): + return io.Schema( + node_id="FluxDisableGuidance", + category="advanced/conditioning/flux", + description="This node completely disables the guidance embed on Flux and Flux like models", + inputs=[ + io.Conditioning.Input("conditioning"), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "append" - - CATEGORY = "advanced/conditioning/flux" - DESCRIPTION = "This node completely disables the guidance embed on Flux and Flux like models" - - def append(self, conditioning): + @classmethod + def execute(cls, conditioning) -> io.NodeOutput: c = node_helpers.conditioning_set_values(conditioning, {"guidance": None}) - return (c, ) + return io.NodeOutput(c) + + append = execute # TODO: remove PREFERED_KONTEXT_RESOLUTIONS = [ @@ -78,52 +98,73 @@ PREFERED_KONTEXT_RESOLUTIONS = [ ] -class FluxKontextImageScale: +class FluxKontextImageScale(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"image": ("IMAGE", ), - }, - } + def define_schema(cls): + return io.Schema( + node_id="FluxKontextImageScale", + category="advanced/conditioning/flux", + description="This node resizes the image to one that is more optimal for flux kontext.", + inputs=[ + io.Image.Input("image"), + ], + outputs=[ + io.Image.Output(), + ], + ) - RETURN_TYPES = ("IMAGE",) - FUNCTION = "scale" - - CATEGORY = "advanced/conditioning/flux" - DESCRIPTION = "This node resizes the image to one that is more optimal for flux kontext." 
- - def scale(self, image): + @classmethod + def execute(cls, image) -> io.NodeOutput: width = image.shape[2] height = image.shape[1] aspect_ratio = width / height _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS) image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1) - return (image, ) + return io.NodeOutput(image) + + scale = execute # TODO: remove -class FluxKontextMultiReferenceLatentMethod: +class FluxKontextMultiReferenceLatentMethod(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "conditioning": ("CONDITIONING", ), - "reference_latents_method": (("offset", "index", "uxo/uno"), ), - }} + def define_schema(cls): + return io.Schema( + node_id="FluxKontextMultiReferenceLatentMethod", + category="advanced/conditioning/flux", + inputs=[ + io.Conditioning.Input("conditioning"), + io.Combo.Input( + "reference_latents_method", + options=["offset", "index", "uxo/uno"], + ), + ], + outputs=[ + io.Conditioning.Output(), + ], + is_experimental=True, + ) - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "append" - EXPERIMENTAL = True - - CATEGORY = "advanced/conditioning/flux" - - def append(self, conditioning, reference_latents_method): + @classmethod + def execute(cls, conditioning, reference_latents_method) -> io.NodeOutput: if "uxo" in reference_latents_method or "uso" in reference_latents_method: reference_latents_method = "uxo" c = node_helpers.conditioning_set_values(conditioning, {"reference_latents_method": reference_latents_method}) - return (c, ) + return io.NodeOutput(c) -NODE_CLASS_MAPPINGS = { - "CLIPTextEncodeFlux": CLIPTextEncodeFlux, - "FluxGuidance": FluxGuidance, - "FluxDisableGuidance": FluxDisableGuidance, - "FluxKontextImageScale": FluxKontextImageScale, - "FluxKontextMultiReferenceLatentMethod": FluxKontextMultiReferenceLatentMethod, -} + append = execute # TODO: remove + + +class FluxExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + CLIPTextEncodeFlux, + FluxGuidance, + FluxDisableGuidance, + FluxKontextImageScale, + FluxKontextMultiReferenceLatentMethod, + ] + + +async def comfy_entrypoint() -> FluxExtension: + return FluxExtension() From 81e4dac107c24b1655babc47c99c33551c96a644 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 10 Oct 2025 02:08:40 +0300 Subject: [PATCH 0735/1073] convert nodes_upscale_model.py to V3 schema (#10149) --- comfy_extras/nodes_upscale_model.py | 76 +++++++++++++++++++---------- nodes.py | 2 - 2 files changed, 51 insertions(+), 27 deletions(-) diff --git a/comfy_extras/nodes_upscale_model.py b/comfy_extras/nodes_upscale_model.py index 04c948341..4d62b87be 100644 --- a/comfy_extras/nodes_upscale_model.py +++ b/comfy_extras/nodes_upscale_model.py @@ -4,6 +4,8 @@ from comfy import model_management import torch import comfy.utils import folder_paths +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io try: from spandrel_extra_arches import EXTRA_REGISTRY @@ -13,17 +15,23 @@ try: except: pass -class UpscaleModelLoader: +class UpscaleModelLoader(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"), ), - }} - RETURN_TYPES = ("UPSCALE_MODEL",) - FUNCTION = "load_model" + def define_schema(cls): + return io.Schema( + node_id="UpscaleModelLoader", + display_name="Load Upscale Model", + 
category="loaders", + inputs=[ + io.Combo.Input("model_name", options=folder_paths.get_filename_list("upscale_models")), + ], + outputs=[ + io.UpscaleModel.Output(), + ], + ) - CATEGORY = "loaders" - - def load_model(self, model_name): + @classmethod + def execute(cls, model_name) -> io.NodeOutput: model_path = folder_paths.get_full_path_or_raise("upscale_models", model_name) sd = comfy.utils.load_torch_file(model_path, safe_load=True) if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd: @@ -33,21 +41,29 @@ class UpscaleModelLoader: if not isinstance(out, ImageModelDescriptor): raise Exception("Upscale model must be a single-image model.") - return (out, ) + return io.NodeOutput(out) + + load_model = execute # TODO: remove -class ImageUpscaleWithModel: +class ImageUpscaleWithModel(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "upscale_model": ("UPSCALE_MODEL",), - "image": ("IMAGE",), - }} - RETURN_TYPES = ("IMAGE",) - FUNCTION = "upscale" + def define_schema(cls): + return io.Schema( + node_id="ImageUpscaleWithModel", + display_name="Upscale Image (using Model)", + category="image/upscaling", + inputs=[ + io.UpscaleModel.Input("upscale_model"), + io.Image.Input("image"), + ], + outputs=[ + io.Image.Output(), + ], + ) - CATEGORY = "image/upscaling" - - def upscale(self, upscale_model, image): + @classmethod + def execute(cls, upscale_model, image) -> io.NodeOutput: device = model_management.get_torch_device() memory_required = model_management.module_size(upscale_model.model) @@ -75,9 +91,19 @@ class ImageUpscaleWithModel: upscale_model.to("cpu") s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0) - return (s,) + return io.NodeOutput(s) -NODE_CLASS_MAPPINGS = { - "UpscaleModelLoader": UpscaleModelLoader, - "ImageUpscaleWithModel": ImageUpscaleWithModel -} + upscale = execute # TODO: remove + + +class UpscaleModelExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + UpscaleModelLoader, + ImageUpscaleWithModel, + ] + + +async def comfy_entrypoint() -> UpscaleModelExtension: + return UpscaleModelExtension() diff --git a/nodes.py b/nodes.py index 2a2a5f2ad..7cfa8ca14 100644 --- a/nodes.py +++ b/nodes.py @@ -2027,7 +2027,6 @@ NODE_DISPLAY_NAME_MAPPINGS = { "DiffControlNetLoader": "Load ControlNet Model (diff)", "StyleModelLoader": "Load Style Model", "CLIPVisionLoader": "Load CLIP Vision", - "UpscaleModelLoader": "Load Upscale Model", "UNETLoader": "Load Diffusion Model", # Conditioning "CLIPVisionEncode": "CLIP Vision Encode", @@ -2065,7 +2064,6 @@ NODE_DISPLAY_NAME_MAPPINGS = { "LoadImageOutput": "Load Image (from Outputs)", "ImageScale": "Upscale Image", "ImageScaleBy": "Upscale Image By", - "ImageUpscaleWithModel": "Upscale Image (using Model)", "ImageInvert": "Invert Image", "ImagePadForOutpaint": "Pad Image for Outpainting", "ImageBatch": "Batch Images", From cdfc25a1605add750a3b1a83360b84e8e95324c6 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 10 Oct 2025 14:33:51 -0700 Subject: [PATCH 0736/1073] Fix save audio nodes saving mono audio as stereo. 
(#10289) --- comfy_extras/nodes_audio.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index 1c868fcba..2ed7e0b22 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -142,9 +142,10 @@ def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=Non for key, value in metadata.items(): output_container.metadata[key] = value + layout = 'mono' if waveform.shape[0] == 1 else 'stereo' # Set up the output stream with appropriate properties if format == "opus": - out_stream = output_container.add_stream("libopus", rate=sample_rate) + out_stream = output_container.add_stream("libopus", rate=sample_rate, layout=layout) if quality == "64k": out_stream.bit_rate = 64000 elif quality == "96k": @@ -156,7 +157,7 @@ def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=Non elif quality == "320k": out_stream.bit_rate = 320000 elif format == "mp3": - out_stream = output_container.add_stream("libmp3lame", rate=sample_rate) + out_stream = output_container.add_stream("libmp3lame", rate=sample_rate, layout=layout) if quality == "V0": #TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool out_stream.codec_context.qscale = 1 @@ -165,9 +166,9 @@ def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=Non elif quality == "320k": out_stream.bit_rate = 320000 else: #format == "flac": - out_stream = output_container.add_stream("flac", rate=sample_rate) + out_stream = output_container.add_stream("flac", rate=sample_rate, layout=layout) - frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout='mono' if waveform.shape[0] == 1 else 'stereo') + frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout=layout) frame.sample_rate = sample_rate frame.pts = 0 output_container.mux(out_stream.encode(frame)) From aa895db7e876401eb3b1d2601f49d6f2aee770ca Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 11 Oct 2025 02:17:20 +0300 Subject: [PATCH 0737/1073] feat(GeminiImage-ApiNode): add aspect_ratio and release version of model (#10255) --- comfy_api_nodes/apis/gemini_api.py | 17 ++++++++++------- comfy_api_nodes/nodes_gemini.py | 24 ++++++++++++++++++------ 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/comfy_api_nodes/apis/gemini_api.py b/comfy_api_nodes/apis/gemini_api.py index 138bf035d..2bf28bf93 100644 --- a/comfy_api_nodes/apis/gemini_api.py +++ b/comfy_api_nodes/apis/gemini_api.py @@ -1,19 +1,22 @@ -from __future__ import annotations - -from typing import List, Optional +from typing import Optional from comfy_api_nodes.apis import GeminiGenerationConfig, GeminiContent, GeminiSafetySetting, GeminiSystemInstructionContent, GeminiTool, GeminiVideoMetadata from pydantic import BaseModel +class GeminiImageConfig(BaseModel): + aspectRatio: Optional[str] = None + + class GeminiImageGenerationConfig(GeminiGenerationConfig): - responseModalities: Optional[List[str]] = None + responseModalities: Optional[list[str]] = None + imageConfig: Optional[GeminiImageConfig] = None class GeminiImageGenerateContentRequest(BaseModel): - contents: List[GeminiContent] + contents: list[GeminiContent] generationConfig: Optional[GeminiImageGenerationConfig] = None - safetySettings: Optional[List[GeminiSafetySetting]] = None + 
safetySettings: Optional[list[GeminiSafetySetting]] = None systemInstruction: Optional[GeminiSystemInstructionContent] = None - tools: Optional[List[GeminiTool]] = None + tools: Optional[list[GeminiTool]] = None videoMetadata: Optional[GeminiVideoMetadata] = None diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 309e9a2d2..c1941cbe9 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -26,7 +26,7 @@ from comfy_api_nodes.apis import ( GeminiPart, GeminiMimeType, ) -from comfy_api_nodes.apis.gemini_api import GeminiImageGenerationConfig, GeminiImageGenerateContentRequest +from comfy_api_nodes.apis.gemini_api import GeminiImageGenerationConfig, GeminiImageGenerateContentRequest, GeminiImageConfig from comfy_api_nodes.apis.client import ( ApiEndpoint, HttpMethod, @@ -63,6 +63,7 @@ class GeminiImageModel(str, Enum): """ gemini_2_5_flash_image_preview = "gemini-2.5-flash-image-preview" + gemini_2_5_flash_image = "gemini-2.5-flash-image" def get_gemini_endpoint( @@ -538,7 +539,7 @@ class GeminiImage(ComfyNodeABC): { "tooltip": "The Gemini model to use for generating responses.", "options": [model.value for model in GeminiImageModel], - "default": GeminiImageModel.gemini_2_5_flash_image_preview.value, + "default": GeminiImageModel.gemini_2_5_flash_image.value, }, ), "seed": ( @@ -579,6 +580,14 @@ class GeminiImage(ComfyNodeABC): # "tooltip": "How many images to generate", # }, # ), + "aspect_ratio": ( + IO.COMBO, + { + "tooltip": "Defaults to matching the output image size to that of your input image, or otherwise generates 1:1 squares.", + "options": ["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"], + "default": "auto", + }, + ), }, "hidden": { "auth_token": "AUTH_TOKEN_COMFY_ORG", @@ -600,15 +609,17 @@ class GeminiImage(ComfyNodeABC): images: Optional[IO.IMAGE] = None, files: Optional[list[GeminiPart]] = None, n=1, + aspect_ratio: str = "auto", unique_id: Optional[str] = None, **kwargs, ): - # Validate inputs validate_string(prompt, strip_whitespace=True, min_length=1) - # Create parts list with text prompt as the first part parts: list[GeminiPart] = [create_text_part(prompt)] - # Add other modal parts + if not aspect_ratio: + aspect_ratio = "auto" # for backward compatibility with old workflows; to-do remove this in December + image_config = GeminiImageConfig(aspectRatio=aspect_ratio) + if images is not None: image_parts = create_image_parts(images) parts.extend(image_parts) @@ -625,7 +636,8 @@ class GeminiImage(ComfyNodeABC): ), ], generationConfig=GeminiImageGenerationConfig( - responseModalities=["TEXT","IMAGE"] + responseModalities=["TEXT","IMAGE"], + imageConfig=None if aspect_ratio == "auto" else image_config, ) ), auth_kwargs=kwargs, From 14d642acd66973c81a806dc6f0562d89b4ba3506 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 11 Oct 2025 02:21:40 +0300 Subject: [PATCH 0738/1073] feat(api-nodes): add price extractor feature; small fixes to Kling & Pika nodes (#10284) --- comfy_api_nodes/apis/client.py | 15 ++++++++++++--- comfy_api_nodes/nodes_kling.py | 33 +++++++++++++++++++-------------- comfy_api_nodes/nodes_pika.py | 2 ++ 3 files changed, 33 insertions(+), 17 deletions(-) diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index e08dfb093..d05e1c16a 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -782,9 +782,11 @@ class PollingOperation(Generic[T, R]): poll_endpoint: 
ApiEndpoint[EmptyRequest, R], completed_statuses: list[str], failed_statuses: list[str], + *, status_extractor: Callable[[R], Optional[str]], progress_extractor: Callable[[R], Optional[float]] | None = None, result_url_extractor: Callable[[R], Optional[str]] | None = None, + price_extractor: Callable[[R], Optional[float]] | None = None, request: Optional[T] = None, api_base: str | None = None, auth_token: Optional[str] = None, @@ -815,10 +817,12 @@ class PollingOperation(Generic[T, R]): self.status_extractor = status_extractor or (lambda x: getattr(x, "status", None)) self.progress_extractor = progress_extractor self.result_url_extractor = result_url_extractor + self.price_extractor = price_extractor self.node_id = node_id self.completed_statuses = completed_statuses self.failed_statuses = failed_statuses self.final_response: Optional[R] = None + self.extracted_price: Optional[float] = None async def execute(self, client: Optional[ApiClient] = None) -> R: owns_client = client is None @@ -840,6 +844,8 @@ class PollingOperation(Generic[T, R]): def _display_text_on_node(self, text: str): if not self.node_id: return + if self.extracted_price is not None: + text = f"Price: {self.extracted_price}$\n{text}" PromptServer.instance.send_progress_text(text, self.node_id) def _display_time_progress_on_node(self, time_completed: int | float): @@ -877,9 +883,7 @@ class PollingOperation(Generic[T, R]): try: logging.debug("[DEBUG] Polling attempt #%s", poll_count) - request_dict = ( - None if self.request is None else self.request.model_dump(exclude_none=True) - ) + request_dict = None if self.request is None else self.request.model_dump(exclude_none=True) if poll_count == 1: logging.debug( @@ -912,6 +916,11 @@ class PollingOperation(Generic[T, R]): if new_progress is not None: progress.update_absolute(new_progress, total=PROGRESS_BAR_MAX) + if self.price_extractor: + price = self.price_extractor(response_obj) + if price is not None: + self.extracted_price = price + if status == TaskStatus.COMPLETED: message = "Task completed successfully" if self.result_url_extractor: diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index a3cd09786..2117cfa91 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -73,6 +73,7 @@ from comfy_api_nodes.util.validation_utils import ( validate_video_dimensions, validate_video_duration, ) +from comfy_api.input_impl import VideoFromFile from comfy_api.input.basic_types import AudioInput from comfy_api.input.video_types import VideoInput from comfy_api.latest import ComfyExtension, io as comfy_io @@ -511,7 +512,7 @@ async def execute_video_effect( image_1: torch.Tensor, image_2: Optional[torch.Tensor] = None, model_mode: Optional[KlingVideoGenMode] = None, -) -> comfy_io.NodeOutput: +) -> tuple[VideoFromFile, str, str]: if dual_character: request_input_field = KlingDualCharacterEffectInput( model_name=model_name, @@ -562,7 +563,7 @@ async def execute_video_effect( validate_video_result_response(final_response) video = get_video_from_response(final_response) - return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + return await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration) async def execute_lipsync( @@ -1271,7 +1272,7 @@ class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode): image_1=image_left, image_2=image_right, ) - return video, duration + return comfy_io.NodeOutput(video, duration) class 
KlingSingleImageVideoEffectNode(comfy_io.ComfyNode): @@ -1320,17 +1321,21 @@ class KlingSingleImageVideoEffectNode(comfy_io.ComfyNode): model_name: KlingSingleImageEffectModelName, duration: KlingVideoGenDuration, ) -> comfy_io.NodeOutput: - return await execute_video_effect( - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - node_id=cls.hidden.unique_id, - dual_character=False, - effect_scene=effect_scene, - model_name=model_name, - duration=duration, - image_1=image, + return comfy_io.NodeOutput( + *( + await execute_video_effect( + auth_kwargs={ + "auth_token": cls.hidden.auth_token_comfy_org, + "comfy_api_key": cls.hidden.api_key_comfy_org, + }, + node_id=cls.hidden.unique_id, + dual_character=False, + effect_scene=effect_scene, + model_name=model_name, + duration=duration, + image_1=image, + ) + ) ) diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index 10f11666d..822cfee64 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -17,6 +17,7 @@ from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoIn from comfy_api_nodes.apinode_utils import ( download_url_to_video_output, tensor_to_bytesio, + validate_string, ) from comfy_api_nodes.apis import pika_defs from comfy_api_nodes.apis.client import ( @@ -590,6 +591,7 @@ class PikaStartEndFrameNode(comfy_io.ComfyNode): resolution: str, duration: int, ) -> comfy_io.NodeOutput: + validate_string(prompt_text, field_name="prompt_text", min_length=1) pika_files = [ ("keyFrames", ("image_start.png", tensor_to_bytesio(image_start), "image/png")), ("keyFrames", ("image_end.png", tensor_to_bytesio(image_end), "image/png")), From f43b8ab2a2eda034651187222829f72aa82eae6c Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sun, 12 Oct 2025 01:27:22 +0800 Subject: [PATCH 0739/1073] Update template to 0.1.95 (#10294) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d4594df39..9e0a5e0de 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.27.10 -comfyui-workflow-templates==0.1.94 +comfyui-workflow-templates==0.1.95 comfyui-embedded-docs==0.2.6 torch torchsde From 84e9ce32c6d9d340404ee0798a426dae52bbee8b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 11 Oct 2025 19:57:23 -0700 Subject: [PATCH 0740/1073] Implement the mmaudio VAE. 
(#10300) --- comfy/ldm/mmaudio/vae/__init__.py | 0 comfy/ldm/mmaudio/vae/activations.py | 120 ++++++++ comfy/ldm/mmaudio/vae/alias_free_torch.py | 157 ++++++++++ comfy/ldm/mmaudio/vae/autoencoder.py | 156 ++++++++++ comfy/ldm/mmaudio/vae/bigvgan.py | 219 +++++++++++++ comfy/ldm/mmaudio/vae/distributions.py | 92 ++++++ comfy/ldm/mmaudio/vae/vae.py | 358 ++++++++++++++++++++++ comfy/ldm/mmaudio/vae/vae_modules.py | 121 ++++++++ comfy/sd.py | 24 ++ 9 files changed, 1247 insertions(+) create mode 100644 comfy/ldm/mmaudio/vae/__init__.py create mode 100644 comfy/ldm/mmaudio/vae/activations.py create mode 100644 comfy/ldm/mmaudio/vae/alias_free_torch.py create mode 100644 comfy/ldm/mmaudio/vae/autoencoder.py create mode 100644 comfy/ldm/mmaudio/vae/bigvgan.py create mode 100644 comfy/ldm/mmaudio/vae/distributions.py create mode 100644 comfy/ldm/mmaudio/vae/vae.py create mode 100644 comfy/ldm/mmaudio/vae/vae_modules.py diff --git a/comfy/ldm/mmaudio/vae/__init__.py b/comfy/ldm/mmaudio/vae/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/comfy/ldm/mmaudio/vae/activations.py b/comfy/ldm/mmaudio/vae/activations.py new file mode 100644 index 000000000..db9192e3e --- /dev/null +++ b/comfy/ldm/mmaudio/vae/activations.py @@ -0,0 +1,120 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter +import comfy.model_management + +class Snake(nn.Module): + ''' + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. + ''' + super(Snake, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: + self.alpha = Parameter(torch.empty(in_features)) + else: + self.alpha = Parameter(torch.empty(in_features)) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. 
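+        (alpha has shape (C,) and is broadcast as (1, C, 1) against the (B, C, T) input, i.e. one trainable frequency per channel)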
+ Snake ∶= x + 1/a * sin^2 (xa) + ''' + alpha = comfy.model_management.cast_to(self.alpha, dtype=x.dtype, device=x.device).unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + ''' + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + ''' + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: + self.alpha = Parameter(torch.empty(in_features)) + self.beta = Parameter(torch.empty(in_features)) + else: + self.alpha = Parameter(torch.empty(in_features)) + self.beta = Parameter(torch.empty(in_features)) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. + SnakeBeta ∶= x + 1/b * sin^2 (xa) + ''' + alpha = comfy.model_management.cast_to(self.alpha, dtype=x.dtype, device=x.device).unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + beta = comfy.model_management.cast_to(self.beta, dtype=x.dtype, device=x.device).unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/comfy/ldm/mmaudio/vae/alias_free_torch.py b/comfy/ldm/mmaudio/vae/alias_free_torch.py new file mode 100644 index 000000000..35c70b897 --- /dev/null +++ b/comfy/ldm/mmaudio/vae/alias_free_torch.py @@ -0,0 +1,157 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import math +import comfy.model_management + +if 'sinc' in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! + """ + return torch.where(x == 0, + torch.tensor(1., device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. 
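+# The helpers below build a Kaiser-windowed sinc low-pass filter once and
+# apply it with grouped (transposed) 1D convolutions, giving the
+# StyleGAN3-style anti-aliased resampling that wraps the snake activations.
+# Minimal usage sketch, assuming a [B, C, T] input (illustrative only, not
+# part of the upstream module):
+#
+#   up = UpSample1d(ratio=2)              # zero-stuff 2x, then low-pass
+#   down = DownSample1d(ratio=2)          # low-pass, then decimate 2x
+#   y = down(up(torch.randn(1, 4, 128)))  # y.shape == (1, 4, 128)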
+def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size] + even = (kernel_size % 2 == 0) + half_size = kernel_size // 2 + + #For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.: + beta = 0.1102 * (A - 8.7) + elif A >= 21.: + beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.) + else: + beta = 0. + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = (torch.arange(-half_size, half_size) + 0.5) + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + # Normalize filter to have sum = 1, otherwise we will have a small leakage + # of the constant component in the input signal. + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__(self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = 'replicate', + kernel_size: int = 12): + # kernel_size should be even number for stylegan3 setup, + # in this implementation, odd number is also possible. + super().__init__() + if cutoff < -0.: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = (kernel_size % 2 == 0) + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + #input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), + mode=self.padding_mode) + out = F.conv1d(x, comfy.model_management.cast_to(self.filter.expand(C, -1, -1), dtype=x.dtype, device=x.device), + stride=self.stride, groups=C) + + return out + + +class UpSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode='replicate') + x = self.ratio * F.conv_transpose1d( + x, comfy.model_management.cast_to(self.filter.expand(C, -1, -1), dtype=x.dtype, device=x.device), stride=self.stride, groups=C) + x = x[..., self.pad_left:-self.pad_right] + + return x + + +class DownSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size) + + def forward(self, x): + xx = self.lowpass(x) + + return xx + +class Activation1d(nn.Module): + def __init__(self, + activation, + 
up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x diff --git a/comfy/ldm/mmaudio/vae/autoencoder.py b/comfy/ldm/mmaudio/vae/autoencoder.py new file mode 100644 index 000000000..cbb9de302 --- /dev/null +++ b/comfy/ldm/mmaudio/vae/autoencoder.py @@ -0,0 +1,156 @@ +from typing import Literal + +import torch +import torch.nn as nn + +from .distributions import DiagonalGaussianDistribution +from .vae import VAE_16k +from .bigvgan import BigVGANVocoder +import logging + +try: + import torchaudio +except: + logging.warning("torchaudio missing, MMAudio VAE model will be broken") + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, *, norm_fn): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + +class MelConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float, + n_fft: int, + num_mels: int, + hop_size: int, + win_size: int, + fmin: float, + fmax: float, + norm_fn, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + # mel = librosa_mel_fn(sr=self.sampling_rate, + # n_fft=self.n_fft, + # n_mels=self.num_mels, + # fmin=self.fmin, + # fmax=self.fmax) + # mel_basis = torch.from_numpy(mel).float() + mel_basis = torch.empty((num_mels, 1 + n_fft // 2)) + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.mel_basis.device + + def forward(self, waveform: torch.Tensor, center: bool = False) -> torch.Tensor: + waveform = waveform.clamp(min=-1., max=1.).to(self.device) + + waveform = torch.nn.functional.pad( + waveform.unsqueeze(1), + [int((self.n_fft - self.hop_size) / 2), + int((self.n_fft - self.hop_size) / 2)], + mode='reflect') + waveform = waveform.squeeze(1) + + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=center, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + spec = torch.matmul(self.mel_basis, spec) + spec = spectral_normalize_torch(spec, self.norm_fn) + + return spec + +class AudioAutoencoder(nn.Module): + + def __init__( + self, + *, + # ckpt_path: str, + mode=Literal['16k', '44k'], + need_vae_encoder: bool = True, + ): + super().__init__() + + assert mode == "16k", "Only 16k mode is supported currently." 
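+        # Data flow: encode() folds stereo to mono, resamples 44.1kHz -> 16kHz,
+        # takes an 80-bin log10 mel spectrogram and compresses it with VAE_16k
+        # into a 20-channel latent; decode() inverts the VAE, renders 16kHz
+        # audio with the BigVGAN vocoder, then resamples back to 44.1kHz.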
+ self.mel_converter = MelConverter(sampling_rate=16_000, + n_fft=1024, + num_mels=80, + hop_size=256, + win_size=1024, + fmin=0, + fmax=8_000, + norm_fn=torch.log10) + + self.vae = VAE_16k().eval() + + bigvgan_config = { + "resblock": "1", + "num_mels": 80, + "upsample_rates": [4, 4, 2, 2, 2, 2], + "upsample_kernel_sizes": [8, 8, 4, 4, 4, 4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3, 7, 11], + "resblock_dilation_sizes": [ + [1, 3, 5], + [1, 3, 5], + [1, 3, 5], + ], + "activation": "snakebeta", + "snake_logscale": True, + } + + self.vocoder = BigVGANVocoder( + bigvgan_config + ).eval() + + @torch.inference_mode() + def encode_audio(self, x) -> DiagonalGaussianDistribution: + # x: (B * L) + mel = self.mel_converter(x) + dist = self.vae.encode(mel) + + return dist + + @torch.no_grad() + def decode(self, z): + mel_decoded = self.vae.decode(z) + audio = self.vocoder(mel_decoded) + + audio = torchaudio.functional.resample(audio, 16000, 44100) + return audio + + @torch.no_grad() + def encode(self, audio): + audio = audio.mean(dim=1) + audio = torchaudio.functional.resample(audio, 44100, 16000) + dist = self.encode_audio(audio) + return dist.mean diff --git a/comfy/ldm/mmaudio/vae/bigvgan.py b/comfy/ldm/mmaudio/vae/bigvgan.py new file mode 100644 index 000000000..3a24337f6 --- /dev/null +++ b/comfy/ldm/mmaudio/vae/bigvgan.py @@ -0,0 +1,219 @@ +# Copyright (c) 2022 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +from types import SimpleNamespace +from . import activations +from .alias_free_torch import Activation1d +import comfy.ops +ops = comfy.ops.disable_weight_init + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + +class AMPBlock1(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None): + super(AMPBlock1, self).__init__() + self.h = h + + self.convs1 = nn.ModuleList([ + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0])), + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1])), + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2])) + ]) + + self.convs2 = nn.ModuleList([ + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1)), + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1)), + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1)) + ]) + + self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. 
check the config file and look for 'activation'." + ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + +class AMPBlock2(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None): + super(AMPBlock2, self).__init__() + self.h = h + + self.convs = nn.ModuleList([ + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0])), + ops.Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1])) + ]) + + self.num_layers = len(self.convs) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + + return x + + +class BigVGANVocoder(torch.nn.Module): + # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks. + def __init__(self, h): + super().__init__() + if isinstance(h, dict): + h = SimpleNamespace(**h) + self.h = h + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # pre conv + self.conv_pre = ops.Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3) + + # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2 + + # transposed conv-based upsamplers. does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList([ + ops.ConvTranspose1d(h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2) + ])) + + # residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock(h, ch, k, d, activation=h.activation)) + + # post conv + if h.activation == "snake": # periodic nonlinearity with snake function and anti-aliasing + activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + elif h.activation == "snakebeta": # periodic nonlinearity with snakebeta function and anti-aliasing + activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." 
+ ) + + self.conv_post = ops.Conv1d(ch, 1, 7, 1, padding=3) + + + def forward(self, x): + # pre conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # post conv + x = self.activation_post(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x diff --git a/comfy/ldm/mmaudio/vae/distributions.py b/comfy/ldm/mmaudio/vae/distributions.py new file mode 100644 index 000000000..df987c5ec --- /dev/null +++ b/comfy/ldm/mmaudio/vae/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean, device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape, device=self.parameters.device) + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). 
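+    # Closed form being computed below, with v_i = exp(logvar_i):
+    #   KL(N(m1, v1) || N(m2, v2)) = 0.5 * (log(v2/v1) + (v1 + (m1 - m2)^2) / v2 - 1)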
+ logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/comfy/ldm/mmaudio/vae/vae.py b/comfy/ldm/mmaudio/vae/vae.py new file mode 100644 index 000000000..62f24606c --- /dev/null +++ b/comfy/ldm/mmaudio/vae/vae.py @@ -0,0 +1,358 @@ +import logging +from typing import Optional + +import torch +import torch.nn as nn + +from .vae_modules import (AttnBlock1D, Downsample1D, ResnetBlock1D, + Upsample1D, nonlinearity) +from .distributions import DiagonalGaussianDistribution + +import comfy.ops +ops = comfy.ops.disable_weight_init + +log = logging.getLogger() + +DATA_MEAN_80D = [ + -1.6058, -1.3676, -1.2520, -1.2453, -1.2078, -1.2224, -1.2419, -1.2439, -1.2922, -1.2927, + -1.3170, -1.3543, -1.3401, -1.3836, -1.3907, -1.3912, -1.4313, -1.4152, -1.4527, -1.4728, + -1.4568, -1.5101, -1.5051, -1.5172, -1.5623, -1.5373, -1.5746, -1.5687, -1.6032, -1.6131, + -1.6081, -1.6331, -1.6489, -1.6489, -1.6700, -1.6738, -1.6953, -1.6969, -1.7048, -1.7280, + -1.7361, -1.7495, -1.7658, -1.7814, -1.7889, -1.8064, -1.8221, -1.8377, -1.8417, -1.8643, + -1.8857, -1.8929, -1.9173, -1.9379, -1.9531, -1.9673, -1.9824, -2.0042, -2.0215, -2.0436, + -2.0766, -2.1064, -2.1418, -2.1855, -2.2319, -2.2767, -2.3161, -2.3572, -2.3954, -2.4282, + -2.4659, -2.5072, -2.5552, -2.6074, -2.6584, -2.7107, -2.7634, -2.8266, -2.8981, -2.9673 +] + +DATA_STD_80D = [ + 1.0291, 1.0411, 1.0043, 0.9820, 0.9677, 0.9543, 0.9450, 0.9392, 0.9343, 0.9297, 0.9276, 0.9263, + 0.9242, 0.9254, 0.9232, 0.9281, 0.9263, 0.9315, 0.9274, 0.9247, 0.9277, 0.9199, 0.9188, 0.9194, + 0.9160, 0.9161, 0.9146, 0.9161, 0.9100, 0.9095, 0.9145, 0.9076, 0.9066, 0.9095, 0.9032, 0.9043, + 0.9038, 0.9011, 0.9019, 0.9010, 0.8984, 0.8983, 0.8986, 0.8961, 0.8962, 0.8978, 0.8962, 0.8973, + 0.8993, 0.8976, 0.8995, 0.9016, 0.8982, 0.8972, 0.8974, 0.8949, 0.8940, 0.8947, 0.8936, 0.8939, + 0.8951, 0.8956, 0.9017, 0.9167, 0.9436, 0.9690, 1.0003, 1.0225, 1.0381, 1.0491, 1.0545, 1.0604, + 1.0761, 1.0929, 1.1089, 1.1196, 1.1176, 1.1156, 1.1117, 1.1070 +] + +DATA_MEAN_128D = [ + -3.3462, -2.6723, -2.4893, -2.3143, -2.2664, -2.3317, -2.1802, -2.4006, -2.2357, -2.4597, + -2.3717, -2.4690, -2.5142, -2.4919, -2.6610, -2.5047, -2.7483, -2.5926, -2.7462, -2.7033, + -2.7386, -2.8112, -2.7502, -2.9594, -2.7473, -3.0035, -2.8891, -2.9922, -2.9856, -3.0157, + -3.1191, -2.9893, -3.1718, -3.0745, -3.1879, -3.2310, -3.1424, -3.2296, -3.2791, -3.2782, + -3.2756, -3.3134, -3.3509, -3.3750, -3.3951, -3.3698, -3.4505, -3.4509, -3.5089, -3.4647, + -3.5536, -3.5788, -3.5867, -3.6036, -3.6400, -3.6747, -3.7072, -3.7279, -3.7283, -3.7795, + -3.8259, -3.8447, -3.8663, -3.9182, -3.9605, -3.9861, -4.0105, -4.0373, -4.0762, -4.1121, + -4.1488, -4.1874, -4.2461, -4.3170, -4.3639, -4.4452, -4.5282, -4.6297, -4.7019, -4.7960, + -4.8700, -4.9507, -5.0303, -5.0866, -5.1634, -5.2342, -5.3242, -5.4053, -5.4927, -5.5712, + -5.6464, -5.7052, -5.7619, -5.8410, -5.9188, -6.0103, -6.0955, -6.1673, -6.2362, -6.3120, + -6.3926, -6.4797, -6.5565, -6.6511, -6.8130, -6.9961, -7.1275, -7.2457, -7.3576, -7.4663, + -7.6136, -7.7469, -7.8815, -8.0132, -8.1515, -8.3071, -8.4722, -8.7418, -9.3975, -9.6628, + -9.7671, -9.8863, -9.9992, -10.0860, -10.1709, -10.5418, -11.2795, -11.3861 +] + +DATA_STD_128D = [ + 2.3804, 2.4368, 2.3772, 2.3145, 2.2803, 2.2510, 2.2316, 2.2083, 2.1996, 2.1835, 2.1769, 2.1659, + 2.1631, 2.1618, 
2.1540, 2.1606, 2.1571, 2.1567, 2.1612, 2.1579, 2.1679, 2.1683, 2.1634, 2.1557, + 2.1668, 2.1518, 2.1415, 2.1449, 2.1406, 2.1350, 2.1313, 2.1415, 2.1281, 2.1352, 2.1219, 2.1182, + 2.1327, 2.1195, 2.1137, 2.1080, 2.1179, 2.1036, 2.1087, 2.1036, 2.1015, 2.1068, 2.0975, 2.0991, + 2.0902, 2.1015, 2.0857, 2.0920, 2.0893, 2.0897, 2.0910, 2.0881, 2.0925, 2.0873, 2.0960, 2.0900, + 2.0957, 2.0958, 2.0978, 2.0936, 2.0886, 2.0905, 2.0845, 2.0855, 2.0796, 2.0840, 2.0813, 2.0817, + 2.0838, 2.0840, 2.0917, 2.1061, 2.1431, 2.1976, 2.2482, 2.3055, 2.3700, 2.4088, 2.4372, 2.4609, + 2.4731, 2.4847, 2.5072, 2.5451, 2.5772, 2.6147, 2.6529, 2.6596, 2.6645, 2.6726, 2.6803, 2.6812, + 2.6899, 2.6916, 2.6931, 2.6998, 2.7062, 2.7262, 2.7222, 2.7158, 2.7041, 2.7485, 2.7491, 2.7451, + 2.7485, 2.7233, 2.7297, 2.7233, 2.7145, 2.6958, 2.6788, 2.6439, 2.6007, 2.4786, 2.2469, 2.1877, + 2.1392, 2.0717, 2.0107, 1.9676, 1.9140, 1.7102, 0.9101, 0.7164 +] + + +class VAE(nn.Module): + + def __init__( + self, + *, + data_dim: int, + embed_dim: int, + hidden_dim: int, + ): + super().__init__() + + if data_dim == 80: + self.data_mean = nn.Buffer(torch.tensor(DATA_MEAN_80D, dtype=torch.float32)) + self.data_std = nn.Buffer(torch.tensor(DATA_STD_80D, dtype=torch.float32)) + elif data_dim == 128: + self.data_mean = nn.Buffer(torch.tensor(DATA_MEAN_128D, dtype=torch.float32)) + self.data_std = nn.Buffer(torch.tensor(DATA_STD_128D, dtype=torch.float32)) + + self.data_mean = self.data_mean.view(1, -1, 1) + self.data_std = self.data_std.view(1, -1, 1) + + self.encoder = Encoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + down_layers=[0], + in_dim=data_dim, + embed_dim=embed_dim, + ) + self.decoder = Decoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + down_layers=[0], + in_dim=data_dim, + out_dim=data_dim, + embed_dim=embed_dim, + ) + + self.embed_dim = embed_dim + # self.quant_conv = nn.Conv1d(2 * embed_dim, 2 * embed_dim, 1) + # self.post_quant_conv = nn.Conv1d(embed_dim, embed_dim, 1) + + self.initialize_weights() + + def initialize_weights(self): + pass + + def encode(self, x: torch.Tensor, normalize: bool = True) -> DiagonalGaussianDistribution: + if normalize: + x = self.normalize(x) + moments = self.encoder(x) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z: torch.Tensor, unnormalize: bool = True) -> torch.Tensor: + dec = self.decoder(z) + if unnormalize: + dec = self.unnormalize(dec) + return dec + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + return (x - comfy.model_management.cast_to(self.data_mean, dtype=x.dtype, device=x.device)) / comfy.model_management.cast_to(self.data_std, dtype=x.dtype, device=x.device) + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + return x * comfy.model_management.cast_to(self.data_std, dtype=x.dtype, device=x.device) + comfy.model_management.cast_to(self.data_mean, dtype=x.dtype, device=x.device) + + def forward( + self, + x: torch.Tensor, + sample_posterior: bool = True, + rng: Optional[torch.Generator] = None, + normalize: bool = True, + unnormalize: bool = True, + ) -> tuple[torch.Tensor, DiagonalGaussianDistribution]: + + posterior = self.encode(x, normalize=normalize) + if sample_posterior: + z = posterior.sample(rng) + else: + z = posterior.mode() + dec = self.decode(z, unnormalize=unnormalize) + return dec, posterior + + def load_weights(self, src_dict) -> None: + self.load_state_dict(src_dict, strict=True) + + @property + def device(self) -> 
torch.device: + return next(self.parameters()).device + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def remove_weight_norm(self): + return self + + +class Encoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + double_z: bool = True, + kernel_size: int = 3, + clip_act: float = 256.0): + super().__init__() + self.dim = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = down_layers + self.attn_layers = attn_layers + self.conv_in = ops.Conv1d(in_dim, self.dim, kernel_size=kernel_size, padding=kernel_size // 2, bias=False) + + in_ch_mult = (1, ) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + # downsampling + self.down = nn.ModuleList() + for i_level in range(self.num_layers): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = dim * in_ch_mult[i_level] + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock1D(in_dim=block_in, + out_dim=block_out, + kernel_size=kernel_size, + use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level in down_layers: + down.downsample = Downsample1D(block_in, resamp_with_conv) + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + + # end + self.conv_out = ops.Conv1d(block_in, + 2 * embed_dim if double_z else embed_dim, + kernel_size=kernel_size, padding=kernel_size // 2, bias=False) + + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, x): + + # downsampling + h = self.conv_in(x) + for i_level in range(self.num_layers): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](h) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + h = h.clamp(-self.clip_act, self.clip_act) + if i_level in self.down_layers: + h = self.down[i_level].downsample(h) + + # middle + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = h.clamp(-self.clip_act, self.clip_act) + + # end + h = nonlinearity(h) + h = self.conv_out(h) * (self.learnable_gain + 1) + return h + + +class Decoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + out_dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + kernel_size: int = 3, + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + clip_act: float = 256.0): + super().__init__() + self.ch = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = [i + 1 for i in down_layers] # each downlayer add one + + # compute in_ch_mult, block_in and curr_res at lowest res + block_in = dim * ch_mult[self.num_layers - 1] + + # z to block_in + self.conv_in = ops.Conv1d(embed_dim, block_in, kernel_size=kernel_size, padding=kernel_size // 2, bias=False) + + # middle + self.mid = 
nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_layers)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append(ResnetBlock1D(in_dim=block_in, out_dim=block_out, use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level in self.down_layers: + up.upsample = Upsample1D(block_in, resamp_with_conv) + self.up.insert(0, up) # prepend to get consistent order + + # end + self.conv_out = ops.Conv1d(block_in, out_dim, kernel_size=kernel_size, padding=kernel_size // 2, bias=False) + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, z): + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = h.clamp(-self.clip_act, self.clip_act) + + # upsampling + for i_level in reversed(range(self.num_layers)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + h = h.clamp(-self.clip_act, self.clip_act) + if i_level in self.down_layers: + h = self.up[i_level].upsample(h) + + h = nonlinearity(h) + h = self.conv_out(h) * (self.learnable_gain + 1) + return h + + +def VAE_16k(**kwargs) -> VAE: + return VAE(data_dim=80, embed_dim=20, hidden_dim=384, **kwargs) + + +def VAE_44k(**kwargs) -> VAE: + return VAE(data_dim=128, embed_dim=40, hidden_dim=512, **kwargs) + + +def get_my_vae(name: str, **kwargs) -> VAE: + if name == '16k': + return VAE_16k(**kwargs) + if name == '44k': + return VAE_44k(**kwargs) + raise ValueError(f'Unknown model: {name}') + diff --git a/comfy/ldm/mmaudio/vae/vae_modules.py b/comfy/ldm/mmaudio/vae/vae_modules.py new file mode 100644 index 000000000..3ad05134b --- /dev/null +++ b/comfy/ldm/mmaudio/vae/vae_modules.py @@ -0,0 +1,121 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from comfy.ldm.modules.diffusionmodules.model import vae_attention +import math +import comfy.ops +ops = comfy.ops.disable_weight_init + +def nonlinearity(x): + # swish + return torch.nn.functional.silu(x) / 0.596 + +def mp_sum(a, b, t=0.5): + return a.lerp(b, t) / math.sqrt((1 - t)**2 + t**2) + +def normalize(x, dim=None, eps=1e-4): + if dim is None: + dim = list(range(1, x.ndim)) + norm = torch.linalg.vector_norm(x, dim=dim, keepdim=True, dtype=torch.float32) + norm = torch.add(eps, norm, alpha=math.sqrt(norm.numel() / x.numel())) + return x / norm.to(x.dtype) + +class ResnetBlock1D(nn.Module): + + def __init__(self, *, in_dim, out_dim=None, conv_shortcut=False, kernel_size=3, use_norm=True): + super().__init__() + self.in_dim = in_dim + out_dim = in_dim if out_dim is None else out_dim + self.out_dim = out_dim + self.use_conv_shortcut = conv_shortcut + self.use_norm = use_norm + + self.conv1 = ops.Conv1d(in_dim, out_dim, kernel_size=kernel_size, padding=kernel_size // 2, bias=False) + self.conv2 = ops.Conv1d(out_dim, out_dim, kernel_size=kernel_size, padding=kernel_size // 2, bias=False) + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + self.conv_shortcut = ops.Conv1d(in_dim, out_dim, 
kernel_size=kernel_size, padding=kernel_size // 2, bias=False) + else: + self.nin_shortcut = ops.Conv1d(in_dim, out_dim, kernel_size=1, padding=0, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + + # pixel norm + if self.use_norm: + x = normalize(x, dim=1) + + h = x + h = nonlinearity(h) + h = self.conv1(h) + + h = nonlinearity(h) + h = self.conv2(h) + + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return mp_sum(x, h, t=0.3) + + +class AttnBlock1D(nn.Module): + + def __init__(self, in_channels, num_heads=1): + super().__init__() + self.in_channels = in_channels + + self.num_heads = num_heads + self.qkv = ops.Conv1d(in_channels, in_channels * 3, kernel_size=1, padding=0, bias=False) + self.proj_out = ops.Conv1d(in_channels, in_channels, kernel_size=1, padding=0, bias=False) + self.optimized_attention = vae_attention() + + def forward(self, x): + h = x + y = self.qkv(h) + y = y.reshape(y.shape[0], -1, 3, y.shape[-1]) + q, k, v = normalize(y, dim=1).unbind(2) + + h = self.optimized_attention(q, k, v) + h = self.proj_out(h) + + return mp_sum(x, h, t=0.3) + + +class Upsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = ops.Conv1d(in_channels, in_channels, kernel_size=3, padding=1, bias=False) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2.0, mode='nearest-exact') # support 3D tensor(B,C,T) + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv1 = ops.Conv1d(in_channels, in_channels, kernel_size=1, padding=0, bias=False) + self.conv2 = ops.Conv1d(in_channels, in_channels, kernel_size=1, padding=0, bias=False) + + def forward(self, x): + + if self.with_conv: + x = self.conv1(x) + + x = F.avg_pool1d(x, kernel_size=2, stride=2) + + if self.with_conv: + x = self.conv2(x) + + return x diff --git a/comfy/sd.py b/comfy/sd.py index f2d95f85a..b9c2e995e 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -18,6 +18,7 @@ import comfy.ldm.wan.vae2_2 import comfy.ldm.hunyuan3d.vae import comfy.ldm.ace.vae.music_dcae_pipeline import comfy.ldm.hunyuan_video.vae +import comfy.ldm.mmaudio.vae.autoencoder import comfy.pixel_space_convert import yaml import math @@ -291,6 +292,7 @@ class VAE: self.downscale_index_formula = None self.upscale_index_formula = None self.extra_1d_channel = None + self.crop_input = True if config is None: if "decoder.mid.block_1.mix_factor" in sd: @@ -542,6 +544,25 @@ class VAE: self.latent_channels = 3 self.latent_dim = 2 self.output_channels = 3 + elif "vocoder.activation_post.downsample.lowpass.filter" in sd: #MMAudio VAE + sample_rate = 16000 + if sample_rate == 16000: + mode = '16k' + else: + mode = '44k' + + self.first_stage_model = comfy.ldm.mmaudio.vae.autoencoder.AudioAutoencoder(mode=mode) + self.memory_used_encode = lambda shape, dtype: (30 * shape[2]) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (90 * shape[2] * 1411.2) * model_management.dtype_size(dtype) + self.latent_channels = 20 + self.output_channels = 2 + self.upscale_ratio = 512 * (44100 / sample_rate) + self.downscale_ratio = 512 * (44100 / sample_rate) + self.latent_dim = 1 + self.process_output = lambda audio: audio + self.process_input = 
lambda audio: audio + self.working_dtypes = [torch.float32] + self.crop_input = False else: logging.warning("WARNING: No VAE weights detected, VAE not initalized.") self.first_stage_model = None @@ -575,6 +596,9 @@ class VAE: raise RuntimeError("ERROR: VAE is invalid: None\n\nIf the VAE is from a checkpoint loader node your checkpoint does not contain a valid VAE.") def vae_encode_crop_pixels(self, pixels): + if not self.crop_input: + return pixels + downscale_ratio = self.spacial_compression_encode() dims = pixels.shape[1:-1] From a125cd84b054a57729b5eecab930ca9408719832 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 11 Oct 2025 21:28:01 -0700 Subject: [PATCH 0741/1073] Improve AMD performance. (#10302) I honestly have no idea why this improves things but it does. --- comfy/model_management.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index c5b817b62..146c00925 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -332,6 +332,7 @@ except: SUPPORT_FP8_OPS = args.supports_fp8_compute try: if is_amd(): + torch.backends.cudnn.enabled = False # Seems to improve things a lot on AMD try: rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2])) except: @@ -925,11 +926,7 @@ def vae_dtype(device=None, allowed_dtypes=[]): if d == torch.float16 and should_use_fp16(device): return d - # NOTE: bfloat16 seems to work on AMD for the VAE but is extremely slow in some cases compared to fp32 - # slowness still a problem on pytorch nightly 2.9.0.dev20250720+rocm6.4 tested on RDNA3 - # also a problem on RDNA4 except fp32 is also slow there. - # This is due to large bf16 convolutions being extremely slow. 
- if d == torch.bfloat16 and ((not is_amd()) or amd_min_version(device, min_rdna_version=4)) and should_use_bf16(device): + if d == torch.bfloat16 and should_use_bf16(device): return d return torch.float32 From fdc92863b6dc6d0edff85e6dbb6a2382046c020d Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Mon, 13 Oct 2025 11:32:02 +0800 Subject: [PATCH 0742/1073] Update node docs to 0.3.0 (#10318) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9e0a5e0de..bbb22364f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ comfyui-frontend-package==1.27.10 comfyui-workflow-templates==0.1.95 -comfyui-embedded-docs==0.2.6 +comfyui-embedded-docs==0.3.0 torch torchsde torchvision From 894837de9ae9efd87ea81a29af66a9c29628ef47 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 12 Oct 2025 20:35:33 -0700 Subject: [PATCH 0743/1073] update extra models paths example (#10316) --- extra_model_paths.yaml.example | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/extra_model_paths.yaml.example b/extra_model_paths.yaml.example index b55913a5a..8d576e51d 100644 --- a/extra_model_paths.yaml.example +++ b/extra_model_paths.yaml.example @@ -28,7 +28,9 @@ a111: # # You can use is_default to mark that these folders should be listed first, and used as the default dirs for eg downloads # #is_default: true # checkpoints: models/checkpoints/ -# clip: models/clip/ +# text_encoders: | +# models/text_encoders/ +# models/clip/ # legacy location still supported # clip_vision: models/clip_vision/ # configs: models/configs/ # controlnet: models/controlnet/ @@ -40,6 +42,9 @@ a111: # upscale_models: models/upscale_models/ # vae: models/vae/ +# For a full list of supported keys (style_models, vae_approx, hypernetworks, photomaker, +# model_patches, audio_encoders, classifiers, etc.) see folder_paths.py. + #other_ui: # base_path: path/to/ui # checkpoints: models/checkpoints From d68ece7301c63da11e0b565da0ecc2900c8ea447 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 12 Oct 2025 20:54:41 -0700 Subject: [PATCH 0744/1073] Update the extra_model_paths.yaml.example (#10319) --- extra_model_paths.yaml.example | 43 ++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/extra_model_paths.yaml.example b/extra_model_paths.yaml.example index 8d576e51d..34df01681 100644 --- a/extra_model_paths.yaml.example +++ b/extra_model_paths.yaml.example @@ -1,25 +1,5 @@ #Rename this to extra_model_paths.yaml and ComfyUI will load it - -#config for a1111 ui -#all you have to do is change the base_path to where yours is installed -a111: - base_path: path/to/stable-diffusion-webui/ - - checkpoints: models/Stable-diffusion - configs: models/Stable-diffusion - vae: models/VAE - loras: | - models/Lora - models/LyCORIS - upscale_models: | - models/ESRGAN - models/RealESRGAN - models/SwinIR - embeddings: embeddings - hypernetworks: models/hypernetworks - controlnet: models/ControlNet - #config for comfyui #your base path should be either an existing comfy install or a central folder where you store all of your models, loras, etc. 
@@ -41,6 +21,29 @@ a111: # loras: models/loras/ # upscale_models: models/upscale_models/ # vae: models/vae/ +# audio_encoders: models/audio_encoders/ +# model_patches: models/model_patches/ + + +#config for a1111 ui +#all you have to do is uncomment this (remove the #) and change the base_path to where yours is installed + +#a111: +# base_path: path/to/stable-diffusion-webui/ +# checkpoints: models/Stable-diffusion +# configs: models/Stable-diffusion +# vae: models/VAE +# loras: | +# models/Lora +# models/LyCORIS +# upscale_models: | +# models/ESRGAN +# models/RealESRGAN +# models/SwinIR +# embeddings: embeddings +# hypernetworks: models/hypernetworks +# controlnet: models/ControlNet + # For a full list of supported keys (style_models, vae_approx, hypernetworks, photomaker, # model_patches, audio_encoders, classifiers, etc.) see folder_paths.py. From e693e4db6a2df8482599eed348be15f87799b910 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 13 Oct 2025 11:57:27 -0700 Subject: [PATCH 0745/1073] Always set diffusion model to eval() mode. (#10331) --- comfy/model_base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index b0b9cde7d..8274c7dea 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -138,6 +138,7 @@ class BaseModel(torch.nn.Module): else: operations = model_config.custom_operations self.diffusion_model = unet_model(**unet_config, device=device, operations=operations) + self.diffusion_model.eval() if comfy.model_management.force_channels_last(): self.diffusion_model.to(memory_format=torch.channels_last) logging.debug("using channels last mode for diffusion model") @@ -669,7 +670,6 @@ class Lotus(BaseModel): class StableCascade_C(BaseModel): def __init__(self, model_config, model_type=ModelType.STABLE_CASCADE, device=None): super().__init__(model_config, model_type, device=device, unet_model=StageC) - self.diffusion_model.eval().requires_grad_(False) def extra_conds(self, **kwargs): out = {} @@ -698,7 +698,6 @@ class StableCascade_C(BaseModel): class StableCascade_B(BaseModel): def __init__(self, model_config, model_type=ModelType.STABLE_CASCADE, device=None): super().__init__(model_config, model_type, device=device, unet_model=StageB) - self.diffusion_model.eval().requires_grad_(False) def extra_conds(self, **kwargs): out = {} From 27ffd12c45d4237338fe8789779313db9bab59f1 Mon Sep 17 00:00:00 2001 From: Daniel Harte Date: Mon, 13 Oct 2025 20:14:52 +0100 Subject: [PATCH 0746/1073] add indent=4 kwarg to json.dumps() (#10307) --- comfy_extras/nodes_preview_any.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_preview_any.py b/comfy_extras/nodes_preview_any.py index e6805696f..e749fa6ae 100644 --- a/comfy_extras/nodes_preview_any.py +++ b/comfy_extras/nodes_preview_any.py @@ -25,7 +25,7 @@ class PreviewAny(): value = str(source) elif source is not None: try: - value = json.dumps(source) + value = json.dumps(source, indent=4) except Exception: try: value = str(source) From 95ca2e56c82c1c714dba685bd81ebf3f7baf8efa Mon Sep 17 00:00:00 2001 From: rattus128 <46076784+rattus128@users.noreply.github.com> Date: Tue, 14 Oct 2025 05:23:11 +1000 Subject: [PATCH 0747/1073] WAN2.2: Fix cache VRAM leak on error (#10308) Same change pattern as 7e8dd275c243ad460ed5015d2e13611d81d2a569 applied to WAN2.2 If this suffers an exception (such as a VRAM oom) it will leave the encode() and decode() methods which skips the cleanup of the WAN 
feature cache. The comfy node cache then ultimately keeps a reference to
this object, which is in turn holding references to large tensors from the
failed execution.

The feature cache is currently set up as a class variable on the
encoder/decoder; however, the encode and decode functions always clear it
on both entry and exit of normal execution. It's likely the design intent
is that this is usable as a streaming encoder where the input comes in
batches, but the functions as they are today don't support that.

So simplify by bringing the cache back to a local variable, so that if it
does VRAM OOM the cache itself becomes garbage when the encode()/decode()
functions disappear from the stack.
---
 comfy/ldm/wan/vae2_2.py | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/comfy/ldm/wan/vae2_2.py b/comfy/ldm/wan/vae2_2.py
index 1f6d584a2..8e1593a54 100644
--- a/comfy/ldm/wan/vae2_2.py
+++ b/comfy/ldm/wan/vae2_2.py
@@ -657,51 +657,51 @@ class WanVAE(nn.Module):
         )

     def encode(self, x):
-        self.clear_cache()
+        conv_idx = [0]
+        feat_map = [None] * count_conv3d(self.encoder)
         x = patchify(x, patch_size=2)
         t = x.shape[2]
         iter_ = 1 + (t - 1) // 4
         for i in range(iter_):
-            self._enc_conv_idx = [0]
+            conv_idx = [0]
             if i == 0:
                 out = self.encoder(
                     x[:, :, :1, :, :],
-                    feat_cache=self._enc_feat_map,
-                    feat_idx=self._enc_conv_idx,
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx,
                 )
             else:
                 out_ = self.encoder(
                     x[:, :, 1 + 4 * (i - 1):1 + 4 * i, :, :],
-                    feat_cache=self._enc_feat_map,
-                    feat_idx=self._enc_conv_idx,
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx,
                 )
                 out = torch.cat([out, out_], 2)
         mu, log_var = self.conv1(out).chunk(2, dim=1)
-        self.clear_cache()
         return mu

     def decode(self, z):
-        self.clear_cache()
+        conv_idx = [0]
+        feat_map = [None] * count_conv3d(self.decoder)
         iter_ = z.shape[2]
         x = self.conv2(z)
         for i in range(iter_):
-            self._conv_idx = [0]
+            conv_idx = [0]
             if i == 0:
                 out = self.decoder(
                     x[:, :, i:i + 1, :, :],
-                    feat_cache=self._feat_map,
-                    feat_idx=self._conv_idx,
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx,
                     first_chunk=True,
                 )
             else:
                 out_ = self.decoder(
                     x[:, :, i:i + 1, :, :],
-                    feat_cache=self._feat_map,
-                    feat_idx=self._conv_idx,
+                    feat_cache=feat_map,
+                    feat_idx=conv_idx,
                 )
                 out = torch.cat([out, out_], 2)
         out = unpatchify(out, patch_size=2)
-        self.clear_cache()
         return out

     def reparameterize(self, mu, log_var):
@@ -715,12 +715,3 @@ class WanVAE(nn.Module):
             return mu
         std = torch.exp(0.5 * log_var.clamp(-30.0, 20.0))
         return mu + std * torch.randn_like(std)
-
-    def clear_cache(self):
-        self._conv_num = count_conv3d(self.decoder)
-        self._conv_idx = [0]
-        self._feat_map = [None] * self._conv_num
-        # cache encode
-        self._enc_conv_num = count_conv3d(self.encoder)
-        self._enc_conv_idx = [0]
-        self._enc_feat_map = [None] * self._enc_conv_num

From 3dfdcf66b643b6c191743d3b30fd8198ce690f2d Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Mon, 13 Oct 2025 22:36:26 +0300
Subject: [PATCH 0748/1073] convert nodes_hunyuan.py to V3 schema (#10136)

---
 comfy_extras/nodes_hunyuan.py | 241 +++++++++++++++++++++-------------
 1 file changed, 150 insertions(+), 91 deletions(-)

diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py
index db398cdf1..f7c34d059 100644
--- a/comfy_extras/nodes_hunyuan.py
+++ b/comfy_extras/nodes_hunyuan.py
@@ -2,42 +2,60 @@ import nodes
 import node_helpers
 import torch
 import comfy.model_management
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
 
-class
CLIPTextEncodeHunyuanDiT: +class CLIPTextEncodeHunyuanDiT(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "bert": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "mt5xl": ("STRING", {"multiline": True, "dynamicPrompts": True}), - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeHunyuanDiT", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("bert", multiline=True, dynamic_prompts=True), + io.String.Input("mt5xl", multiline=True, dynamic_prompts=True), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - CATEGORY = "advanced/conditioning" - - def encode(self, clip, bert, mt5xl): + @classmethod + def execute(cls, clip, bert, mt5xl) -> io.NodeOutput: tokens = clip.tokenize(bert) tokens["mt5xl"] = clip.tokenize(mt5xl)["mt5xl"] - return (clip.encode_from_tokens_scheduled(tokens), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) -class EmptyHunyuanLatentVideo: + encode = execute # TODO: remove + + +class EmptyHunyuanLatentVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "length": ("INT", {"default": 25, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}} - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" + def define_schema(cls): + return io.Schema( + node_id="EmptyHunyuanLatentVideo", + category="latent/video", + inputs=[ + io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=25, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(), + ], + ) - CATEGORY = "latent/video" - - def generate(self, width, height, length, batch_size=1): + @classmethod + def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput: latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) - return ({"samples":latent}, ) + return io.NodeOutput({"samples":latent}) + + generate = execute # TODO: remove + PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = ( "<|start_header_id|>system<|end_header_id|>\n\n\nDescribe the video by detailing the following aspects according to the reference image: " @@ -50,45 +68,61 @@ PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = ( "<|start_header_id|>assistant<|end_header_id|>\n\n" ) -class TextEncodeHunyuanVideo_ImageToVideo: +class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "clip": ("CLIP", ), - "clip_vision_output": ("CLIP_VISION_OUTPUT", ), - "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}), - "image_interleave": ("INT", {"default": 2, "min": 1, "max": 512, "tooltip": "How much the image influences things vs the text prompt. 
Higher number means more influence from the text prompt."}), - }} - RETURN_TYPES = ("CONDITIONING",) - FUNCTION = "encode" + def define_schema(cls): + return io.Schema( + node_id="TextEncodeHunyuanVideo_ImageToVideo", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.ClipVisionOutput.Input("clip_vision_output"), + io.String.Input("prompt", multiline=True, dynamic_prompts=True), + io.Int.Input( + "image_interleave", + default=2, + min=1, + max=512, + tooltip="How much the image influences things vs the text prompt. Higher number means more influence from the text prompt.", + ), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) - CATEGORY = "advanced/conditioning" - - def encode(self, clip, clip_vision_output, prompt, image_interleave): + @classmethod + def execute(cls, clip, clip_vision_output, prompt, image_interleave) -> io.NodeOutput: tokens = clip.tokenize(prompt, llama_template=PROMPT_TEMPLATE_ENCODE_VIDEO_I2V, image_embeds=clip_vision_output.mm_projected, image_interleave=image_interleave) - return (clip.encode_from_tokens_scheduled(tokens), ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) -class HunyuanImageToVideo: + encode = execute # TODO: remove + + +class HunyuanImageToVideo(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "vae": ("VAE", ), - "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}), - "length": ("INT", {"default": 53, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - "guidance_type": (["v1 (concat)", "v2 (replace)", "custom"], ) - }, - "optional": {"start_image": ("IMAGE", ), - }} + def define_schema(cls): + return io.Schema( + node_id="HunyuanImageToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Vae.Input("vae"), + io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=53, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Combo.Input("guidance_type", options=["v1 (concat)", "v2 (replace)", "custom"]), + io.Image.Input("start_image", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Latent.Output(display_name="latent"), + ], + ) - RETURN_TYPES = ("CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "latent") - FUNCTION = "encode" - - CATEGORY = "conditioning/video_models" - - def encode(self, positive, vae, width, height, length, batch_size, guidance_type, start_image=None): + @classmethod + def execute(cls, positive, vae, width, height, length, batch_size, guidance_type, start_image=None) -> io.NodeOutput: latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) out_latent = {} @@ -111,51 +145,76 @@ class HunyuanImageToVideo: positive = node_helpers.conditioning_set_values(positive, cond) out_latent["samples"] = latent - return (positive, out_latent) + return io.NodeOutput(positive, out_latent) -class EmptyHunyuanImageLatent: + encode = execute # TODO: remove + + +class EmptyHunyuanImageLatent(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "width": ("INT", {"default": 2048, 
"min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}), - "height": ("INT", {"default": 2048, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 32}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}} - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" + def define_schema(cls): + return io.Schema( + node_id="EmptyHunyuanImageLatent", + category="latent", + inputs=[ + io.Int.Input("width", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("height", default=2048, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(), + ], + ) - CATEGORY = "latent" - - def generate(self, width, height, batch_size=1): + @classmethod + def execute(cls, width, height, batch_size=1) -> io.NodeOutput: latent = torch.zeros([batch_size, 64, height // 32, width // 32], device=comfy.model_management.intermediate_device()) - return ({"samples":latent}, ) + return io.NodeOutput({"samples":latent}) -class HunyuanRefinerLatent: + generate = execute # TODO: remove + + +class HunyuanRefinerLatent(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "latent": ("LATENT", ), - "noise_augmentation": ("FLOAT", {"default": 0.10, "min": 0.0, "max": 1.0, "step": 0.01}), - }} + def define_schema(cls): + return io.Schema( + node_id="HunyuanRefinerLatent", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Latent.Input("latent"), + io.Float.Input("noise_augmentation", default=0.10, min=0.0, max=1.0, step=0.01), - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) - FUNCTION = "execute" - - def execute(self, positive, negative, latent, noise_augmentation): + @classmethod + def execute(cls, positive, negative, latent, noise_augmentation) -> io.NodeOutput: latent = latent["samples"] positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": latent, "noise_augmentation": noise_augmentation}) negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": latent, "noise_augmentation": noise_augmentation}) out_latent = {} out_latent["samples"] = torch.zeros([latent.shape[0], 32, latent.shape[-3], latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device()) - return (positive, negative, out_latent) + return io.NodeOutput(positive, negative, out_latent) -NODE_CLASS_MAPPINGS = { - "CLIPTextEncodeHunyuanDiT": CLIPTextEncodeHunyuanDiT, - "TextEncodeHunyuanVideo_ImageToVideo": TextEncodeHunyuanVideo_ImageToVideo, - "EmptyHunyuanLatentVideo": EmptyHunyuanLatentVideo, - "HunyuanImageToVideo": HunyuanImageToVideo, - "EmptyHunyuanImageLatent": EmptyHunyuanImageLatent, - "HunyuanRefinerLatent": HunyuanRefinerLatent, -} +class HunyuanExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + CLIPTextEncodeHunyuanDiT, + TextEncodeHunyuanVideo_ImageToVideo, + EmptyHunyuanLatentVideo, + HunyuanImageToVideo, + EmptyHunyuanImageLatent, + HunyuanRefinerLatent, + ] + + +async def comfy_entrypoint() -> HunyuanExtension: + return HunyuanExtension() From c8674bc6e9c0762e9fabe0e7f2762d5c36700963 Mon Sep 17 00:00:00 2001 From: comfyanonymous 
<121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 13 Oct 2025 18:19:03 -0700 Subject: [PATCH 0749/1073] Enable RDNA4 pytorch attention on ROCm 7.0 and up. (#10332) --- comfy/model_management.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 146c00925..709ebc40b 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -345,9 +345,9 @@ try: if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950 ENABLE_PYTORCH_ATTENTION = True -# if torch_version_numeric >= (2, 8): -# if any((a in arch) for a in ["gfx1201"]): -# ENABLE_PYTORCH_ATTENTION = True + if rocm_version >= (7, 0): + if any((a in arch) for a in ["gfx1201"]): + ENABLE_PYTORCH_ATTENTION = True if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): if any((a in arch) for a in ["gfx1200", "gfx1201", "gfx942", "gfx950"]): # TODO: more arches SUPPORT_FP8_OPS = True From e4ea3936660a8f8dfa2467e51631362b04ad47e8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 13 Oct 2025 19:18:58 -0700 Subject: [PATCH 0750/1073] Fix loading old stable diffusion ckpt files on newer numpy. (#10333) --- comfy/utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/comfy/utils.py b/comfy/utils.py index fab28cf08..0fd03f165 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -39,7 +39,11 @@ if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in pass ModelCheckpoint.__module__ = "pytorch_lightning.callbacks.model_checkpoint" - from numpy.core.multiarray import scalar + def scalar(*args, **kwargs): + from numpy.core.multiarray import scalar as sc + return sc(*args, **kwargs) + scalar.__module__ = "numpy.core.multiarray" + from numpy import dtype from numpy.dtypes import Float64DType from _codecs import encode From dfff7e5332530b7278c1f90c51aed525db53489e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 13 Oct 2025 19:37:19 -0700 Subject: [PATCH 0751/1073] Better memory estimation for the SD/Flux VAE on AMD. 
(#10334) --- comfy/sd.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index b9c2e995e..28bee248d 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -276,8 +276,13 @@ class VAE: if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format sd = diffusers_convert.convert_vae_state_dict(sd) - self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower) - self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) + if model_management.is_amd(): + VAE_KL_MEM_RATIO = 2.73 + else: + VAE_KL_MEM_RATIO = 1.0 + + self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) * VAE_KL_MEM_RATIO #These are for AutoencoderKL and need tweaking (should be lower) + self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) * VAE_KL_MEM_RATIO self.downscale_ratio = 8 self.upscale_ratio = 8 self.latent_channels = 4 From 51696e3fdcdfad657cb15854345fbcbbe70eef8d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Oct 2025 23:39:55 -0400 Subject: [PATCH 0752/1073] ComfyUI version 0.3.65 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index da5cde02d..d39c1fdc4 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.64" +__version__ = "0.3.65" diff --git a/pyproject.toml b/pyproject.toml index 5dcc49a47..653604e24 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.64" +version = "0.3.65" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 3374e900d0f310100ebe54944175a36f287110cb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 13 Oct 2025 20:43:53 -0700 Subject: [PATCH 0753/1073] Faster workflow cancelling. 
(#10301) --- comfy/ops.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/comfy/ops.py b/comfy/ops.py index 2415c96bf..b2096b40e 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -24,6 +24,8 @@ import comfy.float import comfy.rmsnorm import contextlib +def run_every_op(): + comfy.model_management.throw_exception_if_processing_interrupted() def scaled_dot_product_attention(q, k, v, *args, **kwargs): return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs) @@ -109,6 +111,7 @@ class disable_weight_init: return torch.nn.functional.linear(input, weight, bias) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -123,6 +126,7 @@ class disable_weight_init: return self._conv_forward(input, weight, bias) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -137,6 +141,7 @@ class disable_weight_init: return self._conv_forward(input, weight, bias) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -151,6 +156,7 @@ class disable_weight_init: return self._conv_forward(input, weight, bias) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -165,6 +171,7 @@ class disable_weight_init: return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -183,6 +190,7 @@ class disable_weight_init: return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -202,6 +210,7 @@ class disable_weight_init: # return torch.nn.functional.rms_norm(input, self.normalized_shape, weight, self.eps) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -223,6 +232,7 @@ class disable_weight_init: output_padding, self.groups, self.dilation) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -244,6 +254,7 @@ class disable_weight_init: output_padding, self.groups, self.dilation) def forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: @@ -262,6 +273,7 @@ class disable_weight_init: return torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype) def 
forward(self, *args, **kwargs): + run_every_op() if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(*args, **kwargs) else: From 84867067ea588e2a3d38a54dc34d86c96d706487 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 13 Oct 2025 23:09:12 -0700 Subject: [PATCH 0754/1073] Python 3.14 instructions. (#10337) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 4a5a17cda..db1fdaf3c 100644 --- a/README.md +++ b/README.md @@ -197,7 +197,9 @@ comfy install ## Manual Install (Windows, Linux) -Python 3.13 is very well supported. If you have trouble with some custom node dependencies you can try 3.12 +Python 3.14 will work if you comment out the `kornia` dependency in the requirements.txt file (breaks the canny node) and install pytorch nightly but it is not recommended. + +Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12 Git clone this repo. From 7a883849ea21003a5a649276a4cd322cb6c2ff0b Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 14 Oct 2025 09:55:56 +0300 Subject: [PATCH 0755/1073] api-nodes: fixed dynamic pricing format; import comfy_io directly (#10336) --- comfy_api/latest/__init__.py | 7 +- comfy_api_nodes/apinode_utils.py | 14 + comfy_api_nodes/apis/client.py | 2 +- comfy_api_nodes/nodes_bfl.py | 236 +++++++-------- comfy_api_nodes/nodes_bytedance.py | 262 ++++++++--------- comfy_api_nodes/nodes_ideogram.py | 112 ++++---- comfy_api_nodes/nodes_kling.py | 428 ++++++++++++++-------------- comfy_api_nodes/nodes_luma.py | 190 ++++++------ comfy_api_nodes/nodes_minimax.py | 108 +++---- comfy_api_nodes/nodes_moonvalley.py | 118 ++++---- comfy_api_nodes/nodes_pika.py | 188 ++++++------ comfy_api_nodes/nodes_pixverse.py | 120 ++++---- comfy_api_nodes/nodes_rodin.py | 116 ++++---- comfy_api_nodes/nodes_runway.py | 112 ++++---- comfy_api_nodes/nodes_sora.py | 32 +-- comfy_api_nodes/nodes_stability.py | 244 ++++++++-------- comfy_api_nodes/nodes_veo2.py | 74 ++--- comfy_api_nodes/nodes_vidu.py | 144 +++++----- comfy_api_nodes/nodes_wan.py | 146 +++++----- 19 files changed, 1331 insertions(+), 1322 deletions(-) diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py index b19a97f1d..b7a3fa9c1 100644 --- a/comfy_api/latest/__init__.py +++ b/comfy_api/latest/__init__.py @@ -114,7 +114,9 @@ if TYPE_CHECKING: ComfyAPISync: Type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub] ComfyAPISync = create_sync_class(ComfyAPI_latest) -comfy_io = io # create the new alias for io +# create new aliases for io and ui +IO = io +UI = ui __all__ = [ "ComfyAPI", @@ -124,6 +126,7 @@ __all__ = [ "Types", "ComfyExtension", "io", - "comfy_io", + "IO", "ui", + "UI", ] diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index 4bab539f7..bc3d2d07e 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -3,6 +3,7 @@ import aiohttp import io import logging import mimetypes +import os from typing import Optional, Union from comfy.utils import common_upscale from comfy_api.input_impl import VideoFromFile @@ -702,3 +703,16 @@ def image_tensor_pair_to_batch( "center", ).movedim(1, -1) return torch.cat((image1, image2), dim=0) + + +def get_size(path_or_object: Union[str, io.BytesIO]) -> int: + if isinstance(path_or_object, str): + 
return os.path.getsize(path_or_object) + return len(path_or_object.getvalue()) + + +def validate_container_format_is_mp4(video: VideoInput) -> None: + """Validates video container format is MP4.""" + container_format = video.get_container_format() + if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]: + raise ValueError(f"Only MP4 container format supported. Got: {container_format}") diff --git a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py index d05e1c16a..bdaddcc88 100644 --- a/comfy_api_nodes/apis/client.py +++ b/comfy_api_nodes/apis/client.py @@ -845,7 +845,7 @@ class PollingOperation(Generic[T, R]): if not self.node_id: return if self.extracted_price is not None: - text = f"Price: {self.extracted_price}$\n{text}" + text = f"Price: ${self.extracted_price}\n{text}" PromptServer.instance.send_progress_text(text, self.node_id) def _display_time_progress_on_node(self, time_completed: int | float): diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index 77914021d..b6cc90f05 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -3,7 +3,7 @@ import io from inspect import cleandoc from typing import Union, Optional from typing_extensions import override -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api_nodes.apis.bfl_api import ( BFLStatus, BFLFluxExpandImageRequest, @@ -131,7 +131,7 @@ def convert_image_to_base64(image: torch.Tensor): return base64.b64encode(img_byte_arr.getvalue()).decode() -class FluxProUltraImageNode(comfy_io.ComfyNode): +class FluxProUltraImageNode(IO.ComfyNode): """ Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution. """ @@ -142,25 +142,25 @@ class FluxProUltraImageNode(comfy_io.ComfyNode): MAXIMUM_RATIO_STR = "4:1" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="FluxProUltraImageNode", display_name="Flux 1.1 [pro] Ultra Image", category="api node/image/BFL", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_upsampling", default=False, tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -168,21 +168,21 @@ class FluxProUltraImageNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="The random seed used for creating the noise.", ), - comfy_io.String.Input( + IO.String.Input( "aspect_ratio", default="16:9", tooltip="Aspect ratio of image; must be between 1:4 and 4:1.", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "raw", default=False, tooltip="When True, generate less processed, more natural-looking images.", ), - comfy_io.Image.Input( + IO.Image.Input( "image_prompt", optional=True, ), - comfy_io.Float.Input( + IO.Float.Input( "image_prompt_strength", default=0.1, min=0.0, @@ -192,11 +192,11 @@ class FluxProUltraImageNode(comfy_io.ComfyNode): optional=True, ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -225,7 +225,7 @@ class FluxProUltraImageNode(comfy_io.ComfyNode): seed=0, image_prompt=None, image_prompt_strength=0.1, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: if image_prompt is None: validate_string(prompt, strip_whitespace=False) operation = SynchronousOperation( @@ -262,10 +262,10 @@ class FluxProUltraImageNode(comfy_io.ComfyNode): }, ) output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return comfy_io.NodeOutput(output_image) + return IO.NodeOutput(output_image) -class FluxKontextProImageNode(comfy_io.ComfyNode): +class FluxKontextProImageNode(IO.ComfyNode): """ Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio. """ @@ -276,25 +276,25 @@ class FluxKontextProImageNode(comfy_io.ComfyNode): MAXIMUM_RATIO_STR = "4:1" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id=cls.NODE_ID, display_name=cls.DISPLAY_NAME, category="api node/image/BFL", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation - specify what and how to edit.", ), - comfy_io.String.Input( + IO.String.Input( "aspect_ratio", default="16:9", tooltip="Aspect ratio of image; must be between 1:4 and 4:1.", ), - comfy_io.Float.Input( + IO.Float.Input( "guidance", default=3.0, min=0.1, @@ -302,14 +302,14 @@ class FluxKontextProImageNode(comfy_io.ComfyNode): step=0.1, tooltip="Guidance strength for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=50, min=1, max=150, tooltip="Number of steps for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=1234, min=0, @@ -317,21 +317,21 @@ class FluxKontextProImageNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="The random seed used for creating the noise.", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_upsampling", default=False, tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - comfy_io.Image.Input( + IO.Image.Input( "input_image", optional=True, ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -350,7 +350,7 @@ class FluxKontextProImageNode(comfy_io.ComfyNode): input_image: Optional[torch.Tensor]=None, seed=0, prompt_upsampling=False, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: aspect_ratio = validate_aspect_ratio( aspect_ratio, minimum_ratio=cls.MINIMUM_RATIO, @@ -386,7 +386,7 @@ class FluxKontextProImageNode(comfy_io.ComfyNode): }, ) output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return comfy_io.NodeOutput(output_image) + return IO.NodeOutput(output_image) class FluxKontextMaxImageNode(FluxKontextProImageNode): @@ -400,45 +400,45 @@ class FluxKontextMaxImageNode(FluxKontextProImageNode): DISPLAY_NAME = "Flux.1 Kontext [max] Image" -class FluxProImageNode(comfy_io.ComfyNode): +class FluxProImageNode(IO.ComfyNode): """ Generates images synchronously based on prompt and resolution. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="FluxProImageNode", display_name="Flux 1.1 [pro] Image", category="api node/image/BFL", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_upsampling", default=False, tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - comfy_io.Int.Input( + IO.Int.Input( "width", default=1024, min=256, max=1440, step=32, ), - comfy_io.Int.Input( + IO.Int.Input( "height", default=768, min=256, max=1440, step=32, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -446,7 +446,7 @@ class FluxProImageNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="The random seed used for creating the noise.", ), - comfy_io.Image.Input( + IO.Image.Input( "image_prompt", optional=True, ), @@ -461,11 +461,11 @@ class FluxProImageNode(comfy_io.ComfyNode): # }, # ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -480,7 +480,7 @@ class FluxProImageNode(comfy_io.ComfyNode): seed=0, image_prompt=None, # image_prompt_strength=0.1, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: image_prompt = ( image_prompt if image_prompt is None @@ -508,77 +508,77 @@ class FluxProImageNode(comfy_io.ComfyNode): }, ) output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return comfy_io.NodeOutput(output_image) + return IO.NodeOutput(output_image) -class FluxProExpandNode(comfy_io.ComfyNode): +class FluxProExpandNode(IO.ComfyNode): """ Outpaints image based on prompt. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="FluxProExpandNode", display_name="Flux.1 Expand Image", category="api node/image/BFL", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("image"), - comfy_io.String.Input( + IO.Image.Input("image"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_upsampling", default=False, tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - comfy_io.Int.Input( + IO.Int.Input( "top", default=0, min=0, max=2048, tooltip="Number of pixels to expand at the top of the image", ), - comfy_io.Int.Input( + IO.Int.Input( "bottom", default=0, min=0, max=2048, tooltip="Number of pixels to expand at the bottom of the image", ), - comfy_io.Int.Input( + IO.Int.Input( "left", default=0, min=0, max=2048, tooltip="Number of pixels to expand at the left of the image", ), - comfy_io.Int.Input( + IO.Int.Input( "right", default=0, min=0, max=2048, tooltip="Number of pixels to expand at the right of the image", ), - comfy_io.Float.Input( + IO.Float.Input( "guidance", default=60, min=1.5, max=100, tooltip="Guidance strength for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=50, min=15, max=50, tooltip="Number of steps for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -587,11 +587,11 @@ class FluxProExpandNode(comfy_io.ComfyNode): tooltip="The random seed used for creating the noise.", ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -609,7 +609,7 @@ class FluxProExpandNode(comfy_io.ComfyNode): steps: int, guidance: float, seed=0, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: image = convert_image_to_base64(image) operation = SynchronousOperation( @@ -637,51 +637,51 @@ class FluxProExpandNode(comfy_io.ComfyNode): }, ) output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return comfy_io.NodeOutput(output_image) + return IO.NodeOutput(output_image) -class FluxProFillNode(comfy_io.ComfyNode): +class FluxProFillNode(IO.ComfyNode): """ Inpaints image based on mask and prompt. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="FluxProFillNode", display_name="Flux.1 Fill Image", category="api node/image/BFL", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("image"), - comfy_io.Mask.Input("mask"), - comfy_io.String.Input( + IO.Image.Input("image"), + IO.Mask.Input("mask"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_upsampling", default=False, tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - comfy_io.Float.Input( + IO.Float.Input( "guidance", default=60, min=1.5, max=100, tooltip="Guidance strength for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=50, min=15, max=50, tooltip="Number of steps for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -690,11 +690,11 @@ class FluxProFillNode(comfy_io.ComfyNode): tooltip="The random seed used for creating the noise.", ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -709,7 +709,7 @@ class FluxProFillNode(comfy_io.ComfyNode): steps: int, guidance: float, seed=0, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: # prepare mask mask = resize_mask_to_image(mask, image) mask = convert_image_to_base64(convert_mask_to_image(mask)) @@ -738,35 +738,35 @@ class FluxProFillNode(comfy_io.ComfyNode): }, ) output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return comfy_io.NodeOutput(output_image) + return IO.NodeOutput(output_image) -class FluxProCannyNode(comfy_io.ComfyNode): +class FluxProCannyNode(IO.ComfyNode): """ Generate image using a control image (canny). """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="FluxProCannyNode", display_name="Flux.1 Canny Control Image", category="api node/image/BFL", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("control_image"), - comfy_io.String.Input( + IO.Image.Input("control_image"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_upsampling", default=False, tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - comfy_io.Float.Input( + IO.Float.Input( "canny_low_threshold", default=0.1, min=0.01, @@ -774,7 +774,7 @@ class FluxProCannyNode(comfy_io.ComfyNode): step=0.01, tooltip="Low threshold for Canny edge detection; ignored if skip_processing is True", ), - comfy_io.Float.Input( + IO.Float.Input( "canny_high_threshold", default=0.4, min=0.01, @@ -782,26 +782,26 @@ class FluxProCannyNode(comfy_io.ComfyNode): step=0.01, tooltip="High threshold for Canny edge detection; ignored if skip_processing is True", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "skip_preprocessing", default=False, tooltip="Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.", ), - comfy_io.Float.Input( + IO.Float.Input( "guidance", default=30, min=1, max=100, tooltip="Guidance strength for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=50, min=15, max=50, tooltip="Number of steps for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -810,11 +810,11 @@ class FluxProCannyNode(comfy_io.ComfyNode): tooltip="The random seed used for creating the noise.", ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -831,7 +831,7 @@ class FluxProCannyNode(comfy_io.ComfyNode): steps: int, guidance: float, seed=0, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: control_image = convert_image_to_base64(control_image[:, :, :, :3]) preprocessed_image = None @@ -872,54 +872,54 @@ class FluxProCannyNode(comfy_io.ComfyNode): }, ) output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return comfy_io.NodeOutput(output_image) + return IO.NodeOutput(output_image) -class FluxProDepthNode(comfy_io.ComfyNode): +class FluxProDepthNode(IO.ComfyNode): """ Generate image using a control image (depth). """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="FluxProDepthNode", display_name="Flux.1 Depth Control Image", category="api node/image/BFL", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("control_image"), - comfy_io.String.Input( + IO.Image.Input("control_image"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_upsampling", default=False, tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "skip_preprocessing", default=False, tooltip="Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.", ), - comfy_io.Float.Input( + IO.Float.Input( "guidance", default=15, min=1, max=100, tooltip="Guidance strength for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=50, min=15, max=50, tooltip="Number of steps for the image generation process", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -928,11 +928,11 @@ class FluxProDepthNode(comfy_io.ComfyNode): tooltip="The random seed used for creating the noise.", ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -947,7 +947,7 @@ class FluxProDepthNode(comfy_io.ComfyNode): steps: int, guidance: float, seed=0, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: control_image = convert_image_to_base64(control_image[:,:,:,:3]) preprocessed_image = None @@ -977,12 +977,12 @@ class FluxProDepthNode(comfy_io.ComfyNode): }, ) output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return comfy_io.NodeOutput(output_image) + return IO.NodeOutput(output_image) class BFLExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ FluxProUltraImageNode, # FluxProImageNode, diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index fcb01820c..f3d3f8d3e 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -7,7 +7,7 @@ from typing_extensions import override import torch from pydantic import BaseModel, Field -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api_nodes.util.validation_utils import ( validate_image_aspect_ratio_range, get_number_of_images, @@ -237,33 +237,33 @@ async def poll_until_finished( ).execute() -class ByteDanceImageNode(comfy_io.ComfyNode): +class ByteDanceImageNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ByteDanceImageNode", display_name="ByteDance Image", category="api node/image/ByteDance", description="Generate images using ByteDance models via api based on prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=Text2ImageModelName, default=Text2ImageModelName.seedream_3, tooltip="Model name", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="The text prompt used to generate the image", ), - comfy_io.Combo.Input( + IO.Combo.Input( "size_preset", options=[label for label, _, _ in RECOMMENDED_PRESETS], tooltip="Pick a recommended size. Select Custom to use the width and height below", ), - comfy_io.Int.Input( + IO.Int.Input( "width", default=1024, min=512, @@ -271,7 +271,7 @@ class ByteDanceImageNode(comfy_io.ComfyNode): step=64, tooltip="Custom width for image. 
Value is working only if `size_preset` is set to `Custom`", ), - comfy_io.Int.Input( + IO.Int.Input( "height", default=1024, min=512, @@ -279,28 +279,28 @@ class ByteDanceImageNode(comfy_io.ComfyNode): step=64, tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation", optional=True, ), - comfy_io.Float.Input( + IO.Float.Input( "guidance_scale", default=2.5, min=1.0, max=10.0, step=0.01, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Higher value makes the image follow the prompt more closely", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the image", @@ -308,12 +308,12 @@ class ByteDanceImageNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -329,7 +329,7 @@ class ByteDanceImageNode(comfy_io.ComfyNode): seed: int, guidance_scale: float, watermark: bool, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) w = h = None for label, tw, th in RECOMMENDED_PRESETS: @@ -367,57 +367,57 @@ class ByteDanceImageNode(comfy_io.ComfyNode): request=payload, auth_kwargs=auth_kwargs, ).execute() - return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) + return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) -class ByteDanceImageEditNode(comfy_io.ComfyNode): +class ByteDanceImageEditNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ByteDanceImageEditNode", display_name="ByteDance Image Edit", category="api node/image/ByteDance", description="Edit images using ByteDance models via api based on prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=Image2ImageModelName, default=Image2ImageModelName.seededit_3, tooltip="Model name", ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="The base image to edit", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Instruction to edit image", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation", optional=True, ), - comfy_io.Float.Input( + IO.Float.Input( "guidance_scale", default=5.5, min=1.0, max=10.0, step=0.01, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Higher value makes the image follow the prompt more closely", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the image", @@ -425,12 +425,12 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - 
comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -444,7 +444,7 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): seed: int, guidance_scale: float, watermark: bool, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") @@ -477,42 +477,42 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode): request=payload, auth_kwargs=auth_kwargs, ).execute() - return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) + return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) -class ByteDanceSeedreamNode(comfy_io.ComfyNode): +class ByteDanceSeedreamNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ByteDanceSeedreamNode", display_name="ByteDance Seedream 4", category="api node/image/ByteDance", description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["seedream-4-0-250828"], tooltip="Model name", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Text prompt for creating or editing an image.", ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="Input image(s) for image-to-image generation. " "List of 1-10 images for single or multi-reference generation.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "size_preset", options=[label for label, _, _ in RECOMMENDED_PRESETS_SEEDREAM_4], tooltip="Pick a recommended size. Select Custom to use the width and height below.", ), - comfy_io.Int.Input( + IO.Int.Input( "width", default=2048, min=1024, @@ -521,7 +521,7 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): tooltip="Custom width for image. Value is working only if `size_preset` is set to `Custom`", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "height", default=2048, min=1024, @@ -530,7 +530,7 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "sequential_image_generation", options=["disabled", "auto"], tooltip="Group image generation mode. " @@ -539,35 +539,35 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): "(e.g., story scenes, character variations).", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "max_images", default=1, min=1, max=15, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Maximum number of images to generate when sequential_image_generation='auto'. 
" "Total images (input + generated) cannot exceed 15.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the image.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "fail_on_partial", default=True, tooltip="If enabled, abort execution if any requested images are missing or return an error.", @@ -575,12 +575,12 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -599,7 +599,7 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): seed: int = 0, watermark: bool = True, fail_on_partial: bool = True, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) w = h = None for label, tw, th in RECOMMENDED_PRESETS_SEEDREAM_4: @@ -657,72 +657,72 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode): ).execute() if len(response.data) == 1: - return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) + return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) urls = [str(d["url"]) for d in response.data if isinstance(d, dict) and "url" in d] if fail_on_partial and len(urls) < len(response.data): raise RuntimeError(f"Only {len(urls)} of {len(response.data)} images were generated before error.") - return comfy_io.NodeOutput(torch.cat([await download_url_to_image_tensor(i) for i in urls])) + return IO.NodeOutput(torch.cat([await download_url_to_image_tensor(i) for i in urls])) -class ByteDanceTextToVideoNode(comfy_io.ComfyNode): +class ByteDanceTextToVideoNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ByteDanceTextToVideoNode", display_name="ByteDance Text to Video", category="api node/video/ByteDance", description="Generate video using ByteDance models via api based on prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=Text2VideoModelName, default=Text2VideoModelName.seedance_1_pro, tooltip="Model name", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="The text prompt used to generate the video.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=["480p", "720p", "1080p"], tooltip="The resolution of the output video.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=["16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], tooltip="The aspect ratio of the output video.", ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=3, max=12, step=1, tooltip="The duration of the output video in seconds.", - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - 
comfy_io.Boolean.Input( + IO.Boolean.Input( "camera_fixed", default=False, tooltip="Specifies whether to fix the camera. The platform appends an instruction " "to fix the camera to your prompt, but does not guarantee the actual effect.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the video.", @@ -730,12 +730,12 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -751,7 +751,7 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode): seed: int, camera_fixed: bool, watermark: bool, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) @@ -781,69 +781,69 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode): ) -class ByteDanceImageToVideoNode(comfy_io.ComfyNode): +class ByteDanceImageToVideoNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ByteDanceImageToVideoNode", display_name="ByteDance Image to Video", category="api node/video/ByteDance", description="Generate video using ByteDance models via api based on image and prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=Image2VideoModelName, default=Image2VideoModelName.seedance_1_pro, tooltip="Model name", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="The text prompt used to generate the video.", ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="First frame to be used for the video.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=["480p", "720p", "1080p"], tooltip="The resolution of the output video.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], tooltip="The aspect ratio of the output video.", ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=3, max=12, step=1, tooltip="The duration of the output video in seconds.", - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "camera_fixed", default=False, tooltip="Specifies whether to fix the camera. 
The platform appends an instruction " "to fix the camera to your prompt, but does not guarantee the actual effect.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the video.", @@ -851,12 +851,12 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -873,7 +873,7 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode): seed: int, camera_fixed: bool, watermark: bool, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000) @@ -908,73 +908,73 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode): ) -class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode): +class ByteDanceFirstLastFrameNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ByteDanceFirstLastFrameNode", display_name="ByteDance First-Last-Frame to Video", category="api node/video/ByteDance", description="Generate video using prompt and first and last frames.", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=[model.value for model in Image2VideoModelName], default=Image2VideoModelName.seedance_1_lite.value, tooltip="Model name", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="The text prompt used to generate the video.", ), - comfy_io.Image.Input( + IO.Image.Input( "first_frame", tooltip="First frame to be used for the video.", ), - comfy_io.Image.Input( + IO.Image.Input( "last_frame", tooltip="Last frame to be used for the video.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=["480p", "720p", "1080p"], tooltip="The resolution of the output video.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], tooltip="The aspect ratio of the output video.", ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=3, max=12, step=1, tooltip="The duration of the output video in seconds.", - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "camera_fixed", default=False, tooltip="Specifies whether to fix the camera. 
The platform appends an instruction " "to fix the camera to your prompt, but does not guarantee the actual effect.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the video.", @@ -982,12 +982,12 @@ class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1005,7 +1005,7 @@ class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode): seed: int, camera_fixed: bool, watermark: bool, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) for i in (first_frame, last_frame): @@ -1050,62 +1050,62 @@ class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode): ) -class ByteDanceImageReferenceNode(comfy_io.ComfyNode): +class ByteDanceImageReferenceNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ByteDanceImageReferenceNode", display_name="ByteDance Reference Images to Video", category="api node/video/ByteDance", description="Generate video using prompt and reference images.", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=[Image2VideoModelName.seedance_1_lite.value], default=Image2VideoModelName.seedance_1_lite.value, tooltip="Model name", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="The text prompt used to generate the video.", ), - comfy_io.Image.Input( + IO.Image.Input( "images", tooltip="One to four images.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=["480p", "720p"], tooltip="The resolution of the output video.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"], tooltip="The aspect ratio of the output video.", ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=3, max=12, step=1, tooltip="The duration of the output video in seconds.", - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the video.", @@ -1113,12 +1113,12 @@ class ByteDanceImageReferenceNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1134,7 +1134,7 @@ class ByteDanceImageReferenceNode(comfy_io.ComfyNode): duration: int, seed: int, watermark: bool, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "watermark"]) for image in images: @@ -1180,7 
+1180,7 @@ async def process_video_task( auth_kwargs: dict, node_id: str, estimated_duration: Optional[int], -) -> comfy_io.NodeOutput: +) -> IO.NodeOutput: initial_response = await SynchronousOperation( endpoint=ApiEndpoint( path=BYTEPLUS_TASK_ENDPOINT, @@ -1197,7 +1197,7 @@ async def process_video_task( estimated_duration=estimated_duration, node_id=node_id, ) - return comfy_io.NodeOutput(await download_url_to_video_output(get_video_url_from_task_status(response))) + return IO.NodeOutput(await download_url_to_video_output(get_video_url_from_task_status(response))) def raise_if_text_params(prompt: str, text_params: list[str]) -> None: @@ -1210,7 +1210,7 @@ def raise_if_text_params(prompt: str, text_params: list[str]) -> None: class ByteDanceExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ ByteDanceImageNode, ByteDanceImageEditNode, diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index 2d1c32e4f..9eae5f11a 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -1,6 +1,6 @@ from io import BytesIO from typing_extensions import override -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from PIL import Image import numpy as np import torch @@ -246,76 +246,76 @@ def display_image_urls_on_node(image_urls, node_id): PromptServer.instance.send_progress_text(urls_text, node_id) -class IdeogramV1(comfy_io.ComfyNode): +class IdeogramV1(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="IdeogramV1", display_name="Ideogram V1", category="api node/image/Ideogram", description="Generates images using the Ideogram V1 model.", is_api_node=True, inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "turbo", default=False, tooltip="Whether to use turbo mode (faster generation, potentially lower quality)", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=list(V1_V2_RATIO_MAP.keys()), default="1:1", tooltip="The aspect ratio for image generation.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "magic_prompt_option", options=["AUTO", "ON", "OFF"], default="AUTO", tooltip="Determine if MagicPrompt should be used in generation", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, control_after_generate=True, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", tooltip="Description of what to exclude from the image", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "num_images", default=1, min=1, max=8, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], ) @@ -372,39 +372,39 @@ class IdeogramV1(comfy_io.ComfyNode): raise Exception("No image URLs were generated in the response") display_image_urls_on_node(image_urls, 
cls.hidden.unique_id) - return comfy_io.NodeOutput(await download_and_process_images(image_urls)) + return IO.NodeOutput(await download_and_process_images(image_urls)) -class IdeogramV2(comfy_io.ComfyNode): +class IdeogramV2(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="IdeogramV2", display_name="Ideogram V2", category="api node/image/Ideogram", description="Generates images using the Ideogram V2 model.", is_api_node=True, inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "turbo", default=False, tooltip="Whether to use turbo mode (faster generation, potentially lower quality)", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=list(V1_V2_RATIO_MAP.keys()), default="1:1", tooltip="The aspect ratio for image generation. Ignored if resolution is not set to AUTO.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=list(V1_V1_RES_MAP.keys()), default="Auto", @@ -412,44 +412,44 @@ class IdeogramV2(comfy_io.ComfyNode): "If not set to AUTO, this overrides the aspect_ratio setting.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "magic_prompt_option", options=["AUTO", "ON", "OFF"], default="AUTO", tooltip="Determine if MagicPrompt should be used in generation", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, control_after_generate=True, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "style_type", options=["AUTO", "GENERAL", "REALISTIC", "DESIGN", "RENDER_3D", "ANIME"], default="NONE", tooltip="Style type for generation (V2 only)", optional=True, ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", tooltip="Description of what to exclude from the image", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "num_images", default=1, min=1, max=8, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), #"color_palette": ( @@ -462,12 +462,12 @@ class IdeogramV2(comfy_io.ComfyNode): #), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], ) @@ -541,14 +541,14 @@ class IdeogramV2(comfy_io.ComfyNode): raise Exception("No image URLs were generated in the response") display_image_urls_on_node(image_urls, cls.hidden.unique_id) - return comfy_io.NodeOutput(await download_and_process_images(image_urls)) + return IO.NodeOutput(await download_and_process_images(image_urls)) -class IdeogramV3(comfy_io.ComfyNode): +class IdeogramV3(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="IdeogramV3", display_name="Ideogram V3", category="api node/image/Ideogram", @@ -556,30 +556,30 @@ class IdeogramV3(comfy_io.ComfyNode): "Supports both regular image generation from text prompts and image editing with mask.", is_api_node=True, inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation or editing", ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="Optional 
reference image for image editing.", optional=True, ), - comfy_io.Mask.Input( + IO.Mask.Input( "mask", tooltip="Optional mask for inpainting (white areas will be replaced)", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=list(V3_RATIO_MAP.keys()), default="1:1", tooltip="The aspect ratio for image generation. Ignored if resolution is not set to Auto.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=V3_RESOLUTIONS, default="Auto", @@ -587,57 +587,57 @@ class IdeogramV3(comfy_io.ComfyNode): "If not set to Auto, this overrides the aspect_ratio setting.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "magic_prompt_option", options=["AUTO", "ON", "OFF"], default="AUTO", tooltip="Determine if MagicPrompt should be used in generation", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, control_after_generate=True, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "num_images", default=1, min=1, max=8, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "rendering_speed", options=["DEFAULT", "TURBO", "QUALITY"], default="DEFAULT", tooltip="Controls the trade-off between generation speed and quality", optional=True, ), - comfy_io.Image.Input( + IO.Image.Input( "character_image", tooltip="Image to use as character reference.", optional=True, ), - comfy_io.Mask.Input( + IO.Mask.Input( "character_mask", tooltip="Optional mask for character reference image.", optional=True, ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], ) @@ -826,12 +826,12 @@ class IdeogramV3(comfy_io.ComfyNode): raise Exception("No image URLs were generated in the response") display_image_urls_on_node(image_urls, cls.hidden.unique_id) - return comfy_io.NodeOutput(await download_and_process_images(image_urls)) + return IO.NodeOutput(await download_and_process_images(image_urls)) class IdeogramExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ IdeogramV1, IdeogramV2, diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 2117cfa91..67c8307c5 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -76,7 +76,7 @@ from comfy_api_nodes.util.validation_utils import ( from comfy_api.input_impl import VideoFromFile from comfy_api.input.basic_types import AudioInput from comfy_api.input.video_types import VideoInput -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO KLING_API_VERSION = "v1" PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video" @@ -387,7 +387,7 @@ async def execute_text2video( duration: str, aspect_ratio: str, camera_control: Optional[KlingCameraControl] = None, -) -> comfy_io.NodeOutput: +) -> IO.NodeOutput: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) initial_operation = SynchronousOperation( endpoint=ApiEndpoint( @@ -428,7 +428,7 @@ async def execute_text2video( 
validate_video_result_response(final_response) video = get_video_from_response(final_response) - return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) async def execute_image2video( @@ -444,7 +444,7 @@ async def execute_image2video( duration: str, camera_control: Optional[KlingCameraControl] = None, end_frame: Optional[torch.Tensor] = None, -) -> comfy_io.NodeOutput: +) -> IO.NodeOutput: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) validate_input_image(start_frame) @@ -499,7 +499,7 @@ async def execute_image2video( validate_video_result_response(final_response) video = get_video_from_response(final_response) - return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) async def execute_video_effect( @@ -576,7 +576,7 @@ async def execute_lipsync( text: Optional[str] = None, voice_speed: Optional[float] = None, voice_id: Optional[str] = None, -) -> comfy_io.NodeOutput: +) -> IO.NodeOutput: if text: validate_string(text, field_name="Text", max_length=MAX_PROMPT_LENGTH_LIP_SYNC) validate_video_dimensions(video, 720, 1920) @@ -634,77 +634,77 @@ async def execute_lipsync( validate_video_result_response(final_response) video = get_video_from_response(final_response) - return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) -class KlingCameraControls(comfy_io.ComfyNode): +class KlingCameraControls(IO.ComfyNode): """Kling Camera Controls Node""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingCameraControls", display_name="Kling Camera Controls", category="api node/video/Kling", description="Allows specifying configuration options for Kling Camera Controls and motion control effects.", inputs=[ - comfy_io.Combo.Input("camera_control_type", options=KlingCameraControlType), - comfy_io.Float.Input( + IO.Combo.Input("camera_control_type", options=KlingCameraControlType), + IO.Float.Input( "horizontal_movement", default=0.0, min=-10.0, max=10.0, step=0.25, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right", ), - comfy_io.Float.Input( + IO.Float.Input( "vertical_movement", default=0.0, min=-10.0, max=10.0, step=0.25, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward.", ), - comfy_io.Float.Input( + IO.Float.Input( "pan", default=0.5, min=-10.0, max=10.0, step=0.25, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Controls camera's rotation in vertical plane (x-axis). 
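The execute_text2video and execute_image2video helpers above return IO.NodeOutput(video, video_id, duration) triples; the positional values map one-to-one onto the `outputs` list each node declares in its Schema (a Video output followed by two String outputs). A self-contained sketch of that convention, with a hypothetical node id and values that are not part of the patch:

    from comfy_api.latest import IO

    class TinyVideoInfo(IO.ComfyNode):
        """Hypothetical node illustrating positional NodeOutput packing."""

        @classmethod
        def define_schema(cls) -> IO.Schema:
            return IO.Schema(
                node_id="TinyVideoInfo",  # hypothetical id
                display_name="Tiny Video Info",
                category="api node/example",
                inputs=[IO.String.Input("video_id")],
                outputs=[
                    IO.String.Output(display_name="video_id"),
                    IO.String.Output(display_name="duration"),
                ],
            )

        @classmethod
        def execute(cls, video_id: str) -> IO.NodeOutput:
            # The first positional value feeds the "video_id" output slot,
            # the second feeds "duration" -- same ordering as `outputs` above.
            return IO.NodeOutput(video_id, "5")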
Negative indicates downward rotation, positive indicates upward rotation.", ), - comfy_io.Float.Input( + IO.Float.Input( "tilt", default=0.0, min=-10.0, max=10.0, step=0.25, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.", ), - comfy_io.Float.Input( + IO.Float.Input( "roll", default=0.0, min=-10.0, max=10.0, step=0.25, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", ), - comfy_io.Float.Input( + IO.Float.Input( "zoom", default=0.0, min=-10.0, max=10.0, step=0.25, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.", ), ], - outputs=[comfy_io.Custom("CAMERA_CONTROL").Output(display_name="camera_control")], + outputs=[IO.Custom("CAMERA_CONTROL").Output(display_name="camera_control")], ) @classmethod @@ -740,8 +740,8 @@ class KlingCameraControls(comfy_io.ComfyNode): tilt: float, roll: float, zoom: float, - ) -> comfy_io.NodeOutput: - return comfy_io.NodeOutput( + ) -> IO.NodeOutput: + return IO.NodeOutput( KlingCameraControl( type=KlingCameraControlType(camera_control_type), config=KlingCameraConfig( @@ -756,27 +756,27 @@ class KlingCameraControls(comfy_io.ComfyNode): ) -class KlingTextToVideoNode(comfy_io.ComfyNode): +class KlingTextToVideoNode(IO.ComfyNode): """Kling Text to Video Node""" @classmethod - def define_schema(cls) -> comfy_io.Schema: + def define_schema(cls) -> IO.Schema: modes = list(MODE_TEXT2VIDEO.keys()) - return comfy_io.Schema( + return IO.Schema( node_id="KlingTextToVideoNode", display_name="Kling Text to Video", category="api node/video/Kling", description="Kling Text to Video Node", inputs=[ - comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), - comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), - comfy_io.Float.Input("cfg_scale", default=1.0, min=0.0, max=1.0), - comfy_io.Combo.Input( + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + IO.Float.Input("cfg_scale", default=1.0, min=0.0, max=1.0), + IO.Combo.Input( "aspect_ratio", options=KlingVideoGenAspectRatio, default="16:9", ), - comfy_io.Combo.Input( + IO.Combo.Input( "mode", options=modes, default=modes[4], @@ -784,14 +784,14 @@ class KlingTextToVideoNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -804,7 +804,7 @@ class KlingTextToVideoNode(comfy_io.ComfyNode): cfg_scale: float, mode: str, aspect_ratio: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: model_mode, duration, model_name = MODE_TEXT2VIDEO[mode] return await execute_text2video( auth_kwargs={ @@ -822,42 +822,42 @@ class 
KlingTextToVideoNode(comfy_io.ComfyNode): ) -class KlingCameraControlT2VNode(comfy_io.ComfyNode): +class KlingCameraControlT2VNode(IO.ComfyNode): """ Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera. Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingCameraControlT2VNode", display_name="Kling Text to Video (Camera Control)", category="api node/video/Kling", description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.", inputs=[ - comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), - comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), - comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), - comfy_io.Combo.Input( + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + IO.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), + IO.Combo.Input( "aspect_ratio", options=KlingVideoGenAspectRatio, default="16:9", ), - comfy_io.Custom("CAMERA_CONTROL").Input( + IO.Custom("CAMERA_CONTROL").Input( "camera_control", tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -870,7 +870,7 @@ class KlingCameraControlT2VNode(comfy_io.ComfyNode): cfg_scale: float, aspect_ratio: str, camera_control: Optional[KlingCameraControl] = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: return await execute_text2video( auth_kwargs={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -888,43 +888,43 @@ class KlingCameraControlT2VNode(comfy_io.ComfyNode): ) -class KlingImage2VideoNode(comfy_io.ComfyNode): +class KlingImage2VideoNode(IO.ComfyNode): """Kling Image to Video Node""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingImage2VideoNode", display_name="Kling Image to Video", category="api node/video/Kling", description="Kling Image to Video Node", inputs=[ - comfy_io.Image.Input("start_frame", tooltip="The reference image used to generate the video."), - comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), - comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), - comfy_io.Combo.Input( + IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."), + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + 
IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + IO.Combo.Input( "model_name", options=KlingVideoGenModelName, default="kling-v2-master", ), - comfy_io.Float.Input("cfg_scale", default=0.8, min=0.0, max=1.0), - comfy_io.Combo.Input("mode", options=KlingVideoGenMode, default=KlingVideoGenMode.std), - comfy_io.Combo.Input( + IO.Float.Input("cfg_scale", default=0.8, min=0.0, max=1.0), + IO.Combo.Input("mode", options=KlingVideoGenMode, default=KlingVideoGenMode.std), + IO.Combo.Input( "aspect_ratio", options=KlingVideoGenAspectRatio, default=KlingVideoGenAspectRatio.field_16_9, ), - comfy_io.Combo.Input("duration", options=KlingVideoGenDuration, default=KlingVideoGenDuration.field_5), + IO.Combo.Input("duration", options=KlingVideoGenDuration, default=KlingVideoGenDuration.field_5), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -942,7 +942,7 @@ class KlingImage2VideoNode(comfy_io.ComfyNode): duration: str, camera_control: Optional[KlingCameraControl] = None, end_frame: Optional[torch.Tensor] = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: return await execute_image2video( auth_kwargs={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -962,46 +962,46 @@ class KlingImage2VideoNode(comfy_io.ComfyNode): ) -class KlingCameraControlI2VNode(comfy_io.ComfyNode): +class KlingCameraControlI2VNode(IO.ComfyNode): """ Kling Image to Video Camera Control Node. This node is a image to video node, but it supports controlling the camera. Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingCameraControlI2VNode", display_name="Kling Image to Video (Camera Control)", category="api node/video/Kling", description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.", inputs=[ - comfy_io.Image.Input( + IO.Image.Input( "start_frame", tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. 
Base64 should not include data:image prefix.", ), - comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), - comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), - comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), - comfy_io.Combo.Input( + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + IO.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), + IO.Combo.Input( "aspect_ratio", options=KlingVideoGenAspectRatio, default=KlingVideoGenAspectRatio.field_16_9, ), - comfy_io.Custom("CAMERA_CONTROL").Input( + IO.Custom("CAMERA_CONTROL").Input( "camera_control", tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1015,7 +1015,7 @@ class KlingCameraControlI2VNode(comfy_io.ComfyNode): cfg_scale: float, aspect_ratio: str, camera_control: KlingCameraControl, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: return await execute_image2video( auth_kwargs={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -1034,37 +1034,37 @@ class KlingCameraControlI2VNode(comfy_io.ComfyNode): ) -class KlingStartEndFrameNode(comfy_io.ComfyNode): +class KlingStartEndFrameNode(IO.ComfyNode): """ Kling First Last Frame Node. This node allows creation of a video from a first and last frame. It calls the normal image to video endpoint, but only allows the subset of input options that support the `image_tail` request field. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: + def define_schema(cls) -> IO.Schema: modes = list(MODE_START_END_FRAME.keys()) - return comfy_io.Schema( + return IO.Schema( node_id="KlingStartEndFrameNode", display_name="Kling Start-End Frame to Video", category="api node/video/Kling", description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.", inputs=[ - comfy_io.Image.Input( + IO.Image.Input( "start_frame", tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.", ), - comfy_io.Image.Input( + IO.Image.Input( "end_frame", tooltip="Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. 
Base64 should not include data:image prefix.", ), - comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), - comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), - comfy_io.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), - comfy_io.Combo.Input( + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + IO.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), + IO.Combo.Input( "aspect_ratio", options=[i.value for i in KlingVideoGenAspectRatio], default="16:9", ), - comfy_io.Combo.Input( + IO.Combo.Input( "mode", options=modes, default=modes[2], @@ -1072,14 +1072,14 @@ class KlingStartEndFrameNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1094,7 +1094,7 @@ class KlingStartEndFrameNode(comfy_io.ComfyNode): cfg_scale: float, aspect_ratio: str, mode: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: mode, duration, model_name = MODE_START_END_FRAME[mode] return await execute_image2video( auth_kwargs={ @@ -1114,41 +1114,41 @@ class KlingStartEndFrameNode(comfy_io.ComfyNode): ) -class KlingVideoExtendNode(comfy_io.ComfyNode): +class KlingVideoExtendNode(IO.ComfyNode): @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingVideoExtendNode", display_name="Kling Video Extend", category="api node/video/Kling", description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="Positive text prompt for guiding the video extension", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, tooltip="Negative text prompt for elements to avoid in the extended video", ), - comfy_io.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), - comfy_io.String.Input( + IO.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), + IO.String.Input( "video_id", force_input=True, tooltip="The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. 
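Every API node in these files rebuilds the same two-key auth dict from the hidden inputs declared in its Schema before delegating to its helper coroutine. A small refactor one could layer on top of this patch (hypothetical, not part of it) would factor that out:

    # Hypothetical convenience: each node currently builds this dict inline
    # from the hidden inputs declared in its Schema.
    def auth_from_hidden(cls) -> dict[str, str]:
        return {
            "auth_token": cls.hidden.auth_token_comfy_org,
            "comfy_api_key": cls.hidden.api_key_comfy_org,
        }

Each execute body could then pass auth_kwargs=auth_from_hidden(cls) instead of repeating the literal dict.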
Cannot exceed 3 minutes total duration after extension.", ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1160,7 +1160,7 @@ class KlingVideoExtendNode(comfy_io.ComfyNode): negative_prompt: str, cfg_scale: float, video_id: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) auth = { "auth_token": cls.hidden.auth_token_comfy_org, @@ -1201,49 +1201,49 @@ class KlingVideoExtendNode(comfy_io.ComfyNode): validate_video_result_response(final_response) video = get_video_from_response(final_response) - return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) + return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) -class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode): +class KlingDualCharacterVideoEffectNode(IO.ComfyNode): """Kling Dual Character Video Effect Node""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingDualCharacterVideoEffectNode", display_name="Kling Dual Character Video Effects", category="api node/video/Kling", description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.", inputs=[ - comfy_io.Image.Input("image_left", tooltip="Left side image"), - comfy_io.Image.Input("image_right", tooltip="Right side image"), - comfy_io.Combo.Input( + IO.Image.Input("image_left", tooltip="Left side image"), + IO.Image.Input("image_right", tooltip="Right side image"), + IO.Combo.Input( "effect_scene", options=[i.value for i in KlingDualCharacterEffectsScene], ), - comfy_io.Combo.Input( + IO.Combo.Input( "model_name", options=[i.value for i in KlingCharacterEffectModelName], default="kling-v1", ), - comfy_io.Combo.Input( + IO.Combo.Input( "mode", options=[i.value for i in KlingVideoGenMode], default="std", ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=[i.value for i in KlingVideoGenDuration], ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1257,7 +1257,7 @@ class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode): model_name: KlingCharacterEffectModelName, mode: KlingVideoGenMode, duration: KlingVideoGenDuration, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: video, _, duration = await execute_video_effect( auth_kwargs={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -1272,43 +1272,43 @@ class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode): image_1=image_left, image_2=image_right, ) - return comfy_io.NodeOutput(video, duration) + return IO.NodeOutput(video, duration) -class 
KlingSingleImageVideoEffectNode(comfy_io.ComfyNode): +class KlingSingleImageVideoEffectNode(IO.ComfyNode): """Kling Single Image Video Effect Node""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingSingleImageVideoEffectNode", display_name="Kling Video Effects", category="api node/video/Kling", description="Achieve different special effects when generating a video based on the effect_scene.", inputs=[ - comfy_io.Image.Input("image", tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"), - comfy_io.Combo.Input( + IO.Image.Input("image", tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"), + IO.Combo.Input( "effect_scene", options=[i.value for i in KlingSingleImageEffectsScene], ), - comfy_io.Combo.Input( + IO.Combo.Input( "model_name", options=[i.value for i in KlingSingleImageEffectModelName], ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=[i.value for i in KlingVideoGenDuration], ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1320,8 +1320,8 @@ class KlingSingleImageVideoEffectNode(comfy_io.ComfyNode): effect_scene: KlingSingleImageEffectsScene, model_name: KlingSingleImageEffectModelName, duration: KlingVideoGenDuration, - ) -> comfy_io.NodeOutput: - return comfy_io.NodeOutput( + ) -> IO.NodeOutput: + return IO.NodeOutput( *( await execute_video_effect( auth_kwargs={ @@ -1339,34 +1339,34 @@ class KlingSingleImageVideoEffectNode(comfy_io.ComfyNode): ) -class KlingLipSyncAudioToVideoNode(comfy_io.ComfyNode): +class KlingLipSyncAudioToVideoNode(IO.ComfyNode): """Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingLipSyncAudioToVideoNode", display_name="Kling Lip Sync Video with Audio", category="api node/video/Kling", description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. 
The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", inputs=[ - comfy_io.Video.Input("video"), - comfy_io.Audio.Input("audio"), - comfy_io.Combo.Input( + IO.Video.Input("video"), + IO.Audio.Input("audio"), + IO.Combo.Input( "voice_language", options=[i.value for i in KlingLipSyncVoiceLanguage], default="en", ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1377,7 +1377,7 @@ class KlingLipSyncAudioToVideoNode(comfy_io.ComfyNode): video: VideoInput, audio: AudioInput, voice_language: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: return await execute_lipsync( auth_kwargs={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -1391,46 +1391,46 @@ class KlingLipSyncAudioToVideoNode(comfy_io.ComfyNode): ) -class KlingLipSyncTextToVideoNode(comfy_io.ComfyNode): +class KlingLipSyncTextToVideoNode(IO.ComfyNode): """Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingLipSyncTextToVideoNode", display_name="Kling Lip Sync Video with Text", category="api node/video/Kling", description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", inputs=[ - comfy_io.Video.Input("video"), - comfy_io.String.Input( + IO.Video.Input("video"), + IO.String.Input( "text", multiline=True, tooltip="Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "voice", options=list(VOICES_CONFIG.keys()), default="Melody", ), - comfy_io.Float.Input( + IO.Float.Input( "voice_speed", default=1, min=0.8, max=2.0, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Speech Rate. 
Valid range: 0.8~2.0, accurate to one decimal place.", ), ], outputs=[ - comfy_io.Video.Output(), - comfy_io.String.Output(display_name="video_id"), - comfy_io.String.Output(display_name="duration"), + IO.Video.Output(), + IO.String.Output(display_name="video_id"), + IO.String.Output(display_name="duration"), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1442,7 +1442,7 @@ class KlingLipSyncTextToVideoNode(comfy_io.ComfyNode): text: str, voice: str, voice_speed: float, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: voice_id, voice_language = VOICES_CONFIG[voice] return await execute_lipsync( auth_kwargs={ @@ -1459,32 +1459,32 @@ class KlingLipSyncTextToVideoNode(comfy_io.ComfyNode): ) -class KlingVirtualTryOnNode(comfy_io.ComfyNode): +class KlingVirtualTryOnNode(IO.ComfyNode): """Kling Virtual Try On Node.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingVirtualTryOnNode", display_name="Kling Virtual Try On", category="api node/image/Kling", description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.", inputs=[ - comfy_io.Image.Input("human_image"), - comfy_io.Image.Input("cloth_image"), - comfy_io.Combo.Input( + IO.Image.Input("human_image"), + IO.Image.Input("cloth_image"), + IO.Combo.Input( "model_name", options=[i.value for i in KlingVirtualTryOnModelName], default="kolors-virtual-try-on-v1", ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1495,7 +1495,7 @@ class KlingVirtualTryOnNode(comfy_io.ComfyNode): human_image: torch.Tensor, cloth_image: torch.Tensor, model_name: KlingVirtualTryOnModelName, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: auth = { "auth_token": cls.hidden.auth_token_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org, @@ -1534,70 +1534,70 @@ class KlingVirtualTryOnNode(comfy_io.ComfyNode): validate_image_result_response(final_response) images = get_images_from_response(final_response) - return comfy_io.NodeOutput(await image_result_to_node_output(images)) + return IO.NodeOutput(await image_result_to_node_output(images)) -class KlingImageGenerationNode(comfy_io.ComfyNode): +class KlingImageGenerationNode(IO.ComfyNode): """Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="KlingImageGenerationNode", display_name="Kling Image Generation", category="api node/image/Kling", description="Kling Image Generation Node. 
Generate an image from a text prompt with an optional reference image.", inputs=[ - comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), - comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), - comfy_io.Combo.Input( + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), + IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), + IO.Combo.Input( "image_type", options=[i.value for i in KlingImageGenImageReferenceType], ), - comfy_io.Float.Input( + IO.Float.Input( "image_fidelity", default=0.5, min=0.0, max=1.0, step=0.01, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Reference intensity for user-uploaded images", ), - comfy_io.Float.Input( + IO.Float.Input( "human_fidelity", default=0.45, min=0.0, max=1.0, step=0.01, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Subject reference similarity", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model_name", options=[i.value for i in KlingImageGenModelName], default="kling-v1", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=[i.value for i in KlingImageGenAspectRatio], default="16:9", ), - comfy_io.Int.Input( + IO.Int.Input( "n", default=1, min=1, max=9, tooltip="Number of generated images", ), - comfy_io.Image.Input("image", optional=True), + IO.Image.Input("image", optional=True), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -1614,7 +1614,7 @@ class KlingImageGenerationNode(comfy_io.ComfyNode): n: int, aspect_ratio: KlingImageGenAspectRatio, image: Optional[torch.Tensor] = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, field_name="prompt", min_length=1, max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) validate_string(negative_prompt, field_name="negative_prompt", max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) @@ -1669,12 +1669,12 @@ class KlingImageGenerationNode(comfy_io.ComfyNode): validate_image_result_response(final_response) images = get_images_from_response(final_response) - return comfy_io.NodeOutput(await image_result_to_node_output(images)) + return IO.NodeOutput(await image_result_to_node_output(images)) class KlingExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ KlingCameraControls, KlingTextToVideoNode, diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index 9cab2ca82..610d95a77 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -2,7 +2,7 @@ from __future__ import annotations from inspect import cleandoc from typing import Optional from typing_extensions import override -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis.luma_api import ( LumaImageModel, @@ -52,24 +52,24 @@ def image_result_url_extractor(response: LumaGeneration): def video_result_url_extractor(response: LumaGeneration): return response.assets.video if hasattr(response, "assets") and hasattr(response.assets, "video") else None -class 
LumaReferenceNode(comfy_io.ComfyNode): +class LumaReferenceNode(IO.ComfyNode): """ Holds an image and weight for use with Luma Generate Image node. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="LumaReferenceNode", display_name="Luma Reference", category="api node/image/Luma", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="Image to use as reference.", ), - comfy_io.Float.Input( + IO.Float.Input( "weight", default=1.0, min=0.0, @@ -77,71 +77,71 @@ class LumaReferenceNode(comfy_io.ComfyNode): step=0.01, tooltip="Weight of image reference.", ), - comfy_io.Custom(LumaIO.LUMA_REF).Input( + IO.Custom(LumaIO.LUMA_REF).Input( "luma_ref", optional=True, ), ], - outputs=[comfy_io.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")], + outputs=[IO.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], ) @classmethod def execute( cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: if luma_ref is not None: luma_ref = luma_ref.clone() else: luma_ref = LumaReferenceChain() luma_ref.add(LumaReference(image=image, weight=round(weight, 2))) - return comfy_io.NodeOutput(luma_ref) + return IO.NodeOutput(luma_ref) -class LumaConceptsNode(comfy_io.ComfyNode): +class LumaConceptsNode(IO.ComfyNode): """ Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="LumaConceptsNode", display_name="Luma Concepts", category="api node/video/Luma", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "concept1", options=get_luma_concepts(include_none=True), ), - comfy_io.Combo.Input( + IO.Combo.Input( "concept2", options=get_luma_concepts(include_none=True), ), - comfy_io.Combo.Input( + IO.Combo.Input( "concept3", options=get_luma_concepts(include_none=True), ), - comfy_io.Combo.Input( + IO.Combo.Input( "concept4", options=get_luma_concepts(include_none=True), ), - comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( + IO.Custom(LumaIO.LUMA_CONCEPTS).Input( "luma_concepts", tooltip="Optional Camera Concepts to add to the ones chosen here.", optional=True, ), ], - outputs=[comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")], + outputs=[IO.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], ) @@ -153,42 +153,42 @@ class LumaConceptsNode(comfy_io.ComfyNode): concept3: str, concept4: str, luma_concepts: LumaConceptChain = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4]) if luma_concepts is not None: chain = luma_concepts.clone_and_merge(chain) - return comfy_io.NodeOutput(chain) + return IO.NodeOutput(chain) -class LumaImageGenerationNode(comfy_io.ComfyNode): +class LumaImageGenerationNode(IO.ComfyNode): """ Generates images 
synchronously based on prompt and aspect ratio. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="LumaImageNode", display_name="Luma Text to Image", category="api node/image/Luma", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=LumaImageModel, ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=LumaAspectRatio, default=LumaAspectRatio.ratio_16_9, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -196,7 +196,7 @@ class LumaImageGenerationNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", ), - comfy_io.Float.Input( + IO.Float.Input( "style_image_weight", default=1.0, min=0.0, @@ -204,27 +204,27 @@ class LumaImageGenerationNode(comfy_io.ComfyNode): step=0.01, tooltip="Weight of style image. Ignored if no style_image provided.", ), - comfy_io.Custom(LumaIO.LUMA_REF).Input( + IO.Custom(LumaIO.LUMA_REF).Input( "image_luma_ref", tooltip="Luma Reference node connection to influence generation with input images; up to 4 images can be considered.", optional=True, ), - comfy_io.Image.Input( + IO.Image.Input( "style_image", tooltip="Style reference image; only 1 image will be used.", optional=True, ), - comfy_io.Image.Input( + IO.Image.Input( "character_image", tooltip="Character reference images; can be a batch of multiple, up to 4 images can be considered.", optional=True, ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -240,7 +240,7 @@ class LumaImageGenerationNode(comfy_io.ComfyNode): image_luma_ref: LumaReferenceChain = None, style_image: torch.Tensor = None, character_image: torch.Tensor = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=3) auth_kwargs = { "auth_token": cls.hidden.auth_token_comfy_org, @@ -306,7 +306,7 @@ class LumaImageGenerationNode(comfy_io.ComfyNode): async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.image) as img_response: img = process_image_response(await img_response.content.read()) - return comfy_io.NodeOutput(img) + return IO.NodeOutput(img) @classmethod async def _convert_luma_refs( @@ -334,29 +334,29 @@ class LumaImageGenerationNode(comfy_io.ComfyNode): return await cls._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) -class LumaImageModifyNode(comfy_io.ComfyNode): +class LumaImageModifyNode(IO.ComfyNode): """ Modifies images synchronously based on prompt and aspect ratio. 
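LumaReferenceNode above grows a reference chain without mutating its upstream input: it clones the incoming chain (or starts a fresh one) and appends a weighted reference, so two downstream branches can extend the same chain independently. A self-contained sketch of that idiom with stand-in classes (the real LumaReference/LumaReferenceChain live in comfy_api_nodes.apis.luma_api):

    from dataclasses import dataclass, field
    from typing import Optional
    import copy

    @dataclass
    class Ref:                       # stand-in for LumaReference
        image: object
        weight: float

    @dataclass
    class RefChain:                  # stand-in for LumaReferenceChain
        refs: list = field(default_factory=list)

        def clone(self) -> "RefChain":
            return copy.deepcopy(self)

        def add(self, ref: Ref) -> None:
            self.refs.append(ref)

    def add_reference(image, weight: float, upstream: Optional[RefChain] = None) -> RefChain:
        # Clone-then-append keeps the upstream node's output immutable.
        chain = upstream.clone() if upstream is not None else RefChain()
        chain.add(Ref(image=image, weight=round(weight, 2)))
        return chain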
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="LumaImageModifyNode", display_name="Luma Image to Image", category="api node/image/Luma", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input( + IO.Image.Input( "image", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the image generation", ), - comfy_io.Float.Input( + IO.Float.Input( "image_weight", default=0.1, min=0.0, @@ -364,11 +364,11 @@ class LumaImageModifyNode(comfy_io.ComfyNode): step=0.01, tooltip="Weight of the image; the closer to 1.0, the less the image will be modified.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=LumaImageModel, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -377,11 +377,11 @@ class LumaImageModifyNode(comfy_io.ComfyNode): tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", ), ], - outputs=[comfy_io.Image.Output()], + outputs=[IO.Image.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -394,7 +394,7 @@ class LumaImageModifyNode(comfy_io.ComfyNode): image: torch.Tensor, image_weight: float, seed, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: auth_kwargs = { "auth_token": cls.hidden.auth_token_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org, @@ -442,51 +442,51 @@ class LumaImageModifyNode(comfy_io.ComfyNode): async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.image) as img_response: img = process_image_response(await img_response.content.read()) - return comfy_io.NodeOutput(img) + return IO.NodeOutput(img) -class LumaTextToVideoGenerationNode(comfy_io.ComfyNode): +class LumaTextToVideoGenerationNode(IO.ComfyNode): """ Generates videos synchronously based on prompt and output_size. 
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="LumaVideoNode", display_name="Luma Text to Video", category="api node/video/Luma", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=LumaVideoModel, ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=LumaAspectRatio, default=LumaAspectRatio.ratio_16_9, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=LumaVideoOutputResolution, default=LumaVideoOutputResolution.res_540p, ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=LumaVideoModelOutputDuration, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "loop", default=False, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -494,17 +494,17 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", ), - comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( + IO.Custom(LumaIO.LUMA_CONCEPTS).Input( "luma_concepts", tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", optional=True, ) ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -520,7 +520,7 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode): loop: bool, seed, luma_concepts: LumaConceptChain = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False, min_length=3) duration = duration if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None @@ -571,51 +571,51 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode): async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.video) as vid_response: - return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) -class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): +class LumaImageToVideoGenerationNode(IO.ComfyNode): """ Generates videos synchronously based on prompt, input images, and output_size. 
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="LumaImageToVideoNode", display_name="Luma Image to Video", category="api node/video/Luma", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=LumaVideoModel, ), - # comfy_io.Combo.Input( + # IO.Combo.Input( # "aspect_ratio", # options=[ratio.value for ratio in LumaAspectRatio], # default=LumaAspectRatio.ratio_16_9, # ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=LumaVideoOutputResolution, default=LumaVideoOutputResolution.res_540p, ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=[dur.value for dur in LumaVideoModelOutputDuration], ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "loop", default=False, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -623,27 +623,27 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", ), - comfy_io.Image.Input( + IO.Image.Input( "first_image", tooltip="First frame of generated video.", optional=True, ), - comfy_io.Image.Input( + IO.Image.Input( "last_image", tooltip="Last frame of generated video.", optional=True, ), - comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( + IO.Custom(LumaIO.LUMA_CONCEPTS).Input( "luma_concepts", tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", optional=True, ) ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -660,7 +660,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): first_image: torch.Tensor = None, last_image: torch.Tensor = None, luma_concepts: LumaConceptChain = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: if first_image is None and last_image is None: raise Exception( "At least one of first_image and last_image requires an input." 
@@ -716,7 +716,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): async with aiohttp.ClientSession() as session: async with session.get(response_poll.assets.video) as vid_response: - return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) @classmethod async def _convert_to_keyframes( @@ -744,7 +744,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): class LumaExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ LumaImageGenerationNode, LumaImageModifyNode, diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index caa3d4260..23be1ae65 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -4,7 +4,7 @@ import logging import torch from typing_extensions import override -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis import ( MinimaxVideoGenerationRequest, @@ -43,7 +43,7 @@ async def _generate_mm_video( image: Optional[torch.Tensor] = None, # used for ImageToVideo subject: Optional[torch.Tensor] = None, # used for SubjectToVideo average_duration: Optional[int] = None, -) -> comfy_io.NodeOutput: +) -> IO.NodeOutput: if image is None: validate_string(prompt_text, field_name="prompt_text") # upload image, if passed in @@ -133,35 +133,35 @@ async def _generate_mm_video( error_msg = f"Failed to download video from {file_url}" logging.error(error_msg) raise Exception(error_msg) - return comfy_io.NodeOutput(VideoFromFile(video_io)) + return IO.NodeOutput(VideoFromFile(video_io)) -class MinimaxTextToVideoNode(comfy_io.ComfyNode): +class MinimaxTextToVideoNode(IO.ComfyNode): """ Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API. 
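Each converted file ends the same way: a ComfyExtension subclass whose get_node_list is now annotated against IO.ComfyNode. Putting the pieces together, a minimal end-to-end sketch under the new spelling (node id and behavior hypothetical):

    from typing_extensions import override
    from comfy_api.latest import ComfyExtension, IO

    class EchoNode(IO.ComfyNode):
        """Hypothetical node: echoes its prompt string back."""

        @classmethod
        def define_schema(cls) -> IO.Schema:
            return IO.Schema(
                node_id="EchoNode",  # hypothetical id
                display_name="Echo",
                category="api node/example",
                description="Echoes the prompt.",
                inputs=[IO.String.Input("prompt", multiline=True)],
                outputs=[IO.String.Output()],
            )

        @classmethod
        def execute(cls, prompt: str) -> IO.NodeOutput:
            return IO.NodeOutput(prompt)

    class EchoExtension(ComfyExtension):
        @override
        async def get_node_list(self) -> list[type[IO.ComfyNode]]:
            return [EchoNode]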
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="MinimaxTextToVideoNode", display_name="MiniMax Text to Video", category="api node/video/MiniMax", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt_text", multiline=True, default="", tooltip="Text prompt to guide the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["T2V-01", "T2V-01-Director"], default="T2V-01", tooltip="Model to use for video generation", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -172,11 +172,11 @@ class MinimaxTextToVideoNode(comfy_io.ComfyNode): optional=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -187,7 +187,7 @@ class MinimaxTextToVideoNode(comfy_io.ComfyNode): prompt_text: str, model: str = "T2V-01", seed: int = 0, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: return await _generate_mm_video( auth={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -203,36 +203,36 @@ class MinimaxTextToVideoNode(comfy_io.ComfyNode): ) -class MinimaxImageToVideoNode(comfy_io.ComfyNode): +class MinimaxImageToVideoNode(IO.ComfyNode): """ Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="MinimaxImageToVideoNode", display_name="MiniMax Image to Video", category="api node/video/MiniMax", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="Image to use as first frame of video generation", ), - comfy_io.String.Input( + IO.String.Input( "prompt_text", multiline=True, default="", tooltip="Text prompt to guide the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["I2V-01-Director", "I2V-01", "I2V-01-live"], default="I2V-01", tooltip="Model to use for video generation", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -243,11 +243,11 @@ class MinimaxImageToVideoNode(comfy_io.ComfyNode): optional=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -259,7 +259,7 @@ class MinimaxImageToVideoNode(comfy_io.ComfyNode): prompt_text: str, model: str = "I2V-01", seed: int = 0, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: return await _generate_mm_video( auth={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -275,36 +275,36 @@ class MinimaxImageToVideoNode(comfy_io.ComfyNode): ) -class MinimaxSubjectToVideoNode(comfy_io.ComfyNode): +class MinimaxSubjectToVideoNode(IO.ComfyNode): """ Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. 
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="MinimaxSubjectToVideoNode", display_name="MiniMax Subject to Video", category="api node/video/MiniMax", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input( + IO.Image.Input( "subject", tooltip="Image of subject to reference for video generation", ), - comfy_io.String.Input( + IO.String.Input( "prompt_text", multiline=True, default="", tooltip="Text prompt to guide the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["S2V-01"], default="S2V-01", tooltip="Model to use for video generation", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -315,11 +315,11 @@ class MinimaxSubjectToVideoNode(comfy_io.ComfyNode): optional=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -331,7 +331,7 @@ class MinimaxSubjectToVideoNode(comfy_io.ComfyNode): prompt_text: str, model: str = "S2V-01", seed: int = 0, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: return await _generate_mm_video( auth={ "auth_token": cls.hidden.auth_token_comfy_org, @@ -347,24 +347,24 @@ class MinimaxSubjectToVideoNode(comfy_io.ComfyNode): ) -class MinimaxHailuoVideoNode(comfy_io.ComfyNode): +class MinimaxHailuoVideoNode(IO.ComfyNode): """Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="MinimaxHailuoVideoNode", display_name="MiniMax Hailuo Video", category="api node/video/MiniMax", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt_text", multiline=True, default="", tooltip="Text prompt to guide the video generation.", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -374,25 +374,25 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode): tooltip="The random seed used for creating the noise.", optional=True, ), - comfy_io.Image.Input( + IO.Image.Input( "first_frame_image", tooltip="Optional image to use as the first frame to generate a video.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_optimizer", default=True, tooltip="Optimize prompt to improve generation quality when needed.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=[6, 10], default=6, tooltip="The length of the output video in seconds.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=["768P", "1080P"], default="768P", @@ -400,11 +400,11 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode): optional=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -419,7 +419,7 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode): duration: int = 6, resolution: str = "768P", model: str = "MiniMax-Hailuo-02", - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: auth = { "auth_token": 
cls.hidden.auth_token_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org, @@ -513,12 +513,12 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode): error_msg = f"Failed to download video from {file_url}" logging.error(error_msg) raise Exception(error_msg) - return comfy_io.NodeOutput(VideoFromFile(video_io)) + return IO.NodeOutput(VideoFromFile(video_io)) class MinimaxExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ MinimaxTextToVideoNode, MinimaxImageToVideoNode, diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 77e4b536c..7566188dd 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -22,10 +22,11 @@ from comfy_api_nodes.apinode_utils import ( download_url_to_video_output, upload_images_to_comfyapi, upload_video_to_comfyapi, + validate_container_format_is_mp4, ) from comfy_api.input import VideoInput -from comfy_api.latest import ComfyExtension, InputImpl, io as comfy_io +from comfy_api.latest import ComfyExtension, InputImpl, IO import av import io @@ -144,7 +145,7 @@ def validate_video_to_video_input(video: VideoInput) -> VideoInput: """ width, height = _get_video_dimensions(video) _validate_video_dimensions(width, height) - _validate_container_format(video) + validate_container_format_is_mp4(video) return _validate_and_trim_duration(video) @@ -177,15 +178,6 @@ def _validate_video_dimensions(width: int, height: int) -> None: ) -def _validate_container_format(video: VideoInput) -> None: - """Validates video container format is MP4.""" - container_format = video.get_container_format() - if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]: - raise ValueError( - f"Only MP4 container format supported. 
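The surrounding Moonvalley hunk swaps the module-private `_validate_container_format` for a shared `validate_container_format_is_mp4` imported from `apinode_utils`. The shared helper's body is not shown in this patch; assuming it centralizes exactly the check the removed helper performed, it would look roughly like this sketch:

from comfy_api.input import VideoInput

def validate_container_format_is_mp4(video: VideoInput) -> None:
    # MP4 files probed via PyAV typically report the compound format
    # string "mov,mp4,m4a,3gp,3g2,mj2" rather than plain "mp4".
    container_format = video.get_container_format()
    if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]:
        raise ValueError(f"Only MP4 container format is supported. Got: {container_format}")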
Got: {container_format}" - ) - - def _validate_and_trim_duration(video: VideoInput) -> VideoInput: """Validates video duration and trims to 5 seconds if needed.""" duration = video.get_duration() @@ -362,25 +354,25 @@ async def get_response( ) -class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): +class MoonvalleyImg2VideoNode(IO.ComfyNode): @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="MoonvalleyImg2VideoNode", display_name="Moonvalley Marey Image to Video", category="api node/video/Moonvalley Marey", description="Moonvalley Marey Image to Video Node", inputs=[ - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="The reference image used to generate the video", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default=" gopro, bright, contrast, static, overexposed, vignette, " @@ -391,7 +383,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): "wobbly, weird, low quality, plastic, stock footage, video camera, boring", tooltip="Negative prompt text", ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=[ "16:9 (1920 x 1080)", @@ -404,7 +396,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): default="16:9 (1920 x 1080)", tooltip="Resolution of the output video", ), - comfy_io.Float.Input( + IO.Float.Input( "prompt_adherence", default=4.5, min=1.0, @@ -412,17 +404,17 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): step=1.0, tooltip="Guidance scale for generation control", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=9, min=0, max=4294967295, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Random seed value", control_after_generate=True, ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=33, min=1, @@ -431,11 +423,11 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): tooltip="Number of denoising steps", ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -450,7 +442,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): prompt_adherence: float, seed: int, steps: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_image_dimensions(image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH) validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) width_height = parse_width_height_from_res(resolution) @@ -500,25 +492,25 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id ) video = await download_url_to_video_output(final_response.output_url) - return comfy_io.NodeOutput(video) + return IO.NodeOutput(video) -class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): +class MoonvalleyVideo2VideoNode(IO.ComfyNode): @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="MoonvalleyVideo2VideoNode", display_name="Moonvalley Marey Video to Video", category="api node/video/Moonvalley Marey", description="", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="Describes the video to generate", ), - 
comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default=" gopro, bright, contrast, static, overexposed, vignette, " @@ -529,28 +521,28 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): "wobbly, weird, low quality, plastic, stock footage, video camera, boring", tooltip="Negative prompt text", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=9, min=0, max=4294967295, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Random seed value", control_after_generate=False, ), - comfy_io.Video.Input( + IO.Video.Input( "video", tooltip="The reference video used to generate the output video. Must be at least 5 seconds long. " "Videos longer than 5s will be automatically trimmed. Only MP4 format supported.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "control_type", options=["Motion Transfer", "Pose Transfer"], default="Motion Transfer", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "motion_intensity", default=100, min=0, @@ -559,21 +551,21 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): tooltip="Only used if control_type is 'Motion Transfer'", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=33, min=1, max=100, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Number of inference steps", ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -589,7 +581,7 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): motion_intensity: Optional[int] = 100, steps=33, prompt_adherence=4.5, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: auth = { "auth_token": cls.hidden.auth_token_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org, @@ -641,24 +633,24 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): ) video = await download_url_to_video_output(final_response.output_url) - return comfy_io.NodeOutput(video) + return IO.NodeOutput(video) -class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): +class MoonvalleyTxt2VideoNode(IO.ComfyNode): @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="MoonvalleyTxt2VideoNode", display_name="Moonvalley Marey Text to Video", category="api node/video/Moonvalley Marey", description="", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default=" gopro, bright, contrast, static, overexposed, vignette, " @@ -669,7 +661,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): "wobbly, weird, low quality, plastic, stock footage, video camera, boring", tooltip="Negative prompt text", ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=[ "16:9 (1920 x 1080)", @@ -682,7 +674,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): default="16:9 (1920 x 1080)", tooltip="Resolution of the output video", ), - comfy_io.Float.Input( + IO.Float.Input( "prompt_adherence", default=4.0, min=1.0, @@ -690,17 +682,17 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): step=1.0, tooltip="Guidance scale for generation control", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=9, min=0, max=4294967295, step=1, - 
display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Random seed value", ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=33, min=1, @@ -709,11 +701,11 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): tooltip="Inference steps", ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -727,7 +719,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): prompt_adherence: float, seed: int, steps: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) width_height = parse_width_height_from_res(resolution) @@ -768,12 +760,12 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode): ) video = await download_url_to_video_output(final_response.output_url) - return comfy_io.NodeOutput(video) + return IO.NodeOutput(video) class MoonvalleyExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ MoonvalleyImg2VideoNode, MoonvalleyTxt2VideoNode, diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index 822cfee64..27cb0067b 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -12,7 +12,7 @@ from typing import Optional, TypeVar import torch from typing_extensions import override -from comfy_api.latest import ComfyExtension, comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput from comfy_api_nodes.apinode_utils import ( download_url_to_video_output, @@ -47,7 +47,7 @@ async def execute_task( initial_operation: SynchronousOperation[R, pika_defs.PikaGenerateResponse], auth_kwargs: Optional[dict[str, str]] = None, node_id: Optional[str] = None, -) -> comfy_io.NodeOutput: +) -> IO.NodeOutput: task_id = (await initial_operation.execute()).video_id final_response: pika_defs.PikaVideoResponse = await PollingOperation( poll_endpoint=ApiEndpoint( @@ -72,39 +72,39 @@ async def execute_task( raise Exception(error_msg) video_url = final_response.url logging.info("Pika task %s succeeded. 
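Pika's `execute_task` above follows the submit-then-poll shape shared by these API nodes: a SynchronousOperation creates the task, a PollingOperation re-queries the status endpoint until a terminal state, and only then is the resulting video URL downloaded. Stripped of the client classes, the control flow reduces to roughly the following sketch (`fetch_status` and the status sets are placeholders, not the real client API):

import asyncio

TERMINAL_OK = {"finished"}
TERMINAL_FAILED = {"failed", "cancelled"}

async def poll_until_done(task_id, fetch_status, interval_s=5.0):
    # Re-query the task until it reaches a terminal state.
    while True:
        response = await fetch_status(task_id)
        if response.status in TERMINAL_OK:
            return response
        if response.status in TERMINAL_FAILED:
            raise Exception(f"Task {task_id} ended in state {response.status}")
        await asyncio.sleep(interval_s)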
Video URL: %s", task_id, video_url) - return comfy_io.NodeOutput(await download_url_to_video_output(video_url)) + return IO.NodeOutput(await download_url_to_video_output(video_url)) -def get_base_inputs_types() -> list[comfy_io.Input]: +def get_base_inputs_types() -> list[IO.Input]: """Get the base required inputs types common to all Pika nodes.""" return [ - comfy_io.String.Input("prompt_text", multiline=True), - comfy_io.String.Input("negative_prompt", multiline=True), - comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), - comfy_io.Combo.Input("resolution", options=["1080p", "720p"], default="1080p"), - comfy_io.Combo.Input("duration", options=[5, 10], default=5), + IO.String.Input("prompt_text", multiline=True), + IO.String.Input("negative_prompt", multiline=True), + IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), + IO.Combo.Input("resolution", options=["1080p", "720p"], default="1080p"), + IO.Combo.Input("duration", options=[5, 10], default=5), ] -class PikaImageToVideo(comfy_io.ComfyNode): +class PikaImageToVideo(IO.ComfyNode): """Pika 2.2 Image to Video Node.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PikaImageToVideoNode2_2", display_name="Pika Image to Video", description="Sends an image and prompt to the Pika API v2.2 to generate a video.", category="api node/video/Pika", inputs=[ - comfy_io.Image.Input("image", tooltip="The image to convert to video"), + IO.Image.Input("image", tooltip="The image to convert to video"), *get_base_inputs_types(), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -118,7 +118,7 @@ class PikaImageToVideo(comfy_io.ComfyNode): seed: int, resolution: str, duration: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: image_bytes_io = tensor_to_bytesio(image) pika_files = {"image": ("image.png", image_bytes_io, "image/png")} pika_request_data = pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost( @@ -147,19 +147,19 @@ class PikaImageToVideo(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaTextToVideoNode(comfy_io.ComfyNode): +class PikaTextToVideoNode(IO.ComfyNode): """Pika Text2Video v2.2 Node.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PikaTextToVideoNode2_2", display_name="Pika Text to Video", description="Sends a text prompt to the Pika API v2.2 to generate a video.", category="api node/video/Pika", inputs=[ *get_base_inputs_types(), - comfy_io.Float.Input( + IO.Float.Input( "aspect_ratio", step=0.001, min=0.4, @@ -168,11 +168,11 @@ class PikaTextToVideoNode(comfy_io.ComfyNode): tooltip="Aspect ratio (width / height)", ) ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -186,7 +186,7 @@ class PikaTextToVideoNode(comfy_io.ComfyNode): resolution: str, duration: int, aspect_ratio: float, - ) -> comfy_io.NodeOutput: + 
) -> IO.NodeOutput: auth = { "auth_token": cls.hidden.auth_token_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org, @@ -212,24 +212,24 @@ class PikaTextToVideoNode(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaScenes(comfy_io.ComfyNode): +class PikaScenes(IO.ComfyNode): """PikaScenes v2.2 Node.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PikaScenesV2_2", display_name="Pika Scenes (Video Image Composition)", description="Combine your images to create a video with the objects in them. Upload multiple images as ingredients and generate a high-quality video that incorporates all of them.", category="api node/video/Pika", inputs=[ *get_base_inputs_types(), - comfy_io.Combo.Input( + IO.Combo.Input( "ingredients_mode", options=["creative", "precise"], default="creative", ), - comfy_io.Float.Input( + IO.Float.Input( "aspect_ratio", step=0.001, min=0.4, @@ -237,37 +237,37 @@ class PikaScenes(comfy_io.ComfyNode): default=1.7777777777777777, tooltip="Aspect ratio (width / height)", ), - comfy_io.Image.Input( + IO.Image.Input( "image_ingredient_1", optional=True, tooltip="Image that will be used as ingredient to create a video.", ), - comfy_io.Image.Input( + IO.Image.Input( "image_ingredient_2", optional=True, tooltip="Image that will be used as ingredient to create a video.", ), - comfy_io.Image.Input( + IO.Image.Input( "image_ingredient_3", optional=True, tooltip="Image that will be used as ingredient to create a video.", ), - comfy_io.Image.Input( + IO.Image.Input( "image_ingredient_4", optional=True, tooltip="Image that will be used as ingredient to create a video.", ), - comfy_io.Image.Input( + IO.Image.Input( "image_ingredient_5", optional=True, tooltip="Image that will be used as ingredient to create a video.", ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -287,7 +287,7 @@ class PikaScenes(comfy_io.ComfyNode): image_ingredient_3: Optional[torch.Tensor] = None, image_ingredient_4: Optional[torch.Tensor] = None, image_ingredient_5: Optional[torch.Tensor] = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: all_image_bytes_io = [] for image in [ image_ingredient_1, @@ -333,33 +333,33 @@ class PikaScenes(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikAdditionsNode(comfy_io.ComfyNode): +class PikAdditionsNode(IO.ComfyNode): """Pika Pikadditions Node. Add an image into a video.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Pikadditions", display_name="Pikadditions (Video Object Insertion)", description="Add any object or image into your video. 
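The Pika nodes above attach images as multipart file fields such as ("image.png", image_bytes_io, "image/png"), converting the input tensor with `tensor_to_bytesio`. Assuming the usual ComfyUI image layout, a float tensor of shape [B, H, W, C] with values in 0-1, the conversion amounts to the following sketch (not the actual helper's source):

from io import BytesIO

import torch
from PIL import Image

def tensor_to_png_bytesio(image: torch.Tensor) -> BytesIO:
    # Take the first image in the batch and map 0-1 floats to 8-bit channels.
    array = (image[0].clamp(0, 1) * 255).round().to(torch.uint8).cpu().numpy()
    buffer = BytesIO()
    Image.fromarray(array).save(buffer, format="PNG")
    buffer.seek(0)  # rewind so the multipart upload reads from the start
    return buffer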
Upload a video and specify what you'd like to add to create a seamlessly integrated result.", category="api node/video/Pika", inputs=[ - comfy_io.Video.Input("video", tooltip="The video to add an image to."), - comfy_io.Image.Input("image", tooltip="The image to add to the video."), - comfy_io.String.Input("prompt_text", multiline=True), - comfy_io.String.Input("negative_prompt", multiline=True), - comfy_io.Int.Input( + IO.Video.Input("video", tooltip="The video to add an image to."), + IO.Image.Input("image", tooltip="The image to add to the video."), + IO.String.Input("prompt_text", multiline=True), + IO.String.Input("negative_prompt", multiline=True), + IO.Int.Input( "seed", min=0, max=0xFFFFFFFF, control_after_generate=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -372,7 +372,7 @@ class PikAdditionsNode(comfy_io.ComfyNode): prompt_text: str, negative_prompt: str, seed: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: video_bytes_io = BytesIO() video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) video_bytes_io.seek(0) @@ -407,43 +407,43 @@ class PikAdditionsNode(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaSwapsNode(comfy_io.ComfyNode): +class PikaSwapsNode(IO.ComfyNode): """Pika Pikaswaps Node.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Pikaswaps", display_name="Pika Swaps (Video Object Replacement)", description="Swap out any object or region of your video with a new image or object. 
Define areas to replace either with a mask or coordinates.", category="api node/video/Pika", inputs=[ - comfy_io.Video.Input("video", tooltip="The video to swap an object in."), - comfy_io.Image.Input( + IO.Video.Input("video", tooltip="The video to swap an object in."), + IO.Image.Input( "image", tooltip="The image used to replace the masked object in the video.", optional=True, ), - comfy_io.Mask.Input( + IO.Mask.Input( "mask", tooltip="Use the mask to define areas in the video to replace.", optional=True, ), - comfy_io.String.Input("prompt_text", multiline=True, optional=True), - comfy_io.String.Input("negative_prompt", multiline=True, optional=True), - comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True, optional=True), - comfy_io.String.Input( + IO.String.Input("prompt_text", multiline=True, optional=True), + IO.String.Input("negative_prompt", multiline=True, optional=True), + IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True, optional=True), + IO.String.Input( "region_to_modify", multiline=True, optional=True, tooltip="Plaintext description of the object / region to modify.", ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -458,7 +458,7 @@ class PikaSwapsNode(comfy_io.ComfyNode): negative_prompt: str = "", seed: int = 0, region_to_modify: str = "", - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: video_bytes_io = BytesIO() video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) video_bytes_io.seek(0) @@ -495,30 +495,30 @@ class PikaSwapsNode(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaffectsNode(comfy_io.ComfyNode): +class PikaffectsNode(IO.ComfyNode): """Pika Pikaffects Node.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Pikaffects", display_name="Pikaffects (Video Effects)", description="Generate a video with a specific Pikaffect. 
Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear", category="api node/video/Pika", inputs=[ - comfy_io.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."), - comfy_io.Combo.Input( + IO.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."), + IO.Combo.Input( "pikaffect", options=pika_defs.Pikaffect, default="Cake-ify" ), - comfy_io.String.Input("prompt_text", multiline=True), - comfy_io.String.Input("negative_prompt", multiline=True), - comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), + IO.String.Input("prompt_text", multiline=True), + IO.String.Input("negative_prompt", multiline=True), + IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -531,7 +531,7 @@ class PikaffectsNode(comfy_io.ComfyNode): prompt_text: str, negative_prompt: str, seed: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: auth = { "auth_token": cls.hidden.auth_token_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org, @@ -556,26 +556,26 @@ class PikaffectsNode(comfy_io.ComfyNode): return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) -class PikaStartEndFrameNode(comfy_io.ComfyNode): +class PikaStartEndFrameNode(IO.ComfyNode): """PikaFrames v2.2 Node.""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PikaStartEndFrameNode2_2", display_name="Pika Start and End Frame to Video", description="Generate a video by combining your first and last frame. 
Upload two images to define the start and end points, and let the AI create a smooth transition between them.", category="api node/video/Pika", inputs=[ - comfy_io.Image.Input("image_start", tooltip="The first image to combine."), - comfy_io.Image.Input("image_end", tooltip="The last image to combine."), + IO.Image.Input("image_start", tooltip="The first image to combine."), + IO.Image.Input("image_end", tooltip="The last image to combine."), *get_base_inputs_types(), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -590,7 +590,7 @@ class PikaStartEndFrameNode(comfy_io.ComfyNode): seed: int, resolution: str, duration: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt_text, field_name="prompt_text", min_length=1) pika_files = [ ("keyFrames", ("image_start.png", tensor_to_bytesio(image_start), "image/png")), @@ -623,7 +623,7 @@ class PikaStartEndFrameNode(comfy_io.ComfyNode): class PikaApiNodesExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ PikaImageToVideo, PikaTextToVideoNode, diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index a97610f06..438a7f80b 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ b/comfy_api_nodes/nodes_pixverse.py @@ -29,7 +29,7 @@ from comfy_api_nodes.apinode_utils import ( validate_string, ) from comfy_api.input_impl import VideoFromFile -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO import torch import aiohttp @@ -73,69 +73,69 @@ async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): return response_upload.Resp.img_id -class PixverseTemplateNode(comfy_io.ComfyNode): +class PixverseTemplateNode(IO.ComfyNode): """ Select template for PixVerse Video generation. """ @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PixverseTemplateNode", display_name="PixVerse Template", category="api node/video/PixVerse", inputs=[ - comfy_io.Combo.Input("template", options=list(pixverse_templates.keys())), + IO.Combo.Input("template", options=list(pixverse_templates.keys())), ], - outputs=[comfy_io.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")], + outputs=[IO.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")], ) @classmethod - def execute(cls, template: str) -> comfy_io.NodeOutput: + def execute(cls, template: str) -> IO.NodeOutput: template_id = pixverse_templates.get(template, None) if template_id is None: raise Exception(f"Template '{template}' is not recognized.") # just return the integer - return comfy_io.NodeOutput(template_id) + return IO.NodeOutput(template_id) -class PixverseTextToVideoNode(comfy_io.ComfyNode): +class PixverseTextToVideoNode(IO.ComfyNode): """ Generates videos based on prompt and output_size. 
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PixverseTextToVideoNode", display_name="PixVerse Text to Video", category="api node/video/PixVerse", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=PixverseAspectRatio, ), - comfy_io.Combo.Input( + IO.Combo.Input( "quality", options=PixverseQuality, default=PixverseQuality.res_540p, ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration_seconds", options=PixverseDuration, ), - comfy_io.Combo.Input( + IO.Combo.Input( "motion_mode", options=PixverseMotionMode, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -143,24 +143,24 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="Seed for video generation.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", default="", multiline=True, tooltip="An optional text description of undesired elements on an image.", optional=True, ), - comfy_io.Custom(PixverseIO.TEMPLATE).Input( + IO.Custom(PixverseIO.TEMPLATE).Input( "pixverse_template", tooltip="An optional template to influence style of generation, created by the PixVerse Template node.", optional=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -176,7 +176,7 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode): seed, negative_prompt: str = None, pixverse_template: int = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -237,43 +237,43 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode): async with aiohttp.ClientSession() as session: async with session.get(response_poll.Resp.url) as vid_response: - return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) -class PixverseImageToVideoNode(comfy_io.ComfyNode): +class PixverseImageToVideoNode(IO.ComfyNode): """ Generates videos based on prompt and output_size. 
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PixverseImageToVideoNode", display_name="PixVerse Image to Video", category="api node/video/PixVerse", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("image"), - comfy_io.String.Input( + IO.Image.Input("image"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "quality", options=PixverseQuality, default=PixverseQuality.res_540p, ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration_seconds", options=PixverseDuration, ), - comfy_io.Combo.Input( + IO.Combo.Input( "motion_mode", options=PixverseMotionMode, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -281,24 +281,24 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="Seed for video generation.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", default="", multiline=True, tooltip="An optional text description of undesired elements on an image.", optional=True, ), - comfy_io.Custom(PixverseIO.TEMPLATE).Input( + IO.Custom(PixverseIO.TEMPLATE).Input( "pixverse_template", tooltip="An optional template to influence style of generation, created by the PixVerse Template node.", optional=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -314,7 +314,7 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode): seed, negative_prompt: str = None, pixverse_template: int = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) auth = { "auth_token": cls.hidden.auth_token_comfy_org, @@ -377,44 +377,44 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode): async with aiohttp.ClientSession() as session: async with session.get(response_poll.Resp.url) as vid_response: - return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) -class PixverseTransitionVideoNode(comfy_io.ComfyNode): +class PixverseTransitionVideoNode(IO.ComfyNode): """ Generates videos based on prompt and output_size. 
""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="PixverseTransitionVideoNode", display_name="PixVerse Transition Video", category="api node/video/PixVerse", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("first_frame"), - comfy_io.Image.Input("last_frame"), - comfy_io.String.Input( + IO.Image.Input("first_frame"), + IO.Image.Input("last_frame"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt for the video generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "quality", options=PixverseQuality, default=PixverseQuality.res_540p, ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration_seconds", options=PixverseDuration, ), - comfy_io.Combo.Input( + IO.Combo.Input( "motion_mode", options=PixverseMotionMode, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, @@ -422,7 +422,7 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode): control_after_generate=True, tooltip="Seed for video generation.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", default="", multiline=True, @@ -430,11 +430,11 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode): optional=True, ), ], - outputs=[comfy_io.Video.Output()], + outputs=[IO.Video.Output()], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -450,7 +450,7 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode): motion_mode: str, seed, negative_prompt: str = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) auth = { "auth_token": cls.hidden.auth_token_comfy_org, @@ -514,12 +514,12 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode): async with aiohttp.ClientSession() as session: async with session.get(response_poll.Resp.url) as vid_response: - return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) class PixVerseExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ PixverseTextToVideoNode, PixverseImageToVideoNode, diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index 0eb762a1c..cf2172bd6 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -32,20 +32,20 @@ from comfy_api_nodes.apis.client import ( SynchronousOperation, PollingOperation, ) -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO COMMON_PARAMETERS = [ - comfy_io.Int.Input( + IO.Int.Input( "Seed", default=0, min=0, max=65535, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), - comfy_io.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True), - comfy_io.Combo.Input( + IO.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True), + IO.Combo.Input( "Polygon_count", options=["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"], default="18K-Quad", @@ -259,24 +259,24 @@ async def download_files(url_list, task_uuid): return model_file_path -class Rodin3D_Regular(comfy_io.ComfyNode): +class 
Rodin3D_Regular(IO.ComfyNode): """Generate 3D Assets using Rodin API""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Rodin3D_Regular", display_name="Rodin 3D Generate - Regular Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("Images"), + IO.Image.Input("Images"), *COMMON_PARAMETERS, ], - outputs=[comfy_io.String.Output(display_name="3D Model Path")], + outputs=[IO.String.Output(display_name="3D Model Path")], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, ], is_api_node=True, ) @@ -288,7 +288,7 @@ class Rodin3D_Regular(comfy_io.ComfyNode): Seed, Material_Type, Polygon_count, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: tier = "Regular" num_images = Images.shape[0] m_images = [] @@ -312,27 +312,27 @@ class Rodin3D_Regular(comfy_io.ComfyNode): download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) model = await download_files(download_list, task_uuid) - return comfy_io.NodeOutput(model) + return IO.NodeOutput(model) -class Rodin3D_Detail(comfy_io.ComfyNode): +class Rodin3D_Detail(IO.ComfyNode): """Generate 3D Assets using Rodin API""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Rodin3D_Detail", display_name="Rodin 3D Generate - Detail Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("Images"), + IO.Image.Input("Images"), *COMMON_PARAMETERS, ], - outputs=[comfy_io.String.Output(display_name="3D Model Path")], + outputs=[IO.String.Output(display_name="3D Model Path")], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, ], is_api_node=True, ) @@ -344,7 +344,7 @@ class Rodin3D_Detail(comfy_io.ComfyNode): Seed, Material_Type, Polygon_count, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: tier = "Detail" num_images = Images.shape[0] m_images = [] @@ -368,27 +368,27 @@ class Rodin3D_Detail(comfy_io.ComfyNode): download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) model = await download_files(download_list, task_uuid) - return comfy_io.NodeOutput(model) + return IO.NodeOutput(model) -class Rodin3D_Smooth(comfy_io.ComfyNode): +class Rodin3D_Smooth(IO.ComfyNode): """Generate 3D Assets using Rodin API""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Rodin3D_Smooth", display_name="Rodin 3D Generate - Smooth Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("Images"), + IO.Image.Input("Images"), *COMMON_PARAMETERS, ], - outputs=[comfy_io.String.Output(display_name="3D Model Path")], + outputs=[IO.String.Output(display_name="3D Model Path")], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, ], is_api_node=True, ) @@ -400,7 +400,7 @@ class Rodin3D_Smooth(comfy_io.ComfyNode): Seed, Material_Type, Polygon_count, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: tier = "Smooth" num_images = Images.shape[0] m_images = [] @@ -424,34 +424,34 @@ class 
Rodin3D_Smooth(comfy_io.ComfyNode): download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) model = await download_files(download_list, task_uuid) - return comfy_io.NodeOutput(model) + return IO.NodeOutput(model) -class Rodin3D_Sketch(comfy_io.ComfyNode): +class Rodin3D_Sketch(IO.ComfyNode): """Generate 3D Assets using Rodin API""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Rodin3D_Sketch", display_name="Rodin 3D Generate - Sketch Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("Images"), - comfy_io.Int.Input( + IO.Image.Input("Images"), + IO.Int.Input( "Seed", default=0, min=0, max=65535, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), ], - outputs=[comfy_io.String.Output(display_name="3D Model Path")], + outputs=[IO.String.Output(display_name="3D Model Path")], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, ], is_api_node=True, ) @@ -461,7 +461,7 @@ class Rodin3D_Sketch(comfy_io.ComfyNode): cls, Images, Seed, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: tier = "Sketch" num_images = Images.shape[0] m_images = [] @@ -487,42 +487,42 @@ class Rodin3D_Sketch(comfy_io.ComfyNode): download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) model = await download_files(download_list, task_uuid) - return comfy_io.NodeOutput(model) + return IO.NodeOutput(model) -class Rodin3D_Gen2(comfy_io.ComfyNode): +class Rodin3D_Gen2(IO.ComfyNode): """Generate 3D Assets using Rodin API""" @classmethod - def define_schema(cls) -> comfy_io.Schema: - return comfy_io.Schema( + def define_schema(cls) -> IO.Schema: + return IO.Schema( node_id="Rodin3D_Gen2", display_name="Rodin 3D Generate - Gen-2 Generate", category="api node/3d/Rodin", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("Images"), - comfy_io.Int.Input( + IO.Image.Input("Images"), + IO.Int.Input( "Seed", default=0, min=0, max=65535, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, optional=True, ), - comfy_io.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True), - comfy_io.Combo.Input( + IO.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True), + IO.Combo.Input( "Polygon_count", options=["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "2K-Triangle", "20K-Triangle", "150K-Triangle", "500K-Triangle"], default="500K-Triangle", optional=True, ), - comfy_io.Boolean.Input("TAPose", default=False), + IO.Boolean.Input("TAPose", default=False), ], - outputs=[comfy_io.String.Output(display_name="3D Model Path")], + outputs=[IO.String.Output(display_name="3D Model Path")], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, ], is_api_node=True, ) @@ -535,7 +535,7 @@ class Rodin3D_Gen2(comfy_io.ComfyNode): Material_Type, Polygon_count, TAPose, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: tier = "Gen-2" num_images = Images.shape[0] m_images = [] @@ -560,12 +560,12 @@ class Rodin3D_Gen2(comfy_io.ComfyNode): download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) model = await download_files(download_list, task_uuid) - return comfy_io.NodeOutput(model) + 
return IO.NodeOutput(model) class Rodin3DExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ Rodin3D_Regular, Rodin3D_Detail, diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py index ea22692cb..eb03a897d 100644 --- a/comfy_api_nodes/nodes_runway.py +++ b/comfy_api_nodes/nodes_runway.py @@ -48,7 +48,7 @@ from comfy_api_nodes.apinode_utils import ( download_url_to_image_tensor, ) from comfy_api.input_impl import VideoFromFile -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api_nodes.util.validation_utils import validate_image_dimensions, validate_image_aspect_ratio PATH_IMAGE_TO_VIDEO = "/proxy/runway/image_to_video" @@ -175,11 +175,11 @@ async def generate_video( return await download_url_to_video_output(video_url) -class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode): +class RunwayImageToVideoNodeGen3a(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="RunwayImageToVideoNodeGen3a", display_name="Runway Image to Video (Gen3a Turbo)", category="api node/video/Runway", @@ -188,42 +188,42 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode): "your input selections will set your generation up for success: " "https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Text prompt for the generation", ), - comfy_io.Image.Input( + IO.Image.Input( "start_frame", tooltip="Start frame to be used for the video", ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=Duration, ), - comfy_io.Combo.Input( + IO.Combo.Input( "ratio", options=RunwayGen3aAspectRatio, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967295, step=1, control_after_generate=True, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Random seed for generation", ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -236,7 +236,7 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode): duration: str, ratio: str, seed: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, min_length=1) validate_image_dimensions(start_frame, max_width=7999, max_height=7999) validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) @@ -253,7 +253,7 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode): auth_kwargs=auth_kwargs, ) - return comfy_io.NodeOutput( + return IO.NodeOutput( await generate_video( RunwayImageToVideoRequest( promptText=prompt, @@ -275,11 +275,11 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode): ) -class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode): +class RunwayImageToVideoNodeGen4(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="RunwayImageToVideoNodeGen4", display_name="Runway Image to Video (Gen4 Turbo)", category="api node/video/Runway", @@ -288,42 +288,42 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode): "your input selections will set your generation up for 
success: " "https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Text prompt for the generation", ), - comfy_io.Image.Input( + IO.Image.Input( "start_frame", tooltip="Start frame to be used for the video", ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=Duration, ), - comfy_io.Combo.Input( + IO.Combo.Input( "ratio", options=RunwayGen4TurboAspectRatio, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967295, step=1, control_after_generate=True, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Random seed for generation", ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -336,7 +336,7 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode): duration: str, ratio: str, seed: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, min_length=1) validate_image_dimensions(start_frame, max_width=7999, max_height=7999) validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) @@ -353,7 +353,7 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode): auth_kwargs=auth_kwargs, ) - return comfy_io.NodeOutput( + return IO.NodeOutput( await generate_video( RunwayImageToVideoRequest( promptText=prompt, @@ -376,11 +376,11 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode): ) -class RunwayFirstLastFrameNode(comfy_io.ComfyNode): +class RunwayFirstLastFrameNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="RunwayFirstLastFrameNode", display_name="Runway First-Last-Frame to Video", category="api node/video/Runway", @@ -392,46 +392,46 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode): "will set your generation up for success: " "https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Text prompt for the generation", ), - comfy_io.Image.Input( + IO.Image.Input( "start_frame", tooltip="Start frame to be used for the video", ), - comfy_io.Image.Input( + IO.Image.Input( "end_frame", tooltip="End frame to be used for the video. 
Supported for gen3a_turbo only.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=Duration, ), - comfy_io.Combo.Input( + IO.Combo.Input( "ratio", options=RunwayGen3aAspectRatio, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967295, step=1, control_after_generate=True, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Random seed for generation", ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -445,7 +445,7 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode): duration: str, ratio: str, seed: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, min_length=1) validate_image_dimensions(start_frame, max_width=7999, max_height=7999) validate_image_dimensions(end_frame, max_width=7999, max_height=7999) @@ -467,7 +467,7 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode): if len(download_urls) != 2: raise RunwayApiError("Failed to upload one or more images to comfy api.") - return comfy_io.NodeOutput( + return IO.NodeOutput( await generate_video( RunwayImageToVideoRequest( promptText=prompt, @@ -493,40 +493,40 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode): ) -class RunwayTextToImageNode(comfy_io.ComfyNode): +class RunwayTextToImageNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="RunwayTextToImageNode", display_name="Runway Text to Image", category="api node/image/Runway", description="Generate an image from a text prompt using Runway's Gen 4 model. 
" "You can also include reference image to guide the generation.", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Text prompt for the generation", ), - comfy_io.Combo.Input( + IO.Combo.Input( "ratio", options=[model.value for model in RunwayTextToImageAspectRatioEnum], ), - comfy_io.Image.Input( + IO.Image.Input( "reference_image", tooltip="Optional reference image to guide the generation", optional=True, ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -537,7 +537,7 @@ class RunwayTextToImageNode(comfy_io.ComfyNode): prompt: str, ratio: str, reference_image: Optional[torch.Tensor] = None, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, min_length=1) auth_kwargs = { @@ -588,12 +588,12 @@ class RunwayTextToImageNode(comfy_io.ComfyNode): if not final_response.output: raise RunwayApiError("Runway task succeeded but no image data found in response.") - return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_task_status(final_response))) + return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_task_status(final_response))) class RunwayExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ RunwayFirstLastFrameNode, RunwayImageToVideoNodeGen3a, diff --git a/comfy_api_nodes/nodes_sora.py b/comfy_api_nodes/nodes_sora.py index 2d532d637..efc954869 100644 --- a/comfy_api_nodes/nodes_sora.py +++ b/comfy_api_nodes/nodes_sora.py @@ -3,7 +3,7 @@ from typing_extensions import override import torch from pydantic import BaseModel, Field -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api_nodes.apis.client import ( ApiEndpoint, HttpMethod, @@ -31,27 +31,27 @@ class Sora2GenerationResponse(BaseModel): status: Optional[str] = Field(None) -class OpenAIVideoSora2(comfy_io.ComfyNode): +class OpenAIVideoSora2(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="OpenAIVideoSora2", display_name="OpenAI Sora - Video", category="api node/video/Sora", description="OpenAI video and audio generation.", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["sora-2", "sora-2-pro"], default="sora-2", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Guiding text; may be empty if an input image is present.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "size", options=[ "720x1280", @@ -61,22 +61,22 @@ class OpenAIVideoSora2(comfy_io.ComfyNode): ], default="1280x720", ), - comfy_io.Combo.Input( + IO.Combo.Input( "duration", options=[4, 8, 12], default=8, ), - comfy_io.Image.Input( + IO.Image.Input( "image", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, optional=True, tooltip="Seed to determine if node should re-run; " @@ -84,12 +84,12 @@ class OpenAIVideoSora2(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - 
comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -155,7 +155,7 @@ class OpenAIVideoSora2(comfy_io.ComfyNode): estimated_duration=45 * (duration / 4) * model_time_multiplier, ) await poll_operation.execute() - return comfy_io.NodeOutput( + return IO.NodeOutput( await download_url_to_video_output( f"/proxy/openai/v1/videos/{initial_response.id}/content", auth_kwargs=auth, @@ -165,7 +165,7 @@ class OpenAIVideoSora2(comfy_io.ComfyNode): class OpenAISoraExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ OpenAIVideoSora2, ] diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py index bfb67fc9d..8af03cfd1 100644 --- a/comfy_api_nodes/nodes_stability.py +++ b/comfy_api_nodes/nodes_stability.py @@ -2,7 +2,7 @@ from inspect import cleandoc from typing import Optional from typing_extensions import override -from comfy_api.latest import ComfyExtension, Input, io as comfy_io +from comfy_api.latest import ComfyExtension, Input, IO from comfy_api_nodes.apis.stability_api import ( StabilityUpscaleConservativeRequest, StabilityUpscaleCreativeRequest, @@ -56,20 +56,20 @@ def get_async_dummy_status(x: StabilityResultsGetResponse): return StabilityPollStatus.in_progress -class StabilityStableImageUltraNode(comfy_io.ComfyNode): +class StabilityStableImageUltraNode(IO.ComfyNode): """ Generates images synchronously based on prompt and resolution. """ @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityStableImageUltraNode", display_name="Stability AI Stable Image Ultra", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", @@ -80,39 +80,39 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode): "is a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`" + "would convey a sky that was blue and green, but more green than blue.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=StabilityAspectRatio, default=StabilityAspectRatio.ratio_1_1, tooltip="Aspect ratio of generated image.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "style_preset", options=get_stability_style_presets(), tooltip="Optional desired style of generated image.", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967294, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="The random seed used for creating the noise.", ), - comfy_io.Image.Input( + IO.Image.Input( "image", optional=True, ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", default="", tooltip="A blurb of text describing what you do not wish to see in the output image. 
This is an advanced feature.", force_input=True, optional=True, ), - comfy_io.Float.Input( + IO.Float.Input( "image_denoise", default=0.5, min=0.0, @@ -123,12 +123,12 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -143,7 +143,7 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode): image: Optional[torch.Tensor] = None, negative_prompt: str = "", image_denoise: Optional[float] = 0.5, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) # prepare image binary if image present image_binary = None @@ -193,44 +193,44 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode): image_data = base64.b64decode(response_api.image) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return comfy_io.NodeOutput(returned_image) + return IO.NodeOutput(returned_image) -class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode): +class StabilityStableImageSD_3_5Node(IO.ComfyNode): """ Generates images synchronously based on prompt and resolution. """ @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityStableImageSD_3_5Node", display_name="Stability AI Stable Diffusion 3.5 Image", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=Stability_SD3_5_Model, ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=StabilityAspectRatio, default=StabilityAspectRatio.ratio_1_1, tooltip="Aspect ratio of generated image.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "style_preset", options=get_stability_style_presets(), tooltip="Optional desired style of generated image.", ), - comfy_io.Float.Input( + IO.Float.Input( "cfg_scale", default=4.0, min=1.0, @@ -238,28 +238,28 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode): step=0.1, tooltip="How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967294, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="The random seed used for creating the noise.", ), - comfy_io.Image.Input( + IO.Image.Input( "image", optional=True, ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", default="", tooltip="Keywords of what you do not wish to see in the output image. 
This is an advanced feature.", force_input=True, optional=True, ), - comfy_io.Float.Input( + IO.Float.Input( "image_denoise", default=0.5, min=0.0, @@ -270,12 +270,12 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -292,7 +292,7 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode): image: Optional[torch.Tensor] = None, negative_prompt: str = "", image_denoise: Optional[float] = 0.5, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) # prepare image binary if image present image_binary = None @@ -348,30 +348,30 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode): image_data = base64.b64decode(response_api.image) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return comfy_io.NodeOutput(returned_image) + return IO.NodeOutput(returned_image) -class StabilityUpscaleConservativeNode(comfy_io.ComfyNode): +class StabilityUpscaleConservativeNode(IO.ComfyNode): """ Upscale image with minimal alterations to 4K resolution. """ @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityUpscaleConservativeNode", display_name="Stability AI Upscale Conservative", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("image"), - comfy_io.String.Input( + IO.Image.Input("image"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", ), - comfy_io.Float.Input( + IO.Float.Input( "creativity", default=0.35, min=0.2, @@ -379,17 +379,17 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode): step=0.01, tooltip="Controls the likelihood of creating additional details not heavily conditioned by the init image.", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967294, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="The random seed used for creating the noise.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", default="", tooltip="Keywords of what you do not wish to see in the output image. 
This is an advanced feature.", @@ -398,12 +398,12 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -416,7 +416,7 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode): creativity: float, seed: int, negative_prompt: str = "", - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read() @@ -457,30 +457,30 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode): image_data = base64.b64decode(response_api.image) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return comfy_io.NodeOutput(returned_image) + return IO.NodeOutput(returned_image) -class StabilityUpscaleCreativeNode(comfy_io.ComfyNode): +class StabilityUpscaleCreativeNode(IO.ComfyNode): """ Upscale image with minimal alterations to 4K resolution. """ @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityUpscaleCreativeNode", display_name="Stability AI Upscale Creative", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("image"), - comfy_io.String.Input( + IO.Image.Input("image"), + IO.String.Input( "prompt", multiline=True, default="", tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", ), - comfy_io.Float.Input( + IO.Float.Input( "creativity", default=0.3, min=0.1, @@ -488,22 +488,22 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode): step=0.01, tooltip="Controls the likelihood of creating additional details not heavily conditioned by the init image.", ), - comfy_io.Combo.Input( + IO.Combo.Input( "style_preset", options=get_stability_style_presets(), tooltip="Optional desired style of generated image.", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967294, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="The random seed used for creating the noise.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", default="", tooltip="Keywords of what you do not wish to see in the output image. 
This is an advanced feature.", @@ -512,12 +512,12 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -531,7 +531,7 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode): style_preset: str, seed: int, negative_prompt: str = "", - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read() @@ -591,37 +591,37 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode): image_data = base64.b64decode(response_poll.result) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return comfy_io.NodeOutput(returned_image) + return IO.NodeOutput(returned_image) -class StabilityUpscaleFastNode(comfy_io.ComfyNode): +class StabilityUpscaleFastNode(IO.ComfyNode): """ Quickly upscales an image via Stability API call to 4x its original size; intended for upscaling low-quality/compressed images. """ @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityUpscaleFastNode", display_name="Stability AI Upscale Fast", category="api node/image/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Image.Input("image"), + IO.Image.Input("image"), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @classmethod - async def execute(cls, image: torch.Tensor) -> comfy_io.NodeOutput: + async def execute(cls, image: torch.Tensor) -> IO.NodeOutput: image_binary = tensor_to_bytesio(image, total_pixels=4096*4096).read() files = { @@ -653,26 +653,26 @@ class StabilityUpscaleFastNode(comfy_io.ComfyNode): image_data = base64.b64decode(response_api.image) returned_image = bytesio_to_image_tensor(BytesIO(image_data)) - return comfy_io.NodeOutput(returned_image) + return IO.NodeOutput(returned_image) -class StabilityTextToAudio(comfy_io.ComfyNode): +class StabilityTextToAudio(IO.ComfyNode): """Generates high-quality music and sound effects from text descriptions.""" @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityTextToAudio", display_name="Stability AI Text To Audio", category="api node/audio/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["stable-audio-2.5"], ), - comfy_io.String.Input("prompt", multiline=True, default=""), - comfy_io.Int.Input( + IO.String.Input("prompt", multiline=True, default=""), + IO.Int.Input( "duration", default=190, min=1, @@ -681,18 +681,18 @@ class StabilityTextToAudio(comfy_io.ComfyNode): tooltip="Controls the duration in seconds of the generated audio.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967294, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="The random seed used for generation.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=8, min=4, @@ -703,18 +703,18 @@ class 
StabilityTextToAudio(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Audio.Output(), + IO.Audio.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @classmethod - async def execute(cls, model: str, prompt: str, duration: int, seed: int, steps: int) -> comfy_io.NodeOutput: + async def execute(cls, model: str, prompt: str, duration: int, seed: int, steps: int) -> IO.NodeOutput: validate_string(prompt, max_length=10000) payload = StabilityTextToAudioRequest(prompt=prompt, model=model, duration=duration, seed=seed, steps=steps) operation = SynchronousOperation( @@ -734,27 +734,27 @@ class StabilityTextToAudio(comfy_io.ComfyNode): response_api = await operation.execute() if not response_api.audio: raise ValueError("No audio file was received in response.") - return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) + return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) -class StabilityAudioToAudio(comfy_io.ComfyNode): +class StabilityAudioToAudio(IO.ComfyNode): """Transforms existing audio samples into new high-quality compositions using text instructions.""" @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityAudioToAudio", display_name="Stability AI Audio To Audio", category="api node/audio/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["stable-audio-2.5"], ), - comfy_io.String.Input("prompt", multiline=True, default=""), - comfy_io.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."), - comfy_io.Int.Input( + IO.String.Input("prompt", multiline=True, default=""), + IO.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."), + IO.Int.Input( "duration", default=190, min=1, @@ -763,18 +763,18 @@ class StabilityAudioToAudio(comfy_io.ComfyNode): tooltip="Controls the duration in seconds of the generated audio.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967294, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="The random seed used for generation.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=8, min=4, @@ -783,24 +783,24 @@ class StabilityAudioToAudio(comfy_io.ComfyNode): tooltip="Controls the number of sampling steps.", optional=True, ), - comfy_io.Float.Input( + IO.Float.Input( "strength", default=1, min=0.01, max=1.0, step=0.01, - display_mode=comfy_io.NumberDisplay.slider, + display_mode=IO.NumberDisplay.slider, tooltip="Parameter controls how much influence the audio parameter has on the generated audio.", optional=True, ), ], outputs=[ - comfy_io.Audio.Output(), + IO.Audio.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -808,7 +808,7 @@ class StabilityAudioToAudio(comfy_io.ComfyNode): @classmethod async def execute( cls, model: str, prompt: str, audio: Input.Audio, duration: int, seed: int, steps: int, strength: float - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, max_length=10000) 
validate_audio_duration(audio, 6, 190) payload = StabilityAudioToAudioRequest( @@ -832,27 +832,27 @@ class StabilityAudioToAudio(comfy_io.ComfyNode): response_api = await operation.execute() if not response_api.audio: raise ValueError("No audio file was received in response.") - return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) + return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) -class StabilityAudioInpaint(comfy_io.ComfyNode): +class StabilityAudioInpaint(IO.ComfyNode): """Transforms part of an existing audio sample using text instructions.""" @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="StabilityAudioInpaint", display_name="Stability AI Audio Inpaint", category="api node/audio/Stability AI", description=cleandoc(cls.__doc__ or ""), inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["stable-audio-2.5"], ), - comfy_io.String.Input("prompt", multiline=True, default=""), - comfy_io.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."), - comfy_io.Int.Input( + IO.String.Input("prompt", multiline=True, default=""), + IO.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."), + IO.Int.Input( "duration", default=190, min=1, @@ -861,18 +861,18 @@ class StabilityAudioInpaint(comfy_io.ComfyNode): tooltip="Controls the duration in seconds of the generated audio.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=4294967294, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="The random seed used for generation.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "steps", default=8, min=4, @@ -881,7 +881,7 @@ class StabilityAudioInpaint(comfy_io.ComfyNode): tooltip="Controls the number of sampling steps.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "mask_start", default=30, min=0, @@ -889,7 +889,7 @@ class StabilityAudioInpaint(comfy_io.ComfyNode): step=1, optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "mask_end", default=190, min=0, @@ -899,12 +899,12 @@ class StabilityAudioInpaint(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Audio.Output(), + IO.Audio.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -920,7 +920,7 @@ class StabilityAudioInpaint(comfy_io.ComfyNode): @classmethod async def execute( cls, model: str, prompt: str, audio: Input.Audio, duration: int, seed: int, steps: int, mask_start: int, mask_end: int, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_string(prompt, max_length=10000) if mask_end <= mask_start: raise ValueError(f"Value of mask_end({mask_end}) should be greater than mask_start({mask_start})") @@ -953,12 +953,12 @@ class StabilityAudioInpaint(comfy_io.ComfyNode): response_api = await operation.execute() if not response_api.audio: raise ValueError("No audio file was received in response.") - return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) + return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) class StabilityExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ StabilityStableImageUltraNode, StabilityStableImageSD_3_5Node, diff --git 
a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index 9d5eced1e..4588a7991 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -6,7 +6,7 @@ from io import BytesIO from typing import Optional from typing_extensions import override -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api.input_impl.video_types import VideoFromFile from comfy_api_nodes.apis import ( VeoGenVidRequest, @@ -51,7 +51,7 @@ def get_video_url_from_response(poll_response: VeoGenVidPollResponse) -> Optiona return None -class VeoVideoGenerationNode(comfy_io.ComfyNode): +class VeoVideoGenerationNode(IO.ComfyNode): """ Generates videos from text prompts using Google's Veo API. @@ -61,71 +61,71 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="VeoVideoGenerationNode", display_name="Google Veo 2 Video Generation", category="api node/video/Veo", description="Generates videos from text prompts using Google's Veo 2 API", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Text description of the video", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=["16:9", "9:16"], default="16:9", tooltip="Aspect ratio of the output video", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", tooltip="Negative text prompt to guide what to avoid in the video", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "duration_seconds", default=5, min=5, max=8, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Duration of the output video in seconds", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "enhance_prompt", default=True, tooltip="Whether to enhance the prompt with AI assistance", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "person_generation", options=["ALLOW", "BLOCK"], default="ALLOW", tooltip="Whether to allow generating people in the video", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=0xFFFFFFFF, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed for video generation (0 for random)", optional=True, ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="Optional reference image to guide video generation", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["veo-2.0-generate-001"], default="veo-2.0-generate-001", @@ -134,12 +134,12 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -302,7 +302,7 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode): video_io = BytesIO(video_data) # Return VideoFromFile object - return comfy_io.NodeOutput(VideoFromFile(video_io)) + return IO.NodeOutput(VideoFromFile(video_io)) class Veo3VideoGenerationNode(VeoVideoGenerationNode): @@ -319,78 +319,78 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="Veo3VideoGenerationNode", display_name="Google Veo 3 Video 
Generation", category="api node/video/Veo", description="Generates videos from text prompts using Google's Veo 3 API", inputs=[ - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Text description of the video", ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=["16:9", "9:16"], default="16:9", tooltip="Aspect ratio of the output video", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", tooltip="Negative text prompt to guide what to avoid in the video", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "duration_seconds", default=8, min=8, max=8, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Duration of the output video in seconds (Veo 3 only supports 8 seconds)", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "enhance_prompt", default=True, tooltip="Whether to enhance the prompt with AI assistance", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "person_generation", options=["ALLOW", "BLOCK"], default="ALLOW", tooltip="Whether to allow generating people in the video", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=0xFFFFFFFF, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed for video generation (0 for random)", optional=True, ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="Optional reference image to guide video generation", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["veo-3.0-generate-001", "veo-3.0-fast-generate-001"], default="veo-3.0-generate-001", tooltip="Veo 3 model to use for video generation", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "generate_audio", default=False, tooltip="Generate audio for the video. 
Supported by all Veo 3 models.", @@ -398,12 +398,12 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -411,7 +411,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): class VeoExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ VeoVideoGenerationNode, Veo3VideoGenerationNode, diff --git a/comfy_api_nodes/nodes_vidu.py b/comfy_api_nodes/nodes_vidu.py index ac28b683c..639be4b2b 100644 --- a/comfy_api_nodes/nodes_vidu.py +++ b/comfy_api_nodes/nodes_vidu.py @@ -6,7 +6,7 @@ from typing_extensions import override import torch from pydantic import BaseModel, Field -from comfy_api.latest import ComfyExtension, io as comfy_io +from comfy_api.latest import ComfyExtension, IO from comfy_api_nodes.util.validation_utils import ( validate_aspect_ratio_closeness, validate_image_dimensions, @@ -161,63 +161,63 @@ async def execute_task( ) -class ViduTextToVideoNode(comfy_io.ComfyNode): +class ViduTextToVideoNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ViduTextToVideoNode", display_name="Vidu Text To Video Generation", category="api node/video/Vidu", description="Generate video from text prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=VideoModelName, default=VideoModelName.vidu_q1, tooltip="Model name", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="A textual description for video generation", ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=5, max=5, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Duration of the output video in seconds", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed for video generation (0 for random)", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=AspectRatio, default=AspectRatio.r_16_9, tooltip="The aspect ratio of the output video", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=Resolution, default=Resolution.r_1080p, tooltip="Supported values may vary by model & duration", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "movement_amplitude", options=MovementAmplitude, default=MovementAmplitude.auto, @@ -226,12 +226,12 @@ class ViduTextToVideoNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -246,7 +246,7 @@ class ViduTextToVideoNode(comfy_io.ComfyNode): aspect_ratio: str, resolution: str, movement_amplitude: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: if not prompt: raise ValueError("The prompt field is required and cannot be empty.") payload = TaskCreationRequest( @@ -263,65 +263,65 @@ class 
ViduTextToVideoNode(comfy_io.ComfyNode): "comfy_api_key": cls.hidden.api_key_comfy_org, } results = await execute_task(VIDU_TEXT_TO_VIDEO, auth, payload, 320, cls.hidden.unique_id) - return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) -class ViduImageToVideoNode(comfy_io.ComfyNode): +class ViduImageToVideoNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ViduImageToVideoNode", display_name="Vidu Image To Video Generation", category="api node/video/Vidu", description="Generate video from image and optional prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=VideoModelName, default=VideoModelName.vidu_q1, tooltip="Model name", ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="An image to be used as the start frame of the generated video", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="A textual description for video generation", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=5, max=5, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Duration of the output video in seconds", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed for video generation (0 for random)", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=Resolution, default=Resolution.r_1080p, tooltip="Supported values may vary by model & duration", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "movement_amplitude", options=MovementAmplitude, default=MovementAmplitude.auto.value, @@ -330,12 +330,12 @@ class ViduImageToVideoNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -350,7 +350,7 @@ class ViduImageToVideoNode(comfy_io.ComfyNode): seed: int, resolution: str, movement_amplitude: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: if get_number_of_images(image) > 1: raise ValueError("Only one input image is allowed.") validate_image_aspect_ratio_range(image, (1, 4), (4, 1)) @@ -373,70 +373,70 @@ class ViduImageToVideoNode(comfy_io.ComfyNode): auth_kwargs=auth, ) results = await execute_task(VIDU_IMAGE_TO_VIDEO, auth, payload, 120, cls.hidden.unique_id) - return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) -class ViduReferenceVideoNode(comfy_io.ComfyNode): +class ViduReferenceVideoNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ViduReferenceVideoNode", display_name="Vidu Reference To Video Generation", category="api node/video/Vidu", description="Generate video from multiple images and prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=VideoModelName, default=VideoModelName.vidu_q1, tooltip="Model name", ), - comfy_io.Image.Input( 
+ IO.Image.Input( "images", tooltip="Images to use as references to generate a video with consistent subjects (max 7 images).", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="A textual description for video generation", ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=5, max=5, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Duration of the output video in seconds", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed for video generation (0 for random)", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "aspect_ratio", options=AspectRatio, default=AspectRatio.r_16_9, tooltip="The aspect ratio of the output video", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=[model.value for model in Resolution], default=Resolution.r_1080p.value, tooltip="Supported values may vary by model & duration", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "movement_amplitude", options=[model.value for model in MovementAmplitude], default=MovementAmplitude.auto.value, @@ -445,12 +445,12 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -466,7 +466,7 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode): aspect_ratio: str, resolution: str, movement_amplitude: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: if not prompt: raise ValueError("The prompt field is required and cannot be empty.") a = get_number_of_images(images) @@ -495,68 +495,68 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode): auth_kwargs=auth, ) results = await execute_task(VIDU_REFERENCE_VIDEO, auth, payload, 120, cls.hidden.unique_id) - return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) -class ViduStartEndToVideoNode(comfy_io.ComfyNode): +class ViduStartEndToVideoNode(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="ViduStartEndToVideoNode", display_name="Vidu Start End To Video Generation", category="api node/video/Vidu", description="Generate a video from start and end frames and a prompt", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=[model.value for model in VideoModelName], default=VideoModelName.vidu_q1.value, tooltip="Model name", ), - comfy_io.Image.Input( + IO.Image.Input( "first_frame", tooltip="Start frame", ), - comfy_io.Image.Input( + IO.Image.Input( "end_frame", tooltip="End frame", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, tooltip="A textual description for video generation", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=5, max=5, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Duration of the output video in seconds", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - 
display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed for video generation (0 for random)", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=[model.value for model in Resolution], default=Resolution.r_1080p.value, tooltip="Supported values may vary by model & duration", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "movement_amplitude", options=[model.value for model in MovementAmplitude], default=MovementAmplitude.auto.value, @@ -565,12 +565,12 @@ class ViduStartEndToVideoNode(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -586,7 +586,7 @@ class ViduStartEndToVideoNode(comfy_io.ComfyNode): seed: int, resolution: str, movement_amplitude: str, - ) -> comfy_io.NodeOutput: + ) -> IO.NodeOutput: validate_aspect_ratio_closeness(first_frame, end_frame, min_rel=0.8, max_rel=1.25, strict=False) payload = TaskCreationRequest( model_name=model, @@ -605,12 +605,12 @@ class ViduStartEndToVideoNode(comfy_io.ComfyNode): for frame in (first_frame, end_frame) ] results = await execute_task(VIDU_START_END_VIDEO, auth, payload, 96, cls.hidden.unique_id) - return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) + return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) class ViduExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ ViduTextToVideoNode, ViduImageToVideoNode, diff --git a/comfy_api_nodes/nodes_wan.py b/comfy_api_nodes/nodes_wan.py index 0be5daadb..b089bd907 100644 --- a/comfy_api_nodes/nodes_wan.py +++ b/comfy_api_nodes/nodes_wan.py @@ -4,7 +4,7 @@ from typing_extensions import override import torch from pydantic import BaseModel, Field -from comfy_api.latest import ComfyExtension, Input, io as comfy_io +from comfy_api.latest import ComfyExtension, Input, IO from comfy_api_nodes.apis.client import ( ApiEndpoint, HttpMethod, @@ -195,35 +195,35 @@ async def process_task( ).execute() -class WanTextToImageApi(comfy_io.ComfyNode): +class WanTextToImageApi(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="WanTextToImageApi", display_name="Wan Text to Image", category="api node/image/Wan", description="Generates image based on text prompt.", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["wan2.5-t2i-preview"], default="wan2.5-t2i-preview", tooltip="Model to use.", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", tooltip="Negative text prompt to guide what to avoid.", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "width", default=1024, min=768, @@ -231,7 +231,7 @@ class WanTextToImageApi(comfy_io.ComfyNode): step=32, optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "height", default=1024, min=768, @@ -239,24 +239,24 @@ class WanTextToImageApi(comfy_io.ComfyNode): step=32, optional=True, ), - 
comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_extend", default=True, tooltip="Whether to enhance the prompt with AI assistance.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the result.", @@ -264,12 +264,12 @@ class WanTextToImageApi(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -309,36 +309,36 @@ class WanTextToImageApi(comfy_io.ComfyNode): estimated_duration=9, poll_interval=3, ) - return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) + return IO.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) -class WanImageToImageApi(comfy_io.ComfyNode): +class WanImageToImageApi(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="WanImageToImageApi", display_name="Wan Image to Image", category="api node/image/Wan", description="Generates an image from one or two input images and a text prompt. " "The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["wan2.5-i2i-preview"], default="wan2.5-i2i-preview", tooltip="Model to use.", ), - comfy_io.Image.Input( + IO.Image.Input( "image", tooltip="Single-image editing or multi-image fusion, maximum 2 images.", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", @@ -346,7 +346,7 @@ class WanImageToImageApi(comfy_io.ComfyNode): optional=True, ), # redo this later as an optional combo of recommended resolutions - # comfy_io.Int.Input( + # IO.Int.Input( # "width", # default=1280, # min=384, @@ -354,7 +354,7 @@ class WanImageToImageApi(comfy_io.ComfyNode): # step=16, # optional=True, # ), - # comfy_io.Int.Input( + # IO.Int.Input( # "height", # default=1280, # min=384, @@ -362,18 +362,18 @@ class WanImageToImageApi(comfy_io.ComfyNode): # step=16, # optional=True, # ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the result.", @@ -381,12 +381,12 @@ class WanImageToImageApi(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Image.Output(), + IO.Image.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -431,38 +431,38 @@ class 
WanImageToImageApi(comfy_io.ComfyNode): estimated_duration=42, poll_interval=3, ) - return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) + return IO.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) -class WanTextToVideoApi(comfy_io.ComfyNode): +class WanTextToVideoApi(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="WanTextToVideoApi", display_name="Wan Text to Video", category="api node/video/Wan", description="Generates video based on text prompt.", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["wan2.5-t2v-preview"], default="wan2.5-t2v-preview", tooltip="Model to use.", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", tooltip="Negative text prompt to guide what to avoid.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "size", options=[ "480p: 1:1 (624x624)", @@ -482,45 +482,45 @@ class WanTextToVideoApi(comfy_io.ComfyNode): default="480p: 1:1 (624x624)", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=5, max=10, step=5, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Available durations: 5 and 10 seconds", optional=True, ), - comfy_io.Audio.Input( + IO.Audio.Input( "audio", optional=True, tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "generate_audio", default=False, optional=True, tooltip="If there is no audio input, generate audio automatically.", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_extend", default=True, tooltip="Whether to enhance the prompt with AI assistance.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the result.", @@ -528,12 +528,12 @@ class WanTextToVideoApi(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -582,41 +582,41 @@ class WanTextToVideoApi(comfy_io.ComfyNode): estimated_duration=120 * int(duration / 5), poll_interval=6, ) - return comfy_io.NodeOutput(await download_url_to_video_output(response.output.video_url)) + return IO.NodeOutput(await download_url_to_video_output(response.output.video_url)) -class WanImageToVideoApi(comfy_io.ComfyNode): +class WanImageToVideoApi(IO.ComfyNode): @classmethod def define_schema(cls): - return comfy_io.Schema( + return IO.Schema( node_id="WanImageToVideoApi", display_name="Wan Image to Video", category="api node/video/Wan", description="Generates video based on the first frame and text prompt.", inputs=[ - comfy_io.Combo.Input( + IO.Combo.Input( "model", options=["wan2.5-i2v-preview"], default="wan2.5-i2v-preview", 
tooltip="Model to use.", ), - comfy_io.Image.Input( + IO.Image.Input( "image", ), - comfy_io.String.Input( + IO.String.Input( "prompt", multiline=True, default="", tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", ), - comfy_io.String.Input( + IO.String.Input( "negative_prompt", multiline=True, default="", tooltip="Negative text prompt to guide what to avoid.", optional=True, ), - comfy_io.Combo.Input( + IO.Combo.Input( "resolution", options=[ "480P", @@ -626,45 +626,45 @@ class WanImageToVideoApi(comfy_io.ComfyNode): default="480P", optional=True, ), - comfy_io.Int.Input( + IO.Int.Input( "duration", default=5, min=5, max=10, step=5, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, tooltip="Available durations: 5 and 10 seconds", optional=True, ), - comfy_io.Audio.Input( + IO.Audio.Input( "audio", optional=True, tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.", ), - comfy_io.Int.Input( + IO.Int.Input( "seed", default=0, min=0, max=2147483647, step=1, - display_mode=comfy_io.NumberDisplay.number, + display_mode=IO.NumberDisplay.number, control_after_generate=True, tooltip="Seed to use for generation.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "generate_audio", default=False, optional=True, tooltip="If there is no audio input, generate audio automatically.", ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "prompt_extend", default=True, tooltip="Whether to enhance the prompt with AI assistance.", optional=True, ), - comfy_io.Boolean.Input( + IO.Boolean.Input( "watermark", default=True, tooltip="Whether to add an \"AI generated\" watermark to the result.", @@ -672,12 +672,12 @@ class WanImageToVideoApi(comfy_io.ComfyNode): ), ], outputs=[ - comfy_io.Video.Output(), + IO.Video.Output(), ], hidden=[ - comfy_io.Hidden.auth_token_comfy_org, - comfy_io.Hidden.api_key_comfy_org, - comfy_io.Hidden.unique_id, + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -731,12 +731,12 @@ class WanImageToVideoApi(comfy_io.ComfyNode): estimated_duration=120 * int(duration / 5), poll_interval=6, ) - return comfy_io.NodeOutput(await download_url_to_video_output(response.output.video_url)) + return IO.NodeOutput(await download_url_to_video_output(response.output.video_url)) class WanApiExtension(ComfyExtension): @override - async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: + async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ WanTextToImageApi, WanImageToImageApi, From ddfce1af4fc76768dbdd0cc4fa22d47b20a8b876 Mon Sep 17 00:00:00 2001 From: Arjan Singh <1598641+arjansingh@users.noreply.github.com> Date: Tue, 14 Oct 2025 18:08:23 -0700 Subject: [PATCH 0756/1073] Bump frontend to 1.28.6 (#10345) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bbb22364f..a45057970 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.27.10 +comfyui-frontend-package==1.28.6 comfyui-workflow-templates==0.1.95 comfyui-embedded-docs==0.3.0 torch From 1c10b33f9bbc75114053bc041851b60767791783 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 14 Oct 2025 21:21:11 -0700 Subject: [PATCH 0757/1073] gfx942 doesn't support fp8 operations. 
(#10348) --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 709ebc40b..d82d5b8b0 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -349,7 +349,7 @@ try: if any((a in arch) for a in ["gfx1201"]): ENABLE_PYTORCH_ATTENTION = True if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4): - if any((a in arch) for a in ["gfx1200", "gfx1201", "gfx942", "gfx950"]): # TODO: more arches + if any((a in arch) for a in ["gfx1200", "gfx1201", "gfx950"]): # TODO: more arches, "gfx942" gives error on pytorch nightly 2.10 1013 rocm7.0 SUPPORT_FP8_OPS = True except: From f72c6616b2e91e4021591895192cef8b9d4d1c75 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Thu, 16 Oct 2025 06:12:25 +0800 Subject: [PATCH 0758/1073] Add TemporalScoreRescaling node (#10351) * Add TemporalScoreRescaling node * Mention image generation in tsr_k's tooltip --- comfy_extras/nodes_eps.py | 95 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/comfy_extras/nodes_eps.py b/comfy_extras/nodes_eps.py index 7852d85e5..4d8061741 100644 --- a/comfy_extras/nodes_eps.py +++ b/comfy_extras/nodes_eps.py @@ -1,5 +1,7 @@ +import torch from typing_extensions import override +from comfy.k_diffusion.sampling import sigma_to_half_log_snr from comfy_api.latest import ComfyExtension, io @@ -63,12 +65,105 @@ class EpsilonScaling(io.ComfyNode): return io.NodeOutput(model_clone) +def compute_tsr_rescaling_factor( + snr: torch.Tensor, tsr_k: float, tsr_variance: float +) -> torch.Tensor: + """Compute the rescaling score ratio in Temporal Score Rescaling. + + See equation (6) in https://arxiv.org/pdf/2510.01184v1. + """ + posinf_mask = torch.isposinf(snr) + rescaling_factor = (snr * tsr_variance + 1) / (snr * tsr_variance / tsr_k + 1) + return torch.where(posinf_mask, tsr_k, rescaling_factor) # when snr → inf, r = tsr_k + + +class TemporalScoreRescaling(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="TemporalScoreRescaling", + display_name="TSR - Temporal Score Rescaling", + category="model_patches/unet", + inputs=[ + io.Model.Input("model"), + io.Float.Input( + "tsr_k", + tooltip=( + "Controls the rescaling strength.\n" + "Lower k produces more detailed results; higher k produces smoother results in image generation. Setting k = 1 disables rescaling." + ), + default=0.95, + min=0.01, + max=100.0, + step=0.001, + display_mode=io.NumberDisplay.number, + ), + io.Float.Input( + "tsr_sigma", + tooltip=( + "Controls how early rescaling takes effect.\n" + "Larger values take effect earlier." 
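# A rough sketch of what these two knobs do, reading off compute_tsr_rescaling_factor
# above (v = tsr_sigma**2 is an assumed shorthand, not a name from the patch):
#   r = (snr * v + 1) / (snr * v / tsr_k + 1)
# so r stays near 1 on high-noise steps (snr -> 0) and approaches tsr_k on low-noise
# steps (snr -> inf); a larger tsr_sigma moves that transition to noisier, earlier steps.
# For example, with tsr_k = 0.95 and tsr_sigma = 1.0: snr = 0 gives r = 1.0,
# snr = 1 gives 2 / (1 / 0.95 + 1) ~= 0.974, and r -> 0.95 as snr grows.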
+ ), + default=1.0, + min=0.01, + max=100.0, + step=0.001, + display_mode=io.NumberDisplay.number, + ), + ], + outputs=[ + io.Model.Output( + display_name="patched_model", + ), + ], + description=( + "[Post-CFG Function]\n" + "TSR - Temporal Score Rescaling (2510.01184)\n\n" + "Rescaling the model's score or noise to steer the sampling diversity.\n" + ), + ) + + @classmethod + def execute(cls, model, tsr_k, tsr_sigma) -> io.NodeOutput: + tsr_variance = tsr_sigma**2 + + def temporal_score_rescaling(args): + denoised = args["denoised"] + x = args["input"] + sigma = args["sigma"] + curr_model = args["model"] + + # No rescaling (r = 1) or no noise + if tsr_k == 1 or sigma == 0: + return denoised + + model_sampling = curr_model.current_patcher.get_model_object("model_sampling") + half_log_snr = sigma_to_half_log_snr(sigma, model_sampling) + snr = (2 * half_log_snr).exp() + + # No rescaling needed (r = 1) + if snr == 0: + return denoised + + rescaling_r = compute_tsr_rescaling_factor(snr, tsr_k, tsr_variance) + + # Derived from scaled_denoised = (x - r * sigma * noise) / alpha + alpha = sigma * half_log_snr.exp() + return torch.lerp(x / alpha, denoised, rescaling_r) + + m = model.clone() + m.set_model_sampler_post_cfg_function(temporal_score_rescaling) + return io.NodeOutput(m) + + class EpsilonScalingExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ EpsilonScaling, + TemporalScoreRescaling, ] + async def comfy_entrypoint() -> EpsilonScalingExtension: return EpsilonScalingExtension() From 74b7f0b04ba19926286518b0a0179290b79bfae0 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 16 Oct 2025 01:41:45 +0300 Subject: [PATCH 0759/1073] feat(api-nodes): add Veo3.1 model (#10357) --- comfy_api_nodes/nodes_veo2.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index 4588a7991..4ab5c5186 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -27,6 +27,13 @@ from comfy_api_nodes.apinode_utils import ( ) AVERAGE_DURATION_VIDEO_GEN = 32 +MODELS_MAP = { + "veo-2.0-generate-001": "veo-2.0-generate-001", + "veo-3.1-generate": "veo-3.1-generate-preview", + "veo-3.1-fast-generate": "veo-3.1-fast-generate-preview", + "veo-3.0-generate-001": "veo-3.0-generate-001", + "veo-3.0-fast-generate-001": "veo-3.0-fast-generate-001", +} def convert_image_to_base64(image: torch.Tensor): if image is None: @@ -158,6 +165,7 @@ class VeoVideoGenerationNode(IO.ComfyNode): model="veo-2.0-generate-001", generate_audio=False, ): + model = MODELS_MAP[model] # Prepare the instances for the request instances = [] @@ -385,7 +393,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): ), IO.Combo.Input( "model", - options=["veo-3.0-generate-001", "veo-3.0-fast-generate-001"], + options=list(MODELS_MAP.keys()), default="veo-3.0-generate-001", tooltip="Veo 3 model to use for video generation", optional=True, From 6b035bfce25b5336ed2a39c72972a8a36a80f9bd Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 15 Oct 2025 15:48:12 -0700 Subject: [PATCH 0760/1073] Latest pytorch stable is cu130 (#10361) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index db1fdaf3c..b0731db33 100644 --- a/README.md +++ b/README.md @@ -255,7 +255,7 @@ This is the command to install the Pytorch xpu nightly which might have some 
per Nvidia users should install stable pytorch using this command: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu129``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu130``` This is the command to install pytorch nightly instead which might have performance improvements. From 493b81e48f4067da95e4cee36d42a3516338da79 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Wed, 15 Oct 2025 16:47:26 -0700 Subject: [PATCH 0761/1073] Fix order of inputs nested merge_nested_dicts (#10362) --- comfy/patcher_extension.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/patcher_extension.py b/comfy/patcher_extension.py index 46cc7b2a8..5ee4d5ee5 100644 --- a/comfy/patcher_extension.py +++ b/comfy/patcher_extension.py @@ -150,7 +150,7 @@ def merge_nested_dicts(dict1: dict, dict2: dict, copy_dict1=True): for key, value in dict2.items(): if isinstance(value, dict): curr_value = merged_dict.setdefault(key, {}) - merged_dict[key] = merge_nested_dicts(value, curr_value) + merged_dict[key] = merge_nested_dicts(curr_value, value) elif isinstance(value, list): merged_dict.setdefault(key, []).extend(value) else: From afa8a24fe1f81d447b961fdf41f47f9094d28919 Mon Sep 17 00:00:00 2001 From: Faych <90372299+neverbiasu@users.noreply.github.com> Date: Thu, 16 Oct 2025 01:16:09 +0100 Subject: [PATCH 0762/1073] refactor: Replace manual patches merging with merge_nested_dicts (#10360) --- comfy/samplers.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index c59e296a1..e7efaf470 100755 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -306,17 +306,10 @@ def _calc_cond_batch(model: BaseModel, conds: list[list[dict]], x_in: torch.Tens copy_dict1=False) if patches is not None: - # TODO: replace with merge_nested_dicts function - if "patches" in transformer_options: - cur_patches = transformer_options["patches"].copy() - for p in patches: - if p in cur_patches: - cur_patches[p] = cur_patches[p] + patches[p] - else: - cur_patches[p] = patches[p] - transformer_options["patches"] = cur_patches - else: - transformer_options["patches"] = patches + transformer_options["patches"] = comfy.patcher_extension.merge_nested_dicts( + transformer_options.get("patches", {}), + patches + ) transformer_options["cond_or_uncond"] = cond_or_uncond[:] transformer_options["uuids"] = uuids[:] From 55ac7d333c55d808be33c590a4a2e6c965d5f9a8 Mon Sep 17 00:00:00 2001 From: Arjan Singh <1598641+arjansingh@users.noreply.github.com> Date: Wed, 15 Oct 2025 20:30:39 -0700 Subject: [PATCH 0763/1073] Bump frontend to 1.28.7 (#10364) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a45057970..82457df54 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.28.6 +comfyui-frontend-package==1.28.7 comfyui-workflow-templates==0.1.95 comfyui-embedded-docs==0.3.0 torch From 4054b4bf38d11fc0c784c2d19f5fc0ed3bbc7ae4 Mon Sep 17 00:00:00 2001 From: Rizumu Ayaka Date: Thu, 16 Oct 2025 16:13:31 +0800 Subject: [PATCH 0764/1073] feat: deprecated API alert (#10366) --- server.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/server.py b/server.py index 80e9d3fa7..a44f4f237 100644 --- a/server.py +++ b/server.py @@ -48,6 +48,28 @@ async def send_socket_catch_exception(function, message): except 
(aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError, BrokenPipeError, ConnectionError) as err: logging.warning("send error: {}".format(err)) +# Track deprecated paths that have been warned about to only warn once per file +_deprecated_paths_warned = set() + +@web.middleware +async def deprecation_warning(request: web.Request, handler): + """Middleware to warn about deprecated frontend API paths""" + path = request.path + + if (path.startswith('/scripts/') or path.startswith('/extensions/core/')): + # Only warn once per unique file path + if path not in _deprecated_paths_warned: + _deprecated_paths_warned.add(path) + logging.warning( + f"[DEPRECATION WARNING] Detected import of deprecated legacy API: {path}. " + f"This is likely caused by a custom node extension using outdated APIs. " + f"Please update your extensions or contact the extension author for an updated version." + ) + + response: web.Response = await handler(request) + return response + + @web.middleware async def compress_body(request: web.Request, handler): accept_encoding = request.headers.get("Accept-Encoding", "") @@ -159,7 +181,7 @@ class PromptServer(): self.client_session:Optional[aiohttp.ClientSession] = None self.number = 0 - middlewares = [cache_control] + middlewares = [cache_control, deprecation_warning] if args.enable_compress_response_body: middlewares.append(compress_body) From bc0ad9bb49b642e081f99f92d239d634988d52bc Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 16 Oct 2025 20:12:50 +0300 Subject: [PATCH 0765/1073] fix(api-nodes): remove "veo2" model from Veo3 node (#10372) --- comfy_api_nodes/nodes_veo2.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index 4ab5c5186..daeaa823e 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -393,7 +393,9 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): ), IO.Combo.Input( "model", - options=list(MODELS_MAP.keys()), + options=[ + "veo-3.1-generate", "veo-3.1-fast-generate", "veo-3.0-generate-001", "veo-3.0-fast-generate-001" + ], default="veo-3.0-generate-001", tooltip="Veo 3 model to use for video generation", optional=True, From 19b466160c1cd43f707769adef6f8ed6e9fd50bf Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:16:03 -0700 Subject: [PATCH 0766/1073] Workaround for nvidia issue where VAE uses 3x more memory on torch 2.9 (#10373) --- comfy/ops.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/comfy/ops.py b/comfy/ops.py index b2096b40e..893ceda98 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -52,6 +52,16 @@ try: except (ModuleNotFoundError, TypeError): logging.warning("Could not set sdpa backend priority.") +NVIDIA_MEMORY_CONV_BUG_WORKAROUND = False +try: + if comfy.model_management.is_nvidia(): + if torch.backends.cudnn.version() >= 91300 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10): + #TODO: change upper bound version once it's fixed' + NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True + logging.info("working around nvidia conv3d memory bug.") +except: + pass + cast_to = comfy.model_management.cast_to #TODO: remove once no more references if torch.cuda.is_available() and torch.backends.cudnn.is_available() and PerformanceFeature.AutoTune in args.fast: @@ -151,6 +161,15 @@ class disable_weight_init: def 
reset_parameters(self): return None + def _conv_forward(self, input, weight, bias, *args, **kwargs): + if NVIDIA_MEMORY_CONV_BUG_WORKAROUND and weight.dtype in (torch.float16, torch.bfloat16): + out = torch.cudnn_convolution(input, weight, self.padding, self.stride, self.dilation, self.groups, benchmark=False, deterministic=False, allow_tf32=True) + if bias is not None: + out += bias.reshape((1, -1) + (1,) * (out.ndim - 2)) + return out + else: + return super()._conv_forward(input, weight, bias, *args, **kwargs) + def forward_comfy_cast_weights(self, input): weight, bias = cast_bias_weight(self, input) return self._conv_forward(input, weight, bias) From b1293d50eff5f1ff2e54f73114fbe7c0f9aef8fe Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:59:56 -0700 Subject: [PATCH 0767/1073] workaround also works on cudnn 91200 (#10375) --- comfy/ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ops.py b/comfy/ops.py index 893ceda98..56b07b44c 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -55,7 +55,7 @@ except (ModuleNotFoundError, TypeError): NVIDIA_MEMORY_CONV_BUG_WORKAROUND = False try: if comfy.model_management.is_nvidia(): - if torch.backends.cudnn.version() >= 91300 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10): + if torch.backends.cudnn.version() >= 91200 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10): #TODO: change upper bound version once it's fixed' NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True logging.info("working around nvidia conv3d memory bug.") From d8d60b56093a15edc5d25486d387d3c5917dc3d3 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Thu, 16 Oct 2025 21:39:37 -0700 Subject: [PATCH 0768/1073] Do batch_slice in EasyCache's apply_cache_diff (#10376) --- comfy_extras/nodes_easycache.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py index c170e9fd9..1359e2f99 100644 --- a/comfy_extras/nodes_easycache.py +++ b/comfy_extras/nodes_easycache.py @@ -244,6 +244,8 @@ class EasyCacheHolder: self.total_steps_skipped += 1 batch_offset = x.shape[0] // len(uuids) for i, uuid in enumerate(uuids): + # slice out only what is relevant to this cond + batch_slice = [slice(i*batch_offset,(i+1)*batch_offset)] # if cached dims don't match x dims, cut off excess and hope for the best (cosmos world2video) if x.shape[1:] != self.uuid_cache_diffs[uuid].shape[1:]: if not self.allow_mismatch: @@ -261,9 +263,8 @@ class EasyCacheHolder: slicing.append(slice(None, dim_u)) else: slicing.append(slice(None)) - slicing = [slice(i*batch_offset,(i+1)*batch_offset)] + slicing - x = x[slicing] - x += self.uuid_cache_diffs[uuid].to(x.device) + batch_slice = batch_slice + slicing + x[batch_slice] += self.uuid_cache_diffs[uuid].to(x.device) return x def update_cache_diff(self, output: torch.Tensor, x: torch.Tensor, uuids: list[UUID]): From b1467da4803017a418c32c159525767f45871ca3 Mon Sep 17 00:00:00 2001 From: rattus128 <46076784+rattus128@users.noreply.github.com> Date: Sat, 18 Oct 2025 06:55:15 +1000 Subject: [PATCH 0769/1073] execution: fold in dependency aware caching / Fix --cache-none with loops/lazy etc (#10368) * execution: fold in dependency aware caching This makes --cache-none compatiable with lazy and expanded subgraphs. 
Currently the --cache-none option is powered by the DependencyAwareCache. The cache attempts to maintain a parallel copy of the execution list data structure, however it is only set up once at the start of execution and does not get meaningful updates to the execution list. This causes multiple problems when --cache-none is used with lazy and expanded subgraphs, as the DAC does not accurately update its copy of the execution data structure. The DAC has an attempt to handle subgraphs (ensure_subcache), however this does not accurately connect to nodes outside the subgraph. The current semantics of the DAC are to free a node ASAP after the dependent nodes are executed. This means that if a subgraph refs such a node it will be re-queued and re-executed by the execution_list, but the DAC won't see it in its to-free lists anymore and will leak memory.

Rather than try to cover all the cases where the execution list changes from inside the cache, move the whole problem to the executor, which maintains an always up-to-date copy of the wanted data structure.

The executor now has a fast-moving run-local cache of its own. Each to-node has its own mini-cache, and the cache is unconditionally primed at the time of add_strong_link. add_strong_link is called for static workflows, lazy links, and expanded subgraphs, so it is the single source of truth for output dependencies.

In the case of a cache hit, the executor cache will hold the non-None value (it will respect updates if they happen somehow as well). In the case of a cache miss, the executor caches a None and waits for a notification to update the value when the node completes.

When a node completes execution, it simply releases its mini-cache and, in turn, its strong refs on its direct ancestor outputs, allowing for ASAP freeing (the same as the DependencyAwareCache, but a little more automatic).

This now allows for a re-implementation of --cache-none with no cache at all.

The dependency-aware cache was also applying the dependency semantics to the objects and UI caches, which is not accurate (this entire logic was always outputs-specific).

This also prepares for more complex caching strategies (such as RAM-pressure-based caching), where a cache can implement any freeing strategy completely independently of the dependency-awareness requirement.

* main: re-implement --cache-none as no cache at all

The execution list now tracks dependency-aware caching more correctly than the DependencyAwareCache did. Change it to a cache that does nothing.

* test_execution: add --cache-none to the test suite

--cache-none is now expected to work universally. Run it through the full unit test suite. Propagate the server parameterization for whether or not the server is capable of caching, so that the minority of tests that specifically check for cache hits can branch on it. Hard-assert NOT caching in the else branch to give some coverage of --cache-none's expected behaviour of not actually caching.
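To make the caching rework concrete, here is a minimal self-contained sketch of the run-local output cache described above. The method names (cache_link, get_output_cache, cache_update) mirror the patch's ExecutionList; the MiniExecutionCache class, the complete() helper, and the toy node ids are illustrative assumptions, not the actual ComfyUI implementation.

    class MiniExecutionCache:
        def __init__(self):
            self.execution_cache = {}  # to_node_id -> {from_node_id: output or None}
            self.listeners = {}        # from_node_id -> set of waiting to_node_ids

        def cache_link(self, from_node_id, to_node_id, known_output=None):
            # Prime the consumer's mini-cache when the strong link is added.
            # A hit stores the real value; a miss stores None until cache_update().
            self.execution_cache.setdefault(to_node_id, {})[from_node_id] = known_output
            self.listeners.setdefault(from_node_id, set()).add(to_node_id)

        def get_output_cache(self, from_node_id, to_node_id):
            return self.execution_cache.get(to_node_id, {}).get(from_node_id)

        def cache_update(self, node_id, value):
            # The producer finished: push its output to every waiting consumer.
            for to_node_id in self.listeners.get(node_id, set()):
                self.execution_cache[to_node_id][node_id] = value

        def complete(self, node_id):
            # The consumer finished: drop its mini-cache, releasing the strong
            # refs on its direct ancestor outputs so they can be freed ASAP.
            self.execution_cache.pop(node_id, None)
            self.listeners.pop(node_id, None)

    cache = MiniExecutionCache()
    cache.cache_link("loader", "sampler")    # cache miss: primed with None
    cache.cache_update("loader", ["model"])  # the loader completes execution
    assert cache.get_output_cache("loader", "sampler") == ["model"]
    cache.complete("sampler")                # sampler done; ancestor refs released

In the patch below, the priming happens in ExecutionList.add_strong_link and the release in complete_node_execution, which is what keeps lazy links and expanded subgraphs consistent under --cache-none.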
--- comfy_execution/caching.py | 174 ++++-------------------------- comfy_execution/graph.py | 31 +++++- execution.py | 34 +++--- main.py | 2 +- tests/execution/test_execution.py | 50 +++++---- 5 files changed, 101 insertions(+), 190 deletions(-) diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py index 41224ce3b..566bc3f9c 100644 --- a/comfy_execution/caching.py +++ b/comfy_execution/caching.py @@ -265,6 +265,26 @@ class HierarchicalCache(BasicCache): assert cache is not None return await cache._ensure_subcache(node_id, children_ids) +class NullCache: + + async def set_prompt(self, dynprompt, node_ids, is_changed_cache): + pass + + def all_node_ids(self): + return [] + + def clean_unused(self): + pass + + def get(self, node_id): + return None + + def set(self, node_id, value): + pass + + async def ensure_subcache_for(self, node_id, children_ids): + return self + class LRUCache(BasicCache): def __init__(self, key_class, max_size=100): super().__init__(key_class) @@ -316,157 +336,3 @@ class LRUCache(BasicCache): self._mark_used(child_id) self.children[cache_key].append(self.cache_key_set.get_data_key(child_id)) return self - - -class DependencyAwareCache(BasicCache): - """ - A cache implementation that tracks dependencies between nodes and manages - their execution and caching accordingly. It extends the BasicCache class. - Nodes are removed from this cache once all of their descendants have been - executed. - """ - - def __init__(self, key_class): - """ - Initialize the DependencyAwareCache. - - Args: - key_class: The class used for generating cache keys. - """ - super().__init__(key_class) - self.descendants = {} # Maps node_id -> set of descendant node_ids - self.ancestors = {} # Maps node_id -> set of ancestor node_ids - self.executed_nodes = set() # Tracks nodes that have been executed - - async def set_prompt(self, dynprompt, node_ids, is_changed_cache): - """ - Clear the entire cache and rebuild the dependency graph. - - Args: - dynprompt: The dynamic prompt object containing node information. - node_ids: List of node IDs to initialize the cache for. - is_changed_cache: Flag indicating if the cache has changed. - """ - # Clear all existing cache data - self.cache.clear() - self.subcaches.clear() - self.descendants.clear() - self.ancestors.clear() - self.executed_nodes.clear() - - # Call the parent method to initialize the cache with the new prompt - await super().set_prompt(dynprompt, node_ids, is_changed_cache) - - # Rebuild the dependency graph - self._build_dependency_graph(dynprompt, node_ids) - - def _build_dependency_graph(self, dynprompt, node_ids): - """ - Build the dependency graph for all nodes. - - Args: - dynprompt: The dynamic prompt object containing node information. - node_ids: List of node IDs to build the graph for. - """ - self.descendants.clear() - self.ancestors.clear() - for node_id in node_ids: - self.descendants[node_id] = set() - self.ancestors[node_id] = set() - - for node_id in node_ids: - inputs = dynprompt.get_node(node_id)["inputs"] - for input_data in inputs.values(): - if is_link(input_data): # Check if the input is a link to another node - ancestor_id = input_data[0] - self.descendants[ancestor_id].add(node_id) - self.ancestors[node_id].add(ancestor_id) - - def set(self, node_id, value): - """ - Mark a node as executed and store its value in the cache. - - Args: - node_id: The ID of the node to store. - value: The value to store for the node. 
- """ - self._set_immediate(node_id, value) - self.executed_nodes.add(node_id) - self._cleanup_ancestors(node_id) - - def get(self, node_id): - """ - Retrieve the cached value for a node. - - Args: - node_id: The ID of the node to retrieve. - - Returns: - The cached value for the node. - """ - return self._get_immediate(node_id) - - async def ensure_subcache_for(self, node_id, children_ids): - """ - Ensure a subcache exists for a node and update dependencies. - - Args: - node_id: The ID of the parent node. - children_ids: List of child node IDs to associate with the parent node. - - Returns: - The subcache object for the node. - """ - subcache = await super()._ensure_subcache(node_id, children_ids) - for child_id in children_ids: - self.descendants[node_id].add(child_id) - self.ancestors[child_id].add(node_id) - return subcache - - def _cleanup_ancestors(self, node_id): - """ - Check if ancestors of a node can be removed from the cache. - - Args: - node_id: The ID of the node whose ancestors are to be checked. - """ - for ancestor_id in self.ancestors.get(node_id, []): - if ancestor_id in self.executed_nodes: - # Remove ancestor if all its descendants have been executed - if all(descendant in self.executed_nodes for descendant in self.descendants[ancestor_id]): - self._remove_node(ancestor_id) - - def _remove_node(self, node_id): - """ - Remove a node from the cache. - - Args: - node_id: The ID of the node to remove. - """ - cache_key = self.cache_key_set.get_data_key(node_id) - if cache_key in self.cache: - del self.cache[cache_key] - subcache_key = self.cache_key_set.get_subcache_key(node_id) - if subcache_key in self.subcaches: - del self.subcaches[subcache_key] - - def clean_unused(self): - """ - Clean up unused nodes. This is a no-op for this cache implementation. - """ - pass - - def recursive_debug_dump(self): - """ - Dump the cache and dependency graph for debugging. - - Returns: - A list containing the cache state and dependency graph. 
- """ - result = super().recursive_debug_dump() - result.append({ - "descendants": self.descendants, - "ancestors": self.ancestors, - "executed_nodes": list(self.executed_nodes), - }) - return result diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py index f4b427265..d5bbacde3 100644 --- a/comfy_execution/graph.py +++ b/comfy_execution/graph.py @@ -153,8 +153,9 @@ class TopologicalSort: continue _, _, input_info = self.get_input_info(unique_id, input_name) is_lazy = input_info is not None and "lazy" in input_info and input_info["lazy"] - if (include_lazy or not is_lazy) and not self.is_cached(from_node_id): - node_ids.append(from_node_id) + if (include_lazy or not is_lazy): + if not self.is_cached(from_node_id): + node_ids.append(from_node_id) links.append((from_node_id, from_socket, unique_id)) for link in links: @@ -194,10 +195,34 @@ class ExecutionList(TopologicalSort): super().__init__(dynprompt) self.output_cache = output_cache self.staged_node_id = None + self.execution_cache = {} + self.execution_cache_listeners = {} def is_cached(self, node_id): return self.output_cache.get(node_id) is not None + def cache_link(self, from_node_id, to_node_id): + if not to_node_id in self.execution_cache: + self.execution_cache[to_node_id] = {} + self.execution_cache[to_node_id][from_node_id] = self.output_cache.get(from_node_id) + if not from_node_id in self.execution_cache_listeners: + self.execution_cache_listeners[from_node_id] = set() + self.execution_cache_listeners[from_node_id].add(to_node_id) + + def get_output_cache(self, from_node_id, to_node_id): + if not to_node_id in self.execution_cache: + return None + return self.execution_cache[to_node_id].get(from_node_id) + + def cache_update(self, node_id, value): + if node_id in self.execution_cache_listeners: + for to_node_id in self.execution_cache_listeners[node_id]: + self.execution_cache[to_node_id][node_id] = value + + def add_strong_link(self, from_node_id, from_socket, to_node_id): + super().add_strong_link(from_node_id, from_socket, to_node_id) + self.cache_link(from_node_id, to_node_id) + async def stage_node_execution(self): assert self.staged_node_id is None if self.is_empty(): @@ -277,6 +302,8 @@ class ExecutionList(TopologicalSort): def complete_node_execution(self): node_id = self.staged_node_id self.pop_node(node_id) + self.execution_cache.pop(node_id, None) + self.execution_cache_listeners.pop(node_id, None) self.staged_node_id = None def get_nodes_in_cycle(self): diff --git a/execution.py b/execution.py index 1dc35738b..78c36a4b0 100644 --- a/execution.py +++ b/execution.py @@ -18,7 +18,7 @@ from comfy_execution.caching import ( BasicCache, CacheKeySetID, CacheKeySetInputSignature, - DependencyAwareCache, + NullCache, HierarchicalCache, LRUCache, ) @@ -91,13 +91,13 @@ class IsChangedCache: class CacheType(Enum): CLASSIC = 0 LRU = 1 - DEPENDENCY_AWARE = 2 + NONE = 2 class CacheSet: def __init__(self, cache_type=None, cache_size=None): - if cache_type == CacheType.DEPENDENCY_AWARE: - self.init_dependency_aware_cache() + if cache_type == CacheType.NONE: + self.init_null_cache() logging.info("Disabling intermediate node cache.") elif cache_type == CacheType.LRU: if cache_size is None: @@ -120,11 +120,12 @@ class CacheSet: self.ui = LRUCache(CacheKeySetInputSignature, max_size=cache_size) self.objects = HierarchicalCache(CacheKeySetID) - # only hold cached items while the decendents have not executed - def init_dependency_aware_cache(self): - self.outputs = DependencyAwareCache(CacheKeySetInputSignature) - self.ui = 
DependencyAwareCache(CacheKeySetInputSignature) - self.objects = DependencyAwareCache(CacheKeySetID) + def init_null_cache(self): + self.outputs = NullCache() + #The UI cache is expected to be iterable at the end of each workflow + #so it must cache at least a full workflow. Use Heirachical + self.ui = HierarchicalCache(CacheKeySetInputSignature) + self.objects = NullCache() def recursive_debug_dump(self): result = { @@ -135,7 +136,7 @@ class CacheSet: SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org") -def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}): +def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt=None, extra_data={}): is_v3 = issubclass(class_def, _ComfyNodeInternal) if is_v3: valid_inputs, schema = class_def.INPUT_TYPES(include_hidden=False, return_schema=True) @@ -153,10 +154,10 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, e if is_link(input_data) and (not input_info or not input_info.get("rawLink", False)): input_unique_id = input_data[0] output_index = input_data[1] - if outputs is None: + if execution_list is None: mark_missing() continue # This might be a lazily-evaluated input - cached_output = outputs.get(input_unique_id) + cached_output = execution_list.get_output_cache(input_unique_id, unique_id) if cached_output is None: mark_missing() continue @@ -405,6 +406,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, cached_output = caches.ui.get(unique_id) or {} server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": cached_output.get("output",None), "prompt_id": prompt_id }, server.client_id) get_progress_state().finish_progress(unique_id) + execution_list.cache_update(unique_id, caches.outputs.get(unique_id)) return (ExecutionResult.SUCCESS, None, None) input_data_all = None @@ -434,7 +436,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, for r in result: if is_link(r): source_node, source_output = r[0], r[1] - node_output = caches.outputs.get(source_node)[source_output] + node_output = execution_list.get_output_cache(source_node, unique_id)[source_output] for o in node_output: resolved_output.append(o) @@ -446,7 +448,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, has_subgraph = False else: get_progress_state().start_progress(unique_id) - input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data) + input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, execution_list, dynprompt, extra_data) if server.client_id is not None: server.last_node_id = display_node_id server.send_sync("executing", { "node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id }, server.client_id) @@ -549,11 +551,15 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, subcache.clean_unused() for node_id in new_output_ids: execution_list.add_node(node_id) + execution_list.cache_link(node_id, unique_id) for link in new_output_links: execution_list.add_strong_link(link[0], link[1], unique_id) pending_subgraph_results[unique_id] = cached_outputs return (ExecutionResult.PENDING, None, None) + caches.outputs.set(unique_id, output_data) + execution_list.cache_update(unique_id, output_data) + except comfy.model_management.InterruptProcessingException as iex: logging.info("Processing 
interrupted") diff --git a/main.py b/main.py index 35857dba8..4b4c5dcc4 100644 --- a/main.py +++ b/main.py @@ -173,7 +173,7 @@ def prompt_worker(q, server_instance): if args.cache_lru > 0: cache_type = execution.CacheType.LRU elif args.cache_none: - cache_type = execution.CacheType.DEPENDENCY_AWARE + cache_type = execution.CacheType.NONE e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_size=args.cache_lru) last_gc_collect = 0 diff --git a/tests/execution/test_execution.py b/tests/execution/test_execution.py index ef73ad9fd..ace0d2279 100644 --- a/tests/execution/test_execution.py +++ b/tests/execution/test_execution.py @@ -152,12 +152,12 @@ class TestExecution: # Initialize server and client # @fixture(scope="class", autouse=True, params=[ - # (use_lru, lru_size) - (False, 0), - (True, 0), - (True, 100), + { "extra_args" : [], "should_cache_results" : True }, + { "extra_args" : ["--cache-lru", 0], "should_cache_results" : True }, + { "extra_args" : ["--cache-lru", 100], "should_cache_results" : True }, + { "extra_args" : ["--cache-none"], "should_cache_results" : False }, ]) - def _server(self, args_pytest, request): + def server(self, args_pytest, request): # Start server pargs = [ 'python','main.py', @@ -167,12 +167,10 @@ class TestExecution: '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', '--cpu', ] - use_lru, lru_size = request.param - if use_lru: - pargs += ['--cache-lru', str(lru_size)] + pargs += [ str(param) for param in request.param["extra_args"] ] print("Running server with args:", pargs) # noqa: T201 p = subprocess.Popen(pargs) - yield + yield request.param p.kill() torch.cuda.empty_cache() @@ -193,7 +191,7 @@ class TestExecution: return comfy_client @fixture(scope="class", autouse=True) - def shared_client(self, args_pytest, _server): + def shared_client(self, args_pytest, server): client = self.start_client(args_pytest["listen"], args_pytest["port"]) yield client del client @@ -225,7 +223,7 @@ class TestExecution: assert result.did_run(mask) assert result.did_run(lazy_mix) - def test_full_cache(self, client: ComfyClient, builder: GraphBuilder): + def test_full_cache(self, client: ComfyClient, builder: GraphBuilder, server): g = builder input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) @@ -237,9 +235,12 @@ class TestExecution: client.run(g) result2 = client.run(g) for node_id, node in g.nodes.items(): - assert not result2.did_run(node), f"Node {node_id} ran, but should have been cached" + if server["should_cache_results"]: + assert not result2.did_run(node), f"Node {node_id} ran, but should have been cached" + else: + assert result2.did_run(node), f"Node {node_id} was cached, but should have been run" - def test_partial_cache(self, client: ComfyClient, builder: GraphBuilder): + def test_partial_cache(self, client: ComfyClient, builder: GraphBuilder, server): g = builder input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) @@ -251,8 +252,12 @@ class TestExecution: client.run(g) mask.inputs['value'] = 0.4 result2 = client.run(g) - assert not result2.did_run(input1), "Input1 should have been cached" - assert not result2.did_run(input2), "Input2 should have been cached" + if server["should_cache_results"]: + assert not result2.did_run(input1), "Input1 should have been cached" + assert not 
result2.did_run(input2), "Input2 should have been cached" + else: + assert result2.did_run(input1), "Input1 should have been rerun" + assert result2.did_run(input2), "Input2 should have been rerun" def test_error(self, client: ComfyClient, builder: GraphBuilder): g = builder @@ -411,7 +416,7 @@ class TestExecution: input2 = g.node("StubImage", id="removeme", content="WHITE", height=512, width=512, batch_size=1) client.run(g) - def test_custom_is_changed(self, client: ComfyClient, builder: GraphBuilder): + def test_custom_is_changed(self, client: ComfyClient, builder: GraphBuilder, server): g = builder # Creating the nodes in this specific order previously caused a bug save = g.node("SaveImage") @@ -427,7 +432,10 @@ class TestExecution: result3 = client.run(g) result4 = client.run(g) assert result1.did_run(is_changed), "is_changed should have been run" - assert not result2.did_run(is_changed), "is_changed should have been cached" + if server["should_cache_results"]: + assert not result2.did_run(is_changed), "is_changed should have been cached" + else: + assert result2.did_run(is_changed), "is_changed should have been re-run" assert result3.did_run(is_changed), "is_changed should have been re-run" assert result4.did_run(is_changed), "is_changed should not have been cached" @@ -514,7 +522,7 @@ class TestExecution: assert len(images2) == 1, "Should have 1 image" # This tests that only constant outputs are used in the call to `IS_CHANGED` - def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder): + def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder, server): g = builder input1 = g.node("StubConstantImage", value=0.5, height=512, width=512, batch_size=1) test_node = g.node("TestIsChangedWithConstants", image=input1.out(0), value=0.5) @@ -530,7 +538,11 @@ class TestExecution: images = result.get_images(output) assert len(images) == 1, "Should have 1 image" assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25" - assert not result.did_run(test_node), "The execution should have been cached" + if server["should_cache_results"]: + assert not result.did_run(test_node), "The execution should have been cached" + else: + assert result.did_run(test_node), "The execution should have been re-run" + def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder, skip_timing_checks): # Warmup execution to ensure server is fully initialized From 99ce2a1f66c4bcd500d76cc9a7430f7b2bf32776 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 18 Oct 2025 00:13:05 +0300 Subject: [PATCH 0770/1073] convert nodes_controlnet.py to V3 schema (#10202) --- comfy_extras/nodes_controlnet.py | 92 ++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 34 deletions(-) diff --git a/comfy_extras/nodes_controlnet.py b/comfy_extras/nodes_controlnet.py index 2d20e1fed..e835feed7 100644 --- a/comfy_extras/nodes_controlnet.py +++ b/comfy_extras/nodes_controlnet.py @@ -1,20 +1,26 @@ from comfy.cldm.control_types import UNION_CONTROLNET_TYPES import nodes import comfy.utils +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io -class SetUnionControlNetType: +class SetUnionControlNetType(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"control_net": ("CONTROL_NET", ), - "type": (["auto"] + list(UNION_CONTROLNET_TYPES.keys()),) - }} + def define_schema(cls): + return io.Schema( + 
node_id="SetUnionControlNetType", + category="conditioning/controlnet", + inputs=[ + io.ControlNet.Input("control_net"), + io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())), + ], + outputs=[ + io.ControlNet.Output(), + ], + ) - CATEGORY = "conditioning/controlnet" - RETURN_TYPES = ("CONTROL_NET",) - - FUNCTION = "set_controlnet_type" - - def set_controlnet_type(self, control_net, type): + @classmethod + def execute(cls, control_net, type) -> io.NodeOutput: control_net = control_net.copy() type_number = UNION_CONTROLNET_TYPES.get(type, -1) if type_number >= 0: @@ -22,27 +28,36 @@ class SetUnionControlNetType: else: control_net.set_extra_arg("control_type", []) - return (control_net,) + return io.NodeOutput(control_net) -class ControlNetInpaintingAliMamaApply(nodes.ControlNetApplyAdvanced): + set_controlnet_type = execute # TODO: remove + + +class ControlNetInpaintingAliMamaApply(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "control_net": ("CONTROL_NET", ), - "vae": ("VAE", ), - "image": ("IMAGE", ), - "mask": ("MASK", ), - "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), - "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) - }} + def define_schema(cls): + return io.Schema( + node_id="ControlNetInpaintingAliMamaApply", + category="conditioning/controlnet", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.ControlNet.Input("control_net"), + io.Vae.Input("vae"), + io.Image.Input("image"), + io.Mask.Input("mask"), + io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01), + io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001), + io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + ], + ) - FUNCTION = "apply_inpaint_controlnet" - - CATEGORY = "conditioning/controlnet" - - def apply_inpaint_controlnet(self, positive, negative, control_net, vae, image, mask, strength, start_percent, end_percent): + @classmethod + def execute(cls, positive, negative, control_net, vae, image, mask, strength, start_percent, end_percent) -> io.NodeOutput: extra_concat = [] if control_net.concat_mask: mask = 1.0 - mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])) @@ -50,11 +65,20 @@ class ControlNetInpaintingAliMamaApply(nodes.ControlNetApplyAdvanced): image = image * mask_apply.movedim(1, -1).repeat(1, 1, 1, image.shape[3]) extra_concat = [mask] - return self.apply_controlnet(positive, negative, control_net, image, strength, start_percent, end_percent, vae=vae, extra_concat=extra_concat) + result = nodes.ControlNetApplyAdvanced().apply_controlnet(positive, negative, control_net, image, strength, start_percent, end_percent, vae=vae, extra_concat=extra_concat) + return io.NodeOutput(result[0], result[1]) + + apply_inpaint_controlnet = execute # TODO: remove +class ControlNetExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + SetUnionControlNetType, + ControlNetInpaintingAliMamaApply, + ] -NODE_CLASS_MAPPINGS = { - "SetUnionControlNetType": SetUnionControlNetType, - "ControlNetInpaintingAliMamaApply": ControlNetInpaintingAliMamaApply, -} + +async def comfy_entrypoint() -> 
ControlNetExtension: + return ControlNetExtension() From 92d97380bd02d9883295aeb2d29365cecd9a765e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 17 Oct 2025 15:22:59 -0700 Subject: [PATCH 0771/1073] Update Python 3.14 installation instructions (#10385) Removed mention of installing pytorch nightly for Python 3.14. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b0731db33..c9a0644e3 100644 --- a/README.md +++ b/README.md @@ -197,7 +197,7 @@ comfy install ## Manual Install (Windows, Linux) -Python 3.14 will work if you comment out the `kornia` dependency in the requirements.txt file (breaks the canny node) and install pytorch nightly but it is not recommended. +Python 3.14 will work if you comment out the `kornia` dependency in the requirements.txt file (breaks the canny node) but it is not recommended. Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12 From 9da397ea2f271080406f0c14cf4f0db7221ddf70 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 17 Oct 2025 17:03:28 -0700 Subject: [PATCH 0772/1073] Disable torch compiler for cast_bias_weight function (#10384) * Disable torch compiler for cast_bias_weight function * Fix torch compile. --- comfy/ops.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/ops.py b/comfy/ops.py index 56b07b44c..5feeb3571 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -25,6 +25,9 @@ import comfy.rmsnorm import contextlib def run_every_op(): + if torch.compiler.is_compiling(): + return + comfy.model_management.throw_exception_if_processing_interrupted() def scaled_dot_product_attention(q, k, v, *args, **kwargs): @@ -70,6 +73,7 @@ if torch.cuda.is_available() and torch.backends.cudnn.is_available() and Perform def cast_to_input(weight, input, non_blocking=False, copy=True): return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy) +@torch.compiler.disable() def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None): if input is not None: if dtype is None: From 5b80addafd24bda5b2f9f7a35e32dbd40823c3fd Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 18 Oct 2025 19:35:46 -0700 Subject: [PATCH 0773/1073] Turn off cuda malloc by default when --fast autotune is turned on. 
(#10393) --- comfy/model_management.py | 3 +++ comfy/ops.py | 3 --- cuda_malloc.py | 7 ++++--- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index d82d5b8b0..7467391cd 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -371,6 +371,9 @@ try: except: pass +if torch.cuda.is_available() and torch.backends.cudnn.is_available() and PerformanceFeature.AutoTune in args.fast: + torch.backends.cudnn.benchmark = True + try: if torch_version_numeric >= (2, 5): torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True) diff --git a/comfy/ops.py b/comfy/ops.py index 5feeb3571..967134f05 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -67,9 +67,6 @@ except: cast_to = comfy.model_management.cast_to #TODO: remove once no more references -if torch.cuda.is_available() and torch.backends.cudnn.is_available() and PerformanceFeature.AutoTune in args.fast: - torch.backends.cudnn.benchmark = True - def cast_to_input(weight, input, non_blocking=False, copy=True): return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy) diff --git a/cuda_malloc.py b/cuda_malloc.py index c1d9ae3ca..6520d5123 100644 --- a/cuda_malloc.py +++ b/cuda_malloc.py @@ -1,6 +1,6 @@ import os import importlib.util -from comfy.cli_args import args +from comfy.cli_args import args, PerformanceFeature import subprocess #Can't use pytorch to get the GPU names because the cuda malloc has to be set before the first import. @@ -75,8 +75,9 @@ if not args.cuda_malloc: spec.loader.exec_module(module) version = module.__version__ - if int(version[0]) >= 2 and "+cu" in version: #enable by default for torch version 2.0 and up only on cuda torch - args.cuda_malloc = cuda_malloc_supported() + if int(version[0]) >= 2 and "+cu" in version: # enable by default for torch version 2.0 and up only on cuda torch + if PerformanceFeature.AutoTune not in args.fast: # Autotune has issues with cuda malloc + args.cuda_malloc = cuda_malloc_supported() except: pass From 0cf33953a7c951d163088cbfe36c55d1cdf8a718 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 18 Oct 2025 20:15:34 -0700 Subject: [PATCH 0774/1073] Fix batch size above 1 giving bad output in chroma radiance. (#10394) --- comfy/ldm/chroma_radiance/model.py | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/comfy/ldm/chroma_radiance/model.py b/comfy/ldm/chroma_radiance/model.py index 47aa11b04..7d7be80f5 100644 --- a/comfy/ldm/chroma_radiance/model.py +++ b/comfy/ldm/chroma_radiance/model.py @@ -189,15 +189,15 @@ class ChromaRadiance(Chroma): nerf_pixels = nn.functional.unfold(img_orig, kernel_size=patch_size, stride=patch_size) nerf_pixels = nerf_pixels.transpose(1, 2) # -> [B, NumPatches, C * P * P] + # Reshape for per-patch processing + nerf_hidden = img_out.reshape(B * num_patches, params.hidden_size) + nerf_pixels = nerf_pixels.reshape(B * num_patches, C, patch_size**2).transpose(1, 2) + if params.nerf_tile_size > 0 and num_patches > params.nerf_tile_size: # Enable tiling if nerf_tile_size isn't 0 and we actually have more patches than # the tile size. 
- img_dct = self.forward_tiled_nerf(img_out, nerf_pixels, B, C, num_patches, patch_size, params) + img_dct = self.forward_tiled_nerf(nerf_hidden, nerf_pixels, B, C, num_patches, patch_size, params) else: - # Reshape for per-patch processing - nerf_hidden = img_out.reshape(B * num_patches, params.hidden_size) - nerf_pixels = nerf_pixels.reshape(B * num_patches, C, patch_size**2).transpose(1, 2) - # Get DCT-encoded pixel embeddings [pixel-dct] img_dct = self.nerf_image_embedder(nerf_pixels) @@ -240,17 +240,8 @@ class ChromaRadiance(Chroma): end = min(i + tile_size, num_patches) # Slice the current tile from the input tensors - nerf_hidden_tile = nerf_hidden[:, i:end, :] - nerf_pixels_tile = nerf_pixels[:, i:end, :] - - # Get the actual number of patches in this tile (can be smaller for the last tile) - num_patches_tile = nerf_hidden_tile.shape[1] - - # Reshape the tile for per-patch processing - # [B, NumPatches_tile, D] -> [B * NumPatches_tile, D] - nerf_hidden_tile = nerf_hidden_tile.reshape(batch * num_patches_tile, params.hidden_size) - # [B, NumPatches_tile, C*P*P] -> [B*NumPatches_tile, C, P*P] -> [B*NumPatches_tile, P*P, C] - nerf_pixels_tile = nerf_pixels_tile.reshape(batch * num_patches_tile, channels, patch_size**2).transpose(1, 2) + nerf_hidden_tile = nerf_hidden[i * batch:end * batch] + nerf_pixels_tile = nerf_pixels[i * batch:end * batch] # get DCT-encoded pixel embeddings [pixel-dct] img_dct_tile = self.nerf_image_embedder(nerf_pixels_tile) From dad076aee68ab676fb390d9663ab9e343824a080 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 18 Oct 2025 20:19:52 -0700 Subject: [PATCH 0775/1073] Speed up chroma radiance. (#10395) --- comfy/model_detection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 7677617c0..141f1e164 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -213,7 +213,7 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["nerf_mlp_ratio"] = 4 dit_config["nerf_depth"] = 4 dit_config["nerf_max_freqs"] = 8 - dit_config["nerf_tile_size"] = 32 + dit_config["nerf_tile_size"] = 512 dit_config["nerf_final_head_type"] = "conv" if f"{key_prefix}nerf_final_layer_conv.norm.scale" in state_dict_keys else "linear" dit_config["nerf_embedder_dtype"] = torch.float32 else: From b4f30bd4087a79b4c4fc89bb67b9889adb866294 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 18 Oct 2025 22:25:35 -0700 Subject: [PATCH 0776/1073] Pytorch is stupid. 
(#10398) --- comfy/ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ops.py b/comfy/ops.py index 967134f05..934e21261 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -58,7 +58,7 @@ except (ModuleNotFoundError, TypeError): NVIDIA_MEMORY_CONV_BUG_WORKAROUND = False try: if comfy.model_management.is_nvidia(): - if torch.backends.cudnn.version() >= 91200 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10): + if torch.backends.cudnn.version() >= 91002 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10): #TODO: change upper bound version once it's fixed' NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True logging.info("working around nvidia conv3d memory bug.") From b5c59b763c6b14e1362ec4274b09eca4f3f7091b Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sun, 19 Oct 2025 13:05:46 -0700 Subject: [PATCH 0777/1073] Deprecation warning on unused files (#10387) * only warn for unused files * include internal extensions --- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index a44f4f237..10c2698b5 100644 --- a/server.py +++ b/server.py @@ -56,7 +56,7 @@ async def deprecation_warning(request: web.Request, handler): """Middleware to warn about deprecated frontend API paths""" path = request.path - if (path.startswith('/scripts/') or path.startswith('/extensions/core/')): + if path.startswith("/scripts/ui") or path.startswith("/extensions/core/"): # Only warn once per unique file path if path not in _deprecated_paths_warned: _deprecated_paths_warned.add(path) From a4787ac83bf6c83eeb459ed80fc9b36f63d2a3a7 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 21 Oct 2025 03:28:36 +0800 Subject: [PATCH 0778/1073] Update template to 0.2.1 (#10413) * Update template to 0.1.97 * Update template to 0.2.1 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 82457df54..dd2afcab0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.28.7 -comfyui-workflow-templates==0.1.95 +comfyui-workflow-templates==0.2.1 comfyui-embedded-docs==0.3.0 torch torchsde From 2c2aa409b01f513de88d2245931e5836ed1cd718 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 20 Oct 2025 12:43:24 -0700 Subject: [PATCH 0779/1073] Log message for cudnn disable on AMD. 
(#10418) --- comfy/model_management.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 7467391cd..a2c318ec3 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -333,6 +333,7 @@ SUPPORT_FP8_OPS = args.supports_fp8_compute try: if is_amd(): torch.backends.cudnn.enabled = False # Seems to improve things a lot on AMD + logging.info("Set: torch.backends.cudnn.enabled = False for better AMD performance.") try: rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2])) except: From b7992f871af38d89a459080caa57cc359ed93a46 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 20 Oct 2025 16:03:06 -0700 Subject: [PATCH 0780/1073] =?UTF-8?q?Revert=20"execution:=20fold=20in=20de?= =?UTF-8?q?pendency=20aware=20caching=20/=20Fix=20--cache-none=20with=20l?= =?UTF-8?q?=E2=80=A6"=20(#10422)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit b1467da4803017a418c32c159525767f45871ca3. --- comfy_execution/caching.py | 174 ++++++++++++++++++++++++++---- comfy_execution/graph.py | 31 +----- execution.py | 34 +++--- main.py | 2 +- tests/execution/test_execution.py | 50 ++++----- 5 files changed, 190 insertions(+), 101 deletions(-) diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py index 566bc3f9c..41224ce3b 100644 --- a/comfy_execution/caching.py +++ b/comfy_execution/caching.py @@ -265,26 +265,6 @@ class HierarchicalCache(BasicCache): assert cache is not None return await cache._ensure_subcache(node_id, children_ids) -class NullCache: - - async def set_prompt(self, dynprompt, node_ids, is_changed_cache): - pass - - def all_node_ids(self): - return [] - - def clean_unused(self): - pass - - def get(self, node_id): - return None - - def set(self, node_id, value): - pass - - async def ensure_subcache_for(self, node_id, children_ids): - return self - class LRUCache(BasicCache): def __init__(self, key_class, max_size=100): super().__init__(key_class) @@ -336,3 +316,157 @@ class LRUCache(BasicCache): self._mark_used(child_id) self.children[cache_key].append(self.cache_key_set.get_data_key(child_id)) return self + + +class DependencyAwareCache(BasicCache): + """ + A cache implementation that tracks dependencies between nodes and manages + their execution and caching accordingly. It extends the BasicCache class. + Nodes are removed from this cache once all of their descendants have been + executed. + """ + + def __init__(self, key_class): + """ + Initialize the DependencyAwareCache. + + Args: + key_class: The class used for generating cache keys. + """ + super().__init__(key_class) + self.descendants = {} # Maps node_id -> set of descendant node_ids + self.ancestors = {} # Maps node_id -> set of ancestor node_ids + self.executed_nodes = set() # Tracks nodes that have been executed + + async def set_prompt(self, dynprompt, node_ids, is_changed_cache): + """ + Clear the entire cache and rebuild the dependency graph. + + Args: + dynprompt: The dynamic prompt object containing node information. + node_ids: List of node IDs to initialize the cache for. + is_changed_cache: Flag indicating if the cache has changed. 
+ """ + # Clear all existing cache data + self.cache.clear() + self.subcaches.clear() + self.descendants.clear() + self.ancestors.clear() + self.executed_nodes.clear() + + # Call the parent method to initialize the cache with the new prompt + await super().set_prompt(dynprompt, node_ids, is_changed_cache) + + # Rebuild the dependency graph + self._build_dependency_graph(dynprompt, node_ids) + + def _build_dependency_graph(self, dynprompt, node_ids): + """ + Build the dependency graph for all nodes. + + Args: + dynprompt: The dynamic prompt object containing node information. + node_ids: List of node IDs to build the graph for. + """ + self.descendants.clear() + self.ancestors.clear() + for node_id in node_ids: + self.descendants[node_id] = set() + self.ancestors[node_id] = set() + + for node_id in node_ids: + inputs = dynprompt.get_node(node_id)["inputs"] + for input_data in inputs.values(): + if is_link(input_data): # Check if the input is a link to another node + ancestor_id = input_data[0] + self.descendants[ancestor_id].add(node_id) + self.ancestors[node_id].add(ancestor_id) + + def set(self, node_id, value): + """ + Mark a node as executed and store its value in the cache. + + Args: + node_id: The ID of the node to store. + value: The value to store for the node. + """ + self._set_immediate(node_id, value) + self.executed_nodes.add(node_id) + self._cleanup_ancestors(node_id) + + def get(self, node_id): + """ + Retrieve the cached value for a node. + + Args: + node_id: The ID of the node to retrieve. + + Returns: + The cached value for the node. + """ + return self._get_immediate(node_id) + + async def ensure_subcache_for(self, node_id, children_ids): + """ + Ensure a subcache exists for a node and update dependencies. + + Args: + node_id: The ID of the parent node. + children_ids: List of child node IDs to associate with the parent node. + + Returns: + The subcache object for the node. + """ + subcache = await super()._ensure_subcache(node_id, children_ids) + for child_id in children_ids: + self.descendants[node_id].add(child_id) + self.ancestors[child_id].add(node_id) + return subcache + + def _cleanup_ancestors(self, node_id): + """ + Check if ancestors of a node can be removed from the cache. + + Args: + node_id: The ID of the node whose ancestors are to be checked. + """ + for ancestor_id in self.ancestors.get(node_id, []): + if ancestor_id in self.executed_nodes: + # Remove ancestor if all its descendants have been executed + if all(descendant in self.executed_nodes for descendant in self.descendants[ancestor_id]): + self._remove_node(ancestor_id) + + def _remove_node(self, node_id): + """ + Remove a node from the cache. + + Args: + node_id: The ID of the node to remove. + """ + cache_key = self.cache_key_set.get_data_key(node_id) + if cache_key in self.cache: + del self.cache[cache_key] + subcache_key = self.cache_key_set.get_subcache_key(node_id) + if subcache_key in self.subcaches: + del self.subcaches[subcache_key] + + def clean_unused(self): + """ + Clean up unused nodes. This is a no-op for this cache implementation. + """ + pass + + def recursive_debug_dump(self): + """ + Dump the cache and dependency graph for debugging. + + Returns: + A list containing the cache state and dependency graph. 
+ """ + result = super().recursive_debug_dump() + result.append({ + "descendants": self.descendants, + "ancestors": self.ancestors, + "executed_nodes": list(self.executed_nodes), + }) + return result diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py index d5bbacde3..f4b427265 100644 --- a/comfy_execution/graph.py +++ b/comfy_execution/graph.py @@ -153,9 +153,8 @@ class TopologicalSort: continue _, _, input_info = self.get_input_info(unique_id, input_name) is_lazy = input_info is not None and "lazy" in input_info and input_info["lazy"] - if (include_lazy or not is_lazy): - if not self.is_cached(from_node_id): - node_ids.append(from_node_id) + if (include_lazy or not is_lazy) and not self.is_cached(from_node_id): + node_ids.append(from_node_id) links.append((from_node_id, from_socket, unique_id)) for link in links: @@ -195,34 +194,10 @@ class ExecutionList(TopologicalSort): super().__init__(dynprompt) self.output_cache = output_cache self.staged_node_id = None - self.execution_cache = {} - self.execution_cache_listeners = {} def is_cached(self, node_id): return self.output_cache.get(node_id) is not None - def cache_link(self, from_node_id, to_node_id): - if not to_node_id in self.execution_cache: - self.execution_cache[to_node_id] = {} - self.execution_cache[to_node_id][from_node_id] = self.output_cache.get(from_node_id) - if not from_node_id in self.execution_cache_listeners: - self.execution_cache_listeners[from_node_id] = set() - self.execution_cache_listeners[from_node_id].add(to_node_id) - - def get_output_cache(self, from_node_id, to_node_id): - if not to_node_id in self.execution_cache: - return None - return self.execution_cache[to_node_id].get(from_node_id) - - def cache_update(self, node_id, value): - if node_id in self.execution_cache_listeners: - for to_node_id in self.execution_cache_listeners[node_id]: - self.execution_cache[to_node_id][node_id] = value - - def add_strong_link(self, from_node_id, from_socket, to_node_id): - super().add_strong_link(from_node_id, from_socket, to_node_id) - self.cache_link(from_node_id, to_node_id) - async def stage_node_execution(self): assert self.staged_node_id is None if self.is_empty(): @@ -302,8 +277,6 @@ class ExecutionList(TopologicalSort): def complete_node_execution(self): node_id = self.staged_node_id self.pop_node(node_id) - self.execution_cache.pop(node_id, None) - self.execution_cache_listeners.pop(node_id, None) self.staged_node_id = None def get_nodes_in_cycle(self): diff --git a/execution.py b/execution.py index 78c36a4b0..1dc35738b 100644 --- a/execution.py +++ b/execution.py @@ -18,7 +18,7 @@ from comfy_execution.caching import ( BasicCache, CacheKeySetID, CacheKeySetInputSignature, - NullCache, + DependencyAwareCache, HierarchicalCache, LRUCache, ) @@ -91,13 +91,13 @@ class IsChangedCache: class CacheType(Enum): CLASSIC = 0 LRU = 1 - NONE = 2 + DEPENDENCY_AWARE = 2 class CacheSet: def __init__(self, cache_type=None, cache_size=None): - if cache_type == CacheType.NONE: - self.init_null_cache() + if cache_type == CacheType.DEPENDENCY_AWARE: + self.init_dependency_aware_cache() logging.info("Disabling intermediate node cache.") elif cache_type == CacheType.LRU: if cache_size is None: @@ -120,12 +120,11 @@ class CacheSet: self.ui = LRUCache(CacheKeySetInputSignature, max_size=cache_size) self.objects = HierarchicalCache(CacheKeySetID) - def init_null_cache(self): - self.outputs = NullCache() - #The UI cache is expected to be iterable at the end of each workflow - #so it must cache at least a full workflow. 
Use Heirachical - self.ui = HierarchicalCache(CacheKeySetInputSignature) - self.objects = NullCache() + # only hold cached items while the decendents have not executed + def init_dependency_aware_cache(self): + self.outputs = DependencyAwareCache(CacheKeySetInputSignature) + self.ui = DependencyAwareCache(CacheKeySetInputSignature) + self.objects = DependencyAwareCache(CacheKeySetID) def recursive_debug_dump(self): result = { @@ -136,7 +135,7 @@ class CacheSet: SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org") -def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt=None, extra_data={}): +def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}): is_v3 = issubclass(class_def, _ComfyNodeInternal) if is_v3: valid_inputs, schema = class_def.INPUT_TYPES(include_hidden=False, return_schema=True) @@ -154,10 +153,10 @@ def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt= if is_link(input_data) and (not input_info or not input_info.get("rawLink", False)): input_unique_id = input_data[0] output_index = input_data[1] - if execution_list is None: + if outputs is None: mark_missing() continue # This might be a lazily-evaluated input - cached_output = execution_list.get_output_cache(input_unique_id, unique_id) + cached_output = outputs.get(input_unique_id) if cached_output is None: mark_missing() continue @@ -406,7 +405,6 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, cached_output = caches.ui.get(unique_id) or {} server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": cached_output.get("output",None), "prompt_id": prompt_id }, server.client_id) get_progress_state().finish_progress(unique_id) - execution_list.cache_update(unique_id, caches.outputs.get(unique_id)) return (ExecutionResult.SUCCESS, None, None) input_data_all = None @@ -436,7 +434,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, for r in result: if is_link(r): source_node, source_output = r[0], r[1] - node_output = execution_list.get_output_cache(source_node, unique_id)[source_output] + node_output = caches.outputs.get(source_node)[source_output] for o in node_output: resolved_output.append(o) @@ -448,7 +446,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, has_subgraph = False else: get_progress_state().start_progress(unique_id) - input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, execution_list, dynprompt, extra_data) + input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data) if server.client_id is not None: server.last_node_id = display_node_id server.send_sync("executing", { "node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id }, server.client_id) @@ -551,15 +549,11 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, subcache.clean_unused() for node_id in new_output_ids: execution_list.add_node(node_id) - execution_list.cache_link(node_id, unique_id) for link in new_output_links: execution_list.add_strong_link(link[0], link[1], unique_id) pending_subgraph_results[unique_id] = cached_outputs return (ExecutionResult.PENDING, None, None) - caches.outputs.set(unique_id, output_data) - execution_list.cache_update(unique_id, output_data) - except comfy.model_management.InterruptProcessingException as iex: 
logging.info("Processing interrupted") diff --git a/main.py b/main.py index 4b4c5dcc4..35857dba8 100644 --- a/main.py +++ b/main.py @@ -173,7 +173,7 @@ def prompt_worker(q, server_instance): if args.cache_lru > 0: cache_type = execution.CacheType.LRU elif args.cache_none: - cache_type = execution.CacheType.NONE + cache_type = execution.CacheType.DEPENDENCY_AWARE e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_size=args.cache_lru) last_gc_collect = 0 diff --git a/tests/execution/test_execution.py b/tests/execution/test_execution.py index ace0d2279..ef73ad9fd 100644 --- a/tests/execution/test_execution.py +++ b/tests/execution/test_execution.py @@ -152,12 +152,12 @@ class TestExecution: # Initialize server and client # @fixture(scope="class", autouse=True, params=[ - { "extra_args" : [], "should_cache_results" : True }, - { "extra_args" : ["--cache-lru", 0], "should_cache_results" : True }, - { "extra_args" : ["--cache-lru", 100], "should_cache_results" : True }, - { "extra_args" : ["--cache-none"], "should_cache_results" : False }, + # (use_lru, lru_size) + (False, 0), + (True, 0), + (True, 100), ]) - def server(self, args_pytest, request): + def _server(self, args_pytest, request): # Start server pargs = [ 'python','main.py', @@ -167,10 +167,12 @@ class TestExecution: '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', '--cpu', ] - pargs += [ str(param) for param in request.param["extra_args"] ] + use_lru, lru_size = request.param + if use_lru: + pargs += ['--cache-lru', str(lru_size)] print("Running server with args:", pargs) # noqa: T201 p = subprocess.Popen(pargs) - yield request.param + yield p.kill() torch.cuda.empty_cache() @@ -191,7 +193,7 @@ class TestExecution: return comfy_client @fixture(scope="class", autouse=True) - def shared_client(self, args_pytest, server): + def shared_client(self, args_pytest, _server): client = self.start_client(args_pytest["listen"], args_pytest["port"]) yield client del client @@ -223,7 +225,7 @@ class TestExecution: assert result.did_run(mask) assert result.did_run(lazy_mix) - def test_full_cache(self, client: ComfyClient, builder: GraphBuilder, server): + def test_full_cache(self, client: ComfyClient, builder: GraphBuilder): g = builder input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) @@ -235,12 +237,9 @@ class TestExecution: client.run(g) result2 = client.run(g) for node_id, node in g.nodes.items(): - if server["should_cache_results"]: - assert not result2.did_run(node), f"Node {node_id} ran, but should have been cached" - else: - assert result2.did_run(node), f"Node {node_id} was cached, but should have been run" + assert not result2.did_run(node), f"Node {node_id} ran, but should have been cached" - def test_partial_cache(self, client: ComfyClient, builder: GraphBuilder, server): + def test_partial_cache(self, client: ComfyClient, builder: GraphBuilder): g = builder input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) @@ -252,12 +251,8 @@ class TestExecution: client.run(g) mask.inputs['value'] = 0.4 result2 = client.run(g) - if server["should_cache_results"]: - assert not result2.did_run(input1), "Input1 should have been cached" - assert not result2.did_run(input2), "Input2 should have been cached" - else: - assert result2.did_run(input1), "Input1 should have been rerun" - 
assert result2.did_run(input2), "Input2 should have been rerun" + assert not result2.did_run(input1), "Input1 should have been cached" + assert not result2.did_run(input2), "Input2 should have been cached" def test_error(self, client: ComfyClient, builder: GraphBuilder): g = builder @@ -416,7 +411,7 @@ class TestExecution: input2 = g.node("StubImage", id="removeme", content="WHITE", height=512, width=512, batch_size=1) client.run(g) - def test_custom_is_changed(self, client: ComfyClient, builder: GraphBuilder, server): + def test_custom_is_changed(self, client: ComfyClient, builder: GraphBuilder): g = builder # Creating the nodes in this specific order previously caused a bug save = g.node("SaveImage") @@ -432,10 +427,7 @@ class TestExecution: result3 = client.run(g) result4 = client.run(g) assert result1.did_run(is_changed), "is_changed should have been run" - if server["should_cache_results"]: - assert not result2.did_run(is_changed), "is_changed should have been cached" - else: - assert result2.did_run(is_changed), "is_changed should have been re-run" + assert not result2.did_run(is_changed), "is_changed should have been cached" assert result3.did_run(is_changed), "is_changed should have been re-run" assert result4.did_run(is_changed), "is_changed should not have been cached" @@ -522,7 +514,7 @@ class TestExecution: assert len(images2) == 1, "Should have 1 image" # This tests that only constant outputs are used in the call to `IS_CHANGED` - def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder, server): + def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder): g = builder input1 = g.node("StubConstantImage", value=0.5, height=512, width=512, batch_size=1) test_node = g.node("TestIsChangedWithConstants", image=input1.out(0), value=0.5) @@ -538,11 +530,7 @@ class TestExecution: images = result.get_images(output) assert len(images) == 1, "Should have 1 image" assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25" - if server["should_cache_results"]: - assert not result.did_run(test_node), "The execution should have been cached" - else: - assert result.did_run(test_node), "The execution should have been re-run" - + assert not result.did_run(test_node), "The execution should have been cached" def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder, skip_timing_checks): # Warmup execution to ensure server is fully initialized From 560b1bdfca77d9441ca2924fd9d6baa8dda05cd7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Oct 2025 15:44:38 -0400 Subject: [PATCH 0781/1073] ComfyUI version v0.3.66 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index d39c1fdc4..33a06bbb0 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.65" +__version__ = "0.3.66" diff --git a/pyproject.toml b/pyproject.toml index 653604e24..0c6b23a25 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.65" +version = "0.3.66" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 9cdc64998f8990aed7688b0ebe89bc3b97733764 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 21 Oct 2025 16:15:23 -0700 Subject: [PATCH 0782/1073] Only disable cudnn on newer AMD GPUs. (#10437) --- comfy/model_management.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index a2c318ec3..79d6ff9d4 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -330,15 +330,21 @@ except: SUPPORT_FP8_OPS = args.supports_fp8_compute + +AMD_RDNA2_AND_OLDER_ARCH = ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012", "gfx906", "gfx900", "gfx803"] + try: if is_amd(): - torch.backends.cudnn.enabled = False # Seems to improve things a lot on AMD - logging.info("Set: torch.backends.cudnn.enabled = False for better AMD performance.") + arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName + if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)): + torch.backends.cudnn.enabled = False # Seems to improve things a lot on AMD + logging.info("Set: torch.backends.cudnn.enabled = False for better AMD performance.") + try: rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2])) except: rocm_version = (6, -1) - arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName + logging.info("AMD arch: {}".format(arch)) logging.info("ROCm version: {}".format(rocm_version)) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: @@ -1331,7 +1337,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma if is_amd(): arch = torch.cuda.get_device_properties(device).gcnArchName - if any((a in arch) for a in ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012", "gfx906", "gfx900", "gfx803"]): # RDNA2 and older don't support bf16 + if any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH): # RDNA2 and older don't support bf16 if manual_cast: return True return False From f13cff0be65e35d34876b173bba2fec6bd94746b Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 21 Oct 2025 20:16:16 -0700 Subject: [PATCH 0783/1073] Add custom node published subgraphs endpoint (#10438) * Add get_subgraphs_dir to ComfyExtension and PUBLISHED_SUBGRAPH_DIRS to nodes.py * Created initial endpoints, although the returned paths are a bit off currently * Fix path and actually return real data * Sanitize returned /api/global_subgraphs entries * Remove leftover function from early prototyping * Remove added whitespace * Add None check for sanitize_entry --- app/subgraph_manager.py | 112 ++++++++++++++++++++++++++++++++++++++++ server.py | 3 ++ 2 files changed, 115 insertions(+) create mode 100644 app/subgraph_manager.py diff --git a/app/subgraph_manager.py b/app/subgraph_manager.py new file mode 100644 index 000000000..dbe404541 --- /dev/null +++ b/app/subgraph_manager.py @@ -0,0 +1,112 @@ +from __future__ import annotations + +from typing import TypedDict +import os +import folder_paths +import glob +from aiohttp import web +import hashlib + + +class Source: + custom_node = "custom_node" + +class SubgraphEntry(TypedDict): + source: str + """ + Source of 
subgraph - custom_nodes vs templates. + """ + path: str + """ + Relative path of the subgraph file. + For custom nodes, will be the relative directory like /subgraphs/.json + """ + name: str + """ + Name of subgraph file. + """ + info: CustomNodeSubgraphEntryInfo + """ + Additional info about subgraph; in the case of custom_nodes, will contain nodepack name + """ + data: str + +class CustomNodeSubgraphEntryInfo(TypedDict): + node_pack: str + """Node pack name.""" + +class SubgraphManager: + def __init__(self): + self.cached_custom_node_subgraphs: dict[SubgraphEntry] | None = None + + async def load_entry_data(self, entry: SubgraphEntry): + with open(entry['path'], 'r') as f: + entry['data'] = f.read() + return entry + + async def sanitize_entry(self, entry: SubgraphEntry | None, remove_data=False) -> SubgraphEntry | None: + if entry is None: + return None + entry = entry.copy() + entry.pop('path', None) + if remove_data: + entry.pop('data', None) + return entry + + async def sanitize_entries(self, entries: dict[str, SubgraphEntry], remove_data=False) -> dict[str, SubgraphEntry]: + entries = entries.copy() + for key in list(entries.keys()): + entries[key] = await self.sanitize_entry(entries[key], remove_data) + return entries + + async def get_custom_node_subgraphs(self, loadedModules, force_reload=False): + # if not forced to reload and cached, return cache + if not force_reload and self.cached_custom_node_subgraphs is not None: + return self.cached_custom_node_subgraphs + # Load subgraphs from custom nodes + subfolder = "subgraphs" + subgraphs_dict: dict[SubgraphEntry] = {} + + for folder in folder_paths.get_folder_paths("custom_nodes"): + pattern = os.path.join(folder, f"*/{subfolder}/*.json") + matched_files = glob.glob(pattern) + for file in matched_files: + # replace backslashes with forward slashes + file = file.replace('\\', '/') + info: CustomNodeSubgraphEntryInfo = { + "node_pack": "custom_nodes." 
+ file.split('/')[-3] + } + source = Source.custom_node + # hash source + path to make sure id will be as unique as possible, but + # reproducible across backend reloads + id = hashlib.sha256(f"{source}{file}".encode()).hexdigest() + entry: SubgraphEntry = { + "source": Source.custom_node, + "name": os.path.splitext(os.path.basename(file))[0], + "path": file, + "info": info, + } + subgraphs_dict[id] = entry + self.cached_custom_node_subgraphs = subgraphs_dict + return subgraphs_dict + + async def get_custom_node_subgraph(self, id: str, loadedModules): + subgraphs = await self.get_custom_node_subgraphs(loadedModules) + entry: SubgraphEntry = subgraphs.get(id, None) + if entry is not None and entry.get('data', None) is None: + await self.load_entry_data(entry) + return entry + + def add_routes(self, routes, loadedModules): + @routes.get("/global_subgraphs") + async def get_global_subgraphs(request): + subgraphs_dict = await self.get_custom_node_subgraphs(loadedModules) + # NOTE: we may want to include other sources of global subgraphs such as templates in the future; + # that's the reasoning for the current implementation + return web.json_response(await self.sanitize_entries(subgraphs_dict, remove_data=True)) + + @routes.get("/global_subgraphs/{id}") + async def get_global_subgraph(request): + id = request.match_info.get("id", None) + subgraph = await self.get_custom_node_subgraph(id, loadedModules) + return web.json_response(await self.sanitize_entry(subgraph)) diff --git a/server.py b/server.py index 10c2698b5..fe58db286 100644 --- a/server.py +++ b/server.py @@ -35,6 +35,7 @@ from comfy_api.internal import _ComfyNodeInternal from app.user_manager import UserManager from app.model_manager import ModelFileManager from app.custom_node_manager import CustomNodeManager +from app.subgraph_manager import SubgraphManager from typing import Optional, Union from api_server.routes.internal.internal_routes import InternalRoutes from protocol import BinaryEventTypes @@ -173,6 +174,7 @@ class PromptServer(): self.user_manager = UserManager() self.model_file_manager = ModelFileManager() self.custom_node_manager = CustomNodeManager() + self.subgraph_manager = SubgraphManager() self.internal_routes = InternalRoutes(self) self.supports = ["custom_nodes_from_web"] self.prompt_queue = execution.PromptQueue(self) @@ -819,6 +821,7 @@ class PromptServer(): self.user_manager.add_routes(self.routes) self.model_file_manager.add_routes(self.routes) self.custom_node_manager.add_routes(self.routes, self.app, nodes.LOADED_MODULE_DIRS.items()) + self.subgraph_manager.add_routes(self.routes, nodes.LOADED_MODULE_DIRS.items()) self.app.add_subapp('/internal', self.internal_routes.get_app()) # Prefix every route with /api for easier matching for delegation. From 4739d7717fea56750d0ef98c64268d9c1e487d78 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Thu, 23 Oct 2025 05:49:05 +1000 Subject: [PATCH 0784/1073] execution: fold in dependency aware caching / Fix --cache-none with loops/lazy etc (Resubmit) (#10440) * execution: fold in dependency aware caching This makes --cache-none compatible with lazy and expanded subgraphs. Currently the --cache-none option is powered by the DependencyAwareCache. The cache attempts to maintain a parallel copy of the execution list data structure, however it is only set up once at the start of execution and does not get meaningful updates to the execution list.
This causes multiple problems when --cache-none is used with lazy and expanded subgraphs, as the DAC does not accurately update its copy of the execution data structure. The DAC makes an attempt to handle subgraphs (ensure_subcache), but this does not accurately connect to nodes outside the subgraph. The current semantics of the DAC are to free a node ASAP after the dependent nodes are executed. This means that if a subgraph references such a node, it will be re-queued and re-executed by the execution_list, but the DAC won't see it in its to-free lists anymore and will leak memory. Rather than trying to cover, from inside the cache, all the cases where the execution list changes, move the whole problem to the executor, which maintains an always up-to-date copy of the wanted data structure. The executor now has a fast-moving run-local cache of its own. Each _to node has its own mini cache, and the cache is unconditionally primed at the time of add_strong_link. add_strong_link is called for static workflows, lazy links and expanded subgraphs alike, so it is the single source of truth for output dependencies. In the case of a cache hit, the executor cache will hold the non-None value (it will also respect updates if they happen somehow). In the case of a cache miss, the executor caches a None and waits for a notification to update the value when the node completes. When a node completes execution, it simply releases its mini cache and, in turn, its strong refs on its direct ancestor outputs, allowing for ASAP freeing (same as the DependencyAwareCache but a little more automatic). This now allows --cache-none to be re-implemented with no cache at all. The dependency-aware cache was also applying the dependency semantics to the objects and UI caches, which is not accurate (this entire logic was always outputs-specific). This also prepares for more complex caching strategies (such as RAM-pressure-based caching), where a cache can implement any freeing strategy completely independently of the dependency-awareness requirement. * main: re-implement --cache-none as no cache at all The execution list now tracks dependency-aware freeing more correctly than the DependencyAwareCache did. Change it to a cache that does nothing. * test_execution: add --cache-none to the test suite --cache-none is now expected to work universally. Run it through the full unit test suite. Propagate the server parameterization for whether or not the server is capable of caching, so that the minority of tests that specifically check for cache hits can branch on it. Hard-assert NOT caching in the else branch to give some coverage of the expected --cache-none behaviour of not actually caching.
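To make the run-local cache design described above concrete, the following is a minimal, self-contained sketch of the pattern the message outlines (prime on link, notify on completion, release on pop). The names used here (MiniExecutionCache, StubOutputCache) are illustrative stand-ins, not the actual ExecutionList/NullCache code changed in the diff below.

class StubOutputCache:
    """Stand-in for the persistent output cache; stores nothing, like --cache-none."""
    def get(self, node_id):
        return None

class MiniExecutionCache:
    """Sketch of the executor's run-local, per-destination-node output cache."""
    def __init__(self, output_cache):
        self.output_cache = output_cache
        self.execution_cache = {}   # to_node_id -> {from_node_id: cached output or None}
        self.listeners = {}         # from_node_id -> set of to_node_ids waiting on it

    def cache_link(self, from_node_id, to_node_id):
        # Unconditionally primed when a strong link is added: a hit stores the
        # value immediately, a miss stores None and registers for notification.
        self.execution_cache.setdefault(to_node_id, {})[from_node_id] = self.output_cache.get(from_node_id)
        self.listeners.setdefault(from_node_id, set()).add(to_node_id)

    def cache_update(self, node_id, value):
        # Called when node_id finishes executing: deliver its output to every
        # consumer that primed a link against it.
        for to_node_id in self.listeners.get(node_id, ()):
            if to_node_id in self.execution_cache:
                self.execution_cache[to_node_id][node_id] = value

    def get_output_cache(self, from_node_id, to_node_id):
        return self.execution_cache.get(to_node_id, {}).get(from_node_id)

    def complete(self, node_id):
        # Dropping the mini cache releases the strong refs on direct ancestor
        # outputs, giving ASAP freeing even when the persistent cache keeps nothing.
        self.execution_cache.pop(node_id, None)
        self.listeners.pop(node_id, None)

cache = MiniExecutionCache(StubOutputCache())
cache.cache_link("loader", "sampler")                        # sampler depends on loader
assert cache.get_output_cache("loader", "sampler") is None   # miss: wait for execution
cache.cache_update("loader", ("model",))                     # loader finished; listeners notified
assert cache.get_output_cache("loader", "sampler") == ("model",)
cache.complete("sampler")                                    # sampler done; its refs released

Under this scheme the persistent cache is free to implement any retention policy, including none at all, since correct input resolution no longer depends on it.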
--- comfy_execution/caching.py | 174 ++++-------------------------- comfy_execution/graph.py | 32 +++++- execution.py | 34 +++--- main.py | 2 +- tests/execution/test_execution.py | 50 +++++---- 5 files changed, 102 insertions(+), 190 deletions(-) diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py index 41224ce3b..566bc3f9c 100644 --- a/comfy_execution/caching.py +++ b/comfy_execution/caching.py @@ -265,6 +265,26 @@ class HierarchicalCache(BasicCache): assert cache is not None return await cache._ensure_subcache(node_id, children_ids) +class NullCache: + + async def set_prompt(self, dynprompt, node_ids, is_changed_cache): + pass + + def all_node_ids(self): + return [] + + def clean_unused(self): + pass + + def get(self, node_id): + return None + + def set(self, node_id, value): + pass + + async def ensure_subcache_for(self, node_id, children_ids): + return self + class LRUCache(BasicCache): def __init__(self, key_class, max_size=100): super().__init__(key_class) @@ -316,157 +336,3 @@ class LRUCache(BasicCache): self._mark_used(child_id) self.children[cache_key].append(self.cache_key_set.get_data_key(child_id)) return self - - -class DependencyAwareCache(BasicCache): - """ - A cache implementation that tracks dependencies between nodes and manages - their execution and caching accordingly. It extends the BasicCache class. - Nodes are removed from this cache once all of their descendants have been - executed. - """ - - def __init__(self, key_class): - """ - Initialize the DependencyAwareCache. - - Args: - key_class: The class used for generating cache keys. - """ - super().__init__(key_class) - self.descendants = {} # Maps node_id -> set of descendant node_ids - self.ancestors = {} # Maps node_id -> set of ancestor node_ids - self.executed_nodes = set() # Tracks nodes that have been executed - - async def set_prompt(self, dynprompt, node_ids, is_changed_cache): - """ - Clear the entire cache and rebuild the dependency graph. - - Args: - dynprompt: The dynamic prompt object containing node information. - node_ids: List of node IDs to initialize the cache for. - is_changed_cache: Flag indicating if the cache has changed. - """ - # Clear all existing cache data - self.cache.clear() - self.subcaches.clear() - self.descendants.clear() - self.ancestors.clear() - self.executed_nodes.clear() - - # Call the parent method to initialize the cache with the new prompt - await super().set_prompt(dynprompt, node_ids, is_changed_cache) - - # Rebuild the dependency graph - self._build_dependency_graph(dynprompt, node_ids) - - def _build_dependency_graph(self, dynprompt, node_ids): - """ - Build the dependency graph for all nodes. - - Args: - dynprompt: The dynamic prompt object containing node information. - node_ids: List of node IDs to build the graph for. - """ - self.descendants.clear() - self.ancestors.clear() - for node_id in node_ids: - self.descendants[node_id] = set() - self.ancestors[node_id] = set() - - for node_id in node_ids: - inputs = dynprompt.get_node(node_id)["inputs"] - for input_data in inputs.values(): - if is_link(input_data): # Check if the input is a link to another node - ancestor_id = input_data[0] - self.descendants[ancestor_id].add(node_id) - self.ancestors[node_id].add(ancestor_id) - - def set(self, node_id, value): - """ - Mark a node as executed and store its value in the cache. - - Args: - node_id: The ID of the node to store. - value: The value to store for the node. 
- """ - self._set_immediate(node_id, value) - self.executed_nodes.add(node_id) - self._cleanup_ancestors(node_id) - - def get(self, node_id): - """ - Retrieve the cached value for a node. - - Args: - node_id: The ID of the node to retrieve. - - Returns: - The cached value for the node. - """ - return self._get_immediate(node_id) - - async def ensure_subcache_for(self, node_id, children_ids): - """ - Ensure a subcache exists for a node and update dependencies. - - Args: - node_id: The ID of the parent node. - children_ids: List of child node IDs to associate with the parent node. - - Returns: - The subcache object for the node. - """ - subcache = await super()._ensure_subcache(node_id, children_ids) - for child_id in children_ids: - self.descendants[node_id].add(child_id) - self.ancestors[child_id].add(node_id) - return subcache - - def _cleanup_ancestors(self, node_id): - """ - Check if ancestors of a node can be removed from the cache. - - Args: - node_id: The ID of the node whose ancestors are to be checked. - """ - for ancestor_id in self.ancestors.get(node_id, []): - if ancestor_id in self.executed_nodes: - # Remove ancestor if all its descendants have been executed - if all(descendant in self.executed_nodes for descendant in self.descendants[ancestor_id]): - self._remove_node(ancestor_id) - - def _remove_node(self, node_id): - """ - Remove a node from the cache. - - Args: - node_id: The ID of the node to remove. - """ - cache_key = self.cache_key_set.get_data_key(node_id) - if cache_key in self.cache: - del self.cache[cache_key] - subcache_key = self.cache_key_set.get_subcache_key(node_id) - if subcache_key in self.subcaches: - del self.subcaches[subcache_key] - - def clean_unused(self): - """ - Clean up unused nodes. This is a no-op for this cache implementation. - """ - pass - - def recursive_debug_dump(self): - """ - Dump the cache and dependency graph for debugging. - - Returns: - A list containing the cache state and dependency graph. 
- """ - result = super().recursive_debug_dump() - result.append({ - "descendants": self.descendants, - "ancestors": self.ancestors, - "executed_nodes": list(self.executed_nodes), - }) - return result diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py index f4b427265..341c9735d 100644 --- a/comfy_execution/graph.py +++ b/comfy_execution/graph.py @@ -153,8 +153,9 @@ class TopologicalSort: continue _, _, input_info = self.get_input_info(unique_id, input_name) is_lazy = input_info is not None and "lazy" in input_info and input_info["lazy"] - if (include_lazy or not is_lazy) and not self.is_cached(from_node_id): - node_ids.append(from_node_id) + if (include_lazy or not is_lazy): + if not self.is_cached(from_node_id): + node_ids.append(from_node_id) links.append((from_node_id, from_socket, unique_id)) for link in links: @@ -194,10 +195,35 @@ class ExecutionList(TopologicalSort): super().__init__(dynprompt) self.output_cache = output_cache self.staged_node_id = None + self.execution_cache = {} + self.execution_cache_listeners = {} def is_cached(self, node_id): return self.output_cache.get(node_id) is not None + def cache_link(self, from_node_id, to_node_id): + if not to_node_id in self.execution_cache: + self.execution_cache[to_node_id] = {} + self.execution_cache[to_node_id][from_node_id] = self.output_cache.get(from_node_id) + if not from_node_id in self.execution_cache_listeners: + self.execution_cache_listeners[from_node_id] = set() + self.execution_cache_listeners[from_node_id].add(to_node_id) + + def get_output_cache(self, from_node_id, to_node_id): + if not to_node_id in self.execution_cache: + return None + return self.execution_cache[to_node_id].get(from_node_id) + + def cache_update(self, node_id, value): + if node_id in self.execution_cache_listeners: + for to_node_id in self.execution_cache_listeners[node_id]: + if to_node_id in self.execution_cache: + self.execution_cache[to_node_id][node_id] = value + + def add_strong_link(self, from_node_id, from_socket, to_node_id): + super().add_strong_link(from_node_id, from_socket, to_node_id) + self.cache_link(from_node_id, to_node_id) + async def stage_node_execution(self): assert self.staged_node_id is None if self.is_empty(): @@ -277,6 +303,8 @@ class ExecutionList(TopologicalSort): def complete_node_execution(self): node_id = self.staged_node_id self.pop_node(node_id) + self.execution_cache.pop(node_id, None) + self.execution_cache_listeners.pop(node_id, None) self.staged_node_id = None def get_nodes_in_cycle(self): diff --git a/execution.py b/execution.py index 1dc35738b..78c36a4b0 100644 --- a/execution.py +++ b/execution.py @@ -18,7 +18,7 @@ from comfy_execution.caching import ( BasicCache, CacheKeySetID, CacheKeySetInputSignature, - DependencyAwareCache, + NullCache, HierarchicalCache, LRUCache, ) @@ -91,13 +91,13 @@ class IsChangedCache: class CacheType(Enum): CLASSIC = 0 LRU = 1 - DEPENDENCY_AWARE = 2 + NONE = 2 class CacheSet: def __init__(self, cache_type=None, cache_size=None): - if cache_type == CacheType.DEPENDENCY_AWARE: - self.init_dependency_aware_cache() + if cache_type == CacheType.NONE: + self.init_null_cache() logging.info("Disabling intermediate node cache.") elif cache_type == CacheType.LRU: if cache_size is None: @@ -120,11 +120,12 @@ class CacheSet: self.ui = LRUCache(CacheKeySetInputSignature, max_size=cache_size) self.objects = HierarchicalCache(CacheKeySetID) - # only hold cached items while the decendents have not executed - def init_dependency_aware_cache(self): - self.outputs = 
DependencyAwareCache(CacheKeySetInputSignature) - self.ui = DependencyAwareCache(CacheKeySetInputSignature) - self.objects = DependencyAwareCache(CacheKeySetID) + def init_null_cache(self): + self.outputs = NullCache() + #The UI cache is expected to be iterable at the end of each workflow + #so it must cache at least a full workflow. Use Heirachical + self.ui = HierarchicalCache(CacheKeySetInputSignature) + self.objects = NullCache() def recursive_debug_dump(self): result = { @@ -135,7 +136,7 @@ class CacheSet: SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org") -def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}): +def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt=None, extra_data={}): is_v3 = issubclass(class_def, _ComfyNodeInternal) if is_v3: valid_inputs, schema = class_def.INPUT_TYPES(include_hidden=False, return_schema=True) @@ -153,10 +154,10 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, e if is_link(input_data) and (not input_info or not input_info.get("rawLink", False)): input_unique_id = input_data[0] output_index = input_data[1] - if outputs is None: + if execution_list is None: mark_missing() continue # This might be a lazily-evaluated input - cached_output = outputs.get(input_unique_id) + cached_output = execution_list.get_output_cache(input_unique_id, unique_id) if cached_output is None: mark_missing() continue @@ -405,6 +406,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, cached_output = caches.ui.get(unique_id) or {} server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": cached_output.get("output",None), "prompt_id": prompt_id }, server.client_id) get_progress_state().finish_progress(unique_id) + execution_list.cache_update(unique_id, caches.outputs.get(unique_id)) return (ExecutionResult.SUCCESS, None, None) input_data_all = None @@ -434,7 +436,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, for r in result: if is_link(r): source_node, source_output = r[0], r[1] - node_output = caches.outputs.get(source_node)[source_output] + node_output = execution_list.get_output_cache(source_node, unique_id)[source_output] for o in node_output: resolved_output.append(o) @@ -446,7 +448,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, has_subgraph = False else: get_progress_state().start_progress(unique_id) - input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data) + input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, execution_list, dynprompt, extra_data) if server.client_id is not None: server.last_node_id = display_node_id server.send_sync("executing", { "node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id }, server.client_id) @@ -549,11 +551,15 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, subcache.clean_unused() for node_id in new_output_ids: execution_list.add_node(node_id) + execution_list.cache_link(node_id, unique_id) for link in new_output_links: execution_list.add_strong_link(link[0], link[1], unique_id) pending_subgraph_results[unique_id] = cached_outputs return (ExecutionResult.PENDING, None, None) + caches.outputs.set(unique_id, output_data) + execution_list.cache_update(unique_id, output_data) + except 
comfy.model_management.InterruptProcessingException as iex: logging.info("Processing interrupted") diff --git a/main.py b/main.py index 35857dba8..4b4c5dcc4 100644 --- a/main.py +++ b/main.py @@ -173,7 +173,7 @@ def prompt_worker(q, server_instance): if args.cache_lru > 0: cache_type = execution.CacheType.LRU elif args.cache_none: - cache_type = execution.CacheType.DEPENDENCY_AWARE + cache_type = execution.CacheType.NONE e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_size=args.cache_lru) last_gc_collect = 0 diff --git a/tests/execution/test_execution.py b/tests/execution/test_execution.py index ef73ad9fd..ace0d2279 100644 --- a/tests/execution/test_execution.py +++ b/tests/execution/test_execution.py @@ -152,12 +152,12 @@ class TestExecution: # Initialize server and client # @fixture(scope="class", autouse=True, params=[ - # (use_lru, lru_size) - (False, 0), - (True, 0), - (True, 100), + { "extra_args" : [], "should_cache_results" : True }, + { "extra_args" : ["--cache-lru", 0], "should_cache_results" : True }, + { "extra_args" : ["--cache-lru", 100], "should_cache_results" : True }, + { "extra_args" : ["--cache-none"], "should_cache_results" : False }, ]) - def _server(self, args_pytest, request): + def server(self, args_pytest, request): # Start server pargs = [ 'python','main.py', @@ -167,12 +167,10 @@ class TestExecution: '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', '--cpu', ] - use_lru, lru_size = request.param - if use_lru: - pargs += ['--cache-lru', str(lru_size)] + pargs += [ str(param) for param in request.param["extra_args"] ] print("Running server with args:", pargs) # noqa: T201 p = subprocess.Popen(pargs) - yield + yield request.param p.kill() torch.cuda.empty_cache() @@ -193,7 +191,7 @@ class TestExecution: return comfy_client @fixture(scope="class", autouse=True) - def shared_client(self, args_pytest, _server): + def shared_client(self, args_pytest, server): client = self.start_client(args_pytest["listen"], args_pytest["port"]) yield client del client @@ -225,7 +223,7 @@ class TestExecution: assert result.did_run(mask) assert result.did_run(lazy_mix) - def test_full_cache(self, client: ComfyClient, builder: GraphBuilder): + def test_full_cache(self, client: ComfyClient, builder: GraphBuilder, server): g = builder input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) @@ -237,9 +235,12 @@ class TestExecution: client.run(g) result2 = client.run(g) for node_id, node in g.nodes.items(): - assert not result2.did_run(node), f"Node {node_id} ran, but should have been cached" + if server["should_cache_results"]: + assert not result2.did_run(node), f"Node {node_id} ran, but should have been cached" + else: + assert result2.did_run(node), f"Node {node_id} was cached, but should have been run" - def test_partial_cache(self, client: ComfyClient, builder: GraphBuilder): + def test_partial_cache(self, client: ComfyClient, builder: GraphBuilder, server): g = builder input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1) input2 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1) @@ -251,8 +252,12 @@ class TestExecution: client.run(g) mask.inputs['value'] = 0.4 result2 = client.run(g) - assert not result2.did_run(input1), "Input1 should have been cached" - assert not result2.did_run(input2), "Input2 should have been cached" + if server["should_cache_results"]: + assert not 
result2.did_run(input1), "Input1 should have been cached" + assert not result2.did_run(input2), "Input2 should have been cached" + else: + assert result2.did_run(input1), "Input1 should have been rerun" + assert result2.did_run(input2), "Input2 should have been rerun" def test_error(self, client: ComfyClient, builder: GraphBuilder): g = builder @@ -411,7 +416,7 @@ class TestExecution: input2 = g.node("StubImage", id="removeme", content="WHITE", height=512, width=512, batch_size=1) client.run(g) - def test_custom_is_changed(self, client: ComfyClient, builder: GraphBuilder): + def test_custom_is_changed(self, client: ComfyClient, builder: GraphBuilder, server): g = builder # Creating the nodes in this specific order previously caused a bug save = g.node("SaveImage") @@ -427,7 +432,10 @@ class TestExecution: result3 = client.run(g) result4 = client.run(g) assert result1.did_run(is_changed), "is_changed should have been run" - assert not result2.did_run(is_changed), "is_changed should have been cached" + if server["should_cache_results"]: + assert not result2.did_run(is_changed), "is_changed should have been cached" + else: + assert result2.did_run(is_changed), "is_changed should have been re-run" assert result3.did_run(is_changed), "is_changed should have been re-run" assert result4.did_run(is_changed), "is_changed should not have been cached" @@ -514,7 +522,7 @@ class TestExecution: assert len(images2) == 1, "Should have 1 image" # This tests that only constant outputs are used in the call to `IS_CHANGED` - def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder): + def test_is_changed_with_outputs(self, client: ComfyClient, builder: GraphBuilder, server): g = builder input1 = g.node("StubConstantImage", value=0.5, height=512, width=512, batch_size=1) test_node = g.node("TestIsChangedWithConstants", image=input1.out(0), value=0.5) @@ -530,7 +538,11 @@ class TestExecution: images = result.get_images(output) assert len(images) == 1, "Should have 1 image" assert numpy.array(images[0]).min() == 63 and numpy.array(images[0]).max() == 63, "Image should have value 0.25" - assert not result.did_run(test_node), "The execution should have been cached" + if server["should_cache_results"]: + assert not result.did_run(test_node), "The execution should have been cached" + else: + assert result.did_run(test_node), "The execution should have been re-run" + def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder, skip_timing_checks): # Warmup execution to ensure server is fully initialized From a1864c01f29cc43fe6bf823fc3fd46ba2781c2e0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 22 Oct 2025 14:26:22 -0700 Subject: [PATCH 0785/1073] Small readme improvement. (#10442) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index c9a0644e3..434d4ff06 100644 --- a/README.md +++ b/README.md @@ -201,6 +201,8 @@ Python 3.14 will work if you comment out the `kornia` dependency in the requirem Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12 +### Instructions: + Git clone this repo. 
Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints From 1bcda6df987a6c92b39d8b6d29e0b029450d67d0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 23 Oct 2025 18:21:14 -0700 Subject: [PATCH 0786/1073] WIP way to support multi multi dimensional latents. (#10456) --- comfy/model_base.py | 10 ++++- comfy/nested_tensor.py | 91 ++++++++++++++++++++++++++++++++++++++++++ comfy/sample.py | 27 ++++++++++--- comfy/samplers.py | 23 +++++++---- comfy/utils.py | 22 ++++++++++ 5 files changed, 158 insertions(+), 15 deletions(-) create mode 100644 comfy/nested_tensor.py diff --git a/comfy/model_base.py b/comfy/model_base.py index 8274c7dea..e877f19ac 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -197,8 +197,14 @@ class BaseModel(torch.nn.Module): extra_conds[o] = extra t = self.process_timestep(t, x=x, **extra_conds) - model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() - return self.model_sampling.calculate_denoised(sigma, model_output, x) + if "latent_shapes" in extra_conds: + xc = utils.unpack_latents(xc, extra_conds.pop("latent_shapes")) + + model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds) + if len(model_output) > 1 and not torch.is_tensor(model_output): + model_output, _ = utils.pack_latents(model_output) + + return self.model_sampling.calculate_denoised(sigma, model_output.float(), x) def process_timestep(self, timestep, **kwargs): return timestep diff --git a/comfy/nested_tensor.py b/comfy/nested_tensor.py new file mode 100644 index 000000000..b700816fa --- /dev/null +++ b/comfy/nested_tensor.py @@ -0,0 +1,91 @@ +import torch + +class NestedTensor: + def __init__(self, tensors): + self.tensors = list(tensors) + self.is_nested = True + + def _copy(self): + return NestedTensor(self.tensors) + + def apply_operation(self, other, operation): + o = self._copy() + if isinstance(other, NestedTensor): + for i, t in enumerate(o.tensors): + o.tensors[i] = operation(t, other.tensors[i]) + else: + for i, t in enumerate(o.tensors): + o.tensors[i] = operation(t, other) + return o + + def __add__(self, b): + return self.apply_operation(b, lambda x, y: x + y) + + def __sub__(self, b): + return self.apply_operation(b, lambda x, y: x - y) + + def __mul__(self, b): + return self.apply_operation(b, lambda x, y: x * y) + + # def __itruediv__(self, b): + # return self.apply_operation(b, lambda x, y: x / y) + + def __truediv__(self, b): + return self.apply_operation(b, lambda x, y: x / y) + + def __getitem__(self, *args, **kwargs): + return self.apply_operation(None, lambda x, y: x.__getitem__(*args, **kwargs)) + + def unbind(self): + return self.tensors + + def to(self, *args, **kwargs): + o = self._copy() + for i, t in enumerate(o.tensors): + o.tensors[i] = t.to(*args, **kwargs) + return o + + def new_ones(self, *args, **kwargs): + return self.tensors[0].new_ones(*args, **kwargs) + + def float(self): + return self.to(dtype=torch.float) + + def chunk(self, *args, **kwargs): + return self.apply_operation(None, lambda x, y: x.chunk(*args, **kwargs)) + + def size(self): + return self.tensors[0].size() + + @property + def shape(self): + return self.tensors[0].shape + + @property + def ndim(self): + dims = 0 + for t in self.tensors: + dims = max(t.ndim, dims) + return dims + + @property + def device(self): + return self.tensors[0].device + + @property 
+ def dtype(self): + return self.tensors[0].dtype + + @property + def layout(self): + return self.tensors[0].layout + + +def cat_nested(tensors, *args, **kwargs): + cated_tensors = [] + for i in range(len(tensors[0].tensors)): + tens = [] + for j in range(len(tensors)): + tens.append(tensors[j].tensors[i]) + cated_tensors.append(torch.cat(tens, *args, **kwargs)) + return NestedTensor(cated_tensors) diff --git a/comfy/sample.py b/comfy/sample.py index be5a7e246..b1395da84 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -4,13 +4,9 @@ import comfy.samplers import comfy.utils import numpy as np import logging +import comfy.nested_tensor -def prepare_noise(latent_image, seed, noise_inds=None): - """ - creates random noise given a latent image and a seed. - optional arg skip can be used to skip and discard x number of noise generations for a given seed - """ - generator = torch.manual_seed(seed) +def prepare_noise_inner(latent_image, generator, noise_inds=None): if noise_inds is None: return torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu") @@ -22,9 +18,28 @@ def prepare_noise(latent_image, seed, noise_inds=None): noises.append(noise) noises = [noises[i] for i in inverse] noises = torch.cat(noises, axis=0) + +def prepare_noise(latent_image, seed, noise_inds=None): + """ + creates random noise given a latent image and a seed. + optional arg skip can be used to skip and discard x number of noise generations for a given seed + """ + generator = torch.manual_seed(seed) + + if latent_image.is_nested: + tensors = latent_image.unbind() + noises = [] + for t in tensors: + noises.append(prepare_noise_inner(t, generator, noise_inds)) + noises = comfy.nested_tensor.NestedTensor(noises) + else: + noises = prepare_noise_inner(latent_image, generator, noise_inds) + return noises def fix_empty_latent_channels(model, latent_image): + if latent_image.is_nested: + return latent_image latent_format = model.get_model_object("latent_format") #Resize the empty latent image so it has the right number of channels if latent_format.latent_channels != latent_image.shape[1] and torch.count_nonzero(latent_image) == 0: latent_image = comfy.utils.repeat_to_batch_size(latent_image, latent_format.latent_channels, dim=1) diff --git a/comfy/samplers.py b/comfy/samplers.py index e7efaf470..fa4640842 100755 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -782,7 +782,7 @@ def ksampler(sampler_name, extra_options={}, inpaint_options={}): return KSAMPLER(sampler_function, extra_options, inpaint_options) -def process_conds(model, noise, conds, device, latent_image=None, denoise_mask=None, seed=None): +def process_conds(model, noise, conds, device, latent_image=None, denoise_mask=None, seed=None, latent_shapes=None): for k in conds: conds[k] = conds[k][:] resolve_areas_and_cond_masks_multidim(conds[k], noise.shape[2:], device) @@ -792,7 +792,7 @@ def process_conds(model, noise, conds, device, latent_image=None, denoise_mask=N if hasattr(model, 'extra_conds'): for k in conds: - conds[k] = encode_model_conds(model.extra_conds, conds[k], noise, device, k, latent_image=latent_image, denoise_mask=denoise_mask, seed=seed) + conds[k] = encode_model_conds(model.extra_conds, conds[k], noise, device, k, latent_image=latent_image, denoise_mask=denoise_mask, seed=seed, latent_shapes=latent_shapes) #make sure each cond area has an opposite one with the same area for k in conds: @@ -962,11 +962,11 @@ class CFGGuider: def predict_noise(self, x, timestep, model_options={}, 
seed=None): return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) - def inner_sample(self, noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed): + def inner_sample(self, noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed, latent_shapes=None): if latent_image is not None and torch.count_nonzero(latent_image) > 0: #Don't shift the empty latent image. latent_image = self.inner_model.process_latent_in(latent_image) - self.conds = process_conds(self.inner_model, noise, self.conds, device, latent_image, denoise_mask, seed) + self.conds = process_conds(self.inner_model, noise, self.conds, device, latent_image, denoise_mask, seed, latent_shapes=latent_shapes) extra_model_options = comfy.model_patcher.create_model_options_clone(self.model_options) extra_model_options.setdefault("transformer_options", {})["sample_sigmas"] = sigmas @@ -980,7 +980,7 @@ class CFGGuider: samples = executor.execute(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) return self.inner_model.process_latent_out(samples.to(torch.float32)) - def outer_sample(self, noise, latent_image, sampler, sigmas, denoise_mask=None, callback=None, disable_pbar=False, seed=None): + def outer_sample(self, noise, latent_image, sampler, sigmas, denoise_mask=None, callback=None, disable_pbar=False, seed=None, latent_shapes=None): self.inner_model, self.conds, self.loaded_models = comfy.sampler_helpers.prepare_sampling(self.model_patcher, noise.shape, self.conds, self.model_options) device = self.model_patcher.load_device @@ -994,7 +994,7 @@ class CFGGuider: try: self.model_patcher.pre_run() - output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) + output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed, latent_shapes=latent_shapes) finally: self.model_patcher.cleanup() @@ -1007,6 +1007,12 @@ class CFGGuider: if sigmas.shape[-1] == 0: return latent_image + if latent_image.is_nested: + latent_image, latent_shapes = comfy.utils.pack_latents(latent_image.unbind()) + noise, _ = comfy.utils.pack_latents(noise.unbind()) + else: + latent_shapes = [latent_image.shape] + self.conds = {} for k in self.original_conds: self.conds[k] = list(map(lambda a: a.copy(), self.original_conds[k])) @@ -1026,7 +1032,7 @@ class CFGGuider: self, comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.OUTER_SAMPLE, self.model_options, is_model_options=True) ) - output = executor.execute(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) + output = executor.execute(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed, latent_shapes=latent_shapes) finally: cast_to_load_options(self.model_options, device=self.model_patcher.offload_device) self.model_options = orig_model_options @@ -1034,6 +1040,9 @@ class CFGGuider: self.model_patcher.restore_hook_patches() del self.conds + + if len(latent_shapes) > 1: + output = comfy.nested_tensor.NestedTensor(comfy.utils.unpack_latents(output, latent_shapes)) return output diff --git a/comfy/utils.py b/comfy/utils.py index 0fd03f165..4bd281057 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -1106,3 +1106,25 @@ def upscale_dit_mask(mask: torch.Tensor, img_size_in, img_size_out): dim=1 ) return out + +def 
pack_latents(latents): + latent_shapes = [] + tensors = [] + for tensor in latents: + latent_shapes.append(tensor.shape) + tensors.append(tensor.reshape(tensor.shape[0], 1, -1)) + + latent = torch.cat(tensors, dim=-1) + return latent, latent_shapes + +def unpack_latents(combined_latent, latent_shapes): + if len(latent_shapes) > 1: + output_tensors = [] + for shape in latent_shapes: + cut = math.prod(shape[1:]) + tens = combined_latent[:, :, :cut] + combined_latent = combined_latent[:, :, cut:] + output_tensors.append(tens.reshape([tens.shape[0]] + list(shape)[1:])) + else: + output_tensors = combined_latent + return output_tensors From 24188b3141aace272cb91b85578c76f5a8f70e1c Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Fri, 24 Oct 2025 13:36:30 +0800 Subject: [PATCH 0787/1073] Update template to 0.2.2 (#10461) Fix template typo issue --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dd2afcab0..8570c66b6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.28.7 -comfyui-workflow-templates==0.2.1 +comfyui-workflow-templates==0.2.2 comfyui-embedded-docs==0.3.0 torch torchsde From 388b306a2b48070737b092b51e76de933baee9ad Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 24 Oct 2025 08:37:16 +0300 Subject: [PATCH 0788/1073] feat(api-nodes): network client v2: async ops, cancellation, downloads, refactor (#10390) * feat(api-nodes): implement new API client for V3 nodes * feat(api-nodes): implement new API client for V3 nodes * feat(api-nodes): implement new API client for V3 nodes * converted WAN nodes to use new client; polishing * fix(auth): do not leak authentification for the absolute urls * convert BFL API nodes to use new API client; remove deprecated BFL nodes * converted Google Veo nodes * fix(Veo3.1 model): take into account "generate_audio" parameter --- comfy_api_nodes/apinode_utils.py | 435 +--------- comfy_api_nodes/apis/bfl_api.py | 51 +- comfy_api_nodes/apis/veo_api.py | 111 +++ comfy_api_nodes/nodes_bfl.py | 605 ++++---------- comfy_api_nodes/nodes_bytedance.py | 279 ++----- comfy_api_nodes/nodes_gemini.py | 5 +- comfy_api_nodes/nodes_kling.py | 350 +++----- comfy_api_nodes/nodes_luma.py | 2 +- comfy_api_nodes/nodes_minimax.py | 2 +- comfy_api_nodes/nodes_moonvalley.py | 366 ++------- comfy_api_nodes/nodes_openai.py | 4 +- comfy_api_nodes/nodes_pika.py | 6 +- comfy_api_nodes/nodes_pixverse.py | 13 +- comfy_api_nodes/nodes_recraft.py | 4 +- comfy_api_nodes/nodes_runway.py | 199 ++--- comfy_api_nodes/nodes_sora.py | 74 +- comfy_api_nodes/nodes_stability.py | 8 +- comfy_api_nodes/nodes_veo2.py | 176 ++-- comfy_api_nodes/nodes_vidu.py | 131 +-- comfy_api_nodes/nodes_wan.py | 245 +++--- comfy_api_nodes/util/__init__.py | 87 ++ comfy_api_nodes/util/_helpers.py | 71 ++ comfy_api_nodes/util/client.py | 941 ++++++++++++++++++++++ comfy_api_nodes/util/common_exceptions.py | 14 + comfy_api_nodes/util/conversions.py | 407 ++++++++++ comfy_api_nodes/util/download_helpers.py | 249 ++++++ comfy_api_nodes/util/upload_helpers.py | 338 ++++++++ comfy_api_nodes/util/validation_utils.py | 58 +- pyproject.toml | 2 + 29 files changed, 2935 insertions(+), 2298 deletions(-) create mode 100644 comfy_api_nodes/apis/veo_api.py create mode 100644 comfy_api_nodes/util/_helpers.py create mode 100644 comfy_api_nodes/util/client.py create mode 100644 comfy_api_nodes/util/common_exceptions.py create mode 100644 
comfy_api_nodes/util/conversions.py create mode 100644 comfy_api_nodes/util/download_helpers.py create mode 100644 comfy_api_nodes/util/upload_helpers.py diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index bc3d2d07e..e3d282059 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -1,15 +1,10 @@ from __future__ import annotations import aiohttp -import io -import logging import mimetypes -import os from typing import Optional, Union from comfy.utils import common_upscale -from comfy_api.input_impl import VideoFromFile from comfy_api.util import VideoContainer, VideoCodec from comfy_api.input.video_types import VideoInput -from comfy_api.input.basic_types import AudioInput from comfy_api_nodes.apis.client import ( ApiClient, ApiEndpoint, @@ -26,43 +21,8 @@ from PIL import Image import torch import math import base64 -import uuid +from .util import tensor_to_bytesio, bytesio_to_image_tensor from io import BytesIO -import av - - -async def download_url_to_video_output( - video_url: str, timeout: int = None, auth_kwargs: Optional[dict[str, str]] = None -) -> VideoFromFile: - """Downloads a video from a URL and returns a `VIDEO` output. - - Args: - video_url: The URL of the video to download. - - Returns: - A Comfy node `VIDEO` output. - """ - video_io = await download_url_to_bytesio(video_url, timeout, auth_kwargs=auth_kwargs) - if video_io is None: - error_msg = f"Failed to download video from {video_url}" - logging.error(error_msg) - raise ValueError(error_msg) - return VideoFromFile(video_io) - - -def downscale_image_tensor(image, total_pixels=1536 * 1024) -> torch.Tensor: - """Downscale input image tensor to roughly the specified total pixels.""" - samples = image.movedim(-1, 1) - total = int(total_pixels) - scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) - if scale_by >= 1: - return image - width = round(samples.shape[3] * scale_by) - height = round(samples.shape[2] * scale_by) - - s = common_upscale(samples, width, height, "lanczos", "disabled") - s = s.movedim(1, -1) - return s async def validate_and_cast_response( @@ -162,11 +122,6 @@ def validate_aspect_ratio( return aspect_ratio -def mimetype_to_extension(mime_type: str) -> str: - """Converts a MIME type to a file extension.""" - return mime_type.split("/")[-1].lower() - - async def download_url_to_bytesio( url: str, timeout: int = None, auth_kwargs: Optional[dict[str, str]] = None ) -> BytesIO: @@ -195,136 +150,11 @@ async def download_url_to_bytesio( return BytesIO(await resp.read()) -def bytesio_to_image_tensor(image_bytesio: BytesIO, mode: str = "RGBA") -> torch.Tensor: - """Converts image data from BytesIO to a torch.Tensor. - - Args: - image_bytesio: BytesIO object containing the image data. - mode: The PIL mode to convert the image to (e.g., "RGB", "RGBA"). - - Returns: - A torch.Tensor representing the image (1, H, W, C). - - Raises: - PIL.UnidentifiedImageError: If the image data cannot be identified. - ValueError: If the specified mode is invalid. 
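
# A round-trip usage sketch for the two helpers this file now imports from
# comfy_api_nodes.util (see the `from .util import ...` line above); the
# signatures are assumed to match the deleted local implementations shown here.
import torch
from comfy_api_nodes.util import bytesio_to_image_tensor, tensor_to_bytesio

img = torch.rand(1, 64, 64, 3)                       # [B, H, W, C] floats in [0, 1]
buf = tensor_to_bytesio(img, mime_type="image/png")  # named BytesIO, pointer at start
restored = bytesio_to_image_tensor(buf, mode="RGB")  # back to a [1, H, W, C] float32 tensor
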
- """ - image = Image.open(image_bytesio) - image = image.convert(mode) - image_array = np.array(image).astype(np.float32) / 255.0 - return torch.from_numpy(image_array).unsqueeze(0) - - -async def download_url_to_image_tensor(url: str, timeout: int = None) -> torch.Tensor: - """Downloads an image from a URL and returns a [B, H, W, C] tensor.""" - image_bytesio = await download_url_to_bytesio(url, timeout) - return bytesio_to_image_tensor(image_bytesio) - - def process_image_response(response_content: bytes | str) -> torch.Tensor: """Uses content from a Response object and converts it to a torch.Tensor""" return bytesio_to_image_tensor(BytesIO(response_content)) -def _tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image: - """Converts a single torch.Tensor image [H, W, C] to a PIL Image, optionally downscaling.""" - if len(image.shape) > 3: - image = image[0] - # TODO: remove alpha if not allowed and present - input_tensor = image.cpu() - input_tensor = downscale_image_tensor( - input_tensor.unsqueeze(0), total_pixels=total_pixels - ).squeeze() - image_np = (input_tensor.numpy() * 255).astype(np.uint8) - img = Image.fromarray(image_np) - return img - - -def _pil_to_bytesio(img: Image.Image, mime_type: str = "image/png") -> BytesIO: - """Converts a PIL Image to a BytesIO object.""" - if not mime_type: - mime_type = "image/png" - - img_byte_arr = io.BytesIO() - # Derive PIL format from MIME type (e.g., 'image/png' -> 'PNG') - pil_format = mime_type.split("/")[-1].upper() - if pil_format == "JPG": - pil_format = "JPEG" - img.save(img_byte_arr, format=pil_format) - img_byte_arr.seek(0) - return img_byte_arr - - -def tensor_to_bytesio( - image: torch.Tensor, - name: Optional[str] = None, - total_pixels: int = 2048 * 2048, - mime_type: str = "image/png", -) -> BytesIO: - """Converts a torch.Tensor image to a named BytesIO object. - - Args: - image: Input torch.Tensor image. - name: Optional filename for the BytesIO object. - total_pixels: Maximum total pixels for potential downscaling. - mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4'). - - Returns: - Named BytesIO object containing the image data, with pointer set to the start of buffer. - """ - if not mime_type: - mime_type = "image/png" - - pil_image = _tensor_to_pil(image, total_pixels=total_pixels) - img_binary = _pil_to_bytesio(pil_image, mime_type=mime_type) - img_binary.name = ( - f"{name if name else uuid.uuid4()}.{mimetype_to_extension(mime_type)}" - ) - return img_binary - - -def tensor_to_base64_string( - image_tensor: torch.Tensor, - total_pixels: int = 2048 * 2048, - mime_type: str = "image/png", -) -> str: - """Convert [B, H, W, C] or [H, W, C] tensor to a base64 string. - - Args: - image_tensor: Input torch.Tensor image. - total_pixels: Maximum total pixels for potential downscaling. - mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4'). - - Returns: - Base64 encoded string of the image. - """ - pil_image = _tensor_to_pil(image_tensor, total_pixels=total_pixels) - img_byte_arr = _pil_to_bytesio(pil_image, mime_type=mime_type) - img_bytes = img_byte_arr.getvalue() - # Encode bytes to base64 string - base64_encoded_string = base64.b64encode(img_bytes).decode("utf-8") - return base64_encoded_string - - -def tensor_to_data_uri( - image_tensor: torch.Tensor, - total_pixels: int = 2048 * 2048, - mime_type: str = "image/png", -) -> str: - """Converts a tensor image to a Data URI string. 
- - Args: - image_tensor: Input torch.Tensor image. - total_pixels: Maximum total pixels for potential downscaling. - mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp'). - - Returns: - Data URI string (e.g., 'data:image/png;base64,...'). - """ - base64_string = tensor_to_base64_string(image_tensor, total_pixels, mime_type) - return f"data:{mime_type};base64,{base64_string}" - - def text_filepath_to_base64_string(filepath: str) -> str: """Converts a text file to a base64 string.""" with open(filepath, "rb") as f: @@ -392,7 +222,7 @@ def video_to_base64_string( container_format: Optional container format to use (defaults to video.container if available) codec: Optional codec to use (defaults to video.codec if available) """ - video_bytes_io = io.BytesIO() + video_bytes_io = BytesIO() # Use provided format/codec if specified, otherwise use video's own if available format_to_use = container_format if container_format is not None else getattr(video, 'container', VideoContainer.MP4) @@ -403,214 +233,6 @@ def video_to_base64_string( return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8") -async def upload_video_to_comfyapi( - video: VideoInput, - auth_kwargs: Optional[dict[str, str]] = None, - container: VideoContainer = VideoContainer.MP4, - codec: VideoCodec = VideoCodec.H264, - max_duration: Optional[int] = None, -) -> str: - """ - Uploads a single video to ComfyUI API and returns its download URL. - Uses the specified container and codec for saving the video before upload. - - Args: - video: VideoInput object (Comfy VIDEO type). - auth_kwargs: Optional authentication token(s). - container: The video container format to use (default: MP4). - codec: The video codec to use (default: H264). - max_duration: Optional maximum duration of the video in seconds. If the video is longer than this, an error will be raised. - - Returns: - The download URL for the uploaded video file. - """ - if max_duration is not None: - try: - actual_duration = video.duration_seconds - if actual_duration is not None and actual_duration > max_duration: - raise ValueError( - f"Video duration ({actual_duration:.2f}s) exceeds the maximum allowed ({max_duration}s)." - ) - except Exception as e: - logging.error("Error getting video duration: %s", str(e)) - raise ValueError(f"Could not verify video duration from source: {e}") from e - - upload_mime_type = f"video/{container.value.lower()}" - filename = f"uploaded_video.{container.value.lower()}" - - # Convert VideoInput to BytesIO using specified container/codec - video_bytes_io = io.BytesIO() - video.save_to(video_bytes_io, format=container, codec=codec) - video_bytes_io.seek(0) - - return await upload_file_to_comfyapi(video_bytes_io, filename, upload_mime_type, auth_kwargs) - - -def audio_tensor_to_contiguous_ndarray(waveform: torch.Tensor) -> np.ndarray: - """ - Prepares audio waveform for av library by converting to a contiguous numpy array. - - Args: - waveform: a tensor of shape (1, channels, samples) derived from a Comfy `AUDIO` type. - - Returns: - Contiguous numpy array of the audio waveform. If the audio was batched, - the first item is taken. 
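
# Shape contract of the helper above, as a usage sketch: Comfy AUDIO waveforms
# arrive as (batch, channels, samples); av.AudioFrame.from_ndarray expects a
# contiguous float32 (channels, samples) array (sample values are illustrative).
import torch

waveform = torch.zeros(1, 2, 48_000)  # one batch item, stereo, 1 s at 48 kHz
audio_np = waveform.squeeze(0).cpu().contiguous().numpy().astype("float32")
assert audio_np.shape == (2, 48_000)
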
- """ - if waveform.ndim != 3 or waveform.shape[0] != 1: - raise ValueError("Expected waveform tensor shape (1, channels, samples)") - - # If batch is > 1, take first item - if waveform.shape[0] > 1: - waveform = waveform[0] - - # Prepare for av: remove batch dim, move to CPU, make contiguous, convert to numpy array - audio_data_np = waveform.squeeze(0).cpu().contiguous().numpy() - if audio_data_np.dtype != np.float32: - audio_data_np = audio_data_np.astype(np.float32) - - return audio_data_np - - -def audio_ndarray_to_bytesio( - audio_data_np: np.ndarray, - sample_rate: int, - container_format: str = "mp4", - codec_name: str = "aac", -) -> BytesIO: - """ - Encodes a numpy array of audio data into a BytesIO object. - """ - audio_bytes_io = io.BytesIO() - with av.open(audio_bytes_io, mode="w", format=container_format) as output_container: - audio_stream = output_container.add_stream(codec_name, rate=sample_rate) - frame = av.AudioFrame.from_ndarray( - audio_data_np, - format="fltp", - layout="stereo" if audio_data_np.shape[0] > 1 else "mono", - ) - frame.sample_rate = sample_rate - frame.pts = 0 - - for packet in audio_stream.encode(frame): - output_container.mux(packet) - - # Flush stream - for packet in audio_stream.encode(None): - output_container.mux(packet) - - audio_bytes_io.seek(0) - return audio_bytes_io - - -async def upload_audio_to_comfyapi( - audio: AudioInput, - auth_kwargs: Optional[dict[str, str]] = None, - container_format: str = "mp4", - codec_name: str = "aac", - mime_type: str = "audio/mp4", - filename: str = "uploaded_audio.mp4", -) -> str: - """ - Uploads a single audio input to ComfyUI API and returns its download URL. - Encodes the raw waveform into the specified format before uploading. - - Args: - audio: a Comfy `AUDIO` type (contains waveform tensor and sample_rate) - auth_kwargs: Optional authentication token(s). - - Returns: - The download URL for the uploaded audio file. - """ - sample_rate: int = audio["sample_rate"] - waveform: torch.Tensor = audio["waveform"] - audio_data_np = audio_tensor_to_contiguous_ndarray(waveform) - audio_bytes_io = audio_ndarray_to_bytesio( - audio_data_np, sample_rate, container_format, codec_name - ) - - return await upload_file_to_comfyapi(audio_bytes_io, filename, mime_type, auth_kwargs) - - -def f32_pcm(wav: torch.Tensor) -> torch.Tensor: - """Convert audio to float 32 bits PCM format. Copy-paste from nodes_audio.py file.""" - if wav.dtype.is_floating_point: - return wav - elif wav.dtype == torch.int16: - return wav.float() / (2 ** 15) - elif wav.dtype == torch.int32: - return wav.float() / (2 ** 31) - raise ValueError(f"Unsupported wav dtype: {wav.dtype}") - - -def audio_bytes_to_audio_input(audio_bytes: bytes,) -> dict: - """ - Decode any common audio container from bytes using PyAV and return - a Comfy AUDIO dict: {"waveform": [1, C, T] float32, "sample_rate": int}. 
- """ - with av.open(io.BytesIO(audio_bytes)) as af: - if not af.streams.audio: - raise ValueError("No audio stream found in response.") - stream = af.streams.audio[0] - - in_sr = int(stream.codec_context.sample_rate) - out_sr = in_sr - - frames: list[torch.Tensor] = [] - n_channels = stream.channels or 1 - - for frame in af.decode(streams=stream.index): - arr = frame.to_ndarray() # shape can be [C, T] or [T, C] or [T] - buf = torch.from_numpy(arr) - if buf.ndim == 1: - buf = buf.unsqueeze(0) # [T] -> [1, T] - elif buf.shape[0] != n_channels and buf.shape[-1] == n_channels: - buf = buf.transpose(0, 1).contiguous() # [T, C] -> [C, T] - elif buf.shape[0] != n_channels: - buf = buf.reshape(-1, n_channels).t().contiguous() # fallback to [C, T] - frames.append(buf) - - if not frames: - raise ValueError("Decoded zero audio frames.") - - wav = torch.cat(frames, dim=1) # [C, T] - wav = f32_pcm(wav) - return {"waveform": wav.unsqueeze(0).contiguous(), "sample_rate": out_sr} - - -def audio_input_to_mp3(audio: AudioInput) -> io.BytesIO: - waveform = audio["waveform"].cpu() - - output_buffer = io.BytesIO() - output_container = av.open(output_buffer, mode='w', format="mp3") - - out_stream = output_container.add_stream("libmp3lame", rate=audio["sample_rate"]) - out_stream.bit_rate = 320000 - - frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout='mono' if waveform.shape[0] == 1 else 'stereo') - frame.sample_rate = audio["sample_rate"] - frame.pts = 0 - output_container.mux(out_stream.encode(frame)) - output_container.mux(out_stream.encode(None)) - output_container.close() - output_buffer.seek(0) - return output_buffer - - -def audio_to_base64_string( - audio: AudioInput, container_format: str = "mp4", codec_name: str = "aac" -) -> str: - """Converts an audio input to a base64 string.""" - sample_rate: int = audio["sample_rate"] - waveform: torch.Tensor = audio["waveform"] - audio_data_np = audio_tensor_to_contiguous_ndarray(waveform) - audio_bytes_io = audio_ndarray_to_bytesio( - audio_data_np, sample_rate, container_format, codec_name - ) - audio_bytes = audio_bytes_io.getvalue() - return base64.b64encode(audio_bytes).decode("utf-8") - - async def upload_images_to_comfyapi( image: torch.Tensor, max_images=8, @@ -663,56 +285,3 @@ def resize_mask_to_image( if not allow_gradient: mask = (mask > 0.5).float() return mask - - -def validate_string( - string: str, - strip_whitespace=True, - field_name="prompt", - min_length=None, - max_length=None, -): - if string is None: - raise Exception(f"Field '{field_name}' cannot be empty.") - if strip_whitespace: - string = string.strip() - if min_length and len(string) < min_length: - raise Exception( - f"Field '{field_name}' cannot be shorter than {min_length} characters; was {len(string)} characters long." - ) - if max_length and len(string) > max_length: - raise Exception( - f" Field '{field_name} cannot be longer than {max_length} characters; was {len(string)} characters long." - ) - - -def image_tensor_pair_to_batch( - image1: torch.Tensor, image2: torch.Tensor -) -> torch.Tensor: - """ - Converts a pair of image tensors to a batch tensor. - If the images are not the same size, the smaller image is resized to - match the larger image. 
- """ - if image1.shape[1:] != image2.shape[1:]: - image2 = common_upscale( - image2.movedim(-1, 1), - image1.shape[2], - image1.shape[1], - "bilinear", - "center", - ).movedim(1, -1) - return torch.cat((image1, image2), dim=0) - - -def get_size(path_or_object: Union[str, io.BytesIO]) -> int: - if isinstance(path_or_object, str): - return os.path.getsize(path_or_object) - return len(path_or_object.getvalue()) - - -def validate_container_format_is_mp4(video: VideoInput) -> None: - """Validates video container format is MP4.""" - container_format = video.get_container_format() - if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]: - raise ValueError(f"Only MP4 container format supported. Got: {container_format}") diff --git a/comfy_api_nodes/apis/bfl_api.py b/comfy_api_nodes/apis/bfl_api.py index 0e90aef7c..0fc8c0607 100644 --- a/comfy_api_nodes/apis/bfl_api.py +++ b/comfy_api_nodes/apis/bfl_api.py @@ -50,44 +50,6 @@ class BFLFluxFillImageRequest(BaseModel): mask: str = Field(None, description='A Base64-encoded string representing the mask of the areas you with to modify.') -class BFLFluxCannyImageRequest(BaseModel): - prompt: str = Field(..., description='Text prompt for image generation') - prompt_upsampling: Optional[bool] = Field( - None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.' - ) - canny_low_threshold: Optional[int] = Field(None, description='Low threshold for Canny edge detection') - canny_high_threshold: Optional[int] = Field(None, description='High threshold for Canny edge detection') - seed: Optional[int] = Field(None, description='The seed value for reproducibility.') - steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process') - guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process') - safety_tolerance: Optional[conint(ge=0, le=6)] = Field( - 6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.' - ) - output_format: Optional[BFLOutputFormat] = Field( - BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png'] - ) - control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided') - preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step') - - -class BFLFluxDepthImageRequest(BaseModel): - prompt: str = Field(..., description='Text prompt for image generation') - prompt_upsampling: Optional[bool] = Field( - None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.' - ) - seed: Optional[int] = Field(None, description='The seed value for reproducibility.') - steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process') - guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process') - safety_tolerance: Optional[conint(ge=0, le=6)] = Field( - 6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.' - ) - output_format: Optional[BFLOutputFormat] = Field( - BFLOutputFormat.png, description="Output format for the generated image. 
Can be 'jpeg' or 'png'.", examples=['png'] - ) - control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided') - preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step') - - class BFLFluxProGenerateRequest(BaseModel): prompt: str = Field(..., description='The text prompt for image generation.') prompt_upsampling: Optional[bool] = Field( @@ -160,15 +122,8 @@ class BFLStatus(str, Enum): error = "Error" -class BFLFluxProStatusResponse(BaseModel): +class BFLFluxStatusResponse(BaseModel): id: str = Field(..., description="The unique identifier for the generation task.") status: BFLStatus = Field(..., description="The status of the task.") - result: Optional[Dict[str, Any]] = Field( - None, description="The result of the task (null if not completed)." - ) - progress: confloat(ge=0.0, le=1.0) = Field( - ..., description="The progress of the task (0.0 to 1.0)." - ) - details: Optional[Dict[str, Any]] = Field( - None, description="Additional details about the task (null if not available)." - ) + result: Optional[Dict[str, Any]] = Field(None, description="The result of the task (null if not completed).") + progress: Optional[float] = Field(None, description="The progress of the task (0.0 to 1.0).", ge=0.0, le=1.0) diff --git a/comfy_api_nodes/apis/veo_api.py b/comfy_api_nodes/apis/veo_api.py new file mode 100644 index 000000000..a55137afb --- /dev/null +++ b/comfy_api_nodes/apis/veo_api.py @@ -0,0 +1,111 @@ +from typing import Optional, Union +from enum import Enum + +from pydantic import BaseModel, Field + + +class Image2(BaseModel): + bytesBase64Encoded: str + gcsUri: Optional[str] = None + mimeType: Optional[str] = None + + +class Image3(BaseModel): + bytesBase64Encoded: Optional[str] = None + gcsUri: str + mimeType: Optional[str] = None + + +class Instance1(BaseModel): + image: Optional[Union[Image2, Image3]] = Field( + None, description='Optional image to guide video generation' + ) + prompt: str = Field(..., description='Text description of the video') + + +class PersonGeneration1(str, Enum): + ALLOW = 'ALLOW' + BLOCK = 'BLOCK' + + +class Parameters1(BaseModel): + aspectRatio: Optional[str] = Field(None, examples=['16:9']) + durationSeconds: Optional[int] = None + enhancePrompt: Optional[bool] = None + generateAudio: Optional[bool] = Field( + None, + description='Generate audio for the video. 
Only supported by veo 3 models.', + ) + negativePrompt: Optional[str] = None + personGeneration: Optional[PersonGeneration1] = None + sampleCount: Optional[int] = None + seed: Optional[int] = None + storageUri: Optional[str] = Field( + None, description='Optional Cloud Storage URI to upload the video' + ) + + +class VeoGenVidRequest(BaseModel): + instances: Optional[list[Instance1]] = None + parameters: Optional[Parameters1] = None + + +class VeoGenVidResponse(BaseModel): + name: str = Field( + ..., + description='Operation resource name', + examples=[ + 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/a1b07c8e-7b5a-4aba-bb34-3e1ccb8afcc8' + ], + ) + + +class VeoGenVidPollRequest(BaseModel): + operationName: str = Field( + ..., + description='Full operation name (from predict response)', + examples=[ + 'projects/PROJECT_ID/locations/us-central1/publishers/google/models/MODEL_ID/operations/OPERATION_ID' + ], + ) + + +class Video(BaseModel): + bytesBase64Encoded: Optional[str] = Field( + None, description='Base64-encoded video content' + ) + gcsUri: Optional[str] = Field(None, description='Cloud Storage URI of the video') + mimeType: Optional[str] = Field(None, description='Video MIME type') + + +class Error1(BaseModel): + code: Optional[int] = Field(None, description='Error code') + message: Optional[str] = Field(None, description='Error message') + + +class Response1(BaseModel): + field_type: Optional[str] = Field( + None, + alias='@type', + examples=[ + 'type.googleapis.com/cloud.ai.large_models.vision.GenerateVideoResponse' + ], + ) + raiMediaFilteredCount: Optional[int] = Field( + None, description='Count of media filtered by responsible AI policies' + ) + raiMediaFilteredReasons: Optional[list[str]] = Field( + None, description='Reasons why media was filtered by responsible AI policies' + ) + videos: Optional[list[Video]] = None + + +class VeoGenVidPollResponse(BaseModel): + done: Optional[bool] = None + error: Optional[Error1] = Field( + None, description='Error details if operation failed' + ) + name: Optional[str] = None + response: Optional[Response1] = Field( + None, description='The actual prediction response if done is true' + ) diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index b6cc90f05..baa74fd52 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -1,136 +1,43 @@ -import asyncio -import io from inspect import cleandoc -from typing import Union, Optional +from typing import Optional + +import torch from typing_extensions import override -from comfy_api.latest import ComfyExtension, IO + +from comfy_api.latest import IO, ComfyExtension +from comfy_api_nodes.apinode_utils import ( + resize_mask_to_image, + validate_aspect_ratio, +) from comfy_api_nodes.apis.bfl_api import ( - BFLStatus, BFLFluxExpandImageRequest, BFLFluxFillImageRequest, - BFLFluxCannyImageRequest, - BFLFluxDepthImageRequest, - BFLFluxProGenerateRequest, BFLFluxKontextProGenerateRequest, - BFLFluxProUltraGenerateRequest, + BFLFluxProGenerateRequest, BFLFluxProGenerateResponse, + BFLFluxProUltraGenerateRequest, + BFLFluxStatusResponse, + BFLStatus, ) -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, -) -from comfy_api_nodes.apinode_utils import ( - downscale_image_tensor, - validate_aspect_ratio, - process_image_response, - resize_mask_to_image, + download_url_to_image_tensor, + poll_op, + sync_op, + tensor_to_base64_string, validate_string, 
) -import numpy as np -from PIL import Image -import aiohttp -import torch -import base64 -import time -from server import PromptServer - def convert_mask_to_image(mask: torch.Tensor): """ Make mask have the expected amount of dims (4) and channels (3) to be recognized as an image. """ mask = mask.unsqueeze(-1) - mask = torch.cat([mask]*3, dim=-1) + mask = torch.cat([mask] * 3, dim=-1) return mask -async def handle_bfl_synchronous_operation( - operation: SynchronousOperation, - timeout_bfl_calls=360, - node_id: Union[str, None] = None, -): - response_api: BFLFluxProGenerateResponse = await operation.execute() - return await _poll_until_generated( - response_api.polling_url, timeout=timeout_bfl_calls, node_id=node_id - ) - - -async def _poll_until_generated( - polling_url: str, timeout=360, node_id: Union[str, None] = None -): - # used bfl-comfy-nodes to verify code implementation: - # https://github.com/black-forest-labs/bfl-comfy-nodes/tree/main - start_time = time.time() - retries_404 = 0 - max_retries_404 = 5 - retry_404_seconds = 2 - retry_202_seconds = 2 - retry_pending_seconds = 1 - - async with aiohttp.ClientSession() as session: - # NOTE: should True loop be replaced with checking if workflow has been interrupted? - while True: - if node_id: - time_elapsed = time.time() - start_time - PromptServer.instance.send_progress_text( - f"Generating ({time_elapsed:.0f}s)", node_id - ) - - async with session.get(polling_url) as response: - if response.status == 200: - result = await response.json() - if result["status"] == BFLStatus.ready: - img_url = result["result"]["sample"] - if node_id: - PromptServer.instance.send_progress_text( - f"Result URL: {img_url}", node_id - ) - async with session.get(img_url) as img_resp: - return process_image_response(await img_resp.content.read()) - elif result["status"] in [ - BFLStatus.request_moderated, - BFLStatus.content_moderated, - ]: - status = result["status"] - raise Exception( - f"BFL API did not return an image due to: {status}." - ) - elif result["status"] == BFLStatus.error: - raise Exception(f"BFL API encountered an error: {result}.") - elif result["status"] == BFLStatus.pending: - await asyncio.sleep(retry_pending_seconds) - continue - elif response.status == 404: - if retries_404 < max_retries_404: - retries_404 += 1 - await asyncio.sleep(retry_404_seconds) - continue - raise Exception( - f"BFL API could not find task after {max_retries_404} tries." - ) - elif response.status == 202: - await asyncio.sleep(retry_202_seconds) - elif time.time() - start_time > timeout: - raise Exception( - f"BFL API experienced a timeout; could not return request under {timeout} seconds." - ) - else: - raise Exception(f"BFL API encountered an error: {response.json()}") - -def convert_image_to_base64(image: torch.Tensor): - scaled_image = downscale_image_tensor(image, total_pixels=2048 * 2048) - # remove batch dimension if present - if len(scaled_image.shape) > 3: - scaled_image = scaled_image[0] - image_np = (scaled_image.numpy() * 255).astype(np.uint8) - img = Image.fromarray(image_np) - img_byte_arr = io.BytesIO() - img.save(img_byte_arr, format="PNG") - return base64.b64encode(img_byte_arr.getvalue()).decode() - - class FluxProUltraImageNode(IO.ComfyNode): """ Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution. @@ -158,7 +65,9 @@ class FluxProUltraImageNode(IO.ComfyNode): IO.Boolean.Input( "prompt_upsampling", default=False, - tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + tooltip="Whether to perform upsampling on the prompt. " + "If active, automatically modifies the prompt for more creative generation, " + "but results are nondeterministic (same seed will not produce exactly the same result).", ), IO.Int.Input( "seed", @@ -220,22 +129,19 @@ class FluxProUltraImageNode(IO.ComfyNode): cls, prompt: str, aspect_ratio: str, - prompt_upsampling=False, - raw=False, - seed=0, - image_prompt=None, - image_prompt_strength=0.1, + prompt_upsampling: bool = False, + raw: bool = False, + seed: int = 0, + image_prompt: Optional[torch.Tensor] = None, + image_prompt_strength: float = 0.1, ) -> IO.NodeOutput: if image_prompt is None: validate_string(prompt, strip_whitespace=False) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/bfl/flux-pro-1.1-ultra/generate", - method=HttpMethod.POST, - request_model=BFLFluxProUltraGenerateRequest, - response_model=BFLFluxProGenerateResponse, - ), - request=BFLFluxProUltraGenerateRequest( + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/bfl/flux-pro-1.1-ultra/generate", method="POST"), + response_model=BFLFluxProGenerateResponse, + data=BFLFluxProUltraGenerateRequest( prompt=prompt, prompt_upsampling=prompt_upsampling, seed=seed, @@ -247,22 +153,26 @@ class FluxProUltraImageNode(IO.ComfyNode): maximum_ratio_str=cls.MAXIMUM_RATIO_STR, ), raw=raw, - image_prompt=( - image_prompt - if image_prompt is None - else convert_image_to_base64(image_prompt) - ), - image_prompt_strength=( - None if image_prompt is None else round(image_prompt_strength, 2) - ), + image_prompt=(image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt)), + image_prompt_strength=(None if image_prompt is None else round(image_prompt_strength, 2)), ), - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return IO.NodeOutput(output_image) + response = await poll_op( + cls, + ApiEndpoint(initial_response.polling_url), + response_model=BFLFluxStatusResponse, + status_extractor=lambda r: r.status, + progress_extractor=lambda r: r.progress, + completed_statuses=[BFLStatus.ready], + failed_statuses=[ + BFLStatus.request_moderated, + BFLStatus.content_moderated, + BFLStatus.error, + BFLStatus.task_not_found, + ], + queued_statuses=[], + ) + return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) class FluxKontextProImageNode(IO.ComfyNode): @@ -347,7 +257,7 @@ class FluxKontextProImageNode(IO.ComfyNode): aspect_ratio: str, guidance: float, steps: int, - input_image: Optional[torch.Tensor]=None, + input_image: Optional[torch.Tensor] = None, seed=0, prompt_upsampling=False, ) -> IO.NodeOutput: @@ -360,33 +270,36 @@ class FluxKontextProImageNode(IO.ComfyNode): ) if input_image is None: validate_string(prompt, strip_whitespace=False) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=cls.BFL_PATH, - method=HttpMethod.POST, - request_model=BFLFluxKontextProGenerateRequest, - response_model=BFLFluxProGenerateResponse, - ), - request=BFLFluxKontextProGenerateRequest( + initial_response = await sync_op( + cls, + ApiEndpoint(path=cls.BFL_PATH, method="POST"), + response_model=BFLFluxProGenerateResponse, + data=BFLFluxKontextProGenerateRequest( 
prompt=prompt, prompt_upsampling=prompt_upsampling, guidance=round(guidance, 1), steps=steps, seed=seed, aspect_ratio=aspect_ratio, - input_image=( - input_image - if input_image is None - else convert_image_to_base64(input_image) - ) + input_image=(input_image if input_image is None else tensor_to_base64_string(input_image)), ), - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return IO.NodeOutput(output_image) + response = await poll_op( + cls, + ApiEndpoint(initial_response.polling_url), + response_model=BFLFluxStatusResponse, + status_extractor=lambda r: r.status, + progress_extractor=lambda r: r.progress, + completed_statuses=[BFLStatus.ready], + failed_statuses=[ + BFLStatus.request_moderated, + BFLStatus.content_moderated, + BFLStatus.error, + BFLStatus.task_not_found, + ], + queued_statuses=[], + ) + return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) class FluxKontextMaxImageNode(FluxKontextProImageNode): @@ -422,7 +335,9 @@ class FluxProImageNode(IO.ComfyNode): IO.Boolean.Input( "prompt_upsampling", default=False, - tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + tooltip="Whether to perform upsampling on the prompt. " + "If active, automatically modifies the prompt for more creative generation, " + "but results are nondeterministic (same seed will not produce exactly the same result).", ), IO.Int.Input( "width", @@ -481,20 +396,15 @@ class FluxProImageNode(IO.ComfyNode): image_prompt=None, # image_prompt_strength=0.1, ) -> IO.NodeOutput: - image_prompt = ( - image_prompt - if image_prompt is None - else convert_image_to_base64(image_prompt) - ) - - operation = SynchronousOperation( - endpoint=ApiEndpoint( + image_prompt = image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt) + initial_response = await sync_op( + cls, + ApiEndpoint( path="/proxy/bfl/flux-pro-1.1/generate", - method=HttpMethod.POST, - request_model=BFLFluxProGenerateRequest, - response_model=BFLFluxProGenerateResponse, + method="POST", ), - request=BFLFluxProGenerateRequest( + response_model=BFLFluxProGenerateResponse, + data=BFLFluxProGenerateRequest( prompt=prompt, prompt_upsampling=prompt_upsampling, width=width, @@ -502,13 +412,23 @@ class FluxProImageNode(IO.ComfyNode): seed=seed, image_prompt=image_prompt, ), - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return IO.NodeOutput(output_image) + response = await poll_op( + cls, + ApiEndpoint(initial_response.polling_url), + response_model=BFLFluxStatusResponse, + status_extractor=lambda r: r.status, + progress_extractor=lambda r: r.progress, + completed_statuses=[BFLStatus.ready], + failed_statuses=[ + BFLStatus.request_moderated, + BFLStatus.content_moderated, + BFLStatus.error, + BFLStatus.task_not_found, + ], + queued_statuses=[], + ) + return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) class FluxProExpandNode(IO.ComfyNode): @@ -534,7 +454,9 @@ class FluxProExpandNode(IO.ComfyNode): IO.Boolean.Input( "prompt_upsampling", default=False, - tooltip="Whether to perform 
upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + tooltip="Whether to perform upsampling on the prompt. " + "If active, automatically modifies the prompt for more creative generation, " + "but results are nondeterministic (same seed will not produce exactly the same result).", ), IO.Int.Input( "top", @@ -610,16 +532,11 @@ class FluxProExpandNode(IO.ComfyNode): guidance: float, seed=0, ) -> IO.NodeOutput: - image = convert_image_to_base64(image) - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/bfl/flux-pro-1.0-expand/generate", - method=HttpMethod.POST, - request_model=BFLFluxExpandImageRequest, - response_model=BFLFluxProGenerateResponse, - ), - request=BFLFluxExpandImageRequest( + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/bfl/flux-pro-1.0-expand/generate", method="POST"), + response_model=BFLFluxProGenerateResponse, + data=BFLFluxExpandImageRequest( prompt=prompt, prompt_upsampling=prompt_upsampling, top=top, @@ -629,16 +546,25 @@ class FluxProExpandNode(IO.ComfyNode): steps=steps, guidance=guidance, seed=seed, - image=image, + image=tensor_to_base64_string(image), ), - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return IO.NodeOutput(output_image) - + response = await poll_op( + cls, + ApiEndpoint(initial_response.polling_url), + response_model=BFLFluxStatusResponse, + status_extractor=lambda r: r.status, + progress_extractor=lambda r: r.progress, + completed_statuses=[BFLStatus.ready], + failed_statuses=[ + BFLStatus.request_moderated, + BFLStatus.content_moderated, + BFLStatus.error, + BFLStatus.task_not_found, + ], + queued_statuses=[], + ) + return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) class FluxProFillNode(IO.ComfyNode): @@ -665,7 +591,9 @@ class FluxProFillNode(IO.ComfyNode): IO.Boolean.Input( "prompt_upsampling", default=False, - tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", + tooltip="Whether to perform upsampling on the prompt. 
" + "If active, automatically modifies the prompt for more creative generation, " + "but results are nondeterministic (same seed will not produce exactly the same result).", ), IO.Float.Input( "guidance", @@ -712,272 +640,37 @@ class FluxProFillNode(IO.ComfyNode): ) -> IO.NodeOutput: # prepare mask mask = resize_mask_to_image(mask, image) - mask = convert_image_to_base64(convert_mask_to_image(mask)) - # make sure image will have alpha channel removed - image = convert_image_to_base64(image[:, :, :, :3]) - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/bfl/flux-pro-1.0-fill/generate", - method=HttpMethod.POST, - request_model=BFLFluxFillImageRequest, - response_model=BFLFluxProGenerateResponse, - ), - request=BFLFluxFillImageRequest( + mask = tensor_to_base64_string(convert_mask_to_image(mask)) + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/bfl/flux-pro-1.0-fill/generate", method="POST"), + response_model=BFLFluxProGenerateResponse, + data=BFLFluxFillImageRequest( prompt=prompt, prompt_upsampling=prompt_upsampling, steps=steps, guidance=guidance, seed=seed, - image=image, + image=tensor_to_base64_string(image[:, :, :, :3]), # make sure image will have alpha channel removed mask=mask, ), - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return IO.NodeOutput(output_image) - - -class FluxProCannyNode(IO.ComfyNode): - """ - Generate image using a control image (canny). - """ - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="FluxProCannyNode", - display_name="Flux.1 Canny Control Image", - category="api node/image/BFL", - description=cleandoc(cls.__doc__ or ""), - inputs=[ - IO.Image.Input("control_image"), - IO.String.Input( - "prompt", - multiline=True, - default="", - tooltip="Prompt for the image generation", - ), - IO.Boolean.Input( - "prompt_upsampling", - default=False, - tooltip="Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - ), - IO.Float.Input( - "canny_low_threshold", - default=0.1, - min=0.01, - max=0.99, - step=0.01, - tooltip="Low threshold for Canny edge detection; ignored if skip_processing is True", - ), - IO.Float.Input( - "canny_high_threshold", - default=0.4, - min=0.01, - max=0.99, - step=0.01, - tooltip="High threshold for Canny edge detection; ignored if skip_processing is True", - ), - IO.Boolean.Input( - "skip_preprocessing", - default=False, - tooltip="Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.", - ), - IO.Float.Input( - "guidance", - default=30, - min=1, - max=100, - tooltip="Guidance strength for the image generation process", - ), - IO.Int.Input( - "steps", - default=50, - min=15, - max=50, - tooltip="Number of steps for the image generation process", - ), - IO.Int.Input( - "seed", - default=0, - min=0, - max=0xFFFFFFFFFFFFFFFF, - control_after_generate=True, - tooltip="The random seed used for creating the noise.", - ), + response = await poll_op( + cls, + ApiEndpoint(initial_response.polling_url), + response_model=BFLFluxStatusResponse, + status_extractor=lambda r: r.status, + progress_extractor=lambda r: r.progress, + completed_statuses=[BFLStatus.ready], + failed_statuses=[ + BFLStatus.request_moderated, + BFLStatus.content_moderated, + BFLStatus.error, + BFLStatus.task_not_found, ], - outputs=[IO.Image.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, + queued_statuses=[], ) - - @classmethod - async def execute( - cls, - control_image: torch.Tensor, - prompt: str, - prompt_upsampling: bool, - canny_low_threshold: float, - canny_high_threshold: float, - skip_preprocessing: bool, - steps: int, - guidance: float, - seed=0, - ) -> IO.NodeOutput: - control_image = convert_image_to_base64(control_image[:, :, :, :3]) - preprocessed_image = None - - # scale canny threshold between 0-500, to match BFL's API - def scale_value(value: float, min_val=0, max_val=500): - return min_val + value * (max_val - min_val) - canny_low_threshold = int(round(scale_value(canny_low_threshold))) - canny_high_threshold = int(round(scale_value(canny_high_threshold))) - - - if skip_preprocessing: - preprocessed_image = control_image - control_image = None - canny_low_threshold = None - canny_high_threshold = None - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/bfl/flux-pro-1.0-canny/generate", - method=HttpMethod.POST, - request_model=BFLFluxCannyImageRequest, - response_model=BFLFluxProGenerateResponse, - ), - request=BFLFluxCannyImageRequest( - prompt=prompt, - prompt_upsampling=prompt_upsampling, - steps=steps, - guidance=guidance, - seed=seed, - control_image=control_image, - canny_low_threshold=canny_low_threshold, - canny_high_threshold=canny_high_threshold, - preprocessed_image=preprocessed_image, - ), - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return IO.NodeOutput(output_image) - - -class FluxProDepthNode(IO.ComfyNode): - """ - Generate image using a control image (depth). 
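
# This depth node and the canny node above are removed outright; every remaining
# BFL node in this file now shares one submit-then-poll shape. A condensed sketch
# of that shape, using only names from this file's imports (the endpoint path is
# a placeholder):
from comfy_api_nodes.apis.bfl_api import (
    BFLFluxProGenerateResponse,
    BFLFluxStatusResponse,
    BFLStatus,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_image_tensor,
    poll_op,
    sync_op,
)

async def _sketch_bfl_generate(cls, request_body):
    # submit the generation request and receive a polling URL
    initial = await sync_op(
        cls,
        ApiEndpoint(path="/proxy/bfl/<model>/generate", method="POST"),
        response_model=BFLFluxProGenerateResponse,
        data=request_body,
    )
    # poll until a terminal status, reporting progress from the response
    final = await poll_op(
        cls,
        ApiEndpoint(initial.polling_url),
        response_model=BFLFluxStatusResponse,
        status_extractor=lambda r: r.status,
        progress_extractor=lambda r: r.progress,
        completed_statuses=[BFLStatus.ready],
        failed_statuses=[
            BFLStatus.request_moderated,
            BFLStatus.content_moderated,
            BFLStatus.error,
            BFLStatus.task_not_found,
        ],
        queued_statuses=[],
    )
    return await download_url_to_image_tensor(final.result["sample"])
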
- """ - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="FluxProDepthNode", - display_name="Flux.1 Depth Control Image", - category="api node/image/BFL", - description=cleandoc(cls.__doc__ or ""), - inputs=[ - IO.Image.Input("control_image"), - IO.String.Input( - "prompt", - multiline=True, - default="", - tooltip="Prompt for the image generation", - ), - IO.Boolean.Input( - "prompt_upsampling", - default=False, - tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", - ), - IO.Boolean.Input( - "skip_preprocessing", - default=False, - tooltip="Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.", - ), - IO.Float.Input( - "guidance", - default=15, - min=1, - max=100, - tooltip="Guidance strength for the image generation process", - ), - IO.Int.Input( - "steps", - default=50, - min=15, - max=50, - tooltip="Number of steps for the image generation process", - ), - IO.Int.Input( - "seed", - default=0, - min=0, - max=0xFFFFFFFFFFFFFFFF, - control_after_generate=True, - tooltip="The random seed used for creating the noise.", - ), - ], - outputs=[IO.Image.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - ) - - @classmethod - async def execute( - cls, - control_image: torch.Tensor, - prompt: str, - prompt_upsampling: bool, - skip_preprocessing: bool, - steps: int, - guidance: float, - seed=0, - ) -> IO.NodeOutput: - control_image = convert_image_to_base64(control_image[:,:,:,:3]) - preprocessed_image = None - - if skip_preprocessing: - preprocessed_image = control_image - control_image = None - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/bfl/flux-pro-1.0-depth/generate", - method=HttpMethod.POST, - request_model=BFLFluxDepthImageRequest, - response_model=BFLFluxProGenerateResponse, - ), - request=BFLFluxDepthImageRequest( - prompt=prompt, - prompt_upsampling=prompt_upsampling, - steps=steps, - guidance=guidance, - seed=seed, - control_image=control_image, - preprocessed_image=preprocessed_image, - ), - auth_kwargs={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - ) - output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id) - return IO.NodeOutput(output_image) + return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) class BFLExtension(ComfyExtension): @@ -990,8 +683,6 @@ class BFLExtension(ComfyExtension): FluxKontextMaxImageNode, FluxProExpandNode, FluxProFillNode, - FluxProCannyNode, - FluxProDepthNode, ] diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index f3d3f8d3e..534af380d 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -1,35 +1,27 @@ import logging import math from enum import Enum -from typing import Literal, Optional, Type, Union -from typing_extensions import override +from typing import Literal, Optional, Union import torch from pydantic import BaseModel, Field +from typing_extensions import override -from comfy_api.latest import ComfyExtension, IO -from comfy_api_nodes.util.validation_utils import ( - validate_image_aspect_ratio_range, - get_number_of_images, - validate_image_dimensions, -) -from 
comfy_api_nodes.apis.client import ( +from comfy_api.latest import IO, ComfyExtension +from comfy_api_nodes.util import ( ApiEndpoint, - EmptyRequest, - HttpMethod, - SynchronousOperation, - PollingOperation, - T, -) -from comfy_api_nodes.apinode_utils import ( download_url_to_image_tensor, download_url_to_video_output, - upload_images_to_comfyapi, - validate_string, + get_number_of_images, image_tensor_pair_to_batch, + poll_op, + sync_op, + upload_images_to_comfyapi, + validate_image_aspect_ratio_range, + validate_image_dimensions, + validate_string, ) - BYTEPLUS_IMAGE_ENDPOINT = "/proxy/byteplus/api/v3/images/generations" # Long-running tasks endpoints(e.g., video) @@ -46,13 +38,14 @@ class Image2ImageModelName(str, Enum): class Text2VideoModelName(str, Enum): - seedance_1_pro = "seedance-1-0-pro-250528" + seedance_1_pro = "seedance-1-0-pro-250528" seedance_1_lite = "seedance-1-0-lite-t2v-250428" class Image2VideoModelName(str, Enum): """note(August 31): Pro model only supports FirstFrame: https://docs.byteplus.com/en/docs/ModelArk/1520757""" - seedance_1_pro = "seedance-1-0-pro-250528" + + seedance_1_pro = "seedance-1-0-pro-250528" seedance_1_lite = "seedance-1-0-lite-i2v-250428" @@ -208,35 +201,6 @@ def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, N return None -async def poll_until_finished( - auth_kwargs: dict[str, str], - task_id: str, - estimated_duration: Optional[int] = None, - node_id: Optional[str] = None, -) -> TaskStatusResponse: - """Polls the ByteDance API endpoint until the task reaches a terminal state, then returns the response.""" - return await PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"{BYTEPLUS_TASK_STATUS_ENDPOINT}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=TaskStatusResponse, - ), - completed_statuses=[ - "succeeded", - ], - failed_statuses=[ - "cancelled", - "failed", - ], - status_extractor=lambda response: response.status, - auth_kwargs=auth_kwargs, - result_url_extractor=get_video_url_from_task_status, - estimated_duration=estimated_duration, - node_id=node_id, - ).execute() - - class ByteDanceImageNode(IO.ComfyNode): @classmethod @@ -303,7 +267,7 @@ class ByteDanceImageNode(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip="Whether to add an \"AI generated\" watermark to the image", + tooltip='Whether to add an "AI generated" watermark to the image', optional=True, ), ], @@ -341,8 +305,7 @@ class ByteDanceImageNode(IO.ComfyNode): w, h = width, height if not (512 <= w <= 2048) or not (512 <= h <= 2048): raise ValueError( - f"Custom size out of range: {w}x{h}. " - "Both width and height must be between 512 and 2048 pixels." + f"Custom size out of range: {w}x{h}. " "Both width and height must be between 512 and 2048 pixels." 
) payload = Text2ImageTaskCreationRequest( @@ -353,20 +316,12 @@ class ByteDanceImageNode(IO.ComfyNode): guidance_scale=guidance_scale, watermark=watermark, ) - auth_kwargs = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path=BYTEPLUS_IMAGE_ENDPOINT, - method=HttpMethod.POST, - request_model=Text2ImageTaskCreationRequest, - response_model=ImageTaskCreationResponse, - ), - request=payload, - auth_kwargs=auth_kwargs, - ).execute() + response = await sync_op( + cls, + ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"), + data=payload, + response_model=ImageTaskCreationResponse, + ) return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) @@ -420,7 +375,7 @@ class ByteDanceImageEditNode(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip="Whether to add an \"AI generated\" watermark to the image", + tooltip='Whether to add an "AI generated" watermark to the image', optional=True, ), ], @@ -449,16 +404,7 @@ class ByteDanceImageEditNode(IO.ComfyNode): if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") validate_image_aspect_ratio_range(image, (1, 3), (3, 1)) - auth_kwargs = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - source_url = (await upload_images_to_comfyapi( - image, - max_images=1, - mime_type="image/png", - auth_kwargs=auth_kwargs, - ))[0] + source_url = (await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0] payload = Image2ImageTaskCreationRequest( model=model, prompt=prompt, @@ -467,16 +413,12 @@ class ByteDanceImageEditNode(IO.ComfyNode): guidance_scale=guidance_scale, watermark=watermark, ) - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path=BYTEPLUS_IMAGE_ENDPOINT, - method=HttpMethod.POST, - request_model=Image2ImageTaskCreationRequest, - response_model=ImageTaskCreationResponse, - ), - request=payload, - auth_kwargs=auth_kwargs, - ).execute() + response = await sync_op( + cls, + ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"), + data=payload, + response_model=ImageTaskCreationResponse, + ) return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) @@ -504,7 +446,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): IO.Image.Input( "image", tooltip="Input image(s) for image-to-image generation. " - "List of 1-10 images for single or multi-reference generation.", + "List of 1-10 images for single or multi-reference generation.", optional=True, ), IO.Combo.Input( @@ -534,9 +476,9 @@ class ByteDanceSeedreamNode(IO.ComfyNode): "sequential_image_generation", options=["disabled", "auto"], tooltip="Group image generation mode. " - "'disabled' generates a single image. " - "'auto' lets the model decide whether to generate multiple related images " - "(e.g., story scenes, character variations).", + "'disabled' generates a single image. " + "'auto' lets the model decide whether to generate multiple related images " + "(e.g., story scenes, character variations).", optional=True, ), IO.Int.Input( @@ -547,7 +489,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): step=1, display_mode=IO.NumberDisplay.number, tooltip="Maximum number of images to generate when sequential_image_generation='auto'. 
" - "Total images (input + generated) cannot exceed 15.", + "Total images (input + generated) cannot exceed 15.", optional=True, ), IO.Int.Input( @@ -564,7 +506,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip="Whether to add an \"AI generated\" watermark to the image.", + tooltip='Whether to add an "AI generated" watermark to the image.', optional=True, ), IO.Boolean.Input( @@ -611,8 +553,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): w, h = width, height if not (1024 <= w <= 4096) or not (1024 <= h <= 4096): raise ValueError( - f"Custom size out of range: {w}x{h}. " - "Both width and height must be between 1024 and 4096 pixels." + f"Custom size out of range: {w}x{h}. " "Both width and height must be between 1024 and 4096 pixels." ) n_input_images = get_number_of_images(image) if image is not None else 0 if n_input_images > 10: @@ -621,41 +562,31 @@ class ByteDanceSeedreamNode(IO.ComfyNode): raise ValueError( "The maximum number of generated images plus the number of reference images cannot exceed 15." ) - auth_kwargs = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } reference_images_urls = [] if n_input_images: for i in image: validate_image_aspect_ratio_range(i, (1, 3), (3, 1)) - reference_images_urls = (await upload_images_to_comfyapi( + reference_images_urls = await upload_images_to_comfyapi( + cls, image, max_images=n_input_images, mime_type="image/png", - auth_kwargs=auth_kwargs, - )) - payload = Seedream4TaskCreationRequest( - model=model, - prompt=prompt, - image=reference_images_urls, - size=f"{w}x{h}", - seed=seed, - sequential_image_generation=sequential_image_generation, - sequential_image_generation_options=Seedream4Options(max_images=max_images), - watermark=watermark, - ) - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path=BYTEPLUS_IMAGE_ENDPOINT, - method=HttpMethod.POST, - request_model=Seedream4TaskCreationRequest, - response_model=ImageTaskCreationResponse, + ) + response = await sync_op( + cls, + ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"), + response_model=ImageTaskCreationResponse, + data=Seedream4TaskCreationRequest( + model=model, + prompt=prompt, + image=reference_images_urls, + size=f"{w}x{h}", + seed=seed, + sequential_image_generation=sequential_image_generation, + sequential_image_generation_options=Seedream4Options(max_images=max_images), + watermark=watermark, ), - request=payload, - auth_kwargs=auth_kwargs, - ).execute() - + ) if len(response.data) == 1: return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response))) urls = [str(d["url"]) for d in response.data if isinstance(d, dict) and "url" in d] @@ -719,13 +650,13 @@ class ByteDanceTextToVideoNode(IO.ComfyNode): "camera_fixed", default=False, tooltip="Specifies whether to fix the camera. 
                tooltip="Specifies whether to fix the camera. The platform appends an instruction "
-                "to fix the camera to your prompt, but does not guarantee the actual effect.",
+                        "to fix the camera to your prompt, but does not guarantee the actual effect.",
                 optional=True,
             ),
             IO.Boolean.Input(
                 "watermark",
                 default=True,
-                tooltip="Whether to add an \"AI generated\" watermark to the video.",
+                tooltip='Whether to add an "AI generated" watermark to the video.',
                 optional=True,
             ),
         ],
@@ -764,19 +695,9 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
             f"--camerafixed {str(camera_fixed).lower()} "
             f"--watermark {str(watermark).lower()}"
         )
-
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
         return await process_video_task(
-            request_model=Text2VideoTaskCreationRequest,
-            payload=Text2VideoTaskCreationRequest(
-                model=model,
-                content=[TaskTextContent(text=prompt)],
-            ),
-            auth_kwargs=auth_kwargs,
-            node_id=cls.hidden.unique_id,
+            cls,
+            payload=Text2VideoTaskCreationRequest(model=model, content=[TaskTextContent(text=prompt)]),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
 
@@ -840,13 +761,13 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
                 "camera_fixed",
                 default=False,
                 tooltip="Specifies whether to fix the camera. The platform appends an instruction "
-                "to fix the camera to your prompt, but does not guarantee the actual effect.",
+                        "to fix the camera to your prompt, but does not guarantee the actual effect.",
                 optional=True,
             ),
             IO.Boolean.Input(
                 "watermark",
                 default=True,
-                tooltip="Whether to add an \"AI generated\" watermark to the video.",
+                tooltip='Whether to add an "AI generated" watermark to the video.',
                 optional=True,
             ),
         ],
@@ -879,13 +800,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
         validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000)
         validate_image_aspect_ratio_range(image, (2, 5), (5, 2), strict=False)  # 0.4 to 2.5
 
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
-        image_url = (await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=auth_kwargs))[0]
-
+        image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0]
         prompt = (
             f"{prompt} "
             f"--resolution {resolution} "
@@ -897,13 +812,11 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
         )
 
         return await process_video_task(
-            request_model=Image2VideoTaskCreationRequest,
+            cls,
             payload=Image2VideoTaskCreationRequest(
                 model=model,
                 content=[TaskTextContent(text=prompt), TaskImageContent(image_url=TaskImageContentUrl(url=image_url))],
             ),
-            auth_kwargs=auth_kwargs,
-            node_id=cls.hidden.unique_id,
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
 
@@ -971,13 +884,13 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
                 "camera_fixed",
                 default=False,
                 tooltip="Specifies whether to fix the camera. The platform appends an instruction "
-                "to fix the camera to your prompt, but does not guarantee the actual effect.",
+                        "to fix the camera to your prompt, but does not guarantee the actual effect.",
                 optional=True,
             ),
             IO.Boolean.Input(
                 "watermark",
                 default=True,
-                tooltip="Whether to add an \"AI generated\" watermark to the video.",
+                tooltip='Whether to add an "AI generated" watermark to the video.',
                 optional=True,
             ),
         ],
@@ -1012,16 +925,11 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
             validate_image_dimensions(i, min_width=300, min_height=300, max_width=6000, max_height=6000)
             validate_image_aspect_ratio_range(i, (2, 5), (5, 2), strict=False)  # 0.4 to 2.5
 
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         download_urls = await upload_images_to_comfyapi(
+            cls,
             image_tensor_pair_to_batch(first_frame, last_frame),
             max_images=2,
             mime_type="image/png",
-            auth_kwargs=auth_kwargs,
         )
 
         prompt = (
@@ -1035,7 +943,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
         )
 
         return await process_video_task(
-            request_model=Image2VideoTaskCreationRequest,
+            cls,
             payload=Image2VideoTaskCreationRequest(
                 model=model,
                 content=[
@@ -1044,8 +952,6 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
                     TaskImageContent(image_url=TaskImageContentUrl(url=str(download_urls[1])), role="last_frame"),
                 ],
             ),
-            auth_kwargs=auth_kwargs,
-            node_id=cls.hidden.unique_id,
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
 
@@ -1108,7 +1014,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
             IO.Boolean.Input(
                 "watermark",
                 default=True,
-                tooltip="Whether to add an \"AI generated\" watermark to the video.",
+                tooltip='Whether to add an "AI generated" watermark to the video.',
                 optional=True,
             ),
         ],
@@ -1141,15 +1047,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
         validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000)
         validate_image_aspect_ratio_range(image, (2, 5), (5, 2), strict=False)  # 0.4 to 2.5
 
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
-        image_urls = await upload_images_to_comfyapi(
-            images, max_images=4, mime_type="image/png", auth_kwargs=auth_kwargs
-        )
-
+        image_urls = await upload_images_to_comfyapi(cls, images, max_images=4, mime_type="image/png")
         prompt = (
             f"{prompt} "
             f"--resolution {resolution} "
@@ -1160,42 +1058,32 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
         )
         x = [
             TaskTextContent(text=prompt),
-            *[TaskImageContent(image_url=TaskImageContentUrl(url=str(i)), role="reference_image") for i in image_urls]
+            *[TaskImageContent(image_url=TaskImageContentUrl(url=str(i)), role="reference_image") for i in image_urls],
         ]
         return await process_video_task(
-            request_model=Image2VideoTaskCreationRequest,
-            payload=Image2VideoTaskCreationRequest(
-                model=model,
-                content=x,
-            ),
-            auth_kwargs=auth_kwargs,
-            node_id=cls.hidden.unique_id,
+            cls,
+            payload=Image2VideoTaskCreationRequest(model=model, content=x),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
 
 
 async def process_video_task(
-    request_model: Type[T],
+    cls: type[IO.ComfyNode],
     payload: Union[Text2VideoTaskCreationRequest, Image2VideoTaskCreationRequest],
-    auth_kwargs: dict,
-    node_id: str,
     estimated_duration: Optional[int],
 ) -> IO.NodeOutput:
-    initial_response = await SynchronousOperation(
-        endpoint=ApiEndpoint(
-            path=BYTEPLUS_TASK_ENDPOINT,
-            method=HttpMethod.POST,
-            request_model=request_model,
-            response_model=TaskCreationResponse,
-        ),
-        request=payload,
-        auth_kwargs=auth_kwargs,
-    ).execute()
-    response = await poll_until_finished(
-        auth_kwargs,
-        initial_response.id,
+    initial_response = await sync_op(
+        cls,
+        ApiEndpoint(path=BYTEPLUS_TASK_ENDPOINT, method="POST"),
+        data=payload,
+        response_model=TaskCreationResponse,
+    )
+    response = await poll_op(
+        cls,
+        ApiEndpoint(path=f"{BYTEPLUS_TASK_STATUS_ENDPOINT}/{initial_response.id}"),
+        status_extractor=lambda r: r.status,
        estimated_duration=estimated_duration,
-        node_id=node_id,
+        response_model=TaskStatusResponse,
    )
     return IO.NodeOutput(await download_url_to_video_output(get_video_url_from_task_status(response)))
 
@@ -1221,5 +1109,6 @@ class ByteDanceExtension(ComfyExtension):
             ByteDanceImageReferenceNode,
         ]
 
+
 async def comfy_entrypoint() -> ByteDanceExtension:
     return ByteDanceExtension()
diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py
index c1941cbe9..ca11b67ed 100644
--- a/comfy_api_nodes/nodes_gemini.py
+++ b/comfy_api_nodes/nodes_gemini.py
@@ -33,12 +33,9 @@ from comfy_api_nodes.apis.client import (
     SynchronousOperation,
 )
 from comfy_api_nodes.apinode_utils import (
-    validate_string,
-    audio_to_base64_string,
     video_to_base64_string,
-    tensor_to_base64_string,
-    bytesio_to_image_tensor,
 )
+from comfy_api_nodes.util import validate_string, tensor_to_base64_string, bytesio_to_image_tensor, audio_to_base64_string
 
 from comfy_api.util import VideoContainer, VideoCodec
diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py
index 67c8307c5..eea65c9ac 100644
--- a/comfy_api_nodes/nodes_kling.py
+++ b/comfy_api_nodes/nodes_kling.py
@@ -5,8 +5,7 @@ For source of truth on the allowed permutations of request fields, please refere
 """
 from __future__ import annotations
 
-from typing import Optional, TypeVar, Any
-from collections.abc import Callable
+from typing import Optional, TypeVar
 import math
 import logging
 
@@ -15,7 +14,6 @@ from typing_extensions import override
 import torch
 
 from comfy_api_nodes.apis import (
-    KlingTaskStatus,
     KlingCameraControl,
     KlingCameraConfig,
     KlingCameraControlType,
@@ -52,26 +50,20 @@ from comfy_api_nodes.apis import (
     KlingCharacterEffectModelName,
     KlingSingleImageEffectModelName,
 )
-from comfy_api_nodes.apis.client import (
-    ApiEndpoint,
-    HttpMethod,
-    SynchronousOperation,
-    PollingOperation,
-    EmptyRequest,
-)
-from comfy_api_nodes.apinode_utils import (
-    tensor_to_base64_string,
-    download_url_to_video_output,
-    upload_video_to_comfyapi,
-    upload_audio_to_comfyapi,
-    download_url_to_image_tensor,
-    validate_string,
-)
-from comfy_api_nodes.util.validation_utils import (
+from comfy_api_nodes.util import (
     validate_image_dimensions,
     validate_image_aspect_ratio,
     validate_video_dimensions,
     validate_video_duration,
+    tensor_to_base64_string,
+    validate_string,
+    upload_audio_to_comfyapi,
+    download_url_to_image_tensor,
+    upload_video_to_comfyapi,
+    download_url_to_video_output,
+    sync_op,
+    ApiEndpoint,
+    poll_op,
 )
 from comfy_api.input_impl import VideoFromFile
 from comfy_api.input.basic_types import AudioInput
@@ -214,34 +206,6 @@ VOICES_CONFIG = {
 }
 
 
-async def poll_until_finished(
-    auth_kwargs: dict[str, str],
-    api_endpoint: ApiEndpoint[Any, R],
-    result_url_extractor: Optional[Callable[[R], str]] = None,
-    estimated_duration: Optional[int] = None,
-    node_id: Optional[str] = None,
-) -> R:
-    """Polls the Kling API endpoint until the task reaches a terminal state, then returns the response."""
-    return await PollingOperation(
-        poll_endpoint=api_endpoint,
-        completed_statuses=[
-            KlingTaskStatus.succeed.value,
-        ],
-        failed_statuses=[KlingTaskStatus.failed.value],
-        status_extractor=lambda response: (
-            response.data.task_status.value
-            if response.data and response.data.task_status
-            else None
-        ),
-        auth_kwargs=auth_kwargs,
-        result_url_extractor=result_url_extractor,
-        estimated_duration=estimated_duration,
-        node_id=node_id,
-        poll_interval=16.0,
-        max_poll_attempts=256,
-    ).execute()
-
-
 def is_valid_camera_control_configs(configs: list[float]) -> bool:
     """Verifies that at least one camera control configuration is non-zero."""
     return any(not math.isclose(value, 0.0) for value in configs)
@@ -377,8 +341,7 @@ async def image_result_to_node_output(
 
 
 async def execute_text2video(
-    auth_kwargs: dict[str, str],
-    node_id: str,
+    cls: type[IO.ComfyNode],
     prompt: str,
     negative_prompt: str,
     cfg_scale: float,
@@ -389,14 +352,11 @@ async def execute_text2video(
     camera_control: Optional[KlingCameraControl] = None,
 ) -> IO.NodeOutput:
     validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V)
-    initial_operation = SynchronousOperation(
-        endpoint=ApiEndpoint(
-            path=PATH_TEXT_TO_VIDEO,
-            method=HttpMethod.POST,
-            request_model=KlingText2VideoRequest,
-            response_model=KlingText2VideoResponse,
-        ),
-        request=KlingText2VideoRequest(
+    task_creation_response = await sync_op(
+        cls,
+        ApiEndpoint(path=PATH_TEXT_TO_VIDEO, method="POST"),
+        response_model=KlingText2VideoResponse,
+        data=KlingText2VideoRequest(
             prompt=prompt if prompt else None,
             negative_prompt=negative_prompt if negative_prompt else None,
             duration=KlingVideoGenDuration(duration),
@@ -406,24 +366,17 @@ async def execute_text2video(
             aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio),
             camera_control=camera_control,
         ),
-        auth_kwargs=auth_kwargs,
     )
-    task_creation_response = await initial_operation.execute()
     validate_task_creation_response(task_creation_response)
     task_id = task_creation_response.data.task_id
 
-    final_response = await poll_until_finished(
-        auth_kwargs,
-        ApiEndpoint(
-            path=f"{PATH_TEXT_TO_VIDEO}/{task_id}",
-            method=HttpMethod.GET,
-            request_model=EmptyRequest,
-            response_model=KlingText2VideoResponse,
-        ),
-        result_url_extractor=get_video_url_from_response,
+    final_response = await poll_op(
+        cls,
+        ApiEndpoint(path=f"{PATH_TEXT_TO_VIDEO}/{task_id}"),
+        response_model=KlingText2VideoResponse,
         estimated_duration=AVERAGE_DURATION_T2V,
-        node_id=node_id,
+        status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None),
     )
     validate_video_result_response(final_response)
 
@@ -432,8 +385,7 @@
 
 async def execute_image2video(
-    auth_kwargs: dict[str, str],
-    node_id: str,
+    cls: type[IO.ComfyNode],
     start_frame: torch.Tensor,
     prompt: str,
     negative_prompt: str,
@@ -455,14 +407,11 @@ async def execute_image2video(
     if model_mode == "std" and model_name == KlingVideoGenModelName.kling_v2_5_turbo.value:
         model_mode = "pro"  # October 5: currently "std" mode is not supported for this model
 
-    initial_operation = SynchronousOperation(
-        endpoint=ApiEndpoint(
-            path=PATH_IMAGE_TO_VIDEO,
-            method=HttpMethod.POST,
-            request_model=KlingImage2VideoRequest,
-            response_model=KlingImage2VideoResponse,
-        ),
-        request=KlingImage2VideoRequest(
+    task_creation_response = await sync_op(
+        cls,
+        ApiEndpoint(path=PATH_IMAGE_TO_VIDEO, method="POST"),
+        response_model=KlingImage2VideoResponse,
+        data=KlingImage2VideoRequest(
             model_name=KlingVideoGenModelName(model_name),
             image=tensor_to_base64_string(start_frame),
             image_tail=(
@@ -477,24 +426,17 @@ async def execute_image2video(
             duration=KlingVideoGenDuration(duration),
             camera_control=camera_control,
         ),
-        auth_kwargs=auth_kwargs,
     )
-    task_creation_response = await initial_operation.execute()
     validate_task_creation_response(task_creation_response)
     task_id = task_creation_response.data.task_id
 
-    final_response = await poll_until_finished(
-        auth_kwargs,
-        ApiEndpoint(
-            path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}",
-            method=HttpMethod.GET,
-            request_model=KlingImage2VideoRequest,
-            response_model=KlingImage2VideoResponse,
-        ),
-        result_url_extractor=get_video_url_from_response,
+    final_response = await poll_op(
+        cls,
+        ApiEndpoint(path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}"),
+        response_model=KlingImage2VideoResponse,
         estimated_duration=AVERAGE_DURATION_I2V,
-        node_id=node_id,
+        status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None),
     )
     validate_video_result_response(final_response)
 
@@ -503,8 +445,7 @@
 
 async def execute_video_effect(
-    auth_kwargs: dict[str, str],
-    node_id: str,
+    cls: type[IO.ComfyNode],
     dual_character: bool,
     effect_scene: KlingDualCharacterEffectsScene | KlingSingleImageEffectsScene,
     model_name: str,
@@ -530,35 +471,25 @@ async def execute_video_effect(
         duration=duration,
     )
 
-    initial_operation = SynchronousOperation(
-        endpoint=ApiEndpoint(
-            path=PATH_VIDEO_EFFECTS,
-            method=HttpMethod.POST,
-            request_model=KlingVideoEffectsRequest,
-            response_model=KlingVideoEffectsResponse,
-        ),
-        request=KlingVideoEffectsRequest(
+    task_creation_response = await sync_op(
+        cls,
+        endpoint=ApiEndpoint(path=PATH_VIDEO_EFFECTS, method="POST"),
+        response_model=KlingVideoEffectsResponse,
+        data=KlingVideoEffectsRequest(
             effect_scene=effect_scene,
             input=request_input_field,
         ),
-        auth_kwargs=auth_kwargs,
     )
-    task_creation_response = await initial_operation.execute()
     validate_task_creation_response(task_creation_response)
     task_id = task_creation_response.data.task_id
 
-    final_response = await poll_until_finished(
-        auth_kwargs,
-        ApiEndpoint(
-            path=f"{PATH_VIDEO_EFFECTS}/{task_id}",
-            method=HttpMethod.GET,
-            request_model=EmptyRequest,
-            response_model=KlingVideoEffectsResponse,
-        ),
-        result_url_extractor=get_video_url_from_response,
+    final_response = await poll_op(
+        cls,
+        ApiEndpoint(path=f"{PATH_VIDEO_EFFECTS}/{task_id}"),
+        response_model=KlingVideoEffectsResponse,
         estimated_duration=AVERAGE_DURATION_VIDEO_EFFECTS,
-        node_id=node_id,
+        status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None),
     )
     validate_video_result_response(final_response)
 
@@ -567,8 +498,7 @@
 
 async def execute_lipsync(
-    auth_kwargs: dict[str, str],
-    node_id: str,
+    cls: type[IO.ComfyNode],
     video: VideoInput,
     audio: Optional[AudioInput] = None,
     voice_language: Optional[str] = None,
@@ -583,24 +513,21 @@ async def execute_lipsync(
     validate_video_duration(video, 2, 10)
 
     # Upload video to Comfy API and get download URL
-    video_url = await upload_video_to_comfyapi(video, auth_kwargs=auth_kwargs)
+    video_url = await upload_video_to_comfyapi(cls, video)
     logging.info("Uploaded video to Comfy API. URL: %s", video_url)
 
     # Upload the audio file to Comfy API and get download URL
     if audio:
-        audio_url = await upload_audio_to_comfyapi(audio, auth_kwargs=auth_kwargs)
+        audio_url = await upload_audio_to_comfyapi(cls, audio)
         logging.info("Uploaded audio to Comfy API. URL: %s", audio_url)
     else:
         audio_url = None
 
-    initial_operation = SynchronousOperation(
-        endpoint=ApiEndpoint(
-            path=PATH_LIP_SYNC,
-            method=HttpMethod.POST,
-            request_model=KlingLipSyncRequest,
-            response_model=KlingLipSyncResponse,
-        ),
-        request=KlingLipSyncRequest(
+    task_creation_response = await sync_op(
+        cls,
+        ApiEndpoint(path=PATH_LIP_SYNC, method="POST"),
+        response_model=KlingLipSyncResponse,
+        data=KlingLipSyncRequest(
             input=KlingLipSyncInputObject(
                 video_url=video_url,
                 mode=model_mode,
@@ -612,24 +539,17 @@ async def execute_lipsync(
                 voice_id=voice_id,
             ),
         ),
-        auth_kwargs=auth_kwargs,
     )
-    task_creation_response = await initial_operation.execute()
     validate_task_creation_response(task_creation_response)
     task_id = task_creation_response.data.task_id
 
-    final_response = await poll_until_finished(
-        auth_kwargs,
-        ApiEndpoint(
-            path=f"{PATH_LIP_SYNC}/{task_id}",
-            method=HttpMethod.GET,
-            request_model=EmptyRequest,
-            response_model=KlingLipSyncResponse,
-        ),
-        result_url_extractor=get_video_url_from_response,
+    final_response = await poll_op(
+        cls,
+        ApiEndpoint(path=f"{PATH_LIP_SYNC}/{task_id}"),
+        response_model=KlingLipSyncResponse,
         estimated_duration=AVERAGE_DURATION_LIP_SYNC,
-        node_id=node_id,
+        status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None),
     )
     validate_video_result_response(final_response)
 
@@ -807,11 +727,7 @@ class KlingTextToVideoNode(IO.ComfyNode):
     ) -> IO.NodeOutput:
         model_mode, duration, model_name = MODE_TEXT2VIDEO[mode]
         return await execute_text2video(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             prompt=prompt,
             negative_prompt=negative_prompt,
             cfg_scale=cfg_scale,
@@ -872,11 +788,7 @@ class KlingCameraControlT2VNode(IO.ComfyNode):
         camera_control: Optional[KlingCameraControl] = None,
     ) -> IO.NodeOutput:
         return await execute_text2video(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             model_name=KlingVideoGenModelName.kling_v1,
             cfg_scale=cfg_scale,
             model_mode=KlingVideoGenMode.std,
@@ -944,11 +856,7 @@ class KlingImage2VideoNode(IO.ComfyNode):
         end_frame: Optional[torch.Tensor] = None,
     ) -> IO.NodeOutput:
         return await execute_image2video(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             start_frame=start_frame,
             prompt=prompt,
             negative_prompt=negative_prompt,
@@ -1017,11 +925,7 @@ class KlingCameraControlI2VNode(IO.ComfyNode):
         camera_control: KlingCameraControl,
     ) -> IO.NodeOutput:
         return await execute_image2video(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             model_name=KlingVideoGenModelName.kling_v1_5,
             start_frame=start_frame,
             cfg_scale=cfg_scale,
@@ -1097,11 +1001,7 @@ class KlingStartEndFrameNode(IO.ComfyNode):
     ) -> IO.NodeOutput:
         mode, duration, model_name = MODE_START_END_FRAME[mode]
         return await execute_image2video(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             prompt=prompt,
             negative_prompt=negative_prompt,
             model_name=model_name,
@@ -1162,41 +1062,27 @@ class KlingVideoExtendNode(IO.ComfyNode):
         video_id: str,
     ) -> IO.NodeOutput:
         validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V)
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=PATH_VIDEO_EXTEND,
-                method=HttpMethod.POST,
-                request_model=KlingVideoExtendRequest,
-                response_model=KlingVideoExtendResponse,
-            ),
-            request=KlingVideoExtendRequest(
+        task_creation_response = await sync_op(
+            cls,
+            ApiEndpoint(path=PATH_VIDEO_EXTEND, method="POST"),
+            response_model=KlingVideoExtendResponse,
+            data=KlingVideoExtendRequest(
                 prompt=prompt if prompt else None,
                 negative_prompt=negative_prompt if negative_prompt else None,
                 cfg_scale=cfg_scale,
                 video_id=video_id,
             ),
-            auth_kwargs=auth,
         )
-        task_creation_response = await initial_operation.execute()
         validate_task_creation_response(task_creation_response)
         task_id = task_creation_response.data.task_id
 
-        final_response = await poll_until_finished(
-            auth,
-            ApiEndpoint(
-                path=f"{PATH_VIDEO_EXTEND}/{task_id}",
-                method=HttpMethod.GET,
-                request_model=EmptyRequest,
-                response_model=KlingVideoExtendResponse,
-            ),
-            result_url_extractor=get_video_url_from_response,
+        final_response = await poll_op(
+            cls,
+            ApiEndpoint(path=f"{PATH_VIDEO_EXTEND}/{task_id}"),
+            response_model=KlingVideoExtendResponse,
             estimated_duration=AVERAGE_DURATION_VIDEO_EXTEND,
-            node_id=cls.hidden.unique_id,
+            status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None),
         )
         validate_video_result_response(final_response)
 
@@ -1259,11 +1145,7 @@ class KlingDualCharacterVideoEffectNode(IO.ComfyNode):
         duration: KlingVideoGenDuration,
     ) -> IO.NodeOutput:
         video, _, duration = await execute_video_effect(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             dual_character=True,
             effect_scene=effect_scene,
             model_name=model_name,
@@ -1324,11 +1206,7 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode):
         return IO.NodeOutput(
             *(
                 await execute_video_effect(
-                    auth_kwargs={
-                        "auth_token": cls.hidden.auth_token_comfy_org,
-                        "comfy_api_key": cls.hidden.api_key_comfy_org,
-                    },
-                    node_id=cls.hidden.unique_id,
+                    cls,
                     dual_character=False,
                     effect_scene=effect_scene,
                     model_name=model_name,
@@ -1379,11 +1257,7 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
         voice_language: str,
     ) -> IO.NodeOutput:
         return await execute_lipsync(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             video=video,
             audio=audio,
             voice_language=voice_language,
@@ -1445,11 +1319,7 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode):
     ) -> IO.NodeOutput:
         voice_id, voice_language = VOICES_CONFIG[voice]
         return await execute_lipsync(
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
-            node_id=cls.hidden.unique_id,
+            cls,
             video=video,
             text=text,
             voice_language=voice_language,
@@ -1496,40 +1366,26 @@ class KlingVirtualTryOnNode(IO.ComfyNode):
         cloth_image: torch.Tensor,
         model_name: KlingVirtualTryOnModelName,
     ) -> IO.NodeOutput:
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=PATH_VIRTUAL_TRY_ON,
-                method=HttpMethod.POST,
-                request_model=KlingVirtualTryOnRequest,
-                response_model=KlingVirtualTryOnResponse,
-            ),
-            request=KlingVirtualTryOnRequest(
+        task_creation_response = await sync_op(
+            cls,
+            ApiEndpoint(path=PATH_VIRTUAL_TRY_ON, method="POST"),
+            response_model=KlingVirtualTryOnResponse,
+            data=KlingVirtualTryOnRequest(
                 human_image=tensor_to_base64_string(human_image),
                 cloth_image=tensor_to_base64_string(cloth_image),
                 model_name=model_name,
             ),
-            auth_kwargs=auth,
         )
-        task_creation_response = await initial_operation.execute()
         validate_task_creation_response(task_creation_response)
         task_id = task_creation_response.data.task_id
 
-        final_response = await poll_until_finished(
-            auth,
-            ApiEndpoint(
-                path=f"{PATH_VIRTUAL_TRY_ON}/{task_id}",
-                method=HttpMethod.GET,
-                request_model=EmptyRequest,
-                response_model=KlingVirtualTryOnResponse,
-            ),
-            result_url_extractor=get_images_urls_from_response,
+        final_response = await poll_op(
+            cls,
+            ApiEndpoint(path=f"{PATH_VIRTUAL_TRY_ON}/{task_id}"),
+            response_model=KlingVirtualTryOnResponse,
             estimated_duration=AVERAGE_DURATION_VIRTUAL_TRY_ON,
-            node_id=cls.hidden.unique_id,
+            status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None),
         )
         validate_image_result_response(final_response)
 
@@ -1625,18 +1481,11 @@ class KlingImageGenerationNode(IO.ComfyNode):
         else:
             image = tensor_to_base64_string(image)
 
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=PATH_IMAGE_GENERATIONS,
-                method=HttpMethod.POST,
-                request_model=KlingImageGenerationsRequest,
-                response_model=KlingImageGenerationsResponse,
-            ),
-            request=KlingImageGenerationsRequest(
+        task_creation_response = await sync_op(
+            cls,
+            ApiEndpoint(path=PATH_IMAGE_GENERATIONS, method="POST"),
+            response_model=KlingImageGenerationsResponse,
+            data=KlingImageGenerationsRequest(
                 model_name=model_name,
                 prompt=prompt,
                 negative_prompt=negative_prompt,
@@ -1647,24 +1496,17 @@ class KlingImageGenerationNode(IO.ComfyNode):
                 n=n,
                 aspect_ratio=aspect_ratio,
             ),
-            auth_kwargs=auth,
         )
-        task_creation_response = await initial_operation.execute()
         validate_task_creation_response(task_creation_response)
         task_id = task_creation_response.data.task_id
 
-        final_response = await poll_until_finished(
-            auth,
-            ApiEndpoint(
-                path=f"{PATH_IMAGE_GENERATIONS}/{task_id}",
-                method=HttpMethod.GET,
-                request_model=EmptyRequest,
-                response_model=KlingImageGenerationsResponse,
-            ),
-            result_url_extractor=get_images_urls_from_response,
+        final_response = await poll_op(
+            cls,
+            ApiEndpoint(path=f"{PATH_IMAGE_GENERATIONS}/{task_id}"),
+            response_model=KlingImageGenerationsResponse,
             estimated_duration=AVERAGE_DURATION_IMAGE_GEN,
-            node_id=cls.hidden.unique_id,
+            status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None),
         )
         validate_image_result_response(final_response)
diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py
index 610d95a77..e74441e5e 100644
--- a/comfy_api_nodes/nodes_luma.py
+++ b/comfy_api_nodes/nodes_luma.py
@@ -35,9 +35,9 @@ from comfy_api_nodes.apis.client import (
 from comfy_api_nodes.apinode_utils import (
     upload_images_to_comfyapi,
     process_image_response,
-    validate_string,
 )
 from server import PromptServer
+from comfy_api_nodes.util import validate_string
 
 import aiohttp
 import torch
diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py
index 23be1ae65..e3722e79b 100644
--- a/comfy_api_nodes/nodes_minimax.py
+++ b/comfy_api_nodes/nodes_minimax.py
@@ -24,8 +24,8 @@ from comfy_api_nodes.apis.client import (
 from comfy_api_nodes.apinode_utils import (
     download_url_to_bytesio,
     upload_images_to_comfyapi,
-    validate_string,
 )
+from comfy_api_nodes.util import validate_string
 from server import PromptServer
diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py
index 7566188dd..7c31d95b3 100644
--- a/comfy_api_nodes/nodes_moonvalley.py
+++ b/comfy_api_nodes/nodes_moonvalley.py
@@ -1,35 +1,31 @@
 import logging
-from typing import Any, Callable, Optional, TypeVar
+from typing import Optional
+
 import torch
 from typing_extensions import override
 
-from comfy_api_nodes.util.validation_utils import validate_image_dimensions
+from comfy_api.input import VideoInput
+from comfy_api.latest import IO, ComfyExtension
 from comfy_api_nodes.apis import (
-    MoonvalleyTextToVideoRequest,
+    MoonvalleyPromptResponse,
     MoonvalleyTextToVideoInferenceParams,
+    MoonvalleyTextToVideoRequest,
     MoonvalleyVideoToVideoInferenceParams,
     MoonvalleyVideoToVideoRequest,
-    MoonvalleyPromptResponse,
 )
-from comfy_api_nodes.apis.client import (
+from comfy_api_nodes.util import (
     ApiEndpoint,
-    HttpMethod,
-    SynchronousOperation,
-    PollingOperation,
-    EmptyRequest,
-)
-from comfy_api_nodes.apinode_utils import (
     download_url_to_video_output,
+    poll_op,
+    sync_op,
+    trim_video,
     upload_images_to_comfyapi,
     upload_video_to_comfyapi,
     validate_container_format_is_mp4,
+    validate_image_dimensions,
+    validate_string,
 )
-from comfy_api.input import VideoInput
-from comfy_api.latest import ComfyExtension, InputImpl, IO
-import av
-import io
-
 API_UPLOADS_ENDPOINT = "/proxy/moonvalley/uploads"
 API_PROMPTS_ENDPOINT = "/proxy/moonvalley/prompts"
 API_VIDEO2VIDEO_ENDPOINT = "/proxy/moonvalley/prompts/video-to-video"
@@ -51,13 +47,6 @@ MAX_VID_HEIGHT = 10000
 MAX_VIDEO_SIZE = 1024 * 1024 * 1024  # 1 GB max for in-memory video processing
 
 MOONVALLEY_MAREY_MAX_PROMPT_LENGTH = 5000
-
-R = TypeVar("R")
-
-
-class MoonvalleyApiError(Exception):
-    """Base exception for Moonvalley API errors."""
-
-    pass
 
 
 def is_valid_task_creation_response(response: MoonvalleyPromptResponse) -> bool:
@@ -69,64 +58,7 @@ def validate_task_creation_response(response) -> None:
     if not is_valid_task_creation_response(response):
         error_msg = f"Moonvalley Marey API: Initial request failed. Code: {response.code}, Message: {response.message}, Data: {response}"
         logging.error(error_msg)
-        raise MoonvalleyApiError(error_msg)
-
-
-def get_video_from_response(response):
-    video = response.output_url
-    logging.info(
-        "Moonvalley Marey API: Task %s succeeded. Video URL: %s", response.id, video
-    )
-    return video
-
-
-def get_video_url_from_response(response) -> Optional[str]:
-    """Returns the first video url from the Moonvalley video generation task result.
-    Will not raise an error if the response is not valid.
-    """
-    if response:
-        return str(get_video_from_response(response))
-    else:
-        return None
-
-
-async def poll_until_finished(
-    auth_kwargs: dict[str, str],
-    api_endpoint: ApiEndpoint[Any, R],
-    result_url_extractor: Optional[Callable[[R], str]] = None,
-    node_id: Optional[str] = None,
-) -> R:
-    """Polls the Moonvalley API endpoint until the task reaches a terminal state, then returns the response."""
-    return await PollingOperation(
-        poll_endpoint=api_endpoint,
-        completed_statuses=[
-            "completed",
-        ],
-        max_poll_attempts=240,  # 64 minutes with 16s interval
-        poll_interval=16.0,
-        failed_statuses=["error"],
-        status_extractor=lambda response: (
-            response.status if response and response.status else None
-        ),
-        auth_kwargs=auth_kwargs,
-        result_url_extractor=result_url_extractor,
-        node_id=node_id,
-    ).execute()
-
-
-def validate_prompts(
-    prompt: str, negative_prompt: str, max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH
-):
-    """Verifies that the prompt isn't empty and that neither prompt is too long."""
-    if not prompt:
-        raise ValueError("Positive prompt is empty")
-    if len(prompt) > max_length:
-        raise ValueError(f"Positive prompt is too long: {len(prompt)} characters")
-    if negative_prompt and len(negative_prompt) > max_length:
-        raise ValueError(
-            f"Negative prompt is too long: {len(negative_prompt)} characters"
-        )
-    return True
+        raise RuntimeError(error_msg)
 
 
 def validate_video_to_video_input(video: VideoInput) -> VideoInput:
@@ -170,12 +102,8 @@ def _validate_video_dimensions(width: int, height: int) -> None:
     }
 
     if (width, height) not in supported_resolutions:
-        supported_list = ", ".join(
-            [f"{w}x{h}" for w, h in sorted(supported_resolutions)]
-        )
-        raise ValueError(
-            f"Resolution {width}x{height} not supported. Supported: {supported_list}"
-        )
+        supported_list = ", ".join([f"{w}x{h}" for w, h in sorted(supported_resolutions)])
+        raise ValueError(f"Resolution {width}x{height} not supported. Supported: {supported_list}")
 
 
 def _validate_and_trim_duration(video: VideoInput) -> VideoInput:
@@ -188,7 +116,7 @@ def _validate_and_trim_duration(video: VideoInput) -> VideoInput:
 def _validate_minimum_duration(duration: float) -> None:
     """Ensures video is at least 5 seconds long."""
     if duration < 5:
-        raise MoonvalleyApiError("Input video must be at least 5 seconds long.")
+        raise ValueError("Input video must be at least 5 seconds long.")
 
 
 def _trim_if_too_long(video: VideoInput, duration: float) -> VideoInput:
@@ -198,123 +126,6 @@ def _trim_if_too_long(video: VideoInput, duration: float) -> VideoInput:
     return video
 
 
-def trim_video(video: VideoInput, duration_sec: float) -> VideoInput:
-    """
-    Returns a new VideoInput object trimmed from the beginning to the specified duration,
-    using av to avoid loading entire video into memory.
-
-    Args:
-        video: Input video to trim
-        duration_sec: Duration in seconds to keep from the beginning
-
-    Returns:
-        VideoFromFile object that owns the output buffer
-    """
-    output_buffer = io.BytesIO()
-
-    input_container = None
-    output_container = None
-
-    try:
-        # Get the stream source - this avoids loading entire video into memory
-        # when the source is already a file path
-        input_source = video.get_stream_source()
-
-        # Open containers
-        input_container = av.open(input_source, mode="r")
-        output_container = av.open(output_buffer, mode="w", format="mp4")
-
-        # Set up output streams for re-encoding
-        video_stream = None
-        audio_stream = None
-
-        for stream in input_container.streams:
-            logging.info("Found stream: type=%s, class=%s", stream.type, type(stream))
-            if isinstance(stream, av.VideoStream):
-                # Create output video stream with same parameters
-                video_stream = output_container.add_stream(
-                    "h264", rate=stream.average_rate
-                )
-                video_stream.width = stream.width
-                video_stream.height = stream.height
-                video_stream.pix_fmt = "yuv420p"
-                logging.info(
-                    "Added video stream: %sx%s @ %sfps", stream.width, stream.height, stream.average_rate
-                )
-            elif isinstance(stream, av.AudioStream):
-                # Create output audio stream with same parameters
-                audio_stream = output_container.add_stream(
-                    "aac", rate=stream.sample_rate
-                )
-                audio_stream.sample_rate = stream.sample_rate
-                audio_stream.layout = stream.layout
-                logging.info("Added audio stream: %sHz, %s channels", stream.sample_rate, stream.channels)
-
-        # Calculate target frame count that's divisible by 16
-        fps = input_container.streams.video[0].average_rate
-        estimated_frames = int(duration_sec * fps)
-        target_frames = (
-            estimated_frames // 16
-        ) * 16  # Round down to nearest multiple of 16
-
-        if target_frames == 0:
-            raise ValueError("Video too short: need at least 16 frames for Moonvalley")
-
-        frame_count = 0
-        audio_frame_count = 0
-
-        # Decode and re-encode video frames
-        if video_stream:
-            for frame in input_container.decode(video=0):
-                if frame_count >= target_frames:
-                    break
-
-                # Re-encode frame
-                for packet in video_stream.encode(frame):
-                    output_container.mux(packet)
-                frame_count += 1
-
-            # Flush encoder
-            for packet in video_stream.encode():
-                output_container.mux(packet)
-
-            logging.info("Encoded %s video frames (target: %s)", frame_count, target_frames)
-
-        # Decode and re-encode audio frames
-        if audio_stream:
-            input_container.seek(0)  # Reset to beginning for audio
-            for frame in input_container.decode(audio=0):
-                if frame.time >= duration_sec:
-                    break
-
-                # Re-encode frame
-                for packet in audio_stream.encode(frame):
-                    output_container.mux(packet)
-                audio_frame_count += 1
-
-            # Flush encoder
-            for packet in audio_stream.encode():
-                output_container.mux(packet)
-
-            logging.info("Encoded %s audio frames", audio_frame_count)
-
-        # Close containers
-        output_container.close()
-        input_container.close()
-
-        # Return as VideoFromFile using the buffer
-        output_buffer.seek(0)
-        return InputImpl.VideoFromFile(output_buffer)
-
-    except Exception as e:
-        # Clean up on error
-        if input_container is not None:
-            input_container.close()
-        if output_container is not None:
-            output_container.close()
-        raise RuntimeError(f"Failed to trim video: {str(e)}") from e
-
-
 def parse_width_height_from_res(resolution: str):
     # Accepts a string like "16:9 (1920 x 1080)" and returns width, height as a dict
     res_map = {
@@ -338,19 +149,14 @@ def parse_control_parameter(value):
     return control_map.get(value, control_map["Motion Transfer"])
 
 
-async def get_response(
-    task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
-) -> MoonvalleyPromptResponse:
-    return await poll_until_finished(
-        auth_kwargs,
-        ApiEndpoint(
-            path=f"{API_PROMPTS_ENDPOINT}/{task_id}",
-            method=HttpMethod.GET,
-            request_model=EmptyRequest,
-            response_model=MoonvalleyPromptResponse,
-        ),
-        result_url_extractor=get_video_url_from_response,
-        node_id=node_id,
+async def get_response(cls: type[IO.ComfyNode], task_id: str) -> MoonvalleyPromptResponse:
+    return await poll_op(
+        cls,
+        ApiEndpoint(path=f"{API_PROMPTS_ENDPOINT}/{task_id}"),
+        response_model=MoonvalleyPromptResponse,
+        status_extractor=lambda r: (r.status if r and r.status else None),
+        poll_interval=16.0,
+        max_poll_attempts=240,
     )
 
 
@@ -444,14 +250,10 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode):
         steps: int,
     ) -> IO.NodeOutput:
         validate_image_dimensions(image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH)
-        validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
+        validate_string(prompt, min_length=1, max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
+        validate_string(negative_prompt, field_name="negative_prompt", max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
         width_height = parse_width_height_from_res(resolution)
 
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         inference_params = MoonvalleyTextToVideoInferenceParams(
             negative_prompt=negative_prompt,
             steps=steps,
@@ -464,33 +266,17 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode):
 
         # Get MIME type from tensor - assuming PNG format for image tensors
         mime_type = "image/png"
-
-        image_url = (
-            await upload_images_to_comfyapi(
-                image, max_images=1, auth_kwargs=auth, mime_type=mime_type
-            )
-        )[0]
-
-        request = MoonvalleyTextToVideoRequest(
-            image_url=image_url, prompt_text=prompt, inference_params=inference_params
-        )
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=API_IMG2VIDEO_ENDPOINT,
-                method=HttpMethod.POST,
-                request_model=MoonvalleyTextToVideoRequest,
-                response_model=MoonvalleyPromptResponse,
+        image_url = (await upload_images_to_comfyapi(cls, image, max_images=1, mime_type=mime_type))[0]
+        task_creation_response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path=API_IMG2VIDEO_ENDPOINT, method="POST"),
+            response_model=MoonvalleyPromptResponse,
+            data=MoonvalleyTextToVideoRequest(
+                image_url=image_url, prompt_text=prompt, inference_params=inference_params
             ),
-            request=request,
-            auth_kwargs=auth,
         )
-        task_creation_response = await initial_operation.execute()
         validate_task_creation_response(task_creation_response)
-        task_id = task_creation_response.id
-
-        final_response = await get_response(
-            task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id
-        )
+        final_response = await get_response(cls, task_creation_response.id)
         video = await download_url_to_video_output(final_response.output_url)
         return IO.NodeOutput(video)
 
@@ -582,15 +368,10 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode):
         steps=33,
         prompt_adherence=4.5,
     ) -> IO.NodeOutput:
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         validated_video = validate_video_to_video_input(video)
-        video_url = await upload_video_to_comfyapi(validated_video, auth_kwargs=auth)
-
-        validate_prompts(prompt, negative_prompt)
+        video_url = await upload_video_to_comfyapi(cls, validated_video)
+        validate_string(prompt, min_length=1, max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
+        validate_string(negative_prompt, field_name="negative_prompt", max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
 
         # Only include motion_intensity for Motion Transfer
         control_params = {}
@@ -605,35 +386,20 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode):
             guidance_scale=prompt_adherence,
         )
 
-        control = parse_control_parameter(control_type)
-
-        request = MoonvalleyVideoToVideoRequest(
-            control_type=control,
-            video_url=video_url,
-            prompt_text=prompt,
-            inference_params=inference_params,
-        )
-
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=API_VIDEO2VIDEO_ENDPOINT,
-                method=HttpMethod.POST,
-                request_model=MoonvalleyVideoToVideoRequest,
-                response_model=MoonvalleyPromptResponse,
+        task_creation_response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path=API_VIDEO2VIDEO_ENDPOINT, method="POST"),
+            response_model=MoonvalleyPromptResponse,
+            data=MoonvalleyVideoToVideoRequest(
+                control_type=parse_control_parameter(control_type),
+                video_url=video_url,
+                prompt_text=prompt,
+                inference_params=inference_params,
             ),
-            request=request,
-            auth_kwargs=auth,
         )
-        task_creation_response = await initial_operation.execute()
         validate_task_creation_response(task_creation_response)
-        task_id = task_creation_response.id
-
-        final_response = await get_response(
-            task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id
-        )
-
-        video = await download_url_to_video_output(final_response.output_url)
-        return IO.NodeOutput(video)
+        final_response = await get_response(cls, task_creation_response.id)
+        return IO.NodeOutput(await download_url_to_video_output(final_response.output_url))
 
 
 class MoonvalleyTxt2VideoNode(IO.ComfyNode):
@@ -720,14 +486,10 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode):
         seed: int,
         steps: int,
     ) -> IO.NodeOutput:
-        validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
+        validate_string(prompt, min_length=1, max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
+        validate_string(negative_prompt, field_name="negative_prompt", max_length=MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
         width_height = parse_width_height_from_res(resolution)
 
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         inference_params = MoonvalleyTextToVideoInferenceParams(
             negative_prompt=negative_prompt,
             steps=steps,
@@ -737,30 +499,16 @@ class MoonvalleyTxt2VideoNode(IO.ComfyNode):
             width=width_height["width"],
             height=width_height["height"],
         )
-        request = MoonvalleyTextToVideoRequest(
-            prompt_text=prompt, inference_params=inference_params
-        )
-        init_op = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=API_TXT2VIDEO_ENDPOINT,
-                method=HttpMethod.POST,
-                request_model=MoonvalleyTextToVideoRequest,
-                response_model=MoonvalleyPromptResponse,
-            ),
-            request=request,
-            auth_kwargs=auth,
+        task_creation_response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path=API_TXT2VIDEO_ENDPOINT, method="POST"),
+            response_model=MoonvalleyPromptResponse,
+            data=MoonvalleyTextToVideoRequest(prompt_text=prompt, inference_params=inference_params),
         )
-        task_creation_response = await init_op.execute()
         validate_task_creation_response(task_creation_response)
-        task_id = task_creation_response.id
-
-        final_response = await get_response(
-            task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id
-        )
-
-        video = await download_url_to_video_output(final_response.output_url)
-        return IO.NodeOutput(video)
+        final_response = await get_response(cls, task_creation_response.id)
+        return IO.NodeOutput(await download_url_to_video_output(final_response.output_url))
 
 
 class MoonvalleyExtension(ComfyExtension):
diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py
index e3b81de75..c467e840c 100644
--- a/comfy_api_nodes/nodes_openai.py
+++ b/comfy_api_nodes/nodes_openai.py
@@ -43,13 +43,11 @@ from comfy_api_nodes.apis.client import (
 )
 
 from comfy_api_nodes.apinode_utils import (
-    downscale_image_tensor,
     validate_and_cast_response,
-    validate_string,
-    tensor_to_base64_string,
     text_filepath_to_data_uri,
 )
 from comfy_api_nodes.mapper_utils import model_field_to_node_input
+from comfy_api_nodes.util import downscale_image_tensor, validate_string, tensor_to_base64_string
 
 RESPONSES_ENDPOINT = "/proxy/openai/v1/responses"
diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py
index 27cb0067b..5bb406a3b 100644
--- a/comfy_api_nodes/nodes_pika.py
+++ b/comfy_api_nodes/nodes_pika.py
@@ -14,11 +14,6 @@ import torch
 from typing_extensions import override
 from comfy_api.latest import ComfyExtension, IO
 from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput
-from comfy_api_nodes.apinode_utils import (
-    download_url_to_video_output,
-    tensor_to_bytesio,
-    validate_string,
-)
 from comfy_api_nodes.apis import pika_defs
 from comfy_api_nodes.apis.client import (
     ApiEndpoint,
@@ -27,6 +22,7 @@ from comfy_api_nodes.apis.client import (
     PollingOperation,
     SynchronousOperation,
 )
+from comfy_api_nodes.util import validate_string, download_url_to_video_output, tensor_to_bytesio
 
 R = TypeVar("R")
diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py
index 438a7f80b..b2b841be8 100644
--- a/comfy_api_nodes/nodes_pixverse.py
+++ b/comfy_api_nodes/nodes_pixverse.py
@@ -24,10 +24,7 @@ from comfy_api_nodes.apis.client import (
     PollingOperation,
     EmptyRequest,
 )
-from comfy_api_nodes.apinode_utils import (
-    tensor_to_bytesio,
-    validate_string,
-)
+from comfy_api_nodes.util import validate_string, tensor_to_bytesio
 from comfy_api.input_impl import VideoFromFile
 from comfy_api.latest import ComfyExtension, IO
 
@@ -50,7 +47,6 @@ def get_video_url_from_response(
 
 async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None):
     # first, upload image to Pixverse and get image id to use in actual generation call
-    files = {"image": tensor_to_bytesio(image)}
     operation = SynchronousOperation(
         endpoint=ApiEndpoint(
             path="/proxy/pixverse/image/upload",
@@ -59,16 +55,14 @@ async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None):
             response_model=PixverseImageUploadResponse,
         ),
         request=EmptyRequest(),
-        files=files,
+        files={"image": tensor_to_bytesio(image)},
         content_type="multipart/form-data",
         auth_kwargs=auth_kwargs,
     )
     response_upload: PixverseImageUploadResponse = await operation.execute()
 
     if response_upload.Resp is None:
-        raise Exception(
-            f"PixVerse image upload request failed: '{response_upload.ErrMsg}'"
-        )
+        raise Exception(f"PixVerse image upload request failed: '{response_upload.ErrMsg}'")
 
     return response_upload.Resp.img_id
 
@@ -95,7 +89,6 @@ class PixverseTemplateNode(IO.ComfyNode):
         template_id = pixverse_templates.get(template, None)
         if template_id is None:
             raise Exception(f"Template '{template}' is not recognized.")
-        # just return the integer
         return IO.NodeOutput(template_id)
diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py
index 8beed5675..8ee7e55c4 100644
--- a/comfy_api_nodes/nodes_recraft.py
+++ b/comfy_api_nodes/nodes_recraft.py
@@ -24,12 +24,10 @@ from comfy_api_nodes.apis.client import (
     EmptyRequest,
 )
 from comfy_api_nodes.apinode_utils import (
-    bytesio_to_image_tensor,
     download_url_to_bytesio,
-    tensor_to_bytesio,
     resize_mask_to_image,
-    validate_string,
 )
+from comfy_api_nodes.util import validate_string, tensor_to_bytesio, bytesio_to_image_tensor
 from server import PromptServer
 
 import torch
diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py
index eb03a897d..0543d1d0e 100644
--- a/comfy_api_nodes/nodes_runway.py
+++ b/comfy_api_nodes/nodes_runway.py
@@ -11,7 +11,7 @@ User Guides:
 
 """
 
-from typing import Union, Optional, Any
+from typing import Union, Optional
 from typing_extensions import override
 from enum import Enum
 
@@ -21,7 +21,6 @@ from comfy_api_nodes.apis import (
     RunwayImageToVideoRequest,
     RunwayImageToVideoResponse,
     RunwayTaskStatusResponse as TaskStatusResponse,
-    RunwayTaskStatusEnum as TaskStatus,
     RunwayModelEnum as Model,
     RunwayDurationEnum as Duration,
     RunwayAspectRatioEnum as AspectRatio,
@@ -33,23 +32,20 @@ from comfy_api_nodes.apis import (
     ReferenceImage,
     RunwayTextToImageAspectRatioEnum,
 )
-from comfy_api_nodes.apis.client import (
-    ApiEndpoint,
-    HttpMethod,
-    SynchronousOperation,
-    PollingOperation,
-    EmptyRequest,
-)
-from comfy_api_nodes.apinode_utils import (
-    upload_images_to_comfyapi,
-    download_url_to_video_output,
+from comfy_api_nodes.util import (
     image_tensor_pair_to_batch,
     validate_string,
+    validate_image_dimensions,
+    validate_image_aspect_ratio,
+    upload_images_to_comfyapi,
+    download_url_to_video_output,
     download_url_to_image_tensor,
+    ApiEndpoint,
+    sync_op,
+    poll_op,
 )
 from comfy_api.input_impl import VideoFromFile
 from comfy_api.latest import ComfyExtension, IO
-from comfy_api_nodes.util.validation_utils import validate_image_dimensions, validate_image_aspect_ratio
 
 PATH_IMAGE_TO_VIDEO = "/proxy/runway/image_to_video"
 PATH_TEXT_TO_IMAGE = "/proxy/runway/text_to_image"
@@ -91,31 +87,6 @@ def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, N
     return None
 
 
-async def poll_until_finished(
-    auth_kwargs: dict[str, str],
-    api_endpoint: ApiEndpoint[Any, TaskStatusResponse],
-    estimated_duration: Optional[int] = None,
-    node_id: Optional[str] = None,
-) -> TaskStatusResponse:
-    """Polls the Runway API endpoint until the task reaches a terminal state, then returns the response."""
-    return await PollingOperation(
-        poll_endpoint=api_endpoint,
-        completed_statuses=[
-            TaskStatus.SUCCEEDED.value,
-        ],
-        failed_statuses=[
-            TaskStatus.FAILED.value,
-            TaskStatus.CANCELLED.value,
-        ],
-        status_extractor=lambda response: response.status.value,
-        auth_kwargs=auth_kwargs,
-        result_url_extractor=get_video_url_from_task_status,
-        estimated_duration=estimated_duration,
-        node_id=node_id,
-        progress_extractor=extract_progress_from_task_status,
-    ).execute()
-
-
 def extract_progress_from_task_status(
     response: TaskStatusResponse,
 ) -> Union[float, None]:
@@ -132,42 +103,32 @@ def get_image_url_from_task_status(response: TaskStatusResponse) -> Union[str, N
 
 
 async def get_response(
-    task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None, estimated_duration: Optional[int] = None
+    cls: type[IO.ComfyNode], task_id: str, estimated_duration: Optional[int] = None
 ) -> TaskStatusResponse:
     """Poll the task status until it is finished then get the response."""
-    return await poll_until_finished(
-        auth_kwargs,
-        ApiEndpoint(
-            path=f"{PATH_GET_TASK_STATUS}/{task_id}",
-            method=HttpMethod.GET,
-            request_model=EmptyRequest,
-            response_model=TaskStatusResponse,
-        ),
+    return await poll_op(
+        cls,
+        ApiEndpoint(path=f"{PATH_GET_TASK_STATUS}/{task_id}"),
+        response_model=TaskStatusResponse,
+        status_extractor=lambda r: r.status.value,
         estimated_duration=estimated_duration,
-        node_id=node_id,
+        progress_extractor=extract_progress_from_task_status,
     )
 
 
 async def generate_video(
+    cls: type[IO.ComfyNode],
     request: RunwayImageToVideoRequest,
-    auth_kwargs: dict[str, str],
-    node_id: Optional[str] = None,
     estimated_duration: Optional[int] = None,
 ) -> VideoFromFile:
-    initial_operation = SynchronousOperation(
-        endpoint=ApiEndpoint(
-            path=PATH_IMAGE_TO_VIDEO,
-            method=HttpMethod.POST,
-            request_model=RunwayImageToVideoRequest,
-            response_model=RunwayImageToVideoResponse,
-        ),
-        request=request,
-        auth_kwargs=auth_kwargs,
+    initial_response = await sync_op(
+        cls,
+        endpoint=ApiEndpoint(path=PATH_IMAGE_TO_VIDEO, method="POST"),
+        response_model=RunwayImageToVideoResponse,
+        data=request,
     )
-    initial_response = await initial_operation.execute()
-
-    final_response = await get_response(initial_response.id, auth_kwargs, node_id, estimated_duration)
+    final_response = await get_response(cls, initial_response.id, estimated_duration)
 
     if not final_response.output:
         raise RunwayApiError("Runway task succeeded but no video data found in response.")
@@ -184,9 +145,9 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
             display_name="Runway Image to Video (Gen3a Turbo)",
             category="api node/video/Runway",
             description="Generate a video from a single starting frame using Gen3a Turbo model. "
-            "Before diving in, review these best practices to ensure that "
-            "your input selections will set your generation up for success: "
-            "https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
+                        "Before diving in, review these best practices to ensure that "
+                        "your input selections will set your generation up for success: "
+                        "https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -241,20 +202,16 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
         validate_image_dimensions(start_frame, max_width=7999, max_height=7999)
         validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0)
 
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         download_urls = await upload_images_to_comfyapi(
+            cls,
             start_frame,
             max_images=1,
             mime_type="image/png",
-            auth_kwargs=auth_kwargs,
         )
 
         return IO.NodeOutput(
             await generate_video(
+                cls,
                 RunwayImageToVideoRequest(
                     promptText=prompt,
                     seed=seed,
@@ -262,15 +219,9 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
                     duration=Duration(duration),
                     ratio=AspectRatio(ratio),
                     promptImage=RunwayPromptImageObject(
-                        root=[
-                            RunwayPromptImageDetailedObject(
-                                uri=str(download_urls[0]), position="first"
-                            )
-                        ]
+                        root=[RunwayPromptImageDetailedObject(uri=str(download_urls[0]), position="first")]
                     ),
                 ),
-                auth_kwargs=auth_kwargs,
-                node_id=cls.hidden.unique_id,
             )
         )
 
@@ -284,9 +235,9 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
             display_name="Runway Image to Video (Gen4 Turbo)",
             category="api node/video/Runway",
             description="Generate a video from a single starting frame using Gen4 Turbo model. "
-            "Before diving in, review these best practices to ensure that "
-            "your input selections will set your generation up for success: "
-            "https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
+                        "Before diving in, review these best practices to ensure that "
+                        "your input selections will set your generation up for success: "
+                        "https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -341,20 +292,16 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
         validate_image_dimensions(start_frame, max_width=7999, max_height=7999)
         validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0)
 
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         download_urls = await upload_images_to_comfyapi(
+            cls,
             start_frame,
             max_images=1,
             mime_type="image/png",
-            auth_kwargs=auth_kwargs,
         )
 
         return IO.NodeOutput(
             await generate_video(
+                cls,
                 RunwayImageToVideoRequest(
                     promptText=prompt,
                     seed=seed,
@@ -362,15 +309,9 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode):
                     duration=Duration(duration),
                     ratio=AspectRatio(ratio),
                     promptImage=RunwayPromptImageObject(
-                        root=[
-                            RunwayPromptImageDetailedObject(
-                                uri=str(download_urls[0]), position="first"
-                            )
-                        ]
+                        root=[RunwayPromptImageDetailedObject(uri=str(download_urls[0]), position="first")]
                     ),
                 ),
-                auth_kwargs=auth_kwargs,
-                node_id=cls.hidden.unique_id,
                 estimated_duration=AVERAGE_DURATION_FLF_SECONDS,
             )
         )
@@ -385,12 +326,12 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
             display_name="Runway First-Last-Frame to Video",
             category="api node/video/Runway",
             description="Upload first and last keyframes, draft a prompt, and generate a video. "
-            "More complex transitions, such as cases where the Last frame is completely different "
-            "from the First frame, may benefit from the longer 10s duration. "
-            "This would give the generation more time to smoothly transition between the two inputs. "
-            "Before diving in, review these best practices to ensure that your input selections "
-            "will set your generation up for success: "
-            "https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
+                        "More complex transitions, such as cases where the Last frame is completely different "
+                        "from the First frame, may benefit from the longer 10s duration. "
+                        "This would give the generation more time to smoothly transition between the two inputs. "
+                        "Before diving in, review these best practices to ensure that your input selections "
+                        "will set your generation up for success: "
+                        "https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -452,23 +393,19 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
         validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0)
         validate_image_aspect_ratio(end_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0)
 
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         stacked_input_images = image_tensor_pair_to_batch(start_frame, end_frame)
         download_urls = await upload_images_to_comfyapi(
+            cls,
             stacked_input_images,
             max_images=2,
             mime_type="image/png",
-            auth_kwargs=auth_kwargs,
         )
         if len(download_urls) != 2:
             raise RunwayApiError("Failed to upload one or more images to comfy api.")
 
         return IO.NodeOutput(
             await generate_video(
+                cls,
                 RunwayImageToVideoRequest(
                     promptText=prompt,
                     seed=seed,
@@ -477,17 +414,11 @@ class RunwayFirstLastFrameNode(IO.ComfyNode):
                     ratio=AspectRatio(ratio),
                     promptImage=RunwayPromptImageObject(
                         root=[
-                            RunwayPromptImageDetailedObject(
-                                uri=str(download_urls[0]), position="first"
-                            ),
-                            RunwayPromptImageDetailedObject(
-                                uri=str(download_urls[1]), position="last"
-                            ),
+                            RunwayPromptImageDetailedObject(uri=str(download_urls[0]), position="first"),
+                            RunwayPromptImageDetailedObject(uri=str(download_urls[1]), position="last"),
                         ]
                     ),
                 ),
-                auth_kwargs=auth_kwargs,
-                node_id=cls.hidden.unique_id,
                 estimated_duration=AVERAGE_DURATION_FLF_SECONDS,
             )
         )
@@ -502,7 +433,7 @@ class RunwayTextToImageNode(IO.ComfyNode):
             display_name="Runway Text to Image",
             category="api node/image/Runway",
             description="Generate an image from a text prompt using Runway's Gen 4 model. "
-            "You can also include reference image to guide the generation.",
+                        "You can also include a reference image to guide the generation.",
             inputs=[
                 IO.String.Input(
                     "prompt",
@@ -540,49 +471,34 @@ class RunwayTextToImageNode(IO.ComfyNode):
     ) -> IO.NodeOutput:
         validate_string(prompt, min_length=1)
 
-        auth_kwargs = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         # Prepare reference images if provided
         reference_images = None
         if reference_image is not None:
             validate_image_dimensions(reference_image, max_width=7999, max_height=7999)
             validate_image_aspect_ratio(reference_image, min_aspect_ratio=0.5, max_aspect_ratio=2.0)
             download_urls = await upload_images_to_comfyapi(
+                cls,
                 reference_image,
                 max_images=1,
                 mime_type="image/png",
-                auth_kwargs=auth_kwargs,
             )
             reference_images = [ReferenceImage(uri=str(download_urls[0]))]
 
-        request = RunwayTextToImageRequest(
-            promptText=prompt,
-            model=Model4.gen4_image,
-            ratio=ratio,
-            referenceImages=reference_images,
-        )
-
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=PATH_TEXT_TO_IMAGE,
-                method=HttpMethod.POST,
-                request_model=RunwayTextToImageRequest,
-                response_model=RunwayTextToImageResponse,
+        initial_response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path=PATH_TEXT_TO_IMAGE, method="POST"),
+            response_model=RunwayTextToImageResponse,
+            data=RunwayTextToImageRequest(
+                promptText=prompt,
+                model=Model4.gen4_image,
+                ratio=ratio,
+                referenceImages=reference_images,
             ),
-            request=request,
-            auth_kwargs=auth_kwargs,
         )
-        initial_response = await initial_operation.execute()
-
-        # Poll for completion
         final_response = await get_response(
+            cls,
             initial_response.id,
-            auth_kwargs=auth_kwargs,
-            node_id=cls.hidden.unique_id,
             estimated_duration=AVERAGE_DURATION_T2I_SECONDS,
         )
         if not final_response.output:
@@ -601,5 +517,6 @@ class RunwayExtension(ComfyExtension):
             RunwayTextToImageNode,
         ]
 
+
 async def comfy_entrypoint() -> RunwayExtension:
     return RunwayExtension()
diff --git a/comfy_api_nodes/nodes_sora.py b/comfy_api_nodes/nodes_sora.py
index efc954869..92b225d40 100644
--- a/comfy_api_nodes/nodes_sora.py
+++ b/comfy_api_nodes/nodes_sora.py
@@ -1,23 +1,20 @@
 from typing import Optional
-from typing_extensions import override
 
 import torch
 from pydantic import BaseModel, Field
-from comfy_api.latest import ComfyExtension, IO
-from comfy_api_nodes.apis.client import (
-    ApiEndpoint,
-    HttpMethod,
-    SynchronousOperation,
-    PollingOperation,
-    EmptyRequest,
-)
-from comfy_api_nodes.util.validation_utils import get_number_of_images
+from typing_extensions import override
 
-from comfy_api_nodes.apinode_utils import (
+from comfy_api.latest import IO, ComfyExtension
+from comfy_api_nodes.util import (
+    ApiEndpoint,
     download_url_to_video_output,
+    get_number_of_images,
+    poll_op,
+    sync_op,
     tensor_to_bytesio,
 )
 
+
 class Sora2GenerationRequest(BaseModel):
     prompt: str = Field(...)
     model: str = Field(...)
@@ -80,7 +77,7 @@ class OpenAIVideoSora2(IO.ComfyNode):
                     control_after_generate=True,
                     optional=True,
                     tooltip="Seed to determine if node should re-run; "
-                    "actual results are nondeterministic regardless of seed.",
+                            "actual results are nondeterministic regardless of seed.",
                 ),
             ],
             outputs=[
@@ -111,55 +108,34 @@ class OpenAIVideoSora2(IO.ComfyNode):
             if get_number_of_images(image) != 1:
                 raise ValueError("Currently only one input image is supported.")
             files_input = {"input_reference": ("image.png", tensor_to_bytesio(image), "image/png")}
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-        payload = Sora2GenerationRequest(
-            model=model,
-            prompt=prompt,
-            seconds=str(duration),
-            size=size,
-        )
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path="/proxy/openai/v1/videos",
-                method=HttpMethod.POST,
-                request_model=Sora2GenerationRequest,
-                response_model=Sora2GenerationResponse
+        initial_response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path="/proxy/openai/v1/videos", method="POST"),
+            data=Sora2GenerationRequest(
+                model=model,
+                prompt=prompt,
+                seconds=str(duration),
+                size=size,
             ),
-            request=payload,
             files=files_input,
-            auth_kwargs=auth,
+            response_model=Sora2GenerationResponse,
             content_type="multipart/form-data",
         )
-        initial_response = await initial_operation.execute()
         if initial_response.error:
-            raise Exception(initial_response.error.message)
+            raise Exception(initial_response.error["message"])
         model_time_multiplier = 1 if model == "sora-2" else 2
-        poll_operation = PollingOperation(
-            poll_endpoint=ApiEndpoint(
-                path=f"/proxy/openai/v1/videos/{initial_response.id}",
-                method=HttpMethod.GET,
-                request_model=EmptyRequest,
-                response_model=Sora2GenerationResponse
-            ),
-            completed_statuses=["completed"],
-            failed_statuses=["failed"],
+        await poll_op(
+            cls,
+            ApiEndpoint(path=f"/proxy/openai/v1/videos/{initial_response.id}"),
+            response_model=Sora2GenerationResponse,
             status_extractor=lambda x: x.status,
-            auth_kwargs=auth,
             poll_interval=8.0,
             max_poll_attempts=160,
-            node_id=cls.hidden.unique_id,
-            estimated_duration=45 * (duration / 4) * model_time_multiplier,
+            estimated_duration=int(45 * (duration / 4) * model_time_multiplier),
         )
-        await poll_operation.execute()
 
         return IO.NodeOutput(
-            await download_url_to_video_output(
-                f"/proxy/openai/v1/videos/{initial_response.id}/content",
-                auth_kwargs=auth,
-            )
+            await download_url_to_video_output(f"/proxy/openai/v1/videos/{initial_response.id}/content", cls=cls),
        )
diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py
index 8af03cfd1..783666ddf 100644
--- a/comfy_api_nodes/nodes_stability.py
+++ b/comfy_api_nodes/nodes_stability.py
@@ -27,14 +27,14 @@ from comfy_api_nodes.apis.client import (
     PollingOperation,
     EmptyRequest,
 )
-from comfy_api_nodes.apinode_utils import (
+from comfy_api_nodes.util import (
+    validate_audio_duration,
+    validate_string,
+    audio_input_to_mp3,
     bytesio_to_image_tensor,
     tensor_to_bytesio,
-    validate_string,
     audio_bytes_to_audio_input,
-    audio_input_to_mp3,
 )
-from comfy_api_nodes.util.validation_utils import validate_audio_duration
 
 import torch
 import base64
diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py
index daeaa823e..d37e9e9b4 100644
--- a/comfy_api_nodes/nodes_veo2.py
+++ b/comfy_api_nodes/nodes_veo2.py
@@ -1,28 +1,21 @@
-import logging
 import base64
-import aiohttp
-import torch
 from io import BytesIO
-from typing import Optional
+
 from typing_extensions import override
 
-from comfy_api.latest import ComfyExtension, IO
 from comfy_api.input_impl.video_types import VideoFromFile
-from comfy_api_nodes.apis import (
-    VeoGenVidRequest,
-    VeoGenVidResponse,
+from comfy_api.latest import IO, ComfyExtension
+from comfy_api_nodes.apis.veo_api import (
     VeoGenVidPollRequest,
     VeoGenVidPollResponse,
+    VeoGenVidRequest,
+    VeoGenVidResponse,
 )
-from comfy_api_nodes.apis.client import (
+from comfy_api_nodes.util import (
     ApiEndpoint,
-    HttpMethod,
-    SynchronousOperation,
-    PollingOperation,
-)
-
-from comfy_api_nodes.apinode_utils import (
-    downscale_image_tensor,
+    download_url_to_video_output,
+    poll_op,
+    sync_op,
     tensor_to_base64_string,
 )
 
@@ -35,28 +28,6 @@ MODELS_MAP = {
     "veo-3.0-fast-generate-001": "veo-3.0-fast-generate-001",
 }
 
-def convert_image_to_base64(image: torch.Tensor):
-    if image is None:
-        return None
-
-    scaled_image = downscale_image_tensor(image, total_pixels=2048*2048)
-    return tensor_to_base64_string(scaled_image)
-
-
-def get_video_url_from_response(poll_response: VeoGenVidPollResponse) -> Optional[str]:
-    if (
-        poll_response.response
-        and hasattr(poll_response.response, "videos")
-        and poll_response.response.videos
-        and len(poll_response.response.videos) > 0
-    ):
-        video = poll_response.response.videos[0]
-    else:
-        return None
-    if hasattr(video, "gcsUri") and video.gcsUri:
-        return str(video.gcsUri)
-    return None
-
 
 class VeoVideoGenerationNode(IO.ComfyNode):
     """
@@ -169,18 +140,13 @@ class VeoVideoGenerationNode(IO.ComfyNode):
 
         # Prepare the instances for the request
         instances = []
 
-        instance = {
-            "prompt": prompt
-        }
+        instance = {"prompt": prompt}
 
         # Add image if provided
         if image is not None:
-            image_base64 = convert_image_to_base64(image)
+            image_base64 = tensor_to_base64_string(image)
             if image_base64:
-                instance["image"] = {
-                    "bytesBase64Encoded": image_base64,
-                    "mimeType": "image/png"
-                }
+                instance["image"] = {"bytesBase64Encoded": image_base64, "mimeType": "image/png"}
 
         instances.append(instance)
 
@@ -198,119 +164,77 @@ class VeoVideoGenerationNode(IO.ComfyNode):
         if seed > 0:
             parameters["seed"] = seed
-        # Only add generateAudio for Veo 3 models
-        if "veo-3.0" in model:
+        # generateAudio is supported by all models except Veo 2
+        if "veo-2.0" not in model:
             parameters["generateAudio"] = generate_audio
 
-        auth = {
-            "auth_token": cls.hidden.auth_token_comfy_org,
-            "comfy_api_key": cls.hidden.api_key_comfy_org,
-        }
-
         # Initial request to start video generation
-        initial_operation = SynchronousOperation(
-            endpoint=ApiEndpoint(
-                path=f"/proxy/veo/{model}/generate",
-                method=HttpMethod.POST,
-                request_model=VeoGenVidRequest,
-                response_model=VeoGenVidResponse
-            ),
-            request=VeoGenVidRequest(
+        initial_response = await sync_op(
+            cls,
+            ApiEndpoint(path=f"/proxy/veo/{model}/generate", method="POST"),
+            response_model=VeoGenVidResponse,
+            data=VeoGenVidRequest(
                 instances=instances,
-                parameters=parameters
+                parameters=parameters,
             ),
-            auth_kwargs=auth,
         )
 
-        initial_response = await initial_operation.execute()
-        operation_name = initial_response.name
-
-        logging.info("Veo generation started with operation name: %s", operation_name)
-
-        # Define status extractor function
         def status_extractor(response):
             # Only return "completed" if the operation is done, regardless of success or failure
             # We'll check for errors after polling completes
             return "completed" if response.done else "pending"
 
-        # Define progress extractor function
-        def progress_extractor(response):
-            # Could be enhanced if the API provides progress information
-            return None
-
-        # Define the polling 
operation - poll_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/veo/{model}/poll", - method=HttpMethod.POST, - request_model=VeoGenVidPollRequest, - response_model=VeoGenVidPollResponse - ), - completed_statuses=["completed"], - failed_statuses=[], # No failed statuses, we'll handle errors after polling + poll_response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/veo/{model}/poll", method="POST"), + response_model=VeoGenVidPollResponse, status_extractor=status_extractor, - progress_extractor=progress_extractor, - request=VeoGenVidPollRequest( - operationName=operation_name + data=VeoGenVidPollRequest( + operationName=initial_response.name, ), - auth_kwargs=auth, poll_interval=5.0, - result_url_extractor=get_video_url_from_response, - node_id=cls.hidden.unique_id, estimated_duration=AVERAGE_DURATION_VIDEO_GEN, ) - # Execute the polling operation - poll_response = await poll_operation.execute() - # Now check for errors in the final response # Check for error in poll response - if hasattr(poll_response, 'error') and poll_response.error: - error_message = f"Veo API error: {poll_response.error.message} (code: {poll_response.error.code})" - logging.error(error_message) - raise Exception(error_message) + if poll_response.error: + raise Exception(f"Veo API error: {poll_response.error.message} (code: {poll_response.error.code})") # Check for RAI filtered content - if (hasattr(poll_response.response, 'raiMediaFilteredCount') and - poll_response.response.raiMediaFilteredCount > 0): + if ( + hasattr(poll_response.response, "raiMediaFilteredCount") + and poll_response.response.raiMediaFilteredCount > 0 + ): # Extract reason message if available - if (hasattr(poll_response.response, 'raiMediaFilteredReasons') and - poll_response.response.raiMediaFilteredReasons): + if ( + hasattr(poll_response.response, "raiMediaFilteredReasons") + and poll_response.response.raiMediaFilteredReasons + ): reason = poll_response.response.raiMediaFilteredReasons[0] error_message = f"Content filtered by Google's Responsible AI practices: {reason} ({poll_response.response.raiMediaFilteredCount} videos filtered.)" else: error_message = f"Content filtered by Google's Responsible AI practices ({poll_response.response.raiMediaFilteredCount} videos filtered.)" - logging.error(error_message) raise Exception(error_message) # Extract video data - if poll_response.response and hasattr(poll_response.response, 'videos') and poll_response.response.videos and len(poll_response.response.videos) > 0: + if ( + poll_response.response + and hasattr(poll_response.response, "videos") + and poll_response.response.videos + and len(poll_response.response.videos) > 0 + ): video = poll_response.response.videos[0] # Check if video is provided as base64 or URL - if hasattr(video, 'bytesBase64Encoded') and video.bytesBase64Encoded: - # Decode base64 string to bytes - video_data = base64.b64decode(video.bytesBase64Encoded) - elif hasattr(video, 'gcsUri') and video.gcsUri: - # Download from URL - async with aiohttp.ClientSession() as session: - async with session.get(video.gcsUri) as video_response: - video_data = await video_response.content.read() - else: - raise Exception("Video returned but no data or URL was provided") - else: - raise Exception("Video generation completed but no video was returned") + if hasattr(video, "bytesBase64Encoded") and video.bytesBase64Encoded: + return IO.NodeOutput(VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded)))) - if not video_data: - raise Exception("No video data was 
returned") + if hasattr(video, "gcsUri") and video.gcsUri: + return IO.NodeOutput(await download_url_to_video_output(video.gcsUri)) - logging.info("Video generation completed successfully") - - # Convert video data to BytesIO object - video_io = BytesIO(video_data) - - # Return VideoFromFile object - return IO.NodeOutput(VideoFromFile(video_io)) + raise Exception("Video returned but no data or URL was provided") + raise Exception("Video generation completed but no video was returned") class Veo3VideoGenerationNode(VeoVideoGenerationNode): @@ -394,7 +318,10 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): IO.Combo.Input( "model", options=[ - "veo-3.1-generate", "veo-3.1-fast-generate", "veo-3.0-generate-001", "veo-3.0-fast-generate-001" + "veo-3.1-generate", + "veo-3.1-fast-generate", + "veo-3.0-generate-001", + "veo-3.0-fast-generate-001", ], default="veo-3.0-generate-001", tooltip="Veo 3 model to use for video generation", @@ -427,5 +354,6 @@ class VeoExtension(ComfyExtension): Veo3VideoGenerationNode, ] + async def comfy_entrypoint() -> VeoExtension: return VeoExtension() diff --git a/comfy_api_nodes/nodes_vidu.py b/comfy_api_nodes/nodes_vidu.py index 639be4b2b..0e0572f8c 100644 --- a/comfy_api_nodes/nodes_vidu.py +++ b/comfy_api_nodes/nodes_vidu.py @@ -1,27 +1,23 @@ import logging from enum import Enum -from typing import Any, Callable, Optional, Literal, TypeVar -from typing_extensions import override +from typing import Literal, Optional, TypeVar import torch from pydantic import BaseModel, Field +from typing_extensions import override -from comfy_api.latest import ComfyExtension, IO -from comfy_api_nodes.util.validation_utils import ( - validate_aspect_ratio_closeness, - validate_image_dimensions, - validate_image_aspect_ratio_range, - get_number_of_images, -) -from comfy_api_nodes.apis.client import ( +from comfy_api.latest import IO, ComfyExtension +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, - EmptyRequest, + download_url_to_video_output, + get_number_of_images, + poll_op, + sync_op, + upload_images_to_comfyapi, + validate_aspect_ratio_closeness, + validate_image_aspect_ratio_range, + validate_image_dimensions, ) -from comfy_api_nodes.apinode_utils import download_url_to_video_output, upload_images_to_comfyapi - VIDU_TEXT_TO_VIDEO = "/proxy/vidu/text2video" VIDU_IMAGE_TO_VIDEO = "/proxy/vidu/img2video" @@ -31,8 +27,9 @@ VIDU_GET_GENERATION_STATUS = "/proxy/vidu/tasks/%s/creations" R = TypeVar("R") + class VideoModelName(str, Enum): - vidu_q1 = 'viduq1' + vidu_q1 = "viduq1" class AspectRatio(str, Enum): @@ -63,17 +60,9 @@ class TaskCreationRequest(BaseModel): images: Optional[list[str]] = Field(None, description="Base64 encoded string or image URL") -class TaskStatus(str, Enum): - created = "created" - queueing = "queueing" - processing = "processing" - success = "success" - failed = "failed" - - class TaskCreationResponse(BaseModel): task_id: str = Field(...) - state: TaskStatus = Field(...) + state: str = Field(...) created_at: str = Field(...) code: Optional[int] = Field(None, description="Error code") @@ -85,32 +74,11 @@ class TaskResult(BaseModel): class TaskStatusResponse(BaseModel): - state: TaskStatus = Field(...) + state: str = Field(...) 
err_code: Optional[str] = Field(None) creations: list[TaskResult] = Field(..., description="Generated results") -async def poll_until_finished( - auth_kwargs: dict[str, str], - api_endpoint: ApiEndpoint[Any, R], - result_url_extractor: Optional[Callable[[R], str]] = None, - estimated_duration: Optional[int] = None, - node_id: Optional[str] = None, -) -> R: - return await PollingOperation( - poll_endpoint=api_endpoint, - completed_statuses=[TaskStatus.success.value], - failed_statuses=[TaskStatus.failed.value], - status_extractor=lambda response: response.state.value, - auth_kwargs=auth_kwargs, - result_url_extractor=result_url_extractor, - estimated_duration=estimated_duration, - node_id=node_id, - poll_interval=16.0, - max_poll_attempts=256, - ).execute() - - def get_video_url_from_response(response) -> Optional[str]: if response.creations: return response.creations[0].url @@ -127,37 +95,27 @@ def get_video_from_response(response) -> TaskResult: async def execute_task( + cls: type[IO.ComfyNode], vidu_endpoint: str, - auth_kwargs: Optional[dict[str, str]], payload: TaskCreationRequest, estimated_duration: int, - node_id: str, ) -> R: - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path=vidu_endpoint, - method=HttpMethod.POST, - request_model=TaskCreationRequest, - response_model=TaskCreationResponse, - ), - request=payload, - auth_kwargs=auth_kwargs, - ).execute() - if response.state == TaskStatus.failed: + response = await sync_op( + cls, + endpoint=ApiEndpoint(path=vidu_endpoint, method="POST"), + response_model=TaskCreationResponse, + data=payload, + ) + if response.state == "failed": error_msg = f"Vidu request failed. Code: {response.code}" logging.error(error_msg) raise RuntimeError(error_msg) - return await poll_until_finished( - auth_kwargs, - ApiEndpoint( - path=VIDU_GET_GENERATION_STATUS % response.task_id, - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=TaskStatusResponse, - ), - result_url_extractor=get_video_url_from_response, + return await poll_op( + cls, + ApiEndpoint(path=VIDU_GET_GENERATION_STATUS % response.task_id), + response_model=TaskStatusResponse, + status_extractor=lambda r: r.state.value, estimated_duration=estimated_duration, - node_id=node_id, ) @@ -258,11 +216,7 @@ class ViduTextToVideoNode(IO.ComfyNode): resolution=resolution, movement_amplitude=movement_amplitude, ) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - results = await execute_task(VIDU_TEXT_TO_VIDEO, auth, payload, 320, cls.hidden.unique_id) + results = await execute_task(cls, VIDU_TEXT_TO_VIDEO, payload, 320) return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) @@ -362,17 +316,13 @@ class ViduImageToVideoNode(IO.ComfyNode): resolution=resolution, movement_amplitude=movement_amplitude, ) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } payload.images = await upload_images_to_comfyapi( + cls, image, max_images=1, mime_type="image/png", - auth_kwargs=auth, ) - results = await execute_task(VIDU_IMAGE_TO_VIDEO, auth, payload, 120, cls.hidden.unique_id) + results = await execute_task(cls, VIDU_IMAGE_TO_VIDEO, payload, 120) return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) @@ -484,17 +434,13 @@ class ViduReferenceVideoNode(IO.ComfyNode): resolution=resolution, movement_amplitude=movement_amplitude, ) - auth = { - "auth_token": 
cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } payload.images = await upload_images_to_comfyapi( + cls, images, max_images=7, mime_type="image/png", - auth_kwargs=auth, ) - results = await execute_task(VIDU_REFERENCE_VIDEO, auth, payload, 120, cls.hidden.unique_id) + results = await execute_task(cls, VIDU_REFERENCE_VIDEO, payload, 120) return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) @@ -596,15 +542,11 @@ class ViduStartEndToVideoNode(IO.ComfyNode): resolution=resolution, movement_amplitude=movement_amplitude, ) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } payload.images = [ - (await upload_images_to_comfyapi(frame, max_images=1, mime_type="image/png", auth_kwargs=auth))[0] + (await upload_images_to_comfyapi(cls, frame, max_images=1, mime_type="image/png"))[0] for frame in (first_frame, end_frame) ] - results = await execute_task(VIDU_START_END_VIDEO, auth, payload, 96, cls.hidden.unique_id) + results = await execute_task(cls, VIDU_START_END_VIDEO, payload, 96) return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) @@ -618,5 +560,6 @@ class ViduExtension(ComfyExtension): ViduStartEndToVideoNode, ] + async def comfy_entrypoint() -> ViduExtension: return ViduExtension() diff --git a/comfy_api_nodes/nodes_wan.py b/comfy_api_nodes/nodes_wan.py index b089bd907..2aab3c2ff 100644 --- a/comfy_api_nodes/nodes_wan.py +++ b/comfy_api_nodes/nodes_wan.py @@ -1,28 +1,24 @@ import re -from typing import Optional, Type, Union -from typing_extensions import override +from typing import Optional import torch from pydantic import BaseModel, Field -from comfy_api.latest import ComfyExtension, Input, IO -from comfy_api_nodes.apis.client import ( - ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, - EmptyRequest, - R, - T, -) -from comfy_api_nodes.util.validation_utils import get_number_of_images, validate_audio_duration +from typing_extensions import override -from comfy_api_nodes.apinode_utils import ( +from comfy_api.latest import IO, ComfyExtension, Input +from comfy_api_nodes.util import ( + ApiEndpoint, + audio_to_base64_string, download_url_to_image_tensor, download_url_to_video_output, + get_number_of_images, + poll_op, + sync_op, tensor_to_base64_string, - audio_to_base64_string, + validate_audio_duration, ) + class Text2ImageInputField(BaseModel): prompt: str = Field(...) negative_prompt: Optional[str] = Field(None) @@ -146,53 +142,7 @@ class VideoTaskStatusResponse(BaseModel): request_id: str = Field(...) 
-RES_IN_PARENS = re.compile(r'\((\d+)\s*[x×]\s*(\d+)\)') - - -async def process_task( - auth_kwargs: dict[str, str], - url: str, - request_model: Type[T], - response_model: Type[R], - payload: Union[ - Text2ImageTaskCreationRequest, - Image2ImageTaskCreationRequest, - Text2VideoTaskCreationRequest, - Image2VideoTaskCreationRequest, - ], - node_id: str, - estimated_duration: int, - poll_interval: int, -) -> Type[R]: - initial_response = await SynchronousOperation( - endpoint=ApiEndpoint( - path=url, - method=HttpMethod.POST, - request_model=request_model, - response_model=TaskCreationResponse, - ), - request=payload, - auth_kwargs=auth_kwargs, - ).execute() - - if not initial_response.output: - raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") - - return await PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=response_model, - ), - completed_statuses=["SUCCEEDED"], - failed_statuses=["FAILED", "CANCELED", "UNKNOWN"], - status_extractor=lambda x: x.output.task_status, - estimated_duration=estimated_duration, - poll_interval=poll_interval, - node_id=node_id, - auth_kwargs=auth_kwargs, - ).execute() +RES_IN_PARENS = re.compile(r"\((\d+)\s*[x×]\s*(\d+)\)") class WanTextToImageApi(IO.ComfyNode): @@ -259,7 +209,7 @@ class WanTextToImageApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip="Whether to add an \"AI generated\" watermark to the result.", + tooltip='Whether to add an "AI generated" watermark to the result.', optional=True, ), ], @@ -286,26 +236,28 @@ class WanTextToImageApi(IO.ComfyNode): prompt_extend: bool = True, watermark: bool = True, ): - payload = Text2ImageTaskCreationRequest( - model=model, - input=Text2ImageInputField(prompt=prompt, negative_prompt=negative_prompt), - parameters=Txt2ImageParametersField( - size=f"{width}*{height}", - seed=seed, - prompt_extend=prompt_extend, - watermark=watermark, + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/wan/api/v1/services/aigc/text2image/image-synthesis", method="POST"), + response_model=TaskCreationResponse, + data=Text2ImageTaskCreationRequest( + model=model, + input=Text2ImageInputField(prompt=prompt, negative_prompt=negative_prompt), + parameters=Txt2ImageParametersField( + size=f"{width}*{height}", + seed=seed, + prompt_extend=prompt_extend, + watermark=watermark, + ), ), ) - response = await process_task( - { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - "/proxy/wan/api/v1/services/aigc/text2image/image-synthesis", - request_model=Text2ImageTaskCreationRequest, + if not initial_response.output: + raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), response_model=ImageTaskStatusResponse, - payload=payload, - node_id=cls.hidden.unique_id, + status_extractor=lambda x: x.output.task_status, estimated_duration=9, poll_interval=3, ) @@ -320,7 +272,7 @@ class WanImageToImageApi(IO.ComfyNode): display_name="Wan Image to Image", category="api node/image/Wan", description="Generates an image from one or two input images and a text prompt. 
" - "The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).", + "The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).", inputs=[ IO.Combo.Input( "model", @@ -376,7 +328,7 @@ class WanImageToImageApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip="Whether to add an \"AI generated\" watermark to the result.", + tooltip='Whether to add an "AI generated" watermark to the result.', optional=True, ), ], @@ -408,28 +360,30 @@ class WanImageToImageApi(IO.ComfyNode): raise ValueError(f"Expected 1 or 2 input images, got {n_images}.") images = [] for i in image: - images.append("data:image/png;base64," + tensor_to_base64_string(i, total_pixels=4096*4096)) - payload = Image2ImageTaskCreationRequest( - model=model, - input=Image2ImageInputField(prompt=prompt, negative_prompt=negative_prompt, images=images), - parameters=Image2ImageParametersField( - # size=f"{width}*{height}", - seed=seed, - watermark=watermark, + images.append("data:image/png;base64," + tensor_to_base64_string(i, total_pixels=4096 * 4096)) + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/wan/api/v1/services/aigc/image2image/image-synthesis", method="POST"), + response_model=TaskCreationResponse, + data=Image2ImageTaskCreationRequest( + model=model, + input=Image2ImageInputField(prompt=prompt, negative_prompt=negative_prompt, images=images), + parameters=Image2ImageParametersField( + # size=f"{width}*{height}", + seed=seed, + watermark=watermark, + ), ), ) - response = await process_task( - { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - "/proxy/wan/api/v1/services/aigc/image2image/image-synthesis", - request_model=Image2ImageTaskCreationRequest, + if not initial_response.output: + raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), response_model=ImageTaskStatusResponse, - payload=payload, - node_id=cls.hidden.unique_id, + status_extractor=lambda x: x.output.task_status, estimated_duration=42, - poll_interval=3, + poll_interval=4, ) return IO.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url))) @@ -523,7 +477,7 @@ class WanTextToVideoApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip="Whether to add an \"AI generated\" watermark to the result.", + tooltip='Whether to add an "AI generated" watermark to the result.', optional=True, ), ], @@ -557,28 +511,31 @@ class WanTextToVideoApi(IO.ComfyNode): if audio is not None: validate_audio_duration(audio, 3.0, 29.0) audio_url = "data:audio/mp3;base64," + audio_to_base64_string(audio, "mp3", "libmp3lame") - payload = Text2VideoTaskCreationRequest( - model=model, - input=Text2VideoInputField(prompt=prompt, negative_prompt=negative_prompt, audio_url=audio_url), - parameters=Text2VideoParametersField( - size=f"{width}*{height}", - duration=duration, - seed=seed, - audio=generate_audio, - prompt_extend=prompt_extend, - watermark=watermark, + + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis", method="POST"), + response_model=TaskCreationResponse, + data=Text2VideoTaskCreationRequest( + model=model, + input=Text2VideoInputField(prompt=prompt, negative_prompt=negative_prompt, audio_url=audio_url), + 
parameters=Text2VideoParametersField( + size=f"{width}*{height}", + duration=duration, + seed=seed, + audio=generate_audio, + prompt_extend=prompt_extend, + watermark=watermark, + ), ), ) - response = await process_task( - { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - "/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis", - request_model=Text2VideoTaskCreationRequest, + if not initial_response.output: + raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), response_model=VideoTaskStatusResponse, - payload=payload, - node_id=cls.hidden.unique_id, + status_extractor=lambda x: x.output.task_status, estimated_duration=120 * int(duration / 5), poll_interval=6, ) @@ -667,7 +624,7 @@ class WanImageToVideoApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip="Whether to add an \"AI generated\" watermark to the result.", + tooltip='Whether to add an "AI generated" watermark to the result.', optional=True, ), ], @@ -699,35 +656,37 @@ class WanImageToVideoApi(IO.ComfyNode): ): if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") - image_url = "data:image/png;base64," + tensor_to_base64_string(image, total_pixels=2000*2000) + image_url = "data:image/png;base64," + tensor_to_base64_string(image, total_pixels=2000 * 2000) audio_url = None if audio is not None: validate_audio_duration(audio, 3.0, 29.0) audio_url = "data:audio/mp3;base64," + audio_to_base64_string(audio, "mp3", "libmp3lame") - payload = Image2VideoTaskCreationRequest( - model=model, - input=Image2VideoInputField( - prompt=prompt, negative_prompt=negative_prompt, img_url=image_url, audio_url=audio_url - ), - parameters=Image2VideoParametersField( - resolution=resolution, - duration=duration, - seed=seed, - audio=generate_audio, - prompt_extend=prompt_extend, - watermark=watermark, + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis", method="POST"), + response_model=TaskCreationResponse, + data=Image2VideoTaskCreationRequest( + model=model, + input=Image2VideoInputField( + prompt=prompt, negative_prompt=negative_prompt, img_url=image_url, audio_url=audio_url + ), + parameters=Image2VideoParametersField( + resolution=resolution, + duration=duration, + seed=seed, + audio=generate_audio, + prompt_extend=prompt_extend, + watermark=watermark, + ), ), ) - response = await process_task( - { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - "/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis", - request_model=Image2VideoTaskCreationRequest, + if not initial_response.output: + raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), response_model=VideoTaskStatusResponse, - payload=payload, - node_id=cls.hidden.unique_id, + status_extractor=lambda x: x.output.task_status, estimated_duration=120 * int(duration / 5), poll_interval=6, ) diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py index e69de29bb..c2ec391aa 100644 --- a/comfy_api_nodes/util/__init__.py +++ b/comfy_api_nodes/util/__init__.py @@ -0,0 +1,87 @@ +from ._helpers import 
get_fs_object_size +from .client import ( + ApiEndpoint, + poll_op, + poll_op_raw, + sync_op, + sync_op_raw, +) +from .conversions import ( + audio_bytes_to_audio_input, + audio_input_to_mp3, + audio_to_base64_string, + bytesio_to_image_tensor, + downscale_image_tensor, + image_tensor_pair_to_batch, + pil_to_bytesio, + tensor_to_base64_string, + tensor_to_bytesio, + tensor_to_pil, + trim_video, +) +from .download_helpers import ( + download_url_to_bytesio, + download_url_to_image_tensor, + download_url_to_video_output, +) +from .upload_helpers import ( + upload_audio_to_comfyapi, + upload_file_to_comfyapi, + upload_images_to_comfyapi, + upload_video_to_comfyapi, +) +from .validation_utils import ( + get_number_of_images, + validate_aspect_ratio_closeness, + validate_audio_duration, + validate_container_format_is_mp4, + validate_image_aspect_ratio, + validate_image_aspect_ratio_range, + validate_image_dimensions, + validate_string, + validate_video_dimensions, + validate_video_duration, +) + +__all__ = [ + # API client + "ApiEndpoint", + "poll_op", + "poll_op_raw", + "sync_op", + "sync_op_raw", + # Upload helpers + "upload_audio_to_comfyapi", + "upload_file_to_comfyapi", + "upload_images_to_comfyapi", + "upload_video_to_comfyapi", + # Download helpers + "download_url_to_bytesio", + "download_url_to_image_tensor", + "download_url_to_video_output", + # Conversions + "audio_bytes_to_audio_input", + "audio_input_to_mp3", + "audio_to_base64_string", + "bytesio_to_image_tensor", + "downscale_image_tensor", + "image_tensor_pair_to_batch", + "pil_to_bytesio", + "tensor_to_base64_string", + "tensor_to_bytesio", + "tensor_to_pil", + "trim_video", + # Validation utilities + "get_number_of_images", + "validate_aspect_ratio_closeness", + "validate_audio_duration", + "validate_container_format_is_mp4", + "validate_image_aspect_ratio", + "validate_image_aspect_ratio_range", + "validate_image_dimensions", + "validate_string", + "validate_video_dimensions", + "validate_video_duration", + # Misc functions + "get_fs_object_size", +] diff --git a/comfy_api_nodes/util/_helpers.py b/comfy_api_nodes/util/_helpers.py new file mode 100644 index 000000000..328fe5227 --- /dev/null +++ b/comfy_api_nodes/util/_helpers.py @@ -0,0 +1,71 @@ +import asyncio +import contextlib +import os +import time +from io import BytesIO +from typing import Callable, Optional, Union + +from comfy.cli_args import args +from comfy.model_management import processing_interrupted +from comfy_api.latest import IO + +from .common_exceptions import ProcessingInterrupted + + +def is_processing_interrupted() -> bool: + """Return True if user/runtime requested interruption.""" + return processing_interrupted() + + +def get_node_id(node_cls: type[IO.ComfyNode]) -> str: + return node_cls.hidden.unique_id + + +def get_auth_header(node_cls: type[IO.ComfyNode]) -> dict[str, str]: + if node_cls.hidden.auth_token_comfy_org: + return {"Authorization": f"Bearer {node_cls.hidden.auth_token_comfy_org}"} + if node_cls.hidden.api_key_comfy_org: + return {"X-API-KEY": node_cls.hidden.api_key_comfy_org} + return {} + + +def default_base_url() -> str: + return getattr(args, "comfy_api_base", "https://api.comfy.org") + + +async def sleep_with_interrupt( + seconds: float, + node_cls: Optional[type[IO.ComfyNode]], + label: Optional[str] = None, + start_ts: Optional[float] = None, + estimated_total: Optional[int] = None, + *, + display_callback: Optional[Callable[[type[IO.ComfyNode], str, int, Optional[int]], None]] = None, +): + """ + Sleep in 1s slices while: + - 
Checking for interruption (raises ProcessingInterrupted). + - Optionally emitting time progress via display_callback (if provided). + """ + end = time.monotonic() + seconds + while True: + if is_processing_interrupted(): + raise ProcessingInterrupted("Task cancelled") + now = time.monotonic() + if start_ts is not None and label and display_callback: + with contextlib.suppress(Exception): + display_callback(node_cls, label, int(now - start_ts), estimated_total) + if now >= end: + break + await asyncio.sleep(min(1.0, end - now)) + + +def mimetype_to_extension(mime_type: str) -> str: + """Converts a MIME type to a file extension.""" + return mime_type.split("/")[-1].lower() + + +def get_fs_object_size(path_or_object: Union[str, BytesIO]) -> int: + if isinstance(path_or_object, str): + return os.path.getsize(path_or_object) + return len(path_or_object.getvalue()) diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py new file mode 100644 index 000000000..5833b118f --- /dev/null +++ b/comfy_api_nodes/util/client.py @@ -0,0 +1,941 @@ +import asyncio +import contextlib +import json +import logging +import socket +import time +import uuid +from dataclasses import dataclass +from enum import Enum +from io import BytesIO +from typing import Any, Callable, Iterable, Literal, Optional, Type, TypeVar, Union +from urllib.parse import urljoin, urlparse + +import aiohttp +from aiohttp.client_exceptions import ClientError, ContentTypeError +from pydantic import BaseModel + +from comfy import utils +from comfy_api.latest import IO +from comfy_api_nodes.apis import request_logger +from server import PromptServer + +from ._helpers import ( + default_base_url, + get_auth_header, + get_node_id, + is_processing_interrupted, + sleep_with_interrupt, +) +from .common_exceptions import ApiServerError, LocalNetworkError, ProcessingInterrupted + +M = TypeVar("M", bound=BaseModel) + + +class ApiEndpoint: + def __init__( + self, + path: str, + method: Literal["GET", "POST", "PUT", "DELETE", "PATCH"] = "GET", + *, + query_params: Optional[dict[str, Any]] = None, + headers: Optional[dict[str, str]] = None, + ): + self.path = path + self.method = method + self.query_params = query_params or {} + self.headers = headers or {} + + +@dataclass +class _RequestConfig: + node_cls: type[IO.ComfyNode] + endpoint: ApiEndpoint + timeout: float + content_type: str + data: Optional[dict[str, Any]] + files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]] + multipart_parser: Optional[Callable] + max_retries: int + retry_delay: float + retry_backoff: float + wait_label: str = "Waiting" + monitor_progress: bool = True + estimated_total: Optional[int] = None + final_label_on_success: Optional[str] = "Completed" + progress_origin_ts: Optional[float] = None + + +@dataclass +class _PollUIState: + started: float + status_label: str = "Queued" + is_queued: bool = True + price: Optional[float] = None + estimated_duration: Optional[int] = None + base_processing_elapsed: float = 0.0 # sum of completed active intervals + active_since: Optional[float] = None # start time of current active interval (None if queued) + + +_RETRY_STATUS = {408, 429, 500, 502, 503, 504} +COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed"] +FAILED_STATUSES = ["cancelled", "canceled", "failed", "error"] +QUEUED_STATUSES = ["created", "queued", "queueing", "submitted"] + + +async def sync_op( + cls: type[IO.ComfyNode], + endpoint: ApiEndpoint, + *, + response_model: Type[M], + data: Optional[BaseModel] = None, + files: 
Optional[Union[dict[str, Any], list[tuple[str, Any]]]] = None, + content_type: str = "application/json", + timeout: float = 3600.0, + multipart_parser: Optional[Callable] = None, + max_retries: int = 3, + retry_delay: float = 1.0, + retry_backoff: float = 2.0, + wait_label: str = "Waiting for server", + estimated_duration: Optional[int] = None, + final_label_on_success: Optional[str] = "Completed", + progress_origin_ts: Optional[float] = None, + monitor_progress: bool = True, +) -> M: + raw = await sync_op_raw( + cls, + endpoint, + data=data, + files=files, + content_type=content_type, + timeout=timeout, + multipart_parser=multipart_parser, + max_retries=max_retries, + retry_delay=retry_delay, + retry_backoff=retry_backoff, + wait_label=wait_label, + estimated_duration=estimated_duration, + as_binary=False, + final_label_on_success=final_label_on_success, + progress_origin_ts=progress_origin_ts, + monitor_progress=monitor_progress, + ) + if not isinstance(raw, dict): + raise Exception("Expected JSON response to validate into a Pydantic model, got non-JSON (binary or text).") + return _validate_or_raise(response_model, raw) + + +async def poll_op( + cls: type[IO.ComfyNode], + poll_endpoint: ApiEndpoint, + *, + response_model: Type[M], + status_extractor: Callable[[M], Optional[Union[str, int]]], + progress_extractor: Optional[Callable[[M], Optional[int]]] = None, + price_extractor: Optional[Callable[[M], Optional[float]]] = None, + completed_statuses: Optional[list[Union[str, int]]] = None, + failed_statuses: Optional[list[Union[str, int]]] = None, + queued_statuses: Optional[list[Union[str, int]]] = None, + data: Optional[BaseModel] = None, + poll_interval: float = 5.0, + max_poll_attempts: int = 120, + timeout_per_poll: float = 120.0, + max_retries_per_poll: int = 3, + retry_delay_per_poll: float = 1.0, + retry_backoff_per_poll: float = 2.0, + estimated_duration: Optional[int] = None, + cancel_endpoint: Optional[ApiEndpoint] = None, + cancel_timeout: float = 10.0, +) -> M: + raw = await poll_op_raw( + cls, + poll_endpoint=poll_endpoint, + status_extractor=_wrap_model_extractor(response_model, status_extractor), + progress_extractor=_wrap_model_extractor(response_model, progress_extractor), + price_extractor=_wrap_model_extractor(response_model, price_extractor), + completed_statuses=completed_statuses, + failed_statuses=failed_statuses, + queued_statuses=queued_statuses, + data=data, + poll_interval=poll_interval, + max_poll_attempts=max_poll_attempts, + timeout_per_poll=timeout_per_poll, + max_retries_per_poll=max_retries_per_poll, + retry_delay_per_poll=retry_delay_per_poll, + retry_backoff_per_poll=retry_backoff_per_poll, + estimated_duration=estimated_duration, + cancel_endpoint=cancel_endpoint, + cancel_timeout=cancel_timeout, + ) + if not isinstance(raw, dict): + raise Exception("Expected JSON response to validate into a Pydantic model, got non-JSON (binary or text).") + return _validate_or_raise(response_model, raw) + + +async def sync_op_raw( + cls: type[IO.ComfyNode], + endpoint: ApiEndpoint, + *, + data: Optional[Union[dict[str, Any], BaseModel]] = None, + files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]] = None, + content_type: str = "application/json", + timeout: float = 3600.0, + multipart_parser: Optional[Callable] = None, + max_retries: int = 3, + retry_delay: float = 1.0, + retry_backoff: float = 2.0, + wait_label: str = "Waiting for server", + estimated_duration: Optional[int] = None, + as_binary: bool = False, + final_label_on_success: Optional[str] = 
"Completed", + progress_origin_ts: Optional[float] = None, + monitor_progress: bool = True, +) -> Union[dict[str, Any], bytes]: + """ + Make a single network request. + - If as_binary=False (default): returns JSON dict (or {'_raw': ''} if non-JSON). + - If as_binary=True: returns bytes. + """ + if isinstance(data, BaseModel): + data = data.model_dump(exclude_none=True) + for k, v in list(data.items()): + if isinstance(v, Enum): + data[k] = v.value + cfg = _RequestConfig( + node_cls=cls, + endpoint=endpoint, + timeout=timeout, + content_type=content_type, + data=data, + files=files, + multipart_parser=multipart_parser, + max_retries=max_retries, + retry_delay=retry_delay, + retry_backoff=retry_backoff, + wait_label=wait_label, + monitor_progress=monitor_progress, + estimated_total=estimated_duration, + final_label_on_success=final_label_on_success, + progress_origin_ts=progress_origin_ts, + ) + return await _request_base(cfg, expect_binary=as_binary) + + +async def poll_op_raw( + cls: type[IO.ComfyNode], + poll_endpoint: ApiEndpoint, + *, + status_extractor: Callable[[dict[str, Any]], Optional[Union[str, int]]], + progress_extractor: Optional[Callable[[dict[str, Any]], Optional[int]]] = None, + price_extractor: Optional[Callable[[dict[str, Any]], Optional[float]]] = None, + completed_statuses: Optional[list[Union[str, int]]] = None, + failed_statuses: Optional[list[Union[str, int]]] = None, + queued_statuses: Optional[list[Union[str, int]]] = None, + data: Optional[Union[dict[str, Any], BaseModel]] = None, + poll_interval: float = 5.0, + max_poll_attempts: int = 120, + timeout_per_poll: float = 120.0, + max_retries_per_poll: int = 3, + retry_delay_per_poll: float = 1.0, + retry_backoff_per_poll: float = 2.0, + estimated_duration: Optional[int] = None, + cancel_endpoint: Optional[ApiEndpoint] = None, + cancel_timeout: float = 10.0, +) -> dict[str, Any]: + """ + Polls an endpoint until the task reaches a terminal state. Displays time while queued/processing, + checks interruption every second, and calls Cancel endpoint (if provided) on interruption. + + Uses default complete, failed and queued states assumption. + + Returns the final JSON response from the poll endpoint. 
+ """ + completed_states = _normalize_statuses(COMPLETED_STATUSES if completed_statuses is None else completed_statuses) + failed_states = _normalize_statuses(FAILED_STATUSES if failed_statuses is None else failed_statuses) + queued_states = _normalize_statuses(QUEUED_STATUSES if queued_statuses is None else queued_statuses) + started = time.monotonic() + consumed_attempts = 0 # counts only non-queued polls + + progress_bar = utils.ProgressBar(100) if progress_extractor else None + last_progress: Optional[int] = None + + state = _PollUIState(started=started, estimated_duration=estimated_duration) + stop_ticker = asyncio.Event() + + async def _ticker(): + """Emit a UI update every second while polling is in progress.""" + try: + while not stop_ticker.is_set(): + if is_processing_interrupted(): + break + now = time.monotonic() + proc_elapsed = state.base_processing_elapsed + ( + (now - state.active_since) if state.active_since is not None else 0.0 + ) + _display_time_progress( + cls, + status=state.status_label, + elapsed_seconds=int(now - state.started), + estimated_total=state.estimated_duration, + price=state.price, + is_queued=state.is_queued, + processing_elapsed_seconds=int(proc_elapsed), + ) + await asyncio.sleep(1.0) + except Exception as exc: + logging.debug("Polling ticker exited: %s", exc) + + ticker_task = asyncio.create_task(_ticker()) + try: + while consumed_attempts < max_poll_attempts: + try: + resp_json = await sync_op_raw( + cls, + poll_endpoint, + data=data, + timeout=timeout_per_poll, + max_retries=max_retries_per_poll, + retry_delay=retry_delay_per_poll, + retry_backoff=retry_backoff_per_poll, + wait_label="Checking", + estimated_duration=None, + as_binary=False, + final_label_on_success=None, + monitor_progress=False, + ) + if not isinstance(resp_json, dict): + raise Exception("Polling endpoint returned non-JSON response.") + except ProcessingInterrupted: + if cancel_endpoint: + with contextlib.suppress(Exception): + await sync_op_raw( + cls, + cancel_endpoint, + timeout=cancel_timeout, + max_retries=0, + wait_label="Cancelling task", + estimated_duration=None, + as_binary=False, + final_label_on_success=None, + monitor_progress=False, + ) + raise + + try: + status = _normalize_status_value(status_extractor(resp_json)) + except Exception as e: + logging.error("Status extraction failed: %s", e) + status = None + + if price_extractor: + new_price = price_extractor(resp_json) + if new_price is not None: + state.price = new_price + + if progress_extractor: + new_progress = progress_extractor(resp_json) + if new_progress is not None and last_progress != new_progress: + progress_bar.update_absolute(new_progress, total=100) + last_progress = new_progress + + now_ts = time.monotonic() + is_queued = status in queued_states + + if is_queued: + if state.active_since is not None: # If we just moved from active -> queued, close the active interval + state.base_processing_elapsed += now_ts - state.active_since + state.active_since = None + else: + if state.active_since is None: # If we just moved from queued -> active, open a new active interval + state.active_since = now_ts + + state.is_queued = is_queued + state.status_label = status or ("Queued" if is_queued else "Processing") + if status in completed_states: + if state.active_since is not None: + state.base_processing_elapsed += now_ts - state.active_since + state.active_since = None + stop_ticker.set() + with contextlib.suppress(Exception): + await ticker_task + + if progress_bar and last_progress != 100: + 
progress_bar.update_absolute(100, total=100) + + _display_time_progress( + cls, + status=status if status else "Completed", + elapsed_seconds=int(now_ts - started), + estimated_total=estimated_duration, + price=state.price, + is_queued=False, + processing_elapsed_seconds=int(state.base_processing_elapsed), + ) + return resp_json + + if status in failed_states: + msg = f"Task failed: {json.dumps(resp_json)}" + logging.error(msg) + raise Exception(msg) + + try: + await sleep_with_interrupt(poll_interval, cls, None, None, None) + except ProcessingInterrupted: + if cancel_endpoint: + with contextlib.suppress(Exception): + await sync_op_raw( + cls, + cancel_endpoint, + timeout=cancel_timeout, + max_retries=0, + wait_label="Cancelling task", + estimated_duration=None, + as_binary=False, + final_label_on_success=None, + monitor_progress=False, + ) + raise + if not is_queued: + consumed_attempts += 1 + + raise Exception( + f"Polling timed out after {max_poll_attempts} non-queued attempts " + f"(~{int(max_poll_attempts * poll_interval)}s of active polling)." + ) + except ProcessingInterrupted: + raise + except (LocalNetworkError, ApiServerError): + raise + except Exception as e: + raise Exception(f"Polling aborted due to error: {e}") from e + finally: + stop_ticker.set() + with contextlib.suppress(Exception): + await ticker_task + + +def _display_text( + node_cls: type[IO.ComfyNode], + text: Optional[str], + *, + status: Optional[Union[str, int]] = None, + price: Optional[float] = None, +) -> None: + display_lines: list[str] = [] + if status: + display_lines.append(f"Status: {status.capitalize() if isinstance(status, str) else status}") + if price is not None: + display_lines.append(f"Price: ${float(price):,.4f}") + if text is not None: + display_lines.append(text) + if display_lines: + PromptServer.instance.send_progress_text("\n".join(display_lines), get_node_id(node_cls)) + + +def _display_time_progress( + node_cls: type[IO.ComfyNode], + status: Optional[Union[str, int]], + elapsed_seconds: int, + estimated_total: Optional[int] = None, + *, + price: Optional[float] = None, + is_queued: Optional[bool] = None, + processing_elapsed_seconds: Optional[int] = None, +) -> None: + if estimated_total is not None and estimated_total > 0 and is_queued is False: + pe = processing_elapsed_seconds if processing_elapsed_seconds is not None else elapsed_seconds + remaining = max(0, int(estimated_total) - int(pe)) + time_line = f"Time elapsed: {int(elapsed_seconds)}s (~{remaining}s remaining)" + else: + time_line = f"Time elapsed: {int(elapsed_seconds)}s" + _display_text(node_cls, time_line, status=status, price=price) + + +async def _diagnose_connectivity() -> dict[str, bool]: + """Best-effort connectivity diagnostics to distinguish local vs. 
server issues.""" + results = { + "internet_accessible": False, + "api_accessible": False, + "is_local_issue": False, + "is_api_issue": False, + } + timeout = aiohttp.ClientTimeout(total=5.0) + async with aiohttp.ClientSession(timeout=timeout) as session: + try: + async with session.get("https://www.google.com") as resp: + results["internet_accessible"] = resp.status < 500 + except (ClientError, asyncio.TimeoutError, socket.gaierror): + results["is_local_issue"] = True + return results + + parsed = urlparse(default_base_url()) + health_url = f"{parsed.scheme}://{parsed.netloc}/health" + with contextlib.suppress(ClientError, asyncio.TimeoutError): + async with session.get(health_url) as resp: + results["api_accessible"] = resp.status < 500 + results["is_api_issue"] = results["internet_accessible"] and not results["api_accessible"] + return results + + +def _unpack_tuple(t: tuple) -> tuple[str, Any, str]: + """Normalize (filename, value, content_type).""" + if len(t) == 2: + return t[0], t[1], "application/octet-stream" + if len(t) == 3: + return t[0], t[1], t[2] + raise ValueError("files tuple must be (filename, file[, content_type])") + + +def _merge_params(endpoint_params: dict[str, Any], method: str, data: Optional[dict[str, Any]]) -> dict[str, Any]: + params = dict(endpoint_params or {}) + if method.upper() == "GET" and data: + for k, v in data.items(): + if v is not None: + params[k] = v + return params + + +def _friendly_http_message(status: int, body: Any) -> str: + if status == 401: + return "Unauthorized: Please login first to use this node." + if status == 402: + return "Payment Required: Please add credits to your account to use this node." + if status == 409: + return "There is a problem with your account. Please contact support@comfy.org." + if status == 429: + return "Rate Limit Exceeded: Please try again later." 
+ try: + if isinstance(body, dict): + err = body.get("error") + if isinstance(err, dict): + msg = err.get("message") + typ = err.get("type") + if msg and typ: + return f"API Error: {msg} (Type: {typ})" + if msg: + return f"API Error: {msg}" + return f"API Error: {json.dumps(body)}" + else: + txt = str(body) + if len(txt) <= 200: + return f"API Error (raw): {txt}" + return f"API Error (status {status})" + except Exception: + return f"HTTP {status}: Unknown error" + + +def _generate_operation_id(method: str, path: str, attempt: int) -> str: + slug = path.strip("/").replace("/", "_") or "op" + return f"{method}_{slug}_try{attempt}_{uuid.uuid4().hex[:8]}" + + +def _snapshot_request_body_for_logging( + content_type: str, + method: str, + data: Optional[dict[str, Any]], + files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]], +) -> Optional[Union[dict[str, Any], str]]: + if method.upper() == "GET": + return None + if content_type == "multipart/form-data": + form_fields = sorted([k for k, v in (data or {}).items() if v is not None]) + file_fields: list[dict[str, str]] = [] + if files: + file_iter = files if isinstance(files, list) else list(files.items()) + for field_name, file_obj in file_iter: + if file_obj is None: + continue + if isinstance(file_obj, tuple): + filename = file_obj[0] + else: + filename = getattr(file_obj, "name", field_name) + file_fields.append({"field": field_name, "filename": str(filename or "")}) + return {"_multipart": True, "form_fields": form_fields, "file_fields": file_fields} + if content_type == "application/x-www-form-urlencoded": + return data or {} + return data or {} + + +async def _request_base(cfg: _RequestConfig, expect_binary: bool): + """Core request with retries, per-second interruption monitoring, true cancellation, and friendly errors.""" + url = cfg.endpoint.path + parsed_url = urlparse(url) + if not parsed_url.scheme and not parsed_url.netloc: # is URL relative? + url = urljoin(default_base_url().rstrip("/") + "/", url.lstrip("/")) + + method = cfg.endpoint.method + params = _merge_params(cfg.endpoint.query_params, method, cfg.data if method == "GET" else None) + + async def _monitor(stop_evt: asyncio.Event, start_ts: float): + """Every second: update elapsed time and signal interruption.""" + try: + while not stop_evt.is_set(): + if is_processing_interrupted(): + return + if cfg.monitor_progress: + _display_time_progress( + cfg.node_cls, cfg.wait_label, int(time.monotonic() - start_ts), cfg.estimated_total + ) + await asyncio.sleep(1.0) + except asyncio.CancelledError: + return # normal shutdown + + start_time = cfg.progress_origin_ts if cfg.progress_origin_ts is not None else time.monotonic() + attempt = 0 + delay = cfg.retry_delay + operation_succeeded: bool = False + final_elapsed_seconds: Optional[int] = None + while True: + attempt += 1 + stop_event = asyncio.Event() + monitor_task: Optional[asyncio.Task] = None + sess: Optional[aiohttp.ClientSession] = None + + operation_id = _generate_operation_id(method, cfg.endpoint.path, attempt) + logging.debug("[DEBUG] HTTP %s %s (attempt %d)", method, url, attempt) + + payload_headers = {"Accept": "*/*"} + if not parsed_url.scheme and not parsed_url.netloc: # is URL relative? 
+ payload_headers.update(get_auth_header(cfg.node_cls)) + if cfg.endpoint.headers: + payload_headers.update(cfg.endpoint.headers) + + payload_kw: dict[str, Any] = {"headers": payload_headers} + if method == "GET": + payload_headers.pop("Content-Type", None) + request_body_log = _snapshot_request_body_for_logging(cfg.content_type, method, cfg.data, cfg.files) + try: + if cfg.monitor_progress: + monitor_task = asyncio.create_task(_monitor(stop_event, start_time)) + + timeout = aiohttp.ClientTimeout(total=cfg.timeout) + sess = aiohttp.ClientSession(timeout=timeout) + + if cfg.content_type == "multipart/form-data" and method != "GET": + # aiohttp will set Content-Type boundary; remove any fixed Content-Type + payload_headers.pop("Content-Type", None) + if cfg.multipart_parser and cfg.data: + form = cfg.multipart_parser(cfg.data) + if not isinstance(form, aiohttp.FormData): + raise ValueError("multipart_parser must return aiohttp.FormData") + else: + form = aiohttp.FormData(default_to_multipart=True) + if cfg.data: + for k, v in cfg.data.items(): + if v is None: + continue + form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v) + if cfg.files: + file_iter = cfg.files if isinstance(cfg.files, list) else cfg.files.items() + for field_name, file_obj in file_iter: + if file_obj is None: + continue + if isinstance(file_obj, tuple): + filename, file_value, content_type = _unpack_tuple(file_obj) + else: + filename = getattr(file_obj, "name", field_name) + file_value = file_obj + content_type = "application/octet-stream" + # Attempt to rewind BytesIO for retries + if isinstance(file_value, BytesIO): + with contextlib.suppress(Exception): + file_value.seek(0) + form.add_field(field_name, file_value, filename=filename, content_type=content_type) + payload_kw["data"] = form + elif cfg.content_type == "application/x-www-form-urlencoded" and method != "GET": + payload_headers["Content-Type"] = "application/x-www-form-urlencoded" + payload_kw["data"] = cfg.data or {} + elif method != "GET": + payload_headers["Content-Type"] = "application/json" + payload_kw["json"] = cfg.data or {} + + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + request_headers=dict(payload_headers) if payload_headers else None, + request_params=dict(params) if params else None, + request_data=request_body_log, + ) + except Exception as _log_e: + logging.debug("[DEBUG] request logging failed: %s", _log_e) + + req_coro = sess.request(method, url, params=params, **payload_kw) + req_task = asyncio.create_task(req_coro) + + # Race: request vs. monitor (interruption) + tasks = {req_task} + if monitor_task: + tasks.add(monitor_task) + done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) + + if monitor_task and monitor_task in done: + # Interrupted – cancel the request and abort + if req_task in pending: + req_task.cancel() + raise ProcessingInterrupted("Task cancelled") + + # Otherwise, request finished + resp = await req_task + async with resp: + if resp.status >= 400: + try: + body = await resp.json() + except (ContentTypeError, json.JSONDecodeError): + body = await resp.text() + if resp.status in _RETRY_STATUS and attempt <= cfg.max_retries: + logging.warning( + "HTTP %s %s -> %s. 
Retrying in %.2fs (retry %d of %d).", + method, + url, + resp.status, + delay, + attempt, + cfg.max_retries, + ) + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=body, + error_message=_friendly_http_message(resp.status, body), + ) + except Exception as _log_e: + logging.debug("[DEBUG] response logging failed: %s", _log_e) + + await sleep_with_interrupt( + delay, + cfg.node_cls, + cfg.wait_label if cfg.monitor_progress else None, + start_time if cfg.monitor_progress else None, + cfg.estimated_total, + display_callback=_display_time_progress if cfg.monitor_progress else None, + ) + delay *= cfg.retry_backoff + continue + msg = _friendly_http_message(resp.status, body) + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=body, + error_message=msg, + ) + except Exception as _log_e: + logging.debug("[DEBUG] response logging failed: %s", _log_e) + raise Exception(msg) + + if expect_binary: + buff = bytearray() + last_tick = time.monotonic() + async for chunk in resp.content.iter_chunked(64 * 1024): + buff.extend(chunk) + now = time.monotonic() + if now - last_tick >= 1.0: + last_tick = now + if is_processing_interrupted(): + raise ProcessingInterrupted("Task cancelled") + if cfg.monitor_progress: + _display_time_progress( + cfg.node_cls, cfg.wait_label, int(now - start_time), cfg.estimated_total + ) + bytes_payload = bytes(buff) + operation_succeeded = True + final_elapsed_seconds = int(time.monotonic() - start_time) + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=bytes_payload, + ) + except Exception as _log_e: + logging.debug("[DEBUG] response logging failed: %s", _log_e) + return bytes_payload + else: + try: + payload = await resp.json() + response_content_to_log: Any = payload + except (ContentTypeError, json.JSONDecodeError): + text = await resp.text() + try: + payload = json.loads(text) if text else {} + except json.JSONDecodeError: + payload = {"_raw": text} + response_content_to_log = payload if isinstance(payload, dict) else text + operation_succeeded = True + final_elapsed_seconds = int(time.monotonic() - start_time) + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=response_content_to_log, + ) + except Exception as _log_e: + logging.debug("[DEBUG] response logging failed: %s", _log_e) + return payload + + except ProcessingInterrupted: + logging.debug("Polling was interrupted by user") + raise + except (ClientError, asyncio.TimeoutError, socket.gaierror) as e: + if attempt <= cfg.max_retries: + logging.warning( + "Connection error calling %s %s. 
Retrying in %.2fs (%d/%d): %s", + method, + url, + delay, + attempt, + cfg.max_retries, + str(e), + ) + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + request_headers=dict(payload_headers) if payload_headers else None, + request_params=dict(params) if params else None, + request_data=request_body_log, + error_message=f"{type(e).__name__}: {str(e)} (will retry)", + ) + except Exception as _log_e: + logging.debug("[DEBUG] request error logging failed: %s", _log_e) + await sleep_with_interrupt( + delay, + cfg.node_cls, + cfg.wait_label if cfg.monitor_progress else None, + start_time if cfg.monitor_progress else None, + cfg.estimated_total, + display_callback=_display_time_progress if cfg.monitor_progress else None, + ) + delay *= cfg.retry_backoff + continue + diag = await _diagnose_connectivity() + if diag.get("is_local_issue"): + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + request_headers=dict(payload_headers) if payload_headers else None, + request_params=dict(params) if params else None, + request_data=request_body_log, + error_message=f"LocalNetworkError: {str(e)}", + ) + except Exception as _log_e: + logging.debug("[DEBUG] final error logging failed: %s", _log_e) + raise LocalNetworkError( + "Unable to connect to the API server due to local network issues. " + "Please check your internet connection and try again." + ) from e + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method=method, + request_url=url, + request_headers=dict(payload_headers) if payload_headers else None, + request_params=dict(params) if params else None, + request_data=request_body_log, + error_message=f"ApiServerError: {str(e)}", + ) + except Exception as _log_e: + logging.debug("[DEBUG] final error logging failed: %s", _log_e) + raise ApiServerError( + f"The API server at {default_base_url()} is currently unreachable. " + f"The service may be experiencing issues." + ) from e + finally: + stop_event.set() + if monitor_task: + monitor_task.cancel() + with contextlib.suppress(Exception): + await monitor_task + if sess: + with contextlib.suppress(Exception): + await sess.close() + if operation_succeeded and cfg.monitor_progress and cfg.final_label_on_success: + _display_time_progress( + cfg.node_cls, + status=cfg.final_label_on_success, + elapsed_seconds=( + final_elapsed_seconds + if final_elapsed_seconds is not None + else int(time.monotonic() - start_time) + ), + estimated_total=cfg.estimated_total, + price=None, + is_queued=False, + processing_elapsed_seconds=final_elapsed_seconds, + ) + + +def _validate_or_raise(response_model: Type[M], payload: Any) -> M: + try: + return response_model.model_validate(payload) + except Exception as e: + logging.error( + "Response validation failed for %s: %s", + getattr(response_model, "__name__", response_model), + e, + ) + raise Exception( + f"Response validation failed for {getattr(response_model, '__name__', response_model)}: {e}" + ) from e + + +def _wrap_model_extractor( + response_model: Type[M], + extractor: Optional[Callable[[M], Any]], +) -> Optional[Callable[[dict[str, Any]], Any]]: + """Wrap a typed extractor so it can be used by the dict-based poller. + Validates the dict into `response_model` before invoking `extractor`. + Uses a small per-wrapper cache keyed by `id(dict)` to avoid re-validating + the same response for multiple extractors in a single poll attempt. 
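+
+    Example (illustrative; `JobModel` and its `result_url` field are
+    hypothetical):
+
+        wrapped = _wrap_model_extractor(JobModel, lambda m: m.result_url)
+        # The poller hands `wrapped` a raw response dict; validation runs
+        # once per dict even when several wrapped extractors inspect it.
+        url = wrapped(response_dict)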
+ """ + if extractor is None: + return None + _cache: dict[int, M] = {} + + def _wrapped(d: dict[str, Any]) -> Any: + try: + key = id(d) + model = _cache.get(key) + if model is None: + model = response_model.model_validate(d) + _cache[key] = model + return extractor(model) + except Exception as e: + logging.error("Extractor failed (typed -> dict wrapper): %s", e) + raise + + return _wrapped + + +def _normalize_statuses(values: Optional[Iterable[Union[str, int]]]) -> set[Union[str, int]]: + if not values: + return set() + out: set[Union[str, int]] = set() + for v in values: + nv = _normalize_status_value(v) + if nv is not None: + out.add(nv) + return out + + +def _normalize_status_value(val: Union[str, int, None]) -> Union[str, int, None]: + if isinstance(val, str): + return val.strip().lower() + return val diff --git a/comfy_api_nodes/util/common_exceptions.py b/comfy_api_nodes/util/common_exceptions.py new file mode 100644 index 000000000..0606a4407 --- /dev/null +++ b/comfy_api_nodes/util/common_exceptions.py @@ -0,0 +1,14 @@ +class NetworkError(Exception): + """Base exception for network-related errors with diagnostic information.""" + + +class LocalNetworkError(NetworkError): + """Exception raised when local network connectivity issues are detected.""" + + +class ApiServerError(NetworkError): + """Exception raised when the API server is unreachable but internet is working.""" + + +class ProcessingInterrupted(Exception): + """Operation was interrupted by user/runtime via processing_interrupted().""" diff --git a/comfy_api_nodes/util/conversions.py b/comfy_api_nodes/util/conversions.py new file mode 100644 index 000000000..10cd1051b --- /dev/null +++ b/comfy_api_nodes/util/conversions.py @@ -0,0 +1,407 @@ +import base64 +import logging +import math +import uuid +from io import BytesIO +from typing import Optional + +import av +import numpy as np +import torch +from PIL import Image + +from comfy.utils import common_upscale +from comfy_api.latest import Input, InputImpl + +from ._helpers import mimetype_to_extension + + +def bytesio_to_image_tensor(image_bytesio: BytesIO, mode: str = "RGBA") -> torch.Tensor: + """Converts image data from BytesIO to a torch.Tensor. + + Args: + image_bytesio: BytesIO object containing the image data. + mode: The PIL mode to convert the image to (e.g., "RGB", "RGBA"). + + Returns: + A torch.Tensor representing the image (1, H, W, C). + + Raises: + PIL.UnidentifiedImageError: If the image data cannot be identified. + ValueError: If the specified mode is invalid. + """ + image = Image.open(image_bytesio) + image = image.convert(mode) + image_array = np.array(image).astype(np.float32) / 255.0 + return torch.from_numpy(image_array).unsqueeze(0) + + +def image_tensor_pair_to_batch(image1: torch.Tensor, image2: torch.Tensor) -> torch.Tensor: + """ + Converts a pair of image tensors to a batch tensor. + If the images are not the same size, the smaller image is resized to + match the larger image. + """ + if image1.shape[1:] != image2.shape[1:]: + image2 = common_upscale( + image2.movedim(-1, 1), + image1.shape[2], + image1.shape[1], + "bilinear", + "center", + ).movedim(1, -1) + return torch.cat((image1, image2), dim=0) + + +def tensor_to_bytesio( + image: torch.Tensor, + name: Optional[str] = None, + total_pixels: int = 2048 * 2048, + mime_type: str = "image/png", +) -> BytesIO: + """Converts a torch.Tensor image to a named BytesIO object. + + Args: + image: Input torch.Tensor image. + name: Optional filename for the BytesIO object. 
+ total_pixels: Maximum total pixels for potential downscaling. + mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4'). + + Returns: + Named BytesIO object containing the image data, with pointer set to the start of buffer. + """ + if not mime_type: + mime_type = "image/png" + + pil_image = tensor_to_pil(image, total_pixels=total_pixels) + img_binary = pil_to_bytesio(pil_image, mime_type=mime_type) + img_binary.name = f"{name if name else uuid.uuid4()}.{mimetype_to_extension(mime_type)}" + return img_binary + + +def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image: + """Converts a single torch.Tensor image [H, W, C] to a PIL Image, optionally downscaling.""" + if len(image.shape) > 3: + image = image[0] + # TODO: remove alpha if not allowed and present + input_tensor = image.cpu() + input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze() + image_np = (input_tensor.numpy() * 255).astype(np.uint8) + img = Image.fromarray(image_np) + return img + + +def tensor_to_base64_string( + image_tensor: torch.Tensor, + total_pixels: int = 2048 * 2048, + mime_type: str = "image/png", +) -> str: + """Convert [B, H, W, C] or [H, W, C] tensor to a base64 string. + + Args: + image_tensor: Input torch.Tensor image. + total_pixels: Maximum total pixels for potential downscaling. + mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4'). + + Returns: + Base64 encoded string of the image. + """ + pil_image = tensor_to_pil(image_tensor, total_pixels=total_pixels) + img_byte_arr = pil_to_bytesio(pil_image, mime_type=mime_type) + img_bytes = img_byte_arr.getvalue() + # Encode bytes to base64 string + base64_encoded_string = base64.b64encode(img_bytes).decode("utf-8") + return base64_encoded_string + + +def pil_to_bytesio(img: Image.Image, mime_type: str = "image/png") -> BytesIO: + """Converts a PIL Image to a BytesIO object.""" + if not mime_type: + mime_type = "image/png" + + img_byte_arr = BytesIO() + # Derive PIL format from MIME type (e.g., 'image/png' -> 'PNG') + pil_format = mime_type.split("/")[-1].upper() + if pil_format == "JPG": + pil_format = "JPEG" + img.save(img_byte_arr, format=pil_format) + img_byte_arr.seek(0) + return img_byte_arr + + +def downscale_image_tensor(image, total_pixels=1536 * 1024) -> torch.Tensor: + """Downscale input image tensor to roughly the specified total pixels.""" + samples = image.movedim(-1, 1) + total = int(total_pixels) + scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) + if scale_by >= 1: + return image + width = round(samples.shape[3] * scale_by) + height = round(samples.shape[2] * scale_by) + + s = common_upscale(samples, width, height, "lanczos", "disabled") + s = s.movedim(1, -1) + return s + + +def tensor_to_data_uri( + image_tensor: torch.Tensor, + total_pixels: int = 2048 * 2048, + mime_type: str = "image/png", +) -> str: + """Converts a tensor image to a Data URI string. + + Args: + image_tensor: Input torch.Tensor image. + total_pixels: Maximum total pixels for potential downscaling. + mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp'). + + Returns: + Data URI string (e.g., 'data:image/png;base64,...'). 
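+
+    Example (sketch; `img` is a [1, H, W, C] float tensor in [0, 1]):
+
+        uri = tensor_to_data_uri(img)
+        assert uri.startswith("data:image/png;base64,")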
+ """ + base64_string = tensor_to_base64_string(image_tensor, total_pixels, mime_type) + return f"data:{mime_type};base64,{base64_string}" + + +def audio_to_base64_string(audio: Input.Audio, container_format: str = "mp4", codec_name: str = "aac") -> str: + """Converts an audio input to a base64 string.""" + sample_rate: int = audio["sample_rate"] + waveform: torch.Tensor = audio["waveform"] + audio_data_np = audio_tensor_to_contiguous_ndarray(waveform) + audio_bytes_io = audio_ndarray_to_bytesio(audio_data_np, sample_rate, container_format, codec_name) + audio_bytes = audio_bytes_io.getvalue() + return base64.b64encode(audio_bytes).decode("utf-8") + + +def audio_ndarray_to_bytesio( + audio_data_np: np.ndarray, + sample_rate: int, + container_format: str = "mp4", + codec_name: str = "aac", +) -> BytesIO: + """ + Encodes a numpy array of audio data into a BytesIO object. + """ + audio_bytes_io = BytesIO() + with av.open(audio_bytes_io, mode="w", format=container_format) as output_container: + audio_stream = output_container.add_stream(codec_name, rate=sample_rate) + frame = av.AudioFrame.from_ndarray( + audio_data_np, + format="fltp", + layout="stereo" if audio_data_np.shape[0] > 1 else "mono", + ) + frame.sample_rate = sample_rate + frame.pts = 0 + + for packet in audio_stream.encode(frame): + output_container.mux(packet) + + # Flush stream + for packet in audio_stream.encode(None): + output_container.mux(packet) + + audio_bytes_io.seek(0) + return audio_bytes_io + + +def audio_tensor_to_contiguous_ndarray(waveform: torch.Tensor) -> np.ndarray: + """ + Prepares audio waveform for av library by converting to a contiguous numpy array. + + Args: + waveform: a tensor of shape (1, channels, samples) derived from a Comfy `AUDIO` type. + + Returns: + Contiguous numpy array of the audio waveform. If the audio was batched, + the first item is taken. + """ + if waveform.ndim != 3 or waveform.shape[0] != 1: + raise ValueError("Expected waveform tensor shape (1, channels, samples)") + + # If batch is > 1, take first item + if waveform.shape[0] > 1: + waveform = waveform[0] + + # Prepare for av: remove batch dim, move to CPU, make contiguous, convert to numpy array + audio_data_np = waveform.squeeze(0).cpu().contiguous().numpy() + if audio_data_np.dtype != np.float32: + audio_data_np = audio_data_np.astype(np.float32) + + return audio_data_np + + +def audio_input_to_mp3(audio: Input.Audio) -> BytesIO: + waveform = audio["waveform"].cpu() + + output_buffer = BytesIO() + output_container = av.open(output_buffer, mode="w", format="mp3") + + out_stream = output_container.add_stream("libmp3lame", rate=audio["sample_rate"]) + out_stream.bit_rate = 320000 + + frame = av.AudioFrame.from_ndarray( + waveform.movedim(0, 1).reshape(1, -1).float().numpy(), + format="flt", + layout="mono" if waveform.shape[0] == 1 else "stereo", + ) + frame.sample_rate = audio["sample_rate"] + frame.pts = 0 + output_container.mux(out_stream.encode(frame)) + output_container.mux(out_stream.encode(None)) + output_container.close() + output_buffer.seek(0) + return output_buffer + + +def trim_video(video: Input.Video, duration_sec: float) -> Input.Video: + """ + Returns a new VideoInput object trimmed from the beginning to the specified duration, + using av to avoid loading entire video into memory. 
+ + Args: + video: Input video to trim + duration_sec: Duration in seconds to keep from the beginning + + Returns: + VideoFromFile object that owns the output buffer + """ + output_buffer = BytesIO() + input_container = None + output_container = None + + try: + # Get the stream source - this avoids loading entire video into memory + # when the source is already a file path + input_source = video.get_stream_source() + + # Open containers + input_container = av.open(input_source, mode="r") + output_container = av.open(output_buffer, mode="w", format="mp4") + + # Set up output streams for re-encoding + video_stream = None + audio_stream = None + + for stream in input_container.streams: + logging.info("Found stream: type=%s, class=%s", stream.type, type(stream)) + if isinstance(stream, av.VideoStream): + # Create output video stream with same parameters + video_stream = output_container.add_stream("h264", rate=stream.average_rate) + video_stream.width = stream.width + video_stream.height = stream.height + video_stream.pix_fmt = "yuv420p" + logging.info("Added video stream: %sx%s @ %sfps", stream.width, stream.height, stream.average_rate) + elif isinstance(stream, av.AudioStream): + # Create output audio stream with same parameters + audio_stream = output_container.add_stream("aac", rate=stream.sample_rate) + audio_stream.sample_rate = stream.sample_rate + audio_stream.layout = stream.layout + logging.info("Added audio stream: %sHz, %s channels", stream.sample_rate, stream.channels) + + # Calculate target frame count that's divisible by 16 + fps = input_container.streams.video[0].average_rate + estimated_frames = int(duration_sec * fps) + target_frames = (estimated_frames // 16) * 16 # Round down to nearest multiple of 16 + + if target_frames == 0: + raise ValueError("Video too short: need at least 16 frames for Moonvalley") + + frame_count = 0 + audio_frame_count = 0 + + # Decode and re-encode video frames + if video_stream: + for frame in input_container.decode(video=0): + if frame_count >= target_frames: + break + + # Re-encode frame + for packet in video_stream.encode(frame): + output_container.mux(packet) + frame_count += 1 + + # Flush encoder + for packet in video_stream.encode(): + output_container.mux(packet) + + logging.info("Encoded %s video frames (target: %s)", frame_count, target_frames) + + # Decode and re-encode audio frames + if audio_stream: + input_container.seek(0) # Reset to beginning for audio + for frame in input_container.decode(audio=0): + if frame.time >= duration_sec: + break + + # Re-encode frame + for packet in audio_stream.encode(frame): + output_container.mux(packet) + audio_frame_count += 1 + + # Flush encoder + for packet in audio_stream.encode(): + output_container.mux(packet) + + logging.info("Encoded %s audio frames", audio_frame_count) + + # Close containers + output_container.close() + input_container.close() + + # Return as VideoFromFile using the buffer + output_buffer.seek(0) + return InputImpl.VideoFromFile(output_buffer) + + except Exception as e: + # Clean up on error + if input_container is not None: + input_container.close() + if output_container is not None: + output_container.close() + raise RuntimeError(f"Failed to trim video: {str(e)}") from e + + +def _f32_pcm(wav: torch.Tensor) -> torch.Tensor: + """Convert audio to float 32 bits PCM format. 
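+    Integer samples are rescaled by the type's full range: int16 is divided by
+    2**15 (so -32768 maps to -1.0 and 16384 to 0.5) and int32 by 2**31.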
Copy-paste from nodes_audio.py file.""" + if wav.dtype.is_floating_point: + return wav + elif wav.dtype == torch.int16: + return wav.float() / (2**15) + elif wav.dtype == torch.int32: + return wav.float() / (2**31) + raise ValueError(f"Unsupported wav dtype: {wav.dtype}") + + +def audio_bytes_to_audio_input(audio_bytes: bytes) -> dict: + """ + Decode any common audio container from bytes using PyAV and return + a Comfy AUDIO dict: {"waveform": [1, C, T] float32, "sample_rate": int}. + """ + with av.open(BytesIO(audio_bytes)) as af: + if not af.streams.audio: + raise ValueError("No audio stream found in response.") + stream = af.streams.audio[0] + + in_sr = int(stream.codec_context.sample_rate) + out_sr = in_sr + + frames: list[torch.Tensor] = [] + n_channels = stream.channels or 1 + + for frame in af.decode(streams=stream.index): + arr = frame.to_ndarray() # shape can be [C, T] or [T, C] or [T] + buf = torch.from_numpy(arr) + if buf.ndim == 1: + buf = buf.unsqueeze(0) # [T] -> [1, T] + elif buf.shape[0] != n_channels and buf.shape[-1] == n_channels: + buf = buf.transpose(0, 1).contiguous() # [T, C] -> [C, T] + elif buf.shape[0] != n_channels: + buf = buf.reshape(-1, n_channels).t().contiguous() # fallback to [C, T] + frames.append(buf) + + if not frames: + raise ValueError("Decoded zero audio frames.") + + wav = torch.cat(frames, dim=1) # [C, T] + wav = _f32_pcm(wav) + return {"waveform": wav.unsqueeze(0).contiguous(), "sample_rate": out_sr} diff --git a/comfy_api_nodes/util/download_helpers.py b/comfy_api_nodes/util/download_helpers.py new file mode 100644 index 000000000..055e690de --- /dev/null +++ b/comfy_api_nodes/util/download_helpers.py @@ -0,0 +1,249 @@ +import asyncio +import contextlib +import uuid +from io import BytesIO +from pathlib import Path +from typing import IO, Optional, Union +from urllib.parse import urljoin, urlparse + +import aiohttp +import torch +from aiohttp.client_exceptions import ClientError, ContentTypeError + +from comfy_api.input_impl import VideoFromFile +from comfy_api.latest import IO as COMFY_IO +from comfy_api_nodes.apis import request_logger + +from ._helpers import ( + default_base_url, + get_auth_header, + is_processing_interrupted, + sleep_with_interrupt, +) +from .client import _diagnose_connectivity +from .common_exceptions import ApiServerError, LocalNetworkError, ProcessingInterrupted +from .conversions import bytesio_to_image_tensor + +_RETRY_STATUS = {408, 429, 500, 502, 503, 504} + + +async def download_url_to_bytesio( + url: str, + dest: Optional[Union[BytesIO, IO[bytes], str, Path]], + *, + timeout: Optional[float] = None, + max_retries: int = 3, + retry_delay: float = 1.0, + retry_backoff: float = 2.0, + cls: type[COMFY_IO.ComfyNode] = None, +) -> None: + """Stream-download a URL to `dest`. + + `dest` must be one of: + - a BytesIO (rewound to 0 after write), + - a file-like object opened in binary write mode (must implement .write()), + - a filesystem path (str | pathlib.Path), which will be opened with 'wb'. + + If `url` starts with `/proxy/`, `cls` must be provided so the URL can be expanded + to an absolute URL and authentication headers can be applied. 
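+
+    A typical call (sketch; the URL and timeout are placeholders):
+
+        buf = BytesIO()
+        await download_url_to_bytesio("https://example.com/image.png", buf, timeout=60.0)
+        image = bytesio_to_image_tensor(buf)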
+ + Raises: + ProcessingInterrupted, LocalNetworkError, ApiServerError, Exception (HTTP and other errors) + """ + if not isinstance(dest, (str, Path)) and not hasattr(dest, "write"): + raise ValueError("dest must be a path (str|Path) or a binary-writable object providing .write().") + + attempt = 0 + delay = retry_delay + headers: dict[str, str] = {} + + parsed_url = urlparse(url) + if not parsed_url.scheme and not parsed_url.netloc: # is URL relative? + if cls is None: + raise ValueError("For relative 'cloud' paths, the `cls` parameter is required.") + url = urljoin(default_base_url().rstrip("/") + "/", url.lstrip("/")) + headers = get_auth_header(cls) + + while True: + attempt += 1 + op_id = _generate_operation_id("GET", url, attempt) + timeout_cfg = aiohttp.ClientTimeout(total=timeout) + + is_path_sink = isinstance(dest, (str, Path)) + fhandle = None + session: Optional[aiohttp.ClientSession] = None + stop_evt: Optional[asyncio.Event] = None + monitor_task: Optional[asyncio.Task] = None + req_task: Optional[asyncio.Task] = None + + try: + with contextlib.suppress(Exception): + request_logger.log_request_response(operation_id=op_id, request_method="GET", request_url=url) + + session = aiohttp.ClientSession(timeout=timeout_cfg) + stop_evt = asyncio.Event() + + async def _monitor(): + try: + while not stop_evt.is_set(): + if is_processing_interrupted(): + return + await asyncio.sleep(1.0) + except asyncio.CancelledError: + return + + monitor_task = asyncio.create_task(_monitor()) + + req_task = asyncio.create_task(session.get(url, headers=headers)) + done, pending = await asyncio.wait({req_task, monitor_task}, return_when=asyncio.FIRST_COMPLETED) + + if monitor_task in done and req_task in pending: + req_task.cancel() + with contextlib.suppress(Exception): + await req_task + raise ProcessingInterrupted("Task cancelled") + + try: + resp = await req_task + except asyncio.CancelledError: + raise ProcessingInterrupted("Task cancelled") from None + + async with resp: + if resp.status >= 400: + with contextlib.suppress(Exception): + try: + body = await resp.json() + except (ContentTypeError, ValueError): + text = await resp.text() + body = text if len(text) <= 4096 else f"[text {len(text)} bytes]" + request_logger.log_request_response( + operation_id=op_id, + request_method="GET", + request_url=url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=body, + error_message=f"HTTP {resp.status}", + ) + + if resp.status in _RETRY_STATUS and attempt <= max_retries: + await sleep_with_interrupt(delay, cls, None, None, None) + delay *= retry_backoff + continue + raise Exception(f"Failed to download (HTTP {resp.status}).") + + if is_path_sink: + p = Path(str(dest)) + with contextlib.suppress(Exception): + p.parent.mkdir(parents=True, exist_ok=True) + fhandle = open(p, "wb") + sink = fhandle + else: + sink = dest # BytesIO or file-like + + written = 0 + while True: + try: + chunk = await asyncio.wait_for(resp.content.read(1024 * 1024), timeout=1.0) + except asyncio.TimeoutError: + chunk = b"" + except asyncio.CancelledError: + raise ProcessingInterrupted("Task cancelled") from None + + if is_processing_interrupted(): + raise ProcessingInterrupted("Task cancelled") + + if not chunk: + if resp.content.at_eof(): + break + continue + + sink.write(chunk) + written += len(chunk) + + if isinstance(dest, BytesIO): + with contextlib.suppress(Exception): + dest.seek(0) + + with contextlib.suppress(Exception): + request_logger.log_request_response( + operation_id=op_id, + 
request_method="GET", + request_url=url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=f"[streamed {written} bytes to dest]", + ) + return + except asyncio.CancelledError: + raise ProcessingInterrupted("Task cancelled") from None + except (ClientError, asyncio.TimeoutError) as e: + if attempt <= max_retries: + with contextlib.suppress(Exception): + request_logger.log_request_response( + operation_id=op_id, + request_method="GET", + request_url=url, + error_message=f"{type(e).__name__}: {str(e)} (will retry)", + ) + await sleep_with_interrupt(delay, cls, None, None, None) + delay *= retry_backoff + continue + + diag = await _diagnose_connectivity() + if diag.get("is_local_issue"): + raise LocalNetworkError( + "Unable to connect to the network. Please check your internet connection and try again." + ) from e + raise ApiServerError("The remote service appears unreachable at this time.") from e + finally: + if stop_evt is not None: + stop_evt.set() + if monitor_task: + monitor_task.cancel() + with contextlib.suppress(Exception): + await monitor_task + if req_task and not req_task.done(): + req_task.cancel() + with contextlib.suppress(Exception): + await req_task + if session: + with contextlib.suppress(Exception): + await session.close() + if fhandle: + with contextlib.suppress(Exception): + fhandle.flush() + fhandle.close() + + +async def download_url_to_image_tensor( + url: str, + *, + timeout: float = None, + cls: type[COMFY_IO.ComfyNode] = None, +) -> torch.Tensor: + """Downloads an image from a URL and returns a [B, H, W, C] tensor.""" + result = BytesIO() + await download_url_to_bytesio(url, result, timeout=timeout, cls=cls) + return bytesio_to_image_tensor(result) + + +async def download_url_to_video_output( + video_url: str, + *, + timeout: float = None, + cls: type[COMFY_IO.ComfyNode] = None, +) -> VideoFromFile: + """Downloads a video from a URL and returns a `VIDEO` output.""" + result = BytesIO() + await download_url_to_bytesio(video_url, result, timeout=timeout, cls=cls) + return VideoFromFile(result) + + +def _generate_operation_id(method: str, url: str, attempt: int) -> str: + try: + parsed = urlparse(url) + slug = (parsed.path.rsplit("/", 1)[-1] or parsed.netloc or "download").strip("/").replace("/", "_") + except Exception: + slug = "download" + return f"{method}_{slug}_try{attempt}_{uuid.uuid4().hex[:8]}" diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py new file mode 100644 index 000000000..a345d451d --- /dev/null +++ b/comfy_api_nodes/util/upload_helpers.py @@ -0,0 +1,338 @@ +import asyncio +import contextlib +import logging +import time +import uuid +from io import BytesIO +from typing import Optional, Union +from urllib.parse import urlparse + +import aiohttp +import torch +from pydantic import BaseModel, Field + +from comfy_api.latest import IO, Input +from comfy_api.util import VideoCodec, VideoContainer +from comfy_api_nodes.apis import request_logger + +from ._helpers import is_processing_interrupted, sleep_with_interrupt +from .client import ( + ApiEndpoint, + _diagnose_connectivity, + _display_time_progress, + sync_op, +) +from .common_exceptions import ApiServerError, LocalNetworkError, ProcessingInterrupted +from .conversions import ( + audio_ndarray_to_bytesio, + audio_tensor_to_contiguous_ndarray, + tensor_to_bytesio, +) + + +class UploadRequest(BaseModel): + file_name: str = Field(..., description="Filename to upload") + content_type: Optional[str] = Field( + None, + 
description="Mime type of the file. For example: image/png, image/jpeg, video/mp4, etc.", + ) + + +class UploadResponse(BaseModel): + download_url: str = Field(..., description="URL to GET uploaded file") + upload_url: str = Field(..., description="URL to PUT file to upload") + + +async def upload_images_to_comfyapi( + cls: type[IO.ComfyNode], + image: torch.Tensor, + *, + max_images: int = 8, + mime_type: Optional[str] = None, + wait_label: Optional[str] = "Uploading", +) -> list[str]: + """ + Uploads images to ComfyUI API and returns download URLs. + To upload multiple images, stack them in the batch dimension first. + """ + # if batch, try to upload each file if max_images is greater than 0 + download_urls: list[str] = [] + is_batch = len(image.shape) > 3 + batch_len = image.shape[0] if is_batch else 1 + + for idx in range(min(batch_len, max_images)): + tensor = image[idx] if is_batch else image + img_io = tensor_to_bytesio(tensor, mime_type=mime_type) + url = await upload_file_to_comfyapi(cls, img_io, img_io.name, mime_type, wait_label) + download_urls.append(url) + return download_urls + + +async def upload_audio_to_comfyapi( + cls: type[IO.ComfyNode], + audio: Input.Audio, + *, + container_format: str = "mp4", + codec_name: str = "aac", + mime_type: str = "audio/mp4", + filename: str = "uploaded_audio.mp4", +) -> str: + """ + Uploads a single audio input to ComfyUI API and returns its download URL. + Encodes the raw waveform into the specified format before uploading. + """ + sample_rate: int = audio["sample_rate"] + waveform: torch.Tensor = audio["waveform"] + audio_data_np = audio_tensor_to_contiguous_ndarray(waveform) + audio_bytes_io = audio_ndarray_to_bytesio(audio_data_np, sample_rate, container_format, codec_name) + return await upload_file_to_comfyapi(cls, audio_bytes_io, filename, mime_type) + + +async def upload_video_to_comfyapi( + cls: type[IO.ComfyNode], + video: Input.Video, + *, + container: VideoContainer = VideoContainer.MP4, + codec: VideoCodec = VideoCodec.H264, + max_duration: Optional[int] = None, +) -> str: + """ + Uploads a single video to ComfyUI API and returns its download URL. + Uses the specified container and codec for saving the video before upload. + """ + if max_duration is not None: + try: + actual_duration = video.get_duration() + if actual_duration > max_duration: + raise ValueError( + f"Video duration ({actual_duration:.2f}s) exceeds the maximum allowed ({max_duration}s)." 
+ ) + except Exception as e: + logging.error("Error getting video duration: %s", str(e)) + raise ValueError(f"Could not verify video duration from source: {e}") from e + + upload_mime_type = f"video/{container.value.lower()}" + filename = f"uploaded_video.{container.value.lower()}" + + # Convert VideoInput to BytesIO using specified container/codec + video_bytes_io = BytesIO() + video.save_to(video_bytes_io, format=container, codec=codec) + video_bytes_io.seek(0) + + return await upload_file_to_comfyapi(cls, video_bytes_io, filename, upload_mime_type) + + +async def upload_file_to_comfyapi( + cls: type[IO.ComfyNode], + file_bytes_io: BytesIO, + filename: str, + upload_mime_type: Optional[str], + wait_label: Optional[str] = "Uploading", +) -> str: + """Uploads a single file to ComfyUI API and returns its download URL.""" + if upload_mime_type is None: + request_object = UploadRequest(file_name=filename) + else: + request_object = UploadRequest(file_name=filename, content_type=upload_mime_type) + create_resp = await sync_op( + cls, + endpoint=ApiEndpoint(path="/customers/storage", method="POST"), + data=request_object, + response_model=UploadResponse, + final_label_on_success=None, + monitor_progress=False, + ) + await upload_file( + cls, + create_resp.upload_url, + file_bytes_io, + content_type=upload_mime_type, + wait_label=wait_label, + ) + return create_resp.download_url + + +async def upload_file( + cls: type[IO.ComfyNode], + upload_url: str, + file: Union[BytesIO, str], + *, + content_type: Optional[str] = None, + max_retries: int = 3, + retry_delay: float = 1.0, + retry_backoff: float = 2.0, + wait_label: Optional[str] = None, +) -> None: + """ + Upload a file to a signed URL (e.g., S3 pre-signed PUT) with retries, Comfy progress display, and interruption. + + Args: + cls: Node class (provides auth context + UI progress hooks). + upload_url: Pre-signed PUT URL. + file: BytesIO or path string. + content_type: Explicit MIME type. If None, we *suppress* Content-Type. + max_retries: Maximum retry attempts. + retry_delay: Initial delay in seconds. + retry_backoff: Exponential backoff factor. + wait_label: Progress label shown in Comfy UI. 
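+
+    Suppressing Content-Type matters for pre-signed URLs: if the signature did
+    not cover that header, one added automatically by the client can invalidate
+    the request. The mechanism in isolation (sketch):
+
+        async with aiohttp.ClientSession() as sess:
+            await sess.put(url, data=payload, skip_auto_headers={"Content-Type"})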
+ + Raises: + ProcessingInterrupted, LocalNetworkError, ApiServerError, Exception + """ + if isinstance(file, BytesIO): + with contextlib.suppress(Exception): + file.seek(0) + data = file.read() + elif isinstance(file, str): + with open(file, "rb") as f: + data = f.read() + else: + raise ValueError("file must be a BytesIO or a filesystem path string") + + headers: dict[str, str] = {} + skip_auto_headers: set[str] = set() + if content_type: + headers["Content-Type"] = content_type + else: + skip_auto_headers.add("Content-Type") # Don't let aiohttp add Content-Type, it can break the signed request + + attempt = 0 + delay = retry_delay + start_ts = time.monotonic() + op_uuid = uuid.uuid4().hex[:8] + while True: + attempt += 1 + operation_id = _generate_operation_id("PUT", upload_url, attempt, op_uuid) + timeout = aiohttp.ClientTimeout(total=None) + stop_evt = asyncio.Event() + + async def _monitor(): + try: + while not stop_evt.is_set(): + if is_processing_interrupted(): + return + if wait_label: + _display_time_progress(cls, wait_label, int(time.monotonic() - start_ts), None) + await asyncio.sleep(1.0) + except asyncio.CancelledError: + return + + monitor_task = asyncio.create_task(_monitor()) + sess: Optional[aiohttp.ClientSession] = None + try: + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method="PUT", + request_url=upload_url, + request_headers=headers or None, + request_params=None, + request_data=f"[File data {len(data)} bytes]", + ) + except Exception as e: + logging.debug("[DEBUG] upload request logging failed: %s", e) + + sess = aiohttp.ClientSession(timeout=timeout) + req = sess.put(upload_url, data=data, headers=headers, skip_auto_headers=skip_auto_headers) + req_task = asyncio.create_task(req) + + done, pending = await asyncio.wait({req_task, monitor_task}, return_when=asyncio.FIRST_COMPLETED) + + if monitor_task in done and req_task in pending: + req_task.cancel() + raise ProcessingInterrupted("Upload cancelled") + + try: + resp = await req_task + except asyncio.CancelledError: + raise ProcessingInterrupted("Upload cancelled") from None + + async with resp: + if resp.status >= 400: + with contextlib.suppress(Exception): + try: + body = await resp.json() + except Exception: + body = await resp.text() + msg = f"Upload failed with status {resp.status}" + request_logger.log_request_response( + operation_id=operation_id, + request_method="PUT", + request_url=upload_url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content=body, + error_message=msg, + ) + if resp.status in {408, 429, 500, 502, 503, 504} and attempt <= max_retries: + await sleep_with_interrupt( + delay, + cls, + wait_label, + start_ts, + None, + display_callback=_display_time_progress if wait_label else None, + ) + delay *= retry_backoff + continue + raise Exception(f"Failed to upload (HTTP {resp.status}).") + try: + request_logger.log_request_response( + operation_id=operation_id, + request_method="PUT", + request_url=upload_url, + response_status_code=resp.status, + response_headers=dict(resp.headers), + response_content="File uploaded successfully.", + ) + except Exception as e: + logging.debug("[DEBUG] upload response logging failed: %s", e) + return + except asyncio.CancelledError: + raise ProcessingInterrupted("Task cancelled") from None + except (aiohttp.ClientError, asyncio.TimeoutError) as e: + if attempt <= max_retries: + with contextlib.suppress(Exception): + request_logger.log_request_response( + operation_id=operation_id, + 
request_method="PUT", + request_url=upload_url, + request_headers=headers or None, + request_data=f"[File data {len(data)} bytes]", + error_message=f"{type(e).__name__}: {str(e)} (will retry)", + ) + await sleep_with_interrupt( + delay, + cls, + wait_label, + start_ts, + None, + display_callback=_display_time_progress if wait_label else None, + ) + delay *= retry_backoff + continue + + diag = await _diagnose_connectivity() + if diag.get("is_local_issue"): + raise LocalNetworkError( + "Unable to connect to the network. Please check your internet connection and try again." + ) from e + raise ApiServerError("The API service appears unreachable at this time.") from e + finally: + stop_evt.set() + if monitor_task: + monitor_task.cancel() + with contextlib.suppress(Exception): + await monitor_task + if sess: + with contextlib.suppress(Exception): + await sess.close() + + +def _generate_operation_id(method: str, url: str, attempt: int, op_uuid: str) -> str: + try: + parsed = urlparse(url) + slug = (parsed.path.rsplit("/", 1)[-1] or parsed.netloc or "upload").strip("/").replace("/", "_") + except Exception: + slug = "upload" + return f"{method}_{slug}_{op_uuid}_try{attempt}" diff --git a/comfy_api_nodes/util/validation_utils.py b/comfy_api_nodes/util/validation_utils.py index ca913e9b3..22da05bc1 100644 --- a/comfy_api_nodes/util/validation_utils.py +++ b/comfy_api_nodes/util/validation_utils.py @@ -2,6 +2,8 @@ import logging from typing import Optional import torch + +from comfy_api.input.video_types import VideoInput from comfy_api.latest import Input @@ -28,9 +30,7 @@ def validate_image_dimensions( if max_width is not None and width > max_width: raise ValueError(f"Image width must be at most {max_width}px, got {width}px") if min_height is not None and height < min_height: - raise ValueError( - f"Image height must be at least {min_height}px, got {height}px" - ) + raise ValueError(f"Image height must be at least {min_height}px, got {height}px") if max_height is not None and height > max_height: raise ValueError(f"Image height must be at most {max_height}px, got {height}px") @@ -44,13 +44,9 @@ def validate_image_aspect_ratio( aspect_ratio = width / height if min_aspect_ratio is not None and aspect_ratio < min_aspect_ratio: - raise ValueError( - f"Image aspect ratio must be at least {min_aspect_ratio}, got {aspect_ratio}" - ) + raise ValueError(f"Image aspect ratio must be at least {min_aspect_ratio}, got {aspect_ratio}") if max_aspect_ratio is not None and aspect_ratio > max_aspect_ratio: - raise ValueError( - f"Image aspect ratio must be at most {max_aspect_ratio}, got {aspect_ratio}" - ) + raise ValueError(f"Image aspect ratio must be at most {max_aspect_ratio}, got {aspect_ratio}") def validate_image_aspect_ratio_range( @@ -58,7 +54,7 @@ def validate_image_aspect_ratio_range( min_ratio: tuple[float, float], # e.g. (1, 4) max_ratio: tuple[float, float], # e.g. 
(4, 1)
     *,
-    strict: bool = True, # True -> (min, max); False -> [min, max]
+    strict: bool = True,  # True -> (min, max); False -> [min, max]
 ) -> float:
     a1, b1 = min_ratio
     a2, b2 = max_ratio
@@ -85,7 +81,7 @@ def validate_aspect_ratio_closeness(
     min_rel: float,
     max_rel: float,
     *,
-    strict: bool = False, # True => exclusive, False => inclusive
+    strict: bool = False,  # True => exclusive, False => inclusive
 ) -> None:
     w1, h1 = get_image_dimensions(start_img)
     w2, h2 = get_image_dimensions(end_img)
@@ -118,9 +114,7 @@ def validate_video_dimensions(
     if max_width is not None and width > max_width:
         raise ValueError(f"Video width must be at most {max_width}px, got {width}px")
     if min_height is not None and height < min_height:
-        raise ValueError(
-            f"Video height must be at least {min_height}px, got {height}px"
-        )
+        raise ValueError(f"Video height must be at least {min_height}px, got {height}px")
     if max_height is not None and height > max_height:
         raise ValueError(f"Video height must be at most {max_height}px, got {height}px")
@@ -138,13 +132,9 @@ def validate_video_duration(
     epsilon = 0.0001
     if min_duration is not None and min_duration - epsilon > duration:
-        raise ValueError(
-            f"Video duration must be at least {min_duration}s, got {duration}s"
-        )
+        raise ValueError(f"Video duration must be at least {min_duration}s, got {duration}s")
     if max_duration is not None and duration > max_duration + epsilon:
-        raise ValueError(
-            f"Video duration must be at most {max_duration}s, got {duration}s"
-        )
+        raise ValueError(f"Video duration must be at most {max_duration}s, got {duration}s")
 def get_number_of_images(images):
@@ -165,3 +155,31 @@ def validate_audio_duration(
         raise ValueError(f"Audio duration must be at least {min_duration}s, got {dur + eps:.2f}s")
     if max_duration is not None and dur - eps > max_duration:
         raise ValueError(f"Audio duration must be at most {max_duration}s, got {dur - eps:.2f}s")
+
+
+def validate_string(
+    string: str,
+    strip_whitespace=True,
+    field_name="prompt",
+    min_length=None,
+    max_length=None,
+):
+    if string is None:
+        raise Exception(f"Field '{field_name}' cannot be empty.")
+    if strip_whitespace:
+        string = string.strip()
+    if min_length and len(string) < min_length:
+        raise Exception(
+            f"Field '{field_name}' cannot be shorter than {min_length} characters; was {len(string)} characters long."
+        )
+    if max_length and len(string) > max_length:
+        raise Exception(
+            f"Field '{field_name}' cannot be longer than {max_length} characters; was {len(string)} characters long."
+        )
+
+
+def validate_container_format_is_mp4(video: VideoInput) -> None:
+    """Validates video container format is MP4."""
+    container_format = video.get_container_format()
+    if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]:
+        raise ValueError(f"Only MP4 container format supported.
Got: {container_format}") diff --git a/pyproject.toml b/pyproject.toml index 0c6b23a25..fcc4854a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,6 +50,8 @@ messages_control.disable = [ "too-many-branches", "too-many-locals", "too-many-arguments", + "too-many-return-statements", + "too-many-nested-blocks", "duplicate-code", "abstract-method", "superfluous-parens", From dd5af0c5871376c377b2e30f9725b67a768eea6f Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 25 Oct 2025 01:48:34 +0300 Subject: [PATCH 0789/1073] convert Tripo API nodes to V3 schema (#10469) --- comfy_api_nodes/apis/tripo_api.py | 15 +- comfy_api_nodes/nodes_tripo.py | 892 ++++++++++++----------- comfy_api_nodes/util/__init__.py | 2 + comfy_api_nodes/util/download_helpers.py | 12 + 4 files changed, 503 insertions(+), 418 deletions(-) diff --git a/comfy_api_nodes/apis/tripo_api.py b/comfy_api_nodes/apis/tripo_api.py index 9f43d4d09..713260e2a 100644 --- a/comfy_api_nodes/apis/tripo_api.py +++ b/comfy_api_nodes/apis/tripo_api.py @@ -1,13 +1,20 @@ from __future__ import annotations -from comfy_api_nodes.apis import ( - TripoModelVersion, - TripoTextureQuality, -) from enum import Enum from typing import Optional, List, Dict, Any, Union from pydantic import BaseModel, Field, RootModel +class TripoModelVersion(str, Enum): + v2_5_20250123 = 'v2.5-20250123' + v2_0_20240919 = 'v2.0-20240919' + v1_4_20240625 = 'v1.4-20240625' + + +class TripoTextureQuality(str, Enum): + standard = 'standard' + detailed = 'detailed' + + class TripoStyle(str, Enum): PERSON_TO_CARTOON = "person:person2cartoon" ANIMAL_VENOM = "animal:venom" diff --git a/comfy_api_nodes/nodes_tripo.py b/comfy_api_nodes/nodes_tripo.py index d08cf9007..697100ff2 100644 --- a/comfy_api_nodes/nodes_tripo.py +++ b/comfy_api_nodes/nodes_tripo.py @@ -1,46 +1,39 @@ import os -from folder_paths import get_output_directory -from comfy_api_nodes.mapper_utils import model_field_to_node_input -from comfy.comfy_types.node_typing import IO -from comfy_api_nodes.apis import ( - TripoOrientation, - TripoModelVersion, -) +from typing import Optional + +import torch +from typing_extensions import override + +from comfy_api.latest import IO, ComfyExtension from comfy_api_nodes.apis.tripo_api import ( - TripoTaskType, - TripoStyle, - TripoFileReference, + TripoAnimateRetargetRequest, + TripoAnimateRigRequest, + TripoConvertModelRequest, TripoFileEmptyReference, - TripoUrlReference, + TripoFileReference, + TripoImageToModelRequest, + TripoModelVersion, + TripoMultiviewToModelRequest, + TripoOrientation, + TripoRefineModelRequest, + TripoStyle, TripoTaskResponse, TripoTaskStatus, + TripoTaskType, TripoTextToModelRequest, - TripoImageToModelRequest, - TripoMultiviewToModelRequest, TripoTextureModelRequest, - TripoRefineModelRequest, - TripoAnimateRigRequest, - TripoAnimateRetargetRequest, - TripoConvertModelRequest, + TripoUrlReference, ) - -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, - EmptyRequest, -) -from comfy_api_nodes.apinode_utils import ( + download_url_as_bytesio, + poll_op, + sync_op, upload_images_to_comfyapi, - download_url_to_bytesio, ) +from folder_paths import get_output_directory -async def upload_image_to_tripo(image, **kwargs): - urls = await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=kwargs) - return TripoFileReference(TripoUrlReference(url=urls[0], type="jpeg")) - def 
get_model_url_from_response(response: TripoTaskResponse) -> str: if response.data is not None: for key in ["pbr_model", "model", "base_model"]: @@ -50,20 +43,18 @@ def get_model_url_from_response(response: TripoTaskResponse) -> str: async def poll_until_finished( - kwargs: dict[str, str], + node_cls: type[IO.ComfyNode], response: TripoTaskResponse, -) -> tuple[str, str]: + average_duration: Optional[int] = None, +) -> IO.NodeOutput: """Polls the Tripo API endpoint until the task reaches a terminal state, then returns the response.""" if response.code != 0: raise RuntimeError(f"Failed to generate mesh: {response.error}") task_id = response.data.task_id - response_poll = await PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/tripo/v2/openapi/task/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=TripoTaskResponse, - ), + response_poll = await poll_op( + node_cls, + poll_endpoint=ApiEndpoint(path=f"/proxy/tripo/v2/openapi/task/{task_id}"), + response_model=TripoTaskResponse, completed_statuses=[TripoTaskStatus.SUCCESS], failed_statuses=[ TripoTaskStatus.FAILED, @@ -73,72 +64,84 @@ async def poll_until_finished( TripoTaskStatus.EXPIRED, ], status_extractor=lambda x: x.data.status, - auth_kwargs=kwargs, - node_id=kwargs["unique_id"], - result_url_extractor=get_model_url_from_response, progress_extractor=lambda x: x.data.progress, - ).execute() + estimated_duration=average_duration, + ) if response_poll.data.status == TripoTaskStatus.SUCCESS: url = get_model_url_from_response(response_poll) - bytesio = await download_url_to_bytesio(url) + bytesio = await download_url_as_bytesio(url) # Save the downloaded model file model_file = f"tripo_model_{task_id}.glb" with open(os.path.join(get_output_directory(), model_file), "wb") as f: f.write(bytesio.getvalue()) - return model_file, task_id + return IO.NodeOutput(model_file, task_id) raise RuntimeError(f"Failed to generate mesh: {response_poll}") -class TripoTextToModelNode: +class TripoTextToModelNode(IO.ComfyNode): """ Generates 3D models synchronously based on a text prompt using Tripo's API. 
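
    Like every Tripo node in this file, it submits the task with `sync_op` and
    then defers to `poll_until_finished` defined above (sketch; the request
    payload is abbreviated):

        response = await sync_op(
            cls,
            endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"),
            response_model=TripoTaskResponse,
            data=request,  # one of the Tripo*Request models
        )
        return await poll_until_finished(cls, response, average_duration=80)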
""" - AVERAGE_DURATION = 80 + @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ("STRING", {"multiline": True}), - }, - "optional": { - "negative_prompt": ("STRING", {"multiline": True}), - "model_version": model_field_to_node_input(IO.COMBO, TripoTextToModelRequest, "model_version", enum_type=TripoModelVersion), - "style": model_field_to_node_input(IO.COMBO, TripoTextToModelRequest, "style", enum_type=TripoStyle, default="None"), - "texture": ("BOOLEAN", {"default": True}), - "pbr": ("BOOLEAN", {"default": True}), - "image_seed": ("INT", {"default": 42}), - "model_seed": ("INT", {"default": 42}), - "texture_seed": ("INT", {"default": 42}), - "texture_quality": (["standard", "detailed"], {"default": "standard"}), - "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), - "quad": ("BOOLEAN", {"default": False}) - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls): + return IO.Schema( + node_id="TripoTextToModelNode", + display_name="Tripo: Text to Model", + category="api node/3d/Tripo", + inputs=[ + IO.String.Input("prompt", multiline=True), + IO.String.Input("negative_prompt", multiline=True, optional=True), + IO.Combo.Input( + "model_version", options=TripoModelVersion, default=TripoModelVersion.v2_5_20250123, optional=True + ), + IO.Combo.Input("style", options=TripoStyle, default="None", optional=True), + IO.Boolean.Input("texture", default=True, optional=True), + IO.Boolean.Input("pbr", default=True, optional=True), + IO.Int.Input("image_seed", default=42, optional=True), + IO.Int.Input("model_seed", default=42, optional=True), + IO.Int.Input("texture_seed", default=42, optional=True), + IO.Combo.Input("texture_quality", default="standard", options=["standard", "detailed"], optional=True), + IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True), + IO.Boolean.Input("quad", default=False, optional=True), + ], + outputs=[ + IO.String.Output(display_name="model_file"), + IO.Custom("MODEL_TASK_ID").Output(display_name="model task_id"), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) - RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) - RETURN_NAMES = ("model_file", "model task_id") - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - - async def generate_mesh(self, prompt, negative_prompt=None, model_version=None, style=None, texture=None, pbr=None, image_seed=None, model_seed=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): + @classmethod + async def execute( + cls, + prompt: str, + negative_prompt: Optional[str] = None, + model_version=None, + style: Optional[str] = None, + texture: Optional[bool] = None, + pbr: Optional[bool] = None, + image_seed: Optional[int] = None, + model_seed: Optional[int] = None, + texture_seed: Optional[int] = None, + texture_quality: Optional[str] = None, + face_limit: Optional[int] = None, + quad: Optional[bool] = None, + ) -> IO.NodeOutput: style_enum = None if style == "None" else style if not prompt: raise RuntimeError("Prompt is required") - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoTextToModelRequest, - response_model=TripoTaskResponse, - ), - request=TripoTextToModelRequest( + response = await sync_op( + 
cls, + endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoTextToModelRequest( type=TripoTaskType.TEXT_TO_MODEL, prompt=prompt, negative_prompt=negative_prompt if negative_prompt else None, @@ -152,64 +155,89 @@ class TripoTextToModelNode: texture_quality=texture_quality, face_limit=face_limit, auto_size=True, - quad=quad + quad=quad, ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) + ) + return await poll_until_finished(cls, response, average_duration=80) -class TripoImageToModelNode: +class TripoImageToModelNode(IO.ComfyNode): """ Generates 3D models synchronously based on a single image using Tripo's API. """ - AVERAGE_DURATION = 80 + @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - }, - "optional": { - "model_version": model_field_to_node_input(IO.COMBO, TripoImageToModelRequest, "model_version", enum_type=TripoModelVersion), - "style": model_field_to_node_input(IO.COMBO, TripoTextToModelRequest, "style", enum_type=TripoStyle, default="None"), - "texture": ("BOOLEAN", {"default": True}), - "pbr": ("BOOLEAN", {"default": True}), - "model_seed": ("INT", {"default": 42}), - "orientation": model_field_to_node_input(IO.COMBO, TripoImageToModelRequest, "orientation", enum_type=TripoOrientation), - "texture_seed": ("INT", {"default": 42}), - "texture_quality": (["standard", "detailed"], {"default": "standard"}), - "texture_alignment": (["original_image", "geometry"], {"default": "original_image"}), - "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), - "quad": ("BOOLEAN", {"default": False}) - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls): + return IO.Schema( + node_id="TripoImageToModelNode", + display_name="Tripo: Image to Model", + category="api node/3d/Tripo", + inputs=[ + IO.Image.Input("image"), + IO.Combo.Input( + "model_version", + options=TripoModelVersion, + tooltip="The model version to use for generation", + optional=True, + ), + IO.Combo.Input("style", options=TripoStyle, default="None", optional=True), + IO.Boolean.Input("texture", default=True, optional=True), + IO.Boolean.Input("pbr", default=True, optional=True), + IO.Int.Input("model_seed", default=42, optional=True), + IO.Combo.Input( + "orientation", options=TripoOrientation, default=TripoOrientation.DEFAULT, optional=True + ), + IO.Int.Input("texture_seed", default=42, optional=True), + IO.Combo.Input("texture_quality", default="standard", options=["standard", "detailed"], optional=True), + IO.Combo.Input( + "texture_alignment", default="original_image", options=["original_image", "geometry"], optional=True + ), + IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True), + IO.Boolean.Input("quad", default=False, optional=True), + ], + outputs=[ + IO.String.Output(display_name="model_file"), + IO.Custom("MODEL_TASK_ID").Output(display_name="model task_id"), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) - RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) - RETURN_NAMES = ("model_file", "model task_id") - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - - async def generate_mesh(self, image, model_version=None, style=None, texture=None, pbr=None, model_seed=None, orientation=None, 
texture_alignment=None, texture_seed=None, texture_quality=None, face_limit=None, quad=None, **kwargs): + @classmethod + async def execute( + cls, + image: torch.Tensor, + model_version: Optional[str] = None, + style: Optional[str] = None, + texture: Optional[bool] = None, + pbr: Optional[bool] = None, + model_seed: Optional[int] = None, + orientation=None, + texture_seed: Optional[int] = None, + texture_quality: Optional[str] = None, + texture_alignment: Optional[str] = None, + face_limit: Optional[int] = None, + quad: Optional[bool] = None, + ) -> IO.NodeOutput: style_enum = None if style == "None" else style if image is None: raise RuntimeError("Image is required") - tripo_file = await upload_image_to_tripo(image, **kwargs) - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoImageToModelRequest, - response_model=TripoTaskResponse, - ), - request=TripoImageToModelRequest( + tripo_file = TripoFileReference( + root=TripoUrlReference( + url=(await upload_images_to_comfyapi(cls, image, max_images=1))[0], + type="jpeg", + ) + ) + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoImageToModelRequest( type=TripoTaskType.IMAGE_TO_MODEL, file=tripo_file, model_version=model_version, @@ -223,80 +251,105 @@ class TripoImageToModelNode: texture_quality=texture_quality, face_limit=face_limit, auto_size=True, - quad=quad + quad=quad, ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) + ) + return await poll_until_finished(cls, response, average_duration=80) -class TripoMultiviewToModelNode: +class TripoMultiviewToModelNode(IO.ComfyNode): """ Generates 3D models synchronously based on up to four images (front, left, back, right) using Tripo's API. 
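    Missing side views are sent as empty file references, so the fixed
    front/left/back/right ordering of the request is preserved.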
""" - AVERAGE_DURATION = 80 + @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - }, - "optional": { - "image_left": ("IMAGE",), - "image_back": ("IMAGE",), - "image_right": ("IMAGE",), - "model_version": model_field_to_node_input(IO.COMBO, TripoMultiviewToModelRequest, "model_version", enum_type=TripoModelVersion), - "orientation": model_field_to_node_input(IO.COMBO, TripoImageToModelRequest, "orientation", enum_type=TripoOrientation), - "texture": ("BOOLEAN", {"default": True}), - "pbr": ("BOOLEAN", {"default": True}), - "model_seed": ("INT", {"default": 42}), - "texture_seed": ("INT", {"default": 42}), - "texture_quality": (["standard", "detailed"], {"default": "standard"}), - "texture_alignment": (["original_image", "geometry"], {"default": "original_image"}), - "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), - "quad": ("BOOLEAN", {"default": False}) - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls): + return IO.Schema( + node_id="TripoMultiviewToModelNode", + display_name="Tripo: Multiview to Model", + category="api node/3d/Tripo", + inputs=[ + IO.Image.Input("image"), + IO.Image.Input("image_left", optional=True), + IO.Image.Input("image_back", optional=True), + IO.Image.Input("image_right", optional=True), + IO.Combo.Input( + "model_version", + options=TripoModelVersion, + optional=True, + tooltip="The model version to use for generation", + ), + IO.Combo.Input( + "orientation", + options=TripoOrientation, + default=TripoOrientation.DEFAULT, + optional=True, + ), + IO.Boolean.Input("texture", default=True, optional=True), + IO.Boolean.Input("pbr", default=True, optional=True), + IO.Int.Input("model_seed", default=42, optional=True), + IO.Int.Input("texture_seed", default=42, optional=True), + IO.Combo.Input("texture_quality", default="standard", options=["standard", "detailed"], optional=True), + IO.Combo.Input( + "texture_alignment", default="original_image", options=["original_image", "geometry"], optional=True + ), + IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True), + IO.Boolean.Input("quad", default=False, optional=True), + ], + outputs=[ + IO.String.Output(display_name="model_file"), + IO.Custom("MODEL_TASK_ID").Output(display_name="model task_id"), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) - RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) - RETURN_NAMES = ("model_file", "model task_id") - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - - async def generate_mesh(self, image, image_left=None, image_back=None, image_right=None, model_version=None, orientation=None, texture=None, pbr=None, model_seed=None, texture_seed=None, texture_quality=None, texture_alignment=None, face_limit=None, quad=None, **kwargs): + @classmethod + async def execute( + cls, + image: torch.Tensor, + image_left: Optional[torch.Tensor] = None, + image_back: Optional[torch.Tensor] = None, + image_right: Optional[torch.Tensor] = None, + model_version: Optional[str] = None, + orientation: Optional[str] = None, + texture: Optional[bool] = None, + pbr: Optional[bool] = None, + model_seed: Optional[int] = None, + texture_seed: Optional[int] = None, + texture_quality: Optional[str] = None, + texture_alignment: Optional[str] = None, + face_limit: Optional[int] = None, + 
quad: Optional[bool] = None, + ) -> IO.NodeOutput: if image is None: raise RuntimeError("front image for multiview is required") images = [] - image_dict = { - "image": image, - "image_left": image_left, - "image_back": image_back, - "image_right": image_right - } + image_dict = {"image": image, "image_left": image_left, "image_back": image_back, "image_right": image_right} if image_left is None and image_back is None and image_right is None: raise RuntimeError("At least one of left, back, or right image must be provided for multiview") for image_name in ["image", "image_left", "image_back", "image_right"]: image_ = image_dict[image_name] if image_ is not None: - tripo_file = await upload_image_to_tripo(image_, **kwargs) - images.append(tripo_file) + images.append( + TripoFileReference( + root=TripoUrlReference( + url=(await upload_images_to_comfyapi(cls, image_, max_images=1))[0], type="jpeg" + ) + ) + ) else: images.append(TripoFileEmptyReference()) - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoMultiviewToModelRequest, - response_model=TripoTaskResponse, - ), - request=TripoMultiviewToModelRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoMultiviewToModelRequest( type=TripoTaskType.MULTIVIEW_TO_MODEL, files=images, model_version=model_version, @@ -310,272 +363,283 @@ class TripoMultiviewToModelNode: face_limit=face_limit, quad=quad, ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) + ) + return await poll_until_finished(cls, response, average_duration=80) -class TripoTextureNode: +class TripoTextureNode(IO.ComfyNode): + @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model_task_id": ("MODEL_TASK_ID",), - }, - "optional": { - "texture": ("BOOLEAN", {"default": True}), - "pbr": ("BOOLEAN", {"default": True}), - "texture_seed": ("INT", {"default": 42}), - "texture_quality": (["standard", "detailed"], {"default": "standard"}), - "texture_alignment": (["original_image", "geometry"], {"default": "original_image"}), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls): + return IO.Schema( + node_id="TripoTextureNode", + display_name="Tripo: Texture model", + category="api node/3d/Tripo", + inputs=[ + IO.Custom("MODEL_TASK_ID").Input("model_task_id"), + IO.Boolean.Input("texture", default=True, optional=True), + IO.Boolean.Input("pbr", default=True, optional=True), + IO.Int.Input("texture_seed", default=42, optional=True), + IO.Combo.Input("texture_quality", default="standard", options=["standard", "detailed"], optional=True), + IO.Combo.Input( + "texture_alignment", default="original_image", options=["original_image", "geometry"], optional=True + ), + ], + outputs=[ + IO.String.Output(display_name="model_file"), + IO.Custom("MODEL_TASK_ID").Output(display_name="model task_id"), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) - RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) - RETURN_NAMES = ("model_file", "model task_id") - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - AVERAGE_DURATION = 80 - - async def generate_mesh(self, model_task_id, texture=None, pbr=None, 
texture_seed=None, texture_quality=None, texture_alignment=None, **kwargs): - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoTextureModelRequest, - response_model=TripoTaskResponse, - ), - request=TripoTextureModelRequest( + @classmethod + async def execute( + cls, + model_task_id, + texture: Optional[bool] = None, + pbr: Optional[bool] = None, + texture_seed: Optional[int] = None, + texture_quality: Optional[str] = None, + texture_alignment: Optional[str] = None, + ) -> IO.NodeOutput: + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoTextureModelRequest( original_model_task_id=model_task_id, texture=texture, pbr=pbr, texture_seed=texture_seed, texture_quality=texture_quality, - texture_alignment=texture_alignment + texture_alignment=texture_alignment, ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) + ) + return await poll_until_finished(cls, response, average_duration=80) -class TripoRefineNode: +class TripoRefineNode(IO.ComfyNode): + @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model_task_id": ("MODEL_TASK_ID", { - "tooltip": "Must be a v1.4 Tripo model" - }), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls): + return IO.Schema( + node_id="TripoRefineNode", + display_name="Tripo: Refine Draft model", + category="api node/3d/Tripo", + description="Refine a draft model created by v1.4 Tripo models only.", + inputs=[ + IO.Custom("MODEL_TASK_ID").Input("model_task_id", tooltip="Must be a v1.4 Tripo model"), + ], + outputs=[ + IO.String.Output(display_name="model_file"), + IO.Custom("MODEL_TASK_ID").Output(display_name="model task_id"), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) - DESCRIPTION = "Refine a draft model created by v1.4 Tripo models only." 
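Every Tripo node in this patch ends the same way: submit the task with sync_op, then hand the
response to poll_until_finished together with an average_duration hint for the progress bar. A
minimal sketch of that submit-then-poll shape, with a stubbed fetch_task_status standing in for
the real authenticated status request (the stub and its payload are illustrative, not the actual
helper):

import asyncio


async def fetch_task_status(task_id: str) -> dict:
    # Hypothetical stub; the real helper re-queries the Tripo task endpoint.
    await asyncio.sleep(0.1)
    return {"status": "success", "output": {"pbr_model": f"https://example.invalid/{task_id}.glb"}}


async def poll_until_done(task_id: str, interval: float = 2.0) -> dict:
    terminal = {"success", "failed", "cancelled"}  # illustrative terminal states
    while True:
        task = await fetch_task_status(task_id)
        if task["status"] in terminal:
            return task
        await asyncio.sleep(interval)


print(asyncio.run(poll_until_done("task-123")))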
- - RETURN_TYPES = ("STRING", "MODEL_TASK_ID",) - RETURN_NAMES = ("model_file", "model task_id") - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - AVERAGE_DURATION = 240 - - async def generate_mesh(self, model_task_id, **kwargs): - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoRefineModelRequest, - response_model=TripoTaskResponse, - ), - request=TripoRefineModelRequest( - draft_model_task_id=model_task_id - ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) - - -class TripoRigNode: @classmethod - def INPUT_TYPES(s): - return { - "required": { - "original_model_task_id": ("MODEL_TASK_ID",), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = ("STRING", "RIG_TASK_ID") - RETURN_NAMES = ("model_file", "rig task_id") - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - AVERAGE_DURATION = 180 - - async def generate_mesh(self, original_model_task_id, **kwargs): - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoAnimateRigRequest, - response_model=TripoTaskResponse, - ), - request=TripoAnimateRigRequest( - original_model_task_id=original_model_task_id, - out_format="glb", - spec="tripo" - ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) + async def execute(cls, model_task_id) -> IO.NodeOutput: + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoRefineModelRequest(draft_model_task_id=model_task_id), + ) + return await poll_until_finished(cls, response, average_duration=240) -class TripoRetargetNode: +class TripoRigNode(IO.ComfyNode): + @classmethod - def INPUT_TYPES(s): - return { - "required": { - "original_model_task_id": ("RIG_TASK_ID",), - "animation": ([ - "preset:idle", - "preset:walk", - "preset:climb", - "preset:jump", - "preset:slash", - "preset:shoot", - "preset:hurt", - "preset:fall", - "preset:turn", - ],), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def define_schema(cls): + return IO.Schema( + node_id="TripoRigNode", + display_name="Tripo: Rig model", + category="api node/3d/Tripo", + inputs=[IO.Custom("MODEL_TASK_ID").Input("original_model_task_id")], + outputs=[ + IO.String.Output(display_name="model_file"), + IO.Custom("RIG_TASK_ID").Output(display_name="rig task_id"), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) - RETURN_TYPES = ("STRING", "RETARGET_TASK_ID") - RETURN_NAMES = ("model_file", "retarget task_id") - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - AVERAGE_DURATION = 30 + @classmethod + async def execute(cls, original_model_task_id) -> IO.NodeOutput: + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoAnimateRigRequest(original_model_task_id=original_model_task_id, out_format="glb", spec="tripo"), + ) + return await 
poll_until_finished(cls, response, average_duration=180) - async def generate_mesh(self, animation, original_model_task_id, **kwargs): - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoAnimateRetargetRequest, - response_model=TripoTaskResponse, - ), - request=TripoAnimateRetargetRequest( + +class TripoRetargetNode(IO.ComfyNode): + + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="TripoRetargetNode", + display_name="Tripo: Retarget rigged model", + category="api node/3d/Tripo", + inputs=[ + IO.Custom("RIG_TASK_ID").Input("original_model_task_id"), + IO.Combo.Input( + "animation", + options=[ + "preset:idle", + "preset:walk", + "preset:climb", + "preset:jump", + "preset:slash", + "preset:shoot", + "preset:hurt", + "preset:fall", + "preset:turn", + ], + ), + ], + outputs=[ + IO.String.Output(display_name="model_file"), + IO.Custom("RETARGET_TASK_ID").Output(display_name="retarget task_id"), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) + + @classmethod + async def execute(cls, original_model_task_id, animation: str) -> IO.NodeOutput: + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoAnimateRetargetRequest( original_model_task_id=original_model_task_id, animation=animation, out_format="glb", - bake_animation=True + bake_animation=True, ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) + ) + return await poll_until_finished(cls, response, average_duration=30) -class TripoConversionNode: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "original_model_task_id": ("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID",), - "format": (["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"],), - }, - "optional": { - "quad": ("BOOLEAN", {"default": False}), - "face_limit": ("INT", {"min": -1, "max": 500000, "default": -1}), - "texture_size": ("INT", {"min": 128, "max": 4096, "default": 4096}), - "texture_format": (["BMP", "DPX", "HDR", "JPEG", "OPEN_EXR", "PNG", "TARGA", "TIFF", "WEBP"], {"default": "JPEG"}) - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } +class TripoConversionNode(IO.ComfyNode): @classmethod - def VALIDATE_INPUTS(cls, input_types): + def define_schema(cls): + return IO.Schema( + node_id="TripoConversionNode", + display_name="Tripo: Convert model", + category="api node/3d/Tripo", + inputs=[ + IO.Custom("MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID").Input("original_model_task_id"), + IO.Combo.Input("format", options=["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]), + IO.Boolean.Input("quad", default=False, optional=True), + IO.Int.Input( + "face_limit", + default=-1, + min=-1, + max=500000, + optional=True, + ), + IO.Int.Input( + "texture_size", + default=4096, + min=128, + max=4096, + optional=True, + ), + IO.Combo.Input( + "texture_format", + options=["BMP", "DPX", "HDR", "JPEG", "OPEN_EXR", "PNG", "TARGA", "TIFF", "WEBP"], + default="JPEG", + optional=True, + ), + ], + outputs=[], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + is_output_node=True, + ) + + @classmethod + def validate_inputs(cls, input_types): # The min and max of input1 and input2 
are still validated because # we didn't take `input1` or `input2` as arguments if input_types["original_model_task_id"] not in ("MODEL_TASK_ID", "RIG_TASK_ID", "RETARGET_TASK_ID"): return "original_model_task_id must be MODEL_TASK_ID, RIG_TASK_ID or RETARGET_TASK_ID type" return True - RETURN_TYPES = () - FUNCTION = "generate_mesh" - CATEGORY = "api node/3d/Tripo" - API_NODE = True - OUTPUT_NODE = True - AVERAGE_DURATION = 30 - - async def generate_mesh(self, original_model_task_id, format, quad, face_limit, texture_size, texture_format, **kwargs): + @classmethod + async def execute( + cls, + original_model_task_id, + format: str, + quad: bool, + face_limit: int, + texture_size: int, + texture_format: str, + ) -> IO.NodeOutput: if not original_model_task_id: raise RuntimeError("original_model_task_id is required") - response = await SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/tripo/v2/openapi/task", - method=HttpMethod.POST, - request_model=TripoConvertModelRequest, - response_model=TripoTaskResponse, - ), - request=TripoConvertModelRequest( + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), + response_model=TripoTaskResponse, + data=TripoConvertModelRequest( original_model_task_id=original_model_task_id, format=format, quad=quad if quad else None, face_limit=face_limit if face_limit != -1 else None, texture_size=texture_size if texture_size != 4096 else None, - texture_format=texture_format if texture_format != "JPEG" else None + texture_format=texture_format if texture_format != "JPEG" else None, ), - auth_kwargs=kwargs, - ).execute() - return await poll_until_finished(kwargs, response) + ) + return await poll_until_finished(cls, response, average_duration=30) -NODE_CLASS_MAPPINGS = { - "TripoTextToModelNode": TripoTextToModelNode, - "TripoImageToModelNode": TripoImageToModelNode, - "TripoMultiviewToModelNode": TripoMultiviewToModelNode, - "TripoTextureNode": TripoTextureNode, - "TripoRefineNode": TripoRefineNode, - "TripoRigNode": TripoRigNode, - "TripoRetargetNode": TripoRetargetNode, - "TripoConversionNode": TripoConversionNode, -} +class TripoExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + TripoTextToModelNode, + TripoImageToModelNode, + TripoMultiviewToModelNode, + TripoTextureNode, + TripoRefineNode, + TripoRigNode, + TripoRetargetNode, + TripoConversionNode, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "TripoTextToModelNode": "Tripo: Text to Model", - "TripoImageToModelNode": "Tripo: Image to Model", - "TripoMultiviewToModelNode": "Tripo: Multiview to Model", - "TripoTextureNode": "Tripo: Texture model", - "TripoRefineNode": "Tripo: Refine Draft model", - "TripoRigNode": "Tripo: Rig model", - "TripoRetargetNode": "Tripo: Retarget rigged model", - "TripoConversionNode": "Tripo: Convert model", -} + +async def comfy_entrypoint() -> TripoExtension: + return TripoExtension() diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py index c2ec391aa..ab96760cb 100644 --- a/comfy_api_nodes/util/__init__.py +++ b/comfy_api_nodes/util/__init__.py @@ -20,6 +20,7 @@ from .conversions import ( trim_video, ) from .download_helpers import ( + download_url_as_bytesio, download_url_to_bytesio, download_url_to_image_tensor, download_url_to_video_output, @@ -56,6 +57,7 @@ __all__ = [ "upload_images_to_comfyapi", "upload_video_to_comfyapi", # Download helpers + "download_url_as_bytesio", "download_url_to_bytesio", 
"download_url_to_image_tensor", "download_url_to_video_output", diff --git a/comfy_api_nodes/util/download_helpers.py b/comfy_api_nodes/util/download_helpers.py index 055e690de..791dd5a50 100644 --- a/comfy_api_nodes/util/download_helpers.py +++ b/comfy_api_nodes/util/download_helpers.py @@ -240,6 +240,18 @@ async def download_url_to_video_output( return VideoFromFile(result) +async def download_url_as_bytesio( + url: str, + *, + timeout: float = None, + cls: type[COMFY_IO.ComfyNode] = None, +) -> BytesIO: + """Downloads content from a URL and returns a new BytesIO (rewound to 0).""" + result = BytesIO() + await download_url_to_bytesio(url, result, timeout=timeout, cls=cls) + return result + + def _generate_operation_id(method: str, url: str, attempt: int) -> str: try: parsed = urlparse(url) From 426cde37f10dc391f9601ab938e02c0faa42db14 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 24 Oct 2025 16:56:51 -0700 Subject: [PATCH 0790/1073] Remove useless function (#10472) --- comfy/model_management.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 79d6ff9d4..cf015a29a 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -998,12 +998,6 @@ def device_supports_non_blocking(device): return False return True -def device_should_use_non_blocking(device): - if not device_supports_non_blocking(device): - return False - return False - # return True #TODO: figure out why this causes memory issues on Nvidia and possibly others - def force_channels_last(): if args.force_channels_last: return True From e86b79ab9ea7e740b80490353f3f5763840ede81 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 26 Oct 2025 00:35:30 +0300 Subject: [PATCH 0791/1073] convert Gemini API nodes to V3 schema (#10476) --- comfy_api_nodes/apinode_utils.py | 26 -- comfy_api_nodes/nodes_gemini.py | 629 +++++++++++----------------- comfy_api_nodes/util/__init__.py | 2 + comfy_api_nodes/util/conversions.py | 25 ++ 4 files changed, 282 insertions(+), 400 deletions(-) diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index e3d282059..4182c8f80 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -3,8 +3,6 @@ import aiohttp import mimetypes from typing import Optional, Union from comfy.utils import common_upscale -from comfy_api.util import VideoContainer, VideoCodec -from comfy_api.input.video_types import VideoInput from comfy_api_nodes.apis.client import ( ApiClient, ApiEndpoint, @@ -209,30 +207,6 @@ async def upload_file_to_comfyapi( return response.download_url -def video_to_base64_string( - video: VideoInput, - container_format: VideoContainer = None, - codec: VideoCodec = None -) -> str: - """ - Converts a video input to a base64 string. 
- - Args: - video: The video input to convert - container_format: Optional container format to use (defaults to video.container if available) - codec: Optional codec to use (defaults to video.codec if available) - """ - video_bytes_io = BytesIO() - - # Use provided format/codec if specified, otherwise use video's own if available - format_to_use = container_format if container_format is not None else getattr(video, 'container', VideoContainer.MP4) - codec_to_use = codec if codec is not None else getattr(video, 'codec', VideoCodec.H264) - - video.save_to(video_bytes_io, format=format_to_use, codec=codec_to_use) - video_bytes_io.seek(0) - return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8") - - async def upload_images_to_comfyapi( image: torch.Tensor, max_images=8, diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index ca11b67ed..67f2469ad 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -2,42 +2,47 @@ API Nodes for Gemini Multimodal LLM Usage via Remote API See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference """ + from __future__ import annotations -import json -import time -import os -import uuid import base64 -from io import BytesIO +import json +import os +import time +import uuid from enum import Enum -from typing import Optional, Literal +from io import BytesIO +from typing import Literal, Optional import torch +from typing_extensions import override import folder_paths -from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict -from server import PromptServer +from comfy_api.latest import IO, ComfyExtension, Input +from comfy_api.util import VideoCodec, VideoContainer from comfy_api_nodes.apis import ( GeminiContent, GeminiGenerateContentRequest, GeminiGenerateContentResponse, GeminiInlineData, - GeminiPart, GeminiMimeType, + GeminiPart, ) -from comfy_api_nodes.apis.gemini_api import GeminiImageGenerationConfig, GeminiImageGenerateContentRequest, GeminiImageConfig -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.apis.gemini_api import ( + GeminiImageConfig, + GeminiImageGenerateContentRequest, + GeminiImageGenerationConfig, +) +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, -) -from comfy_api_nodes.apinode_utils import ( + audio_to_base64_string, + bytesio_to_image_tensor, + sync_op, + tensor_to_base64_string, + validate_string, video_to_base64_string, ) -from comfy_api_nodes.util import validate_string, tensor_to_base64_string, bytesio_to_image_tensor, audio_to_base64_string -from comfy_api.util import VideoContainer, VideoCodec - +from server import PromptServer GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini" GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024 # 20 MB @@ -63,50 +68,6 @@ class GeminiImageModel(str, Enum): gemini_2_5_flash_image = "gemini-2.5-flash-image" -def get_gemini_endpoint( - model: GeminiModel, -) -> ApiEndpoint[GeminiGenerateContentRequest, GeminiGenerateContentResponse]: - """ - Get the API endpoint for a given Gemini model. - - Args: - model: The Gemini model to use, either as enum or string value. - - Returns: - ApiEndpoint configured for the specific Gemini model. 
- """ - if isinstance(model, str): - model = GeminiModel(model) - return ApiEndpoint( - path=f"{GEMINI_BASE_ENDPOINT}/{model.value}", - method=HttpMethod.POST, - request_model=GeminiGenerateContentRequest, - response_model=GeminiGenerateContentResponse, - ) - - -def get_gemini_image_endpoint( - model: GeminiImageModel, -) -> ApiEndpoint[GeminiGenerateContentRequest, GeminiGenerateContentResponse]: - """ - Get the API endpoint for a given Gemini model. - - Args: - model: The Gemini model to use, either as enum or string value. - - Returns: - ApiEndpoint configured for the specific Gemini model. - """ - if isinstance(model, str): - model = GeminiImageModel(model) - return ApiEndpoint( - path=f"{GEMINI_BASE_ENDPOINT}/{model.value}", - method=HttpMethod.POST, - request_model=GeminiImageGenerateContentRequest, - response_model=GeminiGenerateContentResponse, - ) - - def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]: """ Convert image tensor input to Gemini API compatible parts. @@ -119,9 +80,7 @@ def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]: """ image_parts: list[GeminiPart] = [] for image_index in range(image_input.shape[0]): - image_as_b64 = tensor_to_base64_string( - image_input[image_index].unsqueeze(0) - ) + image_as_b64 = tensor_to_base64_string(image_input[image_index].unsqueeze(0)) image_parts.append( GeminiPart( inlineData=GeminiInlineData( @@ -133,37 +92,7 @@ def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]: return image_parts -def create_text_part(text: str) -> GeminiPart: - """ - Create a text part for the Gemini API request. - - Args: - text: The text content to include in the request. - - Returns: - A GeminiPart object with the text content. - """ - return GeminiPart(text=text) - - -def get_parts_from_response( - response: GeminiGenerateContentResponse -) -> list[GeminiPart]: - """ - Extract all parts from the Gemini API response. - - Args: - response: The API response from Gemini. - - Returns: - List of response parts from the first candidate. - """ - return response.candidates[0].content.parts - - -def get_parts_by_type( - response: GeminiGenerateContentResponse, part_type: Literal["text"] | str -) -> list[GeminiPart]: +def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Literal["text"] | str) -> list[GeminiPart]: """ Filter response parts by their type. @@ -175,14 +104,10 @@ def get_parts_by_type( List of response parts matching the requested type. """ parts = [] - for part in get_parts_from_response(response): + for part in response.candidates[0].content.parts: if part_type == "text" and hasattr(part, "text") and part.text: parts.append(part) - elif ( - hasattr(part, "inlineData") - and part.inlineData - and part.inlineData.mimeType == part_type - ): + elif hasattr(part, "inlineData") and part.inlineData and part.inlineData.mimeType == part_type: parts.append(part) # Skip parts that don't match the requested type return parts @@ -210,11 +135,11 @@ def get_image_from_response(response: GeminiGenerateContentResponse) -> torch.Te returned_image = bytesio_to_image_tensor(BytesIO(image_data)) image_tensors.append(returned_image) if len(image_tensors) == 0: - return torch.zeros((1,1024,1024,4)) + return torch.zeros((1, 1024, 1024, 4)) return torch.cat(image_tensors, dim=0) -class GeminiNode(ComfyNodeABC): +class GeminiNode(IO.ComfyNode): """ Node to generate text responses from a Gemini model. 
@@ -225,96 +150,79 @@ class GeminiNode(ComfyNodeABC): """ @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Text inputs to the model, used to generate a response. You can include detailed instructions, questions, or context for the model.", - }, + def define_schema(cls): + return IO.Schema( + node_id="GeminiNode", + display_name="Google Gemini", + category="api node/text/Gemini", + description="Generate text responses with Google's Gemini AI model. " + "You can provide multiple types of inputs (text, images, audio, video) " + "as context for generating more relevant and meaningful responses.", + inputs=[ + IO.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text inputs to the model, used to generate a response. " + "You can include detailed instructions, questions, or context for the model.", ), - "model": ( - IO.COMBO, - { - "tooltip": "The Gemini model to use for generating responses.", - "options": [model.value for model in GeminiModel], - "default": GeminiModel.gemini_2_5_pro.value, - }, + IO.Combo.Input( + "model", + options=GeminiModel, + default=GeminiModel.gemini_2_5_pro, + tooltip="The Gemini model to use for generating responses.", ), - "seed": ( - IO.INT, - { - "default": 42, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.", - }, + IO.Int.Input( + "seed", + default=42, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="When seed is fixed to a specific value, the model makes a best effort to provide " + "the same response for repeated requests. Deterministic output isn't guaranteed. " + "Also, changing the model or parameter settings, such as the temperature, " + "can cause variations in the response even when you use the same seed value. " + "By default, a random seed value is used.", ), - }, - "optional": { - "images": ( - IO.IMAGE, - { - "default": None, - "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.", - }, + IO.Image.Input( + "images", + optional=True, + tooltip="Optional image(s) to use as context for the model. " + "To include multiple images, you can use the Batch Images node.", ), - "audio": ( - IO.AUDIO, - { - "tooltip": "Optional audio to use as context for the model.", - "default": None, - }, + IO.Audio.Input( + "audio", + optional=True, + tooltip="Optional audio to use as context for the model.", ), - "video": ( - IO.VIDEO, - { - "tooltip": "Optional video to use as context for the model.", - "default": None, - }, + IO.Video.Input( + "video", + optional=True, + tooltip="Optional video to use as context for the model.", ), - "files": ( - "GEMINI_INPUT_FILES", - { - "default": None, - "tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node.", - }, + IO.Custom("GEMINI_INPUT_FILES").Input( + "files", + optional=True, + tooltip="Optional file(s) to use as context for the model. 
" + "Accepts inputs from the Gemini Generate Content Input Files node.", ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - DESCRIPTION = "Generate text responses with Google's Gemini AI model. You can provide multiple types of inputs (text, images, audio, video) as context for generating more relevant and meaningful responses." - RETURN_TYPES = ("STRING",) - FUNCTION = "api_call" - CATEGORY = "api node/text/Gemini" - API_NODE = True - - def create_video_parts(self, video_input: IO.VIDEO, **kwargs) -> list[GeminiPart]: - """ - Convert video input to Gemini API compatible parts. - - Args: - video_input: Video tensor from ComfyUI. - **kwargs: Additional arguments to pass to the conversion function. - - Returns: - List of GeminiPart objects containing the encoded video. - """ - - base_64_string = video_to_base64_string( - video_input, - container_format=VideoContainer.MP4, - codec=VideoCodec.H264 + ], + outputs=[ + IO.String.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, ) + + @classmethod + def create_video_parts(cls, video_input: Input.Video) -> list[GeminiPart]: + """Convert video input to Gemini API compatible parts.""" + + base_64_string = video_to_base64_string(video_input, container_format=VideoContainer.MP4, codec=VideoCodec.H264) return [ GeminiPart( inlineData=GeminiInlineData( @@ -324,7 +232,8 @@ class GeminiNode(ComfyNodeABC): ) ] - def create_audio_parts(self, audio_input: IO.AUDIO) -> list[GeminiPart]: + @classmethod + def create_audio_parts(cls, audio_input: Input.Audio) -> list[GeminiPart]: """ Convert audio input to Gemini API compatible parts. @@ -337,10 +246,10 @@ class GeminiNode(ComfyNodeABC): audio_parts: list[GeminiPart] = [] for batch_index in range(audio_input["waveform"].shape[0]): # Recreate an IO.AUDIO object for the given batch dimension index - audio_at_index = { - "waveform": audio_input["waveform"][batch_index].unsqueeze(0), - "sample_rate": audio_input["sample_rate"], - } + audio_at_index = Input.Audio( + waveform=audio_input["waveform"][batch_index].unsqueeze(0), + sample_rate=audio_input["sample_rate"], + ) # Convert to MP3 format for compatibility with Gemini API audio_bytes = audio_to_base64_string( audio_at_index, @@ -357,38 +266,38 @@ class GeminiNode(ComfyNodeABC): ) return audio_parts - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, - model: GeminiModel, - images: Optional[IO.IMAGE] = None, - audio: Optional[IO.AUDIO] = None, - video: Optional[IO.VIDEO] = None, + model: str, + seed: int, + images: Optional[torch.Tensor] = None, + audio: Optional[Input.Audio] = None, + video: Optional[Input.Video] = None, files: Optional[list[GeminiPart]] = None, - unique_id: Optional[str] = None, - **kwargs, - ) -> tuple[str]: - # Validate inputs + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) # Create parts list with text prompt as the first part - parts: list[GeminiPart] = [create_text_part(prompt)] + parts: list[GeminiPart] = [GeminiPart(text=prompt)] # Add other modal parts if images is not None: image_parts = create_image_parts(images) parts.extend(image_parts) if audio is not None: - parts.extend(self.create_audio_parts(audio)) + parts.extend(cls.create_audio_parts(audio)) if video is not None: - parts.extend(self.create_video_parts(video)) + parts.extend(cls.create_video_parts(video)) if files is not None: 
parts.extend(files) # Create response - response = await SynchronousOperation( - endpoint=get_gemini_endpoint(model), - request=GeminiGenerateContentRequest( + response = await sync_op( + cls, + endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"), + data=GeminiGenerateContentRequest( contents=[ GeminiContent( role="user", @@ -396,15 +305,15 @@ class GeminiNode(ComfyNodeABC): ) ] ), - auth_kwargs=kwargs, - ).execute() + response_model=GeminiGenerateContentResponse, + ) # Get result output output_text = get_text_from_response(response) - if unique_id and output_text: + if output_text: # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. render_spec = { - "node_id": unique_id, + "node_id": cls.hidden.unique_id, "component": "ChatHistoryWidget", "props": { "history": json.dumps( @@ -424,10 +333,10 @@ class GeminiNode(ComfyNodeABC): render_spec, ) - return (output_text or "Empty response from Gemini model...",) + return IO.NodeOutput(output_text or "Empty response from Gemini model...") -class GeminiInputFiles(ComfyNodeABC): +class GeminiInputFiles(IO.ComfyNode): """ Loads and formats input files for use with the Gemini API. @@ -438,7 +347,7 @@ class GeminiInputFiles(ComfyNodeABC): """ @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: + def define_schema(cls): """ For details about the supported file input types, see: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference @@ -453,39 +362,37 @@ class GeminiInputFiles(ComfyNodeABC): ] input_files = sorted(input_files, key=lambda x: x.name) input_files = [f.name for f in input_files] - return { - "required": { - "file": ( - IO.COMBO, - { - "tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.", - "options": input_files, - "default": input_files[0] if input_files else None, - }, + return IO.Schema( + node_id="GeminiInputFiles", + display_name="Gemini Input Files", + category="api node/text/Gemini", + description="Loads and prepares input files to include as inputs for Gemini LLM nodes. " + "The files will be read by the Gemini model when generating a response. " + "The contents of the text file count toward the token limit. " + "🛈 TIP: Can be chained together with other Gemini Input File nodes.", + inputs=[ + IO.Combo.Input( + "file", + options=input_files, + default=input_files[0] if input_files else None, + tooltip="Input files to include as context for the model. " + "Only accepts text (.txt) and PDF (.pdf) files for now.", ), - }, - "optional": { - "GEMINI_INPUT_FILES": ( + IO.Custom("GEMINI_INPUT_FILES").Input( "GEMINI_INPUT_FILES", - { - "tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files.", - "default": None, - }, + optional=True, + tooltip="An optional additional file(s) to batch together with the file loaded from this node. " + "Allows chaining of input files so that a single message can include multiple input files.", ), - }, - } - - DESCRIPTION = "Loads and prepares input files to include as inputs for Gemini LLM nodes. The files will be read by the Gemini model when generating a response. The contents of the text file count toward the token limit. 🛈 TIP: Can be chained together with other Gemini Input File nodes." 
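GeminiInputFiles builds its combo options by scanning the ComfyUI input directory for text and
PDF files ("Only accepts text (.txt) and PDF (.pdf) files for now") and sorting them by name.
The same listing step, sketched with pathlib against an assumed input/ directory:

from pathlib import Path

input_dir = Path("input")  # assumption: ComfyUI's input directory
candidates = (f for f in input_dir.glob("*") if f.is_file() and f.suffix.lower() in {".txt", ".pdf"})
options = [f.name for f in sorted(candidates, key=lambda f: f.name)]
print(options)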
- RETURN_TYPES = ("GEMINI_INPUT_FILES",) - FUNCTION = "prepare_files" - CATEGORY = "api node/text/Gemini" - - def create_file_part(self, file_path: str) -> GeminiPart: - mime_type = ( - GeminiMimeType.application_pdf - if file_path.endswith(".pdf") - else GeminiMimeType.text_plain + ], + outputs=[ + IO.Custom("GEMINI_INPUT_FILES").Output(), + ], ) + + @classmethod + def create_file_part(cls, file_path: str) -> GeminiPart: + mime_type = GeminiMimeType.application_pdf if file_path.endswith(".pdf") else GeminiMimeType.text_plain # Use base64 string directly, not the data URI with open(file_path, "rb") as f: file_content = f.read() @@ -498,120 +405,95 @@ class GeminiInputFiles(ComfyNodeABC): ) ) - def prepare_files( - self, file: str, GEMINI_INPUT_FILES: list[GeminiPart] = [] - ) -> tuple[list[GeminiPart]]: - """ - Loads and formats input files for Gemini API. - """ - file_path = folder_paths.get_annotated_filepath(file) - input_file_content = self.create_file_part(file_path) - files = [input_file_content] + GEMINI_INPUT_FILES - return (files,) - - -class GeminiImage(ComfyNodeABC): - """ - Node to generate text and image responses from a Gemini model. - - This node allows users to interact with Google's Gemini AI models, providing - multimodal inputs (text, images, files) to generate coherent - text and image responses. The node works with the latest Gemini models, handling the - API communication and response parsing. - """ @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Text prompt for generation", - }, - ), - "model": ( - IO.COMBO, - { - "tooltip": "The Gemini model to use for generating responses.", - "options": [model.value for model in GeminiImageModel], - "default": GeminiImageModel.gemini_2_5_flash_image.value, - }, - ), - "seed": ( - IO.INT, - { - "default": 42, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.", - }, - ), - }, - "optional": { - "images": ( - IO.IMAGE, - { - "default": None, - "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.", - }, - ), - "files": ( - "GEMINI_INPUT_FILES", - { - "default": None, - "tooltip": "Optional file(s) to use as context for the model. 
Accepts inputs from the Gemini Generate Content Input Files node.", - }, - ), - # TODO: later we can add this parameter later - # "n": ( - # IO.INT, - # { - # "default": 1, - # "min": 1, - # "max": 8, - # "step": 1, - # "display": "number", - # "tooltip": "How many images to generate", - # }, - # ), - "aspect_ratio": ( - IO.COMBO, - { - "tooltip": "Defaults to matching the output image size to that of your input image, or otherwise generates 1:1 squares.", - "options": ["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"], - "default": "auto", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } + def execute(cls, file: str, GEMINI_INPUT_FILES: Optional[list[GeminiPart]] = None) -> IO.NodeOutput: + """Loads and formats input files for Gemini API.""" + if GEMINI_INPUT_FILES is None: + GEMINI_INPUT_FILES = [] + file_path = folder_paths.get_annotated_filepath(file) + input_file_content = cls.create_file_part(file_path) + return IO.NodeOutput([input_file_content] + GEMINI_INPUT_FILES) - RETURN_TYPES = (IO.IMAGE, IO.STRING) - FUNCTION = "api_call" - CATEGORY = "api node/image/Gemini" - DESCRIPTION = "Edit images synchronously via Google API." - API_NODE = True - async def api_call( - self, +class GeminiImage(IO.ComfyNode): + + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="GeminiImageNode", + display_name="Google Gemini Image", + category="api node/image/Gemini", + description="Edit images synchronously via Google API.", + inputs=[ + IO.String.Input( + "prompt", + multiline=True, + tooltip="Text prompt for generation", + default="", + ), + IO.Combo.Input( + "model", + options=GeminiImageModel, + default=GeminiImageModel.gemini_2_5_flash_image, + tooltip="The Gemini model to use for generating responses.", + ), + IO.Int.Input( + "seed", + default=42, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="When seed is fixed to a specific value, the model makes a best effort to provide " + "the same response for repeated requests. Deterministic output isn't guaranteed. " + "Also, changing the model or parameter settings, such as the temperature, " + "can cause variations in the response even when you use the same seed value. " + "By default, a random seed value is used.", + ), + IO.Image.Input( + "images", + optional=True, + tooltip="Optional image(s) to use as context for the model. " + "To include multiple images, you can use the Batch Images node.", + ), + IO.Custom("GEMINI_INPUT_FILES").Input( + "files", + optional=True, + tooltip="Optional file(s) to use as context for the model. 
" + "Accepts inputs from the Gemini Generate Content Input Files node.", + ), + IO.Combo.Input( + "aspect_ratio", + options=["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"], + default="auto", + tooltip="Defaults to matching the output image size to that of your input image, " + "or otherwise generates 1:1 squares.", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + IO.String.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, prompt: str, - model: GeminiImageModel, - images: Optional[IO.IMAGE] = None, + model: str, + seed: int, + images: Optional[torch.Tensor] = None, files: Optional[list[GeminiPart]] = None, - n=1, aspect_ratio: str = "auto", - unique_id: Optional[str] = None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) - parts: list[GeminiPart] = [create_text_part(prompt)] + parts: list[GeminiPart] = [GeminiPart(text=prompt)] if not aspect_ratio: aspect_ratio = "auto" # for backward compatability with old workflows; to-do remove this in December @@ -623,29 +505,27 @@ class GeminiImage(ComfyNodeABC): if files is not None: parts.extend(files) - response = await SynchronousOperation( - endpoint=get_gemini_image_endpoint(model), - request=GeminiImageGenerateContentRequest( + response = await sync_op( + cls, + endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"), + data=GeminiImageGenerateContentRequest( contents=[ - GeminiContent( - role="user", - parts=parts, - ), + GeminiContent(role="user", parts=parts), ], generationConfig=GeminiImageGenerationConfig( - responseModalities=["TEXT","IMAGE"], + responseModalities=["TEXT", "IMAGE"], imageConfig=None if aspect_ratio == "auto" else image_config, - ) + ), ), - auth_kwargs=kwargs, - ).execute() + response_model=GeminiGenerateContentResponse, + ) output_image = get_image_from_response(response) output_text = get_text_from_response(response) - if unique_id and output_text: + if output_text: # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. render_spec = { - "node_id": unique_id, + "node_id": cls.hidden.unique_id, "component": "ChatHistoryWidget", "props": { "history": json.dumps( @@ -666,17 +546,18 @@ class GeminiImage(ComfyNodeABC): ) output_text = output_text or "Empty response from Gemini model..." 
- return (output_image, output_text,) + return IO.NodeOutput(output_image, output_text) -NODE_CLASS_MAPPINGS = { - "GeminiNode": GeminiNode, - "GeminiImageNode": GeminiImage, - "GeminiInputFiles": GeminiInputFiles, -} +class GeminiExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + GeminiNode, + GeminiImage, + GeminiInputFiles, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "GeminiNode": "Google Gemini", - "GeminiImageNode": "Google Gemini Image", - "GeminiInputFiles": "Gemini Input Files", -} + +async def comfy_entrypoint() -> GeminiExtension: + return GeminiExtension() diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py index ab96760cb..0cca2b59b 100644 --- a/comfy_api_nodes/util/__init__.py +++ b/comfy_api_nodes/util/__init__.py @@ -18,6 +18,7 @@ from .conversions import ( tensor_to_bytesio, tensor_to_pil, trim_video, + video_to_base64_string, ) from .download_helpers import ( download_url_as_bytesio, @@ -73,6 +74,7 @@ __all__ = [ "tensor_to_bytesio", "tensor_to_pil", "trim_video", + "video_to_base64_string", # Validation utilities "get_number_of_images", "validate_aspect_ratio_closeness", diff --git a/comfy_api_nodes/util/conversions.py b/comfy_api_nodes/util/conversions.py index 10cd1051b..9f4c90c5c 100644 --- a/comfy_api_nodes/util/conversions.py +++ b/comfy_api_nodes/util/conversions.py @@ -12,6 +12,7 @@ from PIL import Image from comfy.utils import common_upscale from comfy_api.latest import Input, InputImpl +from comfy_api.util import VideoContainer, VideoCodec from ._helpers import mimetype_to_extension @@ -173,6 +174,30 @@ def audio_to_base64_string(audio: Input.Audio, container_format: str = "mp4", co return base64.b64encode(audio_bytes).decode("utf-8") +def video_to_base64_string( + video: Input.Video, + container_format: VideoContainer = None, + codec: VideoCodec = None +) -> str: + """ + Converts a video input to a base64 string. + + Args: + video: The video input to convert + container_format: Optional container format to use (defaults to video.container if available) + codec: Optional codec to use (defaults to video.codec if available) + """ + video_bytes_io = BytesIO() + + # Use provided format/codec if specified, otherwise use video's own if available + format_to_use = container_format if container_format is not None else getattr(video, 'container', VideoContainer.MP4) + codec_to_use = codec if codec is not None else getattr(video, 'codec', VideoCodec.H264) + + video.save_to(video_bytes_io, format=format_to_use, codec=codec_to_use) + video_bytes_io.seek(0) + return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8") + + def audio_ndarray_to_bytesio( audio_data_np: np.ndarray, sample_rate: int, From 098a352f136c610071bcb74f13e5b0ca16e6e7b3 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 25 Oct 2025 17:05:22 -0700 Subject: [PATCH 0792/1073] Add warning for torch-directml usage (#10482) Added a warning message about the state of torch-directml. 
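One helper worth a closer look from the Gemini commit above: video_to_base64_string, now housed
in comfy_api_nodes/util/conversions.py, serializes the clip into a BytesIO, rewinds it, and
base64-encodes the bytes, falling back to the clip's own container/codec via getattr when none
are passed. The same shape with a dummy writer so it runs standalone (DummyVideo is illustrative,
not a ComfyUI type):

import base64
from io import BytesIO


class DummyVideo:
    container = "mp4"  # stand-ins for the VideoContainer/VideoCodec values used above
    codec = "h264"

    def save_to(self, buf: BytesIO, format: str, codec: str) -> None:
        buf.write(f"fake {format}/{codec} payload".encode())


def video_to_b64(video, container_format=None, codec=None) -> str:
    fmt = container_format if container_format is not None else getattr(video, "container", "mp4")
    cdc = codec if codec is not None else getattr(video, "codec", "h264")
    buf = BytesIO()
    video.save_to(buf, format=fmt, codec=cdc)
    buf.seek(0)
    return base64.b64encode(buf.getvalue()).decode("utf-8")


print(video_to_b64(DummyVideo())[:24])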
--- comfy/model_management.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index cf015a29a..afe78f36e 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -89,6 +89,7 @@ if args.deterministic: directml_enabled = False if args.directml is not None: + logging.warning("WARNING: torch-directml barely works, is very slow, has not been updated in over 1 year and might be removed soon, please don't use it, there are better options.") import torch_directml directml_enabled = True device_index = args.directml From f6bbc1ac846b7d9a73ae50c3a45cf5a41058c54d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 25 Oct 2025 20:07:29 -0700 Subject: [PATCH 0793/1073] Fix mistake. (#10484) --- comfy/sample.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sample.py b/comfy/sample.py index b1395da84..2f8f3a51c 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -17,7 +17,7 @@ def prepare_noise_inner(latent_image, generator, noise_inds=None): if i in unique_inds: noises.append(noise) noises = [noises[i] for i in inverse] - noises = torch.cat(noises, axis=0) + return torch.cat(noises, axis=0) def prepare_noise(latent_image, seed, noise_inds=None): """ From 9d529e53084bdec28f684f3886a26c93598e7338 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sun, 26 Oct 2025 08:51:06 +0200 Subject: [PATCH 0794/1073] fix(api-nodes): random issues on Windows by capturing general OSError for retries (#10486) --- comfy_api_nodes/util/client.py | 15 +++++---------- comfy_api_nodes/util/download_helpers.py | 6 +++--- comfy_api_nodes/util/upload_helpers.py | 4 ++-- 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py index 5833b118f..9c036d64b 100644 --- a/comfy_api_nodes/util/client.py +++ b/comfy_api_nodes/util/client.py @@ -2,7 +2,6 @@ import asyncio import contextlib import json import logging -import socket import time import uuid from dataclasses import dataclass @@ -456,24 +455,20 @@ async def _diagnose_connectivity() -> dict[str, bool]: results = { "internet_accessible": False, "api_accessible": False, - "is_local_issue": False, - "is_api_issue": False, } timeout = aiohttp.ClientTimeout(total=5.0) async with aiohttp.ClientSession(timeout=timeout) as session: - try: + with contextlib.suppress(ClientError, OSError): async with session.get("https://www.google.com") as resp: results["internet_accessible"] = resp.status < 500 - except (ClientError, asyncio.TimeoutError, socket.gaierror): - results["is_local_issue"] = True + if not results["internet_accessible"]: return results parsed = urlparse(default_base_url()) health_url = f"{parsed.scheme}://{parsed.netloc}/health" - with contextlib.suppress(ClientError, asyncio.TimeoutError): + with contextlib.suppress(ClientError, OSError): async with session.get(health_url) as resp: results["api_accessible"] = resp.status < 500 - results["is_api_issue"] = results["internet_accessible"] and not results["api_accessible"] return results @@ -790,7 +785,7 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool): except ProcessingInterrupted: logging.debug("Polling was interrupted by user") raise - except (ClientError, asyncio.TimeoutError, socket.gaierror) as e: + except (ClientError, OSError) as e: if attempt <= cfg.max_retries: logging.warning( "Connection error calling %s %s. 
Retrying in %.2fs (%d/%d): %s", @@ -824,7 +819,7 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool): delay *= cfg.retry_backoff continue diag = await _diagnose_connectivity() - if diag.get("is_local_issue"): + if not diag["internet_accessible"]: try: request_logger.log_request_response( operation_id=operation_id, diff --git a/comfy_api_nodes/util/download_helpers.py b/comfy_api_nodes/util/download_helpers.py index 791dd5a50..f89045e12 100644 --- a/comfy_api_nodes/util/download_helpers.py +++ b/comfy_api_nodes/util/download_helpers.py @@ -32,7 +32,7 @@ async def download_url_to_bytesio( dest: Optional[Union[BytesIO, IO[bytes], str, Path]], *, timeout: Optional[float] = None, - max_retries: int = 3, + max_retries: int = 5, retry_delay: float = 1.0, retry_backoff: float = 2.0, cls: type[COMFY_IO.ComfyNode] = None, @@ -177,7 +177,7 @@ async def download_url_to_bytesio( return except asyncio.CancelledError: raise ProcessingInterrupted("Task cancelled") from None - except (ClientError, asyncio.TimeoutError) as e: + except (ClientError, OSError) as e: if attempt <= max_retries: with contextlib.suppress(Exception): request_logger.log_request_response( @@ -191,7 +191,7 @@ async def download_url_to_bytesio( continue diag = await _diagnose_connectivity() - if diag.get("is_local_issue"): + if not diag["internet_accessible"]: raise LocalNetworkError( "Unable to connect to the network. Please check your internet connection and try again." ) from e diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py index a345d451d..7bfc61704 100644 --- a/comfy_api_nodes/util/upload_helpers.py +++ b/comfy_api_nodes/util/upload_helpers.py @@ -290,7 +290,7 @@ async def upload_file( return except asyncio.CancelledError: raise ProcessingInterrupted("Task cancelled") from None - except (aiohttp.ClientError, asyncio.TimeoutError) as e: + except (aiohttp.ClientError, OSError) as e: if attempt <= max_retries: with contextlib.suppress(Exception): request_logger.log_request_response( @@ -313,7 +313,7 @@ async def upload_file( continue diag = await _diagnose_connectivity() - if diag.get("is_local_issue"): + if not diag["internet_accessible"]: raise LocalNetworkError( "Unable to connect to the network. Please check your internet connection and try again." ) from e From c170fd2db598a0bdce56f80e22e83e10ad731421 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 26 Oct 2025 17:23:01 -0700 Subject: [PATCH 0795/1073] Bump portable deps workflow to torch cu130 python 3.13.9 (#10493) --- .github/workflows/windows_release_dependencies.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml index f1e2946e6..f61ee21a2 100644 --- a/.github/workflows/windows_release_dependencies.yml +++ b/.github/workflows/windows_release_dependencies.yml @@ -17,7 +17,7 @@ on: description: 'cuda version' required: true type: string - default: "129" + default: "130" python_minor: description: 'python minor version' @@ -29,7 +29,7 @@ on: description: 'python patch version' required: true type: string - default: "6" + default: "9" # push: # branches: # - master From 601ee1775a3c06c9b4de1fa7d808af8625b2fcd5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 27 Oct 2025 20:54:00 -0700 Subject: [PATCH 0796/1073] Add a bat to run comfyui portable without api nodes. 
(#10504) --- .../advanced/run_nvidia_gpu_disable_api_nodes.bat | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat diff --git a/.ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat b/.ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat new file mode 100644 index 000000000..cfe4b9f0e --- /dev/null +++ b/.ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat @@ -0,0 +1,2 @@ +..\python_embeded\python.exe -s ..\ComfyUI\main.py --windows-standalone-build --disable-api-nodes +pause From c305deed56a6ed259563b2047d9fcd51471e6590 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 28 Oct 2025 13:24:16 +0800 Subject: [PATCH 0797/1073] Update template to 0.2.3 (#10503) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8570c66b6..121301669 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.28.7 -comfyui-workflow-templates==0.2.2 +comfyui-workflow-templates==0.2.3 comfyui-embedded-docs==0.3.0 torch torchsde From 55bad303754eb60fa98f3ccf598e95502b819149 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 28 Oct 2025 07:25:29 +0200 Subject: [PATCH 0798/1073] feat(api-nodes): add LTXV API nodes (#10496) --- comfy_api_nodes/nodes_ltxv.py | 191 ++++++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 192 insertions(+) create mode 100644 comfy_api_nodes/nodes_ltxv.py diff --git a/comfy_api_nodes/nodes_ltxv.py b/comfy_api_nodes/nodes_ltxv.py new file mode 100644 index 000000000..e6ad6e27a --- /dev/null +++ b/comfy_api_nodes/nodes_ltxv.py @@ -0,0 +1,191 @@ +from io import BytesIO +from typing import Optional + +import torch +from pydantic import BaseModel, Field +from typing_extensions import override + +from comfy_api.input_impl import VideoFromFile +from comfy_api.latest import IO, ComfyExtension +from comfy_api_nodes.util import ( + ApiEndpoint, + get_number_of_images, + sync_op_raw, + upload_images_to_comfyapi, + validate_string, +) + +MODELS_MAP = { + "LTX-2 (Pro)": "ltx-2-pro", + "LTX-2 (Fast)": "ltx-2-fast", +} + + +class ExecuteTaskRequest(BaseModel): + prompt: str = Field(...) + model: str = Field(...) + duration: int = Field(...) + resolution: str = Field(...) 
+ fps: Optional[int] = Field(25) + generate_audio: Optional[bool] = Field(True) + image_uri: Optional[str] = Field(None) + + +class TextToVideoNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="LtxvApiTextToVideo", + display_name="LTXV Text To Video", + category="api node/video/LTXV", + description="Professional-quality videos with customizable duration and resolution.", + inputs=[ + IO.Combo.Input("model", options=list(MODELS_MAP.keys())), + IO.String.Input( + "prompt", + multiline=True, + default="", + ), + IO.Combo.Input("duration", options=[6, 8, 10], default=8), + IO.Combo.Input( + "resolution", + options=[ + "1920x1080", + "2560x1440", + "3840x2160", + ], + ), + IO.Combo.Input("fps", options=[25, 50], default=25), + IO.Boolean.Input( + "generate_audio", + default=False, + optional=True, + tooltip="When true, the generated video will include AI-generated audio matching the scene.", + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + prompt: str, + duration: int, + resolution: str, + fps: int = 25, + generate_audio: bool = False, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=10000) + response = await sync_op_raw( + cls, + ApiEndpoint("/proxy/ltx/v1/text-to-video", "POST"), + data=ExecuteTaskRequest( + prompt=prompt, + model=MODELS_MAP[model], + duration=duration, + resolution=resolution, + fps=fps, + generate_audio=generate_audio, + ), + as_binary=True, + max_retries=1, + ) + return IO.NodeOutput(VideoFromFile(BytesIO(response))) + + +class ImageToVideoNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="LtxvApiImageToVideo", + display_name="LTXV Image To Video", + category="api node/video/LTXV", + description="Professional-quality videos with customizable duration and resolution based on start image.", + inputs=[ + IO.Image.Input("image", tooltip="First frame to be used for the video."), + IO.Combo.Input("model", options=list(MODELS_MAP.keys())), + IO.String.Input( + "prompt", + multiline=True, + default="", + ), + IO.Combo.Input("duration", options=[6, 8, 10], default=8), + IO.Combo.Input( + "resolution", + options=[ + "1920x1080", + "2560x1440", + "3840x2160", + ], + ), + IO.Combo.Input("fps", options=[25, 50], default=25), + IO.Boolean.Input( + "generate_audio", + default=False, + optional=True, + tooltip="When true, the generated video will include AI-generated audio matching the scene.", + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + image: torch.Tensor, + model: str, + prompt: str, + duration: int, + resolution: str, + fps: int = 25, + generate_audio: bool = False, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=10000) + if get_number_of_images(image) != 1: + raise ValueError("Currently only one input image is supported.") + response = await sync_op_raw( + cls, + ApiEndpoint("/proxy/ltx/v1/image-to-video", "POST"), + data=ExecuteTaskRequest( + image_uri=(await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0], + prompt=prompt, + model=MODELS_MAP[model], + duration=duration, + resolution=resolution, + fps=fps, + generate_audio=generate_audio, + ), + as_binary=True, + 
max_retries=1, + ) + return IO.NodeOutput(VideoFromFile(BytesIO(response))) + + +class LtxvApiExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + TextToVideoNode, + ImageToVideoNode, + ] + + +async def comfy_entrypoint() -> LtxvApiExtension: + return LtxvApiExtension() diff --git a/nodes.py b/nodes.py index 7cfa8ca14..12e365ca9 100644 --- a/nodes.py +++ b/nodes.py @@ -2349,6 +2349,7 @@ async def init_builtin_api_nodes(): "nodes_kling.py", "nodes_bfl.py", "nodes_bytedance.py", + "nodes_ltxv.py", "nodes_luma.py", "nodes_recraft.py", "nodes_pixverse.py", From 6abc30aae9bd13f31dafd32552a365f2df2cf715 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 28 Oct 2025 13:56:30 +0800 Subject: [PATCH 0799/1073] Update template to 0.2.4 (#10505) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 121301669..cc3d4ca94 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.28.7 -comfyui-workflow-templates==0.2.3 +comfyui-workflow-templates==0.2.4 comfyui-embedded-docs==0.3.0 torch torchsde From 614b8d3345424481d94a22fe7496d908c1a5c526 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 28 Oct 2025 00:01:13 -0700 Subject: [PATCH 0800/1073] frontend bump to 1.28.8 (#10506) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cc3d4ca94..4d84b0d3e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.28.7 +comfyui-frontend-package==1.28.8 comfyui-workflow-templates==0.2.4 comfyui-embedded-docs==0.3.0 torch From f2bb3230b796f6a486894fc3b597db2c0b9538c9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Oct 2025 03:03:59 -0400 Subject: [PATCH 0801/1073] ComfyUI version v0.3.67 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 33a06bbb0..db48b05c4 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.66" +__version__ = "0.3.67" diff --git a/pyproject.toml b/pyproject.toml index fcc4854a5..ab054355c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.66" +version = "0.3.67" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From b61a40cbc9c2eb648b4d22bb513ed3ab2e2f0fd7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 28 Oct 2025 00:21:45 -0700 Subject: [PATCH 0802/1073] Bump stable portable to cu130 python 3.13.9 (#10508) --- .github/workflows/release-stable-all.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-stable-all.yml b/.github/workflows/release-stable-all.yml index 5c1024599..7dca7277b 100644 --- a/.github/workflows/release-stable-all.yml +++ b/.github/workflows/release-stable-all.yml @@ -18,9 +18,9 @@ jobs: uses: ./.github/workflows/stable-release.yml with: git_tag: ${{ inputs.git_tag }} - cache_tag: "cu129" + cache_tag: "cu130" python_minor: "13" - python_patch: "6" + python_patch: "9" rel_name: "nvidia" rel_extra_name: "" test_release: true From 8cf2ba4ba64203551276513068ee81145e90f0bc Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 28 Oct 2025 00:23:52 -0700 Subject: [PATCH 0803/1073] Remove comfy api key from queue api. (#10502) --- execution.py | 8 +++----- main.py | 11 +++++++++-- server.py | 11 ++++++++--- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/execution.py b/execution.py index 78c36a4b0..b14bb14c7 100644 --- a/execution.py +++ b/execution.py @@ -1116,7 +1116,7 @@ class PromptQueue: messages: List[str] def task_done(self, item_id, history_result, - status: Optional['PromptQueue.ExecutionStatus']): + status: Optional['PromptQueue.ExecutionStatus'], process_item=None): with self.mutex: prompt = self.currently_running.pop(item_id) if len(self.history) > MAXIMUM_HISTORY_SIZE: @@ -1126,10 +1126,8 @@ class PromptQueue: if status is not None: status_dict = copy.deepcopy(status._asdict()) - # Remove sensitive data from extra_data before storing in history - for sensitive_val in SENSITIVE_EXTRA_DATA_KEYS: - if sensitive_val in prompt[3]: - prompt[3].pop(sensitive_val) + if process_item is not None: + prompt = process_item(prompt) self.history[prompt[1]] = { "prompt": prompt, diff --git a/main.py b/main.py index 4b4c5dcc4..8d466d2eb 100644 --- a/main.py +++ b/main.py @@ -192,14 +192,21 @@ def prompt_worker(q, server_instance): prompt_id = item[1] server_instance.last_prompt_id = prompt_id - e.execute(item[2], prompt_id, item[3], item[4]) + sensitive = item[5] + extra_data = item[3].copy() + for k in sensitive: + extra_data[k] = sensitive[k] + + e.execute(item[2], prompt_id, extra_data, item[4]) need_gc = True + + remove_sensitive = lambda prompt: prompt[:5] + prompt[6:] q.task_done(item_id, e.history_result, status=execution.PromptQueue.ExecutionStatus( status_str='success' if e.success else 'error', completed=e.success, - messages=e.status_messages)) + messages=e.status_messages), process_item=remove_sensitive) if server_instance.client_id is not None: server_instance.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server_instance.client_id) diff --git a/server.py b/server.py index fe58db286..5d773b10a 100644 --- a/server.py +++ b/server.py @@ -691,8 +691,9 @@ class PromptServer(): async def get_queue(request): queue_info = {} current_queue = 
self.prompt_queue.get_current_queue_volatile() - queue_info['queue_running'] = current_queue[0] - queue_info['queue_pending'] = current_queue[1] + remove_sensitive = lambda queue: [x[:5] for x in queue] + queue_info['queue_running'] = remove_sensitive(current_queue[0]) + queue_info['queue_pending'] = remove_sensitive(current_queue[1]) return web.json_response(queue_info) @routes.post("/prompt") @@ -728,7 +729,11 @@ class PromptServer(): extra_data["client_id"] = json_data["client_id"] if valid[0]: outputs_to_execute = valid[2] - self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute)) + sensitive = {} + for sensitive_val in execution.SENSITIVE_EXTRA_DATA_KEYS: + if sensitive_val in extra_data: + sensitive[sensitive_val] = extra_data.pop(sensitive_val) + self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive)) response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]} return web.json_response(response) else: From 3bea4efc6b23d76c6b0672cd90421a9024e13fdb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 28 Oct 2025 01:45:45 -0700 Subject: [PATCH 0804/1073] Tell users to update nvidia drivers if problem with portable. (#10510) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 434d4ff06..4204777e9 100644 --- a/README.md +++ b/README.md @@ -176,6 +176,8 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you If you have trouble extracting it, right click the file -> properties -> unblock +Update your Nvidia drivers if it doesn't start. + #### Alternative Downloads: [Experimental portable for AMD GPUs](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_amd.7z) From 22e40d2ace0f53da025b3a41cbe4b664ef807097 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 28 Oct 2025 12:08:08 -0700 Subject: [PATCH 0805/1073] Tell users to update their nvidia drivers if portable doesn't start. (#10518) --- .../advanced/run_nvidia_gpu_disable_api_nodes.bat | 1 + .ci/windows_nvidia_base_files/run_nvidia_gpu.bat | 1 + .../run_nvidia_gpu_fast_fp16_accumulation.bat | 1 + 3 files changed, 3 insertions(+) diff --git a/.ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat b/.ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat index cfe4b9f0e..ed00583b6 100644 --- a/.ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat +++ b/.ci/windows_nvidia_base_files/advanced/run_nvidia_gpu_disable_api_nodes.bat @@ -1,2 +1,3 @@ ..\python_embeded\python.exe -s ..\ComfyUI\main.py --windows-standalone-build --disable-api-nodes +echo If you see this and ComfyUI did not start try updating your Nvidia Drivers to the latest. pause diff --git a/.ci/windows_nvidia_base_files/run_nvidia_gpu.bat b/.ci/windows_nvidia_base_files/run_nvidia_gpu.bat index 274d7c948..4898a424f 100755 --- a/.ci/windows_nvidia_base_files/run_nvidia_gpu.bat +++ b/.ci/windows_nvidia_base_files/run_nvidia_gpu.bat @@ -1,2 +1,3 @@ .\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build +echo If you see this and ComfyUI did not start try updating your Nvidia Drivers to the latest. 
pause
diff --git a/.ci/windows_nvidia_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat b/.ci/windows_nvidia_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat
index 38f06ecb2..32611e4af 100644
--- a/.ci/windows_nvidia_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat
+++ b/.ci/windows_nvidia_base_files/run_nvidia_gpu_fast_fp16_accumulation.bat
@@ -1,2 +1,3 @@
 .\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --fast fp16_accumulation
+echo If you see this and ComfyUI did not start try updating your Nvidia Drivers to the latest.
 pause

From 8817f8fc148c5a63ffd3f854975df8e72c740540 Mon Sep 17 00:00:00 2001
From: contentis
Date: Tue, 28 Oct 2025 21:20:53 +0100
Subject: [PATCH 0806/1073] Mixed Precision Quantization System (#10498)

* Implement mixed precision operations with a registry design and metadata for quant spec in checkpoint.
* Updated design using Tensor Subclasses
* Fix FP8 MM
* An actually functional POC
* Remove CK reference and ensure correct compute dtype
* Update unit tests
* ruff lint
* Fix missing keys
* Rename quant dtype parameter
* Fix unittests for CPU build
---
 comfy/model_base.py                           |  10 +-
 comfy/model_detection.py                      |  20 +
 comfy/ops.py                                  | 146 +++++-
 comfy/quant_ops.py                            | 437 ++++++++++++++++++
 comfy/sd.py                                   |  13 +-
 comfy/supported_models_base.py                |   1 +
 .../comfy_quant/test_mixed_precision.py       | 232 ++++
 tests-unit/comfy_quant/test_quant_registry.py | 190 ++++
 8 files changed, 1030 insertions(+), 19 deletions(-)
 create mode 100644 comfy/quant_ops.py
 create mode 100644 tests-unit/comfy_quant/test_mixed_precision.py
 create mode 100644 tests-unit/comfy_quant/test_quant_registry.py
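The "_quantization_metadata" entry written by the model_base.py change below and read back by detect_layer_quantization() in model_detection.py is a version tag plus a per-layer table. A minimal sketch of the round trip, assuming hypothetical layer names (real keys are module paths inside the model; the per-layer entry shape follows the unit tests in this patch, and serialization to a JSON string presumably happens when the dict is written into the safetensors header):

import json

# Hypothetical per-layer table; real keys are module paths inside the model.
layer_quant_config = {
    "layer1": {"format": "float8_e4m3fn", "params": {}},
    "layer3": {"format": "float8_e4m3fn", "params": {}},
}

# Saving side (mirrors model_base.py): version tag plus per-layer table,
# stored under the "_quantization_metadata" key.
metadata = json.dumps({"format_version": "1.0", "layers": layer_quant_config})

# Loading side (mirrors detect_layer_quantization): parse and validate.
parsed = json.loads(metadata)
assert isinstance(parsed, dict) and "layers" in parsed
print(parsed.get("format_version"), sorted(parsed["layers"]))  # 1.0 ['layer1', 'layer3']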
diff --git a/comfy/model_base.py b/comfy/model_base.py
index e877f19ac..7c788d085 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -134,7 +134,7 @@ class BaseModel(torch.nn.Module):
         if not unet_config.get("disable_unet_model_creation", False):
             if model_config.custom_operations is None:
                 fp8 = model_config.optimizations.get("fp8", False)
-                operations = comfy.ops.pick_operations(unet_config.get("dtype", None), self.manual_cast_dtype, fp8_optimizations=fp8, scaled_fp8=model_config.scaled_fp8)
+                operations = comfy.ops.pick_operations(unet_config.get("dtype", None), self.manual_cast_dtype, fp8_optimizations=fp8, scaled_fp8=model_config.scaled_fp8, model_config=model_config)
             else:
                 operations = model_config.custom_operations
             self.diffusion_model = unet_model(**unet_config, device=device, operations=operations)
@@ -333,6 +333,14 @@ class BaseModel(torch.nn.Module):
         if self.model_config.scaled_fp8 is not None:
             unet_state_dict["scaled_fp8"] = torch.tensor([], dtype=self.model_config.scaled_fp8)
 
+        # Save mixed precision metadata
+        if hasattr(self.model_config, 'layer_quant_config') and self.model_config.layer_quant_config:
+            metadata = {
+                "format_version": "1.0",
+                "layers": self.model_config.layer_quant_config
+            }
+            unet_state_dict["_quantization_metadata"] = metadata
+
         unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict)
 
         if self.model_type == ModelType.V_PREDICTION:
diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index 141f1e164..3142a7fc3 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -6,6 +6,20 @@ import math
 import logging
 import torch
 
+
+def detect_layer_quantization(metadata):
+    quant_key = "_quantization_metadata"
+    if metadata is not None and quant_key in metadata:
+        quant_metadata = metadata.pop(quant_key)
+        quant_metadata = json.loads(quant_metadata)
+        if isinstance(quant_metadata, dict) and "layers" in quant_metadata:
+            logging.info(f"Found quantization metadata (version {quant_metadata.get('format_version', 'unknown')})")
+            return quant_metadata["layers"]
+        else:
+            raise ValueError("Invalid quantization metadata format")
+    return None
+
+
 def count_blocks(state_dict_keys, prefix_string):
     count = 0
     while True:
@@ -701,6 +715,12 @@ def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=Fal
         else:
             model_config.optimizations["fp8"] = True
 
+    # Detect per-layer quantization (mixed precision)
+    layer_quant_config = detect_layer_quantization(metadata)
+    if layer_quant_config:
+        model_config.layer_quant_config = layer_quant_config
+        logging.info(f"Detected mixed precision quantization: {len(layer_quant_config)} layers quantized")
+
     return model_config
 
 def unet_prefix_from_state_dict(state_dict):
diff --git a/comfy/ops.py b/comfy/ops.py
index 934e21261..93731eedf 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -344,6 +344,10 @@ class manual_cast(disable_weight_init):
 
 
 def fp8_linear(self, input):
+    """
+    Legacy FP8 linear function for backward compatibility.
+    Uses QuantizedTensor subclass for dispatch.
+    """
     dtype = self.weight.dtype
     if dtype not in [torch.float8_e4m3fn]:
         return None
@@ -355,9 +359,9 @@ def fp8_linear(self, input):
     input_shape = input.shape
     input_dtype = input.dtype
+
     if len(input.shape) == 3:
         w, bias = cast_bias_weight(self, input, dtype=dtype, bias_dtype=input_dtype)
-        w = w.t()
 
         scale_weight = self.scale_weight
         scale_input = self.scale_input
@@ -368,23 +372,18 @@ def fp8_linear(self, input):
         if scale_input is None:
             scale_input = torch.ones((), device=input.device, dtype=torch.float32)
-            input = torch.clamp(input, min=-448, max=448, out=input)
-            input = input.reshape(-1, input_shape[2]).to(dtype).contiguous()
         else:
             scale_input = scale_input.to(input.device)
-            input = (input * (1.0 / scale_input).to(input_dtype)).reshape(-1, input_shape[2]).to(dtype).contiguous()
 
-        if bias is not None:
-            o = torch._scaled_mm(input, w, out_dtype=input_dtype, bias=bias, scale_a=scale_input, scale_b=scale_weight)
-        else:
-            o = torch._scaled_mm(input, w, out_dtype=input_dtype, scale_a=scale_input, scale_b=scale_weight)
-
-        if isinstance(o, tuple):
-            o = o[0]
+        # Wrap weight in QuantizedTensor - this enables unified dispatch
+        # Call F.linear - __torch_dispatch__ routes to fp8_linear handler in quant_ops.py!
+ layout_params_weight = {'scale': scale_weight, 'orig_dtype': input_dtype} + quantized_weight = QuantizedTensor(w, TensorCoreFP8Layout, layout_params_weight) + quantized_input = QuantizedTensor.from_float(input.reshape(-1, input_shape[2]), TensorCoreFP8Layout, scale=scale_input, dtype=dtype) + o = torch.nn.functional.linear(quantized_input, quantized_weight, bias) if tensor_2d: return o.reshape(input_shape[0], -1) - return o.reshape((-1, input_shape[1], self.weight.shape[0])) return None @@ -478,7 +477,128 @@ if CUBLAS_IS_AVAILABLE: def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) -def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, scaled_fp8=None): + +# ============================================================================== +# Mixed Precision Operations +# ============================================================================== +from .quant_ops import QuantizedTensor, TensorCoreFP8Layout + +QUANT_FORMAT_MIXINS = { + "float8_e4m3fn": { + "dtype": torch.float8_e4m3fn, + "layout_type": TensorCoreFP8Layout, + "parameters": { + "weight_scale": torch.nn.Parameter(torch.zeros((), dtype=torch.float32), requires_grad=False), + "input_scale": torch.nn.Parameter(torch.zeros((), dtype=torch.float32), requires_grad=False), + } + } +} + +class MixedPrecisionOps(disable_weight_init): + _layer_quant_config = {} + _compute_dtype = torch.bfloat16 + + class Linear(torch.nn.Module, CastWeightBiasOp): + def __init__( + self, + in_features: int, + out_features: int, + bias: bool = True, + device=None, + dtype=None, + ) -> None: + super().__init__() + + self.factory_kwargs = {"device": device, "dtype": MixedPrecisionOps._compute_dtype} + # self.factory_kwargs = {"device": device, "dtype": dtype} + + self.in_features = in_features + self.out_features = out_features + if bias: + self.bias = torch.nn.Parameter(torch.empty(out_features, **self.factory_kwargs)) + else: + self.register_parameter("bias", None) + + self.tensor_class = None + + def reset_parameters(self): + return None + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, error_msgs): + + device = self.factory_kwargs["device"] + layer_name = prefix.rstrip('.') + weight_key = f"{prefix}weight" + weight = state_dict.pop(weight_key, None) + if weight is None: + raise ValueError(f"Missing weight for layer {layer_name}") + + manually_loaded_keys = [weight_key] + + if layer_name not in MixedPrecisionOps._layer_quant_config: + self.weight = torch.nn.Parameter(weight.to(device=device, dtype=MixedPrecisionOps._compute_dtype), requires_grad=False) + else: + quant_format = MixedPrecisionOps._layer_quant_config[layer_name].get("format", None) + if quant_format is None: + raise ValueError(f"Unknown quantization format for layer {layer_name}") + + mixin = QUANT_FORMAT_MIXINS[quant_format] + self.layout_type = mixin["layout_type"] + + scale_key = f"{prefix}weight_scale" + layout_params = { + 'scale': state_dict.pop(scale_key, None), + 'orig_dtype': MixedPrecisionOps._compute_dtype + } + if layout_params['scale'] is not None: + manually_loaded_keys.append(scale_key) + + self.weight = torch.nn.Parameter( + QuantizedTensor(weight.to(device=device, dtype=mixin["dtype"]), self.layout_type, layout_params), + requires_grad=False + ) + + for param_name, param_value in mixin["parameters"].items(): + param_key = f"{prefix}{param_name}" + _v = state_dict.pop(param_key, None) + if _v is None: + continue + 
setattr(self, param_name, torch.nn.Parameter(_v.to(device=device), requires_grad=False)) + manually_loaded_keys.append(param_key) + + super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) + + for key in manually_loaded_keys: + if key in missing_keys: + missing_keys.remove(key) + + def _forward(self, input, weight, bias): + return torch.nn.functional.linear(input, weight, bias) + + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return self._forward(input, weight, bias) + + def forward(self, input, *args, **kwargs): + run_every_op() + + if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: + return self.forward_comfy_cast_weights(input, *args, **kwargs) + if (getattr(self, 'layout_type', None) is not None and + getattr(self, 'input_scale', None) is not None and + not isinstance(input, QuantizedTensor)): + input = QuantizedTensor.from_float(input, self.layout_type, scale=self.input_scale, fp8_dtype=self.weight.dtype) + return self._forward(input, self.weight, self.bias) + + +def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, scaled_fp8=None, model_config=None): + if model_config and hasattr(model_config, 'layer_quant_config') and model_config.layer_quant_config: + MixedPrecisionOps._layer_quant_config = model_config.layer_quant_config + MixedPrecisionOps._compute_dtype = compute_dtype + logging.info(f"Using mixed precision operations: {len(model_config.layer_quant_config)} quantized layers") + return MixedPrecisionOps + fp8_compute = comfy.model_management.supports_fp8_compute(load_device) if scaled_fp8 is not None: return scaled_fp8_ops(fp8_matrix_mult=fp8_compute and fp8_optimizations, scale_input=fp8_optimizations, override_dtype=scaled_fp8) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py new file mode 100644 index 000000000..b14e03084 --- /dev/null +++ b/comfy/quant_ops.py @@ -0,0 +1,437 @@ +import torch +import logging +from typing import Tuple, Dict + +_LAYOUT_REGISTRY = {} +_GENERIC_UTILS = {} + + +def register_layout_op(torch_op, layout_type): + """ + Decorator to register a layout-specific operation handler. + Args: + torch_op: PyTorch operation (e.g., torch.ops.aten.linear.default) + layout_type: Layout class (e.g., TensorCoreFP8Layout) + Example: + @register_layout_op(torch.ops.aten.linear.default, TensorCoreFP8Layout) + def fp8_linear(func, args, kwargs): + # FP8-specific linear implementation + ... + """ + def decorator(handler_func): + if torch_op not in _LAYOUT_REGISTRY: + _LAYOUT_REGISTRY[torch_op] = {} + _LAYOUT_REGISTRY[torch_op][layout_type] = handler_func + return handler_func + return decorator + + +def register_generic_util(torch_op): + """ + Decorator to register a generic utility that works for all layouts. + Args: + torch_op: PyTorch operation (e.g., torch.ops.aten.detach.default) + + Example: + @register_generic_util(torch.ops.aten.detach.default) + def generic_detach(func, args, kwargs): + # Works for any layout + ... 
+ """ + def decorator(handler_func): + _GENERIC_UTILS[torch_op] = handler_func + return handler_func + return decorator + + +def _get_layout_from_args(args): + for arg in args: + if isinstance(arg, QuantizedTensor): + return arg._layout_type + elif isinstance(arg, (list, tuple)): + for item in arg: + if isinstance(item, QuantizedTensor): + return item._layout_type + return None + + +def _move_layout_params_to_device(params, device): + new_params = {} + for k, v in params.items(): + if isinstance(v, torch.Tensor): + new_params[k] = v.to(device=device) + else: + new_params[k] = v + return new_params + + +def _copy_layout_params(params): + new_params = {} + for k, v in params.items(): + if isinstance(v, torch.Tensor): + new_params[k] = v.clone() + else: + new_params[k] = v + return new_params + + +class QuantizedLayout: + """ + Base class for quantization layouts. + + A layout encapsulates the format-specific logic for quantization/dequantization + and provides a uniform interface for extracting raw tensors needed for computation. + + New quantization formats should subclass this and implement the required methods. + """ + @classmethod + def quantize(cls, tensor, **kwargs) -> Tuple[torch.Tensor, Dict]: + raise NotImplementedError(f"{cls.__name__} must implement quantize()") + + @staticmethod + def dequantize(qdata, **layout_params) -> torch.Tensor: + raise NotImplementedError("TensorLayout must implement dequantize()") + + @classmethod + def get_plain_tensors(cls, qtensor) -> torch.Tensor: + raise NotImplementedError(f"{cls.__name__} must implement get_plain_tensors()") + + +class QuantizedTensor(torch.Tensor): + """ + Universal quantized tensor that works with any layout. + + This tensor subclass uses a pluggable layout system to support multiple + quantization formats (FP8, INT4, INT8, etc.) without code duplication. + + The layout_type determines format-specific behavior, while common operations + (detach, clone, to) are handled generically. + + Attributes: + _qdata: The quantized tensor data + _layout_type: Layout class (e.g., TensorCoreFP8Layout) + _layout_params: Dict with layout-specific params (scale, zero_point, etc.) + """ + + @staticmethod + def __new__(cls, qdata, layout_type, layout_params): + """ + Create a quantized tensor. + + Args: + qdata: The quantized data tensor + layout_type: Layout class (subclass of QuantizedLayout) + layout_params: Dict with layout-specific parameters + """ + return torch.Tensor._make_subclass(cls, qdata, require_grad=False) + + def __init__(self, qdata, layout_type, layout_params): + self._qdata = qdata.contiguous() + self._layout_type = layout_type + self._layout_params = layout_params + + def __repr__(self): + layout_name = self._layout_type.__name__ + param_str = ", ".join(f"{k}={v}" for k, v in list(self._layout_params.items())[:2]) + return f"QuantizedTensor(shape={self.shape}, layout={layout_name}, {param_str})" + + @property + def layout_type(self): + return self._layout_type + + def __tensor_flatten__(self): + """ + Tensor flattening protocol for proper device movement. 
+        """
+        inner_tensors = ["_qdata"]
+        ctx = {
+            "layout_type": self._layout_type,
+        }
+
+        tensor_params = {}
+        non_tensor_params = {}
+        for k, v in self._layout_params.items():
+            if isinstance(v, torch.Tensor):
+                tensor_params[k] = v
+            else:
+                non_tensor_params[k] = v
+
+        ctx["tensor_param_keys"] = list(tensor_params.keys())
+        ctx["non_tensor_params"] = non_tensor_params
+
+        for k, v in tensor_params.items():
+            attr_name = f"_layout_param_{k}"
+            object.__setattr__(self, attr_name, v)
+            inner_tensors.append(attr_name)
+
+        return inner_tensors, ctx
+
+    @staticmethod
+    def __tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride):
+        """
+        Tensor unflattening protocol for proper device movement.
+        Reconstructs the QuantizedTensor after device movement.
+        """
+        layout_type = ctx["layout_type"]
+        layout_params = dict(ctx["non_tensor_params"])
+
+        for key in ctx["tensor_param_keys"]:
+            attr_name = f"_layout_param_{key}"
+            layout_params[key] = inner_tensors[attr_name]
+
+        return QuantizedTensor(inner_tensors["_qdata"], layout_type, layout_params)
+
+    @classmethod
+    def from_float(cls, tensor, layout_type, **quantize_kwargs) -> 'QuantizedTensor':
+        qdata, layout_params = layout_type.quantize(tensor, **quantize_kwargs)
+        return cls(qdata, layout_type, layout_params)
+
+    def dequantize(self) -> torch.Tensor:
+        return self._layout_type.dequantize(self._qdata, **self._layout_params)
+
+    @classmethod
+    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
+        kwargs = kwargs or {}
+
+        # Step 1: Check generic utilities first (detach, clone, to, etc.)
+        if func in _GENERIC_UTILS:
+            return _GENERIC_UTILS[func](func, args, kwargs)
+
+        # Step 2: Check layout-specific handlers (linear, matmul, etc.)
+        layout_type = _get_layout_from_args(args)
+        if layout_type and func in _LAYOUT_REGISTRY:
+            handler = _LAYOUT_REGISTRY[func].get(layout_type)
+            if handler:
+                return handler(func, args, kwargs)
+
+        # Step 3: Fallback to dequantization
+        if isinstance(args[0] if args else None, QuantizedTensor):
+            logging.info(f"QuantizedTensor: Unhandled operation {func}, falling back to dequantization. kwargs={kwargs}")
+        return cls._dequant_and_fallback(func, args, kwargs)
+
+    @classmethod
+    def _dequant_and_fallback(cls, func, args, kwargs):
+        def dequant_arg(arg):
+            if isinstance(arg, QuantizedTensor):
+                return arg.dequantize()
+            elif isinstance(arg, (list, tuple)):
+                return type(arg)(dequant_arg(a) for a in arg)
+            return arg
+
+        new_args = dequant_arg(args)
+        new_kwargs = dequant_arg(kwargs)
+        return func(*new_args, **new_kwargs)
+
+
+# ==============================================================================
+# Generic Utilities (Layout-Agnostic Operations)
+# ==============================================================================
+
+def _create_transformed_qtensor(qt, transform_fn):
+    new_data = transform_fn(qt._qdata)
+    new_params = _copy_layout_params(qt._layout_params)
+    return QuantizedTensor(new_data, qt._layout_type, new_params)
+
+
+def _handle_device_transfer(qt, target_device, target_dtype=None, target_layout=None, op_name="to"):
+    if target_dtype is not None and target_dtype != qt.dtype:
+        logging.warning(
+            f"QuantizedTensor: dtype conversion requested to {target_dtype}, "
+            f"but not supported for quantized tensors. Ignoring dtype."
+        )
+
+    if target_layout is not None and target_layout != torch.strided:
+        logging.warning(
+            f"QuantizedTensor: layout change requested to {target_layout}, "
+            f"but not supported. Ignoring layout."
+ ) + + # Handle device transfer + current_device = qt._qdata.device + if target_device is not None: + # Normalize device for comparison + if isinstance(target_device, str): + target_device = torch.device(target_device) + if isinstance(current_device, str): + current_device = torch.device(current_device) + + if target_device != current_device: + logging.debug(f"QuantizedTensor.{op_name}: Moving from {current_device} to {target_device}") + new_q_data = qt._qdata.to(device=target_device) + new_params = _move_layout_params_to_device(qt._layout_params, target_device) + new_qt = QuantizedTensor(new_q_data, qt._layout_type, new_params) + logging.debug(f"QuantizedTensor.{op_name}: Created new tensor on {target_device}") + return new_qt + + logging.debug(f"QuantizedTensor.{op_name}: No device change needed, returning original") + return qt + + +@register_generic_util(torch.ops.aten.detach.default) +def generic_detach(func, args, kwargs): + """Detach operation - creates a detached copy of the quantized tensor.""" + qt = args[0] + if isinstance(qt, QuantizedTensor): + return _create_transformed_qtensor(qt, lambda x: x.detach()) + return func(*args, **kwargs) + + +@register_generic_util(torch.ops.aten.clone.default) +def generic_clone(func, args, kwargs): + """Clone operation - creates a deep copy of the quantized tensor.""" + qt = args[0] + if isinstance(qt, QuantizedTensor): + return _create_transformed_qtensor(qt, lambda x: x.clone()) + return func(*args, **kwargs) + + +@register_generic_util(torch.ops.aten._to_copy.default) +def generic_to_copy(func, args, kwargs): + """Device/dtype transfer operation - handles .to(device) calls.""" + qt = args[0] + if isinstance(qt, QuantizedTensor): + return _handle_device_transfer( + qt, + target_device=kwargs.get('device', None), + target_dtype=kwargs.get('dtype', None), + op_name="_to_copy" + ) + return func(*args, **kwargs) + + +@register_generic_util(torch.ops.aten.to.dtype_layout) +def generic_to_dtype_layout(func, args, kwargs): + """Handle .to(device) calls using the dtype_layout variant.""" + qt = args[0] + if isinstance(qt, QuantizedTensor): + return _handle_device_transfer( + qt, + target_device=kwargs.get('device', None), + target_dtype=kwargs.get('dtype', None), + target_layout=kwargs.get('layout', None), + op_name="to" + ) + return func(*args, **kwargs) + + +@register_generic_util(torch.ops.aten.copy_.default) +def generic_copy_(func, args, kwargs): + qt_dest = args[0] + src = args[1] + + if isinstance(qt_dest, QuantizedTensor): + if isinstance(src, QuantizedTensor): + # Copy from another quantized tensor + qt_dest._qdata.copy_(src._qdata) + qt_dest._layout_type = src._layout_type + qt_dest._layout_params = _copy_layout_params(src._layout_params) + else: + # Copy from regular tensor - just copy raw data + qt_dest._qdata.copy_(src) + return qt_dest + return func(*args, **kwargs) + + +@register_generic_util(torch.ops.aten._has_compatible_shallow_copy_type.default) +def generic_has_compatible_shallow_copy_type(func, args, kwargs): + return True + +# ============================================================================== +# FP8 Layout + Operation Handlers +# ============================================================================== +class TensorCoreFP8Layout(QuantizedLayout): + """ + Storage format: + - qdata: FP8 tensor (torch.float8_e4m3fn or torch.float8_e5m2) + - scale: Scalar tensor (float32) for dequantization + - orig_dtype: Original dtype before quantization (for casting back) + """ + @classmethod + def quantize(cls, tensor, 
scale=None, dtype=torch.float8_e4m3fn): + orig_dtype = tensor.dtype + + if scale is None: + scale = torch.amax(tensor.abs()) / torch.finfo(dtype).max + + if not isinstance(scale, torch.Tensor): + scale = torch.tensor(scale) + scale = scale.to(device=tensor.device, dtype=torch.float32) + + lp_amax = torch.finfo(dtype).max + tensor_scaled = tensor.float() / scale + torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled) + qdata = tensor_scaled.to(dtype, memory_format=torch.contiguous_format) + + layout_params = { + 'scale': scale, + 'orig_dtype': orig_dtype + } + return qdata, layout_params + + @staticmethod + def dequantize(qdata, scale, orig_dtype, **kwargs): + plain_tensor = torch.ops.aten._to_copy.default(qdata, dtype=orig_dtype) + return plain_tensor * scale + + @classmethod + def get_plain_tensors(cls, qtensor): + return qtensor._qdata, qtensor._layout_params['scale'] + + +@register_layout_op(torch.ops.aten.linear.default, TensorCoreFP8Layout) +def fp8_linear(func, args, kwargs): + input_tensor = args[0] + weight = args[1] + bias = args[2] if len(args) > 2 else None + + if isinstance(input_tensor, QuantizedTensor) and isinstance(weight, QuantizedTensor): + plain_input, scale_a = TensorCoreFP8Layout.get_plain_tensors(input_tensor) + plain_weight, scale_b = TensorCoreFP8Layout.get_plain_tensors(weight) + + out_dtype = kwargs.get("out_dtype") + if out_dtype is None: + out_dtype = input_tensor._layout_params['orig_dtype'] + + weight_t = plain_weight.t() + + tensor_2d = False + if len(plain_input.shape) == 2: + tensor_2d = True + plain_input = plain_input.unsqueeze(1) + + input_shape = plain_input.shape + if len(input_shape) != 3: + return None + + try: + output = torch._scaled_mm( + plain_input.reshape(-1, input_shape[2]), + weight_t, + bias=bias, + scale_a=scale_a, + scale_b=scale_b, + out_dtype=out_dtype, + ) + if not tensor_2d: + output = output.reshape((-1, input_shape[1], weight.shape[0])) + + if output.dtype in [torch.float8_e4m3fn, torch.float8_e5m2]: + output_scale = scale_a * scale_b + output_params = { + 'scale': output_scale, + 'orig_dtype': input_tensor._layout_params['orig_dtype'] + } + return QuantizedTensor(output, TensorCoreFP8Layout, output_params) + else: + return output + + except Exception as e: + raise RuntimeError(f"FP8 _scaled_mm failed, falling back to dequantization: {e}") + + # Case 2: DQ Fallback + if isinstance(weight, QuantizedTensor): + weight = weight.dequantize() + if isinstance(input_tensor, QuantizedTensor): + input_tensor = input_tensor.dequantize() + + return torch.nn.functional.linear(input_tensor, weight, bias) diff --git a/comfy/sd.py b/comfy/sd.py index 28bee248d..6411bb27d 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -1262,7 +1262,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c return (model_patcher, clip, vae, clipvision) -def load_diffusion_model_state_dict(sd, model_options={}): +def load_diffusion_model_state_dict(sd, model_options={}, metadata=None): """ Loads a UNet diffusion model from a state dictionary, supporting both diffusers and regular formats. 
@@ -1296,7 +1296,7 @@ def load_diffusion_model_state_dict(sd, model_options={}): weight_dtype = comfy.utils.weight_dtype(sd) load_device = model_management.get_torch_device() - model_config = model_detection.model_config_from_unet(sd, "") + model_config = model_detection.model_config_from_unet(sd, "", metadata=metadata) if model_config is not None: new_sd = sd @@ -1330,7 +1330,10 @@ def load_diffusion_model_state_dict(sd, model_options={}): else: unet_dtype = dtype - manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes) + if hasattr(model_config, "layer_quant_config"): + manual_cast_dtype = model_management.unet_manual_cast(None, load_device, model_config.supported_inference_dtypes) + else: + manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes) model_config.set_inference_dtype(unet_dtype, manual_cast_dtype) model_config.custom_operations = model_options.get("custom_operations", model_config.custom_operations) if model_options.get("fp8_optimizations", False): @@ -1346,8 +1349,8 @@ def load_diffusion_model_state_dict(sd, model_options={}): def load_diffusion_model(unet_path, model_options={}): - sd = comfy.utils.load_torch_file(unet_path) - model = load_diffusion_model_state_dict(sd, model_options=model_options) + sd, metadata = comfy.utils.load_torch_file(unet_path, return_metadata=True) + model = load_diffusion_model_state_dict(sd, model_options=model_options, metadata=metadata) if model is None: logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path)) raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd))) diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 54573abb1..e4bd74514 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -50,6 +50,7 @@ class BASE: manual_cast_dtype = None custom_operations = None scaled_fp8 = None + layer_quant_config = None # Per-layer quantization configuration for mixed precision optimizations = {"fp8": False} @classmethod diff --git a/tests-unit/comfy_quant/test_mixed_precision.py b/tests-unit/comfy_quant/test_mixed_precision.py new file mode 100644 index 000000000..267bc177b --- /dev/null +++ b/tests-unit/comfy_quant/test_mixed_precision.py @@ -0,0 +1,232 @@ +import unittest +import torch +import sys +import os + +# Add comfy to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) + +def has_gpu(): + return torch.cuda.is_available() + +from comfy.cli_args import args +if not has_gpu(): + args.cpu = True + +from comfy import ops +from comfy.quant_ops import QuantizedTensor, TensorCoreFP8Layout + + +class SimpleModel(torch.nn.Module): + def __init__(self, operations=ops.disable_weight_init): + super().__init__() + self.layer1 = operations.Linear(10, 20, device="cpu", dtype=torch.bfloat16) + self.layer2 = operations.Linear(20, 30, device="cpu", dtype=torch.bfloat16) + self.layer3 = operations.Linear(30, 40, device="cpu", dtype=torch.bfloat16) + + def forward(self, x): + x = self.layer1(x) + x = torch.nn.functional.relu(x) + x = self.layer2(x) + x = torch.nn.functional.relu(x) + x = self.layer3(x) + return x + + +class TestMixedPrecisionOps(unittest.TestCase): + + def test_all_layers_standard(self): + """Test that model with no quantization works normally""" + # Configure no quantization + ops.MixedPrecisionOps._layer_quant_config = {} + + # Create model + 
model = SimpleModel(operations=ops.MixedPrecisionOps) + + # Initialize weights manually + model.layer1.weight = torch.nn.Parameter(torch.randn(20, 10, dtype=torch.bfloat16)) + model.layer1.bias = torch.nn.Parameter(torch.randn(20, dtype=torch.bfloat16)) + model.layer2.weight = torch.nn.Parameter(torch.randn(30, 20, dtype=torch.bfloat16)) + model.layer2.bias = torch.nn.Parameter(torch.randn(30, dtype=torch.bfloat16)) + model.layer3.weight = torch.nn.Parameter(torch.randn(40, 30, dtype=torch.bfloat16)) + model.layer3.bias = torch.nn.Parameter(torch.randn(40, dtype=torch.bfloat16)) + + # Initialize weight_function and bias_function + for layer in [model.layer1, model.layer2, model.layer3]: + layer.weight_function = [] + layer.bias_function = [] + + # Forward pass + input_tensor = torch.randn(5, 10, dtype=torch.bfloat16) + output = model(input_tensor) + + self.assertEqual(output.shape, (5, 40)) + self.assertEqual(output.dtype, torch.bfloat16) + + def test_mixed_precision_load(self): + """Test loading a mixed precision model from state dict""" + # Configure mixed precision: layer1 is FP8, layer2 and layer3 are standard + layer_quant_config = { + "layer1": { + "format": "float8_e4m3fn", + "params": {} + }, + "layer3": { + "format": "float8_e4m3fn", + "params": {} + } + } + ops.MixedPrecisionOps._layer_quant_config = layer_quant_config + + # Create state dict with mixed precision + fp8_weight1 = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn) + fp8_weight3 = torch.randn(40, 30, dtype=torch.float32).to(torch.float8_e4m3fn) + + state_dict = { + # Layer 1: FP8 E4M3FN + "layer1.weight": fp8_weight1, + "layer1.bias": torch.randn(20, dtype=torch.bfloat16), + "layer1.weight_scale": torch.tensor(2.0, dtype=torch.float32), + + # Layer 2: Standard BF16 + "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16), + "layer2.bias": torch.randn(30, dtype=torch.bfloat16), + + # Layer 3: FP8 E4M3FN + "layer3.weight": fp8_weight3, + "layer3.bias": torch.randn(40, dtype=torch.bfloat16), + "layer3.weight_scale": torch.tensor(1.5, dtype=torch.float32), + } + + # Create model and load state dict (strict=False because custom loading pops keys) + model = SimpleModel(operations=ops.MixedPrecisionOps) + model.load_state_dict(state_dict, strict=False) + + # Verify weights are wrapped in QuantizedTensor + self.assertIsInstance(model.layer1.weight, QuantizedTensor) + self.assertEqual(model.layer1.weight._layout_type, TensorCoreFP8Layout) + + # Layer 2 should NOT be quantized + self.assertNotIsInstance(model.layer2.weight, QuantizedTensor) + + # Layer 3 should be quantized + self.assertIsInstance(model.layer3.weight, QuantizedTensor) + self.assertEqual(model.layer3.weight._layout_type, TensorCoreFP8Layout) + + # Verify scales were loaded + self.assertEqual(model.layer1.weight._layout_params['scale'].item(), 2.0) + self.assertEqual(model.layer3.weight._layout_params['scale'].item(), 1.5) + + # Forward pass + input_tensor = torch.randn(5, 10, dtype=torch.bfloat16) + output = model(input_tensor) + + self.assertEqual(output.shape, (5, 40)) + + def test_state_dict_quantized_preserved(self): + """Test that quantized weights are preserved in state_dict()""" + # Configure mixed precision + layer_quant_config = { + "layer1": { + "format": "float8_e4m3fn", + "params": {} + } + } + ops.MixedPrecisionOps._layer_quant_config = layer_quant_config + + # Create and load model + fp8_weight = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn) + state_dict1 = { + "layer1.weight": fp8_weight, + "layer1.bias": 
torch.randn(20, dtype=torch.bfloat16), + "layer1.weight_scale": torch.tensor(3.0, dtype=torch.float32), + "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16), + "layer2.bias": torch.randn(30, dtype=torch.bfloat16), + "layer3.weight": torch.randn(40, 30, dtype=torch.bfloat16), + "layer3.bias": torch.randn(40, dtype=torch.bfloat16), + } + + model = SimpleModel(operations=ops.MixedPrecisionOps) + model.load_state_dict(state_dict1, strict=False) + + # Save state dict + state_dict2 = model.state_dict() + + # Verify layer1.weight is a QuantizedTensor with scale preserved + self.assertIsInstance(state_dict2["layer1.weight"], QuantizedTensor) + self.assertEqual(state_dict2["layer1.weight"]._layout_params['scale'].item(), 3.0) + self.assertEqual(state_dict2["layer1.weight"]._layout_type, TensorCoreFP8Layout) + + # Verify non-quantized layers are standard tensors + self.assertNotIsInstance(state_dict2["layer2.weight"], QuantizedTensor) + self.assertNotIsInstance(state_dict2["layer3.weight"], QuantizedTensor) + + def test_weight_function_compatibility(self): + """Test that weight_function (LoRA) works with quantized layers""" + # Configure FP8 quantization + layer_quant_config = { + "layer1": { + "format": "float8_e4m3fn", + "params": {} + } + } + ops.MixedPrecisionOps._layer_quant_config = layer_quant_config + + # Create and load model + fp8_weight = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn) + state_dict = { + "layer1.weight": fp8_weight, + "layer1.bias": torch.randn(20, dtype=torch.bfloat16), + "layer1.weight_scale": torch.tensor(2.0, dtype=torch.float32), + "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16), + "layer2.bias": torch.randn(30, dtype=torch.bfloat16), + "layer3.weight": torch.randn(40, 30, dtype=torch.bfloat16), + "layer3.bias": torch.randn(40, dtype=torch.bfloat16), + } + + model = SimpleModel(operations=ops.MixedPrecisionOps) + model.load_state_dict(state_dict, strict=False) + + # Add a weight function (simulating LoRA) + # This should trigger dequantization during forward pass + def apply_lora(weight): + lora_delta = torch.randn_like(weight) * 0.01 + return weight + lora_delta + + model.layer1.weight_function.append(apply_lora) + + # Forward pass should work with LoRA (triggers weight_function path) + input_tensor = torch.randn(5, 10, dtype=torch.bfloat16) + output = model(input_tensor) + + self.assertEqual(output.shape, (5, 40)) + + def test_error_handling_unknown_format(self): + """Test that unknown formats raise error""" + # Configure with unknown format + layer_quant_config = { + "layer1": { + "format": "unknown_format_xyz", + "params": {} + } + } + ops.MixedPrecisionOps._layer_quant_config = layer_quant_config + + # Create state dict + state_dict = { + "layer1.weight": torch.randn(20, 10, dtype=torch.bfloat16), + "layer1.bias": torch.randn(20, dtype=torch.bfloat16), + "layer2.weight": torch.randn(30, 20, dtype=torch.bfloat16), + "layer2.bias": torch.randn(30, dtype=torch.bfloat16), + "layer3.weight": torch.randn(40, 30, dtype=torch.bfloat16), + "layer3.bias": torch.randn(40, dtype=torch.bfloat16), + } + + # Load should raise KeyError for unknown format in QUANT_FORMAT_MIXINS + model = SimpleModel(operations=ops.MixedPrecisionOps) + with self.assertRaises(KeyError): + model.load_state_dict(state_dict, strict=False) + +if __name__ == "__main__": + unittest.main() + diff --git a/tests-unit/comfy_quant/test_quant_registry.py b/tests-unit/comfy_quant/test_quant_registry.py new file mode 100644 index 000000000..477811029 --- /dev/null +++ 
b/tests-unit/comfy_quant/test_quant_registry.py @@ -0,0 +1,190 @@ +import unittest +import torch +import sys +import os + +# Add comfy to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) + +def has_gpu(): + return torch.cuda.is_available() + +from comfy.cli_args import args +if not has_gpu(): + args.cpu = True + +from comfy.quant_ops import QuantizedTensor, TensorCoreFP8Layout + + +class TestQuantizedTensor(unittest.TestCase): + """Test the QuantizedTensor subclass with FP8 layout""" + + def test_creation(self): + """Test creating a QuantizedTensor with TensorCoreFP8Layout""" + fp8_data = torch.randn(256, 128, dtype=torch.float32).to(torch.float8_e4m3fn) + scale = torch.tensor(2.0) + layout_params = {'scale': scale, 'orig_dtype': torch.bfloat16} + + qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + + self.assertIsInstance(qt, QuantizedTensor) + self.assertEqual(qt.shape, (256, 128)) + self.assertEqual(qt.dtype, torch.float8_e4m3fn) + self.assertEqual(qt._layout_params['scale'], scale) + self.assertEqual(qt._layout_params['orig_dtype'], torch.bfloat16) + self.assertEqual(qt._layout_type, TensorCoreFP8Layout) + + def test_dequantize(self): + """Test explicit dequantization""" + + fp8_data = torch.ones(10, 20, dtype=torch.float32).to(torch.float8_e4m3fn) + scale = torch.tensor(3.0) + layout_params = {'scale': scale, 'orig_dtype': torch.float32} + + qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + dequantized = qt.dequantize() + + self.assertEqual(dequantized.dtype, torch.float32) + self.assertTrue(torch.allclose(dequantized, torch.ones(10, 20) * 3.0, rtol=0.1)) + + def test_from_float(self): + """Test creating QuantizedTensor from float tensor""" + float_tensor = torch.randn(64, 32, dtype=torch.float32) + scale = torch.tensor(1.5) + + qt = QuantizedTensor.from_float( + float_tensor, + TensorCoreFP8Layout, + scale=scale, + dtype=torch.float8_e4m3fn + ) + + self.assertIsInstance(qt, QuantizedTensor) + self.assertEqual(qt.dtype, torch.float8_e4m3fn) + self.assertEqual(qt.shape, (64, 32)) + + # Verify dequantization gives approximately original values + dequantized = qt.dequantize() + mean_rel_error = ((dequantized - float_tensor).abs() / (float_tensor.abs() + 1e-6)).mean() + self.assertLess(mean_rel_error, 0.1) + + +class TestGenericUtilities(unittest.TestCase): + """Test generic utility operations""" + + def test_detach(self): + """Test detach operation on quantized tensor""" + fp8_data = torch.randn(10, 20, dtype=torch.float32).to(torch.float8_e4m3fn) + scale = torch.tensor(1.5) + layout_params = {'scale': scale, 'orig_dtype': torch.float32} + qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + + # Detach should return a new QuantizedTensor + qt_detached = qt.detach() + + self.assertIsInstance(qt_detached, QuantizedTensor) + self.assertEqual(qt_detached.shape, qt.shape) + self.assertEqual(qt_detached._layout_type, TensorCoreFP8Layout) + + def test_clone(self): + """Test clone operation on quantized tensor""" + fp8_data = torch.randn(10, 20, dtype=torch.float32).to(torch.float8_e4m3fn) + scale = torch.tensor(1.5) + layout_params = {'scale': scale, 'orig_dtype': torch.float32} + qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + + # Clone should return a new QuantizedTensor + qt_cloned = qt.clone() + + self.assertIsInstance(qt_cloned, QuantizedTensor) + self.assertEqual(qt_cloned.shape, qt.shape) + self.assertEqual(qt_cloned._layout_type, TensorCoreFP8Layout) + + # Verify it's a deep copy + 
self.assertIsNot(qt_cloned._qdata, qt._qdata)
+
+    @unittest.skipUnless(has_gpu(), "GPU not available")
+    def test_to_device(self):
+        """Test device transfer"""
+        fp8_data = torch.randn(10, 20, dtype=torch.float32).to(torch.float8_e4m3fn)
+        scale = torch.tensor(1.5)
+        layout_params = {'scale': scale, 'orig_dtype': torch.float32}
+        qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params)
+
+        # Moving to same device should work (CPU to CPU)
+        qt_cpu = qt.to('cpu')
+
+        self.assertIsInstance(qt_cpu, QuantizedTensor)
+        self.assertEqual(qt_cpu.device.type, 'cpu')
+        self.assertEqual(qt_cpu._layout_params['scale'].device.type, 'cpu')
+
+
+class TestTensorCoreFP8Layout(unittest.TestCase):
+    """Test the TensorCoreFP8Layout implementation"""
+
+    def test_quantize(self):
+        """Test quantization method"""
+        float_tensor = torch.randn(32, 64, dtype=torch.float32)
+        scale = torch.tensor(1.5)
+
+        qdata, layout_params = TensorCoreFP8Layout.quantize(
+            float_tensor,
+            scale=scale,
+            dtype=torch.float8_e4m3fn
+        )
+
+        self.assertEqual(qdata.dtype, torch.float8_e4m3fn)
+        self.assertEqual(qdata.shape, float_tensor.shape)
+        self.assertIn('scale', layout_params)
+        self.assertIn('orig_dtype', layout_params)
+        self.assertEqual(layout_params['orig_dtype'], torch.float32)
+
+    def test_dequantize(self):
+        """Test dequantization method"""
+        float_tensor = torch.ones(10, 20, dtype=torch.float32) * 3.0
+        scale = torch.tensor(1.0)
+
+        qdata, layout_params = TensorCoreFP8Layout.quantize(
+            float_tensor,
+            scale=scale,
+            dtype=torch.float8_e4m3fn
+        )
+
+        dequantized = TensorCoreFP8Layout.dequantize(qdata, **layout_params)
+
+        # Should approximately match original
+        self.assertTrue(torch.allclose(dequantized, float_tensor, rtol=0.1, atol=0.1))
+
+
+class TestFallbackMechanism(unittest.TestCase):
+    """Test fallback for unsupported operations"""
+
+    def test_unsupported_op_dequantizes(self):
+        """Test that unsupported operations fall back to dequantization"""
+        # Set seed for reproducibility
+        torch.manual_seed(42)
+
+        # Create quantized tensor
+        a_fp32 = torch.randn(10, 20, dtype=torch.float32)
+        scale = torch.tensor(1.0)
+        a_q = QuantizedTensor.from_float(
+            a_fp32,
+            TensorCoreFP8Layout,
+            scale=scale,
+            dtype=torch.float8_e4m3fn
+        )
+
+        # Call an operation that doesn't have a registered handler
+        # For example, torch.abs
+        result = torch.abs(a_q)
+
+        # Should work via fallback (dequantize → abs → return)
+        self.assertNotIsInstance(result, QuantizedTensor)
+        expected = torch.abs(a_fp32)
+        # FP8 introduces quantization error, so use loose tolerance
+        mean_error = (result - expected).abs().mean()
+        self.assertLess(mean_error, 0.05, f"Mean error {mean_error:.4f} is too large")
+
+
+if __name__ == "__main__":
+    unittest.main()

From d202c2ba7404affd58a2199aeb514b3cc48e0ef3 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Wed, 29 Oct 2025 06:22:08 +1000
Subject: [PATCH 0807/1073] execution: Allow subgraph nodes to execute multiple times (#10499)

In the case of --cache-none, lazy and subgraph execution can cause
anything to be run multiple times per workflow. If a rerun node is
itself a subgraph generator, this will crash for two reasons.

First, pending_subgraph_results[] does not clean up entries after use.
So when a pending_subgraph_result is consumed, remove the entry, so
that if the corresponding node is fully re-executed the lookup misses
and execution falls through to run the node as it should (sketched
below).
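In outline, the consumption path now behaves like this (a minimal,
runnable sketch with hypothetical names; the real logic lives in
execution.execute() and works on resolved subgraph outputs):

pending_subgraph_results: dict[str, list] = {}

def run_node(node_id: str) -> list:
    # Stand-in for normal execution, which may enqueue new pending results.
    return [f"fresh output of {node_id}"]

def execute(node_id: str) -> list:
    pending = pending_subgraph_results.pop(node_id, None)  # consume exactly once
    if pending is not None:
        return pending  # reuse the already-resolved subgraph outputs
    # A fully re-executed node now misses the lookup and falls through.
    return run_node(node_id)

pending_subgraph_results["7"] = ["resolved subgraph output"]
assert execute("7") == ["resolved subgraph output"]  # consumed
assert execute("7") == ["fresh output of 7"]         # re-execution falls through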
Secondly, there is an explicit check against duplicates when subgraph
nodes are added as ephemerals to the dynprompt. Remove this enforcement,
as the use case is now valid.
---
 execution.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/execution.py b/execution.py
index b14bb14c7..20e106213 100644
--- a/execution.py
+++ b/execution.py
@@ -445,6 +445,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
             resolved_outputs.append(tuple(resolved_output))
         output_data = merge_result_data(resolved_outputs, class_def)
         output_ui = []
+        del pending_subgraph_results[unique_id]
         has_subgraph = False
     else:
         get_progress_state().start_progress(unique_id)
@@ -527,10 +528,6 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
             if new_graph is None:
                 cached_outputs.append((False, node_outputs))
             else:
-                # Check for conflicts
-                for node_id in new_graph.keys():
-                    if dynprompt.has_node(node_id):
-                        raise DuplicateNodeError(f"Attempt to add duplicate node {node_id}. Ensure node ids are unique and deterministic or use graph_utils.GraphBuilder.")
                 for node_id, node_info in new_graph.items():
                     new_node_ids.append(node_id)
                     display_id = node_info.get("override_display_id", unique_id)

From 210f7a1ba580d57d817ca68346cb72b8d0a26ad2 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Tue, 28 Oct 2025 23:38:05 +0200
Subject: [PATCH 0808/1073] convert nodes_recraft.py to V3 schema (#10507)

---
 comfy_api_nodes/nodes_recraft.py | 1319 +++++++++++++-----------------
 1 file changed, 585 insertions(+), 734 deletions(-)

diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py
index 8ee7e55c4..dee186cd6 100644
--- a/comfy_api_nodes/nodes_recraft.py
+++ b/comfy_api_nodes/nodes_recraft.py
@@ -1,82 +1,71 @@
-from __future__ import annotations
-from inspect import cleandoc
-from typing import Optional
+from io import BytesIO
+from typing import Optional, Union
+
+import aiohttp
+import torch
+from PIL import UnidentifiedImageError
+from typing_extensions import override
+
 from comfy.utils import ProgressBar
-from comfy_extras.nodes_images import SVG # Added
-from comfy.comfy_types.node_typing import IO
+from comfy_api.latest import IO, ComfyExtension
+from comfy_api_nodes.apinode_utils import (
+    resize_mask_to_image,
+)
 from comfy_api_nodes.apis.recraft_api import (
-    RecraftImageGenerationRequest,
-    RecraftImageGenerationResponse,
-    RecraftImageSize,
-    RecraftModel,
-    RecraftStyle,
-    RecraftStyleV3,
     RecraftColor,
     RecraftColorChain,
     RecraftControls,
+    RecraftImageGenerationRequest,
+    RecraftImageGenerationResponse,
+    RecraftImageSize,
     RecraftIO,
+    RecraftModel,
+    RecraftStyle,
+    RecraftStyleV3,
     get_v3_substyles,
 )
-from comfy_api_nodes.apis.client import (
+from comfy_api_nodes.util import (
     ApiEndpoint,
-    HttpMethod,
-    SynchronousOperation,
-    EmptyRequest,
+    bytesio_to_image_tensor,
+    download_url_as_bytesio,
+    sync_op,
+    tensor_to_bytesio,
+    validate_string,
 )
-from comfy_api_nodes.apinode_utils import (
-    download_url_to_bytesio,
-    resize_mask_to_image,
-)
-from comfy_api_nodes.util import validate_string, tensor_to_bytesio, bytesio_to_image_tensor
-from server import PromptServer
-
-import torch
-from io import BytesIO
-from PIL import UnidentifiedImageError
-import aiohttp
+from comfy_extras.nodes_images import SVG


 async def handle_recraft_file_request(
+    cls: type[IO.ComfyNode],
     image: torch.Tensor,
     path: str,
-    mask: torch.Tensor=None,
-    total_pixels=4096*4096,
-
timeout=1024, + mask: Optional[torch.Tensor] = None, + total_pixels: int = 4096 * 4096, + timeout: int = 1024, request=None, - auth_kwargs: dict[str,str] = None, ) -> list[BytesIO]: - """ - Handle sending common Recraft file-only request to get back file bytes. - """ - if request is None: - request = EmptyRequest() + """Handle sending common Recraft file-only request to get back file bytes.""" - files = { - 'image': tensor_to_bytesio(image, total_pixels=total_pixels).read() - } + files = {"image": tensor_to_bytesio(image, total_pixels=total_pixels).read()} if mask is not None: - files['mask'] = tensor_to_bytesio(mask, total_pixels=total_pixels).read() + files["mask"] = tensor_to_bytesio(mask, total_pixels=total_pixels).read() - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=type(request), - response_model=RecraftImageGenerationResponse, - ), - request=request, + response = await sync_op( + cls, + endpoint=ApiEndpoint(path=path, method="POST"), + response_model=RecraftImageGenerationResponse, + data=request if request else None, files=files, content_type="multipart/form-data", - auth_kwargs=auth_kwargs, multipart_parser=recraft_multipart_parser, + max_retries=1, ) - response: RecraftImageGenerationResponse = await operation.execute() all_bytesio = [] if response.image is not None: - all_bytesio.append(await download_url_to_bytesio(response.image.url, timeout=timeout)) + all_bytesio.append(await download_url_as_bytesio(response.image.url, timeout=timeout)) else: for data in response.data: - all_bytesio.append(await download_url_to_bytesio(data.url, timeout=timeout)) + all_bytesio.append(await download_url_as_bytesio(data.url, timeout=timeout)) return all_bytesio @@ -84,11 +73,11 @@ async def handle_recraft_file_request( def recraft_multipart_parser( data, parent_key=None, - formatter: callable = None, - converted_to_check: list[list] = None, + formatter: Optional[type[callable]] = None, + converted_to_check: Optional[list[list]] = None, is_list: bool = False, - return_mode: str = "formdata" # "dict" | "formdata" -) -> dict | aiohttp.FormData: + return_mode: str = "formdata", # "dict" | "formdata" +) -> Union[dict, aiohttp.FormData]: """ Formats data such that multipart/form-data will work with aiohttp library when both files and data are present. 
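(For orientation: the flattening this parser performs turns nested request data into bracketed multipart field names. A rough illustration with made-up values; the exact key shapes are illustrative, not a specification:)

    # Illustrative only: nested request data...
    data = {"style": "realistic_image", "controls": {"colors": [{"rgb": [0, 0, 0]}]}}
    # ...is emitted as flat multipart fields, roughly:
    #   "style"                      -> "realistic_image"
    #   "controls[colors][][rgb][]"  -> one field per value in the list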
@@ -108,8 +97,8 @@ def recraft_multipart_parser( # Modification of a function that handled a different type of multipart parsing, big ups: # https://gist.github.com/kazqvaizer/4cebebe5db654a414132809f9f88067b - def handle_converted_lists(item, parent_key, lists_to_check=tuple[list]): - # if list already exists exists, just extend list with data + def handle_converted_lists(item, parent_key, lists_to_check=list[list]): + # if list already exists, just extend list with data for check_list in lists_to_check: for conv_tuple in check_list: if conv_tuple[0] == parent_key and isinstance(conv_tuple[1], list): @@ -125,7 +114,7 @@ def recraft_multipart_parser( formatter = lambda v: v # Multipart representation of value if not isinstance(data, dict): - # if list already exists exists, just extend list with data + # if list already exists, just extend list with data added = handle_converted_lists(data, parent_key, converted_to_check) if added: return {} @@ -146,7 +135,9 @@ def recraft_multipart_parser( elif isinstance(value, list): for ind, list_value in enumerate(value): iter_key = f"{current_key}[]" - converted.extend(recraft_multipart_parser(list_value, iter_key, formatter, next_check, is_list=True).items()) + converted.extend( + recraft_multipart_parser(list_value, iter_key, formatter, next_check, is_list=True).items() + ) else: converted.append((current_key, formatter(value))) @@ -166,6 +157,7 @@ class handle_recraft_image_output: """ Catch an exception related to receiving SVG data instead of image, when Infinite Style Library style_id is in use. """ + def __init__(self): pass @@ -174,243 +166,225 @@ class handle_recraft_image_output: def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None and exc_type is UnidentifiedImageError: - raise Exception("Received output data was not an image; likely an SVG. If you used style_id, make sure it is not a Vector art style.") + raise Exception( + "Received output data was not an image; likely an SVG. " + "If you used style_id, make sure it is not a Vector art style." + ) -class RecraftColorRGBNode: - """ - Create Recraft Color by choosing specific RGB values. - """ - - RETURN_TYPES = (RecraftIO.COLOR,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - RETURN_NAMES = ("recraft_color",) - FUNCTION = "create_color" - CATEGORY = "api node/image/Recraft" +class RecraftColorRGBNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftColorRGB", + display_name="Recraft Color RGB", + category="api node/image/Recraft", + description="Create Recraft Color by choosing specific RGB values.", + inputs=[ + IO.Int.Input("r", default=0, min=0, max=255, tooltip="Red value of color."), + IO.Int.Input("g", default=0, min=0, max=255, tooltip="Green value of color."), + IO.Int.Input("b", default=0, min=0, max=255, tooltip="Blue value of color."), + IO.Custom(RecraftIO.COLOR).Input("recraft_color", optional=True), + ], + outputs=[ + IO.Custom(RecraftIO.COLOR).Output(display_name="recraft_color"), + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "r": (IO.INT, { - "default": 0, - "min": 0, - "max": 255, - "tooltip": "Red value of color." - }), - "g": (IO.INT, { - "default": 0, - "min": 0, - "max": 255, - "tooltip": "Green value of color." - }), - "b": (IO.INT, { - "default": 0, - "min": 0, - "max": 255, - "tooltip": "Blue value of color." 
- }), - }, - "optional": { - "recraft_color": (RecraftIO.COLOR,), - } - } - - def create_color(self, r: int, g: int, b: int, recraft_color: RecraftColorChain=None): + def execute(cls, r: int, g: int, b: int, recraft_color: RecraftColorChain = None) -> IO.NodeOutput: recraft_color = recraft_color.clone() if recraft_color else RecraftColorChain() recraft_color.add(RecraftColor(r, g, b)) - return (recraft_color, ) + return IO.NodeOutput(recraft_color) -class RecraftControlsNode: - """ - Create Recraft Controls for customizing Recraft generation. - """ - - RETURN_TYPES = (RecraftIO.CONTROLS,) - RETURN_NAMES = ("recraft_controls",) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "create_controls" - CATEGORY = "api node/image/Recraft" +class RecraftControlsNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftControls", + display_name="Recraft Controls", + category="api node/image/Recraft", + description="Create Recraft Controls for customizing Recraft generation.", + inputs=[ + IO.Custom(RecraftIO.COLOR).Input("colors", optional=True), + IO.Custom(RecraftIO.COLOR).Input("background_color", optional=True), + ], + outputs=[ + IO.Custom(RecraftIO.CONTROLS).Output(display_name="recraft_controls"), + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - }, - "optional": { - "colors": (RecraftIO.COLOR,), - "background_color": (RecraftIO.COLOR,), - } - } - - def create_controls(self, colors: RecraftColorChain=None, background_color: RecraftColorChain=None): - return (RecraftControls(colors=colors, background_color=background_color), ) + def execute(cls, colors: RecraftColorChain = None, background_color: RecraftColorChain = None) -> IO.NodeOutput: + return IO.NodeOutput(RecraftControls(colors=colors, background_color=background_color)) -class RecraftStyleV3RealisticImageNode: - """ - Select realistic_image style and optional substyle. - """ - - RETURN_TYPES = (RecraftIO.STYLEV3,) - RETURN_NAMES = ("recraft_style",) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "create_style" - CATEGORY = "api node/image/Recraft" - +class RecraftStyleV3RealisticImageNode(IO.ComfyNode): RECRAFT_STYLE = RecraftStyleV3.realistic_image @classmethod - def INPUT_TYPES(s): - return { - "required": { - "substyle": (get_v3_substyles(s.RECRAFT_STYLE),), - } - } + def define_schema(cls): + return IO.Schema( + node_id="RecraftStyleV3RealisticImage", + display_name="Recraft Style - Realistic Image", + category="api node/image/Recraft", + description="Select realistic_image style and optional substyle.", + inputs=[ + IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), + ], + outputs=[ + IO.Custom(RecraftIO.STYLEV3).Output(display_name="recraft_style"), + ], + ) - def create_style(self, substyle: str): + @classmethod + def execute(cls, substyle: str) -> IO.NodeOutput: if substyle == "None": substyle = None - return (RecraftStyle(self.RECRAFT_STYLE, substyle),) + return IO.NodeOutput(RecraftStyle(cls.RECRAFT_STYLE, substyle)) class RecraftStyleV3DigitalIllustrationNode(RecraftStyleV3RealisticImageNode): - """ - Select digital_illustration style and optional substyle. 
- """ - RECRAFT_STYLE = RecraftStyleV3.digital_illustration + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftStyleV3DigitalIllustration", + display_name="Recraft Style - Digital Illustration", + category="api node/image/Recraft", + description="Select realistic_image style and optional substyle.", + inputs=[ + IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), + ], + outputs=[ + IO.Custom(RecraftIO.STYLEV3).Output(display_name="recraft_style"), + ], + ) + class RecraftStyleV3VectorIllustrationNode(RecraftStyleV3RealisticImageNode): - """ - Select vector_illustration style and optional substyle. - """ - RECRAFT_STYLE = RecraftStyleV3.vector_illustration + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftStyleV3VectorIllustrationNode", + display_name="Recraft Style - Realistic Image", + category="api node/image/Recraft", + description="Select realistic_image style and optional substyle.", + inputs=[ + IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE)), + ], + outputs=[ + IO.Custom(RecraftIO.STYLEV3).Output(display_name="recraft_style"), + ], + ) + class RecraftStyleV3LogoRasterNode(RecraftStyleV3RealisticImageNode): - """ - Select vector_illustration style and optional substyle. - """ - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "substyle": (get_v3_substyles(s.RECRAFT_STYLE, include_none=False),), - } - } - RECRAFT_STYLE = RecraftStyleV3.logo_raster + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftStyleV3LogoRaster", + display_name="Recraft Style - Logo Raster", + category="api node/image/Recraft", + description="Select realistic_image style and optional substyle.", + inputs=[ + IO.Combo.Input("substyle", options=get_v3_substyles(cls.RECRAFT_STYLE, include_none=False)), + ], + outputs=[ + IO.Custom(RecraftIO.STYLEV3).Output(display_name="recraft_style"), + ], + ) -class RecraftStyleInfiniteStyleLibrary: - """ - Select style based on preexisting UUID from Recraft's Infinite Style Library. - """ - RETURN_TYPES = (RecraftIO.STYLEV3,) - RETURN_NAMES = ("recraft_style",) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "create_style" - CATEGORY = "api node/image/Recraft" +class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftStyleV3InfiniteStyleLibrary", + display_name="Recraft Style - Infinite Style Library", + category="api node/image/Recraft", + description="Select style based on preexisting UUID from Recraft's Infinite Style Library.", + inputs=[ + IO.String.Input("style_id", default="", tooltip="UUID of style from Infinite Style Library."), + ], + outputs=[ + IO.Custom(RecraftIO.STYLEV3).Output(display_name="recraft_style"), + ], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "style_id": (IO.STRING, { - "default": "", - "tooltip": "UUID of style from Infinite Style Library.", - }) - } - } - - def create_style(self, style_id: str): + def execute(cls, style_id: str) -> IO.NodeOutput: if not style_id: raise Exception("The style_id input cannot be empty.") - return (RecraftStyle(style_id=style_id),) + return IO.NodeOutput(RecraftStyle(style_id=style_id)) -class RecraftTextToImageNode: - """ - Generates images synchronously based on prompt and resolution. 
- """ - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" +class RecraftTextToImageNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftTextToImageNode", + display_name="Recraft Text to Image", + category="api node/image/Recraft", + description="Generates images synchronously based on prompt and resolution.", + inputs=[ + IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."), + IO.Combo.Input( + "size", + options=[res.value for res in RecraftImageSize], + default=RecraftImageSize.res_1024x1024, + tooltip="The size of the generated image.", + ), + IO.Int.Input( + "n", + default=1, + min=1, + max=6, + tooltip="The number of images to generate.", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; " + "actual results are nondeterministic regardless of seed.", + ), + IO.Custom(RecraftIO.STYLEV3).Input("recraft_style", optional=True), + IO.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + IO.Custom(RecraftIO.CONTROLS).Input( + "recraft_controls", + tooltip="Optional additional controls over the generation via the Recraft Controls node.", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation.", - }, - ), - "size": ( - [res.value for res in RecraftImageSize], - { - "default": RecraftImageSize.res_1024x1024, - "tooltip": "The size of the generated image.", - }, - ), - "n": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 6, - "tooltip": "The number of images to generate.", - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, - "optional": { - "recraft_style": (RecraftIO.STYLEV3,), - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - "recraft_controls": ( - RecraftIO.CONTROLS, - { - "tooltip": "Optional additional controls over the generation via the Recraft Controls node." 
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, size: str, n: int, @@ -418,9 +392,7 @@ class RecraftTextToImageNode: recraft_style: RecraftStyle = None, negative_prompt: str = None, recraft_controls: RecraftControls = None, - unique_id: Optional[str] = None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False, max_length=1000) default_style = RecraftStyle(RecraftStyleV3.realistic_image) if recraft_style is None: @@ -433,14 +405,11 @@ class RecraftTextToImageNode: if not negative_prompt: negative_prompt = None - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/recraft/image_generation", - method=HttpMethod.POST, - request_model=RecraftImageGenerationRequest, - response_model=RecraftImageGenerationResponse, - ), - request=RecraftImageGenerationRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/recraft/image_generation", method="POST"), + response_model=RecraftImageGenerationResponse, + data=RecraftImageGenerationRequest( prompt=prompt, negative_prompt=negative_prompt, model=RecraftModel.recraftv3, @@ -451,109 +420,83 @@ class RecraftTextToImageNode: style_id=recraft_style.style_id, controls=controls_api, ), - auth_kwargs=kwargs, + max_retries=1, ) - response: RecraftImageGenerationResponse = await operation.execute() images = [] - urls = [] for data in response.data: with handle_recraft_image_output(): - if unique_id and data.url: - urls.append(data.url) - urls_string = '\n'.join(urls) - PromptServer.instance.send_progress_text( - f"Result URL: {urls_string}", unique_id - ) - image = bytesio_to_image_tensor( - await download_url_to_bytesio(data.url, timeout=1024) - ) + image = bytesio_to_image_tensor(await download_url_as_bytesio(data.url, timeout=1024)) if len(image.shape) < 4: image = image.unsqueeze(0) images.append(image) - output_image = torch.cat(images, dim=0) - return (output_image,) + return IO.NodeOutput(torch.cat(images, dim=0)) -class RecraftImageToImageNode: - """ - Modify image based on prompt and strength. 
- """ - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" +class RecraftImageToImageNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftImageToImageNode", + display_name="Recraft Image to Image", + category="api node/image/Recraft", + description="Modify image based on prompt and strength.", + inputs=[ + IO.Image.Input("image"), + IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."), + IO.Int.Input( + "n", + default=1, + min=1, + max=6, + tooltip="The number of images to generate.", + ), + IO.Float.Input( + "strength", + default=0.5, + min=0.0, + max=1.0, + step=0.01, + tooltip="Defines the difference with the original image, should lie in [0, 1], " + "where 0 means almost identical, and 1 means miserable similarity.", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; " + "actual results are nondeterministic regardless of seed.", + ), + IO.Custom(RecraftIO.STYLEV3).Input("recraft_style", optional=True), + IO.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + IO.Custom(RecraftIO.CONTROLS).Input( + "recraft_controls", + tooltip="Optional additional controls over the generation via the Recraft Controls node.", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE, ), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation.", - }, - ), - "n": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 6, - "tooltip": "The number of images to generate.", - }, - ), - "strength": ( - IO.FLOAT, - { - "default": 0.5, - "min": 0.0, - "max": 1.0, - "step": 0.01, - "tooltip": "Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity." - } - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, - "optional": { - "recraft_style": (RecraftIO.STYLEV3,), - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - "recraft_controls": ( - RecraftIO.CONTROLS, - { - "tooltip": "Optional additional controls over the generation via the Recraft Controls node." 
- }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call( - self, + async def execute( + cls, image: torch.Tensor, prompt: str, n: int, @@ -562,8 +505,7 @@ class RecraftImageToImageNode: recraft_style: RecraftStyle = None, negative_prompt: str = None, recraft_controls: RecraftControls = None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False, max_length=1000) default_style = RecraftStyle(RecraftStyleV3.realistic_image) if recraft_style is None: @@ -593,83 +535,69 @@ class RecraftImageToImageNode: pbar = ProgressBar(total) for i in range(total): sub_bytes = await handle_recraft_file_request( + cls, image=image[i], path="/proxy/recraft/images/imageToImage", request=request, - auth_kwargs=kwargs, ) with handle_recraft_image_output(): images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) pbar.update(1) - images_tensor = torch.cat(images, dim=0) - return (images_tensor, ) + return IO.NodeOutput(torch.cat(images, dim=0)) -class RecraftImageInpaintingNode: - """ - Modify image based on prompt and mask. - """ - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" +class RecraftImageInpaintingNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftImageInpaintingNode", + display_name="Recraft Image Inpainting", + category="api node/image/Recraft", + description="Modify image based on prompt and mask.", + inputs=[ + IO.Image.Input("image"), + IO.Mask.Input("mask"), + IO.String.Input("prompt", multiline=True, default="", tooltip="Prompt for the image generation."), + IO.Int.Input( + "n", + default=1, + min=1, + max=6, + tooltip="The number of images to generate.", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; " + "actual results are nondeterministic regardless of seed.", + ), + IO.Custom(RecraftIO.STYLEV3).Input("recraft_style", optional=True), + IO.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE, ), - "mask": (IO.MASK, ), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation.", - }, - ), - "n": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 6, - "tooltip": "The number of images to generate.", - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, - "optional": { - "recraft_style": (RecraftIO.STYLEV3,), - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call( - self, + async def execute( + cls, image: 
torch.Tensor, mask: torch.Tensor, prompt: str, @@ -677,8 +605,7 @@ class RecraftImageInpaintingNode: seed, recraft_style: RecraftStyle = None, negative_prompt: str = None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False, max_length=1000) default_style = RecraftStyle(RecraftStyleV3.realistic_image) if recraft_style is None: @@ -705,96 +632,73 @@ class RecraftImageInpaintingNode: pbar = ProgressBar(total) for i in range(total): sub_bytes = await handle_recraft_file_request( + cls, image=image[i], - mask=mask[i:i+1], + mask=mask[i : i + 1], path="/proxy/recraft/images/inpaint", request=request, - auth_kwargs=kwargs, ) with handle_recraft_image_output(): images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) pbar.update(1) - images_tensor = torch.cat(images, dim=0) - return (images_tensor, ) + return IO.NodeOutput(torch.cat(images, dim=0)) -class RecraftTextToVectorNode: - """ - Generates SVG synchronously based on prompt and resolution. - """ - - RETURN_TYPES = ("SVG",) # Changed - DESCRIPTION = cleandoc(__doc__ or "") if 'cleandoc' in globals() else __doc__ # Keep cleandoc if other nodes use it - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" +class RecraftTextToVectorNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftTextToVectorNode", + display_name="Recraft Text to Vector", + category="api node/image/Recraft", + description="Generates SVG synchronously based on prompt and resolution.", + inputs=[ + IO.String.Input("prompt", default="", tooltip="Prompt for the image generation.", multiline=True), + IO.Combo.Input("substyle", options=get_v3_substyles(RecraftStyleV3.vector_illustration)), + IO.Combo.Input( + "size", + options=[res.value for res in RecraftImageSize], + default=RecraftImageSize.res_1024x1024, + tooltip="The size of the generated image.", + ), + IO.Int.Input("n", default=1, min=1, max=6, tooltip="The number of images to generate."), + IO.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; " + "actual results are nondeterministic regardless of seed.", + ), + IO.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + IO.Custom(RecraftIO.CONTROLS).Input( + "recraft_controls", + tooltip="Optional additional controls over the generation via the Recraft Controls node.", + optional=True, + ), + ], + outputs=[ + IO.SVG.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation.", - }, - ), - "substyle": (get_v3_substyles(RecraftStyleV3.vector_illustration),), - "size": ( - [res.value for res in RecraftImageSize], - { - "default": RecraftImageSize.res_1024x1024, - "tooltip": "The size of the generated image.", - }, - ), - "n": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 6, - "tooltip": "The number of images to generate.", - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, 
- "optional": { - "negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - "recraft_controls": ( - RecraftIO.CONTROLS, - { - "tooltip": "Optional additional controls over the generation via the Recraft Controls node." - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - async def api_call( - self, + async def execute( + cls, prompt: str, substyle: str, size: str, @@ -802,9 +706,7 @@ class RecraftTextToVectorNode: seed, negative_prompt: str = None, recraft_controls: RecraftControls = None, - unique_id: Optional[str] = None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False, max_length=1000) # create RecraftStyle so strings will be formatted properly (i.e. "None" will become None) recraft_style = RecraftStyle(RecraftStyleV3.vector_illustration, substyle=substyle) @@ -816,14 +718,11 @@ class RecraftTextToVectorNode: if not negative_prompt: negative_prompt = None - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/recraft/image_generation", - method=HttpMethod.POST, - request_model=RecraftImageGenerationRequest, - response_model=RecraftImageGenerationResponse, - ), - request=RecraftImageGenerationRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/recraft/image_generation", method="POST"), + response_model=RecraftImageGenerationResponse, + data=RecraftImageGenerationRequest( prompt=prompt, negative_prompt=negative_prompt, model=RecraftModel.recraftv3, @@ -833,139 +732,105 @@ class RecraftTextToVectorNode: substyle=recraft_style.substyle, controls=controls_api, ), - auth_kwargs=kwargs, + max_retries=1, ) - response: RecraftImageGenerationResponse = await operation.execute() svg_data = [] - urls = [] for data in response.data: - if unique_id and data.url: - urls.append(data.url) - # Print result on each iteration in case of error - PromptServer.instance.send_progress_text( - f"Result URL: {' '.join(urls)}", unique_id - ) - svg_data.append(await download_url_to_bytesio(data.url, timeout=1024)) + svg_data.append(await download_url_as_bytesio(data.url, timeout=1024)) - return (SVG(svg_data),) + return IO.NodeOutput(SVG(svg_data)) -class RecraftVectorizeImageNode: - """ - Generates SVG synchronously from an input image. 
- """ - - RETURN_TYPES = ("SVG",) # Changed - DESCRIPTION = cleandoc(__doc__ or "") if 'cleandoc' in globals() else __doc__ # Keep cleandoc if other nodes use it - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" +class RecraftVectorizeImageNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftVectorizeImageNode", + display_name="Recraft Vectorize Image", + category="api node/image/Recraft", + description="Generates SVG synchronously from an input image.", + inputs=[ + IO.Image.Input("image"), + ], + outputs=[ + IO.SVG.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE, ), - }, - "optional": { - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call( - self, - image: torch.Tensor, - **kwargs, - ): + async def execute(cls, image: torch.Tensor) -> IO.NodeOutput: svgs = [] total = image.shape[0] pbar = ProgressBar(total) for i in range(total): sub_bytes = await handle_recraft_file_request( + cls, image=image[i], path="/proxy/recraft/images/vectorize", - auth_kwargs=kwargs, ) svgs.append(SVG(sub_bytes)) pbar.update(1) - return (SVG.combine_all(svgs), ) + return IO.NodeOutput(SVG.combine_all(svgs)) -class RecraftReplaceBackgroundNode: - """ - Replace background on image, based on provided prompt. - """ - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" +class RecraftReplaceBackgroundNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftReplaceBackgroundNode", + display_name="Recraft Replace Background", + category="api node/image/Recraft", + description="Replace background on image, based on provided prompt.", + inputs=[ + IO.Image.Input("image"), + IO.String.Input("prompt", tooltip="Prompt for the image generation.", default="", multiline=True), + IO.Int.Input("n", default=1, min=1, max=6, tooltip="The number of images to generate."), + IO.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="Seed to determine if node should re-run; " + "actual results are nondeterministic regardless of seed.", + ), + IO.Custom(RecraftIO.STYLEV3).Input("recraft_style", optional=True), + IO.String.Input( + "negative_prompt", + default="", + force_input=True, + tooltip="An optional text description of undesired elements on an image.", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE, ), - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Prompt for the image generation.", - }, - ), - "n": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 6, - "tooltip": "The number of images to generate.", - }, - ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "control_after_generate": True, - "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", - }, - ), - }, - "optional": { - "recraft_style": (RecraftIO.STYLEV3,), - 
"negative_prompt": ( - IO.STRING, - { - "default": "", - "forceInput": True, - "tooltip": "An optional text description of undesired elements on an image.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call( - self, + async def execute( + cls, image: torch.Tensor, prompt: str, n: int, seed, recraft_style: RecraftStyle = None, negative_prompt: str = None, - **kwargs, - ): + ) -> IO.NodeOutput: default_style = RecraftStyle(RecraftStyleV3.realistic_image) if recraft_style is None: recraft_style = default_style @@ -988,165 +853,151 @@ class RecraftReplaceBackgroundNode: pbar = ProgressBar(total) for i in range(total): sub_bytes = await handle_recraft_file_request( + cls, image=image[i], path="/proxy/recraft/images/replaceBackground", request=request, - auth_kwargs=kwargs, ) images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) pbar.update(1) - images_tensor = torch.cat(images, dim=0) - return (images_tensor, ) + return IO.NodeOutput(torch.cat(images, dim=0)) -class RecraftRemoveBackgroundNode: - """ - Remove background from image, and return processed image and mask. - """ - - RETURN_TYPES = (IO.IMAGE, IO.MASK) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" +class RecraftRemoveBackgroundNode(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftRemoveBackgroundNode", + display_name="Recraft Remove Background", + category="api node/image/Recraft", + description="Remove background from image, and return processed image and mask.", + inputs=[ + IO.Image.Input("image"), + ], + outputs=[ + IO.Image.Output(), + IO.Mask.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE, ), - }, - "optional": { - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } - - async def api_call( - self, - image: torch.Tensor, - **kwargs, - ): + async def execute(cls, image: torch.Tensor) -> IO.NodeOutput: images = [] total = image.shape[0] pbar = ProgressBar(total) for i in range(total): sub_bytes = await handle_recraft_file_request( + cls, image=image[i], path="/proxy/recraft/images/removeBackground", - auth_kwargs=kwargs, ) images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) pbar.update(1) images_tensor = torch.cat(images, dim=0) # use alpha channel as masks, in B,H,W format - masks_tensor = images_tensor[:,:,:,-1:].squeeze(-1) - return (images_tensor, masks_tensor) + masks_tensor = images_tensor[:, :, :, -1:].squeeze(-1) + return IO.NodeOutput(images_tensor, masks_tensor) -class RecraftCrispUpscaleNode: - """ - Upscale image synchronously. - Enhances a given raster image using ‘crisp upscale’ tool, increasing image resolution, making the image sharper and cleaner. 
- """ - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" - +class RecraftCrispUpscaleNode(IO.ComfyNode): RECRAFT_PATH = "/proxy/recraft/images/crispUpscale" @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": (IO.IMAGE, ), - }, - "optional": { - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - }, - } + def define_schema(cls): + return IO.Schema( + node_id="RecraftCrispUpscaleNode", + display_name="Recraft Crisp Upscale Image", + category="api node/image/Recraft", + description="Upscale image synchronously.\n" + "Enhances a given raster image using ‘crisp upscale’ tool, " + "increasing image resolution, making the image sharper and cleaner.", + inputs=[ + IO.Image.Input("image"), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) - async def api_call( - self, - image: torch.Tensor, - **kwargs, - ): + @classmethod + async def execute(cls, image: torch.Tensor) -> IO.NodeOutput: images = [] total = image.shape[0] pbar = ProgressBar(total) for i in range(total): sub_bytes = await handle_recraft_file_request( + cls, image=image[i], - path=self.RECRAFT_PATH, - auth_kwargs=kwargs, + path=cls.RECRAFT_PATH, ) images.append(torch.cat([bytesio_to_image_tensor(x) for x in sub_bytes], dim=0)) pbar.update(1) - images_tensor = torch.cat(images, dim=0) - return (images_tensor,) + return IO.NodeOutput(torch.cat(images, dim=0)) class RecraftCreativeUpscaleNode(RecraftCrispUpscaleNode): - """ - Upscale image synchronously. - Enhances a given raster image using ‘creative upscale’ tool, boosting resolution with a focus on refining small details and faces. 
- """ - - RETURN_TYPES = (IO.IMAGE,) - DESCRIPTION = cleandoc(__doc__ or "") # Handle potential None value - FUNCTION = "api_call" - API_NODE = True - CATEGORY = "api node/image/Recraft" - RECRAFT_PATH = "/proxy/recraft/images/creativeUpscale" + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="RecraftCreativeUpscaleNode", + display_name="Recraft Creative Upscale Image", + category="api node/image/Recraft", + description="Upscale image synchronously.\n" + "Enhances a given raster image using ‘creative upscale’ tool, " + "boosting resolution with a focus on refining small details and faces.", + inputs=[ + IO.Image.Input("image"), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) -# A dictionary that contains all nodes you want to export with their names -# NOTE: names should be globally unique -NODE_CLASS_MAPPINGS = { - "RecraftTextToImageNode": RecraftTextToImageNode, - "RecraftImageToImageNode": RecraftImageToImageNode, - "RecraftImageInpaintingNode": RecraftImageInpaintingNode, - "RecraftTextToVectorNode": RecraftTextToVectorNode, - "RecraftVectorizeImageNode": RecraftVectorizeImageNode, - "RecraftRemoveBackgroundNode": RecraftRemoveBackgroundNode, - "RecraftReplaceBackgroundNode": RecraftReplaceBackgroundNode, - "RecraftCrispUpscaleNode": RecraftCrispUpscaleNode, - "RecraftCreativeUpscaleNode": RecraftCreativeUpscaleNode, - "RecraftStyleV3RealisticImage": RecraftStyleV3RealisticImageNode, - "RecraftStyleV3DigitalIllustration": RecraftStyleV3DigitalIllustrationNode, - "RecraftStyleV3LogoRaster": RecraftStyleV3LogoRasterNode, - "RecraftStyleV3InfiniteStyleLibrary": RecraftStyleInfiniteStyleLibrary, - "RecraftColorRGB": RecraftColorRGBNode, - "RecraftControls": RecraftControlsNode, -} -# A dictionary that contains the friendly/humanly readable titles for the nodes -NODE_DISPLAY_NAME_MAPPINGS = { - "RecraftTextToImageNode": "Recraft Text to Image", - "RecraftImageToImageNode": "Recraft Image to Image", - "RecraftImageInpaintingNode": "Recraft Image Inpainting", - "RecraftTextToVectorNode": "Recraft Text to Vector", - "RecraftVectorizeImageNode": "Recraft Vectorize Image", - "RecraftRemoveBackgroundNode": "Recraft Remove Background", - "RecraftReplaceBackgroundNode": "Recraft Replace Background", - "RecraftCrispUpscaleNode": "Recraft Crisp Upscale Image", - "RecraftCreativeUpscaleNode": "Recraft Creative Upscale Image", - "RecraftStyleV3RealisticImage": "Recraft Style - Realistic Image", - "RecraftStyleV3DigitalIllustration": "Recraft Style - Digital Illustration", - "RecraftStyleV3LogoRaster": "Recraft Style - Logo Raster", - "RecraftStyleV3InfiniteStyleLibrary": "Recraft Style - Infinite Style Library", - "RecraftColorRGB": "Recraft Color RGB", - "RecraftControls": "Recraft Controls", -} +class RecraftExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + RecraftTextToImageNode, + RecraftImageToImageNode, + RecraftImageInpaintingNode, + RecraftTextToVectorNode, + RecraftVectorizeImageNode, + RecraftRemoveBackgroundNode, + RecraftReplaceBackgroundNode, + RecraftCrispUpscaleNode, + RecraftCreativeUpscaleNode, + RecraftStyleV3RealisticImageNode, + RecraftStyleV3DigitalIllustrationNode, + RecraftStyleV3LogoRasterNode, + RecraftStyleInfiniteStyleLibrary, + RecraftColorRGBNode, + RecraftControlsNode, + ] + + +async def comfy_entrypoint() -> RecraftExtension: + return RecraftExtension() From 
3fa7a5c04ae69ad168a875e8d3d453783d60899d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 28 Oct 2025 21:21:01 -0700 Subject: [PATCH 0809/1073] Speed up offloading using pinned memory. (#10526) To enable this feature use: --fast pinned_memory --- comfy/cli_args.py | 1 + comfy/model_management.py | 30 ++++++++++++++++++++++++++++++ comfy/model_patcher.py | 26 +++++++++++++++++++++++++- 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index cc1f12482..001abd843 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -144,6 +144,7 @@ class PerformanceFeature(enum.Enum): Fp8MatrixMultiplication = "fp8_matrix_mult" CublasOps = "cublas_ops" AutoTune = "autotune" + PinnedMem = "pinned_memory" parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: {}".format(" ".join(map(lambda c: c.value, PerformanceFeature)))) diff --git a/comfy/model_management.py b/comfy/model_management.py index afe78f36e..3e5b977d4 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1080,6 +1080,36 @@ def cast_to_device(tensor, device, dtype, copy=False): non_blocking = device_supports_non_blocking(device) return cast_to(tensor, dtype=dtype, device=device, non_blocking=non_blocking, copy=copy) +def pin_memory(tensor): + if PerformanceFeature.PinnedMem not in args.fast: + return False + + if not is_nvidia(): + return False + + if not is_device_cpu(tensor.device): + return False + + if torch.cuda.cudart().cudaHostRegister(tensor.data_ptr(), tensor.numel() * tensor.element_size(), 1) == 0: + return True + + return False + +def unpin_memory(tensor): + if PerformanceFeature.PinnedMem not in args.fast: + return False + + if not is_nvidia(): + return False + + if not is_device_cpu(tensor.device): + return False + + if torch.cuda.cudart().cudaHostUnregister(tensor.data_ptr()) == 0: + return True + + return False + def sage_attention_enabled(): return args.use_sage_attention diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index c0b68fb8c..aec73349c 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -238,6 +238,7 @@ class ModelPatcher: self.force_cast_weights = False self.patches_uuid = uuid.uuid4() self.parent = None + self.pinned = set() self.attachments: dict[str] = {} self.additional_models: dict[str, list[ModelPatcher]] = {} @@ -618,6 +619,21 @@ class ModelPatcher: else: set_func(out_weight, inplace_update=inplace_update, seed=string_to_seed(key)) + def pin_weight_to_device(self, key): + weight, set_func, convert_func = get_key_weight(self.model, key) + if comfy.model_management.pin_memory(weight): + self.pinned.add(key) + + def unpin_weight(self, key): + if key in self.pinned: + weight, set_func, convert_func = get_key_weight(self.model, key) + comfy.model_management.unpin_memory(weight) + self.pinned.remove(key) + + def unpin_all_weights(self): + for key in list(self.pinned): + self.unpin_weight(key) + def _load_list(self): loading = [] for n, m in self.model.named_modules(): @@ -683,6 +699,8 @@ class ModelPatcher: patch_counter += 1 cast_weight = True + for param in params: + self.pin_weight_to_device("{}.{}".format(n, param)) else: if hasattr(m, "comfy_cast_weights"): wipe_lowvram_weight(m) @@ -713,7 +731,9 @@ class 
ModelPatcher: continue for param in params: - self.patch_weight_to_device("{}.{}".format(n, param), device_to=device_to) + key = "{}.{}".format(n, param) + self.unpin_weight(key) + self.patch_weight_to_device(key, device_to=device_to) logging.debug("lowvram: loaded module regularly {} {}".format(n, m)) m.comfy_patched_weights = True @@ -762,6 +782,7 @@ class ModelPatcher: self.eject_model() if unpatch_weights: self.unpatch_hooks() + self.unpin_all_weights() if self.model.model_lowvram: for m in self.model.modules(): move_weight_functions(m, device_to) @@ -857,6 +878,9 @@ class ModelPatcher: memory_freed += module_mem logging.debug("freed {}".format(n)) + for param in params: + self.pin_weight_to_device("{}.{}".format(n, param)) + self.model.model_lowvram = True self.model.lowvram_patch_counter += patch_counter self.model.model_loaded_weight_memory -= memory_freed From e525673f7201b6c49af0fa0e6baf44e4e98bb10c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 28 Oct 2025 21:37:00 -0700 Subject: [PATCH 0810/1073] Fix issue. (#10527) --- comfy/sd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd.py b/comfy/sd.py index 6411bb27d..de4eee96e 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -1330,7 +1330,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None): else: unet_dtype = dtype - if hasattr(model_config, "layer_quant_config"): + if model_config.layer_quant_config is not None: manual_cast_dtype = model_management.unet_manual_cast(None, load_device, model_config.supported_inference_dtypes) else: manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes) From 6c14f3afac0ea28dba24fe8783e7c1f09c03b31f Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 29 Oct 2025 20:14:56 +0200 Subject: [PATCH 0811/1073] use new API client in Luma and Minimax nodes (#10528) --- comfy_api_nodes/apinode_utils.py | 81 ------ comfy_api_nodes/apis/minimax_api.py | 120 ++++++++ comfy_api_nodes/nodes_ideogram.py | 2 +- comfy_api_nodes/nodes_luma.py | 354 ++++++----------------- comfy_api_nodes/nodes_minimax.py | 237 +++++---------- comfy_api_nodes/util/client.py | 2 +- comfy_api_nodes/util/download_helpers.py | 3 +- 7 files changed, 283 insertions(+), 516 deletions(-) create mode 100644 comfy_api_nodes/apis/minimax_api.py diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index 4182c8f80..6a72b9d1d 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -3,14 +3,6 @@ import aiohttp import mimetypes from typing import Optional, Union from comfy.utils import common_upscale -from comfy_api_nodes.apis.client import ( - ApiClient, - ApiEndpoint, - HttpMethod, - SynchronousOperation, - UploadRequest, - UploadResponse, -) from server import PromptServer from comfy.cli_args import args @@ -19,7 +11,6 @@ from PIL import Image import torch import math import base64 -from .util import tensor_to_bytesio, bytesio_to_image_tensor from io import BytesIO @@ -148,11 +139,6 @@ async def download_url_to_bytesio( return BytesIO(await resp.read()) -def process_image_response(response_content: bytes | str) -> torch.Tensor: - """Uses content from a Response object and converts it to a torch.Tensor""" - return bytesio_to_image_tensor(BytesIO(response_content)) - - def text_filepath_to_base64_string(filepath: str) -> str: """Converts a text file to a base64 
string.""" with open(filepath, "rb") as f: @@ -169,73 +155,6 @@ def text_filepath_to_data_uri(filepath: str) -> str: return f"data:{mime_type};base64,{base64_string}" -async def upload_file_to_comfyapi( - file_bytes_io: BytesIO, - filename: str, - upload_mime_type: Optional[str], - auth_kwargs: Optional[dict[str, str]] = None, -) -> str: - """ - Uploads a single file to ComfyUI API and returns its download URL. - - Args: - file_bytes_io: BytesIO object containing the file data. - filename: The filename of the file. - upload_mime_type: MIME type of the file. - auth_kwargs: Optional authentication token(s). - - Returns: - The download URL for the uploaded file. - """ - if upload_mime_type is None: - request_object = UploadRequest(file_name=filename) - else: - request_object = UploadRequest(file_name=filename, content_type=upload_mime_type) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/customers/storage", - method=HttpMethod.POST, - request_model=UploadRequest, - response_model=UploadResponse, - ), - request=request_object, - auth_kwargs=auth_kwargs, - ) - - response: UploadResponse = await operation.execute() - await ApiClient.upload_file(response.upload_url, file_bytes_io, content_type=upload_mime_type) - return response.download_url - - -async def upload_images_to_comfyapi( - image: torch.Tensor, - max_images=8, - auth_kwargs: Optional[dict[str, str]] = None, - mime_type: Optional[str] = None, -) -> list[str]: - """ - Uploads images to ComfyUI API and returns download URLs. - To upload multiple images, stack them in the batch dimension first. - - Args: - image: Input torch.Tensor image. - max_images: Maximum number of images to upload. - auth_kwargs: Optional authentication token(s). - mime_type: Optional MIME type for the image. - """ - # if batch, try to upload each file if max_images is greater than 0 - download_urls: list[str] = [] - is_batch = len(image.shape) > 3 - batch_len = image.shape[0] if is_batch else 1 - - for idx in range(min(batch_len, max_images)): - tensor = image[idx] if is_batch else image - img_io = tensor_to_bytesio(tensor, mime_type=mime_type) - url = await upload_file_to_comfyapi(img_io, img_io.name, mime_type, auth_kwargs) - download_urls.append(url) - return download_urls - - def resize_mask_to_image( mask: torch.Tensor, image: torch.Tensor, diff --git a/comfy_api_nodes/apis/minimax_api.py b/comfy_api_nodes/apis/minimax_api.py new file mode 100644 index 000000000..d747e177a --- /dev/null +++ b/comfy_api_nodes/apis/minimax_api.py @@ -0,0 +1,120 @@ +from enum import Enum +from typing import Optional + +from pydantic import BaseModel, Field + + +class MinimaxBaseResponse(BaseModel): + status_code: int = Field( + ..., + description='Status code. 0 indicates success, other values indicate errors.', + ) + status_msg: str = Field( + ..., description='Specific error details or success message.' 
+ ) + + +class File(BaseModel): + bytes: Optional[int] = Field(None, description='File size in bytes') + created_at: Optional[int] = Field( + None, description='Unix timestamp when the file was created, in seconds' + ) + download_url: Optional[str] = Field( + None, description='The URL to download the video' + ) + backup_download_url: Optional[str] = Field( + None, description='The backup URL to download the video' + ) + + file_id: Optional[int] = Field(None, description='Unique identifier for the file') + filename: Optional[str] = Field(None, description='The name of the file') + purpose: Optional[str] = Field(None, description='The purpose of using the file') + + +class MinimaxFileRetrieveResponse(BaseModel): + base_resp: MinimaxBaseResponse + file: File + + +class MiniMaxModel(str, Enum): + T2V_01_Director = 'T2V-01-Director' + I2V_01_Director = 'I2V-01-Director' + S2V_01 = 'S2V-01' + I2V_01 = 'I2V-01' + I2V_01_live = 'I2V-01-live' + T2V_01 = 'T2V-01' + Hailuo_02 = 'MiniMax-Hailuo-02' + + +class Status6(str, Enum): + Queueing = 'Queueing' + Preparing = 'Preparing' + Processing = 'Processing' + Success = 'Success' + Fail = 'Fail' + + +class MinimaxTaskResultResponse(BaseModel): + base_resp: MinimaxBaseResponse + file_id: Optional[str] = Field( + None, + description='After the task status changes to Success, this field returns the file ID corresponding to the generated video.', + ) + status: Status6 = Field( + ..., + description="Task status: 'Queueing' (in queue), 'Preparing' (task is preparing), 'Processing' (generating), 'Success' (task completed successfully), or 'Fail' (task failed).", + ) + task_id: str = Field(..., description='The task ID being queried.') + + +class SubjectReferenceItem(BaseModel): + image: Optional[str] = Field( + None, description='URL or base64 encoding of the subject reference image.' + ) + mask: Optional[str] = Field( + None, + description='URL or base64 encoding of the mask for the subject reference image.', + ) + + +class MinimaxVideoGenerationRequest(BaseModel): + callback_url: Optional[str] = Field( + None, + description='Optional. URL to receive real-time status updates about the video generation task.', + ) + first_frame_image: Optional[str] = Field( + None, + description='URL or base64 encoding of the first frame image. Required when model is I2V-01, I2V-01-Director, or I2V-01-live.', + ) + model: MiniMaxModel = Field( + ..., + description='Required. ID of model. Options: T2V-01-Director, I2V-01-Director, S2V-01, I2V-01, I2V-01-live, T2V-01', + ) + prompt: Optional[str] = Field( + None, + description='Description of the video. Should be less than 2000 characters. Supports camera movement instructions in [brackets].', + max_length=2000, + ) + prompt_optimizer: Optional[bool] = Field( + True, + description='If true (default), the model will automatically optimize the prompt. Set to false for more precise control.', + ) + subject_reference: Optional[list[SubjectReferenceItem]] = Field( + None, + description='Only available when model is S2V-01. The model will generate a video based on the subject uploaded through this parameter.', + ) + duration: Optional[int] = Field( + None, + description="The length of the output video in seconds." + ) + resolution: Optional[str] = Field( + None, + description="The dimensions of the video display. 1080p corresponds to 1920 x 1080 pixels, 768p corresponds to 1366 x 768 pixels." 
+ ) + + +class MinimaxVideoGenerationResponse(BaseModel): + base_resp: MinimaxBaseResponse + task_id: str = Field( + ..., description='The task ID for the asynchronous video generation task.' + ) diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index 9eae5f11a..d8fd3378b 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -20,9 +20,9 @@ from comfy_api_nodes.apis.client import ( from comfy_api_nodes.apinode_utils import ( download_url_to_bytesio, - bytesio_to_image_tensor, resize_mask_to_image, ) +from comfy_api_nodes.util import bytesio_to_image_tensor from server import PromptServer V1_V1_RES_MAP = { diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index e74441e5e..894f2b08c 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -1,69 +1,51 @@ -from __future__ import annotations -from inspect import cleandoc from typing import Optional + +import torch from typing_extensions import override -from comfy_api.latest import ComfyExtension, IO -from comfy_api.input_impl.video_types import VideoFromFile + +from comfy_api.latest import IO, ComfyExtension from comfy_api_nodes.apis.luma_api import ( - LumaImageModel, - LumaVideoModel, - LumaVideoOutputResolution, - LumaVideoModelOutputDuration, LumaAspectRatio, - LumaState, - LumaImageGenerationRequest, - LumaGenerationRequest, - LumaGeneration, LumaCharacterRef, - LumaModifyImageRef, + LumaConceptChain, + LumaGeneration, + LumaGenerationRequest, + LumaImageGenerationRequest, LumaImageIdentity, + LumaImageModel, + LumaImageReference, + LumaIO, + LumaKeyframes, + LumaModifyImageRef, LumaReference, LumaReferenceChain, - LumaImageReference, - LumaKeyframes, - LumaConceptChain, - LumaIO, + LumaVideoModel, + LumaVideoModelOutputDuration, + LumaVideoOutputResolution, get_luma_concepts, ) -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, - EmptyRequest, -) -from comfy_api_nodes.apinode_utils import ( + download_url_to_image_tensor, + download_url_to_video_output, + poll_op, + sync_op, upload_images_to_comfyapi, - process_image_response, + validate_string, ) -from server import PromptServer -from comfy_api_nodes.util import validate_string - -import aiohttp -import torch -from io import BytesIO LUMA_T2V_AVERAGE_DURATION = 105 LUMA_I2V_AVERAGE_DURATION = 100 -def image_result_url_extractor(response: LumaGeneration): - return response.assets.image if hasattr(response, "assets") and hasattr(response.assets, "image") else None - -def video_result_url_extractor(response: LumaGeneration): - return response.assets.video if hasattr(response, "assets") and hasattr(response.assets, "video") else None class LumaReferenceNode(IO.ComfyNode): - """ - Holds an image and weight for use with Luma Generate Image node. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="LumaReferenceNode", display_name="Luma Reference", category="api node/image/Luma", - description=cleandoc(cls.__doc__ or ""), + description="Holds an image and weight for use with Luma Generate Image node.", inputs=[ IO.Image.Input( "image", @@ -83,17 +65,10 @@ class LumaReferenceNode(IO.ComfyNode): ), ], outputs=[IO.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], ) @classmethod - def execute( - cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None - ) -> IO.NodeOutput: + def execute(cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None) -> IO.NodeOutput: if luma_ref is not None: luma_ref = luma_ref.clone() else: @@ -103,17 +78,13 @@ class LumaReferenceNode(IO.ComfyNode): class LumaConceptsNode(IO.ComfyNode): - """ - Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes. - """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="LumaConceptsNode", display_name="Luma Concepts", category="api node/video/Luma", - description=cleandoc(cls.__doc__ or ""), + description="Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.", inputs=[ IO.Combo.Input( "concept1", @@ -138,11 +109,6 @@ class LumaConceptsNode(IO.ComfyNode): ), ], outputs=[IO.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], ) @classmethod @@ -161,17 +127,13 @@ class LumaConceptsNode(IO.ComfyNode): class LumaImageGenerationNode(IO.ComfyNode): - """ - Generates images synchronously based on prompt and aspect ratio. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="LumaImageNode", display_name="Luma Text to Image", category="api node/image/Luma", - description=cleandoc(cls.__doc__ or ""), + description="Generates images synchronously based on prompt and aspect ratio.", inputs=[ IO.String.Input( "prompt", @@ -237,45 +199,30 @@ class LumaImageGenerationNode(IO.ComfyNode): aspect_ratio: str, seed, style_image_weight: float, - image_luma_ref: LumaReferenceChain = None, - style_image: torch.Tensor = None, - character_image: torch.Tensor = None, + image_luma_ref: Optional[LumaReferenceChain] = None, + style_image: Optional[torch.Tensor] = None, + character_image: Optional[torch.Tensor] = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=3) - auth_kwargs = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } # handle image_luma_ref api_image_ref = None if image_luma_ref is not None: - api_image_ref = await cls._convert_luma_refs( - image_luma_ref, max_refs=4, auth_kwargs=auth_kwargs, - ) + api_image_ref = await cls._convert_luma_refs(image_luma_ref, max_refs=4) # handle style_luma_ref api_style_ref = None if style_image is not None: - api_style_ref = await cls._convert_style_image( - style_image, weight=style_image_weight, auth_kwargs=auth_kwargs, - ) + api_style_ref = await cls._convert_style_image(style_image, weight=style_image_weight) # handle character_ref images character_ref = None if character_image is not None: - download_urls = await upload_images_to_comfyapi( - character_image, max_images=4, auth_kwargs=auth_kwargs, - ) - character_ref = LumaCharacterRef( - identity0=LumaImageIdentity(images=download_urls) - ) + download_urls = await upload_images_to_comfyapi(cls, character_image, max_images=4) + character_ref = LumaCharacterRef(identity0=LumaImageIdentity(images=download_urls)) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/luma/generations/image", - method=HttpMethod.POST, - request_model=LumaImageGenerationRequest, - response_model=LumaGeneration, - ), - request=LumaImageGenerationRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/luma/generations/image", method="POST"), + response_model=LumaGeneration, + data=LumaImageGenerationRequest( prompt=prompt, model=model, aspect_ratio=aspect_ratio, @@ -283,41 +230,21 @@ class LumaImageGenerationNode(IO.ComfyNode): style_ref=api_style_ref, character_ref=character_ref, ), - auth_kwargs=auth_kwargs, ) - response_api: LumaGeneration = await operation.execute() - - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/luma/generations/{response_api.id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=LumaGeneration, - ), - completed_statuses=[LumaState.completed], - failed_statuses=[LumaState.failed], + response_poll = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/luma/generations/{response_api.id}"), + response_model=LumaGeneration, status_extractor=lambda x: x.state, - result_url_extractor=image_result_url_extractor, - node_id=cls.hidden.unique_id, - auth_kwargs=auth_kwargs, ) - response_poll = await operation.execute() - - async with aiohttp.ClientSession() as session: - async with session.get(response_poll.assets.image) as img_response: - img = process_image_response(await img_response.content.read()) - return IO.NodeOutput(img) + return IO.NodeOutput(await download_url_to_image_tensor(response_poll.assets.image)) @classmethod - async 
def _convert_luma_refs( - cls, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None - ): + async def _convert_luma_refs(cls, luma_ref: LumaReferenceChain, max_refs: int): luma_urls = [] ref_count = 0 for ref in luma_ref.refs: - download_urls = await upload_images_to_comfyapi( - ref.image, max_images=1, auth_kwargs=auth_kwargs - ) + download_urls = await upload_images_to_comfyapi(cls, ref.image, max_images=1) luma_urls.append(download_urls[0]) ref_count += 1 if ref_count >= max_refs: @@ -325,27 +252,19 @@ class LumaImageGenerationNode(IO.ComfyNode): return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs) @classmethod - async def _convert_style_image( - cls, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None - ): - chain = LumaReferenceChain( - first_ref=LumaReference(image=style_image, weight=weight) - ) - return await cls._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) + async def _convert_style_image(cls, style_image: torch.Tensor, weight: float): + chain = LumaReferenceChain(first_ref=LumaReference(image=style_image, weight=weight)) + return await cls._convert_luma_refs(chain, max_refs=1) class LumaImageModifyNode(IO.ComfyNode): - """ - Modifies images synchronously based on prompt and aspect ratio. - """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="LumaImageModifyNode", display_name="Luma Image to Image", category="api node/image/Luma", - description=cleandoc(cls.__doc__ or ""), + description="Modifies images synchronously based on prompt and aspect ratio.", inputs=[ IO.Image.Input( "image", @@ -395,68 +314,37 @@ class LumaImageModifyNode(IO.ComfyNode): image_weight: float, seed, ) -> IO.NodeOutput: - auth_kwargs = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - # first, upload image - download_urls = await upload_images_to_comfyapi( - image, max_images=1, auth_kwargs=auth_kwargs, - ) + download_urls = await upload_images_to_comfyapi(cls, image, max_images=1) image_url = download_urls[0] - # next, make Luma call with download url provided - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/luma/generations/image", - method=HttpMethod.POST, - request_model=LumaImageGenerationRequest, - response_model=LumaGeneration, - ), - request=LumaImageGenerationRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/luma/generations/image", method="POST"), + response_model=LumaGeneration, + data=LumaImageGenerationRequest( prompt=prompt, model=model, modify_image_ref=LumaModifyImageRef( - url=image_url, weight=round(max(min(1.0-image_weight, 0.98), 0.0), 2) + url=image_url, weight=round(max(min(1.0 - image_weight, 0.98), 0.0), 2) ), ), - auth_kwargs=auth_kwargs, ) - response_api: LumaGeneration = await operation.execute() - - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/luma/generations/{response_api.id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=LumaGeneration, - ), - completed_statuses=[LumaState.completed], - failed_statuses=[LumaState.failed], + response_poll = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/luma/generations/{response_api.id}"), + response_model=LumaGeneration, status_extractor=lambda x: x.state, - result_url_extractor=image_result_url_extractor, - node_id=cls.hidden.unique_id, - auth_kwargs=auth_kwargs, ) - response_poll = await operation.execute() - - async with 
aiohttp.ClientSession() as session: - async with session.get(response_poll.assets.image) as img_response: - img = process_image_response(await img_response.content.read()) - return IO.NodeOutput(img) + return IO.NodeOutput(await download_url_to_image_tensor(response_poll.assets.image)) class LumaTextToVideoGenerationNode(IO.ComfyNode): - """ - Generates videos synchronously based on prompt and output_size. - """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="LumaVideoNode", display_name="Luma Text to Video", category="api node/video/Luma", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos synchronously based on prompt and output_size.", inputs=[ IO.String.Input( "prompt", @@ -498,7 +386,7 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode): "luma_concepts", tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", optional=True, - ) + ), ], outputs=[IO.Video.Output()], hidden=[ @@ -519,24 +407,17 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode): duration: str, loop: bool, seed, - luma_concepts: LumaConceptChain = None, + luma_concepts: Optional[LumaConceptChain] = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False, min_length=3) duration = duration if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None - auth_kwargs = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/luma/generations", - method=HttpMethod.POST, - request_model=LumaGenerationRequest, - response_model=LumaGeneration, - ), - request=LumaGenerationRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/luma/generations", method="POST"), + response_model=LumaGeneration, + data=LumaGenerationRequest( prompt=prompt, model=model, resolution=resolution, @@ -545,47 +426,25 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode): loop=loop, concepts=luma_concepts.create_api_model() if luma_concepts else None, ), - auth_kwargs=auth_kwargs, ) - response_api: LumaGeneration = await operation.execute() - - if cls.hidden.unique_id: - PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id) - - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/luma/generations/{response_api.id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=LumaGeneration, - ), - completed_statuses=[LumaState.completed], - failed_statuses=[LumaState.failed], + response_poll = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/luma/generations/{response_api.id}"), + response_model=LumaGeneration, status_extractor=lambda x: x.state, - result_url_extractor=video_result_url_extractor, - node_id=cls.hidden.unique_id, estimated_duration=LUMA_T2V_AVERAGE_DURATION, - auth_kwargs=auth_kwargs, ) - response_poll = await operation.execute() - - async with aiohttp.ClientSession() as session: - async with session.get(response_poll.assets.video) as vid_response: - return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(await download_url_to_video_output(response_poll.assets.video)) class LumaImageToVideoGenerationNode(IO.ComfyNode): - """ - Generates videos synchronously based on prompt, input images, and output_size. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="LumaImageToVideoNode", display_name="Luma Image to Video", category="api node/video/Luma", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos synchronously based on prompt, input images, and output_size.", inputs=[ IO.String.Input( "prompt", @@ -637,7 +496,7 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode): "luma_concepts", tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", optional=True, - ) + ), ], outputs=[IO.Video.Output()], hidden=[ @@ -662,25 +521,15 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode): luma_concepts: LumaConceptChain = None, ) -> IO.NodeOutput: if first_image is None and last_image is None: - raise Exception( - "At least one of first_image and last_image requires an input." - ) - auth_kwargs = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - keyframes = await cls._convert_to_keyframes(first_image, last_image, auth_kwargs=auth_kwargs) + raise Exception("At least one of first_image and last_image requires an input.") + keyframes = await cls._convert_to_keyframes(first_image, last_image) duration = duration if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/luma/generations", - method=HttpMethod.POST, - request_model=LumaGenerationRequest, - response_model=LumaGeneration, - ), - request=LumaGenerationRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/luma/generations", method="POST"), + response_model=LumaGeneration, + data=LumaGenerationRequest( prompt=prompt, model=model, aspect_ratio=LumaAspectRatio.ratio_16_9, # ignored, but still needed by the API for some reason @@ -690,54 +539,31 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode): keyframes=keyframes, concepts=luma_concepts.create_api_model() if luma_concepts else None, ), - auth_kwargs=auth_kwargs, ) - response_api: LumaGeneration = await operation.execute() - - if cls.hidden.unique_id: - PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id) - - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/luma/generations/{response_api.id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=LumaGeneration, - ), - completed_statuses=[LumaState.completed], - failed_statuses=[LumaState.failed], + response_poll = await poll_op( + cls, + poll_endpoint=ApiEndpoint(path=f"/proxy/luma/generations/{response_api.id}"), + response_model=LumaGeneration, status_extractor=lambda x: x.state, - result_url_extractor=video_result_url_extractor, - node_id=cls.hidden.unique_id, estimated_duration=LUMA_I2V_AVERAGE_DURATION, - auth_kwargs=auth_kwargs, ) - response_poll = await operation.execute() - - async with aiohttp.ClientSession() as session: - async with session.get(response_poll.assets.video) as vid_response: - return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(await download_url_to_video_output(response_poll.assets.video)) @classmethod async def _convert_to_keyframes( cls, first_image: torch.Tensor = None, last_image: torch.Tensor = None, - auth_kwargs: Optional[dict[str,str]] = None, ): if first_image is None and last_image is None: return None frame0 = None frame1 = None if first_image is not None: - 
download_urls = await upload_images_to_comfyapi( - first_image, max_images=1, auth_kwargs=auth_kwargs, - ) + download_urls = await upload_images_to_comfyapi(cls, first_image, max_images=1) frame0 = LumaImageReference(type="image", url=download_urls[0]) if last_image is not None: - download_urls = await upload_images_to_comfyapi( - last_image, max_images=1, auth_kwargs=auth_kwargs, - ) + download_urls = await upload_images_to_comfyapi(cls, last_image, max_images=1) frame1 = LumaImageReference(type="image", url=download_urls[0]) return LumaKeyframes(frame0=frame0, frame1=frame1) diff --git a/comfy_api_nodes/nodes_minimax.py b/comfy_api_nodes/nodes_minimax.py index e3722e79b..05cbb700f 100644 --- a/comfy_api_nodes/nodes_minimax.py +++ b/comfy_api_nodes/nodes_minimax.py @@ -1,71 +1,57 @@ -from inspect import cleandoc from typing import Optional -import logging -import torch +import torch from typing_extensions import override -from comfy_api.latest import ComfyExtension, IO -from comfy_api.input_impl.video_types import VideoFromFile -from comfy_api_nodes.apis import ( + +from comfy_api.latest import IO, ComfyExtension +from comfy_api_nodes.apis.minimax_api import ( + MinimaxFileRetrieveResponse, + MiniMaxModel, + MinimaxTaskResultResponse, MinimaxVideoGenerationRequest, MinimaxVideoGenerationResponse, - MinimaxFileRetrieveResponse, - MinimaxTaskResultResponse, SubjectReferenceItem, - MiniMaxModel, ) -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, - EmptyRequest, -) -from comfy_api_nodes.apinode_utils import ( - download_url_to_bytesio, + download_url_to_video_output, + poll_op, + sync_op, upload_images_to_comfyapi, + validate_string, ) -from comfy_api_nodes.util import validate_string -from server import PromptServer - I2V_AVERAGE_DURATION = 114 T2V_AVERAGE_DURATION = 234 async def _generate_mm_video( + cls: type[IO.ComfyNode], *, - auth: dict[str, str], - node_id: str, prompt_text: str, seed: int, model: str, - image: Optional[torch.Tensor] = None, # used for ImageToVideo - subject: Optional[torch.Tensor] = None, # used for SubjectToVideo + image: Optional[torch.Tensor] = None, # used for ImageToVideo + subject: Optional[torch.Tensor] = None, # used for SubjectToVideo average_duration: Optional[int] = None, ) -> IO.NodeOutput: if image is None: validate_string(prompt_text, field_name="prompt_text") - # upload image, if passed in image_url = None if image is not None: - image_url = (await upload_images_to_comfyapi(image, max_images=1, auth_kwargs=auth))[0] + image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0] # TODO: figure out how to deal with subject properly, API returns invalid params when using S2V-01 model subject_reference = None if subject is not None: - subject_url = (await upload_images_to_comfyapi(subject, max_images=1, auth_kwargs=auth))[0] + subject_url = (await upload_images_to_comfyapi(cls, subject, max_images=1))[0] subject_reference = [SubjectReferenceItem(image=subject_url)] - - video_generate_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/video_generation", - method=HttpMethod.POST, - request_model=MinimaxVideoGenerationRequest, - response_model=MinimaxVideoGenerationResponse, - ), - request=MinimaxVideoGenerationRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/minimax/video_generation", method="POST"), + response_model=MinimaxVideoGenerationResponse, + data=MinimaxVideoGenerationRequest( 
model=MiniMaxModel(model), prompt=prompt_text, callback_url=None, @@ -73,81 +59,50 @@ async def _generate_mm_video( subject_reference=subject_reference, prompt_optimizer=None, ), - auth_kwargs=auth, ) - response = await video_generate_operation.execute() task_id = response.task_id if not task_id: raise Exception(f"MiniMax generation failed: {response.base_resp}") - video_generate_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path="/proxy/minimax/query/video_generation", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxTaskResultResponse, - query_params={"task_id": task_id}, - ), - completed_statuses=["Success"], - failed_statuses=["Fail"], + task_result = await poll_op( + cls, + ApiEndpoint(path="/proxy/minimax/query/video_generation", query_params={"task_id": task_id}), + response_model=MinimaxTaskResultResponse, status_extractor=lambda x: x.status.value, estimated_duration=average_duration, - node_id=node_id, - auth_kwargs=auth, ) - task_result = await video_generate_operation.execute() file_id = task_result.file_id if file_id is None: raise Exception("Request was not successful. Missing file ID.") - file_retrieve_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/files/retrieve", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxFileRetrieveResponse, - query_params={"file_id": int(file_id)}, - ), - request=EmptyRequest(), - auth_kwargs=auth, + file_result = await sync_op( + cls, + ApiEndpoint(path="/proxy/minimax/files/retrieve", query_params={"file_id": int(file_id)}), + response_model=MinimaxFileRetrieveResponse, ) - file_result = await file_retrieve_operation.execute() file_url = file_result.file.download_url if file_url is None: - raise Exception( - f"No video was found in the response. Full response: {file_result.model_dump()}" - ) - logging.info("Generated video URL: %s", file_url) - if node_id: - if hasattr(file_result.file, "backup_download_url"): - message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}" - else: - message = f"Result URL: {file_url}" - PromptServer.instance.send_progress_text(message, node_id) - - # Download and return as VideoFromFile - video_io = await download_url_to_bytesio(file_url) - if video_io is None: - error_msg = f"Failed to download video from {file_url}" - logging.error(error_msg) - raise Exception(error_msg) - return IO.NodeOutput(VideoFromFile(video_io)) + raise Exception(f"No video was found in the response. Full response: {file_result.model_dump()}") + if file_result.file.backup_download_url: + try: + return IO.NodeOutput(await download_url_to_video_output(file_url, timeout=10, max_retries=2)) + except Exception: # if we have a second URL to retrieve the result, try again using that one + return IO.NodeOutput( + await download_url_to_video_output(file_result.file.backup_download_url, max_retries=3) + ) + return IO.NodeOutput(await download_url_to_video_output(file_url)) class MinimaxTextToVideoNode(IO.ComfyNode): - """ - Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="MinimaxTextToVideoNode", display_name="MiniMax Text to Video", category="api node/video/MiniMax", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos synchronously based on a prompt, and optional parameters.", inputs=[ IO.String.Input( "prompt_text", @@ -189,11 +144,7 @@ class MinimaxTextToVideoNode(IO.ComfyNode): seed: int = 0, ) -> IO.NodeOutput: return await _generate_mm_video( - auth={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - node_id=cls.hidden.unique_id, + cls, prompt_text=prompt_text, seed=seed, model=model, @@ -204,17 +155,13 @@ class MinimaxTextToVideoNode(IO.ComfyNode): class MinimaxImageToVideoNode(IO.ComfyNode): - """ - Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. - """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="MinimaxImageToVideoNode", display_name="MiniMax Image to Video", category="api node/video/MiniMax", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos synchronously based on an image and prompt, and optional parameters.", inputs=[ IO.Image.Input( "image", @@ -261,11 +208,7 @@ class MinimaxImageToVideoNode(IO.ComfyNode): seed: int = 0, ) -> IO.NodeOutput: return await _generate_mm_video( - auth={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - node_id=cls.hidden.unique_id, + cls, prompt_text=prompt_text, seed=seed, model=model, @@ -276,17 +219,13 @@ class MinimaxImageToVideoNode(IO.ComfyNode): class MinimaxSubjectToVideoNode(IO.ComfyNode): - """ - Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="MinimaxSubjectToVideoNode", display_name="MiniMax Subject to Video", category="api node/video/MiniMax", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos synchronously based on an image and prompt, and optional parameters.", inputs=[ IO.Image.Input( "subject", @@ -333,11 +272,7 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode): seed: int = 0, ) -> IO.NodeOutput: return await _generate_mm_video( - auth={ - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, - node_id=cls.hidden.unique_id, + cls, prompt_text=prompt_text, seed=seed, model=model, @@ -348,15 +283,13 @@ class MinimaxSubjectToVideoNode(IO.ComfyNode): class MinimaxHailuoVideoNode(IO.ComfyNode): - """Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.""" - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="MinimaxHailuoVideoNode", display_name="MiniMax Hailuo Video", category="api node/video/MiniMax", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.", inputs=[ IO.String.Input( "prompt_text", @@ -420,10 +353,6 @@ class MinimaxHailuoVideoNode(IO.ComfyNode): resolution: str = "768P", model: str = "MiniMax-Hailuo-02", ) -> IO.NodeOutput: - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } if first_frame_image is None: validate_string(prompt_text, field_name="prompt_text") @@ -435,16 +364,13 @@ class MinimaxHailuoVideoNode(IO.ComfyNode): # upload image, if passed in image_url = None if first_frame_image is not None: - image_url = (await upload_images_to_comfyapi(first_frame_image, max_images=1, auth_kwargs=auth))[0] + image_url = (await upload_images_to_comfyapi(cls, first_frame_image, max_images=1))[0] - video_generate_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/video_generation", - method=HttpMethod.POST, - request_model=MinimaxVideoGenerationRequest, - response_model=MinimaxVideoGenerationResponse, - ), - request=MinimaxVideoGenerationRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/minimax/video_generation", method="POST"), + response_model=MinimaxVideoGenerationResponse, + data=MinimaxVideoGenerationRequest( model=MiniMaxModel(model), prompt=prompt_text, callback_url=None, @@ -453,67 +379,42 @@ class MinimaxHailuoVideoNode(IO.ComfyNode): duration=duration, resolution=resolution, ), - auth_kwargs=auth, ) - response = await video_generate_operation.execute() task_id = response.task_id if not task_id: raise Exception(f"MiniMax generation failed: {response.base_resp}") average_duration = 120 if resolution == "768P" else 240 - video_generate_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path="/proxy/minimax/query/video_generation", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxTaskResultResponse, - query_params={"task_id": task_id}, - ), - completed_statuses=["Success"], - failed_statuses=["Fail"], + task_result = await poll_op( + cls, + ApiEndpoint(path="/proxy/minimax/query/video_generation", query_params={"task_id": task_id}), + response_model=MinimaxTaskResultResponse, status_extractor=lambda x: x.status.value, estimated_duration=average_duration, - node_id=cls.hidden.unique_id, - auth_kwargs=auth, ) - task_result = await 
video_generate_operation.execute() file_id = task_result.file_id if file_id is None: raise Exception("Request was not successful. Missing file ID.") - file_retrieve_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/minimax/files/retrieve", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=MinimaxFileRetrieveResponse, - query_params={"file_id": int(file_id)}, - ), - request=EmptyRequest(), - auth_kwargs=auth, + file_result = await sync_op( + cls, + ApiEndpoint(path="/proxy/minimax/files/retrieve", query_params={"file_id": int(file_id)}), + response_model=MinimaxFileRetrieveResponse, ) - file_result = await file_retrieve_operation.execute() file_url = file_result.file.download_url if file_url is None: - raise Exception( - f"No video was found in the response. Full response: {file_result.model_dump()}" - ) - logging.info("Generated video URL: %s", file_url) - if cls.hidden.unique_id: - if hasattr(file_result.file, "backup_download_url"): - message = f"Result URL: {file_url}\nBackup URL: {file_result.file.backup_download_url}" - else: - message = f"Result URL: {file_url}" - PromptServer.instance.send_progress_text(message, cls.hidden.unique_id) + raise Exception(f"No video was found in the response. Full response: {file_result.model_dump()}") - video_io = await download_url_to_bytesio(file_url) - if video_io is None: - error_msg = f"Failed to download video from {file_url}" - logging.error(error_msg) - raise Exception(error_msg) - return IO.NodeOutput(VideoFromFile(video_io)) + if file_result.file.backup_download_url: + try: + return IO.NodeOutput(await download_url_to_video_output(file_url, timeout=10, max_retries=2)) + except Exception: # if we have a second URL to retrieve the result, try again using that one + return IO.NodeOutput( + await download_url_to_video_output(file_result.file.backup_download_url, max_retries=3) + ) + return IO.NodeOutput(await download_url_to_video_output(file_url)) class MinimaxExtension(ComfyExtension): diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py index 9c036d64b..9ae512fe5 100644 --- a/comfy_api_nodes/util/client.py +++ b/comfy_api_nodes/util/client.py @@ -78,7 +78,7 @@ class _PollUIState: _RETRY_STATUS = {408, 429, 500, 502, 503, 504} COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed"] -FAILED_STATUSES = ["cancelled", "canceled", "failed", "error"] +FAILED_STATUSES = ["cancelled", "canceled", "fail", "failed", "error"] QUEUED_STATUSES = ["created", "queued", "queueing", "submitted"] diff --git a/comfy_api_nodes/util/download_helpers.py b/comfy_api_nodes/util/download_helpers.py index f89045e12..364874bed 100644 --- a/comfy_api_nodes/util/download_helpers.py +++ b/comfy_api_nodes/util/download_helpers.py @@ -232,11 +232,12 @@ async def download_url_to_video_output( video_url: str, *, timeout: float = None, + max_retries: int = 5, cls: type[COMFY_IO.ComfyNode] = None, ) -> VideoFromFile: """Downloads a video from a URL and returns a `VIDEO` output.""" result = BytesIO() - await download_url_to_bytesio(video_url, result, timeout=timeout, cls=cls) + await download_url_to_bytesio(video_url, result, timeout=timeout, max_retries=max_retries, cls=cls) return VideoFromFile(result) From 1a58087ac2eb64be3645934d0025aafaa5bdce38 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 29 Oct 2025 12:43:51 -0700 Subject: [PATCH 0812/1073] Reduce memory usage for fp8 scaled op. 
 (#10531)
---
 comfy/quant_ops.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py
index b14e03084..fb35a0d40 100644
--- a/comfy/quant_ops.py
+++ b/comfy/quant_ops.py
@@ -358,7 +358,7 @@ class TensorCoreFP8Layout(QuantizedLayout):
         scale = scale.to(device=tensor.device, dtype=torch.float32)

         lp_amax = torch.finfo(dtype).max
-        tensor_scaled = tensor.float() / scale
+        tensor_scaled = tensor * (1.0 / scale).to(tensor.dtype)
         torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled)
         qdata = tensor_scaled.to(dtype, memory_format=torch.contiguous_format)


From ec4fc2a09a390d0d81500c51fb9e4d8a7a5ce1fc Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 29 Oct 2025 12:48:06 -0700
Subject: [PATCH 0813/1073] Fix case of weights not being unpinned. (#10533)

---
 comfy/model_patcher.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index aec73349c..119119e96 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -1283,5 +1283,6 @@ class ModelPatcher:
         self.clear_cached_hook_weights()

     def __del__(self):
+        self.unpin_all_weights()
         self.detach(unpatch_all=False)


From ab7ab5be23fb9b71d1790f424e7dcf91dc1fe0cc Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Thu, 30 Oct 2025 07:17:46 +1000
Subject: [PATCH 0814/1073] Fix race condition in --async-offload that can
 cause corruption (#10501)

* mm: factor out the current stream getter

Make this a reusable function.

* ops: sync the offload stream with the consumption of w&b

This sync is necessary because pytorch queues cuda async frees on the
same stream that created the tensor. In the case of async offload, that
is the offload stream. Weights and biases can go out of scope in
python, which triggers the pytorch garbage collector to queue the free
operation on the offload stream, possibly before the compute stream has
used the weight. This causes a use-after-free on the weight data,
leading to total corruption of some workflows.

So sync the offload stream with the compute stream after the weight has
been used, so that the free has to wait for the weight to be consumed.

cast_bias_weight is extended in a backwards-compatible way, with the
new behaviour opt-in via a defaulted parameter. This covers custom node
packs that call cast_bias_weight, and disables async-offload for them
(as they do not handle the race).

The pattern is now:

cast_bias_weight(..., offloadable=True) # this might be offloaded
thing(weight, bias, ...)
uncast_bias_weight(...)

* controlnet: adopt new cast_bias_weight synchronization scheme

This is necessary for safe async weight offloading.

* mm: sync the last stream in the queue, not the next

Currently this peeks ahead to sync the next stream in the queue of
streams with the compute stream. This doesn't allow much
parallelization, as the end result is that you can only get one weight
load ahead, regardless of how many streams you have.

Rotate the loop logic here to synchronize the end of the queue before
returning the next stream. This allows weights to be loaded ahead of
the compute stream's position.
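
As an illustration only -- a minimal sketch assuming CUDA, a pinned
source tensor, and made-up variable names (the real implementation
lives behind get_offload_stream()/sync_stream() and cast_bias_weight())
-- the stream discipline described above is:

    import torch

    x = torch.randn(8, 64, device="cuda")
    weight_cpu = torch.randn(64, 64).pin_memory()  # pinned so the H2D copy can run async

    compute = torch.cuda.current_stream()
    offload = torch.cuda.Stream()

    # Producer: sync the offload stream with compute (as in
    # get_offload_stream()), then copy the weight on the offload stream.
    offload.wait_stream(compute)
    with torch.cuda.stream(offload):
        weight_gpu = weight_cpu.to("cuda", non_blocking=True)

    # Consumer: compute must wait for the copy before reading the weight.
    compute.wait_stream(offload)
    out = torch.nn.functional.linear(x, weight_gpu)

    # The fix: pytorch queues the async free of weight_gpu on the stream
    # that created it (the offload stream), so make that stream wait for
    # compute before the tensor is allowed to go out of scope.
    offload.wait_stream(compute)
    del weight_gpu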
--- comfy/controlnet.py | 17 +++--- comfy/model_management.py | 28 +++++---- comfy/ops.py | 121 ++++++++++++++++++++++++++++---------- 3 files changed, 114 insertions(+), 52 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index f08ff4b36..0b5e30f52 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -310,11 +310,13 @@ class ControlLoraOps: self.bias = None def forward(self, input): - weight, bias = comfy.ops.cast_bias_weight(self, input) + weight, bias, offload_stream = comfy.ops.cast_bias_weight(self, input, offloadable=True) if self.up is not None: - return torch.nn.functional.linear(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias) + x = torch.nn.functional.linear(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias) else: - return torch.nn.functional.linear(input, weight, bias) + x = torch.nn.functional.linear(input, weight, bias) + comfy.ops.uncast_bias_weight(self, weight, bias, offload_stream) + return x class Conv2d(torch.nn.Module, comfy.ops.CastWeightBiasOp): def __init__( @@ -350,12 +352,13 @@ class ControlLoraOps: def forward(self, input): - weight, bias = comfy.ops.cast_bias_weight(self, input) + weight, bias, offload_stream = comfy.ops.cast_bias_weight(self, input, offloadable=True) if self.up is not None: - return torch.nn.functional.conv2d(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias, self.stride, self.padding, self.dilation, self.groups) + x = torch.nn.functional.conv2d(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias, self.stride, self.padding, self.dilation, self.groups) else: - return torch.nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) - + x = torch.nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) + comfy.ops.uncast_bias_weight(self, weight, bias, offload_stream) + return x class ControlLora(ControlNet): def __init__(self, control_weights, global_average_pooling=False, model_options={}): #TODO? 
model_options
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 3e5b977d4..79c0dfdb4 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1013,6 +1013,16 @@ if args.async_offload:
         NUM_STREAMS = 2
     logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS))

+def current_stream(device):
+    if device is None:
+        return None
+    if is_device_cuda(device):
+        return torch.cuda.current_stream()
+    elif is_device_xpu(device):
+        return torch.xpu.current_stream()
+    else:
+        return None
+
 stream_counters = {}
 def get_offload_stream(device):
     stream_counter = stream_counters.get(device, 0)
@@ -1021,21 +1031,17 @@ def get_offload_stream(device):

     if device in STREAMS:
         ss = STREAMS[device]
-        s = ss[stream_counter]
+        # Sync the oldest stream in the queue with the current stream
+        ss[stream_counter].wait_stream(current_stream(device))
         stream_counter = (stream_counter + 1) % len(ss)
-        if is_device_cuda(device):
-            ss[stream_counter].wait_stream(torch.cuda.current_stream())
-        elif is_device_xpu(device):
-            ss[stream_counter].wait_stream(torch.xpu.current_stream())
         stream_counters[device] = stream_counter
-        return s
+        return ss[stream_counter]
     elif is_device_cuda(device):
         ss = []
         for k in range(NUM_STREAMS):
             ss.append(torch.cuda.Stream(device=device, priority=0))
         STREAMS[device] = ss
         s = ss[stream_counter]
-        stream_counter = (stream_counter + 1) % len(ss)
         stream_counters[device] = stream_counter
         return s
     elif is_device_xpu(device):
@@ -1044,18 +1050,14 @@ def get_offload_stream(device):
             ss.append(torch.xpu.Stream(device=device, priority=0))
         STREAMS[device] = ss
         s = ss[stream_counter]
-        stream_counter = (stream_counter + 1) % len(ss)
         stream_counters[device] = stream_counter
         return s
     return None

 def sync_stream(device, stream):
-    if stream is None:
+    if stream is None or current_stream(device) is None:
         return
-    if is_device_cuda(device):
-        torch.cuda.current_stream().wait_stream(stream)
-    elif is_device_xpu(device):
-        torch.xpu.current_stream().wait_stream(stream)
+    current_stream(device).wait_stream(stream)

 def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, stream=None):
     if device is None or weight.device == device:
diff --git a/comfy/ops.py b/comfy/ops.py
index 93731eedf..71ca7a2bd 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -70,8 +70,12 @@ cast_to = comfy.model_management.cast_to #TODO: remove once no more references
 def cast_to_input(weight, input, non_blocking=False, copy=True):
     return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy)

+
 @torch.compiler.disable()
-def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None):
+def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, offloadable=False):
+    # NOTE: offloadable=False is a legacy default; if you are a custom node author reading this, please pass
+    # offloadable=True and call uncast_bias_weight() after your last usage of the weight/bias. This
+    # will add async-offload support to your cast and improve performance.
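+    #
+    # A minimal sketch of the opt-in call pattern (hypothetical caller):
+    #   weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True)
+    #   out = torch.nn.functional.linear(input, weight, bias)  # last use of weight/bias
+    #   uncast_bias_weight(self, weight, bias, offload_stream)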
if input is not None: if dtype is None: dtype = input.dtype @@ -80,7 +84,11 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None): if device is None: device = input.device - offload_stream = comfy.model_management.get_offload_stream(device) + if offloadable: + offload_stream = comfy.model_management.get_offload_stream(device) + else: + offload_stream = None + if offload_stream is not None: wf_context = offload_stream else: @@ -105,7 +113,24 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None): weight = f(weight) comfy.model_management.sync_stream(device, offload_stream) - return weight, bias + if offloadable: + return weight, bias, offload_stream + else: + #Legacy function signature + return weight, bias + + +def uncast_bias_weight(s, weight, bias, offload_stream): + if offload_stream is None: + return + if weight is not None: + device = weight.device + else: + if bias is None: + return + device = bias.device + offload_stream.wait_stream(comfy.model_management.current_stream(device)) + class CastWeightBiasOp: comfy_cast_weights = False @@ -118,8 +143,10 @@ class disable_weight_init: return None def forward_comfy_cast_weights(self, input): - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.linear(input, weight, bias) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = torch.nn.functional.linear(input, weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -133,8 +160,10 @@ class disable_weight_init: return None def forward_comfy_cast_weights(self, input): - weight, bias = cast_bias_weight(self, input) - return self._conv_forward(input, weight, bias) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = self._conv_forward(input, weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -148,8 +177,10 @@ class disable_weight_init: return None def forward_comfy_cast_weights(self, input): - weight, bias = cast_bias_weight(self, input) - return self._conv_forward(input, weight, bias) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = self._conv_forward(input, weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -172,8 +203,10 @@ class disable_weight_init: return super()._conv_forward(input, weight, bias, *args, **kwargs) def forward_comfy_cast_weights(self, input): - weight, bias = cast_bias_weight(self, input) - return self._conv_forward(input, weight, bias) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = self._conv_forward(input, weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -187,8 +220,10 @@ class disable_weight_init: return None def forward_comfy_cast_weights(self, input): - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -203,11 +238,14 @@ class disable_weight_init: def 
forward_comfy_cast_weights(self, input): if self.weight is not None: - weight, bias = cast_bias_weight(self, input) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) else: weight = None bias = None - return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) + offload_stream = None + x = torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -223,11 +261,15 @@ class disable_weight_init: def forward_comfy_cast_weights(self, input): if self.weight is not None: - weight, bias = cast_bias_weight(self, input) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) else: weight = None - return comfy.rmsnorm.rms_norm(input, weight, self.eps) # TODO: switch to commented out line when old torch is deprecated - # return torch.nn.functional.rms_norm(input, self.normalized_shape, weight, self.eps) + bias = None + offload_stream = None + x = comfy.rmsnorm.rms_norm(input, weight, self.eps) # TODO: switch to commented out line when old torch is deprecated + # x = torch.nn.functional.rms_norm(input, self.normalized_shape, weight, self.eps) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -246,10 +288,12 @@ class disable_weight_init: input, output_size, self.stride, self.padding, self.kernel_size, num_spatial_dims, self.dilation) - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.conv_transpose2d( + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = torch.nn.functional.conv_transpose2d( input, weight, bias, self.stride, self.padding, output_padding, self.groups, self.dilation) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -268,10 +312,12 @@ class disable_weight_init: input, output_size, self.stride, self.padding, self.kernel_size, num_spatial_dims, self.dilation) - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.conv_transpose1d( + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = torch.nn.functional.conv_transpose1d( input, weight, bias, self.stride, self.padding, output_padding, self.groups, self.dilation) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, *args, **kwargs): run_every_op() @@ -289,8 +335,11 @@ class disable_weight_init: output_dtype = out_dtype if self.weight.dtype == torch.float16 or self.weight.dtype == torch.bfloat16: out_dtype = None - weight, bias = cast_bias_weight(self, device=input.device, dtype=out_dtype) - return torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype) + weight, bias, offload_stream = cast_bias_weight(self, device=input.device, dtype=out_dtype, offloadable=True) + x = torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype) + uncast_bias_weight(self, weight, bias, offload_stream) + return x + def forward(self, *args, **kwargs): run_every_op() @@ -361,7 +410,7 @@ def fp8_linear(self, input): input_dtype = input.dtype if len(input.shape) == 3: - w, bias = cast_bias_weight(self, input, dtype=dtype, bias_dtype=input_dtype) + w, bias, 
offload_stream = cast_bias_weight(self, input, dtype=dtype, bias_dtype=input_dtype, offloadable=True) scale_weight = self.scale_weight scale_input = self.scale_input @@ -382,6 +431,8 @@ def fp8_linear(self, input): quantized_input = QuantizedTensor.from_float(input.reshape(-1, input_shape[2]), TensorCoreFP8Layout, scale=scale_input, dtype=dtype) o = torch.nn.functional.linear(quantized_input, quantized_weight, bias) + uncast_bias_weight(self, w, bias, offload_stream) + if tensor_2d: return o.reshape(input_shape[0], -1) return o.reshape((-1, input_shape[1], self.weight.shape[0])) @@ -404,8 +455,10 @@ class fp8_ops(manual_cast): except Exception as e: logging.info("Exception during fp8 op: {}".format(e)) - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.linear(input, weight, bias) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = torch.nn.functional.linear(input, weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def scaled_fp8_ops(fp8_matrix_mult=False, scale_input=False, override_dtype=None): logging.info("Using scaled fp8: fp8 matrix mult: {}, scale input: {}".format(fp8_matrix_mult, scale_input)) @@ -433,12 +486,14 @@ def scaled_fp8_ops(fp8_matrix_mult=False, scale_input=False, override_dtype=None if out is not None: return out - weight, bias = cast_bias_weight(self, input) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) if weight.numel() < input.numel(): #TODO: optimize - return torch.nn.functional.linear(input, weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype), bias) + x = torch.nn.functional.linear(input, weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype), bias) else: - return torch.nn.functional.linear(input * self.scale_weight.to(device=weight.device, dtype=weight.dtype), weight, bias) + x = torch.nn.functional.linear(input * self.scale_weight.to(device=weight.device, dtype=weight.dtype), weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def convert_weight(self, weight, inplace=False, **kwargs): if inplace: @@ -577,8 +632,10 @@ class MixedPrecisionOps(disable_weight_init): return torch.nn.functional.linear(input, weight, bias) def forward_comfy_cast_weights(self, input): - weight, bias = cast_bias_weight(self, input) - return self._forward(input, weight, bias) + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = self._forward(input, weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x def forward(self, input, *args, **kwargs): run_every_op() From 25de7b1bfa22dd98922f047a1342cc97f8e46c5b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 29 Oct 2025 14:20:27 -0700 Subject: [PATCH 0815/1073] Try to fix slow load issue on low ram hardware with pinned mem. 
(#10536) --- comfy/model_patcher.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 119119e96..74b9e48bc 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -658,6 +658,7 @@ class ModelPatcher: loading = self._load_list() load_completely = [] + offloaded = [] loading.sort(reverse=True) for x in loading: n = x[1] @@ -699,8 +700,7 @@ class ModelPatcher: patch_counter += 1 cast_weight = True - for param in params: - self.pin_weight_to_device("{}.{}".format(n, param)) + offloaded.append((module_mem, n, m, params)) else: if hasattr(m, "comfy_cast_weights"): wipe_lowvram_weight(m) @@ -741,6 +741,12 @@ class ModelPatcher: for x in load_completely: x[2].to(device_to) + for x in offloaded: + n = x[1] + params = x[3] + for param in params: + self.pin_weight_to_device("{}.{}".format(n, param)) + if lowvram_counter > 0: logging.info("loaded partially {} {} {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), patch_counter)) self.model.model_lowvram = True From 906c0899575a83ac69bb095e835fdec748891da4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:29:01 -0700 Subject: [PATCH 0816/1073] Fix small performance regression with fp8 fast and scaled fp8. (#10537) --- comfy/ops.py | 6 +++++- comfy/quant_ops.py | 5 +++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 71ca7a2bd..18f6b804b 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -421,14 +421,18 @@ def fp8_linear(self, input): if scale_input is None: scale_input = torch.ones((), device=input.device, dtype=torch.float32) + input = torch.clamp(input, min=-448, max=448, out=input) + input = input.reshape(-1, input_shape[2]).to(dtype).contiguous() + layout_params_weight = {'scale': scale_input, 'orig_dtype': input_dtype} + quantized_input = QuantizedTensor(input.reshape(-1, input_shape[2]).to(dtype).contiguous(), TensorCoreFP8Layout, layout_params_weight) else: scale_input = scale_input.to(input.device) + quantized_input = QuantizedTensor.from_float(input.reshape(-1, input_shape[2]), TensorCoreFP8Layout, scale=scale_input, dtype=dtype) # Wrap weight in QuantizedTensor - this enables unified dispatch # Call F.linear - __torch_dispatch__ routes to fp8_linear handler in quant_ops.py! 
     layout_params_weight = {'scale': scale_weight, 'orig_dtype': input_dtype}
     quantized_weight = QuantizedTensor(w, TensorCoreFP8Layout, layout_params_weight)
-    quantized_input = QuantizedTensor.from_float(input.reshape(-1, input_shape[2]), TensorCoreFP8Layout, scale=scale_input, dtype=dtype)
     o = torch.nn.functional.linear(quantized_input, quantized_weight, bias)

     uncast_bias_weight(self, w, bias, offload_stream)
diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py
index fb35a0d40..c822fe53c 100644
--- a/comfy/quant_ops.py
+++ b/comfy/quant_ops.py
@@ -357,9 +357,10 @@ class TensorCoreFP8Layout(QuantizedLayout):
             scale = torch.tensor(scale)
         scale = scale.to(device=tensor.device, dtype=torch.float32)

-        lp_amax = torch.finfo(dtype).max
         tensor_scaled = tensor * (1.0 / scale).to(tensor.dtype)
-        torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled)
+        # TODO: uncomment this if it's actually needed, because the clamp has a small performance penalty
+        # lp_amax = torch.finfo(dtype).max
+        # torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled)
         qdata = tensor_scaled.to(dtype, memory_format=torch.contiguous_format)

         layout_params = {

From 998bf60bebd03e57a55e106434657849342b733f Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Wed, 29 Oct 2025 16:37:06 -0700
Subject: [PATCH 0817/1073] Add units/info for the numbers displayed on 'load
 completely' and 'load partially' log messages (#10538)

---
 comfy/model_patcher.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 74b9e48bc..ed3f3f5cb 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -655,6 +655,7 @@ class ModelPatcher:
         mem_counter = 0
         patch_counter = 0
         lowvram_counter = 0
+        lowvram_mem_counter = 0
         loading = self._load_list()

         load_completely = []
@@ -675,6 +676,7 @@ class ModelPatcher:
                 if mem_counter + module_mem >= lowvram_model_memory:
                     lowvram_weight = True
                     lowvram_counter += 1
+                    lowvram_mem_counter += module_mem
                     if hasattr(m, "prev_comfy_cast_weights"): #Already lowvramed
                         continue

@@ -748,10 +750,10 @@ class ModelPatcher:
         if lowvram_counter > 0:
-            logging.info("loaded partially {} {} {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), patch_counter))
+            logging.info("loaded partially; {:.2f} MB usable, {:.2f} MB loaded, {:.2f} MB offloaded, lowvram patches: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), patch_counter))
             self.model.model_lowvram = True
         else:
-            logging.info("loaded completely {} {} {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load))
+            logging.info("loaded completely; {:.2f} MB usable, {:.2f} MB loaded, full load: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load))
             self.model.model_lowvram = False
             if full_load:
                 self.model.to(device_to)

From 163b629c70a349c7d1e91eebc5365713e770af8a Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Thu, 30 Oct 2025 08:49:03 +0200
Subject: [PATCH 0818/1073] use new API client in Pixverse and Ideogram nodes
 (#10543)

---
 comfy_api_nodes/apinode_utils.py | 109 +------------
 comfy_api_nodes/nodes_bfl.py | 43 +----
 comfy_api_nodes/nodes_bytedance.py | 12 +-
 comfy_api_nodes/nodes_ideogram.py | 137 ++++------
 comfy_api_nodes/nodes_kling.py | 2 +-
 comfy_api_nodes/nodes_pixverse.py | 194 ++++----------
comfy_api_nodes/nodes_recraft.py | 4 +- comfy_api_nodes/nodes_runway.py | 10 +- comfy_api_nodes/nodes_vidu.py | 12 +- comfy_api_nodes/util/__init__.py | 10 +- comfy_api_nodes/util/conversions.py | 21 +++ comfy_api_nodes/util/validation_utils.py | 125 ++++++++++----- 12 files changed, 220 insertions(+), 459 deletions(-) diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py index 6a72b9d1d..ecd604ff8 100644 --- a/comfy_api_nodes/apinode_utils.py +++ b/comfy_api_nodes/apinode_utils.py @@ -1,15 +1,12 @@ from __future__ import annotations import aiohttp import mimetypes -from typing import Optional, Union -from comfy.utils import common_upscale +from typing import Union from server import PromptServer -from comfy.cli_args import args import numpy as np from PIL import Image import torch -import math import base64 from io import BytesIO @@ -60,85 +57,6 @@ async def validate_and_cast_response( return torch.stack(image_tensors, dim=0) -def validate_aspect_ratio( - aspect_ratio: str, - minimum_ratio: float, - maximum_ratio: float, - minimum_ratio_str: str, - maximum_ratio_str: str, -) -> float: - """Validates and casts an aspect ratio string to a float. - - Args: - aspect_ratio: The aspect ratio string to validate. - minimum_ratio: The minimum aspect ratio. - maximum_ratio: The maximum aspect ratio. - minimum_ratio_str: The minimum aspect ratio string. - maximum_ratio_str: The maximum aspect ratio string. - - Returns: - The validated and cast aspect ratio. - - Raises: - Exception: If the aspect ratio is not valid. - """ - # get ratio values - numbers = aspect_ratio.split(":") - if len(numbers) != 2: - raise TypeError( - f"Aspect ratio must be in the format X:Y, such as 16:9, but was {aspect_ratio}." - ) - try: - numerator = int(numbers[0]) - denominator = int(numbers[1]) - except ValueError as exc: - raise TypeError( - f"Aspect ratio must contain numbers separated by ':', such as 16:9, but was {aspect_ratio}." - ) from exc - calculated_ratio = numerator / denominator - # if not close to minimum and maximum, check bounds - if not math.isclose(calculated_ratio, minimum_ratio) or not math.isclose( - calculated_ratio, maximum_ratio - ): - if calculated_ratio < minimum_ratio: - raise TypeError( - f"Aspect ratio cannot reduce to any less than {minimum_ratio_str} ({minimum_ratio}), but was {aspect_ratio} ({calculated_ratio})." - ) - if calculated_ratio > maximum_ratio: - raise TypeError( - f"Aspect ratio cannot reduce to any greater than {maximum_ratio_str} ({maximum_ratio}), but was {aspect_ratio} ({calculated_ratio})." - ) - return aspect_ratio - - -async def download_url_to_bytesio( - url: str, timeout: int = None, auth_kwargs: Optional[dict[str, str]] = None -) -> BytesIO: - """Downloads content from a URL using requests and returns it as BytesIO. - - Args: - url: The URL to download. - timeout: Request timeout in seconds. Defaults to None (no timeout). - - Returns: - BytesIO object containing the downloaded content. 
- """ - headers = {} - if url.startswith("/proxy/"): - url = str(args.comfy_api_base).rstrip("/") + url - auth_token = auth_kwargs.get("auth_token") - comfy_api_key = auth_kwargs.get("comfy_api_key") - if auth_token: - headers["Authorization"] = f"Bearer {auth_token}" - elif comfy_api_key: - headers["X-API-KEY"] = comfy_api_key - timeout_cfg = aiohttp.ClientTimeout(total=timeout) if timeout else None - async with aiohttp.ClientSession(timeout=timeout_cfg) as session: - async with session.get(url, headers=headers) as resp: - resp.raise_for_status() # Raises HTTPError for bad responses (4XX or 5XX) - return BytesIO(await resp.read()) - - def text_filepath_to_base64_string(filepath: str) -> str: """Converts a text file to a base64 string.""" with open(filepath, "rb") as f: @@ -153,28 +71,3 @@ def text_filepath_to_data_uri(filepath: str) -> str: if mime_type is None: mime_type = "application/octet-stream" return f"data:{mime_type};base64,{base64_string}" - - -def resize_mask_to_image( - mask: torch.Tensor, - image: torch.Tensor, - upscale_method="nearest-exact", - crop="disabled", - allow_gradient=True, - add_channel_dim=False, -): - """ - Resize mask to be the same dimensions as an image, while maintaining proper format for API calls. - """ - _, H, W, _ = image.shape - mask = mask.unsqueeze(-1) - mask = mask.movedim(-1, 1) - mask = common_upscale( - mask, width=W, height=H, upscale_method=upscale_method, crop=crop - ) - mask = mask.movedim(1, -1) - if not add_channel_dim: - mask = mask.squeeze(-1) - if not allow_gradient: - mask = (mask > 0.5).float() - return mask diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index baa74fd52..1740fb377 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -5,10 +5,6 @@ import torch from typing_extensions import override from comfy_api.latest import IO, ComfyExtension -from comfy_api_nodes.apinode_utils import ( - resize_mask_to_image, - validate_aspect_ratio, -) from comfy_api_nodes.apis.bfl_api import ( BFLFluxExpandImageRequest, BFLFluxFillImageRequest, @@ -23,8 +19,10 @@ from comfy_api_nodes.util import ( ApiEndpoint, download_url_to_image_tensor, poll_op, + resize_mask_to_image, sync_op, tensor_to_base64_string, + validate_aspect_ratio_string, validate_string, ) @@ -43,11 +41,6 @@ class FluxProUltraImageNode(IO.ComfyNode): Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution. 
""" - MINIMUM_RATIO = 1 / 4 - MAXIMUM_RATIO = 4 / 1 - MINIMUM_RATIO_STR = "1:4" - MAXIMUM_RATIO_STR = "4:1" - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( @@ -112,16 +105,7 @@ class FluxProUltraImageNode(IO.ComfyNode): @classmethod def validate_inputs(cls, aspect_ratio: str): - try: - validate_aspect_ratio( - aspect_ratio, - minimum_ratio=cls.MINIMUM_RATIO, - maximum_ratio=cls.MAXIMUM_RATIO, - minimum_ratio_str=cls.MINIMUM_RATIO_STR, - maximum_ratio_str=cls.MAXIMUM_RATIO_STR, - ) - except Exception as e: - return str(e) + validate_aspect_ratio_string(aspect_ratio, (1, 4), (4, 1)) return True @classmethod @@ -145,13 +129,7 @@ class FluxProUltraImageNode(IO.ComfyNode): prompt=prompt, prompt_upsampling=prompt_upsampling, seed=seed, - aspect_ratio=validate_aspect_ratio( - aspect_ratio, - minimum_ratio=cls.MINIMUM_RATIO, - maximum_ratio=cls.MAXIMUM_RATIO, - minimum_ratio_str=cls.MINIMUM_RATIO_STR, - maximum_ratio_str=cls.MAXIMUM_RATIO_STR, - ), + aspect_ratio=aspect_ratio, raw=raw, image_prompt=(image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt)), image_prompt_strength=(None if image_prompt is None else round(image_prompt_strength, 2)), @@ -180,11 +158,6 @@ class FluxKontextProImageNode(IO.ComfyNode): Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio. """ - MINIMUM_RATIO = 1 / 4 - MAXIMUM_RATIO = 4 / 1 - MINIMUM_RATIO_STR = "1:4" - MAXIMUM_RATIO_STR = "4:1" - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( @@ -261,13 +234,7 @@ class FluxKontextProImageNode(IO.ComfyNode): seed=0, prompt_upsampling=False, ) -> IO.NodeOutput: - aspect_ratio = validate_aspect_ratio( - aspect_ratio, - minimum_ratio=cls.MINIMUM_RATIO, - maximum_ratio=cls.MAXIMUM_RATIO, - minimum_ratio_str=cls.MINIMUM_RATIO_STR, - maximum_ratio_str=cls.MAXIMUM_RATIO_STR, - ) + validate_aspect_ratio_string(aspect_ratio, (1, 4), (4, 1)) if input_image is None: validate_string(prompt, strip_whitespace=False) initial_response = await sync_op( diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index 534af380d..caced471e 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -17,7 +17,7 @@ from comfy_api_nodes.util import ( poll_op, sync_op, upload_images_to_comfyapi, - validate_image_aspect_ratio_range, + validate_image_aspect_ratio, validate_image_dimensions, validate_string, ) @@ -403,7 +403,7 @@ class ByteDanceImageEditNode(IO.ComfyNode): validate_string(prompt, strip_whitespace=True, min_length=1) if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") - validate_image_aspect_ratio_range(image, (1, 3), (3, 1)) + validate_image_aspect_ratio(image, (1, 3), (3, 1)) source_url = (await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0] payload = Image2ImageTaskCreationRequest( model=model, @@ -565,7 +565,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): reference_images_urls = [] if n_input_images: for i in image: - validate_image_aspect_ratio_range(i, (1, 3), (3, 1)) + validate_image_aspect_ratio(i, (1, 3), (3, 1)) reference_images_urls = await upload_images_to_comfyapi( cls, image, @@ -798,7 +798,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode): validate_string(prompt, strip_whitespace=True, min_length=1) raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000) - 
validate_image_aspect_ratio_range(image, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 + validate_image_aspect_ratio(image, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0] prompt = ( @@ -923,7 +923,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode): raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"]) for i in (first_frame, last_frame): validate_image_dimensions(i, min_width=300, min_height=300, max_width=6000, max_height=6000) - validate_image_aspect_ratio_range(i, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 + validate_image_aspect_ratio(i, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 download_urls = await upload_images_to_comfyapi( cls, @@ -1045,7 +1045,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode): raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "watermark"]) for image in images: validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000) - validate_image_aspect_ratio_range(image, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 + validate_image_aspect_ratio(image, (2, 5), (5, 2), strict=False) # 0.4 to 2.5 image_urls = await upload_images_to_comfyapi(cls, images, max_images=4, mime_type="image/png") prompt = ( diff --git a/comfy_api_nodes/nodes_ideogram.py b/comfy_api_nodes/nodes_ideogram.py index d8fd3378b..48f94e612 100644 --- a/comfy_api_nodes/nodes_ideogram.py +++ b/comfy_api_nodes/nodes_ideogram.py @@ -1,6 +1,6 @@ from io import BytesIO from typing_extensions import override -from comfy_api.latest import ComfyExtension, IO +from comfy_api.latest import IO, ComfyExtension from PIL import Image import numpy as np import torch @@ -11,19 +11,13 @@ from comfy_api_nodes.apis import ( IdeogramV3Request, IdeogramV3EditRequest, ) - -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, -) - -from comfy_api_nodes.apinode_utils import ( - download_url_to_bytesio, + bytesio_to_image_tensor, + download_url_as_bytesio, resize_mask_to_image, + sync_op, ) -from comfy_api_nodes.util import bytesio_to_image_tensor -from server import PromptServer V1_V1_RES_MAP = { "Auto":"AUTO", @@ -220,7 +214,7 @@ async def download_and_process_images(image_urls): for image_url in image_urls: # Using functions from apinode_utils.py to handle downloading and processing - image_bytesio = await download_url_to_bytesio(image_url) # Download image content to BytesIO + image_bytesio = await download_url_as_bytesio(image_url) # Download image content to BytesIO img_tensor = bytesio_to_image_tensor(image_bytesio, mode="RGB") # Convert to torch.Tensor with RGB mode image_tensors.append(img_tensor) @@ -233,19 +227,6 @@ async def download_and_process_images(image_urls): return stacked_tensors -def display_image_urls_on_node(image_urls, node_id): - if node_id and image_urls: - if len(image_urls) == 1: - PromptServer.instance.send_progress_text( - f"Generated Image URL:\n{image_urls[0]}", node_id - ) - else: - urls_text = "Generated Image URLs:\n" + "\n".join( - f"{i+1}. 
{url}" for i, url in enumerate(image_urls) - ) - PromptServer.instance.send_progress_text(urls_text, node_id) - - class IdeogramV1(IO.ComfyNode): @classmethod @@ -334,44 +315,30 @@ class IdeogramV1(IO.ComfyNode): aspect_ratio = V1_V2_RATIO_MAP.get(aspect_ratio, None) model = "V_1_TURBO" if turbo else "V_1" - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/ideogram/generate", - method=HttpMethod.POST, - request_model=IdeogramGenerateRequest, - response_model=IdeogramGenerateResponse, - ), - request=IdeogramGenerateRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/ideogram/generate", method="POST"), + response_model=IdeogramGenerateResponse, + data=IdeogramGenerateRequest( image_request=ImageRequest( prompt=prompt, model=model, num_images=num_images, seed=seed, aspect_ratio=aspect_ratio if aspect_ratio != "ASPECT_1_1" else None, - magic_prompt_option=( - magic_prompt_option if magic_prompt_option != "AUTO" else None - ), + magic_prompt_option=(magic_prompt_option if magic_prompt_option != "AUTO" else None), negative_prompt=negative_prompt if negative_prompt else None, ) ), - auth_kwargs=auth, + max_retries=1, ) - response = await operation.execute() - if not response.data or len(response.data) == 0: raise Exception("No images were generated in the response") image_urls = [image_data.url for image_data in response.data if image_data.url] - if not image_urls: raise Exception("No image URLs were generated in the response") - - display_image_urls_on_node(image_urls, cls.hidden.unique_id) return IO.NodeOutput(await download_and_process_images(image_urls)) @@ -500,18 +467,11 @@ class IdeogramV2(IO.ComfyNode): else: final_aspect_ratio = aspect_ratio if aspect_ratio != "ASPECT_1_1" else None - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/ideogram/generate", - method=HttpMethod.POST, - request_model=IdeogramGenerateRequest, - response_model=IdeogramGenerateResponse, - ), - request=IdeogramGenerateRequest( + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/ideogram/generate", method="POST"), + response_model=IdeogramGenerateResponse, + data=IdeogramGenerateRequest( image_request=ImageRequest( prompt=prompt, model=model, @@ -519,28 +479,20 @@ class IdeogramV2(IO.ComfyNode): seed=seed, aspect_ratio=final_aspect_ratio, resolution=final_resolution, - magic_prompt_option=( - magic_prompt_option if magic_prompt_option != "AUTO" else None - ), + magic_prompt_option=(magic_prompt_option if magic_prompt_option != "AUTO" else None), style_type=style_type if style_type != "NONE" else None, negative_prompt=negative_prompt if negative_prompt else None, color_palette=color_palette if color_palette else None, ) ), - auth_kwargs=auth, + max_retries=1, ) - - response = await operation.execute() - if not response.data or len(response.data) == 0: raise Exception("No images were generated in the response") image_urls = [image_data.url for image_data in response.data if image_data.url] - if not image_urls: raise Exception("No image URLs were generated in the response") - - display_image_urls_on_node(image_urls, cls.hidden.unique_id) return IO.NodeOutput(await download_and_process_images(image_urls)) @@ -656,10 +608,6 @@ class IdeogramV3(IO.ComfyNode): character_image=None, character_mask=None, ): - auth = { 
- "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } if rendering_speed == "BALANCED": # for backward compatibility rendering_speed = "DEFAULT" @@ -694,9 +642,6 @@ class IdeogramV3(IO.ComfyNode): # Check if both image and mask are provided for editing mode if image is not None and mask is not None: - # Edit mode - path = "/proxy/ideogram/ideogram-v3/edit" - # Process image and mask input_tensor = image.squeeze().cpu() # Resize mask to match image dimension @@ -749,27 +694,20 @@ class IdeogramV3(IO.ComfyNode): if character_mask_binary: files["character_mask_binary"] = character_mask_binary - # Execute the operation for edit mode - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=IdeogramV3EditRequest, - response_model=IdeogramGenerateResponse, - ), - request=edit_request, + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/ideogram/ideogram-v3/edit", method="POST"), + response_model=IdeogramGenerateResponse, + data=edit_request, files=files, content_type="multipart/form-data", - auth_kwargs=auth, + max_retries=1, ) elif image is not None or mask is not None: # If only one of image or mask is provided, raise an error raise Exception("Ideogram V3 image editing requires both an image AND a mask") else: - # Generation mode - path = "/proxy/ideogram/ideogram-v3/generate" - # Create generation request gen_request = IdeogramV3Request( prompt=prompt, @@ -800,32 +738,22 @@ class IdeogramV3(IO.ComfyNode): if files: gen_request.style_type = "AUTO" - # Execute the operation for generation mode - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=IdeogramV3Request, - response_model=IdeogramGenerateResponse, - ), - request=gen_request, + response = await sync_op( + cls, + endpoint=ApiEndpoint(path="/proxy/ideogram/ideogram-v3/generate", method="POST"), + response_model=IdeogramGenerateResponse, + data=gen_request, files=files if files else None, content_type="multipart/form-data", - auth_kwargs=auth, + max_retries=1, ) - # Execute the operation and process response - response = await operation.execute() - if not response.data or len(response.data) == 0: raise Exception("No images were generated in the response") image_urls = [image_data.url for image_data in response.data if image_data.url] - if not image_urls: raise Exception("No image URLs were generated in the response") - - display_image_urls_on_node(image_urls, cls.hidden.unique_id) return IO.NodeOutput(await download_and_process_images(image_urls)) @@ -838,5 +766,6 @@ class IdeogramExtension(ComfyExtension): IdeogramV3, ] + async def comfy_entrypoint() -> IdeogramExtension: return IdeogramExtension() diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index eea65c9ac..7b23e9cf9 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -282,7 +282,7 @@ def validate_input_image(image: torch.Tensor) -> None: See: https://app.klingai.com/global/dev/document-api/apiReference/model/imageToVideo """ validate_image_dimensions(image, min_width=300, min_height=300) - validate_image_aspect_ratio(image, min_aspect_ratio=1 / 2.5, max_aspect_ratio=2.5) + validate_image_aspect_ratio(image, (1, 2.5), (2.5, 1)) def get_video_from_response(response) -> KlingVideoResult: diff --git a/comfy_api_nodes/nodes_pixverse.py b/comfy_api_nodes/nodes_pixverse.py index b2b841be8..6e1686af0 100644 --- a/comfy_api_nodes/nodes_pixverse.py +++ 
b/comfy_api_nodes/nodes_pixverse.py @@ -1,7 +1,6 @@ -from inspect import cleandoc -from typing import Optional +import torch from typing_extensions import override -from io import BytesIO +from comfy_api.latest import IO, ComfyExtension from comfy_api_nodes.apis.pixverse_api import ( PixverseTextVideoRequest, PixverseImageVideoRequest, @@ -17,53 +16,30 @@ from comfy_api_nodes.apis.pixverse_api import ( PixverseIO, pixverse_templates, ) -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, - EmptyRequest, + download_url_to_video_output, + poll_op, + sync_op, + tensor_to_bytesio, + validate_string, ) -from comfy_api_nodes.util import validate_string, tensor_to_bytesio -from comfy_api.input_impl import VideoFromFile -from comfy_api.latest import ComfyExtension, IO - -import torch -import aiohttp - AVERAGE_DURATION_T2V = 32 AVERAGE_DURATION_I2V = 30 AVERAGE_DURATION_T2T = 52 -def get_video_url_from_response( - response: PixverseGenerationStatusResponse, -) -> Optional[str]: - if response.Resp is None or response.Resp.url is None: - return None - return str(response.Resp.url) - - -async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None): - # first, upload image to Pixverse and get image id to use in actual generation call - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/pixverse/image/upload", - method=HttpMethod.POST, - request_model=EmptyRequest, - response_model=PixverseImageUploadResponse, - ), - request=EmptyRequest(), +async def upload_image_to_pixverse(cls: type[IO.ComfyNode], image: torch.Tensor): + response_upload = await sync_op( + cls, + ApiEndpoint(path="/proxy/pixverse/image/upload", method="POST"), + response_model=PixverseImageUploadResponse, files={"image": tensor_to_bytesio(image)}, content_type="multipart/form-data", - auth_kwargs=auth_kwargs, ) - response_upload: PixverseImageUploadResponse = await operation.execute() - if response_upload.Resp is None: raise Exception(f"PixVerse image upload request failed: '{response_upload.ErrMsg}'") - return response_upload.Resp.img_id @@ -93,17 +69,13 @@ class PixverseTemplateNode(IO.ComfyNode): class PixverseTextToVideoNode(IO.ComfyNode): - """ - Generates videos based on prompt and output_size. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="PixverseTextToVideoNode", display_name="PixVerse Text to Video", category="api node/video/PixVerse", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos based on prompt and output_size.", inputs=[ IO.String.Input( "prompt", @@ -170,7 +142,7 @@ class PixverseTextToVideoNode(IO.ComfyNode): negative_prompt: str = None, pixverse_template: int = None, ) -> IO.NodeOutput: - validate_string(prompt, strip_whitespace=False) + validate_string(prompt, strip_whitespace=False, min_length=1) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration if quality == PixverseQuality.res_1080p: @@ -179,18 +151,11 @@ class PixverseTextToVideoNode(IO.ComfyNode): elif duration_seconds != PixverseDuration.dur_5: motion_mode = PixverseMotionMode.normal - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/pixverse/video/text/generate", - method=HttpMethod.POST, - request_model=PixverseTextVideoRequest, - response_model=PixverseVideoResponse, - ), - request=PixverseTextVideoRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/pixverse/video/text/generate", method="POST"), + response_model=PixverseVideoResponse, + data=PixverseTextVideoRequest( prompt=prompt, aspect_ratio=aspect_ratio, quality=quality, @@ -200,20 +165,14 @@ class PixverseTextToVideoNode(IO.ComfyNode): template_id=pixverse_template, seed=seed, ), - auth_kwargs=auth, ) - response_api = await operation.execute() - if response_api.Resp is None: raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=PixverseGenerationStatusResponse, - ), + response_poll = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}"), + response_model=PixverseGenerationStatusResponse, completed_statuses=[PixverseStatus.successful], failed_statuses=[ PixverseStatus.contents_moderation, @@ -221,30 +180,19 @@ class PixverseTextToVideoNode(IO.ComfyNode): PixverseStatus.deleted, ], status_extractor=lambda x: x.Resp.status, - auth_kwargs=auth, - node_id=cls.hidden.unique_id, - result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_T2V, ) - response_poll = await operation.execute() - - async with aiohttp.ClientSession() as session: - async with session.get(response_poll.Resp.url) as vid_response: - return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(await download_url_to_video_output(response_poll.Resp.url)) class PixverseImageToVideoNode(IO.ComfyNode): - """ - Generates videos based on prompt and output_size. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="PixverseImageToVideoNode", display_name="PixVerse Image to Video", category="api node/video/PixVerse", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos based on prompt and output_size.", inputs=[ IO.Image.Input("image"), IO.String.Input( @@ -309,11 +257,7 @@ class PixverseImageToVideoNode(IO.ComfyNode): pixverse_template: int = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - img_id = await upload_image_to_pixverse(image, auth_kwargs=auth) + img_id = await upload_image_to_pixverse(cls, image) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -323,14 +267,11 @@ class PixverseImageToVideoNode(IO.ComfyNode): elif duration_seconds != PixverseDuration.dur_5: motion_mode = PixverseMotionMode.normal - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/pixverse/video/img/generate", - method=HttpMethod.POST, - request_model=PixverseImageVideoRequest, - response_model=PixverseVideoResponse, - ), - request=PixverseImageVideoRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/pixverse/video/img/generate", method="POST"), + response_model=PixverseVideoResponse, + data=PixverseImageVideoRequest( img_id=img_id, prompt=prompt, quality=quality, @@ -340,20 +281,15 @@ class PixverseImageToVideoNode(IO.ComfyNode): template_id=pixverse_template, seed=seed, ), - auth_kwargs=auth, ) - response_api = await operation.execute() if response_api.Resp is None: raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=PixverseGenerationStatusResponse, - ), + response_poll = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}"), + response_model=PixverseGenerationStatusResponse, completed_statuses=[PixverseStatus.successful], failed_statuses=[ PixverseStatus.contents_moderation, @@ -361,30 +297,19 @@ class PixverseImageToVideoNode(IO.ComfyNode): PixverseStatus.deleted, ], status_extractor=lambda x: x.Resp.status, - auth_kwargs=auth, - node_id=cls.hidden.unique_id, - result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_I2V, ) - response_poll = await operation.execute() - - async with aiohttp.ClientSession() as session: - async with session.get(response_poll.Resp.url) as vid_response: - return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(await download_url_to_video_output(response_poll.Resp.url)) class PixverseTransitionVideoNode(IO.ComfyNode): - """ - Generates videos based on prompt and output_size. 
- """ - @classmethod def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="PixverseTransitionVideoNode", display_name="PixVerse Transition Video", category="api node/video/PixVerse", - description=cleandoc(cls.__doc__ or ""), + description="Generates videos based on prompt and output_size.", inputs=[ IO.Image.Input("first_frame"), IO.Image.Input("last_frame"), @@ -445,12 +370,8 @@ class PixverseTransitionVideoNode(IO.ComfyNode): negative_prompt: str = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=auth) - last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=auth) + first_frame_id = await upload_image_to_pixverse(cls, first_frame) + last_frame_id = await upload_image_to_pixverse(cls, last_frame) # 1080p is limited to 5 seconds duration # only normal motion_mode supported for 1080p or for non-5 second duration @@ -460,14 +381,11 @@ class PixverseTransitionVideoNode(IO.ComfyNode): elif duration_seconds != PixverseDuration.dur_5: motion_mode = PixverseMotionMode.normal - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/pixverse/video/transition/generate", - method=HttpMethod.POST, - request_model=PixverseTransitionVideoRequest, - response_model=PixverseVideoResponse, - ), - request=PixverseTransitionVideoRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/pixverse/video/transition/generate", method="POST"), + response_model=PixverseVideoResponse, + data=PixverseTransitionVideoRequest( first_frame_img=first_frame_id, last_frame_img=last_frame_id, prompt=prompt, @@ -477,20 +395,15 @@ class PixverseTransitionVideoNode(IO.ComfyNode): negative_prompt=negative_prompt if negative_prompt else None, seed=seed, ), - auth_kwargs=auth, ) - response_api = await operation.execute() if response_api.Resp is None: raise Exception(f"PixVerse request failed: '{response_api.ErrMsg}'") - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=PixverseGenerationStatusResponse, - ), + response_poll = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/pixverse/video/result/{response_api.Resp.video_id}"), + response_model=PixverseGenerationStatusResponse, completed_statuses=[PixverseStatus.successful], failed_statuses=[ PixverseStatus.contents_moderation, @@ -498,16 +411,9 @@ class PixverseTransitionVideoNode(IO.ComfyNode): PixverseStatus.deleted, ], status_extractor=lambda x: x.Resp.status, - auth_kwargs=auth, - node_id=cls.hidden.unique_id, - result_url_extractor=get_video_url_from_response, estimated_duration=AVERAGE_DURATION_T2V, ) - response_poll = await operation.execute() - - async with aiohttp.ClientSession() as session: - async with session.get(response_poll.Resp.url) as vid_response: - return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) + return IO.NodeOutput(await download_url_to_video_output(response_poll.Resp.url)) class PixVerseExtension(ComfyExtension): diff --git a/comfy_api_nodes/nodes_recraft.py b/comfy_api_nodes/nodes_recraft.py index dee186cd6..e3440b946 100644 --- a/comfy_api_nodes/nodes_recraft.py +++ b/comfy_api_nodes/nodes_recraft.py @@ -8,9 +8,6 @@ from typing_extensions import override from comfy.utils import ProgressBar from 
comfy_api.latest import IO, ComfyExtension -from comfy_api_nodes.apinode_utils import ( - resize_mask_to_image, -) from comfy_api_nodes.apis.recraft_api import ( RecraftColor, RecraftColorChain, @@ -28,6 +25,7 @@ from comfy_api_nodes.util import ( ApiEndpoint, bytesio_to_image_tensor, download_url_as_bytesio, + resize_mask_to_image, sync_op, tensor_to_bytesio, validate_string, diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py index 0543d1d0e..2fdafbbfe 100644 --- a/comfy_api_nodes/nodes_runway.py +++ b/comfy_api_nodes/nodes_runway.py @@ -200,7 +200,7 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode): ) -> IO.NodeOutput: validate_string(prompt, min_length=1) validate_image_dimensions(start_frame, max_width=7999, max_height=7999) - validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) + validate_image_aspect_ratio(start_frame, (1, 2), (2, 1)) download_urls = await upload_images_to_comfyapi( cls, @@ -290,7 +290,7 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode): ) -> IO.NodeOutput: validate_string(prompt, min_length=1) validate_image_dimensions(start_frame, max_width=7999, max_height=7999) - validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) + validate_image_aspect_ratio(start_frame, (1, 2), (2, 1)) download_urls = await upload_images_to_comfyapi( cls, @@ -390,8 +390,8 @@ class RunwayFirstLastFrameNode(IO.ComfyNode): validate_string(prompt, min_length=1) validate_image_dimensions(start_frame, max_width=7999, max_height=7999) validate_image_dimensions(end_frame, max_width=7999, max_height=7999) - validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) - validate_image_aspect_ratio(end_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0) + validate_image_aspect_ratio(start_frame, (1, 2), (2, 1)) + validate_image_aspect_ratio(end_frame, (1, 2), (2, 1)) stacked_input_images = image_tensor_pair_to_batch(start_frame, end_frame) download_urls = await upload_images_to_comfyapi( @@ -475,7 +475,7 @@ class RunwayTextToImageNode(IO.ComfyNode): reference_images = None if reference_image is not None: validate_image_dimensions(reference_image, max_width=7999, max_height=7999) - validate_image_aspect_ratio(reference_image, min_aspect_ratio=0.5, max_aspect_ratio=2.0) + validate_image_aspect_ratio(reference_image, (1, 2), (2, 1)) download_urls = await upload_images_to_comfyapi( cls, reference_image, diff --git a/comfy_api_nodes/nodes_vidu.py b/comfy_api_nodes/nodes_vidu.py index 0e0572f8c..7a679f0d9 100644 --- a/comfy_api_nodes/nodes_vidu.py +++ b/comfy_api_nodes/nodes_vidu.py @@ -14,9 +14,9 @@ from comfy_api_nodes.util import ( poll_op, sync_op, upload_images_to_comfyapi, - validate_aspect_ratio_closeness, - validate_image_aspect_ratio_range, + validate_image_aspect_ratio, validate_image_dimensions, + validate_images_aspect_ratio_closeness, ) VIDU_TEXT_TO_VIDEO = "/proxy/vidu/text2video" @@ -114,7 +114,7 @@ async def execute_task( cls, ApiEndpoint(path=VIDU_GET_GENERATION_STATUS % response.task_id), response_model=TaskStatusResponse, - status_extractor=lambda r: r.state.value, + status_extractor=lambda r: r.state, estimated_duration=estimated_duration, ) @@ -307,7 +307,7 @@ class ViduImageToVideoNode(IO.ComfyNode): ) -> IO.NodeOutput: if get_number_of_images(image) > 1: raise ValueError("Only one input image is allowed.") - validate_image_aspect_ratio_range(image, (1, 4), (4, 1)) + validate_image_aspect_ratio(image, (1, 4), (4, 1)) payload = TaskCreationRequest( model_name=model, 
prompt=prompt, @@ -423,7 +423,7 @@ class ViduReferenceVideoNode(IO.ComfyNode): if a > 7: raise ValueError("Too many images, maximum allowed is 7.") for image in images: - validate_image_aspect_ratio_range(image, (1, 4), (4, 1)) + validate_image_aspect_ratio(image, (1, 4), (4, 1)) validate_image_dimensions(image, min_width=128, min_height=128) payload = TaskCreationRequest( model_name=model, @@ -533,7 +533,7 @@ class ViduStartEndToVideoNode(IO.ComfyNode): resolution: str, movement_amplitude: str, ) -> IO.NodeOutput: - validate_aspect_ratio_closeness(first_frame, end_frame, min_rel=0.8, max_rel=1.25, strict=False) + validate_images_aspect_ratio_closeness(first_frame, end_frame, min_rel=0.8, max_rel=1.25, strict=False) payload = TaskCreationRequest( model_name=model, prompt=prompt, diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py index 0cca2b59b..bbc71363a 100644 --- a/comfy_api_nodes/util/__init__.py +++ b/comfy_api_nodes/util/__init__.py @@ -14,6 +14,7 @@ from .conversions import ( downscale_image_tensor, image_tensor_pair_to_batch, pil_to_bytesio, + resize_mask_to_image, tensor_to_base64_string, tensor_to_bytesio, tensor_to_pil, @@ -34,12 +35,12 @@ from .upload_helpers import ( ) from .validation_utils import ( get_number_of_images, - validate_aspect_ratio_closeness, + validate_aspect_ratio_string, validate_audio_duration, validate_container_format_is_mp4, validate_image_aspect_ratio, - validate_image_aspect_ratio_range, validate_image_dimensions, + validate_images_aspect_ratio_closeness, validate_string, validate_video_dimensions, validate_video_duration, @@ -70,6 +71,7 @@ __all__ = [ "downscale_image_tensor", "image_tensor_pair_to_batch", "pil_to_bytesio", + "resize_mask_to_image", "tensor_to_base64_string", "tensor_to_bytesio", "tensor_to_pil", @@ -77,12 +79,12 @@ __all__ = [ "video_to_base64_string", # Validation utilities "get_number_of_images", - "validate_aspect_ratio_closeness", + "validate_aspect_ratio_string", "validate_audio_duration", "validate_container_format_is_mp4", "validate_image_aspect_ratio", - "validate_image_aspect_ratio_range", "validate_image_dimensions", + "validate_images_aspect_ratio_closeness", "validate_string", "validate_video_dimensions", "validate_video_duration", diff --git a/comfy_api_nodes/util/conversions.py b/comfy_api_nodes/util/conversions.py index 9f4c90c5c..b59c2bd84 100644 --- a/comfy_api_nodes/util/conversions.py +++ b/comfy_api_nodes/util/conversions.py @@ -430,3 +430,24 @@ def audio_bytes_to_audio_input(audio_bytes: bytes) -> dict: wav = torch.cat(frames, dim=1) # [C, T] wav = _f32_pcm(wav) return {"waveform": wav.unsqueeze(0).contiguous(), "sample_rate": out_sr} + + +def resize_mask_to_image( + mask: torch.Tensor, + image: torch.Tensor, + upscale_method="nearest-exact", + crop="disabled", + allow_gradient=True, + add_channel_dim=False, +): + """Resize mask to be the same dimensions as an image, while maintaining proper format for API calls.""" + _, height, width, _ = image.shape + mask = mask.unsqueeze(-1) + mask = mask.movedim(-1, 1) + mask = common_upscale(mask, width=width, height=height, upscale_method=upscale_method, crop=crop) + mask = mask.movedim(1, -1) + if not add_channel_dim: + mask = mask.squeeze(-1) + if not allow_gradient: + mask = (mask > 0.5).float() + return mask diff --git a/comfy_api_nodes/util/validation_utils.py b/comfy_api_nodes/util/validation_utils.py index 22da05bc1..ec7006aed 100644 --- a/comfy_api_nodes/util/validation_utils.py +++ b/comfy_api_nodes/util/validation_utils.py @@ -37,63 
+37,62 @@ def validate_image_dimensions( def validate_image_aspect_ratio( image: torch.Tensor, - min_aspect_ratio: Optional[float] = None, - max_aspect_ratio: Optional[float] = None, -): - width, height = get_image_dimensions(image) - aspect_ratio = width / height - - if min_aspect_ratio is not None and aspect_ratio < min_aspect_ratio: - raise ValueError(f"Image aspect ratio must be at least {min_aspect_ratio}, got {aspect_ratio}") - if max_aspect_ratio is not None and aspect_ratio > max_aspect_ratio: - raise ValueError(f"Image aspect ratio must be at most {max_aspect_ratio}, got {aspect_ratio}") - - -def validate_image_aspect_ratio_range( - image: torch.Tensor, - min_ratio: tuple[float, float], # e.g. (1, 4) - max_ratio: tuple[float, float], # e.g. (4, 1) + min_ratio: Optional[tuple[float, float]] = None, # e.g. (1, 4) + max_ratio: Optional[tuple[float, float]] = None, # e.g. (4, 1) *, strict: bool = True, # True -> (min, max); False -> [min, max] ) -> float: - a1, b1 = min_ratio - a2, b2 = max_ratio - if a1 <= 0 or b1 <= 0 or a2 <= 0 or b2 <= 0: - raise ValueError("Ratios must be positive, like (1, 4) or (4, 1).") - lo, hi = (a1 / b1), (a2 / b2) - if lo > hi: - lo, hi = hi, lo - a1, b1, a2, b2 = a2, b2, a1, b1 # swap only for error text + """Validates that image aspect ratio is within min and max. If a bound is None, that side is not checked.""" w, h = get_image_dimensions(image) if w <= 0 or h <= 0: raise ValueError(f"Invalid image dimensions: {w}x{h}") ar = w / h - ok = (lo < ar < hi) if strict else (lo <= ar <= hi) - if not ok: - op = "<" if strict else "≤" - raise ValueError(f"Image aspect ratio {ar:.6g} is outside allowed range: {a1}:{b1} {op} ratio {op} {a2}:{b2}") + _assert_ratio_bounds(ar, min_ratio=min_ratio, max_ratio=max_ratio, strict=strict) return ar -def validate_aspect_ratio_closeness( - start_img, - end_img, - min_rel: float, - max_rel: float, +def validate_images_aspect_ratio_closeness( + first_image: torch.Tensor, + second_image: torch.Tensor, + min_rel: float, # e.g. 0.8 + max_rel: float, # e.g. 1.25 *, - strict: bool = False, # True => exclusive, False => inclusive -) -> None: - w1, h1 = get_image_dimensions(start_img) - w2, h2 = get_image_dimensions(end_img) + strict: bool = False, # True -> (min, max); False -> [min, max] +) -> float: + """ + Validates that the two images' aspect ratios are 'close'. + The closeness factor is C = max(ar1, ar2) / min(ar1, ar2) (C >= 1). + We require C <= limit, where limit = max(max_rel, 1.0 / min_rel). + + Returns the computed closeness factor C. + """ + w1, h1 = get_image_dimensions(first_image) + w2, h2 = get_image_dimensions(second_image) if min(w1, h1, w2, h2) <= 0: raise ValueError("Invalid image dimensions") ar1 = w1 / h1 ar2 = w2 / h2 - # Normalize so it is symmetric (no need to check both ar1/ar2 and ar2/ar1) closeness = max(ar1, ar2) / min(ar1, ar2) - limit = max(max_rel, 1.0 / min_rel) # for 0.8..1.25 this is 1.25 + limit = max(max_rel, 1.0 / min_rel) if (closeness >= limit) if strict else (closeness > limit): - raise ValueError(f"Aspect ratios must be close: start/end={ar1/ar2:.4f}, allowed range {min_rel}–{max_rel}.") + raise ValueError( + f"Aspect ratios must be close: ar1/ar2={ar1/ar2:.2g}, " + f"allowed range {min_rel}–{max_rel} (limit {limit:.2g})." + ) + return closeness + + +def validate_aspect_ratio_string( + aspect_ratio: str, + min_ratio: Optional[tuple[float, float]] = None, # e.g. (1, 4) + max_ratio: Optional[tuple[float, float]] = None, # e.g. 
(4, 1) +    *, +    strict: bool = False,  # True -> (min, max); False -> [min, max] +) -> float: +    """Parses 'X:Y' and validates it against optional bounds. Returns the numeric ratio.""" +    ar = _parse_aspect_ratio_string(aspect_ratio) +    _assert_ratio_bounds(ar, min_ratio=min_ratio, max_ratio=max_ratio, strict=strict) +    return ar def validate_video_dimensions( @@ -183,3 +182,49 @@ def validate_container_format_is_mp4(video: VideoInput) -> None: container_format = video.get_container_format() if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]: raise ValueError(f"Only MP4 container format supported. Got: {container_format}") + + +def _ratio_from_tuple(r: tuple[float, float]) -> float: +    a, b = r +    if a <= 0 or b <= 0: +        raise ValueError(f"Ratios must be positive, got {a}:{b}.") +    return a / b + + +def _assert_ratio_bounds( +    ar: float, +    *, +    min_ratio: Optional[tuple[float, float]] = None, +    max_ratio: Optional[tuple[float, float]] = None, +    strict: bool = True, +) -> None: +    """Validate a numeric aspect ratio against optional min/max ratio bounds.""" +    lo = _ratio_from_tuple(min_ratio) if min_ratio is not None else None +    hi = _ratio_from_tuple(max_ratio) if max_ratio is not None else None + +    if lo is not None and hi is not None and lo > hi: +        lo, hi = hi, lo  # normalize order if caller swapped them + +    if lo is not None: +        if (ar <= lo) if strict else (ar < lo): +            op = "<" if strict else "≤" +            raise ValueError(f"Aspect ratio `{ar:.2g}` must be {op} {lo:.2g}.") +    if hi is not None: +        if (ar >= hi) if strict else (ar > hi): +            op = "<" if strict else "≤" +            raise ValueError(f"Aspect ratio `{ar:.2g}` must be {op} {hi:.2g}.") + + +def _parse_aspect_ratio_string(ar_str: str) -> float: +    """Parse 'X:Y' with integer parts into a positive float ratio X/Y.""" +    parts = ar_str.split(":") +    if len(parts) != 2: +        raise ValueError(f"Aspect ratio must be 'X:Y' (e.g., 16:9), got '{ar_str}'.") +    try: +        a = int(parts[0].strip()) +        b = int(parts[1].strip()) +    except ValueError as exc: +        raise ValueError(f"Aspect ratio must contain integers separated by ':', got '{ar_str}'.") from exc +    if a <= 0 or b <= 0: +        raise ValueError(f"Aspect ratio parts must be positive integers, got {a}:{b}.") +    return a / b From dfac94695be95076d8028d04005a744f3ec0de8d Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:22:35 +0200 Subject: [PATCH 0819/1073] fix img2img operation in Dalle2 node (#10552) --- comfy_api_nodes/nodes_openai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index c467e840c..b4568fc85 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -225,7 +225,7 @@ class OpenAIDalle2(ComfyNodeABC): ), files=( { -                    "image": img_binary, +                    "image": ("image.png", img_binary, "image/png"), } if img_binary else None From 513b0c46fba3bf40191d684ff81207ad935f1717 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Fri, 31 Oct 2025 07:39:02 +1000 Subject: [PATCH 0820/1073] Add RAM Pressure cache mode (#10454) * execution: Roll the UI cache into the outputs Currently the UI cache is parallel to the output cache with expectations of being a content superset of the output cache. At the same time the UI and output cache are maintained completely separately, making it awkward to free the output cache content without changing the behaviour of the UI cache. There are two actual users (getters) of the UI cache. The first is the case of a direct content hit on the output cache when executing a node. This case is very naturally handled by merging the UI and output caches. The second is the history JSON generation at the end of the prompt. This currently works by asking the cache for all_node_ids and then pulling the cache contents for those nodes. all_node_ids is the nodes of the dynamic prompt. So fold the UI cache into the output cache. The current UI cache setter now writes to a prompt-scope dict. When the output cache is set, just get this value from the dict and tuple it up with the outputs. When generating the history, simply iterate the prompt-scope dict. This prepares support for more complex caching strategies (like RAM pressure caching) where less than one workflow will be cached and it will be desirable to keep the UI cache and output cache in sync. * sd: Implement RAM getter for VAE * model_patcher: Implement RAM getter for ModelPatcher * sd: Implement RAM getter for CLIP * Implement RAM Pressure cache Implement a cache sensitive to RAM pressure. When RAM headroom drops below a certain threshold, evict RAM-expensive nodes from the cache. Models and tensors are measured directly for RAM usage. An OOM score is then computed based on the RAM usage of the node. Note that due to indirection through shared objects (like a model patcher), multiple nodes can each account the same RAM as their individual usage. The intent is that this will free chains of nodes, particularly model loaders and associated loras, as they all score similarly and sort close to each other. It has a bias towards unloading model nodes mid flow while being able to keep results like text encodings and VAE.
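To make that scoring concrete, a rough sketch (the 1.3 multiplier and the 50% discount for CPU tensors come from the caching.py hunk below; the small per-entry baseline is ignored):

    # oom_score = RAM_CACHE_OLD_WORKFLOW_OOM_MULTIPLIER ** workflow_age * ram_usage
    1.3 ** 3 * 6.0          # 6 GB model untouched for 3 generations -> ~13.18
    1.3 ** 0 * (0.5 * 0.5)  # 0.5 GB CPU tensor from the current run -> 0.25

Entries are popped from the high end of the score list first, so the stale model is evicted before the fresh tensor.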
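For reference, a usage sketch of the new flag (behaviour inferred from the parser definition above and the main.py hunk further down):

    python main.py --cache-ram      # bare flag: headroom threshold of 4.0 GB (const=4.0)
    python main.py --cache-ram 8    # evict cached items once available RAM drops below ~8 GB
    python main.py                  # default 0: RAM pressure caching stays disabled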
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index ed3f3f5cb..674a214ca 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -276,6 +276,9 @@ class ModelPatcher: self.size = comfy.model_management.module_size(self.model) return self.size +    def get_ram_usage(self): +        return self.model_size() + def loaded_size(self): return self.model.model_loaded_weight_memory diff --git a/comfy/sd.py b/comfy/sd.py index de4eee96e..9e5ebbf15 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -143,6 +143,9 @@ class CLIP: n.apply_hooks_to_conds = self.apply_hooks_to_conds return n +    def get_ram_usage(self): +        return self.patcher.get_ram_usage() + def add_patches(self, patches, strength_patch=1.0, strength_model=1.0): return self.patcher.add_patches(patches, strength_patch, strength_model) @@ -293,6 +296,7 @@ class VAE: self.working_dtypes = [torch.bfloat16, torch.float32] self.disable_offload = False self.not_video = False +        self.size = None self.downscale_index_formula = None self.upscale_index_formula = None @@ -595,6 +599,16 @@ class VAE: self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device) logging.info("VAE load device: {}, offload device: {}, dtype: {}".format(self.device, offload_device, self.vae_dtype)) +        self.model_size() + +    def model_size(self): +        if self.size is not None: +            return self.size +        self.size = comfy.model_management.module_size(self.first_stage_model) +        return self.size + +    def get_ram_usage(self): +        return self.model_size() def throw_exception_if_invalid(self): if self.first_stage_model is None: diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py index 566bc3f9c..b498f43e7 100644 --- a/comfy_execution/caching.py +++ b/comfy_execution/caching.py @@ -1,4 +1,9 @@ +import bisect +import gc import itertools +import psutil +import time +import torch from typing import Sequence, Mapping, Dict from comfy_execution.graph import DynamicPrompt from abc import ABC, abstractmethod @@ -188,6 +193,9 @@ class BasicCache: self._clean_cache() self._clean_subcaches() +    def poll(self, **kwargs): +        pass + def _set_immediate(self, node_id, value): assert self.initialized cache_key = self.cache_key_set.get_data_key(node_id) @@ -276,6 +284,9 @@ class NullCache: def clean_unused(self): pass +    def poll(self, **kwargs): +        pass + def get(self, node_id): return None @@ -336,3 +347,75 @@ class LRUCache(BasicCache): self._mark_used(child_id) self.children[cache_key].append(self.cache_key_set.get_data_key(child_id)) return self + + +#Iterating the cache for usage analysis might be expensive, so if we trigger, make sure +#to take a chunk out to give breathing space on high-node / low-ram-per-node flows. + +RAM_CACHE_HYSTERESIS = 1.1 + +#This is roughly in GB but not exactly. It needs to be non-zero for the heuristic below, +#and as long as multi-GB models dwarf it, OOM scoring is approximated well enough. + +RAM_CACHE_DEFAULT_RAM_USAGE = 0.1 + +#Exponential bias towards evicting older workflows so garbage will be taken out +#in constantly changing setups.
+ +RAM_CACHE_OLD_WORKFLOW_OOM_MULTIPLIER = 1.3 + +class RAMPressureCache(LRUCache): + + def __init__(self, key_class): + super().__init__(key_class, 0) + self.timestamps = {} + + def clean_unused(self): + self._clean_subcaches() + + def set(self, node_id, value): + self.timestamps[self.cache_key_set.get_data_key(node_id)] = time.time() + super().set(node_id, value) + + def get(self, node_id): + self.timestamps[self.cache_key_set.get_data_key(node_id)] = time.time() + return super().get(node_id) + + def poll(self, ram_headroom): + def _ram_gb(): + return psutil.virtual_memory().available / (1024**3) + + if _ram_gb() > ram_headroom: + return + gc.collect() + if _ram_gb() > ram_headroom: + return + + clean_list = [] + + for key, (outputs, _), in self.cache.items(): + oom_score = RAM_CACHE_OLD_WORKFLOW_OOM_MULTIPLIER ** (self.generation - self.used_generation[key]) + + ram_usage = RAM_CACHE_DEFAULT_RAM_USAGE + def scan_list_for_ram_usage(outputs): + nonlocal ram_usage + for output in outputs: + if isinstance(output, list): + scan_list_for_ram_usage(output) + elif isinstance(output, torch.Tensor) and output.device.type == 'cpu': + #score Tensors at a 50% discount for RAM usage as they are likely to + #be high value intermediates + ram_usage += (output.numel() * output.element_size()) * 0.5 + elif hasattr(output, "get_ram_usage"): + ram_usage += output.get_ram_usage() + scan_list_for_ram_usage(outputs) + + oom_score *= ram_usage + #In the case where we have no information on the node ram usage at all, + #break OOM score ties on the last touch timestamp (pure LRU) + bisect.insort(clean_list, (oom_score, self.timestamps[key], key)) + + while _ram_gb() < ram_headroom * RAM_CACHE_HYSTERESIS and clean_list: + _, _, key = clean_list.pop() + del self.cache[key] + gc.collect() diff --git a/comfy_execution/graph.py b/comfy_execution/graph.py index 341c9735d..0d811e354 100644 --- a/comfy_execution/graph.py +++ b/comfy_execution/graph.py @@ -209,10 +209,15 @@ class ExecutionList(TopologicalSort): self.execution_cache_listeners[from_node_id] = set() self.execution_cache_listeners[from_node_id].add(to_node_id) - def get_output_cache(self, from_node_id, to_node_id): + def get_cache(self, from_node_id, to_node_id): if not to_node_id in self.execution_cache: return None - return self.execution_cache[to_node_id].get(from_node_id) + value = self.execution_cache[to_node_id].get(from_node_id) + if value is None: + return None + #Write back to the main cache on touch. 
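                #(for RAMPressureCache, this set() also refreshes the entry's
                # timestamp and LRU generation, so values touched during execution
                # score as recently used and survive the next poll())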
+ self.output_cache.set(from_node_id, value) + return value def cache_update(self, node_id, value): if node_id in self.execution_cache_listeners: diff --git a/execution.py b/execution.py index 20e106213..17c77beab 100644 --- a/execution.py +++ b/execution.py @@ -21,6 +21,7 @@ from comfy_execution.caching import ( NullCache, HierarchicalCache, LRUCache, + RAMPressureCache, ) from comfy_execution.graph import ( DynamicPrompt, @@ -88,49 +89,56 @@ class IsChangedCache: return self.is_changed[node_id] +class CacheEntry(NamedTuple): + ui: dict + outputs: list + + class CacheType(Enum): CLASSIC = 0 LRU = 1 NONE = 2 + RAM_PRESSURE = 3 class CacheSet: - def __init__(self, cache_type=None, cache_size=None): + def __init__(self, cache_type=None, cache_args={}): if cache_type == CacheType.NONE: self.init_null_cache() logging.info("Disabling intermediate node cache.") + elif cache_type == CacheType.RAM_PRESSURE: + cache_ram = cache_args.get("ram", 16.0) + self.init_ram_cache(cache_ram) + logging.info("Using RAM pressure cache.") elif cache_type == CacheType.LRU: - if cache_size is None: - cache_size = 0 + cache_size = cache_args.get("lru", 0) self.init_lru_cache(cache_size) logging.info("Using LRU cache") else: self.init_classic_cache() - self.all = [self.outputs, self.ui, self.objects] + self.all = [self.outputs, self.objects] # Performs like the old cache -- dump data ASAP def init_classic_cache(self): self.outputs = HierarchicalCache(CacheKeySetInputSignature) - self.ui = HierarchicalCache(CacheKeySetInputSignature) self.objects = HierarchicalCache(CacheKeySetID) def init_lru_cache(self, cache_size): self.outputs = LRUCache(CacheKeySetInputSignature, max_size=cache_size) - self.ui = LRUCache(CacheKeySetInputSignature, max_size=cache_size) + self.objects = HierarchicalCache(CacheKeySetID) + + def init_ram_cache(self, min_headroom): + self.outputs = RAMPressureCache(CacheKeySetInputSignature) self.objects = HierarchicalCache(CacheKeySetID) def init_null_cache(self): self.outputs = NullCache() - #The UI cache is expected to be iterable at the end of each workflow - #so it must cache at least a full workflow. 
Use Heirachical - self.ui = HierarchicalCache(CacheKeySetInputSignature) self.objects = NullCache() def recursive_debug_dump(self): result = { "outputs": self.outputs.recursive_debug_dump(), - "ui": self.ui.recursive_debug_dump(), } return result @@ -157,14 +165,14 @@ def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt= if execution_list is None: mark_missing() continue # This might be a lazily-evaluated input - cached_output = execution_list.get_output_cache(input_unique_id, unique_id) - if cached_output is None: + cached = execution_list.get_cache(input_unique_id, unique_id) + if cached is None or cached.outputs is None: mark_missing() continue - if output_index >= len(cached_output): + if output_index >= len(cached.outputs): mark_missing() continue - obj = cached_output[output_index] + obj = cached.outputs[output_index] input_data_all[x] = obj elif input_category is not None: input_data_all[x] = [input_data] @@ -393,7 +401,7 @@ def format_value(x): else: return str(x) -async def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results, pending_async_nodes): +async def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results, pending_async_nodes, ui_outputs): unique_id = current_item real_node_id = dynprompt.get_real_node_id(unique_id) display_node_id = dynprompt.get_display_node_id(unique_id) @@ -401,12 +409,15 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, inputs = dynprompt.get_node(unique_id)['inputs'] class_type = dynprompt.get_node(unique_id)['class_type'] class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - if caches.outputs.get(unique_id) is not None: + cached = caches.outputs.get(unique_id) + if cached is not None: if server.client_id is not None: - cached_output = caches.ui.get(unique_id) or {} - server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": cached_output.get("output",None), "prompt_id": prompt_id }, server.client_id) + cached_ui = cached.ui or {} + server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": cached_ui.get("output",None), "prompt_id": prompt_id }, server.client_id) + if cached.ui is not None: + ui_outputs[unique_id] = cached.ui get_progress_state().finish_progress(unique_id) - execution_list.cache_update(unique_id, caches.outputs.get(unique_id)) + execution_list.cache_update(unique_id, cached) return (ExecutionResult.SUCCESS, None, None) input_data_all = None @@ -436,8 +447,8 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, for r in result: if is_link(r): source_node, source_output = r[0], r[1] - node_output = execution_list.get_output_cache(source_node, unique_id)[source_output] - for o in node_output: + node_cached = execution_list.get_cache(source_node, unique_id) + for o in node_cached.outputs[source_output]: resolved_output.append(o) else: @@ -507,7 +518,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, asyncio.create_task(await_completion()) return (ExecutionResult.PENDING, None, None) if len(output_ui) > 0: - caches.ui.set(unique_id, { + ui_outputs[unique_id] = { "meta": { "node_id": unique_id, "display_node": display_node_id, @@ -515,7 +526,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, "real_node_id": real_node_id, }, "output": output_ui - }) + } if server.client_id is not 
None: server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": output_ui, "prompt_id": prompt_id }, server.client_id) if has_subgraph: @@ -554,8 +565,9 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, pending_subgraph_results[unique_id] = cached_outputs return (ExecutionResult.PENDING, None, None) - caches.outputs.set(unique_id, output_data) - execution_list.cache_update(unique_id, output_data) + cache_entry = CacheEntry(ui=ui_outputs.get(unique_id), outputs=output_data) + execution_list.cache_update(unique_id, cache_entry) + caches.outputs.set(unique_id, cache_entry) except comfy.model_management.InterruptProcessingException as iex: logging.info("Processing interrupted") @@ -600,14 +612,14 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, return (ExecutionResult.SUCCESS, None, None) class PromptExecutor: - def __init__(self, server, cache_type=False, cache_size=None): - self.cache_size = cache_size + def __init__(self, server, cache_type=False, cache_args=None): + self.cache_args = cache_args self.cache_type = cache_type self.server = server self.reset() def reset(self): - self.caches = CacheSet(cache_type=self.cache_type, cache_size=self.cache_size) + self.caches = CacheSet(cache_type=self.cache_type, cache_args=self.cache_args) self.status_messages = [] self.success = True @@ -682,6 +694,7 @@ class PromptExecutor: broadcast=False) pending_subgraph_results = {} pending_async_nodes = {} # TODO - Unify this with pending_subgraph_results + ui_node_outputs = {} executed = set() execution_list = ExecutionList(dynamic_prompt, self.caches.outputs) current_outputs = self.caches.outputs.all_node_ids() @@ -695,7 +708,7 @@ class PromptExecutor: break assert node_id is not None, "Node ID should not be None at this point" - result, error, ex = await execute(self.server, dynamic_prompt, self.caches, node_id, extra_data, executed, prompt_id, execution_list, pending_subgraph_results, pending_async_nodes) + result, error, ex = await execute(self.server, dynamic_prompt, self.caches, node_id, extra_data, executed, prompt_id, execution_list, pending_subgraph_results, pending_async_nodes, ui_node_outputs) self.success = result != ExecutionResult.FAILURE if result == ExecutionResult.FAILURE: self.handle_execution_error(prompt_id, dynamic_prompt.original_prompt, current_outputs, executed, error, ex) @@ -704,18 +717,16 @@ class PromptExecutor: execution_list.unstage_node_execution() else: # result == ExecutionResult.SUCCESS: execution_list.complete_node_execution() + self.caches.outputs.poll(ram_headroom=self.cache_args["ram"]) else: # Only execute when the while-loop ends without break self.add_message("execution_success", { "prompt_id": prompt_id }, broadcast=False) ui_outputs = {} meta_outputs = {} - all_node_ids = self.caches.ui.all_node_ids() - for node_id in all_node_ids: - ui_info = self.caches.ui.get(node_id) - if ui_info is not None: - ui_outputs[node_id] = ui_info["output"] - meta_outputs[node_id] = ui_info["meta"] + for node_id, ui_info in ui_node_outputs.items(): + ui_outputs[node_id] = ui_info["output"] + meta_outputs[node_id] = ui_info["meta"] self.history_result = { "outputs": ui_outputs, "meta": meta_outputs, diff --git a/main.py b/main.py index 8d466d2eb..e1b0f1620 100644 --- a/main.py +++ b/main.py @@ -172,10 +172,12 @@ def prompt_worker(q, server_instance): cache_type = execution.CacheType.CLASSIC if args.cache_lru > 0: cache_type = execution.CacheType.LRU + elif args.cache_ram > 0: + 
cache_type = execution.CacheType.RAM_PRESSURE elif args.cache_none: cache_type = execution.CacheType.NONE - e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_size=args.cache_lru) + e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_args={ "lru" : args.cache_lru, "ram" : args.cache_ram } ) last_gc_collect = 0 need_gc = False gc_collect_interval = 10.0 From 614cf9805e1056216487a2d1b1a07206d77f87e7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:11:38 -0700 Subject: [PATCH 0821/1073] Add a ScaleROPE node. Currently only works on WAN models. (#10559) --- comfy/ldm/wan/model.py | 20 ++++++++++++---- comfy/model_patcher.py | 13 +++++++++++ comfy_extras/nodes_rope.py | 47 ++++++++++++++++++++++++++++++++++++++ nodes.py | 1 + 4 files changed, 77 insertions(+), 4 deletions(-) create mode 100644 comfy_extras/nodes_rope.py diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 90c347d3d..77876c2e7 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -588,7 +588,7 @@ class WanModel(torch.nn.Module): x = self.unpatchify(x, grid_sizes) return x - def rope_encode(self, t, h, w, t_start=0, steps_t=None, steps_h=None, steps_w=None, device=None, dtype=None): + def rope_encode(self, t, h, w, t_start=0, steps_t=None, steps_h=None, steps_w=None, device=None, dtype=None, transformer_options={}): patch_size = self.patch_size t_len = ((t + (patch_size[0] // 2)) // patch_size[0]) h_len = ((h + (patch_size[1] // 2)) // patch_size[1]) @@ -601,10 +601,22 @@ class WanModel(torch.nn.Module): if steps_w is None: steps_w = w_len + h_start = 0 + w_start = 0 + rope_options = transformer_options.get("rope_options", None) + if rope_options is not None: + t_len = t_len * rope_options.get("scale_t", 1.0) + h_len = h_len * rope_options.get("scale_y", 1.0) + w_len = w_len * rope_options.get("scale_x", 1.0) + + t_start += rope_options.get("shift_t", 0.0) + h_start += rope_options.get("shift_y", 0.0) + w_start += rope_options.get("shift_x", 0.0) + img_ids = torch.zeros((steps_t, steps_h, steps_w, 3), device=device, dtype=dtype) img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(t_start, t_start + (t_len - 1), steps=steps_t, device=device, dtype=dtype).reshape(-1, 1, 1) - img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(0, h_len - 1, steps=steps_h, device=device, dtype=dtype).reshape(1, -1, 1) - img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(0, w_len - 1, steps=steps_w, device=device, dtype=dtype).reshape(1, 1, -1) + img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(h_start, h_start + (h_len - 1), steps=steps_h, device=device, dtype=dtype).reshape(1, -1, 1) + img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(w_start, w_start + (w_len - 1), steps=steps_w, device=device, dtype=dtype).reshape(1, 1, -1) img_ids = img_ids.reshape(1, -1, img_ids.shape[-1]) freqs = self.rope_embedder(img_ids).movedim(1, 2) @@ -630,7 +642,7 @@ class WanModel(torch.nn.Module): if self.ref_conv is not None and "reference_latent" in kwargs: t_len += 1 - freqs = self.rope_encode(t_len, h, w, device=x.device, dtype=x.dtype) + freqs = self.rope_encode(t_len, h, w, device=x.device, dtype=x.dtype, transformer_options=transformer_options) return self.forward_orig(x, timestep, context, clip_fea=clip_fea, freqs=freqs, transformer_options=transformer_options, **kwargs)[:, :, :t, :h, :w] def unpatchify(self, x, grid_sizes): diff --git a/comfy/model_patcher.py 
b/comfy/model_patcher.py index 674a214ca..3e8799983 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -454,6 +454,19 @@ class ModelPatcher: def set_model_post_input_patch(self, patch): self.set_model_patch(patch, "post_input") + def set_model_rope_options(self, scale_x, shift_x, scale_y, shift_y, scale_t, shift_t, **kwargs): + rope_options = self.model_options["transformer_options"].get("rope_options", {}) + rope_options["scale_x"] = scale_x + rope_options["scale_y"] = scale_y + rope_options["scale_t"] = scale_t + + rope_options["shift_x"] = shift_x + rope_options["shift_y"] = shift_y + rope_options["shift_t"] = shift_t + + self.model_options["transformer_options"]["rope_options"] = rope_options + + def add_object_patch(self, name, obj): self.object_patches[name] = obj diff --git a/comfy_extras/nodes_rope.py b/comfy_extras/nodes_rope.py new file mode 100644 index 000000000..d1feb031e --- /dev/null +++ b/comfy_extras/nodes_rope.py @@ -0,0 +1,47 @@ +from comfy_api.latest import ComfyExtension, io +from typing_extensions import override + + +class ScaleROPE(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ScaleROPE", + category="advanced/model_patches", + description="Scale and shift the ROPE of the model.", + is_experimental=True, + inputs=[ + io.Model.Input("model"), + io.Float.Input("scale_x", default=1.0, min=0.0, max=100.0, step=0.1), + io.Float.Input("shift_x", default=0.0, min=-256.0, max=256.0, step=0.1), + + io.Float.Input("scale_y", default=1.0, min=0.0, max=100.0, step=0.1), + io.Float.Input("shift_y", default=0.0, min=-256.0, max=256.0, step=0.1), + + io.Float.Input("scale_t", default=1.0, min=0.0, max=100.0, step=0.1), + io.Float.Input("shift_t", default=0.0, min=-256.0, max=256.0, step=0.1), + + + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, scale_x, shift_x, scale_y, shift_y, scale_t, shift_t) -> io.NodeOutput: + m = model.clone() + m.set_model_rope_options(scale_x, shift_x, scale_y, shift_y, scale_t, shift_t) + return io.NodeOutput(m) + + +class RopeExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + ScaleROPE + ] + + +async def comfy_entrypoint() -> RopeExtension: + return RopeExtension() diff --git a/nodes.py b/nodes.py index 12e365ca9..5689f6fe1 100644 --- a/nodes.py +++ b/nodes.py @@ -2329,6 +2329,7 @@ async def init_builtin_extra_nodes(): "nodes_model_patch.py", "nodes_easycache.py", "nodes_audio_encoder.py", + "nodes_rope.py", ] import_failed = [] From 27d1bd882925e3bbdffb405cea098ac52bb20ac5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:51:58 -0700 Subject: [PATCH 0822/1073] Fix rope scaling. 
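The scaling math, for reference: rope_encode builds each axis with
torch.linspace(start, start + (len - 1), steps), so the last position is
len - 1. Multiplying len itself (as the previous patch did) moves that
endpoint to len * scale - 1, which does not scale the 0..len-1 span
uniformly. The corrected form (len - 1.0) * scale + 1.0 keeps position 0
fixed and lands the endpoint on exactly (len - 1) * scale. An illustrative
check (values chosen only for the example):

    t_len, scale = 9, 0.5                          # positions 0..8
    old_end = t_len * scale - 1                    # 3.5 -- span of 8 becomes 3.5
    new_end = ((t_len - 1.0) * scale + 1.0) - 1    # 4.0 -- exactly half of 8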
(#10560) --- comfy/ldm/wan/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 77876c2e7..5ec1511ce 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -605,9 +605,9 @@ class WanModel(torch.nn.Module): w_start = 0 rope_options = transformer_options.get("rope_options", None) if rope_options is not None: - t_len = t_len * rope_options.get("scale_t", 1.0) - h_len = h_len * rope_options.get("scale_y", 1.0) - w_len = w_len * rope_options.get("scale_x", 1.0) + t_len = (t_len - 1.0) * rope_options.get("scale_t", 1.0) + 1.0 + h_len = (h_len - 1.0) * rope_options.get("scale_y", 1.0) + 1.0 + w_len = (w_len - 1.0) * rope_options.get("scale_x", 1.0) + 1.0 t_start += rope_options.get("shift_t", 0.0) h_start += rope_options.get("shift_y", 0.0) From 7f374e42c833c69c71605507b90f79cc26d14a71 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 31 Oct 2025 12:41:40 -0700 Subject: [PATCH 0823/1073] ScaleROPE now works on Lumina models. (#10578) --- comfy/ldm/lumina/model.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index f87d98ac0..b4494a51d 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -522,7 +522,7 @@ class NextDiT(nn.Module): max_cap_len = max(l_effective_cap_len) max_img_len = max(l_effective_img_len) - position_ids = torch.zeros(bsz, max_seq_len, 3, dtype=torch.int32, device=device) + position_ids = torch.zeros(bsz, max_seq_len, 3, dtype=torch.float32, device=device) for i in range(bsz): cap_len = l_effective_cap_len[i] @@ -531,10 +531,22 @@ class NextDiT(nn.Module): H_tokens, W_tokens = H // pH, W // pW assert H_tokens * W_tokens == img_len - position_ids[i, :cap_len, 0] = torch.arange(cap_len, dtype=torch.int32, device=device) + rope_options = transformer_options.get("rope_options", None) + h_scale = 1.0 + w_scale = 1.0 + h_start = 0 + w_start = 0 + if rope_options is not None: + h_scale = rope_options.get("scale_y", 1.0) + w_scale = rope_options.get("scale_x", 1.0) + + h_start = rope_options.get("shift_y", 0.0) + w_start = rope_options.get("shift_x", 0.0) + + position_ids[i, :cap_len, 0] = torch.arange(cap_len, dtype=torch.float32, device=device) position_ids[i, cap_len:cap_len+img_len, 0] = cap_len - row_ids = torch.arange(H_tokens, dtype=torch.int32, device=device).view(-1, 1).repeat(1, W_tokens).flatten() - col_ids = torch.arange(W_tokens, dtype=torch.int32, device=device).view(1, -1).repeat(H_tokens, 1).flatten() + row_ids = (torch.arange(H_tokens, dtype=torch.float32, device=device) * h_scale + h_start).view(-1, 1).repeat(1, W_tokens).flatten() + col_ids = (torch.arange(W_tokens, dtype=torch.float32, device=device) * w_scale + w_start).view(1, -1).repeat(H_tokens, 1).flatten() position_ids[i, cap_len:cap_len+img_len, 1] = row_ids position_ids[i, cap_len:cap_len+img_len, 2] = col_ids From c58c13b2bad6df0de93cc0cf107e96522a3cb5b3 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 31 Oct 2025 21:25:17 -0700 Subject: [PATCH 0824/1073] Fix torch compile regression on fp8 ops. 
(#10580) --- comfy/ops.py | 24 +++++------------ comfy/quant_ops.py | 27 +++++++++++++++---- .../comfy_quant/test_mixed_precision.py | 8 +++--- tests-unit/comfy_quant/test_quant_registry.py | 20 +++++++------- 4 files changed, 43 insertions(+), 36 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 18f6b804b..279f6b1a7 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -401,15 +401,9 @@ def fp8_linear(self, input): if dtype not in [torch.float8_e4m3fn]: return None - tensor_2d = False - if len(input.shape) == 2: - tensor_2d = True - input = input.unsqueeze(1) - - input_shape = input.shape input_dtype = input.dtype - if len(input.shape) == 3: + if input.ndim == 3 or input.ndim == 2: w, bias, offload_stream = cast_bias_weight(self, input, dtype=dtype, bias_dtype=input_dtype, offloadable=True) scale_weight = self.scale_weight @@ -422,24 +416,20 @@ def fp8_linear(self, input): if scale_input is None: scale_input = torch.ones((), device=input.device, dtype=torch.float32) input = torch.clamp(input, min=-448, max=448, out=input) - input = input.reshape(-1, input_shape[2]).to(dtype).contiguous() layout_params_weight = {'scale': scale_input, 'orig_dtype': input_dtype} - quantized_input = QuantizedTensor(input.reshape(-1, input_shape[2]).to(dtype).contiguous(), TensorCoreFP8Layout, layout_params_weight) + quantized_input = QuantizedTensor(input.to(dtype).contiguous(), "TensorCoreFP8Layout", layout_params_weight) else: scale_input = scale_input.to(input.device) - quantized_input = QuantizedTensor.from_float(input.reshape(-1, input_shape[2]), TensorCoreFP8Layout, scale=scale_input, dtype=dtype) + quantized_input = QuantizedTensor.from_float(input, "TensorCoreFP8Layout", scale=scale_input, dtype=dtype) # Wrap weight in QuantizedTensor - this enables unified dispatch # Call F.linear - __torch_dispatch__ routes to fp8_linear handler in quant_ops.py! 
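# A minimal sketch of the string-keyed layout registry this patch moves to
# (names follow the diff; that plain-string metadata on the tensor subclass
# traces better under torch.compile than class objects is an assumption):
LAYOUTS = {}

def register_layout(name):
    def deco(cls):
        LAYOUTS[name] = cls
        return cls
    return deco

@register_layout("TensorCoreFP8Layout")
class TensorCoreFP8Layout:  # illustrative stand-in, not the real class
    @staticmethod
    def dequantize(qdata, scale, orig_dtype):
        # illustrative only: cast back and undo the scale
        return qdata.to(orig_dtype) * scale

# QuantizedTensor then stores only the key string and resolves it at use:
# LAYOUTS[self._layout_type].dequantize(self._qdata, **self._layout_params)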
layout_params_weight = {'scale': scale_weight, 'orig_dtype': input_dtype} - quantized_weight = QuantizedTensor(w, TensorCoreFP8Layout, layout_params_weight) + quantized_weight = QuantizedTensor(w, "TensorCoreFP8Layout", layout_params_weight) o = torch.nn.functional.linear(quantized_input, quantized_weight, bias) uncast_bias_weight(self, w, bias, offload_stream) - - if tensor_2d: - return o.reshape(input_shape[0], -1) - return o.reshape((-1, input_shape[1], self.weight.shape[0])) + return o return None @@ -540,12 +530,12 @@ if CUBLAS_IS_AVAILABLE: # ============================================================================== # Mixed Precision Operations # ============================================================================== -from .quant_ops import QuantizedTensor, TensorCoreFP8Layout +from .quant_ops import QuantizedTensor QUANT_FORMAT_MIXINS = { "float8_e4m3fn": { "dtype": torch.float8_e4m3fn, - "layout_type": TensorCoreFP8Layout, + "layout_type": "TensorCoreFP8Layout", "parameters": { "weight_scale": torch.nn.Parameter(torch.zeros((), dtype=torch.float32), requires_grad=False), "input_scale": torch.nn.Parameter(torch.zeros((), dtype=torch.float32), requires_grad=False), diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index c822fe53c..873f173ed 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -123,7 +123,7 @@ class QuantizedTensor(torch.Tensor): layout_type: Layout class (subclass of QuantizedLayout) layout_params: Dict with layout-specific parameters """ - return torch.Tensor._make_subclass(cls, qdata, require_grad=False) + return torch.Tensor._make_wrapper_subclass(cls, qdata.shape, device=qdata.device, dtype=qdata.dtype, requires_grad=False) def __init__(self, qdata, layout_type, layout_params): self._qdata = qdata.contiguous() @@ -183,11 +183,11 @@ class QuantizedTensor(torch.Tensor): @classmethod def from_float(cls, tensor, layout_type, **quantize_kwargs) -> 'QuantizedTensor': - qdata, layout_params = layout_type.quantize(tensor, **quantize_kwargs) + qdata, layout_params = LAYOUTS[layout_type].quantize(tensor, **quantize_kwargs) return cls(qdata, layout_type, layout_params) def dequantize(self) -> torch.Tensor: - return self._layout_type.dequantize(self._qdata, **self._layout_params) + return LAYOUTS[self._layout_type].dequantize(self._qdata, **self._layout_params) @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): @@ -379,7 +379,12 @@ class TensorCoreFP8Layout(QuantizedLayout): return qtensor._qdata, qtensor._layout_params['scale'] -@register_layout_op(torch.ops.aten.linear.default, TensorCoreFP8Layout) +LAYOUTS = { + "TensorCoreFP8Layout": TensorCoreFP8Layout, +} + + +@register_layout_op(torch.ops.aten.linear.default, "TensorCoreFP8Layout") def fp8_linear(func, args, kwargs): input_tensor = args[0] weight = args[1] @@ -422,7 +427,7 @@ def fp8_linear(func, args, kwargs): 'scale': output_scale, 'orig_dtype': input_tensor._layout_params['orig_dtype'] } - return QuantizedTensor(output, TensorCoreFP8Layout, output_params) + return QuantizedTensor(output, "TensorCoreFP8Layout", output_params) else: return output @@ -436,3 +441,15 @@ def fp8_linear(func, args, kwargs): input_tensor = input_tensor.dequantize() return torch.nn.functional.linear(input_tensor, weight, bias) + + +@register_layout_op(torch.ops.aten.view.default, "TensorCoreFP8Layout") +@register_layout_op(torch.ops.aten.t.default, "TensorCoreFP8Layout") +def fp8_func(func, args, kwargs): + input_tensor = args[0] + if isinstance(input_tensor, QuantizedTensor): + plain_input, 
scale_a = TensorCoreFP8Layout.get_plain_tensors(input_tensor) + ar = list(args) + ar[0] = plain_input + return QuantizedTensor(func(*ar, **kwargs), "TensorCoreFP8Layout", input_tensor._layout_params) + return func(*args, **kwargs) diff --git a/tests-unit/comfy_quant/test_mixed_precision.py b/tests-unit/comfy_quant/test_mixed_precision.py index 267bc177b..f8d1fd04e 100644 --- a/tests-unit/comfy_quant/test_mixed_precision.py +++ b/tests-unit/comfy_quant/test_mixed_precision.py @@ -14,7 +14,7 @@ if not has_gpu(): args.cpu = True from comfy import ops -from comfy.quant_ops import QuantizedTensor, TensorCoreFP8Layout +from comfy.quant_ops import QuantizedTensor class SimpleModel(torch.nn.Module): @@ -104,14 +104,14 @@ class TestMixedPrecisionOps(unittest.TestCase): # Verify weights are wrapped in QuantizedTensor self.assertIsInstance(model.layer1.weight, QuantizedTensor) - self.assertEqual(model.layer1.weight._layout_type, TensorCoreFP8Layout) + self.assertEqual(model.layer1.weight._layout_type, "TensorCoreFP8Layout") # Layer 2 should NOT be quantized self.assertNotIsInstance(model.layer2.weight, QuantizedTensor) # Layer 3 should be quantized self.assertIsInstance(model.layer3.weight, QuantizedTensor) - self.assertEqual(model.layer3.weight._layout_type, TensorCoreFP8Layout) + self.assertEqual(model.layer3.weight._layout_type, "TensorCoreFP8Layout") # Verify scales were loaded self.assertEqual(model.layer1.weight._layout_params['scale'].item(), 2.0) @@ -155,7 +155,7 @@ class TestMixedPrecisionOps(unittest.TestCase): # Verify layer1.weight is a QuantizedTensor with scale preserved self.assertIsInstance(state_dict2["layer1.weight"], QuantizedTensor) self.assertEqual(state_dict2["layer1.weight"]._layout_params['scale'].item(), 3.0) - self.assertEqual(state_dict2["layer1.weight"]._layout_type, TensorCoreFP8Layout) + self.assertEqual(state_dict2["layer1.weight"]._layout_type, "TensorCoreFP8Layout") # Verify non-quantized layers are standard tensors self.assertNotIsInstance(state_dict2["layer2.weight"], QuantizedTensor) diff --git a/tests-unit/comfy_quant/test_quant_registry.py b/tests-unit/comfy_quant/test_quant_registry.py index 477811029..9cb54ede8 100644 --- a/tests-unit/comfy_quant/test_quant_registry.py +++ b/tests-unit/comfy_quant/test_quant_registry.py @@ -25,14 +25,14 @@ class TestQuantizedTensor(unittest.TestCase): scale = torch.tensor(2.0) layout_params = {'scale': scale, 'orig_dtype': torch.bfloat16} - qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + qt = QuantizedTensor(fp8_data, "TensorCoreFP8Layout", layout_params) self.assertIsInstance(qt, QuantizedTensor) self.assertEqual(qt.shape, (256, 128)) self.assertEqual(qt.dtype, torch.float8_e4m3fn) self.assertEqual(qt._layout_params['scale'], scale) self.assertEqual(qt._layout_params['orig_dtype'], torch.bfloat16) - self.assertEqual(qt._layout_type, TensorCoreFP8Layout) + self.assertEqual(qt._layout_type, "TensorCoreFP8Layout") def test_dequantize(self): """Test explicit dequantization""" @@ -41,7 +41,7 @@ class TestQuantizedTensor(unittest.TestCase): scale = torch.tensor(3.0) layout_params = {'scale': scale, 'orig_dtype': torch.float32} - qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + qt = QuantizedTensor(fp8_data, "TensorCoreFP8Layout", layout_params) dequantized = qt.dequantize() self.assertEqual(dequantized.dtype, torch.float32) @@ -54,7 +54,7 @@ class TestQuantizedTensor(unittest.TestCase): qt = QuantizedTensor.from_float( float_tensor, - TensorCoreFP8Layout, + "TensorCoreFP8Layout", scale=scale, 
dtype=torch.float8_e4m3fn ) @@ -77,28 +77,28 @@ class TestGenericUtilities(unittest.TestCase): fp8_data = torch.randn(10, 20, dtype=torch.float32).to(torch.float8_e4m3fn) scale = torch.tensor(1.5) layout_params = {'scale': scale, 'orig_dtype': torch.float32} - qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + qt = QuantizedTensor(fp8_data, "TensorCoreFP8Layout", layout_params) # Detach should return a new QuantizedTensor qt_detached = qt.detach() self.assertIsInstance(qt_detached, QuantizedTensor) self.assertEqual(qt_detached.shape, qt.shape) - self.assertEqual(qt_detached._layout_type, TensorCoreFP8Layout) + self.assertEqual(qt_detached._layout_type, "TensorCoreFP8Layout") def test_clone(self): """Test clone operation on quantized tensor""" fp8_data = torch.randn(10, 20, dtype=torch.float32).to(torch.float8_e4m3fn) scale = torch.tensor(1.5) layout_params = {'scale': scale, 'orig_dtype': torch.float32} - qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + qt = QuantizedTensor(fp8_data, "TensorCoreFP8Layout", layout_params) # Clone should return a new QuantizedTensor qt_cloned = qt.clone() self.assertIsInstance(qt_cloned, QuantizedTensor) self.assertEqual(qt_cloned.shape, qt.shape) - self.assertEqual(qt_cloned._layout_type, TensorCoreFP8Layout) + self.assertEqual(qt_cloned._layout_type, "TensorCoreFP8Layout") # Verify it's a deep copy self.assertIsNot(qt_cloned._qdata, qt._qdata) @@ -109,7 +109,7 @@ class TestGenericUtilities(unittest.TestCase): fp8_data = torch.randn(10, 20, dtype=torch.float32).to(torch.float8_e4m3fn) scale = torch.tensor(1.5) layout_params = {'scale': scale, 'orig_dtype': torch.float32} - qt = QuantizedTensor(fp8_data, TensorCoreFP8Layout, layout_params) + qt = QuantizedTensor(fp8_data, "TensorCoreFP8Layout", layout_params) # Moving to same device should work (CPU to CPU) qt_cpu = qt.to('cpu') @@ -169,7 +169,7 @@ class TestFallbackMechanism(unittest.TestCase): scale = torch.tensor(1.0) a_q = QuantizedTensor.from_float( a_fp32, - TensorCoreFP8Layout, + "TensorCoreFP8Layout", scale=scale, dtype=torch.float8_e4m3fn ) From 5f109fe6a06a3462b31a066bcfd650de67d66102 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 1 Nov 2025 21:13:39 +0200 Subject: [PATCH 0825/1073] added 12s-20s as available output durations for the LTXV API nodes (#10570) --- comfy_api_nodes/nodes_ltxv.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/comfy_api_nodes/nodes_ltxv.py b/comfy_api_nodes/nodes_ltxv.py index e6ad6e27a..0b757a62b 100644 --- a/comfy_api_nodes/nodes_ltxv.py +++ b/comfy_api_nodes/nodes_ltxv.py @@ -46,7 +46,7 @@ class TextToVideoNode(IO.ComfyNode): multiline=True, default="", ), - IO.Combo.Input("duration", options=[6, 8, 10], default=8), + IO.Combo.Input("duration", options=[6, 8, 10, 12, 14, 16, 18, 20], default=8), IO.Combo.Input( "resolution", options=[ @@ -85,6 +85,10 @@ class TextToVideoNode(IO.ComfyNode): generate_audio: bool = False, ) -> IO.NodeOutput: validate_string(prompt, min_length=1, max_length=10000) + if duration > 10 and (model != "LTX-2 (Fast)" or resolution != "1920x1080" or fps != 25): + raise ValueError( + "Durations over 10s are only available for the Fast model at 1920x1080 resolution and 25 FPS." 
+ ) response = await sync_op_raw( cls, ApiEndpoint("/proxy/ltx/v1/text-to-video", "POST"), @@ -118,7 +122,7 @@ class ImageToVideoNode(IO.ComfyNode): multiline=True, default="", ), - IO.Combo.Input("duration", options=[6, 8, 10], default=8), + IO.Combo.Input("duration", options=[6, 8, 10, 12, 14, 16, 18, 20], default=8), IO.Combo.Input( "resolution", options=[ @@ -158,6 +162,10 @@ class ImageToVideoNode(IO.ComfyNode): generate_audio: bool = False, ) -> IO.NodeOutput: validate_string(prompt, min_length=1, max_length=10000) + if duration > 10 and (model != "LTX-2 (Fast)" or resolution != "1920x1080" or fps != 25): + raise ValueError( + "Durations over 10s are only available for the Fast model at 1920x1080 resolution and 25 FPS." + ) if get_number_of_images(image) != 1: raise ValueError("Currently only one input image is supported.") response = await sync_op_raw( From 20182a393f43ab1fdf798f8da6aac0ef6116e7e6 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 1 Nov 2025 21:14:06 +0200 Subject: [PATCH 0826/1073] convert StabilityAI to use new API client (#10582) --- comfy_api_nodes/nodes_stability.py | 179 ++++++++--------------------- comfy_api_nodes/util/client.py | 4 +- 2 files changed, 48 insertions(+), 135 deletions(-) diff --git a/comfy_api_nodes/nodes_stability.py b/comfy_api_nodes/nodes_stability.py index 783666ddf..bb7ceed78 100644 --- a/comfy_api_nodes/nodes_stability.py +++ b/comfy_api_nodes/nodes_stability.py @@ -20,13 +20,6 @@ from comfy_api_nodes.apis.stability_api import ( StabilityAudioInpaintRequest, StabilityAudioResponse, ) -from comfy_api_nodes.apis.client import ( - ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, - EmptyRequest, -) from comfy_api_nodes.util import ( validate_audio_duration, validate_string, @@ -34,6 +27,9 @@ from comfy_api_nodes.util import ( bytesio_to_image_tensor, tensor_to_bytesio, audio_bytes_to_audio_input, + sync_op, + poll_op, + ApiEndpoint, ) import torch @@ -161,19 +157,11 @@ class StabilityStableImageUltraNode(IO.ComfyNode): "image": image_binary } - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/stable-image/generate/ultra", - method=HttpMethod.POST, - request_model=StabilityStableUltraRequest, - response_model=StabilityStableUltraResponse, - ), - request=StabilityStableUltraRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/stability/v2beta/stable-image/generate/ultra", method="POST"), + response_model=StabilityStableUltraResponse, + data=StabilityStableUltraRequest( prompt=prompt, negative_prompt=negative_prompt, aspect_ratio=aspect_ratio, @@ -183,9 +171,7 @@ class StabilityStableImageUltraNode(IO.ComfyNode): ), files=files, content_type="multipart/form-data", - auth_kwargs=auth, ) - response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stable Image Ultra generation failed: {response_api.finish_reason}.") @@ -313,19 +299,11 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode): "image": image_binary } - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/stable-image/generate/sd3", - method=HttpMethod.POST, - request_model=StabilityStable3_5Request, - 
response_model=StabilityStableUltraResponse, - ), - request=StabilityStable3_5Request( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/stability/v2beta/stable-image/generate/sd3", method="POST"), + response_model=StabilityStableUltraResponse, + data=StabilityStable3_5Request( prompt=prompt, negative_prompt=negative_prompt, aspect_ratio=aspect_ratio, @@ -338,9 +316,7 @@ class StabilityStableImageSD_3_5Node(IO.ComfyNode): ), files=files, content_type="multipart/form-data", - auth_kwargs=auth, ) - response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stable Diffusion 3.5 Image generation failed: {response_api.finish_reason}.") @@ -427,19 +403,11 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode): "image": image_binary } - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/stable-image/upscale/conservative", - method=HttpMethod.POST, - request_model=StabilityUpscaleConservativeRequest, - response_model=StabilityStableUltraResponse, - ), - request=StabilityUpscaleConservativeRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/stability/v2beta/stable-image/upscale/conservative", method="POST"), + response_model=StabilityStableUltraResponse, + data=StabilityUpscaleConservativeRequest( prompt=prompt, negative_prompt=negative_prompt, creativity=round(creativity,2), @@ -447,9 +415,7 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode): ), files=files, content_type="multipart/form-data", - auth_kwargs=auth, ) - response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stability Upscale Conservative generation failed: {response_api.finish_reason}.") @@ -544,19 +510,11 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode): "image": image_binary } - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/stable-image/upscale/creative", - method=HttpMethod.POST, - request_model=StabilityUpscaleCreativeRequest, - response_model=StabilityAsyncResponse, - ), - request=StabilityUpscaleCreativeRequest( + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/stability/v2beta/stable-image/upscale/creative", method="POST"), + response_model=StabilityAsyncResponse, + data=StabilityUpscaleCreativeRequest( prompt=prompt, negative_prompt=negative_prompt, creativity=round(creativity,2), @@ -565,25 +523,15 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode): ), files=files, content_type="multipart/form-data", - auth_kwargs=auth, ) - response_api = await operation.execute() - operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"/proxy/stability/v2beta/results/{response_api.id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=StabilityResultsGetResponse, - ), + response_poll = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/stability/v2beta/results/{response_api.id}"), + response_model=StabilityResultsGetResponse, poll_interval=3, - completed_statuses=[StabilityPollStatus.finished], - failed_statuses=[StabilityPollStatus.failed], status_extractor=lambda x: get_async_dummy_status(x), - auth_kwargs=auth, - node_id=cls.hidden.unique_id, ) - response_poll: StabilityResultsGetResponse = await operation.execute() if 
response_poll.finish_reason != "SUCCESS": raise Exception(f"Stability Upscale Creative generation failed: {response_poll.finish_reason}.") @@ -628,24 +576,13 @@ class StabilityUpscaleFastNode(IO.ComfyNode): "image": image_binary } - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/stable-image/upscale/fast", - method=HttpMethod.POST, - request_model=EmptyRequest, - response_model=StabilityStableUltraResponse, - ), - request=EmptyRequest(), + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/stability/v2beta/stable-image/upscale/fast", method="POST"), + response_model=StabilityStableUltraResponse, files=files, content_type="multipart/form-data", - auth_kwargs=auth, ) - response_api = await operation.execute() if response_api.finish_reason != "SUCCESS": raise Exception(f"Stability Upscale Fast failed: {response_api.finish_reason}.") @@ -717,21 +654,13 @@ class StabilityTextToAudio(IO.ComfyNode): async def execute(cls, model: str, prompt: str, duration: int, seed: int, steps: int) -> IO.NodeOutput: validate_string(prompt, max_length=10000) payload = StabilityTextToAudioRequest(prompt=prompt, model=model, duration=duration, seed=seed, steps=steps) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/audio/stable-audio-2/text-to-audio", - method=HttpMethod.POST, - request_model=StabilityTextToAudioRequest, - response_model=StabilityAudioResponse, - ), - request=payload, + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/stability/v2beta/audio/stable-audio-2/text-to-audio", method="POST"), + response_model=StabilityAudioResponse, + data=payload, content_type="multipart/form-data", - auth_kwargs= { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, ) - response_api = await operation.execute() if not response_api.audio: raise ValueError("No audio file was received in response.") return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) @@ -814,22 +743,14 @@ class StabilityAudioToAudio(IO.ComfyNode): payload = StabilityAudioToAudioRequest( prompt=prompt, model=model, duration=duration, seed=seed, steps=steps, strength=strength ) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/audio/stable-audio-2/audio-to-audio", - method=HttpMethod.POST, - request_model=StabilityAudioToAudioRequest, - response_model=StabilityAudioResponse, - ), - request=payload, + response_api = await sync_op( + cls, + ApiEndpoint(path="/proxy/stability/v2beta/audio/stable-audio-2/audio-to-audio", method="POST"), + response_model=StabilityAudioResponse, + data=payload, content_type="multipart/form-data", files={"audio": audio_input_to_mp3(audio)}, - auth_kwargs= { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - }, ) - response_api = await operation.execute() if not response_api.audio: raise ValueError("No audio file was received in response.") return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio))) @@ -935,22 +856,14 @@ class StabilityAudioInpaint(IO.ComfyNode): mask_start=mask_start, mask_end=mask_end, ) - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/stability/v2beta/audio/stable-audio-2/inpaint", - method=HttpMethod.POST, - request_model=StabilityAudioInpaintRequest, - 
response_model=StabilityAudioResponse,
-            ),
-            request=payload,
+        response_api = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path="/proxy/stability/v2beta/audio/stable-audio-2/inpaint", method="POST"),
+            response_model=StabilityAudioResponse,
+            data=payload,
             content_type="multipart/form-data",
             files={"audio": audio_input_to_mp3(audio)},
-            auth_kwargs={
-                "auth_token": cls.hidden.auth_token_comfy_org,
-                "comfy_api_key": cls.hidden.api_key_comfy_org,
-            },
         )
-        response_api = await operation.execute()
         if not response_api.audio:
             raise ValueError("No audio file was received in response.")
         return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))
diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py
index 9ae512fe5..65bb35f0f 100644
--- a/comfy_api_nodes/util/client.py
+++ b/comfy_api_nodes/util/client.py
@@ -77,7 +77,7 @@ class _PollUIState:
 
 _RETRY_STATUS = {408, 429, 500, 502, 503, 504}
 
-COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed"]
+COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed", "finished"]
 FAILED_STATUSES = ["cancelled", "canceled", "fail", "failed", "error"]
 QUEUED_STATUSES = ["created", "queued", "queueing", "submitted"]
 
@@ -589,7 +589,7 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool):
             operation_id = _generate_operation_id(method, cfg.endpoint.path, attempt)
             logging.debug("[DEBUG] HTTP %s %s (attempt %d)", method, url, attempt)
 
-            payload_headers = {"Accept": "*/*"}
+            payload_headers = {"Accept": "*/*"} if expect_binary else {"Accept": "application/json"}
             if not parsed_url.scheme and not parsed_url.netloc:  # is URL relative?
                 payload_headers.update(get_auth_header(cfg.node_cls))
             if cfg.endpoint.headers:

From 44869ff786dc90b36172fd766c9a110e4c40c04b Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 1 Nov 2025 14:25:59 -0700
Subject: [PATCH 0827/1073] Fix issue with pinned memory. (#10597)

---
 comfy/model_patcher.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 3e8799983..5a31a8734 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -298,6 +298,7 @@ class ModelPatcher:
             n.backup = self.backup
             n.object_patches_backup = self.object_patches_backup
             n.parent = self
+            n.pinned = self.pinned
 
             n.force_cast_weights = self.force_cast_weights

From 135fa49ec23320834f774cf3def9e51ad3773f86 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Sun, 2 Nov 2025 08:48:53 +1000
Subject: [PATCH 0828/1073] Small speed improvements to --async-offload
 (#10593)

* ops: don't take an offload stream if you don't need one

* ops: prioritize mem transfer

The async offload stream's reason for existence is to transfer from RAM
to GPU. The post-processing compute steps are a bonus on the side
stream, but if the compute stream is running a long kernel, it can
stall the side stream as it waits to type-cast the bias before
transferring the weight.

So do a pure xfer of the weight straight up, then do everything bias,
then go back to fix the weight type and do weight patches.
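The ordering described above, as a minimal sketch (assumes a CUDA device, a
dedicated side stream, and the usual event synchronization before the weight
is consumed; it mirrors the reasoning, not the exact comfy.ops code):

    import torch

    def cast_bias_weight_ordered(module, device, dtype, offload_stream):
        with torch.cuda.stream(offload_stream):
            # 1. kick off the big RAM->GPU weight transfer first, no cast yet
            weight = module.weight.to(device, non_blocking=True)
            # 2. the small bias transfer and its cast ride along afterwards
            bias = None
            if module.bias is not None:
                bias = module.bias.to(device, dtype=dtype, non_blocking=True)
            # 3. only now type-cast the weight (and apply any weight patches)
            weight = weight.to(dtype=dtype)
        return weight, bias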
---
 comfy/ops.py | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/comfy/ops.py b/comfy/ops.py
index 279f6b1a7..0c8f23848 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -84,7 +84,8 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of
         if device is None:
             device = input.device
 
-    if offloadable:
+    if offloadable and (device != s.weight.device or
+                        (s.bias is not None and device != s.bias.device)):
         offload_stream = comfy.model_management.get_offload_stream(device)
     else:
         offload_stream = None
@@ -94,20 +95,24 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of
         wf_context = contextlib.nullcontext()
 
-    bias = None
     non_blocking = comfy.model_management.device_supports_non_blocking(device)
-    if s.bias is not None:
-        has_function = len(s.bias_function) > 0
-        bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function, stream=offload_stream)
-        if has_function:
+    weight_has_function = len(s.weight_function) > 0
+    bias_has_function = len(s.bias_function) > 0
+
+    weight = comfy.model_management.cast_to(s.weight, None, device, non_blocking=non_blocking, copy=weight_has_function, stream=offload_stream)
+
+    bias = None
+    if s.bias is not None:
+        bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=bias_has_function, stream=offload_stream)
+
+        if bias_has_function:
             with wf_context:
                 for f in s.bias_function:
                     bias = f(bias)
 
-    has_function = len(s.weight_function) > 0
-    weight = comfy.model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function, stream=offload_stream)
-    if has_function:
+    weight = weight.to(dtype=dtype)
+    if weight_has_function:
         with wf_context:
             for f in s.weight_function:
                 weight = f(weight)

From 97ff9fae7e728cffdfc3aee6d72aa1e0d0b78702 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sun, 2 Nov 2025 10:14:04 -0800
Subject: [PATCH 0829/1073] Clarify help text for --fast argument (#10609)

Updated help text for the --fast argument to clarify potential risks.
---
 comfy/cli_args.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 3d5bc7c90..3947e62a8 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -147,7 +147,7 @@ class PerformanceFeature(enum.Enum):
     AutoTune = "autotune"
     PinnedMem = "pinned_memory"
 
-parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: {}".format(" ".join(map(lambda c: c.value, PerformanceFeature))))
+parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. This is used to test new features so using it might crash your comfyui. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones.
Current valid optimizations: {}".format(" ".join(map(lambda c: c.value, PerformanceFeature)))) parser.add_argument("--mmap-torch-files", action="store_true", help="Use mmap when loading ckpt/pt files.") parser.add_argument("--disable-mmap", action="store_true", help="Don't use mmap when loading safetensors.") From 6d6a18b0b730351416556eeb0990ab219ffec189 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 3 Nov 2025 10:04:56 +0200 Subject: [PATCH 0830/1073] fix(api-nodes-cloud): stop using sub-folder and absolute path for output of Rodin3D nodes (#10556) --- comfy_api_nodes/nodes_rodin.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index cf2172bd6..ad4029236 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -225,21 +225,20 @@ async def get_rodin_download_list(uuid, auth_kwargs: Optional[dict[str, str]] = async def download_files(url_list, task_uuid): - save_path = os.path.join(comfy_paths.get_output_directory(), f"Rodin3D_{task_uuid}") + result_folder_name = f"Rodin3D_{task_uuid}" + save_path = os.path.join(comfy_paths.get_output_directory(), result_folder_name) os.makedirs(save_path, exist_ok=True) model_file_path = None async with aiohttp.ClientSession() as session: for i in url_list.list: - url = i.url - file_name = i.name - file_path = os.path.join(save_path, file_name) + file_path = os.path.join(save_path, i.name) if file_path.endswith(".glb"): - model_file_path = file_path + model_file_path = os.path.join(result_folder_name, i.name) logging.info("[ Rodin3D API - download_files ] Downloading file: %s", file_path) max_retries = 5 for attempt in range(max_retries): try: - async with session.get(url) as resp: + async with session.get(i.url) as resp: resp.raise_for_status() with open(file_path, "wb") as f: async for chunk in resp.content.iter_chunked(32 * 1024): From 88df172790b8ed7b2e6ea7c0f0bd63ca3553921b Mon Sep 17 00:00:00 2001 From: EverNebula Date: Mon, 3 Nov 2025 16:16:40 +0800 Subject: [PATCH 0831/1073] fix(caching): treat bytes as hashable (#10567) --- comfy_execution/caching.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py index b498f43e7..e077f78b0 100644 --- a/comfy_execution/caching.py +++ b/comfy_execution/caching.py @@ -53,7 +53,7 @@ class Unhashable: def to_hashable(obj): # So that we don't infinitely recurse since frozenset and tuples # are Sequences. 
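# bytes is itself registered as a Sequence, so without the entry added below
# a bytes value would head down the element-wise Sequence path instead of
# being kept whole; since bytes is immutable and hashable (hash(b"abc") is
# valid), it can pass through unchanged.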
- if isinstance(obj, (int, float, str, bool, type(None))): + if isinstance(obj, (int, float, str, bool, bytes, type(None))): return obj elif isinstance(obj, Mapping): return frozenset([(to_hashable(k), to_hashable(v)) for k, v in sorted(obj.items())]) From 1f3f7a2823017ad193e060d9221fb6a52f2eba3a Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 3 Nov 2025 10:21:47 +0200 Subject: [PATCH 0832/1073] convert nodes_hypernetwork.py to V3 schema (#10583) --- comfy_extras/nodes_hypernetwork.py | 48 ++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index 665632292..2a6a87a81 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -2,6 +2,9 @@ import comfy.utils import folder_paths import torch import logging +from comfy_api.latest import IO, ComfyExtension +from typing_extensions import override + def load_hypernetwork_patch(path, strength): sd = comfy.utils.load_torch_file(path, safe_load=True) @@ -94,27 +97,42 @@ def load_hypernetwork_patch(path, strength): return hypernetwork_patch(out, strength) -class HypernetworkLoader: +class HypernetworkLoader(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "hypernetwork_name": (folder_paths.get_filename_list("hypernetworks"), ), - "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "load_hypernetwork" + def define_schema(cls): + return IO.Schema( + node_id="HypernetworkLoader", + category="loaders", + inputs=[ + IO.Model.Input("model"), + IO.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")), + IO.Float.Input("strength", default=1.0, min=-10.0, max=10.0, step=0.01), + ], + outputs=[ + IO.Model.Output(), + ], + ) - CATEGORY = "loaders" - - def load_hypernetwork(self, model, hypernetwork_name, strength): + @classmethod + def execute(cls, model, hypernetwork_name, strength) -> IO.NodeOutput: hypernetwork_path = folder_paths.get_full_path_or_raise("hypernetworks", hypernetwork_name) model_hypernetwork = model.clone() patch = load_hypernetwork_patch(hypernetwork_path, strength) if patch is not None: model_hypernetwork.set_model_attn1_patch(patch) model_hypernetwork.set_model_attn2_patch(patch) - return (model_hypernetwork,) + return IO.NodeOutput(model_hypernetwork) -NODE_CLASS_MAPPINGS = { - "HypernetworkLoader": HypernetworkLoader -} + load_hypernetwork = execute # TODO: remove + + +class HyperNetworkExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + HypernetworkLoader, + ] + + +async def comfy_entrypoint() -> HyperNetworkExtension: + return HyperNetworkExtension() From e617cddf244e4b789afba4b4ece01661b12fdde5 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 3 Nov 2025 10:28:13 +0200 Subject: [PATCH 0833/1073] convert nodes_openai.py to V3 schema (#10604) --- comfy_api_nodes/apinode_utils.py | 73 -- comfy_api_nodes/nodes_openai.py | 1094 +++++++++++---------------- comfy_api_nodes/util/__init__.py | 4 + comfy_api_nodes/util/conversions.py | 19 +- 4 files changed, 482 insertions(+), 708 deletions(-) delete mode 100644 comfy_api_nodes/apinode_utils.py diff --git a/comfy_api_nodes/apinode_utils.py b/comfy_api_nodes/apinode_utils.py deleted file mode 100644 index ecd604ff8..000000000 
--- a/comfy_api_nodes/apinode_utils.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import annotations -import aiohttp -import mimetypes -from typing import Union -from server import PromptServer - -import numpy as np -from PIL import Image -import torch -import base64 -from io import BytesIO - - -async def validate_and_cast_response( - response, timeout: int = None, node_id: Union[str, None] = None -) -> torch.Tensor: - """Validates and casts a response to a torch.Tensor. - - Args: - response: The response to validate and cast. - timeout: Request timeout in seconds. Defaults to None (no timeout). - - Returns: - A torch.Tensor representing the image (1, H, W, C). - - Raises: - ValueError: If the response is not valid. - """ - # validate raw JSON response - data = response.data - if not data or len(data) == 0: - raise ValueError("No images returned from API endpoint") - - # Initialize list to store image tensors - image_tensors: list[torch.Tensor] = [] - - # Process each image in the data array - async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session: - for img_data in data: - img_bytes: bytes - if img_data.b64_json: - img_bytes = base64.b64decode(img_data.b64_json) - elif img_data.url: - if node_id: - PromptServer.instance.send_progress_text(f"Result URL: {img_data.url}", node_id) - async with session.get(img_data.url) as resp: - if resp.status != 200: - raise ValueError("Failed to download generated image") - img_bytes = await resp.read() - else: - raise ValueError("Invalid image payload – neither URL nor base64 data present.") - - pil_img = Image.open(BytesIO(img_bytes)).convert("RGBA") - arr = np.asarray(pil_img).astype(np.float32) / 255.0 - image_tensors.append(torch.from_numpy(arr)) - - return torch.stack(image_tensors, dim=0) - - -def text_filepath_to_base64_string(filepath: str) -> str: - """Converts a text file to a base64 string.""" - with open(filepath, "rb") as f: - file_content = f.read() - return base64.b64encode(file_content).decode("utf-8") - - -def text_filepath_to_data_uri(filepath: str) -> str: - """Converts a text file to a data URI.""" - base64_string = text_filepath_to_base64_string(filepath) - mime_type, _ = mimetypes.guess_type(filepath) - if mime_type is None: - mime_type = "application/octet-stream" - return f"data:{mime_type};base64,{base64_string}" diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index b4568fc85..acf35d276 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -1,18 +1,19 @@ -import io -from typing import TypedDict, Optional +from io import BytesIO +from typing import Optional, Union import json import os import time -import re import uuid from enum import Enum from inspect import cleandoc import numpy as np import torch from PIL import Image -from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict from server import PromptServer import folder_paths +import base64 +from comfy_api.latest import IO, ComfyExtension +from typing_extensions import override from comfy_api_nodes.apis import ( @@ -23,7 +24,6 @@ from comfy_api_nodes.apis import ( OpenAIResponse, CreateModelResponseProperties, Item, - Includable, OutputContent, InputImageContent, Detail, @@ -34,41 +34,22 @@ from comfy_api_nodes.apis import ( InputFileContent, ) -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( + downscale_image_tensor, + download_url_to_bytesio, + validate_string, + tensor_to_base64_string, ApiEndpoint, - HttpMethod, - 
SynchronousOperation, - PollingOperation, - EmptyRequest, -) - -from comfy_api_nodes.apinode_utils import ( - validate_and_cast_response, + sync_op, + poll_op, text_filepath_to_data_uri, ) -from comfy_api_nodes.mapper_utils import model_field_to_node_input -from comfy_api_nodes.util import downscale_image_tensor, validate_string, tensor_to_base64_string RESPONSES_ENDPOINT = "/proxy/openai/v1/responses" STARTING_POINT_ID_PATTERN = r"" -class HistoryEntry(TypedDict): - """Type definition for a single history entry in the chat.""" - - prompt: str - response: str - response_id: str - timestamp: float - - -class ChatHistory(TypedDict): - """Type definition for the chat history dictionary.""" - - __annotations__: dict[str, list[HistoryEntry]] - - class SupportedOpenAIModel(str, Enum): o4_mini = "o4-mini" o1 = "o1" @@ -83,98 +64,123 @@ class SupportedOpenAIModel(str, Enum): gpt_5_nano = "gpt-5-nano" -class OpenAIDalle2(ComfyNodeABC): +async def validate_and_cast_response(response, timeout: int = None) -> torch.Tensor: + """Validates and casts a response to a torch.Tensor. + + Args: + response: The response to validate and cast. + timeout: Request timeout in seconds. Defaults to None (no timeout). + + Returns: + A torch.Tensor representing the image (1, H, W, C). + + Raises: + ValueError: If the response is not valid. + """ + # validate raw JSON response + data = response.data + if not data or len(data) == 0: + raise ValueError("No images returned from API endpoint") + + # Initialize list to store image tensors + image_tensors: list[torch.Tensor] = [] + + # Process each image in the data array + for img_data in data: + if img_data.b64_json: + img_io = BytesIO(base64.b64decode(img_data.b64_json)) + elif img_data.url: + img_io = BytesIO() + await download_url_to_bytesio(img_data.url, img_io, timeout=timeout) + else: + raise ValueError("Invalid image payload – neither URL nor base64 data present.") + + pil_img = Image.open(img_io).convert("RGBA") + arr = np.asarray(pil_img).astype(np.float32) / 255.0 + image_tensors.append(torch.from_numpy(arr)) + + return torch.stack(image_tensors, dim=0) + + +class OpenAIDalle2(IO.ComfyNode): """ Generates images synchronously via OpenAI's DALL·E 2 endpoint. 
""" - def __init__(self): - pass + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="OpenAIDalle2", + display_name="OpenAI DALL·E 2", + category="api node/image/OpenAI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + IO.String.Input( + "prompt", + default="", + multiline=True, + tooltip="Text prompt for DALL·E", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=2**31 - 1, + step=1, + display_mode=IO.NumberDisplay.number, + control_after_generate=True, + tooltip="not implemented yet in backend", + optional=True, + ), + IO.Combo.Input( + "size", + default="1024x1024", + options=["256x256", "512x512", "1024x1024"], + tooltip="Image size", + optional=True, + ), + IO.Int.Input( + "n", + default=1, + min=1, + max=8, + step=1, + tooltip="How many images to generate", + display_mode=IO.NumberDisplay.number, + optional=True, + ), + IO.Image.Input( + "image", + tooltip="Optional reference image for image editing.", + optional=True, + ), + IO.Mask.Input( + "mask", + tooltip="Optional mask for inpainting (white areas will be replaced)", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Text prompt for DALL·E", - }, - ), - }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2**31 - 1, - "step": 1, - "display": "number", - "control_after_generate": True, - "tooltip": "not implemented yet in backend", - }, - ), - "size": ( - IO.COMBO, - { - "options": ["256x256", "512x512", "1024x1024"], - "default": "1024x1024", - "tooltip": "Image size", - }, - ), - "n": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 8, - "step": 1, - "display": "number", - "tooltip": "How many images to generate", - }, - ), - "image": ( - IO.IMAGE, - { - "default": None, - "tooltip": "Optional reference image for image editing.", - }, - ), - "mask": ( - IO.MASK, - { - "default": None, - "tooltip": "Optional mask for inpainting (white areas will be replaced)", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node/image/OpenAI" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - async def api_call( - self, + async def execute( + cls, prompt, seed=0, image=None, mask=None, n=1, size="1024x1024", - unique_id=None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) model = "dall-e-2" path = "/proxy/openai/images/generations" @@ -200,7 +206,7 @@ class OpenAIDalle2(ComfyNodeABC): image_np = (rgba_tensor.numpy() * 255).astype(np.uint8) img = Image.fromarray(image_np) - img_byte_arr = io.BytesIO() + img_byte_arr = BytesIO() img.save(img_byte_arr, format="PNG") img_byte_arr.seek(0) img_binary = img_byte_arr # .getvalue() @@ -208,15 +214,11 @@ class OpenAIDalle2(ComfyNodeABC): elif image is not None or mask is not None: raise Exception("Dall-E 2 image editing requires an image AND a mask") - # Build the operation - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=request_class, - response_model=OpenAIImageGenerationResponse, - ), - request=request_class( + response = await sync_op( + cls, + 
ApiEndpoint(path=path, method="POST"), + response_model=OpenAIImageGenerationResponse, + data=request_class( model=model, prompt=prompt, n=n, @@ -231,109 +233,92 @@ class OpenAIDalle2(ComfyNodeABC): else None ), content_type=content_type, - auth_kwargs=kwargs, ) - response = await operation.execute() - - img_tensor = await validate_and_cast_response(response, node_id=unique_id) - return (img_tensor,) + return IO.NodeOutput(await validate_and_cast_response(response)) -class OpenAIDalle3(ComfyNodeABC): +class OpenAIDalle3(IO.ComfyNode): """ Generates images synchronously via OpenAI's DALL·E 3 endpoint. """ - def __init__(self): - pass + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="OpenAIDalle3", + display_name="OpenAI DALL·E 3", + category="api node/image/OpenAI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + IO.String.Input( + "prompt", + default="", + multiline=True, + tooltip="Text prompt for DALL·E", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=2 ** 31 - 1, + step=1, + display_mode=IO.NumberDisplay.number, + control_after_generate=True, + tooltip="not implemented yet in backend", + optional=True, + ), + IO.Combo.Input( + "quality", + default="standard", + options=["standard", "hd"], + tooltip="Image quality", + optional=True, + ), + IO.Combo.Input( + "style", + default="natural", + options=["natural", "vivid"], + tooltip="Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.", + optional=True, + ), + IO.Combo.Input( + "size", + default="1024x1024", + options=["1024x1024", "1024x1792", "1792x1024"], + tooltip="Image size", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Text prompt for DALL·E", - }, - ), - }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2**31 - 1, - "step": 1, - "display": "number", - "control_after_generate": True, - "tooltip": "not implemented yet in backend", - }, - ), - "quality": ( - IO.COMBO, - { - "options": ["standard", "hd"], - "default": "standard", - "tooltip": "Image quality", - }, - ), - "style": ( - IO.COMBO, - { - "options": ["natural", "vivid"], - "default": "natural", - "tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. 
Natural causes the model to produce more natural, less hyper-real looking images.", - }, - ), - "size": ( - IO.COMBO, - { - "options": ["1024x1024", "1024x1792", "1792x1024"], - "default": "1024x1024", - "tooltip": "Image size", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node/image/OpenAI" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - async def api_call( - self, + async def execute( + cls, prompt, seed=0, style="natural", quality="standard", size="1024x1024", - unique_id=None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) model = "dall-e-3" # build the operation - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/openai/images/generations", - method=HttpMethod.POST, - request_model=OpenAIImageGenerationRequest, - response_model=OpenAIImageGenerationResponse, - ), - request=OpenAIImageGenerationRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/openai/images/generations", method="POST"), + response_model=OpenAIImageGenerationResponse, + data=OpenAIImageGenerationRequest( model=model, prompt=prompt, quality=quality, @@ -341,114 +326,97 @@ class OpenAIDalle3(ComfyNodeABC): style=style, seed=seed, ), - auth_kwargs=kwargs, ) - response = await operation.execute() - - img_tensor = await validate_and_cast_response(response, node_id=unique_id) - return (img_tensor,) + return IO.NodeOutput(await validate_and_cast_response(response)) -class OpenAIGPTImage1(ComfyNodeABC): +class OpenAIGPTImage1(IO.ComfyNode): """ Generates images synchronously via OpenAI's GPT Image 1 endpoint. """ - def __init__(self): - pass + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="OpenAIGPTImage1", + display_name="OpenAI GPT Image 1", + category="api node/image/OpenAI", + description=cleandoc(cls.__doc__ or ""), + inputs=[ + IO.String.Input( + "prompt", + default="", + multiline=True, + tooltip="Text prompt for GPT Image 1", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=2 ** 31 - 1, + step=1, + display_mode=IO.NumberDisplay.number, + control_after_generate=True, + tooltip="not implemented yet in backend", + optional=True, + ), + IO.Combo.Input( + "quality", + default="low", + options=["low", "medium", "high"], + tooltip="Image quality, affects cost and generation time.", + optional=True, + ), + IO.Combo.Input( + "background", + default="opaque", + options=["opaque", "transparent"], + tooltip="Return image with or without background", + optional=True, + ), + IO.Combo.Input( + "size", + default="auto", + options=["auto", "1024x1024", "1024x1536", "1536x1024"], + tooltip="Image size", + optional=True, + ), + IO.Int.Input( + "n", + default=1, + min=1, + max=8, + step=1, + tooltip="How many images to generate", + display_mode=IO.NumberDisplay.number, + optional=True, + ), + IO.Image.Input( + "image", + tooltip="Optional reference image for image editing.", + optional=True, + ), + IO.Mask.Input( + "mask", + tooltip="Optional mask for inpainting (white areas will be replaced)", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - 
"tooltip": "Text prompt for GPT Image 1", - }, - ), - }, - "optional": { - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 2**31 - 1, - "step": 1, - "display": "number", - "control_after_generate": True, - "tooltip": "not implemented yet in backend", - }, - ), - "quality": ( - IO.COMBO, - { - "options": ["low", "medium", "high"], - "default": "low", - "tooltip": "Image quality, affects cost and generation time.", - }, - ), - "background": ( - IO.COMBO, - { - "options": ["opaque", "transparent"], - "default": "opaque", - "tooltip": "Return image with or without background", - }, - ), - "size": ( - IO.COMBO, - { - "options": ["auto", "1024x1024", "1024x1536", "1536x1024"], - "default": "auto", - "tooltip": "Image size", - }, - ), - "n": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 8, - "step": 1, - "display": "number", - "tooltip": "How many images to generate", - }, - ), - "image": ( - IO.IMAGE, - { - "default": None, - "tooltip": "Optional reference image for image editing.", - }, - ), - "mask": ( - IO.MASK, - { - "default": None, - "tooltip": "Optional mask for inpainting (white areas will be replaced)", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - RETURN_TYPES = (IO.IMAGE,) - FUNCTION = "api_call" - CATEGORY = "api node/image/OpenAI" - DESCRIPTION = cleandoc(__doc__ or "") - API_NODE = True - - async def api_call( - self, + async def execute( + cls, prompt, seed=0, quality="low", @@ -457,9 +425,7 @@ class OpenAIGPTImage1(ComfyNodeABC): mask=None, n=1, size="1024x1024", - unique_id=None, - **kwargs, - ): + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) model = "gpt-image-1" path = "/proxy/openai/images/generations" @@ -480,7 +446,7 @@ class OpenAIGPTImage1(ComfyNodeABC): image_np = (scaled_image.numpy() * 255).astype(np.uint8) img = Image.fromarray(image_np) - img_byte_arr = io.BytesIO() + img_byte_arr = BytesIO() img.save(img_byte_arr, format="PNG") img_byte_arr.seek(0) @@ -504,20 +470,17 @@ class OpenAIGPTImage1(ComfyNodeABC): mask_np = (scaled_mask.numpy() * 255).astype(np.uint8) mask_img = Image.fromarray(mask_np) - mask_img_byte_arr = io.BytesIO() + mask_img_byte_arr = BytesIO() mask_img.save(mask_img_byte_arr, format="PNG") mask_img_byte_arr.seek(0) files.append(("mask", ("mask.png", mask_img_byte_arr, "image/png"))) # Build the operation - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=request_class, - response_model=OpenAIImageGenerationResponse, - ), - request=request_class( + response = await sync_op( + cls, + ApiEndpoint(path=path, method="POST"), + response_model=OpenAIImageGenerationResponse, + data=request_class( model=model, prompt=prompt, quality=quality, @@ -528,127 +491,70 @@ class OpenAIGPTImage1(ComfyNodeABC): ), files=files if files else None, content_type=content_type, - auth_kwargs=kwargs, ) - response = await operation.execute() - - img_tensor = await validate_and_cast_response(response, node_id=unique_id) - return (img_tensor,) + return IO.NodeOutput(await validate_and_cast_response(response)) -class OpenAITextNode(ComfyNodeABC): - """ - Base class for OpenAI text generation nodes. - """ - - RETURN_TYPES = (IO.STRING,) - FUNCTION = "api_call" - CATEGORY = "api node/text/OpenAI" - API_NODE = True - - -class OpenAIChatNode(OpenAITextNode): +class OpenAIChatNode(IO.ComfyNode): """ Node to generate text responses from an OpenAI model. 
""" - def __init__(self) -> None: - """Initialize the chat node with a new session ID and empty history.""" - self.current_session_id: str = str(uuid.uuid4()) - self.history: dict[str, list[HistoryEntry]] = {} - self.previous_response_id: Optional[str] = None + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="OpenAIChatNode", + display_name="OpenAI ChatGPT", + category="api node/text/OpenAI", + description="Generate text responses from an OpenAI model.", + inputs=[ + IO.String.Input( + "prompt", + default="", + multiline=True, + tooltip="Text inputs to the model, used to generate a response.", + ), + IO.Boolean.Input( + "persist_context", + default=False, + tooltip="This parameter is deprecated and has no effect.", + ), + IO.Combo.Input( + "model", + options=SupportedOpenAIModel, + tooltip="The model used to generate the response", + ), + IO.Image.Input( + "images", + tooltip="Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.", + optional=True, + ), + IO.Custom("OPENAI_INPUT_FILES").Input( + "files", + optional=True, + tooltip="Optional file(s) to use as context for the model. Accepts inputs from the OpenAI Chat Input Files node.", + ), + IO.Custom("OPENAI_CHAT_CONFIG").Input( + "advanced_options", + optional=True, + tooltip="Optional configuration for the model. Accepts inputs from the OpenAI Chat Advanced Options node.", + ), + ], + outputs=[ + IO.String.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "prompt": ( - IO.STRING, - { - "multiline": True, - "default": "", - "tooltip": "Text inputs to the model, used to generate a response.", - }, - ), - "persist_context": ( - IO.BOOLEAN, - { - "default": True, - "tooltip": "Persist chat context between calls (multi-turn conversation)", - }, - ), - "model": model_field_to_node_input( - IO.COMBO, - OpenAICreateResponse, - "model", - enum_type=SupportedOpenAIModel, - ), - }, - "optional": { - "images": ( - IO.IMAGE, - { - "default": None, - "tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node.", - }, - ), - "files": ( - "OPENAI_INPUT_FILES", - { - "default": None, - "tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the OpenAI Chat Input Files node.", - }, - ), - "advanced_options": ( - "OPENAI_CHAT_CONFIG", - { - "default": None, - "tooltip": "Optional configuration for the model. Accepts inputs from the OpenAI Chat Advanced Options node.", - }, - ), - }, - "hidden": { - "auth_token": "AUTH_TOKEN_COMFY_ORG", - "comfy_api_key": "API_KEY_COMFY_ORG", - "unique_id": "UNIQUE_ID", - }, - } - - DESCRIPTION = "Generate text responses from an OpenAI model." - - async def get_result_response( - self, - response_id: str, - include: Optional[list[Includable]] = None, - auth_kwargs: Optional[dict[str, str]] = None, - ) -> OpenAIResponse: - """ - Retrieve a model response with the given ID from the OpenAI API. - - Args: - response_id (str): The ID of the response to retrieve. - include (Optional[List[Includable]]): Additional fields to include - in the response. See the `include` parameter for Response - creation above for more information. 
- - """ - return await PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"{RESPONSES_ENDPOINT}/{response_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=OpenAIResponse, - query_params={"include": include}, - ), - completed_statuses=["completed"], - failed_statuses=["failed"], - status_extractor=lambda response: response.status, - auth_kwargs=auth_kwargs, - ).execute() - def get_message_content_from_response( - self, response: OpenAIResponse + cls, response: OpenAIResponse ) -> list[OutputContent]: """Extract message content from the API response.""" for output in response.output: @@ -656,8 +562,9 @@ class OpenAIChatNode(OpenAITextNode): return output.root.content raise TypeError("No output message found in response") + @classmethod def get_text_from_message_content( - self, message_content: list[OutputContent] + cls, message_content: list[OutputContent] ) -> str: """Extract text content from message content.""" for content_item in message_content: @@ -665,58 +572,9 @@ class OpenAIChatNode(OpenAITextNode): return str(content_item.root.text) return "No text output found in response" - def get_history_text(self, session_id: str) -> str: - """Convert the entire history for a given session to JSON string.""" - return json.dumps(self.history[session_id]) - - def display_history_on_node(self, session_id: str, node_id: str) -> None: - """Display formatted chat history on the node UI.""" - render_spec = { - "node_id": node_id, - "component": "ChatHistoryWidget", - "props": { - "history": self.get_history_text(session_id), - }, - } - PromptServer.instance.send_sync( - "display_component", - render_spec, - ) - - def add_to_history( - self, session_id: str, prompt: str, output_text: str, response_id: str - ) -> None: - """Add a new entry to the chat history.""" - if session_id not in self.history: - self.history[session_id] = [] - self.history[session_id].append( - { - "prompt": prompt, - "response": output_text, - "response_id": response_id, - "timestamp": time.time(), - } - ) - - def parse_output_text_from_response(self, response: OpenAIResponse) -> str: - """Extract text output from the API response.""" - message_contents = self.get_message_content_from_response(response) - return self.get_text_from_message_content(message_contents) - - def generate_new_session_id(self) -> str: - """Generate a new unique session ID.""" - return str(uuid.uuid4()) - - def get_session_id(self, persist_context: bool) -> str: - """Get the current or generate a new session ID based on context persistence.""" - return ( - self.current_session_id - if persist_context - else self.generate_new_session_id() - ) - + @classmethod def tensor_to_input_image_content( - self, image: torch.Tensor, detail_level: Detail = "auto" + cls, image: torch.Tensor, detail_level: Detail = "auto" ) -> InputImageContent: """Convert a tensor to an input image content object.""" return InputImageContent( @@ -725,21 +583,27 @@ class OpenAIChatNode(OpenAITextNode): type="input_image", ) + @classmethod def create_input_message_contents( - self, + cls, prompt: str, image: Optional[torch.Tensor] = None, files: Optional[list[InputFileContent]] = None, ) -> InputMessageContentList: """Create a list of input message contents from prompt and optional image.""" - content_list: list[InputContent] = [ + content_list: list[Union[InputContent, InputTextContent, InputImageContent, InputFileContent]] = [ InputTextContent(text=prompt, type="input_text"), ] if image is not None: for i in range(image.shape[0]): content_list.append( 
- self.tensor_to_input_image_content(image[i].unsqueeze(0)) + InputImageContent( + detail="auto", + image_url=f"data:image/png;base64,{tensor_to_base64_string(image[i].unsqueeze(0))}", + type="input_image", + ) ) + if files is not None: content_list.extend(files) @@ -747,80 +611,28 @@ class OpenAIChatNode(OpenAITextNode): root=content_list, ) - def parse_response_id_from_prompt(self, prompt: str) -> Optional[str]: - """Extract response ID from prompt if it exists.""" - parsed_id = re.search(STARTING_POINT_ID_PATTERN, prompt) - return parsed_id.group(1) if parsed_id else None - - def strip_response_tag_from_prompt(self, prompt: str) -> str: - """Remove the response ID tag from the prompt.""" - return re.sub(STARTING_POINT_ID_PATTERN, "", prompt.strip()) - - def delete_history_after_response_id( - self, new_start_id: str, session_id: str - ) -> None: - """Delete history entries after a specific response ID.""" - if session_id not in self.history: - return - - new_history = [] - i = 0 - while ( - i < len(self.history[session_id]) - and self.history[session_id][i]["response_id"] != new_start_id - ): - new_history.append(self.history[session_id][i]) - i += 1 - - # Since it's the new starting point (not the response being edited), we include it as well - if i < len(self.history[session_id]): - new_history.append(self.history[session_id][i]) - - self.history[session_id] = new_history - - async def api_call( - self, + @classmethod + async def execute( + cls, prompt: str, - persist_context: bool, - model: SupportedOpenAIModel, - unique_id: Optional[str] = None, + persist_context: bool = False, + model: SupportedOpenAIModel = SupportedOpenAIModel.gpt_5.value, images: Optional[torch.Tensor] = None, files: Optional[list[InputFileContent]] = None, advanced_options: Optional[CreateModelResponseProperties] = None, - **kwargs, - ) -> tuple[str]: - # Validate inputs + ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) - session_id = self.get_session_id(persist_context) - response_id_override = self.parse_response_id_from_prompt(prompt) - if response_id_override: - is_starting_from_beginning = response_id_override == "start" - if is_starting_from_beginning: - self.history[session_id] = [] - previous_response_id = None - else: - previous_response_id = response_id_override - self.delete_history_after_response_id(response_id_override, session_id) - prompt = self.strip_response_tag_from_prompt(prompt) - elif persist_context: - previous_response_id = self.previous_response_id - else: - previous_response_id = None - # Create response - create_response = await SynchronousOperation( - endpoint=ApiEndpoint( - path=RESPONSES_ENDPOINT, - method=HttpMethod.POST, - request_model=OpenAICreateResponse, - response_model=OpenAIResponse, - ), - request=OpenAICreateResponse( + create_response = await sync_op( + cls, + ApiEndpoint(path=RESPONSES_ENDPOINT, method="POST"), + response_model=OpenAIResponse, + data=OpenAICreateResponse( input=[ Item( root=InputMessage( - content=self.create_input_message_contents( + content=cls.create_input_message_contents( prompt, images, files ), role="user", @@ -830,36 +642,57 @@ class OpenAIChatNode(OpenAITextNode): store=True, stream=False, model=model, - previous_response_id=previous_response_id, + previous_response_id=None, **( advanced_options.model_dump(exclude_none=True) if advanced_options else {} ), ), - auth_kwargs=kwargs, - ).execute() + ) response_id = create_response.id # Get result output - result_response = await self.get_result_response(response_id, 
auth_kwargs=kwargs) - output_text = self.parse_output_text_from_response(result_response) + result_response = await poll_op( + cls, + ApiEndpoint(path=f"{RESPONSES_ENDPOINT}/{response_id}"), + response_model=OpenAIResponse, + status_extractor=lambda response: response.status, + completed_statuses=["incomplete", "completed"] + ) + output_text = cls.get_text_from_message_content(cls.get_message_content_from_response(result_response)) # Update history - self.add_to_history(session_id, prompt, output_text, response_id) - self.display_history_on_node(session_id, unique_id) - self.previous_response_id = response_id - - return (output_text,) + render_spec = { + "node_id": cls.hidden.unique_id, + "component": "ChatHistoryWidget", + "props": { + "history": json.dumps( + [ + { + "prompt": prompt, + "response": output_text, + "response_id": str(uuid.uuid4()), + "timestamp": time.time(), + } + ] + ), + }, + } + PromptServer.instance.send_sync( + "display_component", + render_spec, + ) + return IO.NodeOutput(output_text) -class OpenAIInputFiles(ComfyNodeABC): +class OpenAIInputFiles(IO.ComfyNode): """ Loads and formats input files for OpenAI API. """ @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: + def define_schema(cls): """ For details about the supported file input types, see: https://platform.openai.com/docs/guides/pdf-files?api-mode=responses @@ -874,97 +707,92 @@ class OpenAIInputFiles(ComfyNodeABC): ] input_files = sorted(input_files, key=lambda x: x.name) input_files = [f.name for f in input_files] - return { - "required": { - "file": ( - IO.COMBO, - { - "tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.", - "options": input_files, - "default": input_files[0] if input_files else None, - }, + return IO.Schema( + node_id="OpenAIInputFiles", + display_name="OpenAI ChatGPT Input Files", + category="api node/text/OpenAI", + description="Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes.", + inputs=[ + IO.Combo.Input( + "file", + options=input_files, + default=input_files[0] if input_files else None, + tooltip="Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.", ), - }, - "optional": { - "OPENAI_INPUT_FILES": ( + IO.Custom("OPENAI_INPUT_FILES").Input( "OPENAI_INPUT_FILES", - { - "tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files.", - "default": None, - }, + tooltip="An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files.", + optional=True, ), - }, - } + ], + outputs=[ + IO.Custom("OPENAI_INPUT_FILES").Output(), + ], + ) - DESCRIPTION = "Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. 🛈 TIP: Can be chained together with other OpenAI Input File nodes." 
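The attributes being deleted in this hunk are the classic node declaration surface: an INPUT_TYPES classmethod plus RETURN_TYPES, FUNCTION, CATEGORY and DESCRIPTION class attributes. The schema API used throughout this patch folds all of them into define_schema(). A minimal sketch of the correspondence, using only constructs that appear in this diff; ExampleNode and its single input are illustrative names, not from the repo:

class ExampleNode(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="ExampleNode",            # replaces the NODE_CLASS_MAPPINGS key
            display_name="Example",           # replaces NODE_DISPLAY_NAME_MAPPINGS
            category="api node/text/OpenAI",  # replaces the CATEGORY attribute
            description="One-line summary",   # replaces DESCRIPTION
            inputs=[IO.String.Input("prompt", multiline=True)],  # replaces INPUT_TYPES
            outputs=[IO.String.Output()],     # replaces RETURN_TYPES
        )

    @classmethod
    def execute(cls, prompt: str) -> IO.NodeOutput:  # replaces FUNCTION = "api_call"
        return IO.NodeOutput(prompt)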
- RETURN_TYPES = ("OPENAI_INPUT_FILES",) - FUNCTION = "prepare_files" - CATEGORY = "api node/text/OpenAI" - - def create_input_file_content(self, file_path: str) -> InputFileContent: + @classmethod + def create_input_file_content(cls, file_path: str) -> InputFileContent: return InputFileContent( file_data=text_filepath_to_data_uri(file_path), filename=os.path.basename(file_path), type="input_file", ) - def prepare_files( - self, file: str, OPENAI_INPUT_FILES: list[InputFileContent] = [] - ) -> tuple[list[InputFileContent]]: + @classmethod + def execute(cls, file: str, OPENAI_INPUT_FILES: list[InputFileContent] = []) -> IO.NodeOutput: """ Loads and formats input files for OpenAI API. """ file_path = folder_paths.get_annotated_filepath(file) - input_file_content = self.create_input_file_content(file_path) + input_file_content = cls.create_input_file_content(file_path) files = [input_file_content] + OPENAI_INPUT_FILES - return (files,) + return IO.NodeOutput(files) -class OpenAIChatConfig(ComfyNodeABC): +class OpenAIChatConfig(IO.ComfyNode): """Allows setting additional configuration for the OpenAI Chat Node.""" - RETURN_TYPES = ("OPENAI_CHAT_CONFIG",) - FUNCTION = "configure" - DESCRIPTION = ( - "Allows specifying advanced configuration options for the OpenAI Chat Nodes." - ) - CATEGORY = "api node/text/OpenAI" - @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "truncation": ( - IO.COMBO, - { - "options": ["auto", "disabled"], - "default": "auto", - "tooltip": "The truncation strategy to use for the model response. auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.disabled: If a model response will exceed the context window size for a model, the request will fail with a 400 error", - }, + def define_schema(cls): + return IO.Schema( + node_id="OpenAIChatConfig", + display_name="OpenAI ChatGPT Advanced Options", + category="api node/text/OpenAI", + description="Allows specifying advanced configuration options for the OpenAI Chat Nodes.", + inputs=[ + IO.Combo.Input( + "truncation", + options=["auto", "disabled"], + default="auto", + tooltip="The truncation strategy to use for the model response. 
auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.disabled: If a model response will exceed the context window size for a model, the request will fail with a 400 error", ), - }, - "optional": { - "max_output_tokens": model_field_to_node_input( - IO.INT, - OpenAICreateResponse, + IO.Int.Input( "max_output_tokens", min=16, default=4096, max=16384, tooltip="An upper bound for the number of tokens that can be generated for a response, including visible output tokens", + optional=True, ), - "instructions": model_field_to_node_input( - IO.STRING, OpenAICreateResponse, "instructions", multiline=True + IO.String.Input( + "instructions", + multiline=True, + optional=True, + tooltip="Instructions for the model on how to generate the response", ), - }, - } + ], + outputs=[ + IO.Custom("OPENAI_CHAT_CONFIG").Output(), + ], + ) - def configure( - self, + @classmethod + def execute( + cls, truncation: bool, instructions: Optional[str] = None, max_output_tokens: Optional[int] = None, - ) -> tuple[CreateModelResponseProperties]: + ) -> IO.NodeOutput: """ Configure advanced options for the OpenAI Chat Node. @@ -974,29 +802,27 @@ class OpenAIChatConfig(ComfyNodeABC): They are not exposed as inputs at all to avoid having to manually remove depending on model choice. """ - return ( + return IO.NodeOutput( CreateModelResponseProperties( instructions=instructions, truncation=truncation, max_output_tokens=max_output_tokens, - ), + ) ) -NODE_CLASS_MAPPINGS = { - "OpenAIDalle2": OpenAIDalle2, - "OpenAIDalle3": OpenAIDalle3, - "OpenAIGPTImage1": OpenAIGPTImage1, - "OpenAIChatNode": OpenAIChatNode, - "OpenAIInputFiles": OpenAIInputFiles, - "OpenAIChatConfig": OpenAIChatConfig, -} +class OpenAIExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + OpenAIDalle2, + OpenAIDalle3, + OpenAIGPTImage1, + OpenAIChatNode, + OpenAIInputFiles, + OpenAIChatConfig, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "OpenAIDalle2": "OpenAI DALL·E 2", - "OpenAIDalle3": "OpenAI DALL·E 3", - "OpenAIGPTImage1": "OpenAI GPT Image 1", - "OpenAIChatNode": "OpenAI ChatGPT", - "OpenAIInputFiles": "OpenAI ChatGPT Input Files", - "OpenAIChatConfig": "OpenAI ChatGPT Advanced Options", -} + +async def comfy_entrypoint() -> OpenAIExtension: + return OpenAIExtension() diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py index bbc71363a..21013b591 100644 --- a/comfy_api_nodes/util/__init__.py +++ b/comfy_api_nodes/util/__init__.py @@ -18,6 +18,8 @@ from .conversions import ( tensor_to_base64_string, tensor_to_bytesio, tensor_to_pil, + text_filepath_to_base64_string, + text_filepath_to_data_uri, trim_video, video_to_base64_string, ) @@ -75,6 +77,8 @@ __all__ = [ "tensor_to_base64_string", "tensor_to_bytesio", "tensor_to_pil", + "text_filepath_to_base64_string", + "text_filepath_to_data_uri", "trim_video", "video_to_base64_string", # Validation utilities diff --git a/comfy_api_nodes/util/conversions.py b/comfy_api_nodes/util/conversions.py index b59c2bd84..971dc57de 100644 --- a/comfy_api_nodes/util/conversions.py +++ b/comfy_api_nodes/util/conversions.py @@ -1,6 +1,7 @@ import base64 import logging import math +import mimetypes import uuid from io import BytesIO from typing import Optional @@ -12,7 +13,7 @@ from PIL import Image from comfy.utils import common_upscale from comfy_api.latest import Input, InputImpl 
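The NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS dictionaries removed above are superseded by the ComfyExtension entrypoint this patch adds. For reference, a minimal sketch of what the new convention asks of a node pack; MyExtension and MyNode are placeholder names, and the imports mirror the ones already shown in this diff:

from typing_extensions import override
from comfy_api.latest import ComfyExtension, IO

class MyExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        # every node class returned here gets registered; display names now
        # live in each node's define_schema() rather than a mapping dict
        return [MyNode]

async def comfy_entrypoint() -> MyExtension:
    # module-level coroutine the loader looks for, per the pattern above
    return MyExtension()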
-from comfy_api.util import VideoContainer, VideoCodec +from comfy_api.util import VideoCodec, VideoContainer from ._helpers import mimetype_to_extension @@ -451,3 +452,19 @@ def resize_mask_to_image( if not allow_gradient: mask = (mask > 0.5).float() return mask + + +def text_filepath_to_base64_string(filepath: str) -> str: + """Converts a text file to a base64 string.""" + with open(filepath, "rb") as f: + file_content = f.read() + return base64.b64encode(file_content).decode("utf-8") + + +def text_filepath_to_data_uri(filepath: str) -> str: + """Converts a text file to a data URI.""" + base64_string = text_filepath_to_base64_string(filepath) + mime_type, _ = mimetypes.guess_type(filepath) + if mime_type is None: + mime_type = "application/octet-stream" + return f"data:{mime_type};base64,{base64_string}" From 4e2110c794b187c9326d604e7c0b0a4fad81148a Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 3 Nov 2025 10:29:08 +0200 Subject: [PATCH 0834/1073] feat(Pika-API-nodes): use new API client (#10608) --- .../apis/{pika_defs.py => pika_api.py} | 0 comfy_api_nodes/nodes_pika.py | 181 ++++++------------ 2 files changed, 57 insertions(+), 124 deletions(-) rename comfy_api_nodes/apis/{pika_defs.py => pika_api.py} (100%) diff --git a/comfy_api_nodes/apis/pika_defs.py b/comfy_api_nodes/apis/pika_api.py similarity index 100% rename from comfy_api_nodes/apis/pika_defs.py rename to comfy_api_nodes/apis/pika_api.py diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py index 5bb406a3b..51148211b 100644 --- a/comfy_api_nodes/nodes_pika.py +++ b/comfy_api_nodes/nodes_pika.py @@ -7,24 +7,23 @@ from __future__ import annotations from io import BytesIO import logging -from typing import Optional, TypeVar +from typing import Optional import torch from typing_extensions import override from comfy_api.latest import ComfyExtension, IO from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput -from comfy_api_nodes.apis import pika_defs -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.apis import pika_api as pika_defs +from comfy_api_nodes.util import ( + validate_string, + download_url_to_video_output, + tensor_to_bytesio, ApiEndpoint, - EmptyRequest, - HttpMethod, - PollingOperation, - SynchronousOperation, + sync_op, + poll_op, ) -from comfy_api_nodes.util import validate_string, download_url_to_video_output, tensor_to_bytesio -R = TypeVar("R") PATH_PIKADDITIONS = "/proxy/pika/generate/pikadditions" PATH_PIKASWAPS = "/proxy/pika/generate/pikaswaps" @@ -40,28 +39,18 @@ PATH_VIDEO_GET = "/proxy/pika/videos" async def execute_task( - initial_operation: SynchronousOperation[R, pika_defs.PikaGenerateResponse], - auth_kwargs: Optional[dict[str, str]] = None, - node_id: Optional[str] = None, + task_id: str, + cls: type[IO.ComfyNode], ) -> IO.NodeOutput: - task_id = (await initial_operation.execute()).video_id - final_response: pika_defs.PikaVideoResponse = await PollingOperation( - poll_endpoint=ApiEndpoint( - path=f"{PATH_VIDEO_GET}/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=pika_defs.PikaVideoResponse, - ), - completed_statuses=["finished"], - failed_statuses=["failed", "cancelled"], + final_response: pika_defs.PikaVideoResponse = await poll_op( + cls, + ApiEndpoint(path=f"{PATH_VIDEO_GET}/{task_id}"), + response_model=pika_defs.PikaVideoResponse, status_extractor=lambda response: (response.status.value if response.status else None), 
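        # status_extractor maps each polled PikaVideoResponse onto a plain status
        # string for the poll loop; it has to tolerate response.status being None
        # before the service reports a state, hence the conditional above.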
progress_extractor=lambda response: (response.progress if hasattr(response, "progress") else None), - auth_kwargs=auth_kwargs, - result_url_extractor=lambda response: (response.url if hasattr(response, "url") else None), - node_id=node_id, estimated_duration=60, max_poll_attempts=240, - ).execute() + ) if not final_response.url: error_msg = f"Pika task {task_id} succeeded but no video data found in response:\n{final_response}" logging.error(error_msg) @@ -124,23 +113,15 @@ class PikaImageToVideo(IO.ComfyNode): resolution=resolution, duration=duration, ) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_IMAGE_TO_VIDEO, - method=HttpMethod.POST, - request_model=pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost, - response_model=pika_defs.PikaGenerateResponse, - ), - request=pika_request_data, + initial_operation = await sync_op( + cls, + ApiEndpoint(path=PATH_IMAGE_TO_VIDEO, method="POST"), + response_model=pika_defs.PikaGenerateResponse, + data=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=auth, ) - return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) + return await execute_task(initial_operation.video_id, cls) class PikaTextToVideoNode(IO.ComfyNode): @@ -183,18 +164,11 @@ class PikaTextToVideoNode(IO.ComfyNode): duration: int, aspect_ratio: float, ) -> IO.NodeOutput: - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_TEXT_TO_VIDEO, - method=HttpMethod.POST, - request_model=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost, - response_model=pika_defs.PikaGenerateResponse, - ), - request=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost( + initial_operation = await sync_op( + cls, + ApiEndpoint(path=PATH_TEXT_TO_VIDEO, method="POST"), + response_model=pika_defs.PikaGenerateResponse, + data=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost( promptText=prompt_text, negativePrompt=negative_prompt, seed=seed, @@ -202,10 +176,9 @@ class PikaTextToVideoNode(IO.ComfyNode): duration=duration, aspectRatio=aspect_ratio, ), - auth_kwargs=auth, content_type="application/x-www-form-urlencoded", ) - return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) + return await execute_task(initial_operation.video_id, cls) class PikaScenes(IO.ComfyNode): @@ -309,24 +282,16 @@ class PikaScenes(IO.ComfyNode): duration=duration, aspectRatio=aspect_ratio, ) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_PIKASCENES, - method=HttpMethod.POST, - request_model=pika_defs.PikaBodyGenerate22C2vGenerate22PikascenesPost, - response_model=pika_defs.PikaGenerateResponse, - ), - request=pika_request_data, + initial_operation = await sync_op( + cls, + ApiEndpoint(path=PATH_PIKASCENES, method="POST"), + response_model=pika_defs.PikaGenerateResponse, + data=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=auth, ) - return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) + return await execute_task(initial_operation.video_id, cls) class PikAdditionsNode(IO.ComfyNode): @@ -383,24 +348,16 @@ class 
PikAdditionsNode(IO.ComfyNode): negativePrompt=negative_prompt, seed=seed, ) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_PIKADDITIONS, - method=HttpMethod.POST, - request_model=pika_defs.PikaBodyGeneratePikadditionsGeneratePikadditionsPost, - response_model=pika_defs.PikaGenerateResponse, - ), - request=pika_request_data, + initial_operation = await sync_op( + cls, + ApiEndpoint(path=PATH_PIKADDITIONS, method="POST"), + response_model=pika_defs.PikaGenerateResponse, + data=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=auth, ) - return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) + return await execute_task(initial_operation.video_id, cls) class PikaSwapsNode(IO.ComfyNode): @@ -472,23 +429,15 @@ class PikaSwapsNode(IO.ComfyNode): seed=seed, modifyRegionRoi=region_to_modify if region_to_modify else None, ) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_PIKASWAPS, - method=HttpMethod.POST, - request_model=pika_defs.PikaBodyGeneratePikaswapsGeneratePikaswapsPost, - response_model=pika_defs.PikaGenerateResponse, - ), - request=pika_request_data, + initial_operation = await sync_op( + cls, + ApiEndpoint(path=PATH_PIKASWAPS, method="POST"), + response_model=pika_defs.PikaGenerateResponse, + data=pika_request_data, files=pika_files, content_type="multipart/form-data", - auth_kwargs=auth, ) - return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) + return await execute_task(initial_operation.video_id, cls) class PikaffectsNode(IO.ComfyNode): @@ -528,18 +477,11 @@ class PikaffectsNode(IO.ComfyNode): negative_prompt: str, seed: int, ) -> IO.NodeOutput: - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_PIKAFFECTS, - method=HttpMethod.POST, - request_model=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost, - response_model=pika_defs.PikaGenerateResponse, - ), - request=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost( + initial_operation = await sync_op( + cls, + ApiEndpoint(path=PATH_PIKAFFECTS, method="POST"), + response_model=pika_defs.PikaGenerateResponse, + data=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost( pikaffect=pikaffect, promptText=prompt_text, negativePrompt=negative_prompt, @@ -547,9 +489,8 @@ class PikaffectsNode(IO.ComfyNode): ), files={"image": ("image.png", tensor_to_bytesio(image), "image/png")}, content_type="multipart/form-data", - auth_kwargs=auth, ) - return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) + return await execute_task(initial_operation.video_id, cls) class PikaStartEndFrameNode(IO.ComfyNode): @@ -592,18 +533,11 @@ class PikaStartEndFrameNode(IO.ComfyNode): ("keyFrames", ("image_start.png", tensor_to_bytesio(image_start), "image/png")), ("keyFrames", ("image_end.png", tensor_to_bytesio(image_end), "image/png")), ] - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } - initial_operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=PATH_PIKAFRAMES, - method=HttpMethod.POST, - 
request_model=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost, - response_model=pika_defs.PikaGenerateResponse, - ), - request=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost( + initial_operation = await sync_op( + cls, + ApiEndpoint(path=PATH_PIKAFRAMES, method="POST"), + response_model=pika_defs.PikaGenerateResponse, + data=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost( promptText=prompt_text, negativePrompt=negative_prompt, seed=seed, @@ -612,9 +546,8 @@ class PikaStartEndFrameNode(IO.ComfyNode): ), files=pika_files, content_type="multipart/form-data", - auth_kwargs=auth, ) - return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id) + return await execute_task(initial_operation.video_id, cls) class PikaApiNodesExtension(ComfyExtension): From e974e554ca23be505b72bc9c1614f4285c1db6e3 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 4 Nov 2025 02:59:44 +0800 Subject: [PATCH 0835/1073] chore: update embedded docs to v0.3.1 (#10614) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4d84b0d3e..856e373de 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ comfyui-frontend-package==1.28.8 comfyui-workflow-templates==0.2.4 -comfyui-embedded-docs==0.3.0 +comfyui-embedded-docs==0.3.1 torch torchsde torchvision From 958a17199ac519504e390ea0d53295ceb8cbd2c1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 3 Nov 2025 14:08:30 -0800 Subject: [PATCH 0836/1073] People should update their pytorch versions. (#10618) --- comfy/quant_ops.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index 873f173ed..5af6f118e 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -418,6 +418,10 @@ def fp8_linear(func, args, kwargs): scale_b=scale_b, out_dtype=out_dtype, ) + + if isinstance(output, tuple): # TODO: remove when we drop support for torch 2.4 + output = output[0] + if not tensor_2d: output = output.reshape((-1, input_shape[1], weight.shape[0])) From 0652cb8e2d343f68e38285755835c77bda7f6389 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 3 Nov 2025 14:37:12 -0800 Subject: [PATCH 0837/1073] Speed up torch.compile (#10620) --- comfy/ops.py | 1 - 1 file changed, 1 deletion(-) diff --git a/comfy/ops.py b/comfy/ops.py index 0c8f23848..afe498caa 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -71,7 +71,6 @@ def cast_to_input(weight, input, non_blocking=False, copy=True): return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy) -@torch.compiler.disable() def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, offloadable=False): # NOTE: offloadable=False is a a legacy and if you are a custom node author reading this please pass # offloadable=True and call uncast_bias_weight() after your last usage of the weight/bias. 
This From e199c8cc6758d388792fd66b99e8de832814ff91 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 3 Nov 2025 14:58:24 -0800 Subject: [PATCH 0838/1073] Fixes (#10621) --- comfy/quant_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index 5af6f118e..835fc4b8d 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -131,7 +131,7 @@ class QuantizedTensor(torch.Tensor): self._layout_params = layout_params def __repr__(self): - layout_name = self._layout_type.__name__ + layout_name = self._layout_type param_str = ", ".join(f"{k}={v}" for k, v in list(self._layout_params.items())[:2]) return f"QuantizedTensor(shape={self.shape}, layout={layout_name}, {param_str})" @@ -179,7 +179,7 @@ class QuantizedTensor(torch.Tensor): attr_name = f"_layout_param_{key}" layout_params[key] = inner_tensors[attr_name] - return QuantizedTensor(inner_tensors["_q_data"], layout_type, layout_params) + return QuantizedTensor(inner_tensors["_qdata"], layout_type, layout_params) @classmethod def from_float(cls, tensor, layout_type, **quantize_kwargs) -> 'QuantizedTensor': From 6b88478f9fe0874c0e17468c9fca3a0a84e6c781 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 3 Nov 2025 16:22:10 -0800 Subject: [PATCH 0839/1073] Bring back fp8 torch compile performance to what it should be. (#10622) --- comfy/quant_ops.py | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index 835fc4b8d..ed7b29963 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -126,7 +126,7 @@ class QuantizedTensor(torch.Tensor): return torch.Tensor._make_wrapper_subclass(cls, qdata.shape, device=qdata.device, dtype=qdata.dtype, requires_grad=False) def __init__(self, qdata, layout_type, layout_params): - self._qdata = qdata.contiguous() + self._qdata = qdata self._layout_type = layout_type self._layout_params = layout_params @@ -411,7 +411,7 @@ def fp8_linear(func, args, kwargs): try: output = torch._scaled_mm( - plain_input.reshape(-1, input_shape[2]), + plain_input.reshape(-1, input_shape[2]).contiguous(), weight_t, bias=bias, scale_a=scale_a, @@ -447,6 +447,43 @@ def fp8_linear(func, args, kwargs): return torch.nn.functional.linear(input_tensor, weight, bias) +@register_layout_op(torch.ops.aten.addmm.default, "TensorCoreFP8Layout") +def fp8_addmm(func, args, kwargs): + input_tensor = args[1] + weight = args[2] + bias = args[0] + + if isinstance(input_tensor, QuantizedTensor) and isinstance(weight, QuantizedTensor): + out_dtype = kwargs.get("out_dtype") + if out_dtype is None: + out_dtype = input_tensor._layout_params['orig_dtype'] + + plain_input, scale_a = TensorCoreFP8Layout.get_plain_tensors(input_tensor) + plain_weight, scale_b = TensorCoreFP8Layout.get_plain_tensors(weight) + + output = torch._scaled_mm( + plain_input.contiguous(), + plain_weight, + bias=bias, + scale_a=scale_a, + scale_b=scale_b, + out_dtype=out_dtype, + ) + + if isinstance(output, tuple): # TODO: remove when we drop support for torch 2.4 + output = output[0] + return output + + a = list(args) + if isinstance(args[0], QuantizedTensor): + a[0] = args[0].dequantize() + if isinstance(args[1], QuantizedTensor): + a[1] = args[1].dequantize() + if isinstance(args[2], QuantizedTensor): + a[2] = args[2].dequantize() + + return func(*a, **kwargs) + @register_layout_op(torch.ops.aten.view.default, 
"TensorCoreFP8Layout") @register_layout_op(torch.ops.aten.t.default, "TensorCoreFP8Layout") def fp8_func(func, args, kwargs): From 0f4ef3afa0772ad11d6d72ad21fb1e089c2fcf5f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 3 Nov 2025 18:47:14 -0800 Subject: [PATCH 0840/1073] This seems to slow things down slightly on Linux. (#10624) --- comfy/ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ops.py b/comfy/ops.py index afe498caa..733bff99d 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -35,7 +35,7 @@ def scaled_dot_product_attention(q, k, v, *args, **kwargs): try: - if torch.cuda.is_available(): + if torch.cuda.is_available() and comfy.model_management.WINDOWS: from torch.nn.attention import SDPBackend, sdpa_kernel import inspect if "set_priority" in inspect.signature(sdpa_kernel).parameters: From af4b7b5edb339a15aa443e32aefbceac1810baa0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 3 Nov 2025 19:14:20 -0800 Subject: [PATCH 0841/1073] More fp8 torch.compile regressions fixed. (#10625) --- comfy/quant_ops.py | 54 ++++++++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 19 deletions(-) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index ed7b29963..c56e32a73 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -446,6 +446,25 @@ def fp8_linear(func, args, kwargs): return torch.nn.functional.linear(input_tensor, weight, bias) +def fp8_mm_(input_tensor, weight, bias=None, out_dtype=None): + if out_dtype is None: + out_dtype = input_tensor._layout_params['orig_dtype'] + + plain_input, scale_a = TensorCoreFP8Layout.get_plain_tensors(input_tensor) + plain_weight, scale_b = TensorCoreFP8Layout.get_plain_tensors(weight) + + output = torch._scaled_mm( + plain_input.contiguous(), + plain_weight, + bias=bias, + scale_a=scale_a, + scale_b=scale_b, + out_dtype=out_dtype, + ) + + if isinstance(output, tuple): # TODO: remove when we drop support for torch 2.4 + output = output[0] + return output @register_layout_op(torch.ops.aten.addmm.default, "TensorCoreFP8Layout") def fp8_addmm(func, args, kwargs): @@ -454,25 +473,7 @@ def fp8_addmm(func, args, kwargs): bias = args[0] if isinstance(input_tensor, QuantizedTensor) and isinstance(weight, QuantizedTensor): - out_dtype = kwargs.get("out_dtype") - if out_dtype is None: - out_dtype = input_tensor._layout_params['orig_dtype'] - - plain_input, scale_a = TensorCoreFP8Layout.get_plain_tensors(input_tensor) - plain_weight, scale_b = TensorCoreFP8Layout.get_plain_tensors(weight) - - output = torch._scaled_mm( - plain_input.contiguous(), - plain_weight, - bias=bias, - scale_a=scale_a, - scale_b=scale_b, - out_dtype=out_dtype, - ) - - if isinstance(output, tuple): # TODO: remove when we drop support for torch 2.4 - output = output[0] - return output + return fp8_mm_(input_tensor, weight, bias=bias, out_dtype=kwargs.get("out_dtype", None)) a = list(args) if isinstance(args[0], QuantizedTensor): @@ -484,6 +485,21 @@ def fp8_addmm(func, args, kwargs): return func(*a, **kwargs) +@register_layout_op(torch.ops.aten.mm.default, "TensorCoreFP8Layout") +def fp8_mm(func, args, kwargs): + input_tensor = args[0] + weight = args[1] + + if isinstance(input_tensor, QuantizedTensor) and isinstance(weight, QuantizedTensor): + return fp8_mm_(input_tensor, weight, bias=None, out_dtype=kwargs.get("out_dtype", None)) + + a = list(args) + if isinstance(args[0], QuantizedTensor): + a[0] = 
args[0].dequantize() + if isinstance(args[1], QuantizedTensor): + a[1] = args[1].dequantize() + return func(*a, **kwargs) + @register_layout_op(torch.ops.aten.view.default, "TensorCoreFP8Layout") @register_layout_op(torch.ops.aten.t.default, "TensorCoreFP8Layout") def fp8_func(func, args, kwargs): From 9c71a667904a049975531f2a7dd55f4a8fc92652 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 5 Nov 2025 02:51:53 +0800 Subject: [PATCH 0842/1073] chore: update workflow templates to v0.2.11 (#10634) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 856e373de..249c36dee 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.28.8 -comfyui-workflow-templates==0.2.4 +comfyui-workflow-templates==0.2.11 comfyui-embedded-docs==0.3.1 torch torchsde From a389ee01bb7ba5174729906a7f85bd08b5c2cb87 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Wed, 5 Nov 2025 08:14:10 +1000 Subject: [PATCH 0843/1073] caching: Handle None outputs tuple case (#10637) --- comfy_execution/caching.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py index e077f78b0..326a279fc 100644 --- a/comfy_execution/caching.py +++ b/comfy_execution/caching.py @@ -399,6 +399,8 @@ class RAMPressureCache(LRUCache): ram_usage = RAM_CACHE_DEFAULT_RAM_USAGE def scan_list_for_ram_usage(outputs): nonlocal ram_usage + if outputs is None: + return for output in outputs: if isinstance(output, list): scan_list_for_ram_usage(output) From 7f3e4d486cd77c3ad30eb4714ec18bdaf29e2b5c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 4 Nov 2025 14:37:50 -0800 Subject: [PATCH 0844/1073] Limit amount of pinned memory on windows to prevent issues. 
(#10638) --- comfy/model_management.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 79c0dfdb4..0d040e55e 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1082,8 +1082,20 @@ def cast_to_device(tensor, device, dtype, copy=False): non_blocking = device_supports_non_blocking(device) return cast_to(tensor, dtype=dtype, device=device, non_blocking=non_blocking, copy=copy) + +PINNED_MEMORY = {} +TOTAL_PINNED_MEMORY = 0 +if PerformanceFeature.PinnedMem in args.fast: + if WINDOWS: + MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50% + else: + MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95 +else: + MAX_PINNED_MEMORY = -1 + def pin_memory(tensor): - if PerformanceFeature.PinnedMem not in args.fast: + global TOTAL_PINNED_MEMORY + if MAX_PINNED_MEMORY <= 0: return False if not is_nvidia(): @@ -1092,13 +1104,21 @@ def pin_memory(tensor): if not is_device_cpu(tensor.device): return False - if torch.cuda.cudart().cudaHostRegister(tensor.data_ptr(), tensor.numel() * tensor.element_size(), 1) == 0: + size = tensor.numel() * tensor.element_size() + if (TOTAL_PINNED_MEMORY + size) > MAX_PINNED_MEMORY: + return False + + ptr = tensor.data_ptr() + if torch.cuda.cudart().cudaHostRegister(ptr, size, 1) == 0: + PINNED_MEMORY[ptr] = size + TOTAL_PINNED_MEMORY += size return True return False def unpin_memory(tensor): - if PerformanceFeature.PinnedMem not in args.fast: + global TOTAL_PINNED_MEMORY + if MAX_PINNED_MEMORY <= 0: return False if not is_nvidia(): @@ -1107,7 +1127,11 @@ def unpin_memory(tensor): if not is_device_cpu(tensor.device): return False - if torch.cuda.cudart().cudaHostUnregister(tensor.data_ptr()) == 0: + ptr = tensor.data_ptr() + if torch.cuda.cudart().cudaHostUnregister(ptr) == 0: + TOTAL_PINNED_MEMORY -= PINNED_MEMORY.pop(ptr) + if len(PINNED_MEMORY) == 0: + TOTAL_PINNED_MEMORY = 0 return True return False From 265adad858e1f31b66cd3523a02b16f5d34ced52 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 4 Nov 2025 19:42:23 -0500 Subject: [PATCH 0845/1073] ComfyUI version v0.3.68 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index db48b05c4..25d1a4157 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
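For scale, a worked example of the budget enforced by pin_memory() in the patch above; the machine size is assumed, not taken from the patch:

total_ram = 64 * (1024 ** 3)    # hypothetical 64 GiB host
windows_cap = total_ram * 0.45  # about 28.8 GiB; Windows apparently refuses pinning near 50%
default_cap = total_ram * 0.95  # about 60.8 GiB on other platforms
# once TOTAL_PINNED_MEMORY + (tensor.numel() * tensor.element_size()) would pass
# the cap, pin_memory() returns False and the tensor stays in pageable memory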
-__version__ = "0.3.67" +__version__ = "0.3.68" diff --git a/pyproject.toml b/pyproject.toml index ab054355c..79ff3f74a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.67" +version = "0.3.68" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 4cd881866bad0cde70273cc123d725693c1f2759 Mon Sep 17 00:00:00 2001 From: contentis Date: Wed, 5 Nov 2025 02:10:11 +0100 Subject: [PATCH 0846/1073] Use single apply_rope function across models (#10547) --- comfy/ldm/flux/layers.py | 4 +- comfy/ldm/flux/math.py | 10 +--- comfy/ldm/lightricks/model.py | 88 ++++++++++++++--------------------- comfy/ldm/qwen_image/model.py | 36 +++++++------- comfy/ldm/wan/model.py | 1 + 5 files changed, 59 insertions(+), 80 deletions(-) diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py index ef21b416b..a3eab0470 100644 --- a/comfy/ldm/flux/layers.py +++ b/comfy/ldm/flux/layers.py @@ -195,8 +195,8 @@ class DoubleStreamBlock(nn.Module): txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:] # calculate the img bloks - img = img + apply_mod(self.img_attn.proj(img_attn), img_mod1.gate, None, modulation_dims_img) - img = img + apply_mod(self.img_mlp(apply_mod(self.img_norm2(img), (1 + img_mod2.scale), img_mod2.shift, modulation_dims_img)), img_mod2.gate, None, modulation_dims_img) + img += apply_mod(self.img_attn.proj(img_attn), img_mod1.gate, None, modulation_dims_img) + img += apply_mod(self.img_mlp(apply_mod(self.img_norm2(img), (1 + img_mod2.scale), img_mod2.shift, modulation_dims_img)), img_mod2.gate, None, modulation_dims_img) # calculate the txt bloks txt += apply_mod(self.txt_attn.proj(txt_attn), txt_mod1.gate, None, modulation_dims_txt) diff --git a/comfy/ldm/flux/math.py b/comfy/ldm/flux/math.py index 8deda0d4a..158420290 100644 --- a/comfy/ldm/flux/math.py +++ b/comfy/ldm/flux/math.py @@ -7,15 +7,7 @@ import comfy.model_management def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None, transformer_options={}) -> Tensor: - q_shape = q.shape - k_shape = k.shape - - if pe is not None: - q = q.to(dtype=pe.dtype).reshape(*q.shape[:-1], -1, 1, 2) - k = k.to(dtype=pe.dtype).reshape(*k.shape[:-1], -1, 1, 2) - q = (pe[..., 0] * q[..., 0] + pe[..., 1] * q[..., 1]).reshape(*q_shape).type_as(v) - k = (pe[..., 0] * k[..., 0] + pe[..., 1] * k[..., 1]).reshape(*k_shape).type_as(v) - + q, k = apply_rope(q, k, pe) heads = q.shape[1] x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask, transformer_options=transformer_options) return x diff --git a/comfy/ldm/lightricks/model.py b/comfy/ldm/lightricks/model.py index def365ba7..5bcba998b 100644 --- a/comfy/ldm/lightricks/model.py +++ b/comfy/ldm/lightricks/model.py @@ -3,12 +3,11 @@ from torch import nn import comfy.patcher_extension import comfy.ldm.modules.attention import comfy.ldm.common_dit -from einops import rearrange import math from typing import Dict, Optional, Tuple from .symmetric_patchifier import SymmetricPatchifier, latent_to_pixel_coords - +from comfy.ldm.flux.math import apply_rope1 def get_timestep_embedding( timesteps: torch.Tensor, @@ -238,20 +237,6 @@ class FeedForward(nn.Module): return self.net(x) -def apply_rotary_emb(input_tensor, freqs_cis): #TODO: remove duplicate funcs and pick the best/fastest one - cos_freqs = freqs_cis[0] - sin_freqs = freqs_cis[1] - - t_dup = rearrange(input_tensor, "... (d r) -> ... 
d r", r=2) - t1, t2 = t_dup.unbind(dim=-1) - t_dup = torch.stack((-t2, t1), dim=-1) - input_tensor_rot = rearrange(t_dup, "... d r -> ... (d r)") - - out = input_tensor * cos_freqs + input_tensor_rot * sin_freqs - - return out - - class CrossAttention(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., attn_precision=None, dtype=None, device=None, operations=None): super().__init__() @@ -281,8 +266,8 @@ class CrossAttention(nn.Module): k = self.k_norm(k) if pe is not None: - q = apply_rotary_emb(q, pe) - k = apply_rotary_emb(k, pe) + q = apply_rope1(q.unsqueeze(1), pe).squeeze(1) + k = apply_rope1(k.unsqueeze(1), pe).squeeze(1) if mask is None: out = comfy.ldm.modules.attention.optimized_attention(q, k, v, self.heads, attn_precision=self.attn_precision, transformer_options=transformer_options) @@ -306,12 +291,17 @@ class BasicTransformerBlock(nn.Module): def forward(self, x, context=None, attention_mask=None, timestep=None, pe=None, transformer_options={}): shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None, None].to(device=x.device, dtype=x.dtype) + timestep.reshape(x.shape[0], timestep.shape[1], self.scale_shift_table.shape[0], -1)).unbind(dim=2) - x += self.attn1(comfy.ldm.common_dit.rms_norm(x) * (1 + scale_msa) + shift_msa, pe=pe, transformer_options=transformer_options) * gate_msa + norm_x = comfy.ldm.common_dit.rms_norm(x) + attn1_input = torch.addcmul(norm_x, norm_x, scale_msa).add_(shift_msa) + attn1_result = self.attn1(attn1_input, pe=pe, transformer_options=transformer_options) + x.addcmul_(attn1_result, gate_msa) x += self.attn2(x, context=context, mask=attention_mask, transformer_options=transformer_options) - y = comfy.ldm.common_dit.rms_norm(x) * (1 + scale_mlp) + shift_mlp - x += self.ff(y) * gate_mlp + norm_x = comfy.ldm.common_dit.rms_norm(x) + y = torch.addcmul(norm_x, norm_x, scale_mlp).add_(shift_mlp) + ff_result = self.ff(y) + x.addcmul_(ff_result, gate_mlp) return x @@ -327,41 +317,35 @@ def get_fractional_positions(indices_grid, max_pos): def precompute_freqs_cis(indices_grid, dim, out_dtype, theta=10000.0, max_pos=[20, 2048, 2048]): - dtype = torch.float32 #self.dtype + dtype = torch.float32 + device = indices_grid.device + # Get fractional positions and compute frequency indices fractional_positions = get_fractional_positions(indices_grid, max_pos) + indices = theta ** torch.linspace(0, 1, dim // 6, device=device, dtype=dtype) * math.pi / 2 - start = 1 - end = theta - device = fractional_positions.device + # Compute frequencies and apply cos/sin + freqs = (indices * (fractional_positions.unsqueeze(-1) * 2 - 1)).transpose(-1, -2).flatten(2) + cos_vals = freqs.cos().repeat_interleave(2, dim=-1) + sin_vals = freqs.sin().repeat_interleave(2, dim=-1) - indices = theta ** ( - torch.linspace( - math.log(start, theta), - math.log(end, theta), - dim // 6, - device=device, - dtype=dtype, - ) - ) - indices = indices.to(dtype=dtype) - - indices = indices * math.pi / 2 - - freqs = ( - (indices * (fractional_positions.unsqueeze(-1) * 2 - 1)) - .transpose(-1, -2) - .flatten(2) - ) - - cos_freq = freqs.cos().repeat_interleave(2, dim=-1) - sin_freq = freqs.sin().repeat_interleave(2, dim=-1) + # Pad if dim is not divisible by 6 if dim % 6 != 0: - cos_padding = torch.ones_like(cos_freq[:, :, : dim % 6]) - sin_padding = torch.zeros_like(cos_freq[:, :, : dim % 6]) - cos_freq = torch.cat([cos_padding, cos_freq], dim=-1) - sin_freq = torch.cat([sin_padding, sin_freq], dim=-1) - return 
cos_freq.to(out_dtype), sin_freq.to(out_dtype) + padding_size = dim % 6 + cos_vals = torch.cat([torch.ones_like(cos_vals[:, :, :padding_size]), cos_vals], dim=-1) + sin_vals = torch.cat([torch.zeros_like(sin_vals[:, :, :padding_size]), sin_vals], dim=-1) + + # Reshape and extract one value per pair (since repeat_interleave duplicates each value) + cos_vals = cos_vals.reshape(*cos_vals.shape[:2], -1, 2)[..., 0] # [B, N, dim//2] + sin_vals = sin_vals.reshape(*sin_vals.shape[:2], -1, 2)[..., 0] # [B, N, dim//2] + + # Build rotation matrix [[cos, -sin], [sin, cos]] and add heads dimension + freqs_cis = torch.stack([ + torch.stack([cos_vals, -sin_vals], dim=-1), + torch.stack([sin_vals, cos_vals], dim=-1) + ], dim=-2).unsqueeze(1) # [B, 1, N, dim//2, 2, 2] + + return freqs_cis.to(out_dtype) class LTXVModel(torch.nn.Module): @@ -501,7 +485,7 @@ class LTXVModel(torch.nn.Module): shift, scale = scale_shift_values[:, :, 0], scale_shift_values[:, :, 1] x = self.norm_out(x) # Modulation - x = x * (1 + scale) + shift + x = torch.addcmul(x, x, scale).add_(shift) x = self.proj_out(x) x = self.patchifier.unpatchify( diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index b9f60c2b7..81d3ee7c0 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -10,6 +10,7 @@ from comfy.ldm.modules.attention import optimized_attention_masked from comfy.ldm.flux.layers import EmbedND import comfy.ldm.common_dit import comfy.patcher_extension +from comfy.ldm.flux.math import apply_rope1 class GELU(nn.Module): def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True, dtype=None, device=None, operations=None): @@ -134,33 +135,34 @@ class Attention(nn.Module): image_rotary_emb: Optional[torch.Tensor] = None, transformer_options={}, ) -> Tuple[torch.Tensor, torch.Tensor]: + batch_size = hidden_states.shape[0] + seq_img = hidden_states.shape[1] seq_txt = encoder_hidden_states.shape[1] - img_query = self.to_q(hidden_states).unflatten(-1, (self.heads, -1)) - img_key = self.to_k(hidden_states).unflatten(-1, (self.heads, -1)) - img_value = self.to_v(hidden_states).unflatten(-1, (self.heads, -1)) + # Project and reshape to BHND format (batch, heads, seq, dim) + img_query = self.to_q(hidden_states).view(batch_size, seq_img, self.heads, -1).transpose(1, 2).contiguous() + img_key = self.to_k(hidden_states).view(batch_size, seq_img, self.heads, -1).transpose(1, 2).contiguous() + img_value = self.to_v(hidden_states).view(batch_size, seq_img, self.heads, -1).transpose(1, 2) - txt_query = self.add_q_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) - txt_key = self.add_k_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) - txt_value = self.add_v_proj(encoder_hidden_states).unflatten(-1, (self.heads, -1)) + txt_query = self.add_q_proj(encoder_hidden_states).view(batch_size, seq_txt, self.heads, -1).transpose(1, 2).contiguous() + txt_key = self.add_k_proj(encoder_hidden_states).view(batch_size, seq_txt, self.heads, -1).transpose(1, 2).contiguous() + txt_value = self.add_v_proj(encoder_hidden_states).view(batch_size, seq_txt, self.heads, -1).transpose(1, 2) img_query = self.norm_q(img_query) img_key = self.norm_k(img_key) txt_query = self.norm_added_q(txt_query) txt_key = self.norm_added_k(txt_key) - joint_query = torch.cat([txt_query, img_query], dim=1) - joint_key = torch.cat([txt_key, img_key], dim=1) - joint_value = torch.cat([txt_value, img_value], dim=1) + joint_query = torch.cat([txt_query, img_query], dim=2) + joint_key = 
torch.cat([txt_key, img_key], dim=2) + joint_value = torch.cat([txt_value, img_value], dim=2) - joint_query = apply_rotary_emb(joint_query, image_rotary_emb) - joint_key = apply_rotary_emb(joint_key, image_rotary_emb) + joint_query = apply_rope1(joint_query, image_rotary_emb) + joint_key = apply_rope1(joint_key, image_rotary_emb) - joint_query = joint_query.flatten(start_dim=2) - joint_key = joint_key.flatten(start_dim=2) - joint_value = joint_value.flatten(start_dim=2) - - joint_hidden_states = optimized_attention_masked(joint_query, joint_key, joint_value, self.heads, attention_mask, transformer_options=transformer_options) + joint_hidden_states = optimized_attention_masked(joint_query, joint_key, joint_value, self.heads, + attention_mask, transformer_options=transformer_options, + skip_reshape=True) txt_attn_output = joint_hidden_states[:, :seq_txt, :] img_attn_output = joint_hidden_states[:, seq_txt:, :] @@ -413,7 +415,7 @@ class QwenImageTransformer2DModel(nn.Module): txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size) // 2, ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size) // 2)) txt_ids = torch.arange(txt_start, txt_start + context.shape[1], device=x.device).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) ids = torch.cat((txt_ids, img_ids), dim=1) - image_rotary_emb = self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype) + image_rotary_emb = self.pe_embedder(ids).to(torch.float32).contiguous() del ids, txt_ids, img_ids hidden_states = self.img_in(hidden_states) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index 5ec1511ce..a9d5e10d9 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -232,6 +232,7 @@ class WanAttentionBlock(nn.Module): # assert e[0].dtype == torch.float32 # self-attention + x = x.contiguous() # otherwise implicit in LayerNorm y = self.self_attn( torch.addcmul(repeat_e(e[0], x), self.norm1(x), 1 + repeat_e(e[1], x)), freqs, transformer_options=transformer_options) From c4a6b389de1014471a75a46ee57d2fdac4f8df93 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 4 Nov 2025 19:47:35 -0800 Subject: [PATCH 0847/1073] Lower ltxv mem usage to what it was before the previous PR. (#10643) Bring back qwen behavior to what it was before the previous PR.
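Note: the memory behavior this patch (and the previous one) tunes relies on torch.addcmul, which computes input + tensor1 * tensor2 as a single fused op, and its in-place variant addcmul_. Below is a minimal, self-contained sketch of the modulation idiom; it is illustrative only, and the function names and tensor shapes are assumptions rather than code from the patch:

import torch

def modulate(x: torch.Tensor, scale: torch.Tensor, shift: torch.Tensor) -> torch.Tensor:
    # Computes x * (1 + scale) + shift as x + x * scale, then adds shift
    # in place on the freshly allocated result, skipping the (1 + scale) temporary.
    return torch.addcmul(x, x, scale).add_(shift)

def gated_residual_(x: torch.Tensor, branch: torch.Tensor, gate: torch.Tensor) -> torch.Tensor:
    # Updates x += branch * gate in place, without materializing branch * gate.
    return x.addcmul_(branch, gate)

x = torch.randn(2, 16, 64)                             # (batch, tokens, dim); shapes assumed
scale, shift, gate = (torch.randn(2, 1, 64) for _ in range(3))
y = modulate(x.clone(), scale, shift)                  # out-of-place modulation
gated_residual_(x, y, gate)                            # in-place gated residual update

Compared with x = x + f(x) * gate, the in-place form avoids allocating two full-size temporaries per block (the product and the new sum), which is consistent with the memory reduction this patch targets.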
--- comfy/ldm/lightricks/model.py | 22 +++++++++++----------- comfy/ldm/qwen_image/model.py | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/lightricks/model.py b/comfy/ldm/lightricks/model.py index 5bcba998b..593f7940f 100644 --- a/comfy/ldm/lightricks/model.py +++ b/comfy/ldm/lightricks/model.py @@ -291,17 +291,17 @@ class BasicTransformerBlock(nn.Module): def forward(self, x, context=None, attention_mask=None, timestep=None, pe=None, transformer_options={}): shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None, None].to(device=x.device, dtype=x.dtype) + timestep.reshape(x.shape[0], timestep.shape[1], self.scale_shift_table.shape[0], -1)).unbind(dim=2) - norm_x = comfy.ldm.common_dit.rms_norm(x) - attn1_input = torch.addcmul(norm_x, norm_x, scale_msa).add_(shift_msa) - attn1_result = self.attn1(attn1_input, pe=pe, transformer_options=transformer_options) - x.addcmul_(attn1_result, gate_msa) + attn1_input = comfy.ldm.common_dit.rms_norm(x) + attn1_input = torch.addcmul(attn1_input, attn1_input, scale_msa).add_(shift_msa) + attn1_input = self.attn1(attn1_input, pe=pe, transformer_options=transformer_options) + x.addcmul_(attn1_input, gate_msa) + del attn1_input x += self.attn2(x, context=context, mask=attention_mask, transformer_options=transformer_options) - norm_x = comfy.ldm.common_dit.rms_norm(x) - y = torch.addcmul(norm_x, norm_x, scale_mlp).add_(shift_mlp) - ff_result = self.ff(y) - x.addcmul_(ff_result, gate_mlp) + y = comfy.ldm.common_dit.rms_norm(x) + y = torch.addcmul(y, y, scale_mlp).add_(shift_mlp) + x.addcmul_(self.ff(y), gate_mlp) return x @@ -336,8 +336,8 @@ def precompute_freqs_cis(indices_grid, dim, out_dtype, theta=10000.0, max_pos=[2 sin_vals = torch.cat([torch.zeros_like(sin_vals[:, :, :padding_size]), sin_vals], dim=-1) # Reshape and extract one value per pair (since repeat_interleave duplicates each value) - cos_vals = cos_vals.reshape(*cos_vals.shape[:2], -1, 2)[..., 0] # [B, N, dim//2] - sin_vals = sin_vals.reshape(*sin_vals.shape[:2], -1, 2)[..., 0] # [B, N, dim//2] + cos_vals = cos_vals.reshape(*cos_vals.shape[:2], -1, 2)[..., 0].to(out_dtype) # [B, N, dim//2] + sin_vals = sin_vals.reshape(*sin_vals.shape[:2], -1, 2)[..., 0].to(out_dtype) # [B, N, dim//2] # Build rotation matrix [[cos, -sin], [sin, cos]] and add heads dimension freqs_cis = torch.stack([ @@ -345,7 +345,7 @@ def precompute_freqs_cis(indices_grid, dim, out_dtype, theta=10000.0, max_pos=[2 torch.stack([sin_vals, cos_vals], dim=-1) ], dim=-2).unsqueeze(1) # [B, 1, N, dim//2, 2, 2] - return freqs_cis.to(out_dtype) + return freqs_cis class LTXVModel(torch.nn.Module): diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 81d3ee7c0..e5d0d17c1 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -415,7 +415,7 @@ class QwenImageTransformer2DModel(nn.Module): txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size) // 2, ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size) // 2)) txt_ids = torch.arange(txt_start, txt_start + context.shape[1], device=x.device).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) ids = torch.cat((txt_ids, img_ids), dim=1) - image_rotary_emb = self.pe_embedder(ids).to(torch.float32).contiguous() + image_rotary_emb = self.pe_embedder(ids).to(x.dtype).contiguous() del ids, txt_ids, img_ids hidden_states = self.img_in(hidden_states) From bda0eb2448135797d5a72f7236ce26d07e555baf Mon Sep 17 00:00:00 2001 From: Alexander Piskun 
<13381981+bigcat88@users.noreply.github.com> Date: Wed, 5 Nov 2025 12:16:00 +0200 Subject: [PATCH 0848/1073] feat(API-nodes): move Rodin3D nodes to new client; removed old api client.py (#10645) --- comfy_api_nodes/apis/PixverseController.py | 17 - comfy_api_nodes/apis/PixverseDto.py | 57 - comfy_api_nodes/apis/client.py | 981 ------------------ comfy_api_nodes/nodes_rodin.py | 196 ++-- comfy_api_nodes/util/client.py | 4 +- comfy_api_nodes/util/download_helpers.py | 2 +- .../{apis => util}/request_logger.py | 4 +- comfy_api_nodes/util/upload_helpers.py | 2 +- 8 files changed, 75 insertions(+), 1188 deletions(-) delete mode 100644 comfy_api_nodes/apis/PixverseController.py delete mode 100644 comfy_api_nodes/apis/PixverseDto.py delete mode 100644 comfy_api_nodes/apis/client.py rename comfy_api_nodes/{apis => util}/request_logger.py (100%) diff --git a/comfy_api_nodes/apis/PixverseController.py b/comfy_api_nodes/apis/PixverseController.py deleted file mode 100644 index 310c0f546..000000000 --- a/comfy_api_nodes/apis/PixverseController.py +++ /dev/null @@ -1,17 +0,0 @@ -# generated by datamodel-codegen: -# filename: filtered-openapi.yaml -# timestamp: 2025-04-29T23:44:54+00:00 - -from __future__ import annotations - -from typing import Optional - -from pydantic import BaseModel - -from . import PixverseDto - - -class ResponseData(BaseModel): - ErrCode: Optional[int] = None - ErrMsg: Optional[str] = None - Resp: Optional[PixverseDto.V2OpenAPII2VResp] = None diff --git a/comfy_api_nodes/apis/PixverseDto.py b/comfy_api_nodes/apis/PixverseDto.py deleted file mode 100644 index 323c38e96..000000000 --- a/comfy_api_nodes/apis/PixverseDto.py +++ /dev/null @@ -1,57 +0,0 @@ -# generated by datamodel-codegen: -# filename: filtered-openapi.yaml -# timestamp: 2025-04-29T23:44:54+00:00 - -from __future__ import annotations - -from typing import Optional - -from pydantic import BaseModel, Field - - -class V2OpenAPII2VResp(BaseModel): - video_id: Optional[int] = Field(None, description='Video_id') - - -class V2OpenAPIT2VReq(BaseModel): - aspect_ratio: str = Field( - ..., description='Aspect ratio (16:9, 4:3, 1:1, 3:4, 9:16)', examples=['16:9'] - ) - duration: int = Field( - ..., - description='Video duration (5, 8 seconds, --model=v3.5 only allows 5,8; --quality=1080p does not support 8s)', - examples=[5], - ) - model: str = Field( - ..., description='Model version (only supports v3.5)', examples=['v3.5'] - ) - motion_mode: Optional[str] = Field( - 'normal', - description='Motion mode (normal, fast, --fast only available when duration=5; --quality=1080p does not support fast)', - examples=['normal'], - ) - negative_prompt: Optional[str] = Field( - None, description='Negative prompt\n', max_length=2048 - ) - prompt: str = Field(..., description='Prompt', max_length=2048) - quality: str = Field( - ..., - description='Video quality ("360p"(Turbo model), "540p", "720p", "1080p")', - examples=['540p'], - ) - seed: Optional[int] = Field(None, description='Random seed, range: 0 - 2147483647') - style: Optional[str] = Field( - None, - description='Style (effective when model=v3.5, "anime", "3d_animation", "clay", "comic", "cyberpunk") Do not include style parameter unless needed', - examples=['anime'], - ) - template_id: Optional[int] = Field( - None, - description='Template ID (template_id must be activated before use)', - examples=[302325299692608], - ) - water_mark: Optional[bool] = Field( - False, - description='Watermark (true: add watermark, false: no watermark)', - examples=[False], - ) diff --git 
a/comfy_api_nodes/apis/client.py b/comfy_api_nodes/apis/client.py deleted file mode 100644 index bdaddcc88..000000000 --- a/comfy_api_nodes/apis/client.py +++ /dev/null @@ -1,981 +0,0 @@ -""" -API Client Framework for api.comfy.org. - -This module provides a flexible framework for making API requests from ComfyUI nodes. -It supports both synchronous and asynchronous API operations with proper type validation. - -Key Components: --------------- -1. ApiClient - Handles HTTP requests with authentication and error handling -2. ApiEndpoint - Defines a single HTTP endpoint with its request/response models -3. ApiOperation - Executes a single synchronous API operation - -Usage Examples: --------------- - -# Example 1: Synchronous API Operation -# ------------------------------------ -# For a simple API call that returns the result immediately: - -# 1. Create the API client -api_client = ApiClient( - base_url="https://api.example.com", - auth_token="your_auth_token_here", - comfy_api_key="your_comfy_api_key_here", - timeout=30.0, - verify_ssl=True -) - -# 2. Define the endpoint -user_info_endpoint = ApiEndpoint( - path="/v1/users/me", - method=HttpMethod.GET, - request_model=EmptyRequest, # No request body needed - response_model=UserProfile, # Pydantic model for the response - query_params=None -) - -# 3. Create the request object -request = EmptyRequest() - -# 4. Create and execute the operation -operation = ApiOperation( - endpoint=user_info_endpoint, - request=request -) -user_profile = await operation.execute(client=api_client) # Returns immediately with the result - - -# Example 2: Asynchronous API Operation with Polling -# ------------------------------------------------- -# For an API that starts a task and requires polling for completion: - -# 1. Define the endpoints (initial request and polling) -generate_image_endpoint = ApiEndpoint( - path="/v1/images/generate", - method=HttpMethod.POST, - request_model=ImageGenerationRequest, - response_model=TaskCreatedResponse, - query_params=None -) - -check_task_endpoint = ApiEndpoint( - path="/v1/tasks/{task_id}", - method=HttpMethod.GET, - request_model=EmptyRequest, - response_model=ImageGenerationResult, - query_params=None -) - -# 2. Create the request object -request = ImageGenerationRequest( - prompt="a beautiful sunset over mountains", - width=1024, - height=1024, - num_images=1 -) - -# 3. Create and execute the polling operation -operation = PollingOperation( - initial_endpoint=generate_image_endpoint, - initial_request=request, - poll_endpoint=check_task_endpoint, - task_id_field="task_id", - status_field="status", - completed_statuses=["completed"], - failed_statuses=["failed", "error"] -) - -# This will make the initial request and then poll until completion -result = await operation.execute(client=api_client) # Returns the final ImageGenerationResult when done -""" - -from __future__ import annotations -import aiohttp -import asyncio -import logging -import io -import os -import socket -from aiohttp.client_exceptions import ClientError, ClientResponseError -from typing import Type, Optional, Any, TypeVar, Generic, Callable -from enum import Enum -import json -from urllib.parse import urljoin, urlparse -from pydantic import BaseModel, Field -import uuid # For generating unique operation IDs - -from server import PromptServer -from comfy.cli_args import args -from comfy import utils -from . 
import request_logger - -T = TypeVar("T", bound=BaseModel) -R = TypeVar("R", bound=BaseModel) -P = TypeVar("P", bound=BaseModel) # For poll response - -PROGRESS_BAR_MAX = 100 - - -class NetworkError(Exception): - """Base exception for network-related errors with diagnostic information.""" - pass - - -class LocalNetworkError(NetworkError): - """Exception raised when local network connectivity issues are detected.""" - pass - - -class ApiServerError(NetworkError): - """Exception raised when the API server is unreachable but internet is working.""" - pass - - -class EmptyRequest(BaseModel): - """Base class for empty request bodies. - For GET requests, fields will be sent as query parameters.""" - - pass - - -class UploadRequest(BaseModel): - file_name: str = Field(..., description="Filename to upload") - content_type: Optional[str] = Field( - None, - description="Mime type of the file. For example: image/png, image/jpeg, video/mp4, etc.", - ) - - -class UploadResponse(BaseModel): - download_url: str = Field(..., description="URL to GET uploaded file") - upload_url: str = Field(..., description="URL to PUT file to upload") - - -class HttpMethod(str, Enum): - GET = "GET" - POST = "POST" - PUT = "PUT" - DELETE = "DELETE" - PATCH = "PATCH" - - -class ApiClient: - """ - Client for making HTTP requests to an API with authentication, error handling, and retry logic. - """ - - def __init__( - self, - base_url: str, - auth_token: Optional[str] = None, - comfy_api_key: Optional[str] = None, - timeout: float = 3600.0, - verify_ssl: bool = True, - max_retries: int = 3, - retry_delay: float = 1.0, - retry_backoff_factor: float = 2.0, - retry_status_codes: Optional[tuple[int, ...]] = None, - session: Optional[aiohttp.ClientSession] = None, - ): - self.base_url = base_url - self.auth_token = auth_token - self.comfy_api_key = comfy_api_key - self.timeout = timeout - self.verify_ssl = verify_ssl - self.max_retries = max_retries - self.retry_delay = retry_delay - self.retry_backoff_factor = retry_backoff_factor - # Default retry status codes: 408 (Request Timeout), 429 (Too Many Requests), - # 500, 502, 503, 504 (Server Errors) - self.retry_status_codes = retry_status_codes or (408, 429, 500, 502, 503, 504) - self._session: Optional[aiohttp.ClientSession] = session - self._owns_session = session is None # Track if we have to close it - - @staticmethod - def _generate_operation_id(path: str) -> str: - """Generates a unique operation ID for logging.""" - return f"{path.strip('/').replace('/', '_')}_{uuid.uuid4().hex[:8]}" - - @staticmethod - def _create_json_payload_args( - data: Optional[dict[str, Any]] = None, - headers: Optional[dict[str, str]] = None, - ) -> dict[str, Any]: - return { - "json": data, - "headers": headers, - } - - def _create_form_data_args( - self, - data: dict[str, Any] | None, - files: dict[str, Any] | None, - headers: Optional[dict[str, str]] = None, - multipart_parser: Callable | None = None, - ) -> dict[str, Any]: - if headers and "Content-Type" in headers: - del headers["Content-Type"] - - if multipart_parser and data: - data = multipart_parser(data) - - if isinstance(data, aiohttp.FormData): - form = data # If the parser already returned a FormData, pass it through - else: - form = aiohttp.FormData(default_to_multipart=True) - if data: # regular text fields - for k, v in data.items(): - if v is None: - continue # aiohttp fails to serialize "None" values - # aiohttp expects strings or bytes; convert enums etc. 
- form.add_field(k, str(v) if not isinstance(v, (bytes, bytearray)) else v) - - if files: - file_iter = files if isinstance(files, list) else files.items() - for field_name, file_obj in file_iter: - if file_obj is None: - continue # aiohttp fails to serialize "None" values - # file_obj can be (filename, bytes/io.BytesIO, content_type) tuple - if isinstance(file_obj, tuple): - filename, file_value, content_type = self._unpack_tuple(file_obj) - else: - file_value = file_obj - filename = getattr(file_obj, "name", field_name) - content_type = "application/octet-stream" - - form.add_field( - name=field_name, - value=file_value, - filename=filename, - content_type=content_type, - ) - return {"data": form, "headers": headers or {}} - - @staticmethod - def _create_urlencoded_form_data_args( - data: dict[str, Any], - headers: Optional[dict[str, str]] = None, - ) -> dict[str, Any]: - headers = headers or {} - headers["Content-Type"] = "application/x-www-form-urlencoded" - return { - "data": data, - "headers": headers, - } - - def get_headers(self) -> dict[str, str]: - """Get headers for API requests, including authentication if available""" - headers = {"Content-Type": "application/json", "Accept": "application/json"} - - if self.auth_token: - headers["Authorization"] = f"Bearer {self.auth_token}" - elif self.comfy_api_key: - headers["X-API-KEY"] = self.comfy_api_key - - return headers - - async def _check_connectivity(self, target_url: str) -> dict[str, bool]: - """ - Check connectivity to determine if network issues are local or server-related. - - Args: - target_url: URL to check connectivity to - - Returns: - Dictionary with connectivity status details - """ - results = { - "internet_accessible": False, - "api_accessible": False, - "is_local_issue": False, - "is_api_issue": False, - } - timeout = aiohttp.ClientTimeout(total=5.0) - async with aiohttp.ClientSession(timeout=timeout) as session: - try: - async with session.get("https://www.google.com", ssl=self.verify_ssl) as resp: - results["internet_accessible"] = resp.status < 500 - except (ClientError, asyncio.TimeoutError, socket.gaierror): - results["is_local_issue"] = True - return results # cannot reach the internet – early exit - - # Now check API health endpoint - parsed = urlparse(target_url) - health_url = f"{parsed.scheme}://{parsed.netloc}/health" - try: - async with session.get(health_url, ssl=self.verify_ssl) as resp: - results["api_accessible"] = resp.status < 500 - except ClientError: - pass # leave as False - - results["is_api_issue"] = results["internet_accessible"] and not results["api_accessible"] - return results - - async def request( - self, - method: str, - path: str, - params: Optional[dict[str, Any]] = None, - data: Optional[dict[str, Any]] = None, - files: Optional[dict[str, Any] | list[tuple[str, Any]]] = None, - headers: Optional[dict[str, str]] = None, - content_type: str = "application/json", - multipart_parser: Callable | None = None, - retry_count: int = 0, # Used internally for tracking retries - ) -> dict[str, Any]: - """ - Make an HTTP request to the API with automatic retries for transient errors. - - Args: - method: HTTP method (GET, POST, etc.) - path: API endpoint path (will be joined with base_url) - params: Query parameters - data: body data - files: Files to upload - headers: Additional headers - content_type: Content type of the request. Defaults to application/json. 
- retry_count: Internal parameter for tracking retries, do not set manually - - Returns: - Parsed JSON response - - Raises: - LocalNetworkError: If local network connectivity issues are detected - ApiServerError: If the API server is unreachable but internet is working - Exception: For other request failures - """ - - # Build full URL and merge headers - relative_path = path.lstrip("/") - url = urljoin(self.base_url, relative_path) - self._check_auth(self.auth_token, self.comfy_api_key) - - request_headers = self.get_headers() - if headers: - request_headers.update(headers) - if files: - request_headers.pop("Content-Type", None) - if params: - params = {k: v for k, v in params.items() if v is not None} # aiohttp fails to serialize None values - - logging.debug("[DEBUG] Request Headers: %s", request_headers) - logging.debug("[DEBUG] Files: %s", files) - logging.debug("[DEBUG] Params: %s", params) - logging.debug("[DEBUG] Data: %s", data) - - if content_type == "application/x-www-form-urlencoded": - payload_args = self._create_urlencoded_form_data_args(data or {}, request_headers) - elif content_type == "multipart/form-data": - payload_args = self._create_form_data_args(data, files, request_headers, multipart_parser) - else: - payload_args = self._create_json_payload_args(data, request_headers) - - operation_id = self._generate_operation_id(path) - request_logger.log_request_response( - operation_id=operation_id, - request_method=method, - request_url=url, - request_headers=request_headers, - request_params=params, - request_data=data if content_type == "application/json" else "[form-data or other]", - ) - - session = await self._get_session() - try: - async with session.request( - method, - url, - params=params, - ssl=self.verify_ssl, - **payload_args, - ) as resp: - if resp.status >= 400: - try: - error_data = await resp.json() - except (aiohttp.ContentTypeError, json.JSONDecodeError): - error_data = await resp.text() - - return await self._handle_http_error( - ClientResponseError(resp.request_info, resp.history, status=resp.status, message=error_data), - operation_id, - method, - url, - params, - data, - files, - headers, - content_type, - multipart_parser, - retry_count=retry_count, - response_content=error_data, - ) - - # Success – parse JSON (safely) and log - try: - payload = await resp.json() - response_content_to_log = payload - except (aiohttp.ContentTypeError, json.JSONDecodeError): - payload = {} - response_content_to_log = await resp.text() - - request_logger.log_request_response( - operation_id=operation_id, - request_method=method, - request_url=url, - response_status_code=resp.status, - response_headers=dict(resp.headers), - response_content=response_content_to_log, - ) - return payload - - except (ClientError, asyncio.TimeoutError, socket.gaierror) as e: - # Treat as *connection* problem – optionally retry, else escalate - if retry_count < self.max_retries: - delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) - logging.warning("Connection error. 
Retrying in %.2fs (%s/%s): %s", delay, retry_count + 1, - self.max_retries, str(e)) - await asyncio.sleep(delay) - return await self.request( - method, - path, - params=params, - data=data, - files=files, - headers=headers, - content_type=content_type, - multipart_parser=multipart_parser, - retry_count=retry_count + 1, - ) - # One final connectivity check for diagnostics - connectivity = await self._check_connectivity(self.base_url) - if connectivity["is_local_issue"]: - raise LocalNetworkError( - "Unable to connect to the API server due to local network issues. " - "Please check your internet connection and try again." - ) from e - raise ApiServerError( - f"The API server at {self.base_url} is currently unreachable. " - f"The service may be experiencing issues. Please try again later." - ) from e - - @staticmethod - def _check_auth(auth_token, comfy_api_key): - """Verify that an auth token is present or comfy_api_key is present""" - if auth_token is None and comfy_api_key is None: - raise Exception("Unauthorized: Please login first to use this node.") - return auth_token or comfy_api_key - - @staticmethod - async def upload_file( - upload_url: str, - file: io.BytesIO | str, - content_type: str | None = None, - max_retries: int = 3, - retry_delay: float = 1.0, - retry_backoff_factor: float = 2.0, - ) -> aiohttp.ClientResponse: - """Upload a file to the API with retry logic. - - Args: - upload_url: The URL to upload to - file: Either a file path string, BytesIO object, or tuple of (file_path, filename) - content_type: Optional mime type to set for the upload - max_retries: Maximum number of retry attempts - retry_delay: Initial delay between retries in seconds - retry_backoff_factor: Multiplier for the delay after each retry - """ - headers: dict[str, str] = {} - skip_auto_headers: set[str] = set() - if content_type: - headers["Content-Type"] = content_type - else: - # tell aiohttp not to add Content-Type that will break the request signature and result in a 403 status. 
- skip_auto_headers.add("Content-Type") - - # Extract file bytes - if isinstance(file, io.BytesIO): - file.seek(0) - data = file.read() - elif isinstance(file, str): - with open(file, "rb") as f: - data = f.read() - else: - raise ValueError("File must be BytesIO or str path") - - parsed = urlparse(upload_url) - basename = os.path.basename(parsed.path) or parsed.netloc or "upload" - operation_id = f"upload_{basename}_{uuid.uuid4().hex[:8]}" - request_logger.log_request_response( - operation_id=operation_id, - request_method="PUT", - request_url=upload_url, - request_headers=headers, - request_data=f"[File data {len(data)} bytes]", - ) - - delay = retry_delay - for attempt in range(max_retries + 1): - try: - timeout = aiohttp.ClientTimeout(total=None) # honour server side timeouts - async with aiohttp.ClientSession(timeout=timeout) as session: - async with session.put( - upload_url, data=data, headers=headers, skip_auto_headers=skip_auto_headers, - ) as resp: - resp.raise_for_status() - request_logger.log_request_response( - operation_id=operation_id, - request_method="PUT", - request_url=upload_url, - response_status_code=resp.status, - response_headers=dict(resp.headers), - response_content="File uploaded successfully.", - ) - return resp - except (ClientError, asyncio.TimeoutError) as e: - request_logger.log_request_response( - operation_id=operation_id, - request_method="PUT", - request_url=upload_url, - response_status_code=e.status if hasattr(e, "status") else None, - response_headers=dict(e.headers) if hasattr(e, "headers") else None, - response_content=None, - error_message=f"{type(e).__name__}: {str(e)}", - ) - if attempt < max_retries: - logging.warning( - "Upload failed (%s/%s). Retrying in %.2fs. %s", attempt + 1, max_retries, delay, str(e) - ) - await asyncio.sleep(delay) - delay *= retry_backoff_factor - else: - raise NetworkError(f"Failed to upload file after {max_retries + 1} attempts: {e}") from e - - async def _handle_http_error( - self, - exc: ClientResponseError, - operation_id: str, - *req_meta, - retry_count: int, - response_content: dict | str = "", - ) -> dict[str, Any]: - status_code = exc.status - if status_code == 401: - user_friendly = "Unauthorized: Please login first to use this node." - elif status_code == 402: - user_friendly = "Payment Required: Please add credits to your account to use this node." - elif status_code == 409: - user_friendly = "There is a problem with your account. Please contact support@comfy.org." - elif status_code == 429: - user_friendly = "Rate Limit Exceeded: Please try again later." 
- else: - if isinstance(response_content, dict): - if "error" in response_content and "message" in response_content["error"]: - user_friendly = f"API Error: {response_content['error']['message']}" - if "type" in response_content["error"]: - user_friendly += f" (Type: {response_content['error']['type']})" - else: # Handle cases where error is just a JSON dict with unknown format - user_friendly = f"API Error: {json.dumps(response_content)}" - else: - if len(response_content) < 200: # Arbitrary limit for display - user_friendly = f"API Error (raw): {response_content}" - else: - user_friendly = f"API Error (raw, status {response_content})" - - request_logger.log_request_response( - operation_id=operation_id, - request_method=req_meta[0], - request_url=req_meta[1], - response_status_code=exc.status, - response_headers=dict(req_meta[5]) if req_meta[5] else None, - response_content=response_content, - error_message=f"HTTP Error {exc.status}", - ) - - logging.debug("[DEBUG] API Error: %s (Status: %s)", user_friendly, status_code) - if response_content: - logging.debug("[DEBUG] Response content: %s", response_content) - - # Retry if eligible - if status_code in self.retry_status_codes and retry_count < self.max_retries: - delay = self.retry_delay * (self.retry_backoff_factor ** retry_count) - logging.warning( - "HTTP error %s. Retrying in %.2fs (%s/%s)", - status_code, - delay, - retry_count + 1, - self.max_retries, - ) - await asyncio.sleep(delay) - return await self.request( - req_meta[0], # method - req_meta[1].replace(self.base_url, ""), # path - params=req_meta[2], - data=req_meta[3], - files=req_meta[4], - headers=req_meta[5], - content_type=req_meta[6], - multipart_parser=req_meta[7], - retry_count=retry_count + 1, - ) - - raise Exception(user_friendly) from exc - - @staticmethod - def _unpack_tuple(t): - """Helper to normalise (filename, file, content_type) tuples.""" - if len(t) == 3: - return t - elif len(t) == 2: - return t[0], t[1], "application/octet-stream" - else: - raise ValueError("files tuple must be (filename, file[, content_type])") - - async def _get_session(self) -> aiohttp.ClientSession: - if self._session is None or self._session.closed: - timeout = aiohttp.ClientTimeout(total=self.timeout) - self._session = aiohttp.ClientSession(timeout=timeout) - self._owns_session = True - return self._session - - async def close(self) -> None: - if self._owns_session and self._session and not self._session.closed: - await self._session.close() - - async def __aenter__(self) -> "ApiClient": - """Allow usage as async‑context‑manager – ensures clean teardown""" - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.close() - - -class ApiEndpoint(Generic[T, R]): - """Defines an API endpoint with its request and response types""" - - def __init__( - self, - path: str, - method: HttpMethod, - request_model: Type[T], - response_model: Type[R], - query_params: Optional[dict[str, Any]] = None, - ): - """Initialize an API endpoint definition. - - Args: - path: The URL path for this endpoint, can include placeholders like {id} - method: The HTTP method to use (GET, POST, etc.) 
- request_model: Pydantic model class that defines the structure and validation rules for API requests to this endpoint - response_model: Pydantic model class that defines the structure and validation rules for API responses from this endpoint - query_params: Optional dictionary of query parameters to include in the request - """ - self.path = path - self.method = method - self.request_model = request_model - self.response_model = response_model - self.query_params = query_params or {} - - -class SynchronousOperation(Generic[T, R]): - """Represents a single synchronous API operation.""" - - def __init__( - self, - endpoint: ApiEndpoint[T, R], - request: T, - files: Optional[dict[str, Any] | list[tuple[str, Any]]] = None, - api_base: str | None = None, - auth_token: Optional[str] = None, - comfy_api_key: Optional[str] = None, - auth_kwargs: Optional[dict[str, str]] = None, - timeout: float = 7200.0, - verify_ssl: bool = True, - content_type: str = "application/json", - multipart_parser: Callable | None = None, - max_retries: int = 3, - retry_delay: float = 1.0, - retry_backoff_factor: float = 2.0, - ) -> None: - self.endpoint = endpoint - self.request = request - self.files = files - self.api_base: str = api_base or args.comfy_api_base - self.auth_token = auth_token - self.comfy_api_key = comfy_api_key - if auth_kwargs is not None: - self.auth_token = auth_kwargs.get("auth_token", self.auth_token) - self.comfy_api_key = auth_kwargs.get("comfy_api_key", self.comfy_api_key) - self.timeout = timeout - self.verify_ssl = verify_ssl - self.content_type = content_type - self.multipart_parser = multipart_parser - self.max_retries = max_retries - self.retry_delay = retry_delay - self.retry_backoff_factor = retry_backoff_factor - - async def execute(self, client: Optional[ApiClient] = None) -> R: - owns_client = client is None - if owns_client: - client = ApiClient( - base_url=self.api_base, - auth_token=self.auth_token, - comfy_api_key=self.comfy_api_key, - timeout=self.timeout, - verify_ssl=self.verify_ssl, - max_retries=self.max_retries, - retry_delay=self.retry_delay, - retry_backoff_factor=self.retry_backoff_factor, - ) - - try: - request_dict: Optional[dict[str, Any]] - if isinstance(self.request, EmptyRequest): - request_dict = None - else: - request_dict = self.request.model_dump(exclude_none=True) - for k, v in list(request_dict.items()): - if isinstance(v, Enum): - request_dict[k] = v.value - - logging.debug("[DEBUG] API Request: %s %s", self.endpoint.method.value, self.endpoint.path) - logging.debug("[DEBUG] Request Data: %s", json.dumps(request_dict, indent=2)) - logging.debug("[DEBUG] Query Params: %s", self.endpoint.query_params) - - response_json = await client.request( - self.endpoint.method.value, - self.endpoint.path, - params=self.endpoint.query_params, - data=request_dict, - files=self.files, - content_type=self.content_type, - multipart_parser=self.multipart_parser, - ) - - logging.debug("=" * 50) - logging.debug("[DEBUG] RESPONSE DETAILS:") - logging.debug("[DEBUG] Status Code: 200 (Success)") - logging.debug("[DEBUG] Response Body: %s", json.dumps(response_json, indent=2)) - logging.debug("=" * 50) - - parsed_response = self.endpoint.response_model.model_validate(response_json) - logging.debug("[DEBUG] Parsed Response: %s", parsed_response) - return parsed_response - finally: - if owns_client: - await client.close() - - -class TaskStatus(str, Enum): - """Enum for task status values""" - - COMPLETED = "completed" - FAILED = "failed" - PENDING = "pending" - - -class 
PollingOperation(Generic[T, R]): - """Represents an asynchronous API operation that requires polling for completion.""" - - def __init__( - self, - poll_endpoint: ApiEndpoint[EmptyRequest, R], - completed_statuses: list[str], - failed_statuses: list[str], - *, - status_extractor: Callable[[R], Optional[str]], - progress_extractor: Callable[[R], Optional[float]] | None = None, - result_url_extractor: Callable[[R], Optional[str]] | None = None, - price_extractor: Callable[[R], Optional[float]] | None = None, - request: Optional[T] = None, - api_base: str | None = None, - auth_token: Optional[str] = None, - comfy_api_key: Optional[str] = None, - auth_kwargs: Optional[dict[str, str]] = None, - poll_interval: float = 5.0, - max_poll_attempts: int = 120, # Default max polling attempts (10 minutes with 5s interval) - max_retries: int = 3, # Max retries per individual API call - retry_delay: float = 1.0, - retry_backoff_factor: float = 2.0, - estimated_duration: Optional[float] = None, - node_id: Optional[str] = None, - ) -> None: - self.poll_endpoint = poll_endpoint - self.request = request - self.api_base: str = api_base or args.comfy_api_base - self.auth_token = auth_token - self.comfy_api_key = comfy_api_key - if auth_kwargs is not None: - self.auth_token = auth_kwargs.get("auth_token", self.auth_token) - self.comfy_api_key = auth_kwargs.get("comfy_api_key", self.comfy_api_key) - self.poll_interval = poll_interval - self.max_poll_attempts = max_poll_attempts - self.max_retries = max_retries - self.retry_delay = retry_delay - self.retry_backoff_factor = retry_backoff_factor - self.estimated_duration = estimated_duration - self.status_extractor = status_extractor or (lambda x: getattr(x, "status", None)) - self.progress_extractor = progress_extractor - self.result_url_extractor = result_url_extractor - self.price_extractor = price_extractor - self.node_id = node_id - self.completed_statuses = completed_statuses - self.failed_statuses = failed_statuses - self.final_response: Optional[R] = None - self.extracted_price: Optional[float] = None - - async def execute(self, client: Optional[ApiClient] = None) -> R: - owns_client = client is None - if owns_client: - client = ApiClient( - base_url=self.api_base, - auth_token=self.auth_token, - comfy_api_key=self.comfy_api_key, - max_retries=self.max_retries, - retry_delay=self.retry_delay, - retry_backoff_factor=self.retry_backoff_factor, - ) - try: - return await self._poll_until_complete(client) - finally: - if owns_client: - await client.close() - - def _display_text_on_node(self, text: str): - if not self.node_id: - return - if self.extracted_price is not None: - text = f"Price: ${self.extracted_price}\n{text}" - PromptServer.instance.send_progress_text(text, self.node_id) - - def _display_time_progress_on_node(self, time_completed: int | float): - if not self.node_id: - return - if self.estimated_duration is not None: - remaining = max(0, int(self.estimated_duration) - time_completed) - message = f"Task in progress: {time_completed}s (~{remaining}s remaining)" - else: - message = f"Task in progress: {time_completed}s" - self._display_text_on_node(message) - - def _check_task_status(self, response: R) -> TaskStatus: - try: - status = self.status_extractor(response) - if status in self.completed_statuses: - return TaskStatus.COMPLETED - if status in self.failed_statuses: - return TaskStatus.FAILED - return TaskStatus.PENDING - except Exception as e: - logging.error("Error extracting status: %s", e) - return TaskStatus.PENDING - - async def 
_poll_until_complete(self, client: ApiClient) -> R: - """Poll until the task is complete""" - consecutive_errors = 0 - max_consecutive_errors = min(5, self.max_retries * 2) # Limit consecutive errors - - if self.progress_extractor: - progress = utils.ProgressBar(PROGRESS_BAR_MAX) - - status = TaskStatus.PENDING - for poll_count in range(1, self.max_poll_attempts + 1): - try: - logging.debug("[DEBUG] Polling attempt #%s", poll_count) - - request_dict = None if self.request is None else self.request.model_dump(exclude_none=True) - - if poll_count == 1: - logging.debug( - "[DEBUG] Poll Request: %s %s", - self.poll_endpoint.method.value, - self.poll_endpoint.path, - ) - logging.debug( - "[DEBUG] Poll Request Data: %s", - json.dumps(request_dict, indent=2) if request_dict else "None", - ) - - # Query task status - resp = await client.request( - self.poll_endpoint.method.value, - self.poll_endpoint.path, - params=self.poll_endpoint.query_params, - data=request_dict, - ) - consecutive_errors = 0 # reset on success - response_obj: R = self.poll_endpoint.response_model.model_validate(resp) - - # Check if task is complete - status = self._check_task_status(response_obj) - logging.debug("[DEBUG] Task Status: %s", status) - - # If progress extractor is provided, extract progress - if self.progress_extractor: - new_progress = self.progress_extractor(response_obj) - if new_progress is not None: - progress.update_absolute(new_progress, total=PROGRESS_BAR_MAX) - - if self.price_extractor: - price = self.price_extractor(response_obj) - if price is not None: - self.extracted_price = price - - if status == TaskStatus.COMPLETED: - message = "Task completed successfully" - if self.result_url_extractor: - result_url = self.result_url_extractor(response_obj) - if result_url: - message = f"Result URL: {result_url}" - logging.debug("[DEBUG] %s", message) - self._display_text_on_node(message) - self.final_response = response_obj - if self.progress_extractor: - progress.update(100) - return self.final_response - if status == TaskStatus.FAILED: - message = f"Task failed: {json.dumps(resp)}" - logging.error("[DEBUG] %s", message) - raise Exception(message) - logging.debug("[DEBUG] Task still pending, continuing to poll...") - # Task pending – wait - for i in range(int(self.poll_interval)): - self._display_time_progress_on_node((poll_count - 1) * self.poll_interval + i) - await asyncio.sleep(1) - - except (LocalNetworkError, ApiServerError, NetworkError) as e: - consecutive_errors += 1 - if consecutive_errors >= max_consecutive_errors: - raise Exception( - f"Polling aborted after {consecutive_errors} network errors: {str(e)}" - ) from e - logging.warning( - "Network error (%s/%s): %s", - consecutive_errors, - max_consecutive_errors, - str(e), - ) - await asyncio.sleep(self.poll_interval) - except Exception as e: - # For other errors, increment count and potentially abort - consecutive_errors += 1 - if consecutive_errors >= max_consecutive_errors or status == TaskStatus.FAILED: - raise Exception( - f"Polling aborted after {consecutive_errors} consecutive errors: {str(e)}" - ) from e - - logging.error("[DEBUG] Polling error: %s", str(e)) - logging.warning( - "Error during polling (attempt %s/%s): %s. 
Will retry in %s seconds.", - poll_count, - self.max_poll_attempts, - str(e), - self.poll_interval, - ) - await asyncio.sleep(self.poll_interval) - - # If we've exhausted all polling attempts - raise Exception( - f"Polling timed out after {self.max_poll_attempts} attempts (" f"{self.max_poll_attempts * self.poll_interval} seconds). " - "The operation may still be running on the server but is taking longer than expected." - ) diff --git a/comfy_api_nodes/nodes_rodin.py b/comfy_api_nodes/nodes_rodin.py index ad4029236..e60e7a6d6 100644 --- a/comfy_api_nodes/nodes_rodin.py +++ b/comfy_api_nodes/nodes_rodin.py @@ -5,12 +5,9 @@ Rodin API docs: https://developer.hyper3d.ai/ """ -from __future__ import annotations from inspect import cleandoc import folder_paths as comfy_paths -import aiohttp import os -import asyncio import logging import math from typing import Optional @@ -26,11 +23,11 @@ from comfy_api_nodes.apis.rodin_api import ( Rodin3DDownloadResponse, JobStatus, ) -from comfy_api_nodes.apis.client import ( +from comfy_api_nodes.util import ( + sync_op, + poll_op, ApiEndpoint, - HttpMethod, - SynchronousOperation, - PollingOperation, + download_url_to_bytesio, ) from comfy_api.latest import ComfyExtension, IO @@ -121,35 +118,31 @@ def tensor_to_filelike(tensor, max_pixels: int = 2048*2048): async def create_generate_task( + cls: type[IO.ComfyNode], images=None, seed=1, material="PBR", quality_override=18000, tier="Regular", mesh_mode="Quad", - TAPose = False, - auth_kwargs: Optional[dict[str, str]] = None, + ta_pose: bool = False, ): if images is None: raise Exception("Rodin 3D generate requires at least 1 image.") if len(images) > 5: raise Exception("Rodin 3D generate requires up to 5 image.") - path = "/proxy/rodin/api/v2/rodin" - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path=path, - method=HttpMethod.POST, - request_model=Rodin3DGenerateRequest, - response_model=Rodin3DGenerateResponse, - ), - request=Rodin3DGenerateRequest( + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/rodin/api/v2/rodin", method="POST"), + response_model=Rodin3DGenerateResponse, + data=Rodin3DGenerateRequest( seed=seed, tier=tier, material=material, quality_override=quality_override, mesh_mode=mesh_mode, - TAPose=TAPose, + TAPose=ta_pose, ), files=[ ( @@ -159,11 +152,8 @@ async def create_generate_task( for image in images if image is not None ], content_type="multipart/form-data", - auth_kwargs=auth_kwargs, ) - response = await operation.execute() - if hasattr(response, "error"): error_message = f"Rodin3D Create 3D generate Task Failed. 
Message: {response.message}, error: {response.error}" logging.error(error_message) @@ -187,74 +177,46 @@ def check_rodin_status(response: Rodin3DCheckStatusResponse) -> str: return "DONE" return "Generating" +def extract_progress(response: Rodin3DCheckStatusResponse) -> Optional[int]: + if not response.jobs: + return None + completed_count = sum(1 for job in response.jobs if job.status == JobStatus.Done) + return int((completed_count / len(response.jobs)) * 100) -async def poll_for_task_status( - subscription_key, auth_kwargs: Optional[dict[str, str]] = None, -) -> Rodin3DCheckStatusResponse: - poll_operation = PollingOperation( - poll_endpoint=ApiEndpoint( - path="/proxy/rodin/api/v2/status", - method=HttpMethod.POST, - request_model=Rodin3DCheckStatusRequest, - response_model=Rodin3DCheckStatusResponse, - ), - request=Rodin3DCheckStatusRequest(subscription_key=subscription_key), - completed_statuses=["DONE"], - failed_statuses=["FAILED"], - status_extractor=check_rodin_status, - poll_interval=3.0, - auth_kwargs=auth_kwargs, - ) + +async def poll_for_task_status(subscription_key: str, cls: type[IO.ComfyNode]) -> Rodin3DCheckStatusResponse: logging.info("[ Rodin3D API - CheckStatus ] Generate Start!") - return await poll_operation.execute() - - -async def get_rodin_download_list(uuid, auth_kwargs: Optional[dict[str, str]] = None) -> Rodin3DDownloadResponse: - logging.info("[ Rodin3D API - Downloading ] Generate Successfully!") - operation = SynchronousOperation( - endpoint=ApiEndpoint( - path="/proxy/rodin/api/v2/download", - method=HttpMethod.POST, - request_model=Rodin3DDownloadRequest, - response_model=Rodin3DDownloadResponse, - ), - request=Rodin3DDownloadRequest(task_uuid=uuid), - auth_kwargs=auth_kwargs, + return await poll_op( + cls, + ApiEndpoint(path="/proxy/rodin/api/v2/status", method="POST"), + response_model=Rodin3DCheckStatusResponse, + data=Rodin3DCheckStatusRequest(subscription_key=subscription_key), + status_extractor=check_rodin_status, + progress_extractor=extract_progress, ) - return await operation.execute() -async def download_files(url_list, task_uuid): +async def get_rodin_download_list(uuid: str, cls: type[IO.ComfyNode]) -> Rodin3DDownloadResponse: + logging.info("[ Rodin3D API - Downloading ] Generate Successfully!") + return await sync_op( + cls, + ApiEndpoint(path="/proxy/rodin/api/v2/download", method="POST"), + response_model=Rodin3DDownloadResponse, + data=Rodin3DDownloadRequest(task_uuid=uuid), + monitor_progress=False, + ) + + +async def download_files(url_list, task_uuid: str): result_folder_name = f"Rodin3D_{task_uuid}" save_path = os.path.join(comfy_paths.get_output_directory(), result_folder_name) os.makedirs(save_path, exist_ok=True) model_file_path = None - async with aiohttp.ClientSession() as session: - for i in url_list.list: - file_path = os.path.join(save_path, i.name) - if file_path.endswith(".glb"): - model_file_path = os.path.join(result_folder_name, i.name) - logging.info("[ Rodin3D API - download_files ] Downloading file: %s", file_path) - max_retries = 5 - for attempt in range(max_retries): - try: - async with session.get(i.url) as resp: - resp.raise_for_status() - with open(file_path, "wb") as f: - async for chunk in resp.content.iter_chunked(32 * 1024): - f.write(chunk) - break - except Exception as e: - logging.info("[ Rodin3D API - download_files ] Error downloading %s:%s", file_path, str(e)) - if attempt < max_retries - 1: - logging.info("Retrying...") - await asyncio.sleep(2) - else: - logging.info( - "[ Rodin3D API - download_files ] 
Failed to download %s after %s attempts.", - file_path, - max_retries, - ) + for i in url_list.list: + file_path = os.path.join(save_path, i.name) + if file_path.endswith(".glb"): + model_file_path = os.path.join(result_folder_name, i.name) + await download_url_to_bytesio(i.url, file_path) return model_file_path @@ -276,6 +238,7 @@ class Rodin3D_Regular(IO.ComfyNode): hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -294,21 +257,17 @@ class Rodin3D_Regular(IO.ComfyNode): for i in range(num_images): m_images.append(Images[i]) mesh_mode, quality_override = get_quality_mode(Polygon_count) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } task_uuid, subscription_key = await create_generate_task( + cls, images=m_images, seed=Seed, material=Material_Type, quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, - auth_kwargs=auth, ) - await poll_for_task_status(subscription_key, auth_kwargs=auth) - download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + await poll_for_task_status(subscription_key, cls) + download_list = await get_rodin_download_list(task_uuid, cls) model = await download_files(download_list, task_uuid) return IO.NodeOutput(model) @@ -332,6 +291,7 @@ class Rodin3D_Detail(IO.ComfyNode): hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -350,21 +310,17 @@ class Rodin3D_Detail(IO.ComfyNode): for i in range(num_images): m_images.append(Images[i]) mesh_mode, quality_override = get_quality_mode(Polygon_count) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } task_uuid, subscription_key = await create_generate_task( + cls, images=m_images, seed=Seed, material=Material_Type, quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, - auth_kwargs=auth, ) - await poll_for_task_status(subscription_key, auth_kwargs=auth) - download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + await poll_for_task_status(subscription_key, cls) + download_list = await get_rodin_download_list(task_uuid, cls) model = await download_files(download_list, task_uuid) return IO.NodeOutput(model) @@ -388,6 +344,7 @@ class Rodin3D_Smooth(IO.ComfyNode): hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -400,27 +357,22 @@ class Rodin3D_Smooth(IO.ComfyNode): Material_Type, Polygon_count, ) -> IO.NodeOutput: - tier = "Smooth" num_images = Images.shape[0] m_images = [] for i in range(num_images): m_images.append(Images[i]) mesh_mode, quality_override = get_quality_mode(Polygon_count) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } task_uuid, subscription_key = await create_generate_task( + cls, images=m_images, seed=Seed, material=Material_Type, quality_override=quality_override, - tier=tier, + tier="Smooth", mesh_mode=mesh_mode, - auth_kwargs=auth, ) - await poll_for_task_status(subscription_key, auth_kwargs=auth) - download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + await poll_for_task_status(subscription_key, cls) + download_list = await get_rodin_download_list(task_uuid, cls) model = await download_files(download_list, task_uuid) return IO.NodeOutput(model) @@ -451,6 +403,7 @@ class Rodin3D_Sketch(IO.ComfyNode): hidden=[ 
IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -461,29 +414,21 @@ class Rodin3D_Sketch(IO.ComfyNode): Images, Seed, ) -> IO.NodeOutput: - tier = "Sketch" num_images = Images.shape[0] m_images = [] for i in range(num_images): m_images.append(Images[i]) - material_type = "PBR" - quality_override = 18000 - mesh_mode = "Quad" - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } task_uuid, subscription_key = await create_generate_task( + cls, images=m_images, seed=Seed, - material=material_type, - quality_override=quality_override, - tier=tier, - mesh_mode=mesh_mode, - auth_kwargs=auth, + material="PBR", + quality_override=18000, + tier="Sketch", + mesh_mode="Quad", ) - await poll_for_task_status(subscription_key, auth_kwargs=auth) - download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + await poll_for_task_status(subscription_key, cls) + download_list = await get_rodin_download_list(task_uuid, cls) model = await download_files(download_list, task_uuid) return IO.NodeOutput(model) @@ -522,6 +467,7 @@ class Rodin3D_Gen2(IO.ComfyNode): hidden=[ IO.Hidden.auth_token_comfy_org, IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, ], is_api_node=True, ) @@ -541,22 +487,18 @@ class Rodin3D_Gen2(IO.ComfyNode): for i in range(num_images): m_images.append(Images[i]) mesh_mode, quality_override = get_quality_mode(Polygon_count) - auth = { - "auth_token": cls.hidden.auth_token_comfy_org, - "comfy_api_key": cls.hidden.api_key_comfy_org, - } task_uuid, subscription_key = await create_generate_task( + cls, images=m_images, seed=Seed, material=Material_Type, quality_override=quality_override, tier=tier, mesh_mode=mesh_mode, - TAPose=TAPose, - auth_kwargs=auth, + ta_pose=TAPose, ) - await poll_for_task_status(subscription_key, auth_kwargs=auth) - download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth) + await poll_for_task_status(subscription_key, cls) + download_list = await get_rodin_download_list(task_uuid, cls) model = await download_files(download_list, task_uuid) return IO.NodeOutput(model) diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py index 65bb35f0f..2d5dcd648 100644 --- a/comfy_api_nodes/util/client.py +++ b/comfy_api_nodes/util/client.py @@ -16,9 +16,9 @@ from pydantic import BaseModel from comfy import utils from comfy_api.latest import IO -from comfy_api_nodes.apis import request_logger from server import PromptServer +from . import request_logger from ._helpers import ( default_base_url, get_auth_header, @@ -77,7 +77,7 @@ class _PollUIState: _RETRY_STATUS = {408, 429, 500, 502, 503, 504} -COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed", "finished"] +COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed", "finished", "done"] FAILED_STATUSES = ["cancelled", "canceled", "fail", "failed", "error"] QUEUED_STATUSES = ["created", "queued", "queueing", "submitted"] diff --git a/comfy_api_nodes/util/download_helpers.py b/comfy_api_nodes/util/download_helpers.py index 364874bed..14207dc68 100644 --- a/comfy_api_nodes/util/download_helpers.py +++ b/comfy_api_nodes/util/download_helpers.py @@ -12,8 +12,8 @@ from aiohttp.client_exceptions import ClientError, ContentTypeError from comfy_api.input_impl import VideoFromFile from comfy_api.latest import IO as COMFY_IO -from comfy_api_nodes.apis import request_logger +from . 
import request_logger from ._helpers import ( default_base_url, get_auth_header, diff --git a/comfy_api_nodes/apis/request_logger.py b/comfy_api_nodes/util/request_logger.py similarity index 100% rename from comfy_api_nodes/apis/request_logger.py rename to comfy_api_nodes/util/request_logger.py index c6974d35c..ac52e2eab 100644 --- a/comfy_api_nodes/apis/request_logger.py +++ b/comfy_api_nodes/util/request_logger.py @@ -1,11 +1,11 @@ from __future__ import annotations -import os import datetime +import hashlib import json import logging +import os import re -import hashlib from typing import Any import folder_paths diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py index 7bfc61704..632450d9b 100644 --- a/comfy_api_nodes/util/upload_helpers.py +++ b/comfy_api_nodes/util/upload_helpers.py @@ -13,8 +13,8 @@ from pydantic import BaseModel, Field from comfy_api.latest import IO, Input from comfy_api.util import VideoCodec, VideoContainer -from comfy_api_nodes.apis import request_logger +from . import request_logger from ._helpers import is_processing_interrupted, sleep_with_interrupt from .client import ( ApiEndpoint, From 97f198e4215680a83749ba95849f3cdcfa7aa64a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:07:35 -0800 Subject: [PATCH 0849/1073] Fix qwen controlnet regression. (#10657) --- comfy/ldm/qwen_image/controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/qwen_image/controlnet.py b/comfy/ldm/qwen_image/controlnet.py index 92ac3cf0a..a6d408104 100644 --- a/comfy/ldm/qwen_image/controlnet.py +++ b/comfy/ldm/qwen_image/controlnet.py @@ -44,7 +44,7 @@ class QwenImageControlNetModel(QwenImageTransformer2DModel): txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size) // 2, ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size) // 2)) txt_ids = torch.arange(txt_start, txt_start + context.shape[1], device=x.device).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) ids = torch.cat((txt_ids, img_ids), dim=1) - image_rotary_emb = self.pe_embedder(ids).squeeze(1).unsqueeze(2).to(x.dtype) + image_rotary_emb = self.pe_embedder(ids).to(x.dtype).contiguous() del ids, txt_ids, img_ids hidden_states = self.img_in(hidden_states) + self.controlnet_x_embedder(hint) From 1d69245981f9fb3861018613246042296d887dd3 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:08:13 -0800 Subject: [PATCH 0850/1073] Enable pinned memory by default on Nvidia. (#10656) Removed the --fast pinned_memory flag. You can use --disable-pinned-memory to disable it. Please report if it causes any issues. --- comfy/cli_args.py | 3 ++- comfy/model_management.py | 22 +++++++++------------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 3947e62a8..2f30b72d2 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -145,10 +145,11 @@ class PerformanceFeature(enum.Enum): Fp8MatrixMultiplication = "fp8_matrix_mult" CublasOps = "cublas_ops" AutoTune = "autotune" - PinnedMem = "pinned_memory" parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. This is used to test new features so using it might crash your comfyui. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. 
Current valid optimizations: {}".format(" ".join(map(lambda c: c.value, PerformanceFeature)))) +parser.add_argument("--disable-pinned-memory", action="store_true", help="Disable pinned memory use.") + parser.add_argument("--mmap-torch-files", action="store_true", help="Use mmap when loading ckpt/pt files.") parser.add_argument("--disable-mmap", action="store_true", help="Don't use mmap when loading safetensors.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 0d040e55e..4d13c52c1 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1085,22 +1085,21 @@ def cast_to_device(tensor, device, dtype, copy=False): PINNED_MEMORY = {} TOTAL_PINNED_MEMORY = 0 -if PerformanceFeature.PinnedMem in args.fast: - if WINDOWS: - MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50% - else: - MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95 -else: - MAX_PINNED_MEMORY = -1 +MAX_PINNED_MEMORY = -1 +if not args.disable_pinned_memory: + if is_nvidia(): + if WINDOWS: + MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50% + else: + MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95 + logging.info("Enabled pinned memory {}".format(MAX_PINNED_MEMORY // (1024 * 1024))) + def pin_memory(tensor): global TOTAL_PINNED_MEMORY if MAX_PINNED_MEMORY <= 0: return False - if not is_nvidia(): - return False - if not is_device_cpu(tensor.device): return False @@ -1121,9 +1120,6 @@ def unpin_memory(tensor): if MAX_PINNED_MEMORY <= 0: return False - if not is_nvidia(): - return False - if not is_device_cpu(tensor.device): return False From 09dc24c8a982776abd5cb2f71e3d041139e1d5b2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 5 Nov 2025 16:11:15 -0800 Subject: [PATCH 0851/1073] Pinned mem also seems to work on AMD. (#10658) --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 4d13c52c1..7a30c4bec 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1087,7 +1087,7 @@ PINNED_MEMORY = {} TOTAL_PINNED_MEMORY = 0 MAX_PINNED_MEMORY = -1 if not args.disable_pinned_memory: - if is_nvidia(): + if is_nvidia() or is_amd(): if WINDOWS: MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50% else: From e05c90712670fa4a2ffebd44046fc78747193a36 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 6 Nov 2025 01:11:30 -0800 Subject: [PATCH 0852/1073] Clarify release cycle. (#10667) --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4204777e9..8142f595b 100644 --- a/README.md +++ b/README.md @@ -112,10 +112,11 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git ## Release Process -ComfyUI follows a weekly release cycle targeting Friday but this regularly changes because of model releases or large changes to the codebase. There are three interconnected repositories: +ComfyUI follows a weekly release cycle targeting Monday but this regularly changes because of model releases or large changes to the codebase. There are three interconnected repositories: 1. 
**[ComfyUI Core](https://github.com/comfyanonymous/ComfyUI)**
-   - Releases a new stable version (e.g., v0.7.0)
+   - Releases a new stable version (e.g., v0.7.0) roughly every week.
+   - Commits outside of the stable release tags may be very unstable and break many custom nodes.
    - Serves as the foundation for the desktop release
 2. **[ComfyUI Desktop](https://github.com/Comfy-Org/desktop)**

From eb1c42f6498ce44aef4dbed3bb665ac98a28254d Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 6 Nov 2025 17:24:28 -0800
Subject: [PATCH 0853/1073] Tell users they need to upload their logs in bug
 reports. (#10671)

---
 .github/ISSUE_TEMPLATE/bug-report.yml | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
index 3cf2717b7..6556677e0 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -8,13 +8,15 @@ body:
       Before submitting a **Bug Report**, please ensure the following:

       - **1:** You are running the latest version of ComfyUI.
-      - **2:** You have looked at the existing bug reports and made sure this isn't already reported.
+      - **2:** You have your ComfyUI logs and relevant workflow on hand and will post them in this bug report.
       - **3:** You confirmed that the bug is not caused by a custom node. You can disable all custom nodes by passing
-      `--disable-all-custom-nodes` command line argument.
+      `--disable-all-custom-nodes` command line argument. If you have custom nodes, try updating them to the latest version.
       - **4:** This is an actual bug in ComfyUI, not just a support question. A bug is when you can specify exact
       steps to replicate what went wrong and others will be able to repeat your steps and see the same issue happen.
-      If unsure, ask on the [ComfyUI Matrix Space](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) or the [Comfy Org Discord](https://discord.gg/comfyorg) first.
+
+      ## Very Important
+
+      Please make sure that you post ALL your ComfyUI logs in the bug report. A bug report without logs will likely be ignored.
   - type: checkboxes
     id: custom-nodes-test
     attributes:

From cf97b033ee80cf245b4592d42f89e6de67e409a4 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Fri, 7 Nov 2025 12:20:48 +1000
Subject: [PATCH 0854/1073] mm: guard against double pin and unpin explicitly
 (#10672)

As commented, if you let CUDA be the one to detect double pin/unpinning
it actually creates an async GPU error.

---
 comfy/model_management.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 7a30c4bec..a13b24cea 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1103,6 +1103,12 @@ def pin_memory(tensor):
     if not is_device_cpu(tensor.device):
         return False

+    if tensor.is_pinned():
+        #NOTE: CUDA does detect when a tensor is already pinned and would
+        #error below, but there are proven cases where this also queues an error
+        #on the GPU async. So don't trust the CUDA API and guard here
+        return False
+
     size = tensor.numel() * tensor.element_size()
     if (TOTAL_PINNED_MEMORY + size) > MAX_PINNED_MEMORY:
         return False

@@ -1123,6 +1129,12 @@ def unpin_memory(tensor):
     if not is_device_cpu(tensor.device):
         return False

+    if not tensor.is_pinned():
+        #NOTE: CUDA does detect when a tensor is already pinned and would
+        #error below, but there are proven cases where this also queues an error
+        #on the GPU async. So don't trust the CUDA API and guard here
+        return False
+
     ptr = tensor.data_ptr()
     if torch.cuda.cudart().cudaHostUnregister(ptr) == 0:
         TOTAL_PINNED_MEMORY -= PINNED_MEMORY.pop(ptr)

From a1a70362ca376cff05a0514e0ce771ab26d92fd9 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 7 Nov 2025 08:15:05 -0800
Subject: [PATCH 0855/1073] Only unpin tensor if it was pinned by ComfyUI
 (#10677)

---
 comfy/model_management.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index a13b24cea..7012df858 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1129,13 +1129,18 @@ def unpin_memory(tensor):
     if not is_device_cpu(tensor.device):
         return False

-    if not tensor.is_pinned():
-        #NOTE: CUDA does detect when a tensor is already pinned and would
-        #error below, but there are proven cases where this also queues an error
-        #on the GPU async. So don't trust the CUDA API and guard here
+    ptr = tensor.data_ptr()
+    size = tensor.numel() * tensor.element_size()
+
+    size_stored = PINNED_MEMORY.get(ptr, None)
+    if size_stored is None:
+        logging.warning("Tried to unpin tensor not pinned by ComfyUI")
+        return False
+
+    if size != size_stored:
+        logging.warning("Size of pinned tensor changed")
         return False

-    ptr = tensor.data_ptr()
     if torch.cuda.cudart().cudaHostUnregister(ptr) == 0:
         TOTAL_PINNED_MEMORY -= PINNED_MEMORY.pop(ptr)
         if len(PINNED_MEMORY) == 0:

From 2abd2b5c2049a9625b342bcb7decedd5d1645f66 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 8 Nov 2025 12:52:02 -0800
Subject: [PATCH 0856/1073] Make ScaleROPE node work on Flux.
(#10686) --- comfy/ldm/flux/model.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 14f90cea5..b9d36f202 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -210,7 +210,7 @@ class Flux(nn.Module): img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) return img - def process_img(self, x, index=0, h_offset=0, w_offset=0): + def process_img(self, x, index=0, h_offset=0, w_offset=0, transformer_options={}): bs, c, h, w = x.shape patch_size = self.patch_size x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size)) @@ -222,10 +222,22 @@ class Flux(nn.Module): h_offset = ((h_offset + (patch_size // 2)) // patch_size) w_offset = ((w_offset + (patch_size // 2)) // patch_size) - img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype) + steps_h = h_len + steps_w = w_len + + rope_options = transformer_options.get("rope_options", None) + if rope_options is not None: + h_len = (h_len - 1.0) * rope_options.get("scale_y", 1.0) + 1.0 + w_len = (w_len - 1.0) * rope_options.get("scale_x", 1.0) + 1.0 + + index += rope_options.get("shift_t", 0.0) + h_offset += rope_options.get("shift_y", 0.0) + w_offset += rope_options.get("shift_x", 0.0) + + img_ids = torch.zeros((steps_h, steps_w, 3), device=x.device, dtype=x.dtype) img_ids[:, :, 0] = img_ids[:, :, 1] + index - img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1) - img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) + img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=steps_h, device=x.device, dtype=x.dtype).unsqueeze(1) + img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=steps_w, device=x.device, dtype=x.dtype).unsqueeze(0) return img, repeat(img_ids, "h w c -> b (h w) c", b=bs) def forward(self, x, timestep, context, y=None, guidance=None, ref_latents=None, control=None, transformer_options={}, **kwargs): @@ -241,7 +253,7 @@ class Flux(nn.Module): h_len = ((h_orig + (patch_size // 2)) // patch_size) w_len = ((w_orig + (patch_size // 2)) // patch_size) - img, img_ids = self.process_img(x) + img, img_ids = self.process_img(x, transformer_options=transformer_options) img_tokens = img.shape[1] if ref_latents is not None: h = 0 From e632e5de281b91dd7199636dd6d82126fbfb07d5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 9 Nov 2025 15:06:39 -0800 Subject: [PATCH 0857/1073] Add logging for model unloading. 
(#10692) --- comfy/model_patcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 5a31a8734..17e06a869 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -909,6 +909,7 @@ class ModelPatcher: self.model.model_lowvram = True self.model.lowvram_patch_counter += patch_counter self.model.model_loaded_weight_memory -= memory_freed + logging.info("loaded partially: {:.2f} MB loaded, lowvram patches: {}".format(self.model.model_loaded_weight_memory / (1024 * 1024), self.model.lowvram_patch_counter)) return memory_freed def partially_load(self, device_to, extra_memory=0, force_patch_weights=False): From dea899f22125d38a8b48147d6cce89a2b659fdeb Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 9 Nov 2025 15:51:33 -0800 Subject: [PATCH 0858/1073] Unload weights if vram usage goes up between runs. (#10690) --- comfy/model_management.py | 11 +++++++++-- comfy/model_patcher.py | 20 +++++++++++++------- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 7012df858..a4410f2ec 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -503,7 +503,11 @@ class LoadedModel: use_more_vram = lowvram_model_memory if use_more_vram == 0: use_more_vram = 1e32 - self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights) + if use_more_vram > 0: + self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights) + else: + self.model.partially_unload(self.model.offload_device, -use_more_vram, force_patch_weights=force_patch_weights) + real_model = self.model.model if is_intel_xpu() and not args.disable_ipex_optimize and 'ipex' in globals() and real_model is not None: @@ -689,7 +693,10 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu current_free_mem = get_free_memory(torch_dev) + loaded_memory lowvram_model_memory = max(128 * 1024 * 1024, (current_free_mem - minimum_memory_required), min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO, current_free_mem - minimum_inference_memory())) - lowvram_model_memory = max(0.1, lowvram_model_memory - loaded_memory) + lowvram_model_memory = lowvram_model_memory - loaded_memory + + if lowvram_model_memory == 0: + lowvram_model_memory = 0.1 if vram_set_state == VRAMState.NO_VRAM: lowvram_model_memory = 0.1 diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 17e06a869..68b0a9192 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -843,7 +843,7 @@ class ModelPatcher: self.object_patches_backup.clear() - def partially_unload(self, device_to, memory_to_free=0): + def partially_unload(self, device_to, memory_to_free=0, force_patch_weights=False): with self.use_ejected(): hooks_unpatched = False memory_freed = 0 @@ -887,13 +887,19 @@ class ModelPatcher: module_mem += move_weight_functions(m, device_to) if lowvram_possible: if weight_key in self.patches: - _, set_func, convert_func = get_key_weight(self.model, weight_key) - m.weight_function.append(LowVramPatch(weight_key, self.patches, convert_func, set_func)) - patch_counter += 1 + if force_patch_weights: + self.patch_weight_to_device(weight_key) + else: + _, set_func, convert_func = get_key_weight(self.model, weight_key) + m.weight_function.append(LowVramPatch(weight_key, self.patches, convert_func, set_func)) + patch_counter += 1 if bias_key in self.patches: - _, set_func, convert_func = get_key_weight(self.model, bias_key) 
- m.bias_function.append(LowVramPatch(bias_key, self.patches, convert_func, set_func)) - patch_counter += 1 + if force_patch_weights: + self.patch_weight_to_device(bias_key) + else: + _, set_func, convert_func = get_key_weight(self.model, bias_key) + m.bias_function.append(LowVramPatch(bias_key, self.patches, convert_func, set_func)) + patch_counter += 1 cast_weight = True if cast_weight: From c350009236e5d172a3050c04043ea70a301378ca Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Mon, 10 Nov 2025 13:52:11 +1000 Subject: [PATCH 0859/1073] ops: Put weight cast on the offload stream (#10697) This needs to be on the offload stream. This reproduced a black screen with low resolution images on a slow bus when using FP8. --- comfy/ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 733bff99d..96dffa85d 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -110,9 +110,9 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of for f in s.bias_function: bias = f(bias) - weight = weight.to(dtype=dtype) - if weight_has_function: + if weight_has_function or weight.dtype != dtype: with wf_context: + weight = weight.to(dtype=dtype) for f in s.weight_function: weight = f(weight) From 5ebcab3c7d974963a89cecd37296a22fdb73bd2b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 10 Nov 2025 12:35:29 -0800 Subject: [PATCH 0860/1073] Update CI workflow to remove dead macOS runner. (#10704) * Update CI workflow to remove dead macOS runner. * revert * revert --- .github/workflows/test-ci.yml | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/test-ci.yml b/.github/workflows/test-ci.yml index 418dca0ab..1660ec8e3 100644 --- a/.github/workflows/test-ci.yml +++ b/.github/workflows/test-ci.yml @@ -21,14 +21,15 @@ jobs: fail-fast: false matrix: # os: [macos, linux, windows] - os: [macos, linux] - python_version: ["3.9", "3.10", "3.11", "3.12"] + # os: [macos, linux] + os: [linux] + python_version: ["3.10", "3.11", "3.12"] cuda_version: ["12.1"] torch_version: ["stable"] include: - - os: macos - runner_label: [self-hosted, macOS] - flags: "--use-pytorch-cross-attention" + # - os: macos + # runner_label: [self-hosted, macOS] + # flags: "--use-pytorch-cross-attention" - os: linux runner_label: [self-hosted, Linux] flags: "" @@ -73,14 +74,15 @@ jobs: strategy: fail-fast: false matrix: - os: [macos, linux] + # os: [macos, linux] + os: [linux] python_version: ["3.11"] cuda_version: ["12.1"] torch_version: ["nightly"] include: - - os: macos - runner_label: [self-hosted, macOS] - flags: "--use-pytorch-cross-attention" + # - os: macos + # runner_label: [self-hosted, macOS] + # flags: "--use-pytorch-cross-attention" - os: linux runner_label: [self-hosted, Linux] flags: "" From 119941174704081a16a4c3f303d99f2fb1e95cde Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 11 Nov 2025 16:33:30 -0800 Subject: [PATCH 0861/1073] Don't pin tensor if not a torch.nn.parameter.Parameter (#10718) --- comfy/model_management.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index a4410f2ec..d8913082a 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1107,6 +1107,9 @@ def pin_memory(tensor): if MAX_PINNED_MEMORY <= 0: return False + if type(tensor) is not 
torch.nn.parameter.Parameter:
+        return False
+
     if not is_device_cpu(tensor.device):
         return False

@@ -1116,6 +1119,9 @@ def pin_memory(tensor):
         #on the GPU async. So don't trust the CUDA API and guard here
         return False

+    if not tensor.is_contiguous():
+        return False
+
     size = tensor.numel() * tensor.element_size()
     if (TOTAL_PINNED_MEMORY + size) > MAX_PINNED_MEMORY:
         return False

From e1d85e7577d8f6355bd4cb3449bcb0a7e5f80cb8 Mon Sep 17 00:00:00 2001
From: Qiacheng Li
Date: Wed, 12 Nov 2025 12:21:05 -0800
Subject: [PATCH 0862/1073] Update README.md for Intel Arc GPU installation,
 remove IPEX (#10729)

IPEX is no longer needed for Intel Arc GPUs. Removing the instructions to
set up IPEX.

---
 README.md | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 8142f595b..9e28803a2 100644
--- a/README.md
+++ b/README.md
@@ -242,7 +242,7 @@ RDNA 4 (RX 9000 series):

 ### Intel GPUs (Windows and Linux)

-(Option 1) Intel Arc GPU users can install native PyTorch with torch.xpu support using pip. More information can be found [here](https://pytorch.org/docs/main/notes/get_start_xpu.html)
+Intel Arc GPU users can install native PyTorch with torch.xpu support using pip. More information can be found [here](https://pytorch.org/docs/main/notes/get_start_xpu.html)

 1. To install PyTorch xpu, use the following command:

@@ -252,10 +252,6 @@ This is the command to install the Pytorch xpu nightly which might have some per

 ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/xpu```

-(Option 2) Alternatively, Intel GPUs supported by Intel Extension for PyTorch (IPEX) can leverage IPEX for improved performance.
-
-1. visit [Installation](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu) for more information.
-
 ### NVIDIA

 Nvidia users should install stable pytorch using this command:

From 18e7d6dba5f1012d4cf09e8f777dc85d56ff25c0 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Thu, 13 Nov 2025 07:19:53 +1000
Subject: [PATCH 0863/1073] mm/mp: always unload re-used but modified models
 (#10724)

The partial unloader path in the model re-use flow skips straight to the
actual unload without any check of the patching UUID. This means that if
you do an upscale flow with a model patch on an existing model, it will
not apply your patches.

Fix by delaying the partial_unload until after the UUID checks. This is
done by making partial_unload a mode of partial_load where extra_mem is
-ve.
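A condensed sketch of the new convention (pseudocode mirroring the diff
below, not the full method):

    # inside ModelPatcher.partially_load(), after the UUID checks have
    # re-applied any pending patches via patch_model(load_weights=False)
    if extra_memory < 0 and not unpatch_weights:
        # a negative memory budget means shrink instead of grow
        self.partially_unload(self.offload_device, -extra_memory,
                              force_patch_weights=force_patch_weights)
        return 0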
---
 comfy/model_management.py | 5 +----
 comfy/model_patcher.py | 3 +++
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index d8913082a..a21df54b3 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -503,10 +503,7 @@ class LoadedModel:
                 use_more_vram = lowvram_model_memory
                 if use_more_vram == 0:
                     use_more_vram = 1e32
-            if use_more_vram > 0:
-                self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights)
-            else:
-                self.model.partially_unload(self.model.offload_device, -use_more_vram, force_patch_weights=force_patch_weights)
+            self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights)

         real_model = self.model.model

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 68b0a9192..cf1b0d441 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -928,6 +928,9 @@ class ModelPatcher:
                     extra_memory += (used - self.model.model_loaded_weight_memory)

             self.patch_model(load_weights=False)
+            if extra_memory < 0 and not unpatch_weights:
+                self.partially_unload(self.offload_device, -extra_memory, force_patch_weights=force_patch_weights)
+                return 0
             full_load = False
             if self.model.model_lowvram == False and self.model.model_loaded_weight_memory > 0:
                 self.apply_hooks(self.forced_hooks, force_apply=True)

From 1c7eaeca1013e4315f36e0d4d274faa106001121 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Thu, 13 Nov 2025 07:20:53 +1000
Subject: [PATCH 0864/1073] qwen: reduce VRAM usage (#10725)

Clean up a bunch of stacked and no-longer-needed tensors at the QWEN VRAM
peak (currently the FFN). With this I go from OOMing at B=37x1328x1328 to
being able to successfully run B=47 (RTX5090).

---
 comfy/ldm/qwen_image/model.py | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py
index e5d0d17c1..427ea19c1 100644
--- a/comfy/ldm/qwen_image/model.py
+++ b/comfy/ldm/qwen_image/model.py
@@ -236,10 +236,10 @@ class QwenImageTransformerBlock(nn.Module):
         img_mod1, img_mod2 = img_mod_params.chunk(2, dim=-1)
         txt_mod1, txt_mod2 = txt_mod_params.chunk(2, dim=-1)

-        img_normed = self.img_norm1(hidden_states)
-        img_modulated, img_gate1 = self._modulate(img_normed, img_mod1)
-        txt_normed = self.txt_norm1(encoder_hidden_states)
-        txt_modulated, txt_gate1 = self._modulate(txt_normed, txt_mod1)
+        img_modulated, img_gate1 = self._modulate(self.img_norm1(hidden_states), img_mod1)
+        del img_mod1
+        txt_modulated, txt_gate1 = self._modulate(self.txt_norm1(encoder_hidden_states), txt_mod1)
+        del txt_mod1

         img_attn_output, txt_attn_output = self.attn(
             hidden_states=img_modulated,
             encoder_hidden_states=txt_modulated,
             image_rotary_emb=image_rotary_emb,
             transformer_options=transformer_options,
         )
+        del img_modulated
+        del txt_modulated

         hidden_states = hidden_states + img_gate1 * img_attn_output
         encoder_hidden_states = encoder_hidden_states + txt_gate1 * txt_attn_output
+        del img_attn_output
+        del txt_attn_output
+        del img_gate1
+        del txt_gate1

-        img_normed2 = self.img_norm2(hidden_states)
-        img_modulated2, img_gate2 = self._modulate(img_normed2, img_mod2)
+        img_modulated2, img_gate2 = self._modulate(self.img_norm2(hidden_states), img_mod2)
         hidden_states = torch.addcmul(hidden_states, img_gate2, self.img_mlp(img_modulated2))

-        txt_normed2 = self.txt_norm2(encoder_hidden_states)
-        txt_modulated2, txt_gate2 = self._modulate(txt_normed2, txt_mod2)
+        txt_modulated2, txt_gate2 = self._modulate(self.txt_norm2(encoder_hidden_states), txt_mod2)
         encoder_hidden_states = torch.addcmul(encoder_hidden_states, txt_gate2, self.txt_mlp(txt_modulated2))

         return encoder_hidden_states, hidden_states

From 8b0b93df51d04f08eb779cb84dc331fa18b43ae8 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 12 Nov 2025 14:04:41 -0800
Subject: [PATCH 0865/1073] Update Python 3.14 compatibility notes in README
 (#10730)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9e28803a2..f51807ad5 100644
--- a/README.md
+++ b/README.md
@@ -200,7 +200,7 @@ comfy install

 ## Manual Install (Windows, Linux)

-Python 3.14 will work if you comment out the `kornia` dependency in the requirements.txt file (breaks the canny node) but it is not recommended.
+Python 3.14 works but you may encounter issues with the torch compile node. The free-threaded variant is still missing some dependencies.

 Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12

From 3b3ef9a77ac03ed516a45063f9736f33085cecca Mon Sep 17 00:00:00 2001
From: contentis
Date: Thu, 13 Nov 2025 00:26:52 +0100
Subject: [PATCH 0866/1073] Quantized Ops fixes (#10715)

* offload support, bug fixes, remove mixins

* add readme

---
 QUANTIZATION.md    | 168 +++++++++++++++++++++++++++++++++++++++++++++
 comfy/ops.py       |  37 ++++------
 comfy/quant_ops.py |  39 ++++++++++-
 3 files changed, 219 insertions(+), 25 deletions(-)
 create mode 100644 QUANTIZATION.md

diff --git a/QUANTIZATION.md b/QUANTIZATION.md
new file mode 100644
index 000000000..1693e13f3
--- /dev/null
+++ b/QUANTIZATION.md
@@ -0,0 +1,168 @@
+# The Comfy guide to Quantization
+
+
+## How does quantization work?
+
+Quantization aims to map a high-precision value x_f to a lower-precision format with minimal loss in accuracy. These smaller formats then serve to reduce the model's memory footprint and increase throughput by using specialized hardware.
+
+When simply converting a value from FP16 to FP8 using the round-to-nearest method we might hit two issues:
+- The dynamic range of FP16 (-65,504, 65,504) far exceeds FP8 formats like E4M3 (-448, 448) or E5M2 (-57,344, 57,344), potentially resulting in clipped values
+- The original values are concentrated in a small range (e.g. -1 to 1), leaving many FP8 bits "unused"
+
+By using a scaling factor, we aim to map these values into the quantized-dtype range, making use of the full spectrum. One of the easiest and most common approaches is per-tensor absolute-maximum scaling.
+
+```
+absmax = max(abs(tensor))
+scale = absmax / max_dynamic_range_low_precision
+
+# Quantization
+tensor_q = (tensor / scale).to(low_precision_dtype)
+
+# De-Quantization
+tensor_dq = tensor_q.to(fp16) * scale
+
+tensor_dq ~ tensor
+```
+
+Given that additional information (the scaling factor) is needed to "interpret" the quantized values, we describe those as derived datatypes.
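+As a concrete illustration, here is a minimal, self-contained PyTorch sketch of the per-tensor absmax scheme described above (it assumes a PyTorch build with FP8 dtypes and skips edge cases such as an all-zero tensor):
+
+```python
+import torch
+
+def absmax_quantize(t, qdtype=torch.float8_e4m3fn):
+    qmax = torch.finfo(qdtype).max  # 448.0 for e4m3fn
+    scale = t.abs().max() / qmax
+    t_q = (t / scale).clamp(-qmax, qmax).to(qdtype)
+    return t_q, scale
+
+def absmax_dequantize(t_q, scale, dtype=torch.float16):
+    return t_q.to(dtype) * scale
+
+x = torch.randn(4, 8, dtype=torch.float16)
+x_q, scale = absmax_quantize(x)
+x_dq = absmax_dequantize(x_q, scale)
+print((x - x_dq).abs().max())  # small quantization error
+```
+
+Real kernels avoid the explicit dequantize step when hardware FP8 matmuls are available; the round trip here only shows the role of the scale.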
+
+
+## Quantization in Comfy
+
+```
+QuantizedTensor (torch.Tensor subclass)
+    ↓ __torch_dispatch__
+Two-Level Registry (generic + layout handlers)
+    ↓
+MixedPrecisionOps + Metadata Detection
+```
+
+### Representation
+
+To represent these derived datatypes, ComfyUI uses a subclass of torch.Tensor: the `QuantizedTensor` class found in `comfy/quant_ops.py`.
+
+A `Layout` class defines how a specific quantization format behaves:
+- Required parameters
+- Quantize method
+- De-Quantize method
+
+```python
+from comfy.quant_ops import QuantizedLayout
+
+class MyLayout(QuantizedLayout):
+    @classmethod
+    def quantize(cls, tensor, **kwargs):
+        # Convert to quantized format
+        qdata = ...
+        params = {'scale': ..., 'orig_dtype': tensor.dtype}
+        return qdata, params
+
+    @staticmethod
+    def dequantize(qdata, scale, orig_dtype, **kwargs):
+        return qdata.to(orig_dtype) * scale
+```
+
+To run operations on these QuantizedTensors, we use two registry systems to define the supported operations.
+The first is a **generic registry** that handles operations common to all quantized formats (e.g., `.to()`, `.clone()`, `.reshape()`).
+
+The second registry is layout-specific and allows implementing fast paths such as nn.Linear.
+```python
+from comfy.quant_ops import register_layout_op
+
+@register_layout_op(torch.ops.aten.linear.default, MyLayout)
+def my_linear(func, args, kwargs):
+    # Extract tensors, call optimized kernel
+    ...
+```
+When `torch.nn.functional.linear()` is called with QuantizedTensor arguments, `__torch_dispatch__` automatically routes to the registered implementation.
+For any unsupported operation, QuantizedTensor will fall back to calling `dequantize` and dispatching to the high-precision implementation.
+
+
+### Mixed Precision
+
+The `MixedPrecisionOps` class (lines 542-648 in `comfy/ops.py`) enables per-layer quantization decisions, allowing different layers in a model to use different precisions. This is activated when a model config contains a `layer_quant_config` dictionary that specifies which layers should be quantized and how.
+
+**Architecture:**
+
+```python
+class MixedPrecisionOps(disable_weight_init):
+    _layer_quant_config = {}  # Maps layer names to quantization configs
+    _compute_dtype = torch.bfloat16  # Default compute / dequantize precision
+```
+
+**Key mechanism:**
+
+The custom `Linear._load_from_state_dict()` method inspects each layer during model loading:
+- If the layer name is **not** in `_layer_quant_config`: load the weight as a regular tensor in `_compute_dtype`
+- If the layer name **is** in `_layer_quant_config`:
+  - Load the weight as a `QuantizedTensor` with the specified layout (e.g., `TensorCoreFP8Layout`)
+  - Load the associated quantization parameters (scales, block_size, etc.)
+
+**Why it's needed:**
+
+Not all layers tolerate quantization equally. Sensitive operations like final projections can be kept in higher precision, while compute-heavy matmuls are quantized. This provides most of the performance benefits while maintaining quality.
+
+The system is selected in `pick_operations()` when `model_config.layer_quant_config` is present, making it the highest-priority operation mode.
+
+
+## Checkpoint Format
+
+Quantized checkpoints are stored as standard safetensors files with quantized weight tensors and associated scaling parameters, plus a `_quantization_metadata` JSON entry describing the quantization scheme.
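+As a quick orientation, such a file's metadata can be inspected with the `safetensors` library (a sketch with a hypothetical file name; it assumes the `_quantization_metadata` entry is stored as a JSON string in the safetensors header, as described below):
+
+```python
+import json
+from safetensors import safe_open
+
+with safe_open("model_fp8.safetensors", framework="pt", device="cpu") as f:
+    header_meta = f.metadata() or {}
+    qmeta = json.loads(header_meta["_quantization_metadata"])
+    print(qmeta["format_version"])
+    for layer_name, layer_format in qmeta["layers"].items():
+        print(layer_name, "->", layer_format)  # e.g. ... -> float8_e4m3fn
+```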
+ +The quantized checkpoint will contain the same layers as the original checkpoint but: +- The weights are stored as quantized values, sometimes using a different storage datatype. E.g. uint8 container for fp8. +- For each quantized weight a number of additional scaling parameters are stored alongside depending on the recipe. +- We store a metadata.json in the metadata of the final safetensor containing the `_quantization_metadata` describing which layers are quantized and what layout has been used. + +### Scaling Parameters details +We define 4 possible scaling parameters that should cover most recipes in the near-future: +- **weight_scale**: quantization scalers for the weights +- **weight_scale_2**: global scalers in the context of double scaling +- **pre_quant_scale**: scalers used for smoothing salient weights +- **input_scale**: quantization scalers for the activations + +| Format | Storage dtype | weight_scale | weight_scale_2 | pre_quant_scale | input_scale | +|--------|---------------|--------------|----------------|-----------------|-------------| +| float8_e4m3fn | float32 | float32 (scalar) | - | - | float32 (scalar) | + +You can find the defined formats in `comfy/quant_ops.py` (QUANT_ALGOS). + +### Quantization Metadata + +The metadata stored alongside the checkpoint contains: +- **format_version**: String to define a version of the standard +- **layers**: A dictionary mapping layer names to their quantization format. The format string maps to the definitions found in `QUANT_ALGOS`. + +Example: +```json +{ + "_quantization_metadata": { + "format_version": "1.0", + "layers": { + "model.layers.0.mlp.up_proj": "float8_e4m3fn", + "model.layers.0.mlp.down_proj": "float8_e4m3fn", + "model.layers.1.mlp.up_proj": "float8_e4m3fn" + } + } +} +``` + + +## Creating Quantized Checkpoints + +To create compatible checkpoints, use any quantization tool provided the output follows the checkpoint format described above and uses a layout defined in `QUANT_ALGOS`. + +### Weight Quantization + +Weight quantization is straightforward - compute the scaling factor directly from the weight tensor using the absolute maximum method described earlier. Each layer's weights are quantized independently and stored with their corresponding `weight_scale` parameter. + +### Calibration (for Activation Quantization) + +Activation quantization (e.g., for FP8 Tensor Core operations) requires `input_scale` parameters that cannot be determined from static weights alone. Since activation values depend on actual inputs, we use **post-training calibration (PTQ)**: + +1. **Collect statistics**: Run inference on N representative samples +2. **Track activations**: Record the absolute maximum (`amax`) of inputs to each quantized layer +3. **Compute scales**: Derive `input_scale` from collected statistics +4. **Store in checkpoint**: Save `input_scale` parameters alongside weights + +The calibration dataset should be representative of your target use case. For diffusion models, this typically means a diverse set of prompts and generation parameters. \ No newline at end of file diff --git a/comfy/ops.py b/comfy/ops.py index 96dffa85d..2a90a5ba2 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -77,7 +77,10 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of # will add async-offload support to your cast and improve performance. 
if input is not None: if dtype is None: - dtype = input.dtype + if isinstance(input, QuantizedTensor): + dtype = input._layout_params["orig_dtype"] + else: + dtype = input.dtype if bias_dtype is None: bias_dtype = dtype if device is None: @@ -534,18 +537,7 @@ if CUBLAS_IS_AVAILABLE: # ============================================================================== # Mixed Precision Operations # ============================================================================== -from .quant_ops import QuantizedTensor - -QUANT_FORMAT_MIXINS = { - "float8_e4m3fn": { - "dtype": torch.float8_e4m3fn, - "layout_type": "TensorCoreFP8Layout", - "parameters": { - "weight_scale": torch.nn.Parameter(torch.zeros((), dtype=torch.float32), requires_grad=False), - "input_scale": torch.nn.Parameter(torch.zeros((), dtype=torch.float32), requires_grad=False), - } - } -} +from .quant_ops import QuantizedTensor, QUANT_ALGOS class MixedPrecisionOps(disable_weight_init): _layer_quant_config = {} @@ -596,23 +588,24 @@ class MixedPrecisionOps(disable_weight_init): if quant_format is None: raise ValueError(f"Unknown quantization format for layer {layer_name}") - mixin = QUANT_FORMAT_MIXINS[quant_format] - self.layout_type = mixin["layout_type"] + qconfig = QUANT_ALGOS[quant_format] + self.layout_type = qconfig["comfy_tensor_layout"] - scale_key = f"{prefix}weight_scale" + weight_scale_key = f"{prefix}weight_scale" layout_params = { - 'scale': state_dict.pop(scale_key, None), - 'orig_dtype': MixedPrecisionOps._compute_dtype + 'scale': state_dict.pop(weight_scale_key, None), + 'orig_dtype': MixedPrecisionOps._compute_dtype, + 'block_size': qconfig.get("group_size", None), } if layout_params['scale'] is not None: - manually_loaded_keys.append(scale_key) + manually_loaded_keys.append(weight_scale_key) self.weight = torch.nn.Parameter( - QuantizedTensor(weight.to(device=device, dtype=mixin["dtype"]), self.layout_type, layout_params), + QuantizedTensor(weight.to(device=device), self.layout_type, layout_params), requires_grad=False ) - for param_name, param_value in mixin["parameters"].items(): + for param_name in qconfig["parameters"]: param_key = f"{prefix}{param_name}" _v = state_dict.pop(param_key, None) if _v is None: @@ -643,7 +636,7 @@ class MixedPrecisionOps(disable_weight_init): if (getattr(self, 'layout_type', None) is not None and getattr(self, 'input_scale', None) is not None and not isinstance(input, QuantizedTensor)): - input = QuantizedTensor.from_float(input, self.layout_type, scale=self.input_scale, fp8_dtype=self.weight.dtype) + input = QuantizedTensor.from_float(input, self.layout_type, scale=self.input_scale, dtype=self.weight.dtype) return self._forward(input, self.weight, self.bias) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index c56e32a73..1d058bece 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -74,6 +74,12 @@ def _copy_layout_params(params): new_params[k] = v return new_params +def _copy_layout_params_inplace(src, dst, non_blocking=False): + for k, v in src.items(): + if isinstance(v, torch.Tensor): + dst[k].copy_(v, non_blocking=non_blocking) + else: + dst[k] = v class QuantizedLayout: """ @@ -318,13 +324,13 @@ def generic_to_dtype_layout(func, args, kwargs): def generic_copy_(func, args, kwargs): qt_dest = args[0] src = args[1] - + non_blocking = args[2] if len(args) > 2 else False if isinstance(qt_dest, QuantizedTensor): if isinstance(src, QuantizedTensor): # Copy from another quantized tensor - qt_dest._qdata.copy_(src._qdata) + qt_dest._qdata.copy_(src._qdata, 
non_blocking=non_blocking) qt_dest._layout_type = src._layout_type - qt_dest._layout_params = _copy_layout_params(src._layout_params) + _copy_layout_params_inplace(src._layout_params, qt_dest._layout_params, non_blocking=non_blocking) else: # Copy from regular tensor - just copy raw data qt_dest._qdata.copy_(src) @@ -336,6 +342,26 @@ def generic_copy_(func, args, kwargs): def generic_has_compatible_shallow_copy_type(func, args, kwargs): return True + +@register_generic_util(torch.ops.aten.empty_like.default) +def generic_empty_like(func, args, kwargs): + """Empty_like operation - creates an empty tensor with the same quantized structure.""" + qt = args[0] + if isinstance(qt, QuantizedTensor): + # Create empty tensor with same shape and dtype as the quantized data + hp_dtype = kwargs.pop('dtype', qt._layout_params["orig_dtype"]) + new_qdata = torch.empty_like(qt._qdata, **kwargs) + + # Handle device transfer for layout params + target_device = kwargs.get('device', new_qdata.device) + new_params = _move_layout_params_to_device(qt._layout_params, target_device) + + # Update orig_dtype if dtype is specified + new_params['orig_dtype'] = hp_dtype + + return QuantizedTensor(new_qdata, qt._layout_type, new_params) + return func(*args, **kwargs) + # ============================================================================== # FP8 Layout + Operation Handlers # ============================================================================== @@ -378,6 +404,13 @@ class TensorCoreFP8Layout(QuantizedLayout): def get_plain_tensors(cls, qtensor): return qtensor._qdata, qtensor._layout_params['scale'] +QUANT_ALGOS = { + "float8_e4m3fn": { + "storage_t": torch.float8_e4m3fn, + "parameters": {"weight_scale", "input_scale"}, + "comfy_tensor_layout": "TensorCoreFP8Layout", + }, +} LAYOUTS = { "TensorCoreFP8Layout": TensorCoreFP8Layout, From f91078b1ffa484c424f78814f54de4d5846e4daa Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 13 Nov 2025 20:05:26 +0200 Subject: [PATCH 0867/1073] add PR template for API-Nodes (#10736) --- .github/PULL_REQUEST_TEMPLATE/api-node.md | 21 ++++++++ .github/workflows/api-node-template.yml | 58 +++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE/api-node.md create mode 100644 .github/workflows/api-node-template.yml diff --git a/.github/PULL_REQUEST_TEMPLATE/api-node.md b/.github/PULL_REQUEST_TEMPLATE/api-node.md new file mode 100644 index 000000000..f62744878 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/api-node.md @@ -0,0 +1,21 @@ + + +## API Node PR Checklist + +### Scope +- [ ] **Is API Node Change** + +### Pricing & Billing +- [ ] **Need pricing update** +- [ ] **No pricing update** + +If **Need pricing update**: +- [ ] Metronome rate cards updated +- [ ] Auto‑billing tests updated and passing + +### QA +- [ ] **QA done** +- [ ] **QA not required** + +### Comms +- [ ] Informed **@Kosinkadink** diff --git a/.github/workflows/api-node-template.yml b/.github/workflows/api-node-template.yml new file mode 100644 index 000000000..0775f9979 --- /dev/null +++ b/.github/workflows/api-node-template.yml @@ -0,0 +1,58 @@ +name: Append API Node PR template + +on: + pull_request_target: + types: [opened, reopened, synchronize, edited, ready_for_review] + paths: + - 'comfy_api_nodes/**' # only run if these files changed + +permissions: + contents: read + pull-requests: write + +jobs: + inject: + runs-on: ubuntu-latest + steps: + - name: Ensure template exists and append to PR 
body
        uses: actions/github-script@v7
        with:
          script: |
            const { owner, repo } = context.repo;
            const number = context.payload.pull_request.number;
            const templatePath = '.github/PULL_REQUEST_TEMPLATE/api-node.md';
            const marker = '';

            const { data: pr } = await github.rest.pulls.get({ owner, repo, pull_number: number });

            let templateText;
            try {
              const res = await github.rest.repos.getContent({
                owner,
                repo,
                path: templatePath,
                ref: pr.base.ref
              });
              const buf = Buffer.from(res.data.content, res.data.encoding || 'base64');
              templateText = buf.toString('utf8');
            } catch (e) {
              core.setFailed(`Required PR template not found at "${templatePath}" on ${pr.base.ref}. Please add it to the repo.`);
              return;
            }

            // Enforce the presence of the marker inside the template (for idempotence)
            if (!templateText.includes(marker)) {
              core.setFailed(`Template at "${templatePath}" does not contain the required marker:\n${marker}\nAdd it so we can detect duplicates safely.`);
              return;
            }

            // If the PR already contains the marker, do not append again.
            const body = pr.body || '';
            if (body.includes(marker)) {
              core.info('Template already present in PR body; nothing to inject.');
              return;
            }

            const newBody = (body ? body + '\n\n' : '') + templateText + '\n';
            await github.rest.pulls.update({ owner, repo, pull_number: number, body: newBody });
            core.notice('API Node template appended to PR description.');

From 2fde9597f4b02c5f06c1a5ceb3ca2fa6d74966ec Mon Sep 17 00:00:00 2001
From: ric-yu
Date: Thu, 13 Nov 2025 15:11:52 -0800
Subject: [PATCH 0868/1073] feat: add create_time dict to prompt field in
 /history and /queue (#10741)

---
 server.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server.py b/server.py
index 5d773b10a..d059d3dc9 100644
--- a/server.py
+++ b/server.py
@@ -2,6 +2,7 @@ import os
 import sys
 import asyncio
 import traceback
+import time

 import nodes
 import folder_paths
@@ -733,6 +734,7 @@ class PromptServer():
             for sensitive_val in execution.SENSITIVE_EXTRA_DATA_KEYS:
                 if sensitive_val in extra_data:
                     sensitive[sensitive_val] = extra_data.pop(sensitive_val)
+            extra_data["create_time"] = int(time.time() * 1000) # timestamp in milliseconds
             self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive))
             response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
             return web.json_response(response)

From 94c298f9625b0fd9af8ea07a73075fdefe0d9e57 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Fri, 14 Nov 2025 10:02:03 +1000
Subject: [PATCH 0869/1073] flux: reduce VRAM usage (#10737)

Clean up a bunch of stacked tensors in Flux. This takes me from B=19 to
B=22 for 1600x1600 on RTX5090.
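The pattern, in miniature (an illustrative sketch of the change below,
not the actual flux code):

    # before: the per-stream tensors stay alive across the attention call
    attn = attention(torch.cat((txt_q, img_q), dim=2), ...)

    # after: materialize each stacked tensor, then drop its inputs right
    # away so they no longer count towards the VRAM peak
    q = torch.cat((txt_q, img_q), dim=2)
    del txt_q, img_q
    ...
    attn = attention(q, k, v, pe=pe, mask=attn_mask, ...)
    del q, k, v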
--- comfy/ldm/flux/layers.py | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py index a3eab0470..f4bf56e01 100644 --- a/comfy/ldm/flux/layers.py +++ b/comfy/ldm/flux/layers.py @@ -167,39 +167,55 @@ class DoubleStreamBlock(nn.Module): img_modulated = self.img_norm1(img) img_modulated = apply_mod(img_modulated, (1 + img_mod1.scale), img_mod1.shift, modulation_dims_img) img_qkv = self.img_attn.qkv(img_modulated) + del img_modulated img_q, img_k, img_v = img_qkv.view(img_qkv.shape[0], img_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + del img_qkv img_q, img_k = self.img_attn.norm(img_q, img_k, img_v) # prepare txt for attention txt_modulated = self.txt_norm1(txt) txt_modulated = apply_mod(txt_modulated, (1 + txt_mod1.scale), txt_mod1.shift, modulation_dims_txt) txt_qkv = self.txt_attn.qkv(txt_modulated) + del txt_modulated txt_q, txt_k, txt_v = txt_qkv.view(txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + del txt_qkv txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v) if self.flipped_img_txt: + q = torch.cat((img_q, txt_q), dim=2) + del img_q, txt_q + k = torch.cat((img_k, txt_k), dim=2) + del img_k, txt_k + v = torch.cat((img_v, txt_v), dim=2) + del img_v, txt_v # run actual attention - attn = attention(torch.cat((img_q, txt_q), dim=2), - torch.cat((img_k, txt_k), dim=2), - torch.cat((img_v, txt_v), dim=2), + attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options) + del q, k, v img_attn, txt_attn = attn[:, : img.shape[1]], attn[:, img.shape[1]:] else: + q = torch.cat((txt_q, img_q), dim=2) + del txt_q, img_q + k = torch.cat((txt_k, img_k), dim=2) + del txt_k, img_k + v = torch.cat((txt_v, img_v), dim=2) + del txt_v, img_v # run actual attention - attn = attention(torch.cat((txt_q, img_q), dim=2), - torch.cat((txt_k, img_k), dim=2), - torch.cat((txt_v, img_v), dim=2), + attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options) + del q, k, v txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:] # calculate the img bloks img += apply_mod(self.img_attn.proj(img_attn), img_mod1.gate, None, modulation_dims_img) + del img_attn img += apply_mod(self.img_mlp(apply_mod(self.img_norm2(img), (1 + img_mod2.scale), img_mod2.shift, modulation_dims_img)), img_mod2.gate, None, modulation_dims_img) # calculate the txt bloks txt += apply_mod(self.txt_attn.proj(txt_attn), txt_mod1.gate, None, modulation_dims_txt) + del txt_attn txt += apply_mod(self.txt_mlp(apply_mod(self.txt_norm2(txt), (1 + txt_mod2.scale), txt_mod2.shift, modulation_dims_txt)), txt_mod2.gate, None, modulation_dims_txt) if txt.dtype == torch.float16: @@ -249,12 +265,15 @@ class SingleStreamBlock(nn.Module): qkv, mlp = torch.split(self.linear1(apply_mod(self.pre_norm(x), (1 + mod.scale), mod.shift, modulation_dims)), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + del qkv q, k = self.norm(q, k, v) # compute attention attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options) + del q, k, v # compute activation in mlp stream, cat again and run second linear layer - output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2)) + mlp = self.mlp_act(mlp) + output = self.linear2(torch.cat((attn, mlp), 2)) x += apply_mod(output, mod.gate, None, modulation_dims) if x.dtype == torch.float16: 
x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504) From 1ef328c007a419c2c429df0f80532cc11579dc97 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 13 Nov 2025 18:32:39 -0800 Subject: [PATCH 0870/1073] Better instructions for the portable. (#10743) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f51807ad5..cd8273b0d 100644 --- a/README.md +++ b/README.md @@ -173,7 +173,7 @@ There is a portable standalone build for Windows that should work for running on ### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia.7z) -Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints +Simply download, extract with [7-Zip](https://7-zip.org) or with the windows explorer on recent windows versions and run. For smaller models you normally only need to put the checkpoints (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints but many of the larger models have multiple files. Make sure to follow the instructions to know which subfolder to put them in ComfyUI\models\ If you have trouble extracting it, right click the file -> properties -> unblock From f60923590c3f2fd05e166e2ec57968aaf7007dd0 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 13 Nov 2025 22:28:05 -0800 Subject: [PATCH 0871/1073] Use same code for chroma and flux blocks so that optimizations are shared. (#10746) --- comfy/ldm/chroma/layers.py | 121 ----------------------------- comfy/ldm/chroma/model.py | 7 +- comfy/ldm/chroma_radiance/model.py | 7 +- comfy/ldm/flux/layers.py | 31 ++++++-- 4 files changed, 31 insertions(+), 135 deletions(-) diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py index fc7110cce..9f4ad5bd2 100644 --- a/comfy/ldm/chroma/layers.py +++ b/comfy/ldm/chroma/layers.py @@ -1,12 +1,9 @@ import torch from torch import Tensor, nn -from comfy.ldm.flux.math import attention from comfy.ldm.flux.layers import ( MLPEmbedder, RMSNorm, - QKNorm, - SelfAttention, ModulationOut, ) @@ -48,124 +45,6 @@ class Approximator(nn.Module): return x -class DoubleStreamBlock(nn.Module): - def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, dtype=None, device=None, operations=None): - super().__init__() - - mlp_hidden_dim = int(hidden_size * mlp_ratio) - self.num_heads = num_heads - self.hidden_size = hidden_size - self.img_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, dtype=dtype, device=device, operations=operations) - - self.img_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.img_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), - nn.GELU(approximate="tanh"), - operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), - ) - - self.txt_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, dtype=dtype, device=device, 
operations=operations) - - self.txt_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.txt_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), - nn.GELU(approximate="tanh"), - operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), - ) - self.flipped_img_txt = flipped_img_txt - - def forward(self, img: Tensor, txt: Tensor, pe: Tensor, vec: Tensor, attn_mask=None, transformer_options={}): - (img_mod1, img_mod2), (txt_mod1, txt_mod2) = vec - - # prepare image for attention - img_modulated = torch.addcmul(img_mod1.shift, 1 + img_mod1.scale, self.img_norm1(img)) - img_qkv = self.img_attn.qkv(img_modulated) - img_q, img_k, img_v = img_qkv.view(img_qkv.shape[0], img_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) - img_q, img_k = self.img_attn.norm(img_q, img_k, img_v) - - # prepare txt for attention - txt_modulated = torch.addcmul(txt_mod1.shift, 1 + txt_mod1.scale, self.txt_norm1(txt)) - txt_qkv = self.txt_attn.qkv(txt_modulated) - txt_q, txt_k, txt_v = txt_qkv.view(txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) - txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v) - - # run actual attention - attn = attention(torch.cat((txt_q, img_q), dim=2), - torch.cat((txt_k, img_k), dim=2), - torch.cat((txt_v, img_v), dim=2), - pe=pe, mask=attn_mask, transformer_options=transformer_options) - - txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :] - - # calculate the img bloks - img.addcmul_(img_mod1.gate, self.img_attn.proj(img_attn)) - img.addcmul_(img_mod2.gate, self.img_mlp(torch.addcmul(img_mod2.shift, 1 + img_mod2.scale, self.img_norm2(img)))) - - # calculate the txt bloks - txt.addcmul_(txt_mod1.gate, self.txt_attn.proj(txt_attn)) - txt.addcmul_(txt_mod2.gate, self.txt_mlp(torch.addcmul(txt_mod2.shift, 1 + txt_mod2.scale, self.txt_norm2(txt)))) - - if txt.dtype == torch.float16: - txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504) - - return img, txt - - -class SingleStreamBlock(nn.Module): - """ - A DiT block with parallel linear layers as described in - https://arxiv.org/abs/2302.05442 and adapted modulation interface. 
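The `SingleStreamBlock` being removed here duplicates the Flux implementation exactly: the parallel layout from the cited paper fuses the qkv projection and the MLP input into one `linear1`, and fuses the attention projection and the MLP output into one `linear2`. A self-contained sketch of that wiring, with illustrative dimensions and PyTorch's `scaled_dot_product_attention` standing in for ComfyUI's `attention` helper:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ParallelBlockSketch(nn.Module):
    """Attention and MLP branches fed by one fused input projection (illustrative)."""
    def __init__(self, hidden: int, heads: int, mlp_ratio: float = 4.0):
        super().__init__()
        self.heads = heads
        self.mlp_hidden = int(hidden * mlp_ratio)
        self.linear1 = nn.Linear(hidden, hidden * 3 + self.mlp_hidden)  # qkv + mlp_in
        self.linear2 = nn.Linear(hidden + self.mlp_hidden, hidden)      # proj + mlp_out
        self.act = nn.GELU(approximate="tanh")

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        qkv, mlp = torch.split(self.linear1(x), [3 * x.shape[-1], self.mlp_hidden], dim=-1)
        q, k, v = qkv.view(*qkv.shape[:2], 3, self.heads, -1).permute(2, 0, 3, 1, 4)
        attn = F.scaled_dot_product_attention(q, k, v)   # (B, heads, L, head_dim)
        attn = attn.transpose(1, 2).reshape(x.shape)     # back to (B, L, hidden)
        return x + self.linear2(torch.cat((attn, self.act(mlp)), dim=-1))

y = ParallelBlockSketch(64, 4)(torch.randn(2, 16, 64))  # smoke test: y.shape == (2, 16, 64)
```

Keeping a single copy of this block means the `del`-based peak-memory trims from the previous commit apply to Chroma for free, which is the stated motivation for the merge.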
- """ - - def __init__( - self, - hidden_size: int, - num_heads: int, - mlp_ratio: float = 4.0, - qk_scale: float = None, - dtype=None, - device=None, - operations=None - ): - super().__init__() - self.hidden_dim = hidden_size - self.num_heads = num_heads - head_dim = hidden_size // num_heads - self.scale = qk_scale or head_dim**-0.5 - - self.mlp_hidden_dim = int(hidden_size * mlp_ratio) - # qkv and mlp_in - self.linear1 = operations.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim, dtype=dtype, device=device) - # proj and mlp_out - self.linear2 = operations.Linear(hidden_size + self.mlp_hidden_dim, hidden_size, dtype=dtype, device=device) - - self.norm = QKNorm(head_dim, dtype=dtype, device=device, operations=operations) - - self.hidden_size = hidden_size - self.pre_norm = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - - self.mlp_act = nn.GELU(approximate="tanh") - - def forward(self, x: Tensor, pe: Tensor, vec: Tensor, attn_mask=None, transformer_options={}) -> Tensor: - mod = vec - x_mod = torch.addcmul(mod.shift, 1 + mod.scale, self.pre_norm(x)) - qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) - - q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) - q, k = self.norm(q, k, v) - - # compute attention - attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options) - # compute activation in mlp stream, cat again and run second linear layer - output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2)) - x.addcmul_(mod.gate, output) - if x.dtype == torch.float16: - x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504) - return x - - class LastLayer(nn.Module): def __init__(self, hidden_size: int, patch_size: int, out_channels: int, dtype=None, device=None, operations=None): super().__init__() diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index ad1c523fe..67bf70eb1 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -11,12 +11,12 @@ import comfy.ldm.common_dit from comfy.ldm.flux.layers import ( EmbedND, timestep_embedding, + DoubleStreamBlock, + SingleStreamBlock, ) from .layers import ( - DoubleStreamBlock, LastLayer, - SingleStreamBlock, Approximator, ChromaModulationOut, ) @@ -90,6 +90,7 @@ class Chroma(nn.Module): self.num_heads, mlp_ratio=params.mlp_ratio, qkv_bias=params.qkv_bias, + modulation=False, dtype=dtype, device=device, operations=operations ) for _ in range(params.depth) @@ -98,7 +99,7 @@ class Chroma(nn.Module): self.single_blocks = nn.ModuleList( [ - SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, dtype=dtype, device=device, operations=operations) + SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, modulation=False, dtype=dtype, device=device, operations=operations) for _ in range(params.depth_single_blocks) ] ) diff --git a/comfy/ldm/chroma_radiance/model.py b/comfy/ldm/chroma_radiance/model.py index 7d7be80f5..e643b4414 100644 --- a/comfy/ldm/chroma_radiance/model.py +++ b/comfy/ldm/chroma_radiance/model.py @@ -10,12 +10,10 @@ from torch import Tensor, nn from einops import repeat import comfy.ldm.common_dit -from comfy.ldm.flux.layers import EmbedND +from comfy.ldm.flux.layers import EmbedND, DoubleStreamBlock, SingleStreamBlock from comfy.ldm.chroma.model import Chroma, ChromaParams from comfy.ldm.chroma.layers import ( - DoubleStreamBlock, - SingleStreamBlock, Approximator, ) 
from .layers import ( @@ -89,7 +87,6 @@ class ChromaRadiance(Chroma): dtype=dtype, device=device, operations=operations ) - self.double_blocks = nn.ModuleList( [ DoubleStreamBlock( @@ -97,6 +94,7 @@ class ChromaRadiance(Chroma): self.num_heads, mlp_ratio=params.mlp_ratio, qkv_bias=params.qkv_bias, + modulation=False, dtype=dtype, device=device, operations=operations ) for _ in range(params.depth) @@ -109,6 +107,7 @@ class ChromaRadiance(Chroma): self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, + modulation=False, dtype=dtype, device=device, operations=operations, ) for _ in range(params.depth_single_blocks) diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py index f4bf56e01..23150a712 100644 --- a/comfy/ldm/flux/layers.py +++ b/comfy/ldm/flux/layers.py @@ -130,13 +130,17 @@ def apply_mod(tensor, m_mult, m_add=None, modulation_dims=None): class DoubleStreamBlock(nn.Module): - def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, dtype=None, device=None, operations=None): + def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, modulation=True, dtype=None, device=None, operations=None): super().__init__() mlp_hidden_dim = int(hidden_size * mlp_ratio) self.num_heads = num_heads self.hidden_size = hidden_size - self.img_mod = Modulation(hidden_size, double=True, dtype=dtype, device=device, operations=operations) + self.modulation = modulation + + if self.modulation: + self.img_mod = Modulation(hidden_size, double=True, dtype=dtype, device=device, operations=operations) + self.img_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, dtype=dtype, device=device, operations=operations) @@ -147,7 +151,9 @@ class DoubleStreamBlock(nn.Module): operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), ) - self.txt_mod = Modulation(hidden_size, double=True, dtype=dtype, device=device, operations=operations) + if self.modulation: + self.txt_mod = Modulation(hidden_size, double=True, dtype=dtype, device=device, operations=operations) + self.txt_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, dtype=dtype, device=device, operations=operations) @@ -160,8 +166,11 @@ class DoubleStreamBlock(nn.Module): self.flipped_img_txt = flipped_img_txt def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, attn_mask=None, modulation_dims_img=None, modulation_dims_txt=None, transformer_options={}): - img_mod1, img_mod2 = self.img_mod(vec) - txt_mod1, txt_mod2 = self.txt_mod(vec) + if self.modulation: + img_mod1, img_mod2 = self.img_mod(vec) + txt_mod1, txt_mod2 = self.txt_mod(vec) + else: + (img_mod1, img_mod2), (txt_mod1, txt_mod2) = vec # prepare image for attention img_modulated = self.img_norm1(img) @@ -236,6 +245,7 @@ class SingleStreamBlock(nn.Module): num_heads: int, mlp_ratio: float = 4.0, qk_scale: float = None, + modulation=True, dtype=None, device=None, operations=None @@ -258,10 +268,17 @@ class SingleStreamBlock(nn.Module): self.pre_norm = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) self.mlp_act = nn.GELU(approximate="tanh") - self.modulation = Modulation(hidden_size, double=False, 
dtype=dtype, device=device, operations=operations) + if modulation: + self.modulation = Modulation(hidden_size, double=False, dtype=dtype, device=device, operations=operations) + else: + self.modulation = None def forward(self, x: Tensor, vec: Tensor, pe: Tensor, attn_mask=None, modulation_dims=None, transformer_options={}) -> Tensor: - mod, _ = self.modulation(vec) + if self.modulation: + mod, _ = self.modulation(vec) + else: + mod = vec + qkv, mlp = torch.split(self.linear1(apply_mod(self.pre_norm(x), (1 + mod.scale), mod.shift, modulation_dims)), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) From 443056c401c53953bb8eee6da71b9ad29afe2581 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 14 Nov 2025 00:26:05 -0800 Subject: [PATCH 0872/1073] Fix custom nodes import error. (#10747) This should fix the import errors but will break if the custom nodes actually try to use the class. --- comfy/ldm/chroma/layers.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py index 9f4ad5bd2..2d5684348 100644 --- a/comfy/ldm/chroma/layers.py +++ b/comfy/ldm/chroma/layers.py @@ -7,6 +7,9 @@ from comfy.ldm.flux.layers import ( ModulationOut, ) +# TODO: remove this in a few months +SingleStreamBlock = None +DoubleStreamBlock = None class ChromaModulationOut(ModulationOut): From bd01d9f7fd241a45bd08b60dfedbe78577383cc4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 15 Nov 2025 03:54:40 -0800 Subject: [PATCH 0873/1073] Add left padding support to tokenizers. (#10753) --- comfy/sd1_clip.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index f8a7c2a1b..3066de2d7 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -460,7 +460,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No return embed_out class SDTokenizer: - def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, tokenizer_data={}, tokenizer_args={}): + def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, pad_left=False, tokenizer_data={}, tokenizer_args={}): if tokenizer_path is None: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args) @@ -468,6 +468,7 @@ class SDTokenizer: self.min_length = tokenizer_data.get("{}_min_length".format(embedding_key), min_length) self.end_token = None self.min_padding = min_padding + self.pad_left = pad_left empty = self.tokenizer('')["input_ids"] self.tokenizer_adds_end_token = has_end_token @@ -522,6 +523,12 @@ class SDTokenizer: return (embed, "{} {}".format(embedding_name[len(stripped):], leftover)) return (embed, leftover) + def pad_tokens(self, tokens, amount): + if self.pad_left: + for i in range(amount): + 
tokens.insert(0, (self.pad_token, 1.0, 0)) + else: + tokens.extend([(self.pad_token, 1.0, 0)] * amount) def tokenize_with_weights(self, text:str, return_word_ids=False, tokenizer_options={}, **kwargs): ''' @@ -600,7 +607,7 @@ class SDTokenizer: if self.end_token is not None: batch.append((self.end_token, 1.0, 0)) if self.pad_to_max_length: - batch.extend([(self.pad_token, 1.0, 0)] * (remaining_length)) + self.pad_tokens(batch, remaining_length) #start new batch batch = [] if self.start_token is not None: @@ -614,11 +621,11 @@ class SDTokenizer: if self.end_token is not None: batch.append((self.end_token, 1.0, 0)) if min_padding is not None: - batch.extend([(self.pad_token, 1.0, 0)] * min_padding) + self.pad_tokens(batch, min_padding) if self.pad_to_max_length and len(batch) < self.max_length: - batch.extend([(self.pad_token, 1.0, 0)] * (self.max_length - len(batch))) + self.pad_tokens(batch, self.max_length - len(batch)) if min_length is not None and len(batch) < min_length: - batch.extend([(self.pad_token, 1.0, 0)] * (min_length - len(batch))) + self.pad_tokens(batch, min_length - len(batch)) if not return_word_ids: batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens] From 9a0238256873711bd38ce0e0b1d15a617a1ee454 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 15 Nov 2025 21:18:49 +0200 Subject: [PATCH 0874/1073] chore(api-nodes): mark OpenAIDalle2 and OpenAIDalle3 nodes as deprecated (#10757) --- comfy_api_nodes/nodes_openai.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index acf35d276..e08bec08c 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -169,6 +169,7 @@ class OpenAIDalle2(IO.ComfyNode): IO.Hidden.unique_id, ], is_api_node=True, + is_deprecated=True, ) @classmethod @@ -299,6 +300,7 @@ class OpenAIDalle3(IO.ComfyNode): IO.Hidden.unique_id, ], is_api_node=True, + is_deprecated=True, ) @classmethod From 2d4a08b717c492fa45e98bd70beb48d4e77cb464 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 15 Nov 2025 22:37:34 +0200 Subject: [PATCH 0875/1073] Revert "chore(api-nodes): mark OpenAIDalle2 and OpenAIDalle3 nodes as deprecated (#10757)" (#10759) This reverts commit 9a0238256873711bd38ce0e0b1d15a617a1ee454. 
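The tokenizer patch above funnels every padding site through a single `pad_tokens` helper so that `pad_left=True` prepends pad entries instead of appending them. Left padding is what decoder-only text encoders generally want: the real tokens end up flush against the end of the window, where a causal model's last hidden states live. A standalone paraphrase of the helper; the token tuples follow SDTokenizer's `(token_id, weight, word_id)` convention, and the ids below are made up:

```python
def pad_tokens(tokens, pad_token, amount, pad_left=False):
    """Prepend or append `amount` pad entries to a token list in place."""
    if pad_left:
        for _ in range(amount):
            tokens.insert(0, (pad_token, 1.0, 0))
    else:
        tokens.extend([(pad_token, 1.0, 0)] * amount)
    return tokens

seq = [(320, 1.0, 1), (1125, 1.0, 2)]
print(pad_tokens(list(seq), 0, 3))                 # right pad: real tokens first, 3 pads last
print(pad_tokens(list(seq), 0, 3, pad_left=True))  # left pad: 3 pads first, real tokens last
```

Routing `min_padding`, `pad_to_max_length`, and `min_length` through the same helper keeps the three padding paths consistent, so a tokenizer only has to set `pad_left=True` once to switch all of them.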
--- comfy_api_nodes/nodes_openai.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index e08bec08c..acf35d276 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -169,7 +169,6 @@ class OpenAIDalle2(IO.ComfyNode): IO.Hidden.unique_id, ], is_api_node=True, - is_deprecated=True, ) @classmethod @@ -300,7 +299,6 @@ class OpenAIDalle3(IO.ComfyNode): IO.Hidden.unique_id, ], is_api_node=True, - is_deprecated=True, ) @classmethod From 7d6103325e1c97aa54f963253e3e7f1d6da6947f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 16 Nov 2025 00:01:14 -0800 Subject: [PATCH 0876/1073] Change ROCm nightly install command to 7.1 (#10764) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cd8273b0d..c0384099d 100644 --- a/README.md +++ b/README.md @@ -221,7 +221,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins This is the command to install the nightly with ROCm 7.0 which might have some performance improvements: -```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm7.0``` +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm7.1``` ### AMD GPUs (Experimental: Windows and Linux), RDNA 3, 3.5 and 4 only. From 3d0003c24c1aec9f0c021dbc70ffb7cd8cf0685c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 17 Nov 2025 17:17:24 -0500 Subject: [PATCH 0877/1073] ComfyUI version 0.3.69 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 25d1a4157..1e554eb9f 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.68" +__version__ = "0.3.69" diff --git a/pyproject.toml b/pyproject.toml index 79ff3f74a..63778286f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.68" +version = "0.3.69" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 27cbac865ec226cfd9c1563327b0d62cf5dbd484 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 17 Nov 2025 16:04:04 -0800 Subject: [PATCH 0878/1073] Add release workflow for NVIDIA cu126 (#10777) --- .github/workflows/release-stable-all.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/workflows/release-stable-all.yml b/.github/workflows/release-stable-all.yml index 7dca7277b..f7de3a7c3 100644 --- a/.github/workflows/release-stable-all.yml +++ b/.github/workflows/release-stable-all.yml @@ -43,6 +43,23 @@ jobs: test_release: true secrets: inherit + release_nvidia_cu126: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + name: "Release NVIDIA cu126" + uses: ./.github/workflows/stable-release.yml + with: + git_tag: ${{ inputs.git_tag }} + cache_tag: "cu126" + python_minor: "12" + python_patch: "10" + rel_name: "nvidia" + rel_extra_name: "_cu126" + test_release: true + secrets: inherit + release_amd_rocm: permissions: contents: "write" From f41e5f398d5d4059a3c87cf157bd932afcce3c0d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 17 Nov 2025 16:59:19 -0800 Subject: [PATCH 0879/1073] Update README with new portable download link (#10778) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c0384099d..323dfc587 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,9 @@ Update your Nvidia drivers if it doesn't start. [Experimental portable for AMD GPUs](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_amd.7z) -[Portable with pytorch cuda 12.8 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu128.7z) (Supports Nvidia 10 series and older GPUs). +[Portable with pytorch cuda 12.8 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu128.7z). + +[Portable with pytorch cuda 12.6 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu128.7z) (Supports Nvidia 10 series and older GPUs). #### How do I share models between another UI and ComfyUI? From fdf49a28617f742d746ad209e57ed7420b3535dc Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Tue, 18 Nov 2025 11:04:06 +0800 Subject: [PATCH 0880/1073] Fix the portable download link for CUDA 12.6 (#10780) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 323dfc587..28beec427 100644 --- a/README.md +++ b/README.md @@ -185,7 +185,7 @@ Update your Nvidia drivers if it doesn't start. [Portable with pytorch cuda 12.8 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu128.7z). -[Portable with pytorch cuda 12.6 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu128.7z) (Supports Nvidia 10 series and older GPUs). 
+[Portable with pytorch cuda 12.6 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu126.7z) (Supports Nvidia 10 series and older GPUs). #### How do I share models between another UI and ComfyUI? From 47bfd5a33fa984a1102fc2bd7b25c91a69ace288 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 17 Nov 2025 21:26:44 -0800 Subject: [PATCH 0881/1073] Native block swap custom nodes considered harmful. (#10783) --- comfy_extras/nodes_nop.py | 39 +++++++++++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 40 insertions(+) create mode 100644 comfy_extras/nodes_nop.py diff --git a/comfy_extras/nodes_nop.py b/comfy_extras/nodes_nop.py new file mode 100644 index 000000000..953061bcb --- /dev/null +++ b/comfy_extras/nodes_nop.py @@ -0,0 +1,39 @@ +from comfy_api.latest import ComfyExtension, io +from typing_extensions import override +# If you write a node that is so useless that it breaks ComfyUI it will be featured in this exclusive list + +# "native" block swap nodes are placebo at best and break the ComfyUI memory management system. +# They are also considered harmful because instead of users reporting issues with the built in +# memory management they install these stupid nodes and complain even harder. Now it completely +# breaks with some of the new ComfyUI memory optimizations so I have made the decision to NOP it +# out of all workflows. +class wanBlockSwap(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="wanBlockSwap", + category="", + description="NOP", + inputs=[ + io.Model.Input("model"), + ], + outputs=[ + io.Model.Output(), + ], + is_deprecated=True, + ) + + @classmethod + def execute(cls, model) -> io.NodeOutput: + return io.NodeOutput(model) + + +class NopExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + wanBlockSwap + ] + +async def comfy_entrypoint() -> NopExtension: + return NopExtension() diff --git a/nodes.py b/nodes.py index 5689f6fe1..f6aeedc78 100644 --- a/nodes.py +++ b/nodes.py @@ -2330,6 +2330,7 @@ async def init_builtin_extra_nodes(): "nodes_easycache.py", "nodes_audio_encoder.py", "nodes_rope.py", + "nodes_nop.py", ] import_failed = [] From 048f49adbd19ac2d9c7c87682c832b7827a4b29d Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 18 Nov 2025 13:59:27 +0200 Subject: [PATCH 0882/1073] chore(api-nodes): adjusted PR template; set min python version for pylint to 3.10 (#10787) --- .github/PULL_REQUEST_TEMPLATE/api-node.md | 2 +- .github/workflows/api-node-template.yml | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/api-node.md b/.github/PULL_REQUEST_TEMPLATE/api-node.md index f62744878..c1f1bafb1 100644 --- a/.github/PULL_REQUEST_TEMPLATE/api-node.md +++ b/.github/PULL_REQUEST_TEMPLATE/api-node.md @@ -18,4 +18,4 @@ If **Need pricing update**: - [ ] **QA not required** ### Comms -- [ ] Informed **@Kosinkadink** +- [ ] Informed **Kosinkadink** diff --git a/.github/workflows/api-node-template.yml b/.github/workflows/api-node-template.yml index 0775f9979..fdb81c0c5 100644 --- a/.github/workflows/api-node-template.yml +++ b/.github/workflows/api-node-template.yml @@ -2,7 +2,7 @@ name: Append API Node PR template on: pull_request_target: - types: [opened, reopened, synchronize, edited, ready_for_review] + types: [opened, 
reopened, synchronize, ready_for_review] paths: - 'comfy_api_nodes/**' # only run if these files changed diff --git a/pyproject.toml b/pyproject.toml index 63778286f..a14b383b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ lint.select = [ exclude = ["*.ipynb", "**/generated/*.pyi"] [tool.pylint] -master.py-version = "3.9" +master.py-version = "3.10" master.extension-pkg-allow-list = [ "pydantic", ] From e1ab6bb394b82fa654d5bc84043f97479d12f84c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Tue, 18 Nov 2025 17:00:21 +0200 Subject: [PATCH 0883/1073] EasyCache: Fix for mismatch in input/output channels with some models (#10788) Slices model input with output channels so the caching tracks only the noise channels, resolves channel mismatch with models like WanVideo I2V Also fix for slicing deprecation in pytorch 2.9 --- comfy_extras/nodes_easycache.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py index 1359e2f99..11b23ffdb 100644 --- a/comfy_extras/nodes_easycache.py +++ b/comfy_extras/nodes_easycache.py @@ -11,13 +11,13 @@ if TYPE_CHECKING: def easycache_forward_wrapper(executor, *args, **kwargs): # get values from args - x: torch.Tensor = args[0] transformer_options: dict[str] = args[-1] if not isinstance(transformer_options, dict): transformer_options = kwargs.get("transformer_options") if not transformer_options: transformer_options = args[-2] easycache: EasyCacheHolder = transformer_options["easycache"] + x: torch.Tensor = args[0][:, :easycache.output_channels] sigmas = transformer_options["sigmas"] uuids = transformer_options["uuids"] if sigmas is not None and easycache.is_past_end_timestep(sigmas): @@ -82,13 +82,13 @@ def easycache_forward_wrapper(executor, *args, **kwargs): def lazycache_predict_noise_wrapper(executor, *args, **kwargs): # get values from args - x: torch.Tensor = args[0] timestep: float = args[1] model_options: dict[str] = args[2] easycache: LazyCacheHolder = model_options["transformer_options"]["easycache"] if easycache.is_past_end_timestep(timestep): return executor(*args, **kwargs) # prepare next x_prev + x: torch.Tensor = args[0][:, :easycache.output_channels] next_x_prev = x input_change = None do_easycache = easycache.should_do_easycache(timestep) @@ -173,7 +173,7 @@ def easycache_sample_wrapper(executor, *args, **kwargs): class EasyCacheHolder: - def __init__(self, reuse_threshold: float, start_percent: float, end_percent: float, subsample_factor: int, offload_cache_diff: bool, verbose: bool=False): + def __init__(self, reuse_threshold: float, start_percent: float, end_percent: float, subsample_factor: int, offload_cache_diff: bool, verbose: bool=False, output_channels: int=None): self.name = "EasyCache" self.reuse_threshold = reuse_threshold self.start_percent = start_percent @@ -202,6 +202,7 @@ class EasyCacheHolder: self.allow_mismatch = True self.cut_from_start = True self.state_metadata = None + self.output_channels = output_channels def is_past_end_timestep(self, timestep: float) -> bool: return not (timestep[0] > self.end_t).item() @@ -264,7 +265,7 @@ class EasyCacheHolder: else: slicing.append(slice(None)) batch_slice = batch_slice + slicing - x[batch_slice] += self.uuid_cache_diffs[uuid].to(x.device) + x[tuple(batch_slice)] += self.uuid_cache_diffs[uuid].to(x.device) return x def update_cache_diff(self, output: torch.Tensor, x: torch.Tensor, uuids: 
list[UUID]): @@ -283,7 +284,7 @@ class EasyCacheHolder: else: slicing.append(slice(None)) skip_dim = False - x = x[slicing] + x = x[tuple(slicing)] diff = output - x batch_offset = diff.shape[0] // len(uuids) for i, uuid in enumerate(uuids): @@ -323,7 +324,7 @@ class EasyCacheHolder: return self def clone(self): - return EasyCacheHolder(self.reuse_threshold, self.start_percent, self.end_percent, self.subsample_factor, self.offload_cache_diff, self.verbose) + return EasyCacheHolder(self.reuse_threshold, self.start_percent, self.end_percent, self.subsample_factor, self.offload_cache_diff, self.verbose, output_channels=self.output_channels) class EasyCacheNode(io.ComfyNode): @@ -350,7 +351,7 @@ class EasyCacheNode(io.ComfyNode): @classmethod def execute(cls, model: io.Model.Type, reuse_threshold: float, start_percent: float, end_percent: float, verbose: bool) -> io.NodeOutput: model = model.clone() - model.model_options["transformer_options"]["easycache"] = EasyCacheHolder(reuse_threshold, start_percent, end_percent, subsample_factor=8, offload_cache_diff=False, verbose=verbose) + model.model_options["transformer_options"]["easycache"] = EasyCacheHolder(reuse_threshold, start_percent, end_percent, subsample_factor=8, offload_cache_diff=False, verbose=verbose, output_channels=model.model.latent_format.latent_channels) model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.OUTER_SAMPLE, "easycache", easycache_sample_wrapper) model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.CALC_COND_BATCH, "easycache", easycache_calc_cond_batch_wrapper) model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, "easycache", easycache_forward_wrapper) @@ -358,7 +359,7 @@ class EasyCacheNode(io.ComfyNode): class LazyCacheHolder: - def __init__(self, reuse_threshold: float, start_percent: float, end_percent: float, subsample_factor: int, offload_cache_diff: bool, verbose: bool=False): + def __init__(self, reuse_threshold: float, start_percent: float, end_percent: float, subsample_factor: int, offload_cache_diff: bool, verbose: bool=False, output_channels: int=None): self.name = "LazyCache" self.reuse_threshold = reuse_threshold self.start_percent = start_percent @@ -382,6 +383,7 @@ class LazyCacheHolder: self.approx_output_change_rates = [] self.total_steps_skipped = 0 self.state_metadata = None + self.output_channels = output_channels def has_cache_diff(self) -> bool: return self.cache_diff is not None @@ -456,7 +458,7 @@ class LazyCacheHolder: return self def clone(self): - return LazyCacheHolder(self.reuse_threshold, self.start_percent, self.end_percent, self.subsample_factor, self.offload_cache_diff, self.verbose) + return LazyCacheHolder(self.reuse_threshold, self.start_percent, self.end_percent, self.subsample_factor, self.offload_cache_diff, self.verbose, output_channels=self.output_channels) class LazyCacheNode(io.ComfyNode): @classmethod @@ -482,7 +484,7 @@ class LazyCacheNode(io.ComfyNode): @classmethod def execute(cls, model: io.Model.Type, reuse_threshold: float, start_percent: float, end_percent: float, verbose: bool) -> io.NodeOutput: model = model.clone() - model.model_options["transformer_options"]["easycache"] = LazyCacheHolder(reuse_threshold, start_percent, end_percent, subsample_factor=8, offload_cache_diff=False, verbose=verbose) + model.model_options["transformer_options"]["easycache"] = LazyCacheHolder(reuse_threshold, start_percent, end_percent, subsample_factor=8, offload_cache_diff=False, verbose=verbose, 
output_channels=model.model.latent_format.latent_channels) model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.OUTER_SAMPLE, "lazycache", easycache_sample_wrapper) model.add_wrapper_with_key(comfy.patcher_extension.WrappersMP.PREDICT_NOISE, "lazycache", lazycache_predict_noise_wrapper) return io.NodeOutput(model) From d52697457608a045cafc3b6d6cb89f0a49ba0709 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 18 Nov 2025 13:46:19 -0800 Subject: [PATCH 0884/1073] Fix hunyuan 3d 2.0 (#10792) --- comfy/ldm/flux/math.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/flux/math.py b/comfy/ldm/flux/math.py index 158420290..6a22df8bc 100644 --- a/comfy/ldm/flux/math.py +++ b/comfy/ldm/flux/math.py @@ -7,7 +7,8 @@ import comfy.model_management def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None, transformer_options={}) -> Tensor: - q, k = apply_rope(q, k, pe) + if pe is not None: + q, k = apply_rope(q, k, pe) heads = q.shape[1] x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask, transformer_options=transformer_options) return x From 24fdb92edf2e96fe757c480aa7f12be5bdfa3a15 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 19 Nov 2025 00:26:44 +0200 Subject: [PATCH 0885/1073] feat(api-nodes): add new Gemini model (#10789) --- comfy_api_nodes/apis/gemini_api.py | 231 +++++++++++++++++++++++++++-- comfy_api_nodes/nodes_gemini.py | 47 +++--- 2 files changed, 246 insertions(+), 32 deletions(-) diff --git a/comfy_api_nodes/apis/gemini_api.py b/comfy_api_nodes/apis/gemini_api.py index 2bf28bf93..f63e02693 100644 --- a/comfy_api_nodes/apis/gemini_api.py +++ b/comfy_api_nodes/apis/gemini_api.py @@ -1,22 +1,229 @@ -from typing import Optional +from datetime import date +from enum import Enum +from typing import Any -from comfy_api_nodes.apis import GeminiGenerationConfig, GeminiContent, GeminiSafetySetting, GeminiSystemInstructionContent, GeminiTool, GeminiVideoMetadata -from pydantic import BaseModel +from pydantic import BaseModel, Field + + +class GeminiSafetyCategory(str, Enum): + HARM_CATEGORY_SEXUALLY_EXPLICIT = "HARM_CATEGORY_SEXUALLY_EXPLICIT" + HARM_CATEGORY_HATE_SPEECH = "HARM_CATEGORY_HATE_SPEECH" + HARM_CATEGORY_HARASSMENT = "HARM_CATEGORY_HARASSMENT" + HARM_CATEGORY_DANGEROUS_CONTENT = "HARM_CATEGORY_DANGEROUS_CONTENT" + + +class GeminiSafetyThreshold(str, Enum): + OFF = "OFF" + BLOCK_NONE = "BLOCK_NONE" + BLOCK_LOW_AND_ABOVE = "BLOCK_LOW_AND_ABOVE" + BLOCK_MEDIUM_AND_ABOVE = "BLOCK_MEDIUM_AND_ABOVE" + BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH" + + +class GeminiSafetySetting(BaseModel): + category: GeminiSafetyCategory + threshold: GeminiSafetyThreshold + + +class GeminiRole(str, Enum): + user = "user" + model = "model" + + +class GeminiMimeType(str, Enum): + application_pdf = "application/pdf" + audio_mpeg = "audio/mpeg" + audio_mp3 = "audio/mp3" + audio_wav = "audio/wav" + image_png = "image/png" + image_jpeg = "image/jpeg" + image_webp = "image/webp" + text_plain = "text/plain" + video_mov = "video/mov" + video_mpeg = "video/mpeg" + video_mp4 = "video/mp4" + video_mpg = "video/mpg" + video_avi = "video/avi" + video_wmv = "video/wmv" + video_mpegps = "video/mpegps" + video_flv = "video/flv" + + +class GeminiInlineData(BaseModel): + data: str | None = Field( + None, + description="The base64 encoding of the image, PDF, or video to include inline in the prompt. 
" + "When including media inline, you must also specify the media type (mimeType) of the data. Size limit: 20MB", + ) + mimeType: GeminiMimeType | None = Field(None) + + +class GeminiPart(BaseModel): + inlineData: GeminiInlineData | None = Field(None) + text: str | None = Field(None) + + +class GeminiTextPart(BaseModel): + text: str | None = Field(None) + + +class GeminiContent(BaseModel): + parts: list[GeminiPart] = Field(...) + role: GeminiRole = Field(..., examples=["user"]) + + +class GeminiSystemInstructionContent(BaseModel): + parts: list[GeminiTextPart] = Field( + ..., + description="A list of ordered parts that make up a single message. " + "Different parts may have different IANA MIME types.", + ) + role: GeminiRole = Field( + ..., + description="The identity of the entity that creates the message. " + "The following values are supported: " + "user: This indicates that the message is sent by a real person, typically a user-generated message. " + "model: This indicates that the message is generated by the model. " + "The model value is used to insert messages from model into the conversation during multi-turn conversations. " + "For non-multi-turn conversations, this field can be left blank or unset.", + ) + + +class GeminiFunctionDeclaration(BaseModel): + description: str | None = Field(None) + name: str = Field(...) + parameters: dict[str, Any] = Field(..., description="JSON schema for the function parameters") + + +class GeminiTool(BaseModel): + functionDeclarations: list[GeminiFunctionDeclaration] | None = Field(None) + + +class GeminiOffset(BaseModel): + nanos: int | None = Field(None, ge=0, le=999999999) + seconds: int | None = Field(None, ge=-315576000000, le=315576000000) + + +class GeminiVideoMetadata(BaseModel): + endOffset: GeminiOffset | None = Field(None) + startOffset: GeminiOffset | None = Field(None) + + +class GeminiGenerationConfig(BaseModel): + maxOutputTokens: int | None = Field(None, ge=16, le=8192) + seed: int | None = Field(None) + stopSequences: list[str] | None = Field(None) + temperature: float | None = Field(1, ge=0.0, le=2.0) + topK: int | None = Field(40, ge=1) + topP: float | None = Field(0.95, ge=0.0, le=1.0) class GeminiImageConfig(BaseModel): - aspectRatio: Optional[str] = None + aspectRatio: str | None = Field(None) + resolution: str | None = Field(None) class GeminiImageGenerationConfig(GeminiGenerationConfig): - responseModalities: Optional[list[str]] = None - imageConfig: Optional[GeminiImageConfig] = None + responseModalities: list[str] | None = Field(None) + imageConfig: GeminiImageConfig | None = Field(None) class GeminiImageGenerateContentRequest(BaseModel): - contents: list[GeminiContent] - generationConfig: Optional[GeminiImageGenerationConfig] = None - safetySettings: Optional[list[GeminiSafetySetting]] = None - systemInstruction: Optional[GeminiSystemInstructionContent] = None - tools: Optional[list[GeminiTool]] = None - videoMetadata: Optional[GeminiVideoMetadata] = None + contents: list[GeminiContent] = Field(...) + generationConfig: GeminiImageGenerationConfig | None = Field(None) + safetySettings: list[GeminiSafetySetting] | None = Field(None) + systemInstruction: GeminiSystemInstructionContent | None = Field(None) + tools: list[GeminiTool] | None = Field(None) + videoMetadata: GeminiVideoMetadata | None = Field(None) + + +class GeminiGenerateContentRequest(BaseModel): + contents: list[GeminiContent] = Field(...) 
+ generationConfig: GeminiGenerationConfig | None = Field(None) + safetySettings: list[GeminiSafetySetting] | None = Field(None) + systemInstruction: GeminiSystemInstructionContent | None = Field(None) + tools: list[GeminiTool] | None = Field(None) + videoMetadata: GeminiVideoMetadata | None = Field(None) + + +class Modality(str, Enum): + MODALITY_UNSPECIFIED = "MODALITY_UNSPECIFIED" + TEXT = "TEXT" + IMAGE = "IMAGE" + VIDEO = "VIDEO" + AUDIO = "AUDIO" + DOCUMENT = "DOCUMENT" + + +class ModalityTokenCount(BaseModel): + modality: Modality | None = None + tokenCount: int | None = Field(None, description="Number of tokens for the given modality.") + + +class Probability(str, Enum): + NEGLIGIBLE = "NEGLIGIBLE" + LOW = "LOW" + MEDIUM = "MEDIUM" + HIGH = "HIGH" + UNKNOWN = "UNKNOWN" + + +class GeminiSafetyRating(BaseModel): + category: GeminiSafetyCategory | None = None + probability: Probability | None = Field( + None, + description="The probability that the content violates the specified safety category", + ) + + +class GeminiCitation(BaseModel): + authors: list[str] | None = None + endIndex: int | None = None + license: str | None = None + publicationDate: date | None = None + startIndex: int | None = None + title: str | None = None + uri: str | None = None + + +class GeminiCitationMetadata(BaseModel): + citations: list[GeminiCitation] | None = None + + +class GeminiCandidate(BaseModel): + citationMetadata: GeminiCitationMetadata | None = None + content: GeminiContent | None = None + finishReason: str | None = None + safetyRatings: list[GeminiSafetyRating] | None = None + + +class GeminiPromptFeedback(BaseModel): + blockReason: str | None = None + blockReasonMessage: str | None = None + safetyRatings: list[GeminiSafetyRating] | None = None + + +class GeminiUsageMetadata(BaseModel): + cachedContentTokenCount: int | None = Field( + None, + description="Output only. Number of tokens in the cached part in the input (the cached content).", + ) + candidatesTokenCount: int | None = Field(None, description="Number of tokens in the response(s).") + candidatesTokensDetails: list[ModalityTokenCount] | None = Field( + None, description="Breakdown of candidate tokens by modality." + ) + promptTokenCount: int | None = Field( + None, + description="Number of tokens in the request. When cachedContent is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content.", + ) + promptTokensDetails: list[ModalityTokenCount] | None = Field( + None, description="Breakdown of prompt tokens by modality." 
+ ) + thoughtsTokenCount: int | None = Field(None, description="Number of tokens present in thoughts output.") + toolUsePromptTokenCount: int | None = Field(None, description="Number of tokens present in tool-use prompt(s).") + + +class GeminiGenerateContentResponse(BaseModel): + candidates: list[GeminiCandidate] | None = Field(None) + promptFeedback: GeminiPromptFeedback | None = Field(None) + usageMetadata: GeminiUsageMetadata | None = Field(None) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 67f2469ad..6e746eebd 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -3,8 +3,6 @@ API Nodes for Gemini Multimodal LLM Usage via Remote API See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference """ -from __future__ import annotations - import base64 import json import os @@ -12,7 +10,7 @@ import time import uuid from enum import Enum from io import BytesIO -from typing import Literal, Optional +from typing import Literal import torch from typing_extensions import override @@ -20,18 +18,17 @@ from typing_extensions import override import folder_paths from comfy_api.latest import IO, ComfyExtension, Input from comfy_api.util import VideoCodec, VideoContainer -from comfy_api_nodes.apis import ( +from comfy_api_nodes.apis.gemini_api import ( GeminiContent, GeminiGenerateContentRequest, GeminiGenerateContentResponse, - GeminiInlineData, - GeminiMimeType, - GeminiPart, -) -from comfy_api_nodes.apis.gemini_api import ( GeminiImageConfig, GeminiImageGenerateContentRequest, GeminiImageGenerationConfig, + GeminiInlineData, + GeminiMimeType, + GeminiPart, + GeminiRole, ) from comfy_api_nodes.util import ( ApiEndpoint, @@ -57,6 +54,7 @@ class GeminiModel(str, Enum): gemini_2_5_flash_preview_04_17 = "gemini-2.5-flash-preview-04-17" gemini_2_5_pro = "gemini-2.5-pro" gemini_2_5_flash = "gemini-2.5-flash" + gemini_3_0_pro = "gemini-3-pro-preview" class GeminiImageModel(str, Enum): @@ -103,6 +101,16 @@ def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Litera Returns: List of response parts matching the requested type. """ + if response.candidates is None: + if response.promptFeedback.blockReason: + feedback = response.promptFeedback + raise ValueError( + f"Gemini API blocked the request. Reason: {feedback.blockReason} ({feedback.blockReasonMessage})" + ) + raise NotImplementedError( + "Gemini returned no response candidates. " + "Please report to ComfyUI repository with the example of workflow to reproduce this." 
+ ) parts = [] for part in response.candidates[0].content.parts: if part_type == "text" and hasattr(part, "text") and part.text: @@ -272,10 +280,10 @@ class GeminiNode(IO.ComfyNode): prompt: str, model: str, seed: int, - images: Optional[torch.Tensor] = None, - audio: Optional[Input.Audio] = None, - video: Optional[Input.Video] = None, - files: Optional[list[GeminiPart]] = None, + images: torch.Tensor | None = None, + audio: Input.Audio | None = None, + video: Input.Video | None = None, + files: list[GeminiPart] | None = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) @@ -300,7 +308,7 @@ class GeminiNode(IO.ComfyNode): data=GeminiGenerateContentRequest( contents=[ GeminiContent( - role="user", + role=GeminiRole.user, parts=parts, ) ] @@ -308,7 +316,6 @@ class GeminiNode(IO.ComfyNode): response_model=GeminiGenerateContentResponse, ) - # Get result output output_text = get_text_from_response(response) if output_text: # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. @@ -406,7 +413,7 @@ class GeminiInputFiles(IO.ComfyNode): ) @classmethod - def execute(cls, file: str, GEMINI_INPUT_FILES: Optional[list[GeminiPart]] = None) -> IO.NodeOutput: + def execute(cls, file: str, GEMINI_INPUT_FILES: list[GeminiPart] | None = None) -> IO.NodeOutput: """Loads and formats input files for Gemini API.""" if GEMINI_INPUT_FILES is None: GEMINI_INPUT_FILES = [] @@ -421,7 +428,7 @@ class GeminiImage(IO.ComfyNode): def define_schema(cls): return IO.Schema( node_id="GeminiImageNode", - display_name="Google Gemini Image", + display_name="Nano Banana (Google Gemini Image)", category="api node/image/Gemini", description="Edit images synchronously via Google API.", inputs=[ @@ -488,8 +495,8 @@ class GeminiImage(IO.ComfyNode): prompt: str, model: str, seed: int, - images: Optional[torch.Tensor] = None, - files: Optional[list[GeminiPart]] = None, + images: torch.Tensor | None = None, + files: list[GeminiPart] | None = None, aspect_ratio: str = "auto", ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) @@ -510,7 +517,7 @@ class GeminiImage(IO.ComfyNode): endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"), data=GeminiImageGenerateContentRequest( contents=[ - GeminiContent(role="user", parts=parts), + GeminiContent(role=GeminiRole.user, parts=parts), ], generationConfig=GeminiImageGenerationConfig( responseModalities=["TEXT", "IMAGE"], From b5c8be8b1db44ded07cb1b437b9f33ebff5848c1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 18 Nov 2025 19:37:20 -0500 Subject: [PATCH 0886/1073] ComfyUI 0.3.70 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 1e554eb9f..9b77aabe9 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
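Stepping back to the Gemini commit above: `comfy_api_nodes/apis/gemini_api.py` now hosts the full request/response schema locally, so a request body can be assembled purely from those pydantic models. A sketch assuming the module is importable as shown in the diff; the prompt text, image bytes, and aspect ratio are placeholders:

```python
import base64

from comfy_api_nodes.apis.gemini_api import (
    GeminiContent, GeminiImageConfig, GeminiImageGenerateContentRequest,
    GeminiImageGenerationConfig, GeminiInlineData, GeminiMimeType,
    GeminiPart, GeminiRole,
)

png_bytes = b"..."  # raw PNG data from wherever the workflow produced it
request = GeminiImageGenerateContentRequest(
    contents=[
        GeminiContent(
            role=GeminiRole.user,
            parts=[
                GeminiPart(text="Make the sky stormy."),
                GeminiPart(inlineData=GeminiInlineData(
                    data=base64.b64encode(png_bytes).decode(),
                    mimeType=GeminiMimeType.image_png,
                )),
            ],
        )
    ],
    generationConfig=GeminiImageGenerationConfig(
        responseModalities=["TEXT", "IMAGE"],
        imageConfig=GeminiImageConfig(aspectRatio="16:9"),
    ),
)
print(request.model_dump_json(exclude_none=True))  # pydantic v2 serialization
```

On the response side, the new guard in `get_parts_by_type` turns an empty `candidates` list into a readable error built from `promptFeedback.blockReason`, rather than the bare IndexError the old code would have raised.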
-__version__ = "0.3.69" +__version__ = "0.3.70" diff --git a/pyproject.toml b/pyproject.toml index a14b383b3..289b7145b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.69" +version = "0.3.70" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 17027f2a6a20a31e2c6f3be2b1a06f39ad3a68d9 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 18 Nov 2025 19:36:03 -0800 Subject: [PATCH 0887/1073] Add a way to disable the final norm in the llama based TE models. (#10794) --- comfy/text_encoders/llama.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index c050759fe..feb44bbb0 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -32,6 +32,7 @@ class Llama2Config: q_norm = None k_norm = None rope_scale = None + final_norm: bool = True @dataclass class Qwen25_3BConfig: @@ -53,6 +54,7 @@ class Qwen25_3BConfig: q_norm = None k_norm = None rope_scale = None + final_norm: bool = True @dataclass class Qwen25_7BVLI_Config: @@ -74,6 +76,7 @@ class Qwen25_7BVLI_Config: q_norm = None k_norm = None rope_scale = None + final_norm: bool = True @dataclass class Gemma2_2B_Config: @@ -96,6 +99,7 @@ class Gemma2_2B_Config: k_norm = None sliding_attention = None rope_scale = None + final_norm: bool = True @dataclass class Gemma3_4B_Config: @@ -118,6 +122,7 @@ class Gemma3_4B_Config: k_norm = "gemma3" sliding_attention = [False, False, False, False, False, 1024] rope_scale = [1.0, 8.0] + final_norm: bool = True class RMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-5, add=False, device=None, dtype=None): @@ -366,7 +371,12 @@ class Llama2_(nn.Module): transformer(config, index=i, device=device, dtype=dtype, ops=ops) for i in range(config.num_hidden_layers) ]) - self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) + + if config.final_norm: + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype) + else: + self.norm = None + # self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype) def forward(self, x, attention_mask=None, embeds=None, num_tokens=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None, position_ids=None, embeds_info=[]): @@ -421,14 +431,16 @@ class Llama2_(nn.Module): if i == intermediate_output: intermediate = x.clone() - x = self.norm(x) + if self.norm is not None: + x = self.norm(x) + if all_intermediate is not None: all_intermediate.append(x.unsqueeze(1).clone()) if all_intermediate is not None: intermediate = torch.cat(all_intermediate, dim=1) - if intermediate is not None and final_layer_norm_intermediate: + if intermediate is not None and final_layer_norm_intermediate and self.norm is not None: intermediate = self.norm(intermediate) return x, intermediate From 65ee24c9789b93660ebe978a3186486f105298c2 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 19 Nov 2025 11:25:28 +0200 Subject: [PATCH 0888/1073] change display name of PreviewAny node to "Preview as Text" (#10796) --- comfy_extras/nodes_preview_any.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_preview_any.py b/comfy_extras/nodes_preview_any.py index 
e749fa6ae..139b07c93 100644 --- a/comfy_extras/nodes_preview_any.py +++ b/comfy_extras/nodes_preview_any.py @@ -39,5 +39,5 @@ NODE_CLASS_MAPPINGS = { } NODE_DISPLAY_NAME_MAPPINGS = { - "PreviewAny": "Preview Any", + "PreviewAny": "Preview as Text", } From 6a1d3a1ae131f3fff7f45a7e835eb10e9d1338ee Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 20 Nov 2025 00:49:01 +0200 Subject: [PATCH 0889/1073] convert hunyuan3d.py to V3 schema (#10664) --- comfy_api/latest/__init__.py | 4 +- comfy_api/latest/_io.py | 5 +- comfy_api/latest/_util/__init__.py | 3 + comfy_api/latest/_util/geometry_types.py | 12 + comfy_extras/nodes_hunyuan3d.py | 274 +++++++++++++---------- 5 files changed, 178 insertions(+), 120 deletions(-) create mode 100644 comfy_api/latest/_util/geometry_types.py diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py index b7a3fa9c1..176ae36e0 100644 --- a/comfy_api/latest/__init__.py +++ b/comfy_api/latest/__init__.py @@ -7,7 +7,7 @@ from comfy_api.internal.singleton import ProxiedSingleton from comfy_api.internal.async_to_sync import create_sync_class from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents -from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents +from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents, MESH, VOXEL from . import _io as io from . import _ui as ui # from comfy_api.latest._resources import _RESOURCES as resources #noqa: F401 @@ -104,6 +104,8 @@ class Types: VideoCodec = VideoCodec VideoContainer = VideoContainer VideoComponents = VideoComponents + MESH = MESH + VOXEL = VOXEL ComfyAPI = ComfyAPI_latest diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 0b701260f..863254ce7 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -27,6 +27,7 @@ from comfy_api.internal import (_ComfyNodeInternal, _NodeOutputInternal, classpr prune_dict, shallow_clone_class) from comfy_api.latest._resources import Resources, ResourcesLocal from comfy_execution.graph_utils import ExecutionBlocker +from ._util import MESH, VOXEL # from comfy_extras.nodes_images import SVG as SVG_ # NOTE: needs to be moved before can be imported due to circular reference @@ -656,11 +657,11 @@ class LossMap(ComfyTypeIO): @comfytype(io_type="VOXEL") class Voxel(ComfyTypeIO): - Type = Any # TODO: VOXEL class is defined in comfy_extras/nodes_hunyuan3d.py; should be moved to somewhere else before referenced directly in v3 + Type = VOXEL @comfytype(io_type="MESH") class Mesh(ComfyTypeIO): - Type = Any # TODO: MESH class is defined in comfy_extras/nodes_hunyuan3d.py; should be moved to somewhere else before referenced directly in v3 + Type = MESH @comfytype(io_type="HOOKS") class Hooks(ComfyTypeIO): diff --git a/comfy_api/latest/_util/__init__.py b/comfy_api/latest/_util/__init__.py index 9019c46db..fc5431dda 100644 --- a/comfy_api/latest/_util/__init__.py +++ b/comfy_api/latest/_util/__init__.py @@ -1,8 +1,11 @@ from .video_types import VideoContainer, VideoCodec, VideoComponents +from .geometry_types import VOXEL, MESH __all__ = [ # Utility Types "VideoContainer", "VideoCodec", "VideoComponents", + "VOXEL", + "MESH", ] diff --git a/comfy_api/latest/_util/geometry_types.py b/comfy_api/latest/_util/geometry_types.py new file mode 100644 index 000000000..385122778 --- /dev/null +++ b/comfy_api/latest/_util/geometry_types.py @@ -0,0 +1,12 @@ 
+import torch + + +class VOXEL: + def __init__(self, data: torch.Tensor): + self.data = data + + +class MESH: + def __init__(self, vertices: torch.Tensor, faces: torch.Tensor): + self.vertices = vertices + self.faces = faces diff --git a/comfy_extras/nodes_hunyuan3d.py b/comfy_extras/nodes_hunyuan3d.py index f6e71e0a8..adca14f62 100644 --- a/comfy_extras/nodes_hunyuan3d.py +++ b/comfy_extras/nodes_hunyuan3d.py @@ -7,63 +7,79 @@ from comfy.ldm.modules.diffusionmodules.mmdit import get_1d_sincos_pos_embed_fro import folder_paths import comfy.model_management from comfy.cli_args import args +from typing_extensions import override +from comfy_api.latest import ComfyExtension, IO, Types +from comfy_api.latest._util import MESH, VOXEL # only for backward compatibility if someone import it from this file (will be removed later) # noqa -class EmptyLatentHunyuan3Dv2: + +class EmptyLatentHunyuan3Dv2(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "resolution": ("INT", {"default": 3072, "min": 1, "max": 8192}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}), - } - } + def define_schema(cls): + return IO.Schema( + node_id="EmptyLatentHunyuan3Dv2", + category="latent/3d", + inputs=[ + IO.Int.Input("resolution", default=3072, min=1, max=8192), + IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."), + ], + outputs=[ + IO.Latent.Output(), + ] + ) - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" - - CATEGORY = "latent/3d" - - def generate(self, resolution, batch_size): + @classmethod + def execute(cls, resolution, batch_size) -> IO.NodeOutput: latent = torch.zeros([batch_size, 64, resolution], device=comfy.model_management.intermediate_device()) - return ({"samples": latent, "type": "hunyuan3dv2"}, ) + return IO.NodeOutput({"samples": latent, "type": "hunyuan3dv2"}) -class Hunyuan3Dv2Conditioning: + generate = execute # TODO: remove + + +class Hunyuan3Dv2Conditioning(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"clip_vision_output": ("CLIP_VISION_OUTPUT",), - }} + def define_schema(cls): + return IO.Schema( + node_id="Hunyuan3Dv2Conditioning", + category="conditioning/video_models", + inputs=[ + IO.ClipVisionOutput.Input("clip_vision_output"), + ], + outputs=[ + IO.Conditioning.Output(display_name="positive"), + IO.Conditioning.Output(display_name="negative"), + ] + ) - RETURN_TYPES = ("CONDITIONING", "CONDITIONING") - RETURN_NAMES = ("positive", "negative") - - FUNCTION = "encode" - - CATEGORY = "conditioning/video_models" - - def encode(self, clip_vision_output): + @classmethod + def execute(cls, clip_vision_output) -> IO.NodeOutput: embeds = clip_vision_output.last_hidden_state positive = [[embeds, {}]] negative = [[torch.zeros_like(embeds), {}]] - return (positive, negative) + return IO.NodeOutput(positive, negative) + + encode = execute # TODO: remove -class Hunyuan3Dv2ConditioningMultiView: +class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {}, - "optional": {"front": ("CLIP_VISION_OUTPUT",), - "left": ("CLIP_VISION_OUTPUT",), - "back": ("CLIP_VISION_OUTPUT",), - "right": ("CLIP_VISION_OUTPUT",), }} + def define_schema(cls): + return IO.Schema( + node_id="Hunyuan3Dv2ConditioningMultiView", + category="conditioning/video_models", + inputs=[ + IO.ClipVisionOutput.Input("front", optional=True), + IO.ClipVisionOutput.Input("left", optional=True), + 
IO.ClipVisionOutput.Input("back", optional=True), + IO.ClipVisionOutput.Input("right", optional=True), + ], + outputs=[ + IO.Conditioning.Output(display_name="positive"), + IO.Conditioning.Output(display_name="negative"), + ] + ) - RETURN_TYPES = ("CONDITIONING", "CONDITIONING") - RETURN_NAMES = ("positive", "negative") - - FUNCTION = "encode" - - CATEGORY = "conditioning/video_models" - - def encode(self, front=None, left=None, back=None, right=None): + @classmethod + def execute(cls, front=None, left=None, back=None, right=None) -> IO.NodeOutput: all_embeds = [front, left, back, right] out = [] pos_embeds = None @@ -76,29 +92,35 @@ class Hunyuan3Dv2ConditioningMultiView: embeds = torch.cat(out, dim=1) positive = [[embeds, {}]] negative = [[torch.zeros_like(embeds), {}]] - return (positive, negative) + return IO.NodeOutput(positive, negative) + + encode = execute # TODO: remove -class VOXEL: - def __init__(self, data): - self.data = data - -class VAEDecodeHunyuan3D: +class VAEDecodeHunyuan3D(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"samples": ("LATENT", ), - "vae": ("VAE", ), - "num_chunks": ("INT", {"default": 8000, "min": 1000, "max": 500000}), - "octree_resolution": ("INT", {"default": 256, "min": 16, "max": 512}), - }} - RETURN_TYPES = ("VOXEL",) - FUNCTION = "decode" + def define_schema(cls): + return IO.Schema( + node_id="VAEDecodeHunyuan3D", + category="latent/3d", + inputs=[ + IO.Latent.Input("samples"), + IO.Vae.Input("vae"), + IO.Int.Input("num_chunks", default=8000, min=1000, max=500000), + IO.Int.Input("octree_resolution", default=256, min=16, max=512), + ], + outputs=[ + IO.Voxel.Output(), + ] + ) - CATEGORY = "latent/3d" + @classmethod + def execute(cls, vae, samples, num_chunks, octree_resolution) -> IO.NodeOutput: + voxels = Types.VOXEL(vae.decode(samples["samples"], vae_options={"num_chunks": num_chunks, "octree_resolution": octree_resolution})) + return IO.NodeOutput(voxels) + + decode = execute # TODO: remove - def decode(self, vae, samples, num_chunks, octree_resolution): - voxels = VOXEL(vae.decode(samples["samples"], vae_options={"num_chunks": num_chunks, "octree_resolution": octree_resolution})) - return (voxels, ) def voxel_to_mesh(voxels, threshold=0.5, device=None): if device is None: @@ -396,24 +418,24 @@ def voxel_to_mesh_surfnet(voxels, threshold=0.5, device=None): return final_vertices, faces -class MESH: - def __init__(self, vertices, faces): - self.vertices = vertices - self.faces = faces - -class VoxelToMeshBasic: +class VoxelToMeshBasic(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"voxel": ("VOXEL", ), - "threshold": ("FLOAT", {"default": 0.6, "min": -1.0, "max": 1.0, "step": 0.01}), - }} - RETURN_TYPES = ("MESH",) - FUNCTION = "decode" + def define_schema(cls): + return IO.Schema( + node_id="VoxelToMeshBasic", + category="3d", + inputs=[ + IO.Voxel.Input("voxel"), + IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01), + ], + outputs=[ + IO.Mesh.Output(), + ] + ) - CATEGORY = "3d" - - def decode(self, voxel, threshold): + @classmethod + def execute(cls, voxel, threshold) -> IO.NodeOutput: vertices = [] faces = [] for x in voxel.data: @@ -421,21 +443,29 @@ class VoxelToMeshBasic: vertices.append(v) faces.append(f) - return (MESH(torch.stack(vertices), torch.stack(faces)), ) + return IO.NodeOutput(Types.MESH(torch.stack(vertices), torch.stack(faces))) -class VoxelToMesh: + decode = execute # TODO: remove + + +class VoxelToMesh(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - 
return {"required": {"voxel": ("VOXEL", ), - "algorithm": (["surface net", "basic"], ), - "threshold": ("FLOAT", {"default": 0.6, "min": -1.0, "max": 1.0, "step": 0.01}), - }} - RETURN_TYPES = ("MESH",) - FUNCTION = "decode" + def define_schema(cls): + return IO.Schema( + node_id="VoxelToMesh", + category="3d", + inputs=[ + IO.Voxel.Input("voxel"), + IO.Combo.Input("algorithm", options=["surface net", "basic"]), + IO.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01), + ], + outputs=[ + IO.Mesh.Output(), + ] + ) - CATEGORY = "3d" - - def decode(self, voxel, algorithm, threshold): + @classmethod + def execute(cls, voxel, algorithm, threshold) -> IO.NodeOutput: vertices = [] faces = [] @@ -449,7 +479,9 @@ class VoxelToMesh: vertices.append(v) faces.append(f) - return (MESH(torch.stack(vertices), torch.stack(faces)), ) + return IO.NodeOutput(Types.MESH(torch.stack(vertices), torch.stack(faces))) + + decode = execute # TODO: remove def save_glb(vertices, faces, filepath, metadata=None): @@ -581,31 +613,32 @@ def save_glb(vertices, faces, filepath, metadata=None): return filepath -class SaveGLB: +class SaveGLB(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"mesh": ("MESH", ), - "filename_prefix": ("STRING", {"default": "mesh/ComfyUI"}), }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } + def define_schema(cls): + return IO.Schema( + node_id="SaveGLB", + category="3d", + is_output_node=True, + inputs=[ + IO.Mesh.Input("mesh"), + IO.String.Input("filename_prefix", default="mesh/ComfyUI"), + ], + hidden=[IO.Hidden.prompt, IO.Hidden.extra_pnginfo] + ) - RETURN_TYPES = () - FUNCTION = "save" - - OUTPUT_NODE = True - - CATEGORY = "3d" - - def save(self, mesh, filename_prefix, prompt=None, extra_pnginfo=None): + @classmethod + def execute(cls, mesh, filename_prefix) -> IO.NodeOutput: full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, folder_paths.get_output_directory()) results = [] metadata = {} if not args.disable_metadata: - if prompt is not None: - metadata["prompt"] = json.dumps(prompt) - if extra_pnginfo is not None: - for x in extra_pnginfo: - metadata[x] = json.dumps(extra_pnginfo[x]) + if cls.hidden.prompt is not None: + metadata["prompt"] = json.dumps(cls.hidden.prompt) + if cls.hidden.extra_pnginfo is not None: + for x in cls.hidden.extra_pnginfo: + metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x]) for i in range(mesh.vertices.shape[0]): f = f"{filename}_{counter:05}_.glb" @@ -616,15 +649,22 @@ class SaveGLB: "type": "output" }) counter += 1 - return {"ui": {"3d": results}} + return IO.NodeOutput(ui={"3d": results}) -NODE_CLASS_MAPPINGS = { - "EmptyLatentHunyuan3Dv2": EmptyLatentHunyuan3Dv2, - "Hunyuan3Dv2Conditioning": Hunyuan3Dv2Conditioning, - "Hunyuan3Dv2ConditioningMultiView": Hunyuan3Dv2ConditioningMultiView, - "VAEDecodeHunyuan3D": VAEDecodeHunyuan3D, - "VoxelToMeshBasic": VoxelToMeshBasic, - "VoxelToMesh": VoxelToMesh, - "SaveGLB": SaveGLB, -} +class Hunyuan3dExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + EmptyLatentHunyuan3Dv2, + Hunyuan3Dv2Conditioning, + Hunyuan3Dv2ConditioningMultiView, + VAEDecodeHunyuan3D, + VoxelToMeshBasic, + VoxelToMesh, + SaveGLB, + ] + + +async def comfy_entrypoint() -> Hunyuan3dExtension: + return Hunyuan3dExtension() From 7601e89255cde24667d3b4e6022f1385d901748b Mon Sep 17 00:00:00 2001 From: comfyanonymous 
<121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 19 Nov 2025 17:17:15 -0800 Subject: [PATCH 0890/1073] Fix workflow name. (#10806) --- .github/workflows/release-stable-all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-stable-all.yml b/.github/workflows/release-stable-all.yml index f7de3a7c3..9274b4170 100644 --- a/.github/workflows/release-stable-all.yml +++ b/.github/workflows/release-stable-all.yml @@ -14,7 +14,7 @@ jobs: contents: "write" packages: "write" pull-requests: "read" - name: "Release NVIDIA Default (cu129)" + name: "Release NVIDIA Default (cu130)" uses: ./.github/workflows/stable-release.yml with: git_tag: ${{ inputs.git_tag }} From 394348f5caaa062eac11a57e2997aacccd4246eb Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 20 Nov 2025 03:44:04 +0200 Subject: [PATCH 0891/1073] feat(api-nodes): add Topaz API nodes (#10755) --- comfy_api_nodes/apis/topaz_api.py | 133 ++++++++++ comfy_api_nodes/nodes_topaz.py | 421 ++++++++++++++++++++++++++++++ comfy_api_nodes/util/client.py | 9 +- nodes.py | 1 + 4 files changed, 560 insertions(+), 4 deletions(-) create mode 100644 comfy_api_nodes/apis/topaz_api.py create mode 100644 comfy_api_nodes/nodes_topaz.py diff --git a/comfy_api_nodes/apis/topaz_api.py b/comfy_api_nodes/apis/topaz_api.py new file mode 100644 index 000000000..4d9e62e72 --- /dev/null +++ b/comfy_api_nodes/apis/topaz_api.py @@ -0,0 +1,133 @@ +from typing import Optional, Union + +from pydantic import BaseModel, Field + + +class ImageEnhanceRequest(BaseModel): + model: str = Field("Reimagine") + output_format: str = Field("jpeg") + subject_detection: str = Field("All") + face_enhancement: bool = Field(True) + face_enhancement_creativity: float = Field(0, description="Is ignored if face_enhancement is false") + face_enhancement_strength: float = Field(0.8, description="Is ignored if face_enhancement is false") + source_url: str = Field(...) + output_width: Optional[int] = Field(None) + output_height: Optional[int] = Field(None) + crop_to_fill: bool = Field(False) + prompt: Optional[str] = Field(None, description="Text prompt for creative upscaling guidance") + creativity: int = Field(3, description="Creativity settings range from 1 to 9") + face_preservation: str = Field("true", description="To preserve the identity of characters") + color_preservation: str = Field("true", description="To preserve the original color") + + +class ImageAsyncTaskResponse(BaseModel): + process_id: str = Field(...) + + +class ImageStatusResponse(BaseModel): + process_id: str = Field(...) + status: str = Field(...) + progress: Optional[int] = Field(None) + credits: int = Field(...) + + +class ImageDownloadResponse(BaseModel): + download_url: str = Field(...) + expiry: int = Field(...) + + +class Resolution(BaseModel): + width: int = Field(...) + height: int = Field(...) + + +class CreateCreateVideoRequestSource(BaseModel): + container: str = Field(...) + size: int = Field(..., description="Size of the video file in bytes") + duration: int = Field(..., description="Duration of the video file in seconds") + frameCount: int = Field(..., description="Total number of frames in the video") + frameRate: int = Field(...) + resolution: Resolution = Field(...) + + +class VideoFrameInterpolationFilter(BaseModel): + model: str = Field(...) + slowmo: Optional[int] = Field(None) + fps: int = Field(...) + duplicate: bool = Field(...) + duplicate_threshold: float = Field(...) 
+ + +class VideoEnhancementFilter(BaseModel): + model: str = Field(...) + auto: Optional[str] = Field(None, description="Auto, Manual, Relative") + focusFixLevel: Optional[str] = Field(None, description="Downscales video input for correction of blurred subjects") + compression: Optional[float] = Field(None, description="Strength of compression recovery") + details: Optional[float] = Field(None, description="Amount of detail reconstruction") + prenoise: Optional[float] = Field(None, description="Amount of noise to add to input to reduce over-smoothing") + noise: Optional[float] = Field(None, description="Amount of noise reduction") + halo: Optional[float] = Field(None, description="Amount of halo reduction") + preblur: Optional[float] = Field(None, description="Anti-aliasing and deblurring strength") + blur: Optional[float] = Field(None, description="Amount of sharpness applied") + grain: Optional[float] = Field(None, description="Grain after AI model processing") + grainSize: Optional[float] = Field(None, description="Size of generated grain") + recoverOriginalDetailValue: Optional[float] = Field(None, description="Source details into the output video") + creativity: Optional[str] = Field(None, description="Creativity level(high, low) for slc-1 only") + isOptimizedMode: Optional[bool] = Field(None, description="Set to true for Starlight Creative (slc-1) only") + + +class OutputInformationVideo(BaseModel): + resolution: Resolution = Field(...) + frameRate: int = Field(...) + audioCodec: Optional[str] = Field(..., description="Required if audioTransfer is Copy or Convert") + audioTransfer: str = Field(..., description="Copy, Convert, None") + dynamicCompressionLevel: str = Field(..., description="Low, Mid, High") + + +class Overrides(BaseModel): + isPaidDiffusion: bool = Field(True) + + +class CreateVideoRequest(BaseModel): + source: CreateCreateVideoRequestSource = Field(...) + filters: list[Union[VideoFrameInterpolationFilter, VideoEnhancementFilter]] = Field(...) + output: OutputInformationVideo = Field(...) + overrides: Overrides = Field(Overrides(isPaidDiffusion=True)) + + +class CreateVideoResponse(BaseModel): + requestId: str = Field(...) + + +class VideoAcceptResponse(BaseModel): + uploadId: str = Field(...) + urls: list[str] = Field(...) + + +class VideoCompleteUploadRequestPart(BaseModel): + partNum: int = Field(...) + eTag: str = Field(...) + + +class VideoCompleteUploadRequest(BaseModel): + uploadResults: list[VideoCompleteUploadRequestPart] = Field(...) + + +class VideoCompleteUploadResponse(BaseModel): + message: str = Field(..., description="Confirmation message") + + +class VideoStatusResponseEstimates(BaseModel): + cost: list[int] = Field(...) + + +class VideoStatusResponseDownloadUrl(BaseModel): + url: str = Field(...) + + +class VideoStatusResponse(BaseModel): + status: str = Field(...) 
+ estimates: Optional[VideoStatusResponseEstimates] = Field(None) + progress: Optional[float] = Field(None) + message: Optional[str] = Field("") + download: Optional[VideoStatusResponseDownloadUrl] = Field(None) diff --git a/comfy_api_nodes/nodes_topaz.py b/comfy_api_nodes/nodes_topaz.py new file mode 100644 index 000000000..79c7bf43d --- /dev/null +++ b/comfy_api_nodes/nodes_topaz.py @@ -0,0 +1,421 @@ +import builtins +from io import BytesIO + +import aiohttp +import torch +from typing_extensions import override + +from comfy_api.input.video_types import VideoInput +from comfy_api.latest import IO, ComfyExtension +from comfy_api_nodes.apis import topaz_api +from comfy_api_nodes.util import ( + ApiEndpoint, + download_url_to_image_tensor, + download_url_to_video_output, + get_fs_object_size, + get_number_of_images, + poll_op, + sync_op, + upload_images_to_comfyapi, + validate_container_format_is_mp4, +) + +UPSCALER_MODELS_MAP = { + "Starlight (Astra) Fast": "slf-1", + "Starlight (Astra) Creative": "slc-1", +} +UPSCALER_VALUES_MAP = { + "FullHD (1080p)": 1920, + "4K (2160p)": 3840, +} + + +class TopazImageEnhance(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="TopazImageEnhance", + display_name="Topaz Image Enhance", + category="api node/image/Topaz", + description="Industry-standard upscaling and image enhancement.", + inputs=[ + IO.Combo.Input("model", options=["Reimagine"]), + IO.Image.Input("image"), + IO.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Optional text prompt for creative upscaling guidance.", + optional=True, + ), + IO.Combo.Input( + "subject_detection", + options=["All", "Foreground", "Background"], + optional=True, + ), + IO.Boolean.Input( + "face_enhancement", + default=True, + optional=True, + tooltip="Enhance faces (if present) during processing.", + ), + IO.Float.Input( + "face_enhancement_creativity", + default=0.0, + min=0.0, + max=1.0, + step=0.01, + display_mode=IO.NumberDisplay.number, + optional=True, + tooltip="Set the creativity level for face enhancement.", + ), + IO.Float.Input( + "face_enhancement_strength", + default=1.0, + min=0.0, + max=1.0, + step=0.01, + display_mode=IO.NumberDisplay.number, + optional=True, + tooltip="Controls how sharp enhanced faces are relative to the background.", + ), + IO.Boolean.Input( + "crop_to_fill", + default=False, + optional=True, + tooltip="By default, the image is letterboxed when the output aspect ratio differs. 
" + "Enable to crop the image to fill the output dimensions.", + ), + IO.Int.Input( + "output_width", + default=0, + min=0, + max=32000, + step=1, + display_mode=IO.NumberDisplay.number, + optional=True, + tooltip="Zero value means to calculate automatically (usually it will be original size or output_height if specified).", + ), + IO.Int.Input( + "output_height", + default=0, + min=0, + max=32000, + step=1, + display_mode=IO.NumberDisplay.number, + optional=True, + tooltip="Zero value means to output in the same height as original or output width.", + ), + IO.Int.Input( + "creativity", + default=3, + min=1, + max=9, + step=1, + display_mode=IO.NumberDisplay.slider, + optional=True, + ), + IO.Boolean.Input( + "face_preservation", + default=True, + optional=True, + tooltip="Preserve subjects' facial identity.", + ), + IO.Boolean.Input( + "color_preservation", + default=True, + optional=True, + tooltip="Preserve the original colors.", + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model: str, + image: torch.Tensor, + prompt: str = "", + subject_detection: str = "All", + face_enhancement: bool = True, + face_enhancement_creativity: float = 1.0, + face_enhancement_strength: float = 0.8, + crop_to_fill: bool = False, + output_width: int = 0, + output_height: int = 0, + creativity: int = 3, + face_preservation: bool = True, + color_preservation: bool = True, + ) -> IO.NodeOutput: + if get_number_of_images(image) != 1: + raise ValueError("Only one input image is supported.") + download_url = await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png") + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/topaz/image/v1/enhance-gen/async", method="POST"), + response_model=topaz_api.ImageAsyncTaskResponse, + data=topaz_api.ImageEnhanceRequest( + model=model, + prompt=prompt, + subject_detection=subject_detection, + face_enhancement=face_enhancement, + face_enhancement_creativity=face_enhancement_creativity, + face_enhancement_strength=face_enhancement_strength, + crop_to_fill=crop_to_fill, + output_width=output_width if output_width else None, + output_height=output_height if output_height else None, + creativity=creativity, + face_preservation=str(face_preservation).lower(), + color_preservation=str(color_preservation).lower(), + source_url=download_url[0], + output_format="png", + ), + content_type="multipart/form-data", + ) + + await poll_op( + cls, + poll_endpoint=ApiEndpoint(path=f"/proxy/topaz/image/v1/status/{initial_response.process_id}"), + response_model=topaz_api.ImageStatusResponse, + status_extractor=lambda x: x.status, + progress_extractor=lambda x: getattr(x, "progress", 0), + price_extractor=lambda x: x.credits * 0.08, + poll_interval=8.0, + max_poll_attempts=160, + estimated_duration=60, + ) + + results = await sync_op( + cls, + ApiEndpoint(path=f"/proxy/topaz/image/v1/download/{initial_response.process_id}"), + response_model=topaz_api.ImageDownloadResponse, + monitor_progress=False, + ) + return IO.NodeOutput(await download_url_to_image_tensor(results.download_url)) + + +class TopazVideoEnhance(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="TopazVideoEnhance", + display_name="Topaz Video Enhance", + category="api node/video/Topaz", + description="Breathe new life into video with powerful upscaling and recovery technology.", + 
inputs=[ + IO.Video.Input("video"), + IO.Boolean.Input("upscaler_enabled", default=True), + IO.Combo.Input("upscaler_model", options=list(UPSCALER_MODELS_MAP.keys())), + IO.Combo.Input("upscaler_resolution", options=list(UPSCALER_VALUES_MAP.keys())), + IO.Combo.Input( + "upscaler_creativity", + options=["low", "middle", "high"], + default="low", + tooltip="Creativity level (applies only to Starlight (Astra) Creative).", + optional=True, + ), + IO.Boolean.Input("interpolation_enabled", default=False, optional=True), + IO.Combo.Input("interpolation_model", options=["apo-8"], default="apo-8", optional=True), + IO.Int.Input( + "interpolation_slowmo", + default=1, + min=1, + max=16, + display_mode=IO.NumberDisplay.number, + tooltip="Slow-motion factor applied to the input video. " + "For example, 2 makes the output twice as slow and doubles the duration.", + optional=True, + ), + IO.Int.Input( + "interpolation_frame_rate", + default=60, + min=15, + max=240, + display_mode=IO.NumberDisplay.number, + tooltip="Output frame rate.", + optional=True, + ), + IO.Boolean.Input( + "interpolation_duplicate", + default=False, + tooltip="Analyze the input for duplicate frames and remove them.", + optional=True, + ), + IO.Float.Input( + "interpolation_duplicate_threshold", + default=0.01, + min=0.001, + max=0.1, + step=0.001, + display_mode=IO.NumberDisplay.number, + tooltip="Detection sensitivity for duplicate frames.", + optional=True, + ), + IO.Combo.Input( + "dynamic_compression_level", + options=["Low", "Mid", "High"], + default="Low", + tooltip="CQP level.", + optional=True, + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + video: VideoInput, + upscaler_enabled: bool, + upscaler_model: str, + upscaler_resolution: str, + upscaler_creativity: str = "low", + interpolation_enabled: bool = False, + interpolation_model: str = "apo-8", + interpolation_slowmo: int = 1, + interpolation_frame_rate: int = 60, + interpolation_duplicate: bool = False, + interpolation_duplicate_threshold: float = 0.01, + dynamic_compression_level: str = "Low", + ) -> IO.NodeOutput: + if upscaler_enabled is False and interpolation_enabled is False: + raise ValueError("There is nothing to do: both upscaling and interpolation are disabled.") + src_width, src_height = video.get_dimensions() + video_components = video.get_components() + src_frame_rate = int(video_components.frame_rate) + duration_sec = video.get_duration() + estimated_frames = int(duration_sec * src_frame_rate) + validate_container_format_is_mp4(video) + src_video_stream = video.get_stream_source() + target_width = src_width + target_height = src_height + target_frame_rate = src_frame_rate + filters = [] + if upscaler_enabled: + target_width = UPSCALER_VALUES_MAP[upscaler_resolution] + target_height = UPSCALER_VALUES_MAP[upscaler_resolution] + filters.append( + topaz_api.VideoEnhancementFilter( + model=UPSCALER_MODELS_MAP[upscaler_model], + creativity=(upscaler_creativity if UPSCALER_MODELS_MAP[upscaler_model] == "slc-1" else None), + isOptimizedMode=(True if UPSCALER_MODELS_MAP[upscaler_model] == "slc-1" else None), + ), + ) + if interpolation_enabled: + target_frame_rate = interpolation_frame_rate + filters.append( + topaz_api.VideoFrameInterpolationFilter( + model=interpolation_model, + slowmo=interpolation_slowmo, + fps=interpolation_frame_rate, + duplicate=interpolation_duplicate, + 
duplicate_threshold=interpolation_duplicate_threshold, + ), + ) + initial_res = await sync_op( + cls, + ApiEndpoint(path="/proxy/topaz/video/", method="POST"), + response_model=topaz_api.CreateVideoResponse, + data=topaz_api.CreateVideoRequest( + source=topaz_api.CreateCreateVideoRequestSource( + container="mp4", + size=get_fs_object_size(src_video_stream), + duration=int(duration_sec), + frameCount=estimated_frames, + frameRate=src_frame_rate, + resolution=topaz_api.Resolution(width=src_width, height=src_height), + ), + filters=filters, + output=topaz_api.OutputInformationVideo( + resolution=topaz_api.Resolution(width=target_width, height=target_height), + frameRate=target_frame_rate, + audioCodec="AAC", + audioTransfer="Copy", + dynamicCompressionLevel=dynamic_compression_level, + ), + ), + wait_label="Creating task", + final_label_on_success="Task created", + ) + upload_res = await sync_op( + cls, + ApiEndpoint( + path=f"/proxy/topaz/video/{initial_res.requestId}/accept", + method="PATCH", + ), + response_model=topaz_api.VideoAcceptResponse, + wait_label="Preparing upload", + final_label_on_success="Upload started", + ) + if len(upload_res.urls) > 1: + raise NotImplementedError( + "Large files are not currently supported. Please open an issue in the ComfyUI repository." + ) + async with aiohttp.ClientSession(headers={"Content-Type": "video/mp4"}) as session: + if isinstance(src_video_stream, BytesIO): + src_video_stream.seek(0) + async with session.put(upload_res.urls[0], data=src_video_stream, raise_for_status=True) as res: + upload_etag = res.headers["Etag"] + else: + with builtins.open(src_video_stream, "rb") as video_file: + async with session.put(upload_res.urls[0], data=video_file, raise_for_status=True) as res: + upload_etag = res.headers["Etag"] + await sync_op( + cls, + ApiEndpoint( + path=f"/proxy/topaz/video/{initial_res.requestId}/complete-upload", + method="PATCH", + ), + response_model=topaz_api.VideoCompleteUploadResponse, + data=topaz_api.VideoCompleteUploadRequest( + uploadResults=[ + topaz_api.VideoCompleteUploadRequestPart( + partNum=1, + eTag=upload_etag, + ), + ], + ), + wait_label="Finalizing upload", + final_label_on_success="Upload completed", + ) + final_response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/topaz/video/{initial_res.requestId}/status"), + response_model=topaz_api.VideoStatusResponse, + status_extractor=lambda x: x.status, + progress_extractor=lambda x: getattr(x, "progress", 0), + price_extractor=lambda x: (x.estimates.cost[0] * 0.08 if x.estimates and x.estimates.cost[0] else None), + poll_interval=10.0, + max_poll_attempts=320, + ) + return IO.NodeOutput(await download_url_to_video_output(final_response.download.url)) + + +class TopazExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + TopazImageEnhance, + TopazVideoEnhance, + ] + + +async def comfy_entrypoint() -> TopazExtension: + return TopazExtension() diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py index 2d5dcd648..ad6e3c0d0 100644 --- a/comfy_api_nodes/util/client.py +++ b/comfy_api_nodes/util/client.py @@ -77,9 +77,9 @@ class _PollUIState: _RETRY_STATUS = {408, 429, 500, 502, 503, 504} -COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed", "finished", "done"] -FAILED_STATUSES = ["cancelled", "canceled", "fail", "failed", "error"] -QUEUED_STATUSES = ["created", "queued", "queueing", "submitted"] +COMPLETED_STATUSES = ["succeeded", "succeed", "success", "completed", "finished", 
"done", "complete"] +FAILED_STATUSES = ["cancelled", "canceled", "canceling", "fail", "failed", "error"] +QUEUED_STATUSES = ["created", "queued", "queueing", "submitted", "initializing"] async def sync_op( @@ -424,7 +424,8 @@ def _display_text( if status: display_lines.append(f"Status: {status.capitalize() if isinstance(status, str) else status}") if price is not None: - display_lines.append(f"Price: ${float(price):,.4f}") + p = f"{float(price):,.4f}".rstrip("0").rstrip(".") + display_lines.append(f"Price: ${p}") if text is not None: display_lines.append(text) if display_lines: diff --git a/nodes.py b/nodes.py index f6aeedc78..ac14e39a7 100644 --- a/nodes.py +++ b/nodes.py @@ -2359,6 +2359,7 @@ async def init_builtin_api_nodes(): "nodes_pika.py", "nodes_runway.py", "nodes_sora.py", + "nodes_topaz.py", "nodes_tripo.py", "nodes_moonvalley.py", "nodes_rodin.py", From cb96d4d18c78ee09d5fd70954ffcb4ad2c7f0d7a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 19 Nov 2025 20:56:23 -0800 Subject: [PATCH 0892/1073] Disable workaround on newer cudnn. (#10807) --- comfy/ops.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/ops.py b/comfy/ops.py index 2a90a5ba2..640622fd1 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -58,7 +58,8 @@ except (ModuleNotFoundError, TypeError): NVIDIA_MEMORY_CONV_BUG_WORKAROUND = False try: if comfy.model_management.is_nvidia(): - if torch.backends.cudnn.version() >= 91002 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10): + cudnn_version = torch.backends.cudnn.version() + if (cudnn_version >= 91002 and cudnn_version < 91500) and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10): #TODO: change upper bound version once it's fixed' NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True logging.info("working around nvidia conv3d memory bug.") From 87b0359392219841c2214e1eb06678840cae470e Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Wed, 19 Nov 2025 22:36:56 -0800 Subject: [PATCH 0893/1073] Update server templates handler to use new multi-package distribution (comfyui-workflow-templates versions >=0.3) (#10791) * update templates for monorepo * refactor --- app/frontend_management.py | 67 ++++++++++++++++++++++++++++++++++++-- requirements.txt | 2 +- server.py | 32 ++++++++++++++---- 3 files changed, 92 insertions(+), 9 deletions(-) diff --git a/app/frontend_management.py b/app/frontend_management.py index cce0c117d..bdaa85812 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -10,7 +10,8 @@ import importlib from dataclasses import dataclass from functools import cached_property from pathlib import Path -from typing import TypedDict, Optional +from typing import Dict, TypedDict, Optional +from aiohttp import web from importlib.metadata import version import requests @@ -257,7 +258,54 @@ comfyui-frontend-package is not installed. sys.exit(-1) @classmethod - def templates_path(cls) -> str: + def template_asset_map(cls) -> Optional[Dict[str, str]]: + """Return a mapping of template asset names to their absolute paths.""" + try: + from comfyui_workflow_templates import ( + get_asset_path, + iter_templates, + ) + except ImportError: + logging.error( + f""" +********** ERROR *********** + +comfyui-workflow-templates is not installed. 
+ +{frontend_install_warning_message()} + +********** ERROR *********** +""".strip() + ) + return None + + try: + template_entries = list(iter_templates()) + except Exception as exc: + logging.error(f"Failed to enumerate workflow templates: {exc}") + return None + + asset_map: Dict[str, str] = {} + try: + for entry in template_entries: + for asset in entry.assets: + asset_map[asset.filename] = get_asset_path( + entry.template_id, asset.filename + ) + except Exception as exc: + logging.error(f"Failed to resolve template asset paths: {exc}") + return None + + if not asset_map: + logging.error("No workflow template assets found. Did the packages install correctly?") + return None + + return asset_map + + + @classmethod + def legacy_templates_path(cls) -> Optional[str]: + """Return the legacy templates directory shipped inside the meta package.""" try: import comfyui_workflow_templates @@ -276,6 +324,7 @@ comfyui-workflow-templates is not installed. ********** ERROR *********** """.strip() ) + return None @classmethod def embedded_docs_path(cls) -> str: @@ -392,3 +441,17 @@ comfyui-workflow-templates is not installed. logging.info("Falling back to the default frontend.") check_frontend_version() return cls.default_frontend_path() + @classmethod + def template_asset_handler(cls): + assets = cls.template_asset_map() + if not assets: + return None + + async def serve_template(request: web.Request) -> web.StreamResponse: + rel_path = request.match_info.get("path", "") + target = assets.get(rel_path) + if target is None: + raise web.HTTPNotFound() + return web.FileResponse(target) + + return serve_template diff --git a/requirements.txt b/requirements.txt index 249c36dee..36c39f338 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.28.8 -comfyui-workflow-templates==0.2.11 +comfyui-workflow-templates==0.3.1 comfyui-embedded-docs==0.3.1 torch torchsde diff --git a/server.py b/server.py index d059d3dc9..d9d5c491f 100644 --- a/server.py +++ b/server.py @@ -30,7 +30,7 @@ import comfy.model_management from comfy_api import feature_flags import node_helpers from comfyui_version import __version__ -from app.frontend_management import FrontendManager +from app.frontend_management import FrontendManager, parse_version from comfy_api.internal import _ComfyNodeInternal from app.user_manager import UserManager @@ -849,11 +849,31 @@ class PromptServer(): for name, dir in nodes.EXTENSION_WEB_DIRS.items(): self.app.add_routes([web.static('/extensions/' + name, dir)]) - workflow_templates_path = FrontendManager.templates_path() - if workflow_templates_path: - self.app.add_routes([ - web.static('/templates', workflow_templates_path) - ]) + installed_templates_version = FrontendManager.get_installed_templates_version() + use_legacy_templates = True + if installed_templates_version: + try: + use_legacy_templates = ( + parse_version(installed_templates_version) + < parse_version("0.3.0") + ) + except Exception as exc: + logging.warning( + "Unable to parse templates version '%s': %s", + installed_templates_version, + exc, + ) + + if use_legacy_templates: + workflow_templates_path = FrontendManager.legacy_templates_path() + if workflow_templates_path: + self.app.add_routes([ + web.static('/templates', workflow_templates_path) + ]) + else: + handler = FrontendManager.template_asset_handler() + if handler: + self.app.router.add_get("/templates/{path:.*}", handler) # Serve embedded documentation from the package embedded_docs_path = FrontendManager.embedded_docs_path() From 
f5e66d5e47271253edad5c4eddd817b0d6a23340 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 20 Nov 2025 12:08:03 -0800 Subject: [PATCH 0894/1073] Fix ImageBatch with different channel count. (#10815) --- nodes.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nodes.py b/nodes.py index ac14e39a7..75e820e66 100644 --- a/nodes.py +++ b/nodes.py @@ -1852,6 +1852,10 @@ class ImageBatch: CATEGORY = "image" def batch(self, image1, image2): + if image1.shape[-1] != image2.shape[-1]: + channels = min(image1.shape[-1], image2.shape[-1]) + image1 = image1[..., :channels] + image2 = image2[..., :channels] if image1.shape[1:] != image2.shape[1:]: image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1) s = torch.cat((image1, image2), dim=0) From 9e00ce5b76ec04be37375310512a443605b95077 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Thu, 20 Nov 2025 14:42:46 -0800 Subject: [PATCH 0895/1073] Make Batch Images node add alpha channel when one of the inputs has it (#10816) * When one Batch Image input has alpha and one does not, add empty alpha channel * Use torch.nn.functional.pad --- nodes.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nodes.py b/nodes.py index 75e820e66..030371633 100644 --- a/nodes.py +++ b/nodes.py @@ -1853,9 +1853,10 @@ class ImageBatch: def batch(self, image1, image2): if image1.shape[-1] != image2.shape[-1]: - channels = min(image1.shape[-1], image2.shape[-1]) - image1 = image1[..., :channels] - image2 = image2[..., :channels] + if image1.shape[-1] > image2.shape[-1]: + image2 = torch.nn.functional.pad(image2, (0,1), mode='constant', value=1.0) + else: + image1 = torch.nn.functional.pad(image1, (0,1), mode='constant', value=1.0) if image1.shape[1:] != image2.shape[1:]: image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1) s = torch.cat((image1, image2), dim=0) From 7b8389578e88dcd13b1cf6aea5404047298c9183 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 21 Nov 2025 02:17:47 +0200 Subject: [PATCH 0896/1073] feat(api-nodes): add Nano Banana Pro (#10814) * feat(api-nodes): add Nano Banana Pro * frontend bump to 1.28.9 --- comfy_api_nodes/apis/gemini_api.py | 5 +- comfy_api_nodes/nodes_gemini.py | 205 ++++++++++++++++++++++++++++- comfy_api_nodes/util/client.py | 13 +- requirements.txt | 2 +- 4 files changed, 215 insertions(+), 10 deletions(-) diff --git a/comfy_api_nodes/apis/gemini_api.py b/comfy_api_nodes/apis/gemini_api.py index f63e02693..710f173f1 100644 --- a/comfy_api_nodes/apis/gemini_api.py +++ b/comfy_api_nodes/apis/gemini_api.py @@ -68,7 +68,7 @@ class GeminiTextPart(BaseModel): class GeminiContent(BaseModel): - parts: list[GeminiPart] = Field(...) 
+ parts: list[GeminiPart] = Field([]) role: GeminiRole = Field(..., examples=["user"]) @@ -120,7 +120,7 @@ class GeminiGenerationConfig(BaseModel): class GeminiImageConfig(BaseModel): aspectRatio: str | None = Field(None) - resolution: str | None = Field(None) + imageSize: str | None = Field(None) class GeminiImageGenerationConfig(GeminiGenerationConfig): @@ -227,3 +227,4 @@ class GeminiGenerateContentResponse(BaseModel): candidates: list[GeminiCandidate] | None = Field(None) promptFeedback: GeminiPromptFeedback | None = Field(None) usageMetadata: GeminiUsageMetadata | None = Field(None) + modelVersion: str | None = Field(None) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 6e746eebd..be752c885 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -29,11 +29,13 @@ from comfy_api_nodes.apis.gemini_api import ( GeminiMimeType, GeminiPart, GeminiRole, + Modality, ) from comfy_api_nodes.util import ( ApiEndpoint, audio_to_base64_string, bytesio_to_image_tensor, + get_number_of_images, sync_op, tensor_to_base64_string, validate_string, @@ -147,6 +149,49 @@ def get_image_from_response(response: GeminiGenerateContentResponse) -> torch.Te return torch.cat(image_tensors, dim=0) +def calculate_tokens_price(response: GeminiGenerateContentResponse) -> float | None: + if not response.modelVersion: + return None + # Define prices (Cost per 1,000,000 tokens), see https://cloud.google.com/vertex-ai/generative-ai/pricing + if response.modelVersion in ("gemini-2.5-pro-preview-05-06", "gemini-2.5-pro"): + input_tokens_price = 1.25 + output_text_tokens_price = 10.0 + output_image_tokens_price = 0.0 + elif response.modelVersion in ( + "gemini-2.5-flash-preview-04-17", + "gemini-2.5-flash", + ): + input_tokens_price = 0.30 + output_text_tokens_price = 2.50 + output_image_tokens_price = 0.0 + elif response.modelVersion in ( + "gemini-2.5-flash-image-preview", + "gemini-2.5-flash-image", + ): + input_tokens_price = 0.30 + output_text_tokens_price = 2.50 + output_image_tokens_price = 30.0 + elif response.modelVersion == "gemini-3-pro-preview": + input_tokens_price = 2 + output_text_tokens_price = 12.0 + output_image_tokens_price = 0.0 + elif response.modelVersion == "gemini-3-pro-image-preview": + input_tokens_price = 2 + output_text_tokens_price = 12.0 + output_image_tokens_price = 120.0 + else: + return None + final_price = response.usageMetadata.promptTokenCount * input_tokens_price + for i in response.usageMetadata.candidatesTokensDetails: + if i.modality == Modality.IMAGE: + final_price += output_image_tokens_price * i.tokenCount # for Nano Banana models + else: + final_price += output_text_tokens_price * i.tokenCount + if response.usageMetadata.thoughtsTokenCount: + final_price += output_text_tokens_price * response.usageMetadata.thoughtsTokenCount + return final_price / 1_000_000.0 + + class GeminiNode(IO.ComfyNode): """ Node to generate text responses from a Gemini model. 
@@ -314,6 +359,7 @@ class GeminiNode(IO.ComfyNode): ] ), response_model=GeminiGenerateContentResponse, + price_extractor=calculate_tokens_price, ) output_text = get_text_from_response(response) @@ -476,6 +522,13 @@ class GeminiImage(IO.ComfyNode): "or otherwise generates 1:1 squares.", optional=True, ), + IO.Combo.Input( + "response_modalities", + options=["IMAGE+TEXT", "IMAGE"], + tooltip="Choose 'IMAGE' for image-only output, or " + "'IMAGE+TEXT' to return both the generated image and a text response.", + optional=True, + ), ], outputs=[ IO.Image.Output(), @@ -498,6 +551,7 @@ class GeminiImage(IO.ComfyNode): images: torch.Tensor | None = None, files: list[GeminiPart] | None = None, aspect_ratio: str = "auto", + response_modalities: str = "IMAGE+TEXT", ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) parts: list[GeminiPart] = [GeminiPart(text=prompt)] @@ -520,17 +574,16 @@ class GeminiImage(IO.ComfyNode): GeminiContent(role=GeminiRole.user, parts=parts), ], generationConfig=GeminiImageGenerationConfig( - responseModalities=["TEXT", "IMAGE"], + responseModalities=(["IMAGE"] if response_modalities == "IMAGE" else ["TEXT", "IMAGE"]), imageConfig=None if aspect_ratio == "auto" else image_config, ), ), response_model=GeminiGenerateContentResponse, + price_extractor=calculate_tokens_price, ) - output_image = get_image_from_response(response) output_text = get_text_from_response(response) if output_text: - # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. render_spec = { "node_id": cls.hidden.unique_id, "component": "ChatHistoryWidget", @@ -551,9 +604,150 @@ class GeminiImage(IO.ComfyNode): "display_component", render_spec, ) + return IO.NodeOutput(get_image_from_response(response), output_text) - output_text = output_text or "Empty response from Gemini model..." - return IO.NodeOutput(output_image, output_text) + +class GeminiImage2(IO.ComfyNode): + + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="GeminiImage2Node", + display_name="Nano Banana Pro (Google Gemini Image)", + category="api node/image/Gemini", + description="Generate or edit images synchronously via Google Vertex API.", + inputs=[ + IO.String.Input( + "prompt", + multiline=True, + tooltip="Text prompt describing the image to generate or the edits to apply. " + "Include any constraints, styles, or details the model should follow.", + default="", + ), + IO.Combo.Input( + "model", + options=["gemini-3-pro-image-preview"], + ), + IO.Int.Input( + "seed", + default=42, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="When the seed is fixed to a specific value, the model makes a best effort to provide " + "the same response for repeated requests. Deterministic output isn't guaranteed. " + "Also, changing the model or parameter settings, such as the temperature, " + "can cause variations in the response even when you use the same seed value. " + "By default, a random seed value is used.", + ), + IO.Combo.Input( + "aspect_ratio", + options=["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"], + default="auto", + tooltip="If set to 'auto', matches your input image's aspect ratio; " + "if no image is provided, generates a 1:1 square.", + ), + IO.Combo.Input( + "resolution", + options=["1K", "2K", "4K"], + tooltip="Target output resolution. 
For 2K/4K the native Gemini upscaler is used.", + ), + IO.Combo.Input( + "response_modalities", + options=["IMAGE+TEXT", "IMAGE"], + tooltip="Choose 'IMAGE' for image-only output, or " + "'IMAGE+TEXT' to return both the generated image and a text response.", + ), + IO.Image.Input( + "images", + optional=True, + tooltip="Optional reference image(s). " + "To include multiple images, use the Batch Images node (up to 14).", + ), + IO.Custom("GEMINI_INPUT_FILES").Input( + "files", + optional=True, + tooltip="Optional file(s) to use as context for the model. " + "Accepts inputs from the Gemini Generate Content Input Files node.", + ), + ], + outputs=[ + IO.Image.Output(), + IO.String.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + prompt: str, + model: str, + seed: int, + aspect_ratio: str, + resolution: str, + response_modalities: str, + images: torch.Tensor | None = None, + files: list[GeminiPart] | None = None, + ) -> IO.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + + parts: list[GeminiPart] = [GeminiPart(text=prompt)] + if images is not None: + if get_number_of_images(images) > 14: + raise ValueError("The current maximum number of supported images is 14.") + parts.extend(create_image_parts(images)) + if files is not None: + parts.extend(files) + + image_config = GeminiImageConfig(imageSize=resolution) + if aspect_ratio != "auto": + image_config.aspectRatio = aspect_ratio + + response = await sync_op( + cls, + ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"), + data=GeminiImageGenerateContentRequest( + contents=[ + GeminiContent(role=GeminiRole.user, parts=parts), + ], + generationConfig=GeminiImageGenerationConfig( + responseModalities=(["IMAGE"] if response_modalities == "IMAGE" else ["TEXT", "IMAGE"]), + imageConfig=image_config, + ), + ), + response_model=GeminiGenerateContentResponse, + price_extractor=calculate_tokens_price, + ) + + output_text = get_text_from_response(response) + if output_text: + render_spec = { + "node_id": cls.hidden.unique_id, + "component": "ChatHistoryWidget", + "props": { + "history": json.dumps( + [ + { + "prompt": prompt, + "response": output_text, + "response_id": str(uuid.uuid4()), + "timestamp": time.time(), + } + ] + ), + }, + } + PromptServer.instance.send_sync( + "display_component", + render_spec, + ) + return IO.NodeOutput(get_image_from_response(response), output_text) class GeminiExtension(ComfyExtension): @@ -562,6 +756,7 @@ class GeminiExtension(ComfyExtension): return [ GeminiNode, GeminiImage, + GeminiImage2, GeminiInputFiles, ] diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py index ad6e3c0d0..bf01d7d36 100644 --- a/comfy_api_nodes/util/client.py +++ b/comfy_api_nodes/util/client.py @@ -63,6 +63,7 @@ class _RequestConfig: estimated_total: Optional[int] = None final_label_on_success: Optional[str] = "Completed" progress_origin_ts: Optional[float] = None + price_extractor: Optional[Callable[[dict[str, Any]], Optional[float]]] = None @dataclass @@ -87,6 +88,7 @@ async def sync_op( endpoint: ApiEndpoint, *, response_model: Type[M], + price_extractor: Optional[Callable[[M], Optional[float]]] = None, data: Optional[BaseModel] = None, files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]] = None, content_type: str = "application/json", @@ -104,6 +106,7 @@ async def sync_op( raw = await sync_op_raw( cls, endpoint, + 
price_extractor=_wrap_model_extractor(response_model, price_extractor), data=data, files=files, content_type=content_type, @@ -175,6 +178,7 @@ async def sync_op_raw( cls: type[IO.ComfyNode], endpoint: ApiEndpoint, *, + price_extractor: Optional[Callable[[dict[str, Any]], Optional[float]]] = None, data: Optional[Union[dict[str, Any], BaseModel]] = None, files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]] = None, content_type: str = "application/json", @@ -216,6 +220,7 @@ async def sync_op_raw( estimated_total=estimated_duration, final_label_on_success=final_label_on_success, progress_origin_ts=progress_origin_ts, + price_extractor=price_extractor, ) return await _request_base(cfg, expect_binary=as_binary) @@ -425,7 +430,8 @@ def _display_text( display_lines.append(f"Status: {status.capitalize() if isinstance(status, str) else status}") if price is not None: p = f"{float(price):,.4f}".rstrip("0").rstrip(".") - display_lines.append(f"Price: ${p}") + if p != "0": + display_lines.append(f"Price: ${p}") if text is not None: display_lines.append(text) if display_lines: @@ -581,6 +587,7 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool): delay = cfg.retry_delay operation_succeeded: bool = False final_elapsed_seconds: Optional[int] = None + extracted_price: Optional[float] = None while True: attempt += 1 stop_event = asyncio.Event() @@ -768,6 +775,8 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool): except json.JSONDecodeError: payload = {"_raw": text} response_content_to_log = payload if isinstance(payload, dict) else text + with contextlib.suppress(Exception): + extracted_price = cfg.price_extractor(payload) if cfg.price_extractor else None operation_succeeded = True final_elapsed_seconds = int(time.monotonic() - start_time) try: @@ -872,7 +881,7 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool): else int(time.monotonic() - start_time) ), estimated_total=cfg.estimated_total, - price=None, + price=extracted_price, is_queued=False, processing_elapsed_seconds=final_elapsed_seconds, ) diff --git a/requirements.txt b/requirements.txt index 36c39f338..8c1946f3d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.28.8 +comfyui-frontend-package==1.28.9 comfyui-workflow-templates==0.3.1 comfyui-embedded-docs==0.3.1 torch From b75d349f25ccb702895c6f1b8af7aded63a7f7e2 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 21 Nov 2025 02:33:54 +0200 Subject: [PATCH 0897/1073] fix(KlingLipSyncAudioToVideoNode): convert audio to mp3 format (#10811) --- comfy_api_nodes/nodes_kling.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 7b23e9cf9..36852038b 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -518,7 +518,9 @@ async def execute_lipsync( # Upload the audio file to Comfy API and get download URL if audio: - audio_url = await upload_audio_to_comfyapi(cls, audio) + audio_url = await upload_audio_to_comfyapi( + cls, audio, container_format="mp3", codec_name="libmp3lame", mime_type="audio/mpeg", filename="output.mp3" + ) logging.info("Uploaded audio to Comfy API. 
URL: %s", audio_url) else: audio_url = None From 10e90a5757906ecdb71b84d41173813d7f62c140 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Thu, 20 Nov 2025 18:20:52 -0800 Subject: [PATCH 0898/1073] bump comfyui-workflow-templates for nano banana 2 (#10818) * bump templates * bump templates --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8c1946f3d..624aa7362 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.28.9 -comfyui-workflow-templates==0.3.1 +comfyui-workflow-templates==0.6.0 comfyui-embedded-docs==0.3.1 torch torchsde From 943b3b615d40542ea19bc8ff8ad2950c0a094605 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 20 Nov 2025 19:44:43 -0800 Subject: [PATCH 0899/1073] HunyuanVideo 1.5 (#10819) * init * update * Update model.py * Update model.py * remove print * Fix text encoding * Prevent empty negative prompt Really doesn't work otherwise * fp16 works * I2V * Update model_base.py * Update nodes_hunyuan.py * Better latent rgb factors * Use the correct sigclip output... * Support HunyuanVideo1.5 SR model * whitespaces... * Proper latent channel count * SR model fixes This also still needs timesteps scheduling based on the noise scale, can be used with two samplers too already * vae_refiner: roll the convolution through temporal Work in progress. Roll the convolution through time using 2-latent-frame chunks and a FIFO queue for the convolution seams. * Support HunyuanVideo15 latent resampler * fix * Some cleanup Co-Authored-By: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> * Proper hyvid15 I2V channels Co-Authored-By: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> * Fix TokenRefiner for fp16 Otherwise x.sum has infs, just in case only casting if input is fp16, I don't know if necessary. * Bugfix for the HunyuanVideo15 SR model * vae_refiner: roll the convolution through temporal II Roll the convolution through time using 2-latent-frame chunks and a FIFO queue for the convolution seams. Added support for encoder, lowered to 1 latent frame to save more VRAM, made work for Hunyuan Image 3.0 (as code shared). Fixed names, cleaned up code. * Allow any number of input frames in VAE. * Better VAE encode mem estimation. * Lowvram fix. * Fix hunyuan image 2.1 refiner. * Fix mistake. * Name changes. * Rename. * Whitespace. * Fix. * Fix. 
--------- Co-authored-by: kijai <40791699+kijai@users.noreply.github.com> Co-authored-by: Rattus --- comfy/latent_formats.py | 60 ++++ comfy/ldm/hunyuan_video/model.py | 54 +++- comfy/ldm/hunyuan_video/upsampler.py | 120 ++++++++ comfy/ldm/hunyuan_video/vae_refiner.py | 284 +++++++++++------- comfy/model_base.py | 91 ++++++ comfy/model_detection.py | 10 + comfy/sd.py | 12 +- comfy/supported_models.py | 50 ++- comfy/text_encoders/hunyuan_video.py | 9 + comfy/text_encoders/qwen_image.py | 4 +- comfy_api/latest/_io.py | 4 + comfy_extras/nodes_hunyuan.py | 201 ++++++++++++- folder_paths.py | 2 + .../put_latent_upscale_models_here | 0 nodes.py | 2 +- 15 files changed, 777 insertions(+), 126 deletions(-) create mode 100644 comfy/ldm/hunyuan_video/upsampler.py create mode 100644 models/latent_upscale_models/put_latent_upscale_models_here diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 77e642a94..204fc048d 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -611,6 +611,66 @@ class HunyuanImage21Refiner(LatentFormat): latent_dimensions = 3 scale_factor = 1.03682 + def process_in(self, latent): + out = latent * self.scale_factor + out = torch.cat((out[:, :, :1], out), dim=2) + out = out.permute(0, 2, 1, 3, 4) + b, f_times_2, c, h, w = out.shape + out = out.reshape(b, f_times_2 // 2, 2 * c, h, w) + out = out.permute(0, 2, 1, 3, 4).contiguous() + return out + + def process_out(self, latent): + z = latent / self.scale_factor + z = z.permute(0, 2, 1, 3, 4) + b, f, c, h, w = z.shape + z = z.reshape(b, f, 2, c // 2, h, w) + z = z.permute(0, 1, 2, 3, 4, 5).reshape(b, f * 2, c // 2, h, w) + z = z.permute(0, 2, 1, 3, 4) + z = z[:, :, 1:] + return z + +class HunyuanVideo15(LatentFormat): + latent_rgb_factors = [ + [ 0.0568, -0.0521, -0.0131], + [ 0.0014, 0.0735, 0.0326], + [ 0.0186, 0.0531, -0.0138], + [-0.0031, 0.0051, 0.0288], + [ 0.0110, 0.0556, 0.0432], + [-0.0041, -0.0023, -0.0485], + [ 0.0530, 0.0413, 0.0253], + [ 0.0283, 0.0251, 0.0339], + [ 0.0277, -0.0372, -0.0093], + [ 0.0393, 0.0944, 0.1131], + [ 0.0020, 0.0251, 0.0037], + [-0.0017, 0.0012, 0.0234], + [ 0.0468, 0.0436, 0.0203], + [ 0.0354, 0.0439, -0.0233], + [ 0.0090, 0.0123, 0.0346], + [ 0.0382, 0.0029, 0.0217], + [ 0.0261, -0.0300, 0.0030], + [-0.0088, -0.0220, -0.0283], + [-0.0272, -0.0121, -0.0363], + [-0.0664, -0.0622, 0.0144], + [ 0.0414, 0.0479, 0.0529], + [ 0.0355, 0.0612, -0.0247], + [ 0.0147, 0.0264, 0.0174], + [ 0.0438, 0.0038, 0.0542], + [ 0.0431, -0.0573, -0.0033], + [-0.0162, -0.0211, -0.0406], + [-0.0487, -0.0295, -0.0393], + [ 0.0005, -0.0109, 0.0253], + [ 0.0296, 0.0591, 0.0353], + [ 0.0119, 0.0181, -0.0306], + [-0.0085, -0.0362, 0.0229], + [ 0.0005, -0.0106, 0.0242] + ] + + latent_rgb_factors_bias = [ 0.0456, -0.0202, -0.0644] + latent_channels = 32 + latent_dimensions = 3 + scale_factor = 1.03682 + class Hunyuan3Dv2(LatentFormat): latent_channels = 64 latent_dimensions = 1 diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py index 5132e6c07..f75c6e0e1 100644 --- a/comfy/ldm/hunyuan_video/model.py +++ b/comfy/ldm/hunyuan_video/model.py @@ -6,7 +6,6 @@ import comfy.ldm.flux.layers import comfy.ldm.modules.diffusionmodules.mmdit from comfy.ldm.modules.attention import optimized_attention - from dataclasses import dataclass from einops import repeat @@ -42,6 +41,8 @@ class HunyuanVideoParams: guidance_embed: bool byt5: bool meanflow: bool + use_cond_type_embedding: bool + vision_in_dim: int class SelfAttentionRef(nn.Module): @@ -157,7 +158,10 @@ class 
TokenRefiner(nn.Module): t = self.t_embedder(timestep_embedding(timesteps, 256, time_factor=1.0).to(x.dtype)) # m = mask.float().unsqueeze(-1) # c = (x.float() * m).sum(dim=1) / m.sum(dim=1) #TODO: the following works when the x.shape is the same length as the tokens but might break otherwise - c = x.sum(dim=1) / x.shape[1] + if x.dtype == torch.float16: + c = x.float().sum(dim=1) / x.shape[1] + else: + c = x.sum(dim=1) / x.shape[1] c = t + self.c_embedder(c.to(x.dtype)) x = self.input_embedder(x) @@ -196,11 +200,15 @@ class HunyuanVideo(nn.Module): def __init__(self, image_model=None, final_layer=True, dtype=None, device=None, operations=None, **kwargs): super().__init__() self.dtype = dtype + operation_settings = {"operations": operations, "device": device, "dtype": dtype} + params = HunyuanVideoParams(**kwargs) self.params = params self.patch_size = params.patch_size self.in_channels = params.in_channels self.out_channels = params.out_channels + self.use_cond_type_embedding = params.use_cond_type_embedding + self.vision_in_dim = params.vision_in_dim if params.hidden_size % params.num_heads != 0: raise ValueError( f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}" @@ -266,6 +274,18 @@ class HunyuanVideo(nn.Module): if final_layer: self.final_layer = LastLayer(self.hidden_size, self.patch_size[-1], self.out_channels, dtype=dtype, device=device, operations=operations) + # HunyuanVideo 1.5 specific modules + if self.vision_in_dim is not None: + from comfy.ldm.wan.model import MLPProj + self.vision_in = MLPProj(in_dim=self.vision_in_dim, out_dim=self.hidden_size, operation_settings=operation_settings) + else: + self.vision_in = None + if self.use_cond_type_embedding: + # 0: text_encoder feature 1: byt5 feature 2: vision_encoder feature + self.cond_type_embedding = nn.Embedding(3, self.hidden_size) + else: + self.cond_type_embedding = None + def forward_orig( self, img: Tensor, @@ -276,6 +296,7 @@ class HunyuanVideo(nn.Module): timesteps: Tensor, y: Tensor = None, txt_byt5=None, + clip_fea=None, guidance: Tensor = None, guiding_frame_index=None, ref_latent=None, @@ -331,12 +352,31 @@ class HunyuanVideo(nn.Module): txt = self.txt_in(txt, timesteps, txt_mask, transformer_options=transformer_options) + if self.cond_type_embedding is not None: + self.cond_type_embedding.to(txt.device) + cond_emb = self.cond_type_embedding(torch.zeros_like(txt[:, :, 0], device=txt.device, dtype=torch.long)) + txt = txt + cond_emb.to(txt.dtype) + if self.byt5_in is not None and txt_byt5 is not None: txt_byt5 = self.byt5_in(txt_byt5) + if self.cond_type_embedding is not None: + cond_emb = self.cond_type_embedding(torch.ones_like(txt_byt5[:, :, 0], device=txt_byt5.device, dtype=torch.long)) + txt_byt5 = txt_byt5 + cond_emb.to(txt_byt5.dtype) + txt = torch.cat((txt_byt5, txt), dim=1) # byt5 first for HunyuanVideo1.5 + else: + txt = torch.cat((txt, txt_byt5), dim=1) txt_byt5_ids = torch.zeros((txt_ids.shape[0], txt_byt5.shape[1], txt_ids.shape[-1]), device=txt_ids.device, dtype=txt_ids.dtype) - txt = torch.cat((txt, txt_byt5), dim=1) txt_ids = torch.cat((txt_ids, txt_byt5_ids), dim=1) + if clip_fea is not None: + txt_vision_states = self.vision_in(clip_fea) + if self.cond_type_embedding is not None: + cond_emb = self.cond_type_embedding(2 * torch.ones_like(txt_vision_states[:, :, 0], dtype=torch.long, device=txt_vision_states.device)) + txt_vision_states = txt_vision_states + cond_emb + txt = torch.cat((txt_vision_states.to(txt.dtype), txt), dim=1) + extra_txt_ids = 
torch.zeros((txt_ids.shape[0], txt_vision_states.shape[1], txt_ids.shape[-1]), device=txt_ids.device, dtype=txt_ids.dtype) + txt_ids = torch.cat((txt_ids, extra_txt_ids), dim=1) + ids = torch.cat((img_ids, txt_ids), dim=1) pe = self.pe_embedder(ids) @@ -430,14 +470,14 @@ class HunyuanVideo(nn.Module): img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0) return repeat(img_ids, "h w c -> b (h w) c", b=bs) - def forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, disable_time_r=False, control=None, transformer_options={}, **kwargs): + def forward(self, x, timestep, context, y=None, txt_byt5=None, clip_fea=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, disable_time_r=False, control=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( self._forward, self, comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) - ).execute(x, timestep, context, y, txt_byt5, guidance, attention_mask, guiding_frame_index, ref_latent, disable_time_r, control, transformer_options, **kwargs) + ).execute(x, timestep, context, y, txt_byt5, clip_fea, guidance, attention_mask, guiding_frame_index, ref_latent, disable_time_r, control, transformer_options, **kwargs) - def _forward(self, x, timestep, context, y=None, txt_byt5=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, disable_time_r=False, control=None, transformer_options={}, **kwargs): + def _forward(self, x, timestep, context, y=None, txt_byt5=None, clip_fea=None, guidance=None, attention_mask=None, guiding_frame_index=None, ref_latent=None, disable_time_r=False, control=None, transformer_options={}, **kwargs): bs = x.shape[0] if len(self.patch_size) == 3: img_ids = self.img_ids(x) @@ -445,5 +485,5 @@ class HunyuanVideo(nn.Module): else: img_ids = self.img_ids_2d(x) txt_ids = torch.zeros((bs, context.shape[1], 2), device=x.device, dtype=x.dtype) - out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, txt_byt5, guidance, guiding_frame_index, ref_latent, disable_time_r=disable_time_r, control=control, transformer_options=transformer_options) + out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, txt_byt5, clip_fea, guidance, guiding_frame_index, ref_latent, disable_time_r=disable_time_r, control=control, transformer_options=transformer_options) return out diff --git a/comfy/ldm/hunyuan_video/upsampler.py b/comfy/ldm/hunyuan_video/upsampler.py new file mode 100644 index 000000000..9f5e91a59 --- /dev/null +++ b/comfy/ldm/hunyuan_video/upsampler.py @@ -0,0 +1,120 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from comfy.ldm.hunyuan_video.vae_refiner import RMS_norm, ResnetBlock, VideoConv3d +import model_management, model_patcher + +class SRResidualCausalBlock3D(nn.Module): + def __init__(self, channels: int): + super().__init__() + self.block = nn.Sequential( + VideoConv3d(channels, channels, kernel_size=3), + nn.SiLU(inplace=True), + VideoConv3d(channels, channels, kernel_size=3), + nn.SiLU(inplace=True), + VideoConv3d(channels, channels, kernel_size=3), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return x + self.block(x) + +class SRModel3DV2(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + 
hidden_channels: int = 64, + num_blocks: int = 6, + global_residual: bool = False, + ): + super().__init__() + self.in_conv = VideoConv3d(in_channels, hidden_channels, kernel_size=3) + self.blocks = nn.ModuleList([SRResidualCausalBlock3D(hidden_channels) for _ in range(num_blocks)]) + self.out_conv = VideoConv3d(hidden_channels, out_channels, kernel_size=3) + self.global_residual = bool(global_residual) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + y = self.in_conv(x) + for blk in self.blocks: + y = blk(y) + y = self.out_conv(y) + if self.global_residual and (y.shape == residual.shape): + y = y + residual + return y + + +class Upsampler(nn.Module): + def __init__( + self, + z_channels: int, + out_channels: int, + block_out_channels: tuple[int, ...], + num_res_blocks: int = 2, + ): + super().__init__() + self.num_res_blocks = num_res_blocks + self.block_out_channels = block_out_channels + self.z_channels = z_channels + + ch = block_out_channels[0] + self.conv_in = VideoConv3d(z_channels, ch, kernel_size=3) + + self.up = nn.ModuleList() + + for i, tgt in enumerate(block_out_channels): + stage = nn.Module() + stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + temb_channels=0, + conv_shortcut=False, + conv_op=VideoConv3d, norm_op=RMS_norm) + for j in range(num_res_blocks + 1)]) + ch = tgt + self.up.append(stage) + + self.norm_out = RMS_norm(ch) + self.conv_out = VideoConv3d(ch, out_channels, kernel_size=3) + + def forward(self, z): + """ + Args: + z: (B, C, T, H, W) + target_shape: (H, W) + """ + # z to block_in + repeats = self.block_out_channels[0] // (self.z_channels) + x = self.conv_in(z) + z.repeat_interleave(repeats=repeats, dim=1) + + # upsampling + for stage in self.up: + for blk in stage.block: + x = blk(x) + + out = self.conv_out(F.silu(self.norm_out(x))) + return out + +UPSAMPLERS = { + "720p": SRModel3DV2, + "1080p": Upsampler, +} + +class HunyuanVideo15SRModel(): + def __init__(self, model_type, config): + self.load_device = model_management.vae_device() + offload_device = model_management.vae_offload_device() + self.dtype = model_management.vae_dtype(self.load_device) + self.model_class = UPSAMPLERS.get(model_type) + self.model = self.model_class(**config).eval() + + self.patcher = model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) + + def load_sd(self, sd): + return self.model.load_state_dict(sd, strict=True) + + def get_sd(self): + return self.model.state_dict() + + def resample_latent(self, latent): + model_management.load_model_gpu(self.patcher) + return self.model(latent.to(self.load_device)) diff --git a/comfy/ldm/hunyuan_video/vae_refiner.py b/comfy/ldm/hunyuan_video/vae_refiner.py index c2a0b507d..9f750dcc4 100644 --- a/comfy/ldm/hunyuan_video/vae_refiner.py +++ b/comfy/ldm/hunyuan_video/vae_refiner.py @@ -4,8 +4,40 @@ import torch.nn.functional as F from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, AttnBlock, VideoConv3d, Normalize import comfy.ops import comfy.ldm.models.autoencoder +import comfy.model_management ops = comfy.ops.disable_weight_init +class NoPadConv3d(nn.Module): + def __init__(self, n_channels, out_channels, kernel_size, stride=1, dilation=1, padding=0, **kwargs): + super().__init__() + self.conv = ops.Conv3d(n_channels, out_channels, kernel_size, stride=stride, dilation=dilation, **kwargs) + + def forward(self, x): + return self.conv(x) + + +def conv_carry_causal_3d(xl, op, conv_carry_in=None, conv_carry_out=None): 
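+    # Carry-aware causal conv: the input arrives as a single-element list and is
+    # cleared right away so the caller's reference to the chunk can be freed.
+    # When conv_carry_out is given, the last two frames are stashed for the next
+    # temporal chunk; for NoPadConv3d the first chunk gets full replicate padding
+    # (2 frames causally in time, 1 pixel spatially) while subsequent chunks
+    # prepend the carried frames and shrink the temporal padding accordingly.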
+ + x = xl[0] + xl.clear() + + if conv_carry_out is not None: + to_push = x[:, :, -2:, :, :].clone() + conv_carry_out.append(to_push) + + if isinstance(op, NoPadConv3d): + if conv_carry_in is None: + x = torch.nn.functional.pad(x, (1, 1, 1, 1, 2, 0), mode = 'replicate') + else: + carry_len = conv_carry_in[0].shape[2] + x = torch.cat([conv_carry_in.pop(0), x], dim=2) + x = torch.nn.functional.pad(x, (1, 1, 1, 1, 2 - carry_len, 0), mode = 'replicate') + + out = op(x) + + return out + + class RMS_norm(nn.Module): def __init__(self, dim): super().__init__() @@ -14,7 +46,7 @@ class RMS_norm(nn.Module): self.gamma = nn.Parameter(torch.empty(shape)) def forward(self, x): - return F.normalize(x, dim=1) * self.scale * self.gamma + return F.normalize(x, dim=1) * self.scale * comfy.model_management.cast_to(self.gamma, dtype=x.dtype, device=x.device) class DnSmpl(nn.Module): def __init__(self, ic, oc, tds=True, refiner_vae=True, op=VideoConv3d): @@ -27,11 +59,12 @@ class DnSmpl(nn.Module): self.tds = tds self.gs = fct * ic // oc - def forward(self, x): + def forward(self, x, conv_carry_in=None, conv_carry_out=None): r1 = 2 if self.tds else 1 - h = self.conv(x) + h = conv_carry_causal_3d([x], self.conv, conv_carry_in, conv_carry_out) + + if self.tds and self.refiner_vae and conv_carry_in is None: - if self.tds and self.refiner_vae: hf = h[:, :, :1, :, :] b, c, f, ht, wd = hf.shape hf = hf.reshape(b, c, f, ht // 2, 2, wd // 2, 2) @@ -39,14 +72,7 @@ class DnSmpl(nn.Module): hf = hf.reshape(b, 2 * 2 * c, f, ht // 2, wd // 2) hf = torch.cat([hf, hf], dim=1) - hn = h[:, :, 1:, :, :] - b, c, frms, ht, wd = hn.shape - nf = frms // r1 - hn = hn.reshape(b, c, nf, r1, ht // 2, 2, wd // 2, 2) - hn = hn.permute(0, 3, 5, 7, 1, 2, 4, 6) - hn = hn.reshape(b, r1 * 2 * 2 * c, nf, ht // 2, wd // 2) - - h = torch.cat([hf, hn], dim=2) + h = h[:, :, 1:, :, :] xf = x[:, :, :1, :, :] b, ci, f, ht, wd = xf.shape @@ -54,34 +80,32 @@ class DnSmpl(nn.Module): xf = xf.permute(0, 4, 6, 1, 2, 3, 5) xf = xf.reshape(b, 2 * 2 * ci, f, ht // 2, wd // 2) B, C, T, H, W = xf.shape - xf = xf.view(B, h.shape[1], self.gs // 2, T, H, W).mean(dim=2) + xf = xf.view(B, hf.shape[1], self.gs // 2, T, H, W).mean(dim=2) - xn = x[:, :, 1:, :, :] - b, ci, frms, ht, wd = xn.shape - nf = frms // r1 - xn = xn.reshape(b, ci, nf, r1, ht // 2, 2, wd // 2, 2) - xn = xn.permute(0, 3, 5, 7, 1, 2, 4, 6) - xn = xn.reshape(b, r1 * 2 * 2 * ci, nf, ht // 2, wd // 2) - B, C, T, H, W = xn.shape - xn = xn.view(B, h.shape[1], self.gs, T, H, W).mean(dim=2) - sc = torch.cat([xf, xn], dim=2) - else: - b, c, frms, ht, wd = h.shape + x = x[:, :, 1:, :, :] - nf = frms // r1 - h = h.reshape(b, c, nf, r1, ht // 2, 2, wd // 2, 2) - h = h.permute(0, 3, 5, 7, 1, 2, 4, 6) - h = h.reshape(b, r1 * 2 * 2 * c, nf, ht // 2, wd // 2) + if h.shape[2] == 0: + return hf + xf - b, ci, frms, ht, wd = x.shape - nf = frms // r1 - sc = x.reshape(b, ci, nf, r1, ht // 2, 2, wd // 2, 2) - sc = sc.permute(0, 3, 5, 7, 1, 2, 4, 6) - sc = sc.reshape(b, r1 * 2 * 2 * ci, nf, ht // 2, wd // 2) - B, C, T, H, W = sc.shape - sc = sc.view(B, h.shape[1], self.gs, T, H, W).mean(dim=2) + b, c, frms, ht, wd = h.shape + nf = frms // r1 + h = h.reshape(b, c, nf, r1, ht // 2, 2, wd // 2, 2) + h = h.permute(0, 3, 5, 7, 1, 2, 4, 6) + h = h.reshape(b, r1 * 2 * 2 * c, nf, ht // 2, wd // 2) - return h + sc + b, ci, frms, ht, wd = x.shape + nf = frms // r1 + x = x.reshape(b, ci, nf, r1, ht // 2, 2, wd // 2, 2) + x = x.permute(0, 3, 5, 7, 1, 2, 4, 6) + x = x.reshape(b, r1 * 2 * 2 * ci, nf, ht // 2, wd // 2) + B, C, T, H, 
W = x.shape + x = x.view(B, h.shape[1], self.gs, T, H, W).mean(dim=2) + + if self.tds and self.refiner_vae and conv_carry_in is None: + h = torch.cat([hf, h], dim=2) + x = torch.cat([xf, x], dim=2) + + return h + x class UpSmpl(nn.Module): @@ -94,11 +118,11 @@ class UpSmpl(nn.Module): self.tus = tus self.rp = fct * oc // ic - def forward(self, x): + def forward(self, x, conv_carry_in=None, conv_carry_out=None): r1 = 2 if self.tus else 1 - h = self.conv(x) + h = conv_carry_causal_3d([x], self.conv, conv_carry_in, conv_carry_out) - if self.tus and self.refiner_vae: + if self.tus and self.refiner_vae and conv_carry_in is None: hf = h[:, :, :1, :, :] b, c, f, ht, wd = hf.shape nc = c // (2 * 2) @@ -107,14 +131,7 @@ class UpSmpl(nn.Module): hf = hf.reshape(b, nc, f, ht * 2, wd * 2) hf = hf[:, : hf.shape[1] // 2] - hn = h[:, :, 1:, :, :] - b, c, frms, ht, wd = hn.shape - nc = c // (r1 * 2 * 2) - hn = hn.reshape(b, r1, 2, 2, nc, frms, ht, wd) - hn = hn.permute(0, 4, 5, 1, 6, 2, 7, 3) - hn = hn.reshape(b, nc, frms * r1, ht * 2, wd * 2) - - h = torch.cat([hf, hn], dim=2) + h = h[:, :, 1:, :, :] xf = x[:, :, :1, :, :] b, ci, f, ht, wd = xf.shape @@ -125,29 +142,43 @@ class UpSmpl(nn.Module): xf = xf.permute(0, 3, 4, 5, 1, 6, 2) xf = xf.reshape(b, nc, f, ht * 2, wd * 2) - xn = x[:, :, 1:, :, :] - xn = xn.repeat_interleave(repeats=self.rp, dim=1) - b, c, frms, ht, wd = xn.shape - nc = c // (r1 * 2 * 2) - xn = xn.reshape(b, r1, 2, 2, nc, frms, ht, wd) - xn = xn.permute(0, 4, 5, 1, 6, 2, 7, 3) - xn = xn.reshape(b, nc, frms * r1, ht * 2, wd * 2) - sc = torch.cat([xf, xn], dim=2) - else: - b, c, frms, ht, wd = h.shape - nc = c // (r1 * 2 * 2) - h = h.reshape(b, r1, 2, 2, nc, frms, ht, wd) - h = h.permute(0, 4, 5, 1, 6, 2, 7, 3) - h = h.reshape(b, nc, frms * r1, ht * 2, wd * 2) + x = x[:, :, 1:, :, :] - sc = x.repeat_interleave(repeats=self.rp, dim=1) - b, c, frms, ht, wd = sc.shape - nc = c // (r1 * 2 * 2) - sc = sc.reshape(b, r1, 2, 2, nc, frms, ht, wd) - sc = sc.permute(0, 4, 5, 1, 6, 2, 7, 3) - sc = sc.reshape(b, nc, frms * r1, ht * 2, wd * 2) + b, c, frms, ht, wd = h.shape + nc = c // (r1 * 2 * 2) + h = h.reshape(b, r1, 2, 2, nc, frms, ht, wd) + h = h.permute(0, 4, 5, 1, 6, 2, 7, 3) + h = h.reshape(b, nc, frms * r1, ht * 2, wd * 2) - return h + sc + x = x.repeat_interleave(repeats=self.rp, dim=1) + b, c, frms, ht, wd = x.shape + nc = c // (r1 * 2 * 2) + x = x.reshape(b, r1, 2, 2, nc, frms, ht, wd) + x = x.permute(0, 4, 5, 1, 6, 2, 7, 3) + x = x.reshape(b, nc, frms * r1, ht * 2, wd * 2) + + if self.tus and self.refiner_vae and conv_carry_in is None: + h = torch.cat([hf, h], dim=2) + x = torch.cat([xf, x], dim=2) + + return h + x + +class HunyuanRefinerResnetBlock(ResnetBlock): + def __init__(self, in_channels, out_channels, conv_op=NoPadConv3d, norm_op=RMS_norm): + super().__init__(in_channels=in_channels, out_channels=out_channels, temb_channels=0, conv_op=conv_op, norm_op=norm_op) + + def forward(self, x, conv_carry_in=None, conv_carry_out=None): + h = x + h = [ self.swish(self.norm1(x)) ] + h = conv_carry_causal_3d(h, self.conv1, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out) + + h = [ self.dropout(self.swish(self.norm2(h))) ] + h = conv_carry_causal_3d(h, self.conv2, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out) + + if self.in_channels != self.out_channels: + x = self.nin_shortcut(x) + + return x+h class Encoder(nn.Module): def __init__(self, in_channels, z_channels, block_out_channels, num_res_blocks, @@ -160,7 +191,7 @@ class Encoder(nn.Module): self.refiner_vae = 
refiner_vae if self.refiner_vae: - conv_op = VideoConv3d + conv_op = NoPadConv3d norm_op = RMS_norm else: conv_op = ops.Conv3d @@ -175,10 +206,9 @@ class Encoder(nn.Module): for i, tgt in enumerate(block_out_channels): stage = nn.Module() - stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, - out_channels=tgt, - temb_channels=0, - conv_op=conv_op, norm_op=norm_op) + stage.block = nn.ModuleList([HunyuanRefinerResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + conv_op=conv_op, norm_op=norm_op) for j in range(num_res_blocks)]) ch = tgt if i < depth: @@ -188,9 +218,9 @@ class Encoder(nn.Module): self.down.append(stage) self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=conv_op, norm_op=norm_op) + self.mid.block_1 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=norm_op) - self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=conv_op, norm_op=norm_op) + self.mid.block_2 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.norm_out = norm_op(ch) self.conv_out = conv_op(ch, z_channels << 1, 3, 1, 1) @@ -201,31 +231,50 @@ class Encoder(nn.Module): if not self.refiner_vae and x.shape[2] == 1: x = x.expand(-1, -1, self.ffactor_temporal, -1, -1) - x = self.conv_in(x) + if self.refiner_vae: + xl = [x[:, :, :1, :, :]] + if x.shape[2] > self.ffactor_temporal: + xl += torch.split(x[:, :, 1: 1 + ((x.shape[2] - 1) // self.ffactor_temporal) * self.ffactor_temporal, :, :], self.ffactor_temporal * 2, dim=2) + x = xl + else: + x = [x] + out = [] - for stage in self.down: - for blk in stage.block: - x = blk(x) - if hasattr(stage, 'downsample'): - x = stage.downsample(x) + conv_carry_in = None - x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(x))) + for i, x1 in enumerate(x): + conv_carry_out = [] + if i == len(x) - 1: + conv_carry_out = None + x1 = [ x1 ] + x1 = conv_carry_causal_3d(x1, self.conv_in, conv_carry_in, conv_carry_out) + + for stage in self.down: + for blk in stage.block: + x1 = blk(x1, conv_carry_in, conv_carry_out) + if hasattr(stage, 'downsample'): + x1 = stage.downsample(x1, conv_carry_in, conv_carry_out) + + out.append(x1) + conv_carry_in = conv_carry_out + + if len(out) > 1: + out = torch.cat(out, dim=2) + else: + out = out[0] + + x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(out))) + del out b, c, t, h, w = x.shape grp = c // (self.z_channels << 1) skip = x.view(b, c // grp, grp, t, h, w).mean(2) - out = self.conv_out(F.silu(self.norm_out(x))) + skip + out = conv_carry_causal_3d([F.silu(self.norm_out(x))], self.conv_out) + skip if self.refiner_vae: out = self.regul(out)[0] - out = torch.cat((out[:, :, :1], out), dim=2) - out = out.permute(0, 2, 1, 3, 4) - b, f_times_2, c, h, w = out.shape - out = out.reshape(b, f_times_2 // 2, 2 * c, h, w) - out = out.permute(0, 2, 1, 3, 4).contiguous() - return out class Decoder(nn.Module): @@ -239,7 +288,7 @@ class Decoder(nn.Module): self.refiner_vae = refiner_vae if self.refiner_vae: - conv_op = VideoConv3d + conv_op = NoPadConv3d norm_op = RMS_norm else: conv_op = ops.Conv3d @@ -249,9 +298,9 @@ class Decoder(nn.Module): self.conv_in = conv_op(z_channels, ch, kernel_size=3, stride=1, padding=1) self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=conv_op, norm_op=norm_op) + self.mid.block_1 = 
HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=norm_op) - self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, temb_channels=0, conv_op=conv_op, norm_op=norm_op) + self.mid.block_2 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.up = nn.ModuleList() depth = (ffactor_spatial >> 1).bit_length() @@ -259,10 +308,9 @@ class Decoder(nn.Module): for i, tgt in enumerate(block_out_channels): stage = nn.Module() - stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, - out_channels=tgt, - temb_channels=0, - conv_op=conv_op, norm_op=norm_op) + stage.block = nn.ModuleList([HunyuanRefinerResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + conv_op=conv_op, norm_op=norm_op) for j in range(num_res_blocks + 1)]) ch = tgt if i < depth: @@ -275,27 +323,41 @@ class Decoder(nn.Module): self.conv_out = conv_op(ch, out_channels, 3, stride=1, padding=1) def forward(self, z): - if self.refiner_vae: - z = z.permute(0, 2, 1, 3, 4) - b, f, c, h, w = z.shape - z = z.reshape(b, f, 2, c // 2, h, w) - z = z.permute(0, 1, 2, 3, 4, 5).reshape(b, f * 2, c // 2, h, w) - z = z.permute(0, 2, 1, 3, 4) - z = z[:, :, 1:] - - x = self.conv_in(z) + z.repeat_interleave(self.block_out_channels[0] // self.z_channels, 1) + x = conv_carry_causal_3d([z], self.conv_in) + z.repeat_interleave(self.block_out_channels[0] // self.z_channels, 1) x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(x))) - for stage in self.up: - for blk in stage.block: - x = blk(x) - if hasattr(stage, 'upsample'): - x = stage.upsample(x) + if self.refiner_vae: + x = torch.split(x, 2, dim=2) + else: + x = [ x ] + out = [] - out = self.conv_out(F.silu(self.norm_out(x))) + conv_carry_in = None + + for i, x1 in enumerate(x): + conv_carry_out = [] + if i == len(x) - 1: + conv_carry_out = None + for stage in self.up: + for blk in stage.block: + x1 = blk(x1, conv_carry_in, conv_carry_out) + if hasattr(stage, 'upsample'): + x1 = stage.upsample(x1, conv_carry_in, conv_carry_out) + + x1 = [ F.silu(self.norm_out(x1)) ] + x1 = conv_carry_causal_3d(x1, self.conv_out, conv_carry_in, conv_carry_out) + out.append(x1) + conv_carry_in = conv_carry_out + del x + + if len(out) > 1: + out = torch.cat(out, dim=2) + else: + out = out[0] if not self.refiner_vae: if z.shape[-3] == 1: out = out[:, :, -1:] return out + diff --git a/comfy/model_base.py b/comfy/model_base.py index 7c788d085..e14b552c5 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1536,3 +1536,94 @@ class HunyuanImage21Refiner(HunyuanImage21): out = super().extra_conds(**kwargs) out['disable_time_r'] = comfy.conds.CONDConstant(True) return out + +class HunyuanVideo15(HunyuanVideo): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device) + + def concat_cond(self, **kwargs): + noise = kwargs.get("noise", None) + extra_channels = self.diffusion_model.img_in.proj.weight.shape[1] - noise.shape[1] - 1 #noise 32 img cond 32 + mask 1 + if extra_channels == 0: + return None + + image = kwargs.get("concat_latent_image", None) + device = kwargs["device"] + + if image is None: + shape_image = list(noise.shape) + shape_image[1] = extra_channels + image = torch.zeros(shape_image, dtype=noise.dtype, layout=noise.layout, device=noise.device) + else: + latent_dim = self.latent_format.latent_channels + image = 
utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + for i in range(0, image.shape[1], latent_dim): + image[:, i: i + latent_dim] = self.process_latent_in(image[:, i: i + latent_dim]) + image = utils.resize_to_batch_size(image, noise.shape[0]) + + mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) + if mask is None: + mask = torch.zeros_like(noise)[:, :1] + else: + mask = 1.0 - mask + mask = utils.common_upscale(mask.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + if mask.shape[-3] < noise.shape[-3]: + mask = torch.nn.functional.pad(mask, (0, 0, 0, 0, 0, noise.shape[-3] - mask.shape[-3]), mode='constant', value=0) + mask = utils.resize_to_batch_size(mask, noise.shape[0]) + + return torch.cat((image, mask), dim=1) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + attention_mask = kwargs.get("attention_mask", None) + if attention_mask is not None: + if torch.numel(attention_mask) != attention_mask.sum(): + out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + + conditioning_byt5small = kwargs.get("conditioning_byt5small", None) + if conditioning_byt5small is not None: + out['txt_byt5'] = comfy.conds.CONDRegular(conditioning_byt5small) + + guidance = kwargs.get("guidance", 6.0) + if guidance is not None: + out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance])) + + clip_vision_output = kwargs.get("clip_vision_output", None) + if clip_vision_output is not None: + out['clip_fea'] = comfy.conds.CONDRegular(clip_vision_output.last_hidden_state) + + return out + +class HunyuanVideo15_SR_Distilled(HunyuanVideo15): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device) + + def concat_cond(self, **kwargs): + noise = kwargs.get("noise", None) + image = kwargs.get("concat_latent_image", None) + noise_augmentation = kwargs.get("noise_augmentation", 0.0) + device = kwargs["device"] + + if image is None: + image = torch.zeros([noise.shape[0], noise.shape[1] * 2 + 2, noise.shape[-3], noise.shape[-2], noise.shape[-1]], device=comfy.model_management.intermediate_device()) + else: + image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + #image = self.process_latent_in(image) # scaling wasn't applied in reference code + image = utils.resize_to_batch_size(image, noise.shape[0]) + lq_image_slice = slice(noise.shape[1] + 1, 2 * noise.shape[1] + 1) + if noise_augmentation > 0: + generator = torch.Generator(device="cpu") + generator.manual_seed(kwargs.get("seed", 0) - 10) + noise = torch.randn(image[:, lq_image_slice].shape, generator=generator, dtype=image.dtype, device="cpu").to(image.device) + image[:, lq_image_slice] = noise_augmentation * noise + min(1.0 - noise_augmentation, 0.75) * image[:, lq_image_slice] + else: + image[:, lq_image_slice] = 0.75 * image[:, lq_image_slice] + return image + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + out['disable_time_r'] = comfy.conds.CONDConstant(False) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 3142a7fc3..0131ca25a 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -186,6 +186,16 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): guidance_keys = list(filter(lambda a: 
a.startswith("{}guidance_in.".format(key_prefix)), state_dict_keys)) dit_config["guidance_embed"] = len(guidance_keys) > 0 + + # HunyuanVideo 1.5 + if '{}cond_type_embedding.weight'.format(key_prefix) in state_dict_keys: + dit_config["use_cond_type_embedding"] = True + else: + dit_config["use_cond_type_embedding"] = False + if '{}vision_in.proj.0.weight'.format(key_prefix) in state_dict_keys: + dit_config["vision_in_dim"] = state_dict['{}vision_in.proj.0.weight'.format(key_prefix)].shape[0] + else: + dit_config["vision_in_dim"] = None return dit_config if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in state_dict_keys and ('{}img_in.weight'.format(key_prefix) in state_dict_keys or f"{key_prefix}distilled_guidance_layer.norms.0.scale" in state_dict_keys): #Flux, Chroma or Chroma Radiance (has no img_in.weight) diff --git a/comfy/sd.py b/comfy/sd.py index 9e5ebbf15..dc0905ada 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -441,20 +441,20 @@ class VAE: elif "decoder.conv_in.conv.weight" in sd and sd['decoder.conv_in.conv.weight'].shape[1] == 32: ddconfig = {"block_out_channels": [128, 256, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 16, "ffactor_temporal": 4, "downsample_match_channel": True, "upsample_match_channel": True} ddconfig['z_channels'] = sd["decoder.conv_in.conv.weight"].shape[1] - self.latent_channels = 64 + self.latent_channels = 32 self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 16, 16) self.upscale_index_formula = (4, 16, 16) self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 16, 16) self.downscale_index_formula = (4, 16, 16) self.latent_dim = 3 - self.not_video = True + self.not_video = False self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.EmptyRegularizer"}, encoder_config={'target': "comfy.ldm.hunyuan_video.vae_refiner.Encoder", 'params': ddconfig}, decoder_config={'target': "comfy.ldm.hunyuan_video.vae_refiner.Decoder", 'params': ddconfig}) - self.memory_used_encode = lambda shape, dtype: (1400 * shape[-2] * shape[-1]) * model_management.dtype_size(dtype) - self.memory_used_decode = lambda shape, dtype: (1400 * shape[-3] * shape[-2] * shape[-1] * 16 * 16) * model_management.dtype_size(dtype) + self.memory_used_encode = lambda shape, dtype: (1400 * 9 * shape[-2] * shape[-1]) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (2800 * 4 * shape[-2] * shape[-1] * 16 * 16) * model_management.dtype_size(dtype) elif "decoder.conv_in.conv.weight" in sd: ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} ddconfig["conv3d"] = True @@ -911,6 +911,7 @@ class CLIPType(Enum): OMNIGEN2 = 17 QWEN_IMAGE = 18 HUNYUAN_IMAGE = 19 + HUNYUAN_VIDEO_15 = 20 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -1126,6 +1127,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip elif clip_type == CLIPType.HUNYUAN_IMAGE: clip_target.clip = comfy.text_encoders.hunyuan_image.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.hunyuan_image.HunyuanImageTokenizer + elif clip_type == CLIPType.HUNYUAN_VIDEO_15: + clip_target.clip = comfy.text_encoders.hunyuan_image.te(**llama_detect(clip_data)) + 
clip_target.tokenizer = comfy.text_encoders.hunyuan_video.HunyuanVideo15Tokenizer else: clip_target.clip = sdxl_clip.SDXLClipModel clip_target.tokenizer = sdxl_clip.SDXLTokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 4064bdae1..2e64b85e8 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1374,6 +1374,54 @@ class HunyuanImage21Refiner(HunyuanVideo): out = model_base.HunyuanImage21Refiner(self, device=device) return out -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] +class HunyuanVideo15(HunyuanVideo): + unet_config = { + "image_model": "hunyuan_video", + "vision_in_dim": 1152, + } + + sampling_settings = { + "shift": 7.0, + } + memory_usage_factor = 4.0 #TODO + supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + latent_format = latent_formats.HunyuanVideo15 + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.HunyuanVideo15(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_video.HunyuanVideo15Tokenizer, comfy.text_encoders.hunyuan_image.te(**hunyuan_detect)) + + +class HunyuanVideo15_SR_Distilled(HunyuanVideo): + unet_config = { + "image_model": "hunyuan_video", + "vision_in_dim": 1152, + "in_channels": 98, + } + + sampling_settings = { + "shift": 2.0, + } + memory_usage_factor = 4.0 #TODO + supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + latent_format = latent_formats.HunyuanVideo15 + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.HunyuanVideo15_SR_Distilled(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_video.HunyuanVideo15Tokenizer, comfy.text_encoders.hunyuan_image.te(**hunyuan_detect)) + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, 
WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] models += [SVD_img2vid] diff --git a/comfy/text_encoders/hunyuan_video.py b/comfy/text_encoders/hunyuan_video.py index b02148b33..557094f49 100644 --- a/comfy/text_encoders/hunyuan_video.py +++ b/comfy/text_encoders/hunyuan_video.py @@ -1,6 +1,7 @@ from comfy import sd1_clip import comfy.model_management import comfy.text_encoders.llama +from .hunyuan_image import HunyuanImageTokenizer from transformers import LlamaTokenizerFast import torch import os @@ -73,6 +74,14 @@ class HunyuanVideoTokenizer: return {} +class HunyuanVideo15Tokenizer(HunyuanImageTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.llama_template = "<|im_start|>system\nYou are a helpful assistant. Describe the video by detailing the following aspects:\n1. The main content and theme of the video.\n2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects.\n3. Actions, events, behaviors temporal relationships, physical movement changes of the objects.\n4. background environment, light, style and atmosphere.\n5. camera angles, movements, and transitions used in the video.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + + def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): + return super().tokenize_with_weights(text, return_word_ids, prevent_empty_text=True, **kwargs) + class HunyuanVideoClipModel(torch.nn.Module): def __init__(self, dtype_llama=None, device="cpu", dtype=None, model_options={}): super().__init__() diff --git a/comfy/text_encoders/qwen_image.py b/comfy/text_encoders/qwen_image.py index 40fa67937..c0d32a6ef 100644 --- a/comfy/text_encoders/qwen_image.py +++ b/comfy/text_encoders/qwen_image.py @@ -17,12 +17,14 @@ class QwenImageTokenizer(sd1_clip.SD1Tokenizer): self.llama_template = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" self.llama_template_images = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. 
Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n" - def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, images=[], **kwargs): + def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, images=[], prevent_empty_text=False, **kwargs): skip_template = False if text.startswith('<|im_start|>'): skip_template = True if text.startswith('<|start_header_id|>'): skip_template = True + if prevent_empty_text and text == '': + text = ' ' if skip_template: llama_text = text diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 863254ce7..79c0722a9 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -629,6 +629,10 @@ class UpscaleModel(ComfyTypeIO): if TYPE_CHECKING: Type = ImageModelDescriptor +@comfytype(io_type="LATENT_UPSCALE_MODEL") +class LatentUpscaleModel(ComfyTypeIO): + Type = Any + @comfytype(io_type="AUDIO") class Audio(ComfyTypeIO): class AudioDict(TypedDict): diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index f7c34d059..5a2e8cc61 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -4,7 +4,8 @@ import torch import comfy.model_management from typing_extensions import override from comfy_api.latest import ComfyExtension, io - +from comfy.ldm.hunyuan_video.upsampler import HunyuanVideo15SRModel +import folder_paths class CLIPTextEncodeHunyuanDiT(io.ComfyNode): @classmethod @@ -57,6 +58,199 @@ class EmptyHunyuanLatentVideo(io.ComfyNode): generate = execute # TODO: remove +class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo): + @classmethod + def define_schema(cls): + schema = super().define_schema() + schema.node_id = "EmptyHunyuanVideo15Latent" + return schema + + @classmethod + def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput: + # Using scale factor of 16 instead of 8 + latent = torch.zeros([batch_size, 32, ((length - 1) // 4) + 1, height // 16, width // 16], device=comfy.model_management.intermediate_device()) + return io.NodeOutput({"samples": latent}) + + generate = execute # TODO: remove + + +class HunyuanVideo15ImageToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="HunyuanVideo15ImageToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=33, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Image.Input("start_image", optional=True), + io.ClipVisionOutput.Input("clip_vision_output", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None) -> io.NodeOutput: + latent = torch.zeros([batch_size, 32, ((length - 1) // 4) + 1, height // 16, width // 16], device=comfy.model_management.intermediate_device()) + + if start_image is not None: + start_image = 
comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + + encoded = vae.encode(start_image[:, :, :, :3]) + concat_latent_image = torch.zeros((latent.shape[0], 32, latent.shape[2], latent.shape[3], latent.shape[4]), device=comfy.model_management.intermediate_device()) + concat_latent_image[:, :, :encoded.shape[2], :, :] = encoded + + mask = torch.ones((1, 1, latent.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=start_image.device, dtype=start_image.dtype) + mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0 + + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) + + if clip_vision_output is not None: + positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) + negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) + + out_latent = {} + out_latent["samples"] = latent + return io.NodeOutput(positive, negative, out_latent) + + +class HunyuanVideo15SuperResolution(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="HunyuanVideo15SuperResolution", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae", optional=True), + io.Image.Input("start_image", optional=True), + io.ClipVisionOutput.Input("clip_vision_output", optional=True), + io.Latent.Input("latent"), + io.Float.Input("noise_augmentation", default=0.70, min=0.0, max=1.0, step=0.01), + + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, positive, negative, latent, noise_augmentation, vae=None, start_image=None, clip_vision_output=None) -> io.NodeOutput: + in_latent = latent["samples"] + in_channels = in_latent.shape[1] + cond_latent = torch.zeros([in_latent.shape[0], in_channels * 2 + 2, in_latent.shape[-3], in_latent.shape[-2], in_latent.shape[-1]], device=comfy.model_management.intermediate_device()) + cond_latent[:, in_channels + 1 : 2 * in_channels + 1] = in_latent + cond_latent[:, 2 * in_channels + 1] = 1 + if start_image is not None: + start_image = comfy.utils.common_upscale(start_image.movedim(-1, 1), in_latent.shape[-1] * 16, in_latent.shape[-2] * 16, "bilinear", "center").movedim(1, -1) + encoded = vae.encode(start_image[:, :, :, :3]) + cond_latent[:, :in_channels, :encoded.shape[2], :, :] = encoded + cond_latent[:, in_channels + 1, 0] = 1 + + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": cond_latent, "noise_augmentation": noise_augmentation}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": cond_latent, "noise_augmentation": noise_augmentation}) + if clip_vision_output is not None: + positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) + negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) + + return io.NodeOutput(positive, negative, latent) + + +class LatentUpscaleModelLoader(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LatentUpscaleModelLoader", + display_name="Load Latent Upscale Model", + 
category="loaders", + inputs=[ + io.Combo.Input("model_name", options=folder_paths.get_filename_list("latent_upscale_models")), + ], + outputs=[ + io.LatentUpscaleModel.Output(), + ], + ) + + @classmethod + def execute(cls, model_name) -> io.NodeOutput: + model_path = folder_paths.get_full_path_or_raise("upscale_models", model_name) + sd = comfy.utils.load_torch_file(model_path, safe_load=True) + + if "blocks.0.block.0.conv.weight" in sd: + config = { + "in_channels": sd["in_conv.conv.weight"].shape[1], + "out_channels": sd["out_conv.conv.weight"].shape[0], + "hidden_channels": sd["in_conv.conv.weight"].shape[0], + "num_blocks": len([k for k in sd.keys() if k.startswith("blocks.") and k.endswith(".block.0.conv.weight")]), + "global_residual": False, + } + model_type = "720p" + elif "up.0.block.0.conv1.conv.weight" in sd: + sd = {key.replace("nin_shortcut", "nin_shortcut.conv", 1): value for key, value in sd.items()} + config = { + "z_channels": sd["conv_in.conv.weight"].shape[1], + "out_channels": sd["conv_out.conv.weight"].shape[0], + "block_out_channels": tuple(sd[f"up.{i}.block.0.conv1.conv.weight"].shape[0] for i in range(len([k for k in sd.keys() if k.startswith("up.") and k.endswith(".block.0.conv1.conv.weight")]))), + } + model_type = "1080p" + + model = HunyuanVideo15SRModel(model_type, config) + model.load_sd(sd) + + return io.NodeOutput(model) + + +class HunyuanVideo15LatentUpscaleWithModel(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="HunyuanVideo15LatentUpscaleWithModel", + display_name="Hunyuan Video 15 Latent Upscale With Model", + category="latent", + inputs=[ + io.LatentUpscaleModel.Input("model"), + io.Latent.Input("samples"), + io.Combo.Input("upscale_method", options=["nearest-exact", "bilinear", "area", "bicubic", "bislerp"], default="bilinear"), + io.Int.Input("width", default=1280, min=0, max=16384, step=8), + io.Int.Input("height", default=720, min=0, max=16384, step=8), + io.Combo.Input("crop", options=["disabled", "center"]), + ], + outputs=[ + io.Latent.Output(), + ], + ) + + @classmethod + def execute(cls, model, samples, upscale_method, width, height, crop) -> io.NodeOutput: + if width == 0 and height == 0: + return io.NodeOutput(samples) + else: + if width == 0: + height = max(64, height) + width = max(64, round(samples["samples"].shape[-1] * height / samples["samples"].shape[-2])) + elif height == 0: + width = max(64, width) + height = max(64, round(samples["samples"].shape[-2] * width / samples["samples"].shape[-1])) + else: + width = max(64, width) + height = max(64, height) + s = comfy.utils.common_upscale(samples["samples"], width // 16, height // 16, upscale_method, crop) + s = model.resample_latent(s) + return io.NodeOutput({"samples": s.cpu().float()}) + + PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = ( "<|start_header_id|>system<|end_header_id|>\n\n\nDescribe the video by detailing the following aspects according to the reference image: " "1. The main content and theme of the video." 
@@ -210,6 +404,11 @@ class HunyuanExtension(ComfyExtension): CLIPTextEncodeHunyuanDiT, TextEncodeHunyuanVideo_ImageToVideo, EmptyHunyuanLatentVideo, + EmptyHunyuanVideo15Latent, + HunyuanVideo15ImageToVideo, + HunyuanVideo15SuperResolution, + HunyuanVideo15LatentUpscaleWithModel, + LatentUpscaleModelLoader, HunyuanImageToVideo, EmptyHunyuanImageLatent, HunyuanRefinerLatent, diff --git a/folder_paths.py b/folder_paths.py index f110d832b..ffdc4d020 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -38,6 +38,8 @@ folder_names_and_paths["gligen"] = ([os.path.join(models_dir, "gligen")], suppor folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions) +folder_names_and_paths["latent_upscale_models"] = ([os.path.join(models_dir, "latent_upscale_models")], supported_pt_extensions) + folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes")], set()) folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions) diff --git a/models/latent_upscale_models/put_latent_upscale_models_here b/models/latent_upscale_models/put_latent_upscale_models_here new file mode 100644 index 000000000..e69de29bb diff --git a/nodes.py b/nodes.py index 030371633..f023ae3b6 100644 --- a/nodes.py +++ b/nodes.py @@ -957,7 +957,7 @@ class DualCLIPLoader: def INPUT_TYPES(s): return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image"], ), + "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), From 33981237527a3d84d4e9c3b113f75d6dd37af6a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 20 Nov 2025 20:39:37 -0800 Subject: [PATCH 0900/1073] Fix wrong path. (#10821) --- comfy_extras/nodes_hunyuan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index 5a2e8cc61..aa36a471f 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -185,7 +185,7 @@ class LatentUpscaleModelLoader(io.ComfyNode): @classmethod def execute(cls, model_name) -> io.NodeOutput: - model_path = folder_paths.get_full_path_or_raise("upscale_models", model_name) + model_path = folder_paths.get_full_path_or_raise("latent_upscale_models", model_name) sd = comfy.utils.load_torch_file(model_path, safe_load=True) if "blocks.0.block.0.conv.weight" in sd: From c55fd7481626d8bee8044ea7512ea996d13a1b90 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 21 Nov 2025 00:49:13 -0500 Subject: [PATCH 0901/1073] ComfyUI 0.3.71 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 9b77aabe9..b4655d553 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.3.70" +__version__ = "0.3.71" diff --git a/pyproject.toml b/pyproject.toml index 289b7145b..280dbaf53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.70" +version = "0.3.71" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From ecb683b057a19f1a05d18d6d0b0ee9a6c6c8f4a0 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Fri, 21 Nov 2025 13:34:47 -0800 Subject: [PATCH 0902/1073] update frontend to 1.30 (#10793) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 624aa7362..f83d561c9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.28.9 +comfyui-frontend-package==1.30.6 comfyui-workflow-templates==0.6.0 comfyui-embedded-docs==0.3.1 torch From 532938b16b544e4492ba0ffbe18b201b1a7bc55f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 21 Nov 2025 14:51:55 -0800 Subject: [PATCH 0903/1073] --disable-api-nodes now sets CSP header to force frontend offline. (#10829) --- comfy/cli_args.py | 2 +- server.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 2f30b72d2..d2b60e347 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -160,7 +160,7 @@ parser.add_argument("--windows-standalone-build", action="store_true", help="Win parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.") parser.add_argument("--disable-all-custom-nodes", action="store_true", help="Disable loading all custom nodes.") parser.add_argument("--whitelist-custom-nodes", type=str, nargs='+', default=[], help="Specify custom node folders to load even when --disable-all-custom-nodes is enabled.") -parser.add_argument("--disable-api-nodes", action="store_true", help="Disable loading all api nodes.") +parser.add_argument("--disable-api-nodes", action="store_true", help="Disable loading all api nodes. Also prevents the frontend from communicating with the internet.") parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage.") diff --git a/server.py b/server.py index d9d5c491f..0fd2e49e3 100644 --- a/server.py +++ b/server.py @@ -164,6 +164,22 @@ def create_origin_only_middleware(): return origin_only_middleware + +def create_block_external_middleware(): + @web.middleware + async def block_external_middleware(request: web.Request, handler): + if request.method == "OPTIONS": + # Pre-flight request. 
Reply successfully: + response = web.Response() + else: + response = await handler(request) + + response.headers['Content-Security-Policy'] = "default-src 'self'; script-src 'self' 'unsafe-inline' blob:; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; font-src 'self'; connect-src 'self'; frame-src 'self'; object-src 'self';" + return response + + return block_external_middleware + + class PromptServer(): def __init__(self, loop): PromptServer.instance = self @@ -193,6 +209,9 @@ class PromptServer(): else: middlewares.append(create_origin_only_middleware()) + if args.disable_api_nodes: + middlewares.append(create_block_external_middleware()) + max_upload_size = round(args.max_upload_size * 1024 * 1024) self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares) self.sockets = dict() From a9c35256bccd4018fbe74bf1e857cc18bd1900ed Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Sat, 22 Nov 2025 02:28:29 -0800 Subject: [PATCH 0904/1073] Update requirements.txt (#10834) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f83d561c9..8e308cd6c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.30.6 -comfyui-workflow-templates==0.6.0 +comfyui-workflow-templates==0.7.9 comfyui-embedded-docs==0.3.1 torch torchsde From d89c29f25992713ec3102017c189858a457f1215 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 22 Nov 2025 19:51:53 -0800 Subject: [PATCH 0905/1073] Add display names to Hunyuan latent video nodes. (#10837) --- comfy_extras/nodes_hunyuan.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index aa36a471f..32be182f1 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -38,6 +38,7 @@ class EmptyHunyuanLatentVideo(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="EmptyHunyuanLatentVideo", + display_name="Empty HunyuanVideo 1.0 Latent", category="latent/video", inputs=[ io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), @@ -63,6 +64,7 @@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo): def define_schema(cls): schema = super().define_schema() schema.node_id = "EmptyHunyuanVideo15Latent" + schema.display_name = "Empty HunyuanVideo 1.5 Latent" return schema @classmethod @@ -71,8 +73,6 @@ class EmptyHunyuanVideo15Latent(EmptyHunyuanLatentVideo): latent = torch.zeros([batch_size, 32, ((length - 1) // 4) + 1, height // 16, width // 16], device=comfy.model_management.intermediate_device()) return io.NodeOutput({"samples": latent}) - generate = execute # TODO: remove - class HunyuanVideo15ImageToVideo(io.ComfyNode): @classmethod From cbd68e3d587a1b345bdc6ebcd8a8c6ba1a9d3af3 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 23 Nov 2025 01:55:22 -0800 Subject: [PATCH 0906/1073] Add better error message for common error. 
(#10846) --- comfy/cldm/cldm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index ec01665e2..c93c2e909 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -413,7 +413,8 @@ class ControlNet(nn.Module): out_middle = [] if self.num_classes is not None: - assert y.shape[0] == x.shape[0] + if y is None: + raise ValueError("y is None, did you try using a controlnet for SDXL on SD1?") emb = emb + self.label_emb(y) h = x From f66183a54142be693ab858e9f1f06ed62439a92e Mon Sep 17 00:00:00 2001 From: guill Date: Sun, 23 Nov 2025 22:56:20 -0800 Subject: [PATCH 0907/1073] [fix] Fixes non-async public API access (#10857) It looks like the synchronous version of the public API broke due to an addition of `from __future__ import annotations`. This change updates the async-to-sync adapter to work with both types of type annotations. --- comfy_api/internal/async_to_sync.py | 47 ++++++--- tests/execution/test_public_api.py | 153 ++++++++++++++++++++++++++++ 2 files changed, 184 insertions(+), 16 deletions(-) create mode 100644 tests/execution/test_public_api.py diff --git a/comfy_api/internal/async_to_sync.py b/comfy_api/internal/async_to_sync.py index f5f805a62..257ade82e 100644 --- a/comfy_api/internal/async_to_sync.py +++ b/comfy_api/internal/async_to_sync.py @@ -8,7 +8,7 @@ import os import textwrap import threading from enum import Enum -from typing import Optional, Type, get_origin, get_args +from typing import Optional, Type, get_origin, get_args, get_type_hints class TypeTracker: @@ -220,11 +220,18 @@ class AsyncToSyncConverter: self._async_instance = async_class(*args, **kwargs) # Handle annotated class attributes (like execution: Execution) - # Get all annotations from the class hierarchy - all_annotations = {} - for base_class in reversed(inspect.getmro(async_class)): - if hasattr(base_class, "__annotations__"): - all_annotations.update(base_class.__annotations__) + # Get all annotations from the class hierarchy and resolve string annotations + try: + # get_type_hints resolves string annotations to actual type objects + # This handles classes using 'from __future__ import annotations' + all_annotations = get_type_hints(async_class) + except Exception: + # Fallback to raw annotations if get_type_hints fails + # (e.g., for undefined forward references) + all_annotations = {} + for base_class in reversed(inspect.getmro(async_class)): + if hasattr(base_class, "__annotations__"): + all_annotations.update(base_class.__annotations__) # For each annotated attribute, check if it needs to be created or wrapped for attr_name, attr_type in all_annotations.items(): @@ -625,15 +632,19 @@ class AsyncToSyncConverter: """Extract class attributes that are classes themselves.""" class_attributes = [] + # Get resolved type hints to handle string annotations + try: + type_hints = get_type_hints(async_class) + except Exception: + type_hints = {} + # Look for class attributes that are classes for name, attr in sorted(inspect.getmembers(async_class)): if isinstance(attr, type) and not name.startswith("_"): class_attributes.append((name, attr)) - elif ( - hasattr(async_class, "__annotations__") - and name in async_class.__annotations__ - ): - annotation = async_class.__annotations__[name] + elif name in type_hints: + # Use resolved type hint instead of raw annotation + annotation = type_hints[name] if isinstance(annotation, type): class_attributes.append((name, annotation)) @@ -908,11 +919,15 @@ class AsyncToSyncConverter: attribute_mappings = {} # 
First check annotations for typed attributes (including from parent classes) - # Collect all annotations from the class hierarchy - all_annotations = {} - for base_class in reversed(inspect.getmro(async_class)): - if hasattr(base_class, "__annotations__"): - all_annotations.update(base_class.__annotations__) + # Resolve string annotations to actual types + try: + all_annotations = get_type_hints(async_class) + except Exception: + # Fallback to raw annotations + all_annotations = {} + for base_class in reversed(inspect.getmro(async_class)): + if hasattr(base_class, "__annotations__"): + all_annotations.update(base_class.__annotations__) for attr_name, attr_type in sorted(all_annotations.items()): for class_name, class_type in class_attributes: diff --git a/tests/execution/test_public_api.py b/tests/execution/test_public_api.py new file mode 100644 index 000000000..52bc2fcd8 --- /dev/null +++ b/tests/execution/test_public_api.py @@ -0,0 +1,153 @@ +""" +Tests for public ComfyAPI and ComfyAPISync functions. + +These tests verify that the public API methods work correctly in both sync and async contexts, +ensuring that the sync wrapper generation (via get_type_hints() in async_to_sync.py) correctly +handles string annotations from 'from __future__ import annotations'. +""" + +import pytest +import time +import subprocess +import torch +from pytest import fixture +from comfy_execution.graph_utils import GraphBuilder +from tests.execution.test_execution import ComfyClient + + +@pytest.mark.execution +class TestPublicAPI: + """Test suite for public ComfyAPI and ComfyAPISync methods.""" + + @fixture(scope="class", autouse=True) + def _server(self, args_pytest): + """Start ComfyUI server for testing.""" + pargs = [ + 'python', 'main.py', + '--output-directory', args_pytest["output_dir"], + '--listen', args_pytest["listen"], + '--port', str(args_pytest["port"]), + '--extra-model-paths-config', 'tests/execution/extra_model_paths.yaml', + '--cpu', + ] + p = subprocess.Popen(pargs) + yield + p.kill() + torch.cuda.empty_cache() + + @fixture(scope="class", autouse=True) + def shared_client(self, args_pytest, _server): + """Create shared client with connection retry.""" + client = ComfyClient() + n_tries = 5 + for i in range(n_tries): + time.sleep(4) + try: + client.connect(listen=args_pytest["listen"], port=args_pytest["port"]) + break + except ConnectionRefusedError: + if i == n_tries - 1: + raise + yield client + del client + torch.cuda.empty_cache() + + @fixture + def client(self, shared_client, request): + """Set test name for each test.""" + shared_client.set_test_name(f"public_api[{request.node.name}]") + yield shared_client + + @fixture + def builder(self, request): + """Create GraphBuilder for each test.""" + yield GraphBuilder(prefix=request.node.name) + + def test_sync_progress_update_executes(self, client: ComfyClient, builder: GraphBuilder): + """Test that TestSyncProgressUpdate executes without errors. + + This test validates that api_sync.execution.set_progress() works correctly, + which is the primary code path fixed by adding get_type_hints() to async_to_sync.py. 
+ """ + g = builder + image = g.node("StubImage", content="BLACK", height=256, width=256, batch_size=1) + + # Use TestSyncProgressUpdate with short sleep + progress_node = g.node("TestSyncProgressUpdate", + value=image.out(0), + sleep_seconds=0.5) + output = g.node("SaveImage", images=progress_node.out(0)) + + # Execute workflow + result = client.run(g) + + # Verify execution + assert result.did_run(progress_node), "Progress node should have executed" + assert result.did_run(output), "Output node should have executed" + + # Verify output + images = result.get_images(output) + assert len(images) == 1, "Should have produced 1 image" + + def test_async_progress_update_executes(self, client: ComfyClient, builder: GraphBuilder): + """Test that TestAsyncProgressUpdate executes without errors. + + This test validates that await api.execution.set_progress() works correctly + in async contexts. + """ + g = builder + image = g.node("StubImage", content="WHITE", height=256, width=256, batch_size=1) + + # Use TestAsyncProgressUpdate with short sleep + progress_node = g.node("TestAsyncProgressUpdate", + value=image.out(0), + sleep_seconds=0.5) + output = g.node("SaveImage", images=progress_node.out(0)) + + # Execute workflow + result = client.run(g) + + # Verify execution + assert result.did_run(progress_node), "Async progress node should have executed" + assert result.did_run(output), "Output node should have executed" + + # Verify output + images = result.get_images(output) + assert len(images) == 1, "Should have produced 1 image" + + def test_sync_and_async_progress_together(self, client: ComfyClient, builder: GraphBuilder): + """Test both sync and async progress updates in same workflow. + + This test ensures that both ComfyAPISync and ComfyAPI can coexist and work + correctly in the same workflow execution. 
+ """ + g = builder + image1 = g.node("StubImage", content="BLACK", height=256, width=256, batch_size=1) + image2 = g.node("StubImage", content="WHITE", height=256, width=256, batch_size=1) + + # Use both types of progress nodes + sync_progress = g.node("TestSyncProgressUpdate", + value=image1.out(0), + sleep_seconds=0.3) + async_progress = g.node("TestAsyncProgressUpdate", + value=image2.out(0), + sleep_seconds=0.3) + + # Create outputs + output1 = g.node("SaveImage", images=sync_progress.out(0)) + output2 = g.node("SaveImage", images=async_progress.out(0)) + + # Execute workflow + result = client.run(g) + + # Both should execute successfully + assert result.did_run(sync_progress), "Sync progress node should have executed" + assert result.did_run(async_progress), "Async progress node should have executed" + assert result.did_run(output1), "First output node should have executed" + assert result.did_run(output2), "Second output node should have executed" + + # Verify outputs + images1 = result.get_images(output1) + images2 = result.get_images(output2) + assert len(images1) == 1, "Should have produced 1 image from sync node" + assert len(images2) == 1, "Should have produced 1 image from async node" From 3bd71554a2df14b862cc5e1e875df37ba24af1ac Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 24 Nov 2025 19:48:37 +0200 Subject: [PATCH 0908/1073] fix(api-nodes): edge cases in responses for Gemini models (#10860) --- comfy_api_nodes/apis/gemini_api.py | 6 +++--- comfy_api_nodes/nodes_gemini.py | 21 +++++++++++---------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/comfy_api_nodes/apis/gemini_api.py b/comfy_api_nodes/apis/gemini_api.py index 710f173f1..d34590d28 100644 --- a/comfy_api_nodes/apis/gemini_api.py +++ b/comfy_api_nodes/apis/gemini_api.py @@ -113,9 +113,9 @@ class GeminiGenerationConfig(BaseModel): maxOutputTokens: int | None = Field(None, ge=16, le=8192) seed: int | None = Field(None) stopSequences: list[str] | None = Field(None) - temperature: float | None = Field(1, ge=0.0, le=2.0) - topK: int | None = Field(40, ge=1) - topP: float | None = Field(0.95, ge=0.0, le=1.0) + temperature: float | None = Field(None, ge=0.0, le=2.0) + topK: int | None = Field(None, ge=1) + topP: float | None = Field(None, ge=0.0, le=1.0) class GeminiImageConfig(BaseModel): diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index be752c885..938a20f84 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -104,14 +104,14 @@ def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Litera List of response parts matching the requested type. """ if response.candidates is None: - if response.promptFeedback.blockReason: + if response.promptFeedback and response.promptFeedback.blockReason: feedback = response.promptFeedback raise ValueError( f"Gemini API blocked the request. Reason: {feedback.blockReason} ({feedback.blockReasonMessage})" ) - raise NotImplementedError( - "Gemini returned no response candidates. " - "Please report to ComfyUI repository with the example of workflow to reproduce this." + raise ValueError( + "Gemini API returned no response candidates. If you are using the `IMAGE` modality, " + "try changing it to `IMAGE+TEXT` to view the model's reasoning and understand why image generation failed." 
         )
     parts = []
     for part in response.candidates[0].content.parts:
@@ -182,11 +182,12 @@ def calculate_tokens_price(response: GeminiGenerateContentResponse) -> float | N
     else:
         return None
     final_price = response.usageMetadata.promptTokenCount * input_tokens_price
-    for i in response.usageMetadata.candidatesTokensDetails:
-        if i.modality == Modality.IMAGE:
-            final_price += output_image_tokens_price * i.tokenCount  # for Nano Banana models
-        else:
-            final_price += output_text_tokens_price * i.tokenCount
+    if response.usageMetadata.candidatesTokensDetails:
+        for i in response.usageMetadata.candidatesTokensDetails:
+            if i.modality == Modality.IMAGE:
+                final_price += output_image_tokens_price * i.tokenCount  # for Nano Banana models
+            else:
+                final_price += output_text_tokens_price * i.tokenCount
     if response.usageMetadata.thoughtsTokenCount:
         final_price += output_text_tokens_price * response.usageMetadata.thoughtsTokenCount
     return final_price / 1_000_000.0
@@ -645,7 +646,7 @@ class GeminiImage2(IO.ComfyNode):
                     options=["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"],
                     default="auto",
                     tooltip="If set to 'auto', matches your input image's aspect ratio; "
-                    "if no image is provided, generates a 1:1 square.",
+                    "if no image is provided, a 16:9 image is usually generated.",
                 ),
                 IO.Combo.Input(
                     "resolution",

From 1286fcfe40b98052e4edbe9a02f12ad89ac74924 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Mon, 24 Nov 2025 20:24:29 +0200
Subject: [PATCH 0909/1073] add get_frame_count and get_frame_rate methods to VideoInput class (#10851)

---
 comfy_api/latest/_input/video_types.py      | 28 ++++++++
 comfy_api/latest/_input_impl/video_types.py | 72 +++++++++++++++++++++
 comfy_api_nodes/nodes_topaz.py              | 15 ++---
 3 files changed, 106 insertions(+), 9 deletions(-)

diff --git a/comfy_api/latest/_input/video_types.py b/comfy_api/latest/_input/video_types.py
index a335df4d0..87c81d73a 100644
--- a/comfy_api/latest/_input/video_types.py
+++ b/comfy_api/latest/_input/video_types.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 from abc import ABC, abstractmethod
+from fractions import Fraction
 from typing import Optional, Union, IO
 import io
 import av
@@ -72,6 +73,33 @@ class VideoInput(ABC):
         frame_count = components.images.shape[0]
         return float(frame_count / components.frame_rate)
 
+    def get_frame_count(self) -> int:
+        """
+        Returns the number of frames in the video.
+
+        Default implementation uses :meth:`get_components`, which may require
+        loading all frames into memory. File-based implementations should
+        override this method and use container/stream metadata instead.
+
+        Returns:
+            Total number of frames as an integer.
+        """
+        return int(self.get_components().images.shape[0])
+
+    def get_frame_rate(self) -> Fraction:
+        """
+        Returns the frame rate of the video.
+
+        Default implementation materializes the video into memory via
+        `get_components()`. Subclasses that can inspect the underlying
+        container (e.g. `VideoFromFile`) should override this with a more
+        efficient implementation.
+
+        Returns:
+            Frame rate as a Fraction.
+        """
+        return self.get_components().frame_rate
+
     def get_container_format(self) -> str:
         """
         Returns the container format of the video (e.g., 'mp4', 'mov', 'avi').
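For orientation, a minimal sketch of how the two accessors added to VideoInput above are meant to be consumed; it assumes a local example.mp4 and the VideoFromFile wrapper exported from comfy_api.input_impl (the file name is illustrative, not part of this patch):

    from comfy_api.input_impl import VideoFromFile

    video = VideoFromFile("example.mp4")  # illustrative input file
    frames = video.get_frame_count()      # metadata-based for file-backed videos
    rate = video.get_frame_rate()         # a Fraction, e.g. Fraction(30000, 1001) for 29.97 fps
    duration_sec = frames / float(rate)   # consistent with get_duration()

Callers such as the Topaz node changed further below can use these instead of decoding every frame through get_components().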
diff --git a/comfy_api/latest/_input_impl/video_types.py b/comfy_api/latest/_input_impl/video_types.py index f646504c8..bde37f90a 100644 --- a/comfy_api/latest/_input_impl/video_types.py +++ b/comfy_api/latest/_input_impl/video_types.py @@ -121,6 +121,71 @@ class VideoFromFile(VideoInput): raise ValueError(f"Could not determine duration for file '{self.__file}'") + def get_frame_count(self) -> int: + """ + Returns the number of frames in the video without materializing them as + torch tensors. + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + + with av.open(self.__file, mode="r") as container: + video_stream = self._get_first_video_stream(container) + # 1. Prefer the frames field if available + if video_stream.frames and video_stream.frames > 0: + return int(video_stream.frames) + + # 2. Try to estimate from duration and average_rate using only metadata + if container.duration is not None and video_stream.average_rate: + duration_seconds = float(container.duration / av.time_base) + estimated_frames = int(round(duration_seconds * float(video_stream.average_rate))) + if estimated_frames > 0: + return estimated_frames + + if ( + getattr(video_stream, "duration", None) is not None + and getattr(video_stream, "time_base", None) is not None + and video_stream.average_rate + ): + duration_seconds = float(video_stream.duration * video_stream.time_base) + estimated_frames = int(round(duration_seconds * float(video_stream.average_rate))) + if estimated_frames > 0: + return estimated_frames + + # 3. Last resort: decode frames and count them (streaming) + frame_count = 0 + container.seek(0) + for packet in container.demux(video_stream): + for _ in packet.decode(): + frame_count += 1 + + if frame_count == 0: + raise ValueError(f"Could not determine frame count for file '{self.__file}'") + return frame_count + + def get_frame_rate(self) -> Fraction: + """ + Returns the average frame rate of the video using container metadata + without decoding all frames. + """ + if isinstance(self.__file, io.BytesIO): + self.__file.seek(0) + + with av.open(self.__file, mode="r") as container: + video_stream = self._get_first_video_stream(container) + # Preferred: use PyAV's average_rate (usually already a Fraction-like) + if video_stream.average_rate: + return Fraction(video_stream.average_rate) + + # Fallback: estimate from frames + duration if available + if video_stream.frames and container.duration: + duration_seconds = float(container.duration / av.time_base) + if duration_seconds > 0: + return Fraction(video_stream.frames / duration_seconds).limit_denominator() + + # Last resort: match get_components_internal default + return Fraction(1) + def get_container_format(self) -> str: """ Returns the container format of the video (e.g., 'mp4', 'mov', 'avi'). @@ -238,6 +303,13 @@ class VideoFromFile(VideoInput): packet.stream = stream_map[packet.stream] output_container.mux(packet) + def _get_first_video_stream(self, container: InputContainer): + video_stream = next((s for s in container.streams if s.type == "video"), None) + if video_stream is None: + raise ValueError(f"No video stream found in file '{self.__file}'") + return video_stream + + class VideoFromComponents(VideoInput): """ Class representing video input from tensors. 
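The metadata fallback chain in VideoFromFile above follows a common PyAV pattern: prefer stream metadata, then estimate from duration and rate, and only decode as a last resort. A standalone sketch of the same idea, assuming a local example.mp4 (illustrative only):

    import av
    from fractions import Fraction

    with av.open("example.mp4") as container:
        stream = next(s for s in container.streams if s.type == "video")
        rate = Fraction(stream.average_rate) if stream.average_rate else Fraction(1)
        frames = stream.frames or 0  # 0 when the container does not record a frame count
        if frames == 0 and container.duration is not None:
            # container.duration is in av.time_base units; estimate count as duration * rate
            frames = round(float(container.duration / av.time_base) * float(rate))

Only when these metadata paths fail does the implementation above fall back to demuxing the stream and counting decoded frames.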
diff --git a/comfy_api_nodes/nodes_topaz.py b/comfy_api_nodes/nodes_topaz.py index 79c7bf43d..f522756e5 100644 --- a/comfy_api_nodes/nodes_topaz.py +++ b/comfy_api_nodes/nodes_topaz.py @@ -5,8 +5,7 @@ import aiohttp import torch from typing_extensions import override -from comfy_api.input.video_types import VideoInput -from comfy_api.latest import IO, ComfyExtension +from comfy_api.latest import IO, ComfyExtension, Input from comfy_api_nodes.apis import topaz_api from comfy_api_nodes.util import ( ApiEndpoint, @@ -282,7 +281,7 @@ class TopazVideoEnhance(IO.ComfyNode): @classmethod async def execute( cls, - video: VideoInput, + video: Input.Video, upscaler_enabled: bool, upscaler_model: str, upscaler_resolution: str, @@ -297,12 +296,10 @@ class TopazVideoEnhance(IO.ComfyNode): ) -> IO.NodeOutput: if upscaler_enabled is False and interpolation_enabled is False: raise ValueError("There is nothing to do: both upscaling and interpolation are disabled.") - src_width, src_height = video.get_dimensions() - video_components = video.get_components() - src_frame_rate = int(video_components.frame_rate) - duration_sec = video.get_duration() - estimated_frames = int(duration_sec * src_frame_rate) validate_container_format_is_mp4(video) + src_width, src_height = video.get_dimensions() + src_frame_rate = int(video.get_frame_rate()) + duration_sec = video.get_duration() src_video_stream = video.get_stream_source() target_width = src_width target_height = src_height @@ -338,7 +335,7 @@ class TopazVideoEnhance(IO.ComfyNode): container="mp4", size=get_fs_object_size(src_video_stream), duration=int(duration_sec), - frameCount=estimated_frames, + frameCount=video.get_frame_count(), frameRate=src_frame_rate, resolution=topaz_api.Resolution(width=src_width, height=src_height), ), From 3d1fdaf9f448b34e4eba68bfd8e8de373ec0d22d Mon Sep 17 00:00:00 2001 From: Haoming <73768377+Haoming02@users.noreply.github.com> Date: Tue, 25 Nov 2025 02:30:40 +0800 Subject: [PATCH 0910/1073] block info (#10843) --- comfy/ldm/chroma/model.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index 67bf70eb1..a72f8cc47 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -179,7 +179,10 @@ class Chroma(nn.Module): pe = self.pe_embedder(ids) blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.double_blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.double_blocks): + transformer_options["block_index"] = i if i not in self.skip_mmdit: double_mod = ( self.get_modulations(mod_vectors, "double_img", idx=i), @@ -222,7 +225,10 @@ class Chroma(nn.Module): img = torch.cat((txt, img), 1) + transformer_options["total_blocks"] = len(self.single_blocks) + transformer_options["block_type"] = "single" for i, block in enumerate(self.single_blocks): + transformer_options["block_index"] = i if i not in self.skip_dit: single_mod = self.get_modulations(mod_vectors, "single", idx=i) if ("single_block", i) in blocks_replace: From 6a6d456c88723538e3d0e5e942f78109ece5b73d Mon Sep 17 00:00:00 2001 From: Haoming <73768377+Haoming02@users.noreply.github.com> Date: Tue, 25 Nov 2025 02:38:38 +0800 Subject: [PATCH 0911/1073] block info (#10842) --- comfy/ldm/qwen_image/model.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 427ea19c1..8c75670cd 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -439,7 
+439,10 @@ class QwenImageTransformer2DModel(nn.Module): patches = transformer_options.get("patches", {}) blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.transformer_blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.transformer_blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} From b2ef58e2b17e73ca8cd376a1cdc976518ebbc168 Mon Sep 17 00:00:00 2001 From: Haoming <73768377+Haoming02@users.noreply.github.com> Date: Tue, 25 Nov 2025 02:40:09 +0800 Subject: [PATCH 0912/1073] block info (#10844) --- comfy/ldm/hunyuan_video/model.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py index f75c6e0e1..2749c53f5 100644 --- a/comfy/ldm/hunyuan_video/model.py +++ b/comfy/ldm/hunyuan_video/model.py @@ -389,7 +389,10 @@ class HunyuanVideo(nn.Module): attn_mask = None blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.double_blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.double_blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} @@ -411,7 +414,10 @@ class HunyuanVideo(nn.Module): img = torch.cat((img, txt), 1) + transformer_options["total_blocks"] = len(self.single_blocks) + transformer_options["block_type"] = "single" for i, block in enumerate(self.single_blocks): + transformer_options["block_index"] = i if ("single_block", i) in blocks_replace: def block_wrap(args): out = {} From 22a2644e57530ee40e13486ccd7c953b87072093 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 24 Nov 2025 16:45:54 -0800 Subject: [PATCH 0913/1073] Bump transformers version in requirements.txt (#10869) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8e308cd6c..b7014f956 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ torchvision torchaudio numpy>=1.25.0 einops -transformers>=4.37.2 +transformers>=4.50.3 tokenizers>=0.13.3 sentencepiece safetensors>=0.4.2 From 25022e0b0965975b35bcaf28b153184d60a4f9de Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 24 Nov 2025 22:48:53 -0800 Subject: [PATCH 0914/1073] Cleanup and fix issues with text encoder quants. 
(#10872) --- comfy/model_patcher.py | 3 +- comfy/ops.py | 168 +++++++++--------- comfy/quant_ops.py | 12 ++ comfy/sd.py | 9 +- comfy/sd1_clip.py | 18 +- comfy/text_encoders/hunyuan_video.py | 3 + .../comfy_quant/test_mixed_precision.py | 17 +- 7 files changed, 128 insertions(+), 102 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index cf1b0d441..6551ced5a 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -231,7 +231,6 @@ class ModelPatcher: self.object_patches_backup = {} self.weight_wrapper_patches = {} self.model_options = {"transformer_options":{}} - self.model_size() self.load_device = load_device self.offload_device = offload_device self.weight_inplace_update = weight_inplace_update @@ -286,7 +285,7 @@ class ModelPatcher: return self.model.lowvram_patch_counter def clone(self): - n = self.__class__(self.model, self.load_device, self.offload_device, self.size, weight_inplace_update=self.weight_inplace_update) + n = self.__class__(self.model, self.load_device, self.offload_device, self.model_size(), weight_inplace_update=self.weight_inplace_update) n.patches = {} for k in self.patches: n.patches[k] = self.patches[k][:] diff --git a/comfy/ops.py b/comfy/ops.py index 640622fd1..af185ec24 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -540,113 +540,115 @@ if CUBLAS_IS_AVAILABLE: # ============================================================================== from .quant_ops import QuantizedTensor, QUANT_ALGOS -class MixedPrecisionOps(disable_weight_init): - _layer_quant_config = {} - _compute_dtype = torch.bfloat16 - class Linear(torch.nn.Module, CastWeightBiasOp): - def __init__( - self, - in_features: int, - out_features: int, - bias: bool = True, - device=None, - dtype=None, - ) -> None: - super().__init__() +def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, full_precision_mm=False): + class MixedPrecisionOps(manual_cast): + _layer_quant_config = layer_quant_config + _compute_dtype = compute_dtype + _full_precision_mm = full_precision_mm - self.factory_kwargs = {"device": device, "dtype": MixedPrecisionOps._compute_dtype} - # self.factory_kwargs = {"device": device, "dtype": dtype} + class Linear(torch.nn.Module, CastWeightBiasOp): + def __init__( + self, + in_features: int, + out_features: int, + bias: bool = True, + device=None, + dtype=None, + ) -> None: + super().__init__() - self.in_features = in_features - self.out_features = out_features - if bias: - self.bias = torch.nn.Parameter(torch.empty(out_features, **self.factory_kwargs)) - else: - self.register_parameter("bias", None) + self.factory_kwargs = {"device": device, "dtype": MixedPrecisionOps._compute_dtype} + # self.factory_kwargs = {"device": device, "dtype": dtype} - self.tensor_class = None + self.in_features = in_features + self.out_features = out_features + if bias: + self.bias = torch.nn.Parameter(torch.empty(out_features, **self.factory_kwargs)) + else: + self.register_parameter("bias", None) - def reset_parameters(self): - return None + self.tensor_class = None + self._full_precision_mm = MixedPrecisionOps._full_precision_mm - def _load_from_state_dict(self, state_dict, prefix, local_metadata, - strict, missing_keys, unexpected_keys, error_msgs): + def reset_parameters(self): + return None - device = self.factory_kwargs["device"] - layer_name = prefix.rstrip('.') - weight_key = f"{prefix}weight" - weight = state_dict.pop(weight_key, None) - if weight is None: - raise ValueError(f"Missing weight for layer {layer_name}") + def 
_load_from_state_dict(self, state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, error_msgs): - manually_loaded_keys = [weight_key] + device = self.factory_kwargs["device"] + layer_name = prefix.rstrip('.') + weight_key = f"{prefix}weight" + weight = state_dict.pop(weight_key, None) + if weight is None: + raise ValueError(f"Missing weight for layer {layer_name}") - if layer_name not in MixedPrecisionOps._layer_quant_config: - self.weight = torch.nn.Parameter(weight.to(device=device, dtype=MixedPrecisionOps._compute_dtype), requires_grad=False) - else: - quant_format = MixedPrecisionOps._layer_quant_config[layer_name].get("format", None) - if quant_format is None: - raise ValueError(f"Unknown quantization format for layer {layer_name}") + manually_loaded_keys = [weight_key] - qconfig = QUANT_ALGOS[quant_format] - self.layout_type = qconfig["comfy_tensor_layout"] + if layer_name not in MixedPrecisionOps._layer_quant_config: + self.weight = torch.nn.Parameter(weight.to(device=device, dtype=MixedPrecisionOps._compute_dtype), requires_grad=False) + else: + quant_format = MixedPrecisionOps._layer_quant_config[layer_name].get("format", None) + if quant_format is None: + raise ValueError(f"Unknown quantization format for layer {layer_name}") - weight_scale_key = f"{prefix}weight_scale" - layout_params = { - 'scale': state_dict.pop(weight_scale_key, None), - 'orig_dtype': MixedPrecisionOps._compute_dtype, - 'block_size': qconfig.get("group_size", None), - } - if layout_params['scale'] is not None: - manually_loaded_keys.append(weight_scale_key) + qconfig = QUANT_ALGOS[quant_format] + self.layout_type = qconfig["comfy_tensor_layout"] - self.weight = torch.nn.Parameter( - QuantizedTensor(weight.to(device=device), self.layout_type, layout_params), - requires_grad=False - ) + weight_scale_key = f"{prefix}weight_scale" + layout_params = { + 'scale': state_dict.pop(weight_scale_key, None), + 'orig_dtype': MixedPrecisionOps._compute_dtype, + 'block_size': qconfig.get("group_size", None), + } + if layout_params['scale'] is not None: + manually_loaded_keys.append(weight_scale_key) - for param_name in qconfig["parameters"]: - param_key = f"{prefix}{param_name}" - _v = state_dict.pop(param_key, None) - if _v is None: - continue - setattr(self, param_name, torch.nn.Parameter(_v.to(device=device), requires_grad=False)) - manually_loaded_keys.append(param_key) + self.weight = torch.nn.Parameter( + QuantizedTensor(weight.to(device=device), self.layout_type, layout_params), + requires_grad=False + ) - super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) + for param_name in qconfig["parameters"]: + param_key = f"{prefix}{param_name}" + _v = state_dict.pop(param_key, None) + if _v is None: + continue + setattr(self, param_name, torch.nn.Parameter(_v.to(device=device), requires_grad=False)) + manually_loaded_keys.append(param_key) - for key in manually_loaded_keys: - if key in missing_keys: - missing_keys.remove(key) + super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) - def _forward(self, input, weight, bias): - return torch.nn.functional.linear(input, weight, bias) + for key in manually_loaded_keys: + if key in missing_keys: + missing_keys.remove(key) - def forward_comfy_cast_weights(self, input): - weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) - x = self._forward(input, weight, bias) - uncast_bias_weight(self, weight, bias, 
offload_stream) - return x + def _forward(self, input, weight, bias): + return torch.nn.functional.linear(input, weight, bias) - def forward(self, input, *args, **kwargs): - run_every_op() + def forward_comfy_cast_weights(self, input): + weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) + x = self._forward(input, weight, bias) + uncast_bias_weight(self, weight, bias, offload_stream) + return x - if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: - return self.forward_comfy_cast_weights(input, *args, **kwargs) - if (getattr(self, 'layout_type', None) is not None and - getattr(self, 'input_scale', None) is not None and - not isinstance(input, QuantizedTensor)): - input = QuantizedTensor.from_float(input, self.layout_type, scale=self.input_scale, dtype=self.weight.dtype) - return self._forward(input, self.weight, self.bias) + def forward(self, input, *args, **kwargs): + run_every_op() + if self._full_precision_mm or self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: + return self.forward_comfy_cast_weights(input, *args, **kwargs) + if (getattr(self, 'layout_type', None) is not None and + getattr(self, 'input_scale', None) is not None and + not isinstance(input, QuantizedTensor)): + input = QuantizedTensor.from_float(input, self.layout_type, scale=self.input_scale, dtype=self.weight.dtype) + return self._forward(input, self.weight, self.bias) + return MixedPrecisionOps def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, scaled_fp8=None, model_config=None): if model_config and hasattr(model_config, 'layer_quant_config') and model_config.layer_quant_config: - MixedPrecisionOps._layer_quant_config = model_config.layer_quant_config - MixedPrecisionOps._compute_dtype = compute_dtype logging.info(f"Using mixed precision operations: {len(model_config.layer_quant_config)} quantized layers") - return MixedPrecisionOps + return mixed_precision_ops(model_config.layer_quant_config, compute_dtype) fp8_compute = comfy.model_management.supports_fp8_compute(load_device) if scaled_fp8 is not None: diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index 1d058bece..905b4729e 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -338,6 +338,18 @@ def generic_copy_(func, args, kwargs): return func(*args, **kwargs) +@register_generic_util(torch.ops.aten.to.dtype) +def generic_to_dtype(func, args, kwargs): + """Handle .to(dtype) calls - dtype conversion only.""" + src = args[0] + if isinstance(src, QuantizedTensor): + # For dtype-only conversion, just change the orig_dtype, no real cast is needed + target_dtype = args[1] if len(args) > 1 else kwargs.get('dtype') + src._layout_params["orig_dtype"] = target_dtype + return src + return func(*args, **kwargs) + + @register_generic_util(torch.ops.aten._has_compatible_shallow_copy_type.default) def generic_has_compatible_shallow_copy_type(func, args, kwargs): return True diff --git a/comfy/sd.py b/comfy/sd.py index dc0905ada..b6df0bd61 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -917,7 +917,12 @@ class CLIPType(Enum): def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): clip_data = [] for p in ckpt_paths: - clip_data.append(comfy.utils.load_torch_file(p, safe_load=True)) + sd, metadata = comfy.utils.load_torch_file(p, safe_load=True, return_metadata=True) + if metadata is not None: + quant_metadata = metadata.get("_quantization_metadata", 
None) + if quant_metadata is not None: + sd["_quantization_metadata"] = quant_metadata + clip_data.append(sd) return load_text_encoder_state_dicts(clip_data, embedding_directory=embedding_directory, clip_type=clip_type, model_options=model_options) @@ -1142,6 +1147,8 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip parameters = 0 for c in clip_data: + if "_quantization_metadata" in c: + c.pop("_quantization_metadata") parameters += comfy.utils.calculate_parameters(c) tokenizer_data, model_options = comfy.text_encoders.long_clipl.model_options_long_clip(c, tokenizer_data, model_options) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 3066de2d7..8f509bab1 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -109,13 +109,23 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): operations = model_options.get("custom_operations", None) scaled_fp8 = None + quantization_metadata = model_options.get("quantization_metadata", None) if operations is None: - scaled_fp8 = model_options.get("scaled_fp8", None) - if scaled_fp8 is not None: - operations = comfy.ops.scaled_fp8_ops(fp8_matrix_mult=False, override_dtype=scaled_fp8) + layer_quant_config = None + if quantization_metadata is not None: + layer_quant_config = json.loads(quantization_metadata).get("layers", None) + + if layer_quant_config is not None: + operations = comfy.ops.mixed_precision_ops(layer_quant_config, dtype, full_precision_mm=True) + logging.info(f"Using MixedPrecisionOps for text encoder: {len(layer_quant_config)} quantized layers") else: - operations = comfy.ops.manual_cast + # Fallback to scaled_fp8_ops for backward compatibility + scaled_fp8 = model_options.get("scaled_fp8", None) + if scaled_fp8 is not None: + operations = comfy.ops.scaled_fp8_ops(fp8_matrix_mult=False, override_dtype=scaled_fp8) + else: + operations = comfy.ops.manual_cast self.operations = operations self.transformer = model_class(config, dtype, device, self.operations) diff --git a/comfy/text_encoders/hunyuan_video.py b/comfy/text_encoders/hunyuan_video.py index 557094f49..0110517bb 100644 --- a/comfy/text_encoders/hunyuan_video.py +++ b/comfy/text_encoders/hunyuan_video.py @@ -18,6 +18,9 @@ def llama_detect(state_dict, prefix=""): if scaled_fp8_key in state_dict: out["llama_scaled_fp8"] = state_dict[scaled_fp8_key].dtype + if "_quantization_metadata" in state_dict: + out["llama_quantization_metadata"] = state_dict["_quantization_metadata"] + return out diff --git a/tests-unit/comfy_quant/test_mixed_precision.py b/tests-unit/comfy_quant/test_mixed_precision.py index f8d1fd04e..63361309f 100644 --- a/tests-unit/comfy_quant/test_mixed_precision.py +++ b/tests-unit/comfy_quant/test_mixed_precision.py @@ -37,11 +37,8 @@ class TestMixedPrecisionOps(unittest.TestCase): def test_all_layers_standard(self): """Test that model with no quantization works normally""" - # Configure no quantization - ops.MixedPrecisionOps._layer_quant_config = {} - # Create model - model = SimpleModel(operations=ops.MixedPrecisionOps) + model = SimpleModel(operations=ops.mixed_precision_ops({})) # Initialize weights manually model.layer1.weight = torch.nn.Parameter(torch.randn(20, 10, dtype=torch.bfloat16)) @@ -76,7 +73,6 @@ class TestMixedPrecisionOps(unittest.TestCase): "params": {} } } - ops.MixedPrecisionOps._layer_quant_config = layer_quant_config # Create state dict with mixed precision fp8_weight1 = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn) @@ -99,7 +95,7 @@ class 
TestMixedPrecisionOps(unittest.TestCase): } # Create model and load state dict (strict=False because custom loading pops keys) - model = SimpleModel(operations=ops.MixedPrecisionOps) + model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) model.load_state_dict(state_dict, strict=False) # Verify weights are wrapped in QuantizedTensor @@ -132,7 +128,6 @@ class TestMixedPrecisionOps(unittest.TestCase): "params": {} } } - ops.MixedPrecisionOps._layer_quant_config = layer_quant_config # Create and load model fp8_weight = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn) @@ -146,7 +141,7 @@ class TestMixedPrecisionOps(unittest.TestCase): "layer3.bias": torch.randn(40, dtype=torch.bfloat16), } - model = SimpleModel(operations=ops.MixedPrecisionOps) + model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) model.load_state_dict(state_dict1, strict=False) # Save state dict @@ -170,7 +165,6 @@ class TestMixedPrecisionOps(unittest.TestCase): "params": {} } } - ops.MixedPrecisionOps._layer_quant_config = layer_quant_config # Create and load model fp8_weight = torch.randn(20, 10, dtype=torch.float32).to(torch.float8_e4m3fn) @@ -184,7 +178,7 @@ class TestMixedPrecisionOps(unittest.TestCase): "layer3.bias": torch.randn(40, dtype=torch.bfloat16), } - model = SimpleModel(operations=ops.MixedPrecisionOps) + model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) model.load_state_dict(state_dict, strict=False) # Add a weight function (simulating LoRA) @@ -210,7 +204,6 @@ class TestMixedPrecisionOps(unittest.TestCase): "params": {} } } - ops.MixedPrecisionOps._layer_quant_config = layer_quant_config # Create state dict state_dict = { @@ -223,7 +216,7 @@ class TestMixedPrecisionOps(unittest.TestCase): } # Load should raise KeyError for unknown format in QUANT_FORMAT_MIXINS - model = SimpleModel(operations=ops.MixedPrecisionOps) + model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) with self.assertRaises(KeyError): model.load_state_dict(state_dict, strict=False) From b6805429b9c2f3aa919035bea849ecd1de3ac8e4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 24 Nov 2025 23:48:20 -0800 Subject: [PATCH 0915/1073] Allow pinning quantized tensors. 
(#10873) --- comfy/model_management.py | 6 +++++- comfy/quant_ops.py | 8 ++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index a21df54b3..a9327ac80 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1098,13 +1098,14 @@ if not args.disable_pinned_memory: MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95 logging.info("Enabled pinned memory {}".format(MAX_PINNED_MEMORY // (1024 * 1024))) +PINNING_ALLOWED_TYPES = set(["Parameter", "QuantizedTensor"]) def pin_memory(tensor): global TOTAL_PINNED_MEMORY if MAX_PINNED_MEMORY <= 0: return False - if type(tensor) is not torch.nn.parameter.Parameter: + if type(tensor).__name__ not in PINNING_ALLOWED_TYPES: return False if not is_device_cpu(tensor.device): @@ -1124,6 +1125,9 @@ def pin_memory(tensor): return False ptr = tensor.data_ptr() + if ptr == 0: + return False + if torch.cuda.cudart().cudaHostRegister(ptr, size, 1) == 0: PINNED_MEMORY[ptr] = size TOTAL_PINNED_MEMORY += size diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index 905b4729e..e938144a7 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -228,6 +228,14 @@ class QuantizedTensor(torch.Tensor): new_kwargs = dequant_arg(kwargs) return func(*new_args, **new_kwargs) + def data_ptr(self): + return self._qdata.data_ptr() + + def is_pinned(self): + return self._qdata.is_pinned() + + def is_contiguous(self): + return self._qdata.is_contiguous() # ============================================================================== # Generic Utilities (Layout-Agnostic Operations) From acfaa5c4a132e1c01bc9d94e76b0d667c899bfd1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 24 Nov 2025 23:55:49 -0800 Subject: [PATCH 0916/1073] Don't try fp8 matrix mult in quantized ops if not supported by hardware. 
(#10874) --- comfy/ops.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index af185ec24..785aa1c9f 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -646,11 +646,12 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful return MixedPrecisionOps def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, scaled_fp8=None, model_config=None): + fp8_compute = comfy.model_management.supports_fp8_compute(load_device) # TODO: if we support more ops this needs to be more granular + if model_config and hasattr(model_config, 'layer_quant_config') and model_config.layer_quant_config: logging.info(f"Using mixed precision operations: {len(model_config.layer_quant_config)} quantized layers") - return mixed_precision_ops(model_config.layer_quant_config, compute_dtype) + return mixed_precision_ops(model_config.layer_quant_config, compute_dtype, full_precision_mm=not fp8_compute) - fp8_compute = comfy.model_management.supports_fp8_compute(load_device) if scaled_fp8 is not None: return scaled_fp8_ops(fp8_matrix_mult=fp8_compute and fp8_optimizations, scale_input=fp8_optimizations, override_dtype=scaled_fp8) From 015a0599d08f1072155b9213d488b73e502fea3c Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 00:23:19 -0800 Subject: [PATCH 0917/1073] I found a case where this is needed (#10875) --- comfy/quant_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index e938144a7..0c16bcf8d 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -405,8 +405,8 @@ class TensorCoreFP8Layout(QuantizedLayout): tensor_scaled = tensor * (1.0 / scale).to(tensor.dtype) # TODO: uncomment this if it's actually needed because the clamp has a small performance penality' - # lp_amax = torch.finfo(dtype).max - # torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled) + lp_amax = torch.finfo(dtype).max + torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled) qdata = tensor_scaled.to(dtype, memory_format=torch.contiguous_format) layout_params = { From 6b573ae0cb11000a0330a35d9e31917c22c874a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 07:50:19 -0800 Subject: [PATCH 0918/1073] Flux 2 (#10879) --- comfy/latent_formats.py | 9 +++ comfy/ldm/flux/layers.py | 90 +++++++++++++++++++-------- comfy/ldm/flux/model.py | 80 ++++++++++++++++++------ comfy/ldm/models/autoencoder.py | 42 +++++++++++++ comfy/model_base.py | 23 +++++-- comfy/model_detection.py | 50 +++++++++++---- comfy/sd.py | 26 +++++++- comfy/supported_models.py | 34 +++++++++- comfy/text_encoders/flux.py | 107 +++++++++++++++++++++++++++++++- comfy/text_encoders/llama.py | 31 +++++++++ comfy_extras/nodes_flux.py | 80 +++++++++++++++++++++++- nodes.py | 2 +- 12 files changed, 506 insertions(+), 68 deletions(-) diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 204fc048d..e98c7d6d8 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -178,6 +178,15 @@ class Flux(SD3): def process_out(self, latent): return (latent / self.scale_factor) + self.shift_factor +class Flux2(LatentFormat): + latent_channels = 128 + + def process_in(self, latent): + return latent + + def process_out(self, latent): + return latent + class Mochi(LatentFormat): latent_channels = 12 
latent_dimensions = 3 diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py index 23150a712..2472ab79c 100644 --- a/comfy/ldm/flux/layers.py +++ b/comfy/ldm/flux/layers.py @@ -48,11 +48,11 @@ def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 10 return embedding class MLPEmbedder(nn.Module): - def __init__(self, in_dim: int, hidden_dim: int, dtype=None, device=None, operations=None): + def __init__(self, in_dim: int, hidden_dim: int, bias=True, dtype=None, device=None, operations=None): super().__init__() - self.in_layer = operations.Linear(in_dim, hidden_dim, bias=True, dtype=dtype, device=device) + self.in_layer = operations.Linear(in_dim, hidden_dim, bias=bias, dtype=dtype, device=device) self.silu = nn.SiLU() - self.out_layer = operations.Linear(hidden_dim, hidden_dim, bias=True, dtype=dtype, device=device) + self.out_layer = operations.Linear(hidden_dim, hidden_dim, bias=bias, dtype=dtype, device=device) def forward(self, x: Tensor) -> Tensor: return self.out_layer(self.silu(self.in_layer(x))) @@ -80,14 +80,14 @@ class QKNorm(torch.nn.Module): class SelfAttention(nn.Module): - def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False, dtype=None, device=None, operations=None): + def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False, proj_bias: bool = True, dtype=None, device=None, operations=None): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.qkv = operations.Linear(dim, dim * 3, bias=qkv_bias, dtype=dtype, device=device) self.norm = QKNorm(head_dim, dtype=dtype, device=device, operations=operations) - self.proj = operations.Linear(dim, dim, dtype=dtype, device=device) + self.proj = operations.Linear(dim, dim, bias=proj_bias, dtype=dtype, device=device) @dataclass @@ -98,11 +98,11 @@ class ModulationOut: class Modulation(nn.Module): - def __init__(self, dim: int, double: bool, dtype=None, device=None, operations=None): + def __init__(self, dim: int, double: bool, bias=True, dtype=None, device=None, operations=None): super().__init__() self.is_double = double self.multiplier = 6 if double else 3 - self.lin = operations.Linear(dim, self.multiplier * dim, bias=True, dtype=dtype, device=device) + self.lin = operations.Linear(dim, self.multiplier * dim, bias=bias, dtype=dtype, device=device) def forward(self, vec: Tensor) -> tuple: if vec.ndim == 2: @@ -129,8 +129,18 @@ def apply_mod(tensor, m_mult, m_add=None, modulation_dims=None): return tensor +class SiLUActivation(nn.Module): + def __init__(self): + super().__init__() + self.gate_fn = nn.SiLU() + + def forward(self, x: Tensor) -> Tensor: + x1, x2 = x.chunk(2, dim=-1) + return self.gate_fn(x1) * x2 + + class DoubleStreamBlock(nn.Module): - def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, modulation=True, dtype=None, device=None, operations=None): + def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, modulation=True, mlp_silu_act=False, proj_bias=True, dtype=None, device=None, operations=None): super().__init__() mlp_hidden_dim = int(hidden_size * mlp_ratio) @@ -142,27 +152,44 @@ class DoubleStreamBlock(nn.Module): self.img_mod = Modulation(hidden_size, double=True, dtype=dtype, device=device, operations=operations) self.img_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, 
qkv_bias=qkv_bias, dtype=dtype, device=device, operations=operations) + self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, dtype=dtype, device=device, operations=operations) self.img_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.img_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), - nn.GELU(approximate="tanh"), - operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), - ) + + if mlp_silu_act: + self.img_mlp = nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim * 2, bias=False, dtype=dtype, device=device), + SiLUActivation(), + operations.Linear(mlp_hidden_dim, hidden_size, bias=False, dtype=dtype, device=device), + ) + else: + self.img_mlp = nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), + nn.GELU(approximate="tanh"), + operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), + ) if self.modulation: self.txt_mod = Modulation(hidden_size, double=True, dtype=dtype, device=device, operations=operations) self.txt_norm1 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, dtype=dtype, device=device, operations=operations) + self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, proj_bias=proj_bias, dtype=dtype, device=device, operations=operations) self.txt_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.txt_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), - nn.GELU(approximate="tanh"), - operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), - ) + + if mlp_silu_act: + self.txt_mlp = nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim * 2, bias=False, dtype=dtype, device=device), + SiLUActivation(), + operations.Linear(mlp_hidden_dim, hidden_size, bias=False, dtype=dtype, device=device), + ) + else: + self.txt_mlp = nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), + nn.GELU(approximate="tanh"), + operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), + ) + self.flipped_img_txt = flipped_img_txt def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, attn_mask=None, modulation_dims_img=None, modulation_dims_txt=None, transformer_options={}): @@ -246,6 +273,8 @@ class SingleStreamBlock(nn.Module): mlp_ratio: float = 4.0, qk_scale: float = None, modulation=True, + mlp_silu_act=False, + bias=True, dtype=None, device=None, operations=None @@ -257,17 +286,24 @@ class SingleStreamBlock(nn.Module): self.scale = qk_scale or head_dim**-0.5 self.mlp_hidden_dim = int(hidden_size * mlp_ratio) + + self.mlp_hidden_dim_first = self.mlp_hidden_dim + if mlp_silu_act: + self.mlp_hidden_dim_first = int(hidden_size * mlp_ratio * 2) + self.mlp_act = SiLUActivation() + else: + self.mlp_act = nn.GELU(approximate="tanh") + # qkv and mlp_in - self.linear1 = operations.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim, dtype=dtype, device=device) + self.linear1 = operations.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim_first, bias=bias, dtype=dtype, device=device) 
# proj and mlp_out - self.linear2 = operations.Linear(hidden_size + self.mlp_hidden_dim, hidden_size, dtype=dtype, device=device) + self.linear2 = operations.Linear(hidden_size + self.mlp_hidden_dim, hidden_size, bias=bias, dtype=dtype, device=device) self.norm = QKNorm(head_dim, dtype=dtype, device=device, operations=operations) self.hidden_size = hidden_size self.pre_norm = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.mlp_act = nn.GELU(approximate="tanh") if modulation: self.modulation = Modulation(hidden_size, double=False, dtype=dtype, device=device, operations=operations) else: @@ -279,7 +315,7 @@ class SingleStreamBlock(nn.Module): else: mod = vec - qkv, mlp = torch.split(self.linear1(apply_mod(self.pre_norm(x), (1 + mod.scale), mod.shift, modulation_dims)), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) + qkv, mlp = torch.split(self.linear1(apply_mod(self.pre_norm(x), (1 + mod.scale), mod.shift, modulation_dims)), [3 * self.hidden_size, self.mlp_hidden_dim_first], dim=-1) q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) del qkv @@ -298,11 +334,11 @@ class SingleStreamBlock(nn.Module): class LastLayer(nn.Module): - def __init__(self, hidden_size: int, patch_size: int, out_channels: int, dtype=None, device=None, operations=None): + def __init__(self, hidden_size: int, patch_size: int, out_channels: int, bias=True, dtype=None, device=None, operations=None): super().__init__() self.norm_final = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - self.linear = operations.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True, dtype=dtype, device=device) - self.adaLN_modulation = nn.Sequential(nn.SiLU(), operations.Linear(hidden_size, 2 * hidden_size, bias=True, dtype=dtype, device=device)) + self.linear = operations.Linear(hidden_size, patch_size * patch_size * out_channels, bias=bias, dtype=dtype, device=device) + self.adaLN_modulation = nn.Sequential(nn.SiLU(), operations.Linear(hidden_size, 2 * hidden_size, bias=bias, dtype=dtype, device=device)) def forward(self, x: Tensor, vec: Tensor, modulation_dims=None) -> Tensor: if vec.ndim == 2: diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index b9d36f202..1a24e6d95 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -15,6 +15,7 @@ from .layers import ( MLPEmbedder, SingleStreamBlock, timestep_embedding, + Modulation ) @dataclass @@ -33,6 +34,11 @@ class FluxParams: patch_size: int qkv_bias: bool guidance_embed: bool + global_modulation: bool = False + mlp_silu_act: bool = False + ops_bias: bool = True + default_ref_method: str = "offset" + ref_index_scale: float = 1.0 class Flux(nn.Module): @@ -58,13 +64,17 @@ class Flux(nn.Module): self.hidden_size = params.hidden_size self.num_heads = params.num_heads self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim) - self.img_in = operations.Linear(self.in_channels, self.hidden_size, bias=True, dtype=dtype, device=device) - self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, dtype=dtype, device=device, operations=operations) - self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size, dtype=dtype, device=device, operations=operations) + self.img_in = operations.Linear(self.in_channels, self.hidden_size, bias=params.ops_bias, dtype=dtype, device=device) + self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, bias=params.ops_bias, 
dtype=dtype, device=device, operations=operations) + if params.vec_in_dim is not None: + self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size, dtype=dtype, device=device, operations=operations) + else: + self.vector_in = None + self.guidance_in = ( - MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, dtype=dtype, device=device, operations=operations) if params.guidance_embed else nn.Identity() + MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, bias=params.ops_bias, dtype=dtype, device=device, operations=operations) if params.guidance_embed else nn.Identity() ) - self.txt_in = operations.Linear(params.context_in_dim, self.hidden_size, dtype=dtype, device=device) + self.txt_in = operations.Linear(params.context_in_dim, self.hidden_size, bias=params.ops_bias, dtype=dtype, device=device) self.double_blocks = nn.ModuleList( [ @@ -73,6 +83,9 @@ class Flux(nn.Module): self.num_heads, mlp_ratio=params.mlp_ratio, qkv_bias=params.qkv_bias, + modulation=params.global_modulation is False, + mlp_silu_act=params.mlp_silu_act, + proj_bias=params.ops_bias, dtype=dtype, device=device, operations=operations ) for _ in range(params.depth) @@ -81,13 +94,30 @@ class Flux(nn.Module): self.single_blocks = nn.ModuleList( [ - SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, dtype=dtype, device=device, operations=operations) + SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, modulation=params.global_modulation is False, mlp_silu_act=params.mlp_silu_act, bias=params.ops_bias, dtype=dtype, device=device, operations=operations) for _ in range(params.depth_single_blocks) ] ) if final_layer: - self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels, dtype=dtype, device=device, operations=operations) + self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels, bias=params.ops_bias, dtype=dtype, device=device, operations=operations) + + if params.global_modulation: + self.double_stream_modulation_img = Modulation( + self.hidden_size, + double=True, + bias=False, + dtype=dtype, device=device, operations=operations + ) + self.double_stream_modulation_txt = Modulation( + self.hidden_size, + double=True, + bias=False, + dtype=dtype, device=device, operations=operations + ) + self.single_stream_modulation = Modulation( + self.hidden_size, double=False, bias=False, dtype=dtype, device=device, operations=operations + ) def forward_orig( self, @@ -103,9 +133,6 @@ class Flux(nn.Module): attn_mask: Tensor = None, ) -> Tensor: - if y is None: - y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype) - patches = transformer_options.get("patches", {}) patches_replace = transformer_options.get("patches_replace", {}) if img.ndim != 3 or txt.ndim != 3: @@ -118,9 +145,17 @@ class Flux(nn.Module): if guidance is not None: vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype)) - vec = vec + self.vector_in(y[:, :self.params.vec_in_dim]) + if self.vector_in is not None: + if y is None: + y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype) + vec = vec + self.vector_in(y[:, :self.params.vec_in_dim]) + txt = self.txt_in(txt) + vec_orig = vec + if self.params.global_modulation: + vec = (self.double_stream_modulation_img(vec_orig), self.double_stream_modulation_txt(vec_orig)) + if "post_input" in patches: for p in patches["post_input"]: out = p({"img": img, "txt": txt, "img_ids": img_ids, "txt_ids": txt_ids}) @@ -177,6 +212,9 @@ class 
Flux(nn.Module): img = torch.cat((txt, img), 1) + if self.params.global_modulation: + vec, _ = self.single_stream_modulation(vec_orig) + for i, block in enumerate(self.single_blocks): if ("single_block", i) in blocks_replace: def block_wrap(args): @@ -207,7 +245,7 @@ class Flux(nn.Module): img = img[:, txt.shape[1] :, ...] - img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) + img = self.final_layer(img, vec_orig) # (N, T, patch_size ** 2 * out_channels) return img def process_img(self, x, index=0, h_offset=0, w_offset=0, transformer_options={}): @@ -234,10 +272,10 @@ class Flux(nn.Module): h_offset += rope_options.get("shift_y", 0.0) w_offset += rope_options.get("shift_x", 0.0) - img_ids = torch.zeros((steps_h, steps_w, 3), device=x.device, dtype=x.dtype) + img_ids = torch.zeros((steps_h, steps_w, len(self.params.axes_dim)), device=x.device, dtype=torch.float32) img_ids[:, :, 0] = img_ids[:, :, 1] + index - img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=steps_h, device=x.device, dtype=x.dtype).unsqueeze(1) - img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=steps_w, device=x.device, dtype=x.dtype).unsqueeze(0) + img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(h_offset, h_len - 1 + h_offset, steps=steps_h, device=x.device, dtype=torch.float32).unsqueeze(1) + img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(w_offset, w_len - 1 + w_offset, steps=steps_w, device=x.device, dtype=torch.float32).unsqueeze(0) return img, repeat(img_ids, "h w c -> b (h w) c", b=bs) def forward(self, x, timestep, context, y=None, guidance=None, ref_latents=None, control=None, transformer_options={}, **kwargs): @@ -259,10 +297,10 @@ class Flux(nn.Module): h = 0 w = 0 index = 0 - ref_latents_method = kwargs.get("ref_latents_method", "offset") + ref_latents_method = kwargs.get("ref_latents_method", self.params.default_ref_method) for ref in ref_latents: if ref_latents_method == "index": - index += 1 + index += self.params.ref_index_scale h_offset = 0 w_offset = 0 elif ref_latents_method == "uxo": @@ -286,7 +324,11 @@ class Flux(nn.Module): img = torch.cat([img, kontext], dim=1) img_ids = torch.cat([img_ids, kontext_ids], dim=1) - txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype) + txt_ids = torch.zeros((bs, context.shape[1], len(self.params.axes_dim)), device=x.device, dtype=torch.float32) + + if len(self.params.axes_dim) == 4: # Flux 2 + txt_ids[:, :, 3] = torch.linspace(0, context.shape[1] - 1, steps=context.shape[1], device=x.device, dtype=torch.float32) + out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None)) out = out[:, :img_tokens] - return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h_orig,:w_orig] + return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=self.patch_size, pw=self.patch_size)[:,:,:h_orig,:w_orig] diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py index 611d36a1b..4f50810dc 100644 --- a/comfy/ldm/models/autoencoder.py +++ b/comfy/ldm/models/autoencoder.py @@ -9,6 +9,8 @@ from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistri from comfy.ldm.util import get_obj_from_str, instantiate_from_config from comfy.ldm.modules.ema import LitEma import comfy.ops +from einops import rearrange +import comfy.model_management class 
DiagonalGaussianRegularizer(torch.nn.Module): def __init__(self, sample: bool = False): @@ -179,6 +181,21 @@ class AutoencodingEngineLegacy(AutoencodingEngine): self.post_quant_conv = conv_op(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim + if ddconfig.get("batch_norm_latent", False): + self.bn_eps = 1e-4 + self.bn_momentum = 0.1 + self.ps = [2, 2] + self.bn = torch.nn.BatchNorm2d(math.prod(self.ps) * ddconfig["z_channels"], + eps=self.bn_eps, + momentum=self.bn_momentum, + affine=False, + track_running_stats=True, + ) + self.bn.eval() + else: + self.bn = None + + def get_autoencoder_params(self) -> list: params = super().get_autoencoder_params() return params @@ -201,11 +218,36 @@ class AutoencodingEngineLegacy(AutoencodingEngine): z = torch.cat(z, 0) z, reg_log = self.regularization(z) + + if self.bn is not None: + z = rearrange(z, + "... c (i pi) (j pj) -> ... (c pi pj) i j", + pi=self.ps[0], + pj=self.ps[1], + ) + + z = torch.nn.functional.batch_norm(z, + comfy.model_management.cast_to(self.bn.running_mean, dtype=z.dtype, device=z.device), + comfy.model_management.cast_to(self.bn.running_var, dtype=z.dtype, device=z.device), + momentum=self.bn_momentum, + eps=self.bn_eps) + if return_reg_log: return z, reg_log return z def decode(self, z: torch.Tensor, **decoder_kwargs) -> torch.Tensor: + if self.bn is not None: + s = torch.sqrt(comfy.model_management.cast_to(self.bn.running_var.view(1, -1, 1, 1), dtype=z.dtype, device=z.device) + self.bn_eps) + m = comfy.model_management.cast_to(self.bn.running_mean.view(1, -1, 1, 1), dtype=z.dtype, device=z.device) + z = z * s + m + z = rearrange( + z, + "... (c pi pj) i j -> ... c (i pi) (j pj)", + pi=self.ps[0], + pj=self.ps[1], + ) + if self.max_batch_size is None: dec = self.post_quant_conv(z) dec = self.decoder(dec, **decoder_kwargs) diff --git a/comfy/model_base.py b/comfy/model_base.py index e14b552c5..cad79ecbd 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -898,12 +898,13 @@ class Flux(BaseModel): attention_mask = kwargs.get("attention_mask", None) if attention_mask is not None: shape = kwargs["noise"].shape - mask_ref_size = kwargs["attention_mask_img_shape"] - # the model will pad to the patch size, and then divide - # essentially dividing and rounding up - (h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size)) - attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok)) - out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) + mask_ref_size = kwargs.get("attention_mask_img_shape", None) + if mask_ref_size is not None: + # the model will pad to the patch size, and then divide + # essentially dividing and rounding up + (h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size)) + attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok)) + out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) guidance = kwargs.get("guidance", 3.5) if guidance is not None: @@ -928,6 +929,16 @@ class Flux(BaseModel): out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16]) return out +class Flux2(Flux): + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + target_text_len = 512 + if cross_attn.shape[1] < target_text_len: + cross_attn = torch.nn.functional.pad(cross_attn, (0, 0, 
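# A hedged editor's aside on the pad spec completed just below,
# (0, 0, target_text_len - cross_attn.shape[1], 0): the trailing pair of
# the pad tuple pads dim -2, the token axis, at the front, so prompts
# shorter than 512 tokens are left-padded with zeros. For example, with
# a toy 77-token embedding:
#   torch.nn.functional.pad(torch.ones(1, 77, 8), (0, 0, 435, 0)).shape
#   # -> torch.Size([1, 512, 8]), with rows 0..434 all zero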
target_text_len - cross_attn.shape[1], 0)) + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + return out class GenmoMochi(BaseModel): def __init__(self, model_config, model_type=ModelType.FLOW, device=None): diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 0131ca25a..b2ba1459d 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -200,26 +200,54 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in state_dict_keys and ('{}img_in.weight'.format(key_prefix) in state_dict_keys or f"{key_prefix}distilled_guidance_layer.norms.0.scale" in state_dict_keys): #Flux, Chroma or Chroma Radiance (has no img_in.weight) dit_config = {} - dit_config["image_model"] = "flux" + if '{}double_stream_modulation_img.lin.weight'.format(key_prefix) in state_dict_keys: + dit_config["image_model"] = "flux2" + dit_config["axes_dim"] = [32, 32, 32, 32] + dit_config["num_heads"] = 48 + dit_config["mlp_ratio"] = 3.0 + dit_config["theta"] = 2000 + dit_config["out_channels"] = 128 + dit_config["global_modulation"] = True + dit_config["vec_in_dim"] = None + dit_config["mlp_silu_act"] = True + dit_config["qkv_bias"] = False + dit_config["ops_bias"] = False + dit_config["default_ref_method"] = "index" + dit_config["ref_index_scale"] = 10.0 + patch_size = 1 + else: + dit_config["image_model"] = "flux" + dit_config["axes_dim"] = [16, 56, 56] + dit_config["num_heads"] = 24 + dit_config["mlp_ratio"] = 4.0 + dit_config["theta"] = 10000 + dit_config["out_channels"] = 16 + dit_config["qkv_bias"] = True + patch_size = 2 + dit_config["in_channels"] = 16 - patch_size = 2 + dit_config["hidden_size"] = 3072 + dit_config["context_in_dim"] = 4096 + dit_config["patch_size"] = patch_size in_key = "{}img_in.weight".format(key_prefix) if in_key in state_dict_keys: - dit_config["in_channels"] = state_dict[in_key].shape[1] // (patch_size * patch_size) - dit_config["out_channels"] = 16 + w = state_dict[in_key] + dit_config["in_channels"] = w.shape[1] // (patch_size * patch_size) + dit_config["hidden_size"] = w.shape[0] + + txt_in_key = "{}txt_in.weight".format(key_prefix) + if txt_in_key in state_dict_keys: + w = state_dict[txt_in_key] + dit_config["context_in_dim"] = w.shape[1] + dit_config["hidden_size"] = w.shape[0] + vec_in_key = '{}vector_in.in_layer.weight'.format(key_prefix) if vec_in_key in state_dict_keys: dit_config["vec_in_dim"] = state_dict[vec_in_key].shape[1] - dit_config["context_in_dim"] = 4096 - dit_config["hidden_size"] = 3072 - dit_config["mlp_ratio"] = 4.0 - dit_config["num_heads"] = 24 + dit_config["depth"] = count_blocks(state_dict_keys, '{}double_blocks.'.format(key_prefix) + '{}.') dit_config["depth_single_blocks"] = count_blocks(state_dict_keys, '{}single_blocks.'.format(key_prefix) + '{}.') - dit_config["axes_dim"] = [16, 56, 56] - dit_config["theta"] = 10000 - dit_config["qkv_bias"] = True if '{}distilled_guidance_layer.0.norms.0.scale'.format(key_prefix) in state_dict_keys or '{}distilled_guidance_layer.norms.0.scale'.format(key_prefix) in state_dict_keys: #Chroma dit_config["image_model"] = "chroma" dit_config["in_channels"] = 64 diff --git a/comfy/sd.py b/comfy/sd.py index b6df0bd61..14dd8944c 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -356,7 +356,7 @@ class VAE: self.memory_used_encode = lambda shape, dtype: (700 * shape[2] * shape[3]) * model_management.dtype_size(dtype) self.memory_used_decode = lambda shape, dtype: (700 * shape[2] * shape[3] * 32 * 32) * 
model_management.dtype_size(dtype) - elif sd['decoder.conv_in.weight'].shape[1] == 32: + elif sd['decoder.conv_in.weight'].shape[1] == 32 and sd['decoder.conv_in.weight'].ndim == 5: ddconfig = {"block_out_channels": [128, 256, 512, 1024, 1024], "in_channels": 3, "out_channels": 3, "num_res_blocks": 2, "ffactor_spatial": 16, "ffactor_temporal": 4, "downsample_match_channel": True, "upsample_match_channel": True, "refiner_vae": False} self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1] self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32] @@ -382,6 +382,17 @@ class VAE: self.upscale_ratio = 4 self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1] + if 'decoder.post_quant_conv.weight' in sd: + sd = comfy.utils.state_dict_prefix_replace(sd, {"decoder.post_quant_conv.": "post_quant_conv.", "encoder.quant_conv.": "quant_conv."}) + + if 'bn.running_mean' in sd: + ddconfig["batch_norm_latent"] = True + self.downscale_ratio *= 2 + self.upscale_ratio *= 2 + self.latent_channels *= 4 + old_memory_used_decode = self.memory_used_decode + self.memory_used_decode = lambda shape, dtype: old_memory_used_decode(shape, dtype) * 4.0 + if 'post_quant_conv.weight' in sd: self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=sd['post_quant_conv.weight'].shape[1]) else: @@ -940,6 +951,8 @@ class TEModel(Enum): QWEN25_7B = 11 BYT5_SMALL_GLYPH = 12 GEMMA_3_4B = 13 + MISTRAL3_24B = 14 + MISTRAL3_24B_PRUNED_FLUX2 = 15 def detect_te_model(sd): if "text_model.encoder.layers.30.mlp.fc1.weight" in sd: @@ -972,6 +985,13 @@ def detect_te_model(sd): if weight.shape[0] == 512: return TEModel.QWEN25_7B if "model.layers.0.post_attention_layernorm.weight" in sd: + weight = sd['model.layers.0.post_attention_layernorm.weight'] + if weight.shape[0] == 5120: + if "model.layers.39.post_attention_layernorm.weight" in sd: + return TEModel.MISTRAL3_24B + else: + return TEModel.MISTRAL3_24B_PRUNED_FLUX2 + return TEModel.LLAMA3_8 return None @@ -1086,6 +1106,10 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip else: clip_target.clip = comfy.text_encoders.qwen_image.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.qwen_image.QwenImageTokenizer + elif te_model == TEModel.MISTRAL3_24B or te_model == TEModel.MISTRAL3_24B_PRUNED_FLUX2: + clip_target.clip = comfy.text_encoders.flux.flux2_te(**llama_detect(clip_data), pruned=te_model == TEModel.MISTRAL3_24B_PRUNED_FLUX2) + clip_target.tokenizer = comfy.text_encoders.flux.Flux2Tokenizer + tokenizer_data["tekken_model"] = clip_data[0].get("tekken_model", None) else: # clip_l if clip_type == CLIPType.SD3: diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 2e64b85e8..8fe8e63f6 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -741,6 +741,37 @@ class FluxSchnell(Flux): out = model_base.Flux(self, model_type=model_base.ModelType.FLOW, device=device) return out +class Flux2(Flux): + unet_config = { + "image_model": "flux2", + } + + sampling_settings = { + "shift": 2.02, + } + + unet_extra_config = {} + latent_format = latent_formats.Flux2 + + supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] + + vae_key_prefix = ["vae."] + text_encoder_key_prefix = ["text_encoders."] + + def __init__(self, unet_config): + super().__init__(unet_config) + self.memory_usage_factor = self.memory_usage_factor * (2.0 * 2.0) * 2.36 + + def get_model(self, state_dict, prefix="", device=None): + out = 
model_base.Flux2(self, device=device) + return out + + def clip_target(self, state_dict={}): + return None # TODO + pref = self.text_encoder_key_prefix[0] + t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.flux.FluxTokenizer, comfy.text_encoders.flux.flux_clip(**t5_detect)) + class GenmoMochi(supported_models_base.BASE): unet_config = { "image_model": "mochi_preview", @@ -1422,6 +1453,7 @@ class HunyuanVideo15_SR_Distilled(HunyuanVideo): hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_video.HunyuanVideo15Tokenizer, comfy.text_encoders.hunyuan_image.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage, Flux2] + models += [SVD_img2vid] diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py index d61ef6668..8dbbca16e 100644 --- a/comfy/text_encoders/flux.py +++ b/comfy/text_encoders/flux.py @@ -1,10 +1,13 @@ from comfy import sd1_clip import comfy.text_encoders.t5 import comfy.text_encoders.sd3_clip +import comfy.text_encoders.llama import comfy.model_management -from transformers import T5TokenizerFast +from transformers import T5TokenizerFast, LlamaTokenizerFast import torch import os +import json +import base64 class T5XXLTokenizer(sd1_clip.SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): @@ -68,3 +71,105 @@ def flux_clip(dtype_t5=None, t5xxl_scaled_fp8=None): model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 super().__init__(dtype_t5=dtype_t5, device=device, dtype=dtype, model_options=model_options) return FluxClipModel_ + +def load_mistral_tokenizer(data): + if torch.is_tensor(data): + data = data.numpy().tobytes() + + try: + from transformers.integrations.mistral import MistralConverter + except 
ModuleNotFoundError: + from transformers.models.pixtral.convert_pixtral_weights_to_hf import MistralConverter + + mistral_vocab = json.loads(data) + + special_tokens = {} + vocab = {} + + max_vocab = mistral_vocab["config"]["default_vocab_size"] + + for w in mistral_vocab["vocab"]: + r = w["rank"] + if r >= max_vocab: + continue + + vocab[base64.b64decode(w["token_bytes"])] = r + + for w in mistral_vocab["special_tokens"]: + if "token_bytes" in w: + special_tokens[base64.b64decode(w["token_bytes"])] = w["rank"] + else: + special_tokens[w["token_str"]] = w["rank"] + + all_special = [] + for v in special_tokens: + all_special.append(v) + + special_tokens.update(vocab) + vocab = special_tokens + return {"tokenizer_object": MistralConverter(vocab=vocab, additional_special_tokens=all_special).converted(), "legacy": False} + +class MistralTokenizerClass: + @staticmethod + def from_pretrained(path, **kwargs): + return LlamaTokenizerFast(**kwargs) + +class Mistral3Tokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + self.tekken_data = tokenizer_data.get("tekken_model", None) + super().__init__("", pad_with_end=False, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_class=MistralTokenizerClass, has_end_token=False, pad_to_max_length=False, pad_token=11, max_length=99999999, min_length=1, pad_left=True, tokenizer_args=load_mistral_tokenizer(self.tekken_data), tokenizer_data=tokenizer_data) + + def state_dict(self): + return {"tekken_model": self.tekken_data} + +class Flux2Tokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="mistral3_24b", tokenizer=Mistral3Tokenizer) + self.llama_template = '[SYSTEM_PROMPT]You are an AI that reasons about image descriptions. 
You give structured responses focusing on object relationships, object\nattribution and actions without speculation.[/SYSTEM_PROMPT][INST]{}[/INST]' + + def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs): + if llama_template is None: + llama_text = self.llama_template.format(text) + else: + llama_text = llama_template.format(text) + + tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs) + return tokens + +class Mistral3_24BModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="all", layer_idx=None, dtype=None, attention_mask=True, model_options={}): + textmodel_json_config = {} + num_layers = model_options.get("num_layers", None) + if num_layers is not None: + textmodel_json_config["num_hidden_layers"] = num_layers + if num_layers < 40: + textmodel_json_config["final_norm"] = False + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 1, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Mistral3Small24B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) + +class Flux2TEModel(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None, model_options={}, name="mistral3_24b", clip_model=Mistral3_24BModel): + super().__init__(device=device, dtype=dtype, name=name, clip_model=clip_model, model_options=model_options) + + def encode_token_weights(self, token_weight_pairs): + out, pooled, extra = super().encode_token_weights(token_weight_pairs) + + out = torch.stack((out[:, 10], out[:, 20], out[:, 30]), dim=1) + out = out.movedim(1, 2) + out = out.reshape(out.shape[0], out.shape[1], -1) + return out, pooled, extra + +def flux2_te(dtype_llama=None, llama_scaled_fp8=None, llama_quantization_metadata=None, pruned=False): + class Flux2TEModel_(Flux2TEModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + model_options = model_options.copy() + model_options["scaled_fp8"] = llama_scaled_fp8 + if dtype_llama is not None: + dtype = dtype_llama + if llama_quantization_metadata is not None: + model_options["quantization_metadata"] = llama_quantization_metadata + if pruned: + model_options = model_options.copy() + model_options["num_layers"] = 30 + super().__init__(device=device, dtype=dtype, model_options=model_options) + return Flux2TEModel_ diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index feb44bbb0..749ff581b 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -34,6 +34,28 @@ class Llama2Config: rope_scale = None final_norm: bool = True +@dataclass +class Mistral3Small24BConfig: + vocab_size: int = 131072 + hidden_size: int = 5120 + intermediate_size: int = 32768 + num_hidden_layers: int = 40 + num_attention_heads: int = 32 + num_key_value_heads: int = 8 + max_position_embeddings: int = 8192 + rms_norm_eps: float = 1e-5 + rope_theta: float = 1000000000.0 + transformer_type: str = "llama" + head_dim = 128 + rms_norm_add = False + mlp_activation = "silu" + qkv_bias = False + rope_dims = None + q_norm = None + k_norm = None + rope_scale = None + final_norm: bool = True + @dataclass class Qwen25_3BConfig: vocab_size: int = 151936 @@ -465,6 +487,15 @@ class Llama2(BaseLlama, torch.nn.Module): self.model = Llama2_(config, device=device, dtype=dtype, 
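# A hedged editor's sketch of encode_token_weights above: three chosen
# intermediate Mistral hidden states are stacked per token, tripling the
# Flux 2 conditioning width to 3 * 5120 = 15360. Toy shapes only:
import torch
B, T, D = 1, 32, 5120
out = torch.stack([torch.randn(B, T, D) for _ in range(3)], dim=1)
cond = out.movedim(1, 2).reshape(B, T, -1)
assert cond.shape == (1, 32, 15360)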
ops=operations) self.dtype = dtype +class Mistral3Small24B(BaseLlama, torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + super().__init__() + config = Mistral3Small24BConfig(**config_dict) + self.num_layers = config.num_hidden_layers + + self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) + self.dtype = dtype + class Qwen25_3B(BaseLlama, torch.nn.Module): def __init__(self, config_dict, dtype, device, operations): super().__init__() diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py index ce1b2e89f..d9c4bba81 100644 --- a/comfy_extras/nodes_flux.py +++ b/comfy_extras/nodes_flux.py @@ -2,7 +2,10 @@ import node_helpers import comfy.utils from typing_extensions import override from comfy_api.latest import ComfyExtension, io - +import comfy.model_management +import torch +import math +import nodes class CLIPTextEncodeFlux(io.ComfyNode): @classmethod @@ -30,6 +33,27 @@ class CLIPTextEncodeFlux(io.ComfyNode): encode = execute # TODO: remove +class EmptyFlux2LatentImage(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="EmptyFlux2LatentImage", + display_name="Empty Flux 2 Latent", + category="latent", + inputs=[ + io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(), + ], + ) + + @classmethod + def execute(cls, width, height, batch_size=1) -> io.NodeOutput: + latent = torch.zeros([batch_size, 128, height // 16, width // 16], device=comfy.model_management.intermediate_device()) + return io.NodeOutput({"samples": latent}) class FluxGuidance(io.ComfyNode): @classmethod @@ -154,6 +178,58 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode): append = execute # TODO: remove +def generalized_time_snr_shift(t, mu: float, sigma: float): + return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma) + + +def compute_empirical_mu(image_seq_len: int, num_steps: int) -> float: + a1, b1 = 8.73809524e-05, 1.89833333 + a2, b2 = 0.00016927, 0.45666666 + + if image_seq_len > 4300: + mu = a2 * image_seq_len + b2 + return float(mu) + + m_200 = a2 * image_seq_len + b2 + m_10 = a1 * image_seq_len + b1 + + a = (m_200 - m_10) / 190.0 + b = m_200 - 200.0 * a + mu = a * num_steps + b + + return float(mu) + + +def get_schedule(num_steps: int, image_seq_len: int) -> list[float]: + mu = compute_empirical_mu(image_seq_len, num_steps) + timesteps = torch.linspace(1, 0, num_steps + 1) + timesteps = generalized_time_snr_shift(timesteps, mu, 1.0) + return timesteps + + +class Flux2Scheduler(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="Flux2Scheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Int.Input("steps", default=20, min=1, max=4096), + io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1), + io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=1), + ], + outputs=[ + io.Sigmas.Output(), + ], + ) + + @classmethod + def execute(cls, steps, width, height) -> io.NodeOutput: + seq_len = (width * height / (16 * 16)) + sigmas = get_schedule(steps, round(seq_len)) + return io.NodeOutput(sigmas) + + class FluxExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: @@ -163,6 +239,8 @@ class FluxExtension(ComfyExtension): FluxDisableGuidance, FluxKontextImageScale, 
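# A hedged usage sketch of the Flux2Scheduler math defined above: a
# 1024x1024 image yields (1024 * 1024) / (16 * 16) = 4096 tokens, below
# the 4300 cutoff, so mu is interpolated between the 10-step and 200-step
# fits (roughly 2.2 for 20 steps; the exact value comes from
# compute_empirical_mu).
import math
import torch
def shift(t, mu, sigma=1.0):
    return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
sigmas = shift(torch.linspace(1, 0, 21), mu=2.2)  # 20 steps -> 21 boundaries
# sigmas[0] == 1.0 and sigmas[-1] == 0.0, front-loaded toward high noise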
FluxKontextMultiReferenceLatentMethod, + EmptyFlux2LatentImage, + Flux2Scheduler, ] diff --git a/nodes.py b/nodes.py index f023ae3b6..f4835c02e 100644 --- a/nodes.py +++ b/nodes.py @@ -929,7 +929,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), From 5c7b08ca58f5412b3a814b374793cacdb5b5f0a7 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 25 Nov 2025 18:09:07 +0200 Subject: [PATCH 0919/1073] [API Nodes] add Flux.2 Pro node (#10880) --- comfy_api_nodes/apis/bfl_api.py | 28 +++- comfy_api_nodes/nodes_bfl.py | 238 +++++++++++++++---------------- comfy_api_nodes/util/__init__.py | 2 + 3 files changed, 143 insertions(+), 125 deletions(-) diff --git a/comfy_api_nodes/apis/bfl_api.py b/comfy_api_nodes/apis/bfl_api.py index 0fc8c0607..d8d3557b3 100644 --- a/comfy_api_nodes/apis/bfl_api.py +++ b/comfy_api_nodes/apis/bfl_api.py @@ -70,6 +70,29 @@ class BFLFluxProGenerateRequest(BaseModel): # ) +class Flux2ProGenerateRequest(BaseModel): + prompt: str = Field(...) + width: int = Field(1024, description="Must be a multiple of 32.") + height: int = Field(768, description="Must be a multiple of 32.") + seed: int | None = Field(None) + prompt_upsampling: bool | None = Field(None) + input_image: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_2: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_3: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_4: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_5: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_6: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_7: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_8: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + input_image_9: str | None = Field(None, description="Base64 encoded image for image-to-image generation") + safety_tolerance: int | None = Field( + 5, description="Tolerance level for input and output moderation. Value 0 being most strict.", ge=0, le=5 + ) + output_format: str | None = Field( + "png", description="Output format for the generated image. Can be 'jpeg' or 'png'." 
+ ) + + class BFLFluxKontextProGenerateRequest(BaseModel): prompt: str = Field(..., description='The text prompt for what you want to edit.') input_image: Optional[str] = Field(None, description='Image to edit in base64 format') @@ -109,8 +132,9 @@ class BFLFluxProUltraGenerateRequest(BaseModel): class BFLFluxProGenerateResponse(BaseModel): - id: str = Field(..., description='The unique identifier for the generation task.') - polling_url: str = Field(..., description='URL to poll for the generation result.') + id: str = Field(..., description="The unique identifier for the generation task.") + polling_url: str = Field(..., description="URL to poll for the generation result.") + cost: float | None = Field(None, description="Price in cents") class BFLStatus(str, Enum): diff --git a/comfy_api_nodes/nodes_bfl.py b/comfy_api_nodes/nodes_bfl.py index 1740fb377..8826dea0c 100644 --- a/comfy_api_nodes/nodes_bfl.py +++ b/comfy_api_nodes/nodes_bfl.py @@ -1,7 +1,7 @@ from inspect import cleandoc -from typing import Optional import torch +from pydantic import BaseModel from typing_extensions import override from comfy_api.latest import IO, ComfyExtension @@ -9,15 +9,16 @@ from comfy_api_nodes.apis.bfl_api import ( BFLFluxExpandImageRequest, BFLFluxFillImageRequest, BFLFluxKontextProGenerateRequest, - BFLFluxProGenerateRequest, BFLFluxProGenerateResponse, BFLFluxProUltraGenerateRequest, BFLFluxStatusResponse, BFLStatus, + Flux2ProGenerateRequest, ) from comfy_api_nodes.util import ( ApiEndpoint, download_url_to_image_tensor, + get_number_of_images, poll_op, resize_mask_to_image, sync_op, ) @@ -116,7 +117,7 @@ class FluxProUltraImageNode(IO.ComfyNode): prompt_upsampling: bool = False, raw: bool = False, seed: int = 0, - image_prompt: Optional[torch.Tensor] = None, + image_prompt: torch.Tensor | None = None, image_prompt_strength: float = 0.1, ) -> IO.NodeOutput: if image_prompt is None: @@ -230,7 +231,7 @@ class FluxKontextProImageNode(IO.ComfyNode): aspect_ratio: str, guidance: float, steps: int, - input_image: Optional[torch.Tensor] = None, + input_image: torch.Tensor | None = None, seed=0, prompt_upsampling=False, ) -> IO.NodeOutput: @@ -280,124 +281,6 @@ class FluxKontextMaxImageNode(FluxKontextProImageNode): DISPLAY_NAME = "Flux.1 Kontext [max] Image" -class FluxProImageNode(IO.ComfyNode): - """ - Generates images synchronously based on prompt and resolution. - """ - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="FluxProImageNode", - display_name="Flux 1.1 [pro] Image", - category="api node/image/BFL", - description=cleandoc(cls.__doc__ or ""), - inputs=[ - IO.String.Input( - "prompt", - multiline=True, - default="", - tooltip="Prompt for the image generation", - ), - IO.Boolean.Input( - "prompt_upsampling", - default=False, - tooltip="Whether to perform upsampling on the prompt.
" - "If active, automatically modifies the prompt for more creative generation, " - "but results are nondeterministic (same seed will not produce exactly the same result).", - ), - IO.Int.Input( - "width", - default=1024, - min=256, - max=1440, - step=32, - ), - IO.Int.Input( - "height", - default=768, - min=256, - max=1440, - step=32, - ), - IO.Int.Input( - "seed", - default=0, - min=0, - max=0xFFFFFFFFFFFFFFFF, - control_after_generate=True, - tooltip="The random seed used for creating the noise.", - ), - IO.Image.Input( - "image_prompt", - optional=True, - ), - # "image_prompt_strength": ( - # IO.FLOAT, - # { - # "default": 0.1, - # "min": 0.0, - # "max": 1.0, - # "step": 0.01, - # "tooltip": "Blend between the prompt and the image prompt.", - # }, - # ), - ], - outputs=[IO.Image.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - ) - - @classmethod - async def execute( - cls, - prompt: str, - prompt_upsampling, - width: int, - height: int, - seed=0, - image_prompt=None, - # image_prompt_strength=0.1, - ) -> IO.NodeOutput: - image_prompt = image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt) - initial_response = await sync_op( - cls, - ApiEndpoint( - path="/proxy/bfl/flux-pro-1.1/generate", - method="POST", - ), - response_model=BFLFluxProGenerateResponse, - data=BFLFluxProGenerateRequest( - prompt=prompt, - prompt_upsampling=prompt_upsampling, - width=width, - height=height, - seed=seed, - image_prompt=image_prompt, - ), - ) - response = await poll_op( - cls, - ApiEndpoint(initial_response.polling_url), - response_model=BFLFluxStatusResponse, - status_extractor=lambda r: r.status, - progress_extractor=lambda r: r.progress, - completed_statuses=[BFLStatus.ready], - failed_statuses=[ - BFLStatus.request_moderated, - BFLStatus.content_moderated, - BFLStatus.error, - BFLStatus.task_not_found, - ], - queued_statuses=[], - ) - return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) - - class FluxProExpandNode(IO.ComfyNode): """ Outpaints image based on prompt. @@ -640,16 +523,125 @@ class FluxProFillNode(IO.ComfyNode): return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) +class Flux2ProImageNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="Flux2ProImageNode", + display_name="Flux.2 [pro] Image", + category="api node/image/BFL", + description="Generates images synchronously based on prompt and resolution.", + inputs=[ + IO.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Prompt for the image generation or edit", + ), + IO.Int.Input( + "width", + default=1024, + min=256, + max=2048, + step=32, + ), + IO.Int.Input( + "height", + default=768, + min=256, + max=2048, + step=32, + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="The random seed used for creating the noise.", + ), + IO.Boolean.Input( + "prompt_upsampling", + default=False, + tooltip="Whether to perform upsampling on the prompt. 
" + "If active, automatically modifies the prompt for more creative generation, " + "but results are nondeterministic (same seed will not produce exactly the same result).", + ), + IO.Image.Input("images", optional=True, tooltip="Up to 4 images to be used as references."), + ], + outputs=[IO.Image.Output()], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + prompt: str, + width: int, + height: int, + seed: int, + prompt_upsampling: bool, + images: torch.Tensor | None = None, + ) -> IO.NodeOutput: + reference_images = {} + if images is not None: + if get_number_of_images(images) > 9: + raise ValueError("The current maximum number of supported images is 9.") + for image_index in range(images.shape[0]): + key_name = f"input_image_{image_index + 1}" if image_index else "input_image" + reference_images[key_name] = tensor_to_base64_string(images[image_index], total_pixels=2048 * 2048) + initial_response = await sync_op( + cls, + ApiEndpoint(path="/proxy/bfl/flux-2-pro/generate", method="POST"), + response_model=BFLFluxProGenerateResponse, + data=Flux2ProGenerateRequest( + prompt=prompt, + width=width, + height=height, + seed=seed, + prompt_upsampling=prompt_upsampling, + **reference_images, + ), + ) + + def price_extractor(_r: BaseModel) -> float | None: + return None if initial_response.cost is None else initial_response.cost / 100 + + response = await poll_op( + cls, + ApiEndpoint(initial_response.polling_url), + response_model=BFLFluxStatusResponse, + status_extractor=lambda r: r.status, + progress_extractor=lambda r: r.progress, + price_extractor=price_extractor, + completed_statuses=[BFLStatus.ready], + failed_statuses=[ + BFLStatus.request_moderated, + BFLStatus.content_moderated, + BFLStatus.error, + BFLStatus.task_not_found, + ], + queued_statuses=[], + ) + return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) + + class BFLExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ FluxProUltraImageNode, - # FluxProImageNode, FluxKontextProImageNode, FluxKontextMaxImageNode, FluxProExpandNode, FluxProFillNode, + Flux2ProImageNode, ] diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py index 21013b591..80292fb3c 100644 --- a/comfy_api_nodes/util/__init__.py +++ b/comfy_api_nodes/util/__init__.py @@ -36,6 +36,7 @@ from .upload_helpers import ( upload_video_to_comfyapi, ) from .validation_utils import ( + get_image_dimensions, get_number_of_images, validate_aspect_ratio_string, validate_audio_duration, @@ -82,6 +83,7 @@ __all__ = [ "trim_video", "video_to_base64_string", # Validation utilities + "get_image_dimensions", "get_number_of_images", "validate_aspect_ratio_string", "validate_audio_duration", From af81cb962d9dd283ddb551962cc223b5a186a1ce Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 08:40:32 -0800 Subject: [PATCH 0920/1073] Add Flux 2 support to README. 
(#10882) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 28beec427..b9300ab07 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/) - [Qwen Image](https://comfyanonymous.github.io/ComfyUI_examples/qwen_image/) - [Hunyuan Image 2.1](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_image/) + - [Flux 2](https://comfyanonymous.github.io/ComfyUI_examples/flux2/) - Image Editing Models - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model) From 828b1b9953175b6df79459f417d1032869d0b46a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Nov 2025 12:40:58 -0500 Subject: [PATCH 0921/1073] ComfyUI version v0.3.72 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index b4655d553..dac038c26 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.71" +__version__ = "0.3.72" diff --git a/pyproject.toml b/pyproject.toml index 280dbaf53..75df8fb7c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.71" +version = "0.3.72" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From dff996ca39d86265bbabf15e666484e051f0b3d5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 11:30:24 -0800 Subject: [PATCH 0922/1073] Fix crash. (#10885) --- comfy/text_encoders/flux.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py index 8dbbca16e..024504a5b 100644 --- a/comfy/text_encoders/flux.py +++ b/comfy/text_encoders/flux.py @@ -87,6 +87,7 @@ def load_mistral_tokenizer(data): vocab = {} max_vocab = mistral_vocab["config"]["default_vocab_size"] + max_vocab -= len(mistral_vocab["special_tokens"]) for w in mistral_vocab["vocab"]: r = w["rank"] From 18b79acba95d44b4ea00bbbfc1856bc71bd58841 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Wed, 26 Nov 2025 03:58:21 +0800 Subject: [PATCH 0923/1073] Update workflow templates to v0.7.20 (#10883) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b7014f956..5f20816d6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.30.6 -comfyui-workflow-templates==0.7.9 +comfyui-workflow-templates==0.7.20 comfyui-embedded-docs==0.3.1 torch torchsde From d196a905bb379a6d800d0c13f9b4fdea3965311a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 11:58:39 -0800 Subject: [PATCH 0924/1073] Lower vram usage for flux 2 text encoder. 
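# A hedged editor's sketch of the vocab-budget fix above: byte-level
# ranks must leave room for the special tokens merged into the same
# table, so the cutoff becomes default_vocab_size minus the special
# count. Toy values, not the real tekken sizes:
vocab = {b"a": 0, b"b": 1, b"c": 2, b"d": 3}   # toy byte-level ranks
special = {"<s>": 0, "</s>": 1}                # toy special tokens
max_vocab = 4 - len(special)                   # toy default_vocab_size = 4
kept = {t: r for t, r in vocab.items() if r < max_vocab}
assert len(kept) + len(special) == 4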
(#10887) --- comfy/sd1_clip.py | 7 ++++--- comfy/text_encoders/flux.py | 4 ++-- comfy/text_encoders/llama.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 8f509bab1..0fc9ab3db 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -90,7 +90,6 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, enable_attention_masks=False, zero_out_masked=False, return_projected_pooled=True, return_attention_masks=False, model_options={}): # clip-vit-base-patch32 super().__init__() - assert layer in self.LAYERS if textmodel_json_config is None: textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json") @@ -164,7 +163,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): def set_clip_options(self, options): layer_idx = options.get("layer", self.layer_idx) self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled) - if self.layer == "all": + if isinstance(self.layer, list) or self.layer == "all": pass elif layer_idx is None or abs(layer_idx) > self.num_layers: self.layer = "last" @@ -266,7 +265,9 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): if self.enable_attention_masks: attention_mask_model = attention_mask - if self.layer == "all": + if isinstance(self.layer, list): + intermediate_output = self.layer + elif self.layer == "all": intermediate_output = "all" else: intermediate_output = self.layer_idx diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py index 024504a5b..99f4812bb 100644 --- a/comfy/text_encoders/flux.py +++ b/comfy/text_encoders/flux.py @@ -138,7 +138,7 @@ class Flux2Tokenizer(sd1_clip.SD1Tokenizer): return tokens class Mistral3_24BModel(sd1_clip.SDClipModel): - def __init__(self, device="cpu", layer="all", layer_idx=None, dtype=None, attention_mask=True, model_options={}): + def __init__(self, device="cpu", layer=[10, 20, 30], layer_idx=None, dtype=None, attention_mask=True, model_options={}): textmodel_json_config = {} num_layers = model_options.get("num_layers", None) if num_layers is not None: @@ -154,7 +154,7 @@ class Flux2TEModel(sd1_clip.SD1ClipModel): def encode_token_weights(self, token_weight_pairs): out, pooled, extra = super().encode_token_weights(token_weight_pairs) - out = torch.stack((out[:, 10], out[:, 20], out[:, 30]), dim=1) + out = torch.stack((out[:, 0], out[:, 1], out[:, 2]), dim=1) out = out.movedim(1, 2) out = out.reshape(out.shape[0], out.shape[1], -1) return out, pooled, extra diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index 749ff581b..d47ed27bc 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -434,8 +434,12 @@ class Llama2_(nn.Module): intermediate = None all_intermediate = None + only_layers = None if intermediate_output is not None: - if intermediate_output == "all": + if isinstance(intermediate_output, list): + all_intermediate = [] + only_layers = set(intermediate_output) + elif intermediate_output == "all": all_intermediate = [] intermediate_output = None elif intermediate_output < 0: @@ -443,7 +447,8 @@ class Llama2_(nn.Module): for i, layer in enumerate(self.layers): if all_intermediate is not None: - all_intermediate.append(x.unsqueeze(1).clone()) + if only_layers is None or (i in only_layers): + all_intermediate.append(x.unsqueeze(1).clone()) x = layer( x=x, attention_mask=mask, @@ -457,7 
+462,8 @@ class Llama2_(nn.Module): x = self.norm(x) if all_intermediate is not None: - all_intermediate.append(x.unsqueeze(1).clone()) + if only_layers is None or ((i + 1) in only_layers): + all_intermediate.append(x.unsqueeze(1).clone()) if all_intermediate is not None: intermediate = torch.cat(all_intermediate, dim=1) From 0c18842acbdf546883b08808dd9feea7605d7649 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Nov 2025 14:59:37 -0500 Subject: [PATCH 0925/1073] ComfyUI v0.3.73 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index dac038c26..f8818838e 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.72" +__version__ = "0.3.73" diff --git a/pyproject.toml b/pyproject.toml index 75df8fb7c..7e4bac12d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.72" +version = "0.3.73" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From e9aae31fa241a6a63a368800146ea91629d4e8c2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 15:41:45 -0800 Subject: [PATCH 0926/1073] Z Image model. (#10892) --- comfy/ldm/lumina/model.py | 219 +++++++------------- comfy/ldm/modules/diffusionmodules/mmdit.py | 6 +- comfy/model_base.py | 4 + comfy/model_detection.py | 29 ++- comfy/sd.py | 8 + comfy/text_encoders/llama.py | 31 +++ comfy/text_encoders/z_image.py | 48 +++++ 7 files changed, 196 insertions(+), 149 deletions(-) create mode 100644 comfy/text_encoders/z_image.py diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index b4494a51d..c8643eb82 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -11,6 +11,7 @@ import comfy.ldm.common_dit from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder from comfy.ldm.modules.attention import optimized_attention_masked from comfy.ldm.flux.layers import EmbedND +from comfy.ldm.flux.math import apply_rope import comfy.patcher_extension @@ -31,6 +32,7 @@ class JointAttention(nn.Module): n_heads: int, n_kv_heads: Optional[int], qk_norm: bool, + out_bias: bool = False, operation_settings={}, ): """ @@ -59,7 +61,7 @@ class JointAttention(nn.Module): self.out = operation_settings.get("operations").Linear( n_heads * self.head_dim, dim, - bias=False, + bias=out_bias, device=operation_settings.get("device"), dtype=operation_settings.get("dtype"), ) @@ -70,35 +72,6 @@ class JointAttention(nn.Module): else: self.q_norm = self.k_norm = nn.Identity() - @staticmethod - def apply_rotary_emb( - x_in: torch.Tensor, - freqs_cis: torch.Tensor, - ) -> torch.Tensor: - """ - Apply rotary embeddings to input tensors using the given frequency - tensor. - - This function applies rotary embeddings to the given query 'xq' and - key 'xk' tensors using the provided frequency tensor 'freqs_cis'. The - input tensors are reshaped as complex numbers, and the frequency tensor - is reshaped for broadcasting compatibility. The resulting tensors - contain rotary embeddings and are returned as real tensors. - - Args: - x_in (torch.Tensor): Query or Key tensor to apply rotary embeddings. - freqs_cis (torch.Tensor): Precomputed frequency tensor for complex - exponentials. 
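# A hedged editor's sketch of the rotary math being deduplicated here:
# the removed helper and flux.math.apply_rope both treat the last dim as
# (real, imag) pairs rotated by precomputed 2x2 freqs_cis entries. Toy
# shapes only:
import torch
x = torch.randn(1, 8, 16, 64)               # (batch, heads, seq, head_dim)
freqs = torch.randn(1, 1, 16, 32, 2, 2)     # one 2x2 rotation per pair
t_ = x.reshape(*x.shape[:-1], -1, 1, 2)     # pair the last dim: (..., 32, 1, 2)
out = (freqs[..., 0] * t_[..., 0] + freqs[..., 1] * t_[..., 1]).reshape(x.shape)
assert out.shape == x.shape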
- - Returns: - Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor - and key tensor with rotary embeddings. - """ - - t_ = x_in.reshape(*x_in.shape[:-1], -1, 1, 2) - t_out = freqs_cis[..., 0] * t_[..., 0] + freqs_cis[..., 1] * t_[..., 1] - return t_out.reshape(*x_in.shape) - def forward( self, x: torch.Tensor, @@ -134,8 +107,7 @@ class JointAttention(nn.Module): xq = self.q_norm(xq) xk = self.k_norm(xk) - xq = JointAttention.apply_rotary_emb(xq, freqs_cis=freqs_cis) - xk = JointAttention.apply_rotary_emb(xk, freqs_cis=freqs_cis) + xq, xk = apply_rope(xq, xk, freqs_cis) n_rep = self.n_local_heads // self.n_local_kv_heads if n_rep >= 1: @@ -215,6 +187,8 @@ class JointTransformerBlock(nn.Module): norm_eps: float, qk_norm: bool, modulation=True, + z_image_modulation=False, + attn_out_bias=False, operation_settings={}, ) -> None: """ @@ -235,10 +209,10 @@ class JointTransformerBlock(nn.Module): super().__init__() self.dim = dim self.head_dim = dim // n_heads - self.attention = JointAttention(dim, n_heads, n_kv_heads, qk_norm, operation_settings=operation_settings) + self.attention = JointAttention(dim, n_heads, n_kv_heads, qk_norm, out_bias=attn_out_bias, operation_settings=operation_settings) self.feed_forward = FeedForward( dim=dim, - hidden_dim=4 * dim, + hidden_dim=dim, multiple_of=multiple_of, ffn_dim_multiplier=ffn_dim_multiplier, operation_settings=operation_settings, @@ -252,16 +226,27 @@ class JointTransformerBlock(nn.Module): self.modulation = modulation if modulation: - self.adaLN_modulation = nn.Sequential( - nn.SiLU(), - operation_settings.get("operations").Linear( - min(dim, 1024), - 4 * dim, - bias=True, - device=operation_settings.get("device"), - dtype=operation_settings.get("dtype"), - ), - ) + if z_image_modulation: + self.adaLN_modulation = nn.Sequential( + operation_settings.get("operations").Linear( + min(dim, 256), + 4 * dim, + bias=True, + device=operation_settings.get("device"), + dtype=operation_settings.get("dtype"), + ), + ) + else: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + operation_settings.get("operations").Linear( + min(dim, 1024), + 4 * dim, + bias=True, + device=operation_settings.get("device"), + dtype=operation_settings.get("dtype"), + ), + ) def forward( self, @@ -323,7 +308,7 @@ class FinalLayer(nn.Module): The final layer of NextDiT. 
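# A hedged editor's sketch of the z_image modulation path above: the
# compact 256-d timestep vector feeds a single Linear (no SiLU in this
# variant) that emits the four per-block modulation tensors. The chunk
# order is an assumption carried over from the Lumina block and is not
# shown in this hunk:
import torch
import torch.nn as nn
dim, t_dim = 3840, 256                      # Z-Image sizes from this patch
adaLN = nn.Linear(t_dim, 4 * dim)
scale_msa, gate_msa, scale_mlp, gate_mlp = adaLN(torch.randn(1, t_dim)).chunk(4, dim=-1)
assert scale_msa.shape == (1, dim)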
""" - def __init__(self, hidden_size, patch_size, out_channels, operation_settings={}): + def __init__(self, hidden_size, patch_size, out_channels, z_image_modulation=False, operation_settings={}): super().__init__() self.norm_final = operation_settings.get("operations").LayerNorm( hidden_size, @@ -340,10 +325,15 @@ class FinalLayer(nn.Module): dtype=operation_settings.get("dtype"), ) + if z_image_modulation: + min_mod = 256 + else: + min_mod = 1024 + self.adaLN_modulation = nn.Sequential( nn.SiLU(), operation_settings.get("operations").Linear( - min(hidden_size, 1024), + min(hidden_size, min_mod), hidden_size, bias=True, device=operation_settings.get("device"), @@ -373,12 +363,16 @@ class NextDiT(nn.Module): n_heads: int = 32, n_kv_heads: Optional[int] = None, multiple_of: int = 256, - ffn_dim_multiplier: Optional[float] = None, + ffn_dim_multiplier: float = 4.0, norm_eps: float = 1e-5, qk_norm: bool = False, cap_feat_dim: int = 5120, axes_dims: List[int] = (16, 56, 56), axes_lens: List[int] = (1, 512, 512), + rope_theta=10000.0, + z_image_modulation=False, + time_scale=1.0, + pad_tokens_multiple=None, image_model=None, device=None, dtype=None, @@ -390,6 +384,8 @@ class NextDiT(nn.Module): self.in_channels = in_channels self.out_channels = in_channels self.patch_size = patch_size + self.time_scale = time_scale + self.pad_tokens_multiple = pad_tokens_multiple self.x_embedder = operation_settings.get("operations").Linear( in_features=patch_size * patch_size * in_channels, @@ -411,6 +407,7 @@ class NextDiT(nn.Module): norm_eps, qk_norm, modulation=True, + z_image_modulation=z_image_modulation, operation_settings=operation_settings, ) for layer_id in range(n_refiner_layers) @@ -434,7 +431,7 @@ class NextDiT(nn.Module): ] ) - self.t_embedder = TimestepEmbedder(min(dim, 1024), **operation_settings) + self.t_embedder = TimestepEmbedder(min(dim, 1024), output_size=256 if z_image_modulation else None, **operation_settings) self.cap_embedder = nn.Sequential( operation_settings.get("operations").RMSNorm(cap_feat_dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")), operation_settings.get("operations").Linear( @@ -457,18 +454,24 @@ class NextDiT(nn.Module): ffn_dim_multiplier, norm_eps, qk_norm, + z_image_modulation=z_image_modulation, + attn_out_bias=False, operation_settings=operation_settings, ) for layer_id in range(n_layers) ] ) self.norm_final = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) - self.final_layer = FinalLayer(dim, patch_size, self.out_channels, operation_settings=operation_settings) + self.final_layer = FinalLayer(dim, patch_size, self.out_channels, z_image_modulation=z_image_modulation, operation_settings=operation_settings) + + if self.pad_tokens_multiple is not None: + self.x_pad_token = nn.Parameter(torch.empty((1, dim), device=device, dtype=dtype)) + self.cap_pad_token = nn.Parameter(torch.empty((1, dim), device=device, dtype=dtype)) assert (dim // n_heads) == sum(axes_dims) self.axes_dims = axes_dims self.axes_lens = axes_lens - self.rope_embedder = EmbedND(dim=dim // n_heads, theta=10000.0, axes_dim=axes_dims) + self.rope_embedder = EmbedND(dim=dim // n_heads, theta=rope_theta, axes_dim=axes_dims) self.dim = dim self.n_heads = n_heads @@ -503,108 +506,42 @@ class NextDiT(nn.Module): bsz = len(x) pH = pW = self.patch_size device = x[0].device - dtype = x[0].dtype - if cap_mask is not 
None: - l_effective_cap_len = cap_mask.sum(dim=1).tolist() - else: - l_effective_cap_len = [num_tokens] * bsz + if self.pad_tokens_multiple is not None: + pad_extra = (-cap_feats.shape[1]) % self.pad_tokens_multiple + cap_feats = torch.cat((cap_feats, self.cap_pad_token.to(device=cap_feats.device, dtype=cap_feats.dtype).unsqueeze(0).repeat(cap_feats.shape[0], pad_extra, 1)), dim=1) - if cap_mask is not None and not torch.is_floating_point(cap_mask): - cap_mask = (cap_mask - 1).to(dtype) * torch.finfo(dtype).max + cap_pos_ids = torch.zeros(bsz, cap_feats.shape[1], 3, dtype=torch.float32, device=device) + cap_pos_ids[:, :, 0] = torch.arange(cap_feats.shape[1], dtype=torch.float32, device=device) + 1.0 - img_sizes = [(img.size(1), img.size(2)) for img in x] - l_effective_img_len = [(H // pH) * (W // pW) for (H, W) in img_sizes] + B, C, H, W = x.shape + x = self.x_embedder(x.view(B, C, H // pH, pH, W // pW, pW).permute(0, 2, 4, 3, 5, 1).flatten(3).flatten(1, 2)) - max_seq_len = max( - (cap_len+img_len for cap_len, img_len in zip(l_effective_cap_len, l_effective_img_len)) - ) - max_cap_len = max(l_effective_cap_len) - max_img_len = max(l_effective_img_len) + H_tokens, W_tokens = H // pH, W // pW + x_pos_ids = torch.zeros((bsz, x.shape[1], 3), dtype=torch.float32, device=device) + x_pos_ids[:, :, 0] = cap_feats.shape[1] + 1 + x_pos_ids[:, :, 1] = torch.arange(H_tokens, dtype=torch.float32, device=device).view(-1, 1).repeat(1, W_tokens).flatten() + x_pos_ids[:, :, 2] = torch.arange(W_tokens, dtype=torch.float32, device=device).view(1, -1).repeat(H_tokens, 1).flatten() - position_ids = torch.zeros(bsz, max_seq_len, 3, dtype=torch.float32, device=device) + if self.pad_tokens_multiple is not None: + pad_extra = (-x.shape[1]) % self.pad_tokens_multiple + x = torch.cat((x, self.x_pad_token.to(device=x.device, dtype=x.dtype).unsqueeze(0).repeat(x.shape[0], pad_extra, 1)), dim=1) + x_pos_ids = torch.nn.functional.pad(x_pos_ids, (0, 0, 0, pad_extra)) - for i in range(bsz): - cap_len = l_effective_cap_len[i] - img_len = l_effective_img_len[i] - H, W = img_sizes[i] - H_tokens, W_tokens = H // pH, W // pW - assert H_tokens * W_tokens == img_len - - rope_options = transformer_options.get("rope_options", None) - h_scale = 1.0 - w_scale = 1.0 - h_start = 0 - w_start = 0 - if rope_options is not None: - h_scale = rope_options.get("scale_y", 1.0) - w_scale = rope_options.get("scale_x", 1.0) - - h_start = rope_options.get("shift_y", 0.0) - w_start = rope_options.get("shift_x", 0.0) - - position_ids[i, :cap_len, 0] = torch.arange(cap_len, dtype=torch.float32, device=device) - position_ids[i, cap_len:cap_len+img_len, 0] = cap_len - row_ids = (torch.arange(H_tokens, dtype=torch.float32, device=device) * h_scale + h_start).view(-1, 1).repeat(1, W_tokens).flatten() - col_ids = (torch.arange(W_tokens, dtype=torch.float32, device=device) * w_scale + w_start).view(1, -1).repeat(H_tokens, 1).flatten() - position_ids[i, cap_len:cap_len+img_len, 1] = row_ids - position_ids[i, cap_len:cap_len+img_len, 2] = col_ids - - freqs_cis = self.rope_embedder(position_ids).movedim(1, 2).to(dtype) - - # build freqs_cis for cap and image individually - cap_freqs_cis_shape = list(freqs_cis.shape) - # cap_freqs_cis_shape[1] = max_cap_len - cap_freqs_cis_shape[1] = cap_feats.shape[1] - cap_freqs_cis = torch.zeros(*cap_freqs_cis_shape, device=device, dtype=freqs_cis.dtype) - - img_freqs_cis_shape = list(freqs_cis.shape) - img_freqs_cis_shape[1] = max_img_len - img_freqs_cis = torch.zeros(*img_freqs_cis_shape, device=device, 
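# A hedged editor's sketch of the position-id layout built above:
# caption tokens take first-axis ids 1..L, every image token shares id
# L + 1, and row/column indices fill the remaining two axes.
import torch
L, H_t, W_t = 4, 2, 3                       # toy caption/grid sizes
img_ids = torch.zeros(H_t * W_t, 3)
img_ids[:, 0] = L + 1
img_ids[:, 1] = torch.arange(H_t).view(-1, 1).repeat(1, W_t).flatten()
img_ids[:, 2] = torch.arange(W_t).view(1, -1).repeat(H_t, 1).flatten()
# rows: [0, 0, 0, 1, 1, 1]; cols: [0, 1, 2, 0, 1, 2]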
dtype=freqs_cis.dtype) - - for i in range(bsz): - cap_len = l_effective_cap_len[i] - img_len = l_effective_img_len[i] - cap_freqs_cis[i, :cap_len] = freqs_cis[i, :cap_len] - img_freqs_cis[i, :img_len] = freqs_cis[i, cap_len:cap_len+img_len] + freqs_cis = self.rope_embedder(torch.cat((cap_pos_ids, x_pos_ids), dim=1)).movedim(1, 2) # refine context for layer in self.context_refiner: - cap_feats = layer(cap_feats, cap_mask, cap_freqs_cis, transformer_options=transformer_options) + cap_feats = layer(cap_feats, cap_mask, freqs_cis[:, :cap_pos_ids.shape[1]], transformer_options=transformer_options) - # refine image - flat_x = [] - for i in range(bsz): - img = x[i] - C, H, W = img.size() - img = img.view(C, H // pH, pH, W // pW, pW).permute(1, 3, 2, 4, 0).flatten(2).flatten(0, 1) - flat_x.append(img) - x = flat_x - padded_img_embed = torch.zeros(bsz, max_img_len, x[0].shape[-1], device=device, dtype=x[0].dtype) - padded_img_mask = torch.zeros(bsz, max_img_len, dtype=dtype, device=device) - for i in range(bsz): - padded_img_embed[i, :l_effective_img_len[i]] = x[i] - padded_img_mask[i, l_effective_img_len[i]:] = -torch.finfo(dtype).max - - padded_img_embed = self.x_embedder(padded_img_embed) - padded_img_mask = padded_img_mask.unsqueeze(1) + padded_img_mask = None for layer in self.noise_refiner: - padded_img_embed = layer(padded_img_embed, padded_img_mask, img_freqs_cis, t, transformer_options=transformer_options) - - if cap_mask is not None: - mask = torch.zeros(bsz, max_seq_len, dtype=dtype, device=device) - mask[:, :max_cap_len] = cap_mask[:, :max_cap_len] - else: - mask = None - - padded_full_embed = torch.zeros(bsz, max_seq_len, self.dim, device=device, dtype=x[0].dtype) - for i in range(bsz): - cap_len = l_effective_cap_len[i] - img_len = l_effective_img_len[i] - - padded_full_embed[i, :cap_len] = cap_feats[i, :cap_len] - padded_full_embed[i, cap_len:cap_len+img_len] = padded_img_embed[i, :img_len] + x = layer(x, padded_img_mask, freqs_cis[:, cap_pos_ids.shape[1]:], t, transformer_options=transformer_options) + padded_full_embed = torch.cat((cap_feats, x), dim=1) + mask = None + img_sizes = [(H, W)] * bsz + l_effective_cap_len = [cap_feats.shape[1]] * bsz return padded_full_embed, mask, img_sizes, l_effective_cap_len, freqs_cis def forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs): @@ -627,7 +564,7 @@ class NextDiT(nn.Module): y: (N,) tensor of text tokens/features """ - t = self.t_embedder(t, dtype=x.dtype) # (N, D) + t = self.t_embedder(t * self.time_scale, dtype=x.dtype) # (N, D) adaln_input = t cap_feats = self.cap_embedder(cap_feats) # (N, L, D) # todo check if able to batchify w.o. redundant compute diff --git a/comfy/ldm/modules/diffusionmodules/mmdit.py b/comfy/ldm/modules/diffusionmodules/mmdit.py index 42f406f1a..0dc8fe789 100644 --- a/comfy/ldm/modules/diffusionmodules/mmdit.py +++ b/comfy/ldm/modules/diffusionmodules/mmdit.py @@ -211,12 +211,14 @@ class TimestepEmbedder(nn.Module): Embeds scalar timesteps into vector representations. 
""" - def __init__(self, hidden_size, frequency_embedding_size=256, dtype=None, device=None, operations=None): + def __init__(self, hidden_size, frequency_embedding_size=256, output_size=None, dtype=None, device=None, operations=None): super().__init__() + if output_size is None: + output_size = hidden_size self.mlp = nn.Sequential( operations.Linear(frequency_embedding_size, hidden_size, bias=True, dtype=dtype, device=device), nn.SiLU(), - operations.Linear(hidden_size, hidden_size, bias=True, dtype=dtype, device=device), + operations.Linear(hidden_size, output_size, bias=True, dtype=dtype, device=device), ) self.frequency_embedding_size = frequency_embedding_size diff --git a/comfy/model_base.py b/comfy/model_base.py index cad79ecbd..cc21b1de9 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1114,9 +1114,13 @@ class Lumina2(BaseModel): if torch.numel(attention_mask) != attention_mask.sum(): out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) out['num_tokens'] = comfy.conds.CONDConstant(max(1, torch.sum(attention_mask).item())) + cross_attn = kwargs.get("cross_attn", None) if cross_attn is not None: out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + if 'num_tokens' not in out: + out['num_tokens'] = comfy.conds.CONDConstant(cross_attn.shape[1]) + return out class WAN21(BaseModel): diff --git a/comfy/model_detection.py b/comfy/model_detection.py index b2ba1459d..7afe4a798 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -416,14 +416,31 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["image_model"] = "lumina2" dit_config["patch_size"] = 2 dit_config["in_channels"] = 16 - dit_config["dim"] = 2304 - dit_config["cap_feat_dim"] = state_dict['{}cap_embedder.1.weight'.format(key_prefix)].shape[1] + w = state_dict['{}cap_embedder.1.weight'.format(key_prefix)] + dit_config["dim"] = w.shape[0] + dit_config["cap_feat_dim"] = w.shape[1] dit_config["n_layers"] = count_blocks(state_dict_keys, '{}layers.'.format(key_prefix) + '{}.') - dit_config["n_heads"] = 24 - dit_config["n_kv_heads"] = 8 dit_config["qk_norm"] = True - dit_config["axes_dims"] = [32, 32, 32] - dit_config["axes_lens"] = [300, 512, 512] + + if dit_config["dim"] == 2304: # Original Lumina 2 + dit_config["n_heads"] = 24 + dit_config["n_kv_heads"] = 8 + dit_config["axes_dims"] = [32, 32, 32] + dit_config["axes_lens"] = [300, 512, 512] + dit_config["rope_theta"] = 10000.0 + dit_config["ffn_dim_multiplier"] = 4.0 + elif dit_config["dim"] == 3840: # Z image + dit_config["n_heads"] = 30 + dit_config["n_kv_heads"] = 30 + dit_config["axes_dims"] = [32, 48, 48] + dit_config["axes_lens"] = [1536, 512, 512] + dit_config["rope_theta"] = 256.0 + dit_config["ffn_dim_multiplier"] = (8.0 / 3.0) + dit_config["z_image_modulation"] = True + dit_config["time_scale"] = 1000.0 + if '{}cap_pad_token'.format(key_prefix) in state_dict_keys: + dit_config["pad_tokens_multiple"] = 32 + return dit_config if '{}head.modulation'.format(key_prefix) in state_dict_keys: # Wan 2.1 diff --git a/comfy/sd.py b/comfy/sd.py index 14dd8944c..350fae92b 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -52,6 +52,7 @@ import comfy.text_encoders.ace import comfy.text_encoders.omnigen2 import comfy.text_encoders.qwen_image import comfy.text_encoders.hunyuan_image +import comfy.text_encoders.z_image import comfy.model_patcher import comfy.lora @@ -953,6 +954,8 @@ class TEModel(Enum): GEMMA_3_4B = 13 MISTRAL3_24B = 14 MISTRAL3_24B_PRUNED_FLUX2 = 15 + QWEN3_4B = 16 + def detect_te_model(sd): if 
"text_model.encoder.layers.30.mlp.fc1.weight" in sd: @@ -985,6 +988,8 @@ def detect_te_model(sd): if weight.shape[0] == 512: return TEModel.QWEN25_7B if "model.layers.0.post_attention_layernorm.weight" in sd: + if 'model.layers.0.self_attn.q_norm.weight' in sd: + return TEModel.QWEN3_4B weight = sd['model.layers.0.post_attention_layernorm.weight'] if weight.shape[0] == 5120: if "model.layers.39.post_attention_layernorm.weight" in sd: @@ -1110,6 +1115,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.flux.flux2_te(**llama_detect(clip_data), pruned=te_model == TEModel.MISTRAL3_24B_PRUNED_FLUX2) clip_target.tokenizer = comfy.text_encoders.flux.Flux2Tokenizer tokenizer_data["tekken_model"] = clip_data[0].get("tekken_model", None) + elif te_model == TEModel.QWEN3_4B: + clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer else: # clip_l if clip_type == CLIPType.SD3: diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index d47ed27bc..cd4b5f76c 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -78,6 +78,28 @@ class Qwen25_3BConfig: rope_scale = None final_norm: bool = True +@dataclass +class Qwen3_4BConfig: + vocab_size: int = 151936 + hidden_size: int = 2560 + intermediate_size: int = 9728 + num_hidden_layers: int = 36 + num_attention_heads: int = 32 + num_key_value_heads: int = 8 + max_position_embeddings: int = 40960 + rms_norm_eps: float = 1e-6 + rope_theta: float = 1000000.0 + transformer_type: str = "llama" + head_dim = 128 + rms_norm_add = False + mlp_activation = "silu" + qkv_bias = False + rope_dims = None + q_norm = "gemma3" + k_norm = "gemma3" + rope_scale = None + final_norm: bool = True + @dataclass class Qwen25_7BVLI_Config: vocab_size: int = 152064 @@ -511,6 +533,15 @@ class Qwen25_3B(BaseLlama, torch.nn.Module): self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) self.dtype = dtype +class Qwen3_4B(BaseLlama, torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + super().__init__() + config = Qwen3_4BConfig(**config_dict) + self.num_layers = config.num_hidden_layers + + self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) + self.dtype = dtype + class Qwen25_7BVLI(BaseLlama, torch.nn.Module): def __init__(self, config_dict, dtype, device, operations): super().__init__() diff --git a/comfy/text_encoders/z_image.py b/comfy/text_encoders/z_image.py new file mode 100644 index 000000000..bb9273b20 --- /dev/null +++ b/comfy/text_encoders/z_image.py @@ -0,0 +1,48 @@ +from transformers import Qwen2Tokenizer +import comfy.text_encoders.llama +from comfy import sd1_clip +import os + +class Qwen3Tokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") + super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2560, embedding_key='qwen3_4b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=151643, tokenizer_data=tokenizer_data) + + +class ZImageTokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen3_4b", 
tokenizer=Qwen3Tokenizer) + self.llama_template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" + + def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs): + if llama_template is None: + llama_text = self.llama_template.format(text) + else: + llama_text = llama_template.format(text) + + tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs) + return tokens + + +class Qwen3_4BModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) + + +class ZImageTEModel(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, name="qwen3_4b", clip_model=Qwen3_4BModel, model_options=model_options) + + +def te(dtype_llama=None, llama_scaled_fp8=None, llama_quantization_metadata=None): + class ZImageTEModel_(ZImageTEModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + model_options = model_options.copy() + model_options["scaled_fp8"] = llama_scaled_fp8 + if dtype_llama is not None: + dtype = dtype_llama + if llama_quantization_metadata is not None: + model_options["quantization_metadata"] = llama_quantization_metadata + super().__init__(device=device, dtype=dtype, model_options=model_options) + return ZImageTEModel_ From 0e24dbb19f34f242edb77c550396cf6806f7b22f Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 16:02:51 -0800 Subject: [PATCH 0927/1073] Adjustments to Z Image. (#10893) --- comfy/supported_models.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 8fe8e63f6..af8120400 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -21,6 +21,7 @@ import comfy.text_encoders.ace import comfy.text_encoders.omnigen2 import comfy.text_encoders.qwen_image import comfy.text_encoders.hunyuan_image +import comfy.text_encoders.z_image from . import supported_models_base from . 
import latent_formats @@ -994,7 +995,7 @@ class Lumina2(supported_models_base.BASE): "shift": 6.0, } - memory_usage_factor = 1.2 + memory_usage_factor = 1.4 unet_extra_config = {} latent_format = latent_formats.Flux @@ -1013,6 +1014,24 @@ class Lumina2(supported_models_base.BASE): hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}gemma2_2b.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.lumina2.LuminaTokenizer, comfy.text_encoders.lumina2.te(**hunyuan_detect)) +class ZImage(Lumina2): + unet_config = { + "image_model": "lumina2", + "dim": 3840, + } + + sampling_settings = { + "multiplier": 1.0, + "shift": 3.0, + } + + memory_usage_factor = 1.7 + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen3_4b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.z_image.ZImageTokenizer, comfy.text_encoders.z_image.te(**hunyuan_detect)) + class WAN21_T2V(supported_models_base.BASE): unet_config = { "image_model": "wan2.1", @@ -1453,7 +1472,7 @@ class HunyuanVideo15_SR_Distilled(HunyuanVideo): hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_video.HunyuanVideo15Tokenizer, comfy.text_encoders.hunyuan_image.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage, Flux2] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, ZImage, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage, Flux2] models += [SVD_img2vid] From bdb10a583f1b1e495ee00dbd1674f11016a6a93e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 21:07:58 -0800 Subject: [PATCH 0928/1073] Fix loras not working on mixed fp8. 
(#10899) --- comfy/model_patcher.py | 2 +- comfy/ops.py | 22 +++++++++++++++++++++- comfy/quant_ops.py | 21 ++++++++++++++------- comfy/weight_adapter/lora.py | 1 + 4 files changed, 37 insertions(+), 9 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 6551ced5a..73adc7f70 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -132,7 +132,7 @@ class LowVramPatch: def __call__(self, weight): intermediate_dtype = weight.dtype if self.convert_func is not None: - weight = self.convert_func(weight.to(dtype=torch.float32, copy=True), inplace=True) + weight = self.convert_func(weight, inplace=False) if intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops intermediate_dtype = torch.float32 diff --git a/comfy/ops.py b/comfy/ops.py index 785aa1c9f..a0ff4e8f1 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -117,6 +117,8 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of if weight_has_function or weight.dtype != dtype: with wf_context: weight = weight.to(dtype=dtype) + if isinstance(weight, QuantizedTensor): + weight = weight.dequantize() for f in s.weight_function: weight = f(weight) @@ -502,7 +504,7 @@ def scaled_fp8_ops(fp8_matrix_mult=False, scale_input=False, override_dtype=None weight *= self.scale_weight.to(device=weight.device, dtype=weight.dtype) return weight else: - return weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype) + return weight.to(dtype=torch.float32) * self.scale_weight.to(device=weight.device, dtype=torch.float32) def set_weight(self, weight, inplace_update=False, seed=None, return_weight=False, **kwargs): weight = comfy.float.stochastic_rounding(weight / self.scale_weight.to(device=weight.device, dtype=weight.dtype), self.weight.dtype, seed=seed) @@ -643,6 +645,24 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful not isinstance(input, QuantizedTensor)): input = QuantizedTensor.from_float(input, self.layout_type, scale=self.input_scale, dtype=self.weight.dtype) return self._forward(input, self.weight, self.bias) + + def convert_weight(self, weight, inplace=False, **kwargs): + if isinstance(weight, QuantizedTensor): + return weight.dequantize() + else: + return weight + + def set_weight(self, weight, inplace_update=False, seed=None, return_weight=False, **kwargs): + if getattr(self, 'layout_type', None) is not None: + weight = QuantizedTensor.from_float(weight, self.layout_type, scale=None, dtype=self.weight.dtype, stochastic_rounding=seed, inplace_ops=True) + else: + weight = weight.to(self.weight.dtype) + if return_weight: + return weight + + assert inplace_update is False # TODO: eventually remove the inplace_update stuff + self.weight = torch.nn.Parameter(weight, requires_grad=False) + return MixedPrecisionOps def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, scaled_fp8=None, model_config=None): diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index 0c16bcf8d..d2f3e7397 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -1,6 +1,7 @@ import torch import logging from typing import Tuple, Dict +import comfy.float _LAYOUT_REGISTRY = {} _GENERIC_UTILS = {} @@ -393,7 +394,7 @@ class TensorCoreFP8Layout(QuantizedLayout): - orig_dtype: Original dtype before quantization (for casting back) """ @classmethod - def quantize(cls, tensor, scale=None, dtype=torch.float8_e4m3fn): + def quantize(cls, 
tensor, scale=None, dtype=torch.float8_e4m3fn, stochastic_rounding=0, inplace_ops=False): orig_dtype = tensor.dtype if scale is None: @@ -403,17 +404,23 @@ class TensorCoreFP8Layout(QuantizedLayout): scale = torch.tensor(scale) scale = scale.to(device=tensor.device, dtype=torch.float32) - tensor_scaled = tensor * (1.0 / scale).to(tensor.dtype) - # TODO: uncomment this if it's actually needed because the clamp has a small performance penality' - lp_amax = torch.finfo(dtype).max - torch.clamp(tensor_scaled, min=-lp_amax, max=lp_amax, out=tensor_scaled) - qdata = tensor_scaled.to(dtype, memory_format=torch.contiguous_format) + if inplace_ops: + tensor *= (1.0 / scale).to(tensor.dtype) + else: + tensor = tensor * (1.0 / scale).to(tensor.dtype) + + if stochastic_rounding > 0: + tensor = comfy.float.stochastic_rounding(tensor, dtype=dtype, seed=stochastic_rounding) + else: + lp_amax = torch.finfo(dtype).max + torch.clamp(tensor, min=-lp_amax, max=lp_amax, out=tensor) + tensor = tensor.to(dtype, memory_format=torch.contiguous_format) layout_params = { 'scale': scale, 'orig_dtype': orig_dtype } - return qdata, layout_params + return tensor, layout_params @staticmethod def dequantize(qdata, scale, orig_dtype, **kwargs): diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py index 4db004e50..3cc60bb1b 100644 --- a/comfy/weight_adapter/lora.py +++ b/comfy/weight_adapter/lora.py @@ -194,6 +194,7 @@ class LoRAAdapter(WeightAdapterBase): lora_diff = torch.mm( mat1.flatten(start_dim=1), mat2.flatten(start_dim=1) ).reshape(weight.shape) + del mat1, mat2 if dora_scale is not None: weight = weight_decompose( dora_scale, From 90b3995ec842335e44d70e0521ff6ff6c3ff9aaa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 26 Nov 2025 00:34:15 -0500 Subject: [PATCH 0929/1073] ComfyUI v0.3.74 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index f8818838e..b565c7367 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.73" +__version__ = "0.3.74" diff --git a/pyproject.toml b/pyproject.toml index 7e4bac12d..ccf0fcdb9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.73" +version = "0.3.74" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 58b85746618e2bc2dd32024c89403926aad59f48 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 25 Nov 2025 23:36:19 -0800 Subject: [PATCH 0930/1073] Fix Flux2 reference image mem estimation. 
(#10905) --- comfy/model_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index cc21b1de9..9b76c285e 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -926,7 +926,7 @@ class Flux(BaseModel): out = {} ref_latents = kwargs.get("reference_latents", None) if ref_latents is not None: - out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16]) + out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()[2:]), ref_latents))]) return out class Flux2(Flux): From 8402c8700a29a97bc5d706d6a0b14c41bc2c2d8a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 26 Nov 2025 02:41:13 -0500 Subject: [PATCH 0931/1073] ComfyUI version v0.3.75 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index b565c7367..fa4b4f4b0 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.74" +__version__ = "0.3.75" diff --git a/pyproject.toml b/pyproject.toml index ccf0fcdb9..9009e65fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.74" +version = "0.3.75" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From f16219e3aadcb7a301a1a313ab8989c3ebe53764 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 26 Nov 2025 01:00:43 -0800 Subject: [PATCH 0932/1073] Add cheap latent preview for flux 2. (#10907) Thank you to the person who calculated them. You saved me a percent of my time. 
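For context on the factor tables below: a latent2rgb previewer is a per-channel linear map from latent channels to RGB, which is what makes it cheap compared to decoding through the VAE or TAESD. A minimal sketch of the idea, assuming a (B, C, H, W) latent, a (C, 3) factor matrix and a 3-element bias (illustrative names, not part of this patch):

import torch

def latent_to_rgb_preview(x0, factors, bias):
    # x0: (B, C, H, W) latent; factors: (C, 3); bias: (3,)
    # One einsum per preview frame instead of a full decoder pass.
    rgb = torch.einsum("bchw,cr->brhw", x0, factors)
    return rgb + bias.view(1, 3, 1, 1)

Flux 2 packs a 128-channel latent, so the patch below first unpacks it to 32 channels at twice the spatial resolution (the latent_rgb_factors_reshape lambda) and then applies a 32-row factor matrix like the one above.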
--- comfy/latent_formats.py | 40 ++++++++++++++++++++++++++++++++++++++++ latent_preview.py | 7 +++++-- 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index e98c7d6d8..8e110f45d 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -6,6 +6,7 @@ class LatentFormat: latent_dimensions = 2 latent_rgb_factors = None latent_rgb_factors_bias = None + latent_rgb_factors_reshape = None taesd_decoder_name = None def process_in(self, latent): @@ -181,6 +182,45 @@ class Flux(SD3): class Flux2(LatentFormat): latent_channels = 128 + def __init__(self): + self.latent_rgb_factors =[ + [0.0058, 0.0113, 0.0073], + [0.0495, 0.0443, 0.0836], + [-0.0099, 0.0096, 0.0644], + [0.2144, 0.3009, 0.3652], + [0.0166, -0.0039, -0.0054], + [0.0157, 0.0103, -0.0160], + [-0.0398, 0.0902, -0.0235], + [-0.0052, 0.0095, 0.0109], + [-0.3527, -0.2712, -0.1666], + [-0.0301, -0.0356, -0.0180], + [-0.0107, 0.0078, 0.0013], + [0.0746, 0.0090, -0.0941], + [0.0156, 0.0169, 0.0070], + [-0.0034, -0.0040, -0.0114], + [0.0032, 0.0181, 0.0080], + [-0.0939, -0.0008, 0.0186], + [0.0018, 0.0043, 0.0104], + [0.0284, 0.0056, -0.0127], + [-0.0024, -0.0022, -0.0030], + [0.1207, -0.0026, 0.0065], + [0.0128, 0.0101, 0.0142], + [0.0137, -0.0072, -0.0007], + [0.0095, 0.0092, -0.0059], + [0.0000, -0.0077, -0.0049], + [-0.0465, -0.0204, -0.0312], + [0.0095, 0.0012, -0.0066], + [0.0290, -0.0034, 0.0025], + [0.0220, 0.0169, -0.0048], + [-0.0332, -0.0457, -0.0468], + [-0.0085, 0.0389, 0.0609], + [-0.0076, 0.0003, -0.0043], + [-0.0111, -0.0460, -0.0614], + ] + + self.latent_rgb_factors_bias = [-0.0329, -0.0718, -0.0851] + self.latent_rgb_factors_reshape = lambda t: t.reshape(t.shape[0], 32, 2, 2, t.shape[-2], t.shape[-1]).permute(0, 1, 4, 2, 5, 3).reshape(t.shape[0], 32, t.shape[-2] * 2, t.shape[-1] * 2) + def process_in(self, latent): return latent diff --git a/latent_preview.py b/latent_preview.py index 95d3cb733..ddf6dcf49 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -37,13 +37,16 @@ class TAESDPreviewerImpl(LatentPreviewer): class Latent2RGBPreviewer(LatentPreviewer): - def __init__(self, latent_rgb_factors, latent_rgb_factors_bias=None): + def __init__(self, latent_rgb_factors, latent_rgb_factors_bias=None, latent_rgb_factors_reshape=None): self.latent_rgb_factors = torch.tensor(latent_rgb_factors, device="cpu").transpose(0, 1) self.latent_rgb_factors_bias = None if latent_rgb_factors_bias is not None: self.latent_rgb_factors_bias = torch.tensor(latent_rgb_factors_bias, device="cpu") + self.latent_rgb_factors_reshape = latent_rgb_factors_reshape def decode_latent_to_preview(self, x0): + if self.latent_rgb_factors_reshape is not None: + x0 = self.latent_rgb_factors_reshape(x0) self.latent_rgb_factors = self.latent_rgb_factors.to(dtype=x0.dtype, device=x0.device) if self.latent_rgb_factors_bias is not None: self.latent_rgb_factors_bias = self.latent_rgb_factors_bias.to(dtype=x0.dtype, device=x0.device) @@ -85,7 +88,7 @@ def get_previewer(device, latent_format): if previewer is None: if latent_format.latent_rgb_factors is not None: - previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors, latent_format.latent_rgb_factors_bias) + previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors, latent_format.latent_rgb_factors_bias, latent_format.latent_rgb_factors_reshape) return previewer def prepare_callback(model, steps, x0_output_dict=None): From 8938aa3f3064415758fa8f3a628476535a676183 Mon Sep 17 00:00:00 2001 From: Alexander Piskun 
<13381981+bigcat88@users.noreply.github.com> Date: Wed, 26 Nov 2025 19:14:02 +0200 Subject: [PATCH 0933/1073] add Veo3 First-Last-Frame node (#10878) --- comfy_api_nodes/apis/veo_api.py | 38 +++----- comfy_api_nodes/nodes_veo2.py | 155 ++++++++++++++++++++++++++++++++ 2 files changed, 168 insertions(+), 25 deletions(-) diff --git a/comfy_api_nodes/apis/veo_api.py b/comfy_api_nodes/apis/veo_api.py index a55137afb..8328d1aa4 100644 --- a/comfy_api_nodes/apis/veo_api.py +++ b/comfy_api_nodes/apis/veo_api.py @@ -1,34 +1,21 @@ -from typing import Optional, Union -from enum import Enum +from typing import Optional from pydantic import BaseModel, Field -class Image2(BaseModel): - bytesBase64Encoded: str - gcsUri: Optional[str] = None - mimeType: Optional[str] = None +class VeoRequestInstanceImage(BaseModel): + bytesBase64Encoded: str | None = Field(None) + gcsUri: str | None = Field(None) + mimeType: str | None = Field(None) -class Image3(BaseModel): - bytesBase64Encoded: Optional[str] = None - gcsUri: str - mimeType: Optional[str] = None - - -class Instance1(BaseModel): - image: Optional[Union[Image2, Image3]] = Field( - None, description='Optional image to guide video generation' - ) +class VeoRequestInstance(BaseModel): + image: VeoRequestInstanceImage | None = Field(None) + lastFrame: VeoRequestInstanceImage | None = Field(None) prompt: str = Field(..., description='Text description of the video') -class PersonGeneration1(str, Enum): - ALLOW = 'ALLOW' - BLOCK = 'BLOCK' - - -class Parameters1(BaseModel): +class VeoRequestParameters(BaseModel): aspectRatio: Optional[str] = Field(None, examples=['16:9']) durationSeconds: Optional[int] = None enhancePrompt: Optional[bool] = None @@ -37,17 +24,18 @@ class Parameters1(BaseModel): description='Generate audio for the video. 
Only supported by veo 3 models.', ) negativePrompt: Optional[str] = None - personGeneration: Optional[PersonGeneration1] = None + personGeneration: str | None = Field(None, description="ALLOW or BLOCK") sampleCount: Optional[int] = None seed: Optional[int] = None storageUri: Optional[str] = Field( None, description='Optional Cloud Storage URI to upload the video' ) + resolution: str | None = Field(None) class VeoGenVidRequest(BaseModel): - instances: Optional[list[Instance1]] = None - parameters: Optional[Parameters1] = None + instances: list[VeoRequestInstance] | None = Field(None) + parameters: VeoRequestParameters | None = Field(None) class VeoGenVidResponse(BaseModel): diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index d37e9e9b4..a54dc13ab 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -1,6 +1,7 @@ import base64 from io import BytesIO +import torch from typing_extensions import override from comfy_api.input_impl.video_types import VideoFromFile @@ -10,6 +11,9 @@ from comfy_api_nodes.apis.veo_api import ( VeoGenVidPollResponse, VeoGenVidRequest, VeoGenVidResponse, + VeoRequestInstance, + VeoRequestInstanceImage, + VeoRequestParameters, ) from comfy_api_nodes.util import ( ApiEndpoint, @@ -346,12 +350,163 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode): ) +class Veo3FirstLastFrameNode(IO.ComfyNode): + + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="Veo3FirstLastFrameNode", + display_name="Google Veo 3 First-Last-Frame to Video", + category="api node/video/Veo", + description="Generate video using prompt and first and last frames.", + inputs=[ + IO.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text description of the video", + ), + IO.String.Input( + "negative_prompt", + multiline=True, + default="", + tooltip="Negative text prompt to guide what to avoid in the video", + ), + IO.Combo.Input("resolution", options=["720p", "1080p"]), + IO.Combo.Input( + "aspect_ratio", + options=["16:9", "9:16"], + default="16:9", + tooltip="Aspect ratio of the output video", + ), + IO.Int.Input( + "duration", + default=8, + min=4, + max=8, + step=2, + display_mode=IO.NumberDisplay.slider, + tooltip="Duration of the output video in seconds", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFF, + step=1, + display_mode=IO.NumberDisplay.number, + control_after_generate=True, + tooltip="Seed for video generation", + ), + IO.Image.Input("first_frame", tooltip="Start frame"), + IO.Image.Input("last_frame", tooltip="End frame"), + IO.Combo.Input( + "model", + options=["veo-3.1-generate", "veo-3.1-fast-generate"], + default="veo-3.1-fast-generate", + ), + IO.Boolean.Input( + "generate_audio", + default=True, + tooltip="Generate audio for the video.", + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + prompt: str, + negative_prompt: str, + resolution: str, + aspect_ratio: str, + duration: int, + seed: int, + first_frame: torch.Tensor, + last_frame: torch.Tensor, + model: str, + generate_audio: bool, + ): + model = MODELS_MAP[model] + initial_response = await sync_op( + cls, + ApiEndpoint(path=f"/proxy/veo/{model}/generate", method="POST"), + response_model=VeoGenVidResponse, + data=VeoGenVidRequest( + instances=[ + VeoRequestInstance( + prompt=prompt, + image=VeoRequestInstanceImage( + 
bytesBase64Encoded=tensor_to_base64_string(first_frame), mimeType="image/png" + ), + lastFrame=VeoRequestInstanceImage( + bytesBase64Encoded=tensor_to_base64_string(last_frame), mimeType="image/png" + ), + ), + ], + parameters=VeoRequestParameters( + aspectRatio=aspect_ratio, + personGeneration="ALLOW", + durationSeconds=duration, + enhancePrompt=True, # cannot be False for Veo3 + seed=seed, + generateAudio=generate_audio, + negativePrompt=negative_prompt, + resolution=resolution, + ), + ), + ) + poll_response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/veo/{model}/poll", method="POST"), + response_model=VeoGenVidPollResponse, + status_extractor=lambda r: "completed" if r.done else "pending", + data=VeoGenVidPollRequest( + operationName=initial_response.name, + ), + poll_interval=5.0, + estimated_duration=AVERAGE_DURATION_VIDEO_GEN, + ) + + if poll_response.error: + raise Exception(f"Veo API error: {poll_response.error.message} (code: {poll_response.error.code})") + + response = poll_response.response + filtered_count = response.raiMediaFilteredCount + if filtered_count: + reasons = response.raiMediaFilteredReasons or [] + reason_part = f": {reasons[0]}" if reasons else "" + raise Exception( + f"Content blocked by Google's Responsible AI filters{reason_part} " + f"({filtered_count} video{'s' if filtered_count != 1 else ''} filtered)." + ) + + if response.videos: + video = response.videos[0] + if video.bytesBase64Encoded: + return IO.NodeOutput(VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded)))) + if video.gcsUri: + return IO.NodeOutput(await download_url_to_video_output(video.gcsUri)) + raise Exception("Video returned but no data or URL was provided") + raise Exception("Video generation completed but no video was returned") + + class VeoExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: return [ VeoVideoGenerationNode, Veo3VideoGenerationNode, + Veo3FirstLastFrameNode, ] From 1105e0d139001ad602d0f883406bfce41e54ae67 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 26 Nov 2025 19:23:14 +0200 Subject: [PATCH 0934/1073] improve UX for batch uploads in upload_images_to_comfyapi (#10913) --- comfy_api_nodes/util/upload_helpers.py | 43 +++++++++++++------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py index 632450d9b..b9019841f 100644 --- a/comfy_api_nodes/util/upload_helpers.py +++ b/comfy_api_nodes/util/upload_helpers.py @@ -4,7 +4,7 @@ import logging import time import uuid from io import BytesIO -from typing import Optional, Union +from typing import Optional from urllib.parse import urlparse import aiohttp @@ -48,8 +48,9 @@ async def upload_images_to_comfyapi( image: torch.Tensor, *, max_images: int = 8, - mime_type: Optional[str] = None, - wait_label: Optional[str] = "Uploading", + mime_type: str | None = None, + wait_label: str | None = "Uploading", + show_batch_index: bool = True, ) -> list[str]: """ Uploads images to ComfyUI API and returns download URLs. 
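# Sketch of the behavior this hunk introduces, under assumed inputs (hypothetical
# values, not part of the diff): with wait_label="Uploading", show_batch_index=True
# and a 3-image batch, the per-image labels become "Uploading (1/3)",
# "Uploading (2/3)", "Uploading (3/3)", roughly:
#   label = f"{wait_label} ({idx + 1}/{num_to_upload})" if num_to_upload > 1 else wait_label
# and batch_start_ts is threaded through as progress_origin_ts so a single
# elapsed-time counter spans the whole batch instead of restarting per file.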
@@ -59,11 +60,18 @@ async def upload_images_to_comfyapi( download_urls: list[str] = [] is_batch = len(image.shape) > 3 batch_len = image.shape[0] if is_batch else 1 + num_to_upload = min(batch_len, max_images) + batch_start_ts = time.monotonic() - for idx in range(min(batch_len, max_images)): + for idx in range(num_to_upload): tensor = image[idx] if is_batch else image img_io = tensor_to_bytesio(tensor, mime_type=mime_type) - url = await upload_file_to_comfyapi(cls, img_io, img_io.name, mime_type, wait_label) + + effective_label = wait_label + if wait_label and show_batch_index and num_to_upload > 1: + effective_label = f"{wait_label} ({idx + 1}/{num_to_upload})" + + url = await upload_file_to_comfyapi(cls, img_io, img_io.name, mime_type, effective_label, batch_start_ts) download_urls.append(url) return download_urls @@ -126,8 +134,9 @@ async def upload_file_to_comfyapi( cls: type[IO.ComfyNode], file_bytes_io: BytesIO, filename: str, - upload_mime_type: Optional[str], - wait_label: Optional[str] = "Uploading", + upload_mime_type: str | None, + wait_label: str | None = "Uploading", + progress_origin_ts: float | None = None, ) -> str: """Uploads a single file to ComfyUI API and returns its download URL.""" if upload_mime_type is None: @@ -148,6 +157,7 @@ async def upload_file_to_comfyapi( file_bytes_io, content_type=upload_mime_type, wait_label=wait_label, + progress_origin_ts=progress_origin_ts, ) return create_resp.download_url @@ -155,27 +165,18 @@ async def upload_file_to_comfyapi( async def upload_file( cls: type[IO.ComfyNode], upload_url: str, - file: Union[BytesIO, str], + file: BytesIO | str, *, - content_type: Optional[str] = None, + content_type: str | None = None, max_retries: int = 3, retry_delay: float = 1.0, retry_backoff: float = 2.0, - wait_label: Optional[str] = None, + wait_label: str | None = None, + progress_origin_ts: float | None = None, ) -> None: """ Upload a file to a signed URL (e.g., S3 pre-signed PUT) with retries, Comfy progress display, and interruption. - Args: - cls: Node class (provides auth context + UI progress hooks). - upload_url: Pre-signed PUT URL. - file: BytesIO or path string. - content_type: Explicit MIME type. If None, we *suppress* Content-Type. - max_retries: Maximum retry attempts. - retry_delay: Initial delay in seconds. - retry_backoff: Exponential backoff factor. - wait_label: Progress label shown in Comfy UI. 
- Raises: ProcessingInterrupted, LocalNetworkError, ApiServerError, Exception """ @@ -198,7 +199,7 @@ async def upload_file( attempt = 0 delay = retry_delay - start_ts = time.monotonic() + start_ts = progress_origin_ts if progress_origin_ts is not None else time.monotonic() op_uuid = uuid.uuid4().hex[:8] while True: attempt += 1 From 8908ee262862f1252d1363d55c59872fb3361066 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 26 Nov 2025 20:38:30 +0200 Subject: [PATCH 0935/1073] fix(gemini): use first 10 images as fileData (URLs) and remaining images as inline base64 (#10918) --- comfy_api_nodes/apis/gemini_api.py | 6 ++++ comfy_api_nodes/nodes_gemini.py | 55 ++++++++++++++++++++---------- 2 files changed, 43 insertions(+), 18 deletions(-) diff --git a/comfy_api_nodes/apis/gemini_api.py b/comfy_api_nodes/apis/gemini_api.py index d34590d28..a380ecc86 100644 --- a/comfy_api_nodes/apis/gemini_api.py +++ b/comfy_api_nodes/apis/gemini_api.py @@ -58,8 +58,14 @@ class GeminiInlineData(BaseModel): mimeType: GeminiMimeType | None = Field(None) +class GeminiFileData(BaseModel): + fileUri: str | None = Field(None) + mimeType: GeminiMimeType | None = Field(None) + + class GeminiPart(BaseModel): inlineData: GeminiInlineData | None = Field(None) + fileData: GeminiFileData | None = Field(None) text: str | None = Field(None) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 938a20f84..976d9c225 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -20,6 +20,7 @@ from comfy_api.latest import IO, ComfyExtension, Input from comfy_api.util import VideoCodec, VideoContainer from comfy_api_nodes.apis.gemini_api import ( GeminiContent, + GeminiFileData, GeminiGenerateContentRequest, GeminiGenerateContentResponse, GeminiImageConfig, @@ -38,6 +39,7 @@ from comfy_api_nodes.util import ( get_number_of_images, sync_op, tensor_to_base64_string, + upload_images_to_comfyapi, validate_string, video_to_base64_string, ) @@ -68,24 +70,43 @@ class GeminiImageModel(str, Enum): gemini_2_5_flash_image = "gemini-2.5-flash-image" -def create_image_parts(image_input: torch.Tensor) -> list[GeminiPart]: - """ - Convert image tensor input to Gemini API compatible parts. - - Args: - image_input: Batch of image tensors from ComfyUI. - - Returns: - List of GeminiPart objects containing the encoded images. - """ +async def create_image_parts( + cls: type[IO.ComfyNode], + images: torch.Tensor, + image_limit: int = 0, +) -> list[GeminiPart]: image_parts: list[GeminiPart] = [] - for image_index in range(image_input.shape[0]): - image_as_b64 = tensor_to_base64_string(image_input[image_index].unsqueeze(0)) + if image_limit < 0: + raise ValueError("image_limit must be greater than or equal to 0 when creating Gemini image parts.") + total_images = get_number_of_images(images) + if total_images <= 0: + raise ValueError("No images provided to create_image_parts; at least one image is required.") + + # If image_limit == 0 --> use all images; otherwise clamp to image_limit. 
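# Worked example (hypothetical counts, for illustration only): with 14 input
# images and image_limit=0, effective_max = 14; the first min(14, 10) = 10
# images are uploaded and referenced by URL via fileData below, and the
# remaining 4 are embedded inline as base64 via inlineData.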
+ effective_max = total_images if image_limit == 0 else min(total_images, image_limit) + + # Number of images we'll send as URLs (fileData) + num_url_images = min(effective_max, 10) # Vertex API max number of image links + reference_images_urls = await upload_images_to_comfyapi( + cls, + images, + max_images=num_url_images, + ) + for reference_image_url in reference_images_urls: + image_parts.append( + GeminiPart( + fileData=GeminiFileData( + mimeType=GeminiMimeType.image_png, + fileUri=reference_image_url, + ) + ) + ) + for idx in range(num_url_images, effective_max): image_parts.append( GeminiPart( inlineData=GeminiInlineData( mimeType=GeminiMimeType.image_png, - data=image_as_b64, + data=tensor_to_base64_string(images[idx]), ) ) ) @@ -338,8 +359,7 @@ class GeminiNode(IO.ComfyNode): # Add other modal parts if images is not None: - image_parts = create_image_parts(images) - parts.extend(image_parts) + parts.extend(await create_image_parts(cls, images)) if audio is not None: parts.extend(cls.create_audio_parts(audio)) if video is not None: @@ -562,8 +582,7 @@ class GeminiImage(IO.ComfyNode): image_config = GeminiImageConfig(aspectRatio=aspect_ratio) if images is not None: - image_parts = create_image_parts(images) - parts.extend(image_parts) + parts.extend(await create_image_parts(cls, images)) if files is not None: parts.extend(files) @@ -702,7 +721,7 @@ class GeminiImage2(IO.ComfyNode): if images is not None: if get_number_of_images(images) > 14: raise ValueError("The current maximum number of supported images is 14.") - parts.extend(create_image_parts(images)) + parts.extend(await create_image_parts(cls, images)) if files is not None: parts.extend(files) From 234c3dc85f7e61a537bbf6d8999c5880c5e0b746 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Wed, 26 Nov 2025 11:58:08 -0800 Subject: [PATCH 0936/1073] Bump frontend to 1.32.9 (#10867) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5f20816d6..9291552d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.30.6 +comfyui-frontend-package==1.32.9 comfyui-workflow-templates==0.7.20 comfyui-embedded-docs==0.3.1 torch From 58c6ed541d5aaf6d9b12f63bc23c33164e1cf7a3 Mon Sep 17 00:00:00 2001 From: Terry Jia Date: Wed, 26 Nov 2025 14:58:27 -0500 Subject: [PATCH 0937/1073] Merge 3d animation node (#10025) --- comfy_extras/nodes_load_3d.py | 110 +++++++--------------------------- 1 file changed, 23 insertions(+), 87 deletions(-) diff --git a/comfy_extras/nodes_load_3d.py b/comfy_extras/nodes_load_3d.py index 899608149..54c66ef68 100644 --- a/comfy_extras/nodes_load_3d.py +++ b/comfy_extras/nodes_load_3d.py @@ -7,6 +7,10 @@ from comfy_api.input_impl import VideoFromFile from pathlib import Path +from PIL import Image +import numpy as np + +import uuid def normalize_path(path): return path.replace('\\', '/') @@ -34,58 +38,6 @@ class Load3D(): "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), }} - RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO) - RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart", "camera_info", "recording_video") - - FUNCTION = "process" - EXPERIMENTAL = True - - CATEGORY = "3d" - - def process(self, model_file, image, **kwargs): - image_path = folder_paths.get_annotated_filepath(image['image']) - mask_path = folder_paths.get_annotated_filepath(image['mask']) - normal_path = folder_paths.get_annotated_filepath(image['normal']) - 
lineart_path = folder_paths.get_annotated_filepath(image['lineart']) - - load_image_node = nodes.LoadImage() - output_image, ignore_mask = load_image_node.load_image(image=image_path) - ignore_image, output_mask = load_image_node.load_image(image=mask_path) - normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path) - lineart_image, ignore_mask3 = load_image_node.load_image(image=lineart_path) - - video = None - - if image['recording'] != "": - recording_video_path = folder_paths.get_annotated_filepath(image['recording']) - - video = VideoFromFile(recording_video_path) - - return output_image, output_mask, model_file, normal_image, lineart_image, image['camera_info'], video - -class Load3DAnimation(): - @classmethod - def INPUT_TYPES(s): - input_dir = os.path.join(folder_paths.get_input_directory(), "3d") - - os.makedirs(input_dir, exist_ok=True) - - input_path = Path(input_dir) - base_path = Path(folder_paths.get_input_directory()) - - files = [ - normalize_path(str(file_path.relative_to(base_path))) - for file_path in input_path.rglob("*") - if file_path.suffix.lower() in {'.gltf', '.glb', '.fbx'} - ] - - return {"required": { - "model_file": (sorted(files), {"file_upload": True}), - "image": ("LOAD_3D_ANIMATION", {}), - "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), - "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), - }} - RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO) RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info", "recording_video") @@ -120,7 +72,8 @@ class Preview3D(): "model_file": ("STRING", {"default": "", "multiline": False}), }, "optional": { - "camera_info": ("LOAD3D_CAMERA", {}) + "camera_info": ("LOAD3D_CAMERA", {}), + "bg_image": ("IMAGE", {}) }} OUTPUT_NODE = True @@ -133,50 +86,33 @@ class Preview3D(): def process(self, model_file, **kwargs): camera_info = kwargs.get("camera_info", None) + bg_image = kwargs.get("bg_image", None) + + bg_image_path = None + if bg_image is not None: + + img_array = (bg_image[0].cpu().numpy() * 255).astype(np.uint8) + img = Image.fromarray(img_array) + + temp_dir = folder_paths.get_temp_directory() + filename = f"bg_{uuid.uuid4().hex}.png" + bg_image_path = os.path.join(temp_dir, filename) + img.save(bg_image_path, compress_level=1) + + bg_image_path = f"temp/{filename}" return { "ui": { - "result": [model_file, camera_info] - } - } - -class Preview3DAnimation(): - @classmethod - def INPUT_TYPES(s): - return {"required": { - "model_file": ("STRING", {"default": "", "multiline": False}), - }, - "optional": { - "camera_info": ("LOAD3D_CAMERA", {}) - }} - - OUTPUT_NODE = True - RETURN_TYPES = () - - CATEGORY = "3d" - - FUNCTION = "process" - EXPERIMENTAL = True - - def process(self, model_file, **kwargs): - camera_info = kwargs.get("camera_info", None) - - return { - "ui": { - "result": [model_file, camera_info] + "result": [model_file, camera_info, bg_image_path] } } NODE_CLASS_MAPPINGS = { "Load3D": Load3D, - "Load3DAnimation": Load3DAnimation, "Preview3D": Preview3D, - "Preview3DAnimation": Preview3DAnimation } NODE_DISPLAY_NAME_MAPPINGS = { - "Load3D": "Load 3D", - "Load3DAnimation": "Load 3D - Animation", - "Preview3D": "Preview 3D", - "Preview3DAnimation": "Preview 3D - Animation" + "Load3D": "Load 3D & Animation", + "Preview3D": "Preview 3D & Animation", } From 55f654db3ddaf5a10ac6dbe79774c23c350d279d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 26 Nov 2025 
12:16:40 -0800 Subject: [PATCH 0938/1073] Fix the CSP offline feature. (#10923) --- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index 0fd2e49e3..fca5050bd 100644 --- a/server.py +++ b/server.py @@ -174,7 +174,7 @@ def create_block_external_middleware(): else: response = await handler(request) - response.headers['Content-Security-Policy'] = "default-src 'self'; script-src 'self' 'unsafe-inline' blob:; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; font-src 'self'; connect-src 'self'; frame-src 'self'; object-src 'self';" + response.headers['Content-Security-Policy'] = "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' blob:; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; font-src 'self'; connect-src 'self'; frame-src 'self'; object-src 'self';" return response return block_external_middleware From dd41b745497cdbbafb0bd745f590726b0e41f9f3 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 26 Nov 2025 12:36:38 -0800 Subject: [PATCH 0939/1073] Add Z Image to readme. (#10924) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index b9300ab07..91fb510e1 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith - [Qwen Image](https://comfyanonymous.github.io/ComfyUI_examples/qwen_image/) - [Hunyuan Image 2.1](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_image/) - [Flux 2](https://comfyanonymous.github.io/ComfyUI_examples/flux2/) + - [Z Image](https://comfyanonymous.github.io/ComfyUI_examples/z_image/) - Image Editing Models - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/) - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model) From d8433c63fdacef24f40da401b02ebba272bf1fbb Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 27 Nov 2025 00:42:01 +0200 Subject: [PATCH 0940/1073] chore(api-nodes): remove chat widgets from OpenAI/Gemini nodes (#10861) --- comfy_api_nodes/nodes_gemini.py | 77 +-------------------------------- comfy_api_nodes/nodes_openai.py | 46 ++++---------------- 2 files changed, 11 insertions(+), 112 deletions(-) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 976d9c225..08f7b0f64 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -4,10 +4,7 @@ See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/infer """ import base64 -import json import os -import time -import uuid from enum import Enum from io import BytesIO from typing import Literal @@ -43,7 +40,6 @@ from comfy_api_nodes.util import ( validate_string, video_to_base64_string, ) -from server import PromptServer GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini" GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024 # 20 MB @@ -384,29 +380,6 @@ class GeminiNode(IO.ComfyNode): ) output_text = get_text_from_response(response) - if output_text: - # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button. 
- render_spec = { - "node_id": cls.hidden.unique_id, - "component": "ChatHistoryWidget", - "props": { - "history": json.dumps( - [ - { - "prompt": prompt, - "response": output_text, - "response_id": str(uuid.uuid4()), - "timestamp": time.time(), - } - ] - ), - }, - } - PromptServer.instance.send_sync( - "display_component", - render_spec, - ) - return IO.NodeOutput(output_text or "Empty response from Gemini model...") @@ -601,30 +574,7 @@ class GeminiImage(IO.ComfyNode): response_model=GeminiGenerateContentResponse, price_extractor=calculate_tokens_price, ) - - output_text = get_text_from_response(response) - if output_text: - render_spec = { - "node_id": cls.hidden.unique_id, - "component": "ChatHistoryWidget", - "props": { - "history": json.dumps( - [ - { - "prompt": prompt, - "response": output_text, - "response_id": str(uuid.uuid4()), - "timestamp": time.time(), - } - ] - ), - }, - } - PromptServer.instance.send_sync( - "display_component", - render_spec, - ) - return IO.NodeOutput(get_image_from_response(response), output_text) + return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response)) class GeminiImage2(IO.ComfyNode): @@ -744,30 +694,7 @@ class GeminiImage2(IO.ComfyNode): response_model=GeminiGenerateContentResponse, price_extractor=calculate_tokens_price, ) - - output_text = get_text_from_response(response) - if output_text: - render_spec = { - "node_id": cls.hidden.unique_id, - "component": "ChatHistoryWidget", - "props": { - "history": json.dumps( - [ - { - "prompt": prompt, - "response": output_text, - "response_id": str(uuid.uuid4()), - "timestamp": time.time(), - } - ] - ), - }, - } - PromptServer.instance.send_sync( - "display_component", - render_spec, - ) - return IO.NodeOutput(get_image_from_response(response), output_text) + return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response)) class GeminiExtension(ComfyExtension): diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index acf35d276..c8da5464b 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -1,15 +1,10 @@ from io import BytesIO -from typing import Optional, Union -import json import os -import time -import uuid from enum import Enum from inspect import cleandoc import numpy as np import torch from PIL import Image -from server import PromptServer import folder_paths import base64 from comfy_api.latest import IO, ComfyExtension @@ -587,11 +582,11 @@ class OpenAIChatNode(IO.ComfyNode): def create_input_message_contents( cls, prompt: str, - image: Optional[torch.Tensor] = None, - files: Optional[list[InputFileContent]] = None, + image: torch.Tensor | None = None, + files: list[InputFileContent] | None = None, ) -> InputMessageContentList: """Create a list of input message contents from prompt and optional image.""" - content_list: list[Union[InputContent, InputTextContent, InputImageContent, InputFileContent]] = [ + content_list: list[InputContent | InputTextContent | InputImageContent | InputFileContent] = [ InputTextContent(text=prompt, type="input_text"), ] if image is not None: @@ -617,9 +612,9 @@ class OpenAIChatNode(IO.ComfyNode): prompt: str, persist_context: bool = False, model: SupportedOpenAIModel = SupportedOpenAIModel.gpt_5.value, - images: Optional[torch.Tensor] = None, - files: Optional[list[InputFileContent]] = None, - advanced_options: Optional[CreateModelResponseProperties] = None, + images: torch.Tensor | None = None, + files: list[InputFileContent] | None = None, + 
advanced_options: CreateModelResponseProperties | None = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) @@ -660,30 +655,7 @@ class OpenAIChatNode(IO.ComfyNode): status_extractor=lambda response: response.status, completed_statuses=["incomplete", "completed"] ) - output_text = cls.get_text_from_message_content(cls.get_message_content_from_response(result_response)) - - # Update history - render_spec = { - "node_id": cls.hidden.unique_id, - "component": "ChatHistoryWidget", - "props": { - "history": json.dumps( - [ - { - "prompt": prompt, - "response": output_text, - "response_id": str(uuid.uuid4()), - "timestamp": time.time(), - } - ] - ), - }, - } - PromptServer.instance.send_sync( - "display_component", - render_spec, - ) - return IO.NodeOutput(output_text) + return IO.NodeOutput(cls.get_text_from_message_content(cls.get_message_content_from_response(result_response))) class OpenAIInputFiles(IO.ComfyNode): @@ -790,8 +762,8 @@ class OpenAIChatConfig(IO.ComfyNode): def execute( cls, truncation: bool, - instructions: Optional[str] = None, - max_output_tokens: Optional[int] = None, + instructions: str | None = None, + max_output_tokens: int | None = None, ) -> IO.NodeOutput: """ Configure advanced options for the OpenAI Chat Node. From a2d60aad0f8e03657d501842460123f6eaaf6791 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 27 Nov 2025 00:55:31 +0200 Subject: [PATCH 0941/1073] convert nodes_custom_sampler.py to V3 schema (#10206) --- comfy_extras/nodes_custom_sampler.py | 1182 ++++++++++++++------------ 1 file changed, 633 insertions(+), 549 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index d011f433b..fbb080886 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -3,272 +3,312 @@ import comfy.samplers import comfy.sample from comfy.k_diffusion import sampling as k_diffusion_sampling from comfy.k_diffusion import sa_solver -from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict import latent_preview import torch import comfy.utils import node_helpers +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io -class BasicScheduler: +class BasicScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "scheduler": (comfy.samplers.SCHEDULER_NAMES, ), - "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="BasicScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Model.Input("model"), + io.Combo.Input("scheduler", options=comfy.samplers.SCHEDULER_NAMES), + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, model, scheduler, steps, denoise): + @classmethod + def execute(cls, model, scheduler, steps, denoise) -> io.NodeOutput: total_steps = steps if denoise < 1.0: if denoise <= 0.0: - return (torch.FloatTensor([]),) + return io.NodeOutput(torch.FloatTensor([])) total_steps = int(steps/denoise) sigmas = comfy.samplers.calculate_sigmas(model.get_model_object("model_sampling"), scheduler, total_steps).cpu() sigmas =
sigmas[-(steps + 1):] - return (sigmas, ) + return io.NodeOutput(sigmas) + + get_sigmas = execute -class KarrasScheduler: +class KarrasScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "rho": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="KarrasScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("sigma_min", default=0.0291675, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("rho", default=7.0, min=0.0, max=100.0, step=0.01, round=False), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, steps, sigma_max, sigma_min, rho): + @classmethod + def execute(cls, steps, sigma_max, sigma_min, rho) -> io.NodeOutput: sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) - return (sigmas, ) + return io.NodeOutput(sigmas) -class ExponentialScheduler: + get_sigmas = execute + +class ExponentialScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="ExponentialScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("sigma_min", default=0.0291675, min=0.0, max=5000.0, step=0.01, round=False), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, steps, sigma_max, sigma_min): + @classmethod + def execute(cls, steps, sigma_max, sigma_min) -> io.NodeOutput: sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max) - return (sigmas, ) + return io.NodeOutput(sigmas) -class PolyexponentialScheduler: + get_sigmas = execute + +class PolyexponentialScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "rho": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="PolyexponentialScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Int.Input("steps", 
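# Aside on BasicScheduler.execute above: for partial denoising the node builds a
# longer schedule and keeps only its low-noise tail, so exactly "steps" sampling
# steps remain. A minimal standalone check of that slice (illustrative numbers,
# not taken from any model):
import torch

def tail_of_schedule(sigmas: torch.Tensor, steps: int) -> torch.Tensor:
    # Same slice as in the node: keep the last steps + 1 sigma values.
    return sigmas[-(steps + 1):]

# steps=20, denoise=0.5 -> total_steps=40 -> 41 sigmas; the slice keeps 21 of
# them, i.e. 20 steps that start roughly half-way down the noise schedule:
assert tail_of_schedule(torch.linspace(14.6, 0.0, 41), 20).shape[0] == 21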
default=20, min=1, max=10000), + io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("sigma_min", default=0.0291675, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("rho", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, steps, sigma_max, sigma_min, rho): + @classmethod + def execute(cls, steps, sigma_max, sigma_min, rho) -> io.NodeOutput: sigmas = k_diffusion_sampling.get_sigmas_polyexponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) - return (sigmas, ) + return io.NodeOutput(sigmas) -class LaplaceScheduler: + get_sigmas = execute + +class LaplaceScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "mu": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10.0, "step":0.1, "round": False}), - "beta": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step":0.1, "round": False}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="LaplaceScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("sigma_max", default=14.614642, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("sigma_min", default=0.0291675, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("mu", default=0.0, min=-10.0, max=10.0, step=0.1, round=False), + io.Float.Input("beta", default=0.5, min=0.0, max=10.0, step=0.1, round=False), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, steps, sigma_max, sigma_min, mu, beta): + @classmethod + def execute(cls, steps, sigma_max, sigma_min, mu, beta) -> io.NodeOutput: sigmas = k_diffusion_sampling.get_sigmas_laplace(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, mu=mu, beta=beta) - return (sigmas, ) + return io.NodeOutput(sigmas) + + get_sigmas = execute -class SDTurboScheduler: +class SDTurboScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "steps": ("INT", {"default": 1, "min": 1, "max": 10}), - "denoise": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="SDTurboScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Model.Input("model"), + io.Int.Input("steps", default=1, min=1, max=10), + io.Float.Input("denoise", default=1.0, min=0, max=1.0, step=0.01), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, model, steps, denoise): + @classmethod + def execute(cls, model, steps, denoise) -> io.NodeOutput: start_step = 10 - int(10 * denoise) timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps] sigmas = model.get_model_object("model_sampling").sigma(timesteps) sigmas = torch.cat([sigmas, sigmas.new_zeros([1])]) - return (sigmas, ) + return io.NodeOutput(sigmas) -class BetaSamplingScheduler: + get_sigmas = execute + +class 
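# Aside on SDTurboScheduler above: the node picks sigmas from a fixed ten-point
# timestep grid (999, 899, ..., 99), which is why steps is capped at 10 and
# denoise only shifts the starting point. The timestep arithmetic in isolation:
import torch

def turbo_timesteps(steps: int, denoise: float) -> torch.Tensor:
    # Mirrors the node: denoise=1.0 starts at timestep 999, lower values start later.
    start_step = 10 - int(10 * denoise)
    return torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]

# turbo_timesteps(1, 1.0) -> tensor([999]); turbo_timesteps(2, 0.5) -> tensor([499, 399])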
BetaSamplingScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "alpha": ("FLOAT", {"default": 0.6, "min": 0.0, "max": 50.0, "step":0.01, "round": False}), - "beta": ("FLOAT", {"default": 0.6, "min": 0.0, "max": 50.0, "step":0.01, "round": False}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="BetaSamplingScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Model.Input("model"), + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("alpha", default=0.6, min=0.0, max=50.0, step=0.01, round=False), + io.Float.Input("beta", default=0.6, min=0.0, max=50.0, step=0.01, round=False), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, model, steps, alpha, beta): + @classmethod + def execute(cls, model, steps, alpha, beta) -> io.NodeOutput: sigmas = comfy.samplers.beta_scheduler(model.get_model_object("model_sampling"), steps, alpha=alpha, beta=beta) - return (sigmas, ) + return io.NodeOutput(sigmas) -class VPScheduler: + get_sigmas = execute + +class VPScheduler(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "beta_d": ("FLOAT", {"default": 19.9, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), #TODO: fix default values - "beta_min": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 5000.0, "step":0.01, "round": False}), - "eps_s": ("FLOAT", {"default": 0.001, "min": 0.0, "max": 1.0, "step":0.0001, "round": False}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/schedulers" + def define_schema(cls): + return io.Schema( + node_id="VPScheduler", + category="sampling/custom_sampling/schedulers", + inputs=[ + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("beta_d", default=19.9, min=0.0, max=5000.0, step=0.01, round=False), #TODO: fix default values + io.Float.Input("beta_min", default=0.1, min=0.0, max=5000.0, step=0.01, round=False), + io.Float.Input("eps_s", default=0.001, min=0.0, max=1.0, step=0.0001, round=False), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, steps, beta_d, beta_min, eps_s): + @classmethod + def execute(cls, steps, beta_d, beta_min, eps_s) -> io.NodeOutput: sigmas = k_diffusion_sampling.get_sigmas_vp(n=steps, beta_d=beta_d, beta_min=beta_min, eps_s=eps_s) - return (sigmas, ) + return io.NodeOutput(sigmas) -class SplitSigmas: + get_sigmas = execute + +class SplitSigmas(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"sigmas": ("SIGMAS", ), - "step": ("INT", {"default": 0, "min": 0, "max": 10000}), - } - } - RETURN_TYPES = ("SIGMAS","SIGMAS") - RETURN_NAMES = ("high_sigmas", "low_sigmas") - CATEGORY = "sampling/custom_sampling/sigmas" + def define_schema(cls): + return io.Schema( + node_id="SplitSigmas", + category="sampling/custom_sampling/sigmas", + inputs=[ + io.Sigmas.Input("sigmas"), + io.Int.Input("step", default=0, min=0, max=10000), + ], + outputs=[ + io.Sigmas.Output(display_name="high_sigmas"), + io.Sigmas.Output(display_name="low_sigmas"), + ] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, sigmas, step): + @classmethod + def execute(cls, sigmas, step) -> io.NodeOutput: sigmas1 = sigmas[:step + 1] sigmas2 = sigmas[step:] - return (sigmas1, sigmas2) + return 
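# Aside on SplitSigmas above: the two outputs deliberately overlap by one entry
# (high_sigmas ends with the same sigma that low_sigmas starts with), so a second
# sampling pass fed low_sigmas resumes exactly where the first pass stopped.
# Worked example with made-up values:
#   sigmas = [14.6, 7.0, 3.1, 1.2, 0.0], step = 2
#   high_sigmas = sigmas[:3] = [14.6, 7.0, 3.1]
#   low_sigmas  = sigmas[2:] = [3.1, 1.2, 0.0]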
io.NodeOutput(sigmas1, sigmas2) -class SplitSigmasDenoise: + get_sigmas = execute + +class SplitSigmasDenoise(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"sigmas": ("SIGMAS", ), - "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - } - } - RETURN_TYPES = ("SIGMAS","SIGMAS") - RETURN_NAMES = ("high_sigmas", "low_sigmas") - CATEGORY = "sampling/custom_sampling/sigmas" + def define_schema(cls): + return io.Schema( + node_id="SplitSigmasDenoise", + category="sampling/custom_sampling/sigmas", + inputs=[ + io.Sigmas.Input("sigmas"), + io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), + ], + outputs=[ + io.Sigmas.Output(display_name="high_sigmas"), + io.Sigmas.Output(display_name="low_sigmas"), + ] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, sigmas, denoise): + @classmethod + def execute(cls, sigmas, denoise) -> io.NodeOutput: steps = max(sigmas.shape[-1] - 1, 0) total_steps = round(steps * denoise) sigmas1 = sigmas[:-(total_steps)] sigmas2 = sigmas[-(total_steps + 1):] - return (sigmas1, sigmas2) + return io.NodeOutput(sigmas1, sigmas2) -class FlipSigmas: + get_sigmas = execute + +class FlipSigmas(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"sigmas": ("SIGMAS", ), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/sigmas" + def define_schema(cls): + return io.Schema( + node_id="FlipSigmas", + category="sampling/custom_sampling/sigmas", + inputs=[io.Sigmas.Input("sigmas")], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "get_sigmas" - - def get_sigmas(self, sigmas): + @classmethod + def execute(cls, sigmas) -> io.NodeOutput: if len(sigmas) == 0: - return (sigmas,) + return io.NodeOutput(sigmas) sigmas = sigmas.flip(0) if sigmas[0] == 0: sigmas[0] = 0.0001 - return (sigmas,) + return io.NodeOutput(sigmas) -class SetFirstSigma: + get_sigmas = execute + +class SetFirstSigma(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"sigmas": ("SIGMAS", ), - "sigma": ("FLOAT", {"default": 136.0, "min": 0.0, "max": 20000.0, "step": 0.001, "round": False}), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/sigmas" + def define_schema(cls): + return io.Schema( + node_id="SetFirstSigma", + category="sampling/custom_sampling/sigmas", + inputs=[ + io.Sigmas.Input("sigmas"), + io.Float.Input("sigma", default=136.0, min=0.0, max=20000.0, step=0.001, round=False), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "set_first_sigma" - - def set_first_sigma(self, sigmas, sigma): + @classmethod + def execute(cls, sigmas, sigma) -> io.NodeOutput: sigmas = sigmas.clone() sigmas[0] = sigma - return (sigmas, ) + return io.NodeOutput(sigmas) -class ExtendIntermediateSigmas: + set_first_sigma = execute + +class ExtendIntermediateSigmas(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"sigmas": ("SIGMAS", ), - "steps": ("INT", {"default": 2, "min": 1, "max": 100}), - "start_at_sigma": ("FLOAT", {"default": -1.0, "min": -1.0, "max": 20000.0, "step": 0.01, "round": False}), - "end_at_sigma": ("FLOAT", {"default": 12.0, "min": 0.0, "max": 20000.0, "step": 0.01, "round": False}), - "spacing": (['linear', 'cosine', 'sine'],), - } - } - RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling/sigmas" + def define_schema(cls): + return io.Schema( + node_id="ExtendIntermediateSigmas", + category="sampling/custom_sampling/sigmas", + inputs=[ + io.Sigmas.Input("sigmas"), + io.Int.Input("steps", default=2, 
min=1, max=100), + io.Float.Input("start_at_sigma", default=-1.0, min=-1.0, max=20000.0, step=0.01, round=False), + io.Float.Input("end_at_sigma", default=12.0, min=0.0, max=20000.0, step=0.01, round=False), + io.Combo.Input("spacing", options=['linear', 'cosine', 'sine']), + ], + outputs=[io.Sigmas.Output()] + ) - FUNCTION = "extend" - - def extend(self, sigmas: torch.Tensor, steps: int, start_at_sigma: float, end_at_sigma: float, spacing: str): + @classmethod + def execute(cls, sigmas: torch.Tensor, steps: int, start_at_sigma: float, end_at_sigma: float, spacing: str) -> io.NodeOutput: if start_at_sigma < 0: start_at_sigma = float("inf") @@ -299,27 +339,27 @@ class ExtendIntermediateSigmas: extended_sigmas = torch.FloatTensor(extended_sigmas) - return (extended_sigmas,) + return io.NodeOutput(extended_sigmas) + + extend = execute -class SamplingPercentToSigma: +class SamplingPercentToSigma(io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "model": (IO.MODEL, {}), - "sampling_percent": (IO.FLOAT, {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.0001}), - "return_actual_sigma": (IO.BOOLEAN, {"default": False, "tooltip": "Return the actual sigma value instead of the value used for interval checks.\nThis only affects results at 0.0 and 1.0."}), - } - } + def define_schema(cls): + return io.Schema( + node_id="SamplingPercentToSigma", + category="sampling/custom_sampling/sigmas", + inputs=[ + io.Model.Input("model"), + io.Float.Input("sampling_percent", default=0.0, min=0.0, max=1.0, step=0.0001), + io.Boolean.Input("return_actual_sigma", default=False, tooltip="Return the actual sigma value instead of the value used for interval checks.\nThis only affects results at 0.0 and 1.0."), + ], + outputs=[io.Float.Output(display_name="sigma_value")] + ) - RETURN_TYPES = (IO.FLOAT,) - RETURN_NAMES = ("sigma_value",) - CATEGORY = "sampling/custom_sampling/sigmas" - - FUNCTION = "get_sigma" - - def get_sigma(self, model, sampling_percent, return_actual_sigma): + @classmethod + def execute(cls, model, sampling_percent, return_actual_sigma) -> io.NodeOutput: model_sampling = model.get_model_object("model_sampling") sigma_val = model_sampling.percent_to_sigma(sampling_percent) if return_actual_sigma: @@ -327,212 +367,234 @@ class SamplingPercentToSigma: sigma_val = model_sampling.sigma_max.item() elif sampling_percent == 1.0: sigma_val = model_sampling.sigma_min.item() - return (sigma_val,) + return io.NodeOutput(sigma_val) + + get_sigma = execute -class KSamplerSelect: +class KSamplerSelect(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"sampler_name": (comfy.samplers.SAMPLER_NAMES, ), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="KSamplerSelect", + category="sampling/custom_sampling/samplers", + inputs=[io.Combo.Input("sampler_name", options=comfy.samplers.SAMPLER_NAMES)], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, sampler_name): + @classmethod + def execute(cls, sampler_name) -> io.NodeOutput: sampler = comfy.samplers.sampler_object(sampler_name) - return (sampler, ) + return io.NodeOutput(sampler) -class SamplerDPMPP_3M_SDE: + get_sampler = execute + +class SamplerDPMPP_3M_SDE(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "s_noise": ("FLOAT", {"default": 1.0, 
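# Aside on SamplingPercentToSigma above: per its tooltip, percent_to_sigma returns
# sentinel values at the interval endpoints (a very large number at 0.0 and 0.0 at
# 1.0 in current comfy model_sampling implementations), which are meant for
# "is sigma in range" checks rather than for sampling. That is why
# return_actual_sigma swaps in the model's true sigma_max/sigma_min; factored out
# under those same assumptions:
def percent_to_usable_sigma(model_sampling, percent: float, return_actual_sigma: bool) -> float:
    sigma_val = model_sampling.percent_to_sigma(percent)
    if return_actual_sigma:
        if percent == 0.0:
            sigma_val = model_sampling.sigma_max.item()
        elif percent == 1.0:
            sigma_val = model_sampling.sigma_min.item()
    return sigma_val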
"min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "noise_device": (['gpu', 'cpu'], ), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerDPMPP_3M_SDE", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Combo.Input("noise_device", options=['gpu', 'cpu']), + ], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, eta, s_noise, noise_device): + @classmethod + def execute(cls, eta, s_noise, noise_device) -> io.NodeOutput: if noise_device == 'cpu': sampler_name = "dpmpp_3m_sde" else: sampler_name = "dpmpp_3m_sde_gpu" sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise}) - return (sampler, ) + return io.NodeOutput(sampler) -class SamplerDPMPP_2M_SDE: + get_sampler = execute + +class SamplerDPMPP_2M_SDE(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"solver_type": (['midpoint', 'heun'], ), - "eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "noise_device": (['gpu', 'cpu'], ), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerDPMPP_2M_SDE", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Combo.Input("solver_type", options=['midpoint', 'heun']), + io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Combo.Input("noise_device", options=['gpu', 'cpu']), + ], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, solver_type, eta, s_noise, noise_device): + @classmethod + def execute(cls, solver_type, eta, s_noise, noise_device) -> io.NodeOutput: if noise_device == 'cpu': sampler_name = "dpmpp_2m_sde" else: sampler_name = "dpmpp_2m_sde_gpu" sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type}) - return (sampler, ) + return io.NodeOutput(sampler) + + get_sampler = execute -class SamplerDPMPP_SDE: +class SamplerDPMPP_SDE(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "r": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "noise_device": (['gpu', 'cpu'], ), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerDPMPP_SDE", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("r", default=0.5, min=0.0, max=100.0, step=0.01, round=False), + io.Combo.Input("noise_device", options=['gpu', 'cpu']), + ], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, eta, s_noise, r, noise_device): + @classmethod + def 
execute(cls, eta, s_noise, r, noise_device) -> io.NodeOutput: if noise_device == 'cpu': sampler_name = "dpmpp_sde" else: sampler_name = "dpmpp_sde_gpu" sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r}) - return (sampler, ) + return io.NodeOutput(sampler) -class SamplerDPMPP_2S_Ancestral: + get_sampler = execute + +class SamplerDPMPP_2S_Ancestral(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerDPMPP_2S_Ancestral", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + ], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, eta, s_noise): + @classmethod + def execute(cls, eta, s_noise) -> io.NodeOutput: sampler = comfy.samplers.ksampler("dpmpp_2s_ancestral", {"eta": eta, "s_noise": s_noise}) - return (sampler, ) + return io.NodeOutput(sampler) -class SamplerEulerAncestral: + get_sampler = execute + +class SamplerEulerAncestral(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerEulerAncestral", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + ], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, eta, s_noise): + @classmethod + def execute(cls, eta, s_noise) -> io.NodeOutput: sampler = comfy.samplers.ksampler("euler_ancestral", {"eta": eta, "s_noise": s_noise}) - return (sampler, ) + return io.NodeOutput(sampler) -class SamplerEulerAncestralCFGPP: + get_sampler = execute + +class SamplerEulerAncestralCFGPP(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step":0.01, "round": False}), - "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step":0.01, "round": False}), - }} - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerEulerAncestralCFGPP", + display_name="SamplerEulerAncestralCFG++", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Float.Input("eta", default=1.0, min=0.0, max=1.0, step=0.01, round=False), + io.Float.Input("s_noise", default=1.0, min=0.0, max=10.0, step=0.01, round=False), + ], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, eta, s_noise): + @classmethod + def execute(cls, eta, s_noise) -> io.NodeOutput: sampler = comfy.samplers.ksampler( "euler_ancestral_cfg_pp", {"eta": eta, "s_noise": s_noise}) - return (sampler, ) + return io.NodeOutput(sampler) -class 
SamplerLMS: + get_sampler = execute + +class SamplerLMS(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"order": ("INT", {"default": 4, "min": 1, "max": 100}), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerLMS", + category="sampling/custom_sampling/samplers", + inputs=[io.Int.Input("order", default=4, min=1, max=100)], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, order): + @classmethod + def execute(cls, order) -> io.NodeOutput: sampler = comfy.samplers.ksampler("lms", {"order": order}) - return (sampler, ) + return io.NodeOutput(sampler) -class SamplerDPMAdaptative: + get_sampler = execute + +class SamplerDPMAdaptative(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"order": ("INT", {"default": 3, "min": 2, "max": 3}), - "rtol": ("FLOAT", {"default": 0.05, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "atol": ("FLOAT", {"default": 0.0078, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "h_init": ("FLOAT", {"default": 0.05, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "pcoeff": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "icoeff": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "dcoeff": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "accept_safety": ("FLOAT", {"default": 0.81, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "eta": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), - } - } - RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling/samplers" + def define_schema(cls): + return io.Schema( + node_id="SamplerDPMAdaptative", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Int.Input("order", default=3, min=2, max=3), + io.Float.Input("rtol", default=0.05, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("atol", default=0.0078, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("h_init", default=0.05, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("pcoeff", default=0.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("icoeff", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("dcoeff", default=0.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("accept_safety", default=0.81, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("eta", default=0.0, min=0.0, max=100.0, step=0.01, round=False), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + ], + outputs=[io.Sampler.Output()] + ) - FUNCTION = "get_sampler" - - def get_sampler(self, order, rtol, atol, h_init, pcoeff, icoeff, dcoeff, accept_safety, eta, s_noise): + @classmethod + def execute(cls, order, rtol, atol, h_init, pcoeff, icoeff, dcoeff, accept_safety, eta, s_noise) -> io.NodeOutput: sampler = comfy.samplers.ksampler("dpm_adaptive", {"order": order, "rtol": rtol, "atol": atol, "h_init": h_init, "pcoeff": pcoeff, "icoeff": icoeff, "dcoeff": dcoeff, "accept_safety": accept_safety, "eta": eta, "s_noise":s_noise }) - return (sampler, ) + return io.NodeOutput(sampler) + + get_sampler = execute -class SamplerER_SDE(ComfyNodeABC): +class SamplerER_SDE(io.ComfyNode): @classmethod 
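# Editor's note: every node in this file follows the same mechanical V1 -> V3
# migration visible in the hunks above. A condensed before/after of the pattern,
# using a made-up node for illustration (not part of the patch):
#
#   # V1: class attributes plus an instance method named by FUNCTION
#   class MyNode:
#       @classmethod
#       def INPUT_TYPES(s):
#           return {"required": {"x": ("FLOAT", {"default": 1.0})}}
#       RETURN_TYPES = ("SIGMAS",)
#       CATEGORY = "sampling/custom_sampling"
#       FUNCTION = "run"
#       def run(self, x):
#           return (compute(x),)
#
#   # V3: declarative schema plus a classmethod execute, with an alias so code
#   # that still calls the old FUNCTION name keeps working (cf. get_sigmas = execute)
#   class MyNode(io.ComfyNode):
#       @classmethod
#       def define_schema(cls):
#           return io.Schema(node_id="MyNode", category="sampling/custom_sampling",
#                            inputs=[io.Float.Input("x", default=1.0)],
#                            outputs=[io.Sigmas.Output()])
#       @classmethod
#       def execute(cls, x) -> io.NodeOutput:
#           return io.NodeOutput(compute(x))
#       run = execute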
- def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "solver_type": (IO.COMBO, {"options": ["ER-SDE", "Reverse-time SDE", "ODE"]}), - "max_stage": (IO.INT, {"default": 3, "min": 1, "max": 3}), - "eta": ( - IO.FLOAT, - {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": False, "tooltip": "Stochastic strength of reverse-time SDE.\nWhen eta=0, it reduces to deterministic ODE. This setting doesn't apply to ER-SDE solver type."}, - ), - "s_noise": (IO.FLOAT, {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": False}), - } - } + def define_schema(cls): + return io.Schema( + node_id="SamplerER_SDE", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Combo.Input("solver_type", options=["ER-SDE", "Reverse-time SDE", "ODE"]), + io.Int.Input("max_stage", default=3, min=1, max=3), + io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False, tooltip="Stochastic strength of reverse-time SDE.\nWhen eta=0, it reduces to deterministic ODE. This setting doesn't apply to ER-SDE solver type."), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + ], + outputs=[io.Sampler.Output()] + ) - RETURN_TYPES = (IO.SAMPLER,) - CATEGORY = "sampling/custom_sampling/samplers" - - FUNCTION = "get_sampler" - - def get_sampler(self, solver_type, max_stage, eta, s_noise): + @classmethod + def execute(cls, solver_type, max_stage, eta, s_noise) -> io.NodeOutput: if solver_type == "ODE" or (solver_type == "Reverse-time SDE" and eta == 0): eta = 0 s_noise = 0 @@ -548,32 +610,33 @@ class SamplerER_SDE(ComfyNodeABC): sampler_name = "er_sde" sampler = comfy.samplers.ksampler(sampler_name, {"s_noise": s_noise, "noise_scaler": noise_scaler, "max_stage": max_stage}) - return (sampler,) + return io.NodeOutput(sampler) + + get_sampler = execute -class SamplerSASolver(ComfyNodeABC): +class SamplerSASolver(io.ComfyNode): @classmethod - def INPUT_TYPES(cls) -> InputTypeDict: - return { - "required": { - "model": (IO.MODEL, {}), - "eta": (IO.FLOAT, {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": False},), - "sde_start_percent": (IO.FLOAT, {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001},), - "sde_end_percent": (IO.FLOAT, {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.001},), - "s_noise": (IO.FLOAT, {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": False},), - "predictor_order": (IO.INT, {"default": 3, "min": 1, "max": 6}), - "corrector_order": (IO.INT, {"default": 4, "min": 0, "max": 6}), - "use_pece": (IO.BOOLEAN, {}), - "simple_order_2": (IO.BOOLEAN, {}), - } - } + def define_schema(cls): + return io.Schema( + node_id="SamplerSASolver", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Model.Input("model"), + io.Float.Input("eta", default=1.0, min=0.0, max=10.0, step=0.01, round=False), + io.Float.Input("sde_start_percent", default=0.2, min=0.0, max=1.0, step=0.001), + io.Float.Input("sde_end_percent", default=0.8, min=0.0, max=1.0, step=0.001), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False), + io.Int.Input("predictor_order", default=3, min=1, max=6), + io.Int.Input("corrector_order", default=4, min=0, max=6), + io.Boolean.Input("use_pece"), + io.Boolean.Input("simple_order_2"), + ], + outputs=[io.Sampler.Output()] + ) - RETURN_TYPES = (IO.SAMPLER,) - CATEGORY = "sampling/custom_sampling/samplers" - - FUNCTION = "get_sampler" - - def get_sampler(self, model, eta, sde_start_percent, sde_end_percent, s_noise, predictor_order, 
corrector_order, use_pece, simple_order_2): + @classmethod + def execute(cls, model, eta, sde_start_percent, sde_end_percent, s_noise, predictor_order, corrector_order, use_pece, simple_order_2) -> io.NodeOutput: model_sampling = model.get_model_object("model_sampling") start_sigma = model_sampling.percent_to_sigma(sde_start_percent) end_sigma = model_sampling.percent_to_sigma(sde_end_percent) @@ -591,7 +654,9 @@ class SamplerSASolver(ComfyNodeABC): "simple_order_2": simple_order_2, }, ) - return (sampler,) + return io.NodeOutput(sampler) + + get_sampler = execute class Noise_EmptyNoise: @@ -612,30 +677,31 @@ class Noise_RandomNoise: batch_inds = input_latent["batch_index"] if "batch_index" in input_latent else None return comfy.sample.prepare_noise(latent_image, self.seed, batch_inds) -class SamplerCustom: +class SamplerCustom(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "add_noise": ("BOOLEAN", {"default": True}), - "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "control_after_generate": True}), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), - "positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "sampler": ("SAMPLER", ), - "sigmas": ("SIGMAS", ), - "latent_image": ("LATENT", ), - } - } + def define_schema(cls): + return io.Schema( + node_id="SamplerCustom", + category="sampling/custom_sampling", + inputs=[ + io.Model.Input("model"), + io.Boolean.Input("add_noise", default=True), + io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True), + io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Sampler.Input("sampler"), + io.Sigmas.Input("sigmas"), + io.Latent.Input("latent_image"), + ], + outputs=[ + io.Latent.Output(display_name="output"), + io.Latent.Output(display_name="denoised_output"), + ] + ) - RETURN_TYPES = ("LATENT","LATENT") - RETURN_NAMES = ("output", "denoised_output") - - FUNCTION = "sample" - - CATEGORY = "sampling/custom_sampling" - - def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image): + @classmethod + def execute(cls, model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image) -> io.NodeOutput: latent = latent_image latent_image = latent["samples"] latent = latent.copy() @@ -664,52 +730,58 @@ class SamplerCustom: out_denoised["samples"] = model.model.process_latent_out(x0_output["x0"].cpu()) else: out_denoised = out - return (out, out_denoised) + return io.NodeOutput(out, out_denoised) + + sample = execute class Guider_Basic(comfy.samplers.CFGGuider): def set_conds(self, positive): self.inner_set_conds({"positive": positive}) -class BasicGuider: +class BasicGuider(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "conditioning": ("CONDITIONING", ), - } - } + def define_schema(cls): + return io.Schema( + node_id="BasicGuider", + category="sampling/custom_sampling/guiders", + inputs=[ + io.Model.Input("model"), + io.Conditioning.Input("conditioning"), + ], + outputs=[io.Guider.Output()] + ) - RETURN_TYPES = ("GUIDER",) - - FUNCTION = "get_guider" - CATEGORY = "sampling/custom_sampling/guiders" - - def get_guider(self, model, conditioning): + @classmethod + def execute(cls, model, conditioning) -> io.NodeOutput: guider = Guider_Basic(model) guider.set_conds(conditioning) - 
return (guider,) + return io.NodeOutput(guider) -class CFGGuider: + get_guider = execute + +class CFGGuider(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), - } - } + def define_schema(cls): + return io.Schema( + node_id="CFGGuider", + category="sampling/custom_sampling/guiders", + inputs=[ + io.Model.Input("model"), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), + ], + outputs=[io.Guider.Output()] + ) - RETURN_TYPES = ("GUIDER",) - - FUNCTION = "get_guider" - CATEGORY = "sampling/custom_sampling/guiders" - - def get_guider(self, model, positive, negative, cfg): + @classmethod + def execute(cls, model, positive, negative, cfg) -> io.NodeOutput: guider = comfy.samplers.CFGGuider(model) guider.set_conds(positive, negative) guider.set_cfg(cfg) - return (guider,) + return io.NodeOutput(guider) + + get_guider = execute class Guider_DualCFG(comfy.samplers.CFGGuider): def set_cfg(self, cfg1, cfg2, nested=False): @@ -740,84 +812,88 @@ class Guider_DualCFG(comfy.samplers.CFGGuider): out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options) return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1 -class DualCFGGuider: +class DualCFGGuider(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "cond1": ("CONDITIONING", ), - "cond2": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "cfg_conds": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), - "cfg_cond2_negative": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), - "style": (["regular", "nested"],), - } - } + def define_schema(cls): + return io.Schema( + node_id="DualCFGGuider", + category="sampling/custom_sampling/guiders", + inputs=[ + io.Model.Input("model"), + io.Conditioning.Input("cond1"), + io.Conditioning.Input("cond2"), + io.Conditioning.Input("negative"), + io.Float.Input("cfg_conds", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), + io.Float.Input("cfg_cond2_negative", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), + io.Combo.Input("style", options=["regular", "nested"]), + ], + outputs=[io.Guider.Output()] + ) - RETURN_TYPES = ("GUIDER",) - - FUNCTION = "get_guider" - CATEGORY = "sampling/custom_sampling/guiders" - - def get_guider(self, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative, style): + @classmethod + def execute(cls, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative, style) -> io.NodeOutput: guider = Guider_DualCFG(model) guider.set_conds(cond1, cond2, negative) guider.set_cfg(cfg_conds, cfg_cond2_negative, nested=(style == "nested")) - return (guider,) + return io.NodeOutput(guider) -class DisableNoise: + get_guider = execute + +class DisableNoise(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required":{ - } - } + def define_schema(cls): + return io.Schema( + node_id="DisableNoise", + category="sampling/custom_sampling/noise", + inputs=[], + outputs=[io.Noise.Output()] + ) - RETURN_TYPES = ("NOISE",) - FUNCTION = "get_noise" - CATEGORY = 
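# Aside on Guider_DualCFG.predict_noise above (regular style): writing the three
# batched model outputs as eps_neg, eps_mid, eps_pos (= out[0], out[1], out[2]),
# the returned prediction is
#   cfg_function(eps_mid, eps_neg, cfg2) + cfg1 * (eps_pos - eps_mid)
# i.e. ordinary CFG between cond2 and the negative prompt, plus a second guidance
# term that steers from cond2 toward cond1 with strength cfg_conds.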
"sampling/custom_sampling/noise" - - def get_noise(self): - return (Noise_EmptyNoise(),) - - -class RandomNoise(DisableNoise): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "noise_seed": ("INT", { - "default": 0, - "min": 0, - "max": 0xffffffffffffffff, - "control_after_generate": True, - }), - } - } + def execute(cls) -> io.NodeOutput: + return io.NodeOutput(Noise_EmptyNoise()) - def get_noise(self, noise_seed): - return (Noise_RandomNoise(noise_seed),) + get_noise = execute -class SamplerCustomAdvanced: +class RandomNoise(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"noise": ("NOISE", ), - "guider": ("GUIDER", ), - "sampler": ("SAMPLER", ), - "sigmas": ("SIGMAS", ), - "latent_image": ("LATENT", ), - } - } + def define_schema(cls): + return io.Schema( + node_id="RandomNoise", + category="sampling/custom_sampling/noise", + inputs=[io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True)], + outputs=[io.Noise.Output()] + ) - RETURN_TYPES = ("LATENT","LATENT") - RETURN_NAMES = ("output", "denoised_output") + @classmethod + def execute(cls, noise_seed) -> io.NodeOutput: + return io.NodeOutput(Noise_RandomNoise(noise_seed)) - FUNCTION = "sample" + get_noise = execute - CATEGORY = "sampling/custom_sampling" - def sample(self, noise, guider, sampler, sigmas, latent_image): +class SamplerCustomAdvanced(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SamplerCustomAdvanced", + category="sampling/custom_sampling", + inputs=[ + io.Noise.Input("noise"), + io.Guider.Input("guider"), + io.Sampler.Input("sampler"), + io.Sigmas.Input("sigmas"), + io.Latent.Input("latent_image"), + ], + outputs=[ + io.Latent.Output(display_name="output"), + io.Latent.Output(display_name="denoised_output"), + ] + ) + + @classmethod + def execute(cls, noise, guider, sampler, sigmas, latent_image) -> io.NodeOutput: latent = latent_image latent_image = latent["samples"] latent = latent.copy() @@ -842,28 +918,32 @@ class SamplerCustomAdvanced: out_denoised["samples"] = guider.model_patcher.model.process_latent_out(x0_output["x0"].cpu()) else: out_denoised = out - return (out, out_denoised) + return io.NodeOutput(out, out_denoised) -class AddNoise: + sample = execute + +class AddNoise(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": - {"model": ("MODEL",), - "noise": ("NOISE", ), - "sigmas": ("SIGMAS", ), - "latent_image": ("LATENT", ), - } - } + def define_schema(cls): + return io.Schema( + node_id="AddNoise", + category="_for_testing/custom_sampling/noise", + is_experimental=True, + inputs=[ + io.Model.Input("model"), + io.Noise.Input("noise"), + io.Sigmas.Input("sigmas"), + io.Latent.Input("latent_image"), + ], + outputs=[ + io.Latent.Output(), + ] + ) - RETURN_TYPES = ("LATENT",) - - FUNCTION = "add_noise" - - CATEGORY = "_for_testing/custom_sampling/noise" - - def add_noise(self, model, noise, sigmas, latent_image): + @classmethod + def execute(cls, model, noise, sigmas, latent_image) -> io.NodeOutput: if len(sigmas) == 0: - return latent_image + return io.NodeOutput(latent_image) latent = latent_image latent_image = latent["samples"] @@ -887,46 +967,50 @@ class AddNoise: out = latent.copy() out["samples"] = noisy - return (out,) + return io.NodeOutput(out) + + add_noise = execute -NODE_CLASS_MAPPINGS = { - "SamplerCustom": SamplerCustom, - "BasicScheduler": BasicScheduler, - "KarrasScheduler": KarrasScheduler, - "ExponentialScheduler": ExponentialScheduler, - 
"PolyexponentialScheduler": PolyexponentialScheduler, - "LaplaceScheduler": LaplaceScheduler, - "VPScheduler": VPScheduler, - "BetaSamplingScheduler": BetaSamplingScheduler, - "SDTurboScheduler": SDTurboScheduler, - "KSamplerSelect": KSamplerSelect, - "SamplerEulerAncestral": SamplerEulerAncestral, - "SamplerEulerAncestralCFGPP": SamplerEulerAncestralCFGPP, - "SamplerLMS": SamplerLMS, - "SamplerDPMPP_3M_SDE": SamplerDPMPP_3M_SDE, - "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, - "SamplerDPMPP_SDE": SamplerDPMPP_SDE, - "SamplerDPMPP_2S_Ancestral": SamplerDPMPP_2S_Ancestral, - "SamplerDPMAdaptative": SamplerDPMAdaptative, - "SamplerER_SDE": SamplerER_SDE, - "SamplerSASolver": SamplerSASolver, - "SplitSigmas": SplitSigmas, - "SplitSigmasDenoise": SplitSigmasDenoise, - "FlipSigmas": FlipSigmas, - "SetFirstSigma": SetFirstSigma, - "ExtendIntermediateSigmas": ExtendIntermediateSigmas, - "SamplingPercentToSigma": SamplingPercentToSigma, +class CustomSamplersExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + SamplerCustom, + BasicScheduler, + KarrasScheduler, + ExponentialScheduler, + PolyexponentialScheduler, + LaplaceScheduler, + VPScheduler, + BetaSamplingScheduler, + SDTurboScheduler, + KSamplerSelect, + SamplerEulerAncestral, + SamplerEulerAncestralCFGPP, + SamplerLMS, + SamplerDPMPP_3M_SDE, + SamplerDPMPP_2M_SDE, + SamplerDPMPP_SDE, + SamplerDPMPP_2S_Ancestral, + SamplerDPMAdaptative, + SamplerER_SDE, + SamplerSASolver, + SplitSigmas, + SplitSigmasDenoise, + FlipSigmas, + SetFirstSigma, + ExtendIntermediateSigmas, + SamplingPercentToSigma, + CFGGuider, + DualCFGGuider, + BasicGuider, + RandomNoise, + DisableNoise, + AddNoise, + SamplerCustomAdvanced, + ] - "CFGGuider": CFGGuider, - "DualCFGGuider": DualCFGGuider, - "BasicGuider": BasicGuider, - "RandomNoise": RandomNoise, - "DisableNoise": DisableNoise, - "AddNoise": AddNoise, - "SamplerCustomAdvanced": SamplerCustomAdvanced, -} -NODE_DISPLAY_NAME_MAPPINGS = { - "SamplerEulerAncestralCFGPP": "SamplerEulerAncestralCFG++", -} +async def comfy_entrypoint() -> CustomSamplersExtension: + return CustomSamplersExtension() From cc6a8dcd1ad9cc9ef7602ee141174a0cea0ed4ce Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Thu, 27 Nov 2025 08:18:08 +0800 Subject: [PATCH 0942/1073] Dataset Processing Nodes and Improved LoRA Trainer Nodes with multi resolution supports. 
(#10708) * Create nodes_dataset.py * Add encoded dataset caching mechanism * make training node to work with our dataset system * allow trainer node to get different resolution dataset * move all dataset related implementation to nodes_dataset * Rewrite dataset system with new io schema * Rewrite training system with new io schema * add ui pbar * Add outputs' id/name * Fix bad id/naming * use single process instead of input list when no need * fix wrong output_list flag * use torch.load/save and fix bad behaviors --- comfy_extras/nodes_dataset.py | 1532 +++++++++++++++++++++++++++++++++ comfy_extras/nodes_train.py | 967 ++++++++++----------- nodes.py | 1 + 3 files changed, 1980 insertions(+), 520 deletions(-) create mode 100644 comfy_extras/nodes_dataset.py diff --git a/comfy_extras/nodes_dataset.py b/comfy_extras/nodes_dataset.py new file mode 100644 index 000000000..b23867505 --- /dev/null +++ b/comfy_extras/nodes_dataset.py @@ -0,0 +1,1532 @@ +import logging +import os +import math +import json + +import numpy as np +import torch +from PIL import Image +from typing_extensions import override + +import folder_paths +import node_helpers +from comfy_api.latest import ComfyExtension, io + + +def load_and_process_images(image_files, input_dir): + """Utility function to load and process a list of images. + + Args: + image_files: List of image filenames + input_dir: Base directory containing the images + resize_method: How to handle images of different sizes ("None", "Stretch", "Crop", "Pad") + + Returns: + torch.Tensor: Batch of processed images + """ + if not image_files: + raise ValueError("No valid images found in input") + + output_images = [] + + for file in image_files: + image_path = os.path.join(input_dir, file) + img = node_helpers.pillow(Image.open, image_path) + + if img.mode == "I": + img = img.point(lambda i: i * (1 / 255)) + img = img.convert("RGB") + img_array = np.array(img).astype(np.float32) / 255.0 + img_tensor = torch.from_numpy(img_array)[None,] + output_images.append(img_tensor) + + return output_images + + +class LoadImageDataSetFromFolderNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoadImageDataSetFromFolder", + display_name="Load Image Dataset from Folder", + category="dataset", + is_experimental=True, + inputs=[ + io.Combo.Input( + "folder", + options=folder_paths.get_input_subfolders(), + tooltip="The folder to load images from.", + ) + ], + outputs=[ + io.Image.Output( + display_name="images", + is_output_list=True, + tooltip="List of loaded images", + ) + ], + ) + + @classmethod + def execute(cls, folder): + sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) + valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] + image_files = [ + f + for f in os.listdir(sub_input_dir) + if any(f.lower().endswith(ext) for ext in valid_extensions) + ] + output_tensor = load_and_process_images(image_files, sub_input_dir) + return io.NodeOutput(output_tensor) + + +class LoadImageTextDataSetFromFolderNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoadImageTextDataSetFromFolder", + display_name="Load Image and Text Dataset from Folder", + category="dataset", + is_experimental=True, + inputs=[ + io.Combo.Input( + "folder", + options=folder_paths.get_input_subfolders(), + tooltip="The folder to load images from.", + ) + ], + outputs=[ + io.Image.Output( + display_name="images", + is_output_list=True, + tooltip="List of loaded images", + ), + io.String.Output( + 
display_name="texts", + is_output_list=True, + tooltip="List of text captions", + ), + ], + ) + + @classmethod + def execute(cls, folder): + logging.info(f"Loading images from folder: {folder}") + + sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) + valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] + + image_files = [] + for item in os.listdir(sub_input_dir): + path = os.path.join(sub_input_dir, item) + if any(item.lower().endswith(ext) for ext in valid_extensions): + image_files.append(path) + elif os.path.isdir(path): + # Support kohya-ss/sd-scripts folder structure + repeat = 1 + if item.split("_")[0].isdigit(): + repeat = int(item.split("_")[0]) + image_files.extend( + [ + os.path.join(path, f) + for f in os.listdir(path) + if any(f.lower().endswith(ext) for ext in valid_extensions) + ] + * repeat + ) + + caption_file_path = [ + f.replace(os.path.splitext(f)[1], ".txt") for f in image_files + ] + captions = [] + for caption_file in caption_file_path: + caption_path = os.path.join(sub_input_dir, caption_file) + if os.path.exists(caption_path): + with open(caption_path, "r", encoding="utf-8") as f: + caption = f.read().strip() + captions.append(caption) + else: + captions.append("") + + output_tensor = load_and_process_images(image_files, sub_input_dir) + + logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.") + return io.NodeOutput(output_tensor, captions) + + +def save_images_to_folder(image_list, output_dir, prefix="image"): + """Utility function to save a list of image tensors to disk. + + Args: + image_list: List of image tensors (each [1, H, W, C] or [H, W, C] or [C, H, W]) + output_dir: Directory to save images to + prefix: Filename prefix + + Returns: + List of saved filenames + """ + os.makedirs(output_dir, exist_ok=True) + saved_files = [] + + for idx, img_tensor in enumerate(image_list): + # Handle different tensor shapes + if isinstance(img_tensor, torch.Tensor): + # Remove batch dimension if present [1, H, W, C] -> [H, W, C] + if img_tensor.dim() == 4 and img_tensor.shape[0] == 1: + img_tensor = img_tensor.squeeze(0) + + # If tensor is [C, H, W], permute to [H, W, C] + if img_tensor.dim() == 3 and img_tensor.shape[0] in [1, 3, 4]: + if ( + img_tensor.shape[0] <= 4 + and img_tensor.shape[1] > 4 + and img_tensor.shape[2] > 4 + ): + img_tensor = img_tensor.permute(1, 2, 0) + + # Convert to numpy and scale to 0-255 + img_array = img_tensor.cpu().numpy() + img_array = np.clip(img_array * 255.0, 0, 255).astype(np.uint8) + + # Convert to PIL Image + img = Image.fromarray(img_array) + else: + raise ValueError(f"Expected torch.Tensor, got {type(img_tensor)}") + + # Save image + filename = f"{prefix}_{idx:05d}.png" + filepath = os.path.join(output_dir, filename) + img.save(filepath) + saved_files.append(filename) + + return saved_files + + +class SaveImageDataSetToFolderNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SaveImageDataSetToFolder", + display_name="Save Image Dataset to Folder", + category="dataset", + is_experimental=True, + is_output_node=True, + is_input_list=True, # Receive images as list + inputs=[ + io.Image.Input("images", tooltip="List of images to save."), + io.String.Input( + "folder_name", + default="dataset", + tooltip="Name of the folder to save images to (inside output directory).", + ), + io.String.Input( + "filename_prefix", + default="image", + tooltip="Prefix for saved image filenames.", + ), + ], + outputs=[], + ) + + @classmethod + def execute(cls, images, 
folder_name, filename_prefix): + # Extract scalar values + folder_name = folder_name[0] + filename_prefix = filename_prefix[0] + + output_dir = os.path.join(folder_paths.get_output_directory(), folder_name) + saved_files = save_images_to_folder(images, output_dir, filename_prefix) + + logging.info(f"Saved {len(saved_files)} images to {output_dir}.") + return io.NodeOutput() + + +class SaveImageTextDataSetToFolderNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SaveImageTextDataSetToFolder", + display_name="Save Image and Text Dataset to Folder", + category="dataset", + is_experimental=True, + is_output_node=True, + is_input_list=True, # Receive both images and texts as lists + inputs=[ + io.Image.Input("images", tooltip="List of images to save."), + io.String.Input("texts", tooltip="List of text captions to save."), + io.String.Input( + "folder_name", + default="dataset", + tooltip="Name of the folder to save images to (inside output directory).", + ), + io.String.Input( + "filename_prefix", + default="image", + tooltip="Prefix for saved image filenames.", + ), + ], + outputs=[], + ) + + @classmethod + def execute(cls, images, texts, folder_name, filename_prefix): + # Extract scalar values + folder_name = folder_name[0] + filename_prefix = filename_prefix[0] + + output_dir = os.path.join(folder_paths.get_output_directory(), folder_name) + saved_files = save_images_to_folder(images, output_dir, filename_prefix) + + # Save captions + for idx, (filename, caption) in enumerate(zip(saved_files, texts)): + caption_filename = filename.replace(".png", ".txt") + caption_path = os.path.join(output_dir, caption_filename) + with open(caption_path, "w", encoding="utf-8") as f: + f.write(caption) + + logging.info(f"Saved {len(saved_files)} images and captions to {output_dir}.") + return io.NodeOutput() + + +# ========== Helper Functions for Transform Nodes ========== + + +def tensor_to_pil(img_tensor): + """Convert tensor to PIL Image.""" + if img_tensor.dim() == 4 and img_tensor.shape[0] == 1: + img_tensor = img_tensor.squeeze(0) + img_array = (img_tensor.cpu().numpy() * 255).clip(0, 255).astype(np.uint8) + return Image.fromarray(img_array) + + +def pil_to_tensor(img): + """Convert PIL Image to tensor.""" + img_array = np.array(img).astype(np.float32) / 255.0 + return torch.from_numpy(img_array)[None,] + + +# ========== Base Classes for Transform Nodes ========== + + +class ImageProcessingNode(io.ComfyNode): + """Base class for image processing nodes that operate on images. + + Child classes should set: + node_id: Unique node identifier (required) + display_name: Display name (optional, defaults to node_id) + description: Node description (optional) + extra_inputs: List of additional io.Input objects beyond "images" (optional) + is_group_process: None (auto-detect), True (group), or False (individual) (optional) + is_output_list: True (list output) or False (single output) (optional, default True) + + Child classes must implement ONE of: + _process(cls, image, **kwargs) -> tensor (for single-item processing) + _group_process(cls, images, **kwargs) -> list[tensor] (for group processing) + """ + + node_id = None + display_name = None + description = None + extra_inputs = [] + is_group_process = None # None = auto-detect, True/False = explicit + is_output_list = None # None = auto-detect based on processing mode + + @classmethod + def _detect_processing_mode(cls): + """Detect whether this node uses group or individual processing. 
+ + Returns: + bool: True if group processing, False if individual processing + """ + # Explicit setting takes precedence + if cls.is_group_process is not None: + return cls.is_group_process + + # Check which method is overridden by looking at the defining class in MRO + base_class = ImageProcessingNode + + # Find which class in MRO defines _process + process_definer = None + for klass in cls.__mro__: + if "_process" in klass.__dict__: + process_definer = klass + break + + # Find which class in MRO defines _group_process + group_definer = None + for klass in cls.__mro__: + if "_group_process" in klass.__dict__: + group_definer = klass + break + + # Check what was overridden (not defined in base class) + has_process = process_definer is not None and process_definer is not base_class + has_group = group_definer is not None and group_definer is not base_class + + if has_process and has_group: + raise ValueError( + f"{cls.__name__}: Cannot override both _process and _group_process. " + "Override only one, or set is_group_process explicitly." + ) + if not has_process and not has_group: + raise ValueError( + f"{cls.__name__}: Must override either _process or _group_process" + ) + + return has_group + + @classmethod + def define_schema(cls): + if cls.node_id is None: + raise NotImplementedError(f"{cls.__name__} must set node_id class variable") + + is_group = cls._detect_processing_mode() + + # Auto-detect is_output_list if not explicitly set + # Single processing: False (backend collects results into list) + # Group processing: True by default (can be False for single-output nodes) + output_is_list = ( + cls.is_output_list if cls.is_output_list is not None else is_group + ) + + inputs = [ + io.Image.Input( + "images", + tooltip=( + "List of images to process." if is_group else "Image to process." + ), + ) + ] + inputs.extend(cls.extra_inputs) + + return io.Schema( + node_id=cls.node_id, + display_name=cls.display_name or cls.node_id, + category="dataset/image", + is_experimental=True, + is_input_list=is_group, # True for group, False for individual + inputs=inputs, + outputs=[ + io.Image.Output( + display_name="images", + is_output_list=output_is_list, + tooltip="Processed images", + ) + ], + ) + + @classmethod + def execute(cls, images, **kwargs): + """Execute the node. Routes to _process or _group_process based on mode.""" + is_group = cls._detect_processing_mode() + + # Extract scalar values from lists for parameters + params = {} + for k, v in kwargs.items(): + if isinstance(v, list) and len(v) == 1: + params[k] = v[0] + else: + params[k] = v + + if is_group: + # Group processing: images is list, call _group_process + result = cls._group_process(images, **params) + else: + # Individual processing: images is single item, call _process + result = cls._process(images, **params) + + return io.NodeOutput(result) + + @classmethod + def _process(cls, image, **kwargs): + """Override this method for single-item processing. + + Args: + image: tensor - Single image tensor + **kwargs: Additional parameters (already extracted from lists) + + Returns: + tensor - Processed image + """ + raise NotImplementedError(f"{cls.__name__} must implement _process method") + + @classmethod + def _group_process(cls, images, **kwargs): + """Override this method for group processing. 
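# Editor's note on the ImageProcessingNode contract documented above: a subclass
# overrides exactly one of _process / _group_process, and the base class derives
# is_input_list (and the default is_output_list) from which one was overridden.
# Two hypothetical subclasses, purely for illustration of the two modes:
#
#   class InvertImagesNode(ImageProcessingNode):            # per-image mode
#       node_id = "InvertImages"
#       @classmethod
#       def _process(cls, image, **kwargs):
#           return 1.0 - image                               # called once per image
#
#   class SortImagesByBrightnessNode(ImageProcessingNode):  # group mode
#       node_id = "SortImagesByBrightness"
#       @classmethod
#       def _group_process(cls, images, **kwargs):
#           # sees the whole list at once and returns a reordered list
#           return sorted(images, key=lambda t: t.mean().item())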
+ + Args: + images: list[tensor] - List of image tensors + **kwargs: Additional parameters (already extracted from lists) + + Returns: + list[tensor] - Processed images + """ + raise NotImplementedError( + f"{cls.__name__} must implement _group_process method" + ) + + +class TextProcessingNode(io.ComfyNode): + """Base class for text processing nodes that operate on texts. + + Child classes should set: + node_id: Unique node identifier (required) + display_name: Display name (optional, defaults to node_id) + description: Node description (optional) + extra_inputs: List of additional io.Input objects beyond "texts" (optional) + is_group_process: None (auto-detect), True (group), or False (individual) (optional) + is_output_list: True (list output) or False (single output) (optional, default True) + + Child classes must implement ONE of: + _process(cls, text, **kwargs) -> str (for single-item processing) + _group_process(cls, texts, **kwargs) -> list[str] (for group processing) + """ + + node_id = None + display_name = None + description = None + extra_inputs = [] + is_group_process = None # None = auto-detect, True/False = explicit + is_output_list = None # None = auto-detect based on processing mode + + @classmethod + def _detect_processing_mode(cls): + """Detect whether this node uses group or individual processing. + + Returns: + bool: True if group processing, False if individual processing + """ + # Explicit setting takes precedence + if cls.is_group_process is not None: + return cls.is_group_process + + # Check which method is overridden by looking at the defining class in MRO + base_class = TextProcessingNode + + # Find which class in MRO defines _process + process_definer = None + for klass in cls.__mro__: + if "_process" in klass.__dict__: + process_definer = klass + break + + # Find which class in MRO defines _group_process + group_definer = None + for klass in cls.__mro__: + if "_group_process" in klass.__dict__: + group_definer = klass + break + + # Check what was overridden (not defined in base class) + has_process = process_definer is not None and process_definer is not base_class + has_group = group_definer is not None and group_definer is not base_class + + if has_process and has_group: + raise ValueError( + f"{cls.__name__}: Cannot override both _process and _group_process. " + "Override only one, or set is_group_process explicitly." + ) + if not has_process and not has_group: + raise ValueError( + f"{cls.__name__}: Must override either _process or _group_process" + ) + + return has_group + + @classmethod + def define_schema(cls): + if cls.node_id is None: + raise NotImplementedError(f"{cls.__name__} must set node_id class variable") + + is_group = cls._detect_processing_mode() + + inputs = [ + io.String.Input( + "texts", + tooltip="List of texts to process." if is_group else "Text to process.", + ) + ] + inputs.extend(cls.extra_inputs) + + return io.Schema( + node_id=cls.node_id, + display_name=cls.display_name or cls.node_id, + category="dataset/text", + is_experimental=True, + is_input_list=is_group, # True for group, False for individual + inputs=inputs, + outputs=[ + io.String.Output( + display_name="texts", + is_output_list=cls.is_output_list, + tooltip="Processed texts", + ) + ], + ) + + @classmethod + def execute(cls, texts, **kwargs): + """Execute the node. 
Routes to _process or _group_process based on mode.""" + is_group = cls._detect_processing_mode() + + # Extract scalar values from lists for parameters + params = {} + for k, v in kwargs.items(): + if isinstance(v, list) and len(v) == 1: + params[k] = v[0] + else: + params[k] = v + + if is_group: + # Group processing: texts is list, call _group_process + result = cls._group_process(texts, **params) + else: + # Individual processing: texts is single item, call _process + result = cls._process(texts, **params) + + # Wrap result based on is_output_list + if cls.is_output_list: + # Result should already be a list (or will be for individual) + return io.NodeOutput(result if is_group else [result]) + else: + # Single output - wrap in list for NodeOutput + return io.NodeOutput([result]) + + @classmethod + def _process(cls, text, **kwargs): + """Override this method for single-item processing. + + Args: + text: str - Single text string + **kwargs: Additional parameters (already extracted from lists) + + Returns: + str - Processed text + """ + raise NotImplementedError(f"{cls.__name__} must implement _process method") + + @classmethod + def _group_process(cls, texts, **kwargs): + """Override this method for group processing. + + Args: + texts: list[str] - List of text strings + **kwargs: Additional parameters (already extracted from lists) + + Returns: + list[str] - Processed texts + """ + raise NotImplementedError( + f"{cls.__name__} must implement _group_process method" + ) + + +# ========== Image Transform Nodes ========== + + +class ResizeImagesToSameSizeNode(ImageProcessingNode): + node_id = "ResizeImagesToSameSize" + display_name = "Resize Images to Same Size" + description = "Resize all images to the same width and height." + extra_inputs = [ + io.Int.Input("width", default=512, min=1, max=8192, tooltip="Target width."), + io.Int.Input("height", default=512, min=1, max=8192, tooltip="Target height."), + io.Combo.Input( + "mode", + options=["stretch", "crop_center", "pad"], + default="stretch", + tooltip="Resize mode.", + ), + ] + + @classmethod + def _process(cls, image, width, height, mode): + img = tensor_to_pil(image) + + if mode == "stretch": + img = img.resize((width, height), Image.Resampling.LANCZOS) + elif mode == "crop_center": + left = max(0, (img.width - width) // 2) + top = max(0, (img.height - height) // 2) + right = min(img.width, left + width) + bottom = min(img.height, top + height) + img = img.crop((left, top, right, bottom)) + if img.width != width or img.height != height: + img = img.resize((width, height), Image.Resampling.LANCZOS) + elif mode == "pad": + img.thumbnail((width, height), Image.Resampling.LANCZOS) + new_img = Image.new("RGB", (width, height), (0, 0, 0)) + paste_x = (width - img.width) // 2 + paste_y = (height - img.height) // 2 + new_img.paste(img, (paste_x, paste_y)) + img = new_img + + return pil_to_tensor(img) + + +class ResizeImagesToPixelCountNode(ImageProcessingNode): + node_id = "ResizeImagesToPixelCount" + display_name = "Resize Images to Pixel Count" + description = "Resize images so that the total pixel count matches the specified number while preserving aspect ratio." 
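+    # Worked example of the rounding in _process below, assuming the default
+    # inputs (pixel_count = 512 * 512 = 262144, steps = 64) and a 1024x768 image:
+    #   ratio = sqrt(262144 / (1024 * 768)) ~= 0.5774
+    #   new_w = int(1024 * 0.5774 / 64) * 64 = 9 * 64 = 576
+    #   new_h = int(768 * 0.5774 / 64) * 64 = 6 * 64 = 384
+    # i.e. the aspect-preserving scale floored onto the 64-pixel grid, which
+    # keeps the result (576 * 384 = 221184 px) at or below the target count.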
+ extra_inputs = [ + io.Int.Input( + "pixel_count", + default=512 * 512, + min=1, + max=8192 * 8192, + tooltip="Target pixel count.", + ), + io.Int.Input( + "steps", + default=64, + min=1, + max=128, + tooltip="The stepping for resize width/height.", + ), + ] + + @classmethod + def _process(cls, image, pixel_count, steps): + img = tensor_to_pil(image) + w, h = img.size + pixel_count_ratio = math.sqrt(pixel_count / (w * h)) + new_w = int(w * pixel_count_ratio / steps) * steps + new_h = int(h * pixel_count_ratio / steps) * steps + logging.info(f"Resizing from {w}x{h} to {new_w}x{new_h}") + img = img.resize((new_w, new_h), Image.Resampling.LANCZOS) + return pil_to_tensor(img) + + +class ResizeImagesByShorterEdgeNode(ImageProcessingNode): + node_id = "ResizeImagesByShorterEdge" + display_name = "Resize Images by Shorter Edge" + description = "Resize images so that the shorter edge matches the specified length while preserving aspect ratio." + extra_inputs = [ + io.Int.Input( + "shorter_edge", + default=512, + min=1, + max=8192, + tooltip="Target length for the shorter edge.", + ), + ] + + @classmethod + def _process(cls, image, shorter_edge): + img = tensor_to_pil(image) + w, h = img.size + if w < h: + new_w = shorter_edge + new_h = int(h * (shorter_edge / w)) + else: + new_h = shorter_edge + new_w = int(w * (shorter_edge / h)) + img = img.resize((new_w, new_h), Image.Resampling.LANCZOS) + return pil_to_tensor(img) + + +class ResizeImagesByLongerEdgeNode(ImageProcessingNode): + node_id = "ResizeImagesByLongerEdge" + display_name = "Resize Images by Longer Edge" + description = "Resize images so that the longer edge matches the specified length while preserving aspect ratio." + extra_inputs = [ + io.Int.Input( + "longer_edge", + default=1024, + min=1, + max=8192, + tooltip="Target length for the longer edge.", + ), + ] + + @classmethod + def _process(cls, image, longer_edge): + img = tensor_to_pil(image) + w, h = img.size + if w > h: + new_w = longer_edge + new_h = int(h * (longer_edge / w)) + else: + new_h = longer_edge + new_w = int(w * (longer_edge / h)) + img = img.resize((new_w, new_h), Image.Resampling.LANCZOS) + return pil_to_tensor(img) + + +class CenterCropImagesNode(ImageProcessingNode): + node_id = "CenterCropImages" + display_name = "Center Crop Images" + description = "Center crop all images to the specified dimensions." + extra_inputs = [ + io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), + io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), + ] + + @classmethod + def _process(cls, image, width, height): + img = tensor_to_pil(image) + left = max(0, (img.width - width) // 2) + top = max(0, (img.height - height) // 2) + right = min(img.width, left + width) + bottom = min(img.height, top + height) + img = img.crop((left, top, right, bottom)) + return pil_to_tensor(img) + + +class RandomCropImagesNode(ImageProcessingNode): + node_id = "RandomCropImages" + display_name = "Random Crop Images" + description = ( + "Randomly crop all images to the specified dimensions (for data augmentation)." + ) + extra_inputs = [ + io.Int.Input("width", default=512, min=1, max=8192, tooltip="Crop width."), + io.Int.Input("height", default=512, min=1, max=8192, tooltip="Crop height."), + io.Int.Input( + "seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="Random seed." 
+ ), + ] + + @classmethod + def _process(cls, image, width, height, seed): + np.random.seed(seed % (2**32 - 1)) + img = tensor_to_pil(image) + max_left = max(0, img.width - width) + max_top = max(0, img.height - height) + left = np.random.randint(0, max_left + 1) if max_left > 0 else 0 + top = np.random.randint(0, max_top + 1) if max_top > 0 else 0 + right = min(img.width, left + width) + bottom = min(img.height, top + height) + img = img.crop((left, top, right, bottom)) + return pil_to_tensor(img) + + +class FlipImagesNode(ImageProcessingNode): + node_id = "FlipImages" + display_name = "Flip Images" + description = "Flip all images horizontally or vertically." + extra_inputs = [ + io.Combo.Input( + "direction", + options=["horizontal", "vertical"], + default="horizontal", + tooltip="Flip direction.", + ), + ] + + @classmethod + def _process(cls, image, direction): + img = tensor_to_pil(image) + if direction == "horizontal": + img = img.transpose(Image.FLIP_LEFT_RIGHT) + else: + img = img.transpose(Image.FLIP_TOP_BOTTOM) + return pil_to_tensor(img) + + +class NormalizeImagesNode(ImageProcessingNode): + node_id = "NormalizeImages" + display_name = "Normalize Images" + description = "Normalize images using mean and standard deviation." + extra_inputs = [ + io.Float.Input( + "mean", + default=0.5, + min=0.0, + max=1.0, + tooltip="Mean value for normalization.", + ), + io.Float.Input( + "std", + default=0.5, + min=0.001, + max=1.0, + tooltip="Standard deviation for normalization.", + ), + ] + + @classmethod + def _process(cls, image, mean, std): + return (image - mean) / std + + +class AdjustBrightnessNode(ImageProcessingNode): + node_id = "AdjustBrightness" + display_name = "Adjust Brightness" + description = "Adjust brightness of all images." + extra_inputs = [ + io.Float.Input( + "factor", + default=1.0, + min=0.0, + max=2.0, + tooltip="Brightness factor. 1.0 = no change, <1.0 = darker, >1.0 = brighter.", + ), + ] + + @classmethod + def _process(cls, image, factor): + return (image * factor).clamp(0.0, 1.0) + + +class AdjustContrastNode(ImageProcessingNode): + node_id = "AdjustContrast" + display_name = "Adjust Contrast" + description = "Adjust contrast of all images." + extra_inputs = [ + io.Float.Input( + "factor", + default=1.0, + min=0.0, + max=2.0, + tooltip="Contrast factor. 1.0 = no change, <1.0 = less contrast, >1.0 = more contrast.", + ), + ] + + @classmethod + def _process(cls, image, factor): + return ((image - 0.5) * factor + 0.5).clamp(0.0, 1.0) + + +class ShuffleDatasetNode(ImageProcessingNode): + node_id = "ShuffleDataset" + display_name = "Shuffle Image Dataset" + description = "Randomly shuffle the order of images in the dataset." + is_group_process = True # Requires full list to shuffle + extra_inputs = [ + io.Int.Input( + "seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="Random seed." 
+ ), + ] + + @classmethod + def _group_process(cls, images, seed): + np.random.seed(seed % (2**32 - 1)) + indices = np.random.permutation(len(images)) + return [images[i] for i in indices] + + +class ShuffleImageTextDatasetNode(io.ComfyNode): + """Special node that shuffles both images and texts together.""" + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ShuffleImageTextDataset", + display_name="Shuffle Image-Text Dataset", + category="dataset/image", + is_experimental=True, + is_input_list=True, + inputs=[ + io.Image.Input("images", tooltip="List of images to shuffle."), + io.String.Input("texts", tooltip="List of texts to shuffle."), + io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + tooltip="Random seed.", + ), + ], + outputs=[ + io.Image.Output( + display_name="images", + is_output_list=True, + tooltip="Shuffled images", + ), + io.String.Output( + display_name="texts", is_output_list=True, tooltip="Shuffled texts" + ), + ], + ) + + @classmethod + def execute(cls, images, texts, seed): + seed = seed[0] # Extract scalar + np.random.seed(seed % (2**32 - 1)) + indices = np.random.permutation(len(images)) + shuffled_images = [images[i] for i in indices] + shuffled_texts = [texts[i] for i in indices] + return io.NodeOutput(shuffled_images, shuffled_texts) + + +# ========== Text Transform Nodes ========== + + +class TextToLowercaseNode(TextProcessingNode): + node_id = "TextToLowercase" + display_name = "Text to Lowercase" + description = "Convert all texts to lowercase." + + @classmethod + def _process(cls, text): + return text.lower() + + +class TextToUppercaseNode(TextProcessingNode): + node_id = "TextToUppercase" + display_name = "Text to Uppercase" + description = "Convert all texts to uppercase." + + @classmethod + def _process(cls, text): + return text.upper() + + +class TruncateTextNode(TextProcessingNode): + node_id = "TruncateText" + display_name = "Truncate Text" + description = "Truncate all texts to a maximum length." + extra_inputs = [ + io.Int.Input( + "max_length", default=77, min=1, max=10000, tooltip="Maximum text length." + ), + ] + + @classmethod + def _process(cls, text, max_length): + return text[:max_length] + + +class AddTextPrefixNode(TextProcessingNode): + node_id = "AddTextPrefix" + display_name = "Add Text Prefix" + description = "Add a prefix to all texts." + extra_inputs = [ + io.String.Input("prefix", default="", tooltip="Prefix to add."), + ] + + @classmethod + def _process(cls, text, prefix): + return prefix + text + + +class AddTextSuffixNode(TextProcessingNode): + node_id = "AddTextSuffix" + display_name = "Add Text Suffix" + description = "Add a suffix to all texts." + extra_inputs = [ + io.String.Input("suffix", default="", tooltip="Suffix to add."), + ] + + @classmethod + def _process(cls, text, suffix): + return text + suffix + + +class ReplaceTextNode(TextProcessingNode): + node_id = "ReplaceText" + display_name = "Replace Text" + description = "Replace text in all texts." + extra_inputs = [ + io.String.Input("find", default="", tooltip="Text to find."), + io.String.Input("replace", default="", tooltip="Text to replace with."), + ] + + @classmethod + def _process(cls, text, find, replace): + return text.replace(find, replace) + + +class StripWhitespaceNode(TextProcessingNode): + node_id = "StripWhitespace" + display_name = "Strip Whitespace" + description = "Strip leading and trailing whitespace from all texts." 
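+    # The text nodes in this section all follow the same minimal pattern:
+    # override _process and TextProcessingNode handles mode detection and
+    # parameter unwrapping. A hypothetical further node (not part of this
+    # patch) would look like:
+    #
+    #   class CollapseSpacesNode(TextProcessingNode):
+    #       node_id = "CollapseSpaces"
+    #       display_name = "Collapse Spaces"
+    #
+    #       @classmethod
+    #       def _process(cls, text):
+    #           return " ".join(text.split())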
+ + @classmethod + def _process(cls, text): + return text.strip() + + +# ========== Group Processing Example Nodes ========== + + +class ImageDeduplicationNode(ImageProcessingNode): + """Remove duplicate or very similar images from the dataset using perceptual hashing.""" + + node_id = "ImageDeduplication" + display_name = "Image Deduplication" + description = "Remove duplicate or very similar images from the dataset." + is_group_process = True # Requires full list to compare images + extra_inputs = [ + io.Float.Input( + "similarity_threshold", + default=0.95, + min=0.0, + max=1.0, + tooltip="Similarity threshold (0-1). Higher means more similar. Images above this threshold are considered duplicates.", + ), + ] + + @classmethod + def _group_process(cls, images, similarity_threshold): + """Remove duplicate images using perceptual hashing.""" + if len(images) == 0: + return [] + + # Compute simple perceptual hash for each image + def compute_hash(img_tensor): + """Compute a simple perceptual hash by resizing to 8x8 and comparing to average.""" + img = tensor_to_pil(img_tensor) + # Resize to 8x8 + img_small = img.resize((8, 8), Image.Resampling.LANCZOS).convert("L") + # Get pixels + pixels = list(img_small.getdata()) + # Compute average + avg = sum(pixels) / len(pixels) + # Create hash (1 if above average, 0 otherwise) + hash_bits = "".join("1" if p > avg else "0" for p in pixels) + return hash_bits + + def hamming_distance(hash1, hash2): + """Compute Hamming distance between two hash strings.""" + return sum(c1 != c2 for c1, c2 in zip(hash1, hash2)) + + # Compute hashes for all images + hashes = [compute_hash(img) for img in images] + + # Find duplicates + keep_indices = [] + for i in range(len(images)): + is_duplicate = False + for j in keep_indices: + # Compare hashes + distance = hamming_distance(hashes[i], hashes[j]) + similarity = 1.0 - (distance / 64.0) # 64 bits total + if similarity >= similarity_threshold: + is_duplicate = True + logging.info( + f"Image {i} is similar to image {j} (similarity: {similarity:.3f}), skipping" + ) + break + + if not is_duplicate: + keep_indices.append(i) + + # Return only unique images + unique_images = [images[i] for i in keep_indices] + logging.info( + f"Deduplication: kept {len(unique_images)} out of {len(images)} images" + ) + return unique_images + + +class ImageGridNode(ImageProcessingNode): + """Combine multiple images into a single grid/collage.""" + + node_id = "ImageGrid" + display_name = "Image Grid" + description = "Arrange multiple images into a grid layout." + is_group_process = True # Requires full list to create grid + is_output_list = False # Outputs single grid image + extra_inputs = [ + io.Int.Input( + "columns", + default=4, + min=1, + max=20, + tooltip="Number of columns in the grid.", + ), + io.Int.Input( + "cell_width", + default=256, + min=32, + max=2048, + tooltip="Width of each cell in the grid.", + ), + io.Int.Input( + "cell_height", + default=256, + min=32, + max=2048, + tooltip="Height of each cell in the grid.", + ), + io.Int.Input( + "padding", default=4, min=0, max=50, tooltip="Padding between images." 
+ ), + ] + + @classmethod + def _group_process(cls, images, columns, cell_width, cell_height, padding): + """Arrange images into a grid.""" + if len(images) == 0: + raise ValueError("Cannot create grid from empty image list") + + # Calculate grid dimensions + num_images = len(images) + rows = (num_images + columns - 1) // columns # Ceiling division + + # Calculate total grid size + grid_width = columns * cell_width + (columns - 1) * padding + grid_height = rows * cell_height + (rows - 1) * padding + + # Create blank grid + grid = Image.new("RGB", (grid_width, grid_height), (0, 0, 0)) + + # Place images + for idx, img_tensor in enumerate(images): + row = idx // columns + col = idx % columns + + # Convert to PIL and resize to cell size + img = tensor_to_pil(img_tensor) + img = img.resize((cell_width, cell_height), Image.Resampling.LANCZOS) + + # Calculate position + x = col * (cell_width + padding) + y = row * (cell_height + padding) + + # Paste into grid + grid.paste(img, (x, y)) + + logging.info( + f"Created {columns}x{rows} grid with {num_images} images ({grid_width}x{grid_height})" + ) + return pil_to_tensor(grid) + + +class MergeImageListsNode(ImageProcessingNode): + """Merge multiple image lists into a single list.""" + + node_id = "MergeImageLists" + display_name = "Merge Image Lists" + description = "Concatenate multiple image lists into one." + is_group_process = True # Receives images as list + + @classmethod + def _group_process(cls, images): + """Simply return the images list (already merged by input handling).""" + # When multiple list inputs are connected, they're concatenated + # For now, this is a simple pass-through + logging.info(f"Merged image list contains {len(images)} images") + return images + + +class MergeTextListsNode(TextProcessingNode): + """Merge multiple text lists into a single list.""" + + node_id = "MergeTextLists" + display_name = "Merge Text Lists" + description = "Concatenate multiple text lists into one." + is_group_process = True # Receives texts as list + + @classmethod + def _group_process(cls, texts): + """Simply return the texts list (already merged by input handling).""" + # When multiple list inputs are connected, they're concatenated + # For now, this is a simple pass-through + logging.info(f"Merged text list contains {len(texts)} texts") + return texts + + +# ========== Training Dataset Nodes ========== + + +class MakeTrainingDataset(io.ComfyNode): + """Encode images with VAE and texts with CLIP to create a training dataset.""" + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="MakeTrainingDataset", + display_name="Make Training Dataset", + category="dataset", + is_experimental=True, + is_input_list=True, # images and texts as lists + inputs=[ + io.Image.Input("images", tooltip="List of images to encode."), + io.Vae.Input( + "vae", tooltip="VAE model for encoding images to latents." + ), + io.Clip.Input( + "clip", tooltip="CLIP model for encoding text to conditioning." + ), + io.String.Input( + "texts", + optional=True, + tooltip="List of text captions. 
Can be length n (matching images), 1 (repeated for all), or omitted (uses empty string).", + ), + ], + outputs=[ + io.Latent.Output( + display_name="latents", + is_output_list=True, + tooltip="List of latent dicts", + ), + io.Conditioning.Output( + display_name="conditioning", + is_output_list=True, + tooltip="List of conditioning lists", + ), + ], + ) + + @classmethod + def execute(cls, images, vae, clip, texts=None): + # Extract scalars (vae and clip are single values wrapped in lists) + vae = vae[0] + clip = clip[0] + + # Handle text list + num_images = len(images) + + if texts is None or len(texts) == 0: + # Treat as [""] for unconditional training + texts = [""] + + if len(texts) == 1 and num_images > 1: + # Repeat single text for all images + texts = texts * num_images + elif len(texts) != num_images: + raise ValueError( + f"Number of texts ({len(texts)}) does not match number of images ({num_images}). " + f"Text list should have length {num_images}, 1, or 0." + ) + + # Encode images with VAE + logging.info(f"Encoding {num_images} images with VAE...") + latents_list = [] # list[{"samples": tensor}] + for img_tensor in images: + # img_tensor is [1, H, W, 3] + latent_tensor = vae.encode(img_tensor[:, :, :, :3]) + latents_list.append({"samples": latent_tensor}) + + # Encode texts with CLIP + logging.info(f"Encoding {len(texts)} texts with CLIP...") + conditioning_list = [] # list[list[cond]] + for text in texts: + if text == "": + cond = clip.encode_from_tokens_scheduled(clip.tokenize("")) + else: + tokens = clip.tokenize(text) + cond = clip.encode_from_tokens_scheduled(tokens) + conditioning_list.append(cond) + + logging.info( + f"Created dataset with {len(latents_list)} latents and {len(conditioning_list)} conditioning." + ) + return io.NodeOutput(latents_list, conditioning_list) + + +class SaveTrainingDataset(io.ComfyNode): + """Save encoded training dataset (latents + conditioning) to disk.""" + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SaveTrainingDataset", + display_name="Save Training Dataset", + category="dataset", + is_experimental=True, + is_output_node=True, + is_input_list=True, # Receive lists + inputs=[ + io.Latent.Input( + "latents", + tooltip="List of latent dicts from MakeTrainingDataset.", + ), + io.Conditioning.Input( + "conditioning", + tooltip="List of conditioning lists from MakeTrainingDataset.", + ), + io.String.Input( + "folder_name", + default="training_dataset", + tooltip="Name of folder to save dataset (inside output directory).", + ), + io.Int.Input( + "shard_size", + default=1000, + min=1, + max=100000, + tooltip="Number of samples per shard file.", + ), + ], + outputs=[], + ) + + @classmethod + def execute(cls, latents, conditioning, folder_name, shard_size): + # Extract scalars + folder_name = folder_name[0] + shard_size = shard_size[0] + + # latents: list[{"samples": tensor}] + # conditioning: list[list[cond]] + + # Validate lengths match + if len(latents) != len(conditioning): + raise ValueError( + f"Number of latents ({len(latents)}) does not match number of conditions ({len(conditioning)}). " + f"Something went wrong in dataset preparation." + ) + + # Create output directory + output_dir = os.path.join(folder_paths.get_output_directory(), folder_name) + os.makedirs(output_dir, exist_ok=True) + + # Prepare data pairs + num_samples = len(latents) + num_shards = (num_samples + shard_size - 1) // shard_size # Ceiling division + + logging.info( + f"Saving {num_samples} samples to {num_shards} shards in {output_dir}..." 
+ ) + + # Save data in shards + for shard_idx in range(num_shards): + start_idx = shard_idx * shard_size + end_idx = min(start_idx + shard_size, num_samples) + + # Get shard data (list of latent dicts and conditioning lists) + shard_data = { + "latents": latents[start_idx:end_idx], + "conditioning": conditioning[start_idx:end_idx], + } + + # Save shard + shard_filename = f"shard_{shard_idx:04d}.pkl" + shard_path = os.path.join(output_dir, shard_filename) + + with open(shard_path, "wb") as f: + torch.save(shard_data, f) + + logging.info( + f"Saved shard {shard_idx + 1}/{num_shards}: {shard_filename} ({end_idx - start_idx} samples)" + ) + + # Save metadata + metadata = { + "num_samples": num_samples, + "num_shards": num_shards, + "shard_size": shard_size, + } + metadata_path = os.path.join(output_dir, "metadata.json") + with open(metadata_path, "w") as f: + json.dump(metadata, f, indent=2) + + logging.info(f"Successfully saved {num_samples} samples to {output_dir}.") + return io.NodeOutput() + + +class LoadTrainingDataset(io.ComfyNode): + """Load encoded training dataset from disk.""" + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoadTrainingDataset", + display_name="Load Training Dataset", + category="dataset", + is_experimental=True, + inputs=[ + io.String.Input( + "folder_name", + default="training_dataset", + tooltip="Name of folder containing the saved dataset (inside output directory).", + ), + ], + outputs=[ + io.Latent.Output( + display_name="latents", + is_output_list=True, + tooltip="List of latent dicts", + ), + io.Conditioning.Output( + display_name="conditioning", + is_output_list=True, + tooltip="List of conditioning lists", + ), + ], + ) + + @classmethod + def execute(cls, folder_name): + # Get dataset directory + dataset_dir = os.path.join(folder_paths.get_output_directory(), folder_name) + + if not os.path.exists(dataset_dir): + raise ValueError(f"Dataset directory not found: {dataset_dir}") + + # Find all shard files + shard_files = sorted( + [ + f + for f in os.listdir(dataset_dir) + if f.startswith("shard_") and f.endswith(".pkl") + ] + ) + + if not shard_files: + raise ValueError(f"No shard files found in {dataset_dir}") + + logging.info(f"Loading {len(shard_files)} shards from {dataset_dir}...") + + # Load all shards + all_latents = [] # list[{"samples": tensor}] + all_conditioning = [] # list[list[cond]] + + for shard_file in shard_files: + shard_path = os.path.join(dataset_dir, shard_file) + + with open(shard_path, "rb") as f: + shard_data = torch.load(f) + + all_latents.extend(shard_data["latents"]) + all_conditioning.extend(shard_data["conditioning"]) + + logging.info(f"Loaded {shard_file}: {len(shard_data['latents'])} samples") + + logging.info( + f"Successfully loaded {len(all_latents)} samples from {dataset_dir}." 
+ ) + return io.NodeOutput(all_latents, all_conditioning) + + +# ========== Extension Setup ========== + + +class DatasetExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + # Data loading/saving nodes + LoadImageDataSetFromFolderNode, + LoadImageTextDataSetFromFolderNode, + SaveImageDataSetToFolderNode, + SaveImageTextDataSetToFolderNode, + # Image transform nodes + ResizeImagesToSameSizeNode, + ResizeImagesToPixelCountNode, + ResizeImagesByShorterEdgeNode, + ResizeImagesByLongerEdgeNode, + CenterCropImagesNode, + RandomCropImagesNode, + FlipImagesNode, + NormalizeImagesNode, + AdjustBrightnessNode, + AdjustContrastNode, + ShuffleDatasetNode, + ShuffleImageTextDatasetNode, + # Text transform nodes + TextToLowercaseNode, + TextToUppercaseNode, + TruncateTextNode, + AddTextPrefixNode, + AddTextSuffixNode, + ReplaceTextNode, + StripWhitespaceNode, + # Group processing examples + ImageDeduplicationNode, + ImageGridNode, + MergeImageListsNode, + MergeTextListsNode, + # Training dataset nodes + MakeTrainingDataset, + SaveTrainingDataset, + LoadTrainingDataset, + ] + + +async def comfy_entrypoint() -> DatasetExtension: + return DatasetExtension() diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index 9e6ec6780..cb24ab709 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -1,15 +1,13 @@ -import datetime -import json import logging import os import numpy as np import safetensors import torch -from PIL import Image, ImageDraw, ImageFont -from PIL.PngImagePlugin import PngInfo import torch.utils.checkpoint -import tqdm +from tqdm.auto import trange +from PIL import Image, ImageDraw, ImageFont +from typing_extensions import override import comfy.samplers import comfy.sd @@ -18,9 +16,9 @@ import comfy.model_management import comfy_extras.nodes_custom_sampler import folder_paths import node_helpers -from comfy.cli_args import args -from comfy.comfy_types.node_typing import IO from comfy.weight_adapter import adapters, adapter_maps +from comfy_api.latest import ComfyExtension, io, ui +from comfy.utils import ProgressBar def make_batch_extra_option_dict(d, indicies, full_size=None): @@ -56,7 +54,18 @@ def process_cond_list(d, prefix=""): class TrainSampler(comfy.samplers.Sampler): - def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, grad_acc=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): + def __init__( + self, + loss_fn, + optimizer, + loss_callback=None, + batch_size=1, + grad_acc=1, + total_steps=1, + seed=0, + training_dtype=torch.bfloat16, + real_dataset=None, + ): self.loss_fn = loss_fn self.optimizer = optimizer self.loss_callback = loss_callback @@ -65,54 +74,138 @@ class TrainSampler(comfy.samplers.Sampler): self.grad_acc = grad_acc self.seed = seed self.training_dtype = training_dtype + self.real_dataset: list[torch.Tensor] | None = real_dataset - def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + def fwd_bwd( + self, + model_wrap, + batch_sigmas, + batch_noise, + batch_latent, + cond, + indicies, + extra_args, + dataset_size, + bwd=True, + ): + xt = model_wrap.inner_model.model_sampling.noise_scaling( + batch_sigmas, batch_noise, batch_latent, False + ) + x0 = model_wrap.inner_model.model_sampling.noise_scaling( + torch.zeros_like(batch_sigmas), + torch.zeros_like(batch_noise), + batch_latent, + False, + ) + + model_wrap.conds["positive"] = [cond[i] for i in indicies] + 
batch_extra_args = make_batch_extra_option_dict( + extra_args, indicies, full_size=dataset_size + ) + + with torch.autocast(xt.device.type, dtype=self.training_dtype): + x0_pred = model_wrap( + xt.requires_grad_(True), + batch_sigmas.requires_grad_(True), + **batch_extra_args, + ) + loss = self.loss_fn(x0_pred, x0) + if bwd: + bwd_loss = loss / self.grad_acc + bwd_loss.backward() + return loss + + def sample( + self, + model_wrap, + sigmas, + extra_args, + callback, + noise, + latent_image=None, + denoise_mask=None, + disable_pbar=False, + ): model_wrap.conds = process_cond_list(model_wrap.conds) cond = model_wrap.conds["positive"] dataset_size = sigmas.size(0) torch.cuda.empty_cache() - for i in (pbar:=tqdm.trange(self.total_steps, desc="Training LoRA", smoothing=0.01, disable=not comfy.utils.PROGRESS_BAR_ENABLED)): - noisegen = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(self.seed + i * 1000) - indicies = torch.randperm(dataset_size)[:self.batch_size].tolist() - - batch_latent = torch.stack([latent_image[i] for i in indicies]) - batch_noise = noisegen.generate_noise({"samples": batch_latent}).to(batch_latent.device) - batch_sigmas = [ - model_wrap.inner_model.model_sampling.percent_to_sigma( - torch.rand((1,)).item() - ) for _ in range(min(self.batch_size, dataset_size)) - ] - batch_sigmas = torch.tensor(batch_sigmas).to(batch_latent.device) - - xt = model_wrap.inner_model.model_sampling.noise_scaling( - batch_sigmas, - batch_noise, - batch_latent, - False + ui_pbar = ProgressBar(self.total_steps) + for i in ( + pbar := trange( + self.total_steps, + desc="Training LoRA", + smoothing=0.01, + disable=not comfy.utils.PROGRESS_BAR_ENABLED, ) - x0 = model_wrap.inner_model.model_sampling.noise_scaling( - torch.zeros_like(batch_sigmas), - torch.zeros_like(batch_noise), - batch_latent, - False + ): + noisegen = comfy_extras.nodes_custom_sampler.Noise_RandomNoise( + self.seed + i * 1000 ) + indicies = torch.randperm(dataset_size)[: self.batch_size].tolist() - model_wrap.conds["positive"] = [ - cond[i] for i in indicies - ] - batch_extra_args = make_batch_extra_option_dict(extra_args, indicies, full_size=dataset_size) + if self.real_dataset is None: + batch_latent = torch.stack([latent_image[i] for i in indicies]) + batch_noise = noisegen.generate_noise({"samples": batch_latent}).to( + batch_latent.device + ) + batch_sigmas = [ + model_wrap.inner_model.model_sampling.percent_to_sigma( + torch.rand((1,)).item() + ) + for _ in range(min(self.batch_size, dataset_size)) + ] + batch_sigmas = torch.tensor(batch_sigmas).to(batch_latent.device) - with torch.autocast(xt.device.type, dtype=self.training_dtype): - x0_pred = model_wrap(xt, batch_sigmas, **batch_extra_args) - loss = self.loss_fn(x0_pred, x0) - loss.backward() - if self.loss_callback: - self.loss_callback(loss.item()) - pbar.set_postfix({"loss": f"{loss.item():.4f}"}) + loss = self.fwd_bwd( + model_wrap, + batch_sigmas, + batch_noise, + batch_latent, + cond, + indicies, + extra_args, + dataset_size, + bwd=True, + ) + if self.loss_callback: + self.loss_callback(loss.item()) + pbar.set_postfix({"loss": f"{loss.item():.4f}"}) + else: + total_loss = 0 + for index in indicies: + single_latent = self.real_dataset[index].to(latent_image) + batch_noise = noisegen.generate_noise( + {"samples": single_latent} + ).to(single_latent.device) + batch_sigmas = ( + model_wrap.inner_model.model_sampling.percent_to_sigma( + torch.rand((1,)).item() + ) + ) + batch_sigmas = torch.tensor([batch_sigmas]).to(single_latent.device) + loss = self.fwd_bwd( + 
model_wrap, + batch_sigmas, + batch_noise, + single_latent, + cond, + [index], + extra_args, + dataset_size, + bwd=False, + ) + total_loss += loss + total_loss = total_loss / self.grad_acc / len(indicies) + total_loss.backward() + if self.loss_callback: + self.loss_callback(total_loss.item()) + pbar.set_postfix({"loss": f"{total_loss.item():.4f}"}) - if (i+1) % self.grad_acc == 0: + if (i + 1) % self.grad_acc == 0: self.optimizer.step() self.optimizer.zero_grad() + ui_pbar.update(1) torch.cuda.empty_cache() return torch.zeros_like(latent_image) @@ -134,233 +227,6 @@ class BiasDiff(torch.nn.Module): return self.passive_memory_usage() -def load_and_process_images(image_files, input_dir, resize_method="None", w=None, h=None): - """Utility function to load and process a list of images. - - Args: - image_files: List of image filenames - input_dir: Base directory containing the images - resize_method: How to handle images of different sizes ("None", "Stretch", "Crop", "Pad") - - Returns: - torch.Tensor: Batch of processed images - """ - if not image_files: - raise ValueError("No valid images found in input") - - output_images = [] - - for file in image_files: - image_path = os.path.join(input_dir, file) - img = node_helpers.pillow(Image.open, image_path) - - if img.mode == "I": - img = img.point(lambda i: i * (1 / 255)) - img = img.convert("RGB") - - if w is None and h is None: - w, h = img.size[0], img.size[1] - - # Resize image to first image - if img.size[0] != w or img.size[1] != h: - if resize_method == "Stretch": - img = img.resize((w, h), Image.Resampling.LANCZOS) - elif resize_method == "Crop": - img = img.crop((0, 0, w, h)) - elif resize_method == "Pad": - img = img.resize((w, h), Image.Resampling.LANCZOS) - elif resize_method == "None": - raise ValueError( - "Your input image size does not match the first image in the dataset. Either select a valid resize method or use the same size for all images." - ) - - img_array = np.array(img).astype(np.float32) / 255.0 - img_tensor = torch.from_numpy(img_array)[None,] - output_images.append(img_tensor) - - return torch.cat(output_images, dim=0) - - -class LoadImageSetNode: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ( - [ - f - for f in os.listdir(folder_paths.get_input_directory()) - if f.endswith((".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif", ".jpe", ".apng", ".tif", ".tiff")) - ], - {"image_upload": True, "allow_batch": True}, - ) - }, - "optional": { - "resize_method": ( - ["None", "Stretch", "Crop", "Pad"], - {"default": "None"}, - ), - }, - } - - INPUT_IS_LIST = True - RETURN_TYPES = ("IMAGE",) - FUNCTION = "load_images" - CATEGORY = "loaders" - EXPERIMENTAL = True - DESCRIPTION = "Loads a batch of images from a directory for training." 
- - @classmethod - def VALIDATE_INPUTS(s, images, resize_method): - filenames = images[0] if isinstance(images[0], list) else images - - for image in filenames: - if not folder_paths.exists_annotated_filepath(image): - return "Invalid image file: {}".format(image) - return True - - def load_images(self, input_files, resize_method): - input_dir = folder_paths.get_input_directory() - valid_extensions = [".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif", ".jpe", ".apng", ".tif", ".tiff"] - image_files = [ - f - for f in input_files - if any(f.lower().endswith(ext) for ext in valid_extensions) - ] - output_tensor = load_and_process_images(image_files, input_dir, resize_method) - return (output_tensor,) - - -class LoadImageSetFromFolderNode: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "folder": (folder_paths.get_input_subfolders(), {"tooltip": "The folder to load images from."}) - }, - "optional": { - "resize_method": ( - ["None", "Stretch", "Crop", "Pad"], - {"default": "None"}, - ), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "load_images" - CATEGORY = "loaders" - EXPERIMENTAL = True - DESCRIPTION = "Loads a batch of images from a directory for training." - - def load_images(self, folder, resize_method): - sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) - valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] - image_files = [ - f - for f in os.listdir(sub_input_dir) - if any(f.lower().endswith(ext) for ext in valid_extensions) - ] - output_tensor = load_and_process_images(image_files, sub_input_dir, resize_method) - return (output_tensor,) - - -class LoadImageTextSetFromFolderNode: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "folder": (folder_paths.get_input_subfolders(), {"tooltip": "The folder to load images from."}), - "clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."}), - }, - "optional": { - "resize_method": ( - ["None", "Stretch", "Crop", "Pad"], - {"default": "None"}, - ), - "width": ( - IO.INT, - { - "default": -1, - "min": -1, - "max": 10000, - "step": 1, - "tooltip": "The width to resize the images to. -1 means use the original width.", - }, - ), - "height": ( - IO.INT, - { - "default": -1, - "min": -1, - "max": 10000, - "step": 1, - "tooltip": "The height to resize the images to. -1 means use the original height.", - }, - ) - }, - } - - RETURN_TYPES = ("IMAGE", IO.CONDITIONING,) - FUNCTION = "load_images" - CATEGORY = "loaders" - EXPERIMENTAL = True - DESCRIPTION = "Loads a batch of images and caption from a directory for training." 
- - def load_images(self, folder, clip, resize_method, width=None, height=None): - if clip is None: - raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.") - - logging.info(f"Loading images from folder: {folder}") - - sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) - valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] - - image_files = [] - for item in os.listdir(sub_input_dir): - path = os.path.join(sub_input_dir, item) - if any(item.lower().endswith(ext) for ext in valid_extensions): - image_files.append(path) - elif os.path.isdir(path): - # Support kohya-ss/sd-scripts folder structure - repeat = 1 - if item.split("_")[0].isdigit(): - repeat = int(item.split("_")[0]) - image_files.extend([ - os.path.join(path, f) for f in os.listdir(path) if any(f.lower().endswith(ext) for ext in valid_extensions) - ] * repeat) - - caption_file_path = [ - f.replace(os.path.splitext(f)[1], ".txt") - for f in image_files - ] - captions = [] - for caption_file in caption_file_path: - caption_path = os.path.join(sub_input_dir, caption_file) - if os.path.exists(caption_path): - with open(caption_path, "r", encoding="utf-8") as f: - caption = f.read().strip() - captions.append(caption) - else: - captions.append("") - - width = width if width != -1 else None - height = height if height != -1 else None - output_tensor = load_and_process_images(image_files, sub_input_dir, resize_method, width, height) - - logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.") - - logging.info(f"Encoding captions from {sub_input_dir}.") - conditions = [] - empty_cond = clip.encode_from_tokens_scheduled(clip.tokenize("")) - for text in captions: - if text == "": - conditions.append(empty_cond) - tokens = clip.tokenize(text) - conditions.extend(clip.encode_from_tokens_scheduled(tokens)) - logging.info(f"Encoded {len(conditions)} captions from {sub_input_dir}.") - return (output_tensor, conditions) - - def draw_loss_graph(loss_map, steps): width, height = 500, 300 img = Image.new("RGB", (width, height), "white") @@ -379,10 +245,14 @@ def draw_loss_graph(loss_map, steps): return img -def find_all_highest_child_module_with_forward(model: torch.nn.Module, result = None, name = None): +def find_all_highest_child_module_with_forward( + model: torch.nn.Module, result=None, name=None +): if result is None: result = [] - elif hasattr(model, "forward") and not isinstance(model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)): + elif hasattr(model, "forward") and not isinstance( + model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict) + ): result.append(model) logging.debug(f"Found module with forward: {name} ({model.__class__.__name__})") return result @@ -396,12 +266,13 @@ def patch(m): if not hasattr(m, "forward"): return org_forward = m.forward + def fwd(args, kwargs): return org_forward(*args, **kwargs) + def checkpointing_fwd(*args, **kwargs): - return torch.utils.checkpoint.checkpoint( - fwd, args, kwargs, use_reentrant=False - ) + return torch.utils.checkpoint.checkpoint(fwd, args, kwargs, use_reentrant=False) + m.org_forward = org_forward m.forward = checkpointing_fwd @@ -412,130 +283,126 @@ def unpatch(m): del m.org_forward -class TrainLoraNode: +class TrainLoraNode(io.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model": (IO.MODEL, {"tooltip": "The model to train the LoRA on."}), - "latents": ( - "LATENT", 
- { - "tooltip": "The Latents to use for training, serve as dataset/input of the model." - }, + def define_schema(cls): + return io.Schema( + node_id="TrainLoraNode", + display_name="Train LoRA", + category="training", + is_experimental=True, + is_input_list=True, # All inputs become lists + inputs=[ + io.Model.Input("model", tooltip="The model to train the LoRA on."), + io.Latent.Input( + "latents", + tooltip="The Latents to use for training, serve as dataset/input of the model.", ), - "positive": ( - IO.CONDITIONING, - {"tooltip": "The positive conditioning to use for training."}, + io.Conditioning.Input( + "positive", tooltip="The positive conditioning to use for training." ), - "batch_size": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 10000, - "step": 1, - "tooltip": "The batch size to use for training.", - }, + io.Int.Input( + "batch_size", + default=1, + min=1, + max=10000, + tooltip="The batch size to use for training.", ), - "grad_accumulation_steps": ( - IO.INT, - { - "default": 1, - "min": 1, - "max": 1024, - "step": 1, - "tooltip": "The number of gradient accumulation steps to use for training.", - } + io.Int.Input( + "grad_accumulation_steps", + default=1, + min=1, + max=1024, + tooltip="The number of gradient accumulation steps to use for training.", ), - "steps": ( - IO.INT, - { - "default": 16, - "min": 1, - "max": 100000, - "tooltip": "The number of steps to train the LoRA for.", - }, + io.Int.Input( + "steps", + default=16, + min=1, + max=100000, + tooltip="The number of steps to train the LoRA for.", ), - "learning_rate": ( - IO.FLOAT, - { - "default": 0.0005, - "min": 0.0000001, - "max": 1.0, - "step": 0.000001, - "tooltip": "The learning rate to use for training.", - }, + io.Float.Input( + "learning_rate", + default=0.0005, + min=0.0000001, + max=1.0, + step=0.0000001, + tooltip="The learning rate to use for training.", ), - "rank": ( - IO.INT, - { - "default": 8, - "min": 1, - "max": 128, - "tooltip": "The rank of the LoRA layers.", - }, + io.Int.Input( + "rank", + default=8, + min=1, + max=128, + tooltip="The rank of the LoRA layers.", ), - "optimizer": ( - ["AdamW", "Adam", "SGD", "RMSprop"], - { - "default": "AdamW", - "tooltip": "The optimizer to use for training.", - }, + io.Combo.Input( + "optimizer", + options=["AdamW", "Adam", "SGD", "RMSprop"], + default="AdamW", + tooltip="The optimizer to use for training.", ), - "loss_function": ( - ["MSE", "L1", "Huber", "SmoothL1"], - { - "default": "MSE", - "tooltip": "The loss function to use for training.", - }, + io.Combo.Input( + "loss_function", + options=["MSE", "L1", "Huber", "SmoothL1"], + default="MSE", + tooltip="The loss function to use for training.", ), - "seed": ( - IO.INT, - { - "default": 0, - "min": 0, - "max": 0xFFFFFFFFFFFFFFFF, - "tooltip": "The seed to use for training (used in generator for LoRA weight initialization and noise sampling)", - }, + io.Int.Input( + "seed", + default=0, + min=0, + max=0xFFFFFFFFFFFFFFFF, + tooltip="The seed to use for training (used in generator for LoRA weight initialization and noise sampling)", ), - "training_dtype": ( - ["bf16", "fp32"], - {"default": "bf16", "tooltip": "The dtype to use for training."}, + io.Combo.Input( + "training_dtype", + options=["bf16", "fp32"], + default="bf16", + tooltip="The dtype to use for training.", ), - "lora_dtype": ( - ["bf16", "fp32"], - {"default": "bf16", "tooltip": "The dtype to use for lora."}, + io.Combo.Input( + "lora_dtype", + options=["bf16", "fp32"], + default="bf16", + tooltip="The dtype to use for lora.", ), - 
"algorithm": ( - list(adapter_maps.keys()), - {"default": list(adapter_maps.keys())[0], "tooltip": "The algorithm to use for training."}, + io.Combo.Input( + "algorithm", + options=list(adapter_maps.keys()), + default=list(adapter_maps.keys())[0], + tooltip="The algorithm to use for training.", ), - "gradient_checkpointing": ( - IO.BOOLEAN, - { - "default": True, - "tooltip": "Use gradient checkpointing for training.", - } + io.Boolean.Input( + "gradient_checkpointing", + default=True, + tooltip="Use gradient checkpointing for training.", ), - "existing_lora": ( - folder_paths.get_filename_list("loras") + ["[None]"], - { - "default": "[None]", - "tooltip": "The existing LoRA to append to. Set to None for new LoRA.", - }, + io.Combo.Input( + "existing_lora", + options=folder_paths.get_filename_list("loras") + ["[None]"], + default="[None]", + tooltip="The existing LoRA to append to. Set to None for new LoRA.", ), - }, - } + ], + outputs=[ + io.Model.Output( + display_name="model", tooltip="Model with LoRA applied" + ), + io.Custom("LORA_MODEL").Output( + display_name="lora", tooltip="LoRA weights" + ), + io.Custom("LOSS_MAP").Output( + display_name="loss_map", tooltip="Loss history" + ), + io.Int.Output(display_name="steps", tooltip="Total training steps"), + ], + ) - RETURN_TYPES = (IO.MODEL, IO.LORA_MODEL, IO.LOSS_MAP, IO.INT) - RETURN_NAMES = ("model_with_lora", "lora", "loss", "steps") - FUNCTION = "train" - CATEGORY = "training" - EXPERIMENTAL = True - - def train( - self, + @classmethod + def execute( + cls, model, latents, positive, @@ -553,13 +420,74 @@ class TrainLoraNode: gradient_checkpointing, existing_lora, ): + # Extract scalars from lists (due to is_input_list=True) + model = model[0] + batch_size = batch_size[0] + steps = steps[0] + grad_accumulation_steps = grad_accumulation_steps[0] + learning_rate = learning_rate[0] + rank = rank[0] + optimizer = optimizer[0] + loss_function = loss_function[0] + seed = seed[0] + training_dtype = training_dtype[0] + lora_dtype = lora_dtype[0] + algorithm = algorithm[0] + gradient_checkpointing = gradient_checkpointing[0] + existing_lora = existing_lora[0] + + # Handle latents - either single dict or list of dicts + if len(latents) == 1: + latents = latents[0]["samples"] # Single latent dict + else: + latent_list = [] + for latent in latents: + latent = latent["samples"] + bs = latent.shape[0] + if bs != 1: + for sub_latent in latent: + latent_list.append(sub_latent[None]) + else: + latent_list.append(latent) + latents = latent_list + + # Handle conditioning - either single list or list of lists + if len(positive) == 1: + positive = positive[0] # Single conditioning list + else: + # Multiple conditioning lists - flatten + flat_positive = [] + for cond in positive: + if isinstance(cond, list): + flat_positive.extend(cond) + else: + flat_positive.append(cond) + positive = flat_positive + mp = model.clone() dtype = node_helpers.string_to_torch_dtype(training_dtype) lora_dtype = node_helpers.string_to_torch_dtype(lora_dtype) mp.set_model_compute_dtype(dtype) - latents = latents["samples"].to(dtype) - num_images = latents.shape[0] + # latents here can be list of different size latent or one large batch + if isinstance(latents, list): + all_shapes = set() + latents = [t.to(dtype) for t in latents] + for latent in latents: + all_shapes.add(latent.shape) + logging.info(f"Latent shapes: {all_shapes}") + if len(all_shapes) > 1: + multi_res = True + else: + multi_res = False + latents = torch.cat(latents, dim=0) + num_images = len(latents) + elif 
isinstance(latents, torch.Tensor):
+            latents = latents.to(dtype)
+            num_images = latents.shape[0]
+            multi_res = False  # a single batched tensor is one resolution
+        else:
+            # fail fast: num_images and multi_res would be undefined below
+            raise ValueError(f"Invalid latents type: {type(latents)}")
+
         logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}")
         if len(positive) == 1 and num_images > 1:
             positive = positive * num_images
@@ -591,9 +519,7 @@ class TrainLoraNode:
                         shape = m.weight.shape
                         if len(shape) >= 2:
                             alpha = float(existing_weights.get(f"{key}.alpha", 1.0))
-                            dora_scale = existing_weights.get(
-                                f"{key}.dora_scale", None
-                            )
+                            dora_scale = existing_weights.get(f"{key}.dora_scale", None)
                             for adapter_cls in adapters:
                                 existing_adapter = adapter_cls.load(
                                     n, existing_weights, alpha, dora_scale
@@ -605,7 +531,9 @@ class TrainLoraNode:
                     adapter_cls = adapter_maps[algorithm]
 
                     if existing_adapter is not None:
-                        train_adapter = existing_adapter.to_train().to(lora_dtype)
+                        train_adapter = existing_adapter.to_train().to(
+                            lora_dtype
+                        )
                     else:
                         # Use LoRA with alpha=1.0 by default
                         train_adapter = adapter_cls.create_train(
@@ -629,7 +557,9 @@ class TrainLoraNode:
                 if hasattr(m, "bias") and m.bias is not None:
                     key = "{}.bias".format(n)
                     bias = torch.nn.Parameter(
-                        torch.zeros(m.bias.shape, dtype=lora_dtype, requires_grad=True)
+                        torch.zeros(
+                            m.bias.shape, dtype=lora_dtype, requires_grad=True
+                        )
                     )
                     bias_module = BiasDiff(bias)
                     lora_sd["{}.diff_b".format(n)] = bias
@@ -657,24 +587,31 @@ class TrainLoraNode:
 
         # setup models
         if gradient_checkpointing:
-            for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model):
+            for m in find_all_highest_child_module_with_forward(
+                mp.model.diffusion_model
+            ):
                 patch(m)
         mp.model.requires_grad_(False)
-        comfy.model_management.load_models_gpu([mp], memory_required=1e20, force_full_load=True)
+        comfy.model_management.load_models_gpu(
+            [mp], memory_required=1e20, force_full_load=True
+        )
 
         # Setup sampler and guider like in test script
         loss_map = {"loss": []}
+
        def loss_callback(loss):
             loss_map["loss"].append(loss)
+
         train_sampler = TrainSampler(
             criterion,
             optimizer,
             loss_callback=loss_callback,
             batch_size=batch_size,
             grad_acc=grad_accumulation_steps,
-            total_steps=steps*grad_accumulation_steps,
+            total_steps=steps * grad_accumulation_steps,
             seed=seed,
-            training_dtype=dtype
+            training_dtype=dtype,
+            real_dataset=latents if multi_res else None,
         )
         guider = comfy_extras.nodes_custom_sampler.Guider_Basic(mp)
         guider.set_conds(positive)  # Set conditioning from input
@@ -684,12 +621,15 @@ class TrainLoraNode:
             # Generate dummy sigmas and noise
             sigmas = torch.tensor(range(num_images))
             noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(seed)
+            if multi_res:
+                # use first latent as dummy latent if multi_res
+                latents = latents[0].repeat(num_images, 1, 1, 1)
             guider.sample(
                 noise.generate_noise({"samples": latents}),
                 latents,
                 train_sampler,
                 sigmas,
-                seed=noise.seed
+                seed=noise.seed,
             )
         finally:
             for m in mp.model.modules():
@@ -702,111 +642,118 @@ class TrainLoraNode:
         for param in lora_sd:
             lora_sd[param] = lora_sd[param].to(lora_dtype)
 
-        return (mp, lora_sd, loss_map, steps + existing_steps)
+        return io.NodeOutput(mp, lora_sd, loss_map, steps + existing_steps)
 
 
-class LoraModelLoader:
-    def __init__(self):
-        self.loaded_lora = None
+class LoraModelLoader(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="LoraModelLoader",
+            display_name="Load LoRA Model",
+            category="loaders",
+            is_experimental=True,
+            inputs=[
+                io.Model.Input(
+                    "model", tooltip="The diffusion model the LoRA will be applied to."
+                ),
+                io.Custom("LORA_MODEL").Input(
+                    "lora", tooltip="The LoRA model to apply to the diffusion model."
+                ),
+                io.Float.Input(
+                    "strength_model",
+                    default=1.0,
+                    min=-100.0,
+                    max=100.0,
+                    tooltip="How strongly to modify the diffusion model. This value can be negative.",
+                ),
+            ],
+            outputs=[
+                io.Model.Output(
+                    display_name="model", tooltip="The modified diffusion model."
+                ),
+            ],
+        )
 
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
-                "lora": (IO.LORA_MODEL, {"tooltip": "The LoRA model to apply to the diffusion model."}),
-                "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
-            }
-        }
-
-    RETURN_TYPES = ("MODEL",)
-    OUTPUT_TOOLTIPS = ("The modified diffusion model.",)
-    FUNCTION = "load_lora_model"
-
-    CATEGORY = "loaders"
-    DESCRIPTION = "Load Trained LoRA weights from Train LoRA node."
-    EXPERIMENTAL = True
-
-    def load_lora_model(self, model, lora, strength_model):
+    def execute(cls, model, lora, strength_model):
         if strength_model == 0:
-            return (model, )
+            return io.NodeOutput(model)
 
-        model_lora, _ = comfy.sd.load_lora_for_models(model, None, lora, strength_model, 0)
-        return (model_lora, )
+        model_lora, _ = comfy.sd.load_lora_for_models(
+            model, None, lora, strength_model, 0
+        )
+        return io.NodeOutput(model_lora)
 
 
-class SaveLoRA:
-    def __init__(self):
-        self.output_dir = folder_paths.get_output_directory()
+class SaveLoRA(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="SaveLoRA",
+            display_name="Save LoRA Weights",
+            category="loaders",
+            is_experimental=True,
+            is_output_node=True,
+            inputs=[
+                io.Custom("LORA_MODEL").Input(
+                    "lora",
+                    tooltip="The LoRA model to save. Do not use the model with LoRA layers.",
+                ),
+                io.String.Input(
+                    "prefix",
+                    default="loras/ComfyUI_trained_lora",
+                    tooltip="The prefix to use for the saved LoRA file.",
+                ),
+                io.Int.Input(
+                    "steps",
+                    optional=True,
+                    tooltip="Optional: the number of steps the LoRA has been trained for, used to name the saved file.",
+                ),
+            ],
+            outputs=[],
+        )
 
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "lora": (
-                    IO.LORA_MODEL,
-                    {
-                        "tooltip": "The LoRA model to save. Do not use the model with LoRA layers."
- }, - ), - "prefix": ( - "STRING", - { - "default": "loras/ComfyUI_trained_lora", - "tooltip": "The prefix to use for the saved LoRA file.", - }, - ), - }, - "optional": { - "steps": ( - IO.INT, - { - "forceInput": True, - "tooltip": "Optional: The number of steps to LoRA has been trained for, used to name the saved file.", - }, - ), - }, - } - - RETURN_TYPES = () - FUNCTION = "save" - CATEGORY = "loaders" - EXPERIMENTAL = True - OUTPUT_NODE = True - - def save(self, lora, prefix, steps=None): - full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(prefix, self.output_dir) + def execute(cls, lora, prefix, steps=None): + output_dir = folder_paths.get_output_directory() + full_output_folder, filename, counter, subfolder, filename_prefix = ( + folder_paths.get_save_image_path(prefix, output_dir) + ) if steps is None: output_checkpoint = f"{filename}_{counter:05}_.safetensors" else: output_checkpoint = f"{filename}_{steps}_steps_{counter:05}_.safetensors" output_checkpoint = os.path.join(full_output_folder, output_checkpoint) safetensors.torch.save_file(lora, output_checkpoint) - return {} + return io.NodeOutput() -class LossGraphNode: - def __init__(self): - self.output_dir = folder_paths.get_temp_directory() +class LossGraphNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LossGraphNode", + display_name="Plot Loss Graph", + category="training", + is_experimental=True, + is_output_node=True, + inputs=[ + io.Custom("LOSS_MAP").Input( + "loss", tooltip="Loss map from training node." + ), + io.String.Input( + "filename_prefix", + default="loss_graph", + tooltip="Prefix for the saved loss graph image.", + ), + ], + outputs=[], + hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo], + ) @classmethod - def INPUT_TYPES(s): - return { - "required": { - "loss": (IO.LOSS_MAP, {"default": {}}), - "filename_prefix": (IO.STRING, {"default": "loss_graph"}), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } - - RETURN_TYPES = () - FUNCTION = "plot_loss" - OUTPUT_NODE = True - CATEGORY = "training" - EXPERIMENTAL = True - DESCRIPTION = "Plots the loss graph and saves it to the output directory." 
- - def plot_loss(self, loss, filename_prefix, prompt=None, extra_pnginfo=None): + def execute(cls, loss, filename_prefix, prompt=None, extra_pnginfo=None): loss_values = loss["loss"] width, height = 800, 480 margin = 40 @@ -849,47 +796,27 @@ class LossGraphNode: (margin - 30, height - 10), f"{min_loss:.2f}", font=font, fill="black" ) - metadata = None - if not args.disable_metadata: - metadata = PngInfo() - if prompt is not None: - metadata.add_text("prompt", json.dumps(prompt)) - if extra_pnginfo is not None: - for x in extra_pnginfo: - metadata.add_text(x, json.dumps(extra_pnginfo[x])) + # Convert PIL image to tensor for PreviewImage + img_array = np.array(img).astype(np.float32) / 255.0 + img_tensor = torch.from_numpy(img_array)[None,] # [1, H, W, 3] - date = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") - img.save( - os.path.join(self.output_dir, f"{filename_prefix}_{date}.png"), - pnginfo=metadata, - ) - return { - "ui": { - "images": [ - { - "filename": f"{filename_prefix}_{date}.png", - "subfolder": "", - "type": "temp", - } - ] - } - } + # Return preview UI + return io.NodeOutput(ui=ui.PreviewImage(img_tensor, cls=cls)) -NODE_CLASS_MAPPINGS = { - "TrainLoraNode": TrainLoraNode, - "SaveLoRANode": SaveLoRA, - "LoraModelLoader": LoraModelLoader, - "LoadImageSetFromFolderNode": LoadImageSetFromFolderNode, - "LoadImageTextSetFromFolderNode": LoadImageTextSetFromFolderNode, - "LossGraphNode": LossGraphNode, -} +# ========== Extension Setup ========== -NODE_DISPLAY_NAME_MAPPINGS = { - "TrainLoraNode": "Train LoRA", - "SaveLoRANode": "Save LoRA Weights", - "LoraModelLoader": "Load LoRA Model", - "LoadImageSetFromFolderNode": "Load Image Dataset from Folder", - "LoadImageTextSetFromFolderNode": "Load Image and Text Dataset from Folder", - "LossGraphNode": "Plot Loss Graph", -} + +class TrainingExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + TrainLoraNode, + LoraModelLoader, + SaveLoRA, + LossGraphNode, + ] + + +async def comfy_entrypoint() -> TrainingExtension: + return TrainingExtension() diff --git a/nodes.py b/nodes.py index f4835c02e..bf73eb90e 100644 --- a/nodes.py +++ b/nodes.py @@ -2278,6 +2278,7 @@ async def init_builtin_extra_nodes(): "nodes_images.py", "nodes_video_model.py", "nodes_train.py", + "nodes_dataset.py", "nodes_sag.py", "nodes_perpneg.py", "nodes_stable3d.py", From eaf68c9b5bbfbcdac8988741f3948678c9465c1d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 26 Nov 2025 16:25:32 -0800 Subject: [PATCH 0943/1073] Make lora training work on Z Image and remove some redundant nodes. 
(#10927) --- comfy/ldm/lumina/model.py | 4 +- comfy_extras/nodes_dataset.py | 102 +--------------------------------- 2 files changed, 3 insertions(+), 103 deletions(-) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index c8643eb82..565400b54 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -509,7 +509,7 @@ class NextDiT(nn.Module): if self.pad_tokens_multiple is not None: pad_extra = (-cap_feats.shape[1]) % self.pad_tokens_multiple - cap_feats = torch.cat((cap_feats, self.cap_pad_token.to(device=cap_feats.device, dtype=cap_feats.dtype).unsqueeze(0).repeat(cap_feats.shape[0], pad_extra, 1)), dim=1) + cap_feats = torch.cat((cap_feats, self.cap_pad_token.to(device=cap_feats.device, dtype=cap_feats.dtype, copy=True).unsqueeze(0).repeat(cap_feats.shape[0], pad_extra, 1)), dim=1) cap_pos_ids = torch.zeros(bsz, cap_feats.shape[1], 3, dtype=torch.float32, device=device) cap_pos_ids[:, :, 0] = torch.arange(cap_feats.shape[1], dtype=torch.float32, device=device) + 1.0 @@ -525,7 +525,7 @@ class NextDiT(nn.Module): if self.pad_tokens_multiple is not None: pad_extra = (-x.shape[1]) % self.pad_tokens_multiple - x = torch.cat((x, self.x_pad_token.to(device=x.device, dtype=x.dtype).unsqueeze(0).repeat(x.shape[0], pad_extra, 1)), dim=1) + x = torch.cat((x, self.x_pad_token.to(device=x.device, dtype=x.dtype, copy=True).unsqueeze(0).repeat(x.shape[0], pad_extra, 1)), dim=1) x_pos_ids = torch.nn.functional.pad(x_pos_ids, (0, 0, 0, pad_extra)) freqs_cis = self.rope_embedder(torch.cat((cap_pos_ids, x_pos_ids), dim=1)).movedim(1, 2) diff --git a/comfy_extras/nodes_dataset.py b/comfy_extras/nodes_dataset.py index b23867505..4789d7d53 100644 --- a/comfy_extras/nodes_dataset.py +++ b/comfy_extras/nodes_dataset.py @@ -1,6 +1,5 @@ import logging import os -import math import json import numpy as np @@ -624,79 +623,6 @@ class TextProcessingNode(io.ComfyNode): # ========== Image Transform Nodes ========== -class ResizeImagesToSameSizeNode(ImageProcessingNode): - node_id = "ResizeImagesToSameSize" - display_name = "Resize Images to Same Size" - description = "Resize all images to the same width and height." 
- extra_inputs = [ - io.Int.Input("width", default=512, min=1, max=8192, tooltip="Target width."), - io.Int.Input("height", default=512, min=1, max=8192, tooltip="Target height."), - io.Combo.Input( - "mode", - options=["stretch", "crop_center", "pad"], - default="stretch", - tooltip="Resize mode.", - ), - ] - - @classmethod - def _process(cls, image, width, height, mode): - img = tensor_to_pil(image) - - if mode == "stretch": - img = img.resize((width, height), Image.Resampling.LANCZOS) - elif mode == "crop_center": - left = max(0, (img.width - width) // 2) - top = max(0, (img.height - height) // 2) - right = min(img.width, left + width) - bottom = min(img.height, top + height) - img = img.crop((left, top, right, bottom)) - if img.width != width or img.height != height: - img = img.resize((width, height), Image.Resampling.LANCZOS) - elif mode == "pad": - img.thumbnail((width, height), Image.Resampling.LANCZOS) - new_img = Image.new("RGB", (width, height), (0, 0, 0)) - paste_x = (width - img.width) // 2 - paste_y = (height - img.height) // 2 - new_img.paste(img, (paste_x, paste_y)) - img = new_img - - return pil_to_tensor(img) - - -class ResizeImagesToPixelCountNode(ImageProcessingNode): - node_id = "ResizeImagesToPixelCount" - display_name = "Resize Images to Pixel Count" - description = "Resize images so that the total pixel count matches the specified number while preserving aspect ratio." - extra_inputs = [ - io.Int.Input( - "pixel_count", - default=512 * 512, - min=1, - max=8192 * 8192, - tooltip="Target pixel count.", - ), - io.Int.Input( - "steps", - default=64, - min=1, - max=128, - tooltip="The stepping for resize width/height.", - ), - ] - - @classmethod - def _process(cls, image, pixel_count, steps): - img = tensor_to_pil(image) - w, h = img.size - pixel_count_ratio = math.sqrt(pixel_count / (w * h)) - new_w = int(w * pixel_count_ratio / steps) * steps - new_h = int(h * pixel_count_ratio / steps) * steps - logging.info(f"Resizing from {w}x{h} to {new_w}x{new_h}") - img = img.resize((new_w, new_h), Image.Resampling.LANCZOS) - return pil_to_tensor(img) - - class ResizeImagesByShorterEdgeNode(ImageProcessingNode): node_id = "ResizeImagesByShorterEdge" display_name = "Resize Images by Shorter Edge" @@ -801,29 +727,6 @@ class RandomCropImagesNode(ImageProcessingNode): return pil_to_tensor(img) -class FlipImagesNode(ImageProcessingNode): - node_id = "FlipImages" - display_name = "Flip Images" - description = "Flip all images horizontally or vertically." 
- extra_inputs = [ - io.Combo.Input( - "direction", - options=["horizontal", "vertical"], - default="horizontal", - tooltip="Flip direction.", - ), - ] - - @classmethod - def _process(cls, image, direction): - img = tensor_to_pil(image) - if direction == "horizontal": - img = img.transpose(Image.FLIP_LEFT_RIGHT) - else: - img = img.transpose(Image.FLIP_TOP_BOTTOM) - return pil_to_tensor(img) - - class NormalizeImagesNode(ImageProcessingNode): node_id = "NormalizeImages" display_name = "Normalize Images" @@ -1470,7 +1373,7 @@ class LoadTrainingDataset(io.ComfyNode): shard_path = os.path.join(dataset_dir, shard_file) with open(shard_path, "rb") as f: - shard_data = torch.load(f) + shard_data = torch.load(f, weights_only=True) all_latents.extend(shard_data["latents"]) all_conditioning.extend(shard_data["conditioning"]) @@ -1496,13 +1399,10 @@ class DatasetExtension(ComfyExtension): SaveImageDataSetToFolderNode, SaveImageTextDataSetToFolderNode, # Image transform nodes - ResizeImagesToSameSizeNode, - ResizeImagesToPixelCountNode, ResizeImagesByShorterEdgeNode, ResizeImagesByLongerEdgeNode, CenterCropImagesNode, RandomCropImagesNode, - FlipImagesNode, NormalizeImagesNode, AdjustBrightnessNode, AdjustContrastNode, From c38e7d6599be1bdce580ccfdbb20b928315af05e Mon Sep 17 00:00:00 2001 From: Haoming <73768377+Haoming02@users.noreply.github.com> Date: Thu, 27 Nov 2025 12:28:44 +0800 Subject: [PATCH 0944/1073] block info (#10841) --- comfy/ldm/flux/model.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index 1a24e6d95..d5674dea6 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -171,7 +171,10 @@ class Flux(nn.Module): pe = None blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.double_blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.double_blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} @@ -215,7 +218,10 @@ class Flux(nn.Module): if self.params.global_modulation: vec, _ = self.single_stream_modulation(vec_orig) + transformer_options["total_blocks"] = len(self.single_blocks) + transformer_options["block_type"] = "single" for i, block in enumerate(self.single_blocks): + transformer_options["block_index"] = i if ("single_block", i) in blocks_replace: def block_wrap(args): out = {} From f17251bec65b5760cfedec29eace7d77f4b35130 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Thu, 27 Nov 2025 16:03:03 +1000 Subject: [PATCH 0945/1073] Account for the VRAM cost of weight offloading (#10733) * mm: default to 0 for NUM_STREAMS Dont count the compute stream as an offload stream. This makes async offload accounting easier. * mm: remove 128MB minimum This is from a previous offloading system requirement. Remove it to make behaviour of the loader and partial unloader consistent. * mp: order the module list by offload expense Calculate an approximate offloading temporary VRAM cost to offload a weight and primary order the module load list by that. In the simple case this is just the same as the module weight, but with Loras, a weight with a lora consumes considerably more VRAM to do the Lora application on-the-fly. This will slightly prioritize lora weights, but is really for proper VRAM offload accounting. 
* mp: Account for the VRAM cost of weight offloading when checking the VRAM headroom, assume that the weight needs to be offloaded, and only load if it has space for both the load and offload * the number of streams. As the weights are ordered from largest to smallest by offload cost, this is guaranteed to fit in VRAM (tm), as all weights that follow will be smaller. Make the partial unload aware of this system as well by saving the budget for offload VRAM to the model state and accounting accordingly. It's possible that partial unload increases the size of the largest offloaded weights, and thus needs to unload a little bit more than asked to accommodate the bigger temp buffers. Honor the existing code's floor on model weight loading of 128MB by having the patcher honor this separately without regard to offloading. Otherwise when MM specifies its 128MB minimum, MP will see the biggest weights, and budget that 128MB to only offload buffer and load nothing, which isn't the intent of these minimums. The same clamp applies in case of partial offload of the currently loading model. --- comfy/model_management.py | 6 ++-- comfy/model_patcher.py | 59 +++++++++++++++++++++++++++++---------- 2 files changed, 48 insertions(+), 17 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index a9327ac80..9c403d580 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -689,7 +689,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu loaded_memory = loaded_model.model_loaded_memory() current_free_mem = get_free_memory(torch_dev) + loaded_memory - lowvram_model_memory = max(128 * 1024 * 1024, (current_free_mem - minimum_memory_required), min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO, current_free_mem - minimum_inference_memory())) + lowvram_model_memory = max(0, (current_free_mem - minimum_memory_required), min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO, current_free_mem - minimum_inference_memory())) lowvram_model_memory = lowvram_model_memory - loaded_memory if lowvram_model_memory == 0: @@ -1012,7 +1012,7 @@ def force_channels_last(): STREAMS = {} -NUM_STREAMS = 1 +NUM_STREAMS = 0 if args.async_offload: NUM_STREAMS = 2 logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS)) @@ -1030,7 +1030,7 @@ def current_stream(device): stream_counters = {} def get_offload_stream(device): stream_counter = stream_counters.get(device, 0) - if NUM_STREAMS <= 1: + if NUM_STREAMS == 0: return None if device in STREAMS: diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 73adc7f70..3eac77275 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -148,6 +148,15 @@ class LowVramPatch: else: return out +#The above patch logic may cast up the weight to fp32, and do math.
Go with fp32 x 3 +LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR = 3 + +def low_vram_patch_estimate_vram(model, key): + weight, set_func, convert_func = get_key_weight(model, key) + if weight is None: + return 0 + return weight.numel() * torch.float32.itemsize * LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR + def get_key_weight(model, key): set_func = None convert_func = None @@ -269,6 +278,9 @@ class ModelPatcher: if not hasattr(self.model, 'current_weight_patches_uuid'): self.model.current_weight_patches_uuid = None + if not hasattr(self.model, 'model_offload_buffer_memory'): + self.model.model_offload_buffer_memory = 0 + def model_size(self): if self.size > 0: return self.size @@ -662,7 +674,16 @@ class ModelPatcher: skip = True # skip random weights in non leaf modules break if not skip and (hasattr(m, "comfy_cast_weights") or len(params) > 0): - loading.append((comfy.model_management.module_size(m), n, m, params)) + module_mem = comfy.model_management.module_size(m) + module_offload_mem = module_mem + if hasattr(m, "comfy_cast_weights"): + weight_key = "{}.weight".format(n) + bias_key = "{}.bias".format(n) + if weight_key in self.patches: + module_offload_mem += low_vram_patch_estimate_vram(self.model, weight_key) + if bias_key in self.patches: + module_offload_mem += low_vram_patch_estimate_vram(self.model, bias_key) + loading.append((module_offload_mem, module_mem, n, m, params)) return loading def load(self, device_to=None, lowvram_model_memory=0, force_patch_weights=False, full_load=False): @@ -676,20 +697,22 @@ class ModelPatcher: load_completely = [] offloaded = [] + offload_buffer = 0 loading.sort(reverse=True) for x in loading: - n = x[1] - m = x[2] - params = x[3] - module_mem = x[0] + module_offload_mem, module_mem, n, m, params = x lowvram_weight = False + potential_offload = max(offload_buffer, module_offload_mem * (comfy.model_management.NUM_STREAMS + 1)) + lowvram_fits = mem_counter + module_mem + potential_offload < lowvram_model_memory + weight_key = "{}.weight".format(n) bias_key = "{}.bias".format(n) if not full_load and hasattr(m, "comfy_cast_weights"): - if mem_counter + module_mem >= lowvram_model_memory: + if not lowvram_fits: + offload_buffer = potential_offload lowvram_weight = True lowvram_counter += 1 lowvram_mem_counter += module_mem @@ -723,9 +746,11 @@ class ModelPatcher: if hasattr(m, "comfy_cast_weights"): wipe_lowvram_weight(m) - if full_load or mem_counter + module_mem < lowvram_model_memory: + if full_load or lowvram_fits: mem_counter += module_mem load_completely.append((module_mem, n, m, params)) + else: + offload_buffer = potential_offload if cast_weight and hasattr(m, "comfy_cast_weights"): m.prev_comfy_cast_weights = m.comfy_cast_weights @@ -766,7 +791,7 @@ class ModelPatcher: self.pin_weight_to_device("{}.{}".format(n, param)) if lowvram_counter > 0: - logging.info("loaded partially; {:.2f} MB usable, {:.2f} MB loaded, {:.2f} MB offloaded, lowvram patches: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), patch_counter)) + logging.info("loaded partially; {:.2f} MB usable, {:.2f} MB loaded, {:.2f} MB offloaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), offload_buffer / (1024 * 1024), patch_counter)) self.model.model_lowvram = True else: logging.info("loaded completely; {:.2f} MB usable, {:.2f} MB loaded, full load: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 
1024), full_load)) @@ -778,6 +803,7 @@ class ModelPatcher: self.model.lowvram_patch_counter += patch_counter self.model.device = device_to self.model.model_loaded_weight_memory = mem_counter + self.model.model_offload_buffer_memory = offload_buffer self.model.current_weight_patches_uuid = self.patches_uuid for callback in self.get_all_callbacks(CallbacksMP.ON_LOAD): @@ -831,6 +857,7 @@ class ModelPatcher: self.model.to(device_to) self.model.device = device_to self.model.model_loaded_weight_memory = 0 + self.model.model_offload_buffer_memory = 0 for m in self.model.modules(): if hasattr(m, "comfy_patched_weights"): @@ -849,13 +876,14 @@ class ModelPatcher: patch_counter = 0 unload_list = self._load_list() unload_list.sort() + offload_buffer = self.model.model_offload_buffer_memory + for unload in unload_list: - if memory_to_free < memory_freed: + if memory_to_free + offload_buffer - self.model.model_offload_buffer_memory < memory_freed: break - module_mem = unload[0] - n = unload[1] - m = unload[2] - params = unload[3] + module_offload_mem, module_mem, n, m, params = unload + + potential_offload = (comfy.model_management.NUM_STREAMS + 1) * module_offload_mem lowvram_possible = hasattr(m, "comfy_cast_weights") if hasattr(m, "comfy_patched_weights") and m.comfy_patched_weights == True: @@ -906,15 +934,18 @@ class ModelPatcher: m.comfy_cast_weights = True m.comfy_patched_weights = False memory_freed += module_mem + offload_buffer = max(offload_buffer, potential_offload) logging.debug("freed {}".format(n)) for param in params: self.pin_weight_to_device("{}.{}".format(n, param)) + self.model.model_lowvram = True self.model.lowvram_patch_counter += patch_counter self.model.model_loaded_weight_memory -= memory_freed - logging.info("loaded partially: {:.2f} MB loaded, lowvram patches: {}".format(self.model.model_loaded_weight_memory / (1024 * 1024), self.model.lowvram_patch_counter)) + self.model.model_offload_buffer_memory = offload_buffer + logging.info("Unloaded partially: {:.2f} MB freed, {:.2f} MB remains loaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(memory_freed / (1024 * 1024), self.model.model_loaded_weight_memory / (1024 * 1024), offload_buffer / (1024 * 1024), self.model.lowvram_patch_counter)) return memory_freed def partially_load(self, device_to, extra_memory=0, force_patch_weights=False): From 3f382a4f9884f7b672557028adb9bb85d075820d Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Fri, 28 Nov 2025 02:06:30 +1000 Subject: [PATCH 0946/1073] quant ops: Dequantize weight in-place (#10935) In flux2 these weights are huge (200MB). As plain_tensor is a throw-away deep copy, do this multiplication in-place to save VRAM. 
--- comfy/quant_ops.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index d2f3e7397..9b924560b 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -425,7 +425,8 @@ class TensorCoreFP8Layout(QuantizedLayout): @staticmethod def dequantize(qdata, scale, orig_dtype, **kwargs): plain_tensor = torch.ops.aten._to_copy.default(qdata, dtype=orig_dtype) - return plain_tensor * scale + plain_tensor.mul_(scale) + return plain_tensor @classmethod def get_plain_tensors(cls, qtensor): From b59750a86a4687056528f1d59470e207063a73a3 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Fri, 28 Nov 2025 06:12:56 +0800 Subject: [PATCH 0947/1073] Update template to 0.7.23 (#10949) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9291552d3..e0b2c566b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.32.9 -comfyui-workflow-templates==0.7.20 +comfyui-workflow-templates==0.7.23 comfyui-embedded-docs==0.3.1 torch torchsde From 9d8a817985bb069685e440b38762f95dc834d242 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 27 Nov 2025 14:46:12 -0800 Subject: [PATCH 0948/1073] Enable async offloading by default on Nvidia. (#10953) Add --disable-async-offload to disable it. If this causes OOMs that go away when you --disable-async-offload please report it. --- comfy/cli_args.py | 3 ++- comfy/model_management.py | 13 +++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index d2b60e347..5f0dfaa10 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -131,7 +131,8 @@ vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for e parser.add_argument("--reserve-vram", type=float, default=None, help="Set the amount of vram in GB you want to reserve for use by your OS/other software. By default some amount is reserved depending on your OS.") -parser.add_argument("--async-offload", action="store_true", help="Use async weight offloading.") +parser.add_argument("--async-offload", nargs='?', const=2, type=int, default=None, metavar="NUM_STREAMS", help="Use async weight offloading. An optional argument controls the amount of offload streams. Default is 2. Enabled by default on Nvidia.") +parser.add_argument("--disable-async-offload", action="store_true", help="Disable async weight offloading.") parser.add_argument("--force-non-blocking", action="store_true", help="Force ComfyUI to use non-blocking operations for all applicable tensors. 
This may improve performance on some non-Nvidia systems but can cause issues with some workflows.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 9c403d580..38c506df5 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1013,8 +1013,17 @@ def force_channels_last(): STREAMS = {} NUM_STREAMS = 0 -if args.async_offload: - NUM_STREAMS = 2 +if args.async_offload is not None: + NUM_STREAMS = args.async_offload +else: + # Enable by default on Nvidia + if is_nvidia(): + NUM_STREAMS = 2 + +if args.disable_async_offload: + NUM_STREAMS = 0 + +if NUM_STREAMS > 0: logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS)) def current_stream(device): From 52e778fff3c1d6f32c8d14cba9864faddba8475d Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 28 Nov 2025 12:52:59 +0200 Subject: [PATCH 0949/1073] feat(Kling-API-Nodes): add v2-5-turbo model to FirstLastFrame node (#10938) --- comfy_api_nodes/nodes_kling.py | 60 +++++++++++++++------------------- 1 file changed, 26 insertions(+), 34 deletions(-) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 36852038b..23a7f55f1 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -4,8 +4,6 @@ For source of truth on the allowed permutations of request fields, please refere - [Compatibility Table](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) """ -from __future__ import annotations -from typing import Optional, TypeVar import math import logging @@ -66,9 +64,7 @@ from comfy_api_nodes.util import ( poll_op, ) from comfy_api.input_impl import VideoFromFile -from comfy_api.input.basic_types import AudioInput -from comfy_api.input.video_types import VideoInput -from comfy_api.latest import ComfyExtension, IO +from comfy_api.latest import ComfyExtension, IO, Input KLING_API_VERSION = "v1" PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video" @@ -94,8 +90,6 @@ AVERAGE_DURATION_IMAGE_GEN = 32 AVERAGE_DURATION_VIDEO_EFFECTS = 320 AVERAGE_DURATION_VIDEO_EXTEND = 320 -R = TypeVar("R") - MODE_TEXT2VIDEO = { "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), @@ -130,6 +124,8 @@ MODE_START_END_FRAME = { "pro mode / 10s duration / kling-v1-6": ("pro", "10", "kling-v1-6"), "pro mode / 5s duration / kling-v2-1": ("pro", "5", "kling-v2-1"), "pro mode / 10s duration / kling-v2-1": ("pro", "10", "kling-v2-1"), + "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"), + "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"), } """ Returns a mapping of mode strings to their corresponding (mode, duration, model_name) tuples. @@ -296,7 +292,7 @@ def get_video_from_response(response) -> KlingVideoResult: return video -def get_video_url_from_response(response) -> Optional[str]: +def get_video_url_from_response(response) -> str | None: """Returns the first video url from the Kling video generation task result. Will not raise an error if the response is not valid. """ @@ -315,7 +311,7 @@ def get_images_from_response(response) -> list[KlingImageResult]: return images -def get_images_urls_from_response(response) -> Optional[str]: +def get_images_urls_from_response(response) -> str | None: """Returns the list of image urls from the Kling image generation task result. Will not raise an error if the response is not valid. If there is only one image, returns the url as a string. 
If there are multiple images, returns a list of urls. """ @@ -349,7 +345,7 @@ async def execute_text2video( model_mode: str, duration: str, aspect_ratio: str, - camera_control: Optional[KlingCameraControl] = None, + camera_control: KlingCameraControl | None = None, ) -> IO.NodeOutput: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) task_creation_response = await sync_op( @@ -394,8 +390,8 @@ async def execute_image2video( model_mode: str, aspect_ratio: str, duration: str, - camera_control: Optional[KlingCameraControl] = None, - end_frame: Optional[torch.Tensor] = None, + camera_control: KlingCameraControl | None = None, + end_frame: torch.Tensor | None = None, ) -> IO.NodeOutput: validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) validate_input_image(start_frame) @@ -451,8 +447,8 @@ async def execute_video_effect( model_name: str, duration: KlingVideoGenDuration, image_1: torch.Tensor, - image_2: Optional[torch.Tensor] = None, - model_mode: Optional[KlingVideoGenMode] = None, + image_2: torch.Tensor | None = None, + model_mode: KlingVideoGenMode | None = None, ) -> tuple[VideoFromFile, str, str]: if dual_character: request_input_field = KlingDualCharacterEffectInput( @@ -499,13 +495,13 @@ async def execute_video_effect( async def execute_lipsync( cls: type[IO.ComfyNode], - video: VideoInput, - audio: Optional[AudioInput] = None, - voice_language: Optional[str] = None, - model_mode: Optional[str] = None, - text: Optional[str] = None, - voice_speed: Optional[float] = None, - voice_id: Optional[str] = None, + video: Input.Video, + audio: Input.Audio | None = None, + voice_language: str | None = None, + model_mode: str | None = None, + text: str | None = None, + voice_speed: float | None = None, + voice_id: str | None = None, ) -> IO.NodeOutput: if text: validate_string(text, field_name="Text", max_length=MAX_PROMPT_LENGTH_LIP_SYNC) @@ -787,7 +783,7 @@ class KlingCameraControlT2VNode(IO.ComfyNode): negative_prompt: str, cfg_scale: float, aspect_ratio: str, - camera_control: Optional[KlingCameraControl] = None, + camera_control: KlingCameraControl | None = None, ) -> IO.NodeOutput: return await execute_text2video( cls, @@ -854,8 +850,8 @@ class KlingImage2VideoNode(IO.ComfyNode): mode: str, aspect_ratio: str, duration: str, - camera_control: Optional[KlingCameraControl] = None, - end_frame: Optional[torch.Tensor] = None, + camera_control: KlingCameraControl | None = None, + end_frame: torch.Tensor | None = None, ) -> IO.NodeOutput: return await execute_image2video( cls, @@ -965,15 +961,11 @@ class KlingStartEndFrameNode(IO.ComfyNode): IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), - IO.Combo.Input( - "aspect_ratio", - options=[i.value for i in KlingVideoGenAspectRatio], - default="16:9", - ), + IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]), IO.Combo.Input( "mode", options=modes, - default=modes[2], + default=modes[8], tooltip="The configuration to use for the video generation following the format: mode / duration / model_name.", ), ], @@ -1254,8 +1246,8 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode): @classmethod async def execute( cls, - video: VideoInput, - audio: AudioInput, + video: Input.Video, + audio: Input.Audio, voice_language: str, ) -> IO.NodeOutput: return await execute_lipsync( @@ -1314,7 +1306,7 @@ class KlingLipSyncTextToVideoNode(IO.ComfyNode): @classmethod 
async def execute( cls, - video: VideoInput, + video: Input.Video, text: str, voice: str, voice_speed: float, @@ -1471,7 +1463,7 @@ class KlingImageGenerationNode(IO.ComfyNode): human_fidelity: float, n: int, aspect_ratio: KlingImageGenAspectRatio, - image: Optional[torch.Tensor] = None, + image: torch.Tensor | None = None, ) -> IO.NodeOutput: validate_string(prompt, field_name="prompt", min_length=1, max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) validate_string(negative_prompt, field_name="negative_prompt", max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) From ca7808f240d4d53e594d3b95753240313864c992 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Sat, 29 Nov 2025 05:43:17 +0900 Subject: [PATCH 0950/1073] fix(user_manager): fix typo in move_userdata dest validation (#10967) Check `dest` instead of `source` when validating destination path in move_userdata endpoint. --- app/user_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/user_manager.py b/app/user_manager.py index a2d376c0c..675f6c0c6 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -424,7 +424,7 @@ class UserManager(): return source dest = get_user_data_path(request, check_exists=False, param="dest") - if not isinstance(source, str): + if not isinstance(dest, str): return dest overwrite = request.query.get("overwrite", 'true') != "false" From f55c98a89f76fc06c435a728bc2e76b6b4051463 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 28 Nov 2025 13:16:46 -0800 Subject: [PATCH 0951/1073] Disable offload stream when torch compile. (#10961) --- comfy/model_management.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 38c506df5..d8ce80010 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1042,6 +1042,9 @@ def get_offload_stream(device): if NUM_STREAMS == 0: return None + if torch.compiler.is_compiling(): + return None + if device in STREAMS: ss = STREAMS[device] #Sync the oldest stream in the queue with the current From 6484ac89dc683b178d9ef3f7406800f7132147ba Mon Sep 17 00:00:00 2001 From: Urle Sistiana <55231606+urlesistiana@users.noreply.github.com> Date: Sat, 29 Nov 2025 05:33:07 +0800 Subject: [PATCH 0952/1073] fix QuantizedTensor.is_contiguous (#10956) (#10959) --- comfy/quant_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index 9b924560b..bb1fb860c 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -235,8 +235,8 @@ class QuantizedTensor(torch.Tensor): def is_pinned(self): return self._qdata.is_pinned() - def is_contiguous(self): - return self._qdata.is_contiguous() + def is_contiguous(self, *arg, **kwargs): + return self._qdata.is_contiguous(*arg, **kwargs) # ============================================================================== # Generic Utilities (Layout-Agnostic Operations) From 0ff0457892467ef8a6ea235dcd0618c10ca44ee3 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Sat, 29 Nov 2025 07:38:12 +1000 Subject: [PATCH 0953/1073] mm: wrap the raw stream in context manager (#10958) The documentation of torch.foo.Stream being usable with with: suggests it starts at version 2.7. Use the old API for backwards compatibility. 
--- comfy/model_management.py | 19 +++++++++++++++---- comfy/ops.py | 2 ++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index d8ce80010..aeddbaefe 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1055,7 +1055,9 @@ def get_offload_stream(device): elif is_device_cuda(device): ss = [] for k in range(NUM_STREAMS): - ss.append(torch.cuda.Stream(device=device, priority=0)) + s1 = torch.cuda.Stream(device=device, priority=0) + s1.as_context = torch.cuda.stream + ss.append(s1) STREAMS[device] = ss s = ss[stream_counter] stream_counters[device] = stream_counter @@ -1063,7 +1065,9 @@ def get_offload_stream(device): elif is_device_xpu(device): ss = [] for k in range(NUM_STREAMS): - ss.append(torch.xpu.Stream(device=device, priority=0)) + s1 = torch.xpu.Stream(device=device, priority=0) + s1.as_context = torch.xpu.stream + ss.append(s1) STREAMS[device] = ss s = ss[stream_counter] stream_counters[device] = stream_counter @@ -1081,12 +1085,19 @@ def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, str if dtype is None or weight.dtype == dtype: return weight if stream is not None: - with stream: + wf_context = stream + if hasattr(wf_context, "as_context"): + wf_context = wf_context.as_context(stream) + with wf_context: return weight.to(dtype=dtype, copy=copy) return weight.to(dtype=dtype, copy=copy) + if stream is not None: - with stream: + wf_context = stream + if hasattr(wf_context, "as_context"): + wf_context = wf_context.as_context(stream) + with wf_context: r = torch.empty_like(weight, dtype=dtype, device=device) r.copy_(weight, non_blocking=non_blocking) else: diff --git a/comfy/ops.py b/comfy/ops.py index a0ff4e8f1..61a2f0754 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -95,6 +95,8 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of if offload_stream is not None: wf_context = offload_stream + if hasattr(wf_context, "as_context"): + wf_context = wf_context.as_context(offload_stream) else: wf_context = contextlib.nullcontext() From 065a2fbbec6af5c8e19a3add29703f83faf672d6 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 28 Nov 2025 16:37:39 -0800 Subject: [PATCH 0954/1073] Update driver link in AMD portable README (#10974) --- .ci/windows_amd_base_files/README_VERY_IMPORTANT.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt index 96a500be2..2cbb00d99 100755 --- a/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt @@ -1,5 +1,5 @@ -As of the time of writing this you need this preview driver for best results: -https://www.amd.com/en/resources/support-articles/release-notes/RN-AMDGPU-WINDOWS-PYTORCH-PREVIEW.html +As of the time of writing this you need this driver for best results: +https://www.amd.com/en/resources/support-articles/release-notes/RN-AMDGPU-WINDOWS-PYTORCH-7-1-1.html HOW TO RUN: @@ -25,3 +25,4 @@ In the ComfyUI directory you will find a file: extra_model_paths.yaml.example Rename this file to: extra_model_paths.yaml and edit it with your favorite text editor. 
+ From b9070857092a78cc952d70025fdcc0ff540d96ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Sat, 29 Nov 2025 02:40:19 +0200 Subject: [PATCH 0955/1073] Support video tiny VAEs (#10884) * Support video tiny VAEs * lighttaew scaling fix * Also support video taes in previews Only first frame for now as live preview playback is currently only available through VHS custom nodes. * Support Wan 2.1 lightVAE * Relocate elif block and set Wan VAE dim directly without using pruning rate for lightvae --- comfy/latent_formats.py | 5 +- comfy/sd.py | 34 +++++++- comfy/taesd/taehv.py | 171 ++++++++++++++++++++++++++++++++++++++++ latent_preview.py | 28 +++++-- nodes.py | 18 ++++- 5 files changed, 244 insertions(+), 12 deletions(-) create mode 100644 comfy/taesd/taehv.py diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 8e110f45d..f1ca0151e 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -431,6 +431,7 @@ class HunyuanVideo(LatentFormat): ] latent_rgb_factors_bias = [ 0.0259, -0.0192, -0.0761] + taesd_decoder_name = "taehv" class Cosmos1CV8x8x8(LatentFormat): latent_channels = 16 @@ -494,7 +495,7 @@ class Wan21(LatentFormat): ]).view(1, self.latent_channels, 1, 1, 1) - self.taesd_decoder_name = None #TODO + self.taesd_decoder_name = "lighttaew2_1" def process_in(self, latent): latents_mean = self.latents_mean.to(latent.device, latent.dtype) @@ -565,6 +566,7 @@ class Wan22(Wan21): def __init__(self): self.scale_factor = 1.0 + self.taesd_decoder_name = "lighttaew2_2" self.latents_mean = torch.tensor([ -0.2289, -0.0052, -0.1323, -0.2339, -0.2799, 0.0174, 0.1838, 0.1557, -0.1382, 0.0542, 0.2813, 0.0891, 0.1570, -0.0098, 0.0375, -0.1825, @@ -719,6 +721,7 @@ class HunyuanVideo15(LatentFormat): latent_channels = 32 latent_dimensions = 3 scale_factor = 1.03682 + taesd_decoder_name = "lighttaehy1_5" class Hunyuan3Dv2(LatentFormat): latent_channels = 64 diff --git a/comfy/sd.py b/comfy/sd.py index 350fae92b..9eeb0c45a 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -60,6 +60,8 @@ import comfy.lora_convert import comfy.hooks import comfy.t2i_adapter.adapter import comfy.taesd.taesd +import comfy.taesd.taehv +import comfy.latent_formats import comfy.ldm.flux.redux @@ -508,13 +510,14 @@ class VAE: self.memory_used_encode = lambda shape, dtype: 3300 * shape[3] * shape[4] * model_management.dtype_size(dtype) self.memory_used_decode = lambda shape, dtype: 8000 * shape[3] * shape[4] * (16 * 16) * model_management.dtype_size(dtype) else: # Wan 2.1 VAE + dim = sd["decoder.head.0.gamma"].shape[0] self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 8, 8) self.upscale_index_formula = (4, 8, 8) self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 8, 8) self.downscale_index_formula = (4, 8, 8) self.latent_dim = 3 self.latent_channels = 16 - ddconfig = {"dim": 96, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "dropout": 0.0} + ddconfig = {"dim": dim, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "dropout": 0.0} self.first_stage_model = comfy.ldm.wan.vae.WanVAE(**ddconfig) self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] self.memory_used_encode = lambda shape, dtype: 6000 * shape[3] * shape[4] * model_management.dtype_size(dtype) @@ -584,6 +587,35 @@ class VAE: self.process_input = lambda audio: audio 
self.working_dtypes = [torch.float32] self.crop_input = False + elif "decoder.22.bias" in sd: # taehv, taew and lighttae + self.latent_channels = sd["decoder.1.weight"].shape[1] + self.latent_dim = 3 + self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 16, 16) + self.upscale_index_formula = (4, 16, 16) + self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 16, 16) + self.downscale_index_formula = (4, 16, 16) + if self.latent_channels == 48: # Wan 2.2 + self.first_stage_model = comfy.taesd.taehv.TAEHV(latent_channels=self.latent_channels, latent_format=None) # taehv doesn't need scaling + self.process_input = lambda image: (_ for _ in ()).throw(NotImplementedError("This light tae doesn't support encoding currently")) + self.process_output = lambda image: image + self.memory_used_decode = lambda shape, dtype: (1800 * (max(1, (shape[-3] ** 0.7 * 0.1)) * shape[-2] * shape[-1] * 16 * 16) * model_management.dtype_size(dtype)) + elif self.latent_channels == 32 and sd["decoder.22.bias"].shape[0] == 12: # lighttae_hv15 + self.first_stage_model = comfy.taesd.taehv.TAEHV(latent_channels=self.latent_channels, latent_format=comfy.latent_formats.HunyuanVideo15) + self.process_input = lambda image: (_ for _ in ()).throw(NotImplementedError("This light tae doesn't support encoding currently")) + self.memory_used_decode = lambda shape, dtype: (1200 * (max(1, (shape[-3] ** 0.7 * 0.05)) * shape[-2] * shape[-1] * 32 * 32) * model_management.dtype_size(dtype)) + else: + if sd["decoder.1.weight"].dtype == torch.float16: # taehv currently only available in float16, so assume it's not lighttaew2_1 as otherwise state dicts are identical + latent_format=comfy.latent_formats.HunyuanVideo + else: + latent_format=None # lighttaew2_1 doesn't need scaling + self.first_stage_model = comfy.taesd.taehv.TAEHV(latent_channels=self.latent_channels, latent_format=latent_format) + self.process_input = self.process_output = lambda image: image + self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 8, 8) + self.upscale_index_formula = (4, 8, 8) + self.downscale_ratio = (lambda a: max(0, math.floor((a + 3) / 4)), 8, 8) + self.downscale_index_formula = (4, 8, 8) + self.memory_used_encode = lambda shape, dtype: (700 * (max(1, (shape[-3] ** 0.66 * 0.11)) * shape[-2] * shape[-1]) * model_management.dtype_size(dtype)) + self.memory_used_decode = lambda shape, dtype: (50 * (max(1, (shape[-3] ** 0.65 * 0.26)) * shape[-2] * shape[-1] * 32 * 32) * model_management.dtype_size(dtype)) else: logging.warning("WARNING: No VAE weights detected, VAE not initalized.") self.first_stage_model = None diff --git a/comfy/taesd/taehv.py b/comfy/taesd/taehv.py new file mode 100644 index 000000000..3dfe1e4d4 --- /dev/null +++ b/comfy/taesd/taehv.py @@ -0,0 +1,171 @@ +# Tiny AutoEncoder for HunyuanVideo and WanVideo https://github.com/madebyollin/taehv + +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm.auto import tqdm +from collections import namedtuple, deque + +import comfy.ops +operations=comfy.ops.disable_weight_init + +DecoderResult = namedtuple("DecoderResult", ("frame", "memory")) +TWorkItem = namedtuple("TWorkItem", ("input_tensor", "block_index")) + +def conv(n_in, n_out, **kwargs): + return operations.Conv2d(n_in, n_out, 3, padding=1, **kwargs) + +class Clamp(nn.Module): + def forward(self, x): + return torch.tanh(x / 3) * 3 + +class MemBlock(nn.Module): + def __init__(self, n_in, n_out, act_func): + super().__init__() + self.conv = nn.Sequential(conv(n_in * 2, n_out), act_func, conv(n_out, n_out), 
act_func, conv(n_out, n_out)) + self.skip = operations.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity() + self.act = act_func + def forward(self, x, past): + return self.act(self.conv(torch.cat([x, past], 1)) + self.skip(x)) + +class TPool(nn.Module): + def __init__(self, n_f, stride): + super().__init__() + self.stride = stride + self.conv = operations.Conv2d(n_f*stride,n_f, 1, bias=False) + def forward(self, x): + _NT, C, H, W = x.shape + return self.conv(x.reshape(-1, self.stride * C, H, W)) + +class TGrow(nn.Module): + def __init__(self, n_f, stride): + super().__init__() + self.stride = stride + self.conv = operations.Conv2d(n_f, n_f*stride, 1, bias=False) + def forward(self, x): + _NT, C, H, W = x.shape + x = self.conv(x) + return x.reshape(-1, C, H, W) + +def apply_model_with_memblocks(model, x, parallel, show_progress_bar): + + B, T, C, H, W = x.shape + if parallel: + x = x.reshape(B*T, C, H, W) + # parallel over input timesteps, iterate over blocks + for b in tqdm(model, disable=not show_progress_bar): + if isinstance(b, MemBlock): + BT, C, H, W = x.shape + T = BT // B + _x = x.reshape(B, T, C, H, W) + mem = F.pad(_x, (0,0,0,0,0,0,1,0), value=0)[:,:T].reshape(x.shape) + x = b(x, mem) + else: + x = b(x) + BT, C, H, W = x.shape + T = BT // B + x = x.view(B, T, C, H, W) + else: + out = [] + work_queue = deque([TWorkItem(xt, 0) for t, xt in enumerate(x.reshape(B, T * C, H, W).chunk(T, dim=1))]) + progress_bar = tqdm(range(T), disable=not show_progress_bar) + mem = [None] * len(model) + while work_queue: + xt, i = work_queue.popleft() + if i == 0: + progress_bar.update(1) + if i == len(model): + out.append(xt) + del xt + else: + b = model[i] + if isinstance(b, MemBlock): + if mem[i] is None: + xt_new = b(xt, xt * 0) + mem[i] = xt.detach().clone() + else: + xt_new = b(xt, mem[i]) + mem[i] = xt.detach().clone() + del xt + work_queue.appendleft(TWorkItem(xt_new, i+1)) + elif isinstance(b, TPool): + if mem[i] is None: + mem[i] = [] + mem[i].append(xt.detach().clone()) + if len(mem[i]) == b.stride: + B, C, H, W = xt.shape + xt = b(torch.cat(mem[i], 1).view(B*b.stride, C, H, W)) + mem[i] = [] + work_queue.appendleft(TWorkItem(xt, i+1)) + elif isinstance(b, TGrow): + xt = b(xt) + NT, C, H, W = xt.shape + for xt_next in reversed(xt.view(B, b.stride*C, H, W).chunk(b.stride, 1)): + work_queue.appendleft(TWorkItem(xt_next, i+1)) + del xt + else: + xt = b(xt) + work_queue.appendleft(TWorkItem(xt, i+1)) + progress_bar.close() + x = torch.stack(out, 1) + return x + + +class TAEHV(nn.Module): + def __init__(self, latent_channels, parallel=False, decoder_time_upscale=(True, True), decoder_space_upscale=(True, True, True), latent_format=None, show_progress_bar=True): + super().__init__() + self.image_channels = 3 + self.patch_size = 1 + self.latent_channels = latent_channels + self.parallel = parallel + self.latent_format = latent_format + self.show_progress_bar = show_progress_bar + self.process_in = latent_format().process_in if latent_format is not None else (lambda x: x) + self.process_out = latent_format().process_out if latent_format is not None else (lambda x: x) + if self.latent_channels in [48, 32]: # Wan 2.2 and HunyuanVideo1.5 + self.patch_size = 2 + if self.latent_channels == 32: # HunyuanVideo1.5 + act_func = nn.LeakyReLU(0.2, inplace=True) + else: # HunyuanVideo, Wan 2.1 + act_func = nn.ReLU(inplace=True) + + self.encoder = nn.Sequential( + conv(self.image_channels*self.patch_size**2, 64), act_func, + TPool(64, 2), conv(64, 64, stride=2, bias=False), MemBlock(64, 64, 
act_func), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), + TPool(64, 2), conv(64, 64, stride=2, bias=False), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), + TPool(64, 1), conv(64, 64, stride=2, bias=False), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), MemBlock(64, 64, act_func), + conv(64, self.latent_channels), + ) + n_f = [256, 128, 64, 64] + self.frames_to_trim = 2**sum(decoder_time_upscale) - 1 + self.decoder = nn.Sequential( + Clamp(), conv(self.latent_channels, n_f[0]), act_func, + MemBlock(n_f[0], n_f[0], act_func), MemBlock(n_f[0], n_f[0], act_func), MemBlock(n_f[0], n_f[0], act_func), nn.Upsample(scale_factor=2 if decoder_space_upscale[0] else 1), TGrow(n_f[0], 1), conv(n_f[0], n_f[1], bias=False), + MemBlock(n_f[1], n_f[1], act_func), MemBlock(n_f[1], n_f[1], act_func), MemBlock(n_f[1], n_f[1], act_func), nn.Upsample(scale_factor=2 if decoder_space_upscale[1] else 1), TGrow(n_f[1], 2 if decoder_time_upscale[0] else 1), conv(n_f[1], n_f[2], bias=False), + MemBlock(n_f[2], n_f[2], act_func), MemBlock(n_f[2], n_f[2], act_func), MemBlock(n_f[2], n_f[2], act_func), nn.Upsample(scale_factor=2 if decoder_space_upscale[2] else 1), TGrow(n_f[2], 2 if decoder_time_upscale[1] else 1), conv(n_f[2], n_f[3], bias=False), + act_func, conv(n_f[3], self.image_channels*self.patch_size**2), + ) + @property + def show_progress_bar(self): + return self._show_progress_bar + + @show_progress_bar.setter + def show_progress_bar(self, value): + self._show_progress_bar = value + + def encode(self, x, **kwargs): + if self.patch_size > 1: x = F.pixel_unshuffle(x, self.patch_size) + x = x.movedim(2, 1) # [B, C, T, H, W] -> [B, T, C, H, W] + if x.shape[1] % 4 != 0: + # pad at end to multiple of 4 + n_pad = 4 - x.shape[1] % 4 + padding = x[:, -1:].repeat_interleave(n_pad, dim=1) + x = torch.cat([x, padding], 1) + x = apply_model_with_memblocks(self.encoder, x, self.parallel, self.show_progress_bar).movedim(2, 1) + return self.process_out(x) + + def decode(self, x, **kwargs): + x = self.process_in(x).movedim(2, 1) # [B, C, T, H, W] -> [B, T, C, H, W] + x = apply_model_with_memblocks(self.decoder, x, self.parallel, self.show_progress_bar) + if self.patch_size > 1: x = F.pixel_shuffle(x, self.patch_size) + return x[:, self.frames_to_trim:].movedim(2, 1) diff --git a/latent_preview.py b/latent_preview.py index ddf6dcf49..66bded4b9 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -2,17 +2,24 @@ import torch from PIL import Image from comfy.cli_args import args, LatentPreviewMethod from comfy.taesd.taesd import TAESD +from comfy.sd import VAE import comfy.model_management import folder_paths import comfy.utils import logging MAX_PREVIEW_RESOLUTION = args.preview_size +VIDEO_TAES = ["taehv", "lighttaew2_2", "lighttaew2_1", "lighttaehy1_5"] -def preview_to_image(latent_image): - latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1) # change scale from -1..1 to 0..1 - .mul(0xFF) # to 0..255 - ) +def preview_to_image(latent_image, do_scale=True): + if do_scale: + latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1) # change scale from -1..1 to 0..1 + .mul(0xFF) # to 0..255 + ) + else: + latents_ubyte = (latent_image.clamp(0, 1) + .mul(0xFF) # to 0..255 + ) if comfy.model_management.directml_enabled: latents_ubyte = latents_ubyte.to(dtype=torch.uint8) latents_ubyte = latents_ubyte.to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device)) @@ -35,6 +42,10 @@ class 
TAESDPreviewerImpl(LatentPreviewer): x_sample = self.taesd.decode(x0[:1])[0].movedim(0, 2) return preview_to_image(x_sample) +class TAEHVPreviewerImpl(TAESDPreviewerImpl): + def decode_latent_to_preview(self, x0): + x_sample = self.taesd.decode(x0[:1, :, :1])[0][0] + return preview_to_image(x_sample, do_scale=False) class Latent2RGBPreviewer(LatentPreviewer): def __init__(self, latent_rgb_factors, latent_rgb_factors_bias=None, latent_rgb_factors_reshape=None): @@ -81,8 +92,13 @@ def get_previewer(device, latent_format): if method == LatentPreviewMethod.TAESD: if taesd_decoder_path: - taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device) - previewer = TAESDPreviewerImpl(taesd) + if latent_format.taesd_decoder_name in VIDEO_TAES: + taesd = VAE(comfy.utils.load_torch_file(taesd_decoder_path)) + taesd.first_stage_model.show_progress_bar = False + previewer = TAEHVPreviewerImpl(taesd) + else: + taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device) + previewer = TAESDPreviewerImpl(taesd) else: logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name)) diff --git a/nodes.py b/nodes.py index bf73eb90e..495dec806 100644 --- a/nodes.py +++ b/nodes.py @@ -692,8 +692,10 @@ class LoraLoaderModelOnly(LoraLoader): return (self.load_lora(model, None, lora_name, strength_model, 0)[0],) class VAELoader: + video_taes = ["taehv", "lighttaew2_2", "lighttaew2_1", "lighttaehy1_5"] + image_taes = ["taesd", "taesdxl", "taesd3", "taef1"] @staticmethod - def vae_list(): + def vae_list(s): vaes = folder_paths.get_filename_list("vae") approx_vaes = folder_paths.get_filename_list("vae_approx") sdxl_taesd_enc = False @@ -722,6 +724,11 @@ class VAELoader: f1_taesd_dec = True elif v.startswith("taef1_decoder."): f1_taesd_enc = True + else: + for tae in s.video_taes: + if v.startswith(tae): + vaes.append(v) + if sd1_taesd_dec and sd1_taesd_enc: vaes.append("taesd") if sdxl_taesd_dec and sdxl_taesd_enc: @@ -765,7 +772,7 @@ class VAELoader: @classmethod def INPUT_TYPES(s): - return {"required": { "vae_name": (s.vae_list(), )}} + return {"required": { "vae_name": (s.vae_list(s), )}} RETURN_TYPES = ("VAE",) FUNCTION = "load_vae" @@ -776,10 +783,13 @@ class VAELoader: if vae_name == "pixel_space": sd = {} sd["pixel_space_vae"] = torch.tensor(1.0) - elif vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]: + elif vae_name in self.image_taes: sd = self.load_taesd(vae_name) else: - vae_path = folder_paths.get_full_path_or_raise("vae", vae_name) + if os.path.splitext(vae_name)[0] in self.video_taes: + vae_path = folder_paths.get_full_path_or_raise("vae_approx", vae_name) + else: + vae_path = folder_paths.get_full_path_or_raise("vae", vae_name) sd = comfy.utils.load_torch_file(vae_path) vae = comfy.sd.VAE(sd=sd) vae.throw_exception_if_invalid() From 52a32e2b323b90295ab05a8c299590c890f2ecb6 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 28 Nov 2025 18:12:42 -0800 Subject: [PATCH 0956/1073] Support some z image lora formats. 
(#10978) --- comfy/lora.py | 8 ++++++++ comfy/utils.py | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index 36d26293a..360cd128f 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -313,6 +313,14 @@ def model_lora_keys_unet(model, key_map={}): key_map["transformer.{}".format(key_lora)] = k key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = k #SimpleTuner lycoris format + if isinstance(model, comfy.model_base.Lumina2): + diffusers_keys = comfy.utils.z_image_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.") + for k in diffusers_keys: + to = diffusers_keys[k] + key_lora = k[:-len(".weight")] + key_map["diffusion_model.{}".format(key_lora)] = to + key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = to + return key_map diff --git a/comfy/utils.py b/comfy/utils.py index 4bd281057..21bd6e8cf 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -675,6 +675,51 @@ def flux_to_diffusers(mmdit_config, output_prefix=""): return key_map +def z_image_to_diffusers(mmdit_config, output_prefix=""): + n_layers = mmdit_config.get("n_layers", 0) + hidden_size = mmdit_config.get("dim", 0) + + key_map = {} + + for index in range(n_layers): + prefix_from = "layers.{}".format(index) + prefix_to = "{}layers.{}".format(output_prefix, index) + + for end in ("weight", "bias"): + k = "{}.attention.".format(prefix_from) + qkv = "{}.attention.qkv.{}".format(prefix_to, end) + + key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, hidden_size)) + key_map["{}to_k.{}".format(k, end)] = (qkv, (0, hidden_size, hidden_size)) + key_map["{}to_v.{}".format(k, end)] = (qkv, (0, hidden_size * 2, hidden_size)) + + block_map = { + "attention.norm_q.weight": "attention.q_norm.weight", + "attention.norm_k.weight": "attention.k_norm.weight", + "attention.to_out.0.weight": "attention.out.weight", + "attention.to_out.0.bias": "attention.out.bias", + } + + for k in block_map: + key_map["{}.{}".format(prefix_from, k)] = "{}.{}".format(prefix_to, block_map[k]) + + MAP_BASIC = { + # Final layer + ("final_layer.linear.weight", "all_final_layer.2-1.linear.weight"), + ("final_layer.linear.bias", "all_final_layer.2-1.linear.bias"), + ("final_layer.adaLN_modulation.1.weight", "all_final_layer.2-1.adaLN_modulation.1.weight"), + ("final_layer.adaLN_modulation.1.bias", "all_final_layer.2-1.adaLN_modulation.1.bias"), + # X embedder + ("x_embedder.weight", "all_x_embedder.2-1.weight"), + ("x_embedder.bias", "all_x_embedder.2-1.bias"), + } + + for k in MAP_BASIC: + key_map[k[1]] = "{}{}".format(output_prefix, k[0]) + + return key_map + + def repeat_to_batch_size(tensor, batch_size, dim=0): if tensor.shape[dim] > batch_size: return tensor.narrow(dim, 0, batch_size) From af96d9812de3e420abd43275d9a5960535b6333c Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Sat, 29 Nov 2025 11:28:42 +0900 Subject: [PATCH 0957/1073] feat(security): add System User protection with `__` prefix (#10966) * feat(security): add System User protection with `__` prefix Add protected namespace for custom nodes to store sensitive data (API keys, licenses) that cannot be accessed via HTTP endpoints. 
Key changes: - New API: get_system_user_directory() for internal access - New API: get_public_user_directory() with structural blocking - 3-layer defense: header validation, path blocking, creation prevention - 54 tests covering security, edge cases, and backward compatibility System Users use `__` prefix (e.g., __system, __cache) following Python's private member convention. They exist in user_directory/ but are completely blocked from /userdata HTTP endpoints. * style: remove unused imports --- app/user_manager.py | 21 +- folder_paths.py | 65 +++ .../app_test/user_manager_system_user_test.py | 193 +++++++++ .../folder_paths_test/system_user_test.py | 206 ++++++++++ .../system_user_endpoint_test.py | 375 ++++++++++++++++++ 5 files changed, 855 insertions(+), 5 deletions(-) create mode 100644 tests-unit/app_test/user_manager_system_user_test.py create mode 100644 tests-unit/folder_paths_test/system_user_test.py create mode 100644 tests-unit/prompt_server_test/system_user_endpoint_test.py diff --git a/app/user_manager.py b/app/user_manager.py index 675f6c0c6..e2c00dab2 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -59,6 +59,9 @@ class UserManager(): user = "default" if args.multi_user and "comfy-user" in request.headers: user = request.headers["comfy-user"] + # Block System Users (use same error message to prevent probing) + if user.startswith(folder_paths.SYSTEM_USER_PREFIX): + raise KeyError("Unknown user: " + user) if user not in self.users: raise KeyError("Unknown user: " + user) @@ -66,15 +69,16 @@ class UserManager(): return user def get_request_user_filepath(self, request, file, type="userdata", create_dir=True): - user_directory = folder_paths.get_user_directory() - if type == "userdata": - root_dir = user_directory + root_dir = folder_paths.get_user_directory() else: raise KeyError("Unknown filepath type:" + type) user = self.get_request_user_id(request) - path = user_root = os.path.abspath(os.path.join(root_dir, user)) + user_root = folder_paths.get_public_user_directory(user) + if user_root is None: + return None + path = user_root # prevent leaving /{type} if os.path.commonpath((root_dir, user_root)) != root_dir: @@ -101,7 +105,11 @@ class UserManager(): name = name.strip() if not name: raise ValueError("username not provided") + if name.startswith(folder_paths.SYSTEM_USER_PREFIX): + raise ValueError("System User prefix not allowed") user_id = re.sub("[^a-zA-Z0-9-_]+", '-', name) + if user_id.startswith(folder_paths.SYSTEM_USER_PREFIX): + raise ValueError("System User prefix not allowed") user_id = user_id + "_" + str(uuid.uuid4()) self.users[user_id] = name @@ -132,7 +140,10 @@ class UserManager(): if username in self.users.values(): return web.json_response({"error": "Duplicate username."}, status=400) - user_id = self.add_user(username) + try: + user_id = self.add_user(username) + except ValueError as e: + return web.json_response({"error": str(e)}, status=400) return web.json_response(user_id) @routes.get("/userdata") diff --git a/folder_paths.py b/folder_paths.py index ffdc4d020..9c96540e3 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -137,6 +137,71 @@ def set_user_directory(user_dir: str) -> None: user_directory = user_dir +# System User Protection - Protects system directories from HTTP endpoint access +# System Users are internal-only users that cannot be accessed via HTTP endpoints. +# They use the '__' prefix convention (similar to Python's private member convention). 
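+# For example (a sketch; the actual checks live in the helpers below): +# get_system_user_directory("cache") -> <user_dir>/__cache (internal API only) +# get_public_user_directory("__cache") -> None (structurally blocked over HTTP) +# get_public_user_directory("default") -> <user_dir>/default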
+SYSTEM_USER_PREFIX = "__" + + +def get_system_user_directory(name: str = "system") -> str: + """ + Get the path to a System User directory. + + System User directories (prefixed with '__') are only accessible via internal API, + not through HTTP endpoints. Use this for storing system-internal data that + should not be exposed to users. + + Args: + name: System user name (e.g., "system", "cache"). Must be alphanumeric + with underscores allowed, but cannot start with underscore. + + Returns: + Absolute path to the system user directory. + + Raises: + ValueError: If name is empty, invalid, or starts with underscore. + + Example: + >>> get_system_user_directory("cache") + '/path/to/user/__cache' + """ + if not name or not isinstance(name, str): + raise ValueError("System user name cannot be empty") + if not name.replace("_", "").isalnum(): + raise ValueError(f"Invalid system user name: '{name}'") + if name.startswith("_"): + raise ValueError("System user name should not start with underscore") + return os.path.join(get_user_directory(), f"{SYSTEM_USER_PREFIX}{name}") + + +def get_public_user_directory(user_id: str) -> str | None: + """ + Get the path to a Public User directory for HTTP endpoint access. + + This function provides structural security by returning None for any + System User (prefixed with '__'). All HTTP endpoints should use this + function instead of directly constructing user paths. + + Args: + user_id: User identifier from HTTP request. + + Returns: + Absolute path to the user directory, or None if user_id is invalid + or refers to a System User. + + Example: + >>> get_public_user_directory("default") + '/path/to/user/default' + >>> get_public_user_directory("__system") + None + """ + if not user_id or not isinstance(user_id, str): + return None + if user_id.startswith(SYSTEM_USER_PREFIX): + return None + return os.path.join(get_user_directory(), user_id) + + #NOTE: used in http server so don't put folders that should not be accessed remotely def get_directory_by_type(type_name: str) -> str | None: if type_name == "output": diff --git a/tests-unit/app_test/user_manager_system_user_test.py b/tests-unit/app_test/user_manager_system_user_test.py new file mode 100644 index 000000000..63b1ac5e5 --- /dev/null +++ b/tests-unit/app_test/user_manager_system_user_test.py @@ -0,0 +1,193 @@ +"""Tests for System User Protection in user_manager.py + +Tests cover: +- get_request_user_id(): 1st defense layer - blocks System Users from HTTP headers +- get_request_user_filepath(): 2nd defense layer - structural blocking via get_public_user_directory() +- add_user(): 3rd defense layer - prevents creation of System User names +- Defense layers integration tests +""" + +import pytest +from unittest.mock import MagicMock, patch +import tempfile + +import folder_paths +from app.user_manager import UserManager + + +@pytest.fixture +def mock_user_directory(): + """Create a temporary user directory.""" + with tempfile.TemporaryDirectory() as temp_dir: + original_dir = folder_paths.get_user_directory() + folder_paths.set_user_directory(temp_dir) + yield temp_dir + folder_paths.set_user_directory(original_dir) + + +@pytest.fixture +def user_manager(mock_user_directory): + """Create a UserManager instance for testing.""" + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + manager = UserManager() + # Add a default user for testing + manager.users = {"default": "default", "test_user_123": "Test User"} + yield manager + + +@pytest.fixture +def mock_request(): + """Create a 
mock request object.""" + request = MagicMock() + request.headers = {} + return request + + +class TestGetRequestUserId: + """Tests for get_request_user_id() - 1st defense layer. + + Verifies: + - System Users (__ prefix) in HTTP header are rejected with KeyError + - Public Users pass through successfully + """ + + def test_system_user_raises_error(self, user_manager, mock_request): + """Test System User in header raises KeyError.""" + mock_request.headers = {"comfy-user": "__system"} + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + with pytest.raises(KeyError, match="Unknown user"): + user_manager.get_request_user_id(mock_request) + + def test_system_user_cache_raises_error(self, user_manager, mock_request): + """Test System User cache raises KeyError.""" + mock_request.headers = {"comfy-user": "__cache"} + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + with pytest.raises(KeyError, match="Unknown user"): + user_manager.get_request_user_id(mock_request) + + def test_normal_user_works(self, user_manager, mock_request): + """Test normal user access works.""" + mock_request.headers = {"comfy-user": "default"} + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + user_id = user_manager.get_request_user_id(mock_request) + assert user_id == "default" + + def test_unknown_user_raises_error(self, user_manager, mock_request): + """Test unknown user raises KeyError.""" + mock_request.headers = {"comfy-user": "unknown_user"} + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + with pytest.raises(KeyError, match="Unknown user"): + user_manager.get_request_user_id(mock_request) + + +class TestGetRequestUserFilepath: + """Tests for get_request_user_filepath() - 2nd defense layer. + + Verifies: + - Returns None when get_public_user_directory() returns None (System User) + - Acts as backup defense if 1st layer is bypassed + """ + + def test_system_user_returns_none(self, user_manager, mock_request, mock_user_directory): + """Test System User returns None (structural blocking).""" + # First, we need to mock get_request_user_id to return System User + # But actually, get_request_user_id will raise KeyError first + # So we test via get_public_user_directory returning None + mock_request.headers = {"comfy-user": "default"} + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + # Patch get_public_user_directory to return None for testing + with patch.object(folder_paths, 'get_public_user_directory', return_value=None): + result = user_manager.get_request_user_filepath(mock_request, "test.txt") + assert result is None + + def test_normal_user_gets_path(self, user_manager, mock_request, mock_user_directory): + """Test normal user gets valid filepath.""" + mock_request.headers = {"comfy-user": "default"} + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + path = user_manager.get_request_user_filepath(mock_request, "test.txt") + assert path is not None + assert "default" in path + assert path.endswith("test.txt") + + +class TestAddUser: + """Tests for add_user() - 3rd defense layer (creation-time blocking). 
+ + Verifies: + - System User name (__ prefix) creation is rejected with ValueError + - Sanitized usernames that become System User are also rejected + """ + + def test_system_user_prefix_name_raises(self, user_manager): + """Test System User prefix in name raises ValueError.""" + with pytest.raises(ValueError, match="System User prefix not allowed"): + user_manager.add_user("__system") + + def test_system_user_prefix_cache_raises(self, user_manager): + """Test System User cache prefix raises ValueError.""" + with pytest.raises(ValueError, match="System User prefix not allowed"): + user_manager.add_user("__cache") + + def test_sanitized_system_user_prefix_raises(self, user_manager): + """Test sanitized name becoming System User prefix raises ValueError (bypass prevention).""" + # "__test" directly starts with System User prefix + with pytest.raises(ValueError, match="System User prefix not allowed"): + user_manager.add_user("__test") + + def test_normal_user_creation(self, user_manager, mock_user_directory): + """Test normal user creation works.""" + user_id = user_manager.add_user("Normal User") + assert user_id is not None + assert not user_id.startswith("__") + assert "Normal-User" in user_id or "Normal_User" in user_id + + def test_empty_name_raises(self, user_manager): + """Test empty name raises ValueError.""" + with pytest.raises(ValueError, match="username not provided"): + user_manager.add_user("") + + def test_whitespace_only_raises(self, user_manager): + """Test whitespace-only name raises ValueError.""" + with pytest.raises(ValueError, match="username not provided"): + user_manager.add_user(" ") + + +class TestDefenseLayers: + """Integration tests for all three defense layers. + + Verifies: + - Each defense layer blocks System Users independently + - System User bypass is impossible through any layer + """ + + def test_layer1_get_request_user_id(self, user_manager, mock_request): + """Test 1st defense layer blocks System Users.""" + mock_request.headers = {"comfy-user": "__system"} + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + with pytest.raises(KeyError): + user_manager.get_request_user_id(mock_request) + + def test_layer2_get_public_user_directory(self): + """Test 2nd defense layer blocks System Users.""" + result = folder_paths.get_public_user_directory("__system") + assert result is None + + def test_layer3_add_user(self, user_manager): + """Test 3rd defense layer blocks System User creation.""" + with pytest.raises(ValueError): + user_manager.add_user("__system") diff --git a/tests-unit/folder_paths_test/system_user_test.py b/tests-unit/folder_paths_test/system_user_test.py new file mode 100644 index 000000000..cd46459f1 --- /dev/null +++ b/tests-unit/folder_paths_test/system_user_test.py @@ -0,0 +1,206 @@ +"""Tests for System User Protection in folder_paths.py + +Tests cover: +- get_system_user_directory(): Internal API for custom nodes to access System User directories +- get_public_user_directory(): HTTP endpoint access with System User blocking +- Backward compatibility: Existing APIs unchanged +- Security: Path traversal and injection prevention +""" + +import pytest +import os +import tempfile + +from folder_paths import ( + get_system_user_directory, + get_public_user_directory, + get_user_directory, + set_user_directory, +) + + +@pytest.fixture(scope="module") +def mock_user_directory(): + """Create a temporary user directory for testing.""" + with tempfile.TemporaryDirectory() as temp_dir: + original_dir = 
get_user_directory() + set_user_directory(temp_dir) + yield temp_dir + set_user_directory(original_dir) + + +class TestGetSystemUserDirectory: + """Tests for get_system_user_directory() - internal API for System User directories. + + Verifies: + - Custom nodes can access System User directories via internal API + - Input validation prevents path traversal attacks + """ + + def test_default_name(self, mock_user_directory): + """Test default 'system' name.""" + path = get_system_user_directory() + assert path.endswith("__system") + assert mock_user_directory in path + + def test_custom_name(self, mock_user_directory): + """Test custom system user name.""" + path = get_system_user_directory("cache") + assert path.endswith("__cache") + assert "__cache" in path + + def test_name_with_underscore(self, mock_user_directory): + """Test name with underscore in middle.""" + path = get_system_user_directory("my_cache") + assert "__my_cache" in path + + def test_empty_name_raises(self): + """Test empty name raises ValueError.""" + with pytest.raises(ValueError, match="cannot be empty"): + get_system_user_directory("") + + def test_none_name_raises(self): + """Test None name raises ValueError.""" + with pytest.raises(ValueError, match="cannot be empty"): + get_system_user_directory(None) + + def test_name_starting_with_underscore_raises(self): + """Test name starting with underscore raises ValueError.""" + with pytest.raises(ValueError, match="should not start with underscore"): + get_system_user_directory("_system") + + def test_path_traversal_raises(self): + """Test path traversal attempt raises ValueError (security).""" + with pytest.raises(ValueError, match="Invalid system user name"): + get_system_user_directory("../escape") + + def test_path_traversal_middle_raises(self): + """Test path traversal in middle raises ValueError (security).""" + with pytest.raises(ValueError, match="Invalid system user name"): + get_system_user_directory("system/../other") + + def test_special_chars_raise(self): + """Test special characters raise ValueError (security).""" + with pytest.raises(ValueError, match="Invalid system user name"): + get_system_user_directory("system!") + + def test_returns_absolute_path(self, mock_user_directory): + """Test returned path is absolute.""" + path = get_system_user_directory("test") + assert os.path.isabs(path) + + +class TestGetPublicUserDirectory: + """Tests for get_public_user_directory() - HTTP endpoint access with System User blocking. 
+ + Verifies: + - System Users (__ prefix) return None, blocking HTTP access + - Public Users get valid paths + - New endpoints using this function are automatically protected + """ + + def test_normal_user(self, mock_user_directory): + """Test normal user returns valid path.""" + path = get_public_user_directory("default") + assert path is not None + assert "default" in path + assert mock_user_directory in path + + def test_system_user_returns_none(self): + """Test System User (__ prefix) returns None - blocks HTTP access.""" + assert get_public_user_directory("__system") is None + + def test_system_user_cache_returns_none(self): + """Test System User cache returns None.""" + assert get_public_user_directory("__cache") is None + + def test_empty_user_returns_none(self): + """Test empty user returns None.""" + assert get_public_user_directory("") is None + + def test_none_user_returns_none(self): + """Test None user returns None.""" + assert get_public_user_directory(None) is None + + def test_header_injection_returns_none(self): + """Test header injection attempt returns None (security).""" + assert get_public_user_directory("__system\r\nX-Injected: true") is None + + def test_null_byte_injection_returns_none(self): + """Test null byte injection handling (security).""" + # Note: startswith check happens before any path operations + result = get_public_user_directory("user\x00__system") + # This should return a path since it doesn't start with __ + # The actual security comes from the path not being __* + assert result is not None or result is None # Depends on validation + + def test_path_traversal_attempt(self, mock_user_directory): + """Test path traversal attempt handling.""" + # This function doesn't validate paths, only reserved prefix + # Path traversal should be handled by the caller + path = get_public_user_directory("../../../etc/passwd") + # Returns path but doesn't start with __, so not None + # Actual path validation happens in user_manager + assert path is not None or "__" not in "../../../etc/passwd" + + def test_returns_absolute_path(self, mock_user_directory): + """Test returned path is absolute.""" + path = get_public_user_directory("testuser") + assert path is not None + assert os.path.isabs(path) + + +class TestBackwardCompatibility: + """Tests for backward compatibility with existing APIs. + + Verifies: + - get_user_directory() API unchanged + - Existing user data remains accessible + """ + + def test_get_user_directory_unchanged(self, mock_user_directory): + """Test get_user_directory() still works as before.""" + user_dir = get_user_directory() + assert user_dir is not None + assert os.path.isabs(user_dir) + assert user_dir == mock_user_directory + + def test_existing_user_accessible(self, mock_user_directory): + """Test existing users can access their directories.""" + path = get_public_user_directory("default") + assert path is not None + assert "default" in path + + +class TestEdgeCases: + """Tests for edge cases in System User detection. 
+ + Verifies: + - Only __ prefix is blocked (not _, not middle __) + - Bypass attempts are prevented + """ + + def test_prefix_only(self): + """Test prefix-only string is blocked.""" + assert get_public_user_directory("__") is None + + def test_single_underscore_allowed(self): + """Test single underscore prefix is allowed (not System User).""" + path = get_public_user_directory("_system") + assert path is not None + assert "_system" in path + + def test_triple_underscore_blocked(self): + """Test triple underscore is blocked (starts with __).""" + assert get_public_user_directory("___system") is None + + def test_underscore_in_middle_allowed(self): + """Test underscore in middle is allowed.""" + path = get_public_user_directory("my__system") + assert path is not None + assert "my__system" in path + + def test_leading_space_allowed(self): + """Test leading space + prefix is allowed (doesn't start with __).""" + path = get_public_user_directory(" __system") + assert path is not None diff --git a/tests-unit/prompt_server_test/system_user_endpoint_test.py b/tests-unit/prompt_server_test/system_user_endpoint_test.py new file mode 100644 index 000000000..22ac00af9 --- /dev/null +++ b/tests-unit/prompt_server_test/system_user_endpoint_test.py @@ -0,0 +1,375 @@ +"""E2E Tests for System User Protection HTTP Endpoints + +Tests cover: +- HTTP endpoint blocking: System Users cannot access /userdata (GET, POST, DELETE, move) +- User creation blocking: System User names cannot be created via POST /users +- Backward compatibility: Public Users work as before +- Custom node scenario: Internal API works while HTTP is blocked +- Structural security: get_public_user_directory() provides automatic protection +""" + +import pytest +import os +from aiohttp import web +from app.user_manager import UserManager +from unittest.mock import patch +import folder_paths + + +@pytest.fixture +def mock_user_directory(tmp_path): + """Create a temporary user directory.""" + original_dir = folder_paths.get_user_directory() + folder_paths.set_user_directory(str(tmp_path)) + yield tmp_path + folder_paths.set_user_directory(original_dir) + + +@pytest.fixture +def user_manager_multi_user(mock_user_directory): + """Create UserManager in multi-user mode.""" + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + um = UserManager() + # Add test users + um.users = {"default": "default", "test_user_123": "Test User"} + yield um + + +@pytest.fixture +def app_multi_user(user_manager_multi_user): + """Create app with multi-user mode enabled.""" + app = web.Application() + routes = web.RouteTableDef() + user_manager_multi_user.add_routes(routes) + app.add_routes(routes) + return app + + +class TestSystemUserEndpointBlocking: + """E2E tests for System User blocking on all HTTP endpoints. + + Verifies: + - GET /userdata blocked for System Users + - POST /userdata blocked for System Users + - DELETE /userdata blocked for System Users + - POST /userdata/.../move/... blocked for System Users + """ + + @pytest.mark.asyncio + async def test_userdata_get_blocks_system_user( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + GET /userdata with System User header should be blocked. 
+ """ + # Create test directory for System User (simulating internal creation) + system_user_dir = mock_user_directory / "__system" + system_user_dir.mkdir() + (system_user_dir / "secret.txt").write_text("sensitive data") + + client = await aiohttp_client(app_multi_user) + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + # Attempt to access System User's data via HTTP + resp = await client.get( + "/userdata?dir=.", + headers={"comfy-user": "__system"} + ) + + # Should be blocked (403 Forbidden or similar error) + assert resp.status in [400, 403, 500], \ + f"System User access should be blocked, got {resp.status}" + + @pytest.mark.asyncio + async def test_userdata_post_blocks_system_user( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + POST /userdata with System User header should be blocked. + """ + client = await aiohttp_client(app_multi_user) + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + resp = await client.post( + "/userdata/test.txt", + headers={"comfy-user": "__system"}, + data=b"malicious content" + ) + + assert resp.status in [400, 403, 500], \ + f"System User write should be blocked, got {resp.status}" + + # Verify no file was created + assert not (mock_user_directory / "__system" / "test.txt").exists() + + @pytest.mark.asyncio + async def test_userdata_delete_blocks_system_user( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + DELETE /userdata with System User header should be blocked. + """ + # Create a file in System User directory + system_user_dir = mock_user_directory / "__system" + system_user_dir.mkdir() + secret_file = system_user_dir / "secret.txt" + secret_file.write_text("do not delete") + + client = await aiohttp_client(app_multi_user) + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + resp = await client.delete( + "/userdata/secret.txt", + headers={"comfy-user": "__system"} + ) + + assert resp.status in [400, 403, 500], \ + f"System User delete should be blocked, got {resp.status}" + + # Verify file still exists + assert secret_file.exists() + + @pytest.mark.asyncio + async def test_v2_userdata_blocks_system_user( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + GET /v2/userdata with System User header should be blocked. + """ + client = await aiohttp_client(app_multi_user) + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + resp = await client.get( + "/v2/userdata", + headers={"comfy-user": "__system"} + ) + + assert resp.status in [400, 403, 500], \ + f"System User v2 access should be blocked, got {resp.status}" + + @pytest.mark.asyncio + async def test_move_userdata_blocks_system_user( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + POST /userdata/{file}/move/{dest} with System User header should be blocked. 
+ """ + system_user_dir = mock_user_directory / "__system" + system_user_dir.mkdir() + (system_user_dir / "source.txt").write_text("sensitive data") + + client = await aiohttp_client(app_multi_user) + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + resp = await client.post( + "/userdata/source.txt/move/dest.txt", + headers={"comfy-user": "__system"} + ) + + assert resp.status in [400, 403, 500], \ + f"System User move should be blocked, got {resp.status}" + + # Verify source file still exists (move was blocked) + assert (system_user_dir / "source.txt").exists() + + +class TestSystemUserCreationBlocking: + """E2E tests for blocking System User name creation via POST /users. + + Verifies: + - POST /users returns 400 for System User name (not 500) + """ + + @pytest.mark.asyncio + async def test_post_users_blocks_system_user_name( + self, aiohttp_client, app_multi_user + ): + """POST /users with System User name should return 400 Bad Request.""" + client = await aiohttp_client(app_multi_user) + + resp = await client.post( + "/users", + json={"username": "__system"} + ) + + assert resp.status == 400, \ + f"System User creation should return 400, got {resp.status}" + + @pytest.mark.asyncio + async def test_post_users_blocks_system_user_prefix_variations( + self, aiohttp_client, app_multi_user + ): + """POST /users with any System User prefix variation should return 400 Bad Request.""" + client = await aiohttp_client(app_multi_user) + + system_user_names = ["__system", "__cache", "__config", "__anything"] + + for name in system_user_names: + resp = await client.post("/users", json={"username": name}) + assert resp.status == 400, \ + f"System User name '{name}' should return 400, got {resp.status}" + + +class TestPublicUserStillWorks: + """E2E tests for backward compatibility - Public Users should work as before. + + Verifies: + - Public Users can access their data via HTTP + - Public Users can create files via HTTP + """ + + @pytest.mark.asyncio + async def test_public_user_can_access_userdata( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + Public Users should still be able to access their data. + """ + # Create test directory for Public User + user_dir = mock_user_directory / "default" + user_dir.mkdir() + test_dir = user_dir / "workflows" + test_dir.mkdir() + (test_dir / "test.json").write_text('{"test": true}') + + client = await aiohttp_client(app_multi_user) + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + resp = await client.get( + "/userdata?dir=workflows", + headers={"comfy-user": "default"} + ) + + assert resp.status == 200 + data = await resp.json() + assert "test.json" in data + + @pytest.mark.asyncio + async def test_public_user_can_create_files( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + Public Users should still be able to create files. + """ + # Create user directory + user_dir = mock_user_directory / "default" + user_dir.mkdir() + + client = await aiohttp_client(app_multi_user) + + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + resp = await client.post( + "/userdata/newfile.txt", + headers={"comfy-user": "default"}, + data=b"user content" + ) + + assert resp.status == 200 + assert (user_dir / "newfile.txt").exists() + + +class TestCustomNodeScenario: + """Tests for custom node use case: internal API access vs HTTP blocking. 
+ + Verifies: + - Internal API (get_system_user_directory) works for custom nodes + - HTTP endpoint cannot access data created via internal API + """ + + def test_internal_api_can_access_system_user(self, mock_user_directory): + """ + Internal API (get_system_user_directory) should work for custom nodes. + """ + # Custom node uses internal API + system_path = folder_paths.get_system_user_directory("mynode_config") + + assert system_path is not None + assert "__mynode_config" in system_path + + # Can create and write to System User directory + os.makedirs(system_path, exist_ok=True) + config_file = os.path.join(system_path, "settings.json") + with open(config_file, "w") as f: + f.write('{"api_key": "secret"}') + + assert os.path.exists(config_file) + + @pytest.mark.asyncio + async def test_http_cannot_access_internal_data( + self, aiohttp_client, app_multi_user, mock_user_directory + ): + """ + HTTP endpoint cannot access data created via internal API. + """ + # Custom node creates data via internal API + system_path = folder_paths.get_system_user_directory("mynode_config") + os.makedirs(system_path, exist_ok=True) + with open(os.path.join(system_path, "secret.json"), "w") as f: + f.write('{"api_key": "secret"}') + + client = await aiohttp_client(app_multi_user) + + # Attacker tries to access via HTTP + with patch('app.user_manager.args') as mock_args: + mock_args.multi_user = True + resp = await client.get( + "/userdata/secret.json", + headers={"comfy-user": "__mynode_config"} + ) + + # Should be blocked + assert resp.status in [400, 403, 500] + + +class TestStructuralSecurity: + """Tests for structural security pattern. + + Verifies: + - get_public_user_directory() automatically blocks System Users + - New endpoints using this function are automatically protected + """ + + def test_get_public_user_directory_blocks_system_user(self): + """ + Any code using get_public_user_directory() is automatically protected. + """ + # This is the structural security - any new endpoint using this function + # will automatically block System Users + assert folder_paths.get_public_user_directory("__system") is None + assert folder_paths.get_public_user_directory("__cache") is None + assert folder_paths.get_public_user_directory("__anything") is None + + # Public Users work + assert folder_paths.get_public_user_directory("default") is not None + assert folder_paths.get_public_user_directory("user123") is not None + + def test_structural_security_pattern(self, mock_user_directory): + """ + Demonstrate the structural security pattern for new endpoints. + + Any new endpoint should follow this pattern: + 1. Get user from request + 2. Use get_public_user_directory() - automatically blocks System Users + 3. If None, return error + """ + def new_endpoint_handler(user_id: str) -> str | None: + """Example of how new endpoints should be implemented.""" + user_path = folder_paths.get_public_user_directory(user_id) + if user_path is None: + return None # Blocked + return user_path + + # System Users are automatically blocked + assert new_endpoint_handler("__system") is None + assert new_endpoint_handler("__secret") is None + + # Public Users work + assert new_endpoint_handler("default") is not None From 5151cff293607c2191981fd16c62c1b1a6939695 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 28 Nov 2025 20:55:00 -0800 Subject: [PATCH 0958/1073] Add some missing z image lora layers. 
(#10980) --- comfy/lora.py | 9 +++++---- comfy/utils.py | 51 +++++++++++++++++++++++++++++++++++--------------- 2 files changed, 41 insertions(+), 19 deletions(-) diff --git a/comfy/lora.py b/comfy/lora.py index 360cd128f..3a9077869 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -316,10 +316,11 @@ def model_lora_keys_unet(model, key_map={}): if isinstance(model, comfy.model_base.Lumina2): diffusers_keys = comfy.utils.z_image_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.") for k in diffusers_keys: - to = diffusers_keys[k] - key_lora = k[:-len(".weight")] - key_map["diffusion_model.{}".format(key_lora)] = to - key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = to + if k.endswith(".weight"): + to = diffusers_keys[k] + key_lora = k[:-len(".weight")] + key_map["diffusion_model.{}".format(key_lora)] = to + key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = to return key_map diff --git a/comfy/utils.py b/comfy/utils.py index 21bd6e8cf..37485e497 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -678,17 +678,14 @@ def flux_to_diffusers(mmdit_config, output_prefix=""): def z_image_to_diffusers(mmdit_config, output_prefix=""): n_layers = mmdit_config.get("n_layers", 0) hidden_size = mmdit_config.get("dim", 0) - + n_context_refiner = mmdit_config.get("n_refiner_layers", 2) + n_noise_refiner = mmdit_config.get("n_refiner_layers", 2) key_map = {} - for index in range(n_layers): - prefix_from = "layers.{}".format(index) - prefix_to = "{}layers.{}".format(output_prefix, index) - + def add_block_keys(prefix_from, prefix_to, has_adaln=True): for end in ("weight", "bias"): k = "{}.attention.".format(prefix_from) qkv = "{}.attention.qkv.{}".format(prefix_to, end) - key_map["{}to_q.{}".format(k, end)] = (qkv, (0, 0, hidden_size)) key_map["{}to_k.{}".format(k, end)] = (qkv, (0, hidden_size, hidden_size)) key_map["{}to_v.{}".format(k, end)] = (qkv, (0, hidden_size * 2, hidden_size)) @@ -698,28 +695,52 @@ def z_image_to_diffusers(mmdit_config, output_prefix=""): "attention.norm_k.weight": "attention.k_norm.weight", "attention.to_out.0.weight": "attention.out.weight", "attention.to_out.0.bias": "attention.out.bias", + "attention_norm1.weight": "attention_norm1.weight", + "attention_norm2.weight": "attention_norm2.weight", + "feed_forward.w1.weight": "feed_forward.w1.weight", + "feed_forward.w2.weight": "feed_forward.w2.weight", + "feed_forward.w3.weight": "feed_forward.w3.weight", + "ffn_norm1.weight": "ffn_norm1.weight", + "ffn_norm2.weight": "ffn_norm2.weight", } + if has_adaln: + block_map["adaLN_modulation.0.weight"] = "adaLN_modulation.0.weight" + block_map["adaLN_modulation.0.bias"] = "adaLN_modulation.0.bias" + for k, v in block_map.items(): + key_map["{}.{}".format(prefix_from, k)] = "{}.{}".format(prefix_to, v) - for k in block_map: - key_map["{}.{}".format(prefix_from, k)] = "{}.{}".format(prefix_to, block_map[k]) + for i in range(n_layers): + add_block_keys("layers.{}".format(i), "{}layers.{}".format(output_prefix, i)) - MAP_BASIC = { - # Final layer + for i in range(n_context_refiner): + add_block_keys("context_refiner.{}".format(i), "{}context_refiner.{}".format(output_prefix, i)) + + for i in range(n_noise_refiner): + add_block_keys("noise_refiner.{}".format(i), "{}noise_refiner.{}".format(output_prefix, i)) + + MAP_BASIC = [ ("final_layer.linear.weight", "all_final_layer.2-1.linear.weight"), ("final_layer.linear.bias", "all_final_layer.2-1.linear.bias"), ("final_layer.adaLN_modulation.1.weight", "all_final_layer.2-1.adaLN_modulation.1.weight"), 
("final_layer.adaLN_modulation.1.bias", "all_final_layer.2-1.adaLN_modulation.1.bias"), - # X embedder ("x_embedder.weight", "all_x_embedder.2-1.weight"), ("x_embedder.bias", "all_x_embedder.2-1.bias"), - } + ("x_pad_token", "x_pad_token"), + ("cap_embedder.0.weight", "cap_embedder.0.weight"), + ("cap_embedder.1.weight", "cap_embedder.1.weight"), + ("cap_embedder.1.bias", "cap_embedder.1.bias"), + ("cap_pad_token", "cap_pad_token"), + ("t_embedder.mlp.0.weight", "t_embedder.mlp.0.weight"), + ("t_embedder.mlp.0.bias", "t_embedder.mlp.0.bias"), + ("t_embedder.mlp.2.weight", "t_embedder.mlp.2.weight"), + ("t_embedder.mlp.2.bias", "t_embedder.mlp.2.bias"), + ] - for k in MAP_BASIC: - key_map[k[1]] = "{}{}".format(output_prefix, k[0]) + for c, diffusers in MAP_BASIC: + key_map[diffusers] = "{}{}".format(output_prefix, c) return key_map - def repeat_to_batch_size(tensor, batch_size, dim=0): if tensor.shape[dim] > batch_size: return tensor.narrow(dim, 0, batch_size) From 0a6746898d6864d65e2fc7504e5e875f8c19c0ba Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 29 Nov 2025 15:00:55 -0800 Subject: [PATCH 0959/1073] Make the ScaleRope node work on Z Image and Lumina. (#10994) --- comfy/ldm/lumina/model.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index 565400b54..7d7e9112c 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -517,11 +517,23 @@ class NextDiT(nn.Module): B, C, H, W = x.shape x = self.x_embedder(x.view(B, C, H // pH, pH, W // pW, pW).permute(0, 2, 4, 3, 5, 1).flatten(3).flatten(1, 2)) + rope_options = transformer_options.get("rope_options", None) + h_scale = 1.0 + w_scale = 1.0 + h_start = 0 + w_start = 0 + if rope_options is not None: + h_scale = rope_options.get("scale_y", 1.0) + w_scale = rope_options.get("scale_x", 1.0) + + h_start = rope_options.get("shift_y", 0.0) + w_start = rope_options.get("shift_x", 0.0) + H_tokens, W_tokens = H // pH, W // pW x_pos_ids = torch.zeros((bsz, x.shape[1], 3), dtype=torch.float32, device=device) x_pos_ids[:, :, 0] = cap_feats.shape[1] + 1 - x_pos_ids[:, :, 1] = torch.arange(H_tokens, dtype=torch.float32, device=device).view(-1, 1).repeat(1, W_tokens).flatten() - x_pos_ids[:, :, 2] = torch.arange(W_tokens, dtype=torch.float32, device=device).view(1, -1).repeat(H_tokens, 1).flatten() + x_pos_ids[:, :, 1] = (torch.arange(H_tokens, dtype=torch.float32, device=device) * h_scale + h_start).view(-1, 1).repeat(1, W_tokens).flatten() + x_pos_ids[:, :, 2] = (torch.arange(W_tokens, dtype=torch.float32, device=device) * w_scale + w_start).view(1, -1).repeat(H_tokens, 1).flatten() if self.pad_tokens_multiple is not None: pad_extra = (-x.shape[1]) % self.pad_tokens_multiple From 4967f81778f84b41acc40ed03536dd71dd88e5f2 Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Sun, 30 Nov 2025 10:07:26 +0800 Subject: [PATCH 0960/1073] update template to 0.7.25 (#10996) * update template to 0.7.24 * Update template to 0.7.25 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e0b2c566b..386477808 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.32.9 -comfyui-workflow-templates==0.7.23 +comfyui-workflow-templates==0.7.25 comfyui-embedded-docs==0.3.1 torch torchsde From f8b981ae9a5676311624bbafa636a1874db79459 Mon Sep 17 00:00:00 2001 From: comfyanonymous 
<121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 30 Nov 2025 01:21:31 -0800 Subject: [PATCH 0961/1073] Next AMD portable will have pytorch with ROCm 7.1.1 (#11002) --- .github/workflows/release-stable-all.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-stable-all.yml b/.github/workflows/release-stable-all.yml index 9274b4170..d72ece2ce 100644 --- a/.github/workflows/release-stable-all.yml +++ b/.github/workflows/release-stable-all.yml @@ -65,11 +65,11 @@ jobs: contents: "write" packages: "write" pull-requests: "read" - name: "Release AMD ROCm 6.4.4" + name: "Release AMD ROCm 7.1.1" uses: ./.github/workflows/stable-release.yml with: git_tag: ${{ inputs.git_tag }} - cache_tag: "rocm644" + cache_tag: "rocm711" python_minor: "12" python_patch: "10" rel_name: "amd" From 7dbd5dfe91f057b83dcba0c127f712f6d71f7def Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 1 Dec 2025 10:27:17 -0800 Subject: [PATCH 0962/1073] bump comfyui-frontend-package to 1.32.10 (#11018) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 386477808..045b2ac54 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.32.9 +comfyui-frontend-package==1.32.10 comfyui-workflow-templates==0.7.25 comfyui-embedded-docs==0.3.1 torch From 2640acb31ccfddee57ba22d5245bf456e8dffe53 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 1 Dec 2025 14:13:48 -0800 Subject: [PATCH 0963/1073] Update qwen tokenizer to add qwen 3 tokens. (#11029) Doesn't actually change anything for current workflows because none of the current models have a template with the think tokens. 
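For reference, a template that did use them would wrap the assistant's reasoning in the new markers, along these lines (a hypothetical sketch in the standard Qwen chat format; no shipped model template does this yet):

    <|im_start|>user
    {prompt}<|im_end|>
    <|im_start|>assistant
    <think>
    {reasoning}
    </think>
    {answer}<|im_end|>

With the entries below, the think markers then tokenize to the reserved ids 151667 and 151668 instead of being split into plain-text pieces.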
--- .../qwen25_tokenizer/tokenizer_config.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json b/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json index 67688e82c..df5b5d7fe 100644 --- a/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json +++ b/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json @@ -179,36 +179,36 @@ "special": false }, "151665": { - "content": "<|img|>", + "content": "<tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, - "special": true + "special": false }, "151666": { - "content": "<|endofimg|>", + "content": "</tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, - "special": true + "special": false }, "151667": { - "content": "<|meta|>", + "content": "<think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, - "special": true + "special": false }, "151668": { - "content": "<|endofmeta|>", + "content": "</think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, - "special": true + "special": false } }, "additional_special_tokens": [ From 1cb7e22a95701f2619d1ddf5683ea221b58a0c13 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 2 Dec 2025 02:11:52 +0200 Subject: [PATCH 0964/1073] [API Nodes] add Kling O1 model support (#11025) * feat(api-nodes): add Kling O1 model support * fix: increase max allowed duration to 10.05 seconds * fix(VideoInput): respect "format" argument --- comfy_api/latest/_input_impl/video_types.py | 5 +- comfy_api_nodes/apis/kling_api.py | 66 +++ comfy_api_nodes/nodes_kling.py | 444 +++++++++++++++++++- comfy_api_nodes/util/upload_helpers.py | 3 +- 4 files changed, 499 insertions(+), 19 deletions(-) create mode 100644 comfy_api_nodes/apis/kling_api.py diff --git a/comfy_api/latest/_input_impl/video_types.py b/comfy_api/latest/_input_impl/video_types.py index bde37f90a..7231bf13c 100644 --- a/comfy_api/latest/_input_impl/video_types.py +++ b/comfy_api/latest/_input_impl/video_types.py @@ -336,7 +336,10 @@ class VideoFromComponents(VideoInput): raise ValueError("Only MP4 format is supported for now") if codec != VideoCodec.AUTO and codec != VideoCodec.H264: raise ValueError("Only H264 codec is supported for now") - with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}) as output: + extra_kwargs = {} + if format != VideoContainer.AUTO: + extra_kwargs["format"] = format.value + with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}, **extra_kwargs) as output: # Add metadata before writing any streams if metadata is not None: for key, value in metadata.items(): diff --git a/comfy_api_nodes/apis/kling_api.py b/comfy_api_nodes/apis/kling_api.py new file mode 100644 index 000000000..0a3b447c5 --- /dev/null +++ b/comfy_api_nodes/apis/kling_api.py @@ -0,0 +1,66 @@ +from pydantic import BaseModel, Field + + +class OmniProText2VideoRequest(BaseModel): + model_name: str = Field(..., description="kling-video-o1") + aspect_ratio: str = Field(..., description="'16:9', '9:16' or '1:1'") + duration: str = Field(..., description="'5' or '10'") + prompt: str = Field(...) + mode: str = Field("pro") + + +class OmniParamImage(BaseModel): + image_url: str = Field(...) + type: str | None = Field(None, description="Can be 'first_frame' or 'end_frame'") + + +class OmniParamVideo(BaseModel): + video_url: str = Field(...)
+ refer_type: str | None = Field(..., description="Can be 'base' or 'feature'") + keep_original_sound: str = Field(..., description="'yes' or 'no'") + + +class OmniProFirstLastFrameRequest(BaseModel): + model_name: str = Field(..., description="kling-video-o1") + image_list: list[OmniParamImage] = Field(..., min_length=1, max_length=7) + duration: str = Field(..., description="'5' or '10'") + prompt: str = Field(...) + mode: str = Field("pro") + + +class OmniProReferences2VideoRequest(BaseModel): + model_name: str = Field(..., description="kling-video-o1") + aspect_ratio: str | None = Field(..., description="'16:9', '9:16' or '1:1'") + image_list: list[OmniParamImage] | None = Field( + None, max_length=7, description="Max length 4 when video is present." + ) + video_list: list[OmniParamVideo] | None = Field(None, max_length=1) + duration: str | None = Field(..., description="From 3 to 10.") + prompt: str = Field(...) + mode: str = Field("pro") + + +class TaskStatusVideoResult(BaseModel): + duration: str | None = Field(None, description="Total video duration") + id: str | None = Field(None, description="Generated video ID") + url: str | None = Field(None, description="URL for generated video") + + +class TaskStatusVideoResults(BaseModel): + videos: list[TaskStatusVideoResult] | None = Field(None) + + +class TaskStatusVideoResponseData(BaseModel): + created_at: int | None = Field(None, description="Task creation time") + updated_at: int | None = Field(None, description="Task update time") + task_status: str | None = None + task_status_msg: str | None = Field(None, description="Additional failure reason. Only for polling endpoint.") + task_id: str | None = Field(None, description="Task ID") + task_result: TaskStatusVideoResults | None = Field(None) + + +class TaskStatusVideoResponse(BaseModel): + code: int | None = Field(None, description="Error code") + message: str | None = Field(None, description="Error message") + request_id: str | None = Field(None, description="Request ID") + data: TaskStatusVideoResponseData | None = Field(None) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 23a7f55f1..850c44db6 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -4,13 +4,13 @@ For source of truth on the allowed permutations of request fields, please refer to: - [Compatibility Table](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap) """ -import math import logging - -from typing_extensions import override +import math import torch +from typing_extensions import override +from comfy_api.latest import IO, ComfyExtension, Input, InputImpl from comfy_api_nodes.apis import ( KlingCameraControl, KlingCameraConfig, @@ -48,23 +48,31 @@ from comfy_api_nodes.apis import ( KlingCharacterEffectModelName, KlingSingleImageEffectModelName, ) +from comfy_api_nodes.apis.kling_api import ( + OmniParamImage, + OmniParamVideo, + OmniProFirstLastFrameRequest, + OmniProReferences2VideoRequest, + OmniProText2VideoRequest, + TaskStatusVideoResponse, +) from comfy_api_nodes.util import ( - validate_image_dimensions, + ApiEndpoint, + download_url_to_image_tensor, + download_url_to_video_output, + get_number_of_images, + poll_op, + sync_op, + tensor_to_base64_string, + upload_audio_to_comfyapi, + upload_images_to_comfyapi, + upload_video_to_comfyapi, validate_image_aspect_ratio, + validate_image_dimensions, + validate_string, validate_video_dimensions, validate_video_duration, - tensor_to_base64_string, - validate_string, -
upload_audio_to_comfyapi, - download_url_to_image_tensor, - upload_video_to_comfyapi, - download_url_to_video_output, - sync_op, - ApiEndpoint, - poll_op, ) -from comfy_api.input_impl import VideoFromFile -from comfy_api.latest import ComfyExtension, IO, Input KLING_API_VERSION = "v1" PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video" @@ -202,6 +210,20 @@ VOICES_CONFIG = { } +async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusVideoResponse) -> IO.NodeOutput: + if response.code: + raise RuntimeError( + f"Kling request failed. Code: {response.code}, Message: {response.message}, Data: {response.data}" + ) + final_response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/kling/v1/videos/omni-video/{response.data.task_id}"), + response_model=TaskStatusVideoResponse, + status_extractor=lambda r: (r.data.task_status if r.data else None), + ) + return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url)) + + def is_valid_camera_control_configs(configs: list[float]) -> bool: """Verifies that at least one camera control configuration is non-zero.""" return any(not math.isclose(value, 0.0) for value in configs) @@ -449,7 +471,7 @@ async def execute_video_effect( image_1: torch.Tensor, image_2: torch.Tensor | None = None, model_mode: KlingVideoGenMode | None = None, -) -> tuple[VideoFromFile, str, str]: +) -> tuple[InputImpl.VideoFromFile, str, str]: if dual_character: request_input_field = KlingDualCharacterEffectInput( model_name=model_name, @@ -736,6 +758,386 @@ class KlingTextToVideoNode(IO.ComfyNode): ) +class OmniProTextToVideoNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingOmniProTextToVideoNode", + display_name="Kling Omni Text to Video (Pro)", + category="api node/video/Kling", + description="Use text prompts to generate videos with the latest Kling model.", + inputs=[ + IO.Combo.Input("model_name", options=["kling-video-o1"]), + IO.String.Input( + "prompt", + multiline=True, + tooltip="A text prompt describing the video content. 
" + "This can include both positive and negative descriptions.", + ), + IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]), + IO.Combo.Input("duration", options=[5, 10]), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + prompt: str, + aspect_ratio: str, + duration: int, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=2500) + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), + response_model=TaskStatusVideoResponse, + data=OmniProText2VideoRequest( + model_name=model_name, + prompt=prompt, + aspect_ratio=aspect_ratio, + duration=str(duration), + ), + ) + return await finish_omni_video_task(cls, response) + + +class OmniProFirstLastFrameNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingOmniProFirstLastFrameNode", + display_name="Kling Omni First-Last-Frame to Video (Pro)", + category="api node/video/Kling", + description="Use a start frame, an optional end frame, or reference images with the latest Kling model.", + inputs=[ + IO.Combo.Input("model_name", options=["kling-video-o1"]), + IO.String.Input( + "prompt", + multiline=True, + tooltip="A text prompt describing the video content. " + "This can include both positive and negative descriptions.", + ), + IO.Combo.Input("duration", options=["5", "10"]), + IO.Image.Input("first_frame"), + IO.Image.Input( + "end_frame", + optional=True, + tooltip="An optional end frame for the video. " + "This cannot be used simultaneously with 'reference_images'.", + ), + IO.Image.Input( + "reference_images", + optional=True, + tooltip="Up to 6 additional reference images.", + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + prompt: str, + duration: int, + first_frame: Input.Image, + end_frame: Input.Image | None = None, + reference_images: Input.Image | None = None, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=2500) + if end_frame is not None and reference_images is not None: + raise ValueError("The 'end_frame' input cannot be used simultaneously with 'reference_images'.") + validate_image_dimensions(first_frame, min_width=300, min_height=300) + validate_image_aspect_ratio(first_frame, (1, 2.5), (2.5, 1)) + image_list: list[OmniParamImage] = [ + OmniParamImage( + image_url=(await upload_images_to_comfyapi(cls, first_frame, wait_label="Uploading first frame"))[0], + type="first_frame", + ) + ] + if end_frame is not None: + validate_image_dimensions(end_frame, min_width=300, min_height=300) + validate_image_aspect_ratio(end_frame, (1, 2.5), (2.5, 1)) + image_list.append( + OmniParamImage( + image_url=(await upload_images_to_comfyapi(cls, end_frame, wait_label="Uploading end frame"))[0], + type="end_frame", + ) + ) + if reference_images is not None: + if get_number_of_images(reference_images) > 6: + raise ValueError("The maximum number of reference images allowed is 6.") + for i in reference_images: + validate_image_dimensions(i, min_width=300, min_height=300) + validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1)) + for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference 
frame(s)"): + image_list.append(OmniParamImage(image_url=i)) + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), + response_model=TaskStatusVideoResponse, + data=OmniProFirstLastFrameRequest( + model_name=model_name, + prompt=prompt, + duration=str(duration), + image_list=image_list, + ), + ) + return await finish_omni_video_task(cls, response) + + +class OmniProImageToVideoNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingOmniProImageToVideoNode", + display_name="Kling Omni Image to Video (Pro)", + category="api node/video/Kling", + description="Use up to 7 reference images to generate a video with the latest Kling model.", + inputs=[ + IO.Combo.Input("model_name", options=["kling-video-o1"]), + IO.String.Input( + "prompt", + multiline=True, + tooltip="A text prompt describing the video content. " + "This can include both positive and negative descriptions.", + ), + IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]), + IO.Int.Input("duration", default=3, min=3, max=10, display_mode=IO.NumberDisplay.slider), + IO.Image.Input( + "reference_images", + tooltip="Up to 7 reference images.", + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + prompt: str, + aspect_ratio: str, + duration: int, + reference_images: Input.Image, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=2500) + if get_number_of_images(reference_images) > 7: + raise ValueError("The maximum number of reference images is 7.") + for i in reference_images: + validate_image_dimensions(i, min_width=300, min_height=300) + validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1)) + image_list: list[OmniParamImage] = [] + for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"): + image_list.append(OmniParamImage(image_url=i)) + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), + response_model=TaskStatusVideoResponse, + data=OmniProReferences2VideoRequest( + model_name=model_name, + prompt=prompt, + aspect_ratio=aspect_ratio, + duration=str(duration), + image_list=image_list, + ), + ) + return await finish_omni_video_task(cls, response) + + +class OmniProVideoToVideoNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingOmniProVideoToVideoNode", + display_name="Kling Omni Video to Video (Pro)", + category="api node/video/Kling", + description="Use a video and up to 4 reference images to generate a video with the latest Kling model.", + inputs=[ + IO.Combo.Input("model_name", options=["kling-video-o1"]), + IO.String.Input( + "prompt", + multiline=True, + tooltip="A text prompt describing the video content. 
" + "This can include both positive and negative descriptions.", + ), + IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]), + IO.Int.Input("duration", default=3, min=3, max=10, display_mode=IO.NumberDisplay.slider), + IO.Video.Input("reference_video", tooltip="Video to use as a reference."), + IO.Boolean.Input("keep_original_sound", default=True), + IO.Image.Input( + "reference_images", + tooltip="Up to 4 additional reference images.", + optional=True, + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + prompt: str, + aspect_ratio: str, + duration: int, + reference_video: Input.Video, + keep_original_sound: bool, + reference_images: Input.Image | None = None, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=2500) + validate_video_duration(reference_video, min_duration=3.0, max_duration=10.05) + validate_video_dimensions(reference_video, min_width=720, min_height=720, max_width=2160, max_height=2160) + image_list: list[OmniParamImage] = [] + if reference_images is not None: + if get_number_of_images(reference_images) > 4: + raise ValueError("The maximum number of reference images allowed with a video input is 4.") + for i in reference_images: + validate_image_dimensions(i, min_width=300, min_height=300) + validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1)) + for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"): + image_list.append(OmniParamImage(image_url=i)) + video_list = [ + OmniParamVideo( + video_url=await upload_video_to_comfyapi(cls, reference_video, wait_label="Uploading reference video"), + refer_type="feature", + keep_original_sound="yes" if keep_original_sound else "no", + ) + ] + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), + response_model=TaskStatusVideoResponse, + data=OmniProReferences2VideoRequest( + model_name=model_name, + prompt=prompt, + aspect_ratio=aspect_ratio, + duration=str(duration), + image_list=image_list if image_list else None, + video_list=video_list, + ), + ) + return await finish_omni_video_task(cls, response) + + +class OmniProEditVideoNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingOmniProEditVideoNode", + display_name="Kling Omni Edit Video (Pro)", + category="api node/video/Kling", + description="Edit an existing video with the latest model from Kling.", + inputs=[ + IO.Combo.Input("model_name", options=["kling-video-o1"]), + IO.String.Input( + "prompt", + multiline=True, + tooltip="A text prompt describing the video content. " + "This can include both positive and negative descriptions.", + ), + IO.Video.Input("video", tooltip="Video for editing. 
The output video length will be the same."), + IO.Boolean.Input("keep_original_sound", default=True), + IO.Image.Input( + "reference_images", + tooltip="Up to 4 additional reference images.", + optional=True, + ), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + prompt: str, + video: Input.Video, + keep_original_sound: bool, + reference_images: Input.Image | None = None, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=2500) + validate_video_duration(video, min_duration=3.0, max_duration=10.05) + validate_video_dimensions(video, min_width=720, min_height=720, max_width=2160, max_height=2160) + image_list: list[OmniParamImage] = [] + if reference_images is not None: + if get_number_of_images(reference_images) > 4: + raise ValueError("The maximum number of reference images allowed with a video input is 4.") + for i in reference_images: + validate_image_dimensions(i, min_width=300, min_height=300) + validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1)) + for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"): + image_list.append(OmniParamImage(image_url=i)) + video_list = [ + OmniParamVideo( + video_url=await upload_video_to_comfyapi(cls, video, wait_label="Uploading base video"), + refer_type="base", + keep_original_sound="yes" if keep_original_sound else "no", + ) + ] + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), + response_model=TaskStatusVideoResponse, + data=OmniProReferences2VideoRequest( + model_name=model_name, + prompt=prompt, + aspect_ratio=None, + duration=None, + image_list=image_list if image_list else None, + video_list=video_list, + ), + ) + return await finish_omni_video_task(cls, response) + + class KlingCameraControlT2VNode(IO.ComfyNode): """ Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera. @@ -1162,7 +1564,10 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode): category="api node/video/Kling", description="Achieve different special effects when generating a video based on the effect_scene.", inputs=[ - IO.Image.Input("image", tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"), + IO.Image.Input( + "image", + tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). 
File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1", + ), IO.Combo.Input( "effect_scene", options=[i.value for i in KlingSingleImageEffectsScene], @@ -1525,6 +1930,11 @@ class KlingExtension(ComfyExtension): KlingImageGenerationNode, KlingSingleImageVideoEffectNode, KlingDualCharacterVideoEffectNode, + OmniProTextToVideoNode, + OmniProFirstLastFrameNode, + OmniProImageToVideoNode, + OmniProVideoToVideoNode, + OmniProEditVideoNode, ] diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py index b9019841f..0532bea9a 100644 --- a/comfy_api_nodes/util/upload_helpers.py +++ b/comfy_api_nodes/util/upload_helpers.py @@ -103,6 +103,7 @@ async def upload_video_to_comfyapi( container: VideoContainer = VideoContainer.MP4, codec: VideoCodec = VideoCodec.H264, max_duration: Optional[int] = None, + wait_label: str | None = "Uploading", ) -> str: """ Uploads a single video to ComfyUI API and returns its download URL. @@ -127,7 +128,7 @@ async def upload_video_to_comfyapi( video.save_to(video_bytes_io, format=container, codec=codec) video_bytes_io.seek(0) - return await upload_file_to_comfyapi(cls, video_bytes_io, filename, upload_mime_type) + return await upload_file_to_comfyapi(cls, video_bytes_io, filename, upload_mime_type, wait_label) async def upload_file_to_comfyapi( From 30c259cac8c08ff8d015f9aff3151cb525c9b702 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 1 Dec 2025 20:25:35 -0500 Subject: [PATCH 0965/1073] ComfyUI version v0.3.76 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index fa4b4f4b0..4b039356e 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.3.75" +__version__ = "0.3.76" diff --git a/pyproject.toml b/pyproject.toml index 9009e65fe..02b94a0ce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.3.75" +version = "0.3.76" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 878db3a727c1c6049bc1c4959cdfabc35eaf3d56 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 1 Dec 2025 17:56:17 -0800 Subject: [PATCH 0966/1073] Implement the Ovis image model. 
(#11030) --- comfy/ldm/chroma/model.py | 3 +- comfy/ldm/flux/layers.py | 68 +++++++++++++++++++++-------------- comfy/ldm/flux/model.py | 21 ++++++++--- comfy/model_detection.py | 10 +++++- comfy/sd.py | 13 +++++-- comfy/text_encoders/llama.py | 31 ++++++++++++++++ comfy/text_encoders/ovis.py | 69 ++++++++++++++++++++++++++++++++++++ nodes.py | 2 +- 8 files changed, 182 insertions(+), 35 deletions(-) create mode 100644 comfy/text_encoders/ovis.py diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py index a72f8cc47..2e8ef0687 100644 --- a/comfy/ldm/chroma/model.py +++ b/comfy/ldm/chroma/model.py @@ -40,7 +40,8 @@ class ChromaParams: out_dim: int hidden_dim: int n_layers: int - + txt_ids_dims: list + vec_in_dim: int diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py index 2472ab79c..60f2bdae2 100644 --- a/comfy/ldm/flux/layers.py +++ b/comfy/ldm/flux/layers.py @@ -57,6 +57,35 @@ class MLPEmbedder(nn.Module): def forward(self, x: Tensor) -> Tensor: return self.out_layer(self.silu(self.in_layer(x))) +class YakMLP(nn.Module): + def __init__(self, hidden_size: int, intermediate_size: int, dtype=None, device=None, operations=None): + super().__init__() + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.gate_proj = operations.Linear(self.hidden_size, self.intermediate_size, bias=True, dtype=dtype, device=device) + self.up_proj = operations.Linear(self.hidden_size, self.intermediate_size, bias=True, dtype=dtype, device=device) + self.down_proj = operations.Linear(self.intermediate_size, self.hidden_size, bias=True, dtype=dtype, device=device) + self.act_fn = nn.SiLU() + + def forward(self, x: Tensor) -> Tensor: + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + return down_proj + +def build_mlp(hidden_size, mlp_hidden_dim, mlp_silu_act=False, yak_mlp=False, dtype=None, device=None, operations=None): + if yak_mlp: + return YakMLP(hidden_size, mlp_hidden_dim, dtype=dtype, device=device, operations=operations) + if mlp_silu_act: + return nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim * 2, bias=False, dtype=dtype, device=device), + SiLUActivation(), + operations.Linear(mlp_hidden_dim, hidden_size, bias=False, dtype=dtype, device=device), + ) + else: + return nn.Sequential( + operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), + nn.GELU(approximate="tanh"), + operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), + ) class RMSNorm(torch.nn.Module): def __init__(self, dim: int, dtype=None, device=None, operations=None): @@ -140,7 +169,7 @@ class SiLUActivation(nn.Module): class DoubleStreamBlock(nn.Module): - def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, modulation=True, mlp_silu_act=False, proj_bias=True, dtype=None, device=None, operations=None): + def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, modulation=True, mlp_silu_act=False, proj_bias=True, yak_mlp=False, dtype=None, device=None, operations=None): super().__init__() mlp_hidden_dim = int(hidden_size * mlp_ratio) @@ -156,18 +185,7 @@ class DoubleStreamBlock(nn.Module): self.img_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - if mlp_silu_act: - self.img_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim * 2, bias=False, dtype=dtype, device=device), - 
SiLUActivation(), - operations.Linear(mlp_hidden_dim, hidden_size, bias=False, dtype=dtype, device=device), - ) - else: - self.img_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), - nn.GELU(approximate="tanh"), - operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), - ) + self.img_mlp = build_mlp(hidden_size, mlp_hidden_dim, mlp_silu_act=mlp_silu_act, yak_mlp=yak_mlp, dtype=dtype, device=device, operations=operations) if self.modulation: self.txt_mod = Modulation(hidden_size, double=True, dtype=dtype, device=device, operations=operations) @@ -177,18 +195,7 @@ class DoubleStreamBlock(nn.Module): self.txt_norm2 = operations.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device) - if mlp_silu_act: - self.txt_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim * 2, bias=False, dtype=dtype, device=device), - SiLUActivation(), - operations.Linear(mlp_hidden_dim, hidden_size, bias=False, dtype=dtype, device=device), - ) - else: - self.txt_mlp = nn.Sequential( - operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device), - nn.GELU(approximate="tanh"), - operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device), - ) + self.txt_mlp = build_mlp(hidden_size, mlp_hidden_dim, mlp_silu_act=mlp_silu_act, yak_mlp=yak_mlp, dtype=dtype, device=device, operations=operations) self.flipped_img_txt = flipped_img_txt @@ -275,6 +282,7 @@ class SingleStreamBlock(nn.Module): modulation=True, mlp_silu_act=False, bias=True, + yak_mlp=False, dtype=None, device=None, operations=None @@ -288,12 +296,17 @@ class SingleStreamBlock(nn.Module): self.mlp_hidden_dim = int(hidden_size * mlp_ratio) self.mlp_hidden_dim_first = self.mlp_hidden_dim + self.yak_mlp = yak_mlp if mlp_silu_act: self.mlp_hidden_dim_first = int(hidden_size * mlp_ratio * 2) self.mlp_act = SiLUActivation() else: self.mlp_act = nn.GELU(approximate="tanh") + if self.yak_mlp: + self.mlp_hidden_dim_first *= 2 + self.mlp_act = nn.SiLU() + # qkv and mlp_in self.linear1 = operations.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim_first, bias=bias, dtype=dtype, device=device) # proj and mlp_out @@ -325,7 +338,10 @@ class SingleStreamBlock(nn.Module): attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options) del q, k, v # compute activation in mlp stream, cat again and run second linear layer - mlp = self.mlp_act(mlp) + if self.yak_mlp: + mlp = self.mlp_act(mlp[..., self.mlp_hidden_dim_first // 2:]) * mlp[..., :self.mlp_hidden_dim_first // 2] + else: + mlp = self.mlp_act(mlp) output = self.linear2(torch.cat((attn, mlp), 2)) x += apply_mod(output, mod.gate, None, modulation_dims) if x.dtype == torch.float16: diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py index d5674dea6..f40c2a7a9 100644 --- a/comfy/ldm/flux/model.py +++ b/comfy/ldm/flux/model.py @@ -15,7 +15,8 @@ from .layers import ( MLPEmbedder, SingleStreamBlock, timestep_embedding, - Modulation + Modulation, + RMSNorm ) @dataclass @@ -34,11 +35,14 @@ class FluxParams: patch_size: int qkv_bias: bool guidance_embed: bool + txt_ids_dims: list global_modulation: bool = False mlp_silu_act: bool = False ops_bias: bool = True default_ref_method: str = "offset" ref_index_scale: float = 1.0 + yak_mlp: bool = False + txt_norm: bool = False class Flux(nn.Module): @@ -76,6 +80,11 @@ class Flux(nn.Module): ) self.txt_in = operations.Linear(params.context_in_dim, 
self.hidden_size, bias=params.ops_bias, dtype=dtype, device=device) + if params.txt_norm: + self.txt_norm = RMSNorm(params.context_in_dim, dtype=dtype, device=device, operations=operations) + else: + self.txt_norm = None + self.double_blocks = nn.ModuleList( [ DoubleStreamBlock( @@ -86,6 +95,7 @@ class Flux(nn.Module): modulation=params.global_modulation is False, mlp_silu_act=params.mlp_silu_act, proj_bias=params.ops_bias, + yak_mlp=params.yak_mlp, dtype=dtype, device=device, operations=operations ) for _ in range(params.depth) @@ -94,7 +104,7 @@ class Flux(nn.Module): self.single_blocks = nn.ModuleList( [ - SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, modulation=params.global_modulation is False, mlp_silu_act=params.mlp_silu_act, bias=params.ops_bias, dtype=dtype, device=device, operations=operations) + SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, modulation=params.global_modulation is False, mlp_silu_act=params.mlp_silu_act, bias=params.ops_bias, yak_mlp=params.yak_mlp, dtype=dtype, device=device, operations=operations) for _ in range(params.depth_single_blocks) ] ) @@ -150,6 +160,8 @@ class Flux(nn.Module): y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype) vec = vec + self.vector_in(y[:, :self.params.vec_in_dim]) + if self.txt_norm is not None: + txt = self.txt_norm(txt) txt = self.txt_in(txt) vec_orig = vec @@ -332,8 +344,9 @@ class Flux(nn.Module): txt_ids = torch.zeros((bs, context.shape[1], len(self.params.axes_dim)), device=x.device, dtype=torch.float32) - if len(self.params.axes_dim) == 4: # Flux 2 - txt_ids[:, :, 3] = torch.linspace(0, context.shape[1] - 1, steps=context.shape[1], device=x.device, dtype=torch.float32) + if len(self.params.txt_ids_dims) > 0: + for i in self.params.txt_ids_dims: + txt_ids[:, :, i] = torch.linspace(0, context.shape[1] - 1, steps=context.shape[1], device=x.device, dtype=torch.float32) out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None)) out = out[:, :img_tokens] diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 7afe4a798..7d0517e61 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -208,12 +208,12 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["theta"] = 2000 dit_config["out_channels"] = 128 dit_config["global_modulation"] = True - dit_config["vec_in_dim"] = None dit_config["mlp_silu_act"] = True dit_config["qkv_bias"] = False dit_config["ops_bias"] = False dit_config["default_ref_method"] = "index" dit_config["ref_index_scale"] = 10.0 + dit_config["txt_ids_dims"] = [3] patch_size = 1 else: dit_config["image_model"] = "flux" @@ -223,6 +223,7 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["theta"] = 10000 dit_config["out_channels"] = 16 dit_config["qkv_bias"] = True + dit_config["txt_ids_dims"] = [] patch_size = 2 dit_config["in_channels"] = 16 @@ -245,6 +246,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): vec_in_key = '{}vector_in.in_layer.weight'.format(key_prefix) if vec_in_key in state_dict_keys: dit_config["vec_in_dim"] = state_dict[vec_in_key].shape[1] + else: + dit_config["vec_in_dim"] = None dit_config["depth"] = count_blocks(state_dict_keys, '{}double_blocks.'.format(key_prefix) + '{}.') dit_config["depth_single_blocks"] = count_blocks(state_dict_keys, '{}single_blocks.'.format(key_prefix) + '{}.') @@ -270,6 
+273,11 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["nerf_embedder_dtype"] = torch.float32 else: dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys + dit_config["yak_mlp"] = '{}double_blocks.0.img_mlp.gate_proj.weight'.format(key_prefix) in state_dict_keys + dit_config["txt_norm"] = "{}txt_norm.scale".format(key_prefix) in state_dict_keys + if dit_config["yak_mlp"] and dit_config["txt_norm"]: # Ovis model + dit_config["txt_ids_dims"] = [1, 2] + return dit_config if '{}t5_yproj.weight'.format(key_prefix) in state_dict_keys: #Genmo mochi preview diff --git a/comfy/sd.py b/comfy/sd.py index 9eeb0c45a..f9e5efab5 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -53,6 +53,7 @@ import comfy.text_encoders.omnigen2 import comfy.text_encoders.qwen_image import comfy.text_encoders.hunyuan_image import comfy.text_encoders.z_image +import comfy.text_encoders.ovis import comfy.model_patcher import comfy.lora @@ -956,6 +957,7 @@ class CLIPType(Enum): QWEN_IMAGE = 18 HUNYUAN_IMAGE = 19 HUNYUAN_VIDEO_15 = 20 + OVIS = 21 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -987,6 +989,7 @@ class TEModel(Enum): MISTRAL3_24B = 14 MISTRAL3_24B_PRUNED_FLUX2 = 15 QWEN3_4B = 16 + QWEN3_2B = 17 def detect_te_model(sd): @@ -1020,9 +1023,12 @@ def detect_te_model(sd): if weight.shape[0] == 512: return TEModel.QWEN25_7B if "model.layers.0.post_attention_layernorm.weight" in sd: - if 'model.layers.0.self_attn.q_norm.weight' in sd: - return TEModel.QWEN3_4B weight = sd['model.layers.0.post_attention_layernorm.weight'] + if 'model.layers.0.self_attn.q_norm.weight' in sd: + if weight.shape[0] == 2560: + return TEModel.QWEN3_4B + elif weight.shape[0] == 2048: + return TEModel.QWEN3_2B if weight.shape[0] == 5120: if "model.layers.39.post_attention_layernorm.weight" in sd: return TEModel.MISTRAL3_24B @@ -1150,6 +1156,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip elif te_model == TEModel.QWEN3_4B: clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer + elif te_model == TEModel.QWEN3_2B: + clip_target.clip = comfy.text_encoders.ovis.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.ovis.OvisTokenizer else: # clip_l if clip_type == CLIPType.SD3: diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py index cd4b5f76c..0d07ac8c6 100644 --- a/comfy/text_encoders/llama.py +++ b/comfy/text_encoders/llama.py @@ -100,6 +100,28 @@ class Qwen3_4BConfig: rope_scale = None final_norm: bool = True +@dataclass +class Ovis25_2BConfig: + vocab_size: int = 151936 + hidden_size: int = 2048 + intermediate_size: int = 6144 + num_hidden_layers: int = 28 + num_attention_heads: int = 16 + num_key_value_heads: int = 8 + max_position_embeddings: int = 40960 + rms_norm_eps: float = 1e-6 + rope_theta: float = 1000000.0 + transformer_type: str = "llama" + head_dim = 128 + rms_norm_add = False + mlp_activation = "silu" + qkv_bias = False + rope_dims = None + q_norm = "gemma3" + k_norm = "gemma3" + rope_scale = None + final_norm: bool = True + @dataclass class Qwen25_7BVLI_Config: vocab_size: int = 152064 @@ -542,6 +564,15 @@ class Qwen3_4B(BaseLlama, torch.nn.Module): self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) self.dtype = dtype +class Ovis25_2B(BaseLlama, torch.nn.Module): + def __init__(self, config_dict, dtype, 
device, operations): + super().__init__() + config = Ovis25_2BConfig(**config_dict) + self.num_layers = config.num_hidden_layers + + self.model = Llama2_(config, device=device, dtype=dtype, ops=operations) + self.dtype = dtype + class Qwen25_7BVLI(BaseLlama, torch.nn.Module): def __init__(self, config_dict, dtype, device, operations): super().__init__() diff --git a/comfy/text_encoders/ovis.py b/comfy/text_encoders/ovis.py new file mode 100644 index 000000000..81c9bd51c --- /dev/null +++ b/comfy/text_encoders/ovis.py @@ -0,0 +1,69 @@ +from transformers import Qwen2Tokenizer +import comfy.text_encoders.llama +from comfy import sd1_clip +import os +import torch +import numbers + +class Qwen3Tokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") + super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='qwen3_2b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=284, pad_token=151643, tokenizer_data=tokenizer_data) + + +class OvisTokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="qwen3_2b", tokenizer=Qwen3Tokenizer) + self.llama_template = "<|im_start|>user\nDescribe the image by detailing the color, quantity, text, shape, size, texture, spatial relationships of the objects and background: {}<|im_end|>\n<|im_start|>assistant\n\n\n\n\n" + + def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs): + if llama_template is None: + llama_text = self.llama_template.format(text) + else: + llama_text = llama_template.format(text) + + tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs) + return tokens + +class Ovis25_2BModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Ovis25_2B, enable_attention_masks=attention_mask, return_attention_masks=False, zero_out_masked=True, model_options=model_options) + + +class OvisTEModel(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, name="qwen3_2b", clip_model=Ovis25_2BModel, model_options=model_options) + + def encode_token_weights(self, token_weight_pairs, template_end=-1): + out, pooled = super().encode_token_weights(token_weight_pairs) + tok_pairs = token_weight_pairs["qwen3_2b"][0] + count_im_start = 0 + if template_end == -1: + for i, v in enumerate(tok_pairs): + elem = v[0] + if not torch.is_tensor(elem): + if isinstance(elem, numbers.Integral): + if elem == 4004 and count_im_start < 1: + template_end = i + count_im_start += 1 + + if out.shape[1] > (template_end + 1): + if tok_pairs[template_end + 1][0] == 25: + template_end += 1 + + out = out[:, template_end:] + return out, pooled, {} + + +def te(dtype_llama=None, llama_scaled_fp8=None, llama_quantization_metadata=None): + class OvisTEModel_(OvisTEModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + if 
llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + model_options = model_options.copy() + model_options["scaled_fp8"] = llama_scaled_fp8 + if dtype_llama is not None: + dtype = dtype_llama + if llama_quantization_metadata is not None: + model_options["quantization_metadata"] = llama_quantization_metadata + super().__init__(device=device, dtype=dtype, model_options=model_options) + return OvisTEModel_ diff --git a/nodes.py b/nodes.py index 495dec806..d5e5dc228 100644 --- a/nodes.py +++ b/nodes.py @@ -939,7 +939,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), From c55dc857d5da5af203caf720ed7056047d382544 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 1 Dec 2025 17:56:38 -0800 Subject: [PATCH 0967/1073] bump comfyui-frontend-package to 1.33.10 (#11028) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 045b2ac54..f98848e20 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.32.10 +comfyui-frontend-package==1.33.10 comfyui-workflow-templates==0.7.25 comfyui-embedded-docs==0.3.1 torch From b4a20acc54b0b94dc05a1bd09dc0b54dd12203f1 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Tue, 2 Dec 2025 12:32:52 +0900 Subject: [PATCH 0968/1073] feat: Support ComfyUI-Manager for pip version (#7555) --- comfy/cli_args.py | 7 +++++++ comfy_api/feature_flags.py | 1 + main.py | 30 ++++++++++++++++++++++++++++++ manager_requirements.txt | 1 + nodes.py | 9 +++++++++ server.py | 8 +++++++- 6 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 manager_requirements.txt diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 5f0dfaa10..209fc185b 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -121,6 +121,12 @@ upcast.add_argument("--force-upcast-attention", action="store_true", help="Force upcast.add_argument("--dont-upcast-attention", action="store_true", help="Disable all upcasting of attention. Should be unnecessary except for debugging.") +parser.add_argument("--enable-manager", action="store_true", help="Enable the ComfyUI-Manager feature.") +manager_group = parser.add_mutually_exclusive_group() +manager_group.add_argument("--disable-manager-ui", action="store_true", help="Disables only the ComfyUI-Manager UI and endpoints. Scheduled installations and similar background tasks will still operate.") +manager_group.add_argument("--enable-manager-legacy-ui", action="store_true", help="Enables the legacy UI of ComfyUI-Manager") + + vram_group = parser.add_mutually_exclusive_group() vram_group.add_argument("--gpu-only", action="store_true", help="Store and run everything (text encoders/CLIP models, etc... on the GPU).") vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.") @@ -168,6 +174,7 @@ parser.add_argument("--multi-user", action="store_true", help="Enables per-user parser.add_argument("--verbose", default='INFO', const='DEBUG', nargs="?", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Set the logging level') parser.add_argument("--log-stdout", action="store_true", help="Send normal process output to stdout instead of stderr (default).") + # The default built-in provider hosted under web/ DEFAULT_VERSION_STRING = "comfyanonymous/ComfyUI@latest" diff --git a/comfy_api/feature_flags.py b/comfy_api/feature_flags.py index 0d4389a6e..bfb77eb5f 100644 --- a/comfy_api/feature_flags.py +++ b/comfy_api/feature_flags.py @@ -13,6 +13,7 @@ from comfy.cli_args import args SERVER_FEATURE_FLAGS: Dict[str, Any] = { "supports_preview_metadata": True, "max_upload_size": args.max_upload_size * 1024 * 1024, # Convert MB to bytes + "extension": {"manager": {"supports_v4": True}}, } diff --git a/main.py b/main.py index e1b0f1620..0cd815d9e 100644 --- a/main.py +++ b/main.py @@ -15,6 +15,7 @@ from comfy_execution.progress import get_progress_state from comfy_execution.utils import get_executing_context from comfy_api import feature_flags + if __name__ == "__main__": #NOTE: These do not do anything on core ComfyUI, they are for custom nodes. os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1' @@ -22,6 +23,23 @@ if __name__ == "__main__": setup_logger(log_level=args.verbose, use_stdout=args.log_stdout) + +def handle_comfyui_manager_unavailable(): + if not args.windows_standalone_build: + logging.warning(f"\n\nYou appear to be running comfyui-manager from source, this is not recommended. Please install comfyui-manager using the following command:\ncommand:\n\t{sys.executable} -m pip install --pre comfyui_manager\n") + args.enable_manager = False + + +if args.enable_manager: + if importlib.util.find_spec("comfyui_manager"): + import comfyui_manager + + if not comfyui_manager.__file__ or not comfyui_manager.__file__.endswith('__init__.py'): + handle_comfyui_manager_unavailable() + else: + handle_comfyui_manager_unavailable() + + def apply_custom_paths(): # extra model paths extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml") @@ -79,6 +97,11 @@ def execute_prestartup_script(): for possible_module in possible_modules: module_path = os.path.join(custom_node_path, possible_module) + + if args.enable_manager: + if comfyui_manager.should_be_disabled(module_path): + continue + if os.path.isfile(module_path) or module_path.endswith(".disabled") or module_path == "__pycache__": continue @@ -101,6 +124,10 @@ def execute_prestartup_script(): logging.info("") apply_custom_paths() + +if args.enable_manager: + comfyui_manager.prestartup() + execute_prestartup_script() @@ -323,6 +350,9 @@ def start_comfyui(asyncio_loop=None): asyncio.set_event_loop(asyncio_loop) prompt_server = server.PromptServer(asyncio_loop) + if args.enable_manager and not args.disable_manager_ui: + comfyui_manager.start() + hook_breaker_ac10a0.save_functions() asyncio_loop.run_until_complete(nodes.init_extra_nodes( init_custom_nodes=(not args.disable_all_custom_nodes) or len(args.whitelist_custom_nodes) > 0, diff --git a/manager_requirements.txt b/manager_requirements.txt new file mode 100644 index 000000000..52cc5389c --- /dev/null +++ b/manager_requirements.txt @@ -0,0 +1 @@ +comfyui_manager==4.0.3b3 diff --git a/nodes.py b/nodes.py index d5e5dc228..4c910a34b 100644 --- a/nodes.py +++ b/nodes.py @@ -43,6 
+43,9 @@ import folder_paths import latent_preview import node_helpers +if args.enable_manager: + import comfyui_manager + def before_node_execution(): comfy.model_management.throw_exception_if_processing_interrupted() @@ -2243,6 +2246,12 @@ async def init_external_custom_nodes(): if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes: logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes") continue + + if args.enable_manager: + if comfyui_manager.should_be_disabled(module_path): + logging.info(f"Blocked by policy: {module_path}") + continue + time_before = time.perf_counter() success = await load_custom_node(module_path, base_node_names, module_parent="custom_nodes") node_import_times.append((time.perf_counter() - time_before, module_path, success)) diff --git a/server.py b/server.py index fca5050bd..e3bd056d9 100644 --- a/server.py +++ b/server.py @@ -44,6 +44,9 @@ from protocol import BinaryEventTypes # Import cache control middleware from middleware.cache_middleware import cache_control +if args.enable_manager: + import comfyui_manager + async def send_socket_catch_exception(function, message): try: await function(message) @@ -212,6 +215,9 @@ class PromptServer(): if args.disable_api_nodes: middlewares.append(create_block_external_middleware()) + if args.enable_manager: + middlewares.append(comfyui_manager.create_middleware()) + max_upload_size = round(args.max_upload_size * 1024 * 1024) self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares) self.sockets = dict() @@ -599,7 +605,7 @@ class PromptServer(): system_stats = { "system": { - "os": os.name, + "os": sys.platform, "ram_total": ram_total, "ram_free": ram_free, "comfyui_version": __version__, From a17cf1c3871ad582c85c2bb6fddb63ec9c6df0ce Mon Sep 17 00:00:00 2001 From: Yoland Yan <4950057+yoland68@users.noreply.github.com> Date: Mon, 1 Dec 2025 19:40:44 -0800 Subject: [PATCH 0969/1073] Add @guill as a code owner (#11031) --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index b7aca9b26..51acc4986 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,3 +1,4 @@ # Admins * @comfyanonymous * @kosinkadink +* @guill From 44baa0b7f32dd0c2ff0a9898aeb6c7929d855cd3 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 2 Dec 2025 11:46:29 -0800 Subject: [PATCH 0970/1073] Fix CODEOWNERS formatting to have all on the same line, otherwise only last line applies (#11053) --- CODEOWNERS | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 51acc4986..4d5448636 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,4 +1,2 @@ # Admins -* @comfyanonymous -* @kosinkadink -* @guill +* @comfyanonymous @kosinkadink @guill From 33d6aec3b70bc6f3e5bba26c85bd8f3bb1380d08 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 2 Dec 2025 21:50:13 +0200 Subject: [PATCH 0971/1073] add check for the format arg type in VideoFromComponents.save_to function (#11046) * add check for the format var type in VideoFromComponents.save_to function * convert "format" to VideoContainer enum --- comfy_api/latest/_input_impl/video_types.py | 2 +- comfy_extras/nodes_video.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_api/latest/_input_impl/video_types.py b/comfy_api/latest/_input_impl/video_types.py index 7231bf13c..a4cd3737d 100644 --- a/comfy_api/latest/_input_impl/video_types.py +++ 
b/comfy_api/latest/_input_impl/video_types.py @@ -337,7 +337,7 @@ class VideoFromComponents(VideoInput): if codec != VideoCodec.AUTO and codec != VideoCodec.H264: raise ValueError("Only H264 codec is supported for now") extra_kwargs = {} - if format != VideoContainer.AUTO: + if isinstance(format, VideoContainer) and format != VideoContainer.AUTO: extra_kwargs["format"] = format.value with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}, **extra_kwargs) as output: # Add metadata before writing any streams diff --git a/comfy_extras/nodes_video.py b/comfy_extras/nodes_video.py index 69fabb12e..6cf6e39bf 100644 --- a/comfy_extras/nodes_video.py +++ b/comfy_extras/nodes_video.py @@ -88,7 +88,7 @@ class SaveVideo(io.ComfyNode): ) @classmethod - def execute(cls, video: VideoInput, filename_prefix, format, codec) -> io.NodeOutput: + def execute(cls, video: VideoInput, filename_prefix, format: str, codec) -> io.NodeOutput: width, height = video.get_dimensions() full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( filename_prefix, @@ -108,7 +108,7 @@ class SaveVideo(io.ComfyNode): file = f"{filename}_{counter:05}_.{VideoContainer.get_extension(format)}" video.save_to( os.path.join(full_output_folder, file), - format=format, + format=VideoContainer(format), codec=codec, metadata=saved_metadata ) From daaceac769a1355ab975758ede064317ea7514b4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 2 Dec 2025 14:11:58 -0800 Subject: [PATCH 0972/1073] Hack to make zimage work in fp16. (#11057) --- comfy/ldm/lumina/model.py | 18 +++++++++++------- comfy/supported_models.py | 2 ++ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index 7d7e9112c..070b5da09 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -22,6 +22,10 @@ def modulate(x, scale): # Core NextDiT Model # ############################################################################# +def clamp_fp16(x): + if x.dtype == torch.float16: + return torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504) + return x class JointAttention(nn.Module): """Multi-head attention module.""" @@ -169,7 +173,7 @@ class FeedForward(nn.Module): # @torch.compile def _forward_silu_gating(self, x1, x3): - return F.silu(x1) * x3 + return clamp_fp16(F.silu(x1) * x3) def forward(self, x): return self.w2(self._forward_silu_gating(self.w1(x), self.w3(x))) @@ -273,27 +277,27 @@ class JointTransformerBlock(nn.Module): scale_msa, gate_msa, scale_mlp, gate_mlp = self.adaLN_modulation(adaln_input).chunk(4, dim=1) x = x + gate_msa.unsqueeze(1).tanh() * self.attention_norm2( - self.attention( + clamp_fp16(self.attention( modulate(self.attention_norm1(x), scale_msa), x_mask, freqs_cis, transformer_options=transformer_options, - ) + )) ) x = x + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2( - self.feed_forward( + clamp_fp16(self.feed_forward( modulate(self.ffn_norm1(x), scale_mlp), - ) + )) ) else: assert adaln_input is None x = x + self.attention_norm2( - self.attention( + clamp_fp16(self.attention( self.attention_norm1(x), x_mask, freqs_cis, transformer_options=transformer_options, - ) + )) ) x = x + self.ffn_norm2( self.feed_forward( diff --git a/comfy/supported_models.py b/comfy/supported_models.py index af8120400..afd97160b 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1027,6 +1027,8 @@ class ZImage(Lumina2): memory_usage_factor = 1.7 + 
supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32]
+
 def clip_target(self, state_dict={}):
 pref = self.text_encoder_key_prefix[0]
 hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen3_4b.transformer.".format(pref))

From 277237ccc1499bac7fcd221a666dfe7a32ac4206 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Wed, 3 Dec 2025 08:24:19 +1000
Subject: [PATCH 0973/1073] attention: use flag based OOM fallback (#11038)

An exception handler keeps all local variables referenced for the
lifetime of the exception context. Just set a flag in the handler and
run the fallback behind an if, so the exception is dropped before
falling back.
---
 comfy/ldm/modules/attention.py              | 3 +++
 comfy/ldm/modules/diffusionmodules/model.py | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 7437e0567..a8800ded0 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -517,6 +517,7 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha
 @wrap_attn
 def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs):
+    exception_fallback = False
     if skip_reshape:
         b, _, _, dim_head = q.shape
         tensor_layout = "HND"
@@ -541,6 +542,8 @@ def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=
             out = sageattn(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout)
         except Exception as e:
             logging.error("Error running sage attention: {}, using pytorch attention instead.".format(e))
+            exception_fallback = True
+    if exception_fallback:
         if tensor_layout == "NHD":
             q, k, v = map(
                 lambda t: t.transpose(1, 2),
diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 4245eedca..de1e01cc8 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -279,6 +279,7 @@ def pytorch_attention(q, k, v):
     orig_shape = q.shape
     B = orig_shape[0]
     C = orig_shape[1]
+    oom_fallback = False
     q, k, v = map(
         lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(),
         (q, k, v),
@@ -289,6 +290,8 @@ def pytorch_attention(q, k, v):
         out = out.transpose(2, 3).reshape(orig_shape)
     except model_management.OOM_EXCEPTION:
         logging.warning("scaled_dot_product_attention OOMed: switched to slice attention")
+        oom_fallback = True
+    if oom_fallback:
         out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(orig_shape)
     return out

From b94d394a64dd0af06bca44b96c66549bb463331d Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 2 Dec 2025 18:38:31 -0800
Subject: [PATCH 0974/1073] Support Z Image alibaba pai fun controlnets.
 (#11062)

These are not actual controlnets, so they go in the models/model_patches
folder; load them with the ModelPatchLoader node and apply them with the
QwenImageDiffsynthControlnet node.
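A note on the mechanism for readers of this patch: a model patch registered
through set_model_double_block_patch is a plain callable that receives a
kwargs dict once per double block and may hand back replacement tensors. A
minimal sketch of that calling convention, inferred from the hooks added in
this patch (the residual line is illustrative only, not the actual control
math):

class ExampleDoubleBlockPatch:
    """Illustrative no-op patch following the double-block patch convention used below."""

    def __call__(self, kwargs):
        img = kwargs.get("img")                  # image-token hidden states for the current block
        block_index = kwargs.get("block_index")  # index of the double block invoking the patch
        # A real patch (e.g. ZImageControlPatch in this diff) computes a control
        # residual for this block and adds it in place:
        # img[:, :residual.shape[1]] += residual * strength
        return kwargs  # any "img"/"txt" keys in the returned dict overwrite the block's tensors

The ZImageControlPatch added below follows this shape: it encodes the control
image with the VAE once (re-encoding only if the latent size changes) and
injects a control residual at every fifth double block.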
--- comfy/ldm/lumina/controlnet.py | 113 ++++++++++++++++++++++++++++++ comfy/ldm/lumina/model.py | 24 ++++--- comfy_extras/nodes_model_patch.py | 101 +++++++++++++++++++++++++- 3 files changed, 229 insertions(+), 9 deletions(-) create mode 100644 comfy/ldm/lumina/controlnet.py diff --git a/comfy/ldm/lumina/controlnet.py b/comfy/ldm/lumina/controlnet.py new file mode 100644 index 000000000..fd7ce3b5c --- /dev/null +++ b/comfy/ldm/lumina/controlnet.py @@ -0,0 +1,113 @@ +import torch +from torch import nn + +from .model import JointTransformerBlock + +class ZImageControlTransformerBlock(JointTransformerBlock): + def __init__( + self, + layer_id: int, + dim: int, + n_heads: int, + n_kv_heads: int, + multiple_of: int, + ffn_dim_multiplier: float, + norm_eps: float, + qk_norm: bool, + modulation=True, + block_id=0, + operation_settings=None, + ): + super().__init__(layer_id, dim, n_heads, n_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, modulation, z_image_modulation=True, operation_settings=operation_settings) + self.block_id = block_id + if block_id == 0: + self.before_proj = operation_settings.get("operations").Linear(self.dim, self.dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.after_proj = operation_settings.get("operations").Linear(self.dim, self.dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, c, x, **kwargs): + if self.block_id == 0: + c = self.before_proj(c) + x + c = super().forward(c, **kwargs) + c_skip = self.after_proj(c) + return c_skip, c + +class ZImage_Control(torch.nn.Module): + def __init__( + self, + dim: int = 3840, + n_heads: int = 30, + n_kv_heads: int = 30, + multiple_of: int = 256, + ffn_dim_multiplier: float = (8.0 / 3.0), + norm_eps: float = 1e-5, + qk_norm: bool = True, + dtype=None, + device=None, + operations=None, + **kwargs + ): + super().__init__() + operation_settings = {"operations": operations, "device": device, "dtype": dtype} + + self.additional_in_dim = 0 + self.control_in_dim = 16 + n_refiner_layers = 2 + self.n_control_layers = 6 + self.control_layers = nn.ModuleList( + [ + ZImageControlTransformerBlock( + i, + dim, + n_heads, + n_kv_heads, + multiple_of, + ffn_dim_multiplier, + norm_eps, + qk_norm, + block_id=i, + operation_settings=operation_settings, + ) + for i in range(self.n_control_layers) + ] + ) + + all_x_embedder = {} + patch_size = 2 + f_patch_size = 1 + x_embedder = operations.Linear(f_patch_size * patch_size * patch_size * self.control_in_dim, dim, bias=True, device=device, dtype=dtype) + all_x_embedder[f"{patch_size}-{f_patch_size}"] = x_embedder + + self.control_all_x_embedder = nn.ModuleDict(all_x_embedder) + self.control_noise_refiner = nn.ModuleList( + [ + JointTransformerBlock( + layer_id, + dim, + n_heads, + n_kv_heads, + multiple_of, + ffn_dim_multiplier, + norm_eps, + qk_norm, + modulation=True, + z_image_modulation=True, + operation_settings=operation_settings, + ) + for layer_id in range(n_refiner_layers) + ] + ) + + def forward(self, cap_feats, control_context, x_freqs_cis, adaln_input): + patch_size = 2 + f_patch_size = 1 + pH = pW = patch_size + B, C, H, W = control_context.shape + control_context = self.control_all_x_embedder[f"{patch_size}-{f_patch_size}"](control_context.view(B, C, H // pH, pH, W // pW, pW).permute(0, 2, 4, 3, 5, 1).flatten(3).flatten(1, 2)) + + x_attn_mask = None + for layer in self.control_noise_refiner: + control_context = layer(control_context, x_attn_mask, 
x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input) + return control_context + + def forward_control_block(self, layer_id, control_context, x, x_attn_mask, x_freqs_cis, adaln_input): + return self.control_layers[layer_id](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index 070b5da09..f1c1a0ec3 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -568,7 +568,7 @@ class NextDiT(nn.Module): ).execute(x, timesteps, context, num_tokens, attention_mask, **kwargs) # def forward(self, x, t, cap_feats, cap_mask): - def _forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs): + def _forward(self, x, timesteps, context, num_tokens, attention_mask=None, transformer_options={}, **kwargs): t = 1.0 - timesteps cap_feats = context cap_mask = attention_mask @@ -585,16 +585,24 @@ class NextDiT(nn.Module): cap_feats = self.cap_embedder(cap_feats) # (N, L, D) # todo check if able to batchify w.o. redundant compute + patches = transformer_options.get("patches", {}) transformer_options = kwargs.get("transformer_options", {}) x_is_tensor = isinstance(x, torch.Tensor) - x, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens, transformer_options=transformer_options) - freqs_cis = freqs_cis.to(x.device) + img, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens, transformer_options=transformer_options) + freqs_cis = freqs_cis.to(img.device) - for layer in self.layers: - x = layer(x, mask, freqs_cis, adaln_input, transformer_options=transformer_options) + for i, layer in enumerate(self.layers): + img = layer(img, mask, freqs_cis, adaln_input, transformer_options=transformer_options) + if "double_block" in patches: + for p in patches["double_block"]: + out = p({"img": img[:, cap_size[0]:], "txt": img[:, :cap_size[0]], "pe": freqs_cis[:, cap_size[0]:], "vec": adaln_input, "x": x, "block_index": i, "transformer_options": transformer_options}) + if "img" in out: + img[:, cap_size[0]:] = out["img"] + if "txt" in out: + img[:, :cap_size[0]] = out["txt"] - x = self.final_layer(x, adaln_input) - x = self.unpatchify(x, img_size, cap_size, return_tensor=x_is_tensor)[:,:,:h,:w] + img = self.final_layer(img, adaln_input) + img = self.unpatchify(img, img_size, cap_size, return_tensor=x_is_tensor)[:, :, :h, :w] - return -x + return -img diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index 783c59b6b..c61810dbf 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -6,6 +6,7 @@ import comfy.ops import comfy.model_management import comfy.ldm.common_dit import comfy.latent_formats +import comfy.ldm.lumina.controlnet class BlockWiseControlBlock(torch.nn.Module): @@ -189,6 +190,35 @@ class SigLIPMultiFeatProjModel(torch.nn.Module): return embedding +def z_image_convert(sd): + replace_keys = {".attention.to_out.0.bias": ".attention.out.bias", + ".attention.norm_k.weight": ".attention.k_norm.weight", + ".attention.norm_q.weight": ".attention.q_norm.weight", + ".attention.to_out.0.weight": ".attention.out.weight" + } + + out_sd = {} + for k in sorted(sd.keys()): + w = sd[k] + + k_out = k + if k_out.endswith(".attention.to_k.weight"): + cc = [w] + continue + if k_out.endswith(".attention.to_q.weight"): + cc = [w] + cc + continue + if 
k_out.endswith(".attention.to_v.weight"): + cc = cc + [w] + w = torch.cat(cc, dim=0) + k_out = k_out.replace(".attention.to_v.weight", ".attention.qkv.weight") + + for r, rr in replace_keys.items(): + k_out = k_out.replace(r, rr) + out_sd[k_out] = w + + return out_sd + class ModelPatchLoader: @classmethod def INPUT_TYPES(s): @@ -211,6 +241,9 @@ class ModelPatchLoader: elif 'feature_embedder.mid_layer_norm.bias' in sd: sd = comfy.utils.state_dict_prefix_replace(sd, {"feature_embedder.": ""}, filter_keys=True) model = SigLIPMultiFeatProjModel(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) + elif 'control_all_x_embedder.2-1.weight' in sd: # alipai z image fun controlnet + sd = z_image_convert(sd) + model = comfy.ldm.lumina.controlnet.ZImage_Control(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) model.load_state_dict(sd) model = comfy.model_patcher.ModelPatcher(model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) @@ -263,6 +296,69 @@ class DiffSynthCnetPatch: def models(self): return [self.model_patch] +class ZImageControlPatch: + def __init__(self, model_patch, vae, image, strength): + self.model_patch = model_patch + self.vae = vae + self.image = image + self.strength = strength + self.encoded_image = self.encode_latent_cond(image) + self.encoded_image_size = (image.shape[1], image.shape[2]) + self.temp_data = None + + def encode_latent_cond(self, image): + latent_image = comfy.latent_formats.Flux().process_in(self.vae.encode(image)) + return latent_image + + def __call__(self, kwargs): + x = kwargs.get("x") + img = kwargs.get("img") + txt = kwargs.get("txt") + pe = kwargs.get("pe") + vec = kwargs.get("vec") + block_index = kwargs.get("block_index") + spacial_compression = self.vae.spacial_compression_encode() + if self.encoded_image is None or self.encoded_image_size != (x.shape[-2] * spacial_compression, x.shape[-1] * spacial_compression): + image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center") + loaded_models = comfy.model_management.loaded_models(only_currently_used=True) + self.encoded_image = self.encode_latent_cond(image_scaled.movedim(1, -1)) + self.encoded_image_size = (image_scaled.shape[-2], image_scaled.shape[-1]) + comfy.model_management.load_models_gpu(loaded_models) + + cnet_index = (block_index // 5) + cnet_index_float = (block_index / 5) + + kwargs.pop("img") # we do ops in place + kwargs.pop("txt") + + cnet_blocks = self.model_patch.model.n_control_layers + if cnet_index_float > (cnet_blocks - 1): + self.temp_data = None + return kwargs + + if self.temp_data is None or self.temp_data[0] > cnet_index: + self.temp_data = (-1, (None, self.model_patch.model(txt, self.encoded_image.to(img.dtype), pe, vec))) + + while self.temp_data[0] < cnet_index and (self.temp_data[0] + 1) < cnet_blocks: + next_layer = self.temp_data[0] + 1 + self.temp_data = (next_layer, self.model_patch.model.forward_control_block(next_layer, self.temp_data[1][1], img[:, :self.temp_data[1][1].shape[1]], None, pe, vec)) + + if cnet_index_float == self.temp_data[0]: + img[:, :self.temp_data[1][0].shape[1]] += (self.temp_data[1][0] * self.strength) + if cnet_blocks == self.temp_data[0] + 1: + self.temp_data = None + + return kwargs + + def to(self, device_or_dtype): + if isinstance(device_or_dtype, torch.device): + self.encoded_image = 
self.encoded_image.to(device_or_dtype)
+            self.temp_data = None
+        return self
+
+    def models(self):
+        return [self.model_patch]
+
 class QwenImageDiffsynthControlnet:
     @classmethod
     def INPUT_TYPES(s):
@@ -289,7 +385,10 @@ class QwenImageDiffsynthControlnet:
             mask = mask.unsqueeze(2)
             mask = 1.0 - mask
-        model_patched.set_model_double_block_patch(DiffSynthCnetPatch(model_patch, vae, image, strength, mask))
+        if isinstance(model_patch.model, comfy.ldm.lumina.controlnet.ZImage_Control):
+            model_patched.set_model_double_block_patch(ZImageControlPatch(model_patch, vae, image, strength))
+        else:
+            model_patched.set_model_double_block_patch(DiffSynthCnetPatch(model_patch, vae, image, strength, mask))
         return (model_patched,)

From 3f512f5659cfbb3c53999cde6ff557591740252b Mon Sep 17 00:00:00 2001
From: Jim Heising
Date: Tue, 2 Dec 2025 19:29:27 -0800
Subject: [PATCH 0975/1073] Added PATCH method to CORS headers (#11066)

Added the PATCH HTTP method to the Access-Control-Allow-Methods header
because there are now PATCH endpoints exposed in the API.

See https://github.com/comfyanonymous/ComfyUI/blob/277237ccc1499bac7fcd221a666dfe7a32ac4206/api_server/routes/internal/internal_routes.py#L34
for an example of an API endpoint that uses the PATCH method.
---
 server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server.py b/server.py
index e3bd056d9..ac4f42222 100644
--- a/server.py
+++ b/server.py
@@ -98,7 +98,7 @@ def create_cors_middleware(allowed_origin: str):
         response = await handler(request)
         response.headers['Access-Control-Allow-Origin'] = allowed_origin
-        response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS'
+        response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS, PATCH'
         response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'
         response.headers['Access-Control-Allow-Credentials'] = 'true'
         return response

From 73f5649196f472d3719e2e7513e0a9d029cc3e38 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Wed, 3 Dec 2025 13:49:29 +1000
Subject: [PATCH 0976/1073] Implement temporal rolling VAE (Major VRAM
 reductions in Hunyuan and Kandinsky) (#10995)

* hunyuan upsampler: rework imports

Remove the transitive import of VideoConv3d and Resnet and take these
from their actual implementation source.

* model: remove unused give_pre_end

According to git grep, this is not used now, and was not used in the
initial commit that introduced it (see below). This semantic is
difficult to implement the temporal roll VAE for (and would defeat the
purpose). Rather than implement the complex if, just delete the unused
feature.

(venv) rattus@rattus-box2:~/ComfyUI$ git log --oneline
220afe33 (HEAD) Initial commit.
(venv) rattus@rattus-box2:~/ComfyUI$ git grep give_pre
comfy/ldm/modules/diffusionmodules/model.py: resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
comfy/ldm/modules/diffusionmodules/model.py: self.give_pre_end = give_pre_end
comfy/ldm/modules/diffusionmodules/model.py: if self.give_pre_end:
(venv) rattus@rattus-box2:~/ComfyUI$ git co origin/master
Previous HEAD position was 220afe33 Initial commit.
HEAD is now at 9d8a8179 Enable async offloading by default on Nvidia.
(#10953)
(venv) rattus@rattus-box2:~/ComfyUI$ git grep give_pre
comfy/ldm/modules/diffusionmodules/model.py: resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
comfy/ldm/modules/diffusionmodules/model.py: self.give_pre_end = give_pre_end
comfy/ldm/modules/diffusionmodules/model.py: if self.give_pre_end:
(venv) rattus@rattus-box2:~/ComfyUI$

* move refiner VAE temporal roller to core

Move the carrying conv op to the common VAE code and give it a better
name. Roll the carry implementation logic for Resnet into the base
class and scrap the Hunyuan specific subclass.

* model: Add temporal roll to main VAE decoder

If there are no attention layers, it's a standard resnet and
VideoConv3d is asked for, substitute in the temporal rolling VAE
algorithm. This reduces VAE memory usage by the temporal dimension
(can be huge VRAM savings).

* model: Add temporal roll to main VAE encoder

If there are no attention layers, it's a standard resnet and
VideoConv3d is asked for, substitute in the temporal rolling VAE
algorithm. This reduces VAE memory usage by the temporal dimension
(can be huge VRAM savings).
---
 comfy/ldm/hunyuan_video/upsampler.py        |   3 +-
 comfy/ldm/hunyuan_video/vae_refiner.py      |  94 +++------
 comfy/ldm/modules/diffusionmodules/model.py | 207 ++++++++++++++------
 3 files changed, 174 insertions(+), 130 deletions(-)

diff --git a/comfy/ldm/hunyuan_video/upsampler.py b/comfy/ldm/hunyuan_video/upsampler.py
index 9f5e91a59..85f515f67 100644
--- a/comfy/ldm/hunyuan_video/upsampler.py
+++ b/comfy/ldm/hunyuan_video/upsampler.py
@@ -1,7 +1,8 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from comfy.ldm.hunyuan_video.vae_refiner import RMS_norm, ResnetBlock, VideoConv3d
+from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, VideoConv3d
+from comfy.ldm.hunyuan_video.vae_refiner import RMS_norm
 import model_management, model_patcher

 class SRResidualCausalBlock3D(nn.Module):
diff --git a/comfy/ldm/hunyuan_video/vae_refiner.py b/comfy/ldm/hunyuan_video/vae_refiner.py
index 9f750dcc4..ddf77cd0e 100644
--- a/comfy/ldm/hunyuan_video/vae_refiner.py
+++ b/comfy/ldm/hunyuan_video/vae_refiner.py
@@ -1,42 +1,12 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, AttnBlock, VideoConv3d, Normalize
+from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, AttnBlock, CarriedConv3d, Normalize, conv_carry_causal_3d, torch_cat_if_needed
 import comfy.ops
 import comfy.ldm.models.autoencoder
 import comfy.model_management
 ops = comfy.ops.disable_weight_init

-class NoPadConv3d(nn.Module):
-    def __init__(self, n_channels, out_channels, kernel_size, stride=1, dilation=1, padding=0, **kwargs):
-        super().__init__()
-        self.conv = ops.Conv3d(n_channels, out_channels, kernel_size, stride=stride, dilation=dilation, **kwargs)
-
-    def forward(self, x):
-        return self.conv(x)
-
-
-def conv_carry_causal_3d(xl, op, conv_carry_in=None, conv_carry_out=None):
-
-    x = xl[0]
-    xl.clear()
-
-    if conv_carry_out is not None:
-        to_push = x[:, :, -2:, :, :].clone()
-        conv_carry_out.append(to_push)
-
-    if isinstance(op, NoPadConv3d):
-        if conv_carry_in is None:
-            x = torch.nn.functional.pad(x, (1, 1, 1, 1, 2, 0), mode = 'replicate')
-        else:
-            carry_len = conv_carry_in[0].shape[2]
-            x = torch.cat([conv_carry_in.pop(0), x], dim=2)
-            x = torch.nn.functional.pad(x, (1, 1, 1, 1, 2 - carry_len, 0), mode = 'replicate')
-
-    out = op(x)
-
-    return out
-

 class RMS_norm(nn.Module):
     def __init__(self, dim):
@@ -49,7 +19,7 @@ class RMS_norm(nn.Module):
         return F.normalize(x, dim=1) * self.scale * comfy.model_management.cast_to(self.gamma, dtype=x.dtype, device=x.device)

 class DnSmpl(nn.Module):
-    def __init__(self, ic, oc, tds=True, refiner_vae=True, op=VideoConv3d):
+    def __init__(self, ic, oc, tds, refiner_vae, op):
         super().__init__()
         fct = 2 * 2 * 2 if tds else 1 * 2 * 2
         assert oc % fct == 0
@@ -109,7 +79,7 @@ class DnSmpl(nn.Module):

 class UpSmpl(nn.Module):
-    def __init__(self, ic, oc, tus=True, refiner_vae=True, op=VideoConv3d):
+    def __init__(self, ic, oc, tus, refiner_vae, op):
         super().__init__()
         fct = 2 * 2 * 2 if tus else 1 * 2 * 2
         self.conv = op(ic, oc * fct, kernel_size=3, stride=1, padding=1)
@@ -163,23 +133,6 @@ class UpSmpl(nn.Module):
         return h + x

-class HunyuanRefinerResnetBlock(ResnetBlock):
-    def __init__(self, in_channels, out_channels, conv_op=NoPadConv3d, norm_op=RMS_norm):
-        super().__init__(in_channels=in_channels, out_channels=out_channels, temb_channels=0, conv_op=conv_op, norm_op=norm_op)
-
-    def forward(self, x, conv_carry_in=None, conv_carry_out=None):
-        h = x
-        h = [ self.swish(self.norm1(x)) ]
-        h = conv_carry_causal_3d(h, self.conv1, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out)
-
-        h = [ self.dropout(self.swish(self.norm2(h))) ]
-        h = conv_carry_causal_3d(h, self.conv2, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out)
-
-        if self.in_channels != self.out_channels:
-            x = self.nin_shortcut(x)
-
-        return x+h
-
 class Encoder(nn.Module):
     def __init__(self, in_channels, z_channels, block_out_channels, num_res_blocks,
                  ffactor_spatial, ffactor_temporal, downsample_match_channel=True, refiner_vae=True, **_):
@@ -191,7 +144,7 @@ class Encoder(nn.Module):
         self.refiner_vae = refiner_vae
         if self.refiner_vae:
-            conv_op = NoPadConv3d
+            conv_op = CarriedConv3d
             norm_op = RMS_norm
         else:
             conv_op = ops.Conv3d
@@ -206,9 +159,10 @@ class Encoder(nn.Module):
         for i, tgt in enumerate(block_out_channels):
             stage = nn.Module()
-            stage.block = nn.ModuleList([HunyuanRefinerResnetBlock(in_channels=ch if j == 0 else tgt,
-                                                                   out_channels=tgt,
-                                                                   conv_op=conv_op, norm_op=norm_op)
+            stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt,
+                                                     out_channels=tgt,
+                                                     temb_channels=0,
+                                                     conv_op=conv_op, norm_op=norm_op)
                                          for j in range(num_res_blocks)])
             ch = tgt
             if i < depth:
@@ -218,9 +172,9 @@ class Encoder(nn.Module):
             self.down.append(stage)

         self.mid = nn.Module()
-        self.mid.block_1 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op)
+        self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op)
         self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=norm_op)
-        self.mid.block_2 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op)
+        self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op)

         self.norm_out = norm_op(ch)
         self.conv_out = conv_op(ch, z_channels << 1, 3, 1, 1)
@@ -246,22 +200,20 @@ class Encoder(nn.Module):
                 conv_carry_out = []
                 if i == len(x) - 1:
                     conv_carry_out = None
+            x1 = [ x1 ]
             x1 = conv_carry_causal_3d(x1, self.conv_in, conv_carry_in, conv_carry_out)
             for stage in self.down:
                 for blk in stage.block:
-                    x1 = blk(x1, conv_carry_in, conv_carry_out)
+                    x1 = blk(x1, None, conv_carry_in, conv_carry_out)
                 if hasattr(stage, 'downsample'):
                     x1 = stage.downsample(x1, conv_carry_in, conv_carry_out)
             out.append(x1)
             conv_carry_in = conv_carry_out
-        if len(out) > 1:
-            out = torch.cat(out, dim=2)
-        else:
-            out = out[0]
+        out = torch_cat_if_needed(out,
return F.normalize(x, dim=1) * self.scale * comfy.model_management.cast_to(self.gamma, dtype=x.dtype, device=x.device) class DnSmpl(nn.Module): - def __init__(self, ic, oc, tds=True, refiner_vae=True, op=VideoConv3d): + def __init__(self, ic, oc, tds, refiner_vae, op): super().__init__() fct = 2 * 2 * 2 if tds else 1 * 2 * 2 assert oc % fct == 0 @@ -109,7 +79,7 @@ class DnSmpl(nn.Module): class UpSmpl(nn.Module): - def __init__(self, ic, oc, tus=True, refiner_vae=True, op=VideoConv3d): + def __init__(self, ic, oc, tus, refiner_vae, op): super().__init__() fct = 2 * 2 * 2 if tus else 1 * 2 * 2 self.conv = op(ic, oc * fct, kernel_size=3, stride=1, padding=1) @@ -163,23 +133,6 @@ class UpSmpl(nn.Module): return h + x -class HunyuanRefinerResnetBlock(ResnetBlock): - def __init__(self, in_channels, out_channels, conv_op=NoPadConv3d, norm_op=RMS_norm): - super().__init__(in_channels=in_channels, out_channels=out_channels, temb_channels=0, conv_op=conv_op, norm_op=norm_op) - - def forward(self, x, conv_carry_in=None, conv_carry_out=None): - h = x - h = [ self.swish(self.norm1(x)) ] - h = conv_carry_causal_3d(h, self.conv1, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out) - - h = [ self.dropout(self.swish(self.norm2(h))) ] - h = conv_carry_causal_3d(h, self.conv2, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out) - - if self.in_channels != self.out_channels: - x = self.nin_shortcut(x) - - return x+h - class Encoder(nn.Module): def __init__(self, in_channels, z_channels, block_out_channels, num_res_blocks, ffactor_spatial, ffactor_temporal, downsample_match_channel=True, refiner_vae=True, **_): @@ -191,7 +144,7 @@ class Encoder(nn.Module): self.refiner_vae = refiner_vae if self.refiner_vae: - conv_op = NoPadConv3d + conv_op = CarriedConv3d norm_op = RMS_norm else: conv_op = ops.Conv3d @@ -206,9 +159,10 @@ class Encoder(nn.Module): for i, tgt in enumerate(block_out_channels): stage = nn.Module() - stage.block = nn.ModuleList([HunyuanRefinerResnetBlock(in_channels=ch if j == 0 else tgt, - out_channels=tgt, - conv_op=conv_op, norm_op=norm_op) + stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + temb_channels=0, + conv_op=conv_op, norm_op=norm_op) for j in range(num_res_blocks)]) ch = tgt if i < depth: @@ -218,9 +172,9 @@ class Encoder(nn.Module): self.down.append(stage) self.mid = nn.Module() - self.mid.block_1 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=norm_op) - self.mid.block_2 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.norm_out = norm_op(ch) self.conv_out = conv_op(ch, z_channels << 1, 3, 1, 1) @@ -246,22 +200,20 @@ class Encoder(nn.Module): conv_carry_out = [] if i == len(x) - 1: conv_carry_out = None + x1 = [ x1 ] x1 = conv_carry_causal_3d(x1, self.conv_in, conv_carry_in, conv_carry_out) for stage in self.down: for blk in stage.block: - x1 = blk(x1, conv_carry_in, conv_carry_out) + x1 = blk(x1, None, conv_carry_in, conv_carry_out) if hasattr(stage, 'downsample'): x1 = stage.downsample(x1, conv_carry_in, conv_carry_out) out.append(x1) conv_carry_in = conv_carry_out - if len(out) > 1: - out = torch.cat(out, dim=2) - else: - out = out[0] + out = torch_cat_if_needed(out, 
dim=2) x = self.mid.block_2(self.mid.attn_1(self.mid.block_1(out))) del out @@ -288,7 +240,7 @@ class Decoder(nn.Module): self.refiner_vae = refiner_vae if self.refiner_vae: - conv_op = NoPadConv3d + conv_op = CarriedConv3d norm_op = RMS_norm else: conv_op = ops.Conv3d @@ -298,9 +250,9 @@ class Decoder(nn.Module): self.conv_in = conv_op(z_channels, ch, kernel_size=3, stride=1, padding=1) self.mid = nn.Module() - self.mid.block_1 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) + self.mid.block_1 = ResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.mid.attn_1 = AttnBlock(ch, conv_op=ops.Conv3d, norm_op=norm_op) - self.mid.block_2 = HunyuanRefinerResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) + self.mid.block_2 = ResnetBlock(in_channels=ch, out_channels=ch, conv_op=conv_op, norm_op=norm_op) self.up = nn.ModuleList() depth = (ffactor_spatial >> 1).bit_length() @@ -308,9 +260,10 @@ class Decoder(nn.Module): for i, tgt in enumerate(block_out_channels): stage = nn.Module() - stage.block = nn.ModuleList([HunyuanRefinerResnetBlock(in_channels=ch if j == 0 else tgt, - out_channels=tgt, - conv_op=conv_op, norm_op=norm_op) + stage.block = nn.ModuleList([ResnetBlock(in_channels=ch if j == 0 else tgt, + out_channels=tgt, + temb_channels=0, + conv_op=conv_op, norm_op=norm_op) for j in range(num_res_blocks + 1)]) ch = tgt if i < depth: @@ -340,7 +293,7 @@ class Decoder(nn.Module): conv_carry_out = None for stage in self.up: for blk in stage.block: - x1 = blk(x1, conv_carry_in, conv_carry_out) + x1 = blk(x1, None, conv_carry_in, conv_carry_out) if hasattr(stage, 'upsample'): x1 = stage.upsample(x1, conv_carry_in, conv_carry_out) @@ -350,10 +303,7 @@ class Decoder(nn.Module): conv_carry_in = conv_carry_out del x - if len(out) > 1: - out = torch.cat(out, dim=2) - else: - out = out[0] + out = torch_cat_if_needed(out, dim=2) if not self.refiner_vae: if z.shape[-3] == 1: diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index de1e01cc8..681a55db5 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -13,6 +13,12 @@ if model_management.xformers_enabled_vae(): import xformers import xformers.ops +def torch_cat_if_needed(xl, dim): + if len(xl) > 1: + return torch.cat(xl, dim) + else: + return xl[0] + def get_timestep_embedding(timesteps, embedding_dim): """ This matches the implementation in Denoising Diffusion Probabilistic Models: @@ -43,6 +49,37 @@ def Normalize(in_channels, num_groups=32): return ops.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) +class CarriedConv3d(nn.Module): + def __init__(self, n_channels, out_channels, kernel_size, stride=1, dilation=1, padding=0, **kwargs): + super().__init__() + self.conv = ops.Conv3d(n_channels, out_channels, kernel_size, stride=stride, dilation=dilation, **kwargs) + + def forward(self, x): + return self.conv(x) + + +def conv_carry_causal_3d(xl, op, conv_carry_in=None, conv_carry_out=None): + + x = xl[0] + xl.clear() + + if isinstance(op, CarriedConv3d): + if conv_carry_in is None: + x = torch.nn.functional.pad(x, (1, 1, 1, 1, 2, 0), mode = 'replicate') + else: + carry_len = conv_carry_in[0].shape[2] + x = torch.nn.functional.pad(x, (1, 1, 1, 1, 2 - carry_len, 0), mode = 'replicate') + x = torch.cat([conv_carry_in.pop(0), x], dim=2) + + if conv_carry_out is not None: + to_push = x[:, :, -2:, :, :].clone() + 
conv_carry_out.append(to_push) + + out = op(x) + + return out + + class VideoConv3d(nn.Module): def __init__(self, n_channels, out_channels, kernel_size, stride=1, dilation=1, padding_mode='replicate', padding=1, **kwargs): super().__init__() @@ -89,29 +126,24 @@ class Upsample(nn.Module): stride=1, padding=1) - def forward(self, x): + def forward(self, x, conv_carry_in=None, conv_carry_out=None): scale_factor = self.scale_factor if isinstance(scale_factor, (int, float)): scale_factor = (scale_factor,) * (x.ndim - 2) if x.ndim == 5 and scale_factor[0] > 1.0: - t = x.shape[2] - if t > 1: - a, b = x.split((1, t - 1), dim=2) - del x - b = interpolate_up(b, scale_factor) - else: - a = x - - a = interpolate_up(a.squeeze(2), scale_factor=scale_factor[1:]).unsqueeze(2) - if t > 1: - x = torch.cat((a, b), dim=2) - else: - x = a + results = [] + if conv_carry_in is None: + first = x[:, :, :1, :, :] + results.append(interpolate_up(first.squeeze(2), scale_factor=scale_factor[1:]).unsqueeze(2)) + x = x[:, :, 1:, :, :] + if x.shape[2] > 0: + results.append(interpolate_up(x, scale_factor)) + x = torch_cat_if_needed(results, dim=2) else: x = interpolate_up(x, scale_factor) if self.with_conv: - x = self.conv(x) + x = conv_carry_causal_3d([x], self.conv, conv_carry_in, conv_carry_out) return x @@ -127,17 +159,20 @@ class Downsample(nn.Module): stride=stride, padding=0) - def forward(self, x): + def forward(self, x, conv_carry_in=None, conv_carry_out=None): if self.with_conv: - if x.ndim == 4: + if isinstance(self.conv, CarriedConv3d): + x = conv_carry_causal_3d([x], self.conv, conv_carry_in, conv_carry_out) + elif x.ndim == 4: pad = (0, 1, 0, 1) mode = "constant" x = torch.nn.functional.pad(x, pad, mode=mode, value=0) + x = self.conv(x) elif x.ndim == 5: pad = (1, 1, 1, 1, 2, 0) mode = "replicate" x = torch.nn.functional.pad(x, pad, mode=mode) - x = self.conv(x) + x = self.conv(x) else: x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) return x @@ -183,23 +218,23 @@ class ResnetBlock(nn.Module): stride=1, padding=0) - def forward(self, x, temb=None): + def forward(self, x, temb=None, conv_carry_in=None, conv_carry_out=None): h = x h = self.norm1(h) - h = self.swish(h) - h = self.conv1(h) + h = [ self.swish(h) ] + h = conv_carry_causal_3d(h, self.conv1, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out) if temb is not None: h = h + self.temb_proj(self.swish(temb))[:,:,None,None] h = self.norm2(h) h = self.swish(h) - h = self.dropout(h) - h = self.conv2(h) + h = [ self.dropout(h) ] + h = conv_carry_causal_3d(h, self.conv2, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out) if self.in_channels != self.out_channels: if self.use_conv_shortcut: - x = self.conv_shortcut(x) + x = conv_carry_causal_3d([x], self.conv_shortcut, conv_carry_in=conv_carry_in, conv_carry_out=conv_carry_out) else: x = self.nin_shortcut(x) @@ -520,9 +555,14 @@ class Encoder(nn.Module): self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels + self.carried = False if conv3d: - conv_op = VideoConv3d + if not attn_resolutions: + conv_op = CarriedConv3d + self.carried = True + else: + conv_op = VideoConv3d mid_attn_conv_op = ops.Conv3d else: conv_op = ops.Conv2d @@ -535,6 +575,7 @@ class Encoder(nn.Module): stride=1, padding=1) + self.time_compress = 1 curr_res = resolution in_ch_mult = (1,)+tuple(ch_mult) self.in_ch_mult = in_ch_mult @@ -561,10 +602,15 @@ class Encoder(nn.Module): if time_compress is not None: if (self.num_resolutions - 1 - i_level) > 
math.log2(time_compress): stride = (1, 2, 2) + else: + self.time_compress *= 2 down.downsample = Downsample(block_in, resamp_with_conv, stride=stride, conv_op=conv_op) curr_res = curr_res // 2 self.down.append(down) + if time_compress is not None: + self.time_compress = time_compress + # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock(in_channels=block_in, @@ -590,15 +636,42 @@ class Encoder(nn.Module): def forward(self, x): # timestep embedding temb = None - # downsampling - h = self.conv_in(x) - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](h, temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - if i_level != self.num_resolutions-1: - h = self.down[i_level].downsample(h) + + if self.carried: + xl = [x[:, :, :1, :, :]] + if x.shape[2] > self.time_compress: + tc = self.time_compress + xl += torch.split(x[:, :, 1: 1 + ((x.shape[2] - 1) // tc) * tc, :, :], tc * 2, dim = 2) + x = xl + else: + x = [x] + out = [] + + conv_carry_in = None + + for i, x1 in enumerate(x): + conv_carry_out = [] + if i == len(x) - 1: + conv_carry_out = None + + # downsampling + x1 = [ x1 ] + h1 = conv_carry_causal_3d(x1, self.conv_in, conv_carry_in, conv_carry_out) + + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h1 = self.down[i_level].block[i_block](h1, temb, conv_carry_in, conv_carry_out) + if len(self.down[i_level].attn) > 0: + assert i == 0 #carried should not happen if attn exists + h1 = self.down[i_level].attn[i_block](h1) + if i_level != self.num_resolutions-1: + h1 = self.down[i_level].downsample(h1, conv_carry_in, conv_carry_out) + + out.append(h1) + conv_carry_in = conv_carry_out + + h = torch_cat_if_needed(out, dim=2) + del out # middle h = self.mid.block_1(h, temb) @@ -607,15 +680,15 @@ class Encoder(nn.Module): # end h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) + h = [ nonlinearity(h) ] + h = conv_carry_causal_3d(h, self.conv_out) return h class Decoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, + resolution, z_channels, tanh_out=False, use_linear_attn=False, conv_out_op=ops.Conv2d, resnet_op=ResnetBlock, attn_op=AttnBlock, @@ -629,12 +702,18 @@ class Decoder(nn.Module): self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels - self.give_pre_end = give_pre_end self.tanh_out = tanh_out + self.carried = False if conv3d: - conv_op = VideoConv3d - conv_out_op = VideoConv3d + if not attn_resolutions and resnet_op == ResnetBlock: + conv_op = CarriedConv3d + conv_out_op = CarriedConv3d + self.carried = True + else: + conv_op = VideoConv3d + conv_out_op = VideoConv3d + mid_attn_conv_op = ops.Conv3d else: conv_op = ops.Conv2d @@ -709,29 +788,43 @@ class Decoder(nn.Module): temb = None # z to block_in - h = self.conv_in(z) + h = conv_carry_causal_3d([z], self.conv_in) # middle h = self.mid.block_1(h, temb, **kwargs) h = self.mid.attn_1(h, **kwargs) h = self.mid.block_2(h, temb, **kwargs) + if self.carried: + h = torch.split(h, 2, dim=2) + else: + h = [ h ] + out = [] + + conv_carry_in = None + # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb, **kwargs) - if 
len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h, **kwargs) - if i_level != 0: - h = self.up[i_level].upsample(h) + for i, h1 in enumerate(h): + conv_carry_out = [] + if i == len(h) - 1: + conv_carry_out = None + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h1 = self.up[i_level].block[i_block](h1, temb, conv_carry_in, conv_carry_out, **kwargs) + if len(self.up[i_level].attn) > 0: + assert i == 0 #carried should not happen if attn exists + h1 = self.up[i_level].attn[i_block](h1, **kwargs) + if i_level != 0: + h1 = self.up[i_level].upsample(h1, conv_carry_in, conv_carry_out) - # end - if self.give_pre_end: - return h + h1 = self.norm_out(h1) + h1 = [ nonlinearity(h1) ] + h1 = conv_carry_causal_3d(h1, self.conv_out, conv_carry_in, conv_carry_out) + if self.tanh_out: + h1 = torch.tanh(h1) + out.append(h1) + conv_carry_in = conv_carry_out - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h, **kwargs) - if self.tanh_out: - h = torch.tanh(h) - return h + out = torch_cat_if_needed(out, dim=2) + + return out From c120eee5bacca643062657d2a7efad83c7d4d828 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 2 Dec 2025 21:17:13 -0800 Subject: [PATCH 0977/1073] Add MatchType, DynamicCombo, and Autogrow support to V3 Schema (#10832) * Added output_matchtypes to generated json for v3, initial backend support for MatchType, created nodes_logic.py and added SwitchNode * Fixed providing list of allowed_types * Add workaround in validation.py for V3 Combo outputs not working as Combo inputs * Make match type receive_type pass validation * Also add MatchType check to input_type in validation - will likely trigger when connecting to non-lazy stuff * Make sure this PR only has MatchType stuff * Initial work on DynamicCombo * Add get_dynamic function, not yet filled out correctly * Mark Switch node as Beta * Make sure other unfinished dynamic types are not accidentally used * Send DynamicCombo.Option inputs in the same format as normal v1 inputs * add dynamic combo test node * Support validation of inputs and outputs * Add missing input params to DynamicCombo.Input * Add get_all function to inputs for id validation purposes * Fix imports for v3 returning everything when doing io/ui/IO/UI instead of what is in __all__ of _io.py and _ui.py * Modifying behavior of get_dynamic in V3 + serialization so can be used in execution code * Fix v3 schema validation code after changes * Refactor hidden_values for v3 in execution.py to be more general v3_data, add helper functions for dynamic behavior, preparing for restructuring dynamic type into object (not finished yet) * Add nesting of inputs on DynamicCombo during execution * Work with latest frontend commits * Fix cringe arrows * frontend will no longer namespace dynamic inputs widgets so reflect that in code, refactor build_nested_inputs * Prepare Autogrow support for the love of the game * satisfy ruff * Create test nodes for Autogrow to collab with frontend development * Add nested combo to DCTestNode * Remove array support from build_nested_inputs, properly handle missing expected values * Make execution.validate_inputs properly validate required dynamic inputs, renamed dynamic_data to dynamic_paths for clarity * MatchType does not need any DynamicInput/Output features on backend; will increase compatibility with dynamic types * Probably need this for ruff check * Change MatchType to have template be the first and only required param; output id's do nothing right 
now, so no need * Fix merge regression with LatentUpscaleModel type not being put in __all__ for _io.py, fix invalid type hint for validate_inputs * Make Switch node inputs optional, disallow both inputs from being missing, and still work properly with lazy; when one input is missing, use the other no matter what the switch is set to * Satisfy ruff * Move MatchType code above the types that inherit from DynamicInput * Add DynamicSlot type, awaiting frontend support * Make curr_prefix creation happen in Autogrow, move curr_prefix in DynamicCombo to only be created if input exists in live_inputs * I was confused, fixing accidentally redundant curr_prefix addition in Autogrow * Make sure Autogrow inputs are force_input = True when WidgetInput, fix runtime validation by removing original input from expected inputs, fix min/max bounds, change test nodes slightly * Remove unnecessary id usage in Autogrow test node outputs * Commented out Switch node + test nodes * Remove commented out code from Autogrow * Make TemplatePrefix max more clear, allow max == 1 * Replace all dict[str] with dict[str, Any] * Renamed add_to_dict_live_inputs to expand_schema_for_dynamic * Fixed typo in DynamicSlot input code * note about live_inputs not being present soon in get_v1_info (internal function anyway) * For now, hide DynamicCombo and Autogrow from public interface * Removed comment --- comfy_api/latest/__init__.py | 4 +- comfy_api/latest/_io.py | 416 ++++++++++++++++++++++++++------- comfy_api/latest/_io_public.py | 1 + comfy_api/latest/_ui_public.py | 1 + comfy_api/v0_0_2/__init__.py | 6 +- comfy_execution/validation.py | 6 + comfy_extras/nodes_logic.py | 155 ++++++++++++ execution.py | 40 ++-- nodes.py | 1 + 9 files changed, 525 insertions(+), 105 deletions(-) create mode 100644 comfy_api/latest/_io_public.py create mode 100644 comfy_api/latest/_ui_public.py create mode 100644 comfy_extras/nodes_logic.py diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py index 176ae36e0..0fa01d1e7 100644 --- a/comfy_api/latest/__init__.py +++ b/comfy_api/latest/__init__.py @@ -8,8 +8,8 @@ from comfy_api.internal.async_to_sync import create_sync_class from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents, MESH, VOXEL -from . import _io as io -from . import _ui as ui +from . import _io_public as io +from . 
import _ui_public as ui # from comfy_api.latest._resources import _RESOURCES as resources #noqa: F401 from comfy_execution.utils import get_executing_context from comfy_execution.progress import get_progress_state, PreviewImageTuple diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 79c0722a9..257f07c42 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -4,6 +4,7 @@ import copy import inspect from abc import ABC, abstractmethod from collections import Counter +from collections.abc import Iterable from dataclasses import asdict, dataclass from enum import Enum from typing import Any, Callable, Literal, TypedDict, TypeVar, TYPE_CHECKING @@ -150,6 +151,9 @@ class _IO_V3: def __init__(self): pass + def validate(self): + pass + @property def io_type(self): return self.Parent.io_type @@ -182,6 +186,9 @@ class Input(_IO_V3): def get_io_type(self): return _StringIOType(self.io_type) + def get_all(self) -> list[Input]: + return [self] + class WidgetInput(Input): ''' Base class for a V3 Input with widget. @@ -814,13 +821,61 @@ class MultiType: else: return super().as_dict() +@comfytype(io_type="COMFY_MATCHTYPE_V3") +class MatchType(ComfyTypeIO): + class Template: + def __init__(self, template_id: str, allowed_types: _ComfyType | list[_ComfyType] = AnyType): + self.template_id = template_id + # account for syntactic sugar + if not isinstance(allowed_types, Iterable): + allowed_types = [allowed_types] + for t in allowed_types: + if not isinstance(t, type): + if not isinstance(t, _ComfyType): + raise ValueError(f"Allowed types must be a ComfyType or a list of ComfyTypes, got {t.__class__.__name__}") + else: + if not issubclass(t, _ComfyType): + raise ValueError(f"Allowed types must be a ComfyType or a list of ComfyTypes, got {t.__name__}") + self.allowed_types = allowed_types + + def as_dict(self): + return { + "template_id": self.template_id, + "allowed_types": ",".join([t.io_type for t in self.allowed_types]), + } + + class Input(Input): + def __init__(self, id: str, template: MatchType.Template, + display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): + super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) + self.template = template + + def as_dict(self): + return super().as_dict() | prune_dict({ + "template": self.template.as_dict(), + }) + + class Output(Output): + def __init__(self, template: MatchType.Template, id: str=None, display_name: str=None, tooltip: str=None, + is_output_list=False): + super().__init__(id, display_name, tooltip, is_output_list) + self.template = template + + def as_dict(self): + return super().as_dict() | prune_dict({ + "template": self.template.as_dict(), + }) + class DynamicInput(Input, ABC): ''' Abstract class for dynamic input registration. ''' - @abstractmethod def get_dynamic(self) -> list[Input]: - ... + return [] + + def expand_schema_for_dynamic(self, d: dict[str, Any], live_inputs: dict[str, Any], curr_prefix=''): + pass + class DynamicOutput(Output, ABC): ''' @@ -830,99 +885,223 @@ class DynamicOutput(Output, ABC): is_output_list=False): super().__init__(id, display_name, tooltip, is_output_list) - @abstractmethod def get_dynamic(self) -> list[Output]: - ... 
+ return [] @comfytype(io_type="COMFY_AUTOGROW_V3") -class AutogrowDynamic(ComfyTypeI): - Type = list[Any] - class Input(DynamicInput): - def __init__(self, id: str, template_input: Input, min: int=1, max: int=None, - display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): - super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) - self.template_input = template_input - if min is not None: - assert(min >= 1) - if max is not None: - assert(max >= 1) +class Autogrow(ComfyTypeI): + Type = dict[str, Any] + _MaxNames = 100 # NOTE: max 100 names for sanity + + class _AutogrowTemplate: + def __init__(self, input: Input): + # dynamic inputs are not allowed as the template input + assert(not isinstance(input, DynamicInput)) + self.input = copy.copy(input) + if isinstance(self.input, WidgetInput): + self.input.force_input = True + self.names: list[str] = [] + self.cached_inputs = {} + + def _create_input(self, input: Input, name: str): + new_input = copy.copy(self.input) + new_input.id = name + return new_input + + def _create_cached_inputs(self): + for name in self.names: + self.cached_inputs[name] = self._create_input(self.input, name) + + def get_all(self) -> list[Input]: + return list(self.cached_inputs.values()) + + def as_dict(self): + return prune_dict({ + "input": create_input_dict_v1([self.input]), + }) + + def validate(self): + self.input.validate() + + def expand_schema_for_dynamic(self, d: dict[str, Any], live_inputs: dict[str, Any], curr_prefix=''): + real_inputs = [] + for name, input in self.cached_inputs.items(): + if name in live_inputs: + real_inputs.append(input) + add_to_input_dict_v1(d, real_inputs, live_inputs, curr_prefix) + add_dynamic_id_mapping(d, real_inputs, curr_prefix) + + class TemplatePrefix(_AutogrowTemplate): + def __init__(self, input: Input, prefix: str, min: int=1, max: int=10): + super().__init__(input) + self.prefix = prefix + assert(min >= 0) + assert(max >= 1) + assert(max <= Autogrow._MaxNames) self.min = min self.max = max + self.names = [f"{self.prefix}{i}" for i in range(self.max)] + self._create_cached_inputs() + + def as_dict(self): + return super().as_dict() | prune_dict({ + "prefix": self.prefix, + "min": self.min, + "max": self.max, + }) + + class TemplateNames(_AutogrowTemplate): + def __init__(self, input: Input, names: list[str], min: int=1): + super().__init__(input) + self.names = names[:Autogrow._MaxNames] + assert(min >= 0) + self.min = min + self._create_cached_inputs() + + def as_dict(self): + return super().as_dict() | prune_dict({ + "names": self.names, + "min": self.min, + }) + + class Input(DynamicInput): + def __init__(self, id: str, template: Autogrow.TemplatePrefix | Autogrow.TemplateNames, + display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): + super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) + self.template = template + + def as_dict(self): + return super().as_dict() | prune_dict({ + "template": self.template.as_dict(), + }) def get_dynamic(self) -> list[Input]: - curr_count = 1 - new_inputs = [] - for i in range(self.min): - new_input = copy.copy(self.template_input) - new_input.id = f"{new_input.id}{curr_count}_${self.id}_ag$" - if new_input.display_name is not None: - new_input.display_name = f"{new_input.display_name}{curr_count}" - new_input.optional = self.optional or new_input.optional - if isinstance(self.template_input, WidgetInput): - new_input.force_input = True - new_inputs.append(new_input) - curr_count 
+= 1 - # pretend to expand up to max - for i in range(curr_count-1, self.max): - new_input = copy.copy(self.template_input) - new_input.id = f"{new_input.id}{curr_count}_${self.id}_ag$" - if new_input.display_name is not None: - new_input.display_name = f"{new_input.display_name}{curr_count}" - new_input.optional = True - if isinstance(self.template_input, WidgetInput): - new_input.force_input = True - new_inputs.append(new_input) - curr_count += 1 - return new_inputs + return self.template.get_all() -@comfytype(io_type="COMFY_COMBODYNAMIC_V3") -class ComboDynamic(ComfyTypeI): - class Input(DynamicInput): - def __init__(self, id: str): - pass + def get_all(self) -> list[Input]: + return [self] + self.template.get_all() -@comfytype(io_type="COMFY_MATCHTYPE_V3") -class MatchType(ComfyTypeIO): - class Template: - def __init__(self, template_id: str, allowed_types: _ComfyType | list[_ComfyType]): - self.template_id = template_id - self.allowed_types = [allowed_types] if isinstance(allowed_types, _ComfyType) else allowed_types + def validate(self): + self.template.validate() + + def expand_schema_for_dynamic(self, d: dict[str, Any], live_inputs: dict[str, Any], curr_prefix=''): + curr_prefix = f"{curr_prefix}{self.id}." + # need to remove self from expected inputs dictionary; replaced by template inputs in frontend + for inner_dict in d.values(): + if self.id in inner_dict: + del inner_dict[self.id] + self.template.expand_schema_for_dynamic(d, live_inputs, curr_prefix) + +@comfytype(io_type="COMFY_DYNAMICCOMBO_V3") +class DynamicCombo(ComfyTypeI): + Type = dict[str, Any] + + class Option: + def __init__(self, key: str, inputs: list[Input]): + self.key = key + self.inputs = inputs def as_dict(self): return { - "template_id": self.template_id, - "allowed_types": "".join(t.io_type for t in self.allowed_types), + "key": self.key, + "inputs": create_input_dict_v1(self.inputs), } class Input(DynamicInput): - def __init__(self, id: str, template: MatchType.Template, + def __init__(self, id: str, options: list[DynamicCombo.Option], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None): super().__init__(id, display_name, optional, tooltip, lazy, extra_dict) - self.template = template + self.options = options + + def expand_schema_for_dynamic(self, d: dict[str, Any], live_inputs: dict[str, Any], curr_prefix=''): + # check if dynamic input's id is in live_inputs + if self.id in live_inputs: + curr_prefix = f"{curr_prefix}{self.id}." 
+ key = live_inputs[self.id] + selected_option = None + for option in self.options: + if option.key == key: + selected_option = option + break + if selected_option is not None: + add_to_input_dict_v1(d, selected_option.inputs, live_inputs, curr_prefix) + add_dynamic_id_mapping(d, selected_option.inputs, curr_prefix, self) def get_dynamic(self) -> list[Input]: - return [self] + return [input for option in self.options for input in option.inputs] + + def get_all(self) -> list[Input]: + return [self] + [input for option in self.options for input in option.inputs] def as_dict(self): return super().as_dict() | prune_dict({ - "template": self.template.as_dict(), + "options": [o.as_dict() for o in self.options], }) - class Output(DynamicOutput): - def __init__(self, id: str, template: MatchType.Template, display_name: str=None, tooltip: str=None, - is_output_list=False): - super().__init__(id, display_name, tooltip, is_output_list) - self.template = template + def validate(self): + # make sure all nested inputs are validated + for option in self.options: + for input in option.inputs: + input.validate() - def get_dynamic(self) -> list[Output]: - return [self] +@comfytype(io_type="COMFY_DYNAMICSLOT_V3") +class DynamicSlot(ComfyTypeI): + Type = dict[str, Any] + + class Input(DynamicInput): + def __init__(self, slot: Input, inputs: list[Input], + display_name: str=None, tooltip: str=None, lazy: bool=None, extra_dict=None): + assert(not isinstance(slot, DynamicInput)) + self.slot = copy.copy(slot) + self.slot.display_name = slot.display_name if slot.display_name is not None else display_name + optional = True + self.slot.tooltip = slot.tooltip if slot.tooltip is not None else tooltip + self.slot.lazy = slot.lazy if slot.lazy is not None else lazy + self.slot.extra_dict = slot.extra_dict if slot.extra_dict is not None else extra_dict + super().__init__(slot.id, self.slot.display_name, optional, self.slot.tooltip, self.slot.lazy, self.slot.extra_dict) + self.inputs = inputs + self.force_input = None + # force widget inputs to have no widgets, otherwise this would be awkward + if isinstance(self.slot, WidgetInput): + self.force_input = True + self.slot.force_input = True + + def expand_schema_for_dynamic(self, d: dict[str, Any], live_inputs: dict[str, Any], curr_prefix=''): + if self.id in live_inputs: + curr_prefix = f"{curr_prefix}{self.id}." 
+ add_to_input_dict_v1(d, self.inputs, live_inputs, curr_prefix) + add_dynamic_id_mapping(d, [self.slot] + self.inputs, curr_prefix) + + def get_dynamic(self) -> list[Input]: + return [self.slot] + self.inputs + + def get_all(self) -> list[Input]: + return [self] + [self.slot] + self.inputs def as_dict(self): return super().as_dict() | prune_dict({ - "template": self.template.as_dict(), + "slotType": str(self.slot.get_io_type()), + "inputs": create_input_dict_v1(self.inputs), + "forceInput": self.force_input, }) + def validate(self): + self.slot.validate() + for input in self.inputs: + input.validate() + +def add_dynamic_id_mapping(d: dict[str, Any], inputs: list[Input], curr_prefix: str, self: DynamicInput=None): + dynamic = d.setdefault("dynamic_paths", {}) + if self is not None: + dynamic[self.id] = f"{curr_prefix}{self.id}" + for i in inputs: + if not isinstance(i, DynamicInput): + dynamic[f"{i.id}"] = f"{curr_prefix}{i.id}" + +class V3Data(TypedDict): + hidden_inputs: dict[str, Any] + dynamic_paths: dict[str, Any] class HiddenHolder: def __init__(self, unique_id: str, prompt: Any, @@ -984,6 +1163,7 @@ class NodeInfoV1: output_is_list: list[bool]=None output_name: list[str]=None output_tooltips: list[str]=None + output_matchtypes: list[str]=None name: str=None display_name: str=None description: str=None @@ -1061,7 +1241,11 @@ class Schema: '''Validate the schema: - verify ids on inputs and outputs are unique - both internally and in relation to each other ''' - input_ids = [i.id for i in self.inputs] if self.inputs is not None else [] + nested_inputs: list[Input] = [] + if self.inputs is not None: + for input in self.inputs: + nested_inputs.extend(input.get_all()) + input_ids = [i.id for i in nested_inputs] if nested_inputs is not None else [] output_ids = [o.id for o in self.outputs] if self.outputs is not None else [] input_set = set(input_ids) output_set = set(output_ids) @@ -1077,6 +1261,13 @@ class Schema: issues.append(f"Ids must be unique between inputs and outputs, but {intersection} are not.") if len(issues) > 0: raise ValueError("\n".join(issues)) + # validate inputs and outputs + if self.inputs is not None: + for input in self.inputs: + input.validate() + if self.outputs is not None: + for output in self.outputs: + output.validate() def finalize(self): """Add hidden based on selected schema options, and give outputs without ids default ids.""" @@ -1102,19 +1293,10 @@ class Schema: if output.id is None: output.id = f"_{i}_{output.io_type}_" - def get_v1_info(self, cls) -> NodeInfoV1: + def get_v1_info(self, cls, live_inputs: dict[str, Any]=None) -> NodeInfoV1: + # NOTE: live_inputs will not be used anymore very soon and this will be done another way # get V1 inputs - input = { - "required": {} - } - if self.inputs: - for i in self.inputs: - if isinstance(i, DynamicInput): - dynamic_inputs = i.get_dynamic() - for d in dynamic_inputs: - add_to_dict_v1(d, input) - else: - add_to_dict_v1(i, input) + input = create_input_dict_v1(self.inputs, live_inputs) if self.hidden: for hidden in self.hidden: input.setdefault("hidden", {})[hidden.name] = (hidden.value,) @@ -1123,12 +1305,24 @@ class Schema: output_is_list = [] output_name = [] output_tooltips = [] + output_matchtypes = [] + any_matchtypes = False if self.outputs: for o in self.outputs: output.append(o.io_type) output_is_list.append(o.is_output_list) output_name.append(o.display_name if o.display_name else o.io_type) output_tooltips.append(o.tooltip if o.tooltip else None) + # special handling for MatchType + if isinstance(o, 
MatchType.Output): + output_matchtypes.append(o.template.template_id) + any_matchtypes = True + else: + output_matchtypes.append(None) + + # clear out lists that are all None + if not any_matchtypes: + output_matchtypes = None info = NodeInfoV1( input=input, @@ -1137,6 +1331,7 @@ class Schema: output_is_list=output_is_list, output_name=output_name, output_tooltips=output_tooltips, + output_matchtypes=output_matchtypes, name=self.node_id, display_name=self.display_name, category=self.category, @@ -1182,16 +1377,57 @@ class Schema: return info -def add_to_dict_v1(i: Input, input: dict): +def create_input_dict_v1(inputs: list[Input], live_inputs: dict[str, Any]=None) -> dict: + input = { + "required": {} + } + add_to_input_dict_v1(input, inputs, live_inputs) + return input + +def add_to_input_dict_v1(d: dict[str, Any], inputs: list[Input], live_inputs: dict[str, Any]=None, curr_prefix=''): + for i in inputs: + if isinstance(i, DynamicInput): + add_to_dict_v1(i, d) + if live_inputs is not None: + i.expand_schema_for_dynamic(d, live_inputs, curr_prefix) + else: + add_to_dict_v1(i, d) + +def add_to_dict_v1(i: Input, d: dict, dynamic_dict: dict=None): key = "optional" if i.optional else "required" as_dict = i.as_dict() # for v1, we don't want to include the optional key as_dict.pop("optional", None) - input.setdefault(key, {})[i.id] = (i.get_io_type(), as_dict) + if dynamic_dict is None: + value = (i.get_io_type(), as_dict) + else: + value = (i.get_io_type(), as_dict, dynamic_dict) + d.setdefault(key, {})[i.id] = value def add_to_dict_v3(io: Input | Output, d: dict): d[io.id] = (io.get_io_type(), io.as_dict()) +def build_nested_inputs(values: dict[str, Any], v3_data: V3Data): + paths = v3_data.get("dynamic_paths", None) + if paths is None: + return values + values = values.copy() + result = {} + + for key, path in paths.items(): + parts = path.split(".") + current = result + + for i, p in enumerate(parts): + is_last = (i == len(parts) - 1) + + if is_last: + current[p] = values.pop(key, None) + else: + current = current.setdefault(p, {}) + + values.update(result) + return values class _ComfyNodeBaseInternal(_ComfyNodeInternal): @@ -1311,12 +1547,12 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal): @final @classmethod - def PREPARE_CLASS_CLONE(cls, hidden_inputs: dict) -> type[ComfyNode]: + def PREPARE_CLASS_CLONE(cls, v3_data: V3Data) -> type[ComfyNode]: """Creates clone of real node class to prevent monkey-patching.""" c_type: type[ComfyNode] = cls if is_class(cls) else type(cls) type_clone: type[ComfyNode] = shallow_clone_class(c_type) # set hidden - type_clone.hidden = HiddenHolder.from_dict(hidden_inputs) + type_clone.hidden = HiddenHolder.from_dict(v3_data["hidden_inputs"]) return type_clone @final @@ -1433,14 +1669,18 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal): @final @classmethod - def INPUT_TYPES(cls, include_hidden=True, return_schema=False) -> dict[str, dict] | tuple[dict[str, dict], Schema]: + def INPUT_TYPES(cls, include_hidden=True, return_schema=False, live_inputs=None) -> dict[str, dict] | tuple[dict[str, dict], Schema, V3Data]: schema = cls.FINALIZE_SCHEMA() - info = schema.get_v1_info(cls) + info = schema.get_v1_info(cls, live_inputs) input = info.input if not include_hidden: input.pop("hidden", None) if return_schema: - return input, schema + v3_data: V3Data = {} + dynamic = input.pop("dynamic_paths", None) + if dynamic is not None: + v3_data["dynamic_paths"] = dynamic + return input, schema, v3_data return input @final @@ -1513,7 +1753,7 @@ class 
ComfyNode(_ComfyNodeBaseInternal): raise NotImplementedError @classmethod - def validate_inputs(cls, **kwargs) -> bool: + def validate_inputs(cls, **kwargs) -> bool | str: """Optionally, define this function to validate inputs; equivalent to V1's VALIDATE_INPUTS.""" raise NotImplementedError @@ -1628,6 +1868,7 @@ __all__ = [ "StyleModel", "Gligen", "UpscaleModel", + "LatentUpscaleModel", "Audio", "Video", "SVG", @@ -1651,6 +1892,10 @@ __all__ = [ "SEGS", "AnyType", "MultiType", + # Dynamic Types + "MatchType", + # "DynamicCombo", + # "Autogrow", # Other classes "HiddenHolder", "Hidden", @@ -1661,4 +1906,5 @@ __all__ = [ "NodeOutput", "add_to_dict_v1", "add_to_dict_v3", + "V3Data", ] diff --git a/comfy_api/latest/_io_public.py b/comfy_api/latest/_io_public.py new file mode 100644 index 000000000..43c7680f3 --- /dev/null +++ b/comfy_api/latest/_io_public.py @@ -0,0 +1 @@ +from ._io import * # noqa: F403 diff --git a/comfy_api/latest/_ui_public.py b/comfy_api/latest/_ui_public.py new file mode 100644 index 000000000..85b11d78b --- /dev/null +++ b/comfy_api/latest/_ui_public.py @@ -0,0 +1 @@ +from ._ui import * # noqa: F403 diff --git a/comfy_api/v0_0_2/__init__.py b/comfy_api/v0_0_2/__init__.py index de0f95001..c4fa1d971 100644 --- a/comfy_api/v0_0_2/__init__.py +++ b/comfy_api/v0_0_2/__init__.py @@ -6,7 +6,7 @@ from comfy_api.latest import ( ) from typing import Type, TYPE_CHECKING from comfy_api.internal.async_to_sync import create_sync_class -from comfy_api.latest import io, ui, ComfyExtension #noqa: F401 +from comfy_api.latest import io, ui, IO, UI, ComfyExtension #noqa: F401 class ComfyAPIAdapter_v0_0_2(ComfyAPI_latest): @@ -42,4 +42,8 @@ __all__ = [ "InputImpl", "Types", "ComfyExtension", + "io", + "IO", + "ui", + "UI", ] diff --git a/comfy_execution/validation.py b/comfy_execution/validation.py index cec105fc9..24c0b4ed7 100644 --- a/comfy_execution/validation.py +++ b/comfy_execution/validation.py @@ -1,4 +1,5 @@ from __future__ import annotations +from comfy_api.latest import IO def validate_node_input( @@ -23,6 +24,11 @@ def validate_node_input( if not received_type != input_type: return True + # If the received type or input_type is a MatchType, we can return True immediately; + # validation for this is handled by the frontend + if received_type == IO.MatchType.io_type or input_type == IO.MatchType.io_type: + return True + # Not equal, and not strings if not isinstance(received_type, str) or not isinstance(input_type, str): return False diff --git a/comfy_extras/nodes_logic.py b/comfy_extras/nodes_logic.py new file mode 100644 index 000000000..95a6ba788 --- /dev/null +++ b/comfy_extras/nodes_logic.py @@ -0,0 +1,155 @@ +from typing import TypedDict +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io +from comfy_api.latest import _io + + + +class SwitchNode(io.ComfyNode): + @classmethod + def define_schema(cls): + template = io.MatchType.Template("switch") + return io.Schema( + node_id="ComfySwitchNode", + display_name="Switch", + category="logic", + is_experimental=True, + inputs=[ + io.Boolean.Input("switch"), + io.MatchType.Input("on_false", template=template, lazy=True, optional=True), + io.MatchType.Input("on_true", template=template, lazy=True, optional=True), + ], + outputs=[ + io.MatchType.Output(template=template, display_name="output"), + ], + ) + + @classmethod + def check_lazy_status(cls, switch, on_false=..., on_true=...): + # We use ... instead of None, as None is passed for connected-but-unevaluated inputs. 
+ # This trick allows us to ignore the value of the switch and still be able to run execute(). + + # One of the inputs may be missing, in which case we need to evaluate the other input + if on_false is ...: + return ["on_true"] + if on_true is ...: + return ["on_false"] + # Normal lazy switch operation + if switch and on_true is None: + return ["on_true"] + if not switch and on_false is None: + return ["on_false"] + + @classmethod + def validate_inputs(cls, switch, on_false=..., on_true=...): + # This check happens before check_lazy_status(), so we can eliminate the case where + # both inputs are missing. + if on_false is ... and on_true is ...: + return "At least one of on_false or on_true must be connected to Switch node" + return True + + @classmethod + def execute(cls, switch, on_true=..., on_false=...) -> io.NodeOutput: + if on_true is ...: + return io.NodeOutput(on_false) + if on_false is ...: + return io.NodeOutput(on_true) + return io.NodeOutput(on_true if switch else on_false) + + +class DCTestNode(io.ComfyNode): + class DCValues(TypedDict): + combo: str + string: str + integer: int + image: io.Image.Type + subcombo: dict[str] + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="DCTestNode", + display_name="DCTest", + category="logic", + is_output_node=True, + inputs=[_io.DynamicCombo.Input("combo", options=[ + _io.DynamicCombo.Option("option1", [io.String.Input("string")]), + _io.DynamicCombo.Option("option2", [io.Int.Input("integer")]), + _io.DynamicCombo.Option("option3", [io.Image.Input("image")]), + _io.DynamicCombo.Option("option4", [ + _io.DynamicCombo.Input("subcombo", options=[ + _io.DynamicCombo.Option("opt1", [io.Float.Input("float_x"), io.Float.Input("float_y")]), + _io.DynamicCombo.Option("opt2", [io.Mask.Input("mask1", optional=True)]), + ]) + ])] + )], + outputs=[io.AnyType.Output()], + ) + + @classmethod + def execute(cls, combo: DCValues) -> io.NodeOutput: + combo_val = combo["combo"] + if combo_val == "option1": + return io.NodeOutput(combo["string"]) + elif combo_val == "option2": + return io.NodeOutput(combo["integer"]) + elif combo_val == "option3": + return io.NodeOutput(combo["image"]) + elif combo_val == "option4": + return io.NodeOutput(f"{combo['subcombo']}") + else: + raise ValueError(f"Invalid combo: {combo_val}") + + +class AutogrowNamesTestNode(io.ComfyNode): + @classmethod + def define_schema(cls): + template = _io.Autogrow.TemplateNames(input=io.Float.Input("float"), names=["a", "b", "c"]) + return io.Schema( + node_id="AutogrowNamesTestNode", + display_name="AutogrowNamesTest", + category="logic", + inputs=[ + _io.Autogrow.Input("autogrow", template=template) + ], + outputs=[io.String.Output()], + ) + + @classmethod + def execute(cls, autogrow: _io.Autogrow.Type) -> io.NodeOutput: + vals = list(autogrow.values()) + combined = ",".join([str(x) for x in vals]) + return io.NodeOutput(combined) + +class AutogrowPrefixTestNode(io.ComfyNode): + @classmethod + def define_schema(cls): + template = _io.Autogrow.TemplatePrefix(input=io.Float.Input("float"), prefix="float", min=1, max=10) + return io.Schema( + node_id="AutogrowPrefixTestNode", + display_name="AutogrowPrefixTest", + category="logic", + inputs=[ + _io.Autogrow.Input("autogrow", template=template) + ], + outputs=[io.String.Output()], + ) + + @classmethod + def execute(cls, autogrow: _io.Autogrow.Type) -> io.NodeOutput: + vals = list(autogrow.values()) + combined = ",".join([str(x) for x in vals]) + return io.NodeOutput(combined) + +class LogicExtension(ComfyExtension): + 
@override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + # SwitchNode, + # DCTestNode, + # AutogrowNamesTestNode, + # AutogrowPrefixTestNode, + ] + +async def comfy_entrypoint() -> LogicExtension: + return LogicExtension() diff --git a/execution.py b/execution.py index 17c77beab..c2186ac98 100644 --- a/execution.py +++ b/execution.py @@ -34,7 +34,7 @@ from comfy_execution.validation import validate_node_input from comfy_execution.progress import get_progress_state, reset_progress_state, add_progress_handler, WebUIProgressHandler from comfy_execution.utils import CurrentNodeContext from comfy_api.internal import _ComfyNodeInternal, _NodeOutputInternal, first_real_override, is_class, make_locked_method_func -from comfy_api.latest import io +from comfy_api.latest import io, _io class ExecutionResult(Enum): @@ -76,7 +76,7 @@ class IsChangedCache: return self.is_changed[node_id] # Intentionally do not use cached outputs here. We only want constants in IS_CHANGED - input_data_all, _, hidden_inputs = get_input_data(node["inputs"], class_def, node_id, None) + input_data_all, _, v3_data = get_input_data(node["inputs"], class_def, node_id, None) try: is_changed = await _async_map_node_over_list(self.prompt_id, node_id, class_def, input_data_all, is_changed_name) is_changed = await resolve_map_node_over_list_results(is_changed) @@ -146,8 +146,9 @@ SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org") def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt=None, extra_data={}): is_v3 = issubclass(class_def, _ComfyNodeInternal) + v3_data: io.V3Data = {} if is_v3: - valid_inputs, schema = class_def.INPUT_TYPES(include_hidden=False, return_schema=True) + valid_inputs, schema, v3_data = class_def.INPUT_TYPES(include_hidden=False, return_schema=True, live_inputs=inputs) else: valid_inputs = class_def.INPUT_TYPES() input_data_all = {} @@ -207,7 +208,8 @@ def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt= input_data_all[x] = [extra_data.get("auth_token_comfy_org", None)] if h[x] == "API_KEY_COMFY_ORG": input_data_all[x] = [extra_data.get("api_key_comfy_org", None)] - return input_data_all, missing_keys, hidden_inputs_v3 + v3_data["hidden_inputs"] = hidden_inputs_v3 + return input_data_all, missing_keys, v3_data map_node_over_list = None #Don't hook this please @@ -223,7 +225,7 @@ async def resolve_map_node_over_list_results(results): raise exc return [x.result() if isinstance(x, asyncio.Task) else x for x in results] -async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None, hidden_inputs=None): +async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None, v3_data=None): # check if node wants the lists input_is_list = getattr(obj, "INPUT_IS_LIST", False) @@ -259,13 +261,16 @@ async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, f if is_class(obj): type_obj = obj obj.VALIDATE_CLASS() - class_clone = obj.PREPARE_CLASS_CLONE(hidden_inputs) + class_clone = obj.PREPARE_CLASS_CLONE(v3_data) # otherwise, use class instance to populate/reuse some fields else: type_obj = type(obj) type_obj.VALIDATE_CLASS() - class_clone = type_obj.PREPARE_CLASS_CLONE(hidden_inputs) + class_clone = type_obj.PREPARE_CLASS_CLONE(v3_data) f = make_locked_method_func(type_obj, func, class_clone) + # in case of dynamic inputs, 
restructure inputs to expected nested dict + if v3_data is not None: + inputs = _io.build_nested_inputs(inputs, v3_data) # V1 else: f = getattr(obj, func) @@ -320,8 +325,8 @@ def merge_result_data(results, obj): output.append([o[i] for o in results]) return output -async def get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=None, pre_execute_cb=None, hidden_inputs=None): - return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs) +async def get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=None, pre_execute_cb=None, v3_data=None): + return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data) has_pending_task = any(isinstance(r, asyncio.Task) and not r.done() for r in return_values) if has_pending_task: return return_values, {}, False, has_pending_task @@ -460,7 +465,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, has_subgraph = False else: get_progress_state().start_progress(unique_id) - input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, execution_list, dynprompt, extra_data) + input_data_all, missing_keys, v3_data = get_input_data(inputs, class_def, unique_id, execution_list, dynprompt, extra_data) if server.client_id is not None: server.last_node_id = display_node_id server.send_sync("executing", { "node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id }, server.client_id) @@ -475,7 +480,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, else: lazy_status_present = getattr(obj, "check_lazy_status", None) is not None if lazy_status_present: - required_inputs = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, "check_lazy_status", allow_interrupt=True, hidden_inputs=hidden_inputs) + required_inputs = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, "check_lazy_status", allow_interrupt=True, v3_data=v3_data) required_inputs = await resolve_map_node_over_list_results(required_inputs) required_inputs = set(sum([r for r in required_inputs if isinstance(r,list)], [])) required_inputs = [x for x in required_inputs if isinstance(x,str) and ( @@ -507,7 +512,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, def pre_execute_cb(call_index): # TODO - How to handle this with async functions without contextvars (which requires Python 3.12)? 
GraphBuilder.set_default_prefix(unique_id, call_index, 0) - output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs) + output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data) if has_pending_tasks: pending_async_nodes[unique_id] = output_data unblock = execution_list.add_external_block(unique_id) @@ -745,18 +750,17 @@ async def validate_inputs(prompt_id, prompt, item, validated): class_type = prompt[unique_id]['class_type'] obj_class = nodes.NODE_CLASS_MAPPINGS[class_type] - class_inputs = obj_class.INPUT_TYPES() - valid_inputs = set(class_inputs.get('required',{})).union(set(class_inputs.get('optional',{}))) - errors = [] valid = True validate_function_inputs = [] validate_has_kwargs = False if issubclass(obj_class, _ComfyNodeInternal): + class_inputs, _, _ = obj_class.INPUT_TYPES(include_hidden=False, return_schema=True, live_inputs=inputs) validate_function_name = "validate_inputs" validate_function = first_real_override(obj_class, validate_function_name) else: + class_inputs = obj_class.INPUT_TYPES() validate_function_name = "VALIDATE_INPUTS" validate_function = getattr(obj_class, validate_function_name, None) if validate_function is not None: @@ -765,6 +769,8 @@ async def validate_inputs(prompt_id, prompt, item, validated): validate_has_kwargs = argspec.varkw is not None received_types = {} + valid_inputs = set(class_inputs.get('required',{})).union(set(class_inputs.get('optional',{}))) + for x in valid_inputs: input_type, input_category, extra_info = get_input_info(obj_class, x, class_inputs) assert extra_info is not None @@ -935,7 +941,7 @@ async def validate_inputs(prompt_id, prompt, item, validated): continue if len(validate_function_inputs) > 0 or validate_has_kwargs: - input_data_all, _, hidden_inputs = get_input_data(inputs, obj_class, unique_id) + input_data_all, _, v3_data = get_input_data(inputs, obj_class, unique_id) input_filtered = {} for x in input_data_all: if x in validate_function_inputs or validate_has_kwargs: @@ -943,7 +949,7 @@ async def validate_inputs(prompt_id, prompt, item, validated): if 'input_types' in validate_function_inputs: input_filtered['input_types'] = [received_types] - ret = await _async_map_node_over_list(prompt_id, unique_id, obj_class, input_filtered, validate_function_name, hidden_inputs=hidden_inputs) + ret = await _async_map_node_over_list(prompt_id, unique_id, obj_class, input_filtered, validate_function_name, v3_data=v3_data) ret = await resolve_map_node_over_list_results(ret) for x in input_filtered: for i, r in enumerate(ret): diff --git a/nodes.py b/nodes.py index 4c910a34b..356aa63df 100644 --- a/nodes.py +++ b/nodes.py @@ -2355,6 +2355,7 @@ async def init_builtin_extra_nodes(): "nodes_easycache.py", "nodes_audio_encoder.py", "nodes_rope.py", + "nodes_logic.py", "nodes_nop.py", ] From 861817d22d2659099811b56005c9eaea18d64c73 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 2 Dec 2025 21:47:51 -0800 Subject: [PATCH 0978/1073] Fix issue with portable updater. (#11070) This should fix the problem with the portable updater not working with portables created from a separate branch on the repo. 
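For illustration, a minimal sketch of the fetch-then-checkout flow this fix switches to (assuming pygit2; the repository path here is hypothetical, and the authoritative change is the .ci/update_windows/update.py diff below):

    import pygit2

    # Hypothetical path to the portable's ComfyUI checkout.
    repo = pygit2.Repository("ComfyUI")

    # pull() resolves against whatever branch HEAD currently points at, which can
    # fail when the portable was created from a non-master branch. Explicitly
    # fetching the origin remote always refreshes refs/remotes/origin/master.
    for remote in repo.remotes:
        if remote.name == "origin":
            remote.fetch()

    ref = repo.lookup_reference("refs/remotes/origin/master")
    repo.checkout(ref)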
This does not affect any current portables, which were all created on the master branch. --- .ci/update_windows/update.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.ci/update_windows/update.py b/.ci/update_windows/update.py index 51a263203..59ece5130 100755 --- a/.ci/update_windows/update.py +++ b/.ci/update_windows/update.py @@ -66,8 +66,10 @@ if branch is None: try: ref = repo.lookup_reference('refs/remotes/origin/master') except: - print("pulling.") # noqa: T201 - pull(repo) + print("fetching.") # noqa: T201 + for remote in repo.remotes: + if remote.name == "origin": + remote.fetch() ref = repo.lookup_reference('refs/remotes/origin/master') repo.checkout(ref) branch = repo.lookup_branch('master') @@ -149,3 +151,4 @@ try: shutil.copy(stable_update_script, stable_update_script_to) except: pass + From 519c9411653df99761053c30e101816e0ca3c24b Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Wed, 3 Dec 2025 17:28:45 +1000 Subject: [PATCH 0979/1073] Prs/lora reservations (reduce massive LoRA reservations especially on Flux2) (#11069) * mp: only count the offload cost of math once This was previously bundling the combined weight storage and computation cost. * ops: put all post async transfer compute on the main stream Some models have massive weights that need either complex dequantization or LoRA patching. Don't do these operations on the offload stream; instead do them on the main stream to synchronize the potentially large VRAM spikes from these compute processes. This avoids having to assume a worst-case scenario of multiple offload streams all spiking VRAM in parallel with whatever the main stream is doing. --- comfy/model_patcher.py | 4 ++-- comfy/ops.py | 39 ++++++++++++++++++++++----------------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 3eac77275..df2d8e827 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -704,7 +704,7 @@ class ModelPatcher: lowvram_weight = False - potential_offload = max(offload_buffer, module_offload_mem * (comfy.model_management.NUM_STREAMS + 1)) + potential_offload = max(offload_buffer, module_offload_mem + (comfy.model_management.NUM_STREAMS * module_mem)) lowvram_fits = mem_counter + module_mem + potential_offload < lowvram_model_memory weight_key = "{}.weight".format(n) @@ -883,7 +883,7 @@ class ModelPatcher: break module_offload_mem, module_mem, n, m, params = unload - potential_offload = (comfy.model_management.NUM_STREAMS + 1) * module_offload_mem + potential_offload = module_offload_mem + (comfy.model_management.NUM_STREAMS * module_mem) lowvram_possible = hasattr(m, "comfy_cast_weights") if hasattr(m, "comfy_patched_weights") and m.comfy_patched_weights == True: diff --git a/comfy/ops.py b/comfy/ops.py index 61a2f0754..eae434e68 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -111,22 +111,24 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of if s.bias is not None: bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=bias_has_function, stream=offload_stream) - if bias_has_function: - with wf_context: - for f in s.bias_function: - bias = f(bias) + comfy.model_management.sync_stream(device, offload_stream) + + bias_a = bias + weight_a = weight + + if s.bias is not None: + for f in s.bias_function: + bias = f(bias) if weight_has_function or weight.dtype != dtype: - with wf_context: - weight = weight.to(dtype=dtype) - if
isinstance(weight, QuantizedTensor): - weight = weight.dequantize() - for f in s.weight_function: - weight = f(weight) + weight = weight.to(dtype=dtype) + if isinstance(weight, QuantizedTensor): + weight = weight.dequantize() + for f in s.weight_function: + weight = f(weight) - comfy.model_management.sync_stream(device, offload_stream) if offloadable: - return weight, bias, offload_stream + return weight, bias, (offload_stream, weight_a, bias_a) else: #Legacy function signature return weight, bias @@ -135,13 +137,16 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of def uncast_bias_weight(s, weight, bias, offload_stream): if offload_stream is None: return - if weight is not None: - device = weight.device + os, weight_a, bias_a = offload_stream + if os is None: + return + if weight_a is not None: + device = weight_a.device else: - if bias is None: + if bias_a is None: return - device = bias.device - offload_stream.wait_stream(comfy.model_management.current_stream(device)) + device = bias_a.device + os.wait_stream(comfy.model_management.current_stream(device)) class CastWeightBiasOp: From 19f2192d69d13445131b72ad1d87167f59b66fc4 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 3 Dec 2025 18:37:35 +0200 Subject: [PATCH 0980/1073] fix(V3-Schema): use empty list defaults for Schema.inputs/outputs/hidden to avoid None issues (#11083) --- comfy_api/latest/_io.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 257f07c42..866c3e0eb 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -5,7 +5,7 @@ import inspect from abc import ABC, abstractmethod from collections import Counter from collections.abc import Iterable -from dataclasses import asdict, dataclass +from dataclasses import asdict, dataclass, field from enum import Enum from typing import Any, Callable, Literal, TypedDict, TypeVar, TYPE_CHECKING from typing_extensions import NotRequired, final @@ -1199,9 +1199,9 @@ class Schema: """Display name of node.""" category: str = "sd" """The category of the node, as per the "Add Node" menu.""" - inputs: list[Input]=None - outputs: list[Output]=None - hidden: list[Hidden]=None + inputs: list[Input] = field(default_factory=list) + outputs: list[Output] = field(default_factory=list) + hidden: list[Hidden] = field(default_factory=list) description: str="" """Node description, shown as a tooltip when hovering over the node.""" is_input_list: bool = False From 87c104bfc1928f0b018a50f5867f425e10482929 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 3 Dec 2025 18:55:44 +0200 Subject: [PATCH 0981/1073] add support for "@image" reference format in Kling Omni API nodes (#11082) --- comfy_api_nodes/apis/kling_api.py | 30 +++++-- comfy_api_nodes/nodes_kling.py | 138 ++++++++++++++++++++++++++++-- 2 files changed, 155 insertions(+), 13 deletions(-) diff --git a/comfy_api_nodes/apis/kling_api.py b/comfy_api_nodes/apis/kling_api.py index 0a3b447c5..d8949f8ac 100644 --- a/comfy_api_nodes/apis/kling_api.py +++ b/comfy_api_nodes/apis/kling_api.py @@ -46,21 +46,41 @@ class TaskStatusVideoResult(BaseModel): url: str | None = Field(None, description="URL for generated video") -class TaskStatusVideoResults(BaseModel): +class TaskStatusImageResult(BaseModel): + index: int = Field(..., description="Image Number,0-9") + url: str = Field(..., description="URL for generated image") + + 
+class OmniTaskStatusResults(BaseModel): videos: list[TaskStatusVideoResult] | None = Field(None) + images: list[TaskStatusImageResult] | None = Field(None) -class TaskStatusVideoResponseData(BaseModel): +class OmniTaskStatusResponseData(BaseModel): created_at: int | None = Field(None, description="Task creation time") updated_at: int | None = Field(None, description="Task update time") task_status: str | None = None task_status_msg: str | None = Field(None, description="Additional failure reason. Only for polling endpoint.") task_id: str | None = Field(None, description="Task ID") - task_result: TaskStatusVideoResults | None = Field(None) + task_result: OmniTaskStatusResults | None = Field(None) -class TaskStatusVideoResponse(BaseModel): +class OmniTaskStatusResponse(BaseModel): code: int | None = Field(None, description="Error code") message: str | None = Field(None, description="Error message") request_id: str | None = Field(None, description="Request ID") - data: TaskStatusVideoResponseData | None = Field(None) + data: OmniTaskStatusResponseData | None = Field(None) + + +class OmniImageParamImage(BaseModel): + image: str = Field(...) + + +class OmniProImageRequest(BaseModel): + model_name: str = Field(..., description="kling-image-o1") + resolution: str = Field(..., description="'1k' or '2k'") + aspect_ratio: str | None = Field(...) + prompt: str = Field(...) + mode: str = Field("pro") + n: int | None = Field(1, le=9) + image_list: list[OmniImageParamImage] | None = Field(..., max_length=10) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 850c44db6..6c840dc47 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -6,6 +6,7 @@ For source of truth on the allowed permutations of request fields, please refere import logging import math +import re import torch from typing_extensions import override @@ -49,12 +50,14 @@ from comfy_api_nodes.apis import ( KlingSingleImageEffectModelName, ) from comfy_api_nodes.apis.kling_api import ( + OmniImageParamImage, OmniParamImage, OmniParamVideo, OmniProFirstLastFrameRequest, + OmniProImageRequest, OmniProReferences2VideoRequest, OmniProText2VideoRequest, - TaskStatusVideoResponse, + OmniTaskStatusResponse, ) from comfy_api_nodes.util import ( ApiEndpoint, @@ -210,7 +213,36 @@ VOICES_CONFIG = { } -async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusVideoResponse) -> IO.NodeOutput: +def normalize_omni_prompt_references(prompt: str) -> str: + """ + Rewrites Kling Omni-style placeholders used in the app, like: + + @image, @image1, @image2, ... @imageN + @video, @video1, @video2, ... @videoN + + into the API-compatible form: + + <<>>, <<>>, ... + <<>>, <<>>, ... + + This is a UX shim for ComfyUI so users can type the same syntax as in the Kling app. + """ + if not prompt: + return prompt + + def _image_repl(match): + return f"<<>>" + + def _video_repl(match): + return f"<<>>" + + # (? and not @imageFoo + prompt = re.sub(r"(?\d*)(?!\w)", _image_repl, prompt) + return re.sub(r"(?\d*)(?!\w)", _video_repl, prompt) + + +async def finish_omni_video_task(cls: type[IO.ComfyNode], response: OmniTaskStatusResponse) -> IO.NodeOutput: if response.code: raise RuntimeError( f"Kling request failed. 
Code: {response.code}, Message: {response.message}, Data: {response.data}" @@ -218,8 +250,9 @@ async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusVi final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/kling/v1/videos/omni-video/{response.data.task_id}"), - response_model=TaskStatusVideoResponse, + response_model=OmniTaskStatusResponse, status_extractor=lambda r: (r.data.task_status if r.data else None), + max_poll_attempts=160, ) return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url)) @@ -801,7 +834,7 @@ class OmniProTextToVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=TaskStatusVideoResponse, + response_model=OmniTaskStatusResponse, data=OmniProText2VideoRequest( model_name=model_name, prompt=prompt, @@ -864,6 +897,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode): end_frame: Input.Image | None = None, reference_images: Input.Image | None = None, ) -> IO.NodeOutput: + prompt = normalize_omni_prompt_references(prompt) validate_string(prompt, min_length=1, max_length=2500) if end_frame is not None and reference_images is not None: raise ValueError("The 'end_frame' input cannot be used simultaneously with 'reference_images'.") @@ -895,7 +929,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=TaskStatusVideoResponse, + response_model=OmniTaskStatusResponse, data=OmniProFirstLastFrameRequest( model_name=model_name, prompt=prompt, @@ -950,6 +984,7 @@ class OmniProImageToVideoNode(IO.ComfyNode): duration: int, reference_images: Input.Image, ) -> IO.NodeOutput: + prompt = normalize_omni_prompt_references(prompt) validate_string(prompt, min_length=1, max_length=2500) if get_number_of_images(reference_images) > 7: raise ValueError("The maximum number of reference images is 7.") @@ -962,7 +997,7 @@ class OmniProImageToVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=TaskStatusVideoResponse, + response_model=OmniTaskStatusResponse, data=OmniProReferences2VideoRequest( model_name=model_name, prompt=prompt, @@ -1023,6 +1058,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode): keep_original_sound: bool, reference_images: Input.Image | None = None, ) -> IO.NodeOutput: + prompt = normalize_omni_prompt_references(prompt) validate_string(prompt, min_length=1, max_length=2500) validate_video_duration(reference_video, min_duration=3.0, max_duration=10.05) validate_video_dimensions(reference_video, min_width=720, min_height=720, max_width=2160, max_height=2160) @@ -1045,7 +1081,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=TaskStatusVideoResponse, + response_model=OmniTaskStatusResponse, data=OmniProReferences2VideoRequest( model_name=model_name, prompt=prompt, @@ -1103,6 +1139,7 @@ class OmniProEditVideoNode(IO.ComfyNode): keep_original_sound: bool, reference_images: Input.Image | None = None, ) -> IO.NodeOutput: + prompt = normalize_omni_prompt_references(prompt) validate_string(prompt, min_length=1, max_length=2500) validate_video_duration(video, min_duration=3.0, max_duration=10.05) validate_video_dimensions(video, min_width=720, min_height=720, max_width=2160, max_height=2160) @@ -1125,7 +1162,7 @@ 
class OmniProEditVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=TaskStatusVideoResponse, + response_model=OmniTaskStatusResponse, data=OmniProReferences2VideoRequest( model_name=model_name, prompt=prompt, @@ -1138,6 +1175,90 @@ class OmniProEditVideoNode(IO.ComfyNode): return await finish_omni_video_task(cls, response) +class OmniProImageNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingOmniProImageNode", + display_name="Kling Omni Image (Pro)", + category="api node/image/Kling", + description="Create or edit images with the latest model from Kling.", + inputs=[ + IO.Combo.Input("model_name", options=["kling-image-o1"]), + IO.String.Input( + "prompt", + multiline=True, + tooltip="A text prompt describing the image content. " + "This can include both positive and negative descriptions.", + ), + IO.Combo.Input("resolution", options=["1K", "2K"]), + IO.Combo.Input( + "aspect_ratio", + options=["16:9", "9:16", "1:1", "4:3", "3:4", "3:2", "2:3", "21:9"], + ), + IO.Image.Input( + "reference_images", + tooltip="Up to 10 additional reference images.", + optional=True, + ), + ], + outputs=[ + IO.Image.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + prompt: str, + resolution: str, + aspect_ratio: str, + reference_images: Input.Image | None = None, + ) -> IO.NodeOutput: + prompt = normalize_omni_prompt_references(prompt) + validate_string(prompt, min_length=1, max_length=2500) + image_list: list[OmniImageParamImage] = [] + if reference_images is not None: + if get_number_of_images(reference_images) > 10: + raise ValueError("The maximum number of reference images is 10.") + for i in reference_images: + validate_image_dimensions(i, min_width=300, min_height=300) + validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1)) + for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"): + image_list.append(OmniImageParamImage(image=i)) + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/images/omni-image", method="POST"), + response_model=OmniTaskStatusResponse, + data=OmniProImageRequest( + model_name=model_name, + prompt=prompt, + resolution=resolution.lower(), + aspect_ratio=aspect_ratio, + image_list=image_list if image_list else None, + ), + ) + if response.code: + raise RuntimeError( + f"Kling request failed. Code: {response.code}, Message: {response.message}, Data: {response.data}" + ) + final_response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/kling/v1/images/omni-image/{response.data.task_id}"), + response_model=OmniTaskStatusResponse, + status_extractor=lambda r: (r.data.task_status if r.data else None), + ) + return IO.NodeOutput(await download_url_to_image_tensor(final_response.data.task_result.images[0].url)) + + class KlingCameraControlT2VNode(IO.ComfyNode): """ Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera. 
@@ -1935,6 +2056,7 @@ class KlingExtension(ComfyExtension): OmniProImageToVideoNode, OmniProVideoToVideoNode, OmniProEditVideoNode, + # OmniProImageNode, # need support from backend ] From 440268d3940eb14a01595439bbc05c4aacde9c72 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 3 Dec 2025 23:52:31 +0200 Subject: [PATCH 0982/1073] convert nodes_load_3d.py to V3 schema (#10990) --- comfy_api/latest/_ui.py | 13 +++- comfy_extras/nodes_load_3d.py | 127 ++++++++++++++++------------------ 2 files changed, 71 insertions(+), 69 deletions(-) diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py index b0bbabe2a..6d1bea599 100644 --- a/comfy_api/latest/_ui.py +++ b/comfy_api/latest/_ui.py @@ -3,6 +3,7 @@ from __future__ import annotations import json import os import random +import uuid from io import BytesIO from typing import Type @@ -436,9 +437,19 @@ class PreviewUI3D(_UIOutput): def __init__(self, model_file, camera_info, **kwargs): self.model_file = model_file self.camera_info = camera_info + self.bg_image_path = None + bg_image = kwargs.get("bg_image", None) + if bg_image is not None: + img_array = (bg_image[0].cpu().numpy() * 255).astype(np.uint8) + img = PILImage.fromarray(img_array) + temp_dir = folder_paths.get_temp_directory() + filename = f"bg_{uuid.uuid4().hex}.png" + bg_image_path = os.path.join(temp_dir, filename) + img.save(bg_image_path, compress_level=1) + self.bg_image_path = f"temp/{filename}" def as_dict(self): - return {"result": [self.model_file, self.camera_info]} + return {"result": [self.model_file, self.camera_info, self.bg_image_path]} class PreviewText(_UIOutput): diff --git a/comfy_extras/nodes_load_3d.py b/comfy_extras/nodes_load_3d.py index 54c66ef68..545588ef8 100644 --- a/comfy_extras/nodes_load_3d.py +++ b/comfy_extras/nodes_load_3d.py @@ -2,22 +2,18 @@ import nodes import folder_paths import os -from comfy.comfy_types import IO -from comfy_api.input_impl import VideoFromFile +from typing_extensions import override +from comfy_api.latest import IO, ComfyExtension, InputImpl, UI from pathlib import Path -from PIL import Image -import numpy as np - -import uuid def normalize_path(path): return path.replace('\\', '/') -class Load3D(): +class Load3D(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): + def define_schema(cls): input_dir = os.path.join(folder_paths.get_input_directory(), "3d") os.makedirs(input_dir, exist_ok=True) @@ -30,23 +26,29 @@ class Load3D(): for file_path in input_path.rglob("*") if file_path.suffix.lower() in {'.gltf', '.glb', '.obj', '.fbx', '.stl'} ] + return IO.Schema( + node_id="Load3D", + display_name="Load 3D & Animation", + category="3d", + is_experimental=True, + inputs=[ + IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model), + IO.Load3D.Input("image"), + IO.Int.Input("width", default=1024, min=1, max=4096, step=1), + IO.Int.Input("height", default=1024, min=1, max=4096, step=1), + ], + outputs=[ + IO.Image.Output(display_name="image"), + IO.Mask.Output(display_name="mask"), + IO.String.Output(display_name="mesh_path"), + IO.Image.Output(display_name="normal"), + IO.Load3DCamera.Output(display_name="camera_info"), + IO.Video.Output(display_name="recording_video"), + ], + ) - return {"required": { - "model_file": (sorted(files), {"file_upload": True}), - "image": ("LOAD_3D", {}), - "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), - "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), - }} - - RETURN_TYPES = 
("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO) - RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info", "recording_video") - - FUNCTION = "process" - EXPERIMENTAL = True - - CATEGORY = "3d" - - def process(self, model_file, image, **kwargs): + @classmethod + def execute(cls, model_file, image, **kwargs) -> IO.NodeOutput: image_path = folder_paths.get_annotated_filepath(image['image']) mask_path = folder_paths.get_annotated_filepath(image['mask']) normal_path = folder_paths.get_annotated_filepath(image['normal']) @@ -61,58 +63,47 @@ class Load3D(): if image['recording'] != "": recording_video_path = folder_paths.get_annotated_filepath(image['recording']) - video = VideoFromFile(recording_video_path) + video = InputImpl.VideoFromFile(recording_video_path) - return output_image, output_mask, model_file, normal_image, image['camera_info'], video + return IO.NodeOutput(output_image, output_mask, model_file, normal_image, image['camera_info'], video) -class Preview3D(): + process = execute # TODO: remove + + +class Preview3D(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "model_file": ("STRING", {"default": "", "multiline": False}), - }, - "optional": { - "camera_info": ("LOAD3D_CAMERA", {}), - "bg_image": ("IMAGE", {}) - }} + def define_schema(cls): + return IO.Schema( + node_id="Preview3D", + display_name="Preview 3D & Animation", + category="3d", + is_experimental=True, + is_output_node=True, + inputs=[ + IO.String.Input("model_file", default="", multiline=False), + IO.Load3DCamera.Input("camera_info", optional=True), + IO.Image.Input("bg_image", optional=True), + ], + outputs=[], + ) - OUTPUT_NODE = True - RETURN_TYPES = () - - CATEGORY = "3d" - - FUNCTION = "process" - EXPERIMENTAL = True - - def process(self, model_file, **kwargs): + @classmethod + def execute(cls, model_file, **kwargs) -> IO.NodeOutput: camera_info = kwargs.get("camera_info", None) bg_image = kwargs.get("bg_image", None) + return IO.NodeOutput(ui=UI.PreviewUI3D(model_file, camera_info, bg_image=bg_image)) - bg_image_path = None - if bg_image is not None: + process = execute # TODO: remove - img_array = (bg_image[0].cpu().numpy() * 255).astype(np.uint8) - img = Image.fromarray(img_array) - temp_dir = folder_paths.get_temp_directory() - filename = f"bg_{uuid.uuid4().hex}.png" - bg_image_path = os.path.join(temp_dir, filename) - img.save(bg_image_path, compress_level=1) +class Load3DExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + Load3D, + Preview3D, + ] - bg_image_path = f"temp/{filename}" - return { - "ui": { - "result": [model_file, camera_info, bg_image_path] - } - } - -NODE_CLASS_MAPPINGS = { - "Load3D": Load3D, - "Preview3D": Preview3D, -} - -NODE_DISPLAY_NAME_MAPPINGS = { - "Load3D": "Load 3D & Animation", - "Preview3D": "Preview 3D & Animation", -} +async def comfy_entrypoint() -> Load3DExtension: + return Load3DExtension() From dce518c2b4f99634b5fdde1924d9b0bd468fe1ce Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 4 Dec 2025 03:35:04 +0200 Subject: [PATCH 0983/1073] convert nodes_audio.py to V3 schema (#10798) --- comfy_api/latest/_ui.py | 9 +- comfy_extras/nodes_audio.py | 744 ++++++++++++++++++------------------ 2 files changed, 382 insertions(+), 371 deletions(-) diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py index 6d1bea599..5a75a3aae 100644 --- a/comfy_api/latest/_ui.py +++ b/comfy_api/latest/_ui.py @@ -319,9 
+319,10 @@ class AudioSaveHelper: for key, value in metadata.items(): output_container.metadata[key] = value + layout = "mono" if waveform.shape[0] == 1 else "stereo" # Set up the output stream with appropriate properties if format == "opus": - out_stream = output_container.add_stream("libopus", rate=sample_rate) + out_stream = output_container.add_stream("libopus", rate=sample_rate, layout=layout) if quality == "64k": out_stream.bit_rate = 64000 elif quality == "96k": @@ -333,7 +334,7 @@ class AudioSaveHelper: elif quality == "320k": out_stream.bit_rate = 320000 elif format == "mp3": - out_stream = output_container.add_stream("libmp3lame", rate=sample_rate) + out_stream = output_container.add_stream("libmp3lame", rate=sample_rate, layout=layout) if quality == "V0": # TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool out_stream.codec_context.qscale = 1 @@ -342,12 +343,12 @@ class AudioSaveHelper: elif quality == "320k": out_stream.bit_rate = 320000 else: # format == "flac": - out_stream = output_container.add_stream("flac", rate=sample_rate) + out_stream = output_container.add_stream("flac", rate=sample_rate, layout=layout) frame = av.AudioFrame.from_ndarray( waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format="flt", - layout="mono" if waveform.shape[0] == 1 else "stereo", + layout=layout, ) frame.sample_rate = sample_rate frame.pts = 0 diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py index 2ed7e0b22..812301fb7 100644 --- a/comfy_extras/nodes_audio.py +++ b/comfy_extras/nodes_audio.py @@ -6,65 +6,80 @@ import torch import comfy.model_management import folder_paths import os -import io -import json -import random import hashlib import node_helpers import logging -from comfy.cli_args import args -from comfy.comfy_types import FileLocator +from typing_extensions import override +from comfy_api.latest import ComfyExtension, IO, UI -class EmptyLatentAudio: - def __init__(self): - self.device = comfy.model_management.intermediate_device() +class EmptyLatentAudio(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="EmptyLatentAudio", + display_name="Empty Latent Audio", + category="latent/audio", + inputs=[ + IO.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1), + IO.Int.Input( + "batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch." 
+ ), + ], + outputs=[IO.Latent.Output()], + ) @classmethod - def INPUT_TYPES(s): - return {"required": {"seconds": ("FLOAT", {"default": 47.6, "min": 1.0, "max": 1000.0, "step": 0.1}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}), - }} - RETURN_TYPES = ("LATENT",) - FUNCTION = "generate" - - CATEGORY = "latent/audio" - - def generate(self, seconds, batch_size): + def execute(cls, seconds, batch_size) -> IO.NodeOutput: length = round((seconds * 44100 / 2048) / 2) * 2 - latent = torch.zeros([batch_size, 64, length], device=self.device) - return ({"samples":latent, "type": "audio"}, ) + latent = torch.zeros([batch_size, 64, length], device=comfy.model_management.intermediate_device()) + return IO.NodeOutput({"samples":latent, "type": "audio"}) -class ConditioningStableAudio: + generate = execute # TODO: remove + + +class ConditioningStableAudio(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "seconds_start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.1}), - "seconds_total": ("FLOAT", {"default": 47.0, "min": 0.0, "max": 1000.0, "step": 0.1}), - }} + def define_schema(cls): + return IO.Schema( + node_id="ConditioningStableAudio", + category="conditioning", + inputs=[ + IO.Conditioning.Input("positive"), + IO.Conditioning.Input("negative"), + IO.Float.Input("seconds_start", default=0.0, min=0.0, max=1000.0, step=0.1), + IO.Float.Input("seconds_total", default=47.0, min=0.0, max=1000.0, step=0.1), + ], + outputs=[ + IO.Conditioning.Output(display_name="positive"), + IO.Conditioning.Output(display_name="negative"), + ], + ) - RETURN_TYPES = ("CONDITIONING","CONDITIONING") - RETURN_NAMES = ("positive", "negative") - - FUNCTION = "append" - - CATEGORY = "conditioning" - - def append(self, positive, negative, seconds_start, seconds_total): + @classmethod + def execute(cls, positive, negative, seconds_start, seconds_total) -> IO.NodeOutput: positive = node_helpers.conditioning_set_values(positive, {"seconds_start": seconds_start, "seconds_total": seconds_total}) negative = node_helpers.conditioning_set_values(negative, {"seconds_start": seconds_start, "seconds_total": seconds_total}) - return (positive, negative) + return IO.NodeOutput(positive, negative) -class VAEEncodeAudio: + append = execute # TODO: remove + + +class VAEEncodeAudio(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "audio": ("AUDIO", ), "vae": ("VAE", )}} - RETURN_TYPES = ("LATENT",) - FUNCTION = "encode" + def define_schema(cls): + return IO.Schema( + node_id="VAEEncodeAudio", + display_name="VAE Encode Audio", + category="latent/audio", + inputs=[ + IO.Audio.Input("audio"), + IO.Vae.Input("vae"), + ], + outputs=[IO.Latent.Output()], + ) - CATEGORY = "latent/audio" - - def encode(self, vae, audio): + @classmethod + def execute(cls, vae, audio) -> IO.NodeOutput: sample_rate = audio["sample_rate"] if 44100 != sample_rate: waveform = torchaudio.functional.resample(audio["waveform"], sample_rate, 44100) @@ -72,213 +87,134 @@ class VAEEncodeAudio: waveform = audio["waveform"] t = vae.encode(waveform.movedim(1, -1)) - return ({"samples":t}, ) + return IO.NodeOutput({"samples":t}) -class VAEDecodeAudio: + encode = execute # TODO: remove + + +class VAEDecodeAudio(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}} - RETURN_TYPES = ("AUDIO",) - FUNCTION = "decode" 
+ def define_schema(cls): + return IO.Schema( + node_id="VAEDecodeAudio", + display_name="VAE Decode Audio", + category="latent/audio", + inputs=[ + IO.Latent.Input("samples"), + IO.Vae.Input("vae"), + ], + outputs=[IO.Audio.Output()], + ) - CATEGORY = "latent/audio" - - def decode(self, vae, samples): + @classmethod + def execute(cls, vae, samples) -> IO.NodeOutput: audio = vae.decode(samples["samples"]).movedim(-1, 1) std = torch.std(audio, dim=[1,2], keepdim=True) * 5.0 std[std < 1.0] = 1.0 audio /= std - return ({"waveform": audio, "sample_rate": 44100}, ) + return IO.NodeOutput({"waveform": audio, "sample_rate": 44100}) + + decode = execute # TODO: remove -def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None, quality="128k"): - - filename_prefix += self.prefix_append - full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) - results: list[FileLocator] = [] - - # Prepare metadata dictionary - metadata = {} - if not args.disable_metadata: - if prompt is not None: - metadata["prompt"] = json.dumps(prompt) - if extra_pnginfo is not None: - for x in extra_pnginfo: - metadata[x] = json.dumps(extra_pnginfo[x]) - - # Opus supported sample rates - OPUS_RATES = [8000, 12000, 16000, 24000, 48000] - - for (batch_number, waveform) in enumerate(audio["waveform"].cpu()): - filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) - file = f"{filename_with_batch_num}_{counter:05}_.{format}" - output_path = os.path.join(full_output_folder, file) - - # Use original sample rate initially - sample_rate = audio["sample_rate"] - - # Handle Opus sample rate requirements - if format == "opus": - if sample_rate > 48000: - sample_rate = 48000 - elif sample_rate not in OPUS_RATES: - # Find the next highest supported rate - for rate in sorted(OPUS_RATES): - if rate > sample_rate: - sample_rate = rate - break - if sample_rate not in OPUS_RATES: # Fallback if still not supported - sample_rate = 48000 - - # Resample if necessary - if sample_rate != audio["sample_rate"]: - waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate) - - # Create output with specified format - output_buffer = io.BytesIO() - output_container = av.open(output_buffer, mode='w', format=format) - - # Set metadata on the container - for key, value in metadata.items(): - output_container.metadata[key] = value - - layout = 'mono' if waveform.shape[0] == 1 else 'stereo' - # Set up the output stream with appropriate properties - if format == "opus": - out_stream = output_container.add_stream("libopus", rate=sample_rate, layout=layout) - if quality == "64k": - out_stream.bit_rate = 64000 - elif quality == "96k": - out_stream.bit_rate = 96000 - elif quality == "128k": - out_stream.bit_rate = 128000 - elif quality == "192k": - out_stream.bit_rate = 192000 - elif quality == "320k": - out_stream.bit_rate = 320000 - elif format == "mp3": - out_stream = output_container.add_stream("libmp3lame", rate=sample_rate, layout=layout) - if quality == "V0": - #TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool - out_stream.codec_context.qscale = 1 - elif quality == "128k": - out_stream.bit_rate = 128000 - elif quality == "320k": - out_stream.bit_rate = 320000 - else: #format == "flac": - out_stream = output_container.add_stream("flac", rate=sample_rate, layout=layout) - - frame = 
av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt', layout=layout) - frame.sample_rate = sample_rate - frame.pts = 0 - output_container.mux(out_stream.encode(frame)) - - # Flush encoder - output_container.mux(out_stream.encode(None)) - - # Close containers - output_container.close() - - # Write the output to file - output_buffer.seek(0) - with open(output_path, 'wb') as f: - f.write(output_buffer.getbuffer()) - - results.append({ - "filename": file, - "subfolder": subfolder, - "type": self.type - }) - counter += 1 - - return { "ui": { "audio": results } } - -class SaveAudio: - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - self.prefix_append = "" +class SaveAudio(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="SaveAudio", + display_name="Save Audio (FLAC)", + category="audio", + inputs=[ + IO.Audio.Input("audio"), + IO.String.Input("filename_prefix", default="audio/ComfyUI"), + ], + hidden=[IO.Hidden.prompt, IO.Hidden.extra_pnginfo], + is_output_node=True, + ) @classmethod - def INPUT_TYPES(s): - return {"required": { "audio": ("AUDIO", ), - "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } + def execute(cls, audio, filename_prefix="ComfyUI", format="flac") -> IO.NodeOutput: + return IO.NodeOutput( + ui=UI.AudioSaveHelper.get_save_audio_ui(audio, filename_prefix=filename_prefix, cls=cls, format=format) + ) - RETURN_TYPES = () - FUNCTION = "save_flac" + save_flac = execute # TODO: remove - OUTPUT_NODE = True - CATEGORY = "audio" - - def save_flac(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None): - return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo) - -class SaveAudioMP3: - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - self.prefix_append = "" +class SaveAudioMP3(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="SaveAudioMP3", + display_name="Save Audio (MP3)", + category="audio", + inputs=[ + IO.Audio.Input("audio"), + IO.String.Input("filename_prefix", default="audio/ComfyUI"), + IO.Combo.Input("quality", options=["V0", "128k", "320k"], default="V0"), + ], + hidden=[IO.Hidden.prompt, IO.Hidden.extra_pnginfo], + is_output_node=True, + ) @classmethod - def INPUT_TYPES(s): - return {"required": { "audio": ("AUDIO", ), - "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}), - "quality": (["V0", "128k", "320k"], {"default": "V0"}), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } + def execute(cls, audio, filename_prefix="ComfyUI", format="mp3", quality="128k") -> IO.NodeOutput: + return IO.NodeOutput( + ui=UI.AudioSaveHelper.get_save_audio_ui( + audio, filename_prefix=filename_prefix, cls=cls, format=format, quality=quality + ) + ) - RETURN_TYPES = () - FUNCTION = "save_mp3" + save_mp3 = execute # TODO: remove - OUTPUT_NODE = True - CATEGORY = "audio" - - def save_mp3(self, audio, filename_prefix="ComfyUI", format="mp3", prompt=None, extra_pnginfo=None, quality="128k"): - return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality) - -class SaveAudioOpus: - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - self.prefix_append = "" +class SaveAudioOpus(IO.ComfyNode): + @classmethod + def define_schema(cls): + 
return IO.Schema( + node_id="SaveAudioOpus", + display_name="Save Audio (Opus)", + category="audio", + inputs=[ + IO.Audio.Input("audio"), + IO.String.Input("filename_prefix", default="audio/ComfyUI"), + IO.Combo.Input("quality", options=["64k", "96k", "128k", "192k", "320k"], default="128k"), + ], + hidden=[IO.Hidden.prompt, IO.Hidden.extra_pnginfo], + is_output_node=True, + ) @classmethod - def INPUT_TYPES(s): - return {"required": { "audio": ("AUDIO", ), - "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}), - "quality": (["64k", "96k", "128k", "192k", "320k"], {"default": "128k"}), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } + def execute(cls, audio, filename_prefix="ComfyUI", format="opus", quality="V3") -> IO.NodeOutput: + return IO.NodeOutput( + ui=UI.AudioSaveHelper.get_save_audio_ui( + audio, filename_prefix=filename_prefix, cls=cls, format=format, quality=quality + ) + ) - RETURN_TYPES = () - FUNCTION = "save_opus" + save_opus = execute # TODO: remove - OUTPUT_NODE = True - CATEGORY = "audio" - - def save_opus(self, audio, filename_prefix="ComfyUI", format="opus", prompt=None, extra_pnginfo=None, quality="V3"): - return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality) - -class PreviewAudio(SaveAudio): - def __init__(self): - self.output_dir = folder_paths.get_temp_directory() - self.type = "temp" - self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) +class PreviewAudio(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="PreviewAudio", + display_name="Preview Audio", + category="audio", + inputs=[ + IO.Audio.Input("audio"), + ], + hidden=[IO.Hidden.prompt, IO.Hidden.extra_pnginfo], + is_output_node=True, + ) @classmethod - def INPUT_TYPES(s): - return {"required": - {"audio": ("AUDIO", ), }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } + def execute(cls, audio) -> IO.NodeOutput: + return IO.NodeOutput(ui=UI.PreviewAudio(audio, cls=cls)) + + save_flac = execute # TODO: remove + def f32_pcm(wav: torch.Tensor) -> torch.Tensor: """Convert audio to float 32 bits PCM format.""" @@ -316,26 +252,30 @@ def load(filepath: str) -> tuple[torch.Tensor, int]: wav = f32_pcm(wav) return wav, sr -class LoadAudio: +class LoadAudio(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): + def define_schema(cls): input_dir = folder_paths.get_input_directory() files = folder_paths.filter_files_content_types(os.listdir(input_dir), ["audio", "video"]) - return {"required": {"audio": (sorted(files), {"audio_upload": True})}} + return IO.Schema( + node_id="LoadAudio", + display_name="Load Audio", + category="audio", + inputs=[ + IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)), + ], + outputs=[IO.Audio.Output()], + ) - CATEGORY = "audio" - - RETURN_TYPES = ("AUDIO", ) - FUNCTION = "load" - - def load(self, audio): + @classmethod + def execute(cls, audio) -> IO.NodeOutput: audio_path = folder_paths.get_annotated_filepath(audio) waveform, sample_rate = load(audio_path) audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate} - return (audio, ) + return IO.NodeOutput(audio) @classmethod - def IS_CHANGED(s, audio): + def fingerprint_inputs(cls, audio): image_path = folder_paths.get_annotated_filepath(audio) m = hashlib.sha256() with open(image_path, 'rb') as f: @@ -343,46 +283,69 @@ class LoadAudio: return m.digest().hex() @classmethod - def VALIDATE_INPUTS(s, audio): + def 
validate_inputs(cls, audio): if not folder_paths.exists_annotated_filepath(audio): return "Invalid audio file: {}".format(audio) return True -class RecordAudio: + load = execute # TODO: remove + + +class RecordAudio(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": {"audio": ("AUDIO_RECORD", {})}} + def define_schema(cls): + return IO.Schema( + node_id="RecordAudio", + display_name="Record Audio", + category="audio", + inputs=[ + IO.Custom("AUDIO_RECORD").Input("audio"), + ], + outputs=[IO.Audio.Output()], + ) - CATEGORY = "audio" - - RETURN_TYPES = ("AUDIO", ) - FUNCTION = "load" - - def load(self, audio): + @classmethod + def execute(cls, audio) -> IO.NodeOutput: audio_path = folder_paths.get_annotated_filepath(audio) waveform, sample_rate = load(audio_path) audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate} - return (audio, ) + return IO.NodeOutput(audio) + + load = execute # TODO: remove -class TrimAudioDuration: +class TrimAudioDuration(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "audio": ("AUDIO",), - "start_index": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.01, "tooltip": "Start time in seconds, can be negative to count from the end (supports sub-seconds)."}), - "duration": ("FLOAT", {"default": 60.0, "min": 0.0, "step": 0.01, "tooltip": "Duration in seconds"}), - }, - } + def define_schema(cls): + return IO.Schema( + node_id="TrimAudioDuration", + display_name="Trim Audio Duration", + description="Trim audio tensor into chosen time range.", + category="audio", + inputs=[ + IO.Audio.Input("audio"), + IO.Float.Input( + "start_index", + default=0.0, + min=-0xffffffffffffffff, + max=0xffffffffffffffff, + step=0.01, + tooltip="Start time in seconds, can be negative to count from the end (supports sub-seconds).", + ), + IO.Float.Input( + "duration", + default=60.0, + min=0.0, + step=0.01, + tooltip="Duration in seconds", + ), + ], + outputs=[IO.Audio.Output()], + ) - FUNCTION = "trim" - RETURN_TYPES = ("AUDIO",) - CATEGORY = "audio" - DESCRIPTION = "Trim audio tensor into chosen time range." - - def trim(self, audio, start_index, duration): + @classmethod + def execute(cls, audio, start_index, duration) -> IO.NodeOutput: waveform = audio["waveform"] sample_rate = audio["sample_rate"] audio_length = waveform.shape[-1] @@ -399,23 +362,30 @@ class TrimAudioDuration: if start_frame >= end_frame: raise ValueError("AudioTrim: Start time must be less than end time and be within the audio length.") - return ({"waveform": waveform[..., start_frame:end_frame], "sample_rate": sample_rate},) + return IO.NodeOutput({"waveform": waveform[..., start_frame:end_frame], "sample_rate": sample_rate}) + + trim = execute # TODO: remove -class SplitAudioChannels: +class SplitAudioChannels(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "audio": ("AUDIO",), - }} + def define_schema(cls): + return IO.Schema( + node_id="SplitAudioChannels", + display_name="Split Audio Channels", + description="Separates the audio into left and right channels.", + category="audio", + inputs=[ + IO.Audio.Input("audio"), + ], + outputs=[ + IO.Audio.Output(display_name="left"), + IO.Audio.Output(display_name="right"), + ], + ) - RETURN_TYPES = ("AUDIO", "AUDIO") - RETURN_NAMES = ("left", "right") - FUNCTION = "separate" - CATEGORY = "audio" - DESCRIPTION = "Separates the audio into left and right channels." 
- - def separate(self, audio): + @classmethod + def execute(cls, audio) -> IO.NodeOutput: waveform = audio["waveform"] sample_rate = audio["sample_rate"] @@ -425,7 +395,9 @@ class SplitAudioChannels: left_channel = waveform[..., 0:1, :] right_channel = waveform[..., 1:2, :] - return ({"waveform": left_channel, "sample_rate": sample_rate}, {"waveform": right_channel, "sample_rate": sample_rate}) + return IO.NodeOutput({"waveform": left_channel, "sample_rate": sample_rate}, {"waveform": right_channel, "sample_rate": sample_rate}) + + separate = execute # TODO: remove def match_audio_sample_rates(waveform_1, sample_rate_1, waveform_2, sample_rate_2): @@ -443,21 +415,29 @@ def match_audio_sample_rates(waveform_1, sample_rate_1, waveform_2, sample_rate_ return waveform_1, waveform_2, output_sample_rate -class AudioConcat: +class AudioConcat(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "audio1": ("AUDIO",), - "audio2": ("AUDIO",), - "direction": (['after', 'before'], {"default": 'after', "tooltip": "Whether to append audio2 after or before audio1."}), - }} + def define_schema(cls): + return IO.Schema( + node_id="AudioConcat", + display_name="Audio Concat", + description="Concatenates the audio1 to audio2 in the specified direction.", + category="audio", + inputs=[ + IO.Audio.Input("audio1"), + IO.Audio.Input("audio2"), + IO.Combo.Input( + "direction", + options=['after', 'before'], + default="after", + tooltip="Whether to append audio2 after or before audio1.", + ) + ], + outputs=[IO.Audio.Output()], + ) - RETURN_TYPES = ("AUDIO",) - FUNCTION = "concat" - CATEGORY = "audio" - DESCRIPTION = "Concatenates the audio1 to audio2 in the specified direction." - - def concat(self, audio1, audio2, direction): + @classmethod + def execute(cls, audio1, audio2, direction) -> IO.NodeOutput: waveform_1 = audio1["waveform"] waveform_2 = audio2["waveform"] sample_rate_1 = audio1["sample_rate"] @@ -477,26 +457,33 @@ class AudioConcat: elif direction == 'before': concatenated_audio = torch.cat((waveform_2, waveform_1), dim=2) - return ({"waveform": concatenated_audio, "sample_rate": output_sample_rate},) + return IO.NodeOutput({"waveform": concatenated_audio, "sample_rate": output_sample_rate}) + + concat = execute # TODO: remove -class AudioMerge: +class AudioMerge(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "audio1": ("AUDIO",), - "audio2": ("AUDIO",), - "merge_method": (["add", "mean", "subtract", "multiply"], {"tooltip": "The method used to combine the audio waveforms."}), - }, - } + def define_schema(cls): + return IO.Schema( + node_id="AudioMerge", + display_name="Audio Merge", + description="Combine two audio tracks by overlaying their waveforms.", + category="audio", + inputs=[ + IO.Audio.Input("audio1"), + IO.Audio.Input("audio2"), + IO.Combo.Input( + "merge_method", + options=["add", "mean", "subtract", "multiply"], + tooltip="The method used to combine the audio waveforms.", + ) + ], + outputs=[IO.Audio.Output()], + ) - FUNCTION = "merge" - RETURN_TYPES = ("AUDIO",) - CATEGORY = "audio" - DESCRIPTION = "Combine two audio tracks by overlaying their waveforms." 
- - def merge(self, audio1, audio2, merge_method): + @classmethod + def execute(cls, audio1, audio2, merge_method) -> IO.NodeOutput: waveform_1 = audio1["waveform"] waveform_2 = audio2["waveform"] sample_rate_1 = audio1["sample_rate"] sample_rate_2 = audio2["sample_rate"] @@ -530,85 +517,108 @@ class AudioMerge: if max_val > 1.0: waveform = waveform / max_val - return ({"waveform": waveform, "sample_rate": output_sample_rate},) + return IO.NodeOutput({"waveform": waveform, "sample_rate": output_sample_rate}) + + merge = execute # TODO: remove -class AudioAdjustVolume: +class AudioAdjustVolume(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "audio": ("AUDIO",), - "volume": ("INT", {"default": 1.0, "min": -100, "max": 100, "tooltip": "Volume adjustment in decibels (dB). 0 = no change, +6 = double, -6 = half, etc"}), - }} + def define_schema(cls): + return IO.Schema( + node_id="AudioAdjustVolume", + display_name="Audio Adjust Volume", + category="audio", + inputs=[ + IO.Audio.Input("audio"), + IO.Int.Input( + "volume", + default=1, + min=-100, + max=100, + tooltip="Volume adjustment in decibels (dB). 0 = no change, +6 = double, -6 = half, etc", + ) + ], + outputs=[IO.Audio.Output()], + ) - RETURN_TYPES = ("AUDIO",) - FUNCTION = "adjust_volume" - CATEGORY = "audio" - - def adjust_volume(self, audio, volume): + @classmethod + def execute(cls, audio, volume) -> IO.NodeOutput: if volume == 0: - return (audio,) + return IO.NodeOutput(audio) waveform = audio["waveform"] sample_rate = audio["sample_rate"] gain = 10 ** (volume / 20) waveform = waveform * gain - return ({"waveform": waveform, "sample_rate": sample_rate},) + return IO.NodeOutput({"waveform": waveform, "sample_rate": sample_rate}) + + adjust_volume = execute # TODO: remove -class EmptyAudio: +class EmptyAudio(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { - "duration": ("FLOAT", {"default": 60.0, "min": 0.0, "max": 0xffffffffffffffff, "step": 0.01, "tooltip": "Duration of the empty audio clip in seconds"}), - "sample_rate": ("INT", {"default": 44100, "tooltip": "Sample rate of the empty audio clip."}), - "channels": ("INT", {"default": 2, "min": 1, "max": 2, "tooltip": "Number of audio channels (1 for mono, 2 for stereo)."}), - }} + def define_schema(cls): + return IO.Schema( + node_id="EmptyAudio", + display_name="Empty Audio", + category="audio", + inputs=[ + IO.Float.Input( + "duration", + default=60.0, + min=0.0, + max=0xffffffffffffffff, + step=0.01, + tooltip="Duration of the empty audio clip in seconds", + ), + IO.Float.Input( + "sample_rate", + default=44100, + tooltip="Sample rate of the empty audio clip.", + ), + IO.Float.Input( + "channels", + default=2, + min=1, + max=2, + tooltip="Number of audio channels (1 for mono, 2 for stereo).", + ), + ], + outputs=[IO.Audio.Output()], + ) - RETURN_TYPES = ("AUDIO",) - FUNCTION = "create_empty_audio" - CATEGORY = "audio" - - def create_empty_audio(self, duration, sample_rate, channels): + @classmethod + def execute(cls, duration, sample_rate, channels) -> IO.NodeOutput: num_samples = int(round(duration * sample_rate)) waveform = torch.zeros((1, channels, num_samples), dtype=torch.float32) - return ({"waveform": waveform, "sample_rate": sample_rate},) + return IO.NodeOutput({"waveform": waveform, "sample_rate": sample_rate}) + + create_empty_audio = execute # TODO: remove -NODE_CLASS_MAPPINGS = { - "EmptyLatentAudio": EmptyLatentAudio, - "VAEEncodeAudio": VAEEncodeAudio, - "VAEDecodeAudio": VAEDecodeAudio, - "SaveAudio": SaveAudio, - "SaveAudioMP3": SaveAudioMP3, - 
"SaveAudioOpus": SaveAudioOpus, - "LoadAudio": LoadAudio, - "PreviewAudio": PreviewAudio, - "ConditioningStableAudio": ConditioningStableAudio, - "RecordAudio": RecordAudio, - "TrimAudioDuration": TrimAudioDuration, - "SplitAudioChannels": SplitAudioChannels, - "AudioConcat": AudioConcat, - "AudioMerge": AudioMerge, - "AudioAdjustVolume": AudioAdjustVolume, - "EmptyAudio": EmptyAudio, -} +class AudioExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + EmptyLatentAudio, + VAEEncodeAudio, + VAEDecodeAudio, + SaveAudio, + SaveAudioMP3, + SaveAudioOpus, + LoadAudio, + PreviewAudio, + ConditioningStableAudio, + RecordAudio, + TrimAudioDuration, + SplitAudioChannels, + AudioConcat, + AudioMerge, + AudioAdjustVolume, + EmptyAudio, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "EmptyLatentAudio": "Empty Latent Audio", - "VAEEncodeAudio": "VAE Encode Audio", - "VAEDecodeAudio": "VAE Decode Audio", - "PreviewAudio": "Preview Audio", - "LoadAudio": "Load Audio", - "SaveAudio": "Save Audio (FLAC)", - "SaveAudioMP3": "Save Audio (MP3)", - "SaveAudioOpus": "Save Audio (Opus)", - "RecordAudio": "Record Audio", - "TrimAudioDuration": "Trim Audio Duration", - "SplitAudioChannels": "Split Audio Channels", - "AudioConcat": "Audio Concat", - "AudioMerge": "Audio Merge", - "AudioAdjustVolume": "Audio Adjust Volume", - "EmptyAudio": "Empty Audio", -} +async def comfy_entrypoint() -> AudioExtension: + return AudioExtension() From ecdc8697d53919a9178bf53ef327a110582db8ea Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 3 Dec 2025 19:49:28 -0800 Subject: [PATCH 0984/1073] Qwen Image Lora training fix from #11090 (#11094) --- comfy_extras/nodes_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index cb24ab709..19b8baaf4 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -623,7 +623,7 @@ class TrainLoraNode(io.ComfyNode): noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(seed) if multi_res: # use first latent as dummy latent if multi_res - latents = latents[0].repeat(num_images, 1, 1, 1) + latents = latents[0].repeat((num_images,) + ((1,) * (latents[0].ndim - 1))) guider.sample( noise.generate_noise({"samples": latents}), latents, From ea17add3c62197b10fd0b71d9169d339adc55c47 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 3 Dec 2025 20:15:15 -0800 Subject: [PATCH 0985/1073] Fix case where text encoders were running on the CPU instead of GPU.
(#11095) --- comfy/sd.py | 2 ++ comfy/sd1_clip.py | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/comfy/sd.py b/comfy/sd.py index f9e5efab5..734bd2845 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -193,6 +193,7 @@ class CLIP: self.cond_stage_model.set_clip_options({"projected_pooled": False}) self.load_model() + self.cond_stage_model.set_clip_options({"execution_device": self.patcher.load_device}) all_hooks.reset() self.patcher.patch_hooks(None) if show_pbar: @@ -240,6 +241,7 @@ class CLIP: self.cond_stage_model.set_clip_options({"projected_pooled": False}) self.load_model() + self.cond_stage_model.set_clip_options({"execution_device": self.patcher.load_device}) o = self.cond_stage_model.encode_token_weights(tokens) cond, pooled = o[:2] if return_dict: diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 0fc9ab3db..503a51843 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -147,6 +147,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): self.layer_norm_hidden_state = layer_norm_hidden_state self.return_projected_pooled = return_projected_pooled self.return_attention_masks = return_attention_masks + self.execution_device = None if layer == "hidden": assert layer_idx is not None @@ -163,6 +164,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): def set_clip_options(self, options): layer_idx = options.get("layer", self.layer_idx) self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled) + self.execution_device = options.get("execution_device", self.execution_device) if isinstance(self.layer, list) or self.layer == "all": pass elif layer_idx is None or abs(layer_idx) > self.num_layers: @@ -175,6 +177,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): self.layer = self.options_default[0] self.layer_idx = self.options_default[1] self.return_projected_pooled = self.options_default[2] + self.execution_device = None def process_tokens(self, tokens, device): end_token = self.special_tokens.get("end", None) @@ -258,7 +261,11 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): return torch.cat(embeds_out), torch.tensor(attention_masks, device=device, dtype=torch.long), num_tokens, embeds_info def forward(self, tokens): - device = self.transformer.get_input_embeddings().weight.device + if self.execution_device is None: + device = self.transformer.get_input_embeddings().weight.device + else: + device = self.execution_device + embeds, attention_mask, num_tokens, embeds_info = self.process_tokens(tokens, device) attention_mask_model = None From 6be85c7920224b45bbc6417e00147815e78c12a9 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Thu, 4 Dec 2025 14:28:44 +1000 Subject: [PATCH 0986/1073] mp: use look-ahead actuals for stream offload VRAM calculation (#11096) TIL that the WAN TE has a 2GB weight followed by 16MB as the next size down. This means that users with 8GB of VRAM would fully offload the TE in async offload mode, as the estimate just multiplied this giant size by the number of streams. Do the more complex logic of summing up the upcoming to-load weight sizes to avoid triple counting this massive weight. Partial unload does the converse, recording the NS most recent unloads as it goes.
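As a rough standalone sketch of the estimate change (simplified to one size per module, whereas the real code in comfy/model_patcher.py tracks module_offload_mem and module_mem separately):

    # Hypothetical illustration; sizes in bytes, sorted largest-first like the load list.
    NUM_STREAMS = 2
    sizes = [2_000_000_000, 16_000_000, 16_000_000, 16_000_000]  # e.g. the WAN TE above

    def worst_case_estimate(i):
        # old behaviour: assume every offload stream ferries another copy of module i
        return sizes[i] + NUM_STREAMS * sizes[i]

    def look_ahead_estimate(i):
        # new behaviour: sum the module sizes that will actually be in flight next
        return sizes[i] + sum(sizes[i + 1 : i + 1 + NUM_STREAMS])

    print(worst_case_estimate(0))  # 6_000_000_000 -- the 2GB weight is counted three times
    print(look_ahead_estimate(0))  # 2_032_000_000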
--- comfy/model_patcher.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index df2d8e827..3dcac3eef 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -699,12 +699,12 @@ class ModelPatcher: offloaded = [] offload_buffer = 0 loading.sort(reverse=True) - for x in loading: + for i, x in enumerate(loading): module_offload_mem, module_mem, n, m, params = x lowvram_weight = False - potential_offload = max(offload_buffer, module_offload_mem + (comfy.model_management.NUM_STREAMS * module_mem)) + potential_offload = max(offload_buffer, module_offload_mem + sum([ x1[1] for x1 in loading[i+1:i+1+comfy.model_management.NUM_STREAMS]])) lowvram_fits = mem_counter + module_mem + potential_offload < lowvram_model_memory weight_key = "{}.weight".format(n) @@ -876,14 +876,18 @@ class ModelPatcher: patch_counter = 0 unload_list = self._load_list() unload_list.sort() + offload_buffer = self.model.model_offload_buffer_memory + if len(unload_list) > 0: + NS = comfy.model_management.NUM_STREAMS + offload_weight_factor = [ min(offload_buffer / (NS + 1), unload_list[0][1]) ] * NS for unload in unload_list: if memory_to_free + offload_buffer - self.model.model_offload_buffer_memory < memory_freed: break module_offload_mem, module_mem, n, m, params = unload - potential_offload = module_offload_mem + (comfy.model_management.NUM_STREAMS * module_mem) + potential_offload = module_offload_mem + sum(offload_weight_factor) lowvram_possible = hasattr(m, "comfy_cast_weights") if hasattr(m, "comfy_patched_weights") and m.comfy_patched_weights == True: @@ -935,6 +939,8 @@ class ModelPatcher: m.comfy_patched_weights = False memory_freed += module_mem offload_buffer = max(offload_buffer, potential_offload) + offload_weight_factor.append(module_mem) + offload_weight_factor.pop(0) logging.debug("freed {}".format(n)) for param in params: From f4bdf5f8302ef10db99644a8672e614ddb29c473 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Fri, 5 Dec 2025 03:50:04 +1000 Subject: [PATCH 0987/1073] sd: revise hy VAE VRAM (#11105) This was recently collapsed down to rolling the VAE through temporal. Clamp the time dimension. --- comfy/sd.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 734bd2845..fe4dd65f8 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -483,8 +483,10 @@ class VAE: self.latent_dim = 3 self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.conv.weight"].shape[1] self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=sd['post_quant_conv.weight'].shape[1]) - self.memory_used_decode = lambda shape, dtype: (1500 * shape[2] * shape[3] * shape[4] * (4 * 8 * 8)) * model_management.dtype_size(dtype) - self.memory_used_encode = lambda shape, dtype: (900 * max(shape[2], 2) * shape[3] * shape[4]) * model_management.dtype_size(dtype) + #This is likely to significantly over-estimate with single image or low frame counts as the + #implementation is able to completely skip caching.
Rework if used as an image only VAE + self.memory_used_decode = lambda shape, dtype: (2800 * min(8, ((shape[2] - 1) * 4) + 1) * shape[3] * shape[4] * (8 * 8)) * model_management.dtype_size(dtype) + self.memory_used_encode = lambda shape, dtype: (1400 * min(9, shape[2]) * shape[3] * shape[4]) * model_management.dtype_size(dtype) self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] elif "decoder.unpatcher3d.wavelets" in sd: self.upscale_ratio = (lambda a: max(0, a * 8 - 7), 8, 8) From 9bc893c5bbd2838bdd15ebd40e3a3e548ce3e4f0 Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Fri, 5 Dec 2025 03:50:36 +1000 Subject: [PATCH 0988/1073] sd: bump HY1.5 VAE estimate (#11107) I'm able to push VRAM above the estimate on partial unload. Bump the estimate. This is experimentally determined with a 720P and 480P datapoint, calibrating for 24GB VRAM total. --- comfy/sd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd.py b/comfy/sd.py index fe4dd65f8..03bdb33d5 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -471,7 +471,7 @@ class VAE: decoder_config={'target': "comfy.ldm.hunyuan_video.vae_refiner.Decoder", 'params': ddconfig}) self.memory_used_encode = lambda shape, dtype: (1400 * 9 * shape[-2] * shape[-1]) * model_management.dtype_size(dtype) - self.memory_used_decode = lambda shape, dtype: (2800 * 4 * shape[-2] * shape[-1] * 16 * 16) * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (3600 * 4 * shape[-2] * shape[-1] * 16 * 16) * model_management.dtype_size(dtype) elif "decoder.conv_in.conv.weight" in sd: ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} ddconfig["conv3d"] = True From 3c8456223c5f6a41af7d99219b391c8c58acb552 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 5 Dec 2025 00:05:28 +0200 Subject: [PATCH 0989/1073] [API Nodes]: fixes and refactor (#11104) * chore(api-nodes): applied ruff's pyupgrade (python3.10) to the api-nodes client folder * chore(api-nodes): add validate_video_frame_count function from LTX PR * chore(api-nodes): replace deprecated V1 imports * fix(api-nodes): the types returned by the "poll_op" function are now correct.
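The pyupgrade pass is mechanical; a minimal before/after sketch of the rewrite it applies (illustrative type aliases, not lines taken from this diff):

    # before: typing-module generics
    from typing import Callable, Optional, Union
    Extractor = Callable[[dict], Optional[float]]
    Payload = Union[dict, list]

    # after: PEP 604 unions and collections.abc (Python 3.10+)
    from collections.abc import Callable
    Extractor = Callable[[dict], float | None]
    Payload = dict | list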
--- comfy_api_nodes/util/__init__.py | 2 + comfy_api_nodes/util/_helpers.py | 14 +-- comfy_api_nodes/util/client.py | 145 ++++++++++++----------- comfy_api_nodes/util/conversions.py | 21 ++-- comfy_api_nodes/util/download_helpers.py | 20 ++-- comfy_api_nodes/util/request_logger.py | 2 - comfy_api_nodes/util/upload_helpers.py | 16 ++- comfy_api_nodes/util/validation_utils.py | 61 ++++++---- 8 files changed, 146 insertions(+), 135 deletions(-) diff --git a/comfy_api_nodes/util/__init__.py b/comfy_api_nodes/util/__init__.py index 80292fb3c..4cc22abfb 100644 --- a/comfy_api_nodes/util/__init__.py +++ b/comfy_api_nodes/util/__init__.py @@ -47,6 +47,7 @@ from .validation_utils import ( validate_string, validate_video_dimensions, validate_video_duration, + validate_video_frame_count, ) __all__ = [ @@ -94,6 +95,7 @@ __all__ = [ "validate_string", "validate_video_dimensions", "validate_video_duration", + "validate_video_frame_count", # Misc functions "get_fs_object_size", ] diff --git a/comfy_api_nodes/util/_helpers.py b/comfy_api_nodes/util/_helpers.py index 328fe5227..491e6b6a8 100644 --- a/comfy_api_nodes/util/_helpers.py +++ b/comfy_api_nodes/util/_helpers.py @@ -2,8 +2,8 @@ import asyncio import contextlib import os import time +from collections.abc import Callable from io import BytesIO -from typing import Callable, Optional, Union from comfy.cli_args import args from comfy.model_management import processing_interrupted @@ -35,12 +35,12 @@ def default_base_url() -> str: async def sleep_with_interrupt( seconds: float, - node_cls: Optional[type[IO.ComfyNode]], - label: Optional[str] = None, - start_ts: Optional[float] = None, - estimated_total: Optional[int] = None, + node_cls: type[IO.ComfyNode] | None, + label: str | None = None, + start_ts: float | None = None, + estimated_total: int | None = None, *, - display_callback: Optional[Callable[[type[IO.ComfyNode], str, int, Optional[int]], None]] = None, + display_callback: Callable[[type[IO.ComfyNode], str, int, int | None], None] | None = None, ): """ Sleep in 1s slices while: @@ -65,7 +65,7 @@ def mimetype_to_extension(mime_type: str) -> str: return mime_type.split("/")[-1].lower() -def get_fs_object_size(path_or_object: Union[str, BytesIO]) -> int: +def get_fs_object_size(path_or_object: str | BytesIO) -> int: if isinstance(path_or_object, str): return os.path.getsize(path_or_object) return len(path_or_object.getvalue()) diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py index bf01d7d36..bf37cba5f 100644 --- a/comfy_api_nodes/util/client.py +++ b/comfy_api_nodes/util/client.py @@ -4,10 +4,11 @@ import json import logging import time import uuid +from collections.abc import Callable, Iterable from dataclasses import dataclass from enum import Enum from io import BytesIO -from typing import Any, Callable, Iterable, Literal, Optional, Type, TypeVar, Union +from typing import Any, Literal, TypeVar from urllib.parse import urljoin, urlparse import aiohttp @@ -37,8 +38,8 @@ class ApiEndpoint: path: str, method: Literal["GET", "POST", "PUT", "DELETE", "PATCH"] = "GET", *, - query_params: Optional[dict[str, Any]] = None, - headers: Optional[dict[str, str]] = None, + query_params: dict[str, Any] | None = None, + headers: dict[str, str] | None = None, ): self.path = path self.method = method @@ -52,18 +53,18 @@ class _RequestConfig: endpoint: ApiEndpoint timeout: float content_type: str - data: Optional[dict[str, Any]] - files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]] - multipart_parser: Optional[Callable] + 
data: dict[str, Any] | None + files: dict[str, Any] | list[tuple[str, Any]] | None + multipart_parser: Callable | None max_retries: int retry_delay: float retry_backoff: float wait_label: str = "Waiting" monitor_progress: bool = True - estimated_total: Optional[int] = None - final_label_on_success: Optional[str] = "Completed" - progress_origin_ts: Optional[float] = None - price_extractor: Optional[Callable[[dict[str, Any]], Optional[float]]] = None + estimated_total: int | None = None + final_label_on_success: str | None = "Completed" + progress_origin_ts: float | None = None + price_extractor: Callable[[dict[str, Any]], float | None] | None = None @dataclass @@ -71,10 +72,10 @@ class _PollUIState: started: float status_label: str = "Queued" is_queued: bool = True - price: Optional[float] = None - estimated_duration: Optional[int] = None + price: float | None = None + estimated_duration: int | None = None base_processing_elapsed: float = 0.0 # sum of completed active intervals - active_since: Optional[float] = None # start time of current active interval (None if queued) + active_since: float | None = None # start time of current active interval (None if queued) _RETRY_STATUS = {408, 429, 500, 502, 503, 504} @@ -87,20 +88,20 @@ async def sync_op( cls: type[IO.ComfyNode], endpoint: ApiEndpoint, *, - response_model: Type[M], - price_extractor: Optional[Callable[[M], Optional[float]]] = None, - data: Optional[BaseModel] = None, - files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]] = None, + response_model: type[M], + price_extractor: Callable[[M | Any], float | None] | None = None, + data: BaseModel | None = None, + files: dict[str, Any] | list[tuple[str, Any]] | None = None, content_type: str = "application/json", timeout: float = 3600.0, - multipart_parser: Optional[Callable] = None, + multipart_parser: Callable | None = None, max_retries: int = 3, retry_delay: float = 1.0, retry_backoff: float = 2.0, wait_label: str = "Waiting for server", - estimated_duration: Optional[int] = None, - final_label_on_success: Optional[str] = "Completed", - progress_origin_ts: Optional[float] = None, + estimated_duration: int | None = None, + final_label_on_success: str | None = "Completed", + progress_origin_ts: float | None = None, monitor_progress: bool = True, ) -> M: raw = await sync_op_raw( @@ -131,22 +132,22 @@ async def poll_op( cls: type[IO.ComfyNode], poll_endpoint: ApiEndpoint, *, - response_model: Type[M], - status_extractor: Callable[[M], Optional[Union[str, int]]], - progress_extractor: Optional[Callable[[M], Optional[int]]] = None, - price_extractor: Optional[Callable[[M], Optional[float]]] = None, - completed_statuses: Optional[list[Union[str, int]]] = None, - failed_statuses: Optional[list[Union[str, int]]] = None, - queued_statuses: Optional[list[Union[str, int]]] = None, - data: Optional[BaseModel] = None, + response_model: type[M], + status_extractor: Callable[[M | Any], str | int | None], + progress_extractor: Callable[[M | Any], int | None] | None = None, + price_extractor: Callable[[M | Any], float | None] | None = None, + completed_statuses: list[str | int] | None = None, + failed_statuses: list[str | int] | None = None, + queued_statuses: list[str | int] | None = None, + data: BaseModel | None = None, poll_interval: float = 5.0, max_poll_attempts: int = 120, timeout_per_poll: float = 120.0, max_retries_per_poll: int = 3, retry_delay_per_poll: float = 1.0, retry_backoff_per_poll: float = 2.0, - estimated_duration: Optional[int] = None, - cancel_endpoint: 
Optional[ApiEndpoint] = None, + estimated_duration: int | None = None, + cancel_endpoint: ApiEndpoint | None = None, cancel_timeout: float = 10.0, ) -> M: raw = await poll_op_raw( @@ -178,22 +179,22 @@ async def sync_op_raw( cls: type[IO.ComfyNode], endpoint: ApiEndpoint, *, - price_extractor: Optional[Callable[[dict[str, Any]], Optional[float]]] = None, - data: Optional[Union[dict[str, Any], BaseModel]] = None, - files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]] = None, + price_extractor: Callable[[dict[str, Any]], float | None] | None = None, + data: dict[str, Any] | BaseModel | None = None, + files: dict[str, Any] | list[tuple[str, Any]] | None = None, content_type: str = "application/json", timeout: float = 3600.0, - multipart_parser: Optional[Callable] = None, + multipart_parser: Callable | None = None, max_retries: int = 3, retry_delay: float = 1.0, retry_backoff: float = 2.0, wait_label: str = "Waiting for server", - estimated_duration: Optional[int] = None, + estimated_duration: int | None = None, as_binary: bool = False, - final_label_on_success: Optional[str] = "Completed", - progress_origin_ts: Optional[float] = None, + final_label_on_success: str | None = "Completed", + progress_origin_ts: float | None = None, monitor_progress: bool = True, -) -> Union[dict[str, Any], bytes]: +) -> dict[str, Any] | bytes: """ Make a single network request. - If as_binary=False (default): returns JSON dict (or {'_raw': ''} if non-JSON). @@ -229,21 +230,21 @@ async def poll_op_raw( cls: type[IO.ComfyNode], poll_endpoint: ApiEndpoint, *, - status_extractor: Callable[[dict[str, Any]], Optional[Union[str, int]]], - progress_extractor: Optional[Callable[[dict[str, Any]], Optional[int]]] = None, - price_extractor: Optional[Callable[[dict[str, Any]], Optional[float]]] = None, - completed_statuses: Optional[list[Union[str, int]]] = None, - failed_statuses: Optional[list[Union[str, int]]] = None, - queued_statuses: Optional[list[Union[str, int]]] = None, - data: Optional[Union[dict[str, Any], BaseModel]] = None, + status_extractor: Callable[[dict[str, Any]], str | int | None], + progress_extractor: Callable[[dict[str, Any]], int | None] | None = None, + price_extractor: Callable[[dict[str, Any]], float | None] | None = None, + completed_statuses: list[str | int] | None = None, + failed_statuses: list[str | int] | None = None, + queued_statuses: list[str | int] | None = None, + data: dict[str, Any] | BaseModel | None = None, poll_interval: float = 5.0, max_poll_attempts: int = 120, timeout_per_poll: float = 120.0, max_retries_per_poll: int = 3, retry_delay_per_poll: float = 1.0, retry_backoff_per_poll: float = 2.0, - estimated_duration: Optional[int] = None, - cancel_endpoint: Optional[ApiEndpoint] = None, + estimated_duration: int | None = None, + cancel_endpoint: ApiEndpoint | None = None, cancel_timeout: float = 10.0, ) -> dict[str, Any]: """ @@ -261,7 +262,7 @@ async def poll_op_raw( consumed_attempts = 0 # counts only non-queued polls progress_bar = utils.ProgressBar(100) if progress_extractor else None - last_progress: Optional[int] = None + last_progress: int | None = None state = _PollUIState(started=started, estimated_duration=estimated_duration) stop_ticker = asyncio.Event() @@ -420,10 +421,10 @@ async def poll_op_raw( def _display_text( node_cls: type[IO.ComfyNode], - text: Optional[str], + text: str | None, *, - status: Optional[Union[str, int]] = None, - price: Optional[float] = None, + status: str | int | None = None, + price: float | None = None, ) -> None: display_lines: 
list[str] = [] if status: @@ -440,13 +441,13 @@ def _display_text( def _display_time_progress( node_cls: type[IO.ComfyNode], - status: Optional[Union[str, int]], + status: str | int | None, elapsed_seconds: int, - estimated_total: Optional[int] = None, + estimated_total: int | None = None, *, - price: Optional[float] = None, - is_queued: Optional[bool] = None, - processing_elapsed_seconds: Optional[int] = None, + price: float | None = None, + is_queued: bool | None = None, + processing_elapsed_seconds: int | None = None, ) -> None: if estimated_total is not None and estimated_total > 0 and is_queued is False: pe = processing_elapsed_seconds if processing_elapsed_seconds is not None else elapsed_seconds @@ -488,7 +489,7 @@ def _unpack_tuple(t: tuple) -> tuple[str, Any, str]: raise ValueError("files tuple must be (filename, file[, content_type])") -def _merge_params(endpoint_params: dict[str, Any], method: str, data: Optional[dict[str, Any]]) -> dict[str, Any]: +def _merge_params(endpoint_params: dict[str, Any], method: str, data: dict[str, Any] | None) -> dict[str, Any]: params = dict(endpoint_params or {}) if method.upper() == "GET" and data: for k, v in data.items(): @@ -534,9 +535,9 @@ def _generate_operation_id(method: str, path: str, attempt: int) -> str: def _snapshot_request_body_for_logging( content_type: str, method: str, - data: Optional[dict[str, Any]], - files: Optional[Union[dict[str, Any], list[tuple[str, Any]]]], -) -> Optional[Union[dict[str, Any], str]]: + data: dict[str, Any] | None, + files: dict[str, Any] | list[tuple[str, Any]] | None, +) -> dict[str, Any] | str | None: if method.upper() == "GET": return None if content_type == "multipart/form-data": @@ -586,13 +587,13 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool): attempt = 0 delay = cfg.retry_delay operation_succeeded: bool = False - final_elapsed_seconds: Optional[int] = None - extracted_price: Optional[float] = None + final_elapsed_seconds: int | None = None + extracted_price: float | None = None while True: attempt += 1 stop_event = asyncio.Event() - monitor_task: Optional[asyncio.Task] = None - sess: Optional[aiohttp.ClientSession] = None + monitor_task: asyncio.Task | None = None + sess: aiohttp.ClientSession | None = None operation_id = _generate_operation_id(method, cfg.endpoint.path, attempt) logging.debug("[DEBUG] HTTP %s %s (attempt %d)", method, url, attempt) @@ -887,7 +888,7 @@ async def _request_base(cfg: _RequestConfig, expect_binary: bool): ) -def _validate_or_raise(response_model: Type[M], payload: Any) -> M: +def _validate_or_raise(response_model: type[M], payload: Any) -> M: try: return response_model.model_validate(payload) except Exception as e: @@ -902,9 +903,9 @@ def _validate_or_raise(response_model: Type[M], payload: Any) -> M: def _wrap_model_extractor( - response_model: Type[M], - extractor: Optional[Callable[[M], Any]], -) -> Optional[Callable[[dict[str, Any]], Any]]: + response_model: type[M], + extractor: Callable[[M], Any] | None, +) -> Callable[[dict[str, Any]], Any] | None: """Wrap a typed extractor so it can be used by the dict-based poller. Validates the dict into `response_model` before invoking `extractor`. 
Uses a small per-wrapper cache keyed by `id(dict)` to avoid re-validating @@ -929,10 +930,10 @@ def _wrap_model_extractor( return _wrapped -def _normalize_statuses(values: Optional[Iterable[Union[str, int]]]) -> set[Union[str, int]]: +def _normalize_statuses(values: Iterable[str | int] | None) -> set[str | int]: if not values: return set() - out: set[Union[str, int]] = set() + out: set[str | int] = set() for v in values: nv = _normalize_status_value(v) if nv is not None: @@ -940,7 +941,7 @@ def _normalize_statuses(values: Optional[Iterable[Union[str, int]]]) -> set[Unio return out -def _normalize_status_value(val: Union[str, int, None]) -> Union[str, int, None]: +def _normalize_status_value(val: str | int | None) -> str | int | None: if isinstance(val, str): return val.strip().lower() return val diff --git a/comfy_api_nodes/util/conversions.py b/comfy_api_nodes/util/conversions.py index 971dc57de..c57457580 100644 --- a/comfy_api_nodes/util/conversions.py +++ b/comfy_api_nodes/util/conversions.py @@ -4,7 +4,6 @@ import math import mimetypes import uuid from io import BytesIO -from typing import Optional import av import numpy as np @@ -12,8 +11,7 @@ import torch from PIL import Image from comfy.utils import common_upscale -from comfy_api.latest import Input, InputImpl -from comfy_api.util import VideoCodec, VideoContainer +from comfy_api.latest import Input, InputImpl, Types from ._helpers import mimetype_to_extension @@ -57,7 +55,7 @@ def image_tensor_pair_to_batch(image1: torch.Tensor, image2: torch.Tensor) -> to def tensor_to_bytesio( image: torch.Tensor, - name: Optional[str] = None, + name: str | None = None, total_pixels: int = 2048 * 2048, mime_type: str = "image/png", ) -> BytesIO: @@ -177,8 +175,8 @@ def audio_to_base64_string(audio: Input.Audio, container_format: str = "mp4", co def video_to_base64_string( video: Input.Video, - container_format: VideoContainer = None, - codec: VideoCodec = None + container_format: Types.VideoContainer | None = None, + codec: Types.VideoCodec | None = None, ) -> str: """ Converts a video input to a base64 string. @@ -189,12 +187,11 @@ def video_to_base64_string( codec: Optional codec to use (defaults to video.codec if available) """ video_bytes_io = BytesIO() - - # Use provided format/codec if specified, otherwise use video's own if available - format_to_use = container_format if container_format is not None else getattr(video, 'container', VideoContainer.MP4) - codec_to_use = codec if codec is not None else getattr(video, 'codec', VideoCodec.H264) - - video.save_to(video_bytes_io, format=format_to_use, codec=codec_to_use) + video.save_to( + video_bytes_io, + format=container_format or getattr(video, "container", Types.VideoContainer.MP4), + codec=codec or getattr(video, "codec", Types.VideoCodec.H264), + ) video_bytes_io.seek(0) return base64.b64encode(video_bytes_io.getvalue()).decode("utf-8") diff --git a/comfy_api_nodes/util/download_helpers.py b/comfy_api_nodes/util/download_helpers.py index 14207dc68..3e0d0352d 100644 --- a/comfy_api_nodes/util/download_helpers.py +++ b/comfy_api_nodes/util/download_helpers.py @@ -3,15 +3,15 @@ import contextlib import uuid from io import BytesIO from pathlib import Path -from typing import IO, Optional, Union +from typing import IO from urllib.parse import urljoin, urlparse import aiohttp import torch from aiohttp.client_exceptions import ClientError, ContentTypeError -from comfy_api.input_impl import VideoFromFile from comfy_api.latest import IO as COMFY_IO +from comfy_api.latest import InputImpl from . 
import request_logger from ._helpers import ( @@ -29,9 +29,9 @@ _RETRY_STATUS = {408, 429, 500, 502, 503, 504} async def download_url_to_bytesio( url: str, - dest: Optional[Union[BytesIO, IO[bytes], str, Path]], + dest: BytesIO | IO[bytes] | str | Path | None, *, - timeout: Optional[float] = None, + timeout: float | None = None, max_retries: int = 5, retry_delay: float = 1.0, retry_backoff: float = 2.0, @@ -71,10 +71,10 @@ async def download_url_to_bytesio( is_path_sink = isinstance(dest, (str, Path)) fhandle = None - session: Optional[aiohttp.ClientSession] = None - stop_evt: Optional[asyncio.Event] = None - monitor_task: Optional[asyncio.Task] = None - req_task: Optional[asyncio.Task] = None + session: aiohttp.ClientSession | None = None + stop_evt: asyncio.Event | None = None + monitor_task: asyncio.Task | None = None + req_task: asyncio.Task | None = None try: with contextlib.suppress(Exception): @@ -234,11 +234,11 @@ async def download_url_to_video_output( timeout: float = None, max_retries: int = 5, cls: type[COMFY_IO.ComfyNode] = None, -) -> VideoFromFile: +) -> InputImpl.VideoFromFile: """Downloads a video from a URL and returns a `VIDEO` output.""" result = BytesIO() await download_url_to_bytesio(video_url, result, timeout=timeout, max_retries=max_retries, cls=cls) - return VideoFromFile(result) + return InputImpl.VideoFromFile(result) async def download_url_as_bytesio( diff --git a/comfy_api_nodes/util/request_logger.py b/comfy_api_nodes/util/request_logger.py index ac52e2eab..e0cb4428d 100644 --- a/comfy_api_nodes/util/request_logger.py +++ b/comfy_api_nodes/util/request_logger.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import datetime import hashlib import json diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py index 0532bea9a..b8d33f4d1 100644 --- a/comfy_api_nodes/util/upload_helpers.py +++ b/comfy_api_nodes/util/upload_helpers.py @@ -4,15 +4,13 @@ import logging import time import uuid from io import BytesIO -from typing import Optional from urllib.parse import urlparse import aiohttp import torch from pydantic import BaseModel, Field -from comfy_api.latest import IO, Input -from comfy_api.util import VideoCodec, VideoContainer +from comfy_api.latest import IO, Input, Types from . import request_logger from ._helpers import is_processing_interrupted, sleep_with_interrupt @@ -32,7 +30,7 @@ from .conversions import ( class UploadRequest(BaseModel): file_name: str = Field(..., description="Filename to upload") - content_type: Optional[str] = Field( + content_type: str | None = Field( None, description="Mime type of the file. For example: image/png, image/jpeg, video/mp4, etc.", ) @@ -56,7 +54,7 @@ async def upload_images_to_comfyapi( Uploads images to ComfyUI API and returns download URLs. To upload multiple images, stack them in the batch dimension first. 
""" - # if batch, try to upload each file if max_images is greater than 0 + # if batched, try to upload each file if max_images is greater than 0 download_urls: list[str] = [] is_batch = len(image.shape) > 3 batch_len = image.shape[0] if is_batch else 1 @@ -100,9 +98,9 @@ async def upload_video_to_comfyapi( cls: type[IO.ComfyNode], video: Input.Video, *, - container: VideoContainer = VideoContainer.MP4, - codec: VideoCodec = VideoCodec.H264, - max_duration: Optional[int] = None, + container: Types.VideoContainer = Types.VideoContainer.MP4, + codec: Types.VideoCodec = Types.VideoCodec.H264, + max_duration: int | None = None, wait_label: str | None = "Uploading", ) -> str: """ @@ -220,7 +218,7 @@ async def upload_file( return monitor_task = asyncio.create_task(_monitor()) - sess: Optional[aiohttp.ClientSession] = None + sess: aiohttp.ClientSession | None = None try: try: request_logger.log_request_response( diff --git a/comfy_api_nodes/util/validation_utils.py b/comfy_api_nodes/util/validation_utils.py index ec7006aed..f01edea96 100644 --- a/comfy_api_nodes/util/validation_utils.py +++ b/comfy_api_nodes/util/validation_utils.py @@ -1,9 +1,7 @@ import logging -from typing import Optional import torch -from comfy_api.input.video_types import VideoInput from comfy_api.latest import Input @@ -18,10 +16,10 @@ def get_image_dimensions(image: torch.Tensor) -> tuple[int, int]: def validate_image_dimensions( image: torch.Tensor, - min_width: Optional[int] = None, - max_width: Optional[int] = None, - min_height: Optional[int] = None, - max_height: Optional[int] = None, + min_width: int | None = None, + max_width: int | None = None, + min_height: int | None = None, + max_height: int | None = None, ): height, width = get_image_dimensions(image) @@ -37,8 +35,8 @@ def validate_image_dimensions( def validate_image_aspect_ratio( image: torch.Tensor, - min_ratio: Optional[tuple[float, float]] = None, # e.g. (1, 4) - max_ratio: Optional[tuple[float, float]] = None, # e.g. (4, 1) + min_ratio: tuple[float, float] | None = None, # e.g. (1, 4) + max_ratio: tuple[float, float] | None = None, # e.g. (4, 1) *, strict: bool = True, # True -> (min, max); False -> [min, max] ) -> float: @@ -54,8 +52,8 @@ def validate_image_aspect_ratio( def validate_images_aspect_ratio_closeness( first_image: torch.Tensor, second_image: torch.Tensor, - min_rel: float, # e.g. 0.8 - max_rel: float, # e.g. 1.25 + min_rel: float, # e.g. 0.8 + max_rel: float, # e.g. 1.25 *, strict: bool = False, # True -> (min, max); False -> [min, max] ) -> float: @@ -84,8 +82,8 @@ def validate_images_aspect_ratio_closeness( def validate_aspect_ratio_string( aspect_ratio: str, - min_ratio: Optional[tuple[float, float]] = None, # e.g. (1, 4) - max_ratio: Optional[tuple[float, float]] = None, # e.g. (4, 1) + min_ratio: tuple[float, float] | None = None, # e.g. (1, 4) + max_ratio: tuple[float, float] | None = None, # e.g. 
(4, 1) *, strict: bool = False, # True -> (min, max); False -> [min, max] ) -> float: @@ -97,10 +95,10 @@ def validate_aspect_ratio_string( def validate_video_dimensions( video: Input.Video, - min_width: Optional[int] = None, - max_width: Optional[int] = None, - min_height: Optional[int] = None, - max_height: Optional[int] = None, + min_width: int | None = None, + max_width: int | None = None, + min_height: int | None = None, + max_height: int | None = None, ): try: width, height = video.get_dimensions() @@ -120,8 +118,8 @@ def validate_video_dimensions( def validate_video_duration( video: Input.Video, - min_duration: Optional[float] = None, - max_duration: Optional[float] = None, + min_duration: float | None = None, + max_duration: float | None = None, ): try: duration = video.get_duration() @@ -136,6 +134,23 @@ def validate_video_duration( raise ValueError(f"Video duration must be at most {max_duration}s, got {duration}s") +def validate_video_frame_count( + video: Input.Video, + min_frame_count: int | None = None, + max_frame_count: int | None = None, +): + try: + frame_count = video.get_frame_count() + except Exception as e: + logging.error("Error getting frame count of video: %s", e) + return + + if min_frame_count is not None and min_frame_count > frame_count: + raise ValueError(f"Video frame count must be at least {min_frame_count}, got {frame_count}") + if max_frame_count is not None and frame_count > max_frame_count: + raise ValueError(f"Video frame count must be at most {max_frame_count}, got {frame_count}") + + def get_number_of_images(images): if isinstance(images, torch.Tensor): return images.shape[0] if images.ndim >= 4 else 1 @@ -144,8 +159,8 @@ def get_number_of_images(images): def validate_audio_duration( audio: Input.Audio, - min_duration: Optional[float] = None, - max_duration: Optional[float] = None, + min_duration: float | None = None, + max_duration: float | None = None, ) -> None: sr = int(audio["sample_rate"]) dur = int(audio["waveform"].shape[-1]) / sr @@ -177,7 +192,7 @@ def validate_string( ) -def validate_container_format_is_mp4(video: VideoInput) -> None: +def validate_container_format_is_mp4(video: Input.Video) -> None: """Validates video container format is MP4.""" container_format = video.get_container_format() if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]: @@ -194,8 +209,8 @@ def _ratio_from_tuple(r: tuple[float, float]) -> float: def _assert_ratio_bounds( ar: float, *, - min_ratio: Optional[tuple[float, float]] = None, - max_ratio: Optional[tuple[float, float]] = None, + min_ratio: tuple[float, float] | None = None, + max_ratio: tuple[float, float] | None = None, strict: bool = True, ) -> None: """Validate a numeric aspect ratio against optional min/max ratio bounds.""" From 35fa091340c60612dfb71cb6822dc23b99a5dac2 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 4 Dec 2025 19:52:09 -0800 Subject: [PATCH 0990/1073] Forgot to put this in README. 
(#11112)
---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 91fb510e1..ed857df9f 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith
    - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/)
    - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/)
    - [Wan 2.2](https://comfyanonymous.github.io/ComfyUI_examples/wan22/)
+   - [Hunyuan Video 1.5](https://docs.comfy.org/tutorials/video/hunyuan/hunyuan-video-1-5)
 - Audio Models
    - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
    - [ACE Step](https://comfyanonymous.github.io/ComfyUI_examples/audio/)

From 0ec05b1481d12b299bc945dbd407b773cfb66483 Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Fri, 5 Dec 2025 11:05:38 -0800
Subject: [PATCH 0991/1073] Remove a line made unnecessary (and wrong) after
 transformer_options was added to NextDiT's _forward definition (#11118)
---
 comfy/ldm/lumina/model.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py
index f1c1a0ec3..6c24fed9b 100644
--- a/comfy/ldm/lumina/model.py
+++ b/comfy/ldm/lumina/model.py
@@ -586,7 +586,6 @@ class NextDiT(nn.Module):
         cap_feats = self.cap_embedder(cap_feats)  # (N, L, D)  # todo check if able to batchify w.o. redundant compute
         patches = transformer_options.get("patches", {})
-        transformer_options = kwargs.get("transformer_options", {})
         x_is_tensor = isinstance(x, torch.Tensor)
         img, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens, transformer_options=transformer_options)
         freqs_cis = freqs_cis.to(img.device)
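The removed line is a textbook parameter-shadowing bug: once transformer_options became a named parameter, a caller's value is bound to that parameter and never appears in **kwargs, so re-reading it from kwargs always yields the default. A minimal sketch of the failure mode (hypothetical function, not the model code):

    def _forward(x, transformer_options={}, **kwargs):
        # Bug: the caller's value is bound to the named parameter, so it is
        # never present in **kwargs and this line always resets it to {}.
        transformer_options = kwargs.get("transformer_options", {})
        return transformer_options

    assert _forward(None, transformer_options={"patches": {}}) == {}  # caller's value was lost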
From 43071e3de3780f984a46549e90935a0bf405e9df Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 5 Dec 2025 11:35:42 -0800
Subject: [PATCH 0992/1073] Make old scaled fp8 format use the new mixed quant
 ops system. (#11000)
---
 comfy/model_base.py | 14 +-
 comfy/model_detection.py | 33 +---
 comfy/model_patcher.py | 20 +--
 comfy/ops.py | 145 +++++++-----------
 comfy/quant_ops.py | 30 ++--
 comfy/sd.py | 68 ++++++--
 comfy/sd1_clip.py | 22 +--
 comfy/supported_models_base.py | 3 +-
 comfy/text_encoders/cosmos.py | 12 +-
 comfy/text_encoders/flux.py | 12 +-
 comfy/text_encoders/genmo.py | 6 +-
 comfy/text_encoders/hidream.py | 10 +-
 comfy/text_encoders/hunyuan_image.py | 12 +-
 comfy/text_encoders/hunyuan_video.py | 23 ++-
 comfy/text_encoders/lumina2.py | 6 +-
 comfy/text_encoders/omnigen2.py | 6 +-
 comfy/text_encoders/ovis.py | 5 +-
 comfy/text_encoders/pixart_t5.py | 6 +-
 comfy/text_encoders/qwen_image.py | 6 +-
 comfy/text_encoders/sd3_clip.py | 19 +--
 comfy/text_encoders/wan.py | 6 +-
 comfy/text_encoders/z_image.py | 5 +-
 comfy/utils.py | 66 ++++++++
 .../comfy_quant/test_mixed_precision.py | 18 ++-
 24 files changed, 278 insertions(+), 275 deletions(-)

diff --git a/comfy/model_base.py b/comfy/model_base.py
index 9b76c285e..3cedd4f31 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -134,7 +134,7 @@ class BaseModel(torch.nn.Module):
         if not unet_config.get("disable_unet_model_creation", False):
             if model_config.custom_operations is None:
                 fp8 = model_config.optimizations.get("fp8", False)
-                operations = comfy.ops.pick_operations(unet_config.get("dtype", None), self.manual_cast_dtype, fp8_optimizations=fp8, scaled_fp8=model_config.scaled_fp8, model_config=model_config)
+                operations = comfy.ops.pick_operations(unet_config.get("dtype", None), self.manual_cast_dtype, fp8_optimizations=fp8, model_config=model_config)
             else:
                 operations = model_config.custom_operations
             self.diffusion_model = unet_model(**unet_config, device=device, operations=operations)
@@ -329,18 +329,6 @@ class BaseModel(torch.nn.Module):
                 extra_sds.append(self.model_config.process_clip_vision_state_dict_for_saving(clip_vision_state_dict))

         unet_state_dict = self.diffusion_model.state_dict()
-
-        if self.model_config.scaled_fp8 is not None:
-            unet_state_dict["scaled_fp8"] = torch.tensor([], dtype=self.model_config.scaled_fp8)
-
-        # Save mixed precision metadata
-        if hasattr(self.model_config, 'layer_quant_config') and self.model_config.layer_quant_config:
-            metadata = {
-                "format_version": "1.0",
-                "layers": self.model_config.layer_quant_config
-            }
-            unet_state_dict["_quantization_metadata"] = metadata
-
         unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict)

         if self.model_type == ModelType.V_PREDICTION:
diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index 7d0517e61..fd1907627 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -6,20 +6,6 @@ import math
 import logging
 import torch
-
-def detect_layer_quantization(metadata):
-    quant_key = "_quantization_metadata"
-    if metadata is not None and quant_key in metadata:
-        quant_metadata = metadata.pop(quant_key)
-        quant_metadata = json.loads(quant_metadata)
-        if isinstance(quant_metadata, dict) and "layers" in quant_metadata:
-            logging.info(f"Found quantization metadata (version {quant_metadata.get('format_version', 'unknown')})")
-            return quant_metadata["layers"]
-        else:
-            raise ValueError("Invalid quantization metadata format")
-    return None
-
-
 def count_blocks(state_dict_keys, prefix_string):
     count = 0
     while True:
@@ -767,22 +753,11 @@ def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=Fal
     if model_config is None and use_base_if_no_match:
         model_config = comfy.supported_models_base.BASE(unet_config)

-
scaled_fp8_key = "{}scaled_fp8".format(unet_key_prefix) - if scaled_fp8_key in state_dict: - scaled_fp8_weight = state_dict.pop(scaled_fp8_key) - model_config.scaled_fp8 = scaled_fp8_weight.dtype - if model_config.scaled_fp8 == torch.float32: - model_config.scaled_fp8 = torch.float8_e4m3fn - if scaled_fp8_weight.nelement() == 2: - model_config.optimizations["fp8"] = False - else: - model_config.optimizations["fp8"] = True - # Detect per-layer quantization (mixed precision) - layer_quant_config = detect_layer_quantization(metadata) - if layer_quant_config: - model_config.layer_quant_config = layer_quant_config - logging.info(f"Detected mixed precision quantization: {len(layer_quant_config)} layers quantized") + quant_config = comfy.utils.detect_layer_quantization(state_dict, unet_key_prefix) + if quant_config: + model_config.quant_config = quant_config + logging.info("Detected mixed precision quantization") return model_config diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 3dcac3eef..215784874 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -126,27 +126,11 @@ class LowVramPatch: def __init__(self, key, patches, convert_func=None, set_func=None): self.key = key self.patches = patches - self.convert_func = convert_func + self.convert_func = convert_func # TODO: remove self.set_func = set_func def __call__(self, weight): - intermediate_dtype = weight.dtype - if self.convert_func is not None: - weight = self.convert_func(weight, inplace=False) - - if intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops - intermediate_dtype = torch.float32 - out = comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype) - if self.set_func is None: - return comfy.float.stochastic_rounding(out, weight.dtype, seed=string_to_seed(self.key)) - else: - return self.set_func(out, seed=string_to_seed(self.key), return_weight=True) - - out = comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype) - if self.set_func is not None: - return self.set_func(out, seed=string_to_seed(self.key), return_weight=True).to(dtype=intermediate_dtype) - else: - return out + return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=weight.dtype) #The above patch logic may cast up the weight to fp32, and do math. 
Go with fp32 x 3 LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR = 3 diff --git a/comfy/ops.py b/comfy/ops.py index eae434e68..dc06709a1 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -23,6 +23,7 @@ from comfy.cli_args import args, PerformanceFeature import comfy.float import comfy.rmsnorm import contextlib +import json def run_every_op(): if torch.compiler.is_compiling(): @@ -422,22 +423,12 @@ def fp8_linear(self, input): if input.ndim == 3 or input.ndim == 2: w, bias, offload_stream = cast_bias_weight(self, input, dtype=dtype, bias_dtype=input_dtype, offloadable=True) + scale_weight = torch.ones((), device=input.device, dtype=torch.float32) - scale_weight = self.scale_weight - scale_input = self.scale_input - if scale_weight is None: - scale_weight = torch.ones((), device=input.device, dtype=torch.float32) - else: - scale_weight = scale_weight.to(input.device) - - if scale_input is None: - scale_input = torch.ones((), device=input.device, dtype=torch.float32) - input = torch.clamp(input, min=-448, max=448, out=input) - layout_params_weight = {'scale': scale_input, 'orig_dtype': input_dtype} - quantized_input = QuantizedTensor(input.to(dtype).contiguous(), "TensorCoreFP8Layout", layout_params_weight) - else: - scale_input = scale_input.to(input.device) - quantized_input = QuantizedTensor.from_float(input, "TensorCoreFP8Layout", scale=scale_input, dtype=dtype) + scale_input = torch.ones((), device=input.device, dtype=torch.float32) + input = torch.clamp(input, min=-448, max=448, out=input) + layout_params_weight = {'scale': scale_input, 'orig_dtype': input_dtype} + quantized_input = QuantizedTensor(input.to(dtype).contiguous(), "TensorCoreFP8Layout", layout_params_weight) # Wrap weight in QuantizedTensor - this enables unified dispatch # Call F.linear - __torch_dispatch__ routes to fp8_linear handler in quant_ops.py! 
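# --- Reviewer sketch (not part of the patch): the dispatch pattern used above. ---
# Wrapping both operands in QuantizedTensor means the plain F.linear call is
# intercepted by QuantizedTensor.__torch_dispatch__, which looks up the matmul
# handler registered for the layout ("TensorCoreFP8Layout") in comfy/quant_ops.py.
# The tensors, scales and dtypes here are hypothetical:
#
#   params = {'scale': torch.ones((), dtype=torch.float32), 'orig_dtype': torch.bfloat16}
#   q_in = QuantizedTensor(x.to(torch.float8_e4m3fn), "TensorCoreFP8Layout", params)
#   q_w = QuantizedTensor(w.to(torch.float8_e4m3fn), "TensorCoreFP8Layout", params)
#   out = torch.nn.functional.linear(q_in, q_w, bias)  # routed to the fp8 handler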
@@ -458,7 +449,7 @@ class fp8_ops(manual_cast): return None def forward_comfy_cast_weights(self, input): - if not self.training: + if len(self.weight_function) == 0 and len(self.bias_function) == 0: try: out = fp8_linear(self, input) if out is not None: @@ -471,59 +462,6 @@ class fp8_ops(manual_cast): uncast_bias_weight(self, weight, bias, offload_stream) return x -def scaled_fp8_ops(fp8_matrix_mult=False, scale_input=False, override_dtype=None): - logging.info("Using scaled fp8: fp8 matrix mult: {}, scale input: {}".format(fp8_matrix_mult, scale_input)) - class scaled_fp8_op(manual_cast): - class Linear(manual_cast.Linear): - def __init__(self, *args, **kwargs): - if override_dtype is not None: - kwargs['dtype'] = override_dtype - super().__init__(*args, **kwargs) - - def reset_parameters(self): - if not hasattr(self, 'scale_weight'): - self.scale_weight = torch.nn.parameter.Parameter(data=torch.ones((), device=self.weight.device, dtype=torch.float32), requires_grad=False) - - if not scale_input: - self.scale_input = None - - if not hasattr(self, 'scale_input'): - self.scale_input = torch.nn.parameter.Parameter(data=torch.ones((), device=self.weight.device, dtype=torch.float32), requires_grad=False) - return None - - def forward_comfy_cast_weights(self, input): - if fp8_matrix_mult: - out = fp8_linear(self, input) - if out is not None: - return out - - weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) - - if weight.numel() < input.numel(): #TODO: optimize - x = torch.nn.functional.linear(input, weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype), bias) - else: - x = torch.nn.functional.linear(input * self.scale_weight.to(device=weight.device, dtype=weight.dtype), weight, bias) - uncast_bias_weight(self, weight, bias, offload_stream) - return x - - def convert_weight(self, weight, inplace=False, **kwargs): - if inplace: - weight *= self.scale_weight.to(device=weight.device, dtype=weight.dtype) - return weight - else: - return weight.to(dtype=torch.float32) * self.scale_weight.to(device=weight.device, dtype=torch.float32) - - def set_weight(self, weight, inplace_update=False, seed=None, return_weight=False, **kwargs): - weight = comfy.float.stochastic_rounding(weight / self.scale_weight.to(device=weight.device, dtype=weight.dtype), self.weight.dtype, seed=seed) - if return_weight: - return weight - if inplace_update: - self.weight.data.copy_(weight) - else: - self.weight = torch.nn.Parameter(weight, requires_grad=False) - - return scaled_fp8_op - CUBLAS_IS_AVAILABLE = False try: from cublas_ops import CublasLinear @@ -550,9 +488,9 @@ if CUBLAS_IS_AVAILABLE: from .quant_ops import QuantizedTensor, QUANT_ALGOS -def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, full_precision_mm=False): +def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_precision_mm=False): class MixedPrecisionOps(manual_cast): - _layer_quant_config = layer_quant_config + _quant_config = quant_config _compute_dtype = compute_dtype _full_precision_mm = full_precision_mm @@ -595,27 +533,36 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful manually_loaded_keys = [weight_key] - if layer_name not in MixedPrecisionOps._layer_quant_config: + layer_conf = state_dict.pop(f"{prefix}comfy_quant", None) + if layer_conf is not None: + layer_conf = json.loads(layer_conf.numpy().tobytes()) + + if layer_conf is None: self.weight = torch.nn.Parameter(weight.to(device=device, 
dtype=MixedPrecisionOps._compute_dtype), requires_grad=False) else: - quant_format = MixedPrecisionOps._layer_quant_config[layer_name].get("format", None) - if quant_format is None: + self.quant_format = layer_conf.get("format", None) + if not self._full_precision_mm: + self._full_precision_mm = layer_conf.get("full_precision_matrix_mult", False) + + if self.quant_format is None: raise ValueError(f"Unknown quantization format for layer {layer_name}") - qconfig = QUANT_ALGOS[quant_format] + qconfig = QUANT_ALGOS[self.quant_format] self.layout_type = qconfig["comfy_tensor_layout"] weight_scale_key = f"{prefix}weight_scale" + scale = state_dict.pop(weight_scale_key, None) layout_params = { - 'scale': state_dict.pop(weight_scale_key, None), + 'scale': scale, 'orig_dtype': MixedPrecisionOps._compute_dtype, 'block_size': qconfig.get("group_size", None), } - if layout_params['scale'] is not None: + + if scale is not None: manually_loaded_keys.append(weight_scale_key) self.weight = torch.nn.Parameter( - QuantizedTensor(weight.to(device=device), self.layout_type, layout_params), + QuantizedTensor(weight.to(device=device, dtype=qconfig.get("storage_t", None)), self.layout_type, layout_params), requires_grad=False ) @@ -624,7 +571,7 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful _v = state_dict.pop(param_key, None) if _v is None: continue - setattr(self, param_name, torch.nn.Parameter(_v.to(device=device), requires_grad=False)) + self.register_parameter(param_name, torch.nn.Parameter(_v.to(device=device), requires_grad=False)) manually_loaded_keys.append(param_key) super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) @@ -633,6 +580,16 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful if key in missing_keys: missing_keys.remove(key) + def state_dict(self, *args, destination=None, prefix="", **kwargs): + sd = super().state_dict(*args, destination=destination, prefix=prefix, **kwargs) + if isinstance(self.weight, QuantizedTensor): + sd["{}weight_scale".format(prefix)] = self.weight._layout_params['scale'] + quant_conf = {"format": self.quant_format} + if self._full_precision_mm: + quant_conf["full_precision_matrix_mult"] = True + sd["{}comfy_quant".format(prefix)] = torch.frombuffer(json.dumps(quant_conf).encode('utf-8'), dtype=torch.uint8) + return sd + def _forward(self, input, weight, bias): return torch.nn.functional.linear(input, weight, bias) @@ -648,9 +605,8 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful if self._full_precision_mm or self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0: return self.forward_comfy_cast_weights(input, *args, **kwargs) if (getattr(self, 'layout_type', None) is not None and - getattr(self, 'input_scale', None) is not None and not isinstance(input, QuantizedTensor)): - input = QuantizedTensor.from_float(input, self.layout_type, scale=self.input_scale, dtype=self.weight.dtype) + input = QuantizedTensor.from_float(input, self.layout_type, scale=getattr(self, 'input_scale', None), dtype=self.weight.dtype) return self._forward(input, self.weight, self.bias) def convert_weight(self, weight, inplace=False, **kwargs): @@ -661,7 +617,7 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful def set_weight(self, weight, inplace_update=False, seed=None, return_weight=False, **kwargs): if getattr(self, 'layout_type', None) is not None: - weight = 
QuantizedTensor.from_float(weight, self.layout_type, scale=None, dtype=self.weight.dtype, stochastic_rounding=seed, inplace_ops=True) + weight = QuantizedTensor.from_float(weight, self.layout_type, scale="recalculate", dtype=self.weight.dtype, stochastic_rounding=seed, inplace_ops=True) else: weight = weight.to(self.weight.dtype) if return_weight: @@ -670,17 +626,28 @@ def mixed_precision_ops(layer_quant_config={}, compute_dtype=torch.bfloat16, ful assert inplace_update is False # TODO: eventually remove the inplace_update stuff self.weight = torch.nn.Parameter(weight, requires_grad=False) + def _apply(self, fn, recurse=True): # This is to get torch.compile + moving weights to another device working + if recurse: + for module in self.children(): + module._apply(fn) + + for key, param in self._parameters.items(): + if param is None: + continue + self.register_parameter(key, torch.nn.Parameter(fn(param), requires_grad=False)) + for key, buf in self._buffers.items(): + if buf is not None: + self._buffers[key] = fn(buf) + return self + return MixedPrecisionOps -def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, scaled_fp8=None, model_config=None): +def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, model_config=None): fp8_compute = comfy.model_management.supports_fp8_compute(load_device) # TODO: if we support more ops this needs to be more granular - if model_config and hasattr(model_config, 'layer_quant_config') and model_config.layer_quant_config: - logging.info(f"Using mixed precision operations: {len(model_config.layer_quant_config)} quantized layers") - return mixed_precision_ops(model_config.layer_quant_config, compute_dtype, full_precision_mm=not fp8_compute) - - if scaled_fp8 is not None: - return scaled_fp8_ops(fp8_matrix_mult=fp8_compute and fp8_optimizations, scale_input=fp8_optimizations, override_dtype=scaled_fp8) + if model_config and hasattr(model_config, 'quant_config') and model_config.quant_config: + logging.info("Using mixed precision operations") + return mixed_precision_ops(model_config.quant_config, compute_dtype, full_precision_mm=not fp8_compute) if ( fp8_compute and diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py index bb1fb860c..571d3f760 100644 --- a/comfy/quant_ops.py +++ b/comfy/quant_ops.py @@ -238,6 +238,9 @@ class QuantizedTensor(torch.Tensor): def is_contiguous(self, *arg, **kwargs): return self._qdata.is_contiguous(*arg, **kwargs) + def storage(self): + return self._qdata.storage() + # ============================================================================== # Generic Utilities (Layout-Agnostic Operations) # ============================================================================== @@ -249,12 +252,6 @@ def _create_transformed_qtensor(qt, transform_fn): def _handle_device_transfer(qt, target_device, target_dtype=None, target_layout=None, op_name="to"): - if target_dtype is not None and target_dtype != qt.dtype: - logging.warning( - f"QuantizedTensor: dtype conversion requested to {target_dtype}, " - f"but not supported for quantized tensors. Ignoring dtype." 
- ) - if target_layout is not None and target_layout != torch.strided: logging.warning( f"QuantizedTensor: layout change requested to {target_layout}, " @@ -274,6 +271,8 @@ def _handle_device_transfer(qt, target_device, target_dtype=None, target_layout= logging.debug(f"QuantizedTensor.{op_name}: Moving from {current_device} to {target_device}") new_q_data = qt._qdata.to(device=target_device) new_params = _move_layout_params_to_device(qt._layout_params, target_device) + if target_dtype is not None: + new_params["orig_dtype"] = target_dtype new_qt = QuantizedTensor(new_q_data, qt._layout_type, new_params) logging.debug(f"QuantizedTensor.{op_name}: Created new tensor on {target_device}") return new_qt @@ -339,7 +338,9 @@ def generic_copy_(func, args, kwargs): # Copy from another quantized tensor qt_dest._qdata.copy_(src._qdata, non_blocking=non_blocking) qt_dest._layout_type = src._layout_type + orig_dtype = qt_dest._layout_params["orig_dtype"] _copy_layout_params_inplace(src._layout_params, qt_dest._layout_params, non_blocking=non_blocking) + qt_dest._layout_params["orig_dtype"] = orig_dtype else: # Copy from regular tensor - just copy raw data qt_dest._qdata.copy_(src) @@ -397,17 +398,20 @@ class TensorCoreFP8Layout(QuantizedLayout): def quantize(cls, tensor, scale=None, dtype=torch.float8_e4m3fn, stochastic_rounding=0, inplace_ops=False): orig_dtype = tensor.dtype - if scale is None: + if isinstance(scale, str) and scale == "recalculate": scale = torch.amax(tensor.abs()) / torch.finfo(dtype).max - if not isinstance(scale, torch.Tensor): - scale = torch.tensor(scale) - scale = scale.to(device=tensor.device, dtype=torch.float32) + if scale is not None: + if not isinstance(scale, torch.Tensor): + scale = torch.tensor(scale) + scale = scale.to(device=tensor.device, dtype=torch.float32) - if inplace_ops: - tensor *= (1.0 / scale).to(tensor.dtype) + if inplace_ops: + tensor *= (1.0 / scale).to(tensor.dtype) + else: + tensor = tensor * (1.0 / scale).to(tensor.dtype) else: - tensor = tensor * (1.0 / scale).to(tensor.dtype) + scale = torch.ones((), device=tensor.device, dtype=torch.float32) if stochastic_rounding > 0: tensor = comfy.float.stochastic_rounding(tensor, dtype=dtype, seed=stochastic_rounding) diff --git a/comfy/sd.py b/comfy/sd.py index 03bdb33d5..092715d79 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -968,10 +968,8 @@ def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DI clip_data = [] for p in ckpt_paths: sd, metadata = comfy.utils.load_torch_file(p, safe_load=True, return_metadata=True) - if metadata is not None: - quant_metadata = metadata.get("_quantization_metadata", None) - if quant_metadata is not None: - sd["_quantization_metadata"] = quant_metadata + if model_options.get("custom_operations", None) is None: + sd, metadata = comfy.utils.convert_old_quants(sd, model_prefix="", metadata=metadata) clip_data.append(sd) return load_text_encoder_state_dicts(clip_data, embedding_directory=embedding_directory, clip_type=clip_type, model_options=model_options) @@ -1088,7 +1086,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=False, clip_g=True, t5=False) clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer elif clip_type == CLIPType.HIDREAM: - clip_target.clip = comfy.text_encoders.hidream.hidream_clip(clip_l=False, clip_g=True, t5=False, llama=False, dtype_t5=None, dtype_llama=None, t5xxl_scaled_fp8=None, llama_scaled_fp8=None) + clip_target.clip 
= comfy.text_encoders.hidream.hidream_clip(clip_l=False, clip_g=True, t5=False, llama=False, dtype_t5=None, dtype_llama=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: clip_target.clip = sdxl_clip.SDXLRefinerClipModel @@ -1112,7 +1110,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None) elif clip_type == CLIPType.HIDREAM: clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**t5xxl_detect(clip_data), - clip_l=False, clip_g=False, t5=True, llama=False, dtype_llama=None, llama_scaled_fp8=None) + clip_l=False, clip_g=False, t5=True, llama=False, dtype_llama=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: #CLIPType.MOCHI clip_target.clip = comfy.text_encoders.genmo.mochi_te(**t5xxl_detect(clip_data)) @@ -1141,7 +1139,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None) elif te_model == TEModel.LLAMA3_8: clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**llama_detect(clip_data), - clip_l=False, clip_g=False, t5=False, llama=True, dtype_t5=None, t5xxl_scaled_fp8=None) + clip_l=False, clip_g=False, t5=False, llama=True, dtype_t5=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer elif te_model == TEModel.QWEN25_3B: clip_target.clip = comfy.text_encoders.omnigen2.te(**llama_detect(clip_data)) @@ -1169,7 +1167,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=True, clip_g=False, t5=False) clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer elif clip_type == CLIPType.HIDREAM: - clip_target.clip = comfy.text_encoders.hidream.hidream_clip(clip_l=True, clip_g=False, t5=False, llama=False, dtype_t5=None, dtype_llama=None, t5xxl_scaled_fp8=None, llama_scaled_fp8=None) + clip_target.clip = comfy.text_encoders.hidream.hidream_clip(clip_l=True, clip_g=False, t5=False, llama=False, dtype_t5=None, dtype_llama=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer else: clip_target.clip = sd1_clip.SD1ClipModel @@ -1224,8 +1222,6 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip parameters = 0 for c in clip_data: - if "_quantization_metadata" in c: - c.pop("_quantization_metadata") parameters += comfy.utils.calculate_parameters(c) tokenizer_data, model_options = comfy.text_encoders.long_clipl.model_options_long_clip(c, tokenizer_data, model_options) @@ -1295,6 +1291,10 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c weight_dtype = comfy.utils.weight_dtype(sd, diffusion_model_prefix) load_device = model_management.get_torch_device() + custom_operations = model_options.get("custom_operations", None) + if custom_operations is None: + sd, metadata = comfy.utils.convert_old_quants(sd, diffusion_model_prefix, metadata=metadata) + model_config = model_detection.model_config_from_unet(sd, diffusion_model_prefix, metadata=metadata) if model_config is None: logging.warning("Warning, This is not a checkpoint file, trying to load it as a diffusion model only.") @@ -1303,18 +1303,22 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c return None return (diffusion_model, None, VAE(sd={}), None) # The VAE object is there to throw an exception if it's actually used' - 
unet_weight_dtype = list(model_config.supported_inference_dtypes) - if model_config.scaled_fp8 is not None: + if model_config.quant_config is not None: weight_dtype = None - model_config.custom_operations = model_options.get("custom_operations", None) + if custom_operations is not None: + model_config.custom_operations = custom_operations + unet_dtype = model_options.get("dtype", model_options.get("weight_dtype", None)) if unet_dtype is None: unet_dtype = model_management.unet_dtype(model_params=parameters, supported_dtypes=unet_weight_dtype, weight_dtype=weight_dtype) - manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes) + if model_config.quant_config is not None: + manual_cast_dtype = model_management.unet_manual_cast(None, load_device, model_config.supported_inference_dtypes) + else: + manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes) model_config.set_inference_dtype(unet_dtype, manual_cast_dtype) if model_config.clip_vision_prefix is not None: @@ -1332,6 +1336,27 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c vae = VAE(sd=vae_sd, metadata=metadata) if output_clip: + if te_model_options.get("custom_operations", None) is None: + scaled_fp8_list = [] + for k in list(sd.keys()): # Convert scaled fp8 to mixed ops + if k.endswith(".scaled_fp8"): + scaled_fp8_list.append(k[:-len("scaled_fp8")]) + + if len(scaled_fp8_list) > 0: + out_sd = {} + for k in sd: + skip = False + for pref in scaled_fp8_list: + skip = skip or k.startswith(pref) + if not skip: + out_sd[k] = sd[k] + + for pref in scaled_fp8_list: + quant_sd, qmetadata = comfy.utils.convert_old_quants(sd, pref, metadata={}) + for k in quant_sd: + out_sd[k] = quant_sd[k] + sd = out_sd + clip_target = model_config.clip_target(state_dict=sd) if clip_target is not None: clip_sd = model_config.process_clip_state_dict(sd) @@ -1394,6 +1419,9 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None): if len(temp_sd) > 0: sd = temp_sd + custom_operations = model_options.get("custom_operations", None) + if custom_operations is None: + sd, metadata = comfy.utils.convert_old_quants(sd, "", metadata=metadata) parameters = comfy.utils.calculate_parameters(sd) weight_dtype = comfy.utils.weight_dtype(sd) @@ -1424,7 +1452,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None): offload_device = model_management.unet_offload_device() unet_weight_dtype = list(model_config.supported_inference_dtypes) - if model_config.scaled_fp8 is not None: + if model_config.quant_config is not None: weight_dtype = None if dtype is None: @@ -1432,12 +1460,15 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None): else: unet_dtype = dtype - if model_config.layer_quant_config is not None: + if model_config.quant_config is not None: manual_cast_dtype = model_management.unet_manual_cast(None, load_device, model_config.supported_inference_dtypes) else: manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes) model_config.set_inference_dtype(unet_dtype, manual_cast_dtype) - model_config.custom_operations = model_options.get("custom_operations", model_config.custom_operations) + + if custom_operations is not None: + model_config.custom_operations = custom_operations + if model_options.get("fp8_optimizations", False): model_config.optimizations["fp8"] = True @@ -1476,6 +1507,9 @@ def 
save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, m if vae is not None: vae_sd = vae.get_sd() + if metadata is None: + metadata = {} + model_management.load_models_gpu(load_models, force_patch_weights=True) clip_vision_sd = clip_vision.get_sd() if clip_vision is not None else None sd = model.model.state_dict_for_saving(clip_sd, vae_sd, clip_vision_sd) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 503a51843..962948dae 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -107,29 +107,17 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): config[k] = v operations = model_options.get("custom_operations", None) - scaled_fp8 = None - quantization_metadata = model_options.get("quantization_metadata", None) + quant_config = model_options.get("quantization_metadata", None) if operations is None: - layer_quant_config = None - if quantization_metadata is not None: - layer_quant_config = json.loads(quantization_metadata).get("layers", None) - - if layer_quant_config is not None: - operations = comfy.ops.mixed_precision_ops(layer_quant_config, dtype, full_precision_mm=True) - logging.info(f"Using MixedPrecisionOps for text encoder: {len(layer_quant_config)} quantized layers") + if quant_config is not None: + operations = comfy.ops.mixed_precision_ops(quant_config, dtype, full_precision_mm=True) + logging.info("Using MixedPrecisionOps for text encoder") else: - # Fallback to scaled_fp8_ops for backward compatibility - scaled_fp8 = model_options.get("scaled_fp8", None) - if scaled_fp8 is not None: - operations = comfy.ops.scaled_fp8_ops(fp8_matrix_mult=False, override_dtype=scaled_fp8) - else: - operations = comfy.ops.manual_cast + operations = comfy.ops.manual_cast self.operations = operations self.transformer = model_class(config, dtype, device, self.operations) - if scaled_fp8 is not None: - self.transformer.scaled_fp8 = torch.nn.Parameter(torch.tensor([], dtype=scaled_fp8)) self.num_layers = self.transformer.num_layers diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index e4bd74514..9fd84d329 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -49,8 +49,7 @@ class BASE: manual_cast_dtype = None custom_operations = None - scaled_fp8 = None - layer_quant_config = None # Per-layer quantization configuration for mixed precision + quant_config = None # quantization configuration for mixed precision optimizations = {"fp8": False} @classmethod diff --git a/comfy/text_encoders/cosmos.py b/comfy/text_encoders/cosmos.py index a1adb5242..448381fa9 100644 --- a/comfy/text_encoders/cosmos.py +++ b/comfy/text_encoders/cosmos.py @@ -7,10 +7,10 @@ from transformers import T5TokenizerFast class T5XXLModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}): textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_old_config_xxl.json") - t5xxl_scaled_fp8 = model_options.get("t5xxl_scaled_fp8", None) - if t5xxl_scaled_fp8 is not None: + t5xxl_quantization_metadata = model_options.get("t5xxl_quantization_metadata", None) + if t5xxl_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = t5xxl_scaled_fp8 + model_options["quantization_metadata"] = t5xxl_quantization_metadata super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, 
model_class=comfy.text_encoders.t5.T5, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, zero_out_masked=attention_mask, model_options=model_options) @@ -30,12 +30,12 @@ class CosmosT5Tokenizer(sd1_clip.SD1Tokenizer): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer) -def te(dtype_t5=None, t5xxl_scaled_fp8=None): +def te(dtype_t5=None, t5_quantization_metadata=None): class CosmosTEModel_(CosmosT5XXL): def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + if t5_quantization_metadata is not None: model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 + model_options["t5xxl_quantization_metadata"] = t5_quantization_metadata if dtype is None: dtype = dtype_t5 super().__init__(device=device, dtype=dtype, model_options=model_options) diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py index 99f4812bb..21d93d757 100644 --- a/comfy/text_encoders/flux.py +++ b/comfy/text_encoders/flux.py @@ -63,12 +63,12 @@ class FluxClipModel(torch.nn.Module): else: return self.t5xxl.load_sd(sd) -def flux_clip(dtype_t5=None, t5xxl_scaled_fp8=None): +def flux_clip(dtype_t5=None, t5_quantization_metadata=None): class FluxClipModel_(FluxClipModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + if t5_quantization_metadata is not None: model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 + model_options["t5xxl_quantization_metadata"] = t5_quantization_metadata super().__init__(dtype_t5=dtype_t5, device=device, dtype=dtype, model_options=model_options) return FluxClipModel_ @@ -159,15 +159,13 @@ class Flux2TEModel(sd1_clip.SD1ClipModel): out = out.reshape(out.shape[0], out.shape[1], -1) return out, pooled, extra -def flux2_te(dtype_llama=None, llama_scaled_fp8=None, llama_quantization_metadata=None, pruned=False): +def flux2_te(dtype_llama=None, llama_quantization_metadata=None, pruned=False): class Flux2TEModel_(Flux2TEModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: - model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 if dtype_llama is not None: dtype = dtype_llama if llama_quantization_metadata is not None: + model_options = model_options.copy() model_options["quantization_metadata"] = llama_quantization_metadata if pruned: model_options = model_options.copy() diff --git a/comfy/text_encoders/genmo.py b/comfy/text_encoders/genmo.py index 9dcf190a2..5daea8135 100644 --- a/comfy/text_encoders/genmo.py +++ b/comfy/text_encoders/genmo.py @@ -26,12 +26,12 @@ class MochiT5Tokenizer(sd1_clip.SD1Tokenizer): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer) -def mochi_te(dtype_t5=None, t5xxl_scaled_fp8=None): +def mochi_te(dtype_t5=None, t5_quantization_metadata=None): class MochiTEModel_(MochiT5XXL): def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + if t5_quantization_metadata is not None: model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 + model_options["t5xxl_quantization_metadata"] = 
t5_quantization_metadata if dtype is None: dtype = dtype_t5 super().__init__(device=device, dtype=dtype, model_options=model_options) diff --git a/comfy/text_encoders/hidream.py b/comfy/text_encoders/hidream.py index dbcf52784..600b34480 100644 --- a/comfy/text_encoders/hidream.py +++ b/comfy/text_encoders/hidream.py @@ -142,14 +142,14 @@ class HiDreamTEModel(torch.nn.Module): return self.llama.load_sd(sd) -def hidream_clip(clip_l=True, clip_g=True, t5=True, llama=True, dtype_t5=None, dtype_llama=None, t5xxl_scaled_fp8=None, llama_scaled_fp8=None): +def hidream_clip(clip_l=True, clip_g=True, t5=True, llama=True, dtype_t5=None, dtype_llama=None, t5_quantization_metadata=None, llama_quantization_metadata=None): class HiDreamTEModel_(HiDreamTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + if t5_quantization_metadata is not None: model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 - if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options: + model_options["t5xxl_quantization_metadata"] = t5_quantization_metadata + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["llama_scaled_fp8"] = llama_scaled_fp8 + model_options["llama_quantization_metadata"] = llama_quantization_metadata super().__init__(clip_l=clip_l, clip_g=clip_g, t5=t5, llama=llama, dtype_t5=dtype_t5, dtype_llama=dtype_llama, device=device, dtype=dtype, model_options=model_options) return HiDreamTEModel_ diff --git a/comfy/text_encoders/hunyuan_image.py b/comfy/text_encoders/hunyuan_image.py index ff04726e1..cd198036c 100644 --- a/comfy/text_encoders/hunyuan_image.py +++ b/comfy/text_encoders/hunyuan_image.py @@ -40,10 +40,10 @@ class HunyuanImageTokenizer(QwenImageTokenizer): class Qwen25_7BVLIModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="hidden", layer_idx=-3, dtype=None, attention_mask=True, model_options={}): - llama_scaled_fp8 = model_options.get("qwen_scaled_fp8", None) - if llama_scaled_fp8 is not None: + llama_quantization_metadata = model_options.get("llama_quantization_metadata", None) + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 + model_options["quantization_metadata"] = llama_quantization_metadata super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen25_7BVLI, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) @@ -91,12 +91,12 @@ class HunyuanImageTEModel(QwenImageTEModel): else: return super().load_sd(sd) -def te(byt5=True, dtype_llama=None, llama_scaled_fp8=None): +def te(byt5=True, dtype_llama=None, llama_quantization_metadata=None): class QwenImageTEModel_(HunyuanImageTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["qwen_scaled_fp8"] = llama_scaled_fp8 + model_options["llama_quantization_metadata"] = llama_quantization_metadata if dtype_llama is not None: dtype = dtype_llama super().__init__(byt5=byt5, device=device, dtype=dtype, model_options=model_options) diff --git 
a/comfy/text_encoders/hunyuan_video.py b/comfy/text_encoders/hunyuan_video.py index 0110517bb..a9a6c525e 100644 --- a/comfy/text_encoders/hunyuan_video.py +++ b/comfy/text_encoders/hunyuan_video.py @@ -6,7 +6,7 @@ from transformers import LlamaTokenizerFast import torch import os import numbers - +import comfy.utils def llama_detect(state_dict, prefix=""): out = {} @@ -14,12 +14,9 @@ def llama_detect(state_dict, prefix=""): if t5_key in state_dict: out["dtype_llama"] = state_dict[t5_key].dtype - scaled_fp8_key = "{}scaled_fp8".format(prefix) - if scaled_fp8_key in state_dict: - out["llama_scaled_fp8"] = state_dict[scaled_fp8_key].dtype - - if "_quantization_metadata" in state_dict: - out["llama_quantization_metadata"] = state_dict["_quantization_metadata"] + quant = comfy.utils.detect_layer_quantization(state_dict, prefix) + if quant is not None: + out["llama_quantization_metadata"] = quant return out @@ -31,10 +28,10 @@ class LLAMA3Tokenizer(sd1_clip.SDTokenizer): class LLAMAModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="hidden", layer_idx=-3, dtype=None, attention_mask=True, model_options={}, special_tokens={"start": 128000, "pad": 128258}): - llama_scaled_fp8 = model_options.get("llama_scaled_fp8", None) - if llama_scaled_fp8 is not None: + llama_quantization_metadata = model_options.get("llama_quantization_metadata", None) + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 + model_options["quantization_metadata"] = llama_quantization_metadata textmodel_json_config = {} vocab_size = model_options.get("vocab_size", None) @@ -161,11 +158,11 @@ class HunyuanVideoClipModel(torch.nn.Module): return self.llama.load_sd(sd) -def hunyuan_video_clip(dtype_llama=None, llama_scaled_fp8=None): +def hunyuan_video_clip(dtype_llama=None, llama_quantization_metadata=None): class HunyuanVideoClipModel_(HunyuanVideoClipModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options: + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["llama_scaled_fp8"] = llama_scaled_fp8 + model_options["llama_quantization_metadata"] = llama_quantization_metadata super().__init__(dtype_llama=dtype_llama, device=device, dtype=dtype, model_options=model_options) return HunyuanVideoClipModel_ diff --git a/comfy/text_encoders/lumina2.py b/comfy/text_encoders/lumina2.py index fd986e2c1..7a6cfdab2 100644 --- a/comfy/text_encoders/lumina2.py +++ b/comfy/text_encoders/lumina2.py @@ -40,7 +40,7 @@ class LuminaModel(sd1_clip.SD1ClipModel): super().__init__(device=device, dtype=dtype, name=name, clip_model=clip_model, model_options=model_options) -def te(dtype_llama=None, llama_scaled_fp8=None, model_type="gemma2_2b"): +def te(dtype_llama=None, llama_quantization_metadata=None, model_type="gemma2_2b"): if model_type == "gemma2_2b": model = Gemma2_2BModel elif model_type == "gemma3_4b": @@ -48,9 +48,9 @@ def te(dtype_llama=None, llama_scaled_fp8=None, model_type="gemma2_2b"): class LuminaTEModel_(LuminaModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 + model_options["quantization_metadata"] = llama_quantization_metadata if dtype_llama is not None: dtype = dtype_llama 
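Across this patch the text-encoder factories all converge on the same few lines: copy `model_options` before mutating it (the dict may be shared with other loaders) and stash the detected quantization metadata under a single `quantization_metadata` key, replacing the old per-model `*_scaled_fp8` plumbing. A minimal sketch of the shared shape, where `EncoderModel` is a placeholder for whichever class the real `te()` wraps:

```python
def te(dtype_llama=None, llama_quantization_metadata=None):
    class TEModel_(EncoderModel):  # EncoderModel: stand-in for the wrapped encoder class
        def __init__(self, device="cpu", dtype=None, model_options={}):
            if llama_quantization_metadata is not None:
                model_options = model_options.copy()  # don't mutate the caller's dict
                model_options["quantization_metadata"] = llama_quantization_metadata
            if dtype_llama is not None:
                dtype = dtype_llama
            super().__init__(device=device, dtype=dtype, model_options=model_options)
    return TEModel_
```

Downstream, `sd1_clip.SDClipModel` only needs to check that one key to choose between `comfy.ops.mixed_precision_ops` and plain `comfy.ops.manual_cast`.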
super().__init__(device=device, dtype=dtype, name=model_type, model_options=model_options, clip_model=model) diff --git a/comfy/text_encoders/omnigen2.py b/comfy/text_encoders/omnigen2.py index 1a01b2dd4..50aa4121f 100644 --- a/comfy/text_encoders/omnigen2.py +++ b/comfy/text_encoders/omnigen2.py @@ -32,12 +32,12 @@ class Omnigen2Model(sd1_clip.SD1ClipModel): super().__init__(device=device, dtype=dtype, name="qwen25_3b", clip_model=Qwen25_3BModel, model_options=model_options) -def te(dtype_llama=None, llama_scaled_fp8=None): +def te(dtype_llama=None, llama_quantization_metadata=None): class Omnigen2TEModel_(Omnigen2Model): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 + model_options["quantization_metadata"] = llama_quantization_metadata if dtype_llama is not None: dtype = dtype_llama super().__init__(device=device, dtype=dtype, model_options=model_options) diff --git a/comfy/text_encoders/ovis.py b/comfy/text_encoders/ovis.py index 81c9bd51c..5754424d2 100644 --- a/comfy/text_encoders/ovis.py +++ b/comfy/text_encoders/ovis.py @@ -55,12 +55,9 @@ class OvisTEModel(sd1_clip.SD1ClipModel): return out, pooled, {} -def te(dtype_llama=None, llama_scaled_fp8=None, llama_quantization_metadata=None): +def te(dtype_llama=None, llama_quantization_metadata=None): class OvisTEModel_(OvisTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: - model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 if dtype_llama is not None: dtype = dtype_llama if llama_quantization_metadata is not None: diff --git a/comfy/text_encoders/pixart_t5.py b/comfy/text_encoders/pixart_t5.py index 5f383de07..e5e5f18be 100644 --- a/comfy/text_encoders/pixart_t5.py +++ b/comfy/text_encoders/pixart_t5.py @@ -30,12 +30,12 @@ class PixArtTokenizer(sd1_clip.SD1Tokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer) -def pixart_te(dtype_t5=None, t5xxl_scaled_fp8=None): +def pixart_te(dtype_t5=None, t5_quantization_metadata=None): class PixArtTEModel_(PixArtT5XXL): def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + if t5_quantization_metadata is not None: model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 + model_options["t5xxl_quantization_metadata"] = t5_quantization_metadata if dtype is None: dtype = dtype_t5 super().__init__(device=device, dtype=dtype, model_options=model_options) diff --git a/comfy/text_encoders/qwen_image.py b/comfy/text_encoders/qwen_image.py index c0d32a6ef..5c14dec23 100644 --- a/comfy/text_encoders/qwen_image.py +++ b/comfy/text_encoders/qwen_image.py @@ -85,12 +85,12 @@ class QwenImageTEModel(sd1_clip.SD1ClipModel): return out, pooled, extra -def te(dtype_llama=None, llama_scaled_fp8=None): +def te(dtype_llama=None, llama_quantization_metadata=None): class QwenImageTEModel_(QwenImageTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + if llama_quantization_metadata is not None: 
model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 + model_options["quantization_metadata"] = llama_quantization_metadata if dtype_llama is not None: dtype = dtype_llama super().__init__(device=device, dtype=dtype, model_options=model_options) diff --git a/comfy/text_encoders/sd3_clip.py b/comfy/text_encoders/sd3_clip.py index ff5d412db..8b153c72b 100644 --- a/comfy/text_encoders/sd3_clip.py +++ b/comfy/text_encoders/sd3_clip.py @@ -6,14 +6,15 @@ import torch import os import comfy.model_management import logging +import comfy.utils class T5XXLModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=False, model_options={}): textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_xxl.json") - t5xxl_scaled_fp8 = model_options.get("t5xxl_scaled_fp8", None) - if t5xxl_scaled_fp8 is not None: + t5xxl_quantization_metadata = model_options.get("t5xxl_quantization_metadata", None) + if t5xxl_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = t5xxl_scaled_fp8 + model_options["quantization_metadata"] = t5xxl_quantization_metadata model_options = {**model_options, "model_name": "t5xxl"} super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) @@ -25,9 +26,9 @@ def t5_xxl_detect(state_dict, prefix=""): if t5_key in state_dict: out["dtype_t5"] = state_dict[t5_key].dtype - scaled_fp8_key = "{}scaled_fp8".format(prefix) - if scaled_fp8_key in state_dict: - out["t5xxl_scaled_fp8"] = state_dict[scaled_fp8_key].dtype + quant = comfy.utils.detect_layer_quantization(state_dict, prefix) + if quant is not None: + out["t5_quantization_metadata"] = quant return out @@ -156,11 +157,11 @@ class SD3ClipModel(torch.nn.Module): else: return self.t5xxl.load_sd(sd) -def sd3_clip(clip_l=True, clip_g=True, t5=True, dtype_t5=None, t5xxl_scaled_fp8=None, t5_attention_mask=False): +def sd3_clip(clip_l=True, clip_g=True, t5=True, dtype_t5=None, t5_quantization_metadata=None, t5_attention_mask=False): class SD3ClipModel_(SD3ClipModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options: + if t5_quantization_metadata is not None: model_options = model_options.copy() - model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8 + model_options["t5xxl_quantization_metadata"] = t5_quantization_metadata super().__init__(clip_l=clip_l, clip_g=clip_g, t5=t5, dtype_t5=dtype_t5, t5_attention_mask=t5_attention_mask, device=device, dtype=dtype, model_options=model_options) return SD3ClipModel_ diff --git a/comfy/text_encoders/wan.py b/comfy/text_encoders/wan.py index d50fa4b28..164a57edd 100644 --- a/comfy/text_encoders/wan.py +++ b/comfy/text_encoders/wan.py @@ -25,12 +25,12 @@ class WanT5Model(sd1_clip.SD1ClipModel): def __init__(self, device="cpu", dtype=None, model_options={}, **kwargs): super().__init__(device=device, dtype=dtype, model_options=model_options, name="umt5xxl", clip_model=UMT5XXlModel, **kwargs) -def te(dtype_t5=None, t5xxl_scaled_fp8=None): +def te(dtype_t5=None, t5_quantization_metadata=None): class WanTEModel(WanT5Model): def __init__(self, device="cpu", dtype=None, model_options={}): - if 
t5xxl_scaled_fp8 is not None and "scaled_fp8" not in model_options: + if t5_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = t5xxl_scaled_fp8 + model_options["quantization_metadata"] = t5_quantization_metadata if dtype_t5 is not None: dtype = dtype_t5 super().__init__(device=device, dtype=dtype, model_options=model_options) diff --git a/comfy/text_encoders/z_image.py b/comfy/text_encoders/z_image.py index bb9273b20..19adde0b7 100644 --- a/comfy/text_encoders/z_image.py +++ b/comfy/text_encoders/z_image.py @@ -34,12 +34,9 @@ class ZImageTEModel(sd1_clip.SD1ClipModel): super().__init__(device=device, dtype=dtype, name="qwen3_4b", clip_model=Qwen3_4BModel, model_options=model_options) -def te(dtype_llama=None, llama_scaled_fp8=None, llama_quantization_metadata=None): +def te(dtype_llama=None, llama_quantization_metadata=None): class ZImageTEModel_(ZImageTEModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: - model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 if dtype_llama is not None: dtype = dtype_llama if llama_quantization_metadata is not None: diff --git a/comfy/utils.py b/comfy/utils.py index 37485e497..89846bc95 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -29,6 +29,7 @@ import itertools from torch.nn.functional import interpolate from einops import rearrange from comfy.cli_args import args +import json MMAP_TORCH_FILES = args.mmap_torch_files DISABLE_MMAP = args.disable_mmap @@ -1194,3 +1195,68 @@ def unpack_latents(combined_latent, latent_shapes): else: output_tensors = combined_latent return output_tensors + +def detect_layer_quantization(state_dict, prefix): + for k in state_dict: + if k.startswith(prefix) and k.endswith(".comfy_quant"): + logging.info("Found quantization metadata version 1") + return {"mixed_ops": True} + return None + +def convert_old_quants(state_dict, model_prefix="", metadata={}): + if metadata is None: + metadata = {} + + quant_metadata = None + if "_quantization_metadata" not in metadata: + scaled_fp8_key = "{}scaled_fp8".format(model_prefix) + + if scaled_fp8_key in state_dict: + scaled_fp8_weight = state_dict[scaled_fp8_key] + scaled_fp8_dtype = scaled_fp8_weight.dtype + if scaled_fp8_dtype == torch.float32: + scaled_fp8_dtype = torch.float8_e4m3fn + + if scaled_fp8_weight.nelement() == 2: + full_precision_matrix_mult = True + else: + full_precision_matrix_mult = False + + out_sd = {} + layers = {} + for k in list(state_dict.keys()): + if not k.startswith(model_prefix): + out_sd[k] = state_dict[k] + continue + k_out = k + w = state_dict.pop(k) + layer = None + if k_out.endswith(".scale_weight"): + layer = k_out[:-len(".scale_weight")] + k_out = "{}.weight_scale".format(layer) + + if layer is not None: + layer_conf = {"format": "float8_e4m3fn"} # TODO: check if anyone did some non e4m3fn scaled checkpoints + if full_precision_matrix_mult: + layer_conf["full_precision_matrix_mult"] = full_precision_matrix_mult + layers[layer] = layer_conf + + if k_out.endswith(".scale_input"): + layer = k_out[:-len(".scale_input")] + k_out = "{}.input_scale".format(layer) + if w.item() == 1.0: + continue + + out_sd[k_out] = w + + state_dict = out_sd + quant_metadata = {"layers": layers} + else: + quant_metadata = json.loads(metadata["_quantization_metadata"]) + + if quant_metadata is not None: + layers = quant_metadata["layers"] + for k, v in layers.items(): + 
state_dict["{}.comfy_quant".format(k)] = torch.frombuffer(json.dumps(v).encode('utf-8'), dtype=torch.uint8) + + return state_dict, metadata diff --git a/tests-unit/comfy_quant/test_mixed_precision.py b/tests-unit/comfy_quant/test_mixed_precision.py index 63361309f..3a54941e6 100644 --- a/tests-unit/comfy_quant/test_mixed_precision.py +++ b/tests-unit/comfy_quant/test_mixed_precision.py @@ -2,6 +2,7 @@ import unittest import torch import sys import os +import json # Add comfy to path sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) @@ -15,6 +16,7 @@ if not has_gpu(): from comfy import ops from comfy.quant_ops import QuantizedTensor +import comfy.utils class SimpleModel(torch.nn.Module): @@ -94,8 +96,9 @@ class TestMixedPrecisionOps(unittest.TestCase): "layer3.weight_scale": torch.tensor(1.5, dtype=torch.float32), } + state_dict, _ = comfy.utils.convert_old_quants(state_dict, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})}) # Create model and load state dict (strict=False because custom loading pops keys) - model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) + model = SimpleModel(operations=ops.mixed_precision_ops({})) model.load_state_dict(state_dict, strict=False) # Verify weights are wrapped in QuantizedTensor @@ -115,7 +118,8 @@ class TestMixedPrecisionOps(unittest.TestCase): # Forward pass input_tensor = torch.randn(5, 10, dtype=torch.bfloat16) - output = model(input_tensor) + with torch.inference_mode(): + output = model(input_tensor) self.assertEqual(output.shape, (5, 40)) @@ -141,7 +145,8 @@ class TestMixedPrecisionOps(unittest.TestCase): "layer3.bias": torch.randn(40, dtype=torch.bfloat16), } - model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) + state_dict1, _ = comfy.utils.convert_old_quants(state_dict1, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})}) + model = SimpleModel(operations=ops.mixed_precision_ops({})) model.load_state_dict(state_dict1, strict=False) # Save state dict @@ -178,7 +183,8 @@ class TestMixedPrecisionOps(unittest.TestCase): "layer3.bias": torch.randn(40, dtype=torch.bfloat16), } - model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) + state_dict, _ = comfy.utils.convert_old_quants(state_dict, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})}) + model = SimpleModel(operations=ops.mixed_precision_ops({})) model.load_state_dict(state_dict, strict=False) # Add a weight function (simulating LoRA) @@ -215,8 +221,10 @@ class TestMixedPrecisionOps(unittest.TestCase): "layer3.bias": torch.randn(40, dtype=torch.bfloat16), } + state_dict, _ = comfy.utils.convert_old_quants(state_dict, metadata={"_quantization_metadata": json.dumps({"layers": layer_quant_config})}) + # Load should raise KeyError for unknown format in QUANT_FORMAT_MIXINS - model = SimpleModel(operations=ops.mixed_precision_ops(layer_quant_config)) + model = SimpleModel(operations=ops.mixed_precision_ops({})) with self.assertRaises(KeyError): model.load_state_dict(state_dict, strict=False) From 6fd463aec958f02be79a264eafd6c8fe7e52762a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 5 Dec 2025 12:33:16 -0800 Subject: [PATCH 0993/1073] Fix regression when text encoder loaded directly on GPU. 
(#11129) --- comfy/ops.py | 2 ++ comfy/sd.py | 44 ++++++++++++++++++++++++-------------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index dc06709a1..35237c9f7 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -552,6 +552,8 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec weight_scale_key = f"{prefix}weight_scale" scale = state_dict.pop(weight_scale_key, None) + if scale is not None: + scale = scale.to(device) layout_params = { 'scale': scale, 'orig_dtype': MixedPrecisionOps._compute_dtype, diff --git a/comfy/sd.py b/comfy/sd.py index 092715d79..c350322f8 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -98,7 +98,7 @@ def load_lora_for_models(model, clip, lora, strength_model, strength_clip): class CLIP: - def __init__(self, target=None, embedding_directory=None, no_init=False, tokenizer_data={}, parameters=0, model_options={}): + def __init__(self, target=None, embedding_directory=None, no_init=False, tokenizer_data={}, parameters=0, state_dict=[], model_options={}): if no_init: return params = target.params.copy() @@ -129,6 +129,27 @@ class CLIP: self.patcher.hook_mode = comfy.hooks.EnumHookMode.MinVram self.patcher.is_clip = True self.apply_hooks_to_conds = None + if len(state_dict) > 0: + if isinstance(state_dict, list): + for c in state_dict: + m, u = self.load_sd(c) + if len(m) > 0: + logging.warning("clip missing: {}".format(m)) + + if len(u) > 0: + logging.debug("clip unexpected: {}".format(u)) + else: + m, u = self.load_sd(state_dict, full_model=True) + if len(m) > 0: + m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m)) + if len(m_filter) > 0: + logging.warning("clip missing: {}".format(m)) + else: + logging.debug("clip missing: {}".format(m)) + + if len(u) > 0: + logging.debug("clip unexpected {}:".format(u)) + if params['device'] == load_device: model_management.load_models_gpu([self.patcher], force_full_load=True) self.layer_idx = None @@ -1225,14 +1246,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip parameters += comfy.utils.calculate_parameters(c) tokenizer_data, model_options = comfy.text_encoders.long_clipl.model_options_long_clip(c, tokenizer_data, model_options) - clip = CLIP(clip_target, embedding_directory=embedding_directory, parameters=parameters, tokenizer_data=tokenizer_data, model_options=model_options) - for c in clip_data: - m, u = clip.load_sd(c) - if len(m) > 0: - logging.warning("clip missing: {}".format(m)) - - if len(u) > 0: - logging.debug("clip unexpected: {}".format(u)) + clip = CLIP(clip_target, embedding_directory=embedding_directory, parameters=parameters, tokenizer_data=tokenizer_data, state_dict=clip_data, model_options=model_options) return clip def load_gligen(ckpt_path): @@ -1362,17 +1376,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c clip_sd = model_config.process_clip_state_dict(sd) if len(clip_sd) > 0: parameters = comfy.utils.calculate_parameters(clip_sd) - clip = CLIP(clip_target, embedding_directory=embedding_directory, tokenizer_data=clip_sd, parameters=parameters, model_options=te_model_options) - m, u = clip.load_sd(clip_sd, full_model=True) - if len(m) > 0: - m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m)) - if len(m_filter) > 0: - logging.warning("clip missing: {}".format(m)) - else: - logging.debug("clip missing: {}".format(m)) - - if len(u) > 0: - 
logging.debug("clip unexpected {}:".format(u)) + clip = CLIP(clip_target, embedding_directory=embedding_directory, tokenizer_data=clip_sd, parameters=parameters, state_dict=clip_sd, model_options=te_model_options) else: logging.warning("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.") From 79d17ba2339aaf4f3422673b3dad24ba4dbd7552 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Fri, 5 Dec 2025 22:42:46 +0200 Subject: [PATCH 0994/1073] Context windows fixes and features (#10975) * Apply cond slice fix * Add FreeNoise * Update context_windows.py * Add option to retain condition by indexes for each window This allows for example Wan/HunyuanVideo image to video to "work" by using the initial start frame for each window, otherwise windows beyond first will be pure T2V generations. * Update context_windows.py * Allow splitting multiple conds into different windows * Add handling for audio_embed * whitespace * Allow freenoise to work on other dims, handle 4D batch timestep Refactor Freenoise function. And fix batch handling as timesteps seem to be expanded to batch size now. * Disable experimental options for now So that the Freenoise and bugfixes can be merged first --------- Co-authored-by: Jedrzej Kosinski Co-authored-by: ozbayb <17261091+ozbayb@users.noreply.github.com> --- comfy/context_windows.py | 104 ++++++++++++++++++++++---- comfy_extras/nodes_context_windows.py | 22 +++++- 2 files changed, 108 insertions(+), 18 deletions(-) diff --git a/comfy/context_windows.py b/comfy/context_windows.py index 041f380f9..5c412d1c2 100644 --- a/comfy/context_windows.py +++ b/comfy/context_windows.py @@ -51,26 +51,36 @@ class ContextHandlerABC(ABC): class IndexListContextWindow(ContextWindowABC): - def __init__(self, index_list: list[int], dim: int=0): + def __init__(self, index_list: list[int], dim: int=0, total_frames: int=0): self.index_list = index_list self.context_length = len(index_list) self.dim = dim + self.total_frames = total_frames + self.center_ratio = (min(index_list) + max(index_list)) / (2 * total_frames) - def get_tensor(self, full: torch.Tensor, device=None, dim=None) -> torch.Tensor: + def get_tensor(self, full: torch.Tensor, device=None, dim=None, retain_index_list=[]) -> torch.Tensor: if dim is None: dim = self.dim if dim == 0 and full.shape[dim] == 1: return full - idx = [slice(None)] * dim + [self.index_list] - return full[idx].to(device) + idx = tuple([slice(None)] * dim + [self.index_list]) + window = full[idx] + if retain_index_list: + idx = tuple([slice(None)] * dim + [retain_index_list]) + window[idx] = full[idx] + return window.to(device) def add_window(self, full: torch.Tensor, to_add: torch.Tensor, dim=None) -> torch.Tensor: if dim is None: dim = self.dim - idx = [slice(None)] * dim + [self.index_list] + idx = tuple([slice(None)] * dim + [self.index_list]) full[idx] += to_add return full + def get_region_index(self, num_regions: int) -> int: + region_idx = int(self.center_ratio * num_regions) + return min(max(region_idx, 0), num_regions - 1) + class IndexListCallbacks: EVALUATE_CONTEXT_WINDOWS = "evaluate_context_windows" @@ -94,7 +104,8 @@ class ContextFuseMethod: ContextResults = collections.namedtuple("ContextResults", ['window_idx', 'sub_conds_out', 'sub_conds', 'window']) class IndexListContextHandler(ContextHandlerABC): - def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMethod, context_length: int=1, context_overlap: int=0, 
context_stride: int=1, closed_loop=False, dim=0): + def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMethod, context_length: int=1, context_overlap: int=0, context_stride: int=1, + closed_loop: bool=False, dim:int=0, freenoise: bool=False, cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False): self.context_schedule = context_schedule self.fuse_method = fuse_method @@ -103,13 +114,18 @@ class IndexListContextHandler(ContextHandlerABC): self.closed_loop = closed_loop self.dim = dim self._step = 0 + self.freenoise = freenoise + self.cond_retain_index_list = [int(x.strip()) for x in cond_retain_index_list.split(",")] if cond_retain_index_list else [] + self.split_conds_to_windows = split_conds_to_windows self.callbacks = {} def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool: # for now, assume first dim is batch - should have stored on BaseModel in actual implementation if x_in.size(self.dim) > self.context_length: - logging.info(f"Using context windows {self.context_length} for {x_in.size(self.dim)} frames.") + logging.info(f"Using context windows {self.context_length} with overlap {self.context_overlap} for {x_in.size(self.dim)} frames.") + if self.cond_retain_index_list: + logging.info(f"Retaining original cond for indexes: {self.cond_retain_index_list}") return True return False @@ -123,6 +139,11 @@ class IndexListContextHandler(ContextHandlerABC): return None # reuse or resize cond items to match context requirements resized_cond = [] + # if multiple conds, split based on primary region + if self.split_conds_to_windows and len(cond_in) > 1: + region = window.get_region_index(len(cond_in)) + logging.info(f"Splitting conds to windows; using region {region} for window {window.index_list[0]}-{window.index_list[-1]} with center ratio {window.center_ratio:.3f}") + cond_in = [cond_in[region]] # cond object is a list containing a dict - outer list is irrelevant, so just loop through it for actual_cond in cond_in: resized_actual_cond = actual_cond.copy() @@ -146,12 +167,19 @@ class IndexListContextHandler(ContextHandlerABC): # when in dictionary, look for tensors and CONDCrossAttn [comfy/conds.py] (has cond attr that is a tensor) for cond_key, cond_value in new_cond_item.items(): if isinstance(cond_value, torch.Tensor): - if cond_value.ndim < self.dim and cond_value.size(0) == x_in.size(self.dim): + if (self.dim < cond_value.ndim and cond_value.size(self.dim) == x_in.size(self.dim)) or \ + (cond_value.ndim < self.dim and cond_value.size(0) == x_in.size(self.dim)): new_cond_item[cond_key] = window.get_tensor(cond_value, device) + # Handle audio_embed (temporal dim is 1) + elif cond_key == "audio_embed" and hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor): + audio_cond = cond_value.cond + if audio_cond.ndim > 1 and audio_cond.size(1) == x_in.size(self.dim): + new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(audio_cond, device, dim=1)) # if has cond that is a Tensor, check if needs to be subset elif hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor): - if cond_value.cond.ndim < self.dim and cond_value.cond.size(0) == x_in.size(self.dim): - new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(cond_value.cond, device)) + if (self.dim < cond_value.cond.ndim and cond_value.cond.size(self.dim) == x_in.size(self.dim)) or \ + (cond_value.cond.ndim < self.dim and
cond_value.cond.size(0) == x_in.size(self.dim)): + new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(cond_value.cond, device, retain_index_list=self.cond_retain_index_list)) elif cond_key == "num_video_frames": # for SVD new_cond_item[cond_key] = cond_value._copy_with(cond_value.cond) new_cond_item[cond_key].cond = window.context_length @@ -164,7 +192,7 @@ class IndexListContextHandler(ContextHandlerABC): return resized_cond def set_step(self, timestep: torch.Tensor, model_options: dict[str]): - mask = torch.isclose(model_options["transformer_options"]["sample_sigmas"], timestep, rtol=0.0001) + mask = torch.isclose(model_options["transformer_options"]["sample_sigmas"], timestep[0], rtol=0.0001) matches = torch.nonzero(mask) if torch.numel(matches) == 0: raise Exception("No sample_sigmas matched current timestep; something went wrong.") @@ -173,7 +201,7 @@ class IndexListContextHandler(ContextHandlerABC): def get_context_windows(self, model: BaseModel, x_in: torch.Tensor, model_options: dict[str]) -> list[IndexListContextWindow]: full_length = x_in.size(self.dim) # TODO: choose dim based on model context_windows = self.context_schedule.func(full_length, self, model_options) - context_windows = [IndexListContextWindow(window, dim=self.dim) for window in context_windows] + context_windows = [IndexListContextWindow(window, dim=self.dim, total_frames=full_length) for window in context_windows] return context_windows def execute(self, calc_cond_batch: Callable, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]): @@ -250,8 +278,8 @@ class IndexListContextHandler(ContextHandlerABC): prev_weight = (bias_total / (bias_total + bias)) new_weight = (bias / (bias_total + bias)) # account for dims of tensors - idx_window = [slice(None)] * self.dim + [idx] - pos_window = [slice(None)] * self.dim + [pos] + idx_window = tuple([slice(None)] * self.dim + [idx]) + pos_window = tuple([slice(None)] * self.dim + [pos]) # apply new values conds_final[i][idx_window] = conds_final[i][idx_window] * prev_weight + sub_conds_out[i][pos_window] * new_weight biases_final[i][idx] = bias_total + bias @@ -287,6 +315,28 @@ def create_prepare_sampling_wrapper(model: ModelPatcher): ) +def _sampler_sample_wrapper(executor, guider, sigmas, extra_args, callback, noise, *args, **kwargs): + model_options = extra_args.get("model_options", None) + if model_options is None: + raise Exception("model_options not found in sampler_sample_wrapper; this should never happen, something went wrong.") + handler: IndexListContextHandler = model_options.get("context_handler", None) + if handler is None: + raise Exception("context_handler not found in sampler_sample_wrapper; this should never happen, something went wrong.") + if not handler.freenoise: + return executor(guider, sigmas, extra_args, callback, noise, *args, **kwargs) + noise = apply_freenoise(noise, handler.dim, handler.context_length, handler.context_overlap, extra_args["seed"]) + + return executor(guider, sigmas, extra_args, callback, noise, *args, **kwargs) + + +def create_sampler_sample_wrapper(model: ModelPatcher): + model.add_wrapper_with_key( + comfy.patcher_extension.WrappersMP.SAMPLER_SAMPLE, + "ContextWindows_sampler_sample", + _sampler_sample_wrapper + ) + + def match_weights_to_dim(weights: list[float], x_in: torch.Tensor, dim: int, device=None) -> torch.Tensor: total_dims = len(x_in.shape) weights_tensor = torch.Tensor(weights).to(device=device) @@ -538,3 +588,29 @@ def 
shift_window_to_end(window: list[int], num_frames: int): for i in range(len(window)): # 2) add end_delta to each val to slide windows to end window[i] = window[i] + end_delta + + +# https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved/blob/90fb1331201a4b29488089e4fbffc0d82cc6d0a9/animatediff/sample_settings.py#L465 +def apply_freenoise(noise: torch.Tensor, dim: int, context_length: int, context_overlap: int, seed: int): + logging.info("Context windows: Applying FreeNoise") + generator = torch.Generator(device='cpu').manual_seed(seed) + latent_video_length = noise.shape[dim] + delta = context_length - context_overlap + + for start_idx in range(0, latent_video_length - context_length, delta): + place_idx = start_idx + context_length + + actual_delta = min(delta, latent_video_length - place_idx) + if actual_delta <= 0: + break + + list_idx = torch.randperm(actual_delta, generator=generator, device='cpu') + start_idx + + source_slice = [slice(None)] * noise.ndim + source_slice[dim] = list_idx + target_slice = [slice(None)] * noise.ndim + target_slice[dim] = slice(place_idx, place_idx + actual_delta) + + noise[tuple(target_slice)] = noise[tuple(source_slice)] + + return noise diff --git a/comfy_extras/nodes_context_windows.py b/comfy_extras/nodes_context_windows.py index 1c3d9e697..3799a9004 100644 --- a/comfy_extras/nodes_context_windows.py +++ b/comfy_extras/nodes_context_windows.py @@ -26,6 +26,9 @@ class ContextWindowsManualNode(io.ComfyNode): io.Boolean.Input("closed_loop", default=False, tooltip="Whether to close the context window loop; only applicable to looped schedules."), io.Combo.Input("fuse_method", options=comfy.context_windows.ContextFuseMethods.LIST_STATIC, default=comfy.context_windows.ContextFuseMethods.PYRAMID, tooltip="The method to use to fuse the context windows."), io.Int.Input("dim", min=0, max=5, default=0, tooltip="The dimension to apply the context windows to."), + io.Boolean.Input("freenoise", default=False, tooltip="Whether to apply FreeNoise noise shuffling, improves window blending."), + #io.String.Input("cond_retain_index_list", default="", tooltip="List of latent indices to retain in the conditioning tensors for each window, for example setting this to '0' will use the initial start image for each window."), + #io.Boolean.Input("split_conds_to_windows", default=False, tooltip="Whether to split multiple conditionings (created by ConditionCombine) to each window based on region index."), ], outputs=[ io.Model.Output(tooltip="The model with context windows applied during sampling."), @@ -34,7 +37,8 @@ class ContextWindowsManualNode(io.ComfyNode): ) @classmethod - def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str, dim: int) -> io.Model: + def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str, dim: int, freenoise: bool, + cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False) -> io.Model: model = model.clone() model.model_options["context_handler"] = comfy.context_windows.IndexListContextHandler( context_schedule=comfy.context_windows.get_matching_context_schedule(context_schedule), @@ -43,9 +47,15 @@ class ContextWindowsManualNode(io.ComfyNode): context_overlap=context_overlap, context_stride=context_stride, closed_loop=closed_loop, - dim=dim) + dim=dim, + freenoise=freenoise, + cond_retain_index_list=cond_retain_index_list, 
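A quick aside on the FreeNoise option being wired through these nodes: `apply_freenoise` above makes the initial noise self-similar across windows by refilling each successive `context_length - context_overlap` span of frames with a shuffled copy of noise from the window that precedes it, so overlapping windows denoise correlated noise and blend with fewer seams. A toy illustration of the shuffle for `dim=0` (the tensor shape and seed here are arbitrary):

```python
import torch

noise = torch.randn(16, 4)  # 16 "frames" of toy noise
context_length, context_overlap = 6, 2
delta = context_length - context_overlap
generator = torch.Generator(device="cpu").manual_seed(42)

for start_idx in range(0, noise.shape[0] - context_length, delta):
    place_idx = start_idx + context_length
    actual_delta = min(delta, noise.shape[0] - place_idx)
    if actual_delta <= 0:
        break
    # Refill the next span with a permuted copy of frames starting at this window.
    src = torch.randperm(actual_delta, generator=generator, device="cpu") + start_idx
    noise[place_idx:place_idx + actual_delta] = noise[src]
# Every frame beyond the first window now reuses (permuted) noise from earlier frames.
```

This mirrors the logic of `apply_freenoise` with the slicing specialized to the first dimension.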
+ split_conds_to_windows=split_conds_to_windows + ) # make memory usage calculation only take into account the context window latents comfy.context_windows.create_prepare_sampling_wrapper(model) + if freenoise: # no other use for this wrapper at this time + comfy.context_windows.create_sampler_sample_wrapper(model) return io.NodeOutput(model) class WanContextWindowsManualNode(ContextWindowsManualNode): @@ -68,14 +78,18 @@ class WanContextWindowsManualNode(ContextWindowsManualNode): io.Int.Input("context_stride", min=1, default=1, tooltip="The stride of the context window; only applicable to uniform schedules."), io.Boolean.Input("closed_loop", default=False, tooltip="Whether to close the context window loop; only applicable to looped schedules."), io.Combo.Input("fuse_method", options=comfy.context_windows.ContextFuseMethods.LIST_STATIC, default=comfy.context_windows.ContextFuseMethods.PYRAMID, tooltip="The method to use to fuse the context windows."), + io.Boolean.Input("freenoise", default=False, tooltip="Whether to apply FreeNoise noise shuffling, improves window blending."), + #io.String.Input("cond_retain_index_list", default="", tooltip="List of latent indices to retain in the conditioning tensors for each window, for example setting this to '0' will use the initial start image for each window."), + #io.Boolean.Input("split_conds_to_windows", default=False, tooltip="Whether to split multiple conditionings (created by ConditionCombine) to each window based on region index."), ] return schema @classmethod - def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str) -> io.Model: + def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str, freenoise: bool, + cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False) -> io.Model: context_length = max(((context_length - 1) // 4) + 1, 1) # at least length 1 context_overlap = max(((context_overlap - 1) // 4) + 1, 0) # at least overlap 0 - return super().execute(model, context_length, context_overlap, context_schedule, context_stride, closed_loop, fuse_method, dim=2) + return super().execute(model, context_length, context_overlap, context_schedule, context_stride, closed_loop, fuse_method, dim=2, freenoise=freenoise, cond_retain_index_list=cond_retain_index_list, split_conds_to_windows=split_conds_to_windows) class ContextWindowsExtension(ComfyExtension): From 092ee8a5008c8d558b0a72cc7961a31d9cc5400b Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 5 Dec 2025 15:25:31 -0800 Subject: [PATCH 0995/1073] Fix some custom nodes. (#11134) --- comfy/supported_models_base.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 9fd84d329..0e7a829ba 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -17,6 +17,7 @@ """ import torch +import logging from . import model_base from . import utils from . import latent_formats @@ -117,3 +118,7 @@ class BASE: def set_inference_dtype(self, dtype, manual_cast_dtype): self.unet_config['dtype'] = dtype self.manual_cast_dtype = manual_cast_dtype + + def __getattr__(self, name): + logging.warning("\nWARNING, you accessed {} from the model config object which doesn't exist. 
Please fix your code.\n".format(name)) + return None From bed12674a1d2c4bfdfbdd098686390f807996c90 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Sat, 6 Dec 2025 08:45:38 +0900 Subject: [PATCH 0996/1073] docs: add ComfyUI-Manager documentation and update to v4.0.3b4 (#11133) - Add manager setup instructions and command line options to README - Document --enable-manager, --enable-manager-legacy-ui, and --disable-manager-ui flags - Bump comfyui_manager version from 4.0.3b3 to 4.0.3b4 --- README.md | 26 ++++++++++++++++++++++++++ manager_requirements.txt | 2 +- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ed857df9f..bae955b1b 100644 --- a/README.md +++ b/README.md @@ -320,6 +320,32 @@ For models compatible with Iluvatar Extension for PyTorch. Here's a step-by-step 1. Install the Iluvatar Corex Toolkit by adhering to the platform-specific instructions on the [Installation](https://support.iluvatar.com/#/DocumentCentre?id=1&nameCenter=2&productId=520117912052801536) 2. Launch ComfyUI by running `python main.py` + +## [ComfyUI-Manager](https://github.com/Comfy-Org/ComfyUI-Manager/tree/manager-v4) + +**ComfyUI-Manager** is an extension that allows you to easily install, update, and manage custom nodes for ComfyUI. + +### Setup + +1. Install the manager dependencies: + ```bash + pip install -r manager_requirements.txt + ``` + +2. Enable the manager with the `--enable-manager` flag when running ComfyUI: + ```bash + python main.py --enable-manager + ``` + +### Command Line Options + +| Flag | Description | +|------|-------------| +| `--enable-manager` | Enable ComfyUI-Manager | +| `--enable-manager-legacy-ui` | Use the legacy manager UI instead of the new UI (requires `--enable-manager`) | +| `--disable-manager-ui` | Disable the manager UI and endpoints while keeping background features like security checks and scheduled installation completion (requires `--enable-manager`) | + + # Running ```python main.py``` diff --git a/manager_requirements.txt b/manager_requirements.txt index 52cc5389c..b95cefb74 100644 --- a/manager_requirements.txt +++ b/manager_requirements.txt @@ -1 +1 @@ -comfyui_manager==4.0.3b3 +comfyui_manager==4.0.3b4 From fd109325db7126f92c2dfb7e6b25310eded8c1f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Sat, 6 Dec 2025 05:20:22 +0200 Subject: [PATCH 0997/1073] Kandinsky5 model support (#10988) * Add Kandinsky5 model support lite and pro T2V tested to work * Update kandinsky5.py * Fix fp8 * Fix fp8_scaled text encoder * Add transformer_options for attention * Code cleanup, optimizations, use fp32 for all layers originally at fp32 * ImageToVideo -node * Fix I2V, add necessary latent post process nodes * Support text to image model * Support block replace patches (SLG mostly) * Support official LoRAs * Don't scale RoPE for lite model as that just doesn't work... 
* Update supported_models.py * Revert RoPE scaling to simpler one * Fix typo * Handle latent dim difference for image model in the VAE instead * Add node to use different prompts for clip_l and qwen25_7b * Reduce peak VRAM usage a bit * Further reduce peak VRAM consumption by chunking ffn * Update chunking * Update memory_usage_factor * Code cleanup, don't force the fp32 layers as it has minimal effect * Allow for stronger changes with first frames normalization Default values are too weak for any meaningful changes; these should probably be exposed as advanced node options when that's available. * Add image model's own chat template, remove unused image2video template * Remove hard error in ReplaceVideoLatentFrames -node * Update kandinsky5.py * Update supported_models.py * Fix typos in prompt template They have now been fixed in the original repository as well * Update ReplaceVideoLatentFrames Add tooltips Make source optional Better handle negative index * Rename NormalizeVideoLatentFrames -node For a bit better clarity on what it does * Fix NormalizeVideoLatentStart node output on no-op --- comfy/ldm/kandinsky5/model.py | 407 ++++++++++++++++++++++++++++++ comfy/lora.py | 7 + comfy/model_base.py | 47 ++++ comfy/model_detection.py | 18 ++ comfy/sd.py | 11 + comfy/supported_models.py | 56 +++- comfy/text_encoders/kandinsky5.py | 68 +++++ comfy_api/latest/_io.py | 2 + comfy_extras/nodes_kandinsky5.py | 136 ++++++++++ comfy_extras/nodes_latent.py | 39 ++- nodes.py | 3 +- 11 files changed, 791 insertions(+), 3 deletions(-) create mode 100644 comfy/ldm/kandinsky5/model.py create mode 100644 comfy/text_encoders/kandinsky5.py create mode 100644 comfy_extras/nodes_kandinsky5.py diff --git a/comfy/ldm/kandinsky5/model.py b/comfy/ldm/kandinsky5/model.py new file mode 100644 index 000000000..a653e02fc --- /dev/null +++ b/comfy/ldm/kandinsky5/model.py @@ -0,0 +1,407 @@ +import torch +from torch import nn +import math + +import comfy.ldm.common_dit +from comfy.ldm.modules.attention import optimized_attention +from comfy.ldm.flux.math import apply_rope1 +from comfy.ldm.flux.layers import EmbedND def attention(q, k, v, heads, transformer_options={}): + return optimized_attention( + q.transpose(1, 2), + k.transpose(1, 2), + v.transpose(1, 2), + heads=heads, + skip_reshape=True, + transformer_options=transformer_options + ) +def apply_scale_shift_norm(norm, x, scale, shift): + return torch.addcmul(shift, norm(x), scale + 1.0) +def apply_gate_sum(x, out, gate): + return torch.addcmul(x, gate, out) +def get_shift_scale_gate(params): + shift, scale, gate = torch.chunk(params, 3, dim=-1) + return tuple(x.unsqueeze(1) for x in (shift, scale, gate)) +def get_freqs(dim, max_period=10000.0): + return torch.exp(-math.log(max_period) * torch.arange(start=0, end=dim, dtype=torch.float32) / dim) + + +class TimeEmbeddings(nn.Module): + def __init__(self, model_dim, time_dim, max_period=10000.0, operation_settings=None): + super().__init__() + assert model_dim % 2 == 0 + self.model_dim = model_dim + self.max_period = max_period + self.register_buffer("freqs", get_freqs(model_dim // 2, max_period), persistent=False) + operations = operation_settings.get("operations") + self.in_layer = operations.Linear(model_dim, time_dim, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.activation = nn.SiLU() + self.out_layer = operations.Linear(time_dim, time_dim, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, timestep,
dtype): + args = torch.outer(timestep, self.freqs.to(device=timestep.device)) + time_embed = torch.cat([torch.cos(args), torch.sin(args)], dim=-1).to(dtype) + time_embed = self.out_layer(self.activation(self.in_layer(time_embed))) + return time_embed + + +class TextEmbeddings(nn.Module): + def __init__(self, text_dim, model_dim, operation_settings=None): + super().__init__() + operations = operation_settings.get("operations") + self.in_layer = operations.Linear(text_dim, model_dim, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.norm = operations.LayerNorm(model_dim, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, text_embed): + text_embed = self.in_layer(text_embed) + return self.norm(text_embed).type_as(text_embed) + + +class VisualEmbeddings(nn.Module): + def __init__(self, visual_dim, model_dim, patch_size, operation_settings=None): + super().__init__() + self.patch_size = patch_size + operations = operation_settings.get("operations") + self.in_layer = operations.Linear(visual_dim, model_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, x): + x = x.movedim(1, -1) # B C T H W -> B T H W C + B, T, H, W, dim = x.shape + pt, ph, pw = self.patch_size + + x = x.view( + B, + T // pt, pt, + H // ph, ph, + W // pw, pw, + dim, + ).permute(0, 1, 3, 5, 2, 4, 6, 7).flatten(4, 7) + + return self.in_layer(x) + + +class Modulation(nn.Module): + def __init__(self, time_dim, model_dim, num_params, operation_settings=None): + super().__init__() + self.activation = nn.SiLU() + self.out_layer = operation_settings.get("operations").Linear(time_dim, num_params * model_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, x): + return self.out_layer(self.activation(x)) + + +class SelfAttention(nn.Module): + def __init__(self, num_channels, head_dim, operation_settings=None): + super().__init__() + assert num_channels % head_dim == 0 + self.num_heads = num_channels // head_dim + self.head_dim = head_dim + + operations = operation_settings.get("operations") + self.to_query = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.to_key = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.to_value = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.query_norm = operations.RMSNorm(head_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.key_norm = operations.RMSNorm(head_dim, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + self.out_layer = operations.Linear(num_channels, num_channels, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.num_chunks = 2 + + def _compute_qk(self, x, freqs, proj_fn, norm_fn): + result = proj_fn(x).view(*x.shape[:-1], self.num_heads, -1) + return apply_rope1(norm_fn(result), freqs) + + def _forward(self, x, freqs, transformer_options={}): + q = self._compute_qk(x, freqs, self.to_query, self.query_norm) + k = self._compute_qk(x, freqs, self.to_key, self.key_norm) + v = self.to_value(x).view(*x.shape[:-1], self.num_heads, -1) + out = attention(q, k, v, 
self.num_heads, transformer_options=transformer_options) + return self.out_layer(out) + + def _forward_chunked(self, x, freqs, transformer_options={}): + def process_chunks(proj_fn, norm_fn): + x_chunks = torch.chunk(x, self.num_chunks, dim=1) + freqs_chunks = torch.chunk(freqs, self.num_chunks, dim=1) + chunks = [] + for x_chunk, freqs_chunk in zip(x_chunks, freqs_chunks): + chunks.append(self._compute_qk(x_chunk, freqs_chunk, proj_fn, norm_fn)) + return torch.cat(chunks, dim=1) + + q = process_chunks(self.to_query, self.query_norm) + k = process_chunks(self.to_key, self.key_norm) + v = self.to_value(x).view(*x.shape[:-1], self.num_heads, -1) + out = attention(q, k, v, self.num_heads, transformer_options=transformer_options) + return self.out_layer(out) + + def forward(self, x, freqs, transformer_options={}): + if x.shape[1] > 8192: + return self._forward_chunked(x, freqs, transformer_options=transformer_options) + else: + return self._forward(x, freqs, transformer_options=transformer_options) + + +class CrossAttention(SelfAttention): + def get_qkv(self, x, context): + q = self.to_query(x).view(*x.shape[:-1], self.num_heads, -1) + k = self.to_key(context).view(*context.shape[:-1], self.num_heads, -1) + v = self.to_value(context).view(*context.shape[:-1], self.num_heads, -1) + return q, k, v + + def forward(self, x, context, transformer_options={}): + q, k, v = self.get_qkv(x, context) + out = attention(self.query_norm(q), self.key_norm(k), v, self.num_heads, transformer_options=transformer_options) + return self.out_layer(out) + + +class FeedForward(nn.Module): + def __init__(self, dim, ff_dim, operation_settings=None): + super().__init__() + operations = operation_settings.get("operations") + self.in_layer = operations.Linear(dim, ff_dim, bias=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.activation = nn.GELU() + self.out_layer = operations.Linear(ff_dim, dim, bias=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.num_chunks = 4 + + def _forward(self, x): + return self.out_layer(self.activation(self.in_layer(x))) + + def _forward_chunked(self, x): + chunks = torch.chunk(x, self.num_chunks, dim=1) + output_chunks = [] + for chunk in chunks: + output_chunks.append(self._forward(chunk)) + return torch.cat(output_chunks, dim=1) + + def forward(self, x): + if x.shape[1] > 8192: + return self._forward_chunked(x) + else: + return self._forward(x) + + +class OutLayer(nn.Module): + def __init__(self, model_dim, time_dim, visual_dim, patch_size, operation_settings=None): + super().__init__() + self.patch_size = patch_size + self.modulation = Modulation(time_dim, model_dim, 2, operation_settings=operation_settings) + operations = operation_settings.get("operations") + self.norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.out_layer = operations.Linear(model_dim, math.prod(patch_size) * visual_dim, bias=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + + def forward(self, visual_embed, time_embed): + B, T, H, W, _ = visual_embed.shape + shift, scale = torch.chunk(self.modulation(time_embed), 2, dim=-1) + scale = scale[:, None, None, None, :] + shift = shift[:, None, None, None, :] + visual_embed = apply_scale_shift_norm(self.norm, visual_embed, scale, shift) + x = self.out_layer(visual_embed) + + out_dim = x.shape[-1] // (self.patch_size[0] * self.patch_size[1] * 
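+ # Unpatchify: dividing the last dim by the flattened patch volume (pt*ph*pw) recovers the per-position output channel count.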
self.patch_size[2]) + x = x.view( + B, T, H, W, + out_dim, + self.patch_size[0], self.patch_size[1], self.patch_size[2] + ) + return x.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(2, 3).flatten(3, 4).flatten(4, 5) + + +class TransformerEncoderBlock(nn.Module): + def __init__(self, model_dim, time_dim, ff_dim, head_dim, operation_settings=None): + super().__init__() + self.text_modulation = Modulation(time_dim, model_dim, 6, operation_settings=operation_settings) + operations = operation_settings.get("operations") + + self.self_attention_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.self_attention = SelfAttention(model_dim, head_dim, operation_settings=operation_settings) + + self.feed_forward_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.feed_forward = FeedForward(model_dim, ff_dim, operation_settings=operation_settings) + + def forward(self, x, time_embed, freqs, transformer_options={}): + self_attn_params, ff_params = torch.chunk(self.text_modulation(time_embed), 2, dim=-1) + shift, scale, gate = get_shift_scale_gate(self_attn_params) + out = apply_scale_shift_norm(self.self_attention_norm, x, scale, shift) + out = self.self_attention(out, freqs, transformer_options=transformer_options) + x = apply_gate_sum(x, out, gate) + + shift, scale, gate = get_shift_scale_gate(ff_params) + out = apply_scale_shift_norm(self.feed_forward_norm, x, scale, shift) + out = self.feed_forward(out) + x = apply_gate_sum(x, out, gate) + return x + + +class TransformerDecoderBlock(nn.Module): + def __init__(self, model_dim, time_dim, ff_dim, head_dim, operation_settings=None): + super().__init__() + self.visual_modulation = Modulation(time_dim, model_dim, 9, operation_settings=operation_settings) + + operations = operation_settings.get("operations") + self.self_attention_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.self_attention = SelfAttention(model_dim, head_dim, operation_settings=operation_settings) + + self.cross_attention_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.cross_attention = CrossAttention(model_dim, head_dim, operation_settings=operation_settings) + + self.feed_forward_norm = operations.LayerNorm(model_dim, elementwise_affine=False, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")) + self.feed_forward = FeedForward(model_dim, ff_dim, operation_settings=operation_settings) + + def forward(self, visual_embed, text_embed, time_embed, freqs, transformer_options={}): + self_attn_params, cross_attn_params, ff_params = torch.chunk(self.visual_modulation(time_embed), 3, dim=-1) + # self attention + shift, scale, gate = get_shift_scale_gate(self_attn_params) + visual_out = apply_scale_shift_norm(self.self_attention_norm, visual_embed, scale, shift) + visual_out = self.self_attention(visual_out, freqs, transformer_options=transformer_options) + visual_embed = apply_gate_sum(visual_embed, visual_out, gate) + # cross attention + shift, scale, gate = get_shift_scale_gate(cross_attn_params) + visual_out = apply_scale_shift_norm(self.cross_attention_norm, visual_embed, scale, shift) + visual_out = self.cross_attention(visual_out, text_embed, 
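+ # Per-token text conditioning enters the visual stream only through this cross-attention; the shift/scale/gate modulation comes from the time embedding, which already includes the pooled text embedding.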
transformer_options=transformer_options) + visual_embed = apply_gate_sum(visual_embed, visual_out, gate) + # feed forward + shift, scale, gate = get_shift_scale_gate(ff_params) + visual_out = apply_scale_shift_norm(self.feed_forward_norm, visual_embed, scale, shift) + visual_out = self.feed_forward(visual_out) + visual_embed = apply_gate_sum(visual_embed, visual_out, gate) + return visual_embed + + +class Kandinsky5(nn.Module): + def __init__( + self, + in_visual_dim=16, out_visual_dim=16, in_text_dim=3584, in_text_dim2=768, time_dim=512, + model_dim=1792, ff_dim=7168, visual_embed_dim=132, patch_size=(1, 2, 2), num_text_blocks=2, num_visual_blocks=32, + axes_dims=(16, 24, 24), rope_scale_factor=(1.0, 2.0, 2.0), + dtype=None, device=None, operations=None, **kwargs + ): + super().__init__() + head_dim = sum(axes_dims) + self.rope_scale_factor = rope_scale_factor + self.in_visual_dim = in_visual_dim + self.model_dim = model_dim + self.patch_size = patch_size + self.visual_embed_dim = visual_embed_dim + self.dtype = dtype + self.device = device + operation_settings = {"operations": operations, "device": device, "dtype": dtype} + + self.time_embeddings = TimeEmbeddings(model_dim, time_dim, operation_settings=operation_settings) + self.text_embeddings = TextEmbeddings(in_text_dim, model_dim, operation_settings=operation_settings) + self.pooled_text_embeddings = TextEmbeddings(in_text_dim2, time_dim, operation_settings=operation_settings) + self.visual_embeddings = VisualEmbeddings(visual_embed_dim, model_dim, patch_size, operation_settings=operation_settings) + + self.text_transformer_blocks = nn.ModuleList( + [TransformerEncoderBlock(model_dim, time_dim, ff_dim, head_dim, operation_settings=operation_settings) for _ in range(num_text_blocks)] + ) + + self.visual_transformer_blocks = nn.ModuleList( + [TransformerDecoderBlock(model_dim, time_dim, ff_dim, head_dim, operation_settings=operation_settings) for _ in range(num_visual_blocks)] + ) + + self.out_layer = OutLayer(model_dim, time_dim, out_visual_dim, patch_size, operation_settings=operation_settings) + + self.rope_embedder_3d = EmbedND(dim=head_dim, theta=10000.0, axes_dim=axes_dims) + self.rope_embedder_1d = EmbedND(dim=head_dim, theta=10000.0, axes_dim=[head_dim]) + + def rope_encode_1d(self, seq_len, seq_start=0, steps=None, device=None, dtype=None, transformer_options={}): + steps = seq_len if steps is None else steps + seq_ids = torch.linspace(seq_start, seq_start + (seq_len - 1), steps=steps, device=device, dtype=dtype) + seq_ids = seq_ids.reshape(-1, 1).unsqueeze(0) # Shape: (1, steps, 1) + freqs = self.rope_embedder_1d(seq_ids).movedim(1, 2) + return freqs + + def rope_encode_3d(self, t, h, w, t_start=0, steps_t=None, steps_h=None, steps_w=None, device=None, dtype=None, transformer_options={}): + + patch_size = self.patch_size + t_len = ((t + (patch_size[0] // 2)) // patch_size[0]) + h_len = ((h + (patch_size[1] // 2)) // patch_size[1]) + w_len = ((w + (patch_size[2] // 2)) // patch_size[2]) + + if steps_t is None: + steps_t = t_len + if steps_h is None: + steps_h = h_len + if steps_w is None: + steps_w = w_len + + h_start = 0 + w_start = 0 + rope_options = transformer_options.get("rope_options", None) + if rope_options is not None: + t_len = (t_len - 1.0) * rope_options.get("scale_t", 1.0) + 1.0 + h_len = (h_len - 1.0) * rope_options.get("scale_y", 1.0) + 1.0 + w_len = (w_len - 1.0) * rope_options.get("scale_x", 1.0) + 1.0 + + t_start += rope_options.get("shift_t", 0.0) + h_start += rope_options.get("shift_y", 0.0) + w_start += 
rope_options.get("shift_x", 0.0) + else: + rope_scale_factor = self.rope_scale_factor + if self.model_dim == 4096: # pro video model uses different rope scaling at higher resolutions + if h * w >= 14080: + rope_scale_factor = (1.0, 3.16, 3.16) + + t_len = (t_len - 1.0) / rope_scale_factor[0] + 1.0 + h_len = (h_len - 1.0) / rope_scale_factor[1] + 1.0 + w_len = (w_len - 1.0) / rope_scale_factor[2] + 1.0 + + img_ids = torch.zeros((steps_t, steps_h, steps_w, 3), device=device, dtype=dtype) + img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(t_start, t_start + (t_len - 1), steps=steps_t, device=device, dtype=dtype).reshape(-1, 1, 1) + img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(h_start, h_start + (h_len - 1), steps=steps_h, device=device, dtype=dtype).reshape(1, -1, 1) + img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(w_start, w_start + (w_len - 1), steps=steps_w, device=device, dtype=dtype).reshape(1, 1, -1) + img_ids = img_ids.reshape(1, -1, img_ids.shape[-1]) + + freqs = self.rope_embedder_3d(img_ids).movedim(1, 2) + return freqs + + def forward_orig(self, x, timestep, context, y, freqs, freqs_text, transformer_options={}, **kwargs): + patches_replace = transformer_options.get("patches_replace", {}) + context = self.text_embeddings(context) + time_embed = self.time_embeddings(timestep, x.dtype) + self.pooled_text_embeddings(y) + + for block in self.text_transformer_blocks: + context = block(context, time_embed, freqs_text, transformer_options=transformer_options) + + visual_embed = self.visual_embeddings(x) + visual_shape = visual_embed.shape[:-1] + visual_embed = visual_embed.flatten(1, -2) + + blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.visual_transformer_blocks) + transformer_options["block_type"] = "double" + for i, block in enumerate(self.visual_transformer_blocks): + transformer_options["block_index"] = i + if ("double_block", i) in blocks_replace: + def block_wrap(args): + return block(x=args["x"], context=args["context"], time_embed=args["time_embed"], freqs=args["freqs"], transformer_options=args.get("transformer_options")) + visual_embed = blocks_replace[("double_block", i)]({"x": visual_embed, "context": context, "time_embed": time_embed, "freqs": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap})["x"] + else: + visual_embed = block(visual_embed, context, time_embed, freqs=freqs, transformer_options=transformer_options) + + visual_embed = visual_embed.reshape(*visual_shape, -1) + return self.out_layer(visual_embed, time_embed) + + def _forward(self, x, timestep, context, y, time_dim_replace=None, transformer_options={}, **kwargs): + bs, c, t_len, h, w = x.shape + x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size) + + if time_dim_replace is not None: + time_dim_replace = comfy.ldm.common_dit.pad_to_patch_size(time_dim_replace, self.patch_size) + x[:, :time_dim_replace.shape[1], :time_dim_replace.shape[2]] = time_dim_replace + + freqs = self.rope_encode_3d(t_len, h, w, device=x.device, dtype=x.dtype, transformer_options=transformer_options) + freqs_text = self.rope_encode_1d(context.shape[1], device=x.device, dtype=x.dtype, transformer_options=transformer_options) + + return self.forward_orig(x, timestep, context, y, freqs, freqs_text, transformer_options=transformer_options, **kwargs) + + def forward(self, x, timestep, context, y, time_dim_replace=None, transformer_options={}, **kwargs): + return 
comfy.patcher_extension.WrapperExecutor.new_class_executor( + self._forward, + self, + comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options) + ).execute(x, timestep, context, y, time_dim_replace=time_dim_replace, transformer_options=transformer_options, **kwargs) diff --git a/comfy/lora.py b/comfy/lora.py index 3a9077869..e7202ce97 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -322,6 +322,13 @@ def model_lora_keys_unet(model, key_map={}): key_map["diffusion_model.{}".format(key_lora)] = to key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = to + if isinstance(model, comfy.model_base.Kandinsky5): + for k in sdk: + if k.startswith("diffusion_model.") and k.endswith(".weight"): + key_lora = k[len("diffusion_model."):-len(".weight")] + key_map["{}".format(key_lora)] = k + key_map["transformer.{}".format(key_lora)] = k + return key_map diff --git a/comfy/model_base.py b/comfy/model_base.py index 3cedd4f31..0be006cc2 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -47,6 +47,7 @@ import comfy.ldm.chroma_radiance.model import comfy.ldm.ace.model import comfy.ldm.omnigen.omnigen2 import comfy.ldm.qwen_image.model +import comfy.ldm.kandinsky5.model import comfy.model_management import comfy.patcher_extension @@ -1630,3 +1631,49 @@ class HunyuanVideo15_SR_Distilled(HunyuanVideo15): out = super().extra_conds(**kwargs) out['disable_time_r'] = comfy.conds.CONDConstant(False) return out + +class Kandinsky5(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.kandinsky5.model.Kandinsky5) + + def encode_adm(self, **kwargs): + return kwargs["pooled_output"] + + def concat_cond(self, **kwargs): + noise = kwargs.get("noise", None) + device = kwargs["device"] + image = torch.zeros_like(noise) + + mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) + if mask is None: + mask = torch.zeros_like(noise)[:, :1] + else: + mask = 1.0 - mask + mask = utils.common_upscale(mask.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + if mask.shape[-3] < noise.shape[-3]: + mask = torch.nn.functional.pad(mask, (0, 0, 0, 0, 0, noise.shape[-3] - mask.shape[-3]), mode='constant', value=0) + mask = utils.resize_to_batch_size(mask, noise.shape[0]) + + return torch.cat((image, mask), dim=1) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + attention_mask = kwargs.get("attention_mask", None) + if attention_mask is not None: + out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + + time_dim_replace = kwargs.get("time_dim_replace", None) + if time_dim_replace is not None: + out['time_dim_replace'] = comfy.conds.CONDRegular(self.process_latent_in(time_dim_replace)) + + return out + +class Kandinsky5Image(Kandinsky5): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, model_type, device=device) + + def concat_cond(self, **kwargs): + return None diff --git a/comfy/model_detection.py b/comfy/model_detection.py index fd1907627..30b33a486 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -611,6 +611,24 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["num_layers"] = count_blocks(state_dict_keys, '{}transformer_blocks.'.format(key_prefix) 
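# The Kandinsky 5 branch below matches on a weight name unique to its blocks (visual_transformer_blocks.0.cross_attention.key_norm.weight) and infers model_dim, ff_dim and block counts from tensor shapes.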
+ '{}.') return dit_config + if '{}visual_transformer_blocks.0.cross_attention.key_norm.weight'.format(key_prefix) in state_dict_keys: # Kandinsky 5 + dit_config = {} + model_dim = state_dict['{}visual_embeddings.in_layer.bias'.format(key_prefix)].shape[0] + dit_config["model_dim"] = model_dim + if model_dim in [4096, 2560]: # pro video and lite image + dit_config["axes_dims"] = (32, 48, 48) + if model_dim == 2560: # lite image + dit_config["rope_scale_factor"] = (1.0, 1.0, 1.0) + elif model_dim == 1792: # lite video + dit_config["axes_dims"] = (16, 24, 24) + dit_config["time_dim"] = state_dict['{}time_embeddings.in_layer.bias'.format(key_prefix)].shape[0] + dit_config["image_model"] = "kandinsky5" + dit_config["ff_dim"] = state_dict['{}visual_transformer_blocks.0.feed_forward.in_layer.weight'.format(key_prefix)].shape[0] + dit_config["visual_embed_dim"] = state_dict['{}visual_embeddings.in_layer.weight'.format(key_prefix)].shape[1] + dit_config["num_text_blocks"] = count_blocks(state_dict_keys, '{}text_transformer_blocks.'.format(key_prefix) + '{}.') + dit_config["num_visual_blocks"] = count_blocks(state_dict_keys, '{}visual_transformer_blocks.'.format(key_prefix) + '{}.') + return dit_config + if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys: return None diff --git a/comfy/sd.py b/comfy/sd.py index c350322f8..754b1703d 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -54,6 +54,7 @@ import comfy.text_encoders.qwen_image import comfy.text_encoders.hunyuan_image import comfy.text_encoders.z_image import comfy.text_encoders.ovis +import comfy.text_encoders.kandinsky5 import comfy.model_patcher import comfy.lora @@ -766,6 +767,8 @@ class VAE: self.throw_exception_if_invalid() pixel_samples = None do_tile = False + if self.latent_dim == 2 and samples_in.ndim == 5: + samples_in = samples_in[:, :, 0] try: memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype) model_management.load_models_gpu([self.patcher], memory_required=memory_used, force_full_load=self.disable_offload) @@ -983,6 +986,8 @@ class CLIPType(Enum): HUNYUAN_IMAGE = 19 HUNYUAN_VIDEO_15 = 20 OVIS = 21 + KANDINSKY5 = 22 + KANDINSKY5_IMAGE = 23 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): @@ -1231,6 +1236,12 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip elif clip_type == CLIPType.HUNYUAN_VIDEO_15: clip_target.clip = comfy.text_encoders.hunyuan_image.te(**llama_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.hunyuan_video.HunyuanVideo15Tokenizer + elif clip_type == CLIPType.KANDINSKY5: + clip_target.clip = comfy.text_encoders.kandinsky5.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.kandinsky5.Kandinsky5Tokenizer + elif clip_type == CLIPType.KANDINSKY5_IMAGE: + clip_target.clip = comfy.text_encoders.kandinsky5.te(**llama_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.kandinsky5.Kandinsky5TokenizerImage else: clip_target.clip = sdxl_clip.SDXLClipModel clip_target.tokenizer = sdxl_clip.SDXLTokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index afd97160b..91cc4ef08 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -21,6 +21,7 @@ import comfy.text_encoders.ace import comfy.text_encoders.omnigen2 import comfy.text_encoders.qwen_image import comfy.text_encoders.hunyuan_image +import comfy.text_encoders.kandinsky5 import comfy.text_encoders.z_image from . 
import supported_models_base @@ -1474,7 +1475,60 @@ class HunyuanVideo15_SR_Distilled(HunyuanVideo): hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_video.HunyuanVideo15Tokenizer, comfy.text_encoders.hunyuan_image.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, ZImage, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage, Flux2] +class Kandinsky5(supported_models_base.BASE): + unet_config = { + "image_model": "kandinsky5", + } + + sampling_settings = { + "shift": 10.0, + } + + unet_extra_config = {} + latent_format = latent_formats.HunyuanVideo + + memory_usage_factor = 1.1 #TODO + + supported_inference_dtypes = [torch.bfloat16, torch.float32] + + vae_key_prefix = ["vae."] + text_encoder_key_prefix = ["text_encoders."] + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.Kandinsky5(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.kandinsky5.Kandinsky5Tokenizer, comfy.text_encoders.kandinsky5.te(**hunyuan_detect)) + + +class Kandinsky5Image(Kandinsky5): + unet_config = { + "image_model": "kandinsky5", + "model_dim": 2560, + "visual_embed_dim": 64, + } + + sampling_settings = { + "shift": 3.0, + } + + latent_format = latent_formats.Flux + memory_usage_factor = 1.1 #TODO + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.Kandinsky5Image(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_7b.transformer.".format(pref)) + return supported_models_base.ClipTarget(comfy.text_encoders.kandinsky5.Kandinsky5TokenizerImage, comfy.text_encoders.kandinsky5.te(**hunyuan_detect)) + + +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, ZImage, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, 
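+# NOTE: this rewritten list drops Flux2 (restored by the "Fix regression" commit, patch 0998 below); Kandinsky5Image is listed before Kandinsky5 so the more specific image config (model_dim=2560) is matched first.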
Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage, Kandinsky5Image, Kandinsky5] models += [SVD_img2vid] diff --git a/comfy/text_encoders/kandinsky5.py b/comfy/text_encoders/kandinsky5.py new file mode 100644 index 000000000..22f991c36 --- /dev/null +++ b/comfy/text_encoders/kandinsky5.py @@ -0,0 +1,68 @@ +from comfy import sd1_clip +from .qwen_image import QwenImageTokenizer, QwenImageTEModel +from .llama import Qwen25_7BVLI + + +class Kandinsky5Tokenizer(QwenImageTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.llama_template = "<|im_start|>system\nYou are a prompt engineer. Describe the video in detail.\nDescribe how the camera moves or shakes, describe the zoom and view angle, whether it follows the objects.\nDescribe the location of the video, main characters or objects and their action.\nDescribe the dynamism of the video and presented actions.\nName the visual style of the video: whether it is a professional footage, user generated content, some kind of animation, video game or screen content.\nDescribe the visual effects, postprocessing and transitions if they are presented in the video.\nPay attention to the order of key actions shown in the scene.<|im_end|>\n<|im_start|>user\n{}<|im_end|>" + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + + def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): + out = super().tokenize_with_weights(text, return_word_ids, **kwargs) + out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids, **kwargs) + + return out + + +class Kandinsky5TokenizerImage(Kandinsky5Tokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.llama_template = "<|im_start|>system\nYou are a prompt engineer. 
Describe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>" + + +class Qwen25_7BVLIModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="hidden", layer_idx=-1, dtype=None, attention_mask=True, model_options={}): + llama_scaled_fp8 = model_options.get("qwen_scaled_fp8", None) + if llama_scaled_fp8 is not None: + model_options = model_options.copy() + model_options["scaled_fp8"] = llama_scaled_fp8 + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=Qwen25_7BVLI, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) + + +class Kandinsky5TEModel(QwenImageTEModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + super(QwenImageTEModel, self).__init__(device=device, dtype=dtype, name="qwen25_7b", clip_model=Qwen25_7BVLIModel, model_options=model_options) + self.clip_l = sd1_clip.SDClipModel(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options) + + def encode_token_weights(self, token_weight_pairs): + cond, p, extra = super().encode_token_weights(token_weight_pairs, template_end=-1) + l_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs["l"]) + + return cond, l_pooled, extra + + def set_clip_options(self, options): + super().set_clip_options(options) + self.clip_l.set_clip_options(options) + + def reset_clip_options(self): + super().reset_clip_options() + self.clip_l.reset_clip_options() + + def load_sd(self, sd): + if "text_model.encoder.layers.1.mlp.fc1.weight" in sd: + return self.clip_l.load_sd(sd) + else: + return super().load_sd(sd) + +def te(dtype_llama=None, llama_scaled_fp8=None): + class Kandinsky5TEModel_(Kandinsky5TEModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + model_options = model_options.copy() + model_options["qwen_scaled_fp8"] = llama_scaled_fp8 + if dtype_llama is not None: + dtype = dtype_llama + super().__init__(device=device, dtype=dtype, model_options=model_options) + return Kandinsky5TEModel_ diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 866c3e0eb..d7cbe68cf 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -568,6 +568,8 @@ class Conditioning(ComfyTypeIO): '''Used by WAN Camera.''' time_dim_concat: NotRequired[torch.Tensor] '''Used by WAN Phantom Subject.''' + time_dim_replace: NotRequired[torch.Tensor] + '''Used by Kandinsky5 I2V.''' CondList = list[tuple[torch.Tensor, PooledDict]] Type = CondList diff --git a/comfy_extras/nodes_kandinsky5.py b/comfy_extras/nodes_kandinsky5.py new file mode 100644 index 000000000..9cb234be1 --- /dev/null +++ b/comfy_extras/nodes_kandinsky5.py @@ -0,0 +1,136 @@ +import nodes +import node_helpers +import torch +import comfy.model_management +import comfy.utils + +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io + + +class Kandinsky5ImageToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="Kandinsky5ImageToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Int.Input("width", default=768, min=16, max=nodes.MAX_RESOLUTION, step=16), 
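+ # Width/height are pixel dims (the latent built in execute() is 8x downscaled spatially); length is in frames, mapped to ((length - 1) // 4) + 1 latent frames.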
+ io.Int.Input("height", default=512, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Image.Input("start_image", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent", tooltip="Empty video latent"), + io.Latent.Output(display_name="cond_latent", tooltip="Clean encoded start images, used to replace the noisy start of the model output latents"), + ], + ) + + @classmethod + def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None) -> io.NodeOutput: + latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()) + cond_latent_out = {} + if start_image is not None: + start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + encoded = vae.encode(start_image[:, :, :, :3]) + cond_latent_out["samples"] = encoded + + mask = torch.ones((1, 1, latent.shape[2], latent.shape[-2], latent.shape[-1]), device=start_image.device, dtype=start_image.dtype) + mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0 + + positive = node_helpers.conditioning_set_values(positive, {"time_dim_replace": encoded, "concat_mask": mask}) + negative = node_helpers.conditioning_set_values(negative, {"time_dim_replace": encoded, "concat_mask": mask}) + + out_latent = {} + out_latent["samples"] = latent + return io.NodeOutput(positive, negative, out_latent, cond_latent_out) + + +def adaptive_mean_std_normalization(source, reference, clump_mean_low=0.3, clump_mean_high=0.35, clump_std_low=0.35, clump_std_high=0.5): + source_mean = source.mean(dim=(1, 3, 4), keepdim=True) # mean over C, H, W + source_std = source.std(dim=(1, 3, 4), keepdim=True) # std over C, H, W + + reference_mean = torch.clamp(reference.mean(), source_mean - clump_mean_low, source_mean + clump_mean_high) + reference_std = torch.clamp(reference.std(), source_std - clump_std_low, source_std + clump_std_high) + + # normalization + normalized = (source - source_mean) / (source_std + 1e-8) + normalized = normalized * reference_std + reference_mean + + return normalized + + +class NormalizeVideoLatentStart(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="NormalizeVideoLatentStart", + category="conditioning/video_models", + description="Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. 
Helps reduce differences between the starting frames and the rest of the video.", + inputs=[ + io.Latent.Input("latent"), + io.Int.Input("start_frame_count", default=4, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames to normalize, counted from the start"), + io.Int.Input("reference_frame_count", default=5, min=1, max=nodes.MAX_RESOLUTION, step=1, tooltip="Number of latent frames after the start frames to use as reference"), + ], + outputs=[ + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, latent, start_frame_count, reference_frame_count) -> io.NodeOutput: + if latent["samples"].shape[2] <= 1: + return io.NodeOutput(latent) + s = latent.copy() + samples = latent["samples"].clone() + + first_frames = samples[:, :, :start_frame_count] + reference_frames_data = samples[:, :, start_frame_count:start_frame_count+min(reference_frame_count, samples.shape[2]-1)] + normalized_first_frames = adaptive_mean_std_normalization(first_frames, reference_frames_data) + + samples[:, :, :start_frame_count] = normalized_first_frames + s["samples"] = samples + return io.NodeOutput(s) + + +class CLIPTextEncodeKandinsky5(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeKandinsky5", + category="advanced/conditioning/kandinsky5", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("clip_l", multiline=True, dynamic_prompts=True), + io.String.Input("qwen25_7b", multiline=True, dynamic_prompts=True), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) + + @classmethod + def execute(cls, clip, clip_l, qwen25_7b) -> io.NodeOutput: + tokens = clip.tokenize(clip_l) + tokens["qwen25_7b"] = clip.tokenize(qwen25_7b)["qwen25_7b"] + + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) + + +class Kandinsky5Extension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + Kandinsky5ImageToVideo, + NormalizeVideoLatentStart, + CLIPTextEncodeKandinsky5, + ] + +async def comfy_entrypoint() -> Kandinsky5Extension: + return Kandinsky5Extension() diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index d2df07ff9..e439b18ef 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -4,7 +4,7 @@ import torch import nodes from typing_extensions import override from comfy_api.latest import ComfyExtension, io - +import logging def reshape_latent_to(target_shape, latent, repeat_batch=True): if latent.shape[1:] != target_shape[1:]: @@ -388,6 +388,42 @@ class LatentOperationSharpen(io.ComfyNode): return luminance * sharpened return io.NodeOutput(sharpen) +class ReplaceVideoLatentFrames(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ReplaceVideoLatentFrames", + category="latent/batch", + inputs=[ + io.Latent.Input("destination", tooltip="The destination latent where frames will be replaced."), + io.Latent.Input("source", optional=True, tooltip="The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."), + io.Int.Input("index", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1, tooltip="The starting latent frame index in the destination latent where the source latent frames will be placed. 
Negative values count from the end."), + ], + outputs=[ + io.Latent.Output(), + ], + ) + + @classmethod + def execute(cls, destination, index, source=None) -> io.NodeOutput: + if source is None: + return io.NodeOutput(destination) + dest_frames = destination["samples"].shape[2] + source_frames = source["samples"].shape[2] + if index < 0: + index = dest_frames + index + if index > dest_frames: + logging.warning(f"ReplaceVideoLatentFrames: Index {index} is out of bounds for destination latent frames {dest_frames}.") + return io.NodeOutput(destination) + if index + source_frames > dest_frames: + logging.warning(f"ReplaceVideoLatentFrames: Source latent frames {source_frames} do not fit within destination latent frames {dest_frames} at the specified index {index}.") + return io.NodeOutput(destination) + s = source.copy() + s_source = source["samples"] + s_destination = destination["samples"].clone() + s_destination[:, :, index:index + s_source.shape[2]] = s_source + s["samples"] = s_destination + return io.NodeOutput(s) class LatentExtension(ComfyExtension): @override @@ -405,6 +441,7 @@ class LatentExtension(ComfyExtension): LatentApplyOperationCFG, LatentOperationTonemapReinhard, LatentOperationSharpen, + ReplaceVideoLatentFrames ] diff --git a/nodes.py b/nodes.py index 356aa63df..8d28a725d 100644 --- a/nodes.py +++ b/nodes.py @@ -970,7 +970,7 @@ class DualCLIPLoader: def INPUT_TYPES(s): return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ), "clip_name2": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15"], ), + "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15", "kandinsky5", "kandinsky5_image"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -2357,6 +2357,7 @@ async def init_builtin_extra_nodes(): "nodes_rope.py", "nodes_logic.py", "nodes_nop.py", + "nodes_kandinsky5.py", ] import_failed = [] From ae676ed105663bb225153c8bca406f00edf738b4 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 5 Dec 2025 20:01:19 -0800 Subject: [PATCH 0998/1073] Fix regression. 
(#11137) --- comfy/supported_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 91cc4ef08..383c82c3e 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1529,6 +1529,6 @@ class Kandinsky5Image(Kandinsky5): return supported_models_base.ClipTarget(comfy.text_encoders.kandinsky5.Kandinsky5TokenizerImage, comfy.text_encoders.kandinsky5.te(**hunyuan_detect)) -models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, ZImage, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage, Kandinsky5Image, Kandinsky5] +models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo15_SR_Distilled, HunyuanVideo15, HunyuanImage21Refiner, HunyuanImage21, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, ZImage, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, WAN22_Camera, WAN22_S2V, WAN21_HuMo, WAN22_Animate, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ChromaRadiance, ACEStep, Omnigen2, QwenImage, Flux2, Kandinsky5Image, Kandinsky5] models += [SVD_img2vid] From 117bf3f2bd9235cb5942a1de10a534c9869c7444 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 6 Dec 2025 06:22:02 +0200 Subject: [PATCH 0999/1073] convert nodes_freelunch.py to the V3 schema (#10904) --- comfy_extras/nodes_freelunch.py | 89 +++++++++++++++++---------- comfy_extras/nodes_model_downscale.py | 5 -- 2 files changed, 57 insertions(+), 37 deletions(-) diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index e3ac58447..3429b731e 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -2,6 +2,8 @@ import torch import logging +from typing_extensions import override +from comfy_api.latest import ComfyExtension, IO def Fourier_filter(x, threshold, scale): # FFT @@ -22,21 +24,26 @@ def Fourier_filter(x, threshold, scale): return x_filtered.to(x.dtype) -class FreeU: +class FreeU(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "b1": ("FLOAT", {"default": 1.1, "min": 0.0, "max": 10.0, "step": 0.01}), - "b2": ("FLOAT", {"default": 1.2, "min": 0.0, "max": 10.0, "step": 0.01}), - "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}), - "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) 
- FUNCTION = "patch" + def define_schema(cls): + return IO.Schema( + node_id="FreeU", + category="model_patches/unet", + inputs=[ + IO.Model.Input("model"), + IO.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01), + IO.Float.Input("b2", default=1.2, min=0.0, max=10.0, step=0.01), + IO.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01), + IO.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01), + ], + outputs=[ + IO.Model.Output(), + ], + ) - CATEGORY = "model_patches/unet" - - def patch(self, model, b1, b2, s1, s2): + @classmethod + def execute(cls, model, b1, b2, s1, s2) -> IO.NodeOutput: model_channels = model.model.model_config.unet_config["model_channels"] scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)} on_cpu_devices = {} @@ -59,23 +66,31 @@ class FreeU: m = model.clone() m.set_model_output_block_patch(output_block_patch) - return (m, ) + return IO.NodeOutput(m) -class FreeU_V2: + patch = execute # TODO: remove + + +class FreeU_V2(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "b1": ("FLOAT", {"default": 1.3, "min": 0.0, "max": 10.0, "step": 0.01}), - "b2": ("FLOAT", {"default": 1.4, "min": 0.0, "max": 10.0, "step": 0.01}), - "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}), - "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}), - }} - RETURN_TYPES = ("MODEL",) - FUNCTION = "patch" + def define_schema(cls): + return IO.Schema( + node_id="FreeU_V2", + category="model_patches/unet", + inputs=[ + IO.Model.Input("model"), + IO.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01), + IO.Float.Input("b2", default=1.4, min=0.0, max=10.0, step=0.01), + IO.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01), + IO.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01), + ], + outputs=[ + IO.Model.Output(), + ], + ) - CATEGORY = "model_patches/unet" - - def patch(self, model, b1, b2, s1, s2): + @classmethod + def execute(cls, model, b1, b2, s1, s2) -> IO.NodeOutput: model_channels = model.model.model_config.unet_config["model_channels"] scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)} on_cpu_devices = {} @@ -105,9 +120,19 @@ class FreeU_V2: m = model.clone() m.set_model_output_block_patch(output_block_patch) - return (m, ) + return IO.NodeOutput(m) -NODE_CLASS_MAPPINGS = { - "FreeU": FreeU, - "FreeU_V2": FreeU_V2, -} + patch = execute # TODO: remove + + +class FreelunchExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + FreeU, + FreeU_V2, + ] + + +async def comfy_entrypoint() -> FreelunchExtension: + return FreelunchExtension() diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index f7ca9699d..dec2ae841 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -53,11 +53,6 @@ class PatchModelAddDownscale(io.ComfyNode): return io.NodeOutput(m) -NODE_DISPLAY_NAME_MAPPINGS = { - # Sampling - "PatchModelAddDownscale": "", -} - class ModelDownscaleExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: From 913f86b72740f84f759786a698108840a09b6498 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Sat, 6 Dec 2025 06:24:10 +0200 Subject: [PATCH 1000/1073] [V3] convert nodes_mask.py to V3 schema (#10669) * convert nodes_mask.py to V3 schema * set "Preview Mask" as display name for 
MaskPreview --- comfy_extras/nodes_mask.py | 508 +++++++++++++++++++------------------ 1 file changed, 263 insertions(+), 245 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index a5e405008..290e6f55e 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -3,11 +3,10 @@ import scipy.ndimage import torch import comfy.utils import node_helpers -import folder_paths -import random +from typing_extensions import override +from comfy_api.latest import ComfyExtension, IO, UI import nodes -from nodes import MAX_RESOLUTION def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False): source = source.to(destination.device) @@ -46,202 +45,213 @@ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_sou destination[..., top:bottom, left:right] = source_portion + destination_portion return destination -class LatentCompositeMasked: +class LatentCompositeMasked(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "destination": ("LATENT",), - "source": ("LATENT",), - "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "resize_source": ("BOOLEAN", {"default": False}), - }, - "optional": { - "mask": ("MASK",), - } - } - RETURN_TYPES = ("LATENT",) - FUNCTION = "composite" + def define_schema(cls): + return IO.Schema( + node_id="LatentCompositeMasked", + category="latent", + inputs=[ + IO.Latent.Input("destination"), + IO.Latent.Input("source"), + IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=8), + IO.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION, step=8), + IO.Boolean.Input("resize_source", default=False), + IO.Mask.Input("mask", optional=True), + ], + outputs=[IO.Latent.Output()], + ) - CATEGORY = "latent" - - def composite(self, destination, source, x, y, resize_source, mask = None): + @classmethod + def execute(cls, destination, source, x, y, resize_source, mask = None) -> IO.NodeOutput: output = destination.copy() destination = destination["samples"].clone() source = source["samples"] output["samples"] = composite(destination, source, x, y, mask, 8, resize_source) - return (output,) + return IO.NodeOutput(output) -class ImageCompositeMasked: + composite = execute # TODO: remove + + +class ImageCompositeMasked(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "destination": ("IMAGE",), - "source": ("IMAGE",), - "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "resize_source": ("BOOLEAN", {"default": False}), - }, - "optional": { - "mask": ("MASK",), - } - } - RETURN_TYPES = ("IMAGE",) - FUNCTION = "composite" + def define_schema(cls): + return IO.Schema( + node_id="ImageCompositeMasked", + category="image", + inputs=[ + IO.Image.Input("destination"), + IO.Image.Input("source"), + IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Boolean.Input("resize_source", default=False), + IO.Mask.Input("mask", optional=True), + ], + outputs=[IO.Image.Output()], + ) - CATEGORY = "image" - - def composite(self, destination, source, x, y, resize_source, mask = None): + @classmethod + def execute(cls, destination, source, x, y, resize_source, mask = None) -> IO.NodeOutput: destination, source = 
node_helpers.image_alpha_fix(destination, source) destination = destination.clone().movedim(-1, 1) output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1) - return (output,) + return IO.NodeOutput(output) -class MaskToImage: + composite = execute # TODO: remove + + +class MaskToImage(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask": ("MASK",), - } - } + def define_schema(cls): + return IO.Schema( + node_id="MaskToImage", + display_name="Convert Mask to Image", + category="mask", + inputs=[ + IO.Mask.Input("mask"), + ], + outputs=[IO.Image.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "mask_to_image" - - def mask_to_image(self, mask): + @classmethod + def execute(cls, mask) -> IO.NodeOutput: result = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) - return (result,) + return IO.NodeOutput(result) -class ImageToMask: + mask_to_image = execute # TODO: remove + + +class ImageToMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "channel": (["red", "green", "blue", "alpha"],), - } - } + def define_schema(cls): + return IO.Schema( + node_id="ImageToMask", + display_name="Convert Image to Mask", + category="mask", + inputs=[ + IO.Image.Input("image"), + IO.Combo.Input("channel", options=["red", "green", "blue", "alpha"]), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - FUNCTION = "image_to_mask" - - def image_to_mask(self, image, channel): + @classmethod + def execute(cls, image, channel) -> IO.NodeOutput: channels = ["red", "green", "blue", "alpha"] mask = image[:, :, :, channels.index(channel)] - return (mask,) + return IO.NodeOutput(mask) -class ImageColorToMask: + image_to_mask = execute # TODO: remove + + +class ImageColorToMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}), - } - } + def define_schema(cls): + return IO.Schema( + node_id="ImageColorToMask", + category="mask", + inputs=[ + IO.Image.Input("image"), + IO.Int.Input("color", default=0, min=0, max=0xFFFFFF, step=1, display_mode=IO.NumberDisplay.number), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - FUNCTION = "image_to_mask" - - def image_to_mask(self, image, color): + @classmethod + def execute(cls, image, color) -> IO.NodeOutput: temp = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int) temp = torch.bitwise_left_shift(temp[:,:,:,0], 16) + torch.bitwise_left_shift(temp[:,:,:,1], 8) + temp[:,:,:,2] mask = torch.where(temp == color, 1.0, 0).float() - return (mask,) + return IO.NodeOutput(mask) -class SolidMask: + image_to_mask = execute # TODO: remove + + +class SolidMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), - "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), - } - } + def define_schema(cls): + return IO.Schema( + node_id="SolidMask", + category="mask", + inputs=[ + IO.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01), + IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("height", default=512, min=1, 
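+ # SolidMask builds a single (1, height, width) float32 mask filled with the given value on the CPU.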
max=nodes.MAX_RESOLUTION, step=1), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - - FUNCTION = "solid" - - def solid(self, value, width, height): + @classmethod + def execute(cls, value, width, height) -> IO.NodeOutput: out = torch.full((1, height, width), value, dtype=torch.float32, device="cpu") - return (out,) + return IO.NodeOutput(out) -class InvertMask: + solid = execute # TODO: remove + + +class InvertMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "mask": ("MASK",), - } - } + def define_schema(cls): + return IO.Schema( + node_id="InvertMask", + category="mask", + inputs=[ + IO.Mask.Input("mask"), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - - FUNCTION = "invert" - - def invert(self, mask): + @classmethod + def execute(cls, mask) -> IO.NodeOutput: out = 1.0 - mask - return (out,) + return IO.NodeOutput(out) -class CropMask: + invert = execute # TODO: remove + + +class CropMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "mask": ("MASK",), - "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), - "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), - } - } + def define_schema(cls): + return IO.Schema( + node_id="CropMask", + category="mask", + inputs=[ + IO.Mask.Input("mask"), + IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("height", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - - FUNCTION = "crop" - - def crop(self, mask, x, y, width, height): + @classmethod + def execute(cls, mask, x, y, width, height) -> IO.NodeOutput: mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])) out = mask[:, y:y + height, x:x + width] - return (out,) + return IO.NodeOutput(out) -class MaskComposite: + crop = execute # TODO: remove + + +class MaskComposite(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "destination": ("MASK",), - "source": ("MASK",), - "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "operation": (["multiply", "add", "subtract", "and", "or", "xor"],), - } - } + def define_schema(cls): + return IO.Schema( + node_id="MaskComposite", + category="mask", + inputs=[ + IO.Mask.Input("destination"), + IO.Mask.Input("source"), + IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Combo.Input("operation", options=["multiply", "add", "subtract", "and", "or", "xor"]), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - - FUNCTION = "combine" - - def combine(self, destination, source, x, y, operation): + @classmethod + def execute(cls, destination, source, x, y, operation) -> IO.NodeOutput: output = destination.reshape((-1, destination.shape[-2], destination.shape[-1])).clone() source = source.reshape((-1, source.shape[-2], source.shape[-1])) @@ -267,28 +277,29 @@ class 
MaskComposite: output = torch.clamp(output, 0.0, 1.0) - return (output,) + return IO.NodeOutput(output) -class FeatherMask: + combine = execute # TODO: remove + + +class FeatherMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "mask": ("MASK",), - "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - } - } + def define_schema(cls): + return IO.Schema( + node_id="FeatherMask", + category="mask", + inputs=[ + IO.Mask.Input("mask"), + IO.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("top", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("right", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + IO.Int.Input("bottom", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - - FUNCTION = "feather" - - def feather(self, mask, left, top, right, bottom): + @classmethod + def execute(cls, mask, left, top, right, bottom) -> IO.NodeOutput: output = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).clone() left = min(left, output.shape[-1]) @@ -312,26 +323,28 @@ class FeatherMask: feather_rate = (y + 1) / bottom output[:, -y, :] *= feather_rate - return (output,) + return IO.NodeOutput(output) -class GrowMask: + feather = execute # TODO: remove + + +class GrowMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "mask": ("MASK",), - "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}), - "tapered_corners": ("BOOLEAN", {"default": True}), - }, - } + def define_schema(cls): + return IO.Schema( + node_id="GrowMask", + display_name="Grow Mask", + category="mask", + inputs=[ + IO.Mask.Input("mask"), + IO.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION, step=1), + IO.Boolean.Input("tapered_corners", default=True), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - - FUNCTION = "expand_mask" - - def expand_mask(self, mask, expand, tapered_corners): + @classmethod + def execute(cls, mask, expand, tapered_corners) -> IO.NodeOutput: c = 0 if tapered_corners else 1 kernel = np.array([[c, 1, c], [1, 1, 1], @@ -347,69 +360,74 @@ class GrowMask: output = scipy.ndimage.grey_dilation(output, footprint=kernel) output = torch.from_numpy(output) out.append(output) - return (torch.stack(out, dim=0),) + return IO.NodeOutput(torch.stack(out, dim=0)) -class ThresholdMask: + expand_mask = execute # TODO: remove + + +class ThresholdMask(IO.ComfyNode): @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask": ("MASK",), - "value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), - } - } + def define_schema(cls): + return IO.Schema( + node_id="ThresholdMask", + category="mask", + inputs=[ + IO.Mask.Input("mask"), + IO.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01), + ], + outputs=[IO.Mask.Output()], + ) - CATEGORY = "mask" - - RETURN_TYPES = ("MASK",) - FUNCTION = "image_to_mask" - - def image_to_mask(self, mask, value): + @classmethod + def execute(cls, mask, value) -> IO.NodeOutput: mask = (mask > value).float() - return (mask,) + return IO.NodeOutput(mask) + + image_to_mask = execute # TODO: 
remove + # Mask Preview - original implement from # https://github.com/cubiq/ComfyUI_essentials/blob/9d9f4bedfc9f0321c19faf71855e228c93bd0dc9/mask.py#L81 # upstream requested in https://github.com/Kosinkadink/rfcs/blob/main/rfcs/0000-corenodes.md#preview-nodes -class MaskPreview(nodes.SaveImage): - def __init__(self): - self.output_dir = folder_paths.get_temp_directory() - self.type = "temp" - self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) - self.compress_level = 4 +class MaskPreview(IO.ComfyNode): + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="MaskPreview", + display_name="Preview Mask", + category="mask", + description="Saves the input images to your ComfyUI output directory.", + inputs=[ + IO.Mask.Input("mask"), + ], + hidden=[IO.Hidden.prompt, IO.Hidden.extra_pnginfo], + is_output_node=True, + ) @classmethod - def INPUT_TYPES(s): - return { - "required": {"mask": ("MASK",), }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } - - FUNCTION = "execute" - CATEGORY = "mask" - - def execute(self, mask, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): - preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) - return self.save_images(preview, filename_prefix, prompt, extra_pnginfo) + def execute(cls, mask, filename_prefix="ComfyUI") -> IO.NodeOutput: + return IO.NodeOutput(ui=UI.PreviewMask(mask)) -NODE_CLASS_MAPPINGS = { - "LatentCompositeMasked": LatentCompositeMasked, - "ImageCompositeMasked": ImageCompositeMasked, - "MaskToImage": MaskToImage, - "ImageToMask": ImageToMask, - "ImageColorToMask": ImageColorToMask, - "SolidMask": SolidMask, - "InvertMask": InvertMask, - "CropMask": CropMask, - "MaskComposite": MaskComposite, - "FeatherMask": FeatherMask, - "GrowMask": GrowMask, - "ThresholdMask": ThresholdMask, - "MaskPreview": MaskPreview -} +class MaskExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[IO.ComfyNode]]: + return [ + LatentCompositeMasked, + ImageCompositeMasked, + MaskToImage, + ImageToMask, + ImageColorToMask, + SolidMask, + InvertMask, + CropMask, + MaskComposite, + FeatherMask, + GrowMask, + ThresholdMask, + MaskPreview, + ] -NODE_DISPLAY_NAME_MAPPINGS = { - "ImageToMask": "Convert Image to Mask", - "MaskToImage": "Convert Mask to Image", -} + +async def comfy_entrypoint() -> MaskExtension: + return MaskExtension() From d7a0aef65033bf0fe56e521577a44fac1830b8b3 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 5 Dec 2025 21:15:21 -0800 Subject: [PATCH 1001/1073] Set OCL_SET_SVM_SIZE on AMD. 
(#11139)

---
 cuda_malloc.py | 27 +++++++++++++++++----------
 main.py        |  3 +++
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/cuda_malloc.py b/cuda_malloc.py
index 6520d5123..ee2bc4b69 100644
--- a/cuda_malloc.py
+++ b/cuda_malloc.py
@@ -63,18 +63,22 @@ def cuda_malloc_supported():
     return True
 
+version = ""
+
+try:
+    torch_spec = importlib.util.find_spec("torch")
+    for folder in torch_spec.submodule_search_locations:
+        ver_file = os.path.join(folder, "version.py")
+        if os.path.isfile(ver_file):
+            spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
+            module = importlib.util.module_from_spec(spec)
+            spec.loader.exec_module(module)
+            version = module.__version__
+except:
+    pass
+
 if not args.cuda_malloc:
     try:
-        version = ""
-        torch_spec = importlib.util.find_spec("torch")
-        for folder in torch_spec.submodule_search_locations:
-            ver_file = os.path.join(folder, "version.py")
-            if os.path.isfile(ver_file):
-                spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
-                module = importlib.util.module_from_spec(spec)
-                spec.loader.exec_module(module)
-                version = module.__version__
-
         if int(version[0]) >= 2 and "+cu" in version: # enable by default for torch version 2.0 and up only on cuda torch
             if PerformanceFeature.AutoTune not in args.fast: # Autotune has issues with cuda malloc
                 args.cuda_malloc = cuda_malloc_supported()
@@ -90,3 +94,6 @@ if args.cuda_malloc and not args.disable_cuda_malloc:
             env_var += ",backend:cudaMallocAsync"
 
     os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var
+
+def get_torch_version_noimport():
+    return str(version)
diff --git a/main.py b/main.py
index 0cd815d9e..0d02a087b 100644
--- a/main.py
+++ b/main.py
@@ -167,6 +167,9 @@ if __name__ == "__main__":
         os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
 
     import cuda_malloc
+    if "rocm" in cuda_malloc.get_torch_version_noimport():
+        os.environ['OCL_SET_SVM_SIZE'] = '262144' # set at the request of AMD
+
 
 if 'torch' in sys.modules:
     logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.")

From 76f18e955dcbc88ed13d6802194fd897927f93e5 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Sat, 6 Dec 2025 13:28:08 +0200
Subject: [PATCH 1002/1073] marked all Pika API nodes as deprecated (#11146)

---
 comfy_api_nodes/nodes_pika.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py
index 51148211b..acd88c391 100644
--- a/comfy_api_nodes/nodes_pika.py
+++ b/comfy_api_nodes/nodes_pika.py
@@ -92,6 +92,7 @@ class PikaImageToVideo(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
+            is_deprecated=True,
         )
 
     @classmethod
@@ -152,6 +153,7 @@ class PikaTextToVideoNode(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
+            is_deprecated=True,
         )
 
     @classmethod
@@ -239,6 +241,7 @@ class PikaScenes(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
+            is_deprecated=True,
        )
 
     @classmethod
@@ -323,6 +326,7 @@ class PikAdditionsNode(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
+            is_deprecated=True,
         )
 
     @classmethod
@@ -399,6 +403,7 @@ class PikaSwapsNode(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
+            is_deprecated=True,
         )
 
     @classmethod
@@ -466,6 +471,7 @@ class PikaffectsNode(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
+            is_deprecated=True,
         )
 
     @classmethod
@@ -515,6 +521,7 @@ class PikaStartEndFrameNode(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
+            is_deprecated=True,
         )
 
     @classmethod

From 7ac7d69d948e75c3a230d1262daab84d75aff895 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com>
Date: Sat, 6 Dec 2025 20:09:44 +0200
Subject: [PATCH 1003/1073] Fix EmptyAudio node input types (#11149)

---
 comfy_extras/nodes_audio.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py
index 812301fb7..c7916443c 100644
--- a/comfy_extras/nodes_audio.py
+++ b/comfy_extras/nodes_audio.py
@@ -573,12 +573,14 @@ class EmptyAudio(IO.ComfyNode):
                     step=0.01,
                     tooltip="Duration of the empty audio clip in seconds",
                 ),
-                IO.Float.Input(
+                IO.Int.Input(
                     "sample_rate",
                     default=44100,
                     tooltip="Sample rate of the empty audio clip.",
+                    min=1,
+                    max=192000,
                 ),
-                IO.Float.Input(
+                IO.Int.Input(
                     "channels",
                     default=2,
                     min=1,

From 50ca97e7765d9bbdbeec31a75f1f6c747d76948c Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 6 Dec 2025 15:36:20 -0800
Subject: [PATCH 1004/1073] Speed up lora compute and lower memory usage by
 doing it in fp16. (#11161)

---
 comfy/model_management.py | 14 ++++++++++++++
 comfy/model_patcher.py    |  5 +++--
 2 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index aeddbaefe..40717b1e4 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1492,6 +1492,20 @@ def extended_fp16_support():
 
     return True
 
+LORA_COMPUTE_DTYPES = {}
+def lora_compute_dtype(device):
+    dtype = LORA_COMPUTE_DTYPES.get(device, None)
+    if dtype is not None:
+        return dtype
+
+    if should_use_fp16(device):
+        dtype = torch.float16
+    else:
+        dtype = torch.float32
+
+    LORA_COMPUTE_DTYPES[device] = dtype
+    return dtype
+
 def soft_empty_cache(force=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 215784874..4f076a6aa 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -614,10 +614,11 @@ class ModelPatcher:
         if key not in self.backup:
             self.backup[key] = collections.namedtuple('Dimension', ['weight', 'inplace_update'])(weight.to(device=self.offload_device, copy=inplace_update), inplace_update)
 
+        temp_dtype = comfy.model_management.lora_compute_dtype(device_to)
         if device_to is not None:
-            temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True)
+            temp_weight = comfy.model_management.cast_to_device(weight, device_to, temp_dtype, copy=True)
         else:
-            temp_weight = weight.to(torch.float32, copy=True)
+            temp_weight = weight.to(temp_dtype, copy=True)
 
         if convert_func is not None:
             temp_weight = convert_func(temp_weight, inplace=True)

From 4086acf3c2f0ca3a8861b04f6179fa9f908e3e25 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Sun, 7 Dec 2025 09:42:09 +1000
Subject: [PATCH 1005/1073] Fix on-load VRAM OOM (#11144)

Slow down the CPU on model load so it does not run ahead. This fixes a
VRAM OOM on Flux 2 load.

I went to try to debug this with the memory trace pickles, which needs
--disable-cuda-malloc, and that made the bug go away. So I tried this
synchronize and it worked.

This has some very complex interactions with the cuda malloc async
allocator and I don't have a solid theory on this one yet. Still
debugging, but this gets us over the OOM for the moment.
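A minimal sketch of the idea behind this change (illustrative only, not the
actual ModelPatcher code; `copy_module_weights` here is a stand-in for the
real per-module weight patching):

    import torch

    def copy_module_weights(module: torch.nn.Module, device: torch.device):
        # Stand-in for the per-module weight patching: queue the (possibly
        # asynchronous) host-to-device copies for this module's weights.
        module.to(device, non_blocking=True)

    def load_modules_throttled(modules, device: torch.device):
        for module in modules:
            copy_module_weights(module, device)
            if device.type == "cuda":
                # Block the host until this module's copies have finished.
                # Without this, the CPU can run ahead and enqueue transfers
                # for many modules at once, keeping all of their staging
                # buffers live at the same time and raising peak VRAM use
                # under the cudaMallocAsync allocator.
                torch.cuda.synchronize()
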
--- comfy/model_patcher.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 4f076a6aa..5b1ccb824 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -762,6 +762,8 @@ class ModelPatcher: key = "{}.{}".format(n, param) self.unpin_weight(key) self.patch_weight_to_device(key, device_to=device_to) + if comfy.model_management.is_device_cuda(device_to): + torch.cuda.synchronize() logging.debug("lowvram: loaded module regularly {} {}".format(n, m)) m.comfy_patched_weights = True From 329480da5ab32949a411548f821ea60ab3e90dc7 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 6 Dec 2025 17:50:10 -0800 Subject: [PATCH 1006/1073] Fix qwen scaled fp8 not working with kandinsky. Make basic t2i wf work. (#11162) --- comfy/ldm/kandinsky5/model.py | 8 +++++++- comfy/text_encoders/kandinsky5.py | 12 ++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/comfy/ldm/kandinsky5/model.py b/comfy/ldm/kandinsky5/model.py index a653e02fc..1509de2f8 100644 --- a/comfy/ldm/kandinsky5/model.py +++ b/comfy/ldm/kandinsky5/model.py @@ -387,6 +387,9 @@ class Kandinsky5(nn.Module): return self.out_layer(visual_embed, time_embed) def _forward(self, x, timestep, context, y, time_dim_replace=None, transformer_options={}, **kwargs): + original_dims = x.ndim + if original_dims == 4: + x = x.unsqueeze(2) bs, c, t_len, h, w = x.shape x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size) @@ -397,7 +400,10 @@ class Kandinsky5(nn.Module): freqs = self.rope_encode_3d(t_len, h, w, device=x.device, dtype=x.dtype, transformer_options=transformer_options) freqs_text = self.rope_encode_1d(context.shape[1], device=x.device, dtype=x.dtype, transformer_options=transformer_options) - return self.forward_orig(x, timestep, context, y, freqs, freqs_text, transformer_options=transformer_options, **kwargs) + out = self.forward_orig(x, timestep, context, y, freqs, freqs_text, transformer_options=transformer_options, **kwargs) + if original_dims == 4: + out = out.squeeze(2) + return out def forward(self, x, timestep, context, y, time_dim_replace=None, transformer_options={}, **kwargs): return comfy.patcher_extension.WrapperExecutor.new_class_executor( diff --git a/comfy/text_encoders/kandinsky5.py b/comfy/text_encoders/kandinsky5.py index 22f991c36..be086458c 100644 --- a/comfy/text_encoders/kandinsky5.py +++ b/comfy/text_encoders/kandinsky5.py @@ -24,10 +24,10 @@ class Kandinsky5TokenizerImage(Kandinsky5Tokenizer): class Qwen25_7BVLIModel(sd1_clip.SDClipModel): def __init__(self, device="cpu", layer="hidden", layer_idx=-1, dtype=None, attention_mask=True, model_options={}): - llama_scaled_fp8 = model_options.get("qwen_scaled_fp8", None) - if llama_scaled_fp8 is not None: + llama_quantization_metadata = model_options.get("llama_quantization_metadata", None) + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["scaled_fp8"] = llama_scaled_fp8 + model_options["quantization_metadata"] = llama_quantization_metadata super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=Qwen25_7BVLI, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options) @@ -56,12 +56,12 @@ class Kandinsky5TEModel(QwenImageTEModel): else: return super().load_sd(sd) -def te(dtype_llama=None, 
llama_scaled_fp8=None): +def te(dtype_llama=None, llama_quantization_metadata=None): class Kandinsky5TEModel_(Kandinsky5TEModel): def __init__(self, device="cpu", dtype=None, model_options={}): - if llama_scaled_fp8 is not None and "scaled_fp8" not in model_options: + if llama_quantization_metadata is not None: model_options = model_options.copy() - model_options["qwen_scaled_fp8"] = llama_scaled_fp8 + model_options["llama_quantization_metadata"] = llama_quantization_metadata if dtype_llama is not None: dtype = dtype_llama super().__init__(device=device, dtype=dtype, model_options=model_options) From 56fa7dbe380cb5591c5542f8aa51ce2fc26beedf Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 7 Dec 2025 04:44:55 -0800 Subject: [PATCH 1007/1073] Properly load the newbie diffusion model. (#11172) There is still one of the text encoders missing and I didn't actually test it. --- comfy/ldm/lumina/model.py | 35 +++++++++++++++++++++++++++++++++++ comfy/model_base.py | 4 ++++ comfy/model_detection.py | 3 +++ 3 files changed, 42 insertions(+) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index 6c24fed9b..c47df49ca 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -377,6 +377,7 @@ class NextDiT(nn.Module): z_image_modulation=False, time_scale=1.0, pad_tokens_multiple=None, + clip_text_dim=None, image_model=None, device=None, dtype=None, @@ -447,6 +448,31 @@ class NextDiT(nn.Module): ), ) + self.clip_text_pooled_proj = None + + if clip_text_dim is not None: + self.clip_text_dim = clip_text_dim + self.clip_text_pooled_proj = nn.Sequential( + operation_settings.get("operations").RMSNorm(clip_text_dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype")), + operation_settings.get("operations").Linear( + clip_text_dim, + clip_text_dim, + bias=True, + device=operation_settings.get("device"), + dtype=operation_settings.get("dtype"), + ), + ) + self.time_text_embed = nn.Sequential( + nn.SiLU(), + operation_settings.get("operations").Linear( + min(dim, 1024) + clip_text_dim, + min(dim, 1024), + bias=True, + device=operation_settings.get("device"), + dtype=operation_settings.get("dtype"), + ), + ) + self.layers = nn.ModuleList( [ JointTransformerBlock( @@ -585,6 +611,15 @@ class NextDiT(nn.Module): cap_feats = self.cap_embedder(cap_feats) # (N, L, D) # todo check if able to batchify w.o. 
redundant compute + if self.clip_text_pooled_proj is not None: + pooled = kwargs.get("clip_text_pooled", None) + if pooled is not None: + pooled = self.clip_text_pooled_proj(pooled) + else: + pooled = torch.zeros((1, self.clip_text_dim), device=x.device, dtype=x.dtype) + + adaln_input = self.time_text_embed(torch.cat((t, pooled), dim=-1)) + patches = transformer_options.get("patches", {}) x_is_tensor = isinstance(x, torch.Tensor) img, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens, transformer_options=transformer_options) diff --git a/comfy/model_base.py b/comfy/model_base.py index 0be006cc2..6b8a8454d 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1110,6 +1110,10 @@ class Lumina2(BaseModel): if 'num_tokens' not in out: out['num_tokens'] = comfy.conds.CONDConstant(cross_attn.shape[1]) + clip_text_pooled = kwargs["pooled_output"] # Newbie + if clip_text_pooled is not None: + out['clip_text_pooled'] = comfy.conds.CONDRegular(clip_text_pooled) + return out class WAN21(BaseModel): diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 30b33a486..74c547427 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -423,6 +423,9 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["axes_lens"] = [300, 512, 512] dit_config["rope_theta"] = 10000.0 dit_config["ffn_dim_multiplier"] = 4.0 + ctd_weight = state_dict.get('{}clip_text_pooled_proj.0.weight'.format(key_prefix), None) + if ctd_weight is not None: + dit_config["clip_text_dim"] = ctd_weight.shape[0] elif dit_config["dim"] == 3840: # Z image dit_config["n_heads"] = 30 dit_config["n_kv_heads"] = 30 From ec7f65187d85e22ea23345ce0d919e11768f255e Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 8 Dec 2025 11:21:41 +0200 Subject: [PATCH 1008/1073] chore(comfy_api): replace absolute imports with relative (#11145) --- comfy_api/latest/__init__.py | 8 ++++---- comfy_api/latest/_input/video_types.py | 2 +- comfy_api/latest/_input_impl/video_types.py | 4 ++-- comfy_api/latest/_io.py | 2 +- comfy_api/latest/_ui.py | 2 +- comfy_api/latest/_util/video_types.py | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py index 0fa01d1e7..35e1ac853 100644 --- a/comfy_api/latest/__init__.py +++ b/comfy_api/latest/__init__.py @@ -5,9 +5,9 @@ from typing import Type, TYPE_CHECKING from comfy_api.internal import ComfyAPIBase from comfy_api.internal.singleton import ProxiedSingleton from comfy_api.internal.async_to_sync import create_sync_class -from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput -from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents -from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents, MESH, VOXEL +from ._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput +from ._input_impl import VideoFromFile, VideoFromComponents +from ._util import VideoCodec, VideoContainer, VideoComponents, MESH, VOXEL from . import _io_public as io from . import _ui_public as ui # from comfy_api.latest._resources import _RESOURCES as resources #noqa: F401 @@ -80,7 +80,7 @@ class ComfyExtension(ABC): async def on_load(self) -> None: """ Called when an extension is loaded. - This should be used to initialize any global resources neeeded by the extension. 
+ This should be used to initialize any global resources needed by the extension. """ @abstractmethod diff --git a/comfy_api/latest/_input/video_types.py b/comfy_api/latest/_input/video_types.py index 87c81d73a..e634a0311 100644 --- a/comfy_api/latest/_input/video_types.py +++ b/comfy_api/latest/_input/video_types.py @@ -4,7 +4,7 @@ from fractions import Fraction from typing import Optional, Union, IO import io import av -from comfy_api.util import VideoContainer, VideoCodec, VideoComponents +from .._util import VideoContainer, VideoCodec, VideoComponents class VideoInput(ABC): """ diff --git a/comfy_api/latest/_input_impl/video_types.py b/comfy_api/latest/_input_impl/video_types.py index a4cd3737d..ea35c6062 100644 --- a/comfy_api/latest/_input_impl/video_types.py +++ b/comfy_api/latest/_input_impl/video_types.py @@ -3,14 +3,14 @@ from av.container import InputContainer from av.subtitles.stream import SubtitleStream from fractions import Fraction from typing import Optional -from comfy_api.latest._input import AudioInput, VideoInput +from .._input import AudioInput, VideoInput import av import io import json import numpy as np import math import torch -from comfy_api.latest._util import VideoContainer, VideoCodec, VideoComponents +from .._util import VideoContainer, VideoCodec, VideoComponents def container_to_output_format(container_format: str | None) -> str | None: diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index d7cbe68cf..313a5af20 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -26,7 +26,7 @@ if TYPE_CHECKING: from comfy_api.input import VideoInput from comfy_api.internal import (_ComfyNodeInternal, _NodeOutputInternal, classproperty, copy_class, first_real_override, is_class, prune_dict, shallow_clone_class) -from comfy_api.latest._resources import Resources, ResourcesLocal +from ._resources import Resources, ResourcesLocal from comfy_execution.graph_utils import ExecutionBlocker from ._util import MESH, VOXEL diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py index 5a75a3aae..2babe209a 100644 --- a/comfy_api/latest/_ui.py +++ b/comfy_api/latest/_ui.py @@ -22,7 +22,7 @@ import folder_paths # used for image preview from comfy.cli_args import args -from comfy_api.latest._io import ComfyNode, FolderType, Image, _UIOutput +from ._io import ComfyNode, FolderType, Image, _UIOutput class SavedResult(dict): diff --git a/comfy_api/latest/_util/video_types.py b/comfy_api/latest/_util/video_types.py index c3e3d8e3a..fd3b5a510 100644 --- a/comfy_api/latest/_util/video_types.py +++ b/comfy_api/latest/_util/video_types.py @@ -3,7 +3,7 @@ from dataclasses import dataclass from enum import Enum from fractions import Fraction from typing import Optional -from comfy_api.latest._input import ImageInput, AudioInput +from .._input import ImageInput, AudioInput class VideoCodec(str, Enum): AUTO = "auto" From 058f084371ef2ed0c456118dfdd3d0bfed17259b Mon Sep 17 00:00:00 2001 From: ComfyUI Wiki Date: Mon, 8 Dec 2025 17:22:51 +0800 Subject: [PATCH 1009/1073] Update workflow templates to v0.7.51 (#11150) * chore: update workflow templates to v0.7.50 * Update template to 0.7.51 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f98848e20..12a7c1089 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.33.10 -comfyui-workflow-templates==0.7.25 +comfyui-workflow-templates==0.7.51 comfyui-embedded-docs==0.3.1 torch torchsde From 
85c4b4ae262c2de360891dd23c6504da2f5a6014 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 8 Dec 2025 11:27:02 +0200 Subject: [PATCH 1010/1073] chore: replace imports of deprecated V1 classes (#11127) --- comfy_api_nodes/apis/veo_api.py | 2 +- comfy_api_nodes/nodes_gemini.py | 19 ++++++++++--------- comfy_api_nodes/nodes_ltxv.py | 17 +++++++---------- comfy_api_nodes/nodes_moonvalley.py | 19 ++++++++----------- comfy_api_nodes/nodes_runway.py | 29 +++++++++++++---------------- comfy_api_nodes/nodes_veo2.py | 12 +++++------- comfy_extras/nodes_video.py | 27 +++++++++++---------------- 7 files changed, 55 insertions(+), 70 deletions(-) diff --git a/comfy_api_nodes/apis/veo_api.py b/comfy_api_nodes/apis/veo_api.py index 8328d1aa4..23ca725b7 100644 --- a/comfy_api_nodes/apis/veo_api.py +++ b/comfy_api_nodes/apis/veo_api.py @@ -85,7 +85,7 @@ class Response1(BaseModel): raiMediaFilteredReasons: Optional[list[str]] = Field( None, description='Reasons why media was filtered by responsible AI policies' ) - videos: Optional[list[Video]] = None + videos: Optional[list[Video]] = Field(None) class VeoGenVidPollResponse(BaseModel): diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 08f7b0f64..0b7422ef7 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -13,8 +13,7 @@ import torch from typing_extensions import override import folder_paths -from comfy_api.latest import IO, ComfyExtension, Input -from comfy_api.util import VideoCodec, VideoContainer +from comfy_api.latest import IO, ComfyExtension, Input, Types from comfy_api_nodes.apis.gemini_api import ( GeminiContent, GeminiFileData, @@ -68,7 +67,7 @@ class GeminiImageModel(str, Enum): async def create_image_parts( cls: type[IO.ComfyNode], - images: torch.Tensor, + images: Input.Image, image_limit: int = 0, ) -> list[GeminiPart]: image_parts: list[GeminiPart] = [] @@ -154,8 +153,8 @@ def get_text_from_response(response: GeminiGenerateContentResponse) -> str: return "\n".join([part.text for part in parts]) -def get_image_from_response(response: GeminiGenerateContentResponse) -> torch.Tensor: - image_tensors: list[torch.Tensor] = [] +def get_image_from_response(response: GeminiGenerateContentResponse) -> Input.Image: + image_tensors: list[Input.Image] = [] parts = get_parts_by_type(response, "image/png") for part in parts: image_data = base64.b64decode(part.inlineData.data) @@ -293,7 +292,9 @@ class GeminiNode(IO.ComfyNode): def create_video_parts(cls, video_input: Input.Video) -> list[GeminiPart]: """Convert video input to Gemini API compatible parts.""" - base_64_string = video_to_base64_string(video_input, container_format=VideoContainer.MP4, codec=VideoCodec.H264) + base_64_string = video_to_base64_string( + video_input, container_format=Types.VideoContainer.MP4, codec=Types.VideoCodec.H264 + ) return [ GeminiPart( inlineData=GeminiInlineData( @@ -343,7 +344,7 @@ class GeminiNode(IO.ComfyNode): prompt: str, model: str, seed: int, - images: torch.Tensor | None = None, + images: Input.Image | None = None, audio: Input.Audio | None = None, video: Input.Video | None = None, files: list[GeminiPart] | None = None, @@ -542,7 +543,7 @@ class GeminiImage(IO.ComfyNode): prompt: str, model: str, seed: int, - images: torch.Tensor | None = None, + images: Input.Image | None = None, files: list[GeminiPart] | None = None, aspect_ratio: str = "auto", response_modalities: str = "IMAGE+TEXT", @@ -662,7 +663,7 @@ class GeminiImage2(IO.ComfyNode): 
aspect_ratio: str, resolution: str, response_modalities: str, - images: torch.Tensor | None = None, + images: Input.Image | None = None, files: list[GeminiPart] | None = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) diff --git a/comfy_api_nodes/nodes_ltxv.py b/comfy_api_nodes/nodes_ltxv.py index 0b757a62b..7e61560dc 100644 --- a/comfy_api_nodes/nodes_ltxv.py +++ b/comfy_api_nodes/nodes_ltxv.py @@ -1,12 +1,9 @@ from io import BytesIO -from typing import Optional -import torch from pydantic import BaseModel, Field from typing_extensions import override -from comfy_api.input_impl import VideoFromFile -from comfy_api.latest import IO, ComfyExtension +from comfy_api.latest import IO, ComfyExtension, Input, InputImpl from comfy_api_nodes.util import ( ApiEndpoint, get_number_of_images, @@ -26,9 +23,9 @@ class ExecuteTaskRequest(BaseModel): model: str = Field(...) duration: int = Field(...) resolution: str = Field(...) - fps: Optional[int] = Field(25) - generate_audio: Optional[bool] = Field(True) - image_uri: Optional[str] = Field(None) + fps: int | None = Field(25) + generate_audio: bool | None = Field(True) + image_uri: str | None = Field(None) class TextToVideoNode(IO.ComfyNode): @@ -103,7 +100,7 @@ class TextToVideoNode(IO.ComfyNode): as_binary=True, max_retries=1, ) - return IO.NodeOutput(VideoFromFile(BytesIO(response))) + return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(response))) class ImageToVideoNode(IO.ComfyNode): @@ -153,7 +150,7 @@ class ImageToVideoNode(IO.ComfyNode): @classmethod async def execute( cls, - image: torch.Tensor, + image: Input.Image, model: str, prompt: str, duration: int, @@ -183,7 +180,7 @@ class ImageToVideoNode(IO.ComfyNode): as_binary=True, max_retries=1, ) - return IO.NodeOutput(VideoFromFile(BytesIO(response))) + return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(response))) class LtxvApiExtension(ComfyExtension): diff --git a/comfy_api_nodes/nodes_moonvalley.py b/comfy_api_nodes/nodes_moonvalley.py index 7c31d95b3..2771e4790 100644 --- a/comfy_api_nodes/nodes_moonvalley.py +++ b/comfy_api_nodes/nodes_moonvalley.py @@ -1,11 +1,8 @@ import logging -from typing import Optional -import torch from typing_extensions import override -from comfy_api.input import VideoInput -from comfy_api.latest import IO, ComfyExtension +from comfy_api.latest import IO, ComfyExtension, Input from comfy_api_nodes.apis import ( MoonvalleyPromptResponse, MoonvalleyTextToVideoInferenceParams, @@ -61,7 +58,7 @@ def validate_task_creation_response(response) -> None: raise RuntimeError(error_msg) -def validate_video_to_video_input(video: VideoInput) -> VideoInput: +def validate_video_to_video_input(video: Input.Video) -> Input.Video: """ Validates and processes video input for Moonvalley Video-to-Video generation. @@ -82,7 +79,7 @@ def validate_video_to_video_input(video: VideoInput) -> VideoInput: return _validate_and_trim_duration(video) -def _get_video_dimensions(video: VideoInput) -> tuple[int, int]: +def _get_video_dimensions(video: Input.Video) -> tuple[int, int]: """Extracts video dimensions with error handling.""" try: return video.get_dimensions() @@ -106,7 +103,7 @@ def _validate_video_dimensions(width: int, height: int) -> None: raise ValueError(f"Resolution {width}x{height} not supported. 
Supported: {supported_list}") -def _validate_and_trim_duration(video: VideoInput) -> VideoInput: +def _validate_and_trim_duration(video: Input.Video) -> Input.Video: """Validates video duration and trims to 5 seconds if needed.""" duration = video.get_duration() _validate_minimum_duration(duration) @@ -119,7 +116,7 @@ def _validate_minimum_duration(duration: float) -> None: raise ValueError("Input video must be at least 5 seconds long.") -def _trim_if_too_long(video: VideoInput, duration: float) -> VideoInput: +def _trim_if_too_long(video: Input.Video, duration: float) -> Input.Video: """Trims video to 5 seconds if longer.""" if duration > 5: return trim_video(video, 5) @@ -241,7 +238,7 @@ class MoonvalleyImg2VideoNode(IO.ComfyNode): @classmethod async def execute( cls, - image: torch.Tensor, + image: Input.Image, prompt: str, negative_prompt: str, resolution: str, @@ -362,9 +359,9 @@ class MoonvalleyVideo2VideoNode(IO.ComfyNode): prompt: str, negative_prompt: str, seed: int, - video: Optional[VideoInput] = None, + video: Input.Video | None = None, control_type: str = "Motion Transfer", - motion_intensity: Optional[int] = 100, + motion_intensity: int | None = 100, steps=33, prompt_adherence=4.5, ) -> IO.NodeOutput: diff --git a/comfy_api_nodes/nodes_runway.py b/comfy_api_nodes/nodes_runway.py index 2fdafbbfe..3c55039c9 100644 --- a/comfy_api_nodes/nodes_runway.py +++ b/comfy_api_nodes/nodes_runway.py @@ -11,12 +11,11 @@ User Guides: """ -from typing import Union, Optional -from typing_extensions import override from enum import Enum -import torch +from typing_extensions import override +from comfy_api.latest import IO, ComfyExtension, Input, InputImpl from comfy_api_nodes.apis import ( RunwayImageToVideoRequest, RunwayImageToVideoResponse, @@ -44,8 +43,6 @@ from comfy_api_nodes.util import ( sync_op, poll_op, ) -from comfy_api.input_impl import VideoFromFile -from comfy_api.latest import ComfyExtension, IO PATH_IMAGE_TO_VIDEO = "/proxy/runway/image_to_video" PATH_TEXT_TO_IMAGE = "/proxy/runway/text_to_image" @@ -80,7 +77,7 @@ class RunwayGen3aAspectRatio(str, Enum): field_1280_768 = "1280:768" -def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: +def get_video_url_from_task_status(response: TaskStatusResponse) -> str | None: """Returns the video URL from the task status response if it exists.""" if hasattr(response, "output") and len(response.output) > 0: return response.output[0] @@ -89,13 +86,13 @@ def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, N def extract_progress_from_task_status( response: TaskStatusResponse, -) -> Union[float, None]: +) -> float | None: if hasattr(response, "progress") and response.progress is not None: return response.progress * 100 return None -def get_image_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: +def get_image_url_from_task_status(response: TaskStatusResponse) -> str | None: """Returns the image URL from the task status response if it exists.""" if hasattr(response, "output") and len(response.output) > 0: return response.output[0] @@ -103,7 +100,7 @@ def get_image_url_from_task_status(response: TaskStatusResponse) -> Union[str, N async def get_response( - cls: type[IO.ComfyNode], task_id: str, estimated_duration: Optional[int] = None + cls: type[IO.ComfyNode], task_id: str, estimated_duration: int | None = None ) -> TaskStatusResponse: """Poll the task status until it is finished then get the response.""" return await poll_op( @@ -119,8 +116,8 @@ async def 
get_response( async def generate_video( cls: type[IO.ComfyNode], request: RunwayImageToVideoRequest, - estimated_duration: Optional[int] = None, -) -> VideoFromFile: + estimated_duration: int | None = None, +) -> InputImpl.VideoFromFile: initial_response = await sync_op( cls, endpoint=ApiEndpoint(path=PATH_IMAGE_TO_VIDEO, method="POST"), @@ -193,7 +190,7 @@ class RunwayImageToVideoNodeGen3a(IO.ComfyNode): async def execute( cls, prompt: str, - start_frame: torch.Tensor, + start_frame: Input.Image, duration: str, ratio: str, seed: int, @@ -283,7 +280,7 @@ class RunwayImageToVideoNodeGen4(IO.ComfyNode): async def execute( cls, prompt: str, - start_frame: torch.Tensor, + start_frame: Input.Image, duration: str, ratio: str, seed: int, @@ -381,8 +378,8 @@ class RunwayFirstLastFrameNode(IO.ComfyNode): async def execute( cls, prompt: str, - start_frame: torch.Tensor, - end_frame: torch.Tensor, + start_frame: Input.Image, + end_frame: Input.Image, duration: str, ratio: str, seed: int, @@ -467,7 +464,7 @@ class RunwayTextToImageNode(IO.ComfyNode): cls, prompt: str, ratio: str, - reference_image: Optional[torch.Tensor] = None, + reference_image: Input.Image | None = None, ) -> IO.NodeOutput: validate_string(prompt, min_length=1) diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py index a54dc13ab..e165b8380 100644 --- a/comfy_api_nodes/nodes_veo2.py +++ b/comfy_api_nodes/nodes_veo2.py @@ -1,11 +1,9 @@ import base64 from io import BytesIO -import torch from typing_extensions import override -from comfy_api.input_impl.video_types import VideoFromFile -from comfy_api.latest import IO, ComfyExtension +from comfy_api.latest import IO, ComfyExtension, Input, InputImpl from comfy_api_nodes.apis.veo_api import ( VeoGenVidPollRequest, VeoGenVidPollResponse, @@ -232,7 +230,7 @@ class VeoVideoGenerationNode(IO.ComfyNode): # Check if video is provided as base64 or URL if hasattr(video, "bytesBase64Encoded") and video.bytesBase64Encoded: - return IO.NodeOutput(VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded)))) + return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded)))) if hasattr(video, "gcsUri") and video.gcsUri: return IO.NodeOutput(await download_url_to_video_output(video.gcsUri)) @@ -431,8 +429,8 @@ class Veo3FirstLastFrameNode(IO.ComfyNode): aspect_ratio: str, duration: int, seed: int, - first_frame: torch.Tensor, - last_frame: torch.Tensor, + first_frame: Input.Image, + last_frame: Input.Image, model: str, generate_audio: bool, ): @@ -493,7 +491,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode): if response.videos: video = response.videos[0] if video.bytesBase64Encoded: - return IO.NodeOutput(VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded)))) + return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded)))) if video.gcsUri: return IO.NodeOutput(await download_url_to_video_output(video.gcsUri)) raise Exception("Video returned but no data or URL was provided") diff --git a/comfy_extras/nodes_video.py b/comfy_extras/nodes_video.py index 6cf6e39bf..c609e03da 100644 --- a/comfy_extras/nodes_video.py +++ b/comfy_extras/nodes_video.py @@ -8,10 +8,7 @@ import json from typing import Optional from typing_extensions import override from fractions import Fraction -from comfy_api.input import AudioInput, ImageInput, VideoInput -from comfy_api.input_impl import VideoFromComponents, VideoFromFile -from comfy_api.util import VideoCodec, VideoComponents, VideoContainer -from comfy_api.latest 
import ComfyExtension, io, ui +from comfy_api.latest import ComfyExtension, io, ui, Input, InputImpl, Types from comfy.cli_args import args class SaveWEBM(io.ComfyNode): @@ -28,7 +25,6 @@ class SaveWEBM(io.ComfyNode): io.Float.Input("fps", default=24.0, min=0.01, max=1000.0, step=0.01), io.Float.Input("crf", default=32.0, min=0, max=63.0, step=1, tooltip="Higher crf means lower quality with a smaller file size, lower crf means higher quality higher filesize."), ], - outputs=[], hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo], is_output_node=True, ) @@ -79,16 +75,15 @@ class SaveVideo(io.ComfyNode): inputs=[ io.Video.Input("video", tooltip="The video to save."), io.String.Input("filename_prefix", default="video/ComfyUI", tooltip="The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."), - io.Combo.Input("format", options=VideoContainer.as_input(), default="auto", tooltip="The format to save the video as."), - io.Combo.Input("codec", options=VideoCodec.as_input(), default="auto", tooltip="The codec to use for the video."), + io.Combo.Input("format", options=Types.VideoContainer.as_input(), default="auto", tooltip="The format to save the video as."), + io.Combo.Input("codec", options=Types.VideoCodec.as_input(), default="auto", tooltip="The codec to use for the video."), ], - outputs=[], hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo], is_output_node=True, ) @classmethod - def execute(cls, video: VideoInput, filename_prefix, format: str, codec) -> io.NodeOutput: + def execute(cls, video: Input.Video, filename_prefix, format: str, codec) -> io.NodeOutput: width, height = video.get_dimensions() full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( filename_prefix, @@ -105,10 +100,10 @@ class SaveVideo(io.ComfyNode): metadata["prompt"] = cls.hidden.prompt if len(metadata) > 0: saved_metadata = metadata - file = f"{filename}_{counter:05}_.{VideoContainer.get_extension(format)}" + file = f"{filename}_{counter:05}_.{Types.VideoContainer.get_extension(format)}" video.save_to( os.path.join(full_output_folder, file), - format=VideoContainer(format), + format=Types.VideoContainer(format), codec=codec, metadata=saved_metadata ) @@ -135,9 +130,9 @@ class CreateVideo(io.ComfyNode): ) @classmethod - def execute(cls, images: ImageInput, fps: float, audio: Optional[AudioInput] = None) -> io.NodeOutput: + def execute(cls, images: Input.Image, fps: float, audio: Optional[Input.Audio] = None) -> io.NodeOutput: return io.NodeOutput( - VideoFromComponents(VideoComponents(images=images, audio=audio, frame_rate=Fraction(fps))) + InputImpl.VideoFromComponents(Types.VideoComponents(images=images, audio=audio, frame_rate=Fraction(fps))) ) class GetVideoComponents(io.ComfyNode): @@ -159,11 +154,11 @@ class GetVideoComponents(io.ComfyNode): ) @classmethod - def execute(cls, video: VideoInput) -> io.NodeOutput: + def execute(cls, video: Input.Video) -> io.NodeOutput: components = video.get_components() - return io.NodeOutput(components.images, components.audio, float(components.frame_rate)) + class LoadVideo(io.ComfyNode): @classmethod def define_schema(cls): @@ -185,7 +180,7 @@ class LoadVideo(io.ComfyNode): @classmethod def execute(cls, file) -> io.NodeOutput: video_path = folder_paths.get_annotated_filepath(file) - return io.NodeOutput(VideoFromFile(video_path)) + return io.NodeOutput(InputImpl.VideoFromFile(video_path)) @classmethod def fingerprint_inputs(s, 
file): From c3c6313fc7b24a5811efde7cfe10b7cbbea52663 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 8 Dec 2025 11:28:17 +0200 Subject: [PATCH 1011/1073] Added "system_prompt" input to Gemini nodes (#11177) --- comfy_api_nodes/apis/gemini_api.py | 10 +----- comfy_api_nodes/nodes_gemini.py | 52 ++++++++++++++++++++++++++++-- 2 files changed, 51 insertions(+), 11 deletions(-) diff --git a/comfy_api_nodes/apis/gemini_api.py b/comfy_api_nodes/apis/gemini_api.py index a380ecc86..f8edc38c9 100644 --- a/comfy_api_nodes/apis/gemini_api.py +++ b/comfy_api_nodes/apis/gemini_api.py @@ -84,15 +84,7 @@ class GeminiSystemInstructionContent(BaseModel): description="A list of ordered parts that make up a single message. " "Different parts may have different IANA MIME types.", ) - role: GeminiRole = Field( - ..., - description="The identity of the entity that creates the message. " - "The following values are supported: " - "user: This indicates that the message is sent by a real person, typically a user-generated message. " - "model: This indicates that the message is generated by the model. " - "The model value is used to insert messages from model into the conversation during multi-turn conversations. " - "For non-multi-turn conversations, this field can be left blank or unset.", - ) + role: GeminiRole | None = Field(..., description="The role field of systemInstruction may be ignored.") class GeminiFunctionDeclaration(BaseModel): diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 0b7422ef7..ad0f4b4d1 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -26,6 +26,8 @@ from comfy_api_nodes.apis.gemini_api import ( GeminiMimeType, GeminiPart, GeminiRole, + GeminiSystemInstructionContent, + GeminiTextPart, Modality, ) from comfy_api_nodes.util import ( @@ -42,6 +44,14 @@ from comfy_api_nodes.util import ( GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini" GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024 # 20 MB +GEMINI_IMAGE_SYS_PROMPT = ( + "You are an expert image-generation engine. You must ALWAYS produce an image.\n" + "Interpret all user input—regardless of " + "format, intent, or abstraction—as literal visual directives for image composition.\n" + "If a prompt is conversational or lacks specific visual details, " + "you must creatively invent a concrete visual scenario that depicts the concept.\n" + "Prioritize generating the visual representation above any text, formatting, or conversational requests." +) class GeminiModel(str, Enum): @@ -276,6 +286,13 @@ class GeminiNode(IO.ComfyNode): tooltip="Optional file(s) to use as context for the model. 
" "Accepts inputs from the Gemini Generate Content Input Files node.", ), + IO.String.Input( + "system_prompt", + multiline=True, + default="", + optional=True, + tooltip="Foundational instructions that dictate an AI's behavior.", + ), ], outputs=[ IO.String.Output(), @@ -348,6 +365,7 @@ class GeminiNode(IO.ComfyNode): audio: Input.Audio | None = None, video: Input.Video | None = None, files: list[GeminiPart] | None = None, + system_prompt: str = "", ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) @@ -364,7 +382,10 @@ class GeminiNode(IO.ComfyNode): if files is not None: parts.extend(files) - # Create response + gemini_system_prompt = None + if system_prompt: + gemini_system_prompt = GeminiSystemInstructionContent(parts=[GeminiTextPart(text=system_prompt)], role=None) + response = await sync_op( cls, endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"), @@ -374,7 +395,8 @@ class GeminiNode(IO.ComfyNode): role=GeminiRole.user, parts=parts, ) - ] + ], + systemInstruction=gemini_system_prompt, ), response_model=GeminiGenerateContentResponse, price_extractor=calculate_tokens_price, @@ -524,6 +546,13 @@ class GeminiImage(IO.ComfyNode): "'IMAGE+TEXT' to return both the generated image and a text response.", optional=True, ), + IO.String.Input( + "system_prompt", + multiline=True, + default=GEMINI_IMAGE_SYS_PROMPT, + optional=True, + tooltip="Foundational instructions that dictate an AI's behavior.", + ), ], outputs=[ IO.Image.Output(), @@ -547,6 +576,7 @@ class GeminiImage(IO.ComfyNode): files: list[GeminiPart] | None = None, aspect_ratio: str = "auto", response_modalities: str = "IMAGE+TEXT", + system_prompt: str = "", ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) parts: list[GeminiPart] = [GeminiPart(text=prompt)] @@ -560,6 +590,10 @@ class GeminiImage(IO.ComfyNode): if files is not None: parts.extend(files) + gemini_system_prompt = None + if system_prompt: + gemini_system_prompt = GeminiSystemInstructionContent(parts=[GeminiTextPart(text=system_prompt)], role=None) + response = await sync_op( cls, endpoint=ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"), @@ -571,6 +605,7 @@ class GeminiImage(IO.ComfyNode): responseModalities=(["IMAGE"] if response_modalities == "IMAGE" else ["TEXT", "IMAGE"]), imageConfig=None if aspect_ratio == "auto" else image_config, ), + systemInstruction=gemini_system_prompt, ), response_model=GeminiGenerateContentResponse, price_extractor=calculate_tokens_price, @@ -641,6 +676,13 @@ class GeminiImage2(IO.ComfyNode): tooltip="Optional file(s) to use as context for the model. 
" "Accepts inputs from the Gemini Generate Content Input Files node.", ), + IO.String.Input( + "system_prompt", + multiline=True, + default=GEMINI_IMAGE_SYS_PROMPT, + optional=True, + tooltip="Foundational instructions that dictate an AI's behavior.", + ), ], outputs=[ IO.Image.Output(), @@ -665,6 +707,7 @@ class GeminiImage2(IO.ComfyNode): response_modalities: str, images: Input.Image | None = None, files: list[GeminiPart] | None = None, + system_prompt: str = "", ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=1) @@ -680,6 +723,10 @@ class GeminiImage2(IO.ComfyNode): if aspect_ratio != "auto": image_config.aspectRatio = aspect_ratio + gemini_system_prompt = None + if system_prompt: + gemini_system_prompt = GeminiSystemInstructionContent(parts=[GeminiTextPart(text=system_prompt)], role=None) + response = await sync_op( cls, ApiEndpoint(path=f"{GEMINI_BASE_ENDPOINT}/{model}", method="POST"), @@ -691,6 +738,7 @@ class GeminiImage2(IO.ComfyNode): responseModalities=(["IMAGE"] if response_modalities == "IMAGE" else ["TEXT", "IMAGE"]), imageConfig=image_config, ), + systemInstruction=gemini_system_prompt, ), response_model=GeminiGenerateContentResponse, price_extractor=calculate_tokens_price, From fd271dedfde6e192a1f1a025521070876e89e04a Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Mon, 8 Dec 2025 11:33:46 +0200 Subject: [PATCH 1012/1073] [API Nodes] add support for seedance-1-0-pro-fast model (#10947) * feat(api-nodes): add support for seedance-1-0-pro-fast model * feat(api-nodes): add support for seedream-4.5 model --- comfy_api_nodes/apis/bytedance_api.py | 144 +++++++++++++++ comfy_api_nodes/nodes_bytedance.py | 255 ++++++-------------------- 2 files changed, 196 insertions(+), 203 deletions(-) create mode 100644 comfy_api_nodes/apis/bytedance_api.py diff --git a/comfy_api_nodes/apis/bytedance_api.py b/comfy_api_nodes/apis/bytedance_api.py new file mode 100644 index 000000000..77cd76f9b --- /dev/null +++ b/comfy_api_nodes/apis/bytedance_api.py @@ -0,0 +1,144 @@ +from typing import Literal + +from pydantic import BaseModel, Field + + +class Text2ImageTaskCreationRequest(BaseModel): + model: str = Field(...) + prompt: str = Field(...) + response_format: str | None = Field("url") + size: str | None = Field(None) + seed: int | None = Field(0, ge=0, le=2147483647) + guidance_scale: float | None = Field(..., ge=1.0, le=10.0) + watermark: bool | None = Field(True) + + +class Image2ImageTaskCreationRequest(BaseModel): + model: str = Field(...) + prompt: str = Field(...) + response_format: str | None = Field("url") + image: str = Field(..., description="Base64 encoded string or image URL") + size: str | None = Field("adaptive") + seed: int | None = Field(..., ge=0, le=2147483647) + guidance_scale: float | None = Field(..., ge=1.0, le=10.0) + watermark: bool | None = Field(True) + + +class Seedream4Options(BaseModel): + max_images: int = Field(15) + + +class Seedream4TaskCreationRequest(BaseModel): + model: str = Field(...) + prompt: str = Field(...) + response_format: str = Field("url") + image: list[str] | None = Field(None, description="Image URLs") + size: str = Field(...) + seed: int = Field(..., ge=0, le=2147483647) + sequential_image_generation: str = Field("disabled") + sequential_image_generation_options: Seedream4Options = Field(Seedream4Options(max_images=15)) + watermark: bool = Field(True) + + +class ImageTaskCreationResponse(BaseModel): + model: str = Field(...) 
+ created: int = Field(..., description="Unix timestamp (in seconds) indicating time when the request was created.") + data: list = Field([], description="Contains information about the generated image(s).") + error: dict = Field({}, description="Contains `code` and `message` fields in case of error.") + + +class TaskTextContent(BaseModel): + type: str = Field("text") + text: str = Field(...) + + +class TaskImageContentUrl(BaseModel): + url: str = Field(...) + + +class TaskImageContent(BaseModel): + type: str = Field("image_url") + image_url: TaskImageContentUrl = Field(...) + role: Literal["first_frame", "last_frame", "reference_image"] | None = Field(None) + + +class Text2VideoTaskCreationRequest(BaseModel): + model: str = Field(...) + content: list[TaskTextContent] = Field(..., min_length=1) + + +class Image2VideoTaskCreationRequest(BaseModel): + model: str = Field(...) + content: list[TaskTextContent | TaskImageContent] = Field(..., min_length=2) + + +class TaskCreationResponse(BaseModel): + id: str = Field(...) + + +class TaskStatusError(BaseModel): + code: str = Field(...) + message: str = Field(...) + + +class TaskStatusResult(BaseModel): + video_url: str = Field(...) + + +class TaskStatusResponse(BaseModel): + id: str = Field(...) + model: str = Field(...) + status: Literal["queued", "running", "cancelled", "succeeded", "failed"] = Field(...) + error: TaskStatusError | None = Field(None) + content: TaskStatusResult | None = Field(None) + + +RECOMMENDED_PRESETS = [ + ("1024x1024 (1:1)", 1024, 1024), + ("864x1152 (3:4)", 864, 1152), + ("1152x864 (4:3)", 1152, 864), + ("1280x720 (16:9)", 1280, 720), + ("720x1280 (9:16)", 720, 1280), + ("832x1248 (2:3)", 832, 1248), + ("1248x832 (3:2)", 1248, 832), + ("1512x648 (21:9)", 1512, 648), + ("2048x2048 (1:1)", 2048, 2048), + ("Custom", None, None), +] + +RECOMMENDED_PRESETS_SEEDREAM_4 = [ + ("2048x2048 (1:1)", 2048, 2048), + ("2304x1728 (4:3)", 2304, 1728), + ("1728x2304 (3:4)", 1728, 2304), + ("2560x1440 (16:9)", 2560, 1440), + ("1440x2560 (9:16)", 1440, 2560), + ("2496x1664 (3:2)", 2496, 1664), + ("1664x2496 (2:3)", 1664, 2496), + ("3024x1296 (21:9)", 3024, 1296), + ("4096x4096 (1:1)", 4096, 4096), + ("Custom", None, None), +] + +# The time in this dictionary are given for 10 seconds duration. 
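# A sketch of the intended use of this table, assuming the cost scales
# linearly with clip length (`estimate_seconds` below is illustrative and
# not a function defined by this patch):
#
#     def estimate_seconds(model: str, resolution: str, duration_s: int) -> int:
#         return int(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * duration_s / 10)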
+VIDEO_TASKS_EXECUTION_TIME = { + "seedance-1-0-lite-t2v-250428": { + "480p": 40, + "720p": 60, + "1080p": 90, + }, + "seedance-1-0-lite-i2v-250428": { + "480p": 40, + "720p": 60, + "1080p": 90, + }, + "seedance-1-0-pro-250528": { + "480p": 70, + "720p": 85, + "1080p": 115, + }, + "seedance-1-0-pro-fast-251015": { + "480p": 50, + "720p": 65, + "1080p": 100, + }, +} diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index caced471e..57c0218d0 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -1,13 +1,27 @@ import logging import math -from enum import Enum -from typing import Literal, Optional, Union import torch -from pydantic import BaseModel, Field from typing_extensions import override -from comfy_api.latest import IO, ComfyExtension +from comfy_api.latest import IO, ComfyExtension, Input +from comfy_api_nodes.apis.bytedance_api import ( + RECOMMENDED_PRESETS, + RECOMMENDED_PRESETS_SEEDREAM_4, + VIDEO_TASKS_EXECUTION_TIME, + Image2ImageTaskCreationRequest, + Image2VideoTaskCreationRequest, + ImageTaskCreationResponse, + Seedream4Options, + Seedream4TaskCreationRequest, + TaskCreationResponse, + TaskImageContent, + TaskImageContentUrl, + TaskStatusResponse, + TaskTextContent, + Text2ImageTaskCreationRequest, + Text2VideoTaskCreationRequest, +) from comfy_api_nodes.util import ( ApiEndpoint, download_url_to_image_tensor, @@ -29,162 +43,6 @@ BYTEPLUS_TASK_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks" BYTEPLUS_TASK_STATUS_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks" # + /{task_id} -class Text2ImageModelName(str, Enum): - seedream_3 = "seedream-3-0-t2i-250415" - - -class Image2ImageModelName(str, Enum): - seededit_3 = "seededit-3-0-i2i-250628" - - -class Text2VideoModelName(str, Enum): - seedance_1_pro = "seedance-1-0-pro-250528" - seedance_1_lite = "seedance-1-0-lite-t2v-250428" - - -class Image2VideoModelName(str, Enum): - """note(August 31): Pro model only supports FirstFrame: https://docs.byteplus.com/en/docs/ModelArk/1520757""" - - seedance_1_pro = "seedance-1-0-pro-250528" - seedance_1_lite = "seedance-1-0-lite-i2v-250428" - - -class Text2ImageTaskCreationRequest(BaseModel): - model: Text2ImageModelName = Text2ImageModelName.seedream_3 - prompt: str = Field(...) - response_format: Optional[str] = Field("url") - size: Optional[str] = Field(None) - seed: Optional[int] = Field(0, ge=0, le=2147483647) - guidance_scale: Optional[float] = Field(..., ge=1.0, le=10.0) - watermark: Optional[bool] = Field(True) - - -class Image2ImageTaskCreationRequest(BaseModel): - model: Image2ImageModelName = Image2ImageModelName.seededit_3 - prompt: str = Field(...) - response_format: Optional[str] = Field("url") - image: str = Field(..., description="Base64 encoded string or image URL") - size: Optional[str] = Field("adaptive") - seed: Optional[int] = Field(..., ge=0, le=2147483647) - guidance_scale: Optional[float] = Field(..., ge=1.0, le=10.0) - watermark: Optional[bool] = Field(True) - - -class Seedream4Options(BaseModel): - max_images: int = Field(15) - - -class Seedream4TaskCreationRequest(BaseModel): - model: str = Field("seedream-4-0-250828") - prompt: str = Field(...) - response_format: str = Field("url") - image: Optional[list[str]] = Field(None, description="Image URLs") - size: str = Field(...) 
- seed: int = Field(..., ge=0, le=2147483647) - sequential_image_generation: str = Field("disabled") - sequential_image_generation_options: Seedream4Options = Field(Seedream4Options(max_images=15)) - watermark: bool = Field(True) - - -class ImageTaskCreationResponse(BaseModel): - model: str = Field(...) - created: int = Field(..., description="Unix timestamp (in seconds) indicating time when the request was created.") - data: list = Field([], description="Contains information about the generated image(s).") - error: dict = Field({}, description="Contains `code` and `message` fields in case of error.") - - -class TaskTextContent(BaseModel): - type: str = Field("text") - text: str = Field(...) - - -class TaskImageContentUrl(BaseModel): - url: str = Field(...) - - -class TaskImageContent(BaseModel): - type: str = Field("image_url") - image_url: TaskImageContentUrl = Field(...) - role: Optional[Literal["first_frame", "last_frame", "reference_image"]] = Field(None) - - -class Text2VideoTaskCreationRequest(BaseModel): - model: Text2VideoModelName = Text2VideoModelName.seedance_1_pro - content: list[TaskTextContent] = Field(..., min_length=1) - - -class Image2VideoTaskCreationRequest(BaseModel): - model: Image2VideoModelName = Image2VideoModelName.seedance_1_pro - content: list[Union[TaskTextContent, TaskImageContent]] = Field(..., min_length=2) - - -class TaskCreationResponse(BaseModel): - id: str = Field(...) - - -class TaskStatusError(BaseModel): - code: str = Field(...) - message: str = Field(...) - - -class TaskStatusResult(BaseModel): - video_url: str = Field(...) - - -class TaskStatusResponse(BaseModel): - id: str = Field(...) - model: str = Field(...) - status: Literal["queued", "running", "cancelled", "succeeded", "failed"] = Field(...) - error: Optional[TaskStatusError] = Field(None) - content: Optional[TaskStatusResult] = Field(None) - - -RECOMMENDED_PRESETS = [ - ("1024x1024 (1:1)", 1024, 1024), - ("864x1152 (3:4)", 864, 1152), - ("1152x864 (4:3)", 1152, 864), - ("1280x720 (16:9)", 1280, 720), - ("720x1280 (9:16)", 720, 1280), - ("832x1248 (2:3)", 832, 1248), - ("1248x832 (3:2)", 1248, 832), - ("1512x648 (21:9)", 1512, 648), - ("2048x2048 (1:1)", 2048, 2048), - ("Custom", None, None), -] - -RECOMMENDED_PRESETS_SEEDREAM_4 = [ - ("2048x2048 (1:1)", 2048, 2048), - ("2304x1728 (4:3)", 2304, 1728), - ("1728x2304 (3:4)", 1728, 2304), - ("2560x1440 (16:9)", 2560, 1440), - ("1440x2560 (9:16)", 1440, 2560), - ("2496x1664 (3:2)", 2496, 1664), - ("1664x2496 (2:3)", 1664, 2496), - ("3024x1296 (21:9)", 3024, 1296), - ("4096x4096 (1:1)", 4096, 4096), - ("Custom", None, None), -] - -# The time in this dictionary are given for 10 seconds duration. -VIDEO_TASKS_EXECUTION_TIME = { - "seedance-1-0-lite-t2v-250428": { - "480p": 40, - "720p": 60, - "1080p": 90, - }, - "seedance-1-0-lite-i2v-250428": { - "480p": 40, - "720p": 60, - "1080p": 90, - }, - "seedance-1-0-pro-250528": { - "480p": 70, - "720p": 85, - "1080p": 115, - }, -} - - def get_image_url_from_response(response: ImageTaskCreationResponse) -> str: if response.error: error_msg = f"ByteDance request failed. 
Code: {response.error['code']}, message: {response.error['message']}" @@ -194,13 +52,6 @@ def get_image_url_from_response(response: ImageTaskCreationResponse) -> str: return response.data[0]["url"] -def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]: - """Returns the video URL from the task status response if it exists.""" - if hasattr(response, "content") and response.content: - return response.content.video_url - return None - - class ByteDanceImageNode(IO.ComfyNode): @classmethod @@ -211,12 +62,7 @@ class ByteDanceImageNode(IO.ComfyNode): category="api node/image/ByteDance", description="Generate images using ByteDance models via api based on prompt", inputs=[ - IO.Combo.Input( - "model", - options=Text2ImageModelName, - default=Text2ImageModelName.seedream_3, - tooltip="Model name", - ), + IO.Combo.Input("model", options=["seedream-3-0-t2i-250415"]), IO.String.Input( "prompt", multiline=True, @@ -335,12 +181,7 @@ class ByteDanceImageEditNode(IO.ComfyNode): category="api node/image/ByteDance", description="Edit images using ByteDance models via api based on prompt", inputs=[ - IO.Combo.Input( - "model", - options=Image2ImageModelName, - default=Image2ImageModelName.seededit_3, - tooltip="Model name", - ), + IO.Combo.Input("model", options=["seededit-3-0-i2i-250628"]), IO.Image.Input( "image", tooltip="The base image to edit", @@ -394,7 +235,7 @@ class ByteDanceImageEditNode(IO.ComfyNode): async def execute( cls, model: str, - image: torch.Tensor, + image: Input.Image, prompt: str, seed: int, guidance_scale: float, @@ -434,7 +275,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): inputs=[ IO.Combo.Input( "model", - options=["seedream-4-0-250828"], + options=["seedream-4-5-251128", "seedream-4-0-250828"], tooltip="Model name", ), IO.String.Input( @@ -459,7 +300,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): default=2048, min=1024, max=4096, - step=64, + step=8, tooltip="Custom width for image. Value is working only if `size_preset` is set to `Custom`", optional=True, ), @@ -468,7 +309,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): default=2048, min=1024, max=4096, - step=64, + step=8, tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`", optional=True, ), @@ -532,7 +373,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode): cls, model: str, prompt: str, - image: torch.Tensor = None, + image: Input.Image | None = None, size_preset: str = RECOMMENDED_PRESETS_SEEDREAM_4[0][0], width: int = 2048, height: int = 2048, @@ -555,6 +396,18 @@ class ByteDanceSeedreamNode(IO.ComfyNode): raise ValueError( f"Custom size out of range: {w}x{h}. " "Both width and height must be between 1024 and 4096 pixels." ) + out_num_pixels = w * h + mp_provided = out_num_pixels / 1_000_000.0 + if "seedream-4-5" in model and out_num_pixels < 3686400: + raise ValueError( + f"Minimum image resolution that Seedream 4.5 can generate is 3.68MP, " + f"but {mp_provided:.2f}MP provided." + ) + if "seedream-4-0" in model and out_num_pixels < 921600: + raise ValueError( + f"Minimum image resolution that the selected model can generate is 0.92MP, " + f"but {mp_provided:.2f}MP provided." 
+ ) n_input_images = get_number_of_images(image) if image is not None else 0 if n_input_images > 10: raise ValueError(f"Maximum of 10 reference images are supported, but {n_input_images} received.") @@ -607,9 +460,8 @@ class ByteDanceTextToVideoNode(IO.ComfyNode): inputs=[ IO.Combo.Input( "model", - options=Text2VideoModelName, - default=Text2VideoModelName.seedance_1_pro, - tooltip="Model name", + options=["seedance-1-0-pro-250528", "seedance-1-0-lite-t2v-250428", "seedance-1-0-pro-fast-251015"], + default="seedance-1-0-pro-fast-251015", ), IO.String.Input( "prompt", @@ -714,9 +566,8 @@ class ByteDanceImageToVideoNode(IO.ComfyNode): inputs=[ IO.Combo.Input( "model", - options=Image2VideoModelName, - default=Image2VideoModelName.seedance_1_pro, - tooltip="Model name", + options=["seedance-1-0-pro-250528", "seedance-1-0-lite-t2v-250428", "seedance-1-0-pro-fast-251015"], + default="seedance-1-0-pro-fast-251015", ), IO.String.Input( "prompt", @@ -787,7 +638,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode): cls, model: str, prompt: str, - image: torch.Tensor, + image: Input.Image, resolution: str, aspect_ratio: str, duration: int, @@ -833,9 +684,8 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode): inputs=[ IO.Combo.Input( "model", - options=[model.value for model in Image2VideoModelName], - default=Image2VideoModelName.seedance_1_lite.value, - tooltip="Model name", + options=["seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"], + default="seedance-1-0-lite-i2v-250428", ), IO.String.Input( "prompt", @@ -910,8 +760,8 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode): cls, model: str, prompt: str, - first_frame: torch.Tensor, - last_frame: torch.Tensor, + first_frame: Input.Image, + last_frame: Input.Image, resolution: str, aspect_ratio: str, duration: int, @@ -968,9 +818,8 @@ class ByteDanceImageReferenceNode(IO.ComfyNode): inputs=[ IO.Combo.Input( "model", - options=[Image2VideoModelName.seedance_1_lite.value], - default=Image2VideoModelName.seedance_1_lite.value, - tooltip="Model name", + options=["seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"], + default="seedance-1-0-lite-i2v-250428", ), IO.String.Input( "prompt", @@ -1034,7 +883,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode): cls, model: str, prompt: str, - images: torch.Tensor, + images: Input.Image, resolution: str, aspect_ratio: str, duration: int, @@ -1069,8 +918,8 @@ class ByteDanceImageReferenceNode(IO.ComfyNode): async def process_video_task( cls: type[IO.ComfyNode], - payload: Union[Text2VideoTaskCreationRequest, Image2VideoTaskCreationRequest], - estimated_duration: Optional[int], + payload: Text2VideoTaskCreationRequest | Image2VideoTaskCreationRequest, + estimated_duration: int | None, ) -> IO.NodeOutput: initial_response = await sync_op( cls, @@ -1085,7 +934,7 @@ async def process_video_task( estimated_duration=estimated_duration, response_model=TaskStatusResponse, ) - return IO.NodeOutput(await download_url_to_video_output(get_video_url_from_task_status(response))) + return IO.NodeOutput(await download_url_to_video_output(response.content.video_url)) def raise_if_text_params(prompt: str, text_params: list[str]) -> None: From 8e889c535d1fc407bf27dbf8359eef9580f2ed60 Mon Sep 17 00:00:00 2001 From: dxqb <183307934+dxqb@users.noreply.github.com> Date: Mon, 8 Dec 2025 21:17:26 +0100 Subject: [PATCH 1013/1073] Support "transformer." 
LoRA prefix for Z-Image (#11135)

---
 comfy/lora.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy/lora.py b/comfy/lora.py
index e7202ce97..2ed0acb9d 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -320,6 +320,7 @@ def model_lora_keys_unet(model, key_map={}):
                 to = diffusers_keys[k]
                 key_lora = k[:-len(".weight")]
                 key_map["diffusion_model.{}".format(key_lora)] = to
+                key_map["transformer.{}".format(key_lora)] = to
                 key_map["lycoris_{}".format(key_lora.replace(".", "_"))] = to
 
     if isinstance(model, comfy.model_base.Kandinsky5):

From 60ee574748209a17ade1c7524e228be2802d1589 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Tue, 9 Dec 2025 06:18:06 +1000
Subject: [PATCH 1014/1073] retune lowVramPatch VRAM accounting (#11173)

In the lowvram case, this now does its math in the model dtype, in the
post-dequantization domain. Account for that.

The patching was also put back on the compute stream, getting it
off-peak, so relax the MATH_FACTOR to only x2 and get out of the
worst-case assumption of everything peaking at once.
---
 comfy/model_patcher.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 5b1ccb824..8b5edeb52 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -132,14 +132,14 @@ class LowVramPatch:
     def __call__(self, weight):
        return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=weight.dtype)
 
-#The above patch logic may cast up the weight to fp32, and do math. Go with fp32 x 3
-LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR = 3
+LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR = 2
 
 def low_vram_patch_estimate_vram(model, key):
     weight, set_func, convert_func = get_key_weight(model, key)
     if weight is None:
         return 0
-    return weight.numel() * torch.float32.itemsize * LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR
+    model_dtype = getattr(model, "manual_cast_dtype", torch.float32)
+    return weight.numel() * model_dtype.itemsize * LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR
 
 def get_key_weight(model, key):
     set_func = None

From 935493f6c186de8808508713a465d6bda75e5ce4 Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Tue, 9 Dec 2025 04:18:53 +0800
Subject: [PATCH 1015/1073] chore: update workflow templates to v0.7.54 (#11192)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 12a7c1089..4bd4b21c3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.33.10
-comfyui-workflow-templates==0.7.51
+comfyui-workflow-templates==0.7.54
 comfyui-embedded-docs==0.3.1
 torch
 torchsde

From 3b0368aa34182fc7c97de92d59b609c77138def2 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 8 Dec 2025 14:38:36 -0800
Subject: [PATCH 1016/1073] Fix regression.
(#11194)

---
 comfy/model_patcher.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 8b5edeb52..a7d24ac13 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -139,6 +139,9 @@ def low_vram_patch_estimate_vram(model, key):
     if weight is None:
         return 0
     model_dtype = getattr(model, "manual_cast_dtype", torch.float32)
+    if model_dtype is None:
+        model_dtype = weight.dtype
+
     return weight.numel() * model_dtype.itemsize * LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR
 
 def get_key_weight(model, key):

From d50f342c90802830c1178ad9d7f2783dc2821af1 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 8 Dec 2025 20:20:04 -0800
Subject: [PATCH 1017/1073] Fix potential issue. (#11201)

---
 comfy/model_patcher.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index a7d24ac13..2e8ce2613 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -923,7 +923,7 @@ class ModelPatcher:
                         patch_counter += 1
 
                     cast_weight = True
-                if cast_weight:
+                if cast_weight and hasattr(m, "comfy_cast_weights"):
                     m.prev_comfy_cast_weights = m.comfy_cast_weights
                     m.comfy_cast_weights = True
                 m.comfy_patched_weights = False

From e136b6dbb0b08341388f5bf9a00b1fca29992eb3 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Tue, 9 Dec 2025 14:21:31 +1000
Subject: [PATCH 1018/1073] dequantization offload accounting (fixes Flux2 OOMs - incl TEs) (#11171)

* make setattr safe for non-existent attributes

Handle the case where the attribute doesn't exist by returning a static
sentinel (distinct from None). If the sentinel is passed in as the set
value, delete the attribute.

* Account for dequantization and type-casts in offload costs

When measuring the cost of offload, identify weights that need a type
change or dequantization and add the size of the conversion result to
the offload cost. This is mutually exclusive with lowvram patches, which
already carry a large conservative estimate that won't overlap the
dequant cost, so don't double count.

* Set the compute type on CLIP MPs

So that the loader can know the size of weights for dequant accounting.
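
For illustration only (not part of the patch): a minimal sketch of the
offload-cost rule described above. The function name and signature are
hypothetical stand-ins; the real logic lives in the comfy/model_patcher.py
hunk below, and the mutually exclusive lowvram-patch estimate is omitted.

    import torch

    def estimate_offload_cost(weight: torch.Tensor, model_dtype, is_quantized: bool) -> int:
        # Bytes held by the stored weight itself.
        cost = weight.numel() * weight.element_size()
        # If loading will type-cast or dequantize the weight, the conversion
        # result is resident as well, so count its size once.
        if model_dtype is not None and (weight.dtype != model_dtype or is_quantized):
            cost += weight.numel() * model_dtype.itemsize
        return cost
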
--- comfy/model_patcher.py | 19 +++++++++++++------ comfy/sd.py | 2 ++ comfy/utils.py | 9 +++++++-- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 2e8ce2613..a486c2723 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -35,6 +35,7 @@ import comfy.model_management import comfy.patcher_extension import comfy.utils from comfy.comfy_types import UnetWrapperFunction +from comfy.quant_ops import QuantizedTensor from comfy.patcher_extension import CallbacksMP, PatcherInjection, WrappersMP @@ -665,12 +666,18 @@ class ModelPatcher: module_mem = comfy.model_management.module_size(m) module_offload_mem = module_mem if hasattr(m, "comfy_cast_weights"): - weight_key = "{}.weight".format(n) - bias_key = "{}.bias".format(n) - if weight_key in self.patches: - module_offload_mem += low_vram_patch_estimate_vram(self.model, weight_key) - if bias_key in self.patches: - module_offload_mem += low_vram_patch_estimate_vram(self.model, bias_key) + def check_module_offload_mem(key): + if key in self.patches: + return low_vram_patch_estimate_vram(self.model, key) + model_dtype = getattr(self.model, "manual_cast_dtype", None) + weight, _, _ = get_key_weight(self.model, key) + if model_dtype is None or weight is None: + return 0 + if (weight.dtype != model_dtype or isinstance(weight, QuantizedTensor)): + return weight.numel() * model_dtype.itemsize + return 0 + module_offload_mem += check_module_offload_mem("{}.weight".format(n)) + module_offload_mem += check_module_offload_mem("{}.bias".format(n)) loading.append((module_offload_mem, module_mem, n, m, params)) return loading diff --git a/comfy/sd.py b/comfy/sd.py index 754b1703d..a16f2d14f 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -127,6 +127,8 @@ class CLIP: self.tokenizer = tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) self.patcher = comfy.model_patcher.ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device) + #Match torch.float32 hardcode upcast in TE implemention + self.patcher.set_model_compute_dtype(torch.float32) self.patcher.hook_mode = comfy.hooks.EnumHookMode.MinVram self.patcher.is_clip = True self.apply_hooks_to_conds = None diff --git a/comfy/utils.py b/comfy/utils.py index 89846bc95..9dc0d76ac 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -803,12 +803,17 @@ def safetensors_header(safetensors_path, max_size=100*1024*1024): return None return f.read(length_of_header) +ATTR_UNSET={} + def set_attr(obj, attr, value): attrs = attr.split(".") for name in attrs[:-1]: obj = getattr(obj, name) - prev = getattr(obj, attrs[-1]) - setattr(obj, attrs[-1], value) + prev = getattr(obj, attrs[-1], ATTR_UNSET) + if value is ATTR_UNSET: + delattr(obj, attrs[-1]) + else: + setattr(obj, attrs[-1], value) return prev def set_attr_param(obj, attr, value): From cabc4d351ff620ece87f18019d98131ebcbdf1aa Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 8 Dec 2025 20:22:02 -0800 Subject: [PATCH 1019/1073] bump comfyui-frontend-package to 1.33.13 (patch) (#11200) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4bd4b21c3..11a7ac245 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.33.10 +comfyui-frontend-package==1.33.13 comfyui-workflow-templates==0.7.54 comfyui-embedded-docs==0.3.1 torch From b9fb542703085c58f082b4a822329fb6670e8016 Mon Sep 17 00:00:00 2001 From: Lodestone Date: 
Tue, 9 Dec 2025 11:33:29 +0700 Subject: [PATCH 1020/1073] add chroma-radiance-x0 mode (#11197) --- comfy/ldm/chroma_radiance/model.py | 20 ++++++++++++++++++-- comfy/model_detection.py | 2 ++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/chroma_radiance/model.py b/comfy/ldm/chroma_radiance/model.py index e643b4414..70d173889 100644 --- a/comfy/ldm/chroma_radiance/model.py +++ b/comfy/ldm/chroma_radiance/model.py @@ -37,7 +37,7 @@ class ChromaRadianceParams(ChromaParams): nerf_final_head_type: str # None means use the same dtype as the model. nerf_embedder_dtype: Optional[torch.dtype] - + use_x0: bool class ChromaRadiance(Chroma): """ @@ -159,6 +159,9 @@ class ChromaRadiance(Chroma): self.skip_dit = [] self.lite = False + if params.use_x0: + self.register_buffer("__x0__", torch.tensor([])) + @property def _nerf_final_layer(self) -> nn.Module: if self.params.nerf_final_head_type == "linear": @@ -276,6 +279,12 @@ class ChromaRadiance(Chroma): params_dict |= overrides return params.__class__(**params_dict) + def _apply_x0_residual(self, predicted, noisy, timesteps): + + # non zero during training to prevent 0 div + eps = 0.0 + return (noisy - predicted) / (timesteps.view(-1,1,1,1) + eps) + def _forward( self, x: Tensor, @@ -316,4 +325,11 @@ class ChromaRadiance(Chroma): transformer_options, attn_mask=kwargs.get("attention_mask", None), ) - return self.forward_nerf(img, img_out, params)[:, :, :h, :w] + + out = self.forward_nerf(img, img_out, params)[:, :, :h, :w] + + # If x0 variant → v-pred, just return this instead + if hasattr(self, "__x0__"): + out = self._apply_x0_residual(out, img, timestep) + return out + diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 74c547427..19e6aa954 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -257,6 +257,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["nerf_tile_size"] = 512 dit_config["nerf_final_head_type"] = "conv" if f"{key_prefix}nerf_final_layer_conv.norm.scale" in state_dict_keys else "linear" dit_config["nerf_embedder_dtype"] = torch.float32 + if "__x0__" in state_dict_keys: # x0 pred + dit_config["use_x0"] = True else: dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys dit_config["yak_mlp"] = '{}double_blocks.0.img_mlp.gate_proj.weight'.format(key_prefix) in state_dict_keys From 9d252f3b70c0e89cbb581e28bb1862593c4e5ceb Mon Sep 17 00:00:00 2001 From: rattus <46076784+rattus128@users.noreply.github.com> Date: Tue, 9 Dec 2025 15:55:13 +1000 Subject: [PATCH 1021/1073] ops: delete dead code (#11204) This became dead code in https://github.com/comfyanonymous/ComfyUI/pull/11069 --- comfy/ops.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 35237c9f7..6f34d50fc 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -22,7 +22,6 @@ import comfy.model_management from comfy.cli_args import args, PerformanceFeature import comfy.float import comfy.rmsnorm -import contextlib import json def run_every_op(): @@ -94,13 +93,6 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None, of else: offload_stream = None - if offload_stream is not None: - wf_context = offload_stream - if hasattr(wf_context, "as_context"): - wf_context = wf_context.as_context(offload_stream) - else: - wf_context = contextlib.nullcontext() - non_blocking = comfy.model_management.device_supports_non_blocking(device) weight_has_function = len(s.weight_function) > 0 From 
e2a800e7ef225260c078ce484c75bb40161d9d94 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com>
Date: Tue, 9 Dec 2025 23:59:16 +0200
Subject: [PATCH 1022/1073] Fix for HunyuanVideo1.5 meanflow distil (#11212)

---
 comfy/ldm/hunyuan_video/model.py | 3 ++-
 comfy/model_detection.py | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/comfy/ldm/hunyuan_video/model.py b/comfy/ldm/hunyuan_video/model.py
index 2749c53f5..55ab550f8 100644
--- a/comfy/ldm/hunyuan_video/model.py
+++ b/comfy/ldm/hunyuan_video/model.py
@@ -43,6 +43,7 @@ class HunyuanVideoParams:
     meanflow: bool
     use_cond_type_embedding: bool
     vision_in_dim: int
+    meanflow_sum: bool
 
 
 class SelfAttentionRef(nn.Module):
@@ -317,7 +318,7 @@ class HunyuanVideo(nn.Module):
                 timesteps_r = transformer_options['sample_sigmas'][w[0] + 1]
                 timesteps_r = timesteps_r.unsqueeze(0).to(device=timesteps.device, dtype=timesteps.dtype)
                 vec_r = self.time_r_in(timestep_embedding(timesteps_r, 256, time_factor=1000.0).to(img.dtype))
-                vec = (vec + vec_r) / 2
+                vec = (vec + vec_r) if self.params.meanflow_sum else (vec + vec_r) / 2
 
         if ref_latent is not None:
             ref_latent_ids = self.img_ids(ref_latent)
diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index 19e6aa954..1f5d34bdd 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -180,8 +180,10 @@ def detect_unet_config(state_dict, key_prefix, metadata=None):
             dit_config["use_cond_type_embedding"] = False
         if '{}vision_in.proj.0.weight'.format(key_prefix) in state_dict_keys:
             dit_config["vision_in_dim"] = state_dict['{}vision_in.proj.0.weight'.format(key_prefix)].shape[0]
+            dit_config["meanflow_sum"] = True
         else:
             dit_config["vision_in_dim"] = None
+            dit_config["meanflow_sum"] = False
         return dit_config
 
     if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in state_dict_keys and ('{}img_in.weight'.format(key_prefix) in state_dict_keys or f"{key_prefix}distilled_guidance_layer.norms.0.scale" in state_dict_keys): #Flux, Chroma or Chroma Radiance (has no img_in.weight)

From 791e30ff5037fa5e7aa4e1396099ea8d6bfb020b Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 9 Dec 2025 14:03:21 -0800
Subject: [PATCH 1023/1073] Fix nan issue when quantizing fp16 tensor. (#11213)

---
 comfy/quant_ops.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py
index 571d3f760..cd96541d7 100644
--- a/comfy/quant_ops.py
+++ b/comfy/quant_ops.py
@@ -399,7 +399,10 @@ class TensorCoreFP8Layout(QuantizedLayout):
         orig_dtype = tensor.dtype
 
         if isinstance(scale, str) and scale == "recalculate":
-            scale = torch.amax(tensor.abs()) / torch.finfo(dtype).max
+            scale = torch.amax(tensor.abs()).to(dtype=torch.float32) / torch.finfo(dtype).max
+            if tensor.dtype not in [torch.float32, torch.bfloat16]: # Prevent scale from being too small
+                tensor_info = torch.finfo(tensor.dtype)
+                scale = (1.0 / torch.clamp((1.0 / scale), min=tensor_info.min, max=tensor_info.max))
 
         if scale is not None:
             if not isinstance(scale, torch.Tensor):

From fc657f471a29d07696ca16b566000e8e555d67d1 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 9 Dec 2025 18:22:09 -0500
Subject: [PATCH 1024/1073] ComfyUI version v0.4.0

From now on ComfyUI will do version numbers a bit differently: every
stable release off the master branch will increment the minor version.
Any time a fix needs to be backported onto a stable version, the patch
version will be incremented.

Example: We release v0.6.0 off the master branch, then a day later a bug
is discovered and we decide to backport the fix onto the v0.6.0 stable.
This will be done in a separate branch in the main repository, and this
new stable will be tagged v0.6.1.
---
 comfyui_version.py | 2 +-
 pyproject.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index 4b039356e..2f083edaf 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.76"
+__version__ = "0.4.0"
diff --git a/pyproject.toml b/pyproject.toml
index 02b94a0ce..e4d3d616a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.76"
+version = "0.4.0"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"

From f668c2e3c99df40561b416cf62b0fd9eec96007a Mon Sep 17 00:00:00 2001
From: Benjamin Lu
Date: Tue, 9 Dec 2025 19:27:07 -0800
Subject: [PATCH 1025/1073] bump comfyui-frontend-package to 1.34.8 (#11220)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 11a7ac245..9e9b25328 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.33.13
+comfyui-frontend-package==1.34.8
 comfyui-workflow-templates==0.7.54
 comfyui-embedded-docs==0.3.1
 torch

From 36357bbcc3c515e37a742457a2b2ab4b7ccc17a8 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Wed, 10 Dec 2025 21:55:09 +0200
Subject: [PATCH 1026/1073] process the NodeV1 dict results correctly (#11237)

---
 comfy_api/latest/_io.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py
index 313a5af20..79217c813 100644
--- a/comfy_api/latest/_io.py
+++ b/comfy_api/latest/_io.py
@@ -1815,7 +1815,7 @@ class NodeOutput(_NodeOutputInternal):
             ui = data["ui"]
         if "expand" in data:
             expand = data["expand"]
-        return cls(args=args, ui=ui, expand=expand)
+        return cls(*args, ui=ui, expand=expand)
 
     def __getitem__(self, index) -> Any:
         return self.args[index]

From 17c92a9f2843d7b9b727531066be2378b350a6ae Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 10 Dec 2025 16:59:48 -0800
Subject: [PATCH 1027/1073] Tweak Z Image memory estimation.
(#11254) --- comfy/supported_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 383c82c3e..dd0f09f32 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1026,7 +1026,7 @@ class ZImage(Lumina2): "shift": 3.0, } - memory_usage_factor = 1.7 + memory_usage_factor = 2.0 supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] From 57ddb7fd13d817e7259c2c992a852832b6b0f07a Mon Sep 17 00:00:00 2001 From: Johnpaul Chiwetelu <49923152+Myestery@users.noreply.github.com> Date: Thu, 11 Dec 2025 03:49:49 +0100 Subject: [PATCH 1028/1073] Fix: filter hidden files from /internal/files endpoint (#11191) --- api_server/routes/internal/internal_routes.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/api_server/routes/internal/internal_routes.py b/api_server/routes/internal/internal_routes.py index 613b0f7c7..b224306da 100644 --- a/api_server/routes/internal/internal_routes.py +++ b/api_server/routes/internal/internal_routes.py @@ -58,8 +58,13 @@ class InternalRoutes: return web.json_response({"error": "Invalid directory type"}, status=400) directory = get_directory_by_type(directory_type) + + def is_visible_file(entry: os.DirEntry) -> bool: + """Filter out hidden files (e.g., .DS_Store on macOS).""" + return entry.is_file() and not entry.name.startswith('.') + sorted_files = sorted( - (entry for entry in os.scandir(directory) if entry.is_file()), + (entry for entry in os.scandir(directory) if is_visible_file(entry)), key=lambda entry: -entry.stat().st_mtime ) return web.json_response([entry.name for entry in sorted_files], status=200) From e711aaf1a75120195c56ebd1f1ce829c6b7b84db Mon Sep 17 00:00:00 2001 From: Farshore <168402472+jiangchengchengark@users.noreply.github.com> Date: Thu, 11 Dec 2025 11:02:26 +0800 Subject: [PATCH 1029/1073] =?UTF-8?q?Lower=20VAE=20loading=20requirements?= =?UTF-8?q?=EF=BC=9ACreate=20a=20new=20branch=20for=20GPU=20memory=20calcu?= =?UTF-8?q?lations=20in=20qwen-image=20vae=20(#11199)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- comfy/sd.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index a16f2d14f..1cad98aef 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -549,8 +549,10 @@ class VAE: ddconfig = {"dim": dim, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "dropout": 0.0} self.first_stage_model = comfy.ldm.wan.vae.WanVAE(**ddconfig) self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32] - self.memory_used_encode = lambda shape, dtype: 6000 * shape[3] * shape[4] * model_management.dtype_size(dtype) - self.memory_used_decode = lambda shape, dtype: 7000 * shape[3] * shape[4] * (8 * 8) * model_management.dtype_size(dtype) + self.memory_used_encode = lambda shape, dtype: (1500 if shape[2]<=4 else 6000) * shape[3] * shape[4] * model_management.dtype_size(dtype) + self.memory_used_decode = lambda shape, dtype: (2200 if shape[2]<=4 else 7000) * shape[3] * shape[4] * (8*8) * model_management.dtype_size(dtype) + + # Hunyuan 3d v2 2.0 & 2.1 elif "geo_decoder.cross_attn_decoder.ln_1.bias" in sd: From 93948e3fc598c14082f744fe82fae056b64ff481 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 11 Dec 2025 08:11:12 +0200 Subject: [PATCH 1030/1073] feat(api-nodes): enable Kling Omni O1 node 
(#11229) --- comfy_api_nodes/nodes_kling.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index 6c840dc47..a2cc87d84 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -2056,7 +2056,7 @@ class KlingExtension(ComfyExtension): OmniProImageToVideoNode, OmniProVideoToVideoNode, OmniProEditVideoNode, - # OmniProImageNode, # need support from backend + OmniProImageNode, ] From f8321eb57b29a4b34cecd27d5d6365adf5e6e601 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 10 Dec 2025 22:30:31 -0800 Subject: [PATCH 1031/1073] Adjust memory usage factor. (#11257) --- comfy/supported_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index dd0f09f32..ef8c75c09 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -541,7 +541,7 @@ class SD3(supported_models_base.BASE): unet_extra_config = {} latent_format = latent_formats.SD3 - memory_usage_factor = 1.2 + memory_usage_factor = 1.6 text_encoder_key_prefix = ["text_encoders."] From fdebe182966d1dd9bee3138264937137bd2302d8 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 11 Dec 2025 14:09:35 -0800 Subject: [PATCH 1032/1073] Fix regular chroma radiance (#11276) --- comfy/model_detection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 1f5d34bdd..94b54b7c2 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -261,6 +261,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["nerf_embedder_dtype"] = torch.float32 if "__x0__" in state_dict_keys: # x0 pred dit_config["use_x0"] = True + else: + dit_config["use_x0"] = False else: dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys dit_config["yak_mlp"] = '{}double_blocks.0.img_mlp.gate_proj.weight'.format(key_prefix) in state_dict_keys From ae65433a602470eea271df47af0eb871d146a002 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 11 Dec 2025 14:15:00 -0800 Subject: [PATCH 1033/1073] This only works on radiance. 
(#11277) --- comfy/model_detection.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 94b54b7c2..dd6a703f6 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -259,10 +259,10 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["nerf_tile_size"] = 512 dit_config["nerf_final_head_type"] = "conv" if f"{key_prefix}nerf_final_layer_conv.norm.scale" in state_dict_keys else "linear" dit_config["nerf_embedder_dtype"] = torch.float32 - if "__x0__" in state_dict_keys: # x0 pred - dit_config["use_x0"] = True - else: - dit_config["use_x0"] = False + if "__x0__" in state_dict_keys: # x0 pred + dit_config["use_x0"] = True + else: + dit_config["use_x0"] = False else: dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys dit_config["yak_mlp"] = '{}double_blocks.0.img_mlp.gate_proj.weight'.format(key_prefix) in state_dict_keys From eeb020b9b77e1f3c0c2806bc1e38c7ba9576439e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 11 Dec 2025 14:33:09 -0800 Subject: [PATCH 1034/1073] Better chroma radiance and other models vram estimation. (#11278) --- comfy/supported_models.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index ef8c75c09..834dfcffc 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -965,7 +965,7 @@ class CosmosT2IPredict2(supported_models_base.BASE): def __init__(self, unet_config): super().__init__(unet_config) - self.memory_usage_factor = (unet_config.get("model_channels", 2048) / 2048) * 0.9 + self.memory_usage_factor = (unet_config.get("model_channels", 2048) / 2048) * 0.95 def get_model(self, state_dict, prefix="", device=None): out = model_base.CosmosPredict2(self, device=device) @@ -1289,7 +1289,7 @@ class ChromaRadiance(Chroma): latent_format = comfy.latent_formats.ChromaRadiance # Pixel-space model, no spatial compression for model input. - memory_usage_factor = 0.038 + memory_usage_factor = 0.044 def get_model(self, state_dict, prefix="", device=None): return model_base.ChromaRadiance(self, device=device) @@ -1332,7 +1332,7 @@ class Omnigen2(supported_models_base.BASE): "shift": 2.6, } - memory_usage_factor = 1.65 #TODO + memory_usage_factor = 1.95 #TODO unet_extra_config = {} latent_format = latent_formats.Flux @@ -1397,7 +1397,7 @@ class HunyuanImage21(HunyuanVideo): latent_format = latent_formats.HunyuanImage21 - memory_usage_factor = 7.7 + memory_usage_factor = 8.7 supported_inference_dtypes = [torch.bfloat16, torch.float32] @@ -1488,7 +1488,7 @@ class Kandinsky5(supported_models_base.BASE): unet_extra_config = {} latent_format = latent_formats.HunyuanVideo - memory_usage_factor = 1.1 #TODO + memory_usage_factor = 1.25 #TODO supported_inference_dtypes = [torch.bfloat16, torch.float32] @@ -1517,7 +1517,7 @@ class Kandinsky5Image(Kandinsky5): } latent_format = latent_formats.Flux - memory_usage_factor = 1.1 #TODO + memory_usage_factor = 1.25 #TODO def get_model(self, state_dict, prefix="", device=None): out = model_base.Kandinsky5Image(self, device=device) From 338d9ae3bbf24a9a06996cdf1c2f228acc65fd96 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 11 Dec 2025 15:56:33 -0800 Subject: [PATCH 1035/1073] Make portable updater work with repos in unmerged state. 
(#11281) --- .ci/update_windows/update.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.ci/update_windows/update.py b/.ci/update_windows/update.py index 59ece5130..fe646a6ed 100755 --- a/.ci/update_windows/update.py +++ b/.ci/update_windows/update.py @@ -53,6 +53,16 @@ try: repo.stash(ident) except KeyError: print("nothing to stash") # noqa: T201 +except: + print("Could not stash, cleaning index and trying again.") # noqa: T201 + repo.state_cleanup() + repo.index.read_tree(repo.head.peel().tree) + repo.index.write() + try: + repo.stash(ident) + except KeyError: + print("nothing to stash.") # noqa: T201 + backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S')) print("creating backup branch: {}".format(backup_branch_name)) # noqa: T201 try: From 982876d59a659adb085be5e236aacc4f2c54c19c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Fri, 12 Dec 2025 05:29:34 +0200 Subject: [PATCH 1036/1073] WanMove support (#11247) --- comfy_api/latest/_io.py | 8 + comfy_extras/nodes_wanmove.py | 535 ++++++++++++++++++++++++++++++++++ nodes.py | 1 + 3 files changed, 544 insertions(+) create mode 100644 comfy_extras/nodes_wanmove.py diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 79217c813..2b634d172 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -774,6 +774,13 @@ class AudioEncoder(ComfyTypeIO): class AudioEncoderOutput(ComfyTypeIO): Type = Any +@comfytype(io_type="TRACKS") +class Tracks(ComfyTypeIO): + class TrackDict(TypedDict): + track_path: torch.Tensor + track_visibility: torch.Tensor + Type = TrackDict + @comfytype(io_type="COMFY_MULTITYPED_V3") class MultiType: Type = Any @@ -1894,6 +1901,7 @@ __all__ = [ "SEGS", "AnyType", "MultiType", + "Tracks", # Dynamic Types "MatchType", # "DynamicCombo", diff --git a/comfy_extras/nodes_wanmove.py b/comfy_extras/nodes_wanmove.py new file mode 100644 index 000000000..5f39afa46 --- /dev/null +++ b/comfy_extras/nodes_wanmove.py @@ -0,0 +1,535 @@ +import nodes +import node_helpers +import torch +import torchvision.transforms.functional as TF +import comfy.model_management +import comfy.utils +import numpy as np +from typing_extensions import override +from comfy_api.latest import ComfyExtension, io +from comfy_extras.nodes_wan import parse_json_tracks + +# https://github.com/ali-vilab/Wan-Move/blob/main/wan/modules/trajectory.py +from PIL import Image, ImageDraw + +SKIP_ZERO = False + +def get_pos_emb( + pos_k: torch.Tensor, # A 1D tensor containing positions for which to generate embeddings. + pos_emb_dim: int, + theta_func: callable = lambda i, d: torch.pow(10000, torch.mul(2, torch.div(i.to(torch.float32), d))), #Function to compute thetas based on position and embedding dimensions. + device: torch.device = torch.device("cpu"), + dtype: torch.dtype = torch.float32, +) -> torch.Tensor: # The position embeddings (batch_size, pos_emb_dim) + + assert pos_emb_dim % 2 == 0, "The dimension of position embeddings must be even." 
+ pos_k = pos_k.to(device, dtype) + if SKIP_ZERO: + pos_k = pos_k + 1 + batch_size = pos_k.size(0) + + denominator = torch.arange(0, pos_emb_dim // 2, device=device, dtype=dtype) + # Expand denominator to match the shape needed for broadcasting + denominator_expanded = denominator.view(1, -1).expand(batch_size, -1) + + thetas = theta_func(denominator_expanded, pos_emb_dim) + + # Ensure pos_k is in the correct shape for broadcasting + pos_k_expanded = pos_k.view(-1, 1).to(dtype) + sin_thetas = torch.sin(torch.div(pos_k_expanded, thetas)) + cos_thetas = torch.cos(torch.div(pos_k_expanded, thetas)) + + # Concatenate sine and cosine embeddings along the last dimension + pos_emb = torch.cat([sin_thetas, cos_thetas], dim=-1) + + return pos_emb + +def create_pos_embeddings( + pred_tracks: torch.Tensor, # the predicted tracks, [T, N, 2] + pred_visibility: torch.Tensor, # the predicted visibility [T, N] + downsample_ratios: list[int], # the ratios for downsampling time, height, and width + height: int, # the height of the feature map + width: int, # the width of the feature map + track_num: int = -1, # the number of tracks to use + t_down_strategy: str = "sample", # the strategy for downsampling time dimension +): + assert t_down_strategy in ["sample", "average"], "Invalid strategy for downsampling time dimension." + + t, n, _ = pred_tracks.shape + t_down, h_down, w_down = downsample_ratios + track_pos = - torch.ones(n, (t-1) // t_down + 1, 2, dtype=torch.long) + + if track_num == -1: + track_num = n + + tracks_idx = torch.randperm(n)[:track_num] + tracks = pred_tracks[:, tracks_idx] + visibility = pred_visibility[:, tracks_idx] + + for t_idx in range(0, t, t_down): + if t_down_strategy == "sample" or t_idx == 0: + cur_tracks = tracks[t_idx] # [N, 2] + cur_visibility = visibility[t_idx] # [N] + else: + cur_tracks = tracks[t_idx:t_idx+t_down].mean(dim=0) + cur_visibility = torch.any(visibility[t_idx:t_idx+t_down], dim=0) + + for i in range(track_num): + if not cur_visibility[i] or cur_tracks[i][0] < 0 or cur_tracks[i][1] < 0 or cur_tracks[i][0] >= width or cur_tracks[i][1] >= height: + continue + x, y = cur_tracks[i] + x, y = int(x // w_down), int(y // h_down) + track_pos[i, t_idx // t_down, 0], track_pos[i, t_idx // t_down, 1] = y, x + + return track_pos # the position embeddings, [N, T', 2], 2 = height, width + +def replace_feature( + vae_feature: torch.Tensor, # [B, C', T', H', W'] + track_pos: torch.Tensor, # [B, N, T', 2] + strength: float = 1.0 +) -> torch.Tensor: + b, _, t, h, w = vae_feature.shape + assert b == track_pos.shape[0], "Batch size mismatch." 
+ n = track_pos.shape[1] + + # Shuffle the trajectory order + track_pos = track_pos[:, torch.randperm(n), :, :] + + # Extract coordinates at time steps ≥ 1 and generate a valid mask + current_pos = track_pos[:, :, 1:, :] # [B, N, T-1, 2] + mask = (current_pos[..., 0] >= 0) & (current_pos[..., 1] >= 0) # [B, N, T-1] + + # Get all valid indices + valid_indices = mask.nonzero(as_tuple=False) # [num_valid, 3] + num_valid = valid_indices.shape[0] + + if num_valid == 0: + return vae_feature + + # Decompose valid indices into each dimension + batch_idx = valid_indices[:, 0] + track_idx = valid_indices[:, 1] + t_rel = valid_indices[:, 2] + t_target = t_rel + 1 # Convert to original time step indices + + # Extract target position coordinates + h_target = current_pos[batch_idx, track_idx, t_rel, 0].long() # Ensure integer indices + w_target = current_pos[batch_idx, track_idx, t_rel, 1].long() + + # Extract source position coordinates (t=0) + h_source = track_pos[batch_idx, track_idx, 0, 0].long() + w_source = track_pos[batch_idx, track_idx, 0, 1].long() + + # Get source features and assign to target positions + src_features = vae_feature[batch_idx, :, 0, h_source, w_source] + dst_features = vae_feature[batch_idx, :, t_target, h_target, w_target] + + vae_feature[batch_idx, :, t_target, h_target, w_target] = dst_features + (src_features - dst_features) * strength + + + return vae_feature + +# Visualize functions + +def _draw_gradient_polyline_on_overlay(overlay, line_width, points, start_color, opacity=1.0): + draw = ImageDraw.Draw(overlay, 'RGBA') + points = points[::-1] + + # Compute total length + total_length = 0 + segment_lengths = [] + for i in range(len(points) - 1): + dx = points[i + 1][0] - points[i][0] + dy = points[i + 1][1] - points[i][1] + length = (dx * dx + dy * dy) ** 0.5 + segment_lengths.append(length) + total_length += length + + if total_length == 0: + return + + accumulated_length = 0 + + # Draw the gradient polyline + for idx, (start_point, end_point) in enumerate(zip(points[:-1], points[1:])): + segment_length = segment_lengths[idx] + steps = max(int(segment_length), 1) + + for i in range(steps): + current_length = accumulated_length + (i / steps) * segment_length + ratio = current_length / total_length + + alpha = int(255 * (1 - ratio) * opacity) + color = (*start_color, alpha) + + x = int(start_point[0] + (end_point[0] - start_point[0]) * i / steps) + y = int(start_point[1] + (end_point[1] - start_point[1]) * i / steps) + + dynamic_line_width = max(int(line_width * (1 - ratio)), 1) + draw.line([(x, y), (x + 1, y)], fill=color, width=dynamic_line_width) + + accumulated_length += segment_length + + +def add_weighted(rgb, track): + rgb = np.array(rgb) # [H, W, C] "RGB" + track = np.array(track) # [H, W, C] "RGBA" + + alpha = track[:, :, 3] / 255.0 + alpha = np.stack([alpha] * 3, axis=-1) + blend_img = track[:, :, :3] * alpha + rgb * (1 - alpha) + + return Image.fromarray(blend_img.astype(np.uint8)) + +def draw_tracks_on_video(video, tracks, visibility=None, track_frame=24, circle_size=12, opacity=0.5, line_width=16): + color_map = [(102, 153, 255), (0, 255, 255), (255, 255, 0), (255, 102, 204), (0, 255, 0)] + + video = video.byte().cpu().numpy() # (81, 480, 832, 3) + tracks = tracks[0].long().detach().cpu().numpy() + if visibility is not None: + visibility = visibility[0].detach().cpu().numpy() + + num_frames, height, width = video.shape[:3] + num_tracks = tracks.shape[1] + alpha_opacity = int(255 * opacity) + + output_frames = [] + for t in range(num_frames): + frame_rgb = 
video[t].astype(np.float32) + + # Create a single RGBA overlay for all tracks in this frame + overlay = Image.new("RGBA", (width, height), (0, 0, 0, 0)) + draw_overlay = ImageDraw.Draw(overlay) + + polyline_data = [] + + # Draw all circles on a single overlay + for n in range(num_tracks): + if visibility is not None and visibility[t, n] == 0: + continue + + track_coord = tracks[t, n] + color = color_map[n % len(color_map)] + circle_color = color + (alpha_opacity,) + + draw_overlay.ellipse((track_coord[0] - circle_size, track_coord[1] - circle_size, track_coord[0] + circle_size, track_coord[1] + circle_size), + fill=circle_color + ) + + # Store polyline data for batch processing + tracks_coord = tracks[max(t - track_frame, 0):t + 1, n] + if len(tracks_coord) > 1: + polyline_data.append((tracks_coord, color)) + + # Blend circles overlay once + overlay_np = np.array(overlay) + alpha = overlay_np[:, :, 3:4] / 255.0 + frame_rgb = overlay_np[:, :, :3] * alpha + frame_rgb * (1 - alpha) + + # Draw all polylines on a single overlay + if polyline_data: + polyline_overlay = Image.new("RGBA", (width, height), (0, 0, 0, 0)) + for tracks_coord, color in polyline_data: + _draw_gradient_polyline_on_overlay(polyline_overlay, line_width, tracks_coord, color, opacity) + + # Blend polylines overlay once + polyline_np = np.array(polyline_overlay) + alpha = polyline_np[:, :, 3:4] / 255.0 + frame_rgb = polyline_np[:, :, :3] * alpha + frame_rgb * (1 - alpha) + + output_frames.append(Image.fromarray(frame_rgb.astype(np.uint8))) + + return output_frames + + +class WanMoveVisualizeTracks(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanMoveVisualizeTracks", + category="conditioning/video_models", + inputs=[ + io.Image.Input("images"), + io.Tracks.Input("tracks", optional=True), + io.Int.Input("line_resolution", default=24, min=1, max=1024), + io.Int.Input("circle_size", default=12, min=1, max=128), + io.Float.Input("opacity", default=0.75, min=0.0, max=1.0, step=0.01), + io.Int.Input("line_width", default=16, min=1, max=128), + ], + outputs=[ + io.Image.Output(), + ], + ) + + @classmethod + def execute(cls, images, line_resolution, circle_size, opacity, line_width, tracks=None) -> io.NodeOutput: + if tracks is None: + return io.NodeOutput(images) + + track_path = tracks["track_path"].unsqueeze(0) + track_visibility = tracks["track_visibility"].unsqueeze(0) + images_in = images * 255.0 + if images_in.shape[0] != track_path.shape[1]: + repeat_count = track_path.shape[1] // images.shape[0] + images_in = images_in.repeat(repeat_count, 1, 1, 1) + track_video = draw_tracks_on_video(images_in, track_path, track_visibility, track_frame=line_resolution, circle_size=circle_size, opacity=opacity, line_width=line_width) + track_video = torch.stack([TF.to_tensor(frame) for frame in track_video], dim=0).movedim(1, -1).float() + + return io.NodeOutput(track_video.to(comfy.model_management.intermediate_device())) + + +class WanMoveTracksFromCoords(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanMoveTracksFromCoords", + category="conditioning/video_models", + inputs=[ + io.String.Input("track_coords", force_input=True, default="[]", optional=True), + io.Mask.Input("track_mask", optional=True), + ], + outputs=[ + io.Tracks.Output(), + io.Int.Output(display_name="track_length"), + ], + ) + + @classmethod + def execute(cls, track_coords, track_mask=None) -> io.NodeOutput: + device=comfy.model_management.intermediate_device() + + tracks_data = 
parse_json_tracks(track_coords) + track_length = len(tracks_data[0]) + + track_list = [ + [[track[frame]['x'], track[frame]['y']] for track in tracks_data] + for frame in range(len(tracks_data[0])) + ] + tracks = torch.tensor(track_list, dtype=torch.float32, device=device) # [frames, num_tracks, 2] + + num_tracks = tracks.shape[-2] + if track_mask is None: + track_visibility = torch.ones((track_length, num_tracks), dtype=torch.bool, device=device) + else: + track_visibility = (track_mask > 0).any(dim=(1, 2)).unsqueeze(-1) + + out_track_info = {} + out_track_info["track_path"] = tracks + out_track_info["track_visibility"] = track_visibility + return io.NodeOutput(out_track_info, track_length) + + +class GenerateTracks(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="GenerateTracks", + category="conditioning/video_models", + inputs=[ + io.Int.Input("width", default=832, min=16, max=4096, step=16), + io.Int.Input("height", default=480, min=16, max=4096, step=16), + io.Float.Input("start_x", default=0.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized X coordinate (0-1) for start position."), + io.Float.Input("start_y", default=0.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized Y coordinate (0-1) for start position."), + io.Float.Input("end_x", default=1.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized X coordinate (0-1) for end position."), + io.Float.Input("end_y", default=1.0, min=0.0, max=1.0, step=0.01, tooltip="Normalized Y coordinate (0-1) for end position."), + io.Int.Input("num_frames", default=81, min=1, max=1024), + io.Int.Input("num_tracks", default=5, min=1, max=100), + io.Float.Input("track_spread", default=0.025, min=0.0, max=1.0, step=0.001, tooltip="Normalized distance between tracks. Tracks are spread perpendicular to the motion direction."), + io.Boolean.Input("bezier", default=False, tooltip="Enable Bezier curve path using the mid point as control point."), + io.Float.Input("mid_x", default=0.5, min=0.0, max=1.0, step=0.01, tooltip="Normalized X control point for Bezier curve. Only used when 'bezier' is enabled."), + io.Float.Input("mid_y", default=0.5, min=0.0, max=1.0, step=0.01, tooltip="Normalized Y control point for Bezier curve. 
Only used when 'bezier' is enabled."), + io.Combo.Input( + "interpolation", + options=["linear", "ease_in", "ease_out", "ease_in_out", "constant"], + tooltip="Controls the timing/speed of movement along the path.", + ), + io.Mask.Input("track_mask", optional=True, tooltip="Optional mask to indicate visible frames."), + ], + outputs=[ + io.Tracks.Output(), + io.Int.Output(display_name="track_length"), + ], + ) + + @classmethod + def execute(cls, width, height, start_x, start_y, mid_x, mid_y, end_x, end_y, num_frames, num_tracks, + track_spread, bezier=False, interpolation="linear", track_mask=None) -> io.NodeOutput: + device = comfy.model_management.intermediate_device() + track_length = num_frames + + # normalized coordinates to pixel coordinates + start_x_px = start_x * width + start_y_px = start_y * height + mid_x_px = mid_x * width + mid_y_px = mid_y * height + end_x_px = end_x * width + end_y_px = end_y * height + + track_spread_px = track_spread * (width + height) / 2 # Use average of width/height for spread to keep it proportional + + t = torch.linspace(0, 1, num_frames, device=device) + if interpolation == "constant": # All points stay at start position + interp_values = torch.zeros_like(t) + elif interpolation == "linear": + interp_values = t + elif interpolation == "ease_in": + interp_values = t ** 2 + elif interpolation == "ease_out": + interp_values = 1 - (1 - t) ** 2 + elif interpolation == "ease_in_out": + interp_values = t * t * (3 - 2 * t) + + if bezier: # apply interpolation to t for timing control along the bezier path + t_interp = interp_values + one_minus_t = 1 - t_interp + x_positions = one_minus_t ** 2 * start_x_px + 2 * one_minus_t * t_interp * mid_x_px + t_interp ** 2 * end_x_px + y_positions = one_minus_t ** 2 * start_y_px + 2 * one_minus_t * t_interp * mid_y_px + t_interp ** 2 * end_y_px + tangent_x = 2 * one_minus_t * (mid_x_px - start_x_px) + 2 * t_interp * (end_x_px - mid_x_px) + tangent_y = 2 * one_minus_t * (mid_y_px - start_y_px) + 2 * t_interp * (end_y_px - mid_y_px) + else: # calculate base x and y positions for each frame (center track) + x_positions = start_x_px + (end_x_px - start_x_px) * interp_values + y_positions = start_y_px + (end_y_px - start_y_px) * interp_values + # For non-bezier, tangent is constant (direction from start to end) + tangent_x = torch.full_like(t, end_x_px - start_x_px) + tangent_y = torch.full_like(t, end_y_px - start_y_px) + + track_list = [] + for frame_idx in range(num_frames): + # Calculate perpendicular direction at this frame + tx = tangent_x[frame_idx].item() + ty = tangent_y[frame_idx].item() + length = (tx ** 2 + ty ** 2) ** 0.5 + + if length > 0: # Perpendicular unit vector (rotate 90 degrees) + perp_x = -ty / length + perp_y = tx / length + else: # If tangent is zero, spread horizontally + perp_x = 1.0 + perp_y = 0.0 + + frame_tracks = [] + for track_idx in range(num_tracks): # center tracks around the main path offset ranges from -(num_tracks-1)/2 to +(num_tracks-1)/2 + offset = (track_idx - (num_tracks - 1) / 2) * track_spread_px + track_x = x_positions[frame_idx].item() + perp_x * offset + track_y = y_positions[frame_idx].item() + perp_y * offset + frame_tracks.append([track_x, track_y]) + track_list.append(frame_tracks) + + tracks = torch.tensor(track_list, dtype=torch.float32, device=device) # [frames, num_tracks, 2] + + if track_mask is None: + track_visibility = torch.ones((track_length, num_tracks), dtype=torch.bool, device=device) + else: + track_visibility = (track_mask > 0).any(dim=(1, 2)).unsqueeze(-1) + + 
out_track_info = {} + out_track_info["track_path"] = tracks + out_track_info["track_visibility"] = track_visibility + return io.NodeOutput(out_track_info, track_length) + + +class WanMoveConcatTrack(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanMoveConcatTrack", + category="conditioning/video_models", + inputs=[ + io.Tracks.Input("tracks_1"), + io.Tracks.Input("tracks_2", optional=True), + ], + outputs=[ + io.Tracks.Output(), + ], + ) + + @classmethod + def execute(cls, tracks_1=None, tracks_2=None) -> io.NodeOutput: + if tracks_2 is None: + return io.NodeOutput(tracks_1) + + tracks_out = torch.cat([tracks_1["track_path"], tracks_2["track_path"]], dim=1) # Concatenate along the track dimension + mask_out = torch.cat([tracks_1["track_visibility"], tracks_2["track_visibility"]], dim=-1) + + out_track_info = {} + out_track_info["track_path"] = tracks_out + out_track_info["track_visibility"] = mask_out + return io.NodeOutput(out_track_info) + + +class WanMoveTrackToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="WanMoveTrackToVideo", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Tracks.Input("tracks", optional=True), + io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01, tooltip="Strength of the track conditioning."), + io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Image.Input("start_image"), + io.ClipVisionOutput.Input("clip_vision_output", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, positive, negative, vae, width, height, length, batch_size, strength, tracks=None, start_image=None, clip_vision_output=None) -> io.NodeOutput: + device=comfy.model_management.intermediate_device() + latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=device) + if start_image is not None: + start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1) + image = torch.ones((length, height, width, start_image.shape[-1]), device=start_image.device, dtype=start_image.dtype) * 0.5 + image[:start_image.shape[0]] = start_image + + concat_latent_image = vae.encode(image[:, :, :, :3]) + mask = torch.ones((1, 1, latent.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=start_image.device, dtype=start_image.dtype) + mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0 + + if tracks is not None and strength > 0.0: + tracks_path = tracks["track_path"][:length] # [T, N, 2] + num_tracks = tracks_path.shape[-2] + + track_visibility = tracks.get("track_visibility", torch.ones((length, num_tracks), dtype=torch.bool, device=device)) + + track_pos = create_pos_embeddings(tracks_path, track_visibility, [4, 8, 8], height, width, track_num=num_tracks) + track_pos = comfy.utils.resize_to_batch_size(track_pos.unsqueeze(0), batch_size) + concat_latent_image_pos = replace_feature(concat_latent_image, track_pos, strength) + else: + concat_latent_image_pos = 
concat_latent_image + + positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image_pos, "concat_mask": mask}) + negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask}) + + if clip_vision_output is not None: + positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output}) + negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output}) + + out_latent = {} + out_latent["samples"] = latent + return io.NodeOutput(positive, negative, out_latent) + + +class WanMoveExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + WanMoveTrackToVideo, + WanMoveTracksFromCoords, + WanMoveConcatTrack, + WanMoveVisualizeTracks, + GenerateTracks, + ] + +async def comfy_entrypoint() -> WanMoveExtension: + return WanMoveExtension() diff --git a/nodes.py b/nodes.py index 8d28a725d..8678f510a 100644 --- a/nodes.py +++ b/nodes.py @@ -2358,6 +2358,7 @@ async def init_builtin_extra_nodes(): "nodes_logic.py", "nodes_nop.py", "nodes_kandinsky5.py", + "nodes_wanmove.py", ] import_failed = [] From 5495589db38409353a85b06df7d10f8de2f9c78d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 11 Dec 2025 20:32:27 -0800 Subject: [PATCH 1037/1073] Respect the dtype the op was initialized in for non quant mixed op. (#11282) --- comfy/ops.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 6f34d50fc..6ae6e791a 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -497,8 +497,10 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec ) -> None: super().__init__() - self.factory_kwargs = {"device": device, "dtype": MixedPrecisionOps._compute_dtype} - # self.factory_kwargs = {"device": device, "dtype": dtype} + if dtype is None: + dtype = MixedPrecisionOps._compute_dtype + + self.factory_kwargs = {"device": device, "dtype": dtype} self.in_features = in_features self.out_features = out_features @@ -530,7 +532,10 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec layer_conf = json.loads(layer_conf.numpy().tobytes()) if layer_conf is None: - self.weight = torch.nn.Parameter(weight.to(device=device, dtype=MixedPrecisionOps._compute_dtype), requires_grad=False) + dtype = self.factory_kwargs["dtype"] + self.weight = torch.nn.Parameter(weight.to(device=device, dtype=dtype), requires_grad=False) + if dtype != MixedPrecisionOps._compute_dtype: + self.comfy_cast_weights = True else: self.quant_format = layer_conf.get("format", None) if not self._full_precision_mm: From 908fd7d7496f6de88722263e1e00fcd3d22e584f Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 12 Dec 2025 10:18:31 +0200 Subject: [PATCH 1038/1073] feat(api-nodes): new TextToVideoWithAudio and ImageToVideoWithAudio nodes (#11267) --- comfy_api_nodes/apis/kling_api.py | 28 ++++- comfy_api_nodes/nodes_kling.py | 169 ++++++++++++++++++++++++++---- 2 files changed, 174 insertions(+), 23 deletions(-) diff --git a/comfy_api_nodes/apis/kling_api.py b/comfy_api_nodes/apis/kling_api.py index d8949f8ac..80a758466 100644 --- a/comfy_api_nodes/apis/kling_api.py +++ b/comfy_api_nodes/apis/kling_api.py @@ -51,25 +51,25 @@ class TaskStatusImageResult(BaseModel): url: str = Field(..., description="URL for generated image") 
-class OmniTaskStatusResults(BaseModel): +class TaskStatusResults(BaseModel): videos: list[TaskStatusVideoResult] | None = Field(None) images: list[TaskStatusImageResult] | None = Field(None) -class OmniTaskStatusResponseData(BaseModel): +class TaskStatusResponseData(BaseModel): created_at: int | None = Field(None, description="Task creation time") updated_at: int | None = Field(None, description="Task update time") task_status: str | None = None task_status_msg: str | None = Field(None, description="Additional failure reason. Only for polling endpoint.") task_id: str | None = Field(None, description="Task ID") - task_result: OmniTaskStatusResults | None = Field(None) + task_result: TaskStatusResults | None = Field(None) -class OmniTaskStatusResponse(BaseModel): +class TaskStatusResponse(BaseModel): code: int | None = Field(None, description="Error code") message: str | None = Field(None, description="Error message") request_id: str | None = Field(None, description="Request ID") - data: OmniTaskStatusResponseData | None = Field(None) + data: TaskStatusResponseData | None = Field(None) class OmniImageParamImage(BaseModel): @@ -84,3 +84,21 @@ class OmniProImageRequest(BaseModel): mode: str = Field("pro") n: int | None = Field(1, le=9) image_list: list[OmniImageParamImage] | None = Field(..., max_length=10) + + +class TextToVideoWithAudioRequest(BaseModel): + model_name: str = Field(..., description="kling-v2-6") + aspect_ratio: str = Field(..., description="'16:9', '9:16' or '1:1'") + duration: str = Field(..., description="'5' or '10'") + prompt: str = Field(...) + mode: str = Field("pro") + sound: str = Field(..., description="'on' or 'off'") + + +class ImageToVideoWithAudioRequest(BaseModel): + model_name: str = Field(..., description="kling-v2-6") + image: str = Field(...) + duration: str = Field(..., description="'5' or '10'") + prompt: str = Field(...) + mode: str = Field("pro") + sound: str = Field(..., description="'on' or 'off'") diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index a2cc87d84..e545fe490 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -50,6 +50,7 @@ from comfy_api_nodes.apis import ( KlingSingleImageEffectModelName, ) from comfy_api_nodes.apis.kling_api import ( + ImageToVideoWithAudioRequest, OmniImageParamImage, OmniParamImage, OmniParamVideo, @@ -57,7 +58,8 @@ from comfy_api_nodes.apis.kling_api import ( OmniProImageRequest, OmniProReferences2VideoRequest, OmniProText2VideoRequest, - OmniTaskStatusResponse, + TaskStatusResponse, + TextToVideoWithAudioRequest, ) from comfy_api_nodes.util import ( ApiEndpoint, @@ -242,7 +244,7 @@ def normalize_omni_prompt_references(prompt: str) -> str: return re.sub(r"(?\d*)(?!\w)", _video_repl, prompt) -async def finish_omni_video_task(cls: type[IO.ComfyNode], response: OmniTaskStatusResponse) -> IO.NodeOutput: +async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusResponse) -> IO.NodeOutput: if response.code: raise RuntimeError( f"Kling request failed. 
Code: {response.code}, Message: {response.message}, Data: {response.data}" @@ -250,7 +252,7 @@ async def finish_omni_video_task(cls: type[IO.ComfyNode], response: OmniTaskStat final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/kling/v1/videos/omni-video/{response.data.task_id}"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, status_extractor=lambda r: (r.data.task_status if r.data else None), max_poll_attempts=160, ) @@ -483,12 +485,12 @@ async def execute_image2video( task_id = task_creation_response.data.task_id final_response = await poll_op( - cls, - ApiEndpoint(path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}"), - response_model=KlingImage2VideoResponse, - estimated_duration=AVERAGE_DURATION_I2V, - status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None), - ) + cls, + ApiEndpoint(path=f"{PATH_IMAGE_TO_VIDEO}/{task_id}"), + response_model=KlingImage2VideoResponse, + estimated_duration=AVERAGE_DURATION_I2V, + status_extractor=lambda r: (r.data.task_status.value if r.data and r.data.task_status else None), + ) validate_video_result_response(final_response) video = get_video_from_response(final_response) @@ -834,7 +836,7 @@ class OmniProTextToVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, data=OmniProText2VideoRequest( model_name=model_name, prompt=prompt, @@ -929,7 +931,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, data=OmniProFirstLastFrameRequest( model_name=model_name, prompt=prompt, @@ -997,7 +999,7 @@ class OmniProImageToVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, data=OmniProReferences2VideoRequest( model_name=model_name, prompt=prompt, @@ -1081,7 +1083,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, data=OmniProReferences2VideoRequest( model_name=model_name, prompt=prompt, @@ -1162,7 +1164,7 @@ class OmniProEditVideoNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, data=OmniProReferences2VideoRequest( model_name=model_name, prompt=prompt, @@ -1237,7 +1239,7 @@ class OmniProImageNode(IO.ComfyNode): response = await sync_op( cls, ApiEndpoint(path="/proxy/kling/v1/images/omni-image", method="POST"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, data=OmniProImageRequest( model_name=model_name, prompt=prompt, @@ -1253,7 +1255,7 @@ class OmniProImageNode(IO.ComfyNode): final_response = await poll_op( cls, ApiEndpoint(path=f"/proxy/kling/v1/images/omni-image/{response.data.task_id}"), - response_model=OmniTaskStatusResponse, + response_model=TaskStatusResponse, status_extractor=lambda r: (r.data.task_status if r.data else None), ) return IO.NodeOutput(await download_url_to_image_tensor(final_response.data.task_result.images[0].url)) @@ -1328,9 +1330,8 @@ class 
KlingImage2VideoNode(IO.ComfyNode): def define_schema(cls) -> IO.Schema: return IO.Schema( node_id="KlingImage2VideoNode", - display_name="Kling Image to Video", + display_name="Kling Image(First Frame) to Video", category="api node/video/Kling", - description="Kling Image to Video Node", inputs=[ IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), @@ -2034,6 +2035,136 @@ class KlingImageGenerationNode(IO.ComfyNode): return IO.NodeOutput(await image_result_to_node_output(images)) +class TextToVideoWithAudio(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingTextToVideoWithAudio", + display_name="Kling Text to Video with Audio", + category="api node/video/Kling", + inputs=[ + IO.Combo.Input("model_name", options=["kling-v2-6"]), + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."), + IO.Combo.Input("mode", options=["pro"]), + IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]), + IO.Combo.Input("duration", options=[5, 10]), + IO.Boolean.Input("generate_audio", default=True), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + prompt: str, + mode: str, + aspect_ratio: str, + duration: int, + generate_audio: bool, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=2500) + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/videos/text2video", method="POST"), + response_model=TaskStatusResponse, + data=TextToVideoWithAudioRequest( + model_name=model_name, + prompt=prompt, + mode=mode, + aspect_ratio=aspect_ratio, + duration=str(duration), + sound="on" if generate_audio else "off", + ), + ) + if response.code: + raise RuntimeError( + f"Kling request failed. 
Code: {response.code}, Message: {response.message}, Data: {response.data}" + ) + final_response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/kling/v1/videos/text2video/{response.data.task_id}"), + response_model=TaskStatusResponse, + status_extractor=lambda r: (r.data.task_status if r.data else None), + ) + return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url)) + + +class ImageToVideoWithAudio(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="KlingImageToVideoWithAudio", + display_name="Kling Image(First Frame) to Video with Audio", + category="api node/video/Kling", + inputs=[ + IO.Combo.Input("model_name", options=["kling-v2-6"]), + IO.Image.Input("start_frame"), + IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt."), + IO.Combo.Input("mode", options=["pro"]), + IO.Combo.Input("duration", options=[5, 10]), + IO.Boolean.Input("generate_audio", default=True), + ], + outputs=[ + IO.Video.Output(), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + ) + + @classmethod + async def execute( + cls, + model_name: str, + start_frame: Input.Image, + prompt: str, + mode: str, + duration: int, + generate_audio: bool, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=2500) + validate_image_dimensions(start_frame, min_width=300, min_height=300) + validate_image_aspect_ratio(start_frame, (1, 2.5), (2.5, 1)) + response = await sync_op( + cls, + ApiEndpoint(path="/proxy/kling/v1/videos/image2video", method="POST"), + response_model=TaskStatusResponse, + data=ImageToVideoWithAudioRequest( + model_name=model_name, + image=(await upload_images_to_comfyapi(cls, start_frame))[0], + prompt=prompt, + mode=mode, + duration=str(duration), + sound="on" if generate_audio else "off", + ), + ) + if response.code: + raise RuntimeError( + f"Kling request failed. Code: {response.code}, Message: {response.message}, Data: {response.data}" + ) + final_response = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/kling/v1/videos/image2video/{response.data.task_id}"), + response_model=TaskStatusResponse, + status_extractor=lambda r: (r.data.task_status if r.data else None), + ) + return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url)) + + class KlingExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: @@ -2057,6 +2188,8 @@ class KlingExtension(ComfyExtension): OmniProVideoToVideoNode, OmniProEditVideoNode, OmniProImageNode, + TextToVideoWithAudio, + ImageToVideoWithAudio, ] From c5a47a16924e1be96241553a1448b298e57e50a1 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 12 Dec 2025 08:49:35 -0800 Subject: [PATCH 1039/1073] Fix bias dtype issue in mixed ops. 
(#11293) --- comfy/ops.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 6ae6e791a..0384c8717 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -504,10 +504,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec self.in_features = in_features self.out_features = out_features - if bias: - self.bias = torch.nn.Parameter(torch.empty(out_features, **self.factory_kwargs)) - else: - self.register_parameter("bias", None) + self._has_bias = bias self.tensor_class = None self._full_precision_mm = MixedPrecisionOps._full_precision_mm @@ -536,6 +533,10 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec self.weight = torch.nn.Parameter(weight.to(device=device, dtype=dtype), requires_grad=False) if dtype != MixedPrecisionOps._compute_dtype: self.comfy_cast_weights = True + if self._has_bias: + self.bias = torch.nn.Parameter(torch.empty(self.out_features, device=device, dtype=dtype)) + else: + self.register_parameter("bias", None) else: self.quant_format = layer_conf.get("format", None) if not self._full_precision_mm: @@ -565,6 +566,11 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec requires_grad=False ) + if self._has_bias: + self.bias = torch.nn.Parameter(torch.empty(self.out_features, device=device, dtype=MixedPrecisionOps._compute_dtype)) + else: + self.register_parameter("bias", None) + for param_name in qconfig["parameters"]: param_key = f"{prefix}{param_name}" _v = state_dict.pop(param_key, None) From da2bfb5b0af26c7a1c44ec951dbd0fffe413c793 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 12 Dec 2025 22:39:11 -0800 Subject: [PATCH 1040/1073] Basic implementation of z image fun control union 2.0 (#11304) The inpaint part is currently missing and will be implemented later. I think they messed up this model pretty bad. They added some control_noise_refiner blocks but don't actually use them. There is a typo in their code so instead of doing control_noise_refiner -> control_layers it runs the whole control_layers twice. Unfortunately they trained with this typo so the model works but is kind of slow and would probably perform a lot better if they corrected their code and trained it again. 
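A minimal sketch of the control flow described above (illustrative only; `refiner_blocks` and `control_blocks` are hypothetical stand-ins for the upstream `control_noise_refiner` and `control_layers` module lists, not the actual code): the intended path would refine the control context once with the noise-refiner blocks and then run the control blocks, while the typo'd path feeds the context through the control blocks twice.

    # Illustrative Python sketch, not the upstream implementation.
    def intended_path(ctx, refiner_blocks, control_blocks):
        for block in refiner_blocks:   # control_noise_refiner first
            ctx = block(ctx)
        for block in control_blocks:   # then control_layers once
            ctx = block(ctx)
        return ctx

    def path_as_trained(ctx, refiner_blocks, control_blocks):
        for block in control_blocks:   # typo: control_layers run where the refiner should be
            ctx = block(ctx)
        for block in control_blocks:   # ...and then control_layers again, so it runs twice
            ctx = block(ctx)
        return ctx
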
--- comfy/ldm/lumina/controlnet.py | 95 +++++++++++++++++++++++-------- comfy/ldm/lumina/model.py | 16 +++++- comfy/model_patcher.py | 3 + comfy_extras/nodes_model_patch.py | 72 +++++++++++++++++------ 4 files changed, 142 insertions(+), 44 deletions(-) diff --git a/comfy/ldm/lumina/controlnet.py b/comfy/ldm/lumina/controlnet.py index fd7ce3b5c..8e2de7977 100644 --- a/comfy/ldm/lumina/controlnet.py +++ b/comfy/ldm/lumina/controlnet.py @@ -41,6 +41,11 @@ class ZImage_Control(torch.nn.Module): ffn_dim_multiplier: float = (8.0 / 3.0), norm_eps: float = 1e-5, qk_norm: bool = True, + n_control_layers=6, + control_in_dim=16, + additional_in_dim=0, + broken=False, + refiner_control=False, dtype=None, device=None, operations=None, @@ -49,10 +54,11 @@ class ZImage_Control(torch.nn.Module): super().__init__() operation_settings = {"operations": operations, "device": device, "dtype": dtype} - self.additional_in_dim = 0 - self.control_in_dim = 16 + self.broken = broken + self.additional_in_dim = additional_in_dim + self.control_in_dim = control_in_dim n_refiner_layers = 2 - self.n_control_layers = 6 + self.n_control_layers = n_control_layers self.control_layers = nn.ModuleList( [ ZImageControlTransformerBlock( @@ -74,28 +80,49 @@ class ZImage_Control(torch.nn.Module): all_x_embedder = {} patch_size = 2 f_patch_size = 1 - x_embedder = operations.Linear(f_patch_size * patch_size * patch_size * self.control_in_dim, dim, bias=True, device=device, dtype=dtype) + x_embedder = operations.Linear(f_patch_size * patch_size * patch_size * (self.control_in_dim + self.additional_in_dim), dim, bias=True, device=device, dtype=dtype) all_x_embedder[f"{patch_size}-{f_patch_size}"] = x_embedder + self.refiner_control = refiner_control + self.control_all_x_embedder = nn.ModuleDict(all_x_embedder) - self.control_noise_refiner = nn.ModuleList( - [ - JointTransformerBlock( - layer_id, - dim, - n_heads, - n_kv_heads, - multiple_of, - ffn_dim_multiplier, - norm_eps, - qk_norm, - modulation=True, - z_image_modulation=True, - operation_settings=operation_settings, - ) - for layer_id in range(n_refiner_layers) - ] - ) + if self.refiner_control: + self.control_noise_refiner = nn.ModuleList( + [ + ZImageControlTransformerBlock( + layer_id, + dim, + n_heads, + n_kv_heads, + multiple_of, + ffn_dim_multiplier, + norm_eps, + qk_norm, + block_id=layer_id, + operation_settings=operation_settings, + ) + for layer_id in range(n_refiner_layers) + ] + ) + else: + self.control_noise_refiner = nn.ModuleList( + [ + JointTransformerBlock( + layer_id, + dim, + n_heads, + n_kv_heads, + multiple_of, + ffn_dim_multiplier, + norm_eps, + qk_norm, + modulation=True, + z_image_modulation=True, + operation_settings=operation_settings, + ) + for layer_id in range(n_refiner_layers) + ] + ) def forward(self, cap_feats, control_context, x_freqs_cis, adaln_input): patch_size = 2 @@ -105,9 +132,29 @@ class ZImage_Control(torch.nn.Module): control_context = self.control_all_x_embedder[f"{patch_size}-{f_patch_size}"](control_context.view(B, C, H // pH, pH, W // pW, pW).permute(0, 2, 4, 3, 5, 1).flatten(3).flatten(1, 2)) x_attn_mask = None - for layer in self.control_noise_refiner: - control_context = layer(control_context, x_attn_mask, x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input) + if not self.refiner_control: + for layer in self.control_noise_refiner: + control_context = layer(control_context, x_attn_mask, x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input) + return control_context + def 
forward_noise_refiner_block(self, layer_id, control_context, x, x_attn_mask, x_freqs_cis, adaln_input): + if self.refiner_control: + if self.broken: + if layer_id == 0: + return self.control_layers[layer_id](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) + if layer_id > 0: + out = None + for i in range(1, len(self.control_layers)): + o, control_context = self.control_layers[i](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) + if out is None: + out = o + + return (out, control_context) + else: + return self.control_noise_refiner[layer_id](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) + else: + return (None, control_context) + def forward_control_block(self, layer_id, control_context, x, x_attn_mask, x_freqs_cis, adaln_input): return self.control_layers[layer_id](control_context, x, x_mask=x_attn_mask, freqs_cis=x_freqs_cis[:control_context.shape[0], :control_context.shape[1]], adaln_input=adaln_input) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index c47df49ca..96cb37fa6 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -536,6 +536,7 @@ class NextDiT(nn.Module): bsz = len(x) pH = pW = self.patch_size device = x[0].device + orig_x = x if self.pad_tokens_multiple is not None: pad_extra = (-cap_feats.shape[1]) % self.pad_tokens_multiple @@ -572,13 +573,21 @@ class NextDiT(nn.Module): freqs_cis = self.rope_embedder(torch.cat((cap_pos_ids, x_pos_ids), dim=1)).movedim(1, 2) + patches = transformer_options.get("patches", {}) + # refine context for layer in self.context_refiner: cap_feats = layer(cap_feats, cap_mask, freqs_cis[:, :cap_pos_ids.shape[1]], transformer_options=transformer_options) padded_img_mask = None - for layer in self.noise_refiner: + x_input = x + for i, layer in enumerate(self.noise_refiner): x = layer(x, padded_img_mask, freqs_cis[:, cap_pos_ids.shape[1]:], t, transformer_options=transformer_options) + if "noise_refiner" in patches: + for p in patches["noise_refiner"]: + out = p({"img": x, "img_input": x_input, "txt": cap_feats, "pe": freqs_cis[:, cap_pos_ids.shape[1]:], "vec": t, "x": orig_x, "block_index": i, "transformer_options": transformer_options, "block_type": "noise_refiner"}) + if "img" in out: + x = out["img"] padded_full_embed = torch.cat((cap_feats, x), dim=1) mask = None @@ -622,14 +631,15 @@ class NextDiT(nn.Module): patches = transformer_options.get("patches", {}) x_is_tensor = isinstance(x, torch.Tensor) - img, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens, transformer_options=transformer_options) + img, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, adaln_input, num_tokens, transformer_options=transformer_options) freqs_cis = freqs_cis.to(img.device) + img_input = img for i, layer in enumerate(self.layers): img = layer(img, mask, freqs_cis, adaln_input, transformer_options=transformer_options) if "double_block" in patches: for p in patches["double_block"]: - out = p({"img": img[:, cap_size[0]:], "txt": img[:, :cap_size[0]], "pe": freqs_cis[:, cap_size[0]:], "vec": adaln_input, "x": x, "block_index": i, "transformer_options": transformer_options}) + out = p({"img": img[:, cap_size[0]:], "img_input": img_input[:, cap_size[0]:], "txt": img[:, 
:cap_size[0]], "pe": freqs_cis[:, cap_size[0]:], "vec": adaln_input, "x": x, "block_index": i, "transformer_options": transformer_options}) if "img" in out: img[:, cap_size[0]:] = out["img"] if "txt" in out: diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index a486c2723..93d26c690 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -454,6 +454,9 @@ class ModelPatcher: def set_model_post_input_patch(self, patch): self.set_model_patch(patch, "post_input") + def set_model_noise_refiner_patch(self, patch): + self.set_model_patch(patch, "noise_refiner") + def set_model_rope_options(self, scale_x, shift_x, scale_y, shift_y, scale_t, shift_t, **kwargs): rope_options = self.model_options["transformer_options"].get("rope_options", {}) rope_options["scale_x"] = scale_x diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index c61810dbf..ec0e790dc 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -243,7 +243,13 @@ class ModelPatchLoader: model = SigLIPMultiFeatProjModel(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) elif 'control_all_x_embedder.2-1.weight' in sd: # alipai z image fun controlnet sd = z_image_convert(sd) - model = comfy.ldm.lumina.controlnet.ZImage_Control(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast) + config = {} + if 'control_layers.14.adaLN_modulation.0.weight' in sd: + config['n_control_layers'] = 15 + config['additional_in_dim'] = 17 + config['refiner_control'] = True + config['broken'] = True + model = comfy.ldm.lumina.controlnet.ZImage_Control(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast, **config) model.load_state_dict(sd) model = comfy.model_patcher.ModelPatcher(model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) @@ -297,56 +303,86 @@ class DiffSynthCnetPatch: return [self.model_patch] class ZImageControlPatch: - def __init__(self, model_patch, vae, image, strength): + def __init__(self, model_patch, vae, image, strength, inpaint_image=None, mask=None): self.model_patch = model_patch self.vae = vae self.image = image + self.inpaint_image = inpaint_image + self.mask = mask self.strength = strength self.encoded_image = self.encode_latent_cond(image) self.encoded_image_size = (image.shape[1], image.shape[2]) self.temp_data = None - def encode_latent_cond(self, image): - latent_image = comfy.latent_formats.Flux().process_in(self.vae.encode(image)) - return latent_image + def encode_latent_cond(self, control_image, inpaint_image=None): + latent_image = comfy.latent_formats.Flux().process_in(self.vae.encode(control_image)) + if self.model_patch.model.additional_in_dim > 0: + if self.mask is None: + mask_ = torch.zeros_like(latent_image)[:, :1] + else: + mask_ = comfy.utils.common_upscale(self.mask.mean(dim=1, keepdim=True), latent_image.shape[-1], latent_image.shape[-2], "bilinear", "none") + if inpaint_image is None: + inpaint_image = torch.ones_like(control_image) * 0.5 + + inpaint_image_latent = comfy.latent_formats.Flux().process_in(self.vae.encode(inpaint_image)) + + return torch.cat([latent_image, mask_, inpaint_image_latent], dim=1) + else: + return latent_image def __call__(self, kwargs): x = kwargs.get("x") img = kwargs.get("img") + img_input = kwargs.get("img_input") txt = kwargs.get("txt") pe = kwargs.get("pe") vec = kwargs.get("vec") 
block_index = kwargs.get("block_index") + block_type = kwargs.get("block_type", "") spacial_compression = self.vae.spacial_compression_encode() if self.encoded_image is None or self.encoded_image_size != (x.shape[-2] * spacial_compression, x.shape[-1] * spacial_compression): image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center") + inpaint_scaled = None + if self.inpaint_image is not None: + inpaint_scaled = comfy.utils.common_upscale(self.inpaint_image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center").movedim(1, -1) loaded_models = comfy.model_management.loaded_models(only_currently_used=True) - self.encoded_image = self.encode_latent_cond(image_scaled.movedim(1, -1)) + self.encoded_image = self.encode_latent_cond(image_scaled.movedim(1, -1), inpaint_scaled) self.encoded_image_size = (image_scaled.shape[-2], image_scaled.shape[-1]) comfy.model_management.load_models_gpu(loaded_models) - cnet_index = (block_index // 5) - cnet_index_float = (block_index / 5) + cnet_blocks = self.model_patch.model.n_control_layers + div = round(30 / cnet_blocks) + + cnet_index = (block_index // div) + cnet_index_float = (block_index / div) kwargs.pop("img") # we do ops in place kwargs.pop("txt") - cnet_blocks = self.model_patch.model.n_control_layers if cnet_index_float > (cnet_blocks - 1): self.temp_data = None return kwargs if self.temp_data is None or self.temp_data[0] > cnet_index: - self.temp_data = (-1, (None, self.model_patch.model(txt, self.encoded_image.to(img.dtype), pe, vec))) + if block_type == "noise_refiner": + self.temp_data = (-3, (None, self.model_patch.model(txt, self.encoded_image.to(img.dtype), pe, vec))) + else: + self.temp_data = (-1, (None, self.model_patch.model(txt, self.encoded_image.to(img.dtype), pe, vec))) - while self.temp_data[0] < cnet_index and (self.temp_data[0] + 1) < cnet_blocks: + if block_type == "noise_refiner": next_layer = self.temp_data[0] + 1 - self.temp_data = (next_layer, self.model_patch.model.forward_control_block(next_layer, self.temp_data[1][1], img[:, :self.temp_data[1][1].shape[1]], None, pe, vec)) + self.temp_data = (next_layer, self.model_patch.model.forward_noise_refiner_block(block_index, self.temp_data[1][1], img_input[:, :self.temp_data[1][1].shape[1]], None, pe, vec)) + if self.temp_data[1][0] is not None: + img[:, :self.temp_data[1][0].shape[1]] += (self.temp_data[1][0] * self.strength) + else: + while self.temp_data[0] < cnet_index and (self.temp_data[0] + 1) < cnet_blocks: + next_layer = self.temp_data[0] + 1 + self.temp_data = (next_layer, self.model_patch.model.forward_control_block(next_layer, self.temp_data[1][1], img_input[:, :self.temp_data[1][1].shape[1]], None, pe, vec)) - if cnet_index_float == self.temp_data[0]: - img[:, :self.temp_data[1][0].shape[1]] += (self.temp_data[1][0] * self.strength) - if cnet_blocks == self.temp_data[0] + 1: - self.temp_data = None + if cnet_index_float == self.temp_data[0]: + img[:, :self.temp_data[1][0].shape[1]] += (self.temp_data[1][0] * self.strength) + if cnet_blocks == self.temp_data[0] + 1: + self.temp_data = None return kwargs @@ -386,7 +422,9 @@ class QwenImageDiffsynthControlnet: mask = 1.0 - mask if isinstance(model_patch.model, comfy.ldm.lumina.controlnet.ZImage_Control): - model_patched.set_model_double_block_patch(ZImageControlPatch(model_patch, vae, image, strength)) + patch = ZImageControlPatch(model_patch, vae, image, strength, mask=mask) + 
model_patched.set_model_noise_refiner_patch(patch) + model_patched.set_model_double_block_patch(patch) else: model_patched.set_model_double_block_patch(DiffSynthCnetPatch(model_patch, vae, image, strength, mask)) return (model_patched,) From 971cefe7d4ca15c949d5d901a663cb66562a4f10 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sat, 13 Dec 2025 15:45:23 -0800 Subject: [PATCH 1041/1073] Fix pytorch warnings. (#11314) --- comfy/ops.py | 2 +- comfy/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 0384c8717..16889bb82 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -592,7 +592,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec quant_conf = {"format": self.quant_format} if self._full_precision_mm: quant_conf["full_precision_matrix_mult"] = True - sd["{}comfy_quant".format(prefix)] = torch.frombuffer(json.dumps(quant_conf).encode('utf-8'), dtype=torch.uint8) + sd["{}comfy_quant".format(prefix)] = torch.tensor(list(json.dumps(quant_conf).encode('utf-8')), dtype=torch.uint8) return sd def _forward(self, input, weight, bias): diff --git a/comfy/utils.py b/comfy/utils.py index 9dc0d76ac..3866cda2e 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -1262,6 +1262,6 @@ def convert_old_quants(state_dict, model_prefix="", metadata={}): if quant_metadata is not None: layers = quant_metadata["layers"] for k, v in layers.items(): - state_dict["{}.comfy_quant".format(k)] = torch.frombuffer(json.dumps(v).encode('utf-8'), dtype=torch.uint8) + state_dict["{}.comfy_quant".format(k)] = torch.tensor(list(json.dumps(v).encode('utf-8')), dtype=torch.uint8) return state_dict, metadata From 6592bffc609da4738b111dbffca1f473972f3574 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Sun, 14 Dec 2025 13:03:29 +0800 Subject: [PATCH 1042/1073] seeds_2: add phi_2 variant and sampler node (#11309) * Add phi_2 solver type to seeds_2 * Add sampler node of seeds_2 --- comfy/k_diffusion/sampling.py | 15 ++++++++++++--- comfy_extras/nodes_custom_sampler.py | 26 ++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 0e2cda291..753c66afa 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1557,10 +1557,13 @@ def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None @torch.no_grad() -def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=0.5): +def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=0.5, solver_type="phi_1"): """SEEDS-2 - Stochastic Explicit Exponential Derivative-free Solvers (VP Data Prediction) stage 2. 
arXiv: https://arxiv.org/abs/2305.14267 (NeurIPS 2023) """ + if solver_type not in {"phi_1", "phi_2"}: + raise ValueError("solver_type must be 'phi_1' or 'phi_2'") + extra_args = {} if extra_args is None else extra_args seed = extra_args.get("seed", None) noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler @@ -1600,8 +1603,14 @@ def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=Non denoised_2 = model(x_2, sigma_s_1 * s_in, **extra_args) # Step 2 - denoised_d = torch.lerp(denoised, denoised_2, fac) - x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * ei_h_phi_1(-h_eta) * denoised_d + if solver_type == "phi_1": + denoised_d = torch.lerp(denoised, denoised_2, fac) + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * ei_h_phi_1(-h_eta) * denoised_d + elif solver_type == "phi_2": + b2 = ei_h_phi_2(-h_eta) / r + b1 = ei_h_phi_1(-h_eta) - b2 + x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x - alpha_t * (b1 * denoised + b2 * denoised_2) + if inject_noise: segment_factor = (r - 1) * h * eta sde_noise = sde_noise * segment_factor.exp() diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index fbb080886..71ea4e9ec 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -659,6 +659,31 @@ class SamplerSASolver(io.ComfyNode): get_sampler = execute +class SamplerSEEDS2(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SamplerSEEDS2", + category="sampling/custom_sampling/samplers", + inputs=[ + io.Combo.Input("solver_type", options=["phi_1", "phi_2"]), + io.Float.Input("eta", default=1.0, min=0.0, max=100.0, step=0.01, round=False, tooltip="Stochastic strength"), + io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False, tooltip="SDE noise multiplier"), + io.Float.Input("r", default=0.5, min=0.01, max=1.0, step=0.01, round=False, tooltip="Relative step size for the intermediate stage (c2 node)"), + ], + outputs=[io.Sampler.Output()] + ) + + @classmethod + def execute(cls, solver_type, eta, s_noise, r) -> io.NodeOutput: + sampler_name = "seeds_2" + sampler = comfy.samplers.ksampler( + sampler_name, + {"eta": eta, "s_noise": s_noise, "r": r, "solver_type": solver_type}, + ) + return io.NodeOutput(sampler) + + class Noise_EmptyNoise: def __init__(self): self.seed = 0 @@ -996,6 +1021,7 @@ class CustomSamplersExtension(ComfyExtension): SamplerDPMAdaptative, SamplerER_SDE, SamplerSASolver, + SamplerSEEDS2, SplitSigmas, SplitSigmasDenoise, FlipSigmas, From 5ac3b26a7dedb9b13c681abe8733c54f13353273 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Sun, 14 Dec 2025 01:02:50 -0800 Subject: [PATCH 1043/1073] Update warning for old pytorch version. (#11319) Versions below 2.4 are no longer supported. We will not break support on purpose but will not fix it if we do. --- comfy/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/utils.py b/comfy/utils.py index 3866cda2e..8d4e2b445 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -53,7 +53,7 @@ if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in ALWAYS_SAFE_LOAD = True logging.info("Checkpoint files will always be loaded safely.") else: - logging.info("Warning, you are using an old pytorch version and some ckpt/pt files might be loaded unsafely. 
Upgrading to 2.4 or above is recommended.")
+        logging.warning("Warning, you are using an old pytorch version and some ckpt/pt files might be loaded unsafely. Upgrading to 2.4 or above is recommended as older versions of pytorch are no longer supported.")
 
 def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
     if device is None:

From a5e85017d8574cb99024d320f7a53a77a9e6aa5a Mon Sep 17 00:00:00 2001
From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com>
Date: Tue, 16 Dec 2025 04:24:01 +0900
Subject: [PATCH 1044/1073] bump manager requirements to 4.0.3b5 (#11324)

---
 manager_requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/manager_requirements.txt b/manager_requirements.txt
index b95cefb74..5ef0d3a1d 100644
--- a/manager_requirements.txt
+++ b/manager_requirements.txt
@@ -1 +1 @@
-comfyui_manager==4.0.3b4
+comfyui_manager==4.0.3b5

From 51347f9fb8a8e60d3add049c6f241822c84c8a87 Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Tue, 16 Dec 2025 05:28:55 +0800
Subject: [PATCH 1045/1073] chore: update workflow templates to v0.7.59 (#11337)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 9e9b25328..117260515 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.34.8
-comfyui-workflow-templates==0.7.54
+comfyui-workflow-templates==0.7.59
 comfyui-embedded-docs==0.3.1
 torch
 torchsde

From 5cb1e0c9a0439f1f95a0b372474bd4845e38009c Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 15 Dec 2025 13:49:29 -0800
Subject: [PATCH 1046/1073] Disable guards on transformer_options when torch.compile (#11317)

---
 comfy_extras/nodes_torch_compile.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/comfy_extras/nodes_torch_compile.py b/comfy_extras/nodes_torch_compile.py
index adbeece2f..c43e8ad63 100644
--- a/comfy_extras/nodes_torch_compile.py
+++ b/comfy_extras/nodes_torch_compile.py
@@ -2,6 +2,8 @@ from typing_extensions import override
 from comfy_api.latest import ComfyExtension, io
 from comfy_api.torch_helpers import set_torch_compile_wrapper
 
+def skip_torch_compile_dict(guard_entries):
+    return [("transformer_options" not in entry.name) for entry in guard_entries]
 
 class TorchCompileModel(io.ComfyNode):
     @classmethod
@@ -23,7 +25,7 @@ class TorchCompileModel(io.ComfyNode):
     @classmethod
     def execute(cls, model, backend) -> io.NodeOutput:
         m = model.clone()
-        set_torch_compile_wrapper(model=m, backend=backend)
+        set_torch_compile_wrapper(model=m, backend=backend, options={"guard_filter_fn": skip_torch_compile_dict})
         return io.NodeOutput(m)
 
 
From af91eb6c9931d0a2c99cf8a6d4974a6abf9a09fa Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Tue, 16 Dec 2025 01:30:24 +0200
Subject: [PATCH 1047/1073] api-nodes: drop Kling v1 model (#11307)

---
 comfy_api_nodes/nodes_kling.py | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py
index e545fe490..1a6364fa0 100644
--- a/comfy_api_nodes/nodes_kling.py
+++ b/comfy_api_nodes/nodes_kling.py
@@ -105,10 +105,6 @@ AVERAGE_DURATION_VIDEO_EXTEND = 320
 
 
 MODE_TEXT2VIDEO = {
-    "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"),
-    "standard mode / 10s duration / kling-v1": ("std", "10", "kling-v1"),
-    "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"),
-    "pro 
mode / 10s duration / kling-v1": ("pro", "10", "kling-v1"), "standard mode / 5s duration / kling-v1-6": ("std", "5", "kling-v1-6"), "standard mode / 10s duration / kling-v1-6": ("std", "10", "kling-v1-6"), "pro mode / 5s duration / kling-v2-master": ("pro", "5", "kling-v2-master"), @@ -129,8 +125,6 @@ See: [Kling API Docs Capability Map](https://app.klingai.com/global/dev/document MODE_START_END_FRAME = { - "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"), - "pro mode / 5s duration / kling-v1": ("pro", "5", "kling-v1"), "pro mode / 5s duration / kling-v1-5": ("pro", "5", "kling-v1-5"), "pro mode / 10s duration / kling-v1-5": ("pro", "10", "kling-v1-5"), "pro mode / 5s duration / kling-v1-6": ("pro", "5", "kling-v1-6"), @@ -754,7 +748,7 @@ class KlingTextToVideoNode(IO.ComfyNode): IO.Combo.Input( "mode", options=modes, - default=modes[4], + default=modes[8], tooltip="The configuration to use for the video generation following the format: mode / duration / model_name.", ), ], @@ -1489,7 +1483,7 @@ class KlingStartEndFrameNode(IO.ComfyNode): IO.Combo.Input( "mode", options=modes, - default=modes[8], + default=modes[6], tooltip="The configuration to use for the video generation following the format: mode / duration / model_name.", ), ], @@ -1952,7 +1946,7 @@ class KlingImageGenerationNode(IO.ComfyNode): IO.Combo.Input( "model_name", options=[i.value for i in KlingImageGenModelName], - default="kling-v1", + default="kling-v2", ), IO.Combo.Input( "aspect_ratio", From 33c7f1179d4a961e4ca1dd78188c5134e0ee8e8c Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 16 Dec 2025 01:32:29 +0200 Subject: [PATCH 1048/1073] drop Pika API nodes (#11306) --- comfy_api_nodes/apis/pika_api.py | 100 ------ comfy_api_nodes/nodes_pika.py | 575 ------------------------------- nodes.py | 1 - 3 files changed, 676 deletions(-) delete mode 100644 comfy_api_nodes/apis/pika_api.py delete mode 100644 comfy_api_nodes/nodes_pika.py diff --git a/comfy_api_nodes/apis/pika_api.py b/comfy_api_nodes/apis/pika_api.py deleted file mode 100644 index 232558cd7..000000000 --- a/comfy_api_nodes/apis/pika_api.py +++ /dev/null @@ -1,100 +0,0 @@ -from typing import Optional -from enum import Enum -from pydantic import BaseModel, Field - - -class Pikaffect(str, Enum): - Cake_ify = "Cake-ify" - Crumble = "Crumble" - Crush = "Crush" - Decapitate = "Decapitate" - Deflate = "Deflate" - Dissolve = "Dissolve" - Explode = "Explode" - Eye_pop = "Eye-pop" - Inflate = "Inflate" - Levitate = "Levitate" - Melt = "Melt" - Peel = "Peel" - Poke = "Poke" - Squish = "Squish" - Ta_da = "Ta-da" - Tear = "Tear" - - -class PikaBodyGenerate22C2vGenerate22PikascenesPost(BaseModel): - aspectRatio: Optional[float] = Field(None, description='Aspect ratio (width / height)') - duration: Optional[int] = Field(5) - ingredientsMode: str = Field(...) - negativePrompt: Optional[str] = Field(None) - promptText: Optional[str] = Field(None) - resolution: Optional[str] = Field('1080p') - seed: Optional[int] = Field(None) - - -class PikaGenerateResponse(BaseModel): - video_id: str = Field(...) 
- - -class PikaBodyGenerate22I2vGenerate22I2vPost(BaseModel): - duration: Optional[int] = 5 - negativePrompt: Optional[str] = Field(None) - promptText: Optional[str] = Field(None) - resolution: Optional[str] = '1080p' - seed: Optional[int] = Field(None) - - -class PikaBodyGenerate22KeyframeGenerate22PikaframesPost(BaseModel): - duration: Optional[int] = Field(None, ge=5, le=10) - negativePrompt: Optional[str] = Field(None) - promptText: str = Field(...) - resolution: Optional[str] = '1080p' - seed: Optional[int] = Field(None) - - -class PikaBodyGenerate22T2vGenerate22T2vPost(BaseModel): - aspectRatio: Optional[float] = Field( - 1.7777777777777777, - description='Aspect ratio (width / height)', - ge=0.4, - le=2.5, - ) - duration: Optional[int] = 5 - negativePrompt: Optional[str] = Field(None) - promptText: str = Field(...) - resolution: Optional[str] = '1080p' - seed: Optional[int] = Field(None) - - -class PikaBodyGeneratePikadditionsGeneratePikadditionsPost(BaseModel): - negativePrompt: Optional[str] = Field(None) - promptText: Optional[str] = Field(None) - seed: Optional[int] = Field(None) - - -class PikaBodyGeneratePikaffectsGeneratePikaffectsPost(BaseModel): - negativePrompt: Optional[str] = Field(None) - pikaffect: Optional[str] = None - promptText: Optional[str] = Field(None) - seed: Optional[int] = Field(None) - - -class PikaBodyGeneratePikaswapsGeneratePikaswapsPost(BaseModel): - negativePrompt: Optional[str] = Field(None) - promptText: Optional[str] = Field(None) - seed: Optional[int] = Field(None) - modifyRegionRoi: Optional[str] = Field(None) - - -class PikaStatusEnum(str, Enum): - queued = "queued" - started = "started" - finished = "finished" - failed = "failed" - - -class PikaVideoResponse(BaseModel): - id: str = Field(...) - progress: Optional[int] = Field(None) - status: PikaStatusEnum - url: Optional[str] = Field(None) diff --git a/comfy_api_nodes/nodes_pika.py b/comfy_api_nodes/nodes_pika.py deleted file mode 100644 index acd88c391..000000000 --- a/comfy_api_nodes/nodes_pika.py +++ /dev/null @@ -1,575 +0,0 @@ -""" -Pika x ComfyUI API Nodes - -Pika API docs: https://pika-827374fb.mintlify.app/api-reference -""" -from __future__ import annotations - -from io import BytesIO -import logging -from typing import Optional - -import torch - -from typing_extensions import override -from comfy_api.latest import ComfyExtension, IO -from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput -from comfy_api_nodes.apis import pika_api as pika_defs -from comfy_api_nodes.util import ( - validate_string, - download_url_to_video_output, - tensor_to_bytesio, - ApiEndpoint, - sync_op, - poll_op, -) - - -PATH_PIKADDITIONS = "/proxy/pika/generate/pikadditions" -PATH_PIKASWAPS = "/proxy/pika/generate/pikaswaps" -PATH_PIKAFFECTS = "/proxy/pika/generate/pikaffects" - -PIKA_API_VERSION = "2.2" -PATH_TEXT_TO_VIDEO = f"/proxy/pika/generate/{PIKA_API_VERSION}/t2v" -PATH_IMAGE_TO_VIDEO = f"/proxy/pika/generate/{PIKA_API_VERSION}/i2v" -PATH_PIKAFRAMES = f"/proxy/pika/generate/{PIKA_API_VERSION}/pikaframes" -PATH_PIKASCENES = f"/proxy/pika/generate/{PIKA_API_VERSION}/pikascenes" - -PATH_VIDEO_GET = "/proxy/pika/videos" - - -async def execute_task( - task_id: str, - cls: type[IO.ComfyNode], -) -> IO.NodeOutput: - final_response: pika_defs.PikaVideoResponse = await poll_op( - cls, - ApiEndpoint(path=f"{PATH_VIDEO_GET}/{task_id}"), - response_model=pika_defs.PikaVideoResponse, - status_extractor=lambda response: (response.status.value if response.status else None), - 
progress_extractor=lambda response: (response.progress if hasattr(response, "progress") else None), - estimated_duration=60, - max_poll_attempts=240, - ) - if not final_response.url: - error_msg = f"Pika task {task_id} succeeded but no video data found in response:\n{final_response}" - logging.error(error_msg) - raise Exception(error_msg) - video_url = final_response.url - logging.info("Pika task %s succeeded. Video URL: %s", task_id, video_url) - return IO.NodeOutput(await download_url_to_video_output(video_url)) - - -def get_base_inputs_types() -> list[IO.Input]: - """Get the base required inputs types common to all Pika nodes.""" - return [ - IO.String.Input("prompt_text", multiline=True), - IO.String.Input("negative_prompt", multiline=True), - IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), - IO.Combo.Input("resolution", options=["1080p", "720p"], default="1080p"), - IO.Combo.Input("duration", options=[5, 10], default=5), - ] - - -class PikaImageToVideo(IO.ComfyNode): - """Pika 2.2 Image to Video Node.""" - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="PikaImageToVideoNode2_2", - display_name="Pika Image to Video", - description="Sends an image and prompt to the Pika API v2.2 to generate a video.", - category="api node/video/Pika", - inputs=[ - IO.Image.Input("image", tooltip="The image to convert to video"), - *get_base_inputs_types(), - ], - outputs=[IO.Video.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - is_deprecated=True, - ) - - @classmethod - async def execute( - cls, - image: torch.Tensor, - prompt_text: str, - negative_prompt: str, - seed: int, - resolution: str, - duration: int, - ) -> IO.NodeOutput: - image_bytes_io = tensor_to_bytesio(image) - pika_files = {"image": ("image.png", image_bytes_io, "image/png")} - pika_request_data = pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost( - promptText=prompt_text, - negativePrompt=negative_prompt, - seed=seed, - resolution=resolution, - duration=duration, - ) - initial_operation = await sync_op( - cls, - ApiEndpoint(path=PATH_IMAGE_TO_VIDEO, method="POST"), - response_model=pika_defs.PikaGenerateResponse, - data=pika_request_data, - files=pika_files, - content_type="multipart/form-data", - ) - return await execute_task(initial_operation.video_id, cls) - - -class PikaTextToVideoNode(IO.ComfyNode): - """Pika Text2Video v2.2 Node.""" - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="PikaTextToVideoNode2_2", - display_name="Pika Text to Video", - description="Sends a text prompt to the Pika API v2.2 to generate a video.", - category="api node/video/Pika", - inputs=[ - *get_base_inputs_types(), - IO.Float.Input( - "aspect_ratio", - step=0.001, - min=0.4, - max=2.5, - default=1.7777777777777777, - tooltip="Aspect ratio (width / height)", - ) - ], - outputs=[IO.Video.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - is_deprecated=True, - ) - - @classmethod - async def execute( - cls, - prompt_text: str, - negative_prompt: str, - seed: int, - resolution: str, - duration: int, - aspect_ratio: float, - ) -> IO.NodeOutput: - initial_operation = await sync_op( - cls, - ApiEndpoint(path=PATH_TEXT_TO_VIDEO, method="POST"), - response_model=pika_defs.PikaGenerateResponse, - data=pika_defs.PikaBodyGenerate22T2vGenerate22T2vPost( - promptText=prompt_text, - 
negativePrompt=negative_prompt, - seed=seed, - resolution=resolution, - duration=duration, - aspectRatio=aspect_ratio, - ), - content_type="application/x-www-form-urlencoded", - ) - return await execute_task(initial_operation.video_id, cls) - - -class PikaScenes(IO.ComfyNode): - """PikaScenes v2.2 Node.""" - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="PikaScenesV2_2", - display_name="Pika Scenes (Video Image Composition)", - description="Combine your images to create a video with the objects in them. Upload multiple images as ingredients and generate a high-quality video that incorporates all of them.", - category="api node/video/Pika", - inputs=[ - *get_base_inputs_types(), - IO.Combo.Input( - "ingredients_mode", - options=["creative", "precise"], - default="creative", - ), - IO.Float.Input( - "aspect_ratio", - step=0.001, - min=0.4, - max=2.5, - default=1.7777777777777777, - tooltip="Aspect ratio (width / height)", - ), - IO.Image.Input( - "image_ingredient_1", - optional=True, - tooltip="Image that will be used as ingredient to create a video.", - ), - IO.Image.Input( - "image_ingredient_2", - optional=True, - tooltip="Image that will be used as ingredient to create a video.", - ), - IO.Image.Input( - "image_ingredient_3", - optional=True, - tooltip="Image that will be used as ingredient to create a video.", - ), - IO.Image.Input( - "image_ingredient_4", - optional=True, - tooltip="Image that will be used as ingredient to create a video.", - ), - IO.Image.Input( - "image_ingredient_5", - optional=True, - tooltip="Image that will be used as ingredient to create a video.", - ), - ], - outputs=[IO.Video.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - is_deprecated=True, - ) - - @classmethod - async def execute( - cls, - prompt_text: str, - negative_prompt: str, - seed: int, - resolution: str, - duration: int, - ingredients_mode: str, - aspect_ratio: float, - image_ingredient_1: Optional[torch.Tensor] = None, - image_ingredient_2: Optional[torch.Tensor] = None, - image_ingredient_3: Optional[torch.Tensor] = None, - image_ingredient_4: Optional[torch.Tensor] = None, - image_ingredient_5: Optional[torch.Tensor] = None, - ) -> IO.NodeOutput: - all_image_bytes_io = [] - for image in [ - image_ingredient_1, - image_ingredient_2, - image_ingredient_3, - image_ingredient_4, - image_ingredient_5, - ]: - if image is not None: - all_image_bytes_io.append(tensor_to_bytesio(image)) - - pika_files = [ - ("images", (f"image_{i}.png", image_bytes_io, "image/png")) - for i, image_bytes_io in enumerate(all_image_bytes_io) - ] - - pika_request_data = pika_defs.PikaBodyGenerate22C2vGenerate22PikascenesPost( - ingredientsMode=ingredients_mode, - promptText=prompt_text, - negativePrompt=negative_prompt, - seed=seed, - resolution=resolution, - duration=duration, - aspectRatio=aspect_ratio, - ) - initial_operation = await sync_op( - cls, - ApiEndpoint(path=PATH_PIKASCENES, method="POST"), - response_model=pika_defs.PikaGenerateResponse, - data=pika_request_data, - files=pika_files, - content_type="multipart/form-data", - ) - - return await execute_task(initial_operation.video_id, cls) - - -class PikAdditionsNode(IO.ComfyNode): - """Pika Pikadditions Node. 
Add an image into a video.""" - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="Pikadditions", - display_name="Pikadditions (Video Object Insertion)", - description="Add any object or image into your video. Upload a video and specify what you'd like to add to create a seamlessly integrated result.", - category="api node/video/Pika", - inputs=[ - IO.Video.Input("video", tooltip="The video to add an image to."), - IO.Image.Input("image", tooltip="The image to add to the video."), - IO.String.Input("prompt_text", multiline=True), - IO.String.Input("negative_prompt", multiline=True), - IO.Int.Input( - "seed", - min=0, - max=0xFFFFFFFF, - control_after_generate=True, - ), - ], - outputs=[IO.Video.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - is_deprecated=True, - ) - - @classmethod - async def execute( - cls, - video: VideoInput, - image: torch.Tensor, - prompt_text: str, - negative_prompt: str, - seed: int, - ) -> IO.NodeOutput: - video_bytes_io = BytesIO() - video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) - video_bytes_io.seek(0) - - image_bytes_io = tensor_to_bytesio(image) - pika_files = { - "video": ("video.mp4", video_bytes_io, "video/mp4"), - "image": ("image.png", image_bytes_io, "image/png"), - } - pika_request_data = pika_defs.PikaBodyGeneratePikadditionsGeneratePikadditionsPost( - promptText=prompt_text, - negativePrompt=negative_prompt, - seed=seed, - ) - initial_operation = await sync_op( - cls, - ApiEndpoint(path=PATH_PIKADDITIONS, method="POST"), - response_model=pika_defs.PikaGenerateResponse, - data=pika_request_data, - files=pika_files, - content_type="multipart/form-data", - ) - - return await execute_task(initial_operation.video_id, cls) - - -class PikaSwapsNode(IO.ComfyNode): - """Pika Pikaswaps Node.""" - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="Pikaswaps", - display_name="Pika Swaps (Video Object Replacement)", - description="Swap out any object or region of your video with a new image or object. 
Define areas to replace either with a mask or coordinates.", - category="api node/video/Pika", - inputs=[ - IO.Video.Input("video", tooltip="The video to swap an object in."), - IO.Image.Input( - "image", - tooltip="The image used to replace the masked object in the video.", - optional=True, - ), - IO.Mask.Input( - "mask", - tooltip="Use the mask to define areas in the video to replace.", - optional=True, - ), - IO.String.Input("prompt_text", multiline=True, optional=True), - IO.String.Input("negative_prompt", multiline=True, optional=True), - IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True, optional=True), - IO.String.Input( - "region_to_modify", - multiline=True, - optional=True, - tooltip="Plaintext description of the object / region to modify.", - ), - ], - outputs=[IO.Video.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - is_deprecated=True, - ) - - @classmethod - async def execute( - cls, - video: VideoInput, - image: Optional[torch.Tensor] = None, - mask: Optional[torch.Tensor] = None, - prompt_text: str = "", - negative_prompt: str = "", - seed: int = 0, - region_to_modify: str = "", - ) -> IO.NodeOutput: - video_bytes_io = BytesIO() - video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264) - video_bytes_io.seek(0) - pika_files = { - "video": ("video.mp4", video_bytes_io, "video/mp4"), - } - if mask is not None: - pika_files["modifyRegionMask"] = ("mask.png", tensor_to_bytesio(mask), "image/png") - if image is not None: - pika_files["image"] = ("image.png", tensor_to_bytesio(image), "image/png") - - pika_request_data = pika_defs.PikaBodyGeneratePikaswapsGeneratePikaswapsPost( - promptText=prompt_text, - negativePrompt=negative_prompt, - seed=seed, - modifyRegionRoi=region_to_modify if region_to_modify else None, - ) - initial_operation = await sync_op( - cls, - ApiEndpoint(path=PATH_PIKASWAPS, method="POST"), - response_model=pika_defs.PikaGenerateResponse, - data=pika_request_data, - files=pika_files, - content_type="multipart/form-data", - ) - return await execute_task(initial_operation.video_id, cls) - - -class PikaffectsNode(IO.ComfyNode): - """Pika Pikaffects Node.""" - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="Pikaffects", - display_name="Pikaffects (Video Effects)", - description="Generate a video with a specific Pikaffect. 
Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear", - category="api node/video/Pika", - inputs=[ - IO.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."), - IO.Combo.Input( - "pikaffect", options=pika_defs.Pikaffect, default="Cake-ify" - ), - IO.String.Input("prompt_text", multiline=True), - IO.String.Input("negative_prompt", multiline=True), - IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True), - ], - outputs=[IO.Video.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - is_deprecated=True, - ) - - @classmethod - async def execute( - cls, - image: torch.Tensor, - pikaffect: str, - prompt_text: str, - negative_prompt: str, - seed: int, - ) -> IO.NodeOutput: - initial_operation = await sync_op( - cls, - ApiEndpoint(path=PATH_PIKAFFECTS, method="POST"), - response_model=pika_defs.PikaGenerateResponse, - data=pika_defs.PikaBodyGeneratePikaffectsGeneratePikaffectsPost( - pikaffect=pikaffect, - promptText=prompt_text, - negativePrompt=negative_prompt, - seed=seed, - ), - files={"image": ("image.png", tensor_to_bytesio(image), "image/png")}, - content_type="multipart/form-data", - ) - return await execute_task(initial_operation.video_id, cls) - - -class PikaStartEndFrameNode(IO.ComfyNode): - """PikaFrames v2.2 Node.""" - - @classmethod - def define_schema(cls) -> IO.Schema: - return IO.Schema( - node_id="PikaStartEndFrameNode2_2", - display_name="Pika Start and End Frame to Video", - description="Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them.", - category="api node/video/Pika", - inputs=[ - IO.Image.Input("image_start", tooltip="The first image to combine."), - IO.Image.Input("image_end", tooltip="The last image to combine."), - *get_base_inputs_types(), - ], - outputs=[IO.Video.Output()], - hidden=[ - IO.Hidden.auth_token_comfy_org, - IO.Hidden.api_key_comfy_org, - IO.Hidden.unique_id, - ], - is_api_node=True, - is_deprecated=True, - ) - - @classmethod - async def execute( - cls, - image_start: torch.Tensor, - image_end: torch.Tensor, - prompt_text: str, - negative_prompt: str, - seed: int, - resolution: str, - duration: int, - ) -> IO.NodeOutput: - validate_string(prompt_text, field_name="prompt_text", min_length=1) - pika_files = [ - ("keyFrames", ("image_start.png", tensor_to_bytesio(image_start), "image/png")), - ("keyFrames", ("image_end.png", tensor_to_bytesio(image_end), "image/png")), - ] - initial_operation = await sync_op( - cls, - ApiEndpoint(path=PATH_PIKAFRAMES, method="POST"), - response_model=pika_defs.PikaGenerateResponse, - data=pika_defs.PikaBodyGenerate22KeyframeGenerate22PikaframesPost( - promptText=prompt_text, - negativePrompt=negative_prompt, - seed=seed, - resolution=resolution, - duration=duration, - ), - files=pika_files, - content_type="multipart/form-data", - ) - return await execute_task(initial_operation.video_id, cls) - - -class PikaApiNodesExtension(ComfyExtension): - @override - async def get_node_list(self) -> list[type[IO.ComfyNode]]: - return [ - PikaImageToVideo, - PikaTextToVideoNode, - PikaScenes, - PikAdditionsNode, - PikaSwapsNode, - PikaffectsNode, - PikaStartEndFrameNode, - ] - - -async def comfy_entrypoint() -> PikaApiNodesExtension: - return PikaApiNodesExtension() diff --git a/nodes.py b/nodes.py 
index 8678f510a..3fa543294 100644 --- a/nodes.py +++ b/nodes.py @@ -2384,7 +2384,6 @@ async def init_builtin_api_nodes(): "nodes_recraft.py", "nodes_pixverse.py", "nodes_stability.py", - "nodes_pika.py", "nodes_runway.py", "nodes_sora.py", "nodes_topaz.py", From dbd330454ada04609c69fda2ae7c002d7ea05f67 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Tue, 16 Dec 2025 08:57:39 +0900 Subject: [PATCH 1049/1073] feat(preview): add per-queue live preview method override (#11261) - Add set_preview_method() to override live preview method per queue item - Read extra_data.preview_method from /prompt request - Support values: taesd, latent2rgb, none, auto, default - "default" or unset uses server's CLI --preview-method setting - Add 44 tests (37 unit + 7 E2E) --- comfy/cli_args.py | 7 + execution.py | 3 + latent_preview.py | 10 + .../preview_method_override_test.py | 352 +++++++++++++++++ tests/execution/test_preview_method.py | 358 ++++++++++++++++++ 5 files changed, 730 insertions(+) create mode 100644 tests-unit/execution_test/preview_method_override_test.py create mode 100644 tests/execution/test_preview_method.py diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 209fc185b..dae9a895d 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -97,6 +97,13 @@ class LatentPreviewMethod(enum.Enum): Latent2RGB = "latent2rgb" TAESD = "taesd" + @classmethod + def from_string(cls, value: str): + for member in cls: + if member.value == value: + return member + return None + parser.add_argument("--preview-method", type=LatentPreviewMethod, default=LatentPreviewMethod.NoPreviews, help="Default preview method for sampler nodes.", action=EnumAction) parser.add_argument("--preview-size", type=int, default=512, help="Sets the maximum preview size for sampler nodes.") diff --git a/execution.py b/execution.py index c2186ac98..0c239efd7 100644 --- a/execution.py +++ b/execution.py @@ -13,6 +13,7 @@ import asyncio import torch import comfy.model_management +from latent_preview import set_preview_method import nodes from comfy_execution.caching import ( BasicCache, @@ -669,6 +670,8 @@ class PromptExecutor: asyncio.run(self.execute_async(prompt, prompt_id, extra_data, execute_outputs)) async def execute_async(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): + set_preview_method(extra_data.get("preview_method")) + nodes.interrupt_processing(False) if "client_id" in extra_data: diff --git a/latent_preview.py b/latent_preview.py index 66bded4b9..d52e3f7a1 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -8,6 +8,8 @@ import folder_paths import comfy.utils import logging +default_preview_method = args.preview_method + MAX_PREVIEW_RESOLUTION = args.preview_size VIDEO_TAES = ["taehv", "lighttaew2_2", "lighttaew2_1", "lighttaehy1_5"] @@ -125,3 +127,11 @@ def prepare_callback(model, steps, x0_output_dict=None): pbar.update_absolute(step + 1, total_steps, preview_bytes) return callback +def set_preview_method(override: str = None): + if override and override != "default": + method = LatentPreviewMethod.from_string(override) + if method is not None: + args.preview_method = method + return + args.preview_method = default_preview_method + diff --git a/tests-unit/execution_test/preview_method_override_test.py b/tests-unit/execution_test/preview_method_override_test.py new file mode 100644 index 000000000..79432d610 --- /dev/null +++ b/tests-unit/execution_test/preview_method_override_test.py @@ -0,0 +1,352 @@ +""" +Unit tests for Queue-specific Preview 
Method Override feature. + +Tests the preview method override functionality: +- LatentPreviewMethod.from_string() method +- set_preview_method() function in latent_preview.py +- default_preview_method variable +- Integration with args.preview_method +""" +import pytest +from comfy.cli_args import args, LatentPreviewMethod +from latent_preview import set_preview_method, default_preview_method + + +class TestLatentPreviewMethodFromString: + """Test LatentPreviewMethod.from_string() classmethod.""" + + @pytest.mark.parametrize("value,expected", [ + ("auto", LatentPreviewMethod.Auto), + ("latent2rgb", LatentPreviewMethod.Latent2RGB), + ("taesd", LatentPreviewMethod.TAESD), + ("none", LatentPreviewMethod.NoPreviews), + ]) + def test_valid_values_return_enum(self, value, expected): + """Valid string values should return corresponding enum.""" + assert LatentPreviewMethod.from_string(value) == expected + + @pytest.mark.parametrize("invalid", [ + "invalid", + "TAESD", # Case sensitive + "AUTO", # Case sensitive + "Latent2RGB", # Case sensitive + "latent", + "", + "default", # default is special, not a method + ]) + def test_invalid_values_return_none(self, invalid): + """Invalid string values should return None.""" + assert LatentPreviewMethod.from_string(invalid) is None + + +class TestLatentPreviewMethodEnumValues: + """Test LatentPreviewMethod enum has expected values.""" + + def test_enum_values(self): + """Verify enum values match expected strings.""" + assert LatentPreviewMethod.NoPreviews.value == "none" + assert LatentPreviewMethod.Auto.value == "auto" + assert LatentPreviewMethod.Latent2RGB.value == "latent2rgb" + assert LatentPreviewMethod.TAESD.value == "taesd" + + def test_enum_count(self): + """Verify exactly 4 preview methods exist.""" + assert len(LatentPreviewMethod) == 4 + + +class TestSetPreviewMethod: + """Test set_preview_method() function from latent_preview.py.""" + + def setup_method(self): + """Store original value before each test.""" + self.original = args.preview_method + + def teardown_method(self): + """Restore original value after each test.""" + args.preview_method = self.original + + def test_override_with_taesd(self): + """'taesd' should set args.preview_method to TAESD.""" + set_preview_method("taesd") + assert args.preview_method == LatentPreviewMethod.TAESD + + def test_override_with_latent2rgb(self): + """'latent2rgb' should set args.preview_method to Latent2RGB.""" + set_preview_method("latent2rgb") + assert args.preview_method == LatentPreviewMethod.Latent2RGB + + def test_override_with_auto(self): + """'auto' should set args.preview_method to Auto.""" + set_preview_method("auto") + assert args.preview_method == LatentPreviewMethod.Auto + + def test_override_with_none_value(self): + """'none' should set args.preview_method to NoPreviews.""" + set_preview_method("none") + assert args.preview_method == LatentPreviewMethod.NoPreviews + + def test_default_restores_original(self): + """'default' should restore to default_preview_method.""" + # First override to something else + set_preview_method("taesd") + assert args.preview_method == LatentPreviewMethod.TAESD + + # Then use 'default' to restore + set_preview_method("default") + assert args.preview_method == default_preview_method + + def test_none_param_restores_original(self): + """None parameter should restore to default_preview_method.""" + # First override to something else + set_preview_method("taesd") + assert args.preview_method == LatentPreviewMethod.TAESD + + # Then use None to restore + 
set_preview_method(None) + assert args.preview_method == default_preview_method + + def test_empty_string_restores_original(self): + """Empty string should restore to default_preview_method.""" + set_preview_method("taesd") + set_preview_method("") + assert args.preview_method == default_preview_method + + def test_invalid_value_restores_original(self): + """Invalid value should restore to default_preview_method.""" + set_preview_method("taesd") + set_preview_method("invalid_method") + assert args.preview_method == default_preview_method + + def test_case_sensitive_invalid_restores(self): + """Case-mismatched values should restore to default.""" + set_preview_method("taesd") + set_preview_method("TAESD") # Wrong case + assert args.preview_method == default_preview_method + + +class TestDefaultPreviewMethod: + """Test default_preview_method module variable.""" + + def test_default_is_not_none(self): + """default_preview_method should not be None.""" + assert default_preview_method is not None + + def test_default_is_enum_member(self): + """default_preview_method should be a LatentPreviewMethod enum.""" + assert isinstance(default_preview_method, LatentPreviewMethod) + + def test_default_matches_args_initial(self): + """default_preview_method should match CLI default or user setting.""" + # This tests that default_preview_method was captured at module load + # After set_preview_method(None), args should equal default + original = args.preview_method + set_preview_method("taesd") + set_preview_method(None) + assert args.preview_method == default_preview_method + args.preview_method = original + + +class TestArgsPreviewMethodModification: + """Test args.preview_method can be modified correctly.""" + + def setup_method(self): + """Store original value before each test.""" + self.original = args.preview_method + + def teardown_method(self): + """Restore original value after each test.""" + args.preview_method = self.original + + def test_args_accepts_all_enum_values(self): + """args.preview_method should accept all LatentPreviewMethod values.""" + for method in LatentPreviewMethod: + args.preview_method = method + assert args.preview_method == method + + def test_args_modification_and_restoration(self): + """args.preview_method should be modifiable and restorable.""" + original = args.preview_method + + args.preview_method = LatentPreviewMethod.TAESD + assert args.preview_method == LatentPreviewMethod.TAESD + + args.preview_method = original + assert args.preview_method == original + + +class TestExecutionFlow: + """Test the execution flow pattern used in execution.py.""" + + def setup_method(self): + """Store original value before each test.""" + self.original = args.preview_method + + def teardown_method(self): + """Restore original value after each test.""" + args.preview_method = self.original + + def test_sequential_executions_with_different_methods(self): + """Simulate multiple queue executions with different preview methods.""" + # Execution 1: taesd + set_preview_method("taesd") + assert args.preview_method == LatentPreviewMethod.TAESD + + # Execution 2: none + set_preview_method("none") + assert args.preview_method == LatentPreviewMethod.NoPreviews + + # Execution 3: default (restore) + set_preview_method("default") + assert args.preview_method == default_preview_method + + # Execution 4: auto + set_preview_method("auto") + assert args.preview_method == LatentPreviewMethod.Auto + + # Execution 5: no override (None) + set_preview_method(None) + assert args.preview_method == 
default_preview_method + + def test_override_then_default_pattern(self): + """Test the pattern: override -> execute -> next call restores.""" + # First execution with override + set_preview_method("latent2rgb") + assert args.preview_method == LatentPreviewMethod.Latent2RGB + + # Second execution without override restores default + set_preview_method(None) + assert args.preview_method == default_preview_method + + def test_extra_data_simulation(self): + """Simulate extra_data.get('preview_method') patterns.""" + # Simulate: extra_data = {"preview_method": "taesd"} + extra_data = {"preview_method": "taesd"} + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.TAESD + + # Simulate: extra_data = {} + extra_data = {} + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == default_preview_method + + # Simulate: extra_data = {"preview_method": "default"} + extra_data = {"preview_method": "default"} + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == default_preview_method + + +class TestRealWorldScenarios: + """Tests using real-world prompt data patterns.""" + + def setup_method(self): + """Store original value before each test.""" + self.original = args.preview_method + + def teardown_method(self): + """Restore original value after each test.""" + args.preview_method = self.original + + def test_captured_prompt_without_preview_method(self): + """ + Test with captured prompt that has no preview_method. + Based on: tests-unit/execution_test/fixtures/default_prompt.json + """ + # Real captured extra_data structure (preview_method absent) + extra_data = { + "extra_pnginfo": {"workflow": {}}, + "client_id": "271314f0dabd48e5aaa488ed7a4ceb0d", + "create_time": 1765416558179 + } + + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == default_preview_method + + def test_captured_prompt_with_preview_method_taesd(self): + """Test captured prompt with preview_method: taesd.""" + extra_data = { + "extra_pnginfo": {"workflow": {}}, + "client_id": "271314f0dabd48e5aaa488ed7a4ceb0d", + "preview_method": "taesd" + } + + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.TAESD + + def test_captured_prompt_with_preview_method_none(self): + """Test captured prompt with preview_method: none (disable preview).""" + extra_data = { + "extra_pnginfo": {"workflow": {}}, + "client_id": "test-client", + "preview_method": "none" + } + + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.NoPreviews + + def test_captured_prompt_with_preview_method_latent2rgb(self): + """Test captured prompt with preview_method: latent2rgb.""" + extra_data = { + "extra_pnginfo": {"workflow": {}}, + "client_id": "test-client", + "preview_method": "latent2rgb" + } + + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.Latent2RGB + + def test_captured_prompt_with_preview_method_auto(self): + """Test captured prompt with preview_method: auto.""" + extra_data = { + "extra_pnginfo": {"workflow": {}}, + "client_id": "test-client", + "preview_method": "auto" + } + + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.Auto + + def test_captured_prompt_with_preview_method_default(self): + """Test captured prompt with preview_method: default (use CLI setting).""" + # First set to something 
else + set_preview_method("taesd") + assert args.preview_method == LatentPreviewMethod.TAESD + + # Then simulate a prompt with "default" + extra_data = { + "extra_pnginfo": {"workflow": {}}, + "client_id": "test-client", + "preview_method": "default" + } + + set_preview_method(extra_data.get("preview_method")) + assert args.preview_method == default_preview_method + + def test_sequential_queue_with_different_preview_methods(self): + """ + Simulate real queue scenario: multiple prompts with different settings. + This tests the actual usage pattern in ComfyUI. + """ + # Queue 1: User wants TAESD preview + extra_data_1 = {"client_id": "client-1", "preview_method": "taesd"} + set_preview_method(extra_data_1.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.TAESD + + # Queue 2: User wants no preview (faster execution) + extra_data_2 = {"client_id": "client-2", "preview_method": "none"} + set_preview_method(extra_data_2.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.NoPreviews + + # Queue 3: User doesn't specify (use server default) + extra_data_3 = {"client_id": "client-3"} + set_preview_method(extra_data_3.get("preview_method")) + assert args.preview_method == default_preview_method + + # Queue 4: User explicitly wants default + extra_data_4 = {"client_id": "client-4", "preview_method": "default"} + set_preview_method(extra_data_4.get("preview_method")) + assert args.preview_method == default_preview_method + + # Queue 5: User wants latent2rgb + extra_data_5 = {"client_id": "client-5", "preview_method": "latent2rgb"} + set_preview_method(extra_data_5.get("preview_method")) + assert args.preview_method == LatentPreviewMethod.Latent2RGB diff --git a/tests/execution/test_preview_method.py b/tests/execution/test_preview_method.py new file mode 100644 index 000000000..c3037553b --- /dev/null +++ b/tests/execution/test_preview_method.py @@ -0,0 +1,358 @@ +""" +E2E tests for Queue-specific Preview Method Override feature. + +Tests actual execution with different preview_method values. +Requires a running ComfyUI server with models. + +Usage: + COMFYUI_SERVER=http://localhost:8988 pytest tests/execution/test_preview_method.py -v -m preview_method + +Note: + These tests execute actual image generation and wait for completion. + Tests verify preview image transmission based on preview_method setting. 
+""" +import os +import json +import pytest +import uuid +import time +import random +import websocket +import urllib.request +from pathlib import Path + + +# Server configuration +SERVER_URL = os.environ.get("COMFYUI_SERVER", "http://localhost:8988") +SERVER_HOST = SERVER_URL.replace("http://", "").replace("https://", "") + +# Use existing inference graph fixture +GRAPH_FILE = Path(__file__).parent.parent / "inference" / "graphs" / "default_graph_sdxl1_0.json" + + +def is_server_running() -> bool: + """Check if ComfyUI server is running.""" + try: + request = urllib.request.Request(f"{SERVER_URL}/system_stats") + with urllib.request.urlopen(request, timeout=2.0): + return True + except Exception: + return False + + +def prepare_graph_for_test(graph: dict, steps: int = 5) -> dict: + """Prepare graph for testing: randomize seeds and reduce steps.""" + adapted = json.loads(json.dumps(graph)) # Deep copy + for node_id, node in adapted.items(): + inputs = node.get("inputs", {}) + # Handle both "seed" and "noise_seed" (used by KSamplerAdvanced) + if "seed" in inputs: + inputs["seed"] = random.randint(0, 2**32 - 1) + if "noise_seed" in inputs: + inputs["noise_seed"] = random.randint(0, 2**32 - 1) + # Reduce steps for faster testing (default 20 -> 5) + if "steps" in inputs: + inputs["steps"] = steps + return adapted + + +# Alias for backward compatibility +randomize_seed = prepare_graph_for_test + + +class PreviewMethodClient: + """Client for testing preview_method with WebSocket execution tracking.""" + + def __init__(self, server_address: str): + self.server_address = server_address + self.client_id = str(uuid.uuid4()) + self.ws = None + + def connect(self): + """Connect to WebSocket.""" + self.ws = websocket.WebSocket() + self.ws.settimeout(120) # 2 minute timeout for sampling + self.ws.connect(f"ws://{self.server_address}/ws?clientId={self.client_id}") + + def close(self): + """Close WebSocket connection.""" + if self.ws: + self.ws.close() + + def queue_prompt(self, prompt: dict, extra_data: dict = None) -> dict: + """Queue a prompt and return response with prompt_id.""" + data = { + "prompt": prompt, + "client_id": self.client_id, + "extra_data": extra_data or {} + } + req = urllib.request.Request( + f"http://{self.server_address}/prompt", + data=json.dumps(data).encode("utf-8"), + headers={"Content-Type": "application/json"} + ) + return json.loads(urllib.request.urlopen(req).read()) + + def wait_for_execution(self, prompt_id: str, timeout: float = 120.0) -> dict: + """ + Wait for execution to complete via WebSocket. 
+ + Returns: + dict with keys: completed, error, preview_count, execution_time + """ + result = { + "completed": False, + "error": None, + "preview_count": 0, + "execution_time": 0.0 + } + + start_time = time.time() + self.ws.settimeout(timeout) + + try: + while True: + out = self.ws.recv() + elapsed = time.time() - start_time + + if isinstance(out, str): + message = json.loads(out) + msg_type = message.get("type") + data = message.get("data", {}) + + if data.get("prompt_id") != prompt_id: + continue + + if msg_type == "executing": + if data.get("node") is None: + # Execution complete + result["completed"] = True + result["execution_time"] = elapsed + break + + elif msg_type == "execution_error": + result["error"] = data + result["execution_time"] = elapsed + break + + elif msg_type == "progress": + # Progress update during sampling + pass + + elif isinstance(out, bytes): + # Binary data = preview image + result["preview_count"] += 1 + + except websocket.WebSocketTimeoutException: + result["error"] = "Timeout waiting for execution" + result["execution_time"] = time.time() - start_time + + return result + + +def load_graph() -> dict: + """Load the SDXL graph fixture with randomized seed.""" + with open(GRAPH_FILE) as f: + graph = json.load(f) + return randomize_seed(graph) # Avoid caching + + +# Skip all tests if server is not running +pytestmark = [ + pytest.mark.skipif( + not is_server_running(), + reason=f"ComfyUI server not running at {SERVER_URL}" + ), + pytest.mark.preview_method, + pytest.mark.execution, +] + + +@pytest.fixture +def client(): + """Create and connect a test client.""" + c = PreviewMethodClient(SERVER_HOST) + c.connect() + yield c + c.close() + + +@pytest.fixture +def graph(): + """Load the test graph.""" + return load_graph() + + +class TestPreviewMethodExecution: + """Test actual execution with different preview methods.""" + + def test_execution_with_latent2rgb(self, client, graph): + """ + Execute with preview_method=latent2rgb. + Should complete and potentially receive preview images. + """ + extra_data = {"preview_method": "latent2rgb"} + + response = client.queue_prompt(graph, extra_data) + assert "prompt_id" in response + + result = client.wait_for_execution(response["prompt_id"]) + + # Should complete (may error if model missing, but that's separate) + assert result["completed"] or result["error"] is not None + # Execution should take some time (sampling) + if result["completed"]: + assert result["execution_time"] > 0.5, "Execution too fast - likely didn't run" + # latent2rgb should produce previews + print(f"latent2rgb: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 + + def test_execution_with_taesd(self, client, graph): + """ + Execute with preview_method=taesd. + TAESD provides higher quality previews. + """ + extra_data = {"preview_method": "taesd"} + + response = client.queue_prompt(graph, extra_data) + assert "prompt_id" in response + + result = client.wait_for_execution(response["prompt_id"]) + + assert result["completed"] or result["error"] is not None + if result["completed"]: + assert result["execution_time"] > 0.5 + # taesd should also produce previews + print(f"taesd: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 + + def test_execution_with_none_preview(self, client, graph): + """ + Execute with preview_method=none. + No preview images should be generated. 
+ """ + extra_data = {"preview_method": "none"} + + response = client.queue_prompt(graph, extra_data) + assert "prompt_id" in response + + result = client.wait_for_execution(response["prompt_id"]) + + assert result["completed"] or result["error"] is not None + if result["completed"]: + # With "none", should receive no preview images + assert result["preview_count"] == 0, \ + f"Expected no previews with 'none', got {result['preview_count']}" + print(f"none: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 + + def test_execution_with_default(self, client, graph): + """ + Execute with preview_method=default. + Should use server's CLI default setting. + """ + extra_data = {"preview_method": "default"} + + response = client.queue_prompt(graph, extra_data) + assert "prompt_id" in response + + result = client.wait_for_execution(response["prompt_id"]) + + assert result["completed"] or result["error"] is not None + if result["completed"]: + print(f"default: {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 + + def test_execution_without_preview_method(self, client, graph): + """ + Execute without preview_method in extra_data. + Should use server's default preview method. + """ + extra_data = {} # No preview_method + + response = client.queue_prompt(graph, extra_data) + assert "prompt_id" in response + + result = client.wait_for_execution(response["prompt_id"]) + + assert result["completed"] or result["error"] is not None + if result["completed"]: + print(f"(no override): {result['preview_count']} previews in {result['execution_time']:.2f}s") # noqa: T201 + + +class TestPreviewMethodComparison: + """Compare preview behavior between different methods.""" + + def test_none_vs_latent2rgb_preview_count(self, client, graph): + """ + Compare preview counts: 'none' should have 0, others should have >0. + This is the key verification that preview_method actually works. 
+ """ + results = {} + + # Run with none (randomize seed to avoid caching) + graph_none = randomize_seed(graph) + extra_data_none = {"preview_method": "none"} + response = client.queue_prompt(graph_none, extra_data_none) + results["none"] = client.wait_for_execution(response["prompt_id"]) + + # Run with latent2rgb (randomize seed again) + graph_rgb = randomize_seed(graph) + extra_data_rgb = {"preview_method": "latent2rgb"} + response = client.queue_prompt(graph_rgb, extra_data_rgb) + results["latent2rgb"] = client.wait_for_execution(response["prompt_id"]) + + # Verify both completed + assert results["none"]["completed"], f"'none' execution failed: {results['none']['error']}" + assert results["latent2rgb"]["completed"], f"'latent2rgb' execution failed: {results['latent2rgb']['error']}" + + # Key assertion: 'none' should have 0 previews + assert results["none"]["preview_count"] == 0, \ + f"'none' should have 0 previews, got {results['none']['preview_count']}" + + # 'latent2rgb' should have at least 1 preview (depends on steps) + assert results["latent2rgb"]["preview_count"] > 0, \ + f"'latent2rgb' should have >0 previews, got {results['latent2rgb']['preview_count']}" + + print("\nPreview count comparison:") # noqa: T201 + print(f" none: {results['none']['preview_count']} previews") # noqa: T201 + print(f" latent2rgb: {results['latent2rgb']['preview_count']} previews") # noqa: T201 + + +class TestPreviewMethodSequential: + """Test sequential execution with different preview methods.""" + + def test_sequential_different_methods(self, client, graph): + """ + Execute multiple prompts sequentially with different preview methods. + Each should complete independently with correct preview behavior. + """ + methods = ["latent2rgb", "none", "default"] + results = [] + + for method in methods: + # Randomize seed for each execution to avoid caching + graph_run = randomize_seed(graph) + extra_data = {"preview_method": method} + response = client.queue_prompt(graph_run, extra_data) + + result = client.wait_for_execution(response["prompt_id"]) + results.append({ + "method": method, + "completed": result["completed"], + "preview_count": result["preview_count"], + "execution_time": result["execution_time"], + "error": result["error"] + }) + + # All should complete or have clear errors + for r in results: + assert r["completed"] or r["error"] is not None, \ + f"Method {r['method']} neither completed nor errored" + + # "none" should have zero previews if completed + none_result = next(r for r in results if r["method"] == "none") + if none_result["completed"]: + assert none_result["preview_count"] == 0, \ + f"'none' should have 0 previews, got {none_result['preview_count']}" + + print("\nSequential execution results:") # noqa: T201 + for r in results: + status = "✓" if r["completed"] else f"✗ ({r['error']})" + print(f" {r['method']}: {status}, {r['preview_count']} previews, {r['execution_time']:.2f}s") # noqa: T201 From 43e0d4e3ccfe8b5eac81bcee6f912f661849aafb Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 16 Dec 2025 02:01:10 +0200 Subject: [PATCH 1050/1073] comfy_api: remove usage of "Type","List" and "Dict" types (#11238) --- comfy_api/feature_flags.py | 10 +++++----- comfy_api/internal/api_registry.py | 10 +++++----- comfy_api/internal/async_to_sync.py | 14 ++++++------- comfy_api/internal/singleton.py | 6 +++--- comfy_api/latest/__init__.py | 4 ++-- comfy_api/latest/_input/basic_types.py | 4 ++-- comfy_api/latest/_ui.py | 27 
+++++++++++++------------- comfy_api/version_list.py | 3 +-- 8 files changed, 38 insertions(+), 40 deletions(-) diff --git a/comfy_api/feature_flags.py b/comfy_api/feature_flags.py index bfb77eb5f..de167f037 100644 --- a/comfy_api/feature_flags.py +++ b/comfy_api/feature_flags.py @@ -5,12 +5,12 @@ This module handles capability negotiation between frontend and backend, allowing graceful protocol evolution while maintaining backward compatibility. """ -from typing import Any, Dict +from typing import Any from comfy.cli_args import args # Default server capabilities -SERVER_FEATURE_FLAGS: Dict[str, Any] = { +SERVER_FEATURE_FLAGS: dict[str, Any] = { "supports_preview_metadata": True, "max_upload_size": args.max_upload_size * 1024 * 1024, # Convert MB to bytes "extension": {"manager": {"supports_v4": True}}, @@ -18,7 +18,7 @@ SERVER_FEATURE_FLAGS: Dict[str, Any] = { def get_connection_feature( - sockets_metadata: Dict[str, Dict[str, Any]], + sockets_metadata: dict[str, dict[str, Any]], sid: str, feature_name: str, default: Any = False @@ -42,7 +42,7 @@ def get_connection_feature( def supports_feature( - sockets_metadata: Dict[str, Dict[str, Any]], + sockets_metadata: dict[str, dict[str, Any]], sid: str, feature_name: str ) -> bool: @@ -60,7 +60,7 @@ def supports_feature( return get_connection_feature(sockets_metadata, sid, feature_name, False) is True -def get_server_features() -> Dict[str, Any]: +def get_server_features() -> dict[str, Any]: """ Get the server's feature flags. diff --git a/comfy_api/internal/api_registry.py b/comfy_api/internal/api_registry.py index 7e3375cf6..2b1cb016a 100644 --- a/comfy_api/internal/api_registry.py +++ b/comfy_api/internal/api_registry.py @@ -1,4 +1,4 @@ -from typing import Type, List, NamedTuple +from typing import NamedTuple from comfy_api.internal.singleton import ProxiedSingleton from packaging import version as packaging_version @@ -10,7 +10,7 @@ class ComfyAPIBase(ProxiedSingleton): class ComfyAPIWithVersion(NamedTuple): version: str - api_class: Type[ComfyAPIBase] + api_class: type[ComfyAPIBase] def parse_version(version_str: str) -> packaging_version.Version: @@ -23,16 +23,16 @@ def parse_version(version_str: str) -> packaging_version.Version: return packaging_version.parse(version_str) -registered_versions: List[ComfyAPIWithVersion] = [] +registered_versions: list[ComfyAPIWithVersion] = [] -def register_versions(versions: List[ComfyAPIWithVersion]): +def register_versions(versions: list[ComfyAPIWithVersion]): versions.sort(key=lambda x: parse_version(x.version)) global registered_versions registered_versions = versions -def get_all_versions() -> List[ComfyAPIWithVersion]: +def get_all_versions() -> list[ComfyAPIWithVersion]: """ Returns a list of all registered ComfyAPI versions. """ diff --git a/comfy_api/internal/async_to_sync.py b/comfy_api/internal/async_to_sync.py index 257ade82e..c9b0576e1 100644 --- a/comfy_api/internal/async_to_sync.py +++ b/comfy_api/internal/async_to_sync.py @@ -8,7 +8,7 @@ import os import textwrap import threading from enum import Enum -from typing import Optional, Type, get_origin, get_args, get_type_hints +from typing import Optional, get_origin, get_args, get_type_hints class TypeTracker: @@ -193,7 +193,7 @@ class AsyncToSyncConverter: return result_container["result"] @classmethod - def create_sync_class(cls, async_class: Type, thread_pool_size=10) -> Type: + def create_sync_class(cls, async_class: type, thread_pool_size=10) -> type: """ Creates a new class with synchronous versions of all async methods. 
@@ -563,7 +563,7 @@ class AsyncToSyncConverter: @classmethod def _generate_imports( - cls, async_class: Type, type_tracker: TypeTracker + cls, async_class: type, type_tracker: TypeTracker ) -> list[str]: """Generate import statements for the stub file.""" imports = [] @@ -628,7 +628,7 @@ class AsyncToSyncConverter: return imports @classmethod - def _get_class_attributes(cls, async_class: Type) -> list[tuple[str, Type]]: + def _get_class_attributes(cls, async_class: type) -> list[tuple[str, type]]: """Extract class attributes that are classes themselves.""" class_attributes = [] @@ -654,7 +654,7 @@ class AsyncToSyncConverter: def _generate_inner_class_stub( cls, name: str, - attr: Type, + attr: type, indent: str = " ", type_tracker: Optional[TypeTracker] = None, ) -> list[str]: @@ -782,7 +782,7 @@ class AsyncToSyncConverter: return processed @classmethod - def generate_stub_file(cls, async_class: Type, sync_class: Type) -> None: + def generate_stub_file(cls, async_class: type, sync_class: type) -> None: """ Generate a .pyi stub file for the sync class to help IDEs with type checking. """ @@ -988,7 +988,7 @@ class AsyncToSyncConverter: logging.error(traceback.format_exc()) -def create_sync_class(async_class: Type, thread_pool_size=10) -> Type: +def create_sync_class(async_class: type, thread_pool_size=10) -> type: """ Creates a sync version of an async class diff --git a/comfy_api/internal/singleton.py b/comfy_api/internal/singleton.py index 75f16f98e..d89380262 100644 --- a/comfy_api/internal/singleton.py +++ b/comfy_api/internal/singleton.py @@ -1,4 +1,4 @@ -from typing import Type, TypeVar +from typing import TypeVar class SingletonMetaclass(type): T = TypeVar("T", bound="SingletonMetaclass") @@ -11,13 +11,13 @@ class SingletonMetaclass(type): ) return cls._instances[cls] - def inject_instance(cls: Type[T], instance: T) -> None: + def inject_instance(cls: type[T], instance: T) -> None: assert cls not in SingletonMetaclass._instances, ( "Cannot inject instance after first instantiation" ) SingletonMetaclass._instances[cls] = instance - def get_instance(cls: Type[T], *args, **kwargs) -> T: + def get_instance(cls: type[T], *args, **kwargs) -> T: """ Gets the singleton instance of the class, creating it if it doesn't exist. 
""" diff --git a/comfy_api/latest/__init__.py b/comfy_api/latest/__init__.py index 35e1ac853..fab63c7df 100644 --- a/comfy_api/latest/__init__.py +++ b/comfy_api/latest/__init__.py @@ -1,7 +1,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Type, TYPE_CHECKING +from typing import TYPE_CHECKING from comfy_api.internal import ComfyAPIBase from comfy_api.internal.singleton import ProxiedSingleton from comfy_api.internal.async_to_sync import create_sync_class @@ -113,7 +113,7 @@ ComfyAPI = ComfyAPI_latest if TYPE_CHECKING: import comfy_api.latest.generated.ComfyAPISyncStub # type: ignore - ComfyAPISync: Type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub] + ComfyAPISync: type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub] ComfyAPISync = create_sync_class(ComfyAPI_latest) # create new aliases for io and ui diff --git a/comfy_api/latest/_input/basic_types.py b/comfy_api/latest/_input/basic_types.py index 245c6cbb1..d73deabd2 100644 --- a/comfy_api/latest/_input/basic_types.py +++ b/comfy_api/latest/_input/basic_types.py @@ -1,5 +1,5 @@ import torch -from typing import TypedDict, List, Optional +from typing import TypedDict, Optional ImageInput = torch.Tensor """ @@ -39,4 +39,4 @@ class LatentInput(TypedDict): Optional noise mask tensor in the same format as samples. """ - batch_index: Optional[List[int]] + batch_index: Optional[list[int]] diff --git a/comfy_api/latest/_ui.py b/comfy_api/latest/_ui.py index 2babe209a..e238cdf3c 100644 --- a/comfy_api/latest/_ui.py +++ b/comfy_api/latest/_ui.py @@ -5,7 +5,6 @@ import os import random import uuid from io import BytesIO -from typing import Type import av import numpy as np @@ -83,7 +82,7 @@ class ImageSaveHelper: return PILImage.fromarray(np.clip(255.0 * image_tensor.cpu().numpy(), 0, 255).astype(np.uint8)) @staticmethod - def _create_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None: + def _create_png_metadata(cls: type[ComfyNode] | None) -> PngInfo | None: """Creates a PngInfo object with prompt and extra_pnginfo.""" if args.disable_metadata or cls is None or not cls.hidden: return None @@ -96,7 +95,7 @@ class ImageSaveHelper: return metadata @staticmethod - def _create_animated_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None: + def _create_animated_png_metadata(cls: type[ComfyNode] | None) -> PngInfo | None: """Creates a PngInfo object with prompt and extra_pnginfo for animated PNGs (APNG).""" if args.disable_metadata or cls is None or not cls.hidden: return None @@ -121,7 +120,7 @@ class ImageSaveHelper: return metadata @staticmethod - def _create_webp_metadata(pil_image: PILImage.Image, cls: Type[ComfyNode] | None) -> PILImage.Exif: + def _create_webp_metadata(pil_image: PILImage.Image, cls: type[ComfyNode] | None) -> PILImage.Exif: """Creates EXIF metadata bytes for WebP images.""" exif_data = pil_image.getexif() if args.disable_metadata or cls is None or cls.hidden is None: @@ -137,7 +136,7 @@ class ImageSaveHelper: @staticmethod def save_images( - images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, compress_level = 4, + images, filename_prefix: str, folder_type: FolderType, cls: type[ComfyNode] | None, compress_level = 4, ) -> list[SavedResult]: """Saves a batch of images as individual PNG files.""" full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path( @@ -155,7 +154,7 @@ class ImageSaveHelper: return results @staticmethod - def get_save_images_ui(images, filename_prefix: str, 
cls: Type[ComfyNode] | None, compress_level=4) -> SavedImages: + def get_save_images_ui(images, filename_prefix: str, cls: type[ComfyNode] | None, compress_level=4) -> SavedImages: """Saves a batch of images and returns a UI object for the node output.""" return SavedImages( ImageSaveHelper.save_images( @@ -169,7 +168,7 @@ class ImageSaveHelper: @staticmethod def save_animated_png( - images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, fps: float, compress_level: int + images, filename_prefix: str, folder_type: FolderType, cls: type[ComfyNode] | None, fps: float, compress_level: int ) -> SavedResult: """Saves a batch of images as a single animated PNG.""" full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path( @@ -191,7 +190,7 @@ class ImageSaveHelper: @staticmethod def get_save_animated_png_ui( - images, filename_prefix: str, cls: Type[ComfyNode] | None, fps: float, compress_level: int + images, filename_prefix: str, cls: type[ComfyNode] | None, fps: float, compress_level: int ) -> SavedImages: """Saves an animated PNG and returns a UI object for the node output.""" result = ImageSaveHelper.save_animated_png( @@ -209,7 +208,7 @@ class ImageSaveHelper: images, filename_prefix: str, folder_type: FolderType, - cls: Type[ComfyNode] | None, + cls: type[ComfyNode] | None, fps: float, lossless: bool, quality: int, @@ -238,7 +237,7 @@ class ImageSaveHelper: def get_save_animated_webp_ui( images, filename_prefix: str, - cls: Type[ComfyNode] | None, + cls: type[ComfyNode] | None, fps: float, lossless: bool, quality: int, @@ -267,7 +266,7 @@ class AudioSaveHelper: audio: dict, filename_prefix: str, folder_type: FolderType, - cls: Type[ComfyNode] | None, + cls: type[ComfyNode] | None, format: str = "flac", quality: str = "128k", ) -> list[SavedResult]: @@ -372,7 +371,7 @@ class AudioSaveHelper: @staticmethod def get_save_audio_ui( - audio, filename_prefix: str, cls: Type[ComfyNode] | None, format: str = "flac", quality: str = "128k", + audio, filename_prefix: str, cls: type[ComfyNode] | None, format: str = "flac", quality: str = "128k", ) -> SavedAudios: """Save and instantly wrap for UI.""" return SavedAudios( @@ -388,7 +387,7 @@ class AudioSaveHelper: class PreviewImage(_UIOutput): - def __init__(self, image: Image.Type, animated: bool = False, cls: Type[ComfyNode] = None, **kwargs): + def __init__(self, image: Image.Type, animated: bool = False, cls: type[ComfyNode] = None, **kwargs): self.values = ImageSaveHelper.save_images( image, filename_prefix="ComfyUI_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for _ in range(5)), @@ -412,7 +411,7 @@ class PreviewMask(PreviewImage): class PreviewAudio(_UIOutput): - def __init__(self, audio: dict, cls: Type[ComfyNode] = None, **kwargs): + def __init__(self, audio: dict, cls: type[ComfyNode] = None, **kwargs): self.values = AudioSaveHelper.save_audio( audio, filename_prefix="ComfyUI_temp_" + "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5)), diff --git a/comfy_api/version_list.py b/comfy_api/version_list.py index 7cb1871d5..be6e1db66 100644 --- a/comfy_api/version_list.py +++ b/comfy_api/version_list.py @@ -2,9 +2,8 @@ from comfy_api.latest import ComfyAPI_latest from comfy_api.v0_0_2 import ComfyAPIAdapter_v0_0_2 from comfy_api.v0_0_1 import ComfyAPIAdapter_v0_0_1 from comfy_api.internal import ComfyAPIBase -from typing import List, Type -supported_versions: List[Type[ComfyAPIBase]] = [ +supported_versions: list[type[ComfyAPIBase]] = [ ComfyAPI_latest, 
ComfyAPIAdapter_v0_0_2, ComfyAPIAdapter_v0_0_1, From 77b2f7c228a0db6643bb7f29be4db0bff6799db2 Mon Sep 17 00:00:00 2001 From: drozbay <17261091+drozbay@users.noreply.github.com> Date: Mon, 15 Dec 2025 17:06:32 -0700 Subject: [PATCH 1051/1073] Add context windows callback for custom cond handling (#11208) Co-authored-by: ozbayb <17261091+ozbayb@users.noreply.github.com> --- comfy/context_windows.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/comfy/context_windows.py b/comfy/context_windows.py index 5c412d1c2..2979b3ca1 100644 --- a/comfy/context_windows.py +++ b/comfy/context_windows.py @@ -87,6 +87,7 @@ class IndexListCallbacks: COMBINE_CONTEXT_WINDOW_RESULTS = "combine_context_window_results" EXECUTE_START = "execute_start" EXECUTE_CLEANUP = "execute_cleanup" + RESIZE_COND_ITEM = "resize_cond_item" def init_callbacks(self): return {} @@ -166,6 +167,18 @@ class IndexListContextHandler(ContextHandlerABC): new_cond_item = cond_item.copy() # when in dictionary, look for tensors and CONDCrossAttn [comfy/conds.py] (has cond attr that is a tensor) for cond_key, cond_value in new_cond_item.items(): + # Allow callbacks to handle custom conditioning items + handled = False + for callback in comfy.patcher_extension.get_all_callbacks( + IndexListCallbacks.RESIZE_COND_ITEM, self.callbacks + ): + result = callback(cond_key, cond_value, window, x_in, device, new_cond_item) + if result is not None: + new_cond_item[cond_key] = result + handled = True + break + if handled: + continue if isinstance(cond_value, torch.Tensor): if (self.dim < cond_value.ndim and cond_value.size(self.dim) == x_in.size(self.dim)) or \ (cond_value.ndim < self.dim and cond_value.size(0) == x_in.size(self.dim)): From 70541d4e7769c6c40eae6594e677355eacd181fe Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 15 Dec 2025 16:20:34 -0800 Subject: [PATCH 1052/1073] Support the new qwen edit 2511 reference method. (#11340) index_timestep_zero can be selected in the FluxKontextMultiReferenceLatentMethod now with the display name set to the more generic "Edit Model Reference Method" node. 
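The idea behind index_timestep_zero, roughly: reference latents are appended as extra tokens, but they are modulated as if their diffusion timestep were zero, so the timestep batch is doubled (real values plus zeros) and each transformer block applies separate shift/scale/gate parameters to the image tokens and the reference tokens. A minimal torch sketch of that split modulation, with made-up sizes, as a standalone illustration rather than the model code:

import torch

# Hypothetical sizes for illustration only.
batch, img_tokens, ref_tokens, dim = 2, 16, 8, 4
x = torch.randn(batch, img_tokens + ref_tokens, dim)

# The timestep batch is doubled: real timesteps for the image tokens,
# zeros for the reference tokens.
timestep = torch.rand(batch)
timestep = torch.cat([timestep, timestep * 0], dim=0)  # shape (2 * batch,)

# Stand-ins for the outputs of the modulation MLP on that doubled batch.
shift = torch.randn(2 * batch, dim)
scale = torch.randn(2 * batch, dim)
shift_t, shift_0 = shift[:batch], shift[batch:]
scale_t, scale_0 = scale[:batch], scale[batch:]

# Image tokens get the real-timestep parameters, reference tokens get
# the timestep-zero parameters; the two halves are then rejoined.
reg = torch.addcmul(shift_t.unsqueeze(1), x[:, :img_tokens], 1 + scale_t.unsqueeze(1))
zero = torch.addcmul(shift_0.unsqueeze(1), x[:, img_tokens:], 1 + scale_0.unsqueeze(1))
out = torch.cat((reg, zero), dim=1)
assert out.shape == x.shape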
--- comfy/ldm/qwen_image/model.py | 47 +++++++++++++++++++++++++++++------ comfy_extras/nodes_flux.py | 3 ++- 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 8c75670cd..96590088b 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -218,9 +218,24 @@ class QwenImageTransformerBlock(nn.Module): operations=operations, ) - def _modulate(self, x: torch.Tensor, mod_params: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + def _apply_gate(self, x, y, gate, timestep_zero_index=None): + if timestep_zero_index is not None: + return y + torch.cat((x[:, :timestep_zero_index] * gate[0], x[:, timestep_zero_index:] * gate[1]), dim=1) + else: + return torch.addcmul(y, gate, x) + + def _modulate(self, x: torch.Tensor, mod_params: torch.Tensor, timestep_zero_index=None) -> Tuple[torch.Tensor, torch.Tensor]: shift, scale, gate = torch.chunk(mod_params, 3, dim=-1) - return torch.addcmul(shift.unsqueeze(1), x, 1 + scale.unsqueeze(1)), gate.unsqueeze(1) + if timestep_zero_index is not None: + actual_batch = shift.size(0) // 2 + shift, shift_0 = shift[:actual_batch], shift[actual_batch:] + scale, scale_0 = scale[:actual_batch], scale[actual_batch:] + gate, gate_0 = gate[:actual_batch], gate[actual_batch:] + reg = torch.addcmul(shift.unsqueeze(1), x[:, :timestep_zero_index], 1 + scale.unsqueeze(1)) + zero = torch.addcmul(shift_0.unsqueeze(1), x[:, timestep_zero_index:], 1 + scale_0.unsqueeze(1)) + return torch.cat((reg, zero), dim=1), (gate.unsqueeze(1), gate_0.unsqueeze(1)) + else: + return torch.addcmul(shift.unsqueeze(1), x, 1 + scale.unsqueeze(1)), gate.unsqueeze(1) def forward( self, @@ -229,14 +244,19 @@ class QwenImageTransformerBlock(nn.Module): encoder_hidden_states_mask: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + timestep_zero_index=None, transformer_options={}, ) -> Tuple[torch.Tensor, torch.Tensor]: img_mod_params = self.img_mod(temb) + + if timestep_zero_index is not None: + temb = temb.chunk(2, dim=0)[0] + txt_mod_params = self.txt_mod(temb) img_mod1, img_mod2 = img_mod_params.chunk(2, dim=-1) txt_mod1, txt_mod2 = txt_mod_params.chunk(2, dim=-1) - img_modulated, img_gate1 = self._modulate(self.img_norm1(hidden_states), img_mod1) + img_modulated, img_gate1 = self._modulate(self.img_norm1(hidden_states), img_mod1, timestep_zero_index) del img_mod1 txt_modulated, txt_gate1 = self._modulate(self.txt_norm1(encoder_hidden_states), txt_mod1) del txt_mod1 @@ -251,15 +271,15 @@ class QwenImageTransformerBlock(nn.Module): del img_modulated del txt_modulated - hidden_states = hidden_states + img_gate1 * img_attn_output + hidden_states = self._apply_gate(img_attn_output, hidden_states, img_gate1, timestep_zero_index) encoder_hidden_states = encoder_hidden_states + txt_gate1 * txt_attn_output del img_attn_output del txt_attn_output del img_gate1 del txt_gate1 - img_modulated2, img_gate2 = self._modulate(self.img_norm2(hidden_states), img_mod2) - hidden_states = torch.addcmul(hidden_states, img_gate2, self.img_mlp(img_modulated2)) + img_modulated2, img_gate2 = self._modulate(self.img_norm2(hidden_states), img_mod2, timestep_zero_index) + hidden_states = self._apply_gate(self.img_mlp(img_modulated2), hidden_states, img_gate2, timestep_zero_index) txt_modulated2, txt_gate2 = self._modulate(self.txt_norm2(encoder_hidden_states), txt_mod2) encoder_hidden_states = torch.addcmul(encoder_hidden_states, txt_gate2, self.txt_mlp(txt_modulated2)) 
@@ -391,11 +411,14 @@ class QwenImageTransformer2DModel(nn.Module): hidden_states, img_ids, orig_shape = self.process_img(x) num_embeds = hidden_states.shape[1] + timestep_zero_index = None if ref_latents is not None: h = 0 w = 0 index = 0 - index_ref_method = kwargs.get("ref_latents_method", "index") == "index" + ref_method = kwargs.get("ref_latents_method", "index") + index_ref_method = (ref_method == "index") or (ref_method == "index_timestep_zero") + timestep_zero = ref_method == "index_timestep_zero" for ref in ref_latents: if index_ref_method: index += 1 @@ -415,6 +438,10 @@ class QwenImageTransformer2DModel(nn.Module): kontext, kontext_ids, _ = self.process_img(ref, index=index, h_offset=h_offset, w_offset=w_offset) hidden_states = torch.cat([hidden_states, kontext], dim=1) img_ids = torch.cat([img_ids, kontext_ids], dim=1) + if timestep_zero: + if index > 0: + timestep = torch.cat([timestep, timestep * 0], dim=0) + timestep_zero_index = num_embeds txt_start = round(max(((x.shape[-1] + (self.patch_size // 2)) // self.patch_size) // 2, ((x.shape[-2] + (self.patch_size // 2)) // self.patch_size) // 2)) txt_ids = torch.arange(txt_start, txt_start + context.shape[1], device=x.device).reshape(1, -1, 1).repeat(x.shape[0], 1, 3) @@ -446,7 +473,7 @@ class QwenImageTransformer2DModel(nn.Module): if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["txt"], out["img"] = block(hidden_states=args["img"], encoder_hidden_states=args["txt"], encoder_hidden_states_mask=encoder_hidden_states_mask, temb=args["vec"], image_rotary_emb=args["pe"], transformer_options=args["transformer_options"]) + out["txt"], out["img"] = block(hidden_states=args["img"], encoder_hidden_states=args["txt"], encoder_hidden_states_mask=encoder_hidden_states_mask, temb=args["vec"], image_rotary_emb=args["pe"], timestep_zero_index=timestep_zero_index, transformer_options=args["transformer_options"]) return out out = blocks_replace[("double_block", i)]({"img": hidden_states, "txt": encoder_hidden_states, "vec": temb, "pe": image_rotary_emb, "transformer_options": transformer_options}, {"original_block": block_wrap}) hidden_states = out["img"] @@ -458,6 +485,7 @@ class QwenImageTransformer2DModel(nn.Module): encoder_hidden_states_mask=encoder_hidden_states_mask, temb=temb, image_rotary_emb=image_rotary_emb, + timestep_zero_index=timestep_zero_index, transformer_options=transformer_options, ) @@ -474,6 +502,9 @@ class QwenImageTransformer2DModel(nn.Module): if add is not None: hidden_states[:, :add.shape[1]] += add + if timestep_zero_index is not None: + temb = temb.chunk(2, dim=0)[0] + hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py index d9c4bba81..12c8ed3e6 100644 --- a/comfy_extras/nodes_flux.py +++ b/comfy_extras/nodes_flux.py @@ -154,12 +154,13 @@ class FluxKontextMultiReferenceLatentMethod(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="FluxKontextMultiReferenceLatentMethod", + display_name="Edit Model Reference Method", category="advanced/conditioning/flux", inputs=[ io.Conditioning.Input("conditioning"), io.Combo.Input( "reference_latents_method", - options=["offset", "index", "uxo/uno"], + options=["offset", "index", "uxo/uno", "index_timestep_zero"], ), ], outputs=[ From d02d0e5744f2e06fc40834d3c5bb387de4532007 Mon Sep 17 00:00:00 2001 From: seed93 Date: Tue, 16 Dec 2025 09:38:46 +0800 Subject: [PATCH 1053/1073] [add] tripo3.0 (#10663) * [add] tripo3.0 * 
[tripo] change parameter order * change order --------- Co-authored-by: liangd --- comfy_api_nodes/apis/tripo_api.py | 46 ++++++++++++++--- comfy_api_nodes/nodes_tripo.py | 86 ++++++++++++++++++++++++++++++- 2 files changed, 122 insertions(+), 10 deletions(-) diff --git a/comfy_api_nodes/apis/tripo_api.py b/comfy_api_nodes/apis/tripo_api.py index 713260e2a..ffaaa7dc1 100644 --- a/comfy_api_nodes/apis/tripo_api.py +++ b/comfy_api_nodes/apis/tripo_api.py @@ -5,11 +5,17 @@ from typing import Optional, List, Dict, Any, Union from pydantic import BaseModel, Field, RootModel class TripoModelVersion(str, Enum): + v3_0_20250812 = 'v3.0-20250812' v2_5_20250123 = 'v2.5-20250123' v2_0_20240919 = 'v2.0-20240919' v1_4_20240625 = 'v1.4-20240625' +class TripoGeometryQuality(str, Enum): + standard = 'standard' + detailed = 'detailed' + class TripoTextureQuality(str, Enum): standard = 'standard' detailed = 'detailed' @@ -61,14 +67,20 @@ class TripoSpec(str, Enum): class TripoAnimation(str, Enum): IDLE = "preset:idle" WALK = "preset:walk" + RUN = "preset:run" + DIVE = "preset:dive" CLIMB = "preset:climb" JUMP = "preset:jump" - RUN = "preset:run" SLASH = "preset:slash" SHOOT = "preset:shoot" HURT = "preset:hurt" FALL = "preset:fall" TURN = "preset:turn" + QUADRUPED_WALK = "preset:quadruped:walk" + HEXAPOD_WALK = "preset:hexapod:walk" + OCTOPOD_WALK = "preset:octopod:walk" + SERPENTINE_MARCH = "preset:serpentine:march" + AQUATIC_MARCH = "preset:aquatic:march" class TripoStylizeStyle(str, Enum): LEGO = "lego" @@ -105,6 +117,11 @@ class TripoTaskStatus(str, Enum): BANNED = "banned" EXPIRED = "expired" +class TripoFbxPreset(str, Enum): + BLENDER = "blender" + MIXAMO = "mixamo" + _3DSMAX = "3dsmax" + class TripoFileTokenReference(BaseModel): type: Optional[str] = Field(None, description='The type of the reference') file_token: str @@ -142,6 +159,7 @@ class TripoTextToModelRequest(BaseModel): model_seed: Optional[int] = Field(None, description='The seed for the model') texture_seed: Optional[int] = Field(None, description='The seed for the texture') texture_quality: Optional[TripoTextureQuality] = TripoTextureQuality.standard + geometry_quality: Optional[TripoGeometryQuality] = TripoGeometryQuality.standard style: Optional[TripoStyle] = None auto_size: Optional[bool] = Field(False, description='Whether to auto-size the model') quad: Optional[bool] = Field(False, description='Whether to apply quad to the generated model') @@ -156,6 +174,7 @@ class TripoImageToModelRequest(BaseModel): model_seed: Optional[int] = Field(None, description='The seed for the model') texture_seed: Optional[int] = Field(None, description='The seed for the texture') texture_quality: Optional[TripoTextureQuality] = TripoTextureQuality.standard + geometry_quality: Optional[TripoGeometryQuality] = TripoGeometryQuality.standard texture_alignment: Optional[TripoTextureAlignment] = Field(TripoTextureAlignment.ORIGINAL_IMAGE, description='The texture alignment method') style: Optional[TripoStyle] = Field(None, description='The style to apply to the generated model') auto_size: Optional[bool] = Field(False, description='Whether to auto-size the model') @@ -173,6 +192,7 @@ class TripoMultiviewToModelRequest(BaseModel): model_seed: Optional[int] = Field(None, description='The seed for the model') texture_seed: Optional[int] = Field(None, description='The seed for the texture') texture_quality: Optional[TripoTextureQuality] = TripoTextureQuality.standard + geometry_quality: Optional[TripoGeometryQuality] = TripoGeometryQuality.standard 
texture_alignment: Optional[TripoTextureAlignment] = TripoTextureAlignment.ORIGINAL_IMAGE auto_size: Optional[bool] = Field(False, description='Whether to auto-size the model') orientation: Optional[TripoOrientation] = Field(TripoOrientation.DEFAULT, description='The orientation for the model') @@ -219,14 +239,24 @@ class TripoConvertModelRequest(BaseModel): type: TripoTaskType = Field(TripoTaskType.CONVERT_MODEL, description='Type of task') format: TripoConvertFormat = Field(..., description='The format to convert to') original_model_task_id: str = Field(..., description='The task ID of the original model') - quad: Optional[bool] = Field(False, description='Whether to apply quad to the model') - force_symmetry: Optional[bool] = Field(False, description='Whether to force symmetry') - face_limit: Optional[int] = Field(10000, description='The number of faces to limit the conversion to') - flatten_bottom: Optional[bool] = Field(False, description='Whether to flatten the bottom of the model') - flatten_bottom_threshold: Optional[float] = Field(0.01, description='The threshold for flattening the bottom') - texture_size: Optional[int] = Field(4096, description='The size of the texture') + quad: Optional[bool] = Field(None, description='Whether to apply quad to the model') + force_symmetry: Optional[bool] = Field(None, description='Whether to force symmetry') + face_limit: Optional[int] = Field(None, description='The number of faces to limit the conversion to') + flatten_bottom: Optional[bool] = Field(None, description='Whether to flatten the bottom of the model') + flatten_bottom_threshold: Optional[float] = Field(None, description='The threshold for flattening the bottom') + texture_size: Optional[int] = Field(None, description='The size of the texture') texture_format: Optional[TripoTextureFormat] = Field(TripoTextureFormat.JPEG, description='The format of the texture') - pivot_to_center_bottom: Optional[bool] = Field(False, description='Whether to pivot to the center bottom') + pivot_to_center_bottom: Optional[bool] = Field(None, description='Whether to pivot to the center bottom') + scale_factor: Optional[float] = Field(None, description='The scale factor for the model') + with_animation: Optional[bool] = Field(None, description='Whether to include animations') + pack_uv: Optional[bool] = Field(None, description='Whether to pack the UVs') + bake: Optional[bool] = Field(None, description='Whether to bake the model') + part_names: Optional[List[str]] = Field(None, description='The names of the parts to include') + fbx_preset: Optional[TripoFbxPreset] = Field(None, description='The preset for the FBX export') + export_vertex_colors: Optional[bool] = Field(None, description='Whether to export the vertex colors') + export_orientation: Optional[TripoOrientation] = Field(None, description='The orientation for the export') + animate_in_place: Optional[bool] = Field(None, description='Whether to animate in place') + class TripoTaskRequest(RootModel): root: Union[ diff --git a/comfy_api_nodes/nodes_tripo.py b/comfy_api_nodes/nodes_tripo.py index 697100ff2..bd3c24fb3 100644 --- a/comfy_api_nodes/nodes_tripo.py +++ b/comfy_api_nodes/nodes_tripo.py @@ -102,8 +102,9 @@ class TripoTextToModelNode(IO.ComfyNode): IO.Int.Input("model_seed", default=42, optional=True), IO.Int.Input("texture_seed", default=42, optional=True), IO.Combo.Input("texture_quality", default="standard", options=["standard", "detailed"], optional=True), - IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True), + 
IO.Int.Input("face_limit", default=-1, min=-1, max=2000000, optional=True), IO.Boolean.Input("quad", default=False, optional=True), + IO.Combo.Input("geometry_quality", default="standard", options=["standard", "detailed"], optional=True), ], outputs=[ IO.String.Output(display_name="model_file"), @@ -131,6 +132,7 @@ class TripoTextToModelNode(IO.ComfyNode): model_seed: Optional[int] = None, texture_seed: Optional[int] = None, texture_quality: Optional[str] = None, + geometry_quality: Optional[str] = None, face_limit: Optional[int] = None, quad: Optional[bool] = None, ) -> IO.NodeOutput: @@ -154,6 +156,7 @@ class TripoTextToModelNode(IO.ComfyNode): texture_seed=texture_seed, texture_quality=texture_quality, face_limit=face_limit, + geometry_quality=geometry_quality, auto_size=True, quad=quad, ), @@ -194,6 +197,7 @@ class TripoImageToModelNode(IO.ComfyNode): ), IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True), IO.Boolean.Input("quad", default=False, optional=True), + IO.Combo.Input("geometry_quality", default="standard", options=["standard", "detailed"], optional=True), ], outputs=[ IO.String.Output(display_name="model_file"), @@ -220,6 +224,7 @@ class TripoImageToModelNode(IO.ComfyNode): orientation=None, texture_seed: Optional[int] = None, texture_quality: Optional[str] = None, + geometry_quality: Optional[str] = None, texture_alignment: Optional[str] = None, face_limit: Optional[int] = None, quad: Optional[bool] = None, @@ -246,6 +251,7 @@ class TripoImageToModelNode(IO.ComfyNode): pbr=pbr, model_seed=model_seed, orientation=orientation, + geometry_quality=geometry_quality, texture_alignment=texture_alignment, texture_seed=texture_seed, texture_quality=texture_quality, @@ -295,6 +301,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode): ), IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True), IO.Boolean.Input("quad", default=False, optional=True), + IO.Combo.Input("geometry_quality", default="standard", options=["standard", "detailed"], optional=True), ], outputs=[ IO.String.Output(display_name="model_file"), @@ -323,6 +330,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode): model_seed: Optional[int] = None, texture_seed: Optional[int] = None, texture_quality: Optional[str] = None, + geometry_quality: Optional[str] = None, texture_alignment: Optional[str] = None, face_limit: Optional[int] = None, quad: Optional[bool] = None, @@ -359,6 +367,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode): model_seed=model_seed, texture_seed=texture_seed, texture_quality=texture_quality, + geometry_quality=geometry_quality, texture_alignment=texture_alignment, face_limit=face_limit, quad=quad, @@ -508,6 +517,8 @@ class TripoRetargetNode(IO.ComfyNode): options=[ "preset:idle", "preset:walk", + "preset:run", + "preset:dive", "preset:climb", "preset:jump", "preset:slash", @@ -515,6 +526,11 @@ class TripoRetargetNode(IO.ComfyNode): "preset:hurt", "preset:fall", "preset:turn", + "preset:quadruped:walk", + "preset:hexapod:walk", + "preset:octopod:walk", + "preset:serpentine:march", + "preset:aquatic:march" ], ), ], @@ -563,7 +579,7 @@ class TripoConversionNode(IO.ComfyNode): "face_limit", default=-1, min=-1, - max=500000, + max=2000000, optional=True, ), IO.Int.Input( @@ -579,6 +595,40 @@ class TripoConversionNode(IO.ComfyNode): default="JPEG", optional=True, ), + IO.Boolean.Input("force_symmetry", default=False, optional=True), + IO.Boolean.Input("flatten_bottom", default=False, optional=True), + IO.Float.Input( + "flatten_bottom_threshold", + default=0.0, + min=0.0, 
+ max=1.0, + optional=True, + ), + IO.Boolean.Input("pivot_to_center_bottom", default=False, optional=True), + IO.Float.Input( + "scale_factor", + default=1.0, + min=0.0, + optional=True, + ), + IO.Boolean.Input("with_animation", default=False, optional=True), + IO.Boolean.Input("pack_uv", default=False, optional=True), + IO.Boolean.Input("bake", default=False, optional=True), + IO.String.Input("part_names", default="", optional=True), # comma-separated list + IO.Combo.Input( + "fbx_preset", + options=["blender", "mixamo", "3dsmax"], + default="blender", + optional=True, + ), + IO.Boolean.Input("export_vertex_colors", default=False, optional=True), + IO.Combo.Input( + "export_orientation", + options=["align_image", "default"], + default="default", + optional=True, + ), + IO.Boolean.Input("animate_in_place", default=False, optional=True), ], outputs=[], hidden=[ @@ -604,12 +654,31 @@ class TripoConversionNode(IO.ComfyNode): original_model_task_id, format: str, quad: bool, + force_symmetry: bool, face_limit: int, + flatten_bottom: bool, + flatten_bottom_threshold: float, texture_size: int, texture_format: str, + pivot_to_center_bottom: bool, + scale_factor: float, + with_animation: bool, + pack_uv: bool, + bake: bool, + part_names: str, + fbx_preset: str, + export_vertex_colors: bool, + export_orientation: str, + animate_in_place: bool, ) -> IO.NodeOutput: if not original_model_task_id: raise RuntimeError("original_model_task_id is required") + + # Parse part_names from comma-separated string to list + part_names_list = None + if part_names and part_names.strip(): + part_names_list = [name.strip() for name in part_names.split(',') if name.strip()] + response = await sync_op( cls, endpoint=ApiEndpoint(path="/proxy/tripo/v2/openapi/task", method="POST"), @@ -618,9 +687,22 @@ class TripoConversionNode(IO.ComfyNode): original_model_task_id=original_model_task_id, format=format, quad=quad if quad else None, + force_symmetry=force_symmetry if force_symmetry else None, face_limit=face_limit if face_limit != -1 else None, + flatten_bottom=flatten_bottom if flatten_bottom else None, + flatten_bottom_threshold=flatten_bottom_threshold if flatten_bottom_threshold != 0.0 else None, texture_size=texture_size if texture_size != 4096 else None, texture_format=texture_format if texture_format != "JPEG" else None, + pivot_to_center_bottom=pivot_to_center_bottom if pivot_to_center_bottom else None, + scale_factor=scale_factor if scale_factor != 1.0 else None, + with_animation=with_animation if with_animation else None, + pack_uv=pack_uv if pack_uv else None, + bake=bake if bake else None, + part_names=part_names_list, + fbx_preset=fbx_preset if fbx_preset != "blender" else None, + export_vertex_colors=export_vertex_colors if export_vertex_colors else None, + export_orientation=export_orientation if export_orientation != "default" else None, + animate_in_place=animate_in_place if animate_in_place else None, ), ) return await poll_until_finished(cls, response, average_duration=30) From 41bcf0619db87d443d468c9ddad4454bdbc1b084 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 15 Dec 2025 17:51:06 -0800 Subject: [PATCH 1054/1073] Add code to detect if a z image fun controlnet is broken or not. 
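The check this patch swaps in (shown in the diff just below) keys off an actual tensor value rather than flagging every checkpoint: if the refiner projection weight exists but is entirely zero, the model is treated as the known-broken variant. A hedged sketch of the idea, with the key name taken from the patch itself:

import torch

def is_broken_zimage_controlnet(sd: dict) -> bool:
    w = sd.get("control_noise_refiner.0.after_proj.weight")
    # a present but all-zero projection weight marks the broken checkpoint
    return w is not None and bool(torch.count_nonzero(w) == 0)

print(is_broken_zimage_controlnet({"control_noise_refiner.0.after_proj.weight": torch.zeros(4, 4)}))  # True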
(#11341) --- comfy_extras/nodes_model_patch.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index ec0e790dc..fdd5d0d3f 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -248,7 +248,10 @@ class ModelPatchLoader: config['n_control_layers'] = 15 config['additional_in_dim'] = 17 config['refiner_control'] = True - config['broken'] = True + ref_weight = sd.get("control_noise_refiner.0.after_proj.weight", None) + if ref_weight is not None: + if torch.count_nonzero(ref_weight) == 0: + config['broken'] = True model = comfy.ldm.lumina.controlnet.ZImage_Control(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast, **config) model.load_state_dict(sd) From fc4af8606880be0374cf1f1f45bc5730e6d22bf5 Mon Sep 17 00:00:00 2001 From: Haoming <73768377+Haoming02@users.noreply.github.com> Date: Tue, 16 Dec 2025 09:57:28 +0800 Subject: [PATCH 1055/1073] [BlockInfo] Lumina (#11227) * block info * device * Make tensor int again --------- Co-authored-by: Jedrzej Kosinski --- comfy/ldm/lumina/model.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/ldm/lumina/model.py b/comfy/ldm/lumina/model.py index 96cb37fa6..5628e2ba3 100644 --- a/comfy/ldm/lumina/model.py +++ b/comfy/ldm/lumina/model.py @@ -634,8 +634,11 @@ class NextDiT(nn.Module): img, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, adaln_input, num_tokens, transformer_options=transformer_options) freqs_cis = freqs_cis.to(img.device) + transformer_options["total_blocks"] = len(self.layers) + transformer_options["block_type"] = "double" img_input = img for i, layer in enumerate(self.layers): + transformer_options["block_index"] = i img = layer(img, mask, freqs_cis, adaln_input, transformer_options=transformer_options) if "double_block" in patches: for p in patches["double_block"]: From ea2c117bc3c9d3b38d68e651905ed0d6dd682f92 Mon Sep 17 00:00:00 2001 From: Haoming <73768377+Haoming02@users.noreply.github.com> Date: Tue, 16 Dec 2025 09:59:16 +0800 Subject: [PATCH 1056/1073] [BlockInfo] Wan (#10845) * block info * animate * tensor * device * revert --- comfy/ldm/wan/model.py | 21 ++++++++++++++++++--- comfy/ldm/wan/model_animate.py | 3 +++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/wan/model.py b/comfy/ldm/wan/model.py index a9d5e10d9..4216ce831 100644 --- a/comfy/ldm/wan/model.py +++ b/comfy/ldm/wan/model.py @@ -568,7 +568,10 @@ class WanModel(torch.nn.Module): patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} @@ -763,7 +766,10 @@ class VaceWanModel(WanModel): patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} @@ -862,7 +868,10 @@ class CameraWanModel(WanModel): patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) + 
transformer_options["total_blocks"] = len(self.blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} @@ -1326,16 +1335,19 @@ class WanModel_S2V(WanModel): patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} - out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"]) + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], transformer_options=args["transformer_options"]) return out - out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap}) + out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap}) x = out["img"] else: - x = block(x, e=e0, freqs=freqs, context=context) + x = block(x, e=e0, freqs=freqs, context=context, transformer_options=transformer_options) if audio_emb is not None: x = self.audio_injector(x, i, audio_emb, audio_emb_global, seq_len) # head @@ -1574,7 +1586,10 @@ class HumoWanModel(WanModel): patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} diff --git a/comfy/ldm/wan/model_animate.py b/comfy/ldm/wan/model_animate.py index 7c87835d4..84d7adec4 100644 --- a/comfy/ldm/wan/model_animate.py +++ b/comfy/ldm/wan/model_animate.py @@ -523,7 +523,10 @@ class AnimateWanModel(WanModel): patches_replace = transformer_options.get("patches_replace", {}) blocks_replace = patches_replace.get("dit", {}) + transformer_options["total_blocks"] = len(self.blocks) + transformer_options["block_type"] = "double" for i, block in enumerate(self.blocks): + transformer_options["block_index"] = i if ("double_block", i) in blocks_replace: def block_wrap(args): out = {} From 683569de5527379d9a095af88a9e1349fb7e46b5 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 15 Dec 2025 19:33:27 -0800 Subject: [PATCH 1057/1073] Only enable fp16 on ZImage on newer pytorch. (#11344) --- comfy/supported_models.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 834dfcffc..1888f35ba 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -28,6 +28,7 @@ from . import supported_models_base from . import latent_formats from . 
import diffusers_convert +import comfy.model_management class SD15(supported_models_base.BASE): unet_config = { @@ -1028,7 +1029,13 @@ class ZImage(Lumina2): memory_usage_factor = 2.0 - supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] + supported_inference_dtypes = [torch.bfloat16, torch.float32] + + def __init__(self, unet_config): + super().__init__(unet_config) + if comfy.model_management.extended_fp16_support(): + self.supported_inference_dtypes = self.supported_inference_dtypes.copy() + self.supported_inference_dtypes.insert(1, torch.float16) def clip_target(self, state_dict={}): pref = self.text_encoder_key_prefix[0] From 3d082c32065e0653490b9a4ae45dd33b6c7bffb7 Mon Sep 17 00:00:00 2001 From: Christian Byrne Date: Mon, 15 Dec 2025 20:35:37 -0800 Subject: [PATCH 1058/1073] bump comfyui-frontend-package to 1.34.9 (patch) (#11342) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 117260515..9b9e61683 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.34.8 +comfyui-frontend-package==1.34.9 comfyui-workflow-templates==0.7.59 comfyui-embedded-docs==0.3.1 torch From 645ee1881e739b3013eeb26dbb335280bfbf443e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 15 Dec 2025 20:38:12 -0800 Subject: [PATCH 1059/1073] Inpainting for z image fun control. Use the ZImageFunControlnet node. (#11346) image -> control image ex: pose inpaint_image -> image for inpainting mask -> inpaint mask --- comfy_extras/nodes_model_patch.py | 77 ++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 16 deletions(-) diff --git a/comfy_extras/nodes_model_patch.py b/comfy_extras/nodes_model_patch.py index fdd5d0d3f..2a0cfcf18 100644 --- a/comfy_extras/nodes_model_patch.py +++ b/comfy_extras/nodes_model_patch.py @@ -313,22 +313,46 @@ class ZImageControlPatch: self.inpaint_image = inpaint_image self.mask = mask self.strength = strength - self.encoded_image = self.encode_latent_cond(image) - self.encoded_image_size = (image.shape[1], image.shape[2]) + self.is_inpaint = self.model_patch.model.additional_in_dim > 0 + + skip_encoding = False + if self.image is not None and self.inpaint_image is not None: + if self.image.shape != self.inpaint_image.shape: + skip_encoding = True + + if skip_encoding: + self.encoded_image = None + else: + self.encoded_image = self.encode_latent_cond(self.image, self.inpaint_image) + if self.image is None: + self.encoded_image_size = (self.inpaint_image.shape[1], self.inpaint_image.shape[2]) + else: + self.encoded_image_size = (self.image.shape[1], self.image.shape[2]) self.temp_data = None - def encode_latent_cond(self, control_image, inpaint_image=None): - latent_image = comfy.latent_formats.Flux().process_in(self.vae.encode(control_image)) - if self.model_patch.model.additional_in_dim > 0: - if self.mask is None: - mask_ = torch.zeros_like(latent_image)[:, :1] - else: - mask_ = comfy.utils.common_upscale(self.mask.mean(dim=1, keepdim=True), latent_image.shape[-1], latent_image.shape[-2], "bilinear", "none") + def encode_latent_cond(self, control_image=None, inpaint_image=None): + latent_image = None + if control_image is not None: + latent_image = comfy.latent_formats.Flux().process_in(self.vae.encode(control_image)) + + if self.is_inpaint: if inpaint_image is None: inpaint_image = torch.ones_like(control_image) * 0.5 + if self.mask is not None: + mask_inpaint = 
comfy.utils.common_upscale(self.mask.view(self.mask.shape[0], -1, self.mask.shape[-2], self.mask.shape[-1]).mean(dim=1, keepdim=True), inpaint_image.shape[-2], inpaint_image.shape[-3], "bilinear", "center") + inpaint_image = ((inpaint_image - 0.5) * mask_inpaint.movedim(1, -1).round()) + 0.5 + inpaint_image_latent = comfy.latent_formats.Flux().process_in(self.vae.encode(inpaint_image)) + if self.mask is None: + mask_ = torch.zeros_like(inpaint_image_latent)[:, :1] + else: + mask_ = comfy.utils.common_upscale(self.mask.view(self.mask.shape[0], -1, self.mask.shape[-2], self.mask.shape[-1]).mean(dim=1, keepdim=True), inpaint_image_latent.shape[-1], inpaint_image_latent.shape[-2], "nearest", "center") + + if latent_image is None: + latent_image = comfy.latent_formats.Flux().process_in(self.vae.encode(torch.ones_like(inpaint_image) * 0.5)) + return torch.cat([latent_image, mask_, inpaint_image_latent], dim=1) else: return latent_image @@ -344,13 +368,18 @@ class ZImageControlPatch: block_type = kwargs.get("block_type", "") spacial_compression = self.vae.spacial_compression_encode() if self.encoded_image is None or self.encoded_image_size != (x.shape[-2] * spacial_compression, x.shape[-1] * spacial_compression): - image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center") + image_scaled = None + if self.image is not None: + image_scaled = comfy.utils.common_upscale(self.image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center").movedim(1, -1) + self.encoded_image_size = (image_scaled.shape[-3], image_scaled.shape[-2]) + inpaint_scaled = None if self.inpaint_image is not None: inpaint_scaled = comfy.utils.common_upscale(self.inpaint_image.movedim(-1, 1), x.shape[-1] * spacial_compression, x.shape[-2] * spacial_compression, "area", "center").movedim(1, -1) + self.encoded_image_size = (inpaint_scaled.shape[-3], inpaint_scaled.shape[-2]) + loaded_models = comfy.model_management.loaded_models(only_currently_used=True) - self.encoded_image = self.encode_latent_cond(image_scaled.movedim(1, -1), inpaint_scaled) - self.encoded_image_size = (image_scaled.shape[-2], image_scaled.shape[-1]) + self.encoded_image = self.encode_latent_cond(image_scaled, inpaint_scaled) comfy.model_management.load_models_gpu(loaded_models) cnet_blocks = self.model_patch.model.n_control_layers @@ -391,7 +420,8 @@ class ZImageControlPatch: def to(self, device_or_dtype): if isinstance(device_or_dtype, torch.device): - self.encoded_image = self.encoded_image.to(device_or_dtype) + if self.encoded_image is not None: + self.encoded_image = self.encoded_image.to(device_or_dtype) self.temp_data = None return self @@ -414,9 +444,12 @@ class QwenImageDiffsynthControlnet: CATEGORY = "advanced/loaders/qwen" - def diffsynth_controlnet(self, model, model_patch, vae, image, strength, mask=None): + def diffsynth_controlnet(self, model, model_patch, vae, image=None, strength=1.0, inpaint_image=None, mask=None): model_patched = model.clone() - image = image[:, :, :, :3] + if image is not None: + image = image[:, :, :, :3] + if inpaint_image is not None: + inpaint_image = inpaint_image[:, :, :, :3] if mask is not None: if mask.ndim == 3: mask = mask.unsqueeze(1) @@ -425,13 +458,24 @@ class QwenImageDiffsynthControlnet: mask = 1.0 - mask if isinstance(model_patch.model, comfy.ldm.lumina.controlnet.ZImage_Control): - patch = ZImageControlPatch(model_patch, vae, image, strength, mask=mask) + patch = 
ZImageControlPatch(model_patch, vae, image, strength, inpaint_image=inpaint_image, mask=mask) model_patched.set_model_noise_refiner_patch(patch) model_patched.set_model_double_block_patch(patch) else: model_patched.set_model_double_block_patch(DiffSynthCnetPatch(model_patch, vae, image, strength, mask)) return (model_patched,) +class ZImageFunControlnet(QwenImageDiffsynthControlnet): + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "model_patch": ("MODEL_PATCH",), + "vae": ("VAE",), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + }, + "optional": {"image": ("IMAGE",), "inpaint_image": ("IMAGE",), "mask": ("MASK",)}} + + CATEGORY = "advanced/loaders/zimage" class UsoStyleProjectorPatch: def __init__(self, model_patch, encoded_image): @@ -479,5 +523,6 @@ class USOStyleReference: NODE_CLASS_MAPPINGS = { "ModelPatchLoader": ModelPatchLoader, "QwenImageDiffsynthControlnet": QwenImageDiffsynthControlnet, + "ZImageFunControlnet": ZImageFunControlnet, "USOStyleReference": USOStyleReference, } From bc606d7d645f9edfcac7cca3558210d3ee391d94 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Mon, 15 Dec 2025 22:26:55 -0800 Subject: [PATCH 1060/1073] Add a way to set the default ref method in the qwen image code. (#11349) --- comfy/ldm/qwen_image/model.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 96590088b..8481f7711 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -322,6 +322,7 @@ class QwenImageTransformer2DModel(nn.Module): pooled_projection_dim: int = 768, guidance_embeds: bool = False, axes_dims_rope: Tuple[int, int, int] = (16, 56, 56), + default_ref_method="index", image_model=None, final_layer=True, dtype=None, @@ -334,6 +335,7 @@ class QwenImageTransformer2DModel(nn.Module): self.in_channels = in_channels self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim + self.default_ref_method = default_ref_method self.pe_embedder = EmbedND(dim=attention_head_dim, theta=10000, axes_dim=list(axes_dims_rope)) @@ -416,7 +418,7 @@ class QwenImageTransformer2DModel(nn.Module): h = 0 w = 0 index = 0 - ref_method = kwargs.get("ref_latents_method", "index") + ref_method = kwargs.get("ref_latents_method", self.default_ref_method) index_ref_method = (ref_method == "index") or (ref_method == "index_timestep_zero") timestep_zero = ref_method == "index_timestep_zero" for ref in ref_latents: From 9304e47351be8d178a093b30bcaf5d72c3a2baf5 Mon Sep 17 00:00:00 2001 From: Benjamin Lu Date: Mon, 15 Dec 2025 23:24:18 -0800 Subject: [PATCH 1061/1073] Update workflows for new release process (#11064) * Update release workflows for branch process * Adjust branch order in workflow triggers * Revert changes in test workflows --- .github/workflows/test-ci.yml | 1 + .github/workflows/test-execution.yml | 4 ++-- .github/workflows/test-launch.yml | 4 ++-- .github/workflows/test-unit.yml | 4 ++-- .github/workflows/update-version.yml | 1 + 5 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-ci.yml b/.github/workflows/test-ci.yml index 1660ec8e3..adfc5dd32 100644 --- a/.github/workflows/test-ci.yml +++ b/.github/workflows/test-ci.yml @@ -5,6 +5,7 @@ on: push: branches: - master + - release/** paths-ignore: - 'app/**' - 'input/**' diff --git a/.github/workflows/test-execution.yml 
b/.github/workflows/test-execution.yml index 00ef07ebf..9012633d8 100644 --- a/.github/workflows/test-execution.yml +++ b/.github/workflows/test-execution.yml @@ -2,9 +2,9 @@ name: Execution Tests on: push: - branches: [ main, master ] + branches: [ main, master, release/** ] pull_request: - branches: [ main, master ] + branches: [ main, master, release/** ] jobs: test: diff --git a/.github/workflows/test-launch.yml b/.github/workflows/test-launch.yml index 1735fd83b..fd70aff23 100644 --- a/.github/workflows/test-launch.yml +++ b/.github/workflows/test-launch.yml @@ -2,9 +2,9 @@ name: Test server launches without errors on: push: - branches: [ main, master ] + branches: [ main, master, release/** ] pull_request: - branches: [ main, master ] + branches: [ main, master, release/** ] jobs: test: diff --git a/.github/workflows/test-unit.yml b/.github/workflows/test-unit.yml index 00caf5b8a..d05179cd3 100644 --- a/.github/workflows/test-unit.yml +++ b/.github/workflows/test-unit.yml @@ -2,9 +2,9 @@ name: Unit Tests on: push: - branches: [ main, master ] + branches: [ main, master, release/** ] pull_request: - branches: [ main, master ] + branches: [ main, master, release/** ] jobs: test: diff --git a/.github/workflows/update-version.yml b/.github/workflows/update-version.yml index d9d488974..c2343cc39 100644 --- a/.github/workflows/update-version.yml +++ b/.github/workflows/update-version.yml @@ -6,6 +6,7 @@ on: - "pyproject.toml" branches: - master + - release/** jobs: update-version: From 65e2103b09f66e45438445fb0e99709ae7639869 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Tue, 16 Dec 2025 23:51:48 +0200 Subject: [PATCH 1062/1073] feat(api-nodes): add Wan2.6 model to video nodes (#11357) --- comfy_api_nodes/nodes_wan.py | 162 ++++++++++++++++++++--------------- 1 file changed, 95 insertions(+), 67 deletions(-) diff --git a/comfy_api_nodes/nodes_wan.py b/comfy_api_nodes/nodes_wan.py index 2aab3c2ff..17b680e13 100644 --- a/comfy_api_nodes/nodes_wan.py +++ b/comfy_api_nodes/nodes_wan.py @@ -1,7 +1,5 @@ import re -from typing import Optional -import torch from pydantic import BaseModel, Field from typing_extensions import override @@ -21,26 +19,26 @@ from comfy_api_nodes.util import ( class Text2ImageInputField(BaseModel): prompt: str = Field(...) - negative_prompt: Optional[str] = Field(None) + negative_prompt: str | None = Field(None) class Image2ImageInputField(BaseModel): prompt: str = Field(...) - negative_prompt: Optional[str] = Field(None) + negative_prompt: str | None = Field(None) images: list[str] = Field(..., min_length=1, max_length=2) class Text2VideoInputField(BaseModel): prompt: str = Field(...) - negative_prompt: Optional[str] = Field(None) - audio_url: Optional[str] = Field(None) + negative_prompt: str | None = Field(None) + audio_url: str | None = Field(None) class Image2VideoInputField(BaseModel): prompt: str = Field(...) - negative_prompt: Optional[str] = Field(None) + negative_prompt: str | None = Field(None) img_url: str = Field(...) 
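One mechanical change threads through the whole file below: every Optional[X] annotation becomes X | None. On Python 3.10+ the two spellings build equal union types (PEP 604 guarantees int | str == typing.Union[int, str]), so pydantic validation behavior is unchanged. A quick check:

import typing
assert (str | None) == typing.Optional[str]  # PEP 604 union equals the typing form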
- audio_url: Optional[str] = Field(None) + audio_url: str | None = Field(None) class Txt2ImageParametersField(BaseModel): @@ -52,7 +50,7 @@ class Txt2ImageParametersField(BaseModel): class Image2ImageParametersField(BaseModel): - size: Optional[str] = Field(None) + size: str | None = Field(None) n: int = Field(1, description="Number of images to generate.") # we support only value=1 seed: int = Field(..., ge=0, le=2147483647) watermark: bool = Field(True) @@ -61,19 +59,21 @@ class Image2ImageParametersField(BaseModel): class Text2VideoParametersField(BaseModel): size: str = Field(...) seed: int = Field(..., ge=0, le=2147483647) - duration: int = Field(5, ge=5, le=10) + duration: int = Field(5, ge=5, le=15) prompt_extend: bool = Field(True) watermark: bool = Field(True) - audio: bool = Field(False, description="Should be audio generated automatically") + audio: bool = Field(False, description="Whether to generate audio automatically.") + shot_type: str = Field("single") class Image2VideoParametersField(BaseModel): resolution: str = Field(...) seed: int = Field(..., ge=0, le=2147483647) - duration: int = Field(5, ge=5, le=10) + duration: int = Field(5, ge=5, le=15) prompt_extend: bool = Field(True) watermark: bool = Field(True) - audio: bool = Field(False, description="Should be audio generated automatically") + audio: bool = Field(False, description="Whether to generate audio automatically.") + shot_type: str = Field("single") class Text2ImageTaskCreationRequest(BaseModel): @@ -106,39 +106,39 @@ class TaskCreationOutputField(BaseModel): class TaskCreationResponse(BaseModel): - output: Optional[TaskCreationOutputField] = Field(None) + output: TaskCreationOutputField | None = Field(None) request_id: str = Field(...) - code: Optional[str] = Field(None, description="The error code of the failed request.") - message: Optional[str] = Field(None, description="Details of the failed request.") + code: str | None = Field(None, description="Error code for the failed request.") + message: str | None = Field(None, description="Details about the failed request.") class TaskResult(BaseModel): - url: Optional[str] = Field(None) - code: Optional[str] = Field(None) - message: Optional[str] = Field(None) + url: str | None = Field(None) + code: str | None = Field(None) + message: str | None = Field(None) class ImageTaskStatusOutputField(TaskCreationOutputField): task_id: str = Field(...) task_status: str = Field(...) - results: Optional[list[TaskResult]] = Field(None) + results: list[TaskResult] | None = Field(None) class VideoTaskStatusOutputField(TaskCreationOutputField): task_id: str = Field(...) task_status: str = Field(...) - video_url: Optional[str] = Field(None) - code: Optional[str] = Field(None) - message: Optional[str] = Field(None) + video_url: str | None = Field(None) + code: str | None = Field(None) + message: str | None = Field(None) class ImageTaskStatusResponse(BaseModel): - output: Optional[ImageTaskStatusOutputField] = Field(None) + output: ImageTaskStatusOutputField | None = Field(None) request_id: str = Field(...) class VideoTaskStatusResponse(BaseModel): - output: Optional[VideoTaskStatusOutputField] = Field(None) + output: VideoTaskStatusOutputField | None = Field(None) request_id: str = Field(...) 
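These models describe a two-step protocol: a creation request returns a task_id inside TaskCreationOutputField, and the node then polls the task endpoint until the status output reports a terminal state. A minimal sketch of the polling half, with hypothetical helper and status names (the real nodes go through poll_op):

import asyncio

async def wait_for_task(fetch_status, task_id: str, interval: float = 2.0) -> str:
    while True:
        status = await fetch_status(task_id)   # stand-in for a GET on /tasks/{task_id}
        if status in ("SUCCEEDED", "FAILED"):  # illustrative terminal states
            return status
        await asyncio.sleep(interval)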
@@ -152,7 +152,7 @@ class WanTextToImageApi(IO.ComfyNode): node_id="WanTextToImageApi", display_name="Wan Text to Image", category="api node/image/Wan", - description="Generates image based on text prompt.", + description="Generates an image based on a text prompt.", inputs=[ IO.Combo.Input( "model", @@ -164,13 +164,13 @@ class WanTextToImageApi(IO.ComfyNode): "prompt", multiline=True, default="", - tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + tooltip="Prompt describing the elements and visual features. Supports English and Chinese.", ), IO.String.Input( "negative_prompt", multiline=True, default="", - tooltip="Negative text prompt to guide what to avoid.", + tooltip="Negative prompt describing what to avoid.", optional=True, ), IO.Int.Input( @@ -209,7 +209,7 @@ class WanTextToImageApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip='Whether to add an "AI generated" watermark to the result.', + tooltip="Whether to add an AI-generated watermark to the result.", optional=True, ), ], @@ -252,7 +252,7 @@ class WanTextToImageApi(IO.ComfyNode): ), ) if not initial_response.output: - raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}") response = await poll_op( cls, ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), @@ -272,7 +272,7 @@ class WanImageToImageApi(IO.ComfyNode): display_name="Wan Image to Image", category="api node/image/Wan", description="Generates an image from one or two input images and a text prompt. " - "The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).", + "The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).", inputs=[ IO.Combo.Input( "model", @@ -282,19 +282,19 @@ class WanImageToImageApi(IO.ComfyNode): ), IO.Image.Input( "image", - tooltip="Single-image editing or multi-image fusion, maximum 2 images.", + tooltip="Single-image editing or multi-image fusion. Maximum 2 images.", ), IO.String.Input( "prompt", multiline=True, default="", - tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + tooltip="Prompt describing the elements and visual features. 
Supports English and Chinese.", ), IO.String.Input( "negative_prompt", multiline=True, default="", - tooltip="Negative text prompt to guide what to avoid.", + tooltip="Negative prompt describing what to avoid.", optional=True, ), # redo this later as an optional combo of recommended resolutions @@ -328,7 +328,7 @@ class WanImageToImageApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip='Whether to add an "AI generated" watermark to the result.', + tooltip="Whether to add an AI-generated watermark to the result.", optional=True, ), ], @@ -347,7 +347,7 @@ class WanImageToImageApi(IO.ComfyNode): async def execute( cls, model: str, - image: torch.Tensor, + image: Input.Image, prompt: str, negative_prompt: str = "", # width: int = 1024, @@ -357,7 +357,7 @@ class WanImageToImageApi(IO.ComfyNode): ): n_images = get_number_of_images(image) if n_images not in (1, 2): - raise ValueError(f"Expected 1 or 2 input images, got {n_images}.") + raise ValueError(f"Expected 1 or 2 input images, but got {n_images}.") images = [] for i in image: images.append("data:image/png;base64," + tensor_to_base64_string(i, total_pixels=4096 * 4096)) @@ -376,7 +376,7 @@ class WanImageToImageApi(IO.ComfyNode): ), ) if not initial_response.output: - raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}") response = await poll_op( cls, ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), @@ -395,25 +395,25 @@ class WanTextToVideoApi(IO.ComfyNode): node_id="WanTextToVideoApi", display_name="Wan Text to Video", category="api node/video/Wan", - description="Generates video based on text prompt.", + description="Generates a video based on a text prompt.", inputs=[ IO.Combo.Input( "model", - options=["wan2.5-t2v-preview"], - default="wan2.5-t2v-preview", + options=["wan2.5-t2v-preview", "wan2.6-t2v"], + default="wan2.6-t2v", tooltip="Model to use.", ), IO.String.Input( "prompt", multiline=True, default="", - tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + tooltip="Prompt describing the elements and visual features. 
Supports English and Chinese.", ), IO.String.Input( "negative_prompt", multiline=True, default="", - tooltip="Negative text prompt to guide what to avoid.", + tooltip="Negative prompt describing what to avoid.", optional=True, ), IO.Combo.Input( @@ -433,23 +433,23 @@ class WanTextToVideoApi(IO.ComfyNode): "1080p: 4:3 (1632x1248)", "1080p: 3:4 (1248x1632)", ], - default="480p: 1:1 (624x624)", + default="720p: 1:1 (960x960)", optional=True, ), IO.Int.Input( "duration", default=5, min=5, - max=10, + max=15, step=5, display_mode=IO.NumberDisplay.number, - tooltip="Available durations: 5 and 10 seconds", + tooltip="A 15-second duration is available only for the Wan 2.6 model.", optional=True, ), IO.Audio.Input( "audio", optional=True, - tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.", + tooltip="Audio must contain a clear, loud voice, without extraneous noise or background music.", ), IO.Int.Input( "seed", @@ -466,7 +466,7 @@ class WanTextToVideoApi(IO.ComfyNode): "generate_audio", default=False, optional=True, - tooltip="If there is no audio input, generate audio automatically.", + tooltip="If no audio input is provided, generate audio automatically.", ), IO.Boolean.Input( "prompt_extend", @@ -477,7 +477,15 @@ class WanTextToVideoApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip='Whether to add an "AI generated" watermark to the result.', + tooltip="Whether to add an AI-generated watermark to the result.", + optional=True, + ), + IO.Combo.Input( + "shot_type", + options=["single", "multi"], + tooltip="Specifies the shot type for the generated video, that is, whether the video is a " + "single continuous shot or multiple shots with cuts. " + "This parameter takes effect only when prompt_extend is True.", optional=True, ), ], @@ -498,14 +506,19 @@ class WanTextToVideoApi(IO.ComfyNode): model: str, prompt: str, negative_prompt: str = "", - size: str = "480p: 1:1 (624x624)", + size: str = "720p: 1:1 (960x960)", duration: int = 5, - audio: Optional[Input.Audio] = None, + audio: Input.Audio | None = None, seed: int = 0, generate_audio: bool = False, prompt_extend: bool = True, watermark: bool = True, + shot_type: str = "single", ): + if "480p" in size and model == "wan2.6-t2v": + raise ValueError("The Wan 2.6 model does not support 480p.") + if duration == 15 and model == "wan2.5-t2v-preview": + raise ValueError("A 15-second duration is supported only by the Wan 2.6 model.") width, height = RES_IN_PARENS.search(size).groups() audio_url = None if audio is not None: @@ -526,11 +539,12 @@ class WanTextToVideoApi(IO.ComfyNode): audio=generate_audio, prompt_extend=prompt_extend, watermark=watermark, + shot_type=shot_type, ), ), ) if not initial_response.output: - raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}") response = await poll_op( cls, ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), @@ -549,12 +563,12 @@ class WanImageToVideoApi(IO.ComfyNode): node_id="WanImageToVideoApi", display_name="Wan Image to Video", category="api node/video/Wan", - description="Generates video based on the first frame and text prompt.", + description="Generates a video from the first frame and a text prompt.", inputs=[ IO.Combo.Input( "model", - options=["wan2.5-i2v-preview"], - default="wan2.5-i2v-preview", + options=["wan2.5-i2v-preview", "wan2.6-i2v"], + 
default="wan2.6-i2v", tooltip="Model to use.", ), IO.Image.Input( @@ -564,13 +578,13 @@ class WanImageToVideoApi(IO.ComfyNode): "prompt", multiline=True, default="", - tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.", + tooltip="Prompt describing the elements and visual features. Supports English and Chinese.", ), IO.String.Input( "negative_prompt", multiline=True, default="", - tooltip="Negative text prompt to guide what to avoid.", + tooltip="Negative prompt describing what to avoid.", optional=True, ), IO.Combo.Input( @@ -580,23 +594,23 @@ class WanImageToVideoApi(IO.ComfyNode): "720P", "1080P", ], - default="480P", + default="720P", optional=True, ), IO.Int.Input( "duration", default=5, min=5, - max=10, + max=15, step=5, display_mode=IO.NumberDisplay.number, - tooltip="Available durations: 5 and 10 seconds", + tooltip="Duration 15 available only for WAN2.6 model.", optional=True, ), IO.Audio.Input( "audio", optional=True, - tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.", + tooltip="Audio must contain a clear, loud voice, without extraneous noise or background music.", ), IO.Int.Input( "seed", @@ -613,7 +627,7 @@ class WanImageToVideoApi(IO.ComfyNode): "generate_audio", default=False, optional=True, - tooltip="If there is no audio input, generate audio automatically.", + tooltip="If no audio input is provided, generate audio automatically.", ), IO.Boolean.Input( "prompt_extend", @@ -624,7 +638,15 @@ class WanImageToVideoApi(IO.ComfyNode): IO.Boolean.Input( "watermark", default=True, - tooltip='Whether to add an "AI generated" watermark to the result.', + tooltip="Whether to add an AI-generated watermark to the result.", + optional=True, + ), + IO.Combo.Input( + "shot_type", + options=["single", "multi"], + tooltip="Specifies the shot type for the generated video, that is, whether the video is a " + "single continuous shot or multiple shots with cuts. 
" + "This parameter takes effect only when prompt_extend is True.", optional=True, ), ], @@ -643,19 +665,24 @@ class WanImageToVideoApi(IO.ComfyNode): async def execute( cls, model: str, - image: torch.Tensor, + image: Input.Image, prompt: str, negative_prompt: str = "", - resolution: str = "480P", + resolution: str = "720P", duration: int = 5, - audio: Optional[Input.Audio] = None, + audio: Input.Audio | None = None, seed: int = 0, generate_audio: bool = False, prompt_extend: bool = True, watermark: bool = True, + shot_type: str = "single", ): if get_number_of_images(image) != 1: raise ValueError("Exactly one input image is required.") + if "480P" in resolution and model == "wan2.6-i2v": + raise ValueError("The Wan 2.6 model does not support 480P.") + if duration == 15 and model == "wan2.5-i2v-preview": + raise ValueError("A 15-second duration is supported only by the Wan 2.6 model.") image_url = "data:image/png;base64," + tensor_to_base64_string(image, total_pixels=2000 * 2000) audio_url = None if audio is not None: @@ -677,11 +704,12 @@ class WanImageToVideoApi(IO.ComfyNode): audio=generate_audio, prompt_extend=prompt_extend, watermark=watermark, + shot_type=shot_type, ), ), ) if not initial_response.output: - raise Exception(f"Unknown error occurred: {initial_response.code} - {initial_response.message}") + raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}") response = await poll_op( cls, ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"), From ffdd53b327f7ebd48cf81a1c8b06d846cf354a66 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 16 Dec 2025 14:03:17 -0800 Subject: [PATCH 1063/1073] Check state dict key to auto enable the index_timestep_zero ref method. 
(#11362) --- comfy/ldm/qwen_image/model.py | 3 +++ comfy/model_detection.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/qwen_image/model.py b/comfy/ldm/qwen_image/model.py index 8481f7711..902af30ed 100644 --- a/comfy/ldm/qwen_image/model.py +++ b/comfy/ldm/qwen_image/model.py @@ -363,6 +363,9 @@ class QwenImageTransformer2DModel(nn.Module): for _ in range(num_layers) ]) + if self.default_ref_method == "index_timestep_zero": + self.register_buffer("__index_timestep_zero__", torch.tensor([])) + if final_layer: self.norm_out = LastLayer(self.inner_dim, self.inner_dim, dtype=dtype, device=device, operations=operations) self.proj_out = operations.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True, dtype=dtype, device=device) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index dd6a703f6..7148c77fd 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -259,7 +259,7 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["nerf_tile_size"] = 512 dit_config["nerf_final_head_type"] = "conv" if f"{key_prefix}nerf_final_layer_conv.norm.scale" in state_dict_keys else "linear" dit_config["nerf_embedder_dtype"] = torch.float32 - if "__x0__" in state_dict_keys: # x0 pred + if "{}__x0__".format(key_prefix) in state_dict_keys: # x0 pred dit_config["use_x0"] = True else: dit_config["use_x0"] = False @@ -618,6 +618,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None): dit_config["image_model"] = "qwen_image" dit_config["in_channels"] = state_dict['{}img_in.weight'.format(key_prefix)].shape[1] dit_config["num_layers"] = count_blocks(state_dict_keys, '{}transformer_blocks.'.format(key_prefix) + '{}.') + if "{}__index_timestep_zero__".format(key_prefix) in state_dict_keys: # 2511 + dit_config["default_ref_method"] = "index_timestep_zero" return dit_config if '{}visual_transformer_blocks.0.cross_attention.key_norm.weight'.format(key_prefix) in state_dict_keys: # Kandinsky 5 From 827bb1512b17e349238e69b2d4f463390a5b0d14 Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Wed, 17 Dec 2025 12:35:43 +0800 Subject: [PATCH 1064/1073] Add exp_heun_2_x0 sampler series (#11360) --- comfy/k_diffusion/sampling.py | 11 +++++++++++ comfy/samplers.py | 2 +- comfy_extras/nodes_custom_sampler.py | 11 ++++++++++- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 753c66afa..c004b3b47 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1618,6 +1618,17 @@ def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=Non x = x + sde_noise * sigmas[i + 1] * s_noise return x +@torch.no_grad() +def sample_exp_heun_2_x0(model, x, sigmas, extra_args=None, callback=None, disable=None, solver_type="phi_2"): + """Deterministic exponential Heun second order method in data prediction (x0) and logSNR time.""" + return sample_seeds_2(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=0.0, s_noise=0.0, noise_sampler=None, r=1.0, solver_type=solver_type) + + +@torch.no_grad() +def sample_exp_heun_2_x0_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type="phi_2"): + """Stochastic exponential Heun second order method in data prediction (x0) and logSNR time.""" + return sample_seeds_2(model, x, sigmas, extra_args=extra_args, callback=callback, 
disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, r=1.0, solver_type=solver_type) + @torch.no_grad() def sample_seeds_3(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r_1=1./3, r_2=2./3): diff --git a/comfy/samplers.py b/comfy/samplers.py index fa4640842..8340d376c 100755 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -720,7 +720,7 @@ class Sampler: sigma = float(sigmas[0]) return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma -KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2","dpm_2", "dpm_2_ancestral", +KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 71ea4e9ec..7ee4caac1 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -671,7 +671,16 @@ class SamplerSEEDS2(io.ComfyNode): io.Float.Input("s_noise", default=1.0, min=0.0, max=100.0, step=0.01, round=False, tooltip="SDE noise multiplier"), io.Float.Input("r", default=0.5, min=0.01, max=1.0, step=0.01, round=False, tooltip="Relative step size for the intermediate stage (c2 node)"), ], - outputs=[io.Sampler.Output()] + outputs=[io.Sampler.Output()], + description=( + "This sampler node can represent multiple samplers:\n\n" + "seeds_2\n" + "- default setting\n\n" + "exp_heun_2_x0\n" + "- solver_type=phi_2, r=1.0, eta=0.0\n\n" + "exp_heun_2_x0_sde\n" + "- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0" + ) ) @classmethod From 3a5f239cb622d7d8b1706d0b63c469dfef2eaf73 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 17 Dec 2025 03:46:11 -0500 Subject: [PATCH 1065/1073] ComfyUI v0.5.0 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 2f083edaf..5edf270e7 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This file is automatically generated by the build process when version is # updated in pyproject.toml. 
-__version__ = "0.4.0" +__version__ = "0.5.0" diff --git a/pyproject.toml b/pyproject.toml index e4d3d616a..c402f278c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.4.0" +version = "0.5.0" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 887143854bb2ae1e0f975e4461f376844a1628c8 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 17 Dec 2025 19:43:41 +0200 Subject: [PATCH 1066/1073] feat(api-nodes): add GPT-Image-1.5 (#11368) --- comfy_api_nodes/apis/openai_api.py | 52 +++++++ comfy_api_nodes/nodes_openai.py | 209 +++++++++++++++------------- comfy_api_nodes/util/conversions.py | 2 +- 3 files changed, 168 insertions(+), 95 deletions(-) create mode 100644 comfy_api_nodes/apis/openai_api.py diff --git a/comfy_api_nodes/apis/openai_api.py b/comfy_api_nodes/apis/openai_api.py new file mode 100644 index 000000000..ae5bb2673 --- /dev/null +++ b/comfy_api_nodes/apis/openai_api.py @@ -0,0 +1,52 @@ +from pydantic import BaseModel, Field + + +class Datum2(BaseModel): + b64_json: str | None = Field(None, description="Base64 encoded image data") + revised_prompt: str | None = Field(None, description="Revised prompt") + url: str | None = Field(None, description="URL of the image") + + +class InputTokensDetails(BaseModel): + image_tokens: int | None = None + text_tokens: int | None = None + + +class Usage(BaseModel): + input_tokens: int | None = None + input_tokens_details: InputTokensDetails | None = None + output_tokens: int | None = None + total_tokens: int | None = None + + +class OpenAIImageGenerationResponse(BaseModel): + data: list[Datum2] | None = None + usage: Usage | None = None + + +class OpenAIImageEditRequest(BaseModel): + background: str | None = Field(None, description="Background transparency") + model: str = Field(...) + moderation: str | None = Field(None) + n: int | None = Field(None, description="The number of images to generate") + output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)") + output_format: str | None = Field(None) + prompt: str = Field(...) + quality: str | None = Field(None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)") + size: str | None = Field(None, description="Size of the output image") + + +class OpenAIImageGenerationRequest(BaseModel): + background: str | None = Field(None, description="Background transparency") + model: str | None = Field(None) + moderation: str | None = Field(None) + n: int | None = Field( + None, + description="The number of images to generate.", + ) + output_compression: int | None = Field(None, description="Compression level for JPEG or WebP (0-100)") + output_format: str | None = Field(None) + prompt: str = Field(...) 
+ quality: str | None = Field(None, description="The quality of the generated image") + size: str | None = Field(None, description="Size of the image (e.g., 1024x1024, 1536x1024, auto)") + style: str | None = Field(None, description="Style of the image (only for dall-e-3)") diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index c8da5464b..a6205a34f 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -1,46 +1,45 @@ -from io import BytesIO +import base64 import os from enum import Enum -from inspect import cleandoc +from io import BytesIO + import numpy as np import torch from PIL import Image -import folder_paths -import base64 -from comfy_api.latest import IO, ComfyExtension from typing_extensions import override - +import folder_paths +from comfy_api.latest import IO, ComfyExtension, Input from comfy_api_nodes.apis import ( - OpenAIImageGenerationRequest, - OpenAIImageEditRequest, - OpenAIImageGenerationResponse, - OpenAICreateResponse, - OpenAIResponse, CreateModelResponseProperties, - Item, - OutputContent, - InputImageContent, Detail, - InputTextContent, - InputMessage, - InputMessageContentList, InputContent, InputFileContent, + InputImageContent, + InputMessage, + InputMessageContentList, + InputTextContent, + Item, + OpenAICreateResponse, + OpenAIResponse, + OutputContent, +) +from comfy_api_nodes.apis.openai_api import ( + OpenAIImageEditRequest, + OpenAIImageGenerationRequest, + OpenAIImageGenerationResponse, ) - from comfy_api_nodes.util import ( - downscale_image_tensor, - download_url_to_bytesio, - validate_string, - tensor_to_base64_string, ApiEndpoint, - sync_op, + download_url_to_bytesio, + downscale_image_tensor, poll_op, + sync_op, + tensor_to_base64_string, text_filepath_to_data_uri, + validate_string, ) - RESPONSES_ENDPOINT = "/proxy/openai/v1/responses" STARTING_POINT_ID_PATTERN = r"" @@ -98,9 +97,6 @@ async def validate_and_cast_response(response, timeout: int = None) -> torch.Ten class OpenAIDalle2(IO.ComfyNode): - """ - Generates images synchronously via OpenAI's DALL·E 2 endpoint. - """ @classmethod def define_schema(cls): @@ -108,7 +104,7 @@ class OpenAIDalle2(IO.ComfyNode): node_id="OpenAIDalle2", display_name="OpenAI DALL·E 2", category="api node/image/OpenAI", - description=cleandoc(cls.__doc__ or ""), + description="Generates images synchronously via OpenAI's DALL·E 2 endpoint.", inputs=[ IO.String.Input( "prompt", @@ -234,9 +230,6 @@ class OpenAIDalle2(IO.ComfyNode): class OpenAIDalle3(IO.ComfyNode): - """ - Generates images synchronously via OpenAI's DALL·E 3 endpoint. 
- """ @classmethod def define_schema(cls): @@ -244,7 +237,7 @@ class OpenAIDalle3(IO.ComfyNode): node_id="OpenAIDalle3", display_name="OpenAI DALL·E 3", category="api node/image/OpenAI", - description=cleandoc(cls.__doc__ or ""), + description="Generates images synchronously via OpenAI's DALL·E 3 endpoint.", inputs=[ IO.String.Input( "prompt", @@ -326,10 +319,16 @@ class OpenAIDalle3(IO.ComfyNode): return IO.NodeOutput(await validate_and_cast_response(response)) +def calculate_tokens_price_image_1(response: OpenAIImageGenerationResponse) -> float | None: + # https://platform.openai.com/docs/pricing + return ((response.usage.input_tokens * 10.0) + (response.usage.output_tokens * 40.0)) / 1_000_000.0 + + +def calculate_tokens_price_image_1_5(response: OpenAIImageGenerationResponse) -> float | None: + return ((response.usage.input_tokens * 8.0) + (response.usage.output_tokens * 32.0)) / 1_000_000.0 + + class OpenAIGPTImage1(IO.ComfyNode): - """ - Generates images synchronously via OpenAI's GPT Image 1 endpoint. - """ @classmethod def define_schema(cls): @@ -337,13 +336,13 @@ class OpenAIGPTImage1(IO.ComfyNode): node_id="OpenAIGPTImage1", display_name="OpenAI GPT Image 1", category="api node/image/OpenAI", - description=cleandoc(cls.__doc__ or ""), + description="Generates images synchronously via OpenAI's GPT Image 1 endpoint.", inputs=[ IO.String.Input( "prompt", default="", multiline=True, - tooltip="Text prompt for GPT Image 1", + tooltip="Text prompt for GPT Image", ), IO.Int.Input( "seed", @@ -365,8 +364,8 @@ class OpenAIGPTImage1(IO.ComfyNode): ), IO.Combo.Input( "background", - default="opaque", - options=["opaque", "transparent"], + default="auto", + options=["auto", "opaque", "transparent"], tooltip="Return image with or without background", optional=True, ), @@ -397,6 +396,11 @@ class OpenAIGPTImage1(IO.ComfyNode): tooltip="Optional mask for inpainting (white areas will be replaced)", optional=True, ), + IO.Combo.Input( + "model", + options=["gpt-image-1", "gpt-image-1.5"], + optional=True, + ), ], outputs=[ IO.Image.Output(), @@ -412,32 +416,34 @@ class OpenAIGPTImage1(IO.ComfyNode): @classmethod async def execute( cls, - prompt, - seed=0, - quality="low", - background="opaque", - image=None, - mask=None, - n=1, - size="1024x1024", + prompt: str, + seed: int = 0, + quality: str = "low", + background: str = "opaque", + image: Input.Image | None = None, + mask: Input.Image | None = None, + n: int = 1, + size: str = "1024x1024", + model: str = "gpt-image-1", ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False) - model = "gpt-image-1" - path = "/proxy/openai/images/generations" - content_type = "application/json" - request_class = OpenAIImageGenerationRequest - files = [] + + if mask is not None and image is None: + raise ValueError("Cannot use a mask without an input image") + + if model == "gpt-image-1": + price_extractor = calculate_tokens_price_image_1 + elif model == "gpt-image-1.5": + price_extractor = calculate_tokens_price_image_1_5 + else: + raise ValueError(f"Unknown model: {model}") if image is not None: - path = "/proxy/openai/images/edits" - request_class = OpenAIImageEditRequest - content_type = "multipart/form-data" - + files = [] batch_size = image.shape[0] - for i in range(batch_size): - single_image = image[i : i + 1] - scaled_image = downscale_image_tensor(single_image).squeeze() + single_image = image[i: i + 1] + scaled_image = downscale_image_tensor(single_image, total_pixels=2048*2048).squeeze() image_np = (scaled_image.numpy() * 
255).astype(np.uint8)
                 img = Image.fromarray(image_np)
@@ -450,44 +456,59 @@ class OpenAIGPTImage1(IO.ComfyNode):
                 else:
                     files.append(("image[]", (f"image_{i}.png", img_byte_arr, "image/png")))

-        if mask is not None:
-            if image is None:
-                raise Exception("Cannot use a mask without an input image")
-            if image.shape[0] != 1:
-                raise Exception("Cannot use a mask with multiple image")
-            if mask.shape[1:] != image.shape[1:-1]:
-                raise Exception("Mask and Image must be the same size")
-            batch, height, width = mask.shape
-            rgba_mask = torch.zeros(height, width, 4, device="cpu")
-            rgba_mask[:, :, 3] = 1 - mask.squeeze().cpu()
+            if mask is not None:
+                if image.shape[0] != 1:
+                    raise Exception("Cannot use a mask with multiple images")
+                if mask.shape[1:] != image.shape[1:-1]:
+                    raise Exception("Mask and Image must be the same size")
+                _, height, width = mask.shape
+                rgba_mask = torch.zeros(height, width, 4, device="cpu")
+                rgba_mask[:, :, 3] = 1 - mask.squeeze().cpu()

-            scaled_mask = downscale_image_tensor(rgba_mask.unsqueeze(0)).squeeze()
+                scaled_mask = downscale_image_tensor(rgba_mask.unsqueeze(0), total_pixels=2048*2048).squeeze()

-            mask_np = (scaled_mask.numpy() * 255).astype(np.uint8)
-            mask_img = Image.fromarray(mask_np)
-            mask_img_byte_arr = BytesIO()
-            mask_img.save(mask_img_byte_arr, format="PNG")
-            mask_img_byte_arr.seek(0)
-            files.append(("mask", ("mask.png", mask_img_byte_arr, "image/png")))
-
-        # Build the operation
-        response = await sync_op(
-            cls,
-            ApiEndpoint(path=path, method="POST"),
-            response_model=OpenAIImageGenerationResponse,
-            data=request_class(
-                model=model,
-                prompt=prompt,
-                quality=quality,
-                background=background,
-                n=n,
-                seed=seed,
-                size=size,
-            ),
-            files=files if files else None,
-            content_type=content_type,
-        )
+                mask_np = (scaled_mask.numpy() * 255).astype(np.uint8)
+                mask_img = Image.fromarray(mask_np)
+                mask_img_byte_arr = BytesIO()
+                mask_img.save(mask_img_byte_arr, format="PNG")
+                mask_img_byte_arr.seek(0)
+                files.append(("mask", ("mask.png", mask_img_byte_arr, "image/png")))
+            response = await sync_op(
+                cls,
+                ApiEndpoint(path="/proxy/openai/images/edits", method="POST"),
+                response_model=OpenAIImageGenerationResponse,
+                data=OpenAIImageEditRequest(
+                    model=model,
+                    prompt=prompt,
+                    quality=quality,
+                    background=background,
+                    n=n,
+                    seed=seed,
+                    size=size,
+                    moderation="low",
+                ),
+                content_type="multipart/form-data",
+                files=files,
+                price_extractor=price_extractor,
+            )
+        else:
+            response = await sync_op(
+                cls,
+                ApiEndpoint(path="/proxy/openai/images/generations", method="POST"),
+                response_model=OpenAIImageGenerationResponse,
+                data=OpenAIImageGenerationRequest(
+                    model=model,
+                    prompt=prompt,
+                    quality=quality,
+                    background=background,
+                    n=n,
+                    seed=seed,
+                    size=size,
+                    moderation="low",
+                ),
+                price_extractor=price_extractor,
+            )

         return IO.NodeOutput(await validate_and_cast_response(response))
diff --git a/comfy_api_nodes/util/conversions.py b/comfy_api_nodes/util/conversions.py
index c57457580..d64239c86 100644
--- a/comfy_api_nodes/util/conversions.py
+++ b/comfy_api_nodes/util/conversions.py
@@ -129,7 +129,7 @@ def pil_to_bytesio(img: Image.Image, mime_type: str = "image/png") -> BytesIO:
     return img_byte_arr

-def downscale_image_tensor(image, total_pixels=1536 * 1024) -> torch.Tensor:
+def downscale_image_tensor(image: torch.Tensor, total_pixels: int = 1536 * 1024) -> torch.Tensor:
     """Downscale input image tensor to roughly the specified total pixels."""
     samples = image.movedim(-1, 1)
     total = int(total_pixels)
From
c08f97f34407a1bc6cc8d1447d6c12893399acba Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Wed, 17 Dec 2025 20:24:25 +0200 Subject: [PATCH 1067/1073] fix regression in V3 nodes processing (#11375) --- comfy_api/latest/_io.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 2b634d172..4b14e5ded 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -1556,12 +1556,12 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal): @final @classmethod - def PREPARE_CLASS_CLONE(cls, v3_data: V3Data) -> type[ComfyNode]: + def PREPARE_CLASS_CLONE(cls, v3_data: V3Data | None) -> type[ComfyNode]: """Creates clone of real node class to prevent monkey-patching.""" c_type: type[ComfyNode] = cls if is_class(cls) else type(cls) type_clone: type[ComfyNode] = shallow_clone_class(c_type) # set hidden - type_clone.hidden = HiddenHolder.from_dict(v3_data["hidden_inputs"]) + type_clone.hidden = HiddenHolder.from_dict(v3_data["hidden_inputs"] if v3_data else None) return type_clone @final From 5d9ad0c6bf177095aea5026cd872b1faf873669b Mon Sep 17 00:00:00 2001 From: chaObserv <154517000+chaObserv@users.noreply.github.com> Date: Thu, 18 Dec 2025 02:57:40 +0800 Subject: [PATCH 1068/1073] Fix the last step with non-zero sigma in sa_solver (#11380) --- comfy/k_diffusion/sampling.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index c004b3b47..1ba9edad7 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1776,7 +1776,7 @@ def sample_sa_solver(model, x, sigmas, extra_args=None, callback=None, disable=F # Predictor if sigmas[i + 1] == 0: # Denoising step - x = denoised + x_pred = denoised else: tau_t = tau_func(sigmas[i + 1]) curr_lambdas = lambdas[i - predictor_order_used + 1:i + 1] @@ -1797,7 +1797,7 @@ def sample_sa_solver(model, x, sigmas, extra_args=None, callback=None, disable=F if tau_t > 0 and s_noise > 0: noise = noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * tau_t ** 2 * h).expm1().neg().sqrt() * s_noise x_pred = x_pred + noise - return x + return x_pred @torch.no_grad() From 16d85ea13342cebc8349a95236c94bde5ac3cb2a Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 17 Dec 2025 16:43:18 -0800 Subject: [PATCH 1069/1073] Better handle torch being imported by prestartup nodes. 
(#11383) --- main.py | 66 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/main.py b/main.py index 0d02a087b..0e07a95da 100644 --- a/main.py +++ b/main.py @@ -23,6 +23,38 @@ if __name__ == "__main__": setup_logger(log_level=args.verbose, use_stdout=args.log_stdout) +if os.name == "nt": + os.environ['MIMALLOC_PURGE_DELAY'] = '0' + +if __name__ == "__main__": + os.environ['TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL'] = '1' + if args.default_device is not None: + default_dev = args.default_device + devices = list(range(32)) + devices.remove(default_dev) + devices.insert(0, default_dev) + devices = ','.join(map(str, devices)) + os.environ['CUDA_VISIBLE_DEVICES'] = str(devices) + os.environ['HIP_VISIBLE_DEVICES'] = str(devices) + + if args.cuda_device is not None: + os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) + os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) + os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device) + logging.info("Set cuda device to: {}".format(args.cuda_device)) + + if args.oneapi_device_selector is not None: + os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector + logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector)) + + if args.deterministic: + if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8" + + import cuda_malloc + if "rocm" in cuda_malloc.get_torch_version_noimport(): + os.environ['OCL_SET_SVM_SIZE'] = '262144' # set at the request of AMD + def handle_comfyui_manager_unavailable(): if not args.windows_standalone_build: @@ -137,40 +169,6 @@ import shutil import threading import gc - -if os.name == "nt": - os.environ['MIMALLOC_PURGE_DELAY'] = '0' - -if __name__ == "__main__": - os.environ['TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL'] = '1' - if args.default_device is not None: - default_dev = args.default_device - devices = list(range(32)) - devices.remove(default_dev) - devices.insert(0, default_dev) - devices = ','.join(map(str, devices)) - os.environ['CUDA_VISIBLE_DEVICES'] = str(devices) - os.environ['HIP_VISIBLE_DEVICES'] = str(devices) - - if args.cuda_device is not None: - os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) - os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) - os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device) - logging.info("Set cuda device to: {}".format(args.cuda_device)) - - if args.oneapi_device_selector is not None: - os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector - logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector)) - - if args.deterministic: - if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: - os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8" - - import cuda_malloc - if "rocm" in cuda_malloc.get_torch_version_noimport(): - os.environ['OCL_SET_SVM_SIZE'] = '262144' # set at the request of AMD - - if 'torch' in sys.modules: logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.") From ba6080bbab070934ea6e870c5fc30dbf702eb445 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 17 Dec 2025 21:04:50 -0500 Subject: [PATCH 1070/1073] ComfyUI v0.5.1 --- comfyui_version.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui_version.py b/comfyui_version.py index 5edf270e7..b45309198 100644 --- a/comfyui_version.py +++ b/comfyui_version.py @@ -1,3 +1,3 @@ # This 
file is automatically generated by the build process when version is # updated in pyproject.toml. -__version__ = "0.5.0" +__version__ = "0.5.1" diff --git a/pyproject.toml b/pyproject.toml index c402f278c..3a6960811 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ComfyUI" -version = "0.5.0" +version = "0.5.1" readme = "README.md" license = { file = "LICENSE" } requires-python = ">=3.9" From 86dbb89fc95f0cb652ae5d6cb923f641a58e295d Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Thu, 18 Dec 2025 11:15:27 +0800 Subject: [PATCH 1071/1073] Resolution bucketing and Trainer implementation refactoring (#11117) --- comfy/sampler_helpers.py | 9 +- comfy_extras/nodes_dataset.py | 96 ++- comfy_extras/nodes_post_processing.py | 11 +- comfy_extras/nodes_train.py | 854 +++++++++++++++++++------- 4 files changed, 738 insertions(+), 232 deletions(-) diff --git a/comfy/sampler_helpers.py b/comfy/sampler_helpers.py index e46971afb..e158e8a84 100644 --- a/comfy/sampler_helpers.py +++ b/comfy/sampler_helpers.py @@ -122,20 +122,21 @@ def estimate_memory(model, noise_shape, conds): minimum_memory_required = model.model.memory_required([noise_shape[0]] + list(noise_shape[1:]), cond_shapes=cond_shapes_min) return memory_required, minimum_memory_required -def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None): +def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, skip_load_model=False): executor = comfy.patcher_extension.WrapperExecutor.new_executor( _prepare_sampling, comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.PREPARE_SAMPLING, model_options, is_model_options=True) ) - return executor.execute(model, noise_shape, conds, model_options=model_options) + return executor.execute(model, noise_shape, conds, model_options=model_options, skip_load_model=skip_load_model) -def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None): +def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, skip_load_model=False): real_model: BaseModel = None models, inference_memory = get_additional_models(conds, model.model_dtype()) models += get_additional_models_from_model_options(model_options) models += model.get_nested_additional_models() # TODO: does this require inference_memory update? 
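
# Sketch of the intended call pattern for the skip_load_model flag introduced
# just below (renamed to force_full_load in a later patch in this series):
# a caller that has already loaded the model fully, as TrainLoraNode.execute()
# does, can ask prepare_sampling to queue only the auxiliary models pulled
# from the conds. Placeholder arguments; not runnable against a real checkpoint.
import comfy.model_management
import comfy.sampler_helpers

def prepare_training_step(model_patcher, noise, conds, model_options):
    # The trainer pins the patched model on the GPU once for the whole run...
    comfy.model_management.load_models_gpu(
        [model_patcher], memory_required=1e20, force_full_load=True
    )
    # ...so per-step preparation skips re-queueing it for (partial) loading.
    return comfy.sampler_helpers.prepare_sampling(
        model_patcher, noise.shape, conds, model_options, skip_load_model=True
    )
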
memory_required, minimum_memory_required = estimate_memory(model, noise_shape, conds) - comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required + inference_memory, minimum_memory_required=minimum_memory_required + inference_memory) + models_list = [model] if not skip_load_model else [] + comfy.model_management.load_models_gpu(models_list + models, memory_required=memory_required + inference_memory, minimum_memory_required=minimum_memory_required + inference_memory) real_model = model.model return real_model, conds, models diff --git a/comfy_extras/nodes_dataset.py b/comfy_extras/nodes_dataset.py index 4789d7d53..513aecf3a 100644 --- a/comfy_extras/nodes_dataset.py +++ b/comfy_extras/nodes_dataset.py @@ -1125,6 +1125,99 @@ class MergeTextListsNode(TextProcessingNode): # ========== Training Dataset Nodes ========== +class ResolutionBucket(io.ComfyNode): + """Bucket latents and conditions by resolution for efficient batch training.""" + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ResolutionBucket", + display_name="Resolution Bucket", + category="dataset", + is_experimental=True, + is_input_list=True, + inputs=[ + io.Latent.Input( + "latents", + tooltip="List of latent dicts to bucket by resolution.", + ), + io.Conditioning.Input( + "conditioning", + tooltip="List of conditioning lists (must match latents length).", + ), + ], + outputs=[ + io.Latent.Output( + display_name="latents", + is_output_list=True, + tooltip="List of batched latent dicts, one per resolution bucket.", + ), + io.Conditioning.Output( + display_name="conditioning", + is_output_list=True, + tooltip="List of condition lists, one per resolution bucket.", + ), + ], + ) + + @classmethod + def execute(cls, latents, conditioning): + # latents: list[{"samples": tensor}] where tensor is (B, C, H, W), typically B=1 + # conditioning: list[list[cond]] + + # Validate lengths match + if len(latents) != len(conditioning): + raise ValueError( + f"Number of latents ({len(latents)}) does not match number of conditions ({len(conditioning)})." 
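
# Condensed sketch of the grouping this node performs: individual (C, H, W)
# samples are keyed by spatial size so each bucket can be stacked into one
# batch without padding. Toy tensors below; the real node also carries the
# paired conditioning through the same buckets.
import torch

def bucket_by_resolution(samples):
    buckets = {}
    for lat in samples:
        buckets.setdefault((lat.shape[-2], lat.shape[-1]), []).append(lat)
    return [{"samples": torch.stack(group)} for group in buckets.values()]

lats = [torch.zeros(4, 32, 32), torch.zeros(4, 32, 32), torch.zeros(4, 64, 64)]
out = bucket_by_resolution(lats)
assert [d["samples"].shape for d in out] == [torch.Size([2, 4, 32, 32]), torch.Size([1, 4, 64, 64])]
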
+ ) + + # Flatten latents and conditions to individual samples + flat_latents = [] # list of (C, H, W) tensors + flat_conditions = [] # list of condition lists + + for latent_dict, cond in zip(latents, conditioning): + samples = latent_dict["samples"] # (B, C, H, W) + batch_size = samples.shape[0] + + # cond is a list of conditions with length == batch_size + for i in range(batch_size): + flat_latents.append(samples[i]) # (C, H, W) + flat_conditions.append(cond[i]) # single condition + + # Group by resolution (H, W) + buckets = {} # (H, W) -> {"latents": list, "conditions": list} + + for latent, cond in zip(flat_latents, flat_conditions): + # latent shape is (..., H, W) (B, C, H, W) or (B, T, C, H ,W) + h, w = latent.shape[-2], latent.shape[-1] + key = (h, w) + + if key not in buckets: + buckets[key] = {"latents": [], "conditions": []} + + buckets[key]["latents"].append(latent) + buckets[key]["conditions"].append(cond) + + # Convert buckets to output format + output_latents = [] # list[{"samples": tensor}] where tensor is (Bi, ..., H, W) + output_conditions = [] # list[list[cond]] where each inner list has Bi conditions + + for (h, w), bucket_data in buckets.items(): + # Stack latents into batch: list of (..., H, W) -> (Bi, ..., H, W) + stacked_latents = torch.stack(bucket_data["latents"], dim=0) + output_latents.append({"samples": stacked_latents}) + + # Conditions stay as list of condition lists + output_conditions.append(bucket_data["conditions"]) + + logging.info( + f"Resolution bucket ({h}x{w}): {len(bucket_data['latents'])} samples" + ) + + logging.info(f"Created {len(buckets)} resolution buckets from {len(flat_latents)} samples") + return io.NodeOutput(output_latents, output_conditions) + + class MakeTrainingDataset(io.ComfyNode): """Encode images with VAE and texts with CLIP to create a training dataset.""" @@ -1373,7 +1466,7 @@ class LoadTrainingDataset(io.ComfyNode): shard_path = os.path.join(dataset_dir, shard_file) with open(shard_path, "rb") as f: - shard_data = torch.load(f, weights_only=True) + shard_data = torch.load(f) all_latents.extend(shard_data["latents"]) all_conditioning.extend(shard_data["conditioning"]) @@ -1425,6 +1518,7 @@ class DatasetExtension(ComfyExtension): MakeTrainingDataset, SaveTrainingDataset, LoadTrainingDataset, + ResolutionBucket, ] diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 34c388a5a..ca2cdeb50 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -221,6 +221,7 @@ class ImageScaleToTotalPixels(io.ComfyNode): io.Image.Input("image"), io.Combo.Input("upscale_method", options=cls.upscale_methods), io.Float.Input("megapixels", default=1.0, min=0.01, max=16.0, step=0.01), + io.Int.Input("resolution_steps", default=1, min=1, max=256), ], outputs=[ io.Image.Output(), @@ -228,15 +229,15 @@ class ImageScaleToTotalPixels(io.ComfyNode): ) @classmethod - def execute(cls, image, upscale_method, megapixels) -> io.NodeOutput: + def execute(cls, image, upscale_method, megapixels, resolution_steps) -> io.NodeOutput: samples = image.movedim(-1,1) - total = int(megapixels * 1024 * 1024) + total = megapixels * 1024 * 1024 scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2])) - width = round(samples.shape[3] * scale_by) - height = round(samples.shape[2] * scale_by) + width = round(samples.shape[3] * scale_by / resolution_steps) * resolution_steps + height = round(samples.shape[2] * scale_by / resolution_steps) * resolution_steps - s = 
comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled") + s = comfy.utils.common_upscale(samples, int(width), int(height), upscale_method, "disabled") s = s.movedim(1,-1) return io.NodeOutput(s) diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index 19b8baaf4..88bc8c8e8 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -10,6 +10,7 @@ from PIL import Image, ImageDraw, ImageFont from typing_extensions import override import comfy.samplers +import comfy.sampler_helpers import comfy.sd import comfy.utils import comfy.model_management @@ -21,6 +22,68 @@ from comfy_api.latest import ComfyExtension, io, ui from comfy.utils import ProgressBar +class TrainGuider(comfy_extras.nodes_custom_sampler.Guider_Basic): + """ + CFGGuider with modifications for training specific logic + """ + def outer_sample( + self, + noise, + latent_image, + sampler, + sigmas, + denoise_mask=None, + callback=None, + disable_pbar=False, + seed=None, + latent_shapes=None, + ): + self.inner_model, self.conds, self.loaded_models = ( + comfy.sampler_helpers.prepare_sampling( + self.model_patcher, + noise.shape, + self.conds, + self.model_options, + skip_load_model=True, # skip load model as we manage it in TrainLoraNode.execute() + ) + ) + device = self.model_patcher.load_device + + if denoise_mask is not None: + denoise_mask = comfy.sampler_helpers.prepare_mask( + denoise_mask, noise.shape, device + ) + + noise = noise.to(device) + latent_image = latent_image.to(device) + sigmas = sigmas.to(device) + comfy.samplers.cast_to_load_options( + self.model_options, device=device, dtype=self.model_patcher.model_dtype() + ) + + try: + self.model_patcher.pre_run() + output = self.inner_sample( + noise, + latent_image, + device, + sampler, + sigmas, + denoise_mask, + callback, + disable_pbar, + seed, + latent_shapes=latent_shapes, + ) + finally: + self.model_patcher.cleanup() + + comfy.sampler_helpers.cleanup_models(self.conds, self.loaded_models) + del self.inner_model + del self.loaded_models + return output + + def make_batch_extra_option_dict(d, indicies, full_size=None): new_dict = {} for k, v in d.items(): @@ -65,6 +128,7 @@ class TrainSampler(comfy.samplers.Sampler): seed=0, training_dtype=torch.bfloat16, real_dataset=None, + bucket_latents=None, ): self.loss_fn = loss_fn self.optimizer = optimizer @@ -75,6 +139,28 @@ class TrainSampler(comfy.samplers.Sampler): self.seed = seed self.training_dtype = training_dtype self.real_dataset: list[torch.Tensor] | None = real_dataset + # Bucket mode data + self.bucket_latents: list[torch.Tensor] | None = ( + bucket_latents # list of (Bi, C, Hi, Wi) + ) + # Precompute bucket offsets and weights for sampling + if bucket_latents is not None: + self._init_bucket_data(bucket_latents) + else: + self.bucket_offsets = None + self.bucket_weights = None + self.num_images = None + + def _init_bucket_data(self, bucket_latents): + """Initialize bucket offsets and weights for sampling.""" + self.bucket_offsets = [0] + bucket_sizes = [] + for lat in bucket_latents: + bucket_sizes.append(lat.shape[0]) + self.bucket_offsets.append(self.bucket_offsets[-1] + lat.shape[0]) + self.num_images = self.bucket_offsets[-1] + # Weights for sampling buckets proportional to their size + self.bucket_weights = torch.tensor(bucket_sizes, dtype=torch.float32) def fwd_bwd( self, @@ -115,6 +201,108 @@ class TrainSampler(comfy.samplers.Sampler): bwd_loss.backward() return loss + def _generate_batch_sigmas(self, model_wrap, batch_size, device): + 
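
# Standalone sketch of the weighted bucket pick used by _train_step_bucket_mode
# below: a bucket is drawn with probability proportional to its sample count,
# a batch is drawn inside it, and the relative indices are offset back into
# the flattened conditioning list. Toy sizes; seeds and devices omitted.
import torch

bucket_sizes = [8, 2]
weights = torch.tensor(bucket_sizes, dtype=torch.float32)
batch_size = 4

bucket_idx = torch.multinomial(weights, 1).item()
size = bucket_sizes[bucket_idx]
actual = min(batch_size, size)              # small buckets yield smaller batches
rel = torch.randperm(size)[:actual].tolist()
offset = sum(bucket_sizes[:bucket_idx])
abs_indices = [offset + r for r in rel]     # index into the flattened cond list
assert all(0 <= i < sum(bucket_sizes) for i in abs_indices)
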
"""Generate random sigma values for a batch.""" + batch_sigmas = [ + model_wrap.inner_model.model_sampling.percent_to_sigma( + torch.rand((1,)).item() + ) + for _ in range(batch_size) + ] + return torch.tensor(batch_sigmas).to(device) + + def _train_step_bucket_mode(self, model_wrap, cond, extra_args, noisegen, latent_image, pbar): + """Execute one training step in bucket mode.""" + # Sample bucket (weighted by size), then sample batch from bucket + bucket_idx = torch.multinomial(self.bucket_weights, 1).item() + bucket_latent = self.bucket_latents[bucket_idx] # (Bi, C, Hi, Wi) + bucket_size = bucket_latent.shape[0] + bucket_offset = self.bucket_offsets[bucket_idx] + + # Sample indices from this bucket (use all if bucket_size < batch_size) + actual_batch_size = min(self.batch_size, bucket_size) + relative_indices = torch.randperm(bucket_size)[:actual_batch_size].tolist() + # Convert to absolute indices for fwd_bwd (cond is flattened, use absolute index) + absolute_indices = [bucket_offset + idx for idx in relative_indices] + + batch_latent = bucket_latent[relative_indices].to(latent_image) # (actual_batch_size, C, H, W) + batch_noise = noisegen.generate_noise({"samples": batch_latent}).to( + batch_latent.device + ) + batch_sigmas = self._generate_batch_sigmas(model_wrap, actual_batch_size, batch_latent.device) + + loss = self.fwd_bwd( + model_wrap, + batch_sigmas, + batch_noise, + batch_latent, + cond, # Use flattened cond with absolute indices + absolute_indices, + extra_args, + self.num_images, + bwd=True, + ) + if self.loss_callback: + self.loss_callback(loss.item()) + pbar.set_postfix({"loss": f"{loss.item():.4f}", "bucket": bucket_idx}) + + def _train_step_standard_mode(self, model_wrap, cond, extra_args, noisegen, latent_image, dataset_size, pbar): + """Execute one training step in standard (non-bucket, non-multi-res) mode.""" + indicies = torch.randperm(dataset_size)[: self.batch_size].tolist() + batch_latent = torch.stack([latent_image[i] for i in indicies]) + batch_noise = noisegen.generate_noise({"samples": batch_latent}).to( + batch_latent.device + ) + batch_sigmas = self._generate_batch_sigmas(model_wrap, min(self.batch_size, dataset_size), batch_latent.device) + + loss = self.fwd_bwd( + model_wrap, + batch_sigmas, + batch_noise, + batch_latent, + cond, + indicies, + extra_args, + dataset_size, + bwd=True, + ) + if self.loss_callback: + self.loss_callback(loss.item()) + pbar.set_postfix({"loss": f"{loss.item():.4f}"}) + + def _train_step_multires_mode(self, model_wrap, cond, extra_args, noisegen, latent_image, dataset_size, pbar): + """Execute one training step in multi-resolution mode (real_dataset is set).""" + indicies = torch.randperm(dataset_size)[: self.batch_size].tolist() + total_loss = 0 + for index in indicies: + single_latent = self.real_dataset[index].to(latent_image) + batch_noise = noisegen.generate_noise( + {"samples": single_latent} + ).to(single_latent.device) + batch_sigmas = ( + model_wrap.inner_model.model_sampling.percent_to_sigma( + torch.rand((1,)).item() + ) + ) + batch_sigmas = torch.tensor([batch_sigmas]).to(single_latent.device) + loss = self.fwd_bwd( + model_wrap, + batch_sigmas, + batch_noise, + single_latent, + cond, + [index], + extra_args, + dataset_size, + bwd=False, + ) + total_loss += loss + total_loss = total_loss / self.grad_acc / len(indicies) + total_loss.backward() + if self.loss_callback: + self.loss_callback(total_loss.item()) + pbar.set_postfix({"loss": f"{total_loss.item():.4f}"}) + def sample( self, model_wrap, @@ -142,70 +330,18 @@ 
class TrainSampler(comfy.samplers.Sampler): noisegen = comfy_extras.nodes_custom_sampler.Noise_RandomNoise( self.seed + i * 1000 ) - indicies = torch.randperm(dataset_size)[: self.batch_size].tolist() - if self.real_dataset is None: - batch_latent = torch.stack([latent_image[i] for i in indicies]) - batch_noise = noisegen.generate_noise({"samples": batch_latent}).to( - batch_latent.device - ) - batch_sigmas = [ - model_wrap.inner_model.model_sampling.percent_to_sigma( - torch.rand((1,)).item() - ) - for _ in range(min(self.batch_size, dataset_size)) - ] - batch_sigmas = torch.tensor(batch_sigmas).to(batch_latent.device) - - loss = self.fwd_bwd( - model_wrap, - batch_sigmas, - batch_noise, - batch_latent, - cond, - indicies, - extra_args, - dataset_size, - bwd=True, - ) - if self.loss_callback: - self.loss_callback(loss.item()) - pbar.set_postfix({"loss": f"{loss.item():.4f}"}) + if self.bucket_latents is not None: + self._train_step_bucket_mode(model_wrap, cond, extra_args, noisegen, latent_image, pbar) + elif self.real_dataset is None: + self._train_step_standard_mode(model_wrap, cond, extra_args, noisegen, latent_image, dataset_size, pbar) else: - total_loss = 0 - for index in indicies: - single_latent = self.real_dataset[index].to(latent_image) - batch_noise = noisegen.generate_noise( - {"samples": single_latent} - ).to(single_latent.device) - batch_sigmas = ( - model_wrap.inner_model.model_sampling.percent_to_sigma( - torch.rand((1,)).item() - ) - ) - batch_sigmas = torch.tensor([batch_sigmas]).to(single_latent.device) - loss = self.fwd_bwd( - model_wrap, - batch_sigmas, - batch_noise, - single_latent, - cond, - [index], - extra_args, - dataset_size, - bwd=False, - ) - total_loss += loss - total_loss = total_loss / self.grad_acc / len(indicies) - total_loss.backward() - if self.loss_callback: - self.loss_callback(total_loss.item()) - pbar.set_postfix({"loss": f"{total_loss.item():.4f}"}) + self._train_step_multires_mode(model_wrap, cond, extra_args, noisegen, latent_image, dataset_size, pbar) if (i + 1) % self.grad_acc == 0: self.optimizer.step() self.optimizer.zero_grad() - ui_pbar.update(1) + ui_pbar.update(1) torch.cuda.empty_cache() return torch.zeros_like(latent_image) @@ -283,6 +419,364 @@ def unpatch(m): del m.org_forward +def _process_latents_bucket_mode(latents): + """Process latents for bucket mode training. + + Args: + latents: list[{"samples": tensor}] where each tensor is (Bi, C, Hi, Wi) + + Returns: + list of latent tensors + """ + bucket_latents = [] + for latent_dict in latents: + bucket_latents.append(latent_dict["samples"]) # (Bi, C, Hi, Wi) + return bucket_latents + + +def _process_latents_standard_mode(latents): + """Process latents for standard (non-bucket) mode training. + + Args: + latents: list of latent dicts or single latent dict + + Returns: + Processed latents (tensor or list of tensors) + """ + if len(latents) == 1: + return latents[0]["samples"] # Single latent dict + + latent_list = [] + for latent in latents: + latent = latent["samples"] + bs = latent.shape[0] + if bs != 1: + for sub_latent in latent: + latent_list.append(sub_latent[None]) + else: + latent_list.append(latent) + return latent_list + + +def _process_conditioning(positive): + """Process conditioning - either single list or list of lists. 
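
# Sketch of the flattening rule in _process_latents_standard_mode above:
# latent dicts whose batch dimension is larger than one are split into
# singleton (1, C, H, W) entries so captions can later be paired 1:1.
import torch

latent_dicts = [{"samples": torch.zeros(2, 4, 32, 32)}, {"samples": torch.zeros(1, 4, 32, 32)}]
flat = []
for latent in (d["samples"] for d in latent_dicts):
    if latent.shape[0] != 1:
        flat.extend(sub[None] for sub in latent)   # (C, H, W) -> (1, C, H, W)
    else:
        flat.append(latent)
assert len(flat) == 3 and all(t.shape[0] == 1 for t in flat)
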
+ + Args: + positive: list of conditioning + + Returns: + Flattened conditioning list + """ + if len(positive) == 1: + return positive[0] # Single conditioning list + + # Multiple conditioning lists - flatten + flat_positive = [] + for cond in positive: + if isinstance(cond, list): + flat_positive.extend(cond) + else: + flat_positive.append(cond) + return flat_positive + + +def _prepare_latents_and_count(latents, dtype, bucket_mode): + """Convert latents to dtype and compute image counts. + + Args: + latents: Latents (tensor, list of tensors, or bucket list) + dtype: Target dtype + bucket_mode: Whether bucket mode is enabled + + Returns: + tuple: (processed_latents, num_images, multi_res) + """ + if bucket_mode: + # In bucket mode, latents is list of tensors (Bi, C, Hi, Wi) + latents = [t.to(dtype) for t in latents] + num_buckets = len(latents) + num_images = sum(t.shape[0] for t in latents) + multi_res = False # Not using multi_res path in bucket mode + + logging.info(f"Bucket mode: {num_buckets} buckets, {num_images} total samples") + for i, lat in enumerate(latents): + logging.info(f" Bucket {i}: shape {lat.shape}") + return latents, num_images, multi_res + + # Non-bucket mode + if isinstance(latents, list): + all_shapes = set() + latents = [t.to(dtype) for t in latents] + for latent in latents: + all_shapes.add(latent.shape) + logging.info(f"Latent shapes: {all_shapes}") + if len(all_shapes) > 1: + multi_res = True + else: + multi_res = False + latents = torch.cat(latents, dim=0) + num_images = len(latents) + elif isinstance(latents, torch.Tensor): + latents = latents.to(dtype) + num_images = latents.shape[0] + multi_res = False + else: + logging.error(f"Invalid latents type: {type(latents)}") + num_images = 0 + multi_res = False + + return latents, num_images, multi_res + + +def _validate_and_expand_conditioning(positive, num_images, bucket_mode): + """Validate conditioning count matches image count, expand if needed. + + Args: + positive: Conditioning list + num_images: Number of images + bucket_mode: Whether bucket mode is enabled + + Returns: + Validated/expanded conditioning list + + Raises: + ValueError: If conditioning count doesn't match image count + """ + if bucket_mode: + return positive # Skip validation in bucket mode + + logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}") + if len(positive) == 1 and num_images > 1: + return positive * num_images + elif len(positive) != num_images: + raise ValueError( + f"Number of positive conditions ({len(positive)}) does not match number of images ({num_images})." + ) + return positive + + +def _load_existing_lora(existing_lora): + """Load existing LoRA weights if provided. + + Args: + existing_lora: LoRA filename or "[None]" + + Returns: + tuple: (existing_weights dict, existing_steps int) + """ + if existing_lora == "[None]": + return {}, 0 + + lora_path = folder_paths.get_full_path_or_raise("loras", existing_lora) + # Extract steps from filename like "trained_lora_10_steps_20250225_203716" + existing_steps = int(existing_lora.split("_steps_")[0].split("_")[-1]) + existing_weights = {} + if lora_path: + existing_weights = comfy.utils.load_torch_file(lora_path) + return existing_weights, existing_steps + + +def _create_weight_adapter( + module, module_name, existing_weights, algorithm, lora_dtype, rank +): + """Create a weight adapter for a module with weight. 
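
# Sketch of the step-count recovery performed by _load_existing_lora above:
# the number of previously trained steps is parsed out of filenames shaped
# like "trained_lora_<steps>_steps_<timestamp>". Hypothetical filename below.
name = "trained_lora_10_steps_20250225_203716"
existing_steps = int(name.split("_steps_")[0].split("_")[-1])
assert existing_steps == 10
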
+ + Args: + module: The module to create adapter for + module_name: Name of the module + existing_weights: Dict of existing LoRA weights + algorithm: Algorithm name for new adapters + lora_dtype: dtype for LoRA weights + rank: Rank for new LoRA adapters + + Returns: + tuple: (train_adapter, lora_params dict) + """ + key = f"{module_name}.weight" + shape = module.weight.shape + lora_params = {} + + if len(shape) >= 2: + alpha = float(existing_weights.get(f"{key}.alpha", 1.0)) + dora_scale = existing_weights.get(f"{key}.dora_scale", None) + + # Try to load existing adapter + existing_adapter = None + for adapter_cls in adapters: + existing_adapter = adapter_cls.load( + module_name, existing_weights, alpha, dora_scale + ) + if existing_adapter is not None: + break + + if existing_adapter is None: + adapter_cls = adapter_maps[algorithm] + + if existing_adapter is not None: + train_adapter = existing_adapter.to_train().to(lora_dtype) + else: + # Use LoRA with alpha=1.0 by default + train_adapter = adapter_cls.create_train( + module.weight, rank=rank, alpha=1.0 + ).to(lora_dtype) + + for name, parameter in train_adapter.named_parameters(): + lora_params[f"{module_name}.{name}"] = parameter + + return train_adapter.train().requires_grad_(True), lora_params + else: + # 1D weight - use BiasDiff + diff = torch.nn.Parameter( + torch.zeros(module.weight.shape, dtype=lora_dtype, requires_grad=True) + ) + diff_module = BiasDiff(diff).train().requires_grad_(True) + lora_params[f"{module_name}.diff"] = diff + return diff_module, lora_params + + +def _create_bias_adapter(module, module_name, lora_dtype): + """Create a bias adapter for a module with bias. + + Args: + module: The module with bias + module_name: Name of the module + lora_dtype: dtype for LoRA weights + + Returns: + tuple: (bias_module, lora_params dict) + """ + bias = torch.nn.Parameter( + torch.zeros(module.bias.shape, dtype=lora_dtype, requires_grad=True) + ) + bias_module = BiasDiff(bias).train().requires_grad_(True) + lora_params = {f"{module_name}.diff_b": bias} + return bias_module, lora_params + + +def _setup_lora_adapters(mp, existing_weights, algorithm, lora_dtype, rank): + """Setup all LoRA adapters on the model. + + Args: + mp: Model patcher + existing_weights: Dict of existing LoRA weights + algorithm: Algorithm name for new adapters + lora_dtype: dtype for LoRA weights + rank: Rank for new LoRA adapters + + Returns: + tuple: (lora_sd dict, all_weight_adapters list) + """ + lora_sd = {} + all_weight_adapters = [] + + for n, m in mp.model.named_modules(): + if hasattr(m, "weight_function"): + if m.weight is not None: + adapter, params = _create_weight_adapter( + m, n, existing_weights, algorithm, lora_dtype, rank + ) + lora_sd.update(params) + key = f"{n}.weight" + mp.add_weight_wrapper(key, adapter) + all_weight_adapters.append(adapter) + + if hasattr(m, "bias") and m.bias is not None: + bias_adapter, bias_params = _create_bias_adapter(m, n, lora_dtype) + lora_sd.update(bias_params) + key = f"{n}.bias" + mp.add_weight_wrapper(key, bias_adapter) + all_weight_adapters.append(bias_adapter) + + return lora_sd, all_weight_adapters + + +def _create_optimizer(optimizer_name, parameters, learning_rate): + """Create optimizer based on name. 
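
# Equivalent table-driven form of the if/elif dispatch _create_optimizer uses
# just below; a hypothetical variant, not what this patch ships. Unlike the
# chain below, it raises on an unknown name instead of silently returning None.
import torch

OPTIMIZERS = {
    "Adam": torch.optim.Adam,
    "AdamW": torch.optim.AdamW,
    "SGD": torch.optim.SGD,
    "RMSprop": torch.optim.RMSprop,
}

def create_optimizer(name, parameters, learning_rate):
    try:
        return OPTIMIZERS[name](parameters, lr=learning_rate)
    except KeyError:
        raise ValueError(f"Unknown optimizer: {name}") from None
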
+ + Args: + optimizer_name: Name of optimizer ("Adam", "AdamW", "SGD", "RMSprop") + parameters: Parameters to optimize + learning_rate: Learning rate + + Returns: + Optimizer instance + """ + if optimizer_name == "Adam": + return torch.optim.Adam(parameters, lr=learning_rate) + elif optimizer_name == "AdamW": + return torch.optim.AdamW(parameters, lr=learning_rate) + elif optimizer_name == "SGD": + return torch.optim.SGD(parameters, lr=learning_rate) + elif optimizer_name == "RMSprop": + return torch.optim.RMSprop(parameters, lr=learning_rate) + + +def _create_loss_function(loss_function_name): + """Create loss function based on name. + + Args: + loss_function_name: Name of loss function ("MSE", "L1", "Huber", "SmoothL1") + + Returns: + Loss function instance + """ + if loss_function_name == "MSE": + return torch.nn.MSELoss() + elif loss_function_name == "L1": + return torch.nn.L1Loss() + elif loss_function_name == "Huber": + return torch.nn.HuberLoss() + elif loss_function_name == "SmoothL1": + return torch.nn.SmoothL1Loss() + + +def _run_training_loop( + guider, train_sampler, latents, num_images, seed, bucket_mode, multi_res +): + """Execute the training loop. + + Args: + guider: The guider object + train_sampler: The training sampler + latents: Latent tensors + num_images: Number of images + seed: Random seed + bucket_mode: Whether bucket mode is enabled + multi_res: Whether multi-resolution mode is enabled + """ + sigmas = torch.tensor(range(num_images)) + noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(seed) + + if bucket_mode: + # Use first bucket's first latent as dummy for guider + dummy_latent = latents[0][:1].repeat(num_images, 1, 1, 1) + guider.sample( + noise.generate_noise({"samples": dummy_latent}), + dummy_latent, + train_sampler, + sigmas, + seed=noise.seed, + ) + elif multi_res: + # use first latent as dummy latent if multi_res + latents = latents[0].repeat(num_images, 1, 1, 1) + guider.sample( + noise.generate_noise({"samples": latents}), + latents, + train_sampler, + sigmas, + seed=noise.seed, + ) + else: + guider.sample( + noise.generate_noise({"samples": latents}), + latents, + train_sampler, + sigmas, + seed=noise.seed, + ) + + class TrainLoraNode(io.ComfyNode): @classmethod def define_schema(cls): @@ -385,6 +879,11 @@ class TrainLoraNode(io.ComfyNode): default="[None]", tooltip="The existing LoRA to append to. Set to None for new LoRA.", ), + io.Boolean.Input( + "bucket_mode", + default=False, + tooltip="Enable resolution bucket mode. 
When enabled, expects pre-bucketed latents from ResolutionBucket node.", + ), ], outputs=[ io.Model.Output( @@ -419,6 +918,7 @@ class TrainLoraNode(io.ComfyNode): algorithm, gradient_checkpointing, existing_lora, + bucket_mode, ): # Extract scalars from lists (due to is_input_list=True) model = model[0] @@ -427,215 +927,125 @@ class TrainLoraNode(io.ComfyNode): grad_accumulation_steps = grad_accumulation_steps[0] learning_rate = learning_rate[0] rank = rank[0] - optimizer = optimizer[0] - loss_function = loss_function[0] + optimizer_name = optimizer[0] + loss_function_name = loss_function[0] seed = seed[0] training_dtype = training_dtype[0] lora_dtype = lora_dtype[0] algorithm = algorithm[0] gradient_checkpointing = gradient_checkpointing[0] existing_lora = existing_lora[0] + bucket_mode = bucket_mode[0] - # Handle latents - either single dict or list of dicts - if len(latents) == 1: - latents = latents[0]["samples"] # Single latent dict + # Process latents based on mode + if bucket_mode: + latents = _process_latents_bucket_mode(latents) else: - latent_list = [] - for latent in latents: - latent = latent["samples"] - bs = latent.shape[0] - if bs != 1: - for sub_latent in latent: - latent_list.append(sub_latent[None]) - else: - latent_list.append(latent) - latents = latent_list + latents = _process_latents_standard_mode(latents) - # Handle conditioning - either single list or list of lists - if len(positive) == 1: - positive = positive[0] # Single conditioning list - else: - # Multiple conditioning lists - flatten - flat_positive = [] - for cond in positive: - if isinstance(cond, list): - flat_positive.extend(cond) - else: - flat_positive.append(cond) - positive = flat_positive + # Process conditioning + positive = _process_conditioning(positive) + # Setup model and dtype mp = model.clone() dtype = node_helpers.string_to_torch_dtype(training_dtype) lora_dtype = node_helpers.string_to_torch_dtype(lora_dtype) mp.set_model_compute_dtype(dtype) - # latents here can be list of different size latent or one large batch - if isinstance(latents, list): - all_shapes = set() - latents = [t.to(dtype) for t in latents] - for latent in latents: - all_shapes.add(latent.shape) - logging.info(f"Latent shapes: {all_shapes}") - if len(all_shapes) > 1: - multi_res = True - else: - multi_res = False - latents = torch.cat(latents, dim=0) - num_images = len(latents) - elif isinstance(latents, torch.Tensor): - latents = latents.to(dtype) - num_images = latents.shape[0] - else: - logging.error(f"Invalid latents type: {type(latents)}") + # Prepare latents and compute counts + latents, num_images, multi_res = _prepare_latents_and_count( + latents, dtype, bucket_mode + ) - logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}") - if len(positive) == 1 and num_images > 1: - positive = positive * num_images - elif len(positive) != num_images: - raise ValueError( - f"Number of positive conditions ({len(positive)}) does not match number of images ({num_images})." 
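
# Sketch of the caption-broadcast rule this block enforces (and that the
# refactor moves into _validate_and_expand_conditioning): a single caption is
# reused for every image, otherwise the counts must match exactly.
def expand_conditioning(positive, num_images):
    if len(positive) == 1 and num_images > 1:
        return positive * num_images
    if len(positive) != num_images:
        raise ValueError(f"{len(positive)} conditions for {num_images} images")
    return positive

assert len(expand_conditioning(["a caption"], 3)) == 3
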
- ) + # Validate and expand conditioning + positive = _validate_and_expand_conditioning(positive, num_images, bucket_mode) with torch.inference_mode(False): - lora_sd = {} - generator = torch.Generator() - generator.manual_seed(seed) + # Setup models for training + mp.model.requires_grad_(False) # Load existing LoRA weights if provided - existing_weights = {} - existing_steps = 0 - if existing_lora != "[None]": - lora_path = folder_paths.get_full_path_or_raise("loras", existing_lora) - # Extract steps from filename like "trained_lora_10_steps_20250225_203716" - existing_steps = int(existing_lora.split("_steps_")[0].split("_")[-1]) - if lora_path: - existing_weights = comfy.utils.load_torch_file(lora_path) + existing_weights, existing_steps = _load_existing_lora(existing_lora) - all_weight_adapters = [] - for n, m in mp.model.named_modules(): - if hasattr(m, "weight_function"): - if m.weight is not None: - key = "{}.weight".format(n) - shape = m.weight.shape - if len(shape) >= 2: - alpha = float(existing_weights.get(f"{key}.alpha", 1.0)) - dora_scale = existing_weights.get(f"{key}.dora_scale", None) - for adapter_cls in adapters: - existing_adapter = adapter_cls.load( - n, existing_weights, alpha, dora_scale - ) - if existing_adapter is not None: - break - else: - existing_adapter = None - adapter_cls = adapter_maps[algorithm] + # Setup LoRA adapters + lora_sd, all_weight_adapters = _setup_lora_adapters( + mp, existing_weights, algorithm, lora_dtype, rank + ) - if existing_adapter is not None: - train_adapter = existing_adapter.to_train().to( - lora_dtype - ) - else: - # Use LoRA with alpha=1.0 by default - train_adapter = adapter_cls.create_train( - m.weight, rank=rank, alpha=1.0 - ).to(lora_dtype) - for name, parameter in train_adapter.named_parameters(): - lora_sd[f"{n}.{name}"] = parameter + # Create optimizer and loss function + optimizer = _create_optimizer( + optimizer_name, lora_sd.values(), learning_rate + ) + criterion = _create_loss_function(loss_function_name) - mp.add_weight_wrapper(key, train_adapter) - all_weight_adapters.append(train_adapter) - else: - diff = torch.nn.Parameter( - torch.zeros( - m.weight.shape, dtype=lora_dtype, requires_grad=True - ) - ) - diff_module = BiasDiff(diff) - mp.add_weight_wrapper(key, BiasDiff(diff)) - all_weight_adapters.append(diff_module) - lora_sd["{}.diff".format(n)] = diff - if hasattr(m, "bias") and m.bias is not None: - key = "{}.bias".format(n) - bias = torch.nn.Parameter( - torch.zeros( - m.bias.shape, dtype=lora_dtype, requires_grad=True - ) - ) - bias_module = BiasDiff(bias) - lora_sd["{}.diff_b".format(n)] = bias - mp.add_weight_wrapper(key, BiasDiff(bias)) - all_weight_adapters.append(bias_module) - - if optimizer == "Adam": - optimizer = torch.optim.Adam(lora_sd.values(), lr=learning_rate) - elif optimizer == "AdamW": - optimizer = torch.optim.AdamW(lora_sd.values(), lr=learning_rate) - elif optimizer == "SGD": - optimizer = torch.optim.SGD(lora_sd.values(), lr=learning_rate) - elif optimizer == "RMSprop": - optimizer = torch.optim.RMSprop(lora_sd.values(), lr=learning_rate) - - # Setup loss function based on selection - if loss_function == "MSE": - criterion = torch.nn.MSELoss() - elif loss_function == "L1": - criterion = torch.nn.L1Loss() - elif loss_function == "Huber": - criterion = torch.nn.HuberLoss() - elif loss_function == "SmoothL1": - criterion = torch.nn.SmoothL1Loss() - - # setup models + # Setup gradient checkpointing if gradient_checkpointing: for m in find_all_highest_child_module_with_forward( 
mp.model.diffusion_model
                ):
                    patch(m)
-            mp.model.requires_grad_(False)
+
+            torch.cuda.empty_cache()
+            # With force_full_load=False we should be able to have offloading
+            # But for offloading in training we need custom AutoGrad hooks for fwd/bwd
             comfy.model_management.load_models_gpu(
                 [mp], memory_required=1e20, force_full_load=True
             )
+            torch.cuda.empty_cache()

-            # Setup sampler and guider like in test script
+            # Setup loss tracking
             loss_map = {"loss": []}

             def loss_callback(loss):
                 loss_map["loss"].append(loss)

-            train_sampler = TrainSampler(
-                criterion,
-                optimizer,
-                loss_callback=loss_callback,
-                batch_size=batch_size,
-                grad_acc=grad_accumulation_steps,
-                total_steps=steps * grad_accumulation_steps,
-                seed=seed,
-                training_dtype=dtype,
-                real_dataset=latents if multi_res else None,
-            )
-            guider = comfy_extras.nodes_custom_sampler.Guider_Basic(mp)
-            guider.set_conds(positive)  # Set conditioning from input
+            # Create sampler
+            if bucket_mode:
+                train_sampler = TrainSampler(
+                    criterion,
+                    optimizer,
+                    loss_callback=loss_callback,
+                    batch_size=batch_size,
+                    grad_acc=grad_accumulation_steps,
+                    total_steps=steps * grad_accumulation_steps,
+                    seed=seed,
+                    training_dtype=dtype,
+                    bucket_latents=latents,
+                )
+            else:
+                train_sampler = TrainSampler(
+                    criterion,
+                    optimizer,
+                    loss_callback=loss_callback,
+                    batch_size=batch_size,
+                    grad_acc=grad_accumulation_steps,
+                    total_steps=steps * grad_accumulation_steps,
+                    seed=seed,
+                    training_dtype=dtype,
+                    real_dataset=latents if multi_res else None,
+                )

-            # Training loop
+            # Setup guider
+            guider = TrainGuider(mp)
+            guider.set_conds(positive)
+
+            # Run training loop
             try:
-                # Generate dummy sigmas and noise
-                sigmas = torch.tensor(range(num_images))
-                noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(seed)
-                if multi_res:
-                    # use first latent as dummy latent if multi_res
-                    latents = latents[0].repeat((num_images,) + ((1,) * (latents[0].ndim - 1)))
-                guider.sample(
-                    noise.generate_noise({"samples": latents}),
-                    latents,
+                _run_training_loop(
+                    guider,
                     train_sampler,
-                    sigmas,
-                    seed=noise.seed,
+                    latents,
+                    num_images,
+                    seed,
+                    bucket_mode,
+                    multi_res,
                 )
             finally:
                 for m in mp.model.modules():
                     unpatch(m)
                 del train_sampler, optimizer

+            # Finalize adapters
             for adapter in all_weight_adapters:
                 adapter.requires_grad_(False)
@@ -645,7 +1055,7 @@ class TrainLoraNode(io.ComfyNode):
         return io.NodeOutput(mp, lora_sd, loss_map, steps + existing_steps)

-class LoraModelLoader(io.ComfyNode):
+class LoraModelLoader(io.ComfyNode):
     @classmethod
     def define_schema(cls):
         return io.Schema(
From bf7dc63bd6acdedca67598856e05080d90eeec90 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 17 Dec 2025 20:29:32 -0800
Subject: [PATCH 1072/1073] skip_load_model -> force_full_load (#11390)

This should be a bit more clear and less prone to potential breakage if the
logic of the load models changes a bit.
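In practice the rename flips the mechanism rather than the behavior: instead of dropping the model from the load list when the caller manages loading itself, the model is now always passed through and the loader is told to load it fully. A minimal before/after sketch; the `load_models_gpu` stand-in below is hypothetical, defined only to keep the example self-contained:

    def load_models_gpu(models, force_full_load=False):
        # Stand-in for comfy.model_management.load_models_gpu.
        return {"models": list(models), "force_full_load": force_full_load}

    def prepare_old(model, extra, skip_load_model=False):
        models_list = [model] if not skip_load_model else []
        return load_models_gpu(models_list + extra)          # model may be absent

    def prepare_new(model, extra, force_full_load=False):
        return load_models_gpu([model] + extra, force_full_load=force_full_load)

    assert prepare_old("unet", ["cn"], skip_load_model=True)["models"] == ["cn"]
    assert prepare_new("unet", ["cn"], force_full_load=True)["models"] == ["unet", "cn"]
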
---
 comfy/sampler_helpers.py    | 9 ++++-----
 comfy_extras/nodes_train.py | 2 +-
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/comfy/sampler_helpers.py b/comfy/sampler_helpers.py
index e158e8a84..9134e6d71 100644
--- a/comfy/sampler_helpers.py
+++ b/comfy/sampler_helpers.py
@@ -122,21 +122,20 @@ def estimate_memory(model, noise_shape, conds):
     minimum_memory_required = model.model.memory_required([noise_shape[0]] + list(noise_shape[1:]), cond_shapes=cond_shapes_min)
     return memory_required, minimum_memory_required

-def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, skip_load_model=False):
+def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, force_full_load=False):
     executor = comfy.patcher_extension.WrapperExecutor.new_executor(
         _prepare_sampling,
         comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.PREPARE_SAMPLING, model_options, is_model_options=True)
     )
-    return executor.execute(model, noise_shape, conds, model_options=model_options, skip_load_model=skip_load_model)
+    return executor.execute(model, noise_shape, conds, model_options=model_options, force_full_load=force_full_load)

-def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, skip_load_model=False):
+def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, force_full_load=False):
     real_model: BaseModel = None
     models, inference_memory = get_additional_models(conds, model.model_dtype())
     models += get_additional_models_from_model_options(model_options)
     models += model.get_nested_additional_models()  # TODO: does this require inference_memory update?
     memory_required, minimum_memory_required = estimate_memory(model, noise_shape, conds)
-    models_list = [model] if not skip_load_model else []
-    comfy.model_management.load_models_gpu(models_list + models, memory_required=memory_required + inference_memory, minimum_memory_required=minimum_memory_required + inference_memory)
+    comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required + inference_memory, minimum_memory_required=minimum_memory_required + inference_memory, force_full_load=force_full_load)
     real_model = model.model

     return real_model, conds, models
diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py
index 88bc8c8e8..364804205 100644
--- a/comfy_extras/nodes_train.py
+++ b/comfy_extras/nodes_train.py
@@ -44,7 +44,7 @@ class TrainGuider(comfy_extras.nodes_custom_sampler.Guider_Basic):
                 noise.shape,
                 self.conds,
                 self.model_options,
-                skip_load_model=True,  # skip load model as we manage it in TrainLoraNode.execute()
+                force_full_load=True,  # mirror behavior in TrainLoraNode.execute() to keep model loaded
             )
         )
         device = self.model_patcher.load_device
From 1ca89b810e921efce95fb4d254a8c6c93180450b Mon Sep 17 00:00:00 2001
From: ric-yu
Date: Wed, 17 Dec 2025 21:44:31 -0800
Subject: [PATCH 1073/1073] Add unified jobs API with /api/jobs endpoints
 (#11054)

* feat: create a /jobs api to return queue and history jobs
* update unused vars
* include priority
* create jobs helper file
* fix ruff
* update how we set error message
* include execution error in both responses
* rename error -> failed, fix output shape
* re-use queue and history functions
* set workflow id
* allow sort by exec duration
* fix tests
* send priority and remove error msg
* use ws messages to get start and end times
* revert main.py fully
* refactor: move all /jobs business logic to jobs.py
* fix failing test
* remove some tests
*
fix non dict nodes * address comments * filter by workflow id and remove null fields * add clearer typing - remove get("..") or .. * refactor query params to top get_job(s) doc, add remove_sensitive_from_queue * add brief comment explaining why we skip animated * comment that format field is for frontend backward compatibility * fix whitespace --------- Co-authored-by: Jedrzej Kosinski Co-authored-by: guill --- comfy_execution/jobs.py | 291 ++++++++++++++++++++++++ server.py | 135 ++++++++++- tests/execution/test_execution.py | 134 +++++++++++ tests/execution/test_jobs.py | 361 ++++++++++++++++++++++++++++++ 4 files changed, 918 insertions(+), 3 deletions(-) create mode 100644 comfy_execution/jobs.py create mode 100644 tests/execution/test_jobs.py diff --git a/comfy_execution/jobs.py b/comfy_execution/jobs.py new file mode 100644 index 000000000..59fb49357 --- /dev/null +++ b/comfy_execution/jobs.py @@ -0,0 +1,291 @@ +""" +Job utilities for the /api/jobs endpoint. +Provides normalization and helper functions for job status tracking. +""" + +from typing import Optional + +from comfy_api.internal import prune_dict + + +class JobStatus: + """Job status constants.""" + PENDING = 'pending' + IN_PROGRESS = 'in_progress' + COMPLETED = 'completed' + FAILED = 'failed' + + ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED] + + +# Media types that can be previewed in the frontend +PREVIEWABLE_MEDIA_TYPES = frozenset({'images', 'video', 'audio'}) + +# 3D file extensions for preview fallback (no dedicated media_type exists) +THREE_D_EXTENSIONS = frozenset({'.obj', '.fbx', '.gltf', '.glb'}) + + +def _extract_job_metadata(extra_data: dict) -> tuple[Optional[int], Optional[str]]: + """Extract create_time and workflow_id from extra_data. + + Returns: + tuple: (create_time, workflow_id) + """ + create_time = extra_data.get('create_time') + extra_pnginfo = extra_data.get('extra_pnginfo', {}) + workflow_id = extra_pnginfo.get('workflow', {}).get('id') + return create_time, workflow_id + + +def is_previewable(media_type: str, item: dict) -> bool: + """ + Check if an output item is previewable. + Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts + Maintains backwards compatibility with existing logic. + + Priority: + 1. media_type is 'images', 'video', or 'audio' + 2. format field starts with 'video/' or 'audio/' + 3. filename has a 3D extension (.obj, .fbx, .gltf, .glb) + """ + if media_type in PREVIEWABLE_MEDIA_TYPES: + return True + + # Check format field (MIME type). + # Maintains backwards compatibility with how custom node outputs are handled in the frontend. + fmt = item.get('format', '') + if fmt and (fmt.startswith('video/') or fmt.startswith('audio/')): + return True + + # Check for 3D files by extension + filename = item.get('filename', '').lower() + if any(filename.endswith(ext) for ext in THREE_D_EXTENSIONS): + return True + + return False + + +def normalize_queue_item(item: tuple, status: str) -> dict: + """Convert queue item tuple to unified job dict. + + Expects item with sensitive data already removed (5 elements). + """ + priority, prompt_id, _, extra_data, _ = item + create_time, workflow_id = _extract_job_metadata(extra_data) + + return prune_dict({ + 'id': prompt_id, + 'status': status, + 'priority': priority, + 'create_time': create_time, + 'outputs_count': 0, + 'workflow_id': workflow_id, + }) + + +def normalize_history_item(prompt_id: str, history_item: dict, include_outputs: bool = False) -> dict: + """Convert history item dict to unified job dict. 
+ + History items have sensitive data already removed (prompt tuple has 5 elements). + """ + prompt_tuple = history_item['prompt'] + priority, _, prompt, extra_data, _ = prompt_tuple + create_time, workflow_id = _extract_job_metadata(extra_data) + + status_info = history_item.get('status', {}) + status_str = status_info.get('status_str') if status_info else None + if status_str == 'success': + status = JobStatus.COMPLETED + elif status_str == 'error': + status = JobStatus.FAILED + else: + status = JobStatus.COMPLETED + + outputs = history_item.get('outputs', {}) + outputs_count, preview_output = get_outputs_summary(outputs) + + execution_error = None + execution_start_time = None + execution_end_time = None + if status_info: + messages = status_info.get('messages', []) + for entry in messages: + if isinstance(entry, (list, tuple)) and len(entry) >= 2: + event_name, event_data = entry[0], entry[1] + if isinstance(event_data, dict): + if event_name == 'execution_start': + execution_start_time = event_data.get('timestamp') + elif event_name in ('execution_success', 'execution_error', 'execution_interrupted'): + execution_end_time = event_data.get('timestamp') + if event_name == 'execution_error': + execution_error = event_data + + job = prune_dict({ + 'id': prompt_id, + 'status': status, + 'priority': priority, + 'create_time': create_time, + 'execution_start_time': execution_start_time, + 'execution_end_time': execution_end_time, + 'execution_error': execution_error, + 'outputs_count': outputs_count, + 'preview_output': preview_output, + 'workflow_id': workflow_id, + }) + + if include_outputs: + job['outputs'] = outputs + job['execution_status'] = status_info + job['workflow'] = { + 'prompt': prompt, + 'extra_data': extra_data, + } + + return job + + +def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]: + """ + Count outputs and find preview in a single pass. + Returns (outputs_count, preview_output). + + Preview priority (matching frontend): + 1. type="output" with previewable media + 2. Any previewable media + """ + count = 0 + preview_output = None + fallback_preview = None + + for node_id, node_outputs in outputs.items(): + if not isinstance(node_outputs, dict): + continue + for media_type, items in node_outputs.items(): + # 'animated' is a boolean flag, not actual output items + if media_type == 'animated' or not isinstance(items, list): + continue + + for item in items: + if not isinstance(item, dict): + continue + count += 1 + + if preview_output is None and is_previewable(media_type, item): + enriched = { + **item, + 'nodeId': node_id, + 'mediaType': media_type + } + if item.get('type') == 'output': + preview_output = enriched + elif fallback_preview is None: + fallback_preview = enriched + + return count, preview_output or fallback_preview + + +def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]: + """Sort jobs list by specified field and order.""" + reverse = (sort_order == 'desc') + + if sort_by == 'execution_duration': + def get_sort_key(job): + start = job.get('execution_start_time', 0) + end = job.get('execution_end_time', 0) + return end - start if end and start else 0 + else: + def get_sort_key(job): + return job.get('create_time', 0) + + return sorted(jobs, key=get_sort_key, reverse=reverse) + + +def get_job(prompt_id: str, running: list, queued: list, history: dict) -> Optional[dict]: + """ + Get a single job by prompt_id from history or queue. 
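
# Condensed sketch of the lookup order implemented below: history wins, then
# the running list, then the pending queue. Queue items here are tuples whose
# second element is the prompt id (sensitive data already stripped); the real
# function additionally normalizes each hit into a unified job dict.
def find_job(prompt_id, running, queued, history):
    if prompt_id in history:
        return ("completed-or-failed", history[prompt_id])
    for status, items in (("in_progress", running), ("pending", queued)):
        for item in items:
            if item[1] == prompt_id:
                return (status, item)
    return None

assert find_job("p2", [(0, "p1", {}, {}, [])], [(1, "p2", {}, {}, [])], {})[0] == "pending"
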
+
+    Args:
+        prompt_id: The prompt ID to look up
+        running: List of currently running queue items
+        queued: List of pending queue items
+        history: Dict of history items keyed by prompt_id
+
+    Returns:
+        Job dict with full details, or None if not found
+    """
+    if prompt_id in history:
+        return normalize_history_item(prompt_id, history[prompt_id], include_outputs=True)
+
+    for item in running:
+        if item[1] == prompt_id:
+            return normalize_queue_item(item, JobStatus.IN_PROGRESS)
+
+    for item in queued:
+        if item[1] == prompt_id:
+            return normalize_queue_item(item, JobStatus.PENDING)
+
+    return None
+
+
+def get_all_jobs(
+    running: list,
+    queued: list,
+    history: dict,
+    status_filter: Optional[list[str]] = None,
+    workflow_id: Optional[str] = None,
+    sort_by: str = "created_at",
+    sort_order: str = "desc",
+    limit: Optional[int] = None,
+    offset: int = 0
+) -> tuple[list[dict], int]:
+    """
+    Get all jobs (running, pending, completed) with filtering and sorting.
+
+    Args:
+        running: List of currently running queue items
+        queued: List of pending queue items
+        history: Dict of history items keyed by prompt_id
+        status_filter: List of statuses to include (from JobStatus.ALL)
+        workflow_id: Filter by workflow ID
+        sort_by: Field to sort by ('created_at', 'execution_duration')
+        sort_order: 'asc' or 'desc'
+        limit: Maximum number of items to return
+        offset: Number of items to skip
+
+    Returns:
+        tuple: (jobs_list, total_count)
+    """
+    jobs = []
+
+    if status_filter is None:
+        status_filter = JobStatus.ALL
+
+    if JobStatus.IN_PROGRESS in status_filter:
+        for item in running:
+            jobs.append(normalize_queue_item(item, JobStatus.IN_PROGRESS))
+
+    if JobStatus.PENDING in status_filter:
+        for item in queued:
+            jobs.append(normalize_queue_item(item, JobStatus.PENDING))
+
+    include_completed = JobStatus.COMPLETED in status_filter
+    include_failed = JobStatus.FAILED in status_filter
+    if include_completed or include_failed:
+        for prompt_id, history_item in history.items():
+            is_failed = history_item.get('status', {}).get('status_str') == 'error'
+            if (is_failed and include_failed) or (not is_failed and include_completed):
+                jobs.append(normalize_history_item(prompt_id, history_item))
+
+    if workflow_id:
+        jobs = [j for j in jobs if j.get('workflow_id') == workflow_id]
+
+    jobs = apply_sorting(jobs, sort_by, sort_order)
+
+    total_count = len(jobs)
+
+    if offset > 0:
+        jobs = jobs[offset:]
+    if limit is not None:
+        jobs = jobs[:limit]
+
+    return (jobs, total_count)
diff --git a/server.py b/server.py
index ac4f42222..c27f8be7d 100644
--- a/server.py
+++ b/server.py
@@ -7,6 +7,7 @@ import time
 import nodes
 import folder_paths
 import execution
+from comfy_execution.jobs import JobStatus, get_job, get_all_jobs
 import uuid
 import urllib
 import json
@@ -47,6 +48,12 @@ from middleware.cache_middleware import cache_control
 if args.enable_manager:
     import comfyui_manager
 
+
+def _remove_sensitive_from_queue(queue: list) -> list:
+    """Remove sensitive data (index 5) from queue item tuples."""
+    return [item[:5] for item in queue]
+
+
 async def send_socket_catch_exception(function, message):
     try:
         await function(message)
@@ -694,6 +701,129 @@ class PromptServer():
             out[node_class] = node_info(node_class)
             return web.json_response(out)
 
+        @routes.get("/api/jobs")
+        async def get_jobs(request):
+            """List all jobs with filtering, sorting, and pagination.
+
+            Query parameters:
+                status: Filter by status (comma-separated): pending, in_progress, completed, failed
+                workflow_id: Filter by workflow ID
+                sort_by: Sort field: created_at (default), execution_duration
+                sort_order: Sort direction: asc, desc (default)
+                limit: Max items to return (positive integer)
+                offset: Items to skip (non-negative integer, default 0)
+            """
+            query = request.rel_url.query
+
+            status_param = query.get('status')
+            workflow_id = query.get('workflow_id')
+            sort_by = query.get('sort_by', 'created_at').lower()
+            sort_order = query.get('sort_order', 'desc').lower()
+
+            status_filter = None
+            if status_param:
+                status_filter = [s.strip().lower() for s in status_param.split(',') if s.strip()]
+                invalid_statuses = [s for s in status_filter if s not in JobStatus.ALL]
+                if invalid_statuses:
+                    return web.json_response(
+                        {"error": f"Invalid status value(s): {', '.join(invalid_statuses)}. Valid values: {', '.join(JobStatus.ALL)}"},
+                        status=400
+                    )
+
+            if sort_by not in {'created_at', 'execution_duration'}:
+                return web.json_response(
+                    {"error": "sort_by must be 'created_at' or 'execution_duration'"},
+                    status=400
+                )
+
+            if sort_order not in {'asc', 'desc'}:
+                return web.json_response(
+                    {"error": "sort_order must be 'asc' or 'desc'"},
+                    status=400
+                )
+
+            limit = None
+
+            # If limit is provided, validate that it is a positive integer, else continue without a limit
+            if 'limit' in query:
+                try:
+                    limit = int(query.get('limit'))
+                    if limit <= 0:
+                        return web.json_response(
+                            {"error": "limit must be a positive integer"},
+                            status=400
+                        )
+                except (ValueError, TypeError):
+                    return web.json_response(
+                        {"error": "limit must be an integer"},
+                        status=400
+                    )
+
+            offset = 0
+            if 'offset' in query:
+                try:
+                    offset = int(query.get('offset'))
+                    if offset < 0:
+                        offset = 0
+                except (ValueError, TypeError):
+                    return web.json_response(
+                        {"error": "offset must be an integer"},
+                        status=400
+                    )
+
+            running, queued = self.prompt_queue.get_current_queue_volatile()
+            history = self.prompt_queue.get_history()
+
+            running = _remove_sensitive_from_queue(running)
+            queued = _remove_sensitive_from_queue(queued)
+
+            jobs, total = get_all_jobs(
+                running, queued, history,
+                status_filter=status_filter,
+                workflow_id=workflow_id,
+                sort_by=sort_by,
+                sort_order=sort_order,
+                limit=limit,
+                offset=offset
+            )
+
+            has_more = (offset + len(jobs)) < total
+
+            return web.json_response({
+                'jobs': jobs,
+                'pagination': {
+                    'offset': offset,
+                    'limit': limit,
+                    'total': total,
+                    'has_more': has_more
+                }
+            })
+
+        @routes.get("/api/jobs/{job_id}")
+        async def get_job_by_id(request):
+            """Get a single job by ID."""
+            job_id = request.match_info.get("job_id", None)
+            if not job_id:
+                return web.json_response(
+                    {"error": "job_id is required"},
+                    status=400
+                )
+
+            running, queued = self.prompt_queue.get_current_queue_volatile()
+            history = self.prompt_queue.get_history(prompt_id=job_id)
+
+            running = _remove_sensitive_from_queue(running)
+            queued = _remove_sensitive_from_queue(queued)
+
+            job = get_job(job_id, running, queued, history)
+            if job is None:
+                return web.json_response(
+                    {"error": "Job not found"},
+                    status=404
+                )
+
+            return web.json_response(job)
+
         @routes.get("/history")
         async def get_history(request):
             max_items = request.rel_url.query.get("max_items", None)
@@ -717,9 +847,8 @@ class PromptServer():
         async def get_queue(request):
             queue_info = {}
             current_queue = self.prompt_queue.get_current_queue_volatile()
-            remove_sensitive = lambda queue: [x[:5] for x in queue]
-            queue_info['queue_running'] = remove_sensitive(current_queue[0])
-            queue_info['queue_pending'] = remove_sensitive(current_queue[1])
+            queue_info['queue_running'] = _remove_sensitive_from_queue(current_queue[0])
+            queue_info['queue_pending'] = _remove_sensitive_from_queue(current_queue[1])
             return web.json_response(queue_info)
 
         @routes.post("/prompt")
diff --git a/tests/execution/test_execution.py b/tests/execution/test_execution.py
index ace0d2279..f73ca7e3c 100644
--- a/tests/execution/test_execution.py
+++ b/tests/execution/test_execution.py
@@ -99,6 +99,37 @@ class ComfyClient:
         with urllib.request.urlopen(url) as response:
             return json.loads(response.read())
 
+    def get_jobs(self, status=None, limit=None, offset=None, sort_by=None, sort_order=None):
+        url = "http://{}/api/jobs".format(self.server_address)
+        params = {}
+        if status is not None:
+            params["status"] = status
+        if limit is not None:
+            params["limit"] = limit
+        if offset is not None:
+            params["offset"] = offset
+        if sort_by is not None:
+            params["sort_by"] = sort_by
+        if sort_order is not None:
+            params["sort_order"] = sort_order
+
+        if params:
+            url_values = urllib.parse.urlencode(params)
+            url = "{}?{}".format(url, url_values)
+
+        with urllib.request.urlopen(url) as response:
+            return json.loads(response.read())
+
+    def get_job(self, job_id):
+        url = "http://{}/api/jobs/{}".format(self.server_address, job_id)
+        try:
+            with urllib.request.urlopen(url) as response:
+                return json.loads(response.read())
+        except urllib.error.HTTPError as e:
+            if e.code == 404:
+                return None
+            raise
+
     def set_test_name(self, name):
         self.test_name = name
 
@@ -877,3 +908,106 @@ class TestExecution:
 
         result = client.get_all_history(max_items=5, offset=len(all_history) - 1)
         assert len(result) <= 1, "Should return at most 1 item when offset is near end"
+
+    # Jobs API tests
+    def test_jobs_api_job_structure(
+        self, client: ComfyClient, builder: GraphBuilder
+    ):
+        """Test that job objects have required fields"""
+        self._create_history_item(client, builder)
+
+        jobs_response = client.get_jobs(status="completed", limit=1)
+        assert len(jobs_response["jobs"]) > 0, "Should have at least one job"
+
+        job = jobs_response["jobs"][0]
+        assert "id" in job, "Job should have id"
+        assert "status" in job, "Job should have status"
+        assert "create_time" in job, "Job should have create_time"
+        assert "outputs_count" in job, "Job should have outputs_count"
+        assert "preview_output" in job, "Job should have preview_output"
+
+    def test_jobs_api_preview_output_structure(
+        self, client: ComfyClient, builder: GraphBuilder
+    ):
+        """Test that preview_output has correct structure"""
+        self._create_history_item(client, builder)
+
+        jobs_response = client.get_jobs(status="completed", limit=1)
+        job = jobs_response["jobs"][0]
+
+        if job["preview_output"] is not None:
+            preview = job["preview_output"]
+            assert "filename" in preview, "Preview should have filename"
+            assert "nodeId" in preview, "Preview should have nodeId"
+            assert "mediaType" in preview, "Preview should have mediaType"
+
+    def test_jobs_api_pagination(
+        self, client: ComfyClient, builder: GraphBuilder
+    ):
+        """Test jobs API pagination"""
+        for _ in range(5):
+            self._create_history_item(client, builder)
+
+        first_page = client.get_jobs(limit=2, offset=0)
+        second_page = client.get_jobs(limit=2, offset=2)
+
+        assert len(first_page["jobs"]) <= 2, "First page should have at most 2 jobs"
+        assert len(second_page["jobs"]) <= 2, "Second page should have at most 2 jobs"
+
+        first_ids = {j["id"] for j in first_page["jobs"]}
+        second_ids = {j["id"] for j in second_page["jobs"]}
+        assert first_ids.isdisjoint(second_ids), "Pages should have different jobs"
+
+    def test_jobs_api_sorting(
+        self, client: ComfyClient, builder: GraphBuilder
+    ):
+        """Test jobs API sorting"""
+        for _ in range(3):
+            self._create_history_item(client, builder)
+
+        desc_jobs = client.get_jobs(sort_order="desc")
+        asc_jobs = client.get_jobs(sort_order="asc")
+
+        if len(desc_jobs["jobs"]) >= 2:
+            desc_times = [j["create_time"] for j in desc_jobs["jobs"] if j["create_time"]]
+            asc_times = [j["create_time"] for j in asc_jobs["jobs"] if j["create_time"]]
+            if len(desc_times) >= 2:
+                assert desc_times == sorted(desc_times, reverse=True), "Desc should be newest first"
+            if len(asc_times) >= 2:
+                assert asc_times == sorted(asc_times), "Asc should be oldest first"
+
+    def test_jobs_api_status_filter(
+        self, client: ComfyClient, builder: GraphBuilder
+    ):
+        """Test jobs API status filtering"""
+        self._create_history_item(client, builder)
+
+        completed_jobs = client.get_jobs(status="completed")
+        assert len(completed_jobs["jobs"]) > 0, "Should have completed jobs from history"
+
+        for job in completed_jobs["jobs"]:
+            assert job["status"] == "completed", "Should only return completed jobs"
+
+        # Pending jobs are transient - just verify filter doesn't error
+        pending_jobs = client.get_jobs(status="pending")
+        for job in pending_jobs["jobs"]:
+            assert job["status"] == "pending", "Should only return pending jobs"
+
+    def test_get_job_by_id(
+        self, client: ComfyClient, builder: GraphBuilder
+    ):
+        """Test getting a single job by ID"""
+        result = self._create_history_item(client, builder)
+        prompt_id = result.get_prompt_id()
+
+        job = client.get_job(prompt_id)
+        assert job is not None, "Should find the job"
+        assert job["id"] == prompt_id, "Job ID should match"
+        assert "outputs" in job, "Single job should include outputs"
+
+    def test_get_job_not_found(
+        self, client: ComfyClient, builder: GraphBuilder
+    ):
+        """Test getting a non-existent job returns 404"""
+        job = client.get_job("nonexistent-job-id")
+        assert job is None, "Non-existent job should return None"
diff --git a/tests/execution/test_jobs.py b/tests/execution/test_jobs.py
new file mode 100644
index 000000000..918c8080a
--- /dev/null
+++ b/tests/execution/test_jobs.py
@@ -0,0 +1,361 @@
+"""Unit tests for comfy_execution/jobs.py"""
+
+from comfy_execution.jobs import (
+    JobStatus,
+    is_previewable,
+    normalize_queue_item,
+    normalize_history_item,
+    get_outputs_summary,
+    apply_sorting,
+)
+
+
+class TestJobStatus:
+    """Test JobStatus constants."""
+
+    def test_status_values(self):
+        """Status constants should have expected string values."""
+        assert JobStatus.PENDING == 'pending'
+        assert JobStatus.IN_PROGRESS == 'in_progress'
+        assert JobStatus.COMPLETED == 'completed'
+        assert JobStatus.FAILED == 'failed'
+
+    def test_all_contains_all_statuses(self):
+        """ALL should contain all status values."""
+        assert JobStatus.PENDING in JobStatus.ALL
+        assert JobStatus.IN_PROGRESS in JobStatus.ALL
+        assert JobStatus.COMPLETED in JobStatus.ALL
+        assert JobStatus.FAILED in JobStatus.ALL
+        assert len(JobStatus.ALL) == 4
+
+
+class TestIsPreviewable:
+    """Unit tests for is_previewable()"""
+
+    def test_previewable_media_types(self):
+        """Images, video, audio media types should be previewable."""
+        for media_type in ['images', 'video', 'audio']:
+            assert is_previewable(media_type, {}) is True
+
+    def test_non_previewable_media_types(self):
+        """Other media types should not be previewable."""
+        for media_type in ['latents', 'text', 'metadata', 'files']:
+            assert is_previewable(media_type, {}) is False
+
+    def test_3d_extensions_previewable(self):
+        """3D file extensions should be previewable regardless of media_type."""
+        for ext in ['.obj', '.fbx', '.gltf', '.glb']:
+            item = {'filename': f'model{ext}'}
+            assert is_previewable('files', item) is True
+
+    def test_3d_extensions_case_insensitive(self):
+        """3D extension check should be case insensitive."""
+        item = {'filename': 'MODEL.GLB'}
+        assert is_previewable('files', item) is True
+
+    def test_video_format_previewable(self):
+        """Items with video/ format should be previewable."""
+        item = {'format': 'video/mp4'}
+        assert is_previewable('files', item) is True
+
+    def test_audio_format_previewable(self):
+        """Items with audio/ format should be previewable."""
+        item = {'format': 'audio/wav'}
+        assert is_previewable('files', item) is True
+
+    def test_other_format_not_previewable(self):
+        """Items with other format should not be previewable."""
+        item = {'format': 'application/json'}
+        assert is_previewable('files', item) is False
+
+
+class TestGetOutputsSummary:
+    """Unit tests for get_outputs_summary()"""
+
+    def test_empty_outputs(self):
+        """Empty outputs should return 0 count and None preview."""
+        count, preview = get_outputs_summary({})
+        assert count == 0
+        assert preview is None
+
+    def test_counts_across_multiple_nodes(self):
+        """Outputs from multiple nodes should all be counted."""
+        outputs = {
+            'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},
+            'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},
+            'node3': {'images': [
+                {'filename': 'c.png', 'type': 'output'},
+                {'filename': 'd.png', 'type': 'output'}
+            ]}
+        }
+        count, preview = get_outputs_summary(outputs)
+        assert count == 4
+
+    def test_skips_animated_key_and_non_list_values(self):
+        """The 'animated' key and non-list values should be skipped."""
+        outputs = {
+            'node1': {
+                'images': [{'filename': 'test.png', 'type': 'output'}],
+                'animated': [True],  # Should skip due to key name
+                'metadata': 'string',  # Should skip due to non-list
+                'count': 42  # Should skip due to non-list
+            }
+        }
+        count, preview = get_outputs_summary(outputs)
+        assert count == 1
+
+    def test_preview_prefers_type_output(self):
+        """Items with type='output' should be preferred for preview."""
+        outputs = {
+            'node1': {
+                'images': [
+                    {'filename': 'temp.png', 'type': 'temp'},
+                    {'filename': 'output.png', 'type': 'output'}
+                ]
+            }
+        }
+        count, preview = get_outputs_summary(outputs)
+        assert count == 2
+        assert preview['filename'] == 'output.png'
+
+    def test_preview_fallback_when_no_output_type(self):
+        """If no type='output', should use first previewable."""
+        outputs = {
+            'node1': {
+                'images': [
+                    {'filename': 'temp1.png', 'type': 'temp'},
+                    {'filename': 'temp2.png', 'type': 'temp'}
+                ]
+            }
+        }
+        count, preview = get_outputs_summary(outputs)
+        assert preview['filename'] == 'temp1.png'
+
+    def test_non_previewable_media_types_counted_but_no_preview(self):
+        """Non-previewable media types should be counted but not used as preview."""
+        outputs = {
+            'node1': {
+                'latents': [
+                    {'filename': 'latent1.safetensors'},
+                    {'filename': 'latent2.safetensors'}
+                ]
+            }
+        }
+        count, preview = get_outputs_summary(outputs)
+        assert count == 2
+        assert preview is None
+
+    def test_previewable_media_types(self):
+        """Images, video, and audio media types should be previewable."""
+        for media_type in ['images', 'video', 'audio']:
+            outputs = {
+                'node1': {
+                    media_type: [{'filename': 'test.file', 'type': 'output'}]
+                }
+            }
+            count, preview = get_outputs_summary(outputs)
+            assert preview is not None, f"{media_type} should be previewable"
+
+    def test_3d_files_previewable(self):
+        """3D file extensions should be previewable."""
+        for ext in ['.obj', '.fbx', '.gltf', '.glb']:
+            outputs = {
+                'node1': {
+                    'files': [{'filename': f'model{ext}', 'type': 'output'}]
+                }
+            }
+            count, preview = get_outputs_summary(outputs)
+            assert preview is not None, f"3D file {ext} should be previewable"
+
+    def test_format_mime_type_previewable(self):
+        """Files with video/ or audio/ format should be previewable."""
+        for fmt in ['video/x-custom', 'audio/x-custom']:
+            outputs = {
+                'node1': {
+                    'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]
+                }
+            }
+            count, preview = get_outputs_summary(outputs)
+            assert preview is not None, f"Format {fmt} should be previewable"
+
+    def test_preview_enriched_with_node_metadata(self):
+        """Preview should include nodeId, mediaType, and original fields."""
+        outputs = {
+            'node123': {
+                'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]
+            }
+        }
+        count, preview = get_outputs_summary(outputs)
+        assert preview['nodeId'] == 'node123'
+        assert preview['mediaType'] == 'images'
+        assert preview['subfolder'] == 'outputs'
+
+
+class TestApplySorting:
+    """Unit tests for apply_sorting()"""
+
+    def test_sort_by_create_time_desc(self):
+        """Default sort by create_time descending."""
+        jobs = [
+            {'id': 'a', 'create_time': 100},
+            {'id': 'b', 'create_time': 300},
+            {'id': 'c', 'create_time': 200},
+        ]
+        result = apply_sorting(jobs, 'created_at', 'desc')
+        assert [j['id'] for j in result] == ['b', 'c', 'a']
+
+    def test_sort_by_create_time_asc(self):
+        """Sort by create_time ascending."""
+        jobs = [
+            {'id': 'a', 'create_time': 100},
+            {'id': 'b', 'create_time': 300},
+            {'id': 'c', 'create_time': 200},
+        ]
+        result = apply_sorting(jobs, 'created_at', 'asc')
+        assert [j['id'] for j in result] == ['a', 'c', 'b']
+
+    def test_sort_by_execution_duration(self):
+        """Sort by execution_duration should order by duration."""
+        jobs = [
+            {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},  # 5s
+            {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300},  # 1s
+            {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},  # 3s
+        ]
+        result = apply_sorting(jobs, 'execution_duration', 'desc')
+        assert [j['id'] for j in result] == ['a', 'c', 'b']
+
+    def test_sort_with_none_values(self):
+        """Jobs with None values should sort as 0."""
+        jobs = [
+            {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},
+            {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},
+            {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},
+        ]
+        result = apply_sorting(jobs, 'execution_duration', 'asc')
+        assert result[0]['id'] == 'b'  # None treated as 0, comes first
+
+
+class TestNormalizeQueueItem:
+    """Unit tests for normalize_queue_item()"""
+
+    def test_basic_normalization(self):
+        """Queue item should be normalized to job dict."""
+        item = (
+            10,  # priority/number
+            'prompt-123',  # prompt_id
+            {'nodes': {}},  # prompt
+            {
+                'create_time': 1234567890,
+                'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}
+            },  # extra_data
+            ['node1'],  # outputs_to_execute
+        )
+        job = normalize_queue_item(item, JobStatus.PENDING)
+
+        assert job['id'] == 'prompt-123'
+        assert job['status'] == 'pending'
+        assert job['priority'] == 10
+        assert job['create_time'] == 1234567890
+        assert 'execution_start_time' not in job
+        assert 'execution_end_time' not in job
+        assert 'execution_error' not in job
+        assert 'preview_output' not in job
+        assert job['outputs_count'] == 0
+        assert job['workflow_id'] == 'workflow-abc'
+
+
+class TestNormalizeHistoryItem:
+    """Unit tests for normalize_history_item()"""
+
+    def test_completed_job(self):
+        """Completed history item should have correct status and times from messages."""
+        history_item = {
+            'prompt': (
+                5,  # priority
+                'prompt-456',
+                {'nodes': {}},
+                {
+                    'create_time': 1234567890000,
+                    'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}
+                },
+                ['node1'],
+            ),
+            'status': {
+                'status_str': 'success',
+                'completed': True,
+                'messages': [
+                    ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),
+                    ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),
+                ]
+            },
+            'outputs': {},
+        }
+        job = normalize_history_item('prompt-456', history_item)
+
+        assert job['id'] == 'prompt-456'
+        assert job['status'] == 'completed'
+        assert job['priority'] == 5
+        assert job['execution_start_time'] == 1234567890500
+        assert job['execution_end_time'] == 1234567893000
+        assert job['workflow_id'] == 'workflow-xyz'
+
+    def test_failed_job(self):
+        """Failed history item should have failed status and error from messages."""
+        history_item = {
+            'prompt': (
+                5,
+                'prompt-789',
+                {'nodes': {}},
+                {'create_time': 1234567890000},
+                ['node1'],
+            ),
+            'status': {
+                'status_str': 'error',
+                'completed': False,
+                'messages': [
+                    ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),
+                    ('execution_error', {
+                        'prompt_id': 'prompt-789',
+                        'node_id': '5',
+                        'node_type': 'KSampler',
+                        'exception_message': 'CUDA out of memory',
+                        'exception_type': 'RuntimeError',
+                        'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],
+                        'timestamp': 1234567891000,
+                    })
+                ]
+            },
+            'outputs': {},
+        }
+
+        job = normalize_history_item('prompt-789', history_item)
+        assert job['status'] == 'failed'
+        assert job['execution_start_time'] == 1234567890500
+        assert job['execution_end_time'] == 1234567891000
+        assert job['execution_error']['node_id'] == '5'
+        assert job['execution_error']['node_type'] == 'KSampler'
+        assert job['execution_error']['exception_message'] == 'CUDA out of memory'
+
+    def test_include_outputs(self):
+        """When include_outputs=True, should include full output data."""
+        history_item = {
+            'prompt': (
+                5,
+                'prompt-123',
+                {'nodes': {'1': {}}},
+                {'create_time': 1234567890, 'client_id': 'abc'},
+                ['node1'],
+            ),
+            'status': {'status_str': 'success', 'completed': True, 'messages': []},
+            'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},
+        }
+        job = normalize_history_item('prompt-123', history_item, include_outputs=True)
+
+        assert 'outputs' in job
+        assert 'workflow' in job
+        assert 'execution_status' in job
+        assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}
+        assert job['workflow'] == {
+            'prompt': {'nodes': {'1': {}}},
+            'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},
+        }
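
The /api/jobs route above can be driven with the standard library alone, in the same urllib style as the ComfyClient test helper. A minimal sketch of a paginated listing; the server address is an assumption, not something this patch defines:

    import json
    import urllib.parse
    import urllib.request

    SERVER = "127.0.0.1:8188"  # assumed local ComfyUI address, not part of the patch

    def list_jobs(**params):
        # Accepted keys per the route docstring: status, workflow_id,
        # sort_by ('created_at' or 'execution_duration'),
        # sort_order ('asc'/'desc'), limit (positive int), offset (non-negative int).
        query = {k: v for k, v in params.items() if v is not None}
        url = "http://{}/api/jobs".format(SERVER)
        if query:
            url = "{}?{}".format(url, urllib.parse.urlencode(query))
        with urllib.request.urlopen(url) as response:
            return json.loads(response.read())

    # Walk every completed job using the pagination envelope the route returns.
    offset = 0
    while True:
        page = list_jobs(status="completed", limit=50, offset=offset)
        for job in page["jobs"]:
            print(job["id"], job.get("create_time"), job.get("outputs_count"))
        if not page["jobs"] or not page["pagination"]["has_more"]:
            break
        offset += len(page["jobs"])

Since has_more is computed server-side as (offset + len(jobs)) < total, advancing the offset by the page length, as above, walks the full list.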
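For a single job, GET /api/jobs/{job_id} returns the history entry with include_outputs=True (full outputs, workflow, and execution_status) and 404 when the id is unknown. A sketch under the same assumed address, also deriving the wall-clock duration that apply_sorting's execution_duration key uses:

    import json
    import urllib.error
    import urllib.request

    def fetch_job(job_id, server="127.0.0.1:8188"):  # address assumed as above
        url = "http://{}/api/jobs/{}".format(server, job_id)
        try:
            with urllib.request.urlopen(url) as response:
                return json.loads(response.read())
        except urllib.error.HTTPError as e:
            if e.code == 404:
                return None  # mirror the test client: unknown id -> None
            raise

    job = fetch_job("some-prompt-id")  # hypothetical id
    if job is not None:
        # Same arithmetic as apply_sorting's execution_duration sort key:
        start = job.get("execution_start_time", 0)
        end = job.get("execution_end_time", 0)
        duration = end - start if end and start else 0
        if job["status"] == "failed":
            err = job.get("execution_error", {})
            print(err.get("node_type"), err.get("exception_message"), duration)
        else:
            print(job.get("outputs_count"), duration)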
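The preview-selection rules are also easy to check in isolation, since comfy_execution/jobs.py has no server dependencies. An illustrative snippet with made-up values, exercising the same priorities the unit tests above assert:

    from comfy_execution.jobs import get_outputs_summary, is_previewable

    outputs = {
        '9': {
            'latents': [{'filename': 'latent.safetensors'}],  # counted, never previewed
            'images': [
                {'filename': 'tmp.png', 'type': 'temp'},      # previewable, fallback only
                {'filename': 'final.png', 'type': 'output'},  # preferred: type == 'output'
            ],
            'animated': [False],                              # boolean flag, skipped entirely
        }
    }
    count, preview = get_outputs_summary(outputs)
    assert count == 3  # one latent + two images; 'animated' is not an output item
    assert preview['filename'] == 'final.png'
    assert preview['nodeId'] == '9' and preview['mediaType'] == 'images'
    assert is_previewable('files', {'filename': 'scene.GLB'})  # 3D fallback, case-insensitive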